diff -Nru juju-core-2.0~beta15/debian/changelog juju-core-2.0.0/debian/changelog --- juju-core-2.0~beta15/debian/changelog 2016-08-22 12:22:41.000000000 +0000 +++ juju-core-2.0.0/debian/changelog 2016-10-31 19:55:49.000000000 +0000 @@ -1,3 +1,38 @@ +juju-core (2.0.0-0ubuntu0.16.04.2) xenial; urgency=medium + + * DPKG_MAINTSCRIPT_ARCH doesn't work in .config, use dpkg check (LP: #1614969) + + -- Nicholas Skaggs Mon, 31 Oct 2016 15:55:49 -0400 + +juju-core (2.0.0-0ubuntu0.16.04.1) xenial-proposed; urgency=medium + + [ Nicholas Skaggs ] + * New upstream release 2.0.0 (LP: #1617440) + * Disable future manual provider test (LP: #1629376) + * Add sysctl files for lxd provider (LP: #1631038) + * Juju can interact with LXD > 2.0 (LP: #1614559) + * Change LXD to depends to ensure autopkgtest run on LXD upload (LP: #1614724) + * d/copyright updated for 2.0.0 vendored packages. + * Update bootstrap order for autpopkgtests for juju cli changes + * Display debconf note upon install or upgrade for unsupported architectures + * Skip autopkgtests on unsupported arches + * Fix lxd-provider test to utilize *_proxy settings and provide additional info + * Add template and config files for debconf messaging + * Remove all quilt patches + * Add upstream signing key + * Update watch file to point to lp.net/juju to reflect project move + * Add po-debconf dependency for template translation support + + [ Mathieu Trudel-Lapierre ] + * debian/*.postinst: use DPKG_MAINTSCRIPT_ARCH to detect architectures + * debian/control: Depends on golang-golang-x-crypto-dev + (>= 1:0.0~git20161012.0.5f3178) + + [Curtis Hovey] + * d/t/setup-lxd.sh support lxd 2.0 and 2.3 network configuration in tests + + -- Nicholas Skaggs Thu, 20 Oct 2016 15:28:12 +0200 + juju-core (2.0~beta15-0ubuntu2.16.04.1) xenial-proposed; urgency=medium * Restore all arches to building diff -Nru juju-core-2.0~beta15/debian/control juju-core-2.0.0/debian/control --- juju-core-2.0~beta15/debian/control 2016-08-22 12:17:34.000000000 
+0000 +++ juju-core-2.0.0/debian/control 2016-10-20 13:28:12.000000000 +0000 @@ -10,12 +10,13 @@ golang-github-coreos-go-systemd-dev, golang-go (>= 2:1.6), golang-go-dbus-dev, - golang-golang-x-crypto-dev, + golang-golang-x-crypto-dev (>= 1:0.0~git20161012.0.5f31782), golang-golang-x-net-dev, golang-gopkg-tomb.v2-dev, golang-websocket-dev, golang-yaml.v2-dev, lsb-release, + po-debconf, python Standards-Version: 3.9.7 Homepage: http://launchpad.net/juju-core @@ -24,8 +25,8 @@ Package: juju-2.0 Architecture: any -Depends: distro-info, ${misc:Depends}, ${shlibs:Depends} -Recommends: lxd, bash-completion +Depends: distro-info, lxd, ${misc:Depends}, ${shlibs:Depends} +Recommends: bash-completion Breaks: juju-core (<= 1.25.4) Conflicts: juju2, juju-core2 Replaces: juju2, juju-core2 diff -Nru juju-core-2.0~beta15/debian/copyright juju-core-2.0.0/debian/copyright --- juju-core-2.0~beta15/debian/copyright 2016-08-22 12:17:34.000000000 +0000 +++ juju-core-2.0.0/debian/copyright 2016-10-20 13:28:12.000000000 +0000 @@ -26,7 +26,12 @@ Files: src/github.com/Azure/azure-sdk-for-go/* Copyright: 2009-2015 The Go Authors. All rights reserved. License: Apache-2.0 -Comment: Last verified commit 3b480eaaf6b4236d43a3c06cba969da6f53c8b66 +Comment: Last verified commit 902d95d9f311ae585ee98cfd18f418b467d60d5a + +Files: src/github.com/Azure/go-autorest/* +Copyright: 2015 Microsoft Corporation. All rights reserved. 
+License: Apache-2.0 +Comment: Last verified commit 6f40a8acfe03270d792cb8155e2942c09d7cff95 Files: src/github.com/ajstarks/svgo/* Copyright: 2010 Anthony Starks @@ -40,6 +45,11 @@ License: AGPL-3 Comment: Last verified commit 31228935eec685587914528585da4eb9b073c76d +Files: src/github.com/beorn7/perks/* +Copyright: 2013 Blake Mizerany +License: Expat +Comment: Last verified commit 3ac7bf7a47d159a033b107610db8a1b6575507a4 + Files: src/github.com/bmizerany/pat/* Copyright: 2012 Keith Rarick, Blake Mizerany License: Expat @@ -52,6 +62,11 @@ File src/github.com/coreos/go-systemd/daemon/sdnotify.go forked from Docker project, also under Apache-2.0. +Files: src/github.com/dgrijalva/jwt-go/* +Copyright: 2012 Dave Grijalva +License: Expat +Comment: Last verified commit 01aeca54ebda6e0fbfafd0a524d234159c05ec20 + Files: src/github.com/dustin/go-humanize/* Copyright: 2005-2008 Dustin Sallings License: Expat @@ -69,6 +84,11 @@ License: BSD-2-clause Comment: Last verified commit 32c6cc29c14570de4cf6d7e7737d68fb2d01ad15 +Files: src/github.com/golang/protobuf/* +Copyright: Copyright 2010 The Go Authors. All rights reserved. +License: BSD-3-clause +Comment: Last verified commit 34a5f244f1c01cdfee8e60324258cfbb97a42aec + Files: src/github.com/google/go-querystring/* Copyright: 2013 The Go Authors. All rights reserved. Copyright 2013 Google. @@ -110,6 +130,11 @@ License: LGPL-3+ Comment: Last verified commit 0da0d5f1342065321c97812b1f4ac0c2b0bab56c +Files: src/github.com/juju/ansiterm/* +Copyright: 2015 Canonical Ltd +License: LGPL-3 with linking exception +Comment: Last verified commit b99631de12cf04a906c1d4e4ec54fb86eae5863d + Files: src/github.com/juju/blobstore/* Copyright: 2014, 2015 Canonical Ltd. License: LGPL-3 with linking exception @@ -123,13 +148,18 @@ Files: src/github.com/juju/cmd/* Copyright: 2012-2016 Canonical Ltd. 
License: LGPL-3 with linking exception -Comment: Last verified commit 035efd5daac768531ef240ab9e5ee32e3498fbef +Comment: Last verified commit 1c6973d59b804e4d3c293fbf240f067e73436bc9 Files: src/github.com/juju/errors/* Copyright: 2013-2015 Canonical Ltd. License: LGPL-3 with linking exception Comment: Last verified commit 1b5e39b83d1835fa480e0c2ddefb040ee82d58b3 +Files: src/github.com/juju/gnuflag/* +Copyright: 2009, 2010, 2012 The Go Authors. All rights reserved. +License: BSD-3-clause +Comment: Last verified commit 4e76c56581859c14d9d87e1ddbe29e1c0f10195f + Files: src/github.com/juju/go4/* Copyright: 2011, 2014, 2015, 2016 The go4 Authors License: Apache-2.0 @@ -203,7 +233,7 @@ Files: src/github.com/juju/gomaasapi/* Copyright: 2012-2016, Canonical Ltd. License: LGPL-3+ -Comment: Last verified commit c4008a71e7212cb6a99a9c17bb218034927d82b7 +Comment: Last verified commit 8c484173e0870fc49c9214c56c6ae8dc9c26463d Files: src/github.com/juju/govmomi/* Copyright: 2014-2015 VMware, Inc. All Rights Reserved. @@ -226,7 +256,7 @@ Files: src/github.com/juju/httprequest/* Copyright: 2015 Canonical Ltd. License: LGPL-3+ -Comment: Last verified commit 796aaafaf712f666df58d31a482c51233038bf9f +Comment: Last verified commit 266fd1e9debf09c037a63f074d099a2da4559ece Files: src/github.com/juju/idmclient/* Copyright: 2014, 2015, 2016 Canonical Ltd. @@ -245,25 +275,6 @@ 2014-2015 Cloudbase Solutions SRL License: AGPL-3 -Files: src/github.com/juju/juju/cloudconfig/powershell_helpers.go -Copyright: 2011-2015 Canonical Ltd. - 2014-2015 Cloudbase Solutions - 2012 Aaron Jensen - 2009 Vladimir Vasiltsov -License: AGPL-3 -Comment: - Contains some code borrowed from - https://bitbucket.org/splatteredbits/carbon. - Original file can be found at Source/Security/Privilege.cs and - licensed under Apache-2.0 license. This license is compatible with - AGPLv3 and derived works can and have been licensed under AGPLv3. 
- The original Apache-2.0 license for the external source can be found - inside cloudconfigApache-License.txt. - - Contains code borrowed from https://github.com/gintsgints/tar-cs - which is under BSD3 License. This license is compatible with - AGPLv3 and derived works can and have been licensed under AGPLv3. - The original BSD3 license for the external source can be found - inside src/github.com/juju/juju/cloudconfig/BSD3-License.txt - Files: src/github.com/juju/juju/cloudconfig/windows_userdata_test.go Copyright: 2011-2015 Canonical Ltd. 2014 Cloudbase Solutions @@ -277,7 +288,7 @@ The original Apache-2.0 license for the external source can be found inside src/github.com/juju/juju/cloudconfig/Apache-License.txt. -Files: src/github.com/juju/juju/etc/bash_completion.d/juju +Files: src/github.com/juju/juju/etc/bash_completion.d/juju* Copyright: 2013+ Canonical Ltd. License: GPL-3 @@ -294,7 +305,7 @@ Files: src/github.com/juju/loggo/* Copyright: 2014, 2016 Canonical Ltd. License: LGPL-3 with linking exception -Comment: Last verified commit 15901ae4de786d05edae84a27c93d3fbef66c91e +Comment: Last verified commit 3b7ece48644d35850f4ced4c2cbc2cf8413f58e0 Files: src/github.com/juju/mutex/* Copyright: 2016 Canonical Ltd. @@ -304,7 +315,7 @@ Files: src/github.com/juju/persistent-cookiejar/* Copyright: 2012, 2013, 2015 The Go Authors. All rights reserved. License: BSD-3-clause -Comment: Last verified commit e710b897c13ca52828ca2fc9769465186fd6d15c +Comment: Last verified commit b48f5b9290d63455d10de0c0e4c26e06e6e74842 Files: src/github.com/juju/replicaset/* Copyright: 2013-2015 Canonical Ltd @@ -324,7 +335,7 @@ Files: src/github.com/juju/romulus/* Copyright: 2016 Canonical Ltd. License: AGPL-3 -Comment: Last verified commit f790f93d956741903ce5b1f027df4c9404227d55 +Comment: Last verified commit bf7827fa2f360ab762c134766ff1d4fff959ea03 Files: src/github.com/juju/schema/* Copyright: 2011-2016 Canonical Ltd. 
@@ -339,7 +350,7 @@ Files: src/github.com/juju/testing/* Copyright: 2011-2016 Canonical Ltd. License: LGPL-3 with linking exception -Comment: Last verified commit d325c22badd4ba3a5fde01d479b188c7a06df755 +Comment: Last verified commit 692d58e72934a2e2b56f663259696e035e6351ff Files: src/github.com/juju/testing/checkers/file_test.go src/github.com/juju/testing/mgo_windows.go @@ -357,7 +368,7 @@ Files: src/github.com/juju/txn/* Copyright: 2014, 2015 Canonical Ltd. License: LGPL-3 with linking exception -Comment: Last verified commit 99ec629d0066a4d73c54d8e021a7fc1dc07df614 +Comment: Last verified commit 18d812a45ffc407a4d5f849036b7d8d12febaf08 Files: src/github.com/juju/usso/* Copyright: 2015 Canonical Ltd. @@ -368,7 +379,7 @@ Copyright: 2011-2016 Canonical Ltd. 2014, 2015, 2016 Cloudbase Solutions SRL License: LGPL-3 with linking exception -Comment: Last verified commit 10adcbfe55417518543ed3c3341de2c7db0a3450 +Comment: Last verified commit 406e7197d0690a3f28c5a147138774eec4c1355e Files: src/github.com/juju/utils/du/diskusage.go src/github.com/juju/utils/du/diskusage_windows.go @@ -411,6 +422,11 @@ License: BSD-3-clause Comment: Last verified commit 77a895ad01ebc98a4dc95d8355bc825ce80a56f6 +Files: src/github.com/lunixbochs/vtclean/* +Copyright: 2015 Ryan Hileman +License: Expat +Comment: Last verified commit 4fbf7632a2c6d3fbdb9931439bdbbeded02cbe36 + Files: src/github.com/julienschmidt/httprouter/path.go src/github.com/julienschmidt/httprouter/path_test.go Copyright: 2013 Julien Schmidt. All rights reserved. @@ -422,7 +438,7 @@ Files: src/github.com/lxc/lxd/* Copyright: 2015 LXD contributors License: Apache-2.0 -Comment: Last verified commit 62f62e9d6e0da14947023f99764eac29c26cef8d +Comment: Last verified commit 95a324a23696e937c466996d57554e3677b3c84a Files: src/github.com/lxc/lxd/shared/cert.go Copyright: 2009, 2010 The Go Authors. All rights reserved. @@ -440,20 +456,55 @@ is not included in the gnuflag dir. 
The file is included at src/launchpad.net/gnuflag/LICENSE +Files: src/github.com/mattn/go-colorable/* +Copyright: 2016 Yasuhiro Matsumoto +License: Expat +Comment: Last verified commit ed8eb9e318d7a84ce5915b495b7d35e0cfe7b5a8 + +Files: src/github.com/mattn/go-isatty/* +Copyright: 2016 Yasuhiro Matsumoto +License: Expat +Comment: Last verified commit 66b8e73f3f5cda9f96b69efd03dd3d7fc4a5cdb8 + Files: src/github.com/mattn/go-runewidth/* Copyright: 2013-2016 Yasuhiro Matsumoto License: Expat Comment: Last verified commit d96d1bd051f2bd9e7e43d602782b37b93b1b5666 +Files: src/github.com/matttproud/golang_protobuf_extensions/* +Copyright: 2013-2016 golang protobuf extensions Authors +License: Apache-2.0 +Comment: Last verified commit c12348ce28de40eed0136aa2b644d0ee0650e56c + +Files: src/github.com/prometheus/client_golang/* +Copyright: 2012 - 2016 Prometheus Authors +License: Apache-2.0 +Comment: Last verified commit b90ee0840e8e7dfb84c08d13b9c4f3a794586a21 + +Files: src/github.com/prometheus/client_model/* +Copyright: 2013 - 2015 Prometheus Authors +License: Apache-2.0 +Comment: Last verified commit fa8ad6fec33561be4280a8f0514318c79d7f6cb6 + +Files: src/github.com/prometheus/common/* +Copyright: 2013 - 2015 Prometheus Authors +License: Apache-2.0 +Comment: Last verified commit dd586c1c5abb0be59e60f942c22af711a2008cb4 + +Files: src/github.com/prometheus/procfs/* +Copyright: 2013 - 2015 Prometheus Authors +License: Apache-2.0 +Comment: Last verified commit abf152e5f3e97f2fafac028d2cc06c1feb87ffa5 + Files: src/github.com/rogpeppe/fastuuid/* Copyright: 2014 Roger Peppe License: BSD-3-clause Comment: Last verified commit 6724a57986aff9bff1a1770e9347036def7c89f6 Files: src/golang.org/x/crypto/* -Copyright: 2009-2015 The Go Authors. All rights reserved. +Copyright: 2009-2016 The Go Authors. All rights reserved. 
License: BSD-3-clause -Comment: Last verified commit aedad9a179ec1ea11b7064c57cbc6dc30d7724ec +Comment: Last verified commit 8e06e8ddd9629eb88639aba897641bff8031f1d3 Files: src/golang.org/x/net/* Copyright: 2009-2015 The Go Authors. All rights reserved. @@ -489,7 +540,7 @@ 2011 AppsAttic Ltd. 2011 Memeo Inc. License: LGPL-3 with linking exception -Comment: Last verified commit a651c43e72df7778b14ac6b54e5ac119d32b1263 +Comment: Last verified commit 18899065239e006cc73b0e66800c98c2ce4eee50 Files: src/gopkg.in/check.v1/* Copyright: 2010-2013 Gustavo Niemeyer @@ -528,32 +579,32 @@ Files: src/gopkg.in/juju/charm.v6-unstable/* Copyright: 2011-2016 Canonical Ltd. License: LGPL-3 with linking exception -Comment: Last verified commit a3bb92d047b0892452b6a39ece59b4d3a2ac35b9 +Comment: Last verified commit 83771c4919d6810bce5b7e63f46bea5fbfed0b93 Files: src/gopkg.in/juju/charmrepo.v2-unstable/* Copyright: 2012-2016 Canonical Ltd. License: LGPL-3 with linking exception -Comment: Last verified commit 6e6733987fb03100f30e494cc1134351fe4a593b +Comment: Last verified commit 73c1113f7ddee0306f4b3c19773d35a3f153c04a Files: src/gopkg.in/juju/charmstore.v5-unstable/* Copyright: 2012-2016 Canonical Ltd. License: AGPL-3 -Comment: Last verified commit 2cb9f80553dddaae8c5e2161ea45f4be5d9afc00 +Comment: Last verified commit fd1eef3002fc6b6daff5e97efab6f5056d22dcc7 Files: src/gopkg.in/juju/environschema.v1/* Copyright: 2015 Canonical Ltd. License: LGPL-3 with linking exception Comment: Last verified commit 7359fc7857abe2b11b5b3e23811a9c64cb6b01e0 -Files: src/gopkg.in/juju/jujusvg.v1/* -Copyright: 2014, 2015 Canonical Ltd. +Files: src/gopkg.in/juju/jujusvg.v2/* +Copyright: 2015 Canonical Ltd. License: LGPL-3 with linking exception -Comment: Last verified commit cc128825adce31ea13020d24e7b3302bac86a8c3 +Comment: Last verified commit d82160011935ef79fc7aca84aba2c6f74700fe75 Files: src/gopkg.in/macaroon-bakery.v1/* Copyright: 2014, Roger Peppe, Canonical Inc. 
License: LGPL-3 with linking exception -Comment: Last verified commit b097c9d99b2537efaf54492e08f7e148f956ba51 +Comment: Last verified commit 469b44e6f1f9479e115c8ae879ef80695be624d5 Files: src/gopkg.in/macaroon.v1/* Copyright: 2014, Roger Peppe @@ -563,7 +614,7 @@ Files: src/gopkg.in/mgo.v2/* Copyright: 2010-2015 Gustavo Niemeyer License: BSD-2-clause -Comment: Last verified commit 29cc868a5ca65f401ff318143f9408d02f4799cc +Comment: Last verified commit f2b6f6c918c452ad107eec89615f074e3bd80e33 Files: src/gopkg.in/mgo.v2/internal/sasl/sspi_windows.* Copyright: 2013-2015 - Christian Amor Kvalheim @@ -576,7 +627,7 @@ Files: src/gopkg.in/juju/names.v2/* Copyright: 2013-2016 Canonical Ltd. License: LGPL-3 with linking exception -Comment: Last verified commit 3e0d33a444fec55aea7269b849eb22da41e73072 +Comment: Last verified commit 3317ff7471a685109e262892b5f81b940ad5782f Files: src/gopkg.in/natefinch/lumberjack.v2/* Copyright: 2014 Nate Finch @@ -589,6 +640,11 @@ License: Expat Comment: Last verified commit c1b8fa8bdccecb0b8db834ee0b92fdbcfa606dd6 +Files: src/gopkg.in/tomb.v1* +Copyright: 2010-2011 - Gustavo Niemeyer +License: BSD-3-clause +Comment: Last verified commit dd632973f1e7218eb1089048e0798ec9ae7dceb8 + Files: src/gopkg.in/yaml.v1/* Copyright: 2011-2014 Canonical Inc. License: LGPL-3 with linking exception @@ -621,22 +677,6 @@ Copyright: 2006 Kirill Simonov License: Expat -Files: src/launchpad.net/gnuflag/* -Copyright: 2009, 2010, 2012 The Go Authors. All rights reserved. -License: BSD-3-clause -Comment: Last verified commit roger.peppe@canonical.com-20140716064605-pk32dnmfust02yab - -Files: src/launchpad.net/golxc/* -Copyright: 2012-2013, Canonical Ltd. 
-License: LGPL-3+ -Comment: Last verified commit ian.booth@canonical.com-20141121040613-ztm1q0iy9rune3zt - -Files: src/launchpad.net/tomb/* -Copyright: 2010-2011 - Gustavo Niemeyer -License: BSD-3-clause -Comment: Last verified commit gustavo@niemeyer.net-20140529072043-hzcrlnl3ygvg914q - - License: BSD-3-clause Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are diff -Nru juju-core-2.0~beta15/debian/juju-2.0.conf juju-core-2.0.0/debian/juju-2.0.conf --- juju-core-2.0~beta15/debian/juju-2.0.conf 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/debian/juju-2.0.conf 2016-10-20 13:28:12.000000000 +0000 @@ -0,0 +1,2 @@ +fs.inotify.max_user_watches = 524288 +fs.inotify.max_user_instances = 256 diff -Nru juju-core-2.0~beta15/debian/juju-2.0.config juju-core-2.0.0/debian/juju-2.0.config --- juju-core-2.0~beta15/debian/juju-2.0.config 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/debian/juju-2.0.config 2016-10-31 19:55:49.000000000 +0000 @@ -0,0 +1,12 @@ +#!/bin/sh +set -e + +# Source debconf library +. 
/usr/share/debconf/confmodule + +ARCH=`dpkg --print-architecture` +case $ARCH in amd64|arm64|ppc64el|s390x) + exit 0 +esac +db_input critical juju/unsupportedarch || true +db_go diff -Nru juju-core-2.0~beta15/debian/juju-2.0.postinst juju-core-2.0.0/debian/juju-2.0.postinst --- juju-core-2.0~beta15/debian/juju-2.0.postinst 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/debian/juju-2.0.postinst 2016-10-20 13:28:12.000000000 +0000 @@ -0,0 +1,29 @@ +#!/bin/sh +# postinst script for juju-2.0 + +set -e + +case $DPKG_MAINTSCRIPT_ARCH in +amd64|arm64|ppc64el|s390x) + case "$1" in + configure) + if [ -x /etc/init.d/procps ]; then + invoke-rc.d procps restart || true + fi + ;; + + abort-upgrade|abort-remove|abort-deconfigure) + ;; + + *) + echo "postinst called with unknown argument \`$1'" >&2 + exit 1 + ;; + esac + ;; +esac + + +exit 0 + +##DEBHELPER## diff -Nru juju-core-2.0~beta15/debian/juju-2.0.templates juju-core-2.0.0/debian/juju-2.0.templates --- juju-core-2.0~beta15/debian/juju-2.0.templates 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/debian/juju-2.0.templates 2016-10-20 13:28:12.000000000 +0000 @@ -0,0 +1,8 @@ +Template: juju/unsupportedarch +Type: note +_Description: This package is not supported under this architecture. + The juju 2.0 client package is not supported under this architecture. + Please use a supported architecture (amd64, arm64, ppc64el, s390x). + . + This package did not install juju, contains no binaries, and should be + removed once this message has been viewed. 
diff -Nru juju-core-2.0~beta15/debian/patches/gccgo-vsphere-storage.diff juju-core-2.0.0/debian/patches/gccgo-vsphere-storage.diff --- juju-core-2.0~beta15/debian/patches/gccgo-vsphere-storage.diff 2016-08-22 12:17:34.000000000 +0000 +++ juju-core-2.0.0/debian/patches/gccgo-vsphere-storage.diff 1970-01-01 00:00:00.000000000 +0000 @@ -1,13 +0,0 @@ -Index: quilt/src/github.com/juju/juju/provider/vsphere/storage.go -=================================================================== ---- quilt.orig/src/github.com/juju/juju/provider/vsphere/storage.go -+++ quilt/src/github.com/juju/juju/provider/vsphere/storage.go -@@ -1,6 +1,8 @@ - // Copyright 2016 Canonical Ltd. - // Licensed under the AGPLv3, see LICENCE file for details. - -+// +build !gccgo -+ - package vsphere - - import ( diff -Nru juju-core-2.0~beta15/debian/patches/series juju-core-2.0.0/debian/patches/series --- juju-core-2.0~beta15/debian/patches/series 2016-08-22 12:17:34.000000000 +0000 +++ juju-core-2.0.0/debian/patches/series 1970-01-01 00:00:00.000000000 +0000 @@ -1 +0,0 @@ -gccgo-vsphere-storage.diff diff -Nru juju-core-2.0~beta15/debian/po/POTFILES.in juju-core-2.0.0/debian/po/POTFILES.in --- juju-core-2.0~beta15/debian/po/POTFILES.in 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/debian/po/POTFILES.in 2016-10-20 13:28:12.000000000 +0000 @@ -0,0 +1 @@ +[type: gettext/rfc822deb] juju-2.0.templates diff -Nru juju-core-2.0~beta15/debian/po/templates.pot juju-core-2.0.0/debian/po/templates.pot --- juju-core-2.0~beta15/debian/po/templates.pot 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/debian/po/templates.pot 2016-10-20 13:28:12.000000000 +0000 @@ -0,0 +1,40 @@ +# SOME DESCRIPTIVE TITLE. +# Copyright (C) YEAR THE PACKAGE'S COPYRIGHT HOLDER +# This file is distributed under the same license as the juju-core package. +# FIRST AUTHOR , YEAR. 
+# +#, fuzzy +msgid "" +msgstr "" +"Project-Id-Version: juju-core\n" +"Report-Msgid-Bugs-To: juju-core@packages.debian.org\n" +"POT-Creation-Date: 2016-10-25 12:39-0400\n" +"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" +"Last-Translator: FULL NAME \n" +"Language-Team: LANGUAGE \n" +"Language: \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=CHARSET\n" +"Content-Transfer-Encoding: 8bit\n" + +#. Type: note +#. Description +#: ../juju-2.0.templates:1001 +msgid "This package is not supported under this architecture." +msgstr "" + +#. Type: note +#. Description +#: ../juju-2.0.templates:1001 +msgid "" +"The juju 2.0 client package is not supported under this architecture. " +"Please use a supported architecture (amd64, arm64, ppc64el, s390x)." +msgstr "" + +#. Type: note +#. Description +#: ../juju-2.0.templates:1001 +msgid "" +"This package did not install juju, contains no binaries, and should be " +"removed once this message has been viewed." +msgstr "" diff -Nru juju-core-2.0~beta15/debian/rules juju-core-2.0.0/debian/rules --- juju-core-2.0~beta15/debian/rules 2016-08-22 12:17:34.000000000 +0000 +++ juju-core-2.0.0/debian/rules 2016-10-20 13:28:12.000000000 +0000 @@ -23,13 +23,7 @@ dh $@ --with=golang --buildsystem=golang --builddirectory=_build COMMON_FLAGS:= -x -v -golang_archs:= amd64 i386 armhf s390x ppc64el arm64 -ifeq (,$(filter $(DEB_HOST_ARCH), $(golang_archs))) -# NOTE(james-page) statically link libgo for the jujud binary for gccgo -# this allows the binary to be re-cut for upstream tool distribution and -# mimics the behaviour of the golang gc compiler. -JUJUD_FLAGS:= -gccgoflags -static-libgo -endif +supported_archs:= amd64 s390x ppc64el arm64 # NOTE: ensure /usr/share/gocode is use in preference to any # embedded source for dependencies. 
@@ -37,10 +31,12 @@ ./debian/helpers/setup-build-directory.py override_dh_auto_build: - dh_auto_build -- $(COMMON_FLAGS) - rm _build/bin/jujud - # re-link jujud with specific options - go install $(COMMON_FLAGS) $(JUJUD_FLAGS) github.com/juju/juju/cmd/jujud +ifneq (,$(filter $(DEB_HOST_ARCH), $(supported_archs))) + dh_auto_build -- $(COMMON_FLAGS) + rm _build/bin/jujud + # re-link jujud with specific options + go install $(COMMON_FLAGS) $(JUJUD_FLAGS) github.com/juju/juju/cmd/jujud +endif # Don't run the tests -- the juju unit tests are too heavyweight to run during # package build. @@ -48,6 +44,7 @@ : override_dh_auto_install: +ifneq (,$(filter $(DEB_HOST_ARCH), $(supported_archs))) echo '#!/bin/sh\nexport PATH=/usr/lib/juju-$(VERSION)/bin:"$$PATH"\nexec juju "$$@"' > _build/bin/juju-$(VERSION) chmod 755 _build/bin/juju-$(VERSION) mkdir -p _build/home @@ -63,6 +60,8 @@ cp src/github.com/juju/juju/etc/bash_completion.d/juju-version _build/juju dh_install _build/juju src/github.com/juju/juju/etc/bash_completion.d/juju-2.0 \ usr/share/bash-completion/completions + dh_install debian/juju-2.0.conf usr/lib/sysctl.d +endif override_dh_link: dh_link -pjuju-2.0 usr/bin/juju-$(VERSION) usr/bin/juju @@ -71,7 +70,9 @@ dh_link override_dh_compress: - dh_compress usr/lib/juju-$(VERSION)/man/man1/juju.1 +ifneq (,$(filter $(DEB_HOST_ARCH), $(supported_archs))) + dh_compress -pjuju-2.0 usr/lib/juju-$(VERSION)/man/man1/juju.1 +endif dh_compress override_dh_auto_clean: diff -Nru juju-core-2.0~beta15/debian/tests/client juju-core-2.0.0/debian/tests/client --- juju-core-2.0~beta15/debian/tests/client 2016-08-22 12:17:34.000000000 +0000 +++ juju-core-2.0.0/debian/tests/client 2016-10-20 13:28:12.000000000 +0000 @@ -2,6 +2,14 @@ set -ex -echo "Testing juju version: " -juju version -echo "OK" +ARCH=`dpkg --print-architecture` +if [ "$ARCH" = "amd64" -o "$ARCH" = "arm64" -o "$ARCH" = "ppc64el" -o "$ARCH" = "s390x" ] +then + echo "Testing juju version: " + juju version + echo "OK" +else + 
echo "SKIP: Unsupported Architecture" + exit 0 +fi + diff -Nru juju-core-2.0~beta15/debian/tests/current-lxd-provider juju-core-2.0.0/debian/tests/current-lxd-provider --- juju-core-2.0~beta15/debian/tests/current-lxd-provider 2016-08-22 12:17:34.000000000 +0000 +++ juju-core-2.0.0/debian/tests/current-lxd-provider 2016-10-20 13:28:12.000000000 +0000 @@ -1,6 +1,13 @@ #!/bin/sh set -ex -sh debian/tests/setup-lxd.sh -ln /var/log/lxd/lxd.log "$ADT_ARTIFACTS/lxd.log" -sh debian/tests/normal-user.sh debian/tests/lxd-provider +ARCH=`dpkg --print-architecture` +if [ "$ARCH" = "amd64" -o "$ARCH" = "arm64" -o "$ARCH" = "ppc64el" -o "$ARCH" = "s390x" ] +then + sh debian/tests/setup-lxd.sh + ln /var/log/lxd/lxd.log "$ADT_ARTIFACTS/lxd.log" + sh debian/tests/normal-user.sh debian/tests/lxd-provider +else + echo "SKIP: Unsupported Architecture" + exit 0 +fi diff -Nru juju-core-2.0~beta15/debian/tests/current-manual-provider juju-core-2.0.0/debian/tests/current-manual-provider --- juju-core-2.0~beta15/debian/tests/current-manual-provider 2016-08-22 12:17:34.000000000 +0000 +++ juju-core-2.0.0/debian/tests/current-manual-provider 2016-10-20 13:28:12.000000000 +0000 @@ -1,9 +1,16 @@ #!/bin/sh set -ex -if ! apt-cache show juju-mongodb3.2 >/dev/null 2>&1; then - echo "SKIP: 32-bit state servers not supported after xenial." - exit 0 -fi +ARCH=`dpkg --print-architecture` +if [ "$ARCH" = "amd64" -o "$ARCH" = "arm64" -o "$ARCH" = "ppc64el" -o "$ARCH" = "s390x" ] +then + if ! apt-cache show juju-mongodb3.2 >/dev/null 2>&1; then + echo "SKIP: 32-bit state servers not supported after xenial." 
+ exit 0 + fi -sh debian/tests/normal-user.sh debian/tests/manual-provider + sh debian/tests/normal-user.sh debian/tests/manual-provider +else + echo "SKIP: Unsupported Architecture" + exit 0 +fi diff -Nru juju-core-2.0~beta15/debian/tests/future-lxd-provider juju-core-2.0.0/debian/tests/future-lxd-provider --- juju-core-2.0~beta15/debian/tests/future-lxd-provider 2016-08-22 12:17:34.000000000 +0000 +++ juju-core-2.0.0/debian/tests/future-lxd-provider 2016-10-20 13:28:12.000000000 +0000 @@ -1,12 +1,19 @@ #!/bin/sh set -ex -if [ ! -d /run/systemd/system ]; then - echo "SKIP: non-systemd series are not valid future series." - exit 0 -fi +ARCH=`dpkg --print-architecture` +if [ "$ARCH" = "amd64" -o "$ARCH" = "arm64" -o "$ARCH" = "ppc64el" -o "$ARCH" = "s390x" ] +then + if [ ! -d /run/systemd/system ]; then + echo "SKIP: non-systemd series are not valid future series." + exit 0 + fi -sh debian/tests/fake-future.sh -sh debian/tests/setup-lxd.sh -ln /var/log/lxd/lxd.log "$ADT_ARTIFACTS/lxd.log" -sh debian/tests/normal-user.sh debian/tests/lxd-provider + sh debian/tests/fake-future.sh + sh debian/tests/setup-lxd.sh + ln /var/log/lxd/lxd.log "$ADT_ARTIFACTS/lxd.log" + sh debian/tests/normal-user.sh debian/tests/lxd-provider +else + echo "SKIP: Unsupported Architecture" + exit 0 +fi diff -Nru juju-core-2.0~beta15/debian/tests/future-manual-provider juju-core-2.0.0/debian/tests/future-manual-provider --- juju-core-2.0~beta15/debian/tests/future-manual-provider 2016-08-22 12:17:34.000000000 +0000 +++ juju-core-2.0.0/debian/tests/future-manual-provider 2016-10-20 13:28:12.000000000 +0000 @@ -1,14 +1,26 @@ #!/bin/sh set -ex -if [ ! -d /run/systemd/system ]; then - echo "SKIP: non-systemd series are not valid future series." - exit 0 -fi -if ! apt-cache show juju-mongodb3.2 >/dev/null 2>&1; then - echo "SKIP: 32-bit state servers not supported in future series." 
- exit 0 -fi +ARCH=`dpkg --print-architecture` +if [ "$ARCH" = "amd64" -o "$ARCH" = "arm64" -o "$ARCH" = "ppc64el" -o "$ARCH" = "s390x" ] +then + # bug 1629376 prevents juju from working in this scenario + # juju will not bootstrap published versions of juju on unknown series + echo "SKIP: Juju won't bootstrap unknown without published agent (LP:1629376)" + exit 0 -sh debian/tests/fake-future.sh -sh debian/tests/normal-user.sh debian/tests/manual-provider + if [ ! -d /run/systemd/system ]; then + echo "SKIP: non-systemd series are not valid future series." + exit 0 + fi + if ! apt-cache show juju-mongodb3.2 >/dev/null 2>&1; then + echo "SKIP: 32-bit state servers not supported in future series." + exit 0 + fi + + sh debian/tests/fake-future.sh + sh debian/tests/normal-user.sh debian/tests/manual-provider +else + echo "SKIP: Unsupported Architecture" + exit 0 +fi diff -Nru juju-core-2.0~beta15/debian/tests/lxd-provider juju-core-2.0.0/debian/tests/lxd-provider --- juju-core-2.0~beta15/debian/tests/lxd-provider 2016-08-22 12:17:34.000000000 +0000 +++ juju-core-2.0.0/debian/tests/lxd-provider 2016-10-20 13:28:12.000000000 +0000 @@ -12,9 +12,20 @@ echo "Prepopulating lxd with daily $SERIES image: " lxc image copy ubuntu-daily:$SERIES local: --alias ubuntu-$SERIES +lxc image list +lxc info echo "Testing juju bootstrap: " -juju bootstrap my-controller lxd --upload-tools --debug --config default-series=$SERIES +BOOTSTRAP_ARGS="--debug --config default-series=$SERIES --config enable-os-upgrade=false --config no-proxy=$no_proxy" +if [ "$http_proxy" ] +then + BOOTSTRAP_ARGS=$BOOTSTRAP_ARGS' --config http-proxy='$http_proxy +fi +if [ "$https_proxy" ] +then + BOOTSTRAP_ARGS=$BOOTSTRAP_ARGS' --config https-proxy='$https_proxy +fi +juju bootstrap lxd my-controller $BOOTSTRAP_ARGS echo "OK" echo "Waiting for environment to bootstrap: " diff -Nru juju-core-2.0~beta15/debian/tests/manual-provider juju-core-2.0.0/debian/tests/manual-provider --- 
juju-core-2.0~beta15/debian/tests/manual-provider 2016-08-22 12:17:34.000000000 +0000 +++ juju-core-2.0.0/debian/tests/manual-provider 2016-10-20 13:28:12.000000000 +0000 @@ -14,4 +14,4 @@ cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys ssh-keyscan "$ip" >> ~/.ssh/known_hosts -juju bootstrap my-controller manual/$ip --upload-tools --debug +juju bootstrap manual/$ip my-controller --debug --config enable-os-upgrade=false diff -Nru juju-core-2.0~beta15/debian/tests/setup-lxd.sh juju-core-2.0.0/debian/tests/setup-lxd.sh --- juju-core-2.0~beta15/debian/tests/setup-lxd.sh 2016-08-22 12:17:34.000000000 +0000 +++ juju-core-2.0.0/debian/tests/setup-lxd.sh 2016-10-20 13:28:12.000000000 +0000 @@ -5,6 +5,22 @@ # bridge so the juju lxd provider can function. Taken from changes made # to cloud-init to do approximately this. +VERSION=$(lxd --version) + +# LXD 2.3+ needs lxdbr0 setup via lxc. +if dpkg --compare-versions "$VERSION" gt "2.2"; then + if ! lxc network list | grep -q lxdbr0; then + # Configure a known address ranges for lxdbr0. + lxc network create lxdbr0 \ + ipv4.address=10.0.8.1/24 ipv4.nat=true \ + ipv6.address=none ipv6.nat=false + fi + lxc network show lxdbr0 + lxc config show + exit 0 +fi + +# LXD 2.2 and earlier use debconf to create and configure the network. 
debconf-communicate << EOF set lxd/setup-bridge true set lxd/bridge-domain lxd @@ -24,4 +40,4 @@ dpkg-reconfigure lxd --frontend=noninteractive # Must run a command for systemd socket activation to start the service -lxc finger +lxc config show diff -Nru juju-core-2.0~beta15/debian/upstream/signing-key.asc juju-core-2.0.0/debian/upstream/signing-key.asc --- juju-core-2.0~beta15/debian/upstream/signing-key.asc 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/debian/upstream/signing-key.asc 2016-10-20 13:28:12.000000000 +0000 @@ -0,0 +1,26 @@ +-----BEGIN PGP PUBLIC KEY BLOCK----- +Version: GnuPG v1 + +mQENBFStm2IBCACyp8ayW/1D5Qta2mgz7YrD6lY9eKID8YTIQEAnGf1SPQL9iBKR +I7EAAzOqeBlHsPRwUOUw7fv7YFmm8XQTqyj/e5zbe89OLrC08qrgZL0p83ejR1J+ +P/7q4LT8bD0DxYFJ+VzvFTlOOCHhofi3NykgCu7FmJroTJX9NKwrfQptSyYfLq9T +1zH30eGp7vtv9QwypAivxRq43dwtcDUameQNdDmgwx5mqUxHXkH7V+yWJqZW+Rw6 +YCQmUc8RrtTrkoRRMLBwB7gQRWGJnwuyNDgVF3ZHSJxH3W1vTSqzgWckfLg2a/Yg +2kYcop94qZUvLRvUfJoK5CRBbl08XZfSN1PVABEBAAGJATcEHwECACEFAlStm2IX +DIARgSee7n7In7eBcCra95Ni2kSi0dsCBwAACgkQa0knIBmuGVLIZAgAowYVL3b3 +sJeq0juwP2zYpewr/HOpxnzl5CJ6IqwKl678WbH2gGuIkGjpvM27j1So4uTOBFIP +3vrBDoIsk5/TZ3KnBcSrP2nBvz/jGRIFnivavs0GfXBINDX5RHhF0FD14f6M61Hx +1+1mC3qBr8BC7b4OT0zR/v070Ii21WAcu4gerGFsK5Ib0E2RbnumW4BM0sOoE9lK +MgnNuyg4QQ7XVPPztLN0R6dwrUOjBBmUvHFLR5dO5pe0E3a8NqpzaT9TgT9Z/a3O +ZoycFoxgy9gD7kcTSm2rPd2/BRVxienB7sNGHA6X9Md9cXMJLk5/wJ1SyEUaqwTm +lgK8UoUl9+qX7LQ4Q2Fub25pY2FsIEp1anUgUUEgQm90IDxhYXJvbi5iZW50bGV5 +K2NqcWFAY2Fub25pY2FsLmNvbT6JAVoEEwECAEQFAlStm2ICGwMFCQlmAYAGCwkI +BwMCBhUIAgkKCwQWAgMBAh4BAheAGxhoa3A6Ly9rZXlzZXJ2ZXIudWJ1bnR1LmNv +bQAKCRBrSScgGa4ZUlqZB/9zOnPS1gjvdpxoGdbLu1YIa/uNCCzdSa8eq1TvakjI +sM/Y3JSXAKAF+dDo4naFSnXTtIQJ+vxRM1qelDFLCDcWCFZj83F1H557CcvrQlN5 +k1qlDTx5GB+NEj2ssuLcLJxFYzxvhI0/jvNVj6fU6jRsBXnMBU588+ls5/Hfj1Cf +WYSWC8laPz1ynk+oGSqh6Zt8PH+qSHyfTA+wvrt+NAateLhyC9O7+8d7B4yZ3bJk +qca5Uif8XtE40Ryp/ER8HtV30yEJXdOwuSCh/GcWr5xznkmMeEcyuxkDhCQwhwj5 +eCegY4pbZcnZMlkubEEVVM/B3BnbaMUxfcaGdW8135Rt +=8Jlc +-----END PGP 
PUBLIC KEY BLOCK----- diff -Nru juju-core-2.0~beta15/debian/watch juju-core-2.0.0/debian/watch --- juju-core-2.0~beta15/debian/watch 2016-08-22 12:17:34.000000000 +0000 +++ juju-core-2.0.0/debian/watch 2016-10-20 13:28:12.000000000 +0000 @@ -1,3 +1,5 @@ version=3 -opts="uversionmangle=s/-/./" \ - https://launchpad.net/juju-core/+download https://launchpad.net/juju-core/.*/.*/.*/juju-core_(.*)\.tar\.gz +opts="uversionmangle=s/^1://;s/-/~/" \ + https://launchpad.net/juju/+download https://launchpad.net/juju/.*/.*/.*/juju-core_(.*)\.tar\.gz +opts=pgpsigurlmangle=s/$/.asc/ \ + https://launchpad.net/juju/+download https://launchpad.net/juju/.*/.*/.*/juju-core_(.*)\.tar\.gz diff -Nru juju-core-2.0~beta15/src/github.com/altoros/gosigma/.gitignore juju-core-2.0.0/src/github.com/altoros/gosigma/.gitignore --- juju-core-2.0~beta15/src/github.com/altoros/gosigma/.gitignore 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/altoros/gosigma/.gitignore 2016-10-13 14:31:55.000000000 +0000 @@ -0,0 +1,2 @@ +*.test +*cover*.out \ No newline at end of file diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/authorization/classicadministrators.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/authorization/classicadministrators.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/authorization/classicadministrators.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/authorization/classicadministrators.go 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,125 @@ +package authorization + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "net/http" +) + +// ClassicAdministratorsClient is the client for the ClassicAdministrators +// methods of the Authorization service. +type ClassicAdministratorsClient struct { + ManagementClient +} + +// NewClassicAdministratorsClient creates an instance of the +// ClassicAdministratorsClient client. +func NewClassicAdministratorsClient(subscriptionID string) ClassicAdministratorsClient { + return NewClassicAdministratorsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewClassicAdministratorsClientWithBaseURI creates an instance of the +// ClassicAdministratorsClient client. +func NewClassicAdministratorsClientWithBaseURI(baseURI string, subscriptionID string) ClassicAdministratorsClient { + return ClassicAdministratorsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// List gets a list of classic administrators for the subscription. 
+func (client ClassicAdministratorsClient) List() (result ClassicAdministratorListResult, err error) { + req, err := client.ListPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "authorization.ClassicAdministratorsClient", "List", nil, "Failure preparing request") + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "authorization.ClassicAdministratorsClient", "List", resp, "Failure sending request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "authorization.ClassicAdministratorsClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client ClassicAdministratorsClient) ListPreparer() (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Authorization/classicAdministrators", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client ClassicAdministratorsClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. 
+func (client ClassicAdministratorsClient) ListResponder(resp *http.Response) (result ClassicAdministratorListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListNextResults retrieves the next set of results, if any. +func (client ClassicAdministratorsClient) ListNextResults(lastResults ClassicAdministratorListResult) (result ClassicAdministratorListResult, err error) { + req, err := lastResults.ClassicAdministratorListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "authorization.ClassicAdministratorsClient", "List", nil, "Failure preparing next results request request") + } + if req == nil { + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "authorization.ClassicAdministratorsClient", "List", resp, "Failure sending next results request request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "authorization.ClassicAdministratorsClient", "List", resp, "Failure responding to next results request request") + } + + return +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/authorization/client.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/authorization/client.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/authorization/client.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/authorization/client.go 2016-10-13 14:32:06.000000000 +0000 @@ -1,3 +1,6 @@ +// Package authorization implements the Azure ARM Authorization service API +// version 2015-07-01. +// package authorization // Copyright (c) Microsoft and contributors. 
All rights reserved. @@ -14,17 +17,17 @@ // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 // Changes may cause incorrect behavior and will be lost if the code is // regenerated. import ( - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest" ) const ( // APIVersion is the version of the Authorization - APIVersion = "2015-01-01" + APIVersion = "2015-07-01" // DefaultBaseURI is the default URI used for the service Authorization DefaultBaseURI = "https://management.azure.com" @@ -34,6 +37,7 @@ type ManagementClient struct { autorest.Client BaseURI string + APIVersion string SubscriptionID string } @@ -47,6 +51,7 @@ return ManagementClient{ Client: autorest.NewClientWithUserAgent(UserAgent()), BaseURI: baseURI, + APIVersion: APIVersion, SubscriptionID: subscriptionID, } } diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/authorization/managementlocks.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/authorization/managementlocks.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/authorization/managementlocks.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/authorization/managementlocks.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,858 +0,0 @@ -package authorization - -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. -// -// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 -// Changes may cause incorrect behavior and will be lost if the code is -// regenerated. - -import ( - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" - "net/http" - "net/url" -) - -// ManagementLocksClient is the client for the ManagementLocks methods of the -// Authorization service. -type ManagementLocksClient struct { - ManagementClient -} - -// NewManagementLocksClient creates an instance of the ManagementLocksClient -// client. -func NewManagementLocksClient(subscriptionID string) ManagementLocksClient { - return NewManagementLocksClientWithBaseURI(DefaultBaseURI, subscriptionID) -} - -// NewManagementLocksClientWithBaseURI creates an instance of the -// ManagementLocksClient client. -func NewManagementLocksClientWithBaseURI(baseURI string, subscriptionID string) ManagementLocksClient { - return ManagementLocksClient{NewWithBaseURI(baseURI, subscriptionID)} -} - -// CreateOrUpdateAtResourceGroupLevel create or update a management lock at -// the resource group level. -// -// resourceGroupName is the resource group name. lockName is the lock name. -// parameters is the management lock parameters. 
-func (client ManagementLocksClient) CreateOrUpdateAtResourceGroupLevel(resourceGroupName string, lockName string, parameters ManagementLockProperties) (result ManagementLockObject, ae error) { - req, err := client.CreateOrUpdateAtResourceGroupLevelPreparer(resourceGroupName, lockName, parameters) - if err != nil { - return result, autorest.NewErrorWithError(err, "authorization/ManagementLocksClient", "CreateOrUpdateAtResourceGroupLevel", "Failure preparing request") - } - - resp, err := client.CreateOrUpdateAtResourceGroupLevelSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "authorization/ManagementLocksClient", "CreateOrUpdateAtResourceGroupLevel", "Failure sending request") - } - - result, err = client.CreateOrUpdateAtResourceGroupLevelResponder(resp) - if err != nil { - ae = autorest.NewErrorWithError(err, "authorization/ManagementLocksClient", "CreateOrUpdateAtResourceGroupLevel", "Failure responding to request") - } - - return -} - -// CreateOrUpdateAtResourceGroupLevelPreparer prepares the CreateOrUpdateAtResourceGroupLevel request. 
-func (client ManagementLocksClient) CreateOrUpdateAtResourceGroupLevelPreparer(resourceGroupName string, lockName string, parameters ManagementLockProperties) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "lockName": url.QueryEscape(lockName), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - } - - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsPut(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Authorization/locks/{lockName}"), - autorest.WithJSON(parameters), - autorest.WithPathParameters(pathParameters), - autorest.WithQueryParameters(queryParameters)) -} - -// CreateOrUpdateAtResourceGroupLevelSender sends the CreateOrUpdateAtResourceGroupLevel request. The method will close the -// http.Response Body if it receives an error. -func (client ManagementLocksClient) CreateOrUpdateAtResourceGroupLevelSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK, http.StatusCreated) -} - -// CreateOrUpdateAtResourceGroupLevelResponder handles the response to the CreateOrUpdateAtResourceGroupLevel request. The method always -// closes the http.Response Body. -func (client ManagementLocksClient) CreateOrUpdateAtResourceGroupLevelResponder(resp *http.Response) (result ManagementLockObject, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// CreateOrUpdateAtResourceLevel create or update a management lock at the -// resource level or any level below resource. 
-// -// resourceGroupName is the name of the resource group. -// resourceProviderNamespace is resource identity. parentResourcePath is -// resource identity. resourceType is resource identity. resourceName is -// resource identity. lockName is the name of lock. parameters is create or -// update management lock parameters. -func (client ManagementLocksClient) CreateOrUpdateAtResourceLevel(resourceGroupName string, resourceProviderNamespace string, parentResourcePath string, resourceType string, resourceName string, lockName string, parameters ManagementLockProperties) (result ManagementLockObject, ae error) { - req, err := client.CreateOrUpdateAtResourceLevelPreparer(resourceGroupName, resourceProviderNamespace, parentResourcePath, resourceType, resourceName, lockName, parameters) - if err != nil { - return result, autorest.NewErrorWithError(err, "authorization/ManagementLocksClient", "CreateOrUpdateAtResourceLevel", "Failure preparing request") - } - - resp, err := client.CreateOrUpdateAtResourceLevelSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "authorization/ManagementLocksClient", "CreateOrUpdateAtResourceLevel", "Failure sending request") - } - - result, err = client.CreateOrUpdateAtResourceLevelResponder(resp) - if err != nil { - ae = autorest.NewErrorWithError(err, "authorization/ManagementLocksClient", "CreateOrUpdateAtResourceLevel", "Failure responding to request") - } - - return -} - -// CreateOrUpdateAtResourceLevelPreparer prepares the CreateOrUpdateAtResourceLevel request. 
-func (client ManagementLocksClient) CreateOrUpdateAtResourceLevelPreparer(resourceGroupName string, resourceProviderNamespace string, parentResourcePath string, resourceType string, resourceName string, lockName string, parameters ManagementLockProperties) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "lockName": url.QueryEscape(lockName), - "parentResourcePath": parentResourcePath, - "resourceGroupName": url.QueryEscape(resourceGroupName), - "resourceName": url.QueryEscape(resourceName), - "resourceProviderNamespace": url.QueryEscape(resourceProviderNamespace), - "resourceType": resourceType, - "subscriptionId": url.QueryEscape(client.SubscriptionID), - } - - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsPut(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}/providers/Microsoft.Authorization/locks/{lockName}"), - autorest.WithJSON(parameters), - autorest.WithPathParameters(pathParameters), - autorest.WithQueryParameters(queryParameters)) -} - -// CreateOrUpdateAtResourceLevelSender sends the CreateOrUpdateAtResourceLevel request. The method will close the -// http.Response Body if it receives an error. -func (client ManagementLocksClient) CreateOrUpdateAtResourceLevelSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK, http.StatusCreated) -} - -// CreateOrUpdateAtResourceLevelResponder handles the response to the CreateOrUpdateAtResourceLevel request. The method always -// closes the http.Response Body. 
-func (client ManagementLocksClient) CreateOrUpdateAtResourceLevelResponder(resp *http.Response) (result ManagementLockObject, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// CreateOrUpdateAtSubscriptionLevel create or update a management lock at the -// subscription level. -// -// lockName is the name of lock. parameters is the management lock parameters. -func (client ManagementLocksClient) CreateOrUpdateAtSubscriptionLevel(lockName string, parameters ManagementLockProperties) (result ManagementLockObject, ae error) { - req, err := client.CreateOrUpdateAtSubscriptionLevelPreparer(lockName, parameters) - if err != nil { - return result, autorest.NewErrorWithError(err, "authorization/ManagementLocksClient", "CreateOrUpdateAtSubscriptionLevel", "Failure preparing request") - } - - resp, err := client.CreateOrUpdateAtSubscriptionLevelSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "authorization/ManagementLocksClient", "CreateOrUpdateAtSubscriptionLevel", "Failure sending request") - } - - result, err = client.CreateOrUpdateAtSubscriptionLevelResponder(resp) - if err != nil { - ae = autorest.NewErrorWithError(err, "authorization/ManagementLocksClient", "CreateOrUpdateAtSubscriptionLevel", "Failure responding to request") - } - - return -} - -// CreateOrUpdateAtSubscriptionLevelPreparer prepares the CreateOrUpdateAtSubscriptionLevel request. 
-func (client ManagementLocksClient) CreateOrUpdateAtSubscriptionLevelPreparer(lockName string, parameters ManagementLockProperties) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "lockName": url.QueryEscape(lockName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - } - - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsPut(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/providers/Microsoft.Authorization/locks/{lockName}"), - autorest.WithJSON(parameters), - autorest.WithPathParameters(pathParameters), - autorest.WithQueryParameters(queryParameters)) -} - -// CreateOrUpdateAtSubscriptionLevelSender sends the CreateOrUpdateAtSubscriptionLevel request. The method will close the -// http.Response Body if it receives an error. -func (client ManagementLocksClient) CreateOrUpdateAtSubscriptionLevelSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusCreated, http.StatusOK) -} - -// CreateOrUpdateAtSubscriptionLevelResponder handles the response to the CreateOrUpdateAtSubscriptionLevel request. The method always -// closes the http.Response Body. -func (client ManagementLocksClient) CreateOrUpdateAtSubscriptionLevelResponder(resp *http.Response) (result ManagementLockObject, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusCreated, http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// DeleteAtResourceGroupLevel deletes the management lock of a resource group. -// -// resourceGroup is the resource group names. lockName is the name of lock. 
-func (client ManagementLocksClient) DeleteAtResourceGroupLevel(resourceGroup string, lockName string) (result autorest.Response, ae error) { - req, err := client.DeleteAtResourceGroupLevelPreparer(resourceGroup, lockName) - if err != nil { - return result, autorest.NewErrorWithError(err, "authorization/ManagementLocksClient", "DeleteAtResourceGroupLevel", "Failure preparing request") - } - - resp, err := client.DeleteAtResourceGroupLevelSender(req) - if err != nil { - result.Response = resp - return result, autorest.NewErrorWithError(err, "authorization/ManagementLocksClient", "DeleteAtResourceGroupLevel", "Failure sending request") - } - - result, err = client.DeleteAtResourceGroupLevelResponder(resp) - if err != nil { - ae = autorest.NewErrorWithError(err, "authorization/ManagementLocksClient", "DeleteAtResourceGroupLevel", "Failure responding to request") - } - - return -} - -// DeleteAtResourceGroupLevelPreparer prepares the DeleteAtResourceGroupLevel request. -func (client ManagementLocksClient) DeleteAtResourceGroupLevelPreparer(resourceGroup string, lockName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "lockName": url.QueryEscape(lockName), - "resourceGroup": url.QueryEscape(resourceGroup), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - } - - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsDelete(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Authorization/locks/{lockName}"), - autorest.WithPathParameters(pathParameters), - autorest.WithQueryParameters(queryParameters)) -} - -// DeleteAtResourceGroupLevelSender sends the DeleteAtResourceGroupLevel request. The method will close the -// http.Response Body if it receives an error. 
-func (client ManagementLocksClient) DeleteAtResourceGroupLevelSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusNoContent, http.StatusOK, http.StatusAccepted) -} - -// DeleteAtResourceGroupLevelResponder handles the response to the DeleteAtResourceGroupLevel request. The method always -// closes the http.Response Body. -func (client ManagementLocksClient) DeleteAtResourceGroupLevelResponder(resp *http.Response) (result autorest.Response, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusNoContent, http.StatusOK, http.StatusAccepted), - autorest.ByClosing()) - result.Response = resp - return -} - -// DeleteAtResourceLevel deletes the management lock of a resource or any -// level below resource. -// -// resourceGroupName is the name of the resource group. -// resourceProviderNamespace is resource identity. parentResourcePath is -// resource identity. resourceType is resource identity. resourceName is -// resource identity. lockName is the name of lock. 
-func (client ManagementLocksClient) DeleteAtResourceLevel(resourceGroupName string, resourceProviderNamespace string, parentResourcePath string, resourceType string, resourceName string, lockName string) (result autorest.Response, ae error) { - req, err := client.DeleteAtResourceLevelPreparer(resourceGroupName, resourceProviderNamespace, parentResourcePath, resourceType, resourceName, lockName) - if err != nil { - return result, autorest.NewErrorWithError(err, "authorization/ManagementLocksClient", "DeleteAtResourceLevel", "Failure preparing request") - } - - resp, err := client.DeleteAtResourceLevelSender(req) - if err != nil { - result.Response = resp - return result, autorest.NewErrorWithError(err, "authorization/ManagementLocksClient", "DeleteAtResourceLevel", "Failure sending request") - } - - result, err = client.DeleteAtResourceLevelResponder(resp) - if err != nil { - ae = autorest.NewErrorWithError(err, "authorization/ManagementLocksClient", "DeleteAtResourceLevel", "Failure responding to request") - } - - return -} - -// DeleteAtResourceLevelPreparer prepares the DeleteAtResourceLevel request. 
-func (client ManagementLocksClient) DeleteAtResourceLevelPreparer(resourceGroupName string, resourceProviderNamespace string, parentResourcePath string, resourceType string, resourceName string, lockName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "lockName": url.QueryEscape(lockName), - "parentResourcePath": parentResourcePath, - "resourceGroupName": url.QueryEscape(resourceGroupName), - "resourceName": url.QueryEscape(resourceName), - "resourceProviderNamespace": url.QueryEscape(resourceProviderNamespace), - "resourceType": resourceType, - "subscriptionId": url.QueryEscape(client.SubscriptionID), - } - - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsDelete(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}/providers/Microsoft.Authorization/locks/{lockName}"), - autorest.WithPathParameters(pathParameters), - autorest.WithQueryParameters(queryParameters)) -} - -// DeleteAtResourceLevelSender sends the DeleteAtResourceLevel request. The method will close the -// http.Response Body if it receives an error. -func (client ManagementLocksClient) DeleteAtResourceLevelSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusNoContent, http.StatusOK, http.StatusAccepted) -} - -// DeleteAtResourceLevelResponder handles the response to the DeleteAtResourceLevel request. The method always -// closes the http.Response Body. 
-func (client ManagementLocksClient) DeleteAtResourceLevelResponder(resp *http.Response) (result autorest.Response, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusNoContent, http.StatusOK, http.StatusAccepted), - autorest.ByClosing()) - result.Response = resp - return -} - -// DeleteAtSubscriptionLevel deletes the management lock of a subscription. -// -// lockName is the name of lock. -func (client ManagementLocksClient) DeleteAtSubscriptionLevel(lockName string) (result autorest.Response, ae error) { - req, err := client.DeleteAtSubscriptionLevelPreparer(lockName) - if err != nil { - return result, autorest.NewErrorWithError(err, "authorization/ManagementLocksClient", "DeleteAtSubscriptionLevel", "Failure preparing request") - } - - resp, err := client.DeleteAtSubscriptionLevelSender(req) - if err != nil { - result.Response = resp - return result, autorest.NewErrorWithError(err, "authorization/ManagementLocksClient", "DeleteAtSubscriptionLevel", "Failure sending request") - } - - result, err = client.DeleteAtSubscriptionLevelResponder(resp) - if err != nil { - ae = autorest.NewErrorWithError(err, "authorization/ManagementLocksClient", "DeleteAtSubscriptionLevel", "Failure responding to request") - } - - return -} - -// DeleteAtSubscriptionLevelPreparer prepares the DeleteAtSubscriptionLevel request. 
-func (client ManagementLocksClient) DeleteAtSubscriptionLevelPreparer(lockName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "lockName": url.QueryEscape(lockName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - } - - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsDelete(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/providers/Microsoft.Authorization/locks/{lockName}"), - autorest.WithPathParameters(pathParameters), - autorest.WithQueryParameters(queryParameters)) -} - -// DeleteAtSubscriptionLevelSender sends the DeleteAtSubscriptionLevel request. The method will close the -// http.Response Body if it receives an error. -func (client ManagementLocksClient) DeleteAtSubscriptionLevelSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusNoContent, http.StatusOK, http.StatusAccepted) -} - -// DeleteAtSubscriptionLevelResponder handles the response to the DeleteAtSubscriptionLevel request. The method always -// closes the http.Response Body. -func (client ManagementLocksClient) DeleteAtSubscriptionLevelResponder(resp *http.Response) (result autorest.Response, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusNoContent, http.StatusOK, http.StatusAccepted), - autorest.ByClosing()) - result.Response = resp - return -} - -// Get gets the management lock of a scope. -// -// lockName is name of the management lock. 
-func (client ManagementLocksClient) Get(lockName string) (result ManagementLockObject, ae error) { - req, err := client.GetPreparer(lockName) - if err != nil { - return result, autorest.NewErrorWithError(err, "authorization/ManagementLocksClient", "Get", "Failure preparing request") - } - - resp, err := client.GetSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "authorization/ManagementLocksClient", "Get", "Failure sending request") - } - - result, err = client.GetResponder(resp) - if err != nil { - ae = autorest.NewErrorWithError(err, "authorization/ManagementLocksClient", "Get", "Failure responding to request") - } - - return -} - -// GetPreparer prepares the Get request. -func (client ManagementLocksClient) GetPreparer(lockName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "lockName": url.QueryEscape(lockName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - } - - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/providers/Microsoft.Authorization/locks/{lockName}"), - autorest.WithPathParameters(pathParameters), - autorest.WithQueryParameters(queryParameters)) -} - -// GetSender sends the Get request. The method will close the -// http.Response Body if it receives an error. -func (client ManagementLocksClient) GetSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK, http.StatusNoContent) -} - -// GetResponder handles the response to the Get request. The method always -// closes the http.Response Body. 
-func (client ManagementLocksClient) GetResponder(resp *http.Response) (result ManagementLockObject, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// ListAtResourceGroupLevel gets all the management locks of a resource group. -// -// resourceGroupName is resource group name. filter is the filter to apply on -// the operation. -func (client ManagementLocksClient) ListAtResourceGroupLevel(resourceGroupName string, filter string) (result ManagementLockListResult, ae error) { - req, err := client.ListAtResourceGroupLevelPreparer(resourceGroupName, filter) - if err != nil { - return result, autorest.NewErrorWithError(err, "authorization/ManagementLocksClient", "ListAtResourceGroupLevel", "Failure preparing request") - } - - resp, err := client.ListAtResourceGroupLevelSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "authorization/ManagementLocksClient", "ListAtResourceGroupLevel", "Failure sending request") - } - - result, err = client.ListAtResourceGroupLevelResponder(resp) - if err != nil { - ae = autorest.NewErrorWithError(err, "authorization/ManagementLocksClient", "ListAtResourceGroupLevel", "Failure responding to request") - } - - return -} - -// ListAtResourceGroupLevelPreparer prepares the ListAtResourceGroupLevel request. 
-func (client ManagementLocksClient) ListAtResourceGroupLevelPreparer(resourceGroupName string, filter string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - } - - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - if len(filter) > 0 { - queryParameters["$filter"] = filter - } - - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Authorization/locks"), - autorest.WithPathParameters(pathParameters), - autorest.WithQueryParameters(queryParameters)) -} - -// ListAtResourceGroupLevelSender sends the ListAtResourceGroupLevel request. The method will close the -// http.Response Body if it receives an error. -func (client ManagementLocksClient) ListAtResourceGroupLevelSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) -} - -// ListAtResourceGroupLevelResponder handles the response to the ListAtResourceGroupLevel request. The method always -// closes the http.Response Body. -func (client ManagementLocksClient) ListAtResourceGroupLevelResponder(resp *http.Response) (result ManagementLockListResult, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// ListAtResourceGroupLevelNextResults retrieves the next set of results, if any. 
-func (client ManagementLocksClient) ListAtResourceGroupLevelNextResults(lastResults ManagementLockListResult) (result ManagementLockListResult, ae error) { - req, err := lastResults.ManagementLockListResultPreparer() - if err != nil { - return result, autorest.NewErrorWithError(err, "authorization/ManagementLocksClient", "ListAtResourceGroupLevel", "Failure preparing next results request request") - } - if req == nil { - return - } - - resp, err := client.ListAtResourceGroupLevelSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "authorization/ManagementLocksClient", "ListAtResourceGroupLevel", "Failure sending next results request request") - } - - result, err = client.ListAtResourceGroupLevelResponder(resp) - if err != nil { - ae = autorest.NewErrorWithError(err, "authorization/ManagementLocksClient", "ListAtResourceGroupLevel", "Failure responding to next results request request") - } - - return -} - -// ListAtResourceLevel gets all the management locks of a resource or any -// level below resource. -// -// resourceGroupName is the name of the resource group. The name is case -// insensitive. resourceProviderNamespace is resource identity. -// parentResourcePath is resource identity. resourceType is resource -// identity. resourceName is resource identity. filter is the filter to apply -// on the operation. 
-func (client ManagementLocksClient) ListAtResourceLevel(resourceGroupName string, resourceProviderNamespace string, parentResourcePath string, resourceType string, resourceName string, filter string) (result ManagementLockListResult, ae error) { - req, err := client.ListAtResourceLevelPreparer(resourceGroupName, resourceProviderNamespace, parentResourcePath, resourceType, resourceName, filter) - if err != nil { - return result, autorest.NewErrorWithError(err, "authorization/ManagementLocksClient", "ListAtResourceLevel", "Failure preparing request") - } - - resp, err := client.ListAtResourceLevelSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "authorization/ManagementLocksClient", "ListAtResourceLevel", "Failure sending request") - } - - result, err = client.ListAtResourceLevelResponder(resp) - if err != nil { - ae = autorest.NewErrorWithError(err, "authorization/ManagementLocksClient", "ListAtResourceLevel", "Failure responding to request") - } - - return -} - -// ListAtResourceLevelPreparer prepares the ListAtResourceLevel request. 
-func (client ManagementLocksClient) ListAtResourceLevelPreparer(resourceGroupName string, resourceProviderNamespace string, parentResourcePath string, resourceType string, resourceName string, filter string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "parentResourcePath": parentResourcePath, - "resourceGroupName": url.QueryEscape(resourceGroupName), - "resourceName": url.QueryEscape(resourceName), - "resourceProviderNamespace": url.QueryEscape(resourceProviderNamespace), - "resourceType": resourceType, - "subscriptionId": url.QueryEscape(client.SubscriptionID), - } - - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - if len(filter) > 0 { - queryParameters["$filter"] = filter - } - - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}/providers/Microsoft.Authorization/locks"), - autorest.WithPathParameters(pathParameters), - autorest.WithQueryParameters(queryParameters)) -} - -// ListAtResourceLevelSender sends the ListAtResourceLevel request. The method will close the -// http.Response Body if it receives an error. -func (client ManagementLocksClient) ListAtResourceLevelSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) -} - -// ListAtResourceLevelResponder handles the response to the ListAtResourceLevel request. The method always -// closes the http.Response Body. 
-func (client ManagementLocksClient) ListAtResourceLevelResponder(resp *http.Response) (result ManagementLockListResult, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// ListAtResourceLevelNextResults retrieves the next set of results, if any. -func (client ManagementLocksClient) ListAtResourceLevelNextResults(lastResults ManagementLockListResult) (result ManagementLockListResult, ae error) { - req, err := lastResults.ManagementLockListResultPreparer() - if err != nil { - return result, autorest.NewErrorWithError(err, "authorization/ManagementLocksClient", "ListAtResourceLevel", "Failure preparing next results request request") - } - if req == nil { - return - } - - resp, err := client.ListAtResourceLevelSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "authorization/ManagementLocksClient", "ListAtResourceLevel", "Failure sending next results request request") - } - - result, err = client.ListAtResourceLevelResponder(resp) - if err != nil { - ae = autorest.NewErrorWithError(err, "authorization/ManagementLocksClient", "ListAtResourceLevel", "Failure responding to next results request request") - } - - return -} - -// ListAtSubscriptionLevel gets all the management locks of a subscription. -// -// filter is the filter to apply on the operation. 
-func (client ManagementLocksClient) ListAtSubscriptionLevel(filter string) (result ManagementLockListResult, ae error) { - req, err := client.ListAtSubscriptionLevelPreparer(filter) - if err != nil { - return result, autorest.NewErrorWithError(err, "authorization/ManagementLocksClient", "ListAtSubscriptionLevel", "Failure preparing request") - } - - resp, err := client.ListAtSubscriptionLevelSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "authorization/ManagementLocksClient", "ListAtSubscriptionLevel", "Failure sending request") - } - - result, err = client.ListAtSubscriptionLevelResponder(resp) - if err != nil { - ae = autorest.NewErrorWithError(err, "authorization/ManagementLocksClient", "ListAtSubscriptionLevel", "Failure responding to request") - } - - return -} - -// ListAtSubscriptionLevelPreparer prepares the ListAtSubscriptionLevel request. -func (client ManagementLocksClient) ListAtSubscriptionLevelPreparer(filter string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "subscriptionId": url.QueryEscape(client.SubscriptionID), - } - - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - if len(filter) > 0 { - queryParameters["$filter"] = filter - } - - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/providers/Microsoft.Authorization/locks"), - autorest.WithPathParameters(pathParameters), - autorest.WithQueryParameters(queryParameters)) -} - -// ListAtSubscriptionLevelSender sends the ListAtSubscriptionLevel request. The method will close the -// http.Response Body if it receives an error. 
-func (client ManagementLocksClient) ListAtSubscriptionLevelSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) -} - -// ListAtSubscriptionLevelResponder handles the response to the ListAtSubscriptionLevel request. The method always -// closes the http.Response Body. -func (client ManagementLocksClient) ListAtSubscriptionLevelResponder(resp *http.Response) (result ManagementLockListResult, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// ListAtSubscriptionLevelNextResults retrieves the next set of results, if any. -func (client ManagementLocksClient) ListAtSubscriptionLevelNextResults(lastResults ManagementLockListResult) (result ManagementLockListResult, ae error) { - req, err := lastResults.ManagementLockListResultPreparer() - if err != nil { - return result, autorest.NewErrorWithError(err, "authorization/ManagementLocksClient", "ListAtSubscriptionLevel", "Failure preparing next results request request") - } - if req == nil { - return - } - - resp, err := client.ListAtSubscriptionLevelSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "authorization/ManagementLocksClient", "ListAtSubscriptionLevel", "Failure sending next results request request") - } - - result, err = client.ListAtSubscriptionLevelResponder(resp) - if err != nil { - ae = autorest.NewErrorWithError(err, "authorization/ManagementLocksClient", "ListAtSubscriptionLevel", "Failure responding to next results request request") - } - - return -} - -// ListNext get a list of management locks at resource level or below. -// -// nextLink is nextLink from the previous successful call to List operation. 
-func (client ManagementLocksClient) ListNext(nextLink string) (result ManagementLockListResult, ae error) { - req, err := client.ListNextPreparer(nextLink) - if err != nil { - return result, autorest.NewErrorWithError(err, "authorization/ManagementLocksClient", "ListNext", "Failure preparing request") - } - - resp, err := client.ListNextSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "authorization/ManagementLocksClient", "ListNext", "Failure sending request") - } - - result, err = client.ListNextResponder(resp) - if err != nil { - ae = autorest.NewErrorWithError(err, "authorization/ManagementLocksClient", "ListNext", "Failure responding to request") - } - - return -} - -// ListNextPreparer prepares the ListNext request. -func (client ManagementLocksClient) ListNextPreparer(nextLink string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "nextLink": nextLink, - "subscriptionId": url.QueryEscape(client.SubscriptionID), - } - - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/{nextLink}"), - autorest.WithPathParameters(pathParameters)) -} - -// ListNextSender sends the ListNext request. The method will close the -// http.Response Body if it receives an error. -func (client ManagementLocksClient) ListNextSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) -} - -// ListNextResponder handles the response to the ListNext request. The method always -// closes the http.Response Body. 
-func (client ManagementLocksClient) ListNextResponder(resp *http.Response) (result ManagementLockListResult, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// ListNextNextResults retrieves the next set of results, if any. -func (client ManagementLocksClient) ListNextNextResults(lastResults ManagementLockListResult) (result ManagementLockListResult, ae error) { - req, err := lastResults.ManagementLockListResultPreparer() - if err != nil { - return result, autorest.NewErrorWithError(err, "authorization/ManagementLocksClient", "ListNext", "Failure preparing next results request request") - } - if req == nil { - return - } - - resp, err := client.ListNextSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "authorization/ManagementLocksClient", "ListNext", "Failure sending next results request request") - } - - result, err = client.ListNextResponder(resp) - if err != nil { - ae = autorest.NewErrorWithError(err, "authorization/ManagementLocksClient", "ListNext", "Failure responding to next results request request") - } - - return -} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/authorization/models.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/authorization/models.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/authorization/models.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/authorization/models.go 2016-10-13 14:32:06.000000000 +0000 @@ -14,50 +14,105 @@ // See the License for the specific language governing permissions and // limitations under the License. 
// -// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 // Changes may cause incorrect behavior and will be lost if the code is // regenerated. import ( - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/to" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/to" "net/http" ) -// LockLevel enumerates the values for lock level. -type LockLevel string +// ClassicAdministrator is classic Administrators +type ClassicAdministrator struct { + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Properties *ClassicAdministratorProperties `json:"properties,omitempty"` +} -const ( - // CanNotDelete specifies the can not delete state for lock level. - CanNotDelete LockLevel = "CanNotDelete" - // NotSpecified specifies the not specified state for lock level. - NotSpecified LockLevel = "NotSpecified" - // ReadOnly specifies the read only state for lock level. - ReadOnly LockLevel = "ReadOnly" -) +// ClassicAdministratorListResult is classicAdministrator list result +// information. +type ClassicAdministratorListResult struct { + autorest.Response `json:"-"` + Value *[]ClassicAdministrator `json:"value,omitempty"` + NextLink *string `json:",omitempty"` +} + +// ClassicAdministratorListResultPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. 
+func (client ClassicAdministratorListResult) ClassicAdministratorListResultPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + +// ClassicAdministratorProperties is classic Administrator properties. +type ClassicAdministratorProperties struct { + EmailAddress *string `json:"emailAddress,omitempty"` + Role *string `json:"role,omitempty"` +} + +// Permission is role definition permissions. +type Permission struct { + Actions *[]string `json:"actions,omitempty"` + NotActions *[]string `json:"notActions,omitempty"` +} + +// PermissionGetResult is permissions information. +type PermissionGetResult struct { + autorest.Response `json:"-"` + Value *[]Permission `json:"value,omitempty"` + NextLink *string `json:",omitempty"` +} -// DeploymentExtendedFilter is deployment filter. -type DeploymentExtendedFilter struct { - ProvisioningState *string `json:"provisioningState,omitempty"` +// PermissionGetResultPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. +func (client PermissionGetResult) PermissionGetResultPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) } -// GenericResourceFilter is resource filter. 
-type GenericResourceFilter struct { - ResourceType *string `json:"resourceType,omitempty"` - Tagname *string `json:"tagname,omitempty"` - Tagvalue *string `json:"tagvalue,omitempty"` +// ProviderOperation is operation +type ProviderOperation struct { + Name *string `json:"name,omitempty"` + DisplayName *string `json:"displayName,omitempty"` + Description *string `json:"description,omitempty"` + Origin *string `json:"origin,omitempty"` + Properties *map[string]interface{} `json:"properties,omitempty"` } -// ManagementLockListResult is list of management locks. -type ManagementLockListResult struct { +// ProviderOperationsMetadata is provider Operations metadata +type ProviderOperationsMetadata struct { autorest.Response `json:"-"` - Value *[]ManagementLockObject `json:"value,omitempty"` - NextLink *string `json:"nextLink,omitempty"` + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + DisplayName *string `json:"displayName,omitempty"` + ResourceTypes *[]ResourceType `json:"resourceTypes,omitempty"` + Operations *[]ProviderOperation `json:"operations,omitempty"` } -// ManagementLockListResultPreparer prepares a request to retrieve the next set of results. It returns +// ProviderOperationsMetadataListResult is provider operations metadata list +type ProviderOperationsMetadataListResult struct { + autorest.Response `json:"-"` + Value *[]ProviderOperationsMetadata `json:"value,omitempty"` + NextLink *string `json:",omitempty"` +} + +// ProviderOperationsMetadataListResultPreparer prepares a request to retrieve the next set of results. It returns // nil if no more results exist. 
-func (client ManagementLockListResult) ManagementLockListResultPreparer() (*http.Request, error) { +func (client ProviderOperationsMetadataListResult) ProviderOperationsMetadataListResultPreparer() (*http.Request, error) { if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { return nil, nil } @@ -67,37 +122,102 @@ autorest.WithBaseURL(to.String(client.NextLink))) } -// ManagementLockObject is management lock information. -type ManagementLockObject struct { +// ResourceType is resource Type +type ResourceType struct { + Name *string `json:"name,omitempty"` + DisplayName *string `json:"displayName,omitempty"` + Operations *[]ProviderOperation `json:"operations,omitempty"` +} + +// RoleAssignment is role Assignments +type RoleAssignment struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Properties *RoleAssignmentPropertiesWithScope `json:"properties,omitempty"` +} + +// RoleAssignmentCreateParameters is role assignment create parameters. +type RoleAssignmentCreateParameters struct { + Properties *RoleAssignmentProperties `json:"properties,omitempty"` +} + +// RoleAssignmentFilter is role Assignments filter +type RoleAssignmentFilter struct { + PrincipalID *string `json:"principalId,omitempty"` +} + +// RoleAssignmentListResult is role assignment list operation result. +type RoleAssignmentListResult struct { + autorest.Response `json:"-"` + Value *[]RoleAssignment `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// RoleAssignmentListResultPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. 
+func (client RoleAssignmentListResult) RoleAssignmentListResultPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + +// RoleAssignmentProperties is role assignment properties. +type RoleAssignmentProperties struct { + RoleDefinitionID *string `json:"roleDefinitionId,omitempty"` + PrincipalID *string `json:"principalId,omitempty"` +} + +// RoleAssignmentPropertiesWithScope is role assignment properties with scope. +type RoleAssignmentPropertiesWithScope struct { + Scope *string `json:"scope,omitempty"` + RoleDefinitionID *string `json:"roleDefinitionId,omitempty"` + PrincipalID *string `json:"principalId,omitempty"` +} + +// RoleDefinition is role definition. +type RoleDefinition struct { autorest.Response `json:"-"` - Properties *ManagementLockProperties `json:"properties,omitempty"` ID *string `json:"id,omitempty"` - Type *string `json:"type,omitempty"` Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Properties *RoleDefinitionProperties `json:"properties,omitempty"` +} + +// RoleDefinitionFilter is role Definitions filter +type RoleDefinitionFilter struct { + RoleName *string `json:"roleName,omitempty"` +} + +// RoleDefinitionListResult is role definition list operation result. +type RoleDefinitionListResult struct { + autorest.Response `json:"-"` + Value *[]RoleDefinition `json:"value,omitempty"` + NextLink *string `json:",omitempty"` +} + +// RoleDefinitionListResultPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. 
+func (client RoleDefinitionListResult) RoleDefinitionListResultPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) } -// ManagementLockProperties is the management lock properties. -type ManagementLockProperties struct { - Level LockLevel `json:"level,omitempty"` - Notes *string `json:"notes,omitempty"` -} - -// Resource is -type Resource struct { - ID *string `json:"id,omitempty"` - Name *string `json:"name,omitempty"` - Type *string `json:"type,omitempty"` - Location *string `json:"location,omitempty"` - Tags *map[string]*string `json:"tags,omitempty"` -} - -// ResourceGroupFilter is resource group filter. -type ResourceGroupFilter struct { - TagName *string `json:"tagName,omitempty"` - TagValue *string `json:"tagValue,omitempty"` -} - -// SubResource is -type SubResource struct { - ID *string `json:"id,omitempty"` +// RoleDefinitionProperties is role definition properties. +type RoleDefinitionProperties struct { + RoleName *string `json:"roleName,omitempty"` + Description *string `json:"description,omitempty"` + Type *string `json:"type,omitempty"` + Permissions *[]Permission `json:"permissions,omitempty"` + AssignableScopes *[]string `json:"assignableScopes,omitempty"` } diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/authorization/permissions.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/authorization/permissions.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/authorization/permissions.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/authorization/permissions.go 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,219 @@ +package authorization + +// Copyright (c) Microsoft and contributors. All rights reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "net/http" +) + +// PermissionsClient is the client for the Permissions methods of the +// Authorization service. +type PermissionsClient struct { + ManagementClient +} + +// NewPermissionsClient creates an instance of the PermissionsClient client. +func NewPermissionsClient(subscriptionID string) PermissionsClient { + return NewPermissionsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewPermissionsClientWithBaseURI creates an instance of the +// PermissionsClient client. +func NewPermissionsClientWithBaseURI(baseURI string, subscriptionID string) PermissionsClient { + return PermissionsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// ListForResource gets a resource permissions. +// +// resourceGroupName is the name of the resource group. The name is case +// insensitive. 
resourceProviderNamespace is resource parentResourcePath is +// resource resourceType is resource resourceName is resource +func (client PermissionsClient) ListForResource(resourceGroupName string, resourceProviderNamespace string, parentResourcePath string, resourceType string, resourceName string) (result PermissionGetResult, err error) { + req, err := client.ListForResourcePreparer(resourceGroupName, resourceProviderNamespace, parentResourcePath, resourceType, resourceName) + if err != nil { + return result, autorest.NewErrorWithError(err, "authorization.PermissionsClient", "ListForResource", nil, "Failure preparing request") + } + + resp, err := client.ListForResourceSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "authorization.PermissionsClient", "ListForResource", resp, "Failure sending request") + } + + result, err = client.ListForResourceResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "authorization.PermissionsClient", "ListForResource", resp, "Failure responding to request") + } + + return +} + +// ListForResourcePreparer prepares the ListForResource request. 
+func (client PermissionsClient) ListForResourcePreparer(resourceGroupName string, resourceProviderNamespace string, parentResourcePath string, resourceType string, resourceName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "parentResourcePath": parentResourcePath, + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "resourceName": autorest.Encode("path", resourceName), + "resourceProviderNamespace": autorest.Encode("path", resourceProviderNamespace), + "resourceType": resourceType, + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}/providers/Microsoft.Authorization/permissions", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListForResourceSender sends the ListForResource request. The method will close the +// http.Response Body if it receives an error. +func (client PermissionsClient) ListForResourceSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListForResourceResponder handles the response to the ListForResource request. The method always +// closes the http.Response Body. 
+func (client PermissionsClient) ListForResourceResponder(resp *http.Response) (result PermissionGetResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListForResourceNextResults retrieves the next set of results, if any. +func (client PermissionsClient) ListForResourceNextResults(lastResults PermissionGetResult) (result PermissionGetResult, err error) { + req, err := lastResults.PermissionGetResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "authorization.PermissionsClient", "ListForResource", nil, "Failure preparing next results request request") + } + if req == nil { + return + } + + resp, err := client.ListForResourceSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "authorization.PermissionsClient", "ListForResource", resp, "Failure sending next results request request") + } + + result, err = client.ListForResourceResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "authorization.PermissionsClient", "ListForResource", resp, "Failure responding to next results request request") + } + + return +} + +// ListForResourceGroup gets a resource group permissions. +// +// resourceGroupName is name of the resource group to get the permissions +// for.The name is case insensitive. 
+func (client PermissionsClient) ListForResourceGroup(resourceGroupName string) (result PermissionGetResult, err error) { + req, err := client.ListForResourceGroupPreparer(resourceGroupName) + if err != nil { + return result, autorest.NewErrorWithError(err, "authorization.PermissionsClient", "ListForResourceGroup", nil, "Failure preparing request") + } + + resp, err := client.ListForResourceGroupSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "authorization.PermissionsClient", "ListForResourceGroup", resp, "Failure sending request") + } + + result, err = client.ListForResourceGroupResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "authorization.PermissionsClient", "ListForResourceGroup", resp, "Failure responding to request") + } + + return +} + +// ListForResourceGroupPreparer prepares the ListForResourceGroup request. +func (client PermissionsClient) ListForResourceGroupPreparer(resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Authorization/permissions", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListForResourceGroupSender sends the ListForResourceGroup request. The method will close the +// http.Response Body if it receives an error. 
+func (client PermissionsClient) ListForResourceGroupSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListForResourceGroupResponder handles the response to the ListForResourceGroup request. The method always +// closes the http.Response Body. +func (client PermissionsClient) ListForResourceGroupResponder(resp *http.Response) (result PermissionGetResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListForResourceGroupNextResults retrieves the next set of results, if any. +func (client PermissionsClient) ListForResourceGroupNextResults(lastResults PermissionGetResult) (result PermissionGetResult, err error) { + req, err := lastResults.PermissionGetResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "authorization.PermissionsClient", "ListForResourceGroup", nil, "Failure preparing next results request request") + } + if req == nil { + return + } + + resp, err := client.ListForResourceGroupSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "authorization.PermissionsClient", "ListForResourceGroup", resp, "Failure sending next results request request") + } + + result, err = client.ListForResourceGroupResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "authorization.PermissionsClient", "ListForResourceGroup", resp, "Failure responding to next results request request") + } + + return +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/authorization/provideroperationsmetadataoperations.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/authorization/provideroperationsmetadataoperations.go --- 
juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/authorization/provideroperationsmetadataoperations.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/authorization/provideroperationsmetadataoperations.go 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,188 @@ +package authorization + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "net/http" +) + +// ProviderOperationsMetadataOperationsClient is the client for the +// ProviderOperationsMetadataOperations methods of the Authorization service. +type ProviderOperationsMetadataOperationsClient struct { + ManagementClient +} + +// NewProviderOperationsMetadataOperationsClient creates an instance of the +// ProviderOperationsMetadataOperationsClient client. +func NewProviderOperationsMetadataOperationsClient(subscriptionID string) ProviderOperationsMetadataOperationsClient { + return NewProviderOperationsMetadataOperationsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewProviderOperationsMetadataOperationsClientWithBaseURI creates an +// instance of the ProviderOperationsMetadataOperationsClient client. 
+func NewProviderOperationsMetadataOperationsClientWithBaseURI(baseURI string, subscriptionID string) ProviderOperationsMetadataOperationsClient { + return ProviderOperationsMetadataOperationsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// Get gets provider operations metadata +// +// resourceProviderNamespace is namespace of the resource provider. +func (client ProviderOperationsMetadataOperationsClient) Get(resourceProviderNamespace string, expand string) (result ProviderOperationsMetadata, err error) { + req, err := client.GetPreparer(resourceProviderNamespace, expand) + if err != nil { + return result, autorest.NewErrorWithError(err, "authorization.ProviderOperationsMetadataOperationsClient", "Get", nil, "Failure preparing request") + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "authorization.ProviderOperationsMetadataOperationsClient", "Get", resp, "Failure sending request") + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "authorization.ProviderOperationsMetadataOperationsClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. 
+func (client ProviderOperationsMetadataOperationsClient) GetPreparer(resourceProviderNamespace string, expand string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceProviderNamespace": autorest.Encode("path", resourceProviderNamespace), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + if len(expand) > 0 { + queryParameters["$expand"] = autorest.Encode("query", expand) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/providers/Microsoft.Authorization/providerOperations/{resourceProviderNamespace}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client ProviderOperationsMetadataOperationsClient) GetSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. 
+func (client ProviderOperationsMetadataOperationsClient) GetResponder(resp *http.Response) (result ProviderOperationsMetadata, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List gets provider operations metadata list +// +func (client ProviderOperationsMetadataOperationsClient) List(expand string) (result ProviderOperationsMetadataListResult, err error) { + req, err := client.ListPreparer(expand) + if err != nil { + return result, autorest.NewErrorWithError(err, "authorization.ProviderOperationsMetadataOperationsClient", "List", nil, "Failure preparing request") + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "authorization.ProviderOperationsMetadataOperationsClient", "List", resp, "Failure sending request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "authorization.ProviderOperationsMetadataOperationsClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client ProviderOperationsMetadataOperationsClient) ListPreparer(expand string) (*http.Request, error) { + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + if len(expand) > 0 { + queryParameters["$expand"] = autorest.Encode("query", expand) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/providers/Microsoft.Authorization/providerOperations"), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListSender sends the List request. 
The method will close the +// http.Response Body if it receives an error. +func (client ProviderOperationsMetadataOperationsClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client ProviderOperationsMetadataOperationsClient) ListResponder(resp *http.Response) (result ProviderOperationsMetadataListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListNextResults retrieves the next set of results, if any. +func (client ProviderOperationsMetadataOperationsClient) ListNextResults(lastResults ProviderOperationsMetadataListResult) (result ProviderOperationsMetadataListResult, err error) { + req, err := lastResults.ProviderOperationsMetadataListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "authorization.ProviderOperationsMetadataOperationsClient", "List", nil, "Failure preparing next results request request") + } + if req == nil { + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "authorization.ProviderOperationsMetadataOperationsClient", "List", resp, "Failure sending next results request request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "authorization.ProviderOperationsMetadataOperationsClient", "List", resp, "Failure responding to next results request request") + } + + return +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/authorization/roleassignments.go 
juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/authorization/roleassignments.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/authorization/roleassignments.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/authorization/roleassignments.go 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,769 @@ +package authorization + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "net/http" +) + +// RoleAssignmentsClient is the client for the RoleAssignments methods of the +// Authorization service. +type RoleAssignmentsClient struct { + ManagementClient +} + +// NewRoleAssignmentsClient creates an instance of the RoleAssignmentsClient +// client. +func NewRoleAssignmentsClient(subscriptionID string) RoleAssignmentsClient { + return NewRoleAssignmentsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewRoleAssignmentsClientWithBaseURI creates an instance of the +// RoleAssignmentsClient client. 
+func NewRoleAssignmentsClientWithBaseURI(baseURI string, subscriptionID string) RoleAssignmentsClient { + return RoleAssignmentsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// Create create role assignment. +// +// scope is scope. roleAssignmentName is role assignment name. parameters is +// role assignment. +func (client RoleAssignmentsClient) Create(scope string, roleAssignmentName string, parameters RoleAssignmentCreateParameters) (result RoleAssignment, err error) { + req, err := client.CreatePreparer(scope, roleAssignmentName, parameters) + if err != nil { + return result, autorest.NewErrorWithError(err, "authorization.RoleAssignmentsClient", "Create", nil, "Failure preparing request") + } + + resp, err := client.CreateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "authorization.RoleAssignmentsClient", "Create", resp, "Failure sending request") + } + + result, err = client.CreateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "authorization.RoleAssignmentsClient", "Create", resp, "Failure responding to request") + } + + return +} + +// CreatePreparer prepares the Create request. 
+func (client RoleAssignmentsClient) CreatePreparer(scope string, roleAssignmentName string, parameters RoleAssignmentCreateParameters) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "roleAssignmentName": autorest.Encode("path", roleAssignmentName), + "scope": scope, + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/{scope}/providers/Microsoft.Authorization/roleAssignments/{roleAssignmentName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// CreateSender sends the Create request. The method will close the +// http.Response Body if it receives an error. +func (client RoleAssignmentsClient) CreateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// CreateResponder handles the response to the Create request. The method always +// closes the http.Response Body. +func (client RoleAssignmentsClient) CreateResponder(resp *http.Response) (result RoleAssignment, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// CreateByID create role assignment by Id. +// +// roleAssignmentID is role assignment Id parameters is role assignment. 
+func (client RoleAssignmentsClient) CreateByID(roleAssignmentID string, parameters RoleAssignmentCreateParameters) (result RoleAssignment, err error) { + req, err := client.CreateByIDPreparer(roleAssignmentID, parameters) + if err != nil { + return result, autorest.NewErrorWithError(err, "authorization.RoleAssignmentsClient", "CreateByID", nil, "Failure preparing request") + } + + resp, err := client.CreateByIDSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "authorization.RoleAssignmentsClient", "CreateByID", resp, "Failure sending request") + } + + result, err = client.CreateByIDResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "authorization.RoleAssignmentsClient", "CreateByID", resp, "Failure responding to request") + } + + return +} + +// CreateByIDPreparer prepares the CreateByID request. +func (client RoleAssignmentsClient) CreateByIDPreparer(roleAssignmentID string, parameters RoleAssignmentCreateParameters) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "roleAssignmentId": roleAssignmentID, + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/{roleAssignmentId}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// CreateByIDSender sends the CreateByID request. The method will close the +// http.Response Body if it receives an error. +func (client RoleAssignmentsClient) CreateByIDSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// CreateByIDResponder handles the response to the CreateByID request. The method always +// closes the http.Response Body. 
+func (client RoleAssignmentsClient) CreateByIDResponder(resp *http.Response) (result RoleAssignment, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete delete role assignment. +// +// scope is scope. roleAssignmentName is role assignment name. +func (client RoleAssignmentsClient) Delete(scope string, roleAssignmentName string) (result RoleAssignment, err error) { + req, err := client.DeletePreparer(scope, roleAssignmentName) + if err != nil { + return result, autorest.NewErrorWithError(err, "authorization.RoleAssignmentsClient", "Delete", nil, "Failure preparing request") + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "authorization.RoleAssignmentsClient", "Delete", resp, "Failure sending request") + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "authorization.RoleAssignmentsClient", "Delete", resp, "Failure responding to request") + } + + return +} + +// DeletePreparer prepares the Delete request. 
+func (client RoleAssignmentsClient) DeletePreparer(scope string, roleAssignmentName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "roleAssignmentName": autorest.Encode("path", roleAssignmentName), + "scope": scope, + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/{scope}/providers/Microsoft.Authorization/roleAssignments/{roleAssignmentName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client RoleAssignmentsClient) DeleteSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client RoleAssignmentsClient) DeleteResponder(resp *http.Response) (result RoleAssignment, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// DeleteByID delete role assignment. 
+// +// roleAssignmentID is role assignment Id +func (client RoleAssignmentsClient) DeleteByID(roleAssignmentID string) (result RoleAssignment, err error) { + req, err := client.DeleteByIDPreparer(roleAssignmentID) + if err != nil { + return result, autorest.NewErrorWithError(err, "authorization.RoleAssignmentsClient", "DeleteByID", nil, "Failure preparing request") + } + + resp, err := client.DeleteByIDSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "authorization.RoleAssignmentsClient", "DeleteByID", resp, "Failure sending request") + } + + result, err = client.DeleteByIDResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "authorization.RoleAssignmentsClient", "DeleteByID", resp, "Failure responding to request") + } + + return +} + +// DeleteByIDPreparer prepares the DeleteByID request. +func (client RoleAssignmentsClient) DeleteByIDPreparer(roleAssignmentID string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "roleAssignmentId": roleAssignmentID, + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/{roleAssignmentId}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// DeleteByIDSender sends the DeleteByID request. The method will close the +// http.Response Body if it receives an error. +func (client RoleAssignmentsClient) DeleteByIDSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// DeleteByIDResponder handles the response to the DeleteByID request. The method always +// closes the http.Response Body. 
+func (client RoleAssignmentsClient) DeleteByIDResponder(resp *http.Response) (result RoleAssignment, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Get get single role assignment. +// +// scope is scope. roleAssignmentName is role assignment name. +func (client RoleAssignmentsClient) Get(scope string, roleAssignmentName string) (result RoleAssignment, err error) { + req, err := client.GetPreparer(scope, roleAssignmentName) + if err != nil { + return result, autorest.NewErrorWithError(err, "authorization.RoleAssignmentsClient", "Get", nil, "Failure preparing request") + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "authorization.RoleAssignmentsClient", "Get", resp, "Failure sending request") + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "authorization.RoleAssignmentsClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client RoleAssignmentsClient) GetPreparer(scope string, roleAssignmentName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "roleAssignmentName": autorest.Encode("path", roleAssignmentName), + "scope": scope, + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/{scope}/providers/Microsoft.Authorization/roleAssignments/{roleAssignmentName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetSender sends the Get request. 
The method will close the +// http.Response Body if it receives an error. +func (client RoleAssignmentsClient) GetSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client RoleAssignmentsClient) GetResponder(resp *http.Response) (result RoleAssignment, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// GetByID get single role assignment. +// +// roleAssignmentID is role assignment Id +func (client RoleAssignmentsClient) GetByID(roleAssignmentID string) (result RoleAssignment, err error) { + req, err := client.GetByIDPreparer(roleAssignmentID) + if err != nil { + return result, autorest.NewErrorWithError(err, "authorization.RoleAssignmentsClient", "GetByID", nil, "Failure preparing request") + } + + resp, err := client.GetByIDSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "authorization.RoleAssignmentsClient", "GetByID", resp, "Failure sending request") + } + + result, err = client.GetByIDResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "authorization.RoleAssignmentsClient", "GetByID", resp, "Failure responding to request") + } + + return +} + +// GetByIDPreparer prepares the GetByID request. 
+func (client RoleAssignmentsClient) GetByIDPreparer(roleAssignmentID string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "roleAssignmentId": roleAssignmentID, + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/{roleAssignmentId}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetByIDSender sends the GetByID request. The method will close the +// http.Response Body if it receives an error. +func (client RoleAssignmentsClient) GetByIDSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetByIDResponder handles the response to the GetByID request. The method always +// closes the http.Response Body. +func (client RoleAssignmentsClient) GetByIDResponder(resp *http.Response) (result RoleAssignment, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List gets role assignments of the subscription. +// +// filter is the filter to apply on the operation. 
+func (client RoleAssignmentsClient) List(filter string) (result RoleAssignmentListResult, err error) { + req, err := client.ListPreparer(filter) + if err != nil { + return result, autorest.NewErrorWithError(err, "authorization.RoleAssignmentsClient", "List", nil, "Failure preparing request") + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "authorization.RoleAssignmentsClient", "List", resp, "Failure sending request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "authorization.RoleAssignmentsClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client RoleAssignmentsClient) ListPreparer(filter string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + if len(filter) > 0 { + queryParameters["$filter"] = autorest.Encode("query", filter) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Authorization/roleAssignments", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client RoleAssignmentsClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. 
+func (client RoleAssignmentsClient) ListResponder(resp *http.Response) (result RoleAssignmentListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListNextResults retrieves the next set of results, if any. +func (client RoleAssignmentsClient) ListNextResults(lastResults RoleAssignmentListResult) (result RoleAssignmentListResult, err error) { + req, err := lastResults.RoleAssignmentListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "authorization.RoleAssignmentsClient", "List", nil, "Failure preparing next results request request") + } + if req == nil { + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "authorization.RoleAssignmentsClient", "List", resp, "Failure sending next results request request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "authorization.RoleAssignmentsClient", "List", resp, "Failure responding to next results request request") + } + + return +} + +// ListForResource gets role assignments of the resource. +// +// resourceGroupName is the name of the resource group. +// resourceProviderNamespace is resource identity. parentResourcePath is +// resource identity. resourceType is resource identity. resourceName is +// resource identity. filter is the filter to apply on the operation. 
+func (client RoleAssignmentsClient) ListForResource(resourceGroupName string, resourceProviderNamespace string, parentResourcePath string, resourceType string, resourceName string, filter string) (result RoleAssignmentListResult, err error) { + req, err := client.ListForResourcePreparer(resourceGroupName, resourceProviderNamespace, parentResourcePath, resourceType, resourceName, filter) + if err != nil { + return result, autorest.NewErrorWithError(err, "authorization.RoleAssignmentsClient", "ListForResource", nil, "Failure preparing request") + } + + resp, err := client.ListForResourceSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "authorization.RoleAssignmentsClient", "ListForResource", resp, "Failure sending request") + } + + result, err = client.ListForResourceResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "authorization.RoleAssignmentsClient", "ListForResource", resp, "Failure responding to request") + } + + return +} + +// ListForResourcePreparer prepares the ListForResource request. 
+func (client RoleAssignmentsClient) ListForResourcePreparer(resourceGroupName string, resourceProviderNamespace string, parentResourcePath string, resourceType string, resourceName string, filter string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "parentResourcePath": parentResourcePath, + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "resourceName": autorest.Encode("path", resourceName), + "resourceProviderNamespace": autorest.Encode("path", resourceProviderNamespace), + "resourceType": resourceType, + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + if len(filter) > 0 { + queryParameters["$filter"] = autorest.Encode("query", filter) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}providers/Microsoft.Authorization/roleAssignments", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListForResourceSender sends the ListForResource request. The method will close the +// http.Response Body if it receives an error. +func (client RoleAssignmentsClient) ListForResourceSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListForResourceResponder handles the response to the ListForResource request. The method always +// closes the http.Response Body. 
+func (client RoleAssignmentsClient) ListForResourceResponder(resp *http.Response) (result RoleAssignmentListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListForResourceNextResults retrieves the next set of results, if any. +func (client RoleAssignmentsClient) ListForResourceNextResults(lastResults RoleAssignmentListResult) (result RoleAssignmentListResult, err error) { + req, err := lastResults.RoleAssignmentListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "authorization.RoleAssignmentsClient", "ListForResource", nil, "Failure preparing next results request request") + } + if req == nil { + return + } + + resp, err := client.ListForResourceSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "authorization.RoleAssignmentsClient", "ListForResource", resp, "Failure sending next results request request") + } + + result, err = client.ListForResourceResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "authorization.RoleAssignmentsClient", "ListForResource", resp, "Failure responding to next results request request") + } + + return +} + +// ListForResourceGroup gets role assignments of the resource group. +// +// resourceGroupName is resource group name. filter is the filter to apply on +// the operation. 
+func (client RoleAssignmentsClient) ListForResourceGroup(resourceGroupName string, filter string) (result RoleAssignmentListResult, err error) { + req, err := client.ListForResourceGroupPreparer(resourceGroupName, filter) + if err != nil { + return result, autorest.NewErrorWithError(err, "authorization.RoleAssignmentsClient", "ListForResourceGroup", nil, "Failure preparing request") + } + + resp, err := client.ListForResourceGroupSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "authorization.RoleAssignmentsClient", "ListForResourceGroup", resp, "Failure sending request") + } + + result, err = client.ListForResourceGroupResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "authorization.RoleAssignmentsClient", "ListForResourceGroup", resp, "Failure responding to request") + } + + return +} + +// ListForResourceGroupPreparer prepares the ListForResourceGroup request. +func (client RoleAssignmentsClient) ListForResourceGroupPreparer(resourceGroupName string, filter string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + if len(filter) > 0 { + queryParameters["$filter"] = autorest.Encode("query", filter) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Authorization/roleAssignments", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListForResourceGroupSender sends the ListForResourceGroup request. The method will close the +// http.Response Body if it receives an error. 
+func (client RoleAssignmentsClient) ListForResourceGroupSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListForResourceGroupResponder handles the response to the ListForResourceGroup request. The method always +// closes the http.Response Body. +func (client RoleAssignmentsClient) ListForResourceGroupResponder(resp *http.Response) (result RoleAssignmentListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListForResourceGroupNextResults retrieves the next set of results, if any. +func (client RoleAssignmentsClient) ListForResourceGroupNextResults(lastResults RoleAssignmentListResult) (result RoleAssignmentListResult, err error) { + req, err := lastResults.RoleAssignmentListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "authorization.RoleAssignmentsClient", "ListForResourceGroup", nil, "Failure preparing next results request request") + } + if req == nil { + return + } + + resp, err := client.ListForResourceGroupSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "authorization.RoleAssignmentsClient", "ListForResourceGroup", resp, "Failure sending next results request request") + } + + result, err = client.ListForResourceGroupResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "authorization.RoleAssignmentsClient", "ListForResourceGroup", resp, "Failure responding to next results request request") + } + + return +} + +// ListForScope gets role assignments of the scope. +// +// scope is scope. filter is the filter to apply on the operation. 
+func (client RoleAssignmentsClient) ListForScope(scope string, filter string) (result RoleAssignmentListResult, err error) { + req, err := client.ListForScopePreparer(scope, filter) + if err != nil { + return result, autorest.NewErrorWithError(err, "authorization.RoleAssignmentsClient", "ListForScope", nil, "Failure preparing request") + } + + resp, err := client.ListForScopeSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "authorization.RoleAssignmentsClient", "ListForScope", resp, "Failure sending request") + } + + result, err = client.ListForScopeResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "authorization.RoleAssignmentsClient", "ListForScope", resp, "Failure responding to request") + } + + return +} + +// ListForScopePreparer prepares the ListForScope request. +func (client RoleAssignmentsClient) ListForScopePreparer(scope string, filter string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "scope": scope, + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + if len(filter) > 0 { + queryParameters["$filter"] = autorest.Encode("query", filter) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/{scope}/providers/Microsoft.Authorization/roleAssignments", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListForScopeSender sends the ListForScope request. The method will close the +// http.Response Body if it receives an error. +func (client RoleAssignmentsClient) ListForScopeSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListForScopeResponder handles the response to the ListForScope request. The method always +// closes the http.Response Body. 
+func (client RoleAssignmentsClient) ListForScopeResponder(resp *http.Response) (result RoleAssignmentListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListForScopeNextResults retrieves the next set of results, if any. +func (client RoleAssignmentsClient) ListForScopeNextResults(lastResults RoleAssignmentListResult) (result RoleAssignmentListResult, err error) { + req, err := lastResults.RoleAssignmentListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "authorization.RoleAssignmentsClient", "ListForScope", nil, "Failure preparing next results request request") + } + if req == nil { + return + } + + resp, err := client.ListForScopeSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "authorization.RoleAssignmentsClient", "ListForScope", resp, "Failure sending next results request request") + } + + result, err = client.ListForScopeResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "authorization.RoleAssignmentsClient", "ListForScope", resp, "Failure responding to next results request request") + } + + return +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/authorization/roledefinitions.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/authorization/roledefinitions.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/authorization/roledefinitions.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/authorization/roledefinitions.go 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,377 @@ +package authorization + +// Copyright (c) Microsoft and contributors. All rights reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "net/http" +) + +// RoleDefinitionsClient is the client for the RoleDefinitions methods of the +// Authorization service. +type RoleDefinitionsClient struct { + ManagementClient +} + +// NewRoleDefinitionsClient creates an instance of the RoleDefinitionsClient +// client. +func NewRoleDefinitionsClient(subscriptionID string) RoleDefinitionsClient { + return NewRoleDefinitionsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewRoleDefinitionsClientWithBaseURI creates an instance of the +// RoleDefinitionsClient client. +func NewRoleDefinitionsClientWithBaseURI(baseURI string, subscriptionID string) RoleDefinitionsClient { + return RoleDefinitionsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate creates or updates a role definition. +// +// scope is scope roleDefinitionID is role definition id. roleDefinition is +// role definition. 
+func (client RoleDefinitionsClient) CreateOrUpdate(scope string, roleDefinitionID string, roleDefinition RoleDefinition) (result RoleDefinition, err error) { + req, err := client.CreateOrUpdatePreparer(scope, roleDefinitionID, roleDefinition) + if err != nil { + return result, autorest.NewErrorWithError(err, "authorization.RoleDefinitionsClient", "CreateOrUpdate", nil, "Failure preparing request") + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "authorization.RoleDefinitionsClient", "CreateOrUpdate", resp, "Failure sending request") + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "authorization.RoleDefinitionsClient", "CreateOrUpdate", resp, "Failure responding to request") + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. +func (client RoleDefinitionsClient) CreateOrUpdatePreparer(scope string, roleDefinitionID string, roleDefinition RoleDefinition) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "roleDefinitionId": autorest.Encode("path", roleDefinitionID), + "scope": scope, + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/{scope}/providers/Microsoft.Authorization/roleDefinitions/{roleDefinitionId}", pathParameters), + autorest.WithJSON(roleDefinition), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. 
+func (client RoleDefinitionsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client RoleDefinitionsClient) CreateOrUpdateResponder(resp *http.Response) (result RoleDefinition, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete deletes the role definition. +// +// scope is scope roleDefinitionID is role definition id. +func (client RoleDefinitionsClient) Delete(scope string, roleDefinitionID string) (result RoleDefinition, err error) { + req, err := client.DeletePreparer(scope, roleDefinitionID) + if err != nil { + return result, autorest.NewErrorWithError(err, "authorization.RoleDefinitionsClient", "Delete", nil, "Failure preparing request") + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "authorization.RoleDefinitionsClient", "Delete", resp, "Failure sending request") + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "authorization.RoleDefinitionsClient", "Delete", resp, "Failure responding to request") + } + + return +} + +// DeletePreparer prepares the Delete request. 
+func (client RoleDefinitionsClient) DeletePreparer(scope string, roleDefinitionID string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "roleDefinitionId": autorest.Encode("path", roleDefinitionID), + "scope": scope, + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/{scope}/providers/Microsoft.Authorization/roleDefinitions/{roleDefinitionId}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client RoleDefinitionsClient) DeleteSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client RoleDefinitionsClient) DeleteResponder(resp *http.Response) (result RoleDefinition, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Get get role definition by name (GUID). 
+// +// scope is scope roleDefinitionID is role definition Id +func (client RoleDefinitionsClient) Get(scope string, roleDefinitionID string) (result RoleDefinition, err error) { + req, err := client.GetPreparer(scope, roleDefinitionID) + if err != nil { + return result, autorest.NewErrorWithError(err, "authorization.RoleDefinitionsClient", "Get", nil, "Failure preparing request") + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "authorization.RoleDefinitionsClient", "Get", resp, "Failure sending request") + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "authorization.RoleDefinitionsClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client RoleDefinitionsClient) GetPreparer(scope string, roleDefinitionID string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "roleDefinitionId": autorest.Encode("path", roleDefinitionID), + "scope": scope, + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/{scope}/providers/Microsoft.Authorization/roleDefinitions/{roleDefinitionId}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client RoleDefinitionsClient) GetSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. 
+func (client RoleDefinitionsClient) GetResponder(resp *http.Response) (result RoleDefinition, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// GetByID get role definition by name (GUID). +// +// roleDefinitionID is fully qualified role definition Id +func (client RoleDefinitionsClient) GetByID(roleDefinitionID string) (result RoleDefinition, err error) { + req, err := client.GetByIDPreparer(roleDefinitionID) + if err != nil { + return result, autorest.NewErrorWithError(err, "authorization.RoleDefinitionsClient", "GetByID", nil, "Failure preparing request") + } + + resp, err := client.GetByIDSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "authorization.RoleDefinitionsClient", "GetByID", resp, "Failure sending request") + } + + result, err = client.GetByIDResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "authorization.RoleDefinitionsClient", "GetByID", resp, "Failure responding to request") + } + + return +} + +// GetByIDPreparer prepares the GetByID request. +func (client RoleDefinitionsClient) GetByIDPreparer(roleDefinitionID string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "roleDefinitionId": roleDefinitionID, + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/{roleDefinitionId}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetByIDSender sends the GetByID request. The method will close the +// http.Response Body if it receives an error. 
+func (client RoleDefinitionsClient) GetByIDSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetByIDResponder handles the response to the GetByID request. The method always +// closes the http.Response Body. +func (client RoleDefinitionsClient) GetByIDResponder(resp *http.Response) (result RoleDefinition, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List get all role definitions that are applicable at scope and above. Use +// atScopeAndBelow filter to search below the given scope as well +// +// scope is scope filter is the filter to apply on the operation. +func (client RoleDefinitionsClient) List(scope string, filter string) (result RoleDefinitionListResult, err error) { + req, err := client.ListPreparer(scope, filter) + if err != nil { + return result, autorest.NewErrorWithError(err, "authorization.RoleDefinitionsClient", "List", nil, "Failure preparing request") + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "authorization.RoleDefinitionsClient", "List", resp, "Failure sending request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "authorization.RoleDefinitionsClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. 
+func (client RoleDefinitionsClient) ListPreparer(scope string, filter string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "scope": scope, + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + if len(filter) > 0 { + queryParameters["$filter"] = autorest.Encode("query", filter) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/{scope}/providers/Microsoft.Authorization/roleDefinitions", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client RoleDefinitionsClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client RoleDefinitionsClient) ListResponder(resp *http.Response) (result RoleDefinitionListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListNextResults retrieves the next set of results, if any. 
+func (client RoleDefinitionsClient) ListNextResults(lastResults RoleDefinitionListResult) (result RoleDefinitionListResult, err error) { + req, err := lastResults.RoleDefinitionListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "authorization.RoleDefinitionsClient", "List", nil, "Failure preparing next results request request") + } + if req == nil { + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "authorization.RoleDefinitionsClient", "List", resp, "Failure sending next results request request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "authorization.RoleDefinitionsClient", "List", resp, "Failure responding to next results request request") + } + + return +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/authorization/version.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/authorization/version.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/authorization/version.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/authorization/version.go 2016-10-13 14:32:06.000000000 +0000 @@ -14,7 +14,7 @@ // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 // Changes may cause incorrect behavior and will be lost if the code is // regenerated. 
@@ -23,18 +23,18 @@ ) const ( - major = "0" - minor = "3" + major = "3" + minor = "1" patch = "0" // Always begin a "tag" with a dash (as per http://semver.org) tag = "-beta" semVerFormat = "%s.%s.%s%s" - userAgentFormat = "Azure-SDK-for-Go/%s;Package arm/%s;API %s" + userAgentFormat = "Azure-SDK-for-Go/%s arm-%s/%s" ) // UserAgent returns the UserAgent string to use when sending http.Requests. func UserAgent() string { - return fmt.Sprintf(userAgentFormat, Version(), "authorization", "2015-01-01") + return fmt.Sprintf(userAgentFormat, Version(), "authorization", "2015-07-01") } // Version returns the semantic version (see http://semver.org) of the client. diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/batch/account.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/batch/account.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/batch/account.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/batch/account.go 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,676 @@ +package batch + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. 
+ +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "net/http" +) + +// AccountClient is the client for the Account methods of the Batch service. +type AccountClient struct { + ManagementClient +} + +// NewAccountClient creates an instance of the AccountClient client. +func NewAccountClient(subscriptionID string) AccountClient { + return NewAccountClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewAccountClientWithBaseURI creates an instance of the AccountClient client. +func NewAccountClientWithBaseURI(baseURI string, subscriptionID string) AccountClient { + return AccountClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// Create creates a new Batch account with the specified parameters. Existing +// accounts cannot be updated with this API and should instead be updated +// with the Update Batch Account API. This method may poll for completion. +// Polling can be canceled by passing the cancel channel argument. The +// channel will be used to cancel polling and any outstanding HTTP requests. +// +// resourceGroupName is the name of the resource group that contains the new +// Batch account. accountName is a name for the Batch account which must be +// unique within the region. Batch account names must be between 3 and 24 +// characters in length and must use only numbers and lowercase letters. This +// name is used as part of the DNS name that is used to access the Batch +// service in the region in which the account is created. For example: +// http://accountname.region.batch.azure.com/. parameters is additional +// parameters for account creation. 
+func (client AccountClient) Create(resourceGroupName string, accountName string, parameters AccountCreateParameters, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.CreatePreparer(resourceGroupName, accountName, parameters, cancel) + if err != nil { + return result, autorest.NewErrorWithError(err, "batch.AccountClient", "Create", nil, "Failure preparing request") + } + + resp, err := client.CreateSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "batch.AccountClient", "Create", resp, "Failure sending request") + } + + result, err = client.CreateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.AccountClient", "Create", resp, "Failure responding to request") + } + + return +} + +// CreatePreparer prepares the Create request. +func (client AccountClient) CreatePreparer(resourceGroupName string, accountName string, parameters AccountCreateParameters, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// CreateSender sends the Create request. The method will close the +// http.Response Body if it receives an error. 
+func (client AccountClient) CreateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// CreateResponder handles the response to the Create request. The method always +// closes the http.Response Body. +func (client AccountClient) CreateResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusAccepted, http.StatusOK), + autorest.ByClosing()) + result.Response = resp + return +} + +// Delete deletes the specified Batch account. This method may poll for +// completion. Polling can be canceled by passing the cancel channel +// argument. The channel will be used to cancel polling and any outstanding +// HTTP requests. +// +// resourceGroupName is the name of the resource group that contains the Batch +// account to be deleted. accountName is the name of the account to be +// deleted. +func (client AccountClient) Delete(resourceGroupName string, accountName string, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.DeletePreparer(resourceGroupName, accountName, cancel) + if err != nil { + return result, autorest.NewErrorWithError(err, "batch.AccountClient", "Delete", nil, "Failure preparing request") + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "batch.AccountClient", "Delete", resp, "Failure sending request") + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.AccountClient", "Delete", resp, "Failure responding to request") + } + + return +} + +// DeletePreparer prepares the Delete request. 
+func (client AccountClient) DeletePreparer(resourceGroupName string, accountName string, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client AccountClient) DeleteSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client AccountClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusAccepted, http.StatusNoContent, http.StatusOK), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get gets information about the specified Batch account. +// +// resourceGroupName is the name of the resource group that contains the Batch +// account. accountName is the name of the account. 
+func (client AccountClient) Get(resourceGroupName string, accountName string) (result AccountResource, err error) { + req, err := client.GetPreparer(resourceGroupName, accountName) + if err != nil { + return result, autorest.NewErrorWithError(err, "batch.AccountClient", "Get", nil, "Failure preparing request") + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "batch.AccountClient", "Get", resp, "Failure sending request") + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.AccountClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client AccountClient) GetPreparer(resourceGroupName string, accountName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client AccountClient) GetSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. 
+func (client AccountClient) GetResponder(resp *http.Response) (result AccountResource, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List gets information about the Batch accounts associated with the +// subscription. +func (client AccountClient) List() (result AccountListResult, err error) { + req, err := client.ListPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "batch.AccountClient", "List", nil, "Failure preparing request") + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "batch.AccountClient", "List", resp, "Failure sending request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.AccountClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client AccountClient) ListPreparer() (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Batch/batchAccounts", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. 
+func (client AccountClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client AccountClient) ListResponder(resp *http.Response) (result AccountListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListNextResults retrieves the next set of results, if any. +func (client AccountClient) ListNextResults(lastResults AccountListResult) (result AccountListResult, err error) { + req, err := lastResults.AccountListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "batch.AccountClient", "List", nil, "Failure preparing next results request request") + } + if req == nil { + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "batch.AccountClient", "List", resp, "Failure sending next results request request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.AccountClient", "List", resp, "Failure responding to next results request request") + } + + return +} + +// ListByResourceGroup gets information about the Batch accounts associated +// within the specified resource group. +// +// resourceGroupName is the name of the resource group whose Batch accounts to +// list. 
+func (client AccountClient) ListByResourceGroup(resourceGroupName string) (result AccountListResult, err error) { + req, err := client.ListByResourceGroupPreparer(resourceGroupName) + if err != nil { + return result, autorest.NewErrorWithError(err, "batch.AccountClient", "ListByResourceGroup", nil, "Failure preparing request") + } + + resp, err := client.ListByResourceGroupSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "batch.AccountClient", "ListByResourceGroup", resp, "Failure sending request") + } + + result, err = client.ListByResourceGroupResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.AccountClient", "ListByResourceGroup", resp, "Failure responding to request") + } + + return +} + +// ListByResourceGroupPreparer prepares the ListByResourceGroup request. +func (client AccountClient) ListByResourceGroupPreparer(resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the +// http.Response Body if it receives an error. 
+func (client AccountClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always +// closes the http.Response Body. +func (client AccountClient) ListByResourceGroupResponder(resp *http.Response) (result AccountListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListByResourceGroupNextResults retrieves the next set of results, if any. +func (client AccountClient) ListByResourceGroupNextResults(lastResults AccountListResult) (result AccountListResult, err error) { + req, err := lastResults.AccountListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "batch.AccountClient", "ListByResourceGroup", nil, "Failure preparing next results request request") + } + if req == nil { + return + } + + resp, err := client.ListByResourceGroupSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "batch.AccountClient", "ListByResourceGroup", resp, "Failure sending next results request request") + } + + result, err = client.ListByResourceGroupResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.AccountClient", "ListByResourceGroup", resp, "Failure responding to next results request request") + } + + return +} + +// ListKeys lists the account keys for the specified Batch account. +// +// resourceGroupName is the name of the resource group that contains the Batch +// account. accountName is the name of the account. 
+func (client AccountClient) ListKeys(resourceGroupName string, accountName string) (result AccountListKeyResult, err error) { + req, err := client.ListKeysPreparer(resourceGroupName, accountName) + if err != nil { + return result, autorest.NewErrorWithError(err, "batch.AccountClient", "ListKeys", nil, "Failure preparing request") + } + + resp, err := client.ListKeysSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "batch.AccountClient", "ListKeys", resp, "Failure sending request") + } + + result, err = client.ListKeysResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.AccountClient", "ListKeys", resp, "Failure responding to request") + } + + return +} + +// ListKeysPreparer prepares the ListKeys request. +func (client AccountClient) ListKeysPreparer(resourceGroupName string, accountName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/listKeys", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListKeysSender sends the ListKeys request. The method will close the +// http.Response Body if it receives an error. +func (client AccountClient) ListKeysSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListKeysResponder handles the response to the ListKeys request. 
The method always +// closes the http.Response Body. +func (client AccountClient) ListKeysResponder(resp *http.Response) (result AccountListKeyResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// RegenerateKey regenerates the specified account key for the specified Batch +// account. +// +// resourceGroupName is the name of the resource group that contains the Batch +// account. accountName is the name of the account. parameters is the type of +// key to regenerate. +func (client AccountClient) RegenerateKey(resourceGroupName string, accountName string, parameters AccountRegenerateKeyParameters) (result AccountRegenerateKeyResult, err error) { + req, err := client.RegenerateKeyPreparer(resourceGroupName, accountName, parameters) + if err != nil { + return result, autorest.NewErrorWithError(err, "batch.AccountClient", "RegenerateKey", nil, "Failure preparing request") + } + + resp, err := client.RegenerateKeySender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "batch.AccountClient", "RegenerateKey", resp, "Failure sending request") + } + + result, err = client.RegenerateKeyResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.AccountClient", "RegenerateKey", resp, "Failure responding to request") + } + + return +} + +// RegenerateKeyPreparer prepares the RegenerateKey request. 
+func (client AccountClient) RegenerateKeyPreparer(resourceGroupName string, accountName string, parameters AccountRegenerateKeyParameters) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/regenerateKeys", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// RegenerateKeySender sends the RegenerateKey request. The method will close the +// http.Response Body if it receives an error. +func (client AccountClient) RegenerateKeySender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// RegenerateKeyResponder handles the response to the RegenerateKey request. The method always +// closes the http.Response Body. +func (client AccountClient) RegenerateKeyResponder(resp *http.Response) (result AccountRegenerateKeyResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// SynchronizeAutoStorageKeys synchronizes access keys for the auto storage +// account configured for the specified Batch account. +// +// resourceGroupName is the name of the resource group that contains the Batch +// account. accountName is the name of the Batch account. 
+func (client AccountClient) SynchronizeAutoStorageKeys(resourceGroupName string, accountName string) (result autorest.Response, err error) { + req, err := client.SynchronizeAutoStorageKeysPreparer(resourceGroupName, accountName) + if err != nil { + return result, autorest.NewErrorWithError(err, "batch.AccountClient", "SynchronizeAutoStorageKeys", nil, "Failure preparing request") + } + + resp, err := client.SynchronizeAutoStorageKeysSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "batch.AccountClient", "SynchronizeAutoStorageKeys", resp, "Failure sending request") + } + + result, err = client.SynchronizeAutoStorageKeysResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.AccountClient", "SynchronizeAutoStorageKeys", resp, "Failure responding to request") + } + + return +} + +// SynchronizeAutoStorageKeysPreparer prepares the SynchronizeAutoStorageKeys request. +func (client AccountClient) SynchronizeAutoStorageKeysPreparer(resourceGroupName string, accountName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/syncAutoStorageKeys", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// SynchronizeAutoStorageKeysSender sends the SynchronizeAutoStorageKeys request. The method will close the +// http.Response Body if it receives an error. 
+func (client AccountClient) SynchronizeAutoStorageKeysSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// SynchronizeAutoStorageKeysResponder handles the response to the SynchronizeAutoStorageKeys request. The method always +// closes the http.Response Body. +func (client AccountClient) SynchronizeAutoStorageKeysResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// Update updates the properties of an existing Batch account. +// +// resourceGroupName is the name of the resource group that contains the Batch +// account. accountName is the name of the account. parameters is additional +// parameters for account update. +func (client AccountClient) Update(resourceGroupName string, accountName string, parameters AccountUpdateParameters) (result AccountResource, err error) { + req, err := client.UpdatePreparer(resourceGroupName, accountName, parameters) + if err != nil { + return result, autorest.NewErrorWithError(err, "batch.AccountClient", "Update", nil, "Failure preparing request") + } + + resp, err := client.UpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "batch.AccountClient", "Update", resp, "Failure sending request") + } + + result, err = client.UpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.AccountClient", "Update", resp, "Failure responding to request") + } + + return +} + +// UpdatePreparer prepares the Update request. 
+func (client AccountClient) UpdatePreparer(resourceGroupName string, accountName string, parameters AccountUpdateParameters) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPatch(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// UpdateSender sends the Update request. The method will close the +// http.Response Body if it receives an error. +func (client AccountClient) UpdateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// UpdateResponder handles the response to the Update request. The method always +// closes the http.Response Body. 
+func (client AccountClient) UpdateResponder(resp *http.Response) (result AccountResource, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/batch/applicationoperations.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/batch/applicationoperations.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/batch/applicationoperations.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/batch/applicationoperations.go 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,667 @@ +package batch + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "net/http" +) + +// ApplicationOperationsClient is the client for the ApplicationOperations +// methods of the Batch service. 
+type ApplicationOperationsClient struct { + ManagementClient +} + +// NewApplicationOperationsClient creates an instance of the +// ApplicationOperationsClient client. +func NewApplicationOperationsClient(subscriptionID string) ApplicationOperationsClient { + return NewApplicationOperationsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewApplicationOperationsClientWithBaseURI creates an instance of the +// ApplicationOperationsClient client. +func NewApplicationOperationsClientWithBaseURI(baseURI string, subscriptionID string) ApplicationOperationsClient { + return ApplicationOperationsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// ActivateApplicationPackage activates the specified application package. +// +// resourceGroupName is the name of the resource group that contains the Batch +// account. accountName is the name of the Batch account. id is the id of the +// application. version is the version of the application to activate. +// parameters is the parameters for the request. 
+func (client ApplicationOperationsClient) ActivateApplicationPackage(resourceGroupName string, accountName string, id string, version string, parameters ActivateApplicationPackageParameters) (result autorest.Response, err error) { + req, err := client.ActivateApplicationPackagePreparer(resourceGroupName, accountName, id, version, parameters) + if err != nil { + return result, autorest.NewErrorWithError(err, "batch.ApplicationOperationsClient", "ActivateApplicationPackage", nil, "Failure preparing request") + } + + resp, err := client.ActivateApplicationPackageSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "batch.ApplicationOperationsClient", "ActivateApplicationPackage", resp, "Failure sending request") + } + + result, err = client.ActivateApplicationPackageResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.ApplicationOperationsClient", "ActivateApplicationPackage", resp, "Failure responding to request") + } + + return +} + +// ActivateApplicationPackagePreparer prepares the ActivateApplicationPackage request. 
+func (client ApplicationOperationsClient) ActivateApplicationPackagePreparer(resourceGroupName string, accountName string, id string, version string, parameters ActivateApplicationPackageParameters) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "id": autorest.Encode("path", id), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "version": autorest.Encode("path", version), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{id}/versions/{version}/activate", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ActivateApplicationPackageSender sends the ActivateApplicationPackage request. The method will close the +// http.Response Body if it receives an error. +func (client ApplicationOperationsClient) ActivateApplicationPackageSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ActivateApplicationPackageResponder handles the response to the ActivateApplicationPackage request. The method always +// closes the http.Response Body. +func (client ApplicationOperationsClient) ActivateApplicationPackageResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// AddApplication adds an application to the specified Batch account. 
+// +// resourceGroupName is the name of the resource group that contains the Batch +// account. accountName is the name of the Batch account. applicationID is +// the id of the application. parameters is the parameters for the request. +func (client ApplicationOperationsClient) AddApplication(resourceGroupName string, accountName string, applicationID string, parameters *AddApplicationParameters) (result Application, err error) { + req, err := client.AddApplicationPreparer(resourceGroupName, accountName, applicationID, parameters) + if err != nil { + return result, autorest.NewErrorWithError(err, "batch.ApplicationOperationsClient", "AddApplication", nil, "Failure preparing request") + } + + resp, err := client.AddApplicationSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "batch.ApplicationOperationsClient", "AddApplication", resp, "Failure sending request") + } + + result, err = client.AddApplicationResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.ApplicationOperationsClient", "AddApplication", resp, "Failure responding to request") + } + + return +} + +// AddApplicationPreparer prepares the AddApplication request. 
+func (client ApplicationOperationsClient) AddApplicationPreparer(resourceGroupName string, accountName string, applicationID string, parameters *AddApplicationParameters) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "applicationId": autorest.Encode("path", applicationID), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationId}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + if parameters != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithJSON(parameters)) + } + return preparer.Prepare(&http.Request{}) +} + +// AddApplicationSender sends the AddApplication request. The method will close the +// http.Response Body if it receives an error. +func (client ApplicationOperationsClient) AddApplicationSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// AddApplicationResponder handles the response to the AddApplication request. The method always +// closes the http.Response Body. +func (client ApplicationOperationsClient) AddApplicationResponder(resp *http.Response) (result Application, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// AddApplicationPackage creates an application package record. 
+// +// resourceGroupName is the name of the resource group that contains the Batch +// account. accountName is the name of the Batch account. applicationID is +// the id of the application. version is the version of the application. +func (client ApplicationOperationsClient) AddApplicationPackage(resourceGroupName string, accountName string, applicationID string, version string) (result AddApplicationPackageResult, err error) { + req, err := client.AddApplicationPackagePreparer(resourceGroupName, accountName, applicationID, version) + if err != nil { + return result, autorest.NewErrorWithError(err, "batch.ApplicationOperationsClient", "AddApplicationPackage", nil, "Failure preparing request") + } + + resp, err := client.AddApplicationPackageSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "batch.ApplicationOperationsClient", "AddApplicationPackage", resp, "Failure sending request") + } + + result, err = client.AddApplicationPackageResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.ApplicationOperationsClient", "AddApplicationPackage", resp, "Failure responding to request") + } + + return +} + +// AddApplicationPackagePreparer prepares the AddApplicationPackage request. 
+func (client ApplicationOperationsClient) AddApplicationPackagePreparer(resourceGroupName string, accountName string, applicationID string, version string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "applicationId": autorest.Encode("path", applicationID), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "version": autorest.Encode("path", version), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationId}/versions/{version}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// AddApplicationPackageSender sends the AddApplicationPackage request. The method will close the +// http.Response Body if it receives an error. +func (client ApplicationOperationsClient) AddApplicationPackageSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// AddApplicationPackageResponder handles the response to the AddApplicationPackage request. The method always +// closes the http.Response Body. +func (client ApplicationOperationsClient) AddApplicationPackageResponder(resp *http.Response) (result AddApplicationPackageResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// DeleteApplication deletes an application. 
//
// resourceGroupName is the name of the resource group that contains the Batch
// account. accountName is the name of the Batch account. applicationID is
// the id of the application.
func (client ApplicationOperationsClient) DeleteApplication(resourceGroupName string, accountName string, applicationID string) (result autorest.Response, err error) {
	req, err := client.DeleteApplicationPreparer(resourceGroupName, accountName, applicationID)
	if err != nil {
		return result, autorest.NewErrorWithError(err, "batch.ApplicationOperationsClient", "DeleteApplication", nil, "Failure preparing request")
	}

	resp, err := client.DeleteApplicationSender(req)
	if err != nil {
		// No body is expected on delete; only the raw response is recorded.
		result.Response = resp
		return result, autorest.NewErrorWithError(err, "batch.ApplicationOperationsClient", "DeleteApplication", resp, "Failure sending request")
	}

	result, err = client.DeleteApplicationResponder(resp)
	if err != nil {
		err = autorest.NewErrorWithError(err, "batch.ApplicationOperationsClient", "DeleteApplication", resp, "Failure responding to request")
	}

	return
}

// DeleteApplicationPreparer prepares the DeleteApplication request.
func (client ApplicationOperationsClient) DeleteApplicationPreparer(resourceGroupName string, accountName string, applicationID string) (*http.Request, error) {
	pathParameters := map[string]interface{}{
		"accountName":       autorest.Encode("path", accountName),
		"applicationId":     autorest.Encode("path", applicationID),
		"resourceGroupName": autorest.Encode("path", resourceGroupName),
		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
	}

	queryParameters := map[string]interface{}{
		"api-version": client.APIVersion,
	}

	preparer := autorest.CreatePreparer(
		autorest.AsDelete(),
		autorest.WithBaseURL(client.BaseURI),
		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationId}", pathParameters),
		autorest.WithQueryParameters(queryParameters))
	return preparer.Prepare(&http.Request{})
}

// DeleteApplicationSender sends the DeleteApplication request. The method will close the
// http.Response Body if it receives an error.
func (client ApplicationOperationsClient) DeleteApplicationSender(req *http.Request) (*http.Response, error) {
	return autorest.SendWithSender(client, req)
}

// DeleteApplicationResponder handles the response to the DeleteApplication request. The method always
// closes the http.Response Body.
func (client ApplicationOperationsClient) DeleteApplicationResponder(resp *http.Response) (result autorest.Response, err error) {
	err = autorest.Respond(
		resp,
		client.ByInspecting(),
		// 200 OK and 204 No Content are the success codes for this DELETE.
		azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent),
		autorest.ByClosing())
	result.Response = resp
	return
}

// DeleteApplicationPackage deletes an application package record and its
// associated binary file.
//
// resourceGroupName is the name of the resource group that contains the Batch
// account. accountName is the name of the Batch account. applicationID is
// the id of the application. version is the version of the application to
// delete.
func (client ApplicationOperationsClient) DeleteApplicationPackage(resourceGroupName string, accountName string, applicationID string, version string) (result autorest.Response, err error) {
	req, err := client.DeleteApplicationPackagePreparer(resourceGroupName, accountName, applicationID, version)
	if err != nil {
		return result, autorest.NewErrorWithError(err, "batch.ApplicationOperationsClient", "DeleteApplicationPackage", nil, "Failure preparing request")
	}

	resp, err := client.DeleteApplicationPackageSender(req)
	if err != nil {
		result.Response = resp
		return result, autorest.NewErrorWithError(err, "batch.ApplicationOperationsClient", "DeleteApplicationPackage", resp, "Failure sending request")
	}

	result, err = client.DeleteApplicationPackageResponder(resp)
	if err != nil {
		err = autorest.NewErrorWithError(err, "batch.ApplicationOperationsClient", "DeleteApplicationPackage", resp, "Failure responding to request")
	}

	return
}

// DeleteApplicationPackagePreparer prepares the DeleteApplicationPackage request.
func (client ApplicationOperationsClient) DeleteApplicationPackagePreparer(resourceGroupName string, accountName string, applicationID string, version string) (*http.Request, error) {
	pathParameters := map[string]interface{}{
		"accountName":       autorest.Encode("path", accountName),
		"applicationId":     autorest.Encode("path", applicationID),
		"resourceGroupName": autorest.Encode("path", resourceGroupName),
		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
		"version":           autorest.Encode("path", version),
	}

	queryParameters := map[string]interface{}{
		"api-version": client.APIVersion,
	}

	preparer := autorest.CreatePreparer(
		autorest.AsDelete(),
		autorest.WithBaseURL(client.BaseURI),
		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationId}/versions/{version}", pathParameters),
		autorest.WithQueryParameters(queryParameters))
	return preparer.Prepare(&http.Request{})
}

// DeleteApplicationPackageSender sends the DeleteApplicationPackage request. The method will close the
// http.Response Body if it receives an error.
func (client ApplicationOperationsClient) DeleteApplicationPackageSender(req *http.Request) (*http.Response, error) {
	return autorest.SendWithSender(client, req)
}

// DeleteApplicationPackageResponder handles the response to the DeleteApplicationPackage request. The method always
// closes the http.Response Body.
func (client ApplicationOperationsClient) DeleteApplicationPackageResponder(resp *http.Response) (result autorest.Response, err error) {
	err = autorest.Respond(
		resp,
		client.ByInspecting(),
		azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent),
		autorest.ByClosing())
	result.Response = resp
	return
}

// GetApplication gets information about the specified application.
//
// resourceGroupName is the name of the resource group that contains the Batch
// account. accountName is the name of the Batch account. applicationID is
// the id of the application.
func (client ApplicationOperationsClient) GetApplication(resourceGroupName string, accountName string, applicationID string) (result Application, err error) {
	req, err := client.GetApplicationPreparer(resourceGroupName, accountName, applicationID)
	if err != nil {
		return result, autorest.NewErrorWithError(err, "batch.ApplicationOperationsClient", "GetApplication", nil, "Failure preparing request")
	}

	resp, err := client.GetApplicationSender(req)
	if err != nil {
		// Attach the (possibly nil) raw response so callers can inspect it on error.
		result.Response = autorest.Response{Response: resp}
		return result, autorest.NewErrorWithError(err, "batch.ApplicationOperationsClient", "GetApplication", resp, "Failure sending request")
	}

	result, err = client.GetApplicationResponder(resp)
	if err != nil {
		err = autorest.NewErrorWithError(err, "batch.ApplicationOperationsClient", "GetApplication", resp, "Failure responding to request")
	}

	return
}

// GetApplicationPreparer prepares the GetApplication request.
func (client ApplicationOperationsClient) GetApplicationPreparer(resourceGroupName string, accountName string, applicationID string) (*http.Request, error) {
	pathParameters := map[string]interface{}{
		"accountName":       autorest.Encode("path", accountName),
		"applicationId":     autorest.Encode("path", applicationID),
		"resourceGroupName": autorest.Encode("path", resourceGroupName),
		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
	}

	queryParameters := map[string]interface{}{
		"api-version": client.APIVersion,
	}

	preparer := autorest.CreatePreparer(
		autorest.AsGet(),
		autorest.WithBaseURL(client.BaseURI),
		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationId}", pathParameters),
		autorest.WithQueryParameters(queryParameters))
	return preparer.Prepare(&http.Request{})
}

// GetApplicationSender sends the GetApplication request. The method will close the
// http.Response Body if it receives an error.
func (client ApplicationOperationsClient) GetApplicationSender(req *http.Request) (*http.Response, error) {
	return autorest.SendWithSender(client, req)
}

// GetApplicationResponder handles the response to the GetApplication request. The method always
// closes the http.Response Body.
func (client ApplicationOperationsClient) GetApplicationResponder(resp *http.Response) (result Application, err error) {
	err = autorest.Respond(
		resp,
		client.ByInspecting(),
		// 200 OK is the only success code for this GET.
		azure.WithErrorUnlessStatusCode(http.StatusOK),
		autorest.ByUnmarshallingJSON(&result),
		autorest.ByClosing())
	result.Response = autorest.Response{Response: resp}
	return
}

// GetApplicationPackage gets information about the specified application
// package.
//
// resourceGroupName is the name of the resource group that contains the Batch
// account. accountName is the name of the Batch account. applicationID is
// the id of the application. version is the version of the application.
func (client ApplicationOperationsClient) GetApplicationPackage(resourceGroupName string, accountName string, applicationID string, version string) (result GetApplicationPackageResult, err error) {
	req, err := client.GetApplicationPackagePreparer(resourceGroupName, accountName, applicationID, version)
	if err != nil {
		return result, autorest.NewErrorWithError(err, "batch.ApplicationOperationsClient", "GetApplicationPackage", nil, "Failure preparing request")
	}

	resp, err := client.GetApplicationPackageSender(req)
	if err != nil {
		result.Response = autorest.Response{Response: resp}
		return result, autorest.NewErrorWithError(err, "batch.ApplicationOperationsClient", "GetApplicationPackage", resp, "Failure sending request")
	}

	result, err = client.GetApplicationPackageResponder(resp)
	if err != nil {
		err = autorest.NewErrorWithError(err, "batch.ApplicationOperationsClient", "GetApplicationPackage", resp, "Failure responding to request")
	}

	return
}

// GetApplicationPackagePreparer prepares the GetApplicationPackage request.
func (client ApplicationOperationsClient) GetApplicationPackagePreparer(resourceGroupName string, accountName string, applicationID string, version string) (*http.Request, error) {
	pathParameters := map[string]interface{}{
		"accountName":       autorest.Encode("path", accountName),
		"applicationId":     autorest.Encode("path", applicationID),
		"resourceGroupName": autorest.Encode("path", resourceGroupName),
		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
		"version":           autorest.Encode("path", version),
	}

	queryParameters := map[string]interface{}{
		"api-version": client.APIVersion,
	}

	preparer := autorest.CreatePreparer(
		autorest.AsGet(),
		autorest.WithBaseURL(client.BaseURI),
		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationId}/versions/{version}", pathParameters),
		autorest.WithQueryParameters(queryParameters))
	return preparer.Prepare(&http.Request{})
}

// GetApplicationPackageSender sends the GetApplicationPackage request. The method will close the
// http.Response Body if it receives an error.
func (client ApplicationOperationsClient) GetApplicationPackageSender(req *http.Request) (*http.Response, error) {
	return autorest.SendWithSender(client, req)
}

// GetApplicationPackageResponder handles the response to the GetApplicationPackage request. The method always
// closes the http.Response Body.
func (client ApplicationOperationsClient) GetApplicationPackageResponder(resp *http.Response) (result GetApplicationPackageResult, err error) {
	err = autorest.Respond(
		resp,
		client.ByInspecting(),
		azure.WithErrorUnlessStatusCode(http.StatusOK),
		autorest.ByUnmarshallingJSON(&result),
		autorest.ByClosing())
	result.Response = autorest.Response{Response: resp}
	return
}

// List lists all of the applications in the specified account.
//
// resourceGroupName is the name of the resource group that contains the Batch
// account. accountName is the name of the Batch account. maxresults is the
// maximum number of items to return in the response.
func (client ApplicationOperationsClient) List(resourceGroupName string, accountName string, maxresults *int32) (result ListApplicationsResult, err error) {
	req, err := client.ListPreparer(resourceGroupName, accountName, maxresults)
	if err != nil {
		return result, autorest.NewErrorWithError(err, "batch.ApplicationOperationsClient", "List", nil, "Failure preparing request")
	}

	resp, err := client.ListSender(req)
	if err != nil {
		result.Response = autorest.Response{Response: resp}
		return result, autorest.NewErrorWithError(err, "batch.ApplicationOperationsClient", "List", resp, "Failure sending request")
	}

	result, err = client.ListResponder(resp)
	if err != nil {
		err = autorest.NewErrorWithError(err, "batch.ApplicationOperationsClient", "List", resp, "Failure responding to request")
	}

	return
}

// ListPreparer prepares the List request.
func (client ApplicationOperationsClient) ListPreparer(resourceGroupName string, accountName string, maxresults *int32) (*http.Request, error) {
	pathParameters := map[string]interface{}{
		"accountName":       autorest.Encode("path", accountName),
		"resourceGroupName": autorest.Encode("path", resourceGroupName),
		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
	}

	queryParameters := map[string]interface{}{
		"api-version": client.APIVersion,
	}
	// maxresults is optional; the query parameter is only sent when supplied.
	if maxresults != nil {
		queryParameters["maxresults"] = autorest.Encode("query", *maxresults)
	}

	preparer := autorest.CreatePreparer(
		autorest.AsGet(),
		autorest.WithBaseURL(client.BaseURI),
		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications", pathParameters),
		autorest.WithQueryParameters(queryParameters))
	return preparer.Prepare(&http.Request{})
}

// ListSender sends the List request. The method will close the
// http.Response Body if it receives an error.
func (client ApplicationOperationsClient) ListSender(req *http.Request) (*http.Response, error) {
	return autorest.SendWithSender(client, req)
}

// ListResponder handles the response to the List request. The method always
// closes the http.Response Body.
func (client ApplicationOperationsClient) ListResponder(resp *http.Response) (result ListApplicationsResult, err error) {
	err = autorest.Respond(
		resp,
		client.ByInspecting(),
		azure.WithErrorUnlessStatusCode(http.StatusOK),
		autorest.ByUnmarshallingJSON(&result),
		autorest.ByClosing())
	result.Response = autorest.Response{Response: resp}
	return
}

// ListNextResults retrieves the next set of results, if any.
+func (client ApplicationOperationsClient) ListNextResults(lastResults ListApplicationsResult) (result ListApplicationsResult, err error) { + req, err := lastResults.ListApplicationsResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "batch.ApplicationOperationsClient", "List", nil, "Failure preparing next results request request") + } + if req == nil { + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "batch.ApplicationOperationsClient", "List", resp, "Failure sending next results request request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.ApplicationOperationsClient", "List", resp, "Failure responding to next results request request") + } + + return +} + +// UpdateApplication updates settings for the specified application. +// +// resourceGroupName is the name of the resource group that contains the Batch +// account. accountName is the name of the Batch account. applicationID is +// the id of the application. parameters is the parameters for the request. 
func (client ApplicationOperationsClient) UpdateApplication(resourceGroupName string, accountName string, applicationID string, parameters UpdateApplicationParameters) (result autorest.Response, err error) {
	req, err := client.UpdateApplicationPreparer(resourceGroupName, accountName, applicationID, parameters)
	if err != nil {
		return result, autorest.NewErrorWithError(err, "batch.ApplicationOperationsClient", "UpdateApplication", nil, "Failure preparing request")
	}

	resp, err := client.UpdateApplicationSender(req)
	if err != nil {
		// No body is expected on update; only the raw response is recorded.
		result.Response = resp
		return result, autorest.NewErrorWithError(err, "batch.ApplicationOperationsClient", "UpdateApplication", resp, "Failure sending request")
	}

	result, err = client.UpdateApplicationResponder(resp)
	if err != nil {
		err = autorest.NewErrorWithError(err, "batch.ApplicationOperationsClient", "UpdateApplication", resp, "Failure responding to request")
	}

	return
}

// UpdateApplicationPreparer prepares the UpdateApplication request.
func (client ApplicationOperationsClient) UpdateApplicationPreparer(resourceGroupName string, accountName string, applicationID string, parameters UpdateApplicationParameters) (*http.Request, error) {
	pathParameters := map[string]interface{}{
		"accountName":       autorest.Encode("path", accountName),
		"applicationId":     autorest.Encode("path", applicationID),
		"resourceGroupName": autorest.Encode("path", resourceGroupName),
		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
	}

	queryParameters := map[string]interface{}{
		"api-version": client.APIVersion,
	}

	// PATCH with a JSON body built from parameters.
	preparer := autorest.CreatePreparer(
		autorest.AsJSON(),
		autorest.AsPatch(),
		autorest.WithBaseURL(client.BaseURI),
		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationId}", pathParameters),
		autorest.WithJSON(parameters),
		autorest.WithQueryParameters(queryParameters))
	return preparer.Prepare(&http.Request{})
}

// UpdateApplicationSender sends the UpdateApplication request. The method will close the
// http.Response Body if it receives an error.
func (client ApplicationOperationsClient) UpdateApplicationSender(req *http.Request) (*http.Response, error) {
	return autorest.SendWithSender(client, req)
}

// UpdateApplicationResponder handles the response to the UpdateApplication request. The method always
// closes the http.Response Body.
+func (client ApplicationOperationsClient) UpdateApplicationResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/batch/client.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/batch/client.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/batch/client.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/batch/client.go 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,56 @@ +// Package batch implements the Azure ARM Batch service API version 2015-12-01. +// +package batch + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" +) + +const ( + // APIVersion is the version of the Batch + APIVersion = "2015-12-01" + + // DefaultBaseURI is the default URI used for the service Batch + DefaultBaseURI = "https://management.azure.com" +) + +// ManagementClient is the base client for Batch. 
+type ManagementClient struct { + autorest.Client + BaseURI string + APIVersion string + SubscriptionID string +} + +// New creates an instance of the ManagementClient client. +func New(subscriptionID string) ManagementClient { + return NewWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewWithBaseURI creates an instance of the ManagementClient client. +func NewWithBaseURI(baseURI string, subscriptionID string) ManagementClient { + return ManagementClient{ + Client: autorest.NewClientWithUserAgent(UserAgent()), + BaseURI: baseURI, + APIVersion: APIVersion, + SubscriptionID: subscriptionID, + } +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/batch/models.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/batch/models.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/batch/models.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/batch/models.go 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,256 @@ +package batch + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. 
import (
	"github.com/Azure/go-autorest/autorest"
	"github.com/Azure/go-autorest/autorest/date"
	"github.com/Azure/go-autorest/autorest/to"
	"net/http"
)

// AccountKeyType enumerates the values for account key type.
type AccountKeyType string

const (
	// Primary specifies the primary state for account key type.
	Primary AccountKeyType = "Primary"
	// Secondary specifies the secondary state for account key type.
	Secondary AccountKeyType = "Secondary"
)

// AccountProvisioningState enumerates the values for account provisioning
// state.
type AccountProvisioningState string

const (
	// Cancelled specifies the cancelled state for account provisioning state.
	Cancelled AccountProvisioningState = "Cancelled"
	// Creating specifies the creating state for account provisioning state.
	Creating AccountProvisioningState = "Creating"
	// Deleting specifies the deleting state for account provisioning state.
	Deleting AccountProvisioningState = "Deleting"
	// Failed specifies the failed state for account provisioning state.
	Failed AccountProvisioningState = "Failed"
	// Invalid specifies the invalid state for account provisioning state.
	Invalid AccountProvisioningState = "Invalid"
	// Succeeded specifies the succeeded state for account provisioning state.
	Succeeded AccountProvisioningState = "Succeeded"
)

// PackageState enumerates the values for package state.
// Note the wire values are lowercase, unlike the other enums above.
type PackageState string

const (
	// Active specifies the active state for package state.
	Active PackageState = "active"
	// Pending specifies the pending state for package state.
	Pending PackageState = "pending"
	// Unmapped specifies the unmapped state for package state.
	Unmapped PackageState = "unmapped"
)

// AccountBaseProperties is the properties of a Batch account.
type AccountBaseProperties struct {
	// AutoStorage is optional; nil means no auto-storage settings are sent.
	AutoStorage *AutoStorageBaseProperties `json:"autoStorage,omitempty"`
}

// AccountCreateParameters is parameters supplied to the Create operation.
// Pointer fields throughout these models distinguish "unset" (nil, omitted
// from the JSON via omitempty) from an explicit zero value.
type AccountCreateParameters struct {
	Location   *string                `json:"location,omitempty"`
	Tags       *map[string]*string    `json:"tags,omitempty"`
	Properties *AccountBaseProperties `json:"properties,omitempty"`
}

// AccountListKeyResult is values returned by the GetKeys operation.
type AccountListKeyResult struct {
	autorest.Response `json:"-"`
	Primary           *string `json:"primary,omitempty"`
	Secondary         *string `json:"secondary,omitempty"`
}

// AccountListResult is values returned by the List operation.
type AccountListResult struct {
	autorest.Response `json:"-"`
	Value             *[]AccountResource `json:"value,omitempty"`
	// NextLink, when non-nil, is the URL of the next page of results.
	NextLink *string `json:"nextLink,omitempty"`
}

// AccountListResultPreparer prepares a request to retrieve the next set of results. It returns
// nil if no more results exist.
func (client AccountListResult) AccountListResultPreparer() (*http.Request, error) {
	if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 {
		return nil, nil
	}
	return autorest.Prepare(&http.Request{},
		autorest.AsJSON(),
		autorest.AsGet(),
		autorest.WithBaseURL(to.String(client.NextLink)))
}

// AccountProperties is account specific properties.
type AccountProperties struct {
	AccountEndpoint              *string                  `json:"accountEndpoint,omitempty"`
	ProvisioningState            AccountProvisioningState `json:"provisioningState,omitempty"`
	AutoStorage                  *AutoStorageProperties   `json:"autoStorage,omitempty"`
	CoreQuota                    *int32                   `json:"coreQuota,omitempty"`
	PoolQuota                    *int32                   `json:"poolQuota,omitempty"`
	ActiveJobAndJobScheduleQuota *int32                   `json:"activeJobAndJobScheduleQuota,omitempty"`
}

// AccountRegenerateKeyParameters is parameters supplied to the RegenerateKey
// operation.
type AccountRegenerateKeyParameters struct {
	KeyName AccountKeyType `json:"keyName,omitempty"`
}

// AccountRegenerateKeyResult is values returned by the RegenerateKey
// operation.
type AccountRegenerateKeyResult struct {
	autorest.Response `json:"-"`
	Primary           *string `json:"primary,omitempty"`
	Secondary         *string `json:"secondary,omitempty"`
}

// AccountResource is contains information about an Azure Batch account.
type AccountResource struct {
	autorest.Response `json:"-"`
	ID                *string             `json:"id,omitempty"`
	Name              *string             `json:"name,omitempty"`
	Type              *string             `json:"type,omitempty"`
	Location          *string             `json:"location,omitempty"`
	Tags              *map[string]*string `json:"tags,omitempty"`
	Properties        *AccountProperties  `json:"properties,omitempty"`
}

// AccountUpdateParameters is parameters supplied to the Update operation.
type AccountUpdateParameters struct {
	Tags       *map[string]*string    `json:"tags,omitempty"`
	Properties *AccountBaseProperties `json:"properties,omitempty"`
}

// ActivateApplicationPackageParameters is parameters for an
// ApplicationOperations.ActivateApplicationPackage request.
type ActivateApplicationPackageParameters struct {
	Format *string `json:"format,omitempty"`
}

// AddApplicationPackageResult is response to an
// ApplicationOperations.AddApplicationPackage request.
type AddApplicationPackageResult struct {
	autorest.Response `json:"-"`
	ID                *string `json:"id,omitempty"`
	Version           *string `json:"version,omitempty"`
	// StorageURL is the upload target for the package blob; StorageURLExpiry
	// is when that URL stops being valid.
	StorageURL       *string    `json:"storageUrl,omitempty"`
	StorageURLExpiry *date.Time `json:"storageUrlExpiry,omitempty"`
}

// AddApplicationParameters is parameters for an
// ApplicationOperations.AddApplication request.
type AddApplicationParameters struct {
	AllowUpdates *bool   `json:"allowUpdates,omitempty"`
	DisplayName  *string `json:"displayName,omitempty"`
}

// Application is contains information about an application in a Batch account.
type Application struct {
	autorest.Response `json:"-"`
	ID                *string               `json:"id,omitempty"`
	DisplayName       *string               `json:"displayName,omitempty"`
	Packages          *[]ApplicationPackage `json:"packages,omitempty"`
	AllowUpdates      *bool                 `json:"allowUpdates,omitempty"`
	DefaultVersion    *string               `json:"defaultVersion,omitempty"`
}

// ApplicationPackage is contains information about an application package.
type ApplicationPackage struct {
	Version            *string      `json:"version,omitempty"`
	State              PackageState `json:"state,omitempty"`
	Format             *string      `json:"format,omitempty"`
	LastActivationTime *date.Time   `json:"lastActivationTime,omitempty"`
}

// AutoStorageBaseProperties is the properties related to auto storage account.
type AutoStorageBaseProperties struct {
	StorageAccountID *string `json:"storageAccountId,omitempty"`
}

// AutoStorageProperties is contains information about the auto storage
// account associated with a Batch account.
type AutoStorageProperties struct {
	StorageAccountID *string `json:"storageAccountId,omitempty"`
	// LastKeySync records when the storage keys were last synchronized.
	LastKeySync *date.Time `json:"lastKeySync,omitempty"`
}

// GetApplicationPackageResult is response to an
// ApplicationOperations.GetApplicationPackage request.
type GetApplicationPackageResult struct {
	autorest.Response  `json:"-"`
	ID                 *string      `json:"id,omitempty"`
	Version            *string      `json:"version,omitempty"`
	State              PackageState `json:"state,omitempty"`
	Format             *string      `json:"format,omitempty"`
	StorageURL         *string      `json:"storageUrl,omitempty"`
	StorageURLExpiry   *date.Time   `json:"storageUrlExpiry,omitempty"`
	LastActivationTime *date.Time   `json:"lastActivationTime,omitempty"`
}

// ListApplicationsResult is response to an
// ApplicationOperations.ListApplications request.
+type ListApplicationsResult struct { + autorest.Response `json:"-"` + Value *[]Application `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// ListApplicationsResultPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. +func (client ListApplicationsResult) ListApplicationsResultPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + +// Resource is +type Resource struct { + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` +} + +// SubscriptionQuotasGetResult is values returned by the Get Subscription +// Quotas operation. +type SubscriptionQuotasGetResult struct { + autorest.Response `json:"-"` + AccountQuota *int32 `json:"accountQuota,omitempty"` +} + +// UpdateApplicationParameters is parameters for an +// ApplicationOperations.UpdateApplication request. +type UpdateApplicationParameters struct { + AllowUpdates *bool `json:"allowUpdates,omitempty"` + DefaultVersion *string `json:"defaultVersion,omitempty"` + DisplayName *string `json:"displayName,omitempty"` +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/batch/subscription.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/batch/subscription.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/batch/subscription.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/batch/subscription.go 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,104 @@ +package batch + +// Copyright (c) Microsoft and contributors. All rights reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "net/http" +) + +// SubscriptionClient is the client for the Subscription methods of the Batch +// service. +type SubscriptionClient struct { + ManagementClient +} + +// NewSubscriptionClient creates an instance of the SubscriptionClient client. +func NewSubscriptionClient(subscriptionID string) SubscriptionClient { + return NewSubscriptionClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewSubscriptionClientWithBaseURI creates an instance of the +// SubscriptionClient client. +func NewSubscriptionClientWithBaseURI(baseURI string, subscriptionID string) SubscriptionClient { + return SubscriptionClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// GetSubscriptionQuotas gets the Batch service quotas for the specified +// suscription. +// +// locationName is the desired region for the quotas. 
+func (client SubscriptionClient) GetSubscriptionQuotas(locationName string) (result SubscriptionQuotasGetResult, err error) { + req, err := client.GetSubscriptionQuotasPreparer(locationName) + if err != nil { + return result, autorest.NewErrorWithError(err, "batch.SubscriptionClient", "GetSubscriptionQuotas", nil, "Failure preparing request") + } + + resp, err := client.GetSubscriptionQuotasSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "batch.SubscriptionClient", "GetSubscriptionQuotas", resp, "Failure sending request") + } + + result, err = client.GetSubscriptionQuotasResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "batch.SubscriptionClient", "GetSubscriptionQuotas", resp, "Failure responding to request") + } + + return +} + +// GetSubscriptionQuotasPreparer prepares the GetSubscriptionQuotas request. +func (client SubscriptionClient) GetSubscriptionQuotasPreparer(locationName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "locationName": autorest.Encode("path", locationName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Batch/locations/{locationName}/quotas", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetSubscriptionQuotasSender sends the GetSubscriptionQuotas request. The method will close the +// http.Response Body if it receives an error. 
+func (client SubscriptionClient) GetSubscriptionQuotasSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetSubscriptionQuotasResponder handles the response to the GetSubscriptionQuotas request. The method always +// closes the http.Response Body. +func (client SubscriptionClient) GetSubscriptionQuotasResponder(resp *http.Response) (result SubscriptionQuotasGetResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/batch/version.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/batch/version.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/batch/version.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/batch/version.go 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,43 @@ +package batch + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. 
+ +import ( + "fmt" +) + +const ( + major = "3" + minor = "1" + patch = "0" + // Always begin a "tag" with a dash (as per http://semver.org) + tag = "-beta" + semVerFormat = "%s.%s.%s%s" + userAgentFormat = "Azure-SDK-for-Go/%s arm-%s/%s" +) + +// UserAgent returns the UserAgent string to use when sending http.Requests. +func UserAgent() string { + return fmt.Sprintf(userAgentFormat, Version(), "batch", "2015-12-01") +} + +// Version returns the semantic version (see http://semver.org) of the client. +func Version() string { + return fmt.Sprintf(semVerFormat, major, minor, patch, tag) +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/cdn/client.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/cdn/client.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/cdn/client.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/cdn/client.go 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,60 @@ +// Package cdn implements the Azure ARM Cdn service API version 2016-04-02. +// +// Use these APIs to manage Azure CDN resources through the Azure Resource +// Manager. You must make sure that requests made to these resources are +// secure. For more information, see Authenticating Azure Resource Manager +// requests (https://msdn.microsoft.com/en-us/library/azure/dn790557.aspx). +package cdn + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" +) + +const ( + // APIVersion is the version of the Cdn + APIVersion = "2016-04-02" + + // DefaultBaseURI is the default URI used for the service Cdn + DefaultBaseURI = "https://management.azure.com" +) + +// ManagementClient is the base client for Cdn. +type ManagementClient struct { + autorest.Client + BaseURI string + APIVersion string + SubscriptionID string +} + +// New creates an instance of the ManagementClient client. +func New(subscriptionID string) ManagementClient { + return NewWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewWithBaseURI creates an instance of the ManagementClient client. +func NewWithBaseURI(baseURI string, subscriptionID string) ManagementClient { + return ManagementClient{ + Client: autorest.NewClientWithUserAgent(UserAgent()), + BaseURI: baseURI, + APIVersion: APIVersion, + SubscriptionID: subscriptionID, + } +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/cdn/customdomains.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/cdn/customdomains.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/cdn/customdomains.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/cdn/customdomains.go 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,392 @@ +package cdn + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "net/http" +) + +// CustomDomainsClient is the use these APIs to manage Azure CDN resources +// through the Azure Resource Manager. You must make sure that requests made +// to these resources are secure. For more information, see Authenticating +// Azure Resource Manager requests. +type CustomDomainsClient struct { + ManagementClient +} + +// NewCustomDomainsClient creates an instance of the CustomDomainsClient +// client. +func NewCustomDomainsClient(subscriptionID string) CustomDomainsClient { + return NewCustomDomainsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewCustomDomainsClientWithBaseURI creates an instance of the +// CustomDomainsClient client. +func NewCustomDomainsClientWithBaseURI(baseURI string, subscriptionID string) CustomDomainsClient { + return CustomDomainsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// Create sends the create request. This method may poll for completion. +// Polling can be canceled by passing the cancel channel argument. The +// channel will be used to cancel polling and any outstanding HTTP requests. +// +// customDomainName is name of the custom domain within an endpoint. +// customDomainProperties is custom domain properties required for creation. +// endpointName is name of the endpoint within the CDN profile. 
profileName +// is name of the CDN profile within the resource group. resourceGroupName is +// name of the resource group within the Azure subscription. +func (client CustomDomainsClient) Create(customDomainName string, customDomainProperties CustomDomainParameters, endpointName string, profileName string, resourceGroupName string, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.CreatePreparer(customDomainName, customDomainProperties, endpointName, profileName, resourceGroupName, cancel) + if err != nil { + return result, autorest.NewErrorWithError(err, "cdn.CustomDomainsClient", "Create", nil, "Failure preparing request") + } + + resp, err := client.CreateSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "cdn.CustomDomainsClient", "Create", resp, "Failure sending request") + } + + result, err = client.CreateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "cdn.CustomDomainsClient", "Create", resp, "Failure responding to request") + } + + return +} + +// CreatePreparer prepares the Create request. 
+func (client CustomDomainsClient) CreatePreparer(customDomainName string, customDomainProperties CustomDomainParameters, endpointName string, profileName string, resourceGroupName string, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "customDomainName": autorest.Encode("path", customDomainName), + "endpointName": autorest.Encode("path", endpointName), + "profileName": autorest.Encode("path", profileName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cdn/profiles/{profileName}/endpoints/{endpointName}/customDomains/{customDomainName}", pathParameters), + autorest.WithJSON(customDomainProperties), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// CreateSender sends the Create request. The method will close the +// http.Response Body if it receives an error. +func (client CustomDomainsClient) CreateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// CreateResponder handles the response to the Create request. The method always +// closes the http.Response Body. +func (client CustomDomainsClient) CreateResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} + +// DeleteIfExists sends the delete if exists request. 
This method may poll for +// completion. Polling can be canceled by passing the cancel channel +// argument. The channel will be used to cancel polling and any outstanding +// HTTP requests. +// +// customDomainName is name of the custom domain within an endpoint. +// endpointName is name of the endpoint within the CDN profile. profileName +// is name of the CDN profile within the resource group. resourceGroupName is +// name of the resource group within the Azure subscription. +func (client CustomDomainsClient) DeleteIfExists(customDomainName string, endpointName string, profileName string, resourceGroupName string, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.DeleteIfExistsPreparer(customDomainName, endpointName, profileName, resourceGroupName, cancel) + if err != nil { + return result, autorest.NewErrorWithError(err, "cdn.CustomDomainsClient", "DeleteIfExists", nil, "Failure preparing request") + } + + resp, err := client.DeleteIfExistsSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "cdn.CustomDomainsClient", "DeleteIfExists", resp, "Failure sending request") + } + + result, err = client.DeleteIfExistsResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "cdn.CustomDomainsClient", "DeleteIfExists", resp, "Failure responding to request") + } + + return +} + +// DeleteIfExistsPreparer prepares the DeleteIfExists request. 
+func (client CustomDomainsClient) DeleteIfExistsPreparer(customDomainName string, endpointName string, profileName string, resourceGroupName string, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "customDomainName": autorest.Encode("path", customDomainName), + "endpointName": autorest.Encode("path", endpointName), + "profileName": autorest.Encode("path", profileName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cdn/profiles/{profileName}/endpoints/{endpointName}/customDomains/{customDomainName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// DeleteIfExistsSender sends the DeleteIfExists request. The method will close the +// http.Response Body if it receives an error. +func (client CustomDomainsClient) DeleteIfExistsSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// DeleteIfExistsResponder handles the response to the DeleteIfExists request. The method always +// closes the http.Response Body. +func (client CustomDomainsClient) DeleteIfExistsResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get sends the get request. +// +// customDomainName is name of the custom domain within an endpoint. 
+// endpointName is name of the endpoint within the CDN profile. profileName +// is name of the CDN profile within the resource group. resourceGroupName is +// name of the resource group within the Azure subscription. +func (client CustomDomainsClient) Get(customDomainName string, endpointName string, profileName string, resourceGroupName string) (result CustomDomain, err error) { + req, err := client.GetPreparer(customDomainName, endpointName, profileName, resourceGroupName) + if err != nil { + return result, autorest.NewErrorWithError(err, "cdn.CustomDomainsClient", "Get", nil, "Failure preparing request") + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "cdn.CustomDomainsClient", "Get", resp, "Failure sending request") + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "cdn.CustomDomainsClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. 
+func (client CustomDomainsClient) GetPreparer(customDomainName string, endpointName string, profileName string, resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "customDomainName": autorest.Encode("path", customDomainName), + "endpointName": autorest.Encode("path", endpointName), + "profileName": autorest.Encode("path", profileName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cdn/profiles/{profileName}/endpoints/{endpointName}/customDomains/{customDomainName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client CustomDomainsClient) GetSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client CustomDomainsClient) GetResponder(resp *http.Response) (result CustomDomain, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListByEndpoint sends the list by endpoint request. +// +// endpointName is name of the endpoint within the CDN profile. profileName is +// name of the CDN profile within the resource group. 
resourceGroupName is +// name of the resource group within the Azure subscription. +func (client CustomDomainsClient) ListByEndpoint(endpointName string, profileName string, resourceGroupName string) (result CustomDomainListResult, err error) { + req, err := client.ListByEndpointPreparer(endpointName, profileName, resourceGroupName) + if err != nil { + return result, autorest.NewErrorWithError(err, "cdn.CustomDomainsClient", "ListByEndpoint", nil, "Failure preparing request") + } + + resp, err := client.ListByEndpointSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "cdn.CustomDomainsClient", "ListByEndpoint", resp, "Failure sending request") + } + + result, err = client.ListByEndpointResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "cdn.CustomDomainsClient", "ListByEndpoint", resp, "Failure responding to request") + } + + return +} + +// ListByEndpointPreparer prepares the ListByEndpoint request. 
+func (client CustomDomainsClient) ListByEndpointPreparer(endpointName string, profileName string, resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "endpointName": autorest.Encode("path", endpointName), + "profileName": autorest.Encode("path", profileName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cdn/profiles/{profileName}/endpoints/{endpointName}/customDomains", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListByEndpointSender sends the ListByEndpoint request. The method will close the +// http.Response Body if it receives an error. +func (client CustomDomainsClient) ListByEndpointSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListByEndpointResponder handles the response to the ListByEndpoint request. The method always +// closes the http.Response Body. +func (client CustomDomainsClient) ListByEndpointResponder(resp *http.Response) (result CustomDomainListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Update sends the update request. +// +// customDomainName is name of the custom domain within an endpoint. +// customDomainProperties is custom domain properties to update. endpointName +// is name of the endpoint within the CDN profile. 
profileName is name of the +// CDN profile within the resource group. resourceGroupName is name of the +// resource group within the Azure subscription. +func (client CustomDomainsClient) Update(customDomainName string, customDomainProperties CustomDomainParameters, endpointName string, profileName string, resourceGroupName string) (result ErrorResponse, err error) { + req, err := client.UpdatePreparer(customDomainName, customDomainProperties, endpointName, profileName, resourceGroupName) + if err != nil { + return result, autorest.NewErrorWithError(err, "cdn.CustomDomainsClient", "Update", nil, "Failure preparing request") + } + + resp, err := client.UpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "cdn.CustomDomainsClient", "Update", resp, "Failure sending request") + } + + result, err = client.UpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "cdn.CustomDomainsClient", "Update", resp, "Failure responding to request") + } + + return +} + +// UpdatePreparer prepares the Update request. 
+func (client CustomDomainsClient) UpdatePreparer(customDomainName string, customDomainProperties CustomDomainParameters, endpointName string, profileName string, resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "customDomainName": autorest.Encode("path", customDomainName), + "endpointName": autorest.Encode("path", endpointName), + "profileName": autorest.Encode("path", profileName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPatch(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cdn/profiles/{profileName}/endpoints/{endpointName}/customDomains/{customDomainName}", pathParameters), + autorest.WithJSON(customDomainProperties), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// UpdateSender sends the Update request. The method will close the +// http.Response Body if it receives an error. +func (client CustomDomainsClient) UpdateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// UpdateResponder handles the response to the Update request. The method always +// closes the http.Response Body. 
+func (client CustomDomainsClient) UpdateResponder(resp *http.Response) (result ErrorResponse, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/cdn/endpoints.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/cdn/endpoints.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/cdn/endpoints.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/cdn/endpoints.go 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,735 @@ +package cdn + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "net/http" +) + +// EndpointsClient is the use these APIs to manage Azure CDN resources through +// the Azure Resource Manager. You must make sure that requests made to these +// resources are secure. For more information, see Authenticating +// Azure Resource Manager requests. 
+type EndpointsClient struct { + ManagementClient +} + +// NewEndpointsClient creates an instance of the EndpointsClient client. +func NewEndpointsClient(subscriptionID string) EndpointsClient { + return NewEndpointsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewEndpointsClientWithBaseURI creates an instance of the EndpointsClient +// client. +func NewEndpointsClientWithBaseURI(baseURI string, subscriptionID string) EndpointsClient { + return EndpointsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// Create sends the create request. This method may poll for completion. +// Polling can be canceled by passing the cancel channel argument. The +// channel will be used to cancel polling and any outstanding HTTP requests. +// +// endpointName is name of the endpoint within the CDN profile. +// endpointProperties is endpoint properties profileName is name of the CDN +// profile within the resource group. resourceGroupName is name of the +// resource group within the Azure subscription. +func (client EndpointsClient) Create(endpointName string, endpointProperties EndpointCreateParameters, profileName string, resourceGroupName string, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.CreatePreparer(endpointName, endpointProperties, profileName, resourceGroupName, cancel) + if err != nil { + return result, autorest.NewErrorWithError(err, "cdn.EndpointsClient", "Create", nil, "Failure preparing request") + } + + resp, err := client.CreateSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "cdn.EndpointsClient", "Create", resp, "Failure sending request") + } + + result, err = client.CreateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "cdn.EndpointsClient", "Create", resp, "Failure responding to request") + } + + return +} + +// CreatePreparer prepares the Create request. 
+func (client EndpointsClient) CreatePreparer(endpointName string, endpointProperties EndpointCreateParameters, profileName string, resourceGroupName string, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "endpointName": autorest.Encode("path", endpointName), + "profileName": autorest.Encode("path", profileName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cdn/profiles/{profileName}/endpoints/{endpointName}", pathParameters), + autorest.WithJSON(endpointProperties), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// CreateSender sends the Create request. The method will close the +// http.Response Body if it receives an error. +func (client EndpointsClient) CreateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// CreateResponder handles the response to the Create request. The method always +// closes the http.Response Body. +func (client EndpointsClient) CreateResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} + +// DeleteIfExists sends the delete if exists request. This method may poll for +// completion. Polling can be canceled by passing the cancel channel +// argument. 
The channel will be used to cancel polling and any outstanding +// HTTP requests. +// +// endpointName is name of the endpoint within the CDN profile. profileName is +// name of the CDN profile within the resource group. resourceGroupName is +// name of the resource group within the Azure subscription. +func (client EndpointsClient) DeleteIfExists(endpointName string, profileName string, resourceGroupName string, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.DeleteIfExistsPreparer(endpointName, profileName, resourceGroupName, cancel) + if err != nil { + return result, autorest.NewErrorWithError(err, "cdn.EndpointsClient", "DeleteIfExists", nil, "Failure preparing request") + } + + resp, err := client.DeleteIfExistsSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "cdn.EndpointsClient", "DeleteIfExists", resp, "Failure sending request") + } + + result, err = client.DeleteIfExistsResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "cdn.EndpointsClient", "DeleteIfExists", resp, "Failure responding to request") + } + + return +} + +// DeleteIfExistsPreparer prepares the DeleteIfExists request. 
+func (client EndpointsClient) DeleteIfExistsPreparer(endpointName string, profileName string, resourceGroupName string, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "endpointName": autorest.Encode("path", endpointName), + "profileName": autorest.Encode("path", profileName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cdn/profiles/{profileName}/endpoints/{endpointName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// DeleteIfExistsSender sends the DeleteIfExists request. The method will close the +// http.Response Body if it receives an error. +func (client EndpointsClient) DeleteIfExistsSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// DeleteIfExistsResponder handles the response to the DeleteIfExists request. The method always +// closes the http.Response Body. +func (client EndpointsClient) DeleteIfExistsResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get sends the get request. +// +// endpointName is name of the endpoint within the CDN profile. profileName is +// name of the CDN profile within the resource group. 
resourceGroupName is +// name of the resource group within the Azure subscription. +func (client EndpointsClient) Get(endpointName string, profileName string, resourceGroupName string) (result Endpoint, err error) { + req, err := client.GetPreparer(endpointName, profileName, resourceGroupName) + if err != nil { + return result, autorest.NewErrorWithError(err, "cdn.EndpointsClient", "Get", nil, "Failure preparing request") + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "cdn.EndpointsClient", "Get", resp, "Failure sending request") + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "cdn.EndpointsClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client EndpointsClient) GetPreparer(endpointName string, profileName string, resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "endpointName": autorest.Encode("path", endpointName), + "profileName": autorest.Encode("path", profileName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cdn/profiles/{profileName}/endpoints/{endpointName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. 
+func (client EndpointsClient) GetSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client EndpointsClient) GetResponder(resp *http.Response) (result Endpoint, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListByProfile sends the list by profile request. +// +// profileName is name of the CDN profile within the resource group. +// resourceGroupName is name of the resource group within the Azure +// subscription. +func (client EndpointsClient) ListByProfile(profileName string, resourceGroupName string) (result EndpointListResult, err error) { + req, err := client.ListByProfilePreparer(profileName, resourceGroupName) + if err != nil { + return result, autorest.NewErrorWithError(err, "cdn.EndpointsClient", "ListByProfile", nil, "Failure preparing request") + } + + resp, err := client.ListByProfileSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "cdn.EndpointsClient", "ListByProfile", resp, "Failure sending request") + } + + result, err = client.ListByProfileResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "cdn.EndpointsClient", "ListByProfile", resp, "Failure responding to request") + } + + return +} + +// ListByProfilePreparer prepares the ListByProfile request. 
+func (client EndpointsClient) ListByProfilePreparer(profileName string, resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "profileName": autorest.Encode("path", profileName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cdn/profiles/{profileName}/endpoints", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListByProfileSender sends the ListByProfile request. The method will close the +// http.Response Body if it receives an error. +func (client EndpointsClient) ListByProfileSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListByProfileResponder handles the response to the ListByProfile request. The method always +// closes the http.Response Body. +func (client EndpointsClient) ListByProfileResponder(resp *http.Response) (result EndpointListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// LoadContent sends the load content request. This method may poll for +// completion. Polling can be canceled by passing the cancel channel +// argument. The channel will be used to cancel polling and any outstanding +// HTTP requests. +// +// endpointName is name of the endpoint within the CDN profile. +// contentFilePaths is the path to the content to be loaded. Path should +// describe a file. 
profileName is name of the CDN profile within the +// resource group. resourceGroupName is name of the resource group within the +// Azure subscription. +func (client EndpointsClient) LoadContent(endpointName string, contentFilePaths LoadParameters, profileName string, resourceGroupName string, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.LoadContentPreparer(endpointName, contentFilePaths, profileName, resourceGroupName, cancel) + if err != nil { + return result, autorest.NewErrorWithError(err, "cdn.EndpointsClient", "LoadContent", nil, "Failure preparing request") + } + + resp, err := client.LoadContentSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "cdn.EndpointsClient", "LoadContent", resp, "Failure sending request") + } + + result, err = client.LoadContentResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "cdn.EndpointsClient", "LoadContent", resp, "Failure responding to request") + } + + return +} + +// LoadContentPreparer prepares the LoadContent request. 
+func (client EndpointsClient) LoadContentPreparer(endpointName string, contentFilePaths LoadParameters, profileName string, resourceGroupName string, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "endpointName": autorest.Encode("path", endpointName), + "profileName": autorest.Encode("path", profileName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cdn/profiles/{profileName}/endpoints/{endpointName}/load", pathParameters), + autorest.WithJSON(contentFilePaths), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// LoadContentSender sends the LoadContent request. The method will close the +// http.Response Body if it receives an error. +func (client EndpointsClient) LoadContentSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// LoadContentResponder handles the response to the LoadContent request. The method always +// closes the http.Response Body. +func (client EndpointsClient) LoadContentResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} + +// PurgeContent sends the purge content request. This method may poll for +// completion. Polling can be canceled by passing the cancel channel +// argument. 
The channel will be used to cancel polling and any outstanding +// HTTP requests. +// +// endpointName is name of the endpoint within the CDN profile. +// contentFilePaths is the path to the content to be purged. Path can +// describe a file or directory. profileName is name of the CDN profile +// within the resource group. resourceGroupName is name of the resource group +// within the Azure subscription. +func (client EndpointsClient) PurgeContent(endpointName string, contentFilePaths PurgeParameters, profileName string, resourceGroupName string, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.PurgeContentPreparer(endpointName, contentFilePaths, profileName, resourceGroupName, cancel) + if err != nil { + return result, autorest.NewErrorWithError(err, "cdn.EndpointsClient", "PurgeContent", nil, "Failure preparing request") + } + + resp, err := client.PurgeContentSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "cdn.EndpointsClient", "PurgeContent", resp, "Failure sending request") + } + + result, err = client.PurgeContentResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "cdn.EndpointsClient", "PurgeContent", resp, "Failure responding to request") + } + + return +} + +// PurgeContentPreparer prepares the PurgeContent request. 
+func (client EndpointsClient) PurgeContentPreparer(endpointName string, contentFilePaths PurgeParameters, profileName string, resourceGroupName string, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "endpointName": autorest.Encode("path", endpointName), + "profileName": autorest.Encode("path", profileName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cdn/profiles/{profileName}/endpoints/{endpointName}/purge", pathParameters), + autorest.WithJSON(contentFilePaths), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// PurgeContentSender sends the PurgeContent request. The method will close the +// http.Response Body if it receives an error. +func (client EndpointsClient) PurgeContentSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// PurgeContentResponder handles the response to the PurgeContent request. The method always +// closes the http.Response Body. +func (client EndpointsClient) PurgeContentResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} + +// Start sends the start request. This method may poll for completion. Polling +// can be canceled by passing the cancel channel argument. 
The channel will +// be used to cancel polling and any outstanding HTTP requests. +// +// endpointName is name of the endpoint within the CDN profile. profileName is +// name of the CDN profile within the resource group. resourceGroupName is +// name of the resource group within the Azure subscription. +func (client EndpointsClient) Start(endpointName string, profileName string, resourceGroupName string, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.StartPreparer(endpointName, profileName, resourceGroupName, cancel) + if err != nil { + return result, autorest.NewErrorWithError(err, "cdn.EndpointsClient", "Start", nil, "Failure preparing request") + } + + resp, err := client.StartSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "cdn.EndpointsClient", "Start", resp, "Failure sending request") + } + + result, err = client.StartResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "cdn.EndpointsClient", "Start", resp, "Failure responding to request") + } + + return +} + +// StartPreparer prepares the Start request. 
+func (client EndpointsClient) StartPreparer(endpointName string, profileName string, resourceGroupName string, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "endpointName": autorest.Encode("path", endpointName), + "profileName": autorest.Encode("path", profileName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cdn/profiles/{profileName}/endpoints/{endpointName}/start", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// StartSender sends the Start request. The method will close the +// http.Response Body if it receives an error. +func (client EndpointsClient) StartSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// StartResponder handles the response to the Start request. The method always +// closes the http.Response Body. +func (client EndpointsClient) StartResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} + +// Stop sends the stop request. This method may poll for completion. Polling +// can be canceled by passing the cancel channel argument. The channel will +// be used to cancel polling and any outstanding HTTP requests. +// +// endpointName is name of the endpoint within the CDN profile. 
profileName is +// name of the CDN profile within the resource group. resourceGroupName is +// name of the resource group within the Azure subscription. +func (client EndpointsClient) Stop(endpointName string, profileName string, resourceGroupName string, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.StopPreparer(endpointName, profileName, resourceGroupName, cancel) + if err != nil { + return result, autorest.NewErrorWithError(err, "cdn.EndpointsClient", "Stop", nil, "Failure preparing request") + } + + resp, err := client.StopSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "cdn.EndpointsClient", "Stop", resp, "Failure sending request") + } + + result, err = client.StopResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "cdn.EndpointsClient", "Stop", resp, "Failure responding to request") + } + + return +} + +// StopPreparer prepares the Stop request. +func (client EndpointsClient) StopPreparer(endpointName string, profileName string, resourceGroupName string, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "endpointName": autorest.Encode("path", endpointName), + "profileName": autorest.Encode("path", profileName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cdn/profiles/{profileName}/endpoints/{endpointName}/stop", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// StopSender sends the Stop request. 
The method will close the +// http.Response Body if it receives an error. +func (client EndpointsClient) StopSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// StopResponder handles the response to the Stop request. The method always +// closes the http.Response Body. +func (client EndpointsClient) StopResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} + +// Update sends the update request. This method may poll for completion. +// Polling can be canceled by passing the cancel channel argument. The +// channel will be used to cancel polling and any outstanding HTTP requests. +// +// endpointName is name of the endpoint within the CDN profile. +// endpointProperties is endpoint properties profileName is name of the CDN +// profile within the resource group. resourceGroupName is name of the +// resource group within the Azure subscription. 
+func (client EndpointsClient) Update(endpointName string, endpointProperties EndpointUpdateParameters, profileName string, resourceGroupName string, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.UpdatePreparer(endpointName, endpointProperties, profileName, resourceGroupName, cancel) + if err != nil { + return result, autorest.NewErrorWithError(err, "cdn.EndpointsClient", "Update", nil, "Failure preparing request") + } + + resp, err := client.UpdateSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "cdn.EndpointsClient", "Update", resp, "Failure sending request") + } + + result, err = client.UpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "cdn.EndpointsClient", "Update", resp, "Failure responding to request") + } + + return +} + +// UpdatePreparer prepares the Update request. +func (client EndpointsClient) UpdatePreparer(endpointName string, endpointProperties EndpointUpdateParameters, profileName string, resourceGroupName string, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "endpointName": autorest.Encode("path", endpointName), + "profileName": autorest.Encode("path", profileName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPatch(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cdn/profiles/{profileName}/endpoints/{endpointName}", pathParameters), + autorest.WithJSON(endpointProperties), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// UpdateSender sends the Update 
request. The method will close the +// http.Response Body if it receives an error. +func (client EndpointsClient) UpdateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// UpdateResponder handles the response to the Update request. The method always +// closes the http.Response Body. +func (client EndpointsClient) UpdateResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} + +// ValidateCustomDomain sends the validate custom domain request. +// +// endpointName is name of the endpoint within the CDN profile. +// customDomainProperties is custom domain to validate. profileName is name +// of the CDN profile within the resource group. resourceGroupName is name of +// the resource group within the Azure subscription. 
+func (client EndpointsClient) ValidateCustomDomain(endpointName string, customDomainProperties ValidateCustomDomainInput, profileName string, resourceGroupName string) (result ValidateCustomDomainOutput, err error) { + req, err := client.ValidateCustomDomainPreparer(endpointName, customDomainProperties, profileName, resourceGroupName) + if err != nil { + return result, autorest.NewErrorWithError(err, "cdn.EndpointsClient", "ValidateCustomDomain", nil, "Failure preparing request") + } + + resp, err := client.ValidateCustomDomainSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "cdn.EndpointsClient", "ValidateCustomDomain", resp, "Failure sending request") + } + + result, err = client.ValidateCustomDomainResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "cdn.EndpointsClient", "ValidateCustomDomain", resp, "Failure responding to request") + } + + return +} + +// ValidateCustomDomainPreparer prepares the ValidateCustomDomain request. 
+func (client EndpointsClient) ValidateCustomDomainPreparer(endpointName string, customDomainProperties ValidateCustomDomainInput, profileName string, resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "endpointName": autorest.Encode("path", endpointName), + "profileName": autorest.Encode("path", profileName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cdn/profiles/{profileName}/endpoints/{endpointName}/validateCustomDomain", pathParameters), + autorest.WithJSON(customDomainProperties), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ValidateCustomDomainSender sends the ValidateCustomDomain request. The method will close the +// http.Response Body if it receives an error. +func (client EndpointsClient) ValidateCustomDomainSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ValidateCustomDomainResponder handles the response to the ValidateCustomDomain request. The method always +// closes the http.Response Body. 
+func (client EndpointsClient) ValidateCustomDomainResponder(resp *http.Response) (result ValidateCustomDomainOutput, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/cdn/models.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/cdn/models.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/cdn/models.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/cdn/models.go 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,439 @@ +package cdn + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" +) + +// CustomDomainResourceState enumerates the values for custom domain resource +// state. +type CustomDomainResourceState string + +const ( + // Active specifies the active state for custom domain resource state. + Active CustomDomainResourceState = "Active" + // Creating specifies the creating state for custom domain resource state. 
+ Creating CustomDomainResourceState = "Creating" + // Deleting specifies the deleting state for custom domain resource state. + Deleting CustomDomainResourceState = "Deleting" +) + +// EndpointResourceState enumerates the values for endpoint resource state. +type EndpointResourceState string + +const ( + // EndpointResourceStateCreating specifies the endpoint resource state + // creating state for endpoint resource state. + EndpointResourceStateCreating EndpointResourceState = "Creating" + // EndpointResourceStateDeleting specifies the endpoint resource state + // deleting state for endpoint resource state. + EndpointResourceStateDeleting EndpointResourceState = "Deleting" + // EndpointResourceStateRunning specifies the endpoint resource state + // running state for endpoint resource state. + EndpointResourceStateRunning EndpointResourceState = "Running" + // EndpointResourceStateStarting specifies the endpoint resource state + // starting state for endpoint resource state. + EndpointResourceStateStarting EndpointResourceState = "Starting" + // EndpointResourceStateStopped specifies the endpoint resource state + // stopped state for endpoint resource state. + EndpointResourceStateStopped EndpointResourceState = "Stopped" + // EndpointResourceStateStopping specifies the endpoint resource state + // stopping state for endpoint resource state. + EndpointResourceStateStopping EndpointResourceState = "Stopping" +) + +// OriginResourceState enumerates the values for origin resource state. +type OriginResourceState string + +const ( + // OriginResourceStateActive specifies the origin resource state active + // state for origin resource state. + OriginResourceStateActive OriginResourceState = "Active" + // OriginResourceStateCreating specifies the origin resource state + // creating state for origin resource state. 
+ OriginResourceStateCreating OriginResourceState = "Creating" + // OriginResourceStateDeleting specifies the origin resource state + // deleting state for origin resource state. + OriginResourceStateDeleting OriginResourceState = "Deleting" +) + +// ProfileResourceState enumerates the values for profile resource state. +type ProfileResourceState string + +const ( + // ProfileResourceStateActive specifies the profile resource state active + // state for profile resource state. + ProfileResourceStateActive ProfileResourceState = "Active" + // ProfileResourceStateCreating specifies the profile resource state + // creating state for profile resource state. + ProfileResourceStateCreating ProfileResourceState = "Creating" + // ProfileResourceStateDeleting specifies the profile resource state + // deleting state for profile resource state. + ProfileResourceStateDeleting ProfileResourceState = "Deleting" + // ProfileResourceStateDisabled specifies the profile resource state + // disabled state for profile resource state. + ProfileResourceStateDisabled ProfileResourceState = "Disabled" +) + +// ProvisioningState enumerates the values for provisioning state. +type ProvisioningState string + +const ( + // ProvisioningStateCreating specifies the provisioning state creating + // state for provisioning state. + ProvisioningStateCreating ProvisioningState = "Creating" + // ProvisioningStateFailed specifies the provisioning state failed state + // for provisioning state. + ProvisioningStateFailed ProvisioningState = "Failed" + // ProvisioningStateSucceeded specifies the provisioning state succeeded + // state for provisioning state. + ProvisioningStateSucceeded ProvisioningState = "Succeeded" +) + +// QueryStringCachingBehavior enumerates the values for query string caching +// behavior. +type QueryStringCachingBehavior string + +const ( + // BypassCaching specifies the bypass caching state for query string + // caching behavior. 
+ BypassCaching QueryStringCachingBehavior = "BypassCaching" + // IgnoreQueryString specifies the ignore query string state for query + // string caching behavior. + IgnoreQueryString QueryStringCachingBehavior = "IgnoreQueryString" + // NotSet specifies the not set state for query string caching behavior. + NotSet QueryStringCachingBehavior = "NotSet" + // UseQueryString specifies the use query string state for query string + // caching behavior. + UseQueryString QueryStringCachingBehavior = "UseQueryString" +) + +// ResourceType enumerates the values for resource type. +type ResourceType string + +const ( + // MicrosoftCdnProfilesEndpoints specifies the microsoft cdn profiles + // endpoints state for resource type. + MicrosoftCdnProfilesEndpoints ResourceType = "Microsoft.Cdn/Profiles/Endpoints" +) + +// SkuName enumerates the values for sku name. +type SkuName string + +const ( + // CustomVerizon specifies the custom verizon state for sku name. + CustomVerizon SkuName = "Custom_Verizon" + // PremiumVerizon specifies the premium verizon state for sku name. + PremiumVerizon SkuName = "Premium_Verizon" + // StandardAkamai specifies the standard akamai state for sku name. + StandardAkamai SkuName = "Standard_Akamai" + // StandardVerizon specifies the standard verizon state for sku name. + StandardVerizon SkuName = "Standard_Verizon" +) + +// CheckNameAvailabilityInput is input of CheckNameAvailability API. +type CheckNameAvailabilityInput struct { + Name *string `json:"name,omitempty"` + Type ResourceType `json:"type,omitempty"` +} + +// CheckNameAvailabilityOutput is output of check name availability API. +type CheckNameAvailabilityOutput struct { + autorest.Response `json:"-"` + NameAvailable *bool `json:"NameAvailable,omitempty"` + Reason *string `json:"Reason,omitempty"` + Message *string `json:"Message,omitempty"` +} + +// CustomDomain is cDN CustomDomain represents a mapping between a user +// specified domain name and a CDN endpoint. 
This is to use custom domain +// names to represent the URLs for branding purposes. +type CustomDomain struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Properties *CustomDomainProperties `json:"properties,omitempty"` +} + +// CustomDomainListResult is +type CustomDomainListResult struct { + autorest.Response `json:"-"` + Value *[]CustomDomain `json:"value,omitempty"` +} + +// CustomDomainParameters is customDomain properties required for custom +// domain creation or update. +type CustomDomainParameters struct { + Properties *CustomDomainPropertiesParameters `json:"properties,omitempty"` +} + +// CustomDomainProperties is +type CustomDomainProperties struct { + HostName *string `json:"hostName,omitempty"` + ResourceState CustomDomainResourceState `json:"resourceState,omitempty"` + ProvisioningState ProvisioningState `json:"provisioningState,omitempty"` +} + +// CustomDomainPropertiesParameters is +type CustomDomainPropertiesParameters struct { + HostName *string `json:"hostName,omitempty"` +} + +// DeepCreatedOrigin is deep created origins within a CDN endpoint. +type DeepCreatedOrigin struct { + Name *string `json:"name,omitempty"` + Properties *DeepCreatedOriginProperties `json:"properties,omitempty"` +} + +// DeepCreatedOriginProperties is properties of deep created origin on a CDN +// endpoint. +type DeepCreatedOriginProperties struct { + HostName *string `json:"hostName,omitempty"` + HTTPPort *int32 `json:"httpPort,omitempty"` + HTTPSPort *int32 `json:"httpsPort,omitempty"` +} + +// Endpoint is cDN endpoint is the entity within a CDN profile containing +// configuration information regarding caching behaviors and origins. The CDN +// endpoint is exposed using the URL format .azureedge.net by +// default, but custom domains can also be created. 
+type Endpoint struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` + Properties *EndpointProperties `json:"properties,omitempty"` +} + +// EndpointCreateParameters is endpoint properties required for new endpoint +// creation. +type EndpointCreateParameters struct { + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` + Properties *EndpointPropertiesCreateParameters `json:"properties,omitempty"` +} + +// EndpointListResult is +type EndpointListResult struct { + autorest.Response `json:"-"` + Value *[]Endpoint `json:"value,omitempty"` +} + +// EndpointProperties is +type EndpointProperties struct { + HostName *string `json:"hostName,omitempty"` + OriginHostHeader *string `json:"originHostHeader,omitempty"` + OriginPath *string `json:"originPath,omitempty"` + ContentTypesToCompress *[]string `json:"contentTypesToCompress,omitempty"` + IsCompressionEnabled *bool `json:"isCompressionEnabled,omitempty"` + IsHTTPAllowed *bool `json:"isHttpAllowed,omitempty"` + IsHTTPSAllowed *bool `json:"isHttpsAllowed,omitempty"` + QueryStringCachingBehavior QueryStringCachingBehavior `json:"queryStringCachingBehavior,omitempty"` + Origins *[]DeepCreatedOrigin `json:"origins,omitempty"` + ResourceState EndpointResourceState `json:"resourceState,omitempty"` + ProvisioningState ProvisioningState `json:"provisioningState,omitempty"` +} + +// EndpointPropertiesCreateParameters is +type EndpointPropertiesCreateParameters struct { + OriginHostHeader *string `json:"originHostHeader,omitempty"` + OriginPath *string `json:"originPath,omitempty"` + ContentTypesToCompress *[]string `json:"contentTypesToCompress,omitempty"` + IsCompressionEnabled *bool `json:"isCompressionEnabled,omitempty"` + IsHTTPAllowed *bool `json:"isHttpAllowed,omitempty"` + 
IsHTTPSAllowed *bool `json:"isHttpsAllowed,omitempty"` + QueryStringCachingBehavior QueryStringCachingBehavior `json:"queryStringCachingBehavior,omitempty"` + Origins *[]DeepCreatedOrigin `json:"origins,omitempty"` +} + +// EndpointPropertiesUpdateParameters is +type EndpointPropertiesUpdateParameters struct { + OriginHostHeader *string `json:"originHostHeader,omitempty"` + OriginPath *string `json:"originPath,omitempty"` + ContentTypesToCompress *[]string `json:"contentTypesToCompress,omitempty"` + IsCompressionEnabled *bool `json:"isCompressionEnabled,omitempty"` + IsHTTPAllowed *bool `json:"isHttpAllowed,omitempty"` + IsHTTPSAllowed *bool `json:"isHttpsAllowed,omitempty"` + QueryStringCachingBehavior QueryStringCachingBehavior `json:"queryStringCachingBehavior,omitempty"` +} + +// EndpointUpdateParameters is endpoint properties required for new endpoint +// creation. +type EndpointUpdateParameters struct { + Tags *map[string]*string `json:"tags,omitempty"` + Properties *EndpointPropertiesUpdateParameters `json:"properties,omitempty"` +} + +// ErrorResponse is +type ErrorResponse struct { + autorest.Response `json:"-"` + Code *string `json:"code,omitempty"` + Message *string `json:"message,omitempty"` +} + +// LoadParameters is parameters required for endpoint load. +type LoadParameters struct { + ContentPaths *[]string `json:"contentPaths,omitempty"` +} + +// Operation is cDN REST API operation +type Operation struct { + Name *string `json:"name,omitempty"` + Display *OperationDisplay `json:"display,omitempty"` +} + +// OperationDisplay is +type OperationDisplay struct { + Provider *string `json:"provider,omitempty"` + Resource *string `json:"resource,omitempty"` + Operation *string `json:"operation,omitempty"` +} + +// OperationListResult is +type OperationListResult struct { + autorest.Response `json:"-"` + Value *[]Operation `json:"value,omitempty"` +} + +// Origin is cDN origin is the source of the content being delivered via CDN. 
+// When the edge nodes represented by an endpoint do not have the requested +// content cached, they attempt to fetch it from one or more of the +// configured origins. +type Origin struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Properties *OriginProperties `json:"properties,omitempty"` +} + +// OriginListResult is +type OriginListResult struct { + autorest.Response `json:"-"` + Value *[]Origin `json:"value,omitempty"` +} + +// OriginParameters is origin properties needed for origin creation or update. +type OriginParameters struct { + Properties *OriginPropertiesParameters `json:"properties,omitempty"` +} + +// OriginProperties is +type OriginProperties struct { + HostName *string `json:"hostName,omitempty"` + HTTPPort *int32 `json:"httpPort,omitempty"` + HTTPSPort *int32 `json:"httpsPort,omitempty"` + ResourceState OriginResourceState `json:"resourceState,omitempty"` + ProvisioningState ProvisioningState `json:"provisioningState,omitempty"` +} + +// OriginPropertiesParameters is +type OriginPropertiesParameters struct { + HostName *string `json:"hostName,omitempty"` + HTTPPort *int32 `json:"httpPort,omitempty"` + HTTPSPort *int32 `json:"httpsPort,omitempty"` +} + +// Profile is cDN profile represents the top level resource and the entry +// point into the CDN API. This allows users to set up a logical grouping of +// endpoints in addition to creating shared configuration settings and +// selecting pricing tiers and providers. 
+type Profile struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` + Sku *Sku `json:"sku,omitempty"` + Properties *ProfileProperties `json:"properties,omitempty"` +} + +// ProfileCreateParameters is profile properties required for profile creation. +type ProfileCreateParameters struct { + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` + Sku *Sku `json:"sku,omitempty"` +} + +// ProfileListResult is +type ProfileListResult struct { + autorest.Response `json:"-"` + Value *[]Profile `json:"value,omitempty"` +} + +// ProfileProperties is +type ProfileProperties struct { + ResourceState ProfileResourceState `json:"resourceState,omitempty"` + ProvisioningState ProvisioningState `json:"provisioningState,omitempty"` +} + +// ProfileUpdateParameters is profile properties required for profile update. +type ProfileUpdateParameters struct { + Tags *map[string]*string `json:"tags,omitempty"` +} + +// PurgeParameters is parameters required for endpoint purge. +type PurgeParameters struct { + ContentPaths *[]string `json:"contentPaths,omitempty"` +} + +// Resource is +type Resource struct { + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` +} + +// Sku is the SKU (pricing tier) of the CDN profile. +type Sku struct { + Name SkuName `json:"name,omitempty"` +} + +// SsoURI is sSO URI required to login to third party web portal. 
+type SsoURI struct { + autorest.Response `json:"-"` + SsoURIValue *string `json:"ssoUriValue,omitempty"` +} + +// TrackedResource is aRM tracked resource +type TrackedResource struct { + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` +} + +// ValidateCustomDomainInput is input of the custom domain to be validated. +type ValidateCustomDomainInput struct { + HostName *string `json:"hostName,omitempty"` +} + +// ValidateCustomDomainOutput is output of custom domain validation. +type ValidateCustomDomainOutput struct { + autorest.Response `json:"-"` + CustomDomainValidated *bool `json:"customDomainValidated,omitempty"` + Reason *string `json:"reason,omitempty"` + Message *string `json:"message,omitempty"` +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/cdn/nameavailability.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/cdn/nameavailability.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/cdn/nameavailability.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/cdn/nameavailability.go 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,104 @@ +package cdn + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "net/http" +) + +// NameAvailabilityClient is the use these APIs to manage Azure CDN resources +// through the Azure Resource Manager. You must make sure that requests made +// to these resources are secure. For more information, see Authenticating +// Azure Resource Manager requests. +type NameAvailabilityClient struct { + ManagementClient +} + +// NewNameAvailabilityClient creates an instance of the NameAvailabilityClient +// client. +func NewNameAvailabilityClient(subscriptionID string) NameAvailabilityClient { + return NewNameAvailabilityClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewNameAvailabilityClientWithBaseURI creates an instance of the +// NameAvailabilityClient client. +func NewNameAvailabilityClientWithBaseURI(baseURI string, subscriptionID string) NameAvailabilityClient { + return NameAvailabilityClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CheckNameAvailability sends the check name availability request. +// +// checkNameAvailabilityInput is input to check. 
+func (client NameAvailabilityClient) CheckNameAvailability(checkNameAvailabilityInput CheckNameAvailabilityInput) (result CheckNameAvailabilityOutput, err error) { + req, err := client.CheckNameAvailabilityPreparer(checkNameAvailabilityInput) + if err != nil { + return result, autorest.NewErrorWithError(err, "cdn.NameAvailabilityClient", "CheckNameAvailability", nil, "Failure preparing request") + } + + resp, err := client.CheckNameAvailabilitySender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "cdn.NameAvailabilityClient", "CheckNameAvailability", resp, "Failure sending request") + } + + result, err = client.CheckNameAvailabilityResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "cdn.NameAvailabilityClient", "CheckNameAvailability", resp, "Failure responding to request") + } + + return +} + +// CheckNameAvailabilityPreparer prepares the CheckNameAvailability request. +func (client NameAvailabilityClient) CheckNameAvailabilityPreparer(checkNameAvailabilityInput CheckNameAvailabilityInput) (*http.Request, error) { + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/providers/Microsoft.Cdn/checkNameAvailability"), + autorest.WithJSON(checkNameAvailabilityInput), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// CheckNameAvailabilitySender sends the CheckNameAvailability request. The method will close the +// http.Response Body if it receives an error. +func (client NameAvailabilityClient) CheckNameAvailabilitySender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// CheckNameAvailabilityResponder handles the response to the CheckNameAvailability request. 
The method always +// closes the http.Response Body. +func (client NameAvailabilityClient) CheckNameAvailabilityResponder(resp *http.Response) (result CheckNameAvailabilityOutput, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/cdn/operations.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/cdn/operations.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/cdn/operations.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/cdn/operations.go 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,99 @@ +package cdn + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "net/http" +) + +// OperationsClient is the use these APIs to manage Azure CDN resources +// through the Azure Resource Manager. You must make sure that requests made +// to these resources are secure. 
For more information, see Authenticating +// Azure Resource Manager requests. +type OperationsClient struct { + ManagementClient +} + +// NewOperationsClient creates an instance of the OperationsClient client. +func NewOperationsClient(subscriptionID string) OperationsClient { + return NewOperationsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewOperationsClientWithBaseURI creates an instance of the OperationsClient +// client. +func NewOperationsClientWithBaseURI(baseURI string, subscriptionID string) OperationsClient { + return OperationsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// List sends the list request. +func (client OperationsClient) List() (result OperationListResult, err error) { + req, err := client.ListPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "cdn.OperationsClient", "List", nil, "Failure preparing request") + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "cdn.OperationsClient", "List", resp, "Failure sending request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "cdn.OperationsClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client OperationsClient) ListPreparer() (*http.Request, error) { + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/providers/Microsoft.Cdn/operations"), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. 
+func (client OperationsClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client OperationsClient) ListResponder(resp *http.Response) (result OperationListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/cdn/origins.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/cdn/origins.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/cdn/origins.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/cdn/origins.go 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,394 @@ +package cdn + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. 
+ +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "net/http" +) + +// OriginsClient is the use these APIs to manage Azure CDN resources through +// the Azure Resource Manager. You must make sure that requests made to these +// resources are secure. For more information, see Authenticating +// Azure Resource Manager requests. +type OriginsClient struct { + ManagementClient +} + +// NewOriginsClient creates an instance of the OriginsClient client. +func NewOriginsClient(subscriptionID string) OriginsClient { + return NewOriginsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewOriginsClientWithBaseURI creates an instance of the OriginsClient client. +func NewOriginsClientWithBaseURI(baseURI string, subscriptionID string) OriginsClient { + return OriginsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// Create sends the create request. This method may poll for completion. +// Polling can be canceled by passing the cancel channel argument. The +// channel will be used to cancel polling and any outstanding HTTP requests. +// +// originName is name of the origin, an arbitrary value but it needs to be +// unique under endpoint originProperties is origin properties endpointName +// is name of the endpoint within the CDN profile. profileName is name of the +// CDN profile within the resource group. resourceGroupName is name of the +// resource group within the Azure subscription. 
+func (client OriginsClient) Create(originName string, originProperties OriginParameters, endpointName string, profileName string, resourceGroupName string, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.CreatePreparer(originName, originProperties, endpointName, profileName, resourceGroupName, cancel) + if err != nil { + return result, autorest.NewErrorWithError(err, "cdn.OriginsClient", "Create", nil, "Failure preparing request") + } + + resp, err := client.CreateSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "cdn.OriginsClient", "Create", resp, "Failure sending request") + } + + result, err = client.CreateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "cdn.OriginsClient", "Create", resp, "Failure responding to request") + } + + return +} + +// CreatePreparer prepares the Create request. +func (client OriginsClient) CreatePreparer(originName string, originProperties OriginParameters, endpointName string, profileName string, resourceGroupName string, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "endpointName": autorest.Encode("path", endpointName), + "originName": autorest.Encode("path", originName), + "profileName": autorest.Encode("path", profileName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cdn/profiles/{profileName}/endpoints/{endpointName}/origins/{originName}", pathParameters), + autorest.WithJSON(originProperties), + autorest.WithQueryParameters(queryParameters)) + return 
preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// CreateSender sends the Create request. The method will close the +// http.Response Body if it receives an error. +func (client OriginsClient) CreateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// CreateResponder handles the response to the Create request. The method always +// closes the http.Response Body. +func (client OriginsClient) CreateResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} + +// DeleteIfExists sends the delete if exists request. This method may poll for +// completion. Polling can be canceled by passing the cancel channel +// argument. The channel will be used to cancel polling and any outstanding +// HTTP requests. +// +// originName is name of the origin. Must be unique within endpoint. +// endpointName is name of the endpoint within the CDN profile. profileName +// is name of the CDN profile within the resource group. resourceGroupName is +// name of the resource group within the Azure subscription. 
+func (client OriginsClient) DeleteIfExists(originName string, endpointName string, profileName string, resourceGroupName string, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.DeleteIfExistsPreparer(originName, endpointName, profileName, resourceGroupName, cancel) + if err != nil { + return result, autorest.NewErrorWithError(err, "cdn.OriginsClient", "DeleteIfExists", nil, "Failure preparing request") + } + + resp, err := client.DeleteIfExistsSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "cdn.OriginsClient", "DeleteIfExists", resp, "Failure sending request") + } + + result, err = client.DeleteIfExistsResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "cdn.OriginsClient", "DeleteIfExists", resp, "Failure responding to request") + } + + return +} + +// DeleteIfExistsPreparer prepares the DeleteIfExists request. +func (client OriginsClient) DeleteIfExistsPreparer(originName string, endpointName string, profileName string, resourceGroupName string, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "endpointName": autorest.Encode("path", endpointName), + "originName": autorest.Encode("path", originName), + "profileName": autorest.Encode("path", profileName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cdn/profiles/{profileName}/endpoints/{endpointName}/origins/{originName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// 
DeleteIfExistsSender sends the DeleteIfExists request. The method will close the +// http.Response Body if it receives an error. +func (client OriginsClient) DeleteIfExistsSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// DeleteIfExistsResponder handles the response to the DeleteIfExists request. The method always +// closes the http.Response Body. +func (client OriginsClient) DeleteIfExistsResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get sends the get request. +// +// originName is name of the origin, an arbitrary value but it needs to be +// unique under endpoint endpointName is name of the endpoint within the CDN +// profile. profileName is name of the CDN profile within the resource group. +// resourceGroupName is name of the resource group within the Azure +// subscription. +func (client OriginsClient) Get(originName string, endpointName string, profileName string, resourceGroupName string) (result Origin, err error) { + req, err := client.GetPreparer(originName, endpointName, profileName, resourceGroupName) + if err != nil { + return result, autorest.NewErrorWithError(err, "cdn.OriginsClient", "Get", nil, "Failure preparing request") + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "cdn.OriginsClient", "Get", resp, "Failure sending request") + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "cdn.OriginsClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. 
+func (client OriginsClient) GetPreparer(originName string, endpointName string, profileName string, resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "endpointName": autorest.Encode("path", endpointName), + "originName": autorest.Encode("path", originName), + "profileName": autorest.Encode("path", profileName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cdn/profiles/{profileName}/endpoints/{endpointName}/origins/{originName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client OriginsClient) GetSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client OriginsClient) GetResponder(resp *http.Response) (result Origin, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListByEndpoint sends the list by endpoint request. +// +// endpointName is name of the endpoint within the CDN profile. profileName is +// name of the CDN profile within the resource group. resourceGroupName is +// name of the resource group within the Azure subscription. 
+func (client OriginsClient) ListByEndpoint(endpointName string, profileName string, resourceGroupName string) (result OriginListResult, err error) { + req, err := client.ListByEndpointPreparer(endpointName, profileName, resourceGroupName) + if err != nil { + return result, autorest.NewErrorWithError(err, "cdn.OriginsClient", "ListByEndpoint", nil, "Failure preparing request") + } + + resp, err := client.ListByEndpointSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "cdn.OriginsClient", "ListByEndpoint", resp, "Failure sending request") + } + + result, err = client.ListByEndpointResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "cdn.OriginsClient", "ListByEndpoint", resp, "Failure responding to request") + } + + return +} + +// ListByEndpointPreparer prepares the ListByEndpoint request. +func (client OriginsClient) ListByEndpointPreparer(endpointName string, profileName string, resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "endpointName": autorest.Encode("path", endpointName), + "profileName": autorest.Encode("path", profileName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cdn/profiles/{profileName}/endpoints/{endpointName}/origins", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListByEndpointSender sends the ListByEndpoint request. The method will close the +// http.Response Body if it receives an error. 
+func (client OriginsClient) ListByEndpointSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListByEndpointResponder handles the response to the ListByEndpoint request. The method always +// closes the http.Response Body. +func (client OriginsClient) ListByEndpointResponder(resp *http.Response) (result OriginListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Update sends the update request. This method may poll for completion. +// Polling can be canceled by passing the cancel channel argument. The +// channel will be used to cancel polling and any outstanding HTTP requests. +// +// originName is name of the origin. Must be unique within endpoint. +// originProperties is origin properties endpointName is name of the endpoint +// within the CDN profile. profileName is name of the CDN profile within the +// resource group. resourceGroupName is name of the resource group within the +// Azure subscription. 
+func (client OriginsClient) Update(originName string, originProperties OriginParameters, endpointName string, profileName string, resourceGroupName string, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.UpdatePreparer(originName, originProperties, endpointName, profileName, resourceGroupName, cancel) + if err != nil { + return result, autorest.NewErrorWithError(err, "cdn.OriginsClient", "Update", nil, "Failure preparing request") + } + + resp, err := client.UpdateSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "cdn.OriginsClient", "Update", resp, "Failure sending request") + } + + result, err = client.UpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "cdn.OriginsClient", "Update", resp, "Failure responding to request") + } + + return +} + +// UpdatePreparer prepares the Update request. +func (client OriginsClient) UpdatePreparer(originName string, originProperties OriginParameters, endpointName string, profileName string, resourceGroupName string, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "endpointName": autorest.Encode("path", endpointName), + "originName": autorest.Encode("path", originName), + "profileName": autorest.Encode("path", profileName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPatch(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cdn/profiles/{profileName}/endpoints/{endpointName}/origins/{originName}", pathParameters), + autorest.WithJSON(originProperties), + autorest.WithQueryParameters(queryParameters)) + 
return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// UpdateSender sends the Update request. The method will close the +// http.Response Body if it receives an error. +func (client OriginsClient) UpdateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// UpdateResponder handles the response to the Update request. The method always +// closes the http.Response Body. +func (client OriginsClient) UpdateResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/cdn/profiles.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/cdn/profiles.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/cdn/profiles.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/cdn/profiles.go 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,501 @@ +package cdn + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "net/http" +) + +// ProfilesClient is the use these APIs to manage Azure CDN resources through +// the Azure Resource Manager. You must make sure that requests made to these +// resources are secure. For more information, see Authenticating +// Azure Resource Manager requests. +type ProfilesClient struct { + ManagementClient +} + +// NewProfilesClient creates an instance of the ProfilesClient client. +func NewProfilesClient(subscriptionID string) ProfilesClient { + return NewProfilesClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewProfilesClientWithBaseURI creates an instance of the ProfilesClient +// client. +func NewProfilesClientWithBaseURI(baseURI string, subscriptionID string) ProfilesClient { + return ProfilesClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// Create sends the create request. This method may poll for completion. +// Polling can be canceled by passing the cancel channel argument. The +// channel will be used to cancel polling and any outstanding HTTP requests. +// +// profileName is name of the CDN profile within the resource group. +// profileProperties is profile properties needed for creation. +// resourceGroupName is name of the resource group within the Azure +// subscription. 
+func (client ProfilesClient) Create(profileName string, profileProperties ProfileCreateParameters, resourceGroupName string, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.CreatePreparer(profileName, profileProperties, resourceGroupName, cancel) + if err != nil { + return result, autorest.NewErrorWithError(err, "cdn.ProfilesClient", "Create", nil, "Failure preparing request") + } + + resp, err := client.CreateSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "cdn.ProfilesClient", "Create", resp, "Failure sending request") + } + + result, err = client.CreateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "cdn.ProfilesClient", "Create", resp, "Failure responding to request") + } + + return +} + +// CreatePreparer prepares the Create request. +func (client ProfilesClient) CreatePreparer(profileName string, profileProperties ProfileCreateParameters, resourceGroupName string, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "profileName": autorest.Encode("path", profileName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cdn/profiles/{profileName}", pathParameters), + autorest.WithJSON(profileProperties), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// CreateSender sends the Create request. The method will close the +// http.Response Body if it receives an error. 
+func (client ProfilesClient) CreateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// CreateResponder handles the response to the Create request. The method always +// closes the http.Response Body. +func (client ProfilesClient) CreateResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} + +// DeleteIfExists sends the delete if exists request. This method may poll for +// completion. Polling can be canceled by passing the cancel channel +// argument. The channel will be used to cancel polling and any outstanding +// HTTP requests. +// +// profileName is name of the CDN profile within the resource group. +// resourceGroupName is name of the resource group within the Azure +// subscription. +func (client ProfilesClient) DeleteIfExists(profileName string, resourceGroupName string, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.DeleteIfExistsPreparer(profileName, resourceGroupName, cancel) + if err != nil { + return result, autorest.NewErrorWithError(err, "cdn.ProfilesClient", "DeleteIfExists", nil, "Failure preparing request") + } + + resp, err := client.DeleteIfExistsSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "cdn.ProfilesClient", "DeleteIfExists", resp, "Failure sending request") + } + + result, err = client.DeleteIfExistsResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "cdn.ProfilesClient", "DeleteIfExists", resp, "Failure responding to request") + } + + return +} + +// DeleteIfExistsPreparer prepares the DeleteIfExists request. 
+func (client ProfilesClient) DeleteIfExistsPreparer(profileName string, resourceGroupName string, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "profileName": autorest.Encode("path", profileName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cdn/profiles/{profileName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// DeleteIfExistsSender sends the DeleteIfExists request. The method will close the +// http.Response Body if it receives an error. +func (client ProfilesClient) DeleteIfExistsSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// DeleteIfExistsResponder handles the response to the DeleteIfExists request. The method always +// closes the http.Response Body. +func (client ProfilesClient) DeleteIfExistsResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// GenerateSsoURI sends the generate sso uri request. +// +// profileName is name of the CDN profile within the resource group. +// resourceGroupName is name of the resource group within the Azure +// subscription. 
+func (client ProfilesClient) GenerateSsoURI(profileName string, resourceGroupName string) (result SsoURI, err error) { + req, err := client.GenerateSsoURIPreparer(profileName, resourceGroupName) + if err != nil { + return result, autorest.NewErrorWithError(err, "cdn.ProfilesClient", "GenerateSsoURI", nil, "Failure preparing request") + } + + resp, err := client.GenerateSsoURISender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "cdn.ProfilesClient", "GenerateSsoURI", resp, "Failure sending request") + } + + result, err = client.GenerateSsoURIResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "cdn.ProfilesClient", "GenerateSsoURI", resp, "Failure responding to request") + } + + return +} + +// GenerateSsoURIPreparer prepares the GenerateSsoURI request. +func (client ProfilesClient) GenerateSsoURIPreparer(profileName string, resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "profileName": autorest.Encode("path", profileName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cdn/profiles/{profileName}/generateSsoUri", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GenerateSsoURISender sends the GenerateSsoURI request. The method will close the +// http.Response Body if it receives an error. 
+func (client ProfilesClient) GenerateSsoURISender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GenerateSsoURIResponder handles the response to the GenerateSsoURI request. The method always +// closes the http.Response Body. +func (client ProfilesClient) GenerateSsoURIResponder(resp *http.Response) (result SsoURI, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Get sends the get request. +// +// profileName is name of the CDN profile within the resource group. +// resourceGroupName is name of the resource group within the Azure +// subscription. +func (client ProfilesClient) Get(profileName string, resourceGroupName string) (result Profile, err error) { + req, err := client.GetPreparer(profileName, resourceGroupName) + if err != nil { + return result, autorest.NewErrorWithError(err, "cdn.ProfilesClient", "Get", nil, "Failure preparing request") + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "cdn.ProfilesClient", "Get", resp, "Failure sending request") + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "cdn.ProfilesClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. 
+func (client ProfilesClient) GetPreparer(profileName string, resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "profileName": autorest.Encode("path", profileName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cdn/profiles/{profileName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client ProfilesClient) GetSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client ProfilesClient) GetResponder(resp *http.Response) (result Profile, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListByResourceGroup sends the list by resource group request. +// +// resourceGroupName is name of the resource group within the Azure +// subscription. 
+func (client ProfilesClient) ListByResourceGroup(resourceGroupName string) (result ProfileListResult, err error) { + req, err := client.ListByResourceGroupPreparer(resourceGroupName) + if err != nil { + return result, autorest.NewErrorWithError(err, "cdn.ProfilesClient", "ListByResourceGroup", nil, "Failure preparing request") + } + + resp, err := client.ListByResourceGroupSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "cdn.ProfilesClient", "ListByResourceGroup", resp, "Failure sending request") + } + + result, err = client.ListByResourceGroupResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "cdn.ProfilesClient", "ListByResourceGroup", resp, "Failure responding to request") + } + + return +} + +// ListByResourceGroupPreparer prepares the ListByResourceGroup request. +func (client ProfilesClient) ListByResourceGroupPreparer(resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cdn/profiles", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the +// http.Response Body if it receives an error. +func (client ProfilesClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListByResourceGroupResponder handles the response to the ListByResourceGroup request. 
The method always +// closes the http.Response Body. +func (client ProfilesClient) ListByResourceGroupResponder(resp *http.Response) (result ProfileListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListBySubscriptionID sends the list by subscription id request. +func (client ProfilesClient) ListBySubscriptionID() (result ProfileListResult, err error) { + req, err := client.ListBySubscriptionIDPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "cdn.ProfilesClient", "ListBySubscriptionID", nil, "Failure preparing request") + } + + resp, err := client.ListBySubscriptionIDSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "cdn.ProfilesClient", "ListBySubscriptionID", resp, "Failure sending request") + } + + result, err = client.ListBySubscriptionIDResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "cdn.ProfilesClient", "ListBySubscriptionID", resp, "Failure responding to request") + } + + return +} + +// ListBySubscriptionIDPreparer prepares the ListBySubscriptionID request. 
+func (client ProfilesClient) ListBySubscriptionIDPreparer() (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Cdn/profiles", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListBySubscriptionIDSender sends the ListBySubscriptionID request. The method will close the +// http.Response Body if it receives an error. +func (client ProfilesClient) ListBySubscriptionIDSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListBySubscriptionIDResponder handles the response to the ListBySubscriptionID request. The method always +// closes the http.Response Body. +func (client ProfilesClient) ListBySubscriptionIDResponder(resp *http.Response) (result ProfileListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Update sends the update request. This method may poll for completion. +// Polling can be canceled by passing the cancel channel argument. The +// channel will be used to cancel polling and any outstanding HTTP requests. +// +// profileName is name of the CDN profile within the resource group. +// profileProperties is profile properties needed for update. +// resourceGroupName is name of the resource group within the Azure +// subscription. 
+func (client ProfilesClient) Update(profileName string, profileProperties ProfileUpdateParameters, resourceGroupName string, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.UpdatePreparer(profileName, profileProperties, resourceGroupName, cancel) + if err != nil { + return result, autorest.NewErrorWithError(err, "cdn.ProfilesClient", "Update", nil, "Failure preparing request") + } + + resp, err := client.UpdateSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "cdn.ProfilesClient", "Update", resp, "Failure sending request") + } + + result, err = client.UpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "cdn.ProfilesClient", "Update", resp, "Failure responding to request") + } + + return +} + +// UpdatePreparer prepares the Update request. +func (client ProfilesClient) UpdatePreparer(profileName string, profileProperties ProfileUpdateParameters, resourceGroupName string, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "profileName": autorest.Encode("path", profileName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPatch(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cdn/profiles/{profileName}", pathParameters), + autorest.WithJSON(profileProperties), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// UpdateSender sends the Update request. The method will close the +// http.Response Body if it receives an error. 
+func (client ProfilesClient) UpdateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// UpdateResponder handles the response to the Update request. The method always +// closes the http.Response Body. +func (client ProfilesClient) UpdateResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/cdn/version.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/cdn/version.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/cdn/version.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/cdn/version.go 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,43 @@ +package cdn + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. 
+ +import ( + "fmt" +) + +const ( + major = "3" + minor = "1" + patch = "0" + // Always begin a "tag" with a dash (as per http://semver.org) + tag = "-beta" + semVerFormat = "%s.%s.%s%s" + userAgentFormat = "Azure-SDK-for-Go/%s arm-%s/%s" +) + +// UserAgent returns the UserAgent string to use when sending http.Requests. +func UserAgent() string { + return fmt.Sprintf(userAgentFormat, Version(), "cdn", "2016-04-02") +} + +// Version returns the semantic version (see http://semver.org) of the client. +func Version() string { + return fmt.Sprintf(semVerFormat, major, minor, patch, tag) +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/CHANGELOG.md juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/CHANGELOG.md --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/CHANGELOG.md 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/CHANGELOG.md 1970-01-01 00:00:00.000000000 +0000 @@ -1,74 +0,0 @@ -# CHANGELOG - ------ - -## `v0.3.0-beta` - -- Corrected unintentional struct field renaming and client renaming in v0.2.0-beta - ------ - -## `v0.2.0-beta` - -- Added support for DNS, Redis, and Web site services -- Updated Storage service to API version 2015-06-15 -- Updated Network to include routing table support -- Address https://github.com/Azure/azure-sdk-for-go/issues/232 -- Address https://github.com/Azure/azure-sdk-for-go/issues/231 -- Address https://github.com/Azure/azure-sdk-for-go/issues/230 -- Address https://github.com/Azure/azure-sdk-for-go/issues/224 -- Address https://github.com/Azure/azure-sdk-for-go/issues/184 -- Address https://github.com/Azure/azure-sdk-for-go/issues/183 - ------- - -## `v0.1.1-beta` - -- Improves the UserAgent string to disambiguate arm packages from others in the SDK -- Improves setting the http.Response into generated results (reduces likelihood of a nil reference) -- Adds gofmt, golint, and govet to Travis CI for the arm packages - -##### Fixed 
Issues - -- https://github.com/Azure/azure-sdk-for-go/issues/196 -- https://github.com/Azure/azure-sdk-for-go/issues/213 - ------- - -## v0.1.0-beta - -This release addresses the issues raised against the alpha release and adds more features. Most -notably, to address the challenges of encoding JSON -(see the [comments](https://github.com/Azure/go-autorest#handling-empty-values) in the -[go-autorest](https://github.com/Azure/go-autorest) package) by using pointers for *all* structure -fields (with the exception of enumerations). The -[go-autorest/autorest/to](https://github.com/Azure/go-autorest/tree/master/autorest/to) package -provides helpers to convert to / from pointers. The examples demonstrate their usage. - -Additionally, the packages now align with Go coding standards and pass both `golint` and `govet`. -Accomplishing this required renaming various fields and parameters (such as changing Url to URL). - -##### Changes - -- Changed request / response structures to use pointer fields. -- Changed methods to return `error` instead of `autorest.Error`. -- Re-divided methods to ease asynchronous requests. -- Added paged results support. -- Added a UserAgent string. -- Added changes necessary to pass golint and govet. -- Updated README.md with details on asynchronous requests and paging. -- Saved package dependencies through Godep (for the entire SDK). - -##### Fixed Issues: - -- https://github.com/Azure/azure-sdk-for-go/issues/205 -- https://github.com/Azure/azure-sdk-for-go/issues/206 -- https://github.com/Azure/azure-sdk-for-go/issues/211 -- https://github.com/Azure/azure-sdk-for-go/issues/212 - ------ - -## v0.1.0-alpha - -This release introduces the Azure Resource Manager packages generated from the corresponding -[Swagger API](http://swagger.io) [definitions](https://github.com/Azure/azure-rest-api-specs). 
diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/cognitiveservices/accounts.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/cognitiveservices/accounts.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/cognitiveservices/accounts.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/cognitiveservices/accounts.go 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,636 @@ +package cognitiveservices + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "net/http" +) + +// AccountsClient is the cognitive Services Management Client +type AccountsClient struct { + ManagementClient +} + +// NewAccountsClient creates an instance of the AccountsClient client. +func NewAccountsClient(subscriptionID string) AccountsClient { + return NewAccountsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewAccountsClientWithBaseURI creates an instance of the AccountsClient +// client. 
+func NewAccountsClientWithBaseURI(baseURI string, subscriptionID string) AccountsClient { + return AccountsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// Create create Cognitive Services Account. Accounts is a resource group wide +// resource type. It holds the keys for developer to access intelligent APIs. +// It's also the resource type for billing. +// +// resourceGroupName is the name of the resource group within the user's +// subscription. accountName is the name of the cognitive services account +// within the specified resource group. Cognitive Services account names must +// be between 3 and 24 characters in length and use numbers and lower-case +// letters only. parameters is the parameters to provide for the created +// account. +func (client AccountsClient) Create(resourceGroupName string, accountName string, parameters AccountCreateParameters) (result Account, err error) { + req, err := client.CreatePreparer(resourceGroupName, accountName, parameters) + if err != nil { + return result, autorest.NewErrorWithError(err, "cognitiveservices.AccountsClient", "Create", nil, "Failure preparing request") + } + + resp, err := client.CreateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "cognitiveservices.AccountsClient", "Create", resp, "Failure sending request") + } + + result, err = client.CreateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "cognitiveservices.AccountsClient", "Create", resp, "Failure responding to request") + } + + return +} + +// CreatePreparer prepares the Create request. 
+func (client AccountsClient) CreatePreparer(resourceGroupName string, accountName string, parameters AccountCreateParameters) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CognitiveServices/accounts/{accountName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// CreateSender sends the Create request. The method will close the +// http.Response Body if it receives an error. +func (client AccountsClient) CreateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// CreateResponder handles the response to the Create request. The method always +// closes the http.Response Body. +func (client AccountsClient) CreateResponder(resp *http.Response) (result Account, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete deletes a Cognitive Services account from the resource group. +// +// resourceGroupName is the name of the resource group within the user's +// subscription. accountName is the name of the cognitive services account +// within the specified resource group. 
Cognitive Services account names must +// be between 3 and 24 characters in length and use numbers and lower-case +// letters only. +func (client AccountsClient) Delete(resourceGroupName string, accountName string) (result autorest.Response, err error) { + req, err := client.DeletePreparer(resourceGroupName, accountName) + if err != nil { + return result, autorest.NewErrorWithError(err, "cognitiveservices.AccountsClient", "Delete", nil, "Failure preparing request") + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "cognitiveservices.AccountsClient", "Delete", resp, "Failure sending request") + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "cognitiveservices.AccountsClient", "Delete", resp, "Failure responding to request") + } + + return +} + +// DeletePreparer prepares the Delete request. +func (client AccountsClient) DeletePreparer(resourceGroupName string, accountName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CognitiveServices/accounts/{accountName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. 
+func (client AccountsClient) DeleteSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client AccountsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// GetProperties returns a Cognitive Services account specified by the +// parameters. +// +// resourceGroupName is the name of the resource group within the user's +// subscription. accountName is the name of the cognitive services account +// within the specified resource group. Cognitive Services account names must +// be between 3 and 24 characters in length and use numbers and lower-case +// letters only. +func (client AccountsClient) GetProperties(resourceGroupName string, accountName string) (result Account, err error) { + req, err := client.GetPropertiesPreparer(resourceGroupName, accountName) + if err != nil { + return result, autorest.NewErrorWithError(err, "cognitiveservices.AccountsClient", "GetProperties", nil, "Failure preparing request") + } + + resp, err := client.GetPropertiesSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "cognitiveservices.AccountsClient", "GetProperties", resp, "Failure sending request") + } + + result, err = client.GetPropertiesResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "cognitiveservices.AccountsClient", "GetProperties", resp, "Failure responding to request") + } + + return +} + +// GetPropertiesPreparer prepares the GetProperties request. 
+func (client AccountsClient) GetPropertiesPreparer(resourceGroupName string, accountName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CognitiveServices/accounts/{accountName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetPropertiesSender sends the GetProperties request. The method will close the +// http.Response Body if it receives an error. +func (client AccountsClient) GetPropertiesSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetPropertiesResponder handles the response to the GetProperties request. The method always +// closes the http.Response Body. +func (client AccountsClient) GetPropertiesResponder(resp *http.Response) (result Account, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List returns all the resources of a particular type belonging to a +// subscription. 
+func (client AccountsClient) List() (result AccountListResult, err error) { + req, err := client.ListPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "cognitiveservices.AccountsClient", "List", nil, "Failure preparing request") + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "cognitiveservices.AccountsClient", "List", resp, "Failure sending request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "cognitiveservices.AccountsClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client AccountsClient) ListPreparer() (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.CognitiveServices/accounts", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client AccountsClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. 
+func (client AccountsClient) ListResponder(resp *http.Response) (result AccountListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListByResourceGroup returns all the resources of a particular type +// belonging to a resource group +// +// resourceGroupName is the name of the resource group within the user's +// subscription. +func (client AccountsClient) ListByResourceGroup(resourceGroupName string) (result AccountListResult, err error) { + req, err := client.ListByResourceGroupPreparer(resourceGroupName) + if err != nil { + return result, autorest.NewErrorWithError(err, "cognitiveservices.AccountsClient", "ListByResourceGroup", nil, "Failure preparing request") + } + + resp, err := client.ListByResourceGroupSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "cognitiveservices.AccountsClient", "ListByResourceGroup", resp, "Failure sending request") + } + + result, err = client.ListByResourceGroupResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "cognitiveservices.AccountsClient", "ListByResourceGroup", resp, "Failure responding to request") + } + + return +} + +// ListByResourceGroupPreparer prepares the ListByResourceGroup request. 
+func (client AccountsClient) ListByResourceGroupPreparer(resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CognitiveServices/accounts", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the +// http.Response Body if it receives an error. +func (client AccountsClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always +// closes the http.Response Body. +func (client AccountsClient) ListByResourceGroupResponder(resp *http.Response) (result AccountListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListKeys lists the account keys for the specified Cognitive Services +// account. +// +// resourceGroupName is the name of the resource group within the user's +// subscription. accountName is the name of the cognitive services account +// within the specified resource group. Congitive Services account names must +// be between 3 and 24 characters in length and use numbers and lower-case +// letters only. 
+func (client AccountsClient) ListKeys(resourceGroupName string, accountName string) (result AccountKeys, err error) { + req, err := client.ListKeysPreparer(resourceGroupName, accountName) + if err != nil { + return result, autorest.NewErrorWithError(err, "cognitiveservices.AccountsClient", "ListKeys", nil, "Failure preparing request") + } + + resp, err := client.ListKeysSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "cognitiveservices.AccountsClient", "ListKeys", resp, "Failure sending request") + } + + result, err = client.ListKeysResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "cognitiveservices.AccountsClient", "ListKeys", resp, "Failure responding to request") + } + + return +} + +// ListKeysPreparer prepares the ListKeys request. +func (client AccountsClient) ListKeysPreparer(resourceGroupName string, accountName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CognitiveServices/accounts/{accountName}/listKeys", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListKeysSender sends the ListKeys request. The method will close the +// http.Response Body if it receives an error. 
+func (client AccountsClient) ListKeysSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListKeysResponder handles the response to the ListKeys request. The method always +// closes the http.Response Body. +func (client AccountsClient) ListKeysResponder(resp *http.Response) (result AccountKeys, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListSkus list available SKUs for the requested Cognitive Services account +// +// resourceGroupName is the name of the resource group within the user's +// subscription. accountName is the name of the cognitive services account +// within the specified resource group. Cognitive Services account names must +// be between 3 and 24 characters in length and use numbers and lower-case +// letters only. +func (client AccountsClient) ListSkus(resourceGroupName string, accountName string) (result AccountEnumerateSkusResult, err error) { + req, err := client.ListSkusPreparer(resourceGroupName, accountName) + if err != nil { + return result, autorest.NewErrorWithError(err, "cognitiveservices.AccountsClient", "ListSkus", nil, "Failure preparing request") + } + + resp, err := client.ListSkusSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "cognitiveservices.AccountsClient", "ListSkus", resp, "Failure sending request") + } + + result, err = client.ListSkusResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "cognitiveservices.AccountsClient", "ListSkus", resp, "Failure responding to request") + } + + return +} + +// ListSkusPreparer prepares the ListSkus request. 
+func (client AccountsClient) ListSkusPreparer(resourceGroupName string, accountName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CognitiveServices/accounts/{accountName}/skus", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListSkusSender sends the ListSkus request. The method will close the +// http.Response Body if it receives an error. +func (client AccountsClient) ListSkusSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListSkusResponder handles the response to the ListSkus request. The method always +// closes the http.Response Body. +func (client AccountsClient) ListSkusResponder(resp *http.Response) (result AccountEnumerateSkusResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// RegenerateKey regenerates the specified account key for the specified +// Cognitive Services account. +// +// resourceGroupName is the name of the resource group within the user's +// subscription. accountName is the name of the cognitive services account +// within the specified resource group. Cognitive Services account names must +// be between 3 and 24 characters in length and use numbers and lower-case +// letters only. 
body is regenerate key parameters. +func (client AccountsClient) RegenerateKey(resourceGroupName string, accountName string, body RegenerateKeyParameters) (result AccountKeys, err error) { + req, err := client.RegenerateKeyPreparer(resourceGroupName, accountName, body) + if err != nil { + return result, autorest.NewErrorWithError(err, "cognitiveservices.AccountsClient", "RegenerateKey", nil, "Failure preparing request") + } + + resp, err := client.RegenerateKeySender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "cognitiveservices.AccountsClient", "RegenerateKey", resp, "Failure sending request") + } + + result, err = client.RegenerateKeyResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "cognitiveservices.AccountsClient", "RegenerateKey", resp, "Failure responding to request") + } + + return +} + +// RegenerateKeyPreparer prepares the RegenerateKey request. +func (client AccountsClient) RegenerateKeyPreparer(resourceGroupName string, accountName string, body RegenerateKeyParameters) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CognitiveServices/accounts/{accountName}/regenerateKey", pathParameters), + autorest.WithJSON(body), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// RegenerateKeySender sends the RegenerateKey request. 
The method will close the +// http.Response Body if it receives an error. +func (client AccountsClient) RegenerateKeySender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// RegenerateKeyResponder handles the response to the RegenerateKey request. The method always +// closes the http.Response Body. +func (client AccountsClient) RegenerateKeyResponder(resp *http.Response) (result AccountKeys, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Update updates a Cognitive Services account +// +// resourceGroupName is the name of the resource group within the user's +// subscription. accountName is the name of the cognitive services account +// within the specified resource group. Cognitive Services account names must +// be between 3 and 24 characters in length and use numbers and lower-case +// letters only. body is the parameters to provide for the created account. 
+func (client AccountsClient) Update(resourceGroupName string, accountName string, body AccountUpdateParameters) (result Account, err error) { + req, err := client.UpdatePreparer(resourceGroupName, accountName, body) + if err != nil { + return result, autorest.NewErrorWithError(err, "cognitiveservices.AccountsClient", "Update", nil, "Failure preparing request") + } + + resp, err := client.UpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "cognitiveservices.AccountsClient", "Update", resp, "Failure sending request") + } + + result, err = client.UpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "cognitiveservices.AccountsClient", "Update", resp, "Failure responding to request") + } + + return +} + +// UpdatePreparer prepares the Update request. +func (client AccountsClient) UpdatePreparer(resourceGroupName string, accountName string, body AccountUpdateParameters) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPatch(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CognitiveServices/accounts/{accountName}", pathParameters), + autorest.WithJSON(body), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// UpdateSender sends the Update request. The method will close the +// http.Response Body if it receives an error. 
+func (client AccountsClient) UpdateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// UpdateResponder handles the response to the Update request. The method always +// closes the http.Response Body. +func (client AccountsClient) UpdateResponder(resp *http.Response) (result Account, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/cognitiveservices/client.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/cognitiveservices/client.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/cognitiveservices/client.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/cognitiveservices/client.go 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,58 @@ +// Package cognitiveservices implements the Azure ARM Cognitiveservices +// service API version 2016-02-01-preview. +// +// Cognitive Services Management Client +package cognitiveservices + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" +) + +const ( + // APIVersion is the version of the Cognitiveservices + APIVersion = "2016-02-01-preview" + + // DefaultBaseURI is the default URI used for the service Cognitiveservices + DefaultBaseURI = "https://management.azure.com" +) + +// ManagementClient is the base client for Cognitiveservices. +type ManagementClient struct { + autorest.Client + BaseURI string + APIVersion string + SubscriptionID string +} + +// New creates an instance of the ManagementClient client. +func New(subscriptionID string) ManagementClient { + return NewWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewWithBaseURI creates an instance of the ManagementClient client. +func NewWithBaseURI(baseURI string, subscriptionID string) ManagementClient { + return ManagementClient{ + Client: autorest.NewClientWithUserAgent(UserAgent()), + BaseURI: baseURI, + APIVersion: APIVersion, + SubscriptionID: subscriptionID, + } +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/cognitiveservices/models.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/cognitiveservices/models.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/cognitiveservices/models.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/cognitiveservices/models.go 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,184 @@ +package cognitiveservices + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" +) + +// KeyName enumerates the values for key name. +type KeyName string + +const ( + // Key1 specifies the key 1 state for key name. + Key1 KeyName = "Key1" + // Key2 specifies the key 2 state for key name. + Key2 KeyName = "Key2" +) + +// Kind enumerates the values for kind. +type Kind string + +const ( + // ComputerVision specifies the computer vision state for kind. + ComputerVision Kind = "ComputerVision" + // Emotion specifies the emotion state for kind. + Emotion Kind = "Emotion" + // Face specifies the face state for kind. + Face Kind = "Face" + // LUIS specifies the luis state for kind. + LUIS Kind = "LUIS" + // Recommendations specifies the recommendations state for kind. + Recommendations Kind = "Recommendations" + // Speech specifies the speech state for kind. + Speech Kind = "Speech" + // TextAnalytics specifies the text analytics state for kind. + TextAnalytics Kind = "TextAnalytics" + // WebLM specifies the web lm state for kind. + WebLM Kind = "WebLM" +) + +// ProvisioningState enumerates the values for provisioning state. +type ProvisioningState string + +const ( + // Creating specifies the creating state for provisioning state. + Creating ProvisioningState = "Creating" + // Failed specifies the failed state for provisioning state. 
+ Failed ProvisioningState = "Failed" + // ResolvingDNS specifies the resolving dns state for provisioning state. + ResolvingDNS ProvisioningState = "ResolvingDNS" + // Succeeded specifies the succeeded state for provisioning state. + Succeeded ProvisioningState = "Succeeded" +) + +// SkuName enumerates the values for sku name. +type SkuName string + +const ( + // F0 specifies the f0 state for sku name. + F0 SkuName = "F0" + // S0 specifies the s0 state for sku name. + S0 SkuName = "S0" + // S1 specifies the s1 state for sku name. + S1 SkuName = "S1" + // S2 specifies the s2 state for sku name. + S2 SkuName = "S2" + // S3 specifies the s3 state for sku name. + S3 SkuName = "S3" + // S4 specifies the s4 state for sku name. + S4 SkuName = "S4" +) + +// SkuTier enumerates the values for sku tier. +type SkuTier string + +const ( + // Free specifies the free state for sku tier. + Free SkuTier = "Free" + // Premium specifies the premium state for sku tier. + Premium SkuTier = "Premium" + // Standard specifies the standard state for sku tier. + Standard SkuTier = "Standard" +) + +// Account is cognitive Services Account is an Azure resource representing the +// provisioned account, its type, location and SKU. +type Account struct { + autorest.Response `json:"-"` + Etag *string `json:"etag,omitempty"` + ID *string `json:"id,omitempty"` + Kind *string `json:"kind,omitempty"` + Location *string `json:"location,omitempty"` + Name *string `json:"name,omitempty"` + Properties *AccountProperties `json:"properties,omitempty"` + Sku *Sku `json:"sku,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` + Type *string `json:"type,omitempty"` +} + +// AccountCreateParameters is the parameters to provide for the account. 
+type AccountCreateParameters struct { + Sku *Sku `json:"sku,omitempty"` + Kind Kind `json:"kind,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` + Properties *map[string]interface{} `json:"properties,omitempty"` +} + +// AccountEnumerateSkusResult is the list of cognitive services accounts +// operation response. +type AccountEnumerateSkusResult struct { + autorest.Response `json:"-"` + Value *[]ResourceAndSku `json:"value,omitempty"` +} + +// AccountKeys is the access keys for the cognitive services account. +type AccountKeys struct { + autorest.Response `json:"-"` + Key1 *string `json:"key1,omitempty"` + Key2 *string `json:"key2,omitempty"` +} + +// AccountListResult is the list of cognitive services accounts operation +// response. +type AccountListResult struct { + autorest.Response `json:"-"` + Value *[]Account `json:"value,omitempty"` +} + +// AccountProperties is +type AccountProperties struct { + ProvisioningState ProvisioningState `json:"provisioningState,omitempty"` + Endpoint *string `json:"endpoint,omitempty"` +} + +// AccountUpdateParameters is the parameters to provide for the account. +type AccountUpdateParameters struct { + Sku *Sku `json:"sku,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` +} + +// Error is +type Error struct { + ErrorProperty *ErrorBody `json:"error,omitempty"` +} + +// ErrorBody is +type ErrorBody struct { + Code *string `json:"code,omitempty"` + Message *string `json:"message,omitempty"` +} + +// RegenerateKeyParameters is regenerate key parameters. +type RegenerateKeyParameters struct { + KeyName KeyName `json:"keyName,omitempty"` +} + +// ResourceAndSku is +type ResourceAndSku struct { + ResourceType *string `json:"resourceType,omitempty"` + Sku *Sku `json:"sku,omitempty"` +} + +// Sku is the SKU of the cognitive services account. 
+type Sku struct { + Name SkuName `json:"name,omitempty"` + Tier SkuTier `json:"tier,omitempty"` +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/cognitiveservices/version.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/cognitiveservices/version.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/cognitiveservices/version.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/cognitiveservices/version.go 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,43 @@ +package cognitiveservices + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "fmt" +) + +const ( + major = "3" + minor = "1" + patch = "0" + // Always begin a "tag" with a dash (as per http://semver.org) + tag = "-beta" + semVerFormat = "%s.%s.%s%s" + userAgentFormat = "Azure-SDK-for-Go/%s arm-%s/%s" +) + +// UserAgent returns the UserAgent string to use when sending http.Requests. +func UserAgent() string { + return fmt.Sprintf(userAgentFormat, Version(), "cognitiveservices", "2016-02-01-preview") +} + +// Version returns the semantic version (see http://semver.org) of the client. 
+func Version() string { + return fmt.Sprintf(semVerFormat, major, minor, patch, tag) +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/compute/availabilitysets.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/compute/availabilitysets.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/compute/availabilitysets.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/compute/availabilitysets.go 2016-10-13 14:32:06.000000000 +0000 @@ -14,14 +14,14 @@ // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 // Changes may cause incorrect behavior and will be lost if the code is // regenerated. import ( - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" "net/http" - "net/url" ) // AvailabilitySetsClient is the the Compute Management Client. @@ -46,21 +46,21 @@ // resourceGroupName is the name of the resource group. name is parameters // supplied to the Create Availability Set operation. parameters is // parameters supplied to the Create Availability Set operation. 
-func (client AvailabilitySetsClient) CreateOrUpdate(resourceGroupName string, name string, parameters AvailabilitySet) (result AvailabilitySet, ae error) { +func (client AvailabilitySetsClient) CreateOrUpdate(resourceGroupName string, name string, parameters AvailabilitySet) (result AvailabilitySet, err error) { req, err := client.CreateOrUpdatePreparer(resourceGroupName, name, parameters) if err != nil { - return result, autorest.NewErrorWithError(err, "compute/AvailabilitySetsClient", "CreateOrUpdate", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "compute.AvailabilitySetsClient", "CreateOrUpdate", nil, "Failure preparing request") } resp, err := client.CreateOrUpdateSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "compute/AvailabilitySetsClient", "CreateOrUpdate", "Failure sending request") + return result, autorest.NewErrorWithError(err, "compute.AvailabilitySetsClient", "CreateOrUpdate", resp, "Failure sending request") } result, err = client.CreateOrUpdateResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "compute/AvailabilitySetsClient", "CreateOrUpdate", "Failure responding to request") + err = autorest.NewErrorWithError(err, "compute.AvailabilitySetsClient", "CreateOrUpdate", resp, "Failure responding to request") } return @@ -69,29 +69,29 @@ // CreateOrUpdatePreparer prepares the CreateOrUpdate request. 
func (client AvailabilitySetsClient) CreateOrUpdatePreparer(resourceGroupName string, name string, parameters AvailabilitySet) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, + preparer := autorest.CreatePreparer( autorest.AsJSON(), autorest.AsPut(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/availabilitySets/{name}"), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/availabilitySets/{name}", pathParameters), autorest.WithJSON(parameters), - autorest.WithPathParameters(pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the // http.Response Body if it receives an error. func (client AvailabilitySetsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // CreateOrUpdateResponder handles the response to the CreateOrUpdate request. 
The method always @@ -100,7 +100,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -111,21 +111,21 @@ // // resourceGroupName is the name of the resource group. availabilitySetName is // the name of the availability set. -func (client AvailabilitySetsClient) Delete(resourceGroupName string, availabilitySetName string) (result autorest.Response, ae error) { +func (client AvailabilitySetsClient) Delete(resourceGroupName string, availabilitySetName string) (result autorest.Response, err error) { req, err := client.DeletePreparer(resourceGroupName, availabilitySetName) if err != nil { - return result, autorest.NewErrorWithError(err, "compute/AvailabilitySetsClient", "Delete", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "compute.AvailabilitySetsClient", "Delete", nil, "Failure preparing request") } resp, err := client.DeleteSender(req) if err != nil { result.Response = resp - return result, autorest.NewErrorWithError(err, "compute/AvailabilitySetsClient", "Delete", "Failure sending request") + return result, autorest.NewErrorWithError(err, "compute.AvailabilitySetsClient", "Delete", resp, "Failure sending request") } result, err = client.DeleteResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "compute/AvailabilitySetsClient", "Delete", "Failure responding to request") + err = autorest.NewErrorWithError(err, "compute.AvailabilitySetsClient", "Delete", resp, "Failure responding to request") } return @@ -134,28 +134,27 @@ // DeletePreparer prepares the Delete request. 
func (client AvailabilitySetsClient) DeletePreparer(resourceGroupName string, availabilitySetName string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "availabilitySetName": url.QueryEscape(availabilitySetName), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "availabilitySetName": autorest.Encode("path", availabilitySetName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsDelete(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/availabilitySets/{availabilitySetName}"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/availabilitySets/{availabilitySetName}", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // DeleteSender sends the Delete request. The method will close the // http.Response Body if it receives an error. func (client AvailabilitySetsClient) DeleteSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusNoContent, http.StatusOK) + return autorest.SendWithSender(client, req) } // DeleteResponder handles the response to the Delete request. 
The method always @@ -164,7 +163,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusNoContent, http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), autorest.ByClosing()) result.Response = resp return @@ -174,21 +173,21 @@ // // resourceGroupName is the name of the resource group. availabilitySetName is // the name of the availability set. -func (client AvailabilitySetsClient) Get(resourceGroupName string, availabilitySetName string) (result AvailabilitySet, ae error) { +func (client AvailabilitySetsClient) Get(resourceGroupName string, availabilitySetName string) (result AvailabilitySet, err error) { req, err := client.GetPreparer(resourceGroupName, availabilitySetName) if err != nil { - return result, autorest.NewErrorWithError(err, "compute/AvailabilitySetsClient", "Get", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "compute.AvailabilitySetsClient", "Get", nil, "Failure preparing request") } resp, err := client.GetSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "compute/AvailabilitySetsClient", "Get", "Failure sending request") + return result, autorest.NewErrorWithError(err, "compute.AvailabilitySetsClient", "Get", resp, "Failure sending request") } result, err = client.GetResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "compute/AvailabilitySetsClient", "Get", "Failure responding to request") + err = autorest.NewErrorWithError(err, "compute.AvailabilitySetsClient", "Get", resp, "Failure responding to request") } return @@ -197,28 +196,27 @@ // GetPreparer prepares the Get request. 
func (client AvailabilitySetsClient) GetPreparer(resourceGroupName string, availabilitySetName string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "availabilitySetName": url.QueryEscape(availabilitySetName), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "availabilitySetName": autorest.Encode("path", availabilitySetName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/availabilitySets/{availabilitySetName}"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/availabilitySets/{availabilitySetName}", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client AvailabilitySetsClient) GetSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetResponder handles the response to the Get request. 
The method always @@ -227,7 +225,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -237,21 +235,21 @@ // List the operation to list the availability sets. // // resourceGroupName is the name of the resource group. -func (client AvailabilitySetsClient) List(resourceGroupName string) (result AvailabilitySetListResult, ae error) { +func (client AvailabilitySetsClient) List(resourceGroupName string) (result AvailabilitySetListResult, err error) { req, err := client.ListPreparer(resourceGroupName) if err != nil { - return result, autorest.NewErrorWithError(err, "compute/AvailabilitySetsClient", "List", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "compute.AvailabilitySetsClient", "List", nil, "Failure preparing request") } resp, err := client.ListSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "compute/AvailabilitySetsClient", "List", "Failure sending request") + return result, autorest.NewErrorWithError(err, "compute.AvailabilitySetsClient", "List", resp, "Failure sending request") } result, err = client.ListResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "compute/AvailabilitySetsClient", "List", "Failure responding to request") + err = autorest.NewErrorWithError(err, "compute.AvailabilitySetsClient", "List", resp, "Failure responding to request") } return @@ -260,27 +258,26 @@ // ListPreparer prepares the List request. 
func (client AvailabilitySetsClient) ListPreparer(resourceGroupName string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/availabilitySets"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/availabilitySets", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client AvailabilitySetsClient) ListSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // ListResponder handles the response to the List request. The method always @@ -289,33 +286,33 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} return } -// ListAvailableSizes lists virtual-machine-sizes available to be used for an -// availability set. 
+// ListAvailableSizes lists all available virtual machine sizes that can be +// used to create a new virtual machine in an existing availability set. // // resourceGroupName is the name of the resource group. availabilitySetName is // the name of the availability set. -func (client AvailabilitySetsClient) ListAvailableSizes(resourceGroupName string, availabilitySetName string) (result VirtualMachineSizeListResult, ae error) { +func (client AvailabilitySetsClient) ListAvailableSizes(resourceGroupName string, availabilitySetName string) (result VirtualMachineSizeListResult, err error) { req, err := client.ListAvailableSizesPreparer(resourceGroupName, availabilitySetName) if err != nil { - return result, autorest.NewErrorWithError(err, "compute/AvailabilitySetsClient", "ListAvailableSizes", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "compute.AvailabilitySetsClient", "ListAvailableSizes", nil, "Failure preparing request") } resp, err := client.ListAvailableSizesSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "compute/AvailabilitySetsClient", "ListAvailableSizes", "Failure sending request") + return result, autorest.NewErrorWithError(err, "compute.AvailabilitySetsClient", "ListAvailableSizes", resp, "Failure sending request") } result, err = client.ListAvailableSizesResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "compute/AvailabilitySetsClient", "ListAvailableSizes", "Failure responding to request") + err = autorest.NewErrorWithError(err, "compute.AvailabilitySetsClient", "ListAvailableSizes", resp, "Failure responding to request") } return @@ -324,28 +321,27 @@ // ListAvailableSizesPreparer prepares the ListAvailableSizes request. 
func (client AvailabilitySetsClient) ListAvailableSizesPreparer(resourceGroupName string, availabilitySetName string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "availabilitySetName": url.QueryEscape(availabilitySetName), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "availabilitySetName": autorest.Encode("path", availabilitySetName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/availabilitySets/{availabilitySetName}/vmSizes"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/availabilitySets/{availabilitySetName}/vmSizes", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // ListAvailableSizesSender sends the ListAvailableSizes request. The method will close the // http.Response Body if it receives an error. func (client AvailabilitySetsClient) ListAvailableSizesSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // ListAvailableSizesResponder handles the response to the ListAvailableSizes request. 
The method always @@ -354,7 +350,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/compute/client.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/compute/client.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/compute/client.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/compute/client.go 2016-10-13 14:32:06.000000000 +0000 @@ -1,3 +1,7 @@ +// Package compute implements the Azure ARM Compute service API version +// 2016-03-30. +// +// The Compute Management Client. package compute // Copyright (c) Microsoft and contributors. All rights reserved. @@ -14,26 +18,27 @@ // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 // Changes may cause incorrect behavior and will be lost if the code is // regenerated. import ( - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest" ) const ( // APIVersion is the version of the Compute - APIVersion = "2015-06-15" + APIVersion = "2016-03-30" // DefaultBaseURI is the default URI used for the service Compute DefaultBaseURI = "https://management.azure.com" ) -// ManagementClient is the the Compute Management Client. +// ManagementClient is the base client for Compute. 
type ManagementClient struct { autorest.Client BaseURI string + APIVersion string SubscriptionID string } @@ -47,6 +52,7 @@ return ManagementClient{ Client: autorest.NewClientWithUserAgent(UserAgent()), BaseURI: baseURI, + APIVersion: APIVersion, SubscriptionID: subscriptionID, } } diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/compute/containerservice/client.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/compute/containerservice/client.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/compute/containerservice/client.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/compute/containerservice/client.go 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,44 @@ +// Package compute implements the Azure ARM Compute service API version +// 2016-03-30. +// +// The Container Service Client. +package compute + +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" +) + +const ( + // APIVersion is the version of the Compute + APIVersion = "2016-03-30" + + // DefaultBaseURI is the default URI used for the service Compute + DefaultBaseURI = "https://management.azure.com" +) + +// ManagementClient is the base client for Compute. +type ManagementClient struct { + autorest.Client + BaseURI string + APIVersion string + SubscriptionID string +} + +// New creates an instance of the ManagementClient client. +func New(subscriptionID string) ManagementClient { + return NewWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewWithBaseURI creates an instance of the ManagementClient client. 
+func NewWithBaseURI(baseURI string, subscriptionID string) ManagementClient { + return ManagementClient{ + Client: autorest.NewClientWithUserAgent(UserAgent()), + BaseURI: baseURI, + APIVersion: APIVersion, + SubscriptionID: subscriptionID, + } +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/compute/containerservice/containerserviceoperations.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/compute/containerservice/containerserviceoperations.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/compute/containerservice/containerserviceoperations.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/compute/containerservice/containerserviceoperations.go 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,292 @@ +package compute + +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "net/http" +) + +// ContainerServiceOperationsClient is the the Container Service Client. +type ContainerServiceOperationsClient struct { + ManagementClient +} + +// NewContainerServiceOperationsClient creates an instance of the +// ContainerServiceOperationsClient client. +func NewContainerServiceOperationsClient(subscriptionID string) ContainerServiceOperationsClient { + return NewContainerServiceOperationsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewContainerServiceOperationsClientWithBaseURI creates an instance of the +// ContainerServiceOperationsClient client. +func NewContainerServiceOperationsClientWithBaseURI(baseURI string, subscriptionID string) ContainerServiceOperationsClient { + return ContainerServiceOperationsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate the operation to create or update a container service. 
This +// method may poll for completion. Polling can be canceled by passing the +// cancel channel argument. The channel will be used to cancel polling and +// any outstanding HTTP requests. +// +// resourceGroupName is the name of the resource group. containerServiceName +// is the name of the container service within the given subscription and +// resource group. parameters is parameters supplied to the Create Container +// Service operation. +func (client ContainerServiceOperationsClient) CreateOrUpdate(resourceGroupName string, containerServiceName string, parameters ContainerService, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.CreateOrUpdatePreparer(resourceGroupName, containerServiceName, parameters, cancel) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute.ContainerServiceOperationsClient", "CreateOrUpdate", nil, "Failure preparing request") + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "compute.ContainerServiceOperationsClient", "CreateOrUpdate", resp, "Failure sending request") + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.ContainerServiceOperationsClient", "CreateOrUpdate", resp, "Failure responding to request") + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. 
+func (client ContainerServiceOperationsClient) CreateOrUpdatePreparer(resourceGroupName string, containerServiceName string, parameters ContainerService, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "containerServiceName": autorest.Encode("path", containerServiceName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/containerServices/{containerServiceName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client ContainerServiceOperationsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client ContainerServiceOperationsClient) CreateOrUpdateResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} + +// Delete the operation to delete a container service. This method may poll +// for completion. 
Polling can be canceled by passing the cancel channel +// argument. The channel will be used to cancel polling and any outstanding +// HTTP requests. +// +// resourceGroupName is the name of the resource group. containerServiceName +// is the name of the container service within the given subscription and +// resource group. +func (client ContainerServiceOperationsClient) Delete(resourceGroupName string, containerServiceName string, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.DeletePreparer(resourceGroupName, containerServiceName, cancel) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute.ContainerServiceOperationsClient", "Delete", nil, "Failure preparing request") + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "compute.ContainerServiceOperationsClient", "Delete", resp, "Failure sending request") + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.ContainerServiceOperationsClient", "Delete", resp, "Failure responding to request") + } + + return +} + +// DeletePreparer prepares the Delete request. 
+func (client ContainerServiceOperationsClient) DeletePreparer(resourceGroupName string, containerServiceName string, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "containerServiceName": autorest.Encode("path", containerServiceName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/containerServices/{containerServiceName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client ContainerServiceOperationsClient) DeleteSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client ContainerServiceOperationsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get the operation to get a container service. +// +// resourceGroupName is the name of the resource group. containerServiceName +// is the name of the container service within the given subscription and +// resource group. 
+func (client ContainerServiceOperationsClient) Get(resourceGroupName string, containerServiceName string) (result ContainerService, err error) { + req, err := client.GetPreparer(resourceGroupName, containerServiceName) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute.ContainerServiceOperationsClient", "Get", nil, "Failure preparing request") + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute.ContainerServiceOperationsClient", "Get", resp, "Failure sending request") + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.ContainerServiceOperationsClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client ContainerServiceOperationsClient) GetPreparer(resourceGroupName string, containerServiceName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "containerServiceName": autorest.Encode("path", containerServiceName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/containerServices/{containerServiceName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. 
+func (client ContainerServiceOperationsClient) GetSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client ContainerServiceOperationsClient) GetResponder(resp *http.Response) (result ContainerService, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List the operation to list container services. +// +// resourceGroupName is the name of the resource group. +func (client ContainerServiceOperationsClient) List(resourceGroupName string) (result ContainerServiceListResult, err error) { + req, err := client.ListPreparer(resourceGroupName) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute.ContainerServiceOperationsClient", "List", nil, "Failure preparing request") + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute.ContainerServiceOperationsClient", "List", resp, "Failure sending request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.ContainerServiceOperationsClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. 
+func (client ContainerServiceOperationsClient) ListPreparer(resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/containerServices", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client ContainerServiceOperationsClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. 
+func (client ContainerServiceOperationsClient) ListResponder(resp *http.Response) (result ContainerServiceListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/compute/containerservice/models.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/compute/containerservice/models.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/compute/containerservice/models.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/compute/containerservice/models.go 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,261 @@ +package compute + +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" +) + +// ContainerServiceOchestratorTypes enumerates the values for container +// service ochestrator types. +type ContainerServiceOchestratorTypes string + +const ( + // DCOS specifies the dcos state for container service ochestrator types. + DCOS ContainerServiceOchestratorTypes = "DCOS" + // Swarm specifies the swarm state for container service ochestrator types. + Swarm ContainerServiceOchestratorTypes = "Swarm" +) + +// ContainerServiceVMSizeTypes enumerates the values for container service vm +// size types. +type ContainerServiceVMSizeTypes string + +const ( + // StandardA0 specifies the standard a0 state for container service vm + // size types. + StandardA0 ContainerServiceVMSizeTypes = "Standard_A0" + // StandardA1 specifies the standard a1 state for container service vm + // size types. 
+ StandardA1 ContainerServiceVMSizeTypes = "Standard_A1" + // StandardA10 specifies the standard a10 state for container service vm + // size types. + StandardA10 ContainerServiceVMSizeTypes = "Standard_A10" + // StandardA11 specifies the standard a11 state for container service vm + // size types. + StandardA11 ContainerServiceVMSizeTypes = "Standard_A11" + // StandardA2 specifies the standard a2 state for container service vm + // size types. + StandardA2 ContainerServiceVMSizeTypes = "Standard_A2" + // StandardA3 specifies the standard a3 state for container service vm + // size types. + StandardA3 ContainerServiceVMSizeTypes = "Standard_A3" + // StandardA4 specifies the standard a4 state for container service vm + // size types. + StandardA4 ContainerServiceVMSizeTypes = "Standard_A4" + // StandardA5 specifies the standard a5 state for container service vm + // size types. + StandardA5 ContainerServiceVMSizeTypes = "Standard_A5" + // StandardA6 specifies the standard a6 state for container service vm + // size types. + StandardA6 ContainerServiceVMSizeTypes = "Standard_A6" + // StandardA7 specifies the standard a7 state for container service vm + // size types. + StandardA7 ContainerServiceVMSizeTypes = "Standard_A7" + // StandardA8 specifies the standard a8 state for container service vm + // size types. + StandardA8 ContainerServiceVMSizeTypes = "Standard_A8" + // StandardA9 specifies the standard a9 state for container service vm + // size types. + StandardA9 ContainerServiceVMSizeTypes = "Standard_A9" + // StandardD1 specifies the standard d1 state for container service vm + // size types. + StandardD1 ContainerServiceVMSizeTypes = "Standard_D1" + // StandardD11 specifies the standard d11 state for container service vm + // size types. + StandardD11 ContainerServiceVMSizeTypes = "Standard_D11" + // StandardD11V2 specifies the standard d11v2 state for container service + // vm size types. 
+ StandardD11V2 ContainerServiceVMSizeTypes = "Standard_D11_v2" + // StandardD12 specifies the standard d12 state for container service vm + // size types. + StandardD12 ContainerServiceVMSizeTypes = "Standard_D12" + // StandardD12V2 specifies the standard d12v2 state for container service + // vm size types. + StandardD12V2 ContainerServiceVMSizeTypes = "Standard_D12_v2" + // StandardD13 specifies the standard d13 state for container service vm + // size types. + StandardD13 ContainerServiceVMSizeTypes = "Standard_D13" + // StandardD13V2 specifies the standard d13v2 state for container service + // vm size types. + StandardD13V2 ContainerServiceVMSizeTypes = "Standard_D13_v2" + // StandardD14 specifies the standard d14 state for container service vm + // size types. + StandardD14 ContainerServiceVMSizeTypes = "Standard_D14" + // StandardD14V2 specifies the standard d14v2 state for container service + // vm size types. + StandardD14V2 ContainerServiceVMSizeTypes = "Standard_D14_v2" + // StandardD1V2 specifies the standard d1v2 state for container service vm + // size types. + StandardD1V2 ContainerServiceVMSizeTypes = "Standard_D1_v2" + // StandardD2 specifies the standard d2 state for container service vm + // size types. + StandardD2 ContainerServiceVMSizeTypes = "Standard_D2" + // StandardD2V2 specifies the standard d2v2 state for container service vm + // size types. + StandardD2V2 ContainerServiceVMSizeTypes = "Standard_D2_v2" + // StandardD3 specifies the standard d3 state for container service vm + // size types. + StandardD3 ContainerServiceVMSizeTypes = "Standard_D3" + // StandardD3V2 specifies the standard d3v2 state for container service vm + // size types. + StandardD3V2 ContainerServiceVMSizeTypes = "Standard_D3_v2" + // StandardD4 specifies the standard d4 state for container service vm + // size types. + StandardD4 ContainerServiceVMSizeTypes = "Standard_D4" + // StandardD4V2 specifies the standard d4v2 state for container service vm + // size types. 
+ StandardD4V2 ContainerServiceVMSizeTypes = "Standard_D4_v2" + // StandardD5V2 specifies the standard d5v2 state for container service vm + // size types. + StandardD5V2 ContainerServiceVMSizeTypes = "Standard_D5_v2" + // StandardDS1 specifies the standard ds1 state for container service vm + // size types. + StandardDS1 ContainerServiceVMSizeTypes = "Standard_DS1" + // StandardDS11 specifies the standard ds11 state for container service vm + // size types. + StandardDS11 ContainerServiceVMSizeTypes = "Standard_DS11" + // StandardDS12 specifies the standard ds12 state for container service vm + // size types. + StandardDS12 ContainerServiceVMSizeTypes = "Standard_DS12" + // StandardDS13 specifies the standard ds13 state for container service vm + // size types. + StandardDS13 ContainerServiceVMSizeTypes = "Standard_DS13" + // StandardDS14 specifies the standard ds14 state for container service vm + // size types. + StandardDS14 ContainerServiceVMSizeTypes = "Standard_DS14" + // StandardDS2 specifies the standard ds2 state for container service vm + // size types. + StandardDS2 ContainerServiceVMSizeTypes = "Standard_DS2" + // StandardDS3 specifies the standard ds3 state for container service vm + // size types. + StandardDS3 ContainerServiceVMSizeTypes = "Standard_DS3" + // StandardDS4 specifies the standard ds4 state for container service vm + // size types. + StandardDS4 ContainerServiceVMSizeTypes = "Standard_DS4" + // StandardG1 specifies the standard g1 state for container service vm + // size types. + StandardG1 ContainerServiceVMSizeTypes = "Standard_G1" + // StandardG2 specifies the standard g2 state for container service vm + // size types. + StandardG2 ContainerServiceVMSizeTypes = "Standard_G2" + // StandardG3 specifies the standard g3 state for container service vm + // size types. + StandardG3 ContainerServiceVMSizeTypes = "Standard_G3" + // StandardG4 specifies the standard g4 state for container service vm + // size types. 
+ StandardG4 ContainerServiceVMSizeTypes = "Standard_G4" + // StandardG5 specifies the standard g5 state for container service vm + // size types. + StandardG5 ContainerServiceVMSizeTypes = "Standard_G5" + // StandardGS1 specifies the standard gs1 state for container service vm + // size types. + StandardGS1 ContainerServiceVMSizeTypes = "Standard_GS1" + // StandardGS2 specifies the standard gs2 state for container service vm + // size types. + StandardGS2 ContainerServiceVMSizeTypes = "Standard_GS2" + // StandardGS3 specifies the standard gs3 state for container service vm + // size types. + StandardGS3 ContainerServiceVMSizeTypes = "Standard_GS3" + // StandardGS4 specifies the standard gs4 state for container service vm + // size types. + StandardGS4 ContainerServiceVMSizeTypes = "Standard_GS4" + // StandardGS5 specifies the standard gs5 state for container service vm + // size types. + StandardGS5 ContainerServiceVMSizeTypes = "Standard_GS5" +) + +// ContainerService is container service +type ContainerService struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` + Properties *ContainerServiceProperties `json:"properties,omitempty"` +} + +// ContainerServiceAgentPoolProfile is profile for container service agent pool +type ContainerServiceAgentPoolProfile struct { + Name *string `json:"name,omitempty"` + Count *int32 `json:"count,omitempty"` + VMSize ContainerServiceVMSizeTypes `json:"vmSize,omitempty"` + DNSPrefix *string `json:"dnsPrefix,omitempty"` + Fqdn *string `json:"fqdn,omitempty"` +} + +// ContainerServiceDiagnosticsProfile is +type ContainerServiceDiagnosticsProfile struct { + VMDiagnostics *ContainerServiceVMDiagnostics `json:"vmDiagnostics,omitempty"` +} + +// ContainerServiceLinuxProfile is profile for Linux VMs +type ContainerServiceLinuxProfile struct { 
+ AdminUsername *string `json:"adminUsername,omitempty"` + SSH *ContainerServiceSSHConfiguration `json:"ssh,omitempty"` +} + +// ContainerServiceListResult is the List Container Service operation response +type ContainerServiceListResult struct { + autorest.Response `json:"-"` + Value *[]ContainerService `json:"value,omitempty"` +} + +// ContainerServiceMasterProfile is profile for container service master +type ContainerServiceMasterProfile struct { + Count *int32 `json:"count,omitempty"` + DNSPrefix *string `json:"dnsPrefix,omitempty"` + Fqdn *string `json:"fqdn,omitempty"` +} + +// ContainerServiceOrchestratorProfile is profile for Orchestrator +type ContainerServiceOrchestratorProfile struct { + OrchestratorType ContainerServiceOchestratorTypes `json:"orchestratorType,omitempty"` +} + +// ContainerServiceProperties is properties of container service +type ContainerServiceProperties struct { + ProvisioningState *string `json:"provisioningState,omitempty"` + OrchestratorProfile *ContainerServiceOrchestratorProfile `json:"orchestratorProfile,omitempty"` + MasterProfile *ContainerServiceMasterProfile `json:"masterProfile,omitempty"` + AgentPoolProfiles *[]ContainerServiceAgentPoolProfile `json:"agentPoolProfiles,omitempty"` + WindowsProfile *ContainerServiceWindowsProfile `json:"windowsProfile,omitempty"` + LinuxProfile *ContainerServiceLinuxProfile `json:"linuxProfile,omitempty"` + DiagnosticsProfile *ContainerServiceDiagnosticsProfile `json:"diagnosticsProfile,omitempty"` +} + +// ContainerServiceSSHConfiguration is sSH configuration for Linux based VMs +// running on Azure +type ContainerServiceSSHConfiguration struct { + PublicKeys *[]ContainerServiceSSHPublicKey `json:"publicKeys,omitempty"` +} + +// ContainerServiceSSHPublicKey is contains information about SSH certificate +// public key data. +type ContainerServiceSSHPublicKey struct { + KeyData *string `json:"keyData,omitempty"` +} + +// ContainerServiceVMDiagnostics is describes VM Diagnostics. 
+type ContainerServiceVMDiagnostics struct { + Enabled *bool `json:"enabled,omitempty"` + StorageURI *string `json:"storageUri,omitempty"` +} + +// ContainerServiceWindowsProfile is profile for Windows VMs +type ContainerServiceWindowsProfile struct { + AdminUsername *string `json:"adminUsername,omitempty"` + AdminPassword *string `json:"adminPassword,omitempty"` +} + +// Resource is +type Resource struct { + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/compute/containerservice/version.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/compute/containerservice/version.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/compute/containerservice/version.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/compute/containerservice/version.go 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,29 @@ +package compute + +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "fmt" +) + +const ( + major = "3" + minor = "1" + patch = "0" + // Always begin a "tag" with a dash (as per http://semver.org) + tag = "-beta" + semVerFormat = "%s.%s.%s%s" + userAgentFormat = "Azure-SDK-for-Go/%s arm-%s/%s" +) + +// UserAgent returns the UserAgent string to use when sending http.Requests. +func UserAgent() string { + return fmt.Sprintf(userAgentFormat, Version(), "compute", "2016-03-30") +} + +// Version returns the semantic version (see http://semver.org) of the client. 
+func Version() string { + return fmt.Sprintf(semVerFormat, major, minor, patch, tag) +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/compute/models.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/compute/models.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/compute/models.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/compute/models.go 2016-10-13 14:32:06.000000000 +0000 @@ -14,14 +14,14 @@ // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 // Changes may cause incorrect behavior and will be lost if the code is // regenerated. import ( - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/date" - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/to" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/date" + "github.com/Azure/go-autorest/autorest/to" "net/http" ) @@ -58,6 +58,14 @@ FromImage DiskCreateOptionTypes = "fromImage" ) +// InstanceViewTypes enumerates the values for instance view types. +type InstanceViewTypes string + +const ( + // InstanceView specifies the instance view state for instance view types. + InstanceView InstanceViewTypes = "instanceView" +) + // OperatingSystemTypes enumerates the values for operating system types. type OperatingSystemTypes string @@ -68,36 +76,6 @@ Windows OperatingSystemTypes = "Windows" ) -// OperationStatus enumerates the values for operation status. -type OperationStatus string - -const ( - // Failed specifies the failed state for operation status. 
- Failed OperationStatus = "Failed" - // InProgress specifies the in progress state for operation status. - InProgress OperationStatus = "InProgress" - // Succeeded specifies the succeeded state for operation status. - Succeeded OperationStatus = "Succeeded" -) - -// OperationStatusEnum enumerates the values for operation status enum. -type OperationStatusEnum string - -const ( - // OperationStatusEnumFailed specifies the operation status enum failed - // state for operation status enum. - OperationStatusEnumFailed OperationStatusEnum = "Failed" - // OperationStatusEnumInProgress specifies the operation status enum in - // progress state for operation status enum. - OperationStatusEnumInProgress OperationStatusEnum = "InProgress" - // OperationStatusEnumPreempted specifies the operation status enum - // preempted state for operation status enum. - OperationStatusEnumPreempted OperationStatusEnum = "Preempted" - // OperationStatusEnumSucceeded specifies the operation status enum - // succeeded state for operation status enum. - OperationStatusEnumSucceeded OperationStatusEnum = "Succeeded" -) - // PassNames enumerates the values for pass names. type PassNames string @@ -149,14 +127,6 @@ Manual UpgradeMode = "Manual" ) -// UsageUnit enumerates the values for usage unit. -type UsageUnit string - -const ( - // Count specifies the count state for usage unit. - Count UsageUnit = "Count" -) - // VirtualMachineScaleSetSkuScaleType enumerates the values for virtual // machine scale set sku scale type. type VirtualMachineScaleSetSkuScaleType string @@ -250,6 +220,9 @@ // StandardD14V2 specifies the standard d14v2 state for virtual machine // size types. StandardD14V2 VirtualMachineSizeTypes = "Standard_D14_v2" + // StandardD15V2 specifies the standard d15v2 state for virtual machine + // size types. + StandardD15V2 VirtualMachineSizeTypes = "Standard_D15_v2" // StandardD1V2 specifies the standard d1v2 state for virtual machine size // types. 
StandardD1V2 VirtualMachineSizeTypes = "Standard_D1_v2" @@ -280,24 +253,54 @@ // StandardDS11 specifies the standard ds11 state for virtual machine size // types. StandardDS11 VirtualMachineSizeTypes = "Standard_DS11" + // StandardDS11V2 specifies the standard ds11v2 state for virtual machine + // size types. + StandardDS11V2 VirtualMachineSizeTypes = "Standard_DS11_v2" // StandardDS12 specifies the standard ds12 state for virtual machine size // types. StandardDS12 VirtualMachineSizeTypes = "Standard_DS12" + // StandardDS12V2 specifies the standard ds12v2 state for virtual machine + // size types. + StandardDS12V2 VirtualMachineSizeTypes = "Standard_DS12_v2" // StandardDS13 specifies the standard ds13 state for virtual machine size // types. StandardDS13 VirtualMachineSizeTypes = "Standard_DS13" + // StandardDS13V2 specifies the standard ds13v2 state for virtual machine + // size types. + StandardDS13V2 VirtualMachineSizeTypes = "Standard_DS13_v2" // StandardDS14 specifies the standard ds14 state for virtual machine size // types. StandardDS14 VirtualMachineSizeTypes = "Standard_DS14" + // StandardDS14V2 specifies the standard ds14v2 state for virtual machine + // size types. + StandardDS14V2 VirtualMachineSizeTypes = "Standard_DS14_v2" + // StandardDS15V2 specifies the standard ds15v2 state for virtual machine + // size types. + StandardDS15V2 VirtualMachineSizeTypes = "Standard_DS15_v2" + // StandardDS1V2 specifies the standard ds1v2 state for virtual machine + // size types. + StandardDS1V2 VirtualMachineSizeTypes = "Standard_DS1_v2" // StandardDS2 specifies the standard ds2 state for virtual machine size // types. StandardDS2 VirtualMachineSizeTypes = "Standard_DS2" + // StandardDS2V2 specifies the standard ds2v2 state for virtual machine + // size types. + StandardDS2V2 VirtualMachineSizeTypes = "Standard_DS2_v2" // StandardDS3 specifies the standard ds3 state for virtual machine size // types. 
StandardDS3 VirtualMachineSizeTypes = "Standard_DS3" + // StandardDS3V2 specifies the standard ds3v2 state for virtual machine + // size types. + StandardDS3V2 VirtualMachineSizeTypes = "Standard_DS3_v2" // StandardDS4 specifies the standard ds4 state for virtual machine size // types. StandardDS4 VirtualMachineSizeTypes = "Standard_DS4" + // StandardDS4V2 specifies the standard ds4v2 state for virtual machine + // size types. + StandardDS4V2 VirtualMachineSizeTypes = "Standard_DS4_v2" + // StandardDS5V2 specifies the standard ds5v2 state for virtual machine + // size types. + StandardDS5V2 VirtualMachineSizeTypes = "Standard_DS5_v2" // StandardG1 specifies the standard g1 state for virtual machine size // types. StandardG1 VirtualMachineSizeTypes = "Standard_G1" @@ -330,10 +333,10 @@ StandardGS5 VirtualMachineSizeTypes = "Standard_GS5" ) -// AdditionalUnattendContent is gets or sets additional XML formatted -// information that can be included in the Unattend.xml file, which is used -// by Windows Setup. Contents are defined by setting name, component name, -// and the pass in which the content is a applied. +// AdditionalUnattendContent is additional XML formatted information that can +// be included in the Unattend.xml file, which is used by Windows Setup. +// Contents are defined by setting name, component name, and the pass in +// which the content is a applied. type AdditionalUnattendContent struct { PassName PassNames `json:"passName,omitempty"` ComponentName ComponentNames `json:"componentName,omitempty"` @@ -381,8 +384,8 @@ // AvailabilitySetProperties is the instance view of a resource. 
type AvailabilitySetProperties struct { - PlatformUpdateDomainCount *int `json:"platformUpdateDomainCount,omitempty"` - PlatformFaultDomainCount *int `json:"platformFaultDomainCount,omitempty"` + PlatformUpdateDomainCount *int32 `json:"platformUpdateDomainCount,omitempty"` + PlatformFaultDomainCount *int32 `json:"platformFaultDomainCount,omitempty"` VirtualMachines *[]SubResource `json:"virtualMachines,omitempty"` Statuses *[]InstanceViewStatus `json:"statuses,omitempty"` } @@ -402,27 +405,18 @@ // DataDisk is describes a data disk. type DataDisk struct { - Lun *int `json:"lun,omitempty"` + Lun *int32 `json:"lun,omitempty"` Name *string `json:"name,omitempty"` Vhd *VirtualHardDisk `json:"vhd,omitempty"` Image *VirtualHardDisk `json:"image,omitempty"` Caching CachingTypes `json:"caching,omitempty"` CreateOption DiskCreateOptionTypes `json:"createOption,omitempty"` - DiskSizeGB *int `json:"diskSizeGB,omitempty"` + DiskSizeGB *int32 `json:"diskSizeGB,omitempty"` } // DataDiskImage is contains the data disk images information. type DataDiskImage struct { - Lun *int `json:"lun,omitempty"` -} - -// DeleteOperationResult is the compute long running operation response. -type DeleteOperationResult struct { - OperationID *string `json:"operationId,omitempty"` - Status OperationStatus `json:"status,omitempty"` - StartTime *date.Time `json:"startTime,omitempty"` - EndTime *date.Time `json:"endTime,omitempty"` - Error *APIError `json:"error,omitempty"` + Lun *int32 `json:"lun,omitempty"` } // DiagnosticsProfile is describes a diagnostics profile. @@ -434,6 +428,7 @@ type DiskEncryptionSettings struct { DiskEncryptionKey *KeyVaultSecretReference `json:"diskEncryptionKey,omitempty"` KeyEncryptionKey *KeyVaultKeyReference `json:"keyEncryptionKey,omitempty"` + Enabled *bool `json:"enabled,omitempty"` } // DiskInstanceView is the instance view of the disk. 
@@ -492,6 +487,31 @@ type ListUsagesResult struct { autorest.Response `json:"-"` Value *[]Usage `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// ListUsagesResultPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. +func (client ListUsagesResult) ListUsagesResultPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + +// ListVirtualMachineExtensionImage is +type ListVirtualMachineExtensionImage struct { + autorest.Response `json:"-"` + Value *[]VirtualMachineExtensionImage `json:"value,omitempty"` +} + +// ListVirtualMachineImageResource is +type ListVirtualMachineImageResource struct { + autorest.Response `json:"-"` + Value *[]VirtualMachineImageResource `json:"value,omitempty"` } // LongRunningOperationProperties is compute-specific operation properties, @@ -500,17 +520,6 @@ Output *map[string]interface{} `json:"output,omitempty"` } -// LongRunningOperationResult is the Compute service response for long-running -// operations. -type LongRunningOperationResult struct { - OperationID *string `json:"operationId,omitempty"` - Status OperationStatusEnum `json:"status,omitempty"` - StartTime *date.Time `json:"startTime,omitempty"` - EndTime *date.Time `json:"endTime,omitempty"` - Properties *LongRunningOperationProperties `json:"properties,omitempty"` - Error *APIError `json:"error,omitempty"` -} - // NetworkInterfaceReference is describes a network interface reference. 
type NetworkInterfaceReference struct { ID *string `json:"id,omitempty"` @@ -537,7 +546,7 @@ Image *VirtualHardDisk `json:"image,omitempty"` Caching CachingTypes `json:"caching,omitempty"` CreateOption DiskCreateOptionTypes `json:"createOption,omitempty"` - DiskSizeGB *int `json:"diskSizeGB,omitempty"` + DiskSizeGB *int32 `json:"diskSizeGB,omitempty"` } // OSDiskImage is contains the os disk image information. @@ -585,7 +594,7 @@ type Sku struct { Name *string `json:"name,omitempty"` Tier *string `json:"tier,omitempty"` - Capacity *int32 `json:"capacity,omitempty"` + Capacity *int64 `json:"capacity,omitempty"` } // SSHConfiguration is sSH configuration for Linux based VMs running on Azure @@ -619,9 +628,9 @@ // Usage is describes Compute Resource Usage. type Usage struct { - Unit UsageUnit `json:"unit,omitempty"` - CurrentValue *int `json:"currentValue,omitempty"` - Limit *int32 `json:"limit,omitempty"` + Unit *string `json:"unit,omitempty"` + CurrentValue *int32 `json:"currentValue,omitempty"` + Limit *int64 `json:"limit,omitempty"` Name *UsageName `json:"name,omitempty"` } @@ -714,10 +723,11 @@ type VirtualMachineExtensionImage struct { autorest.Response `json:"-"` ID *string `json:"id,omitempty"` - Properties *VirtualMachineExtensionImageProperties `json:"properties,omitempty"` Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` Location *string `json:"location,omitempty"` Tags *map[string]*string `json:"tags,omitempty"` + Properties *VirtualMachineExtensionImageProperties `json:"properties,omitempty"` } // VirtualMachineExtensionImageProperties is describes the properties of a @@ -743,6 +753,7 @@ // VirtualMachineExtensionProperties is describes the properties of a Virtual // Machine Extension. 
type VirtualMachineExtensionProperties struct { + ForceUpdateTag *string `json:"forceUpdateTag,omitempty"` Publisher *string `json:"publisher,omitempty"` Type *string `json:"type,omitempty"` TypeHandlerVersion *string `json:"typeHandlerVersion,omitempty"` @@ -757,10 +768,10 @@ type VirtualMachineImage struct { autorest.Response `json:"-"` ID *string `json:"id,omitempty"` - Properties *VirtualMachineImageProperties `json:"properties,omitempty"` Name *string `json:"name,omitempty"` Location *string `json:"location,omitempty"` Tags *map[string]*string `json:"tags,omitempty"` + Properties *VirtualMachineImageProperties `json:"properties,omitempty"` } // VirtualMachineImageProperties is describes the properties of a Virtual @@ -779,16 +790,10 @@ Tags *map[string]*string `json:"tags,omitempty"` } -// VirtualMachineImageResourceList is -type VirtualMachineImageResourceList struct { - autorest.Response `json:"-"` - Value *[]VirtualMachineImageResource `json:"value,omitempty"` -} - // VirtualMachineInstanceView is the instance view of a virtual machine. type VirtualMachineInstanceView struct { - PlatformUpdateDomain *int `json:"platformUpdateDomain,omitempty"` - PlatformFaultDomain *int `json:"platformFaultDomain,omitempty"` + PlatformUpdateDomain *int32 `json:"platformUpdateDomain,omitempty"` + PlatformFaultDomain *int32 `json:"platformFaultDomain,omitempty"` RdpThumbPrint *string `json:"rdpThumbPrint,omitempty"` VMAgent *VirtualMachineAgentInstanceView `json:"vmAgent,omitempty"` Disks *[]DiskInstanceView `json:"disks,omitempty"` @@ -826,6 +831,8 @@ AvailabilitySet *SubResource `json:"availabilitySet,omitempty"` ProvisioningState *string `json:"provisioningState,omitempty"` InstanceView *VirtualMachineInstanceView `json:"instanceView,omitempty"` + LicenseType *string `json:"licenseType,omitempty"` + VMID *string `json:"vmId,omitempty"` } // VirtualMachineScaleSet is describes a Virtual Machine Scale Set. 
@@ -892,8 +899,10 @@ // VirtualMachineScaleSetIPConfigurationProperties is describes a virtual // machine scale set network profile's IP configuration properties. type VirtualMachineScaleSetIPConfigurationProperties struct { - Subnet *APIEntityReference `json:"subnet,omitempty"` - LoadBalancerBackendAddressPools *[]SubResource `json:"loadBalancerBackendAddressPools,omitempty"` + Subnet *APIEntityReference `json:"subnet,omitempty"` + ApplicationGatewayBackendAddressPools *[]SubResource `json:"applicationGatewayBackendAddressPools,omitempty"` + LoadBalancerBackendAddressPools *[]SubResource `json:"loadBalancerBackendAddressPools,omitempty"` + LoadBalancerInboundNatPools *[]SubResource `json:"loadBalancerInboundNatPools,omitempty"` } // VirtualMachineScaleSetListResult is the List Virtual Machine operation @@ -901,6 +910,19 @@ type VirtualMachineScaleSetListResult struct { autorest.Response `json:"-"` Value *[]VirtualMachineScaleSet `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// VirtualMachineScaleSetListResultPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. +func (client VirtualMachineScaleSetListResult) VirtualMachineScaleSetListResultPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) } // VirtualMachineScaleSetListSkusResult is the Virtual Machine Scale Set List @@ -908,6 +930,19 @@ type VirtualMachineScaleSetListSkusResult struct { autorest.Response `json:"-"` Value *[]VirtualMachineScaleSetSku `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// VirtualMachineScaleSetListSkusResultPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. 
+func (client VirtualMachineScaleSetListSkusResult) VirtualMachineScaleSetListSkusResultPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) } // VirtualMachineScaleSetListWithLinkResult is the List Virtual Machine @@ -980,6 +1015,7 @@ UpgradePolicy *UpgradePolicy `json:"upgradePolicy,omitempty"` VirtualMachineProfile *VirtualMachineScaleSetVMProfile `json:"virtualMachineProfile,omitempty"` ProvisioningState *string `json:"provisioningState,omitempty"` + OverProvision *bool `json:"overProvision,omitempty"` } // VirtualMachineScaleSetSku is describes an available virtual machine scale @@ -992,9 +1028,9 @@ // VirtualMachineScaleSetSkuCapacity is describes scaling information of a sku. type VirtualMachineScaleSetSkuCapacity struct { - Minimum *int32 `json:"minimum,omitempty"` - Maximum *int32 `json:"maximum,omitempty"` - DefaultCapacity *int32 `json:"defaultCapacity,omitempty"` + Minimum *int64 `json:"minimum,omitempty"` + Maximum *int64 `json:"maximum,omitempty"` + DefaultCapacity *int64 `json:"defaultCapacity,omitempty"` ScaleType VirtualMachineScaleSetSkuScaleType `json:"scaleType,omitempty"` } @@ -1044,8 +1080,8 @@ // machine scale set VM. 
type VirtualMachineScaleSetVMInstanceView struct { autorest.Response `json:"-"` - PlatformUpdateDomain *int `json:"platformUpdateDomain,omitempty"` - PlatformFaultDomain *int `json:"platformFaultDomain,omitempty"` + PlatformUpdateDomain *int32 `json:"platformUpdateDomain,omitempty"` + PlatformFaultDomain *int32 `json:"platformFaultDomain,omitempty"` RdpThumbPrint *string `json:"rdpThumbPrint,omitempty"` VMAgent *VirtualMachineAgentInstanceView `json:"vmAgent,omitempty"` Disks *[]DiskInstanceView `json:"disks,omitempty"` @@ -1059,6 +1095,19 @@ type VirtualMachineScaleSetVMListResult struct { autorest.Response `json:"-"` Value *[]VirtualMachineScaleSetVM `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// VirtualMachineScaleSetVMListResultPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. +func (client VirtualMachineScaleSetVMListResult) VirtualMachineScaleSetVMListResultPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) } // VirtualMachineScaleSetVMProfile is describes a virtual machine scale set @@ -1082,16 +1131,17 @@ DiagnosticsProfile *DiagnosticsProfile `json:"diagnosticsProfile,omitempty"` AvailabilitySet *SubResource `json:"availabilitySet,omitempty"` ProvisioningState *string `json:"provisioningState,omitempty"` + LicenseType *string `json:"licenseType,omitempty"` } // VirtualMachineSize is describes the properties of a VM size. 
type VirtualMachineSize struct { Name *string `json:"name,omitempty"` - NumberOfCores *int `json:"numberOfCores,omitempty"` - OsDiskSizeInMB *int `json:"osDiskSizeInMB,omitempty"` - ResourceDiskSizeInMB *int `json:"resourceDiskSizeInMB,omitempty"` - MemoryInMB *int `json:"memoryInMB,omitempty"` - MaxDataDiskCount *int `json:"maxDataDiskCount,omitempty"` + NumberOfCores *int32 `json:"numberOfCores,omitempty"` + OsDiskSizeInMB *int32 `json:"osDiskSizeInMB,omitempty"` + ResourceDiskSizeInMB *int32 `json:"resourceDiskSizeInMB,omitempty"` + MemoryInMB *int32 `json:"memoryInMB,omitempty"` + MaxDataDiskCount *int32 `json:"maxDataDiskCount,omitempty"` } // VirtualMachineSizeListResult is the List Virtual Machine operation response. @@ -1104,7 +1154,7 @@ // machine scale set instance view status summary. type VirtualMachineStatusCodeCount struct { Code *string `json:"code,omitempty"` - Count *int `json:"count,omitempty"` + Count *int32 `json:"count,omitempty"` } // WindowsConfiguration is describes Windows Configuration of the OS Profile. diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/compute/usageoperations.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/compute/usageoperations.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/compute/usageoperations.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/compute/usageoperations.go 2016-10-13 14:32:06.000000000 +0000 @@ -14,14 +14,14 @@ // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 // Changes may cause incorrect behavior and will be lost if the code is // regenerated. 
import ( - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" "net/http" - "net/url" ) // UsageOperationsClient is the the Compute Management Client. @@ -44,21 +44,21 @@ // List lists compute usages for a subscription. // // location is the location upon which resource usage is queried. -func (client UsageOperationsClient) List(location string) (result ListUsagesResult, ae error) { +func (client UsageOperationsClient) List(location string) (result ListUsagesResult, err error) { req, err := client.ListPreparer(location) if err != nil { - return result, autorest.NewErrorWithError(err, "compute/UsageOperationsClient", "List", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "compute.UsageOperationsClient", "List", nil, "Failure preparing request") } resp, err := client.ListSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "compute/UsageOperationsClient", "List", "Failure sending request") + return result, autorest.NewErrorWithError(err, "compute.UsageOperationsClient", "List", resp, "Failure sending request") } result, err = client.ListResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "compute/UsageOperationsClient", "List", "Failure responding to request") + err = autorest.NewErrorWithError(err, "compute.UsageOperationsClient", "List", resp, "Failure responding to request") } return @@ -67,27 +67,26 @@ // ListPreparer prepares the List request. 
func (client UsageOperationsClient) ListPreparer(location string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "location": url.QueryEscape(location), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "location": autorest.Encode("path", location), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/usages"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/usages", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client UsageOperationsClient) ListSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // ListResponder handles the response to the List request. The method always @@ -96,9 +95,33 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} return } + +// ListNextResults retrieves the next set of results, if any. 
+func (client UsageOperationsClient) ListNextResults(lastResults ListUsagesResult) (result ListUsagesResult, err error) { + req, err := lastResults.ListUsagesResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "compute.UsageOperationsClient", "List", nil, "Failure preparing next results request request") + } + if req == nil { + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute.UsageOperationsClient", "List", resp, "Failure sending next results request request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.UsageOperationsClient", "List", resp, "Failure responding to next results request request") + } + + return +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/compute/version.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/compute/version.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/compute/version.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/compute/version.go 2016-10-13 14:32:06.000000000 +0000 @@ -14,7 +14,7 @@ // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 // Changes may cause incorrect behavior and will be lost if the code is // regenerated. 
@@ -23,18 +23,18 @@ ) const ( - major = "0" - minor = "3" + major = "3" + minor = "1" patch = "0" // Always begin a "tag" with a dash (as per http://semver.org) tag = "-beta" semVerFormat = "%s.%s.%s%s" - userAgentFormat = "Azure-SDK-for-Go/%s;Package arm/%s;API %s" + userAgentFormat = "Azure-SDK-for-Go/%s arm-%s/%s" ) // UserAgent returns the UserAgent string to use when sending http.Requests. func UserAgent() string { - return fmt.Sprintf(userAgentFormat, Version(), "compute", "2015-06-15") + return fmt.Sprintf(userAgentFormat, Version(), "compute", "2016-03-30") } // Version returns the semantic version (see http://semver.org) of the client. diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachineextensionimages.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachineextensionimages.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachineextensionimages.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachineextensionimages.go 2016-10-13 14:32:06.000000000 +0000 @@ -14,14 +14,14 @@ // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 // Changes may cause incorrect behavior and will be lost if the code is // regenerated. import ( - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" "net/http" - "net/url" ) // VirtualMachineExtensionImagesClient is the the Compute Management Client. @@ -43,21 +43,21 @@ // Get gets a virtual machine extension image. 
// -func (client VirtualMachineExtensionImagesClient) Get(location string, publisherName string, typeParameter string, version string) (result VirtualMachineExtensionImage, ae error) { +func (client VirtualMachineExtensionImagesClient) Get(location string, publisherName string, typeParameter string, version string) (result VirtualMachineExtensionImage, err error) { req, err := client.GetPreparer(location, publisherName, typeParameter, version) if err != nil { - return result, autorest.NewErrorWithError(err, "compute/VirtualMachineExtensionImagesClient", "Get", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionImagesClient", "Get", nil, "Failure preparing request") } resp, err := client.GetSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "compute/VirtualMachineExtensionImagesClient", "Get", "Failure sending request") + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionImagesClient", "Get", resp, "Failure sending request") } result, err = client.GetResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "compute/VirtualMachineExtensionImagesClient", "Get", "Failure responding to request") + err = autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionImagesClient", "Get", resp, "Failure responding to request") } return @@ -66,30 +66,29 @@ // GetPreparer prepares the Get request. 
func (client VirtualMachineExtensionImagesClient) GetPreparer(location string, publisherName string, typeParameter string, version string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "location": url.QueryEscape(location), - "publisherName": url.QueryEscape(publisherName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - "type": url.QueryEscape(typeParameter), - "version": url.QueryEscape(version), + "location": autorest.Encode("path", location), + "publisherName": autorest.Encode("path", publisherName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "type": autorest.Encode("path", typeParameter), + "version": autorest.Encode("path", version), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmextension/types/{type}/versions/{version}"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmextension/types/{type}/versions/{version}", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineExtensionImagesClient) GetSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetResponder handles the response to the Get request. 
The method always @@ -98,7 +97,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -107,21 +106,21 @@ // ListTypes gets a list of virtual machine extension image types. // -func (client VirtualMachineExtensionImagesClient) ListTypes(location string, publisherName string) (result VirtualMachineImageResourceList, ae error) { +func (client VirtualMachineExtensionImagesClient) ListTypes(location string, publisherName string) (result ListVirtualMachineExtensionImage, err error) { req, err := client.ListTypesPreparer(location, publisherName) if err != nil { - return result, autorest.NewErrorWithError(err, "compute/VirtualMachineExtensionImagesClient", "ListTypes", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionImagesClient", "ListTypes", nil, "Failure preparing request") } resp, err := client.ListTypesSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "compute/VirtualMachineExtensionImagesClient", "ListTypes", "Failure sending request") + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionImagesClient", "ListTypes", resp, "Failure sending request") } result, err = client.ListTypesResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "compute/VirtualMachineExtensionImagesClient", "ListTypes", "Failure responding to request") + err = autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionImagesClient", "ListTypes", resp, "Failure responding to request") } return @@ -130,37 +129,36 @@ // ListTypesPreparer prepares the ListTypes request. 
func (client VirtualMachineExtensionImagesClient) ListTypesPreparer(location string, publisherName string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "location": url.QueryEscape(location), - "publisherName": url.QueryEscape(publisherName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "location": autorest.Encode("path", location), + "publisherName": autorest.Encode("path", publisherName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmextension/types"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmextension/types", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // ListTypesSender sends the ListTypes request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineExtensionImagesClient) ListTypesSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // ListTypesResponder handles the response to the ListTypes request. The method always // closes the http.Response Body. 
-func (client VirtualMachineExtensionImagesClient) ListTypesResponder(resp *http.Response) (result VirtualMachineImageResourceList, err error) { +func (client VirtualMachineExtensionImagesClient) ListTypesResponder(resp *http.Response) (result ListVirtualMachineExtensionImage, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result.Value), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -170,70 +168,69 @@ // ListVersions gets a list of virtual machine extension image versions. // // filter is the filter to apply on the operation. -func (client VirtualMachineExtensionImagesClient) ListVersions(location string, publisherName string, typeParameter string, filter string, top *int, orderBy string) (result VirtualMachineImageResourceList, ae error) { - req, err := client.ListVersionsPreparer(location, publisherName, typeParameter, filter, top, orderBy) +func (client VirtualMachineExtensionImagesClient) ListVersions(location string, publisherName string, typeParameter string, filter string, top *int32, orderby string) (result ListVirtualMachineExtensionImage, err error) { + req, err := client.ListVersionsPreparer(location, publisherName, typeParameter, filter, top, orderby) if err != nil { - return result, autorest.NewErrorWithError(err, "compute/VirtualMachineExtensionImagesClient", "ListVersions", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionImagesClient", "ListVersions", nil, "Failure preparing request") } resp, err := client.ListVersionsSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "compute/VirtualMachineExtensionImagesClient", "ListVersions", "Failure sending request") + return result, autorest.NewErrorWithError(err, 
"compute.VirtualMachineExtensionImagesClient", "ListVersions", resp, "Failure sending request") } result, err = client.ListVersionsResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "compute/VirtualMachineExtensionImagesClient", "ListVersions", "Failure responding to request") + err = autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionImagesClient", "ListVersions", resp, "Failure responding to request") } return } // ListVersionsPreparer prepares the ListVersions request. -func (client VirtualMachineExtensionImagesClient) ListVersionsPreparer(location string, publisherName string, typeParameter string, filter string, top *int, orderBy string) (*http.Request, error) { +func (client VirtualMachineExtensionImagesClient) ListVersionsPreparer(location string, publisherName string, typeParameter string, filter string, top *int32, orderby string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "location": url.QueryEscape(location), - "publisherName": url.QueryEscape(publisherName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - "type": url.QueryEscape(typeParameter), + "location": autorest.Encode("path", location), + "publisherName": autorest.Encode("path", publisherName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "type": autorest.Encode("path", typeParameter), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } if len(filter) > 0 { - queryParameters["$filter"] = filter + queryParameters["$filter"] = autorest.Encode("query", filter) } if top != nil { - queryParameters["$top"] = top + queryParameters["$top"] = autorest.Encode("query", *top) } - if len(orderBy) > 0 { - queryParameters["$orderBy"] = orderBy + if len(orderby) > 0 { + queryParameters["$orderby"] = autorest.Encode("query", orderby) } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), 
autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmextension/types/{type}/versions"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmextension/types/{type}/versions", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // ListVersionsSender sends the ListVersions request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineExtensionImagesClient) ListVersionsSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // ListVersionsResponder handles the response to the ListVersions request. The method always // closes the http.Response Body. 
-func (client VirtualMachineExtensionImagesClient) ListVersionsResponder(resp *http.Response) (result VirtualMachineImageResourceList, err error) { +func (client VirtualMachineExtensionImagesClient) ListVersionsResponder(resp *http.Response) (result ListVirtualMachineExtensionImage, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result.Value), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachineextensions.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachineextensions.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachineextensions.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachineextensions.go 2016-10-13 14:32:06.000000000 +0000 @@ -14,14 +14,14 @@ // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 // Changes may cause incorrect behavior and will be lost if the code is // regenerated. import ( - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" "net/http" - "net/url" ) // VirtualMachineExtensionsClient is the the Compute Management Client. @@ -41,126 +41,134 @@ return VirtualMachineExtensionsClient{NewWithBaseURI(baseURI, subscriptionID)} } -// CreateOrUpdate the operation to create or update the extension. +// CreateOrUpdate the operation to create or update the extension. This method +// may poll for completion. 
Polling can be canceled by passing the cancel +// channel argument. The channel will be used to cancel polling and any +// outstanding HTTP requests. // // resourceGroupName is the name of the resource group. vmName is the name of // the virtual machine where the extension should be create or updated. // vmExtensionName is the name of the virtual machine extension. // extensionParameters is parameters supplied to the Create Virtual Machine // Extension operation. -func (client VirtualMachineExtensionsClient) CreateOrUpdate(resourceGroupName string, vmName string, vmExtensionName string, extensionParameters VirtualMachineExtension) (result VirtualMachineExtension, ae error) { - req, err := client.CreateOrUpdatePreparer(resourceGroupName, vmName, vmExtensionName, extensionParameters) +func (client VirtualMachineExtensionsClient) CreateOrUpdate(resourceGroupName string, vmName string, vmExtensionName string, extensionParameters VirtualMachineExtension, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.CreateOrUpdatePreparer(resourceGroupName, vmName, vmExtensionName, extensionParameters, cancel) if err != nil { - return result, autorest.NewErrorWithError(err, "compute/VirtualMachineExtensionsClient", "CreateOrUpdate", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionsClient", "CreateOrUpdate", nil, "Failure preparing request") } resp, err := client.CreateOrUpdateSender(req) if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "compute/VirtualMachineExtensionsClient", "CreateOrUpdate", "Failure sending request") + result.Response = resp + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionsClient", "CreateOrUpdate", resp, "Failure sending request") } result, err = client.CreateOrUpdateResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, 
"compute/VirtualMachineExtensionsClient", "CreateOrUpdate", "Failure responding to request") + err = autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionsClient", "CreateOrUpdate", resp, "Failure responding to request") } return } // CreateOrUpdatePreparer prepares the CreateOrUpdate request. -func (client VirtualMachineExtensionsClient) CreateOrUpdatePreparer(resourceGroupName string, vmName string, vmExtensionName string, extensionParameters VirtualMachineExtension) (*http.Request, error) { +func (client VirtualMachineExtensionsClient) CreateOrUpdatePreparer(resourceGroupName string, vmName string, vmExtensionName string, extensionParameters VirtualMachineExtension, cancel <-chan struct{}) (*http.Request, error) { pathParameters := map[string]interface{}{ - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - "vmExtensionName": url.QueryEscape(vmExtensionName), - "vmName": url.QueryEscape(vmName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vmExtensionName": autorest.Encode("path", vmExtensionName), + "vmName": autorest.Encode("path", vmName), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, + preparer := autorest.CreatePreparer( autorest.AsJSON(), autorest.AsPut(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/extensions/{vmExtensionName}"), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/extensions/{vmExtensionName}", pathParameters), autorest.WithJSON(extensionParameters), - autorest.WithPathParameters(pathParameters), 
autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) } // CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineExtensionsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusCreated, http.StatusOK) + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) } // CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always // closes the http.Response Body. -func (client VirtualMachineExtensionsClient) CreateOrUpdateResponder(resp *http.Response) (result VirtualMachineExtension, err error) { +func (client VirtualMachineExtensionsClient) CreateOrUpdateResponder(resp *http.Response) (result autorest.Response, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusCreated, http.StatusOK), - autorest.ByUnmarshallingJSON(&result), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} + result.Response = resp return } -// Delete the operation to delete the extension. +// Delete the operation to delete the extension. This method may poll for +// completion. Polling can be canceled by passing the cancel channel +// argument. The channel will be used to cancel polling and any outstanding +// HTTP requests. // // resourceGroupName is the name of the resource group. vmName is the name of // the virtual machine where the extension should be deleted. vmExtensionName // is the name of the virtual machine extension. 
-func (client VirtualMachineExtensionsClient) Delete(resourceGroupName string, vmName string, vmExtensionName string) (result autorest.Response, ae error) { - req, err := client.DeletePreparer(resourceGroupName, vmName, vmExtensionName) +func (client VirtualMachineExtensionsClient) Delete(resourceGroupName string, vmName string, vmExtensionName string, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.DeletePreparer(resourceGroupName, vmName, vmExtensionName, cancel) if err != nil { - return result, autorest.NewErrorWithError(err, "compute/VirtualMachineExtensionsClient", "Delete", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionsClient", "Delete", nil, "Failure preparing request") } resp, err := client.DeleteSender(req) if err != nil { result.Response = resp - return result, autorest.NewErrorWithError(err, "compute/VirtualMachineExtensionsClient", "Delete", "Failure sending request") + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionsClient", "Delete", resp, "Failure sending request") } result, err = client.DeleteResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "compute/VirtualMachineExtensionsClient", "Delete", "Failure responding to request") + err = autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionsClient", "Delete", resp, "Failure responding to request") } return } // DeletePreparer prepares the Delete request. 
-func (client VirtualMachineExtensionsClient) DeletePreparer(resourceGroupName string, vmName string, vmExtensionName string) (*http.Request, error) { +func (client VirtualMachineExtensionsClient) DeletePreparer(resourceGroupName string, vmName string, vmExtensionName string, cancel <-chan struct{}) (*http.Request, error) { pathParameters := map[string]interface{}{ - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - "vmExtensionName": url.QueryEscape(vmExtensionName), - "vmName": url.QueryEscape(vmName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vmExtensionName": autorest.Encode("path", vmExtensionName), + "vmName": autorest.Encode("path", vmName), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsDelete(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/extensions/{vmExtensionName}"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/extensions/{vmExtensionName}", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) } // DeleteSender sends the Delete request. The method will close the // http.Response Body if it receives an error. 
func (client VirtualMachineExtensionsClient) DeleteSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusAccepted, http.StatusNoContent, http.StatusOK) + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) } // DeleteResponder handles the response to the Delete request. The method always @@ -169,7 +177,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusAccepted, http.StatusNoContent, http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), autorest.ByClosing()) result.Response = resp return @@ -181,21 +189,21 @@ // the virtual machine containing the extension. vmExtensionName is the name // of the virtual machine extension. expand is the expand expression to apply // on the operation. -func (client VirtualMachineExtensionsClient) Get(resourceGroupName string, vmName string, vmExtensionName string, expand string) (result VirtualMachineExtension, ae error) { +func (client VirtualMachineExtensionsClient) Get(resourceGroupName string, vmName string, vmExtensionName string, expand string) (result VirtualMachineExtension, err error) { req, err := client.GetPreparer(resourceGroupName, vmName, vmExtensionName, expand) if err != nil { - return result, autorest.NewErrorWithError(err, "compute/VirtualMachineExtensionsClient", "Get", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionsClient", "Get", nil, "Failure preparing request") } resp, err := client.GetSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "compute/VirtualMachineExtensionsClient", "Get", "Failure sending request") + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionsClient", "Get", resp, "Failure sending request") } result, err = client.GetResponder(resp) if 
err != nil { - ae = autorest.NewErrorWithError(err, "compute/VirtualMachineExtensionsClient", "Get", "Failure responding to request") + err = autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionsClient", "Get", resp, "Failure responding to request") } return @@ -204,32 +212,31 @@ // GetPreparer prepares the Get request. func (client VirtualMachineExtensionsClient) GetPreparer(resourceGroupName string, vmName string, vmExtensionName string, expand string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - "vmExtensionName": url.QueryEscape(vmExtensionName), - "vmName": url.QueryEscape(vmName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vmExtensionName": autorest.Encode("path", vmExtensionName), + "vmName": autorest.Encode("path", vmName), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } if len(expand) > 0 { - queryParameters["$expand"] = expand + queryParameters["$expand"] = autorest.Encode("query", expand) } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/extensions/{vmExtensionName}"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/extensions/{vmExtensionName}", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetSender sends the Get request. 
The method will close the // http.Response Body if it receives an error. func (client VirtualMachineExtensionsClient) GetSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetResponder handles the response to the Get request. The method always @@ -238,7 +245,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachineimages.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachineimages.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachineimages.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachineimages.go 2016-10-13 14:32:06.000000000 +0000 @@ -14,14 +14,14 @@ // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 // Changes may cause incorrect behavior and will be lost if the code is // regenerated. import ( - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" "net/http" - "net/url" ) // VirtualMachineImagesClient is the the Compute Management Client. @@ -43,21 +43,21 @@ // Get gets a virtual machine image. 
// -func (client VirtualMachineImagesClient) Get(location string, publisherName string, offer string, skus string, version string) (result VirtualMachineImage, ae error) { +func (client VirtualMachineImagesClient) Get(location string, publisherName string, offer string, skus string, version string) (result VirtualMachineImage, err error) { req, err := client.GetPreparer(location, publisherName, offer, skus, version) if err != nil { - return result, autorest.NewErrorWithError(err, "compute/VirtualMachineImagesClient", "Get", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineImagesClient", "Get", nil, "Failure preparing request") } resp, err := client.GetSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "compute/VirtualMachineImagesClient", "Get", "Failure sending request") + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineImagesClient", "Get", resp, "Failure sending request") } result, err = client.GetResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "compute/VirtualMachineImagesClient", "Get", "Failure responding to request") + err = autorest.NewErrorWithError(err, "compute.VirtualMachineImagesClient", "Get", resp, "Failure responding to request") } return @@ -66,31 +66,30 @@ // GetPreparer prepares the Get request. 
func (client VirtualMachineImagesClient) GetPreparer(location string, publisherName string, offer string, skus string, version string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "location": url.QueryEscape(location), - "offer": url.QueryEscape(offer), - "publisherName": url.QueryEscape(publisherName), - "skus": url.QueryEscape(skus), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - "version": url.QueryEscape(version), + "location": autorest.Encode("path", location), + "offer": autorest.Encode("path", offer), + "publisherName": autorest.Encode("path", publisherName), + "skus": autorest.Encode("path", skus), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "version": autorest.Encode("path", version), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmimage/offers/{offer}/skus/{skus}/versions/{version}"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmimage/offers/{offer}/skus/{skus}/versions/{version}", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineImagesClient) GetSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetResponder handles the response to the Get request. 
The method always @@ -99,7 +98,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -109,71 +108,70 @@ // List gets a list of virtual machine images. // // filter is the filter to apply on the operation. -func (client VirtualMachineImagesClient) List(location string, publisherName string, offer string, skus string, filter string, top *int, orderby string) (result VirtualMachineImageResourceList, ae error) { +func (client VirtualMachineImagesClient) List(location string, publisherName string, offer string, skus string, filter string, top *int32, orderby string) (result ListVirtualMachineImageResource, err error) { req, err := client.ListPreparer(location, publisherName, offer, skus, filter, top, orderby) if err != nil { - return result, autorest.NewErrorWithError(err, "compute/VirtualMachineImagesClient", "List", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineImagesClient", "List", nil, "Failure preparing request") } resp, err := client.ListSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "compute/VirtualMachineImagesClient", "List", "Failure sending request") + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineImagesClient", "List", resp, "Failure sending request") } result, err = client.ListResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "compute/VirtualMachineImagesClient", "List", "Failure responding to request") + err = autorest.NewErrorWithError(err, "compute.VirtualMachineImagesClient", "List", resp, "Failure responding to request") } return } // ListPreparer prepares the List request. 
-func (client VirtualMachineImagesClient) ListPreparer(location string, publisherName string, offer string, skus string, filter string, top *int, orderby string) (*http.Request, error) { +func (client VirtualMachineImagesClient) ListPreparer(location string, publisherName string, offer string, skus string, filter string, top *int32, orderby string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "location": url.QueryEscape(location), - "offer": url.QueryEscape(offer), - "publisherName": url.QueryEscape(publisherName), - "skus": url.QueryEscape(skus), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "location": autorest.Encode("path", location), + "offer": autorest.Encode("path", offer), + "publisherName": autorest.Encode("path", publisherName), + "skus": autorest.Encode("path", skus), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } if len(filter) > 0 { - queryParameters["$filter"] = filter + queryParameters["$filter"] = autorest.Encode("query", filter) } if top != nil { - queryParameters["$top"] = top + queryParameters["$top"] = autorest.Encode("query", *top) } if len(orderby) > 0 { - queryParameters["$orderby"] = orderby + queryParameters["$orderby"] = autorest.Encode("query", orderby) } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmimage/offers/{offer}/skus/{skus}/versions"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmimage/offers/{offer}/skus/{skus}/versions", pathParameters), 
autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineImagesClient) ListSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // ListResponder handles the response to the List request. The method always // closes the http.Response Body. -func (client VirtualMachineImagesClient) ListResponder(resp *http.Response) (result VirtualMachineImageResourceList, err error) { +func (client VirtualMachineImagesClient) ListResponder(resp *http.Response) (result ListVirtualMachineImageResource, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result.Value), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -182,21 +180,21 @@ // ListOffers gets a list of virtual machine image offers. 
// -func (client VirtualMachineImagesClient) ListOffers(location string, publisherName string) (result VirtualMachineImageResourceList, ae error) { +func (client VirtualMachineImagesClient) ListOffers(location string, publisherName string) (result ListVirtualMachineImageResource, err error) { req, err := client.ListOffersPreparer(location, publisherName) if err != nil { - return result, autorest.NewErrorWithError(err, "compute/VirtualMachineImagesClient", "ListOffers", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineImagesClient", "ListOffers", nil, "Failure preparing request") } resp, err := client.ListOffersSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "compute/VirtualMachineImagesClient", "ListOffers", "Failure sending request") + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineImagesClient", "ListOffers", resp, "Failure sending request") } result, err = client.ListOffersResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "compute/VirtualMachineImagesClient", "ListOffers", "Failure responding to request") + err = autorest.NewErrorWithError(err, "compute.VirtualMachineImagesClient", "ListOffers", resp, "Failure responding to request") } return @@ -205,37 +203,36 @@ // ListOffersPreparer prepares the ListOffers request. 
func (client VirtualMachineImagesClient) ListOffersPreparer(location string, publisherName string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "location": url.QueryEscape(location), - "publisherName": url.QueryEscape(publisherName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "location": autorest.Encode("path", location), + "publisherName": autorest.Encode("path", publisherName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmimage/offers"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmimage/offers", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // ListOffersSender sends the ListOffers request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineImagesClient) ListOffersSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // ListOffersResponder handles the response to the ListOffers request. The method always // closes the http.Response Body. 
-func (client VirtualMachineImagesClient) ListOffersResponder(resp *http.Response) (result VirtualMachineImageResourceList, err error) { +func (client VirtualMachineImagesClient) ListOffersResponder(resp *http.Response) (result ListVirtualMachineImageResource, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result.Value), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -244,21 +241,21 @@ // ListPublishers gets a list of virtual machine image publishers. // -func (client VirtualMachineImagesClient) ListPublishers(location string) (result VirtualMachineImageResourceList, ae error) { +func (client VirtualMachineImagesClient) ListPublishers(location string) (result ListVirtualMachineImageResource, err error) { req, err := client.ListPublishersPreparer(location) if err != nil { - return result, autorest.NewErrorWithError(err, "compute/VirtualMachineImagesClient", "ListPublishers", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineImagesClient", "ListPublishers", nil, "Failure preparing request") } resp, err := client.ListPublishersSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "compute/VirtualMachineImagesClient", "ListPublishers", "Failure sending request") + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineImagesClient", "ListPublishers", resp, "Failure sending request") } result, err = client.ListPublishersResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "compute/VirtualMachineImagesClient", "ListPublishers", "Failure responding to request") + err = autorest.NewErrorWithError(err, "compute.VirtualMachineImagesClient", "ListPublishers", resp, "Failure responding to request") } return @@ -267,36 +264,35 @@ // 
ListPublishersPreparer prepares the ListPublishers request. func (client VirtualMachineImagesClient) ListPublishersPreparer(location string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "location": url.QueryEscape(location), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "location": autorest.Encode("path", location), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // ListPublishersSender sends the ListPublishers request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineImagesClient) ListPublishersSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // ListPublishersResponder handles the response to the ListPublishers request. The method always // closes the http.Response Body. 
-func (client VirtualMachineImagesClient) ListPublishersResponder(resp *http.Response) (result VirtualMachineImageResourceList, err error) { +func (client VirtualMachineImagesClient) ListPublishersResponder(resp *http.Response) (result ListVirtualMachineImageResource, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result.Value), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -305,21 +301,21 @@ // ListSkus gets a list of virtual machine image skus. // -func (client VirtualMachineImagesClient) ListSkus(location string, publisherName string, offer string) (result VirtualMachineImageResourceList, ae error) { +func (client VirtualMachineImagesClient) ListSkus(location string, publisherName string, offer string) (result ListVirtualMachineImageResource, err error) { req, err := client.ListSkusPreparer(location, publisherName, offer) if err != nil { - return result, autorest.NewErrorWithError(err, "compute/VirtualMachineImagesClient", "ListSkus", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineImagesClient", "ListSkus", nil, "Failure preparing request") } resp, err := client.ListSkusSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "compute/VirtualMachineImagesClient", "ListSkus", "Failure sending request") + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineImagesClient", "ListSkus", resp, "Failure sending request") } result, err = client.ListSkusResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "compute/VirtualMachineImagesClient", "ListSkus", "Failure responding to request") + err = autorest.NewErrorWithError(err, "compute.VirtualMachineImagesClient", "ListSkus", resp, "Failure responding to request") } return @@ -328,38 
+324,37 @@ // ListSkusPreparer prepares the ListSkus request. func (client VirtualMachineImagesClient) ListSkusPreparer(location string, publisherName string, offer string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "location": url.QueryEscape(location), - "offer": url.QueryEscape(offer), - "publisherName": url.QueryEscape(publisherName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "location": autorest.Encode("path", location), + "offer": autorest.Encode("path", offer), + "publisherName": autorest.Encode("path", publisherName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmimage/offers/{offer}/skus"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmimage/offers/{offer}/skus", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // ListSkusSender sends the ListSkus request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineImagesClient) ListSkusSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // ListSkusResponder handles the response to the ListSkus request. The method always // closes the http.Response Body. 
-func (client VirtualMachineImagesClient) ListSkusResponder(resp *http.Response) (result VirtualMachineImageResourceList, err error) { +func (client VirtualMachineImagesClient) ListSkusResponder(resp *http.Response) (result ListVirtualMachineImageResource, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result.Value), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachinescalesets.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachinescalesets.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachinescalesets.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachinescalesets.go 2016-10-13 14:32:06.000000000 +0000 @@ -14,14 +14,14 @@ // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 // Changes may cause incorrect behavior and will be lost if the code is // regenerated. import ( - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" "net/http" - "net/url" ) // VirtualMachineScaleSetsClient is the the Compute Management Client. @@ -41,129 +41,138 @@ return VirtualMachineScaleSetsClient{NewWithBaseURI(baseURI, subscriptionID)} } -// CreateOrUpdate the operation to create or update a virtual machine scale -// set. 
+// CreateOrUpdate allows you to create or update a virtual machine scale set +// by providing parameters or a path to pre-configured parameter file. This +// method may poll for completion. Polling can be canceled by passing the +// cancel channel argument. The channel will be used to cancel polling and +// any outstanding HTTP requests. // // resourceGroupName is the name of the resource group. name is parameters // supplied to the Create Virtual Machine Scale Set operation. parameters is // parameters supplied to the Create Virtual Machine Scale Set operation. -func (client VirtualMachineScaleSetsClient) CreateOrUpdate(resourceGroupName string, name string, parameters VirtualMachineScaleSet) (result VirtualMachineScaleSet, ae error) { - req, err := client.CreateOrUpdatePreparer(resourceGroupName, name, parameters) +func (client VirtualMachineScaleSetsClient) CreateOrUpdate(resourceGroupName string, name string, parameters VirtualMachineScaleSet, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.CreateOrUpdatePreparer(resourceGroupName, name, parameters, cancel) if err != nil { - return result, autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetsClient", "CreateOrUpdate", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "CreateOrUpdate", nil, "Failure preparing request") } resp, err := client.CreateOrUpdateSender(req) if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetsClient", "CreateOrUpdate", "Failure sending request") + result.Response = resp + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "CreateOrUpdate", resp, "Failure sending request") } result, err = client.CreateOrUpdateResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetsClient", "CreateOrUpdate", 
"Failure responding to request") + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "CreateOrUpdate", resp, "Failure responding to request") } return } // CreateOrUpdatePreparer prepares the CreateOrUpdate request. -func (client VirtualMachineScaleSetsClient) CreateOrUpdatePreparer(resourceGroupName string, name string, parameters VirtualMachineScaleSet) (*http.Request, error) { +func (client VirtualMachineScaleSetsClient) CreateOrUpdatePreparer(resourceGroupName string, name string, parameters VirtualMachineScaleSet, cancel <-chan struct{}) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, + preparer := autorest.CreatePreparer( autorest.AsJSON(), autorest.AsPut(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{name}"), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{name}", pathParameters), autorest.WithJSON(parameters), - autorest.WithPathParameters(pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) } // CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the // http.Response Body if it receives an error. 
func (client VirtualMachineScaleSetsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusCreated, http.StatusOK) + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) } // CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always // closes the http.Response Body. -func (client VirtualMachineScaleSetsClient) CreateOrUpdateResponder(resp *http.Response) (result VirtualMachineScaleSet, err error) { +func (client VirtualMachineScaleSetsClient) CreateOrUpdateResponder(resp *http.Response) (result autorest.Response, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusCreated, http.StatusOK), - autorest.ByUnmarshallingJSON(&result), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} + result.Response = resp return } -// Deallocate the operation to deallocate virtual machines in a virtual -// machine scale set. +// Deallocate allows you to deallocate virtual machines in a virtual machine +// scale set. Shuts down the virtual machines and releases the compute +// resources. You are not billed for the compute resources that this virtual +// machine scale set uses. This method may poll for completion. Polling can +// be canceled by passing the cancel channel argument. The channel will be +// used to cancel polling and any outstanding HTTP requests. // // resourceGroupName is the name of the resource group. vmScaleSetName is the // name of the virtual machine scale set. vmInstanceIDs is the list of // virtual machine scale set instance IDs. 
-func (client VirtualMachineScaleSetsClient) Deallocate(resourceGroupName string, vmScaleSetName string, vmInstanceIDs *VirtualMachineScaleSetVMInstanceIDs) (result autorest.Response, ae error) { - req, err := client.DeallocatePreparer(resourceGroupName, vmScaleSetName, vmInstanceIDs) +func (client VirtualMachineScaleSetsClient) Deallocate(resourceGroupName string, vmScaleSetName string, vmInstanceIDs *VirtualMachineScaleSetVMInstanceIDs, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.DeallocatePreparer(resourceGroupName, vmScaleSetName, vmInstanceIDs, cancel) if err != nil { - return result, autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetsClient", "Deallocate", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Deallocate", nil, "Failure preparing request") } resp, err := client.DeallocateSender(req) if err != nil { result.Response = resp - return result, autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetsClient", "Deallocate", "Failure sending request") + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Deallocate", resp, "Failure sending request") } result, err = client.DeallocateResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetsClient", "Deallocate", "Failure responding to request") + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Deallocate", resp, "Failure responding to request") } return } // DeallocatePreparer prepares the Deallocate request. 
-func (client VirtualMachineScaleSetsClient) DeallocatePreparer(resourceGroupName string, vmScaleSetName string, vmInstanceIDs *VirtualMachineScaleSetVMInstanceIDs) (*http.Request, error) { +func (client VirtualMachineScaleSetsClient) DeallocatePreparer(resourceGroupName string, vmScaleSetName string, vmInstanceIDs *VirtualMachineScaleSetVMInstanceIDs, cancel <-chan struct{}) (*http.Request, error) { pathParameters := map[string]interface{}{ - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - "vmScaleSetName": url.QueryEscape(vmScaleSetName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vmScaleSetName": autorest.Encode("path", vmScaleSetName), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } preparer := autorest.CreatePreparer( autorest.AsJSON(), autorest.AsPost(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/deallocate"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/deallocate", pathParameters), autorest.WithQueryParameters(queryParameters)) if vmInstanceIDs != nil { preparer = autorest.DecoratePreparer(preparer, autorest.WithJSON(vmInstanceIDs)) } - return preparer.Prepare(&http.Request{}) + return preparer.Prepare(&http.Request{Cancel: cancel}) } // DeallocateSender sends the Deallocate request. The method will close the // http.Response Body if it receives an error. 
func (client VirtualMachineScaleSetsClient) DeallocateSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK, http.StatusAccepted) + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) } // DeallocateResponder handles the response to the Deallocate request. The method always @@ -172,61 +181,65 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), autorest.ByClosing()) result.Response = resp return } -// Delete the operation to delete a virtual machine scale set. +// Delete allows you to delete a virtual machine scale set. This method may +// poll for completion. Polling can be canceled by passing the cancel channel +// argument. The channel will be used to cancel polling and any outstanding +// HTTP requests. // // resourceGroupName is the name of the resource group. vmScaleSetName is the // name of the virtual machine scale set. 
-func (client VirtualMachineScaleSetsClient) Delete(resourceGroupName string, vmScaleSetName string) (result autorest.Response, ae error) { - req, err := client.DeletePreparer(resourceGroupName, vmScaleSetName) +func (client VirtualMachineScaleSetsClient) Delete(resourceGroupName string, vmScaleSetName string, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.DeletePreparer(resourceGroupName, vmScaleSetName, cancel) if err != nil { - return result, autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetsClient", "Delete", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Delete", nil, "Failure preparing request") } resp, err := client.DeleteSender(req) if err != nil { result.Response = resp - return result, autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetsClient", "Delete", "Failure sending request") + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Delete", resp, "Failure sending request") } result, err = client.DeleteResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetsClient", "Delete", "Failure responding to request") + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Delete", resp, "Failure responding to request") } return } // DeletePreparer prepares the Delete request. 
-func (client VirtualMachineScaleSetsClient) DeletePreparer(resourceGroupName string, vmScaleSetName string) (*http.Request, error) { +func (client VirtualMachineScaleSetsClient) DeletePreparer(resourceGroupName string, vmScaleSetName string, cancel <-chan struct{}) (*http.Request, error) { pathParameters := map[string]interface{}{ - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - "vmScaleSetName": url.QueryEscape(vmScaleSetName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vmScaleSetName": autorest.Encode("path", vmScaleSetName), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsDelete(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) } // DeleteSender sends the Delete request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineScaleSetsClient) DeleteSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusAccepted, http.StatusOK, http.StatusNoContent) + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) } // DeleteResponder handles the response to the Delete request. 
The method always @@ -235,64 +248,68 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusAccepted, http.StatusOK, http.StatusNoContent), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), autorest.ByClosing()) result.Response = resp return } -// DeleteInstances the operation to delete virtual machines in a virtual -// machine scale set. +// DeleteInstances allows you to delete virtual machines in a virtual machine +// scale set. This method may poll for completion. Polling can be canceled by +// passing the cancel channel argument. The channel will be used to cancel +// polling and any outstanding HTTP requests. // // resourceGroupName is the name of the resource group. vmScaleSetName is the // name of the virtual machine scale set. vmInstanceIDs is the list of // virtual machine scale set instance IDs. -func (client VirtualMachineScaleSetsClient) DeleteInstances(resourceGroupName string, vmScaleSetName string, vmInstanceIDs VirtualMachineScaleSetVMInstanceRequiredIDs) (result autorest.Response, ae error) { - req, err := client.DeleteInstancesPreparer(resourceGroupName, vmScaleSetName, vmInstanceIDs) +func (client VirtualMachineScaleSetsClient) DeleteInstances(resourceGroupName string, vmScaleSetName string, vmInstanceIDs VirtualMachineScaleSetVMInstanceRequiredIDs, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.DeleteInstancesPreparer(resourceGroupName, vmScaleSetName, vmInstanceIDs, cancel) if err != nil { - return result, autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetsClient", "DeleteInstances", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "DeleteInstances", nil, "Failure preparing request") } resp, err := client.DeleteInstancesSender(req) if err != nil { result.Response = resp - return result, autorest.NewErrorWithError(err, 
"compute/VirtualMachineScaleSetsClient", "DeleteInstances", "Failure sending request") + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "DeleteInstances", resp, "Failure sending request") } result, err = client.DeleteInstancesResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetsClient", "DeleteInstances", "Failure responding to request") + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "DeleteInstances", resp, "Failure responding to request") } return } // DeleteInstancesPreparer prepares the DeleteInstances request. -func (client VirtualMachineScaleSetsClient) DeleteInstancesPreparer(resourceGroupName string, vmScaleSetName string, vmInstanceIDs VirtualMachineScaleSetVMInstanceRequiredIDs) (*http.Request, error) { +func (client VirtualMachineScaleSetsClient) DeleteInstancesPreparer(resourceGroupName string, vmScaleSetName string, vmInstanceIDs VirtualMachineScaleSetVMInstanceRequiredIDs, cancel <-chan struct{}) (*http.Request, error) { pathParameters := map[string]interface{}{ - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - "vmScaleSetName": url.QueryEscape(vmScaleSetName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vmScaleSetName": autorest.Encode("path", vmScaleSetName), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, + preparer := autorest.CreatePreparer( autorest.AsJSON(), autorest.AsPost(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/delete"), + 
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/delete", pathParameters), autorest.WithJSON(vmInstanceIDs), - autorest.WithPathParameters(pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) } // DeleteInstancesSender sends the DeleteInstances request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineScaleSetsClient) DeleteInstancesSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK, http.StatusAccepted) + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) } // DeleteInstancesResponder handles the response to the DeleteInstances request. The method always @@ -301,31 +318,31 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), autorest.ByClosing()) result.Response = resp return } -// Get the operation to get a virtual machine scale set. +// Get display information about a virtual machine scale set. // // resourceGroupName is the name of the resource group. vmScaleSetName is the // name of the virtual machine scale set. 
-func (client VirtualMachineScaleSetsClient) Get(resourceGroupName string, vmScaleSetName string) (result VirtualMachineScaleSet, ae error) { +func (client VirtualMachineScaleSetsClient) Get(resourceGroupName string, vmScaleSetName string) (result VirtualMachineScaleSet, err error) { req, err := client.GetPreparer(resourceGroupName, vmScaleSetName) if err != nil { - return result, autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetsClient", "Get", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Get", nil, "Failure preparing request") } resp, err := client.GetSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetsClient", "Get", "Failure sending request") + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Get", resp, "Failure sending request") } result, err = client.GetResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetsClient", "Get", "Failure responding to request") + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Get", resp, "Failure responding to request") } return @@ -334,28 +351,27 @@ // GetPreparer prepares the Get request. 
func (client VirtualMachineScaleSetsClient) GetPreparer(resourceGroupName string, vmScaleSetName string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - "vmScaleSetName": url.QueryEscape(vmScaleSetName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vmScaleSetName": autorest.Encode("path", vmScaleSetName), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineScaleSetsClient) GetSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetResponder handles the response to the Get request. 
The method always @@ -364,33 +380,32 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} return } -// GetInstanceView the operation to get a virtual machine scale set instance -// view. +// GetInstanceView displays status of a virtual machine scale set instance. // // resourceGroupName is the name of the resource group. vmScaleSetName is the // name of the virtual machine scale set. -func (client VirtualMachineScaleSetsClient) GetInstanceView(resourceGroupName string, vmScaleSetName string) (result VirtualMachineScaleSetInstanceView, ae error) { +func (client VirtualMachineScaleSetsClient) GetInstanceView(resourceGroupName string, vmScaleSetName string) (result VirtualMachineScaleSetInstanceView, err error) { req, err := client.GetInstanceViewPreparer(resourceGroupName, vmScaleSetName) if err != nil { - return result, autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetsClient", "GetInstanceView", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "GetInstanceView", nil, "Failure preparing request") } resp, err := client.GetInstanceViewSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetsClient", "GetInstanceView", "Failure sending request") + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "GetInstanceView", resp, "Failure sending request") } result, err = client.GetInstanceViewResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetsClient", "GetInstanceView", "Failure responding to request") + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", 
"GetInstanceView", resp, "Failure responding to request") } return @@ -399,28 +414,27 @@ // GetInstanceViewPreparer prepares the GetInstanceView request. func (client VirtualMachineScaleSetsClient) GetInstanceViewPreparer(resourceGroupName string, vmScaleSetName string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - "vmScaleSetName": url.QueryEscape(vmScaleSetName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vmScaleSetName": autorest.Encode("path", vmScaleSetName), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/instanceView"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/instanceView", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetInstanceViewSender sends the GetInstanceView request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineScaleSetsClient) GetInstanceViewSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetInstanceViewResponder handles the response to the GetInstanceView request. 
The method always @@ -429,32 +443,31 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} return } -// List the operation to list virtual machine scale sets under a resource -// group. +// List lists all virtual machine scale sets under a resource group. // // resourceGroupName is the name of the resource group. -func (client VirtualMachineScaleSetsClient) List(resourceGroupName string) (result VirtualMachineScaleSetListResult, ae error) { +func (client VirtualMachineScaleSetsClient) List(resourceGroupName string) (result VirtualMachineScaleSetListResult, err error) { req, err := client.ListPreparer(resourceGroupName) if err != nil { - return result, autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetsClient", "List", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "List", nil, "Failure preparing request") } resp, err := client.ListSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetsClient", "List", "Failure sending request") + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "List", resp, "Failure sending request") } result, err = client.ListResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetsClient", "List", "Failure responding to request") + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "List", resp, "Failure responding to request") } return @@ -463,27 +476,26 @@ // ListPreparer prepares the List request. 
func (client VirtualMachineScaleSetsClient) ListPreparer(resourceGroupName string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineScaleSetsClient) ListSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // ListResponder handles the response to the List request. The method always @@ -492,32 +504,56 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} return } -// ListAll gets the list of Virtual Machine Scale Sets in the subscription. -// Use nextLink property in the response to get the next page of Virtual -// Machine Scale Sets. 
Do this till nextLink is not null to fetch all the -// Virtual Machine Scale Sets. -func (client VirtualMachineScaleSetsClient) ListAll() (result VirtualMachineScaleSetListWithLinkResult, ae error) { +// ListNextResults retrieves the next set of results, if any. +func (client VirtualMachineScaleSetsClient) ListNextResults(lastResults VirtualMachineScaleSetListResult) (result VirtualMachineScaleSetListResult, err error) { + req, err := lastResults.VirtualMachineScaleSetListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "List", nil, "Failure preparing next results request request") + } + if req == nil { + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "List", resp, "Failure sending next results request request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "List", resp, "Failure responding to next results request request") + } + + return +} + +// ListAll lists all Virtual Machine Scale Sets in the subscription. Use +// nextLink property in the response to get the next page of Virtual Machine +// Scale Sets. Do this till nextLink is not null to fetch all the Virtual +// Machine Scale Sets. 
+func (client VirtualMachineScaleSetsClient) ListAll() (result VirtualMachineScaleSetListWithLinkResult, err error) { req, err := client.ListAllPreparer() if err != nil { - return result, autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetsClient", "ListAll", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "ListAll", nil, "Failure preparing request") } resp, err := client.ListAllSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetsClient", "ListAll", "Failure sending request") + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "ListAll", resp, "Failure sending request") } result, err = client.ListAllResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetsClient", "ListAll", "Failure responding to request") + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "ListAll", resp, "Failure responding to request") } return @@ -526,26 +562,25 @@ // ListAllPreparer prepares the ListAll request. 
func (client VirtualMachineScaleSetsClient) ListAllPreparer() (*http.Request, error) { pathParameters := map[string]interface{}{ - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/virtualMachineScaleSets"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/virtualMachineScaleSets", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // ListAllSender sends the ListAll request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineScaleSetsClient) ListAllSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // ListAllResponder handles the response to the ListAll request. The method always @@ -554,7 +589,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -562,10 +597,10 @@ } // ListAllNextResults retrieves the next set of results, if any. 
-func (client VirtualMachineScaleSetsClient) ListAllNextResults(lastResults VirtualMachineScaleSetListWithLinkResult) (result VirtualMachineScaleSetListWithLinkResult, ae error) { +func (client VirtualMachineScaleSetsClient) ListAllNextResults(lastResults VirtualMachineScaleSetListWithLinkResult) (result VirtualMachineScaleSetListWithLinkResult, err error) { req, err := lastResults.VirtualMachineScaleSetListWithLinkResultPreparer() if err != nil { - return result, autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetsClient", "ListAll", "Failure preparing next results request request") + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "ListAll", nil, "Failure preparing next results request request") } if req == nil { return @@ -574,37 +609,38 @@ resp, err := client.ListAllSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetsClient", "ListAll", "Failure sending next results request request") + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "ListAll", resp, "Failure sending next results request request") } result, err = client.ListAllResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetsClient", "ListAll", "Failure responding to next results request request") + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "ListAll", resp, "Failure responding to next results request request") } return } -// ListSkus the operation to list available skus for a virtual machine scale -// set. +// ListSkus displays available skus for your virtual machine scale set +// including the minimum and maximum vm instances allowed for a particular +// sku. // // resourceGroupName is the name of the resource group. vmScaleSetName is the // name of the virtual machine scale set. 
-func (client VirtualMachineScaleSetsClient) ListSkus(resourceGroupName string, vmScaleSetName string) (result VirtualMachineScaleSetListSkusResult, ae error) { +func (client VirtualMachineScaleSetsClient) ListSkus(resourceGroupName string, vmScaleSetName string) (result VirtualMachineScaleSetListSkusResult, err error) { req, err := client.ListSkusPreparer(resourceGroupName, vmScaleSetName) if err != nil { - return result, autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetsClient", "ListSkus", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "ListSkus", nil, "Failure preparing request") } resp, err := client.ListSkusSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetsClient", "ListSkus", "Failure sending request") + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "ListSkus", resp, "Failure sending request") } result, err = client.ListSkusResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetsClient", "ListSkus", "Failure responding to request") + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "ListSkus", resp, "Failure responding to request") } return @@ -613,28 +649,27 @@ // ListSkusPreparer prepares the ListSkus request. 
func (client VirtualMachineScaleSetsClient) ListSkusPreparer(resourceGroupName string, vmScaleSetName string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - "vmScaleSetName": url.QueryEscape(vmScaleSetName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vmScaleSetName": autorest.Encode("path", vmScaleSetName), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/skus"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/skus", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // ListSkusSender sends the ListSkus request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineScaleSetsClient) ListSkusSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // ListSkusResponder handles the response to the ListSkus request. 
The method always @@ -643,69 +678,98 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} return } -// PowerOff the operation to power off (stop) virtual machines in a virtual -// machine scale set. +// ListSkusNextResults retrieves the next set of results, if any. +func (client VirtualMachineScaleSetsClient) ListSkusNextResults(lastResults VirtualMachineScaleSetListSkusResult) (result VirtualMachineScaleSetListSkusResult, err error) { + req, err := lastResults.VirtualMachineScaleSetListSkusResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "ListSkus", nil, "Failure preparing next results request request") + } + if req == nil { + return + } + + resp, err := client.ListSkusSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "ListSkus", resp, "Failure sending next results request request") + } + + result, err = client.ListSkusResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "ListSkus", resp, "Failure responding to next results request request") + } + + return +} + +// PowerOff allows you to power off (stop) virtual machines in a virtual +// machine scale set. Note that resources are still attached and you are +// getting charged for the resources. Use deallocate to release resources. +// This method may poll for completion. Polling can be canceled by passing +// the cancel channel argument. The channel will be used to cancel polling +// and any outstanding HTTP requests. // // resourceGroupName is the name of the resource group. vmScaleSetName is the // name of the virtual machine scale set. 
vmInstanceIDs is the list of // virtual machine scale set instance IDs. -func (client VirtualMachineScaleSetsClient) PowerOff(resourceGroupName string, vmScaleSetName string, vmInstanceIDs *VirtualMachineScaleSetVMInstanceIDs) (result autorest.Response, ae error) { - req, err := client.PowerOffPreparer(resourceGroupName, vmScaleSetName, vmInstanceIDs) +func (client VirtualMachineScaleSetsClient) PowerOff(resourceGroupName string, vmScaleSetName string, vmInstanceIDs *VirtualMachineScaleSetVMInstanceIDs, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.PowerOffPreparer(resourceGroupName, vmScaleSetName, vmInstanceIDs, cancel) if err != nil { - return result, autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetsClient", "PowerOff", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "PowerOff", nil, "Failure preparing request") } resp, err := client.PowerOffSender(req) if err != nil { result.Response = resp - return result, autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetsClient", "PowerOff", "Failure sending request") + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "PowerOff", resp, "Failure sending request") } result, err = client.PowerOffResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetsClient", "PowerOff", "Failure responding to request") + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "PowerOff", resp, "Failure responding to request") } return } // PowerOffPreparer prepares the PowerOff request. 
-func (client VirtualMachineScaleSetsClient) PowerOffPreparer(resourceGroupName string, vmScaleSetName string, vmInstanceIDs *VirtualMachineScaleSetVMInstanceIDs) (*http.Request, error) { +func (client VirtualMachineScaleSetsClient) PowerOffPreparer(resourceGroupName string, vmScaleSetName string, vmInstanceIDs *VirtualMachineScaleSetVMInstanceIDs, cancel <-chan struct{}) (*http.Request, error) { pathParameters := map[string]interface{}{ - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - "vmScaleSetName": url.QueryEscape(vmScaleSetName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vmScaleSetName": autorest.Encode("path", vmScaleSetName), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } preparer := autorest.CreatePreparer( autorest.AsJSON(), autorest.AsPost(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/poweroff"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/poweroff", pathParameters), autorest.WithQueryParameters(queryParameters)) if vmInstanceIDs != nil { preparer = autorest.DecoratePreparer(preparer, autorest.WithJSON(vmInstanceIDs)) } - return preparer.Prepare(&http.Request{}) + return preparer.Prepare(&http.Request{Cancel: cancel}) } // PowerOffSender sends the PowerOff request. The method will close the // http.Response Body if it receives an error. 
func (client VirtualMachineScaleSetsClient) PowerOffSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK, http.StatusAccepted) + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) } // PowerOffResponder handles the response to the PowerOff request. The method always @@ -714,68 +778,139 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} + +// Reimage allows you to re-image(update the version of the installed +// operating system) virtual machines in a virtual machine scale set. This +// method may poll for completion. Polling can be canceled by passing the +// cancel channel argument. The channel will be used to cancel polling and +// any outstanding HTTP requests. +// +// resourceGroupName is the name of the resource group. vmScaleSetName is the +// name of the virtual machine scale set. +func (client VirtualMachineScaleSetsClient) Reimage(resourceGroupName string, vmScaleSetName string, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.ReimagePreparer(resourceGroupName, vmScaleSetName, cancel) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Reimage", nil, "Failure preparing request") + } + + resp, err := client.ReimageSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Reimage", resp, "Failure sending request") + } + + result, err = client.ReimageResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Reimage", resp, "Failure responding to request") + } + + return +} + +// ReimagePreparer prepares the Reimage request. 
+func (client VirtualMachineScaleSetsClient) ReimagePreparer(resourceGroupName string, vmScaleSetName string, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vmScaleSetName": autorest.Encode("path", vmScaleSetName), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/reimage", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// ReimageSender sends the Reimage request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachineScaleSetsClient) ReimageSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// ReimageResponder handles the response to the Reimage request. The method always +// closes the http.Response Body. +func (client VirtualMachineScaleSetsClient) ReimageResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), autorest.ByClosing()) result.Response = resp return } -// Restart the operation to restart virtual machines in a virtual machine -// scale set. +// Restart allows you to restart virtual machines in a virtual machine scale +// set. This method may poll for completion. Polling can be canceled by +// passing the cancel channel argument. 
The channel will be used to cancel +// polling and any outstanding HTTP requests. // // resourceGroupName is the name of the resource group. vmScaleSetName is the // name of the virtual machine scale set. vmInstanceIDs is the list of // virtual machine scale set instance IDs. -func (client VirtualMachineScaleSetsClient) Restart(resourceGroupName string, vmScaleSetName string, vmInstanceIDs *VirtualMachineScaleSetVMInstanceIDs) (result autorest.Response, ae error) { - req, err := client.RestartPreparer(resourceGroupName, vmScaleSetName, vmInstanceIDs) +func (client VirtualMachineScaleSetsClient) Restart(resourceGroupName string, vmScaleSetName string, vmInstanceIDs *VirtualMachineScaleSetVMInstanceIDs, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.RestartPreparer(resourceGroupName, vmScaleSetName, vmInstanceIDs, cancel) if err != nil { - return result, autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetsClient", "Restart", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Restart", nil, "Failure preparing request") } resp, err := client.RestartSender(req) if err != nil { result.Response = resp - return result, autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetsClient", "Restart", "Failure sending request") + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Restart", resp, "Failure sending request") } result, err = client.RestartResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetsClient", "Restart", "Failure responding to request") + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Restart", resp, "Failure responding to request") } return } // RestartPreparer prepares the Restart request. 
-func (client VirtualMachineScaleSetsClient) RestartPreparer(resourceGroupName string, vmScaleSetName string, vmInstanceIDs *VirtualMachineScaleSetVMInstanceIDs) (*http.Request, error) { +func (client VirtualMachineScaleSetsClient) RestartPreparer(resourceGroupName string, vmScaleSetName string, vmInstanceIDs *VirtualMachineScaleSetVMInstanceIDs, cancel <-chan struct{}) (*http.Request, error) { pathParameters := map[string]interface{}{ - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - "vmScaleSetName": url.QueryEscape(vmScaleSetName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vmScaleSetName": autorest.Encode("path", vmScaleSetName), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } preparer := autorest.CreatePreparer( autorest.AsJSON(), autorest.AsPost(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/restart"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/restart", pathParameters), autorest.WithQueryParameters(queryParameters)) if vmInstanceIDs != nil { preparer = autorest.DecoratePreparer(preparer, autorest.WithJSON(vmInstanceIDs)) } - return preparer.Prepare(&http.Request{}) + return preparer.Prepare(&http.Request{Cancel: cancel}) } // RestartSender sends the Restart request. The method will close the // http.Response Body if it receives an error. 
func (client VirtualMachineScaleSetsClient) RestartSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK, http.StatusAccepted) + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) } // RestartResponder handles the response to the Restart request. The method always @@ -784,68 +919,71 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), autorest.ByClosing()) result.Response = resp return } -// Start the operation to start virtual machines in a virtual machine scale -// set. +// Start allows you to start virtual machines in a virtual machine scale set. +// This method may poll for completion. Polling can be canceled by passing +// the cancel channel argument. The channel will be used to cancel polling +// and any outstanding HTTP requests. // // resourceGroupName is the name of the resource group. vmScaleSetName is the // name of the virtual machine scale set. vmInstanceIDs is the list of // virtual machine scale set instance IDs. 
-func (client VirtualMachineScaleSetsClient) Start(resourceGroupName string, vmScaleSetName string, vmInstanceIDs *VirtualMachineScaleSetVMInstanceIDs) (result autorest.Response, ae error) { - req, err := client.StartPreparer(resourceGroupName, vmScaleSetName, vmInstanceIDs) +func (client VirtualMachineScaleSetsClient) Start(resourceGroupName string, vmScaleSetName string, vmInstanceIDs *VirtualMachineScaleSetVMInstanceIDs, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.StartPreparer(resourceGroupName, vmScaleSetName, vmInstanceIDs, cancel) if err != nil { - return result, autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetsClient", "Start", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Start", nil, "Failure preparing request") } resp, err := client.StartSender(req) if err != nil { result.Response = resp - return result, autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetsClient", "Start", "Failure sending request") + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Start", resp, "Failure sending request") } result, err = client.StartResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetsClient", "Start", "Failure responding to request") + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Start", resp, "Failure responding to request") } return } // StartPreparer prepares the Start request. 
-func (client VirtualMachineScaleSetsClient) StartPreparer(resourceGroupName string, vmScaleSetName string, vmInstanceIDs *VirtualMachineScaleSetVMInstanceIDs) (*http.Request, error) { +func (client VirtualMachineScaleSetsClient) StartPreparer(resourceGroupName string, vmScaleSetName string, vmInstanceIDs *VirtualMachineScaleSetVMInstanceIDs, cancel <-chan struct{}) (*http.Request, error) { pathParameters := map[string]interface{}{ - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - "vmScaleSetName": url.QueryEscape(vmScaleSetName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vmScaleSetName": autorest.Encode("path", vmScaleSetName), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } preparer := autorest.CreatePreparer( autorest.AsJSON(), autorest.AsPost(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/start"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/start", pathParameters), autorest.WithQueryParameters(queryParameters)) if vmInstanceIDs != nil { preparer = autorest.DecoratePreparer(preparer, autorest.WithJSON(vmInstanceIDs)) } - return preparer.Prepare(&http.Request{}) + return preparer.Prepare(&http.Request{Cancel: cancel}) } // StartSender sends the Start request. The method will close the // http.Response Body if it receives an error. 
func (client VirtualMachineScaleSetsClient) StartSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK, http.StatusAccepted) + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) } // StartResponder handles the response to the Start request. The method always @@ -854,64 +992,68 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), autorest.ByClosing()) result.Response = resp return } -// UpdateInstances the operation to manually upgrade virtual machines in a -// virtual machine scale set. +// UpdateInstances allows you to manually upgrade virtual machines in a +// virtual machine scale set. This method may poll for completion. Polling +// can be canceled by passing the cancel channel argument. The channel will +// be used to cancel polling and any outstanding HTTP requests. // // resourceGroupName is the name of the resource group. vmScaleSetName is the // name of the virtual machine scale set. vmInstanceIDs is the list of // virtual machine scale set instance IDs. 
-func (client VirtualMachineScaleSetsClient) UpdateInstances(resourceGroupName string, vmScaleSetName string, vmInstanceIDs VirtualMachineScaleSetVMInstanceRequiredIDs) (result autorest.Response, ae error) { - req, err := client.UpdateInstancesPreparer(resourceGroupName, vmScaleSetName, vmInstanceIDs) +func (client VirtualMachineScaleSetsClient) UpdateInstances(resourceGroupName string, vmScaleSetName string, vmInstanceIDs VirtualMachineScaleSetVMInstanceRequiredIDs, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.UpdateInstancesPreparer(resourceGroupName, vmScaleSetName, vmInstanceIDs, cancel) if err != nil { - return result, autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetsClient", "UpdateInstances", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "UpdateInstances", nil, "Failure preparing request") } resp, err := client.UpdateInstancesSender(req) if err != nil { result.Response = resp - return result, autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetsClient", "UpdateInstances", "Failure sending request") + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "UpdateInstances", resp, "Failure sending request") } result, err = client.UpdateInstancesResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetsClient", "UpdateInstances", "Failure responding to request") + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "UpdateInstances", resp, "Failure responding to request") } return } // UpdateInstancesPreparer prepares the UpdateInstances request. 
-func (client VirtualMachineScaleSetsClient) UpdateInstancesPreparer(resourceGroupName string, vmScaleSetName string, vmInstanceIDs VirtualMachineScaleSetVMInstanceRequiredIDs) (*http.Request, error) { +func (client VirtualMachineScaleSetsClient) UpdateInstancesPreparer(resourceGroupName string, vmScaleSetName string, vmInstanceIDs VirtualMachineScaleSetVMInstanceRequiredIDs, cancel <-chan struct{}) (*http.Request, error) { pathParameters := map[string]interface{}{ - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - "vmScaleSetName": url.QueryEscape(vmScaleSetName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vmScaleSetName": autorest.Encode("path", vmScaleSetName), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, + preparer := autorest.CreatePreparer( autorest.AsJSON(), autorest.AsPost(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/manualupgrade"), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/manualupgrade", pathParameters), autorest.WithJSON(vmInstanceIDs), - autorest.WithPathParameters(pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) } // UpdateInstancesSender sends the UpdateInstances request. The method will close the // http.Response Body if it receives an error. 
func (client VirtualMachineScaleSetsClient) UpdateInstancesSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK, http.StatusAccepted) + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) } // UpdateInstancesResponder handles the response to the UpdateInstances request. The method always @@ -920,7 +1062,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), autorest.ByClosing()) result.Response = resp return diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachinescalesetvms.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachinescalesetvms.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachinescalesetvms.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachinescalesetvms.go 2016-10-13 14:32:06.000000000 +0000 @@ -14,14 +14,14 @@ // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 // Changes may cause incorrect behavior and will be lost if the code is // regenerated. import ( - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" "net/http" - "net/url" ) // VirtualMachineScaleSetVMsClient is the the Compute Management Client. @@ -41,57 +41,63 @@ return VirtualMachineScaleSetVMsClient{NewWithBaseURI(baseURI, subscriptionID)} } -// Deallocate the operation to deallocate a virtual machine scale set. 
+// Deallocate allows you to deallocate a virtual machine scale set virtual +// machine. Shuts down the virtual machine and releases the compute +// resources. You are not billed for the compute resources that this virtual +// machine uses. This method may poll for completion. Polling can be canceled +// by passing the cancel channel argument. The channel will be used to cancel +// polling and any outstanding HTTP requests. // // resourceGroupName is the name of the resource group. vmScaleSetName is the // name of the virtual machine scale set. instanceID is the instance id of // the virtual machine. -func (client VirtualMachineScaleSetVMsClient) Deallocate(resourceGroupName string, vmScaleSetName string, instanceID string) (result autorest.Response, ae error) { - req, err := client.DeallocatePreparer(resourceGroupName, vmScaleSetName, instanceID) +func (client VirtualMachineScaleSetVMsClient) Deallocate(resourceGroupName string, vmScaleSetName string, instanceID string, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.DeallocatePreparer(resourceGroupName, vmScaleSetName, instanceID, cancel) if err != nil { - return result, autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetVMsClient", "Deallocate", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Deallocate", nil, "Failure preparing request") } resp, err := client.DeallocateSender(req) if err != nil { result.Response = resp - return result, autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetVMsClient", "Deallocate", "Failure sending request") + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Deallocate", resp, "Failure sending request") } result, err = client.DeallocateResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetVMsClient", "Deallocate", "Failure responding to request") + err = 
autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Deallocate", resp, "Failure responding to request") } return } // DeallocatePreparer prepares the Deallocate request. -func (client VirtualMachineScaleSetVMsClient) DeallocatePreparer(resourceGroupName string, vmScaleSetName string, instanceID string) (*http.Request, error) { +func (client VirtualMachineScaleSetVMsClient) DeallocatePreparer(resourceGroupName string, vmScaleSetName string, instanceID string, cancel <-chan struct{}) (*http.Request, error) { pathParameters := map[string]interface{}{ - "instanceId": url.QueryEscape(instanceID), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - "vmScaleSetName": url.QueryEscape(vmScaleSetName), + "instanceId": autorest.Encode("path", instanceID), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vmScaleSetName": autorest.Encode("path", vmScaleSetName), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsPost(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}/deallocate"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}/deallocate", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) } // DeallocateSender sends the Deallocate request. 
The method will close the // http.Response Body if it receives an error. func (client VirtualMachineScaleSetVMsClient) DeallocateSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK, http.StatusAccepted) + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) } // DeallocateResponder handles the response to the Deallocate request. The method always @@ -100,63 +106,67 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), autorest.ByClosing()) result.Response = resp return } -// Delete the operation to delete a virtual machine scale set. +// Delete allows you to delete a virtual machine scale set. This method may +// poll for completion. Polling can be canceled by passing the cancel channel +// argument. The channel will be used to cancel polling and any outstanding +// HTTP requests. // // resourceGroupName is the name of the resource group. vmScaleSetName is the // name of the virtual machine scale set. instanceID is the instance id of // the virtual machine. 
-func (client VirtualMachineScaleSetVMsClient) Delete(resourceGroupName string, vmScaleSetName string, instanceID string) (result autorest.Response, ae error) { - req, err := client.DeletePreparer(resourceGroupName, vmScaleSetName, instanceID) +func (client VirtualMachineScaleSetVMsClient) Delete(resourceGroupName string, vmScaleSetName string, instanceID string, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.DeletePreparer(resourceGroupName, vmScaleSetName, instanceID, cancel) if err != nil { - return result, autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetVMsClient", "Delete", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Delete", nil, "Failure preparing request") } resp, err := client.DeleteSender(req) if err != nil { result.Response = resp - return result, autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetVMsClient", "Delete", "Failure sending request") + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Delete", resp, "Failure sending request") } result, err = client.DeleteResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetVMsClient", "Delete", "Failure responding to request") + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Delete", resp, "Failure responding to request") } return } // DeletePreparer prepares the Delete request. 
-func (client VirtualMachineScaleSetVMsClient) DeletePreparer(resourceGroupName string, vmScaleSetName string, instanceID string) (*http.Request, error) { +func (client VirtualMachineScaleSetVMsClient) DeletePreparer(resourceGroupName string, vmScaleSetName string, instanceID string, cancel <-chan struct{}) (*http.Request, error) { pathParameters := map[string]interface{}{ - "instanceId": url.QueryEscape(instanceID), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - "vmScaleSetName": url.QueryEscape(vmScaleSetName), + "instanceId": autorest.Encode("path", instanceID), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vmScaleSetName": autorest.Encode("path", vmScaleSetName), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsDelete(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) } // DeleteSender sends the Delete request. The method will close the // http.Response Body if it receives an error. 
func (client VirtualMachineScaleSetVMsClient) DeleteSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK, http.StatusNoContent, http.StatusAccepted) + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) } // DeleteResponder handles the response to the Delete request. The method always @@ -165,32 +175,32 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent, http.StatusAccepted), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), autorest.ByClosing()) result.Response = resp return } -// Get the operation to get a virtual machine scale set virtual machine. +// Get displays information about a virtual machine scale set virtual machine. // // resourceGroupName is the name of the resource group. vmScaleSetName is the // name of the virtual machine scale set. instanceID is the instance id of // the virtual machine. 
-func (client VirtualMachineScaleSetVMsClient) Get(resourceGroupName string, vmScaleSetName string, instanceID string) (result VirtualMachineScaleSetVM, ae error) { +func (client VirtualMachineScaleSetVMsClient) Get(resourceGroupName string, vmScaleSetName string, instanceID string) (result VirtualMachineScaleSetVM, err error) { req, err := client.GetPreparer(resourceGroupName, vmScaleSetName, instanceID) if err != nil { - return result, autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetVMsClient", "Get", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Get", nil, "Failure preparing request") } resp, err := client.GetSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetVMsClient", "Get", "Failure sending request") + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Get", resp, "Failure sending request") } result, err = client.GetResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetVMsClient", "Get", "Failure responding to request") + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Get", resp, "Failure responding to request") } return @@ -199,29 +209,28 @@ // GetPreparer prepares the Get request. 
func (client VirtualMachineScaleSetVMsClient) GetPreparer(resourceGroupName string, vmScaleSetName string, instanceID string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "instanceId": url.QueryEscape(instanceID), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - "vmScaleSetName": url.QueryEscape(vmScaleSetName), + "instanceId": autorest.Encode("path", instanceID), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vmScaleSetName": autorest.Encode("path", vmScaleSetName), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineScaleSetVMsClient) GetSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetResponder handles the response to the Get request. 
The method always @@ -230,34 +239,34 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} return } -// GetInstanceView the operation to get a virtual machine scale set virtual +// GetInstanceView displays the status of a virtual machine scale set virtual // machine. // // resourceGroupName is the name of the resource group. vmScaleSetName is the // name of the virtual machine scale set. instanceID is the instance id of // the virtual machine. -func (client VirtualMachineScaleSetVMsClient) GetInstanceView(resourceGroupName string, vmScaleSetName string, instanceID string) (result VirtualMachineScaleSetVMInstanceView, ae error) { +func (client VirtualMachineScaleSetVMsClient) GetInstanceView(resourceGroupName string, vmScaleSetName string, instanceID string) (result VirtualMachineScaleSetVMInstanceView, err error) { req, err := client.GetInstanceViewPreparer(resourceGroupName, vmScaleSetName, instanceID) if err != nil { - return result, autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetVMsClient", "GetInstanceView", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "GetInstanceView", nil, "Failure preparing request") } resp, err := client.GetInstanceViewSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetVMsClient", "GetInstanceView", "Failure sending request") + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "GetInstanceView", resp, "Failure sending request") } result, err = client.GetInstanceViewResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetVMsClient", 
"GetInstanceView", "Failure responding to request") + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "GetInstanceView", resp, "Failure responding to request") } return @@ -266,29 +275,28 @@ // GetInstanceViewPreparer prepares the GetInstanceView request. func (client VirtualMachineScaleSetVMsClient) GetInstanceViewPreparer(resourceGroupName string, vmScaleSetName string, instanceID string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "instanceId": url.QueryEscape(instanceID), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - "vmScaleSetName": url.QueryEscape(vmScaleSetName), + "instanceId": autorest.Encode("path", instanceID), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vmScaleSetName": autorest.Encode("path", vmScaleSetName), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}/instanceView"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}/instanceView", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetInstanceViewSender sends the GetInstanceView request. The method will close the // http.Response Body if it receives an error. 
func (client VirtualMachineScaleSetVMsClient) GetInstanceViewSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetInstanceViewResponder handles the response to the GetInstanceView request. The method always @@ -297,35 +305,35 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} return } -// List the operation to list virtual machine scale sets VMs. +// List lists all virtual machines in a VM scale sets. // // resourceGroupName is the name of the resource group. // virtualMachineScaleSetName is the name of the virtual machine scale set. // filter is the filter to apply on the operation. selectParameter is the // list parameters. expand is the expand expression to apply on the // operation. 
-func (client VirtualMachineScaleSetVMsClient) List(resourceGroupName string, virtualMachineScaleSetName string, filter string, selectParameter string, expand string) (result VirtualMachineScaleSetVMListResult, ae error) { +func (client VirtualMachineScaleSetVMsClient) List(resourceGroupName string, virtualMachineScaleSetName string, filter string, selectParameter string, expand string) (result VirtualMachineScaleSetVMListResult, err error) { req, err := client.ListPreparer(resourceGroupName, virtualMachineScaleSetName, filter, selectParameter, expand) if err != nil { - return result, autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetVMsClient", "List", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "List", nil, "Failure preparing request") } resp, err := client.ListSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetVMsClient", "List", "Failure sending request") + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "List", resp, "Failure sending request") } result, err = client.ListResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetVMsClient", "List", "Failure responding to request") + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "List", resp, "Failure responding to request") } return @@ -334,37 +342,36 @@ // ListPreparer prepares the List request. 
func (client VirtualMachineScaleSetVMsClient) ListPreparer(resourceGroupName string, virtualMachineScaleSetName string, filter string, selectParameter string, expand string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - "virtualMachineScaleSetName": url.QueryEscape(virtualMachineScaleSetName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "virtualMachineScaleSetName": autorest.Encode("path", virtualMachineScaleSetName), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } if len(filter) > 0 { - queryParameters["$filter"] = filter + queryParameters["$filter"] = autorest.Encode("query", filter) } if len(selectParameter) > 0 { - queryParameters["$select"] = selectParameter + queryParameters["$select"] = autorest.Encode("query", selectParameter) } if len(expand) > 0 { - queryParameters["$expand"] = expand + queryParameters["$expand"] = autorest.Encode("query", expand) } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{virtualMachineScaleSetName}/virtualMachines"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{virtualMachineScaleSetName}/virtualMachines", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. 
func (client VirtualMachineScaleSetVMsClient) ListSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // ListResponder handles the response to the List request. The method always @@ -373,64 +380,92 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} return } -// PowerOff the operation to power off (stop) a virtual machine scale set. +// ListNextResults retrieves the next set of results, if any. +func (client VirtualMachineScaleSetVMsClient) ListNextResults(lastResults VirtualMachineScaleSetVMListResult) (result VirtualMachineScaleSetVMListResult, err error) { + req, err := lastResults.VirtualMachineScaleSetVMListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "List", nil, "Failure preparing next results request request") + } + if req == nil { + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "List", resp, "Failure sending next results request request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "List", resp, "Failure responding to next results request request") + } + + return +} + +// PowerOff allows you to power off (stop) a virtual machine in a VM scale +// set. This method may poll for completion. Polling can be canceled by +// passing the cancel channel argument. The channel will be used to cancel +// polling and any outstanding HTTP requests. // // resourceGroupName is the name of the resource group. 
vmScaleSetName is the // name of the virtual machine scale set. instanceID is the instance id of // the virtual machine. -func (client VirtualMachineScaleSetVMsClient) PowerOff(resourceGroupName string, vmScaleSetName string, instanceID string) (result autorest.Response, ae error) { - req, err := client.PowerOffPreparer(resourceGroupName, vmScaleSetName, instanceID) +func (client VirtualMachineScaleSetVMsClient) PowerOff(resourceGroupName string, vmScaleSetName string, instanceID string, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.PowerOffPreparer(resourceGroupName, vmScaleSetName, instanceID, cancel) if err != nil { - return result, autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetVMsClient", "PowerOff", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "PowerOff", nil, "Failure preparing request") } resp, err := client.PowerOffSender(req) if err != nil { result.Response = resp - return result, autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetVMsClient", "PowerOff", "Failure sending request") + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "PowerOff", resp, "Failure sending request") } result, err = client.PowerOffResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetVMsClient", "PowerOff", "Failure responding to request") + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "PowerOff", resp, "Failure responding to request") } return } // PowerOffPreparer prepares the PowerOff request. 
-func (client VirtualMachineScaleSetVMsClient) PowerOffPreparer(resourceGroupName string, vmScaleSetName string, instanceID string) (*http.Request, error) { +func (client VirtualMachineScaleSetVMsClient) PowerOffPreparer(resourceGroupName string, vmScaleSetName string, instanceID string, cancel <-chan struct{}) (*http.Request, error) { pathParameters := map[string]interface{}{ - "instanceId": url.QueryEscape(instanceID), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - "vmScaleSetName": url.QueryEscape(vmScaleSetName), + "instanceId": autorest.Encode("path", instanceID), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vmScaleSetName": autorest.Encode("path", vmScaleSetName), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsPost(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}/poweroff"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}/poweroff", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) } // PowerOffSender sends the PowerOff request. The method will close the // http.Response Body if it receives an error. 
func (client VirtualMachineScaleSetVMsClient) PowerOffSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK, http.StatusAccepted) + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) } // PowerOffResponder handles the response to the PowerOff request. The method always @@ -439,63 +474,137 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} + +// Reimage allows you to re-image(update the version of the installed +// operating system) a virtual machine scale set instance. This method may +// poll for completion. Polling can be canceled by passing the cancel channel +// argument. The channel will be used to cancel polling and any outstanding +// HTTP requests. +// +// resourceGroupName is the name of the resource group. vmScaleSetName is the +// name of the virtual machine scale set. instanceID is the instance id of +// the virtual machine. 
+func (client VirtualMachineScaleSetVMsClient) Reimage(resourceGroupName string, vmScaleSetName string, instanceID string, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.ReimagePreparer(resourceGroupName, vmScaleSetName, instanceID, cancel) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Reimage", nil, "Failure preparing request") + } + + resp, err := client.ReimageSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Reimage", resp, "Failure sending request") + } + + result, err = client.ReimageResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Reimage", resp, "Failure responding to request") + } + + return +} + +// ReimagePreparer prepares the Reimage request. +func (client VirtualMachineScaleSetVMsClient) ReimagePreparer(resourceGroupName string, vmScaleSetName string, instanceID string, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "instanceId": autorest.Encode("path", instanceID), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vmScaleSetName": autorest.Encode("path", vmScaleSetName), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}/reimage", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// ReimageSender sends the Reimage request. 
The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachineScaleSetVMsClient) ReimageSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// ReimageResponder handles the response to the Reimage request. The method always +// closes the http.Response Body. +func (client VirtualMachineScaleSetVMsClient) ReimageResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), autorest.ByClosing()) result.Response = resp return } -// Restart the operation to restart a virtual machine scale set. +// Restart allows you to restart a virtual machine in a VM scale set. This +// method may poll for completion. Polling can be canceled by passing the +// cancel channel argument. The channel will be used to cancel polling and +// any outstanding HTTP requests. // // resourceGroupName is the name of the resource group. vmScaleSetName is the // name of the virtual machine scale set. instanceID is the instance id of // the virtual machine. 
-func (client VirtualMachineScaleSetVMsClient) Restart(resourceGroupName string, vmScaleSetName string, instanceID string) (result autorest.Response, ae error) { - req, err := client.RestartPreparer(resourceGroupName, vmScaleSetName, instanceID) +func (client VirtualMachineScaleSetVMsClient) Restart(resourceGroupName string, vmScaleSetName string, instanceID string, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.RestartPreparer(resourceGroupName, vmScaleSetName, instanceID, cancel) if err != nil { - return result, autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetVMsClient", "Restart", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Restart", nil, "Failure preparing request") } resp, err := client.RestartSender(req) if err != nil { result.Response = resp - return result, autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetVMsClient", "Restart", "Failure sending request") + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Restart", resp, "Failure sending request") } result, err = client.RestartResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetVMsClient", "Restart", "Failure responding to request") + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Restart", resp, "Failure responding to request") } return } // RestartPreparer prepares the Restart request. 
-func (client VirtualMachineScaleSetVMsClient) RestartPreparer(resourceGroupName string, vmScaleSetName string, instanceID string) (*http.Request, error) { +func (client VirtualMachineScaleSetVMsClient) RestartPreparer(resourceGroupName string, vmScaleSetName string, instanceID string, cancel <-chan struct{}) (*http.Request, error) { pathParameters := map[string]interface{}{ - "instanceId": url.QueryEscape(instanceID), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - "vmScaleSetName": url.QueryEscape(vmScaleSetName), + "instanceId": autorest.Encode("path", instanceID), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vmScaleSetName": autorest.Encode("path", vmScaleSetName), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsPost(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}/restart"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}/restart", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) } // RestartSender sends the Restart request. The method will close the // http.Response Body if it receives an error. 
func (client VirtualMachineScaleSetVMsClient) RestartSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK, http.StatusAccepted) + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) } // RestartResponder handles the response to the Restart request. The method always @@ -504,63 +613,67 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), autorest.ByClosing()) result.Response = resp return } -// Start the operation to start a virtual machine scale set. +// Start allows you to start a virtual machine in a VM scale set. This method +// may poll for completion. Polling can be canceled by passing the cancel +// channel argument. The channel will be used to cancel polling and any +// outstanding HTTP requests. // // resourceGroupName is the name of the resource group. vmScaleSetName is the // name of the virtual machine scale set. instanceID is the instance id of // the virtual machine. 
-func (client VirtualMachineScaleSetVMsClient) Start(resourceGroupName string, vmScaleSetName string, instanceID string) (result autorest.Response, ae error) { - req, err := client.StartPreparer(resourceGroupName, vmScaleSetName, instanceID) +func (client VirtualMachineScaleSetVMsClient) Start(resourceGroupName string, vmScaleSetName string, instanceID string, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.StartPreparer(resourceGroupName, vmScaleSetName, instanceID, cancel) if err != nil { - return result, autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetVMsClient", "Start", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Start", nil, "Failure preparing request") } resp, err := client.StartSender(req) if err != nil { result.Response = resp - return result, autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetVMsClient", "Start", "Failure sending request") + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Start", resp, "Failure sending request") } result, err = client.StartResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "compute/VirtualMachineScaleSetVMsClient", "Start", "Failure responding to request") + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Start", resp, "Failure responding to request") } return } // StartPreparer prepares the Start request. 
-func (client VirtualMachineScaleSetVMsClient) StartPreparer(resourceGroupName string, vmScaleSetName string, instanceID string) (*http.Request, error) { +func (client VirtualMachineScaleSetVMsClient) StartPreparer(resourceGroupName string, vmScaleSetName string, instanceID string, cancel <-chan struct{}) (*http.Request, error) { pathParameters := map[string]interface{}{ - "instanceId": url.QueryEscape(instanceID), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - "vmScaleSetName": url.QueryEscape(vmScaleSetName), + "instanceId": autorest.Encode("path", instanceID), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vmScaleSetName": autorest.Encode("path", vmScaleSetName), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsPost(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}/start"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}/start", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) } // StartSender sends the Start request. The method will close the // http.Response Body if it receives an error. 
func (client VirtualMachineScaleSetVMsClient) StartSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK, http.StatusAccepted) + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) } // StartResponder handles the response to the Start request. The method always @@ -569,7 +682,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), autorest.ByClosing()) result.Response = resp return diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachines.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachines.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachines.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachines.go 2016-10-13 14:32:06.000000000 +0000 @@ -14,14 +14,14 @@ // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 // Changes may cause incorrect behavior and will be lost if the code is // regenerated. import ( - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" "net/http" - "net/url" ) // VirtualMachinesClient is the the Compute Management Client. @@ -41,190 +41,201 @@ return VirtualMachinesClient{NewWithBaseURI(baseURI, subscriptionID)} } -// Capture captures the VM by copying VirtualHardDisks of the VM and outputs a -// template that can be used to create similar VMs. 
+// Capture captures the VM by copying virtual hard disks of the VM and outputs +// a template that can be used to create similar VMs. This method may poll +// for completion. Polling can be canceled by passing the cancel channel +// argument. The channel will be used to cancel polling and any outstanding +// HTTP requests. // // resourceGroupName is the name of the resource group. vmName is the name of // the virtual machine. parameters is parameters supplied to the Capture // Virtual Machine operation. -func (client VirtualMachinesClient) Capture(resourceGroupName string, vmName string, parameters VirtualMachineCaptureParameters) (result VirtualMachineCaptureResult, ae error) { - req, err := client.CapturePreparer(resourceGroupName, vmName, parameters) +func (client VirtualMachinesClient) Capture(resourceGroupName string, vmName string, parameters VirtualMachineCaptureParameters, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.CapturePreparer(resourceGroupName, vmName, parameters, cancel) if err != nil { - return result, autorest.NewErrorWithError(err, "compute/VirtualMachinesClient", "Capture", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Capture", nil, "Failure preparing request") } resp, err := client.CaptureSender(req) if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "compute/VirtualMachinesClient", "Capture", "Failure sending request") + result.Response = resp + return result, autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Capture", resp, "Failure sending request") } result, err = client.CaptureResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "compute/VirtualMachinesClient", "Capture", "Failure responding to request") + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Capture", resp, "Failure responding to request") } return 
} // CapturePreparer prepares the Capture request. -func (client VirtualMachinesClient) CapturePreparer(resourceGroupName string, vmName string, parameters VirtualMachineCaptureParameters) (*http.Request, error) { +func (client VirtualMachinesClient) CapturePreparer(resourceGroupName string, vmName string, parameters VirtualMachineCaptureParameters, cancel <-chan struct{}) (*http.Request, error) { pathParameters := map[string]interface{}{ - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - "vmName": url.QueryEscape(vmName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vmName": autorest.Encode("path", vmName), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, + preparer := autorest.CreatePreparer( autorest.AsJSON(), autorest.AsPost(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/capture"), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/capture", pathParameters), autorest.WithJSON(parameters), - autorest.WithPathParameters(pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) } // CaptureSender sends the Capture request. The method will close the // http.Response Body if it receives an error. 
func (client VirtualMachinesClient) CaptureSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK, http.StatusAccepted) + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) } // CaptureResponder handles the response to the Capture request. The method always // closes the http.Response Body. -func (client VirtualMachinesClient) CaptureResponder(resp *http.Response) (result VirtualMachineCaptureResult, err error) { +func (client VirtualMachinesClient) CaptureResponder(resp *http.Response) (result autorest.Response, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), - autorest.ByUnmarshallingJSON(&result), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} + result.Response = resp return } -// CreateOrUpdate the operation to create or update a virtual machine. +// CreateOrUpdate the operation to create or update a virtual machine. This +// method may poll for completion. Polling can be canceled by passing the +// cancel channel argument. The channel will be used to cancel polling and +// any outstanding HTTP requests. // // resourceGroupName is the name of the resource group. vmName is the name of // the virtual machine. parameters is parameters supplied to the Create // Virtual Machine operation. 
-func (client VirtualMachinesClient) CreateOrUpdate(resourceGroupName string, vmName string, parameters VirtualMachine) (result VirtualMachine, ae error) { - req, err := client.CreateOrUpdatePreparer(resourceGroupName, vmName, parameters) +func (client VirtualMachinesClient) CreateOrUpdate(resourceGroupName string, vmName string, parameters VirtualMachine, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.CreateOrUpdatePreparer(resourceGroupName, vmName, parameters, cancel) if err != nil { - return result, autorest.NewErrorWithError(err, "compute/VirtualMachinesClient", "CreateOrUpdate", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "CreateOrUpdate", nil, "Failure preparing request") } resp, err := client.CreateOrUpdateSender(req) if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "compute/VirtualMachinesClient", "CreateOrUpdate", "Failure sending request") + result.Response = resp + return result, autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "CreateOrUpdate", resp, "Failure sending request") } result, err = client.CreateOrUpdateResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "compute/VirtualMachinesClient", "CreateOrUpdate", "Failure responding to request") + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "CreateOrUpdate", resp, "Failure responding to request") } return } // CreateOrUpdatePreparer prepares the CreateOrUpdate request. 
-func (client VirtualMachinesClient) CreateOrUpdatePreparer(resourceGroupName string, vmName string, parameters VirtualMachine) (*http.Request, error) { +func (client VirtualMachinesClient) CreateOrUpdatePreparer(resourceGroupName string, vmName string, parameters VirtualMachine, cancel <-chan struct{}) (*http.Request, error) { pathParameters := map[string]interface{}{ - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - "vmName": url.QueryEscape(vmName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vmName": autorest.Encode("path", vmName), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, + preparer := autorest.CreatePreparer( autorest.AsJSON(), autorest.AsPut(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}"), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}", pathParameters), autorest.WithJSON(parameters), - autorest.WithPathParameters(pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) } // CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachinesClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusCreated, http.StatusOK) + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) } // CreateOrUpdateResponder handles the response to the CreateOrUpdate request. 
The method always // closes the http.Response Body. -func (client VirtualMachinesClient) CreateOrUpdateResponder(resp *http.Response) (result VirtualMachine, err error) { +func (client VirtualMachinesClient) CreateOrUpdateResponder(resp *http.Response) (result autorest.Response, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusCreated, http.StatusOK), - autorest.ByUnmarshallingJSON(&result), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} + result.Response = resp return } // Deallocate shuts down the Virtual Machine and releases the compute // resources. You are not billed for the compute resources that this Virtual -// Machine uses. +// Machine uses. This method may poll for completion. Polling can be canceled +// by passing the cancel channel argument. The channel will be used to cancel +// polling and any outstanding HTTP requests. // // resourceGroupName is the name of the resource group. vmName is the name of // the virtual machine. 
-func (client VirtualMachinesClient) Deallocate(resourceGroupName string, vmName string) (result autorest.Response, ae error) { - req, err := client.DeallocatePreparer(resourceGroupName, vmName) +func (client VirtualMachinesClient) Deallocate(resourceGroupName string, vmName string, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.DeallocatePreparer(resourceGroupName, vmName, cancel) if err != nil { - return result, autorest.NewErrorWithError(err, "compute/VirtualMachinesClient", "Deallocate", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Deallocate", nil, "Failure preparing request") } resp, err := client.DeallocateSender(req) if err != nil { result.Response = resp - return result, autorest.NewErrorWithError(err, "compute/VirtualMachinesClient", "Deallocate", "Failure sending request") + return result, autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Deallocate", resp, "Failure sending request") } result, err = client.DeallocateResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "compute/VirtualMachinesClient", "Deallocate", "Failure responding to request") + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Deallocate", resp, "Failure responding to request") } return } // DeallocatePreparer prepares the Deallocate request. 
-func (client VirtualMachinesClient) DeallocatePreparer(resourceGroupName string, vmName string) (*http.Request, error) { +func (client VirtualMachinesClient) DeallocatePreparer(resourceGroupName string, vmName string, cancel <-chan struct{}) (*http.Request, error) { pathParameters := map[string]interface{}{ - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - "vmName": url.QueryEscape(vmName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vmName": autorest.Encode("path", vmName), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsPost(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/deallocate"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/deallocate", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) } // DeallocateSender sends the Deallocate request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachinesClient) DeallocateSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK, http.StatusAccepted) + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) } // DeallocateResponder handles the response to the Deallocate request. 
The method always @@ -233,61 +244,65 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), autorest.ByClosing()) result.Response = resp return } -// Delete the operation to delete a virtual machine. +// Delete the operation to delete a virtual machine. This method may poll for +// completion. Polling can be canceled by passing the cancel channel +// argument. The channel will be used to cancel polling and any outstanding +// HTTP requests. // // resourceGroupName is the name of the resource group. vmName is the name of // the virtual machine. -func (client VirtualMachinesClient) Delete(resourceGroupName string, vmName string) (result autorest.Response, ae error) { - req, err := client.DeletePreparer(resourceGroupName, vmName) +func (client VirtualMachinesClient) Delete(resourceGroupName string, vmName string, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.DeletePreparer(resourceGroupName, vmName, cancel) if err != nil { - return result, autorest.NewErrorWithError(err, "compute/VirtualMachinesClient", "Delete", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Delete", nil, "Failure preparing request") } resp, err := client.DeleteSender(req) if err != nil { result.Response = resp - return result, autorest.NewErrorWithError(err, "compute/VirtualMachinesClient", "Delete", "Failure sending request") + return result, autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Delete", resp, "Failure sending request") } result, err = client.DeleteResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "compute/VirtualMachinesClient", "Delete", "Failure responding to request") + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Delete", resp, "Failure responding to request") } return } 
// DeletePreparer prepares the Delete request. -func (client VirtualMachinesClient) DeletePreparer(resourceGroupName string, vmName string) (*http.Request, error) { +func (client VirtualMachinesClient) DeletePreparer(resourceGroupName string, vmName string, cancel <-chan struct{}) (*http.Request, error) { pathParameters := map[string]interface{}{ - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - "vmName": url.QueryEscape(vmName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vmName": autorest.Encode("path", vmName), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsDelete(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) } // DeleteSender sends the Delete request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachinesClient) DeleteSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK, http.StatusAccepted, http.StatusNoContent) + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) } // DeleteResponder handles the response to the Delete request. 
The method always @@ -296,7 +311,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), autorest.ByClosing()) result.Response = resp return @@ -306,21 +321,21 @@ // // resourceGroupName is the name of the resource group. vmName is the name of // the virtual machine. -func (client VirtualMachinesClient) Generalize(resourceGroupName string, vmName string) (result autorest.Response, ae error) { +func (client VirtualMachinesClient) Generalize(resourceGroupName string, vmName string) (result autorest.Response, err error) { req, err := client.GeneralizePreparer(resourceGroupName, vmName) if err != nil { - return result, autorest.NewErrorWithError(err, "compute/VirtualMachinesClient", "Generalize", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Generalize", nil, "Failure preparing request") } resp, err := client.GeneralizeSender(req) if err != nil { result.Response = resp - return result, autorest.NewErrorWithError(err, "compute/VirtualMachinesClient", "Generalize", "Failure sending request") + return result, autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Generalize", resp, "Failure sending request") } result, err = client.GeneralizeResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "compute/VirtualMachinesClient", "Generalize", "Failure responding to request") + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Generalize", resp, "Failure responding to request") } return @@ -329,28 +344,27 @@ // GeneralizePreparer prepares the Generalize request. 
func (client VirtualMachinesClient) GeneralizePreparer(resourceGroupName string, vmName string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - "vmName": url.QueryEscape(vmName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vmName": autorest.Encode("path", vmName), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsPost(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/generalize"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/generalize", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GeneralizeSender sends the Generalize request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachinesClient) GeneralizeSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GeneralizeResponder handles the response to the Generalize request. The method always @@ -359,7 +373,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByClosing()) result.Response = resp return @@ -369,55 +383,54 @@ // // resourceGroupName is the name of the resource group. vmName is the name of // the virtual machine. 
expand is the expand expression to apply on the -// operation. -func (client VirtualMachinesClient) Get(resourceGroupName string, vmName string, expand string) (result VirtualMachine, ae error) { +// operation. Possible values include: 'instanceView' +func (client VirtualMachinesClient) Get(resourceGroupName string, vmName string, expand InstanceViewTypes) (result VirtualMachine, err error) { req, err := client.GetPreparer(resourceGroupName, vmName, expand) if err != nil { - return result, autorest.NewErrorWithError(err, "compute/VirtualMachinesClient", "Get", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Get", nil, "Failure preparing request") } resp, err := client.GetSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "compute/VirtualMachinesClient", "Get", "Failure sending request") + return result, autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Get", resp, "Failure sending request") } result, err = client.GetResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "compute/VirtualMachinesClient", "Get", "Failure responding to request") + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Get", resp, "Failure responding to request") } return } // GetPreparer prepares the Get request. 
-func (client VirtualMachinesClient) GetPreparer(resourceGroupName string, vmName string, expand string) (*http.Request, error) { +func (client VirtualMachinesClient) GetPreparer(resourceGroupName string, vmName string, expand InstanceViewTypes) (*http.Request, error) { pathParameters := map[string]interface{}{ - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - "vmName": url.QueryEscape(vmName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vmName": autorest.Encode("path", vmName), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - if len(expand) > 0 { - queryParameters["$expand"] = expand + if len(string(expand)) > 0 { + queryParameters["$expand"] = autorest.Encode("query", expand) } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachinesClient) GetSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetResponder handles the response to the Get request. 
The method always @@ -426,7 +439,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -436,21 +449,21 @@ // List the operation to list virtual machines under a resource group. // // resourceGroupName is the name of the resource group. -func (client VirtualMachinesClient) List(resourceGroupName string) (result VirtualMachineListResult, ae error) { +func (client VirtualMachinesClient) List(resourceGroupName string) (result VirtualMachineListResult, err error) { req, err := client.ListPreparer(resourceGroupName) if err != nil { - return result, autorest.NewErrorWithError(err, "compute/VirtualMachinesClient", "List", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "List", nil, "Failure preparing request") } resp, err := client.ListSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "compute/VirtualMachinesClient", "List", "Failure sending request") + return result, autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "List", resp, "Failure sending request") } result, err = client.ListResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "compute/VirtualMachinesClient", "List", "Failure responding to request") + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "List", resp, "Failure responding to request") } return @@ -459,27 +472,26 @@ // ListPreparer prepares the List request. 
func (client VirtualMachinesClient) ListPreparer(resourceGroupName string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachinesClient) ListSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // ListResponder handles the response to the List request. The method always @@ -488,7 +500,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -496,10 +508,10 @@ } // ListNextResults retrieves the next set of results, if any. 
-func (client VirtualMachinesClient) ListNextResults(lastResults VirtualMachineListResult) (result VirtualMachineListResult, ae error) { +func (client VirtualMachinesClient) ListNextResults(lastResults VirtualMachineListResult) (result VirtualMachineListResult, err error) { req, err := lastResults.VirtualMachineListResultPreparer() if err != nil { - return result, autorest.NewErrorWithError(err, "compute/VirtualMachinesClient", "List", "Failure preparing next results request request") + return result, autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "List", nil, "Failure preparing next results request request") } if req == nil { return @@ -508,12 +520,12 @@ resp, err := client.ListSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "compute/VirtualMachinesClient", "List", "Failure sending next results request request") + return result, autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "List", resp, "Failure sending next results request request") } result, err = client.ListResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "compute/VirtualMachinesClient", "List", "Failure responding to next results request request") + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "List", resp, "Failure responding to next results request request") } return @@ -522,21 +534,21 @@ // ListAll gets the list of Virtual Machines in the subscription. Use nextLink // property in the response to get the next page of Virtual Machines. Do this // till nextLink is not null to fetch all the Virtual Machines. 
-func (client VirtualMachinesClient) ListAll() (result VirtualMachineListResult, ae error) { +func (client VirtualMachinesClient) ListAll() (result VirtualMachineListResult, err error) { req, err := client.ListAllPreparer() if err != nil { - return result, autorest.NewErrorWithError(err, "compute/VirtualMachinesClient", "ListAll", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "ListAll", nil, "Failure preparing request") } resp, err := client.ListAllSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "compute/VirtualMachinesClient", "ListAll", "Failure sending request") + return result, autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "ListAll", resp, "Failure sending request") } result, err = client.ListAllResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "compute/VirtualMachinesClient", "ListAll", "Failure responding to request") + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "ListAll", resp, "Failure responding to request") } return @@ -545,26 +557,25 @@ // ListAllPreparer prepares the ListAll request. 
func (client VirtualMachinesClient) ListAllPreparer() (*http.Request, error) { pathParameters := map[string]interface{}{ - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/virtualMachines"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/virtualMachines", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // ListAllSender sends the ListAll request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachinesClient) ListAllSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // ListAllResponder handles the response to the ListAll request. The method always @@ -573,7 +584,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -581,10 +592,10 @@ } // ListAllNextResults retrieves the next set of results, if any. 
-func (client VirtualMachinesClient) ListAllNextResults(lastResults VirtualMachineListResult) (result VirtualMachineListResult, ae error) { +func (client VirtualMachinesClient) ListAllNextResults(lastResults VirtualMachineListResult) (result VirtualMachineListResult, err error) { req, err := lastResults.VirtualMachineListResultPreparer() if err != nil { - return result, autorest.NewErrorWithError(err, "compute/VirtualMachinesClient", "ListAll", "Failure preparing next results request request") + return result, autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "ListAll", nil, "Failure preparing next results request request") } if req == nil { return @@ -593,37 +604,37 @@ resp, err := client.ListAllSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "compute/VirtualMachinesClient", "ListAll", "Failure sending next results request request") + return result, autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "ListAll", resp, "Failure sending next results request request") } result, err = client.ListAllResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "compute/VirtualMachinesClient", "ListAll", "Failure responding to next results request request") + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "ListAll", resp, "Failure responding to next results request request") } return } -// ListAvailableSizes lists virtual-machine-sizes available to be used for a -// virtual machine. +// ListAvailableSizes lists all available virtual machine sizes it can be +// resized to for a virtual machine. // // resourceGroupName is the name of the resource group. vmName is the name of // the virtual machine. 
-func (client VirtualMachinesClient) ListAvailableSizes(resourceGroupName string, vmName string) (result VirtualMachineSizeListResult, ae error) { +func (client VirtualMachinesClient) ListAvailableSizes(resourceGroupName string, vmName string) (result VirtualMachineSizeListResult, err error) { req, err := client.ListAvailableSizesPreparer(resourceGroupName, vmName) if err != nil { - return result, autorest.NewErrorWithError(err, "compute/VirtualMachinesClient", "ListAvailableSizes", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "ListAvailableSizes", nil, "Failure preparing request") } resp, err := client.ListAvailableSizesSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "compute/VirtualMachinesClient", "ListAvailableSizes", "Failure sending request") + return result, autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "ListAvailableSizes", resp, "Failure sending request") } result, err = client.ListAvailableSizesResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "compute/VirtualMachinesClient", "ListAvailableSizes", "Failure responding to request") + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "ListAvailableSizes", resp, "Failure responding to request") } return @@ -632,28 +643,27 @@ // ListAvailableSizesPreparer prepares the ListAvailableSizes request. 
func (client VirtualMachinesClient) ListAvailableSizesPreparer(resourceGroupName string, vmName string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - "vmName": url.QueryEscape(vmName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vmName": autorest.Encode("path", vmName), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/vmSizes"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/vmSizes", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // ListAvailableSizesSender sends the ListAvailableSizes request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachinesClient) ListAvailableSizesSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // ListAvailableSizesResponder handles the response to the ListAvailableSizes request. 
The method always @@ -662,62 +672,66 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} return } -// PowerOff the operation to power off (stop) a virtual machine. +// PowerOff the operation to power off (stop) a virtual machine. This method +// may poll for completion. Polling can be canceled by passing the cancel +// channel argument. The channel will be used to cancel polling and any +// outstanding HTTP requests. // // resourceGroupName is the name of the resource group. vmName is the name of // the virtual machine. -func (client VirtualMachinesClient) PowerOff(resourceGroupName string, vmName string) (result autorest.Response, ae error) { - req, err := client.PowerOffPreparer(resourceGroupName, vmName) +func (client VirtualMachinesClient) PowerOff(resourceGroupName string, vmName string, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.PowerOffPreparer(resourceGroupName, vmName, cancel) if err != nil { - return result, autorest.NewErrorWithError(err, "compute/VirtualMachinesClient", "PowerOff", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "PowerOff", nil, "Failure preparing request") } resp, err := client.PowerOffSender(req) if err != nil { result.Response = resp - return result, autorest.NewErrorWithError(err, "compute/VirtualMachinesClient", "PowerOff", "Failure sending request") + return result, autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "PowerOff", resp, "Failure sending request") } result, err = client.PowerOffResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "compute/VirtualMachinesClient", "PowerOff", "Failure responding to request") + err = autorest.NewErrorWithError(err, 
"compute.VirtualMachinesClient", "PowerOff", resp, "Failure responding to request") } return } // PowerOffPreparer prepares the PowerOff request. -func (client VirtualMachinesClient) PowerOffPreparer(resourceGroupName string, vmName string) (*http.Request, error) { +func (client VirtualMachinesClient) PowerOffPreparer(resourceGroupName string, vmName string, cancel <-chan struct{}) (*http.Request, error) { pathParameters := map[string]interface{}{ - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - "vmName": url.QueryEscape(vmName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vmName": autorest.Encode("path", vmName), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsPost(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/powerOff"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/powerOff", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) } // PowerOffSender sends the PowerOff request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachinesClient) PowerOffSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK, http.StatusAccepted) + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) } // PowerOffResponder handles the response to the PowerOff request. 
The method always @@ -726,61 +740,132 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), autorest.ByClosing()) result.Response = resp return } -// Restart the operation to restart a virtual machine. +// Redeploy the operation to redeploy a virtual machine. This method may poll +// for completion. Polling can be canceled by passing the cancel channel +// argument. The channel will be used to cancel polling and any outstanding +// HTTP requests. // // resourceGroupName is the name of the resource group. vmName is the name of // the virtual machine. -func (client VirtualMachinesClient) Restart(resourceGroupName string, vmName string) (result autorest.Response, ae error) { - req, err := client.RestartPreparer(resourceGroupName, vmName) +func (client VirtualMachinesClient) Redeploy(resourceGroupName string, vmName string, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.RedeployPreparer(resourceGroupName, vmName, cancel) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Redeploy", nil, "Failure preparing request") + } + + resp, err := client.RedeploySender(req) if err != nil { - return result, autorest.NewErrorWithError(err, "compute/VirtualMachinesClient", "Restart", "Failure preparing request") + result.Response = resp + return result, autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Redeploy", resp, "Failure sending request") + } + + result, err = client.RedeployResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Redeploy", resp, "Failure responding to request") + } + + return +} + +// RedeployPreparer prepares the Redeploy request. 
+func (client VirtualMachinesClient) RedeployPreparer(resourceGroupName string, vmName string, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vmName": autorest.Encode("path", vmName), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/redeploy", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// RedeploySender sends the Redeploy request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachinesClient) RedeploySender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// RedeployResponder handles the response to the Redeploy request. The method always +// closes the http.Response Body. +func (client VirtualMachinesClient) RedeployResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} + +// Restart the operation to restart a virtual machine. This method may poll +// for completion. Polling can be canceled by passing the cancel channel +// argument. The channel will be used to cancel polling and any outstanding +// HTTP requests. +// +// resourceGroupName is the name of the resource group. vmName is the name of +// the virtual machine. 
+func (client VirtualMachinesClient) Restart(resourceGroupName string, vmName string, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.RestartPreparer(resourceGroupName, vmName, cancel) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Restart", nil, "Failure preparing request") } resp, err := client.RestartSender(req) if err != nil { result.Response = resp - return result, autorest.NewErrorWithError(err, "compute/VirtualMachinesClient", "Restart", "Failure sending request") + return result, autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Restart", resp, "Failure sending request") } result, err = client.RestartResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "compute/VirtualMachinesClient", "Restart", "Failure responding to request") + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Restart", resp, "Failure responding to request") } return } // RestartPreparer prepares the Restart request. 
-func (client VirtualMachinesClient) RestartPreparer(resourceGroupName string, vmName string) (*http.Request, error) { +func (client VirtualMachinesClient) RestartPreparer(resourceGroupName string, vmName string, cancel <-chan struct{}) (*http.Request, error) { pathParameters := map[string]interface{}{ - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - "vmName": url.QueryEscape(vmName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vmName": autorest.Encode("path", vmName), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsPost(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/restart"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/restart", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) } // RestartSender sends the Restart request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachinesClient) RestartSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK, http.StatusAccepted) + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) } // RestartResponder handles the response to the Restart request. 
The method always @@ -789,61 +874,65 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), autorest.ByClosing()) result.Response = resp return } -// Start the operation to start a virtual machine. +// Start the operation to start a virtual machine. This method may poll for +// completion. Polling can be canceled by passing the cancel channel +// argument. The channel will be used to cancel polling and any outstanding +// HTTP requests. // // resourceGroupName is the name of the resource group. vmName is the name of // the virtual machine. -func (client VirtualMachinesClient) Start(resourceGroupName string, vmName string) (result autorest.Response, ae error) { - req, err := client.StartPreparer(resourceGroupName, vmName) +func (client VirtualMachinesClient) Start(resourceGroupName string, vmName string, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.StartPreparer(resourceGroupName, vmName, cancel) if err != nil { - return result, autorest.NewErrorWithError(err, "compute/VirtualMachinesClient", "Start", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Start", nil, "Failure preparing request") } resp, err := client.StartSender(req) if err != nil { result.Response = resp - return result, autorest.NewErrorWithError(err, "compute/VirtualMachinesClient", "Start", "Failure sending request") + return result, autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Start", resp, "Failure sending request") } result, err = client.StartResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "compute/VirtualMachinesClient", "Start", "Failure responding to request") + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Start", resp, "Failure responding to request") } return } // StartPreparer 
prepares the Start request. -func (client VirtualMachinesClient) StartPreparer(resourceGroupName string, vmName string) (*http.Request, error) { +func (client VirtualMachinesClient) StartPreparer(resourceGroupName string, vmName string, cancel <-chan struct{}) (*http.Request, error) { pathParameters := map[string]interface{}{ - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - "vmName": url.QueryEscape(vmName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vmName": autorest.Encode("path", vmName), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsPost(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/start"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/start", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) } // StartSender sends the Start request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachinesClient) StartSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK, http.StatusAccepted) + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) } // StartResponder handles the response to the Start request. 
The method always @@ -852,7 +941,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), autorest.ByClosing()) result.Response = resp return diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachinesizes.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachinesizes.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachinesizes.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachinesizes.go 2016-10-13 14:32:06.000000000 +0000 @@ -14,14 +14,14 @@ // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 // Changes may cause incorrect behavior and will be lost if the code is // regenerated. import ( - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" "net/http" - "net/url" ) // VirtualMachineSizesClient is the the Compute Management Client. @@ -41,24 +41,25 @@ return VirtualMachineSizesClient{NewWithBaseURI(baseURI, subscriptionID)} } -// List lists virtual-machine-sizes available in a location for a subscription. +// List lists all available virtual machine sizes for a subscription in a +// location. // // location is the location upon which virtual-machine-sizes is queried. 
-func (client VirtualMachineSizesClient) List(location string) (result VirtualMachineSizeListResult, ae error) { +func (client VirtualMachineSizesClient) List(location string) (result VirtualMachineSizeListResult, err error) { req, err := client.ListPreparer(location) if err != nil { - return result, autorest.NewErrorWithError(err, "compute/VirtualMachineSizesClient", "List", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineSizesClient", "List", nil, "Failure preparing request") } resp, err := client.ListSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "compute/VirtualMachineSizesClient", "List", "Failure sending request") + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineSizesClient", "List", resp, "Failure sending request") } result, err = client.ListResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "compute/VirtualMachineSizesClient", "List", "Failure responding to request") + err = autorest.NewErrorWithError(err, "compute.VirtualMachineSizesClient", "List", resp, "Failure responding to request") } return @@ -67,27 +68,26 @@ // ListPreparer prepares the List request. 
func (client VirtualMachineSizesClient) ListPreparer(location string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "location": url.QueryEscape(location), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "location": autorest.Encode("path", location), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/vmSizes"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/vmSizes", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineSizesClient) ListSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // ListResponder handles the response to the List request. 
The method always @@ -96,7 +96,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/devtestlabs/artifactoperations.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/devtestlabs/artifactoperations.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/devtestlabs/artifactoperations.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/devtestlabs/artifactoperations.go 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,278 @@ +package devtestlabs + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "net/http" +) + +// ArtifactOperationsClient is the the DevTest Labs Client. +type ArtifactOperationsClient struct { + ManagementClient +} + +// NewArtifactOperationsClient creates an instance of the +// ArtifactOperationsClient client. 
+func NewArtifactOperationsClient(subscriptionID string) ArtifactOperationsClient { + return NewArtifactOperationsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewArtifactOperationsClientWithBaseURI creates an instance of the +// ArtifactOperationsClient client. +func NewArtifactOperationsClientWithBaseURI(baseURI string, subscriptionID string) ArtifactOperationsClient { + return ArtifactOperationsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// GenerateArmTemplate generates an ARM template for the given artifact, +// uploads the required files to a storage account, and validates the +// generated artifact. +// +// resourceGroupName is the name of the resource group. labName is the name of +// the lab. artifactSourceName is the name of the artifact source. name is +// the name of the artifact. +func (client ArtifactOperationsClient) GenerateArmTemplate(resourceGroupName string, labName string, artifactSourceName string, name string, generateArmTemplateRequest GenerateArmTemplateRequest) (result ArmTemplateInfo, err error) { + req, err := client.GenerateArmTemplatePreparer(resourceGroupName, labName, artifactSourceName, name, generateArmTemplateRequest) + if err != nil { + return result, autorest.NewErrorWithError(err, "devtestlabs.ArtifactOperationsClient", "GenerateArmTemplate", nil, "Failure preparing request") + } + + resp, err := client.GenerateArmTemplateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "devtestlabs.ArtifactOperationsClient", "GenerateArmTemplate", resp, "Failure sending request") + } + + result, err = client.GenerateArmTemplateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "devtestlabs.ArtifactOperationsClient", "GenerateArmTemplate", resp, "Failure responding to request") + } + + return +} + +// GenerateArmTemplatePreparer prepares the GenerateArmTemplate request. 
+func (client ArtifactOperationsClient) GenerateArmTemplatePreparer(resourceGroupName string, labName string, artifactSourceName string, name string, generateArmTemplateRequest GenerateArmTemplateRequest) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "artifactSourceName": autorest.Encode("path", artifactSourceName), + "labName": autorest.Encode("path", labName), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DevTestLab/labs/{labName}/artifactsources/{artifactSourceName}/artifacts/{name}/generateArmTemplate", pathParameters), + autorest.WithJSON(generateArmTemplateRequest), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GenerateArmTemplateSender sends the GenerateArmTemplate request. The method will close the +// http.Response Body if it receives an error. +func (client ArtifactOperationsClient) GenerateArmTemplateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GenerateArmTemplateResponder handles the response to the GenerateArmTemplate request. The method always +// closes the http.Response Body. 
+func (client ArtifactOperationsClient) GenerateArmTemplateResponder(resp *http.Response) (result ArmTemplateInfo, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// GetResource get artifact. +// +// resourceGroupName is the name of the resource group. labName is the name of +// the lab. artifactSourceName is the name of the artifact source. name is +// the name of the artifact. +func (client ArtifactOperationsClient) GetResource(resourceGroupName string, labName string, artifactSourceName string, name string) (result Artifact, err error) { + req, err := client.GetResourcePreparer(resourceGroupName, labName, artifactSourceName, name) + if err != nil { + return result, autorest.NewErrorWithError(err, "devtestlabs.ArtifactOperationsClient", "GetResource", nil, "Failure preparing request") + } + + resp, err := client.GetResourceSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "devtestlabs.ArtifactOperationsClient", "GetResource", resp, "Failure sending request") + } + + result, err = client.GetResourceResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "devtestlabs.ArtifactOperationsClient", "GetResource", resp, "Failure responding to request") + } + + return +} + +// GetResourcePreparer prepares the GetResource request. 
+func (client ArtifactOperationsClient) GetResourcePreparer(resourceGroupName string, labName string, artifactSourceName string, name string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "artifactSourceName": autorest.Encode("path", artifactSourceName), + "labName": autorest.Encode("path", labName), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DevTestLab/labs/{labName}/artifactsources/{artifactSourceName}/artifacts/{name}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetResourceSender sends the GetResource request. The method will close the +// http.Response Body if it receives an error. +func (client ArtifactOperationsClient) GetResourceSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetResourceResponder handles the response to the GetResource request. The method always +// closes the http.Response Body. +func (client ArtifactOperationsClient) GetResourceResponder(resp *http.Response) (result Artifact, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List list artifacts in a given artifact source. +// +// resourceGroupName is the name of the resource group. labName is the name of +// the lab. artifactSourceName is the name of the artifact source. 
filter is +// the filter to apply on the operation. top is the maximum number of +// resources to return from the operation. orderBy is the ordering expression +// for the results, using OData notation. +func (client ArtifactOperationsClient) List(resourceGroupName string, labName string, artifactSourceName string, filter string, top *int32, orderBy string) (result ResponseWithContinuationArtifact, err error) { + req, err := client.ListPreparer(resourceGroupName, labName, artifactSourceName, filter, top, orderBy) + if err != nil { + return result, autorest.NewErrorWithError(err, "devtestlabs.ArtifactOperationsClient", "List", nil, "Failure preparing request") + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "devtestlabs.ArtifactOperationsClient", "List", resp, "Failure sending request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "devtestlabs.ArtifactOperationsClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. 
+func (client ArtifactOperationsClient) ListPreparer(resourceGroupName string, labName string, artifactSourceName string, filter string, top *int32, orderBy string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "artifactSourceName": autorest.Encode("path", artifactSourceName), + "labName": autorest.Encode("path", labName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + if len(filter) > 0 { + queryParameters["$filter"] = autorest.Encode("query", filter) + } + if top != nil { + queryParameters["$top"] = autorest.Encode("query", *top) + } + if len(orderBy) > 0 { + queryParameters["$orderBy"] = autorest.Encode("query", orderBy) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DevTestLab/labs/{labName}/artifactsources/{artifactSourceName}/artifacts", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client ArtifactOperationsClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. 
+func (client ArtifactOperationsClient) ListResponder(resp *http.Response) (result ResponseWithContinuationArtifact, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListNextResults retrieves the next set of results, if any. +func (client ArtifactOperationsClient) ListNextResults(lastResults ResponseWithContinuationArtifact) (result ResponseWithContinuationArtifact, err error) { + req, err := lastResults.ResponseWithContinuationArtifactPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "devtestlabs.ArtifactOperationsClient", "List", nil, "Failure preparing next results request request") + } + if req == nil { + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "devtestlabs.ArtifactOperationsClient", "List", resp, "Failure sending next results request request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "devtestlabs.ArtifactOperationsClient", "List", resp, "Failure responding to next results request request") + } + + return +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/devtestlabs/artifactsourceoperations.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/devtestlabs/artifactsourceoperations.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/devtestlabs/artifactsourceoperations.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/devtestlabs/artifactsourceoperations.go 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,399 @@ +package devtestlabs + +// Copyright (c) Microsoft and contributors. All rights reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "net/http" +) + +// ArtifactSourceOperationsClient is the the DevTest Labs Client. +type ArtifactSourceOperationsClient struct { + ManagementClient +} + +// NewArtifactSourceOperationsClient creates an instance of the +// ArtifactSourceOperationsClient client. +func NewArtifactSourceOperationsClient(subscriptionID string) ArtifactSourceOperationsClient { + return NewArtifactSourceOperationsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewArtifactSourceOperationsClientWithBaseURI creates an instance of the +// ArtifactSourceOperationsClient client. +func NewArtifactSourceOperationsClientWithBaseURI(baseURI string, subscriptionID string) ArtifactSourceOperationsClient { + return ArtifactSourceOperationsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdateResource create or replace an existing artifact source. +// +// resourceGroupName is the name of the resource group. labName is the name of +// the lab. name is the name of the artifact source. 
+func (client ArtifactSourceOperationsClient) CreateOrUpdateResource(resourceGroupName string, labName string, name string, artifactSource ArtifactSource) (result ArtifactSource, err error) { + req, err := client.CreateOrUpdateResourcePreparer(resourceGroupName, labName, name, artifactSource) + if err != nil { + return result, autorest.NewErrorWithError(err, "devtestlabs.ArtifactSourceOperationsClient", "CreateOrUpdateResource", nil, "Failure preparing request") + } + + resp, err := client.CreateOrUpdateResourceSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "devtestlabs.ArtifactSourceOperationsClient", "CreateOrUpdateResource", resp, "Failure sending request") + } + + result, err = client.CreateOrUpdateResourceResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "devtestlabs.ArtifactSourceOperationsClient", "CreateOrUpdateResource", resp, "Failure responding to request") + } + + return +} + +// CreateOrUpdateResourcePreparer prepares the CreateOrUpdateResource request. 
+func (client ArtifactSourceOperationsClient) CreateOrUpdateResourcePreparer(resourceGroupName string, labName string, name string, artifactSource ArtifactSource) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "labName": autorest.Encode("path", labName), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DevTestLab/labs/{labName}/artifactsources/{name}", pathParameters), + autorest.WithJSON(artifactSource), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// CreateOrUpdateResourceSender sends the CreateOrUpdateResource request. The method will close the +// http.Response Body if it receives an error. +func (client ArtifactSourceOperationsClient) CreateOrUpdateResourceSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// CreateOrUpdateResourceResponder handles the response to the CreateOrUpdateResource request. The method always +// closes the http.Response Body. +func (client ArtifactSourceOperationsClient) CreateOrUpdateResourceResponder(resp *http.Response) (result ArtifactSource, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// DeleteResource delete artifact source. +// +// resourceGroupName is the name of the resource group. labName is the name of +// the lab. 
name is the name of the artifact source. +func (client ArtifactSourceOperationsClient) DeleteResource(resourceGroupName string, labName string, name string) (result autorest.Response, err error) { + req, err := client.DeleteResourcePreparer(resourceGroupName, labName, name) + if err != nil { + return result, autorest.NewErrorWithError(err, "devtestlabs.ArtifactSourceOperationsClient", "DeleteResource", nil, "Failure preparing request") + } + + resp, err := client.DeleteResourceSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "devtestlabs.ArtifactSourceOperationsClient", "DeleteResource", resp, "Failure sending request") + } + + result, err = client.DeleteResourceResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "devtestlabs.ArtifactSourceOperationsClient", "DeleteResource", resp, "Failure responding to request") + } + + return +} + +// DeleteResourcePreparer prepares the DeleteResource request. +func (client ArtifactSourceOperationsClient) DeleteResourcePreparer(resourceGroupName string, labName string, name string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "labName": autorest.Encode("path", labName), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DevTestLab/labs/{labName}/artifactsources/{name}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// DeleteResourceSender sends the DeleteResource request. 
The method will close the +// http.Response Body if it receives an error. +func (client ArtifactSourceOperationsClient) DeleteResourceSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// DeleteResourceResponder handles the response to the DeleteResource request. The method always +// closes the http.Response Body. +func (client ArtifactSourceOperationsClient) DeleteResourceResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// GetResource get artifact source. +// +// resourceGroupName is the name of the resource group. labName is the name of +// the lab. name is the name of the artifact source. +func (client ArtifactSourceOperationsClient) GetResource(resourceGroupName string, labName string, name string) (result ArtifactSource, err error) { + req, err := client.GetResourcePreparer(resourceGroupName, labName, name) + if err != nil { + return result, autorest.NewErrorWithError(err, "devtestlabs.ArtifactSourceOperationsClient", "GetResource", nil, "Failure preparing request") + } + + resp, err := client.GetResourceSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "devtestlabs.ArtifactSourceOperationsClient", "GetResource", resp, "Failure sending request") + } + + result, err = client.GetResourceResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "devtestlabs.ArtifactSourceOperationsClient", "GetResource", resp, "Failure responding to request") + } + + return +} + +// GetResourcePreparer prepares the GetResource request. 
+func (client ArtifactSourceOperationsClient) GetResourcePreparer(resourceGroupName string, labName string, name string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "labName": autorest.Encode("path", labName), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DevTestLab/labs/{labName}/artifactsources/{name}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetResourceSender sends the GetResource request. The method will close the +// http.Response Body if it receives an error. +func (client ArtifactSourceOperationsClient) GetResourceSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetResourceResponder handles the response to the GetResource request. The method always +// closes the http.Response Body. +func (client ArtifactSourceOperationsClient) GetResourceResponder(resp *http.Response) (result ArtifactSource, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List list artifact sources in a given lab. +// +// resourceGroupName is the name of the resource group. labName is the name of +// the lab. filter is the filter to apply on the operation. top is the +// maximum number of resources to return from the operation. 
orderBy is the +// ordering expression for the results, using OData notation. +func (client ArtifactSourceOperationsClient) List(resourceGroupName string, labName string, filter string, top *int32, orderBy string) (result ResponseWithContinuationArtifactSource, err error) { + req, err := client.ListPreparer(resourceGroupName, labName, filter, top, orderBy) + if err != nil { + return result, autorest.NewErrorWithError(err, "devtestlabs.ArtifactSourceOperationsClient", "List", nil, "Failure preparing request") + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "devtestlabs.ArtifactSourceOperationsClient", "List", resp, "Failure sending request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "devtestlabs.ArtifactSourceOperationsClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. 
+func (client ArtifactSourceOperationsClient) ListPreparer(resourceGroupName string, labName string, filter string, top *int32, orderBy string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "labName": autorest.Encode("path", labName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + if len(filter) > 0 { + queryParameters["$filter"] = autorest.Encode("query", filter) + } + if top != nil { + queryParameters["$top"] = autorest.Encode("query", *top) + } + if len(orderBy) > 0 { + queryParameters["$orderBy"] = autorest.Encode("query", orderBy) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DevTestLab/labs/{labName}/artifactsources", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client ArtifactSourceOperationsClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client ArtifactSourceOperationsClient) ListResponder(resp *http.Response) (result ResponseWithContinuationArtifactSource, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListNextResults retrieves the next set of results, if any. 
+func (client ArtifactSourceOperationsClient) ListNextResults(lastResults ResponseWithContinuationArtifactSource) (result ResponseWithContinuationArtifactSource, err error) { + req, err := lastResults.ResponseWithContinuationArtifactSourcePreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "devtestlabs.ArtifactSourceOperationsClient", "List", nil, "Failure preparing next results request request") + } + if req == nil { + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "devtestlabs.ArtifactSourceOperationsClient", "List", resp, "Failure sending next results request request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "devtestlabs.ArtifactSourceOperationsClient", "List", resp, "Failure responding to next results request request") + } + + return +} + +// PatchResource modify properties of artifact sources. +// +// resourceGroupName is the name of the resource group. labName is the name of +// the lab. name is the name of the artifact source. 
+func (client ArtifactSourceOperationsClient) PatchResource(resourceGroupName string, labName string, name string, artifactSource ArtifactSource) (result ArtifactSource, err error) { + req, err := client.PatchResourcePreparer(resourceGroupName, labName, name, artifactSource) + if err != nil { + return result, autorest.NewErrorWithError(err, "devtestlabs.ArtifactSourceOperationsClient", "PatchResource", nil, "Failure preparing request") + } + + resp, err := client.PatchResourceSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "devtestlabs.ArtifactSourceOperationsClient", "PatchResource", resp, "Failure sending request") + } + + result, err = client.PatchResourceResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "devtestlabs.ArtifactSourceOperationsClient", "PatchResource", resp, "Failure responding to request") + } + + return +} + +// PatchResourcePreparer prepares the PatchResource request. 
+func (client ArtifactSourceOperationsClient) PatchResourcePreparer(resourceGroupName string, labName string, name string, artifactSource ArtifactSource) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "labName": autorest.Encode("path", labName), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPatch(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DevTestLab/labs/{labName}/artifactsources/{name}", pathParameters), + autorest.WithJSON(artifactSource), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// PatchResourceSender sends the PatchResource request. The method will close the +// http.Response Body if it receives an error. +func (client ArtifactSourceOperationsClient) PatchResourceSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// PatchResourceResponder handles the response to the PatchResource request. The method always +// closes the http.Response Body. 
+func (client ArtifactSourceOperationsClient) PatchResourceResponder(resp *http.Response) (result ArtifactSource, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/devtestlabs/client.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/devtestlabs/client.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/devtestlabs/client.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/devtestlabs/client.go 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,58 @@ +// Package devtestlabs implements the Azure ARM Devtestlabs service API +// version 2016-05-15. +// +// The DevTest Labs Client. +package devtestlabs + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. 
+ +import ( + "github.com/Azure/go-autorest/autorest" +) + +const ( + // APIVersion is the version of the Devtestlabs + APIVersion = "2016-05-15" + + // DefaultBaseURI is the default URI used for the service Devtestlabs + DefaultBaseURI = "https://management.azure.com" +) + +// ManagementClient is the base client for Devtestlabs. +type ManagementClient struct { + autorest.Client + BaseURI string + APIVersion string + SubscriptionID string +} + +// New creates an instance of the ManagementClient client. +func New(subscriptionID string) ManagementClient { + return NewWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewWithBaseURI creates an instance of the ManagementClient client. +func NewWithBaseURI(baseURI string, subscriptionID string) ManagementClient { + return ManagementClient{ + Client: autorest.NewClientWithUserAgent(UserAgent()), + BaseURI: baseURI, + APIVersion: APIVersion, + SubscriptionID: subscriptionID, + } +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/devtestlabs/costoperations.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/devtestlabs/costoperations.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/devtestlabs/costoperations.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/devtestlabs/costoperations.go 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,106 @@ +package devtestlabs + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "net/http" +) + +// CostOperationsClient is the the DevTest Labs Client. +type CostOperationsClient struct { + ManagementClient +} + +// NewCostOperationsClient creates an instance of the CostOperationsClient +// client. +func NewCostOperationsClient(subscriptionID string) CostOperationsClient { + return NewCostOperationsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewCostOperationsClientWithBaseURI creates an instance of the +// CostOperationsClient client. +func NewCostOperationsClientWithBaseURI(baseURI string, subscriptionID string) CostOperationsClient { + return CostOperationsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// GetResource get cost. +// +// resourceGroupName is the name of the resource group. labName is the name of +// the lab. name is the name of the cost. 
+func (client CostOperationsClient) GetResource(resourceGroupName string, labName string, name string) (result Cost, err error) { + req, err := client.GetResourcePreparer(resourceGroupName, labName, name) + if err != nil { + return result, autorest.NewErrorWithError(err, "devtestlabs.CostOperationsClient", "GetResource", nil, "Failure preparing request") + } + + resp, err := client.GetResourceSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "devtestlabs.CostOperationsClient", "GetResource", resp, "Failure sending request") + } + + result, err = client.GetResourceResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "devtestlabs.CostOperationsClient", "GetResource", resp, "Failure responding to request") + } + + return +} + +// GetResourcePreparer prepares the GetResource request. +func (client CostOperationsClient) GetResourcePreparer(resourceGroupName string, labName string, name string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "labName": autorest.Encode("path", labName), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DevTestLab/labs/{labName}/costs/{name}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetResourceSender sends the GetResource request. The method will close the +// http.Response Body if it receives an error. 
+func (client CostOperationsClient) GetResourceSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetResourceResponder handles the response to the GetResource request. The method always +// closes the http.Response Body. +func (client CostOperationsClient) GetResourceResponder(resp *http.Response) (result Cost, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/devtestlabs/customimageoperations.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/devtestlabs/customimageoperations.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/devtestlabs/customimageoperations.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/devtestlabs/customimageoperations.go 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,343 @@ +package devtestlabs + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. 
+ +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "net/http" +) + +// CustomImageOperationsClient is the the DevTest Labs Client. +type CustomImageOperationsClient struct { + ManagementClient +} + +// NewCustomImageOperationsClient creates an instance of the +// CustomImageOperationsClient client. +func NewCustomImageOperationsClient(subscriptionID string) CustomImageOperationsClient { + return NewCustomImageOperationsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewCustomImageOperationsClientWithBaseURI creates an instance of the +// CustomImageOperationsClient client. +func NewCustomImageOperationsClientWithBaseURI(baseURI string, subscriptionID string) CustomImageOperationsClient { + return CustomImageOperationsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdateResource create or replace an existing custom image. This +// operation can take a while to complete. This method may poll for +// completion. Polling can be canceled by passing the cancel channel +// argument. The channel will be used to cancel polling and any outstanding +// HTTP requests. +// +// resourceGroupName is the name of the resource group. labName is the name of +// the lab. name is the name of the custom image. 
+func (client CustomImageOperationsClient) CreateOrUpdateResource(resourceGroupName string, labName string, name string, customImage CustomImage, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.CreateOrUpdateResourcePreparer(resourceGroupName, labName, name, customImage, cancel) + if err != nil { + return result, autorest.NewErrorWithError(err, "devtestlabs.CustomImageOperationsClient", "CreateOrUpdateResource", nil, "Failure preparing request") + } + + resp, err := client.CreateOrUpdateResourceSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "devtestlabs.CustomImageOperationsClient", "CreateOrUpdateResource", resp, "Failure sending request") + } + + result, err = client.CreateOrUpdateResourceResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "devtestlabs.CustomImageOperationsClient", "CreateOrUpdateResource", resp, "Failure responding to request") + } + + return +} + +// CreateOrUpdateResourcePreparer prepares the CreateOrUpdateResource request. 
+func (client CustomImageOperationsClient) CreateOrUpdateResourcePreparer(resourceGroupName string, labName string, name string, customImage CustomImage, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "labName": autorest.Encode("path", labName), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DevTestLab/labs/{labName}/customimages/{name}", pathParameters), + autorest.WithJSON(customImage), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// CreateOrUpdateResourceSender sends the CreateOrUpdateResource request. The method will close the +// http.Response Body if it receives an error. +func (client CustomImageOperationsClient) CreateOrUpdateResourceSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// CreateOrUpdateResourceResponder handles the response to the CreateOrUpdateResource request. The method always +// closes the http.Response Body. +func (client CustomImageOperationsClient) CreateOrUpdateResourceResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByClosing()) + result.Response = resp + return +} + +// DeleteResource delete custom image. This operation can take a while to +// complete. This method may poll for completion. 
Polling can be canceled by +// passing the cancel channel argument. The channel will be used to cancel +// polling and any outstanding HTTP requests. +// +// resourceGroupName is the name of the resource group. labName is the name of +// the lab. name is the name of the custom image. +func (client CustomImageOperationsClient) DeleteResource(resourceGroupName string, labName string, name string, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.DeleteResourcePreparer(resourceGroupName, labName, name, cancel) + if err != nil { + return result, autorest.NewErrorWithError(err, "devtestlabs.CustomImageOperationsClient", "DeleteResource", nil, "Failure preparing request") + } + + resp, err := client.DeleteResourceSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "devtestlabs.CustomImageOperationsClient", "DeleteResource", resp, "Failure sending request") + } + + result, err = client.DeleteResourceResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "devtestlabs.CustomImageOperationsClient", "DeleteResource", resp, "Failure responding to request") + } + + return +} + +// DeleteResourcePreparer prepares the DeleteResource request. 
+func (client CustomImageOperationsClient) DeleteResourcePreparer(resourceGroupName string, labName string, name string, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "labName": autorest.Encode("path", labName), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DevTestLab/labs/{labName}/customimages/{name}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// DeleteResourceSender sends the DeleteResource request. The method will close the +// http.Response Body if it receives an error. +func (client CustomImageOperationsClient) DeleteResourceSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// DeleteResourceResponder handles the response to the DeleteResource request. The method always +// closes the http.Response Body. +func (client CustomImageOperationsClient) DeleteResourceResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// GetResource get custom image. +// +// resourceGroupName is the name of the resource group. labName is the name of +// the lab. name is the name of the custom image. 
+func (client CustomImageOperationsClient) GetResource(resourceGroupName string, labName string, name string) (result CustomImage, err error) { + req, err := client.GetResourcePreparer(resourceGroupName, labName, name) + if err != nil { + return result, autorest.NewErrorWithError(err, "devtestlabs.CustomImageOperationsClient", "GetResource", nil, "Failure preparing request") + } + + resp, err := client.GetResourceSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "devtestlabs.CustomImageOperationsClient", "GetResource", resp, "Failure sending request") + } + + result, err = client.GetResourceResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "devtestlabs.CustomImageOperationsClient", "GetResource", resp, "Failure responding to request") + } + + return +} + +// GetResourcePreparer prepares the GetResource request. +func (client CustomImageOperationsClient) GetResourcePreparer(resourceGroupName string, labName string, name string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "labName": autorest.Encode("path", labName), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DevTestLab/labs/{labName}/customimages/{name}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetResourceSender sends the GetResource request. The method will close the +// http.Response Body if it receives an error. 
+func (client CustomImageOperationsClient) GetResourceSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetResourceResponder handles the response to the GetResource request. The method always +// closes the http.Response Body. +func (client CustomImageOperationsClient) GetResourceResponder(resp *http.Response) (result CustomImage, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List list custom images in a given lab. +// +// resourceGroupName is the name of the resource group. labName is the name of +// the lab. filter is the filter to apply on the operation. top is the +// maximum number of resources to return from the operation. orderBy is the +// ordering expression for the results, using OData notation. +func (client CustomImageOperationsClient) List(resourceGroupName string, labName string, filter string, top *int32, orderBy string) (result ResponseWithContinuationCustomImage, err error) { + req, err := client.ListPreparer(resourceGroupName, labName, filter, top, orderBy) + if err != nil { + return result, autorest.NewErrorWithError(err, "devtestlabs.CustomImageOperationsClient", "List", nil, "Failure preparing request") + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "devtestlabs.CustomImageOperationsClient", "List", resp, "Failure sending request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "devtestlabs.CustomImageOperationsClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. 
+func (client CustomImageOperationsClient) ListPreparer(resourceGroupName string, labName string, filter string, top *int32, orderBy string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "labName": autorest.Encode("path", labName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + if len(filter) > 0 { + queryParameters["$filter"] = autorest.Encode("query", filter) + } + if top != nil { + queryParameters["$top"] = autorest.Encode("query", *top) + } + if len(orderBy) > 0 { + queryParameters["$orderBy"] = autorest.Encode("query", orderBy) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DevTestLab/labs/{labName}/customimages", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client CustomImageOperationsClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client CustomImageOperationsClient) ListResponder(resp *http.Response) (result ResponseWithContinuationCustomImage, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListNextResults retrieves the next set of results, if any. 
+func (client CustomImageOperationsClient) ListNextResults(lastResults ResponseWithContinuationCustomImage) (result ResponseWithContinuationCustomImage, err error) { + req, err := lastResults.ResponseWithContinuationCustomImagePreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "devtestlabs.CustomImageOperationsClient", "List", nil, "Failure preparing next results request request") + } + if req == nil { + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "devtestlabs.CustomImageOperationsClient", "List", resp, "Failure sending next results request request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "devtestlabs.CustomImageOperationsClient", "List", resp, "Failure responding to next results request request") + } + + return +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/devtestlabs/formulaoperations.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/devtestlabs/formulaoperations.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/devtestlabs/formulaoperations.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/devtestlabs/formulaoperations.go 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,338 @@ +package devtestlabs + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "net/http" +) + +// FormulaOperationsClient is the the DevTest Labs Client. +type FormulaOperationsClient struct { + ManagementClient +} + +// NewFormulaOperationsClient creates an instance of the +// FormulaOperationsClient client. +func NewFormulaOperationsClient(subscriptionID string) FormulaOperationsClient { + return NewFormulaOperationsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewFormulaOperationsClientWithBaseURI creates an instance of the +// FormulaOperationsClient client. +func NewFormulaOperationsClientWithBaseURI(baseURI string, subscriptionID string) FormulaOperationsClient { + return FormulaOperationsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdateResource create or replace an existing Formula. This +// operation can take a while to complete. This method may poll for +// completion. Polling can be canceled by passing the cancel channel +// argument. The channel will be used to cancel polling and any outstanding +// HTTP requests. +// +// resourceGroupName is the name of the resource group. labName is the name of +// the lab. name is the name of the formula. 
+func (client FormulaOperationsClient) CreateOrUpdateResource(resourceGroupName string, labName string, name string, formula Formula, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.CreateOrUpdateResourcePreparer(resourceGroupName, labName, name, formula, cancel) + if err != nil { + return result, autorest.NewErrorWithError(err, "devtestlabs.FormulaOperationsClient", "CreateOrUpdateResource", nil, "Failure preparing request") + } + + resp, err := client.CreateOrUpdateResourceSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "devtestlabs.FormulaOperationsClient", "CreateOrUpdateResource", resp, "Failure sending request") + } + + result, err = client.CreateOrUpdateResourceResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "devtestlabs.FormulaOperationsClient", "CreateOrUpdateResource", resp, "Failure responding to request") + } + + return +} + +// CreateOrUpdateResourcePreparer prepares the CreateOrUpdateResource request. 
+func (client FormulaOperationsClient) CreateOrUpdateResourcePreparer(resourceGroupName string, labName string, name string, formula Formula, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "labName": autorest.Encode("path", labName), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DevTestLab/labs/{labName}/formulas/{name}", pathParameters), + autorest.WithJSON(formula), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// CreateOrUpdateResourceSender sends the CreateOrUpdateResource request. The method will close the +// http.Response Body if it receives an error. +func (client FormulaOperationsClient) CreateOrUpdateResourceSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// CreateOrUpdateResourceResponder handles the response to the CreateOrUpdateResource request. The method always +// closes the http.Response Body. +func (client FormulaOperationsClient) CreateOrUpdateResourceResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByClosing()) + result.Response = resp + return +} + +// DeleteResource delete formula. +// +// resourceGroupName is the name of the resource group. labName is the name of +// the lab. name is the name of the formula. 
+func (client FormulaOperationsClient) DeleteResource(resourceGroupName string, labName string, name string) (result autorest.Response, err error) { + req, err := client.DeleteResourcePreparer(resourceGroupName, labName, name) + if err != nil { + return result, autorest.NewErrorWithError(err, "devtestlabs.FormulaOperationsClient", "DeleteResource", nil, "Failure preparing request") + } + + resp, err := client.DeleteResourceSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "devtestlabs.FormulaOperationsClient", "DeleteResource", resp, "Failure sending request") + } + + result, err = client.DeleteResourceResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "devtestlabs.FormulaOperationsClient", "DeleteResource", resp, "Failure responding to request") + } + + return +} + +// DeleteResourcePreparer prepares the DeleteResource request. +func (client FormulaOperationsClient) DeleteResourcePreparer(resourceGroupName string, labName string, name string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "labName": autorest.Encode("path", labName), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DevTestLab/labs/{labName}/formulas/{name}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// DeleteResourceSender sends the DeleteResource request. The method will close the +// http.Response Body if it receives an error. 
+func (client FormulaOperationsClient) DeleteResourceSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// DeleteResourceResponder handles the response to the DeleteResource request. The method always +// closes the http.Response Body. +func (client FormulaOperationsClient) DeleteResourceResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// GetResource get formula. +// +// resourceGroupName is the name of the resource group. labName is the name of +// the lab. name is the name of the formula. +func (client FormulaOperationsClient) GetResource(resourceGroupName string, labName string, name string) (result Formula, err error) { + req, err := client.GetResourcePreparer(resourceGroupName, labName, name) + if err != nil { + return result, autorest.NewErrorWithError(err, "devtestlabs.FormulaOperationsClient", "GetResource", nil, "Failure preparing request") + } + + resp, err := client.GetResourceSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "devtestlabs.FormulaOperationsClient", "GetResource", resp, "Failure sending request") + } + + result, err = client.GetResourceResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "devtestlabs.FormulaOperationsClient", "GetResource", resp, "Failure responding to request") + } + + return +} + +// GetResourcePreparer prepares the GetResource request. 
+func (client FormulaOperationsClient) GetResourcePreparer(resourceGroupName string, labName string, name string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "labName": autorest.Encode("path", labName), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DevTestLab/labs/{labName}/formulas/{name}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetResourceSender sends the GetResource request. The method will close the +// http.Response Body if it receives an error. +func (client FormulaOperationsClient) GetResourceSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetResourceResponder handles the response to the GetResource request. The method always +// closes the http.Response Body. +func (client FormulaOperationsClient) GetResourceResponder(resp *http.Response) (result Formula, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List list formulas in a given lab. +// +// resourceGroupName is the name of the resource group. labName is the name of +// the lab. filter is the filter to apply on the operation. top is the +// maximum number of resources to return from the operation. orderBy is the +// ordering expression for the results, using OData notation. 
+func (client FormulaOperationsClient) List(resourceGroupName string, labName string, filter string, top *int32, orderBy string) (result ResponseWithContinuationFormula, err error) { + req, err := client.ListPreparer(resourceGroupName, labName, filter, top, orderBy) + if err != nil { + return result, autorest.NewErrorWithError(err, "devtestlabs.FormulaOperationsClient", "List", nil, "Failure preparing request") + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "devtestlabs.FormulaOperationsClient", "List", resp, "Failure sending request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "devtestlabs.FormulaOperationsClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client FormulaOperationsClient) ListPreparer(resourceGroupName string, labName string, filter string, top *int32, orderBy string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "labName": autorest.Encode("path", labName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + if len(filter) > 0 { + queryParameters["$filter"] = autorest.Encode("query", filter) + } + if top != nil { + queryParameters["$top"] = autorest.Encode("query", *top) + } + if len(orderBy) > 0 { + queryParameters["$orderBy"] = autorest.Encode("query", orderBy) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DevTestLab/labs/{labName}/formulas", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return 
preparer.Prepare(&http.Request{}) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client FormulaOperationsClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client FormulaOperationsClient) ListResponder(resp *http.Response) (result ResponseWithContinuationFormula, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListNextResults retrieves the next set of results, if any. +func (client FormulaOperationsClient) ListNextResults(lastResults ResponseWithContinuationFormula) (result ResponseWithContinuationFormula, err error) { + req, err := lastResults.ResponseWithContinuationFormulaPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "devtestlabs.FormulaOperationsClient", "List", nil, "Failure preparing next results request request") + } + if req == nil { + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "devtestlabs.FormulaOperationsClient", "List", resp, "Failure sending next results request request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "devtestlabs.FormulaOperationsClient", "List", resp, "Failure responding to next results request request") + } + + return +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/devtestlabs/galleryimageoperations.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/devtestlabs/galleryimageoperations.go --- 
juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/devtestlabs/galleryimageoperations.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/devtestlabs/galleryimageoperations.go 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,140 @@ +package devtestlabs + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "net/http" +) + +// GalleryImageOperationsClient is the the DevTest Labs Client. +type GalleryImageOperationsClient struct { + ManagementClient +} + +// NewGalleryImageOperationsClient creates an instance of the +// GalleryImageOperationsClient client. +func NewGalleryImageOperationsClient(subscriptionID string) GalleryImageOperationsClient { + return NewGalleryImageOperationsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewGalleryImageOperationsClientWithBaseURI creates an instance of the +// GalleryImageOperationsClient client. 
+func NewGalleryImageOperationsClientWithBaseURI(baseURI string, subscriptionID string) GalleryImageOperationsClient { + return GalleryImageOperationsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// List list gallery images in a given lab. +// +// resourceGroupName is the name of the resource group. labName is the name of +// the lab. filter is the filter to apply on the operation. top is the +// maximum number of resources to return from the operation. orderBy is the +// ordering expression for the results, using OData notation. +func (client GalleryImageOperationsClient) List(resourceGroupName string, labName string, filter string, top *int32, orderBy string) (result ResponseWithContinuationGalleryImage, err error) { + req, err := client.ListPreparer(resourceGroupName, labName, filter, top, orderBy) + if err != nil { + return result, autorest.NewErrorWithError(err, "devtestlabs.GalleryImageOperationsClient", "List", nil, "Failure preparing request") + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "devtestlabs.GalleryImageOperationsClient", "List", resp, "Failure sending request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "devtestlabs.GalleryImageOperationsClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. 
+func (client GalleryImageOperationsClient) ListPreparer(resourceGroupName string, labName string, filter string, top *int32, orderBy string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "labName": autorest.Encode("path", labName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + if len(filter) > 0 { + queryParameters["$filter"] = autorest.Encode("query", filter) + } + if top != nil { + queryParameters["$top"] = autorest.Encode("query", *top) + } + if len(orderBy) > 0 { + queryParameters["$orderBy"] = autorest.Encode("query", orderBy) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DevTestLab/labs/{labName}/galleryimages", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client GalleryImageOperationsClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client GalleryImageOperationsClient) ListResponder(resp *http.Response) (result ResponseWithContinuationGalleryImage, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListNextResults retrieves the next set of results, if any. 
+func (client GalleryImageOperationsClient) ListNextResults(lastResults ResponseWithContinuationGalleryImage) (result ResponseWithContinuationGalleryImage, err error) { + req, err := lastResults.ResponseWithContinuationGalleryImagePreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "devtestlabs.GalleryImageOperationsClient", "List", nil, "Failure preparing next results request request") + } + if req == nil { + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "devtestlabs.GalleryImageOperationsClient", "List", resp, "Failure sending next results request request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "devtestlabs.GalleryImageOperationsClient", "List", resp, "Failure responding to next results request request") + } + + return +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/devtestlabs/laboperations.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/devtestlabs/laboperations.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/devtestlabs/laboperations.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/devtestlabs/laboperations.go 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,719 @@ +package devtestlabs + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "net/http" +) + +// LabOperationsClient is the the DevTest Labs Client. +type LabOperationsClient struct { + ManagementClient +} + +// NewLabOperationsClient creates an instance of the LabOperationsClient +// client. +func NewLabOperationsClient(subscriptionID string) LabOperationsClient { + return NewLabOperationsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewLabOperationsClientWithBaseURI creates an instance of the +// LabOperationsClient client. +func NewLabOperationsClientWithBaseURI(baseURI string, subscriptionID string) LabOperationsClient { + return LabOperationsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateEnvironment create virtual machines in a Lab. This operation can take +// a while to complete. This method may poll for completion. Polling can be +// canceled by passing the cancel channel argument. The channel will be used +// to cancel polling and any outstanding HTTP requests. +// +// resourceGroupName is the name of the resource group. name is the name of +// the lab. 
+func (client LabOperationsClient) CreateEnvironment(resourceGroupName string, name string, labVirtualMachine LabVirtualMachine, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.CreateEnvironmentPreparer(resourceGroupName, name, labVirtualMachine, cancel) + if err != nil { + return result, autorest.NewErrorWithError(err, "devtestlabs.LabOperationsClient", "CreateEnvironment", nil, "Failure preparing request") + } + + resp, err := client.CreateEnvironmentSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "devtestlabs.LabOperationsClient", "CreateEnvironment", resp, "Failure sending request") + } + + result, err = client.CreateEnvironmentResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "devtestlabs.LabOperationsClient", "CreateEnvironment", resp, "Failure responding to request") + } + + return +} + +// CreateEnvironmentPreparer prepares the CreateEnvironment request. +func (client LabOperationsClient) CreateEnvironmentPreparer(resourceGroupName string, name string, labVirtualMachine LabVirtualMachine, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DevTestLab/labs/{name}/createEnvironment", pathParameters), + autorest.WithJSON(labVirtualMachine), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// CreateEnvironmentSender sends the CreateEnvironment 
request. The method will close the +// http.Response Body if it receives an error. +func (client LabOperationsClient) CreateEnvironmentSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// CreateEnvironmentResponder handles the response to the CreateEnvironment request. The method always +// closes the http.Response Body. +func (client LabOperationsClient) CreateEnvironmentResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} + +// CreateOrUpdateResource create or replace an existing Lab. This operation +// can take a while to complete. This method may poll for completion. Polling +// can be canceled by passing the cancel channel argument. The channel will +// be used to cancel polling and any outstanding HTTP requests. +// +// resourceGroupName is the name of the resource group. name is the name of +// the lab. 
+func (client LabOperationsClient) CreateOrUpdateResource(resourceGroupName string, name string, lab Lab, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.CreateOrUpdateResourcePreparer(resourceGroupName, name, lab, cancel) + if err != nil { + return result, autorest.NewErrorWithError(err, "devtestlabs.LabOperationsClient", "CreateOrUpdateResource", nil, "Failure preparing request") + } + + resp, err := client.CreateOrUpdateResourceSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "devtestlabs.LabOperationsClient", "CreateOrUpdateResource", resp, "Failure sending request") + } + + result, err = client.CreateOrUpdateResourceResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "devtestlabs.LabOperationsClient", "CreateOrUpdateResource", resp, "Failure responding to request") + } + + return +} + +// CreateOrUpdateResourcePreparer prepares the CreateOrUpdateResource request. +func (client LabOperationsClient) CreateOrUpdateResourcePreparer(resourceGroupName string, name string, lab Lab, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DevTestLab/labs/{name}", pathParameters), + autorest.WithJSON(lab), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// CreateOrUpdateResourceSender sends the CreateOrUpdateResource request. 
The method will close the +// http.Response Body if it receives an error. +func (client LabOperationsClient) CreateOrUpdateResourceSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// CreateOrUpdateResourceResponder handles the response to the CreateOrUpdateResource request. The method always +// closes the http.Response Body. +func (client LabOperationsClient) CreateOrUpdateResourceResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByClosing()) + result.Response = resp + return +} + +// DeleteResource delete lab. This operation can take a while to complete. +// This method may poll for completion. Polling can be canceled by passing +// the cancel channel argument. The channel will be used to cancel polling +// and any outstanding HTTP requests. +// +// resourceGroupName is the name of the resource group. name is the name of +// the lab. 
+func (client LabOperationsClient) DeleteResource(resourceGroupName string, name string, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.DeleteResourcePreparer(resourceGroupName, name, cancel) + if err != nil { + return result, autorest.NewErrorWithError(err, "devtestlabs.LabOperationsClient", "DeleteResource", nil, "Failure preparing request") + } + + resp, err := client.DeleteResourceSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "devtestlabs.LabOperationsClient", "DeleteResource", resp, "Failure sending request") + } + + result, err = client.DeleteResourceResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "devtestlabs.LabOperationsClient", "DeleteResource", resp, "Failure responding to request") + } + + return +} + +// DeleteResourcePreparer prepares the DeleteResource request. +func (client LabOperationsClient) DeleteResourcePreparer(resourceGroupName string, name string, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DevTestLab/labs/{name}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// DeleteResourceSender sends the DeleteResource request. The method will close the +// http.Response Body if it receives an error. 
+func (client LabOperationsClient) DeleteResourceSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// DeleteResourceResponder handles the response to the DeleteResource request. The method always +// closes the http.Response Body. +func (client LabOperationsClient) DeleteResourceResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// GenerateUploadURI generate a URI for uploading custom disk images to a Lab. +// +// resourceGroupName is the name of the resource group. name is the name of +// the lab. +func (client LabOperationsClient) GenerateUploadURI(resourceGroupName string, name string, generateUploadURIParameter GenerateUploadURIParameter) (result GenerateUploadURIResponse, err error) { + req, err := client.GenerateUploadURIPreparer(resourceGroupName, name, generateUploadURIParameter) + if err != nil { + return result, autorest.NewErrorWithError(err, "devtestlabs.LabOperationsClient", "GenerateUploadURI", nil, "Failure preparing request") + } + + resp, err := client.GenerateUploadURISender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "devtestlabs.LabOperationsClient", "GenerateUploadURI", resp, "Failure sending request") + } + + result, err = client.GenerateUploadURIResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "devtestlabs.LabOperationsClient", "GenerateUploadURI", resp, "Failure responding to request") + } + + return +} + +// GenerateUploadURIPreparer prepares the GenerateUploadURI request. 
+func (client LabOperationsClient) GenerateUploadURIPreparer(resourceGroupName string, name string, generateUploadURIParameter GenerateUploadURIParameter) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DevTestLab/labs/{name}/generateUploadUri", pathParameters), + autorest.WithJSON(generateUploadURIParameter), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GenerateUploadURISender sends the GenerateUploadURI request. The method will close the +// http.Response Body if it receives an error. +func (client LabOperationsClient) GenerateUploadURISender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GenerateUploadURIResponder handles the response to the GenerateUploadURI request. The method always +// closes the http.Response Body. +func (client LabOperationsClient) GenerateUploadURIResponder(resp *http.Response) (result GenerateUploadURIResponse, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// GetResource get lab. +// +// resourceGroupName is the name of the resource group. name is the name of +// the lab. 
+func (client LabOperationsClient) GetResource(resourceGroupName string, name string) (result Lab, err error) { + req, err := client.GetResourcePreparer(resourceGroupName, name) + if err != nil { + return result, autorest.NewErrorWithError(err, "devtestlabs.LabOperationsClient", "GetResource", nil, "Failure preparing request") + } + + resp, err := client.GetResourceSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "devtestlabs.LabOperationsClient", "GetResource", resp, "Failure sending request") + } + + result, err = client.GetResourceResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "devtestlabs.LabOperationsClient", "GetResource", resp, "Failure responding to request") + } + + return +} + +// GetResourcePreparer prepares the GetResource request. +func (client LabOperationsClient) GetResourcePreparer(resourceGroupName string, name string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DevTestLab/labs/{name}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetResourceSender sends the GetResource request. The method will close the +// http.Response Body if it receives an error. 
+func (client LabOperationsClient) GetResourceSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetResourceResponder handles the response to the GetResource request. The method always +// closes the http.Response Body. +func (client LabOperationsClient) GetResourceResponder(resp *http.Response) (result Lab, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListByResourceGroup list labs in a resource group. +// +// resourceGroupName is the name of the resource group. filter is the filter +// to apply on the operation. top is the maximum number of resources to +// return from the operation. orderBy is the ordering expression for the +// results, using OData notation. +func (client LabOperationsClient) ListByResourceGroup(resourceGroupName string, filter string, top *int32, orderBy string) (result ResponseWithContinuationLab, err error) { + req, err := client.ListByResourceGroupPreparer(resourceGroupName, filter, top, orderBy) + if err != nil { + return result, autorest.NewErrorWithError(err, "devtestlabs.LabOperationsClient", "ListByResourceGroup", nil, "Failure preparing request") + } + + resp, err := client.ListByResourceGroupSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "devtestlabs.LabOperationsClient", "ListByResourceGroup", resp, "Failure sending request") + } + + result, err = client.ListByResourceGroupResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "devtestlabs.LabOperationsClient", "ListByResourceGroup", resp, "Failure responding to request") + } + + return +} + +// ListByResourceGroupPreparer prepares the ListByResourceGroup request. 
+func (client LabOperationsClient) ListByResourceGroupPreparer(resourceGroupName string, filter string, top *int32, orderBy string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + if len(filter) > 0 { + queryParameters["$filter"] = autorest.Encode("query", filter) + } + if top != nil { + queryParameters["$top"] = autorest.Encode("query", *top) + } + if len(orderBy) > 0 { + queryParameters["$orderBy"] = autorest.Encode("query", orderBy) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DevTestLab/labs", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the +// http.Response Body if it receives an error. +func (client LabOperationsClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always +// closes the http.Response Body. +func (client LabOperationsClient) ListByResourceGroupResponder(resp *http.Response) (result ResponseWithContinuationLab, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListByResourceGroupNextResults retrieves the next set of results, if any. 
+func (client LabOperationsClient) ListByResourceGroupNextResults(lastResults ResponseWithContinuationLab) (result ResponseWithContinuationLab, err error) { + req, err := lastResults.ResponseWithContinuationLabPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "devtestlabs.LabOperationsClient", "ListByResourceGroup", nil, "Failure preparing next results request request") + } + if req == nil { + return + } + + resp, err := client.ListByResourceGroupSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "devtestlabs.LabOperationsClient", "ListByResourceGroup", resp, "Failure sending next results request request") + } + + result, err = client.ListByResourceGroupResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "devtestlabs.LabOperationsClient", "ListByResourceGroup", resp, "Failure responding to next results request request") + } + + return +} + +// ListBySubscription list labs in a subscription. +// +// filter is the filter to apply on the operation. top is the maximum number +// of resources to return from the operation. orderBy is the ordering +// expression for the results, using OData notation. 
+func (client LabOperationsClient) ListBySubscription(filter string, top *int32, orderBy string) (result ResponseWithContinuationLab, err error) { + req, err := client.ListBySubscriptionPreparer(filter, top, orderBy) + if err != nil { + return result, autorest.NewErrorWithError(err, "devtestlabs.LabOperationsClient", "ListBySubscription", nil, "Failure preparing request") + } + + resp, err := client.ListBySubscriptionSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "devtestlabs.LabOperationsClient", "ListBySubscription", resp, "Failure sending request") + } + + result, err = client.ListBySubscriptionResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "devtestlabs.LabOperationsClient", "ListBySubscription", resp, "Failure responding to request") + } + + return +} + +// ListBySubscriptionPreparer prepares the ListBySubscription request. +func (client LabOperationsClient) ListBySubscriptionPreparer(filter string, top *int32, orderBy string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + if len(filter) > 0 { + queryParameters["$filter"] = autorest.Encode("query", filter) + } + if top != nil { + queryParameters["$top"] = autorest.Encode("query", *top) + } + if len(orderBy) > 0 { + queryParameters["$orderBy"] = autorest.Encode("query", orderBy) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.DevTestLab/labs", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListBySubscriptionSender sends the ListBySubscription request. 
The method will close the +// http.Response Body if it receives an error. +func (client LabOperationsClient) ListBySubscriptionSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListBySubscriptionResponder handles the response to the ListBySubscription request. The method always +// closes the http.Response Body. +func (client LabOperationsClient) ListBySubscriptionResponder(resp *http.Response) (result ResponseWithContinuationLab, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListBySubscriptionNextResults retrieves the next set of results, if any. +func (client LabOperationsClient) ListBySubscriptionNextResults(lastResults ResponseWithContinuationLab) (result ResponseWithContinuationLab, err error) { + req, err := lastResults.ResponseWithContinuationLabPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "devtestlabs.LabOperationsClient", "ListBySubscription", nil, "Failure preparing next results request request") + } + if req == nil { + return + } + + resp, err := client.ListBySubscriptionSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "devtestlabs.LabOperationsClient", "ListBySubscription", resp, "Failure sending next results request request") + } + + result, err = client.ListBySubscriptionResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "devtestlabs.LabOperationsClient", "ListBySubscription", resp, "Failure responding to next results request request") + } + + return +} + +// ListVhds list disk images available for custom image creation. +// +// resourceGroupName is the name of the resource group. name is the name of +// the lab. 
+func (client LabOperationsClient) ListVhds(resourceGroupName string, name string) (result ResponseWithContinuationLabVhd, err error) { + req, err := client.ListVhdsPreparer(resourceGroupName, name) + if err != nil { + return result, autorest.NewErrorWithError(err, "devtestlabs.LabOperationsClient", "ListVhds", nil, "Failure preparing request") + } + + resp, err := client.ListVhdsSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "devtestlabs.LabOperationsClient", "ListVhds", resp, "Failure sending request") + } + + result, err = client.ListVhdsResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "devtestlabs.LabOperationsClient", "ListVhds", resp, "Failure responding to request") + } + + return +} + +// ListVhdsPreparer prepares the ListVhds request. +func (client LabOperationsClient) ListVhdsPreparer(resourceGroupName string, name string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DevTestLab/labs/{name}/listVhds", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListVhdsSender sends the ListVhds request. The method will close the +// http.Response Body if it receives an error. +func (client LabOperationsClient) ListVhdsSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListVhdsResponder handles the response to the ListVhds request. 
The method always +// closes the http.Response Body. +func (client LabOperationsClient) ListVhdsResponder(resp *http.Response) (result ResponseWithContinuationLabVhd, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListVhdsNextResults retrieves the next set of results, if any. +func (client LabOperationsClient) ListVhdsNextResults(lastResults ResponseWithContinuationLabVhd) (result ResponseWithContinuationLabVhd, err error) { + req, err := lastResults.ResponseWithContinuationLabVhdPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "devtestlabs.LabOperationsClient", "ListVhds", nil, "Failure preparing next results request request") + } + if req == nil { + return + } + + resp, err := client.ListVhdsSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "devtestlabs.LabOperationsClient", "ListVhds", resp, "Failure sending next results request request") + } + + result, err = client.ListVhdsResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "devtestlabs.LabOperationsClient", "ListVhds", resp, "Failure responding to next results request request") + } + + return +} + +// PatchResource modify properties of labs. +// +// resourceGroupName is the name of the resource group. name is the name of +// the lab. 
+func (client LabOperationsClient) PatchResource(resourceGroupName string, name string, lab Lab) (result Lab, err error) { + req, err := client.PatchResourcePreparer(resourceGroupName, name, lab) + if err != nil { + return result, autorest.NewErrorWithError(err, "devtestlabs.LabOperationsClient", "PatchResource", nil, "Failure preparing request") + } + + resp, err := client.PatchResourceSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "devtestlabs.LabOperationsClient", "PatchResource", resp, "Failure sending request") + } + + result, err = client.PatchResourceResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "devtestlabs.LabOperationsClient", "PatchResource", resp, "Failure responding to request") + } + + return +} + +// PatchResourcePreparer prepares the PatchResource request. +func (client LabOperationsClient) PatchResourcePreparer(resourceGroupName string, name string, lab Lab) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPatch(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DevTestLab/labs/{name}", pathParameters), + autorest.WithJSON(lab), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// PatchResourceSender sends the PatchResource request. The method will close the +// http.Response Body if it receives an error. 
+func (client LabOperationsClient) PatchResourceSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// PatchResourceResponder handles the response to the PatchResource request. The method always +// closes the http.Response Body. +func (client LabOperationsClient) PatchResourceResponder(resp *http.Response) (result Lab, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/devtestlabs/models.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/devtestlabs/models.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/devtestlabs/models.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/devtestlabs/models.go 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,859 @@ +package devtestlabs + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. 
+ +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/date" + "github.com/Azure/go-autorest/autorest/to" + "net/http" +) + +// CustomImageOsType enumerates the values for custom image os type. +type CustomImageOsType string + +const ( + // Linux specifies the linux state for custom image os type. + Linux CustomImageOsType = "Linux" + // None specifies the none state for custom image os type. + None CustomImageOsType = "None" + // Windows specifies the windows state for custom image os type. + Windows CustomImageOsType = "Windows" +) + +// EnableStatus enumerates the values for enable status. +type EnableStatus string + +const ( + // Disabled specifies the disabled state for enable status. + Disabled EnableStatus = "Disabled" + // Enabled specifies the enabled state for enable status. + Enabled EnableStatus = "Enabled" +) + +// LabCostType enumerates the values for lab cost type. +type LabCostType string + +const ( + // Projected specifies the projected state for lab cost type. + Projected LabCostType = "Projected" + // Reported specifies the reported state for lab cost type. + Reported LabCostType = "Reported" + // Unavailable specifies the unavailable state for lab cost type. + Unavailable LabCostType = "Unavailable" +) + +// LabStorageType enumerates the values for lab storage type. +type LabStorageType string + +const ( + // Premium specifies the premium state for lab storage type. + Premium LabStorageType = "Premium" + // Standard specifies the standard state for lab storage type. + Standard LabStorageType = "Standard" +) + +// LinuxOsState enumerates the values for linux os state. +type LinuxOsState string + +const ( + // DeprovisionApplied specifies the deprovision applied state for linux os + // state. + DeprovisionApplied LinuxOsState = "DeprovisionApplied" + // DeprovisionRequested specifies the deprovision requested state for + // linux os state. 
+ DeprovisionRequested LinuxOsState = "DeprovisionRequested" + // NonDeprovisioned specifies the non deprovisioned state for linux os + // state. + NonDeprovisioned LinuxOsState = "NonDeprovisioned" +) + +// PolicyEvaluatorType enumerates the values for policy evaluator type. +type PolicyEvaluatorType string + +const ( + // AllowedValuesPolicy specifies the allowed values policy state for + // policy evaluator type. + AllowedValuesPolicy PolicyEvaluatorType = "AllowedValuesPolicy" + // MaxValuePolicy specifies the max value policy state for policy + // evaluator type. + MaxValuePolicy PolicyEvaluatorType = "MaxValuePolicy" +) + +// PolicyFactName enumerates the values for policy fact name. +type PolicyFactName string + +const ( + // PolicyFactNameGalleryImage specifies the policy fact name gallery image + // state for policy fact name. + PolicyFactNameGalleryImage PolicyFactName = "GalleryImage" + // PolicyFactNameLabVMCount specifies the policy fact name lab vm count + // state for policy fact name. + PolicyFactNameLabVMCount PolicyFactName = "LabVmCount" + // PolicyFactNameLabVMSize specifies the policy fact name lab vm size + // state for policy fact name. + PolicyFactNameLabVMSize PolicyFactName = "LabVmSize" + // PolicyFactNameUserOwnedLabVMCount specifies the policy fact name user + // owned lab vm count state for policy fact name. + PolicyFactNameUserOwnedLabVMCount PolicyFactName = "UserOwnedLabVmCount" + // PolicyFactNameUserOwnedLabVMCountInSubnet specifies the policy fact + // name user owned lab vm count in subnet state for policy fact name. + PolicyFactNameUserOwnedLabVMCountInSubnet PolicyFactName = "UserOwnedLabVmCountInSubnet" +) + +// PolicyStatus enumerates the values for policy status. +type PolicyStatus string + +const ( + // PolicyStatusDisabled specifies the policy status disabled state for + // policy status. 
+ PolicyStatusDisabled PolicyStatus = "Disabled" + // PolicyStatusEnabled specifies the policy status enabled state for + // policy status. + PolicyStatusEnabled PolicyStatus = "Enabled" +) + +// SourceControlType enumerates the values for source control type. +type SourceControlType string + +const ( + // GitHub specifies the git hub state for source control type. + GitHub SourceControlType = "GitHub" + // VsoGit specifies the vso git state for source control type. + VsoGit SourceControlType = "VsoGit" +) + +// SubscriptionNotificationState enumerates the values for subscription +// notification state. +type SubscriptionNotificationState string + +const ( + // Deleted specifies the deleted state for subscription notification state. + Deleted SubscriptionNotificationState = "Deleted" + // NotDefined specifies the not defined state for subscription + // notification state. + NotDefined SubscriptionNotificationState = "NotDefined" + // Registered specifies the registered state for subscription notification + // state. + Registered SubscriptionNotificationState = "Registered" + // Suspended specifies the suspended state for subscription notification + // state. + Suspended SubscriptionNotificationState = "Suspended" + // Unregistered specifies the unregistered state for subscription + // notification state. + Unregistered SubscriptionNotificationState = "Unregistered" + // Warned specifies the warned state for subscription notification state. + Warned SubscriptionNotificationState = "Warned" +) + +// UsagePermissionType enumerates the values for usage permission type. +type UsagePermissionType string + +const ( + // Allow specifies the allow state for usage permission type. + Allow UsagePermissionType = "Allow" + // Default specifies the default state for usage permission type. + Default UsagePermissionType = "Default" + // Deny specifies the deny state for usage permission type. 
+ Deny UsagePermissionType = "Deny" +) + +// WindowsOsState enumerates the values for windows os state. +type WindowsOsState string + +const ( + // NonSysprepped specifies the non sysprepped state for windows os state. + NonSysprepped WindowsOsState = "NonSysprepped" + // SysprepApplied specifies the sysprep applied state for windows os state. + SysprepApplied WindowsOsState = "SysprepApplied" + // SysprepRequested specifies the sysprep requested state for windows os + // state. + SysprepRequested WindowsOsState = "SysprepRequested" +) + +// ApplyArtifactsRequest is request body for applying artifacts to a virtual +// machine. +type ApplyArtifactsRequest struct { + Artifacts *[]ArtifactInstallProperties `json:"artifacts,omitempty"` +} + +// ArmTemplateInfo is information about a generated ARM template. +type ArmTemplateInfo struct { + autorest.Response `json:"-"` + Template *map[string]interface{} `json:"template,omitempty"` + Parameters *map[string]interface{} `json:"parameters,omitempty"` +} + +// Artifact is an artifact. +type Artifact struct { + autorest.Response `json:"-"` + Properties *ArtifactProperties `json:"properties,omitempty"` + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` +} + +// ArtifactDeploymentStatusProperties is properties of an artifact deployment. +type ArtifactDeploymentStatusProperties struct { + DeploymentStatus *string `json:"deploymentStatus,omitempty"` + ArtifactsApplied *int32 `json:"artifactsApplied,omitempty"` + TotalArtifacts *int32 `json:"totalArtifacts,omitempty"` +} + +// ArtifactInstallProperties is properties of an artifact. +type ArtifactInstallProperties struct { + ArtifactID *string `json:"artifactId,omitempty"` + Parameters *[]ArtifactParameterProperties `json:"parameters,omitempty"` +} + +// ArtifactParameterProperties is properties of an artifact parameter. 
+type ArtifactParameterProperties struct { + Name *string `json:"name,omitempty"` + Value *string `json:"value,omitempty"` +} + +// ArtifactProperties is properties of an artifact. +type ArtifactProperties struct { + Title *string `json:"title,omitempty"` + Description *string `json:"description,omitempty"` + FilePath *string `json:"filePath,omitempty"` + Icon *string `json:"icon,omitempty"` + TargetOsType *string `json:"targetOsType,omitempty"` + Parameters *map[string]interface{} `json:"parameters,omitempty"` +} + +// ArtifactSource is properties of an artifact source. +type ArtifactSource struct { + autorest.Response `json:"-"` + Properties *ArtifactSourceProperties `json:"properties,omitempty"` + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` +} + +// ArtifactSourceProperties is properties of an artifact source. +type ArtifactSourceProperties struct { + DisplayName *string `json:"displayName,omitempty"` + URI *string `json:"uri,omitempty"` + SourceType SourceControlType `json:"sourceType,omitempty"` + FolderPath *string `json:"folderPath,omitempty"` + BranchRef *string `json:"branchRef,omitempty"` + SecurityToken *string `json:"securityToken,omitempty"` + Status EnableStatus `json:"status,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` + UniqueIdentifier *string `json:"uniqueIdentifier,omitempty"` +} + +// CloudError is +type CloudError struct { + Error *CloudErrorBody `json:"error,omitempty"` +} + +// CloudErrorBody is +type CloudErrorBody struct { + Code *string `json:"code,omitempty"` + Message *string `json:"message,omitempty"` + Target *string `json:"target,omitempty"` + Details *[]CloudErrorBody `json:"details,omitempty"` +} + +// Cost is a cost item. 
+type Cost struct { + autorest.Response `json:"-"` + Properties *CostProperties `json:"properties,omitempty"` + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` +} + +// CostPerDayProperties is the properties of a lab cost item. +type CostPerDayProperties struct { + Date *date.Time `json:"date,omitempty"` + Cost *float64 `json:"cost,omitempty"` + CostType LabCostType `json:"costType,omitempty"` +} + +// CostProperties is properties of a cost item. +type CostProperties struct { + CurrencyCode *string `json:"currencyCode,omitempty"` + Costs *[]CostPerDayProperties `json:"costs,omitempty"` + ResourceCosts *[]ResourceCostProperties `json:"resourceCosts,omitempty"` +} + +// CustomImage is a custom image. +type CustomImage struct { + autorest.Response `json:"-"` + Properties *CustomImageProperties `json:"properties,omitempty"` + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` +} + +// CustomImageProperties is properties of a custom image. +type CustomImageProperties struct { + VM *CustomImagePropertiesFromVM `json:"vm,omitempty"` + Vhd *CustomImagePropertiesCustom `json:"vhd,omitempty"` + Description *string `json:"description,omitempty"` + Author *string `json:"author,omitempty"` + CreationDate *date.Time `json:"creationDate,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` + UniqueIdentifier *string `json:"uniqueIdentifier,omitempty"` +} + +// CustomImagePropertiesCustom is properties for creating a custom image from +// a VHD. 
+type CustomImagePropertiesCustom struct { + ImageName *string `json:"imageName,omitempty"` + SysPrep *bool `json:"sysPrep,omitempty"` + OsType CustomImageOsType `json:"osType,omitempty"` +} + +// CustomImagePropertiesFromVM is properties for creating a custom image from +// a virtual machine. +type CustomImagePropertiesFromVM struct { + SourceVMID *string `json:"sourceVmId,omitempty"` + WindowsOsInfo *WindowsOsInfo `json:"windowsOsInfo,omitempty"` + LinuxOsInfo *LinuxOsInfo `json:"linuxOsInfo,omitempty"` +} + +// DayDetails is properties of a daily schedule. +type DayDetails struct { + Time *string `json:"time,omitempty"` +} + +// EvaluatePoliciesProperties is properties for evaluating a policy set. +type EvaluatePoliciesProperties struct { + FactName *string `json:"factName,omitempty"` + FactData *string `json:"factData,omitempty"` + ValueOffset *string `json:"valueOffset,omitempty"` +} + +// EvaluatePoliciesRequest is request body for evaluating a policy set. +type EvaluatePoliciesRequest struct { + Policies *[]EvaluatePoliciesProperties `json:"policies,omitempty"` +} + +// EvaluatePoliciesResponse is response body for evaluating a policy set. +type EvaluatePoliciesResponse struct { + autorest.Response `json:"-"` + Results *[]PolicySetResult `json:"results,omitempty"` +} + +// Formula is a formula. +type Formula struct { + autorest.Response `json:"-"` + Properties *FormulaProperties `json:"properties,omitempty"` + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` +} + +// FormulaProperties is properties of a formula. 
+type FormulaProperties struct { + Description *string `json:"description,omitempty"` + Author *string `json:"author,omitempty"` + OsType *string `json:"osType,omitempty"` + CreationDate *date.Time `json:"creationDate,omitempty"` + FormulaContent *LabVirtualMachine `json:"formulaContent,omitempty"` + VM *FormulaPropertiesFromVM `json:"vm,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` + UniqueIdentifier *string `json:"uniqueIdentifier,omitempty"` +} + +// FormulaPropertiesFromVM is information about a VM from which a formula is +// to be created. +type FormulaPropertiesFromVM struct { + LabVMID *string `json:"labVmId,omitempty"` +} + +// GalleryImage is a gallery image. +type GalleryImage struct { + Properties *GalleryImageProperties `json:"properties,omitempty"` + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` +} + +// GalleryImageProperties is properties of a gallery image. +type GalleryImageProperties struct { + Author *string `json:"author,omitempty"` + CreatedDate *date.Time `json:"createdDate,omitempty"` + Description *string `json:"description,omitempty"` + ImageReference *GalleryImageReference `json:"imageReference,omitempty"` + Icon *string `json:"icon,omitempty"` + Enabled *bool `json:"enabled,omitempty"` +} + +// GalleryImageReference is the reference information for an Azure Marketplace +// image. +type GalleryImageReference struct { + Offer *string `json:"offer,omitempty"` + Publisher *string `json:"publisher,omitempty"` + Sku *string `json:"sku,omitempty"` + OsType *string `json:"osType,omitempty"` + Version *string `json:"version,omitempty"` +} + +// GenerateArmTemplateRequest is parameters for generating an ARM template for +// deploying artifacts. 
+type GenerateArmTemplateRequest struct { + VirtualMachineName *string `json:"virtualMachineName,omitempty"` + Parameters *[]ParameterInfo `json:"parameters,omitempty"` + Location *string `json:"location,omitempty"` +} + +// GenerateUploadURIParameter is properties for generating an upload URI. +type GenerateUploadURIParameter struct { + BlobName *string `json:"blobName,omitempty"` +} + +// GenerateUploadURIResponse is reponse body for generating an upload URI. +type GenerateUploadURIResponse struct { + autorest.Response `json:"-"` + UploadURI *string `json:"uploadUri,omitempty"` +} + +// HourDetails is properties of an hourly schedule. +type HourDetails struct { + Minute *int32 `json:"minute,omitempty"` +} + +// Lab is a lab. +type Lab struct { + autorest.Response `json:"-"` + Properties *LabProperties `json:"properties,omitempty"` + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` +} + +// LabProperties is properties of a lab. +type LabProperties struct { + DefaultStorageAccount *string `json:"defaultStorageAccount,omitempty"` + ArtifactsStorageAccount *string `json:"artifactsStorageAccount,omitempty"` + VaultName *string `json:"vaultName,omitempty"` + LabStorageType LabStorageType `json:"labStorageType,omitempty"` + CreatedDate *date.Time `json:"createdDate,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` + UniqueIdentifier *string `json:"uniqueIdentifier,omitempty"` +} + +// LabVhd is properties of a VHD in the lab. +type LabVhd struct { + ID *string `json:"id,omitempty"` +} + +// LabVirtualMachine is a virtual machine. 
+type LabVirtualMachine struct { + autorest.Response `json:"-"` + Properties *LabVirtualMachineProperties `json:"properties,omitempty"` + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` +} + +// LabVirtualMachineProperties is properties of a virtual machine. +type LabVirtualMachineProperties struct { + Notes *string `json:"notes,omitempty"` + OwnerObjectID *string `json:"ownerObjectId,omitempty"` + CreatedByUserID *string `json:"createdByUserId,omitempty"` + CreatedByUser *string `json:"createdByUser,omitempty"` + ComputeID *string `json:"computeId,omitempty"` + CustomImageID *string `json:"customImageId,omitempty"` + OsType *string `json:"osType,omitempty"` + Size *string `json:"size,omitempty"` + UserName *string `json:"userName,omitempty"` + Password *string `json:"password,omitempty"` + SSHKey *string `json:"sshKey,omitempty"` + IsAuthenticationWithSSHKey *bool `json:"isAuthenticationWithSshKey,omitempty"` + Fqdn *string `json:"fqdn,omitempty"` + LabSubnetName *string `json:"labSubnetName,omitempty"` + LabVirtualNetworkID *string `json:"labVirtualNetworkId,omitempty"` + DisallowPublicIPAddress *bool `json:"disallowPublicIpAddress,omitempty"` + Artifacts *[]ArtifactInstallProperties `json:"artifacts,omitempty"` + ArtifactDeploymentStatus *ArtifactDeploymentStatusProperties `json:"artifactDeploymentStatus,omitempty"` + GalleryImageReference *GalleryImageReference `json:"galleryImageReference,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` + UniqueIdentifier *string `json:"uniqueIdentifier,omitempty"` +} + +// LinuxOsInfo is information about a Linux OS. 
+type LinuxOsInfo struct { + LinuxOsState LinuxOsState `json:"linuxOsState,omitempty"` +} + +// ParameterInfo is +type ParameterInfo struct { + Name *string `json:"name,omitempty"` + Value *string `json:"value,omitempty"` +} + +// Policy is a Policy. +type Policy struct { + autorest.Response `json:"-"` + Properties *PolicyProperties `json:"properties,omitempty"` + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` +} + +// PolicyProperties is properties of a Policy. +type PolicyProperties struct { + Description *string `json:"description,omitempty"` + Status PolicyStatus `json:"status,omitempty"` + FactName PolicyFactName `json:"factName,omitempty"` + FactData *string `json:"factData,omitempty"` + Threshold *string `json:"threshold,omitempty"` + EvaluatorType PolicyEvaluatorType `json:"evaluatorType,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` + UniqueIdentifier *string `json:"uniqueIdentifier,omitempty"` +} + +// PolicySetResult is result of a policy set evaluation. +type PolicySetResult struct { + HasError *bool `json:"hasError,omitempty"` + PolicyViolations *[]PolicyViolation `json:"policyViolations,omitempty"` +} + +// PolicyViolation is policy violation. +type PolicyViolation struct { + Code *string `json:"code,omitempty"` + Message *string `json:"message,omitempty"` +} + +// ResourceCostProperties is the properties of a resource cost item. 
+type ResourceCostProperties struct { + Resourcename *string `json:"resourcename,omitempty"` + ResourceGroupName *string `json:"resourceGroupName,omitempty"` + ResourceCost *float64 `json:"resourceCost,omitempty"` + Owner *string `json:"owner,omitempty"` + Category *string `json:"category,omitempty"` + Exists *bool `json:"exists,omitempty"` + ResourceType *string `json:"resourceType,omitempty"` +} + +// ResponseWithContinuationArtifact is the response of a list operation. +type ResponseWithContinuationArtifact struct { + autorest.Response `json:"-"` + Value *[]Artifact `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// ResponseWithContinuationArtifactPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. +func (client ResponseWithContinuationArtifact) ResponseWithContinuationArtifactPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + +// ResponseWithContinuationArtifactSource is the response of a list operation. +type ResponseWithContinuationArtifactSource struct { + autorest.Response `json:"-"` + Value *[]ArtifactSource `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// ResponseWithContinuationArtifactSourcePreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. 
+func (client ResponseWithContinuationArtifactSource) ResponseWithContinuationArtifactSourcePreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + +// ResponseWithContinuationCustomImage is the response of a list operation. +type ResponseWithContinuationCustomImage struct { + autorest.Response `json:"-"` + Value *[]CustomImage `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// ResponseWithContinuationCustomImagePreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. +func (client ResponseWithContinuationCustomImage) ResponseWithContinuationCustomImagePreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + +// ResponseWithContinuationFormula is the response of a list operation. +type ResponseWithContinuationFormula struct { + autorest.Response `json:"-"` + Value *[]Formula `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// ResponseWithContinuationFormulaPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. +func (client ResponseWithContinuationFormula) ResponseWithContinuationFormulaPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + +// ResponseWithContinuationGalleryImage is the response of a list operation. 
+type ResponseWithContinuationGalleryImage struct { + autorest.Response `json:"-"` + Value *[]GalleryImage `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// ResponseWithContinuationGalleryImagePreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. +func (client ResponseWithContinuationGalleryImage) ResponseWithContinuationGalleryImagePreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + +// ResponseWithContinuationLab is the response of a list operation. +type ResponseWithContinuationLab struct { + autorest.Response `json:"-"` + Value *[]Lab `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// ResponseWithContinuationLabPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. +func (client ResponseWithContinuationLab) ResponseWithContinuationLabPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + +// ResponseWithContinuationLabVhd is the response of a list operation. +type ResponseWithContinuationLabVhd struct { + autorest.Response `json:"-"` + Value *[]LabVhd `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// ResponseWithContinuationLabVhdPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. 
+func (client ResponseWithContinuationLabVhd) ResponseWithContinuationLabVhdPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + +// ResponseWithContinuationLabVirtualMachine is the response of a list +// operation. +type ResponseWithContinuationLabVirtualMachine struct { + autorest.Response `json:"-"` + Value *[]LabVirtualMachine `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// ResponseWithContinuationLabVirtualMachinePreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. +func (client ResponseWithContinuationLabVirtualMachine) ResponseWithContinuationLabVirtualMachinePreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + +// ResponseWithContinuationPolicy is the response of a list operation. +type ResponseWithContinuationPolicy struct { + autorest.Response `json:"-"` + Value *[]Policy `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// ResponseWithContinuationPolicyPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. +func (client ResponseWithContinuationPolicy) ResponseWithContinuationPolicyPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + +// ResponseWithContinuationSchedule is the response of a list operation. 
+type ResponseWithContinuationSchedule struct { + autorest.Response `json:"-"` + Value *[]Schedule `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// ResponseWithContinuationSchedulePreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. +func (client ResponseWithContinuationSchedule) ResponseWithContinuationSchedulePreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + +// ResponseWithContinuationVirtualNetwork is the response of a list operation. +type ResponseWithContinuationVirtualNetwork struct { + autorest.Response `json:"-"` + Value *[]VirtualNetwork `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// ResponseWithContinuationVirtualNetworkPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. +func (client ResponseWithContinuationVirtualNetwork) ResponseWithContinuationVirtualNetworkPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + +// Schedule is a schedule. +type Schedule struct { + autorest.Response `json:"-"` + Properties *ScheduleProperties `json:"properties,omitempty"` + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` +} + +// ScheduleProperties is properties of a schedule. 
+type ScheduleProperties struct { + Status EnableStatus `json:"status,omitempty"` + TaskType *string `json:"taskType,omitempty"` + WeeklyRecurrence *WeekDetails `json:"weeklyRecurrence,omitempty"` + DailyRecurrence *DayDetails `json:"dailyRecurrence,omitempty"` + HourlyRecurrence *HourDetails `json:"hourlyRecurrence,omitempty"` + TimeZoneID *string `json:"timeZoneId,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` + UniqueIdentifier *string `json:"uniqueIdentifier,omitempty"` +} + +// Subnet is +type Subnet struct { + ResourceID *string `json:"resourceId,omitempty"` + LabSubnetName *string `json:"labSubnetName,omitempty"` + AllowPublicIP UsagePermissionType `json:"allowPublicIp,omitempty"` +} + +// SubnetOverride is property overrides on a subnet of a virtual network. +type SubnetOverride struct { + ResourceID *string `json:"resourceId,omitempty"` + LabSubnetName *string `json:"labSubnetName,omitempty"` + UseInVMCreationPermission UsagePermissionType `json:"useInVmCreationPermission,omitempty"` + UsePublicIPAddressPermission UsagePermissionType `json:"usePublicIpAddressPermission,omitempty"` +} + +// SubscriptionNotification is +type SubscriptionNotification struct { + RegistrationDate *string `json:"registrationDate,omitempty"` + State SubscriptionNotificationState `json:"state,omitempty"` + Properties *SubscriptionNotificationProperties `json:"properties,omitempty"` +} + +// SubscriptionNotificationProperties is +type SubscriptionNotificationProperties struct { + TenantID *string `json:"tenantId,omitempty"` +} + +// VirtualNetwork is a virtual network. 
+type VirtualNetwork struct { + autorest.Response `json:"-"` + Properties *VirtualNetworkProperties `json:"properties,omitempty"` + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` +} + +// VirtualNetworkProperties is properties of a virtual network. +type VirtualNetworkProperties struct { + AllowedSubnets *[]Subnet `json:"allowedSubnets,omitempty"` + Description *string `json:"description,omitempty"` + ExternalProviderResourceID *string `json:"externalProviderResourceId,omitempty"` + SubnetOverrides *[]SubnetOverride `json:"subnetOverrides,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` + UniqueIdentifier *string `json:"uniqueIdentifier,omitempty"` +} + +// WeekDetails is properties of a weekly schedule. +type WeekDetails struct { + Weekdays *[]string `json:"weekdays,omitempty"` + Time *string `json:"time,omitempty"` +} + +// WindowsOsInfo is information about a Windows OS. +type WindowsOsInfo struct { + WindowsOsState WindowsOsState `json:"windowsOsState,omitempty"` +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/devtestlabs/policyoperations.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/devtestlabs/policyoperations.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/devtestlabs/policyoperations.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/devtestlabs/policyoperations.go 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,409 @@ +package devtestlabs + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "net/http" +) + +// PolicyOperationsClient is the the DevTest Labs Client. +type PolicyOperationsClient struct { + ManagementClient +} + +// NewPolicyOperationsClient creates an instance of the PolicyOperationsClient +// client. +func NewPolicyOperationsClient(subscriptionID string) PolicyOperationsClient { + return NewPolicyOperationsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewPolicyOperationsClientWithBaseURI creates an instance of the +// PolicyOperationsClient client. +func NewPolicyOperationsClientWithBaseURI(baseURI string, subscriptionID string) PolicyOperationsClient { + return PolicyOperationsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdateResource create or replace an existing policy. +// +// resourceGroupName is the name of the resource group. labName is the name of +// the lab. policySetName is the name of the policy set. name is the name of +// the policy. 
+func (client PolicyOperationsClient) CreateOrUpdateResource(resourceGroupName string, labName string, policySetName string, name string, policy Policy) (result Policy, err error) { + req, err := client.CreateOrUpdateResourcePreparer(resourceGroupName, labName, policySetName, name, policy) + if err != nil { + return result, autorest.NewErrorWithError(err, "devtestlabs.PolicyOperationsClient", "CreateOrUpdateResource", nil, "Failure preparing request") + } + + resp, err := client.CreateOrUpdateResourceSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "devtestlabs.PolicyOperationsClient", "CreateOrUpdateResource", resp, "Failure sending request") + } + + result, err = client.CreateOrUpdateResourceResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "devtestlabs.PolicyOperationsClient", "CreateOrUpdateResource", resp, "Failure responding to request") + } + + return +} + +// CreateOrUpdateResourcePreparer prepares the CreateOrUpdateResource request. 
+func (client PolicyOperationsClient) CreateOrUpdateResourcePreparer(resourceGroupName string, labName string, policySetName string, name string, policy Policy) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "labName": autorest.Encode("path", labName), + "name": autorest.Encode("path", name), + "policySetName": autorest.Encode("path", policySetName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DevTestLab/labs/{labName}/policysets/{policySetName}/policies/{name}", pathParameters), + autorest.WithJSON(policy), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// CreateOrUpdateResourceSender sends the CreateOrUpdateResource request. The method will close the +// http.Response Body if it receives an error. +func (client PolicyOperationsClient) CreateOrUpdateResourceSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// CreateOrUpdateResourceResponder handles the response to the CreateOrUpdateResource request. The method always +// closes the http.Response Body. +func (client PolicyOperationsClient) CreateOrUpdateResourceResponder(resp *http.Response) (result Policy, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// DeleteResource delete policy. +// +// resourceGroupName is the name of the resource group. 
labName is the name of +// the lab. policySetName is the name of the policy set. name is the name of +// the policy. +func (client PolicyOperationsClient) DeleteResource(resourceGroupName string, labName string, policySetName string, name string) (result autorest.Response, err error) { + req, err := client.DeleteResourcePreparer(resourceGroupName, labName, policySetName, name) + if err != nil { + return result, autorest.NewErrorWithError(err, "devtestlabs.PolicyOperationsClient", "DeleteResource", nil, "Failure preparing request") + } + + resp, err := client.DeleteResourceSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "devtestlabs.PolicyOperationsClient", "DeleteResource", resp, "Failure sending request") + } + + result, err = client.DeleteResourceResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "devtestlabs.PolicyOperationsClient", "DeleteResource", resp, "Failure responding to request") + } + + return +} + +// DeleteResourcePreparer prepares the DeleteResource request. 
+func (client PolicyOperationsClient) DeleteResourcePreparer(resourceGroupName string, labName string, policySetName string, name string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "labName": autorest.Encode("path", labName), + "name": autorest.Encode("path", name), + "policySetName": autorest.Encode("path", policySetName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DevTestLab/labs/{labName}/policysets/{policySetName}/policies/{name}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// DeleteResourceSender sends the DeleteResource request. The method will close the +// http.Response Body if it receives an error. +func (client PolicyOperationsClient) DeleteResourceSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// DeleteResourceResponder handles the response to the DeleteResource request. The method always +// closes the http.Response Body. +func (client PolicyOperationsClient) DeleteResourceResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// GetResource get policy. +// +// resourceGroupName is the name of the resource group. labName is the name of +// the lab. policySetName is the name of the policy set. name is the name of +// the policy. 
+func (client PolicyOperationsClient) GetResource(resourceGroupName string, labName string, policySetName string, name string) (result Policy, err error) { + req, err := client.GetResourcePreparer(resourceGroupName, labName, policySetName, name) + if err != nil { + return result, autorest.NewErrorWithError(err, "devtestlabs.PolicyOperationsClient", "GetResource", nil, "Failure preparing request") + } + + resp, err := client.GetResourceSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "devtestlabs.PolicyOperationsClient", "GetResource", resp, "Failure sending request") + } + + result, err = client.GetResourceResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "devtestlabs.PolicyOperationsClient", "GetResource", resp, "Failure responding to request") + } + + return +} + +// GetResourcePreparer prepares the GetResource request. +func (client PolicyOperationsClient) GetResourcePreparer(resourceGroupName string, labName string, policySetName string, name string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "labName": autorest.Encode("path", labName), + "name": autorest.Encode("path", name), + "policySetName": autorest.Encode("path", policySetName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DevTestLab/labs/{labName}/policysets/{policySetName}/policies/{name}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetResourceSender sends the GetResource request. 
The method will close the +// http.Response Body if it receives an error. +func (client PolicyOperationsClient) GetResourceSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetResourceResponder handles the response to the GetResource request. The method always +// closes the http.Response Body. +func (client PolicyOperationsClient) GetResourceResponder(resp *http.Response) (result Policy, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List list policies in a given policy set. +// +// resourceGroupName is the name of the resource group. labName is the name of +// the lab. policySetName is the name of the policy set. filter is the filter +// to apply on the operation. top is the maximum number of resources to +// return from the operation. orderBy is the ordering expression for the +// results, using OData notation. 
+func (client PolicyOperationsClient) List(resourceGroupName string, labName string, policySetName string, filter string, top *int32, orderBy string) (result ResponseWithContinuationPolicy, err error) { + req, err := client.ListPreparer(resourceGroupName, labName, policySetName, filter, top, orderBy) + if err != nil { + return result, autorest.NewErrorWithError(err, "devtestlabs.PolicyOperationsClient", "List", nil, "Failure preparing request") + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "devtestlabs.PolicyOperationsClient", "List", resp, "Failure sending request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "devtestlabs.PolicyOperationsClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client PolicyOperationsClient) ListPreparer(resourceGroupName string, labName string, policySetName string, filter string, top *int32, orderBy string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "labName": autorest.Encode("path", labName), + "policySetName": autorest.Encode("path", policySetName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + if len(filter) > 0 { + queryParameters["$filter"] = autorest.Encode("query", filter) + } + if top != nil { + queryParameters["$top"] = autorest.Encode("query", *top) + } + if len(orderBy) > 0 { + queryParameters["$orderBy"] = autorest.Encode("query", orderBy) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + 
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DevTestLab/labs/{labName}/policysets/{policySetName}/policies", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client PolicyOperationsClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client PolicyOperationsClient) ListResponder(resp *http.Response) (result ResponseWithContinuationPolicy, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListNextResults retrieves the next set of results, if any. 
+func (client PolicyOperationsClient) ListNextResults(lastResults ResponseWithContinuationPolicy) (result ResponseWithContinuationPolicy, err error) { + req, err := lastResults.ResponseWithContinuationPolicyPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "devtestlabs.PolicyOperationsClient", "List", nil, "Failure preparing next results request request") + } + if req == nil { + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "devtestlabs.PolicyOperationsClient", "List", resp, "Failure sending next results request request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "devtestlabs.PolicyOperationsClient", "List", resp, "Failure responding to next results request request") + } + + return +} + +// PatchResource modify properties of policies. +// +// resourceGroupName is the name of the resource group. labName is the name of +// the lab. policySetName is the name of the policy set. name is the name of +// the policy. 
+func (client PolicyOperationsClient) PatchResource(resourceGroupName string, labName string, policySetName string, name string, policy Policy) (result Policy, err error) { + req, err := client.PatchResourcePreparer(resourceGroupName, labName, policySetName, name, policy) + if err != nil { + return result, autorest.NewErrorWithError(err, "devtestlabs.PolicyOperationsClient", "PatchResource", nil, "Failure preparing request") + } + + resp, err := client.PatchResourceSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "devtestlabs.PolicyOperationsClient", "PatchResource", resp, "Failure sending request") + } + + result, err = client.PatchResourceResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "devtestlabs.PolicyOperationsClient", "PatchResource", resp, "Failure responding to request") + } + + return +} + +// PatchResourcePreparer prepares the PatchResource request. +func (client PolicyOperationsClient) PatchResourcePreparer(resourceGroupName string, labName string, policySetName string, name string, policy Policy) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "labName": autorest.Encode("path", labName), + "name": autorest.Encode("path", name), + "policySetName": autorest.Encode("path", policySetName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPatch(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DevTestLab/labs/{labName}/policysets/{policySetName}/policies/{name}", pathParameters), + autorest.WithJSON(policy), + autorest.WithQueryParameters(queryParameters)) + return 
preparer.Prepare(&http.Request{}) +} + +// PatchResourceSender sends the PatchResource request. The method will close the +// http.Response Body if it receives an error. +func (client PolicyOperationsClient) PatchResourceSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// PatchResourceResponder handles the response to the PatchResource request. The method always +// closes the http.Response Body. +func (client PolicyOperationsClient) PatchResourceResponder(resp *http.Response) (result Policy, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/devtestlabs/policyset.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/devtestlabs/policyset.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/devtestlabs/policyset.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/devtestlabs/policyset.go 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,107 @@ +package devtestlabs + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "net/http" +) + +// PolicySetClient is the the DevTest Labs Client. +type PolicySetClient struct { + ManagementClient +} + +// NewPolicySetClient creates an instance of the PolicySetClient client. +func NewPolicySetClient(subscriptionID string) PolicySetClient { + return NewPolicySetClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewPolicySetClientWithBaseURI creates an instance of the PolicySetClient +// client. +func NewPolicySetClientWithBaseURI(baseURI string, subscriptionID string) PolicySetClient { + return PolicySetClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// EvaluatePolicies evaluates Lab Policy. +// +// resourceGroupName is the name of the resource group. labName is the name of +// the lab. name is the name of the policy set. 
+func (client PolicySetClient) EvaluatePolicies(resourceGroupName string, labName string, name string, evaluatePoliciesRequest EvaluatePoliciesRequest) (result EvaluatePoliciesResponse, err error) { + req, err := client.EvaluatePoliciesPreparer(resourceGroupName, labName, name, evaluatePoliciesRequest) + if err != nil { + return result, autorest.NewErrorWithError(err, "devtestlabs.PolicySetClient", "EvaluatePolicies", nil, "Failure preparing request") + } + + resp, err := client.EvaluatePoliciesSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "devtestlabs.PolicySetClient", "EvaluatePolicies", resp, "Failure sending request") + } + + result, err = client.EvaluatePoliciesResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "devtestlabs.PolicySetClient", "EvaluatePolicies", resp, "Failure responding to request") + } + + return +} + +// EvaluatePoliciesPreparer prepares the EvaluatePolicies request. 
+func (client PolicySetClient) EvaluatePoliciesPreparer(resourceGroupName string, labName string, name string, evaluatePoliciesRequest EvaluatePoliciesRequest) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "labName": autorest.Encode("path", labName), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DevTestLab/labs/{labName}/policysets/{name}/evaluatePolicies", pathParameters), + autorest.WithJSON(evaluatePoliciesRequest), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// EvaluatePoliciesSender sends the EvaluatePolicies request. The method will close the +// http.Response Body if it receives an error. +func (client PolicySetClient) EvaluatePoliciesSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// EvaluatePoliciesResponder handles the response to the EvaluatePolicies request. The method always +// closes the http.Response Body. 
+func (client PolicySetClient) EvaluatePoliciesResponder(resp *http.Response) (result EvaluatePoliciesResponse, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/devtestlabs/scheduleoperations.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/devtestlabs/scheduleoperations.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/devtestlabs/scheduleoperations.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/devtestlabs/scheduleoperations.go 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,467 @@ +package devtestlabs + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "net/http" +) + +// ScheduleOperationsClient is the the DevTest Labs Client. 
+type ScheduleOperationsClient struct { + ManagementClient +} + +// NewScheduleOperationsClient creates an instance of the +// ScheduleOperationsClient client. +func NewScheduleOperationsClient(subscriptionID string) ScheduleOperationsClient { + return NewScheduleOperationsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewScheduleOperationsClientWithBaseURI creates an instance of the +// ScheduleOperationsClient client. +func NewScheduleOperationsClientWithBaseURI(baseURI string, subscriptionID string) ScheduleOperationsClient { + return ScheduleOperationsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdateResource create or replace an existing schedule. +// +// resourceGroupName is the name of the resource group. labName is the name of +// the lab. name is the name of the schedule. +func (client ScheduleOperationsClient) CreateOrUpdateResource(resourceGroupName string, labName string, name string, schedule Schedule) (result Schedule, err error) { + req, err := client.CreateOrUpdateResourcePreparer(resourceGroupName, labName, name, schedule) + if err != nil { + return result, autorest.NewErrorWithError(err, "devtestlabs.ScheduleOperationsClient", "CreateOrUpdateResource", nil, "Failure preparing request") + } + + resp, err := client.CreateOrUpdateResourceSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "devtestlabs.ScheduleOperationsClient", "CreateOrUpdateResource", resp, "Failure sending request") + } + + result, err = client.CreateOrUpdateResourceResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "devtestlabs.ScheduleOperationsClient", "CreateOrUpdateResource", resp, "Failure responding to request") + } + + return +} + +// CreateOrUpdateResourcePreparer prepares the CreateOrUpdateResource request. 
+func (client ScheduleOperationsClient) CreateOrUpdateResourcePreparer(resourceGroupName string, labName string, name string, schedule Schedule) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "labName": autorest.Encode("path", labName), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DevTestLab/labs/{labName}/schedules/{name}", pathParameters), + autorest.WithJSON(schedule), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// CreateOrUpdateResourceSender sends the CreateOrUpdateResource request. The method will close the +// http.Response Body if it receives an error. +func (client ScheduleOperationsClient) CreateOrUpdateResourceSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// CreateOrUpdateResourceResponder handles the response to the CreateOrUpdateResource request. The method always +// closes the http.Response Body. +func (client ScheduleOperationsClient) CreateOrUpdateResourceResponder(resp *http.Response) (result Schedule, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// DeleteResource delete schedule. +// +// resourceGroupName is the name of the resource group. labName is the name of +// the lab. name is the name of the schedule. 
+func (client ScheduleOperationsClient) DeleteResource(resourceGroupName string, labName string, name string) (result autorest.Response, err error) { + req, err := client.DeleteResourcePreparer(resourceGroupName, labName, name) + if err != nil { + return result, autorest.NewErrorWithError(err, "devtestlabs.ScheduleOperationsClient", "DeleteResource", nil, "Failure preparing request") + } + + resp, err := client.DeleteResourceSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "devtestlabs.ScheduleOperationsClient", "DeleteResource", resp, "Failure sending request") + } + + result, err = client.DeleteResourceResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "devtestlabs.ScheduleOperationsClient", "DeleteResource", resp, "Failure responding to request") + } + + return +} + +// DeleteResourcePreparer prepares the DeleteResource request. +func (client ScheduleOperationsClient) DeleteResourcePreparer(resourceGroupName string, labName string, name string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "labName": autorest.Encode("path", labName), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DevTestLab/labs/{labName}/schedules/{name}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// DeleteResourceSender sends the DeleteResource request. The method will close the +// http.Response Body if it receives an error. 
+func (client ScheduleOperationsClient) DeleteResourceSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// DeleteResourceResponder handles the response to the DeleteResource request. The method always +// closes the http.Response Body. +func (client ScheduleOperationsClient) DeleteResourceResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// Execute execute a schedule. This operation can take a while to complete. +// This method may poll for completion. Polling can be canceled by passing +// the cancel channel argument. The channel will be used to cancel polling +// and any outstanding HTTP requests. +// +// resourceGroupName is the name of the resource group. labName is the name of +// the lab. name is the name of the schedule. +func (client ScheduleOperationsClient) Execute(resourceGroupName string, labName string, name string, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.ExecutePreparer(resourceGroupName, labName, name, cancel) + if err != nil { + return result, autorest.NewErrorWithError(err, "devtestlabs.ScheduleOperationsClient", "Execute", nil, "Failure preparing request") + } + + resp, err := client.ExecuteSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "devtestlabs.ScheduleOperationsClient", "Execute", resp, "Failure sending request") + } + + result, err = client.ExecuteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "devtestlabs.ScheduleOperationsClient", "Execute", resp, "Failure responding to request") + } + + return +} + +// ExecutePreparer prepares the Execute request. 
+func (client ScheduleOperationsClient) ExecutePreparer(resourceGroupName string, labName string, name string, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "labName": autorest.Encode("path", labName), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DevTestLab/labs/{labName}/schedules/{name}/execute", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// ExecuteSender sends the Execute request. The method will close the +// http.Response Body if it receives an error. +func (client ScheduleOperationsClient) ExecuteSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// ExecuteResponder handles the response to the Execute request. The method always +// closes the http.Response Body. +func (client ScheduleOperationsClient) ExecuteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} + +// GetResource get schedule. +// +// resourceGroupName is the name of the resource group. labName is the name of +// the lab. name is the name of the schedule. 
+func (client ScheduleOperationsClient) GetResource(resourceGroupName string, labName string, name string) (result Schedule, err error) { + req, err := client.GetResourcePreparer(resourceGroupName, labName, name) + if err != nil { + return result, autorest.NewErrorWithError(err, "devtestlabs.ScheduleOperationsClient", "GetResource", nil, "Failure preparing request") + } + + resp, err := client.GetResourceSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "devtestlabs.ScheduleOperationsClient", "GetResource", resp, "Failure sending request") + } + + result, err = client.GetResourceResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "devtestlabs.ScheduleOperationsClient", "GetResource", resp, "Failure responding to request") + } + + return +} + +// GetResourcePreparer prepares the GetResource request. +func (client ScheduleOperationsClient) GetResourcePreparer(resourceGroupName string, labName string, name string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "labName": autorest.Encode("path", labName), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DevTestLab/labs/{labName}/schedules/{name}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetResourceSender sends the GetResource request. The method will close the +// http.Response Body if it receives an error. 
+func (client ScheduleOperationsClient) GetResourceSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetResourceResponder handles the response to the GetResource request. The method always +// closes the http.Response Body. +func (client ScheduleOperationsClient) GetResourceResponder(resp *http.Response) (result Schedule, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List list schedules in a given lab. +// +// resourceGroupName is the name of the resource group. labName is the name of +// the lab. filter is the filter to apply on the operation. top is the +// maximum number of resources to return from the operation. orderBy is the +// ordering expression for the results, using OData notation. +func (client ScheduleOperationsClient) List(resourceGroupName string, labName string, filter string, top *int32, orderBy string) (result ResponseWithContinuationSchedule, err error) { + req, err := client.ListPreparer(resourceGroupName, labName, filter, top, orderBy) + if err != nil { + return result, autorest.NewErrorWithError(err, "devtestlabs.ScheduleOperationsClient", "List", nil, "Failure preparing request") + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "devtestlabs.ScheduleOperationsClient", "List", resp, "Failure sending request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "devtestlabs.ScheduleOperationsClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. 
+func (client ScheduleOperationsClient) ListPreparer(resourceGroupName string, labName string, filter string, top *int32, orderBy string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "labName": autorest.Encode("path", labName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + if len(filter) > 0 { + queryParameters["$filter"] = autorest.Encode("query", filter) + } + if top != nil { + queryParameters["$top"] = autorest.Encode("query", *top) + } + if len(orderBy) > 0 { + queryParameters["$orderBy"] = autorest.Encode("query", orderBy) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DevTestLab/labs/{labName}/schedules", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client ScheduleOperationsClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client ScheduleOperationsClient) ListResponder(resp *http.Response) (result ResponseWithContinuationSchedule, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListNextResults retrieves the next set of results, if any. 
+func (client ScheduleOperationsClient) ListNextResults(lastResults ResponseWithContinuationSchedule) (result ResponseWithContinuationSchedule, err error) { + req, err := lastResults.ResponseWithContinuationSchedulePreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "devtestlabs.ScheduleOperationsClient", "List", nil, "Failure preparing next results request request") + } + if req == nil { + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "devtestlabs.ScheduleOperationsClient", "List", resp, "Failure sending next results request request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "devtestlabs.ScheduleOperationsClient", "List", resp, "Failure responding to next results request request") + } + + return +} + +// PatchResource modify properties of schedules. +// +// resourceGroupName is the name of the resource group. labName is the name of +// the lab. name is the name of the schedule. 
+func (client ScheduleOperationsClient) PatchResource(resourceGroupName string, labName string, name string, schedule Schedule) (result Schedule, err error) { + req, err := client.PatchResourcePreparer(resourceGroupName, labName, name, schedule) + if err != nil { + return result, autorest.NewErrorWithError(err, "devtestlabs.ScheduleOperationsClient", "PatchResource", nil, "Failure preparing request") + } + + resp, err := client.PatchResourceSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "devtestlabs.ScheduleOperationsClient", "PatchResource", resp, "Failure sending request") + } + + result, err = client.PatchResourceResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "devtestlabs.ScheduleOperationsClient", "PatchResource", resp, "Failure responding to request") + } + + return +} + +// PatchResourcePreparer prepares the PatchResource request. +func (client ScheduleOperationsClient) PatchResourcePreparer(resourceGroupName string, labName string, name string, schedule Schedule) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "labName": autorest.Encode("path", labName), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPatch(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DevTestLab/labs/{labName}/schedules/{name}", pathParameters), + autorest.WithJSON(schedule), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// PatchResourceSender sends the PatchResource request. 
The method will close the +// http.Response Body if it receives an error. +func (client ScheduleOperationsClient) PatchResourceSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// PatchResourceResponder handles the response to the PatchResource request. The method always +// closes the http.Response Body. +func (client ScheduleOperationsClient) PatchResourceResponder(resp *http.Response) (result Schedule, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/devtestlabs/version.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/devtestlabs/version.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/devtestlabs/version.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/devtestlabs/version.go 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,43 @@ +package devtestlabs + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. 
+ +import ( + "fmt" +) + +const ( + major = "3" + minor = "1" + patch = "0" + // Always begin a "tag" with a dash (as per http://semver.org) + tag = "-beta" + semVerFormat = "%s.%s.%s%s" + userAgentFormat = "Azure-SDK-for-Go/%s arm-%s/%s" +) + +// UserAgent returns the UserAgent string to use when sending http.Requests. +func UserAgent() string { + return fmt.Sprintf(userAgentFormat, Version(), "devtestlabs", "2016-05-15") +} + +// Version returns the semantic version (see http://semver.org) of the client. +func Version() string { + return fmt.Sprintf(semVerFormat, major, minor, patch, tag) +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/devtestlabs/virtualmachine.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/devtestlabs/virtualmachine.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/devtestlabs/virtualmachine.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/devtestlabs/virtualmachine.go 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,615 @@ +package devtestlabs + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. 
+ +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "net/http" +) + +// VirtualMachineClient is the the DevTest Labs Client. +type VirtualMachineClient struct { + ManagementClient +} + +// NewVirtualMachineClient creates an instance of the VirtualMachineClient +// client. +func NewVirtualMachineClient(subscriptionID string) VirtualMachineClient { + return NewVirtualMachineClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewVirtualMachineClientWithBaseURI creates an instance of the +// VirtualMachineClient client. +func NewVirtualMachineClientWithBaseURI(baseURI string, subscriptionID string) VirtualMachineClient { + return VirtualMachineClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// ApplyArtifacts apply artifacts to Lab VM. This operation can take a while +// to complete. This method may poll for completion. Polling can be canceled +// by passing the cancel channel argument. The channel will be used to cancel +// polling and any outstanding HTTP requests. +// +// resourceGroupName is the name of the resource group. labName is the name of +// the lab. name is the name of the virtual Machine. 
+func (client VirtualMachineClient) ApplyArtifacts(resourceGroupName string, labName string, name string, applyArtifactsRequest ApplyArtifactsRequest, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.ApplyArtifactsPreparer(resourceGroupName, labName, name, applyArtifactsRequest, cancel) + if err != nil { + return result, autorest.NewErrorWithError(err, "devtestlabs.VirtualMachineClient", "ApplyArtifacts", nil, "Failure preparing request") + } + + resp, err := client.ApplyArtifactsSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "devtestlabs.VirtualMachineClient", "ApplyArtifacts", resp, "Failure sending request") + } + + result, err = client.ApplyArtifactsResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "devtestlabs.VirtualMachineClient", "ApplyArtifacts", resp, "Failure responding to request") + } + + return +} + +// ApplyArtifactsPreparer prepares the ApplyArtifacts request. 
+func (client VirtualMachineClient) ApplyArtifactsPreparer(resourceGroupName string, labName string, name string, applyArtifactsRequest ApplyArtifactsRequest, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "labName": autorest.Encode("path", labName), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DevTestLab/labs/{labName}/virtualmachines/{name}/applyArtifacts", pathParameters), + autorest.WithJSON(applyArtifactsRequest), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// ApplyArtifactsSender sends the ApplyArtifacts request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachineClient) ApplyArtifactsSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// ApplyArtifactsResponder handles the response to the ApplyArtifacts request. The method always +// closes the http.Response Body. +func (client VirtualMachineClient) ApplyArtifactsResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} + +// CreateOrUpdateResource create or replace an existing Virtual Machine. This +// operation can take a while to complete. This method may poll for +// completion. 
Polling can be canceled by passing the cancel channel +// argument. The channel will be used to cancel polling and any outstanding +// HTTP requests. +// +// resourceGroupName is the name of the resource group. labName is the name of +// the lab. name is the name of the virtual Machine. +func (client VirtualMachineClient) CreateOrUpdateResource(resourceGroupName string, labName string, name string, labVirtualMachine LabVirtualMachine, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.CreateOrUpdateResourcePreparer(resourceGroupName, labName, name, labVirtualMachine, cancel) + if err != nil { + return result, autorest.NewErrorWithError(err, "devtestlabs.VirtualMachineClient", "CreateOrUpdateResource", nil, "Failure preparing request") + } + + resp, err := client.CreateOrUpdateResourceSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "devtestlabs.VirtualMachineClient", "CreateOrUpdateResource", resp, "Failure sending request") + } + + result, err = client.CreateOrUpdateResourceResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "devtestlabs.VirtualMachineClient", "CreateOrUpdateResource", resp, "Failure responding to request") + } + + return +} + +// CreateOrUpdateResourcePreparer prepares the CreateOrUpdateResource request. 
+func (client VirtualMachineClient) CreateOrUpdateResourcePreparer(resourceGroupName string, labName string, name string, labVirtualMachine LabVirtualMachine, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "labName": autorest.Encode("path", labName), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DevTestLab/labs/{labName}/virtualmachines/{name}", pathParameters), + autorest.WithJSON(labVirtualMachine), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// CreateOrUpdateResourceSender sends the CreateOrUpdateResource request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachineClient) CreateOrUpdateResourceSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// CreateOrUpdateResourceResponder handles the response to the CreateOrUpdateResource request. The method always +// closes the http.Response Body. +func (client VirtualMachineClient) CreateOrUpdateResourceResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByClosing()) + result.Response = resp + return +} + +// DeleteResource delete virtual machine. This operation can take a while to +// complete. This method may poll for completion. 
Polling can be canceled by +// passing the cancel channel argument. The channel will be used to cancel +// polling and any outstanding HTTP requests. +// +// resourceGroupName is the name of the resource group. labName is the name of +// the lab. name is the name of the virtual Machine. +func (client VirtualMachineClient) DeleteResource(resourceGroupName string, labName string, name string, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.DeleteResourcePreparer(resourceGroupName, labName, name, cancel) + if err != nil { + return result, autorest.NewErrorWithError(err, "devtestlabs.VirtualMachineClient", "DeleteResource", nil, "Failure preparing request") + } + + resp, err := client.DeleteResourceSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "devtestlabs.VirtualMachineClient", "DeleteResource", resp, "Failure sending request") + } + + result, err = client.DeleteResourceResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "devtestlabs.VirtualMachineClient", "DeleteResource", resp, "Failure responding to request") + } + + return +} + +// DeleteResourcePreparer prepares the DeleteResource request. 
+func (client VirtualMachineClient) DeleteResourcePreparer(resourceGroupName string, labName string, name string, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "labName": autorest.Encode("path", labName), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DevTestLab/labs/{labName}/virtualmachines/{name}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// DeleteResourceSender sends the DeleteResource request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachineClient) DeleteResourceSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// DeleteResourceResponder handles the response to the DeleteResource request. The method always +// closes the http.Response Body. +func (client VirtualMachineClient) DeleteResourceResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// GetResource get virtual machine. +// +// resourceGroupName is the name of the resource group. labName is the name of +// the lab. name is the name of the virtual Machine. 
+func (client VirtualMachineClient) GetResource(resourceGroupName string, labName string, name string) (result LabVirtualMachine, err error) { + req, err := client.GetResourcePreparer(resourceGroupName, labName, name) + if err != nil { + return result, autorest.NewErrorWithError(err, "devtestlabs.VirtualMachineClient", "GetResource", nil, "Failure preparing request") + } + + resp, err := client.GetResourceSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "devtestlabs.VirtualMachineClient", "GetResource", resp, "Failure sending request") + } + + result, err = client.GetResourceResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "devtestlabs.VirtualMachineClient", "GetResource", resp, "Failure responding to request") + } + + return +} + +// GetResourcePreparer prepares the GetResource request. +func (client VirtualMachineClient) GetResourcePreparer(resourceGroupName string, labName string, name string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "labName": autorest.Encode("path", labName), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DevTestLab/labs/{labName}/virtualmachines/{name}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetResourceSender sends the GetResource request. The method will close the +// http.Response Body if it receives an error. 
+func (client VirtualMachineClient) GetResourceSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetResourceResponder handles the response to the GetResource request. The method always +// closes the http.Response Body. +func (client VirtualMachineClient) GetResourceResponder(resp *http.Response) (result LabVirtualMachine, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List list virtual machines in a given lab. +// +// resourceGroupName is the name of the resource group. labName is the name of +// the lab. filter is the filter to apply on the operation. top is the +// maximum number of resources to return from the operation. orderBy is the +// ordering expression for the results, using OData notation. +func (client VirtualMachineClient) List(resourceGroupName string, labName string, filter string, top *int32, orderBy string) (result ResponseWithContinuationLabVirtualMachine, err error) { + req, err := client.ListPreparer(resourceGroupName, labName, filter, top, orderBy) + if err != nil { + return result, autorest.NewErrorWithError(err, "devtestlabs.VirtualMachineClient", "List", nil, "Failure preparing request") + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "devtestlabs.VirtualMachineClient", "List", resp, "Failure sending request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "devtestlabs.VirtualMachineClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. 
+func (client VirtualMachineClient) ListPreparer(resourceGroupName string, labName string, filter string, top *int32, orderBy string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "labName": autorest.Encode("path", labName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + if len(filter) > 0 { + queryParameters["$filter"] = autorest.Encode("query", filter) + } + if top != nil { + queryParameters["$top"] = autorest.Encode("query", *top) + } + if len(orderBy) > 0 { + queryParameters["$orderBy"] = autorest.Encode("query", orderBy) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DevTestLab/labs/{labName}/virtualmachines", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachineClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client VirtualMachineClient) ListResponder(resp *http.Response) (result ResponseWithContinuationLabVirtualMachine, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListNextResults retrieves the next set of results, if any. 
+func (client VirtualMachineClient) ListNextResults(lastResults ResponseWithContinuationLabVirtualMachine) (result ResponseWithContinuationLabVirtualMachine, err error) { + req, err := lastResults.ResponseWithContinuationLabVirtualMachinePreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "devtestlabs.VirtualMachineClient", "List", nil, "Failure preparing next results request request") + } + if req == nil { + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "devtestlabs.VirtualMachineClient", "List", resp, "Failure sending next results request request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "devtestlabs.VirtualMachineClient", "List", resp, "Failure responding to next results request request") + } + + return +} + +// PatchResource modify properties of virtual machines. +// +// resourceGroupName is the name of the resource group. labName is the name of +// the lab. name is the name of the virtual Machine. 
+func (client VirtualMachineClient) PatchResource(resourceGroupName string, labName string, name string, labVirtualMachine LabVirtualMachine) (result LabVirtualMachine, err error) { + req, err := client.PatchResourcePreparer(resourceGroupName, labName, name, labVirtualMachine) + if err != nil { + return result, autorest.NewErrorWithError(err, "devtestlabs.VirtualMachineClient", "PatchResource", nil, "Failure preparing request") + } + + resp, err := client.PatchResourceSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "devtestlabs.VirtualMachineClient", "PatchResource", resp, "Failure sending request") + } + + result, err = client.PatchResourceResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "devtestlabs.VirtualMachineClient", "PatchResource", resp, "Failure responding to request") + } + + return +} + +// PatchResourcePreparer prepares the PatchResource request. +func (client VirtualMachineClient) PatchResourcePreparer(resourceGroupName string, labName string, name string, labVirtualMachine LabVirtualMachine) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "labName": autorest.Encode("path", labName), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPatch(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DevTestLab/labs/{labName}/virtualmachines/{name}", pathParameters), + autorest.WithJSON(labVirtualMachine), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// PatchResourceSender sends the 
PatchResource request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachineClient) PatchResourceSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// PatchResourceResponder handles the response to the PatchResource request. The method always +// closes the http.Response Body. +func (client VirtualMachineClient) PatchResourceResponder(resp *http.Response) (result LabVirtualMachine, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Start start a Lab VM. This operation can take a while to complete. This +// method may poll for completion. Polling can be canceled by passing the +// cancel channel argument. The channel will be used to cancel polling and +// any outstanding HTTP requests. +// +// resourceGroupName is the name of the resource group. labName is the name of +// the lab. name is the name of the virtual Machine. +func (client VirtualMachineClient) Start(resourceGroupName string, labName string, name string, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.StartPreparer(resourceGroupName, labName, name, cancel) + if err != nil { + return result, autorest.NewErrorWithError(err, "devtestlabs.VirtualMachineClient", "Start", nil, "Failure preparing request") + } + + resp, err := client.StartSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "devtestlabs.VirtualMachineClient", "Start", resp, "Failure sending request") + } + + result, err = client.StartResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "devtestlabs.VirtualMachineClient", "Start", resp, "Failure responding to request") + } + + return +} + +// StartPreparer prepares the Start request. 
+func (client VirtualMachineClient) StartPreparer(resourceGroupName string, labName string, name string, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "labName": autorest.Encode("path", labName), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DevTestLab/labs/{labName}/virtualmachines/{name}/start", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// StartSender sends the Start request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachineClient) StartSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// StartResponder handles the response to the Start request. The method always +// closes the http.Response Body. +func (client VirtualMachineClient) StartResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} + +// Stop stop a Lab VM. This operation can take a while to complete. This +// method may poll for completion. Polling can be canceled by passing the +// cancel channel argument. The channel will be used to cancel polling and +// any outstanding HTTP requests. +// +// resourceGroupName is the name of the resource group. 
labName is the name of +// the lab. name is the name of the virtual Machine. +func (client VirtualMachineClient) Stop(resourceGroupName string, labName string, name string, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.StopPreparer(resourceGroupName, labName, name, cancel) + if err != nil { + return result, autorest.NewErrorWithError(err, "devtestlabs.VirtualMachineClient", "Stop", nil, "Failure preparing request") + } + + resp, err := client.StopSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "devtestlabs.VirtualMachineClient", "Stop", resp, "Failure sending request") + } + + result, err = client.StopResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "devtestlabs.VirtualMachineClient", "Stop", resp, "Failure responding to request") + } + + return +} + +// StopPreparer prepares the Stop request. +func (client VirtualMachineClient) StopPreparer(resourceGroupName string, labName string, name string, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "labName": autorest.Encode("path", labName), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DevTestLab/labs/{labName}/virtualmachines/{name}/stop", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// StopSender sends the Stop request. The method will close the +// http.Response Body if it receives an error. 
+func (client VirtualMachineClient) StopSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// StopResponder handles the response to the Stop request. The method always +// closes the http.Response Body. +func (client VirtualMachineClient) StopResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/devtestlabs/virtualnetworkoperations.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/devtestlabs/virtualnetworkoperations.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/devtestlabs/virtualnetworkoperations.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/devtestlabs/virtualnetworkoperations.go 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,409 @@ +package devtestlabs + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. 
+ +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "net/http" +) + +// VirtualNetworkOperationsClient is the the DevTest Labs Client. +type VirtualNetworkOperationsClient struct { + ManagementClient +} + +// NewVirtualNetworkOperationsClient creates an instance of the +// VirtualNetworkOperationsClient client. +func NewVirtualNetworkOperationsClient(subscriptionID string) VirtualNetworkOperationsClient { + return NewVirtualNetworkOperationsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewVirtualNetworkOperationsClientWithBaseURI creates an instance of the +// VirtualNetworkOperationsClient client. +func NewVirtualNetworkOperationsClientWithBaseURI(baseURI string, subscriptionID string) VirtualNetworkOperationsClient { + return VirtualNetworkOperationsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdateResource create or replace an existing virtual network. This +// operation can take a while to complete. This method may poll for +// completion. Polling can be canceled by passing the cancel channel +// argument. The channel will be used to cancel polling and any outstanding +// HTTP requests. +// +// resourceGroupName is the name of the resource group. labName is the name of +// the lab. name is the name of the virtual network. 
+func (client VirtualNetworkOperationsClient) CreateOrUpdateResource(resourceGroupName string, labName string, name string, virtualNetwork VirtualNetwork, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.CreateOrUpdateResourcePreparer(resourceGroupName, labName, name, virtualNetwork, cancel) + if err != nil { + return result, autorest.NewErrorWithError(err, "devtestlabs.VirtualNetworkOperationsClient", "CreateOrUpdateResource", nil, "Failure preparing request") + } + + resp, err := client.CreateOrUpdateResourceSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "devtestlabs.VirtualNetworkOperationsClient", "CreateOrUpdateResource", resp, "Failure sending request") + } + + result, err = client.CreateOrUpdateResourceResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "devtestlabs.VirtualNetworkOperationsClient", "CreateOrUpdateResource", resp, "Failure responding to request") + } + + return +} + +// CreateOrUpdateResourcePreparer prepares the CreateOrUpdateResource request. 
+func (client VirtualNetworkOperationsClient) CreateOrUpdateResourcePreparer(resourceGroupName string, labName string, name string, virtualNetwork VirtualNetwork, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "labName": autorest.Encode("path", labName), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DevTestLab/labs/{labName}/virtualnetworks/{name}", pathParameters), + autorest.WithJSON(virtualNetwork), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// CreateOrUpdateResourceSender sends the CreateOrUpdateResource request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualNetworkOperationsClient) CreateOrUpdateResourceSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// CreateOrUpdateResourceResponder handles the response to the CreateOrUpdateResource request. The method always +// closes the http.Response Body. +func (client VirtualNetworkOperationsClient) CreateOrUpdateResourceResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByClosing()) + result.Response = resp + return +} + +// DeleteResource delete virtual network. This operation can take a while to +// complete. 
This method may poll for completion. Polling can be canceled by +// passing the cancel channel argument. The channel will be used to cancel +// polling and any outstanding HTTP requests. +// +// resourceGroupName is the name of the resource group. labName is the name of +// the lab. name is the name of the virtual network. +func (client VirtualNetworkOperationsClient) DeleteResource(resourceGroupName string, labName string, name string, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.DeleteResourcePreparer(resourceGroupName, labName, name, cancel) + if err != nil { + return result, autorest.NewErrorWithError(err, "devtestlabs.VirtualNetworkOperationsClient", "DeleteResource", nil, "Failure preparing request") + } + + resp, err := client.DeleteResourceSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "devtestlabs.VirtualNetworkOperationsClient", "DeleteResource", resp, "Failure sending request") + } + + result, err = client.DeleteResourceResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "devtestlabs.VirtualNetworkOperationsClient", "DeleteResource", resp, "Failure responding to request") + } + + return +} + +// DeleteResourcePreparer prepares the DeleteResource request. 
+func (client VirtualNetworkOperationsClient) DeleteResourcePreparer(resourceGroupName string, labName string, name string, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "labName": autorest.Encode("path", labName), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DevTestLab/labs/{labName}/virtualnetworks/{name}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// DeleteResourceSender sends the DeleteResource request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualNetworkOperationsClient) DeleteResourceSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// DeleteResourceResponder handles the response to the DeleteResource request. The method always +// closes the http.Response Body. +func (client VirtualNetworkOperationsClient) DeleteResourceResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// GetResource get virtual network. +// +// resourceGroupName is the name of the resource group. labName is the name of +// the lab. name is the name of the virtual network. 
+func (client VirtualNetworkOperationsClient) GetResource(resourceGroupName string, labName string, name string) (result VirtualNetwork, err error) { + req, err := client.GetResourcePreparer(resourceGroupName, labName, name) + if err != nil { + return result, autorest.NewErrorWithError(err, "devtestlabs.VirtualNetworkOperationsClient", "GetResource", nil, "Failure preparing request") + } + + resp, err := client.GetResourceSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "devtestlabs.VirtualNetworkOperationsClient", "GetResource", resp, "Failure sending request") + } + + result, err = client.GetResourceResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "devtestlabs.VirtualNetworkOperationsClient", "GetResource", resp, "Failure responding to request") + } + + return +} + +// GetResourcePreparer prepares the GetResource request. +func (client VirtualNetworkOperationsClient) GetResourcePreparer(resourceGroupName string, labName string, name string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "labName": autorest.Encode("path", labName), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DevTestLab/labs/{labName}/virtualnetworks/{name}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetResourceSender sends the GetResource request. The method will close the +// http.Response Body if it receives an error. 
+func (client VirtualNetworkOperationsClient) GetResourceSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetResourceResponder handles the response to the GetResource request. The method always +// closes the http.Response Body. +func (client VirtualNetworkOperationsClient) GetResourceResponder(resp *http.Response) (result VirtualNetwork, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List list virtual networks in a given lab. +// +// resourceGroupName is the name of the resource group. labName is the name of +// the lab. filter is the filter to apply on the operation. top is the +// maximum number of resources to return from the operation. orderBy is the +// ordering expression for the results, using OData notation. +func (client VirtualNetworkOperationsClient) List(resourceGroupName string, labName string, filter string, top *int32, orderBy string) (result ResponseWithContinuationVirtualNetwork, err error) { + req, err := client.ListPreparer(resourceGroupName, labName, filter, top, orderBy) + if err != nil { + return result, autorest.NewErrorWithError(err, "devtestlabs.VirtualNetworkOperationsClient", "List", nil, "Failure preparing request") + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "devtestlabs.VirtualNetworkOperationsClient", "List", resp, "Failure sending request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "devtestlabs.VirtualNetworkOperationsClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. 
+func (client VirtualNetworkOperationsClient) ListPreparer(resourceGroupName string, labName string, filter string, top *int32, orderBy string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "labName": autorest.Encode("path", labName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + if len(filter) > 0 { + queryParameters["$filter"] = autorest.Encode("query", filter) + } + if top != nil { + queryParameters["$top"] = autorest.Encode("query", *top) + } + if len(orderBy) > 0 { + queryParameters["$orderBy"] = autorest.Encode("query", orderBy) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DevTestLab/labs/{labName}/virtualnetworks", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualNetworkOperationsClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client VirtualNetworkOperationsClient) ListResponder(resp *http.Response) (result ResponseWithContinuationVirtualNetwork, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListNextResults retrieves the next set of results, if any. 
+func (client VirtualNetworkOperationsClient) ListNextResults(lastResults ResponseWithContinuationVirtualNetwork) (result ResponseWithContinuationVirtualNetwork, err error) { + req, err := lastResults.ResponseWithContinuationVirtualNetworkPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "devtestlabs.VirtualNetworkOperationsClient", "List", nil, "Failure preparing next results request request") + } + if req == nil { + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "devtestlabs.VirtualNetworkOperationsClient", "List", resp, "Failure sending next results request request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "devtestlabs.VirtualNetworkOperationsClient", "List", resp, "Failure responding to next results request request") + } + + return +} + +// PatchResource modify properties of virtual networks. +// +// resourceGroupName is the name of the resource group. labName is the name of +// the lab. name is the name of the virtual network. 
+func (client VirtualNetworkOperationsClient) PatchResource(resourceGroupName string, labName string, name string, virtualNetwork VirtualNetwork) (result VirtualNetwork, err error) { + req, err := client.PatchResourcePreparer(resourceGroupName, labName, name, virtualNetwork) + if err != nil { + return result, autorest.NewErrorWithError(err, "devtestlabs.VirtualNetworkOperationsClient", "PatchResource", nil, "Failure preparing request") + } + + resp, err := client.PatchResourceSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "devtestlabs.VirtualNetworkOperationsClient", "PatchResource", resp, "Failure sending request") + } + + result, err = client.PatchResourceResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "devtestlabs.VirtualNetworkOperationsClient", "PatchResource", resp, "Failure responding to request") + } + + return +} + +// PatchResourcePreparer prepares the PatchResource request. 
+func (client VirtualNetworkOperationsClient) PatchResourcePreparer(resourceGroupName string, labName string, name string, virtualNetwork VirtualNetwork) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "labName": autorest.Encode("path", labName), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPatch(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DevTestLab/labs/{labName}/virtualnetworks/{name}", pathParameters), + autorest.WithJSON(virtualNetwork), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// PatchResourceSender sends the PatchResource request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualNetworkOperationsClient) PatchResourceSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// PatchResourceResponder handles the response to the PatchResource request. The method always +// closes the http.Response Body. 
+func (client VirtualNetworkOperationsClient) PatchResourceResponder(resp *http.Response) (result VirtualNetwork, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/dns/client.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/dns/client.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/dns/client.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/dns/client.go 2016-10-13 14:32:06.000000000 +0000 @@ -1,3 +1,5 @@ +// Package dns implements the Azure ARM Dns service API version 2016-04-01. +// package dns // Copyright (c) Microsoft and contributors. All rights reserved. @@ -14,26 +16,27 @@ // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 // Changes may cause incorrect behavior and will be lost if the code is // regenerated. import ( - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest" ) const ( // APIVersion is the version of the Dns - APIVersion = "2015-11-01" + APIVersion = "2016-04-01" // DefaultBaseURI is the default URI used for the service Dns DefaultBaseURI = "https://management.azure.com" ) -// ManagementClient is the client for managing DNS zones and record. +// ManagementClient is the base client for Dns. 
type ManagementClient struct { autorest.Client BaseURI string + APIVersion string SubscriptionID string } @@ -47,6 +50,7 @@ return ManagementClient{ Client: autorest.NewClientWithUserAgent(UserAgent()), BaseURI: baseURI, + APIVersion: APIVersion, SubscriptionID: subscriptionID, } } diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/dns/models.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/dns/models.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/dns/models.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/dns/models.go 2016-10-13 14:32:06.000000000 +0000 @@ -14,12 +14,145 @@ // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 // Changes may cause incorrect behavior and will be lost if the code is // regenerated. import ( - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/to" + "net/http" +) + +// HtpStatusCode enumerates the values for htp status code. +type HtpStatusCode string + +const ( + // Accepted specifies the accepted state for htp status code. + Accepted HtpStatusCode = "Accepted" + // Ambiguous specifies the ambiguous state for htp status code. + Ambiguous HtpStatusCode = "Ambiguous" + // BadGateway specifies the bad gateway state for htp status code. + BadGateway HtpStatusCode = "BadGateway" + // BadRequest specifies the bad request state for htp status code. + BadRequest HtpStatusCode = "BadRequest" + // Conflict specifies the conflict state for htp status code. + Conflict HtpStatusCode = "Conflict" + // Continue specifies the continue state for htp status code. 
+ Continue HtpStatusCode = "Continue" + // Created specifies the created state for htp status code. + Created HtpStatusCode = "Created" + // ExpectationFailed specifies the expectation failed state for htp status + // code. + ExpectationFailed HtpStatusCode = "ExpectationFailed" + // Forbidden specifies the forbidden state for htp status code. + Forbidden HtpStatusCode = "Forbidden" + // Found specifies the found state for htp status code. + Found HtpStatusCode = "Found" + // GatewayTimeout specifies the gateway timeout state for htp status code. + GatewayTimeout HtpStatusCode = "GatewayTimeout" + // Gone specifies the gone state for htp status code. + Gone HtpStatusCode = "Gone" + // HTTPVersionNotSupported specifies the http version not supported state + // for htp status code. + HTTPVersionNotSupported HtpStatusCode = "HttpVersionNotSupported" + // InternalServerError specifies the internal server error state for htp + // status code. + InternalServerError HtpStatusCode = "InternalServerError" + // LengthRequired specifies the length required state for htp status code. + LengthRequired HtpStatusCode = "LengthRequired" + // MethodNotAllowed specifies the method not allowed state for htp status + // code. + MethodNotAllowed HtpStatusCode = "MethodNotAllowed" + // Moved specifies the moved state for htp status code. + Moved HtpStatusCode = "Moved" + // MovedPermanently specifies the moved permanently state for htp status + // code. + MovedPermanently HtpStatusCode = "MovedPermanently" + // MultipleChoices specifies the multiple choices state for htp status + // code. + MultipleChoices HtpStatusCode = "MultipleChoices" + // NoContent specifies the no content state for htp status code. + NoContent HtpStatusCode = "NoContent" + // NonAuthoritativeInformation specifies the non authoritative information + // state for htp status code. 
+ NonAuthoritativeInformation HtpStatusCode = "NonAuthoritativeInformation" + // NotAcceptable specifies the not acceptable state for htp status code. + NotAcceptable HtpStatusCode = "NotAcceptable" + // NotFound specifies the not found state for htp status code. + NotFound HtpStatusCode = "NotFound" + // NotImplemented specifies the not implemented state for htp status code. + NotImplemented HtpStatusCode = "NotImplemented" + // NotModified specifies the not modified state for htp status code. + NotModified HtpStatusCode = "NotModified" + // OK specifies the ok state for htp status code. + OK HtpStatusCode = "OK" + // PartialContent specifies the partial content state for htp status code. + PartialContent HtpStatusCode = "PartialContent" + // PaymentRequired specifies the payment required state for htp status + // code. + PaymentRequired HtpStatusCode = "PaymentRequired" + // PreconditionFailed specifies the precondition failed state for htp + // status code. + PreconditionFailed HtpStatusCode = "PreconditionFailed" + // ProxyAuthenticationRequired specifies the proxy authentication required + // state for htp status code. + ProxyAuthenticationRequired HtpStatusCode = "ProxyAuthenticationRequired" + // Redirect specifies the redirect state for htp status code. + Redirect HtpStatusCode = "Redirect" + // RedirectKeepVerb specifies the redirect keep verb state for htp status + // code. + RedirectKeepVerb HtpStatusCode = "RedirectKeepVerb" + // RedirectMethod specifies the redirect method state for htp status code. + RedirectMethod HtpStatusCode = "RedirectMethod" + // RequestedRangeNotSatisfiable specifies the requested range not + // satisfiable state for htp status code. + RequestedRangeNotSatisfiable HtpStatusCode = "RequestedRangeNotSatisfiable" + // RequestEntityTooLarge specifies the request entity too large state for + // htp status code. 
+ RequestEntityTooLarge HtpStatusCode = "RequestEntityTooLarge" + // RequestTimeout specifies the request timeout state for htp status code. + RequestTimeout HtpStatusCode = "RequestTimeout" + // RequestURITooLong specifies the request uri too long state for htp + // status code. + RequestURITooLong HtpStatusCode = "RequestUriTooLong" + // ResetContent specifies the reset content state for htp status code. + ResetContent HtpStatusCode = "ResetContent" + // SeeOther specifies the see other state for htp status code. + SeeOther HtpStatusCode = "SeeOther" + // ServiceUnavailable specifies the service unavailable state for htp + // status code. + ServiceUnavailable HtpStatusCode = "ServiceUnavailable" + // SwitchingProtocols specifies the switching protocols state for htp + // status code. + SwitchingProtocols HtpStatusCode = "SwitchingProtocols" + // TemporaryRedirect specifies the temporary redirect state for htp status + // code. + TemporaryRedirect HtpStatusCode = "TemporaryRedirect" + // Unauthorized specifies the unauthorized state for htp status code. + Unauthorized HtpStatusCode = "Unauthorized" + // UnsupportedMediaType specifies the unsupported media type state for htp + // status code. + UnsupportedMediaType HtpStatusCode = "UnsupportedMediaType" + // Unused specifies the unused state for htp status code. + Unused HtpStatusCode = "Unused" + // UpgradeRequired specifies the upgrade required state for htp status + // code. + UpgradeRequired HtpStatusCode = "UpgradeRequired" + // UseProxy specifies the use proxy state for htp status code. + UseProxy HtpStatusCode = "UseProxy" +) + +// OperationStatus enumerates the values for operation status. +type OperationStatus string + +const ( + // Failed specifies the failed state for operation status. + Failed OperationStatus = "Failed" + // InProgress specifies the in progress state for operation status. + InProgress OperationStatus = "InProgress" + // Succeeded specifies the succeeded state for operation status. 
+ Succeeded OperationStatus = "Succeeded" ) // RecordType enumerates the values for record type. @@ -56,6 +189,19 @@ Ipv4Address *string `json:"ipv4Address,omitempty"` } +// CloudError is +type CloudError struct { + Error *CloudErrorBody `json:"error,omitempty"` +} + +// CloudErrorBody is +type CloudErrorBody struct { + Code *string `json:"code,omitempty"` + Message *string `json:"message,omitempty"` + Target *string `json:"target,omitempty"` + Details *[]CloudErrorBody `json:"details,omitempty"` +} + // CnameRecord is a CNAME record. type CnameRecord struct { Cname *string `json:"cname,omitempty"` @@ -63,7 +209,7 @@ // MxRecord is an MX record. type MxRecord struct { - Preference *int `json:"preference,omitempty"` + Preference *int32 `json:"preference,omitempty"` Exchange *string `json:"exchange,omitempty"` } @@ -84,18 +230,10 @@ ID *string `json:"id,omitempty"` Name *string `json:"name,omitempty"` Type *string `json:"type,omitempty"` - Location *string `json:"location,omitempty"` - Tags *map[string]*string `json:"tags,omitempty"` Etag *string `json:"etag,omitempty"` Properties *RecordSetProperties `json:"properties,omitempty"` } -// RecordSetCreateOrUpdateParameters is parameters supplied to create or -// update a RecordSet. -type RecordSetCreateOrUpdateParameters struct { - RecordSet *RecordSet `json:"RecordSet,omitempty"` -} - // RecordSetListResult is the response to a RecordSet List operation. type RecordSetListResult struct { autorest.Response `json:"-"` @@ -103,19 +241,37 @@ NextLink *string `json:"nextLink,omitempty"` } +// RecordSetListResultPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. 
+func (client RecordSetListResult) RecordSetListResultPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + // RecordSetProperties is represents the properties of the records in the // RecordSet. type RecordSetProperties struct { - TTL *int32 `json:"TTL,omitempty"` - ARecords *[]ARecord `json:"ARecords,omitempty"` - AAAARecords *[]AaaaRecord `json:"AAAARecords,omitempty"` - MXRecords *[]MxRecord `json:"MXRecords,omitempty"` - NSRecords *[]NsRecord `json:"NSRecords,omitempty"` - PTRRecords *[]PtrRecord `json:"PTRRecords,omitempty"` - SRVRecords *[]SrvRecord `json:"SRVRecords,omitempty"` - TXTRecords *[]TxtRecord `json:"TXTRecords,omitempty"` - CNAMERecord *CnameRecord `json:"CNAMERecord,omitempty"` - SOARecord *SoaRecord `json:"SOARecord,omitempty"` + Metadata *map[string]*string `json:"metadata,omitempty"` + TTL *int64 `json:"TTL,omitempty"` + ARecords *[]ARecord `json:"ARecords,omitempty"` + AAAARecords *[]AaaaRecord `json:"AAAARecords,omitempty"` + MXRecords *[]MxRecord `json:"MXRecords,omitempty"` + NSRecords *[]NsRecord `json:"NSRecords,omitempty"` + PTRRecords *[]PtrRecord `json:"PTRRecords,omitempty"` + SRVRecords *[]SrvRecord `json:"SRVRecords,omitempty"` + TXTRecords *[]TxtRecord `json:"TXTRecords,omitempty"` + CNAMERecord *CnameRecord `json:"CNAMERecord,omitempty"` + SOARecord *SoaRecord `json:"SOARecord,omitempty"` +} + +// RecordSetUpdateParameters is parameters supplied to update a RecordSet. 
+type RecordSetUpdateParameters struct { + RecordSet *RecordSet `json:"RecordSet,omitempty"` } // Resource is @@ -131,18 +287,18 @@ type SoaRecord struct { Host *string `json:"host,omitempty"` Email *string `json:"email,omitempty"` - SerialNumber *int32 `json:"serialNumber,omitempty"` - RefreshTime *int32 `json:"refreshTime,omitempty"` - RetryTime *int32 `json:"retryTime,omitempty"` - ExpireTime *int32 `json:"expireTime,omitempty"` - MinimumTTL *int32 `json:"minimumTTL,omitempty"` + SerialNumber *int64 `json:"serialNumber,omitempty"` + RefreshTime *int64 `json:"refreshTime,omitempty"` + RetryTime *int64 `json:"retryTime,omitempty"` + ExpireTime *int64 `json:"expireTime,omitempty"` + MinimumTTL *int64 `json:"minimumTTL,omitempty"` } // SrvRecord is an SRV record. type SrvRecord struct { - Priority *int `json:"priority,omitempty"` - Weight *int `json:"weight,omitempty"` - Port *int `json:"port,omitempty"` + Priority *int32 `json:"priority,omitempty"` + Weight *int32 `json:"weight,omitempty"` + Port *int32 `json:"port,omitempty"` Target *string `json:"target,omitempty"` } @@ -168,9 +324,13 @@ Properties *ZoneProperties `json:"properties,omitempty"` } -// ZoneCreateOrUpdateParameters is parameters supplied to create a zone. -type ZoneCreateOrUpdateParameters struct { - Zone *Zone `json:"Zone,omitempty"` +// ZoneDeleteResult is the response to a Zone Delete operation. +type ZoneDeleteResult struct { + autorest.Response `json:"-"` + AzureAsyncOperation *string `json:"azureAsyncOperation,omitempty"` + Status OperationStatus `json:"status,omitempty"` + StatusCode HtpStatusCode `json:"statusCode,omitempty"` + RequestID *string `json:"requestId,omitempty"` } // ZoneListResult is the response to a Zone List or ListAll operation. @@ -180,8 +340,21 @@ NextLink *string `json:"nextLink,omitempty"` } +// ZoneListResultPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. 
+func (client ZoneListResult) ZoneListResultPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + // ZoneProperties is represents the properties of the zone. type ZoneProperties struct { - MaxNumberOfRecordSets *int32 `json:"maxNumberOfRecordSets,omitempty"` - NumberOfRecordSets *int32 `json:"numberOfRecordSets,omitempty"` + MaxNumberOfRecordSets *int64 `json:"maxNumberOfRecordSets,omitempty"` + NumberOfRecordSets *int64 `json:"numberOfRecordSets,omitempty"` + NameServers *[]string `json:"nameServers,omitempty"` } diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/dns/recordsets.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/dns/recordsets.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/dns/recordsets.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/dns/recordsets.go 2016-10-13 14:32:06.000000000 +0000 @@ -14,17 +14,18 @@ // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 // Changes may cause incorrect behavior and will be lost if the code is // regenerated. import ( - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" "net/http" - "net/url" ) -// RecordSetsClient is the client for managing DNS zones and record. +// RecordSetsClient is the client for the RecordSets methods of the Dns +// service. 
type RecordSetsClient struct { ManagementClient } @@ -40,64 +41,72 @@ return RecordSetsClient{NewWithBaseURI(baseURI, subscriptionID)} } -// CreateOrUpdate creates a RecordSet within a DNS zone. +// CreateOrUpdate creates or Updates a RecordSet within a DNS zone. // // resourceGroupName is the name of the resource group. zoneName is the name -// of the zone without a terminating dot. recordType is the type of DNS -// record. Possible values for this parameter include: 'A', 'AAAA', 'CNAME', -// 'MX', 'NS', 'PTR', 'SOA', 'SRV', 'TXT' relativeRecordSetName is the name -// of the RecordSet, relative to the name of the zone. parameters is -// parameters supplied to the CreateOrUpdate operation. ifMatch is the etag -// of RecordSet. ifNoneMatch is defines the If-None-Match condition. Set to -// '*' to force Create-If-Not-Exist. Other values will be ignored. -func (client RecordSetsClient) CreateOrUpdate(resourceGroupName string, zoneName string, recordType RecordType, relativeRecordSetName string, parameters RecordSetCreateOrUpdateParameters, ifMatch string, ifNoneMatch string) (result RecordSet, ae error) { - req, err := client.CreateOrUpdatePreparer(resourceGroupName, zoneName, recordType, relativeRecordSetName, parameters, ifMatch, ifNoneMatch) +// of the zone without a terminating dot. relativeRecordSetName is the name +// of the RecordSet, relative to the name of the zone. recordType is the type +// of DNS record. Possible values include: 'A', 'AAAA', 'CNAME', 'MX', 'NS', +// 'PTR', 'SOA', 'SRV', 'TXT' parameters is parameters supplied to the +// CreateOrUpdate operation. ifMatch is the etag of Recordset. ifNoneMatch is +// defines the If-None-Match condition. Set to '*' to force +// Create-If-Not-Exist. Other values will be ignored. 
+func (client RecordSetsClient) CreateOrUpdate(resourceGroupName string, zoneName string, relativeRecordSetName string, recordType RecordType, parameters RecordSet, ifMatch string, ifNoneMatch string) (result RecordSet, err error) { + req, err := client.CreateOrUpdatePreparer(resourceGroupName, zoneName, relativeRecordSetName, recordType, parameters, ifMatch, ifNoneMatch) if err != nil { - return result, autorest.NewErrorWithError(err, "dns/RecordSetsClient", "CreateOrUpdate", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "dns.RecordSetsClient", "CreateOrUpdate", nil, "Failure preparing request") } resp, err := client.CreateOrUpdateSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "dns/RecordSetsClient", "CreateOrUpdate", "Failure sending request") + return result, autorest.NewErrorWithError(err, "dns.RecordSetsClient", "CreateOrUpdate", resp, "Failure sending request") } result, err = client.CreateOrUpdateResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "dns/RecordSetsClient", "CreateOrUpdate", "Failure responding to request") + err = autorest.NewErrorWithError(err, "dns.RecordSetsClient", "CreateOrUpdate", resp, "Failure responding to request") } return } // CreateOrUpdatePreparer prepares the CreateOrUpdate request. 
-func (client RecordSetsClient) CreateOrUpdatePreparer(resourceGroupName string, zoneName string, recordType RecordType, relativeRecordSetName string, parameters RecordSetCreateOrUpdateParameters, ifMatch string, ifNoneMatch string) (*http.Request, error) { +func (client RecordSetsClient) CreateOrUpdatePreparer(resourceGroupName string, zoneName string, relativeRecordSetName string, recordType RecordType, parameters RecordSet, ifMatch string, ifNoneMatch string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "recordType": url.QueryEscape(string(recordType)), + "recordType": autorest.Encode("path", recordType), "relativeRecordSetName": relativeRecordSetName, - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - "zoneName": url.QueryEscape(zoneName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "zoneName": autorest.Encode("path", zoneName), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, + preparer := autorest.CreatePreparer( autorest.AsJSON(), autorest.AsPut(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/dnszones/{zoneName}/{recordType}/{relativeRecordSetName}"), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/dnszones/{zoneName}/{recordType}/{relativeRecordSetName}", pathParameters), autorest.WithJSON(parameters), - autorest.WithPathParameters(pathParameters), autorest.WithQueryParameters(queryParameters)) + if len(ifMatch) > 0 { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-Match", autorest.String(ifMatch))) + } + if len(ifNoneMatch) > 0 { + preparer = 
autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-None-Match", autorest.String(ifNoneMatch))) + } + return preparer.Prepare(&http.Request{}) } // CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the // http.Response Body if it receives an error. func (client RecordSetsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK, http.StatusAccepted) + return autorest.SendWithSender(client, req) } // CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always @@ -106,7 +115,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + azure.WithErrorUnlessStatusCode(http.StatusCreated, http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -116,59 +125,68 @@ // Delete removes a RecordSet from a DNS zone. // // resourceGroupName is the name of the resource group. zoneName is the name -// of the zone without a terminating dot. recordType is the type of DNS -// record. Possible values for this parameter include: 'A', 'AAAA', 'CNAME', -// 'MX', 'NS', 'PTR', 'SOA', 'SRV', 'TXT' relativeRecordSetName is the name -// of the RecordSet, relative to the name of the zone. ifMatch is defines the -// If-Match condition. The delete operation will be performed only if the -// ETag of the zone on the server matches this value. -func (client RecordSetsClient) Delete(resourceGroupName string, zoneName string, recordType RecordType, relativeRecordSetName string, ifMatch string) (result autorest.Response, ae error) { - req, err := client.DeletePreparer(resourceGroupName, zoneName, recordType, relativeRecordSetName, ifMatch) +// of the zone without a terminating dot. relativeRecordSetName is the name +// of the RecordSet, relative to the name of the zone. recordType is the type +// of DNS record. 
Possible values include: 'A', 'AAAA', 'CNAME', 'MX', 'NS', +// 'PTR', 'SOA', 'SRV', 'TXT' ifMatch is defines the If-Match condition. The +// delete operation will be performed only if the ETag of the zone on the +// server matches this value. ifNoneMatch is defines the If-None-Match +// condition. The delete operation will be performed only if the ETag of the +// zone on the server does not match this value. +func (client RecordSetsClient) Delete(resourceGroupName string, zoneName string, relativeRecordSetName string, recordType RecordType, ifMatch string, ifNoneMatch string) (result autorest.Response, err error) { + req, err := client.DeletePreparer(resourceGroupName, zoneName, relativeRecordSetName, recordType, ifMatch, ifNoneMatch) if err != nil { - return result, autorest.NewErrorWithError(err, "dns/RecordSetsClient", "Delete", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "dns.RecordSetsClient", "Delete", nil, "Failure preparing request") } resp, err := client.DeleteSender(req) if err != nil { result.Response = resp - return result, autorest.NewErrorWithError(err, "dns/RecordSetsClient", "Delete", "Failure sending request") + return result, autorest.NewErrorWithError(err, "dns.RecordSetsClient", "Delete", resp, "Failure sending request") } result, err = client.DeleteResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "dns/RecordSetsClient", "Delete", "Failure responding to request") + err = autorest.NewErrorWithError(err, "dns.RecordSetsClient", "Delete", resp, "Failure responding to request") } return } // DeletePreparer prepares the Delete request. 
-func (client RecordSetsClient) DeletePreparer(resourceGroupName string, zoneName string, recordType RecordType, relativeRecordSetName string, ifMatch string) (*http.Request, error) { +func (client RecordSetsClient) DeletePreparer(resourceGroupName string, zoneName string, relativeRecordSetName string, recordType RecordType, ifMatch string, ifNoneMatch string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "recordType": url.QueryEscape(string(recordType)), + "recordType": autorest.Encode("path", recordType), "relativeRecordSetName": relativeRecordSetName, - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - "zoneName": url.QueryEscape(zoneName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "zoneName": autorest.Encode("path", zoneName), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsDelete(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/dnszones/{zoneName}/{recordType}/{relativeRecordSetName}"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/dnszones/{zoneName}/{recordType}/{relativeRecordSetName}", pathParameters), autorest.WithQueryParameters(queryParameters)) + if len(ifMatch) > 0 { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-Match", autorest.String(ifMatch))) + } + if len(ifNoneMatch) > 0 { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-None-Match", autorest.String(ifNoneMatch))) + } + return 
preparer.Prepare(&http.Request{}) } // DeleteSender sends the Delete request. The method will close the // http.Response Body if it receives an error. func (client RecordSetsClient) DeleteSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // DeleteResponder handles the response to the Delete request. The method always @@ -177,7 +195,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusNoContent, http.StatusOK), autorest.ByClosing()) result.Response = resp return @@ -186,57 +204,56 @@ // Get gets a RecordSet. // // resourceGroupName is the name of the resource group. zoneName is the name -// of the zone without a terminating dot. recordType is the type of DNS -// record. Possible values for this parameter include: 'A', 'AAAA', 'CNAME', -// 'MX', 'NS', 'PTR', 'SOA', 'SRV', 'TXT' relativeRecordSetName is the name -// of the RecordSet, relative to the name of the zone. -func (client RecordSetsClient) Get(resourceGroupName string, zoneName string, recordType RecordType, relativeRecordSetName string) (result RecordSet, ae error) { - req, err := client.GetPreparer(resourceGroupName, zoneName, recordType, relativeRecordSetName) +// of the zone without a terminating dot. relativeRecordSetName is the name +// of the RecordSet, relative to the name of the zone. recordType is the type +// of DNS record. 
Possible values include: 'A', 'AAAA', 'CNAME', 'MX', 'NS', +// 'PTR', 'SOA', 'SRV', 'TXT' +func (client RecordSetsClient) Get(resourceGroupName string, zoneName string, relativeRecordSetName string, recordType RecordType) (result RecordSet, err error) { + req, err := client.GetPreparer(resourceGroupName, zoneName, relativeRecordSetName, recordType) if err != nil { - return result, autorest.NewErrorWithError(err, "dns/RecordSetsClient", "Get", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "dns.RecordSetsClient", "Get", nil, "Failure preparing request") } resp, err := client.GetSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "dns/RecordSetsClient", "Get", "Failure sending request") + return result, autorest.NewErrorWithError(err, "dns.RecordSetsClient", "Get", resp, "Failure sending request") } result, err = client.GetResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "dns/RecordSetsClient", "Get", "Failure responding to request") + err = autorest.NewErrorWithError(err, "dns.RecordSetsClient", "Get", resp, "Failure responding to request") } return } // GetPreparer prepares the Get request. 
-func (client RecordSetsClient) GetPreparer(resourceGroupName string, zoneName string, recordType RecordType, relativeRecordSetName string) (*http.Request, error) { +func (client RecordSetsClient) GetPreparer(resourceGroupName string, zoneName string, relativeRecordSetName string, recordType RecordType) (*http.Request, error) { pathParameters := map[string]interface{}{ - "recordType": url.QueryEscape(string(recordType)), + "recordType": autorest.Encode("path", recordType), "relativeRecordSetName": relativeRecordSetName, - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - "zoneName": url.QueryEscape(zoneName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "zoneName": autorest.Encode("path", zoneName), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/dnszones/{zoneName}/{recordType}/{relativeRecordSetName}"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/dnszones/{zoneName}/{recordType}/{relativeRecordSetName}", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client RecordSetsClient) GetSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetResponder handles the response to the Get request. 
The method always @@ -245,154 +262,275 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} return } -// List lists the RecordSets of a specified type in a DNS zone. +// ListAllInResourceGroup lists all RecordSets in a DNS zone. // // resourceGroupName is the name of the resource group that contains the zone. -// zoneName is the name of the zone from which to enumerate RecordsSets. -// recordType is the type of record sets to enumerate. Possible values for -// this parameter include: 'A', 'AAAA', 'CNAME', 'MX', 'NS', 'PTR', 'SOA', -// 'SRV', 'TXT' top is query parameters. If null is passed returns the -// default number of zones. filter is the filter to apply on the operation. -func (client RecordSetsClient) List(resourceGroupName string, zoneName string, recordType RecordType, top string, filter string) (result RecordSetListResult, ae error) { - req, err := client.ListPreparer(resourceGroupName, zoneName, recordType, top, filter) +// zoneName is the name of the zone from which to enumerate RecordSets. top +// is query parameters. If null is passed returns the default number of +// zones. 
+func (client RecordSetsClient) ListAllInResourceGroup(resourceGroupName string, zoneName string, top string) (result RecordSetListResult, err error) { + req, err := client.ListAllInResourceGroupPreparer(resourceGroupName, zoneName, top) if err != nil { - return result, autorest.NewErrorWithError(err, "dns/RecordSetsClient", "List", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "dns.RecordSetsClient", "ListAllInResourceGroup", nil, "Failure preparing request") } - resp, err := client.ListSender(req) + resp, err := client.ListAllInResourceGroupSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "dns/RecordSetsClient", "List", "Failure sending request") + return result, autorest.NewErrorWithError(err, "dns.RecordSetsClient", "ListAllInResourceGroup", resp, "Failure sending request") } - result, err = client.ListResponder(resp) + result, err = client.ListAllInResourceGroupResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "dns/RecordSetsClient", "List", "Failure responding to request") + err = autorest.NewErrorWithError(err, "dns.RecordSetsClient", "ListAllInResourceGroup", resp, "Failure responding to request") } return } -// ListPreparer prepares the List request. -func (client RecordSetsClient) ListPreparer(resourceGroupName string, zoneName string, recordType RecordType, top string, filter string) (*http.Request, error) { +// ListAllInResourceGroupPreparer prepares the ListAllInResourceGroup request. 
+func (client RecordSetsClient) ListAllInResourceGroupPreparer(resourceGroupName string, zoneName string, top string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "recordType": url.QueryEscape(string(recordType)), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - "zoneName": url.QueryEscape(zoneName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "zoneName": autorest.Encode("path", zoneName), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } if len(top) > 0 { - queryParameters["$top"] = top - } - if len(filter) > 0 { - queryParameters["$filter"] = filter + queryParameters["$top"] = autorest.Encode("query", top) } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/dnszones/{zoneName}/{recordType}"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/dnszones/{zoneName}/recordsets", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } -// ListSender sends the List request. The method will close the +// ListAllInResourceGroupSender sends the ListAllInResourceGroup request. The method will close the // http.Response Body if it receives an error. 
-func (client RecordSetsClient) ListSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) +func (client RecordSetsClient) ListAllInResourceGroupSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) } -// ListResponder handles the response to the List request. The method always +// ListAllInResourceGroupResponder handles the response to the ListAllInResourceGroup request. The method always // closes the http.Response Body. -func (client RecordSetsClient) ListResponder(resp *http.Response) (result RecordSetListResult, err error) { +func (client RecordSetsClient) ListAllInResourceGroupResponder(resp *http.Response) (result RecordSetListResult, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} return } -// ListAll lists all RecordSets in a DNS zone. +// ListAllInResourceGroupNextResults retrieves the next set of results, if any. 
+func (client RecordSetsClient) ListAllInResourceGroupNextResults(lastResults RecordSetListResult) (result RecordSetListResult, err error) { + req, err := lastResults.RecordSetListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "dns.RecordSetsClient", "ListAllInResourceGroup", nil, "Failure preparing next results request request") + } + if req == nil { + return + } + + resp, err := client.ListAllInResourceGroupSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "dns.RecordSetsClient", "ListAllInResourceGroup", resp, "Failure sending next results request request") + } + + result, err = client.ListAllInResourceGroupResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "dns.RecordSetsClient", "ListAllInResourceGroup", resp, "Failure responding to next results request request") + } + + return +} + +// ListByType lists the RecordSets of a specified type in a DNS zone. // // resourceGroupName is the name of the resource group that contains the zone. -// zoneName is the name of the zone from which to enumerate RecordSets. top +// zoneName is the name of the zone from which to enumerate RecordsSets. +// recordType is the type of record sets to enumerate. Possible values +// include: 'A', 'AAAA', 'CNAME', 'MX', 'NS', 'PTR', 'SOA', 'SRV', 'TXT' top // is query parameters. If null is passed returns the default number of -// zones. filter is the filter to apply on the operation. -func (client RecordSetsClient) ListAll(resourceGroupName string, zoneName string, top string, filter string) (result RecordSetListResult, ae error) { - req, err := client.ListAllPreparer(resourceGroupName, zoneName, top, filter) +// zones. 
+func (client RecordSetsClient) ListByType(resourceGroupName string, zoneName string, recordType RecordType, top string) (result RecordSetListResult, err error) { + req, err := client.ListByTypePreparer(resourceGroupName, zoneName, recordType, top) if err != nil { - return result, autorest.NewErrorWithError(err, "dns/RecordSetsClient", "ListAll", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "dns.RecordSetsClient", "ListByType", nil, "Failure preparing request") } - resp, err := client.ListAllSender(req) + resp, err := client.ListByTypeSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "dns/RecordSetsClient", "ListAll", "Failure sending request") + return result, autorest.NewErrorWithError(err, "dns.RecordSetsClient", "ListByType", resp, "Failure sending request") } - result, err = client.ListAllResponder(resp) + result, err = client.ListByTypeResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "dns/RecordSetsClient", "ListAll", "Failure responding to request") + err = autorest.NewErrorWithError(err, "dns.RecordSetsClient", "ListByType", resp, "Failure responding to request") } return } -// ListAllPreparer prepares the ListAll request. -func (client RecordSetsClient) ListAllPreparer(resourceGroupName string, zoneName string, top string, filter string) (*http.Request, error) { +// ListByTypePreparer prepares the ListByType request. 
+func (client RecordSetsClient) ListByTypePreparer(resourceGroupName string, zoneName string, recordType RecordType, top string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - "zoneName": url.QueryEscape(zoneName), + "recordType": autorest.Encode("path", recordType), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "zoneName": autorest.Encode("path", zoneName), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } if len(top) > 0 { - queryParameters["$top"] = top + queryParameters["$top"] = autorest.Encode("query", top) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/dnszones/{zoneName}/{recordType}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListByTypeSender sends the ListByType request. The method will close the +// http.Response Body if it receives an error. +func (client RecordSetsClient) ListByTypeSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListByTypeResponder handles the response to the ListByType request. The method always +// closes the http.Response Body. 
+func (client RecordSetsClient) ListByTypeResponder(resp *http.Response) (result RecordSetListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListByTypeNextResults retrieves the next set of results, if any. +func (client RecordSetsClient) ListByTypeNextResults(lastResults RecordSetListResult) (result RecordSetListResult, err error) { + req, err := lastResults.RecordSetListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "dns.RecordSetsClient", "ListByType", nil, "Failure preparing next results request request") } - if len(filter) > 0 { - queryParameters["$filter"] = filter + if req == nil { + return } - return autorest.Prepare(&http.Request{}, + resp, err := client.ListByTypeSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "dns.RecordSetsClient", "ListByType", resp, "Failure sending next results request request") + } + + result, err = client.ListByTypeResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "dns.RecordSetsClient", "ListByType", resp, "Failure responding to next results request request") + } + + return +} + +// Update updates a RecordSet within a DNS zone. +// +// resourceGroupName is the name of the resource group. zoneName is the name +// of the zone without a terminating dot. relativeRecordSetName is the name +// of the RecordSet, relative to the name of the zone. recordType is the type +// of DNS record. Possible values include: 'A', 'AAAA', 'CNAME', 'MX', 'NS', +// 'PTR', 'SOA', 'SRV', 'TXT' parameters is parameters supplied to the Update +// operation. ifMatch is the etag of Zone. ifNoneMatch is defines the +// If-None-Match condition. Set to '*' to force Create-If-Not-Exist. 
Other +// values will be ignored. +func (client RecordSetsClient) Update(resourceGroupName string, zoneName string, relativeRecordSetName string, recordType RecordType, parameters RecordSet, ifMatch string, ifNoneMatch string) (result RecordSet, err error) { + req, err := client.UpdatePreparer(resourceGroupName, zoneName, relativeRecordSetName, recordType, parameters, ifMatch, ifNoneMatch) + if err != nil { + return result, autorest.NewErrorWithError(err, "dns.RecordSetsClient", "Update", nil, "Failure preparing request") + } + + resp, err := client.UpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "dns.RecordSetsClient", "Update", resp, "Failure sending request") + } + + result, err = client.UpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "dns.RecordSetsClient", "Update", resp, "Failure responding to request") + } + + return +} + +// UpdatePreparer prepares the Update request. 
+func (client RecordSetsClient) UpdatePreparer(resourceGroupName string, zoneName string, relativeRecordSetName string, recordType RecordType, parameters RecordSet, ifMatch string, ifNoneMatch string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "recordType": autorest.Encode("path", recordType), + "relativeRecordSetName": relativeRecordSetName, + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "zoneName": autorest.Encode("path", zoneName), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( autorest.AsJSON(), - autorest.AsGet(), + autorest.AsPatch(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/dnszones/{zoneName}/recordsets"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/dnszones/{zoneName}/{recordType}/{relativeRecordSetName}", pathParameters), + autorest.WithJSON(parameters), autorest.WithQueryParameters(queryParameters)) + if len(ifMatch) > 0 { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-Match", autorest.String(ifMatch))) + } + if len(ifNoneMatch) > 0 { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-None-Match", autorest.String(ifNoneMatch))) + } + return preparer.Prepare(&http.Request{}) } -// ListAllSender sends the ListAll request. The method will close the +// UpdateSender sends the Update request. The method will close the // http.Response Body if it receives an error. 
-func (client RecordSetsClient) ListAllSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) +func (client RecordSetsClient) UpdateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) } -// ListAllResponder handles the response to the ListAll request. The method always +// UpdateResponder handles the response to the Update request. The method always // closes the http.Response Body. -func (client RecordSetsClient) ListAllResponder(resp *http.Response) (result RecordSetListResult, err error) { +func (client RecordSetsClient) UpdateResponder(resp *http.Response) (result RecordSet, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/dns/version.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/dns/version.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/dns/version.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/dns/version.go 2016-10-13 14:32:06.000000000 +0000 @@ -14,7 +14,7 @@ // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 // Changes may cause incorrect behavior and will be lost if the code is // regenerated. 
@@ -23,18 +23,18 @@ ) const ( - major = "0" - minor = "3" + major = "3" + minor = "1" patch = "0" // Always begin a "tag" with a dash (as per http://semver.org) tag = "-beta" semVerFormat = "%s.%s.%s%s" - userAgentFormat = "Azure-SDK-for-Go/%s;Package arm/%s;API %s" + userAgentFormat = "Azure-SDK-for-Go/%s arm-%s/%s" ) // UserAgent returns the UserAgent string to use when sending http.Requests. func UserAgent() string { - return fmt.Sprintf(userAgentFormat, Version(), "dns", "2015-11-01") + return fmt.Sprintf(userAgentFormat, Version(), "dns", "2016-04-01") } // Version returns the semantic version (see http://semver.org) of the client. diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/dns/zones.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/dns/zones.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/dns/zones.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/dns/zones.go 2016-10-13 14:32:06.000000000 +0000 @@ -14,17 +14,17 @@ // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 // Changes may cause incorrect behavior and will be lost if the code is // regenerated. import ( - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" "net/http" - "net/url" ) -// ZonesClient is the client for managing DNS zones and record. +// ZonesClient is the client for the Zones methods of the Dns service. type ZonesClient struct { ManagementClient } @@ -39,59 +39,67 @@ return ZonesClient{NewWithBaseURI(baseURI, subscriptionID)} } -// CreateOrUpdate creates a DNS zone within a resource group. 
+// CreateOrUpdate creates or Updates a DNS zone within a resource group. // // resourceGroupName is the name of the resource group. zoneName is the name // of the zone without a terminating dot. parameters is parameters supplied // to the CreateOrUpdate operation. ifMatch is the etag of Zone. ifNoneMatch // is defines the If-None-Match condition. Set to '*' to force // Create-If-Not-Exist. Other values will be ignored. -func (client ZonesClient) CreateOrUpdate(resourceGroupName string, zoneName string, parameters ZoneCreateOrUpdateParameters, ifMatch string, ifNoneMatch string) (result Zone, ae error) { +func (client ZonesClient) CreateOrUpdate(resourceGroupName string, zoneName string, parameters Zone, ifMatch string, ifNoneMatch string) (result Zone, err error) { req, err := client.CreateOrUpdatePreparer(resourceGroupName, zoneName, parameters, ifMatch, ifNoneMatch) if err != nil { - return result, autorest.NewErrorWithError(err, "dns/ZonesClient", "CreateOrUpdate", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "dns.ZonesClient", "CreateOrUpdate", nil, "Failure preparing request") } resp, err := client.CreateOrUpdateSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "dns/ZonesClient", "CreateOrUpdate", "Failure sending request") + return result, autorest.NewErrorWithError(err, "dns.ZonesClient", "CreateOrUpdate", resp, "Failure sending request") } result, err = client.CreateOrUpdateResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "dns/ZonesClient", "CreateOrUpdate", "Failure responding to request") + err = autorest.NewErrorWithError(err, "dns.ZonesClient", "CreateOrUpdate", resp, "Failure responding to request") } return } // CreateOrUpdatePreparer prepares the CreateOrUpdate request. 
-func (client ZonesClient) CreateOrUpdatePreparer(resourceGroupName string, zoneName string, parameters ZoneCreateOrUpdateParameters, ifMatch string, ifNoneMatch string) (*http.Request, error) { +func (client ZonesClient) CreateOrUpdatePreparer(resourceGroupName string, zoneName string, parameters Zone, ifMatch string, ifNoneMatch string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - "zoneName": url.QueryEscape(zoneName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "zoneName": autorest.Encode("path", zoneName), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, + preparer := autorest.CreatePreparer( autorest.AsJSON(), autorest.AsPut(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/dnszones/{zoneName}"), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/dnszones/{zoneName}", pathParameters), autorest.WithJSON(parameters), - autorest.WithPathParameters(pathParameters), autorest.WithQueryParameters(queryParameters)) + if len(ifMatch) > 0 { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-Match", autorest.String(ifMatch))) + } + if len(ifNoneMatch) > 0 { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-None-Match", autorest.String(ifNoneMatch))) + } + return preparer.Prepare(&http.Request{}) } // CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the // http.Response Body if it receives an error. 
func (client ZonesClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK, http.StatusAccepted) + return autorest.SendWithSender(client, req) } // CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always @@ -100,64 +108,78 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} return } -// Delete removes a DNS zone from a resource group. +// Delete removes a DNS zone from a resource group. This method may poll for +// completion. Polling can be canceled by passing the cancel channel +// argument. The channel will be used to cancel polling and any outstanding +// HTTP requests. // // resourceGroupName is the name of the resource group. zoneName is the name // of the zone without a terminating dot. ifMatch is defines the If-Match // condition. The delete operation will be performed only if the ETag of the -// zone on the server matches this value. -func (client ZonesClient) Delete(resourceGroupName string, zoneName string, ifMatch string) (result autorest.Response, ae error) { - req, err := client.DeletePreparer(resourceGroupName, zoneName, ifMatch) +// zone on the server matches this value. ifNoneMatch is defines the +// If-None-Match condition. The delete operation will be performed only if +// the ETag of the zone on the server does not match this value. 
+func (client ZonesClient) Delete(resourceGroupName string, zoneName string, ifMatch string, ifNoneMatch string, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.DeletePreparer(resourceGroupName, zoneName, ifMatch, ifNoneMatch, cancel) if err != nil { - return result, autorest.NewErrorWithError(err, "dns/ZonesClient", "Delete", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "dns.ZonesClient", "Delete", nil, "Failure preparing request") } resp, err := client.DeleteSender(req) if err != nil { result.Response = resp - return result, autorest.NewErrorWithError(err, "dns/ZonesClient", "Delete", "Failure sending request") + return result, autorest.NewErrorWithError(err, "dns.ZonesClient", "Delete", resp, "Failure sending request") } result, err = client.DeleteResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "dns/ZonesClient", "Delete", "Failure responding to request") + err = autorest.NewErrorWithError(err, "dns.ZonesClient", "Delete", resp, "Failure responding to request") } return } // DeletePreparer prepares the Delete request. 
-func (client ZonesClient) DeletePreparer(resourceGroupName string, zoneName string, ifMatch string) (*http.Request, error) { +func (client ZonesClient) DeletePreparer(resourceGroupName string, zoneName string, ifMatch string, ifNoneMatch string, cancel <-chan struct{}) (*http.Request, error) { pathParameters := map[string]interface{}{ - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - "zoneName": url.QueryEscape(zoneName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "zoneName": autorest.Encode("path", zoneName), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsDelete(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/dnszones/{zoneName}"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/dnszones/{zoneName}", pathParameters), autorest.WithQueryParameters(queryParameters)) + if len(ifMatch) > 0 { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-Match", autorest.String(ifMatch))) + } + if len(ifNoneMatch) > 0 { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-None-Match", autorest.String(ifNoneMatch))) + } + return preparer.Prepare(&http.Request{Cancel: cancel}) } // DeleteSender sends the Delete request. The method will close the // http.Response Body if it receives an error. 
func (client ZonesClient) DeleteSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) } // DeleteResponder handles the response to the Delete request. The method always @@ -166,7 +188,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusNoContent, http.StatusAccepted, http.StatusOK), autorest.ByClosing()) result.Response = resp return @@ -176,21 +198,21 @@ // // resourceGroupName is the name of the resource group. zoneName is the name // of the zone without a terminating dot. -func (client ZonesClient) Get(resourceGroupName string, zoneName string) (result Zone, ae error) { +func (client ZonesClient) Get(resourceGroupName string, zoneName string) (result Zone, err error) { req, err := client.GetPreparer(resourceGroupName, zoneName) if err != nil { - return result, autorest.NewErrorWithError(err, "dns/ZonesClient", "Get", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "dns.ZonesClient", "Get", nil, "Failure preparing request") } resp, err := client.GetSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "dns/ZonesClient", "Get", "Failure sending request") + return result, autorest.NewErrorWithError(err, "dns.ZonesClient", "Get", resp, "Failure sending request") } result, err = client.GetResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "dns/ZonesClient", "Get", "Failure responding to request") + err = autorest.NewErrorWithError(err, "dns.ZonesClient", "Get", resp, "Failure responding to request") } return @@ -199,28 +221,27 @@ // GetPreparer prepares the Get request. 
func (client ZonesClient) GetPreparer(resourceGroupName string, zoneName string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - "zoneName": url.QueryEscape(zoneName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "zoneName": autorest.Encode("path", zoneName), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/dnszones/{zoneName}"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/dnszones/{zoneName}", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client ZonesClient) GetSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetResponder handles the response to the Get request. The method always @@ -229,147 +250,186 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} return } -// ListZonesInResourceGroup lists the DNS zones within a resource group. 
+// ListInResourceGroup lists the DNS zones within a resource group. // // resourceGroupName is the name of the resource group. top is query -// parameters. If null is passed returns the default number of zones. filter -// is the filter to apply on the operation. -func (client ZonesClient) ListZonesInResourceGroup(resourceGroupName string, top string, filter string) (result ZoneListResult, ae error) { - req, err := client.ListZonesInResourceGroupPreparer(resourceGroupName, top, filter) +// parameters. If null is passed returns the default number of zones. +func (client ZonesClient) ListInResourceGroup(resourceGroupName string, top string) (result ZoneListResult, err error) { + req, err := client.ListInResourceGroupPreparer(resourceGroupName, top) if err != nil { - return result, autorest.NewErrorWithError(err, "dns/ZonesClient", "ListZonesInResourceGroup", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "dns.ZonesClient", "ListInResourceGroup", nil, "Failure preparing request") } - resp, err := client.ListZonesInResourceGroupSender(req) + resp, err := client.ListInResourceGroupSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "dns/ZonesClient", "ListZonesInResourceGroup", "Failure sending request") + return result, autorest.NewErrorWithError(err, "dns.ZonesClient", "ListInResourceGroup", resp, "Failure sending request") } - result, err = client.ListZonesInResourceGroupResponder(resp) + result, err = client.ListInResourceGroupResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "dns/ZonesClient", "ListZonesInResourceGroup", "Failure responding to request") + err = autorest.NewErrorWithError(err, "dns.ZonesClient", "ListInResourceGroup", resp, "Failure responding to request") } return } -// ListZonesInResourceGroupPreparer prepares the ListZonesInResourceGroup request. 
-func (client ZonesClient) ListZonesInResourceGroupPreparer(resourceGroupName string, top string, filter string) (*http.Request, error) { +// ListInResourceGroupPreparer prepares the ListInResourceGroup request. +func (client ZonesClient) ListInResourceGroupPreparer(resourceGroupName string, top string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } if len(top) > 0 { - queryParameters["$top"] = top - } - if len(filter) > 0 { - queryParameters["$filter"] = filter + queryParameters["$top"] = autorest.Encode("query", top) } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/dnszones"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/dnszones", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } -// ListZonesInResourceGroupSender sends the ListZonesInResourceGroup request. The method will close the +// ListInResourceGroupSender sends the ListInResourceGroup request. The method will close the // http.Response Body if it receives an error. 
-func (client ZonesClient) ListZonesInResourceGroupSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) +func (client ZonesClient) ListInResourceGroupSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) } -// ListZonesInResourceGroupResponder handles the response to the ListZonesInResourceGroup request. The method always +// ListInResourceGroupResponder handles the response to the ListInResourceGroup request. The method always // closes the http.Response Body. -func (client ZonesClient) ListZonesInResourceGroupResponder(resp *http.Response) (result ZoneListResult, err error) { +func (client ZonesClient) ListInResourceGroupResponder(resp *http.Response) (result ZoneListResult, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} return } -// ListZonesInSubscription lists the DNS zones within a resource group. +// ListInResourceGroupNextResults retrieves the next set of results, if any. 
+func (client ZonesClient) ListInResourceGroupNextResults(lastResults ZoneListResult) (result ZoneListResult, err error) { + req, err := lastResults.ZoneListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "dns.ZonesClient", "ListInResourceGroup", nil, "Failure preparing next results request") + } + if req == nil { + return + } + + resp, err := client.ListInResourceGroupSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "dns.ZonesClient", "ListInResourceGroup", resp, "Failure sending next results request") + } + + result, err = client.ListInResourceGroupResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "dns.ZonesClient", "ListInResourceGroup", resp, "Failure responding to next results request") + } + + return +} + +// ListInSubscription lists the DNS zones within the subscription. // // top is query parameters. If null is passed returns the default number of -// zones. filter is the filter to apply on the operation. -func (client ZonesClient) ListZonesInSubscription(top string, filter string) (result ZoneListResult, ae error) { - req, err := client.ListZonesInSubscriptionPreparer(top, filter) +// zones.
+func (client ZonesClient) ListInSubscription(top string) (result ZoneListResult, err error) { + req, err := client.ListInSubscriptionPreparer(top) if err != nil { - return result, autorest.NewErrorWithError(err, "dns/ZonesClient", "ListZonesInSubscription", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "dns.ZonesClient", "ListInSubscription", nil, "Failure preparing request") } - resp, err := client.ListZonesInSubscriptionSender(req) + resp, err := client.ListInSubscriptionSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "dns/ZonesClient", "ListZonesInSubscription", "Failure sending request") + return result, autorest.NewErrorWithError(err, "dns.ZonesClient", "ListInSubscription", resp, "Failure sending request") } - result, err = client.ListZonesInSubscriptionResponder(resp) + result, err = client.ListInSubscriptionResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "dns/ZonesClient", "ListZonesInSubscription", "Failure responding to request") + err = autorest.NewErrorWithError(err, "dns.ZonesClient", "ListInSubscription", resp, "Failure responding to request") } return } -// ListZonesInSubscriptionPreparer prepares the ListZonesInSubscription request. -func (client ZonesClient) ListZonesInSubscriptionPreparer(top string, filter string) (*http.Request, error) { +// ListInSubscriptionPreparer prepares the ListInSubscription request. 
+func (client ZonesClient) ListInSubscriptionPreparer(top string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } if len(top) > 0 { - queryParameters["$top"] = top - } - if len(filter) > 0 { - queryParameters["$filter"] = filter + queryParameters["$top"] = autorest.Encode("query", top) } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/providers/Microsoft.Network/dnszones"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Network/dnszones", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } -// ListZonesInSubscriptionSender sends the ListZonesInSubscription request. The method will close the +// ListInSubscriptionSender sends the ListInSubscription request. The method will close the // http.Response Body if it receives an error. -func (client ZonesClient) ListZonesInSubscriptionSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) +func (client ZonesClient) ListInSubscriptionSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) } -// ListZonesInSubscriptionResponder handles the response to the ListZonesInSubscription request. The method always +// ListInSubscriptionResponder handles the response to the ListInSubscription request. The method always // closes the http.Response Body. 
-func (client ZonesClient) ListZonesInSubscriptionResponder(resp *http.Response) (result ZoneListResult, err error) { +func (client ZonesClient) ListInSubscriptionResponder(resp *http.Response) (result ZoneListResult, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} return } + +// ListInSubscriptionNextResults retrieves the next set of results, if any. +func (client ZonesClient) ListInSubscriptionNextResults(lastResults ZoneListResult) (result ZoneListResult, err error) { + req, err := lastResults.ZoneListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "dns.ZonesClient", "ListInSubscription", nil, "Failure preparing next results request") + } + if req == nil { + return + } + + resp, err := client.ListInSubscriptionSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "dns.ZonesClient", "ListInSubscription", resp, "Failure sending next results request") + } + + result, err = client.ListInSubscriptionResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "dns.ZonesClient", "ListInSubscription", resp, "Failure responding to next results request") + } + + return +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/examples/check.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/examples/check.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/examples/check.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/examples/check.go 2016-10-13 14:32:06.000000000 +0000 @@ -6,11 +6,11 @@ "net/http" "os" -
"github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/azure" - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/to" "github.com/Azure/azure-sdk-for-go/arm/examples/helpers" "github.com/Azure/azure-sdk-for-go/arm/storage" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/to" ) func withInspection() autorest.PrepareDecorator { @@ -39,7 +39,7 @@ ac := storage.NewAccountsClient(c["subscriptionID"]) - spt, err := helpers.NewServicePrincipalTokenFromCredentials(c, azure.AzureResourceManagerScope) + spt, err := helpers.NewServicePrincipalTokenFromCredentials(c, azure.PublicCloud.ResourceManagerEndpoint) if err != nil { log.Fatalf("Error: %v", err) } @@ -50,7 +50,6 @@ ac.RequestInspector = withInspection() ac.ResponseInspector = byInspecting() - cna, err := ac.CheckNameAvailability( storage.AccountCheckNameAvailabilityParameters{ Name: to.StringPtr(name), diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/examples/create.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/examples/create.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/examples/create.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/examples/create.go 2016-10-13 14:32:06.000000000 +0000 @@ -3,33 +3,13 @@ import ( "fmt" "log" - "net/http" - "time" - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/azure" - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/to" "github.com/Azure/azure-sdk-for-go/arm/examples/helpers" 
"github.com/Azure/azure-sdk-for-go/arm/storage" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/to" ) -func withWatcher() autorest.SendDecorator { - return func(s autorest.Sender) autorest.Sender { - return autorest.SenderFunc(func(r *http.Request) (*http.Response, error) { - fmt.Printf("Sending %s %s\n", r.Method, r.URL) - resp, err := s.Do(r) - fmt.Printf("...received status %s\n", resp.Status) - if autorest.ResponseRequiresPolling(resp) { - fmt.Printf("...will poll after %d seconds\n", - int(autorest.GetPollingDelay(resp, time.Duration(0))/time.Second)) - fmt.Printf("...will poll at %s\n", autorest.GetPollingLocation(resp)) - } - fmt.Println("") - return resp, err - }) - } -} - func createAccount(resourceGroup, name string) { c, err := helpers.LoadCredentials() if err != nil { @@ -38,7 +18,7 @@ ac := storage.NewAccountsClient(c["subscriptionID"]) - spt, err := helpers.NewServicePrincipalTokenFromCredentials(c, azure.AzureResourceManagerScope) + spt, err := helpers.NewServicePrincipalTokenFromCredentials(c, azure.PublicCloud.ResourceManagerEndpoint) if err != nil { log.Fatalf("Error: %v", err) } @@ -58,30 +38,18 @@ } fmt.Printf("%s is available\n\n", name) - ac.Sender = autorest.CreateSender(withWatcher()) - ac.PollingMode = autorest.PollUntilAttempts - ac.PollingAttempts = 5 - - cp := storage.AccountCreateParameters{} + cp := storage.AccountCreateParameters{Sku: &storage.Sku{Name: storage.StandardLRS, Tier: storage.Standard}} cp.Location = to.StringPtr("westus") - cp.Properties = &storage.AccountPropertiesCreateParameters{AccountType: storage.StandardLRS} - sa, err := ac.Create(resourceGroup, name, cp) + cancel := make(chan struct{}) + _, err = ac.Create(resourceGroup, name, cp, cancel) if err != nil { - if sa.Response.StatusCode != http.StatusAccepted { - fmt.Printf("Creation of %s.%s failed with err -- %v\n", resourceGroup, name, err) - return - } - fmt.Printf("Create initiated for %s.%s -- poll %s to check status\n", - 
resourceGroup, - name, - sa.GetPollingLocation()) + fmt.Printf("Create failed: %v\n", err) return } fmt.Printf("Successfully created %s.%s\n\n", resourceGroup, name) - ac.Sender = nil r, err := ac.Delete(resourceGroup, name) if err != nil { fmt.Printf("Delete of %s.%s failed with status %s\n...%v\n", resourceGroup, name, r.Status, err) diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/examples/helpers/helpers.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/examples/helpers/helpers.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/examples/helpers/helpers.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/examples/helpers/helpers.go 2016-10-13 14:32:06.000000000 +0000 @@ -7,7 +7,7 @@ "os" "os/user" - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/azure" ) const ( @@ -24,7 +24,11 @@ // NewServicePrincipalTokenFromCredentials creates a new ServicePrincipalToken using values of the // passed credentials map. func NewServicePrincipalTokenFromCredentials(c map[string]string, scope string) (*azure.ServicePrincipalToken, error) { - return azure.NewServicePrincipalToken(c["clientID"], c["clientSecret"], c["tenantID"], scope) + oauthConfig, err := azure.PublicCloud.OAuthConfigForTenant(c["tenantID"]) + if err != nil { + panic(err) + } + return azure.NewServicePrincipalToken(*oauthConfig, c["clientID"], c["clientSecret"], scope) } // LoadCredentials reads credentials from a ~/.azure/credentials.json file. 
See the accompanying diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/features/client.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/features/client.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/features/client.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/features/client.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,352 +0,0 @@ -package features - -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. -// -// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 -// Changes may cause incorrect behavior and will be lost if the code is -// regenerated. - -import ( - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" - "net/http" - "net/url" -) - -const ( - // APIVersion is the version of the Features - APIVersion = "2014-08-01-preview" - - // DefaultBaseURI is the default URI used for the service Features - DefaultBaseURI = "https://management.azure.com" -) - -// ManagementClient is the base client for Features. -type ManagementClient struct { - autorest.Client - BaseURI string - SubscriptionID string -} - -// New creates an instance of the ManagementClient client. 
-func New(subscriptionID string) ManagementClient { - return NewWithBaseURI(DefaultBaseURI, subscriptionID) -} - -// NewWithBaseURI creates an instance of the ManagementClient client. -func NewWithBaseURI(baseURI string, subscriptionID string) ManagementClient { - return ManagementClient{ - Client: autorest.NewClientWithUserAgent(UserAgent()), - BaseURI: baseURI, - SubscriptionID: subscriptionID, - } -} - -// Get get all features under the subscription. -// -// resourceProviderNamespace is namespace of the resource provider. -// featureName is previewed feature name in the resource provider. -func (client ManagementClient) Get(resourceProviderNamespace string, featureName string) (result FeatureResult, ae error) { - req, err := client.GetPreparer(resourceProviderNamespace, featureName) - if err != nil { - return result, autorest.NewErrorWithError(err, "features/ManagementClient", "Get", "Failure preparing request") - } - - resp, err := client.GetSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "features/ManagementClient", "Get", "Failure sending request") - } - - result, err = client.GetResponder(resp) - if err != nil { - ae = autorest.NewErrorWithError(err, "features/ManagementClient", "Get", "Failure responding to request") - } - - return -} - -// GetPreparer prepares the Get request. 
-func (client ManagementClient) GetPreparer(resourceProviderNamespace string, featureName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "featureName": url.QueryEscape(featureName), - "resourceProviderNamespace": url.QueryEscape(resourceProviderNamespace), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - } - - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/providers/Microsoft.Features/providers/{resourceProviderNamespace}/features/{featureName}"), - autorest.WithPathParameters(pathParameters), - autorest.WithQueryParameters(queryParameters)) -} - -// GetSender sends the Get request. The method will close the -// http.Response Body if it receives an error. -func (client ManagementClient) GetSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) -} - -// GetResponder handles the response to the Get request. The method always -// closes the http.Response Body. -func (client ManagementClient) GetResponder(resp *http.Response) (result FeatureResult, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// List gets a list of previewed features of a resource provider. -// -// resourceProviderNamespace is the namespace of the resource provider. 
-func (client ManagementClient) List(resourceProviderNamespace string) (result FeatureOperationsListResult, ae error) { - req, err := client.ListPreparer(resourceProviderNamespace) - if err != nil { - return result, autorest.NewErrorWithError(err, "features/ManagementClient", "List", "Failure preparing request") - } - - resp, err := client.ListSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "features/ManagementClient", "List", "Failure sending request") - } - - result, err = client.ListResponder(resp) - if err != nil { - ae = autorest.NewErrorWithError(err, "features/ManagementClient", "List", "Failure responding to request") - } - - return -} - -// ListPreparer prepares the List request. -func (client ManagementClient) ListPreparer(resourceProviderNamespace string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "resourceProviderNamespace": url.QueryEscape(resourceProviderNamespace), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - } - - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/providers/Microsoft.Features/providers/{resourceProviderNamespace}/features"), - autorest.WithPathParameters(pathParameters), - autorest.WithQueryParameters(queryParameters)) -} - -// ListSender sends the List request. The method will close the -// http.Response Body if it receives an error. -func (client ManagementClient) ListSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) -} - -// ListResponder handles the response to the List request. The method always -// closes the http.Response Body. 
-func (client ManagementClient) ListResponder(resp *http.Response) (result FeatureOperationsListResult, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// ListNextResults retrieves the next set of results, if any. -func (client ManagementClient) ListNextResults(lastResults FeatureOperationsListResult) (result FeatureOperationsListResult, ae error) { - req, err := lastResults.FeatureOperationsListResultPreparer() - if err != nil { - return result, autorest.NewErrorWithError(err, "features/ManagementClient", "List", "Failure preparing next results request request") - } - if req == nil { - return - } - - resp, err := client.ListSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "features/ManagementClient", "List", "Failure sending next results request request") - } - - result, err = client.ListResponder(resp) - if err != nil { - ae = autorest.NewErrorWithError(err, "features/ManagementClient", "List", "Failure responding to next results request request") - } - - return -} - -// ListAll gets a list of previewed features for all the providers in the -// current subscription. 
-func (client ManagementClient) ListAll() (result FeatureOperationsListResult, ae error) { - req, err := client.ListAllPreparer() - if err != nil { - return result, autorest.NewErrorWithError(err, "features/ManagementClient", "ListAll", "Failure preparing request") - } - - resp, err := client.ListAllSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "features/ManagementClient", "ListAll", "Failure sending request") - } - - result, err = client.ListAllResponder(resp) - if err != nil { - ae = autorest.NewErrorWithError(err, "features/ManagementClient", "ListAll", "Failure responding to request") - } - - return -} - -// ListAllPreparer prepares the ListAll request. -func (client ManagementClient) ListAllPreparer() (*http.Request, error) { - pathParameters := map[string]interface{}{ - "subscriptionId": url.QueryEscape(client.SubscriptionID), - } - - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/providers/Microsoft.Features/features"), - autorest.WithPathParameters(pathParameters), - autorest.WithQueryParameters(queryParameters)) -} - -// ListAllSender sends the ListAll request. The method will close the -// http.Response Body if it receives an error. -func (client ManagementClient) ListAllSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) -} - -// ListAllResponder handles the response to the ListAll request. The method always -// closes the http.Response Body. 
-func (client ManagementClient) ListAllResponder(resp *http.Response) (result FeatureOperationsListResult, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// ListAllNextResults retrieves the next set of results, if any. -func (client ManagementClient) ListAllNextResults(lastResults FeatureOperationsListResult) (result FeatureOperationsListResult, ae error) { - req, err := lastResults.FeatureOperationsListResultPreparer() - if err != nil { - return result, autorest.NewErrorWithError(err, "features/ManagementClient", "ListAll", "Failure preparing next results request request") - } - if req == nil { - return - } - - resp, err := client.ListAllSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "features/ManagementClient", "ListAll", "Failure sending next results request request") - } - - result, err = client.ListAllResponder(resp) - if err != nil { - ae = autorest.NewErrorWithError(err, "features/ManagementClient", "ListAll", "Failure responding to next results request request") - } - - return -} - -// Register registers for a previewed feature of a resource provider. -// -// resourceProviderNamespace is namespace of the resource provider. -// featureName is previewed feature name in the resource provider. 
-func (client ManagementClient) Register(resourceProviderNamespace string, featureName string) (result FeatureResult, ae error) { - req, err := client.RegisterPreparer(resourceProviderNamespace, featureName) - if err != nil { - return result, autorest.NewErrorWithError(err, "features/ManagementClient", "Register", "Failure preparing request") - } - - resp, err := client.RegisterSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "features/ManagementClient", "Register", "Failure sending request") - } - - result, err = client.RegisterResponder(resp) - if err != nil { - ae = autorest.NewErrorWithError(err, "features/ManagementClient", "Register", "Failure responding to request") - } - - return -} - -// RegisterPreparer prepares the Register request. -func (client ManagementClient) RegisterPreparer(resourceProviderNamespace string, featureName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "featureName": url.QueryEscape(featureName), - "resourceProviderNamespace": url.QueryEscape(resourceProviderNamespace), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - } - - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsPost(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/providers/Microsoft.Features/providers/{resourceProviderNamespace}/features/{featureName}/register"), - autorest.WithPathParameters(pathParameters), - autorest.WithQueryParameters(queryParameters)) -} - -// RegisterSender sends the Register request. The method will close the -// http.Response Body if it receives an error. -func (client ManagementClient) RegisterSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) -} - -// RegisterResponder handles the response to the Register request. 
The method always -// closes the http.Response Body. -func (client ManagementClient) RegisterResponder(resp *http.Response) (result FeatureResult, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/features/features.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/features/features.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/features/features.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/features/features.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,338 +0,0 @@ -package features - -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. -// -// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 -// Changes may cause incorrect behavior and will be lost if the code is -// regenerated. - -import ( - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" - "net/http" - "net/url" -) - -// Client is the client for the Features methods of the Features service. -type Client struct { - ManagementClient -} - -// NewClient creates an instance of the Client client. 
-func NewClient(subscriptionID string) Client { - return NewClientWithBaseURI(DefaultBaseURI, subscriptionID) -} - -// NewClientWithBaseURI creates an instance of the Client client. -func NewClientWithBaseURI(baseURI string, subscriptionID string) Client { - return Client{NewWithBaseURI(baseURI, subscriptionID)} -} - -// Get get all features under the subscription. -// -// resourceProviderNamespace is namespace of the resource provider. -// featureName is previewed feature name in the resource provider. -func (client Client) Get(resourceProviderNamespace string, featureName string) (result FeatureResult, ae error) { - req, err := client.GetPreparer(resourceProviderNamespace, featureName) - if err != nil { - return result, autorest.NewErrorWithError(err, "features/Client", "Get", "Failure preparing request") - } - - resp, err := client.GetSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "features/Client", "Get", "Failure sending request") - } - - result, err = client.GetResponder(resp) - if err != nil { - ae = autorest.NewErrorWithError(err, "features/Client", "Get", "Failure responding to request") - } - - return -} - -// GetPreparer prepares the Get request. 
-func (client Client) GetPreparer(resourceProviderNamespace string, featureName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "featureName": url.QueryEscape(featureName), - "resourceProviderNamespace": url.QueryEscape(resourceProviderNamespace), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - } - - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/providers/Microsoft.Features/providers/{resourceProviderNamespace}/features/{featureName}"), - autorest.WithPathParameters(pathParameters), - autorest.WithQueryParameters(queryParameters)) -} - -// GetSender sends the Get request. The method will close the -// http.Response Body if it receives an error. -func (client Client) GetSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) -} - -// GetResponder handles the response to the Get request. The method always -// closes the http.Response Body. -func (client Client) GetResponder(resp *http.Response) (result FeatureResult, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// List gets a list of previewed features of a resource provider. -// -// resourceProviderNamespace is the namespace of the resource provider. 
-func (client Client) List(resourceProviderNamespace string) (result FeatureOperationsListResult, ae error) { - req, err := client.ListPreparer(resourceProviderNamespace) - if err != nil { - return result, autorest.NewErrorWithError(err, "features/Client", "List", "Failure preparing request") - } - - resp, err := client.ListSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "features/Client", "List", "Failure sending request") - } - - result, err = client.ListResponder(resp) - if err != nil { - ae = autorest.NewErrorWithError(err, "features/Client", "List", "Failure responding to request") - } - - return -} - -// ListPreparer prepares the List request. -func (client Client) ListPreparer(resourceProviderNamespace string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "resourceProviderNamespace": url.QueryEscape(resourceProviderNamespace), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - } - - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/providers/Microsoft.Features/providers/{resourceProviderNamespace}/features"), - autorest.WithPathParameters(pathParameters), - autorest.WithQueryParameters(queryParameters)) -} - -// ListSender sends the List request. The method will close the -// http.Response Body if it receives an error. -func (client Client) ListSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) -} - -// ListResponder handles the response to the List request. The method always -// closes the http.Response Body. 
-func (client Client) ListResponder(resp *http.Response) (result FeatureOperationsListResult, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// ListNextResults retrieves the next set of results, if any. -func (client Client) ListNextResults(lastResults FeatureOperationsListResult) (result FeatureOperationsListResult, ae error) { - req, err := lastResults.FeatureOperationsListResultPreparer() - if err != nil { - return result, autorest.NewErrorWithError(err, "features/Client", "List", "Failure preparing next results request request") - } - if req == nil { - return - } - - resp, err := client.ListSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "features/Client", "List", "Failure sending next results request request") - } - - result, err = client.ListResponder(resp) - if err != nil { - ae = autorest.NewErrorWithError(err, "features/Client", "List", "Failure responding to next results request request") - } - - return -} - -// ListAll gets a list of previewed features for all the providers in the -// current subscription. 
-func (client Client) ListAll() (result FeatureOperationsListResult, ae error) { - req, err := client.ListAllPreparer() - if err != nil { - return result, autorest.NewErrorWithError(err, "features/Client", "ListAll", "Failure preparing request") - } - - resp, err := client.ListAllSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "features/Client", "ListAll", "Failure sending request") - } - - result, err = client.ListAllResponder(resp) - if err != nil { - ae = autorest.NewErrorWithError(err, "features/Client", "ListAll", "Failure responding to request") - } - - return -} - -// ListAllPreparer prepares the ListAll request. -func (client Client) ListAllPreparer() (*http.Request, error) { - pathParameters := map[string]interface{}{ - "subscriptionId": url.QueryEscape(client.SubscriptionID), - } - - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/providers/Microsoft.Features/features"), - autorest.WithPathParameters(pathParameters), - autorest.WithQueryParameters(queryParameters)) -} - -// ListAllSender sends the ListAll request. The method will close the -// http.Response Body if it receives an error. -func (client Client) ListAllSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) -} - -// ListAllResponder handles the response to the ListAll request. The method always -// closes the http.Response Body. 
-func (client Client) ListAllResponder(resp *http.Response) (result FeatureOperationsListResult, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// ListAllNextResults retrieves the next set of results, if any. -func (client Client) ListAllNextResults(lastResults FeatureOperationsListResult) (result FeatureOperationsListResult, ae error) { - req, err := lastResults.FeatureOperationsListResultPreparer() - if err != nil { - return result, autorest.NewErrorWithError(err, "features/Client", "ListAll", "Failure preparing next results request request") - } - if req == nil { - return - } - - resp, err := client.ListAllSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "features/Client", "ListAll", "Failure sending next results request request") - } - - result, err = client.ListAllResponder(resp) - if err != nil { - ae = autorest.NewErrorWithError(err, "features/Client", "ListAll", "Failure responding to next results request request") - } - - return -} - -// Register registers for a previewed feature of a resource provider. -// -// resourceProviderNamespace is namespace of the resource provider. -// featureName is previewed feature name in the resource provider. 
-func (client Client) Register(resourceProviderNamespace string, featureName string) (result FeatureResult, ae error) { - req, err := client.RegisterPreparer(resourceProviderNamespace, featureName) - if err != nil { - return result, autorest.NewErrorWithError(err, "features/Client", "Register", "Failure preparing request") - } - - resp, err := client.RegisterSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "features/Client", "Register", "Failure sending request") - } - - result, err = client.RegisterResponder(resp) - if err != nil { - ae = autorest.NewErrorWithError(err, "features/Client", "Register", "Failure responding to request") - } - - return -} - -// RegisterPreparer prepares the Register request. -func (client Client) RegisterPreparer(resourceProviderNamespace string, featureName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "featureName": url.QueryEscape(featureName), - "resourceProviderNamespace": url.QueryEscape(resourceProviderNamespace), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - } - - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsPost(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/providers/Microsoft.Features/providers/{resourceProviderNamespace}/features/{featureName}/register"), - autorest.WithPathParameters(pathParameters), - autorest.WithQueryParameters(queryParameters)) -} - -// RegisterSender sends the Register request. The method will close the -// http.Response Body if it receives an error. -func (client Client) RegisterSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) -} - -// RegisterResponder handles the response to the Register request. The method always -// closes the http.Response Body. 
-func (client Client) RegisterResponder(resp *http.Response) (result FeatureResult, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/features/models.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/features/models.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/features/models.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/features/models.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,90 +0,0 @@ -package features - -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. -// -// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 -// Changes may cause incorrect behavior and will be lost if the code is -// regenerated. - -import ( - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/to" - "net/http" -) - -// DeploymentExtendedFilter is deployment filter. 
-type DeploymentExtendedFilter struct { - ProvisioningState *string `json:"provisioningState,omitempty"` -} - -// FeatureOperationsListResult is list of previewed features. -type FeatureOperationsListResult struct { - autorest.Response `json:"-"` - Value *[]FeatureResult `json:"value,omitempty"` - NextLink *string `json:"nextLink,omitempty"` -} - -// FeatureOperationsListResultPreparer prepares a request to retrieve the next set of results. It returns -// nil if no more results exist. -func (client FeatureOperationsListResult) FeatureOperationsListResultPreparer() (*http.Request, error) { - if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { - return nil, nil - } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(to.String(client.NextLink))) -} - -// FeatureProperties is previewed feature information. -type FeatureProperties struct { - State *string `json:"state,omitempty"` -} - -// FeatureResult is previewed feature information. -type FeatureResult struct { - autorest.Response `json:"-"` - Name *string `json:"name,omitempty"` - Properties *FeatureProperties `json:"properties,omitempty"` - ID *string `json:"id,omitempty"` - Type *string `json:"type,omitempty"` -} - -// GenericResourceFilter is resource filter. -type GenericResourceFilter struct { - ResourceType *string `json:"resourceType,omitempty"` - Tagname *string `json:"tagname,omitempty"` - Tagvalue *string `json:"tagvalue,omitempty"` -} - -// Resource is -type Resource struct { - ID *string `json:"id,omitempty"` - Name *string `json:"name,omitempty"` - Type *string `json:"type,omitempty"` - Location *string `json:"location,omitempty"` - Tags *map[string]*string `json:"tags,omitempty"` -} - -// ResourceGroupFilter is resource group filter. 
-type ResourceGroupFilter struct { - TagName *string `json:"tagName,omitempty"` - TagValue *string `json:"tagValue,omitempty"` -} - -// SubResource is -type SubResource struct { - ID *string `json:"id,omitempty"` -} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/features/version.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/features/version.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/features/version.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/features/version.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,43 +0,0 @@ -package features - -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. -// -// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 -// Changes may cause incorrect behavior and will be lost if the code is -// regenerated. - -import ( - "fmt" -) - -const ( - major = "0" - minor = "3" - patch = "0" - // Always begin a "tag" with a dash (as per http://semver.org) - tag = "-beta" - semVerFormat = "%s.%s.%s%s" - userAgentFormat = "Azure-SDK-for-Go/%s;Package arm/%s;API %s" -) - -// UserAgent returns the UserAgent string to use when sending http.Requests. -func UserAgent() string { - return fmt.Sprintf(userAgentFormat, Version(), "features", "2014-08-01-preview") -} - -// Version returns the semantic version (see http://semver.org) of the client. 
-func Version() string { - return fmt.Sprintf(semVerFormat, major, minor, patch, tag) -} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/intune/android.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/intune/android.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/intune/android.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/intune/android.go 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,827 @@ +package intune + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "net/http" +) + +// AndroidClient is the microsoft.Intune Resource provider Api features in the +// swagger-2.0 specification +type AndroidClient struct { + ManagementClient +} + +// NewAndroidClient creates an instance of the AndroidClient client. +func NewAndroidClient() AndroidClient { + return NewAndroidClientWithBaseURI(DefaultBaseURI) +} + +// NewAndroidClientWithBaseURI creates an instance of the AndroidClient client. 
+func NewAndroidClientWithBaseURI(baseURI string) AndroidClient { + return AndroidClient{NewWithBaseURI(baseURI)} +} + +// AddAppForMAMPolicy add app to an AndroidMAMPolicy. +// +// hostName is location hostName for the tenant policyName is unique name for +// the policy appName is application unique Name parameters is parameters +// supplied to the Create or update app to an android policy operation. +func (client AndroidClient) AddAppForMAMPolicy(hostName string, policyName string, appName string, parameters MAMPolicyAppIDOrGroupIDPayload) (result autorest.Response, err error) { + req, err := client.AddAppForMAMPolicyPreparer(hostName, policyName, appName, parameters) + if err != nil { + return result, autorest.NewErrorWithError(err, "intune.AndroidClient", "AddAppForMAMPolicy", nil, "Failure preparing request") + } + + resp, err := client.AddAppForMAMPolicySender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "intune.AndroidClient", "AddAppForMAMPolicy", resp, "Failure sending request") + } + + result, err = client.AddAppForMAMPolicyResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "intune.AndroidClient", "AddAppForMAMPolicy", resp, "Failure responding to request") + } + + return +} + +// AddAppForMAMPolicyPreparer prepares the AddAppForMAMPolicy request. 
+func (client AndroidClient) AddAppForMAMPolicyPreparer(hostName string, policyName string, appName string, parameters MAMPolicyAppIDOrGroupIDPayload) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "appName": autorest.Encode("path", appName), + "hostName": autorest.Encode("path", hostName), + "policyName": autorest.Encode("path", policyName), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/providers/Microsoft.Intune/locations/{hostName}/androidPolicies/{policyName}/apps/{appName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// AddAppForMAMPolicySender sends the AddAppForMAMPolicy request. The method will close the +// http.Response Body if it receives an error. +func (client AndroidClient) AddAppForMAMPolicySender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// AddAppForMAMPolicyResponder handles the response to the AddAppForMAMPolicy request. The method always +// closes the http.Response Body. +func (client AndroidClient) AddAppForMAMPolicyResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// AddGroupForMAMPolicy add group to an AndroidMAMPolicy. +// +// hostName is location hostName for the tenant policyName is unique name for +// the policy groupID is group Id parameters is parameters supplied to the +// Create or update app to an android policy operation. 
+func (client AndroidClient) AddGroupForMAMPolicy(hostName string, policyName string, groupID string, parameters MAMPolicyAppIDOrGroupIDPayload) (result autorest.Response, err error) { + req, err := client.AddGroupForMAMPolicyPreparer(hostName, policyName, groupID, parameters) + if err != nil { + return result, autorest.NewErrorWithError(err, "intune.AndroidClient", "AddGroupForMAMPolicy", nil, "Failure preparing request") + } + + resp, err := client.AddGroupForMAMPolicySender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "intune.AndroidClient", "AddGroupForMAMPolicy", resp, "Failure sending request") + } + + result, err = client.AddGroupForMAMPolicyResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "intune.AndroidClient", "AddGroupForMAMPolicy", resp, "Failure responding to request") + } + + return +} + +// AddGroupForMAMPolicyPreparer prepares the AddGroupForMAMPolicy request. +func (client AndroidClient) AddGroupForMAMPolicyPreparer(hostName string, policyName string, groupID string, parameters MAMPolicyAppIDOrGroupIDPayload) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "groupId": autorest.Encode("path", groupID), + "hostName": autorest.Encode("path", hostName), + "policyName": autorest.Encode("path", policyName), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/providers/Microsoft.Intune/locations/{hostName}/androidPolicies/{policyName}/groups/{groupId}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// AddGroupForMAMPolicySender sends the AddGroupForMAMPolicy request. The method will close the +// http.Response Body if it receives an error. 
+func (client AndroidClient) AddGroupForMAMPolicySender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// AddGroupForMAMPolicyResponder handles the response to the AddGroupForMAMPolicy request. The method always +// closes the http.Response Body. +func (client AndroidClient) AddGroupForMAMPolicyResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// CreateOrUpdateMAMPolicy creates or updates AndroidMAMPolicy. +// +// hostName is location hostName for the tenant policyName is unique name for +// the policy parameters is parameters supplied to the Create or update an +// android policy operation. +func (client AndroidClient) CreateOrUpdateMAMPolicy(hostName string, policyName string, parameters AndroidMAMPolicy) (result AndroidMAMPolicy, err error) { + req, err := client.CreateOrUpdateMAMPolicyPreparer(hostName, policyName, parameters) + if err != nil { + return result, autorest.NewErrorWithError(err, "intune.AndroidClient", "CreateOrUpdateMAMPolicy", nil, "Failure preparing request") + } + + resp, err := client.CreateOrUpdateMAMPolicySender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "intune.AndroidClient", "CreateOrUpdateMAMPolicy", resp, "Failure sending request") + } + + result, err = client.CreateOrUpdateMAMPolicyResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "intune.AndroidClient", "CreateOrUpdateMAMPolicy", resp, "Failure responding to request") + } + + return +} + +// CreateOrUpdateMAMPolicyPreparer prepares the CreateOrUpdateMAMPolicy request. 
+func (client AndroidClient) CreateOrUpdateMAMPolicyPreparer(hostName string, policyName string, parameters AndroidMAMPolicy) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "hostName": autorest.Encode("path", hostName), + "policyName": autorest.Encode("path", policyName), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/providers/Microsoft.Intune/locations/{hostName}/androidPolicies/{policyName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// CreateOrUpdateMAMPolicySender sends the CreateOrUpdateMAMPolicy request. The method will close the +// http.Response Body if it receives an error. +func (client AndroidClient) CreateOrUpdateMAMPolicySender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// CreateOrUpdateMAMPolicyResponder handles the response to the CreateOrUpdateMAMPolicy request. The method always +// closes the http.Response Body. 
+func (client AndroidClient) CreateOrUpdateMAMPolicyResponder(resp *http.Response) (result AndroidMAMPolicy, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// DeleteAppForMAMPolicy delete App for Android Policy +// +// hostName is location hostName for the tenant policyName is unique name for +// the policy appName is application unique Name +func (client AndroidClient) DeleteAppForMAMPolicy(hostName string, policyName string, appName string) (result autorest.Response, err error) { + req, err := client.DeleteAppForMAMPolicyPreparer(hostName, policyName, appName) + if err != nil { + return result, autorest.NewErrorWithError(err, "intune.AndroidClient", "DeleteAppForMAMPolicy", nil, "Failure preparing request") + } + + resp, err := client.DeleteAppForMAMPolicySender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "intune.AndroidClient", "DeleteAppForMAMPolicy", resp, "Failure sending request") + } + + result, err = client.DeleteAppForMAMPolicyResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "intune.AndroidClient", "DeleteAppForMAMPolicy", resp, "Failure responding to request") + } + + return +} + +// DeleteAppForMAMPolicyPreparer prepares the DeleteAppForMAMPolicy request. 
+func (client AndroidClient) DeleteAppForMAMPolicyPreparer(hostName string, policyName string, appName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "appName": autorest.Encode("path", appName), + "hostName": autorest.Encode("path", hostName), + "policyName": autorest.Encode("path", policyName), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/providers/Microsoft.Intune/locations/{hostName}/androidPolicies/{policyName}/apps/{appName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// DeleteAppForMAMPolicySender sends the DeleteAppForMAMPolicy request. The method will close the +// http.Response Body if it receives an error. +func (client AndroidClient) DeleteAppForMAMPolicySender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// DeleteAppForMAMPolicyResponder handles the response to the DeleteAppForMAMPolicy request. The method always +// closes the http.Response Body. 
+func (client AndroidClient) DeleteAppForMAMPolicyResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// DeleteGroupForMAMPolicy delete Group for Android Policy +// +// hostName is location hostName for the tenant policyName is unique name for +// the policy groupID is application unique Name +func (client AndroidClient) DeleteGroupForMAMPolicy(hostName string, policyName string, groupID string) (result autorest.Response, err error) { + req, err := client.DeleteGroupForMAMPolicyPreparer(hostName, policyName, groupID) + if err != nil { + return result, autorest.NewErrorWithError(err, "intune.AndroidClient", "DeleteGroupForMAMPolicy", nil, "Failure preparing request") + } + + resp, err := client.DeleteGroupForMAMPolicySender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "intune.AndroidClient", "DeleteGroupForMAMPolicy", resp, "Failure sending request") + } + + result, err = client.DeleteGroupForMAMPolicyResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "intune.AndroidClient", "DeleteGroupForMAMPolicy", resp, "Failure responding to request") + } + + return +} + +// DeleteGroupForMAMPolicyPreparer prepares the DeleteGroupForMAMPolicy request. 
+func (client AndroidClient) DeleteGroupForMAMPolicyPreparer(hostName string, policyName string, groupID string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "groupId": autorest.Encode("path", groupID), + "hostName": autorest.Encode("path", hostName), + "policyName": autorest.Encode("path", policyName), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/providers/Microsoft.Intune/locations/{hostName}/androidPolicies/{policyName}/groups/{groupId}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// DeleteGroupForMAMPolicySender sends the DeleteGroupForMAMPolicy request. The method will close the +// http.Response Body if it receives an error. +func (client AndroidClient) DeleteGroupForMAMPolicySender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// DeleteGroupForMAMPolicyResponder handles the response to the DeleteGroupForMAMPolicy request. The method always +// closes the http.Response Body. 
+func (client AndroidClient) DeleteGroupForMAMPolicyResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// DeleteMAMPolicy delete Android Policy +// +// hostName is location hostName for the tenant policyName is unique name for +// the policy +func (client AndroidClient) DeleteMAMPolicy(hostName string, policyName string) (result autorest.Response, err error) { + req, err := client.DeleteMAMPolicyPreparer(hostName, policyName) + if err != nil { + return result, autorest.NewErrorWithError(err, "intune.AndroidClient", "DeleteMAMPolicy", nil, "Failure preparing request") + } + + resp, err := client.DeleteMAMPolicySender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "intune.AndroidClient", "DeleteMAMPolicy", resp, "Failure sending request") + } + + result, err = client.DeleteMAMPolicyResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "intune.AndroidClient", "DeleteMAMPolicy", resp, "Failure responding to request") + } + + return +} + +// DeleteMAMPolicyPreparer prepares the DeleteMAMPolicy request. 
+func (client AndroidClient) DeleteMAMPolicyPreparer(hostName string, policyName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "hostName": autorest.Encode("path", hostName), + "policyName": autorest.Encode("path", policyName), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/providers/Microsoft.Intune/locations/{hostName}/androidPolicies/{policyName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// DeleteMAMPolicySender sends the DeleteMAMPolicy request. The method will close the +// http.Response Body if it receives an error. +func (client AndroidClient) DeleteMAMPolicySender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// DeleteMAMPolicyResponder handles the response to the DeleteMAMPolicy request. The method always +// closes the http.Response Body. +func (client AndroidClient) DeleteMAMPolicyResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// GetAppForMAMPolicy get apps for an AndroidMAMPolicy. +// +// hostName is location hostName for the tenant policyName is unique name for +// the policy filter is the filter to apply on the operation. selectParameter +// is select specific fields in entity. 
+func (client AndroidClient) GetAppForMAMPolicy(hostName string, policyName string, filter string, top *int32, selectParameter string) (result ApplicationCollection, err error) { + req, err := client.GetAppForMAMPolicyPreparer(hostName, policyName, filter, top, selectParameter) + if err != nil { + return result, autorest.NewErrorWithError(err, "intune.AndroidClient", "GetAppForMAMPolicy", nil, "Failure preparing request") + } + + resp, err := client.GetAppForMAMPolicySender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "intune.AndroidClient", "GetAppForMAMPolicy", resp, "Failure sending request") + } + + result, err = client.GetAppForMAMPolicyResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "intune.AndroidClient", "GetAppForMAMPolicy", resp, "Failure responding to request") + } + + return +} + +// GetAppForMAMPolicyPreparer prepares the GetAppForMAMPolicy request. +func (client AndroidClient) GetAppForMAMPolicyPreparer(hostName string, policyName string, filter string, top *int32, selectParameter string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "hostName": autorest.Encode("path", hostName), + "policyName": autorest.Encode("path", policyName), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + if len(filter) > 0 { + queryParameters["$filter"] = autorest.Encode("query", filter) + } + if top != nil { + queryParameters["$top"] = autorest.Encode("query", *top) + } + if len(selectParameter) > 0 { + queryParameters["$select"] = autorest.Encode("query", selectParameter) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/providers/Microsoft.Intune/locations/{hostName}/AndroidPolicies/{policyName}/apps", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + 
+// GetAppForMAMPolicySender sends the GetAppForMAMPolicy request. The method will close the +// http.Response Body if it receives an error. +func (client AndroidClient) GetAppForMAMPolicySender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetAppForMAMPolicyResponder handles the response to the GetAppForMAMPolicy request. The method always +// closes the http.Response Body. +func (client AndroidClient) GetAppForMAMPolicyResponder(resp *http.Response) (result ApplicationCollection, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// GetAppForMAMPolicyNextResults retrieves the next set of results, if any. +func (client AndroidClient) GetAppForMAMPolicyNextResults(lastResults ApplicationCollection) (result ApplicationCollection, err error) { + req, err := lastResults.ApplicationCollectionPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "intune.AndroidClient", "GetAppForMAMPolicy", nil, "Failure preparing next results request request") + } + if req == nil { + return + } + + resp, err := client.GetAppForMAMPolicySender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "intune.AndroidClient", "GetAppForMAMPolicy", resp, "Failure sending next results request request") + } + + result, err = client.GetAppForMAMPolicyResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "intune.AndroidClient", "GetAppForMAMPolicy", resp, "Failure responding to next results request request") + } + + return +} + +// GetGroupsForMAMPolicy returns groups for a given AndroidMAMPolicy. 
+// +// hostName is location hostName for the tenant policyName is policy name for +// the tenant +func (client AndroidClient) GetGroupsForMAMPolicy(hostName string, policyName string) (result GroupsCollection, err error) { + req, err := client.GetGroupsForMAMPolicyPreparer(hostName, policyName) + if err != nil { + return result, autorest.NewErrorWithError(err, "intune.AndroidClient", "GetGroupsForMAMPolicy", nil, "Failure preparing request") + } + + resp, err := client.GetGroupsForMAMPolicySender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "intune.AndroidClient", "GetGroupsForMAMPolicy", resp, "Failure sending request") + } + + result, err = client.GetGroupsForMAMPolicyResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "intune.AndroidClient", "GetGroupsForMAMPolicy", resp, "Failure responding to request") + } + + return +} + +// GetGroupsForMAMPolicyPreparer prepares the GetGroupsForMAMPolicy request. +func (client AndroidClient) GetGroupsForMAMPolicyPreparer(hostName string, policyName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "hostName": autorest.Encode("path", hostName), + "policyName": autorest.Encode("path", policyName), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/providers/Microsoft.Intune/locations/{hostName}/androidPolicies/{policyName}/groups", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetGroupsForMAMPolicySender sends the GetGroupsForMAMPolicy request. The method will close the +// http.Response Body if it receives an error. 
+func (client AndroidClient) GetGroupsForMAMPolicySender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetGroupsForMAMPolicyResponder handles the response to the GetGroupsForMAMPolicy request. The method always +// closes the http.Response Body. +func (client AndroidClient) GetGroupsForMAMPolicyResponder(resp *http.Response) (result GroupsCollection, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// GetGroupsForMAMPolicyNextResults retrieves the next set of results, if any. +func (client AndroidClient) GetGroupsForMAMPolicyNextResults(lastResults GroupsCollection) (result GroupsCollection, err error) { + req, err := lastResults.GroupsCollectionPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "intune.AndroidClient", "GetGroupsForMAMPolicy", nil, "Failure preparing next results request request") + } + if req == nil { + return + } + + resp, err := client.GetGroupsForMAMPolicySender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "intune.AndroidClient", "GetGroupsForMAMPolicy", resp, "Failure sending next results request request") + } + + result, err = client.GetGroupsForMAMPolicyResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "intune.AndroidClient", "GetGroupsForMAMPolicy", resp, "Failure responding to next results request request") + } + + return +} + +// GetMAMPolicies returns Intune Android policies. +// +// hostName is location hostName for the tenant filter is the filter to apply +// on the operation. selectParameter is select specific fields in entity. 
+func (client AndroidClient) GetMAMPolicies(hostName string, filter string, top *int32, selectParameter string) (result AndroidMAMPolicyCollection, err error) { + req, err := client.GetMAMPoliciesPreparer(hostName, filter, top, selectParameter) + if err != nil { + return result, autorest.NewErrorWithError(err, "intune.AndroidClient", "GetMAMPolicies", nil, "Failure preparing request") + } + + resp, err := client.GetMAMPoliciesSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "intune.AndroidClient", "GetMAMPolicies", resp, "Failure sending request") + } + + result, err = client.GetMAMPoliciesResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "intune.AndroidClient", "GetMAMPolicies", resp, "Failure responding to request") + } + + return +} + +// GetMAMPoliciesPreparer prepares the GetMAMPolicies request. +func (client AndroidClient) GetMAMPoliciesPreparer(hostName string, filter string, top *int32, selectParameter string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "hostName": autorest.Encode("path", hostName), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + if len(filter) > 0 { + queryParameters["$filter"] = autorest.Encode("query", filter) + } + if top != nil { + queryParameters["$top"] = autorest.Encode("query", *top) + } + if len(selectParameter) > 0 { + queryParameters["$select"] = autorest.Encode("query", selectParameter) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/providers/Microsoft.Intune/locations/{hostName}/androidPolicies", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetMAMPoliciesSender sends the GetMAMPolicies request. The method will close the +// http.Response Body if it receives an error. 
+func (client AndroidClient) GetMAMPoliciesSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetMAMPoliciesResponder handles the response to the GetMAMPolicies request. The method always +// closes the http.Response Body. +func (client AndroidClient) GetMAMPoliciesResponder(resp *http.Response) (result AndroidMAMPolicyCollection, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// GetMAMPoliciesNextResults retrieves the next set of results, if any. +func (client AndroidClient) GetMAMPoliciesNextResults(lastResults AndroidMAMPolicyCollection) (result AndroidMAMPolicyCollection, err error) { + req, err := lastResults.AndroidMAMPolicyCollectionPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "intune.AndroidClient", "GetMAMPolicies", nil, "Failure preparing next results request request") + } + if req == nil { + return + } + + resp, err := client.GetMAMPoliciesSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "intune.AndroidClient", "GetMAMPolicies", resp, "Failure sending next results request request") + } + + result, err = client.GetMAMPoliciesResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "intune.AndroidClient", "GetMAMPolicies", resp, "Failure responding to next results request request") + } + + return +} + +// GetMAMPolicyByName returns AndroidMAMPolicy with given name. +// +// hostName is location hostName for the tenant policyName is unique name for +// the policy selectParameter is select specific fields in entity. 
+func (client AndroidClient) GetMAMPolicyByName(hostName string, policyName string, selectParameter string) (result AndroidMAMPolicy, err error) { + req, err := client.GetMAMPolicyByNamePreparer(hostName, policyName, selectParameter) + if err != nil { + return result, autorest.NewErrorWithError(err, "intune.AndroidClient", "GetMAMPolicyByName", nil, "Failure preparing request") + } + + resp, err := client.GetMAMPolicyByNameSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "intune.AndroidClient", "GetMAMPolicyByName", resp, "Failure sending request") + } + + result, err = client.GetMAMPolicyByNameResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "intune.AndroidClient", "GetMAMPolicyByName", resp, "Failure responding to request") + } + + return +} + +// GetMAMPolicyByNamePreparer prepares the GetMAMPolicyByName request. +func (client AndroidClient) GetMAMPolicyByNamePreparer(hostName string, policyName string, selectParameter string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "hostName": autorest.Encode("path", hostName), + "policyName": autorest.Encode("path", policyName), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + if len(selectParameter) > 0 { + queryParameters["$select"] = autorest.Encode("query", selectParameter) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/providers/Microsoft.Intune/locations/{hostName}/androidPolicies/{policyName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetMAMPolicyByNameSender sends the GetMAMPolicyByName request. The method will close the +// http.Response Body if it receives an error. 
+func (client AndroidClient) GetMAMPolicyByNameSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetMAMPolicyByNameResponder handles the response to the GetMAMPolicyByName request. The method always +// closes the http.Response Body. +func (client AndroidClient) GetMAMPolicyByNameResponder(resp *http.Response) (result AndroidMAMPolicy, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// PatchMAMPolicy patch AndroidMAMPolicy. +// +// hostName is location hostName for the tenant policyName is unique name for +// the policy parameters is parameters supplied to the Create or update an +// android policy operation. +func (client AndroidClient) PatchMAMPolicy(hostName string, policyName string, parameters AndroidMAMPolicy) (result AndroidMAMPolicy, err error) { + req, err := client.PatchMAMPolicyPreparer(hostName, policyName, parameters) + if err != nil { + return result, autorest.NewErrorWithError(err, "intune.AndroidClient", "PatchMAMPolicy", nil, "Failure preparing request") + } + + resp, err := client.PatchMAMPolicySender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "intune.AndroidClient", "PatchMAMPolicy", resp, "Failure sending request") + } + + result, err = client.PatchMAMPolicyResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "intune.AndroidClient", "PatchMAMPolicy", resp, "Failure responding to request") + } + + return +} + +// PatchMAMPolicyPreparer prepares the PatchMAMPolicy request. 
+func (client AndroidClient) PatchMAMPolicyPreparer(hostName string, policyName string, parameters AndroidMAMPolicy) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "hostName": autorest.Encode("path", hostName), + "policyName": autorest.Encode("path", policyName), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPatch(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/providers/Microsoft.Intune/locations/{hostName}/androidPolicies/{policyName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// PatchMAMPolicySender sends the PatchMAMPolicy request. The method will close the +// http.Response Body if it receives an error. +func (client AndroidClient) PatchMAMPolicySender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// PatchMAMPolicyResponder handles the response to the PatchMAMPolicy request. The method always +// closes the http.Response Body. 
+func (client AndroidClient) PatchMAMPolicyResponder(resp *http.Response) (result AndroidMAMPolicy, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/intune/client.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/intune/client.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/intune/client.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/intune/client.go 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,945 @@ +// Package intune implements the Azure ARM Intune service API version +// 2015-01-14-preview. +// +// Microsoft.Intune Resource provider Api features in the swagger-2.0 +// specification +package intune + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. 
+ +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "net/http" +) + +const ( + // APIVersion is the version of the Intune + APIVersion = "2015-01-14-preview" + + // DefaultBaseURI is the default URI used for the service Intune + DefaultBaseURI = "https://management.azure.com" +) + +// ManagementClient is the base client for Intune. +type ManagementClient struct { + autorest.Client + BaseURI string + APIVersion string +} + +// New creates an instance of the ManagementClient client. +func New() ManagementClient { + return NewWithBaseURI(DefaultBaseURI) +} + +// NewWithBaseURI creates an instance of the ManagementClient client. +func NewWithBaseURI(baseURI string) ManagementClient { + return ManagementClient{ + Client: autorest.NewClientWithUserAgent(UserAgent()), + BaseURI: baseURI, + APIVersion: APIVersion, + } +} + +// GetApps returns Intune Manageable apps. +// +// hostName is location hostName for the tenant filter is the filter to apply +// on the operation. selectParameter is select specific fields in entity. +func (client ManagementClient) GetApps(hostName string, filter string, top *int32, selectParameter string) (result ApplicationCollection, err error) { + req, err := client.GetAppsPreparer(hostName, filter, top, selectParameter) + if err != nil { + return result, autorest.NewErrorWithError(err, "intune.ManagementClient", "GetApps", nil, "Failure preparing request") + } + + resp, err := client.GetAppsSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "intune.ManagementClient", "GetApps", resp, "Failure sending request") + } + + result, err = client.GetAppsResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "intune.ManagementClient", "GetApps", resp, "Failure responding to request") + } + + return +} + +// GetAppsPreparer prepares the GetApps request. 
+func (client ManagementClient) GetAppsPreparer(hostName string, filter string, top *int32, selectParameter string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "hostName": autorest.Encode("path", hostName), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + if len(filter) > 0 { + queryParameters["$filter"] = autorest.Encode("query", filter) + } + if top != nil { + queryParameters["$top"] = autorest.Encode("query", *top) + } + if len(selectParameter) > 0 { + queryParameters["$select"] = autorest.Encode("query", selectParameter) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/providers/Microsoft.Intune/locations/{hostName}/apps", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetAppsSender sends the GetApps request. The method will close the +// http.Response Body if it receives an error. +func (client ManagementClient) GetAppsSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetAppsResponder handles the response to the GetApps request. The method always +// closes the http.Response Body. +func (client ManagementClient) GetAppsResponder(resp *http.Response) (result ApplicationCollection, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// GetAppsNextResults retrieves the next set of results, if any. 
+func (client ManagementClient) GetAppsNextResults(lastResults ApplicationCollection) (result ApplicationCollection, err error) { + req, err := lastResults.ApplicationCollectionPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "intune.ManagementClient", "GetApps", nil, "Failure preparing next results request request") + } + if req == nil { + return + } + + resp, err := client.GetAppsSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "intune.ManagementClient", "GetApps", resp, "Failure sending next results request request") + } + + result, err = client.GetAppsResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "intune.ManagementClient", "GetApps", resp, "Failure responding to next results request request") + } + + return +} + +// GetLocationByHostName returns location for given tenant. +func (client ManagementClient) GetLocationByHostName() (result Location, err error) { + req, err := client.GetLocationByHostNamePreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "intune.ManagementClient", "GetLocationByHostName", nil, "Failure preparing request") + } + + resp, err := client.GetLocationByHostNameSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "intune.ManagementClient", "GetLocationByHostName", resp, "Failure sending request") + } + + result, err = client.GetLocationByHostNameResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "intune.ManagementClient", "GetLocationByHostName", resp, "Failure responding to request") + } + + return +} + +// GetLocationByHostNamePreparer prepares the GetLocationByHostName request. 
+func (client ManagementClient) GetLocationByHostNamePreparer() (*http.Request, error) { + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/providers/Microsoft.Intune/locations/hostName"), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetLocationByHostNameSender sends the GetLocationByHostName request. The method will close the +// http.Response Body if it receives an error. +func (client ManagementClient) GetLocationByHostNameSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetLocationByHostNameResponder handles the response to the GetLocationByHostName request. The method always +// closes the http.Response Body. +func (client ManagementClient) GetLocationByHostNameResponder(resp *http.Response) (result Location, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// GetLocations returns location for user tenant. 
+func (client ManagementClient) GetLocations() (result LocationCollection, err error) { + req, err := client.GetLocationsPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "intune.ManagementClient", "GetLocations", nil, "Failure preparing request") + } + + resp, err := client.GetLocationsSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "intune.ManagementClient", "GetLocations", resp, "Failure sending request") + } + + result, err = client.GetLocationsResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "intune.ManagementClient", "GetLocations", resp, "Failure responding to request") + } + + return +} + +// GetLocationsPreparer prepares the GetLocations request. +func (client ManagementClient) GetLocationsPreparer() (*http.Request, error) { + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/providers/Microsoft.Intune/locations"), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetLocationsSender sends the GetLocations request. The method will close the +// http.Response Body if it receives an error. +func (client ManagementClient) GetLocationsSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetLocationsResponder handles the response to the GetLocations request. The method always +// closes the http.Response Body. 
+func (client ManagementClient) GetLocationsResponder(resp *http.Response) (result LocationCollection, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// GetLocationsNextResults retrieves the next set of results, if any. +func (client ManagementClient) GetLocationsNextResults(lastResults LocationCollection) (result LocationCollection, err error) { + req, err := lastResults.LocationCollectionPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "intune.ManagementClient", "GetLocations", nil, "Failure preparing next results request request") + } + if req == nil { + return + } + + resp, err := client.GetLocationsSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "intune.ManagementClient", "GetLocations", resp, "Failure sending next results request request") + } + + result, err = client.GetLocationsResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "intune.ManagementClient", "GetLocations", resp, "Failure responding to next results request request") + } + + return +} + +// GetMAMFlaggedUserByName returns Intune flagged user details +// +// hostName is location hostName for the tenant userName is flagged userName +// selectParameter is select specific fields in entity. 
+func (client ManagementClient) GetMAMFlaggedUserByName(hostName string, userName string, selectParameter string) (result FlaggedUser, err error) { + req, err := client.GetMAMFlaggedUserByNamePreparer(hostName, userName, selectParameter) + if err != nil { + return result, autorest.NewErrorWithError(err, "intune.ManagementClient", "GetMAMFlaggedUserByName", nil, "Failure preparing request") + } + + resp, err := client.GetMAMFlaggedUserByNameSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "intune.ManagementClient", "GetMAMFlaggedUserByName", resp, "Failure sending request") + } + + result, err = client.GetMAMFlaggedUserByNameResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "intune.ManagementClient", "GetMAMFlaggedUserByName", resp, "Failure responding to request") + } + + return +} + +// GetMAMFlaggedUserByNamePreparer prepares the GetMAMFlaggedUserByName request. +func (client ManagementClient) GetMAMFlaggedUserByNamePreparer(hostName string, userName string, selectParameter string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "hostName": autorest.Encode("path", hostName), + "userName": autorest.Encode("path", userName), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + if len(selectParameter) > 0 { + queryParameters["$select"] = autorest.Encode("query", selectParameter) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/providers/Microsoft.Intune/locations/{hostName}/flaggedUsers/{userName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetMAMFlaggedUserByNameSender sends the GetMAMFlaggedUserByName request. The method will close the +// http.Response Body if it receives an error. 
+func (client ManagementClient) GetMAMFlaggedUserByNameSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetMAMFlaggedUserByNameResponder handles the response to the GetMAMFlaggedUserByName request. The method always +// closes the http.Response Body. +func (client ManagementClient) GetMAMFlaggedUserByNameResponder(resp *http.Response) (result FlaggedUser, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// GetMAMFlaggedUsers returns Intune flagged user collection +// +// hostName is location hostName for the tenant filter is the filter to apply +// on the operation. selectParameter is select specific fields in entity. +func (client ManagementClient) GetMAMFlaggedUsers(hostName string, filter string, top *int32, selectParameter string) (result FlaggedUserCollection, err error) { + req, err := client.GetMAMFlaggedUsersPreparer(hostName, filter, top, selectParameter) + if err != nil { + return result, autorest.NewErrorWithError(err, "intune.ManagementClient", "GetMAMFlaggedUsers", nil, "Failure preparing request") + } + + resp, err := client.GetMAMFlaggedUsersSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "intune.ManagementClient", "GetMAMFlaggedUsers", resp, "Failure sending request") + } + + result, err = client.GetMAMFlaggedUsersResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "intune.ManagementClient", "GetMAMFlaggedUsers", resp, "Failure responding to request") + } + + return +} + +// GetMAMFlaggedUsersPreparer prepares the GetMAMFlaggedUsers request. 
+func (client ManagementClient) GetMAMFlaggedUsersPreparer(hostName string, filter string, top *int32, selectParameter string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "hostName": autorest.Encode("path", hostName), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + if len(filter) > 0 { + queryParameters["$filter"] = autorest.Encode("query", filter) + } + if top != nil { + queryParameters["$top"] = autorest.Encode("query", *top) + } + if len(selectParameter) > 0 { + queryParameters["$select"] = autorest.Encode("query", selectParameter) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/providers/Microsoft.Intune/locations/{hostName}/flaggedUsers", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetMAMFlaggedUsersSender sends the GetMAMFlaggedUsers request. The method will close the +// http.Response Body if it receives an error. +func (client ManagementClient) GetMAMFlaggedUsersSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetMAMFlaggedUsersResponder handles the response to the GetMAMFlaggedUsers request. The method always +// closes the http.Response Body. +func (client ManagementClient) GetMAMFlaggedUsersResponder(resp *http.Response) (result FlaggedUserCollection, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// GetMAMFlaggedUsersNextResults retrieves the next set of results, if any. 
+func (client ManagementClient) GetMAMFlaggedUsersNextResults(lastResults FlaggedUserCollection) (result FlaggedUserCollection, err error) { + req, err := lastResults.FlaggedUserCollectionPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "intune.ManagementClient", "GetMAMFlaggedUsers", nil, "Failure preparing next results request request") + } + if req == nil { + return + } + + resp, err := client.GetMAMFlaggedUsersSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "intune.ManagementClient", "GetMAMFlaggedUsers", resp, "Failure sending next results request request") + } + + result, err = client.GetMAMFlaggedUsersResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "intune.ManagementClient", "GetMAMFlaggedUsers", resp, "Failure responding to next results request request") + } + + return +} + +// GetMAMStatuses returns Intune Tenant level statuses. +// +// hostName is location hostName for the tenant +func (client ManagementClient) GetMAMStatuses(hostName string) (result StatusesDefault, err error) { + req, err := client.GetMAMStatusesPreparer(hostName) + if err != nil { + return result, autorest.NewErrorWithError(err, "intune.ManagementClient", "GetMAMStatuses", nil, "Failure preparing request") + } + + resp, err := client.GetMAMStatusesSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "intune.ManagementClient", "GetMAMStatuses", resp, "Failure sending request") + } + + result, err = client.GetMAMStatusesResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "intune.ManagementClient", "GetMAMStatuses", resp, "Failure responding to request") + } + + return +} + +// GetMAMStatusesPreparer prepares the GetMAMStatuses request. 
+func (client ManagementClient) GetMAMStatusesPreparer(hostName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "hostName": autorest.Encode("path", hostName), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/providers/Microsoft.Intune/locations/{hostName}/statuses/default", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetMAMStatusesSender sends the GetMAMStatuses request. The method will close the +// http.Response Body if it receives an error. +func (client ManagementClient) GetMAMStatusesSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetMAMStatusesResponder handles the response to the GetMAMStatuses request. The method always +// closes the http.Response Body. +func (client ManagementClient) GetMAMStatusesResponder(resp *http.Response) (result StatusesDefault, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// GetMAMStatusesNextResults retrieves the next set of results, if any. 
+func (client ManagementClient) GetMAMStatusesNextResults(lastResults StatusesDefault) (result StatusesDefault, err error) { + req, err := lastResults.StatusesDefaultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "intune.ManagementClient", "GetMAMStatuses", nil, "Failure preparing next results request request") + } + if req == nil { + return + } + + resp, err := client.GetMAMStatusesSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "intune.ManagementClient", "GetMAMStatuses", resp, "Failure sending next results request request") + } + + result, err = client.GetMAMStatusesResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "intune.ManagementClient", "GetMAMStatuses", resp, "Failure responding to next results request request") + } + + return +} + +// GetMAMUserDeviceByDeviceName get a unique device for a user. +// +// hostName is location hostName for the tenant userName is unique user name +// deviceName is device name selectParameter is select specific fields in +// entity. 
+func (client ManagementClient) GetMAMUserDeviceByDeviceName(hostName string, userName string, deviceName string, selectParameter string) (result Device, err error) { + req, err := client.GetMAMUserDeviceByDeviceNamePreparer(hostName, userName, deviceName, selectParameter) + if err != nil { + return result, autorest.NewErrorWithError(err, "intune.ManagementClient", "GetMAMUserDeviceByDeviceName", nil, "Failure preparing request") + } + + resp, err := client.GetMAMUserDeviceByDeviceNameSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "intune.ManagementClient", "GetMAMUserDeviceByDeviceName", resp, "Failure sending request") + } + + result, err = client.GetMAMUserDeviceByDeviceNameResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "intune.ManagementClient", "GetMAMUserDeviceByDeviceName", resp, "Failure responding to request") + } + + return +} + +// GetMAMUserDeviceByDeviceNamePreparer prepares the GetMAMUserDeviceByDeviceName request. 
+func (client ManagementClient) GetMAMUserDeviceByDeviceNamePreparer(hostName string, userName string, deviceName string, selectParameter string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "deviceName": autorest.Encode("path", deviceName), + "hostName": autorest.Encode("path", hostName), + "userName": autorest.Encode("path", userName), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + if len(selectParameter) > 0 { + queryParameters["$select"] = autorest.Encode("query", selectParameter) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/providers/Microsoft.Intune/locations/{hostName}/users/{userName}/devices/{deviceName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetMAMUserDeviceByDeviceNameSender sends the GetMAMUserDeviceByDeviceName request. The method will close the +// http.Response Body if it receives an error. +func (client ManagementClient) GetMAMUserDeviceByDeviceNameSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetMAMUserDeviceByDeviceNameResponder handles the response to the GetMAMUserDeviceByDeviceName request. The method always +// closes the http.Response Body. +func (client ManagementClient) GetMAMUserDeviceByDeviceNameResponder(resp *http.Response) (result Device, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// GetMAMUserDevices get devices for a user. +// +// hostName is location hostName for the tenant userName is user unique Name +// filter is the filter to apply on the operation. selectParameter is select +// specific fields in entity. 
+func (client ManagementClient) GetMAMUserDevices(hostName string, userName string, filter string, top *int32, selectParameter string) (result DeviceCollection, err error) { + req, err := client.GetMAMUserDevicesPreparer(hostName, userName, filter, top, selectParameter) + if err != nil { + return result, autorest.NewErrorWithError(err, "intune.ManagementClient", "GetMAMUserDevices", nil, "Failure preparing request") + } + + resp, err := client.GetMAMUserDevicesSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "intune.ManagementClient", "GetMAMUserDevices", resp, "Failure sending request") + } + + result, err = client.GetMAMUserDevicesResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "intune.ManagementClient", "GetMAMUserDevices", resp, "Failure responding to request") + } + + return +} + +// GetMAMUserDevicesPreparer prepares the GetMAMUserDevices request. +func (client ManagementClient) GetMAMUserDevicesPreparer(hostName string, userName string, filter string, top *int32, selectParameter string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "hostName": autorest.Encode("path", hostName), + "userName": autorest.Encode("path", userName), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + if len(filter) > 0 { + queryParameters["$filter"] = autorest.Encode("query", filter) + } + if top != nil { + queryParameters["$top"] = autorest.Encode("query", *top) + } + if len(selectParameter) > 0 { + queryParameters["$select"] = autorest.Encode("query", selectParameter) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/providers/Microsoft.Intune/locations/{hostName}/users/{userName}/devices", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// 
GetMAMUserDevicesSender sends the GetMAMUserDevices request. The method will close the +// http.Response Body if it receives an error. +func (client ManagementClient) GetMAMUserDevicesSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetMAMUserDevicesResponder handles the response to the GetMAMUserDevices request. The method always +// closes the http.Response Body. +func (client ManagementClient) GetMAMUserDevicesResponder(resp *http.Response) (result DeviceCollection, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// GetMAMUserDevicesNextResults retrieves the next set of results, if any. +func (client ManagementClient) GetMAMUserDevicesNextResults(lastResults DeviceCollection) (result DeviceCollection, err error) { + req, err := lastResults.DeviceCollectionPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "intune.ManagementClient", "GetMAMUserDevices", nil, "Failure preparing next results request request") + } + if req == nil { + return + } + + resp, err := client.GetMAMUserDevicesSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "intune.ManagementClient", "GetMAMUserDevices", resp, "Failure sending next results request request") + } + + result, err = client.GetMAMUserDevicesResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "intune.ManagementClient", "GetMAMUserDevices", resp, "Failure responding to next results request request") + } + + return +} + +// GetMAMUserFlaggedEnrolledApps returns Intune flagged enrolled app +// collection for the User +// +// hostName is location hostName for the tenant userName is user name for the +// tenant filter is the filter to apply on 
the operation. selectParameter is +// select specific fields in entity. +func (client ManagementClient) GetMAMUserFlaggedEnrolledApps(hostName string, userName string, filter string, top *int32, selectParameter string) (result FlaggedEnrolledAppCollection, err error) { + req, err := client.GetMAMUserFlaggedEnrolledAppsPreparer(hostName, userName, filter, top, selectParameter) + if err != nil { + return result, autorest.NewErrorWithError(err, "intune.ManagementClient", "GetMAMUserFlaggedEnrolledApps", nil, "Failure preparing request") + } + + resp, err := client.GetMAMUserFlaggedEnrolledAppsSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "intune.ManagementClient", "GetMAMUserFlaggedEnrolledApps", resp, "Failure sending request") + } + + result, err = client.GetMAMUserFlaggedEnrolledAppsResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "intune.ManagementClient", "GetMAMUserFlaggedEnrolledApps", resp, "Failure responding to request") + } + + return +} + +// GetMAMUserFlaggedEnrolledAppsPreparer prepares the GetMAMUserFlaggedEnrolledApps request. 
+func (client ManagementClient) GetMAMUserFlaggedEnrolledAppsPreparer(hostName string, userName string, filter string, top *int32, selectParameter string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "hostName": autorest.Encode("path", hostName), + "userName": autorest.Encode("path", userName), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + if len(filter) > 0 { + queryParameters["$filter"] = autorest.Encode("query", filter) + } + if top != nil { + queryParameters["$top"] = autorest.Encode("query", *top) + } + if len(selectParameter) > 0 { + queryParameters["$select"] = autorest.Encode("query", selectParameter) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/providers/Microsoft.Intune/locations/{hostName}/flaggedUsers/{userName}/flaggedEnrolledApps", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetMAMUserFlaggedEnrolledAppsSender sends the GetMAMUserFlaggedEnrolledApps request. The method will close the +// http.Response Body if it receives an error. +func (client ManagementClient) GetMAMUserFlaggedEnrolledAppsSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetMAMUserFlaggedEnrolledAppsResponder handles the response to the GetMAMUserFlaggedEnrolledApps request. The method always +// closes the http.Response Body. +func (client ManagementClient) GetMAMUserFlaggedEnrolledAppsResponder(resp *http.Response) (result FlaggedEnrolledAppCollection, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// GetMAMUserFlaggedEnrolledAppsNextResults retrieves the next set of results, if any. 
+func (client ManagementClient) GetMAMUserFlaggedEnrolledAppsNextResults(lastResults FlaggedEnrolledAppCollection) (result FlaggedEnrolledAppCollection, err error) { + req, err := lastResults.FlaggedEnrolledAppCollectionPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "intune.ManagementClient", "GetMAMUserFlaggedEnrolledApps", nil, "Failure preparing next results request request") + } + if req == nil { + return + } + + resp, err := client.GetMAMUserFlaggedEnrolledAppsSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "intune.ManagementClient", "GetMAMUserFlaggedEnrolledApps", resp, "Failure sending next results request request") + } + + result, err = client.GetMAMUserFlaggedEnrolledAppsResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "intune.ManagementClient", "GetMAMUserFlaggedEnrolledApps", resp, "Failure responding to next results request request") + } + + return +} + +// GetOperationResults returns operationResults. +// +// hostName is location hostName for the tenant filter is the filter to apply +// on the operation. selectParameter is select specific fields in entity. 
+func (client ManagementClient) GetOperationResults(hostName string, filter string, top *int32, selectParameter string) (result OperationResultCollection, err error) { + req, err := client.GetOperationResultsPreparer(hostName, filter, top, selectParameter) + if err != nil { + return result, autorest.NewErrorWithError(err, "intune.ManagementClient", "GetOperationResults", nil, "Failure preparing request") + } + + resp, err := client.GetOperationResultsSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "intune.ManagementClient", "GetOperationResults", resp, "Failure sending request") + } + + result, err = client.GetOperationResultsResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "intune.ManagementClient", "GetOperationResults", resp, "Failure responding to request") + } + + return +} + +// GetOperationResultsPreparer prepares the GetOperationResults request. +func (client ManagementClient) GetOperationResultsPreparer(hostName string, filter string, top *int32, selectParameter string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "hostName": autorest.Encode("path", hostName), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + if len(filter) > 0 { + queryParameters["$filter"] = autorest.Encode("query", filter) + } + if top != nil { + queryParameters["$top"] = autorest.Encode("query", *top) + } + if len(selectParameter) > 0 { + queryParameters["$select"] = autorest.Encode("query", selectParameter) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/providers/Microsoft.Intune/locations/{hostName}/operationResults", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetOperationResultsSender sends the GetOperationResults request. 
The method will close the +// http.Response Body if it receives an error. +func (client ManagementClient) GetOperationResultsSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetOperationResultsResponder handles the response to the GetOperationResults request. The method always +// closes the http.Response Body. +func (client ManagementClient) GetOperationResultsResponder(resp *http.Response) (result OperationResultCollection, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// GetOperationResultsNextResults retrieves the next set of results, if any. +func (client ManagementClient) GetOperationResultsNextResults(lastResults OperationResultCollection) (result OperationResultCollection, err error) { + req, err := lastResults.OperationResultCollectionPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "intune.ManagementClient", "GetOperationResults", nil, "Failure preparing next results request request") + } + if req == nil { + return + } + + resp, err := client.GetOperationResultsSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "intune.ManagementClient", "GetOperationResults", resp, "Failure sending next results request request") + } + + result, err = client.GetOperationResultsResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "intune.ManagementClient", "GetOperationResults", resp, "Failure responding to next results request request") + } + + return +} + +// WipeMAMUserDevice wipe a device for a user. 
+// +// hostName is location hostName for the tenant userName is unique user name +// deviceName is device name +func (client ManagementClient) WipeMAMUserDevice(hostName string, userName string, deviceName string) (result WipeDeviceOperationResult, err error) { + req, err := client.WipeMAMUserDevicePreparer(hostName, userName, deviceName) + if err != nil { + return result, autorest.NewErrorWithError(err, "intune.ManagementClient", "WipeMAMUserDevice", nil, "Failure preparing request") + } + + resp, err := client.WipeMAMUserDeviceSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "intune.ManagementClient", "WipeMAMUserDevice", resp, "Failure sending request") + } + + result, err = client.WipeMAMUserDeviceResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "intune.ManagementClient", "WipeMAMUserDevice", resp, "Failure responding to request") + } + + return +} + +// WipeMAMUserDevicePreparer prepares the WipeMAMUserDevice request. +func (client ManagementClient) WipeMAMUserDevicePreparer(hostName string, userName string, deviceName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "deviceName": autorest.Encode("path", deviceName), + "hostName": autorest.Encode("path", hostName), + "userName": autorest.Encode("path", userName), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/providers/Microsoft.Intune/locations/{hostName}/users/{userName}/devices/{deviceName}/wipe", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// WipeMAMUserDeviceSender sends the WipeMAMUserDevice request. The method will close the +// http.Response Body if it receives an error. 
+func (client ManagementClient) WipeMAMUserDeviceSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// WipeMAMUserDeviceResponder handles the response to the WipeMAMUserDevice request. The method always +// closes the http.Response Body. +func (client ManagementClient) WipeMAMUserDeviceResponder(resp *http.Response) (result WipeDeviceOperationResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/intune/ios.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/intune/ios.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/intune/ios.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/intune/ios.go 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,827 @@ +package intune + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. 
+ +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "net/http" +) + +// IosClient is the microsoft.Intune Resource provider Api features in the +// swagger-2.0 specification +type IosClient struct { + ManagementClient +} + +// NewIosClient creates an instance of the IosClient client. +func NewIosClient() IosClient { + return NewIosClientWithBaseURI(DefaultBaseURI) +} + +// NewIosClientWithBaseURI creates an instance of the IosClient client. +func NewIosClientWithBaseURI(baseURI string) IosClient { + return IosClient{NewWithBaseURI(baseURI)} +} + +// AddAppForMAMPolicy add app to an iOSMAMPolicy. +// +// hostName is location hostName for the tenant policyName is unique name for +// the policy appName is application unique Name parameters is parameters +// supplied to add an app to an ios policy. +func (client IosClient) AddAppForMAMPolicy(hostName string, policyName string, appName string, parameters MAMPolicyAppIDOrGroupIDPayload) (result autorest.Response, err error) { + req, err := client.AddAppForMAMPolicyPreparer(hostName, policyName, appName, parameters) + if err != nil { + return result, autorest.NewErrorWithError(err, "intune.IosClient", "AddAppForMAMPolicy", nil, "Failure preparing request") + } + + resp, err := client.AddAppForMAMPolicySender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "intune.IosClient", "AddAppForMAMPolicy", resp, "Failure sending request") + } + + result, err = client.AddAppForMAMPolicyResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "intune.IosClient", "AddAppForMAMPolicy", resp, "Failure responding to request") + } + + return +} + +// AddAppForMAMPolicyPreparer prepares the AddAppForMAMPolicy request. 
+func (client IosClient) AddAppForMAMPolicyPreparer(hostName string, policyName string, appName string, parameters MAMPolicyAppIDOrGroupIDPayload) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "appName": autorest.Encode("path", appName), + "hostName": autorest.Encode("path", hostName), + "policyName": autorest.Encode("path", policyName), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/providers/Microsoft.Intune/locations/{hostName}/iosPolicies/{policyName}/apps/{appName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// AddAppForMAMPolicySender sends the AddAppForMAMPolicy request. The method will close the +// http.Response Body if it receives an error. +func (client IosClient) AddAppForMAMPolicySender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// AddAppForMAMPolicyResponder handles the response to the AddAppForMAMPolicy request. The method always +// closes the http.Response Body. +func (client IosClient) AddAppForMAMPolicyResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// AddGroupForMAMPolicy add group to an iOSMAMPolicy. +// +// hostName is location hostName for the tenant policyName is unique name for +// the policy groupID is group Id parameters is parameters supplied to the +// Create or update app to an android policy operation. 
+func (client IosClient) AddGroupForMAMPolicy(hostName string, policyName string, groupID string, parameters MAMPolicyAppIDOrGroupIDPayload) (result autorest.Response, err error) { + req, err := client.AddGroupForMAMPolicyPreparer(hostName, policyName, groupID, parameters) + if err != nil { + return result, autorest.NewErrorWithError(err, "intune.IosClient", "AddGroupForMAMPolicy", nil, "Failure preparing request") + } + + resp, err := client.AddGroupForMAMPolicySender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "intune.IosClient", "AddGroupForMAMPolicy", resp, "Failure sending request") + } + + result, err = client.AddGroupForMAMPolicyResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "intune.IosClient", "AddGroupForMAMPolicy", resp, "Failure responding to request") + } + + return +} + +// AddGroupForMAMPolicyPreparer prepares the AddGroupForMAMPolicy request. +func (client IosClient) AddGroupForMAMPolicyPreparer(hostName string, policyName string, groupID string, parameters MAMPolicyAppIDOrGroupIDPayload) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "groupId": autorest.Encode("path", groupID), + "hostName": autorest.Encode("path", hostName), + "policyName": autorest.Encode("path", policyName), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/providers/Microsoft.Intune/locations/{hostName}/iosPolicies/{policyName}/groups/{groupId}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// AddGroupForMAMPolicySender sends the AddGroupForMAMPolicy request. The method will close the +// http.Response Body if it receives an error. 
+func (client IosClient) AddGroupForMAMPolicySender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// AddGroupForMAMPolicyResponder handles the response to the AddGroupForMAMPolicy request. The method always +// closes the http.Response Body. +func (client IosClient) AddGroupForMAMPolicyResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// CreateOrUpdateMAMPolicy creates or updates iOSMAMPolicy. +// +// hostName is location hostName for the tenant policyName is unique name for +// the policy parameters is parameters supplied to the Create or update an +// android policy operation. +func (client IosClient) CreateOrUpdateMAMPolicy(hostName string, policyName string, parameters IOSMAMPolicy) (result IOSMAMPolicy, err error) { + req, err := client.CreateOrUpdateMAMPolicyPreparer(hostName, policyName, parameters) + if err != nil { + return result, autorest.NewErrorWithError(err, "intune.IosClient", "CreateOrUpdateMAMPolicy", nil, "Failure preparing request") + } + + resp, err := client.CreateOrUpdateMAMPolicySender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "intune.IosClient", "CreateOrUpdateMAMPolicy", resp, "Failure sending request") + } + + result, err = client.CreateOrUpdateMAMPolicyResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "intune.IosClient", "CreateOrUpdateMAMPolicy", resp, "Failure responding to request") + } + + return +} + +// CreateOrUpdateMAMPolicyPreparer prepares the CreateOrUpdateMAMPolicy request. 
+func (client IosClient) CreateOrUpdateMAMPolicyPreparer(hostName string, policyName string, parameters IOSMAMPolicy) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "hostName": autorest.Encode("path", hostName), + "policyName": autorest.Encode("path", policyName), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/providers/Microsoft.Intune/locations/{hostName}/iosPolicies/{policyName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// CreateOrUpdateMAMPolicySender sends the CreateOrUpdateMAMPolicy request. The method will close the +// http.Response Body if it receives an error. +func (client IosClient) CreateOrUpdateMAMPolicySender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// CreateOrUpdateMAMPolicyResponder handles the response to the CreateOrUpdateMAMPolicy request. The method always +// closes the http.Response Body. 
+func (client IosClient) CreateOrUpdateMAMPolicyResponder(resp *http.Response) (result IOSMAMPolicy, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// DeleteAppForMAMPolicy delete App for Ios Policy +// +// hostName is location hostName for the tenant policyName is unique name for +// the policy appName is application unique Name +func (client IosClient) DeleteAppForMAMPolicy(hostName string, policyName string, appName string) (result autorest.Response, err error) { + req, err := client.DeleteAppForMAMPolicyPreparer(hostName, policyName, appName) + if err != nil { + return result, autorest.NewErrorWithError(err, "intune.IosClient", "DeleteAppForMAMPolicy", nil, "Failure preparing request") + } + + resp, err := client.DeleteAppForMAMPolicySender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "intune.IosClient", "DeleteAppForMAMPolicy", resp, "Failure sending request") + } + + result, err = client.DeleteAppForMAMPolicyResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "intune.IosClient", "DeleteAppForMAMPolicy", resp, "Failure responding to request") + } + + return +} + +// DeleteAppForMAMPolicyPreparer prepares the DeleteAppForMAMPolicy request. 
+func (client IosClient) DeleteAppForMAMPolicyPreparer(hostName string, policyName string, appName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "appName": autorest.Encode("path", appName), + "hostName": autorest.Encode("path", hostName), + "policyName": autorest.Encode("path", policyName), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/providers/Microsoft.Intune/locations/{hostName}/iosPolicies/{policyName}/apps/{appName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// DeleteAppForMAMPolicySender sends the DeleteAppForMAMPolicy request. The method will close the +// http.Response Body if it receives an error. +func (client IosClient) DeleteAppForMAMPolicySender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// DeleteAppForMAMPolicyResponder handles the response to the DeleteAppForMAMPolicy request. The method always +// closes the http.Response Body. 
+func (client IosClient) DeleteAppForMAMPolicyResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// DeleteGroupForMAMPolicy delete Group for iOS Policy +// +// hostName is location hostName for the tenant policyName is unique name for +// the policy groupID is application unique Name +func (client IosClient) DeleteGroupForMAMPolicy(hostName string, policyName string, groupID string) (result autorest.Response, err error) { + req, err := client.DeleteGroupForMAMPolicyPreparer(hostName, policyName, groupID) + if err != nil { + return result, autorest.NewErrorWithError(err, "intune.IosClient", "DeleteGroupForMAMPolicy", nil, "Failure preparing request") + } + + resp, err := client.DeleteGroupForMAMPolicySender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "intune.IosClient", "DeleteGroupForMAMPolicy", resp, "Failure sending request") + } + + result, err = client.DeleteGroupForMAMPolicyResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "intune.IosClient", "DeleteGroupForMAMPolicy", resp, "Failure responding to request") + } + + return +} + +// DeleteGroupForMAMPolicyPreparer prepares the DeleteGroupForMAMPolicy request. 
+func (client IosClient) DeleteGroupForMAMPolicyPreparer(hostName string, policyName string, groupID string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "groupId": autorest.Encode("path", groupID), + "hostName": autorest.Encode("path", hostName), + "policyName": autorest.Encode("path", policyName), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/providers/Microsoft.Intune/locations/{hostName}/iosPolicies/{policyName}/groups/{groupId}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// DeleteGroupForMAMPolicySender sends the DeleteGroupForMAMPolicy request. The method will close the +// http.Response Body if it receives an error. +func (client IosClient) DeleteGroupForMAMPolicySender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// DeleteGroupForMAMPolicyResponder handles the response to the DeleteGroupForMAMPolicy request. The method always +// closes the http.Response Body. 
+func (client IosClient) DeleteGroupForMAMPolicyResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// DeleteMAMPolicy delete Ios Policy +// +// hostName is location hostName for the tenant policyName is unique name for +// the policy +func (client IosClient) DeleteMAMPolicy(hostName string, policyName string) (result autorest.Response, err error) { + req, err := client.DeleteMAMPolicyPreparer(hostName, policyName) + if err != nil { + return result, autorest.NewErrorWithError(err, "intune.IosClient", "DeleteMAMPolicy", nil, "Failure preparing request") + } + + resp, err := client.DeleteMAMPolicySender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "intune.IosClient", "DeleteMAMPolicy", resp, "Failure sending request") + } + + result, err = client.DeleteMAMPolicyResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "intune.IosClient", "DeleteMAMPolicy", resp, "Failure responding to request") + } + + return +} + +// DeleteMAMPolicyPreparer prepares the DeleteMAMPolicy request. 
+func (client IosClient) DeleteMAMPolicyPreparer(hostName string, policyName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "hostName": autorest.Encode("path", hostName), + "policyName": autorest.Encode("path", policyName), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/providers/Microsoft.Intune/locations/{hostName}/iosPolicies/{policyName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// DeleteMAMPolicySender sends the DeleteMAMPolicy request. The method will close the +// http.Response Body if it receives an error. +func (client IosClient) DeleteMAMPolicySender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// DeleteMAMPolicyResponder handles the response to the DeleteMAMPolicy request. The method always +// closes the http.Response Body. +func (client IosClient) DeleteMAMPolicyResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// GetAppForMAMPolicy get apps for an iOSMAMPolicy. +// +// hostName is location hostName for the tenant policyName is unique name for +// the policy filter is the filter to apply on the operation. selectParameter +// is select specific fields in entity. 
+func (client IosClient) GetAppForMAMPolicy(hostName string, policyName string, filter string, top *int32, selectParameter string) (result ApplicationCollection, err error) { + req, err := client.GetAppForMAMPolicyPreparer(hostName, policyName, filter, top, selectParameter) + if err != nil { + return result, autorest.NewErrorWithError(err, "intune.IosClient", "GetAppForMAMPolicy", nil, "Failure preparing request") + } + + resp, err := client.GetAppForMAMPolicySender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "intune.IosClient", "GetAppForMAMPolicy", resp, "Failure sending request") + } + + result, err = client.GetAppForMAMPolicyResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "intune.IosClient", "GetAppForMAMPolicy", resp, "Failure responding to request") + } + + return +} + +// GetAppForMAMPolicyPreparer prepares the GetAppForMAMPolicy request. +func (client IosClient) GetAppForMAMPolicyPreparer(hostName string, policyName string, filter string, top *int32, selectParameter string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "hostName": autorest.Encode("path", hostName), + "policyName": autorest.Encode("path", policyName), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + if len(filter) > 0 { + queryParameters["$filter"] = autorest.Encode("query", filter) + } + if top != nil { + queryParameters["$top"] = autorest.Encode("query", *top) + } + if len(selectParameter) > 0 { + queryParameters["$select"] = autorest.Encode("query", selectParameter) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/providers/Microsoft.Intune/locations/{hostName}/iosPolicies/{policyName}/apps", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// 
GetAppForMAMPolicySender sends the GetAppForMAMPolicy request. The method will close the +// http.Response Body if it receives an error. +func (client IosClient) GetAppForMAMPolicySender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetAppForMAMPolicyResponder handles the response to the GetAppForMAMPolicy request. The method always +// closes the http.Response Body. +func (client IosClient) GetAppForMAMPolicyResponder(resp *http.Response) (result ApplicationCollection, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// GetAppForMAMPolicyNextResults retrieves the next set of results, if any. +func (client IosClient) GetAppForMAMPolicyNextResults(lastResults ApplicationCollection) (result ApplicationCollection, err error) { + req, err := lastResults.ApplicationCollectionPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "intune.IosClient", "GetAppForMAMPolicy", nil, "Failure preparing next results request request") + } + if req == nil { + return + } + + resp, err := client.GetAppForMAMPolicySender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "intune.IosClient", "GetAppForMAMPolicy", resp, "Failure sending next results request request") + } + + result, err = client.GetAppForMAMPolicyResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "intune.IosClient", "GetAppForMAMPolicy", resp, "Failure responding to next results request request") + } + + return +} + +// GetGroupsForMAMPolicy returns groups for a given iOSMAMPolicy. 
+// +// hostName is location hostName for the tenant policyName is policy name for +// the tenant +func (client IosClient) GetGroupsForMAMPolicy(hostName string, policyName string) (result GroupsCollection, err error) { + req, err := client.GetGroupsForMAMPolicyPreparer(hostName, policyName) + if err != nil { + return result, autorest.NewErrorWithError(err, "intune.IosClient", "GetGroupsForMAMPolicy", nil, "Failure preparing request") + } + + resp, err := client.GetGroupsForMAMPolicySender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "intune.IosClient", "GetGroupsForMAMPolicy", resp, "Failure sending request") + } + + result, err = client.GetGroupsForMAMPolicyResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "intune.IosClient", "GetGroupsForMAMPolicy", resp, "Failure responding to request") + } + + return +} + +// GetGroupsForMAMPolicyPreparer prepares the GetGroupsForMAMPolicy request. +func (client IosClient) GetGroupsForMAMPolicyPreparer(hostName string, policyName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "hostName": autorest.Encode("path", hostName), + "policyName": autorest.Encode("path", policyName), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/providers/Microsoft.Intune/locations/{hostName}/iosPolicies/{policyName}/groups", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetGroupsForMAMPolicySender sends the GetGroupsForMAMPolicy request. The method will close the +// http.Response Body if it receives an error. 
+func (client IosClient) GetGroupsForMAMPolicySender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetGroupsForMAMPolicyResponder handles the response to the GetGroupsForMAMPolicy request. The method always +// closes the http.Response Body. +func (client IosClient) GetGroupsForMAMPolicyResponder(resp *http.Response) (result GroupsCollection, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// GetGroupsForMAMPolicyNextResults retrieves the next set of results, if any. +func (client IosClient) GetGroupsForMAMPolicyNextResults(lastResults GroupsCollection) (result GroupsCollection, err error) { + req, err := lastResults.GroupsCollectionPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "intune.IosClient", "GetGroupsForMAMPolicy", nil, "Failure preparing next results request request") + } + if req == nil { + return + } + + resp, err := client.GetGroupsForMAMPolicySender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "intune.IosClient", "GetGroupsForMAMPolicy", resp, "Failure sending next results request request") + } + + result, err = client.GetGroupsForMAMPolicyResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "intune.IosClient", "GetGroupsForMAMPolicy", resp, "Failure responding to next results request request") + } + + return +} + +// GetMAMPolicies returns Intune iOSPolicies. +// +// hostName is location hostName for the tenant filter is the filter to apply +// on the operation. selectParameter is select specific fields in entity. 
+func (client IosClient) GetMAMPolicies(hostName string, filter string, top *int32, selectParameter string) (result IOSMAMPolicyCollection, err error) { + req, err := client.GetMAMPoliciesPreparer(hostName, filter, top, selectParameter) + if err != nil { + return result, autorest.NewErrorWithError(err, "intune.IosClient", "GetMAMPolicies", nil, "Failure preparing request") + } + + resp, err := client.GetMAMPoliciesSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "intune.IosClient", "GetMAMPolicies", resp, "Failure sending request") + } + + result, err = client.GetMAMPoliciesResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "intune.IosClient", "GetMAMPolicies", resp, "Failure responding to request") + } + + return +} + +// GetMAMPoliciesPreparer prepares the GetMAMPolicies request. +func (client IosClient) GetMAMPoliciesPreparer(hostName string, filter string, top *int32, selectParameter string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "hostName": autorest.Encode("path", hostName), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + if len(filter) > 0 { + queryParameters["$filter"] = autorest.Encode("query", filter) + } + if top != nil { + queryParameters["$top"] = autorest.Encode("query", *top) + } + if len(selectParameter) > 0 { + queryParameters["$select"] = autorest.Encode("query", selectParameter) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/providers/Microsoft.Intune/locations/{hostName}/iosPolicies", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetMAMPoliciesSender sends the GetMAMPolicies request. The method will close the +// http.Response Body if it receives an error. 
+func (client IosClient) GetMAMPoliciesSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetMAMPoliciesResponder handles the response to the GetMAMPolicies request. The method always +// closes the http.Response Body. +func (client IosClient) GetMAMPoliciesResponder(resp *http.Response) (result IOSMAMPolicyCollection, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// GetMAMPoliciesNextResults retrieves the next set of results, if any. +func (client IosClient) GetMAMPoliciesNextResults(lastResults IOSMAMPolicyCollection) (result IOSMAMPolicyCollection, err error) { + req, err := lastResults.IOSMAMPolicyCollectionPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "intune.IosClient", "GetMAMPolicies", nil, "Failure preparing next results request request") + } + if req == nil { + return + } + + resp, err := client.GetMAMPoliciesSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "intune.IosClient", "GetMAMPolicies", resp, "Failure sending next results request request") + } + + result, err = client.GetMAMPoliciesResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "intune.IosClient", "GetMAMPolicies", resp, "Failure responding to next results request request") + } + + return +} + +// GetMAMPolicyByName returns Intune iOS policies. +// +// hostName is location hostName for the tenant policyName is unique name for +// the policy selectParameter is select specific fields in entity. 
+func (client IosClient) GetMAMPolicyByName(hostName string, policyName string, selectParameter string) (result IOSMAMPolicy, err error) { + req, err := client.GetMAMPolicyByNamePreparer(hostName, policyName, selectParameter) + if err != nil { + return result, autorest.NewErrorWithError(err, "intune.IosClient", "GetMAMPolicyByName", nil, "Failure preparing request") + } + + resp, err := client.GetMAMPolicyByNameSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "intune.IosClient", "GetMAMPolicyByName", resp, "Failure sending request") + } + + result, err = client.GetMAMPolicyByNameResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "intune.IosClient", "GetMAMPolicyByName", resp, "Failure responding to request") + } + + return +} + +// GetMAMPolicyByNamePreparer prepares the GetMAMPolicyByName request. +func (client IosClient) GetMAMPolicyByNamePreparer(hostName string, policyName string, selectParameter string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "hostName": autorest.Encode("path", hostName), + "policyName": autorest.Encode("path", policyName), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + if len(selectParameter) > 0 { + queryParameters["$select"] = autorest.Encode("query", selectParameter) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/providers/Microsoft.Intune/locations/{hostName}/iosPolicies/{policyName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetMAMPolicyByNameSender sends the GetMAMPolicyByName request. The method will close the +// http.Response Body if it receives an error. 
+func (client IosClient) GetMAMPolicyByNameSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetMAMPolicyByNameResponder handles the response to the GetMAMPolicyByName request. The method always +// closes the http.Response Body. +func (client IosClient) GetMAMPolicyByNameResponder(resp *http.Response) (result IOSMAMPolicy, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// PatchMAMPolicy patch an iOSMAMPolicy. +// +// hostName is location hostName for the tenant policyName is unique name for +// the policy parameters is parameters supplied to the Create or update an +// android policy operation. +func (client IosClient) PatchMAMPolicy(hostName string, policyName string, parameters IOSMAMPolicy) (result IOSMAMPolicy, err error) { + req, err := client.PatchMAMPolicyPreparer(hostName, policyName, parameters) + if err != nil { + return result, autorest.NewErrorWithError(err, "intune.IosClient", "PatchMAMPolicy", nil, "Failure preparing request") + } + + resp, err := client.PatchMAMPolicySender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "intune.IosClient", "PatchMAMPolicy", resp, "Failure sending request") + } + + result, err = client.PatchMAMPolicyResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "intune.IosClient", "PatchMAMPolicy", resp, "Failure responding to request") + } + + return +} + +// PatchMAMPolicyPreparer prepares the PatchMAMPolicy request. 
+func (client IosClient) PatchMAMPolicyPreparer(hostName string, policyName string, parameters IOSMAMPolicy) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "hostName": autorest.Encode("path", hostName), + "policyName": autorest.Encode("path", policyName), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPatch(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/providers/Microsoft.Intune/locations/{hostName}/iosPolicies/{policyName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// PatchMAMPolicySender sends the PatchMAMPolicy request. The method will close the +// http.Response Body if it receives an error. +func (client IosClient) PatchMAMPolicySender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// PatchMAMPolicyResponder handles the response to the PatchMAMPolicy request. The method always +// closes the http.Response Body. +func (client IosClient) PatchMAMPolicyResponder(resp *http.Response) (result IOSMAMPolicy, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/intune/models.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/intune/models.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/intune/models.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/intune/models.go 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,690 @@ +package intune + +// Copyright (c) Microsoft and contributors. 
All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/date" + "github.com/Azure/go-autorest/autorest/to" + "net/http" +) + +// AppSharingFromLevel enumerates the values for app sharing from level. +type AppSharingFromLevel string + +const ( + // AllApps specifies the all apps state for app sharing from level. + AllApps AppSharingFromLevel = "allApps" + // None specifies the none state for app sharing from level. + None AppSharingFromLevel = "none" + // PolicyManagedApps specifies the policy managed apps state for app + // sharing from level. + PolicyManagedApps AppSharingFromLevel = "policyManagedApps" +) + +// AppSharingToLevel enumerates the values for app sharing to level. +type AppSharingToLevel string + +const ( + // AppSharingToLevelAllApps specifies the app sharing to level all apps + // state for app sharing to level. + AppSharingToLevelAllApps AppSharingToLevel = "allApps" + // AppSharingToLevelNone specifies the app sharing to level none state for + // app sharing to level. + AppSharingToLevelNone AppSharingToLevel = "none" + // AppSharingToLevelPolicyManagedApps specifies the app sharing to level + // policy managed apps state for app sharing to level. 
+ AppSharingToLevelPolicyManagedApps AppSharingToLevel = "policyManagedApps" +) + +// Authentication enumerates the values for authentication. +type Authentication string + +const ( + // NotRequired specifies the not required state for authentication. + NotRequired Authentication = "notRequired" + // Required specifies the required state for authentication. + Required Authentication = "required" +) + +// ClipboardSharingLevel enumerates the values for clipboard sharing level. +type ClipboardSharingLevel string + +const ( + // ClipboardSharingLevelAllApps specifies the clipboard sharing level all + // apps state for clipboard sharing level. + ClipboardSharingLevelAllApps ClipboardSharingLevel = "allApps" + // ClipboardSharingLevelBlocked specifies the clipboard sharing level + // blocked state for clipboard sharing level. + ClipboardSharingLevelBlocked ClipboardSharingLevel = "blocked" + // ClipboardSharingLevelPolicyManagedApps specifies the clipboard sharing + // level policy managed apps state for clipboard sharing level. + ClipboardSharingLevelPolicyManagedApps ClipboardSharingLevel = "policyManagedApps" + // ClipboardSharingLevelPolicyManagedAppsWithPasteIn specifies the + // clipboard sharing level policy managed apps with paste in state for + // clipboard sharing level. + ClipboardSharingLevelPolicyManagedAppsWithPasteIn ClipboardSharingLevel = "policyManagedAppsWithPasteIn" +) + +// DataBackup enumerates the values for data backup. +type DataBackup string + +const ( + // Allow specifies the allow state for data backup. + Allow DataBackup = "allow" + // Block specifies the block state for data backup. + Block DataBackup = "block" +) + +// DeviceCompliance enumerates the values for device compliance. +type DeviceCompliance string + +const ( + // Disable specifies the disable state for device compliance. + Disable DeviceCompliance = "disable" + // Enable specifies the enable state for device compliance. 
+ Enable DeviceCompliance = "enable" +) + +// FileEncryption enumerates the values for file encryption. +type FileEncryption string + +const ( + // FileEncryptionNotRequired specifies the file encryption not required + // state for file encryption. + FileEncryptionNotRequired FileEncryption = "notRequired" + // FileEncryptionRequired specifies the file encryption required state for + // file encryption. + FileEncryptionRequired FileEncryption = "required" +) + +// FileEncryptionLevel enumerates the values for file encryption level. +type FileEncryptionLevel string + +const ( + // AfterDeviceRestart specifies the after device restart state for file + // encryption level. + AfterDeviceRestart FileEncryptionLevel = "afterDeviceRestart" + // DeviceLocked specifies the device locked state for file encryption + // level. + DeviceLocked FileEncryptionLevel = "deviceLocked" + // DeviceLockedExceptFilesOpen specifies the device locked except files + // open state for file encryption level. + DeviceLockedExceptFilesOpen FileEncryptionLevel = "deviceLockedExceptFilesOpen" + // UseDeviceSettings specifies the use device settings state for file + // encryption level. + UseDeviceSettings FileEncryptionLevel = "useDeviceSettings" +) + +// FileSharingSaveAs enumerates the values for file sharing save as. +type FileSharingSaveAs string + +const ( + // FileSharingSaveAsAllow specifies the file sharing save as allow state + // for file sharing save as. + FileSharingSaveAsAllow FileSharingSaveAs = "allow" + // FileSharingSaveAsBlock specifies the file sharing save as block state + // for file sharing save as. + FileSharingSaveAsBlock FileSharingSaveAs = "block" +) + +// GroupStatus enumerates the values for group status. +type GroupStatus string + +const ( + // NotTargeted specifies the not targeted state for group status. + NotTargeted GroupStatus = "notTargeted" + // Targeted specifies the targeted state for group status. 
+ Targeted GroupStatus = "targeted" +) + +// ManagedBrowser enumerates the values for managed browser. +type ManagedBrowser string + +const ( + // ManagedBrowserNotRequired specifies the managed browser not required + // state for managed browser. + ManagedBrowserNotRequired ManagedBrowser = "notRequired" + // ManagedBrowserRequired specifies the managed browser required state for + // managed browser. + ManagedBrowserRequired ManagedBrowser = "required" +) + +// Pin enumerates the values for pin. +type Pin string + +const ( + // PinNotRequired specifies the pin not required state for pin. + PinNotRequired Pin = "notRequired" + // PinRequired specifies the pin required state for pin. + PinRequired Pin = "required" +) + +// Platform enumerates the values for platform. +type Platform string + +const ( + // Android specifies the android state for platform. + Android Platform = "android" + // Ios specifies the ios state for platform. + Ios Platform = "ios" + // Windows specifies the windows state for platform. + Windows Platform = "windows" +) + +// ScreenCapture enumerates the values for screen capture. +type ScreenCapture string + +const ( + // ScreenCaptureAllow specifies the screen capture allow state for screen + // capture. + ScreenCaptureAllow ScreenCapture = "allow" + // ScreenCaptureBlock specifies the screen capture block state for screen + // capture. + ScreenCaptureBlock ScreenCapture = "block" +) + +// TouchID enumerates the values for touch id. +type TouchID string + +const ( + // TouchIDDisable specifies the touch id disable state for touch id. + TouchIDDisable TouchID = "disable" + // TouchIDEnable specifies the touch id enable state for touch id. + TouchIDEnable TouchID = "enable" +) + +// AndroidMAMPolicy is android Policy entity for Intune MAM. 
+type AndroidMAMPolicy struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` + Location *string `json:"location,omitempty"` + Properties *AndroidMAMPolicyProperties `json:"properties,omitempty"` +} + +// AndroidMAMPolicyCollection is +type AndroidMAMPolicyCollection struct { + autorest.Response `json:"-"` + Value *[]AndroidMAMPolicy `json:"value,omitempty"` + Nextlink *string `json:"nextlink,omitempty"` +} + +// AndroidMAMPolicyCollectionPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. +func (client AndroidMAMPolicyCollection) AndroidMAMPolicyCollectionPreparer() (*http.Request, error) { + if client.Nextlink == nil || len(to.String(client.Nextlink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.Nextlink))) +} + +// AndroidMAMPolicyProperties is intune MAM iOS Policy Properties. 
+type AndroidMAMPolicyProperties struct { + FriendlyName *string `json:"friendlyName,omitempty"` + Description *string `json:"description,omitempty"` + AppSharingFromLevel AppSharingFromLevel `json:"appSharingFromLevel,omitempty"` + AppSharingToLevel AppSharingToLevel `json:"appSharingToLevel,omitempty"` + Authentication Authentication `json:"authentication,omitempty"` + ClipboardSharingLevel ClipboardSharingLevel `json:"clipboardSharingLevel,omitempty"` + DataBackup DataBackup `json:"dataBackup,omitempty"` + FileSharingSaveAs FileSharingSaveAs `json:"fileSharingSaveAs,omitempty"` + Pin Pin `json:"pin,omitempty"` + PinNumRetry *int32 `json:"pinNumRetry,omitempty"` + DeviceCompliance DeviceCompliance `json:"deviceCompliance,omitempty"` + ManagedBrowser ManagedBrowser `json:"managedBrowser,omitempty"` + AccessRecheckOfflineTimeout *string `json:"accessRecheckOfflineTimeout,omitempty"` + AccessRecheckOnlineTimeout *string `json:"accessRecheckOnlineTimeout,omitempty"` + OfflineWipeTimeout *string `json:"offlineWipeTimeout,omitempty"` + NumOfApps *int32 `json:"numOfApps,omitempty"` + GroupStatus GroupStatus `json:"groupStatus,omitempty"` + LastModifiedTime *date.Time `json:"lastModifiedTime,omitempty"` + ScreenCapture ScreenCapture `json:"screenCapture,omitempty"` + FileEncryption FileEncryption `json:"fileEncryption,omitempty"` +} + +// Application is application entity for Intune MAM. 
+type Application struct { + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` + Location *string `json:"location,omitempty"` + Properties *ApplicationProperties `json:"properties,omitempty"` +} + +// ApplicationCollection is +type ApplicationCollection struct { + autorest.Response `json:"-"` + Value *[]Application `json:"value,omitempty"` + Nextlink *string `json:"nextlink,omitempty"` +} + +// ApplicationCollectionPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. +func (client ApplicationCollection) ApplicationCollectionPreparer() (*http.Request, error) { + if client.Nextlink == nil || len(to.String(client.Nextlink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.Nextlink))) +} + +// ApplicationProperties is +type ApplicationProperties struct { + FriendlyName *string `json:"friendlyName,omitempty"` + Platform Platform `json:"platform,omitempty"` + AppID *string `json:"appId,omitempty"` +} + +// Device is device entity for Intune. +type Device struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` + Location *string `json:"location,omitempty"` + Properties *DeviceProperties `json:"properties,omitempty"` +} + +// DeviceCollection is +type DeviceCollection struct { + autorest.Response `json:"-"` + Value *[]Device `json:"value,omitempty"` + Nextlink *string `json:"nextlink,omitempty"` +} + +// DeviceCollectionPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. 
+func (client DeviceCollection) DeviceCollectionPreparer() (*http.Request, error) { + if client.Nextlink == nil || len(to.String(client.Nextlink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.Nextlink))) +} + +// DeviceProperties is +type DeviceProperties struct { + UserID *string `json:"userId,omitempty"` + FriendlyName *string `json:"friendlyName,omitempty"` + Platform *string `json:"platform,omitempty"` + PlatformVersion *string `json:"platformVersion,omitempty"` + DeviceType *string `json:"deviceType,omitempty"` +} + +// Error is +type Error struct { + Code *string `json:"code,omitempty"` + Message *string `json:"message,omitempty"` +} + +// FlaggedEnrolledApp is flagged Enrolled App for the given tenant. +type FlaggedEnrolledApp struct { + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` + Location *string `json:"location,omitempty"` + Properties *FlaggedEnrolledAppProperties `json:"properties,omitempty"` +} + +// FlaggedEnrolledAppCollection is flagged Enrolled App collection for the +// given tenant. +type FlaggedEnrolledAppCollection struct { + autorest.Response `json:"-"` + Value *[]FlaggedEnrolledApp `json:"value,omitempty"` + Nextlink *string `json:"nextlink,omitempty"` +} + +// FlaggedEnrolledAppCollectionPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. 
+func (client FlaggedEnrolledAppCollection) FlaggedEnrolledAppCollectionPreparer() (*http.Request, error) { + if client.Nextlink == nil || len(to.String(client.Nextlink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.Nextlink))) +} + +// FlaggedEnrolledAppError is +type FlaggedEnrolledAppError struct { + ErrorCode *string `json:"errorCode,omitempty"` + Severity *string `json:"severity,omitempty"` +} + +// FlaggedEnrolledAppProperties is +type FlaggedEnrolledAppProperties struct { + DeviceType *string `json:"deviceType,omitempty"` + FriendlyName *string `json:"friendlyName,omitempty"` + LastModifiedTime *string `json:"lastModifiedTime,omitempty"` + Platform *string `json:"platform,omitempty"` + Errors *[]FlaggedEnrolledAppError `json:"errors,omitempty"` +} + +// FlaggedUser is flagged user for the given tenant. +type FlaggedUser struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` + Location *string `json:"location,omitempty"` + Properties *FlaggedUserProperties `json:"properties,omitempty"` +} + +// FlaggedUserCollection is flagged user collection for the given tenant. +type FlaggedUserCollection struct { + autorest.Response `json:"-"` + Value *[]FlaggedUser `json:"value,omitempty"` + Nextlink *string `json:"nextlink,omitempty"` +} + +// FlaggedUserCollectionPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. 
+func (client FlaggedUserCollection) FlaggedUserCollectionPreparer() (*http.Request, error) { + if client.Nextlink == nil || len(to.String(client.Nextlink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.Nextlink))) +} + +// FlaggedUserProperties is +type FlaggedUserProperties struct { + ErrorCount *int32 `json:"errorCount,omitempty"` + FriendlyName *string `json:"friendlyName,omitempty"` +} + +// GroupItem is group entity for Intune MAM. +type GroupItem struct { + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` + Location *string `json:"location,omitempty"` + Properties *GroupProperties `json:"properties,omitempty"` +} + +// GroupProperties is +type GroupProperties struct { + FriendlyName *string `json:"friendlyName,omitempty"` +} + +// GroupsCollection is +type GroupsCollection struct { + autorest.Response `json:"-"` + Value *[]GroupItem `json:"value,omitempty"` + Nextlink *string `json:"nextlink,omitempty"` +} + +// GroupsCollectionPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. +func (client GroupsCollection) GroupsCollectionPreparer() (*http.Request, error) { + if client.Nextlink == nil || len(to.String(client.Nextlink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.Nextlink))) +} + +// IOSMAMPolicy is iOS Policy entity for Intune MAM. 
+type IOSMAMPolicy struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` + Location *string `json:"location,omitempty"` + Properties *IOSMAMPolicyProperties `json:"properties,omitempty"` +} + +// IOSMAMPolicyCollection is +type IOSMAMPolicyCollection struct { + autorest.Response `json:"-"` + Value *[]IOSMAMPolicy `json:"value,omitempty"` + Nextlink *string `json:"nextlink,omitempty"` +} + +// IOSMAMPolicyCollectionPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. +func (client IOSMAMPolicyCollection) IOSMAMPolicyCollectionPreparer() (*http.Request, error) { + if client.Nextlink == nil || len(to.String(client.Nextlink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.Nextlink))) +} + +// IOSMAMPolicyProperties is intune MAM iOS Policy Properties. 
+type IOSMAMPolicyProperties struct { + FriendlyName *string `json:"friendlyName,omitempty"` + Description *string `json:"description,omitempty"` + AppSharingFromLevel AppSharingFromLevel `json:"appSharingFromLevel,omitempty"` + AppSharingToLevel AppSharingToLevel `json:"appSharingToLevel,omitempty"` + Authentication Authentication `json:"authentication,omitempty"` + ClipboardSharingLevel ClipboardSharingLevel `json:"clipboardSharingLevel,omitempty"` + DataBackup DataBackup `json:"dataBackup,omitempty"` + FileSharingSaveAs FileSharingSaveAs `json:"fileSharingSaveAs,omitempty"` + Pin Pin `json:"pin,omitempty"` + PinNumRetry *int32 `json:"pinNumRetry,omitempty"` + DeviceCompliance DeviceCompliance `json:"deviceCompliance,omitempty"` + ManagedBrowser ManagedBrowser `json:"managedBrowser,omitempty"` + AccessRecheckOfflineTimeout *string `json:"accessRecheckOfflineTimeout,omitempty"` + AccessRecheckOnlineTimeout *string `json:"accessRecheckOnlineTimeout,omitempty"` + OfflineWipeTimeout *string `json:"offlineWipeTimeout,omitempty"` + NumOfApps *int32 `json:"numOfApps,omitempty"` + GroupStatus GroupStatus `json:"groupStatus,omitempty"` + LastModifiedTime *date.Time `json:"lastModifiedTime,omitempty"` + FileEncryptionLevel FileEncryptionLevel `json:"fileEncryptionLevel,omitempty"` + TouchID TouchID `json:"touchId,omitempty"` +} + +// Location is location entity for given tenant. 
+type Location struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` + Location *string `json:"location,omitempty"` + Properties *LocationProperties `json:"properties,omitempty"` +} + +// LocationCollection is +type LocationCollection struct { + autorest.Response `json:"-"` + Value *[]Location `json:"value,omitempty"` + Nextlink *string `json:"nextlink,omitempty"` +} + +// LocationCollectionPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. +func (client LocationCollection) LocationCollectionPreparer() (*http.Request, error) { + if client.Nextlink == nil || len(to.String(client.Nextlink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.Nextlink))) +} + +// LocationProperties is +type LocationProperties struct { + HostName *string `json:"hostName,omitempty"` +} + +// MAMPolicyAppIDOrGroupIDPayload is mAM Policy request body for properties +// Intune MAM. +type MAMPolicyAppIDOrGroupIDPayload struct { + Properties *MAMPolicyAppOrGroupIDProperties `json:"properties,omitempty"` +} + +// MAMPolicyAppOrGroupIDProperties is android Policy request body for Intune +// MAM. 
+type MAMPolicyAppOrGroupIDProperties struct { + URL *string `json:"url,omitempty"` +} + +// MAMPolicyProperties is +type MAMPolicyProperties struct { + FriendlyName *string `json:"friendlyName,omitempty"` + Description *string `json:"description,omitempty"` + AppSharingFromLevel AppSharingFromLevel `json:"appSharingFromLevel,omitempty"` + AppSharingToLevel AppSharingToLevel `json:"appSharingToLevel,omitempty"` + Authentication Authentication `json:"authentication,omitempty"` + ClipboardSharingLevel ClipboardSharingLevel `json:"clipboardSharingLevel,omitempty"` + DataBackup DataBackup `json:"dataBackup,omitempty"` + FileSharingSaveAs FileSharingSaveAs `json:"fileSharingSaveAs,omitempty"` + Pin Pin `json:"pin,omitempty"` + PinNumRetry *int32 `json:"pinNumRetry,omitempty"` + DeviceCompliance DeviceCompliance `json:"deviceCompliance,omitempty"` + ManagedBrowser ManagedBrowser `json:"managedBrowser,omitempty"` + AccessRecheckOfflineTimeout *string `json:"accessRecheckOfflineTimeout,omitempty"` + AccessRecheckOnlineTimeout *string `json:"accessRecheckOnlineTimeout,omitempty"` + OfflineWipeTimeout *string `json:"offlineWipeTimeout,omitempty"` + NumOfApps *int32 `json:"numOfApps,omitempty"` + GroupStatus GroupStatus `json:"groupStatus,omitempty"` + LastModifiedTime *date.Time `json:"lastModifiedTime,omitempty"` +} + +// OperationMetadataProperties is +type OperationMetadataProperties struct { + Name *string `json:"name,omitempty"` + Value *string `json:"value,omitempty"` +} + +// OperationResult is operationResult entity for Intune. 
+type OperationResult struct { + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` + Location *string `json:"location,omitempty"` + Properties *OperationResultProperties `json:"properties,omitempty"` +} + +// OperationResultCollection is +type OperationResultCollection struct { + autorest.Response `json:"-"` + Value *[]OperationResult `json:"value,omitempty"` + Nextlink *string `json:"nextlink,omitempty"` +} + +// OperationResultCollectionPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. +func (client OperationResultCollection) OperationResultCollectionPreparer() (*http.Request, error) { + if client.Nextlink == nil || len(to.String(client.Nextlink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.Nextlink))) +} + +// OperationResultProperties is +type OperationResultProperties struct { + FriendlyName *string `json:"friendlyName,omitempty"` + Category *string `json:"category,omitempty"` + LastModifiedTime *string `json:"lastModifiedTime,omitempty"` + State *string `json:"state,omitempty"` + OperationMetadata *[]OperationMetadataProperties `json:"operationMetadata,omitempty"` +} + +// Resource is +type Resource struct { + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` + Location *string `json:"location,omitempty"` +} + +// StatusesDefault is default Statuses entity for the given tenant. 
+type StatusesDefault struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` + Location *string `json:"location,omitempty"` + Properties *StatusesProperties `json:"properties,omitempty"` + Nextlink *string `json:",omitempty"` +} + +// StatusesDefaultPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. +func (client StatusesDefault) StatusesDefaultPreparer() (*http.Request, error) { + if client.Nextlink == nil || len(to.String(client.Nextlink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.Nextlink))) +} + +// StatusesProperties is +type StatusesProperties struct { + DeployedPolicies *int32 `json:"deployedPolicies,omitempty"` + EnrolledUsers *int32 `json:"enrolledUsers,omitempty"` + FlaggedUsers *int32 `json:"flaggedUsers,omitempty"` + LastModifiedTime *date.Time `json:"lastModifiedTime,omitempty"` + PolicyAppliedUsers *int32 `json:"policyAppliedUsers,omitempty"` + Status *string `json:"status,omitempty"` + WipeFailedApps *int32 `json:"wipeFailedApps,omitempty"` + WipePendingApps *int32 `json:"wipePendingApps,omitempty"` + WipeSucceededApps *int32 `json:"wipeSucceededApps,omitempty"` +} + +// WipeDeviceOperationResult is device entity for Intune. 
+type WipeDeviceOperationResult struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` + Location *string `json:"location,omitempty"` + Properties *WipeDeviceOperationResultProperties `json:"properties,omitempty"` +} + +// WipeDeviceOperationResultProperties is +type WipeDeviceOperationResultProperties struct { + Value *string `json:"value,omitempty"` +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/intune/version.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/intune/version.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/intune/version.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/intune/version.go 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,43 @@ +package intune + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. 
+ +import ( + "fmt" +) + +const ( + major = "3" + minor = "1" + patch = "0" + // Always begin a "tag" with a dash (as per http://semver.org) + tag = "-beta" + semVerFormat = "%s.%s.%s%s" + userAgentFormat = "Azure-SDK-for-Go/%s arm-%s/%s" +) + +// UserAgent returns the UserAgent string to use when sending http.Requests. +func UserAgent() string { + return fmt.Sprintf(userAgentFormat, Version(), "intune", "2015-01-14-preview") +} + +// Version returns the semantic version (see http://semver.org) of the client. +func Version() string { + return fmt.Sprintf(semVerFormat, major, minor, patch, tag) +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/logic/client.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/logic/client.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/logic/client.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/logic/client.go 2016-10-13 14:32:06.000000000 +0000 @@ -1,3 +1,6 @@ +// Package logic implements the Azure ARM Logic service API version +// 2015-08-01-preview. +// package logic // Copyright (c) Microsoft and contributors. All rights reserved. @@ -14,17 +17,17 @@ // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 // Changes may cause incorrect behavior and will be lost if the code is // regenerated. 
import ( - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest" ) const ( // APIVersion is the version of the Logic - APIVersion = "2015-02-01-preview" + APIVersion = "2015-08-01-preview" // DefaultBaseURI is the default URI used for the service Logic DefaultBaseURI = "https://management.azure.com" @@ -34,6 +37,7 @@ type ManagementClient struct { autorest.Client BaseURI string + APIVersion string SubscriptionID string } @@ -47,6 +51,7 @@ return ManagementClient{ Client: autorest.NewClientWithUserAgent(UserAgent()), BaseURI: baseURI, + APIVersion: APIVersion, SubscriptionID: subscriptionID, } } diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/logic/integrationaccountagreements.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/logic/integrationaccountagreements.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/logic/integrationaccountagreements.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/logic/integrationaccountagreements.go 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,333 @@ +package logic + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "net/http" +) + +// IntegrationAccountAgreementsClient is the client for the +// IntegrationAccountAgreements methods of the Logic service. +type IntegrationAccountAgreementsClient struct { + ManagementClient +} + +// NewIntegrationAccountAgreementsClient creates an instance of the +// IntegrationAccountAgreementsClient client. +func NewIntegrationAccountAgreementsClient(subscriptionID string) IntegrationAccountAgreementsClient { + return NewIntegrationAccountAgreementsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewIntegrationAccountAgreementsClientWithBaseURI creates an instance of the +// IntegrationAccountAgreementsClient client. +func NewIntegrationAccountAgreementsClientWithBaseURI(baseURI string, subscriptionID string) IntegrationAccountAgreementsClient { + return IntegrationAccountAgreementsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate creates or updates an integration account agreement. +// +// resourceGroupName is the resource group name. integrationAccountName is the +// integration account name. agreementName is the integration account +// agreement name. agreement is the integration account agreement. 
+func (client IntegrationAccountAgreementsClient) CreateOrUpdate(resourceGroupName string, integrationAccountName string, agreementName string, agreement IntegrationAccountAgreement) (result IntegrationAccountAgreement, err error) { + req, err := client.CreateOrUpdatePreparer(resourceGroupName, integrationAccountName, agreementName, agreement) + if err != nil { + return result, autorest.NewErrorWithError(err, "logic.IntegrationAccountAgreementsClient", "CreateOrUpdate", nil, "Failure preparing request") + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "logic.IntegrationAccountAgreementsClient", "CreateOrUpdate", resp, "Failure sending request") + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "logic.IntegrationAccountAgreementsClient", "CreateOrUpdate", resp, "Failure responding to request") + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. 
+func (client IntegrationAccountAgreementsClient) CreateOrUpdatePreparer(resourceGroupName string, integrationAccountName string, agreementName string, agreement IntegrationAccountAgreement) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "agreementName": autorest.Encode("path", agreementName), + "integrationAccountName": autorest.Encode("path", integrationAccountName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/integrationAccounts/{integrationAccountName}/agreements/{agreementName}", pathParameters), + autorest.WithJSON(agreement), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client IntegrationAccountAgreementsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client IntegrationAccountAgreementsClient) CreateOrUpdateResponder(resp *http.Response) (result IntegrationAccountAgreement, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete deletes an integration account agreement. 
+// +// resourceGroupName is the resource group name. integrationAccountName is the +// integration account name. agreementName is the integration account +// agreement name. +func (client IntegrationAccountAgreementsClient) Delete(resourceGroupName string, integrationAccountName string, agreementName string) (result autorest.Response, err error) { + req, err := client.DeletePreparer(resourceGroupName, integrationAccountName, agreementName) + if err != nil { + return result, autorest.NewErrorWithError(err, "logic.IntegrationAccountAgreementsClient", "Delete", nil, "Failure preparing request") + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "logic.IntegrationAccountAgreementsClient", "Delete", resp, "Failure sending request") + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "logic.IntegrationAccountAgreementsClient", "Delete", resp, "Failure responding to request") + } + + return +} + +// DeletePreparer prepares the Delete request. 
+func (client IntegrationAccountAgreementsClient) DeletePreparer(resourceGroupName string, integrationAccountName string, agreementName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "agreementName": autorest.Encode("path", agreementName), + "integrationAccountName": autorest.Encode("path", integrationAccountName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/integrationAccounts/{integrationAccountName}/agreements/{agreementName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client IntegrationAccountAgreementsClient) DeleteSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client IntegrationAccountAgreementsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get gets an integration account agreement. +// +// resourceGroupName is the resource group name. integrationAccountName is the +// integration account name. agreementName is the integration account +// agreement name. 
+func (client IntegrationAccountAgreementsClient) Get(resourceGroupName string, integrationAccountName string, agreementName string) (result IntegrationAccountAgreement, err error) { + req, err := client.GetPreparer(resourceGroupName, integrationAccountName, agreementName) + if err != nil { + return result, autorest.NewErrorWithError(err, "logic.IntegrationAccountAgreementsClient", "Get", nil, "Failure preparing request") + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "logic.IntegrationAccountAgreementsClient", "Get", resp, "Failure sending request") + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "logic.IntegrationAccountAgreementsClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client IntegrationAccountAgreementsClient) GetPreparer(resourceGroupName string, integrationAccountName string, agreementName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "agreementName": autorest.Encode("path", agreementName), + "integrationAccountName": autorest.Encode("path", integrationAccountName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/integrationAccounts/{integrationAccountName}/agreements/{agreementName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetSender sends the Get request. 
The method will close the +// http.Response Body if it receives an error. +func (client IntegrationAccountAgreementsClient) GetSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client IntegrationAccountAgreementsClient) GetResponder(resp *http.Response) (result IntegrationAccountAgreement, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List gets a list of integration account agreements. +// +// resourceGroupName is the resource group name. integrationAccountName is the +// integration account name. top is the number of items to be included in the +// result. filter is the filter to apply on the operation. +func (client IntegrationAccountAgreementsClient) List(resourceGroupName string, integrationAccountName string, top *int32, filter string) (result IntegrationAccountAgreementListResult, err error) { + req, err := client.ListPreparer(resourceGroupName, integrationAccountName, top, filter) + if err != nil { + return result, autorest.NewErrorWithError(err, "logic.IntegrationAccountAgreementsClient", "List", nil, "Failure preparing request") + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "logic.IntegrationAccountAgreementsClient", "List", resp, "Failure sending request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "logic.IntegrationAccountAgreementsClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. 
+func (client IntegrationAccountAgreementsClient) ListPreparer(resourceGroupName string, integrationAccountName string, top *int32, filter string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "integrationAccountName": autorest.Encode("path", integrationAccountName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + if top != nil { + queryParameters["$top"] = autorest.Encode("query", *top) + } + if len(filter) > 0 { + queryParameters["$filter"] = autorest.Encode("query", filter) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/integrationAccounts/{integrationAccountName}/agreements", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client IntegrationAccountAgreementsClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client IntegrationAccountAgreementsClient) ListResponder(resp *http.Response) (result IntegrationAccountAgreementListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListNextResults retrieves the next set of results, if any. 
+func (client IntegrationAccountAgreementsClient) ListNextResults(lastResults IntegrationAccountAgreementListResult) (result IntegrationAccountAgreementListResult, err error) { + req, err := lastResults.IntegrationAccountAgreementListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "logic.IntegrationAccountAgreementsClient", "List", nil, "Failure preparing next results request request") + } + if req == nil { + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "logic.IntegrationAccountAgreementsClient", "List", resp, "Failure sending next results request request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "logic.IntegrationAccountAgreementsClient", "List", resp, "Failure responding to next results request request") + } + + return +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/logic/integrationaccountcertificates.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/logic/integrationaccountcertificates.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/logic/integrationaccountcertificates.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/logic/integrationaccountcertificates.go 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,330 @@ +package logic + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "net/http" +) + +// IntegrationAccountCertificatesClient is the client for the +// IntegrationAccountCertificates methods of the Logic service. +type IntegrationAccountCertificatesClient struct { + ManagementClient +} + +// NewIntegrationAccountCertificatesClient creates an instance of the +// IntegrationAccountCertificatesClient client. +func NewIntegrationAccountCertificatesClient(subscriptionID string) IntegrationAccountCertificatesClient { + return NewIntegrationAccountCertificatesClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewIntegrationAccountCertificatesClientWithBaseURI creates an instance of +// the IntegrationAccountCertificatesClient client. +func NewIntegrationAccountCertificatesClientWithBaseURI(baseURI string, subscriptionID string) IntegrationAccountCertificatesClient { + return IntegrationAccountCertificatesClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate creates or updates an integration account certificate. +// +// resourceGroupName is the resource group name. integrationAccountName is the +// integration account name. certificateName is the integration account +// certificate name. certificate is the integration account certificate. 
+func (client IntegrationAccountCertificatesClient) CreateOrUpdate(resourceGroupName string, integrationAccountName string, certificateName string, certificate IntegrationAccountCertificate) (result IntegrationAccountCertificate, err error) { + req, err := client.CreateOrUpdatePreparer(resourceGroupName, integrationAccountName, certificateName, certificate) + if err != nil { + return result, autorest.NewErrorWithError(err, "logic.IntegrationAccountCertificatesClient", "CreateOrUpdate", nil, "Failure preparing request") + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "logic.IntegrationAccountCertificatesClient", "CreateOrUpdate", resp, "Failure sending request") + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "logic.IntegrationAccountCertificatesClient", "CreateOrUpdate", resp, "Failure responding to request") + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. 
+func (client IntegrationAccountCertificatesClient) CreateOrUpdatePreparer(resourceGroupName string, integrationAccountName string, certificateName string, certificate IntegrationAccountCertificate) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "certificateName": autorest.Encode("path", certificateName), + "integrationAccountName": autorest.Encode("path", integrationAccountName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/integrationAccounts/{integrationAccountName}/certificates/{certificateName}", pathParameters), + autorest.WithJSON(certificate), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client IntegrationAccountCertificatesClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. 
+func (client IntegrationAccountCertificatesClient) CreateOrUpdateResponder(resp *http.Response) (result IntegrationAccountCertificate, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete deletes an integration account certificate. +// +// resourceGroupName is the resource group name. integrationAccountName is the +// integration account name. certificateName is the integration account +// certificate name. +func (client IntegrationAccountCertificatesClient) Delete(resourceGroupName string, integrationAccountName string, certificateName string) (result autorest.Response, err error) { + req, err := client.DeletePreparer(resourceGroupName, integrationAccountName, certificateName) + if err != nil { + return result, autorest.NewErrorWithError(err, "logic.IntegrationAccountCertificatesClient", "Delete", nil, "Failure preparing request") + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "logic.IntegrationAccountCertificatesClient", "Delete", resp, "Failure sending request") + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "logic.IntegrationAccountCertificatesClient", "Delete", resp, "Failure responding to request") + } + + return +} + +// DeletePreparer prepares the Delete request. 
+func (client IntegrationAccountCertificatesClient) DeletePreparer(resourceGroupName string, integrationAccountName string, certificateName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "certificateName": autorest.Encode("path", certificateName), + "integrationAccountName": autorest.Encode("path", integrationAccountName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/integrationAccounts/{integrationAccountName}/certificates/{certificateName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client IntegrationAccountCertificatesClient) DeleteSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client IntegrationAccountCertificatesClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get gets an integration account certificate. +// +// resourceGroupName is the resource group name. integrationAccountName is the +// integration account name. certificateName is the integration account +// certificate name. 
+func (client IntegrationAccountCertificatesClient) Get(resourceGroupName string, integrationAccountName string, certificateName string) (result IntegrationAccountCertificate, err error) { + req, err := client.GetPreparer(resourceGroupName, integrationAccountName, certificateName) + if err != nil { + return result, autorest.NewErrorWithError(err, "logic.IntegrationAccountCertificatesClient", "Get", nil, "Failure preparing request") + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "logic.IntegrationAccountCertificatesClient", "Get", resp, "Failure sending request") + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "logic.IntegrationAccountCertificatesClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client IntegrationAccountCertificatesClient) GetPreparer(resourceGroupName string, integrationAccountName string, certificateName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "certificateName": autorest.Encode("path", certificateName), + "integrationAccountName": autorest.Encode("path", integrationAccountName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/integrationAccounts/{integrationAccountName}/certificates/{certificateName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetSender sends the Get request. 
The method will close the +// http.Response Body if it receives an error. +func (client IntegrationAccountCertificatesClient) GetSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client IntegrationAccountCertificatesClient) GetResponder(resp *http.Response) (result IntegrationAccountCertificate, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List gets a list of integration account certificates. +// +// resourceGroupName is the resource group name. integrationAccountName is the +// integration account name. top is the number of items to be included in the +// result. +func (client IntegrationAccountCertificatesClient) List(resourceGroupName string, integrationAccountName string, top *int32) (result IntegrationAccountCertificateListResult, err error) { + req, err := client.ListPreparer(resourceGroupName, integrationAccountName, top) + if err != nil { + return result, autorest.NewErrorWithError(err, "logic.IntegrationAccountCertificatesClient", "List", nil, "Failure preparing request") + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "logic.IntegrationAccountCertificatesClient", "List", resp, "Failure sending request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "logic.IntegrationAccountCertificatesClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. 
+func (client IntegrationAccountCertificatesClient) ListPreparer(resourceGroupName string, integrationAccountName string, top *int32) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "integrationAccountName": autorest.Encode("path", integrationAccountName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + if top != nil { + queryParameters["$top"] = autorest.Encode("query", *top) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/integrationAccounts/{integrationAccountName}/certificates", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client IntegrationAccountCertificatesClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client IntegrationAccountCertificatesClient) ListResponder(resp *http.Response) (result IntegrationAccountCertificateListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListNextResults retrieves the next set of results, if any. 
+func (client IntegrationAccountCertificatesClient) ListNextResults(lastResults IntegrationAccountCertificateListResult) (result IntegrationAccountCertificateListResult, err error) { + req, err := lastResults.IntegrationAccountCertificateListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "logic.IntegrationAccountCertificatesClient", "List", nil, "Failure preparing next results request request") + } + if req == nil { + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "logic.IntegrationAccountCertificatesClient", "List", resp, "Failure sending next results request request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "logic.IntegrationAccountCertificatesClient", "List", resp, "Failure responding to next results request request") + } + + return +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/logic/integrationaccountmaps.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/logic/integrationaccountmaps.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/logic/integrationaccountmaps.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/logic/integrationaccountmaps.go 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,331 @@ +package logic + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "net/http" +) + +// IntegrationAccountMapsClient is the client for the IntegrationAccountMaps +// methods of the Logic service. +type IntegrationAccountMapsClient struct { + ManagementClient +} + +// NewIntegrationAccountMapsClient creates an instance of the +// IntegrationAccountMapsClient client. +func NewIntegrationAccountMapsClient(subscriptionID string) IntegrationAccountMapsClient { + return NewIntegrationAccountMapsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewIntegrationAccountMapsClientWithBaseURI creates an instance of the +// IntegrationAccountMapsClient client. +func NewIntegrationAccountMapsClientWithBaseURI(baseURI string, subscriptionID string) IntegrationAccountMapsClient { + return IntegrationAccountMapsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate creates or updates an integration account map. +// +// resourceGroupName is the resource group name. integrationAccountName is the +// integration account name. mapName is the integration account map name. +// mapParameter is the integration account map. 
+func (client IntegrationAccountMapsClient) CreateOrUpdate(resourceGroupName string, integrationAccountName string, mapName string, mapParameter IntegrationAccountMap) (result IntegrationAccountMap, err error) { + req, err := client.CreateOrUpdatePreparer(resourceGroupName, integrationAccountName, mapName, mapParameter) + if err != nil { + return result, autorest.NewErrorWithError(err, "logic.IntegrationAccountMapsClient", "CreateOrUpdate", nil, "Failure preparing request") + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "logic.IntegrationAccountMapsClient", "CreateOrUpdate", resp, "Failure sending request") + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "logic.IntegrationAccountMapsClient", "CreateOrUpdate", resp, "Failure responding to request") + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. 
+func (client IntegrationAccountMapsClient) CreateOrUpdatePreparer(resourceGroupName string, integrationAccountName string, mapName string, mapParameter IntegrationAccountMap) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "integrationAccountName": autorest.Encode("path", integrationAccountName), + "mapName": autorest.Encode("path", mapName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/integrationAccounts/{integrationAccountName}/maps/{mapName}", pathParameters), + autorest.WithJSON(mapParameter), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client IntegrationAccountMapsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client IntegrationAccountMapsClient) CreateOrUpdateResponder(resp *http.Response) (result IntegrationAccountMap, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete deletes an integration account map. +// +// resourceGroupName is the resource group name. 
integrationAccountName is the +// integration account name. mapName is the integration account map name. +func (client IntegrationAccountMapsClient) Delete(resourceGroupName string, integrationAccountName string, mapName string) (result autorest.Response, err error) { + req, err := client.DeletePreparer(resourceGroupName, integrationAccountName, mapName) + if err != nil { + return result, autorest.NewErrorWithError(err, "logic.IntegrationAccountMapsClient", "Delete", nil, "Failure preparing request") + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "logic.IntegrationAccountMapsClient", "Delete", resp, "Failure sending request") + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "logic.IntegrationAccountMapsClient", "Delete", resp, "Failure responding to request") + } + + return +} + +// DeletePreparer prepares the Delete request. +func (client IntegrationAccountMapsClient) DeletePreparer(resourceGroupName string, integrationAccountName string, mapName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "integrationAccountName": autorest.Encode("path", integrationAccountName), + "mapName": autorest.Encode("path", mapName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/integrationAccounts/{integrationAccountName}/maps/{mapName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// DeleteSender sends the Delete request. 
The method will close the +// http.Response Body if it receives an error. +func (client IntegrationAccountMapsClient) DeleteSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client IntegrationAccountMapsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get gets an integration account map. +// +// resourceGroupName is the resource group name. integrationAccountName is the +// integration account name. mapName is the integration account map name. +func (client IntegrationAccountMapsClient) Get(resourceGroupName string, integrationAccountName string, mapName string) (result IntegrationAccountMap, err error) { + req, err := client.GetPreparer(resourceGroupName, integrationAccountName, mapName) + if err != nil { + return result, autorest.NewErrorWithError(err, "logic.IntegrationAccountMapsClient", "Get", nil, "Failure preparing request") + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "logic.IntegrationAccountMapsClient", "Get", resp, "Failure sending request") + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "logic.IntegrationAccountMapsClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. 
+func (client IntegrationAccountMapsClient) GetPreparer(resourceGroupName string, integrationAccountName string, mapName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "integrationAccountName": autorest.Encode("path", integrationAccountName), + "mapName": autorest.Encode("path", mapName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/integrationAccounts/{integrationAccountName}/maps/{mapName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client IntegrationAccountMapsClient) GetSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client IntegrationAccountMapsClient) GetResponder(resp *http.Response) (result IntegrationAccountMap, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List gets a list of integration account maps. +// +// resourceGroupName is the resource group name. integrationAccountName is the +// integration account name. top is the number of items to be included in the +// result. filter is the filter to apply on the operation. 
+func (client IntegrationAccountMapsClient) List(resourceGroupName string, integrationAccountName string, top *int32, filter string) (result IntegrationAccountMapListResult, err error) { + req, err := client.ListPreparer(resourceGroupName, integrationAccountName, top, filter) + if err != nil { + return result, autorest.NewErrorWithError(err, "logic.IntegrationAccountMapsClient", "List", nil, "Failure preparing request") + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "logic.IntegrationAccountMapsClient", "List", resp, "Failure sending request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "logic.IntegrationAccountMapsClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client IntegrationAccountMapsClient) ListPreparer(resourceGroupName string, integrationAccountName string, top *int32, filter string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "integrationAccountName": autorest.Encode("path", integrationAccountName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + if top != nil { + queryParameters["$top"] = autorest.Encode("query", *top) + } + if len(filter) > 0 { + queryParameters["$filter"] = autorest.Encode("query", filter) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/integrationAccounts/{integrationAccountName}/maps", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + 
+// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client IntegrationAccountMapsClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client IntegrationAccountMapsClient) ListResponder(resp *http.Response) (result IntegrationAccountMapListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListNextResults retrieves the next set of results, if any. +func (client IntegrationAccountMapsClient) ListNextResults(lastResults IntegrationAccountMapListResult) (result IntegrationAccountMapListResult, err error) { + req, err := lastResults.IntegrationAccountMapListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "logic.IntegrationAccountMapsClient", "List", nil, "Failure preparing next results request request") + } + if req == nil { + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "logic.IntegrationAccountMapsClient", "List", resp, "Failure sending next results request request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "logic.IntegrationAccountMapsClient", "List", resp, "Failure responding to next results request request") + } + + return +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/logic/integrationaccountpartners.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/logic/integrationaccountpartners.go --- 
juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/logic/integrationaccountpartners.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/logic/integrationaccountpartners.go 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,333 @@ +package logic + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "net/http" +) + +// IntegrationAccountPartnersClient is the client for the +// IntegrationAccountPartners methods of the Logic service. +type IntegrationAccountPartnersClient struct { + ManagementClient +} + +// NewIntegrationAccountPartnersClient creates an instance of the +// IntegrationAccountPartnersClient client. +func NewIntegrationAccountPartnersClient(subscriptionID string) IntegrationAccountPartnersClient { + return NewIntegrationAccountPartnersClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewIntegrationAccountPartnersClientWithBaseURI creates an instance of the +// IntegrationAccountPartnersClient client. 
+func NewIntegrationAccountPartnersClientWithBaseURI(baseURI string, subscriptionID string) IntegrationAccountPartnersClient { + return IntegrationAccountPartnersClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate creates or updates an integration account partner. +// +// resourceGroupName is the resource group name. integrationAccountName is the +// integration account name. partnerName is the integration account partner +// name. partner is the integration account partner. +func (client IntegrationAccountPartnersClient) CreateOrUpdate(resourceGroupName string, integrationAccountName string, partnerName string, partner IntegrationAccountPartner) (result IntegrationAccountPartner, err error) { + req, err := client.CreateOrUpdatePreparer(resourceGroupName, integrationAccountName, partnerName, partner) + if err != nil { + return result, autorest.NewErrorWithError(err, "logic.IntegrationAccountPartnersClient", "CreateOrUpdate", nil, "Failure preparing request") + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "logic.IntegrationAccountPartnersClient", "CreateOrUpdate", resp, "Failure sending request") + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "logic.IntegrationAccountPartnersClient", "CreateOrUpdate", resp, "Failure responding to request") + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. 
+func (client IntegrationAccountPartnersClient) CreateOrUpdatePreparer(resourceGroupName string, integrationAccountName string, partnerName string, partner IntegrationAccountPartner) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "integrationAccountName": autorest.Encode("path", integrationAccountName), + "partnerName": autorest.Encode("path", partnerName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/integrationAccounts/{integrationAccountName}/partners/{partnerName}", pathParameters), + autorest.WithJSON(partner), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client IntegrationAccountPartnersClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client IntegrationAccountPartnersClient) CreateOrUpdateResponder(resp *http.Response) (result IntegrationAccountPartner, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete deletes an integration account partner. 
+// +// resourceGroupName is the resource group name. integrationAccountName is the +// integration account name. partnerName is the integration account partner +// name. +func (client IntegrationAccountPartnersClient) Delete(resourceGroupName string, integrationAccountName string, partnerName string) (result autorest.Response, err error) { + req, err := client.DeletePreparer(resourceGroupName, integrationAccountName, partnerName) + if err != nil { + return result, autorest.NewErrorWithError(err, "logic.IntegrationAccountPartnersClient", "Delete", nil, "Failure preparing request") + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "logic.IntegrationAccountPartnersClient", "Delete", resp, "Failure sending request") + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "logic.IntegrationAccountPartnersClient", "Delete", resp, "Failure responding to request") + } + + return +} + +// DeletePreparer prepares the Delete request. 
+func (client IntegrationAccountPartnersClient) DeletePreparer(resourceGroupName string, integrationAccountName string, partnerName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "integrationAccountName": autorest.Encode("path", integrationAccountName), + "partnerName": autorest.Encode("path", partnerName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/integrationAccounts/{integrationAccountName}/partners/{partnerName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client IntegrationAccountPartnersClient) DeleteSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client IntegrationAccountPartnersClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get gets an integration account partner. +// +// resourceGroupName is the resource group name. integrationAccountName is the +// integration account name. partnerName is the integration account partner +// name. 
+func (client IntegrationAccountPartnersClient) Get(resourceGroupName string, integrationAccountName string, partnerName string) (result IntegrationAccountPartner, err error) { + req, err := client.GetPreparer(resourceGroupName, integrationAccountName, partnerName) + if err != nil { + return result, autorest.NewErrorWithError(err, "logic.IntegrationAccountPartnersClient", "Get", nil, "Failure preparing request") + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "logic.IntegrationAccountPartnersClient", "Get", resp, "Failure sending request") + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "logic.IntegrationAccountPartnersClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client IntegrationAccountPartnersClient) GetPreparer(resourceGroupName string, integrationAccountName string, partnerName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "integrationAccountName": autorest.Encode("path", integrationAccountName), + "partnerName": autorest.Encode("path", partnerName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/integrationAccounts/{integrationAccountName}/partners/{partnerName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetSender sends the Get request. 
The method will close the +// http.Response Body if it receives an error. +func (client IntegrationAccountPartnersClient) GetSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client IntegrationAccountPartnersClient) GetResponder(resp *http.Response) (result IntegrationAccountPartner, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List gets a list of integration account partners. +// +// resourceGroupName is the resource group name. integrationAccountName is the +// integration account name. top is the number of items to be included in the +// result. filter is the filter to apply on the operation. +func (client IntegrationAccountPartnersClient) List(resourceGroupName string, integrationAccountName string, top *int32, filter string) (result IntegrationAccountPartnerListResult, err error) { + req, err := client.ListPreparer(resourceGroupName, integrationAccountName, top, filter) + if err != nil { + return result, autorest.NewErrorWithError(err, "logic.IntegrationAccountPartnersClient", "List", nil, "Failure preparing request") + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "logic.IntegrationAccountPartnersClient", "List", resp, "Failure sending request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "logic.IntegrationAccountPartnersClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. 
+func (client IntegrationAccountPartnersClient) ListPreparer(resourceGroupName string, integrationAccountName string, top *int32, filter string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "integrationAccountName": autorest.Encode("path", integrationAccountName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + if top != nil { + queryParameters["$top"] = autorest.Encode("query", *top) + } + if len(filter) > 0 { + queryParameters["$filter"] = autorest.Encode("query", filter) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/integrationAccounts/{integrationAccountName}/partners", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client IntegrationAccountPartnersClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client IntegrationAccountPartnersClient) ListResponder(resp *http.Response) (result IntegrationAccountPartnerListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListNextResults retrieves the next set of results, if any. 
+func (client IntegrationAccountPartnersClient) ListNextResults(lastResults IntegrationAccountPartnerListResult) (result IntegrationAccountPartnerListResult, err error) { + req, err := lastResults.IntegrationAccountPartnerListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "logic.IntegrationAccountPartnersClient", "List", nil, "Failure preparing next results request request") + } + if req == nil { + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "logic.IntegrationAccountPartnersClient", "List", resp, "Failure sending next results request request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "logic.IntegrationAccountPartnersClient", "List", resp, "Failure responding to next results request request") + } + + return +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/logic/integrationaccountschemas.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/logic/integrationaccountschemas.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/logic/integrationaccountschemas.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/logic/integrationaccountschemas.go 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,333 @@ +package logic + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "net/http" +) + +// IntegrationAccountSchemasClient is the client for the +// IntegrationAccountSchemas methods of the Logic service. +type IntegrationAccountSchemasClient struct { + ManagementClient +} + +// NewIntegrationAccountSchemasClient creates an instance of the +// IntegrationAccountSchemasClient client. +func NewIntegrationAccountSchemasClient(subscriptionID string) IntegrationAccountSchemasClient { + return NewIntegrationAccountSchemasClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewIntegrationAccountSchemasClientWithBaseURI creates an instance of the +// IntegrationAccountSchemasClient client. +func NewIntegrationAccountSchemasClientWithBaseURI(baseURI string, subscriptionID string) IntegrationAccountSchemasClient { + return IntegrationAccountSchemasClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate creates or updates an integration account schema. +// +// resourceGroupName is the resource group name. integrationAccountName is the +// integration account name. schemaName is the integration account schema +// name. schema is the integration account schema. 
+func (client IntegrationAccountSchemasClient) CreateOrUpdate(resourceGroupName string, integrationAccountName string, schemaName string, schema IntegrationAccountSchema) (result IntegrationAccountSchema, err error) { + req, err := client.CreateOrUpdatePreparer(resourceGroupName, integrationAccountName, schemaName, schema) + if err != nil { + return result, autorest.NewErrorWithError(err, "logic.IntegrationAccountSchemasClient", "CreateOrUpdate", nil, "Failure preparing request") + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "logic.IntegrationAccountSchemasClient", "CreateOrUpdate", resp, "Failure sending request") + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "logic.IntegrationAccountSchemasClient", "CreateOrUpdate", resp, "Failure responding to request") + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. 
+func (client IntegrationAccountSchemasClient) CreateOrUpdatePreparer(resourceGroupName string, integrationAccountName string, schemaName string, schema IntegrationAccountSchema) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "integrationAccountName": autorest.Encode("path", integrationAccountName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "schemaName": autorest.Encode("path", schemaName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/integrationAccounts/{integrationAccountName}/schemas/{schemaName}", pathParameters), + autorest.WithJSON(schema), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client IntegrationAccountSchemasClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client IntegrationAccountSchemasClient) CreateOrUpdateResponder(resp *http.Response) (result IntegrationAccountSchema, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete deletes an integration account schema. +// +// resourceGroupName is the resource group name. 
integrationAccountName is the +// integration account name. schemaName is the integration account schema +// name. +func (client IntegrationAccountSchemasClient) Delete(resourceGroupName string, integrationAccountName string, schemaName string) (result autorest.Response, err error) { + req, err := client.DeletePreparer(resourceGroupName, integrationAccountName, schemaName) + if err != nil { + return result, autorest.NewErrorWithError(err, "logic.IntegrationAccountSchemasClient", "Delete", nil, "Failure preparing request") + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "logic.IntegrationAccountSchemasClient", "Delete", resp, "Failure sending request") + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "logic.IntegrationAccountSchemasClient", "Delete", resp, "Failure responding to request") + } + + return +} + +// DeletePreparer prepares the Delete request. 
+func (client IntegrationAccountSchemasClient) DeletePreparer(resourceGroupName string, integrationAccountName string, schemaName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "integrationAccountName": autorest.Encode("path", integrationAccountName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "schemaName": autorest.Encode("path", schemaName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/integrationAccounts/{integrationAccountName}/schemas/{schemaName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client IntegrationAccountSchemasClient) DeleteSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client IntegrationAccountSchemasClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get gets an integration account schema. +// +// resourceGroupName is the resource group name. integrationAccountName is the +// integration account name. schemaName is the integration account schema +// name. 
+func (client IntegrationAccountSchemasClient) Get(resourceGroupName string, integrationAccountName string, schemaName string) (result IntegrationAccountSchema, err error) { + req, err := client.GetPreparer(resourceGroupName, integrationAccountName, schemaName) + if err != nil { + return result, autorest.NewErrorWithError(err, "logic.IntegrationAccountSchemasClient", "Get", nil, "Failure preparing request") + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "logic.IntegrationAccountSchemasClient", "Get", resp, "Failure sending request") + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "logic.IntegrationAccountSchemasClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client IntegrationAccountSchemasClient) GetPreparer(resourceGroupName string, integrationAccountName string, schemaName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "integrationAccountName": autorest.Encode("path", integrationAccountName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "schemaName": autorest.Encode("path", schemaName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/integrationAccounts/{integrationAccountName}/schemas/{schemaName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. 
+func (client IntegrationAccountSchemasClient) GetSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client IntegrationAccountSchemasClient) GetResponder(resp *http.Response) (result IntegrationAccountSchema, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List gets a list of integration account schemas. +// +// resourceGroupName is the resource group name. integrationAccountName is the +// integration account name. top is the number of items to be included in the +// result. filter is the filter to apply on the operation. +func (client IntegrationAccountSchemasClient) List(resourceGroupName string, integrationAccountName string, top *int32, filter string) (result IntegrationAccountSchemaListResult, err error) { + req, err := client.ListPreparer(resourceGroupName, integrationAccountName, top, filter) + if err != nil { + return result, autorest.NewErrorWithError(err, "logic.IntegrationAccountSchemasClient", "List", nil, "Failure preparing request") + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "logic.IntegrationAccountSchemasClient", "List", resp, "Failure sending request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "logic.IntegrationAccountSchemasClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. 
+func (client IntegrationAccountSchemasClient) ListPreparer(resourceGroupName string, integrationAccountName string, top *int32, filter string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "integrationAccountName": autorest.Encode("path", integrationAccountName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + if top != nil { + queryParameters["$top"] = autorest.Encode("query", *top) + } + if len(filter) > 0 { + queryParameters["$filter"] = autorest.Encode("query", filter) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/integrationAccounts/{integrationAccountName}/schemas", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client IntegrationAccountSchemasClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client IntegrationAccountSchemasClient) ListResponder(resp *http.Response) (result IntegrationAccountSchemaListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListNextResults retrieves the next set of results, if any. 
+func (client IntegrationAccountSchemasClient) ListNextResults(lastResults IntegrationAccountSchemaListResult) (result IntegrationAccountSchemaListResult, err error) { + req, err := lastResults.IntegrationAccountSchemaListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "logic.IntegrationAccountSchemasClient", "List", nil, "Failure preparing next results request request") + } + if req == nil { + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "logic.IntegrationAccountSchemasClient", "List", resp, "Failure sending next results request request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "logic.IntegrationAccountSchemasClient", "List", resp, "Failure responding to next results request request") + } + + return +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/logic/integrationaccounts.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/logic/integrationaccounts.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/logic/integrationaccounts.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/logic/integrationaccounts.go 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,539 @@ +package logic + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "net/http" +) + +// IntegrationAccountsClient is the client for the IntegrationAccounts methods +// of the Logic service. +type IntegrationAccountsClient struct { + ManagementClient +} + +// NewIntegrationAccountsClient creates an instance of the +// IntegrationAccountsClient client. +func NewIntegrationAccountsClient(subscriptionID string) IntegrationAccountsClient { + return NewIntegrationAccountsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewIntegrationAccountsClientWithBaseURI creates an instance of the +// IntegrationAccountsClient client. +func NewIntegrationAccountsClientWithBaseURI(baseURI string, subscriptionID string) IntegrationAccountsClient { + return IntegrationAccountsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate creates or updates an integration account. +// +// resourceGroupName is the resource group name. integrationAccountName is the +// integration account name. integrationAccount is the integration account. 
+func (client IntegrationAccountsClient) CreateOrUpdate(resourceGroupName string, integrationAccountName string, integrationAccount IntegrationAccount) (result IntegrationAccount, err error) { + req, err := client.CreateOrUpdatePreparer(resourceGroupName, integrationAccountName, integrationAccount) + if err != nil { + return result, autorest.NewErrorWithError(err, "logic.IntegrationAccountsClient", "CreateOrUpdate", nil, "Failure preparing request") + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "logic.IntegrationAccountsClient", "CreateOrUpdate", resp, "Failure sending request") + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "logic.IntegrationAccountsClient", "CreateOrUpdate", resp, "Failure responding to request") + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. 
+func (client IntegrationAccountsClient) CreateOrUpdatePreparer(resourceGroupName string, integrationAccountName string, integrationAccount IntegrationAccount) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "integrationAccountName": autorest.Encode("path", integrationAccountName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/integrationAccounts/{integrationAccountName}", pathParameters), + autorest.WithJSON(integrationAccount), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client IntegrationAccountsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client IntegrationAccountsClient) CreateOrUpdateResponder(resp *http.Response) (result IntegrationAccount, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete deletes an integration account. +// +// resourceGroupName is the resource group name. integrationAccountName is the +// integration account name. 
+func (client IntegrationAccountsClient) Delete(resourceGroupName string, integrationAccountName string) (result autorest.Response, err error) { + req, err := client.DeletePreparer(resourceGroupName, integrationAccountName) + if err != nil { + return result, autorest.NewErrorWithError(err, "logic.IntegrationAccountsClient", "Delete", nil, "Failure preparing request") + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "logic.IntegrationAccountsClient", "Delete", resp, "Failure sending request") + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "logic.IntegrationAccountsClient", "Delete", resp, "Failure responding to request") + } + + return +} + +// DeletePreparer prepares the Delete request. +func (client IntegrationAccountsClient) DeletePreparer(resourceGroupName string, integrationAccountName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "integrationAccountName": autorest.Encode("path", integrationAccountName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/integrationAccounts/{integrationAccountName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. 
+func (client IntegrationAccountsClient) DeleteSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client IntegrationAccountsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get gets an integration account. +// +// resourceGroupName is the resource group name. integrationAccountName is the +// integration account name. +func (client IntegrationAccountsClient) Get(resourceGroupName string, integrationAccountName string) (result IntegrationAccount, err error) { + req, err := client.GetPreparer(resourceGroupName, integrationAccountName) + if err != nil { + return result, autorest.NewErrorWithError(err, "logic.IntegrationAccountsClient", "Get", nil, "Failure preparing request") + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "logic.IntegrationAccountsClient", "Get", resp, "Failure sending request") + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "logic.IntegrationAccountsClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. 
+func (client IntegrationAccountsClient) GetPreparer(resourceGroupName string, integrationAccountName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "integrationAccountName": autorest.Encode("path", integrationAccountName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/integrationAccounts/{integrationAccountName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client IntegrationAccountsClient) GetSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client IntegrationAccountsClient) GetResponder(resp *http.Response) (result IntegrationAccount, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListByResourceGroup gets a list of integration accounts by resource group. +// +// resourceGroupName is the resource group name. top is the number of items to +// be included in the result. 
+func (client IntegrationAccountsClient) ListByResourceGroup(resourceGroupName string, top *int32) (result IntegrationAccountListResult, err error) { + req, err := client.ListByResourceGroupPreparer(resourceGroupName, top) + if err != nil { + return result, autorest.NewErrorWithError(err, "logic.IntegrationAccountsClient", "ListByResourceGroup", nil, "Failure preparing request") + } + + resp, err := client.ListByResourceGroupSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "logic.IntegrationAccountsClient", "ListByResourceGroup", resp, "Failure sending request") + } + + result, err = client.ListByResourceGroupResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "logic.IntegrationAccountsClient", "ListByResourceGroup", resp, "Failure responding to request") + } + + return +} + +// ListByResourceGroupPreparer prepares the ListByResourceGroup request. +func (client IntegrationAccountsClient) ListByResourceGroupPreparer(resourceGroupName string, top *int32) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + if top != nil { + queryParameters["$top"] = autorest.Encode("query", *top) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/integrationAccounts", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the +// http.Response Body if it receives an error. 
+func (client IntegrationAccountsClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always +// closes the http.Response Body. +func (client IntegrationAccountsClient) ListByResourceGroupResponder(resp *http.Response) (result IntegrationAccountListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListByResourceGroupNextResults retrieves the next set of results, if any. +func (client IntegrationAccountsClient) ListByResourceGroupNextResults(lastResults IntegrationAccountListResult) (result IntegrationAccountListResult, err error) { + req, err := lastResults.IntegrationAccountListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "logic.IntegrationAccountsClient", "ListByResourceGroup", nil, "Failure preparing next results request request") + } + if req == nil { + return + } + + resp, err := client.ListByResourceGroupSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "logic.IntegrationAccountsClient", "ListByResourceGroup", resp, "Failure sending next results request request") + } + + result, err = client.ListByResourceGroupResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "logic.IntegrationAccountsClient", "ListByResourceGroup", resp, "Failure responding to next results request request") + } + + return +} + +// ListBySubscription gets a list of integration accounts by subscription. +// +// top is the number of items to be included in the result. 
+func (client IntegrationAccountsClient) ListBySubscription(top *int32) (result IntegrationAccountListResult, err error) { + req, err := client.ListBySubscriptionPreparer(top) + if err != nil { + return result, autorest.NewErrorWithError(err, "logic.IntegrationAccountsClient", "ListBySubscription", nil, "Failure preparing request") + } + + resp, err := client.ListBySubscriptionSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "logic.IntegrationAccountsClient", "ListBySubscription", resp, "Failure sending request") + } + + result, err = client.ListBySubscriptionResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "logic.IntegrationAccountsClient", "ListBySubscription", resp, "Failure responding to request") + } + + return +} + +// ListBySubscriptionPreparer prepares the ListBySubscription request. +func (client IntegrationAccountsClient) ListBySubscriptionPreparer(top *int32) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + if top != nil { + queryParameters["$top"] = autorest.Encode("query", *top) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Logic/integrationAccounts", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListBySubscriptionSender sends the ListBySubscription request. The method will close the +// http.Response Body if it receives an error. 
+func (client IntegrationAccountsClient) ListBySubscriptionSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListBySubscriptionResponder handles the response to the ListBySubscription request. The method always +// closes the http.Response Body. +func (client IntegrationAccountsClient) ListBySubscriptionResponder(resp *http.Response) (result IntegrationAccountListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListBySubscriptionNextResults retrieves the next set of results, if any. +func (client IntegrationAccountsClient) ListBySubscriptionNextResults(lastResults IntegrationAccountListResult) (result IntegrationAccountListResult, err error) { + req, err := lastResults.IntegrationAccountListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "logic.IntegrationAccountsClient", "ListBySubscription", nil, "Failure preparing next results request request") + } + if req == nil { + return + } + + resp, err := client.ListBySubscriptionSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "logic.IntegrationAccountsClient", "ListBySubscription", resp, "Failure sending next results request request") + } + + result, err = client.ListBySubscriptionResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "logic.IntegrationAccountsClient", "ListBySubscription", resp, "Failure responding to next results request request") + } + + return +} + +// ListCallbackURL lists the integration account callback URL. +// +// resourceGroupName is the resource group name. integrationAccountName is the +// integration account name. parameters is the callback URL parameters. 
+func (client IntegrationAccountsClient) ListCallbackURL(resourceGroupName string, integrationAccountName string, parameters ListCallbackURLParameters) (result CallbackURL, err error) { + req, err := client.ListCallbackURLPreparer(resourceGroupName, integrationAccountName, parameters) + if err != nil { + return result, autorest.NewErrorWithError(err, "logic.IntegrationAccountsClient", "ListCallbackURL", nil, "Failure preparing request") + } + + resp, err := client.ListCallbackURLSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "logic.IntegrationAccountsClient", "ListCallbackURL", resp, "Failure sending request") + } + + result, err = client.ListCallbackURLResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "logic.IntegrationAccountsClient", "ListCallbackURL", resp, "Failure responding to request") + } + + return +} + +// ListCallbackURLPreparer prepares the ListCallbackURL request. +func (client IntegrationAccountsClient) ListCallbackURLPreparer(resourceGroupName string, integrationAccountName string, parameters ListCallbackURLParameters) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "integrationAccountName": autorest.Encode("path", integrationAccountName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/integrationAccounts/{integrationAccountName}/listCallbackUrl", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// 
ListCallbackURLSender sends the ListCallbackURL request. The method will close the +// http.Response Body if it receives an error. +func (client IntegrationAccountsClient) ListCallbackURLSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListCallbackURLResponder handles the response to the ListCallbackURL request. The method always +// closes the http.Response Body. +func (client IntegrationAccountsClient) ListCallbackURLResponder(resp *http.Response) (result CallbackURL, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Update updates an integration account. +// +// resourceGroupName is the resource group name. integrationAccountName is the +// integration account name. integrationAccount is the integration account. +func (client IntegrationAccountsClient) Update(resourceGroupName string, integrationAccountName string, integrationAccount IntegrationAccount) (result IntegrationAccount, err error) { + req, err := client.UpdatePreparer(resourceGroupName, integrationAccountName, integrationAccount) + if err != nil { + return result, autorest.NewErrorWithError(err, "logic.IntegrationAccountsClient", "Update", nil, "Failure preparing request") + } + + resp, err := client.UpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "logic.IntegrationAccountsClient", "Update", resp, "Failure sending request") + } + + result, err = client.UpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "logic.IntegrationAccountsClient", "Update", resp, "Failure responding to request") + } + + return +} + +// UpdatePreparer prepares the Update request. 
+func (client IntegrationAccountsClient) UpdatePreparer(resourceGroupName string, integrationAccountName string, integrationAccount IntegrationAccount) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "integrationAccountName": autorest.Encode("path", integrationAccountName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPatch(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/integrationAccounts/{integrationAccountName}", pathParameters), + autorest.WithJSON(integrationAccount), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// UpdateSender sends the Update request. The method will close the +// http.Response Body if it receives an error. +func (client IntegrationAccountsClient) UpdateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// UpdateResponder handles the response to the Update request. The method always +// closes the http.Response Body. 
+func (client IntegrationAccountsClient) UpdateResponder(resp *http.Response) (result IntegrationAccount, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/logic/models.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/logic/models.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/logic/models.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/logic/models.go 2016-10-13 14:32:06.000000000 +0000 @@ -14,80 +14,212 @@ // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 // Changes may cause incorrect behavior and will be lost if the code is // regenerated. import ( - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/date" - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/to" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/date" + "github.com/Azure/go-autorest/autorest/to" "net/http" ) -// KeyType enumerates the values for key type. -type KeyType string +// AgreementType enumerates the values for agreement type. +type AgreementType string const ( - // NotSpecified specifies the not specified state for key type. - NotSpecified KeyType = "NotSpecified" - // Primary specifies the primary state for key type. - Primary KeyType = "Primary" - // Secondary specifies the secondary state for key type. 
- Secondary KeyType = "Secondary" + // AS2 specifies the as2 state for agreement type. + AS2 AgreementType = "AS2" + // Edifact specifies the edifact state for agreement type. + Edifact AgreementType = "Edifact" + // NotSpecified specifies the not specified state for agreement type. + NotSpecified AgreementType = "NotSpecified" + // X12 specifies the x12 state for agreement type. + X12 AgreementType = "X12" ) -// ParameterType enumerates the values for parameter type. -type ParameterType string +// EdifactCharacterSet enumerates the values for edifact character set. +type EdifactCharacterSet string const ( - // ParameterTypeArray specifies the parameter type array state for - // parameter type. - ParameterTypeArray ParameterType = "Array" - // ParameterTypeBool specifies the parameter type bool state for parameter - // type. - ParameterTypeBool ParameterType = "Bool" - // ParameterTypeFloat specifies the parameter type float state for - // parameter type. - ParameterTypeFloat ParameterType = "Float" - // ParameterTypeInt specifies the parameter type int state for parameter + // EdifactCharacterSetKECA specifies the edifact character set keca state + // for edifact character set. + EdifactCharacterSetKECA EdifactCharacterSet = "KECA" + // EdifactCharacterSetNotSpecified specifies the edifact character set not + // specified state for edifact character set. + EdifactCharacterSetNotSpecified EdifactCharacterSet = "NotSpecified" + // EdifactCharacterSetUNOA specifies the edifact character set unoa state + // for edifact character set. + EdifactCharacterSetUNOA EdifactCharacterSet = "UNOA" + // EdifactCharacterSetUNOB specifies the edifact character set unob state + // for edifact character set. + EdifactCharacterSetUNOB EdifactCharacterSet = "UNOB" + // EdifactCharacterSetUNOC specifies the edifact character set unoc state + // for edifact character set. 
+ EdifactCharacterSetUNOC EdifactCharacterSet = "UNOC" + // EdifactCharacterSetUNOD specifies the edifact character set unod state + // for edifact character set. + EdifactCharacterSetUNOD EdifactCharacterSet = "UNOD" + // EdifactCharacterSetUNOE specifies the edifact character set unoe state + // for edifact character set. + EdifactCharacterSetUNOE EdifactCharacterSet = "UNOE" + // EdifactCharacterSetUNOF specifies the edifact character set unof state + // for edifact character set. + EdifactCharacterSetUNOF EdifactCharacterSet = "UNOF" + // EdifactCharacterSetUNOG specifies the edifact character set unog state + // for edifact character set. + EdifactCharacterSetUNOG EdifactCharacterSet = "UNOG" + // EdifactCharacterSetUNOH specifies the edifact character set unoh state + // for edifact character set. + EdifactCharacterSetUNOH EdifactCharacterSet = "UNOH" + // EdifactCharacterSetUNOI specifies the edifact character set unoi state + // for edifact character set. + EdifactCharacterSetUNOI EdifactCharacterSet = "UNOI" + // EdifactCharacterSetUNOJ specifies the edifact character set unoj state + // for edifact character set. + EdifactCharacterSetUNOJ EdifactCharacterSet = "UNOJ" + // EdifactCharacterSetUNOK specifies the edifact character set unok state + // for edifact character set. + EdifactCharacterSetUNOK EdifactCharacterSet = "UNOK" + // EdifactCharacterSetUNOX specifies the edifact character set unox state + // for edifact character set. + EdifactCharacterSetUNOX EdifactCharacterSet = "UNOX" + // EdifactCharacterSetUNOY specifies the edifact character set unoy state + // for edifact character set. + EdifactCharacterSetUNOY EdifactCharacterSet = "UNOY" +) + +// EdifactDecimalIndicator enumerates the values for edifact decimal indicator. +type EdifactDecimalIndicator string + +const ( + // EdifactDecimalIndicatorComma specifies the edifact decimal indicator + // comma state for edifact decimal indicator. 
+ EdifactDecimalIndicatorComma EdifactDecimalIndicator = "Comma" + // EdifactDecimalIndicatorDecimal specifies the edifact decimal indicator + // decimal state for edifact decimal indicator. + EdifactDecimalIndicatorDecimal EdifactDecimalIndicator = "Decimal" + // EdifactDecimalIndicatorNotSpecified specifies the edifact decimal + // indicator not specified state for edifact decimal indicator. + EdifactDecimalIndicatorNotSpecified EdifactDecimalIndicator = "NotSpecified" +) + +// EncryptionAlgorithm enumerates the values for encryption algorithm. +type EncryptionAlgorithm string + +const ( + // EncryptionAlgorithmAES128 specifies the encryption algorithm aes128 + // state for encryption algorithm. + EncryptionAlgorithmAES128 EncryptionAlgorithm = "AES128" + // EncryptionAlgorithmAES192 specifies the encryption algorithm aes192 + // state for encryption algorithm. + EncryptionAlgorithmAES192 EncryptionAlgorithm = "AES192" + // EncryptionAlgorithmAES256 specifies the encryption algorithm aes256 + // state for encryption algorithm. + EncryptionAlgorithmAES256 EncryptionAlgorithm = "AES256" + // EncryptionAlgorithmDES3 specifies the encryption algorithm des3 state + // for encryption algorithm. + EncryptionAlgorithmDES3 EncryptionAlgorithm = "DES3" + // EncryptionAlgorithmNone specifies the encryption algorithm none state + // for encryption algorithm. + EncryptionAlgorithmNone EncryptionAlgorithm = "None" + // EncryptionAlgorithmNotSpecified specifies the encryption algorithm not + // specified state for encryption algorithm. + EncryptionAlgorithmNotSpecified EncryptionAlgorithm = "NotSpecified" + // EncryptionAlgorithmRC2 specifies the encryption algorithm rc2 state for + // encryption algorithm. + EncryptionAlgorithmRC2 EncryptionAlgorithm = "RC2" +) + +// HashingAlgorithm enumerates the values for hashing algorithm. +type HashingAlgorithm string + +const ( + // HashingAlgorithmNone specifies the hashing algorithm none state for + // hashing algorithm. 
+ HashingAlgorithmNone HashingAlgorithm = "None" + // HashingAlgorithmNotSpecified specifies the hashing algorithm not + // specified state for hashing algorithm. + HashingAlgorithmNotSpecified HashingAlgorithm = "NotSpecified" + // HashingAlgorithmSHA2256 specifies the hashing algorithm sha2256 state + // for hashing algorithm. + HashingAlgorithmSHA2256 HashingAlgorithm = "SHA2256" + // HashingAlgorithmSHA2384 specifies the hashing algorithm sha2384 state + // for hashing algorithm. + HashingAlgorithmSHA2384 HashingAlgorithm = "SHA2384" + // HashingAlgorithmSHA2512 specifies the hashing algorithm sha2512 state + // for hashing algorithm. + HashingAlgorithmSHA2512 HashingAlgorithm = "SHA2512" +) + +// MapType enumerates the values for map type. +type MapType string + +const ( + // MapTypeNotSpecified specifies the map type not specified state for map // type. - ParameterTypeInt ParameterType = "Int" - // ParameterTypeNotSpecified specifies the parameter type not specified - // state for parameter type. - ParameterTypeNotSpecified ParameterType = "NotSpecified" - // ParameterTypeObject specifies the parameter type object state for - // parameter type. - ParameterTypeObject ParameterType = "Object" - // ParameterTypeSecureObject specifies the parameter type secure object - // state for parameter type. - ParameterTypeSecureObject ParameterType = "SecureObject" - // ParameterTypeSecureString specifies the parameter type secure string - // state for parameter type. - ParameterTypeSecureString ParameterType = "SecureString" - // ParameterTypeString specifies the parameter type string state for - // parameter type. - ParameterTypeString ParameterType = "String" -) - -// RecurrenceFrequency enumerates the values for recurrence frequency. -type RecurrenceFrequency string - -const ( - // Day specifies the day state for recurrence frequency. - Day RecurrenceFrequency = "Day" - // Hour specifies the hour state for recurrence frequency. 
- Hour RecurrenceFrequency = "Hour" - // Minute specifies the minute state for recurrence frequency. - Minute RecurrenceFrequency = "Minute" - // Month specifies the month state for recurrence frequency. - Month RecurrenceFrequency = "Month" - // Second specifies the second state for recurrence frequency. - Second RecurrenceFrequency = "Second" - // Week specifies the week state for recurrence frequency. - Week RecurrenceFrequency = "Week" - // Year specifies the year state for recurrence frequency. - Year RecurrenceFrequency = "Year" + MapTypeNotSpecified MapType = "NotSpecified" + // MapTypeXslt specifies the map type xslt state for map type. + MapTypeXslt MapType = "Xslt" +) + +// MessageFilterType enumerates the values for message filter type. +type MessageFilterType string + +const ( + // MessageFilterTypeExclude specifies the message filter type exclude + // state for message filter type. + MessageFilterTypeExclude MessageFilterType = "Exclude" + // MessageFilterTypeInclude specifies the message filter type include + // state for message filter type. + MessageFilterTypeInclude MessageFilterType = "Include" + // MessageFilterTypeNotSpecified specifies the message filter type not + // specified state for message filter type. + MessageFilterTypeNotSpecified MessageFilterType = "NotSpecified" +) + +// PartnerType enumerates the values for partner type. +type PartnerType string + +const ( + // PartnerTypeB2B specifies the partner type b2b state for partner type. + PartnerTypeB2B PartnerType = "B2B" + // PartnerTypeNotSpecified specifies the partner type not specified state + // for partner type. + PartnerTypeNotSpecified PartnerType = "NotSpecified" +) + +// SchemaType enumerates the values for schema type. +type SchemaType string + +const ( + // SchemaTypeNotSpecified specifies the schema type not specified state + // for schema type. + SchemaTypeNotSpecified SchemaType = "NotSpecified" + // SchemaTypeXML specifies the schema type xml state for schema type. 
+ SchemaTypeXML SchemaType = "Xml" +) + +// SegmentTerminatorSuffix enumerates the values for segment terminator suffix. +type SegmentTerminatorSuffix string + +const ( + // SegmentTerminatorSuffixCR specifies the segment terminator suffix cr + // state for segment terminator suffix. + SegmentTerminatorSuffixCR SegmentTerminatorSuffix = "CR" + // SegmentTerminatorSuffixCRLF specifies the segment terminator suffix + // crlf state for segment terminator suffix. + SegmentTerminatorSuffixCRLF SegmentTerminatorSuffix = "CRLF" + // SegmentTerminatorSuffixLF specifies the segment terminator suffix lf + // state for segment terminator suffix. + SegmentTerminatorSuffixLF SegmentTerminatorSuffix = "LF" + // SegmentTerminatorSuffixNone specifies the segment terminator suffix + // none state for segment terminator suffix. + SegmentTerminatorSuffixNone SegmentTerminatorSuffix = "None" + // SegmentTerminatorSuffixNotSpecified specifies the segment terminator + // suffix not specified state for segment terminator suffix. + SegmentTerminatorSuffixNotSpecified SegmentTerminatorSuffix = "NotSpecified" ) // SkuName enumerates the values for sku name. @@ -109,182 +241,445 @@ SkuNameStandard SkuName = "Standard" ) -// WorkflowProvisioningState enumerates the values for workflow provisioning -// state. -type WorkflowProvisioningState string - -const ( - // WorkflowProvisioningStateMoving specifies the workflow provisioning - // state moving state for workflow provisioning state. - WorkflowProvisioningStateMoving WorkflowProvisioningState = "Moving" - // WorkflowProvisioningStateNotSpecified specifies the workflow - // provisioning state not specified state for workflow provisioning state. - WorkflowProvisioningStateNotSpecified WorkflowProvisioningState = "NotSpecified" - // WorkflowProvisioningStateSucceeded specifies the workflow provisioning - // state succeeded state for workflow provisioning state. 
- WorkflowProvisioningStateSucceeded WorkflowProvisioningState = "Succeeded" -) - -// WorkflowState enumerates the values for workflow state. -type WorkflowState string - -const ( - // WorkflowStateDeleted specifies the workflow state deleted state for - // workflow state. - WorkflowStateDeleted WorkflowState = "Deleted" - // WorkflowStateDisabled specifies the workflow state disabled state for - // workflow state. - WorkflowStateDisabled WorkflowState = "Disabled" - // WorkflowStateEnabled specifies the workflow state enabled state for - // workflow state. - WorkflowStateEnabled WorkflowState = "Enabled" - // WorkflowStateNotSpecified specifies the workflow state not specified - // state for workflow state. - WorkflowStateNotSpecified WorkflowState = "NotSpecified" - // WorkflowStateSuspended specifies the workflow state suspended state for - // workflow state. - WorkflowStateSuspended WorkflowState = "Suspended" -) - -// WorkflowStatus enumerates the values for workflow status. -type WorkflowStatus string - -const ( - // WorkflowStatusCancelled specifies the workflow status cancelled state - // for workflow status. - WorkflowStatusCancelled WorkflowStatus = "Cancelled" - // WorkflowStatusFailed specifies the workflow status failed state for - // workflow status. - WorkflowStatusFailed WorkflowStatus = "Failed" - // WorkflowStatusNotSpecified specifies the workflow status not specified - // state for workflow status. - WorkflowStatusNotSpecified WorkflowStatus = "NotSpecified" - // WorkflowStatusPaused specifies the workflow status paused state for - // workflow status. - WorkflowStatusPaused WorkflowStatus = "Paused" - // WorkflowStatusRunning specifies the workflow status running state for - // workflow status. - WorkflowStatusRunning WorkflowStatus = "Running" - // WorkflowStatusSkipped specifies the workflow status skipped state for - // workflow status. 
- WorkflowStatusSkipped WorkflowStatus = "Skipped" - // WorkflowStatusSucceeded specifies the workflow status succeeded state - // for workflow status. - WorkflowStatusSucceeded WorkflowStatus = "Succeeded" - // WorkflowStatusSuspended specifies the workflow status suspended state - // for workflow status. - WorkflowStatusSuspended WorkflowStatus = "Suspended" - // WorkflowStatusWaiting specifies the workflow status waiting state for - // workflow status. - WorkflowStatusWaiting WorkflowStatus = "Waiting" -) - -// WorkflowTriggerProvisioningState enumerates the values for workflow trigger -// provisioning state. -type WorkflowTriggerProvisioningState string - -const ( - // WorkflowTriggerProvisioningStateCreating specifies the workflow trigger - // provisioning state creating state for workflow trigger provisioning - // state. - WorkflowTriggerProvisioningStateCreating WorkflowTriggerProvisioningState = "Creating" - // WorkflowTriggerProvisioningStateNotSpecified specifies the workflow - // trigger provisioning state not specified state for workflow trigger - // provisioning state. - WorkflowTriggerProvisioningStateNotSpecified WorkflowTriggerProvisioningState = "NotSpecified" - // WorkflowTriggerProvisioningStateSucceeded specifies the workflow - // trigger provisioning state succeeded state for workflow trigger - // provisioning state. - WorkflowTriggerProvisioningStateSucceeded WorkflowTriggerProvisioningState = "Succeeded" - // WorkflowTriggerProvisioningStateUpdating specifies the workflow trigger - // provisioning state updating state for workflow trigger provisioning - // state. - WorkflowTriggerProvisioningStateUpdating WorkflowTriggerProvisioningState = "Updating" +// TrailingSeparatorPolicy enumerates the values for trailing separator policy. +type TrailingSeparatorPolicy string + +const ( + // TrailingSeparatorPolicyMandatory specifies the trailing separator + // policy mandatory state for trailing separator policy. 
+ TrailingSeparatorPolicyMandatory TrailingSeparatorPolicy = "Mandatory" + // TrailingSeparatorPolicyNotAllowed specifies the trailing separator + // policy not allowed state for trailing separator policy. + TrailingSeparatorPolicyNotAllowed TrailingSeparatorPolicy = "NotAllowed" + // TrailingSeparatorPolicyNotSpecified specifies the trailing separator + // policy not specified state for trailing separator policy. + TrailingSeparatorPolicyNotSpecified TrailingSeparatorPolicy = "NotSpecified" + // TrailingSeparatorPolicyOptional specifies the trailing separator policy + // optional state for trailing separator policy. + TrailingSeparatorPolicyOptional TrailingSeparatorPolicy = "Optional" ) -// ContentHash is -type ContentHash struct { - Algorithm *string `json:"algorithm,omitempty"` - Value *string `json:"value,omitempty"` -} +// UsageIndicator enumerates the values for usage indicator. +type UsageIndicator string -// ContentLink is -type ContentLink struct { - URI *string `json:"uri,omitempty"` - ContentVersion *string `json:"contentVersion,omitempty"` - ContentSize *int32 `json:"contentSize,omitempty"` - ContentHash *ContentHash `json:"contentHash,omitempty"` - Metadata *map[string]interface{} `json:"metadata,omitempty"` -} - -// RegenerateSecretKeyParameters is -type RegenerateSecretKeyParameters struct { - KeyType KeyType `json:"keyType,omitempty"` -} +const ( + // UsageIndicatorInformation specifies the usage indicator information + // state for usage indicator. + UsageIndicatorInformation UsageIndicator = "Information" + // UsageIndicatorNotSpecified specifies the usage indicator not specified + // state for usage indicator. + UsageIndicatorNotSpecified UsageIndicator = "NotSpecified" + // UsageIndicatorProduction specifies the usage indicator production state + // for usage indicator. + UsageIndicatorProduction UsageIndicator = "Production" + // UsageIndicatorTest specifies the usage indicator test state for usage + // indicator. 
+ UsageIndicatorTest UsageIndicator = "Test" +) -// Resource is -type Resource struct { - ID *string `json:"id,omitempty"` - Name *string `json:"name,omitempty"` - Type *string `json:"type,omitempty"` - Location *string `json:"location,omitempty"` - Tags *map[string]*string `json:"tags,omitempty"` -} +// X12CharacterSet enumerates the values for x12 character set. +type X12CharacterSet string -// ResourceReference is -type ResourceReference struct { - ID *string `json:"id,omitempty"` - Name *string `json:"name,omitempty"` - Type *string `json:"type,omitempty"` -} +const ( + // X12CharacterSetBasic specifies the x12 character set basic state for + // x12 character set. + X12CharacterSetBasic X12CharacterSet = "Basic" + // X12CharacterSetExtended specifies the x12 character set extended state + // for x12 character set. + X12CharacterSetExtended X12CharacterSet = "Extended" + // X12CharacterSetNotSpecified specifies the x12 character set not + // specified state for x12 character set. + X12CharacterSetNotSpecified X12CharacterSet = "NotSpecified" + // X12CharacterSetUTF8 specifies the x12 character set utf8 state for x12 + // character set. + X12CharacterSetUTF8 X12CharacterSet = "UTF8" +) + +// X12DateFormat enumerates the values for x12 date format. +type X12DateFormat string + +const ( + // X12DateFormatCCYYMMDD specifies the x12 date format ccyymmdd state for + // x12 date format. + X12DateFormatCCYYMMDD X12DateFormat = "CCYYMMDD" + // X12DateFormatNotSpecified specifies the x12 date format not specified + // state for x12 date format. + X12DateFormatNotSpecified X12DateFormat = "NotSpecified" + // X12DateFormatYYMMDD specifies the x12 date format yymmdd state for x12 + // date format. + X12DateFormatYYMMDD X12DateFormat = "YYMMDD" +) + +// X12TimeFormat enumerates the values for x12 time format. 
+type X12TimeFormat string -// RunWorkflowParameters is -type RunWorkflowParameters struct { - Name *string `json:"name,omitempty"` - Outputs *map[string]interface{} `json:"outputs,omitempty"` +const ( + // X12TimeFormatHHMM specifies the x12 time format hhmm state for x12 time + // format. + X12TimeFormatHHMM X12TimeFormat = "HHMM" + // X12TimeFormatHHMMSS specifies the x12 time format hhmmss state for x12 + // time format. + X12TimeFormatHHMMSS X12TimeFormat = "HHMMSS" + // X12TimeFormatHHMMSSd specifies the x12 time format hhmms sd state for + // x12 time format. + X12TimeFormatHHMMSSd X12TimeFormat = "HHMMSSd" + // X12TimeFormatHHMMSSdd specifies the x12 time format hhmms sdd state for + // x12 time format. + X12TimeFormatHHMMSSdd X12TimeFormat = "HHMMSSdd" + // X12TimeFormatNotSpecified specifies the x12 time format not specified + // state for x12 time format. + X12TimeFormatNotSpecified X12TimeFormat = "NotSpecified" +) + +// AgreementContent is +type AgreementContent struct { + AS2 *AS2AgreementContent `json:"AS2,omitempty"` + X12 *X12AgreementContent `json:"X12,omitempty"` + Edifact *EdifactAgreementContent `json:"Edifact,omitempty"` +} + +// AS2AcknowledgementConnectionSettings is +type AS2AcknowledgementConnectionSettings struct { + IgnoreCertificateNameMismatch *bool `json:"ignoreCertificateNameMismatch,omitempty"` + SupportHTTPStatusCodeContinue *bool `json:"supportHttpStatusCodeContinue,omitempty"` + KeepHTTPConnectionAlive *bool `json:"keepHttpConnectionAlive,omitempty"` + UnfoldHTTPHeaders *bool `json:"unfoldHttpHeaders,omitempty"` +} + +// AS2AgreementContent is +type AS2AgreementContent struct { + ReceiveAgreement *AS2OneWayAgreement `json:"receiveAgreement,omitempty"` + SendAgreement *AS2OneWayAgreement `json:"sendAgreement,omitempty"` +} + +// AS2EnvelopeSettings is +type AS2EnvelopeSettings struct { + MessageContentType *string `json:"messageContentType,omitempty"` + TransmitFileNameInMimeHeader *bool 
`json:"transmitFileNameInMimeHeader,omitempty"` + FileNameTemplate *string `json:"fileNameTemplate,omitempty"` + SuspendMessageOnFileNameGenerationError *bool `json:"SuspendMessageOnFileNameGenerationError,omitempty"` + AutogenerateFileName *bool `json:"AutogenerateFileName,omitempty"` +} + +// AS2ErrorSettings is +type AS2ErrorSettings struct { + SuspendDuplicateMessage *bool `json:"SuspendDuplicateMessage,omitempty"` + ResendIfMdnNotReceived *bool `json:"ResendIfMdnNotReceived,omitempty"` +} + +// AS2MdnSettings is +type AS2MdnSettings struct { + NeedMdn *bool `json:"needMdn,omitempty"` + SignMdn *bool `json:"signMdn,omitempty"` + SendMdnAsynchronously *bool `json:"sendMdnAsynchronously,omitempty"` + ReceiptDeliveryURL *string `json:"receiptDeliveryUrl,omitempty"` + DispositionNotificationTo *string `json:"dispositionNotificationTo,omitempty"` + SignOutboundMdnIfOptional *bool `json:"signOutboundMdnIfOptional,omitempty"` + MdnText *string `json:"mdnText,omitempty"` + SendInboundMdnToMessageBox *bool `json:"sendInboundMdnToMessageBox,omitempty"` + MicHashingAlgorithm HashingAlgorithm `json:"micHashingAlgorithm,omitempty"` +} + +// AS2MessageConnectionSettings is +type AS2MessageConnectionSettings struct { + IgnoreCertificateNameMismatch *bool `json:"ignoreCertificateNameMismatch,omitempty"` + SupportHTTPStatusCodeContinue *bool `json:"supportHttpStatusCodeContinue,omitempty"` + KeepHTTPConnectionAlive *bool `json:"keepHttpConnectionAlive,omitempty"` + UnfoldHTTPHeaders *bool `json:"unfoldHttpHeaders,omitempty"` +} + +// AS2OneWayAgreement is +type AS2OneWayAgreement struct { + SenderBusinessIdentity *BusinessIdentity `json:"senderBusinessIdentity,omitempty"` + ReceiverBusinessIdentity *BusinessIdentity `json:"receiverBusinessIdentity,omitempty"` + ProtocolSettings *AS2ProtocolSettings `json:"protocolSettings,omitempty"` +} + +// AS2ProtocolSettings is +type AS2ProtocolSettings struct { + MessageConnectionSettings *AS2MessageConnectionSettings 
`json:"messageConnectionSettings,omitempty"` + AcknowledgementConnectionSettings *AS2AcknowledgementConnectionSettings `json:"acknowledgementConnectionSettings,omitempty"` + MdnSettings *AS2MdnSettings `json:"mdnSettings,omitempty"` + SecuritySettings *AS2SecuritySettings `json:"securitySettings,omitempty"` + ValidationSettings *AS2ValidationSettings `json:"validationSettings,omitempty"` + EnvelopeSettings *AS2EnvelopeSettings `json:"envelopeSettings,omitempty"` + ErrorSettings *AS2ErrorSettings `json:"errorSettings,omitempty"` +} + +// AS2SecuritySettings is +type AS2SecuritySettings struct { + OverrideGroupSigningCertificate *bool `json:"overrideGroupSigningCertificate,omitempty"` + SigningCertificateName *string `json:"signingCertificateName,omitempty"` + EncryptionCertificateName *string `json:"encryptionCertificateName,omitempty"` + EnableNrrForInboundEncodedMessages *bool `json:"enableNrrForInboundEncodedMessages,omitempty"` + EnableNrrForInboundDecodedMessages *bool `json:"enableNrrForInboundDecodedMessages,omitempty"` + EnableNrrForOutboundMdn *bool `json:"enableNrrForOutboundMdn,omitempty"` + EnableNrrForOutboundEncodedMessages *bool `json:"enableNrrForOutboundEncodedMessages,omitempty"` + EnableNrrForOutboundDecodedMessages *bool `json:"enableNrrForOutboundDecodedMessages,omitempty"` + EnableNrrForInboundMdn *bool `json:"enableNrrForInboundMdn,omitempty"` +} + +// AS2ValidationSettings is +type AS2ValidationSettings struct { + OverrideMessageProperties *bool `json:"overrideMessageProperties,omitempty"` + EncryptMessage *bool `json:"encryptMessage,omitempty"` + SignMessage *bool `json:"signMessage,omitempty"` + CompressMessage *bool `json:"compressMessage,omitempty"` + CheckDuplicateMessage *bool `json:"checkDuplicateMessage,omitempty"` + InterchangeDuplicatesValidityDays *int32 `json:"interchangeDuplicatesValidityDays,omitempty"` + CheckCertificateRevocationListOnSend *bool `json:"checkCertificateRevocationListOnSend,omitempty"` + 
CheckCertificateRevocationListOnReceive *bool `json:"checkCertificateRevocationListOnReceive,omitempty"` + EncryptionAlgorithm EncryptionAlgorithm `json:"encryptionAlgorithm,omitempty"` +} + +// B2BPartnerContent is +type B2BPartnerContent struct { + BusinessIdentities *[]BusinessIdentity `json:"businessIdentities,omitempty"` +} + +// BusinessIdentity is +type BusinessIdentity struct { + Qualifier *string `json:"Qualifier,omitempty"` + Value *string `json:"Value,omitempty"` } -// Sku is -type Sku struct { - Name SkuName `json:"name,omitempty"` - Plan *ResourceReference `json:"plan,omitempty"` +// CallbackURL is +type CallbackURL struct { + autorest.Response `json:"-"` + Value *string `json:"value,omitempty"` } -// SubResource is -type SubResource struct { - ID *string `json:"id,omitempty"` +// EdifactAcknowledgementSettings is +type EdifactAcknowledgementSettings struct { + NeedTechnicalAcknowledgement *bool `json:"needTechnicalAcknowledgement,omitempty"` + BatchTechnicalAcknowledgements *bool `json:"batchTechnicalAcknowledgements,omitempty"` + NeedFunctionalAcknowledgement *bool `json:"needFunctionalAcknowledgement,omitempty"` + BatchFunctionalAcknowledgements *bool `json:"batchFunctionalAcknowledgements,omitempty"` + NeedLoopForValidMessages *bool `json:"needLoopForValidMessages,omitempty"` + SendSynchronousAcknowledgement *bool `json:"sendSynchronousAcknowledgement,omitempty"` + AcknowledgementControlNumberPrefix *string `json:"acknowledgementControlNumberPrefix,omitempty"` + AcknowledgementControlNumberSuffix *string `json:"acknowledgementControlNumberSuffix,omitempty"` + AcknowledgementControlNumberLowerBound *int32 `json:"acknowledgementControlNumberLowerBound,omitempty"` + AcknowledgementControlNumberUpperBound *int32 `json:"acknowledgementControlNumberUpperBound,omitempty"` + RolloverAcknowledgementControlNumber *bool `json:"rolloverAcknowledgementControlNumber,omitempty"` +} + +// EdifactAgreementContent is +type EdifactAgreementContent struct { + 
ReceiveAgreement *EdifactOneWayAgreement `json:"receiveAgreement,omitempty"` + SendAgreement *EdifactOneWayAgreement `json:"sendAgreement,omitempty"` +} + +// EdifactDelimiterOverride is +type EdifactDelimiterOverride struct { + MessageID *string `json:"messageId,omitempty"` + MessageVersion *string `json:"messageVersion,omitempty"` + MessageRelease *string `json:"messageRelease,omitempty"` + DataElementSeparator *int32 `json:"dataElementSeparator,omitempty"` + ComponentSeparator *int32 `json:"componentSeparator,omitempty"` + SegmentTerminator *int32 `json:"segmentTerminator,omitempty"` + RepetitionSeparator *int32 `json:"repetitionSeparator,omitempty"` + SegmentTerminatorSuffix SegmentTerminatorSuffix `json:"segmentTerminatorSuffix,omitempty"` + DecimalPointIndicator EdifactDecimalIndicator `json:"decimalPointIndicator,omitempty"` + ReleaseIndicator *int32 `json:"releaseIndicator,omitempty"` + MessageAssociationAssignedCode *string `json:"messageAssociationAssignedCode,omitempty"` + TargetNamespace *string `json:"targetNamespace,omitempty"` +} + +// EdifactEnvelopeOverride is +type EdifactEnvelopeOverride struct { + MessageID *string `json:"messageId,omitempty"` + MessageVersion *string `json:"messageVersion,omitempty"` + MessageRelease *string `json:"messageRelease,omitempty"` + MessageAssociationAssignedCode *string `json:"messageAssociationAssignedCode,omitempty"` + TargetNamespace *string `json:"targetNamespace,omitempty"` + FunctionalGroupID *string `json:"functionalGroupId,omitempty"` + SenderApplicationQualifier *string `json:"senderApplicationQualifier,omitempty"` + SenderApplicationID *string `json:"senderApplicationId,omitempty"` + ReceiverApplicationQualifier *string `json:"receiverApplicationQualifier,omitempty"` + ReceiverApplicationID *string `json:"receiverApplicationId,omitempty"` + ControllingAgencyCode *string `json:"controllingAgencyCode,omitempty"` + GroupHeaderMessageVersion *string `json:"groupHeaderMessageVersion,omitempty"` + 
GroupHeaderMessageRelease *string `json:"groupHeaderMessageRelease,omitempty"` + AssociationAssignedCode *string `json:"associationAssignedCode,omitempty"` + ApplicationPassword *string `json:"applicationPassword,omitempty"` +} + +// EdifactEnvelopeSettings is +type EdifactEnvelopeSettings struct { + GroupAssociationAssignedCode *string `json:"groupAssociationAssignedCode,omitempty"` + CommunicationAgreementID *string `json:"communicationAgreementId,omitempty"` + ApplyDelimiterStringAdvice *bool `json:"applyDelimiterStringAdvice,omitempty"` + CreateGroupingSegments *bool `json:"createGroupingSegments,omitempty"` + EnableDefaultGroupHeaders *bool `json:"enableDefaultGroupHeaders,omitempty"` + RecipientReferencePasswordValue *string `json:"recipientReferencePasswordValue,omitempty"` + RecipientReferencePasswordQualifier *string `json:"recipientReferencePasswordQualifier,omitempty"` + ApplicationReferenceID *string `json:"applicationReferenceId,omitempty"` + ProcessingPriorityCode *string `json:"processingPriorityCode,omitempty"` + InterchangeControlNumberLowerBound *int64 `json:"interchangeControlNumberLowerBound,omitempty"` + InterchangeControlNumberUpperBound *int64 `json:"interchangeControlNumberUpperBound,omitempty"` + RolloverInterchangeControlNumber *bool `json:"rolloverInterchangeControlNumber,omitempty"` + InterchangeControlNumberPrefix *string `json:"interchangeControlNumberPrefix,omitempty"` + InterchangeControlNumberSuffix *string `json:"interchangeControlNumberSuffix,omitempty"` + SenderReverseRoutingAddress *string `json:"senderReverseRoutingAddress,omitempty"` + ReceiverReverseRoutingAddress *string `json:"receiverReverseRoutingAddress,omitempty"` + FunctionalGroupID *string `json:"functionalGroupId,omitempty"` + GroupControllingAgencyCode *string `json:"groupControllingAgencyCode,omitempty"` + GroupMessageVersion *string `json:"groupMessageVersion,omitempty"` + GroupMessageRelease *string `json:"groupMessageRelease,omitempty"` + 
GroupControlNumberLowerBound *int64 `json:"groupControlNumberLowerBound,omitempty"` + GroupControlNumberUpperBound *int64 `json:"groupControlNumberUpperBound,omitempty"` + RolloverGroupControlNumber *bool `json:"rolloverGroupControlNumber,omitempty"` + GroupControlNumberPrefix *string `json:"groupControlNumberPrefix,omitempty"` + GroupControlNumberSuffix *string `json:"groupControlNumberSuffix,omitempty"` + GroupApplicationReceiverQualifier *string `json:"groupApplicationReceiverQualifier,omitempty"` + GroupApplicationReceiverID *string `json:"groupApplicationReceiverId,omitempty"` + GroupApplicationSenderQualifier *string `json:"groupApplicationSenderQualifier,omitempty"` + GroupApplicationSenderID *string `json:"groupApplicationSenderId,omitempty"` + GroupApplicationPassword *string `json:"groupApplicationPassword,omitempty"` + OverwriteExistingTransactionSetControlNumber *bool `json:"overwriteExistingTransactionSetControlNumber,omitempty"` + TransactionSetControlNumberPrefix *string `json:"transactionSetControlNumberPrefix,omitempty"` + TransactionSetControlNumberSuffix *string `json:"transactionSetControlNumberSuffix,omitempty"` + TransactionSetControlNumberLowerBound *int64 `json:"transactionSetControlNumberLowerBound,omitempty"` + TransactionSetControlNumberUpperBound *int64 `json:"transactionSetControlNumberUpperBound,omitempty"` + RolloverTransactionSetControlNumber *bool `json:"rolloverTransactionSetControlNumber,omitempty"` + IsTestInterchange *bool `json:"isTestInterchange,omitempty"` + SenderInternalIdentification *string `json:"senderInternalIdentification,omitempty"` + SenderInternalSubIdentification *string `json:"senderInternalSubIdentification,omitempty"` + ReceiverInternalIdentification *string `json:"receiverInternalIdentification,omitempty"` + ReceiverInternalSubIdentification *string `json:"receiverInternalSubIdentification,omitempty"` +} + +// EdifactFramingSettings is +type EdifactFramingSettings struct { + ServiceCodeListDirectoryVersion 
*string `json:"serviceCodeListDirectoryVersion,omitempty"` + CharacterEncoding *string `json:"characterEncoding,omitempty"` + ProtocolVersion *int32 `json:"protocolVersion,omitempty"` + DataElementSeparator *int32 `json:"dataElementSeparator,omitempty"` + ComponentSeparator *int32 `json:"componentSeparator,omitempty"` + SegmentTerminator *int32 `json:"segmentTerminator,omitempty"` + ReleaseIndicator *int32 `json:"releaseIndicator,omitempty"` + RepetitionSeparator *int32 `json:"repetitionSeparator,omitempty"` + CharacterSet EdifactCharacterSet `json:"characterSet,omitempty"` + DecimalPointIndicator EdifactDecimalIndicator `json:"decimalPointIndicator,omitempty"` + SegmentTerminatorSuffix SegmentTerminatorSuffix `json:"segmentTerminatorSuffix,omitempty"` +} + +// EdifactMessageFilter is +type EdifactMessageFilter struct { + MessageFilterType MessageFilterType `json:"messageFilterType,omitempty"` +} + +// EdifactMessageIdentifier is +type EdifactMessageIdentifier struct { + MessageID *string `json:"messageId,omitempty"` +} + +// EdifactOneWayAgreement is +type EdifactOneWayAgreement struct { + SenderBusinessIdentity *BusinessIdentity `json:"senderBusinessIdentity,omitempty"` + ReceiverBusinessIdentity *BusinessIdentity `json:"receiverBusinessIdentity,omitempty"` + ProtocolSettings *EdifactProtocolSettings `json:"protocolSettings,omitempty"` +} + +// EdifactProcessingSettings is +type EdifactProcessingSettings struct { + MaskSecurityInfo *bool `json:"maskSecurityInfo,omitempty"` + PreserveInterchange *bool `json:"preserveInterchange,omitempty"` + SuspendInterchangeOnError *bool `json:"suspendInterchangeOnError,omitempty"` + CreateEmptyXMLTagsForTrailingSeparators *bool `json:"createEmptyXmlTagsForTrailingSeparators,omitempty"` + UseDotAsDecimalSeparator *bool `json:"useDotAsDecimalSeparator,omitempty"` +} + +// EdifactProtocolSettings is +type EdifactProtocolSettings struct { + ValidationSettings *EdifactValidationSettings `json:"validationSettings,omitempty"` + 
FramingSettings *EdifactFramingSettings `json:"framingSettings,omitempty"` + EnvelopeSettings *EdifactEnvelopeSettings `json:"envelopeSettings,omitempty"` + AcknowledgementSettings *EdifactAcknowledgementSettings `json:"acknowledgementSettings,omitempty"` + MessageFilter *EdifactMessageFilter `json:"messageFilter,omitempty"` + ProcessingSettings *EdifactProcessingSettings `json:"processingSettings,omitempty"` + EnvelopeOverrides *[]EdifactEnvelopeOverride `json:"envelopeOverrides,omitempty"` + MessageFilterList *[]EdifactMessageIdentifier `json:"messageFilterList,omitempty"` + SchemaReferences *[]EdifactSchemaReference `json:"schemaReferences,omitempty"` + ValidationOverrides *[]EdifactValidationOverride `json:"validationOverrides,omitempty"` + EdifactDelimiterOverrides *[]EdifactDelimiterOverride `json:"edifactDelimiterOverrides,omitempty"` +} + +// EdifactSchemaReference is +type EdifactSchemaReference struct { + MessageID *string `json:"messageId,omitempty"` + MessageVersion *string `json:"messageVersion,omitempty"` + MessageRelease *string `json:"messageRelease,omitempty"` + SenderApplicationID *string `json:"senderApplicationId,omitempty"` + SenderApplicationQualifier *string `json:"senderApplicationQualifier,omitempty"` + AssociationAssignedCode *string `json:"associationAssignedCode,omitempty"` + SchemaName *string `json:"schemaName,omitempty"` +} + +// EdifactValidationOverride is +type EdifactValidationOverride struct { + MessageID *string `json:"messageId,omitempty"` + EnforceCharacterSet *bool `json:"enforceCharacterSet,omitempty"` + ValidateEDITypes *bool `json:"validateEDITypes,omitempty"` + ValidateXSDTypes *bool `json:"validateXSDTypes,omitempty"` + AllowLeadingAndTrailingSpacesAndZeroes *bool `json:"allowLeadingAndTrailingSpacesAndZeroes,omitempty"` + TrailingSeparatorPolicy TrailingSeparatorPolicy `json:"trailingSeparatorPolicy,omitempty"` + TrimLeadingAndTrailingSpacesAndZeroes *bool `json:"trimLeadingAndTrailingSpacesAndZeroes,omitempty"` +} + 
+// EdifactValidationSettings is +type EdifactValidationSettings struct { + ValidateCharacterSet *bool `json:"validateCharacterSet,omitempty"` + CheckDuplicateInterchangeControlNumber *bool `json:"checkDuplicateInterchangeControlNumber,omitempty"` + InterchangeControlNumberValidityDays *int32 `json:"interchangeControlNumberValidityDays,omitempty"` + CheckDuplicateGroupControlNumber *bool `json:"checkDuplicateGroupControlNumber,omitempty"` + CheckDuplicateTransactionSetControlNumber *bool `json:"checkDuplicateTransactionSetControlNumber,omitempty"` + ValidateEDITypes *bool `json:"validateEDITypes,omitempty"` + ValidateXSDTypes *bool `json:"validateXSDTypes,omitempty"` + AllowLeadingAndTrailingSpacesAndZeroes *bool `json:"allowLeadingAndTrailingSpacesAndZeroes,omitempty"` + TrimLeadingAndTrailingSpacesAndZeroes *bool `json:"trimLeadingAndTrailingSpacesAndZeroes,omitempty"` + TrailingSeparatorPolicy TrailingSeparatorPolicy `json:"trailingSeparatorPolicy,omitempty"` } -// Workflow is -type Workflow struct { +// IntegrationAccount is +type IntegrationAccount struct { autorest.Response `json:"-"` - ID *string `json:"id,omitempty"` - Name *string `json:"name,omitempty"` - Type *string `json:"type,omitempty"` - Location *string `json:"location,omitempty"` - Tags *map[string]*string `json:"tags,omitempty"` - Properties *WorkflowProperties `json:"properties,omitempty"` + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` + Properties *map[string]interface{} `json:"properties,omitempty"` + Sku *IntegrationAccountSku `json:"sku,omitempty"` } -// WorkflowAccessKey is -type WorkflowAccessKey struct { +// IntegrationAccountAgreement is +type IntegrationAccountAgreement struct { autorest.Response `json:"-"` - ID *string `json:"id,omitempty"` - Properties *WorkflowAccessKeyProperties `json:"properties,omitempty"` - Name 
*string `json:"name,omitempty"` - Type *string `json:"type,omitempty"` + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` + Properties *IntegrationAccountAgreementProperties `json:"properties,omitempty"` } -// WorkflowAccessKeyListResult is -type WorkflowAccessKeyListResult struct { +// IntegrationAccountAgreementFilter is +type IntegrationAccountAgreementFilter struct { + AgreementType AgreementType `json:"agreementType,omitempty"` +} + +// IntegrationAccountAgreementListResult is +type IntegrationAccountAgreementListResult struct { autorest.Response `json:"-"` - Value *[]WorkflowAccessKey `json:"value,omitempty"` - NextLink *string `json:"nextLink,omitempty"` + Value *[]IntegrationAccountAgreement `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` } -// WorkflowAccessKeyListResultPreparer prepares a request to retrieve the next set of results. It returns +// IntegrationAccountAgreementListResultPreparer prepares a request to retrieve the next set of results. It returns // nil if no more results exist. 
-func (client WorkflowAccessKeyListResult) WorkflowAccessKeyListResultPreparer() (*http.Request, error) { +func (client IntegrationAccountAgreementListResult) IntegrationAccountAgreementListResultPreparer() (*http.Request, error) { if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { return nil, nil } @@ -294,27 +689,40 @@ autorest.WithBaseURL(to.String(client.NextLink))) } -// WorkflowAccessKeyProperties is -type WorkflowAccessKeyProperties struct { - NotBefore *date.Time `json:"notBefore,omitempty"` - NotAfter *date.Time `json:"notAfter,omitempty"` +// IntegrationAccountAgreementProperties is +type IntegrationAccountAgreementProperties struct { + CreatedTime *date.Time `json:"createdTime,omitempty"` + ChangedTime *date.Time `json:"changedTime,omitempty"` + Metadata *map[string]interface{} `json:"metadata,omitempty"` + AgreementType AgreementType `json:"agreementType,omitempty"` + HostPartner *string `json:"hostPartner,omitempty"` + GuestPartner *string `json:"guestPartner,omitempty"` + HostIdentity *BusinessIdentity `json:"hostIdentity,omitempty"` + GuestIdentity *BusinessIdentity `json:"guestIdentity,omitempty"` + Content *AgreementContent `json:"content,omitempty"` } -// WorkflowFilter is -type WorkflowFilter struct { - State WorkflowState `json:"state,omitempty"` +// IntegrationAccountCertificate is +type IntegrationAccountCertificate struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` + Properties *IntegrationAccountCertificateProperties `json:"properties,omitempty"` } -// WorkflowListResult is -type WorkflowListResult struct { +// IntegrationAccountCertificateListResult is +type IntegrationAccountCertificateListResult struct { autorest.Response `json:"-"` - Value *[]Workflow `json:"value,omitempty"` - NextLink *string `json:"nextLink,omitempty"` + 
Value *[]IntegrationAccountCertificate `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` } -// WorkflowListResultPreparer prepares a request to retrieve the next set of results. It returns +// IntegrationAccountCertificateListResultPreparer prepares a request to retrieve the next set of results. It returns // nil if no more results exist. -func (client WorkflowListResult) WorkflowListResultPreparer() (*http.Request, error) { +func (client IntegrationAccountCertificateListResult) IntegrationAccountCertificateListResultPreparer() (*http.Request, error) { if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { return nil, nil } @@ -324,69 +732,40 @@ autorest.WithBaseURL(to.String(client.NextLink))) } -// WorkflowOutputParameter is -type WorkflowOutputParameter struct { - Type ParameterType `json:"type,omitempty"` - Value *map[string]interface{} `json:"value,omitempty"` - Metadata *map[string]interface{} `json:"metadata,omitempty"` - Error *map[string]interface{} `json:"error,omitempty"` +// IntegrationAccountCertificateProperties is +type IntegrationAccountCertificateProperties struct { + CreatedTime *date.Time `json:"createdTime,omitempty"` + ChangedTime *date.Time `json:"changedTime,omitempty"` + Metadata *map[string]interface{} `json:"metadata,omitempty"` + Key *KeyVaultKeyReference `json:"key,omitempty"` + PublicCertificate *string `json:"publicCertificate,omitempty"` } -// WorkflowParameter is -type WorkflowParameter struct { - Type ParameterType `json:"type,omitempty"` - Value *map[string]interface{} `json:"value,omitempty"` - Metadata *map[string]interface{} `json:"metadata,omitempty"` -} - -// WorkflowProperties is -type WorkflowProperties struct { - ProvisioningState WorkflowProvisioningState `json:"provisioningState,omitempty"` - CreatedTime *date.Time `json:"createdTime,omitempty"` - ChangedTime *date.Time `json:"changedTime,omitempty"` - State WorkflowState `json:"state,omitempty"` - Version *string 
`json:"version,omitempty"` - AccessEndpoint *string `json:"accessEndpoint,omitempty"` - Sku *Sku `json:"sku,omitempty"` - DefinitionLink *ContentLink `json:"definitionLink,omitempty"` - Definition *map[string]interface{} `json:"definition,omitempty"` - ParametersLink *ContentLink `json:"parametersLink,omitempty"` - Parameters *map[string]*WorkflowParameter `json:"parameters,omitempty"` -} - -// WorkflowRun is -type WorkflowRun struct { - autorest.Response `json:"-"` - ID *string `json:"id,omitempty"` - Properties *WorkflowRunProperties `json:"properties,omitempty"` - Name *string `json:"name,omitempty"` - Type *string `json:"type,omitempty"` -} - -// WorkflowRunAction is -type WorkflowRunAction struct { - autorest.Response `json:"-"` - ID *string `json:"id,omitempty"` - Properties *WorkflowRunActionProperties `json:"properties,omitempty"` - Name *string `json:"name,omitempty"` - Type *string `json:"type,omitempty"` +// IntegrationAccountContentHash is +type IntegrationAccountContentHash struct { + Algorithm *string `json:"algorithm,omitempty"` + Value *string `json:"value,omitempty"` } -// WorkflowRunActionFilter is -type WorkflowRunActionFilter struct { - Status WorkflowStatus `json:"status,omitempty"` +// IntegrationAccountContentLink is +type IntegrationAccountContentLink struct { + URI *string `json:"uri,omitempty"` + ContentVersion *string `json:"contentVersion,omitempty"` + ContentSize *int64 `json:"contentSize,omitempty"` + ContentHash *IntegrationAccountContentHash `json:"contentHash,omitempty"` + Metadata *map[string]interface{} `json:"metadata,omitempty"` } -// WorkflowRunActionListResult is -type WorkflowRunActionListResult struct { +// IntegrationAccountListResult is +type IntegrationAccountListResult struct { autorest.Response `json:"-"` - Value *[]WorkflowRunAction `json:"value,omitempty"` - NextLink *string `json:"nextLink,omitempty"` + Value *[]IntegrationAccount `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` } -// 
WorkflowRunActionListResultPreparer prepares a request to retrieve the next set of results. It returns +// IntegrationAccountListResultPreparer prepares a request to retrieve the next set of results. It returns // nil if no more results exist. -func (client WorkflowRunActionListResult) WorkflowRunActionListResultPreparer() (*http.Request, error) { +func (client IntegrationAccountListResult) IntegrationAccountListResultPreparer() (*http.Request, error) { if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { return nil, nil } @@ -396,33 +775,32 @@ autorest.WithBaseURL(to.String(client.NextLink))) } -// WorkflowRunActionProperties is -type WorkflowRunActionProperties struct { - StartTime *date.Time `json:"startTime,omitempty"` - EndTime *date.Time `json:"endTime,omitempty"` - Status WorkflowStatus `json:"status,omitempty"` - Code *string `json:"code,omitempty"` - Error *map[string]interface{} `json:"error,omitempty"` - TrackingID *string `json:"trackingId,omitempty"` - InputsLink *ContentLink `json:"inputsLink,omitempty"` - OutputsLink *ContentLink `json:"outputsLink,omitempty"` +// IntegrationAccountMap is +type IntegrationAccountMap struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` + Properties *IntegrationAccountMapProperties `json:"properties,omitempty"` } -// WorkflowRunFilter is -type WorkflowRunFilter struct { - Status WorkflowStatus `json:"status,omitempty"` +// IntegrationAccountMapFilter is +type IntegrationAccountMapFilter struct { + SchemaType MapType `json:"schemaType,omitempty"` } -// WorkflowRunListResult is -type WorkflowRunListResult struct { +// IntegrationAccountMapListResult is +type IntegrationAccountMapListResult struct { autorest.Response `json:"-"` - Value *[]WorkflowRun `json:"value,omitempty"` - NextLink *string 
`json:"nextLink,omitempty"` + Value *[]IntegrationAccountMap `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` } -// WorkflowRunListResultPreparer prepares a request to retrieve the next set of results. It returns +// IntegrationAccountMapListResultPreparer prepares a request to retrieve the next set of results. It returns // nil if no more results exist. -func (client WorkflowRunListResult) WorkflowRunListResultPreparer() (*http.Request, error) { +func (client IntegrationAccountMapListResult) IntegrationAccountMapListResultPreparer() (*http.Request, error) { if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { return nil, nil } @@ -432,74 +810,43 @@ autorest.WithBaseURL(to.String(client.NextLink))) } -// WorkflowRunProperties is -type WorkflowRunProperties struct { - StartTime *date.Time `json:"startTime,omitempty"` - EndTime *date.Time `json:"endTime,omitempty"` - Status WorkflowStatus `json:"status,omitempty"` - Code *string `json:"code,omitempty"` - Error *map[string]interface{} `json:"error,omitempty"` - CorrelationID *string `json:"correlationId,omitempty"` - Workflow *ResourceReference `json:"workflow,omitempty"` - Trigger *WorkflowRunTrigger `json:"trigger,omitempty"` - Outputs *map[string]*WorkflowOutputParameter `json:"outputs,omitempty"` -} - -// WorkflowRunTrigger is -type WorkflowRunTrigger struct { - Name *string `json:"name,omitempty"` - Inputs *map[string]interface{} `json:"inputs,omitempty"` - InputsLink *ContentLink `json:"inputsLink,omitempty"` - Outputs *map[string]interface{} `json:"outputs,omitempty"` - OutputsLink *ContentLink `json:"outputsLink,omitempty"` - StartTime *date.Time `json:"startTime,omitempty"` - EndTime *date.Time `json:"endTime,omitempty"` - TrackingID *string `json:"trackingId,omitempty"` - Code *string `json:"code,omitempty"` - Status WorkflowStatus `json:"status,omitempty"` - Error *map[string]interface{} `json:"error,omitempty"` +// IntegrationAccountMapProperties is +type 
IntegrationAccountMapProperties struct { + MapType MapType `json:"mapType,omitempty"` + CreatedTime *date.Time `json:"createdTime,omitempty"` + ChangedTime *date.Time `json:"changedTime,omitempty"` + Content *map[string]interface{} `json:"content,omitempty"` + ContentType *string `json:"contentType,omitempty"` + ContentLink *IntegrationAccountContentLink `json:"contentLink,omitempty"` + Metadata *map[string]interface{} `json:"metadata,omitempty"` } -// WorkflowSecretKeys is -type WorkflowSecretKeys struct { - autorest.Response `json:"-"` - PrimarySecretKey *string `json:"primarySecretKey,omitempty"` - SecondarySecretKey *string `json:"secondarySecretKey,omitempty"` -} - -// WorkflowTrigger is -type WorkflowTrigger struct { +// IntegrationAccountPartner is +type IntegrationAccountPartner struct { autorest.Response `json:"-"` - ID *string `json:"id,omitempty"` - Properties *WorkflowTriggerProperties `json:"properties,omitempty"` - Name *string `json:"name,omitempty"` - Type *string `json:"type,omitempty"` + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` + Properties *IntegrationAccountPartnerProperties `json:"properties,omitempty"` } -// WorkflowTriggerFilter is -type WorkflowTriggerFilter struct { - State WorkflowState `json:"state,omitempty"` +// IntegrationAccountPartnerFilter is +type IntegrationAccountPartnerFilter struct { + PartnerType PartnerType `json:"partnerType,omitempty"` } -// WorkflowTriggerHistory is -type WorkflowTriggerHistory struct { +// IntegrationAccountPartnerListResult is +type IntegrationAccountPartnerListResult struct { autorest.Response `json:"-"` - ID *string `json:"id,omitempty"` - Properties *WorkflowTriggerHistoryProperties `json:"properties,omitempty"` - Name *string `json:"name,omitempty"` - Type *string `json:"type,omitempty"` + Value *[]IntegrationAccountPartner 
`json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` } -// WorkflowTriggerHistoryListResult is -type WorkflowTriggerHistoryListResult struct { - autorest.Response `json:"-"` - Value *[]WorkflowTriggerHistory `json:"value,omitempty"` - NextLink *string `json:"nextLink,omitempty"` -} - -// WorkflowTriggerHistoryListResultPreparer prepares a request to retrieve the next set of results. It returns +// IntegrationAccountPartnerListResultPreparer prepares a request to retrieve the next set of results. It returns // nil if no more results exist. -func (client WorkflowTriggerHistoryListResult) WorkflowTriggerHistoryListResultPreparer() (*http.Request, error) { +func (client IntegrationAccountPartnerListResult) IntegrationAccountPartnerListResultPreparer() (*http.Request, error) { if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { return nil, nil } @@ -509,30 +856,50 @@ autorest.WithBaseURL(to.String(client.NextLink))) } -// WorkflowTriggerHistoryProperties is -type WorkflowTriggerHistoryProperties struct { - StartTime *date.Time `json:"startTime,omitempty"` - EndTime *date.Time `json:"endTime,omitempty"` - Status WorkflowStatus `json:"status,omitempty"` - Code *string `json:"code,omitempty"` - Error *map[string]interface{} `json:"error,omitempty"` - TrackingID *string `json:"trackingId,omitempty"` - InputsLink *ContentLink `json:"inputsLink,omitempty"` - OutputsLink *ContentLink `json:"outputsLink,omitempty"` - Fired *bool `json:"fired,omitempty"` - Run *ResourceReference `json:"run,omitempty"` +// IntegrationAccountPartnerProperties is +type IntegrationAccountPartnerProperties struct { + PartnerType PartnerType `json:"partnerType,omitempty"` + CreatedTime *date.Time `json:"createdTime,omitempty"` + ChangedTime *date.Time `json:"changedTime,omitempty"` + Metadata *map[string]interface{} `json:"metadata,omitempty"` + Content *PartnerContent `json:"content,omitempty"` +} + +// IntegrationAccountResource is +type IntegrationAccountResource 
struct { + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` +} + +// IntegrationAccountSchema is +type IntegrationAccountSchema struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` + Properties *IntegrationAccountSchemaProperties `json:"properties,omitempty"` +} + +// IntegrationAccountSchemaFilter is +type IntegrationAccountSchemaFilter struct { + SchemaType SchemaType `json:"schemaType,omitempty"` } -// WorkflowTriggerListResult is -type WorkflowTriggerListResult struct { +// IntegrationAccountSchemaListResult is +type IntegrationAccountSchemaListResult struct { autorest.Response `json:"-"` - Value *[]WorkflowTrigger `json:"value,omitempty"` - NextLink *string `json:"nextLink,omitempty"` + Value *[]IntegrationAccountSchema `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` } -// WorkflowTriggerListResultPreparer prepares a request to retrieve the next set of results. It returns +// IntegrationAccountSchemaListResultPreparer prepares a request to retrieve the next set of results. It returns // nil if no more results exist. 
-func (client WorkflowTriggerListResult) WorkflowTriggerListResultPreparer() (*http.Request, error) { +func (client IntegrationAccountSchemaListResult) IntegrationAccountSchemaListResultPreparer() (*http.Request, error) { if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { return nil, nil } @@ -542,48 +909,218 @@ autorest.WithBaseURL(to.String(client.NextLink))) } -// WorkflowTriggerProperties is -type WorkflowTriggerProperties struct { - ProvisioningState WorkflowTriggerProvisioningState `json:"provisioningState,omitempty"` - CreatedTime *date.Time `json:"createdTime,omitempty"` - ChangedTime *date.Time `json:"changedTime,omitempty"` - State WorkflowState `json:"state,omitempty"` - Status WorkflowStatus `json:"status,omitempty"` - LastExecutionTime *date.Time `json:"lastExecutionTime,omitempty"` - NextExecutionTime *date.Time `json:"nextExecutionTime,omitempty"` - Recurrence *WorkflowTriggerRecurrence `json:"recurrence,omitempty"` - Workflow *ResourceReference `json:"workflow,omitempty"` -} - -// WorkflowTriggerRecurrence is -type WorkflowTriggerRecurrence struct { - Frequency RecurrenceFrequency `json:"frequency,omitempty"` - Interval *int `json:"interval,omitempty"` - StartTime *date.Time `json:"startTime,omitempty"` - TimeZone *string `json:"timeZone,omitempty"` -} - -// WorkflowVersion is -type WorkflowVersion struct { - autorest.Response `json:"-"` - ID *string `json:"id,omitempty"` - Name *string `json:"name,omitempty"` - Type *string `json:"type,omitempty"` - Location *string `json:"location,omitempty"` - Tags *map[string]*string `json:"tags,omitempty"` - Properties *WorkflowVersionProperties `json:"properties,omitempty"` -} - -// WorkflowVersionProperties is -type WorkflowVersionProperties struct { - CreatedTime *date.Time `json:"createdTime,omitempty"` - ChangedTime *date.Time `json:"changedTime,omitempty"` - State WorkflowState `json:"state,omitempty"` - Version *string `json:"version,omitempty"` - AccessEndpoint *string 
`json:"accessEndpoint,omitempty"` - Sku *Sku `json:"sku,omitempty"` - DefinitionLink *ContentLink `json:"definitionLink,omitempty"` - Definition *map[string]interface{} `json:"definition,omitempty"` - ParametersLink *ContentLink `json:"parametersLink,omitempty"` - Parameters *map[string]*WorkflowParameter `json:"parameters,omitempty"` +// IntegrationAccountSchemaProperties is +type IntegrationAccountSchemaProperties struct { + SchemaType SchemaType `json:"schemaType,omitempty"` + TargetNamespace *string `json:"targetNamespace,omitempty"` + CreatedTime *date.Time `json:"createdTime,omitempty"` + ChangedTime *date.Time `json:"changedTime,omitempty"` + Content *map[string]interface{} `json:"content,omitempty"` + ContentType *string `json:"contentType,omitempty"` + ContentLink *IntegrationAccountContentLink `json:"contentLink,omitempty"` + Metadata *map[string]interface{} `json:"metadata,omitempty"` +} + +// IntegrationAccountSku is +type IntegrationAccountSku struct { + Name SkuName `json:"name,omitempty"` +} + +// KeyVaultKeyReference is +type KeyVaultKeyReference struct { + KeyVault *ResourceReference `json:"keyVault,omitempty"` + KeyName *string `json:"keyName,omitempty"` + KeyVersion *string `json:"keyVersion,omitempty"` +} + +// ListCallbackURLParameters is +type ListCallbackURLParameters struct { + NotAfter *date.Time `json:"NotAfter,omitempty"` +} + +// PartnerContent is +type PartnerContent struct { + B2b *B2BPartnerContent `json:"b2b,omitempty"` +} + +// ResourceReference is +type ResourceReference struct { + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` +} + +// X12AcknowledgementSettings is +type X12AcknowledgementSettings struct { + NeedTechnicalAcknowledgement *bool `json:"needTechnicalAcknowledgement,omitempty"` + BatchTechnicalAcknowledgements *bool `json:"batchTechnicalAcknowledgements,omitempty"` + NeedFunctionalAcknowledgement *bool `json:"needFunctionalAcknowledgement,omitempty"` + 
FunctionalAcknowledgementVersion *string `json:"functionalAcknowledgementVersion,omitempty"` + BatchFunctionalAcknowledgements *bool `json:"batchFunctionalAcknowledgements,omitempty"` + NeedImplementationAcknowledgement *bool `json:"needImplementationAcknowledgement,omitempty"` + ImplementationAcknowledgementVersion *string `json:"implementationAcknowledgementVersion,omitempty"` + BatchImplementationAcknowledgements *bool `json:"batchImplementationAcknowledgements,omitempty"` + NeedLoopForValidMessages *bool `json:"needLoopForValidMessages,omitempty"` + SendSynchronousAcknowledgement *bool `json:"sendSynchronousAcknowledgement,omitempty"` + AcknowledgementControlNumberPrefix *string `json:"acknowledgementControlNumberPrefix,omitempty"` + AcknowledgementControlNumberSuffix *string `json:"acknowledgementControlNumberSuffix,omitempty"` + AcknowledgementControlNumberLowerBound *int32 `json:"acknowledgementControlNumberLowerBound,omitempty"` + AcknowledgementControlNumberUpperBound *int32 `json:"acknowledgementControlNumberUpperBound,omitempty"` + RolloverAcknowledgementControlNumber *bool `json:"rolloverAcknowledgementControlNumber,omitempty"` +} + +// X12AgreementContent is +type X12AgreementContent struct { + ReceiveAgreement *X12OneWayAgreement `json:"receiveAgreement,omitempty"` + SendAgreement *X12OneWayAgreement `json:"sendAgreement,omitempty"` +} + +// X12DelimiterOverrides is +type X12DelimiterOverrides struct { + ProtocolVersion *string `json:"protocolVersion,omitempty"` + MessageID *string `json:"messageId,omitempty"` + DataElementSeparator *int32 `json:"dataElementSeparator,omitempty"` + ComponentSeparator *int32 `json:"componentSeparator,omitempty"` + SegmentTerminator *int32 `json:"segmentTerminator,omitempty"` + SegmentTerminatorSuffix SegmentTerminatorSuffix `json:"segmentTerminatorSuffix,omitempty"` + ReplaceCharacter *int32 `json:"replaceCharacter,omitempty"` + ReplaceSeparatorsInPayload *bool `json:"replaceSeparatorsInPayload,omitempty"` + 
TargetNamespace *string `json:"targetNamespace,omitempty"` +} + +// X12EnvelopeOverride is +type X12EnvelopeOverride struct { + TargetNamespace *string `json:"targetNamespace,omitempty"` + ProtocolVersion *string `json:"protocolVersion,omitempty"` + MessageID *string `json:"messageId,omitempty"` + ResponsibleAgencyCode *int32 `json:"responsibleAgencyCode,omitempty"` + HeaderVersion *string `json:"headerVersion,omitempty"` + SenderApplicationID *string `json:"senderApplicationId,omitempty"` + ReceiverApplicationID *string `json:"receiverApplicationId,omitempty"` + FunctionalIdentifierCode *string `json:"functionalIdentifierCode,omitempty"` + DateFormat X12DateFormat `json:"dateFormat,omitempty"` + TimeFormat X12TimeFormat `json:"timeFormat,omitempty"` +} + +// X12EnvelopeSettings is +type X12EnvelopeSettings struct { + ControlStandardsID *int32 `json:"controlStandardsId,omitempty"` + UseControlStandardsIDAsRepetitionCharacter *bool `json:"useControlStandardsIdAsRepetitionCharacter,omitempty"` + SenderApplicationID *string `json:"senderApplicationId,omitempty"` + ReceiverApplicationID *string `json:"receiverApplicationId,omitempty"` + ControlVersionNumber *string `json:"controlVersionNumber,omitempty"` + InterchangeControlNumberLowerBound *int32 `json:"interchangeControlNumberLowerBound,omitempty"` + InterchangeControlNumberUpperBound *int32 `json:"interchangeControlNumberUpperBound,omitempty"` + RolloverInterchangeControlNumber *bool `json:"rolloverInterchangeControlNumber,omitempty"` + EnableDefaultGroupHeaders *bool `json:"enableDefaultGroupHeaders,omitempty"` + FunctionalGroupID *string `json:"functionalGroupId,omitempty"` + GroupControlNumberLowerBound *int32 `json:"groupControlNumberLowerBound,omitempty"` + GroupControlNumberUpperBound *int32 `json:"groupControlNumberUpperBound,omitempty"` + RolloverGroupControlNumber *bool `json:"rolloverGroupControlNumber,omitempty"` + GroupHeaderAgencyCode *string `json:"groupHeaderAgencyCode,omitempty"` + GroupHeaderVersion 
*string `json:"groupHeaderVersion,omitempty"` + TransactionSetControlNumberLowerBound *int32 `json:"transactionSetControlNumberLowerBound,omitempty"` + TransactionSetControlNumberUpperBound *int32 `json:"transactionSetControlNumberUpperBound,omitempty"` + RolloverTransactionSetControlNumber *bool `json:"rolloverTransactionSetControlNumber,omitempty"` + TransactionSetControlNumberPrefix *string `json:"transactionSetControlNumberPrefix,omitempty"` + TransactionSetControlNumberSuffix *string `json:"transactionSetControlNumberSuffix,omitempty"` + OverwriteExistingTransactionSetControlNumber *bool `json:"overwriteExistingTransactionSetControlNumber,omitempty"` + GroupHeaderDateFormat X12DateFormat `json:"groupHeaderDateFormat,omitempty"` + GroupHeaderTimeFormat X12TimeFormat `json:"groupHeaderTimeFormat,omitempty"` + UsageIndicator UsageIndicator `json:"usageIndicator,omitempty"` +} + +// X12FramingSettings is +type X12FramingSettings struct { + DataElementSeparator *int32 `json:"dataElementSeparator,omitempty"` + ComponentSeparator *int32 `json:"componentSeparator,omitempty"` + ReplaceSeparatorsInPayload *bool `json:"replaceSeparatorsInPayload,omitempty"` + ReplaceCharacter *int32 `json:"replaceCharacter,omitempty"` + SegmentTerminator *int32 `json:"segmentTerminator,omitempty"` + CharacterSet X12CharacterSet `json:"characterSet,omitempty"` + SegmentTerminatorSuffix SegmentTerminatorSuffix `json:"segmentTerminatorSuffix,omitempty"` +} + +// X12MessageFilter is +type X12MessageFilter struct { + MessageFilterType MessageFilterType `json:"messageFilterType,omitempty"` +} + +// X12MessageIdentifier is +type X12MessageIdentifier struct { + MessageID *string `json:"messageId,omitempty"` +} + +// X12OneWayAgreement is +type X12OneWayAgreement struct { + SenderBusinessIdentity *BusinessIdentity `json:"senderBusinessIdentity,omitempty"` + ReceiverBusinessIdentity *BusinessIdentity `json:"receiverBusinessIdentity,omitempty"` + ProtocolSettings *X12ProtocolSettings 
`json:"protocolSettings,omitempty"` +} + +// X12ProcessingSettings is +type X12ProcessingSettings struct { + MaskSecurityInfo *bool `json:"maskSecurityInfo,omitempty"` + ConvertImpliedDecimal *bool `json:"convertImpliedDecimal,omitempty"` + PreserveInterchange *bool `json:"preserveInterchange,omitempty"` + SuspendInterchangeOnError *bool `json:"suspendInterchangeOnError,omitempty"` + CreateEmptyXMLTagsForTrailingSeparators *bool `json:"createEmptyXmlTagsForTrailingSeparators,omitempty"` + UseDotAsDecimalSeparator *bool `json:"useDotAsDecimalSeparator,omitempty"` +} + +// X12ProtocolSettings is +type X12ProtocolSettings struct { + ValidationSettings *X12ValidationSettings `json:"validationSettings,omitempty"` + FramingSettings *X12FramingSettings `json:"framingSettings,omitempty"` + EnvelopeSettings *X12EnvelopeSettings `json:"envelopeSettings,omitempty"` + AcknowledgementSettings *X12AcknowledgementSettings `json:"acknowledgementSettings,omitempty"` + MessageFilter *X12MessageFilter `json:"messageFilter,omitempty"` + SecuritySettings *X12SecuritySettings `json:"securitySettings,omitempty"` + ProcessingSettings *X12ProcessingSettings `json:"processingSettings,omitempty"` + EnvelopeOverrides *[]X12EnvelopeOverride `json:"envelopeOverrides,omitempty"` + ValidationOverrides *[]X12ValidationOverride `json:"validationOverrides,omitempty"` + MessageFilterList *[]X12MessageIdentifier `json:"messageFilterList,omitempty"` + SchemaReferences *[]X12SchemaReference `json:"schemaReferences,omitempty"` + X12DelimiterOverrides *[]X12DelimiterOverrides `json:"x12DelimiterOverrides,omitempty"` +} + +// X12SchemaReference is +type X12SchemaReference struct { + MessageID *string `json:"messageId,omitempty"` + SenderApplicationID *string `json:"senderApplicationId,omitempty"` + SchemaVersion *string `json:"schemaVersion,omitempty"` + SchemaName *string `json:"schemaName,omitempty"` +} + +// X12SecuritySettings is +type X12SecuritySettings struct { + AuthorizationQualifier *string 
`json:"authorizationQualifier,omitempty"` + AuthorizationValue *string `json:"authorizationValue,omitempty"` + SecurityQualifier *string `json:"securityQualifier,omitempty"` + PasswordValue *string `json:"passwordValue,omitempty"` +} + +// X12ValidationOverride is +type X12ValidationOverride struct { + MessageID *string `json:"messageId,omitempty"` + ValidateEDITypes *bool `json:"validateEDITypes,omitempty"` + ValidateXSDTypes *bool `json:"validateXSDTypes,omitempty"` + AllowLeadingAndTrailingSpacesAndZeroes *bool `json:"allowLeadingAndTrailingSpacesAndZeroes,omitempty"` + ValidateCharacterSet *bool `json:"validateCharacterSet,omitempty"` + TrimLeadingAndTrailingSpacesAndZeroes *bool `json:"trimLeadingAndTrailingSpacesAndZeroes,omitempty"` + TrailingSeparatorPolicy TrailingSeparatorPolicy `json:"trailingSeparatorPolicy,omitempty"` +} + +// X12ValidationSettings is +type X12ValidationSettings struct { + ValidateCharacterSet *bool `json:"validateCharacterSet,omitempty"` + CheckDuplicateInterchangeControlNumber *bool `json:"checkDuplicateInterchangeControlNumber,omitempty"` + InterchangeControlNumberValidityDays *int32 `json:"interchangeControlNumberValidityDays,omitempty"` + CheckDuplicateGroupControlNumber *bool `json:"checkDuplicateGroupControlNumber,omitempty"` + CheckDuplicateTransactionSetControlNumber *bool `json:"checkDuplicateTransactionSetControlNumber,omitempty"` + ValidateEDITypes *bool `json:"validateEDITypes,omitempty"` + ValidateXSDTypes *bool `json:"validateXSDTypes,omitempty"` + AllowLeadingAndTrailingSpacesAndZeroes *bool `json:"allowLeadingAndTrailingSpacesAndZeroes,omitempty"` + TrimLeadingAndTrailingSpacesAndZeroes *bool `json:"trimLeadingAndTrailingSpacesAndZeroes,omitempty"` + TrailingSeparatorPolicy TrailingSeparatorPolicy `json:"trailingSeparatorPolicy,omitempty"` } diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/logic/version.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/logic/version.go --- 
juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/logic/version.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/logic/version.go 2016-10-13 14:32:06.000000000 +0000 @@ -14,7 +14,7 @@ // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 // Changes may cause incorrect behavior and will be lost if the code is // regenerated. @@ -23,18 +23,18 @@ ) const ( - major = "0" - minor = "3" + major = "3" + minor = "1" patch = "0" // Always begin a "tag" with a dash (as per http://semver.org) tag = "-beta" semVerFormat = "%s.%s.%s%s" - userAgentFormat = "Azure-SDK-for-Go/%s;Package arm/%s;API %s" + userAgentFormat = "Azure-SDK-for-Go/%s arm-%s/%s" ) // UserAgent returns the UserAgent string to use when sending http.Requests. func UserAgent() string { - return fmt.Sprintf(userAgentFormat, Version(), "logic", "2015-02-01-preview") + return fmt.Sprintf(userAgentFormat, Version(), "logic", "2015-08-01-preview") } // Version returns the semantic version (see http://semver.org) of the client. diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/logic/workflowaccesskeys.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/logic/workflowaccesskeys.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/logic/workflowaccesskeys.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/logic/workflowaccesskeys.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,462 +0,0 @@ -package logic - -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. -// -// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 -// Changes may cause incorrect behavior and will be lost if the code is -// regenerated. - -import ( - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" - "net/http" - "net/url" -) - -// WorkflowAccessKeysClient is the client for the WorkflowAccessKeys methods -// of the Logic service. -type WorkflowAccessKeysClient struct { - ManagementClient -} - -// NewWorkflowAccessKeysClient creates an instance of the -// WorkflowAccessKeysClient client. -func NewWorkflowAccessKeysClient(subscriptionID string) WorkflowAccessKeysClient { - return NewWorkflowAccessKeysClientWithBaseURI(DefaultBaseURI, subscriptionID) -} - -// NewWorkflowAccessKeysClientWithBaseURI creates an instance of the -// WorkflowAccessKeysClient client. -func NewWorkflowAccessKeysClientWithBaseURI(baseURI string, subscriptionID string) WorkflowAccessKeysClient { - return WorkflowAccessKeysClient{NewWithBaseURI(baseURI, subscriptionID)} -} - -// CreateOrUpdate creates or updates a workflow access key. -// -// resourceGroupName is the resource group name. workflowName is the workflow -// name. accessKeyName is the workflow access key name. workflowAccesskey is -// the workflow access key. 
-func (client WorkflowAccessKeysClient) CreateOrUpdate(resourceGroupName string, workflowName string, accessKeyName string, workflowAccesskey WorkflowAccessKey) (result WorkflowAccessKey, ae error) { - req, err := client.CreateOrUpdatePreparer(resourceGroupName, workflowName, accessKeyName, workflowAccesskey) - if err != nil { - return result, autorest.NewErrorWithError(err, "logic/WorkflowAccessKeysClient", "CreateOrUpdate", "Failure preparing request") - } - - resp, err := client.CreateOrUpdateSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "logic/WorkflowAccessKeysClient", "CreateOrUpdate", "Failure sending request") - } - - result, err = client.CreateOrUpdateResponder(resp) - if err != nil { - ae = autorest.NewErrorWithError(err, "logic/WorkflowAccessKeysClient", "CreateOrUpdate", "Failure responding to request") - } - - return -} - -// CreateOrUpdatePreparer prepares the CreateOrUpdate request. -func (client WorkflowAccessKeysClient) CreateOrUpdatePreparer(resourceGroupName string, workflowName string, accessKeyName string, workflowAccesskey WorkflowAccessKey) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "accessKeyName": url.QueryEscape(accessKeyName), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - "workflowName": url.QueryEscape(workflowName), - } - - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsPut(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/workflows/{workflowName}/accessKeys/{accessKeyName}"), - autorest.WithJSON(workflowAccesskey), - autorest.WithPathParameters(pathParameters), - autorest.WithQueryParameters(queryParameters)) -} - -// CreateOrUpdateSender 
sends the CreateOrUpdate request. The method will close the -// http.Response Body if it receives an error. -func (client WorkflowAccessKeysClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK, http.StatusCreated) -} - -// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always -// closes the http.Response Body. -func (client WorkflowAccessKeysClient) CreateOrUpdateResponder(resp *http.Response) (result WorkflowAccessKey, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// Delete deletes a workflow access key. -// -// resourceGroupName is the resource group name. workflowName is the workflow -// name. accessKeyName is the workflow access key name. -func (client WorkflowAccessKeysClient) Delete(resourceGroupName string, workflowName string, accessKeyName string) (result autorest.Response, ae error) { - req, err := client.DeletePreparer(resourceGroupName, workflowName, accessKeyName) - if err != nil { - return result, autorest.NewErrorWithError(err, "logic/WorkflowAccessKeysClient", "Delete", "Failure preparing request") - } - - resp, err := client.DeleteSender(req) - if err != nil { - result.Response = resp - return result, autorest.NewErrorWithError(err, "logic/WorkflowAccessKeysClient", "Delete", "Failure sending request") - } - - result, err = client.DeleteResponder(resp) - if err != nil { - ae = autorest.NewErrorWithError(err, "logic/WorkflowAccessKeysClient", "Delete", "Failure responding to request") - } - - return -} - -// DeletePreparer prepares the Delete request. 
-func (client WorkflowAccessKeysClient) DeletePreparer(resourceGroupName string, workflowName string, accessKeyName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "accessKeyName": url.QueryEscape(accessKeyName), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - "workflowName": url.QueryEscape(workflowName), - } - - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsDelete(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/workflows/{workflowName}/accessKeys/{accessKeyName}"), - autorest.WithPathParameters(pathParameters), - autorest.WithQueryParameters(queryParameters)) -} - -// DeleteSender sends the Delete request. The method will close the -// http.Response Body if it receives an error. -func (client WorkflowAccessKeysClient) DeleteSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK, http.StatusNoContent) -} - -// DeleteResponder handles the response to the Delete request. The method always -// closes the http.Response Body. -func (client WorkflowAccessKeysClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), - autorest.ByClosing()) - result.Response = resp - return -} - -// Get gets a workflow access key. -// -// resourceGroupName is the resource group name. workflowName is the workflow -// name. accessKeyName is the workflow access key name. 
-func (client WorkflowAccessKeysClient) Get(resourceGroupName string, workflowName string, accessKeyName string) (result WorkflowAccessKey, ae error) { - req, err := client.GetPreparer(resourceGroupName, workflowName, accessKeyName) - if err != nil { - return result, autorest.NewErrorWithError(err, "logic/WorkflowAccessKeysClient", "Get", "Failure preparing request") - } - - resp, err := client.GetSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "logic/WorkflowAccessKeysClient", "Get", "Failure sending request") - } - - result, err = client.GetResponder(resp) - if err != nil { - ae = autorest.NewErrorWithError(err, "logic/WorkflowAccessKeysClient", "Get", "Failure responding to request") - } - - return -} - -// GetPreparer prepares the Get request. -func (client WorkflowAccessKeysClient) GetPreparer(resourceGroupName string, workflowName string, accessKeyName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "accessKeyName": url.QueryEscape(accessKeyName), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - "workflowName": url.QueryEscape(workflowName), - } - - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/workflows/{workflowName}/accessKeys/{accessKeyName}"), - autorest.WithPathParameters(pathParameters), - autorest.WithQueryParameters(queryParameters)) -} - -// GetSender sends the Get request. The method will close the -// http.Response Body if it receives an error. 
-func (client WorkflowAccessKeysClient) GetSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) -} - -// GetResponder handles the response to the Get request. The method always -// closes the http.Response Body. -func (client WorkflowAccessKeysClient) GetResponder(resp *http.Response) (result WorkflowAccessKey, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// List gets a list of workflow access keys. -// -// resourceGroupName is the resource group name. workflowName is the workflow -// name. top is the number of items to be included in the result. -func (client WorkflowAccessKeysClient) List(resourceGroupName string, workflowName string, top *int) (result WorkflowAccessKeyListResult, ae error) { - req, err := client.ListPreparer(resourceGroupName, workflowName, top) - if err != nil { - return result, autorest.NewErrorWithError(err, "logic/WorkflowAccessKeysClient", "List", "Failure preparing request") - } - - resp, err := client.ListSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "logic/WorkflowAccessKeysClient", "List", "Failure sending request") - } - - result, err = client.ListResponder(resp) - if err != nil { - ae = autorest.NewErrorWithError(err, "logic/WorkflowAccessKeysClient", "List", "Failure responding to request") - } - - return -} - -// ListPreparer prepares the List request. 
-func (client WorkflowAccessKeysClient) ListPreparer(resourceGroupName string, workflowName string, top *int) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - "workflowName": url.QueryEscape(workflowName), - } - - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - if top != nil { - queryParameters["$top"] = top - } - - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/workflows/{workflowName}/accessKeys"), - autorest.WithPathParameters(pathParameters), - autorest.WithQueryParameters(queryParameters)) -} - -// ListSender sends the List request. The method will close the -// http.Response Body if it receives an error. -func (client WorkflowAccessKeysClient) ListSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) -} - -// ListResponder handles the response to the List request. The method always -// closes the http.Response Body. -func (client WorkflowAccessKeysClient) ListResponder(resp *http.Response) (result WorkflowAccessKeyListResult, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// ListNextResults retrieves the next set of results, if any. 
-func (client WorkflowAccessKeysClient) ListNextResults(lastResults WorkflowAccessKeyListResult) (result WorkflowAccessKeyListResult, ae error) { - req, err := lastResults.WorkflowAccessKeyListResultPreparer() - if err != nil { - return result, autorest.NewErrorWithError(err, "logic/WorkflowAccessKeysClient", "List", "Failure preparing next results request request") - } - if req == nil { - return - } - - resp, err := client.ListSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "logic/WorkflowAccessKeysClient", "List", "Failure sending next results request request") - } - - result, err = client.ListResponder(resp) - if err != nil { - ae = autorest.NewErrorWithError(err, "logic/WorkflowAccessKeysClient", "List", "Failure responding to next results request request") - } - - return -} - -// ListSecretKeys lists secret keys. -// -// resourceGroupName is the resource group name. workflowName is the workflow -// name. accessKeyName is the workflow access key name. -func (client WorkflowAccessKeysClient) ListSecretKeys(resourceGroupName string, workflowName string, accessKeyName string) (result WorkflowSecretKeys, ae error) { - req, err := client.ListSecretKeysPreparer(resourceGroupName, workflowName, accessKeyName) - if err != nil { - return result, autorest.NewErrorWithError(err, "logic/WorkflowAccessKeysClient", "ListSecretKeys", "Failure preparing request") - } - - resp, err := client.ListSecretKeysSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "logic/WorkflowAccessKeysClient", "ListSecretKeys", "Failure sending request") - } - - result, err = client.ListSecretKeysResponder(resp) - if err != nil { - ae = autorest.NewErrorWithError(err, "logic/WorkflowAccessKeysClient", "ListSecretKeys", "Failure responding to request") - } - - return -} - -// ListSecretKeysPreparer prepares the ListSecretKeys request. 
-func (client WorkflowAccessKeysClient) ListSecretKeysPreparer(resourceGroupName string, workflowName string, accessKeyName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "accessKeyName": url.QueryEscape(accessKeyName), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - "workflowName": url.QueryEscape(workflowName), - } - - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsPost(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/workflows/{workflowName}/accessKeys/{accessKeyName}/list"), - autorest.WithPathParameters(pathParameters), - autorest.WithQueryParameters(queryParameters)) -} - -// ListSecretKeysSender sends the ListSecretKeys request. The method will close the -// http.Response Body if it receives an error. -func (client WorkflowAccessKeysClient) ListSecretKeysSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) -} - -// ListSecretKeysResponder handles the response to the ListSecretKeys request. The method always -// closes the http.Response Body. -func (client WorkflowAccessKeysClient) ListSecretKeysResponder(resp *http.Response) (result WorkflowSecretKeys, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// RegenerateSecretKey regenerates secret key. -// -// resourceGroupName is the resource group name. workflowName is the workflow -// name. accessKeyName is the workflow access key name. parameters is the -// parameters. 
-func (client WorkflowAccessKeysClient) RegenerateSecretKey(resourceGroupName string, workflowName string, accessKeyName string, parameters RegenerateSecretKeyParameters) (result WorkflowSecretKeys, ae error) { - req, err := client.RegenerateSecretKeyPreparer(resourceGroupName, workflowName, accessKeyName, parameters) - if err != nil { - return result, autorest.NewErrorWithError(err, "logic/WorkflowAccessKeysClient", "RegenerateSecretKey", "Failure preparing request") - } - - resp, err := client.RegenerateSecretKeySender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "logic/WorkflowAccessKeysClient", "RegenerateSecretKey", "Failure sending request") - } - - result, err = client.RegenerateSecretKeyResponder(resp) - if err != nil { - ae = autorest.NewErrorWithError(err, "logic/WorkflowAccessKeysClient", "RegenerateSecretKey", "Failure responding to request") - } - - return -} - -// RegenerateSecretKeyPreparer prepares the RegenerateSecretKey request. 
-func (client WorkflowAccessKeysClient) RegenerateSecretKeyPreparer(resourceGroupName string, workflowName string, accessKeyName string, parameters RegenerateSecretKeyParameters) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "accessKeyName": url.QueryEscape(accessKeyName), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - "workflowName": url.QueryEscape(workflowName), - } - - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsPost(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/workflows/{workflowName}/accessKeys/{accessKeyName}/regenerate"), - autorest.WithJSON(parameters), - autorest.WithPathParameters(pathParameters), - autorest.WithQueryParameters(queryParameters)) -} - -// RegenerateSecretKeySender sends the RegenerateSecretKey request. The method will close the -// http.Response Body if it receives an error. -func (client WorkflowAccessKeysClient) RegenerateSecretKeySender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) -} - -// RegenerateSecretKeyResponder handles the response to the RegenerateSecretKey request. The method always -// closes the http.Response Body. 
-func (client WorkflowAccessKeysClient) RegenerateSecretKeyResponder(resp *http.Response) (result WorkflowSecretKeys, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/logic/workflowrunactions.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/logic/workflowrunactions.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/logic/workflowrunactions.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/logic/workflowrunactions.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,206 +0,0 @@ -package logic - -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. -// -// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 -// Changes may cause incorrect behavior and will be lost if the code is -// regenerated. - -import ( - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" - "net/http" - "net/url" -) - -// WorkflowRunActionsClient is the client for the WorkflowRunActions methods -// of the Logic service. 
-type WorkflowRunActionsClient struct { - ManagementClient -} - -// NewWorkflowRunActionsClient creates an instance of the -// WorkflowRunActionsClient client. -func NewWorkflowRunActionsClient(subscriptionID string) WorkflowRunActionsClient { - return NewWorkflowRunActionsClientWithBaseURI(DefaultBaseURI, subscriptionID) -} - -// NewWorkflowRunActionsClientWithBaseURI creates an instance of the -// WorkflowRunActionsClient client. -func NewWorkflowRunActionsClientWithBaseURI(baseURI string, subscriptionID string) WorkflowRunActionsClient { - return WorkflowRunActionsClient{NewWithBaseURI(baseURI, subscriptionID)} -} - -// Get gets a workflow run action. -// -// resourceGroupName is the resource group name. workflowName is the workflow -// name. runName is the workflow run name. actionName is the workflow action -// name. -func (client WorkflowRunActionsClient) Get(resourceGroupName string, workflowName string, runName string, actionName string) (result WorkflowRunAction, ae error) { - req, err := client.GetPreparer(resourceGroupName, workflowName, runName, actionName) - if err != nil { - return result, autorest.NewErrorWithError(err, "logic/WorkflowRunActionsClient", "Get", "Failure preparing request") - } - - resp, err := client.GetSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "logic/WorkflowRunActionsClient", "Get", "Failure sending request") - } - - result, err = client.GetResponder(resp) - if err != nil { - ae = autorest.NewErrorWithError(err, "logic/WorkflowRunActionsClient", "Get", "Failure responding to request") - } - - return -} - -// GetPreparer prepares the Get request. 
-func (client WorkflowRunActionsClient) GetPreparer(resourceGroupName string, workflowName string, runName string, actionName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "actionName": url.QueryEscape(actionName), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "runName": url.QueryEscape(runName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - "workflowName": url.QueryEscape(workflowName), - } - - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/workflows/{workflowName}/runs/{runName}/actions/{actionName}"), - autorest.WithPathParameters(pathParameters), - autorest.WithQueryParameters(queryParameters)) -} - -// GetSender sends the Get request. The method will close the -// http.Response Body if it receives an error. -func (client WorkflowRunActionsClient) GetSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) -} - -// GetResponder handles the response to the Get request. The method always -// closes the http.Response Body. -func (client WorkflowRunActionsClient) GetResponder(resp *http.Response) (result WorkflowRunAction, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// List gets a list of workflow run actions. -// -// resourceGroupName is the resource group name. workflowName is the workflow -// name. runName is the workflow run name. top is the number of items to be -// included in the result. filter is the filter to apply on the operation. 
-func (client WorkflowRunActionsClient) List(resourceGroupName string, workflowName string, runName string, top *int, filter string) (result WorkflowRunActionListResult, ae error) { - req, err := client.ListPreparer(resourceGroupName, workflowName, runName, top, filter) - if err != nil { - return result, autorest.NewErrorWithError(err, "logic/WorkflowRunActionsClient", "List", "Failure preparing request") - } - - resp, err := client.ListSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "logic/WorkflowRunActionsClient", "List", "Failure sending request") - } - - result, err = client.ListResponder(resp) - if err != nil { - ae = autorest.NewErrorWithError(err, "logic/WorkflowRunActionsClient", "List", "Failure responding to request") - } - - return -} - -// ListPreparer prepares the List request. -func (client WorkflowRunActionsClient) ListPreparer(resourceGroupName string, workflowName string, runName string, top *int, filter string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "resourceGroupName": url.QueryEscape(resourceGroupName), - "runName": url.QueryEscape(runName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - "workflowName": url.QueryEscape(workflowName), - } - - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - if top != nil { - queryParameters["$top"] = top - } - if len(filter) > 0 { - queryParameters["$filter"] = filter - } - - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/workflows/{workflowName}/runs/{runName}/actions"), - autorest.WithPathParameters(pathParameters), - autorest.WithQueryParameters(queryParameters)) -} - -// ListSender sends the List request. 
The method will close the -// http.Response Body if it receives an error. -func (client WorkflowRunActionsClient) ListSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) -} - -// ListResponder handles the response to the List request. The method always -// closes the http.Response Body. -func (client WorkflowRunActionsClient) ListResponder(resp *http.Response) (result WorkflowRunActionListResult, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// ListNextResults retrieves the next set of results, if any. -func (client WorkflowRunActionsClient) ListNextResults(lastResults WorkflowRunActionListResult) (result WorkflowRunActionListResult, ae error) { - req, err := lastResults.WorkflowRunActionListResultPreparer() - if err != nil { - return result, autorest.NewErrorWithError(err, "logic/WorkflowRunActionsClient", "List", "Failure preparing next results request request") - } - if req == nil { - return - } - - resp, err := client.ListSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "logic/WorkflowRunActionsClient", "List", "Failure sending next results request request") - } - - result, err = client.ListResponder(resp) - if err != nil { - ae = autorest.NewErrorWithError(err, "logic/WorkflowRunActionsClient", "List", "Failure responding to next results request request") - } - - return -} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/logic/workflowruns.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/logic/workflowruns.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/logic/workflowruns.go 2016-08-16 08:56:25.000000000 +0000 +++ 
juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/logic/workflowruns.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,266 +0,0 @@ -package logic - -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. -// -// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 -// Changes may cause incorrect behavior and will be lost if the code is -// regenerated. - -import ( - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" - "net/http" - "net/url" -) - -// WorkflowRunsClient is the client for the WorkflowRuns methods of the Logic -// service. -type WorkflowRunsClient struct { - ManagementClient -} - -// NewWorkflowRunsClient creates an instance of the WorkflowRunsClient client. -func NewWorkflowRunsClient(subscriptionID string) WorkflowRunsClient { - return NewWorkflowRunsClientWithBaseURI(DefaultBaseURI, subscriptionID) -} - -// NewWorkflowRunsClientWithBaseURI creates an instance of the -// WorkflowRunsClient client. -func NewWorkflowRunsClientWithBaseURI(baseURI string, subscriptionID string) WorkflowRunsClient { - return WorkflowRunsClient{NewWithBaseURI(baseURI, subscriptionID)} -} - -// Cancel cancels a workflow run. -// -// resourceGroupName is the resource group name. workflowName is the workflow -// name. runName is the workflow run name. 
-func (client WorkflowRunsClient) Cancel(resourceGroupName string, workflowName string, runName string) (result autorest.Response, ae error) { - req, err := client.CancelPreparer(resourceGroupName, workflowName, runName) - if err != nil { - return result, autorest.NewErrorWithError(err, "logic/WorkflowRunsClient", "Cancel", "Failure preparing request") - } - - resp, err := client.CancelSender(req) - if err != nil { - result.Response = resp - return result, autorest.NewErrorWithError(err, "logic/WorkflowRunsClient", "Cancel", "Failure sending request") - } - - result, err = client.CancelResponder(resp) - if err != nil { - ae = autorest.NewErrorWithError(err, "logic/WorkflowRunsClient", "Cancel", "Failure responding to request") - } - - return -} - -// CancelPreparer prepares the Cancel request. -func (client WorkflowRunsClient) CancelPreparer(resourceGroupName string, workflowName string, runName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "resourceGroupName": url.QueryEscape(resourceGroupName), - "runName": url.QueryEscape(runName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - "workflowName": url.QueryEscape(workflowName), - } - - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsPost(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/workflows/{workflowName}/runs/{runName}/cancel"), - autorest.WithPathParameters(pathParameters), - autorest.WithQueryParameters(queryParameters)) -} - -// CancelSender sends the Cancel request. The method will close the -// http.Response Body if it receives an error. -func (client WorkflowRunsClient) CancelSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) -} - -// CancelResponder handles the response to the Cancel request. 
The method always -// closes the http.Response Body. -func (client WorkflowRunsClient) CancelResponder(resp *http.Response) (result autorest.Response, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByClosing()) - result.Response = resp - return -} - -// Get gets a workflow run. -// -// resourceGroupName is the resource group name. workflowName is the workflow -// name. runName is the workflow run name. -func (client WorkflowRunsClient) Get(resourceGroupName string, workflowName string, runName string) (result WorkflowRun, ae error) { - req, err := client.GetPreparer(resourceGroupName, workflowName, runName) - if err != nil { - return result, autorest.NewErrorWithError(err, "logic/WorkflowRunsClient", "Get", "Failure preparing request") - } - - resp, err := client.GetSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "logic/WorkflowRunsClient", "Get", "Failure sending request") - } - - result, err = client.GetResponder(resp) - if err != nil { - ae = autorest.NewErrorWithError(err, "logic/WorkflowRunsClient", "Get", "Failure responding to request") - } - - return -} - -// GetPreparer prepares the Get request. 
-func (client WorkflowRunsClient) GetPreparer(resourceGroupName string, workflowName string, runName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "resourceGroupName": url.QueryEscape(resourceGroupName), - "runName": url.QueryEscape(runName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - "workflowName": url.QueryEscape(workflowName), - } - - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/workflows/{workflowName}/runs/{runName}"), - autorest.WithPathParameters(pathParameters), - autorest.WithQueryParameters(queryParameters)) -} - -// GetSender sends the Get request. The method will close the -// http.Response Body if it receives an error. -func (client WorkflowRunsClient) GetSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) -} - -// GetResponder handles the response to the Get request. The method always -// closes the http.Response Body. -func (client WorkflowRunsClient) GetResponder(resp *http.Response) (result WorkflowRun, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// List gets a list of workflow runs. -// -// resourceGroupName is the resource group name. workflowName is the workflow -// name. top is the number of items to be included in the result. filter is -// the filter to apply on the operation. 
-func (client WorkflowRunsClient) List(resourceGroupName string, workflowName string, top *int, filter string) (result WorkflowRunListResult, ae error) { - req, err := client.ListPreparer(resourceGroupName, workflowName, top, filter) - if err != nil { - return result, autorest.NewErrorWithError(err, "logic/WorkflowRunsClient", "List", "Failure preparing request") - } - - resp, err := client.ListSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "logic/WorkflowRunsClient", "List", "Failure sending request") - } - - result, err = client.ListResponder(resp) - if err != nil { - ae = autorest.NewErrorWithError(err, "logic/WorkflowRunsClient", "List", "Failure responding to request") - } - - return -} - -// ListPreparer prepares the List request. -func (client WorkflowRunsClient) ListPreparer(resourceGroupName string, workflowName string, top *int, filter string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - "workflowName": url.QueryEscape(workflowName), - } - - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - if top != nil { - queryParameters["$top"] = top - } - if len(filter) > 0 { - queryParameters["$filter"] = filter - } - - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/workflows/{workflowName}/runs"), - autorest.WithPathParameters(pathParameters), - autorest.WithQueryParameters(queryParameters)) -} - -// ListSender sends the List request. The method will close the -// http.Response Body if it receives an error. 
-func (client WorkflowRunsClient) ListSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) -} - -// ListResponder handles the response to the List request. The method always -// closes the http.Response Body. -func (client WorkflowRunsClient) ListResponder(resp *http.Response) (result WorkflowRunListResult, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// ListNextResults retrieves the next set of results, if any. -func (client WorkflowRunsClient) ListNextResults(lastResults WorkflowRunListResult) (result WorkflowRunListResult, ae error) { - req, err := lastResults.WorkflowRunListResultPreparer() - if err != nil { - return result, autorest.NewErrorWithError(err, "logic/WorkflowRunsClient", "List", "Failure preparing next results request request") - } - if req == nil { - return - } - - resp, err := client.ListSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "logic/WorkflowRunsClient", "List", "Failure sending next results request request") - } - - result, err = client.ListResponder(resp) - if err != nil { - ae = autorest.NewErrorWithError(err, "logic/WorkflowRunsClient", "List", "Failure responding to next results request request") - } - - return -} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/logic/workflows.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/logic/workflows.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/logic/workflows.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/logic/workflows.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,739 +0,0 @@ -package logic - -// Copyright (c) Microsoft and contributors. 
All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. -// -// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 -// Changes may cause incorrect behavior and will be lost if the code is -// regenerated. - -import ( - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" - "net/http" - "net/url" -) - -// WorkflowsClient is the client for the Workflows methods of the Logic -// service. -type WorkflowsClient struct { - ManagementClient -} - -// NewWorkflowsClient creates an instance of the WorkflowsClient client. -func NewWorkflowsClient(subscriptionID string) WorkflowsClient { - return NewWorkflowsClientWithBaseURI(DefaultBaseURI, subscriptionID) -} - -// NewWorkflowsClientWithBaseURI creates an instance of the WorkflowsClient -// client. -func NewWorkflowsClientWithBaseURI(baseURI string, subscriptionID string) WorkflowsClient { - return WorkflowsClient{NewWithBaseURI(baseURI, subscriptionID)} -} - -// CreateOrUpdate creates or updates a workflow. -// -// resourceGroupName is the resource group name. workflowName is the workflow -// name. workflow is the workflow. 
-func (client WorkflowsClient) CreateOrUpdate(resourceGroupName string, workflowName string, workflow Workflow) (result Workflow, ae error) { - req, err := client.CreateOrUpdatePreparer(resourceGroupName, workflowName, workflow) - if err != nil { - return result, autorest.NewErrorWithError(err, "logic/WorkflowsClient", "CreateOrUpdate", "Failure preparing request") - } - - resp, err := client.CreateOrUpdateSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "logic/WorkflowsClient", "CreateOrUpdate", "Failure sending request") - } - - result, err = client.CreateOrUpdateResponder(resp) - if err != nil { - ae = autorest.NewErrorWithError(err, "logic/WorkflowsClient", "CreateOrUpdate", "Failure responding to request") - } - - return -} - -// CreateOrUpdatePreparer prepares the CreateOrUpdate request. -func (client WorkflowsClient) CreateOrUpdatePreparer(resourceGroupName string, workflowName string, workflow Workflow) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - "workflowName": url.QueryEscape(workflowName), - } - - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsPut(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/workflows/{workflowName}"), - autorest.WithJSON(workflow), - autorest.WithPathParameters(pathParameters), - autorest.WithQueryParameters(queryParameters)) -} - -// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the -// http.Response Body if it receives an error. 
-func (client WorkflowsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK, http.StatusCreated) -} - -// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always -// closes the http.Response Body. -func (client WorkflowsClient) CreateOrUpdateResponder(resp *http.Response) (result Workflow, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// Delete deletes a workflow. -// -// resourceGroupName is the resource group name. workflowName is the workflow -// name. -func (client WorkflowsClient) Delete(resourceGroupName string, workflowName string) (result autorest.Response, ae error) { - req, err := client.DeletePreparer(resourceGroupName, workflowName) - if err != nil { - return result, autorest.NewErrorWithError(err, "logic/WorkflowsClient", "Delete", "Failure preparing request") - } - - resp, err := client.DeleteSender(req) - if err != nil { - result.Response = resp - return result, autorest.NewErrorWithError(err, "logic/WorkflowsClient", "Delete", "Failure sending request") - } - - result, err = client.DeleteResponder(resp) - if err != nil { - ae = autorest.NewErrorWithError(err, "logic/WorkflowsClient", "Delete", "Failure responding to request") - } - - return -} - -// DeletePreparer prepares the Delete request. 
-func (client WorkflowsClient) DeletePreparer(resourceGroupName string, workflowName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - "workflowName": url.QueryEscape(workflowName), - } - - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsDelete(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/workflows/{workflowName}"), - autorest.WithPathParameters(pathParameters), - autorest.WithQueryParameters(queryParameters)) -} - -// DeleteSender sends the Delete request. The method will close the -// http.Response Body if it receives an error. -func (client WorkflowsClient) DeleteSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK, http.StatusNoContent) -} - -// DeleteResponder handles the response to the Delete request. The method always -// closes the http.Response Body. -func (client WorkflowsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), - autorest.ByClosing()) - result.Response = resp - return -} - -// Disable disables a workflow. -// -// resourceGroupName is the resource group name. workflowName is the workflow -// name. 
-func (client WorkflowsClient) Disable(resourceGroupName string, workflowName string) (result autorest.Response, ae error) { - req, err := client.DisablePreparer(resourceGroupName, workflowName) - if err != nil { - return result, autorest.NewErrorWithError(err, "logic/WorkflowsClient", "Disable", "Failure preparing request") - } - - resp, err := client.DisableSender(req) - if err != nil { - result.Response = resp - return result, autorest.NewErrorWithError(err, "logic/WorkflowsClient", "Disable", "Failure sending request") - } - - result, err = client.DisableResponder(resp) - if err != nil { - ae = autorest.NewErrorWithError(err, "logic/WorkflowsClient", "Disable", "Failure responding to request") - } - - return -} - -// DisablePreparer prepares the Disable request. -func (client WorkflowsClient) DisablePreparer(resourceGroupName string, workflowName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - "workflowName": url.QueryEscape(workflowName), - } - - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsPost(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/workflows/{workflowName}/disable"), - autorest.WithPathParameters(pathParameters), - autorest.WithQueryParameters(queryParameters)) -} - -// DisableSender sends the Disable request. The method will close the -// http.Response Body if it receives an error. -func (client WorkflowsClient) DisableSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) -} - -// DisableResponder handles the response to the Disable request. The method always -// closes the http.Response Body. 
-func (client WorkflowsClient) DisableResponder(resp *http.Response) (result autorest.Response, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByClosing()) - result.Response = resp - return -} - -// Enable enables a workflow. -// -// resourceGroupName is the resource group name. workflowName is the workflow -// name. -func (client WorkflowsClient) Enable(resourceGroupName string, workflowName string) (result autorest.Response, ae error) { - req, err := client.EnablePreparer(resourceGroupName, workflowName) - if err != nil { - return result, autorest.NewErrorWithError(err, "logic/WorkflowsClient", "Enable", "Failure preparing request") - } - - resp, err := client.EnableSender(req) - if err != nil { - result.Response = resp - return result, autorest.NewErrorWithError(err, "logic/WorkflowsClient", "Enable", "Failure sending request") - } - - result, err = client.EnableResponder(resp) - if err != nil { - ae = autorest.NewErrorWithError(err, "logic/WorkflowsClient", "Enable", "Failure responding to request") - } - - return -} - -// EnablePreparer prepares the Enable request. 
-func (client WorkflowsClient) EnablePreparer(resourceGroupName string, workflowName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - "workflowName": url.QueryEscape(workflowName), - } - - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsPost(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/workflows/{workflowName}/enable"), - autorest.WithPathParameters(pathParameters), - autorest.WithQueryParameters(queryParameters)) -} - -// EnableSender sends the Enable request. The method will close the -// http.Response Body if it receives an error. -func (client WorkflowsClient) EnableSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) -} - -// EnableResponder handles the response to the Enable request. The method always -// closes the http.Response Body. -func (client WorkflowsClient) EnableResponder(resp *http.Response) (result autorest.Response, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByClosing()) - result.Response = resp - return -} - -// Get gets a workflow. -// -// resourceGroupName is the resource group name. workflowName is the workflow -// name. 
-func (client WorkflowsClient) Get(resourceGroupName string, workflowName string) (result Workflow, ae error) { - req, err := client.GetPreparer(resourceGroupName, workflowName) - if err != nil { - return result, autorest.NewErrorWithError(err, "logic/WorkflowsClient", "Get", "Failure preparing request") - } - - resp, err := client.GetSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "logic/WorkflowsClient", "Get", "Failure sending request") - } - - result, err = client.GetResponder(resp) - if err != nil { - ae = autorest.NewErrorWithError(err, "logic/WorkflowsClient", "Get", "Failure responding to request") - } - - return -} - -// GetPreparer prepares the Get request. -func (client WorkflowsClient) GetPreparer(resourceGroupName string, workflowName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - "workflowName": url.QueryEscape(workflowName), - } - - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/workflows/{workflowName}"), - autorest.WithPathParameters(pathParameters), - autorest.WithQueryParameters(queryParameters)) -} - -// GetSender sends the Get request. The method will close the -// http.Response Body if it receives an error. -func (client WorkflowsClient) GetSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) -} - -// GetResponder handles the response to the Get request. The method always -// closes the http.Response Body. 
-func (client WorkflowsClient) GetResponder(resp *http.Response) (result Workflow, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// ListByResourceGroup gets a list of workflows by resource group. -// -// resourceGroupName is the resource group name. top is the number of items to -// be included in the result. filter is the filter to apply on the operation. -func (client WorkflowsClient) ListByResourceGroup(resourceGroupName string, top *int, filter string) (result WorkflowListResult, ae error) { - req, err := client.ListByResourceGroupPreparer(resourceGroupName, top, filter) - if err != nil { - return result, autorest.NewErrorWithError(err, "logic/WorkflowsClient", "ListByResourceGroup", "Failure preparing request") - } - - resp, err := client.ListByResourceGroupSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "logic/WorkflowsClient", "ListByResourceGroup", "Failure sending request") - } - - result, err = client.ListByResourceGroupResponder(resp) - if err != nil { - ae = autorest.NewErrorWithError(err, "logic/WorkflowsClient", "ListByResourceGroup", "Failure responding to request") - } - - return -} - -// ListByResourceGroupPreparer prepares the ListByResourceGroup request. 
-func (client WorkflowsClient) ListByResourceGroupPreparer(resourceGroupName string, top *int, filter string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - } - - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - if top != nil { - queryParameters["$top"] = top - } - if len(filter) > 0 { - queryParameters["$filter"] = filter - } - - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/workflows"), - autorest.WithPathParameters(pathParameters), - autorest.WithQueryParameters(queryParameters)) -} - -// ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the -// http.Response Body if it receives an error. -func (client WorkflowsClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) -} - -// ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always -// closes the http.Response Body. -func (client WorkflowsClient) ListByResourceGroupResponder(resp *http.Response) (result WorkflowListResult, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// ListByResourceGroupNextResults retrieves the next set of results, if any. 
-func (client WorkflowsClient) ListByResourceGroupNextResults(lastResults WorkflowListResult) (result WorkflowListResult, ae error) { - req, err := lastResults.WorkflowListResultPreparer() - if err != nil { - return result, autorest.NewErrorWithError(err, "logic/WorkflowsClient", "ListByResourceGroup", "Failure preparing next results request request") - } - if req == nil { - return - } - - resp, err := client.ListByResourceGroupSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "logic/WorkflowsClient", "ListByResourceGroup", "Failure sending next results request request") - } - - result, err = client.ListByResourceGroupResponder(resp) - if err != nil { - ae = autorest.NewErrorWithError(err, "logic/WorkflowsClient", "ListByResourceGroup", "Failure responding to next results request request") - } - - return -} - -// ListBySubscription gets a list of workflows by subscription. -// -// top is the number of items to be included in the result. filter is the -// filter to apply on the operation. -func (client WorkflowsClient) ListBySubscription(top *int, filter string) (result WorkflowListResult, ae error) { - req, err := client.ListBySubscriptionPreparer(top, filter) - if err != nil { - return result, autorest.NewErrorWithError(err, "logic/WorkflowsClient", "ListBySubscription", "Failure preparing request") - } - - resp, err := client.ListBySubscriptionSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "logic/WorkflowsClient", "ListBySubscription", "Failure sending request") - } - - result, err = client.ListBySubscriptionResponder(resp) - if err != nil { - ae = autorest.NewErrorWithError(err, "logic/WorkflowsClient", "ListBySubscription", "Failure responding to request") - } - - return -} - -// ListBySubscriptionPreparer prepares the ListBySubscription request. 
-func (client WorkflowsClient) ListBySubscriptionPreparer(top *int, filter string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "subscriptionId": url.QueryEscape(client.SubscriptionID), - } - - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - if top != nil { - queryParameters["$top"] = top - } - if len(filter) > 0 { - queryParameters["$filter"] = filter - } - - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/providers/Microsoft.Logic/workflows"), - autorest.WithPathParameters(pathParameters), - autorest.WithQueryParameters(queryParameters)) -} - -// ListBySubscriptionSender sends the ListBySubscription request. The method will close the -// http.Response Body if it receives an error. -func (client WorkflowsClient) ListBySubscriptionSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) -} - -// ListBySubscriptionResponder handles the response to the ListBySubscription request. The method always -// closes the http.Response Body. -func (client WorkflowsClient) ListBySubscriptionResponder(resp *http.Response) (result WorkflowListResult, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// ListBySubscriptionNextResults retrieves the next set of results, if any. 
-func (client WorkflowsClient) ListBySubscriptionNextResults(lastResults WorkflowListResult) (result WorkflowListResult, ae error) { - req, err := lastResults.WorkflowListResultPreparer() - if err != nil { - return result, autorest.NewErrorWithError(err, "logic/WorkflowsClient", "ListBySubscription", "Failure preparing next results request request") - } - if req == nil { - return - } - - resp, err := client.ListBySubscriptionSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "logic/WorkflowsClient", "ListBySubscription", "Failure sending next results request request") - } - - result, err = client.ListBySubscriptionResponder(resp) - if err != nil { - ae = autorest.NewErrorWithError(err, "logic/WorkflowsClient", "ListBySubscription", "Failure responding to next results request request") - } - - return -} - -// Run runs a workflow. -// -// resourceGroupName is the resource group name. workflowName is the workflow -// name. parameters is the parameters. -func (client WorkflowsClient) Run(resourceGroupName string, workflowName string, parameters RunWorkflowParameters) (result WorkflowRun, ae error) { - req, err := client.RunPreparer(resourceGroupName, workflowName, parameters) - if err != nil { - return result, autorest.NewErrorWithError(err, "logic/WorkflowsClient", "Run", "Failure preparing request") - } - - resp, err := client.RunSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "logic/WorkflowsClient", "Run", "Failure sending request") - } - - result, err = client.RunResponder(resp) - if err != nil { - ae = autorest.NewErrorWithError(err, "logic/WorkflowsClient", "Run", "Failure responding to request") - } - - return -} - -// RunPreparer prepares the Run request. 
-func (client WorkflowsClient) RunPreparer(resourceGroupName string, workflowName string, parameters RunWorkflowParameters) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - "workflowName": url.QueryEscape(workflowName), - } - - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsPost(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/workflows/{workflowName}/run"), - autorest.WithJSON(parameters), - autorest.WithPathParameters(pathParameters), - autorest.WithQueryParameters(queryParameters)) -} - -// RunSender sends the Run request. The method will close the -// http.Response Body if it receives an error. -func (client WorkflowsClient) RunSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK, http.StatusAccepted) -} - -// RunResponder handles the response to the Run request. The method always -// closes the http.Response Body. -func (client WorkflowsClient) RunResponder(resp *http.Response) (result WorkflowRun, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// Update updates a workflow. -// -// resourceGroupName is the resource group name. workflowName is the workflow -// name. workflow is the workflow. 
-func (client WorkflowsClient) Update(resourceGroupName string, workflowName string, workflow Workflow) (result Workflow, ae error) { - req, err := client.UpdatePreparer(resourceGroupName, workflowName, workflow) - if err != nil { - return result, autorest.NewErrorWithError(err, "logic/WorkflowsClient", "Update", "Failure preparing request") - } - - resp, err := client.UpdateSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "logic/WorkflowsClient", "Update", "Failure sending request") - } - - result, err = client.UpdateResponder(resp) - if err != nil { - ae = autorest.NewErrorWithError(err, "logic/WorkflowsClient", "Update", "Failure responding to request") - } - - return -} - -// UpdatePreparer prepares the Update request. -func (client WorkflowsClient) UpdatePreparer(resourceGroupName string, workflowName string, workflow Workflow) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - "workflowName": url.QueryEscape(workflowName), - } - - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsPatch(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/workflows/{workflowName}"), - autorest.WithJSON(workflow), - autorest.WithPathParameters(pathParameters), - autorest.WithQueryParameters(queryParameters)) -} - -// UpdateSender sends the Update request. The method will close the -// http.Response Body if it receives an error. -func (client WorkflowsClient) UpdateSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) -} - -// UpdateResponder handles the response to the Update request. 
The method always -// closes the http.Response Body. -func (client WorkflowsClient) UpdateResponder(resp *http.Response) (result Workflow, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// Validate validates a workflow. -// -// resourceGroupName is the resource group name. workflowName is the workflow -// name. workflow is the workflow. -func (client WorkflowsClient) Validate(resourceGroupName string, workflowName string, workflow Workflow) (result autorest.Response, ae error) { - req, err := client.ValidatePreparer(resourceGroupName, workflowName, workflow) - if err != nil { - return result, autorest.NewErrorWithError(err, "logic/WorkflowsClient", "Validate", "Failure preparing request") - } - - resp, err := client.ValidateSender(req) - if err != nil { - result.Response = resp - return result, autorest.NewErrorWithError(err, "logic/WorkflowsClient", "Validate", "Failure sending request") - } - - result, err = client.ValidateResponder(resp) - if err != nil { - ae = autorest.NewErrorWithError(err, "logic/WorkflowsClient", "Validate", "Failure responding to request") - } - - return -} - -// ValidatePreparer prepares the Validate request. 
-func (client WorkflowsClient) ValidatePreparer(resourceGroupName string, workflowName string, workflow Workflow) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - "workflowName": url.QueryEscape(workflowName), - } - - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsPost(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/workflows/{workflowName}/validate"), - autorest.WithJSON(workflow), - autorest.WithPathParameters(pathParameters), - autorest.WithQueryParameters(queryParameters)) -} - -// ValidateSender sends the Validate request. The method will close the -// http.Response Body if it receives an error. -func (client WorkflowsClient) ValidateSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) -} - -// ValidateResponder handles the response to the Validate request. The method always -// closes the http.Response Body. 
-func (client WorkflowsClient) ValidateResponder(resp *http.Response) (result autorest.Response, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByClosing()) - result.Response = resp - return -} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/logic/workflowtriggerhistories.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/logic/workflowtriggerhistories.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/logic/workflowtriggerhistories.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/logic/workflowtriggerhistories.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,203 +0,0 @@ -package logic - -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. -// -// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 -// Changes may cause incorrect behavior and will be lost if the code is -// regenerated. - -import ( - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" - "net/http" - "net/url" -) - -// WorkflowTriggerHistoriesClient is the client for the -// WorkflowTriggerHistories methods of the Logic service. 
-type WorkflowTriggerHistoriesClient struct { - ManagementClient -} - -// NewWorkflowTriggerHistoriesClient creates an instance of the -// WorkflowTriggerHistoriesClient client. -func NewWorkflowTriggerHistoriesClient(subscriptionID string) WorkflowTriggerHistoriesClient { - return NewWorkflowTriggerHistoriesClientWithBaseURI(DefaultBaseURI, subscriptionID) -} - -// NewWorkflowTriggerHistoriesClientWithBaseURI creates an instance of the -// WorkflowTriggerHistoriesClient client. -func NewWorkflowTriggerHistoriesClientWithBaseURI(baseURI string, subscriptionID string) WorkflowTriggerHistoriesClient { - return WorkflowTriggerHistoriesClient{NewWithBaseURI(baseURI, subscriptionID)} -} - -// Get gets a workflow trigger history. -// -// resourceGroupName is the resource group name. workflowName is the workflow -// name. triggerName is the workflow trigger name. historyName is the -// workflow trigger history name. -func (client WorkflowTriggerHistoriesClient) Get(resourceGroupName string, workflowName string, triggerName string, historyName string) (result WorkflowTriggerHistory, ae error) { - req, err := client.GetPreparer(resourceGroupName, workflowName, triggerName, historyName) - if err != nil { - return result, autorest.NewErrorWithError(err, "logic/WorkflowTriggerHistoriesClient", "Get", "Failure preparing request") - } - - resp, err := client.GetSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "logic/WorkflowTriggerHistoriesClient", "Get", "Failure sending request") - } - - result, err = client.GetResponder(resp) - if err != nil { - ae = autorest.NewErrorWithError(err, "logic/WorkflowTriggerHistoriesClient", "Get", "Failure responding to request") - } - - return -} - -// GetPreparer prepares the Get request. 
-func (client WorkflowTriggerHistoriesClient) GetPreparer(resourceGroupName string, workflowName string, triggerName string, historyName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "historyName": url.QueryEscape(historyName), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - "triggerName": url.QueryEscape(triggerName), - "workflowName": url.QueryEscape(workflowName), - } - - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/workflows/{workflowName}/triggers/{triggerName}/histories/{historyName}"), - autorest.WithPathParameters(pathParameters), - autorest.WithQueryParameters(queryParameters)) -} - -// GetSender sends the Get request. The method will close the -// http.Response Body if it receives an error. -func (client WorkflowTriggerHistoriesClient) GetSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) -} - -// GetResponder handles the response to the Get request. The method always -// closes the http.Response Body. -func (client WorkflowTriggerHistoriesClient) GetResponder(resp *http.Response) (result WorkflowTriggerHistory, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// List gets a list of workflow trigger histories. -// -// resourceGroupName is the resource group name. workflowName is the workflow -// name. triggerName is the workflow trigger name. top is the number of items -// to be included in the result. 
-func (client WorkflowTriggerHistoriesClient) List(resourceGroupName string, workflowName string, triggerName string, top *int) (result WorkflowTriggerHistoryListResult, ae error) { - req, err := client.ListPreparer(resourceGroupName, workflowName, triggerName, top) - if err != nil { - return result, autorest.NewErrorWithError(err, "logic/WorkflowTriggerHistoriesClient", "List", "Failure preparing request") - } - - resp, err := client.ListSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "logic/WorkflowTriggerHistoriesClient", "List", "Failure sending request") - } - - result, err = client.ListResponder(resp) - if err != nil { - ae = autorest.NewErrorWithError(err, "logic/WorkflowTriggerHistoriesClient", "List", "Failure responding to request") - } - - return -} - -// ListPreparer prepares the List request. -func (client WorkflowTriggerHistoriesClient) ListPreparer(resourceGroupName string, workflowName string, triggerName string, top *int) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - "triggerName": url.QueryEscape(triggerName), - "workflowName": url.QueryEscape(workflowName), - } - - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - if top != nil { - queryParameters["$top"] = top - } - - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/workflows/{workflowName}/triggers/{triggerName}/histories"), - autorest.WithPathParameters(pathParameters), - autorest.WithQueryParameters(queryParameters)) -} - -// ListSender sends the List request. The method will close the -// http.Response Body if it receives an error. 
-func (client WorkflowTriggerHistoriesClient) ListSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) -} - -// ListResponder handles the response to the List request. The method always -// closes the http.Response Body. -func (client WorkflowTriggerHistoriesClient) ListResponder(resp *http.Response) (result WorkflowTriggerHistoryListResult, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// ListNextResults retrieves the next set of results, if any. -func (client WorkflowTriggerHistoriesClient) ListNextResults(lastResults WorkflowTriggerHistoryListResult) (result WorkflowTriggerHistoryListResult, ae error) { - req, err := lastResults.WorkflowTriggerHistoryListResultPreparer() - if err != nil { - return result, autorest.NewErrorWithError(err, "logic/WorkflowTriggerHistoriesClient", "List", "Failure preparing next results request request") - } - if req == nil { - return - } - - resp, err := client.ListSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "logic/WorkflowTriggerHistoriesClient", "List", "Failure sending next results request request") - } - - result, err = client.ListResponder(resp) - if err != nil { - ae = autorest.NewErrorWithError(err, "logic/WorkflowTriggerHistoriesClient", "List", "Failure responding to next results request request") - } - - return -} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/logic/workflowtriggers.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/logic/workflowtriggers.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/logic/workflowtriggers.go 2016-08-16 08:56:25.000000000 +0000 +++ 
juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/logic/workflowtriggers.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,267 +0,0 @@ -package logic - -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. -// -// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 -// Changes may cause incorrect behavior and will be lost if the code is -// regenerated. - -import ( - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" - "net/http" - "net/url" -) - -// WorkflowTriggersClient is the client for the WorkflowTriggers methods of -// the Logic service. -type WorkflowTriggersClient struct { - ManagementClient -} - -// NewWorkflowTriggersClient creates an instance of the WorkflowTriggersClient -// client. -func NewWorkflowTriggersClient(subscriptionID string) WorkflowTriggersClient { - return NewWorkflowTriggersClientWithBaseURI(DefaultBaseURI, subscriptionID) -} - -// NewWorkflowTriggersClientWithBaseURI creates an instance of the -// WorkflowTriggersClient client. -func NewWorkflowTriggersClientWithBaseURI(baseURI string, subscriptionID string) WorkflowTriggersClient { - return WorkflowTriggersClient{NewWithBaseURI(baseURI, subscriptionID)} -} - -// Get gets a workflow trigger. -// -// resourceGroupName is the resource group name. workflowName is the workflow -// name. triggerName is the workflow trigger name. 
-func (client WorkflowTriggersClient) Get(resourceGroupName string, workflowName string, triggerName string) (result WorkflowTrigger, ae error) { - req, err := client.GetPreparer(resourceGroupName, workflowName, triggerName) - if err != nil { - return result, autorest.NewErrorWithError(err, "logic/WorkflowTriggersClient", "Get", "Failure preparing request") - } - - resp, err := client.GetSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "logic/WorkflowTriggersClient", "Get", "Failure sending request") - } - - result, err = client.GetResponder(resp) - if err != nil { - ae = autorest.NewErrorWithError(err, "logic/WorkflowTriggersClient", "Get", "Failure responding to request") - } - - return -} - -// GetPreparer prepares the Get request. -func (client WorkflowTriggersClient) GetPreparer(resourceGroupName string, workflowName string, triggerName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - "triggerName": url.QueryEscape(triggerName), - "workflowName": url.QueryEscape(workflowName), - } - - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/workflows/{workflowName}/triggers/{triggerName}"), - autorest.WithPathParameters(pathParameters), - autorest.WithQueryParameters(queryParameters)) -} - -// GetSender sends the Get request. The method will close the -// http.Response Body if it receives an error. 
-func (client WorkflowTriggersClient) GetSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) -} - -// GetResponder handles the response to the Get request. The method always -// closes the http.Response Body. -func (client WorkflowTriggersClient) GetResponder(resp *http.Response) (result WorkflowTrigger, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// List gets a list of workflow triggers. -// -// resourceGroupName is the resource group name. workflowName is the workflow -// name. top is the number of items to be included in the result. filter is -// the filter to apply on the operation. -func (client WorkflowTriggersClient) List(resourceGroupName string, workflowName string, top *int, filter string) (result WorkflowTriggerListResult, ae error) { - req, err := client.ListPreparer(resourceGroupName, workflowName, top, filter) - if err != nil { - return result, autorest.NewErrorWithError(err, "logic/WorkflowTriggersClient", "List", "Failure preparing request") - } - - resp, err := client.ListSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "logic/WorkflowTriggersClient", "List", "Failure sending request") - } - - result, err = client.ListResponder(resp) - if err != nil { - ae = autorest.NewErrorWithError(err, "logic/WorkflowTriggersClient", "List", "Failure responding to request") - } - - return -} - -// ListPreparer prepares the List request. 
-func (client WorkflowTriggersClient) ListPreparer(resourceGroupName string, workflowName string, top *int, filter string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - "workflowName": url.QueryEscape(workflowName), - } - - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - if top != nil { - queryParameters["$top"] = top - } - if len(filter) > 0 { - queryParameters["$filter"] = filter - } - - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/workflows/{workflowName}/triggers/"), - autorest.WithPathParameters(pathParameters), - autorest.WithQueryParameters(queryParameters)) -} - -// ListSender sends the List request. The method will close the -// http.Response Body if it receives an error. -func (client WorkflowTriggersClient) ListSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) -} - -// ListResponder handles the response to the List request. The method always -// closes the http.Response Body. -func (client WorkflowTriggersClient) ListResponder(resp *http.Response) (result WorkflowTriggerListResult, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// ListNextResults retrieves the next set of results, if any. 
-func (client WorkflowTriggersClient) ListNextResults(lastResults WorkflowTriggerListResult) (result WorkflowTriggerListResult, ae error) { - req, err := lastResults.WorkflowTriggerListResultPreparer() - if err != nil { - return result, autorest.NewErrorWithError(err, "logic/WorkflowTriggersClient", "List", "Failure preparing next results request request") - } - if req == nil { - return - } - - resp, err := client.ListSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "logic/WorkflowTriggersClient", "List", "Failure sending next results request request") - } - - result, err = client.ListResponder(resp) - if err != nil { - ae = autorest.NewErrorWithError(err, "logic/WorkflowTriggersClient", "List", "Failure responding to next results request request") - } - - return -} - -// Run runs a workflow trigger. -// -// resourceGroupName is the resource group name. workflowName is the workflow -// name. triggerName is the workflow trigger name. -func (client WorkflowTriggersClient) Run(resourceGroupName string, workflowName string, triggerName string) (result autorest.Response, ae error) { - req, err := client.RunPreparer(resourceGroupName, workflowName, triggerName) - if err != nil { - return result, autorest.NewErrorWithError(err, "logic/WorkflowTriggersClient", "Run", "Failure preparing request") - } - - resp, err := client.RunSender(req) - if err != nil { - result.Response = resp - return result, autorest.NewErrorWithError(err, "logic/WorkflowTriggersClient", "Run", "Failure sending request") - } - - result, err = client.RunResponder(resp) - if err != nil { - ae = autorest.NewErrorWithError(err, "logic/WorkflowTriggersClient", "Run", "Failure responding to request") - } - - return -} - -// RunPreparer prepares the Run request. 
-func (client WorkflowTriggersClient) RunPreparer(resourceGroupName string, workflowName string, triggerName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - "triggerName": url.QueryEscape(triggerName), - "workflowName": url.QueryEscape(workflowName), - } - - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsPost(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/workflows/{workflowName}/triggers/{triggerName}/run"), - autorest.WithPathParameters(pathParameters), - autorest.WithQueryParameters(queryParameters)) -} - -// RunSender sends the Run request. The method will close the -// http.Response Body if it receives an error. -func (client WorkflowTriggersClient) RunSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) -} - -// RunResponder handles the response to the Run request. The method always -// closes the http.Response Body. 
-func (client WorkflowTriggersClient) RunResponder(resp *http.Response) (result autorest.Response, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByClosing()) - result.Response = resp - return -} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/logic/workflowversions.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/logic/workflowversions.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/logic/workflowversions.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/logic/workflowversions.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,108 +0,0 @@ -package logic - -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. -// -// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 -// Changes may cause incorrect behavior and will be lost if the code is -// regenerated. - -import ( - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" - "net/http" - "net/url" -) - -// WorkflowVersionsClient is the client for the WorkflowVersions methods of -// the Logic service. -type WorkflowVersionsClient struct { - ManagementClient -} - -// NewWorkflowVersionsClient creates an instance of the WorkflowVersionsClient -// client. 
-func NewWorkflowVersionsClient(subscriptionID string) WorkflowVersionsClient { - return NewWorkflowVersionsClientWithBaseURI(DefaultBaseURI, subscriptionID) -} - -// NewWorkflowVersionsClientWithBaseURI creates an instance of the -// WorkflowVersionsClient client. -func NewWorkflowVersionsClientWithBaseURI(baseURI string, subscriptionID string) WorkflowVersionsClient { - return WorkflowVersionsClient{NewWithBaseURI(baseURI, subscriptionID)} -} - -// Get gets a workflow version. -// -// resourceGroupName is the resource group name. workflowName is the workflow -// name. versionID is the workflow versionId. -func (client WorkflowVersionsClient) Get(resourceGroupName string, workflowName string, versionID string) (result WorkflowVersion, ae error) { - req, err := client.GetPreparer(resourceGroupName, workflowName, versionID) - if err != nil { - return result, autorest.NewErrorWithError(err, "logic/WorkflowVersionsClient", "Get", "Failure preparing request") - } - - resp, err := client.GetSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "logic/WorkflowVersionsClient", "Get", "Failure sending request") - } - - result, err = client.GetResponder(resp) - if err != nil { - ae = autorest.NewErrorWithError(err, "logic/WorkflowVersionsClient", "Get", "Failure responding to request") - } - - return -} - -// GetPreparer prepares the Get request. 
-func (client WorkflowVersionsClient) GetPreparer(resourceGroupName string, workflowName string, versionID string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - "versionId": url.QueryEscape(versionID), - "workflowName": url.QueryEscape(workflowName), - } - - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/workflows/{workflowName}/versions/{versionId}"), - autorest.WithPathParameters(pathParameters), - autorest.WithQueryParameters(queryParameters)) -} - -// GetSender sends the Get request. The method will close the -// http.Response Body if it receives an error. -func (client WorkflowVersionsClient) GetSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) -} - -// GetResponder handles the response to the Get request. The method always -// closes the http.Response Body. 
-func (client WorkflowVersionsClient) GetResponder(resp *http.Response) (result WorkflowVersion, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/machinelearning/client.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/machinelearning/client.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/machinelearning/client.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/machinelearning/client.go 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,63 @@ +// Package machinelearning implements the Azure ARM Machinelearning service +// API version 2016-05-01-preview. +// +// These APIs allow end users to operate on Azure Machine Learning Web +// Services resources. They support the following operations:
  • Create +// or update a web service
  • Get a web service
  • Patch a web +// service
  • Delete a web service
  • Get All Web Services in a +// Resource Group
  • Get All Web Services in a Subscription
  • Get +// Web Services Keys
+package machinelearning + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" +) + +const ( + // APIVersion is the version of the Machinelearning + APIVersion = "2016-05-01-preview" + + // DefaultBaseURI is the default URI used for the service Machinelearning + DefaultBaseURI = "https://management.azure.com" +) + +// ManagementClient is the base client for Machinelearning. +type ManagementClient struct { + autorest.Client + BaseURI string + APIVersion string + SubscriptionID string +} + +// New creates an instance of the ManagementClient client. +func New(subscriptionID string) ManagementClient { + return NewWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewWithBaseURI creates an instance of the ManagementClient client. 
+func NewWithBaseURI(baseURI string, subscriptionID string) ManagementClient { + return ManagementClient{ + Client: autorest.NewClientWithUserAgent(UserAgent()), + BaseURI: baseURI, + APIVersion: APIVersion, + SubscriptionID: subscriptionID, + } +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/machinelearning/models.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/machinelearning/models.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/machinelearning/models.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/machinelearning/models.go 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,403 @@ +package machinelearning + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/date" +) + +// AssetType enumerates the values for asset type. +type AssetType string + +const ( + // AssetTypeModule specifies the asset type module state for asset type. + AssetTypeModule AssetType = "Module" + // AssetTypeResource specifies the asset type resource state for asset + // type. 
+ AssetTypeResource AssetType = "Resource" +) + +// ColumnFormat enumerates the values for column format. +type ColumnFormat string + +const ( + // Byte specifies the byte state for column format. + Byte ColumnFormat = "Byte" + // Char specifies the char state for column format. + Char ColumnFormat = "Char" + // Datetime specifies the datetime state for column format. + Datetime ColumnFormat = "Datetime" + // Double specifies the double state for column format. + Double ColumnFormat = "Double" + // Duration specifies the duration state for column format. + Duration ColumnFormat = "Duration" + // Float specifies the float state for column format. + Float ColumnFormat = "Float" + // Int16 specifies the int 16 state for column format. + Int16 ColumnFormat = "Int16" + // Int32 specifies the int 32 state for column format. + Int32 ColumnFormat = "Int32" + // Int64 specifies the int 64 state for column format. + Int64 ColumnFormat = "Int64" + // Int8 specifies the int 8 state for column format. + Int8 ColumnFormat = "Int8" + // Uint16 specifies the uint 16 state for column format. + Uint16 ColumnFormat = "Uint16" + // Uint32 specifies the uint 32 state for column format. + Uint32 ColumnFormat = "Uint32" + // Uint64 specifies the uint 64 state for column format. + Uint64 ColumnFormat = "Uint64" + // Uint8 specifies the uint 8 state for column format. + Uint8 ColumnFormat = "Uint8" +) + +// ColumnType enumerates the values for column type. +type ColumnType string + +const ( + // Boolean specifies the boolean state for column type. + Boolean ColumnType = "Boolean" + // Integer specifies the integer state for column type. + Integer ColumnType = "Integer" + // Number specifies the number state for column type. + Number ColumnType = "Number" + // String specifies the string state for column type. + String ColumnType = "String" +) + +// DiagnosticsLevel enumerates the values for diagnostics level. 
+type DiagnosticsLevel string + +const ( + // All specifies the all state for diagnostics level. + All DiagnosticsLevel = "All" + // Error specifies the error state for diagnostics level. + Error DiagnosticsLevel = "Error" + // None specifies the none state for diagnostics level. + None DiagnosticsLevel = "None" +) + +// InputPortType enumerates the values for input port type. +type InputPortType string + +const ( + // Dataset specifies the dataset state for input port type. + Dataset InputPortType = "Dataset" +) + +// OutputPortType enumerates the values for output port type. +type OutputPortType string + +const ( + // OutputPortTypeDataset specifies the output port type dataset state for + // output port type. + OutputPortTypeDataset OutputPortType = "Dataset" +) + +// ParameterType enumerates the values for parameter type. +type ParameterType string + +const ( + // ParameterTypeBoolean specifies the parameter type boolean state for + // parameter type. + ParameterTypeBoolean ParameterType = "Boolean" + // ParameterTypeColumnPicker specifies the parameter type column picker + // state for parameter type. + ParameterTypeColumnPicker ParameterType = "ColumnPicker" + // ParameterTypeCredential specifies the parameter type credential state + // for parameter type. + ParameterTypeCredential ParameterType = "Credential" + // ParameterTypeDataGatewayName specifies the parameter type data gateway + // name state for parameter type. + ParameterTypeDataGatewayName ParameterType = "DataGatewayName" + // ParameterTypeDouble specifies the parameter type double state for + // parameter type. + ParameterTypeDouble ParameterType = "Double" + // ParameterTypeEnumerated specifies the parameter type enumerated state + // for parameter type. + ParameterTypeEnumerated ParameterType = "Enumerated" + // ParameterTypeFloat specifies the parameter type float state for + // parameter type. 
+ ParameterTypeFloat ParameterType = "Float" + // ParameterTypeInt specifies the parameter type int state for parameter + // type. + ParameterTypeInt ParameterType = "Int" + // ParameterTypeMode specifies the parameter type mode state for parameter + // type. + ParameterTypeMode ParameterType = "Mode" + // ParameterTypeParameterRange specifies the parameter type parameter + // range state for parameter type. + ParameterTypeParameterRange ParameterType = "ParameterRange" + // ParameterTypeScript specifies the parameter type script state for + // parameter type. + ParameterTypeScript ParameterType = "Script" + // ParameterTypeString specifies the parameter type string state for + // parameter type. + ParameterTypeString ParameterType = "String" +) + +// ProvisioningState enumerates the values for provisioning state. +type ProvisioningState string + +const ( + // Canceled specifies the canceled state for provisioning state. + Canceled ProvisioningState = "Canceled" + // Failed specifies the failed state for provisioning state. + Failed ProvisioningState = "Failed" + // Provisioning specifies the provisioning state for provisioning state. + Provisioning ProvisioningState = "Provisioning" + // Succeeded specifies the succeeded state for provisioning state. + Succeeded ProvisioningState = "Succeeded" + // Unknown specifies the unknown state for provisioning state. + Unknown ProvisioningState = "Unknown" +) + +// AssetItem is information about an asset associated with the web service. 
+type AssetItem struct { + Name *string `json:"name,omitempty"` + ID *string `json:"id,omitempty"` + Type AssetType `json:"type,omitempty"` + LocationInfo *AssetLocation `json:"locationInfo,omitempty"` + InputPorts *map[string]*InputPort `json:"inputPorts,omitempty"` + OutputPorts *map[string]*OutputPort `json:"outputPorts,omitempty"` + Metadata *map[string]*string `json:"metadata,omitempty"` + Parameters *[]ModuleAssetParameter `json:"parameters,omitempty"` +} + +// AssetLocation is describes the access location for a web service asset. +type AssetLocation struct { + URI *string `json:"uri,omitempty"` + Credentials *string `json:"credentials,omitempty"` +} + +// ColumnSpecification is swagger 2.0 schema for a column within the data +// table representing a web service input or output. See Swagger +// specification: http://swagger.io/specification/ +type ColumnSpecification struct { + Type ColumnType `json:"type,omitempty"` + Format ColumnFormat `json:"format,omitempty"` + Enum *[]map[string]interface{} `json:"enum,omitempty"` + XMsIsnullable *bool `json:"x-ms-isnullable,omitempty"` + XMsIsordered *bool `json:"x-ms-isordered,omitempty"` +} + +// CommitmentPlan is information about the machine learning commitment plan +// associated with the web service. +type CommitmentPlan struct { + ID *string `json:"id,omitempty"` +} + +// DiagnosticsConfiguration is diagnostics settings for an Azure ML web +// service. +type DiagnosticsConfiguration struct { + Level DiagnosticsLevel `json:"level,omitempty"` + Expiry *date.Time `json:"expiry,omitempty"` +} + +// ExampleRequest is sample input data for the service's input(s). +type ExampleRequest struct { + Inputs *map[string][][]map[string]interface{} `json:"inputs,omitempty"` + GlobalParameters *map[string]map[string]interface{} `json:"globalParameters,omitempty"` +} + +// GraphEdge is defines an edge within the web service's graph. 
+type GraphEdge struct { + SourceNodeID *string `json:"sourceNodeId,omitempty"` + SourcePortID *string `json:"sourcePortId,omitempty"` + TargetNodeID *string `json:"targetNodeId,omitempty"` + TargetPortID *string `json:"targetPortId,omitempty"` +} + +// GraphNode is specifies a node in the web service graph. The node can either +// be an input, output or asset node, so only one of the corresponding id +// properties is populated at any given time. +type GraphNode struct { + AssetID *string `json:"assetId,omitempty"` + InputID *string `json:"inputId,omitempty"` + OutputID *string `json:"outputId,omitempty"` + Parameters *map[string]*string `json:"parameters,omitempty"` +} + +// GraphPackage is defines the graph of modules making up the machine learning +// solution. +type GraphPackage struct { + Nodes *map[string]*GraphNode `json:"nodes,omitempty"` + Edges *[]GraphEdge `json:"edges,omitempty"` + GraphParameters *map[string]*GraphParameter `json:"graphParameters,omitempty"` +} + +// GraphParameter is defines a global parameter in the graph. +type GraphParameter struct { + Description *string `json:"description,omitempty"` + Type ParameterType `json:"type,omitempty"` + Links *[]GraphParameterLink `json:"links,omitempty"` +} + +// GraphParameterLink is association link for a graph global parameter to a +// node in the graph. +type GraphParameterLink struct { + NodeID *string `json:"nodeId,omitempty"` + ParameterKey *string `json:"parameterKey,omitempty"` +} + +// InputPort is asset input port +type InputPort struct { + Type InputPortType `json:"type,omitempty"` +} + +// ModeValueInfo is nested parameter definition. +type ModeValueInfo struct { + InterfaceString *string `json:"interfaceString,omitempty"` + Parameters *[]ModuleAssetParameter `json:"parameters,omitempty"` +} + +// ModuleAssetParameter is parameter definition for a module asset. 
+type ModuleAssetParameter struct { + Name *string `json:"name,omitempty"` + ParameterType *string `json:"parameterType,omitempty"` + ModeValuesInfo *map[string]*ModeValueInfo `json:"modeValuesInfo,omitempty"` +} + +// OutputPort is asset output port +type OutputPort struct { + Type OutputPortType `json:"type,omitempty"` +} + +// PaginatedWebServicesList is paginated list of web services. +type PaginatedWebServicesList struct { + autorest.Response `json:"-"` + Value *[]WebService `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// RealtimeConfiguration is holds the available configuration options for an +// Azure ML web service endpoint. +type RealtimeConfiguration struct { + MaxConcurrentCalls *int32 `json:"maxConcurrentCalls,omitempty"` +} + +// Resource is +type Resource struct { + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Location *string `json:"location,omitempty"` + Type *string `json:"type,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` +} + +// ServiceInputOutputSpecification is the swagger 2.0 schema describing the +// service's inputs or outputs. See Swagger specification: +// http://swagger.io/specification/ +type ServiceInputOutputSpecification struct { + Title *string `json:"title,omitempty"` + Description *string `json:"description,omitempty"` + Type *string `json:"type,omitempty"` + Properties *map[string]*TableSpecification `json:"properties,omitempty"` +} + +// StorageAccount is access information for a storage account. +type StorageAccount struct { + Name *string `json:"name,omitempty"` + Key *string `json:"key,omitempty"` +} + +// TableSpecification is the swagger 2.0 schema describing a single service +// input or output. 
See Swagger specification: +// http://swagger.io/specification/ +type TableSpecification struct { + Title *string `json:"title,omitempty"` + Description *string `json:"description,omitempty"` + Type *string `json:"type,omitempty"` + Format *string `json:"format,omitempty"` + Properties *map[string]*ColumnSpecification `json:"properties,omitempty"` +} + +// WebService is instance of an Azure ML web service resource. +type WebService struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Location *string `json:"location,omitempty"` + Type *string `json:"type,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` + Properties *WebServiceProperties `json:"properties,omitempty"` +} + +// WebServiceKeys is access keys for the web service calls. +type WebServiceKeys struct { + autorest.Response `json:"-"` + Primary *string `json:"primary,omitempty"` + Secondary *string `json:"secondary,omitempty"` +} + +// WebServiceProperties is the set of properties specific to the Azure ML web +// service resource. 
+type WebServiceProperties struct { + Title *string `json:"title,omitempty"` + Description *string `json:"description,omitempty"` + CreatedOn *date.Time `json:"createdOn,omitempty"` + ModifiedOn *date.Time `json:"modifiedOn,omitempty"` + ProvisioningState ProvisioningState `json:"provisioningState,omitempty"` + Keys *WebServiceKeys `json:"keys,omitempty"` + ReadOnly *bool `json:"readOnly,omitempty"` + SwaggerLocation *string `json:"swaggerLocation,omitempty"` + ExposeSampleData *bool `json:"exposeSampleData,omitempty"` + RealtimeConfiguration *RealtimeConfiguration `json:"realtimeConfiguration,omitempty"` + Diagnostics *DiagnosticsConfiguration `json:"diagnostics,omitempty"` + StorageAccount *StorageAccount `json:"storageAccount,omitempty"` + MachineLearningWorkspace *Workspace `json:"machineLearningWorkspace,omitempty"` + CommitmentPlan *CommitmentPlan `json:"commitmentPlan,omitempty"` + Input *ServiceInputOutputSpecification `json:"input,omitempty"` + Output *ServiceInputOutputSpecification `json:"output,omitempty"` + ExampleRequest *ExampleRequest `json:"exampleRequest,omitempty"` + Assets *map[string]*AssetItem `json:"assets,omitempty"` + Parameters *map[string]*string `json:"parameters,omitempty"` +} + +// WebServicePropertiesForGraph is properties specific to a Graph based web +// service. 
+type WebServicePropertiesForGraph struct { + Title *string `json:"title,omitempty"` + Description *string `json:"description,omitempty"` + CreatedOn *date.Time `json:"createdOn,omitempty"` + ModifiedOn *date.Time `json:"modifiedOn,omitempty"` + ProvisioningState ProvisioningState `json:"provisioningState,omitempty"` + Keys *WebServiceKeys `json:"keys,omitempty"` + ReadOnly *bool `json:"readOnly,omitempty"` + SwaggerLocation *string `json:"swaggerLocation,omitempty"` + ExposeSampleData *bool `json:"exposeSampleData,omitempty"` + RealtimeConfiguration *RealtimeConfiguration `json:"realtimeConfiguration,omitempty"` + Diagnostics *DiagnosticsConfiguration `json:"diagnostics,omitempty"` + StorageAccount *StorageAccount `json:"storageAccount,omitempty"` + MachineLearningWorkspace *Workspace `json:"machineLearningWorkspace,omitempty"` + CommitmentPlan *CommitmentPlan `json:"commitmentPlan,omitempty"` + Input *ServiceInputOutputSpecification `json:"input,omitempty"` + Output *ServiceInputOutputSpecification `json:"output,omitempty"` + ExampleRequest *ExampleRequest `json:"exampleRequest,omitempty"` + Assets *map[string]*AssetItem `json:"assets,omitempty"` + Parameters *map[string]*string `json:"parameters,omitempty"` + Package *GraphPackage `json:"package,omitempty"` +} + +// Workspace is information about the machine learning workspace containing +// the experiment that is source for the web service. 
+type Workspace struct { + ID *string `json:"id,omitempty"` +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/machinelearning/version.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/machinelearning/version.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/machinelearning/version.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/machinelearning/version.go 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,43 @@ +package machinelearning + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "fmt" +) + +const ( + major = "3" + minor = "1" + patch = "0" + // Always begin a "tag" with a dash (as per http://semver.org) + tag = "-beta" + semVerFormat = "%s.%s.%s%s" + userAgentFormat = "Azure-SDK-for-Go/%s arm-%s/%s" +) + +// UserAgent returns the UserAgent string to use when sending http.Requests. +func UserAgent() string { + return fmt.Sprintf(userAgentFormat, Version(), "machinelearning", "2016-05-01-preview") +} + +// Version returns the semantic version (see http://semver.org) of the client. 
+func Version() string { + return fmt.Sprintf(semVerFormat, major, minor, patch, tag) +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/machinelearning/webservices.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/machinelearning/webservices.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/machinelearning/webservices.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/machinelearning/webservices.go 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,509 @@ +package machinelearning + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "net/http" +) + +// WebServicesClient is the these APIs allow end users to operate on Azure +// Machine Learning Web Services resources. They support the following +// operations:
  • Create or update a web service
  • Get a web +// service
  • Patch a web service
  • Delete a web +// service
  • Get All Web Services in a Resource Group
  • Get All +// Web Services in a Subscription
  • Get Web Services Keys
+type WebServicesClient struct { + ManagementClient +} + +// NewWebServicesClient creates an instance of the WebServicesClient client. +func NewWebServicesClient(subscriptionID string) WebServicesClient { + return NewWebServicesClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewWebServicesClientWithBaseURI creates an instance of the +// WebServicesClient client. +func NewWebServicesClientWithBaseURI(baseURI string, subscriptionID string) WebServicesClient { + return WebServicesClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate creates or updates a new Azure ML web service or update an +// existing one. This method may poll for completion. Polling can be canceled +// by passing the cancel channel argument. The channel will be used to cancel +// polling and any outstanding HTTP requests. +// +// createOrUpdatePayload is the payload to create or update the Azure ML web +// service. resourceGroupName is name of the resource group. webServiceName +// is the Azure ML web service name which you want to reach. 
+func (client WebServicesClient) CreateOrUpdate(createOrUpdatePayload WebService, resourceGroupName string, webServiceName string, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.CreateOrUpdatePreparer(createOrUpdatePayload, resourceGroupName, webServiceName, cancel) + if err != nil { + return result, autorest.NewErrorWithError(err, "machinelearning.WebServicesClient", "CreateOrUpdate", nil, "Failure preparing request") + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "machinelearning.WebServicesClient", "CreateOrUpdate", resp, "Failure sending request") + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "machinelearning.WebServicesClient", "CreateOrUpdate", resp, "Failure responding to request") + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. +func (client WebServicesClient) CreateOrUpdatePreparer(createOrUpdatePayload WebService, resourceGroupName string, webServiceName string, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "webServiceName": autorest.Encode("path", webServiceName), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearning/webServices/{webServiceName}", pathParameters), + autorest.WithJSON(createOrUpdatePayload), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// CreateOrUpdateSender sends 
the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client WebServicesClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client WebServicesClient) CreateOrUpdateResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get retrieve an Azure ML web service definition by its subscription, +// resource group and name. +// +// resourceGroupName is name of the resource group. webServiceName is the +// Azure ML web service name which you want to reach. +func (client WebServicesClient) Get(resourceGroupName string, webServiceName string) (result WebService, err error) { + req, err := client.GetPreparer(resourceGroupName, webServiceName) + if err != nil { + return result, autorest.NewErrorWithError(err, "machinelearning.WebServicesClient", "Get", nil, "Failure preparing request") + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "machinelearning.WebServicesClient", "Get", resp, "Failure sending request") + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "machinelearning.WebServicesClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. 
+func (client WebServicesClient) GetPreparer(resourceGroupName string, webServiceName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "webServiceName": autorest.Encode("path", webServiceName), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearning/webServices/{webServiceName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client WebServicesClient) GetSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client WebServicesClient) GetResponder(resp *http.Response) (result WebService, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List retrieve all Azure ML web services in the current Azure subscription. +// +// skiptoken is continuation token for pagination. 
+func (client WebServicesClient) List(skiptoken string) (result PaginatedWebServicesList, err error) { + req, err := client.ListPreparer(skiptoken) + if err != nil { + return result, autorest.NewErrorWithError(err, "machinelearning.WebServicesClient", "List", nil, "Failure preparing request") + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "machinelearning.WebServicesClient", "List", resp, "Failure sending request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "machinelearning.WebServicesClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client WebServicesClient) ListPreparer(skiptoken string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + if len(skiptoken) > 0 { + queryParameters["$skiptoken"] = autorest.Encode("query", skiptoken) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.MachineLearning/webServices", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client WebServicesClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. 
+func (client WebServicesClient) ListResponder(resp *http.Response) (result PaginatedWebServicesList, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListInResourceGroup retrieve all Azure ML web services in a given resource +// group. +// +// resourceGroupName is name of the resource group. skiptoken is continuation +// token for pagination. +func (client WebServicesClient) ListInResourceGroup(resourceGroupName string, skiptoken string) (result PaginatedWebServicesList, err error) { + req, err := client.ListInResourceGroupPreparer(resourceGroupName, skiptoken) + if err != nil { + return result, autorest.NewErrorWithError(err, "machinelearning.WebServicesClient", "ListInResourceGroup", nil, "Failure preparing request") + } + + resp, err := client.ListInResourceGroupSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "machinelearning.WebServicesClient", "ListInResourceGroup", resp, "Failure sending request") + } + + result, err = client.ListInResourceGroupResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "machinelearning.WebServicesClient", "ListInResourceGroup", resp, "Failure responding to request") + } + + return +} + +// ListInResourceGroupPreparer prepares the ListInResourceGroup request. 
+func (client WebServicesClient) ListInResourceGroupPreparer(resourceGroupName string, skiptoken string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + if len(skiptoken) > 0 { + queryParameters["$skiptoken"] = autorest.Encode("query", skiptoken) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearning/webServices", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListInResourceGroupSender sends the ListInResourceGroup request. The method will close the +// http.Response Body if it receives an error. +func (client WebServicesClient) ListInResourceGroupSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListInResourceGroupResponder handles the response to the ListInResourceGroup request. The method always +// closes the http.Response Body. +func (client WebServicesClient) ListInResourceGroupResponder(resp *http.Response) (result PaginatedWebServicesList, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListKeys get the access keys of a particular Azure ML web service +// +// resourceGroupName is name of the resource group. webServiceName is the +// Azure ML web service name which you want to reach. 
+func (client WebServicesClient) ListKeys(resourceGroupName string, webServiceName string) (result WebServiceKeys, err error) { + req, err := client.ListKeysPreparer(resourceGroupName, webServiceName) + if err != nil { + return result, autorest.NewErrorWithError(err, "machinelearning.WebServicesClient", "ListKeys", nil, "Failure preparing request") + } + + resp, err := client.ListKeysSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "machinelearning.WebServicesClient", "ListKeys", resp, "Failure sending request") + } + + result, err = client.ListKeysResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "machinelearning.WebServicesClient", "ListKeys", resp, "Failure responding to request") + } + + return +} + +// ListKeysPreparer prepares the ListKeys request. +func (client WebServicesClient) ListKeysPreparer(resourceGroupName string, webServiceName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "webServiceName": autorest.Encode("path", webServiceName), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearning/webServices/{webServiceName}/listKeys", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListKeysSender sends the ListKeys request. The method will close the +// http.Response Body if it receives an error. 
+func (client WebServicesClient) ListKeysSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListKeysResponder handles the response to the ListKeys request. The method always +// closes the http.Response Body. +func (client WebServicesClient) ListKeysResponder(resp *http.Response) (result WebServiceKeys, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Patch patch an existing Azure ML web service resource. This method may poll +// for completion. Polling can be canceled by passing the cancel channel +// argument. The channel will be used to cancel polling and any outstanding +// HTTP requests. +// +// patchPayload is the payload to patch the Azure ML web service with. +// resourceGroupName is name of the resource group. webServiceName is the +// Azure ML web service name which you want to reach. +func (client WebServicesClient) Patch(patchPayload WebService, resourceGroupName string, webServiceName string, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.PatchPreparer(patchPayload, resourceGroupName, webServiceName, cancel) + if err != nil { + return result, autorest.NewErrorWithError(err, "machinelearning.WebServicesClient", "Patch", nil, "Failure preparing request") + } + + resp, err := client.PatchSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "machinelearning.WebServicesClient", "Patch", resp, "Failure sending request") + } + + result, err = client.PatchResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "machinelearning.WebServicesClient", "Patch", resp, "Failure responding to request") + } + + return +} + +// PatchPreparer prepares the Patch request. 
+func (client WebServicesClient) PatchPreparer(patchPayload WebService, resourceGroupName string, webServiceName string, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "webServiceName": autorest.Encode("path", webServiceName), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPatch(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearning/webServices/{webServiceName}", pathParameters), + autorest.WithJSON(patchPayload), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// PatchSender sends the Patch request. The method will close the +// http.Response Body if it receives an error. +func (client WebServicesClient) PatchSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// PatchResponder handles the response to the Patch request. The method always +// closes the http.Response Body. +func (client WebServicesClient) PatchResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByClosing()) + result.Response = resp + return +} + +// Remove remove an existing Azure ML web service. This method may poll for +// completion. Polling can be canceled by passing the cancel channel +// argument. The channel will be used to cancel polling and any outstanding +// HTTP requests. +// +// resourceGroupName is name of the resource group. 
webServiceName is the +// Azure ML web service name which you want to reach. +func (client WebServicesClient) Remove(resourceGroupName string, webServiceName string, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.RemovePreparer(resourceGroupName, webServiceName, cancel) + if err != nil { + return result, autorest.NewErrorWithError(err, "machinelearning.WebServicesClient", "Remove", nil, "Failure preparing request") + } + + resp, err := client.RemoveSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "machinelearning.WebServicesClient", "Remove", resp, "Failure sending request") + } + + result, err = client.RemoveResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "machinelearning.WebServicesClient", "Remove", resp, "Failure responding to request") + } + + return +} + +// RemovePreparer prepares the Remove request. +func (client WebServicesClient) RemovePreparer(resourceGroupName string, webServiceName string, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "webServiceName": autorest.Encode("path", webServiceName), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearning/webServices/{webServiceName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// RemoveSender sends the Remove request. The method will close the +// http.Response Body if it receives an error. 
+func (client WebServicesClient) RemoveSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// RemoveResponder handles the response to the Remove request. The method always +// closes the http.Response Body. +func (client WebServicesClient) RemoveResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/mobileengagement/appcollections.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/mobileengagement/appcollections.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/mobileengagement/appcollections.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/mobileengagement/appcollections.go 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,186 @@ +package mobileengagement + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. 
+ +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "net/http" +) + +// AppCollectionsClient is the microsoft Azure Mobile Engagement REST APIs. +type AppCollectionsClient struct { + ManagementClient +} + +// NewAppCollectionsClient creates an instance of the AppCollectionsClient +// client. +func NewAppCollectionsClient(subscriptionID string, resourceGroupName string, appCollection string, appName string) AppCollectionsClient { + return NewAppCollectionsClientWithBaseURI(DefaultBaseURI, subscriptionID, resourceGroupName, appCollection, appName) +} + +// NewAppCollectionsClientWithBaseURI creates an instance of the +// AppCollectionsClient client. +func NewAppCollectionsClientWithBaseURI(baseURI string, subscriptionID string, resourceGroupName string, appCollection string, appName string) AppCollectionsClient { + return AppCollectionsClient{NewWithBaseURI(baseURI, subscriptionID, resourceGroupName, appCollection, appName)} +} + +// CheckNameAvailability checks availability of an app collection name in the +// Engagement domain. 
+// +func (client AppCollectionsClient) CheckNameAvailability(parameters AppCollectionNameAvailability) (result AppCollectionNameAvailability, err error) { + req, err := client.CheckNameAvailabilityPreparer(parameters) + if err != nil { + return result, autorest.NewErrorWithError(err, "mobileengagement.AppCollectionsClient", "CheckNameAvailability", nil, "Failure preparing request") + } + + resp, err := client.CheckNameAvailabilitySender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "mobileengagement.AppCollectionsClient", "CheckNameAvailability", resp, "Failure sending request") + } + + result, err = client.CheckNameAvailabilityResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "mobileengagement.AppCollectionsClient", "CheckNameAvailability", resp, "Failure responding to request") + } + + return +} + +// CheckNameAvailabilityPreparer prepares the CheckNameAvailability request. +func (client AppCollectionsClient) CheckNameAvailabilityPreparer(parameters AppCollectionNameAvailability) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.MobileEngagement/checkAppCollectionNameAvailability", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// CheckNameAvailabilitySender sends the CheckNameAvailability request. The method will close the +// http.Response Body if it receives an error. 
+func (client AppCollectionsClient) CheckNameAvailabilitySender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// CheckNameAvailabilityResponder handles the response to the CheckNameAvailability request. The method always +// closes the http.Response Body. +func (client AppCollectionsClient) CheckNameAvailabilityResponder(resp *http.Response) (result AppCollectionNameAvailability, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List lists app collections in a subscription. +func (client AppCollectionsClient) List() (result AppCollectionListResult, err error) { + req, err := client.ListPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "mobileengagement.AppCollectionsClient", "List", nil, "Failure preparing request") + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "mobileengagement.AppCollectionsClient", "List", resp, "Failure sending request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "mobileengagement.AppCollectionsClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. 
+func (client AppCollectionsClient) ListPreparer() (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.MobileEngagement/appCollections", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client AppCollectionsClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client AppCollectionsClient) ListResponder(resp *http.Response) (result AppCollectionListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListNextResults retrieves the next set of results, if any. 
+func (client AppCollectionsClient) ListNextResults(lastResults AppCollectionListResult) (result AppCollectionListResult, err error) { + req, err := lastResults.AppCollectionListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "mobileengagement.AppCollectionsClient", "List", nil, "Failure preparing next results request request") + } + if req == nil { + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "mobileengagement.AppCollectionsClient", "List", resp, "Failure sending next results request request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "mobileengagement.AppCollectionsClient", "List", resp, "Failure responding to next results request request") + } + + return +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/mobileengagement/apps.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/mobileengagement/apps.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/mobileengagement/apps.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/mobileengagement/apps.go 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,124 @@ +package mobileengagement + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "net/http" +) + +// AppsClient is the microsoft Azure Mobile Engagement REST APIs. +type AppsClient struct { + ManagementClient +} + +// NewAppsClient creates an instance of the AppsClient client. +func NewAppsClient(subscriptionID string, resourceGroupName string, appCollection string, appName string) AppsClient { + return NewAppsClientWithBaseURI(DefaultBaseURI, subscriptionID, resourceGroupName, appCollection, appName) +} + +// NewAppsClientWithBaseURI creates an instance of the AppsClient client. +func NewAppsClientWithBaseURI(baseURI string, subscriptionID string, resourceGroupName string, appCollection string, appName string) AppsClient { + return AppsClient{NewWithBaseURI(baseURI, subscriptionID, resourceGroupName, appCollection, appName)} +} + +// List lists apps in an appCollection. +func (client AppsClient) List() (result AppListResult, err error) { + req, err := client.ListPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "mobileengagement.AppsClient", "List", nil, "Failure preparing request") + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "mobileengagement.AppsClient", "List", resp, "Failure sending request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "mobileengagement.AppsClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. 
+func (client AppsClient) ListPreparer() (*http.Request, error) { + pathParameters := map[string]interface{}{ + "appCollection": autorest.Encode("path", client.AppCollection), + "resourceGroupName": autorest.Encode("path", client.ResourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MobileEngagement/appcollections/{appCollection}/apps", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client AppsClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client AppsClient) ListResponder(resp *http.Response) (result AppListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListNextResults retrieves the next set of results, if any. 
+func (client AppsClient) ListNextResults(lastResults AppListResult) (result AppListResult, err error) { + req, err := lastResults.AppListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "mobileengagement.AppsClient", "List", nil, "Failure preparing next results request request") + } + if req == nil { + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "mobileengagement.AppsClient", "List", resp, "Failure sending next results request request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "mobileengagement.AppsClient", "List", resp, "Failure responding to next results request request") + } + + return +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/mobileengagement/campaigns.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/mobileengagement/campaigns.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/mobileengagement/campaigns.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/mobileengagement/campaigns.go 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,977 @@ +package mobileengagement + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "net/http" +) + +// CampaignsClient is the microsoft Azure Mobile Engagement REST APIs. +type CampaignsClient struct { + ManagementClient +} + +// NewCampaignsClient creates an instance of the CampaignsClient client. +func NewCampaignsClient(subscriptionID string, resourceGroupName string, appCollection string, appName string) CampaignsClient { + return NewCampaignsClientWithBaseURI(DefaultBaseURI, subscriptionID, resourceGroupName, appCollection, appName) +} + +// NewCampaignsClientWithBaseURI creates an instance of the CampaignsClient +// client. +func NewCampaignsClientWithBaseURI(baseURI string, subscriptionID string, resourceGroupName string, appCollection string, appName string) CampaignsClient { + return CampaignsClient{NewWithBaseURI(baseURI, subscriptionID, resourceGroupName, appCollection, appName)} +} + +// Activate activate a campaign previously created by a call to Create +// campaign. +// +// kind is campaign kind. Possible values include: 'announcements', 'polls', +// 'dataPushes', 'nativePushes' id is campaign identifier. 
+func (client CampaignsClient) Activate(kind CampaignKinds, id int32) (result CampaignStateResult, err error) { + req, err := client.ActivatePreparer(kind, id) + if err != nil { + return result, autorest.NewErrorWithError(err, "mobileengagement.CampaignsClient", "Activate", nil, "Failure preparing request") + } + + resp, err := client.ActivateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "mobileengagement.CampaignsClient", "Activate", resp, "Failure sending request") + } + + result, err = client.ActivateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "mobileengagement.CampaignsClient", "Activate", resp, "Failure responding to request") + } + + return +} + +// ActivatePreparer prepares the Activate request. +func (client CampaignsClient) ActivatePreparer(kind CampaignKinds, id int32) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "appCollection": autorest.Encode("path", client.AppCollection), + "appName": autorest.Encode("path", client.AppName), + "id": autorest.Encode("path", id), + "kind": autorest.Encode("path", kind), + "resourceGroupName": autorest.Encode("path", client.ResourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MobileEngagement/appcollections/{appCollection}/apps/{appName}/campaigns/{kind}/{id}/activate", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ActivateSender sends the Activate request. The method will close the +// http.Response Body if it receives an error. 
+func (client CampaignsClient) ActivateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ActivateResponder handles the response to the Activate request. The method always +// closes the http.Response Body. +func (client CampaignsClient) ActivateResponder(resp *http.Response) (result CampaignStateResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Create create a push campaign (announcement, poll, data push or native +// push). +// +// kind is campaign kind. Possible values include: 'announcements', 'polls', +// 'dataPushes', 'nativePushes' parameters is parameters supplied to the +// Update Campaign operation. +func (client CampaignsClient) Create(kind CampaignKinds, parameters Campaign) (result CampaignStateResult, err error) { + req, err := client.CreatePreparer(kind, parameters) + if err != nil { + return result, autorest.NewErrorWithError(err, "mobileengagement.CampaignsClient", "Create", nil, "Failure preparing request") + } + + resp, err := client.CreateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "mobileengagement.CampaignsClient", "Create", resp, "Failure sending request") + } + + result, err = client.CreateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "mobileengagement.CampaignsClient", "Create", resp, "Failure responding to request") + } + + return +} + +// CreatePreparer prepares the Create request. 
+func (client CampaignsClient) CreatePreparer(kind CampaignKinds, parameters Campaign) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "appCollection": autorest.Encode("path", client.AppCollection), + "appName": autorest.Encode("path", client.AppName), + "kind": autorest.Encode("path", kind), + "resourceGroupName": autorest.Encode("path", client.ResourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MobileEngagement/appcollections/{appCollection}/apps/{appName}/campaigns/{kind}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// CreateSender sends the Create request. The method will close the +// http.Response Body if it receives an error. +func (client CampaignsClient) CreateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// CreateResponder handles the response to the Create request. The method always +// closes the http.Response Body. +func (client CampaignsClient) CreateResponder(resp *http.Response) (result CampaignStateResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete delete a campaign previously created by a call to Create campaign. +// +// kind is campaign kind. Possible values include: 'announcements', 'polls', +// 'dataPushes', 'nativePushes' id is campaign identifier. 
+func (client CampaignsClient) Delete(kind CampaignKinds, id int32) (result autorest.Response, err error) { + req, err := client.DeletePreparer(kind, id) + if err != nil { + return result, autorest.NewErrorWithError(err, "mobileengagement.CampaignsClient", "Delete", nil, "Failure preparing request") + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "mobileengagement.CampaignsClient", "Delete", resp, "Failure sending request") + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "mobileengagement.CampaignsClient", "Delete", resp, "Failure responding to request") + } + + return +} + +// DeletePreparer prepares the Delete request. +func (client CampaignsClient) DeletePreparer(kind CampaignKinds, id int32) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "appCollection": autorest.Encode("path", client.AppCollection), + "appName": autorest.Encode("path", client.AppName), + "id": autorest.Encode("path", id), + "kind": autorest.Encode("path", kind), + "resourceGroupName": autorest.Encode("path", client.ResourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MobileEngagement/appcollections/{appCollection}/apps/{appName}/campaigns/{kind}/{id}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. 
+func (client CampaignsClient) DeleteSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client CampaignsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByClosing()) + result.Response = resp + return +} + +// Finish finish a push campaign previously activated by a call to Activate +// campaign. +// +// kind is campaign kind. Possible values include: 'announcements', 'polls', +// 'dataPushes', 'nativePushes' id is campaign identifier. +func (client CampaignsClient) Finish(kind CampaignKinds, id int32) (result CampaignStateResult, err error) { + req, err := client.FinishPreparer(kind, id) + if err != nil { + return result, autorest.NewErrorWithError(err, "mobileengagement.CampaignsClient", "Finish", nil, "Failure preparing request") + } + + resp, err := client.FinishSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "mobileengagement.CampaignsClient", "Finish", resp, "Failure sending request") + } + + result, err = client.FinishResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "mobileengagement.CampaignsClient", "Finish", resp, "Failure responding to request") + } + + return +} + +// FinishPreparer prepares the Finish request. 
+func (client CampaignsClient) FinishPreparer(kind CampaignKinds, id int32) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "appCollection": autorest.Encode("path", client.AppCollection), + "appName": autorest.Encode("path", client.AppName), + "id": autorest.Encode("path", id), + "kind": autorest.Encode("path", kind), + "resourceGroupName": autorest.Encode("path", client.ResourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MobileEngagement/appcollections/{appCollection}/apps/{appName}/campaigns/{kind}/{id}/finish", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// FinishSender sends the Finish request. The method will close the +// http.Response Body if it receives an error. +func (client CampaignsClient) FinishSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// FinishResponder handles the response to the Finish request. The method always +// closes the http.Response Body. +func (client CampaignsClient) FinishResponder(resp *http.Response) (result CampaignStateResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Get the Get campaign operation retrieves information about a previously +// created campaign. +// +// kind is campaign kind. Possible values include: 'announcements', 'polls', +// 'dataPushes', 'nativePushes' id is campaign identifier. 
+func (client CampaignsClient) Get(kind CampaignKinds, id int32) (result CampaignResult, err error) { + req, err := client.GetPreparer(kind, id) + if err != nil { + return result, autorest.NewErrorWithError(err, "mobileengagement.CampaignsClient", "Get", nil, "Failure preparing request") + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "mobileengagement.CampaignsClient", "Get", resp, "Failure sending request") + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "mobileengagement.CampaignsClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client CampaignsClient) GetPreparer(kind CampaignKinds, id int32) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "appCollection": autorest.Encode("path", client.AppCollection), + "appName": autorest.Encode("path", client.AppName), + "id": autorest.Encode("path", id), + "kind": autorest.Encode("path", kind), + "resourceGroupName": autorest.Encode("path", client.ResourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MobileEngagement/appcollections/{appCollection}/apps/{appName}/campaigns/{kind}/{id}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. 
+func (client CampaignsClient) GetSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client CampaignsClient) GetResponder(resp *http.Response) (result CampaignResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// GetByName the Get campaign operation retrieves information about a +// previously created campaign. +// +// kind is campaign kind. Possible values include: 'announcements', 'polls', +// 'dataPushes', 'nativePushes' name is campaign name. +func (client CampaignsClient) GetByName(kind CampaignKinds, name string) (result CampaignResult, err error) { + req, err := client.GetByNamePreparer(kind, name) + if err != nil { + return result, autorest.NewErrorWithError(err, "mobileengagement.CampaignsClient", "GetByName", nil, "Failure preparing request") + } + + resp, err := client.GetByNameSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "mobileengagement.CampaignsClient", "GetByName", resp, "Failure sending request") + } + + result, err = client.GetByNameResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "mobileengagement.CampaignsClient", "GetByName", resp, "Failure responding to request") + } + + return +} + +// GetByNamePreparer prepares the GetByName request. 
+func (client CampaignsClient) GetByNamePreparer(kind CampaignKinds, name string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "appCollection": autorest.Encode("path", client.AppCollection), + "appName": autorest.Encode("path", client.AppName), + "kind": autorest.Encode("path", kind), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", client.ResourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MobileEngagement/appcollections/{appCollection}/apps/{appName}/campaignsByName/{kind}/{name}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetByNameSender sends the GetByName request. The method will close the +// http.Response Body if it receives an error. +func (client CampaignsClient) GetByNameSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetByNameResponder handles the response to the GetByName request. The method always +// closes the http.Response Body. +func (client CampaignsClient) GetByNameResponder(resp *http.Response) (result CampaignResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// GetStatistics get all the campaign statistics. +// +// kind is campaign kind. Possible values include: 'announcements', 'polls', +// 'dataPushes', 'nativePushes' id is campaign identifier. 
+func (client CampaignsClient) GetStatistics(kind CampaignKinds, id int32) (result CampaignStatisticsResult, err error) { + req, err := client.GetStatisticsPreparer(kind, id) + if err != nil { + return result, autorest.NewErrorWithError(err, "mobileengagement.CampaignsClient", "GetStatistics", nil, "Failure preparing request") + } + + resp, err := client.GetStatisticsSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "mobileengagement.CampaignsClient", "GetStatistics", resp, "Failure sending request") + } + + result, err = client.GetStatisticsResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "mobileengagement.CampaignsClient", "GetStatistics", resp, "Failure responding to request") + } + + return +} + +// GetStatisticsPreparer prepares the GetStatistics request. +func (client CampaignsClient) GetStatisticsPreparer(kind CampaignKinds, id int32) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "appCollection": autorest.Encode("path", client.AppCollection), + "appName": autorest.Encode("path", client.AppName), + "id": autorest.Encode("path", id), + "kind": autorest.Encode("path", kind), + "resourceGroupName": autorest.Encode("path", client.ResourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MobileEngagement/appcollections/{appCollection}/apps/{appName}/campaigns/{kind}/{id}/statistics", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetStatisticsSender sends the GetStatistics request. 
The method will close the +// http.Response Body if it receives an error. +func (client CampaignsClient) GetStatisticsSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetStatisticsResponder handles the response to the GetStatistics request. The method always +// closes the http.Response Body. +func (client CampaignsClient) GetStatisticsResponder(resp *http.Response) (result CampaignStatisticsResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List get the list of campaigns. +// +// kind is campaign kind. Possible values include: 'announcements', 'polls', +// 'dataPushes', 'nativePushes' skip is control paging of campaigns, start +// results at the given offset, defaults to 0 (1st page of data). top is +// control paging of campaigns, number of campaigns to return with each call. +// It returns all campaigns by default. When specifying $top parameter, the +// response contains a `nextLink` property describing the path to get the +// next page if there are more results. filter is filter can be used to +// restrict the results to campaigns matching a specific state. The syntax is +// `$filter=state eq 'draft'`. Valid state values are: draft, scheduled, +// in-progress, and finished. Only the eq operator and the state property are +// supported. orderby is sort results by an expression which looks like +// `$orderby=id asc` (this example is actually the default behavior). The +// syntax is orderby={property} {direction} or just orderby={property}. The +// available sorting properties are id, name, state, activatedDate, and +// finishedDate. The available directions are asc (for ascending order) and +// desc (for descending order). When not specified the asc direction is used. 
+// Only one property at a time can be used for sorting. search is restrict +// results to campaigns matching the optional `search` expression. This +// currently performs the search based on the name on the campaign only, case +// insensitive. If the campaign contains the value of the `search` parameter +// anywhere in the name, it matches. +func (client CampaignsClient) List(kind CampaignKinds, skip *int32, top *int32, filter string, orderby string, search string) (result CampaignsListResult, err error) { + req, err := client.ListPreparer(kind, skip, top, filter, orderby, search) + if err != nil { + return result, autorest.NewErrorWithError(err, "mobileengagement.CampaignsClient", "List", nil, "Failure preparing request") + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "mobileengagement.CampaignsClient", "List", resp, "Failure sending request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "mobileengagement.CampaignsClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. 
+func (client CampaignsClient) ListPreparer(kind CampaignKinds, skip *int32, top *int32, filter string, orderby string, search string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "appCollection": autorest.Encode("path", client.AppCollection), + "appName": autorest.Encode("path", client.AppName), + "kind": autorest.Encode("path", kind), + "resourceGroupName": autorest.Encode("path", client.ResourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + if skip != nil { + queryParameters["$skip"] = autorest.Encode("query", *skip) + } + if top != nil { + queryParameters["$top"] = autorest.Encode("query", *top) + } + if len(filter) > 0 { + queryParameters["$filter"] = autorest.Encode("query", filter) + } + if len(orderby) > 0 { + queryParameters["$orderby"] = autorest.Encode("query", orderby) + } + if len(search) > 0 { + queryParameters["$search"] = autorest.Encode("query", search) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MobileEngagement/appcollections/{appCollection}/apps/{appName}/campaigns/{kind}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client CampaignsClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. 
+func (client CampaignsClient) ListResponder(resp *http.Response) (result CampaignsListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListNextResults retrieves the next set of results, if any. +func (client CampaignsClient) ListNextResults(lastResults CampaignsListResult) (result CampaignsListResult, err error) { + req, err := lastResults.CampaignsListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "mobileengagement.CampaignsClient", "List", nil, "Failure preparing next results request request") + } + if req == nil { + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "mobileengagement.CampaignsClient", "List", resp, "Failure sending next results request request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "mobileengagement.CampaignsClient", "List", resp, "Failure responding to next results request request") + } + + return +} + +// Push push a previously saved campaign (created with Create campaign) to a +// set of devices. +// +// kind is campaign kind. Possible values include: 'announcements', 'polls', +// 'dataPushes', 'nativePushes' id is campaign identifier. parameters is +// parameters supplied to the Push Campaign operation. 
+func (client CampaignsClient) Push(kind CampaignKinds, id int32, parameters CampaignPushParameters) (result CampaignPushResult, err error) { + req, err := client.PushPreparer(kind, id, parameters) + if err != nil { + return result, autorest.NewErrorWithError(err, "mobileengagement.CampaignsClient", "Push", nil, "Failure preparing request") + } + + resp, err := client.PushSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "mobileengagement.CampaignsClient", "Push", resp, "Failure sending request") + } + + result, err = client.PushResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "mobileengagement.CampaignsClient", "Push", resp, "Failure responding to request") + } + + return +} + +// PushPreparer prepares the Push request. +func (client CampaignsClient) PushPreparer(kind CampaignKinds, id int32, parameters CampaignPushParameters) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "appCollection": autorest.Encode("path", client.AppCollection), + "appName": autorest.Encode("path", client.AppName), + "id": autorest.Encode("path", id), + "kind": autorest.Encode("path", kind), + "resourceGroupName": autorest.Encode("path", client.ResourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MobileEngagement/appcollections/{appCollection}/apps/{appName}/campaigns/{kind}/{id}/push", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// PushSender sends the Push request. 
The method will close the +// http.Response Body if it receives an error. +func (client CampaignsClient) PushSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// PushResponder handles the response to the Push request. The method always +// closes the http.Response Body. +func (client CampaignsClient) PushResponder(resp *http.Response) (result CampaignPushResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Suspend suspend a push campaign previously activated by a call to Activate +// campaign. +// +// kind is campaign kind. Possible values include: 'announcements', 'polls', +// 'dataPushes', 'nativePushes' id is campaign identifier. +func (client CampaignsClient) Suspend(kind CampaignKinds, id int32) (result CampaignStateResult, err error) { + req, err := client.SuspendPreparer(kind, id) + if err != nil { + return result, autorest.NewErrorWithError(err, "mobileengagement.CampaignsClient", "Suspend", nil, "Failure preparing request") + } + + resp, err := client.SuspendSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "mobileengagement.CampaignsClient", "Suspend", resp, "Failure sending request") + } + + result, err = client.SuspendResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "mobileengagement.CampaignsClient", "Suspend", resp, "Failure responding to request") + } + + return +} + +// SuspendPreparer prepares the Suspend request. 
+func (client CampaignsClient) SuspendPreparer(kind CampaignKinds, id int32) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "appCollection": autorest.Encode("path", client.AppCollection), + "appName": autorest.Encode("path", client.AppName), + "id": autorest.Encode("path", id), + "kind": autorest.Encode("path", kind), + "resourceGroupName": autorest.Encode("path", client.ResourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MobileEngagement/appcollections/{appCollection}/apps/{appName}/campaigns/{kind}/{id}/suspend", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// SuspendSender sends the Suspend request. The method will close the +// http.Response Body if it receives an error. +func (client CampaignsClient) SuspendSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// SuspendResponder handles the response to the Suspend request. The method always +// closes the http.Response Body. +func (client CampaignsClient) SuspendResponder(resp *http.Response) (result CampaignStateResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// TestNew test a new campaign on a set of devices. +// +// kind is campaign kind. Possible values include: 'announcements', 'polls', +// 'dataPushes', 'nativePushes' parameters is parameters supplied to the Test +// Campaign operation. 
+func (client CampaignsClient) TestNew(kind CampaignKinds, parameters CampaignTestNewParameters) (result CampaignState, err error) { + req, err := client.TestNewPreparer(kind, parameters) + if err != nil { + return result, autorest.NewErrorWithError(err, "mobileengagement.CampaignsClient", "TestNew", nil, "Failure preparing request") + } + + resp, err := client.TestNewSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "mobileengagement.CampaignsClient", "TestNew", resp, "Failure sending request") + } + + result, err = client.TestNewResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "mobileengagement.CampaignsClient", "TestNew", resp, "Failure responding to request") + } + + return +} + +// TestNewPreparer prepares the TestNew request. +func (client CampaignsClient) TestNewPreparer(kind CampaignKinds, parameters CampaignTestNewParameters) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "appCollection": autorest.Encode("path", client.AppCollection), + "appName": autorest.Encode("path", client.AppName), + "kind": autorest.Encode("path", kind), + "resourceGroupName": autorest.Encode("path", client.ResourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MobileEngagement/appcollections/{appCollection}/apps/{appName}/campaigns/{kind}/test", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// TestNewSender sends the TestNew request. 
The method will close the +// http.Response Body if it receives an error. +func (client CampaignsClient) TestNewSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// TestNewResponder handles the response to the TestNew request. The method always +// closes the http.Response Body. +func (client CampaignsClient) TestNewResponder(resp *http.Response) (result CampaignState, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// TestSaved test an existing campaign (created with Create campaign) on a set +// of devices. +// +// kind is campaign kind. Possible values include: 'announcements', 'polls', +// 'dataPushes', 'nativePushes' id is campaign identifier. parameters is +// parameters supplied to the Test Campaign operation. +func (client CampaignsClient) TestSaved(kind CampaignKinds, id int32, parameters CampaignTestSavedParameters) (result CampaignStateResult, err error) { + req, err := client.TestSavedPreparer(kind, id, parameters) + if err != nil { + return result, autorest.NewErrorWithError(err, "mobileengagement.CampaignsClient", "TestSaved", nil, "Failure preparing request") + } + + resp, err := client.TestSavedSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "mobileengagement.CampaignsClient", "TestSaved", resp, "Failure sending request") + } + + result, err = client.TestSavedResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "mobileengagement.CampaignsClient", "TestSaved", resp, "Failure responding to request") + } + + return +} + +// TestSavedPreparer prepares the TestSaved request. 
+func (client CampaignsClient) TestSavedPreparer(kind CampaignKinds, id int32, parameters CampaignTestSavedParameters) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "appCollection": autorest.Encode("path", client.AppCollection), + "appName": autorest.Encode("path", client.AppName), + "id": autorest.Encode("path", id), + "kind": autorest.Encode("path", kind), + "resourceGroupName": autorest.Encode("path", client.ResourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MobileEngagement/appcollections/{appCollection}/apps/{appName}/campaigns/{kind}/{id}/test", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// TestSavedSender sends the TestSaved request. The method will close the +// http.Response Body if it receives an error. +func (client CampaignsClient) TestSavedSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// TestSavedResponder handles the response to the TestSaved request. The method always +// closes the http.Response Body. +func (client CampaignsClient) TestSavedResponder(resp *http.Response) (result CampaignStateResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Update update an existing push campaign (announcement, poll, data push or +// native push). +// +// kind is campaign kind. 
Possible values include: 'announcements', 'polls', +// 'dataPushes', 'nativePushes' id is campaign identifier. parameters is +// parameters supplied to the Update Campaign operation. +func (client CampaignsClient) Update(kind CampaignKinds, id int32, parameters Campaign) (result CampaignStateResult, err error) { + req, err := client.UpdatePreparer(kind, id, parameters) + if err != nil { + return result, autorest.NewErrorWithError(err, "mobileengagement.CampaignsClient", "Update", nil, "Failure preparing request") + } + + resp, err := client.UpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "mobileengagement.CampaignsClient", "Update", resp, "Failure sending request") + } + + result, err = client.UpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "mobileengagement.CampaignsClient", "Update", resp, "Failure responding to request") + } + + return +} + +// UpdatePreparer prepares the Update request. 
+func (client CampaignsClient) UpdatePreparer(kind CampaignKinds, id int32, parameters Campaign) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "appCollection": autorest.Encode("path", client.AppCollection), + "appName": autorest.Encode("path", client.AppName), + "id": autorest.Encode("path", id), + "kind": autorest.Encode("path", kind), + "resourceGroupName": autorest.Encode("path", client.ResourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MobileEngagement/appcollections/{appCollection}/apps/{appName}/campaigns/{kind}/{id}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// UpdateSender sends the Update request. The method will close the +// http.Response Body if it receives an error. +func (client CampaignsClient) UpdateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// UpdateResponder handles the response to the Update request. The method always +// closes the http.Response Body. 
+func (client CampaignsClient) UpdateResponder(resp *http.Response) (result CampaignStateResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/mobileengagement/client.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/mobileengagement/client.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/mobileengagement/client.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/mobileengagement/client.go 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,64 @@ +// Package mobileengagement implements the Azure ARM Mobileengagement service +// API version 2014-12-01. +// +// Microsoft Azure Mobile Engagement REST APIs. +package mobileengagement + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. 
+ +import ( + "github.com/Azure/go-autorest/autorest" +) + +const ( + // APIVersion is the version of the Mobileengagement + APIVersion = "2014-12-01" + + // DefaultBaseURI is the default URI used for the service Mobileengagement + DefaultBaseURI = "https://management.azure.com" +) + +// ManagementClient is the base client for Mobileengagement. +type ManagementClient struct { + autorest.Client + BaseURI string + APIVersion string + SubscriptionID string + ResourceGroupName string + AppCollection string + AppName string +} + +// New creates an instance of the ManagementClient client. +func New(subscriptionID string, resourceGroupName string, appCollection string, appName string) ManagementClient { + return NewWithBaseURI(DefaultBaseURI, subscriptionID, resourceGroupName, appCollection, appName) +} + +// NewWithBaseURI creates an instance of the ManagementClient client. +func NewWithBaseURI(baseURI string, subscriptionID string, resourceGroupName string, appCollection string, appName string) ManagementClient { + return ManagementClient{ + Client: autorest.NewClientWithUserAgent(UserAgent()), + BaseURI: baseURI, + APIVersion: APIVersion, + SubscriptionID: subscriptionID, + ResourceGroupName: resourceGroupName, + AppCollection: appCollection, + AppName: appName, + } +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/mobileengagement/devices.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/mobileengagement/devices.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/mobileengagement/devices.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/mobileengagement/devices.go 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,442 @@ +package mobileengagement + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "net/http" +) + +// DevicesClient is the microsoft Azure Mobile Engagement REST APIs. +type DevicesClient struct { + ManagementClient +} + +// NewDevicesClient creates an instance of the DevicesClient client. +func NewDevicesClient(subscriptionID string, resourceGroupName string, appCollection string, appName string) DevicesClient { + return NewDevicesClientWithBaseURI(DefaultBaseURI, subscriptionID, resourceGroupName, appCollection, appName) +} + +// NewDevicesClientWithBaseURI creates an instance of the DevicesClient client. +func NewDevicesClientWithBaseURI(baseURI string, subscriptionID string, resourceGroupName string, appCollection string, appName string) DevicesClient { + return DevicesClient{NewWithBaseURI(baseURI, subscriptionID, resourceGroupName, appCollection, appName)} +} + +// GetByDeviceID get the information associated to a device running an +// application. +// +// deviceID is device identifier. 
+func (client DevicesClient) GetByDeviceID(deviceID string) (result Device, err error) { + req, err := client.GetByDeviceIDPreparer(deviceID) + if err != nil { + return result, autorest.NewErrorWithError(err, "mobileengagement.DevicesClient", "GetByDeviceID", nil, "Failure preparing request") + } + + resp, err := client.GetByDeviceIDSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "mobileengagement.DevicesClient", "GetByDeviceID", resp, "Failure sending request") + } + + result, err = client.GetByDeviceIDResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "mobileengagement.DevicesClient", "GetByDeviceID", resp, "Failure responding to request") + } + + return +} + +// GetByDeviceIDPreparer prepares the GetByDeviceID request. +func (client DevicesClient) GetByDeviceIDPreparer(deviceID string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "appCollection": autorest.Encode("path", client.AppCollection), + "appName": autorest.Encode("path", client.AppName), + "deviceId": autorest.Encode("path", deviceID), + "resourceGroupName": autorest.Encode("path", client.ResourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MobileEngagement/appcollections/{appCollection}/apps/{appName}/devices/{deviceId}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetByDeviceIDSender sends the GetByDeviceID request. The method will close the +// http.Response Body if it receives an error. 
+func (client DevicesClient) GetByDeviceIDSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetByDeviceIDResponder handles the response to the GetByDeviceID request. The method always +// closes the http.Response Body. +func (client DevicesClient) GetByDeviceIDResponder(resp *http.Response) (result Device, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// GetByUserID get the information associated to a device running an +// application using the user identifier. +// +// userID is user identifier. +func (client DevicesClient) GetByUserID(userID string) (result Device, err error) { + req, err := client.GetByUserIDPreparer(userID) + if err != nil { + return result, autorest.NewErrorWithError(err, "mobileengagement.DevicesClient", "GetByUserID", nil, "Failure preparing request") + } + + resp, err := client.GetByUserIDSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "mobileengagement.DevicesClient", "GetByUserID", resp, "Failure sending request") + } + + result, err = client.GetByUserIDResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "mobileengagement.DevicesClient", "GetByUserID", resp, "Failure responding to request") + } + + return +} + +// GetByUserIDPreparer prepares the GetByUserID request. 
+func (client DevicesClient) GetByUserIDPreparer(userID string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "appCollection": autorest.Encode("path", client.AppCollection), + "appName": autorest.Encode("path", client.AppName), + "resourceGroupName": autorest.Encode("path", client.ResourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "userId": autorest.Encode("path", userID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MobileEngagement/appcollections/{appCollection}/apps/{appName}/users/{userId}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetByUserIDSender sends the GetByUserID request. The method will close the +// http.Response Body if it receives an error. +func (client DevicesClient) GetByUserIDSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetByUserIDResponder handles the response to the GetByUserID request. The method always +// closes the http.Response Body. +func (client DevicesClient) GetByUserIDResponder(resp *http.Response) (result Device, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List query the information associated to the devices running an application. +// +// top is number of devices to return with each call. Defaults to 100 and +// cannot return more. Passing a greater value is ignored. 
The response +// contains a `nextLink` property describing the URI path to get the next +// page of results if not all results could be returned at once. +// selectParameter is by default all `meta` and `appInfo` properties are +// returned, this property is used to restrict the output to the desired +// properties. It also excludes all devices from the output that have none of +// the selected properties. In other terms, only devices having at least one +// of the selected property being set is part of the results. Examples: - +// `$select=appInfo` : select all devices having at least 1 appInfo, return +// them all and don’t return any meta property. - `$select=meta` : return +// only meta properties in the output. - +// `$select=appInfo,meta/firstSeen,meta/lastSeen` : return all `appInfo`, +// plus meta object containing only firstSeen and lastSeen properties. The +// format is thus a comma separated list of properties to select. Use +// `appInfo` to select all appInfo properties, `meta` to select all meta +// properties. Use `appInfo/{key}` and `meta/{key}` to select specific +// appInfo and meta properties. filter is filter can be used to reduce the +// number of results. Filter is a boolean expression that can look like the +// following examples: * `$filter=deviceId gt +// 'abcdef0123456789abcdef0123456789'` * `$filter=lastModified le +// 1447284263690L` * `$filter=(deviceId ge +// 'abcdef0123456789abcdef0123456789') and (deviceId lt +// 'bacdef0123456789abcdef0123456789') and (lastModified gt 1447284263690L)` +// The first example is used automatically for paging when returning the +// `nextLink` property. The filter expression is a combination of checks on +// some properties that can be compared to their value. The available +// operators are: * `gt` : greater than * `ge` : greater than or equals * +// `lt` : less than * `le` : less than or equals * `and` : to add multiple +// checks (all checks must pass), optional parentheses can be used. 
The +// properties that can be used in the expression are the following: * +// `deviceId {operator} '{deviceIdValue}'` : a lexicographical comparison is +// made on the deviceId value, use single quotes for the value. * +// `lastModified {operator} {number}L` : returns only meta properties or +// appInfo properties whose last value modification timestamp compared to the +// specified value is matching (value is milliseconds since January 1st, 1970 +// UTC). Please note the `L` character after the number of milliseconds, its +// required when the number of milliseconds exceeds `2^31 - 1` (which is +// always the case for recent timestamps). Using `lastModified` excludes all +// devices from the output that have no property matching the timestamp +// criteria, like `$select`. Please note that the internal value of +// `lastModified` timestamp for a given property is never part of the +// results. +func (client DevicesClient) List(top *int32, selectParameter string, filter string) (result DevicesQueryResult, err error) { + req, err := client.ListPreparer(top, selectParameter, filter) + if err != nil { + return result, autorest.NewErrorWithError(err, "mobileengagement.DevicesClient", "List", nil, "Failure preparing request") + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "mobileengagement.DevicesClient", "List", resp, "Failure sending request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "mobileengagement.DevicesClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. 
+func (client DevicesClient) ListPreparer(top *int32, selectParameter string, filter string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "appCollection": autorest.Encode("path", client.AppCollection), + "appName": autorest.Encode("path", client.AppName), + "resourceGroupName": autorest.Encode("path", client.ResourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + if top != nil { + queryParameters["$top"] = autorest.Encode("query", *top) + } + if len(selectParameter) > 0 { + queryParameters["$select"] = autorest.Encode("query", selectParameter) + } + if len(filter) > 0 { + queryParameters["$filter"] = autorest.Encode("query", filter) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MobileEngagement/appcollections/{appCollection}/apps/{appName}/devices", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client DevicesClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client DevicesClient) ListResponder(resp *http.Response) (result DevicesQueryResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListNextResults retrieves the next set of results, if any. 
+func (client DevicesClient) ListNextResults(lastResults DevicesQueryResult) (result DevicesQueryResult, err error) { + req, err := lastResults.DevicesQueryResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "mobileengagement.DevicesClient", "List", nil, "Failure preparing next results request request") + } + if req == nil { + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "mobileengagement.DevicesClient", "List", resp, "Failure sending next results request request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "mobileengagement.DevicesClient", "List", resp, "Failure responding to next results request request") + } + + return +} + +// TagByDeviceID update the tags registered for a set of devices running an +// application. Updates are performed asynchronously, meaning that a few +// seconds are needed before the modifications appear in the results of the +// Get device command. +// +func (client DevicesClient) TagByDeviceID(parameters DeviceTagsParameters) (result DeviceTagsResult, err error) { + req, err := client.TagByDeviceIDPreparer(parameters) + if err != nil { + return result, autorest.NewErrorWithError(err, "mobileengagement.DevicesClient", "TagByDeviceID", nil, "Failure preparing request") + } + + resp, err := client.TagByDeviceIDSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "mobileengagement.DevicesClient", "TagByDeviceID", resp, "Failure sending request") + } + + result, err = client.TagByDeviceIDResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "mobileengagement.DevicesClient", "TagByDeviceID", resp, "Failure responding to request") + } + + return +} + +// TagByDeviceIDPreparer prepares the TagByDeviceID request. 
+func (client DevicesClient) TagByDeviceIDPreparer(parameters DeviceTagsParameters) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "appCollection": autorest.Encode("path", client.AppCollection), + "appName": autorest.Encode("path", client.AppName), + "resourceGroupName": autorest.Encode("path", client.ResourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MobileEngagement/appcollections/{appCollection}/apps/{appName}/devices/tag", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// TagByDeviceIDSender sends the TagByDeviceID request. The method will close the +// http.Response Body if it receives an error. +func (client DevicesClient) TagByDeviceIDSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// TagByDeviceIDResponder handles the response to the TagByDeviceID request. The method always +// closes the http.Response Body. +func (client DevicesClient) TagByDeviceIDResponder(resp *http.Response) (result DeviceTagsResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// TagByUserID update the tags registered for a set of users running an +// application. Updates are performed asynchronously, meaning that a few +// seconds are needed before the modifications appear in the results of the +// Get device command. 
+// +func (client DevicesClient) TagByUserID(parameters DeviceTagsParameters) (result DeviceTagsResult, err error) { + req, err := client.TagByUserIDPreparer(parameters) + if err != nil { + return result, autorest.NewErrorWithError(err, "mobileengagement.DevicesClient", "TagByUserID", nil, "Failure preparing request") + } + + resp, err := client.TagByUserIDSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "mobileengagement.DevicesClient", "TagByUserID", resp, "Failure sending request") + } + + result, err = client.TagByUserIDResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "mobileengagement.DevicesClient", "TagByUserID", resp, "Failure responding to request") + } + + return +} + +// TagByUserIDPreparer prepares the TagByUserID request. +func (client DevicesClient) TagByUserIDPreparer(parameters DeviceTagsParameters) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "appCollection": autorest.Encode("path", client.AppCollection), + "appName": autorest.Encode("path", client.AppName), + "resourceGroupName": autorest.Encode("path", client.ResourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MobileEngagement/appcollections/{appCollection}/apps/{appName}/users/tag", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// TagByUserIDSender sends the TagByUserID request. The method will close the +// http.Response Body if it receives an error. 
+func (client DevicesClient) TagByUserIDSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// TagByUserIDResponder handles the response to the TagByUserID request. The method always +// closes the http.Response Body. +func (client DevicesClient) TagByUserIDResponder(resp *http.Response) (result DeviceTagsResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/mobileengagement/exporttasks.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/mobileengagement/exporttasks.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/mobileengagement/exporttasks.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/mobileengagement/exporttasks.go 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,858 @@ +package mobileengagement + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. 
+ +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "net/http" +) + +// ExportTasksClient is the microsoft Azure Mobile Engagement REST APIs. +type ExportTasksClient struct { + ManagementClient +} + +// NewExportTasksClient creates an instance of the ExportTasksClient client. +func NewExportTasksClient(subscriptionID string, resourceGroupName string, appCollection string, appName string) ExportTasksClient { + return NewExportTasksClientWithBaseURI(DefaultBaseURI, subscriptionID, resourceGroupName, appCollection, appName) +} + +// NewExportTasksClientWithBaseURI creates an instance of the +// ExportTasksClient client. +func NewExportTasksClientWithBaseURI(baseURI string, subscriptionID string, resourceGroupName string, appCollection string, appName string) ExportTasksClient { + return ExportTasksClient{NewWithBaseURI(baseURI, subscriptionID, resourceGroupName, appCollection, appName)} +} + +// CreateActivitiesTask creates a task to export activities. +// +func (client ExportTasksClient) CreateActivitiesTask(parameters DateRangeExportTaskParameter) (result ExportTaskResult, err error) { + req, err := client.CreateActivitiesTaskPreparer(parameters) + if err != nil { + return result, autorest.NewErrorWithError(err, "mobileengagement.ExportTasksClient", "CreateActivitiesTask", nil, "Failure preparing request") + } + + resp, err := client.CreateActivitiesTaskSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "mobileengagement.ExportTasksClient", "CreateActivitiesTask", resp, "Failure sending request") + } + + result, err = client.CreateActivitiesTaskResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "mobileengagement.ExportTasksClient", "CreateActivitiesTask", resp, "Failure responding to request") + } + + return +} + +// CreateActivitiesTaskPreparer prepares the CreateActivitiesTask request. 
+func (client ExportTasksClient) CreateActivitiesTaskPreparer(parameters DateRangeExportTaskParameter) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "appCollection": autorest.Encode("path", client.AppCollection), + "appName": autorest.Encode("path", client.AppName), + "resourceGroupName": autorest.Encode("path", client.ResourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MobileEngagement/appcollections/{appCollection}/apps/{appName}/devices/exportTasks/activities", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// CreateActivitiesTaskSender sends the CreateActivitiesTask request. The method will close the +// http.Response Body if it receives an error. +func (client ExportTasksClient) CreateActivitiesTaskSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// CreateActivitiesTaskResponder handles the response to the CreateActivitiesTask request. The method always +// closes the http.Response Body. +func (client ExportTasksClient) CreateActivitiesTaskResponder(resp *http.Response) (result ExportTaskResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// CreateCrashesTask creates a task to export crashes. 
+// +func (client ExportTasksClient) CreateCrashesTask(parameters DateRangeExportTaskParameter) (result ExportTaskResult, err error) { + req, err := client.CreateCrashesTaskPreparer(parameters) + if err != nil { + return result, autorest.NewErrorWithError(err, "mobileengagement.ExportTasksClient", "CreateCrashesTask", nil, "Failure preparing request") + } + + resp, err := client.CreateCrashesTaskSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "mobileengagement.ExportTasksClient", "CreateCrashesTask", resp, "Failure sending request") + } + + result, err = client.CreateCrashesTaskResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "mobileengagement.ExportTasksClient", "CreateCrashesTask", resp, "Failure responding to request") + } + + return +} + +// CreateCrashesTaskPreparer prepares the CreateCrashesTask request. +func (client ExportTasksClient) CreateCrashesTaskPreparer(parameters DateRangeExportTaskParameter) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "appCollection": autorest.Encode("path", client.AppCollection), + "appName": autorest.Encode("path", client.AppName), + "resourceGroupName": autorest.Encode("path", client.ResourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MobileEngagement/appcollections/{appCollection}/apps/{appName}/devices/exportTasks/crashes", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// CreateCrashesTaskSender sends the CreateCrashesTask request. 
The method will close the +// http.Response Body if it receives an error. +func (client ExportTasksClient) CreateCrashesTaskSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// CreateCrashesTaskResponder handles the response to the CreateCrashesTask request. The method always +// closes the http.Response Body. +func (client ExportTasksClient) CreateCrashesTaskResponder(resp *http.Response) (result ExportTaskResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// CreateErrorsTask creates a task to export errors. +// +func (client ExportTasksClient) CreateErrorsTask(parameters DateRangeExportTaskParameter) (result ExportTaskResult, err error) { + req, err := client.CreateErrorsTaskPreparer(parameters) + if err != nil { + return result, autorest.NewErrorWithError(err, "mobileengagement.ExportTasksClient", "CreateErrorsTask", nil, "Failure preparing request") + } + + resp, err := client.CreateErrorsTaskSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "mobileengagement.ExportTasksClient", "CreateErrorsTask", resp, "Failure sending request") + } + + result, err = client.CreateErrorsTaskResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "mobileengagement.ExportTasksClient", "CreateErrorsTask", resp, "Failure responding to request") + } + + return +} + +// CreateErrorsTaskPreparer prepares the CreateErrorsTask request. 
+func (client ExportTasksClient) CreateErrorsTaskPreparer(parameters DateRangeExportTaskParameter) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "appCollection": autorest.Encode("path", client.AppCollection), + "appName": autorest.Encode("path", client.AppName), + "resourceGroupName": autorest.Encode("path", client.ResourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MobileEngagement/appcollections/{appCollection}/apps/{appName}/devices/exportTasks/errors", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// CreateErrorsTaskSender sends the CreateErrorsTask request. The method will close the +// http.Response Body if it receives an error. +func (client ExportTasksClient) CreateErrorsTaskSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// CreateErrorsTaskResponder handles the response to the CreateErrorsTask request. The method always +// closes the http.Response Body. +func (client ExportTasksClient) CreateErrorsTaskResponder(resp *http.Response) (result ExportTaskResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// CreateEventsTask creates a task to export events. 
+// +func (client ExportTasksClient) CreateEventsTask(parameters DateRangeExportTaskParameter) (result ExportTaskResult, err error) { + req, err := client.CreateEventsTaskPreparer(parameters) + if err != nil { + return result, autorest.NewErrorWithError(err, "mobileengagement.ExportTasksClient", "CreateEventsTask", nil, "Failure preparing request") + } + + resp, err := client.CreateEventsTaskSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "mobileengagement.ExportTasksClient", "CreateEventsTask", resp, "Failure sending request") + } + + result, err = client.CreateEventsTaskResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "mobileengagement.ExportTasksClient", "CreateEventsTask", resp, "Failure responding to request") + } + + return +} + +// CreateEventsTaskPreparer prepares the CreateEventsTask request. +func (client ExportTasksClient) CreateEventsTaskPreparer(parameters DateRangeExportTaskParameter) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "appCollection": autorest.Encode("path", client.AppCollection), + "appName": autorest.Encode("path", client.AppName), + "resourceGroupName": autorest.Encode("path", client.ResourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MobileEngagement/appcollections/{appCollection}/apps/{appName}/devices/exportTasks/events", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// CreateEventsTaskSender sends the CreateEventsTask request. 
The method will close the +// http.Response Body if it receives an error. +func (client ExportTasksClient) CreateEventsTaskSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// CreateEventsTaskResponder handles the response to the CreateEventsTask request. The method always +// closes the http.Response Body. +func (client ExportTasksClient) CreateEventsTaskResponder(resp *http.Response) (result ExportTaskResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// CreateFeedbackTaskByCampaign creates a task to export push campaign data +// for a set of campaigns. +// +func (client ExportTasksClient) CreateFeedbackTaskByCampaign(parameters FeedbackByCampaignParameter) (result ExportTaskResult, err error) { + req, err := client.CreateFeedbackTaskByCampaignPreparer(parameters) + if err != nil { + return result, autorest.NewErrorWithError(err, "mobileengagement.ExportTasksClient", "CreateFeedbackTaskByCampaign", nil, "Failure preparing request") + } + + resp, err := client.CreateFeedbackTaskByCampaignSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "mobileengagement.ExportTasksClient", "CreateFeedbackTaskByCampaign", resp, "Failure sending request") + } + + result, err = client.CreateFeedbackTaskByCampaignResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "mobileengagement.ExportTasksClient", "CreateFeedbackTaskByCampaign", resp, "Failure responding to request") + } + + return +} + +// CreateFeedbackTaskByCampaignPreparer prepares the CreateFeedbackTaskByCampaign request. 
+func (client ExportTasksClient) CreateFeedbackTaskByCampaignPreparer(parameters FeedbackByCampaignParameter) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "appCollection": autorest.Encode("path", client.AppCollection), + "appName": autorest.Encode("path", client.AppName), + "resourceGroupName": autorest.Encode("path", client.ResourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MobileEngagement/appcollections/{appCollection}/apps/{appName}/devices/exportTasks/feedbackByCampaign", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// CreateFeedbackTaskByCampaignSender sends the CreateFeedbackTaskByCampaign request. The method will close the +// http.Response Body if it receives an error. +func (client ExportTasksClient) CreateFeedbackTaskByCampaignSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// CreateFeedbackTaskByCampaignResponder handles the response to the CreateFeedbackTaskByCampaign request. The method always +// closes the http.Response Body. 
+func (client ExportTasksClient) CreateFeedbackTaskByCampaignResponder(resp *http.Response) (result ExportTaskResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// CreateFeedbackTaskByDateRange creates a task to export push campaign data +// for a date range. +// +func (client ExportTasksClient) CreateFeedbackTaskByDateRange(parameters FeedbackByDateRangeParameter) (result ExportTaskResult, err error) { + req, err := client.CreateFeedbackTaskByDateRangePreparer(parameters) + if err != nil { + return result, autorest.NewErrorWithError(err, "mobileengagement.ExportTasksClient", "CreateFeedbackTaskByDateRange", nil, "Failure preparing request") + } + + resp, err := client.CreateFeedbackTaskByDateRangeSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "mobileengagement.ExportTasksClient", "CreateFeedbackTaskByDateRange", resp, "Failure sending request") + } + + result, err = client.CreateFeedbackTaskByDateRangeResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "mobileengagement.ExportTasksClient", "CreateFeedbackTaskByDateRange", resp, "Failure responding to request") + } + + return +} + +// CreateFeedbackTaskByDateRangePreparer prepares the CreateFeedbackTaskByDateRange request. 
+func (client ExportTasksClient) CreateFeedbackTaskByDateRangePreparer(parameters FeedbackByDateRangeParameter) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "appCollection": autorest.Encode("path", client.AppCollection), + "appName": autorest.Encode("path", client.AppName), + "resourceGroupName": autorest.Encode("path", client.ResourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MobileEngagement/appcollections/{appCollection}/apps/{appName}/devices/exportTasks/feedbackByDate", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// CreateFeedbackTaskByDateRangeSender sends the CreateFeedbackTaskByDateRange request. The method will close the +// http.Response Body if it receives an error. +func (client ExportTasksClient) CreateFeedbackTaskByDateRangeSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// CreateFeedbackTaskByDateRangeResponder handles the response to the CreateFeedbackTaskByDateRange request. The method always +// closes the http.Response Body. +func (client ExportTasksClient) CreateFeedbackTaskByDateRangeResponder(resp *http.Response) (result ExportTaskResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// CreateJobsTask creates a task to export jobs. 
+// +func (client ExportTasksClient) CreateJobsTask(parameters DateRangeExportTaskParameter) (result ExportTaskResult, err error) { + req, err := client.CreateJobsTaskPreparer(parameters) + if err != nil { + return result, autorest.NewErrorWithError(err, "mobileengagement.ExportTasksClient", "CreateJobsTask", nil, "Failure preparing request") + } + + resp, err := client.CreateJobsTaskSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "mobileengagement.ExportTasksClient", "CreateJobsTask", resp, "Failure sending request") + } + + result, err = client.CreateJobsTaskResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "mobileengagement.ExportTasksClient", "CreateJobsTask", resp, "Failure responding to request") + } + + return +} + +// CreateJobsTaskPreparer prepares the CreateJobsTask request. +func (client ExportTasksClient) CreateJobsTaskPreparer(parameters DateRangeExportTaskParameter) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "appCollection": autorest.Encode("path", client.AppCollection), + "appName": autorest.Encode("path", client.AppName), + "resourceGroupName": autorest.Encode("path", client.ResourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MobileEngagement/appcollections/{appCollection}/apps/{appName}/devices/exportTasks/jobs", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// CreateJobsTaskSender sends the CreateJobsTask request. 
The method will close the +// http.Response Body if it receives an error. +func (client ExportTasksClient) CreateJobsTaskSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// CreateJobsTaskResponder handles the response to the CreateJobsTask request. The method always +// closes the http.Response Body. +func (client ExportTasksClient) CreateJobsTaskResponder(resp *http.Response) (result ExportTaskResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// CreateSessionsTask creates a task to export sessions. +// +func (client ExportTasksClient) CreateSessionsTask(parameters DateRangeExportTaskParameter) (result ExportTaskResult, err error) { + req, err := client.CreateSessionsTaskPreparer(parameters) + if err != nil { + return result, autorest.NewErrorWithError(err, "mobileengagement.ExportTasksClient", "CreateSessionsTask", nil, "Failure preparing request") + } + + resp, err := client.CreateSessionsTaskSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "mobileengagement.ExportTasksClient", "CreateSessionsTask", resp, "Failure sending request") + } + + result, err = client.CreateSessionsTaskResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "mobileengagement.ExportTasksClient", "CreateSessionsTask", resp, "Failure responding to request") + } + + return +} + +// CreateSessionsTaskPreparer prepares the CreateSessionsTask request. 
+func (client ExportTasksClient) CreateSessionsTaskPreparer(parameters DateRangeExportTaskParameter) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "appCollection": autorest.Encode("path", client.AppCollection), + "appName": autorest.Encode("path", client.AppName), + "resourceGroupName": autorest.Encode("path", client.ResourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MobileEngagement/appcollections/{appCollection}/apps/{appName}/devices/exportTasks/sessions", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// CreateSessionsTaskSender sends the CreateSessionsTask request. The method will close the +// http.Response Body if it receives an error. +func (client ExportTasksClient) CreateSessionsTaskSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// CreateSessionsTaskResponder handles the response to the CreateSessionsTask request. The method always +// closes the http.Response Body. +func (client ExportTasksClient) CreateSessionsTaskResponder(resp *http.Response) (result ExportTaskResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// CreateTagsTask creates a task to export tags. 
+// +func (client ExportTasksClient) CreateTagsTask(parameters ExportTaskParameter) (result ExportTaskResult, err error) { + req, err := client.CreateTagsTaskPreparer(parameters) + if err != nil { + return result, autorest.NewErrorWithError(err, "mobileengagement.ExportTasksClient", "CreateTagsTask", nil, "Failure preparing request") + } + + resp, err := client.CreateTagsTaskSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "mobileengagement.ExportTasksClient", "CreateTagsTask", resp, "Failure sending request") + } + + result, err = client.CreateTagsTaskResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "mobileengagement.ExportTasksClient", "CreateTagsTask", resp, "Failure responding to request") + } + + return +} + +// CreateTagsTaskPreparer prepares the CreateTagsTask request. +func (client ExportTasksClient) CreateTagsTaskPreparer(parameters ExportTaskParameter) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "appCollection": autorest.Encode("path", client.AppCollection), + "appName": autorest.Encode("path", client.AppName), + "resourceGroupName": autorest.Encode("path", client.ResourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MobileEngagement/appcollections/{appCollection}/apps/{appName}/devices/exportTasks/tags", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// CreateTagsTaskSender sends the CreateTagsTask request. 
The method will close the +// http.Response Body if it receives an error. +func (client ExportTasksClient) CreateTagsTaskSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// CreateTagsTaskResponder handles the response to the CreateTagsTask request. The method always +// closes the http.Response Body. +func (client ExportTasksClient) CreateTagsTaskResponder(resp *http.Response) (result ExportTaskResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// CreateTokensTask creates a task to export tags. +// +func (client ExportTasksClient) CreateTokensTask(parameters ExportTaskParameter) (result ExportTaskResult, err error) { + req, err := client.CreateTokensTaskPreparer(parameters) + if err != nil { + return result, autorest.NewErrorWithError(err, "mobileengagement.ExportTasksClient", "CreateTokensTask", nil, "Failure preparing request") + } + + resp, err := client.CreateTokensTaskSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "mobileengagement.ExportTasksClient", "CreateTokensTask", resp, "Failure sending request") + } + + result, err = client.CreateTokensTaskResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "mobileengagement.ExportTasksClient", "CreateTokensTask", resp, "Failure responding to request") + } + + return +} + +// CreateTokensTaskPreparer prepares the CreateTokensTask request. 
+func (client ExportTasksClient) CreateTokensTaskPreparer(parameters ExportTaskParameter) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "appCollection": autorest.Encode("path", client.AppCollection), + "appName": autorest.Encode("path", client.AppName), + "resourceGroupName": autorest.Encode("path", client.ResourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MobileEngagement/appcollections/{appCollection}/apps/{appName}/devices/exportTasks/tokens", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// CreateTokensTaskSender sends the CreateTokensTask request. The method will close the +// http.Response Body if it receives an error. +func (client ExportTasksClient) CreateTokensTaskSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// CreateTokensTaskResponder handles the response to the CreateTokensTask request. The method always +// closes the http.Response Body. +func (client ExportTasksClient) CreateTokensTaskResponder(resp *http.Response) (result ExportTaskResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Get retrieves information about a previously created export task. +// +// id is export task identifier. 
+func (client ExportTasksClient) Get(id string) (result ExportTaskResult, err error) { + req, err := client.GetPreparer(id) + if err != nil { + return result, autorest.NewErrorWithError(err, "mobileengagement.ExportTasksClient", "Get", nil, "Failure preparing request") + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "mobileengagement.ExportTasksClient", "Get", resp, "Failure sending request") + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "mobileengagement.ExportTasksClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client ExportTasksClient) GetPreparer(id string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "appCollection": autorest.Encode("path", client.AppCollection), + "appName": autorest.Encode("path", client.AppName), + "id": autorest.Encode("path", id), + "resourceGroupName": autorest.Encode("path", client.ResourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MobileEngagement/appcollections/{appCollection}/apps/{appName}/devices/exportTasks/{id}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. 
+func (client ExportTasksClient) GetSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client ExportTasksClient) GetResponder(resp *http.Response) (result ExportTaskResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List get the list of export tasks. +// +// skip is control paging of export tasks, start results at the given offset, +// defaults to 0 (1st page of data). top is control paging of export tasks, +// number of export tasks to return with each call. By default, it returns +// all export tasks with a default paging of 20. +// The response contains a `nextLink` property describing the path to get the +// next page if there are more results. +// The maximum paging limit for $top is 40. orderby is sort results by an +// expression which looks like `$orderby=taskId asc` (default when not +// specified). +// The syntax is orderby={property} {direction} or just orderby={property}. +// Properties that can be specified for sorting: taskId, errorDetails, +// dateCreated, taskStatus, and dateCreated. +// The available directions are asc (for ascending order) and desc (for +// descending order). +// When not specified the asc direction is used. +// Only one orderby property can be specified. 
+func (client ExportTasksClient) List(skip *int32, top *int32, orderby string) (result ExportTaskListResult, err error) { + req, err := client.ListPreparer(skip, top, orderby) + if err != nil { + return result, autorest.NewErrorWithError(err, "mobileengagement.ExportTasksClient", "List", nil, "Failure preparing request") + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "mobileengagement.ExportTasksClient", "List", resp, "Failure sending request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "mobileengagement.ExportTasksClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client ExportTasksClient) ListPreparer(skip *int32, top *int32, orderby string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "appCollection": autorest.Encode("path", client.AppCollection), + "appName": autorest.Encode("path", client.AppName), + "resourceGroupName": autorest.Encode("path", client.ResourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + if skip != nil { + queryParameters["$skip"] = autorest.Encode("query", *skip) + } + if top != nil { + queryParameters["$top"] = autorest.Encode("query", *top) + } + if len(orderby) > 0 { + queryParameters["$orderby"] = autorest.Encode("query", orderby) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MobileEngagement/appcollections/{appCollection}/apps/{appName}/devices/exportTasks", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return 
preparer.Prepare(&http.Request{}) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client ExportTasksClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client ExportTasksClient) ListResponder(resp *http.Response) (result ExportTaskListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListNextResults retrieves the next set of results, if any. +func (client ExportTasksClient) ListNextResults(lastResults ExportTaskListResult) (result ExportTaskListResult, err error) { + req, err := lastResults.ExportTaskListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "mobileengagement.ExportTasksClient", "List", nil, "Failure preparing next results request request") + } + if req == nil { + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "mobileengagement.ExportTasksClient", "List", resp, "Failure sending next results request request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "mobileengagement.ExportTasksClient", "List", resp, "Failure responding to next results request request") + } + + return +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/mobileengagement/importtasks.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/mobileengagement/importtasks.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/mobileengagement/importtasks.go 
1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/mobileengagement/importtasks.go 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,281 @@ +package mobileengagement + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "net/http" +) + +// ImportTasksClient is the microsoft Azure Mobile Engagement REST APIs. +type ImportTasksClient struct { + ManagementClient +} + +// NewImportTasksClient creates an instance of the ImportTasksClient client. +func NewImportTasksClient(subscriptionID string, resourceGroupName string, appCollection string, appName string) ImportTasksClient { + return NewImportTasksClientWithBaseURI(DefaultBaseURI, subscriptionID, resourceGroupName, appCollection, appName) +} + +// NewImportTasksClientWithBaseURI creates an instance of the +// ImportTasksClient client. 
+func NewImportTasksClientWithBaseURI(baseURI string, subscriptionID string, resourceGroupName string, appCollection string, appName string) ImportTasksClient { + return ImportTasksClient{NewWithBaseURI(baseURI, subscriptionID, resourceGroupName, appCollection, appName)} +} + +// Create creates a job to import the specified data to a storageUrl. +// +func (client ImportTasksClient) Create(parameters ImportTask) (result ImportTaskResult, err error) { + req, err := client.CreatePreparer(parameters) + if err != nil { + return result, autorest.NewErrorWithError(err, "mobileengagement.ImportTasksClient", "Create", nil, "Failure preparing request") + } + + resp, err := client.CreateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "mobileengagement.ImportTasksClient", "Create", resp, "Failure sending request") + } + + result, err = client.CreateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "mobileengagement.ImportTasksClient", "Create", resp, "Failure responding to request") + } + + return +} + +// CreatePreparer prepares the Create request. 
+func (client ImportTasksClient) CreatePreparer(parameters ImportTask) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "appCollection": autorest.Encode("path", client.AppCollection), + "appName": autorest.Encode("path", client.AppName), + "resourceGroupName": autorest.Encode("path", client.ResourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MobileEngagement/appcollections/{appCollection}/apps/{appName}/devices/importTasks", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// CreateSender sends the Create request. The method will close the +// http.Response Body if it receives an error. +func (client ImportTasksClient) CreateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// CreateResponder handles the response to the Create request. The method always +// closes the http.Response Body. +func (client ImportTasksClient) CreateResponder(resp *http.Response) (result ImportTaskResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Get the Get import job operation retrieves information about a previously +// created import job. +// +// id is import job identifier. 
+func (client ImportTasksClient) Get(id string) (result ImportTaskResult, err error) { + req, err := client.GetPreparer(id) + if err != nil { + return result, autorest.NewErrorWithError(err, "mobileengagement.ImportTasksClient", "Get", nil, "Failure preparing request") + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "mobileengagement.ImportTasksClient", "Get", resp, "Failure sending request") + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "mobileengagement.ImportTasksClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client ImportTasksClient) GetPreparer(id string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "appCollection": autorest.Encode("path", client.AppCollection), + "appName": autorest.Encode("path", client.AppName), + "id": autorest.Encode("path", id), + "resourceGroupName": autorest.Encode("path", client.ResourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MobileEngagement/appcollections/{appCollection}/apps/{appName}/devices/importTasks/{id}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. 
+func (client ImportTasksClient) GetSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client ImportTasksClient) GetResponder(resp *http.Response) (result ImportTaskResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List get the list of import jobs. +// +// skip is control paging of import jobs, start results at the given offset, +// defaults to 0 (1st page of data). top is control paging of import jobs, +// number of import jobs to return with each call. By default, it returns all +// import jobs with a default paging of 20. +// The response contains a `nextLink` property describing the path to get the +// next page if there are more results. +// The maximum paging limit for $top is 40. orderby is sort results by an +// expression which looks like `$orderby=jobId asc` (default when not +// specified). +// The syntax is orderby={property} {direction} or just orderby={property}. +// Properties that can be specified for sorting: jobId, errorDetails, +// dateCreated, jobStatus, and dateCreated. +// The available directions are asc (for ascending order) and desc (for +// descending order). +// When not specified the asc direction is used. +// Only one orderby property can be specified. 
+func (client ImportTasksClient) List(skip *int32, top *int32, orderby string) (result ImportTaskListResult, err error) { + req, err := client.ListPreparer(skip, top, orderby) + if err != nil { + return result, autorest.NewErrorWithError(err, "mobileengagement.ImportTasksClient", "List", nil, "Failure preparing request") + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "mobileengagement.ImportTasksClient", "List", resp, "Failure sending request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "mobileengagement.ImportTasksClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client ImportTasksClient) ListPreparer(skip *int32, top *int32, orderby string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "appCollection": autorest.Encode("path", client.AppCollection), + "appName": autorest.Encode("path", client.AppName), + "resourceGroupName": autorest.Encode("path", client.ResourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + if skip != nil { + queryParameters["$skip"] = autorest.Encode("query", *skip) + } + if top != nil { + queryParameters["$top"] = autorest.Encode("query", *top) + } + if len(orderby) > 0 { + queryParameters["$orderby"] = autorest.Encode("query", orderby) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MobileEngagement/appcollections/{appCollection}/apps/{appName}/devices/importTasks", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return 
preparer.Prepare(&http.Request{}) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client ImportTasksClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client ImportTasksClient) ListResponder(resp *http.Response) (result ImportTaskListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListNextResults retrieves the next set of results, if any. +func (client ImportTasksClient) ListNextResults(lastResults ImportTaskListResult) (result ImportTaskListResult, err error) { + req, err := lastResults.ImportTaskListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "mobileengagement.ImportTasksClient", "List", nil, "Failure preparing next results request request") + } + if req == nil { + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "mobileengagement.ImportTasksClient", "List", resp, "Failure sending next results request request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "mobileengagement.ImportTasksClient", "List", resp, "Failure responding to next results request request") + } + + return +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/mobileengagement/models.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/mobileengagement/models.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/mobileengagement/models.go 1970-01-01 
00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/mobileengagement/models.go 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,920 @@ +package mobileengagement + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/date" + "github.com/Azure/go-autorest/autorest/to" + "net/http" +) + +// AudienceOperators enumerates the values for audience operators. +type AudienceOperators string + +const ( + // EQ specifies the eq state for audience operators. + EQ AudienceOperators = "EQ" + // GE specifies the ge state for audience operators. + GE AudienceOperators = "GE" + // GT specifies the gt state for audience operators. + GT AudienceOperators = "GT" + // LE specifies the le state for audience operators. + LE AudienceOperators = "LE" + // LT specifies the lt state for audience operators. + LT AudienceOperators = "LT" +) + +// CampaignFeedbacks enumerates the values for campaign feedbacks. +type CampaignFeedbacks string + +const ( + // Actioned specifies the actioned state for campaign feedbacks. + Actioned CampaignFeedbacks = "actioned" + // Exited specifies the exited state for campaign feedbacks. 
+ Exited CampaignFeedbacks = "exited" + // Pushed specifies the pushed state for campaign feedbacks. + Pushed CampaignFeedbacks = "pushed" + // Replied specifies the replied state for campaign feedbacks. + Replied CampaignFeedbacks = "replied" +) + +// CampaignKinds enumerates the values for campaign kinds. +type CampaignKinds string + +const ( + // Announcements specifies the announcements state for campaign kinds. + Announcements CampaignKinds = "announcements" + // DataPushes specifies the data pushes state for campaign kinds. + DataPushes CampaignKinds = "dataPushes" + // NativePushes specifies the native pushes state for campaign kinds. + NativePushes CampaignKinds = "nativePushes" + // Polls specifies the polls state for campaign kinds. + Polls CampaignKinds = "polls" +) + +// CampaignStates enumerates the values for campaign states. +type CampaignStates string + +const ( + // Draft specifies the draft state for campaign states. + Draft CampaignStates = "draft" + // Finished specifies the finished state for campaign states. + Finished CampaignStates = "finished" + // InProgress specifies the in progress state for campaign states. + InProgress CampaignStates = "in-progress" + // Queued specifies the queued state for campaign states. + Queued CampaignStates = "queued" + // Scheduled specifies the scheduled state for campaign states. + Scheduled CampaignStates = "scheduled" +) + +// CampaignType enumerates the values for campaign type. +type CampaignType string + +const ( + // Announcement specifies the announcement state for campaign type. + Announcement CampaignType = "Announcement" + // DataPush specifies the data push state for campaign type. + DataPush CampaignType = "DataPush" + // NativePush specifies the native push state for campaign type. + NativePush CampaignType = "NativePush" + // Poll specifies the poll state for campaign type. + Poll CampaignType = "Poll" +) + +// CampaignTypes enumerates the values for campaign types. 
+type CampaignTypes string + +const ( + // OnlyNotif specifies the only notif state for campaign types. + OnlyNotif CampaignTypes = "only_notif" + // Textbase64 specifies the textbase 64 state for campaign types. + Textbase64 CampaignTypes = "text/base64" + // Texthtml specifies the texthtml state for campaign types. + Texthtml CampaignTypes = "text/html" + // Textplain specifies the textplain state for campaign types. + Textplain CampaignTypes = "text/plain" +) + +// DeliveryTimes enumerates the values for delivery times. +type DeliveryTimes string + +const ( + // Any specifies the any state for delivery times. + Any DeliveryTimes = "any" + // Background specifies the background state for delivery times. + Background DeliveryTimes = "background" + // Session specifies the session state for delivery times. + Session DeliveryTimes = "session" +) + +// ExportFormat enumerates the values for export format. +type ExportFormat string + +const ( + // CsvBlob specifies the csv blob state for export format. + CsvBlob ExportFormat = "CsvBlob" + // JSONBlob specifies the json blob state for export format. + JSONBlob ExportFormat = "JsonBlob" +) + +// ExportState enumerates the values for export state. +type ExportState string + +const ( + // ExportStateFailed specifies the export state failed state for export + // state. + ExportStateFailed ExportState = "Failed" + // ExportStateQueued specifies the export state queued state for export + // state. + ExportStateQueued ExportState = "Queued" + // ExportStateStarted specifies the export state started state for export + // state. + ExportStateStarted ExportState = "Started" + // ExportStateSucceeded specifies the export state succeeded state for + // export state. + ExportStateSucceeded ExportState = "Succeeded" +) + +// ExportType enumerates the values for export type. +type ExportType string + +const ( + // ExportTypeActivity specifies the export type activity state for export + // type. 
+ ExportTypeActivity ExportType = "Activity" + // ExportTypeCrash specifies the export type crash state for export type. + ExportTypeCrash ExportType = "Crash" + // ExportTypeError specifies the export type error state for export type. + ExportTypeError ExportType = "Error" + // ExportTypeEvent specifies the export type event state for export type. + ExportTypeEvent ExportType = "Event" + // ExportTypeJob specifies the export type job state for export type. + ExportTypeJob ExportType = "Job" + // ExportTypePush specifies the export type push state for export type. + ExportTypePush ExportType = "Push" + // ExportTypeSession specifies the export type session state for export + // type. + ExportTypeSession ExportType = "Session" + // ExportTypeTag specifies the export type tag state for export type. + ExportTypeTag ExportType = "Tag" + // ExportTypeToken specifies the export type token state for export type. + ExportTypeToken ExportType = "Token" +) + +// JobStates enumerates the values for job states. +type JobStates string + +const ( + // JobStatesFailed specifies the job states failed state for job states. + JobStatesFailed JobStates = "Failed" + // JobStatesQueued specifies the job states queued state for job states. + JobStatesQueued JobStates = "Queued" + // JobStatesStarted specifies the job states started state for job states. + JobStatesStarted JobStates = "Started" + // JobStatesSucceeded specifies the job states succeeded state for job + // states. + JobStatesSucceeded JobStates = "Succeeded" +) + +// NotificationTypes enumerates the values for notification types. +type NotificationTypes string + +const ( + // Popup specifies the popup state for notification types. + Popup NotificationTypes = "popup" + // System specifies the system state for notification types. + System NotificationTypes = "system" +) + +// ProvisioningStates enumerates the values for provisioning states. 
+type ProvisioningStates string + +const ( + // Creating specifies the creating state for provisioning states. + Creating ProvisioningStates = "Creating" + // Succeeded specifies the succeeded state for provisioning states. + Succeeded ProvisioningStates = "Succeeded" +) + +// PushModes enumerates the values for push modes. +type PushModes string + +const ( + // Manual specifies the manual state for push modes. + Manual PushModes = "manual" + // OneShot specifies the one shot state for push modes. + OneShot PushModes = "one-shot" + // RealTime specifies the real time state for push modes. + RealTime PushModes = "real-time" +) + +// AnnouncementFeedbackCriterion is used to target devices who received an +// announcement. +type AnnouncementFeedbackCriterion struct { + ContentID *int32 `json:"content-id,omitempty"` + Action CampaignFeedbacks `json:"action,omitempty"` +} + +// APIError is +type APIError struct { + Error *APIErrorError `json:"error,omitempty"` +} + +// APIErrorError is +type APIErrorError struct { + Code *string `json:"code,omitempty"` + Message *string `json:"message,omitempty"` +} + +// App is the Mobile Engagement App resource. +type App struct { + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` + Properties *AppProperties `json:"properties,omitempty"` +} + +// AppCollection is the AppCollection resource. +type AppCollection struct { + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` + Properties *AppCollectionProperties `json:"properties,omitempty"` +} + +// AppCollectionListResult is the list AppCollections operation response. 
+type AppCollectionListResult struct { + autorest.Response `json:"-"` + Value *[]AppCollection `json:"value,omitempty"` + NextLink *string `json:",omitempty"` +} + +// AppCollectionListResultPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. +func (client AppCollectionListResult) AppCollectionListResultPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + +// AppCollectionNameAvailability is +type AppCollectionNameAvailability struct { + autorest.Response `json:"-"` + Name *string `json:"name,omitempty"` + Available *bool `json:"available,omitempty"` + UnavailabilityReason *string `json:"unavailabilityReason,omitempty"` +} + +// AppCollectionProperties is +type AppCollectionProperties struct { + ProvisioningState ProvisioningStates `json:"provisioningState,omitempty"` +} + +// AppInfoFilter is send only to users who have some app info set. This is a +// special filter that is automatically added if your campaign contains +// appInfo parameters. It is not intended to be public and should not be used +// as it could be removed or replaced by the API. +type AppInfoFilter struct { + AppInfo *[]string `json:"appInfo,omitempty"` +} + +// ApplicationVersionCriterion is used to target devices based on the version +// of the application they are using. +type ApplicationVersionCriterion struct { + Name *string `json:"name,omitempty"` +} + +// AppListResult is the list Apps operation response. +type AppListResult struct { + autorest.Response `json:"-"` + Value *[]App `json:"value,omitempty"` + NextLink *string `json:",omitempty"` +} + +// AppListResultPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. 
+func (client AppListResult) AppListResultPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + +// AppProperties is +type AppProperties struct { + BackendID *string `json:"backendId,omitempty"` + Platform *string `json:"platform,omitempty"` + AppState *string `json:"appState,omitempty"` +} + +// BooleanTagCriterion is target devices based on a boolean tag value. +type BooleanTagCriterion struct { + Name *string `json:"name,omitempty"` + Value *bool `json:"value,omitempty"` +} + +// Campaign is +type Campaign struct { + NotificationTitle *string `json:"notificationTitle,omitempty"` + NotificationMessage *string `json:"notificationMessage,omitempty"` + NotificationImage *[]byte `json:"notificationImage,omitempty"` + NotificationOptions *NotificationOptions `json:"notificationOptions,omitempty"` + Title *string `json:"title,omitempty"` + Body *string `json:"body,omitempty"` + ActionButtonText *string `json:"actionButtonText,omitempty"` + ExitButtonText *string `json:"exitButtonText,omitempty"` + ActionURL *string `json:"actionUrl,omitempty"` + Payload *map[string]interface{} `json:"payload,omitempty"` + Name *string `json:"name,omitempty"` + Audience *CampaignAudience `json:"audience,omitempty"` + Category *string `json:"category,omitempty"` + PushMode PushModes `json:"pushMode,omitempty"` + Type CampaignTypes `json:"type,omitempty"` + DeliveryTime DeliveryTimes `json:"deliveryTime,omitempty"` + DeliveryActivities *[]string `json:"deliveryActivities,omitempty"` + StartTime *string `json:"startTime,omitempty"` + EndTime *string `json:"endTime,omitempty"` + Timezone *string `json:"timezone,omitempty"` + NotificationType NotificationTypes `json:"notificationType,omitempty"` + NotificationIcon *bool `json:"notificationIcon,omitempty"` + NotificationCloseable 
*bool `json:"notificationCloseable,omitempty"` + NotificationVibrate *bool `json:"notificationVibrate,omitempty"` + NotificationSound *bool `json:"notificationSound,omitempty"` + NotificationBadge *bool `json:"notificationBadge,omitempty"` + Localization *map[string]*CampaignLocalization `json:"localization,omitempty"` + Questions *[]PollQuestion `json:"questions,omitempty"` +} + +// CampaignAudience is specify which users will be targeted by this campaign. +// By default, all users will be targeted. If you set `pushMode` property to +// `manual`, the only thing you can specify in the audience is the push quota +// filter. An audience is a boolean expression made of criteria (variables) +// operators (`not`, `and` or `or`) and parenthesis. Additionally, a set of +// filters can be added to an audience. 65535 bytes max as per JSON encoding. +type CampaignAudience struct { + Expression *string `json:"expression,omitempty"` + Criteria *map[string]*Criterion `json:"criteria,omitempty"` + Filters *[]Filter `json:"filters,omitempty"` +} + +// CampaignListResult is +type CampaignListResult struct { + State CampaignStates `json:"state,omitempty"` + ID *int32 `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + ActivatedDate *date.Time `json:"activatedDate,omitempty"` + FinishedDate *date.Time `json:"finishedDate,omitempty"` + StartTime *date.Time `json:"startTime,omitempty"` + EndTime *date.Time `json:"endTime,omitempty"` + Timezone *string `json:"timezone,omitempty"` +} + +// CampaignLocalization is +type CampaignLocalization struct { + NotificationTitle *string `json:"notificationTitle,omitempty"` + NotificationMessage *string `json:"notificationMessage,omitempty"` + NotificationImage *[]byte `json:"notificationImage,omitempty"` + NotificationOptions *NotificationOptions `json:"notificationOptions,omitempty"` + Title *string `json:"title,omitempty"` + Body *string `json:"body,omitempty"` + ActionButtonText *string `json:"actionButtonText,omitempty"` + 
ExitButtonText *string `json:"exitButtonText,omitempty"` + ActionURL *string `json:"actionUrl,omitempty"` + Payload *map[string]interface{} `json:"payload,omitempty"` +} + +// CampaignPushParameters is +type CampaignPushParameters struct { + DeviceIds *[]string `json:"deviceIds,omitempty"` + Data *Campaign `json:"data,omitempty"` +} + +// CampaignPushResult is +type CampaignPushResult struct { + autorest.Response `json:"-"` + InvalidDeviceIds *[]string `json:"invalidDeviceIds,omitempty"` +} + +// CampaignResult is +type CampaignResult struct { + autorest.Response `json:"-"` + NotificationTitle *string `json:"notificationTitle,omitempty"` + NotificationMessage *string `json:"notificationMessage,omitempty"` + NotificationImage *[]byte `json:"notificationImage,omitempty"` + NotificationOptions *NotificationOptions `json:"notificationOptions,omitempty"` + Title *string `json:"title,omitempty"` + Body *string `json:"body,omitempty"` + ActionButtonText *string `json:"actionButtonText,omitempty"` + ExitButtonText *string `json:"exitButtonText,omitempty"` + ActionURL *string `json:"actionUrl,omitempty"` + Payload *map[string]interface{} `json:"payload,omitempty"` + Name *string `json:"name,omitempty"` + Audience *CampaignAudience `json:"audience,omitempty"` + Category *string `json:"category,omitempty"` + PushMode PushModes `json:"pushMode,omitempty"` + Type CampaignTypes `json:"type,omitempty"` + DeliveryTime DeliveryTimes `json:"deliveryTime,omitempty"` + DeliveryActivities *[]string `json:"deliveryActivities,omitempty"` + StartTime *string `json:"startTime,omitempty"` + EndTime *string `json:"endTime,omitempty"` + Timezone *string `json:"timezone,omitempty"` + NotificationType NotificationTypes `json:"notificationType,omitempty"` + NotificationIcon *bool `json:"notificationIcon,omitempty"` + NotificationCloseable *bool `json:"notificationCloseable,omitempty"` + NotificationVibrate *bool `json:"notificationVibrate,omitempty"` + NotificationSound *bool 
`json:"notificationSound,omitempty"` + NotificationBadge *bool `json:"notificationBadge,omitempty"` + Localization *map[string]*CampaignLocalization `json:"localization,omitempty"` + Questions *[]PollQuestion `json:"questions,omitempty"` + ID *int32 `json:"id,omitempty"` + State CampaignStates `json:"state,omitempty"` + ActivatedDate *date.Time `json:"activatedDate,omitempty"` + FinishedDate *date.Time `json:"finishedDate,omitempty"` +} + +// CampaignsListResult is the campaigns list result. +type CampaignsListResult struct { + autorest.Response `json:"-"` + Value *[]CampaignListResult `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// CampaignsListResultPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. +func (client CampaignsListResult) CampaignsListResultPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + +// CampaignState is +type CampaignState struct { + autorest.Response `json:"-"` + State CampaignStates `json:"state,omitempty"` +} + +// CampaignStateResult is +type CampaignStateResult struct { + autorest.Response `json:"-"` + State CampaignStates `json:"state,omitempty"` + ID *int32 `json:"id,omitempty"` +} + +// CampaignStatisticsResult is +type CampaignStatisticsResult struct { + autorest.Response `json:"-"` + Queued *int32 `json:"queued,omitempty"` + Pushed *int32 `json:"pushed,omitempty"` + PushedNative *int32 `json:"pushed-native,omitempty"` + PushedNativeGoogle *int32 `json:"pushed-native-google,omitempty"` + PushedNativeAdm *int32 `json:"pushed-native-adm,omitempty"` + Delivered *int32 `json:"delivered,omitempty"` + Dropped *int32 `json:"dropped,omitempty"` + SystemNotificationDisplayed *int32 `json:"system-notification-displayed,omitempty"` + 
InAppNotificationDisplayed *int32 `json:"in-app-notification-displayed,omitempty"` + ContentDisplayed *int32 `json:"content-displayed,omitempty"` + SystemNotificationActioned *int32 `json:"system-notification-actioned,omitempty"` + SystemNotificationExited *int32 `json:"system-notification-exited,omitempty"` + InAppNotificationActioned *int32 `json:"in-app-notification-actioned,omitempty"` + InAppNotificationExited *int32 `json:"in-app-notification-exited,omitempty"` + ContentActioned *int32 `json:"content-actioned,omitempty"` + ContentExited *int32 `json:"content-exited,omitempty"` + Answers *map[string]map[string]interface{} `json:"answers,omitempty"` +} + +// CampaignTestNewParameters is +type CampaignTestNewParameters struct { + DeviceID *string `json:"deviceId,omitempty"` + Lang *string `json:"lang,omitempty"` + Data *Campaign `json:"data,omitempty"` +} + +// CampaignTestSavedParameters is +type CampaignTestSavedParameters struct { + DeviceID *string `json:"deviceId,omitempty"` + Lang *string `json:"lang,omitempty"` +} + +// CarrierCountryCriterion is used to target devices based on their carrier +// country. +type CarrierCountryCriterion struct { + Name *string `json:"name,omitempty"` +} + +// CarrierNameCriterion is used to target devices based on their carrier name. +type CarrierNameCriterion struct { + Name *string `json:"name,omitempty"` +} + +// Criterion is +type Criterion struct { +} + +// DatapushFeedbackCriterion is used to target devices who received a data +// push. 
+type DatapushFeedbackCriterion struct { + ContentID *int32 `json:"content-id,omitempty"` + Action CampaignFeedbacks `json:"action,omitempty"` +} + +// DateRangeExportTaskParameter is +type DateRangeExportTaskParameter struct { + ContainerURL *string `json:"containerUrl,omitempty"` + Description *string `json:"description,omitempty"` + StartDate *date.Date `json:"startDate,omitempty"` + EndDate *date.Date `json:"endDate,omitempty"` + ExportFormat ExportFormat `json:"exportFormat,omitempty"` +} + +// DateTagCriterion is target devices based on a date tag value. +type DateTagCriterion struct { + Name *string `json:"name,omitempty"` + Value *date.Date `json:"value,omitempty"` + Op AudienceOperators `json:"op,omitempty"` +} + +// Device is +type Device struct { + autorest.Response `json:"-"` + DeviceID *string `json:"deviceId,omitempty"` + Meta *DeviceMeta `json:"meta,omitempty"` + Info *DeviceInfo `json:"info,omitempty"` + Location *DeviceLocation `json:"location,omitempty"` + AppInfo *map[string]*string `json:"appInfo,omitempty"` +} + +// DeviceInfo is +type DeviceInfo struct { + PhoneModel *string `json:"phoneModel,omitempty"` + PhoneManufacturer *string `json:"phoneManufacturer,omitempty"` + FirmwareVersion *string `json:"firmwareVersion,omitempty"` + FirmwareName *string `json:"firmwareName,omitempty"` + AndroidAPILevel *int32 `json:"androidAPILevel,omitempty"` + CarrierCountry *string `json:"carrierCountry,omitempty"` + Locale *string `json:"locale,omitempty"` + CarrierName *string `json:"carrierName,omitempty"` + NetworkType *string `json:"networkType,omitempty"` + NetworkSubtype *string `json:"networkSubtype,omitempty"` + ApplicationVersionName *string `json:"applicationVersionName,omitempty"` + ApplicationVersionCode *int32 `json:"applicationVersionCode,omitempty"` + TimeZoneOffset *int32 `json:"timeZoneOffset,omitempty"` + ServiceVersion *string `json:"serviceVersion,omitempty"` +} + +// DeviceLocation is +type DeviceLocation struct { + Countrycode *string 
`json:"countrycode,omitempty"` + Region *string `json:"region,omitempty"` + Locality *string `json:"locality,omitempty"` +} + +// DeviceManufacturerCriterion is used to target devices based on the device +// manufacturer. +type DeviceManufacturerCriterion struct { + Name *string `json:"name,omitempty"` +} + +// DeviceMeta is +type DeviceMeta struct { + FirstSeen *int64 `json:"firstSeen,omitempty"` + LastSeen *int64 `json:"lastSeen,omitempty"` + LastInfo *int64 `json:"lastInfo,omitempty"` + LastLocation *int64 `json:"lastLocation,omitempty"` + NativePushEnabled *bool `json:"nativePushEnabled,omitempty"` +} + +// DeviceModelCriterion is used to target devices based on the device model. +type DeviceModelCriterion struct { + Name *string `json:"name,omitempty"` +} + +// DeviceQueryResult is +type DeviceQueryResult struct { + DeviceID *string `json:"deviceId,omitempty"` + Meta *DeviceMeta `json:"meta,omitempty"` + AppInfo *map[string]*string `json:"appInfo,omitempty"` +} + +// DevicesQueryResult is the campaigns list result. +type DevicesQueryResult struct { + autorest.Response `json:"-"` + Value *[]DeviceQueryResult `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// DevicesQueryResultPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. 
func (client DevicesQueryResult) DevicesQueryResultPreparer() (*http.Request, error) {
	// nil/nil (no request, no error) tells the paging loop that the
	// previous page carried no nextLink, i.e. iteration is complete.
	if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 {
		return nil, nil
	}
	return autorest.Prepare(&http.Request{},
		autorest.AsJSON(),
		autorest.AsGet(),
		autorest.WithBaseURL(to.String(client.NextLink)))
}

// DeviceTagsParameters is the payload of a bulk tag update. Tags maps an
// outer key to a set of tag name/value pairs — presumably keyed by device
// ID, as in DeviceTagsResult below; confirm against the REST API reference.
// When DeleteOnNull is set, the service is asked to remove tags whose value
// is null rather than store them.
type DeviceTagsParameters struct {
	Tags         *map[string]map[string]string `json:"tags,omitempty"`
	DeleteOnNull *bool                         `json:"deleteOnNull,omitempty"`
}

// DeviceTagsResult reports the outcome of a tag update; InvalidIds lists the
// device IDs the service rejected.
type DeviceTagsResult struct {
	autorest.Response `json:"-"`
	InvalidIds        *[]string `json:"invalidIds,omitempty"`
}

// EngageActiveUsersFilter is send only to users who have used the app in the
// last {threshold} days.
type EngageActiveUsersFilter struct {
	Threshold *int32 `json:"threshold,omitempty"`
}

// EngageIdleUsersFilter is send only to users who haven't used the app in the
// last {threshold} days.
type EngageIdleUsersFilter struct {
	Threshold *int32 `json:"threshold,omitempty"`
}

// EngageNewUsersFilter is send only to users whose first app use is less than
// {threshold} days old.
type EngageNewUsersFilter struct {
	Threshold *int32 `json:"threshold,omitempty"`
}

// EngageOldUsersFilter is send only to users whose first app use is more than
// {threshold} days old.
type EngageOldUsersFilter struct {
	Threshold *int32 `json:"threshold,omitempty"`
}

// EngageSubsetFilter is send only to a maximum of max users.
type EngageSubsetFilter struct {
	Max *int32 `json:"max,omitempty"`
}

// ExportOptions is options to control export generation.
type ExportOptions struct {
	ExportUserID *bool `json:"exportUserId,omitempty"`
}

// ExportTaskListResult gets a paged list of ExportTasks.
+type ExportTaskListResult struct { + autorest.Response `json:"-"` + Value *[]ExportTaskResult `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// ExportTaskListResultPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. +func (client ExportTaskListResult) ExportTaskListResultPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + +// ExportTaskParameter is +type ExportTaskParameter struct { + ContainerURL *string `json:"containerUrl,omitempty"` + Description *string `json:"description,omitempty"` + ExportFormat ExportFormat `json:"exportFormat,omitempty"` +} + +// ExportTaskResult is +type ExportTaskResult struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + Description *string `json:"description,omitempty"` + State ExportState `json:"state,omitempty"` + DateCreated *date.Time `json:"dateCreated,omitempty"` + DateCompleted *date.Time `json:"dateCompleted,omitempty"` + ExportType ExportType `json:"exportType,omitempty"` + ErrorDetails *string `json:"errorDetails,omitempty"` +} + +// FeedbackByCampaignParameter is +type FeedbackByCampaignParameter struct { + ContainerURL *string `json:"containerUrl,omitempty"` + Description *string `json:"description,omitempty"` + CampaignType CampaignType `json:"campaignType,omitempty"` + CampaignIds *[]int32 `json:"campaignIds,omitempty"` + ExportFormat ExportFormat `json:"exportFormat,omitempty"` +} + +// FeedbackByDateRangeParameter is +type FeedbackByDateRangeParameter struct { + ContainerURL *string `json:"containerUrl,omitempty"` + Description *string `json:"description,omitempty"` + CampaignType CampaignType `json:"campaignType,omitempty"` + CampaignWindowStart *date.Time 
`json:"campaignWindowStart,omitempty"` + CampaignWindowEnd *date.Time `json:"campaignWindowEnd,omitempty"` + ExportFormat ExportFormat `json:"exportFormat,omitempty"` +} + +// Filter is +type Filter struct { +} + +// FirmwareVersionCriterion is used to target devices based on their firmware +// version. +type FirmwareVersionCriterion struct { + Name *string `json:"name,omitempty"` +} + +// GeoFencingCriterion is used to target devices based on a specific region. A +// center point (defined by a latitude and longitude) and a radius form the +// boundary for the region. This criterion will be met when the user crosses +// the boundaries of the region. +type GeoFencingCriterion struct { + Lat *float64 `json:"lat,omitempty"` + Lon *float64 `json:"lon,omitempty"` + Radius *int32 `json:"radius,omitempty"` + Expiration *int32 `json:"expiration,omitempty"` +} + +// ImportTask is +type ImportTask struct { + StorageURL *string `json:"storageUrl,omitempty"` +} + +// ImportTaskListResult is gets a paged list of import tasks. +type ImportTaskListResult struct { + autorest.Response `json:"-"` + Value *[]ImportTaskResult `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// ImportTaskListResultPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. 
func (client ImportTaskListResult) ImportTaskListResultPreparer() (*http.Request, error) {
	// nil/nil (no request, no error) tells the paging loop that the
	// previous page carried no nextLink, i.e. iteration is complete.
	if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 {
		return nil, nil
	}
	return autorest.Prepare(&http.Request{},
		autorest.AsJSON(),
		autorest.AsGet(),
		autorest.WithBaseURL(to.String(client.NextLink)))
}

// ImportTaskResult describes one device-import job: the StorageURL the input
// was read from, the job's lifecycle State, its creation/completion
// timestamps, and any ErrorDetails reported for it.
type ImportTaskResult struct {
	autorest.Response `json:"-"`
	StorageURL        *string    `json:"storageUrl,omitempty"`
	ID                *string    `json:"id,omitempty"`
	State             JobStates  `json:"state,omitempty"`
	DateCreated       *date.Time `json:"dateCreated,omitempty"`
	DateCompleted     *date.Time `json:"dateCompleted,omitempty"`
	ErrorDetails      *string    `json:"errorDetails,omitempty"`
}

// IntegerTagCriterion is target devices based on an integer tag value,
// compared with the given audience operator (EQ/GT/GE/LT/LE).
type IntegerTagCriterion struct {
	Name  *string           `json:"name,omitempty"`
	Value *int32            `json:"value,omitempty"`
	Op    AudienceOperators `json:"op,omitempty"`
}

// LanguageCriterion is used to target devices based on the language of their
// device.
type LanguageCriterion struct {
	Name *string `json:"name,omitempty"`
}

// LocationCriterion is used to target devices based on their last known area.
type LocationCriterion struct {
	Country  *string `json:"country,omitempty"`
	Region   *string `json:"region,omitempty"`
	Locality *string `json:"locality,omitempty"`
}

// NativePushEnabledFilter is engage only users with native push enabled.
// (Marker type: the filter carries no parameters.)
type NativePushEnabledFilter struct {
}

// NetworkTypeCriterion is used to target devices based on their network type.
+type NetworkTypeCriterion struct { + Name *string `json:"name,omitempty"` +} + +// NotificationOptions is +type NotificationOptions struct { + BigText *string `json:"bigText,omitempty"` + BigPicture *string `json:"bigPicture,omitempty"` + Sound *string `json:"sound,omitempty"` + ActionText *string `json:"actionText,omitempty"` +} + +// PollAnswerFeedbackCriterion is used to target devices who answered X to a +// given question. +type PollAnswerFeedbackCriterion struct { + ContentID *int32 `json:"content-id,omitempty"` + ChoiceID *int32 `json:"choice-id,omitempty"` +} + +// PollFeedbackCriterion is used to target devices who received a poll. +type PollFeedbackCriterion struct { + ContentID *int32 `json:"content-id,omitempty"` + Action CampaignFeedbacks `json:"action,omitempty"` +} + +// PollQuestion is +type PollQuestion struct { + Title *string `json:"title,omitempty"` + ID *int32 `json:"id,omitempty"` + Localization *map[string]*PollQuestionLocalization `json:"localization,omitempty"` + Choices *[]PollQuestionChoice `json:"choices,omitempty"` +} + +// PollQuestionChoice is +type PollQuestionChoice struct { + Title *string `json:"title,omitempty"` + ID *int32 `json:"id,omitempty"` + Localization *map[string]*PollQuestionChoiceLocalization `json:"localization,omitempty"` + IsDefault *bool `json:"isDefault,omitempty"` +} + +// PollQuestionChoiceLocalization is +type PollQuestionChoiceLocalization struct { + Title *string `json:"title,omitempty"` +} + +// PollQuestionLocalization is +type PollQuestionLocalization struct { + Title *string `json:"title,omitempty"` +} + +// PushQuotaFilter is engage only users for whom the push quota is not reached. 
+type PushQuotaFilter struct { +} + +// Resource is +type Resource struct { + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` +} + +// ScreenSizeCriterion is used to target devices based on the screen +// resolution of their device. +type ScreenSizeCriterion struct { + Name *string `json:"name,omitempty"` +} + +// SegmentCriterion is target devices based on an existing segment. +type SegmentCriterion struct { + ID *int32 `json:"id,omitempty"` + Exclude *bool `json:"exclude,omitempty"` +} + +// StringTagCriterion is target devices based on a string tag value. +type StringTagCriterion struct { + Name *string `json:"name,omitempty"` + Value *string `json:"value,omitempty"` +} + +// SupportedPlatformsListResult is +type SupportedPlatformsListResult struct { + autorest.Response `json:"-"` + Platforms *[]string `json:"platforms,omitempty"` +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/mobileengagement/supportedplatforms.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/mobileengagement/supportedplatforms.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/mobileengagement/supportedplatforms.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/mobileengagement/supportedplatforms.go 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,101 @@ +package mobileengagement + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "net/http" +) + +// SupportedPlatformsClient is the microsoft Azure Mobile Engagement REST +// APIs. +type SupportedPlatformsClient struct { + ManagementClient +} + +// NewSupportedPlatformsClient creates an instance of the +// SupportedPlatformsClient client. +func NewSupportedPlatformsClient(subscriptionID string, resourceGroupName string, appCollection string, appName string) SupportedPlatformsClient { + return NewSupportedPlatformsClientWithBaseURI(DefaultBaseURI, subscriptionID, resourceGroupName, appCollection, appName) +} + +// NewSupportedPlatformsClientWithBaseURI creates an instance of the +// SupportedPlatformsClient client. +func NewSupportedPlatformsClientWithBaseURI(baseURI string, subscriptionID string, resourceGroupName string, appCollection string, appName string) SupportedPlatformsClient { + return SupportedPlatformsClient{NewWithBaseURI(baseURI, subscriptionID, resourceGroupName, appCollection, appName)} +} + +// List lists supported platforms for Engagement applications. 
+func (client SupportedPlatformsClient) List() (result SupportedPlatformsListResult, err error) { + req, err := client.ListPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "mobileengagement.SupportedPlatformsClient", "List", nil, "Failure preparing request") + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "mobileengagement.SupportedPlatformsClient", "List", resp, "Failure sending request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "mobileengagement.SupportedPlatformsClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client SupportedPlatformsClient) ListPreparer() (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.MobileEngagement/supportedPlatforms", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client SupportedPlatformsClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. 
+func (client SupportedPlatformsClient) ListResponder(resp *http.Response) (result SupportedPlatformsListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/mobileengagement/version.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/mobileengagement/version.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/mobileengagement/version.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/mobileengagement/version.go 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,43 @@ +package mobileengagement + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "fmt" +) + +const ( + major = "3" + minor = "1" + patch = "0" + // Always begin a "tag" with a dash (as per http://semver.org) + tag = "-beta" + semVerFormat = "%s.%s.%s%s" + userAgentFormat = "Azure-SDK-for-Go/%s arm-%s/%s" +) + +// UserAgent returns the UserAgent string to use when sending http.Requests. 
+func UserAgent() string { + return fmt.Sprintf(userAgentFormat, Version(), "mobileengagement", "2014-12-01") +} + +// Version returns the semantic version (see http://semver.org) of the client. +func Version() string { + return fmt.Sprintf(semVerFormat, major, minor, patch, tag) +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/network/applicationgateways.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/network/applicationgateways.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/network/applicationgateways.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/network/applicationgateways.go 2016-10-13 14:32:06.000000000 +0000 @@ -14,20 +14,20 @@ // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 // Changes may cause incorrect behavior and will be lost if the code is // regenerated. import ( - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" "net/http" - "net/url" ) -// ApplicationGatewaysClient is the the Windows Azure Network management API -// provides a RESTful set of web services that interact with Windows Azure +// ApplicationGatewaysClient is the the Microsoft Azure Network management API +// provides a RESTful set of web services that interact with Microsoft Azure // Networks service to manage your network resrources. The API has entities -// that capture the relationship between an end user and the Windows Azure +// that capture the relationship between an end user and the Microsoft Azure // Networks service. 
type ApplicationGatewaysClient struct { ManagementClient @@ -46,122 +46,128 @@ } // CreateOrUpdate the Put ApplicationGateway operation creates/updates a -// ApplicationGateway +// ApplicationGateway This method may poll for completion. Polling can be +// canceled by passing the cancel channel argument. The channel will be used +// to cancel polling and any outstanding HTTP requests. // // resourceGroupName is the name of the resource group. applicationGatewayName // is the name of the ApplicationGateway. parameters is parameters supplied // to the create/delete ApplicationGateway operation -func (client ApplicationGatewaysClient) CreateOrUpdate(resourceGroupName string, applicationGatewayName string, parameters ApplicationGateway) (result ApplicationGateway, ae error) { - req, err := client.CreateOrUpdatePreparer(resourceGroupName, applicationGatewayName, parameters) +func (client ApplicationGatewaysClient) CreateOrUpdate(resourceGroupName string, applicationGatewayName string, parameters ApplicationGateway, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.CreateOrUpdatePreparer(resourceGroupName, applicationGatewayName, parameters, cancel) if err != nil { - return result, autorest.NewErrorWithError(err, "network/ApplicationGatewaysClient", "CreateOrUpdate", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "CreateOrUpdate", nil, "Failure preparing request") } resp, err := client.CreateOrUpdateSender(req) if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network/ApplicationGatewaysClient", "CreateOrUpdate", "Failure sending request") + result.Response = resp + return result, autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "CreateOrUpdate", resp, "Failure sending request") } result, err = client.CreateOrUpdateResponder(resp) if err != nil { - ae = 
autorest.NewErrorWithError(err, "network/ApplicationGatewaysClient", "CreateOrUpdate", "Failure responding to request") + err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "CreateOrUpdate", resp, "Failure responding to request") } return } // CreateOrUpdatePreparer prepares the CreateOrUpdate request. -func (client ApplicationGatewaysClient) CreateOrUpdatePreparer(resourceGroupName string, applicationGatewayName string, parameters ApplicationGateway) (*http.Request, error) { +func (client ApplicationGatewaysClient) CreateOrUpdatePreparer(resourceGroupName string, applicationGatewayName string, parameters ApplicationGateway, cancel <-chan struct{}) (*http.Request, error) { pathParameters := map[string]interface{}{ - "applicationGatewayName": url.QueryEscape(applicationGatewayName), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "applicationGatewayName": autorest.Encode("path", applicationGatewayName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, + preparer := autorest.CreatePreparer( autorest.AsJSON(), autorest.AsPut(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}"), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}", pathParameters), autorest.WithJSON(parameters), - autorest.WithPathParameters(pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) } // CreateOrUpdateSender sends the 
CreateOrUpdate request. The method will close the // http.Response Body if it receives an error. func (client ApplicationGatewaysClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusCreated, http.StatusOK) + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) } // CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always // closes the http.Response Body. -func (client ApplicationGatewaysClient) CreateOrUpdateResponder(resp *http.Response) (result ApplicationGateway, err error) { +func (client ApplicationGatewaysClient) CreateOrUpdateResponder(resp *http.Response) (result autorest.Response, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusCreated, http.StatusOK), - autorest.ByUnmarshallingJSON(&result), + azure.WithErrorUnlessStatusCode(http.StatusCreated, http.StatusOK), autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} + result.Response = resp return } // Delete the delete applicationgateway operation deletes the specified -// applicationgateway. +// applicationgateway. This method may poll for completion. Polling can be +// canceled by passing the cancel channel argument. The channel will be used +// to cancel polling and any outstanding HTTP requests. // // resourceGroupName is the name of the resource group. applicationGatewayName // is the name of the applicationgateway. 
-func (client ApplicationGatewaysClient) Delete(resourceGroupName string, applicationGatewayName string) (result autorest.Response, ae error) { - req, err := client.DeletePreparer(resourceGroupName, applicationGatewayName) +func (client ApplicationGatewaysClient) Delete(resourceGroupName string, applicationGatewayName string, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.DeletePreparer(resourceGroupName, applicationGatewayName, cancel) if err != nil { - return result, autorest.NewErrorWithError(err, "network/ApplicationGatewaysClient", "Delete", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "Delete", nil, "Failure preparing request") } resp, err := client.DeleteSender(req) if err != nil { result.Response = resp - return result, autorest.NewErrorWithError(err, "network/ApplicationGatewaysClient", "Delete", "Failure sending request") + return result, autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "Delete", resp, "Failure sending request") } result, err = client.DeleteResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "network/ApplicationGatewaysClient", "Delete", "Failure responding to request") + err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "Delete", resp, "Failure responding to request") } return } // DeletePreparer prepares the Delete request. 
-func (client ApplicationGatewaysClient) DeletePreparer(resourceGroupName string, applicationGatewayName string) (*http.Request, error) { +func (client ApplicationGatewaysClient) DeletePreparer(resourceGroupName string, applicationGatewayName string, cancel <-chan struct{}) (*http.Request, error) { pathParameters := map[string]interface{}{ - "applicationGatewayName": url.QueryEscape(applicationGatewayName), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "applicationGatewayName": autorest.Encode("path", applicationGatewayName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsDelete(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) } // DeleteSender sends the Delete request. The method will close the // http.Response Body if it receives an error. func (client ApplicationGatewaysClient) DeleteSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusNoContent, http.StatusAccepted, http.StatusOK) + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) } // DeleteResponder handles the response to the Delete request. 
The method always @@ -170,7 +176,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusNoContent, http.StatusAccepted, http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusAccepted, http.StatusNoContent, http.StatusOK), autorest.ByClosing()) result.Response = resp return @@ -181,21 +187,21 @@ // // resourceGroupName is the name of the resource group. applicationGatewayName // is the name of the applicationgateway. -func (client ApplicationGatewaysClient) Get(resourceGroupName string, applicationGatewayName string) (result ApplicationGateway, ae error) { +func (client ApplicationGatewaysClient) Get(resourceGroupName string, applicationGatewayName string) (result ApplicationGateway, err error) { req, err := client.GetPreparer(resourceGroupName, applicationGatewayName) if err != nil { - return result, autorest.NewErrorWithError(err, "network/ApplicationGatewaysClient", "Get", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "Get", nil, "Failure preparing request") } resp, err := client.GetSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network/ApplicationGatewaysClient", "Get", "Failure sending request") + return result, autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "Get", resp, "Failure sending request") } result, err = client.GetResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "network/ApplicationGatewaysClient", "Get", "Failure responding to request") + err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "Get", resp, "Failure responding to request") } return @@ -204,28 +210,27 @@ // GetPreparer prepares the Get request. 
func (client ApplicationGatewaysClient) GetPreparer(resourceGroupName string, applicationGatewayName string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "applicationGatewayName": url.QueryEscape(applicationGatewayName), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "applicationGatewayName": autorest.Encode("path", applicationGatewayName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client ApplicationGatewaysClient) GetSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetResponder handles the response to the Get request. 
The method always @@ -234,7 +239,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -245,21 +250,21 @@ // applicationgateways in a resource group. // // resourceGroupName is the name of the resource group. -func (client ApplicationGatewaysClient) List(resourceGroupName string) (result ApplicationGatewayListResult, ae error) { +func (client ApplicationGatewaysClient) List(resourceGroupName string) (result ApplicationGatewayListResult, err error) { req, err := client.ListPreparer(resourceGroupName) if err != nil { - return result, autorest.NewErrorWithError(err, "network/ApplicationGatewaysClient", "List", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "List", nil, "Failure preparing request") } resp, err := client.ListSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network/ApplicationGatewaysClient", "List", "Failure sending request") + return result, autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "List", resp, "Failure sending request") } result, err = client.ListResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "network/ApplicationGatewaysClient", "List", "Failure responding to request") + err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "List", resp, "Failure responding to request") } return @@ -268,27 +273,26 @@ // ListPreparer prepares the List request. 
func (client ApplicationGatewaysClient) ListPreparer(resourceGroupName string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client ApplicationGatewaysClient) ListSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // ListResponder handles the response to the List request. The method always @@ -297,7 +301,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -305,10 +309,10 @@ } // ListNextResults retrieves the next set of results, if any. 
-func (client ApplicationGatewaysClient) ListNextResults(lastResults ApplicationGatewayListResult) (result ApplicationGatewayListResult, ae error) { +func (client ApplicationGatewaysClient) ListNextResults(lastResults ApplicationGatewayListResult) (result ApplicationGatewayListResult, err error) { req, err := lastResults.ApplicationGatewayListResultPreparer() if err != nil { - return result, autorest.NewErrorWithError(err, "network/ApplicationGatewaysClient", "List", "Failure preparing next results request request") + return result, autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "List", nil, "Failure preparing next results request request") } if req == nil { return @@ -317,12 +321,12 @@ resp, err := client.ListSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network/ApplicationGatewaysClient", "List", "Failure sending next results request request") + return result, autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "List", resp, "Failure sending next results request request") } result, err = client.ListResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "network/ApplicationGatewaysClient", "List", "Failure responding to next results request request") + err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "List", resp, "Failure responding to next results request request") } return @@ -330,21 +334,21 @@ // ListAll the List applicationgateway opertion retrieves all the // applicationgateways in a subscription. 
-func (client ApplicationGatewaysClient) ListAll() (result ApplicationGatewayListResult, ae error) { +func (client ApplicationGatewaysClient) ListAll() (result ApplicationGatewayListResult, err error) { req, err := client.ListAllPreparer() if err != nil { - return result, autorest.NewErrorWithError(err, "network/ApplicationGatewaysClient", "ListAll", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "ListAll", nil, "Failure preparing request") } resp, err := client.ListAllSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network/ApplicationGatewaysClient", "ListAll", "Failure sending request") + return result, autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "ListAll", resp, "Failure sending request") } result, err = client.ListAllResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "network/ApplicationGatewaysClient", "ListAll", "Failure responding to request") + err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "ListAll", resp, "Failure responding to request") } return @@ -353,26 +357,25 @@ // ListAllPreparer prepares the ListAll request. 
func (client ApplicationGatewaysClient) ListAllPreparer() (*http.Request, error) { pathParameters := map[string]interface{}{ - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/providers/Microsoft.Network/applicationGateways"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Network/applicationGateways", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // ListAllSender sends the ListAll request. The method will close the // http.Response Body if it receives an error. func (client ApplicationGatewaysClient) ListAllSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // ListAllResponder handles the response to the ListAll request. The method always @@ -381,7 +384,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -389,10 +392,10 @@ } // ListAllNextResults retrieves the next set of results, if any. 
-func (client ApplicationGatewaysClient) ListAllNextResults(lastResults ApplicationGatewayListResult) (result ApplicationGatewayListResult, ae error) { +func (client ApplicationGatewaysClient) ListAllNextResults(lastResults ApplicationGatewayListResult) (result ApplicationGatewayListResult, err error) { req, err := lastResults.ApplicationGatewayListResultPreparer() if err != nil { - return result, autorest.NewErrorWithError(err, "network/ApplicationGatewaysClient", "ListAll", "Failure preparing next results request request") + return result, autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "ListAll", nil, "Failure preparing next results request request") } if req == nil { return @@ -401,67 +404,71 @@ resp, err := client.ListAllSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network/ApplicationGatewaysClient", "ListAll", "Failure sending next results request request") + return result, autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "ListAll", resp, "Failure sending next results request request") } result, err = client.ListAllResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "network/ApplicationGatewaysClient", "ListAll", "Failure responding to next results request request") + err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "ListAll", resp, "Failure responding to next results request request") } return } // Start the Start ApplicationGateway operation starts application gatewayin -// the specified resource group through Network resource provider. +// the specified resource group through Network resource provider. This +// method may poll for completion. Polling can be canceled by passing the +// cancel channel argument. The channel will be used to cancel polling and +// any outstanding HTTP requests. // // resourceGroupName is the name of the resource group. 
applicationGatewayName // is the name of the application gateway. -func (client ApplicationGatewaysClient) Start(resourceGroupName string, applicationGatewayName string) (result autorest.Response, ae error) { - req, err := client.StartPreparer(resourceGroupName, applicationGatewayName) +func (client ApplicationGatewaysClient) Start(resourceGroupName string, applicationGatewayName string, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.StartPreparer(resourceGroupName, applicationGatewayName, cancel) if err != nil { - return result, autorest.NewErrorWithError(err, "network/ApplicationGatewaysClient", "Start", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "Start", nil, "Failure preparing request") } resp, err := client.StartSender(req) if err != nil { result.Response = resp - return result, autorest.NewErrorWithError(err, "network/ApplicationGatewaysClient", "Start", "Failure sending request") + return result, autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "Start", resp, "Failure sending request") } result, err = client.StartResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "network/ApplicationGatewaysClient", "Start", "Failure responding to request") + err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "Start", resp, "Failure responding to request") } return } // StartPreparer prepares the Start request. 
-func (client ApplicationGatewaysClient) StartPreparer(resourceGroupName string, applicationGatewayName string) (*http.Request, error) { +func (client ApplicationGatewaysClient) StartPreparer(resourceGroupName string, applicationGatewayName string, cancel <-chan struct{}) (*http.Request, error) { pathParameters := map[string]interface{}{ - "applicationGatewayName": url.QueryEscape(applicationGatewayName), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "applicationGatewayName": autorest.Encode("path", applicationGatewayName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsPost(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}/start"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}/start", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) } // StartSender sends the Start request. The method will close the // http.Response Body if it receives an error. func (client ApplicationGatewaysClient) StartSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK, http.StatusAccepted) + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) } // StartResponder handles the response to the Start request. 
The method always @@ -470,62 +477,66 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), autorest.ByClosing()) result.Response = resp return } // Stop the STOP ApplicationGateway operation stops application gatewayin the -// specified resource group through Network resource provider. +// specified resource group through Network resource provider. This method +// may poll for completion. Polling can be canceled by passing the cancel +// channel argument. The channel will be used to cancel polling and any +// outstanding HTTP requests. // // resourceGroupName is the name of the resource group. applicationGatewayName // is the name of the application gateway. -func (client ApplicationGatewaysClient) Stop(resourceGroupName string, applicationGatewayName string) (result autorest.Response, ae error) { - req, err := client.StopPreparer(resourceGroupName, applicationGatewayName) +func (client ApplicationGatewaysClient) Stop(resourceGroupName string, applicationGatewayName string, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.StopPreparer(resourceGroupName, applicationGatewayName, cancel) if err != nil { - return result, autorest.NewErrorWithError(err, "network/ApplicationGatewaysClient", "Stop", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "Stop", nil, "Failure preparing request") } resp, err := client.StopSender(req) if err != nil { result.Response = resp - return result, autorest.NewErrorWithError(err, "network/ApplicationGatewaysClient", "Stop", "Failure sending request") + return result, autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "Stop", resp, "Failure sending request") } result, err = client.StopResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, 
"network/ApplicationGatewaysClient", "Stop", "Failure responding to request") + err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "Stop", resp, "Failure responding to request") } return } // StopPreparer prepares the Stop request. -func (client ApplicationGatewaysClient) StopPreparer(resourceGroupName string, applicationGatewayName string) (*http.Request, error) { +func (client ApplicationGatewaysClient) StopPreparer(resourceGroupName string, applicationGatewayName string, cancel <-chan struct{}) (*http.Request, error) { pathParameters := map[string]interface{}{ - "applicationGatewayName": url.QueryEscape(applicationGatewayName), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "applicationGatewayName": autorest.Encode("path", applicationGatewayName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsPost(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}/stop"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}/stop", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) } // StopSender sends the Stop request. The method will close the // http.Response Body if it receives an error. 
func (client ApplicationGatewaysClient) StopSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusAccepted, http.StatusOK) + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) } // StopResponder handles the response to the Stop request. The method always @@ -534,7 +545,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusAccepted, http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), autorest.ByClosing()) result.Response = resp return diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/network/client.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/network/client.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/network/client.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/network/client.go 2016-10-13 14:32:06.000000000 +0000 @@ -1,3 +1,10 @@ +// Package network implements the Azure ARM Network service API version +// 2016-03-30. +// +// The Microsoft Azure Network management API provides a RESTful set of web +// services that interact with Microsoft Azure Networks service to manage +// your network resrources. The API has entities that capture the +// relationship between an end user and the Microsoft Azure Networks service. package network // Copyright (c) Microsoft and contributors. All rights reserved. @@ -14,32 +21,29 @@ // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 // Changes may cause incorrect behavior and will be lost if the code is // regenerated. 
import ( - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" "net/http" - "net/url" ) const ( // APIVersion is the version of the Network - APIVersion = "2015-05-01-preview" + APIVersion = "2016-03-30" // DefaultBaseURI is the default URI used for the service Network DefaultBaseURI = "https://management.azure.com" ) -// ManagementClient is the the Windows Azure Network management API provides a -// RESTful set of web services that interact with Windows Azure Networks -// service to manage your network resrources. The API has entities that -// capture the relationship between an end user and the Windows Azure -// Networks service. +// ManagementClient is the base client for Network. type ManagementClient struct { autorest.Client BaseURI string + APIVersion string SubscriptionID string } @@ -53,6 +57,7 @@ return ManagementClient{ Client: autorest.NewClientWithUserAgent(UserAgent()), BaseURI: baseURI, + APIVersion: APIVersion, SubscriptionID: subscriptionID, } } @@ -63,21 +68,21 @@ // location is the location of the domain name domainNameLabel is the domain // name to be verified. It must conform to the following regular expression: // ^[a-z][a-z0-9-]{1,61}[a-z0-9]$. 
-func (client ManagementClient) CheckDNSNameAvailability(location string, domainNameLabel string) (result DNSNameAvailabilityResult, ae error) { +func (client ManagementClient) CheckDNSNameAvailability(location string, domainNameLabel string) (result DNSNameAvailabilityResult, err error) { req, err := client.CheckDNSNameAvailabilityPreparer(location, domainNameLabel) if err != nil { - return result, autorest.NewErrorWithError(err, "network/ManagementClient", "CheckDNSNameAvailability", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "network.ManagementClient", "CheckDNSNameAvailability", nil, "Failure preparing request") } resp, err := client.CheckDNSNameAvailabilitySender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network/ManagementClient", "CheckDNSNameAvailability", "Failure sending request") + return result, autorest.NewErrorWithError(err, "network.ManagementClient", "CheckDNSNameAvailability", resp, "Failure sending request") } result, err = client.CheckDNSNameAvailabilityResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "network/ManagementClient", "CheckDNSNameAvailability", "Failure responding to request") + err = autorest.NewErrorWithError(err, "network.ManagementClient", "CheckDNSNameAvailability", resp, "Failure responding to request") } return @@ -86,30 +91,29 @@ // CheckDNSNameAvailabilityPreparer prepares the CheckDNSNameAvailability request. 
func (client ManagementClient) CheckDNSNameAvailabilityPreparer(location string, domainNameLabel string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "location": url.QueryEscape(location), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "location": autorest.Encode("path", location), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } if len(domainNameLabel) > 0 { - queryParameters["domainNameLabel"] = domainNameLabel + queryParameters["domainNameLabel"] = autorest.Encode("query", domainNameLabel) } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/providers/Microsoft.Network/locations/{location}/CheckDnsNameAvailability"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Network/locations/{location}/CheckDnsNameAvailability", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // CheckDNSNameAvailabilitySender sends the CheckDNSNameAvailability request. The method will close the // http.Response Body if it receives an error. func (client ManagementClient) CheckDNSNameAvailabilitySender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // CheckDNSNameAvailabilityResponder handles the response to the CheckDNSNameAvailability request. 
The method always @@ -118,7 +122,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/network/expressroutecircuitauthorizations.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/network/expressroutecircuitauthorizations.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/network/expressroutecircuitauthorizations.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/network/expressroutecircuitauthorizations.go 2016-10-13 14:32:06.000000000 +0000 @@ -14,21 +14,21 @@ // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 // Changes may cause incorrect behavior and will be lost if the code is // regenerated. import ( - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" "net/http" - "net/url" ) -// ExpressRouteCircuitAuthorizationsClient is the the Windows Azure Network +// ExpressRouteCircuitAuthorizationsClient is the the Microsoft Azure Network // management API provides a RESTful set of web services that interact with -// Windows Azure Networks service to manage your network resrources. The API -// has entities that capture the relationship between an end user and the -// Windows Azure Networks service. +// Microsoft Azure Networks service to manage your network resrources. 
The +// API has entities that capture the relationship between an end user and the +// Microsoft Azure Networks service. type ExpressRouteCircuitAuthorizationsClient struct { ManagementClient } @@ -46,126 +46,134 @@ } // CreateOrUpdate the Put Authorization operation creates/updates an -// authorization in thespecified ExpressRouteCircuits +// authorization in thespecified ExpressRouteCircuits This method may poll +// for completion. Polling can be canceled by passing the cancel channel +// argument. The channel will be used to cancel polling and any outstanding +// HTTP requests. // // resourceGroupName is the name of the resource group. circuitName is the // name of the express route circuit. authorizationName is the name of the // authorization. authorizationParameters is parameters supplied to the // create/update ExpressRouteCircuitAuthorization operation -func (client ExpressRouteCircuitAuthorizationsClient) CreateOrUpdate(resourceGroupName string, circuitName string, authorizationName string, authorizationParameters ExpressRouteCircuitAuthorization) (result ExpressRouteCircuitAuthorization, ae error) { - req, err := client.CreateOrUpdatePreparer(resourceGroupName, circuitName, authorizationName, authorizationParameters) +func (client ExpressRouteCircuitAuthorizationsClient) CreateOrUpdate(resourceGroupName string, circuitName string, authorizationName string, authorizationParameters ExpressRouteCircuitAuthorization, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.CreateOrUpdatePreparer(resourceGroupName, circuitName, authorizationName, authorizationParameters, cancel) if err != nil { - return result, autorest.NewErrorWithError(err, "network/ExpressRouteCircuitAuthorizationsClient", "CreateOrUpdate", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitAuthorizationsClient", "CreateOrUpdate", nil, "Failure preparing request") } resp, err := 
client.CreateOrUpdateSender(req) if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network/ExpressRouteCircuitAuthorizationsClient", "CreateOrUpdate", "Failure sending request") + result.Response = resp + return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitAuthorizationsClient", "CreateOrUpdate", resp, "Failure sending request") } result, err = client.CreateOrUpdateResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "network/ExpressRouteCircuitAuthorizationsClient", "CreateOrUpdate", "Failure responding to request") + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitAuthorizationsClient", "CreateOrUpdate", resp, "Failure responding to request") } return } // CreateOrUpdatePreparer prepares the CreateOrUpdate request. -func (client ExpressRouteCircuitAuthorizationsClient) CreateOrUpdatePreparer(resourceGroupName string, circuitName string, authorizationName string, authorizationParameters ExpressRouteCircuitAuthorization) (*http.Request, error) { +func (client ExpressRouteCircuitAuthorizationsClient) CreateOrUpdatePreparer(resourceGroupName string, circuitName string, authorizationName string, authorizationParameters ExpressRouteCircuitAuthorization, cancel <-chan struct{}) (*http.Request, error) { pathParameters := map[string]interface{}{ - "authorizationName": url.QueryEscape(authorizationName), - "circuitName": url.QueryEscape(circuitName), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "authorizationName": autorest.Encode("path", authorizationName), + "circuitName": autorest.Encode("path", circuitName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return 
autorest.Prepare(&http.Request{}, + preparer := autorest.CreatePreparer( autorest.AsJSON(), autorest.AsPut(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/authorizations/{authorizationName}"), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/authorizations/{authorizationName}", pathParameters), autorest.WithJSON(authorizationParameters), - autorest.WithPathParameters(pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) } // CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the // http.Response Body if it receives an error. func (client ExpressRouteCircuitAuthorizationsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusCreated, http.StatusOK) + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) } // CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always // closes the http.Response Body. 
-func (client ExpressRouteCircuitAuthorizationsClient) CreateOrUpdateResponder(resp *http.Response) (result ExpressRouteCircuitAuthorization, err error) { +func (client ExpressRouteCircuitAuthorizationsClient) CreateOrUpdateResponder(resp *http.Response) (result autorest.Response, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusCreated, http.StatusOK), - autorest.ByUnmarshallingJSON(&result), + azure.WithErrorUnlessStatusCode(http.StatusCreated, http.StatusOK), autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} + result.Response = resp return } // Delete the delete authorization operation deletes the specified -// authorization from the specified ExpressRouteCircuit. +// authorization from the specified ExpressRouteCircuit. This method may poll +// for completion. Polling can be canceled by passing the cancel channel +// argument. The channel will be used to cancel polling and any outstanding +// HTTP requests. // // resourceGroupName is the name of the resource group. circuitName is the // name of the express route circuit. authorizationName is the name of the // authorization. 
-func (client ExpressRouteCircuitAuthorizationsClient) Delete(resourceGroupName string, circuitName string, authorizationName string) (result autorest.Response, ae error) { - req, err := client.DeletePreparer(resourceGroupName, circuitName, authorizationName) +func (client ExpressRouteCircuitAuthorizationsClient) Delete(resourceGroupName string, circuitName string, authorizationName string, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.DeletePreparer(resourceGroupName, circuitName, authorizationName, cancel) if err != nil { - return result, autorest.NewErrorWithError(err, "network/ExpressRouteCircuitAuthorizationsClient", "Delete", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitAuthorizationsClient", "Delete", nil, "Failure preparing request") } resp, err := client.DeleteSender(req) if err != nil { result.Response = resp - return result, autorest.NewErrorWithError(err, "network/ExpressRouteCircuitAuthorizationsClient", "Delete", "Failure sending request") + return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitAuthorizationsClient", "Delete", resp, "Failure sending request") } result, err = client.DeleteResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "network/ExpressRouteCircuitAuthorizationsClient", "Delete", "Failure responding to request") + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitAuthorizationsClient", "Delete", resp, "Failure responding to request") } return } // DeletePreparer prepares the Delete request. 
-func (client ExpressRouteCircuitAuthorizationsClient) DeletePreparer(resourceGroupName string, circuitName string, authorizationName string) (*http.Request, error) { +func (client ExpressRouteCircuitAuthorizationsClient) DeletePreparer(resourceGroupName string, circuitName string, authorizationName string, cancel <-chan struct{}) (*http.Request, error) { pathParameters := map[string]interface{}{ - "authorizationName": url.QueryEscape(authorizationName), - "circuitName": url.QueryEscape(circuitName), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "authorizationName": autorest.Encode("path", authorizationName), + "circuitName": autorest.Encode("path", circuitName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsDelete(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/authorizations/{authorizationName}"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/authorizations/{authorizationName}", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) } // DeleteSender sends the Delete request. The method will close the // http.Response Body if it receives an error. 
func (client ExpressRouteCircuitAuthorizationsClient) DeleteSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusNoContent, http.StatusAccepted, http.StatusOK) + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) } // DeleteResponder handles the response to the Delete request. The method always @@ -174,7 +182,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusNoContent, http.StatusAccepted, http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusAccepted, http.StatusOK, http.StatusNoContent), autorest.ByClosing()) result.Response = resp return @@ -186,21 +194,21 @@ // resourceGroupName is the name of the resource group. circuitName is the // name of the express route circuit. authorizationName is the name of the // authorization. -func (client ExpressRouteCircuitAuthorizationsClient) Get(resourceGroupName string, circuitName string, authorizationName string) (result ExpressRouteCircuitAuthorization, ae error) { +func (client ExpressRouteCircuitAuthorizationsClient) Get(resourceGroupName string, circuitName string, authorizationName string) (result ExpressRouteCircuitAuthorization, err error) { req, err := client.GetPreparer(resourceGroupName, circuitName, authorizationName) if err != nil { - return result, autorest.NewErrorWithError(err, "network/ExpressRouteCircuitAuthorizationsClient", "Get", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitAuthorizationsClient", "Get", nil, "Failure preparing request") } resp, err := client.GetSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network/ExpressRouteCircuitAuthorizationsClient", "Get", "Failure sending request") + return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitAuthorizationsClient", "Get", resp, "Failure sending 
request") } result, err = client.GetResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "network/ExpressRouteCircuitAuthorizationsClient", "Get", "Failure responding to request") + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitAuthorizationsClient", "Get", resp, "Failure responding to request") } return @@ -209,29 +217,28 @@ // GetPreparer prepares the Get request. func (client ExpressRouteCircuitAuthorizationsClient) GetPreparer(resourceGroupName string, circuitName string, authorizationName string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "authorizationName": url.QueryEscape(authorizationName), - "circuitName": url.QueryEscape(circuitName), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "authorizationName": autorest.Encode("path", authorizationName), + "circuitName": autorest.Encode("path", circuitName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/authorizations/{authorizationName}"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/authorizations/{authorizationName}", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetSender sends the Get request. 
The method will close the // http.Response Body if it receives an error. func (client ExpressRouteCircuitAuthorizationsClient) GetSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetResponder handles the response to the Get request. The method always @@ -240,7 +247,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -252,21 +259,21 @@ // // resourceGroupName is the name of the resource group. circuitName is the // name of the curcuit. -func (client ExpressRouteCircuitAuthorizationsClient) List(resourceGroupName string, circuitName string) (result AuthorizationListResult, ae error) { +func (client ExpressRouteCircuitAuthorizationsClient) List(resourceGroupName string, circuitName string) (result AuthorizationListResult, err error) { req, err := client.ListPreparer(resourceGroupName, circuitName) if err != nil { - return result, autorest.NewErrorWithError(err, "network/ExpressRouteCircuitAuthorizationsClient", "List", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitAuthorizationsClient", "List", nil, "Failure preparing request") } resp, err := client.ListSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network/ExpressRouteCircuitAuthorizationsClient", "List", "Failure sending request") + return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitAuthorizationsClient", "List", resp, "Failure sending request") } result, err = client.ListResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "network/ExpressRouteCircuitAuthorizationsClient", "List", "Failure responding to request") + err 
= autorest.NewErrorWithError(err, "network.ExpressRouteCircuitAuthorizationsClient", "List", resp, "Failure responding to request") } return @@ -275,28 +282,27 @@ // ListPreparer prepares the List request. func (client ExpressRouteCircuitAuthorizationsClient) ListPreparer(resourceGroupName string, circuitName string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "circuitName": url.QueryEscape(circuitName), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "circuitName": autorest.Encode("path", circuitName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/authorizations"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/authorizations", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client ExpressRouteCircuitAuthorizationsClient) ListSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // ListResponder handles the response to the List request. 
The method always @@ -305,7 +311,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -313,10 +319,10 @@ } // ListNextResults retrieves the next set of results, if any. -func (client ExpressRouteCircuitAuthorizationsClient) ListNextResults(lastResults AuthorizationListResult) (result AuthorizationListResult, ae error) { +func (client ExpressRouteCircuitAuthorizationsClient) ListNextResults(lastResults AuthorizationListResult) (result AuthorizationListResult, err error) { req, err := lastResults.AuthorizationListResultPreparer() if err != nil { - return result, autorest.NewErrorWithError(err, "network/ExpressRouteCircuitAuthorizationsClient", "List", "Failure preparing next results request request") + return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitAuthorizationsClient", "List", nil, "Failure preparing next results request request") } if req == nil { return @@ -325,12 +331,12 @@ resp, err := client.ListSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network/ExpressRouteCircuitAuthorizationsClient", "List", "Failure sending next results request request") + return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitAuthorizationsClient", "List", resp, "Failure sending next results request request") } result, err = client.ListResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "network/ExpressRouteCircuitAuthorizationsClient", "List", "Failure responding to next results request request") + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitAuthorizationsClient", "List", resp, "Failure responding to next results request request") } return diff -Nru 
juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/network/expressroutecircuitpeerings.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/network/expressroutecircuitpeerings.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/network/expressroutecircuitpeerings.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/network/expressroutecircuitpeerings.go 2016-10-13 14:32:06.000000000 +0000 @@ -14,21 +14,21 @@ // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 // Changes may cause incorrect behavior and will be lost if the code is // regenerated. import ( - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" "net/http" - "net/url" ) -// ExpressRouteCircuitPeeringsClient is the the Windows Azure Network +// ExpressRouteCircuitPeeringsClient is the the Microsoft Azure Network // management API provides a RESTful set of web services that interact with -// Windows Azure Networks service to manage your network resrources. The API -// has entities that capture the relationship between an end user and the -// Windows Azure Networks service. +// Microsoft Azure Networks service to manage your network resrources. The +// API has entities that capture the relationship between an end user and the +// Microsoft Azure Networks service. type ExpressRouteCircuitPeeringsClient struct { ManagementClient } @@ -46,125 +46,131 @@ } // CreateOrUpdate the Put Pering operation creates/updates an peering in the -// specified ExpressRouteCircuits +// specified ExpressRouteCircuits This method may poll for completion. +// Polling can be canceled by passing the cancel channel argument. 
The +// channel will be used to cancel polling and any outstanding HTTP requests. // // resourceGroupName is the name of the resource group. circuitName is the // name of the express route circuit. peeringName is the name of the peering. // peeringParameters is parameters supplied to the create/update // ExpressRouteCircuit Peering operation -func (client ExpressRouteCircuitPeeringsClient) CreateOrUpdate(resourceGroupName string, circuitName string, peeringName string, peeringParameters ExpressRouteCircuitPeering) (result ExpressRouteCircuitPeering, ae error) { - req, err := client.CreateOrUpdatePreparer(resourceGroupName, circuitName, peeringName, peeringParameters) +func (client ExpressRouteCircuitPeeringsClient) CreateOrUpdate(resourceGroupName string, circuitName string, peeringName string, peeringParameters ExpressRouteCircuitPeering, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.CreateOrUpdatePreparer(resourceGroupName, circuitName, peeringName, peeringParameters, cancel) if err != nil { - return result, autorest.NewErrorWithError(err, "network/ExpressRouteCircuitPeeringsClient", "CreateOrUpdate", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitPeeringsClient", "CreateOrUpdate", nil, "Failure preparing request") } resp, err := client.CreateOrUpdateSender(req) if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network/ExpressRouteCircuitPeeringsClient", "CreateOrUpdate", "Failure sending request") + result.Response = resp + return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitPeeringsClient", "CreateOrUpdate", resp, "Failure sending request") } result, err = client.CreateOrUpdateResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "network/ExpressRouteCircuitPeeringsClient", "CreateOrUpdate", "Failure responding to request") + err = 
autorest.NewErrorWithError(err, "network.ExpressRouteCircuitPeeringsClient", "CreateOrUpdate", resp, "Failure responding to request") } return } // CreateOrUpdatePreparer prepares the CreateOrUpdate request. -func (client ExpressRouteCircuitPeeringsClient) CreateOrUpdatePreparer(resourceGroupName string, circuitName string, peeringName string, peeringParameters ExpressRouteCircuitPeering) (*http.Request, error) { +func (client ExpressRouteCircuitPeeringsClient) CreateOrUpdatePreparer(resourceGroupName string, circuitName string, peeringName string, peeringParameters ExpressRouteCircuitPeering, cancel <-chan struct{}) (*http.Request, error) { pathParameters := map[string]interface{}{ - "circuitName": url.QueryEscape(circuitName), - "peeringName": url.QueryEscape(peeringName), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "circuitName": autorest.Encode("path", circuitName), + "peeringName": autorest.Encode("path", peeringName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, + preparer := autorest.CreatePreparer( autorest.AsJSON(), autorest.AsPut(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}"), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}", pathParameters), autorest.WithJSON(peeringParameters), - autorest.WithPathParameters(pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) } // 
CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the // http.Response Body if it receives an error. func (client ExpressRouteCircuitPeeringsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusCreated, http.StatusOK) + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) } // CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always // closes the http.Response Body. -func (client ExpressRouteCircuitPeeringsClient) CreateOrUpdateResponder(resp *http.Response) (result ExpressRouteCircuitPeering, err error) { +func (client ExpressRouteCircuitPeeringsClient) CreateOrUpdateResponder(resp *http.Response) (result autorest.Response, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusCreated, http.StatusOK), - autorest.ByUnmarshallingJSON(&result), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} + result.Response = resp return } // Delete the delete peering operation deletes the specified peering from the -// ExpressRouteCircuit. +// ExpressRouteCircuit. This method may poll for completion. Polling can be +// canceled by passing the cancel channel argument. The channel will be used +// to cancel polling and any outstanding HTTP requests. // // resourceGroupName is the name of the resource group. circuitName is the // name of the express route circuit. peeringName is the name of the peering. 
-func (client ExpressRouteCircuitPeeringsClient) Delete(resourceGroupName string, circuitName string, peeringName string) (result autorest.Response, ae error) { - req, err := client.DeletePreparer(resourceGroupName, circuitName, peeringName) +func (client ExpressRouteCircuitPeeringsClient) Delete(resourceGroupName string, circuitName string, peeringName string, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.DeletePreparer(resourceGroupName, circuitName, peeringName, cancel) if err != nil { - return result, autorest.NewErrorWithError(err, "network/ExpressRouteCircuitPeeringsClient", "Delete", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitPeeringsClient", "Delete", nil, "Failure preparing request") } resp, err := client.DeleteSender(req) if err != nil { result.Response = resp - return result, autorest.NewErrorWithError(err, "network/ExpressRouteCircuitPeeringsClient", "Delete", "Failure sending request") + return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitPeeringsClient", "Delete", resp, "Failure sending request") } result, err = client.DeleteResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "network/ExpressRouteCircuitPeeringsClient", "Delete", "Failure responding to request") + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitPeeringsClient", "Delete", resp, "Failure responding to request") } return } // DeletePreparer prepares the Delete request. 
-func (client ExpressRouteCircuitPeeringsClient) DeletePreparer(resourceGroupName string, circuitName string, peeringName string) (*http.Request, error) { +func (client ExpressRouteCircuitPeeringsClient) DeletePreparer(resourceGroupName string, circuitName string, peeringName string, cancel <-chan struct{}) (*http.Request, error) { pathParameters := map[string]interface{}{ - "circuitName": url.QueryEscape(circuitName), - "peeringName": url.QueryEscape(peeringName), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "circuitName": autorest.Encode("path", circuitName), + "peeringName": autorest.Encode("path", peeringName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsDelete(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) } // DeleteSender sends the Delete request. The method will close the // http.Response Body if it receives an error. 
func (client ExpressRouteCircuitPeeringsClient) DeleteSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusNoContent, http.StatusAccepted, http.StatusOK) + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) } // DeleteResponder handles the response to the Delete request. The method always @@ -173,7 +179,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusNoContent, http.StatusAccepted, http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), autorest.ByClosing()) result.Response = resp return @@ -184,21 +190,21 @@ // // resourceGroupName is the name of the resource group. circuitName is the // name of the express route circuit. peeringName is the name of the peering. -func (client ExpressRouteCircuitPeeringsClient) Get(resourceGroupName string, circuitName string, peeringName string) (result ExpressRouteCircuitPeering, ae error) { +func (client ExpressRouteCircuitPeeringsClient) Get(resourceGroupName string, circuitName string, peeringName string) (result ExpressRouteCircuitPeering, err error) { req, err := client.GetPreparer(resourceGroupName, circuitName, peeringName) if err != nil { - return result, autorest.NewErrorWithError(err, "network/ExpressRouteCircuitPeeringsClient", "Get", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitPeeringsClient", "Get", nil, "Failure preparing request") } resp, err := client.GetSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network/ExpressRouteCircuitPeeringsClient", "Get", "Failure sending request") + return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitPeeringsClient", "Get", resp, "Failure sending request") } result, err = client.GetResponder(resp) if err != nil { - ae = 
autorest.NewErrorWithError(err, "network/ExpressRouteCircuitPeeringsClient", "Get", "Failure responding to request") + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitPeeringsClient", "Get", resp, "Failure responding to request") } return @@ -207,29 +213,28 @@ // GetPreparer prepares the Get request. func (client ExpressRouteCircuitPeeringsClient) GetPreparer(resourceGroupName string, circuitName string, peeringName string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "circuitName": url.QueryEscape(circuitName), - "peeringName": url.QueryEscape(peeringName), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "circuitName": autorest.Encode("path", circuitName), + "peeringName": autorest.Encode("path", peeringName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. 
func (client ExpressRouteCircuitPeeringsClient) GetSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetResponder handles the response to the Get request. The method always @@ -238,7 +243,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -250,21 +255,21 @@ // // resourceGroupName is the name of the resource group. circuitName is the // name of the curcuit. -func (client ExpressRouteCircuitPeeringsClient) List(resourceGroupName string, circuitName string) (result ExpressRouteCircuitPeeringListResult, ae error) { +func (client ExpressRouteCircuitPeeringsClient) List(resourceGroupName string, circuitName string) (result ExpressRouteCircuitPeeringListResult, err error) { req, err := client.ListPreparer(resourceGroupName, circuitName) if err != nil { - return result, autorest.NewErrorWithError(err, "network/ExpressRouteCircuitPeeringsClient", "List", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitPeeringsClient", "List", nil, "Failure preparing request") } resp, err := client.ListSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network/ExpressRouteCircuitPeeringsClient", "List", "Failure sending request") + return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitPeeringsClient", "List", resp, "Failure sending request") } result, err = client.ListResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "network/ExpressRouteCircuitPeeringsClient", "List", "Failure responding to request") + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitPeeringsClient", "List", resp, 
"Failure responding to request") } return @@ -273,28 +278,27 @@ // ListPreparer prepares the List request. func (client ExpressRouteCircuitPeeringsClient) ListPreparer(resourceGroupName string, circuitName string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "circuitName": url.QueryEscape(circuitName), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "circuitName": autorest.Encode("path", circuitName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client ExpressRouteCircuitPeeringsClient) ListSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // ListResponder handles the response to the List request. 
The method always @@ -303,7 +307,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -311,10 +315,10 @@ } // ListNextResults retrieves the next set of results, if any. -func (client ExpressRouteCircuitPeeringsClient) ListNextResults(lastResults ExpressRouteCircuitPeeringListResult) (result ExpressRouteCircuitPeeringListResult, ae error) { +func (client ExpressRouteCircuitPeeringsClient) ListNextResults(lastResults ExpressRouteCircuitPeeringListResult) (result ExpressRouteCircuitPeeringListResult, err error) { req, err := lastResults.ExpressRouteCircuitPeeringListResultPreparer() if err != nil { - return result, autorest.NewErrorWithError(err, "network/ExpressRouteCircuitPeeringsClient", "List", "Failure preparing next results request request") + return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitPeeringsClient", "List", nil, "Failure preparing next results request request") } if req == nil { return @@ -323,12 +327,12 @@ resp, err := client.ListSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network/ExpressRouteCircuitPeeringsClient", "List", "Failure sending next results request request") + return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitPeeringsClient", "List", resp, "Failure sending next results request request") } result, err = client.ListResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "network/ExpressRouteCircuitPeeringsClient", "List", "Failure responding to next results request request") + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitPeeringsClient", "List", resp, "Failure responding to next results request request") } return diff -Nru 
juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/network/expressroutecircuits.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/network/expressroutecircuits.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/network/expressroutecircuits.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/network/expressroutecircuits.go 2016-10-13 14:32:06.000000000 +0000 @@ -14,21 +14,21 @@ // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 // Changes may cause incorrect behavior and will be lost if the code is // regenerated. import ( - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" "net/http" - "net/url" ) -// ExpressRouteCircuitsClient is the the Windows Azure Network management API -// provides a RESTful set of web services that interact with Windows Azure -// Networks service to manage your network resrources. The API has entities -// that capture the relationship between an end user and the Windows Azure -// Networks service. +// ExpressRouteCircuitsClient is the the Microsoft Azure Network management +// API provides a RESTful set of web services that interact with Microsoft +// Azure Networks service to manage your network resrources. The API has +// entities that capture the relationship between an end user and the +// Microsoft Azure Networks service. type ExpressRouteCircuitsClient struct { ManagementClient } @@ -46,122 +46,128 @@ } // CreateOrUpdate the Put ExpressRouteCircuit operation creates/updates a -// ExpressRouteCircuit +// ExpressRouteCircuit This method may poll for completion. 
Polling can be +// canceled by passing the cancel channel argument. The channel will be used +// to cancel polling and any outstanding HTTP requests. // // resourceGroupName is the name of the resource group. circuitName is the // name of the circuit. parameters is parameters supplied to the // create/delete ExpressRouteCircuit operation -func (client ExpressRouteCircuitsClient) CreateOrUpdate(resourceGroupName string, circuitName string, parameters ExpressRouteCircuit) (result ExpressRouteCircuit, ae error) { - req, err := client.CreateOrUpdatePreparer(resourceGroupName, circuitName, parameters) +func (client ExpressRouteCircuitsClient) CreateOrUpdate(resourceGroupName string, circuitName string, parameters ExpressRouteCircuit, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.CreateOrUpdatePreparer(resourceGroupName, circuitName, parameters, cancel) if err != nil { - return result, autorest.NewErrorWithError(err, "network/ExpressRouteCircuitsClient", "CreateOrUpdate", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "CreateOrUpdate", nil, "Failure preparing request") } resp, err := client.CreateOrUpdateSender(req) if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network/ExpressRouteCircuitsClient", "CreateOrUpdate", "Failure sending request") + result.Response = resp + return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "CreateOrUpdate", resp, "Failure sending request") } result, err = client.CreateOrUpdateResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "network/ExpressRouteCircuitsClient", "CreateOrUpdate", "Failure responding to request") + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "CreateOrUpdate", resp, "Failure responding to request") } return } // CreateOrUpdatePreparer prepares the CreateOrUpdate 
request. -func (client ExpressRouteCircuitsClient) CreateOrUpdatePreparer(resourceGroupName string, circuitName string, parameters ExpressRouteCircuit) (*http.Request, error) { +func (client ExpressRouteCircuitsClient) CreateOrUpdatePreparer(resourceGroupName string, circuitName string, parameters ExpressRouteCircuit, cancel <-chan struct{}) (*http.Request, error) { pathParameters := map[string]interface{}{ - "circuitName": url.QueryEscape(circuitName), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "circuitName": autorest.Encode("path", circuitName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, + preparer := autorest.CreatePreparer( autorest.AsJSON(), autorest.AsPut(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/"), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}", pathParameters), autorest.WithJSON(parameters), - autorest.WithPathParameters(pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) } // CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the // http.Response Body if it receives an error. 
func (client ExpressRouteCircuitsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusCreated, http.StatusOK) + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) } // CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always // closes the http.Response Body. -func (client ExpressRouteCircuitsClient) CreateOrUpdateResponder(resp *http.Response) (result ExpressRouteCircuit, err error) { +func (client ExpressRouteCircuitsClient) CreateOrUpdateResponder(resp *http.Response) (result autorest.Response, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusCreated, http.StatusOK), - autorest.ByUnmarshallingJSON(&result), + azure.WithErrorUnlessStatusCode(http.StatusCreated, http.StatusOK), autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} + result.Response = resp return } // Delete the delete ExpressRouteCircuit operation deletes the specified -// ExpressRouteCircuit. +// ExpressRouteCircuit. This method may poll for completion. Polling can be +// canceled by passing the cancel channel argument. The channel will be used +// to cancel polling and any outstanding HTTP requests. // // resourceGroupName is the name of the resource group. circuitName is the // name of the express route Circuit. 
-func (client ExpressRouteCircuitsClient) Delete(resourceGroupName string, circuitName string) (result autorest.Response, ae error) { - req, err := client.DeletePreparer(resourceGroupName, circuitName) +func (client ExpressRouteCircuitsClient) Delete(resourceGroupName string, circuitName string, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.DeletePreparer(resourceGroupName, circuitName, cancel) if err != nil { - return result, autorest.NewErrorWithError(err, "network/ExpressRouteCircuitsClient", "Delete", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "Delete", nil, "Failure preparing request") } resp, err := client.DeleteSender(req) if err != nil { result.Response = resp - return result, autorest.NewErrorWithError(err, "network/ExpressRouteCircuitsClient", "Delete", "Failure sending request") + return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "Delete", resp, "Failure sending request") } result, err = client.DeleteResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "network/ExpressRouteCircuitsClient", "Delete", "Failure responding to request") + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "Delete", resp, "Failure responding to request") } return } // DeletePreparer prepares the Delete request. 
-func (client ExpressRouteCircuitsClient) DeletePreparer(resourceGroupName string, circuitName string) (*http.Request, error) { +func (client ExpressRouteCircuitsClient) DeletePreparer(resourceGroupName string, circuitName string, cancel <-chan struct{}) (*http.Request, error) { pathParameters := map[string]interface{}{ - "circuitName": url.QueryEscape(circuitName), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "circuitName": autorest.Encode("path", circuitName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsDelete(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) } // DeleteSender sends the Delete request. The method will close the // http.Response Body if it receives an error. func (client ExpressRouteCircuitsClient) DeleteSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusNoContent, http.StatusAccepted, http.StatusOK) + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) } // DeleteResponder handles the response to the Delete request. 
The method always @@ -170,7 +176,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusNoContent, http.StatusAccepted, http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusNoContent, http.StatusAccepted, http.StatusOK), autorest.ByClosing()) result.Response = resp return @@ -181,21 +187,21 @@ // // resourceGroupName is the name of the resource group. circuitName is the // name of the circuit. -func (client ExpressRouteCircuitsClient) Get(resourceGroupName string, circuitName string) (result ExpressRouteCircuit, ae error) { +func (client ExpressRouteCircuitsClient) Get(resourceGroupName string, circuitName string) (result ExpressRouteCircuit, err error) { req, err := client.GetPreparer(resourceGroupName, circuitName) if err != nil { - return result, autorest.NewErrorWithError(err, "network/ExpressRouteCircuitsClient", "Get", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "Get", nil, "Failure preparing request") } resp, err := client.GetSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network/ExpressRouteCircuitsClient", "Get", "Failure sending request") + return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "Get", resp, "Failure sending request") } result, err = client.GetResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "network/ExpressRouteCircuitsClient", "Get", "Failure responding to request") + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "Get", resp, "Failure responding to request") } return @@ -204,28 +210,27 @@ // GetPreparer prepares the Get request. 
func (client ExpressRouteCircuitsClient) GetPreparer(resourceGroupName string, circuitName string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "circuitName": url.QueryEscape(circuitName), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "circuitName": autorest.Encode("path", circuitName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client ExpressRouteCircuitsClient) GetSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetResponder handles the response to the Get request. 
The method always @@ -234,7 +239,136 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// GetPeeringStats the Liststats ExpressRouteCircuit opertion retrieves all +// the stats from a ExpressRouteCircuits in a resource group. +// +// resourceGroupName is the name of the resource group. circuitName is the +// name of the circuit. peeringName is the name of the peering. +func (client ExpressRouteCircuitsClient) GetPeeringStats(resourceGroupName string, circuitName string, peeringName string) (result ExpressRouteCircuitStats, err error) { + req, err := client.GetPeeringStatsPreparer(resourceGroupName, circuitName, peeringName) + if err != nil { + return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "GetPeeringStats", nil, "Failure preparing request") + } + + resp, err := client.GetPeeringStatsSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "GetPeeringStats", resp, "Failure sending request") + } + + result, err = client.GetPeeringStatsResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "GetPeeringStats", resp, "Failure responding to request") + } + + return +} + +// GetPeeringStatsPreparer prepares the GetPeeringStats request. 
+func (client ExpressRouteCircuitsClient) GetPeeringStatsPreparer(resourceGroupName string, circuitName string, peeringName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "circuitName": autorest.Encode("path", circuitName), + "peeringName": autorest.Encode("path", peeringName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/stats", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetPeeringStatsSender sends the GetPeeringStats request. The method will close the +// http.Response Body if it receives an error. +func (client ExpressRouteCircuitsClient) GetPeeringStatsSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetPeeringStatsResponder handles the response to the GetPeeringStats request. The method always +// closes the http.Response Body. +func (client ExpressRouteCircuitsClient) GetPeeringStatsResponder(resp *http.Response) (result ExpressRouteCircuitStats, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// GetStats the Liststats ExpressRouteCircuit opertion retrieves all the stats +// from a ExpressRouteCircuits in a resource group. +// +// resourceGroupName is the name of the resource group. circuitName is the +// name of the circuit. 
+func (client ExpressRouteCircuitsClient) GetStats(resourceGroupName string, circuitName string) (result ExpressRouteCircuitStats, err error) { + req, err := client.GetStatsPreparer(resourceGroupName, circuitName) + if err != nil { + return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "GetStats", nil, "Failure preparing request") + } + + resp, err := client.GetStatsSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "GetStats", resp, "Failure sending request") + } + + result, err = client.GetStatsResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "GetStats", resp, "Failure responding to request") + } + + return +} + +// GetStatsPreparer prepares the GetStats request. +func (client ExpressRouteCircuitsClient) GetStatsPreparer(resourceGroupName string, circuitName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "circuitName": autorest.Encode("path", circuitName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/stats", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetStatsSender sends the GetStats request. The method will close the +// http.Response Body if it receives an error. 
+func (client ExpressRouteCircuitsClient) GetStatsSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetStatsResponder handles the response to the GetStats request. The method always +// closes the http.Response Body. +func (client ExpressRouteCircuitsClient) GetStatsResponder(resp *http.Response) (result ExpressRouteCircuitStats, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -245,21 +379,21 @@ // ExpressRouteCircuits in a resource group. // // resourceGroupName is the name of the resource group. -func (client ExpressRouteCircuitsClient) List(resourceGroupName string) (result ExpressRouteCircuitListResult, ae error) { +func (client ExpressRouteCircuitsClient) List(resourceGroupName string) (result ExpressRouteCircuitListResult, err error) { req, err := client.ListPreparer(resourceGroupName) if err != nil { - return result, autorest.NewErrorWithError(err, "network/ExpressRouteCircuitsClient", "List", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "List", nil, "Failure preparing request") } resp, err := client.ListSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network/ExpressRouteCircuitsClient", "List", "Failure sending request") + return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "List", resp, "Failure sending request") } result, err = client.ListResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "network/ExpressRouteCircuitsClient", "List", "Failure responding to request") + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "List", resp, "Failure responding to request") } return @@ -268,27 
+402,26 @@ // ListPreparer prepares the List request. func (client ExpressRouteCircuitsClient) ListPreparer(resourceGroupName string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client ExpressRouteCircuitsClient) ListSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // ListResponder handles the response to the List request. The method always @@ -297,7 +430,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -305,10 +438,10 @@ } // ListNextResults retrieves the next set of results, if any. 
-func (client ExpressRouteCircuitsClient) ListNextResults(lastResults ExpressRouteCircuitListResult) (result ExpressRouteCircuitListResult, ae error) { +func (client ExpressRouteCircuitsClient) ListNextResults(lastResults ExpressRouteCircuitListResult) (result ExpressRouteCircuitListResult, err error) { req, err := lastResults.ExpressRouteCircuitListResultPreparer() if err != nil { - return result, autorest.NewErrorWithError(err, "network/ExpressRouteCircuitsClient", "List", "Failure preparing next results request request") + return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "List", nil, "Failure preparing next results request request") } if req == nil { return @@ -317,12 +450,12 @@ resp, err := client.ListSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network/ExpressRouteCircuitsClient", "List", "Failure sending next results request request") + return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "List", resp, "Failure sending next results request request") } result, err = client.ListResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "network/ExpressRouteCircuitsClient", "List", "Failure responding to next results request request") + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "List", resp, "Failure responding to next results request request") } return @@ -330,21 +463,21 @@ // ListAll the List ExpressRouteCircuit opertion retrieves all the // ExpressRouteCircuits in a subscription. 
-func (client ExpressRouteCircuitsClient) ListAll() (result ExpressRouteCircuitListResult, ae error) { +func (client ExpressRouteCircuitsClient) ListAll() (result ExpressRouteCircuitListResult, err error) { req, err := client.ListAllPreparer() if err != nil { - return result, autorest.NewErrorWithError(err, "network/ExpressRouteCircuitsClient", "ListAll", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "ListAll", nil, "Failure preparing request") } resp, err := client.ListAllSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network/ExpressRouteCircuitsClient", "ListAll", "Failure sending request") + return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "ListAll", resp, "Failure sending request") } result, err = client.ListAllResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "network/ExpressRouteCircuitsClient", "ListAll", "Failure responding to request") + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "ListAll", resp, "Failure responding to request") } return @@ -353,26 +486,25 @@ // ListAllPreparer prepares the ListAll request. 
func (client ExpressRouteCircuitsClient) ListAllPreparer() (*http.Request, error) { pathParameters := map[string]interface{}{ - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/providers/Microsoft.Network/expressRouteCircuits"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Network/expressRouteCircuits", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // ListAllSender sends the ListAll request. The method will close the // http.Response Body if it receives an error. func (client ExpressRouteCircuitsClient) ListAllSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // ListAllResponder handles the response to the ListAll request. The method always @@ -381,7 +513,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -389,10 +521,10 @@ } // ListAllNextResults retrieves the next set of results, if any. 
-func (client ExpressRouteCircuitsClient) ListAllNextResults(lastResults ExpressRouteCircuitListResult) (result ExpressRouteCircuitListResult, ae error) { +func (client ExpressRouteCircuitsClient) ListAllNextResults(lastResults ExpressRouteCircuitListResult) (result ExpressRouteCircuitListResult, err error) { req, err := lastResults.ExpressRouteCircuitListResultPreparer() if err != nil { - return result, autorest.NewErrorWithError(err, "network/ExpressRouteCircuitsClient", "ListAll", "Failure preparing next results request request") + return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "ListAll", nil, "Failure preparing next results request request") } if req == nil { return @@ -401,12 +533,12 @@ resp, err := client.ListAllSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network/ExpressRouteCircuitsClient", "ListAll", "Failure sending next results request request") + return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "ListAll", resp, "Failure sending next results request request") } result, err = client.ListAllResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "network/ExpressRouteCircuitsClient", "ListAll", "Failure responding to next results request request") + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "ListAll", resp, "Failure responding to next results request request") } return @@ -414,269 +546,216 @@ // ListArpTable the ListArpTable from ExpressRouteCircuit opertion retrieves // the currently advertised arp table associated with the -// ExpressRouteCircuits in a resource group. +// ExpressRouteCircuits in a resource group. This method may poll for +// completion. Polling can be canceled by passing the cancel channel +// argument. The channel will be used to cancel polling and any outstanding +// HTTP requests. 
// // resourceGroupName is the name of the resource group. circuitName is the -// name of the circuit. -func (client ExpressRouteCircuitsClient) ListArpTable(resourceGroupName string, circuitName string) (result ExpressRouteCircuitsArpTableListResult, ae error) { - req, err := client.ListArpTablePreparer(resourceGroupName, circuitName) +// name of the circuit. peeringName is the name of the peering. devicePath is +// the path of the device. +func (client ExpressRouteCircuitsClient) ListArpTable(resourceGroupName string, circuitName string, peeringName string, devicePath string, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.ListArpTablePreparer(resourceGroupName, circuitName, peeringName, devicePath, cancel) if err != nil { - return result, autorest.NewErrorWithError(err, "network/ExpressRouteCircuitsClient", "ListArpTable", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "ListArpTable", nil, "Failure preparing request") } resp, err := client.ListArpTableSender(req) if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network/ExpressRouteCircuitsClient", "ListArpTable", "Failure sending request") + result.Response = resp + return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "ListArpTable", resp, "Failure sending request") } result, err = client.ListArpTableResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "network/ExpressRouteCircuitsClient", "ListArpTable", "Failure responding to request") + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "ListArpTable", resp, "Failure responding to request") } return } // ListArpTablePreparer prepares the ListArpTable request. 
-func (client ExpressRouteCircuitsClient) ListArpTablePreparer(resourceGroupName string, circuitName string) (*http.Request, error) { +func (client ExpressRouteCircuitsClient) ListArpTablePreparer(resourceGroupName string, circuitName string, peeringName string, devicePath string, cancel <-chan struct{}) (*http.Request, error) { pathParameters := map[string]interface{}{ - "circuitName": url.QueryEscape(circuitName), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "circuitName": autorest.Encode("path", circuitName), + "devicePath": autorest.Encode("path", devicePath), + "peeringName": autorest.Encode("path", peeringName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsGet(), + preparer := autorest.CreatePreparer( + autorest.AsPost(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}arpTable"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/arpTables/{devicePath}", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) } // ListArpTableSender sends the ListArpTable request. The method will close the // http.Response Body if it receives an error. 
func (client ExpressRouteCircuitsClient) ListArpTableSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) } // ListArpTableResponder handles the response to the ListArpTable request. The method always // closes the http.Response Body. -func (client ExpressRouteCircuitsClient) ListArpTableResponder(resp *http.Response) (result ExpressRouteCircuitsArpTableListResult, err error) { +func (client ExpressRouteCircuitsClient) ListArpTableResponder(resp *http.Response) (result autorest.Response, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// ListArpTableNextResults retrieves the next set of results, if any. 
-func (client ExpressRouteCircuitsClient) ListArpTableNextResults(lastResults ExpressRouteCircuitsArpTableListResult) (result ExpressRouteCircuitsArpTableListResult, ae error) { - req, err := lastResults.ExpressRouteCircuitsArpTableListResultPreparer() - if err != nil { - return result, autorest.NewErrorWithError(err, "network/ExpressRouteCircuitsClient", "ListArpTable", "Failure preparing next results request request") - } - if req == nil { - return - } - - resp, err := client.ListArpTableSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network/ExpressRouteCircuitsClient", "ListArpTable", "Failure sending next results request request") - } - - result, err = client.ListArpTableResponder(resp) - if err != nil { - ae = autorest.NewErrorWithError(err, "network/ExpressRouteCircuitsClient", "ListArpTable", "Failure responding to next results request request") - } - + result.Response = resp return } // ListRoutesTable the ListRoutesTable from ExpressRouteCircuit opertion // retrieves the currently advertised routes table associated with the -// ExpressRouteCircuits in a resource group. +// ExpressRouteCircuits in a resource group. This method may poll for +// completion. Polling can be canceled by passing the cancel channel +// argument. The channel will be used to cancel polling and any outstanding +// HTTP requests. // // resourceGroupName is the name of the resource group. circuitName is the -// name of the circuit. -func (client ExpressRouteCircuitsClient) ListRoutesTable(resourceGroupName string, circuitName string) (result ExpressRouteCircuitsRoutesTableListResult, ae error) { - req, err := client.ListRoutesTablePreparer(resourceGroupName, circuitName) +// name of the circuit. peeringName is the name of the peering. devicePath is +// the path of the device. 
+func (client ExpressRouteCircuitsClient) ListRoutesTable(resourceGroupName string, circuitName string, peeringName string, devicePath string, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.ListRoutesTablePreparer(resourceGroupName, circuitName, peeringName, devicePath, cancel) if err != nil { - return result, autorest.NewErrorWithError(err, "network/ExpressRouteCircuitsClient", "ListRoutesTable", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "ListRoutesTable", nil, "Failure preparing request") } resp, err := client.ListRoutesTableSender(req) if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network/ExpressRouteCircuitsClient", "ListRoutesTable", "Failure sending request") + result.Response = resp + return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "ListRoutesTable", resp, "Failure sending request") } result, err = client.ListRoutesTableResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "network/ExpressRouteCircuitsClient", "ListRoutesTable", "Failure responding to request") + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "ListRoutesTable", resp, "Failure responding to request") } return } // ListRoutesTablePreparer prepares the ListRoutesTable request. 
-func (client ExpressRouteCircuitsClient) ListRoutesTablePreparer(resourceGroupName string, circuitName string) (*http.Request, error) { +func (client ExpressRouteCircuitsClient) ListRoutesTablePreparer(resourceGroupName string, circuitName string, peeringName string, devicePath string, cancel <-chan struct{}) (*http.Request, error) { pathParameters := map[string]interface{}{ - "circuitName": url.QueryEscape(circuitName), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "circuitName": autorest.Encode("path", circuitName), + "devicePath": autorest.Encode("path", devicePath), + "peeringName": autorest.Encode("path", peeringName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsGet(), + preparer := autorest.CreatePreparer( + autorest.AsPost(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}routesTable"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/routeTables/{devicePath}", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) } // ListRoutesTableSender sends the ListRoutesTable request. The method will close the // http.Response Body if it receives an error. 
func (client ExpressRouteCircuitsClient) ListRoutesTableSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) } // ListRoutesTableResponder handles the response to the ListRoutesTable request. The method always // closes the http.Response Body. -func (client ExpressRouteCircuitsClient) ListRoutesTableResponder(resp *http.Response) (result ExpressRouteCircuitsRoutesTableListResult, err error) { +func (client ExpressRouteCircuitsClient) ListRoutesTableResponder(resp *http.Response) (result autorest.Response, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// ListRoutesTableNextResults retrieves the next set of results, if any. 
-func (client ExpressRouteCircuitsClient) ListRoutesTableNextResults(lastResults ExpressRouteCircuitsRoutesTableListResult) (result ExpressRouteCircuitsRoutesTableListResult, ae error) { - req, err := lastResults.ExpressRouteCircuitsRoutesTableListResultPreparer() - if err != nil { - return result, autorest.NewErrorWithError(err, "network/ExpressRouteCircuitsClient", "ListRoutesTable", "Failure preparing next results request request") - } - if req == nil { - return - } - - resp, err := client.ListRoutesTableSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network/ExpressRouteCircuitsClient", "ListRoutesTable", "Failure sending next results request request") - } - - result, err = client.ListRoutesTableResponder(resp) - if err != nil { - ae = autorest.NewErrorWithError(err, "network/ExpressRouteCircuitsClient", "ListRoutesTable", "Failure responding to next results request request") - } - + result.Response = resp return } -// ListStats the Liststats ExpressRouteCircuit opertion retrieves all the -// stats from a ExpressRouteCircuits in a resource group. +// ListRoutesTableSummary the ListRoutesTable from ExpressRouteCircuit +// opertion retrieves the currently advertised routes table associated with +// the ExpressRouteCircuits in a resource group. This method may poll for +// completion. Polling can be canceled by passing the cancel channel +// argument. The channel will be used to cancel polling and any outstanding +// HTTP requests. // // resourceGroupName is the name of the resource group. circuitName is the -// name of the loadBalancer. -func (client ExpressRouteCircuitsClient) ListStats(resourceGroupName string, circuitName string) (result ExpressRouteCircuitsStatsListResult, ae error) { - req, err := client.ListStatsPreparer(resourceGroupName, circuitName) +// name of the circuit. peeringName is the name of the peering. devicePath is +// the path of the device. 
+func (client ExpressRouteCircuitsClient) ListRoutesTableSummary(resourceGroupName string, circuitName string, peeringName string, devicePath string, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.ListRoutesTableSummaryPreparer(resourceGroupName, circuitName, peeringName, devicePath, cancel) if err != nil { - return result, autorest.NewErrorWithError(err, "network/ExpressRouteCircuitsClient", "ListStats", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "ListRoutesTableSummary", nil, "Failure preparing request") } - resp, err := client.ListStatsSender(req) + resp, err := client.ListRoutesTableSummarySender(req) if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network/ExpressRouteCircuitsClient", "ListStats", "Failure sending request") + result.Response = resp + return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "ListRoutesTableSummary", resp, "Failure sending request") } - result, err = client.ListStatsResponder(resp) + result, err = client.ListRoutesTableSummaryResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "network/ExpressRouteCircuitsClient", "ListStats", "Failure responding to request") + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "ListRoutesTableSummary", resp, "Failure responding to request") } return } -// ListStatsPreparer prepares the ListStats request. -func (client ExpressRouteCircuitsClient) ListStatsPreparer(resourceGroupName string, circuitName string) (*http.Request, error) { +// ListRoutesTableSummaryPreparer prepares the ListRoutesTableSummary request. 
+func (client ExpressRouteCircuitsClient) ListRoutesTableSummaryPreparer(resourceGroupName string, circuitName string, peeringName string, devicePath string, cancel <-chan struct{}) (*http.Request, error) { pathParameters := map[string]interface{}{ - "circuitName": url.QueryEscape(circuitName), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "circuitName": autorest.Encode("path", circuitName), + "devicePath": autorest.Encode("path", devicePath), + "peeringName": autorest.Encode("path", peeringName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsGet(), + preparer := autorest.CreatePreparer( + autorest.AsPost(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}stats"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/routeTablesSummary/{devicePath}", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) } -// ListStatsSender sends the ListStats request. The method will close the +// ListRoutesTableSummarySender sends the ListRoutesTableSummary request. The method will close the // http.Response Body if it receives an error. 
-func (client ExpressRouteCircuitsClient) ListStatsSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) +func (client ExpressRouteCircuitsClient) ListRoutesTableSummarySender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) } -// ListStatsResponder handles the response to the ListStats request. The method always +// ListRoutesTableSummaryResponder handles the response to the ListRoutesTableSummary request. The method always // closes the http.Response Body. -func (client ExpressRouteCircuitsClient) ListStatsResponder(resp *http.Response) (result ExpressRouteCircuitsStatsListResult, err error) { +func (client ExpressRouteCircuitsClient) ListRoutesTableSummaryResponder(resp *http.Response) (result autorest.Response, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// ListStatsNextResults retrieves the next set of results, if any. 
-func (client ExpressRouteCircuitsClient) ListStatsNextResults(lastResults ExpressRouteCircuitsStatsListResult) (result ExpressRouteCircuitsStatsListResult, ae error) { - req, err := lastResults.ExpressRouteCircuitsStatsListResultPreparer() - if err != nil { - return result, autorest.NewErrorWithError(err, "network/ExpressRouteCircuitsClient", "ListStats", "Failure preparing next results request request") - } - if req == nil { - return - } - - resp, err := client.ListStatsSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network/ExpressRouteCircuitsClient", "ListStats", "Failure sending next results request request") - } - - result, err = client.ListStatsResponder(resp) - if err != nil { - ae = autorest.NewErrorWithError(err, "network/ExpressRouteCircuitsClient", "ListStats", "Failure responding to next results request request") - } - + result.Response = resp return } diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/network/expressrouteserviceproviders.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/network/expressrouteserviceproviders.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/network/expressrouteserviceproviders.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/network/expressrouteserviceproviders.go 2016-10-13 14:32:06.000000000 +0000 @@ -14,21 +14,21 @@ // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 // Changes may cause incorrect behavior and will be lost if the code is // regenerated. 
import ( - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" "net/http" - "net/url" ) -// ExpressRouteServiceProvidersClient is the the Windows Azure Network +// ExpressRouteServiceProvidersClient is the the Microsoft Azure Network // management API provides a RESTful set of web services that interact with -// Windows Azure Networks service to manage your network resrources. The API -// has entities that capture the relationship between an end user and the -// Windows Azure Networks service. +// Microsoft Azure Networks service to manage your network resrources. The +// API has entities that capture the relationship between an end user and the +// Microsoft Azure Networks service. type ExpressRouteServiceProvidersClient struct { ManagementClient } @@ -47,21 +47,21 @@ // List the List ExpressRouteServiceProvider opertion retrieves all the // available ExpressRouteServiceProviders. 
-func (client ExpressRouteServiceProvidersClient) List() (result ExpressRouteServiceProviderListResult, ae error) { +func (client ExpressRouteServiceProvidersClient) List() (result ExpressRouteServiceProviderListResult, err error) { req, err := client.ListPreparer() if err != nil { - return result, autorest.NewErrorWithError(err, "network/ExpressRouteServiceProvidersClient", "List", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "network.ExpressRouteServiceProvidersClient", "List", nil, "Failure preparing request") } resp, err := client.ListSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network/ExpressRouteServiceProvidersClient", "List", "Failure sending request") + return result, autorest.NewErrorWithError(err, "network.ExpressRouteServiceProvidersClient", "List", resp, "Failure sending request") } result, err = client.ListResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "network/ExpressRouteServiceProvidersClient", "List", "Failure responding to request") + err = autorest.NewErrorWithError(err, "network.ExpressRouteServiceProvidersClient", "List", resp, "Failure responding to request") } return @@ -70,26 +70,25 @@ // ListPreparer prepares the List request. 
func (client ExpressRouteServiceProvidersClient) ListPreparer() (*http.Request, error) { pathParameters := map[string]interface{}{ - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/providers/Microsoft.Network/expressRouteServiceProviders"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Network/expressRouteServiceProviders", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client ExpressRouteServiceProvidersClient) ListSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // ListResponder handles the response to the List request. The method always @@ -98,7 +97,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -106,10 +105,10 @@ } // ListNextResults retrieves the next set of results, if any. 
-func (client ExpressRouteServiceProvidersClient) ListNextResults(lastResults ExpressRouteServiceProviderListResult) (result ExpressRouteServiceProviderListResult, ae error) { +func (client ExpressRouteServiceProvidersClient) ListNextResults(lastResults ExpressRouteServiceProviderListResult) (result ExpressRouteServiceProviderListResult, err error) { req, err := lastResults.ExpressRouteServiceProviderListResultPreparer() if err != nil { - return result, autorest.NewErrorWithError(err, "network/ExpressRouteServiceProvidersClient", "List", "Failure preparing next results request request") + return result, autorest.NewErrorWithError(err, "network.ExpressRouteServiceProvidersClient", "List", nil, "Failure preparing next results request request") } if req == nil { return @@ -118,12 +117,12 @@ resp, err := client.ListSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network/ExpressRouteServiceProvidersClient", "List", "Failure sending next results request request") + return result, autorest.NewErrorWithError(err, "network.ExpressRouteServiceProvidersClient", "List", resp, "Failure sending next results request request") } result, err = client.ListResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "network/ExpressRouteServiceProvidersClient", "List", "Failure responding to next results request request") + err = autorest.NewErrorWithError(err, "network.ExpressRouteServiceProvidersClient", "List", resp, "Failure responding to next results request request") } return diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/network/interfaces.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/network/interfaces.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/network/interfaces.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/network/interfaces.go 2016-10-13 14:32:06.000000000 +0000 
@@ -14,20 +14,20 @@ // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 // Changes may cause incorrect behavior and will be lost if the code is // regenerated. import ( - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" "net/http" - "net/url" ) -// InterfacesClient is the the Windows Azure Network management API provides a -// RESTful set of web services that interact with Windows Azure Networks +// InterfacesClient is the the Microsoft Azure Network management API provides +// a RESTful set of web services that interact with Microsoft Azure Networks // service to manage your network resrources. The API has entities that -// capture the relationship between an end user and the Windows Azure +// capture the relationship between an end user and the Microsoft Azure // Networks service. type InterfacesClient struct { ManagementClient @@ -45,122 +45,128 @@ } // CreateOrUpdate the Put NetworkInterface operation creates/updates a -// networkInterface +// networkInterface This method may poll for completion. Polling can be +// canceled by passing the cancel channel argument. The channel will be used +// to cancel polling and any outstanding HTTP requests. // // resourceGroupName is the name of the resource group. networkInterfaceName // is the name of the network interface. 
parameters is parameters supplied to // the create/update NetworkInterface operation -func (client InterfacesClient) CreateOrUpdate(resourceGroupName string, networkInterfaceName string, parameters Interface) (result Interface, ae error) { - req, err := client.CreateOrUpdatePreparer(resourceGroupName, networkInterfaceName, parameters) +func (client InterfacesClient) CreateOrUpdate(resourceGroupName string, networkInterfaceName string, parameters Interface, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.CreateOrUpdatePreparer(resourceGroupName, networkInterfaceName, parameters, cancel) if err != nil { - return result, autorest.NewErrorWithError(err, "network/InterfacesClient", "CreateOrUpdate", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "CreateOrUpdate", nil, "Failure preparing request") } resp, err := client.CreateOrUpdateSender(req) if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network/InterfacesClient", "CreateOrUpdate", "Failure sending request") + result.Response = resp + return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "CreateOrUpdate", resp, "Failure sending request") } result, err = client.CreateOrUpdateResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "network/InterfacesClient", "CreateOrUpdate", "Failure responding to request") + err = autorest.NewErrorWithError(err, "network.InterfacesClient", "CreateOrUpdate", resp, "Failure responding to request") } return } // CreateOrUpdatePreparer prepares the CreateOrUpdate request. 
-func (client InterfacesClient) CreateOrUpdatePreparer(resourceGroupName string, networkInterfaceName string, parameters Interface) (*http.Request, error) { +func (client InterfacesClient) CreateOrUpdatePreparer(resourceGroupName string, networkInterfaceName string, parameters Interface, cancel <-chan struct{}) (*http.Request, error) { pathParameters := map[string]interface{}{ - "networkInterfaceName": url.QueryEscape(networkInterfaceName), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "networkInterfaceName": autorest.Encode("path", networkInterfaceName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, + preparer := autorest.CreatePreparer( autorest.AsJSON(), autorest.AsPut(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}"), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}", pathParameters), autorest.WithJSON(parameters), - autorest.WithPathParameters(pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) } // CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the // http.Response Body if it receives an error. 
func (client InterfacesClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusCreated, http.StatusOK) + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) } // CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always // closes the http.Response Body. -func (client InterfacesClient) CreateOrUpdateResponder(resp *http.Response) (result Interface, err error) { +func (client InterfacesClient) CreateOrUpdateResponder(resp *http.Response) (result autorest.Response, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusCreated, http.StatusOK), - autorest.ByUnmarshallingJSON(&result), + azure.WithErrorUnlessStatusCode(http.StatusCreated, http.StatusOK), autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} + result.Response = resp return } // Delete the delete netwokInterface operation deletes the specified -// netwokInterface. +// netwokInterface. This method may poll for completion. Polling can be +// canceled by passing the cancel channel argument. The channel will be used +// to cancel polling and any outstanding HTTP requests. // // resourceGroupName is the name of the resource group. networkInterfaceName // is the name of the network interface. 
-func (client InterfacesClient) Delete(resourceGroupName string, networkInterfaceName string) (result autorest.Response, ae error) { - req, err := client.DeletePreparer(resourceGroupName, networkInterfaceName) +func (client InterfacesClient) Delete(resourceGroupName string, networkInterfaceName string, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.DeletePreparer(resourceGroupName, networkInterfaceName, cancel) if err != nil { - return result, autorest.NewErrorWithError(err, "network/InterfacesClient", "Delete", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "Delete", nil, "Failure preparing request") } resp, err := client.DeleteSender(req) if err != nil { result.Response = resp - return result, autorest.NewErrorWithError(err, "network/InterfacesClient", "Delete", "Failure sending request") + return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "Delete", resp, "Failure sending request") } result, err = client.DeleteResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "network/InterfacesClient", "Delete", "Failure responding to request") + err = autorest.NewErrorWithError(err, "network.InterfacesClient", "Delete", resp, "Failure responding to request") } return } // DeletePreparer prepares the Delete request. 
-func (client InterfacesClient) DeletePreparer(resourceGroupName string, networkInterfaceName string) (*http.Request, error) { +func (client InterfacesClient) DeletePreparer(resourceGroupName string, networkInterfaceName string, cancel <-chan struct{}) (*http.Request, error) { pathParameters := map[string]interface{}{ - "networkInterfaceName": url.QueryEscape(networkInterfaceName), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "networkInterfaceName": autorest.Encode("path", networkInterfaceName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsDelete(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) } // DeleteSender sends the Delete request. The method will close the // http.Response Body if it receives an error. func (client InterfacesClient) DeleteSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusNoContent, http.StatusAccepted, http.StatusOK) + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) } // DeleteResponder handles the response to the Delete request. 
The method always @@ -169,7 +175,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusNoContent, http.StatusAccepted, http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusNoContent, http.StatusAccepted, http.StatusOK), autorest.ByClosing()) result.Response = resp return @@ -179,52 +185,55 @@ // specified network interface. // // resourceGroupName is the name of the resource group. networkInterfaceName -// is the name of the network interface. -func (client InterfacesClient) Get(resourceGroupName string, networkInterfaceName string) (result Interface, ae error) { - req, err := client.GetPreparer(resourceGroupName, networkInterfaceName) +// is the name of the network interface. expand is expand references +// resources. +func (client InterfacesClient) Get(resourceGroupName string, networkInterfaceName string, expand string) (result Interface, err error) { + req, err := client.GetPreparer(resourceGroupName, networkInterfaceName, expand) if err != nil { - return result, autorest.NewErrorWithError(err, "network/InterfacesClient", "Get", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "Get", nil, "Failure preparing request") } resp, err := client.GetSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network/InterfacesClient", "Get", "Failure sending request") + return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "Get", resp, "Failure sending request") } result, err = client.GetResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "network/InterfacesClient", "Get", "Failure responding to request") + err = autorest.NewErrorWithError(err, "network.InterfacesClient", "Get", resp, "Failure responding to request") } return } // GetPreparer prepares the Get request. 
-func (client InterfacesClient) GetPreparer(resourceGroupName string, networkInterfaceName string) (*http.Request, error) { +func (client InterfacesClient) GetPreparer(resourceGroupName string, networkInterfaceName string, expand string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "networkInterfaceName": url.QueryEscape(networkInterfaceName), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "networkInterfaceName": autorest.Encode("path", networkInterfaceName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, + } + if len(expand) > 0 { + queryParameters["$expand"] = autorest.Encode("query", expand) } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client InterfacesClient) GetSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetResponder handles the response to the Get request. 
The method always @@ -233,7 +242,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -247,54 +256,56 @@ // resourceGroupName is the name of the resource group. // virtualMachineScaleSetName is the name of the virtual machine scale set. // virtualmachineIndex is the virtual machine index. networkInterfaceName is -// the name of the network interface. -func (client InterfacesClient) GetVirtualMachineScaleSetNetworkInterface(resourceGroupName string, virtualMachineScaleSetName string, virtualmachineIndex string, networkInterfaceName string) (result Interface, ae error) { - req, err := client.GetVirtualMachineScaleSetNetworkInterfacePreparer(resourceGroupName, virtualMachineScaleSetName, virtualmachineIndex, networkInterfaceName) +// the name of the network interface. expand is expand references resources. 
+func (client InterfacesClient) GetVirtualMachineScaleSetNetworkInterface(resourceGroupName string, virtualMachineScaleSetName string, virtualmachineIndex string, networkInterfaceName string, expand string) (result Interface, err error) { + req, err := client.GetVirtualMachineScaleSetNetworkInterfacePreparer(resourceGroupName, virtualMachineScaleSetName, virtualmachineIndex, networkInterfaceName, expand) if err != nil { - return result, autorest.NewErrorWithError(err, "network/InterfacesClient", "GetVirtualMachineScaleSetNetworkInterface", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "GetVirtualMachineScaleSetNetworkInterface", nil, "Failure preparing request") } resp, err := client.GetVirtualMachineScaleSetNetworkInterfaceSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network/InterfacesClient", "GetVirtualMachineScaleSetNetworkInterface", "Failure sending request") + return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "GetVirtualMachineScaleSetNetworkInterface", resp, "Failure sending request") } result, err = client.GetVirtualMachineScaleSetNetworkInterfaceResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "network/InterfacesClient", "GetVirtualMachineScaleSetNetworkInterface", "Failure responding to request") + err = autorest.NewErrorWithError(err, "network.InterfacesClient", "GetVirtualMachineScaleSetNetworkInterface", resp, "Failure responding to request") } return } // GetVirtualMachineScaleSetNetworkInterfacePreparer prepares the GetVirtualMachineScaleSetNetworkInterface request. 
-func (client InterfacesClient) GetVirtualMachineScaleSetNetworkInterfacePreparer(resourceGroupName string, virtualMachineScaleSetName string, virtualmachineIndex string, networkInterfaceName string) (*http.Request, error) { +func (client InterfacesClient) GetVirtualMachineScaleSetNetworkInterfacePreparer(resourceGroupName string, virtualMachineScaleSetName string, virtualmachineIndex string, networkInterfaceName string, expand string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "networkInterfaceName": url.QueryEscape(networkInterfaceName), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - "virtualmachineIndex": url.QueryEscape(virtualmachineIndex), - "virtualMachineScaleSetName": url.QueryEscape(virtualMachineScaleSetName), + "networkInterfaceName": autorest.Encode("path", networkInterfaceName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "virtualmachineIndex": autorest.Encode("path", virtualmachineIndex), + "virtualMachineScaleSetName": autorest.Encode("path", virtualMachineScaleSetName), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, + } + if len(expand) > 0 { + queryParameters["$expand"] = autorest.Encode("query", expand) } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.compute/virtualMachineScaleSets/{virtualMachineScaleSetName}/virtualMachines/{virtualmachineIndex}/networkInterfaces/{networkInterfaceName}"), - autorest.WithPathParameters(pathParameters), + 
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.Compute/virtualMachineScaleSets/{virtualMachineScaleSetName}/virtualMachines/{virtualmachineIndex}/networkInterfaces/{networkInterfaceName}", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetVirtualMachineScaleSetNetworkInterfaceSender sends the GetVirtualMachineScaleSetNetworkInterface request. The method will close the // http.Response Body if it receives an error. func (client InterfacesClient) GetVirtualMachineScaleSetNetworkInterfaceSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetVirtualMachineScaleSetNetworkInterfaceResponder handles the response to the GetVirtualMachineScaleSetNetworkInterface request. The method always @@ -303,7 +314,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -314,21 +325,21 @@ // networkInterfaces in a resource group. // // resourceGroupName is the name of the resource group. 
-func (client InterfacesClient) List(resourceGroupName string) (result InterfaceListResult, ae error) { +func (client InterfacesClient) List(resourceGroupName string) (result InterfaceListResult, err error) { req, err := client.ListPreparer(resourceGroupName) if err != nil { - return result, autorest.NewErrorWithError(err, "network/InterfacesClient", "List", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "List", nil, "Failure preparing request") } resp, err := client.ListSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network/InterfacesClient", "List", "Failure sending request") + return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "List", resp, "Failure sending request") } result, err = client.ListResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "network/InterfacesClient", "List", "Failure responding to request") + err = autorest.NewErrorWithError(err, "network.InterfacesClient", "List", resp, "Failure responding to request") } return @@ -337,27 +348,26 @@ // ListPreparer prepares the List request. 
func (client InterfacesClient) ListPreparer(resourceGroupName string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client InterfacesClient) ListSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // ListResponder handles the response to the List request. The method always @@ -366,7 +376,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -374,10 +384,10 @@ } // ListNextResults retrieves the next set of results, if any. 
-func (client InterfacesClient) ListNextResults(lastResults InterfaceListResult) (result InterfaceListResult, ae error) { +func (client InterfacesClient) ListNextResults(lastResults InterfaceListResult) (result InterfaceListResult, err error) { req, err := lastResults.InterfaceListResultPreparer() if err != nil { - return result, autorest.NewErrorWithError(err, "network/InterfacesClient", "List", "Failure preparing next results request request") + return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "List", nil, "Failure preparing next results request request") } if req == nil { return @@ -386,12 +396,12 @@ resp, err := client.ListSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network/InterfacesClient", "List", "Failure sending next results request request") + return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "List", resp, "Failure sending next results request request") } result, err = client.ListResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "network/InterfacesClient", "List", "Failure responding to next results request request") + err = autorest.NewErrorWithError(err, "network.InterfacesClient", "List", resp, "Failure responding to next results request request") } return @@ -399,21 +409,21 @@ // ListAll the List networkInterfaces opertion retrieves all the // networkInterfaces in a subscription. 
-func (client InterfacesClient) ListAll() (result InterfaceListResult, ae error) { +func (client InterfacesClient) ListAll() (result InterfaceListResult, err error) { req, err := client.ListAllPreparer() if err != nil { - return result, autorest.NewErrorWithError(err, "network/InterfacesClient", "ListAll", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "ListAll", nil, "Failure preparing request") } resp, err := client.ListAllSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network/InterfacesClient", "ListAll", "Failure sending request") + return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "ListAll", resp, "Failure sending request") } result, err = client.ListAllResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "network/InterfacesClient", "ListAll", "Failure responding to request") + err = autorest.NewErrorWithError(err, "network.InterfacesClient", "ListAll", resp, "Failure responding to request") } return @@ -422,26 +432,25 @@ // ListAllPreparer prepares the ListAll request. 
func (client InterfacesClient) ListAllPreparer() (*http.Request, error) { pathParameters := map[string]interface{}{ - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/providers/Microsoft.Network/networkInterfaces"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Network/networkInterfaces", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // ListAllSender sends the ListAll request. The method will close the // http.Response Body if it receives an error. func (client InterfacesClient) ListAllSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // ListAllResponder handles the response to the ListAll request. The method always @@ -450,7 +459,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -458,10 +467,10 @@ } // ListAllNextResults retrieves the next set of results, if any. 
-func (client InterfacesClient) ListAllNextResults(lastResults InterfaceListResult) (result InterfaceListResult, ae error) { +func (client InterfacesClient) ListAllNextResults(lastResults InterfaceListResult) (result InterfaceListResult, err error) { req, err := lastResults.InterfaceListResultPreparer() if err != nil { - return result, autorest.NewErrorWithError(err, "network/InterfacesClient", "ListAll", "Failure preparing next results request request") + return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "ListAll", nil, "Failure preparing next results request request") } if req == nil { return @@ -470,12 +479,12 @@ resp, err := client.ListAllSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network/InterfacesClient", "ListAll", "Failure sending next results request request") + return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "ListAll", resp, "Failure sending next results request request") } result, err = client.ListAllResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "network/InterfacesClient", "ListAll", "Failure responding to next results request request") + err = autorest.NewErrorWithError(err, "network.InterfacesClient", "ListAll", resp, "Failure responding to next results request request") } return @@ -487,21 +496,21 @@ // // resourceGroupName is the name of the resource group. // virtualMachineScaleSetName is the name of the virtual machine scale set. 
-func (client InterfacesClient) ListVirtualMachineScaleSetNetworkInterfaces(resourceGroupName string, virtualMachineScaleSetName string) (result InterfaceListResult, ae error) { +func (client InterfacesClient) ListVirtualMachineScaleSetNetworkInterfaces(resourceGroupName string, virtualMachineScaleSetName string) (result InterfaceListResult, err error) { req, err := client.ListVirtualMachineScaleSetNetworkInterfacesPreparer(resourceGroupName, virtualMachineScaleSetName) if err != nil { - return result, autorest.NewErrorWithError(err, "network/InterfacesClient", "ListVirtualMachineScaleSetNetworkInterfaces", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "ListVirtualMachineScaleSetNetworkInterfaces", nil, "Failure preparing request") } resp, err := client.ListVirtualMachineScaleSetNetworkInterfacesSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network/InterfacesClient", "ListVirtualMachineScaleSetNetworkInterfaces", "Failure sending request") + return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "ListVirtualMachineScaleSetNetworkInterfaces", resp, "Failure sending request") } result, err = client.ListVirtualMachineScaleSetNetworkInterfacesResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "network/InterfacesClient", "ListVirtualMachineScaleSetNetworkInterfaces", "Failure responding to request") + err = autorest.NewErrorWithError(err, "network.InterfacesClient", "ListVirtualMachineScaleSetNetworkInterfaces", resp, "Failure responding to request") } return @@ -510,28 +519,27 @@ // ListVirtualMachineScaleSetNetworkInterfacesPreparer prepares the ListVirtualMachineScaleSetNetworkInterfaces request. 
func (client InterfacesClient) ListVirtualMachineScaleSetNetworkInterfacesPreparer(resourceGroupName string, virtualMachineScaleSetName string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - "virtualMachineScaleSetName": url.QueryEscape(virtualMachineScaleSetName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "virtualMachineScaleSetName": autorest.Encode("path", virtualMachineScaleSetName), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.compute/virtualMachineScaleSets/{virtualMachineScaleSetName}/networkInterfaces"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.Compute/virtualMachineScaleSets/{virtualMachineScaleSetName}/networkInterfaces", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // ListVirtualMachineScaleSetNetworkInterfacesSender sends the ListVirtualMachineScaleSetNetworkInterfaces request. The method will close the // http.Response Body if it receives an error. func (client InterfacesClient) ListVirtualMachineScaleSetNetworkInterfacesSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // ListVirtualMachineScaleSetNetworkInterfacesResponder handles the response to the ListVirtualMachineScaleSetNetworkInterfaces request. 
The method always @@ -540,7 +548,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -548,10 +556,10 @@ } // ListVirtualMachineScaleSetNetworkInterfacesNextResults retrieves the next set of results, if any. -func (client InterfacesClient) ListVirtualMachineScaleSetNetworkInterfacesNextResults(lastResults InterfaceListResult) (result InterfaceListResult, ae error) { +func (client InterfacesClient) ListVirtualMachineScaleSetNetworkInterfacesNextResults(lastResults InterfaceListResult) (result InterfaceListResult, err error) { req, err := lastResults.InterfaceListResultPreparer() if err != nil { - return result, autorest.NewErrorWithError(err, "network/InterfacesClient", "ListVirtualMachineScaleSetNetworkInterfaces", "Failure preparing next results request request") + return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "ListVirtualMachineScaleSetNetworkInterfaces", nil, "Failure preparing next results request request") } if req == nil { return @@ -560,12 +568,12 @@ resp, err := client.ListVirtualMachineScaleSetNetworkInterfacesSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network/InterfacesClient", "ListVirtualMachineScaleSetNetworkInterfaces", "Failure sending next results request request") + return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "ListVirtualMachineScaleSetNetworkInterfaces", resp, "Failure sending next results request request") } result, err = client.ListVirtualMachineScaleSetNetworkInterfacesResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "network/InterfacesClient", "ListVirtualMachineScaleSetNetworkInterfaces", "Failure responding to next results request request") + err = 
autorest.NewErrorWithError(err, "network.InterfacesClient", "ListVirtualMachineScaleSetNetworkInterfaces", resp, "Failure responding to next results request request") } return @@ -578,21 +586,21 @@ // resourceGroupName is the name of the resource group. // virtualMachineScaleSetName is the name of the virtual machine scale set. // virtualmachineIndex is the virtual machine index. -func (client InterfacesClient) ListVirtualMachineScaleSetVMNetworkInterfaces(resourceGroupName string, virtualMachineScaleSetName string, virtualmachineIndex string) (result InterfaceListResult, ae error) { +func (client InterfacesClient) ListVirtualMachineScaleSetVMNetworkInterfaces(resourceGroupName string, virtualMachineScaleSetName string, virtualmachineIndex string) (result InterfaceListResult, err error) { req, err := client.ListVirtualMachineScaleSetVMNetworkInterfacesPreparer(resourceGroupName, virtualMachineScaleSetName, virtualmachineIndex) if err != nil { - return result, autorest.NewErrorWithError(err, "network/InterfacesClient", "ListVirtualMachineScaleSetVMNetworkInterfaces", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "ListVirtualMachineScaleSetVMNetworkInterfaces", nil, "Failure preparing request") } resp, err := client.ListVirtualMachineScaleSetVMNetworkInterfacesSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network/InterfacesClient", "ListVirtualMachineScaleSetVMNetworkInterfaces", "Failure sending request") + return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "ListVirtualMachineScaleSetVMNetworkInterfaces", resp, "Failure sending request") } result, err = client.ListVirtualMachineScaleSetVMNetworkInterfacesResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "network/InterfacesClient", "ListVirtualMachineScaleSetVMNetworkInterfaces", "Failure responding to request") + err = 
autorest.NewErrorWithError(err, "network.InterfacesClient", "ListVirtualMachineScaleSetVMNetworkInterfaces", resp, "Failure responding to request") } return @@ -601,29 +609,28 @@ // ListVirtualMachineScaleSetVMNetworkInterfacesPreparer prepares the ListVirtualMachineScaleSetVMNetworkInterfaces request. func (client InterfacesClient) ListVirtualMachineScaleSetVMNetworkInterfacesPreparer(resourceGroupName string, virtualMachineScaleSetName string, virtualmachineIndex string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - "virtualmachineIndex": url.QueryEscape(virtualmachineIndex), - "virtualMachineScaleSetName": url.QueryEscape(virtualMachineScaleSetName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "virtualmachineIndex": autorest.Encode("path", virtualmachineIndex), + "virtualMachineScaleSetName": autorest.Encode("path", virtualMachineScaleSetName), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.compute/virtualMachineScaleSets/{virtualMachineScaleSetName}/virtualMachines/{virtualmachineIndex}/networkInterfaces"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.Compute/virtualMachineScaleSets/{virtualMachineScaleSetName}/virtualMachines/{virtualmachineIndex}/networkInterfaces", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // 
ListVirtualMachineScaleSetVMNetworkInterfacesSender sends the ListVirtualMachineScaleSetVMNetworkInterfaces request. The method will close the // http.Response Body if it receives an error. func (client InterfacesClient) ListVirtualMachineScaleSetVMNetworkInterfacesSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // ListVirtualMachineScaleSetVMNetworkInterfacesResponder handles the response to the ListVirtualMachineScaleSetVMNetworkInterfaces request. The method always @@ -632,7 +639,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -640,10 +647,10 @@ } // ListVirtualMachineScaleSetVMNetworkInterfacesNextResults retrieves the next set of results, if any. -func (client InterfacesClient) ListVirtualMachineScaleSetVMNetworkInterfacesNextResults(lastResults InterfaceListResult) (result InterfaceListResult, ae error) { +func (client InterfacesClient) ListVirtualMachineScaleSetVMNetworkInterfacesNextResults(lastResults InterfaceListResult) (result InterfaceListResult, err error) { req, err := lastResults.InterfaceListResultPreparer() if err != nil { - return result, autorest.NewErrorWithError(err, "network/InterfacesClient", "ListVirtualMachineScaleSetVMNetworkInterfaces", "Failure preparing next results request request") + return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "ListVirtualMachineScaleSetVMNetworkInterfaces", nil, "Failure preparing next results request request") } if req == nil { return @@ -652,12 +659,12 @@ resp, err := client.ListVirtualMachineScaleSetVMNetworkInterfacesSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, 
"network/InterfacesClient", "ListVirtualMachineScaleSetVMNetworkInterfaces", "Failure sending next results request request") + return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "ListVirtualMachineScaleSetVMNetworkInterfaces", resp, "Failure sending next results request request") } result, err = client.ListVirtualMachineScaleSetVMNetworkInterfacesResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "network/InterfacesClient", "ListVirtualMachineScaleSetVMNetworkInterfaces", "Failure responding to next results request request") + err = autorest.NewErrorWithError(err, "network.InterfacesClient", "ListVirtualMachineScaleSetVMNetworkInterfaces", resp, "Failure responding to next results request request") } return diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/network/loadbalancers.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/network/loadbalancers.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/network/loadbalancers.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/network/loadbalancers.go 2016-10-13 14:32:06.000000000 +0000 @@ -14,20 +14,20 @@ // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 // Changes may cause incorrect behavior and will be lost if the code is // regenerated. 
import ( - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" "net/http" - "net/url" ) -// LoadBalancersClient is the the Windows Azure Network management API -// provides a RESTful set of web services that interact with Windows Azure +// LoadBalancersClient is the the Microsoft Azure Network management API +// provides a RESTful set of web services that interact with Microsoft Azure // Networks service to manage your network resrources. The API has entities -// that capture the relationship between an end user and the Windows Azure +// that capture the relationship between an end user and the Microsoft Azure // Networks service. type LoadBalancersClient struct { ManagementClient @@ -45,121 +45,129 @@ return LoadBalancersClient{NewWithBaseURI(baseURI, subscriptionID)} } -// CreateOrUpdate the Put LoadBalancer operation creates/updates a LoadBalancer +// CreateOrUpdate the Put LoadBalancer operation creates/updates a +// LoadBalancer This method may poll for completion. Polling can be canceled +// by passing the cancel channel argument. The channel will be used to cancel +// polling and any outstanding HTTP requests. // // resourceGroupName is the name of the resource group. loadBalancerName is // the name of the loadBalancer. 
parameters is parameters supplied to the // create/delete LoadBalancer operation -func (client LoadBalancersClient) CreateOrUpdate(resourceGroupName string, loadBalancerName string, parameters LoadBalancer) (result LoadBalancer, ae error) { - req, err := client.CreateOrUpdatePreparer(resourceGroupName, loadBalancerName, parameters) +func (client LoadBalancersClient) CreateOrUpdate(resourceGroupName string, loadBalancerName string, parameters LoadBalancer, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.CreateOrUpdatePreparer(resourceGroupName, loadBalancerName, parameters, cancel) if err != nil { - return result, autorest.NewErrorWithError(err, "network/LoadBalancersClient", "CreateOrUpdate", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "network.LoadBalancersClient", "CreateOrUpdate", nil, "Failure preparing request") } resp, err := client.CreateOrUpdateSender(req) if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network/LoadBalancersClient", "CreateOrUpdate", "Failure sending request") + result.Response = resp + return result, autorest.NewErrorWithError(err, "network.LoadBalancersClient", "CreateOrUpdate", resp, "Failure sending request") } result, err = client.CreateOrUpdateResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "network/LoadBalancersClient", "CreateOrUpdate", "Failure responding to request") + err = autorest.NewErrorWithError(err, "network.LoadBalancersClient", "CreateOrUpdate", resp, "Failure responding to request") } return } // CreateOrUpdatePreparer prepares the CreateOrUpdate request. 
-func (client LoadBalancersClient) CreateOrUpdatePreparer(resourceGroupName string, loadBalancerName string, parameters LoadBalancer) (*http.Request, error) { +func (client LoadBalancersClient) CreateOrUpdatePreparer(resourceGroupName string, loadBalancerName string, parameters LoadBalancer, cancel <-chan struct{}) (*http.Request, error) { pathParameters := map[string]interface{}{ - "loadBalancerName": url.QueryEscape(loadBalancerName), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "loadBalancerName": autorest.Encode("path", loadBalancerName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, + preparer := autorest.CreatePreparer( autorest.AsJSON(), autorest.AsPut(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}"), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}", pathParameters), autorest.WithJSON(parameters), - autorest.WithPathParameters(pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) } // CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the // http.Response Body if it receives an error. 
func (client LoadBalancersClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusCreated, http.StatusOK) + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) } // CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always // closes the http.Response Body. -func (client LoadBalancersClient) CreateOrUpdateResponder(resp *http.Response) (result LoadBalancer, err error) { +func (client LoadBalancersClient) CreateOrUpdateResponder(resp *http.Response) (result autorest.Response, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusCreated, http.StatusOK), - autorest.ByUnmarshallingJSON(&result), + azure.WithErrorUnlessStatusCode(http.StatusCreated, http.StatusOK), autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} + result.Response = resp return } -// Delete the delete loadbalancer operation deletes the specified loadbalancer. +// Delete the delete loadbalancer operation deletes the specified +// loadbalancer. This method may poll for completion. Polling can be canceled +// by passing the cancel channel argument. The channel will be used to cancel +// polling and any outstanding HTTP requests. // // resourceGroupName is the name of the resource group. loadBalancerName is // the name of the loadBalancer. 
-func (client LoadBalancersClient) Delete(resourceGroupName string, loadBalancerName string) (result autorest.Response, ae error) { - req, err := client.DeletePreparer(resourceGroupName, loadBalancerName) +func (client LoadBalancersClient) Delete(resourceGroupName string, loadBalancerName string, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.DeletePreparer(resourceGroupName, loadBalancerName, cancel) if err != nil { - return result, autorest.NewErrorWithError(err, "network/LoadBalancersClient", "Delete", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "network.LoadBalancersClient", "Delete", nil, "Failure preparing request") } resp, err := client.DeleteSender(req) if err != nil { result.Response = resp - return result, autorest.NewErrorWithError(err, "network/LoadBalancersClient", "Delete", "Failure sending request") + return result, autorest.NewErrorWithError(err, "network.LoadBalancersClient", "Delete", resp, "Failure sending request") } result, err = client.DeleteResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "network/LoadBalancersClient", "Delete", "Failure responding to request") + err = autorest.NewErrorWithError(err, "network.LoadBalancersClient", "Delete", resp, "Failure responding to request") } return } // DeletePreparer prepares the Delete request. 
-func (client LoadBalancersClient) DeletePreparer(resourceGroupName string, loadBalancerName string) (*http.Request, error) { +func (client LoadBalancersClient) DeletePreparer(resourceGroupName string, loadBalancerName string, cancel <-chan struct{}) (*http.Request, error) { pathParameters := map[string]interface{}{ - "loadBalancerName": url.QueryEscape(loadBalancerName), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "loadBalancerName": autorest.Encode("path", loadBalancerName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsDelete(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) } // DeleteSender sends the Delete request. The method will close the // http.Response Body if it receives an error. func (client LoadBalancersClient) DeleteSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusNoContent, http.StatusAccepted, http.StatusOK) + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) } // DeleteResponder handles the response to the Delete request. 
The method always @@ -168,7 +176,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusNoContent, http.StatusAccepted, http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusNoContent, http.StatusAccepted, http.StatusOK), autorest.ByClosing()) result.Response = resp return @@ -178,52 +186,54 @@ // specified network interface. // // resourceGroupName is the name of the resource group. loadBalancerName is -// the name of the loadBalancer. -func (client LoadBalancersClient) Get(resourceGroupName string, loadBalancerName string) (result LoadBalancer, ae error) { - req, err := client.GetPreparer(resourceGroupName, loadBalancerName) +// the name of the loadBalancer. expand is expand references resources. +func (client LoadBalancersClient) Get(resourceGroupName string, loadBalancerName string, expand string) (result LoadBalancer, err error) { + req, err := client.GetPreparer(resourceGroupName, loadBalancerName, expand) if err != nil { - return result, autorest.NewErrorWithError(err, "network/LoadBalancersClient", "Get", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "network.LoadBalancersClient", "Get", nil, "Failure preparing request") } resp, err := client.GetSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network/LoadBalancersClient", "Get", "Failure sending request") + return result, autorest.NewErrorWithError(err, "network.LoadBalancersClient", "Get", resp, "Failure sending request") } result, err = client.GetResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "network/LoadBalancersClient", "Get", "Failure responding to request") + err = autorest.NewErrorWithError(err, "network.LoadBalancersClient", "Get", resp, "Failure responding to request") } return } // GetPreparer prepares the Get request. 
-func (client LoadBalancersClient) GetPreparer(resourceGroupName string, loadBalancerName string) (*http.Request, error) { +func (client LoadBalancersClient) GetPreparer(resourceGroupName string, loadBalancerName string, expand string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "loadBalancerName": url.QueryEscape(loadBalancerName), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "loadBalancerName": autorest.Encode("path", loadBalancerName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, + } + if len(expand) > 0 { + queryParameters["$expand"] = autorest.Encode("query", expand) } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client LoadBalancersClient) GetSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetResponder handles the response to the Get request. 
The method always @@ -232,7 +242,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -243,21 +253,21 @@ // resource group. // // resourceGroupName is the name of the resource group. -func (client LoadBalancersClient) List(resourceGroupName string) (result LoadBalancerListResult, ae error) { +func (client LoadBalancersClient) List(resourceGroupName string) (result LoadBalancerListResult, err error) { req, err := client.ListPreparer(resourceGroupName) if err != nil { - return result, autorest.NewErrorWithError(err, "network/LoadBalancersClient", "List", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "network.LoadBalancersClient", "List", nil, "Failure preparing request") } resp, err := client.ListSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network/LoadBalancersClient", "List", "Failure sending request") + return result, autorest.NewErrorWithError(err, "network.LoadBalancersClient", "List", resp, "Failure sending request") } result, err = client.ListResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "network/LoadBalancersClient", "List", "Failure responding to request") + err = autorest.NewErrorWithError(err, "network.LoadBalancersClient", "List", resp, "Failure responding to request") } return @@ -266,27 +276,26 @@ // ListPreparer prepares the List request. 
func (client LoadBalancersClient) ListPreparer(resourceGroupName string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client LoadBalancersClient) ListSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // ListResponder handles the response to the List request. The method always @@ -295,7 +304,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -303,10 +312,10 @@ } // ListNextResults retrieves the next set of results, if any. 
-func (client LoadBalancersClient) ListNextResults(lastResults LoadBalancerListResult) (result LoadBalancerListResult, ae error) { +func (client LoadBalancersClient) ListNextResults(lastResults LoadBalancerListResult) (result LoadBalancerListResult, err error) { req, err := lastResults.LoadBalancerListResultPreparer() if err != nil { - return result, autorest.NewErrorWithError(err, "network/LoadBalancersClient", "List", "Failure preparing next results request request") + return result, autorest.NewErrorWithError(err, "network.LoadBalancersClient", "List", nil, "Failure preparing next results request request") } if req == nil { return @@ -315,12 +324,12 @@ resp, err := client.ListSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network/LoadBalancersClient", "List", "Failure sending next results request request") + return result, autorest.NewErrorWithError(err, "network.LoadBalancersClient", "List", resp, "Failure sending next results request request") } result, err = client.ListResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "network/LoadBalancersClient", "List", "Failure responding to next results request request") + err = autorest.NewErrorWithError(err, "network.LoadBalancersClient", "List", resp, "Failure responding to next results request request") } return @@ -328,21 +337,21 @@ // ListAll the List loadBalancer opertion retrieves all the loadbalancers in a // subscription. 
-func (client LoadBalancersClient) ListAll() (result LoadBalancerListResult, ae error) { +func (client LoadBalancersClient) ListAll() (result LoadBalancerListResult, err error) { req, err := client.ListAllPreparer() if err != nil { - return result, autorest.NewErrorWithError(err, "network/LoadBalancersClient", "ListAll", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "network.LoadBalancersClient", "ListAll", nil, "Failure preparing request") } resp, err := client.ListAllSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network/LoadBalancersClient", "ListAll", "Failure sending request") + return result, autorest.NewErrorWithError(err, "network.LoadBalancersClient", "ListAll", resp, "Failure sending request") } result, err = client.ListAllResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "network/LoadBalancersClient", "ListAll", "Failure responding to request") + err = autorest.NewErrorWithError(err, "network.LoadBalancersClient", "ListAll", resp, "Failure responding to request") } return @@ -351,26 +360,25 @@ // ListAllPreparer prepares the ListAll request. 
func (client LoadBalancersClient) ListAllPreparer() (*http.Request, error) { pathParameters := map[string]interface{}{ - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/providers/Microsoft.Network/loadBalancers"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Network/loadBalancers", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // ListAllSender sends the ListAll request. The method will close the // http.Response Body if it receives an error. func (client LoadBalancersClient) ListAllSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // ListAllResponder handles the response to the ListAll request. The method always @@ -379,7 +387,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -387,10 +395,10 @@ } // ListAllNextResults retrieves the next set of results, if any. 
-func (client LoadBalancersClient) ListAllNextResults(lastResults LoadBalancerListResult) (result LoadBalancerListResult, ae error) { +func (client LoadBalancersClient) ListAllNextResults(lastResults LoadBalancerListResult) (result LoadBalancerListResult, err error) { req, err := lastResults.LoadBalancerListResultPreparer() if err != nil { - return result, autorest.NewErrorWithError(err, "network/LoadBalancersClient", "ListAll", "Failure preparing next results request request") + return result, autorest.NewErrorWithError(err, "network.LoadBalancersClient", "ListAll", nil, "Failure preparing next results request request") } if req == nil { return @@ -399,12 +407,12 @@ resp, err := client.ListAllSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network/LoadBalancersClient", "ListAll", "Failure sending next results request request") + return result, autorest.NewErrorWithError(err, "network.LoadBalancersClient", "ListAll", resp, "Failure sending next results request request") } result, err = client.ListAllResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "network/LoadBalancersClient", "ListAll", "Failure responding to next results request request") + err = autorest.NewErrorWithError(err, "network.LoadBalancersClient", "ListAll", resp, "Failure responding to next results request request") } return diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/network/localnetworkgateways.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/network/localnetworkgateways.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/network/localnetworkgateways.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/network/localnetworkgateways.go 2016-10-13 14:32:06.000000000 +0000 @@ -14,21 +14,21 @@ // See the License for the specific language governing permissions and // limitations under the License. 
// -// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 // Changes may cause incorrect behavior and will be lost if the code is // regenerated. import ( - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" "net/http" - "net/url" ) -// LocalNetworkGatewaysClient is the the Windows Azure Network management API -// provides a RESTful set of web services that interact with Windows Azure -// Networks service to manage your network resrources. The API has entities -// that capture the relationship between an end user and the Windows Azure -// Networks service. +// LocalNetworkGatewaysClient is the the Microsoft Azure Network management +// API provides a RESTful set of web services that interact with Microsoft +// Azure Networks service to manage your network resrources. The API has +// entities that capture the relationship between an end user and the +// Microsoft Azure Networks service. type LocalNetworkGatewaysClient struct { ManagementClient } @@ -47,123 +47,130 @@ // CreateOrUpdate the Put LocalNetworkGateway operation creates/updates a // local network gateway in the specified resource group through Network -// resource provider. +// resource provider. This method may poll for completion. Polling can be +// canceled by passing the cancel channel argument. The channel will be used +// to cancel polling and any outstanding HTTP requests. // // resourceGroupName is the name of the resource group. // localNetworkGatewayName is the name of the local network gateway. // parameters is parameters supplied to the Begin Create or update Local // Network Gateway operation through Network resource provider. 
-func (client LocalNetworkGatewaysClient) CreateOrUpdate(resourceGroupName string, localNetworkGatewayName string, parameters LocalNetworkGateway) (result LocalNetworkGateway, ae error) { - req, err := client.CreateOrUpdatePreparer(resourceGroupName, localNetworkGatewayName, parameters) +func (client LocalNetworkGatewaysClient) CreateOrUpdate(resourceGroupName string, localNetworkGatewayName string, parameters LocalNetworkGateway, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.CreateOrUpdatePreparer(resourceGroupName, localNetworkGatewayName, parameters, cancel) if err != nil { - return result, autorest.NewErrorWithError(err, "network/LocalNetworkGatewaysClient", "CreateOrUpdate", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "network.LocalNetworkGatewaysClient", "CreateOrUpdate", nil, "Failure preparing request") } resp, err := client.CreateOrUpdateSender(req) if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network/LocalNetworkGatewaysClient", "CreateOrUpdate", "Failure sending request") + result.Response = resp + return result, autorest.NewErrorWithError(err, "network.LocalNetworkGatewaysClient", "CreateOrUpdate", resp, "Failure sending request") } result, err = client.CreateOrUpdateResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "network/LocalNetworkGatewaysClient", "CreateOrUpdate", "Failure responding to request") + err = autorest.NewErrorWithError(err, "network.LocalNetworkGatewaysClient", "CreateOrUpdate", resp, "Failure responding to request") } return } // CreateOrUpdatePreparer prepares the CreateOrUpdate request. 
-func (client LocalNetworkGatewaysClient) CreateOrUpdatePreparer(resourceGroupName string, localNetworkGatewayName string, parameters LocalNetworkGateway) (*http.Request, error) { +func (client LocalNetworkGatewaysClient) CreateOrUpdatePreparer(resourceGroupName string, localNetworkGatewayName string, parameters LocalNetworkGateway, cancel <-chan struct{}) (*http.Request, error) { pathParameters := map[string]interface{}{ - "localNetworkGatewayName": url.QueryEscape(localNetworkGatewayName), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "localNetworkGatewayName": autorest.Encode("path", localNetworkGatewayName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, + preparer := autorest.CreatePreparer( autorest.AsJSON(), autorest.AsPut(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways/{localNetworkGatewayName}"), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways/{localNetworkGatewayName}", pathParameters), autorest.WithJSON(parameters), - autorest.WithPathParameters(pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) } // CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the // http.Response Body if it receives an error. 
func (client LocalNetworkGatewaysClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusCreated, http.StatusOK) + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) } // CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always // closes the http.Response Body. -func (client LocalNetworkGatewaysClient) CreateOrUpdateResponder(resp *http.Response) (result LocalNetworkGateway, err error) { +func (client LocalNetworkGatewaysClient) CreateOrUpdateResponder(resp *http.Response) (result autorest.Response, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusCreated, http.StatusOK), - autorest.ByUnmarshallingJSON(&result), + azure.WithErrorUnlessStatusCode(http.StatusCreated, http.StatusOK), autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} + result.Response = resp return } // Delete the Delete LocalNetworkGateway operation deletes the specifed local -// network Gateway through Network resource provider. +// network Gateway through Network resource provider. This method may poll +// for completion. Polling can be canceled by passing the cancel channel +// argument. The channel will be used to cancel polling and any outstanding +// HTTP requests. // // resourceGroupName is the name of the resource group. // localNetworkGatewayName is the name of the local network gateway. 
-func (client LocalNetworkGatewaysClient) Delete(resourceGroupName string, localNetworkGatewayName string) (result autorest.Response, ae error) { - req, err := client.DeletePreparer(resourceGroupName, localNetworkGatewayName) +func (client LocalNetworkGatewaysClient) Delete(resourceGroupName string, localNetworkGatewayName string, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.DeletePreparer(resourceGroupName, localNetworkGatewayName, cancel) if err != nil { - return result, autorest.NewErrorWithError(err, "network/LocalNetworkGatewaysClient", "Delete", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "network.LocalNetworkGatewaysClient", "Delete", nil, "Failure preparing request") } resp, err := client.DeleteSender(req) if err != nil { result.Response = resp - return result, autorest.NewErrorWithError(err, "network/LocalNetworkGatewaysClient", "Delete", "Failure sending request") + return result, autorest.NewErrorWithError(err, "network.LocalNetworkGatewaysClient", "Delete", resp, "Failure sending request") } result, err = client.DeleteResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "network/LocalNetworkGatewaysClient", "Delete", "Failure responding to request") + err = autorest.NewErrorWithError(err, "network.LocalNetworkGatewaysClient", "Delete", resp, "Failure responding to request") } return } // DeletePreparer prepares the Delete request. 
-func (client LocalNetworkGatewaysClient) DeletePreparer(resourceGroupName string, localNetworkGatewayName string) (*http.Request, error) { +func (client LocalNetworkGatewaysClient) DeletePreparer(resourceGroupName string, localNetworkGatewayName string, cancel <-chan struct{}) (*http.Request, error) { pathParameters := map[string]interface{}{ - "localNetworkGatewayName": url.QueryEscape(localNetworkGatewayName), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "localNetworkGatewayName": autorest.Encode("path", localNetworkGatewayName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsDelete(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways/{localNetworkGatewayName}/"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways/{localNetworkGatewayName}", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) } // DeleteSender sends the Delete request. The method will close the // http.Response Body if it receives an error. func (client LocalNetworkGatewaysClient) DeleteSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusNoContent, http.StatusAccepted, http.StatusOK) + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) } // DeleteResponder handles the response to the Delete request. 
The method always @@ -172,7 +179,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusNoContent, http.StatusAccepted, http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusNoContent, http.StatusOK, http.StatusAccepted), autorest.ByClosing()) result.Response = resp return @@ -183,21 +190,21 @@ // // resourceGroupName is the name of the resource group. // localNetworkGatewayName is the name of the local network gateway. -func (client LocalNetworkGatewaysClient) Get(resourceGroupName string, localNetworkGatewayName string) (result LocalNetworkGateway, ae error) { +func (client LocalNetworkGatewaysClient) Get(resourceGroupName string, localNetworkGatewayName string) (result LocalNetworkGateway, err error) { req, err := client.GetPreparer(resourceGroupName, localNetworkGatewayName) if err != nil { - return result, autorest.NewErrorWithError(err, "network/LocalNetworkGatewaysClient", "Get", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "network.LocalNetworkGatewaysClient", "Get", nil, "Failure preparing request") } resp, err := client.GetSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network/LocalNetworkGatewaysClient", "Get", "Failure sending request") + return result, autorest.NewErrorWithError(err, "network.LocalNetworkGatewaysClient", "Get", resp, "Failure sending request") } result, err = client.GetResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "network/LocalNetworkGatewaysClient", "Get", "Failure responding to request") + err = autorest.NewErrorWithError(err, "network.LocalNetworkGatewaysClient", "Get", resp, "Failure responding to request") } return @@ -206,28 +213,27 @@ // GetPreparer prepares the Get request. 
func (client LocalNetworkGatewaysClient) GetPreparer(resourceGroupName string, localNetworkGatewayName string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "localNetworkGatewayName": url.QueryEscape(localNetworkGatewayName), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "localNetworkGatewayName": autorest.Encode("path", localNetworkGatewayName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways/{localNetworkGatewayName}"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways/{localNetworkGatewayName}", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client LocalNetworkGatewaysClient) GetSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetResponder handles the response to the Get request. 
The method always @@ -236,7 +242,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -247,21 +253,21 @@ // gateways stored. // // resourceGroupName is the name of the resource group. -func (client LocalNetworkGatewaysClient) List(resourceGroupName string) (result LocalNetworkGatewayListResult, ae error) { +func (client LocalNetworkGatewaysClient) List(resourceGroupName string) (result LocalNetworkGatewayListResult, err error) { req, err := client.ListPreparer(resourceGroupName) if err != nil { - return result, autorest.NewErrorWithError(err, "network/LocalNetworkGatewaysClient", "List", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "network.LocalNetworkGatewaysClient", "List", nil, "Failure preparing request") } resp, err := client.ListSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network/LocalNetworkGatewaysClient", "List", "Failure sending request") + return result, autorest.NewErrorWithError(err, "network.LocalNetworkGatewaysClient", "List", resp, "Failure sending request") } result, err = client.ListResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "network/LocalNetworkGatewaysClient", "List", "Failure responding to request") + err = autorest.NewErrorWithError(err, "network.LocalNetworkGatewaysClient", "List", resp, "Failure responding to request") } return @@ -270,27 +276,26 @@ // ListPreparer prepares the List request. 
func (client LocalNetworkGatewaysClient) ListPreparer(resourceGroupName string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client LocalNetworkGatewaysClient) ListSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // ListResponder handles the response to the List request. The method always @@ -299,7 +304,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -307,10 +312,10 @@ } // ListNextResults retrieves the next set of results, if any. 
-func (client LocalNetworkGatewaysClient) ListNextResults(lastResults LocalNetworkGatewayListResult) (result LocalNetworkGatewayListResult, ae error) { +func (client LocalNetworkGatewaysClient) ListNextResults(lastResults LocalNetworkGatewayListResult) (result LocalNetworkGatewayListResult, err error) { req, err := lastResults.LocalNetworkGatewayListResultPreparer() if err != nil { - return result, autorest.NewErrorWithError(err, "network/LocalNetworkGatewaysClient", "List", "Failure preparing next results request request") + return result, autorest.NewErrorWithError(err, "network.LocalNetworkGatewaysClient", "List", nil, "Failure preparing next results request request") } if req == nil { return @@ -319,12 +324,12 @@ resp, err := client.ListSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network/LocalNetworkGatewaysClient", "List", "Failure sending next results request request") + return result, autorest.NewErrorWithError(err, "network.LocalNetworkGatewaysClient", "List", resp, "Failure sending next results request request") } result, err = client.ListResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "network/LocalNetworkGatewaysClient", "List", "Failure responding to next results request request") + err = autorest.NewErrorWithError(err, "network.LocalNetworkGatewaysClient", "List", resp, "Failure responding to next results request request") } return diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/network/models.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/network/models.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/network/models.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/network/models.go 2016-10-13 14:32:06.000000000 +0000 @@ -14,13 +14,13 @@ // See the License for the specific language governing permissions and // limitations under the 
License. // -// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 // Changes may cause incorrect behavior and will be lost if the code is // regenerated. import ( - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/to" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/to" "net/http" ) @@ -75,6 +75,9 @@ // Basic specifies the basic state for application gateway request routing // rule type. Basic ApplicationGatewayRequestRoutingRuleType = "Basic" + // PathBasedRouting specifies the path based routing state for application + // gateway request routing rule type. + PathBasedRouting ApplicationGatewayRequestRoutingRuleType = "PathBasedRouting" ) // ApplicationGatewaySkuName enumerates the values for application gateway sku @@ -197,6 +200,16 @@ Static IPAllocationMethod = "Static" ) +// IPVersion enumerates the values for ip version. +type IPVersion string + +const ( + // IPv4 specifies the i pv 4 state for ip version. + IPv4 IPVersion = "IPv4" + // IPv6 specifies the i pv 6 state for ip version. + IPv6 IPVersion = "IPv6" +) + // LoadDistribution enumerates the values for load distribution. type LoadDistribution string @@ -234,6 +247,16 @@ ProbeProtocolTCP ProbeProtocol = "Tcp" ) +// ProcessorArchitecture enumerates the values for processor architecture. +type ProcessorArchitecture string + +const ( + // Amd64 specifies the amd 64 state for processor architecture. + Amd64 ProcessorArchitecture = "Amd64" + // X86 specifies the x86 state for processor architecture. + X86 ProcessorArchitecture = "X86" +) + // RouteNextHopType enumerates the values for route next hop type. 
type RouteNextHopType string @@ -318,14 +341,6 @@ TransportProtocolUDP TransportProtocol = "Udp" ) -// UsageUnit enumerates the values for usage unit. -type UsageUnit string - -const ( - // Count specifies the count state for usage unit. - Count UsageUnit = "Count" -) - // VirtualNetworkGatewayConnectionStatus enumerates the values for virtual // network gateway connection status. type VirtualNetworkGatewayConnectionStatus string @@ -364,6 +379,40 @@ VPNClient VirtualNetworkGatewayConnectionType = "VPNClient" ) +// VirtualNetworkGatewaySkuName enumerates the values for virtual network +// gateway sku name. +type VirtualNetworkGatewaySkuName string + +const ( + // VirtualNetworkGatewaySkuNameBasic specifies the virtual network gateway + // sku name basic state for virtual network gateway sku name. + VirtualNetworkGatewaySkuNameBasic VirtualNetworkGatewaySkuName = "Basic" + // VirtualNetworkGatewaySkuNameHighPerformance specifies the virtual + // network gateway sku name high performance state for virtual network + // gateway sku name. + VirtualNetworkGatewaySkuNameHighPerformance VirtualNetworkGatewaySkuName = "HighPerformance" + // VirtualNetworkGatewaySkuNameStandard specifies the virtual network + // gateway sku name standard state for virtual network gateway sku name. + VirtualNetworkGatewaySkuNameStandard VirtualNetworkGatewaySkuName = "Standard" +) + +// VirtualNetworkGatewaySkuTier enumerates the values for virtual network +// gateway sku tier. +type VirtualNetworkGatewaySkuTier string + +const ( + // VirtualNetworkGatewaySkuTierBasic specifies the virtual network gateway + // sku tier basic state for virtual network gateway sku tier. + VirtualNetworkGatewaySkuTierBasic VirtualNetworkGatewaySkuTier = "Basic" + // VirtualNetworkGatewaySkuTierHighPerformance specifies the virtual + // network gateway sku tier high performance state for virtual network + // gateway sku tier. 
+ VirtualNetworkGatewaySkuTierHighPerformance VirtualNetworkGatewaySkuTier = "HighPerformance" + // VirtualNetworkGatewaySkuTierStandard specifies the virtual network + // gateway sku tier standard state for virtual network gateway sku tier. + VirtualNetworkGatewaySkuTierStandard VirtualNetworkGatewaySkuTier = "Standard" +) + // VirtualNetworkGatewayType enumerates the values for virtual network gateway // type. type VirtualNetworkGatewayType string @@ -423,7 +472,7 @@ // ApplicationGatewayBackendAddressPoolPropertiesFormat is properties of // Backend Address Pool of application gateway type ApplicationGatewayBackendAddressPoolPropertiesFormat struct { - BackendIPConfigurations *[]SubResource `json:"backendIPConfigurations,omitempty"` + BackendIPConfigurations *[]InterfaceIPConfiguration `json:"backendIPConfigurations,omitempty"` BackendAddresses *[]ApplicationGatewayBackendAddress `json:"backendAddresses,omitempty"` ProvisioningState *string `json:"provisioningState,omitempty"` } @@ -440,9 +489,11 @@ // ApplicationGatewayBackendHTTPSettingsPropertiesFormat is properties of // Backend address pool settings of application gateway type ApplicationGatewayBackendHTTPSettingsPropertiesFormat struct { - Port *int `json:"port,omitempty"` + Port *int32 `json:"port,omitempty"` Protocol ApplicationGatewayProtocol `json:"protocol,omitempty"` CookieBasedAffinity ApplicationGatewayCookieBasedAffinity `json:"cookieBasedAffinity,omitempty"` + RequestTimeout *int32 `json:"requestTimeout,omitempty"` + Probe *SubResource `json:"probe,omitempty"` ProvisioningState *string `json:"provisioningState,omitempty"` } @@ -476,7 +527,7 @@ // ApplicationGatewayFrontendPortPropertiesFormat is properties of Frontend // Port of application gateway type ApplicationGatewayFrontendPortPropertiesFormat struct { - Port *int `json:"port,omitempty"` + Port *int32 `json:"port,omitempty"` ProvisioningState *string `json:"provisioningState,omitempty"` } @@ -491,11 +542,13 @@ // 
ApplicationGatewayHTTPListenerPropertiesFormat is properties of Http // listener of application gateway type ApplicationGatewayHTTPListenerPropertiesFormat struct { - FrontendIPConfiguration *SubResource `json:"frontendIPConfiguration,omitempty"` - FrontendPort *SubResource `json:"frontendPort,omitempty"` - Protocol ApplicationGatewayProtocol `json:"protocol,omitempty"` - SslCertificate *SubResource `json:"sslCertificate,omitempty"` - ProvisioningState *string `json:"provisioningState,omitempty"` + FrontendIPConfiguration *SubResource `json:"frontendIPConfiguration,omitempty"` + FrontendPort *SubResource `json:"frontendPort,omitempty"` + Protocol ApplicationGatewayProtocol `json:"protocol,omitempty"` + HostName *string `json:"hostName,omitempty"` + SslCertificate *SubResource `json:"sslCertificate,omitempty"` + RequireServerNameIndication *bool `json:"requireServerNameIndication,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` } // ApplicationGatewayIPConfiguration is iP configuration of application gateway @@ -533,6 +586,44 @@ autorest.WithBaseURL(to.String(client.NextLink))) } +// ApplicationGatewayPathRule is path rule of URL path map of application +// gateway +type ApplicationGatewayPathRule struct { + ID *string `json:"id,omitempty"` + Properties *ApplicationGatewayPathRulePropertiesFormat `json:"properties,omitempty"` + Name *string `json:"name,omitempty"` + Etag *string `json:"etag,omitempty"` +} + +// ApplicationGatewayPathRulePropertiesFormat is properties of probe of +// application gateway +type ApplicationGatewayPathRulePropertiesFormat struct { + Paths *[]string `json:"paths,omitempty"` + BackendAddressPool *SubResource `json:"backendAddressPool,omitempty"` + BackendHTTPSettings *SubResource `json:"backendHttpSettings,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// ApplicationGatewayProbe is probe of application gateway +type ApplicationGatewayProbe struct { + ID *string 
`json:"id,omitempty"` + Properties *ApplicationGatewayProbePropertiesFormat `json:"properties,omitempty"` + Name *string `json:"name,omitempty"` + Etag *string `json:"etag,omitempty"` +} + +// ApplicationGatewayProbePropertiesFormat is properties of probe of +// application gateway +type ApplicationGatewayProbePropertiesFormat struct { + Protocol ApplicationGatewayProtocol `json:"protocol,omitempty"` + Host *string `json:"host,omitempty"` + Path *string `json:"path,omitempty"` + Interval *int32 `json:"interval,omitempty"` + Timeout *int32 `json:"timeout,omitempty"` + UnhealthyThreshold *int32 `json:"unhealthyThreshold,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` +} + // ApplicationGatewayPropertiesFormat is properties of Application Gateway type ApplicationGatewayPropertiesFormat struct { Sku *ApplicationGatewaySku `json:"sku,omitempty"` @@ -541,9 +632,11 @@ SslCertificates *[]ApplicationGatewaySslCertificate `json:"sslCertificates,omitempty"` FrontendIPConfigurations *[]ApplicationGatewayFrontendIPConfiguration `json:"frontendIPConfigurations,omitempty"` FrontendPorts *[]ApplicationGatewayFrontendPort `json:"frontendPorts,omitempty"` + Probes *[]ApplicationGatewayProbe `json:"probes,omitempty"` BackendAddressPools *[]ApplicationGatewayBackendAddressPool `json:"backendAddressPools,omitempty"` BackendHTTPSettingsCollection *[]ApplicationGatewayBackendHTTPSettings `json:"backendHttpSettingsCollection,omitempty"` HTTPListeners *[]ApplicationGatewayHTTPListener `json:"httpListeners,omitempty"` + URLPathMaps *[]ApplicationGatewayURLPathMap `json:"urlPathMaps,omitempty"` RequestRoutingRules *[]ApplicationGatewayRequestRoutingRule `json:"requestRoutingRules,omitempty"` ResourceGUID *string `json:"resourceGuid,omitempty"` ProvisioningState *string `json:"provisioningState,omitempty"` @@ -565,6 +658,7 @@ BackendAddressPool *SubResource `json:"backendAddressPool,omitempty"` BackendHTTPSettings *SubResource 
`json:"backendHttpSettings,omitempty"` HTTPListener *SubResource `json:"httpListener,omitempty"` + URLPathMap *SubResource `json:"urlPathMap,omitempty"` ProvisioningState *string `json:"provisioningState,omitempty"` } @@ -572,7 +666,7 @@ type ApplicationGatewaySku struct { Name ApplicationGatewaySkuName `json:"name,omitempty"` Tier ApplicationGatewayTier `json:"tier,omitempty"` - Capacity *int `json:"capacity,omitempty"` + Capacity *int32 `json:"capacity,omitempty"` } // ApplicationGatewaySslCertificate is sSL certificates of application gateway @@ -592,6 +686,23 @@ ProvisioningState *string `json:"provisioningState,omitempty"` } +// ApplicationGatewayURLPathMap is urlPathMap of application gateway +type ApplicationGatewayURLPathMap struct { + ID *string `json:"id,omitempty"` + Properties *ApplicationGatewayURLPathMapPropertiesFormat `json:"properties,omitempty"` + Name *string `json:"name,omitempty"` + Etag *string `json:"etag,omitempty"` +} + +// ApplicationGatewayURLPathMapPropertiesFormat is properties of probe of +// application gateway +type ApplicationGatewayURLPathMapPropertiesFormat struct { + DefaultBackendAddressPool *SubResource `json:"defaultBackendAddressPool,omitempty"` + DefaultBackendHTTPSettings *SubResource `json:"defaultBackendHttpSettings,omitempty"` + PathRules *[]ApplicationGatewayPathRule `json:"pathRules,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` +} + // AuthorizationListResult is response for ListAuthorizations Api service // callRetrieves all authorizations that belongs to an ExpressRouteCircuit type AuthorizationListResult struct { @@ -642,16 +753,23 @@ // BackendAddressPoolPropertiesFormat is properties of BackendAddressPool type BackendAddressPoolPropertiesFormat struct { - BackendIPConfigurations *[]SubResource `json:"backendIPConfigurations,omitempty"` - LoadBalancingRules *[]SubResource `json:"loadBalancingRules,omitempty"` - OutboundNatRule *SubResource `json:"outboundNatRule,omitempty"` - 
ProvisioningState *string `json:"provisioningState,omitempty"` + BackendIPConfigurations *[]InterfaceIPConfiguration `json:"backendIPConfigurations,omitempty"` + LoadBalancingRules *[]SubResource `json:"loadBalancingRules,omitempty"` + OutboundNatRule *SubResource `json:"outboundNatRule,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// BgpSettings is +type BgpSettings struct { + Asn *int64 `json:"asn,omitempty"` + BgpPeeringAddress *string `json:"bgpPeeringAddress,omitempty"` + PeerWeight *int32 `json:"peerWeight,omitempty"` } // ConnectionResetSharedKey is type ConnectionResetSharedKey struct { autorest.Response `json:"-"` - KeyLength *int32 `json:"keyLength,omitempty"` + KeyLength *int64 `json:"keyLength,omitempty"` } // ConnectionSharedKey is response for GetConnectionSharedKey Api servive call @@ -660,6 +778,13 @@ Value *string `json:"value,omitempty"` } +// ConnectionSharedKeyResult is response for CheckConnectionSharedKey Api +// servive call +type ConnectionSharedKeyResult struct { + autorest.Response `json:"-"` + Value *string `json:"value,omitempty"` +} + // DhcpOptions is dHCPOptions contains an array of DNS servers available to // VMs deployed in the virtual networkStandard DHCP option for a subnet // overrides VNET DHCP options. 
@@ -706,6 +831,8 @@ // ExpressRouteCircuitArpTable is the arp table associated with the // ExpressRouteCircuit type ExpressRouteCircuitArpTable struct { + Age *int32 `json:"age,omitempty"` + Interface *string `json:"interface,omitempty"` IPAddress *string `json:"ipAddress,omitempty"` MacAddress *string `json:"macAddress,omitempty"` } @@ -753,7 +880,7 @@ type ExpressRouteCircuitPeeringConfig struct { AdvertisedPublicPrefixes *[]string `json:"advertisedPublicPrefixes,omitempty"` AdvertisedPublicPrefixesState ExpressRouteCircuitPeeringAdvertisedPublicPrefixState `json:"advertisedPublicPrefixesState,omitempty"` - CustomerASN *int `json:"customerASN,omitempty"` + CustomerASN *int32 `json:"customerASN,omitempty"` RoutingRegistryName *string `json:"routingRegistryName,omitempty"` } @@ -781,14 +908,14 @@ type ExpressRouteCircuitPeeringPropertiesFormat struct { PeeringType ExpressRouteCircuitPeeringType `json:"peeringType,omitempty"` State ExpressRouteCircuitPeeringState `json:"state,omitempty"` - AzureASN *int `json:"azureASN,omitempty"` - PeerASN *int `json:"peerASN,omitempty"` + AzureASN *int32 `json:"azureASN,omitempty"` + PeerASN *int32 `json:"peerASN,omitempty"` PrimaryPeerAddressPrefix *string `json:"primaryPeerAddressPrefix,omitempty"` SecondaryPeerAddressPrefix *string `json:"secondaryPeerAddressPrefix,omitempty"` PrimaryAzurePort *string `json:"primaryAzurePort,omitempty"` SecondaryAzurePort *string `json:"secondaryAzurePort,omitempty"` SharedKey *string `json:"sharedKey,omitempty"` - VlanID *int `json:"vlanId,omitempty"` + VlanID *int32 `json:"vlanId,omitempty"` MicrosoftPeeringConfig *ExpressRouteCircuitPeeringConfig `json:"microsoftPeeringConfig,omitempty"` Stats *ExpressRouteCircuitStats `json:"stats,omitempty"` ProvisioningState *string `json:"provisioningState,omitempty"` @@ -796,6 +923,7 @@ // ExpressRouteCircuitPropertiesFormat is properties of ExpressRouteCircuit type ExpressRouteCircuitPropertiesFormat struct { + AllowClassicOperations *bool 
`json:"allowClassicOperations,omitempty"` CircuitProvisioningState *string `json:"circuitProvisioningState,omitempty"` ServiceProviderProvisioningState ServiceProviderProvisioningState `json:"serviceProviderProvisioningState,omitempty"` Authorizations *[]ExpressRouteCircuitAuthorization `json:"authorizations,omitempty"` @@ -809,10 +937,21 @@ // ExpressRouteCircuitRoutesTable is the routes table associated with the // ExpressRouteCircuit type ExpressRouteCircuitRoutesTable struct { - AddressPrefix *string `json:"addressPrefix,omitempty"` - NextHopType RouteNextHopType `json:"nextHopType,omitempty"` - NextHopIP *string `json:"nextHopIP,omitempty"` - AsPath *string `json:"asPath,omitempty"` + Network *string `json:"network,omitempty"` + NextHop *string `json:"nextHop,omitempty"` + LocPrf *string `json:"locPrf,omitempty"` + Weight *int32 `json:"weight,omitempty"` + Path *string `json:"path,omitempty"` +} + +// ExpressRouteCircuitRoutesTableSummary is the routes table associated with +// the ExpressRouteCircuit +type ExpressRouteCircuitRoutesTableSummary struct { + Neighbor *string `json:"neighbor,omitempty"` + V *int32 `json:"v,omitempty"` + As *int32 `json:"as,omitempty"` + UpDown *string `json:"upDown,omitempty"` + StatePfxRcd *string `json:"statePfxRcd,omitempty"` } // ExpressRouteCircuitsArpTableListResult is response for ListArpTable @@ -823,24 +962,12 @@ NextLink *string `json:"nextLink,omitempty"` } -// ExpressRouteCircuitsArpTableListResultPreparer prepares a request to retrieve the next set of results. It returns -// nil if no more results exist. 
-func (client ExpressRouteCircuitsArpTableListResult) ExpressRouteCircuitsArpTableListResultPreparer() (*http.Request, error) { - if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { - return nil, nil - } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(to.String(client.NextLink))) -} - // ExpressRouteCircuitServiceProviderProperties is contains // ServiceProviderProperties in an ExpressRouteCircuit type ExpressRouteCircuitServiceProviderProperties struct { ServiceProviderName *string `json:"serviceProviderName,omitempty"` PeeringLocation *string `json:"peeringLocation,omitempty"` - BandwidthInMbps *int `json:"bandwidthInMbps,omitempty"` + BandwidthInMbps *int32 `json:"bandwidthInMbps,omitempty"` } // ExpressRouteCircuitSku is contains sku in an ExpressRouteCircuit @@ -858,42 +985,21 @@ NextLink *string `json:"nextLink,omitempty"` } -// ExpressRouteCircuitsRoutesTableListResultPreparer prepares a request to retrieve the next set of results. It returns -// nil if no more results exist. 
-func (client ExpressRouteCircuitsRoutesTableListResult) ExpressRouteCircuitsRoutesTableListResultPreparer() (*http.Request, error) { - if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { - return nil, nil - } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(to.String(client.NextLink))) -} - -// ExpressRouteCircuitsStatsListResult is response for ListStats from Express -// Route Circuits Api service call -type ExpressRouteCircuitsStatsListResult struct { +// ExpressRouteCircuitsRoutesTableSummaryListResult is response for +// ListRoutesTable associated with the Express Route Circuits Api +type ExpressRouteCircuitsRoutesTableSummaryListResult struct { autorest.Response `json:"-"` - Value *[]ExpressRouteCircuitStats `json:"value,omitempty"` - NextLink *string `json:"nextLink,omitempty"` -} - -// ExpressRouteCircuitsStatsListResultPreparer prepares a request to retrieve the next set of results. It returns -// nil if no more results exist. 
-func (client ExpressRouteCircuitsStatsListResult) ExpressRouteCircuitsStatsListResultPreparer() (*http.Request, error) { - if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { - return nil, nil - } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(to.String(client.NextLink))) + Value *[]ExpressRouteCircuitRoutesTableSummary `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` } // ExpressRouteCircuitStats is contains Stats associated with the peering type ExpressRouteCircuitStats struct { - BytesIn *int `json:"bytesIn,omitempty"` - BytesOut *int `json:"bytesOut,omitempty"` + autorest.Response `json:"-"` + PrimarybytesIn *int64 `json:"primarybytesIn,omitempty"` + PrimarybytesOut *int64 `json:"primarybytesOut,omitempty"` + SecondarybytesIn *int64 `json:"secondarybytesIn,omitempty"` + SecondarybytesOut *int64 `json:"secondarybytesOut,omitempty"` } // ExpressRouteServiceProvider is expressRouteResourceProvider object @@ -901,6 +1007,8 @@ ID *string `json:"id,omitempty"` Name *string `json:"name,omitempty"` Type *string `json:"type,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` Properties *ExpressRouteServiceProviderPropertiesFormat `json:"properties,omitempty"` } @@ -908,7 +1016,7 @@ // in ExpressRouteServiceProviders type ExpressRouteServiceProviderBandwidthsOffered struct { OfferName *string `json:"offerName,omitempty"` - ValueInMbps *int `json:"valueInMbps,omitempty"` + ValueInMbps *int32 `json:"valueInMbps,omitempty"` } // ExpressRouteServiceProviderListResult is response for @@ -950,14 +1058,14 @@ // FrontendIPConfigurationPropertiesFormat is properties of Frontend IP // Configuration of the load balancer type FrontendIPConfigurationPropertiesFormat struct { - PrivateIPAddress *string `json:"privateIPAddress,omitempty"` - PrivateIPAllocationMethod IPAllocationMethod `json:"privateIPAllocationMethod,omitempty"` - 
Subnet *SubResource `json:"subnet,omitempty"` - PublicIPAddress *SubResource `json:"publicIPAddress,omitempty"` InboundNatRules *[]SubResource `json:"inboundNatRules,omitempty"` InboundNatPools *[]SubResource `json:"inboundNatPools,omitempty"` OutboundNatRules *[]SubResource `json:"outboundNatRules,omitempty"` LoadBalancingRules *[]SubResource `json:"loadBalancingRules,omitempty"` + PrivateIPAddress *string `json:"privateIPAddress,omitempty"` + PrivateIPAllocationMethod IPAllocationMethod `json:"privateIPAllocationMethod,omitempty"` + Subnet *Subnet `json:"subnet,omitempty"` + PublicIPAddress *PublicIPAddress `json:"publicIPAddress,omitempty"` ProvisioningState *string `json:"provisioningState,omitempty"` } @@ -973,9 +1081,9 @@ type InboundNatPoolPropertiesFormat struct { FrontendIPConfiguration *SubResource `json:"frontendIPConfiguration,omitempty"` Protocol TransportProtocol `json:"protocol,omitempty"` - FrontendPortRangeStart *int `json:"frontendPortRangeStart,omitempty"` - FrontendPortRangeEnd *int `json:"frontendPortRangeEnd,omitempty"` - BackendPort *int `json:"backendPort,omitempty"` + FrontendPortRangeStart *int32 `json:"frontendPortRangeStart,omitempty"` + FrontendPortRangeEnd *int32 `json:"frontendPortRangeEnd,omitempty"` + BackendPort *int32 `json:"backendPort,omitempty"` ProvisioningState *string `json:"provisioningState,omitempty"` } @@ -989,14 +1097,14 @@ // InboundNatRulePropertiesFormat is properties of Inbound NAT rule type InboundNatRulePropertiesFormat struct { - FrontendIPConfiguration *SubResource `json:"frontendIPConfiguration,omitempty"` - BackendIPConfiguration *SubResource `json:"backendIPConfiguration,omitempty"` - Protocol TransportProtocol `json:"protocol,omitempty"` - FrontendPort *int `json:"frontendPort,omitempty"` - BackendPort *int `json:"backendPort,omitempty"` - IdleTimeoutInMinutes *int `json:"idleTimeoutInMinutes,omitempty"` - EnableFloatingIP *bool `json:"enableFloatingIP,omitempty"` - ProvisioningState *string 
`json:"provisioningState,omitempty"` + FrontendIPConfiguration *SubResource `json:"frontendIPConfiguration,omitempty"` + BackendIPConfiguration *InterfaceIPConfiguration `json:"backendIPConfiguration,omitempty"` + Protocol TransportProtocol `json:"protocol,omitempty"` + FrontendPort *int32 `json:"frontendPort,omitempty"` + BackendPort *int32 `json:"backendPort,omitempty"` + IdleTimeoutInMinutes *int32 `json:"idleTimeoutInMinutes,omitempty"` + EnableFloatingIP *bool `json:"enableFloatingIP,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` } // Interface is a NetworkInterface in a resource group @@ -1013,10 +1121,11 @@ // InterfaceDNSSettings is dns Settings of a network interface type InterfaceDNSSettings struct { - DNSServers *[]string `json:"dnsServers,omitempty"` - AppliedDNSServers *[]string `json:"appliedDnsServers,omitempty"` - InternalDNSNameLabel *string `json:"internalDnsNameLabel,omitempty"` - InternalFqdn *string `json:"internalFqdn,omitempty"` + DNSServers *[]string `json:"dnsServers,omitempty"` + AppliedDNSServers *[]string `json:"appliedDnsServers,omitempty"` + InternalDNSNameLabel *string `json:"internalDnsNameLabel,omitempty"` + InternalFqdn *string `json:"internalFqdn,omitempty"` + InternalDomainNameSuffix *string `json:"internalDomainNameSuffix,omitempty"` } // InterfaceIPConfiguration is iPConfiguration in a NetworkInterface @@ -1029,13 +1138,16 @@ // InterfaceIPConfigurationPropertiesFormat is properties of IPConfiguration type InterfaceIPConfigurationPropertiesFormat struct { - PrivateIPAddress *string `json:"privateIPAddress,omitempty"` - PrivateIPAllocationMethod IPAllocationMethod `json:"privateIPAllocationMethod,omitempty"` - Subnet *SubResource `json:"subnet,omitempty"` - PublicIPAddress *SubResource `json:"publicIPAddress,omitempty"` - LoadBalancerBackendAddressPools *[]SubResource `json:"loadBalancerBackendAddressPools,omitempty"` - LoadBalancerInboundNatRules *[]SubResource 
`json:"loadBalancerInboundNatRules,omitempty"` - ProvisioningState *string `json:"provisioningState,omitempty"` + ApplicationGatewayBackendAddressPools *[]ApplicationGatewayBackendAddressPool `json:"applicationGatewayBackendAddressPools,omitempty"` + LoadBalancerBackendAddressPools *[]BackendAddressPool `json:"loadBalancerBackendAddressPools,omitempty"` + LoadBalancerInboundNatRules *[]InboundNatRule `json:"loadBalancerInboundNatRules,omitempty"` + PrivateIPAddress *string `json:"privateIPAddress,omitempty"` + PrivateIPAllocationMethod IPAllocationMethod `json:"privateIPAllocationMethod,omitempty"` + PrivateIPAddressVersion IPVersion `json:"privateIPAddressVersion,omitempty"` + Subnet *Subnet `json:"subnet,omitempty"` + Primary *bool `json:"primary,omitempty"` + PublicIPAddress *PublicIPAddress `json:"publicIPAddress,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` } // InterfaceListResult is response for ListNetworkInterface Api service call @@ -1060,7 +1172,7 @@ // InterfacePropertiesFormat is networkInterface properties. 
type InterfacePropertiesFormat struct { VirtualMachine *SubResource `json:"virtualMachine,omitempty"` - NetworkSecurityGroup *SubResource `json:"networkSecurityGroup,omitempty"` + NetworkSecurityGroup *SecurityGroup `json:"networkSecurityGroup,omitempty"` IPConfigurations *[]InterfaceIPConfiguration `json:"ipConfigurations,omitempty"` DNSSettings *InterfaceDNSSettings `json:"dnsSettings,omitempty"` MacAddress *string `json:"macAddress,omitempty"` @@ -1070,6 +1182,23 @@ ProvisioningState *string `json:"provisioningState,omitempty"` } +// IPConfiguration is iPConfiguration +type IPConfiguration struct { + ID *string `json:"id,omitempty"` + Properties *IPConfigurationPropertiesFormat `json:"properties,omitempty"` + Name *string `json:"name,omitempty"` + Etag *string `json:"etag,omitempty"` +} + +// IPConfigurationPropertiesFormat is properties of IPConfiguration +type IPConfigurationPropertiesFormat struct { + PrivateIPAddress *string `json:"privateIPAddress,omitempty"` + PrivateIPAllocationMethod IPAllocationMethod `json:"privateIPAllocationMethod,omitempty"` + Subnet *Subnet `json:"subnet,omitempty"` + PublicIPAddress *PublicIPAddress `json:"publicIPAddress,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` +} + // LoadBalancer is loadBalancer resource type LoadBalancer struct { autorest.Response `json:"-"` @@ -1129,9 +1258,9 @@ Probe *SubResource `json:"probe,omitempty"` Protocol TransportProtocol `json:"protocol,omitempty"` LoadDistribution LoadDistribution `json:"loadDistribution,omitempty"` - FrontendPort *int `json:"frontendPort,omitempty"` - BackendPort *int `json:"backendPort,omitempty"` - IdleTimeoutInMinutes *int `json:"idleTimeoutInMinutes,omitempty"` + FrontendPort *int32 `json:"frontendPort,omitempty"` + BackendPort *int32 `json:"backendPort,omitempty"` + IdleTimeoutInMinutes *int32 `json:"idleTimeoutInMinutes,omitempty"` EnableFloatingIP *bool `json:"enableFloatingIP,omitempty"` ProvisioningState *string 
`json:"provisioningState,omitempty"` } @@ -1172,6 +1301,7 @@ type LocalNetworkGatewayPropertiesFormat struct { LocalNetworkAddressSpace *AddressSpace `json:"localNetworkAddressSpace,omitempty"` GatewayIPAddress *string `json:"gatewayIpAddress,omitempty"` + BgpSettings *BgpSettings `json:"bgpSettings,omitempty"` ResourceGUID *string `json:"resourceGuid,omitempty"` ProvisioningState *string `json:"provisioningState,omitempty"` } @@ -1186,7 +1316,7 @@ // OutboundNatRulePropertiesFormat is outbound NAT pool of the loadbalancer type OutboundNatRulePropertiesFormat struct { - AllocatedOutboundPorts *int `json:"allocatedOutboundPorts,omitempty"` + AllocatedOutboundPorts *int32 `json:"allocatedOutboundPorts,omitempty"` FrontendIPConfigurations *[]SubResource `json:"frontendIPConfigurations,omitempty"` BackendAddressPool *SubResource `json:"backendAddressPool,omitempty"` ProvisioningState *string `json:"provisioningState,omitempty"` @@ -1204,9 +1334,9 @@ type ProbePropertiesFormat struct { LoadBalancingRules *[]SubResource `json:"loadBalancingRules,omitempty"` Protocol ProbeProtocol `json:"protocol,omitempty"` - Port *int `json:"port,omitempty"` - IntervalInSeconds *int `json:"intervalInSeconds,omitempty"` - NumberOfProbes *int `json:"numberOfProbes,omitempty"` + Port *int32 `json:"port,omitempty"` + IntervalInSeconds *int32 `json:"intervalInSeconds,omitempty"` + NumberOfProbes *int32 `json:"numberOfProbes,omitempty"` RequestPath *string `json:"requestPath,omitempty"` ProvisioningState *string `json:"provisioningState,omitempty"` } @@ -1254,10 +1384,11 @@ // PublicIPAddressPropertiesFormat is publicIpAddress properties type PublicIPAddressPropertiesFormat struct { PublicIPAllocationMethod IPAllocationMethod `json:"publicIPAllocationMethod,omitempty"` - IPConfiguration *SubResource `json:"ipConfiguration,omitempty"` + PublicIPAddressVersion IPVersion `json:"publicIPAddressVersion,omitempty"` + IPConfiguration *IPConfiguration `json:"ipConfiguration,omitempty"` DNSSettings 
*PublicIPAddressDNSSettings `json:"dnsSettings,omitempty"` IPAddress *string `json:"ipAddress,omitempty"` - IdleTimeoutInMinutes *int `json:"idleTimeoutInMinutes,omitempty"` + IdleTimeoutInMinutes *int32 `json:"idleTimeoutInMinutes,omitempty"` ResourceGUID *string `json:"resourceGuid,omitempty"` ProvisioningState *string `json:"provisioningState,omitempty"` } @@ -1340,9 +1471,9 @@ // RouteTablePropertiesFormat is route Table resource type RouteTablePropertiesFormat struct { - Routes *[]Route `json:"routes,omitempty"` - Subnets *[]SubResource `json:"subnets,omitempty"` - ProvisioningState *string `json:"provisioningState,omitempty"` + Routes *[]Route `json:"routes,omitempty"` + Subnets *[]Subnet `json:"subnets,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` } // SecurityGroup is networkSecurityGroup resource @@ -1381,8 +1512,8 @@ type SecurityGroupPropertiesFormat struct { SecurityRules *[]SecurityRule `json:"securityRules,omitempty"` DefaultSecurityRules *[]SecurityRule `json:"defaultSecurityRules,omitempty"` - NetworkInterfaces *[]SubResource `json:"networkInterfaces,omitempty"` - Subnets *[]SubResource `json:"subnets,omitempty"` + NetworkInterfaces *[]Interface `json:"networkInterfaces,omitempty"` + Subnets *[]Subnet `json:"subnets,omitempty"` ResourceGUID *string `json:"resourceGuid,omitempty"` ProvisioningState *string `json:"provisioningState,omitempty"` } @@ -1425,11 +1556,17 @@ SourceAddressPrefix *string `json:"sourceAddressPrefix,omitempty"` DestinationAddressPrefix *string `json:"destinationAddressPrefix,omitempty"` Access SecurityRuleAccess `json:"access,omitempty"` - Priority *int `json:"priority,omitempty"` + Priority *int32 `json:"priority,omitempty"` Direction SecurityRuleDirection `json:"direction,omitempty"` ProvisioningState *string `json:"provisioningState,omitempty"` } +// String is +type String struct { + autorest.Response `json:"-"` + Value *string `json:"value,omitempty"` +} + // Subnet is subnet in a 
VirtualNework resource type Subnet struct { autorest.Response `json:"-"` @@ -1461,11 +1598,11 @@ // SubnetPropertiesFormat is type SubnetPropertiesFormat struct { - AddressPrefix *string `json:"addressPrefix,omitempty"` - NetworkSecurityGroup *SubResource `json:"networkSecurityGroup,omitempty"` - RouteTable *SubResource `json:"routeTable,omitempty"` - IPConfigurations *[]SubResource `json:"ipConfigurations,omitempty"` - ProvisioningState *string `json:"provisioningState,omitempty"` + AddressPrefix *string `json:"addressPrefix,omitempty"` + NetworkSecurityGroup *SecurityGroup `json:"networkSecurityGroup,omitempty"` + RouteTable *RouteTable `json:"routeTable,omitempty"` + IPConfigurations *[]IPConfiguration `json:"ipConfigurations,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` } // SubResource is @@ -1475,9 +1612,9 @@ // Usage is describes Network Resource Usage. type Usage struct { - Unit UsageUnit `json:"unit,omitempty"` - CurrentValue *int `json:"currentValue,omitempty"` - Limit *int32 `json:"limit,omitempty"` + Unit *string `json:"unit,omitempty"` + CurrentValue *int64 `json:"currentValue,omitempty"` + Limit *int64 `json:"limit,omitempty"` Name *UsageName `json:"name,omitempty"` } @@ -1553,16 +1690,18 @@ // VirtualNetworkGatewayConnectionPropertiesFormat is // virtualNeworkGatewayConnection properties type VirtualNetworkGatewayConnectionPropertiesFormat struct { + AuthorizationKey *string `json:"authorizationKey,omitempty"` VirtualNetworkGateway1 *VirtualNetworkGateway `json:"virtualNetworkGateway1,omitempty"` VirtualNetworkGateway2 *VirtualNetworkGateway `json:"virtualNetworkGateway2,omitempty"` LocalNetworkGateway2 *LocalNetworkGateway `json:"localNetworkGateway2,omitempty"` ConnectionType VirtualNetworkGatewayConnectionType `json:"connectionType,omitempty"` - RoutingWeight *int `json:"routingWeight,omitempty"` + RoutingWeight *int32 `json:"routingWeight,omitempty"` SharedKey *string `json:"sharedKey,omitempty"` ConnectionStatus 
VirtualNetworkGatewayConnectionStatus `json:"connectionStatus,omitempty"` - EgressBytesTransferred *int32 `json:"egressBytesTransferred,omitempty"` - IngressBytesTransferred *int32 `json:"ingressBytesTransferred,omitempty"` + EgressBytesTransferred *int64 `json:"egressBytesTransferred,omitempty"` + IngressBytesTransferred *int64 `json:"ingressBytesTransferred,omitempty"` Peer *SubResource `json:"peer,omitempty"` + EnableBgp *bool `json:"enableBgp,omitempty"` ResourceGUID *string `json:"resourceGuid,omitempty"` ProvisioningState *string `json:"provisioningState,omitempty"` } @@ -1608,13 +1747,23 @@ // VirtualNetworkGatewayPropertiesFormat is virtualNeworkGateay properties type VirtualNetworkGatewayPropertiesFormat struct { - IPConfigurations *[]VirtualNetworkGatewayIPConfiguration `json:"ipConfigurations,omitempty"` - GatewayType VirtualNetworkGatewayType `json:"gatewayType,omitempty"` - VpnType VpnType `json:"vpnType,omitempty"` - EnableBgp *bool `json:"enableBgp,omitempty"` - GatewayDefaultSite *SubResource `json:"gatewayDefaultSite,omitempty"` - ResourceGUID *string `json:"resourceGuid,omitempty"` - ProvisioningState *string `json:"provisioningState,omitempty"` + IPConfigurations *[]VirtualNetworkGatewayIPConfiguration `json:"ipConfigurations,omitempty"` + GatewayType VirtualNetworkGatewayType `json:"gatewayType,omitempty"` + VpnType VpnType `json:"vpnType,omitempty"` + EnableBgp *bool `json:"enableBgp,omitempty"` + GatewayDefaultSite *SubResource `json:"gatewayDefaultSite,omitempty"` + Sku *VirtualNetworkGatewaySku `json:"sku,omitempty"` + VpnClientConfiguration *VpnClientConfiguration `json:"vpnClientConfiguration,omitempty"` + BgpSettings *BgpSettings `json:"bgpSettings,omitempty"` + ResourceGUID *string `json:"resourceGuid,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// VirtualNetworkGatewaySku is virtualNetworkGatewaySku details +type VirtualNetworkGatewaySku struct { + Name VirtualNetworkGatewaySkuName 
`json:"name,omitempty"` + Tier VirtualNetworkGatewaySkuTier `json:"tier,omitempty"` + Capacity *int32 `json:"capacity,omitempty"` } // VirtualNetworkListResult is response for ListVirtualNetworks Api servive @@ -1645,3 +1794,47 @@ ResourceGUID *string `json:"resourceGuid,omitempty"` ProvisioningState *string `json:"provisioningState,omitempty"` } + +// VpnClientConfiguration is vpnClientConfiguration for P2S client +type VpnClientConfiguration struct { + VpnClientAddressPool *AddressSpace `json:"vpnClientAddressPool,omitempty"` + VpnClientRootCertificates *[]VpnClientRootCertificate `json:"vpnClientRootCertificates,omitempty"` + VpnClientRevokedCertificates *[]VpnClientRevokedCertificate `json:"vpnClientRevokedCertificates,omitempty"` +} + +// VpnClientParameters is vpnClientParameters +type VpnClientParameters struct { + ProcessorArchitecture ProcessorArchitecture `json:"ProcessorArchitecture,omitempty"` +} + +// VpnClientRevokedCertificate is vPN client revoked certificate of virtual +// network gateway +type VpnClientRevokedCertificate struct { + ID *string `json:"id,omitempty"` + Properties *VpnClientRevokedCertificatePropertiesFormat `json:"properties,omitempty"` + Name *string `json:"name,omitempty"` + Etag *string `json:"etag,omitempty"` +} + +// VpnClientRevokedCertificatePropertiesFormat is properties of the revoked +// VPN client certificate of virtual network gateway +type VpnClientRevokedCertificatePropertiesFormat struct { + Thumbprint *string `json:"thumbprint,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// VpnClientRootCertificate is vPN client root certificate of virtual network +// gateway +type VpnClientRootCertificate struct { + ID *string `json:"id,omitempty"` + Properties *VpnClientRootCertificatePropertiesFormat `json:"properties,omitempty"` + Name *string `json:"name,omitempty"` + Etag *string `json:"etag,omitempty"` +} + +// VpnClientRootCertificatePropertiesFormat is properties of SSL certificates +// 
of application gateway +type VpnClientRootCertificatePropertiesFormat struct { + PublicCertData *string `json:"publicCertData,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/network/publicipaddresses.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/network/publicipaddresses.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/network/publicipaddresses.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/network/publicipaddresses.go 2016-10-13 14:32:06.000000000 +0000 @@ -14,20 +14,20 @@ // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 // Changes may cause incorrect behavior and will be lost if the code is // regenerated. import ( - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" "net/http" - "net/url" ) -// PublicIPAddressesClient is the the Windows Azure Network management API -// provides a RESTful set of web services that interact with Windows Azure +// PublicIPAddressesClient is the the Microsoft Azure Network management API +// provides a RESTful set of web services that interact with Microsoft Azure // Networks service to manage your network resrources. The API has entities -// that capture the relationship between an end user and the Windows Azure +// that capture the relationship between an end user and the Microsoft Azure // Networks service. 
type PublicIPAddressesClient struct { ManagementClient @@ -46,122 +46,128 @@ } // CreateOrUpdate the Put PublicIPAddress operation creates/updates a -// stable/dynamic PublicIP address +// stable/dynamic PublicIP address This method may poll for completion. +// Polling can be canceled by passing the cancel channel argument. The +// channel will be used to cancel polling and any outstanding HTTP requests. // // resourceGroupName is the name of the resource group. publicIPAddressName is // the name of the publicIpAddress. parameters is parameters supplied to the // create/update PublicIPAddress operation -func (client PublicIPAddressesClient) CreateOrUpdate(resourceGroupName string, publicIPAddressName string, parameters PublicIPAddress) (result PublicIPAddress, ae error) { - req, err := client.CreateOrUpdatePreparer(resourceGroupName, publicIPAddressName, parameters) +func (client PublicIPAddressesClient) CreateOrUpdate(resourceGroupName string, publicIPAddressName string, parameters PublicIPAddress, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.CreateOrUpdatePreparer(resourceGroupName, publicIPAddressName, parameters, cancel) if err != nil { - return result, autorest.NewErrorWithError(err, "network/PublicIPAddressesClient", "CreateOrUpdate", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "network.PublicIPAddressesClient", "CreateOrUpdate", nil, "Failure preparing request") } resp, err := client.CreateOrUpdateSender(req) if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network/PublicIPAddressesClient", "CreateOrUpdate", "Failure sending request") + result.Response = resp + return result, autorest.NewErrorWithError(err, "network.PublicIPAddressesClient", "CreateOrUpdate", resp, "Failure sending request") } result, err = client.CreateOrUpdateResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, 
"network/PublicIPAddressesClient", "CreateOrUpdate", "Failure responding to request") + err = autorest.NewErrorWithError(err, "network.PublicIPAddressesClient", "CreateOrUpdate", resp, "Failure responding to request") } return } // CreateOrUpdatePreparer prepares the CreateOrUpdate request. -func (client PublicIPAddressesClient) CreateOrUpdatePreparer(resourceGroupName string, publicIPAddressName string, parameters PublicIPAddress) (*http.Request, error) { +func (client PublicIPAddressesClient) CreateOrUpdatePreparer(resourceGroupName string, publicIPAddressName string, parameters PublicIPAddress, cancel <-chan struct{}) (*http.Request, error) { pathParameters := map[string]interface{}{ - "publicIpAddressName": url.QueryEscape(publicIPAddressName), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "publicIpAddressName": autorest.Encode("path", publicIPAddressName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, + preparer := autorest.CreatePreparer( autorest.AsJSON(), autorest.AsPut(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPAddresses/{publicIpAddressName}/"), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPAddresses/{publicIpAddressName}", pathParameters), autorest.WithJSON(parameters), - autorest.WithPathParameters(pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) } // CreateOrUpdateSender sends the CreateOrUpdate request. 
The method will close the // http.Response Body if it receives an error. func (client PublicIPAddressesClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusCreated, http.StatusOK) + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) } // CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always // closes the http.Response Body. -func (client PublicIPAddressesClient) CreateOrUpdateResponder(resp *http.Response) (result PublicIPAddress, err error) { +func (client PublicIPAddressesClient) CreateOrUpdateResponder(resp *http.Response) (result autorest.Response, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusCreated, http.StatusOK), - autorest.ByUnmarshallingJSON(&result), + azure.WithErrorUnlessStatusCode(http.StatusCreated, http.StatusOK), autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} + result.Response = resp return } // Delete the delete publicIpAddress operation deletes the specified -// publicIpAddress. +// publicIpAddress. This method may poll for completion. Polling can be +// canceled by passing the cancel channel argument. The channel will be used +// to cancel polling and any outstanding HTTP requests. // // resourceGroupName is the name of the resource group. publicIPAddressName is // the name of the subnet. 
-func (client PublicIPAddressesClient) Delete(resourceGroupName string, publicIPAddressName string) (result autorest.Response, ae error) { - req, err := client.DeletePreparer(resourceGroupName, publicIPAddressName) +func (client PublicIPAddressesClient) Delete(resourceGroupName string, publicIPAddressName string, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.DeletePreparer(resourceGroupName, publicIPAddressName, cancel) if err != nil { - return result, autorest.NewErrorWithError(err, "network/PublicIPAddressesClient", "Delete", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "network.PublicIPAddressesClient", "Delete", nil, "Failure preparing request") } resp, err := client.DeleteSender(req) if err != nil { result.Response = resp - return result, autorest.NewErrorWithError(err, "network/PublicIPAddressesClient", "Delete", "Failure sending request") + return result, autorest.NewErrorWithError(err, "network.PublicIPAddressesClient", "Delete", resp, "Failure sending request") } result, err = client.DeleteResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "network/PublicIPAddressesClient", "Delete", "Failure responding to request") + err = autorest.NewErrorWithError(err, "network.PublicIPAddressesClient", "Delete", resp, "Failure responding to request") } return } // DeletePreparer prepares the Delete request. 
-func (client PublicIPAddressesClient) DeletePreparer(resourceGroupName string, publicIPAddressName string) (*http.Request, error) { +func (client PublicIPAddressesClient) DeletePreparer(resourceGroupName string, publicIPAddressName string, cancel <-chan struct{}) (*http.Request, error) { pathParameters := map[string]interface{}{ - "publicIpAddressName": url.QueryEscape(publicIPAddressName), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "publicIpAddressName": autorest.Encode("path", publicIPAddressName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsDelete(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPAddresses/{publicIpAddressName}/"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPAddresses/{publicIpAddressName}", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) } // DeleteSender sends the Delete request. The method will close the // http.Response Body if it receives an error. func (client PublicIPAddressesClient) DeleteSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusNoContent, http.StatusAccepted, http.StatusOK) + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) } // DeleteResponder handles the response to the Delete request. 
The method always @@ -170,7 +176,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusNoContent, http.StatusAccepted, http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusNoContent, http.StatusAccepted, http.StatusOK), autorest.ByClosing()) result.Response = resp return @@ -180,52 +186,54 @@ // specified pubicIpAddress // // resourceGroupName is the name of the resource group. publicIPAddressName is -// the name of the subnet. -func (client PublicIPAddressesClient) Get(resourceGroupName string, publicIPAddressName string) (result PublicIPAddress, ae error) { - req, err := client.GetPreparer(resourceGroupName, publicIPAddressName) +// the name of the subnet. expand is expand references resources. +func (client PublicIPAddressesClient) Get(resourceGroupName string, publicIPAddressName string, expand string) (result PublicIPAddress, err error) { + req, err := client.GetPreparer(resourceGroupName, publicIPAddressName, expand) if err != nil { - return result, autorest.NewErrorWithError(err, "network/PublicIPAddressesClient", "Get", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "network.PublicIPAddressesClient", "Get", nil, "Failure preparing request") } resp, err := client.GetSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network/PublicIPAddressesClient", "Get", "Failure sending request") + return result, autorest.NewErrorWithError(err, "network.PublicIPAddressesClient", "Get", resp, "Failure sending request") } result, err = client.GetResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "network/PublicIPAddressesClient", "Get", "Failure responding to request") + err = autorest.NewErrorWithError(err, "network.PublicIPAddressesClient", "Get", resp, "Failure responding to request") } return } // GetPreparer prepares the Get request. 
-func (client PublicIPAddressesClient) GetPreparer(resourceGroupName string, publicIPAddressName string) (*http.Request, error) { +func (client PublicIPAddressesClient) GetPreparer(resourceGroupName string, publicIPAddressName string, expand string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "publicIpAddressName": url.QueryEscape(publicIPAddressName), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "publicIpAddressName": autorest.Encode("path", publicIPAddressName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, + } + if len(expand) > 0 { + queryParameters["$expand"] = autorest.Encode("query", expand) } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPAddresses/{publicIpAddressName}/"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPAddresses/{publicIpAddressName}", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client PublicIPAddressesClient) GetSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetResponder handles the response to the Get request. 
The method always @@ -234,7 +242,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -245,21 +253,21 @@ // in a resource group. // // resourceGroupName is the name of the resource group. -func (client PublicIPAddressesClient) List(resourceGroupName string) (result PublicIPAddressListResult, ae error) { +func (client PublicIPAddressesClient) List(resourceGroupName string) (result PublicIPAddressListResult, err error) { req, err := client.ListPreparer(resourceGroupName) if err != nil { - return result, autorest.NewErrorWithError(err, "network/PublicIPAddressesClient", "List", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "network.PublicIPAddressesClient", "List", nil, "Failure preparing request") } resp, err := client.ListSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network/PublicIPAddressesClient", "List", "Failure sending request") + return result, autorest.NewErrorWithError(err, "network.PublicIPAddressesClient", "List", resp, "Failure sending request") } result, err = client.ListResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "network/PublicIPAddressesClient", "List", "Failure responding to request") + err = autorest.NewErrorWithError(err, "network.PublicIPAddressesClient", "List", resp, "Failure responding to request") } return @@ -268,27 +276,26 @@ // ListPreparer prepares the List request. 
func (client PublicIPAddressesClient) ListPreparer(resourceGroupName string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPAddresses"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPAddresses", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client PublicIPAddressesClient) ListSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // ListResponder handles the response to the List request. The method always @@ -297,7 +304,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -305,10 +312,10 @@ } // ListNextResults retrieves the next set of results, if any. 
-func (client PublicIPAddressesClient) ListNextResults(lastResults PublicIPAddressListResult) (result PublicIPAddressListResult, ae error) { +func (client PublicIPAddressesClient) ListNextResults(lastResults PublicIPAddressListResult) (result PublicIPAddressListResult, err error) { req, err := lastResults.PublicIPAddressListResultPreparer() if err != nil { - return result, autorest.NewErrorWithError(err, "network/PublicIPAddressesClient", "List", "Failure preparing next results request request") + return result, autorest.NewErrorWithError(err, "network.PublicIPAddressesClient", "List", nil, "Failure preparing next results request request") } if req == nil { return @@ -317,12 +324,12 @@ resp, err := client.ListSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network/PublicIPAddressesClient", "List", "Failure sending next results request request") + return result, autorest.NewErrorWithError(err, "network.PublicIPAddressesClient", "List", resp, "Failure sending next results request request") } result, err = client.ListResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "network/PublicIPAddressesClient", "List", "Failure responding to next results request request") + err = autorest.NewErrorWithError(err, "network.PublicIPAddressesClient", "List", resp, "Failure responding to next results request request") } return @@ -330,21 +337,21 @@ // ListAll the List publicIpAddress opertion retrieves all the // publicIpAddresses in a subscription. 
-func (client PublicIPAddressesClient) ListAll() (result PublicIPAddressListResult, ae error) { +func (client PublicIPAddressesClient) ListAll() (result PublicIPAddressListResult, err error) { req, err := client.ListAllPreparer() if err != nil { - return result, autorest.NewErrorWithError(err, "network/PublicIPAddressesClient", "ListAll", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "network.PublicIPAddressesClient", "ListAll", nil, "Failure preparing request") } resp, err := client.ListAllSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network/PublicIPAddressesClient", "ListAll", "Failure sending request") + return result, autorest.NewErrorWithError(err, "network.PublicIPAddressesClient", "ListAll", resp, "Failure sending request") } result, err = client.ListAllResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "network/PublicIPAddressesClient", "ListAll", "Failure responding to request") + err = autorest.NewErrorWithError(err, "network.PublicIPAddressesClient", "ListAll", resp, "Failure responding to request") } return @@ -353,26 +360,25 @@ // ListAllPreparer prepares the ListAll request. 
func (client PublicIPAddressesClient) ListAllPreparer() (*http.Request, error) { pathParameters := map[string]interface{}{ - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/providers/Microsoft.Network/publicIPAddresses"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Network/publicIPAddresses", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // ListAllSender sends the ListAll request. The method will close the // http.Response Body if it receives an error. func (client PublicIPAddressesClient) ListAllSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // ListAllResponder handles the response to the ListAll request. The method always @@ -381,7 +387,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -389,10 +395,10 @@ } // ListAllNextResults retrieves the next set of results, if any. 
-func (client PublicIPAddressesClient) ListAllNextResults(lastResults PublicIPAddressListResult) (result PublicIPAddressListResult, ae error) { +func (client PublicIPAddressesClient) ListAllNextResults(lastResults PublicIPAddressListResult) (result PublicIPAddressListResult, err error) { req, err := lastResults.PublicIPAddressListResultPreparer() if err != nil { - return result, autorest.NewErrorWithError(err, "network/PublicIPAddressesClient", "ListAll", "Failure preparing next results request request") + return result, autorest.NewErrorWithError(err, "network.PublicIPAddressesClient", "ListAll", nil, "Failure preparing next results request request") } if req == nil { return @@ -401,12 +407,12 @@ resp, err := client.ListAllSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network/PublicIPAddressesClient", "ListAll", "Failure sending next results request request") + return result, autorest.NewErrorWithError(err, "network.PublicIPAddressesClient", "ListAll", resp, "Failure sending next results request request") } result, err = client.ListAllResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "network/PublicIPAddressesClient", "ListAll", "Failure responding to next results request request") + err = autorest.NewErrorWithError(err, "network.PublicIPAddressesClient", "ListAll", resp, "Failure responding to next results request request") } return diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/network/routes.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/network/routes.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/network/routes.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/network/routes.go 2016-10-13 14:32:06.000000000 +0000 @@ -14,20 +14,20 @@ // See the License for the specific language governing permissions and // limitations under the License. 
// -// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 // Changes may cause incorrect behavior and will be lost if the code is // regenerated. import ( - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" "net/http" - "net/url" ) -// RoutesClient is the the Windows Azure Network management API provides a -// RESTful set of web services that interact with Windows Azure Networks +// RoutesClient is the the Microsoft Azure Network management API provides a +// RESTful set of web services that interact with Microsoft Azure Networks // service to manage your network resrources. The API has entities that -// capture the relationship between an end user and the Windows Azure +// capture the relationship between an end user and the Microsoft Azure // Networks service. type RoutesClient struct { ManagementClient @@ -44,124 +44,130 @@ } // CreateOrUpdate the Put route operation creates/updates a route in the -// specified route table +// specified route table This method may poll for completion. Polling can be +// canceled by passing the cancel channel argument. The channel will be used +// to cancel polling and any outstanding HTTP requests. // // resourceGroupName is the name of the resource group. routeTableName is the // name of the route table. routeName is the name of the route. 
// routeParameters is parameters supplied to the create/update routeoperation -func (client RoutesClient) CreateOrUpdate(resourceGroupName string, routeTableName string, routeName string, routeParameters Route) (result Route, ae error) { - req, err := client.CreateOrUpdatePreparer(resourceGroupName, routeTableName, routeName, routeParameters) +func (client RoutesClient) CreateOrUpdate(resourceGroupName string, routeTableName string, routeName string, routeParameters Route, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.CreateOrUpdatePreparer(resourceGroupName, routeTableName, routeName, routeParameters, cancel) if err != nil { - return result, autorest.NewErrorWithError(err, "network/RoutesClient", "CreateOrUpdate", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "network.RoutesClient", "CreateOrUpdate", nil, "Failure preparing request") } resp, err := client.CreateOrUpdateSender(req) if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network/RoutesClient", "CreateOrUpdate", "Failure sending request") + result.Response = resp + return result, autorest.NewErrorWithError(err, "network.RoutesClient", "CreateOrUpdate", resp, "Failure sending request") } result, err = client.CreateOrUpdateResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "network/RoutesClient", "CreateOrUpdate", "Failure responding to request") + err = autorest.NewErrorWithError(err, "network.RoutesClient", "CreateOrUpdate", resp, "Failure responding to request") } return } // CreateOrUpdatePreparer prepares the CreateOrUpdate request. 
-func (client RoutesClient) CreateOrUpdatePreparer(resourceGroupName string, routeTableName string, routeName string, routeParameters Route) (*http.Request, error) { +func (client RoutesClient) CreateOrUpdatePreparer(resourceGroupName string, routeTableName string, routeName string, routeParameters Route, cancel <-chan struct{}) (*http.Request, error) { pathParameters := map[string]interface{}{ - "resourceGroupName": url.QueryEscape(resourceGroupName), - "routeName": url.QueryEscape(routeName), - "routeTableName": url.QueryEscape(routeTableName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "routeName": autorest.Encode("path", routeName), + "routeTableName": autorest.Encode("path", routeTableName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, + preparer := autorest.CreatePreparer( autorest.AsJSON(), autorest.AsPut(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}/routes/{routeName}"), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}/routes/{routeName}", pathParameters), autorest.WithJSON(routeParameters), - autorest.WithPathParameters(pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) } // CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the // http.Response Body if it receives an error. 
func (client RoutesClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusCreated, http.StatusOK) + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) } // CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always // closes the http.Response Body. -func (client RoutesClient) CreateOrUpdateResponder(resp *http.Response) (result Route, err error) { +func (client RoutesClient) CreateOrUpdateResponder(resp *http.Response) (result autorest.Response, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusCreated, http.StatusOK), - autorest.ByUnmarshallingJSON(&result), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} + result.Response = resp return } // Delete the delete route operation deletes the specified route from a route -// table. +// table. This method may poll for completion. Polling can be canceled by +// passing the cancel channel argument. The channel will be used to cancel +// polling and any outstanding HTTP requests. // // resourceGroupName is the name of the resource group. routeTableName is the // name of the route table. routeName is the name of the route. 
-func (client RoutesClient) Delete(resourceGroupName string, routeTableName string, routeName string) (result autorest.Response, ae error) { - req, err := client.DeletePreparer(resourceGroupName, routeTableName, routeName) +func (client RoutesClient) Delete(resourceGroupName string, routeTableName string, routeName string, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.DeletePreparer(resourceGroupName, routeTableName, routeName, cancel) if err != nil { - return result, autorest.NewErrorWithError(err, "network/RoutesClient", "Delete", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "network.RoutesClient", "Delete", nil, "Failure preparing request") } resp, err := client.DeleteSender(req) if err != nil { result.Response = resp - return result, autorest.NewErrorWithError(err, "network/RoutesClient", "Delete", "Failure sending request") + return result, autorest.NewErrorWithError(err, "network.RoutesClient", "Delete", resp, "Failure sending request") } result, err = client.DeleteResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "network/RoutesClient", "Delete", "Failure responding to request") + err = autorest.NewErrorWithError(err, "network.RoutesClient", "Delete", resp, "Failure responding to request") } return } // DeletePreparer prepares the Delete request. 
-func (client RoutesClient) DeletePreparer(resourceGroupName string, routeTableName string, routeName string) (*http.Request, error) { +func (client RoutesClient) DeletePreparer(resourceGroupName string, routeTableName string, routeName string, cancel <-chan struct{}) (*http.Request, error) { pathParameters := map[string]interface{}{ - "resourceGroupName": url.QueryEscape(resourceGroupName), - "routeName": url.QueryEscape(routeName), - "routeTableName": url.QueryEscape(routeTableName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "routeName": autorest.Encode("path", routeName), + "routeTableName": autorest.Encode("path", routeTableName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsDelete(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}/routes/{routeName}"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}/routes/{routeName}", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) } // DeleteSender sends the Delete request. The method will close the // http.Response Body if it receives an error. 
func (client RoutesClient) DeleteSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusNoContent, http.StatusAccepted, http.StatusOK) + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) } // DeleteResponder handles the response to the Delete request. The method always @@ -170,7 +176,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusNoContent, http.StatusAccepted, http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusAccepted, http.StatusOK, http.StatusNoContent), autorest.ByClosing()) result.Response = resp return @@ -181,21 +187,21 @@ // // resourceGroupName is the name of the resource group. routeTableName is the // name of the route table. routeName is the name of the route. -func (client RoutesClient) Get(resourceGroupName string, routeTableName string, routeName string) (result Route, ae error) { +func (client RoutesClient) Get(resourceGroupName string, routeTableName string, routeName string) (result Route, err error) { req, err := client.GetPreparer(resourceGroupName, routeTableName, routeName) if err != nil { - return result, autorest.NewErrorWithError(err, "network/RoutesClient", "Get", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "network.RoutesClient", "Get", nil, "Failure preparing request") } resp, err := client.GetSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network/RoutesClient", "Get", "Failure sending request") + return result, autorest.NewErrorWithError(err, "network.RoutesClient", "Get", resp, "Failure sending request") } result, err = client.GetResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "network/RoutesClient", "Get", "Failure responding to request") + err = autorest.NewErrorWithError(err, "network.RoutesClient", "Get", resp, "Failure responding to request") } 
return @@ -204,29 +210,28 @@ // GetPreparer prepares the Get request. func (client RoutesClient) GetPreparer(resourceGroupName string, routeTableName string, routeName string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "resourceGroupName": url.QueryEscape(resourceGroupName), - "routeName": url.QueryEscape(routeName), - "routeTableName": url.QueryEscape(routeTableName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "routeName": autorest.Encode("path", routeName), + "routeTableName": autorest.Encode("path", routeTableName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}/routes/{routeName}"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}/routes/{routeName}", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client RoutesClient) GetSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetResponder handles the response to the Get request. 
The method always @@ -235,7 +240,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -247,21 +252,21 @@ // // resourceGroupName is the name of the resource group. routeTableName is the // name of the route table. -func (client RoutesClient) List(resourceGroupName string, routeTableName string) (result RouteListResult, ae error) { +func (client RoutesClient) List(resourceGroupName string, routeTableName string) (result RouteListResult, err error) { req, err := client.ListPreparer(resourceGroupName, routeTableName) if err != nil { - return result, autorest.NewErrorWithError(err, "network/RoutesClient", "List", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "network.RoutesClient", "List", nil, "Failure preparing request") } resp, err := client.ListSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network/RoutesClient", "List", "Failure sending request") + return result, autorest.NewErrorWithError(err, "network.RoutesClient", "List", resp, "Failure sending request") } result, err = client.ListResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "network/RoutesClient", "List", "Failure responding to request") + err = autorest.NewErrorWithError(err, "network.RoutesClient", "List", resp, "Failure responding to request") } return @@ -270,28 +275,27 @@ // ListPreparer prepares the List request. 
func (client RoutesClient) ListPreparer(resourceGroupName string, routeTableName string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "resourceGroupName": url.QueryEscape(resourceGroupName), - "routeTableName": url.QueryEscape(routeTableName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "routeTableName": autorest.Encode("path", routeTableName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}/routes"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}/routes", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client RoutesClient) ListSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // ListResponder handles the response to the List request. 
The method always @@ -300,7 +304,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -308,10 +312,10 @@ } // ListNextResults retrieves the next set of results, if any. -func (client RoutesClient) ListNextResults(lastResults RouteListResult) (result RouteListResult, ae error) { +func (client RoutesClient) ListNextResults(lastResults RouteListResult) (result RouteListResult, err error) { req, err := lastResults.RouteListResultPreparer() if err != nil { - return result, autorest.NewErrorWithError(err, "network/RoutesClient", "List", "Failure preparing next results request request") + return result, autorest.NewErrorWithError(err, "network.RoutesClient", "List", nil, "Failure preparing next results request request") } if req == nil { return @@ -320,12 +324,12 @@ resp, err := client.ListSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network/RoutesClient", "List", "Failure sending next results request request") + return result, autorest.NewErrorWithError(err, "network.RoutesClient", "List", resp, "Failure sending next results request request") } result, err = client.ListResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "network/RoutesClient", "List", "Failure responding to next results request request") + err = autorest.NewErrorWithError(err, "network.RoutesClient", "List", resp, "Failure responding to next results request request") } return diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/network/routetables.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/network/routetables.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/network/routetables.go 2016-08-16 08:56:25.000000000 +0000 +++ 
juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/network/routetables.go 2016-10-13 14:32:06.000000000 +0000 @@ -14,20 +14,20 @@ // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 // Changes may cause incorrect behavior and will be lost if the code is // regenerated. import ( - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" "net/http" - "net/url" ) -// RouteTablesClient is the the Windows Azure Network management API provides -// a RESTful set of web services that interact with Windows Azure Networks -// service to manage your network resrources. The API has entities that -// capture the relationship between an end user and the Windows Azure +// RouteTablesClient is the the Microsoft Azure Network management API +// provides a RESTful set of web services that interact with Microsoft Azure +// Networks service to manage your network resrources. The API has entities +// that capture the relationship between an end user and the Microsoft Azure // Networks service. type RouteTablesClient struct { ManagementClient @@ -45,121 +45,128 @@ } // CreateOrUpdate the Put RouteTable operation creates/updates a route tablein -// the specified resource group. +// the specified resource group. This method may poll for completion. Polling +// can be canceled by passing the cancel channel argument. The channel will +// be used to cancel polling and any outstanding HTTP requests. // // resourceGroupName is the name of the resource group. routeTableName is the // name of the route table. 
parameters is parameters supplied to the // create/update Route Table operation -func (client RouteTablesClient) CreateOrUpdate(resourceGroupName string, routeTableName string, parameters RouteTable) (result RouteTable, ae error) { - req, err := client.CreateOrUpdatePreparer(resourceGroupName, routeTableName, parameters) +func (client RouteTablesClient) CreateOrUpdate(resourceGroupName string, routeTableName string, parameters RouteTable, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.CreateOrUpdatePreparer(resourceGroupName, routeTableName, parameters, cancel) if err != nil { - return result, autorest.NewErrorWithError(err, "network/RouteTablesClient", "CreateOrUpdate", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "network.RouteTablesClient", "CreateOrUpdate", nil, "Failure preparing request") } resp, err := client.CreateOrUpdateSender(req) if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network/RouteTablesClient", "CreateOrUpdate", "Failure sending request") + result.Response = resp + return result, autorest.NewErrorWithError(err, "network.RouteTablesClient", "CreateOrUpdate", resp, "Failure sending request") } result, err = client.CreateOrUpdateResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "network/RouteTablesClient", "CreateOrUpdate", "Failure responding to request") + err = autorest.NewErrorWithError(err, "network.RouteTablesClient", "CreateOrUpdate", resp, "Failure responding to request") } return } // CreateOrUpdatePreparer prepares the CreateOrUpdate request. 
-func (client RouteTablesClient) CreateOrUpdatePreparer(resourceGroupName string, routeTableName string, parameters RouteTable) (*http.Request, error) { +func (client RouteTablesClient) CreateOrUpdatePreparer(resourceGroupName string, routeTableName string, parameters RouteTable, cancel <-chan struct{}) (*http.Request, error) { pathParameters := map[string]interface{}{ - "resourceGroupName": url.QueryEscape(resourceGroupName), - "routeTableName": url.QueryEscape(routeTableName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "routeTableName": autorest.Encode("path", routeTableName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, + preparer := autorest.CreatePreparer( autorest.AsJSON(), autorest.AsPut(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}"), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}", pathParameters), autorest.WithJSON(parameters), - autorest.WithPathParameters(pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) } // CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the // http.Response Body if it receives an error. func (client RouteTablesClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusCreated, http.StatusOK) + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) } // CreateOrUpdateResponder handles the response to the CreateOrUpdate request. 
The method always // closes the http.Response Body. -func (client RouteTablesClient) CreateOrUpdateResponder(resp *http.Response) (result RouteTable, err error) { +func (client RouteTablesClient) CreateOrUpdateResponder(resp *http.Response) (result autorest.Response, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusCreated, http.StatusOK), - autorest.ByUnmarshallingJSON(&result), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} + result.Response = resp return } // Delete the Delete RouteTable operation deletes the specifed Route Table +// This method may poll for completion. Polling can be canceled by passing +// the cancel channel argument. The channel will be used to cancel polling +// and any outstanding HTTP requests. // // resourceGroupName is the name of the resource group. routeTableName is the // name of the route table. 
-func (client RouteTablesClient) Delete(resourceGroupName string, routeTableName string) (result autorest.Response, ae error) { - req, err := client.DeletePreparer(resourceGroupName, routeTableName) +func (client RouteTablesClient) Delete(resourceGroupName string, routeTableName string, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.DeletePreparer(resourceGroupName, routeTableName, cancel) if err != nil { - return result, autorest.NewErrorWithError(err, "network/RouteTablesClient", "Delete", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "network.RouteTablesClient", "Delete", nil, "Failure preparing request") } resp, err := client.DeleteSender(req) if err != nil { result.Response = resp - return result, autorest.NewErrorWithError(err, "network/RouteTablesClient", "Delete", "Failure sending request") + return result, autorest.NewErrorWithError(err, "network.RouteTablesClient", "Delete", resp, "Failure sending request") } result, err = client.DeleteResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "network/RouteTablesClient", "Delete", "Failure responding to request") + err = autorest.NewErrorWithError(err, "network.RouteTablesClient", "Delete", resp, "Failure responding to request") } return } // DeletePreparer prepares the Delete request. 
-func (client RouteTablesClient) DeletePreparer(resourceGroupName string, routeTableName string) (*http.Request, error) { +func (client RouteTablesClient) DeletePreparer(resourceGroupName string, routeTableName string, cancel <-chan struct{}) (*http.Request, error) { pathParameters := map[string]interface{}{ - "resourceGroupName": url.QueryEscape(resourceGroupName), - "routeTableName": url.QueryEscape(routeTableName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "routeTableName": autorest.Encode("path", routeTableName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsDelete(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) } // DeleteSender sends the Delete request. The method will close the // http.Response Body if it receives an error. func (client RouteTablesClient) DeleteSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusNoContent, http.StatusAccepted, http.StatusOK) + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) } // DeleteResponder handles the response to the Delete request. 
The method always @@ -168,7 +175,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusNoContent, http.StatusAccepted, http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusNoContent, http.StatusOK, http.StatusAccepted), autorest.ByClosing()) result.Response = resp return @@ -178,52 +185,54 @@ // route table. // // resourceGroupName is the name of the resource group. routeTableName is the -// name of the route table. -func (client RouteTablesClient) Get(resourceGroupName string, routeTableName string) (result RouteTable, ae error) { - req, err := client.GetPreparer(resourceGroupName, routeTableName) +// name of the route table. expand is expand references resources. +func (client RouteTablesClient) Get(resourceGroupName string, routeTableName string, expand string) (result RouteTable, err error) { + req, err := client.GetPreparer(resourceGroupName, routeTableName, expand) if err != nil { - return result, autorest.NewErrorWithError(err, "network/RouteTablesClient", "Get", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "network.RouteTablesClient", "Get", nil, "Failure preparing request") } resp, err := client.GetSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network/RouteTablesClient", "Get", "Failure sending request") + return result, autorest.NewErrorWithError(err, "network.RouteTablesClient", "Get", resp, "Failure sending request") } result, err = client.GetResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "network/RouteTablesClient", "Get", "Failure responding to request") + err = autorest.NewErrorWithError(err, "network.RouteTablesClient", "Get", resp, "Failure responding to request") } return } // GetPreparer prepares the Get request. 
-func (client RouteTablesClient) GetPreparer(resourceGroupName string, routeTableName string) (*http.Request, error) { +func (client RouteTablesClient) GetPreparer(resourceGroupName string, routeTableName string, expand string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "resourceGroupName": url.QueryEscape(resourceGroupName), - "routeTableName": url.QueryEscape(routeTableName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "routeTableName": autorest.Encode("path", routeTableName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, + } + if len(expand) > 0 { + queryParameters["$expand"] = autorest.Encode("query", expand) } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client RouteTablesClient) GetSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetResponder handles the response to the Get request. 
The method always @@ -232,7 +241,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -242,21 +251,21 @@ // List the list RouteTables returns all route tables in a resource group // // resourceGroupName is the name of the resource group. -func (client RouteTablesClient) List(resourceGroupName string) (result RouteTableListResult, ae error) { +func (client RouteTablesClient) List(resourceGroupName string) (result RouteTableListResult, err error) { req, err := client.ListPreparer(resourceGroupName) if err != nil { - return result, autorest.NewErrorWithError(err, "network/RouteTablesClient", "List", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "network.RouteTablesClient", "List", nil, "Failure preparing request") } resp, err := client.ListSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network/RouteTablesClient", "List", "Failure sending request") + return result, autorest.NewErrorWithError(err, "network.RouteTablesClient", "List", resp, "Failure sending request") } result, err = client.ListResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "network/RouteTablesClient", "List", "Failure responding to request") + err = autorest.NewErrorWithError(err, "network.RouteTablesClient", "List", resp, "Failure responding to request") } return @@ -265,27 +274,26 @@ // ListPreparer prepares the List request. 
func (client RouteTablesClient) ListPreparer(resourceGroupName string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client RouteTablesClient) ListSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // ListResponder handles the response to the List request. The method always @@ -294,7 +302,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -302,10 +310,10 @@ } // ListNextResults retrieves the next set of results, if any. 
-func (client RouteTablesClient) ListNextResults(lastResults RouteTableListResult) (result RouteTableListResult, ae error) { +func (client RouteTablesClient) ListNextResults(lastResults RouteTableListResult) (result RouteTableListResult, err error) { req, err := lastResults.RouteTableListResultPreparer() if err != nil { - return result, autorest.NewErrorWithError(err, "network/RouteTablesClient", "List", "Failure preparing next results request request") + return result, autorest.NewErrorWithError(err, "network.RouteTablesClient", "List", nil, "Failure preparing next results request request") } if req == nil { return @@ -314,33 +322,33 @@ resp, err := client.ListSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network/RouteTablesClient", "List", "Failure sending next results request request") + return result, autorest.NewErrorWithError(err, "network.RouteTablesClient", "List", resp, "Failure sending next results request request") } result, err = client.ListResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "network/RouteTablesClient", "List", "Failure responding to next results request request") + err = autorest.NewErrorWithError(err, "network.RouteTablesClient", "List", resp, "Failure responding to next results request request") } return } // ListAll the list RouteTables returns all route tables in a subscription -func (client RouteTablesClient) ListAll() (result RouteTableListResult, ae error) { +func (client RouteTablesClient) ListAll() (result RouteTableListResult, err error) { req, err := client.ListAllPreparer() if err != nil { - return result, autorest.NewErrorWithError(err, "network/RouteTablesClient", "ListAll", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "network.RouteTablesClient", "ListAll", nil, "Failure preparing request") } resp, err := client.ListAllSender(req) if err != nil { result.Response = 
autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network/RouteTablesClient", "ListAll", "Failure sending request") + return result, autorest.NewErrorWithError(err, "network.RouteTablesClient", "ListAll", resp, "Failure sending request") } result, err = client.ListAllResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "network/RouteTablesClient", "ListAll", "Failure responding to request") + err = autorest.NewErrorWithError(err, "network.RouteTablesClient", "ListAll", resp, "Failure responding to request") } return @@ -349,26 +357,25 @@ // ListAllPreparer prepares the ListAll request. func (client RouteTablesClient) ListAllPreparer() (*http.Request, error) { pathParameters := map[string]interface{}{ - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/providers/Microsoft.Network/routeTables"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Network/routeTables", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // ListAllSender sends the ListAll request. The method will close the // http.Response Body if it receives an error. func (client RouteTablesClient) ListAllSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // ListAllResponder handles the response to the ListAll request. 
The method always @@ -377,7 +384,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -385,10 +392,10 @@ } // ListAllNextResults retrieves the next set of results, if any. -func (client RouteTablesClient) ListAllNextResults(lastResults RouteTableListResult) (result RouteTableListResult, ae error) { +func (client RouteTablesClient) ListAllNextResults(lastResults RouteTableListResult) (result RouteTableListResult, err error) { req, err := lastResults.RouteTableListResultPreparer() if err != nil { - return result, autorest.NewErrorWithError(err, "network/RouteTablesClient", "ListAll", "Failure preparing next results request request") + return result, autorest.NewErrorWithError(err, "network.RouteTablesClient", "ListAll", nil, "Failure preparing next results request request") } if req == nil { return @@ -397,12 +404,12 @@ resp, err := client.ListAllSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network/RouteTablesClient", "ListAll", "Failure sending next results request request") + return result, autorest.NewErrorWithError(err, "network.RouteTablesClient", "ListAll", resp, "Failure sending next results request request") } result, err = client.ListAllResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "network/RouteTablesClient", "ListAll", "Failure responding to next results request request") + err = autorest.NewErrorWithError(err, "network.RouteTablesClient", "ListAll", resp, "Failure responding to next results request request") } return diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/network/securitygroups.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/network/securitygroups.go --- 
juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/network/securitygroups.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/network/securitygroups.go 2016-10-13 14:32:06.000000000 +0000 @@ -14,20 +14,20 @@ // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 // Changes may cause incorrect behavior and will be lost if the code is // regenerated. import ( - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" "net/http" - "net/url" ) -// SecurityGroupsClient is the the Windows Azure Network management API -// provides a RESTful set of web services that interact with Windows Azure +// SecurityGroupsClient is the the Microsoft Azure Network management API +// provides a RESTful set of web services that interact with Microsoft Azure // Networks service to manage your network resrources. The API has entities -// that capture the relationship between an end user and the Windows Azure +// that capture the relationship between an end user and the Microsoft Azure // Networks service. type SecurityGroupsClient struct { ManagementClient @@ -46,123 +46,130 @@ } // CreateOrUpdate the Put NetworkSecurityGroup operation creates/updates a -// network security groupin the specified resource group. +// network security groupin the specified resource group. This method may +// poll for completion. Polling can be canceled by passing the cancel channel +// argument. The channel will be used to cancel polling and any outstanding +// HTTP requests. // // resourceGroupName is the name of the resource group. // networkSecurityGroupName is the name of the network security group. 
// parameters is parameters supplied to the create/update Network Security // Group operation -func (client SecurityGroupsClient) CreateOrUpdate(resourceGroupName string, networkSecurityGroupName string, parameters SecurityGroup) (result SecurityGroup, ae error) { - req, err := client.CreateOrUpdatePreparer(resourceGroupName, networkSecurityGroupName, parameters) +func (client SecurityGroupsClient) CreateOrUpdate(resourceGroupName string, networkSecurityGroupName string, parameters SecurityGroup, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.CreateOrUpdatePreparer(resourceGroupName, networkSecurityGroupName, parameters, cancel) if err != nil { - return result, autorest.NewErrorWithError(err, "network/SecurityGroupsClient", "CreateOrUpdate", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "network.SecurityGroupsClient", "CreateOrUpdate", nil, "Failure preparing request") } resp, err := client.CreateOrUpdateSender(req) if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network/SecurityGroupsClient", "CreateOrUpdate", "Failure sending request") + result.Response = resp + return result, autorest.NewErrorWithError(err, "network.SecurityGroupsClient", "CreateOrUpdate", resp, "Failure sending request") } result, err = client.CreateOrUpdateResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "network/SecurityGroupsClient", "CreateOrUpdate", "Failure responding to request") + err = autorest.NewErrorWithError(err, "network.SecurityGroupsClient", "CreateOrUpdate", resp, "Failure responding to request") } return } // CreateOrUpdatePreparer prepares the CreateOrUpdate request. 
-func (client SecurityGroupsClient) CreateOrUpdatePreparer(resourceGroupName string, networkSecurityGroupName string, parameters SecurityGroup) (*http.Request, error) { +func (client SecurityGroupsClient) CreateOrUpdatePreparer(resourceGroupName string, networkSecurityGroupName string, parameters SecurityGroup, cancel <-chan struct{}) (*http.Request, error) { pathParameters := map[string]interface{}{ - "networkSecurityGroupName": url.QueryEscape(networkSecurityGroupName), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "networkSecurityGroupName": autorest.Encode("path", networkSecurityGroupName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, + preparer := autorest.CreatePreparer( autorest.AsJSON(), autorest.AsPut(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}"), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}", pathParameters), autorest.WithJSON(parameters), - autorest.WithPathParameters(pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) } // CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the // http.Response Body if it receives an error. 
func (client SecurityGroupsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusCreated, http.StatusOK) + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) } // CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always // closes the http.Response Body. -func (client SecurityGroupsClient) CreateOrUpdateResponder(resp *http.Response) (result SecurityGroup, err error) { +func (client SecurityGroupsClient) CreateOrUpdateResponder(resp *http.Response) (result autorest.Response, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusCreated, http.StatusOK), - autorest.ByUnmarshallingJSON(&result), + azure.WithErrorUnlessStatusCode(http.StatusCreated, http.StatusOK), autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} + result.Response = resp return } // Delete the Delete NetworkSecurityGroup operation deletes the specifed -// network security group +// network security group This method may poll for completion. Polling can be +// canceled by passing the cancel channel argument. The channel will be used +// to cancel polling and any outstanding HTTP requests. // // resourceGroupName is the name of the resource group. // networkSecurityGroupName is the name of the network security group. 
-func (client SecurityGroupsClient) Delete(resourceGroupName string, networkSecurityGroupName string) (result autorest.Response, ae error) { - req, err := client.DeletePreparer(resourceGroupName, networkSecurityGroupName) +func (client SecurityGroupsClient) Delete(resourceGroupName string, networkSecurityGroupName string, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.DeletePreparer(resourceGroupName, networkSecurityGroupName, cancel) if err != nil { - return result, autorest.NewErrorWithError(err, "network/SecurityGroupsClient", "Delete", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "network.SecurityGroupsClient", "Delete", nil, "Failure preparing request") } resp, err := client.DeleteSender(req) if err != nil { result.Response = resp - return result, autorest.NewErrorWithError(err, "network/SecurityGroupsClient", "Delete", "Failure sending request") + return result, autorest.NewErrorWithError(err, "network.SecurityGroupsClient", "Delete", resp, "Failure sending request") } result, err = client.DeleteResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "network/SecurityGroupsClient", "Delete", "Failure responding to request") + err = autorest.NewErrorWithError(err, "network.SecurityGroupsClient", "Delete", resp, "Failure responding to request") } return } // DeletePreparer prepares the Delete request. 
-func (client SecurityGroupsClient) DeletePreparer(resourceGroupName string, networkSecurityGroupName string) (*http.Request, error) { +func (client SecurityGroupsClient) DeletePreparer(resourceGroupName string, networkSecurityGroupName string, cancel <-chan struct{}) (*http.Request, error) { pathParameters := map[string]interface{}{ - "networkSecurityGroupName": url.QueryEscape(networkSecurityGroupName), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "networkSecurityGroupName": autorest.Encode("path", networkSecurityGroupName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsDelete(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) } // DeleteSender sends the Delete request. The method will close the // http.Response Body if it receives an error. func (client SecurityGroupsClient) DeleteSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusNoContent, http.StatusAccepted, http.StatusOK) + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) } // DeleteResponder handles the response to the Delete request. 
The method always @@ -171,7 +178,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusNoContent, http.StatusAccepted, http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusAccepted, http.StatusOK, http.StatusNoContent), autorest.ByClosing()) result.Response = resp return @@ -181,52 +188,55 @@ // specified network security group. // // resourceGroupName is the name of the resource group. -// networkSecurityGroupName is the name of the network security group. -func (client SecurityGroupsClient) Get(resourceGroupName string, networkSecurityGroupName string) (result SecurityGroup, ae error) { - req, err := client.GetPreparer(resourceGroupName, networkSecurityGroupName) +// networkSecurityGroupName is the name of the network security group. expand +// is expand references resources. +func (client SecurityGroupsClient) Get(resourceGroupName string, networkSecurityGroupName string, expand string) (result SecurityGroup, err error) { + req, err := client.GetPreparer(resourceGroupName, networkSecurityGroupName, expand) if err != nil { - return result, autorest.NewErrorWithError(err, "network/SecurityGroupsClient", "Get", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "network.SecurityGroupsClient", "Get", nil, "Failure preparing request") } resp, err := client.GetSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network/SecurityGroupsClient", "Get", "Failure sending request") + return result, autorest.NewErrorWithError(err, "network.SecurityGroupsClient", "Get", resp, "Failure sending request") } result, err = client.GetResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "network/SecurityGroupsClient", "Get", "Failure responding to request") + err = autorest.NewErrorWithError(err, "network.SecurityGroupsClient", "Get", resp, "Failure responding to request") } return } // GetPreparer 
prepares the Get request. -func (client SecurityGroupsClient) GetPreparer(resourceGroupName string, networkSecurityGroupName string) (*http.Request, error) { +func (client SecurityGroupsClient) GetPreparer(resourceGroupName string, networkSecurityGroupName string, expand string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "networkSecurityGroupName": url.QueryEscape(networkSecurityGroupName), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "networkSecurityGroupName": autorest.Encode("path", networkSecurityGroupName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, + } + if len(expand) > 0 { + queryParameters["$expand"] = autorest.Encode("query", expand) } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client SecurityGroupsClient) GetSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetResponder handles the response to the Get request. 
The method always @@ -235,7 +245,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -246,21 +256,21 @@ // a resource group // // resourceGroupName is the name of the resource group. -func (client SecurityGroupsClient) List(resourceGroupName string) (result SecurityGroupListResult, ae error) { +func (client SecurityGroupsClient) List(resourceGroupName string) (result SecurityGroupListResult, err error) { req, err := client.ListPreparer(resourceGroupName) if err != nil { - return result, autorest.NewErrorWithError(err, "network/SecurityGroupsClient", "List", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "network.SecurityGroupsClient", "List", nil, "Failure preparing request") } resp, err := client.ListSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network/SecurityGroupsClient", "List", "Failure sending request") + return result, autorest.NewErrorWithError(err, "network.SecurityGroupsClient", "List", resp, "Failure sending request") } result, err = client.ListResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "network/SecurityGroupsClient", "List", "Failure responding to request") + err = autorest.NewErrorWithError(err, "network.SecurityGroupsClient", "List", resp, "Failure responding to request") } return @@ -269,27 +279,26 @@ // ListPreparer prepares the List request. 
func (client SecurityGroupsClient) ListPreparer(resourceGroupName string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client SecurityGroupsClient) ListSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // ListResponder handles the response to the List request. The method always @@ -298,7 +307,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -306,10 +315,10 @@ } // ListNextResults retrieves the next set of results, if any. 
-func (client SecurityGroupsClient) ListNextResults(lastResults SecurityGroupListResult) (result SecurityGroupListResult, ae error) { +func (client SecurityGroupsClient) ListNextResults(lastResults SecurityGroupListResult) (result SecurityGroupListResult, err error) { req, err := lastResults.SecurityGroupListResultPreparer() if err != nil { - return result, autorest.NewErrorWithError(err, "network/SecurityGroupsClient", "List", "Failure preparing next results request request") + return result, autorest.NewErrorWithError(err, "network.SecurityGroupsClient", "List", nil, "Failure preparing next results request request") } if req == nil { return @@ -318,12 +327,12 @@ resp, err := client.ListSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network/SecurityGroupsClient", "List", "Failure sending next results request request") + return result, autorest.NewErrorWithError(err, "network.SecurityGroupsClient", "List", resp, "Failure sending next results request request") } result, err = client.ListResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "network/SecurityGroupsClient", "List", "Failure responding to next results request request") + err = autorest.NewErrorWithError(err, "network.SecurityGroupsClient", "List", resp, "Failure responding to next results request request") } return @@ -331,21 +340,21 @@ // ListAll the list NetworkSecurityGroups returns all network security groups // in a subscription -func (client SecurityGroupsClient) ListAll() (result SecurityGroupListResult, ae error) { +func (client SecurityGroupsClient) ListAll() (result SecurityGroupListResult, err error) { req, err := client.ListAllPreparer() if err != nil { - return result, autorest.NewErrorWithError(err, "network/SecurityGroupsClient", "ListAll", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "network.SecurityGroupsClient", "ListAll", nil, "Failure preparing 
request") } resp, err := client.ListAllSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network/SecurityGroupsClient", "ListAll", "Failure sending request") + return result, autorest.NewErrorWithError(err, "network.SecurityGroupsClient", "ListAll", resp, "Failure sending request") } result, err = client.ListAllResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "network/SecurityGroupsClient", "ListAll", "Failure responding to request") + err = autorest.NewErrorWithError(err, "network.SecurityGroupsClient", "ListAll", resp, "Failure responding to request") } return @@ -354,26 +363,25 @@ // ListAllPreparer prepares the ListAll request. func (client SecurityGroupsClient) ListAllPreparer() (*http.Request, error) { pathParameters := map[string]interface{}{ - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/providers/Microsoft.Network/networkSecurityGroups"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Network/networkSecurityGroups", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // ListAllSender sends the ListAll request. The method will close the // http.Response Body if it receives an error. 
func (client SecurityGroupsClient) ListAllSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // ListAllResponder handles the response to the ListAll request. The method always @@ -382,7 +390,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -390,10 +398,10 @@ } // ListAllNextResults retrieves the next set of results, if any. -func (client SecurityGroupsClient) ListAllNextResults(lastResults SecurityGroupListResult) (result SecurityGroupListResult, ae error) { +func (client SecurityGroupsClient) ListAllNextResults(lastResults SecurityGroupListResult) (result SecurityGroupListResult, err error) { req, err := lastResults.SecurityGroupListResultPreparer() if err != nil { - return result, autorest.NewErrorWithError(err, "network/SecurityGroupsClient", "ListAll", "Failure preparing next results request request") + return result, autorest.NewErrorWithError(err, "network.SecurityGroupsClient", "ListAll", nil, "Failure preparing next results request request") } if req == nil { return @@ -402,12 +410,12 @@ resp, err := client.ListAllSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network/SecurityGroupsClient", "ListAll", "Failure sending next results request request") + return result, autorest.NewErrorWithError(err, "network.SecurityGroupsClient", "ListAll", resp, "Failure sending next results request request") } result, err = client.ListAllResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "network/SecurityGroupsClient", "ListAll", "Failure responding to next results request request") + err = autorest.NewErrorWithError(err, "network.SecurityGroupsClient", 
"ListAll", resp, "Failure responding to next results request request") } return diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/network/securityrules.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/network/securityrules.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/network/securityrules.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/network/securityrules.go 2016-10-13 14:32:06.000000000 +0000 @@ -14,20 +14,20 @@ // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 // Changes may cause incorrect behavior and will be lost if the code is // regenerated. import ( - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" "net/http" - "net/url" ) -// SecurityRulesClient is the the Windows Azure Network management API -// provides a RESTful set of web services that interact with Windows Azure +// SecurityRulesClient is the the Microsoft Azure Network management API +// provides a RESTful set of web services that interact with Microsoft Azure // Networks service to manage your network resrources. The API has entities -// that capture the relationship between an end user and the Windows Azure +// that capture the relationship between an end user and the Microsoft Azure // Networks service. type SecurityRulesClient struct { ManagementClient @@ -46,127 +46,134 @@ } // CreateOrUpdate the Put network security rule operation creates/updates a -// security rule in the specified network security group +// security rule in the specified network security group This method may poll +// for completion. 
Polling can be canceled by passing the cancel channel +// argument. The channel will be used to cancel polling and any outstanding +// HTTP requests. // // resourceGroupName is the name of the resource group. // networkSecurityGroupName is the name of the network security group. // securityRuleName is the name of the security rule. securityRuleParameters // is parameters supplied to the create/update network security rule // operation -func (client SecurityRulesClient) CreateOrUpdate(resourceGroupName string, networkSecurityGroupName string, securityRuleName string, securityRuleParameters SecurityRule) (result SecurityRule, ae error) { - req, err := client.CreateOrUpdatePreparer(resourceGroupName, networkSecurityGroupName, securityRuleName, securityRuleParameters) +func (client SecurityRulesClient) CreateOrUpdate(resourceGroupName string, networkSecurityGroupName string, securityRuleName string, securityRuleParameters SecurityRule, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.CreateOrUpdatePreparer(resourceGroupName, networkSecurityGroupName, securityRuleName, securityRuleParameters, cancel) if err != nil { - return result, autorest.NewErrorWithError(err, "network/SecurityRulesClient", "CreateOrUpdate", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "network.SecurityRulesClient", "CreateOrUpdate", nil, "Failure preparing request") } resp, err := client.CreateOrUpdateSender(req) if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network/SecurityRulesClient", "CreateOrUpdate", "Failure sending request") + result.Response = resp + return result, autorest.NewErrorWithError(err, "network.SecurityRulesClient", "CreateOrUpdate", resp, "Failure sending request") } result, err = client.CreateOrUpdateResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "network/SecurityRulesClient", "CreateOrUpdate", "Failure responding 
to request") + err = autorest.NewErrorWithError(err, "network.SecurityRulesClient", "CreateOrUpdate", resp, "Failure responding to request") } return } // CreateOrUpdatePreparer prepares the CreateOrUpdate request. -func (client SecurityRulesClient) CreateOrUpdatePreparer(resourceGroupName string, networkSecurityGroupName string, securityRuleName string, securityRuleParameters SecurityRule) (*http.Request, error) { +func (client SecurityRulesClient) CreateOrUpdatePreparer(resourceGroupName string, networkSecurityGroupName string, securityRuleName string, securityRuleParameters SecurityRule, cancel <-chan struct{}) (*http.Request, error) { pathParameters := map[string]interface{}{ - "networkSecurityGroupName": url.QueryEscape(networkSecurityGroupName), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "securityRuleName": url.QueryEscape(securityRuleName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "networkSecurityGroupName": autorest.Encode("path", networkSecurityGroupName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "securityRuleName": autorest.Encode("path", securityRuleName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, + preparer := autorest.CreatePreparer( autorest.AsJSON(), autorest.AsPut(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}/securityRules/{securityRuleName}"), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}/securityRules/{securityRuleName}", pathParameters), autorest.WithJSON(securityRuleParameters), - 
autorest.WithPathParameters(pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) } // CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the // http.Response Body if it receives an error. func (client SecurityRulesClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusCreated, http.StatusOK) + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) } // CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always // closes the http.Response Body. -func (client SecurityRulesClient) CreateOrUpdateResponder(resp *http.Response) (result SecurityRule, err error) { +func (client SecurityRulesClient) CreateOrUpdateResponder(resp *http.Response) (result autorest.Response, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusCreated, http.StatusOK), - autorest.ByUnmarshallingJSON(&result), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} + result.Response = resp return } // Delete the delete network security rule operation deletes the specified -// network security rule. +// network security rule. This method may poll for completion. Polling can be +// canceled by passing the cancel channel argument. The channel will be used +// to cancel polling and any outstanding HTTP requests. // // resourceGroupName is the name of the resource group. // networkSecurityGroupName is the name of the network security group. // securityRuleName is the name of the security rule. 
-func (client SecurityRulesClient) Delete(resourceGroupName string, networkSecurityGroupName string, securityRuleName string) (result autorest.Response, ae error) { - req, err := client.DeletePreparer(resourceGroupName, networkSecurityGroupName, securityRuleName) +func (client SecurityRulesClient) Delete(resourceGroupName string, networkSecurityGroupName string, securityRuleName string, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.DeletePreparer(resourceGroupName, networkSecurityGroupName, securityRuleName, cancel) if err != nil { - return result, autorest.NewErrorWithError(err, "network/SecurityRulesClient", "Delete", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "network.SecurityRulesClient", "Delete", nil, "Failure preparing request") } resp, err := client.DeleteSender(req) if err != nil { result.Response = resp - return result, autorest.NewErrorWithError(err, "network/SecurityRulesClient", "Delete", "Failure sending request") + return result, autorest.NewErrorWithError(err, "network.SecurityRulesClient", "Delete", resp, "Failure sending request") } result, err = client.DeleteResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "network/SecurityRulesClient", "Delete", "Failure responding to request") + err = autorest.NewErrorWithError(err, "network.SecurityRulesClient", "Delete", resp, "Failure responding to request") } return } // DeletePreparer prepares the Delete request. 
-func (client SecurityRulesClient) DeletePreparer(resourceGroupName string, networkSecurityGroupName string, securityRuleName string) (*http.Request, error) { +func (client SecurityRulesClient) DeletePreparer(resourceGroupName string, networkSecurityGroupName string, securityRuleName string, cancel <-chan struct{}) (*http.Request, error) { pathParameters := map[string]interface{}{ - "networkSecurityGroupName": url.QueryEscape(networkSecurityGroupName), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "securityRuleName": url.QueryEscape(securityRuleName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "networkSecurityGroupName": autorest.Encode("path", networkSecurityGroupName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "securityRuleName": autorest.Encode("path", securityRuleName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsDelete(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}/securityRules/{securityRuleName}"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}/securityRules/{securityRuleName}", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) } // DeleteSender sends the Delete request. The method will close the // http.Response Body if it receives an error. 
func (client SecurityRulesClient) DeleteSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusNoContent, http.StatusAccepted, http.StatusOK) + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) } // DeleteResponder handles the response to the Delete request. The method always @@ -175,7 +182,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusNoContent, http.StatusAccepted, http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusNoContent, http.StatusAccepted, http.StatusOK), autorest.ByClosing()) result.Response = resp return @@ -187,21 +194,21 @@ // resourceGroupName is the name of the resource group. // networkSecurityGroupName is the name of the network security group. // securityRuleName is the name of the security rule. -func (client SecurityRulesClient) Get(resourceGroupName string, networkSecurityGroupName string, securityRuleName string) (result SecurityRule, ae error) { +func (client SecurityRulesClient) Get(resourceGroupName string, networkSecurityGroupName string, securityRuleName string) (result SecurityRule, err error) { req, err := client.GetPreparer(resourceGroupName, networkSecurityGroupName, securityRuleName) if err != nil { - return result, autorest.NewErrorWithError(err, "network/SecurityRulesClient", "Get", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "network.SecurityRulesClient", "Get", nil, "Failure preparing request") } resp, err := client.GetSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network/SecurityRulesClient", "Get", "Failure sending request") + return result, autorest.NewErrorWithError(err, "network.SecurityRulesClient", "Get", resp, "Failure sending request") } result, err = client.GetResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, 
"network/SecurityRulesClient", "Get", "Failure responding to request") + err = autorest.NewErrorWithError(err, "network.SecurityRulesClient", "Get", resp, "Failure responding to request") } return @@ -210,29 +217,28 @@ // GetPreparer prepares the Get request. func (client SecurityRulesClient) GetPreparer(resourceGroupName string, networkSecurityGroupName string, securityRuleName string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "networkSecurityGroupName": url.QueryEscape(networkSecurityGroupName), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "securityRuleName": url.QueryEscape(securityRuleName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "networkSecurityGroupName": autorest.Encode("path", networkSecurityGroupName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "securityRuleName": autorest.Encode("path", securityRuleName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}/securityRules/{securityRuleName}"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}/securityRules/{securityRuleName}", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. 
func (client SecurityRulesClient) GetSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetResponder handles the response to the Get request. The method always @@ -241,7 +247,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -253,21 +259,21 @@ // // resourceGroupName is the name of the resource group. // networkSecurityGroupName is the name of the network security group. -func (client SecurityRulesClient) List(resourceGroupName string, networkSecurityGroupName string) (result SecurityRuleListResult, ae error) { +func (client SecurityRulesClient) List(resourceGroupName string, networkSecurityGroupName string) (result SecurityRuleListResult, err error) { req, err := client.ListPreparer(resourceGroupName, networkSecurityGroupName) if err != nil { - return result, autorest.NewErrorWithError(err, "network/SecurityRulesClient", "List", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "network.SecurityRulesClient", "List", nil, "Failure preparing request") } resp, err := client.ListSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network/SecurityRulesClient", "List", "Failure sending request") + return result, autorest.NewErrorWithError(err, "network.SecurityRulesClient", "List", resp, "Failure sending request") } result, err = client.ListResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "network/SecurityRulesClient", "List", "Failure responding to request") + err = autorest.NewErrorWithError(err, "network.SecurityRulesClient", "List", resp, "Failure responding to request") } return @@ -276,28 +282,27 @@ // ListPreparer prepares 
the List request. func (client SecurityRulesClient) ListPreparer(resourceGroupName string, networkSecurityGroupName string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "networkSecurityGroupName": url.QueryEscape(networkSecurityGroupName), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "networkSecurityGroupName": autorest.Encode("path", networkSecurityGroupName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}/securityRules"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}/securityRules", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client SecurityRulesClient) ListSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // ListResponder handles the response to the List request. 
The method always @@ -306,7 +311,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -314,10 +319,10 @@ } // ListNextResults retrieves the next set of results, if any. -func (client SecurityRulesClient) ListNextResults(lastResults SecurityRuleListResult) (result SecurityRuleListResult, ae error) { +func (client SecurityRulesClient) ListNextResults(lastResults SecurityRuleListResult) (result SecurityRuleListResult, err error) { req, err := lastResults.SecurityRuleListResultPreparer() if err != nil { - return result, autorest.NewErrorWithError(err, "network/SecurityRulesClient", "List", "Failure preparing next results request request") + return result, autorest.NewErrorWithError(err, "network.SecurityRulesClient", "List", nil, "Failure preparing next results request request") } if req == nil { return @@ -326,12 +331,12 @@ resp, err := client.ListSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network/SecurityRulesClient", "List", "Failure sending next results request request") + return result, autorest.NewErrorWithError(err, "network.SecurityRulesClient", "List", resp, "Failure sending next results request request") } result, err = client.ListResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "network/SecurityRulesClient", "List", "Failure responding to next results request request") + err = autorest.NewErrorWithError(err, "network.SecurityRulesClient", "List", resp, "Failure responding to next results request request") } return diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/network/subnets.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/network/subnets.go --- 
juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/network/subnets.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/network/subnets.go 2016-10-13 14:32:06.000000000 +0000 @@ -14,20 +14,20 @@ // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 // Changes may cause incorrect behavior and will be lost if the code is // regenerated. import ( - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" "net/http" - "net/url" ) -// SubnetsClient is the the Windows Azure Network management API provides a -// RESTful set of web services that interact with Windows Azure Networks +// SubnetsClient is the the Microsoft Azure Network management API provides a +// RESTful set of web services that interact with Microsoft Azure Networks // service to manage your network resrources. The API has entities that -// capture the relationship between an end user and the Windows Azure +// capture the relationship between an end user and the Microsoft Azure // Networks service. type SubnetsClient struct { ManagementClient @@ -44,124 +44,131 @@ } // CreateOrUpdate the Put Subnet operation creates/updates a subnet in -// thespecified virtual network +// thespecified virtual network This method may poll for completion. Polling +// can be canceled by passing the cancel channel argument. The channel will +// be used to cancel polling and any outstanding HTTP requests. // // resourceGroupName is the name of the resource group. virtualNetworkName is // the name of the virtual network. subnetName is the name of the subnet. 
// subnetParameters is parameters supplied to the create/update Subnet // operation -func (client SubnetsClient) CreateOrUpdate(resourceGroupName string, virtualNetworkName string, subnetName string, subnetParameters Subnet) (result Subnet, ae error) { - req, err := client.CreateOrUpdatePreparer(resourceGroupName, virtualNetworkName, subnetName, subnetParameters) +func (client SubnetsClient) CreateOrUpdate(resourceGroupName string, virtualNetworkName string, subnetName string, subnetParameters Subnet, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.CreateOrUpdatePreparer(resourceGroupName, virtualNetworkName, subnetName, subnetParameters, cancel) if err != nil { - return result, autorest.NewErrorWithError(err, "network/SubnetsClient", "CreateOrUpdate", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "network.SubnetsClient", "CreateOrUpdate", nil, "Failure preparing request") } resp, err := client.CreateOrUpdateSender(req) if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network/SubnetsClient", "CreateOrUpdate", "Failure sending request") + result.Response = resp + return result, autorest.NewErrorWithError(err, "network.SubnetsClient", "CreateOrUpdate", resp, "Failure sending request") } result, err = client.CreateOrUpdateResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "network/SubnetsClient", "CreateOrUpdate", "Failure responding to request") + err = autorest.NewErrorWithError(err, "network.SubnetsClient", "CreateOrUpdate", resp, "Failure responding to request") } return } // CreateOrUpdatePreparer prepares the CreateOrUpdate request. 
-func (client SubnetsClient) CreateOrUpdatePreparer(resourceGroupName string, virtualNetworkName string, subnetName string, subnetParameters Subnet) (*http.Request, error) { +func (client SubnetsClient) CreateOrUpdatePreparer(resourceGroupName string, virtualNetworkName string, subnetName string, subnetParameters Subnet, cancel <-chan struct{}) (*http.Request, error) { pathParameters := map[string]interface{}{ - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subnetName": url.QueryEscape(subnetName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - "virtualNetworkName": url.QueryEscape(virtualNetworkName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subnetName": autorest.Encode("path", subnetName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "virtualNetworkName": autorest.Encode("path", virtualNetworkName), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, + preparer := autorest.CreatePreparer( autorest.AsJSON(), autorest.AsPut(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualnetworks/{virtualNetworkName}/subnets/{subnetName}"), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}", pathParameters), autorest.WithJSON(subnetParameters), - autorest.WithPathParameters(pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) } // CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the // http.Response Body if it receives an error. 
func (client SubnetsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusCreated, http.StatusOK) + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) } // CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always // closes the http.Response Body. -func (client SubnetsClient) CreateOrUpdateResponder(resp *http.Response) (result Subnet, err error) { +func (client SubnetsClient) CreateOrUpdateResponder(resp *http.Response) (result autorest.Response, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusCreated, http.StatusOK), - autorest.ByUnmarshallingJSON(&result), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} + result.Response = resp return } -// Delete the delete subnet operation deletes the specified subnet. +// Delete the delete subnet operation deletes the specified subnet. This +// method may poll for completion. Polling can be canceled by passing the +// cancel channel argument. The channel will be used to cancel polling and +// any outstanding HTTP requests. // // resourceGroupName is the name of the resource group. virtualNetworkName is // the name of the virtual network. subnetName is the name of the subnet. 
-func (client SubnetsClient) Delete(resourceGroupName string, virtualNetworkName string, subnetName string) (result autorest.Response, ae error) { - req, err := client.DeletePreparer(resourceGroupName, virtualNetworkName, subnetName) +func (client SubnetsClient) Delete(resourceGroupName string, virtualNetworkName string, subnetName string, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.DeletePreparer(resourceGroupName, virtualNetworkName, subnetName, cancel) if err != nil { - return result, autorest.NewErrorWithError(err, "network/SubnetsClient", "Delete", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "network.SubnetsClient", "Delete", nil, "Failure preparing request") } resp, err := client.DeleteSender(req) if err != nil { result.Response = resp - return result, autorest.NewErrorWithError(err, "network/SubnetsClient", "Delete", "Failure sending request") + return result, autorest.NewErrorWithError(err, "network.SubnetsClient", "Delete", resp, "Failure sending request") } result, err = client.DeleteResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "network/SubnetsClient", "Delete", "Failure responding to request") + err = autorest.NewErrorWithError(err, "network.SubnetsClient", "Delete", resp, "Failure responding to request") } return } // DeletePreparer prepares the Delete request. 
-func (client SubnetsClient) DeletePreparer(resourceGroupName string, virtualNetworkName string, subnetName string) (*http.Request, error) { +func (client SubnetsClient) DeletePreparer(resourceGroupName string, virtualNetworkName string, subnetName string, cancel <-chan struct{}) (*http.Request, error) { pathParameters := map[string]interface{}{ - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subnetName": url.QueryEscape(subnetName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - "virtualNetworkName": url.QueryEscape(virtualNetworkName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subnetName": autorest.Encode("path", subnetName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "virtualNetworkName": autorest.Encode("path", virtualNetworkName), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsDelete(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualnetworks/{virtualNetworkName}/subnets/{subnetName}"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) } // DeleteSender sends the Delete request. The method will close the // http.Response Body if it receives an error. 
func (client SubnetsClient) DeleteSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusNoContent, http.StatusAccepted, http.StatusOK) + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) } // DeleteResponder handles the response to the Delete request. The method always @@ -170,7 +177,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusNoContent, http.StatusAccepted, http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent, http.StatusAccepted), autorest.ByClosing()) result.Response = resp return @@ -181,52 +188,55 @@ // // resourceGroupName is the name of the resource group. virtualNetworkName is // the name of the virtual network. subnetName is the name of the subnet. -func (client SubnetsClient) Get(resourceGroupName string, virtualNetworkName string, subnetName string) (result Subnet, ae error) { - req, err := client.GetPreparer(resourceGroupName, virtualNetworkName, subnetName) +// expand is expand references resources. 
+func (client SubnetsClient) Get(resourceGroupName string, virtualNetworkName string, subnetName string, expand string) (result Subnet, err error) { + req, err := client.GetPreparer(resourceGroupName, virtualNetworkName, subnetName, expand) if err != nil { - return result, autorest.NewErrorWithError(err, "network/SubnetsClient", "Get", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "network.SubnetsClient", "Get", nil, "Failure preparing request") } resp, err := client.GetSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network/SubnetsClient", "Get", "Failure sending request") + return result, autorest.NewErrorWithError(err, "network.SubnetsClient", "Get", resp, "Failure sending request") } result, err = client.GetResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "network/SubnetsClient", "Get", "Failure responding to request") + err = autorest.NewErrorWithError(err, "network.SubnetsClient", "Get", resp, "Failure responding to request") } return } // GetPreparer prepares the Get request. 
-func (client SubnetsClient) GetPreparer(resourceGroupName string, virtualNetworkName string, subnetName string) (*http.Request, error) { +func (client SubnetsClient) GetPreparer(resourceGroupName string, virtualNetworkName string, subnetName string, expand string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subnetName": url.QueryEscape(subnetName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - "virtualNetworkName": url.QueryEscape(virtualNetworkName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subnetName": autorest.Encode("path", subnetName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "virtualNetworkName": autorest.Encode("path", virtualNetworkName), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, + } + if len(expand) > 0 { + queryParameters["$expand"] = autorest.Encode("query", expand) } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualnetworks/{virtualNetworkName}/subnets/{subnetName}"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. 
func (client SubnetsClient) GetSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetResponder handles the response to the Get request. The method always @@ -235,7 +245,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -247,21 +257,21 @@ // // resourceGroupName is the name of the resource group. virtualNetworkName is // the name of the virtual network. -func (client SubnetsClient) List(resourceGroupName string, virtualNetworkName string) (result SubnetListResult, ae error) { +func (client SubnetsClient) List(resourceGroupName string, virtualNetworkName string) (result SubnetListResult, err error) { req, err := client.ListPreparer(resourceGroupName, virtualNetworkName) if err != nil { - return result, autorest.NewErrorWithError(err, "network/SubnetsClient", "List", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "network.SubnetsClient", "List", nil, "Failure preparing request") } resp, err := client.ListSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network/SubnetsClient", "List", "Failure sending request") + return result, autorest.NewErrorWithError(err, "network.SubnetsClient", "List", resp, "Failure sending request") } result, err = client.ListResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "network/SubnetsClient", "List", "Failure responding to request") + err = autorest.NewErrorWithError(err, "network.SubnetsClient", "List", resp, "Failure responding to request") } return @@ -270,28 +280,27 @@ // ListPreparer prepares the List request. 
func (client SubnetsClient) ListPreparer(resourceGroupName string, virtualNetworkName string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - "virtualNetworkName": url.QueryEscape(virtualNetworkName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "virtualNetworkName": autorest.Encode("path", virtualNetworkName), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualnetworks/{virtualNetworkName}/subnets"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client SubnetsClient) ListSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // ListResponder handles the response to the List request. 
The method always @@ -300,7 +309,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -308,10 +317,10 @@ } // ListNextResults retrieves the next set of results, if any. -func (client SubnetsClient) ListNextResults(lastResults SubnetListResult) (result SubnetListResult, ae error) { +func (client SubnetsClient) ListNextResults(lastResults SubnetListResult) (result SubnetListResult, err error) { req, err := lastResults.SubnetListResultPreparer() if err != nil { - return result, autorest.NewErrorWithError(err, "network/SubnetsClient", "List", "Failure preparing next results request request") + return result, autorest.NewErrorWithError(err, "network.SubnetsClient", "List", nil, "Failure preparing next results request request") } if req == nil { return @@ -320,12 +329,12 @@ resp, err := client.ListSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network/SubnetsClient", "List", "Failure sending next results request request") + return result, autorest.NewErrorWithError(err, "network.SubnetsClient", "List", resp, "Failure sending next results request request") } result, err = client.ListResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "network/SubnetsClient", "List", "Failure responding to next results request request") + err = autorest.NewErrorWithError(err, "network.SubnetsClient", "List", resp, "Failure responding to next results request request") } return diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/network/usages.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/network/usages.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/network/usages.go 2016-08-16 08:56:25.000000000 +0000 +++ 
juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/network/usages.go 2016-10-13 14:32:06.000000000 +0000 @@ -14,20 +14,20 @@ // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 // Changes may cause incorrect behavior and will be lost if the code is // regenerated. import ( - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" "net/http" - "net/url" ) -// UsagesClient is the the Windows Azure Network management API provides a -// RESTful set of web services that interact with Windows Azure Networks +// UsagesClient is the the Microsoft Azure Network management API provides a +// RESTful set of web services that interact with Microsoft Azure Networks // service to manage your network resrources. The API has entities that -// capture the relationship between an end user and the Windows Azure +// capture the relationship between an end user and the Microsoft Azure // Networks service. type UsagesClient struct { ManagementClient @@ -46,21 +46,21 @@ // List lists compute usages for a subscription. // // location is the location upon which resource usage is queried. 
-func (client UsagesClient) List(location string) (result UsagesListResult, ae error) { +func (client UsagesClient) List(location string) (result UsagesListResult, err error) { req, err := client.ListPreparer(location) if err != nil { - return result, autorest.NewErrorWithError(err, "network/UsagesClient", "List", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "network.UsagesClient", "List", nil, "Failure preparing request") } resp, err := client.ListSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network/UsagesClient", "List", "Failure sending request") + return result, autorest.NewErrorWithError(err, "network.UsagesClient", "List", resp, "Failure sending request") } result, err = client.ListResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "network/UsagesClient", "List", "Failure responding to request") + err = autorest.NewErrorWithError(err, "network.UsagesClient", "List", resp, "Failure responding to request") } return @@ -69,27 +69,26 @@ // ListPreparer prepares the List request. 
func (client UsagesClient) ListPreparer(location string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "location": url.QueryEscape(location), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "location": autorest.Encode("path", location), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/providers/Microsoft.Network/locations/{location}/usages"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Network/locations/{location}/usages", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client UsagesClient) ListSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // ListResponder handles the response to the List request. 
The method always @@ -98,7 +97,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/network/version.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/network/version.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/network/version.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/network/version.go 2016-10-13 14:32:06.000000000 +0000 @@ -14,7 +14,7 @@ // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 // Changes may cause incorrect behavior and will be lost if the code is // regenerated. @@ -23,18 +23,18 @@ ) const ( - major = "0" - minor = "3" + major = "3" + minor = "1" patch = "0" // Always begin a "tag" with a dash (as per http://semver.org) tag = "-beta" semVerFormat = "%s.%s.%s%s" - userAgentFormat = "Azure-SDK-for-Go/%s;Package arm/%s;API %s" + userAgentFormat = "Azure-SDK-for-Go/%s arm-%s/%s" ) // UserAgent returns the UserAgent string to use when sending http.Requests. func UserAgent() string { - return fmt.Sprintf(userAgentFormat, Version(), "network", "2015-05-01-preview") + return fmt.Sprintf(userAgentFormat, Version(), "network", "2016-03-30") } // Version returns the semantic version (see http://semver.org) of the client. 
diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworkgatewayconnections.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworkgatewayconnections.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworkgatewayconnections.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworkgatewayconnections.go 2016-10-13 14:32:06.000000000 +0000 @@ -14,21 +14,21 @@ // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 // Changes may cause incorrect behavior and will be lost if the code is // regenerated. import ( - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" "net/http" - "net/url" ) -// VirtualNetworkGatewayConnectionsClient is the the Windows Azure Network +// VirtualNetworkGatewayConnectionsClient is the the Microsoft Azure Network // management API provides a RESTful set of web services that interact with -// Windows Azure Networks service to manage your network resrources. The API -// has entities that capture the relationship between an end user and the -// Windows Azure Networks service. +// Microsoft Azure Networks service to manage your network resrources. The +// API has entities that capture the relationship between an end user and the +// Microsoft Azure Networks service. type VirtualNetworkGatewayConnectionsClient struct { ManagementClient } @@ -47,126 +47,133 @@ // CreateOrUpdate the Put VirtualNetworkGatewayConnection operation // creates/updates a virtual network gateway connection in the specified -// resource group through Network resource provider. 
+// resource group through Network resource provider. This method may poll for +// completion. Polling can be canceled by passing the cancel channel +// argument. The channel will be used to cancel polling and any outstanding +// HTTP requests. // // resourceGroupName is the name of the resource group. // virtualNetworkGatewayConnectionName is the name of the virtual network // gateway conenction. parameters is parameters supplied to the Begin Create // or update Virtual Network Gateway connection operation through Network // resource provider. -func (client VirtualNetworkGatewayConnectionsClient) CreateOrUpdate(resourceGroupName string, virtualNetworkGatewayConnectionName string, parameters VirtualNetworkGatewayConnection) (result VirtualNetworkGatewayConnection, ae error) { - req, err := client.CreateOrUpdatePreparer(resourceGroupName, virtualNetworkGatewayConnectionName, parameters) +func (client VirtualNetworkGatewayConnectionsClient) CreateOrUpdate(resourceGroupName string, virtualNetworkGatewayConnectionName string, parameters VirtualNetworkGatewayConnection, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.CreateOrUpdatePreparer(resourceGroupName, virtualNetworkGatewayConnectionName, parameters, cancel) if err != nil { - return result, autorest.NewErrorWithError(err, "network/VirtualNetworkGatewayConnectionsClient", "CreateOrUpdate", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsClient", "CreateOrUpdate", nil, "Failure preparing request") } resp, err := client.CreateOrUpdateSender(req) if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network/VirtualNetworkGatewayConnectionsClient", "CreateOrUpdate", "Failure sending request") + result.Response = resp + return result, autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsClient", "CreateOrUpdate", resp, "Failure 
sending request") } result, err = client.CreateOrUpdateResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "network/VirtualNetworkGatewayConnectionsClient", "CreateOrUpdate", "Failure responding to request") + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsClient", "CreateOrUpdate", resp, "Failure responding to request") } return } // CreateOrUpdatePreparer prepares the CreateOrUpdate request. -func (client VirtualNetworkGatewayConnectionsClient) CreateOrUpdatePreparer(resourceGroupName string, virtualNetworkGatewayConnectionName string, parameters VirtualNetworkGatewayConnection) (*http.Request, error) { +func (client VirtualNetworkGatewayConnectionsClient) CreateOrUpdatePreparer(resourceGroupName string, virtualNetworkGatewayConnectionName string, parameters VirtualNetworkGatewayConnection, cancel <-chan struct{}) (*http.Request, error) { pathParameters := map[string]interface{}{ - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - "virtualNetworkGatewayConnectionName": url.QueryEscape(virtualNetworkGatewayConnectionName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "virtualNetworkGatewayConnectionName": autorest.Encode("path", virtualNetworkGatewayConnectionName), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, + preparer := autorest.CreatePreparer( autorest.AsJSON(), autorest.AsPut(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/connections/{virtualNetworkGatewayConnectionName}"), + 
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/connections/{virtualNetworkGatewayConnectionName}", pathParameters), autorest.WithJSON(parameters), - autorest.WithPathParameters(pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) } // CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the // http.Response Body if it receives an error. func (client VirtualNetworkGatewayConnectionsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusCreated, http.StatusOK) + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) } // CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always // closes the http.Response Body. -func (client VirtualNetworkGatewayConnectionsClient) CreateOrUpdateResponder(resp *http.Response) (result VirtualNetworkGatewayConnection, err error) { +func (client VirtualNetworkGatewayConnectionsClient) CreateOrUpdateResponder(resp *http.Response) (result autorest.Response, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusCreated, http.StatusOK), - autorest.ByUnmarshallingJSON(&result), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} + result.Response = resp return } // Delete the Delete VirtualNetworkGatewayConnection operation deletes the // specifed virtual network Gateway connection through Network resource -// provider. +// provider. This method may poll for completion. Polling can be canceled by +// passing the cancel channel argument. The channel will be used to cancel +// polling and any outstanding HTTP requests. // // resourceGroupName is the name of the resource group. 
// virtualNetworkGatewayConnectionName is the name of the virtual network // gateway connection. -func (client VirtualNetworkGatewayConnectionsClient) Delete(resourceGroupName string, virtualNetworkGatewayConnectionName string) (result autorest.Response, ae error) { - req, err := client.DeletePreparer(resourceGroupName, virtualNetworkGatewayConnectionName) +func (client VirtualNetworkGatewayConnectionsClient) Delete(resourceGroupName string, virtualNetworkGatewayConnectionName string, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.DeletePreparer(resourceGroupName, virtualNetworkGatewayConnectionName, cancel) if err != nil { - return result, autorest.NewErrorWithError(err, "network/VirtualNetworkGatewayConnectionsClient", "Delete", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsClient", "Delete", nil, "Failure preparing request") } resp, err := client.DeleteSender(req) if err != nil { result.Response = resp - return result, autorest.NewErrorWithError(err, "network/VirtualNetworkGatewayConnectionsClient", "Delete", "Failure sending request") + return result, autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsClient", "Delete", resp, "Failure sending request") } result, err = client.DeleteResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "network/VirtualNetworkGatewayConnectionsClient", "Delete", "Failure responding to request") + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsClient", "Delete", resp, "Failure responding to request") } return } // DeletePreparer prepares the Delete request. 
-func (client VirtualNetworkGatewayConnectionsClient) DeletePreparer(resourceGroupName string, virtualNetworkGatewayConnectionName string) (*http.Request, error) { +func (client VirtualNetworkGatewayConnectionsClient) DeletePreparer(resourceGroupName string, virtualNetworkGatewayConnectionName string, cancel <-chan struct{}) (*http.Request, error) { pathParameters := map[string]interface{}{ - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - "virtualNetworkGatewayConnectionName": url.QueryEscape(virtualNetworkGatewayConnectionName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "virtualNetworkGatewayConnectionName": autorest.Encode("path", virtualNetworkGatewayConnectionName), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsDelete(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/connections/{virtualNetworkGatewayConnectionName}/"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/connections/{virtualNetworkGatewayConnectionName}", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) } // DeleteSender sends the Delete request. The method will close the // http.Response Body if it receives an error. 
func (client VirtualNetworkGatewayConnectionsClient) DeleteSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusNoContent, http.StatusAccepted, http.StatusOK) + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) } // DeleteResponder handles the response to the Delete request. The method always @@ -175,7 +182,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusNoContent, http.StatusAccepted, http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), autorest.ByClosing()) result.Response = resp return @@ -188,21 +195,21 @@ // resourceGroupName is the name of the resource group. // virtualNetworkGatewayConnectionName is the name of the virtual network // gateway connection. -func (client VirtualNetworkGatewayConnectionsClient) Get(resourceGroupName string, virtualNetworkGatewayConnectionName string) (result VirtualNetworkGatewayConnection, ae error) { +func (client VirtualNetworkGatewayConnectionsClient) Get(resourceGroupName string, virtualNetworkGatewayConnectionName string) (result VirtualNetworkGatewayConnection, err error) { req, err := client.GetPreparer(resourceGroupName, virtualNetworkGatewayConnectionName) if err != nil { - return result, autorest.NewErrorWithError(err, "network/VirtualNetworkGatewayConnectionsClient", "Get", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsClient", "Get", nil, "Failure preparing request") } resp, err := client.GetSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network/VirtualNetworkGatewayConnectionsClient", "Get", "Failure sending request") + return result, autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsClient", "Get", resp, "Failure sending request") } result, err 
= client.GetResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "network/VirtualNetworkGatewayConnectionsClient", "Get", "Failure responding to request") + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsClient", "Get", resp, "Failure responding to request") } return @@ -211,28 +218,27 @@ // GetPreparer prepares the Get request. func (client VirtualNetworkGatewayConnectionsClient) GetPreparer(resourceGroupName string, virtualNetworkGatewayConnectionName string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - "virtualNetworkGatewayConnectionName": url.QueryEscape(virtualNetworkGatewayConnectionName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "virtualNetworkGatewayConnectionName": autorest.Encode("path", virtualNetworkGatewayConnectionName), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/connections/{virtualNetworkGatewayConnectionName}"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/connections/{virtualNetworkGatewayConnectionName}", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. 
func (client VirtualNetworkGatewayConnectionsClient) GetSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetResponder handles the response to the Get request. The method always @@ -241,7 +247,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -253,62 +259,61 @@ // connection shared key through Network resource provider. // // resourceGroupName is the name of the resource group. -// virtualNetworkGatewayConnectionName is the virtual network gateway -// connection shared key name. -func (client VirtualNetworkGatewayConnectionsClient) GetSharedKey(resourceGroupName string, virtualNetworkGatewayConnectionName string) (result ConnectionSharedKey, ae error) { - req, err := client.GetSharedKeyPreparer(resourceGroupName, virtualNetworkGatewayConnectionName) +// connectionSharedKeyName is the virtual network gateway connection shared +// key name. 
+func (client VirtualNetworkGatewayConnectionsClient) GetSharedKey(resourceGroupName string, connectionSharedKeyName string) (result ConnectionSharedKeyResult, err error) { + req, err := client.GetSharedKeyPreparer(resourceGroupName, connectionSharedKeyName) if err != nil { - return result, autorest.NewErrorWithError(err, "network/VirtualNetworkGatewayConnectionsClient", "GetSharedKey", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsClient", "GetSharedKey", nil, "Failure preparing request") } resp, err := client.GetSharedKeySender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network/VirtualNetworkGatewayConnectionsClient", "GetSharedKey", "Failure sending request") + return result, autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsClient", "GetSharedKey", resp, "Failure sending request") } result, err = client.GetSharedKeyResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "network/VirtualNetworkGatewayConnectionsClient", "GetSharedKey", "Failure responding to request") + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsClient", "GetSharedKey", resp, "Failure responding to request") } return } // GetSharedKeyPreparer prepares the GetSharedKey request. 
-func (client VirtualNetworkGatewayConnectionsClient) GetSharedKeyPreparer(resourceGroupName string, virtualNetworkGatewayConnectionName string) (*http.Request, error) { +func (client VirtualNetworkGatewayConnectionsClient) GetSharedKeyPreparer(resourceGroupName string, connectionSharedKeyName string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - "virtualNetworkGatewayConnectionName": url.QueryEscape(virtualNetworkGatewayConnectionName), + "connectionSharedKeyName": autorest.Encode("path", connectionSharedKeyName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/connections/{virtualNetworkGatewayConnectionName}/sharedkey"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/connections/{connectionSharedKeyName}/sharedkey", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetSharedKeySender sends the GetSharedKey request. The method will close the // http.Response Body if it receives an error. func (client VirtualNetworkGatewayConnectionsClient) GetSharedKeySender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetSharedKeyResponder handles the response to the GetSharedKey request. 
The method always // closes the http.Response Body. -func (client VirtualNetworkGatewayConnectionsClient) GetSharedKeyResponder(resp *http.Response) (result ConnectionSharedKey, err error) { +func (client VirtualNetworkGatewayConnectionsClient) GetSharedKeyResponder(resp *http.Response) (result ConnectionSharedKeyResult, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -319,21 +324,21 @@ // virtual network gateways connections created. // // resourceGroupName is the name of the resource group. -func (client VirtualNetworkGatewayConnectionsClient) List(resourceGroupName string) (result VirtualNetworkGatewayConnectionListResult, ae error) { +func (client VirtualNetworkGatewayConnectionsClient) List(resourceGroupName string) (result VirtualNetworkGatewayConnectionListResult, err error) { req, err := client.ListPreparer(resourceGroupName) if err != nil { - return result, autorest.NewErrorWithError(err, "network/VirtualNetworkGatewayConnectionsClient", "List", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsClient", "List", nil, "Failure preparing request") } resp, err := client.ListSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network/VirtualNetworkGatewayConnectionsClient", "List", "Failure sending request") + return result, autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsClient", "List", resp, "Failure sending request") } result, err = client.ListResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "network/VirtualNetworkGatewayConnectionsClient", "List", "Failure responding to request") + err = autorest.NewErrorWithError(err, 
"network.VirtualNetworkGatewayConnectionsClient", "List", resp, "Failure responding to request") } return @@ -342,27 +347,26 @@ // ListPreparer prepares the List request. func (client VirtualNetworkGatewayConnectionsClient) ListPreparer(resourceGroupName string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/connections"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/connections", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client VirtualNetworkGatewayConnectionsClient) ListSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // ListResponder handles the response to the List request. 
The method always @@ -371,7 +375,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -379,10 +383,10 @@ } // ListNextResults retrieves the next set of results, if any. -func (client VirtualNetworkGatewayConnectionsClient) ListNextResults(lastResults VirtualNetworkGatewayConnectionListResult) (result VirtualNetworkGatewayConnectionListResult, ae error) { +func (client VirtualNetworkGatewayConnectionsClient) ListNextResults(lastResults VirtualNetworkGatewayConnectionListResult) (result VirtualNetworkGatewayConnectionListResult, err error) { req, err := lastResults.VirtualNetworkGatewayConnectionListResultPreparer() if err != nil { - return result, autorest.NewErrorWithError(err, "network/VirtualNetworkGatewayConnectionsClient", "List", "Failure preparing next results request request") + return result, autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsClient", "List", nil, "Failure preparing next results request request") } if req == nil { return @@ -391,12 +395,12 @@ resp, err := client.ListSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network/VirtualNetworkGatewayConnectionsClient", "List", "Failure sending next results request request") + return result, autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsClient", "List", resp, "Failure sending next results request request") } result, err = client.ListResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "network/VirtualNetworkGatewayConnectionsClient", "List", "Failure responding to next results request request") + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsClient", "List", resp, "Failure responding to next results 
request request") } return @@ -405,141 +409,147 @@ // ResetSharedKey the VirtualNetworkGatewayConnectionResetSharedKey operation // resets the virtual network gateway connection shared key for passed // virtual network gateway connection in the specified resource group through -// Network resource provider. +// Network resource provider. This method may poll for completion. Polling +// can be canceled by passing the cancel channel argument. The channel will +// be used to cancel polling and any outstanding HTTP requests. // // resourceGroupName is the name of the resource group. // virtualNetworkGatewayConnectionName is the virtual network gateway // connection reset shared key Name. parameters is parameters supplied to the // Begin Reset Virtual Network Gateway connection shared key operation // through Network resource provider. -func (client VirtualNetworkGatewayConnectionsClient) ResetSharedKey(resourceGroupName string, virtualNetworkGatewayConnectionName string, parameters ConnectionResetSharedKey) (result ConnectionResetSharedKey, ae error) { - req, err := client.ResetSharedKeyPreparer(resourceGroupName, virtualNetworkGatewayConnectionName, parameters) +func (client VirtualNetworkGatewayConnectionsClient) ResetSharedKey(resourceGroupName string, virtualNetworkGatewayConnectionName string, parameters ConnectionResetSharedKey, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.ResetSharedKeyPreparer(resourceGroupName, virtualNetworkGatewayConnectionName, parameters, cancel) if err != nil { - return result, autorest.NewErrorWithError(err, "network/VirtualNetworkGatewayConnectionsClient", "ResetSharedKey", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsClient", "ResetSharedKey", nil, "Failure preparing request") } resp, err := client.ResetSharedKeySender(req) if err != nil { - result.Response = autorest.Response{Response: resp} - return result, 
autorest.NewErrorWithError(err, "network/VirtualNetworkGatewayConnectionsClient", "ResetSharedKey", "Failure sending request") + result.Response = resp + return result, autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsClient", "ResetSharedKey", resp, "Failure sending request") } result, err = client.ResetSharedKeyResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "network/VirtualNetworkGatewayConnectionsClient", "ResetSharedKey", "Failure responding to request") + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsClient", "ResetSharedKey", resp, "Failure responding to request") } return } // ResetSharedKeyPreparer prepares the ResetSharedKey request. -func (client VirtualNetworkGatewayConnectionsClient) ResetSharedKeyPreparer(resourceGroupName string, virtualNetworkGatewayConnectionName string, parameters ConnectionResetSharedKey) (*http.Request, error) { +func (client VirtualNetworkGatewayConnectionsClient) ResetSharedKeyPreparer(resourceGroupName string, virtualNetworkGatewayConnectionName string, parameters ConnectionResetSharedKey, cancel <-chan struct{}) (*http.Request, error) { pathParameters := map[string]interface{}{ - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - "virtualNetworkGatewayConnectionName": url.QueryEscape(virtualNetworkGatewayConnectionName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "virtualNetworkGatewayConnectionName": autorest.Encode("path", virtualNetworkGatewayConnectionName), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, + preparer := autorest.CreatePreparer( autorest.AsJSON(), autorest.AsPost(), autorest.WithBaseURL(client.BaseURI), - 
autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/connections/{virtualNetworkGatewayConnectionName}/sharedkey/reset"), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/connections/{virtualNetworkGatewayConnectionName}/sharedkey/reset", pathParameters), autorest.WithJSON(parameters), - autorest.WithPathParameters(pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) } // ResetSharedKeySender sends the ResetSharedKey request. The method will close the // http.Response Body if it receives an error. func (client VirtualNetworkGatewayConnectionsClient) ResetSharedKeySender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusAccepted, http.StatusOK) + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) } // ResetSharedKeyResponder handles the response to the ResetSharedKey request. The method always // closes the http.Response Body. 
-func (client VirtualNetworkGatewayConnectionsClient) ResetSharedKeyResponder(resp *http.Response) (result ConnectionResetSharedKey, err error) { +func (client VirtualNetworkGatewayConnectionsClient) ResetSharedKeyResponder(resp *http.Response) (result autorest.Response, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusAccepted, http.StatusOK), - autorest.ByUnmarshallingJSON(&result), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} + result.Response = resp return } // SetSharedKey the Put VirtualNetworkGatewayConnectionSharedKey operation // sets the virtual network gateway connection shared key for passed virtual // network gateway connection in the specified resource group through Network -// resource provider. +// resource provider. This method may poll for completion. Polling can be +// canceled by passing the cancel channel argument. The channel will be used +// to cancel polling and any outstanding HTTP requests. // // resourceGroupName is the name of the resource group. // virtualNetworkGatewayConnectionName is the virtual network gateway // connection name. parameters is parameters supplied to the Begin Set // Virtual Network Gateway conection Shared key operation throughNetwork // resource provider. 
-func (client VirtualNetworkGatewayConnectionsClient) SetSharedKey(resourceGroupName string, virtualNetworkGatewayConnectionName string, parameters ConnectionSharedKey) (result ConnectionSharedKey, ae error) { - req, err := client.SetSharedKeyPreparer(resourceGroupName, virtualNetworkGatewayConnectionName, parameters) +func (client VirtualNetworkGatewayConnectionsClient) SetSharedKey(resourceGroupName string, virtualNetworkGatewayConnectionName string, parameters ConnectionSharedKey, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.SetSharedKeyPreparer(resourceGroupName, virtualNetworkGatewayConnectionName, parameters, cancel) if err != nil { - return result, autorest.NewErrorWithError(err, "network/VirtualNetworkGatewayConnectionsClient", "SetSharedKey", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsClient", "SetSharedKey", nil, "Failure preparing request") } resp, err := client.SetSharedKeySender(req) if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network/VirtualNetworkGatewayConnectionsClient", "SetSharedKey", "Failure sending request") + result.Response = resp + return result, autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsClient", "SetSharedKey", resp, "Failure sending request") } result, err = client.SetSharedKeyResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "network/VirtualNetworkGatewayConnectionsClient", "SetSharedKey", "Failure responding to request") + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsClient", "SetSharedKey", resp, "Failure responding to request") } return } // SetSharedKeyPreparer prepares the SetSharedKey request. 
-func (client VirtualNetworkGatewayConnectionsClient) SetSharedKeyPreparer(resourceGroupName string, virtualNetworkGatewayConnectionName string, parameters ConnectionSharedKey) (*http.Request, error) { +func (client VirtualNetworkGatewayConnectionsClient) SetSharedKeyPreparer(resourceGroupName string, virtualNetworkGatewayConnectionName string, parameters ConnectionSharedKey, cancel <-chan struct{}) (*http.Request, error) { pathParameters := map[string]interface{}{ - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - "virtualNetworkGatewayConnectionName": url.QueryEscape(virtualNetworkGatewayConnectionName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "virtualNetworkGatewayConnectionName": autorest.Encode("path", virtualNetworkGatewayConnectionName), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, + preparer := autorest.CreatePreparer( autorest.AsJSON(), autorest.AsPut(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/connections/{virtualNetworkGatewayConnectionName}/sharedkey"), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/connections/{virtualNetworkGatewayConnectionName}/sharedkey", pathParameters), autorest.WithJSON(parameters), - autorest.WithPathParameters(pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) } // SetSharedKeySender sends the SetSharedKey request. The method will close the // http.Response Body if it receives an error. 
func (client VirtualNetworkGatewayConnectionsClient) SetSharedKeySender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusCreated, http.StatusOK) + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) } // SetSharedKeyResponder handles the response to the SetSharedKey request. The method always // closes the http.Response Body. -func (client VirtualNetworkGatewayConnectionsClient) SetSharedKeyResponder(resp *http.Response) (result ConnectionSharedKey, err error) { +func (client VirtualNetworkGatewayConnectionsClient) SetSharedKeyResponder(resp *http.Response) (result autorest.Response, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusCreated, http.StatusOK), - autorest.ByUnmarshallingJSON(&result), + azure.WithErrorUnlessStatusCode(http.StatusCreated, http.StatusOK), autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} + result.Response = resp return } diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworkgateways.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworkgateways.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworkgateways.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworkgateways.go 2016-10-13 14:32:06.000000000 +0000 @@ -14,21 +14,21 @@ // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 // Changes may cause incorrect behavior and will be lost if the code is // regenerated. 
import ( - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" "net/http" - "net/url" ) -// VirtualNetworkGatewaysClient is the the Windows Azure Network management -// API provides a RESTful set of web services that interact with Windows +// VirtualNetworkGatewaysClient is the the Microsoft Azure Network management +// API provides a RESTful set of web services that interact with Microsoft // Azure Networks service to manage your network resrources. The API has -// entities that capture the relationship between an end user and the Windows -// Azure Networks service. +// entities that capture the relationship between an end user and the +// Microsoft Azure Networks service. type VirtualNetworkGatewaysClient struct { ManagementClient } @@ -47,123 +47,130 @@ // CreateOrUpdate the Put VirtualNetworkGateway operation creates/updates a // virtual network gateway in the specified resource group through Network -// resource provider. +// resource provider. This method may poll for completion. Polling can be +// canceled by passing the cancel channel argument. The channel will be used +// to cancel polling and any outstanding HTTP requests. // // resourceGroupName is the name of the resource group. // virtualNetworkGatewayName is the name of the virtual network gateway. // parameters is parameters supplied to the Begin Create or update Virtual // Network Gateway operation through Network resource provider. 
-func (client VirtualNetworkGatewaysClient) CreateOrUpdate(resourceGroupName string, virtualNetworkGatewayName string, parameters VirtualNetworkGateway) (result VirtualNetworkGateway, ae error) { - req, err := client.CreateOrUpdatePreparer(resourceGroupName, virtualNetworkGatewayName, parameters) +func (client VirtualNetworkGatewaysClient) CreateOrUpdate(resourceGroupName string, virtualNetworkGatewayName string, parameters VirtualNetworkGateway, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.CreateOrUpdatePreparer(resourceGroupName, virtualNetworkGatewayName, parameters, cancel) if err != nil { - return result, autorest.NewErrorWithError(err, "network/VirtualNetworkGatewaysClient", "CreateOrUpdate", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "CreateOrUpdate", nil, "Failure preparing request") } resp, err := client.CreateOrUpdateSender(req) if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network/VirtualNetworkGatewaysClient", "CreateOrUpdate", "Failure sending request") + result.Response = resp + return result, autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "CreateOrUpdate", resp, "Failure sending request") } result, err = client.CreateOrUpdateResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "network/VirtualNetworkGatewaysClient", "CreateOrUpdate", "Failure responding to request") + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "CreateOrUpdate", resp, "Failure responding to request") } return } // CreateOrUpdatePreparer prepares the CreateOrUpdate request. 
-func (client VirtualNetworkGatewaysClient) CreateOrUpdatePreparer(resourceGroupName string, virtualNetworkGatewayName string, parameters VirtualNetworkGateway) (*http.Request, error) { +func (client VirtualNetworkGatewaysClient) CreateOrUpdatePreparer(resourceGroupName string, virtualNetworkGatewayName string, parameters VirtualNetworkGateway, cancel <-chan struct{}) (*http.Request, error) { pathParameters := map[string]interface{}{ - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - "virtualNetworkGatewayName": url.QueryEscape(virtualNetworkGatewayName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "virtualNetworkGatewayName": autorest.Encode("path", virtualNetworkGatewayName), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, + preparer := autorest.CreatePreparer( autorest.AsJSON(), autorest.AsPut(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualnetworkgateways/{virtualNetworkGatewayName}"), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}", pathParameters), autorest.WithJSON(parameters), - autorest.WithPathParameters(pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) } // CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the // http.Response Body if it receives an error. 
func (client VirtualNetworkGatewaysClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusCreated, http.StatusOK) + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) } // CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always // closes the http.Response Body. -func (client VirtualNetworkGatewaysClient) CreateOrUpdateResponder(resp *http.Response) (result VirtualNetworkGateway, err error) { +func (client VirtualNetworkGatewaysClient) CreateOrUpdateResponder(resp *http.Response) (result autorest.Response, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusCreated, http.StatusOK), - autorest.ByUnmarshallingJSON(&result), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} + result.Response = resp return } // Delete the Delete VirtualNetworkGateway operation deletes the specifed -// virtual network Gateway through Network resource provider. +// virtual network Gateway through Network resource provider. This method may +// poll for completion. Polling can be canceled by passing the cancel channel +// argument. The channel will be used to cancel polling and any outstanding +// HTTP requests. // // resourceGroupName is the name of the resource group. // virtualNetworkGatewayName is the name of the virtual network gateway. 
-func (client VirtualNetworkGatewaysClient) Delete(resourceGroupName string, virtualNetworkGatewayName string) (result autorest.Response, ae error) { - req, err := client.DeletePreparer(resourceGroupName, virtualNetworkGatewayName) +func (client VirtualNetworkGatewaysClient) Delete(resourceGroupName string, virtualNetworkGatewayName string, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.DeletePreparer(resourceGroupName, virtualNetworkGatewayName, cancel) if err != nil { - return result, autorest.NewErrorWithError(err, "network/VirtualNetworkGatewaysClient", "Delete", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "Delete", nil, "Failure preparing request") } resp, err := client.DeleteSender(req) if err != nil { result.Response = resp - return result, autorest.NewErrorWithError(err, "network/VirtualNetworkGatewaysClient", "Delete", "Failure sending request") + return result, autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "Delete", resp, "Failure sending request") } result, err = client.DeleteResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "network/VirtualNetworkGatewaysClient", "Delete", "Failure responding to request") + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "Delete", resp, "Failure responding to request") } return } // DeletePreparer prepares the Delete request. 
-func (client VirtualNetworkGatewaysClient) DeletePreparer(resourceGroupName string, virtualNetworkGatewayName string) (*http.Request, error) { +func (client VirtualNetworkGatewaysClient) DeletePreparer(resourceGroupName string, virtualNetworkGatewayName string, cancel <-chan struct{}) (*http.Request, error) { pathParameters := map[string]interface{}{ - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - "virtualNetworkGatewayName": url.QueryEscape(virtualNetworkGatewayName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "virtualNetworkGatewayName": autorest.Encode("path", virtualNetworkGatewayName), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsDelete(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) } // DeleteSender sends the Delete request. The method will close the // http.Response Body if it receives an error. 
func (client VirtualNetworkGatewaysClient) DeleteSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusNoContent, http.StatusAccepted, http.StatusOK) + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) } // DeleteResponder handles the response to the Delete request. The method always @@ -172,32 +179,101 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusNoContent, http.StatusAccepted, http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusNoContent, http.StatusAccepted, http.StatusOK), autorest.ByClosing()) result.Response = resp return } +// Generatevpnclientpackage the Generatevpnclientpackage operation generates +// Vpn client package for P2S client of the virtual network gateway in the +// specified resource group through Network resource provider. +// +// resourceGroupName is the name of the resource group. +// virtualNetworkGatewayName is the name of the virtual network gateway. +// parameters is parameters supplied to the Begin Generating Virtual Network +// Gateway Vpn client package operation through Network resource provider. 
+func (client VirtualNetworkGatewaysClient) Generatevpnclientpackage(resourceGroupName string, virtualNetworkGatewayName string, parameters VpnClientParameters) (result String, err error) { + req, err := client.GeneratevpnclientpackagePreparer(resourceGroupName, virtualNetworkGatewayName, parameters) + if err != nil { + return result, autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "Generatevpnclientpackage", nil, "Failure preparing request") + } + + resp, err := client.GeneratevpnclientpackageSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "Generatevpnclientpackage", resp, "Failure sending request") + } + + result, err = client.GeneratevpnclientpackageResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "Generatevpnclientpackage", resp, "Failure responding to request") + } + + return +} + +// GeneratevpnclientpackagePreparer prepares the Generatevpnclientpackage request. 
+func (client VirtualNetworkGatewaysClient) GeneratevpnclientpackagePreparer(resourceGroupName string, virtualNetworkGatewayName string, parameters VpnClientParameters) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "virtualNetworkGatewayName": autorest.Encode("path", virtualNetworkGatewayName), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/generatevpnclientpackage", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GeneratevpnclientpackageSender sends the Generatevpnclientpackage request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualNetworkGatewaysClient) GeneratevpnclientpackageSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GeneratevpnclientpackageResponder handles the response to the Generatevpnclientpackage request. The method always +// closes the http.Response Body. 
+func (client VirtualNetworkGatewaysClient) GeneratevpnclientpackageResponder(resp *http.Response) (result String, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result.Value), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + // Get the Get VirtualNetworkGateway operation retrieves information about the // specified virtual network gateway through Network resource provider. // // resourceGroupName is the name of the resource group. // virtualNetworkGatewayName is the name of the virtual network gateway. -func (client VirtualNetworkGatewaysClient) Get(resourceGroupName string, virtualNetworkGatewayName string) (result VirtualNetworkGateway, ae error) { +func (client VirtualNetworkGatewaysClient) Get(resourceGroupName string, virtualNetworkGatewayName string) (result VirtualNetworkGateway, err error) { req, err := client.GetPreparer(resourceGroupName, virtualNetworkGatewayName) if err != nil { - return result, autorest.NewErrorWithError(err, "network/VirtualNetworkGatewaysClient", "Get", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "Get", nil, "Failure preparing request") } resp, err := client.GetSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network/VirtualNetworkGatewaysClient", "Get", "Failure sending request") + return result, autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "Get", resp, "Failure sending request") } result, err = client.GetResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "network/VirtualNetworkGatewaysClient", "Get", "Failure responding to request") + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "Get", resp, "Failure responding to request") } return 
@@ -206,28 +282,27 @@ // GetPreparer prepares the Get request. func (client VirtualNetworkGatewaysClient) GetPreparer(resourceGroupName string, virtualNetworkGatewayName string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - "virtualNetworkGatewayName": url.QueryEscape(virtualNetworkGatewayName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "virtualNetworkGatewayName": autorest.Encode("path", virtualNetworkGatewayName), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualnetworkgateways/{virtualNetworkGatewayName}"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client VirtualNetworkGatewaysClient) GetSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetResponder handles the response to the Get request. 
The method always @@ -236,7 +311,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -247,21 +322,21 @@ // network gateways stored. // // resourceGroupName is the name of the resource group. -func (client VirtualNetworkGatewaysClient) List(resourceGroupName string) (result VirtualNetworkGatewayListResult, ae error) { +func (client VirtualNetworkGatewaysClient) List(resourceGroupName string) (result VirtualNetworkGatewayListResult, err error) { req, err := client.ListPreparer(resourceGroupName) if err != nil { - return result, autorest.NewErrorWithError(err, "network/VirtualNetworkGatewaysClient", "List", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "List", nil, "Failure preparing request") } resp, err := client.ListSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network/VirtualNetworkGatewaysClient", "List", "Failure sending request") + return result, autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "List", resp, "Failure sending request") } result, err = client.ListResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "network/VirtualNetworkGatewaysClient", "List", "Failure responding to request") + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "List", resp, "Failure responding to request") } return @@ -270,27 +345,26 @@ // ListPreparer prepares the List request. 
func (client VirtualNetworkGatewaysClient) ListPreparer(resourceGroupName string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client VirtualNetworkGatewaysClient) ListSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // ListResponder handles the response to the List request. The method always @@ -299,7 +373,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -307,10 +381,10 @@ } // ListNextResults retrieves the next set of results, if any. 
-func (client VirtualNetworkGatewaysClient) ListNextResults(lastResults VirtualNetworkGatewayListResult) (result VirtualNetworkGatewayListResult, ae error) { +func (client VirtualNetworkGatewaysClient) ListNextResults(lastResults VirtualNetworkGatewayListResult) (result VirtualNetworkGatewayListResult, err error) { req, err := lastResults.VirtualNetworkGatewayListResultPreparer() if err != nil { - return result, autorest.NewErrorWithError(err, "network/VirtualNetworkGatewaysClient", "List", "Failure preparing next results request request") + return result, autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "List", nil, "Failure preparing next results request request") } if req == nil { return @@ -319,82 +393,85 @@ resp, err := client.ListSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network/VirtualNetworkGatewaysClient", "List", "Failure sending next results request request") + return result, autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "List", resp, "Failure sending next results request request") } result, err = client.ListResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "network/VirtualNetworkGatewaysClient", "List", "Failure responding to next results request request") + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "List", resp, "Failure responding to next results request request") } return } // Reset the Reset VirtualNetworkGateway operation resets the primary of the -// virtual network gatewayin the specified resource group through Network -// resource provider. +// virtual network gateway in the specified resource group through Network +// resource provider. This method may poll for completion. Polling can be +// canceled by passing the cancel channel argument. The channel will be used +// to cancel polling and any outstanding HTTP requests. 
// // resourceGroupName is the name of the resource group. // virtualNetworkGatewayName is the name of the virtual network gateway. // parameters is parameters supplied to the Begin Reset Virtual Network // Gateway operation through Network resource provider. -func (client VirtualNetworkGatewaysClient) Reset(resourceGroupName string, virtualNetworkGatewayName string, parameters VirtualNetworkGateway) (result VirtualNetworkGateway, ae error) { - req, err := client.ResetPreparer(resourceGroupName, virtualNetworkGatewayName, parameters) +func (client VirtualNetworkGatewaysClient) Reset(resourceGroupName string, virtualNetworkGatewayName string, parameters VirtualNetworkGateway, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.ResetPreparer(resourceGroupName, virtualNetworkGatewayName, parameters, cancel) if err != nil { - return result, autorest.NewErrorWithError(err, "network/VirtualNetworkGatewaysClient", "Reset", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "Reset", nil, "Failure preparing request") } resp, err := client.ResetSender(req) if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network/VirtualNetworkGatewaysClient", "Reset", "Failure sending request") + result.Response = resp + return result, autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "Reset", resp, "Failure sending request") } result, err = client.ResetResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "network/VirtualNetworkGatewaysClient", "Reset", "Failure responding to request") + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "Reset", resp, "Failure responding to request") } return } // ResetPreparer prepares the Reset request. 
-func (client VirtualNetworkGatewaysClient) ResetPreparer(resourceGroupName string, virtualNetworkGatewayName string, parameters VirtualNetworkGateway) (*http.Request, error) { +func (client VirtualNetworkGatewaysClient) ResetPreparer(resourceGroupName string, virtualNetworkGatewayName string, parameters VirtualNetworkGateway, cancel <-chan struct{}) (*http.Request, error) { pathParameters := map[string]interface{}{ - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - "virtualNetworkGatewayName": url.QueryEscape(virtualNetworkGatewayName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "virtualNetworkGatewayName": autorest.Encode("path", virtualNetworkGatewayName), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, + preparer := autorest.CreatePreparer( autorest.AsJSON(), autorest.AsPost(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualnetworkgateways/{virtualNetworkGatewayName}/reset"), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/reset", pathParameters), autorest.WithJSON(parameters), - autorest.WithPathParameters(pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) } // ResetSender sends the Reset request. The method will close the // http.Response Body if it receives an error. 
func (client VirtualNetworkGatewaysClient) ResetSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusAccepted, http.StatusOK) + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) } // ResetResponder handles the response to the Reset request. The method always // closes the http.Response Body. -func (client VirtualNetworkGatewaysClient) ResetResponder(resp *http.Response) (result VirtualNetworkGateway, err error) { +func (client VirtualNetworkGatewaysClient) ResetResponder(resp *http.Response) (result autorest.Response, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusAccepted, http.StatusOK), - autorest.ByUnmarshallingJSON(&result), + azure.WithErrorUnlessStatusCode(http.StatusAccepted, http.StatusOK), autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} + result.Response = resp return } diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworks.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworks.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworks.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworks.go 2016-10-13 14:32:06.000000000 +0000 @@ -14,20 +14,20 @@ // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 // Changes may cause incorrect behavior and will be lost if the code is // regenerated. 
import ( - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" "net/http" - "net/url" ) -// VirtualNetworksClient is the the Windows Azure Network management API -// provides a RESTful set of web services that interact with Windows Azure +// VirtualNetworksClient is the the Microsoft Azure Network management API +// provides a RESTful set of web services that interact with Microsoft Azure // Networks service to manage your network resrources. The API has entities -// that capture the relationship between an end user and the Windows Azure +// that capture the relationship between an end user and the Microsoft Azure // Networks service. type VirtualNetworksClient struct { ManagementClient @@ -46,122 +46,129 @@ } // CreateOrUpdate the Put VirtualNetwork operation creates/updates a virtual -// network in the specified resource group. +// network in the specified resource group. This method may poll for +// completion. Polling can be canceled by passing the cancel channel +// argument. The channel will be used to cancel polling and any outstanding +// HTTP requests. // // resourceGroupName is the name of the resource group. virtualNetworkName is // the name of the virtual network. 
parameters is parameters supplied to the // create/update Virtual Network operation -func (client VirtualNetworksClient) CreateOrUpdate(resourceGroupName string, virtualNetworkName string, parameters VirtualNetwork) (result VirtualNetwork, ae error) { - req, err := client.CreateOrUpdatePreparer(resourceGroupName, virtualNetworkName, parameters) +func (client VirtualNetworksClient) CreateOrUpdate(resourceGroupName string, virtualNetworkName string, parameters VirtualNetwork, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.CreateOrUpdatePreparer(resourceGroupName, virtualNetworkName, parameters, cancel) if err != nil { - return result, autorest.NewErrorWithError(err, "network/VirtualNetworksClient", "CreateOrUpdate", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "CreateOrUpdate", nil, "Failure preparing request") } resp, err := client.CreateOrUpdateSender(req) if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network/VirtualNetworksClient", "CreateOrUpdate", "Failure sending request") + result.Response = resp + return result, autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "CreateOrUpdate", resp, "Failure sending request") } result, err = client.CreateOrUpdateResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "network/VirtualNetworksClient", "CreateOrUpdate", "Failure responding to request") + err = autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "CreateOrUpdate", resp, "Failure responding to request") } return } // CreateOrUpdatePreparer prepares the CreateOrUpdate request. 
-func (client VirtualNetworksClient) CreateOrUpdatePreparer(resourceGroupName string, virtualNetworkName string, parameters VirtualNetwork) (*http.Request, error) { +func (client VirtualNetworksClient) CreateOrUpdatePreparer(resourceGroupName string, virtualNetworkName string, parameters VirtualNetwork, cancel <-chan struct{}) (*http.Request, error) { pathParameters := map[string]interface{}{ - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - "virtualNetworkName": url.QueryEscape(virtualNetworkName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "virtualNetworkName": autorest.Encode("path", virtualNetworkName), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, + preparer := autorest.CreatePreparer( autorest.AsJSON(), autorest.AsPut(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualnetworks/{virtualNetworkName}"), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}", pathParameters), autorest.WithJSON(parameters), - autorest.WithPathParameters(pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) } // CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the // http.Response Body if it receives an error. 
func (client VirtualNetworksClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusCreated, http.StatusOK) + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) } // CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always // closes the http.Response Body. -func (client VirtualNetworksClient) CreateOrUpdateResponder(resp *http.Response) (result VirtualNetwork, err error) { +func (client VirtualNetworksClient) CreateOrUpdateResponder(resp *http.Response) (result autorest.Response, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusCreated, http.StatusOK), - autorest.ByUnmarshallingJSON(&result), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} + result.Response = resp return } // Delete the Delete VirtualNetwork operation deletes the specifed virtual -// network +// network This method may poll for completion. Polling can be canceled by +// passing the cancel channel argument. The channel will be used to cancel +// polling and any outstanding HTTP requests. // // resourceGroupName is the name of the resource group. virtualNetworkName is // the name of the virtual network. 
-func (client VirtualNetworksClient) Delete(resourceGroupName string, virtualNetworkName string) (result autorest.Response, ae error) { - req, err := client.DeletePreparer(resourceGroupName, virtualNetworkName) +func (client VirtualNetworksClient) Delete(resourceGroupName string, virtualNetworkName string, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.DeletePreparer(resourceGroupName, virtualNetworkName, cancel) if err != nil { - return result, autorest.NewErrorWithError(err, "network/VirtualNetworksClient", "Delete", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "Delete", nil, "Failure preparing request") } resp, err := client.DeleteSender(req) if err != nil { result.Response = resp - return result, autorest.NewErrorWithError(err, "network/VirtualNetworksClient", "Delete", "Failure sending request") + return result, autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "Delete", resp, "Failure sending request") } result, err = client.DeleteResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "network/VirtualNetworksClient", "Delete", "Failure responding to request") + err = autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "Delete", resp, "Failure responding to request") } return } // DeletePreparer prepares the Delete request. 
-func (client VirtualNetworksClient) DeletePreparer(resourceGroupName string, virtualNetworkName string) (*http.Request, error) { +func (client VirtualNetworksClient) DeletePreparer(resourceGroupName string, virtualNetworkName string, cancel <-chan struct{}) (*http.Request, error) { pathParameters := map[string]interface{}{ - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - "virtualNetworkName": url.QueryEscape(virtualNetworkName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "virtualNetworkName": autorest.Encode("path", virtualNetworkName), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsDelete(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualnetworks/{virtualNetworkName}"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) } // DeleteSender sends the Delete request. The method will close the // http.Response Body if it receives an error. func (client VirtualNetworksClient) DeleteSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusNoContent, http.StatusAccepted, http.StatusOK) + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) } // DeleteResponder handles the response to the Delete request. 
The method always @@ -170,7 +177,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusNoContent, http.StatusAccepted, http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusAccepted, http.StatusNoContent, http.StatusOK), autorest.ByClosing()) result.Response = resp return @@ -180,52 +187,54 @@ // specified virtual network. // // resourceGroupName is the name of the resource group. virtualNetworkName is -// the name of the virtual network. -func (client VirtualNetworksClient) Get(resourceGroupName string, virtualNetworkName string) (result VirtualNetwork, ae error) { - req, err := client.GetPreparer(resourceGroupName, virtualNetworkName) +// the name of the virtual network. expand is expand references resources. +func (client VirtualNetworksClient) Get(resourceGroupName string, virtualNetworkName string, expand string) (result VirtualNetwork, err error) { + req, err := client.GetPreparer(resourceGroupName, virtualNetworkName, expand) if err != nil { - return result, autorest.NewErrorWithError(err, "network/VirtualNetworksClient", "Get", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "Get", nil, "Failure preparing request") } resp, err := client.GetSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network/VirtualNetworksClient", "Get", "Failure sending request") + return result, autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "Get", resp, "Failure sending request") } result, err = client.GetResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "network/VirtualNetworksClient", "Get", "Failure responding to request") + err = autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "Get", resp, "Failure responding to request") } return } // GetPreparer prepares the Get request. 
-func (client VirtualNetworksClient) GetPreparer(resourceGroupName string, virtualNetworkName string) (*http.Request, error) { +func (client VirtualNetworksClient) GetPreparer(resourceGroupName string, virtualNetworkName string, expand string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - "virtualNetworkName": url.QueryEscape(virtualNetworkName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "virtualNetworkName": autorest.Encode("path", virtualNetworkName), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, + } + if len(expand) > 0 { + queryParameters["$expand"] = autorest.Encode("query", expand) } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualnetworks/{virtualNetworkName}"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client VirtualNetworksClient) GetSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetResponder handles the response to the Get request. 
The method always @@ -234,7 +243,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -245,21 +254,21 @@ // group // // resourceGroupName is the name of the resource group. -func (client VirtualNetworksClient) List(resourceGroupName string) (result VirtualNetworkListResult, ae error) { +func (client VirtualNetworksClient) List(resourceGroupName string) (result VirtualNetworkListResult, err error) { req, err := client.ListPreparer(resourceGroupName) if err != nil { - return result, autorest.NewErrorWithError(err, "network/VirtualNetworksClient", "List", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "List", nil, "Failure preparing request") } resp, err := client.ListSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network/VirtualNetworksClient", "List", "Failure sending request") + return result, autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "List", resp, "Failure sending request") } result, err = client.ListResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "network/VirtualNetworksClient", "List", "Failure responding to request") + err = autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "List", resp, "Failure responding to request") } return @@ -268,27 +277,26 @@ // ListPreparer prepares the List request. 
func (client VirtualNetworksClient) ListPreparer(resourceGroupName string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualnetworks"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client VirtualNetworksClient) ListSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // ListResponder handles the response to the List request. The method always @@ -297,7 +305,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -305,10 +313,10 @@ } // ListNextResults retrieves the next set of results, if any. 
-func (client VirtualNetworksClient) ListNextResults(lastResults VirtualNetworkListResult) (result VirtualNetworkListResult, ae error) { +func (client VirtualNetworksClient) ListNextResults(lastResults VirtualNetworkListResult) (result VirtualNetworkListResult, err error) { req, err := lastResults.VirtualNetworkListResultPreparer() if err != nil { - return result, autorest.NewErrorWithError(err, "network/VirtualNetworksClient", "List", "Failure preparing next results request request") + return result, autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "List", nil, "Failure preparing next results request request") } if req == nil { return @@ -317,12 +325,12 @@ resp, err := client.ListSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network/VirtualNetworksClient", "List", "Failure sending next results request request") + return result, autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "List", resp, "Failure sending next results request request") } result, err = client.ListResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "network/VirtualNetworksClient", "List", "Failure responding to next results request request") + err = autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "List", resp, "Failure responding to next results request request") } return @@ -330,21 +338,21 @@ // ListAll the list VirtualNetwork returns all Virtual Networks in a // subscription -func (client VirtualNetworksClient) ListAll() (result VirtualNetworkListResult, ae error) { +func (client VirtualNetworksClient) ListAll() (result VirtualNetworkListResult, err error) { req, err := client.ListAllPreparer() if err != nil { - return result, autorest.NewErrorWithError(err, "network/VirtualNetworksClient", "ListAll", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "ListAll", nil, "Failure preparing 
request") } resp, err := client.ListAllSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network/VirtualNetworksClient", "ListAll", "Failure sending request") + return result, autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "ListAll", resp, "Failure sending request") } result, err = client.ListAllResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "network/VirtualNetworksClient", "ListAll", "Failure responding to request") + err = autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "ListAll", resp, "Failure responding to request") } return @@ -353,26 +361,25 @@ // ListAllPreparer prepares the ListAll request. func (client VirtualNetworksClient) ListAllPreparer() (*http.Request, error) { pathParameters := map[string]interface{}{ - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/providers/Microsoft.Network/virtualnetworks"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Network/virtualNetworks", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // ListAllSender sends the ListAll request. The method will close the // http.Response Body if it receives an error. func (client VirtualNetworksClient) ListAllSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // ListAllResponder handles the response to the ListAll request. 
The method always @@ -381,7 +388,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -389,10 +396,10 @@ } // ListAllNextResults retrieves the next set of results, if any. -func (client VirtualNetworksClient) ListAllNextResults(lastResults VirtualNetworkListResult) (result VirtualNetworkListResult, ae error) { +func (client VirtualNetworksClient) ListAllNextResults(lastResults VirtualNetworkListResult) (result VirtualNetworkListResult, err error) { req, err := lastResults.VirtualNetworkListResultPreparer() if err != nil { - return result, autorest.NewErrorWithError(err, "network/VirtualNetworksClient", "ListAll", "Failure preparing next results request request") + return result, autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "ListAll", nil, "Failure preparing next results request request") } if req == nil { return @@ -401,12 +408,12 @@ resp, err := client.ListAllSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network/VirtualNetworksClient", "ListAll", "Failure sending next results request request") + return result, autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "ListAll", resp, "Failure sending next results request request") } result, err = client.ListAllResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "network/VirtualNetworksClient", "ListAll", "Failure responding to next results request request") + err = autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "ListAll", resp, "Failure responding to next results request request") } return diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/notificationhubs/client.go 
juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/notificationhubs/client.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/notificationhubs/client.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/notificationhubs/client.go 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,831 @@ +// Package notificationhubs implements the Azure ARM Notificationhubs service +// API version 2014-09-01. +// +// Azure NotificationHub client +package notificationhubs + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "net/http" +) + +const ( + // APIVersion is the version of the Notificationhubs + APIVersion = "2014-09-01" + + // DefaultBaseURI is the default URI used for the service Notificationhubs + DefaultBaseURI = "https://management.azure.com" +) + +// ManagementClient is the base client for Notificationhubs. +type ManagementClient struct { + autorest.Client + BaseURI string + APIVersion string + SubscriptionID string +} + +// New creates an instance of the ManagementClient client. 
+func New(subscriptionID string) ManagementClient { + return NewWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewWithBaseURI creates an instance of the ManagementClient client. +func NewWithBaseURI(baseURI string, subscriptionID string) ManagementClient { + return ManagementClient{ + Client: autorest.NewClientWithUserAgent(UserAgent()), + BaseURI: baseURI, + APIVersion: APIVersion, + SubscriptionID: subscriptionID, + } +} + +// CheckAvailability checks the availability of the given notificationHub in a +// namespace. +// +// resourceGroupName is the name of the resource group. namespaceName is the +// namespace name. parameters is the notificationHub name. +func (client ManagementClient) CheckAvailability(resourceGroupName string, namespaceName string, parameters CheckAvailabilityParameters) (result CheckAvailabilityResource, err error) { + req, err := client.CheckAvailabilityPreparer(resourceGroupName, namespaceName, parameters) + if err != nil { + return result, autorest.NewErrorWithError(err, "notificationhubs.ManagementClient", "CheckAvailability", nil, "Failure preparing request") + } + + resp, err := client.CheckAvailabilitySender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "notificationhubs.ManagementClient", "CheckAvailability", resp, "Failure sending request") + } + + result, err = client.CheckAvailabilityResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "notificationhubs.ManagementClient", "CheckAvailability", resp, "Failure responding to request") + } + + return +} + +// CheckAvailabilityPreparer prepares the CheckAvailability request. 
+func (client ManagementClient) CheckAvailabilityPreparer(resourceGroupName string, namespaceName string, parameters CheckAvailabilityParameters) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "namespaceName": autorest.Encode("path", namespaceName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NotificationHubs/namespaces/{namespaceName}/checkNotificationHubAvailability", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// CheckAvailabilitySender sends the CheckAvailability request. The method will close the +// http.Response Body if it receives an error. +func (client ManagementClient) CheckAvailabilitySender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// CheckAvailabilityResponder handles the response to the CheckAvailability request. The method always +// closes the http.Response Body. +func (client ManagementClient) CheckAvailabilityResponder(resp *http.Response) (result CheckAvailabilityResource, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// CreateOrUpdate creates/Update a NotificationHub in a namespace. +// +// resourceGroupName is the name of the resource group. namespaceName is the +// namespace name. notificationHubName is the notification hub name. 
+// parameters is parameters supplied to the create/update a NotificationHub +// Resource. +func (client ManagementClient) CreateOrUpdate(resourceGroupName string, namespaceName string, notificationHubName string, parameters NotificationHubCreateOrUpdateParameters) (result NotificationHubResource, err error) { + req, err := client.CreateOrUpdatePreparer(resourceGroupName, namespaceName, notificationHubName, parameters) + if err != nil { + return result, autorest.NewErrorWithError(err, "notificationhubs.ManagementClient", "CreateOrUpdate", nil, "Failure preparing request") + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "notificationhubs.ManagementClient", "CreateOrUpdate", resp, "Failure sending request") + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "notificationhubs.ManagementClient", "CreateOrUpdate", resp, "Failure responding to request") + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. 
+func (client ManagementClient) CreateOrUpdatePreparer(resourceGroupName string, namespaceName string, notificationHubName string, parameters NotificationHubCreateOrUpdateParameters) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "namespaceName": autorest.Encode("path", namespaceName), + "notificationHubName": autorest.Encode("path", notificationHubName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NotificationHubs/namespaces/{namespaceName}/notificationHubs/{notificationHubName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client ManagementClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. 
+func (client ManagementClient) CreateOrUpdateResponder(resp *http.Response) (result NotificationHubResource, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// CreateOrUpdateAuthorizationRule creates/Updates an authorization rule for a +// NotificationHub +// +// resourceGroupName is the name of the resource group. namespaceName is the +// namespace name. notificationHubName is the notification hub name. +// authorizationRuleName is authorization Rule Name. parameters is the shared +// access authorization rule. +func (client ManagementClient) CreateOrUpdateAuthorizationRule(resourceGroupName string, namespaceName string, notificationHubName string, authorizationRuleName string, parameters SharedAccessAuthorizationRuleCreateOrUpdateParameters) (result SharedAccessAuthorizationRuleResource, err error) { + req, err := client.CreateOrUpdateAuthorizationRulePreparer(resourceGroupName, namespaceName, notificationHubName, authorizationRuleName, parameters) + if err != nil { + return result, autorest.NewErrorWithError(err, "notificationhubs.ManagementClient", "CreateOrUpdateAuthorizationRule", nil, "Failure preparing request") + } + + resp, err := client.CreateOrUpdateAuthorizationRuleSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "notificationhubs.ManagementClient", "CreateOrUpdateAuthorizationRule", resp, "Failure sending request") + } + + result, err = client.CreateOrUpdateAuthorizationRuleResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "notificationhubs.ManagementClient", "CreateOrUpdateAuthorizationRule", resp, "Failure responding to request") + } + + return +} + +// CreateOrUpdateAuthorizationRulePreparer prepares the 
CreateOrUpdateAuthorizationRule request. +func (client ManagementClient) CreateOrUpdateAuthorizationRulePreparer(resourceGroupName string, namespaceName string, notificationHubName string, authorizationRuleName string, parameters SharedAccessAuthorizationRuleCreateOrUpdateParameters) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "authorizationRuleName": autorest.Encode("path", authorizationRuleName), + "namespaceName": autorest.Encode("path", namespaceName), + "notificationHubName": autorest.Encode("path", notificationHubName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NotificationHubs/namespaces/{namespaceName}/notificationHubs/{notificationHubName}/AuthorizationRules/{authorizationRuleName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// CreateOrUpdateAuthorizationRuleSender sends the CreateOrUpdateAuthorizationRule request. The method will close the +// http.Response Body if it receives an error. +func (client ManagementClient) CreateOrUpdateAuthorizationRuleSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// CreateOrUpdateAuthorizationRuleResponder handles the response to the CreateOrUpdateAuthorizationRule request. The method always +// closes the http.Response Body. 
+func (client ManagementClient) CreateOrUpdateAuthorizationRuleResponder(resp *http.Response) (result SharedAccessAuthorizationRuleResource, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete deletes a notification hub associated with a namespace. +// +// resourceGroupName is the name of the resource group. namespaceName is the +// namespace name. notificationHubName is the notification hub name. +func (client ManagementClient) Delete(resourceGroupName string, namespaceName string, notificationHubName string) (result autorest.Response, err error) { + req, err := client.DeletePreparer(resourceGroupName, namespaceName, notificationHubName) + if err != nil { + return result, autorest.NewErrorWithError(err, "notificationhubs.ManagementClient", "Delete", nil, "Failure preparing request") + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "notificationhubs.ManagementClient", "Delete", resp, "Failure sending request") + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "notificationhubs.ManagementClient", "Delete", resp, "Failure responding to request") + } + + return +} + +// DeletePreparer prepares the Delete request. 
+func (client ManagementClient) DeletePreparer(resourceGroupName string, namespaceName string, notificationHubName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "namespaceName": autorest.Encode("path", namespaceName), + "notificationHubName": autorest.Encode("path", notificationHubName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NotificationHubs/namespaces/{namespaceName}/notificationHubs/{notificationHubName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client ManagementClient) DeleteSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client ManagementClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByClosing()) + result.Response = resp + return +} + +// DeleteAuthorizationRule deletes a notificationHub authorization rule +// +// resourceGroupName is the name of the resource group. namespaceName is the +// namespace name. notificationHubName is the notification hub name. +// authorizationRuleName is authorization Rule Name. 
+func (client ManagementClient) DeleteAuthorizationRule(resourceGroupName string, namespaceName string, notificationHubName string, authorizationRuleName string) (result autorest.Response, err error) { + req, err := client.DeleteAuthorizationRulePreparer(resourceGroupName, namespaceName, notificationHubName, authorizationRuleName) + if err != nil { + return result, autorest.NewErrorWithError(err, "notificationhubs.ManagementClient", "DeleteAuthorizationRule", nil, "Failure preparing request") + } + + resp, err := client.DeleteAuthorizationRuleSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "notificationhubs.ManagementClient", "DeleteAuthorizationRule", resp, "Failure sending request") + } + + result, err = client.DeleteAuthorizationRuleResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "notificationhubs.ManagementClient", "DeleteAuthorizationRule", resp, "Failure responding to request") + } + + return +} + +// DeleteAuthorizationRulePreparer prepares the DeleteAuthorizationRule request. 
+func (client ManagementClient) DeleteAuthorizationRulePreparer(resourceGroupName string, namespaceName string, notificationHubName string, authorizationRuleName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "authorizationRuleName": autorest.Encode("path", authorizationRuleName), + "namespaceName": autorest.Encode("path", namespaceName), + "notificationHubName": autorest.Encode("path", notificationHubName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NotificationHubs/namespaces/{namespaceName}/notificationHubs/{notificationHubName}/AuthorizationRules/{authorizationRuleName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// DeleteAuthorizationRuleSender sends the DeleteAuthorizationRule request. The method will close the +// http.Response Body if it receives an error. +func (client ManagementClient) DeleteAuthorizationRuleSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// DeleteAuthorizationRuleResponder handles the response to the DeleteAuthorizationRule request. The method always +// closes the http.Response Body. +func (client ManagementClient) DeleteAuthorizationRuleResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusNoContent, http.StatusOK), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get lists the notification hubs associated with a namespace. 
+// +// resourceGroupName is the name of the resource group. namespaceName is the +// namespace name. notificationHubName is the notification hub name. +func (client ManagementClient) Get(resourceGroupName string, namespaceName string, notificationHubName string) (result NotificationHubResource, err error) { + req, err := client.GetPreparer(resourceGroupName, namespaceName, notificationHubName) + if err != nil { + return result, autorest.NewErrorWithError(err, "notificationhubs.ManagementClient", "Get", nil, "Failure preparing request") + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "notificationhubs.ManagementClient", "Get", resp, "Failure sending request") + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "notificationhubs.ManagementClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. 
+func (client ManagementClient) GetPreparer(resourceGroupName string, namespaceName string, notificationHubName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "namespaceName": autorest.Encode("path", namespaceName), + "notificationHubName": autorest.Encode("path", notificationHubName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NotificationHubs/namespaces/{namespaceName}/notificationHubs/{notificationHubName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client ManagementClient) GetSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client ManagementClient) GetResponder(resp *http.Response) (result NotificationHubResource, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// GetAuthorizationRule gets an authorization rule for a NotificationHub by +// name. +// +// resourceGroupName is the name of the resource group. namespaceName is the +// namespace name notificationHubName is the notification hub name. +// authorizationRuleName is authorization rule name. 
+func (client ManagementClient) GetAuthorizationRule(resourceGroupName string, namespaceName string, notificationHubName string, authorizationRuleName string) (result SharedAccessAuthorizationRuleResource, err error) { + req, err := client.GetAuthorizationRulePreparer(resourceGroupName, namespaceName, notificationHubName, authorizationRuleName) + if err != nil { + return result, autorest.NewErrorWithError(err, "notificationhubs.ManagementClient", "GetAuthorizationRule", nil, "Failure preparing request") + } + + resp, err := client.GetAuthorizationRuleSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "notificationhubs.ManagementClient", "GetAuthorizationRule", resp, "Failure sending request") + } + + result, err = client.GetAuthorizationRuleResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "notificationhubs.ManagementClient", "GetAuthorizationRule", resp, "Failure responding to request") + } + + return +} + +// GetAuthorizationRulePreparer prepares the GetAuthorizationRule request. 
+func (client ManagementClient) GetAuthorizationRulePreparer(resourceGroupName string, namespaceName string, notificationHubName string, authorizationRuleName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "authorizationRuleName": autorest.Encode("path", authorizationRuleName), + "namespaceName": autorest.Encode("path", namespaceName), + "notificationHubName": autorest.Encode("path", notificationHubName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NotificationHubs/namespaces/{namespaceName}/notificationHubs/{notificationHubName}/AuthorizationRules/{authorizationRuleName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetAuthorizationRuleSender sends the GetAuthorizationRule request. The method will close the +// http.Response Body if it receives an error. +func (client ManagementClient) GetAuthorizationRuleSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetAuthorizationRuleResponder handles the response to the GetAuthorizationRule request. The method always +// closes the http.Response Body. 
+func (client ManagementClient) GetAuthorizationRuleResponder(resp *http.Response) (result SharedAccessAuthorizationRuleResource, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// GetPnsCredentials lists the PNS Credentials associated with a notification +// hub . +// +// resourceGroupName is the name of the resource group. namespaceName is the +// namespace name. notificationHubName is the notification hub name. +func (client ManagementClient) GetPnsCredentials(resourceGroupName string, namespaceName string, notificationHubName string) (result NotificationHubResource, err error) { + req, err := client.GetPnsCredentialsPreparer(resourceGroupName, namespaceName, notificationHubName) + if err != nil { + return result, autorest.NewErrorWithError(err, "notificationhubs.ManagementClient", "GetPnsCredentials", nil, "Failure preparing request") + } + + resp, err := client.GetPnsCredentialsSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "notificationhubs.ManagementClient", "GetPnsCredentials", resp, "Failure sending request") + } + + result, err = client.GetPnsCredentialsResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "notificationhubs.ManagementClient", "GetPnsCredentials", resp, "Failure responding to request") + } + + return +} + +// GetPnsCredentialsPreparer prepares the GetPnsCredentials request. 
+func (client ManagementClient) GetPnsCredentialsPreparer(resourceGroupName string, namespaceName string, notificationHubName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "namespaceName": autorest.Encode("path", namespaceName), + "notificationHubName": autorest.Encode("path", notificationHubName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NotificationHubs/namespaces/{namespaceName}/notificationHubs/{notificationHubName}/pnsCredentials", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetPnsCredentialsSender sends the GetPnsCredentials request. The method will close the +// http.Response Body if it receives an error. +func (client ManagementClient) GetPnsCredentialsSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetPnsCredentialsResponder handles the response to the GetPnsCredentials request. The method always +// closes the http.Response Body. +func (client ManagementClient) GetPnsCredentialsResponder(resp *http.Response) (result NotificationHubResource, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List lists the notification hubs associated with a namespace. +// +// resourceGroupName is the name of the resource group. namespaceName is the +// namespace name. 
+func (client ManagementClient) List(resourceGroupName string, namespaceName string) (result NotificationHubListResult, err error) { + req, err := client.ListPreparer(resourceGroupName, namespaceName) + if err != nil { + return result, autorest.NewErrorWithError(err, "notificationhubs.ManagementClient", "List", nil, "Failure preparing request") + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "notificationhubs.ManagementClient", "List", resp, "Failure sending request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "notificationhubs.ManagementClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client ManagementClient) ListPreparer(resourceGroupName string, namespaceName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "namespaceName": autorest.Encode("path", namespaceName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NotificationHubs/namespaces/{namespaceName}/notificationHubs", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client ManagementClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListResponder handles the response to the List request. 
The method always +// closes the http.Response Body. +func (client ManagementClient) ListResponder(resp *http.Response) (result NotificationHubListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListNextResults retrieves the next set of results, if any. +func (client ManagementClient) ListNextResults(lastResults NotificationHubListResult) (result NotificationHubListResult, err error) { + req, err := lastResults.NotificationHubListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "notificationhubs.ManagementClient", "List", nil, "Failure preparing next results request request") + } + if req == nil { + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "notificationhubs.ManagementClient", "List", resp, "Failure sending next results request request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "notificationhubs.ManagementClient", "List", resp, "Failure responding to next results request request") + } + + return +} + +// ListAuthorizationRules gets the authorization rules for a NotificationHub. +// +// resourceGroupName is the name of the resource group. namespaceName is the +// namespace name notificationHubName is the notification hub name. 
+func (client ManagementClient) ListAuthorizationRules(resourceGroupName string, namespaceName string, notificationHubName string) (result SharedAccessAuthorizationRuleListResult, err error) { + req, err := client.ListAuthorizationRulesPreparer(resourceGroupName, namespaceName, notificationHubName) + if err != nil { + return result, autorest.NewErrorWithError(err, "notificationhubs.ManagementClient", "ListAuthorizationRules", nil, "Failure preparing request") + } + + resp, err := client.ListAuthorizationRulesSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "notificationhubs.ManagementClient", "ListAuthorizationRules", resp, "Failure sending request") + } + + result, err = client.ListAuthorizationRulesResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "notificationhubs.ManagementClient", "ListAuthorizationRules", resp, "Failure responding to request") + } + + return +} + +// ListAuthorizationRulesPreparer prepares the ListAuthorizationRules request. 
+func (client ManagementClient) ListAuthorizationRulesPreparer(resourceGroupName string, namespaceName string, notificationHubName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "namespaceName": autorest.Encode("path", namespaceName), + "notificationHubName": autorest.Encode("path", notificationHubName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NotificationHubs/namespaces/{namespaceName}/notificationHubs/{notificationHubName}/AuthorizationRules", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListAuthorizationRulesSender sends the ListAuthorizationRules request. The method will close the +// http.Response Body if it receives an error. +func (client ManagementClient) ListAuthorizationRulesSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListAuthorizationRulesResponder handles the response to the ListAuthorizationRules request. The method always +// closes the http.Response Body. +func (client ManagementClient) ListAuthorizationRulesResponder(resp *http.Response) (result SharedAccessAuthorizationRuleListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListAuthorizationRulesNextResults retrieves the next set of results, if any. 
+func (client ManagementClient) ListAuthorizationRulesNextResults(lastResults SharedAccessAuthorizationRuleListResult) (result SharedAccessAuthorizationRuleListResult, err error) { + req, err := lastResults.SharedAccessAuthorizationRuleListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "notificationhubs.ManagementClient", "ListAuthorizationRules", nil, "Failure preparing next results request request") + } + if req == nil { + return + } + + resp, err := client.ListAuthorizationRulesSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "notificationhubs.ManagementClient", "ListAuthorizationRules", resp, "Failure sending next results request request") + } + + result, err = client.ListAuthorizationRulesResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "notificationhubs.ManagementClient", "ListAuthorizationRules", resp, "Failure responding to next results request request") + } + + return +} + +// ListKeys gets the Primary and Secondary ConnectionStrings to the +// NotificationHub +// +// resourceGroupName is the name of the resource group. namespaceName is the +// namespace name. notificationHubName is the notification hub name. +// authorizationRuleName is the connection string of the NotificationHub for +// the specified authorizationRule. 
+func (client ManagementClient) ListKeys(resourceGroupName string, namespaceName string, notificationHubName string, authorizationRuleName string) (result ResourceListKeys, err error) { + req, err := client.ListKeysPreparer(resourceGroupName, namespaceName, notificationHubName, authorizationRuleName) + if err != nil { + return result, autorest.NewErrorWithError(err, "notificationhubs.ManagementClient", "ListKeys", nil, "Failure preparing request") + } + + resp, err := client.ListKeysSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "notificationhubs.ManagementClient", "ListKeys", resp, "Failure sending request") + } + + result, err = client.ListKeysResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "notificationhubs.ManagementClient", "ListKeys", resp, "Failure responding to request") + } + + return +} + +// ListKeysPreparer prepares the ListKeys request. +func (client ManagementClient) ListKeysPreparer(resourceGroupName string, namespaceName string, notificationHubName string, authorizationRuleName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "authorizationRuleName": autorest.Encode("path", authorizationRuleName), + "namespaceName": autorest.Encode("path", namespaceName), + "notificationHubName": autorest.Encode("path", notificationHubName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NotificationHubs/namespaces/{namespaceName}/notificationHubs/{notificationHubName}/AuthorizationRules/{authorizationRuleName}/listKeys", 
pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListKeysSender sends the ListKeys request. The method will close the +// http.Response Body if it receives an error. +func (client ManagementClient) ListKeysSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListKeysResponder handles the response to the ListKeys request. The method always +// closes the http.Response Body. +func (client ManagementClient) ListKeysResponder(resp *http.Response) (result ResourceListKeys, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/notificationhubs/models.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/notificationhubs/models.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/notificationhubs/models.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/notificationhubs/models.go 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,321 @@ +package notificationhubs + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/date" + "github.com/Azure/go-autorest/autorest/to" + "net/http" +) + +// AccessRights enumerates the values for access rights. +type AccessRights string + +const ( + // Listen specifies the listen state for access rights. + Listen AccessRights = "Listen" + // Manage specifies the manage state for access rights. + Manage AccessRights = "Manage" + // Send specifies the send state for access rights. + Send AccessRights = "Send" +) + +// NamespaceType enumerates the values for namespace type. +type NamespaceType string + +const ( + // Messaging specifies the messaging state for namespace type. + Messaging NamespaceType = "Messaging" + // NotificationHub specifies the notification hub state for namespace type. + NotificationHub NamespaceType = "NotificationHub" +) + +// AdmCredential is description of a NotificationHub AdmCredential. +type AdmCredential struct { + Properties *AdmCredentialProperties `json:"properties,omitempty"` +} + +// AdmCredentialProperties is description of a NotificationHub AdmCredential. +type AdmCredentialProperties struct { + ClientID *string `json:"clientId,omitempty"` + ClientSecret *string `json:"clientSecret,omitempty"` + AuthTokenURL *string `json:"authTokenUrl,omitempty"` +} + +// ApnsCredential is description of a NotificationHub ApnsCredential. +type ApnsCredential struct { + Properties *ApnsCredentialProperties `json:"properties,omitempty"` +} + +// ApnsCredentialProperties is description of a NotificationHub ApnsCredential. 
+type ApnsCredentialProperties struct { + ApnsCertificate *string `json:"apnsCertificate,omitempty"` + CertificateKey *string `json:"certificateKey,omitempty"` + Endpoint *string `json:"endpoint,omitempty"` + Thumbprint *string `json:"thumbprint,omitempty"` +} + +// BaiduCredential is description of a NotificationHub BaiduCredential. +type BaiduCredential struct { + Properties *BaiduCredentialProperties `json:"properties,omitempty"` +} + +// BaiduCredentialProperties is description of a NotificationHub +// BaiduCredential. +type BaiduCredentialProperties struct { + BaiduAPIKey *string `json:"baiduApiKey,omitempty"` + BaiduEndPoint *string `json:"baiduEndPoint,omitempty"` + BaiduSecretKey *string `json:"baiduSecretKey,omitempty"` +} + +// CheckAvailabilityParameters is parameters supplied to the Check Name +// Availability for Namespace and NotificationHubs. +type CheckAvailabilityParameters struct { + Name *string `json:"name,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` + IsAvailiable *bool `json:"isAvailiable,omitempty"` +} + +// CheckAvailabilityResource is description of a CheckAvailibility resource. +type CheckAvailabilityResource struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + Location *string `json:"location,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` + IsAvailiable *bool `json:"isAvailiable,omitempty"` +} + +// GcmCredential is description of a NotificationHub GcmCredential. +type GcmCredential struct { + Properties *GcmCredentialProperties `json:"properties,omitempty"` +} + +// GcmCredentialProperties is description of a NotificationHub GcmCredential. +type GcmCredentialProperties struct { + GcmEndpoint *string `json:"gcmEndpoint,omitempty"` + GoogleAPIKey *string `json:"googleApiKey,omitempty"` +} + +// MpnsCredential is description of a NotificationHub MpnsCredential. 
+type MpnsCredential struct { + Properties *MpnsCredentialProperties `json:"properties,omitempty"` +} + +// MpnsCredentialProperties is description of a NotificationHub MpnsCredential. +type MpnsCredentialProperties struct { + MpnsCertificate *string `json:"mpnsCertificate,omitempty"` + CertificateKey *string `json:"certificateKey,omitempty"` + Thumbprint *string `json:"thumbprint,omitempty"` +} + +// NamespaceCreateOrUpdateParameters is parameters supplied to the +// CreateOrUpdate Namespace operation. +type NamespaceCreateOrUpdateParameters struct { + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` + Properties *NamespaceProperties `json:"properties,omitempty"` +} + +// NamespaceListResult is the response of the List Namespace operation. +type NamespaceListResult struct { + autorest.Response `json:"-"` + Value *[]NamespaceResource `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// NamespaceListResultPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. +func (client NamespaceListResult) NamespaceListResultPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + +// NamespaceProperties is namespace properties. 
+type NamespaceProperties struct { + Name *string `json:"name,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` + Region *string `json:"region,omitempty"` + Status *string `json:"status,omitempty"` + CreatedAt *date.Time `json:"createdAt,omitempty"` + ServiceBusEndpoint *string `json:"serviceBusEndpoint,omitempty"` + SubscriptionID *string `json:"subscriptionId,omitempty"` + ScaleUnit *string `json:"scaleUnit,omitempty"` + Enabled *bool `json:"enabled,omitempty"` + Critical *bool `json:"critical,omitempty"` + NamespaceType NamespaceType `json:"namespaceType,omitempty"` +} + +// NamespaceResource is description of a Namespace resource. +type NamespaceResource struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + Location *string `json:"location,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` + Properties *NamespaceProperties `json:"properties,omitempty"` +} + +// NotificationHubCreateOrUpdateParameters is parameters supplied to the +// CreateOrUpdate NotificationHub operation. +type NotificationHubCreateOrUpdateParameters struct { + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` + Properties *NotificationHubProperties `json:"properties,omitempty"` +} + +// NotificationHubListResult is the response of the List NotificationHub +// operation. +type NotificationHubListResult struct { + autorest.Response `json:"-"` + Value *[]NotificationHubResource `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// NotificationHubListResultPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. 
+func (client NotificationHubListResult) NotificationHubListResultPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + +// NotificationHubProperties is notificationHub properties. +type NotificationHubProperties struct { + Name *string `json:"name,omitempty"` + RegistrationTTL *string `json:"registrationTtl,omitempty"` + AuthorizationRules *[]SharedAccessAuthorizationRuleProperties `json:"authorizationRules,omitempty"` + ApnsCredential *ApnsCredential `json:"apnsCredential,omitempty"` + WnsCredential *WnsCredential `json:"wnsCredential,omitempty"` + GcmCredential *GcmCredential `json:"gcmCredential,omitempty"` + MpnsCredential *MpnsCredential `json:"mpnsCredential,omitempty"` + AdmCredential *AdmCredential `json:"admCredential,omitempty"` + BaiduCredential *BaiduCredential `json:"baiduCredential,omitempty"` +} + +// NotificationHubResource is description of a NotificationHub Resource. 
+type NotificationHubResource struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + Location *string `json:"location,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` + Properties *NotificationHubProperties `json:"properties,omitempty"` +} + +// Resource is +type Resource struct { + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` +} + +// ResourceListKeys is namespace/NotificationHub Connection String +type ResourceListKeys struct { + autorest.Response `json:"-"` + PrimaryConnectionString *string `json:"primaryConnectionString,omitempty"` + SecondaryConnectionString *string `json:"secondaryConnectionString,omitempty"` +} + +// SharedAccessAuthorizationRuleCreateOrUpdateParameters is parameters +// supplied to the CreateOrUpdate Namespace AuthorizationRules. +type SharedAccessAuthorizationRuleCreateOrUpdateParameters struct { + Location *string `json:"location,omitempty"` + Name *string `json:"name,omitempty"` + Properties *SharedAccessAuthorizationRuleProperties `json:"properties,omitempty"` +} + +// SharedAccessAuthorizationRuleListResult is the response of the List +// Namespace operation. +type SharedAccessAuthorizationRuleListResult struct { + autorest.Response `json:"-"` + Value *[]SharedAccessAuthorizationRuleResource `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// SharedAccessAuthorizationRuleListResultPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. 
+func (client SharedAccessAuthorizationRuleListResult) SharedAccessAuthorizationRuleListResultPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + +// SharedAccessAuthorizationRuleProperties is sharedAccessAuthorizationRule +// properties. +type SharedAccessAuthorizationRuleProperties struct { + PrimaryKey *string `json:"primaryKey,omitempty"` + SecondaryKey *string `json:"secondaryKey,omitempty"` + KeyName *string `json:"keyName,omitempty"` + ClaimType *string `json:"claimType,omitempty"` + ClaimValue *string `json:"claimValue,omitempty"` + Rights *[]AccessRights `json:"rights,omitempty"` + CreatedTime *date.Time `json:"createdTime,omitempty"` + ModifiedTime *date.Time `json:"modifiedTime,omitempty"` + Revision *int32 `json:"revision,omitempty"` +} + +// SharedAccessAuthorizationRuleResource is description of a Namespace +// AuthorizationRules. +type SharedAccessAuthorizationRuleResource struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + Location *string `json:"location,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` + Properties *SharedAccessAuthorizationRuleProperties `json:"properties,omitempty"` +} + +// SubResource is +type SubResource struct { + ID *string `json:"id,omitempty"` +} + +// WnsCredential is description of a NotificationHub WnsCredential. +type WnsCredential struct { + Properties *WnsCredentialProperties `json:"properties,omitempty"` +} + +// WnsCredentialProperties is description of a NotificationHub WnsCredential. 
+type WnsCredentialProperties struct { + PackageSid *string `json:"packageSid,omitempty"` + SecretKey *string `json:"secretKey,omitempty"` + WindowsLiveEndpoint *string `json:"windowsLiveEndpoint,omitempty"` +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/notificationhubs/namespaces.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/notificationhubs/namespaces.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/notificationhubs/namespaces.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/notificationhubs/namespaces.go 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,876 @@ +package notificationhubs + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "net/http" +) + +// NamespacesClient is the azure NotificationHub client +type NamespacesClient struct { + ManagementClient +} + +// NewNamespacesClient creates an instance of the NamespacesClient client. 
+func NewNamespacesClient(subscriptionID string) NamespacesClient { + return NewNamespacesClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewNamespacesClientWithBaseURI creates an instance of the NamespacesClient +// client. +func NewNamespacesClientWithBaseURI(baseURI string, subscriptionID string) NamespacesClient { + return NamespacesClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CheckAvailability checks the availability of the given service namespace +// across all Windows Azure subscriptions. This is useful because the domain +// name is created based on the service namespace name. +// +// parameters is the namespace name. +func (client NamespacesClient) CheckAvailability(parameters CheckAvailabilityParameters) (result CheckAvailabilityResource, err error) { + req, err := client.CheckAvailabilityPreparer(parameters) + if err != nil { + return result, autorest.NewErrorWithError(err, "notificationhubs.NamespacesClient", "CheckAvailability", nil, "Failure preparing request") + } + + resp, err := client.CheckAvailabilitySender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "notificationhubs.NamespacesClient", "CheckAvailability", resp, "Failure sending request") + } + + result, err = client.CheckAvailabilityResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "notificationhubs.NamespacesClient", "CheckAvailability", resp, "Failure responding to request") + } + + return +} + +// CheckAvailabilityPreparer prepares the CheckAvailability request. 
+func (client NamespacesClient) CheckAvailabilityPreparer(parameters CheckAvailabilityParameters) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.NotificationHubs/checkNamespaceAvailability", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// CheckAvailabilitySender sends the CheckAvailability request. The method will close the +// http.Response Body if it receives an error. +func (client NamespacesClient) CheckAvailabilitySender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// CheckAvailabilityResponder handles the response to the CheckAvailability request. The method always +// closes the http.Response Body. +func (client NamespacesClient) CheckAvailabilityResponder(resp *http.Response) (result CheckAvailabilityResource, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// CreateOrUpdate creates/Updates a service namespace. Once created, this +// namespace's resource manifest is immutable. This operation is idempotent. +// +// resourceGroupName is the name of the resource group. namespaceName is the +// namespace name. parameters is parameters supplied to create a Namespace +// Resource. 
+func (client NamespacesClient) CreateOrUpdate(resourceGroupName string, namespaceName string, parameters NamespaceCreateOrUpdateParameters) (result NamespaceResource, err error) { + req, err := client.CreateOrUpdatePreparer(resourceGroupName, namespaceName, parameters) + if err != nil { + return result, autorest.NewErrorWithError(err, "notificationhubs.NamespacesClient", "CreateOrUpdate", nil, "Failure preparing request") + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "notificationhubs.NamespacesClient", "CreateOrUpdate", resp, "Failure sending request") + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "notificationhubs.NamespacesClient", "CreateOrUpdate", resp, "Failure responding to request") + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. +func (client NamespacesClient) CreateOrUpdatePreparer(resourceGroupName string, namespaceName string, parameters NamespaceCreateOrUpdateParameters) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "namespaceName": autorest.Encode("path", namespaceName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NotificationHubs/namespaces/{namespaceName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. 
The method will close the
+// http.Response Body if it receives an error.
+func (client NamespacesClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) {
+	return autorest.SendWithSender(client, req)
+}
+
+// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
+// closes the http.Response Body.
+func (client NamespacesClient) CreateOrUpdateResponder(resp *http.Response) (result NamespaceResource, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		azure.WithErrorUnlessStatusCode(http.StatusCreated, http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// CreateOrUpdateAuthorizationRule creates an authorization rule for a
+// namespace
+//
+// resourceGroupName is the name of the resource group. namespaceName is the
+// namespace name. authorizationRuleName is authorization Rule Name.
+// parameters is the shared access authorization rule.
+func (client NamespacesClient) CreateOrUpdateAuthorizationRule(resourceGroupName string, namespaceName string, authorizationRuleName string, parameters SharedAccessAuthorizationRuleCreateOrUpdateParameters) (result SharedAccessAuthorizationRuleResource, err error) { + req, err := client.CreateOrUpdateAuthorizationRulePreparer(resourceGroupName, namespaceName, authorizationRuleName, parameters) + if err != nil { + return result, autorest.NewErrorWithError(err, "notificationhubs.NamespacesClient", "CreateOrUpdateAuthorizationRule", nil, "Failure preparing request") + } + + resp, err := client.CreateOrUpdateAuthorizationRuleSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "notificationhubs.NamespacesClient", "CreateOrUpdateAuthorizationRule", resp, "Failure sending request") + } + + result, err = client.CreateOrUpdateAuthorizationRuleResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "notificationhubs.NamespacesClient", "CreateOrUpdateAuthorizationRule", resp, "Failure responding to request") + } + + return +} + +// CreateOrUpdateAuthorizationRulePreparer prepares the CreateOrUpdateAuthorizationRule request. 
+func (client NamespacesClient) CreateOrUpdateAuthorizationRulePreparer(resourceGroupName string, namespaceName string, authorizationRuleName string, parameters SharedAccessAuthorizationRuleCreateOrUpdateParameters) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "authorizationRuleName": autorest.Encode("path", authorizationRuleName), + "namespaceName": autorest.Encode("path", namespaceName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NotificationHubs/namespaces/{namespaceName}/AuthorizationRules/{authorizationRuleName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// CreateOrUpdateAuthorizationRuleSender sends the CreateOrUpdateAuthorizationRule request. The method will close the +// http.Response Body if it receives an error. +func (client NamespacesClient) CreateOrUpdateAuthorizationRuleSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// CreateOrUpdateAuthorizationRuleResponder handles the response to the CreateOrUpdateAuthorizationRule request. The method always +// closes the http.Response Body. 
+func (client NamespacesClient) CreateOrUpdateAuthorizationRuleResponder(resp *http.Response) (result SharedAccessAuthorizationRuleResource, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete deletes an existing namespace. This operation also removes all +// associated notificationHubs under the namespace. This method may poll for +// completion. Polling can be canceled by passing the cancel channel +// argument. The channel will be used to cancel polling and any outstanding +// HTTP requests. +// +// resourceGroupName is the name of the resource group. namespaceName is the +// namespace name. +func (client NamespacesClient) Delete(resourceGroupName string, namespaceName string, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.DeletePreparer(resourceGroupName, namespaceName, cancel) + if err != nil { + return result, autorest.NewErrorWithError(err, "notificationhubs.NamespacesClient", "Delete", nil, "Failure preparing request") + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "notificationhubs.NamespacesClient", "Delete", resp, "Failure sending request") + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "notificationhubs.NamespacesClient", "Delete", resp, "Failure responding to request") + } + + return +} + +// DeletePreparer prepares the Delete request. 
+func (client NamespacesClient) DeletePreparer(resourceGroupName string, namespaceName string, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "namespaceName": autorest.Encode("path", namespaceName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NotificationHubs/namespaces/{namespaceName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client NamespacesClient) DeleteSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client NamespacesClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusNoContent, http.StatusOK, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} + +// DeleteAuthorizationRule deletes a namespace authorization rule +// +// resourceGroupName is the name of the resource group. namespaceName is the +// namespace name. authorizationRuleName is authorization Rule Name. 
+func (client NamespacesClient) DeleteAuthorizationRule(resourceGroupName string, namespaceName string, authorizationRuleName string) (result autorest.Response, err error) { + req, err := client.DeleteAuthorizationRulePreparer(resourceGroupName, namespaceName, authorizationRuleName) + if err != nil { + return result, autorest.NewErrorWithError(err, "notificationhubs.NamespacesClient", "DeleteAuthorizationRule", nil, "Failure preparing request") + } + + resp, err := client.DeleteAuthorizationRuleSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "notificationhubs.NamespacesClient", "DeleteAuthorizationRule", resp, "Failure sending request") + } + + result, err = client.DeleteAuthorizationRuleResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "notificationhubs.NamespacesClient", "DeleteAuthorizationRule", resp, "Failure responding to request") + } + + return +} + +// DeleteAuthorizationRulePreparer prepares the DeleteAuthorizationRule request. 
+func (client NamespacesClient) DeleteAuthorizationRulePreparer(resourceGroupName string, namespaceName string, authorizationRuleName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "authorizationRuleName": autorest.Encode("path", authorizationRuleName), + "namespaceName": autorest.Encode("path", namespaceName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NotificationHubs/namespaces/{namespaceName}/AuthorizationRules/{authorizationRuleName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// DeleteAuthorizationRuleSender sends the DeleteAuthorizationRule request. The method will close the +// http.Response Body if it receives an error. +func (client NamespacesClient) DeleteAuthorizationRuleSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// DeleteAuthorizationRuleResponder handles the response to the DeleteAuthorizationRule request. The method always +// closes the http.Response Body. +func (client NamespacesClient) DeleteAuthorizationRuleResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusNoContent, http.StatusOK), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get returns the description for the specified namespace. +// +// resourceGroupName is the name of the resource group. namespaceName is the +// namespace name. 
+func (client NamespacesClient) Get(resourceGroupName string, namespaceName string) (result NamespaceResource, err error) { + req, err := client.GetPreparer(resourceGroupName, namespaceName) + if err != nil { + return result, autorest.NewErrorWithError(err, "notificationhubs.NamespacesClient", "Get", nil, "Failure preparing request") + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "notificationhubs.NamespacesClient", "Get", resp, "Failure sending request") + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "notificationhubs.NamespacesClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client NamespacesClient) GetPreparer(resourceGroupName string, namespaceName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "namespaceName": autorest.Encode("path", namespaceName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NotificationHubs/namespaces/{namespaceName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client NamespacesClient) GetSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetResponder handles the response to the Get request. 
The method always +// closes the http.Response Body. +func (client NamespacesClient) GetResponder(resp *http.Response) (result NamespaceResource, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// GetAuthorizationRule gets an authorization rule for a namespace by name. +// +// resourceGroupName is the name of the resource group. namespaceName is the +// namespace name authorizationRuleName is authorization rule name. +func (client NamespacesClient) GetAuthorizationRule(resourceGroupName string, namespaceName string, authorizationRuleName string) (result SharedAccessAuthorizationRuleResource, err error) { + req, err := client.GetAuthorizationRulePreparer(resourceGroupName, namespaceName, authorizationRuleName) + if err != nil { + return result, autorest.NewErrorWithError(err, "notificationhubs.NamespacesClient", "GetAuthorizationRule", nil, "Failure preparing request") + } + + resp, err := client.GetAuthorizationRuleSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "notificationhubs.NamespacesClient", "GetAuthorizationRule", resp, "Failure sending request") + } + + result, err = client.GetAuthorizationRuleResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "notificationhubs.NamespacesClient", "GetAuthorizationRule", resp, "Failure responding to request") + } + + return +} + +// GetAuthorizationRulePreparer prepares the GetAuthorizationRule request. 
+func (client NamespacesClient) GetAuthorizationRulePreparer(resourceGroupName string, namespaceName string, authorizationRuleName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "authorizationRuleName": autorest.Encode("path", authorizationRuleName), + "namespaceName": autorest.Encode("path", namespaceName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NotificationHubs/namespaces/{namespaceName}/AuthorizationRules/{authorizationRuleName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetAuthorizationRuleSender sends the GetAuthorizationRule request. The method will close the +// http.Response Body if it receives an error. +func (client NamespacesClient) GetAuthorizationRuleSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetAuthorizationRuleResponder handles the response to the GetAuthorizationRule request. The method always +// closes the http.Response Body. +func (client NamespacesClient) GetAuthorizationRuleResponder(resp *http.Response) (result SharedAccessAuthorizationRuleResource, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// GetLongRunningOperationStatus the Get Operation Status operation returns +// the status of the specified operation. 
After calling an asynchronous +// operation, you can call Get Operation Status to determine whether the +// operation has succeeded, failed, or is still in progress. +// +// operationStatusLink is location value returned by the Begin operation. +func (client NamespacesClient) GetLongRunningOperationStatus(operationStatusLink string) (result autorest.Response, err error) { + req, err := client.GetLongRunningOperationStatusPreparer(operationStatusLink) + if err != nil { + return result, autorest.NewErrorWithError(err, "notificationhubs.NamespacesClient", "GetLongRunningOperationStatus", nil, "Failure preparing request") + } + + resp, err := client.GetLongRunningOperationStatusSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "notificationhubs.NamespacesClient", "GetLongRunningOperationStatus", resp, "Failure sending request") + } + + result, err = client.GetLongRunningOperationStatusResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "notificationhubs.NamespacesClient", "GetLongRunningOperationStatus", resp, "Failure responding to request") + } + + return +} + +// GetLongRunningOperationStatusPreparer prepares the GetLongRunningOperationStatus request. +func (client NamespacesClient) GetLongRunningOperationStatusPreparer(operationStatusLink string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "operationStatusLink": operationStatusLink, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/{operationStatusLink}", pathParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetLongRunningOperationStatusSender sends the GetLongRunningOperationStatus request. The method will close the +// http.Response Body if it receives an error. 
+func (client NamespacesClient) GetLongRunningOperationStatusSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetLongRunningOperationStatusResponder handles the response to the GetLongRunningOperationStatus request. The method always +// closes the http.Response Body. +func (client NamespacesClient) GetLongRunningOperationStatusResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusNotFound, http.StatusAccepted, http.StatusOK), + autorest.ByClosing()) + result.Response = resp + return +} + +// List lists the available namespaces within a resourceGroup. +// +// resourceGroupName is the name of the resource group. If resourceGroupName +// value is null the method lists all the namespaces within subscription +func (client NamespacesClient) List(resourceGroupName string) (result NamespaceListResult, err error) { + req, err := client.ListPreparer(resourceGroupName) + if err != nil { + return result, autorest.NewErrorWithError(err, "notificationhubs.NamespacesClient", "List", nil, "Failure preparing request") + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "notificationhubs.NamespacesClient", "List", resp, "Failure sending request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "notificationhubs.NamespacesClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. 
+func (client NamespacesClient) ListPreparer(resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NotificationHubs/namespaces", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client NamespacesClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client NamespacesClient) ListResponder(resp *http.Response) (result NamespaceListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListNextResults retrieves the next set of results, if any. 
+func (client NamespacesClient) ListNextResults(lastResults NamespaceListResult) (result NamespaceListResult, err error) { + req, err := lastResults.NamespaceListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "notificationhubs.NamespacesClient", "List", nil, "Failure preparing next results request request") + } + if req == nil { + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "notificationhubs.NamespacesClient", "List", resp, "Failure sending next results request request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "notificationhubs.NamespacesClient", "List", resp, "Failure responding to next results request request") + } + + return +} + +// ListAll lists all the available namespaces within the subscription +// irrespective of the resourceGroups. +func (client NamespacesClient) ListAll() (result NamespaceListResult, err error) { + req, err := client.ListAllPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "notificationhubs.NamespacesClient", "ListAll", nil, "Failure preparing request") + } + + resp, err := client.ListAllSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "notificationhubs.NamespacesClient", "ListAll", resp, "Failure sending request") + } + + result, err = client.ListAllResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "notificationhubs.NamespacesClient", "ListAll", resp, "Failure responding to request") + } + + return +} + +// ListAllPreparer prepares the ListAll request. 
+func (client NamespacesClient) ListAllPreparer() (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.NotificationHubs/namespaces", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListAllSender sends the ListAll request. The method will close the +// http.Response Body if it receives an error. +func (client NamespacesClient) ListAllSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListAllResponder handles the response to the ListAll request. The method always +// closes the http.Response Body. +func (client NamespacesClient) ListAllResponder(resp *http.Response) (result NamespaceListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListAllNextResults retrieves the next set of results, if any. 
+func (client NamespacesClient) ListAllNextResults(lastResults NamespaceListResult) (result NamespaceListResult, err error) { + req, err := lastResults.NamespaceListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "notificationhubs.NamespacesClient", "ListAll", nil, "Failure preparing next results request request") + } + if req == nil { + return + } + + resp, err := client.ListAllSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "notificationhubs.NamespacesClient", "ListAll", resp, "Failure sending next results request request") + } + + result, err = client.ListAllResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "notificationhubs.NamespacesClient", "ListAll", resp, "Failure responding to next results request request") + } + + return +} + +// ListAuthorizationRules gets the authorization rules for a namespace. +// +// resourceGroupName is the name of the resource group. 
namespaceName is the +// namespace name +func (client NamespacesClient) ListAuthorizationRules(resourceGroupName string, namespaceName string) (result SharedAccessAuthorizationRuleListResult, err error) { + req, err := client.ListAuthorizationRulesPreparer(resourceGroupName, namespaceName) + if err != nil { + return result, autorest.NewErrorWithError(err, "notificationhubs.NamespacesClient", "ListAuthorizationRules", nil, "Failure preparing request") + } + + resp, err := client.ListAuthorizationRulesSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "notificationhubs.NamespacesClient", "ListAuthorizationRules", resp, "Failure sending request") + } + + result, err = client.ListAuthorizationRulesResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "notificationhubs.NamespacesClient", "ListAuthorizationRules", resp, "Failure responding to request") + } + + return +} + +// ListAuthorizationRulesPreparer prepares the ListAuthorizationRules request. +func (client NamespacesClient) ListAuthorizationRulesPreparer(resourceGroupName string, namespaceName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "namespaceName": autorest.Encode("path", namespaceName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NotificationHubs/namespaces/{namespaceName}/AuthorizationRules", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListAuthorizationRulesSender sends the ListAuthorizationRules request. 
The method will close the +// http.Response Body if it receives an error. +func (client NamespacesClient) ListAuthorizationRulesSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListAuthorizationRulesResponder handles the response to the ListAuthorizationRules request. The method always +// closes the http.Response Body. +func (client NamespacesClient) ListAuthorizationRulesResponder(resp *http.Response) (result SharedAccessAuthorizationRuleListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListAuthorizationRulesNextResults retrieves the next set of results, if any. +func (client NamespacesClient) ListAuthorizationRulesNextResults(lastResults SharedAccessAuthorizationRuleListResult) (result SharedAccessAuthorizationRuleListResult, err error) { + req, err := lastResults.SharedAccessAuthorizationRuleListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "notificationhubs.NamespacesClient", "ListAuthorizationRules", nil, "Failure preparing next results request request") + } + if req == nil { + return + } + + resp, err := client.ListAuthorizationRulesSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "notificationhubs.NamespacesClient", "ListAuthorizationRules", resp, "Failure sending next results request request") + } + + result, err = client.ListAuthorizationRulesResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "notificationhubs.NamespacesClient", "ListAuthorizationRules", resp, "Failure responding to next results request request") + } + + return +} + +// ListKeys gets the Primary and Secondary ConnectionStrings to the namespace +// +// resourceGroupName is 
the name of the resource group. namespaceName is the +// namespace name. authorizationRuleName is the connection string of the +// namespace for the specified authorizationRule. +func (client NamespacesClient) ListKeys(resourceGroupName string, namespaceName string, authorizationRuleName string) (result ResourceListKeys, err error) { + req, err := client.ListKeysPreparer(resourceGroupName, namespaceName, authorizationRuleName) + if err != nil { + return result, autorest.NewErrorWithError(err, "notificationhubs.NamespacesClient", "ListKeys", nil, "Failure preparing request") + } + + resp, err := client.ListKeysSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "notificationhubs.NamespacesClient", "ListKeys", resp, "Failure sending request") + } + + result, err = client.ListKeysResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "notificationhubs.NamespacesClient", "ListKeys", resp, "Failure responding to request") + } + + return +} + +// ListKeysPreparer prepares the ListKeys request. 
+func (client NamespacesClient) ListKeysPreparer(resourceGroupName string, namespaceName string, authorizationRuleName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "authorizationRuleName": autorest.Encode("path", authorizationRuleName), + "namespaceName": autorest.Encode("path", namespaceName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NotificationHubs/namespaces/{namespaceName}/AuthorizationRules/{authorizationRuleName}/listKeys", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListKeysSender sends the ListKeys request. The method will close the +// http.Response Body if it receives an error. +func (client NamespacesClient) ListKeysSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListKeysResponder handles the response to the ListKeys request. The method always +// closes the http.Response Body. 
+func (client NamespacesClient) ListKeysResponder(resp *http.Response) (result ResourceListKeys, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/notificationhubs/notificationhubs.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/notificationhubs/notificationhubs.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/notificationhubs/notificationhubs.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/notificationhubs/notificationhubs.go 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,811 @@ +package notificationhubs + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "net/http" +) + +// Client is the azure NotificationHub client +type Client struct { + ManagementClient +} + +// NewClient creates an instance of the Client client. 
+func NewClient(subscriptionID string) Client { + return NewClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewClientWithBaseURI creates an instance of the Client client. +func NewClientWithBaseURI(baseURI string, subscriptionID string) Client { + return Client{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CheckAvailability checks the availability of the given notificationHub in a +// namespace. +// +// resourceGroupName is the name of the resource group. namespaceName is the +// namespace name. parameters is the notificationHub name. +func (client Client) CheckAvailability(resourceGroupName string, namespaceName string, parameters CheckAvailabilityParameters) (result CheckAvailabilityResource, err error) { + req, err := client.CheckAvailabilityPreparer(resourceGroupName, namespaceName, parameters) + if err != nil { + return result, autorest.NewErrorWithError(err, "notificationhubs.Client", "CheckAvailability", nil, "Failure preparing request") + } + + resp, err := client.CheckAvailabilitySender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "notificationhubs.Client", "CheckAvailability", resp, "Failure sending request") + } + + result, err = client.CheckAvailabilityResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "notificationhubs.Client", "CheckAvailability", resp, "Failure responding to request") + } + + return +} + +// CheckAvailabilityPreparer prepares the CheckAvailability request. 
+func (client Client) CheckAvailabilityPreparer(resourceGroupName string, namespaceName string, parameters CheckAvailabilityParameters) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "namespaceName": autorest.Encode("path", namespaceName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NotificationHubs/namespaces/{namespaceName}/checkNotificationHubAvailability", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// CheckAvailabilitySender sends the CheckAvailability request. The method will close the +// http.Response Body if it receives an error. +func (client Client) CheckAvailabilitySender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// CheckAvailabilityResponder handles the response to the CheckAvailability request. The method always +// closes the http.Response Body. +func (client Client) CheckAvailabilityResponder(resp *http.Response) (result CheckAvailabilityResource, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// CreateOrUpdate creates/updates a NotificationHub in a namespace. +// +// resourceGroupName is the name of the resource group. namespaceName is the +// namespace name. notificationHubName is the notification hub name. 
+// parameters is parameters supplied to the create/update a NotificationHub +// Resource. +func (client Client) CreateOrUpdate(resourceGroupName string, namespaceName string, notificationHubName string, parameters NotificationHubCreateOrUpdateParameters) (result NotificationHubResource, err error) { + req, err := client.CreateOrUpdatePreparer(resourceGroupName, namespaceName, notificationHubName, parameters) + if err != nil { + return result, autorest.NewErrorWithError(err, "notificationhubs.Client", "CreateOrUpdate", nil, "Failure preparing request") + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "notificationhubs.Client", "CreateOrUpdate", resp, "Failure sending request") + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "notificationhubs.Client", "CreateOrUpdate", resp, "Failure responding to request") + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. 
+func (client Client) CreateOrUpdatePreparer(resourceGroupName string, namespaceName string, notificationHubName string, parameters NotificationHubCreateOrUpdateParameters) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "namespaceName": autorest.Encode("path", namespaceName), + "notificationHubName": autorest.Encode("path", notificationHubName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NotificationHubs/namespaces/{namespaceName}/notificationHubs/{notificationHubName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client Client) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. 
+func (client Client) CreateOrUpdateResponder(resp *http.Response) (result NotificationHubResource, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// CreateOrUpdateAuthorizationRule creates/Updates an authorization rule for a +// NotificationHub +// +// resourceGroupName is the name of the resource group. namespaceName is the +// namespace name. notificationHubName is the notification hub name. +// authorizationRuleName is authorization Rule Name. parameters is the shared +// access authorization rule. +func (client Client) CreateOrUpdateAuthorizationRule(resourceGroupName string, namespaceName string, notificationHubName string, authorizationRuleName string, parameters SharedAccessAuthorizationRuleCreateOrUpdateParameters) (result SharedAccessAuthorizationRuleResource, err error) { + req, err := client.CreateOrUpdateAuthorizationRulePreparer(resourceGroupName, namespaceName, notificationHubName, authorizationRuleName, parameters) + if err != nil { + return result, autorest.NewErrorWithError(err, "notificationhubs.Client", "CreateOrUpdateAuthorizationRule", nil, "Failure preparing request") + } + + resp, err := client.CreateOrUpdateAuthorizationRuleSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "notificationhubs.Client", "CreateOrUpdateAuthorizationRule", resp, "Failure sending request") + } + + result, err = client.CreateOrUpdateAuthorizationRuleResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "notificationhubs.Client", "CreateOrUpdateAuthorizationRule", resp, "Failure responding to request") + } + + return +} + +// CreateOrUpdateAuthorizationRulePreparer prepares the CreateOrUpdateAuthorizationRule request. 
+func (client Client) CreateOrUpdateAuthorizationRulePreparer(resourceGroupName string, namespaceName string, notificationHubName string, authorizationRuleName string, parameters SharedAccessAuthorizationRuleCreateOrUpdateParameters) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "authorizationRuleName": autorest.Encode("path", authorizationRuleName), + "namespaceName": autorest.Encode("path", namespaceName), + "notificationHubName": autorest.Encode("path", notificationHubName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NotificationHubs/namespaces/{namespaceName}/notificationHubs/{notificationHubName}/AuthorizationRules/{authorizationRuleName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// CreateOrUpdateAuthorizationRuleSender sends the CreateOrUpdateAuthorizationRule request. The method will close the +// http.Response Body if it receives an error. +func (client Client) CreateOrUpdateAuthorizationRuleSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// CreateOrUpdateAuthorizationRuleResponder handles the response to the CreateOrUpdateAuthorizationRule request. The method always +// closes the http.Response Body. 
+func (client Client) CreateOrUpdateAuthorizationRuleResponder(resp *http.Response) (result SharedAccessAuthorizationRuleResource, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete deletes a notification hub associated with a namespace. +// +// resourceGroupName is the name of the resource group. namespaceName is the +// namespace name. notificationHubName is the notification hub name. +func (client Client) Delete(resourceGroupName string, namespaceName string, notificationHubName string) (result autorest.Response, err error) { + req, err := client.DeletePreparer(resourceGroupName, namespaceName, notificationHubName) + if err != nil { + return result, autorest.NewErrorWithError(err, "notificationhubs.Client", "Delete", nil, "Failure preparing request") + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "notificationhubs.Client", "Delete", resp, "Failure sending request") + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "notificationhubs.Client", "Delete", resp, "Failure responding to request") + } + + return +} + +// DeletePreparer prepares the Delete request. 
+func (client Client) DeletePreparer(resourceGroupName string, namespaceName string, notificationHubName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "namespaceName": autorest.Encode("path", namespaceName), + "notificationHubName": autorest.Encode("path", notificationHubName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NotificationHubs/namespaces/{namespaceName}/notificationHubs/{notificationHubName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client Client) DeleteSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client Client) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByClosing()) + result.Response = resp + return +} + +// DeleteAuthorizationRule deletes a notificationHub authorization rule +// +// resourceGroupName is the name of the resource group. namespaceName is the +// namespace name. notificationHubName is the notification hub name. +// authorizationRuleName is authorization Rule Name. 
+func (client Client) DeleteAuthorizationRule(resourceGroupName string, namespaceName string, notificationHubName string, authorizationRuleName string) (result autorest.Response, err error) { + req, err := client.DeleteAuthorizationRulePreparer(resourceGroupName, namespaceName, notificationHubName, authorizationRuleName) + if err != nil { + return result, autorest.NewErrorWithError(err, "notificationhubs.Client", "DeleteAuthorizationRule", nil, "Failure preparing request") + } + + resp, err := client.DeleteAuthorizationRuleSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "notificationhubs.Client", "DeleteAuthorizationRule", resp, "Failure sending request") + } + + result, err = client.DeleteAuthorizationRuleResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "notificationhubs.Client", "DeleteAuthorizationRule", resp, "Failure responding to request") + } + + return +} + +// DeleteAuthorizationRulePreparer prepares the DeleteAuthorizationRule request. 
+func (client Client) DeleteAuthorizationRulePreparer(resourceGroupName string, namespaceName string, notificationHubName string, authorizationRuleName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "authorizationRuleName": autorest.Encode("path", authorizationRuleName), + "namespaceName": autorest.Encode("path", namespaceName), + "notificationHubName": autorest.Encode("path", notificationHubName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NotificationHubs/namespaces/{namespaceName}/notificationHubs/{notificationHubName}/AuthorizationRules/{authorizationRuleName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// DeleteAuthorizationRuleSender sends the DeleteAuthorizationRule request. The method will close the +// http.Response Body if it receives an error. +func (client Client) DeleteAuthorizationRuleSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// DeleteAuthorizationRuleResponder handles the response to the DeleteAuthorizationRule request. The method always +// closes the http.Response Body. +func (client Client) DeleteAuthorizationRuleResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusNoContent, http.StatusOK), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get gets a notification hub associated with a namespace. 
+// +// resourceGroupName is the name of the resource group. namespaceName is the +// namespace name. notificationHubName is the notification hub name. +func (client Client) Get(resourceGroupName string, namespaceName string, notificationHubName string) (result NotificationHubResource, err error) { + req, err := client.GetPreparer(resourceGroupName, namespaceName, notificationHubName) + if err != nil { + return result, autorest.NewErrorWithError(err, "notificationhubs.Client", "Get", nil, "Failure preparing request") + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "notificationhubs.Client", "Get", resp, "Failure sending request") + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "notificationhubs.Client", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client Client) GetPreparer(resourceGroupName string, namespaceName string, notificationHubName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "namespaceName": autorest.Encode("path", namespaceName), + "notificationHubName": autorest.Encode("path", notificationHubName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NotificationHubs/namespaces/{namespaceName}/notificationHubs/{notificationHubName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetSender sends the Get request. 
The method will close the +// http.Response Body if it receives an error. +func (client Client) GetSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client Client) GetResponder(resp *http.Response) (result NotificationHubResource, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// GetAuthorizationRule gets an authorization rule for a NotificationHub by +// name. +// +// resourceGroupName is the name of the resource group. namespaceName is the +// namespace name notificationHubName is the notification hub name. +// authorizationRuleName is authorization rule name. +func (client Client) GetAuthorizationRule(resourceGroupName string, namespaceName string, notificationHubName string, authorizationRuleName string) (result SharedAccessAuthorizationRuleResource, err error) { + req, err := client.GetAuthorizationRulePreparer(resourceGroupName, namespaceName, notificationHubName, authorizationRuleName) + if err != nil { + return result, autorest.NewErrorWithError(err, "notificationhubs.Client", "GetAuthorizationRule", nil, "Failure preparing request") + } + + resp, err := client.GetAuthorizationRuleSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "notificationhubs.Client", "GetAuthorizationRule", resp, "Failure sending request") + } + + result, err = client.GetAuthorizationRuleResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "notificationhubs.Client", "GetAuthorizationRule", resp, "Failure responding to request") + } + + return +} + +// GetAuthorizationRulePreparer prepares the 
GetAuthorizationRule request. +func (client Client) GetAuthorizationRulePreparer(resourceGroupName string, namespaceName string, notificationHubName string, authorizationRuleName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "authorizationRuleName": autorest.Encode("path", authorizationRuleName), + "namespaceName": autorest.Encode("path", namespaceName), + "notificationHubName": autorest.Encode("path", notificationHubName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NotificationHubs/namespaces/{namespaceName}/notificationHubs/{notificationHubName}/AuthorizationRules/{authorizationRuleName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetAuthorizationRuleSender sends the GetAuthorizationRule request. The method will close the +// http.Response Body if it receives an error. +func (client Client) GetAuthorizationRuleSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetAuthorizationRuleResponder handles the response to the GetAuthorizationRule request. The method always +// closes the http.Response Body. 
+func (client Client) GetAuthorizationRuleResponder(resp *http.Response) (result SharedAccessAuthorizationRuleResource, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// GetPnsCredentials lists the PNS Credentials associated with a notification +// hub . +// +// resourceGroupName is the name of the resource group. namespaceName is the +// namespace name. notificationHubName is the notification hub name. +func (client Client) GetPnsCredentials(resourceGroupName string, namespaceName string, notificationHubName string) (result NotificationHubResource, err error) { + req, err := client.GetPnsCredentialsPreparer(resourceGroupName, namespaceName, notificationHubName) + if err != nil { + return result, autorest.NewErrorWithError(err, "notificationhubs.Client", "GetPnsCredentials", nil, "Failure preparing request") + } + + resp, err := client.GetPnsCredentialsSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "notificationhubs.Client", "GetPnsCredentials", resp, "Failure sending request") + } + + result, err = client.GetPnsCredentialsResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "notificationhubs.Client", "GetPnsCredentials", resp, "Failure responding to request") + } + + return +} + +// GetPnsCredentialsPreparer prepares the GetPnsCredentials request. 
+func (client Client) GetPnsCredentialsPreparer(resourceGroupName string, namespaceName string, notificationHubName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "namespaceName": autorest.Encode("path", namespaceName), + "notificationHubName": autorest.Encode("path", notificationHubName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NotificationHubs/namespaces/{namespaceName}/notificationHubs/{notificationHubName}/pnsCredentials", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetPnsCredentialsSender sends the GetPnsCredentials request. The method will close the +// http.Response Body if it receives an error. +func (client Client) GetPnsCredentialsSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetPnsCredentialsResponder handles the response to the GetPnsCredentials request. The method always +// closes the http.Response Body. +func (client Client) GetPnsCredentialsResponder(resp *http.Response) (result NotificationHubResource, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List lists the notification hubs associated with a namespace. +// +// resourceGroupName is the name of the resource group. namespaceName is the +// namespace name. 
+func (client Client) List(resourceGroupName string, namespaceName string) (result NotificationHubListResult, err error) { + req, err := client.ListPreparer(resourceGroupName, namespaceName) + if err != nil { + return result, autorest.NewErrorWithError(err, "notificationhubs.Client", "List", nil, "Failure preparing request") + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "notificationhubs.Client", "List", resp, "Failure sending request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "notificationhubs.Client", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client Client) ListPreparer(resourceGroupName string, namespaceName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "namespaceName": autorest.Encode("path", namespaceName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NotificationHubs/namespaces/{namespaceName}/notificationHubs", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client Client) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. 
+func (client Client) ListResponder(resp *http.Response) (result NotificationHubListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListNextResults retrieves the next set of results, if any. +func (client Client) ListNextResults(lastResults NotificationHubListResult) (result NotificationHubListResult, err error) { + req, err := lastResults.NotificationHubListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "notificationhubs.Client", "List", nil, "Failure preparing next results request request") + } + if req == nil { + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "notificationhubs.Client", "List", resp, "Failure sending next results request request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "notificationhubs.Client", "List", resp, "Failure responding to next results request request") + } + + return +} + +// ListAuthorizationRules gets the authorization rules for a NotificationHub. +// +// resourceGroupName is the name of the resource group. namespaceName is the +// namespace name notificationHubName is the notification hub name. 
+func (client Client) ListAuthorizationRules(resourceGroupName string, namespaceName string, notificationHubName string) (result SharedAccessAuthorizationRuleListResult, err error) { + req, err := client.ListAuthorizationRulesPreparer(resourceGroupName, namespaceName, notificationHubName) + if err != nil { + return result, autorest.NewErrorWithError(err, "notificationhubs.Client", "ListAuthorizationRules", nil, "Failure preparing request") + } + + resp, err := client.ListAuthorizationRulesSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "notificationhubs.Client", "ListAuthorizationRules", resp, "Failure sending request") + } + + result, err = client.ListAuthorizationRulesResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "notificationhubs.Client", "ListAuthorizationRules", resp, "Failure responding to request") + } + + return +} + +// ListAuthorizationRulesPreparer prepares the ListAuthorizationRules request. 
+func (client Client) ListAuthorizationRulesPreparer(resourceGroupName string, namespaceName string, notificationHubName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "namespaceName": autorest.Encode("path", namespaceName), + "notificationHubName": autorest.Encode("path", notificationHubName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NotificationHubs/namespaces/{namespaceName}/notificationHubs/{notificationHubName}/AuthorizationRules", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListAuthorizationRulesSender sends the ListAuthorizationRules request. The method will close the +// http.Response Body if it receives an error. +func (client Client) ListAuthorizationRulesSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListAuthorizationRulesResponder handles the response to the ListAuthorizationRules request. The method always +// closes the http.Response Body. +func (client Client) ListAuthorizationRulesResponder(resp *http.Response) (result SharedAccessAuthorizationRuleListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListAuthorizationRulesNextResults retrieves the next set of results, if any. 
+func (client Client) ListAuthorizationRulesNextResults(lastResults SharedAccessAuthorizationRuleListResult) (result SharedAccessAuthorizationRuleListResult, err error) { + req, err := lastResults.SharedAccessAuthorizationRuleListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "notificationhubs.Client", "ListAuthorizationRules", nil, "Failure preparing next results request request") + } + if req == nil { + return + } + + resp, err := client.ListAuthorizationRulesSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "notificationhubs.Client", "ListAuthorizationRules", resp, "Failure sending next results request request") + } + + result, err = client.ListAuthorizationRulesResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "notificationhubs.Client", "ListAuthorizationRules", resp, "Failure responding to next results request request") + } + + return +} + +// ListKeys gets the Primary and Secondary ConnectionStrings to the +// NotificationHub +// +// resourceGroupName is the name of the resource group. namespaceName is the +// namespace name. notificationHubName is the notification hub name. +// authorizationRuleName is the connection string of the NotificationHub for +// the specified authorizationRule. 
+func (client Client) ListKeys(resourceGroupName string, namespaceName string, notificationHubName string, authorizationRuleName string) (result ResourceListKeys, err error) { + req, err := client.ListKeysPreparer(resourceGroupName, namespaceName, notificationHubName, authorizationRuleName) + if err != nil { + return result, autorest.NewErrorWithError(err, "notificationhubs.Client", "ListKeys", nil, "Failure preparing request") + } + + resp, err := client.ListKeysSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "notificationhubs.Client", "ListKeys", resp, "Failure sending request") + } + + result, err = client.ListKeysResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "notificationhubs.Client", "ListKeys", resp, "Failure responding to request") + } + + return +} + +// ListKeysPreparer prepares the ListKeys request. +func (client Client) ListKeysPreparer(resourceGroupName string, namespaceName string, notificationHubName string, authorizationRuleName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "authorizationRuleName": autorest.Encode("path", authorizationRuleName), + "namespaceName": autorest.Encode("path", namespaceName), + "notificationHubName": autorest.Encode("path", notificationHubName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NotificationHubs/namespaces/{namespaceName}/notificationHubs/{notificationHubName}/AuthorizationRules/{authorizationRuleName}/listKeys", pathParameters), + 
autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListKeysSender sends the ListKeys request. The method will close the +// http.Response Body if it receives an error. +func (client Client) ListKeysSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListKeysResponder handles the response to the ListKeys request. The method always +// closes the http.Response Body. +func (client Client) ListKeysResponder(resp *http.Response) (result ResourceListKeys, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/notificationhubs/version.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/notificationhubs/version.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/notificationhubs/version.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/notificationhubs/version.go 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,43 @@ +package notificationhubs + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "fmt" +) + +const ( + major = "3" + minor = "1" + patch = "0" + // Always begin a "tag" with a dash (as per http://semver.org) + tag = "-beta" + semVerFormat = "%s.%s.%s%s" + userAgentFormat = "Azure-SDK-for-Go/%s arm-%s/%s" +) + +// UserAgent returns the UserAgent string to use when sending http.Requests. +func UserAgent() string { + return fmt.Sprintf(userAgentFormat, Version(), "notificationhubs", "2014-09-01") +} + +// Version returns the semantic version (see http://semver.org) of the client. +func Version() string { + return fmt.Sprintf(semVerFormat, major, minor, patch, tag) +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/powerbiembedded/client.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/powerbiembedded/client.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/powerbiembedded/client.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/powerbiembedded/client.go 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,116 @@ +// Package powerbiembedded implements the Azure ARM Powerbiembedded service +// API version 2016-01-29. +// +// Client to manage your Power BI embedded workspace collections and retrieve +// workspaces. +package powerbiembedded + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "net/http" +) + +const ( + // APIVersion is the version of the Powerbiembedded + APIVersion = "2016-01-29" + + // DefaultBaseURI is the default URI used for the service Powerbiembedded + DefaultBaseURI = "http://management.azure.com" +) + +// ManagementClient is the base client for Powerbiembedded. +type ManagementClient struct { + autorest.Client + BaseURI string + APIVersion string + SubscriptionID string +} + +// New creates an instance of the ManagementClient client. +func New(subscriptionID string) ManagementClient { + return NewWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewWithBaseURI creates an instance of the ManagementClient client. +func NewWithBaseURI(baseURI string, subscriptionID string) ManagementClient { + return ManagementClient{ + Client: autorest.NewClientWithUserAgent(UserAgent()), + BaseURI: baseURI, + APIVersion: APIVersion, + SubscriptionID: subscriptionID, + } +} + +// GetAvailableOperations indicates which operations can be performed by the +// Power BI Resource Provider. 
+func (client ManagementClient) GetAvailableOperations() (result OperationList, err error) { + req, err := client.GetAvailableOperationsPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "powerbiembedded.ManagementClient", "GetAvailableOperations", nil, "Failure preparing request") + } + + resp, err := client.GetAvailableOperationsSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "powerbiembedded.ManagementClient", "GetAvailableOperations", resp, "Failure sending request") + } + + result, err = client.GetAvailableOperationsResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "powerbiembedded.ManagementClient", "GetAvailableOperations", resp, "Failure responding to request") + } + + return +} + +// GetAvailableOperationsPreparer prepares the GetAvailableOperations request. +func (client ManagementClient) GetAvailableOperationsPreparer() (*http.Request, error) { + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/providers/Microsoft.PowerBI/operations"), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetAvailableOperationsSender sends the GetAvailableOperations request. The method will close the +// http.Response Body if it receives an error. +func (client ManagementClient) GetAvailableOperationsSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetAvailableOperationsResponder handles the response to the GetAvailableOperations request. The method always +// closes the http.Response Body. 
+func (client ManagementClient) GetAvailableOperationsResponder(resp *http.Response) (result OperationList, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/powerbiembedded/models.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/powerbiembedded/models.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/powerbiembedded/models.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/powerbiembedded/models.go 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,162 @@ +package powerbiembedded + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" +) + +// AccessKeyName enumerates the values for access key name. +type AccessKeyName string + +const ( + // Key1 specifies the key 1 state for access key name. + Key1 AccessKeyName = "key1" + // Key2 specifies the key 2 state for access key name. 
+ Key2 AccessKeyName = "key2" +) + +// CheckNameReason enumerates the values for check name reason. +type CheckNameReason string + +const ( + // Invalid specifies the invalid state for check name reason. + Invalid CheckNameReason = "Invalid" + // Unavailable specifies the unavailable state for check name reason. + Unavailable CheckNameReason = "Unavailable" +) + +// AzureSku is +type AzureSku struct { + Name *string `json:"name,omitempty"` + Tier *string `json:"tier,omitempty"` +} + +// CheckNameRequest is +type CheckNameRequest struct { + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` +} + +// CheckNameResponse is +type CheckNameResponse struct { + autorest.Response `json:"-"` + NameAvailable *bool `json:"nameAvailable,omitempty"` + Reason CheckNameReason `json:"reason,omitempty"` + Message *string `json:"message,omitempty"` +} + +// CreateWorkspaceCollectionRequest is +type CreateWorkspaceCollectionRequest struct { + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` + Sku *AzureSku `json:"sku,omitempty"` +} + +// Display is +type Display struct { + Provider *string `json:"provider,omitempty"` + Resource *string `json:"resource,omitempty"` + Operation *string `json:"operation,omitempty"` + Description *string `json:"description,omitempty"` + Origin *string `json:"origin,omitempty"` +} + +// Error is +type Error struct { + Code *string `json:"code,omitempty"` + Message *string `json:"message,omitempty"` + Target *string `json:"target,omitempty"` + Details *[]ErrorDetail `json:"details,omitempty"` +} + +// ErrorDetail is +type ErrorDetail struct { + Code *string `json:"code,omitempty"` + Message *string `json:"message,omitempty"` + Target *string `json:"target,omitempty"` +} + +// MigrateWorkspaceCollectionRequest is +type MigrateWorkspaceCollectionRequest struct { + TargetResourceGroup *string `json:"targetResourceGroup,omitempty"` + Resources *[]string `json:"resources,omitempty"` +} + +// 
Operation is +type Operation struct { + Name *string `json:"name,omitempty"` + Display *Display `json:"display,omitempty"` +} + +// OperationList is +type OperationList struct { + autorest.Response `json:"-"` + Value *[]Operation `json:"value,omitempty"` +} + +// UpdateWorkspaceCollectionRequest is +type UpdateWorkspaceCollectionRequest struct { + Tags *map[string]*string `json:"tags,omitempty"` + Sku *AzureSku `json:"sku,omitempty"` +} + +// Workspace is +type Workspace struct { + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Properties *map[string]interface{} `json:"properties,omitempty"` +} + +// WorkspaceCollection is +type WorkspaceCollection struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` + Sku *AzureSku `json:"sku,omitempty"` + Properties *map[string]interface{} `json:"properties,omitempty"` +} + +// WorkspaceCollectionAccessKey is +type WorkspaceCollectionAccessKey struct { + KeyName AccessKeyName `json:"keyName,omitempty"` +} + +// WorkspaceCollectionAccessKeys is +type WorkspaceCollectionAccessKeys struct { + autorest.Response `json:"-"` + Key1 *string `json:"key1,omitempty"` + Key2 *string `json:"key2,omitempty"` +} + +// WorkspaceCollectionList is +type WorkspaceCollectionList struct { + autorest.Response `json:"-"` + Value *[]WorkspaceCollection `json:"value,omitempty"` +} + +// WorkspaceList is +type WorkspaceList struct { + autorest.Response `json:"-"` + Value *[]Workspace `json:"value,omitempty"` +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/powerbiembedded/version.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/powerbiembedded/version.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/powerbiembedded/version.go 1970-01-01 
00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/powerbiembedded/version.go 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,43 @@ +package powerbiembedded + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "fmt" +) + +const ( + major = "3" + minor = "1" + patch = "0" + // Always begin a "tag" with a dash (as per http://semver.org) + tag = "-beta" + semVerFormat = "%s.%s.%s%s" + userAgentFormat = "Azure-SDK-for-Go/%s arm-%s/%s" +) + +// UserAgent returns the UserAgent string to use when sending http.Requests. +func UserAgent() string { + return fmt.Sprintf(userAgentFormat, Version(), "powerbiembedded", "2016-01-29") +} + +// Version returns the semantic version (see http://semver.org) of the client. 
+func Version() string { + return fmt.Sprintf(semVerFormat, major, minor, patch, tag) +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/powerbiembedded/workspacecollections.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/powerbiembedded/workspacecollections.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/powerbiembedded/workspacecollections.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/powerbiembedded/workspacecollections.go 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,688 @@ +package powerbiembedded + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "net/http" +) + +// WorkspaceCollectionsClient is the client to manage your Power BI embedded +// workspace collections and retrieve workspaces. +type WorkspaceCollectionsClient struct { + ManagementClient +} + +// NewWorkspaceCollectionsClient creates an instance of the +// WorkspaceCollectionsClient client. 
+func NewWorkspaceCollectionsClient(subscriptionID string) WorkspaceCollectionsClient { + return NewWorkspaceCollectionsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewWorkspaceCollectionsClientWithBaseURI creates an instance of the +// WorkspaceCollectionsClient client. +func NewWorkspaceCollectionsClientWithBaseURI(baseURI string, subscriptionID string) WorkspaceCollectionsClient { + return WorkspaceCollectionsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CheckNameAvailability check that the specified Power BI Workspace +// Collection name is valid and not in use. +// +// location is azure location body is check name availability request +func (client WorkspaceCollectionsClient) CheckNameAvailability(location string, body CheckNameRequest) (result CheckNameResponse, err error) { + req, err := client.CheckNameAvailabilityPreparer(location, body) + if err != nil { + return result, autorest.NewErrorWithError(err, "powerbiembedded.WorkspaceCollectionsClient", "CheckNameAvailability", nil, "Failure preparing request") + } + + resp, err := client.CheckNameAvailabilitySender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "powerbiembedded.WorkspaceCollectionsClient", "CheckNameAvailability", resp, "Failure sending request") + } + + result, err = client.CheckNameAvailabilityResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "powerbiembedded.WorkspaceCollectionsClient", "CheckNameAvailability", resp, "Failure responding to request") + } + + return +} + +// CheckNameAvailabilityPreparer prepares the CheckNameAvailability request. 
+func (client WorkspaceCollectionsClient) CheckNameAvailabilityPreparer(location string, body CheckNameRequest) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "location": autorest.Encode("path", location), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.PowerBI/locations/{location}/checkNameAvailability", pathParameters), + autorest.WithJSON(body), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// CheckNameAvailabilitySender sends the CheckNameAvailability request. The method will close the +// http.Response Body if it receives an error. +func (client WorkspaceCollectionsClient) CheckNameAvailabilitySender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// CheckNameAvailabilityResponder handles the response to the CheckNameAvailability request. The method always +// closes the http.Response Body. +func (client WorkspaceCollectionsClient) CheckNameAvailabilityResponder(resp *http.Response) (result CheckNameResponse, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Create creates a new Power BI Workspace Collection with the specified +// properties. A Power BI Workspace Collection contains one or more Power BI +// Workspaces and can be used to provision keys that provide API access to +// those Power BI Workspaces. 
+// +// resourceGroupName is azure resource group workspaceCollectionName is power +// BI Embedded workspace collection name body is create workspace collection +// request +func (client WorkspaceCollectionsClient) Create(resourceGroupName string, workspaceCollectionName string, body CreateWorkspaceCollectionRequest) (result WorkspaceCollection, err error) { + req, err := client.CreatePreparer(resourceGroupName, workspaceCollectionName, body) + if err != nil { + return result, autorest.NewErrorWithError(err, "powerbiembedded.WorkspaceCollectionsClient", "Create", nil, "Failure preparing request") + } + + resp, err := client.CreateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "powerbiembedded.WorkspaceCollectionsClient", "Create", resp, "Failure sending request") + } + + result, err = client.CreateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "powerbiembedded.WorkspaceCollectionsClient", "Create", resp, "Failure responding to request") + } + + return +} + +// CreatePreparer prepares the Create request. 
+func (client WorkspaceCollectionsClient) CreatePreparer(resourceGroupName string, workspaceCollectionName string, body CreateWorkspaceCollectionRequest) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "workspaceCollectionName": autorest.Encode("path", workspaceCollectionName), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.PowerBI/workspaceCollections/{workspaceCollectionName}", pathParameters), + autorest.WithJSON(body), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// CreateSender sends the Create request. The method will close the +// http.Response Body if it receives an error. +func (client WorkspaceCollectionsClient) CreateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// CreateResponder handles the response to the Create request. The method always +// closes the http.Response Body. +func (client WorkspaceCollectionsClient) CreateResponder(resp *http.Response) (result WorkspaceCollection, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete delete a Power BI Workspace Collection. This method may poll for +// completion. Polling can be canceled by passing the cancel channel +// argument. The channel will be used to cancel polling and any outstanding +// HTTP requests. 
+// +// resourceGroupName is azure resource group workspaceCollectionName is power +// BI Embedded workspace collection name +func (client WorkspaceCollectionsClient) Delete(resourceGroupName string, workspaceCollectionName string, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.DeletePreparer(resourceGroupName, workspaceCollectionName, cancel) + if err != nil { + return result, autorest.NewErrorWithError(err, "powerbiembedded.WorkspaceCollectionsClient", "Delete", nil, "Failure preparing request") + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "powerbiembedded.WorkspaceCollectionsClient", "Delete", resp, "Failure sending request") + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "powerbiembedded.WorkspaceCollectionsClient", "Delete", resp, "Failure responding to request") + } + + return +} + +// DeletePreparer prepares the Delete request. 
+func (client WorkspaceCollectionsClient) DeletePreparer(resourceGroupName string, workspaceCollectionName string, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "workspaceCollectionName": autorest.Encode("path", workspaceCollectionName), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.PowerBI/workspaceCollections/{workspaceCollectionName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client WorkspaceCollectionsClient) DeleteSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client WorkspaceCollectionsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} + +// GetAccessKeys retrieves the primary and secondary access keys for the +// specified Power BI Workspace Collection. 
+// +// resourceGroupName is azure resource group workspaceCollectionName is power +// BI Embedded workspace collection name +func (client WorkspaceCollectionsClient) GetAccessKeys(resourceGroupName string, workspaceCollectionName string) (result WorkspaceCollectionAccessKeys, err error) { + req, err := client.GetAccessKeysPreparer(resourceGroupName, workspaceCollectionName) + if err != nil { + return result, autorest.NewErrorWithError(err, "powerbiembedded.WorkspaceCollectionsClient", "GetAccessKeys", nil, "Failure preparing request") + } + + resp, err := client.GetAccessKeysSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "powerbiembedded.WorkspaceCollectionsClient", "GetAccessKeys", resp, "Failure sending request") + } + + result, err = client.GetAccessKeysResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "powerbiembedded.WorkspaceCollectionsClient", "GetAccessKeys", resp, "Failure responding to request") + } + + return +} + +// GetAccessKeysPreparer prepares the GetAccessKeys request. 
+func (client WorkspaceCollectionsClient) GetAccessKeysPreparer(resourceGroupName string, workspaceCollectionName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "workspaceCollectionName": autorest.Encode("path", workspaceCollectionName), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.PowerBI/workspaceCollections/{workspaceCollectionName}/listKeys", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetAccessKeysSender sends the GetAccessKeys request. The method will close the +// http.Response Body if it receives an error. +func (client WorkspaceCollectionsClient) GetAccessKeysSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetAccessKeysResponder handles the response to the GetAccessKeys request. The method always +// closes the http.Response Body. +func (client WorkspaceCollectionsClient) GetAccessKeysResponder(resp *http.Response) (result WorkspaceCollectionAccessKeys, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// GetByName retrieves an existing Power BI Workspace Collection. 
+// +// resourceGroupName is azure resource group workspaceCollectionName is power +// BI Embedded workspace collection name +func (client WorkspaceCollectionsClient) GetByName(resourceGroupName string, workspaceCollectionName string) (result WorkspaceCollection, err error) { + req, err := client.GetByNamePreparer(resourceGroupName, workspaceCollectionName) + if err != nil { + return result, autorest.NewErrorWithError(err, "powerbiembedded.WorkspaceCollectionsClient", "GetByName", nil, "Failure preparing request") + } + + resp, err := client.GetByNameSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "powerbiembedded.WorkspaceCollectionsClient", "GetByName", resp, "Failure sending request") + } + + result, err = client.GetByNameResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "powerbiembedded.WorkspaceCollectionsClient", "GetByName", resp, "Failure responding to request") + } + + return +} + +// GetByNamePreparer prepares the GetByName request. +func (client WorkspaceCollectionsClient) GetByNamePreparer(resourceGroupName string, workspaceCollectionName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "workspaceCollectionName": autorest.Encode("path", workspaceCollectionName), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.PowerBI/workspaceCollections/{workspaceCollectionName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetByNameSender sends the GetByName request. 
The method will close the +// http.Response Body if it receives an error. +func (client WorkspaceCollectionsClient) GetByNameSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetByNameResponder handles the response to the GetByName request. The method always +// closes the http.Response Body. +func (client WorkspaceCollectionsClient) GetByNameResponder(resp *http.Response) (result WorkspaceCollection, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListByResourceGroup retrieves all existing Power BI Workspace Collections +// in the specified resource group. +// +// resourceGroupName is azure resource group +func (client WorkspaceCollectionsClient) ListByResourceGroup(resourceGroupName string) (result WorkspaceCollectionList, err error) { + req, err := client.ListByResourceGroupPreparer(resourceGroupName) + if err != nil { + return result, autorest.NewErrorWithError(err, "powerbiembedded.WorkspaceCollectionsClient", "ListByResourceGroup", nil, "Failure preparing request") + } + + resp, err := client.ListByResourceGroupSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "powerbiembedded.WorkspaceCollectionsClient", "ListByResourceGroup", resp, "Failure sending request") + } + + result, err = client.ListByResourceGroupResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "powerbiembedded.WorkspaceCollectionsClient", "ListByResourceGroup", resp, "Failure responding to request") + } + + return +} + +// ListByResourceGroupPreparer prepares the ListByResourceGroup request. 
+func (client WorkspaceCollectionsClient) ListByResourceGroupPreparer(resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.PowerBI/workspaceCollections", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the +// http.Response Body if it receives an error. +func (client WorkspaceCollectionsClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always +// closes the http.Response Body. +func (client WorkspaceCollectionsClient) ListByResourceGroupResponder(resp *http.Response) (result WorkspaceCollectionList, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListBySubscription retrieves all existing Power BI Workspace Collections in +// the specified subscription. 
+func (client WorkspaceCollectionsClient) ListBySubscription() (result WorkspaceCollectionList, err error) { + req, err := client.ListBySubscriptionPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "powerbiembedded.WorkspaceCollectionsClient", "ListBySubscription", nil, "Failure preparing request") + } + + resp, err := client.ListBySubscriptionSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "powerbiembedded.WorkspaceCollectionsClient", "ListBySubscription", resp, "Failure sending request") + } + + result, err = client.ListBySubscriptionResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "powerbiembedded.WorkspaceCollectionsClient", "ListBySubscription", resp, "Failure responding to request") + } + + return +} + +// ListBySubscriptionPreparer prepares the ListBySubscription request. +func (client WorkspaceCollectionsClient) ListBySubscriptionPreparer() (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.PowerBI/workspaceCollections", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListBySubscriptionSender sends the ListBySubscription request. The method will close the +// http.Response Body if it receives an error. +func (client WorkspaceCollectionsClient) ListBySubscriptionSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListBySubscriptionResponder handles the response to the ListBySubscription request. 
The method always +// closes the http.Response Body. +func (client WorkspaceCollectionsClient) ListBySubscriptionResponder(resp *http.Response) (result WorkspaceCollectionList, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Migrate migrates an existing Power BI Workspace Collection to a different +// resource group and/or subscription. +// +// resourceGroupName is azure resource group body is workspace migration +// request +func (client WorkspaceCollectionsClient) Migrate(resourceGroupName string, body MigrateWorkspaceCollectionRequest) (result autorest.Response, err error) { + req, err := client.MigratePreparer(resourceGroupName, body) + if err != nil { + return result, autorest.NewErrorWithError(err, "powerbiembedded.WorkspaceCollectionsClient", "Migrate", nil, "Failure preparing request") + } + + resp, err := client.MigrateSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "powerbiembedded.WorkspaceCollectionsClient", "Migrate", resp, "Failure sending request") + } + + result, err = client.MigrateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "powerbiembedded.WorkspaceCollectionsClient", "Migrate", resp, "Failure responding to request") + } + + return +} + +// MigratePreparer prepares the Migrate request. 
+func (client WorkspaceCollectionsClient) MigratePreparer(resourceGroupName string, body MigrateWorkspaceCollectionRequest) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/moveResources", pathParameters), + autorest.WithJSON(body), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// MigrateSender sends the Migrate request. The method will close the +// http.Response Body if it receives an error. +func (client WorkspaceCollectionsClient) MigrateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// MigrateResponder handles the response to the Migrate request. The method always +// closes the http.Response Body. +func (client WorkspaceCollectionsClient) MigrateResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByClosing()) + result.Response = resp + return +} + +// RegenerateKey regenerates the primary or secondary access key for the +// specified Power BI Workspace Collection. 
+// +// resourceGroupName is azure resource group workspaceCollectionName is power +// BI Embedded workspace collection name body is access key to regenerate +func (client WorkspaceCollectionsClient) RegenerateKey(resourceGroupName string, workspaceCollectionName string, body WorkspaceCollectionAccessKey) (result WorkspaceCollectionAccessKeys, err error) { + req, err := client.RegenerateKeyPreparer(resourceGroupName, workspaceCollectionName, body) + if err != nil { + return result, autorest.NewErrorWithError(err, "powerbiembedded.WorkspaceCollectionsClient", "RegenerateKey", nil, "Failure preparing request") + } + + resp, err := client.RegenerateKeySender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "powerbiembedded.WorkspaceCollectionsClient", "RegenerateKey", resp, "Failure sending request") + } + + result, err = client.RegenerateKeyResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "powerbiembedded.WorkspaceCollectionsClient", "RegenerateKey", resp, "Failure responding to request") + } + + return +} + +// RegenerateKeyPreparer prepares the RegenerateKey request. 
+func (client WorkspaceCollectionsClient) RegenerateKeyPreparer(resourceGroupName string, workspaceCollectionName string, body WorkspaceCollectionAccessKey) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "workspaceCollectionName": autorest.Encode("path", workspaceCollectionName), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.PowerBI/workspaceCollections/{workspaceCollectionName}/regenerateKey", pathParameters), + autorest.WithJSON(body), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// RegenerateKeySender sends the RegenerateKey request. The method will close the +// http.Response Body if it receives an error. +func (client WorkspaceCollectionsClient) RegenerateKeySender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// RegenerateKeyResponder handles the response to the RegenerateKey request. The method always +// closes the http.Response Body. +func (client WorkspaceCollectionsClient) RegenerateKeyResponder(resp *http.Response) (result WorkspaceCollectionAccessKeys, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Update update an existing Power BI Workspace Collection with the specified +// properties. 
+// +// resourceGroupName is azure resource group workspaceCollectionName is power +// BI Embedded workspace collection name body is update workspace collection +// request +func (client WorkspaceCollectionsClient) Update(resourceGroupName string, workspaceCollectionName string, body UpdateWorkspaceCollectionRequest) (result WorkspaceCollection, err error) { + req, err := client.UpdatePreparer(resourceGroupName, workspaceCollectionName, body) + if err != nil { + return result, autorest.NewErrorWithError(err, "powerbiembedded.WorkspaceCollectionsClient", "Update", nil, "Failure preparing request") + } + + resp, err := client.UpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "powerbiembedded.WorkspaceCollectionsClient", "Update", resp, "Failure sending request") + } + + result, err = client.UpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "powerbiembedded.WorkspaceCollectionsClient", "Update", resp, "Failure responding to request") + } + + return +} + +// UpdatePreparer prepares the Update request. 
+func (client WorkspaceCollectionsClient) UpdatePreparer(resourceGroupName string, workspaceCollectionName string, body UpdateWorkspaceCollectionRequest) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "workspaceCollectionName": autorest.Encode("path", workspaceCollectionName), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPatch(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.PowerBI/workspaceCollections/{workspaceCollectionName}", pathParameters), + autorest.WithJSON(body), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// UpdateSender sends the Update request. The method will close the +// http.Response Body if it receives an error. +func (client WorkspaceCollectionsClient) UpdateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// UpdateResponder handles the response to the Update request. The method always +// closes the http.Response Body. 
+func (client WorkspaceCollectionsClient) UpdateResponder(resp *http.Response) (result WorkspaceCollection, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/powerbiembedded/workspaces.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/powerbiembedded/workspaces.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/powerbiembedded/workspaces.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/powerbiembedded/workspaces.go 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,106 @@ +package powerbiembedded + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "net/http" +) + +// WorkspacesClient is the client to manage your Power BI embedded workspace +// collections and retrieve workspaces. 
+type WorkspacesClient struct { + ManagementClient +} + +// NewWorkspacesClient creates an instance of the WorkspacesClient client. +func NewWorkspacesClient(subscriptionID string) WorkspacesClient { + return NewWorkspacesClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewWorkspacesClientWithBaseURI creates an instance of the WorkspacesClient +// client. +func NewWorkspacesClientWithBaseURI(baseURI string, subscriptionID string) WorkspacesClient { + return WorkspacesClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// List retrieves all existing Power BI Workspaces in the specified Workspace +// Collection. +// +// resourceGroupName is azure resource group workspaceCollectionName is power +// BI Embedded workspace collection name +func (client WorkspacesClient) List(resourceGroupName string, workspaceCollectionName string) (result WorkspaceList, err error) { + req, err := client.ListPreparer(resourceGroupName, workspaceCollectionName) + if err != nil { + return result, autorest.NewErrorWithError(err, "powerbiembedded.WorkspacesClient", "List", nil, "Failure preparing request") + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "powerbiembedded.WorkspacesClient", "List", resp, "Failure sending request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "powerbiembedded.WorkspacesClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. 
+func (client WorkspacesClient) ListPreparer(resourceGroupName string, workspaceCollectionName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "workspaceCollectionName": autorest.Encode("path", workspaceCollectionName), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.PowerBI/workspaceCollections/{workspaceCollectionName}/workspaces", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client WorkspacesClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. 
+func (client WorkspacesClient) ListResponder(resp *http.Response) (result WorkspaceList, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/README.md juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/README.md --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/README.md 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/README.md 2016-10-13 14:32:06.000000000 +0000 @@ -118,7 +118,7 @@ ac := storage.NewAccountsClient(c["subscriptionID"]) - spt, err := helpers.NewServicePrincipalTokenFromCredentials(c, azure.AzureResourceManagerScope) + spt, err := helpers.NewServicePrincipalTokenFromCredentials(c, azure.PublicCloud.ResourceManagerEndpoint) if err != nil { log.Fatalf("Error: %v", err) } diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/redis/client.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/redis/client.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/redis/client.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/redis/client.go 2016-10-13 14:32:06.000000000 +0000 @@ -1,3 +1,6 @@ +// Package redis implements the Azure ARM Redis service API version 2016-04-01. +// +// REST API for Azure Redis Cache Service package redis // Copyright (c) Microsoft and contributors. All rights reserved. @@ -14,29 +17,29 @@ // See the License for the specific language governing permissions and // limitations under the License. 
// -// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 // Changes may cause incorrect behavior and will be lost if the code is // regenerated. import ( - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" "net/http" - "net/url" ) const ( // APIVersion is the version of the Redis - APIVersion = "2015-08-01" + APIVersion = "2016-04-01" // DefaultBaseURI is the default URI used for the service Redis DefaultBaseURI = "https://management.azure.com" ) -// ManagementClient is the .Net client wrapper for the REST API for Azure -// Redis Cache Management Service +// ManagementClient is the base client for Redis. type ManagementClient struct { autorest.Client BaseURI string + APIVersion string SubscriptionID string } @@ -50,6 +53,7 @@ return ManagementClient{ Client: autorest.NewClientWithUserAgent(UserAgent()), BaseURI: baseURI, + APIVersion: APIVersion, SubscriptionID: subscriptionID, } } @@ -60,21 +64,21 @@ // resourceGroupName is the name of the resource group. name is the name of // the redis cache. parameters is parameters supplied to the CreateOrUpdate // redis operation. 
-func (client ManagementClient) CreateOrUpdate(resourceGroupName string, name string, parameters CreateOrUpdateParameters) (result ResourceWithAccessKey, ae error) { +func (client ManagementClient) CreateOrUpdate(resourceGroupName string, name string, parameters CreateOrUpdateParameters) (result ResourceWithAccessKey, err error) { req, err := client.CreateOrUpdatePreparer(resourceGroupName, name, parameters) if err != nil { - return result, autorest.NewErrorWithError(err, "redis/ManagementClient", "CreateOrUpdate", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "redis.ManagementClient", "CreateOrUpdate", nil, "Failure preparing request") } resp, err := client.CreateOrUpdateSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "redis/ManagementClient", "CreateOrUpdate", "Failure sending request") + return result, autorest.NewErrorWithError(err, "redis.ManagementClient", "CreateOrUpdate", resp, "Failure sending request") } result, err = client.CreateOrUpdateResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "redis/ManagementClient", "CreateOrUpdate", "Failure responding to request") + err = autorest.NewErrorWithError(err, "redis.ManagementClient", "CreateOrUpdate", resp, "Failure responding to request") } return @@ -83,29 +87,29 @@ // CreateOrUpdatePreparer prepares the CreateOrUpdate request. 
func (client ManagementClient) CreateOrUpdatePreparer(resourceGroupName string, name string, parameters CreateOrUpdateParameters) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, + preparer := autorest.CreatePreparer( autorest.AsJSON(), autorest.AsPut(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cache/Redis/{name}"), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cache/Redis/{name}", pathParameters), autorest.WithJSON(parameters), - autorest.WithPathParameters(pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the // http.Response Body if it receives an error. func (client ManagementClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusCreated, http.StatusOK) + return autorest.SendWithSender(client, req) } // CreateOrUpdateResponder handles the response to the CreateOrUpdate request. 
The method always @@ -114,7 +118,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusCreated, http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusCreated, http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -125,21 +129,21 @@ // // resourceGroupName is the name of the resource group. name is the name of // the redis cache. -func (client ManagementClient) Delete(resourceGroupName string, name string) (result autorest.Response, ae error) { +func (client ManagementClient) Delete(resourceGroupName string, name string) (result autorest.Response, err error) { req, err := client.DeletePreparer(resourceGroupName, name) if err != nil { - return result, autorest.NewErrorWithError(err, "redis/ManagementClient", "Delete", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "redis.ManagementClient", "Delete", nil, "Failure preparing request") } resp, err := client.DeleteSender(req) if err != nil { result.Response = resp - return result, autorest.NewErrorWithError(err, "redis/ManagementClient", "Delete", "Failure sending request") + return result, autorest.NewErrorWithError(err, "redis.ManagementClient", "Delete", resp, "Failure sending request") } result, err = client.DeleteResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "redis/ManagementClient", "Delete", "Failure responding to request") + err = autorest.NewErrorWithError(err, "redis.ManagementClient", "Delete", resp, "Failure responding to request") } return @@ -148,28 +152,27 @@ // DeletePreparer prepares the Delete request. 
func (client ManagementClient) DeletePreparer(resourceGroupName string, name string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsDelete(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cache/Redis/{name}"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cache/Redis/{name}", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // DeleteSender sends the Delete request. The method will close the // http.Response Body if it receives an error. func (client ManagementClient) DeleteSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK, http.StatusAccepted, http.StatusNotFound) + return autorest.SendWithSender(client, req) } // DeleteResponder handles the response to the Delete request. The method always @@ -178,7 +181,140 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNotFound), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// Export import data into redis cache. This method may poll for completion. 
+// Polling can be canceled by passing the cancel channel argument. The +// channel will be used to cancel polling and any outstanding HTTP requests. +// +// resourceGroupName is the name of the resource group. name is the name of +// the redis cache. parameters is parameters for redis export operation. +func (client ManagementClient) Export(resourceGroupName string, name string, parameters ExportRDBParameters, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.ExportPreparer(resourceGroupName, name, parameters, cancel) + if err != nil { + return result, autorest.NewErrorWithError(err, "redis.ManagementClient", "Export", nil, "Failure preparing request") + } + + resp, err := client.ExportSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "redis.ManagementClient", "Export", resp, "Failure sending request") + } + + result, err = client.ExportResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "redis.ManagementClient", "Export", resp, "Failure responding to request") + } + + return +} + +// ExportPreparer prepares the Export request. 
+func (client ManagementClient) ExportPreparer(resourceGroupName string, name string, parameters ExportRDBParameters, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cache/Redis/{name}/export", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// ExportSender sends the Export request. The method will close the +// http.Response Body if it receives an error. +func (client ManagementClient) ExportSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// ExportResponder handles the response to the Export request. The method always +// closes the http.Response Body. +func (client ManagementClient) ExportResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} + +// ForceReboot reboot specified redis node(s). This operation requires write +// permission to the cache resource. There can be potential data loss. +// +// resourceGroupName is the name of the resource group. name is the name of +// the redis cache. parameters is specifies which redis node(s) to reboot. 
+func (client ManagementClient) ForceReboot(resourceGroupName string, name string, parameters RebootParameters) (result autorest.Response, err error) { + req, err := client.ForceRebootPreparer(resourceGroupName, name, parameters) + if err != nil { + return result, autorest.NewErrorWithError(err, "redis.ManagementClient", "ForceReboot", nil, "Failure preparing request") + } + + resp, err := client.ForceRebootSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "redis.ManagementClient", "ForceReboot", resp, "Failure sending request") + } + + result, err = client.ForceRebootResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "redis.ManagementClient", "ForceReboot", resp, "Failure responding to request") + } + + return +} + +// ForceRebootPreparer prepares the ForceReboot request. +func (client ManagementClient) ForceRebootPreparer(resourceGroupName string, name string, parameters RebootParameters) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cache/Redis/{name}/forceReboot", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ForceRebootSender sends the ForceReboot request. The method will close the +// http.Response Body if it receives an error. 
+func (client ManagementClient) ForceRebootSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ForceRebootResponder handles the response to the ForceReboot request. The method always +// closes the http.Response Body. +func (client ManagementClient) ForceRebootResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), autorest.ByClosing()) result.Response = resp return @@ -188,21 +324,21 @@ // // resourceGroupName is the name of the resource group. name is the name of // the redis cache. -func (client ManagementClient) Get(resourceGroupName string, name string) (result ResourceType, ae error) { +func (client ManagementClient) Get(resourceGroupName string, name string) (result ResourceType, err error) { req, err := client.GetPreparer(resourceGroupName, name) if err != nil { - return result, autorest.NewErrorWithError(err, "redis/ManagementClient", "Get", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "redis.ManagementClient", "Get", nil, "Failure preparing request") } resp, err := client.GetSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "redis/ManagementClient", "Get", "Failure sending request") + return result, autorest.NewErrorWithError(err, "redis.ManagementClient", "Get", resp, "Failure sending request") } result, err = client.GetResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "redis/ManagementClient", "Get", "Failure responding to request") + err = autorest.NewErrorWithError(err, "redis.ManagementClient", "Get", resp, "Failure responding to request") } return @@ -211,28 +347,27 @@ // GetPreparer prepares the Get request. 
func (client ManagementClient) GetPreparer(resourceGroupName string, name string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cache/Redis/{name}"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cache/Redis/{name}", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client ManagementClient) GetSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetResponder handles the response to the Get request. The method always @@ -241,29 +376,97 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} return } +// Import import data into redis cache. This method may poll for completion. +// Polling can be canceled by passing the cancel channel argument. 
The +// channel will be used to cancel polling and any outstanding HTTP requests. +// +// resourceGroupName is the name of the resource group. name is the name of +// the redis cache. parameters is parameters for redis import operation. +func (client ManagementClient) Import(resourceGroupName string, name string, parameters ImportRDBParameters, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.ImportPreparer(resourceGroupName, name, parameters, cancel) + if err != nil { + return result, autorest.NewErrorWithError(err, "redis.ManagementClient", "Import", nil, "Failure preparing request") + } + + resp, err := client.ImportSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "redis.ManagementClient", "Import", resp, "Failure sending request") + } + + result, err = client.ImportResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "redis.ManagementClient", "Import", resp, "Failure responding to request") + } + + return +} + +// ImportPreparer prepares the Import request. 
+func (client ManagementClient) ImportPreparer(resourceGroupName string, name string, parameters ImportRDBParameters, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cache/Redis/{name}/import", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// ImportSender sends the Import request. The method will close the +// http.Response Body if it receives an error. +func (client ManagementClient) ImportSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// ImportResponder handles the response to the Import request. The method always +// closes the http.Response Body. +func (client ManagementClient) ImportResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} + // List gets all redis caches in the specified subscription. 
-func (client ManagementClient) List() (result ListResult, ae error) { +func (client ManagementClient) List() (result ListResult, err error) { req, err := client.ListPreparer() if err != nil { - return result, autorest.NewErrorWithError(err, "redis/ManagementClient", "List", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "redis.ManagementClient", "List", nil, "Failure preparing request") } resp, err := client.ListSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "redis/ManagementClient", "List", "Failure sending request") + return result, autorest.NewErrorWithError(err, "redis.ManagementClient", "List", resp, "Failure sending request") } result, err = client.ListResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "redis/ManagementClient", "List", "Failure responding to request") + err = autorest.NewErrorWithError(err, "redis.ManagementClient", "List", resp, "Failure responding to request") } return @@ -272,26 +475,25 @@ // ListPreparer prepares the List request. func (client ManagementClient) ListPreparer() (*http.Request, error) { pathParameters := map[string]interface{}{ - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/providers/Microsoft.Cache/Redis/"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Cache/Redis/", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // ListSender sends the List request. 
The method will close the // http.Response Body if it receives an error. func (client ManagementClient) ListSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // ListResponder handles the response to the List request. The method always @@ -300,7 +502,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -308,10 +510,10 @@ } // ListNextResults retrieves the next set of results, if any. -func (client ManagementClient) ListNextResults(lastResults ListResult) (result ListResult, ae error) { +func (client ManagementClient) ListNextResults(lastResults ListResult) (result ListResult, err error) { req, err := lastResults.ListResultPreparer() if err != nil { - return result, autorest.NewErrorWithError(err, "redis/ManagementClient", "List", "Failure preparing next results request request") + return result, autorest.NewErrorWithError(err, "redis.ManagementClient", "List", nil, "Failure preparing next results request request") } if req == nil { return @@ -320,12 +522,12 @@ resp, err := client.ListSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "redis/ManagementClient", "List", "Failure sending next results request request") + return result, autorest.NewErrorWithError(err, "redis.ManagementClient", "List", resp, "Failure sending next results request request") } result, err = client.ListResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "redis/ManagementClient", "List", "Failure responding to next results request request") + err = autorest.NewErrorWithError(err, "redis.ManagementClient", "List", resp, "Failure responding to next results request request") } return @@ 
-334,21 +536,21 @@ // ListByResourceGroup gets all redis caches in a resource group. // // resourceGroupName is the name of the resource group. -func (client ManagementClient) ListByResourceGroup(resourceGroupName string) (result ListResult, ae error) { +func (client ManagementClient) ListByResourceGroup(resourceGroupName string) (result ListResult, err error) { req, err := client.ListByResourceGroupPreparer(resourceGroupName) if err != nil { - return result, autorest.NewErrorWithError(err, "redis/ManagementClient", "ListByResourceGroup", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "redis.ManagementClient", "ListByResourceGroup", nil, "Failure preparing request") } resp, err := client.ListByResourceGroupSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "redis/ManagementClient", "ListByResourceGroup", "Failure sending request") + return result, autorest.NewErrorWithError(err, "redis.ManagementClient", "ListByResourceGroup", resp, "Failure sending request") } result, err = client.ListByResourceGroupResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "redis/ManagementClient", "ListByResourceGroup", "Failure responding to request") + err = autorest.NewErrorWithError(err, "redis.ManagementClient", "ListByResourceGroup", resp, "Failure responding to request") } return @@ -357,27 +559,26 @@ // ListByResourceGroupPreparer prepares the ListByResourceGroup request. 
func (client ManagementClient) ListByResourceGroupPreparer(resourceGroupName string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cache/Redis/"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cache/Redis/", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the // http.Response Body if it receives an error. func (client ManagementClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always @@ -386,7 +587,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -394,10 +595,10 @@ } // ListByResourceGroupNextResults retrieves the next set of results, if any. 
-func (client ManagementClient) ListByResourceGroupNextResults(lastResults ListResult) (result ListResult, ae error) { +func (client ManagementClient) ListByResourceGroupNextResults(lastResults ListResult) (result ListResult, err error) { req, err := lastResults.ListResultPreparer() if err != nil { - return result, autorest.NewErrorWithError(err, "redis/ManagementClient", "ListByResourceGroup", "Failure preparing next results request request") + return result, autorest.NewErrorWithError(err, "redis.ManagementClient", "ListByResourceGroup", nil, "Failure preparing next results request request") } if req == nil { return @@ -406,12 +607,12 @@ resp, err := client.ListByResourceGroupSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "redis/ManagementClient", "ListByResourceGroup", "Failure sending next results request request") + return result, autorest.NewErrorWithError(err, "redis.ManagementClient", "ListByResourceGroup", resp, "Failure sending next results request request") } result, err = client.ListByResourceGroupResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "redis/ManagementClient", "ListByResourceGroup", "Failure responding to next results request request") + err = autorest.NewErrorWithError(err, "redis.ManagementClient", "ListByResourceGroup", resp, "Failure responding to next results request request") } return @@ -422,21 +623,21 @@ // // resourceGroupName is the name of the resource group. name is the name of // the redis cache. 
-func (client ManagementClient) ListKeys(resourceGroupName string, name string) (result ListKeysResult, ae error) { +func (client ManagementClient) ListKeys(resourceGroupName string, name string) (result ListKeysResult, err error) { req, err := client.ListKeysPreparer(resourceGroupName, name) if err != nil { - return result, autorest.NewErrorWithError(err, "redis/ManagementClient", "ListKeys", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "redis.ManagementClient", "ListKeys", nil, "Failure preparing request") } resp, err := client.ListKeysSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "redis/ManagementClient", "ListKeys", "Failure sending request") + return result, autorest.NewErrorWithError(err, "redis.ManagementClient", "ListKeys", resp, "Failure sending request") } result, err = client.ListKeysResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "redis/ManagementClient", "ListKeys", "Failure responding to request") + err = autorest.NewErrorWithError(err, "redis.ManagementClient", "ListKeys", resp, "Failure responding to request") } return @@ -445,28 +646,27 @@ // ListKeysPreparer prepares the ListKeys request. 
func (client ManagementClient) ListKeysPreparer(resourceGroupName string, name string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsPost(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cache/Redis/{name}/listKeys"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cache/Redis/{name}/listKeys", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // ListKeysSender sends the ListKeys request. The method will close the // http.Response Body if it receives an error. func (client ManagementClient) ListKeysSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // ListKeysResponder handles the response to the ListKeys request. The method always @@ -475,7 +675,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -487,21 +687,21 @@ // // resourceGroupName is the name of the resource group. name is the name of // the redis cache. 
parameters is specifies which key to reset. -func (client ManagementClient) RegenerateKey(resourceGroupName string, name string, parameters RegenerateKeyParameters) (result ListKeysResult, ae error) { +func (client ManagementClient) RegenerateKey(resourceGroupName string, name string, parameters RegenerateKeyParameters) (result ListKeysResult, err error) { req, err := client.RegenerateKeyPreparer(resourceGroupName, name, parameters) if err != nil { - return result, autorest.NewErrorWithError(err, "redis/ManagementClient", "RegenerateKey", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "redis.ManagementClient", "RegenerateKey", nil, "Failure preparing request") } resp, err := client.RegenerateKeySender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "redis/ManagementClient", "RegenerateKey", "Failure sending request") + return result, autorest.NewErrorWithError(err, "redis.ManagementClient", "RegenerateKey", resp, "Failure sending request") } result, err = client.RegenerateKeyResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "redis/ManagementClient", "RegenerateKey", "Failure responding to request") + err = autorest.NewErrorWithError(err, "redis.ManagementClient", "RegenerateKey", resp, "Failure responding to request") } return @@ -510,29 +710,29 @@ // RegenerateKeyPreparer prepares the RegenerateKey request. 
func (client ManagementClient) RegenerateKeyPreparer(resourceGroupName string, name string, parameters RegenerateKeyParameters) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, + preparer := autorest.CreatePreparer( autorest.AsJSON(), autorest.AsPost(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cache/Redis/{name}/regenerateKey"), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cache/Redis/{name}/regenerateKey", pathParameters), autorest.WithJSON(parameters), - autorest.WithPathParameters(pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // RegenerateKeySender sends the RegenerateKey request. The method will close the // http.Response Body if it receives an error. func (client ManagementClient) RegenerateKeySender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // RegenerateKeyResponder handles the response to the RegenerateKey request. 
The method always @@ -541,7 +741,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/redis/models.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/redis/models.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/redis/models.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/redis/models.go 2016-10-13 14:32:06.000000000 +0000 @@ -14,16 +14,36 @@ // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 // Changes may cause incorrect behavior and will be lost if the code is // regenerated. import ( - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/to" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/to" "net/http" ) +// DayOfWeek enumerates the values for day of week. +type DayOfWeek string + +const ( + // Friday specifies the friday state for day of week. + Friday DayOfWeek = "Friday" + // Monday specifies the monday state for day of week. + Monday DayOfWeek = "Monday" + // Saturday specifies the saturday state for day of week. + Saturday DayOfWeek = "Saturday" + // Sunday specifies the sunday state for day of week. + Sunday DayOfWeek = "Sunday" + // Thursday specifies the thursday state for day of week. + Thursday DayOfWeek = "Thursday" + // Tuesday specifies the tuesday state for day of week. 
+ Tuesday DayOfWeek = "Tuesday" + // Wednesday specifies the wednesday state for day of week. + Wednesday DayOfWeek = "Wednesday" +) + // KeyType enumerates the values for key type. type KeyType string @@ -34,6 +54,18 @@ Secondary KeyType = "Secondary" ) +// RebootType enumerates the values for reboot type. +type RebootType string + +const ( + // AllNodes specifies the all nodes state for reboot type. + AllNodes RebootType = "AllNodes" + // PrimaryNode specifies the primary node state for reboot type. + PrimaryNode RebootType = "PrimaryNode" + // SecondaryNode specifies the secondary node state for reboot type. + SecondaryNode RebootType = "SecondaryNode" +) + // SkuFamily enumerates the values for sku family. type SkuFamily string @@ -73,6 +105,19 @@ Properties *Properties `json:"properties,omitempty"` } +// ExportRDBParameters is parameters for redis export operation. +type ExportRDBParameters struct { + Format *string `json:"format,omitempty"` + Prefix *string `json:"prefix,omitempty"` + Container *string `json:"container,omitempty"` +} + +// ImportRDBParameters is parameters for redis import operation. +type ImportRDBParameters struct { + Format *string `json:"format,omitempty"` + Files *[]string `json:"files,omitempty"` +} + // ListKeysResult is the response of redis list keys operation. type ListKeysResult struct { autorest.Response `json:"-"` @@ -99,53 +144,72 @@ autorest.WithBaseURL(to.String(client.NextLink))) } -// Properties is parameters supplied to CreateOrUpdate redis operation. +// PatchSchedulesRequest is parameters to set patch schedules for redis cache. +type PatchSchedulesRequest struct { + Properties *ScheduleEntries `json:"properties,omitempty"` +} + +// PatchSchedulesResponse is response to put/get patch schedules for redis +// cache. 
+type PatchSchedulesResponse struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Location *string `json:"location,omitempty"` + Properties *ScheduleEntries `json:"properties,omitempty"` +} + +// Properties is properties supplied to CreateOrUpdate redis operation. type Properties struct { RedisVersion *string `json:"redisVersion,omitempty"` Sku *Sku `json:"sku,omitempty"` RedisConfiguration *map[string]*string `json:"redisConfiguration,omitempty"` EnableNonSslPort *bool `json:"enableNonSslPort,omitempty"` TenantSettings *map[string]*string `json:"tenantSettings,omitempty"` - ShardCount *int `json:"shardCount,omitempty"` - VirtualNetwork *string `json:"virtualNetwork,omitempty"` - Subnet *string `json:"subnet,omitempty"` + ShardCount *int32 `json:"shardCount,omitempty"` + SubnetID *string `json:"subnetId,omitempty"` StaticIP *string `json:"staticIP,omitempty"` } // ReadableProperties is parameters describing a redis instance type ReadableProperties struct { - ProvisioningState *string `json:"provisioningState,omitempty"` - HostName *string `json:"hostName,omitempty"` - Port *int `json:"port,omitempty"` - SslPort *int `json:"sslPort,omitempty"` RedisVersion *string `json:"redisVersion,omitempty"` Sku *Sku `json:"sku,omitempty"` RedisConfiguration *map[string]*string `json:"redisConfiguration,omitempty"` EnableNonSslPort *bool `json:"enableNonSslPort,omitempty"` TenantSettings *map[string]*string `json:"tenantSettings,omitempty"` - ShardCount *int `json:"shardCount,omitempty"` - VirtualNetwork *string `json:"virtualNetwork,omitempty"` - Subnet *string `json:"subnet,omitempty"` + ShardCount *int32 `json:"shardCount,omitempty"` + SubnetID *string `json:"subnetId,omitempty"` StaticIP *string `json:"staticIP,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` + HostName *string `json:"hostName,omitempty"` + Port *int32 `json:"port,omitempty"` + 
SslPort *int32 `json:"sslPort,omitempty"` } // ReadablePropertiesWithAccessKey is properties generated only in response to // CreateOrUpdate redis operation. type ReadablePropertiesWithAccessKey struct { - AccessKeys *AccessKeys `json:"accessKeys,omitempty"` - ProvisioningState *string `json:"provisioningState,omitempty"` - HostName *string `json:"hostName,omitempty"` - Port *int `json:"port,omitempty"` - SslPort *int `json:"sslPort,omitempty"` RedisVersion *string `json:"redisVersion,omitempty"` Sku *Sku `json:"sku,omitempty"` RedisConfiguration *map[string]*string `json:"redisConfiguration,omitempty"` EnableNonSslPort *bool `json:"enableNonSslPort,omitempty"` TenantSettings *map[string]*string `json:"tenantSettings,omitempty"` - ShardCount *int `json:"shardCount,omitempty"` - VirtualNetwork *string `json:"virtualNetwork,omitempty"` - Subnet *string `json:"subnet,omitempty"` + ShardCount *int32 `json:"shardCount,omitempty"` + SubnetID *string `json:"subnetId,omitempty"` StaticIP *string `json:"staticIP,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` + HostName *string `json:"hostName,omitempty"` + Port *int32 `json:"port,omitempty"` + SslPort *int32 `json:"sslPort,omitempty"` + AccessKeys *AccessKeys `json:"accessKeys,omitempty"` +} + +// RebootParameters is specifies which redis node(s) to reboot. +type RebootParameters struct { + RebootType RebootType `json:"rebootType,omitempty"` + ShardID *int32 `json:"shardId,omitempty"` } // RegenerateKeyParameters is specifies which redis access keys to reset. @@ -184,14 +248,21 @@ Properties *ReadablePropertiesWithAccessKey `json:"properties,omitempty"` } +// ScheduleEntries is list of patch schedules for redis cache. 
+type ScheduleEntries struct { + ScheduleEntriesProperty *[]ScheduleEntry `json:"scheduleEntries,omitempty"` +} + +// ScheduleEntry is +type ScheduleEntry struct { + DayOfWeek DayOfWeek `json:"dayOfWeek,omitempty"` + StartHourUtc *int32 `json:"startHourUtc,omitempty"` + MaintenanceWindow *string `json:"maintenanceWindow,omitempty"` +} + // Sku is sku parameters supplied to the create redis operation. type Sku struct { Name SkuName `json:"name,omitempty"` Family SkuFamily `json:"family,omitempty"` - Capacity *int `json:"capacity,omitempty"` -} - -// SubResource is -type SubResource struct { - ID *string `json:"id,omitempty"` + Capacity *int32 `json:"capacity,omitempty"` } diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/redis/patchschedules.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/redis/patchschedules.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/redis/patchschedules.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/redis/patchschedules.go 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,233 @@ +package redis + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. 
+ +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "net/http" +) + +// PatchSchedulesClient is the rEST API for Azure Redis Cache Service +type PatchSchedulesClient struct { + ManagementClient +} + +// NewPatchSchedulesClient creates an instance of the PatchSchedulesClient +// client. +func NewPatchSchedulesClient(subscriptionID string) PatchSchedulesClient { + return NewPatchSchedulesClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewPatchSchedulesClientWithBaseURI creates an instance of the +// PatchSchedulesClient client. +func NewPatchSchedulesClientWithBaseURI(baseURI string, subscriptionID string) PatchSchedulesClient { + return PatchSchedulesClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate create or replace the patching schedule for redis cache. +// +// resourceGroupName is the name of the resource group. name is the name of +// the redis cache. parameters is parameters to set patch schedules for redis +// cache. +func (client PatchSchedulesClient) CreateOrUpdate(resourceGroupName string, name string, parameters PatchSchedulesRequest) (result PatchSchedulesResponse, err error) { + req, err := client.CreateOrUpdatePreparer(resourceGroupName, name, parameters) + if err != nil { + return result, autorest.NewErrorWithError(err, "redis.PatchSchedulesClient", "CreateOrUpdate", nil, "Failure preparing request") + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "redis.PatchSchedulesClient", "CreateOrUpdate", resp, "Failure sending request") + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "redis.PatchSchedulesClient", "CreateOrUpdate", resp, "Failure responding to request") + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. 
+func (client PatchSchedulesClient) CreateOrUpdatePreparer(resourceGroupName string, name string, parameters PatchSchedulesRequest) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cache/Redis/{name}/patchSchedules/default", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client PatchSchedulesClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client PatchSchedulesClient) CreateOrUpdateResponder(resp *http.Response) (result PatchSchedulesResponse, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete deletes the patching schedule for redis cache. +// +// resourceGroupName is the name of the resource group. name is the name of +// the redis cache. 
+func (client PatchSchedulesClient) Delete(resourceGroupName string, name string) (result autorest.Response, err error) { + req, err := client.DeletePreparer(resourceGroupName, name) + if err != nil { + return result, autorest.NewErrorWithError(err, "redis.PatchSchedulesClient", "Delete", nil, "Failure preparing request") + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "redis.PatchSchedulesClient", "Delete", resp, "Failure sending request") + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "redis.PatchSchedulesClient", "Delete", resp, "Failure responding to request") + } + + return +} + +// DeletePreparer prepares the Delete request. +func (client PatchSchedulesClient) DeletePreparer(resourceGroupName string, name string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cache/Redis/{name}/patchSchedules/default", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client PatchSchedulesClient) DeleteSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. 
+func (client PatchSchedulesClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get gets the patching schedule for redis cache. +// +// resourceGroupName is the name of the resource group. name is the name of +// the redis cache. +func (client PatchSchedulesClient) Get(resourceGroupName string, name string) (result PatchSchedulesResponse, err error) { + req, err := client.GetPreparer(resourceGroupName, name) + if err != nil { + return result, autorest.NewErrorWithError(err, "redis.PatchSchedulesClient", "Get", nil, "Failure preparing request") + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "redis.PatchSchedulesClient", "Get", resp, "Failure sending request") + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "redis.PatchSchedulesClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. 
+func (client PatchSchedulesClient) GetPreparer(resourceGroupName string, name string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cache/Redis/{name}/patchSchedules/default", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client PatchSchedulesClient) GetSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. 
+func (client PatchSchedulesClient) GetResponder(resp *http.Response) (result PatchSchedulesResponse, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/redis/redis.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/redis/redis.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/redis/redis.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/redis/redis.go 2016-10-13 14:32:06.000000000 +0000 @@ -14,18 +14,17 @@ // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 // Changes may cause incorrect behavior and will be lost if the code is // regenerated. import ( - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" "net/http" - "net/url" ) -// Client is the .Net client wrapper for the REST API for Azure Redis Cache -// Management Service +// Client is the rEST API for Azure Redis Cache Service type Client struct { ManagementClient } @@ -46,21 +45,21 @@ // resourceGroupName is the name of the resource group. name is the name of // the redis cache. parameters is parameters supplied to the CreateOrUpdate // redis operation. 
-func (client Client) CreateOrUpdate(resourceGroupName string, name string, parameters CreateOrUpdateParameters) (result ResourceWithAccessKey, ae error) { +func (client Client) CreateOrUpdate(resourceGroupName string, name string, parameters CreateOrUpdateParameters) (result ResourceWithAccessKey, err error) { req, err := client.CreateOrUpdatePreparer(resourceGroupName, name, parameters) if err != nil { - return result, autorest.NewErrorWithError(err, "redis/Client", "CreateOrUpdate", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "redis.Client", "CreateOrUpdate", nil, "Failure preparing request") } resp, err := client.CreateOrUpdateSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "redis/Client", "CreateOrUpdate", "Failure sending request") + return result, autorest.NewErrorWithError(err, "redis.Client", "CreateOrUpdate", resp, "Failure sending request") } result, err = client.CreateOrUpdateResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "redis/Client", "CreateOrUpdate", "Failure responding to request") + err = autorest.NewErrorWithError(err, "redis.Client", "CreateOrUpdate", resp, "Failure responding to request") } return @@ -69,29 +68,29 @@ // CreateOrUpdatePreparer prepares the CreateOrUpdate request. 
func (client Client) CreateOrUpdatePreparer(resourceGroupName string, name string, parameters CreateOrUpdateParameters) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, + preparer := autorest.CreatePreparer( autorest.AsJSON(), autorest.AsPut(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cache/Redis/{name}"), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cache/Redis/{name}", pathParameters), autorest.WithJSON(parameters), - autorest.WithPathParameters(pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the // http.Response Body if it receives an error. func (client Client) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusCreated, http.StatusOK) + return autorest.SendWithSender(client, req) } // CreateOrUpdateResponder handles the response to the CreateOrUpdate request. 
The method always @@ -100,7 +99,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusCreated, http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusCreated, http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -111,21 +110,21 @@ // // resourceGroupName is the name of the resource group. name is the name of // the redis cache. -func (client Client) Delete(resourceGroupName string, name string) (result autorest.Response, ae error) { +func (client Client) Delete(resourceGroupName string, name string) (result autorest.Response, err error) { req, err := client.DeletePreparer(resourceGroupName, name) if err != nil { - return result, autorest.NewErrorWithError(err, "redis/Client", "Delete", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "redis.Client", "Delete", nil, "Failure preparing request") } resp, err := client.DeleteSender(req) if err != nil { result.Response = resp - return result, autorest.NewErrorWithError(err, "redis/Client", "Delete", "Failure sending request") + return result, autorest.NewErrorWithError(err, "redis.Client", "Delete", resp, "Failure sending request") } result, err = client.DeleteResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "redis/Client", "Delete", "Failure responding to request") + err = autorest.NewErrorWithError(err, "redis.Client", "Delete", resp, "Failure responding to request") } return @@ -134,28 +133,27 @@ // DeletePreparer prepares the Delete request. 
func (client Client) DeletePreparer(resourceGroupName string, name string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsDelete(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cache/Redis/{name}"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cache/Redis/{name}", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // DeleteSender sends the Delete request. The method will close the // http.Response Body if it receives an error. func (client Client) DeleteSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK, http.StatusAccepted, http.StatusNotFound) + return autorest.SendWithSender(client, req) } // DeleteResponder handles the response to the Delete request. The method always @@ -164,7 +162,140 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNotFound), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// Export import data into redis cache. This method may poll for completion. 
+// Polling can be canceled by passing the cancel channel argument. The +// channel will be used to cancel polling and any outstanding HTTP requests. +// +// resourceGroupName is the name of the resource group. name is the name of +// the redis cache. parameters is parameters for redis export operation. +func (client Client) Export(resourceGroupName string, name string, parameters ExportRDBParameters, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.ExportPreparer(resourceGroupName, name, parameters, cancel) + if err != nil { + return result, autorest.NewErrorWithError(err, "redis.Client", "Export", nil, "Failure preparing request") + } + + resp, err := client.ExportSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "redis.Client", "Export", resp, "Failure sending request") + } + + result, err = client.ExportResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "redis.Client", "Export", resp, "Failure responding to request") + } + + return +} + +// ExportPreparer prepares the Export request. 
+func (client Client) ExportPreparer(resourceGroupName string, name string, parameters ExportRDBParameters, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cache/Redis/{name}/export", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// ExportSender sends the Export request. The method will close the +// http.Response Body if it receives an error. +func (client Client) ExportSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// ExportResponder handles the response to the Export request. The method always +// closes the http.Response Body. +func (client Client) ExportResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} + +// ForceReboot reboot specified redis node(s). This operation requires write +// permission to the cache resource. There can be potential data loss. +// +// resourceGroupName is the name of the resource group. name is the name of +// the redis cache. parameters is specifies which redis node(s) to reboot. 
+func (client Client) ForceReboot(resourceGroupName string, name string, parameters RebootParameters) (result autorest.Response, err error) { + req, err := client.ForceRebootPreparer(resourceGroupName, name, parameters) + if err != nil { + return result, autorest.NewErrorWithError(err, "redis.Client", "ForceReboot", nil, "Failure preparing request") + } + + resp, err := client.ForceRebootSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "redis.Client", "ForceReboot", resp, "Failure sending request") + } + + result, err = client.ForceRebootResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "redis.Client", "ForceReboot", resp, "Failure responding to request") + } + + return +} + +// ForceRebootPreparer prepares the ForceReboot request. +func (client Client) ForceRebootPreparer(resourceGroupName string, name string, parameters RebootParameters) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cache/Redis/{name}/forceReboot", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ForceRebootSender sends the ForceReboot request. The method will close the +// http.Response Body if it receives an error. 
+func (client Client) ForceRebootSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ForceRebootResponder handles the response to the ForceReboot request. The method always +// closes the http.Response Body. +func (client Client) ForceRebootResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), autorest.ByClosing()) result.Response = resp return @@ -174,21 +305,21 @@ // // resourceGroupName is the name of the resource group. name is the name of // the redis cache. -func (client Client) Get(resourceGroupName string, name string) (result ResourceType, ae error) { +func (client Client) Get(resourceGroupName string, name string) (result ResourceType, err error) { req, err := client.GetPreparer(resourceGroupName, name) if err != nil { - return result, autorest.NewErrorWithError(err, "redis/Client", "Get", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "redis.Client", "Get", nil, "Failure preparing request") } resp, err := client.GetSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "redis/Client", "Get", "Failure sending request") + return result, autorest.NewErrorWithError(err, "redis.Client", "Get", resp, "Failure sending request") } result, err = client.GetResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "redis/Client", "Get", "Failure responding to request") + err = autorest.NewErrorWithError(err, "redis.Client", "Get", resp, "Failure responding to request") } return @@ -197,28 +328,27 @@ // GetPreparer prepares the Get request. 
func (client Client) GetPreparer(resourceGroupName string, name string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cache/Redis/{name}"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cache/Redis/{name}", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client Client) GetSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetResponder handles the response to the Get request. The method always @@ -227,29 +357,97 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} return } +// Import import data into redis cache. This method may poll for completion. +// Polling can be canceled by passing the cancel channel argument. 
The +// channel will be used to cancel polling and any outstanding HTTP requests. +// +// resourceGroupName is the name of the resource group. name is the name of +// the redis cache. parameters is parameters for redis import operation. +func (client Client) Import(resourceGroupName string, name string, parameters ImportRDBParameters, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.ImportPreparer(resourceGroupName, name, parameters, cancel) + if err != nil { + return result, autorest.NewErrorWithError(err, "redis.Client", "Import", nil, "Failure preparing request") + } + + resp, err := client.ImportSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "redis.Client", "Import", resp, "Failure sending request") + } + + result, err = client.ImportResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "redis.Client", "Import", resp, "Failure responding to request") + } + + return +} + +// ImportPreparer prepares the Import request. +func (client Client) ImportPreparer(resourceGroupName string, name string, parameters ImportRDBParameters, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cache/Redis/{name}/import", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// ImportSender sends the Import request. 
The method will close the +// http.Response Body if it receives an error. +func (client Client) ImportSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// ImportResponder handles the response to the Import request. The method always +// closes the http.Response Body. +func (client Client) ImportResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} + // List gets all redis caches in the specified subscription. -func (client Client) List() (result ListResult, ae error) { +func (client Client) List() (result ListResult, err error) { req, err := client.ListPreparer() if err != nil { - return result, autorest.NewErrorWithError(err, "redis/Client", "List", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "redis.Client", "List", nil, "Failure preparing request") } resp, err := client.ListSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "redis/Client", "List", "Failure sending request") + return result, autorest.NewErrorWithError(err, "redis.Client", "List", resp, "Failure sending request") } result, err = client.ListResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "redis/Client", "List", "Failure responding to request") + err = autorest.NewErrorWithError(err, "redis.Client", "List", resp, "Failure responding to request") } return @@ -258,26 +456,25 @@ // ListPreparer prepares the List request. 
func (client Client) ListPreparer() (*http.Request, error) { pathParameters := map[string]interface{}{ - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/providers/Microsoft.Cache/Redis/"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Cache/Redis/", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client Client) ListSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // ListResponder handles the response to the List request. The method always @@ -286,7 +483,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -294,10 +491,10 @@ } // ListNextResults retrieves the next set of results, if any. 
-func (client Client) ListNextResults(lastResults ListResult) (result ListResult, ae error) { +func (client Client) ListNextResults(lastResults ListResult) (result ListResult, err error) { req, err := lastResults.ListResultPreparer() if err != nil { - return result, autorest.NewErrorWithError(err, "redis/Client", "List", "Failure preparing next results request request") + return result, autorest.NewErrorWithError(err, "redis.Client", "List", nil, "Failure preparing next results request request") } if req == nil { return @@ -306,12 +503,12 @@ resp, err := client.ListSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "redis/Client", "List", "Failure sending next results request request") + return result, autorest.NewErrorWithError(err, "redis.Client", "List", resp, "Failure sending next results request request") } result, err = client.ListResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "redis/Client", "List", "Failure responding to next results request request") + err = autorest.NewErrorWithError(err, "redis.Client", "List", resp, "Failure responding to next results request request") } return @@ -320,21 +517,21 @@ // ListByResourceGroup gets all redis caches in a resource group. // // resourceGroupName is the name of the resource group. 
-func (client Client) ListByResourceGroup(resourceGroupName string) (result ListResult, ae error) { +func (client Client) ListByResourceGroup(resourceGroupName string) (result ListResult, err error) { req, err := client.ListByResourceGroupPreparer(resourceGroupName) if err != nil { - return result, autorest.NewErrorWithError(err, "redis/Client", "ListByResourceGroup", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "redis.Client", "ListByResourceGroup", nil, "Failure preparing request") } resp, err := client.ListByResourceGroupSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "redis/Client", "ListByResourceGroup", "Failure sending request") + return result, autorest.NewErrorWithError(err, "redis.Client", "ListByResourceGroup", resp, "Failure sending request") } result, err = client.ListByResourceGroupResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "redis/Client", "ListByResourceGroup", "Failure responding to request") + err = autorest.NewErrorWithError(err, "redis.Client", "ListByResourceGroup", resp, "Failure responding to request") } return @@ -343,27 +540,26 @@ // ListByResourceGroupPreparer prepares the ListByResourceGroup request. 
func (client Client) ListByResourceGroupPreparer(resourceGroupName string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cache/Redis/"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cache/Redis/", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the // http.Response Body if it receives an error. func (client Client) ListByResourceGroupSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always @@ -372,7 +568,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -380,10 +576,10 @@ } // ListByResourceGroupNextResults retrieves the next set of results, if any. 
-func (client Client) ListByResourceGroupNextResults(lastResults ListResult) (result ListResult, ae error) { +func (client Client) ListByResourceGroupNextResults(lastResults ListResult) (result ListResult, err error) { req, err := lastResults.ListResultPreparer() if err != nil { - return result, autorest.NewErrorWithError(err, "redis/Client", "ListByResourceGroup", "Failure preparing next results request request") + return result, autorest.NewErrorWithError(err, "redis.Client", "ListByResourceGroup", nil, "Failure preparing next results request request") } if req == nil { return @@ -392,12 +588,12 @@ resp, err := client.ListByResourceGroupSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "redis/Client", "ListByResourceGroup", "Failure sending next results request request") + return result, autorest.NewErrorWithError(err, "redis.Client", "ListByResourceGroup", resp, "Failure sending next results request request") } result, err = client.ListByResourceGroupResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "redis/Client", "ListByResourceGroup", "Failure responding to next results request request") + err = autorest.NewErrorWithError(err, "redis.Client", "ListByResourceGroup", resp, "Failure responding to next results request request") } return @@ -408,21 +604,21 @@ // // resourceGroupName is the name of the resource group. name is the name of // the redis cache. 
-func (client Client) ListKeys(resourceGroupName string, name string) (result ListKeysResult, ae error) { +func (client Client) ListKeys(resourceGroupName string, name string) (result ListKeysResult, err error) { req, err := client.ListKeysPreparer(resourceGroupName, name) if err != nil { - return result, autorest.NewErrorWithError(err, "redis/Client", "ListKeys", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "redis.Client", "ListKeys", nil, "Failure preparing request") } resp, err := client.ListKeysSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "redis/Client", "ListKeys", "Failure sending request") + return result, autorest.NewErrorWithError(err, "redis.Client", "ListKeys", resp, "Failure sending request") } result, err = client.ListKeysResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "redis/Client", "ListKeys", "Failure responding to request") + err = autorest.NewErrorWithError(err, "redis.Client", "ListKeys", resp, "Failure responding to request") } return @@ -431,28 +627,27 @@ // ListKeysPreparer prepares the ListKeys request. 
func (client Client) ListKeysPreparer(resourceGroupName string, name string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsPost(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cache/Redis/{name}/listKeys"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cache/Redis/{name}/listKeys", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // ListKeysSender sends the ListKeys request. The method will close the // http.Response Body if it receives an error. func (client Client) ListKeysSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // ListKeysResponder handles the response to the ListKeys request. The method always @@ -461,7 +656,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -473,21 +668,21 @@ // // resourceGroupName is the name of the resource group. name is the name of // the redis cache. 
parameters is specifies which key to reset. -func (client Client) RegenerateKey(resourceGroupName string, name string, parameters RegenerateKeyParameters) (result ListKeysResult, ae error) { +func (client Client) RegenerateKey(resourceGroupName string, name string, parameters RegenerateKeyParameters) (result ListKeysResult, err error) { req, err := client.RegenerateKeyPreparer(resourceGroupName, name, parameters) if err != nil { - return result, autorest.NewErrorWithError(err, "redis/Client", "RegenerateKey", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "redis.Client", "RegenerateKey", nil, "Failure preparing request") } resp, err := client.RegenerateKeySender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "redis/Client", "RegenerateKey", "Failure sending request") + return result, autorest.NewErrorWithError(err, "redis.Client", "RegenerateKey", resp, "Failure sending request") } result, err = client.RegenerateKeyResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "redis/Client", "RegenerateKey", "Failure responding to request") + err = autorest.NewErrorWithError(err, "redis.Client", "RegenerateKey", resp, "Failure responding to request") } return @@ -496,29 +691,29 @@ // RegenerateKeyPreparer prepares the RegenerateKey request. 
func (client Client) RegenerateKeyPreparer(resourceGroupName string, name string, parameters RegenerateKeyParameters) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, + preparer := autorest.CreatePreparer( autorest.AsJSON(), autorest.AsPost(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cache/Redis/{name}/regenerateKey"), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cache/Redis/{name}/regenerateKey", pathParameters), autorest.WithJSON(parameters), - autorest.WithPathParameters(pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // RegenerateKeySender sends the RegenerateKey request. The method will close the // http.Response Body if it receives an error. func (client Client) RegenerateKeySender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // RegenerateKeyResponder handles the response to the RegenerateKey request. 
The method always @@ -527,7 +722,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/redis/version.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/redis/version.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/redis/version.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/redis/version.go 2016-10-13 14:32:06.000000000 +0000 @@ -14,7 +14,7 @@ // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 // Changes may cause incorrect behavior and will be lost if the code is // regenerated. @@ -23,18 +23,18 @@ ) const ( - major = "0" - minor = "3" + major = "3" + minor = "1" patch = "0" // Always begin a "tag" with a dash (as per http://semver.org) tag = "-beta" semVerFormat = "%s.%s.%s%s" - userAgentFormat = "Azure-SDK-for-Go/%s;Package arm/%s;API %s" + userAgentFormat = "Azure-SDK-for-Go/%s arm-%s/%s" ) // UserAgent returns the UserAgent string to use when sending http.Requests. func UserAgent() string { - return fmt.Sprintf(userAgentFormat, Version(), "redis", "2015-08-01") + return fmt.Sprintf(userAgentFormat, Version(), "redis", "2016-04-01") } // Version returns the semantic version (see http://semver.org) of the client. 
diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/resources/client.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/resources/client.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/resources/client.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/resources/client.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,486 +0,0 @@ -package resources - -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. -// -// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 -// Changes may cause incorrect behavior and will be lost if the code is -// regenerated. - -import ( - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" - "net/http" - "net/url" -) - -const ( - // APIVersion is the version of the Resources - APIVersion = "2014-04-01-preview" - - // DefaultBaseURI is the default URI used for the service Resources - DefaultBaseURI = "https://management.azure.com" -) - -// ManagementClient is the base client for Resources. -type ManagementClient struct { - autorest.Client - BaseURI string - SubscriptionID string -} - -// New creates an instance of the ManagementClient client. 
-func New(subscriptionID string) ManagementClient { - return NewWithBaseURI(DefaultBaseURI, subscriptionID) -} - -// NewWithBaseURI creates an instance of the ManagementClient client. -func NewWithBaseURI(baseURI string, subscriptionID string) ManagementClient { - return ManagementClient{ - Client: autorest.NewClientWithUserAgent(UserAgent()), - BaseURI: baseURI, - SubscriptionID: subscriptionID, - } -} - -// CheckExistence checks whether resource exists. -// -// resourceGroupName is the name of the resource group. The name is case -// insensitive. resourceProviderNamespace is resource identity. -// parentResourcePath is resource identity. resourceType is resource -// identity. resourceName is resource identity. -func (client ManagementClient) CheckExistence(resourceGroupName string, resourceProviderNamespace string, parentResourcePath string, resourceType string, resourceName string, apiVersion string) (result autorest.Response, ae error) { - req, err := client.CheckExistencePreparer(resourceGroupName, resourceProviderNamespace, parentResourcePath, resourceType, resourceName, apiVersion) - if err != nil { - return result, autorest.NewErrorWithError(err, "resources/ManagementClient", "CheckExistence", "Failure preparing request") - } - - resp, err := client.CheckExistenceSender(req) - if err != nil { - result.Response = resp - return result, autorest.NewErrorWithError(err, "resources/ManagementClient", "CheckExistence", "Failure sending request") - } - - result, err = client.CheckExistenceResponder(resp) - if err != nil { - ae = autorest.NewErrorWithError(err, "resources/ManagementClient", "CheckExistence", "Failure responding to request") - } - - return -} - -// CheckExistencePreparer prepares the CheckExistence request. 
-func (client ManagementClient) CheckExistencePreparer(resourceGroupName string, resourceProviderNamespace string, parentResourcePath string, resourceType string, resourceName string, apiVersion string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "parentResourcePath": parentResourcePath, - "resourceGroupName": url.QueryEscape(resourceGroupName), - "resourceName": url.QueryEscape(resourceName), - "resourceProviderNamespace": url.QueryEscape(resourceProviderNamespace), - "resourceType": resourceType, - "subscriptionId": url.QueryEscape(client.SubscriptionID), - } - - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsHead(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}"), - autorest.WithPathParameters(pathParameters), - autorest.WithQueryParameters(queryParameters)) -} - -// CheckExistenceSender sends the CheckExistence request. The method will close the -// http.Response Body if it receives an error. -func (client ManagementClient) CheckExistenceSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK, http.StatusNoContent, http.StatusNotFound) -} - -// CheckExistenceResponder handles the response to the CheckExistence request. The method always -// closes the http.Response Body. -func (client ManagementClient) CheckExistenceResponder(resp *http.Response) (result autorest.Response, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent, http.StatusNotFound), - autorest.ByClosing()) - result.Response = resp - return -} - -// CreateOrUpdate create a resource. -// -// resourceGroupName is the name of the resource group. 
The name is case -// insensitive. resourceProviderNamespace is resource identity. -// parentResourcePath is resource identity. resourceType is resource -// identity. resourceName is resource identity. parameters is create or -// update resource parameters. -func (client ManagementClient) CreateOrUpdate(resourceGroupName string, resourceProviderNamespace string, parentResourcePath string, resourceType string, resourceName string, apiVersion string, parameters GenericResource) (result GenericResource, ae error) { - req, err := client.CreateOrUpdatePreparer(resourceGroupName, resourceProviderNamespace, parentResourcePath, resourceType, resourceName, apiVersion, parameters) - if err != nil { - return result, autorest.NewErrorWithError(err, "resources/ManagementClient", "CreateOrUpdate", "Failure preparing request") - } - - resp, err := client.CreateOrUpdateSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "resources/ManagementClient", "CreateOrUpdate", "Failure sending request") - } - - result, err = client.CreateOrUpdateResponder(resp) - if err != nil { - ae = autorest.NewErrorWithError(err, "resources/ManagementClient", "CreateOrUpdate", "Failure responding to request") - } - - return -} - -// CreateOrUpdatePreparer prepares the CreateOrUpdate request. 
-func (client ManagementClient) CreateOrUpdatePreparer(resourceGroupName string, resourceProviderNamespace string, parentResourcePath string, resourceType string, resourceName string, apiVersion string, parameters GenericResource) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "parentResourcePath": parentResourcePath, - "resourceGroupName": url.QueryEscape(resourceGroupName), - "resourceName": url.QueryEscape(resourceName), - "resourceProviderNamespace": url.QueryEscape(resourceProviderNamespace), - "resourceType": resourceType, - "subscriptionId": url.QueryEscape(client.SubscriptionID), - } - - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsPut(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}"), - autorest.WithJSON(parameters), - autorest.WithPathParameters(pathParameters), - autorest.WithQueryParameters(queryParameters)) -} - -// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the -// http.Response Body if it receives an error. -func (client ManagementClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusCreated, http.StatusOK) -} - -// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always -// closes the http.Response Body. 
-func (client ManagementClient) CreateOrUpdateResponder(resp *http.Response) (result GenericResource, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusCreated, http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// Delete delete resource and all of its resources. -// -// resourceGroupName is the name of the resource group. The name is case -// insensitive. resourceProviderNamespace is resource identity. -// parentResourcePath is resource identity. resourceType is resource -// identity. resourceName is resource identity. -func (client ManagementClient) Delete(resourceGroupName string, resourceProviderNamespace string, parentResourcePath string, resourceType string, resourceName string, apiVersion string) (result autorest.Response, ae error) { - req, err := client.DeletePreparer(resourceGroupName, resourceProviderNamespace, parentResourcePath, resourceType, resourceName, apiVersion) - if err != nil { - return result, autorest.NewErrorWithError(err, "resources/ManagementClient", "Delete", "Failure preparing request") - } - - resp, err := client.DeleteSender(req) - if err != nil { - result.Response = resp - return result, autorest.NewErrorWithError(err, "resources/ManagementClient", "Delete", "Failure sending request") - } - - result, err = client.DeleteResponder(resp) - if err != nil { - ae = autorest.NewErrorWithError(err, "resources/ManagementClient", "Delete", "Failure responding to request") - } - - return -} - -// DeletePreparer prepares the Delete request. 
-func (client ManagementClient) DeletePreparer(resourceGroupName string, resourceProviderNamespace string, parentResourcePath string, resourceType string, resourceName string, apiVersion string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "parentResourcePath": parentResourcePath, - "resourceGroupName": url.QueryEscape(resourceGroupName), - "resourceName": url.QueryEscape(resourceName), - "resourceProviderNamespace": url.QueryEscape(resourceProviderNamespace), - "resourceType": resourceType, - "subscriptionId": url.QueryEscape(client.SubscriptionID), - } - - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsDelete(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}"), - autorest.WithPathParameters(pathParameters), - autorest.WithQueryParameters(queryParameters)) -} - -// DeleteSender sends the Delete request. The method will close the -// http.Response Body if it receives an error. -func (client ManagementClient) DeleteSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK, http.StatusNoContent, http.StatusAccepted) -} - -// DeleteResponder handles the response to the Delete request. The method always -// closes the http.Response Body. -func (client ManagementClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent, http.StatusAccepted), - autorest.ByClosing()) - result.Response = resp - return -} - -// Get returns a resource belonging to a resource group. -// -// resourceGroupName is the name of the resource group. The name is case -// insensitive. 
resourceProviderNamespace is resource identity. -// parentResourcePath is resource identity. resourceType is resource -// identity. resourceName is resource identity. -func (client ManagementClient) Get(resourceGroupName string, resourceProviderNamespace string, parentResourcePath string, resourceType string, resourceName string, apiVersion string) (result GenericResource, ae error) { - req, err := client.GetPreparer(resourceGroupName, resourceProviderNamespace, parentResourcePath, resourceType, resourceName, apiVersion) - if err != nil { - return result, autorest.NewErrorWithError(err, "resources/ManagementClient", "Get", "Failure preparing request") - } - - resp, err := client.GetSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "resources/ManagementClient", "Get", "Failure sending request") - } - - result, err = client.GetResponder(resp) - if err != nil { - ae = autorest.NewErrorWithError(err, "resources/ManagementClient", "Get", "Failure responding to request") - } - - return -} - -// GetPreparer prepares the Get request. 
-func (client ManagementClient) GetPreparer(resourceGroupName string, resourceProviderNamespace string, parentResourcePath string, resourceType string, resourceName string, apiVersion string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "parentResourcePath": parentResourcePath, - "resourceGroupName": url.QueryEscape(resourceGroupName), - "resourceName": url.QueryEscape(resourceName), - "resourceProviderNamespace": url.QueryEscape(resourceProviderNamespace), - "resourceType": resourceType, - "subscriptionId": url.QueryEscape(client.SubscriptionID), - } - - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}"), - autorest.WithPathParameters(pathParameters), - autorest.WithQueryParameters(queryParameters)) -} - -// GetSender sends the Get request. The method will close the -// http.Response Body if it receives an error. -func (client ManagementClient) GetSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK, http.StatusNoContent) -} - -// GetResponder handles the response to the Get request. The method always -// closes the http.Response Body. -func (client ManagementClient) GetResponder(resp *http.Response) (result GenericResource, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// List get all of the resources under a subscription. -// -// filter is the filter to apply on the operation. top is query parameters. 
If -// null is passed returns all resource groups. -func (client ManagementClient) List(filter string, top *int) (result ResourceListResult, ae error) { - req, err := client.ListPreparer(filter, top) - if err != nil { - return result, autorest.NewErrorWithError(err, "resources/ManagementClient", "List", "Failure preparing request") - } - - resp, err := client.ListSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "resources/ManagementClient", "List", "Failure sending request") - } - - result, err = client.ListResponder(resp) - if err != nil { - ae = autorest.NewErrorWithError(err, "resources/ManagementClient", "List", "Failure responding to request") - } - - return -} - -// ListPreparer prepares the List request. -func (client ManagementClient) ListPreparer(filter string, top *int) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "subscriptionId": url.QueryEscape(client.SubscriptionID), - } - - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - if len(filter) > 0 { - queryParameters["$filter"] = filter - } - if top != nil { - queryParameters["$top"] = top - } - - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resources"), - autorest.WithPathParameters(pathParameters), - autorest.WithQueryParameters(queryParameters)) -} - -// ListSender sends the List request. The method will close the -// http.Response Body if it receives an error. -func (client ManagementClient) ListSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) -} - -// ListResponder handles the response to the List request. The method always -// closes the http.Response Body. 
-func (client ManagementClient) ListResponder(resp *http.Response) (result ResourceListResult, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// ListNextResults retrieves the next set of results, if any. -func (client ManagementClient) ListNextResults(lastResults ResourceListResult) (result ResourceListResult, ae error) { - req, err := lastResults.ResourceListResultPreparer() - if err != nil { - return result, autorest.NewErrorWithError(err, "resources/ManagementClient", "List", "Failure preparing next results request request") - } - if req == nil { - return - } - - resp, err := client.ListSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "resources/ManagementClient", "List", "Failure sending next results request request") - } - - result, err = client.ListResponder(resp) - if err != nil { - ae = autorest.NewErrorWithError(err, "resources/ManagementClient", "List", "Failure responding to next results request request") - } - - return -} - -// MoveResources begin moving resources.To determine whether the operation has -// finished processing the request, call GetLongRunningOperationStatus. -// -// sourceResourceGroupName is source resource group name. parameters is move -// resources' parameters. 
-func (client ManagementClient) MoveResources(sourceResourceGroupName string, parameters MoveInfo) (result autorest.Response, ae error) { - req, err := client.MoveResourcesPreparer(sourceResourceGroupName, parameters) - if err != nil { - return result, autorest.NewErrorWithError(err, "resources/ManagementClient", "MoveResources", "Failure preparing request") - } - - resp, err := client.MoveResourcesSender(req) - if err != nil { - result.Response = resp - return result, autorest.NewErrorWithError(err, "resources/ManagementClient", "MoveResources", "Failure sending request") - } - - result, err = client.MoveResourcesResponder(resp) - if err != nil { - ae = autorest.NewErrorWithError(err, "resources/ManagementClient", "MoveResources", "Failure responding to request") - } - - return -} - -// MoveResourcesPreparer prepares the MoveResources request. -func (client ManagementClient) MoveResourcesPreparer(sourceResourceGroupName string, parameters MoveInfo) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "sourceResourceGroupName": url.QueryEscape(sourceResourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - } - - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsPost(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{sourceResourceGroupName}/moveResources"), - autorest.WithJSON(parameters), - autorest.WithPathParameters(pathParameters), - autorest.WithQueryParameters(queryParameters)) -} - -// MoveResourcesSender sends the MoveResources request. The method will close the -// http.Response Body if it receives an error. 
-func (client ManagementClient) MoveResourcesSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK, http.StatusAccepted, http.StatusNoContent) -} - -// MoveResourcesResponder handles the response to the MoveResources request. The method always -// closes the http.Response Body. -func (client ManagementClient) MoveResourcesResponder(resp *http.Response) (result autorest.Response, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), - autorest.ByClosing()) - result.Response = resp - return -} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/resources/deploymentoperations.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/resources/deploymentoperations.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/resources/deploymentoperations.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/resources/deploymentoperations.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,201 +0,0 @@ -package resources - -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. -// -// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 -// Changes may cause incorrect behavior and will be lost if the code is -// regenerated. 
- -import ( - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" - "net/http" - "net/url" -) - -// DeploymentOperationsClient is the client for the DeploymentOperations -// methods of the Resources service. -type DeploymentOperationsClient struct { - ManagementClient -} - -// NewDeploymentOperationsClient creates an instance of the -// DeploymentOperationsClient client. -func NewDeploymentOperationsClient(subscriptionID string) DeploymentOperationsClient { - return NewDeploymentOperationsClientWithBaseURI(DefaultBaseURI, subscriptionID) -} - -// NewDeploymentOperationsClientWithBaseURI creates an instance of the -// DeploymentOperationsClient client. -func NewDeploymentOperationsClientWithBaseURI(baseURI string, subscriptionID string) DeploymentOperationsClient { - return DeploymentOperationsClient{NewWithBaseURI(baseURI, subscriptionID)} -} - -// Get get a list of deployments operations. -// -// resourceGroupName is the name of the resource group. The name is case -// insensitive. deploymentName is the name of the deployment. operationID is -// operation Id. -func (client DeploymentOperationsClient) Get(resourceGroupName string, deploymentName string, operationID string) (result DeploymentOperation, ae error) { - req, err := client.GetPreparer(resourceGroupName, deploymentName, operationID) - if err != nil { - return result, autorest.NewErrorWithError(err, "resources/DeploymentOperationsClient", "Get", "Failure preparing request") - } - - resp, err := client.GetSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "resources/DeploymentOperationsClient", "Get", "Failure sending request") - } - - result, err = client.GetResponder(resp) - if err != nil { - ae = autorest.NewErrorWithError(err, "resources/DeploymentOperationsClient", "Get", "Failure responding to request") - } - - return -} - -// GetPreparer prepares the Get request. 
-func (client DeploymentOperationsClient) GetPreparer(resourceGroupName string, deploymentName string, operationID string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "deploymentName": url.QueryEscape(deploymentName), - "operationId": url.QueryEscape(operationID), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - } - - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/deployments/{deploymentName}/operations/{operationId}"), - autorest.WithPathParameters(pathParameters), - autorest.WithQueryParameters(queryParameters)) -} - -// GetSender sends the Get request. The method will close the -// http.Response Body if it receives an error. -func (client DeploymentOperationsClient) GetSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) -} - -// GetResponder handles the response to the Get request. The method always -// closes the http.Response Body. -func (client DeploymentOperationsClient) GetResponder(resp *http.Response) (result DeploymentOperation, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// List gets a list of deployments operations. -// -// resourceGroupName is the name of the resource group. The name is case -// insensitive. deploymentName is the name of the deployment. top is query -// parameters. 
-func (client DeploymentOperationsClient) List(resourceGroupName string, deploymentName string, top *int) (result DeploymentOperationsListResult, ae error) { - req, err := client.ListPreparer(resourceGroupName, deploymentName, top) - if err != nil { - return result, autorest.NewErrorWithError(err, "resources/DeploymentOperationsClient", "List", "Failure preparing request") - } - - resp, err := client.ListSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "resources/DeploymentOperationsClient", "List", "Failure sending request") - } - - result, err = client.ListResponder(resp) - if err != nil { - ae = autorest.NewErrorWithError(err, "resources/DeploymentOperationsClient", "List", "Failure responding to request") - } - - return -} - -// ListPreparer prepares the List request. -func (client DeploymentOperationsClient) ListPreparer(resourceGroupName string, deploymentName string, top *int) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "deploymentName": url.QueryEscape(deploymentName), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - } - - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - if top != nil { - queryParameters["$top"] = top - } - - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/deployments/{deploymentName}/operations"), - autorest.WithPathParameters(pathParameters), - autorest.WithQueryParameters(queryParameters)) -} - -// ListSender sends the List request. The method will close the -// http.Response Body if it receives an error. 
-func (client DeploymentOperationsClient) ListSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) -} - -// ListResponder handles the response to the List request. The method always -// closes the http.Response Body. -func (client DeploymentOperationsClient) ListResponder(resp *http.Response) (result DeploymentOperationsListResult, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// ListNextResults retrieves the next set of results, if any. -func (client DeploymentOperationsClient) ListNextResults(lastResults DeploymentOperationsListResult) (result DeploymentOperationsListResult, ae error) { - req, err := lastResults.DeploymentOperationsListResultPreparer() - if err != nil { - return result, autorest.NewErrorWithError(err, "resources/DeploymentOperationsClient", "List", "Failure preparing next results request request") - } - if req == nil { - return - } - - resp, err := client.ListSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "resources/DeploymentOperationsClient", "List", "Failure sending next results request request") - } - - result, err = client.ListResponder(resp) - if err != nil { - ae = autorest.NewErrorWithError(err, "resources/DeploymentOperationsClient", "List", "Failure responding to next results request request") - } - - return -} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/resources/deployments.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/resources/deployments.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/resources/deployments.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/resources/deployments.go 
1970-01-01 00:00:00.000000000 +0000 @@ -1,522 +0,0 @@ -package resources - -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. -// -// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 -// Changes may cause incorrect behavior and will be lost if the code is -// regenerated. - -import ( - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" - "net/http" - "net/url" -) - -// DeploymentsClient is the client for the Deployments methods of the -// Resources service. -type DeploymentsClient struct { - ManagementClient -} - -// NewDeploymentsClient creates an instance of the DeploymentsClient client. -func NewDeploymentsClient(subscriptionID string) DeploymentsClient { - return NewDeploymentsClientWithBaseURI(DefaultBaseURI, subscriptionID) -} - -// NewDeploymentsClientWithBaseURI creates an instance of the -// DeploymentsClient client. -func NewDeploymentsClientWithBaseURI(baseURI string, subscriptionID string) DeploymentsClient { - return DeploymentsClient{NewWithBaseURI(baseURI, subscriptionID)} -} - -// Cancel cancel a currently running template deployment. -// -// resourceGroupName is the name of the resource group. The name is case -// insensitive. deploymentName is the name of the deployment. 
-func (client DeploymentsClient) Cancel(resourceGroupName string, deploymentName string) (result autorest.Response, ae error) { - req, err := client.CancelPreparer(resourceGroupName, deploymentName) - if err != nil { - return result, autorest.NewErrorWithError(err, "resources/DeploymentsClient", "Cancel", "Failure preparing request") - } - - resp, err := client.CancelSender(req) - if err != nil { - result.Response = resp - return result, autorest.NewErrorWithError(err, "resources/DeploymentsClient", "Cancel", "Failure sending request") - } - - result, err = client.CancelResponder(resp) - if err != nil { - ae = autorest.NewErrorWithError(err, "resources/DeploymentsClient", "Cancel", "Failure responding to request") - } - - return -} - -// CancelPreparer prepares the Cancel request. -func (client DeploymentsClient) CancelPreparer(resourceGroupName string, deploymentName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "deploymentName": url.QueryEscape(deploymentName), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - } - - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsPost(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/deployments/{deploymentName}/cancel"), - autorest.WithPathParameters(pathParameters), - autorest.WithQueryParameters(queryParameters)) -} - -// CancelSender sends the Cancel request. The method will close the -// http.Response Body if it receives an error. -func (client DeploymentsClient) CancelSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK, http.StatusNoContent) -} - -// CancelResponder handles the response to the Cancel request. The method always -// closes the http.Response Body. 
-func (client DeploymentsClient) CancelResponder(resp *http.Response) (result autorest.Response, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), - autorest.ByClosing()) - result.Response = resp - return -} - -// CheckExistence checks whether deployment exists. -// -// resourceGroupName is the name of the resource group to check. The name is -// case insensitive. deploymentName is the name of the deployment. -func (client DeploymentsClient) CheckExistence(resourceGroupName string, deploymentName string) (result autorest.Response, ae error) { - req, err := client.CheckExistencePreparer(resourceGroupName, deploymentName) - if err != nil { - return result, autorest.NewErrorWithError(err, "resources/DeploymentsClient", "CheckExistence", "Failure preparing request") - } - - resp, err := client.CheckExistenceSender(req) - if err != nil { - result.Response = resp - return result, autorest.NewErrorWithError(err, "resources/DeploymentsClient", "CheckExistence", "Failure sending request") - } - - result, err = client.CheckExistenceResponder(resp) - if err != nil { - ae = autorest.NewErrorWithError(err, "resources/DeploymentsClient", "CheckExistence", "Failure responding to request") - } - - return -} - -// CheckExistencePreparer prepares the CheckExistence request. 
-func (client DeploymentsClient) CheckExistencePreparer(resourceGroupName string, deploymentName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "deploymentName": url.QueryEscape(deploymentName), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - } - - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsHead(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/deployments/{deploymentName}"), - autorest.WithPathParameters(pathParameters), - autorest.WithQueryParameters(queryParameters)) -} - -// CheckExistenceSender sends the CheckExistence request. The method will close the -// http.Response Body if it receives an error. -func (client DeploymentsClient) CheckExistenceSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK, http.StatusNoContent, http.StatusNotFound) -} - -// CheckExistenceResponder handles the response to the CheckExistence request. The method always -// closes the http.Response Body. -func (client DeploymentsClient) CheckExistenceResponder(resp *http.Response) (result autorest.Response, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent, http.StatusNotFound), - autorest.ByClosing()) - result.Response = resp - return -} - -// CreateOrUpdate create a named template deployment using a template. -// -// resourceGroupName is the name of the resource group. The name is case -// insensitive. deploymentName is the name of the deployment. parameters is -// additional parameters supplied to the operation. 
-func (client DeploymentsClient) CreateOrUpdate(resourceGroupName string, deploymentName string, parameters Deployment) (result DeploymentExtended, ae error) { - req, err := client.CreateOrUpdatePreparer(resourceGroupName, deploymentName, parameters) - if err != nil { - return result, autorest.NewErrorWithError(err, "resources/DeploymentsClient", "CreateOrUpdate", "Failure preparing request") - } - - resp, err := client.CreateOrUpdateSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "resources/DeploymentsClient", "CreateOrUpdate", "Failure sending request") - } - - result, err = client.CreateOrUpdateResponder(resp) - if err != nil { - ae = autorest.NewErrorWithError(err, "resources/DeploymentsClient", "CreateOrUpdate", "Failure responding to request") - } - - return -} - -// CreateOrUpdatePreparer prepares the CreateOrUpdate request. -func (client DeploymentsClient) CreateOrUpdatePreparer(resourceGroupName string, deploymentName string, parameters Deployment) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "deploymentName": url.QueryEscape(deploymentName), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - } - - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsPut(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/deployments/{deploymentName}"), - autorest.WithJSON(parameters), - autorest.WithPathParameters(pathParameters), - autorest.WithQueryParameters(queryParameters)) -} - -// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the -// http.Response Body if it receives an error. 
-func (client DeploymentsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK, http.StatusCreated) -} - -// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always -// closes the http.Response Body. -func (client DeploymentsClient) CreateOrUpdateResponder(resp *http.Response) (result DeploymentExtended, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// Delete begin deleting deployment.To determine whether the operation has -// finished processing the request, call GetLongRunningOperationStatus. -// -// resourceGroupName is the name of the resource group. The name is case -// insensitive. deploymentName is the name of the deployment to be deleted. -func (client DeploymentsClient) Delete(resourceGroupName string, deploymentName string) (result autorest.Response, ae error) { - req, err := client.DeletePreparer(resourceGroupName, deploymentName) - if err != nil { - return result, autorest.NewErrorWithError(err, "resources/DeploymentsClient", "Delete", "Failure preparing request") - } - - resp, err := client.DeleteSender(req) - if err != nil { - result.Response = resp - return result, autorest.NewErrorWithError(err, "resources/DeploymentsClient", "Delete", "Failure sending request") - } - - result, err = client.DeleteResponder(resp) - if err != nil { - ae = autorest.NewErrorWithError(err, "resources/DeploymentsClient", "Delete", "Failure responding to request") - } - - return -} - -// DeletePreparer prepares the Delete request. 
-func (client DeploymentsClient) DeletePreparer(resourceGroupName string, deploymentName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "deploymentName": url.QueryEscape(deploymentName), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - } - - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsDelete(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/deployments/{deploymentName}"), - autorest.WithPathParameters(pathParameters), - autorest.WithQueryParameters(queryParameters)) -} - -// DeleteSender sends the Delete request. The method will close the -// http.Response Body if it receives an error. -func (client DeploymentsClient) DeleteSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK, http.StatusAccepted, http.StatusNoContent) -} - -// DeleteResponder handles the response to the Delete request. The method always -// closes the http.Response Body. -func (client DeploymentsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), - autorest.ByClosing()) - result.Response = resp - return -} - -// Get get a deployment. -// -// resourceGroupName is the name of the resource group to get. The name is -// case insensitive. deploymentName is the name of the deployment. 
-func (client DeploymentsClient) Get(resourceGroupName string, deploymentName string) (result DeploymentExtended, ae error) { - req, err := client.GetPreparer(resourceGroupName, deploymentName) - if err != nil { - return result, autorest.NewErrorWithError(err, "resources/DeploymentsClient", "Get", "Failure preparing request") - } - - resp, err := client.GetSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "resources/DeploymentsClient", "Get", "Failure sending request") - } - - result, err = client.GetResponder(resp) - if err != nil { - ae = autorest.NewErrorWithError(err, "resources/DeploymentsClient", "Get", "Failure responding to request") - } - - return -} - -// GetPreparer prepares the Get request. -func (client DeploymentsClient) GetPreparer(resourceGroupName string, deploymentName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "deploymentName": url.QueryEscape(deploymentName), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - } - - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/deployments/{deploymentName}"), - autorest.WithPathParameters(pathParameters), - autorest.WithQueryParameters(queryParameters)) -} - -// GetSender sends the Get request. The method will close the -// http.Response Body if it receives an error. -func (client DeploymentsClient) GetSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) -} - -// GetResponder handles the response to the Get request. The method always -// closes the http.Response Body. 
-func (client DeploymentsClient) GetResponder(resp *http.Response) (result DeploymentExtended, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// List get a list of deployments. -// -// resourceGroupName is the name of the resource group to filter by. The name -// is case insensitive. filter is the filter to apply on the operation. top -// is query parameters. If null is passed returns all deployments. -func (client DeploymentsClient) List(resourceGroupName string, filter string, top *int) (result DeploymentListResult, ae error) { - req, err := client.ListPreparer(resourceGroupName, filter, top) - if err != nil { - return result, autorest.NewErrorWithError(err, "resources/DeploymentsClient", "List", "Failure preparing request") - } - - resp, err := client.ListSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "resources/DeploymentsClient", "List", "Failure sending request") - } - - result, err = client.ListResponder(resp) - if err != nil { - ae = autorest.NewErrorWithError(err, "resources/DeploymentsClient", "List", "Failure responding to request") - } - - return -} - -// ListPreparer prepares the List request. 
-func (client DeploymentsClient) ListPreparer(resourceGroupName string, filter string, top *int) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - } - - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - if len(filter) > 0 { - queryParameters["$filter"] = filter - } - if top != nil { - queryParameters["$top"] = top - } - - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/deployments/"), - autorest.WithPathParameters(pathParameters), - autorest.WithQueryParameters(queryParameters)) -} - -// ListSender sends the List request. The method will close the -// http.Response Body if it receives an error. -func (client DeploymentsClient) ListSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) -} - -// ListResponder handles the response to the List request. The method always -// closes the http.Response Body. -func (client DeploymentsClient) ListResponder(resp *http.Response) (result DeploymentListResult, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// ListNextResults retrieves the next set of results, if any. 
-func (client DeploymentsClient) ListNextResults(lastResults DeploymentListResult) (result DeploymentListResult, ae error) { - req, err := lastResults.DeploymentListResultPreparer() - if err != nil { - return result, autorest.NewErrorWithError(err, "resources/DeploymentsClient", "List", "Failure preparing next results request request") - } - if req == nil { - return - } - - resp, err := client.ListSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "resources/DeploymentsClient", "List", "Failure sending next results request request") - } - - result, err = client.ListResponder(resp) - if err != nil { - ae = autorest.NewErrorWithError(err, "resources/DeploymentsClient", "List", "Failure responding to next results request request") - } - - return -} - -// Validate validate a deployment template. -// -// resourceGroupName is the name of the resource group. The name is case -// insensitive. deploymentName is the name of the deployment. parameters is -// deployment to validate. -func (client DeploymentsClient) Validate(resourceGroupName string, deploymentName string, parameters Deployment) (result DeploymentValidateResult, ae error) { - req, err := client.ValidatePreparer(resourceGroupName, deploymentName, parameters) - if err != nil { - return result, autorest.NewErrorWithError(err, "resources/DeploymentsClient", "Validate", "Failure preparing request") - } - - resp, err := client.ValidateSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "resources/DeploymentsClient", "Validate", "Failure sending request") - } - - result, err = client.ValidateResponder(resp) - if err != nil { - ae = autorest.NewErrorWithError(err, "resources/DeploymentsClient", "Validate", "Failure responding to request") - } - - return -} - -// ValidatePreparer prepares the Validate request. 
-func (client DeploymentsClient) ValidatePreparer(resourceGroupName string, deploymentName string, parameters Deployment) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "deploymentName": url.QueryEscape(deploymentName), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - } - - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsPost(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/microsoft.resources/deployments/{deploymentName}/validate"), - autorest.WithJSON(parameters), - autorest.WithPathParameters(pathParameters), - autorest.WithQueryParameters(queryParameters)) -} - -// ValidateSender sends the Validate request. The method will close the -// http.Response Body if it receives an error. -func (client DeploymentsClient) ValidateSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK, http.StatusBadRequest) -} - -// ValidateResponder handles the response to the Validate request. The method always -// closes the http.Response Body. 
-func (client DeploymentsClient) ValidateResponder(resp *http.Response) (result DeploymentValidateResult, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusBadRequest), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/resources/features/client.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/resources/features/client.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/resources/features/client.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/resources/features/client.go 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,353 @@ +// Package features implements the Azure ARM Features service API version +// 2015-12-01. +// +package features + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. 
+ +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "net/http" +) + +const ( + // APIVersion is the version of the Features + APIVersion = "2015-12-01" + + // DefaultBaseURI is the default URI used for the service Features + DefaultBaseURI = "https://management.azure.com" +) + +// ManagementClient is the base client for Features. +type ManagementClient struct { + autorest.Client + BaseURI string + APIVersion string + SubscriptionID string +} + +// New creates an instance of the ManagementClient client. +func New(subscriptionID string) ManagementClient { + return NewWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewWithBaseURI creates an instance of the ManagementClient client. +func NewWithBaseURI(baseURI string, subscriptionID string) ManagementClient { + return ManagementClient{ + Client: autorest.NewClientWithUserAgent(UserAgent()), + BaseURI: baseURI, + APIVersion: APIVersion, + SubscriptionID: subscriptionID, + } +} + +// Get get all features under the subscription. +// +// resourceProviderNamespace is namespace of the resource provider. +// featureName is previewed feature name in the resource provider. +func (client ManagementClient) Get(resourceProviderNamespace string, featureName string) (result FeatureResult, err error) { + req, err := client.GetPreparer(resourceProviderNamespace, featureName) + if err != nil { + return result, autorest.NewErrorWithError(err, "features.ManagementClient", "Get", nil, "Failure preparing request") + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "features.ManagementClient", "Get", resp, "Failure sending request") + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "features.ManagementClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. 
+func (client ManagementClient) GetPreparer(resourceProviderNamespace string, featureName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "featureName": autorest.Encode("path", featureName), + "resourceProviderNamespace": autorest.Encode("path", resourceProviderNamespace), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Features/providers/{resourceProviderNamespace}/features/{featureName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client ManagementClient) GetSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client ManagementClient) GetResponder(resp *http.Response) (result FeatureResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List gets a list of previewed features of a resource provider. +// +// resourceProviderNamespace is the namespace of the resource provider. 
+func (client ManagementClient) List(resourceProviderNamespace string) (result FeatureOperationsListResult, err error) { + req, err := client.ListPreparer(resourceProviderNamespace) + if err != nil { + return result, autorest.NewErrorWithError(err, "features.ManagementClient", "List", nil, "Failure preparing request") + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "features.ManagementClient", "List", resp, "Failure sending request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "features.ManagementClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client ManagementClient) ListPreparer(resourceProviderNamespace string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceProviderNamespace": autorest.Encode("path", resourceProviderNamespace), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Features/providers/{resourceProviderNamespace}/features", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client ManagementClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. 
+func (client ManagementClient) ListResponder(resp *http.Response) (result FeatureOperationsListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListNextResults retrieves the next set of results, if any. +func (client ManagementClient) ListNextResults(lastResults FeatureOperationsListResult) (result FeatureOperationsListResult, err error) { + req, err := lastResults.FeatureOperationsListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "features.ManagementClient", "List", nil, "Failure preparing next results request request") + } + if req == nil { + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "features.ManagementClient", "List", resp, "Failure sending next results request request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "features.ManagementClient", "List", resp, "Failure responding to next results request request") + } + + return +} + +// ListAll gets a list of previewed features for all the providers in the +// current subscription. 
+func (client ManagementClient) ListAll() (result FeatureOperationsListResult, err error) { + req, err := client.ListAllPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "features.ManagementClient", "ListAll", nil, "Failure preparing request") + } + + resp, err := client.ListAllSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "features.ManagementClient", "ListAll", resp, "Failure sending request") + } + + result, err = client.ListAllResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "features.ManagementClient", "ListAll", resp, "Failure responding to request") + } + + return +} + +// ListAllPreparer prepares the ListAll request. +func (client ManagementClient) ListAllPreparer() (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Features/features", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListAllSender sends the ListAll request. The method will close the +// http.Response Body if it receives an error. +func (client ManagementClient) ListAllSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListAllResponder handles the response to the ListAll request. The method always +// closes the http.Response Body. 
+func (client ManagementClient) ListAllResponder(resp *http.Response) (result FeatureOperationsListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListAllNextResults retrieves the next set of results, if any. +func (client ManagementClient) ListAllNextResults(lastResults FeatureOperationsListResult) (result FeatureOperationsListResult, err error) { + req, err := lastResults.FeatureOperationsListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "features.ManagementClient", "ListAll", nil, "Failure preparing next results request request") + } + if req == nil { + return + } + + resp, err := client.ListAllSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "features.ManagementClient", "ListAll", resp, "Failure sending next results request request") + } + + result, err = client.ListAllResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "features.ManagementClient", "ListAll", resp, "Failure responding to next results request request") + } + + return +} + +// Register registers for a previewed feature of a resource provider. +// +// resourceProviderNamespace is namespace of the resource provider. +// featureName is previewed feature name in the resource provider. 
+func (client ManagementClient) Register(resourceProviderNamespace string, featureName string) (result FeatureResult, err error) { + req, err := client.RegisterPreparer(resourceProviderNamespace, featureName) + if err != nil { + return result, autorest.NewErrorWithError(err, "features.ManagementClient", "Register", nil, "Failure preparing request") + } + + resp, err := client.RegisterSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "features.ManagementClient", "Register", resp, "Failure sending request") + } + + result, err = client.RegisterResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "features.ManagementClient", "Register", resp, "Failure responding to request") + } + + return +} + +// RegisterPreparer prepares the Register request. +func (client ManagementClient) RegisterPreparer(resourceProviderNamespace string, featureName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "featureName": autorest.Encode("path", featureName), + "resourceProviderNamespace": autorest.Encode("path", resourceProviderNamespace), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Features/providers/{resourceProviderNamespace}/features/{featureName}/register", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// RegisterSender sends the Register request. The method will close the +// http.Response Body if it receives an error. 
+func (client ManagementClient) RegisterSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// RegisterResponder handles the response to the Register request. The method always +// closes the http.Response Body. +func (client ManagementClient) RegisterResponder(resp *http.Response) (result FeatureResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/resources/features/features.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/resources/features/features.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/resources/features/features.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/resources/features/features.go 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,334 @@ +package features + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. 
+ +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "net/http" +) + +// Client is the client for the Features methods of the Features service. +type Client struct { + ManagementClient +} + +// NewClient creates an instance of the Client client. +func NewClient(subscriptionID string) Client { + return NewClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewClientWithBaseURI creates an instance of the Client client. +func NewClientWithBaseURI(baseURI string, subscriptionID string) Client { + return Client{NewWithBaseURI(baseURI, subscriptionID)} +} + +// Get get all features under the subscription. +// +// resourceProviderNamespace is namespace of the resource provider. +// featureName is previewed feature name in the resource provider. +func (client Client) Get(resourceProviderNamespace string, featureName string) (result FeatureResult, err error) { + req, err := client.GetPreparer(resourceProviderNamespace, featureName) + if err != nil { + return result, autorest.NewErrorWithError(err, "features.Client", "Get", nil, "Failure preparing request") + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "features.Client", "Get", resp, "Failure sending request") + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "features.Client", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. 
+func (client Client) GetPreparer(resourceProviderNamespace string, featureName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "featureName": autorest.Encode("path", featureName), + "resourceProviderNamespace": autorest.Encode("path", resourceProviderNamespace), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Features/providers/{resourceProviderNamespace}/features/{featureName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client Client) GetSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client Client) GetResponder(resp *http.Response) (result FeatureResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List gets a list of previewed features of a resource provider. +// +// resourceProviderNamespace is the namespace of the resource provider. 
+func (client Client) List(resourceProviderNamespace string) (result FeatureOperationsListResult, err error) { + req, err := client.ListPreparer(resourceProviderNamespace) + if err != nil { + return result, autorest.NewErrorWithError(err, "features.Client", "List", nil, "Failure preparing request") + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "features.Client", "List", resp, "Failure sending request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "features.Client", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client Client) ListPreparer(resourceProviderNamespace string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceProviderNamespace": autorest.Encode("path", resourceProviderNamespace), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Features/providers/{resourceProviderNamespace}/features", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client Client) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. 
+func (client Client) ListResponder(resp *http.Response) (result FeatureOperationsListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListNextResults retrieves the next set of results, if any. +func (client Client) ListNextResults(lastResults FeatureOperationsListResult) (result FeatureOperationsListResult, err error) { + req, err := lastResults.FeatureOperationsListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "features.Client", "List", nil, "Failure preparing next results request request") + } + if req == nil { + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "features.Client", "List", resp, "Failure sending next results request request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "features.Client", "List", resp, "Failure responding to next results request request") + } + + return +} + +// ListAll gets a list of previewed features for all the providers in the +// current subscription. 
+func (client Client) ListAll() (result FeatureOperationsListResult, err error) { + req, err := client.ListAllPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "features.Client", "ListAll", nil, "Failure preparing request") + } + + resp, err := client.ListAllSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "features.Client", "ListAll", resp, "Failure sending request") + } + + result, err = client.ListAllResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "features.Client", "ListAll", resp, "Failure responding to request") + } + + return +} + +// ListAllPreparer prepares the ListAll request. +func (client Client) ListAllPreparer() (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Features/features", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListAllSender sends the ListAll request. The method will close the +// http.Response Body if it receives an error. +func (client Client) ListAllSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListAllResponder handles the response to the ListAll request. The method always +// closes the http.Response Body. 
+func (client Client) ListAllResponder(resp *http.Response) (result FeatureOperationsListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListAllNextResults retrieves the next set of results, if any. +func (client Client) ListAllNextResults(lastResults FeatureOperationsListResult) (result FeatureOperationsListResult, err error) { + req, err := lastResults.FeatureOperationsListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "features.Client", "ListAll", nil, "Failure preparing next results request request") + } + if req == nil { + return + } + + resp, err := client.ListAllSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "features.Client", "ListAll", resp, "Failure sending next results request request") + } + + result, err = client.ListAllResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "features.Client", "ListAll", resp, "Failure responding to next results request request") + } + + return +} + +// Register registers for a previewed feature of a resource provider. +// +// resourceProviderNamespace is namespace of the resource provider. +// featureName is previewed feature name in the resource provider. 
+func (client Client) Register(resourceProviderNamespace string, featureName string) (result FeatureResult, err error) { + req, err := client.RegisterPreparer(resourceProviderNamespace, featureName) + if err != nil { + return result, autorest.NewErrorWithError(err, "features.Client", "Register", nil, "Failure preparing request") + } + + resp, err := client.RegisterSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "features.Client", "Register", resp, "Failure sending request") + } + + result, err = client.RegisterResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "features.Client", "Register", resp, "Failure responding to request") + } + + return +} + +// RegisterPreparer prepares the Register request. +func (client Client) RegisterPreparer(resourceProviderNamespace string, featureName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "featureName": autorest.Encode("path", featureName), + "resourceProviderNamespace": autorest.Encode("path", resourceProviderNamespace), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Features/providers/{resourceProviderNamespace}/features/{featureName}/register", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// RegisterSender sends the Register request. The method will close the +// http.Response Body if it receives an error. +func (client Client) RegisterSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// RegisterResponder handles the response to the Register request. 
The method always +// closes the http.Response Body. +func (client Client) RegisterResponder(resp *http.Response) (result FeatureResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/resources/features/models.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/resources/features/models.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/resources/features/models.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/resources/features/models.go 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,90 @@ +package features + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/to" + "net/http" +) + +// DeploymentExtendedFilter is deployment filter. 
+type DeploymentExtendedFilter struct { + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// FeatureOperationsListResult is list of previewed features. +type FeatureOperationsListResult struct { + autorest.Response `json:"-"` + Value *[]FeatureResult `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// FeatureOperationsListResultPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. +func (client FeatureOperationsListResult) FeatureOperationsListResultPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + +// FeatureProperties is previewed feature information. +type FeatureProperties struct { + State *string `json:"state,omitempty"` +} + +// FeatureResult is previewed feature information. +type FeatureResult struct { + autorest.Response `json:"-"` + Name *string `json:"name,omitempty"` + Properties *FeatureProperties `json:"properties,omitempty"` + ID *string `json:"id,omitempty"` + Type *string `json:"type,omitempty"` +} + +// GenericResourceFilter is resource filter. +type GenericResourceFilter struct { + ResourceType *string `json:"resourceType,omitempty"` + Tagname *string `json:"tagname,omitempty"` + Tagvalue *string `json:"tagvalue,omitempty"` +} + +// Resource is +type Resource struct { + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` +} + +// ResourceGroupFilter is resource group filter. 
+type ResourceGroupFilter struct { + TagName *string `json:"tagName,omitempty"` + TagValue *string `json:"tagValue,omitempty"` +} + +// SubResource is +type SubResource struct { + ID *string `json:"id,omitempty"` +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/resources/features/version.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/resources/features/version.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/resources/features/version.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/resources/features/version.go 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,43 @@ +package features + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "fmt" +) + +const ( + major = "3" + minor = "1" + patch = "0" + // Always begin a "tag" with a dash (as per http://semver.org) + tag = "-beta" + semVerFormat = "%s.%s.%s%s" + userAgentFormat = "Azure-SDK-for-Go/%s arm-%s/%s" +) + +// UserAgent returns the UserAgent string to use when sending http.Requests. 
+func UserAgent() string { + return fmt.Sprintf(userAgentFormat, Version(), "features", "2015-12-01") +} + +// Version returns the semantic version (see http://semver.org) of the client. +func Version() string { + return fmt.Sprintf(semVerFormat, major, minor, patch, tag) +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/resources/groups.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/resources/groups.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/resources/groups.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/resources/groups.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,547 +0,0 @@ -package resources - -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. -// -// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 -// Changes may cause incorrect behavior and will be lost if the code is -// regenerated. - -import ( - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" - "net/http" - "net/url" -) - -// GroupsClient is the client for the Groups methods of the Resources service. -type GroupsClient struct { - ManagementClient -} - -// NewGroupsClient creates an instance of the GroupsClient client. 
-func NewGroupsClient(subscriptionID string) GroupsClient { - return NewGroupsClientWithBaseURI(DefaultBaseURI, subscriptionID) -} - -// NewGroupsClientWithBaseURI creates an instance of the GroupsClient client. -func NewGroupsClientWithBaseURI(baseURI string, subscriptionID string) GroupsClient { - return GroupsClient{NewWithBaseURI(baseURI, subscriptionID)} -} - -// CheckExistence checks whether resource group exists. -// -// resourceGroupName is the name of the resource group to check. The name is -// case insensitive. -func (client GroupsClient) CheckExistence(resourceGroupName string) (result autorest.Response, ae error) { - req, err := client.CheckExistencePreparer(resourceGroupName) - if err != nil { - return result, autorest.NewErrorWithError(err, "resources/GroupsClient", "CheckExistence", "Failure preparing request") - } - - resp, err := client.CheckExistenceSender(req) - if err != nil { - result.Response = resp - return result, autorest.NewErrorWithError(err, "resources/GroupsClient", "CheckExistence", "Failure sending request") - } - - result, err = client.CheckExistenceResponder(resp) - if err != nil { - ae = autorest.NewErrorWithError(err, "resources/GroupsClient", "CheckExistence", "Failure responding to request") - } - - return -} - -// CheckExistencePreparer prepares the CheckExistence request. 
-func (client GroupsClient) CheckExistencePreparer(resourceGroupName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - } - - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsHead(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}"), - autorest.WithPathParameters(pathParameters), - autorest.WithQueryParameters(queryParameters)) -} - -// CheckExistenceSender sends the CheckExistence request. The method will close the -// http.Response Body if it receives an error. -func (client GroupsClient) CheckExistenceSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK, http.StatusNoContent, http.StatusNotFound) -} - -// CheckExistenceResponder handles the response to the CheckExistence request. The method always -// closes the http.Response Body. -func (client GroupsClient) CheckExistenceResponder(resp *http.Response) (result autorest.Response, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent, http.StatusNotFound), - autorest.ByClosing()) - result.Response = resp - return -} - -// CreateOrUpdate create a resource group. -// -// resourceGroupName is the name of the resource group to be created or -// updated. parameters is parameters supplied to the create or update -// resource group service operation. 
-func (client GroupsClient) CreateOrUpdate(resourceGroupName string, parameters ResourceGroup) (result ResourceGroup, ae error) { - req, err := client.CreateOrUpdatePreparer(resourceGroupName, parameters) - if err != nil { - return result, autorest.NewErrorWithError(err, "resources/GroupsClient", "CreateOrUpdate", "Failure preparing request") - } - - resp, err := client.CreateOrUpdateSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "resources/GroupsClient", "CreateOrUpdate", "Failure sending request") - } - - result, err = client.CreateOrUpdateResponder(resp) - if err != nil { - ae = autorest.NewErrorWithError(err, "resources/GroupsClient", "CreateOrUpdate", "Failure responding to request") - } - - return -} - -// CreateOrUpdatePreparer prepares the CreateOrUpdate request. -func (client GroupsClient) CreateOrUpdatePreparer(resourceGroupName string, parameters ResourceGroup) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - } - - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsPut(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}"), - autorest.WithJSON(parameters), - autorest.WithPathParameters(pathParameters), - autorest.WithQueryParameters(queryParameters)) -} - -// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the -// http.Response Body if it receives an error. -func (client GroupsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusCreated, http.StatusOK) -} - -// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. 
The method always -// closes the http.Response Body. -func (client GroupsClient) CreateOrUpdateResponder(resp *http.Response) (result ResourceGroup, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusCreated, http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// Delete begin deleting resource group.To determine whether the operation has -// finished processing the request, call GetLongRunningOperationStatus. -// -// resourceGroupName is the name of the resource group to be deleted. The name -// is case insensitive. -func (client GroupsClient) Delete(resourceGroupName string) (result autorest.Response, ae error) { - req, err := client.DeletePreparer(resourceGroupName) - if err != nil { - return result, autorest.NewErrorWithError(err, "resources/GroupsClient", "Delete", "Failure preparing request") - } - - resp, err := client.DeleteSender(req) - if err != nil { - result.Response = resp - return result, autorest.NewErrorWithError(err, "resources/GroupsClient", "Delete", "Failure sending request") - } - - result, err = client.DeleteResponder(resp) - if err != nil { - ae = autorest.NewErrorWithError(err, "resources/GroupsClient", "Delete", "Failure responding to request") - } - - return -} - -// DeletePreparer prepares the Delete request. 
-func (client GroupsClient) DeletePreparer(resourceGroupName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - } - - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsDelete(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}"), - autorest.WithPathParameters(pathParameters), - autorest.WithQueryParameters(queryParameters)) -} - -// DeleteSender sends the Delete request. The method will close the -// http.Response Body if it receives an error. -func (client GroupsClient) DeleteSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusAccepted, http.StatusOK) -} - -// DeleteResponder handles the response to the Delete request. The method always -// closes the http.Response Body. -func (client GroupsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusAccepted, http.StatusOK), - autorest.ByClosing()) - result.Response = resp - return -} - -// Get get a resource group. -// -// resourceGroupName is the name of the resource group to get. The name is -// case insensitive. 
-func (client GroupsClient) Get(resourceGroupName string) (result ResourceGroup, ae error) { - req, err := client.GetPreparer(resourceGroupName) - if err != nil { - return result, autorest.NewErrorWithError(err, "resources/GroupsClient", "Get", "Failure preparing request") - } - - resp, err := client.GetSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "resources/GroupsClient", "Get", "Failure sending request") - } - - result, err = client.GetResponder(resp) - if err != nil { - ae = autorest.NewErrorWithError(err, "resources/GroupsClient", "Get", "Failure responding to request") - } - - return -} - -// GetPreparer prepares the Get request. -func (client GroupsClient) GetPreparer(resourceGroupName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - } - - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}"), - autorest.WithPathParameters(pathParameters), - autorest.WithQueryParameters(queryParameters)) -} - -// GetSender sends the Get request. The method will close the -// http.Response Body if it receives an error. -func (client GroupsClient) GetSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) -} - -// GetResponder handles the response to the Get request. The method always -// closes the http.Response Body. 
-func (client GroupsClient) GetResponder(resp *http.Response) (result ResourceGroup, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// List gets a collection of resource groups. -// -// filter is the filter to apply on the operation. top is query parameters. If -// null is passed returns all resource groups. -func (client GroupsClient) List(filter string, top *int) (result ResourceGroupListResult, ae error) { - req, err := client.ListPreparer(filter, top) - if err != nil { - return result, autorest.NewErrorWithError(err, "resources/GroupsClient", "List", "Failure preparing request") - } - - resp, err := client.ListSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "resources/GroupsClient", "List", "Failure sending request") - } - - result, err = client.ListResponder(resp) - if err != nil { - ae = autorest.NewErrorWithError(err, "resources/GroupsClient", "List", "Failure responding to request") - } - - return -} - -// ListPreparer prepares the List request. -func (client GroupsClient) ListPreparer(filter string, top *int) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "subscriptionId": url.QueryEscape(client.SubscriptionID), - } - - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - if len(filter) > 0 { - queryParameters["$filter"] = filter - } - if top != nil { - queryParameters["$top"] = top - } - - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourcegroups"), - autorest.WithPathParameters(pathParameters), - autorest.WithQueryParameters(queryParameters)) -} - -// ListSender sends the List request. 
The method will close the -// http.Response Body if it receives an error. -func (client GroupsClient) ListSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) -} - -// ListResponder handles the response to the List request. The method always -// closes the http.Response Body. -func (client GroupsClient) ListResponder(resp *http.Response) (result ResourceGroupListResult, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// ListNextResults retrieves the next set of results, if any. -func (client GroupsClient) ListNextResults(lastResults ResourceGroupListResult) (result ResourceGroupListResult, ae error) { - req, err := lastResults.ResourceGroupListResultPreparer() - if err != nil { - return result, autorest.NewErrorWithError(err, "resources/GroupsClient", "List", "Failure preparing next results request request") - } - if req == nil { - return - } - - resp, err := client.ListSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "resources/GroupsClient", "List", "Failure sending next results request request") - } - - result, err = client.ListResponder(resp) - if err != nil { - ae = autorest.NewErrorWithError(err, "resources/GroupsClient", "List", "Failure responding to next results request request") - } - - return -} - -// ListResources get all of the resources under a subscription. -// -// resourceGroupName is query parameters. If null is passed returns all -// resource groups. filter is the filter to apply on the operation. top is -// query parameters. If null is passed returns all resource groups. 
-func (client GroupsClient) ListResources(resourceGroupName string, filter string, top *int) (result ResourceListResult, ae error) { - req, err := client.ListResourcesPreparer(resourceGroupName, filter, top) - if err != nil { - return result, autorest.NewErrorWithError(err, "resources/GroupsClient", "ListResources", "Failure preparing request") - } - - resp, err := client.ListResourcesSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "resources/GroupsClient", "ListResources", "Failure sending request") - } - - result, err = client.ListResourcesResponder(resp) - if err != nil { - ae = autorest.NewErrorWithError(err, "resources/GroupsClient", "ListResources", "Failure responding to request") - } - - return -} - -// ListResourcesPreparer prepares the ListResources request. -func (client GroupsClient) ListResourcesPreparer(resourceGroupName string, filter string, top *int) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - } - - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - if len(filter) > 0 { - queryParameters["$filter"] = filter - } - if top != nil { - queryParameters["$top"] = top - } - - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/resources"), - autorest.WithPathParameters(pathParameters), - autorest.WithQueryParameters(queryParameters)) -} - -// ListResourcesSender sends the ListResources request. The method will close the -// http.Response Body if it receives an error. 
-func (client GroupsClient) ListResourcesSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) -} - -// ListResourcesResponder handles the response to the ListResources request. The method always -// closes the http.Response Body. -func (client GroupsClient) ListResourcesResponder(resp *http.Response) (result ResourceListResult, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// ListResourcesNextResults retrieves the next set of results, if any. -func (client GroupsClient) ListResourcesNextResults(lastResults ResourceListResult) (result ResourceListResult, ae error) { - req, err := lastResults.ResourceListResultPreparer() - if err != nil { - return result, autorest.NewErrorWithError(err, "resources/GroupsClient", "ListResources", "Failure preparing next results request request") - } - if req == nil { - return - } - - resp, err := client.ListResourcesSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "resources/GroupsClient", "ListResources", "Failure sending next results request request") - } - - result, err = client.ListResourcesResponder(resp) - if err != nil { - ae = autorest.NewErrorWithError(err, "resources/GroupsClient", "ListResources", "Failure responding to next results request request") - } - - return -} - -// Patch resource groups can be updated through a simple PATCH operation to a -// group address. The format of the request is the same as that for creating -// a resource groups, though if a field is unspecified current value will be -// carried over. -// -// resourceGroupName is the name of the resource group to be created or -// updated. The name is case insensitive. 
parameters is parameters supplied -// to the update state resource group service operation. -func (client GroupsClient) Patch(resourceGroupName string, parameters ResourceGroup) (result ResourceGroup, ae error) { - req, err := client.PatchPreparer(resourceGroupName, parameters) - if err != nil { - return result, autorest.NewErrorWithError(err, "resources/GroupsClient", "Patch", "Failure preparing request") - } - - resp, err := client.PatchSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "resources/GroupsClient", "Patch", "Failure sending request") - } - - result, err = client.PatchResponder(resp) - if err != nil { - ae = autorest.NewErrorWithError(err, "resources/GroupsClient", "Patch", "Failure responding to request") - } - - return -} - -// PatchPreparer prepares the Patch request. -func (client GroupsClient) PatchPreparer(resourceGroupName string, parameters ResourceGroup) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - } - - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsPatch(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}"), - autorest.WithJSON(parameters), - autorest.WithPathParameters(pathParameters), - autorest.WithQueryParameters(queryParameters)) -} - -// PatchSender sends the Patch request. The method will close the -// http.Response Body if it receives an error. -func (client GroupsClient) PatchSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) -} - -// PatchResponder handles the response to the Patch request. The method always -// closes the http.Response Body. 
-func (client GroupsClient) PatchResponder(resp *http.Response) (result ResourceGroup, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/resources/locks/client.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/resources/locks/client.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/resources/locks/client.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/resources/locks/client.go 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,56 @@ +// Package locks implements the Azure ARM Locks service API version 2015-01-01. +// +package locks + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. 
+ +import ( + "github.com/Azure/go-autorest/autorest" +) + +const ( + // APIVersion is the version of the Locks + APIVersion = "2015-01-01" + + // DefaultBaseURI is the default URI used for the service Locks + DefaultBaseURI = "https://management.azure.com" +) + +// ManagementClient is the base client for Locks. +type ManagementClient struct { + autorest.Client + BaseURI string + APIVersion string + SubscriptionID string +} + +// New creates an instance of the ManagementClient client. +func New(subscriptionID string) ManagementClient { + return NewWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewWithBaseURI creates an instance of the ManagementClient client. +func NewWithBaseURI(baseURI string, subscriptionID string) ManagementClient { + return ManagementClient{ + Client: autorest.NewClientWithUserAgent(UserAgent()), + BaseURI: baseURI, + APIVersion: APIVersion, + SubscriptionID: subscriptionID, + } +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/resources/locks/managementlocks.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/resources/locks/managementlocks.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/resources/locks/managementlocks.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/resources/locks/managementlocks.go 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,850 @@ +package locks + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "net/http" +) + +// ManagementLocksClient is the client for the ManagementLocks methods of the +// Locks service. +type ManagementLocksClient struct { + ManagementClient +} + +// NewManagementLocksClient creates an instance of the ManagementLocksClient +// client. +func NewManagementLocksClient(subscriptionID string) ManagementLocksClient { + return NewManagementLocksClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewManagementLocksClientWithBaseURI creates an instance of the +// ManagementLocksClient client. +func NewManagementLocksClientWithBaseURI(baseURI string, subscriptionID string) ManagementLocksClient { + return ManagementLocksClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdateAtResourceGroupLevel create or update a management lock at +// the resource group level. +// +// resourceGroupName is the resource group name. lockName is the lock name. +// parameters is the management lock parameters. 
+func (client ManagementLocksClient) CreateOrUpdateAtResourceGroupLevel(resourceGroupName string, lockName string, parameters ManagementLockObject) (result ManagementLockObject, err error) { + req, err := client.CreateOrUpdateAtResourceGroupLevelPreparer(resourceGroupName, lockName, parameters) + if err != nil { + return result, autorest.NewErrorWithError(err, "locks.ManagementLocksClient", "CreateOrUpdateAtResourceGroupLevel", nil, "Failure preparing request") + } + + resp, err := client.CreateOrUpdateAtResourceGroupLevelSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "locks.ManagementLocksClient", "CreateOrUpdateAtResourceGroupLevel", resp, "Failure sending request") + } + + result, err = client.CreateOrUpdateAtResourceGroupLevelResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "locks.ManagementLocksClient", "CreateOrUpdateAtResourceGroupLevel", resp, "Failure responding to request") + } + + return +} + +// CreateOrUpdateAtResourceGroupLevelPreparer prepares the CreateOrUpdateAtResourceGroupLevel request. 
+func (client ManagementLocksClient) CreateOrUpdateAtResourceGroupLevelPreparer(resourceGroupName string, lockName string, parameters ManagementLockObject) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "lockName": autorest.Encode("path", lockName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Authorization/locks/{lockName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// CreateOrUpdateAtResourceGroupLevelSender sends the CreateOrUpdateAtResourceGroupLevel request. The method will close the +// http.Response Body if it receives an error. +func (client ManagementLocksClient) CreateOrUpdateAtResourceGroupLevelSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// CreateOrUpdateAtResourceGroupLevelResponder handles the response to the CreateOrUpdateAtResourceGroupLevel request. The method always +// closes the http.Response Body. +func (client ManagementLocksClient) CreateOrUpdateAtResourceGroupLevelResponder(resp *http.Response) (result ManagementLockObject, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// CreateOrUpdateAtResourceLevel create or update a management lock at the +// resource level or any level below resource. 
+// +// resourceGroupName is the name of the resource group. +// resourceProviderNamespace is resource identity. parentResourcePath is +// resource identity. resourceType is resource identity. resourceName is +// resource identity. lockName is the name of lock. parameters is create or +// update management lock parameters. +func (client ManagementLocksClient) CreateOrUpdateAtResourceLevel(resourceGroupName string, resourceProviderNamespace string, parentResourcePath string, resourceType string, resourceName string, lockName string, parameters ManagementLockObject) (result ManagementLockObject, err error) { + req, err := client.CreateOrUpdateAtResourceLevelPreparer(resourceGroupName, resourceProviderNamespace, parentResourcePath, resourceType, resourceName, lockName, parameters) + if err != nil { + return result, autorest.NewErrorWithError(err, "locks.ManagementLocksClient", "CreateOrUpdateAtResourceLevel", nil, "Failure preparing request") + } + + resp, err := client.CreateOrUpdateAtResourceLevelSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "locks.ManagementLocksClient", "CreateOrUpdateAtResourceLevel", resp, "Failure sending request") + } + + result, err = client.CreateOrUpdateAtResourceLevelResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "locks.ManagementLocksClient", "CreateOrUpdateAtResourceLevel", resp, "Failure responding to request") + } + + return +} + +// CreateOrUpdateAtResourceLevelPreparer prepares the CreateOrUpdateAtResourceLevel request. 
+func (client ManagementLocksClient) CreateOrUpdateAtResourceLevelPreparer(resourceGroupName string, resourceProviderNamespace string, parentResourcePath string, resourceType string, resourceName string, lockName string, parameters ManagementLockObject) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "lockName": autorest.Encode("path", lockName), + "parentResourcePath": parentResourcePath, + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "resourceName": autorest.Encode("path", resourceName), + "resourceProviderNamespace": autorest.Encode("path", resourceProviderNamespace), + "resourceType": resourceType, + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}/providers/Microsoft.Authorization/locks/{lockName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// CreateOrUpdateAtResourceLevelSender sends the CreateOrUpdateAtResourceLevel request. The method will close the +// http.Response Body if it receives an error. +func (client ManagementLocksClient) CreateOrUpdateAtResourceLevelSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// CreateOrUpdateAtResourceLevelResponder handles the response to the CreateOrUpdateAtResourceLevel request. The method always +// closes the http.Response Body. 
+func (client ManagementLocksClient) CreateOrUpdateAtResourceLevelResponder(resp *http.Response) (result ManagementLockObject, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// CreateOrUpdateAtSubscriptionLevel create or update a management lock at the +// subscription level. +// +// lockName is the name of lock. parameters is the management lock parameters. +func (client ManagementLocksClient) CreateOrUpdateAtSubscriptionLevel(lockName string, parameters ManagementLockObject) (result ManagementLockObject, err error) { + req, err := client.CreateOrUpdateAtSubscriptionLevelPreparer(lockName, parameters) + if err != nil { + return result, autorest.NewErrorWithError(err, "locks.ManagementLocksClient", "CreateOrUpdateAtSubscriptionLevel", nil, "Failure preparing request") + } + + resp, err := client.CreateOrUpdateAtSubscriptionLevelSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "locks.ManagementLocksClient", "CreateOrUpdateAtSubscriptionLevel", resp, "Failure sending request") + } + + result, err = client.CreateOrUpdateAtSubscriptionLevelResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "locks.ManagementLocksClient", "CreateOrUpdateAtSubscriptionLevel", resp, "Failure responding to request") + } + + return +} + +// CreateOrUpdateAtSubscriptionLevelPreparer prepares the CreateOrUpdateAtSubscriptionLevel request. 
+func (client ManagementLocksClient) CreateOrUpdateAtSubscriptionLevelPreparer(lockName string, parameters ManagementLockObject) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "lockName": autorest.Encode("path", lockName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Authorization/locks/{lockName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// CreateOrUpdateAtSubscriptionLevelSender sends the CreateOrUpdateAtSubscriptionLevel request. The method will close the +// http.Response Body if it receives an error. +func (client ManagementLocksClient) CreateOrUpdateAtSubscriptionLevelSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// CreateOrUpdateAtSubscriptionLevelResponder handles the response to the CreateOrUpdateAtSubscriptionLevel request. The method always +// closes the http.Response Body. +func (client ManagementLocksClient) CreateOrUpdateAtSubscriptionLevelResponder(resp *http.Response) (result ManagementLockObject, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusCreated, http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// DeleteAtResourceGroupLevel deletes the management lock of a resource group. +// +// resourceGroup is the resource group names. lockName is the name of lock. 
+func (client ManagementLocksClient) DeleteAtResourceGroupLevel(resourceGroup string, lockName string) (result autorest.Response, err error) { + req, err := client.DeleteAtResourceGroupLevelPreparer(resourceGroup, lockName) + if err != nil { + return result, autorest.NewErrorWithError(err, "locks.ManagementLocksClient", "DeleteAtResourceGroupLevel", nil, "Failure preparing request") + } + + resp, err := client.DeleteAtResourceGroupLevelSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "locks.ManagementLocksClient", "DeleteAtResourceGroupLevel", resp, "Failure sending request") + } + + result, err = client.DeleteAtResourceGroupLevelResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "locks.ManagementLocksClient", "DeleteAtResourceGroupLevel", resp, "Failure responding to request") + } + + return +} + +// DeleteAtResourceGroupLevelPreparer prepares the DeleteAtResourceGroupLevel request. +func (client ManagementLocksClient) DeleteAtResourceGroupLevelPreparer(resourceGroup string, lockName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "lockName": autorest.Encode("path", lockName), + "resourceGroup": autorest.Encode("path", resourceGroup), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Authorization/locks/{lockName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// DeleteAtResourceGroupLevelSender sends the DeleteAtResourceGroupLevel request. The method will close the +// http.Response Body if it receives an error. 
+func (client ManagementLocksClient) DeleteAtResourceGroupLevelSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// DeleteAtResourceGroupLevelResponder handles the response to the DeleteAtResourceGroupLevel request. The method always +// closes the http.Response Body. +func (client ManagementLocksClient) DeleteAtResourceGroupLevelResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusNoContent, http.StatusOK, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} + +// DeleteAtResourceLevel deletes the management lock of a resource or any +// level below resource. +// +// resourceGroupName is the name of the resource group. +// resourceProviderNamespace is resource identity. parentResourcePath is +// resource identity. resourceType is resource identity. resourceName is +// resource identity. lockName is the name of lock. 
+func (client ManagementLocksClient) DeleteAtResourceLevel(resourceGroupName string, resourceProviderNamespace string, parentResourcePath string, resourceType string, resourceName string, lockName string) (result autorest.Response, err error) { + req, err := client.DeleteAtResourceLevelPreparer(resourceGroupName, resourceProviderNamespace, parentResourcePath, resourceType, resourceName, lockName) + if err != nil { + return result, autorest.NewErrorWithError(err, "locks.ManagementLocksClient", "DeleteAtResourceLevel", nil, "Failure preparing request") + } + + resp, err := client.DeleteAtResourceLevelSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "locks.ManagementLocksClient", "DeleteAtResourceLevel", resp, "Failure sending request") + } + + result, err = client.DeleteAtResourceLevelResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "locks.ManagementLocksClient", "DeleteAtResourceLevel", resp, "Failure responding to request") + } + + return +} + +// DeleteAtResourceLevelPreparer prepares the DeleteAtResourceLevel request. 
+func (client ManagementLocksClient) DeleteAtResourceLevelPreparer(resourceGroupName string, resourceProviderNamespace string, parentResourcePath string, resourceType string, resourceName string, lockName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "lockName": autorest.Encode("path", lockName), + "parentResourcePath": parentResourcePath, + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "resourceName": autorest.Encode("path", resourceName), + "resourceProviderNamespace": autorest.Encode("path", resourceProviderNamespace), + "resourceType": resourceType, + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}/providers/Microsoft.Authorization/locks/{lockName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// DeleteAtResourceLevelSender sends the DeleteAtResourceLevel request. The method will close the +// http.Response Body if it receives an error. +func (client ManagementLocksClient) DeleteAtResourceLevelSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// DeleteAtResourceLevelResponder handles the response to the DeleteAtResourceLevel request. The method always +// closes the http.Response Body. 
+func (client ManagementLocksClient) DeleteAtResourceLevelResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusNoContent, http.StatusOK, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} + +// DeleteAtSubscriptionLevel deletes the management lock of a subscription. +// +// lockName is the name of lock. +func (client ManagementLocksClient) DeleteAtSubscriptionLevel(lockName string) (result autorest.Response, err error) { + req, err := client.DeleteAtSubscriptionLevelPreparer(lockName) + if err != nil { + return result, autorest.NewErrorWithError(err, "locks.ManagementLocksClient", "DeleteAtSubscriptionLevel", nil, "Failure preparing request") + } + + resp, err := client.DeleteAtSubscriptionLevelSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "locks.ManagementLocksClient", "DeleteAtSubscriptionLevel", resp, "Failure sending request") + } + + result, err = client.DeleteAtSubscriptionLevelResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "locks.ManagementLocksClient", "DeleteAtSubscriptionLevel", resp, "Failure responding to request") + } + + return +} + +// DeleteAtSubscriptionLevelPreparer prepares the DeleteAtSubscriptionLevel request. 
+func (client ManagementLocksClient) DeleteAtSubscriptionLevelPreparer(lockName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "lockName": autorest.Encode("path", lockName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Authorization/locks/{lockName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// DeleteAtSubscriptionLevelSender sends the DeleteAtSubscriptionLevel request. The method will close the +// http.Response Body if it receives an error. +func (client ManagementLocksClient) DeleteAtSubscriptionLevelSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// DeleteAtSubscriptionLevelResponder handles the response to the DeleteAtSubscriptionLevel request. The method always +// closes the http.Response Body. +func (client ManagementLocksClient) DeleteAtSubscriptionLevelResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusNoContent, http.StatusOK, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get gets the management lock of a scope. +// +// lockName is name of the management lock. 
+func (client ManagementLocksClient) Get(lockName string) (result ManagementLockObject, err error) { + req, err := client.GetPreparer(lockName) + if err != nil { + return result, autorest.NewErrorWithError(err, "locks.ManagementLocksClient", "Get", nil, "Failure preparing request") + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "locks.ManagementLocksClient", "Get", resp, "Failure sending request") + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "locks.ManagementLocksClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client ManagementLocksClient) GetPreparer(lockName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "lockName": autorest.Encode("path", lockName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Authorization/locks/{lockName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client ManagementLocksClient) GetSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. 
+func (client ManagementLocksClient) GetResponder(resp *http.Response) (result ManagementLockObject, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListAtResourceGroupLevel gets all the management locks of a resource group. +// +// resourceGroupName is resource group name. filter is the filter to apply on +// the operation. +func (client ManagementLocksClient) ListAtResourceGroupLevel(resourceGroupName string, filter string) (result ManagementLockListResult, err error) { + req, err := client.ListAtResourceGroupLevelPreparer(resourceGroupName, filter) + if err != nil { + return result, autorest.NewErrorWithError(err, "locks.ManagementLocksClient", "ListAtResourceGroupLevel", nil, "Failure preparing request") + } + + resp, err := client.ListAtResourceGroupLevelSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "locks.ManagementLocksClient", "ListAtResourceGroupLevel", resp, "Failure sending request") + } + + result, err = client.ListAtResourceGroupLevelResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "locks.ManagementLocksClient", "ListAtResourceGroupLevel", resp, "Failure responding to request") + } + + return +} + +// ListAtResourceGroupLevelPreparer prepares the ListAtResourceGroupLevel request. 
+func (client ManagementLocksClient) ListAtResourceGroupLevelPreparer(resourceGroupName string, filter string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + if len(filter) > 0 { + queryParameters["$filter"] = autorest.Encode("query", filter) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Authorization/locks", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListAtResourceGroupLevelSender sends the ListAtResourceGroupLevel request. The method will close the +// http.Response Body if it receives an error. +func (client ManagementLocksClient) ListAtResourceGroupLevelSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListAtResourceGroupLevelResponder handles the response to the ListAtResourceGroupLevel request. The method always +// closes the http.Response Body. +func (client ManagementLocksClient) ListAtResourceGroupLevelResponder(resp *http.Response) (result ManagementLockListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListAtResourceGroupLevelNextResults retrieves the next set of results, if any. 
+func (client ManagementLocksClient) ListAtResourceGroupLevelNextResults(lastResults ManagementLockListResult) (result ManagementLockListResult, err error) { + req, err := lastResults.ManagementLockListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "locks.ManagementLocksClient", "ListAtResourceGroupLevel", nil, "Failure preparing next results request request") + } + if req == nil { + return + } + + resp, err := client.ListAtResourceGroupLevelSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "locks.ManagementLocksClient", "ListAtResourceGroupLevel", resp, "Failure sending next results request request") + } + + result, err = client.ListAtResourceGroupLevelResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "locks.ManagementLocksClient", "ListAtResourceGroupLevel", resp, "Failure responding to next results request request") + } + + return +} + +// ListAtResourceLevel gets all the management locks of a resource or any +// level below resource. +// +// resourceGroupName is the name of the resource group. The name is case +// insensitive. resourceProviderNamespace is resource identity. +// parentResourcePath is resource identity. resourceType is resource +// identity. resourceName is resource identity. filter is the filter to apply +// on the operation. 
+func (client ManagementLocksClient) ListAtResourceLevel(resourceGroupName string, resourceProviderNamespace string, parentResourcePath string, resourceType string, resourceName string, filter string) (result ManagementLockListResult, err error) { + req, err := client.ListAtResourceLevelPreparer(resourceGroupName, resourceProviderNamespace, parentResourcePath, resourceType, resourceName, filter) + if err != nil { + return result, autorest.NewErrorWithError(err, "locks.ManagementLocksClient", "ListAtResourceLevel", nil, "Failure preparing request") + } + + resp, err := client.ListAtResourceLevelSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "locks.ManagementLocksClient", "ListAtResourceLevel", resp, "Failure sending request") + } + + result, err = client.ListAtResourceLevelResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "locks.ManagementLocksClient", "ListAtResourceLevel", resp, "Failure responding to request") + } + + return +} + +// ListAtResourceLevelPreparer prepares the ListAtResourceLevel request. 
+func (client ManagementLocksClient) ListAtResourceLevelPreparer(resourceGroupName string, resourceProviderNamespace string, parentResourcePath string, resourceType string, resourceName string, filter string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "parentResourcePath": parentResourcePath, + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "resourceName": autorest.Encode("path", resourceName), + "resourceProviderNamespace": autorest.Encode("path", resourceProviderNamespace), + "resourceType": resourceType, + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + if len(filter) > 0 { + queryParameters["$filter"] = autorest.Encode("query", filter) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}/providers/Microsoft.Authorization/locks", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListAtResourceLevelSender sends the ListAtResourceLevel request. The method will close the +// http.Response Body if it receives an error. +func (client ManagementLocksClient) ListAtResourceLevelSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListAtResourceLevelResponder handles the response to the ListAtResourceLevel request. The method always +// closes the http.Response Body. 
+func (client ManagementLocksClient) ListAtResourceLevelResponder(resp *http.Response) (result ManagementLockListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListAtResourceLevelNextResults retrieves the next set of results, if any. +func (client ManagementLocksClient) ListAtResourceLevelNextResults(lastResults ManagementLockListResult) (result ManagementLockListResult, err error) { + req, err := lastResults.ManagementLockListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "locks.ManagementLocksClient", "ListAtResourceLevel", nil, "Failure preparing next results request request") + } + if req == nil { + return + } + + resp, err := client.ListAtResourceLevelSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "locks.ManagementLocksClient", "ListAtResourceLevel", resp, "Failure sending next results request request") + } + + result, err = client.ListAtResourceLevelResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "locks.ManagementLocksClient", "ListAtResourceLevel", resp, "Failure responding to next results request request") + } + + return +} + +// ListAtSubscriptionLevel gets all the management locks of a subscription. +// +// filter is the filter to apply on the operation. 
+func (client ManagementLocksClient) ListAtSubscriptionLevel(filter string) (result ManagementLockListResult, err error) { + req, err := client.ListAtSubscriptionLevelPreparer(filter) + if err != nil { + return result, autorest.NewErrorWithError(err, "locks.ManagementLocksClient", "ListAtSubscriptionLevel", nil, "Failure preparing request") + } + + resp, err := client.ListAtSubscriptionLevelSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "locks.ManagementLocksClient", "ListAtSubscriptionLevel", resp, "Failure sending request") + } + + result, err = client.ListAtSubscriptionLevelResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "locks.ManagementLocksClient", "ListAtSubscriptionLevel", resp, "Failure responding to request") + } + + return +} + +// ListAtSubscriptionLevelPreparer prepares the ListAtSubscriptionLevel request. +func (client ManagementLocksClient) ListAtSubscriptionLevelPreparer(filter string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + if len(filter) > 0 { + queryParameters["$filter"] = autorest.Encode("query", filter) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Authorization/locks", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListAtSubscriptionLevelSender sends the ListAtSubscriptionLevel request. The method will close the +// http.Response Body if it receives an error. 
+func (client ManagementLocksClient) ListAtSubscriptionLevelSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListAtSubscriptionLevelResponder handles the response to the ListAtSubscriptionLevel request. The method always +// closes the http.Response Body. +func (client ManagementLocksClient) ListAtSubscriptionLevelResponder(resp *http.Response) (result ManagementLockListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListAtSubscriptionLevelNextResults retrieves the next set of results, if any. +func (client ManagementLocksClient) ListAtSubscriptionLevelNextResults(lastResults ManagementLockListResult) (result ManagementLockListResult, err error) { + req, err := lastResults.ManagementLockListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "locks.ManagementLocksClient", "ListAtSubscriptionLevel", nil, "Failure preparing next results request request") + } + if req == nil { + return + } + + resp, err := client.ListAtSubscriptionLevelSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "locks.ManagementLocksClient", "ListAtSubscriptionLevel", resp, "Failure sending next results request request") + } + + result, err = client.ListAtSubscriptionLevelResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "locks.ManagementLocksClient", "ListAtSubscriptionLevel", resp, "Failure responding to next results request request") + } + + return +} + +// ListNext get a list of management locks at resource level or below. +// +// nextLink is nextLink from the previous successful call to List operation. 
+func (client ManagementLocksClient) ListNext(nextLink string) (result ManagementLockListResult, err error) { + req, err := client.ListNextPreparer(nextLink) + if err != nil { + return result, autorest.NewErrorWithError(err, "locks.ManagementLocksClient", "ListNext", nil, "Failure preparing request") + } + + resp, err := client.ListNextSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "locks.ManagementLocksClient", "ListNext", resp, "Failure sending request") + } + + result, err = client.ListNextResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "locks.ManagementLocksClient", "ListNext", resp, "Failure responding to request") + } + + return +} + +// ListNextPreparer prepares the ListNext request. +func (client ManagementLocksClient) ListNextPreparer(nextLink string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "nextLink": nextLink, + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/{nextLink}", pathParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListNextSender sends the ListNext request. The method will close the +// http.Response Body if it receives an error. +func (client ManagementLocksClient) ListNextSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListNextResponder handles the response to the ListNext request. The method always +// closes the http.Response Body. 
+func (client ManagementLocksClient) ListNextResponder(resp *http.Response) (result ManagementLockListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListNextNextResults retrieves the next set of results, if any. +func (client ManagementLocksClient) ListNextNextResults(lastResults ManagementLockListResult) (result ManagementLockListResult, err error) { + req, err := lastResults.ManagementLockListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "locks.ManagementLocksClient", "ListNext", nil, "Failure preparing next results request request") + } + if req == nil { + return + } + + resp, err := client.ListNextSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "locks.ManagementLocksClient", "ListNext", resp, "Failure sending next results request request") + } + + result, err = client.ListNextResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "locks.ManagementLocksClient", "ListNext", resp, "Failure responding to next results request request") + } + + return +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/resources/locks/models.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/resources/locks/models.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/resources/locks/models.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/resources/locks/models.go 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,103 @@ +package locks + +// Copyright (c) Microsoft and contributors. All rights reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/to" + "net/http" +) + +// LockLevel enumerates the values for lock level. +type LockLevel string + +const ( + // CanNotDelete specifies the can not delete state for lock level. + CanNotDelete LockLevel = "CanNotDelete" + // NotSpecified specifies the not specified state for lock level. + NotSpecified LockLevel = "NotSpecified" + // ReadOnly specifies the read only state for lock level. + ReadOnly LockLevel = "ReadOnly" +) + +// DeploymentExtendedFilter is deployment filter. +type DeploymentExtendedFilter struct { + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// GenericResourceFilter is resource filter. +type GenericResourceFilter struct { + ResourceType *string `json:"resourceType,omitempty"` + Tagname *string `json:"tagname,omitempty"` + Tagvalue *string `json:"tagvalue,omitempty"` +} + +// ManagementLockListResult is list of management locks. +type ManagementLockListResult struct { + autorest.Response `json:"-"` + Value *[]ManagementLockObject `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// ManagementLockListResultPreparer prepares a request to retrieve the next set of results. 
It returns +// nil if no more results exist. +func (client ManagementLockListResult) ManagementLockListResultPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + +// ManagementLockObject is management lock information. +type ManagementLockObject struct { + autorest.Response `json:"-"` + Properties *ManagementLockProperties `json:"properties,omitempty"` + ID *string `json:"id,omitempty"` + Type *string `json:"type,omitempty"` + Name *string `json:"name,omitempty"` +} + +// ManagementLockProperties is the management lock properties. +type ManagementLockProperties struct { + Level LockLevel `json:"level,omitempty"` + Notes *string `json:"notes,omitempty"` +} + +// Resource is +type Resource struct { + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` +} + +// ResourceGroupFilter is resource group filter. +type ResourceGroupFilter struct { + TagName *string `json:"tagName,omitempty"` + TagValue *string `json:"tagValue,omitempty"` +} + +// SubResource is +type SubResource struct { + ID *string `json:"id,omitempty"` +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/resources/locks/version.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/resources/locks/version.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/resources/locks/version.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/resources/locks/version.go 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,43 @@ +package locks + +// Copyright (c) Microsoft and contributors. All rights reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "fmt" +) + +const ( + major = "3" + minor = "1" + patch = "0" + // Always begin a "tag" with a dash (as per http://semver.org) + tag = "-beta" + semVerFormat = "%s.%s.%s%s" + userAgentFormat = "Azure-SDK-for-Go/%s arm-%s/%s" +) + +// UserAgent returns the UserAgent string to use when sending http.Requests. +func UserAgent() string { + return fmt.Sprintf(userAgentFormat, Version(), "locks", "2015-01-01") +} + +// Version returns the semantic version (see http://semver.org) of the client. +func Version() string { + return fmt.Sprintf(semVerFormat, major, minor, patch, tag) +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/resources/models.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/resources/models.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/resources/models.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/resources/models.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,433 +0,0 @@ -package resources - -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. -// -// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 -// Changes may cause incorrect behavior and will be lost if the code is -// regenerated. - -import ( - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/date" - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/to" - "net/http" -) - -// DeploymentMode enumerates the values for deployment mode. -type DeploymentMode string - -const ( - // Complete specifies the complete state for deployment mode. - Complete DeploymentMode = "Complete" - // Incremental specifies the incremental state for deployment mode. - Incremental DeploymentMode = "Incremental" -) - -// BasicDependency is deployment dependency information. -type BasicDependency struct { - ID *string `json:"id,omitempty"` - ResourceType *string `json:"resourceType,omitempty"` - ResourceName *string `json:"resourceName,omitempty"` -} - -// Dependency is deployment dependency information. -type Dependency struct { - DependsOn *[]BasicDependency `json:"dependsOn,omitempty"` - ID *string `json:"id,omitempty"` - ResourceType *string `json:"resourceType,omitempty"` - ResourceName *string `json:"resourceName,omitempty"` -} - -// Deployment is deployment operation parameters. -type Deployment struct { - Properties *DeploymentProperties `json:"properties,omitempty"` -} - -// DeploymentExtended is deployment information. 
-type DeploymentExtended struct { - autorest.Response `json:"-"` - ID *string `json:"id,omitempty"` - Name *string `json:"name,omitempty"` - Properties *DeploymentPropertiesExtended `json:"properties,omitempty"` -} - -// DeploymentExtendedFilter is deployment filter. -type DeploymentExtendedFilter struct { - ProvisioningState *string `json:"provisioningState,omitempty"` -} - -// DeploymentListResult is list of deployments. -type DeploymentListResult struct { - autorest.Response `json:"-"` - Value *[]DeploymentExtended `json:"value,omitempty"` - NextLink *string `json:"nextLink,omitempty"` -} - -// DeploymentListResultPreparer prepares a request to retrieve the next set of results. It returns -// nil if no more results exist. -func (client DeploymentListResult) DeploymentListResultPreparer() (*http.Request, error) { - if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { - return nil, nil - } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(to.String(client.NextLink))) -} - -// DeploymentOperation is deployment operation information. -type DeploymentOperation struct { - autorest.Response `json:"-"` - ID *string `json:"id,omitempty"` - OperationID *string `json:"operationId,omitempty"` - Properties *DeploymentOperationProperties `json:"properties,omitempty"` -} - -// DeploymentOperationProperties is deployment operation properties. -type DeploymentOperationProperties struct { - ProvisioningState *string `json:"provisioningState,omitempty"` - Timestamp *date.Time `json:"timestamp,omitempty"` - StatusCode *string `json:"statusCode,omitempty"` - StatusMessage *map[string]interface{} `json:"statusMessage,omitempty"` - TargetResource *TargetResource `json:"targetResource,omitempty"` -} - -// DeploymentOperationsListResult is list of deployment operations. 
-type DeploymentOperationsListResult struct { - autorest.Response `json:"-"` - Value *[]DeploymentOperation `json:"value,omitempty"` - NextLink *string `json:"nextLink,omitempty"` -} - -// DeploymentOperationsListResultPreparer prepares a request to retrieve the next set of results. It returns -// nil if no more results exist. -func (client DeploymentOperationsListResult) DeploymentOperationsListResultPreparer() (*http.Request, error) { - if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { - return nil, nil - } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(to.String(client.NextLink))) -} - -// DeploymentProperties is deployment properties. -type DeploymentProperties struct { - Template *map[string]interface{} `json:"template,omitempty"` - TemplateLink *TemplateLink `json:"templateLink,omitempty"` - Parameters *map[string]interface{} `json:"parameters,omitempty"` - ParametersLink *ParametersLink `json:"parametersLink,omitempty"` - Mode DeploymentMode `json:"mode,omitempty"` -} - -// DeploymentPropertiesExtended is deployment properties with additional -// details. -type DeploymentPropertiesExtended struct { - ProvisioningState *string `json:"provisioningState,omitempty"` - CorrelationID *string `json:"correlationId,omitempty"` - Timestamp *date.Time `json:"timestamp,omitempty"` - Outputs *map[string]interface{} `json:"outputs,omitempty"` - Providers *[]Provider `json:"providers,omitempty"` - Dependencies *[]Dependency `json:"dependencies,omitempty"` - Template *map[string]interface{} `json:"template,omitempty"` - TemplateLink *TemplateLink `json:"templateLink,omitempty"` - Parameters *map[string]interface{} `json:"parameters,omitempty"` - ParametersLink *ParametersLink `json:"parametersLink,omitempty"` - Mode DeploymentMode `json:"mode,omitempty"` -} - -// DeploymentValidateResult is information from validate template deployment -// response. 
-type DeploymentValidateResult struct { - autorest.Response `json:"-"` - Error *ResourceManagementErrorWithDetails `json:"error,omitempty"` - Properties *DeploymentPropertiesExtended `json:"properties,omitempty"` -} - -// GenericResource is resource information. -type GenericResource struct { - autorest.Response `json:"-"` - ID *string `json:"id,omitempty"` - Name *string `json:"name,omitempty"` - Type *string `json:"type,omitempty"` - Location *string `json:"location,omitempty"` - Tags *map[string]*string `json:"tags,omitempty"` - Plan *Plan `json:"plan,omitempty"` - Properties *map[string]interface{} `json:"properties,omitempty"` -} - -// GenericResourceFilter is resource filter. -type GenericResourceFilter struct { - ResourceType *string `json:"resourceType,omitempty"` - Tagname *string `json:"tagname,omitempty"` - Tagvalue *string `json:"tagvalue,omitempty"` -} - -// MoveInfo is parameters of move resources. -type MoveInfo struct { - Resources *[]string `json:"resources,omitempty"` - TargetResourceGroup *string `json:"targetResourceGroup,omitempty"` -} - -// Operation is operation -type Operation struct { - Name *string `json:"name,omitempty"` - DisplayName *string `json:"displayName,omitempty"` - Description *string `json:"description,omitempty"` - Origin *string `json:"origin,omitempty"` - Properties *map[string]interface{} `json:"properties,omitempty"` -} - -// ParametersLink is entity representing the reference to the deployment -// paramaters. -type ParametersLink struct { - URI *string `json:"uri,omitempty"` - ContentVersion *string `json:"contentVersion,omitempty"` -} - -// Plan is plan for the resource. -type Plan struct { - Name *string `json:"name,omitempty"` - Publisher *string `json:"publisher,omitempty"` - Product *string `json:"product,omitempty"` - PromotionCode *string `json:"promotionCode,omitempty"` -} - -// Provider is resource provider information. 
-type Provider struct { - autorest.Response `json:"-"` - ID *string `json:"id,omitempty"` - Namespace *string `json:"namespace,omitempty"` - RegistrationState *string `json:"registrationState,omitempty"` - ResourceTypes *[]ProviderResourceType `json:"resourceTypes,omitempty"` -} - -// ProviderListResult is list of resource providers. -type ProviderListResult struct { - autorest.Response `json:"-"` - Value *[]Provider `json:"value,omitempty"` - NextLink *string `json:"nextLink,omitempty"` -} - -// ProviderListResultPreparer prepares a request to retrieve the next set of results. It returns -// nil if no more results exist. -func (client ProviderListResult) ProviderListResultPreparer() (*http.Request, error) { - if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { - return nil, nil - } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(to.String(client.NextLink))) -} - -// ProviderOperationsMetadata is provider Operations metadata -type ProviderOperationsMetadata struct { - autorest.Response `json:"-"` - ID *string `json:"id,omitempty"` - Name *string `json:"name,omitempty"` - Type *string `json:"type,omitempty"` - DisplayName *string `json:"displayName,omitempty"` - ResourceTypes *[]ResourceType `json:"resourceTypes,omitempty"` - Operations *[]Operation `json:"operations,omitempty"` -} - -// ProviderOperationsMetadataListResult is provider operations metadata list -type ProviderOperationsMetadataListResult struct { - autorest.Response `json:"-"` - Value *[]ProviderOperationsMetadata `json:"value,omitempty"` -} - -// ProviderResourceType is resource type managed by the resource provider. 
-type ProviderResourceType struct { - ResourceType *string `json:"resourceType,omitempty"` - Locations *[]string `json:"locations,omitempty"` - APIVersions *[]string `json:"apiVersions,omitempty"` - Properties *map[string]*string `json:"properties,omitempty"` -} - -// Resource is -type Resource struct { - ID *string `json:"id,omitempty"` - Name *string `json:"name,omitempty"` - Type *string `json:"type,omitempty"` - Location *string `json:"location,omitempty"` - Tags *map[string]*string `json:"tags,omitempty"` -} - -// ResourceGroup is resource group information. -type ResourceGroup struct { - autorest.Response `json:"-"` - ID *string `json:"id,omitempty"` - Name *string `json:"name,omitempty"` - Properties *ResourceGroupProperties `json:"properties,omitempty"` - Location *string `json:"location,omitempty"` - Tags *map[string]*string `json:"tags,omitempty"` -} - -// ResourceGroupFilter is resource group filter. -type ResourceGroupFilter struct { - TagName *string `json:"tagName,omitempty"` - TagValue *string `json:"tagValue,omitempty"` -} - -// ResourceGroupListResult is list of resource groups. -type ResourceGroupListResult struct { - autorest.Response `json:"-"` - Value *[]ResourceGroup `json:"value,omitempty"` - NextLink *string `json:"nextLink,omitempty"` -} - -// ResourceGroupListResultPreparer prepares a request to retrieve the next set of results. It returns -// nil if no more results exist. -func (client ResourceGroupListResult) ResourceGroupListResultPreparer() (*http.Request, error) { - if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { - return nil, nil - } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(to.String(client.NextLink))) -} - -// ResourceGroupProperties is the resource group properties. -type ResourceGroupProperties struct { - ProvisioningState *string `json:"provisioningState,omitempty"` -} - -// ResourceListResult is list of resource groups. 
-type ResourceListResult struct { - autorest.Response `json:"-"` - Value *[]GenericResource `json:"value,omitempty"` - NextLink *string `json:"nextLink,omitempty"` -} - -// ResourceListResultPreparer prepares a request to retrieve the next set of results. It returns -// nil if no more results exist. -func (client ResourceListResult) ResourceListResultPreparer() (*http.Request, error) { - if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { - return nil, nil - } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(to.String(client.NextLink))) -} - -// ResourceManagementError is -type ResourceManagementError struct { - Code *string `json:"code,omitempty"` - Message *string `json:"message,omitempty"` - Target *string `json:"target,omitempty"` -} - -// ResourceManagementErrorWithDetails is -type ResourceManagementErrorWithDetails struct { - Details *[]ResourceManagementError `json:"details,omitempty"` - Code *string `json:"code,omitempty"` - Message *string `json:"message,omitempty"` - Target *string `json:"target,omitempty"` -} - -// ResourceProviderOperationDefinition is resource provider operation -// information. -type ResourceProviderOperationDefinition struct { - Name *string `json:"name,omitempty"` - Display *ResourceProviderOperationDisplayProperties `json:"display,omitempty"` -} - -// ResourceProviderOperationDetailListResult is list of resource provider -// operations. -type ResourceProviderOperationDetailListResult struct { - autorest.Response `json:"-"` - Value *[]ResourceProviderOperationDefinition `json:"value,omitempty"` -} - -// ResourceProviderOperationDisplayProperties is resource provider operation's -// display properties. 
-type ResourceProviderOperationDisplayProperties struct { - Publisher *string `json:"publisher,omitempty"` - Provider *string `json:"provider,omitempty"` - Resource *string `json:"resource,omitempty"` - Operation *string `json:"operation,omitempty"` - Description *string `json:"description,omitempty"` -} - -// ResourceType is resource Type -type ResourceType struct { - Name *string `json:"name,omitempty"` - DisplayName *string `json:"displayName,omitempty"` - Operations *[]Operation `json:"operations,omitempty"` -} - -// SubResource is -type SubResource struct { - ID *string `json:"id,omitempty"` -} - -// TagCount is tag count. -type TagCount struct { - Type *string `json:"type,omitempty"` - Value *string `json:"value,omitempty"` -} - -// TagDetails is tag details. -type TagDetails struct { - autorest.Response `json:"-"` - ID *string `json:"id,omitempty"` - TagName *string `json:"tagName,omitempty"` - Count *TagCount `json:"count,omitempty"` - Values *[]TagValue `json:"values,omitempty"` -} - -// TagsListResult is list of subscription tags. -type TagsListResult struct { - autorest.Response `json:"-"` - Value *[]TagDetails `json:"value,omitempty"` - NextLink *string `json:"nextLink,omitempty"` -} - -// TagsListResultPreparer prepares a request to retrieve the next set of results. It returns -// nil if no more results exist. -func (client TagsListResult) TagsListResultPreparer() (*http.Request, error) { - if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { - return nil, nil - } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(to.String(client.NextLink))) -} - -// TagValue is tag information. -type TagValue struct { - autorest.Response `json:"-"` - ID *string `json:"id,omitempty"` - TagValueProperty *string `json:"tagValue,omitempty"` - Count *TagCount `json:"count,omitempty"` -} - -// TargetResource is target resource. 
-type TargetResource struct { - ID *string `json:"id,omitempty"` - ResourceName *string `json:"resourceName,omitempty"` - ResourceType *string `json:"resourceType,omitempty"` -} - -// TemplateLink is entity representing the reference to the template. -type TemplateLink struct { - URI *string `json:"uri,omitempty"` - ContentVersion *string `json:"contentVersion,omitempty"` -} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/resources/policy/assignments.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/resources/policy/assignments.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/resources/policy/assignments.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/resources/policy/assignments.go 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,690 @@ +package policy + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "net/http" +) + +// AssignmentsClient is the client for the Assignments methods of the Policy +// service. 
+type AssignmentsClient struct { + ManagementClient +} + +// NewAssignmentsClient creates an instance of the AssignmentsClient client. +func NewAssignmentsClient(subscriptionID string) AssignmentsClient { + return NewAssignmentsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewAssignmentsClientWithBaseURI creates an instance of the +// AssignmentsClient client. +func NewAssignmentsClientWithBaseURI(baseURI string, subscriptionID string) AssignmentsClient { + return AssignmentsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// Create create policy assignment. +// +// scope is scope of the policy assignment. policyAssignmentName is policy +// assignment name. parameters is policy assignment. +func (client AssignmentsClient) Create(scope string, policyAssignmentName string, parameters Assignment) (result Assignment, err error) { + req, err := client.CreatePreparer(scope, policyAssignmentName, parameters) + if err != nil { + return result, autorest.NewErrorWithError(err, "policy.AssignmentsClient", "Create", nil, "Failure preparing request") + } + + resp, err := client.CreateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "policy.AssignmentsClient", "Create", resp, "Failure sending request") + } + + result, err = client.CreateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "policy.AssignmentsClient", "Create", resp, "Failure responding to request") + } + + return +} + +// CreatePreparer prepares the Create request. 
+func (client AssignmentsClient) CreatePreparer(scope string, policyAssignmentName string, parameters Assignment) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "policyAssignmentName": autorest.Encode("path", policyAssignmentName), + "scope": scope, + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/{scope}/providers/Microsoft.Authorization/policyassignments/{policyAssignmentName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// CreateSender sends the Create request. The method will close the +// http.Response Body if it receives an error. +func (client AssignmentsClient) CreateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// CreateResponder handles the response to the Create request. The method always +// closes the http.Response Body. +func (client AssignmentsClient) CreateResponder(resp *http.Response) (result Assignment, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// CreateByID create policy assignment by Id. +// +// policyAssignmentID is policy assignment Id parameters is policy assignment. 
+func (client AssignmentsClient) CreateByID(policyAssignmentID string, parameters Assignment) (result Assignment, err error) { + req, err := client.CreateByIDPreparer(policyAssignmentID, parameters) + if err != nil { + return result, autorest.NewErrorWithError(err, "policy.AssignmentsClient", "CreateByID", nil, "Failure preparing request") + } + + resp, err := client.CreateByIDSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "policy.AssignmentsClient", "CreateByID", resp, "Failure sending request") + } + + result, err = client.CreateByIDResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "policy.AssignmentsClient", "CreateByID", resp, "Failure responding to request") + } + + return +} + +// CreateByIDPreparer prepares the CreateByID request. +func (client AssignmentsClient) CreateByIDPreparer(policyAssignmentID string, parameters Assignment) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "policyAssignmentId": policyAssignmentID, + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/{policyAssignmentId}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// CreateByIDSender sends the CreateByID request. The method will close the +// http.Response Body if it receives an error. +func (client AssignmentsClient) CreateByIDSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// CreateByIDResponder handles the response to the CreateByID request. The method always +// closes the http.Response Body. 
+func (client AssignmentsClient) CreateByIDResponder(resp *http.Response) (result Assignment, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete delete policy assignment. +// +// scope is scope of the policy assignment. policyAssignmentName is policy +// assignment name. +func (client AssignmentsClient) Delete(scope string, policyAssignmentName string) (result Assignment, err error) { + req, err := client.DeletePreparer(scope, policyAssignmentName) + if err != nil { + return result, autorest.NewErrorWithError(err, "policy.AssignmentsClient", "Delete", nil, "Failure preparing request") + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "policy.AssignmentsClient", "Delete", resp, "Failure sending request") + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "policy.AssignmentsClient", "Delete", resp, "Failure responding to request") + } + + return +} + +// DeletePreparer prepares the Delete request. 
+func (client AssignmentsClient) DeletePreparer(scope string, policyAssignmentName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "policyAssignmentName": autorest.Encode("path", policyAssignmentName), + "scope": scope, + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/{scope}/providers/Microsoft.Authorization/policyassignments/{policyAssignmentName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client AssignmentsClient) DeleteSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client AssignmentsClient) DeleteResponder(resp *http.Response) (result Assignment, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// DeleteByID delete policy assignment. 
+// +// policyAssignmentID is policy assignment Id +func (client AssignmentsClient) DeleteByID(policyAssignmentID string) (result Assignment, err error) { + req, err := client.DeleteByIDPreparer(policyAssignmentID) + if err != nil { + return result, autorest.NewErrorWithError(err, "policy.AssignmentsClient", "DeleteByID", nil, "Failure preparing request") + } + + resp, err := client.DeleteByIDSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "policy.AssignmentsClient", "DeleteByID", resp, "Failure sending request") + } + + result, err = client.DeleteByIDResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "policy.AssignmentsClient", "DeleteByID", resp, "Failure responding to request") + } + + return +} + +// DeleteByIDPreparer prepares the DeleteByID request. +func (client AssignmentsClient) DeleteByIDPreparer(policyAssignmentID string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "policyAssignmentId": policyAssignmentID, + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/{policyAssignmentId}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// DeleteByIDSender sends the DeleteByID request. The method will close the +// http.Response Body if it receives an error. +func (client AssignmentsClient) DeleteByIDSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// DeleteByIDResponder handles the response to the DeleteByID request. The method always +// closes the http.Response Body. 
+func (client AssignmentsClient) DeleteByIDResponder(resp *http.Response) (result Assignment, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Get get single policy assignment. +// +// scope is scope of the policy assignment. policyAssignmentName is policy +// assignment name. +func (client AssignmentsClient) Get(scope string, policyAssignmentName string) (result Assignment, err error) { + req, err := client.GetPreparer(scope, policyAssignmentName) + if err != nil { + return result, autorest.NewErrorWithError(err, "policy.AssignmentsClient", "Get", nil, "Failure preparing request") + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "policy.AssignmentsClient", "Get", resp, "Failure sending request") + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "policy.AssignmentsClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. 
+func (client AssignmentsClient) GetPreparer(scope string, policyAssignmentName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "policyAssignmentName": autorest.Encode("path", policyAssignmentName), + "scope": scope, + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/{scope}/providers/Microsoft.Authorization/policyassignments/{policyAssignmentName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client AssignmentsClient) GetSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client AssignmentsClient) GetResponder(resp *http.Response) (result Assignment, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// GetByID get single policy assignment. 
+// +// policyAssignmentID is policy assignment Id +func (client AssignmentsClient) GetByID(policyAssignmentID string) (result Assignment, err error) { + req, err := client.GetByIDPreparer(policyAssignmentID) + if err != nil { + return result, autorest.NewErrorWithError(err, "policy.AssignmentsClient", "GetByID", nil, "Failure preparing request") + } + + resp, err := client.GetByIDSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "policy.AssignmentsClient", "GetByID", resp, "Failure sending request") + } + + result, err = client.GetByIDResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "policy.AssignmentsClient", "GetByID", resp, "Failure responding to request") + } + + return +} + +// GetByIDPreparer prepares the GetByID request. +func (client AssignmentsClient) GetByIDPreparer(policyAssignmentID string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "policyAssignmentId": policyAssignmentID, + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/{policyAssignmentId}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetByIDSender sends the GetByID request. The method will close the +// http.Response Body if it receives an error. +func (client AssignmentsClient) GetByIDSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetByIDResponder handles the response to the GetByID request. The method always +// closes the http.Response Body. 
+func (client AssignmentsClient) GetByIDResponder(resp *http.Response) (result Assignment, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List gets all the policy assignments of a subscription. +// +// filter is the filter to apply on the operation. +func (client AssignmentsClient) List(filter string) (result AssignmentListResult, err error) { + req, err := client.ListPreparer(filter) + if err != nil { + return result, autorest.NewErrorWithError(err, "policy.AssignmentsClient", "List", nil, "Failure preparing request") + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "policy.AssignmentsClient", "List", resp, "Failure sending request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "policy.AssignmentsClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client AssignmentsClient) ListPreparer(filter string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + if len(filter) > 0 { + queryParameters["$filter"] = autorest.Encode("query", filter) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Authorization/policyassignments", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListSender sends the List request. 
The method will close the +// http.Response Body if it receives an error. +func (client AssignmentsClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client AssignmentsClient) ListResponder(resp *http.Response) (result AssignmentListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListNextResults retrieves the next set of results, if any. +func (client AssignmentsClient) ListNextResults(lastResults AssignmentListResult) (result AssignmentListResult, err error) { + req, err := lastResults.AssignmentListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "policy.AssignmentsClient", "List", nil, "Failure preparing next results request request") + } + if req == nil { + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "policy.AssignmentsClient", "List", resp, "Failure sending next results request request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "policy.AssignmentsClient", "List", resp, "Failure responding to next results request request") + } + + return +} + +// ListForResource gets policy assignments of the resource. +// +// resourceGroupName is the name of the resource group. The name is case +// insensitive. resourceProviderNamespace is the resource provider namespace. +// parentResourcePath is the parent resource path. resourceType is the +// resource type. resourceName is the resource name. filter is the filter to +// apply on the operation. 
+func (client AssignmentsClient) ListForResource(resourceGroupName string, resourceProviderNamespace string, parentResourcePath string, resourceType string, resourceName string, filter string) (result AssignmentListResult, err error) { + req, err := client.ListForResourcePreparer(resourceGroupName, resourceProviderNamespace, parentResourcePath, resourceType, resourceName, filter) + if err != nil { + return result, autorest.NewErrorWithError(err, "policy.AssignmentsClient", "ListForResource", nil, "Failure preparing request") + } + + resp, err := client.ListForResourceSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "policy.AssignmentsClient", "ListForResource", resp, "Failure sending request") + } + + result, err = client.ListForResourceResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "policy.AssignmentsClient", "ListForResource", resp, "Failure responding to request") + } + + return +} + +// ListForResourcePreparer prepares the ListForResource request. 
+func (client AssignmentsClient) ListForResourcePreparer(resourceGroupName string, resourceProviderNamespace string, parentResourcePath string, resourceType string, resourceName string, filter string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "parentResourcePath": parentResourcePath, + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "resourceName": autorest.Encode("path", resourceName), + "resourceProviderNamespace": autorest.Encode("path", resourceProviderNamespace), + "resourceType": resourceType, + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + if len(filter) > 0 { + queryParameters["$filter"] = autorest.Encode("query", filter) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}/providers/Microsoft.Authorization/policyassignments", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListForResourceSender sends the ListForResource request. The method will close the +// http.Response Body if it receives an error. +func (client AssignmentsClient) ListForResourceSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListForResourceResponder handles the response to the ListForResource request. The method always +// closes the http.Response Body. 
+func (client AssignmentsClient) ListForResourceResponder(resp *http.Response) (result AssignmentListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListForResourceNextResults retrieves the next set of results, if any. +func (client AssignmentsClient) ListForResourceNextResults(lastResults AssignmentListResult) (result AssignmentListResult, err error) { + req, err := lastResults.AssignmentListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "policy.AssignmentsClient", "ListForResource", nil, "Failure preparing next results request request") + } + if req == nil { + return + } + + resp, err := client.ListForResourceSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "policy.AssignmentsClient", "ListForResource", resp, "Failure sending next results request request") + } + + result, err = client.ListForResourceResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "policy.AssignmentsClient", "ListForResource", resp, "Failure responding to next results request request") + } + + return +} + +// ListForResourceGroup gets policy assignments of the resource group. +// +// resourceGroupName is resource group name. filter is the filter to apply on +// the operation. 
+func (client AssignmentsClient) ListForResourceGroup(resourceGroupName string, filter string) (result AssignmentListResult, err error) { + req, err := client.ListForResourceGroupPreparer(resourceGroupName, filter) + if err != nil { + return result, autorest.NewErrorWithError(err, "policy.AssignmentsClient", "ListForResourceGroup", nil, "Failure preparing request") + } + + resp, err := client.ListForResourceGroupSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "policy.AssignmentsClient", "ListForResourceGroup", resp, "Failure sending request") + } + + result, err = client.ListForResourceGroupResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "policy.AssignmentsClient", "ListForResourceGroup", resp, "Failure responding to request") + } + + return +} + +// ListForResourceGroupPreparer prepares the ListForResourceGroup request. +func (client AssignmentsClient) ListForResourceGroupPreparer(resourceGroupName string, filter string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + if len(filter) > 0 { + queryParameters["$filter"] = filter + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Authorization/policyAssignments", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListForResourceGroupSender sends the ListForResourceGroup request. The method will close the +// http.Response Body if it receives an error. 
+func (client AssignmentsClient) ListForResourceGroupSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListForResourceGroupResponder handles the response to the ListForResourceGroup request. The method always +// closes the http.Response Body. +func (client AssignmentsClient) ListForResourceGroupResponder(resp *http.Response) (result AssignmentListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListForResourceGroupNextResults retrieves the next set of results, if any. +func (client AssignmentsClient) ListForResourceGroupNextResults(lastResults AssignmentListResult) (result AssignmentListResult, err error) { + req, err := lastResults.AssignmentListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "policy.AssignmentsClient", "ListForResourceGroup", nil, "Failure preparing next results request request") + } + if req == nil { + return + } + + resp, err := client.ListForResourceGroupSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "policy.AssignmentsClient", "ListForResourceGroup", resp, "Failure sending next results request request") + } + + result, err = client.ListForResourceGroupResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "policy.AssignmentsClient", "ListForResourceGroup", resp, "Failure responding to next results request request") + } + + return +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/resources/policy/client.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/resources/policy/client.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/resources/policy/client.go 1970-01-01 
// Package policy implements the Azure ARM Policy service API version
// 2015-10-01-preview.
//
package policy

// Copyright (c) Microsoft and contributors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0
// Changes may cause incorrect behavior and will be lost if the code is
// regenerated.

import (
	"github.com/Azure/go-autorest/autorest"
)

const (
	// APIVersion is the version of the Policy
	APIVersion = "2015-10-01-preview"

	// DefaultBaseURI is the default URI used for the service Policy
	DefaultBaseURI = "https://management.azure.com"
)

// ManagementClient is the base client for Policy. The per-resource clients
// (e.g. AssignmentsClient, DefinitionsClient) embed it.
type ManagementClient struct {
	autorest.Client
	BaseURI        string // service endpoint, DefaultBaseURI unless overridden
	APIVersion     string // api-version sent on every request
	SubscriptionID string // Azure subscription the requests are scoped to
}

// New creates an instance of the ManagementClient client.
func New(subscriptionID string) ManagementClient {
	return NewWithBaseURI(DefaultBaseURI, subscriptionID)
}

// NewWithBaseURI creates an instance of the ManagementClient client.
func NewWithBaseURI(baseURI string, subscriptionID string) ManagementClient {
	return ManagementClient{
		Client:         autorest.NewClientWithUserAgent(UserAgent()),
		BaseURI:        baseURI,
		APIVersion:     APIVersion,
		SubscriptionID: subscriptionID,
	}
}

// ---- definitions.go ----

package policy

// Copyright (c) Microsoft and contributors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0
// Changes may cause incorrect behavior and will be lost if the code is
// regenerated.

import (
	"github.com/Azure/go-autorest/autorest"
	"github.com/Azure/go-autorest/autorest/azure"
	"net/http"
)

// DefinitionsClient is the client for the Definitions methods of the Policy
// service.
type DefinitionsClient struct {
	ManagementClient
}

// NewDefinitionsClient creates an instance of the DefinitionsClient client.
func NewDefinitionsClient(subscriptionID string) DefinitionsClient {
	return NewDefinitionsClientWithBaseURI(DefaultBaseURI, subscriptionID)
}

// NewDefinitionsClientWithBaseURI creates an instance of the
// DefinitionsClient client.
func NewDefinitionsClientWithBaseURI(baseURI string, subscriptionID string) DefinitionsClient {
	return DefinitionsClient{NewWithBaseURI(baseURI, subscriptionID)}
}

// CreateOrUpdate create or update a policy definition.
//
// policyDefinitionName is the policy definition name. parameters is the
// policy definition properties.
func (client DefinitionsClient) CreateOrUpdate(policyDefinitionName string, parameters Definition) (result Definition, err error) {
	// Standard generated prepare/send/respond pipeline.
	req, err := client.CreateOrUpdatePreparer(policyDefinitionName, parameters)
	if err != nil {
		return result, autorest.NewErrorWithError(err, "policy.DefinitionsClient", "CreateOrUpdate", nil, "Failure preparing request")
	}

	resp, err := client.CreateOrUpdateSender(req)
	if err != nil {
		// Attach the (possibly nil) response so callers can inspect it on error.
		result.Response = autorest.Response{Response: resp}
		return result, autorest.NewErrorWithError(err, "policy.DefinitionsClient", "CreateOrUpdate", resp, "Failure sending request")
	}

	result, err = client.CreateOrUpdateResponder(resp)
	if err != nil {
		err = autorest.NewErrorWithError(err, "policy.DefinitionsClient", "CreateOrUpdate", resp, "Failure responding to request")
	}

	return
}

// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
func (client DefinitionsClient) CreateOrUpdatePreparer(policyDefinitionName string, parameters Definition) (*http.Request, error) {
	pathParameters := map[string]interface{}{
		"policyDefinitionName": autorest.Encode("path", policyDefinitionName),
		"subscriptionId":       autorest.Encode("path", client.SubscriptionID),
	}

	queryParameters := map[string]interface{}{
		"api-version": client.APIVersion,
	}

	// PUT with the Definition marshalled as the JSON body.
	preparer := autorest.CreatePreparer(
		autorest.AsJSON(),
		autorest.AsPut(),
		autorest.WithBaseURL(client.BaseURI),
		autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Authorization/policydefinitions/{policyDefinitionName}", pathParameters),
		autorest.WithJSON(parameters),
		autorest.WithQueryParameters(queryParameters))
	return preparer.Prepare(&http.Request{})
}

// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
// http.Response Body if it receives an error.
func (client DefinitionsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) {
	return autorest.SendWithSender(client, req)
}

// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
// closes the http.Response Body.
func (client DefinitionsClient) CreateOrUpdateResponder(resp *http.Response) (result Definition, err error) {
	// 200 OK (updated) and 201 Created are both success statuses here.
	err = autorest.Respond(
		resp,
		client.ByInspecting(),
		azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
		autorest.ByUnmarshallingJSON(&result),
		autorest.ByClosing())
	result.Response = autorest.Response{Response: resp}
	return
}

// Delete deletes the policy definition.
//
// policyDefinitionName is the policy definition name.
func (client DefinitionsClient) Delete(policyDefinitionName string) (result autorest.Response, err error) {
	req, err := client.DeletePreparer(policyDefinitionName)
	if err != nil {
		return result, autorest.NewErrorWithError(err, "policy.DefinitionsClient", "Delete", nil, "Failure preparing request")
	}

	resp, err := client.DeleteSender(req)
	if err != nil {
		// Delete has no body to unmarshal, so result is the bare response.
		result.Response = resp
		return result, autorest.NewErrorWithError(err, "policy.DefinitionsClient", "Delete", resp, "Failure sending request")
	}

	result, err = client.DeleteResponder(resp)
	if err != nil {
		err = autorest.NewErrorWithError(err, "policy.DefinitionsClient", "Delete", resp, "Failure responding to request")
	}

	return
}

// DeletePreparer prepares the Delete request.
func (client DefinitionsClient) DeletePreparer(policyDefinitionName string) (*http.Request, error) {
	pathParameters := map[string]interface{}{
		"policyDefinitionName": autorest.Encode("path", policyDefinitionName),
		"subscriptionId":       autorest.Encode("path", client.SubscriptionID),
	}

	queryParameters := map[string]interface{}{
		"api-version": client.APIVersion,
	}

	preparer := autorest.CreatePreparer(
		autorest.AsDelete(),
		autorest.WithBaseURL(client.BaseURI),
		autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Authorization/policydefinitions/{policyDefinitionName}", pathParameters),
		autorest.WithQueryParameters(queryParameters))
	return preparer.Prepare(&http.Request{})
}

// DeleteSender sends the Delete request. The method will close the
// http.Response Body if it receives an error.
func (client DefinitionsClient) DeleteSender(req *http.Request) (*http.Response, error) {
	return autorest.SendWithSender(client, req)
}

// DeleteResponder handles the response to the Delete request. The method always
// closes the http.Response Body.
func (client DefinitionsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
	// 204 No Content and 200 OK are both accepted for deletion.
	err = autorest.Respond(
		resp,
		client.ByInspecting(),
		azure.WithErrorUnlessStatusCode(http.StatusNoContent, http.StatusOK),
		autorest.ByClosing())
	result.Response = resp
	return
}

// Get gets the policy definition.
//
// policyDefinitionName is the policy definition name.
func (client DefinitionsClient) Get(policyDefinitionName string) (result Definition, err error) {
	req, err := client.GetPreparer(policyDefinitionName)
	if err != nil {
		return result, autorest.NewErrorWithError(err, "policy.DefinitionsClient", "Get", nil, "Failure preparing request")
	}

	resp, err := client.GetSender(req)
	if err != nil {
		result.Response = autorest.Response{Response: resp}
		return result, autorest.NewErrorWithError(err, "policy.DefinitionsClient", "Get", resp, "Failure sending request")
	}

	result, err = client.GetResponder(resp)
	if err != nil {
		err = autorest.NewErrorWithError(err, "policy.DefinitionsClient", "Get", resp, "Failure responding to request")
	}

	return
}

// GetPreparer prepares the Get request.
func (client DefinitionsClient) GetPreparer(policyDefinitionName string) (*http.Request, error) {
	pathParameters := map[string]interface{}{
		"policyDefinitionName": autorest.Encode("path", policyDefinitionName),
		"subscriptionId":       autorest.Encode("path", client.SubscriptionID),
	}

	queryParameters := map[string]interface{}{
		"api-version": client.APIVersion,
	}

	preparer := autorest.CreatePreparer(
		autorest.AsGet(),
		autorest.WithBaseURL(client.BaseURI),
		autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Authorization/policydefinitions/{policyDefinitionName}", pathParameters),
		autorest.WithQueryParameters(queryParameters))
	return preparer.Prepare(&http.Request{})
}

// GetSender sends the Get request. The method will close the
// http.Response Body if it receives an error.
func (client DefinitionsClient) GetSender(req *http.Request) (*http.Response, error) {
	return autorest.SendWithSender(client, req)
}

// GetResponder handles the response to the Get request. The method always
// closes the http.Response Body.
func (client DefinitionsClient) GetResponder(resp *http.Response) (result Definition, err error) {
	err = autorest.Respond(
		resp,
		client.ByInspecting(),
		azure.WithErrorUnlessStatusCode(http.StatusOK),
		autorest.ByUnmarshallingJSON(&result),
		autorest.ByClosing())
	result.Response = autorest.Response{Response: resp}
	return
}

// List gets all the policy definitions of a subscription.
//
// filter is the filter to apply on the operation.
func (client DefinitionsClient) List(filter string) (result DefinitionListResult, err error) {
	req, err := client.ListPreparer(filter)
	if err != nil {
		return result, autorest.NewErrorWithError(err, "policy.DefinitionsClient", "List", nil, "Failure preparing request")
	}

	resp, err := client.ListSender(req)
	if err != nil {
		result.Response = autorest.Response{Response: resp}
		return result, autorest.NewErrorWithError(err, "policy.DefinitionsClient", "List", resp, "Failure sending request")
	}

	result, err = client.ListResponder(resp)
	if err != nil {
		err = autorest.NewErrorWithError(err, "policy.DefinitionsClient", "List", resp, "Failure responding to request")
	}

	return
}

// ListPreparer prepares the List request.
func (client DefinitionsClient) ListPreparer(filter string) (*http.Request, error) {
	pathParameters := map[string]interface{}{
		"subscriptionId": autorest.Encode("path", client.SubscriptionID),
	}

	queryParameters := map[string]interface{}{
		"api-version": client.APIVersion,
	}
	// $filter is optional and only added when non-empty.
	if len(filter) > 0 {
		queryParameters["$filter"] = autorest.Encode("query", filter)
	}

	preparer := autorest.CreatePreparer(
		autorest.AsGet(),
		autorest.WithBaseURL(client.BaseURI),
		autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Authorization/policydefinitions", pathParameters),
		autorest.WithQueryParameters(queryParameters))
	return preparer.Prepare(&http.Request{})
}

// ListSender sends the List request. The method will close the
// http.Response Body if it receives an error.
func (client DefinitionsClient) ListSender(req *http.Request) (*http.Response, error) {
	return autorest.SendWithSender(client, req)
}

// ListResponder handles the response to the List request. The method always
// closes the http.Response Body.
func (client DefinitionsClient) ListResponder(resp *http.Response) (result DefinitionListResult, err error) {
	err = autorest.Respond(
		resp,
		client.ByInspecting(),
		azure.WithErrorUnlessStatusCode(http.StatusOK),
		autorest.ByUnmarshallingJSON(&result),
		autorest.ByClosing())
	result.Response = autorest.Response{Response: resp}
	return
}

// ListNextResults retrieves the next set of results, if any.
+func (client DefinitionsClient) ListNextResults(lastResults DefinitionListResult) (result DefinitionListResult, err error) { + req, err := lastResults.DefinitionListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "policy.DefinitionsClient", "List", nil, "Failure preparing next results request request") + } + if req == nil { + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "policy.DefinitionsClient", "List", resp, "Failure sending next results request request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "policy.DefinitionsClient", "List", resp, "Failure responding to next results request request") + } + + return +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/resources/policy/models.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/resources/policy/models.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/resources/policy/models.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/resources/policy/models.go 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,107 @@ +package policy + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. 
//
// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0
// Changes may cause incorrect behavior and will be lost if the code is
// regenerated.

import (
	"github.com/Azure/go-autorest/autorest"
	"github.com/Azure/go-autorest/autorest/to"
	"net/http"
)

// Type enumerates the values for type.
type Type string

const (
	// BuiltIn specifies the built in state for type.
	BuiltIn Type = "BuiltIn"
	// Custom specifies the custom state for type.
	Custom Type = "Custom"
	// NotSpecified specifies the not specified state for type.
	NotSpecified Type = "NotSpecified"
)

// Assignment is the policy definition.
type Assignment struct {
	autorest.Response `json:"-"`
	Properties        *AssignmentProperties `json:"properties,omitempty"`
	ID                *string               `json:"id,omitempty"`
	Type              *string               `json:"type,omitempty"`
	Name              *string               `json:"name,omitempty"`
}

// AssignmentListResult is list of policy assignments.
type AssignmentListResult struct {
	autorest.Response `json:"-"`
	Value             *[]Assignment `json:"value,omitempty"`
	NextLink          *string       `json:"nextLink,omitempty"`
}

// AssignmentListResultPreparer prepares a request to retrieve the next set of results. It returns
// nil if no more results exist.
func (client AssignmentListResult) AssignmentListResultPreparer() (*http.Request, error) {
	// No NextLink (or an empty one) means the last page has been reached.
	if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 {
		return nil, nil
	}
	// NextLink is a complete URL, so it is used as the base with no path.
	return autorest.Prepare(&http.Request{},
		autorest.AsJSON(),
		autorest.AsGet(),
		autorest.WithBaseURL(to.String(client.NextLink)))
}

// AssignmentProperties is the policy assignment properties.
type AssignmentProperties struct {
	DisplayName        *string `json:"displayName,omitempty"`
	PolicyDefinitionID *string `json:"policyDefinitionId,omitempty"`
	Scope              *string `json:"scope,omitempty"`
}

// Definition is the policy definition.
type Definition struct {
	autorest.Response `json:"-"`
	Properties        *DefinitionProperties `json:"properties,omitempty"`
	ID                *string               `json:"id,omitempty"`
	Name              *string               `json:"name,omitempty"`
}

// DefinitionListResult is list of policy definitions.
type DefinitionListResult struct {
	autorest.Response `json:"-"`
	Value             *[]Definition `json:"value,omitempty"`
	NextLink          *string       `json:"nextLink,omitempty"`
}

// DefinitionListResultPreparer prepares a request to retrieve the next set of results. It returns
// nil if no more results exist.
func (client DefinitionListResult) DefinitionListResultPreparer() (*http.Request, error) {
	// No NextLink (or an empty one) means the last page has been reached.
	if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 {
		return nil, nil
	}
	// NextLink is a complete URL, so it is used as the base with no path.
	return autorest.Prepare(&http.Request{},
		autorest.AsJSON(),
		autorest.AsGet(),
		autorest.WithBaseURL(to.String(client.NextLink)))
}

// DefinitionProperties is the policy definition properties.
type DefinitionProperties struct {
	PolicyType  Type                    `json:"policyType,omitempty"`
	DisplayName *string                 `json:"displayName,omitempty"`
	Description *string                 `json:"description,omitempty"`
	PolicyRule  *map[string]interface{} `json:"policyRule,omitempty"`
}

// ---- version.go ----

package policy

// Copyright (c) Microsoft and contributors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0
// Changes may cause incorrect behavior and will be lost if the code is
// regenerated.

import (
	"fmt"
)

const (
	major = "3"
	minor = "1"
	patch = "0"
	// Always begin a "tag" with a dash (as per http://semver.org)
	tag             = "-beta"
	semVerFormat    = "%s.%s.%s%s"
	userAgentFormat = "Azure-SDK-for-Go/%s arm-%s/%s"
)

// UserAgent returns the UserAgent string to use when sending http.Requests.
func UserAgent() string {
	// Embeds the SDK version, the package name, and the service API version.
	return fmt.Sprintf(userAgentFormat, Version(), "policy", "2015-10-01-preview")
}

// Version returns the semantic version (see http://semver.org) of the client.
func Version() string {
	return fmt.Sprintf(semVerFormat, major, minor, patch, tag)
}
-// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. -// -// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 -// Changes may cause incorrect behavior and will be lost if the code is -// regenerated. - -import ( - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" - "net/http" - "net/url" -) - -// ProviderOperationDetailsClient is the client for the -// ProviderOperationDetails methods of the Resources service. -type ProviderOperationDetailsClient struct { - ManagementClient -} - -// NewProviderOperationDetailsClient creates an instance of the -// ProviderOperationDetailsClient client. -func NewProviderOperationDetailsClient(subscriptionID string) ProviderOperationDetailsClient { - return NewProviderOperationDetailsClientWithBaseURI(DefaultBaseURI, subscriptionID) -} - -// NewProviderOperationDetailsClientWithBaseURI creates an instance of the -// ProviderOperationDetailsClient client. -func NewProviderOperationDetailsClientWithBaseURI(baseURI string, subscriptionID string) ProviderOperationDetailsClient { - return ProviderOperationDetailsClient{NewWithBaseURI(baseURI, subscriptionID)} -} - -// List gets a list of resource providers. -// -// resourceProviderNamespace is resource identity. 
-func (client ProviderOperationDetailsClient) List(resourceProviderNamespace string, apiVersion string) (result ResourceProviderOperationDetailListResult, ae error) { - req, err := client.ListPreparer(resourceProviderNamespace, apiVersion) - if err != nil { - return result, autorest.NewErrorWithError(err, "resources/ProviderOperationDetailsClient", "List", "Failure preparing request") - } - - resp, err := client.ListSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "resources/ProviderOperationDetailsClient", "List", "Failure sending request") - } - - result, err = client.ListResponder(resp) - if err != nil { - ae = autorest.NewErrorWithError(err, "resources/ProviderOperationDetailsClient", "List", "Failure responding to request") - } - - return -} - -// ListPreparer prepares the List request. -func (client ProviderOperationDetailsClient) ListPreparer(resourceProviderNamespace string, apiVersion string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "resourceProviderNamespace": url.QueryEscape(resourceProviderNamespace), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - } - - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/providers/{resourceProviderNamespace}/operations"), - autorest.WithPathParameters(pathParameters), - autorest.WithQueryParameters(queryParameters)) -} - -// ListSender sends the List request. The method will close the -// http.Response Body if it receives an error. -func (client ProviderOperationDetailsClient) ListSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK, http.StatusNoContent) -} - -// ListResponder handles the response to the List request. The method always -// closes the http.Response Body. 
-func (client ProviderOperationDetailsClient) ListResponder(resp *http.Response) (result ResourceProviderOperationDetailListResult, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/resources/provideroperationsmetadataoperations.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/resources/provideroperationsmetadataoperations.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/resources/provideroperationsmetadataoperations.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/resources/provideroperationsmetadataoperations.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,171 +0,0 @@ -package resources - -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. -// -// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 -// Changes may cause incorrect behavior and will be lost if the code is -// regenerated. 
- -import ( - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" - "net/http" - "net/url" -) - -// ProviderOperationsMetadataOperationsClient is the client for the -// ProviderOperationsMetadataOperations methods of the Resources service. -type ProviderOperationsMetadataOperationsClient struct { - ManagementClient -} - -// NewProviderOperationsMetadataOperationsClient creates an instance of the -// ProviderOperationsMetadataOperationsClient client. -func NewProviderOperationsMetadataOperationsClient(subscriptionID string) ProviderOperationsMetadataOperationsClient { - return NewProviderOperationsMetadataOperationsClientWithBaseURI(DefaultBaseURI, subscriptionID) -} - -// NewProviderOperationsMetadataOperationsClientWithBaseURI creates an -// instance of the ProviderOperationsMetadataOperationsClient client. -func NewProviderOperationsMetadataOperationsClientWithBaseURI(baseURI string, subscriptionID string) ProviderOperationsMetadataOperationsClient { - return ProviderOperationsMetadataOperationsClient{NewWithBaseURI(baseURI, subscriptionID)} -} - -// Get gets provider operations metadata -// -// resourceProviderNamespace is namespace of the resource provider. 
-func (client ProviderOperationsMetadataOperationsClient) Get(resourceProviderNamespace string, apiVersion string, expand string) (result ProviderOperationsMetadata, ae error) { - req, err := client.GetPreparer(resourceProviderNamespace, apiVersion, expand) - if err != nil { - return result, autorest.NewErrorWithError(err, "resources/ProviderOperationsMetadataOperationsClient", "Get", "Failure preparing request") - } - - resp, err := client.GetSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "resources/ProviderOperationsMetadataOperationsClient", "Get", "Failure sending request") - } - - result, err = client.GetResponder(resp) - if err != nil { - ae = autorest.NewErrorWithError(err, "resources/ProviderOperationsMetadataOperationsClient", "Get", "Failure responding to request") - } - - return -} - -// GetPreparer prepares the Get request. -func (client ProviderOperationsMetadataOperationsClient) GetPreparer(resourceProviderNamespace string, apiVersion string, expand string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "resourceProviderNamespace": url.QueryEscape(resourceProviderNamespace), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - } - - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - if len(expand) > 0 { - queryParameters["$expand"] = expand - } - - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/providers/Microsoft.Authorization/providerOperations/{resourceProviderNamespace}"), - autorest.WithPathParameters(pathParameters), - autorest.WithQueryParameters(queryParameters)) -} - -// GetSender sends the Get request. The method will close the -// http.Response Body if it receives an error. 
-func (client ProviderOperationsMetadataOperationsClient) GetSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) -} - -// GetResponder handles the response to the Get request. The method always -// closes the http.Response Body. -func (client ProviderOperationsMetadataOperationsClient) GetResponder(resp *http.Response) (result ProviderOperationsMetadata, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// List gets provider operations metadata list -// -func (client ProviderOperationsMetadataOperationsClient) List(apiVersion string, expand string) (result ProviderOperationsMetadataListResult, ae error) { - req, err := client.ListPreparer(apiVersion, expand) - if err != nil { - return result, autorest.NewErrorWithError(err, "resources/ProviderOperationsMetadataOperationsClient", "List", "Failure preparing request") - } - - resp, err := client.ListSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "resources/ProviderOperationsMetadataOperationsClient", "List", "Failure sending request") - } - - result, err = client.ListResponder(resp) - if err != nil { - ae = autorest.NewErrorWithError(err, "resources/ProviderOperationsMetadataOperationsClient", "List", "Failure responding to request") - } - - return -} - -// ListPreparer prepares the List request. 
-func (client ProviderOperationsMetadataOperationsClient) ListPreparer(apiVersion string, expand string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "subscriptionId": url.QueryEscape(client.SubscriptionID), - } - - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - if len(expand) > 0 { - queryParameters["$expand"] = expand - } - - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/providers/Microsoft.Authorization/providerOperations"), - autorest.WithPathParameters(pathParameters), - autorest.WithQueryParameters(queryParameters)) -} - -// ListSender sends the List request. The method will close the -// http.Response Body if it receives an error. -func (client ProviderOperationsMetadataOperationsClient) ListSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) -} - -// ListResponder handles the response to the List request. The method always -// closes the http.Response Body. -func (client ProviderOperationsMetadataOperationsClient) ListResponder(resp *http.Response) (result ProviderOperationsMetadataListResult, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/resources/providers.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/resources/providers.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/resources/providers.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/resources/providers.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,316 +0,0 @@ -package resources - -// Copyright (c) Microsoft and contributors. All rights reserved. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. -// -// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 -// Changes may cause incorrect behavior and will be lost if the code is -// regenerated. - -import ( - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" - "net/http" - "net/url" -) - -// ProvidersClient is the client for the Providers methods of the Resources -// service. -type ProvidersClient struct { - ManagementClient -} - -// NewProvidersClient creates an instance of the ProvidersClient client. -func NewProvidersClient(subscriptionID string) ProvidersClient { - return NewProvidersClientWithBaseURI(DefaultBaseURI, subscriptionID) -} - -// NewProvidersClientWithBaseURI creates an instance of the ProvidersClient -// client. -func NewProvidersClientWithBaseURI(baseURI string, subscriptionID string) ProvidersClient { - return ProvidersClient{NewWithBaseURI(baseURI, subscriptionID)} -} - -// Get gets a resource provider. -// -// resourceProviderNamespace is namespace of the resource provider. 
-func (client ProvidersClient) Get(resourceProviderNamespace string) (result Provider, ae error) { - req, err := client.GetPreparer(resourceProviderNamespace) - if err != nil { - return result, autorest.NewErrorWithError(err, "resources/ProvidersClient", "Get", "Failure preparing request") - } - - resp, err := client.GetSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "resources/ProvidersClient", "Get", "Failure sending request") - } - - result, err = client.GetResponder(resp) - if err != nil { - ae = autorest.NewErrorWithError(err, "resources/ProvidersClient", "Get", "Failure responding to request") - } - - return -} - -// GetPreparer prepares the Get request. -func (client ProvidersClient) GetPreparer(resourceProviderNamespace string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "resourceProviderNamespace": url.QueryEscape(resourceProviderNamespace), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - } - - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/providers/{resourceProviderNamespace}"), - autorest.WithPathParameters(pathParameters), - autorest.WithQueryParameters(queryParameters)) -} - -// GetSender sends the Get request. The method will close the -// http.Response Body if it receives an error. -func (client ProvidersClient) GetSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) -} - -// GetResponder handles the response to the Get request. The method always -// closes the http.Response Body. 
-func (client ProvidersClient) GetResponder(resp *http.Response) (result Provider, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// List gets a list of resource providers. -// -// top is query parameters. If null is passed returns all deployments. -func (client ProvidersClient) List(top *int) (result ProviderListResult, ae error) { - req, err := client.ListPreparer(top) - if err != nil { - return result, autorest.NewErrorWithError(err, "resources/ProvidersClient", "List", "Failure preparing request") - } - - resp, err := client.ListSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "resources/ProvidersClient", "List", "Failure sending request") - } - - result, err = client.ListResponder(resp) - if err != nil { - ae = autorest.NewErrorWithError(err, "resources/ProvidersClient", "List", "Failure responding to request") - } - - return -} - -// ListPreparer prepares the List request. -func (client ProvidersClient) ListPreparer(top *int) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "subscriptionId": url.QueryEscape(client.SubscriptionID), - } - - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - if top != nil { - queryParameters["$top"] = top - } - - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/providers"), - autorest.WithPathParameters(pathParameters), - autorest.WithQueryParameters(queryParameters)) -} - -// ListSender sends the List request. The method will close the -// http.Response Body if it receives an error. 
-func (client ProvidersClient) ListSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) -} - -// ListResponder handles the response to the List request. The method always -// closes the http.Response Body. -func (client ProvidersClient) ListResponder(resp *http.Response) (result ProviderListResult, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// ListNextResults retrieves the next set of results, if any. -func (client ProvidersClient) ListNextResults(lastResults ProviderListResult) (result ProviderListResult, ae error) { - req, err := lastResults.ProviderListResultPreparer() - if err != nil { - return result, autorest.NewErrorWithError(err, "resources/ProvidersClient", "List", "Failure preparing next results request request") - } - if req == nil { - return - } - - resp, err := client.ListSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "resources/ProvidersClient", "List", "Failure sending next results request request") - } - - result, err = client.ListResponder(resp) - if err != nil { - ae = autorest.NewErrorWithError(err, "resources/ProvidersClient", "List", "Failure responding to next results request request") - } - - return -} - -// Register registers provider to be used with a subscription. -// -// resourceProviderNamespace is namespace of the resource provider. 
-func (client ProvidersClient) Register(resourceProviderNamespace string) (result Provider, ae error) { - req, err := client.RegisterPreparer(resourceProviderNamespace) - if err != nil { - return result, autorest.NewErrorWithError(err, "resources/ProvidersClient", "Register", "Failure preparing request") - } - - resp, err := client.RegisterSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "resources/ProvidersClient", "Register", "Failure sending request") - } - - result, err = client.RegisterResponder(resp) - if err != nil { - ae = autorest.NewErrorWithError(err, "resources/ProvidersClient", "Register", "Failure responding to request") - } - - return -} - -// RegisterPreparer prepares the Register request. -func (client ProvidersClient) RegisterPreparer(resourceProviderNamespace string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "resourceProviderNamespace": url.QueryEscape(resourceProviderNamespace), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - } - - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsPost(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/providers/{resourceProviderNamespace}/register"), - autorest.WithPathParameters(pathParameters), - autorest.WithQueryParameters(queryParameters)) -} - -// RegisterSender sends the Register request. The method will close the -// http.Response Body if it receives an error. -func (client ProvidersClient) RegisterSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) -} - -// RegisterResponder handles the response to the Register request. The method always -// closes the http.Response Body. 
-func (client ProvidersClient) RegisterResponder(resp *http.Response) (result Provider, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// Unregister unregisters provider from a subscription. -// -// resourceProviderNamespace is namespace of the resource provider. -func (client ProvidersClient) Unregister(resourceProviderNamespace string) (result Provider, ae error) { - req, err := client.UnregisterPreparer(resourceProviderNamespace) - if err != nil { - return result, autorest.NewErrorWithError(err, "resources/ProvidersClient", "Unregister", "Failure preparing request") - } - - resp, err := client.UnregisterSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "resources/ProvidersClient", "Unregister", "Failure sending request") - } - - result, err = client.UnregisterResponder(resp) - if err != nil { - ae = autorest.NewErrorWithError(err, "resources/ProvidersClient", "Unregister", "Failure responding to request") - } - - return -} - -// UnregisterPreparer prepares the Unregister request. 
-func (client ProvidersClient) UnregisterPreparer(resourceProviderNamespace string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "resourceProviderNamespace": url.QueryEscape(resourceProviderNamespace), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - } - - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsPost(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/providers/{resourceProviderNamespace}/unregister"), - autorest.WithPathParameters(pathParameters), - autorest.WithQueryParameters(queryParameters)) -} - -// UnregisterSender sends the Unregister request. The method will close the -// http.Response Body if it receives an error. -func (client ProvidersClient) UnregisterSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) -} - -// UnregisterResponder handles the response to the Unregister request. The method always -// closes the http.Response Body. 
-func (client ProvidersClient) UnregisterResponder(resp *http.Response) (result Provider, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/resources/resources/client.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/resources/resources/client.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/resources/resources/client.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/resources/resources/client.go 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,492 @@ +// Package resources implements the Azure ARM Resources service API version +// 2016-02-01. +// +package resources + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. 
+ +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "net/http" +) + +const ( + // APIVersion is the version of the Resources + APIVersion = "2016-02-01" + + // DefaultBaseURI is the default URI used for the service Resources + DefaultBaseURI = "https://management.azure.com" +) + +// ManagementClient is the base client for Resources. +type ManagementClient struct { + autorest.Client + BaseURI string + APIVersion string + SubscriptionID string +} + +// New creates an instance of the ManagementClient client. +func New(subscriptionID string) ManagementClient { + return NewWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewWithBaseURI creates an instance of the ManagementClient client. +func NewWithBaseURI(baseURI string, subscriptionID string) ManagementClient { + return ManagementClient{ + Client: autorest.NewClientWithUserAgent(UserAgent()), + BaseURI: baseURI, + APIVersion: APIVersion, + SubscriptionID: subscriptionID, + } +} + +// CheckExistence checks whether resource exists. +// +// resourceGroupName is the name of the resource group. The name is case +// insensitive. resourceProviderNamespace is resource identity. +// parentResourcePath is resource identity. resourceType is resource +// identity. resourceName is resource identity. 
+func (client ManagementClient) CheckExistence(resourceGroupName string, resourceProviderNamespace string, parentResourcePath string, resourceType string, resourceName string) (result autorest.Response, err error) { + req, err := client.CheckExistencePreparer(resourceGroupName, resourceProviderNamespace, parentResourcePath, resourceType, resourceName) + if err != nil { + return result, autorest.NewErrorWithError(err, "resources.ManagementClient", "CheckExistence", nil, "Failure preparing request") + } + + resp, err := client.CheckExistenceSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "resources.ManagementClient", "CheckExistence", resp, "Failure sending request") + } + + result, err = client.CheckExistenceResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.ManagementClient", "CheckExistence", resp, "Failure responding to request") + } + + return +} + +// CheckExistencePreparer prepares the CheckExistence request. 
+func (client ManagementClient) CheckExistencePreparer(resourceGroupName string, resourceProviderNamespace string, parentResourcePath string, resourceType string, resourceName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "parentResourcePath": parentResourcePath, + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "resourceName": autorest.Encode("path", resourceName), + "resourceProviderNamespace": autorest.Encode("path", resourceProviderNamespace), + "resourceType": resourceType, + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsHead(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// CheckExistenceSender sends the CheckExistence request. The method will close the +// http.Response Body if it receives an error. +func (client ManagementClient) CheckExistenceSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// CheckExistenceResponder handles the response to the CheckExistence request. The method always +// closes the http.Response Body. +func (client ManagementClient) CheckExistenceResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent, http.StatusNotFound), + autorest.ByClosing()) + result.Response = resp + return +} + +// CreateOrUpdate create a resource. +// +// resourceGroupName is the name of the resource group. The name is case +// insensitive. 
resourceProviderNamespace is resource identity. +// parentResourcePath is resource identity. resourceType is resource +// identity. resourceName is resource identity. parameters is create or +// update resource parameters. +func (client ManagementClient) CreateOrUpdate(resourceGroupName string, resourceProviderNamespace string, parentResourcePath string, resourceType string, resourceName string, parameters GenericResource) (result GenericResource, err error) { + req, err := client.CreateOrUpdatePreparer(resourceGroupName, resourceProviderNamespace, parentResourcePath, resourceType, resourceName, parameters) + if err != nil { + return result, autorest.NewErrorWithError(err, "resources.ManagementClient", "CreateOrUpdate", nil, "Failure preparing request") + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "resources.ManagementClient", "CreateOrUpdate", resp, "Failure sending request") + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.ManagementClient", "CreateOrUpdate", resp, "Failure responding to request") + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. 
+func (client ManagementClient) CreateOrUpdatePreparer(resourceGroupName string, resourceProviderNamespace string, parentResourcePath string, resourceType string, resourceName string, parameters GenericResource) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "parentResourcePath": parentResourcePath, + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "resourceName": autorest.Encode("path", resourceName), + "resourceProviderNamespace": autorest.Encode("path", resourceProviderNamespace), + "resourceType": resourceType, + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client ManagementClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. 
+func (client ManagementClient) CreateOrUpdateResponder(resp *http.Response) (result GenericResource, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusCreated, http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete delete resource and all of its resources. +// +// resourceGroupName is the name of the resource group. The name is case +// insensitive. resourceProviderNamespace is resource identity. +// parentResourcePath is resource identity. resourceType is resource +// identity. resourceName is resource identity. +func (client ManagementClient) Delete(resourceGroupName string, resourceProviderNamespace string, parentResourcePath string, resourceType string, resourceName string) (result autorest.Response, err error) { + req, err := client.DeletePreparer(resourceGroupName, resourceProviderNamespace, parentResourcePath, resourceType, resourceName) + if err != nil { + return result, autorest.NewErrorWithError(err, "resources.ManagementClient", "Delete", nil, "Failure preparing request") + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "resources.ManagementClient", "Delete", resp, "Failure sending request") + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.ManagementClient", "Delete", resp, "Failure responding to request") + } + + return +} + +// DeletePreparer prepares the Delete request. 
+func (client ManagementClient) DeletePreparer(resourceGroupName string, resourceProviderNamespace string, parentResourcePath string, resourceType string, resourceName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "parentResourcePath": parentResourcePath, + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "resourceName": autorest.Encode("path", resourceName), + "resourceProviderNamespace": autorest.Encode("path", resourceProviderNamespace), + "resourceType": resourceType, + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client ManagementClient) DeleteSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client ManagementClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get returns a resource belonging to a resource group. +// +// resourceGroupName is the name of the resource group. The name is case +// insensitive. resourceProviderNamespace is resource identity. 
+// parentResourcePath is resource identity. resourceType is resource +// identity. resourceName is resource identity. +func (client ManagementClient) Get(resourceGroupName string, resourceProviderNamespace string, parentResourcePath string, resourceType string, resourceName string) (result GenericResource, err error) { + req, err := client.GetPreparer(resourceGroupName, resourceProviderNamespace, parentResourcePath, resourceType, resourceName) + if err != nil { + return result, autorest.NewErrorWithError(err, "resources.ManagementClient", "Get", nil, "Failure preparing request") + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "resources.ManagementClient", "Get", resp, "Failure sending request") + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.ManagementClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. 
+func (client ManagementClient) GetPreparer(resourceGroupName string, resourceProviderNamespace string, parentResourcePath string, resourceType string, resourceName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "parentResourcePath": parentResourcePath, + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "resourceName": autorest.Encode("path", resourceName), + "resourceProviderNamespace": autorest.Encode("path", resourceProviderNamespace), + "resourceType": resourceType, + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client ManagementClient) GetSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client ManagementClient) GetResponder(resp *http.Response) (result GenericResource, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List get all of the resources under a subscription. +// +// filter is the filter to apply on the operation. top is query parameters. If +// null is passed returns all resource groups. 
+func (client ManagementClient) List(filter string, top *int32) (result ResourceListResult, err error) { + req, err := client.ListPreparer(filter, top) + if err != nil { + return result, autorest.NewErrorWithError(err, "resources.ManagementClient", "List", nil, "Failure preparing request") + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "resources.ManagementClient", "List", resp, "Failure sending request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.ManagementClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client ManagementClient) ListPreparer(filter string, top *int32) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + if len(filter) > 0 { + queryParameters["$filter"] = autorest.Encode("query", filter) + } + if top != nil { + queryParameters["$top"] = autorest.Encode("query", *top) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resources", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client ManagementClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. 
+func (client ManagementClient) ListResponder(resp *http.Response) (result ResourceListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListNextResults retrieves the next set of results, if any. +func (client ManagementClient) ListNextResults(lastResults ResourceListResult) (result ResourceListResult, err error) { + req, err := lastResults.ResourceListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "resources.ManagementClient", "List", nil, "Failure preparing next results request request") + } + if req == nil { + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "resources.ManagementClient", "List", resp, "Failure sending next results request request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.ManagementClient", "List", resp, "Failure responding to next results request request") + } + + return +} + +// MoveResources move resources from one resource group to another. The +// resources being moved should all be in the same resource group. This +// method may poll for completion. Polling can be canceled by passing the +// cancel channel argument. The channel will be used to cancel polling and +// any outstanding HTTP requests. +// +// sourceResourceGroupName is source resource group name. parameters is move +// resources' parameters. 
+func (client ManagementClient) MoveResources(sourceResourceGroupName string, parameters MoveInfo, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.MoveResourcesPreparer(sourceResourceGroupName, parameters, cancel) + if err != nil { + return result, autorest.NewErrorWithError(err, "resources.ManagementClient", "MoveResources", nil, "Failure preparing request") + } + + resp, err := client.MoveResourcesSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "resources.ManagementClient", "MoveResources", resp, "Failure sending request") + } + + result, err = client.MoveResourcesResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.ManagementClient", "MoveResources", resp, "Failure responding to request") + } + + return +} + +// MoveResourcesPreparer prepares the MoveResources request. +func (client ManagementClient) MoveResourcesPreparer(sourceResourceGroupName string, parameters MoveInfo, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "sourceResourceGroupName": autorest.Encode("path", sourceResourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{sourceResourceGroupName}/moveResources", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// MoveResourcesSender sends the MoveResources request. The method will close the +// http.Response Body if it receives an error. 
+func (client ManagementClient) MoveResourcesSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// MoveResourcesResponder handles the response to the MoveResources request. The method always +// closes the http.Response Body. +func (client ManagementClient) MoveResourcesResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/resources/resources/deploymentoperations.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/resources/resources/deploymentoperations.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/resources/resources/deploymentoperations.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/resources/resources/deploymentoperations.go 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,199 @@ +package resources + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "net/http" +) + +// DeploymentOperationsClient is the client for the DeploymentOperations +// methods of the Resources service. +type DeploymentOperationsClient struct { + ManagementClient +} + +// NewDeploymentOperationsClient creates an instance of the +// DeploymentOperationsClient client. +func NewDeploymentOperationsClient(subscriptionID string) DeploymentOperationsClient { + return NewDeploymentOperationsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewDeploymentOperationsClientWithBaseURI creates an instance of the +// DeploymentOperationsClient client. +func NewDeploymentOperationsClientWithBaseURI(baseURI string, subscriptionID string) DeploymentOperationsClient { + return DeploymentOperationsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// Get get a list of deployments operations. +// +// resourceGroupName is the name of the resource group. The name is case +// insensitive. deploymentName is the name of the deployment. operationID is +// operation Id. 
+func (client DeploymentOperationsClient) Get(resourceGroupName string, deploymentName string, operationID string) (result DeploymentOperation, err error) { + req, err := client.GetPreparer(resourceGroupName, deploymentName, operationID) + if err != nil { + return result, autorest.NewErrorWithError(err, "resources.DeploymentOperationsClient", "Get", nil, "Failure preparing request") + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "resources.DeploymentOperationsClient", "Get", resp, "Failure sending request") + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.DeploymentOperationsClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client DeploymentOperationsClient) GetPreparer(resourceGroupName string, deploymentName string, operationID string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "deploymentName": autorest.Encode("path", deploymentName), + "operationId": autorest.Encode("path", operationID), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/deployments/{deploymentName}/operations/{operationId}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. 
+func (client DeploymentOperationsClient) GetSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client DeploymentOperationsClient) GetResponder(resp *http.Response) (result DeploymentOperation, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List gets a list of deployments operations. +// +// resourceGroupName is the name of the resource group. The name is case +// insensitive. deploymentName is the name of the deployment. top is query +// parameters. +func (client DeploymentOperationsClient) List(resourceGroupName string, deploymentName string, top *int32) (result DeploymentOperationsListResult, err error) { + req, err := client.ListPreparer(resourceGroupName, deploymentName, top) + if err != nil { + return result, autorest.NewErrorWithError(err, "resources.DeploymentOperationsClient", "List", nil, "Failure preparing request") + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "resources.DeploymentOperationsClient", "List", resp, "Failure sending request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.DeploymentOperationsClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. 
+func (client DeploymentOperationsClient) ListPreparer(resourceGroupName string, deploymentName string, top *int32) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "deploymentName": autorest.Encode("path", deploymentName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + if top != nil { + queryParameters["$top"] = autorest.Encode("query", *top) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/deployments/{deploymentName}/operations", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client DeploymentOperationsClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client DeploymentOperationsClient) ListResponder(resp *http.Response) (result DeploymentOperationsListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListNextResults retrieves the next set of results, if any. 
+func (client DeploymentOperationsClient) ListNextResults(lastResults DeploymentOperationsListResult) (result DeploymentOperationsListResult, err error) { + req, err := lastResults.DeploymentOperationsListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "resources.DeploymentOperationsClient", "List", nil, "Failure preparing next results request request") + } + if req == nil { + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "resources.DeploymentOperationsClient", "List", resp, "Failure sending next results request request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.DeploymentOperationsClient", "List", resp, "Failure responding to next results request request") + } + + return +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/resources/resources/deployments.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/resources/resources/deployments.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/resources/resources/deployments.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/resources/resources/deployments.go 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,587 @@ +package resources + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "net/http" +) + +// DeploymentsClient is the client for the Deployments methods of the +// Resources service. +type DeploymentsClient struct { + ManagementClient +} + +// NewDeploymentsClient creates an instance of the DeploymentsClient client. +func NewDeploymentsClient(subscriptionID string) DeploymentsClient { + return NewDeploymentsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewDeploymentsClientWithBaseURI creates an instance of the +// DeploymentsClient client. +func NewDeploymentsClientWithBaseURI(baseURI string, subscriptionID string) DeploymentsClient { + return DeploymentsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// Cancel cancel a currently running template deployment. +// +// resourceGroupName is the name of the resource group. The name is case +// insensitive. deploymentName is the name of the deployment. 
+func (client DeploymentsClient) Cancel(resourceGroupName string, deploymentName string) (result autorest.Response, err error) { + req, err := client.CancelPreparer(resourceGroupName, deploymentName) + if err != nil { + return result, autorest.NewErrorWithError(err, "resources.DeploymentsClient", "Cancel", nil, "Failure preparing request") + } + + resp, err := client.CancelSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "resources.DeploymentsClient", "Cancel", resp, "Failure sending request") + } + + result, err = client.CancelResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.DeploymentsClient", "Cancel", resp, "Failure responding to request") + } + + return +} + +// CancelPreparer prepares the Cancel request. +func (client DeploymentsClient) CancelPreparer(resourceGroupName string, deploymentName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "deploymentName": autorest.Encode("path", deploymentName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Resources/deployments/{deploymentName}/cancel", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// CancelSender sends the Cancel request. The method will close the +// http.Response Body if it receives an error. +func (client DeploymentsClient) CancelSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// CancelResponder handles the response to the Cancel request. 
The method always +// closes the http.Response Body. +func (client DeploymentsClient) CancelResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// CheckExistence checks whether deployment exists. +// +// resourceGroupName is the name of the resource group to check. The name is +// case insensitive. deploymentName is the name of the deployment. +func (client DeploymentsClient) CheckExistence(resourceGroupName string, deploymentName string) (result autorest.Response, err error) { + req, err := client.CheckExistencePreparer(resourceGroupName, deploymentName) + if err != nil { + return result, autorest.NewErrorWithError(err, "resources.DeploymentsClient", "CheckExistence", nil, "Failure preparing request") + } + + resp, err := client.CheckExistenceSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "resources.DeploymentsClient", "CheckExistence", resp, "Failure sending request") + } + + result, err = client.CheckExistenceResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.DeploymentsClient", "CheckExistence", resp, "Failure responding to request") + } + + return +} + +// CheckExistencePreparer prepares the CheckExistence request. 
+func (client DeploymentsClient) CheckExistencePreparer(resourceGroupName string, deploymentName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "deploymentName": autorest.Encode("path", deploymentName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsHead(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Resources/deployments/{deploymentName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// CheckExistenceSender sends the CheckExistence request. The method will close the +// http.Response Body if it receives an error. +func (client DeploymentsClient) CheckExistenceSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// CheckExistenceResponder handles the response to the CheckExistence request. The method always +// closes the http.Response Body. +func (client DeploymentsClient) CheckExistenceResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent, http.StatusNotFound), + autorest.ByClosing()) + result.Response = resp + return +} + +// CreateOrUpdate create a named template deployment using a template. This +// method may poll for completion. Polling can be canceled by passing the +// cancel channel argument. The channel will be used to cancel polling and +// any outstanding HTTP requests. +// +// resourceGroupName is the name of the resource group. The name is case +// insensitive. deploymentName is the name of the deployment. 
parameters is +// additional parameters supplied to the operation. +func (client DeploymentsClient) CreateOrUpdate(resourceGroupName string, deploymentName string, parameters Deployment, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.CreateOrUpdatePreparer(resourceGroupName, deploymentName, parameters, cancel) + if err != nil { + return result, autorest.NewErrorWithError(err, "resources.DeploymentsClient", "CreateOrUpdate", nil, "Failure preparing request") + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "resources.DeploymentsClient", "CreateOrUpdate", resp, "Failure sending request") + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.DeploymentsClient", "CreateOrUpdate", resp, "Failure responding to request") + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. 
+func (client DeploymentsClient) CreateOrUpdatePreparer(resourceGroupName string, deploymentName string, parameters Deployment, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "deploymentName": autorest.Encode("path", deploymentName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Resources/deployments/{deploymentName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client DeploymentsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client DeploymentsClient) CreateOrUpdateResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByClosing()) + result.Response = resp + return +} + +// Delete delete deployment. This method may poll for completion. Polling can +// be canceled by passing the cancel channel argument. The channel will be +// used to cancel polling and any outstanding HTTP requests. 
+// +// resourceGroupName is the name of the resource group. The name is case +// insensitive. deploymentName is the name of the deployment to be deleted. +func (client DeploymentsClient) Delete(resourceGroupName string, deploymentName string, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.DeletePreparer(resourceGroupName, deploymentName, cancel) + if err != nil { + return result, autorest.NewErrorWithError(err, "resources.DeploymentsClient", "Delete", nil, "Failure preparing request") + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "resources.DeploymentsClient", "Delete", resp, "Failure sending request") + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.DeploymentsClient", "Delete", resp, "Failure responding to request") + } + + return +} + +// DeletePreparer prepares the Delete request. +func (client DeploymentsClient) DeletePreparer(resourceGroupName string, deploymentName string, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "deploymentName": autorest.Encode("path", deploymentName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Resources/deployments/{deploymentName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. 
+func (client DeploymentsClient) DeleteSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client DeploymentsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// ExportTemplate exports a deployment template. +// +// resourceGroupName is the name of the resource group. The name is case +// insensitive. deploymentName is the name of the deployment. +func (client DeploymentsClient) ExportTemplate(resourceGroupName string, deploymentName string) (result DeploymentExportResult, err error) { + req, err := client.ExportTemplatePreparer(resourceGroupName, deploymentName) + if err != nil { + return result, autorest.NewErrorWithError(err, "resources.DeploymentsClient", "ExportTemplate", nil, "Failure preparing request") + } + + resp, err := client.ExportTemplateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "resources.DeploymentsClient", "ExportTemplate", resp, "Failure sending request") + } + + result, err = client.ExportTemplateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.DeploymentsClient", "ExportTemplate", resp, "Failure responding to request") + } + + return +} + +// ExportTemplatePreparer prepares the ExportTemplate request. 
+func (client DeploymentsClient) ExportTemplatePreparer(resourceGroupName string, deploymentName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "deploymentName": autorest.Encode("path", deploymentName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Resources/deployments/{deploymentName}/exportTemplate", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ExportTemplateSender sends the ExportTemplate request. The method will close the +// http.Response Body if it receives an error. +func (client DeploymentsClient) ExportTemplateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ExportTemplateResponder handles the response to the ExportTemplate request. The method always +// closes the http.Response Body. +func (client DeploymentsClient) ExportTemplateResponder(resp *http.Response) (result DeploymentExportResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Get get a deployment. +// +// resourceGroupName is the name of the resource group to get. The name is +// case insensitive. deploymentName is the name of the deployment. 
+func (client DeploymentsClient) Get(resourceGroupName string, deploymentName string) (result DeploymentExtended, err error) { + req, err := client.GetPreparer(resourceGroupName, deploymentName) + if err != nil { + return result, autorest.NewErrorWithError(err, "resources.DeploymentsClient", "Get", nil, "Failure preparing request") + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "resources.DeploymentsClient", "Get", resp, "Failure sending request") + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.DeploymentsClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client DeploymentsClient) GetPreparer(resourceGroupName string, deploymentName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "deploymentName": autorest.Encode("path", deploymentName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Resources/deployments/{deploymentName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client DeploymentsClient) GetSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. 
+func (client DeploymentsClient) GetResponder(resp *http.Response) (result DeploymentExtended, err error) {
+	// Accepts only 200 OK, unmarshals the JSON body into result and always
+	// closes the response body.
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		azure.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// List gets a list of deployments.
+//
+// resourceGroupName is the name of the resource group to filter by. The name
+// is case insensitive. filter is the filter to apply on the operation. top
+// is query parameters. If null is passed returns all deployments.
+func (client DeploymentsClient) List(resourceGroupName string, filter string, top *int32) (result DeploymentListResult, err error) {
+	req, err := client.ListPreparer(resourceGroupName, filter, top)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "resources.DeploymentsClient", "List", nil, "Failure preparing request")
+	}
+
+	resp, err := client.ListSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "resources.DeploymentsClient", "List", resp, "Failure sending request")
+	}
+
+	result, err = client.ListResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "resources.DeploymentsClient", "List", resp, "Failure responding to request")
+	}
+
+	return
+}
+
+// ListPreparer prepares the List request.
+func (client DeploymentsClient) ListPreparer(resourceGroupName string, filter string, top *int32) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"resourceGroupName": autorest.Encode("path", resourceGroupName),
+		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": client.APIVersion,
+	}
+	// $filter and $top are optional and only added when supplied.
+	if len(filter) > 0 {
+		queryParameters["$filter"] = autorest.Encode("query", filter)
+	}
+	if top != nil {
+		queryParameters["$top"] = autorest.Encode("query", *top)
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsGet(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Resources/deployments/", pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+	return preparer.Prepare(&http.Request{})
+}
+
+// ListSender sends the List request. The method will close the
+// http.Response Body if it receives an error.
+func (client DeploymentsClient) ListSender(req *http.Request) (*http.Response, error) {
+	return autorest.SendWithSender(client, req)
+}
+
+// ListResponder handles the response to the List request. The method always
+// closes the http.Response Body.
+func (client DeploymentsClient) ListResponder(resp *http.Response) (result DeploymentListResult, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		azure.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// ListNextResults retrieves the next set of results, if any.
+func (client DeploymentsClient) ListNextResults(lastResults DeploymentListResult) (result DeploymentListResult, err error) {
+	req, err := lastResults.DeploymentListResultPreparer()
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "resources.DeploymentsClient", "List", nil, "Failure preparing next results request")
+	}
+	// A nil request with a nil error means NextLink was empty: no more pages.
+	if req == nil {
+		return
+	}
+
+	resp, err := client.ListSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "resources.DeploymentsClient", "List", resp, "Failure sending next results request")
+	}
+
+	result, err = client.ListResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "resources.DeploymentsClient", "List", resp, "Failure responding to next results request")
+	}
+
+	return
+}
+
+// Validate validates a deployment template.
+//
+// resourceGroupName is the name of the resource group. The name is case
+// insensitive. deploymentName is the name of the deployment. parameters is
+// deployment to validate.
+func (client DeploymentsClient) Validate(resourceGroupName string, deploymentName string, parameters Deployment) (result DeploymentValidateResult, err error) {
+	req, err := client.ValidatePreparer(resourceGroupName, deploymentName, parameters)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "resources.DeploymentsClient", "Validate", nil, "Failure preparing request")
+	}
+
+	resp, err := client.ValidateSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "resources.DeploymentsClient", "Validate", resp, "Failure sending request")
+	}
+
+	result, err = client.ValidateResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "resources.DeploymentsClient", "Validate", resp, "Failure responding to request")
+	}
+
+	return
+}
+
+// ValidatePreparer prepares the Validate request.
+func (client DeploymentsClient) ValidatePreparer(resourceGroupName string, deploymentName string, parameters Deployment) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"deploymentName":    autorest.Encode("path", deploymentName),
+		"resourceGroupName": autorest.Encode("path", resourceGroupName),
+		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": client.APIVersion,
+	}
+
+	// The deployment to validate is sent as the JSON body of a POST.
+	preparer := autorest.CreatePreparer(
+		autorest.AsJSON(),
+		autorest.AsPost(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Resources/deployments/{deploymentName}/validate", pathParameters),
+		autorest.WithJSON(parameters),
+		autorest.WithQueryParameters(queryParameters))
+	return preparer.Prepare(&http.Request{})
+}
+
+// ValidateSender sends the Validate request. The method will close the
+// http.Response Body if it receives an error.
+func (client DeploymentsClient) ValidateSender(req *http.Request) (*http.Response, error) {
+	return autorest.SendWithSender(client, req)
+}
+
+// ValidateResponder handles the response to the Validate request. The method always
+// closes the http.Response Body.
+func (client DeploymentsClient) ValidateResponder(resp *http.Response) (result DeploymentValidateResult, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		// NOTE(review): 400 is accepted as a success status here — presumably
+		// validation failures come back as a 400 with a DeploymentValidateResult
+		// body rather than a transport error; confirm against the Validate API.
+		azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusBadRequest),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/resources/resources/groups.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/resources/resources/groups.go
--- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/resources/resources/groups.go	1970-01-01 00:00:00.000000000 +0000
+++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/resources/resources/groups.go	2016-10-13 14:32:06.000000000 +0000
@@ -0,0 +1,610 @@
+package resources
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0
+// Changes may cause incorrect behavior and will be lost if the code is
+// regenerated.
+
+import (
+	"github.com/Azure/go-autorest/autorest"
+	"github.com/Azure/go-autorest/autorest/azure"
+	"net/http"
+)
+
+// GroupsClient is the client for the Groups methods of the Resources service.
+type GroupsClient struct {
+	ManagementClient
+}
+
+// NewGroupsClient creates an instance of the GroupsClient client.
+func NewGroupsClient(subscriptionID string) GroupsClient {
+	return NewGroupsClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewGroupsClientWithBaseURI creates an instance of the GroupsClient client.
+func NewGroupsClientWithBaseURI(baseURI string, subscriptionID string) GroupsClient {
+	return GroupsClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// CheckExistence checks whether resource group exists.
+//
+// resourceGroupName is the name of the resource group to check. The name is
+// case insensitive.
+func (client GroupsClient) CheckExistence(resourceGroupName string) (result autorest.Response, err error) {
+	// The result carries only the raw HTTP response; existence is conveyed
+	// via the status code (see CheckExistenceResponder).
+	req, err := client.CheckExistencePreparer(resourceGroupName)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "resources.GroupsClient", "CheckExistence", nil, "Failure preparing request")
+	}
+
+	resp, err := client.CheckExistenceSender(req)
+	if err != nil {
+		result.Response = resp
+		return result, autorest.NewErrorWithError(err, "resources.GroupsClient", "CheckExistence", resp, "Failure sending request")
+	}
+
+	result, err = client.CheckExistenceResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "resources.GroupsClient", "CheckExistence", resp, "Failure responding to request")
+	}
+
+	return
+}
+
+// CheckExistencePreparer prepares the CheckExistence request.
+func (client GroupsClient) CheckExistencePreparer(resourceGroupName string) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"resourceGroupName": autorest.Encode("path", resourceGroupName),
+		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": client.APIVersion,
+	}
+
+	// Existence is probed with a bodyless HEAD request.
+	preparer := autorest.CreatePreparer(
+		autorest.AsHead(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}", pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+	return preparer.Prepare(&http.Request{})
+}
+
+// CheckExistenceSender sends the CheckExistence request. The method will close the
+// http.Response Body if it receives an error.
+func (client GroupsClient) CheckExistenceSender(req *http.Request) (*http.Response, error) {
+	return autorest.SendWithSender(client, req)
+}
+
+// CheckExistenceResponder handles the response to the CheckExistence request. The method always
+// closes the http.Response Body.
+func (client GroupsClient) CheckExistenceResponder(resp *http.Response) (result autorest.Response, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		// 404 is accepted as a non-error status — presumably so callers can
+		// read "not found" from the status code; confirm with the ARM API docs.
+		azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent, http.StatusNotFound),
+		autorest.ByClosing())
+	result.Response = resp
+	return
+}
+
+// CreateOrUpdate creates a resource group.
+//
+// resourceGroupName is the name of the resource group to be created or
+// updated. parameters is parameters supplied to the create or update
+// resource group service operation.
+func (client GroupsClient) CreateOrUpdate(resourceGroupName string, parameters ResourceGroup) (result ResourceGroup, err error) {
+	req, err := client.CreateOrUpdatePreparer(resourceGroupName, parameters)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "resources.GroupsClient", "CreateOrUpdate", nil, "Failure preparing request")
+	}
+
+	resp, err := client.CreateOrUpdateSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "resources.GroupsClient", "CreateOrUpdate", resp, "Failure sending request")
+	}
+
+	result, err = client.CreateOrUpdateResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "resources.GroupsClient", "CreateOrUpdate", resp, "Failure responding to request")
+	}
+
+	return
+}
+
+// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
+func (client GroupsClient) CreateOrUpdatePreparer(resourceGroupName string, parameters ResourceGroup) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"resourceGroupName": autorest.Encode("path", resourceGroupName),
+		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": client.APIVersion,
+	}
+
+	// The resource group definition is sent as the JSON body of a PUT.
+	preparer := autorest.CreatePreparer(
+		autorest.AsJSON(),
+		autorest.AsPut(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}", pathParameters),
+		autorest.WithJSON(parameters),
+		autorest.WithQueryParameters(queryParameters))
+	return preparer.Prepare(&http.Request{})
+}
+
+// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
+// http.Response Body if it receives an error.
+func (client GroupsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) {
+	return autorest.SendWithSender(client, req)
+}
+
+// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
+// closes the http.Response Body.
+func (client GroupsClient) CreateOrUpdateResponder(resp *http.Response) (result ResourceGroup, err error) {
+	// Both 201 Created (new group) and 200 OK (updated group) are success.
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		azure.WithErrorUnlessStatusCode(http.StatusCreated, http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// Delete deletes a resource group. This method may poll for completion. Polling
+// can be canceled by passing the cancel channel argument. The channel will
+// be used to cancel polling and any outstanding HTTP requests.
+//
+// resourceGroupName is the name of the resource group to be deleted. The name
+// is case insensitive.
+func (client GroupsClient) Delete(resourceGroupName string, cancel <-chan struct{}) (result autorest.Response, err error) {
+	req, err := client.DeletePreparer(resourceGroupName, cancel)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "resources.GroupsClient", "Delete", nil, "Failure preparing request")
+	}
+
+	// DeleteSender polls the long-running delete to completion.
+	resp, err := client.DeleteSender(req)
+	if err != nil {
+		result.Response = resp
+		return result, autorest.NewErrorWithError(err, "resources.GroupsClient", "Delete", resp, "Failure sending request")
+	}
+
+	result, err = client.DeleteResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "resources.GroupsClient", "Delete", resp, "Failure responding to request")
+	}
+
+	return
+}
+
+// DeletePreparer prepares the Delete request.
+func (client GroupsClient) DeletePreparer(resourceGroupName string, cancel <-chan struct{}) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"resourceGroupName": autorest.Encode("path", resourceGroupName),
+		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": client.APIVersion,
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsDelete(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}", pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+	// The cancel channel is wired into the request so polling and any
+	// outstanding HTTP requests can be aborted.
+	return preparer.Prepare(&http.Request{Cancel: cancel})
+}
+
+// DeleteSender sends the Delete request. The method will close the
+// http.Response Body if it receives an error.
+func (client GroupsClient) DeleteSender(req *http.Request) (*http.Response, error) {
+	// Unlike the other senders in this file, Delete polls the asynchronous
+	// ARM operation until it completes.
+	return autorest.SendWithSender(client,
+		req,
+		azure.DoPollForAsynchronous(client.PollingDelay))
+}
+
+// DeleteResponder handles the response to the Delete request. The method always
+// closes the http.Response Body.
+func (client GroupsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		azure.WithErrorUnlessStatusCode(http.StatusAccepted, http.StatusOK),
+		autorest.ByClosing())
+	result.Response = resp
+	return
+}
+
+// ExportTemplate captures the specified resource group as a template.
+//
+// resourceGroupName is the name of the resource group to be created or
+// updated. parameters is parameters supplied to the export template resource
+// group operation.
+func (client GroupsClient) ExportTemplate(resourceGroupName string, parameters ExportTemplateRequest) (result ResourceGroupExportResult, err error) {
+	req, err := client.ExportTemplatePreparer(resourceGroupName, parameters)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "resources.GroupsClient", "ExportTemplate", nil, "Failure preparing request")
+	}
+
+	resp, err := client.ExportTemplateSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "resources.GroupsClient", "ExportTemplate", resp, "Failure sending request")
+	}
+
+	result, err = client.ExportTemplateResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "resources.GroupsClient", "ExportTemplate", resp, "Failure responding to request")
+	}
+
+	return
+}
+
+// ExportTemplatePreparer prepares the ExportTemplate request.
+func (client GroupsClient) ExportTemplatePreparer(resourceGroupName string, parameters ExportTemplateRequest) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"resourceGroupName": autorest.Encode("path", resourceGroupName),
+		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": client.APIVersion,
+	}
+
+	// The export options are sent as the JSON body of a POST.
+	preparer := autorest.CreatePreparer(
+		autorest.AsJSON(),
+		autorest.AsPost(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/exportTemplate", pathParameters),
+		autorest.WithJSON(parameters),
+		autorest.WithQueryParameters(queryParameters))
+	return preparer.Prepare(&http.Request{})
+}
+
+// ExportTemplateSender sends the ExportTemplate request. The method will close the
+// http.Response Body if it receives an error.
+func (client GroupsClient) ExportTemplateSender(req *http.Request) (*http.Response, error) {
+	return autorest.SendWithSender(client, req)
+}
+
+// ExportTemplateResponder handles the response to the ExportTemplate request. The method always
+// closes the http.Response Body.
+func (client GroupsClient) ExportTemplateResponder(resp *http.Response) (result ResourceGroupExportResult, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		azure.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// Get get a resource group.
+//
+// resourceGroupName is the name of the resource group to get. The name is
+// case insensitive.
+func (client GroupsClient) Get(resourceGroupName string) (result ResourceGroup, err error) {
+	// Standard prepare/send/respond pipeline; each error is stage-tagged.
+	req, err := client.GetPreparer(resourceGroupName)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "resources.GroupsClient", "Get", nil, "Failure preparing request")
+	}
+
+	resp, err := client.GetSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "resources.GroupsClient", "Get", resp, "Failure sending request")
+	}
+
+	result, err = client.GetResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "resources.GroupsClient", "Get", resp, "Failure responding to request")
+	}
+
+	return
+}
+
+// GetPreparer prepares the Get request.
+func (client GroupsClient) GetPreparer(resourceGroupName string) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"resourceGroupName": autorest.Encode("path", resourceGroupName),
+		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": client.APIVersion,
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsGet(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}", pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+	return preparer.Prepare(&http.Request{})
+}
+
+// GetSender sends the Get request. The method will close the
+// http.Response Body if it receives an error.
+func (client GroupsClient) GetSender(req *http.Request) (*http.Response, error) {
+	return autorest.SendWithSender(client, req)
+}
+
+// GetResponder handles the response to the Get request. The method always
+// closes the http.Response Body.
+func (client GroupsClient) GetResponder(resp *http.Response) (result ResourceGroup, err error) {
+	// Accepts only 200 OK; the body is unmarshalled into the ResourceGroup.
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		azure.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// List gets a collection of resource groups.
+//
+// filter is the filter to apply on the operation. top is query parameters. If
+// null is passed returns all resource groups.
+func (client GroupsClient) List(filter string, top *int32) (result ResourceGroupListResult, err error) {
+	req, err := client.ListPreparer(filter, top)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "resources.GroupsClient", "List", nil, "Failure preparing request")
+	}
+
+	resp, err := client.ListSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "resources.GroupsClient", "List", resp, "Failure sending request")
+	}
+
+	result, err = client.ListResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "resources.GroupsClient", "List", resp, "Failure responding to request")
+	}
+
+	return
+}
+
+// ListPreparer prepares the List request.
+func (client GroupsClient) ListPreparer(filter string, top *int32) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"subscriptionId": autorest.Encode("path", client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": client.APIVersion,
+	}
+	// $filter and $top are optional query parameters, added only when set.
+	if len(filter) > 0 {
+		queryParameters["$filter"] = autorest.Encode("query", filter)
+	}
+	if top != nil {
+		queryParameters["$top"] = autorest.Encode("query", *top)
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsGet(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourcegroups", pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+	return preparer.Prepare(&http.Request{})
+}
+
+// ListSender sends the List request. The method will close the
+// http.Response Body if it receives an error.
+func (client GroupsClient) ListSender(req *http.Request) (*http.Response, error) {
+	return autorest.SendWithSender(client, req)
+}
+
+// ListResponder handles the response to the List request. The method always
+// closes the http.Response Body.
+func (client GroupsClient) ListResponder(resp *http.Response) (result ResourceGroupListResult, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		azure.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// ListNextResults retrieves the next set of results, if any.
+func (client GroupsClient) ListNextResults(lastResults ResourceGroupListResult) (result ResourceGroupListResult, err error) {
+	req, err := lastResults.ResourceGroupListResultPreparer()
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "resources.GroupsClient", "List", nil, "Failure preparing next results request")
+	}
+	// A nil request with a nil error means NextLink was empty: no more pages.
+	if req == nil {
+		return
+	}
+
+	resp, err := client.ListSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "resources.GroupsClient", "List", resp, "Failure sending next results request")
+	}
+
+	result, err = client.ListResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "resources.GroupsClient", "List", resp, "Failure responding to next results request")
+	}
+
+	return
+}
+
+// ListResources gets all of the resources under a resource group.
+//
+// resourceGroupName is the name of the resource group. filter is the filter
+// to apply on the operation. top is query parameters. If null is passed
+// returns all resources.
+func (client GroupsClient) ListResources(resourceGroupName string, filter string, top *int32) (result ResourceListResult, err error) {
+	req, err := client.ListResourcesPreparer(resourceGroupName, filter, top)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "resources.GroupsClient", "ListResources", nil, "Failure preparing request")
+	}
+
+	resp, err := client.ListResourcesSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "resources.GroupsClient", "ListResources", resp, "Failure sending request")
+	}
+
+	result, err = client.ListResourcesResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "resources.GroupsClient", "ListResources", resp, "Failure responding to request")
+	}
+
+	return
+}
+
+// ListResourcesPreparer prepares the ListResources request.
+func (client GroupsClient) ListResourcesPreparer(resourceGroupName string, filter string, top *int32) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"resourceGroupName": autorest.Encode("path", resourceGroupName),
+		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": client.APIVersion,
+	}
+	if len(filter) > 0 {
+		queryParameters["$filter"] = autorest.Encode("query", filter)
+	}
+	if top != nil {
+		queryParameters["$top"] = autorest.Encode("query", *top)
+	}
+
+	// NOTE(review): this path uses mixed-case "resourceGroups", unlike the
+	// lowercase "resourcegroups" used elsewhere in this file; ARM paths are
+	// case-insensitive per the generated comments, so this appears benign.
+	preparer := autorest.CreatePreparer(
+		autorest.AsGet(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/resources", pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+	return preparer.Prepare(&http.Request{})
+}
+
+// ListResourcesSender sends the ListResources request. The method will close the
+// http.Response Body if it receives an error.
+func (client GroupsClient) ListResourcesSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListResourcesResponder handles the response to the ListResources request. The method always +// closes the http.Response Body. +func (client GroupsClient) ListResourcesResponder(resp *http.Response) (result ResourceListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListResourcesNextResults retrieves the next set of results, if any. +func (client GroupsClient) ListResourcesNextResults(lastResults ResourceListResult) (result ResourceListResult, err error) { + req, err := lastResults.ResourceListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "resources.GroupsClient", "ListResources", nil, "Failure preparing next results request request") + } + if req == nil { + return + } + + resp, err := client.ListResourcesSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "resources.GroupsClient", "ListResources", resp, "Failure sending next results request request") + } + + result, err = client.ListResourcesResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.GroupsClient", "ListResources", resp, "Failure responding to next results request request") + } + + return +} + +// Patch resource groups can be updated through a simple PATCH operation to a +// group address. The format of the request is the same as that for creating +// a resource groups, though if a field is unspecified current value will be +// carried over. +// +// resourceGroupName is the name of the resource group to be created or +// updated. The name is case insensitive. 
parameters is parameters supplied +// to the update state resource group service operation. +func (client GroupsClient) Patch(resourceGroupName string, parameters ResourceGroup) (result ResourceGroup, err error) { + req, err := client.PatchPreparer(resourceGroupName, parameters) + if err != nil { + return result, autorest.NewErrorWithError(err, "resources.GroupsClient", "Patch", nil, "Failure preparing request") + } + + resp, err := client.PatchSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "resources.GroupsClient", "Patch", resp, "Failure sending request") + } + + result, err = client.PatchResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.GroupsClient", "Patch", resp, "Failure responding to request") + } + + return +} + +// PatchPreparer prepares the Patch request. +func (client GroupsClient) PatchPreparer(resourceGroupName string, parameters ResourceGroup) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPatch(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// PatchSender sends the Patch request. The method will close the +// http.Response Body if it receives an error. +func (client GroupsClient) PatchSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// PatchResponder handles the response to the Patch request. 
The method always +// closes the http.Response Body. +func (client GroupsClient) PatchResponder(resp *http.Response) (result ResourceGroup, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/resources/resources/models.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/resources/resources/models.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/resources/resources/models.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/resources/resources/models.go 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,444 @@ +package resources + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/date" + "github.com/Azure/go-autorest/autorest/to" + "net/http" +) + +// DeploymentMode enumerates the values for deployment mode. 
+type DeploymentMode string + +const ( + // Complete specifies the complete state for deployment mode. + Complete DeploymentMode = "Complete" + // Incremental specifies the incremental state for deployment mode. + Incremental DeploymentMode = "Incremental" +) + +// ResourceIdentityType enumerates the values for resource identity type. +type ResourceIdentityType string + +const ( + // SystemAssigned specifies the system assigned state for resource + // identity type. + SystemAssigned ResourceIdentityType = "SystemAssigned" +) + +// BasicDependency is deployment dependency information. +type BasicDependency struct { + ID *string `json:"id,omitempty"` + ResourceType *string `json:"resourceType,omitempty"` + ResourceName *string `json:"resourceName,omitempty"` +} + +// DebugSetting is +type DebugSetting struct { + DetailLevel *string `json:"detailLevel,omitempty"` +} + +// Dependency is deployment dependency information. +type Dependency struct { + DependsOn *[]BasicDependency `json:"dependsOn,omitempty"` + ID *string `json:"id,omitempty"` + ResourceType *string `json:"resourceType,omitempty"` + ResourceName *string `json:"resourceName,omitempty"` +} + +// Deployment is deployment operation parameters. +type Deployment struct { + Properties *DeploymentProperties `json:"properties,omitempty"` +} + +// DeploymentExportResult is +type DeploymentExportResult struct { + autorest.Response `json:"-"` + Template *map[string]interface{} `json:"template,omitempty"` +} + +// DeploymentExtended is deployment information. +type DeploymentExtended struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Properties *DeploymentPropertiesExtended `json:"properties,omitempty"` +} + +// DeploymentExtendedFilter is deployment filter. +type DeploymentExtendedFilter struct { + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// DeploymentListResult is list of deployments. 
+type DeploymentListResult struct { + autorest.Response `json:"-"` + Value *[]DeploymentExtended `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// DeploymentListResultPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. +func (client DeploymentListResult) DeploymentListResultPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + +// DeploymentOperation is deployment operation information. +type DeploymentOperation struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + OperationID *string `json:"operationId,omitempty"` + Properties *DeploymentOperationProperties `json:"properties,omitempty"` +} + +// DeploymentOperationProperties is deployment operation properties. +type DeploymentOperationProperties struct { + ProvisioningState *string `json:"provisioningState,omitempty"` + Timestamp *date.Time `json:"timestamp,omitempty"` + ServiceRequestID *string `json:"serviceRequestId,omitempty"` + StatusCode *string `json:"statusCode,omitempty"` + StatusMessage *map[string]interface{} `json:"statusMessage,omitempty"` + TargetResource *TargetResource `json:"targetResource,omitempty"` + Request *HTTPMessage `json:"request,omitempty"` + Response *HTTPMessage `json:"response,omitempty"` +} + +// DeploymentOperationsListResult is list of deployment operations. +type DeploymentOperationsListResult struct { + autorest.Response `json:"-"` + Value *[]DeploymentOperation `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// DeploymentOperationsListResultPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. 
+func (client DeploymentOperationsListResult) DeploymentOperationsListResultPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + +// DeploymentProperties is deployment properties. +type DeploymentProperties struct { + Template *map[string]interface{} `json:"template,omitempty"` + TemplateLink *TemplateLink `json:"templateLink,omitempty"` + Parameters *map[string]interface{} `json:"parameters,omitempty"` + ParametersLink *ParametersLink `json:"parametersLink,omitempty"` + Mode DeploymentMode `json:"mode,omitempty"` + DebugSetting *DebugSetting `json:"debugSetting,omitempty"` +} + +// DeploymentPropertiesExtended is deployment properties with additional +// details. +type DeploymentPropertiesExtended struct { + ProvisioningState *string `json:"provisioningState,omitempty"` + CorrelationID *string `json:"correlationId,omitempty"` + Timestamp *date.Time `json:"timestamp,omitempty"` + Outputs *map[string]interface{} `json:"outputs,omitempty"` + Providers *[]Provider `json:"providers,omitempty"` + Dependencies *[]Dependency `json:"dependencies,omitempty"` + Template *map[string]interface{} `json:"template,omitempty"` + TemplateLink *TemplateLink `json:"templateLink,omitempty"` + Parameters *map[string]interface{} `json:"parameters,omitempty"` + ParametersLink *ParametersLink `json:"parametersLink,omitempty"` + Mode DeploymentMode `json:"mode,omitempty"` + DebugSetting *DebugSetting `json:"debugSetting,omitempty"` +} + +// DeploymentValidateResult is information from validate template deployment +// response. 
+type DeploymentValidateResult struct { + autorest.Response `json:"-"` + Error *ResourceManagementErrorWithDetails `json:"error,omitempty"` + Properties *DeploymentPropertiesExtended `json:"properties,omitempty"` +} + +// ExportTemplateRequest is export resource group template request parameters. +type ExportTemplateRequest struct { + Resources *[]string `json:"resources,omitempty"` + Options *string `json:"options,omitempty"` +} + +// GenericResource is resource information. +type GenericResource struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` + Plan *Plan `json:"plan,omitempty"` + Properties *map[string]interface{} `json:"properties,omitempty"` + Kind *string `json:"kind,omitempty"` + ManagedBy *string `json:"managedBy,omitempty"` + Sku *Sku `json:"sku,omitempty"` + Identity *Identity `json:"identity,omitempty"` +} + +// GenericResourceFilter is resource filter. +type GenericResourceFilter struct { + ResourceType *string `json:"resourceType,omitempty"` + Tagname *string `json:"tagname,omitempty"` + Tagvalue *string `json:"tagvalue,omitempty"` + Expand *string `json:"expand,omitempty"` +} + +// HTTPMessage is +type HTTPMessage struct { + Content *map[string]interface{} `json:"content,omitempty"` +} + +// Identity is identity for the resource. +type Identity struct { + PrincipalID *string `json:"principalId,omitempty"` + TenantID *string `json:"tenantId,omitempty"` + Type ResourceIdentityType `json:"type,omitempty"` +} + +// MoveInfo is parameters of move resources. +type MoveInfo struct { + Resources *[]string `json:"resources,omitempty"` + TargetResourceGroup *string `json:"targetResourceGroup,omitempty"` +} + +// ParametersLink is entity representing the reference to the deployment +// paramaters. 
+type ParametersLink struct { + URI *string `json:"uri,omitempty"` + ContentVersion *string `json:"contentVersion,omitempty"` +} + +// Plan is plan for the resource. +type Plan struct { + Name *string `json:"name,omitempty"` + Publisher *string `json:"publisher,omitempty"` + Product *string `json:"product,omitempty"` + PromotionCode *string `json:"promotionCode,omitempty"` +} + +// Provider is resource provider information. +type Provider struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + Namespace *string `json:"namespace,omitempty"` + RegistrationState *string `json:"registrationState,omitempty"` + ResourceTypes *[]ProviderResourceType `json:"resourceTypes,omitempty"` +} + +// ProviderListResult is list of resource providers. +type ProviderListResult struct { + autorest.Response `json:"-"` + Value *[]Provider `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// ProviderListResultPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. +func (client ProviderListResult) ProviderListResultPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + +// ProviderResourceType is resource type managed by the resource provider. 
+type ProviderResourceType struct { + ResourceType *string `json:"resourceType,omitempty"` + Locations *[]string `json:"locations,omitempty"` + APIVersions *[]string `json:"apiVersions,omitempty"` + Properties *map[string]*string `json:"properties,omitempty"` +} + +// Resource is +type Resource struct { + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` +} + +// ResourceGroup is resource group information. +type ResourceGroup struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Properties *ResourceGroupProperties `json:"properties,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` +} + +// ResourceGroupExportResult is +type ResourceGroupExportResult struct { + autorest.Response `json:"-"` + Template *map[string]interface{} `json:"template,omitempty"` + Error *ResourceManagementErrorWithDetails `json:"error,omitempty"` +} + +// ResourceGroupFilter is resource group filter. +type ResourceGroupFilter struct { + TagName *string `json:"tagName,omitempty"` + TagValue *string `json:"tagValue,omitempty"` +} + +// ResourceGroupListResult is list of resource groups. +type ResourceGroupListResult struct { + autorest.Response `json:"-"` + Value *[]ResourceGroup `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// ResourceGroupListResultPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. 
+func (client ResourceGroupListResult) ResourceGroupListResultPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + +// ResourceGroupProperties is the resource group properties. +type ResourceGroupProperties struct { + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// ResourceListResult is list of resource groups. +type ResourceListResult struct { + autorest.Response `json:"-"` + Value *[]GenericResource `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// ResourceListResultPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. +func (client ResourceListResult) ResourceListResultPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + +// ResourceManagementErrorWithDetails is +type ResourceManagementErrorWithDetails struct { + Code *string `json:"code,omitempty"` + Message *string `json:"message,omitempty"` + Target *string `json:"target,omitempty"` + Details *[]ResourceManagementErrorWithDetails `json:"details,omitempty"` +} + +// ResourceProviderOperationDisplayProperties is resource provider operation's +// display properties. +type ResourceProviderOperationDisplayProperties struct { + Publisher *string `json:"publisher,omitempty"` + Provider *string `json:"provider,omitempty"` + Resource *string `json:"resource,omitempty"` + Operation *string `json:"operation,omitempty"` + Description *string `json:"description,omitempty"` +} + +// Sku is sku for the resource. 
+type Sku struct { + Name *string `json:"name,omitempty"` + Tier *string `json:"tier,omitempty"` + Size *string `json:"size,omitempty"` + Family *string `json:"family,omitempty"` + Model *string `json:"model,omitempty"` + Capacity *int32 `json:"capacity,omitempty"` +} + +// SubResource is +type SubResource struct { + ID *string `json:"id,omitempty"` +} + +// TagCount is tag count. +type TagCount struct { + Type *string `json:"type,omitempty"` + Value *string `json:"value,omitempty"` +} + +// TagDetails is tag details. +type TagDetails struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + TagName *string `json:"tagName,omitempty"` + Count *TagCount `json:"count,omitempty"` + Values *[]TagValue `json:"values,omitempty"` +} + +// TagsListResult is list of subscription tags. +type TagsListResult struct { + autorest.Response `json:"-"` + Value *[]TagDetails `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// TagsListResultPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. +func (client TagsListResult) TagsListResultPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + +// TagValue is tag information. +type TagValue struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + TagValueProperty *string `json:"tagValue,omitempty"` + Count *TagCount `json:"count,omitempty"` +} + +// TargetResource is target resource. +type TargetResource struct { + ID *string `json:"id,omitempty"` + ResourceName *string `json:"resourceName,omitempty"` + ResourceType *string `json:"resourceType,omitempty"` +} + +// TemplateLink is entity representing the reference to the template. 
+type TemplateLink struct { + URI *string `json:"uri,omitempty"` + ContentVersion *string `json:"contentVersion,omitempty"` +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/resources/resources/providers.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/resources/resources/providers.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/resources/resources/providers.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/resources/resources/providers.go 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,312 @@ +package resources + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "net/http" +) + +// ProvidersClient is the client for the Providers methods of the Resources +// service. +type ProvidersClient struct { + ManagementClient +} + +// NewProvidersClient creates an instance of the ProvidersClient client. +func NewProvidersClient(subscriptionID string) ProvidersClient { + return NewProvidersClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewProvidersClientWithBaseURI creates an instance of the ProvidersClient +// client. 
+func NewProvidersClientWithBaseURI(baseURI string, subscriptionID string) ProvidersClient { + return ProvidersClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// Get gets a resource provider. +// +// resourceProviderNamespace is namespace of the resource provider. +func (client ProvidersClient) Get(resourceProviderNamespace string) (result Provider, err error) { + req, err := client.GetPreparer(resourceProviderNamespace) + if err != nil { + return result, autorest.NewErrorWithError(err, "resources.ProvidersClient", "Get", nil, "Failure preparing request") + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "resources.ProvidersClient", "Get", resp, "Failure sending request") + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.ProvidersClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client ProvidersClient) GetPreparer(resourceProviderNamespace string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceProviderNamespace": autorest.Encode("path", resourceProviderNamespace), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/{resourceProviderNamespace}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. 
+func (client ProvidersClient) GetSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client ProvidersClient) GetResponder(resp *http.Response) (result Provider, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List gets a list of resource providers. +// +// top is query parameters. If null is passed returns all deployments. +func (client ProvidersClient) List(top *int32) (result ProviderListResult, err error) { + req, err := client.ListPreparer(top) + if err != nil { + return result, autorest.NewErrorWithError(err, "resources.ProvidersClient", "List", nil, "Failure preparing request") + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "resources.ProvidersClient", "List", resp, "Failure sending request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.ProvidersClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. 
+func (client ProvidersClient) ListPreparer(top *int32) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + if top != nil { + queryParameters["$top"] = autorest.Encode("query", *top) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client ProvidersClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client ProvidersClient) ListResponder(resp *http.Response) (result ProviderListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListNextResults retrieves the next set of results, if any. 
+func (client ProvidersClient) ListNextResults(lastResults ProviderListResult) (result ProviderListResult, err error) { + req, err := lastResults.ProviderListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "resources.ProvidersClient", "List", nil, "Failure preparing next results request request") + } + if req == nil { + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "resources.ProvidersClient", "List", resp, "Failure sending next results request request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.ProvidersClient", "List", resp, "Failure responding to next results request request") + } + + return +} + +// Register registers provider to be used with a subscription. +// +// resourceProviderNamespace is namespace of the resource provider. +func (client ProvidersClient) Register(resourceProviderNamespace string) (result Provider, err error) { + req, err := client.RegisterPreparer(resourceProviderNamespace) + if err != nil { + return result, autorest.NewErrorWithError(err, "resources.ProvidersClient", "Register", nil, "Failure preparing request") + } + + resp, err := client.RegisterSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "resources.ProvidersClient", "Register", resp, "Failure sending request") + } + + result, err = client.RegisterResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.ProvidersClient", "Register", resp, "Failure responding to request") + } + + return +} + +// RegisterPreparer prepares the Register request. 
+func (client ProvidersClient) RegisterPreparer(resourceProviderNamespace string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceProviderNamespace": autorest.Encode("path", resourceProviderNamespace), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/{resourceProviderNamespace}/register", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// RegisterSender sends the Register request. The method will close the +// http.Response Body if it receives an error. +func (client ProvidersClient) RegisterSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// RegisterResponder handles the response to the Register request. The method always +// closes the http.Response Body. +func (client ProvidersClient) RegisterResponder(resp *http.Response) (result Provider, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Unregister unregisters provider from a subscription. +// +// resourceProviderNamespace is namespace of the resource provider. 
+func (client ProvidersClient) Unregister(resourceProviderNamespace string) (result Provider, err error) { + req, err := client.UnregisterPreparer(resourceProviderNamespace) + if err != nil { + return result, autorest.NewErrorWithError(err, "resources.ProvidersClient", "Unregister", nil, "Failure preparing request") + } + + resp, err := client.UnregisterSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "resources.ProvidersClient", "Unregister", resp, "Failure sending request") + } + + result, err = client.UnregisterResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.ProvidersClient", "Unregister", resp, "Failure responding to request") + } + + return +} + +// UnregisterPreparer prepares the Unregister request. +func (client ProvidersClient) UnregisterPreparer(resourceProviderNamespace string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceProviderNamespace": autorest.Encode("path", resourceProviderNamespace), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/{resourceProviderNamespace}/unregister", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// UnregisterSender sends the Unregister request. The method will close the +// http.Response Body if it receives an error. +func (client ProvidersClient) UnregisterSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// UnregisterResponder handles the response to the Unregister request. The method always +// closes the http.Response Body. 
+func (client ProvidersClient) UnregisterResponder(resp *http.Response) (result Provider, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/resources/resources/resources.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/resources/resources/resources.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/resources/resources/resources.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/resources/resources/resources.go 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,473 @@ +package resources + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "net/http" +) + +// Client is the client for the Resources methods of the Resources service. +type Client struct { + ManagementClient +} + +// NewClient creates an instance of the Client client. 
+func NewClient(subscriptionID string) Client { + return NewClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewClientWithBaseURI creates an instance of the Client client. +func NewClientWithBaseURI(baseURI string, subscriptionID string) Client { + return Client{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CheckExistence checks whether resource exists. +// +// resourceGroupName is the name of the resource group. The name is case +// insensitive. resourceProviderNamespace is resource identity. +// parentResourcePath is resource identity. resourceType is resource +// identity. resourceName is resource identity. +func (client Client) CheckExistence(resourceGroupName string, resourceProviderNamespace string, parentResourcePath string, resourceType string, resourceName string) (result autorest.Response, err error) { + req, err := client.CheckExistencePreparer(resourceGroupName, resourceProviderNamespace, parentResourcePath, resourceType, resourceName) + if err != nil { + return result, autorest.NewErrorWithError(err, "resources.Client", "CheckExistence", nil, "Failure preparing request") + } + + resp, err := client.CheckExistenceSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "resources.Client", "CheckExistence", resp, "Failure sending request") + } + + result, err = client.CheckExistenceResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.Client", "CheckExistence", resp, "Failure responding to request") + } + + return +} + +// CheckExistencePreparer prepares the CheckExistence request. 
+func (client Client) CheckExistencePreparer(resourceGroupName string, resourceProviderNamespace string, parentResourcePath string, resourceType string, resourceName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "parentResourcePath": parentResourcePath, + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "resourceName": autorest.Encode("path", resourceName), + "resourceProviderNamespace": autorest.Encode("path", resourceProviderNamespace), + "resourceType": resourceType, + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsHead(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// CheckExistenceSender sends the CheckExistence request. The method will close the +// http.Response Body if it receives an error. +func (client Client) CheckExistenceSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// CheckExistenceResponder handles the response to the CheckExistence request. The method always +// closes the http.Response Body. +func (client Client) CheckExistenceResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent, http.StatusNotFound), + autorest.ByClosing()) + result.Response = resp + return +} + +// CreateOrUpdate create a resource. +// +// resourceGroupName is the name of the resource group. The name is case +// insensitive. resourceProviderNamespace is resource identity. 
+// parentResourcePath is resource identity. resourceType is resource +// identity. resourceName is resource identity. parameters is create or +// update resource parameters. +func (client Client) CreateOrUpdate(resourceGroupName string, resourceProviderNamespace string, parentResourcePath string, resourceType string, resourceName string, parameters GenericResource) (result GenericResource, err error) { + req, err := client.CreateOrUpdatePreparer(resourceGroupName, resourceProviderNamespace, parentResourcePath, resourceType, resourceName, parameters) + if err != nil { + return result, autorest.NewErrorWithError(err, "resources.Client", "CreateOrUpdate", nil, "Failure preparing request") + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "resources.Client", "CreateOrUpdate", resp, "Failure sending request") + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.Client", "CreateOrUpdate", resp, "Failure responding to request") + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. 
+func (client Client) CreateOrUpdatePreparer(resourceGroupName string, resourceProviderNamespace string, parentResourcePath string, resourceType string, resourceName string, parameters GenericResource) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "parentResourcePath": parentResourcePath, + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "resourceName": autorest.Encode("path", resourceName), + "resourceProviderNamespace": autorest.Encode("path", resourceProviderNamespace), + "resourceType": resourceType, + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client Client) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client Client) CreateOrUpdateResponder(resp *http.Response) (result GenericResource, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusCreated, http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete delete resource and all of its resources. 
+// +// resourceGroupName is the name of the resource group. The name is case +// insensitive. resourceProviderNamespace is resource identity. +// parentResourcePath is resource identity. resourceType is resource +// identity. resourceName is resource identity. +func (client Client) Delete(resourceGroupName string, resourceProviderNamespace string, parentResourcePath string, resourceType string, resourceName string) (result autorest.Response, err error) { + req, err := client.DeletePreparer(resourceGroupName, resourceProviderNamespace, parentResourcePath, resourceType, resourceName) + if err != nil { + return result, autorest.NewErrorWithError(err, "resources.Client", "Delete", nil, "Failure preparing request") + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "resources.Client", "Delete", resp, "Failure sending request") + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.Client", "Delete", resp, "Failure responding to request") + } + + return +} + +// DeletePreparer prepares the Delete request. 
+func (client Client) DeletePreparer(resourceGroupName string, resourceProviderNamespace string, parentResourcePath string, resourceType string, resourceName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "parentResourcePath": parentResourcePath, + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "resourceName": autorest.Encode("path", resourceName), + "resourceProviderNamespace": autorest.Encode("path", resourceProviderNamespace), + "resourceType": resourceType, + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client Client) DeleteSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client Client) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get returns a resource belonging to a resource group. +// +// resourceGroupName is the name of the resource group. The name is case +// insensitive. resourceProviderNamespace is resource identity. 
+// parentResourcePath is resource identity. resourceType is resource +// identity. resourceName is resource identity. +func (client Client) Get(resourceGroupName string, resourceProviderNamespace string, parentResourcePath string, resourceType string, resourceName string) (result GenericResource, err error) { + req, err := client.GetPreparer(resourceGroupName, resourceProviderNamespace, parentResourcePath, resourceType, resourceName) + if err != nil { + return result, autorest.NewErrorWithError(err, "resources.Client", "Get", nil, "Failure preparing request") + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "resources.Client", "Get", resp, "Failure sending request") + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.Client", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. 
+func (client Client) GetPreparer(resourceGroupName string, resourceProviderNamespace string, parentResourcePath string, resourceType string, resourceName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "parentResourcePath": parentResourcePath, + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "resourceName": autorest.Encode("path", resourceName), + "resourceProviderNamespace": autorest.Encode("path", resourceProviderNamespace), + "resourceType": resourceType, + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client Client) GetSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client Client) GetResponder(resp *http.Response) (result GenericResource, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List get all of the resources under a subscription. +// +// filter is the filter to apply on the operation. top is query parameters. If +// null is passed returns all resource groups. 
+func (client Client) List(filter string, top *int32) (result ResourceListResult, err error) { + req, err := client.ListPreparer(filter, top) + if err != nil { + return result, autorest.NewErrorWithError(err, "resources.Client", "List", nil, "Failure preparing request") + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "resources.Client", "List", resp, "Failure sending request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.Client", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client Client) ListPreparer(filter string, top *int32) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + if len(filter) > 0 { + queryParameters["$filter"] = autorest.Encode("query", filter) + } + if top != nil { + queryParameters["$top"] = autorest.Encode("query", *top) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resources", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client Client) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. 
+func (client Client) ListResponder(resp *http.Response) (result ResourceListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListNextResults retrieves the next set of results, if any. +func (client Client) ListNextResults(lastResults ResourceListResult) (result ResourceListResult, err error) { + req, err := lastResults.ResourceListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "resources.Client", "List", nil, "Failure preparing next results request request") + } + if req == nil { + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "resources.Client", "List", resp, "Failure sending next results request request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.Client", "List", resp, "Failure responding to next results request request") + } + + return +} + +// MoveResources move resources from one resource group to another. The +// resources being moved should all be in the same resource group. This +// method may poll for completion. Polling can be canceled by passing the +// cancel channel argument. The channel will be used to cancel polling and +// any outstanding HTTP requests. +// +// sourceResourceGroupName is source resource group name. parameters is move +// resources' parameters. 
+func (client Client) MoveResources(sourceResourceGroupName string, parameters MoveInfo, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.MoveResourcesPreparer(sourceResourceGroupName, parameters, cancel) + if err != nil { + return result, autorest.NewErrorWithError(err, "resources.Client", "MoveResources", nil, "Failure preparing request") + } + + resp, err := client.MoveResourcesSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "resources.Client", "MoveResources", resp, "Failure sending request") + } + + result, err = client.MoveResourcesResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.Client", "MoveResources", resp, "Failure responding to request") + } + + return +} + +// MoveResourcesPreparer prepares the MoveResources request. +func (client Client) MoveResourcesPreparer(sourceResourceGroupName string, parameters MoveInfo, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "sourceResourceGroupName": autorest.Encode("path", sourceResourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{sourceResourceGroupName}/moveResources", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// MoveResourcesSender sends the MoveResources request. The method will close the +// http.Response Body if it receives an error. 
+func (client Client) MoveResourcesSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// MoveResourcesResponder handles the response to the MoveResources request. The method always +// closes the http.Response Body. +func (client Client) MoveResourcesResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/resources/resources/tags.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/resources/resources/tags.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/resources/resources/tags.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/resources/resources/tags.go 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,366 @@ +package resources + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. 
+ +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "net/http" +) + +// TagsClient is the client for the Tags methods of the Resources service. +type TagsClient struct { + ManagementClient +} + +// NewTagsClient creates an instance of the TagsClient client. +func NewTagsClient(subscriptionID string) TagsClient { + return NewTagsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewTagsClientWithBaseURI creates an instance of the TagsClient client. +func NewTagsClientWithBaseURI(baseURI string, subscriptionID string) TagsClient { + return TagsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate create a subscription resource tag. +// +// tagName is the name of the tag. +func (client TagsClient) CreateOrUpdate(tagName string) (result TagDetails, err error) { + req, err := client.CreateOrUpdatePreparer(tagName) + if err != nil { + return result, autorest.NewErrorWithError(err, "resources.TagsClient", "CreateOrUpdate", nil, "Failure preparing request") + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "resources.TagsClient", "CreateOrUpdate", resp, "Failure sending request") + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.TagsClient", "CreateOrUpdate", resp, "Failure responding to request") + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. 
+func (client TagsClient) CreateOrUpdatePreparer(tagName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "tagName": autorest.Encode("path", tagName), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/tagNames/{tagName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client TagsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client TagsClient) CreateOrUpdateResponder(resp *http.Response) (result TagDetails, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// CreateOrUpdateValue create a subscription resource tag value. +// +// tagName is the name of the tag. tagValue is the value of the tag. 
+func (client TagsClient) CreateOrUpdateValue(tagName string, tagValue string) (result TagValue, err error) { + req, err := client.CreateOrUpdateValuePreparer(tagName, tagValue) + if err != nil { + return result, autorest.NewErrorWithError(err, "resources.TagsClient", "CreateOrUpdateValue", nil, "Failure preparing request") + } + + resp, err := client.CreateOrUpdateValueSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "resources.TagsClient", "CreateOrUpdateValue", resp, "Failure sending request") + } + + result, err = client.CreateOrUpdateValueResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.TagsClient", "CreateOrUpdateValue", resp, "Failure responding to request") + } + + return +} + +// CreateOrUpdateValuePreparer prepares the CreateOrUpdateValue request. +func (client TagsClient) CreateOrUpdateValuePreparer(tagName string, tagValue string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "tagName": autorest.Encode("path", tagName), + "tagValue": autorest.Encode("path", tagValue), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/tagNames/{tagName}/tagValues/{tagValue}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// CreateOrUpdateValueSender sends the CreateOrUpdateValue request. The method will close the +// http.Response Body if it receives an error. +func (client TagsClient) CreateOrUpdateValueSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// CreateOrUpdateValueResponder handles the response to the CreateOrUpdateValue request. 
The method always +// closes the http.Response Body. +func (client TagsClient) CreateOrUpdateValueResponder(resp *http.Response) (result TagValue, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete delete a subscription resource tag. +// +// tagName is the name of the tag. +func (client TagsClient) Delete(tagName string) (result autorest.Response, err error) { + req, err := client.DeletePreparer(tagName) + if err != nil { + return result, autorest.NewErrorWithError(err, "resources.TagsClient", "Delete", nil, "Failure preparing request") + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "resources.TagsClient", "Delete", resp, "Failure sending request") + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.TagsClient", "Delete", resp, "Failure responding to request") + } + + return +} + +// DeletePreparer prepares the Delete request. +func (client TagsClient) DeletePreparer(tagName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "tagName": autorest.Encode("path", tagName), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/tagNames/{tagName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. 
+func (client TagsClient) DeleteSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client TagsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// DeleteValue delete a subscription resource tag value. +// +// tagName is the name of the tag. tagValue is the value of the tag. +func (client TagsClient) DeleteValue(tagName string, tagValue string) (result autorest.Response, err error) { + req, err := client.DeleteValuePreparer(tagName, tagValue) + if err != nil { + return result, autorest.NewErrorWithError(err, "resources.TagsClient", "DeleteValue", nil, "Failure preparing request") + } + + resp, err := client.DeleteValueSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "resources.TagsClient", "DeleteValue", resp, "Failure sending request") + } + + result, err = client.DeleteValueResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.TagsClient", "DeleteValue", resp, "Failure responding to request") + } + + return +} + +// DeleteValuePreparer prepares the DeleteValue request. 
+func (client TagsClient) DeleteValuePreparer(tagName string, tagValue string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "tagName": autorest.Encode("path", tagName), + "tagValue": autorest.Encode("path", tagValue), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/tagNames/{tagName}/tagValues/{tagValue}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// DeleteValueSender sends the DeleteValue request. The method will close the +// http.Response Body if it receives an error. +func (client TagsClient) DeleteValueSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// DeleteValueResponder handles the response to the DeleteValue request. The method always +// closes the http.Response Body. +func (client TagsClient) DeleteValueResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// List get a list of subscription resource tags. 
+func (client TagsClient) List() (result TagsListResult, err error) { + req, err := client.ListPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "resources.TagsClient", "List", nil, "Failure preparing request") + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "resources.TagsClient", "List", resp, "Failure sending request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.TagsClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client TagsClient) ListPreparer() (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/tagNames", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client TagsClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. 
+func (client TagsClient) ListResponder(resp *http.Response) (result TagsListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListNextResults retrieves the next set of results, if any. +func (client TagsClient) ListNextResults(lastResults TagsListResult) (result TagsListResult, err error) { + req, err := lastResults.TagsListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "resources.TagsClient", "List", nil, "Failure preparing next results request request") + } + if req == nil { + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "resources.TagsClient", "List", resp, "Failure sending next results request request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.TagsClient", "List", resp, "Failure responding to next results request request") + } + + return +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/resources/resources/version.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/resources/resources/version.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/resources/resources/version.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/resources/resources/version.go 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,43 @@ +package resources + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "fmt" +) + +const ( + major = "3" + minor = "1" + patch = "0" + // Always begin a "tag" with a dash (as per http://semver.org) + tag = "-beta" + semVerFormat = "%s.%s.%s%s" + userAgentFormat = "Azure-SDK-for-Go/%s arm-%s/%s" +) + +// UserAgent returns the UserAgent string to use when sending http.Requests. +func UserAgent() string { + return fmt.Sprintf(userAgentFormat, Version(), "resources", "2016-02-01") +} + +// Version returns the semantic version (see http://semver.org) of the client. +func Version() string { + return fmt.Sprintf(semVerFormat, major, minor, patch, tag) +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/resources/resources.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/resources/resources.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/resources/resources.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/resources/resources.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,472 +0,0 @@ -package resources - -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. -// -// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 -// Changes may cause incorrect behavior and will be lost if the code is -// regenerated. - -import ( - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" - "net/http" - "net/url" -) - -// Client is the client for the Resources methods of the Resources service. -type Client struct { - ManagementClient -} - -// NewClient creates an instance of the Client client. -func NewClient(subscriptionID string) Client { - return NewClientWithBaseURI(DefaultBaseURI, subscriptionID) -} - -// NewClientWithBaseURI creates an instance of the Client client. -func NewClientWithBaseURI(baseURI string, subscriptionID string) Client { - return Client{NewWithBaseURI(baseURI, subscriptionID)} -} - -// CheckExistence checks whether resource exists. -// -// resourceGroupName is the name of the resource group. The name is case -// insensitive. resourceProviderNamespace is resource identity. -// parentResourcePath is resource identity. resourceType is resource -// identity. resourceName is resource identity. 
-func (client Client) CheckExistence(resourceGroupName string, resourceProviderNamespace string, parentResourcePath string, resourceType string, resourceName string, apiVersion string) (result autorest.Response, ae error) { - req, err := client.CheckExistencePreparer(resourceGroupName, resourceProviderNamespace, parentResourcePath, resourceType, resourceName, apiVersion) - if err != nil { - return result, autorest.NewErrorWithError(err, "resources/Client", "CheckExistence", "Failure preparing request") - } - - resp, err := client.CheckExistenceSender(req) - if err != nil { - result.Response = resp - return result, autorest.NewErrorWithError(err, "resources/Client", "CheckExistence", "Failure sending request") - } - - result, err = client.CheckExistenceResponder(resp) - if err != nil { - ae = autorest.NewErrorWithError(err, "resources/Client", "CheckExistence", "Failure responding to request") - } - - return -} - -// CheckExistencePreparer prepares the CheckExistence request. -func (client Client) CheckExistencePreparer(resourceGroupName string, resourceProviderNamespace string, parentResourcePath string, resourceType string, resourceName string, apiVersion string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "parentResourcePath": parentResourcePath, - "resourceGroupName": url.QueryEscape(resourceGroupName), - "resourceName": url.QueryEscape(resourceName), - "resourceProviderNamespace": url.QueryEscape(resourceProviderNamespace), - "resourceType": resourceType, - "subscriptionId": url.QueryEscape(client.SubscriptionID), - } - - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsHead(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}"), - 
autorest.WithPathParameters(pathParameters), - autorest.WithQueryParameters(queryParameters)) -} - -// CheckExistenceSender sends the CheckExistence request. The method will close the -// http.Response Body if it receives an error. -func (client Client) CheckExistenceSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK, http.StatusNoContent, http.StatusNotFound) -} - -// CheckExistenceResponder handles the response to the CheckExistence request. The method always -// closes the http.Response Body. -func (client Client) CheckExistenceResponder(resp *http.Response) (result autorest.Response, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent, http.StatusNotFound), - autorest.ByClosing()) - result.Response = resp - return -} - -// CreateOrUpdate create a resource. -// -// resourceGroupName is the name of the resource group. The name is case -// insensitive. resourceProviderNamespace is resource identity. -// parentResourcePath is resource identity. resourceType is resource -// identity. resourceName is resource identity. parameters is create or -// update resource parameters. 
-func (client Client) CreateOrUpdate(resourceGroupName string, resourceProviderNamespace string, parentResourcePath string, resourceType string, resourceName string, apiVersion string, parameters GenericResource) (result GenericResource, ae error) { - req, err := client.CreateOrUpdatePreparer(resourceGroupName, resourceProviderNamespace, parentResourcePath, resourceType, resourceName, apiVersion, parameters) - if err != nil { - return result, autorest.NewErrorWithError(err, "resources/Client", "CreateOrUpdate", "Failure preparing request") - } - - resp, err := client.CreateOrUpdateSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "resources/Client", "CreateOrUpdate", "Failure sending request") - } - - result, err = client.CreateOrUpdateResponder(resp) - if err != nil { - ae = autorest.NewErrorWithError(err, "resources/Client", "CreateOrUpdate", "Failure responding to request") - } - - return -} - -// CreateOrUpdatePreparer prepares the CreateOrUpdate request. 
-func (client Client) CreateOrUpdatePreparer(resourceGroupName string, resourceProviderNamespace string, parentResourcePath string, resourceType string, resourceName string, apiVersion string, parameters GenericResource) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "parentResourcePath": parentResourcePath, - "resourceGroupName": url.QueryEscape(resourceGroupName), - "resourceName": url.QueryEscape(resourceName), - "resourceProviderNamespace": url.QueryEscape(resourceProviderNamespace), - "resourceType": resourceType, - "subscriptionId": url.QueryEscape(client.SubscriptionID), - } - - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsPut(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}"), - autorest.WithJSON(parameters), - autorest.WithPathParameters(pathParameters), - autorest.WithQueryParameters(queryParameters)) -} - -// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the -// http.Response Body if it receives an error. -func (client Client) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusCreated, http.StatusOK) -} - -// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always -// closes the http.Response Body. -func (client Client) CreateOrUpdateResponder(resp *http.Response) (result GenericResource, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusCreated, http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// Delete delete resource and all of its resources. 
-// -// resourceGroupName is the name of the resource group. The name is case -// insensitive. resourceProviderNamespace is resource identity. -// parentResourcePath is resource identity. resourceType is resource -// identity. resourceName is resource identity. -func (client Client) Delete(resourceGroupName string, resourceProviderNamespace string, parentResourcePath string, resourceType string, resourceName string, apiVersion string) (result autorest.Response, ae error) { - req, err := client.DeletePreparer(resourceGroupName, resourceProviderNamespace, parentResourcePath, resourceType, resourceName, apiVersion) - if err != nil { - return result, autorest.NewErrorWithError(err, "resources/Client", "Delete", "Failure preparing request") - } - - resp, err := client.DeleteSender(req) - if err != nil { - result.Response = resp - return result, autorest.NewErrorWithError(err, "resources/Client", "Delete", "Failure sending request") - } - - result, err = client.DeleteResponder(resp) - if err != nil { - ae = autorest.NewErrorWithError(err, "resources/Client", "Delete", "Failure responding to request") - } - - return -} - -// DeletePreparer prepares the Delete request. 
-func (client Client) DeletePreparer(resourceGroupName string, resourceProviderNamespace string, parentResourcePath string, resourceType string, resourceName string, apiVersion string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "parentResourcePath": parentResourcePath, - "resourceGroupName": url.QueryEscape(resourceGroupName), - "resourceName": url.QueryEscape(resourceName), - "resourceProviderNamespace": url.QueryEscape(resourceProviderNamespace), - "resourceType": resourceType, - "subscriptionId": url.QueryEscape(client.SubscriptionID), - } - - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsDelete(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}"), - autorest.WithPathParameters(pathParameters), - autorest.WithQueryParameters(queryParameters)) -} - -// DeleteSender sends the Delete request. The method will close the -// http.Response Body if it receives an error. -func (client Client) DeleteSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK, http.StatusNoContent, http.StatusAccepted) -} - -// DeleteResponder handles the response to the Delete request. The method always -// closes the http.Response Body. -func (client Client) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent, http.StatusAccepted), - autorest.ByClosing()) - result.Response = resp - return -} - -// Get returns a resource belonging to a resource group. -// -// resourceGroupName is the name of the resource group. The name is case -// insensitive. resourceProviderNamespace is resource identity. 
-// parentResourcePath is resource identity. resourceType is resource -// identity. resourceName is resource identity. -func (client Client) Get(resourceGroupName string, resourceProviderNamespace string, parentResourcePath string, resourceType string, resourceName string, apiVersion string) (result GenericResource, ae error) { - req, err := client.GetPreparer(resourceGroupName, resourceProviderNamespace, parentResourcePath, resourceType, resourceName, apiVersion) - if err != nil { - return result, autorest.NewErrorWithError(err, "resources/Client", "Get", "Failure preparing request") - } - - resp, err := client.GetSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "resources/Client", "Get", "Failure sending request") - } - - result, err = client.GetResponder(resp) - if err != nil { - ae = autorest.NewErrorWithError(err, "resources/Client", "Get", "Failure responding to request") - } - - return -} - -// GetPreparer prepares the Get request. 
-func (client Client) GetPreparer(resourceGroupName string, resourceProviderNamespace string, parentResourcePath string, resourceType string, resourceName string, apiVersion string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "parentResourcePath": parentResourcePath, - "resourceGroupName": url.QueryEscape(resourceGroupName), - "resourceName": url.QueryEscape(resourceName), - "resourceProviderNamespace": url.QueryEscape(resourceProviderNamespace), - "resourceType": resourceType, - "subscriptionId": url.QueryEscape(client.SubscriptionID), - } - - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}"), - autorest.WithPathParameters(pathParameters), - autorest.WithQueryParameters(queryParameters)) -} - -// GetSender sends the Get request. The method will close the -// http.Response Body if it receives an error. -func (client Client) GetSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK, http.StatusNoContent) -} - -// GetResponder handles the response to the Get request. The method always -// closes the http.Response Body. -func (client Client) GetResponder(resp *http.Response) (result GenericResource, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// List get all of the resources under a subscription. -// -// filter is the filter to apply on the operation. top is query parameters. If -// null is passed returns all resource groups. 
-func (client Client) List(filter string, top *int) (result ResourceListResult, ae error) { - req, err := client.ListPreparer(filter, top) - if err != nil { - return result, autorest.NewErrorWithError(err, "resources/Client", "List", "Failure preparing request") - } - - resp, err := client.ListSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "resources/Client", "List", "Failure sending request") - } - - result, err = client.ListResponder(resp) - if err != nil { - ae = autorest.NewErrorWithError(err, "resources/Client", "List", "Failure responding to request") - } - - return -} - -// ListPreparer prepares the List request. -func (client Client) ListPreparer(filter string, top *int) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "subscriptionId": url.QueryEscape(client.SubscriptionID), - } - - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - if len(filter) > 0 { - queryParameters["$filter"] = filter - } - if top != nil { - queryParameters["$top"] = top - } - - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resources"), - autorest.WithPathParameters(pathParameters), - autorest.WithQueryParameters(queryParameters)) -} - -// ListSender sends the List request. The method will close the -// http.Response Body if it receives an error. -func (client Client) ListSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) -} - -// ListResponder handles the response to the List request. The method always -// closes the http.Response Body. 
-func (client Client) ListResponder(resp *http.Response) (result ResourceListResult, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// ListNextResults retrieves the next set of results, if any. -func (client Client) ListNextResults(lastResults ResourceListResult) (result ResourceListResult, ae error) { - req, err := lastResults.ResourceListResultPreparer() - if err != nil { - return result, autorest.NewErrorWithError(err, "resources/Client", "List", "Failure preparing next results request request") - } - if req == nil { - return - } - - resp, err := client.ListSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "resources/Client", "List", "Failure sending next results request request") - } - - result, err = client.ListResponder(resp) - if err != nil { - ae = autorest.NewErrorWithError(err, "resources/Client", "List", "Failure responding to next results request request") - } - - return -} - -// MoveResources begin moving resources.To determine whether the operation has -// finished processing the request, call GetLongRunningOperationStatus. -// -// sourceResourceGroupName is source resource group name. parameters is move -// resources' parameters. 
-func (client Client) MoveResources(sourceResourceGroupName string, parameters MoveInfo) (result autorest.Response, ae error) { - req, err := client.MoveResourcesPreparer(sourceResourceGroupName, parameters) - if err != nil { - return result, autorest.NewErrorWithError(err, "resources/Client", "MoveResources", "Failure preparing request") - } - - resp, err := client.MoveResourcesSender(req) - if err != nil { - result.Response = resp - return result, autorest.NewErrorWithError(err, "resources/Client", "MoveResources", "Failure sending request") - } - - result, err = client.MoveResourcesResponder(resp) - if err != nil { - ae = autorest.NewErrorWithError(err, "resources/Client", "MoveResources", "Failure responding to request") - } - - return -} - -// MoveResourcesPreparer prepares the MoveResources request. -func (client Client) MoveResourcesPreparer(sourceResourceGroupName string, parameters MoveInfo) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "sourceResourceGroupName": url.QueryEscape(sourceResourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - } - - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsPost(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{sourceResourceGroupName}/moveResources"), - autorest.WithJSON(parameters), - autorest.WithPathParameters(pathParameters), - autorest.WithQueryParameters(queryParameters)) -} - -// MoveResourcesSender sends the MoveResources request. The method will close the -// http.Response Body if it receives an error. -func (client Client) MoveResourcesSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK, http.StatusAccepted, http.StatusNoContent) -} - -// MoveResourcesResponder handles the response to the MoveResources request. 
The method always -// closes the http.Response Body. -func (client Client) MoveResourcesResponder(resp *http.Response) (result autorest.Response, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), - autorest.ByClosing()) - result.Response = resp - return -} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/resources/subscriptions/client.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/resources/subscriptions/client.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/resources/subscriptions/client.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/resources/subscriptions/client.go 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,255 @@ +// Package subscriptions implements the Azure ARM Subscriptions service API +// version 2015-11-01. +// +package subscriptions + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. 
+ +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "net/http" +) + +const ( + // APIVersion is the version of the Subscriptions + APIVersion = "2015-11-01" + + // DefaultBaseURI is the default URI used for the service Subscriptions + DefaultBaseURI = "https://management.azure.com" +) + +// ManagementClient is the base client for Subscriptions. +type ManagementClient struct { + autorest.Client + BaseURI string + APIVersion string +} + +// New creates an instance of the ManagementClient client. +func New() ManagementClient { + return NewWithBaseURI(DefaultBaseURI) +} + +// NewWithBaseURI creates an instance of the ManagementClient client. +func NewWithBaseURI(baseURI string) ManagementClient { + return ManagementClient{ + Client: autorest.NewClientWithUserAgent(UserAgent()), + BaseURI: baseURI, + APIVersion: APIVersion, + } +} + +// Get gets details about particular subscription. +// +// subscriptionID is id of the subscription. +func (client ManagementClient) Get(subscriptionID string) (result Subscription, err error) { + req, err := client.GetPreparer(subscriptionID) + if err != nil { + return result, autorest.NewErrorWithError(err, "subscriptions.ManagementClient", "Get", nil, "Failure preparing request") + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "subscriptions.ManagementClient", "Get", resp, "Failure sending request") + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "subscriptions.ManagementClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. 
+func (client ManagementClient) GetPreparer(subscriptionID string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", subscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client ManagementClient) GetSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client ManagementClient) GetResponder(resp *http.Response) (result Subscription, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List gets a list of the subscriptionIds. 
+func (client ManagementClient) List() (result SubscriptionListResult, err error) { + req, err := client.ListPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "subscriptions.ManagementClient", "List", nil, "Failure preparing request") + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "subscriptions.ManagementClient", "List", resp, "Failure sending request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "subscriptions.ManagementClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client ManagementClient) ListPreparer() (*http.Request, error) { + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions"), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client ManagementClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client ManagementClient) ListResponder(resp *http.Response) (result SubscriptionListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListNextResults retrieves the next set of results, if any. 
+func (client ManagementClient) ListNextResults(lastResults SubscriptionListResult) (result SubscriptionListResult, err error) { + req, err := lastResults.SubscriptionListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "subscriptions.ManagementClient", "List", nil, "Failure preparing next results request request") + } + if req == nil { + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "subscriptions.ManagementClient", "List", resp, "Failure sending next results request request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "subscriptions.ManagementClient", "List", resp, "Failure responding to next results request request") + } + + return +} + +// ListLocations gets a list of the subscription locations. +// +// subscriptionID is id of the subscription +func (client ManagementClient) ListLocations(subscriptionID string) (result LocationListResult, err error) { + req, err := client.ListLocationsPreparer(subscriptionID) + if err != nil { + return result, autorest.NewErrorWithError(err, "subscriptions.ManagementClient", "ListLocations", nil, "Failure preparing request") + } + + resp, err := client.ListLocationsSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "subscriptions.ManagementClient", "ListLocations", resp, "Failure sending request") + } + + result, err = client.ListLocationsResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "subscriptions.ManagementClient", "ListLocations", resp, "Failure responding to request") + } + + return +} + +// ListLocationsPreparer prepares the ListLocations request. 
+func (client ManagementClient) ListLocationsPreparer(subscriptionID string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", subscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/locations", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListLocationsSender sends the ListLocations request. The method will close the +// http.Response Body if it receives an error. +func (client ManagementClient) ListLocationsSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListLocationsResponder handles the response to the ListLocations request. The method always +// closes the http.Response Body. +func (client ManagementClient) ListLocationsResponder(resp *http.Response) (result LocationListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/resources/subscriptions/models.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/resources/subscriptions/models.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/resources/subscriptions/models.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/resources/subscriptions/models.go 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,101 @@ +package subscriptions + +// Copyright (c) Microsoft and contributors. All rights reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/to" + "net/http" +) + +// Location is location information. +type Location struct { + ID *string `json:"id,omitempty"` + SubscriptionID *string `json:"subscriptionId,omitempty"` + Name *string `json:"name,omitempty"` + DisplayName *string `json:"displayName,omitempty"` + Latitude *string `json:"latitude,omitempty"` + Longitude *string `json:"longitude,omitempty"` +} + +// LocationListResult is location list operation response. +type LocationListResult struct { + autorest.Response `json:"-"` + Value *[]Location `json:"value,omitempty"` +} + +// Subscription is subscription information. +type Subscription struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + SubscriptionID *string `json:"subscriptionId,omitempty"` + DisplayName *string `json:"displayName,omitempty"` + State *string `json:"state,omitempty"` + SubscriptionPolicies *SubscriptionPolicies `json:"subscriptionPolicies,omitempty"` +} + +// SubscriptionListResult is subscription list operation response. 
+type SubscriptionListResult struct { + autorest.Response `json:"-"` + Value *[]Subscription `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// SubscriptionListResultPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. +func (client SubscriptionListResult) SubscriptionListResultPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + +// SubscriptionPolicies is subscription policies. +type SubscriptionPolicies struct { + LocationPlacementID *string `json:"locationPlacementId,omitempty"` + QuotaID *string `json:"quotaId,omitempty"` +} + +// TenantIDDescription is tenant Id information +type TenantIDDescription struct { + ID *string `json:"id,omitempty"` + TenantID *string `json:"tenantId,omitempty"` +} + +// TenantListResult is tenant Ids information. +type TenantListResult struct { + autorest.Response `json:"-"` + Value *[]TenantIDDescription `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// TenantListResultPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. 
+func (client TenantListResult) TenantListResultPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/resources/subscriptions/subscriptions.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/resources/subscriptions/subscriptions.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/resources/subscriptions/subscriptions.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/resources/subscriptions/subscriptions.go 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,239 @@ +package subscriptions + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "net/http" +) + +// Client is the client for the Subscriptions methods of the Subscriptions +// service. +type Client struct { + ManagementClient +} + +// NewClient creates an instance of the Client client. 
+func NewClient() Client { + return NewClientWithBaseURI(DefaultBaseURI) +} + +// NewClientWithBaseURI creates an instance of the Client client. +func NewClientWithBaseURI(baseURI string) Client { + return Client{NewWithBaseURI(baseURI)} +} + +// Get gets details about particular subscription. +// +// subscriptionID is id of the subscription. +func (client Client) Get(subscriptionID string) (result Subscription, err error) { + req, err := client.GetPreparer(subscriptionID) + if err != nil { + return result, autorest.NewErrorWithError(err, "subscriptions.Client", "Get", nil, "Failure preparing request") + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "subscriptions.Client", "Get", resp, "Failure sending request") + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "subscriptions.Client", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client Client) GetPreparer(subscriptionID string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", subscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client Client) GetSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. 
+func (client Client) GetResponder(resp *http.Response) (result Subscription, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List gets a list of the subscriptionIds. +func (client Client) List() (result SubscriptionListResult, err error) { + req, err := client.ListPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "subscriptions.Client", "List", nil, "Failure preparing request") + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "subscriptions.Client", "List", resp, "Failure sending request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "subscriptions.Client", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client Client) ListPreparer() (*http.Request, error) { + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions"), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client Client) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. 
+func (client Client) ListResponder(resp *http.Response) (result SubscriptionListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListNextResults retrieves the next set of results, if any. +func (client Client) ListNextResults(lastResults SubscriptionListResult) (result SubscriptionListResult, err error) { + req, err := lastResults.SubscriptionListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "subscriptions.Client", "List", nil, "Failure preparing next results request request") + } + if req == nil { + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "subscriptions.Client", "List", resp, "Failure sending next results request request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "subscriptions.Client", "List", resp, "Failure responding to next results request request") + } + + return +} + +// ListLocations gets a list of the subscription locations. 
+// +// subscriptionID is id of the subscription +func (client Client) ListLocations(subscriptionID string) (result LocationListResult, err error) { + req, err := client.ListLocationsPreparer(subscriptionID) + if err != nil { + return result, autorest.NewErrorWithError(err, "subscriptions.Client", "ListLocations", nil, "Failure preparing request") + } + + resp, err := client.ListLocationsSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "subscriptions.Client", "ListLocations", resp, "Failure sending request") + } + + result, err = client.ListLocationsResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "subscriptions.Client", "ListLocations", resp, "Failure responding to request") + } + + return +} + +// ListLocationsPreparer prepares the ListLocations request. +func (client Client) ListLocationsPreparer(subscriptionID string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", subscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/locations", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListLocationsSender sends the ListLocations request. The method will close the +// http.Response Body if it receives an error. +func (client Client) ListLocationsSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListLocationsResponder handles the response to the ListLocations request. The method always +// closes the http.Response Body. 
+func (client Client) ListLocationsResponder(resp *http.Response) (result LocationListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/resources/subscriptions/tenants.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/resources/subscriptions/tenants.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/resources/subscriptions/tenants.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/resources/subscriptions/tenants.go 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,119 @@ +package subscriptions + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "net/http" +) + +// TenantsClient is the client for the Tenants methods of the Subscriptions +// service. +type TenantsClient struct { + ManagementClient +} + +// NewTenantsClient creates an instance of the TenantsClient client. 
+func NewTenantsClient() TenantsClient { + return NewTenantsClientWithBaseURI(DefaultBaseURI) +} + +// NewTenantsClientWithBaseURI creates an instance of the TenantsClient client. +func NewTenantsClientWithBaseURI(baseURI string) TenantsClient { + return TenantsClient{NewWithBaseURI(baseURI)} +} + +// List gets a list of the tenantIds. +func (client TenantsClient) List() (result TenantListResult, err error) { + req, err := client.ListPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "subscriptions.TenantsClient", "List", nil, "Failure preparing request") + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "subscriptions.TenantsClient", "List", resp, "Failure sending request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "subscriptions.TenantsClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client TenantsClient) ListPreparer() (*http.Request, error) { + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/tenants"), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client TenantsClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. 
+func (client TenantsClient) ListResponder(resp *http.Response) (result TenantListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListNextResults retrieves the next set of results, if any. +func (client TenantsClient) ListNextResults(lastResults TenantListResult) (result TenantListResult, err error) { + req, err := lastResults.TenantListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "subscriptions.TenantsClient", "List", nil, "Failure preparing next results request request") + } + if req == nil { + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "subscriptions.TenantsClient", "List", resp, "Failure sending next results request request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "subscriptions.TenantsClient", "List", resp, "Failure responding to next results request request") + } + + return +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/resources/subscriptions/version.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/resources/subscriptions/version.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/resources/subscriptions/version.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/resources/subscriptions/version.go 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,43 @@ +package subscriptions + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "fmt" +) + +const ( + major = "3" + minor = "1" + patch = "0" + // Always begin a "tag" with a dash (as per http://semver.org) + tag = "-beta" + semVerFormat = "%s.%s.%s%s" + userAgentFormat = "Azure-SDK-for-Go/%s arm-%s/%s" +) + +// UserAgent returns the UserAgent string to use when sending http.Requests. +func UserAgent() string { + return fmt.Sprintf(userAgentFormat, Version(), "subscriptions", "2015-11-01") +} + +// Version returns the semantic version (see http://semver.org) of the client. +func Version() string { + return fmt.Sprintf(semVerFormat, major, minor, patch, tag) +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/resources/tags.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/resources/tags.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/resources/tags.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/resources/tags.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,371 +0,0 @@ -package resources - -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. -// -// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 -// Changes may cause incorrect behavior and will be lost if the code is -// regenerated. - -import ( - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" - "net/http" - "net/url" -) - -// TagsClient is the client for the Tags methods of the Resources service. -type TagsClient struct { - ManagementClient -} - -// NewTagsClient creates an instance of the TagsClient client. -func NewTagsClient(subscriptionID string) TagsClient { - return NewTagsClientWithBaseURI(DefaultBaseURI, subscriptionID) -} - -// NewTagsClientWithBaseURI creates an instance of the TagsClient client. -func NewTagsClientWithBaseURI(baseURI string, subscriptionID string) TagsClient { - return TagsClient{NewWithBaseURI(baseURI, subscriptionID)} -} - -// CreateOrUpdate create a subscription resource tag. -// -// tagName is the name of the tag. 
-func (client TagsClient) CreateOrUpdate(tagName string) (result TagDetails, ae error) { - req, err := client.CreateOrUpdatePreparer(tagName) - if err != nil { - return result, autorest.NewErrorWithError(err, "resources/TagsClient", "CreateOrUpdate", "Failure preparing request") - } - - resp, err := client.CreateOrUpdateSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "resources/TagsClient", "CreateOrUpdate", "Failure sending request") - } - - result, err = client.CreateOrUpdateResponder(resp) - if err != nil { - ae = autorest.NewErrorWithError(err, "resources/TagsClient", "CreateOrUpdate", "Failure responding to request") - } - - return -} - -// CreateOrUpdatePreparer prepares the CreateOrUpdate request. -func (client TagsClient) CreateOrUpdatePreparer(tagName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "subscriptionId": url.QueryEscape(client.SubscriptionID), - "tagName": url.QueryEscape(tagName), - } - - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsPut(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/tagNames/{tagName}"), - autorest.WithPathParameters(pathParameters), - autorest.WithQueryParameters(queryParameters)) -} - -// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the -// http.Response Body if it receives an error. -func (client TagsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK, http.StatusCreated) -} - -// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always -// closes the http.Response Body. 
-func (client TagsClient) CreateOrUpdateResponder(resp *http.Response) (result TagDetails, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// CreateOrUpdateValue create a subscription resource tag value. -// -// tagName is the name of the tag. tagValue is the value of the tag. -func (client TagsClient) CreateOrUpdateValue(tagName string, tagValue string) (result TagValue, ae error) { - req, err := client.CreateOrUpdateValuePreparer(tagName, tagValue) - if err != nil { - return result, autorest.NewErrorWithError(err, "resources/TagsClient", "CreateOrUpdateValue", "Failure preparing request") - } - - resp, err := client.CreateOrUpdateValueSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "resources/TagsClient", "CreateOrUpdateValue", "Failure sending request") - } - - result, err = client.CreateOrUpdateValueResponder(resp) - if err != nil { - ae = autorest.NewErrorWithError(err, "resources/TagsClient", "CreateOrUpdateValue", "Failure responding to request") - } - - return -} - -// CreateOrUpdateValuePreparer prepares the CreateOrUpdateValue request. 
-func (client TagsClient) CreateOrUpdateValuePreparer(tagName string, tagValue string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "subscriptionId": url.QueryEscape(client.SubscriptionID), - "tagName": url.QueryEscape(tagName), - "tagValue": url.QueryEscape(tagValue), - } - - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsPut(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/tagNames/{tagName}/tagValues/{tagValue}"), - autorest.WithPathParameters(pathParameters), - autorest.WithQueryParameters(queryParameters)) -} - -// CreateOrUpdateValueSender sends the CreateOrUpdateValue request. The method will close the -// http.Response Body if it receives an error. -func (client TagsClient) CreateOrUpdateValueSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK, http.StatusCreated) -} - -// CreateOrUpdateValueResponder handles the response to the CreateOrUpdateValue request. The method always -// closes the http.Response Body. -func (client TagsClient) CreateOrUpdateValueResponder(resp *http.Response) (result TagValue, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// Delete delete a subscription resource tag. -// -// tagName is the name of the tag. 
-func (client TagsClient) Delete(tagName string) (result autorest.Response, ae error) { - req, err := client.DeletePreparer(tagName) - if err != nil { - return result, autorest.NewErrorWithError(err, "resources/TagsClient", "Delete", "Failure preparing request") - } - - resp, err := client.DeleteSender(req) - if err != nil { - result.Response = resp - return result, autorest.NewErrorWithError(err, "resources/TagsClient", "Delete", "Failure sending request") - } - - result, err = client.DeleteResponder(resp) - if err != nil { - ae = autorest.NewErrorWithError(err, "resources/TagsClient", "Delete", "Failure responding to request") - } - - return -} - -// DeletePreparer prepares the Delete request. -func (client TagsClient) DeletePreparer(tagName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "subscriptionId": url.QueryEscape(client.SubscriptionID), - "tagName": url.QueryEscape(tagName), - } - - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsDelete(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/tagNames/{tagName}"), - autorest.WithPathParameters(pathParameters), - autorest.WithQueryParameters(queryParameters)) -} - -// DeleteSender sends the Delete request. The method will close the -// http.Response Body if it receives an error. -func (client TagsClient) DeleteSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK, http.StatusNoContent) -} - -// DeleteResponder handles the response to the Delete request. The method always -// closes the http.Response Body. 
-func (client TagsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), - autorest.ByClosing()) - result.Response = resp - return -} - -// DeleteValue delete a subscription resource tag value. -// -// tagName is the name of the tag. tagValue is the value of the tag. -func (client TagsClient) DeleteValue(tagName string, tagValue string) (result autorest.Response, ae error) { - req, err := client.DeleteValuePreparer(tagName, tagValue) - if err != nil { - return result, autorest.NewErrorWithError(err, "resources/TagsClient", "DeleteValue", "Failure preparing request") - } - - resp, err := client.DeleteValueSender(req) - if err != nil { - result.Response = resp - return result, autorest.NewErrorWithError(err, "resources/TagsClient", "DeleteValue", "Failure sending request") - } - - result, err = client.DeleteValueResponder(resp) - if err != nil { - ae = autorest.NewErrorWithError(err, "resources/TagsClient", "DeleteValue", "Failure responding to request") - } - - return -} - -// DeleteValuePreparer prepares the DeleteValue request. -func (client TagsClient) DeleteValuePreparer(tagName string, tagValue string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "subscriptionId": url.QueryEscape(client.SubscriptionID), - "tagName": url.QueryEscape(tagName), - "tagValue": url.QueryEscape(tagValue), - } - - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsDelete(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/tagNames/{tagName}/tagValues/{tagValue}"), - autorest.WithPathParameters(pathParameters), - autorest.WithQueryParameters(queryParameters)) -} - -// DeleteValueSender sends the DeleteValue request. 
The method will close the -// http.Response Body if it receives an error. -func (client TagsClient) DeleteValueSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK, http.StatusNoContent) -} - -// DeleteValueResponder handles the response to the DeleteValue request. The method always -// closes the http.Response Body. -func (client TagsClient) DeleteValueResponder(resp *http.Response) (result autorest.Response, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), - autorest.ByClosing()) - result.Response = resp - return -} - -// List get a list of subscription resource tags. -func (client TagsClient) List() (result TagsListResult, ae error) { - req, err := client.ListPreparer() - if err != nil { - return result, autorest.NewErrorWithError(err, "resources/TagsClient", "List", "Failure preparing request") - } - - resp, err := client.ListSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "resources/TagsClient", "List", "Failure sending request") - } - - result, err = client.ListResponder(resp) - if err != nil { - ae = autorest.NewErrorWithError(err, "resources/TagsClient", "List", "Failure responding to request") - } - - return -} - -// ListPreparer prepares the List request. -func (client TagsClient) ListPreparer() (*http.Request, error) { - pathParameters := map[string]interface{}{ - "subscriptionId": url.QueryEscape(client.SubscriptionID), - } - - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/tagNames"), - autorest.WithPathParameters(pathParameters), - autorest.WithQueryParameters(queryParameters)) -} - -// ListSender sends the List request. 
The method will close the -// http.Response Body if it receives an error. -func (client TagsClient) ListSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) -} - -// ListResponder handles the response to the List request. The method always -// closes the http.Response Body. -func (client TagsClient) ListResponder(resp *http.Response) (result TagsListResult, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// ListNextResults retrieves the next set of results, if any. -func (client TagsClient) ListNextResults(lastResults TagsListResult) (result TagsListResult, ae error) { - req, err := lastResults.TagsListResultPreparer() - if err != nil { - return result, autorest.NewErrorWithError(err, "resources/TagsClient", "List", "Failure preparing next results request request") - } - if req == nil { - return - } - - resp, err := client.ListSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "resources/TagsClient", "List", "Failure sending next results request request") - } - - result, err = client.ListResponder(resp) - if err != nil { - ae = autorest.NewErrorWithError(err, "resources/TagsClient", "List", "Failure responding to next results request request") - } - - return -} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/resources/version.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/resources/version.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/resources/version.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/resources/version.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,43 +0,0 @@ -package resources - -// Copyright (c) Microsoft and 
contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. -// -// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 -// Changes may cause incorrect behavior and will be lost if the code is -// regenerated. - -import ( - "fmt" -) - -const ( - major = "0" - minor = "3" - patch = "0" - // Always begin a "tag" with a dash (as per http://semver.org) - tag = "-beta" - semVerFormat = "%s.%s.%s%s" - userAgentFormat = "Azure-SDK-for-Go/%s;Package arm/%s;API %s" -) - -// UserAgent returns the UserAgent string to use when sending http.Requests. -func UserAgent() string { - return fmt.Sprintf(userAgentFormat, Version(), "resources", "2014-04-01-preview") -} - -// Version returns the semantic version (see http://semver.org) of the client. -func Version() string { - return fmt.Sprintf(semVerFormat, major, minor, patch, tag) -} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/scheduler/client.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/scheduler/client.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/scheduler/client.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/scheduler/client.go 2016-10-13 14:32:06.000000000 +0000 @@ -1,3 +1,6 @@ +// Package scheduler implements the Azure ARM Scheduler service API version +// 2016-03-01. +// package scheduler // Copyright (c) Microsoft and contributors. All rights reserved. 
@@ -14,17 +17,17 @@ // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 // Changes may cause incorrect behavior and will be lost if the code is // regenerated. import ( - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest" ) const ( // APIVersion is the version of the Scheduler - APIVersion = "2014-08-01-preview" + APIVersion = "2016-03-01" // DefaultBaseURI is the default URI used for the service Scheduler DefaultBaseURI = "https://management.azure.com" @@ -34,6 +37,7 @@ type ManagementClient struct { autorest.Client BaseURI string + APIVersion string SubscriptionID string } @@ -47,6 +51,7 @@ return ManagementClient{ Client: autorest.NewClientWithUserAgent(UserAgent()), BaseURI: baseURI, + APIVersion: APIVersion, SubscriptionID: subscriptionID, } } diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/scheduler/jobcollections.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/scheduler/jobcollections.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/scheduler/jobcollections.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/scheduler/jobcollections.go 2016-10-13 14:32:06.000000000 +0000 @@ -14,14 +14,14 @@ // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 // Changes may cause incorrect behavior and will be lost if the code is // regenerated. 
import ( - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" "net/http" - "net/url" ) // JobCollectionsClient is the client for the JobCollections methods of the @@ -47,21 +47,21 @@ // // resourceGroupName is the resource group name. jobCollectionName is the job // collection name. jobCollection is the job collection definition. -func (client JobCollectionsClient) CreateOrUpdate(resourceGroupName string, jobCollectionName string, jobCollection JobCollectionDefinition) (result JobCollectionDefinition, ae error) { +func (client JobCollectionsClient) CreateOrUpdate(resourceGroupName string, jobCollectionName string, jobCollection JobCollectionDefinition) (result JobCollectionDefinition, err error) { req, err := client.CreateOrUpdatePreparer(resourceGroupName, jobCollectionName, jobCollection) if err != nil { - return result, autorest.NewErrorWithError(err, "scheduler/JobCollectionsClient", "CreateOrUpdate", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "scheduler.JobCollectionsClient", "CreateOrUpdate", nil, "Failure preparing request") } resp, err := client.CreateOrUpdateSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "scheduler/JobCollectionsClient", "CreateOrUpdate", "Failure sending request") + return result, autorest.NewErrorWithError(err, "scheduler.JobCollectionsClient", "CreateOrUpdate", resp, "Failure sending request") } result, err = client.CreateOrUpdateResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "scheduler/JobCollectionsClient", "CreateOrUpdate", "Failure responding to request") + err = autorest.NewErrorWithError(err, "scheduler.JobCollectionsClient", "CreateOrUpdate", resp, "Failure responding to request") } return @@ -70,29 +70,29 @@ // CreateOrUpdatePreparer prepares the 
CreateOrUpdate request. func (client JobCollectionsClient) CreateOrUpdatePreparer(resourceGroupName string, jobCollectionName string, jobCollection JobCollectionDefinition) (*http.Request, error) { pathParameters := map[string]interface{}{ - "jobCollectionName": url.QueryEscape(jobCollectionName), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "jobCollectionName": autorest.Encode("path", jobCollectionName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, + preparer := autorest.CreatePreparer( autorest.AsJSON(), autorest.AsPut(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Scheduler/jobCollections/{jobCollectionName}"), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Scheduler/jobCollections/{jobCollectionName}", pathParameters), autorest.WithJSON(jobCollection), - autorest.WithPathParameters(pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the // http.Response Body if it receives an error. func (client JobCollectionsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK, http.StatusCreated) + return autorest.SendWithSender(client, req) } // CreateOrUpdateResponder handles the response to the CreateOrUpdate request. 
The method always @@ -101,62 +101,65 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} return } -// Delete deletes a job collection. +// Delete deletes a job collection. This method may poll for completion. +// Polling can be canceled by passing the cancel channel argument. The +// channel will be used to cancel polling and any outstanding HTTP requests. // // resourceGroupName is the resource group name. jobCollectionName is the job // collection name. -func (client JobCollectionsClient) Delete(resourceGroupName string, jobCollectionName string) (result autorest.Response, ae error) { - req, err := client.DeletePreparer(resourceGroupName, jobCollectionName) +func (client JobCollectionsClient) Delete(resourceGroupName string, jobCollectionName string, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.DeletePreparer(resourceGroupName, jobCollectionName, cancel) if err != nil { - return result, autorest.NewErrorWithError(err, "scheduler/JobCollectionsClient", "Delete", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "scheduler.JobCollectionsClient", "Delete", nil, "Failure preparing request") } resp, err := client.DeleteSender(req) if err != nil { result.Response = resp - return result, autorest.NewErrorWithError(err, "scheduler/JobCollectionsClient", "Delete", "Failure sending request") + return result, autorest.NewErrorWithError(err, "scheduler.JobCollectionsClient", "Delete", resp, "Failure sending request") } result, err = client.DeleteResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "scheduler/JobCollectionsClient", "Delete", "Failure responding to request") + err = autorest.NewErrorWithError(err, 
"scheduler.JobCollectionsClient", "Delete", resp, "Failure responding to request") } return } // DeletePreparer prepares the Delete request. -func (client JobCollectionsClient) DeletePreparer(resourceGroupName string, jobCollectionName string) (*http.Request, error) { +func (client JobCollectionsClient) DeletePreparer(resourceGroupName string, jobCollectionName string, cancel <-chan struct{}) (*http.Request, error) { pathParameters := map[string]interface{}{ - "jobCollectionName": url.QueryEscape(jobCollectionName), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "jobCollectionName": autorest.Encode("path", jobCollectionName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsDelete(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Scheduler/jobCollections/{jobCollectionName}"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Scheduler/jobCollections/{jobCollectionName}", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) } // DeleteSender sends the Delete request. The method will close the // http.Response Body if it receives an error. 
func (client JobCollectionsClient) DeleteSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) } // DeleteResponder handles the response to the Delete request. The method always @@ -165,61 +168,65 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), autorest.ByClosing()) result.Response = resp return } -// Disable disables all of the jobs in the job collection. +// Disable disables all of the jobs in the job collection. This method may +// poll for completion. Polling can be canceled by passing the cancel channel +// argument. The channel will be used to cancel polling and any outstanding +// HTTP requests. // // resourceGroupName is the resource group name. jobCollectionName is the job // collection name. -func (client JobCollectionsClient) Disable(resourceGroupName string, jobCollectionName string) (result autorest.Response, ae error) { - req, err := client.DisablePreparer(resourceGroupName, jobCollectionName) +func (client JobCollectionsClient) Disable(resourceGroupName string, jobCollectionName string, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.DisablePreparer(resourceGroupName, jobCollectionName, cancel) if err != nil { - return result, autorest.NewErrorWithError(err, "scheduler/JobCollectionsClient", "Disable", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "scheduler.JobCollectionsClient", "Disable", nil, "Failure preparing request") } resp, err := client.DisableSender(req) if err != nil { result.Response = resp - return result, autorest.NewErrorWithError(err, "scheduler/JobCollectionsClient", "Disable", "Failure sending request") + return result, autorest.NewErrorWithError(err, "scheduler.JobCollectionsClient", "Disable", 
resp, "Failure sending request") } result, err = client.DisableResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "scheduler/JobCollectionsClient", "Disable", "Failure responding to request") + err = autorest.NewErrorWithError(err, "scheduler.JobCollectionsClient", "Disable", resp, "Failure responding to request") } return } // DisablePreparer prepares the Disable request. -func (client JobCollectionsClient) DisablePreparer(resourceGroupName string, jobCollectionName string) (*http.Request, error) { +func (client JobCollectionsClient) DisablePreparer(resourceGroupName string, jobCollectionName string, cancel <-chan struct{}) (*http.Request, error) { pathParameters := map[string]interface{}{ - "jobCollectionName": url.QueryEscape(jobCollectionName), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "jobCollectionName": autorest.Encode("path", jobCollectionName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsPost(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Scheduler/jobCollections/{jobCollectionName}/disable"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Scheduler/jobCollections/{jobCollectionName}/disable", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) } // DisableSender sends the Disable request. The method will close the // http.Response Body if it receives an error. 
func (client JobCollectionsClient) DisableSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) } // DisableResponder handles the response to the Disable request. The method always @@ -228,61 +235,65 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), autorest.ByClosing()) result.Response = resp return } -// Enable enables all of the jobs in the job collection. +// Enable enables all of the jobs in the job collection. This method may poll +// for completion. Polling can be canceled by passing the cancel channel +// argument. The channel will be used to cancel polling and any outstanding +// HTTP requests. // // resourceGroupName is the resource group name. jobCollectionName is the job // collection name. -func (client JobCollectionsClient) Enable(resourceGroupName string, jobCollectionName string) (result autorest.Response, ae error) { - req, err := client.EnablePreparer(resourceGroupName, jobCollectionName) +func (client JobCollectionsClient) Enable(resourceGroupName string, jobCollectionName string, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.EnablePreparer(resourceGroupName, jobCollectionName, cancel) if err != nil { - return result, autorest.NewErrorWithError(err, "scheduler/JobCollectionsClient", "Enable", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "scheduler.JobCollectionsClient", "Enable", nil, "Failure preparing request") } resp, err := client.EnableSender(req) if err != nil { result.Response = resp - return result, autorest.NewErrorWithError(err, "scheduler/JobCollectionsClient", "Enable", "Failure sending request") + return result, autorest.NewErrorWithError(err, "scheduler.JobCollectionsClient", "Enable", resp, 
"Failure sending request") } result, err = client.EnableResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "scheduler/JobCollectionsClient", "Enable", "Failure responding to request") + err = autorest.NewErrorWithError(err, "scheduler.JobCollectionsClient", "Enable", resp, "Failure responding to request") } return } // EnablePreparer prepares the Enable request. -func (client JobCollectionsClient) EnablePreparer(resourceGroupName string, jobCollectionName string) (*http.Request, error) { +func (client JobCollectionsClient) EnablePreparer(resourceGroupName string, jobCollectionName string, cancel <-chan struct{}) (*http.Request, error) { pathParameters := map[string]interface{}{ - "jobCollectionName": url.QueryEscape(jobCollectionName), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "jobCollectionName": autorest.Encode("path", jobCollectionName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsPost(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Scheduler/jobCollections/{jobCollectionName}/enable"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Scheduler/jobCollections/{jobCollectionName}/enable", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) } // EnableSender sends the Enable request. The method will close the // http.Response Body if it receives an error. 
func (client JobCollectionsClient) EnableSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) } // EnableResponder handles the response to the Enable request. The method always @@ -291,7 +302,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), autorest.ByClosing()) result.Response = resp return @@ -301,21 +312,21 @@ // // resourceGroupName is the resource group name. jobCollectionName is the job // collection name. -func (client JobCollectionsClient) Get(resourceGroupName string, jobCollectionName string) (result JobCollectionDefinition, ae error) { +func (client JobCollectionsClient) Get(resourceGroupName string, jobCollectionName string) (result JobCollectionDefinition, err error) { req, err := client.GetPreparer(resourceGroupName, jobCollectionName) if err != nil { - return result, autorest.NewErrorWithError(err, "scheduler/JobCollectionsClient", "Get", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "scheduler.JobCollectionsClient", "Get", nil, "Failure preparing request") } resp, err := client.GetSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "scheduler/JobCollectionsClient", "Get", "Failure sending request") + return result, autorest.NewErrorWithError(err, "scheduler.JobCollectionsClient", "Get", resp, "Failure sending request") } result, err = client.GetResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "scheduler/JobCollectionsClient", "Get", "Failure responding to request") + err = autorest.NewErrorWithError(err, "scheduler.JobCollectionsClient", "Get", resp, "Failure responding to request") } return @@ -324,28 +335,27 @@ // GetPreparer prepares the Get request. 
func (client JobCollectionsClient) GetPreparer(resourceGroupName string, jobCollectionName string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "jobCollectionName": url.QueryEscape(jobCollectionName), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "jobCollectionName": autorest.Encode("path", jobCollectionName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Scheduler/jobCollections/{jobCollectionName}"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Scheduler/jobCollections/{jobCollectionName}", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client JobCollectionsClient) GetSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetResponder handles the response to the Get request. 
The method always @@ -354,7 +364,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -364,21 +374,21 @@ // ListByResourceGroup gets all job collections under specified resource group. // // resourceGroupName is the resource group name. -func (client JobCollectionsClient) ListByResourceGroup(resourceGroupName string) (result JobCollectionListResult, ae error) { +func (client JobCollectionsClient) ListByResourceGroup(resourceGroupName string) (result JobCollectionListResult, err error) { req, err := client.ListByResourceGroupPreparer(resourceGroupName) if err != nil { - return result, autorest.NewErrorWithError(err, "scheduler/JobCollectionsClient", "ListByResourceGroup", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "scheduler.JobCollectionsClient", "ListByResourceGroup", nil, "Failure preparing request") } resp, err := client.ListByResourceGroupSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "scheduler/JobCollectionsClient", "ListByResourceGroup", "Failure sending request") + return result, autorest.NewErrorWithError(err, "scheduler.JobCollectionsClient", "ListByResourceGroup", resp, "Failure sending request") } result, err = client.ListByResourceGroupResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "scheduler/JobCollectionsClient", "ListByResourceGroup", "Failure responding to request") + err = autorest.NewErrorWithError(err, "scheduler.JobCollectionsClient", "ListByResourceGroup", resp, "Failure responding to request") } return @@ -387,27 +397,26 @@ // ListByResourceGroupPreparer prepares the ListByResourceGroup request. 
func (client JobCollectionsClient) ListByResourceGroupPreparer(resourceGroupName string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Scheduler/jobCollections"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Scheduler/jobCollections", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the // http.Response Body if it receives an error. func (client JobCollectionsClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always @@ -416,7 +425,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -424,10 +433,10 @@ } // ListByResourceGroupNextResults retrieves the next set of results, if any. 
-func (client JobCollectionsClient) ListByResourceGroupNextResults(lastResults JobCollectionListResult) (result JobCollectionListResult, ae error) { +func (client JobCollectionsClient) ListByResourceGroupNextResults(lastResults JobCollectionListResult) (result JobCollectionListResult, err error) { req, err := lastResults.JobCollectionListResultPreparer() if err != nil { - return result, autorest.NewErrorWithError(err, "scheduler/JobCollectionsClient", "ListByResourceGroup", "Failure preparing next results request request") + return result, autorest.NewErrorWithError(err, "scheduler.JobCollectionsClient", "ListByResourceGroup", nil, "Failure preparing next results request request") } if req == nil { return @@ -436,33 +445,33 @@ resp, err := client.ListByResourceGroupSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "scheduler/JobCollectionsClient", "ListByResourceGroup", "Failure sending next results request request") + return result, autorest.NewErrorWithError(err, "scheduler.JobCollectionsClient", "ListByResourceGroup", resp, "Failure sending next results request request") } result, err = client.ListByResourceGroupResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "scheduler/JobCollectionsClient", "ListByResourceGroup", "Failure responding to next results request request") + err = autorest.NewErrorWithError(err, "scheduler.JobCollectionsClient", "ListByResourceGroup", resp, "Failure responding to next results request request") } return } // ListBySubscription gets all job collections under specified subscription. 
-func (client JobCollectionsClient) ListBySubscription() (result JobCollectionListResult, ae error) { +func (client JobCollectionsClient) ListBySubscription() (result JobCollectionListResult, err error) { req, err := client.ListBySubscriptionPreparer() if err != nil { - return result, autorest.NewErrorWithError(err, "scheduler/JobCollectionsClient", "ListBySubscription", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "scheduler.JobCollectionsClient", "ListBySubscription", nil, "Failure preparing request") } resp, err := client.ListBySubscriptionSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "scheduler/JobCollectionsClient", "ListBySubscription", "Failure sending request") + return result, autorest.NewErrorWithError(err, "scheduler.JobCollectionsClient", "ListBySubscription", resp, "Failure sending request") } result, err = client.ListBySubscriptionResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "scheduler/JobCollectionsClient", "ListBySubscription", "Failure responding to request") + err = autorest.NewErrorWithError(err, "scheduler.JobCollectionsClient", "ListBySubscription", resp, "Failure responding to request") } return @@ -471,26 +480,25 @@ // ListBySubscriptionPreparer prepares the ListBySubscription request. 
func (client JobCollectionsClient) ListBySubscriptionPreparer() (*http.Request, error) { pathParameters := map[string]interface{}{ - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/providers/Microsoft.Scheduler/jobCollections"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Scheduler/jobCollections", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // ListBySubscriptionSender sends the ListBySubscription request. The method will close the // http.Response Body if it receives an error. func (client JobCollectionsClient) ListBySubscriptionSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // ListBySubscriptionResponder handles the response to the ListBySubscription request. The method always @@ -499,7 +507,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -507,10 +515,10 @@ } // ListBySubscriptionNextResults retrieves the next set of results, if any. 
-func (client JobCollectionsClient) ListBySubscriptionNextResults(lastResults JobCollectionListResult) (result JobCollectionListResult, ae error) { +func (client JobCollectionsClient) ListBySubscriptionNextResults(lastResults JobCollectionListResult) (result JobCollectionListResult, err error) { req, err := lastResults.JobCollectionListResultPreparer() if err != nil { - return result, autorest.NewErrorWithError(err, "scheduler/JobCollectionsClient", "ListBySubscription", "Failure preparing next results request request") + return result, autorest.NewErrorWithError(err, "scheduler.JobCollectionsClient", "ListBySubscription", nil, "Failure preparing next results request request") } if req == nil { return @@ -519,12 +527,12 @@ resp, err := client.ListBySubscriptionSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "scheduler/JobCollectionsClient", "ListBySubscription", "Failure sending next results request request") + return result, autorest.NewErrorWithError(err, "scheduler.JobCollectionsClient", "ListBySubscription", resp, "Failure sending next results request request") } result, err = client.ListBySubscriptionResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "scheduler/JobCollectionsClient", "ListBySubscription", "Failure responding to next results request request") + err = autorest.NewErrorWithError(err, "scheduler.JobCollectionsClient", "ListBySubscription", resp, "Failure responding to next results request request") } return @@ -534,21 +542,21 @@ // // resourceGroupName is the resource group name. jobCollectionName is the job // collection name. jobCollection is the job collection definition. 
-func (client JobCollectionsClient) Patch(resourceGroupName string, jobCollectionName string, jobCollection JobCollectionDefinition) (result JobCollectionDefinition, ae error) { +func (client JobCollectionsClient) Patch(resourceGroupName string, jobCollectionName string, jobCollection JobCollectionDefinition) (result JobCollectionDefinition, err error) { req, err := client.PatchPreparer(resourceGroupName, jobCollectionName, jobCollection) if err != nil { - return result, autorest.NewErrorWithError(err, "scheduler/JobCollectionsClient", "Patch", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "scheduler.JobCollectionsClient", "Patch", nil, "Failure preparing request") } resp, err := client.PatchSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "scheduler/JobCollectionsClient", "Patch", "Failure sending request") + return result, autorest.NewErrorWithError(err, "scheduler.JobCollectionsClient", "Patch", resp, "Failure sending request") } result, err = client.PatchResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "scheduler/JobCollectionsClient", "Patch", "Failure responding to request") + err = autorest.NewErrorWithError(err, "scheduler.JobCollectionsClient", "Patch", resp, "Failure responding to request") } return @@ -557,29 +565,29 @@ // PatchPreparer prepares the Patch request. 
func (client JobCollectionsClient) PatchPreparer(resourceGroupName string, jobCollectionName string, jobCollection JobCollectionDefinition) (*http.Request, error) { pathParameters := map[string]interface{}{ - "jobCollectionName": url.QueryEscape(jobCollectionName), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "jobCollectionName": autorest.Encode("path", jobCollectionName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, + preparer := autorest.CreatePreparer( autorest.AsJSON(), autorest.AsPatch(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Scheduler/jobCollections/{jobCollectionName}"), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Scheduler/jobCollections/{jobCollectionName}", pathParameters), autorest.WithJSON(jobCollection), - autorest.WithPathParameters(pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // PatchSender sends the Patch request. The method will close the // http.Response Body if it receives an error. func (client JobCollectionsClient) PatchSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // PatchResponder handles the response to the Patch request. 
The method always @@ -588,7 +596,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/scheduler/jobs.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/scheduler/jobs.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/scheduler/jobs.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/scheduler/jobs.go 2016-10-13 14:32:06.000000000 +0000 @@ -14,14 +14,14 @@ // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 // Changes may cause incorrect behavior and will be lost if the code is // regenerated. import ( - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" "net/http" - "net/url" ) // JobsClient is the client for the Jobs methods of the Scheduler service. @@ -43,21 +43,21 @@ // // resourceGroupName is the resource group name. jobCollectionName is the job // collection name. jobName is the job name. job is the job definition. 
-func (client JobsClient) CreateOrUpdate(resourceGroupName string, jobCollectionName string, jobName string, job JobDefinition) (result JobDefinition, ae error) { +func (client JobsClient) CreateOrUpdate(resourceGroupName string, jobCollectionName string, jobName string, job JobDefinition) (result JobDefinition, err error) { req, err := client.CreateOrUpdatePreparer(resourceGroupName, jobCollectionName, jobName, job) if err != nil { - return result, autorest.NewErrorWithError(err, "scheduler/JobsClient", "CreateOrUpdate", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "scheduler.JobsClient", "CreateOrUpdate", nil, "Failure preparing request") } resp, err := client.CreateOrUpdateSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "scheduler/JobsClient", "CreateOrUpdate", "Failure sending request") + return result, autorest.NewErrorWithError(err, "scheduler.JobsClient", "CreateOrUpdate", resp, "Failure sending request") } result, err = client.CreateOrUpdateResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "scheduler/JobsClient", "CreateOrUpdate", "Failure responding to request") + err = autorest.NewErrorWithError(err, "scheduler.JobsClient", "CreateOrUpdate", resp, "Failure responding to request") } return @@ -66,30 +66,30 @@ // CreateOrUpdatePreparer prepares the CreateOrUpdate request. 
func (client JobsClient) CreateOrUpdatePreparer(resourceGroupName string, jobCollectionName string, jobName string, job JobDefinition) (*http.Request, error) { pathParameters := map[string]interface{}{ - "jobCollectionName": url.QueryEscape(jobCollectionName), - "jobName": url.QueryEscape(jobName), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "jobCollectionName": autorest.Encode("path", jobCollectionName), + "jobName": autorest.Encode("path", jobName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, + preparer := autorest.CreatePreparer( autorest.AsJSON(), autorest.AsPut(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Scheduler/jobCollections/{jobCollectionName}/jobs/{jobName}"), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Scheduler/jobCollections/{jobCollectionName}/jobs/{jobName}", pathParameters), autorest.WithJSON(job), - autorest.WithPathParameters(pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the // http.Response Body if it receives an error. func (client JobsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK, http.StatusCreated) + return autorest.SendWithSender(client, req) } // CreateOrUpdateResponder handles the response to the CreateOrUpdate request. 
The method always @@ -98,7 +98,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -109,21 +109,21 @@ // // resourceGroupName is the resource group name. jobCollectionName is the job // collection name. jobName is the job name. -func (client JobsClient) Delete(resourceGroupName string, jobCollectionName string, jobName string) (result autorest.Response, ae error) { +func (client JobsClient) Delete(resourceGroupName string, jobCollectionName string, jobName string) (result autorest.Response, err error) { req, err := client.DeletePreparer(resourceGroupName, jobCollectionName, jobName) if err != nil { - return result, autorest.NewErrorWithError(err, "scheduler/JobsClient", "Delete", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "scheduler.JobsClient", "Delete", nil, "Failure preparing request") } resp, err := client.DeleteSender(req) if err != nil { result.Response = resp - return result, autorest.NewErrorWithError(err, "scheduler/JobsClient", "Delete", "Failure sending request") + return result, autorest.NewErrorWithError(err, "scheduler.JobsClient", "Delete", resp, "Failure sending request") } result, err = client.DeleteResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "scheduler/JobsClient", "Delete", "Failure responding to request") + err = autorest.NewErrorWithError(err, "scheduler.JobsClient", "Delete", resp, "Failure responding to request") } return @@ -132,29 +132,28 @@ // DeletePreparer prepares the Delete request. 
func (client JobsClient) DeletePreparer(resourceGroupName string, jobCollectionName string, jobName string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "jobCollectionName": url.QueryEscape(jobCollectionName), - "jobName": url.QueryEscape(jobName), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "jobCollectionName": autorest.Encode("path", jobCollectionName), + "jobName": autorest.Encode("path", jobName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsDelete(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Scheduler/jobCollections/{jobCollectionName}/jobs/{jobName}"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Scheduler/jobCollections/{jobCollectionName}/jobs/{jobName}", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // DeleteSender sends the Delete request. The method will close the // http.Response Body if it receives an error. func (client JobsClient) DeleteSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // DeleteResponder handles the response to the Delete request. 
The method always @@ -163,7 +162,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByClosing()) result.Response = resp return @@ -173,21 +172,21 @@ // // resourceGroupName is the resource group name. jobCollectionName is the job // collection name. jobName is the job name. -func (client JobsClient) Get(resourceGroupName string, jobCollectionName string, jobName string) (result JobDefinition, ae error) { +func (client JobsClient) Get(resourceGroupName string, jobCollectionName string, jobName string) (result JobDefinition, err error) { req, err := client.GetPreparer(resourceGroupName, jobCollectionName, jobName) if err != nil { - return result, autorest.NewErrorWithError(err, "scheduler/JobsClient", "Get", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "scheduler.JobsClient", "Get", nil, "Failure preparing request") } resp, err := client.GetSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "scheduler/JobsClient", "Get", "Failure sending request") + return result, autorest.NewErrorWithError(err, "scheduler.JobsClient", "Get", resp, "Failure sending request") } result, err = client.GetResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "scheduler/JobsClient", "Get", "Failure responding to request") + err = autorest.NewErrorWithError(err, "scheduler.JobsClient", "Get", resp, "Failure responding to request") } return @@ -196,29 +195,28 @@ // GetPreparer prepares the Get request. 
func (client JobsClient) GetPreparer(resourceGroupName string, jobCollectionName string, jobName string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "jobCollectionName": url.QueryEscape(jobCollectionName), - "jobName": url.QueryEscape(jobName), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "jobCollectionName": autorest.Encode("path", jobCollectionName), + "jobName": autorest.Encode("path", jobName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Scheduler/jobCollections/{jobCollectionName}/jobs/{jobName}"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Scheduler/jobCollections/{jobCollectionName}/jobs/{jobName}", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client JobsClient) GetSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetResponder handles the response to the Get request. 
The method always @@ -227,7 +225,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -239,58 +237,61 @@ // resourceGroupName is the resource group name. jobCollectionName is the job // collection name. top is the number of jobs to request, in the of range // [1..100]. skip is the (0-based) index of the job history list from which -// to begin requesting entries. -func (client JobsClient) List(resourceGroupName string, jobCollectionName string, top *int, skip *int) (result JobListResult, ae error) { - req, err := client.ListPreparer(resourceGroupName, jobCollectionName, top, skip) +// to begin requesting entries. filter is the filter to apply on the job +// state. +func (client JobsClient) List(resourceGroupName string, jobCollectionName string, top *int32, skip *int32, filter string) (result JobListResult, err error) { + req, err := client.ListPreparer(resourceGroupName, jobCollectionName, top, skip, filter) if err != nil { - return result, autorest.NewErrorWithError(err, "scheduler/JobsClient", "List", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "scheduler.JobsClient", "List", nil, "Failure preparing request") } resp, err := client.ListSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "scheduler/JobsClient", "List", "Failure sending request") + return result, autorest.NewErrorWithError(err, "scheduler.JobsClient", "List", resp, "Failure sending request") } result, err = client.ListResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "scheduler/JobsClient", "List", "Failure responding to request") + err = autorest.NewErrorWithError(err, "scheduler.JobsClient", "List", resp, "Failure responding to request") } return 
} // ListPreparer prepares the List request. -func (client JobsClient) ListPreparer(resourceGroupName string, jobCollectionName string, top *int, skip *int) (*http.Request, error) { +func (client JobsClient) ListPreparer(resourceGroupName string, jobCollectionName string, top *int32, skip *int32, filter string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "jobCollectionName": url.QueryEscape(jobCollectionName), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "jobCollectionName": autorest.Encode("path", jobCollectionName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } if top != nil { - queryParameters["$top"] = top + queryParameters["$top"] = autorest.Encode("query", *top) } if skip != nil { - queryParameters["$skip"] = skip + queryParameters["$skip"] = autorest.Encode("query", *skip) + } + if len(filter) > 0 { + queryParameters["$filter"] = autorest.Encode("query", filter) } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Scheduler/jobCollections/{jobCollectionName}/jobs"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Scheduler/jobCollections/{jobCollectionName}/jobs", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. 
func (client JobsClient) ListSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // ListResponder handles the response to the List request. The method always @@ -299,7 +300,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -307,10 +308,10 @@ } // ListNextResults retrieves the next set of results, if any. -func (client JobsClient) ListNextResults(lastResults JobListResult) (result JobListResult, ae error) { +func (client JobsClient) ListNextResults(lastResults JobListResult) (result JobListResult, err error) { req, err := lastResults.JobListResultPreparer() if err != nil { - return result, autorest.NewErrorWithError(err, "scheduler/JobsClient", "List", "Failure preparing next results request request") + return result, autorest.NewErrorWithError(err, "scheduler.JobsClient", "List", nil, "Failure preparing next results request request") } if req == nil { return @@ -319,12 +320,12 @@ resp, err := client.ListSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "scheduler/JobsClient", "List", "Failure sending next results request request") + return result, autorest.NewErrorWithError(err, "scheduler.JobsClient", "List", resp, "Failure sending next results request request") } result, err = client.ListResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "scheduler/JobsClient", "List", "Failure responding to next results request request") + err = autorest.NewErrorWithError(err, "scheduler.JobsClient", "List", resp, "Failure responding to next results request request") } return @@ -335,59 +336,62 @@ // resourceGroupName is the resource group name. 
jobCollectionName is the job // collection name. jobName is the job name. top is the number of job history // to request, in the of range [1..100]. skip is the (0-based) index of the -// job history list from which to begin requesting entries. -func (client JobsClient) ListJobHistory(resourceGroupName string, jobCollectionName string, jobName string, top *int, skip *int) (result JobHistoryListResult, ae error) { - req, err := client.ListJobHistoryPreparer(resourceGroupName, jobCollectionName, jobName, top, skip) +// job history list from which to begin requesting entries. filter is the +// filter to apply on the job state. +func (client JobsClient) ListJobHistory(resourceGroupName string, jobCollectionName string, jobName string, top *int32, skip *int32, filter string) (result JobHistoryListResult, err error) { + req, err := client.ListJobHistoryPreparer(resourceGroupName, jobCollectionName, jobName, top, skip, filter) if err != nil { - return result, autorest.NewErrorWithError(err, "scheduler/JobsClient", "ListJobHistory", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "scheduler.JobsClient", "ListJobHistory", nil, "Failure preparing request") } resp, err := client.ListJobHistorySender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "scheduler/JobsClient", "ListJobHistory", "Failure sending request") + return result, autorest.NewErrorWithError(err, "scheduler.JobsClient", "ListJobHistory", resp, "Failure sending request") } result, err = client.ListJobHistoryResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "scheduler/JobsClient", "ListJobHistory", "Failure responding to request") + err = autorest.NewErrorWithError(err, "scheduler.JobsClient", "ListJobHistory", resp, "Failure responding to request") } return } // ListJobHistoryPreparer prepares the ListJobHistory request. 
-func (client JobsClient) ListJobHistoryPreparer(resourceGroupName string, jobCollectionName string, jobName string, top *int, skip *int) (*http.Request, error) { +func (client JobsClient) ListJobHistoryPreparer(resourceGroupName string, jobCollectionName string, jobName string, top *int32, skip *int32, filter string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "jobCollectionName": url.QueryEscape(jobCollectionName), - "jobName": url.QueryEscape(jobName), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "jobCollectionName": autorest.Encode("path", jobCollectionName), + "jobName": autorest.Encode("path", jobName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } if top != nil { - queryParameters["$top"] = top + queryParameters["$top"] = autorest.Encode("query", *top) } if skip != nil { - queryParameters["$skip"] = skip + queryParameters["$skip"] = autorest.Encode("query", *skip) + } + if len(filter) > 0 { + queryParameters["$filter"] = autorest.Encode("query", filter) } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Scheduler/jobCollections/{jobCollectionName}/jobs/{jobName}/history"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Scheduler/jobCollections/{jobCollectionName}/jobs/{jobName}/history", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // ListJobHistorySender sends the 
ListJobHistory request. The method will close the // http.Response Body if it receives an error. func (client JobsClient) ListJobHistorySender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // ListJobHistoryResponder handles the response to the ListJobHistory request. The method always @@ -396,7 +400,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -404,10 +408,10 @@ } // ListJobHistoryNextResults retrieves the next set of results, if any. -func (client JobsClient) ListJobHistoryNextResults(lastResults JobHistoryListResult) (result JobHistoryListResult, ae error) { +func (client JobsClient) ListJobHistoryNextResults(lastResults JobHistoryListResult) (result JobHistoryListResult, err error) { req, err := lastResults.JobHistoryListResultPreparer() if err != nil { - return result, autorest.NewErrorWithError(err, "scheduler/JobsClient", "ListJobHistory", "Failure preparing next results request request") + return result, autorest.NewErrorWithError(err, "scheduler.JobsClient", "ListJobHistory", nil, "Failure preparing next results request request") } if req == nil { return @@ -416,12 +420,12 @@ resp, err := client.ListJobHistorySender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "scheduler/JobsClient", "ListJobHistory", "Failure sending next results request request") + return result, autorest.NewErrorWithError(err, "scheduler.JobsClient", "ListJobHistory", resp, "Failure sending next results request request") } result, err = client.ListJobHistoryResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "scheduler/JobsClient", "ListJobHistory", "Failure responding to next 
results request request") + err = autorest.NewErrorWithError(err, "scheduler.JobsClient", "ListJobHistory", resp, "Failure responding to next results request request") } return @@ -431,21 +435,21 @@ // // resourceGroupName is the resource group name. jobCollectionName is the job // collection name. jobName is the job name. job is the job definition. -func (client JobsClient) Patch(resourceGroupName string, jobCollectionName string, jobName string, job JobDefinition) (result JobDefinition, ae error) { +func (client JobsClient) Patch(resourceGroupName string, jobCollectionName string, jobName string, job JobDefinition) (result JobDefinition, err error) { req, err := client.PatchPreparer(resourceGroupName, jobCollectionName, jobName, job) if err != nil { - return result, autorest.NewErrorWithError(err, "scheduler/JobsClient", "Patch", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "scheduler.JobsClient", "Patch", nil, "Failure preparing request") } resp, err := client.PatchSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "scheduler/JobsClient", "Patch", "Failure sending request") + return result, autorest.NewErrorWithError(err, "scheduler.JobsClient", "Patch", resp, "Failure sending request") } result, err = client.PatchResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "scheduler/JobsClient", "Patch", "Failure responding to request") + err = autorest.NewErrorWithError(err, "scheduler.JobsClient", "Patch", resp, "Failure responding to request") } return @@ -454,30 +458,30 @@ // PatchPreparer prepares the Patch request. 
func (client JobsClient) PatchPreparer(resourceGroupName string, jobCollectionName string, jobName string, job JobDefinition) (*http.Request, error) { pathParameters := map[string]interface{}{ - "jobCollectionName": url.QueryEscape(jobCollectionName), - "jobName": url.QueryEscape(jobName), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "jobCollectionName": autorest.Encode("path", jobCollectionName), + "jobName": autorest.Encode("path", jobName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, + preparer := autorest.CreatePreparer( autorest.AsJSON(), autorest.AsPatch(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Scheduler/jobCollections/{jobCollectionName}/jobs/{jobName}"), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Scheduler/jobCollections/{jobCollectionName}/jobs/{jobName}", pathParameters), autorest.WithJSON(job), - autorest.WithPathParameters(pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // PatchSender sends the Patch request. The method will close the // http.Response Body if it receives an error. func (client JobsClient) PatchSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // PatchResponder handles the response to the Patch request. 
The method always @@ -486,7 +490,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -497,21 +501,21 @@ // // resourceGroupName is the resource group name. jobCollectionName is the job // collection name. jobName is the job name. -func (client JobsClient) Run(resourceGroupName string, jobCollectionName string, jobName string) (result autorest.Response, ae error) { +func (client JobsClient) Run(resourceGroupName string, jobCollectionName string, jobName string) (result autorest.Response, err error) { req, err := client.RunPreparer(resourceGroupName, jobCollectionName, jobName) if err != nil { - return result, autorest.NewErrorWithError(err, "scheduler/JobsClient", "Run", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "scheduler.JobsClient", "Run", nil, "Failure preparing request") } resp, err := client.RunSender(req) if err != nil { result.Response = resp - return result, autorest.NewErrorWithError(err, "scheduler/JobsClient", "Run", "Failure sending request") + return result, autorest.NewErrorWithError(err, "scheduler.JobsClient", "Run", resp, "Failure sending request") } result, err = client.RunResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "scheduler/JobsClient", "Run", "Failure responding to request") + err = autorest.NewErrorWithError(err, "scheduler.JobsClient", "Run", resp, "Failure responding to request") } return @@ -520,29 +524,28 @@ // RunPreparer prepares the Run request. 
func (client JobsClient) RunPreparer(resourceGroupName string, jobCollectionName string, jobName string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "jobCollectionName": url.QueryEscape(jobCollectionName), - "jobName": url.QueryEscape(jobName), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "jobCollectionName": autorest.Encode("path", jobCollectionName), + "jobName": autorest.Encode("path", jobName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsPost(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Scheduler/jobCollections/{jobCollectionName}/jobs/{jobName}/run"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Scheduler/jobCollections/{jobCollectionName}/jobs/{jobName}/run", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // RunSender sends the Run request. The method will close the // http.Response Body if it receives an error. func (client JobsClient) RunSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // RunResponder handles the response to the Run request. 
The method always @@ -551,7 +554,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByClosing()) result.Response = resp return diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/scheduler/models.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/scheduler/models.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/scheduler/models.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/scheduler/models.go 2016-10-13 14:32:06.000000000 +0000 @@ -14,14 +14,14 @@ // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 // Changes may cause incorrect behavior and will be lost if the code is // regenerated. import ( - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/date" - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/to" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/date" + "github.com/Azure/go-autorest/autorest/to" "net/http" ) @@ -98,11 +98,6 @@ type JobExecutionStatus string const ( - // CallbackNotFound specifies the callback not found state for job - // execution status. - CallbackNotFound JobExecutionStatus = "CallbackNotFound" - // Cancelled specifies the cancelled state for job execution status. - Cancelled JobExecutionStatus = "Cancelled" // Completed specifies the completed state for job execution status. Completed JobExecutionStatus = "Completed" // Failed specifies the failed state for job execution status. 
@@ -226,8 +221,10 @@ const ( // Free specifies the free state for sku definition. Free SkuDefinition = "Free" - // Premium specifies the premium state for sku definition. - Premium SkuDefinition = "Premium" + // P10Premium specifies the p10 premium state for sku definition. + P10Premium SkuDefinition = "P10Premium" + // P20Premium specifies the p20 premium state for sku definition. + P20Premium SkuDefinition = "P20Premium" // Standard specifies the standard state for sku definition. Standard SkuDefinition = "Standard" ) @@ -256,11 +253,11 @@ // HTTPRequest is type HTTPRequest struct { - HTTPAuthentication *HTTPAuthentication `json:"httpAuthentication,omitempty"` - URI *string `json:"uri,omitempty"` - Method *string `json:"method,omitempty"` - Body *string `json:"body,omitempty"` - Headers *map[string]*string `json:"headers,omitempty"` + Authentication *HTTPAuthentication `json:"authentication,omitempty"` + URI *string `json:"uri,omitempty"` + Method *string `json:"method,omitempty"` + Body *string `json:"body,omitempty"` + Headers *map[string]*string `json:"headers,omitempty"` } // JobAction is @@ -313,8 +310,8 @@ // JobCollectionQuota is type JobCollectionQuota struct { - MaxJobCount *int `json:"maxJobCount,omitempty"` - MaxJobOccurrence *int `json:"maxJobOccurrence,omitempty"` + MaxJobCount *int32 `json:"maxJobCount,omitempty"` + MaxJobOccurrence *int32 `json:"maxJobOccurrence,omitempty"` MaxRecurrence *JobMaxRecurrence `json:"maxRecurrence,omitempty"` } @@ -353,8 +350,13 @@ ActionName JobHistoryActionName `json:"actionName,omitempty"` Status JobExecutionStatus `json:"status,omitempty"` Message *string `json:"message,omitempty"` - RetryCount *int `json:"retryCount,omitempty"` - RepeatCount *int `json:"repeatCount,omitempty"` + RetryCount *int32 `json:"retryCount,omitempty"` + RepeatCount *int32 `json:"repeatCount,omitempty"` +} + +// JobHistoryFilter is +type JobHistoryFilter struct { + Status JobExecutionStatus `json:"status,omitempty"` } // JobHistoryListResult 
is @@ -398,7 +400,7 @@ // JobMaxRecurrence is type JobMaxRecurrence struct { Frequency RecurrenceFrequency `json:"frequency,omitempty"` - Interval *int `json:"interval,omitempty"` + Interval *int32 `json:"interval,omitempty"` } // JobProperties is @@ -413,8 +415,8 @@ // JobRecurrence is type JobRecurrence struct { Frequency RecurrenceFrequency `json:"frequency,omitempty"` - Interval *int `json:"interval,omitempty"` - Count *int `json:"count,omitempty"` + Interval *int32 `json:"interval,omitempty"` + Count *int32 `json:"count,omitempty"` EndTime *date.Time `json:"endTime,omitempty"` Schedule *JobRecurrenceSchedule `json:"schedule,omitempty"` } @@ -422,23 +424,28 @@ // JobRecurrenceSchedule is type JobRecurrenceSchedule struct { WeekDays *[]DayOfWeek `json:"weekDays,omitempty"` - Hours *[]int `json:"hours,omitempty"` - Minutes *[]int `json:"minutes,omitempty"` - MonthDays *[]int `json:"monthDays,omitempty"` + Hours *[]int32 `json:"hours,omitempty"` + Minutes *[]int32 `json:"minutes,omitempty"` + MonthDays *[]int32 `json:"monthDays,omitempty"` MonthlyOccurrences *[]JobRecurrenceScheduleMonthlyOccurrence `json:"monthlyOccurrences,omitempty"` } // JobRecurrenceScheduleMonthlyOccurrence is type JobRecurrenceScheduleMonthlyOccurrence struct { Day JobScheduleDay `json:"day,omitempty"` - Occurrence *int `json:"Occurrence,omitempty"` + Occurrence *int32 `json:"Occurrence,omitempty"` +} + +// JobStateFilter is +type JobStateFilter struct { + State JobState `json:"state,omitempty"` } // JobStatus is type JobStatus struct { - ExecutionCount *int `json:"executionCount,omitempty"` - FailureCount *int `json:"failureCount,omitempty"` - FaultedCount *int `json:"faultedCount,omitempty"` + ExecutionCount *int32 `json:"executionCount,omitempty"` + FailureCount *int32 `json:"failureCount,omitempty"` + FaultedCount *int32 `json:"faultedCount,omitempty"` LastExecutionTime *date.Time `json:"lastExecutionTime,omitempty"` NextExecutionTime *date.Time `json:"nextExecutionTime,omitempty"` } @@ 
-456,7 +463,7 @@ type RetryPolicy struct { RetryType RetryType `json:"retryType,omitempty"` RetryInterval *string `json:"retryInterval,omitempty"` - RetryCount *int `json:"retryCount,omitempty"` + RetryCount *int32 `json:"retryCount,omitempty"` } // ServiceBusAuthentication is diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/scheduler/version.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/scheduler/version.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/scheduler/version.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/scheduler/version.go 2016-10-13 14:32:06.000000000 +0000 @@ -14,7 +14,7 @@ // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 // Changes may cause incorrect behavior and will be lost if the code is // regenerated. @@ -23,18 +23,18 @@ ) const ( - major = "0" - minor = "3" + major = "3" + minor = "1" patch = "0" // Always begin a "tag" with a dash (as per http://semver.org) tag = "-beta" semVerFormat = "%s.%s.%s%s" - userAgentFormat = "Azure-SDK-for-Go/%s;Package arm/%s;API %s" + userAgentFormat = "Azure-SDK-for-Go/%s arm-%s/%s" ) // UserAgent returns the UserAgent string to use when sending http.Requests. func UserAgent() string { - return fmt.Sprintf(userAgentFormat, Version(), "scheduler", "2014-08-01-preview") + return fmt.Sprintf(userAgentFormat, Version(), "scheduler", "2016-03-01") } // Version returns the semantic version (see http://semver.org) of the client. 
diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/search/adminkeys.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/search/adminkeys.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/search/adminkeys.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/search/adminkeys.go 2016-10-13 14:32:06.000000000 +0000 @@ -14,14 +14,14 @@ // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 // Changes may cause incorrect behavior and will be lost if the code is // regenerated. import ( - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" "net/http" - "net/url" ) // AdminKeysClient is the client that can be used to manage Azure Search @@ -47,21 +47,21 @@ // resourceGroupName is the name of the resource group within the current // subscription. serviceName is the name of the Search service for which to // list admin keys. 
-func (client AdminKeysClient) List(resourceGroupName string, serviceName string) (result AdminKeyResult, ae error) { +func (client AdminKeysClient) List(resourceGroupName string, serviceName string) (result AdminKeyResult, err error) { req, err := client.ListPreparer(resourceGroupName, serviceName) if err != nil { - return result, autorest.NewErrorWithError(err, "search/AdminKeysClient", "List", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "search.AdminKeysClient", "List", nil, "Failure preparing request") } resp, err := client.ListSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "search/AdminKeysClient", "List", "Failure sending request") + return result, autorest.NewErrorWithError(err, "search.AdminKeysClient", "List", resp, "Failure sending request") } result, err = client.ListResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "search/AdminKeysClient", "List", "Failure responding to request") + err = autorest.NewErrorWithError(err, "search.AdminKeysClient", "List", resp, "Failure responding to request") } return @@ -70,28 +70,27 @@ // ListPreparer prepares the List request. 
func (client AdminKeysClient) ListPreparer(resourceGroupName string, serviceName string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "resourceGroupName": url.QueryEscape(resourceGroupName), - "serviceName": url.QueryEscape(serviceName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "serviceName": autorest.Encode("path", serviceName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsPost(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Search/searchServices/{serviceName}/listAdminKeys"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Search/searchServices/{serviceName}/listAdminKeys", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client AdminKeysClient) ListSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // ListResponder handles the response to the List request. 
The method always @@ -100,7 +99,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/search/client.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/search/client.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/search/client.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/search/client.go 2016-10-13 14:32:06.000000000 +0000 @@ -1,3 +1,7 @@ +// Package search implements the Azure ARM Search service API version +// 2015-02-28. +// +// Client that can be used to manage Azure Search services and API keys. package search // Copyright (c) Microsoft and contributors. All rights reserved. @@ -14,12 +18,12 @@ // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 // Changes may cause incorrect behavior and will be lost if the code is // regenerated. import ( - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest" ) const ( @@ -30,11 +34,11 @@ DefaultBaseURI = "https://management.azure.com" ) -// ManagementClient is the client that can be used to manage Azure Search -// services and API keys. +// ManagementClient is the base client for Search. 
type ManagementClient struct { autorest.Client BaseURI string + APIVersion string SubscriptionID string } @@ -48,6 +52,7 @@ return ManagementClient{ Client: autorest.NewClientWithUserAgent(UserAgent()), BaseURI: baseURI, + APIVersion: APIVersion, SubscriptionID: subscriptionID, } } diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/search/models.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/search/models.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/search/models.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/search/models.go 2016-10-13 14:32:06.000000000 +0000 @@ -14,12 +14,12 @@ // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 // Changes may cause incorrect behavior and will be lost if the code is // regenerated. import ( - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest" ) // ProvisioningState enumerates the values for provisioning state. @@ -119,9 +119,9 @@ // ServiceProperties is defines properties of an Azure Search service that can // be modified. 
type ServiceProperties struct { - Sku *Sku `json:"sku,omitempty"` - ReplicaCount *int `json:"replicaCount,omitempty"` - PartitionCount *int `json:"partitionCount,omitempty"` + Sku *Sku `json:"sku,omitempty"` + ReplicaCount *int32 `json:"replicaCount,omitempty"` + PartitionCount *int32 `json:"partitionCount,omitempty"` } // ServiceReadableProperties is defines all the properties of an Azure Search @@ -131,13 +131,14 @@ StatusDetails *string `json:"statusDetails,omitempty"` ProvisioningState ProvisioningState `json:"provisioningState,omitempty"` Sku *Sku `json:"sku,omitempty"` - ReplicaCount *int `json:"replicaCount,omitempty"` - PartitionCount *int `json:"partitionCount,omitempty"` + ReplicaCount *int32 `json:"replicaCount,omitempty"` + PartitionCount *int32 `json:"partitionCount,omitempty"` } // ServiceResource is describes an Azure Search service and its current state. type ServiceResource struct { autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` Name *string `json:"name,omitempty"` Location *string `json:"location,omitempty"` Tags *map[string]*string `json:"tags,omitempty"` diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/search/querykeys.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/search/querykeys.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/search/querykeys.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/search/querykeys.go 2016-10-13 14:32:06.000000000 +0000 @@ -14,14 +14,14 @@ // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 // Changes may cause incorrect behavior and will be lost if the code is // regenerated. 
import ( - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" "net/http" - "net/url" ) // QueryKeysClient is the client that can be used to manage Azure Search @@ -46,21 +46,21 @@ // resourceGroupName is the name of the resource group within the current // subscription. serviceName is the name of the Search service for which to // list query keys. -func (client QueryKeysClient) List(resourceGroupName string, serviceName string) (result ListQueryKeysResult, ae error) { +func (client QueryKeysClient) List(resourceGroupName string, serviceName string) (result ListQueryKeysResult, err error) { req, err := client.ListPreparer(resourceGroupName, serviceName) if err != nil { - return result, autorest.NewErrorWithError(err, "search/QueryKeysClient", "List", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "search.QueryKeysClient", "List", nil, "Failure preparing request") } resp, err := client.ListSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "search/QueryKeysClient", "List", "Failure sending request") + return result, autorest.NewErrorWithError(err, "search.QueryKeysClient", "List", resp, "Failure sending request") } result, err = client.ListResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "search/QueryKeysClient", "List", "Failure responding to request") + err = autorest.NewErrorWithError(err, "search.QueryKeysClient", "List", resp, "Failure responding to request") } return @@ -69,28 +69,27 @@ // ListPreparer prepares the List request. 
func (client QueryKeysClient) ListPreparer(resourceGroupName string, serviceName string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "resourceGroupName": url.QueryEscape(resourceGroupName), - "serviceName": url.QueryEscape(serviceName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "serviceName": autorest.Encode("path", serviceName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Search/searchServices/{serviceName}/listQueryKeys"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Search/searchServices/{serviceName}/listQueryKeys", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client QueryKeysClient) ListSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // ListResponder handles the response to the List request. 
The method always @@ -99,7 +98,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/search/services.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/search/services.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/search/services.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/search/services.go 2016-10-13 14:32:06.000000000 +0000 @@ -14,14 +14,14 @@ // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 // Changes may cause incorrect behavior and will be lost if the code is // regenerated. import ( - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" "net/http" - "net/url" ) // ServicesClient is the client that can be used to manage Azure Search @@ -49,21 +49,21 @@ // subscription. serviceName is the name of the Search service to create or // update. parameters is the properties to set or update on the Search // service. 
-func (client ServicesClient) CreateOrUpdate(resourceGroupName string, serviceName string, parameters ServiceCreateOrUpdateParameters) (result ServiceResource, ae error) { +func (client ServicesClient) CreateOrUpdate(resourceGroupName string, serviceName string, parameters ServiceCreateOrUpdateParameters) (result ServiceResource, err error) { req, err := client.CreateOrUpdatePreparer(resourceGroupName, serviceName, parameters) if err != nil { - return result, autorest.NewErrorWithError(err, "search/ServicesClient", "CreateOrUpdate", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "search.ServicesClient", "CreateOrUpdate", nil, "Failure preparing request") } resp, err := client.CreateOrUpdateSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "search/ServicesClient", "CreateOrUpdate", "Failure sending request") + return result, autorest.NewErrorWithError(err, "search.ServicesClient", "CreateOrUpdate", resp, "Failure sending request") } result, err = client.CreateOrUpdateResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "search/ServicesClient", "CreateOrUpdate", "Failure responding to request") + err = autorest.NewErrorWithError(err, "search.ServicesClient", "CreateOrUpdate", resp, "Failure responding to request") } return @@ -72,29 +72,29 @@ // CreateOrUpdatePreparer prepares the CreateOrUpdate request. 
func (client ServicesClient) CreateOrUpdatePreparer(resourceGroupName string, serviceName string, parameters ServiceCreateOrUpdateParameters) (*http.Request, error) { pathParameters := map[string]interface{}{ - "resourceGroupName": url.QueryEscape(resourceGroupName), - "serviceName": url.QueryEscape(serviceName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "serviceName": autorest.Encode("path", serviceName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, + preparer := autorest.CreatePreparer( autorest.AsJSON(), autorest.AsPut(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Search/searchServices/{serviceName}"), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Search/searchServices/{serviceName}", pathParameters), autorest.WithJSON(parameters), - autorest.WithPathParameters(pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the // http.Response Body if it receives an error. func (client ServicesClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK, http.StatusCreated) + return autorest.SendWithSender(client, req) } // CreateOrUpdateResponder handles the response to the CreateOrUpdate request. 
The method always @@ -103,7 +103,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -115,21 +115,21 @@ // // resourceGroupName is the name of the resource group within the current // subscription. serviceName is the name of the Search service to delete. -func (client ServicesClient) Delete(resourceGroupName string, serviceName string) (result autorest.Response, ae error) { +func (client ServicesClient) Delete(resourceGroupName string, serviceName string) (result autorest.Response, err error) { req, err := client.DeletePreparer(resourceGroupName, serviceName) if err != nil { - return result, autorest.NewErrorWithError(err, "search/ServicesClient", "Delete", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "search.ServicesClient", "Delete", nil, "Failure preparing request") } resp, err := client.DeleteSender(req) if err != nil { result.Response = resp - return result, autorest.NewErrorWithError(err, "search/ServicesClient", "Delete", "Failure sending request") + return result, autorest.NewErrorWithError(err, "search.ServicesClient", "Delete", resp, "Failure sending request") } result, err = client.DeleteResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "search/ServicesClient", "Delete", "Failure responding to request") + err = autorest.NewErrorWithError(err, "search.ServicesClient", "Delete", resp, "Failure responding to request") } return @@ -138,28 +138,27 @@ // DeletePreparer prepares the Delete request. 
func (client ServicesClient) DeletePreparer(resourceGroupName string, serviceName string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "resourceGroupName": url.QueryEscape(resourceGroupName), - "serviceName": url.QueryEscape(serviceName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "serviceName": autorest.Encode("path", serviceName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsDelete(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Search/searchServices/{serviceName}"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Search/searchServices/{serviceName}", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // DeleteSender sends the Delete request. The method will close the // http.Response Body if it receives an error. func (client ServicesClient) DeleteSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK, http.StatusNotFound, http.StatusNoContent) + return autorest.SendWithSender(client, req) } // DeleteResponder handles the response to the Delete request. 
The method always @@ -168,7 +167,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNotFound, http.StatusNoContent), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNotFound, http.StatusNoContent), autorest.ByClosing()) result.Response = resp return @@ -178,21 +177,21 @@ // // resourceGroupName is the name of the resource group within the current // subscription. -func (client ServicesClient) List(resourceGroupName string) (result ServiceListResult, ae error) { +func (client ServicesClient) List(resourceGroupName string) (result ServiceListResult, err error) { req, err := client.ListPreparer(resourceGroupName) if err != nil { - return result, autorest.NewErrorWithError(err, "search/ServicesClient", "List", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "search.ServicesClient", "List", nil, "Failure preparing request") } resp, err := client.ListSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "search/ServicesClient", "List", "Failure sending request") + return result, autorest.NewErrorWithError(err, "search.ServicesClient", "List", resp, "Failure sending request") } result, err = client.ListResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "search/ServicesClient", "List", "Failure responding to request") + err = autorest.NewErrorWithError(err, "search.ServicesClient", "List", resp, "Failure responding to request") } return @@ -201,27 +200,26 @@ // ListPreparer prepares the List request. 
func (client ServicesClient) ListPreparer(resourceGroupName string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Search/searchServices"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Search/searchServices", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client ServicesClient) ListSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // ListResponder handles the response to the List request. 
The method always @@ -230,7 +228,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/search/version.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/search/version.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/search/version.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/search/version.go 2016-10-13 14:32:06.000000000 +0000 @@ -14,7 +14,7 @@ // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 // Changes may cause incorrect behavior and will be lost if the code is // regenerated. @@ -23,13 +23,13 @@ ) const ( - major = "0" - minor = "3" + major = "3" + minor = "1" patch = "0" // Always begin a "tag" with a dash (as per http://semver.org) tag = "-beta" semVerFormat = "%s.%s.%s%s" - userAgentFormat = "Azure-SDK-for-Go/%s;Package arm/%s;API %s" + userAgentFormat = "Azure-SDK-for-Go/%s arm-%s/%s" ) // UserAgent returns the UserAgent string to use when sending http.Requests. diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/servicebus/client.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/servicebus/client.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/servicebus/client.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/servicebus/client.go 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,58 @@ +// Package servicebus implements the Azure ARM Servicebus service API version +// 2014-09-01. 
+// +// Azure Service Bus client +package servicebus + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" +) + +const ( + // APIVersion is the version of the Servicebus + APIVersion = "2014-09-01" + + // DefaultBaseURI is the default URI used for the service Servicebus + DefaultBaseURI = "https://management.azure.com" +) + +// ManagementClient is the base client for Servicebus. +type ManagementClient struct { + autorest.Client + BaseURI string + APIVersion string + SubscriptionID string +} + +// New creates an instance of the ManagementClient client. +func New(subscriptionID string) ManagementClient { + return NewWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewWithBaseURI creates an instance of the ManagementClient client. 
+func NewWithBaseURI(baseURI string, subscriptionID string) ManagementClient { + return ManagementClient{ + Client: autorest.NewClientWithUserAgent(UserAgent()), + BaseURI: baseURI, + APIVersion: APIVersion, + SubscriptionID: subscriptionID, + } +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/servicebus/models.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/servicebus/models.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/servicebus/models.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/servicebus/models.go 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,248 @@ +package servicebus + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/date" + "github.com/Azure/go-autorest/autorest/to" + "net/http" +) + +// AccessRights enumerates the values for access rights. +type AccessRights string + +const ( + // Listen specifies the listen state for access rights. + Listen AccessRights = "Listen" + // Manage specifies the manage state for access rights. + Manage AccessRights = "Manage" + // Send specifies the send state for access rights. 
+ Send AccessRights = "Send" +) + +// Kind enumerates the values for kind. +type Kind string + +const ( + // EventHub specifies the event hub state for kind. + EventHub Kind = "EventHub" + // Messaging specifies the messaging state for kind. + Messaging Kind = "Messaging" +) + +// Name enumerates the values for name. +type Name string + +const ( + // Basic specifies the basic state for name. + Basic Name = "Basic" + // Premium specifies the premium state for name. + Premium Name = "Premium" + // Standard specifies the standard state for name. + Standard Name = "Standard" +) + +// NamespaceType enumerates the values for namespace type. +type NamespaceType string + +const ( + // NamespaceTypeEventHub specifies the namespace type event hub state for + // namespace type. + NamespaceTypeEventHub NamespaceType = "EventHub" + // NamespaceTypeMessaging specifies the namespace type messaging state for + // namespace type. + NamespaceTypeMessaging NamespaceType = "Messaging" + // NamespaceTypeNotificationHub specifies the namespace type notification + // hub state for namespace type. + NamespaceTypeNotificationHub NamespaceType = "NotificationHub" +) + +// Policykey enumerates the values for policykey. +type Policykey string + +const ( + // PrimaryKey specifies the primary key state for policykey. + PrimaryKey Policykey = "PrimaryKey" + // SecondayKey specifies the seconday key state for policykey. + SecondayKey Policykey = "SecondayKey" +) + +// Tier enumerates the values for tier. +type Tier string + +const ( + // TierBasic specifies the tier basic state for tier. + TierBasic Tier = "Basic" + // TierPremium specifies the tier premium state for tier. + TierPremium Tier = "Premium" + // TierStandard specifies the tier standard state for tier. + TierStandard Tier = "Standard" +) + +// ARMSku is sku of the Namespace. 
+type ARMSku struct { + Name Name `json:"Name,omitempty"` + Tier Tier `json:"Tier,omitempty"` + Capacity *int32 `json:"Capacity,omitempty"` +} + +// CheckAvailabilityParameters is parameters supplied to the Check Name +// Availability for Namespace and ServiceBus. +type CheckAvailabilityParameters struct { + Name *string `json:"name,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` + IsAvailiable *bool `json:"isAvailiable,omitempty"` +} + +// CheckAvailabilityResource is description of a CheckAvailibility resource. +type CheckAvailabilityResource struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` + IsAvailiable *bool `json:"isAvailiable,omitempty"` +} + +// NamespaceCreateOrUpdateParameters is parameters supplied to the +// CreateOrUpdate Namespace operation. +type NamespaceCreateOrUpdateParameters struct { + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` + Properties *NamespaceProperties `json:"properties,omitempty"` +} + +// NamespaceListResult is the response of the List Namespace operation. +type NamespaceListResult struct { + autorest.Response `json:"-"` + Value *[]NamespaceResource `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// NamespaceListResultPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. 
+func (client NamespaceListResult) NamespaceListResultPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + +// NamespaceProperties is properties of the Namespace. +type NamespaceProperties struct { + Name *string `json:"name,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` + Region *string `json:"region,omitempty"` + Status *string `json:"status,omitempty"` + CreatedAt *date.Time `json:"createdAt,omitempty"` + ServiceBusEndpoint *string `json:"serviceBusEndpoint,omitempty"` + SubscriptionID *string `json:"subscriptionId,omitempty"` + Critical *bool `json:"critical,omitempty"` + NamespaceType NamespaceType `json:"namespaceType,omitempty"` +} + +// NamespaceResource is description of a Namespace resource. +type NamespaceResource struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` + Kind Kind `json:"kind,omitempty"` + Sku *ARMSku `json:"sku,omitempty"` + Properties *NamespaceProperties `json:"properties,omitempty"` +} + +// RegenerateKeysParameters is parameters supplied to the Regenerate Namespace +// Auth Rule. 
+type RegenerateKeysParameters struct { + Policykey Policykey `json:"Policykey,omitempty"` +} + +// Resource is +type Resource struct { + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` +} + +// ResourceListKeys is namespace/ServiceBus Connection String +type ResourceListKeys struct { + autorest.Response `json:"-"` + PrimaryConnectionString *string `json:"primaryConnectionString,omitempty"` + SecondaryConnectionString *string `json:"secondaryConnectionString,omitempty"` + PrimaryKey *string `json:"primaryKey,omitempty"` + SecondaryKey *string `json:"secondaryKey,omitempty"` + KeyName *string `json:"keyName,omitempty"` +} + +// SharedAccessAuthorizationRuleCreateOrUpdateParameters is parameters +// supplied to the CreateOrUpdate Namespace AuthorizationRules. +type SharedAccessAuthorizationRuleCreateOrUpdateParameters struct { + Location *string `json:"location,omitempty"` + Name *string `json:"name,omitempty"` + Properties *SharedAccessAuthorizationRuleProperties `json:"properties,omitempty"` +} + +// SharedAccessAuthorizationRuleListResult is the response of the List +// Namespace operation. +type SharedAccessAuthorizationRuleListResult struct { + autorest.Response `json:"-"` + Value *[]SharedAccessAuthorizationRuleResource `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// SharedAccessAuthorizationRuleListResultPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. 
+func (client SharedAccessAuthorizationRuleListResult) SharedAccessAuthorizationRuleListResultPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + +// SharedAccessAuthorizationRuleProperties is sharedAccessAuthorizationRule +// properties. +type SharedAccessAuthorizationRuleProperties struct { + Rights *[]AccessRights `json:"rights,omitempty"` +} + +// SharedAccessAuthorizationRuleResource is description of a Namespace +// AuthorizationRules. +type SharedAccessAuthorizationRuleResource struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` + Properties *SharedAccessAuthorizationRuleProperties `json:"properties,omitempty"` +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/servicebus/namespaces.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/servicebus/namespaces.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/servicebus/namespaces.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/servicebus/namespaces.go 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,892 @@ +package servicebus + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "net/http" +) + +// NamespacesClient is the azure Service Bus client +type NamespacesClient struct { + ManagementClient +} + +// NewNamespacesClient creates an instance of the NamespacesClient client. +func NewNamespacesClient(subscriptionID string) NamespacesClient { + return NewNamespacesClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewNamespacesClientWithBaseURI creates an instance of the NamespacesClient +// client. +func NewNamespacesClientWithBaseURI(baseURI string, subscriptionID string) NamespacesClient { + return NamespacesClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CheckAvailability checks the availability of the given service namespace +// across all Windows Azure subscriptions. This is useful because the domain +// name is created based on the service namespace name. +// +// parameters is the namespace name. 
+func (client NamespacesClient) CheckAvailability(parameters CheckAvailabilityParameters) (result CheckAvailabilityResource, err error) { + req, err := client.CheckAvailabilityPreparer(parameters) + if err != nil { + return result, autorest.NewErrorWithError(err, "servicebus.NamespacesClient", "CheckAvailability", nil, "Failure preparing request") + } + + resp, err := client.CheckAvailabilitySender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "servicebus.NamespacesClient", "CheckAvailability", resp, "Failure sending request") + } + + result, err = client.CheckAvailabilityResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "servicebus.NamespacesClient", "CheckAvailability", resp, "Failure responding to request") + } + + return +} + +// CheckAvailabilityPreparer prepares the CheckAvailability request. +func (client NamespacesClient) CheckAvailabilityPreparer(parameters CheckAvailabilityParameters) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.ServiceBus/checkNamespaceAvailability", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// CheckAvailabilitySender sends the CheckAvailability request. The method will close the +// http.Response Body if it receives an error. 
+func (client NamespacesClient) CheckAvailabilitySender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// CheckAvailabilityResponder handles the response to the CheckAvailability request. The method always +// closes the http.Response Body. +func (client NamespacesClient) CheckAvailabilityResponder(resp *http.Response) (result CheckAvailabilityResource, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// CreateOrUpdate creates/Updates a service namespace. Once created, this +// namespace's resource manifest is immutable. This operation is idempotent. +// This method may poll for completion. Polling can be canceled by passing +// the cancel channel argument. The channel will be used to cancel polling +// and any outstanding HTTP requests. +// +// resourceGroupName is the name of the resource group. namespaceName is the +// namespace name. parameters is parameters supplied to create a Namespace +// Resource. 
+func (client NamespacesClient) CreateOrUpdate(resourceGroupName string, namespaceName string, parameters NamespaceCreateOrUpdateParameters, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.CreateOrUpdatePreparer(resourceGroupName, namespaceName, parameters, cancel) + if err != nil { + return result, autorest.NewErrorWithError(err, "servicebus.NamespacesClient", "CreateOrUpdate", nil, "Failure preparing request") + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "servicebus.NamespacesClient", "CreateOrUpdate", resp, "Failure sending request") + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "servicebus.NamespacesClient", "CreateOrUpdate", resp, "Failure responding to request") + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. +func (client NamespacesClient) CreateOrUpdatePreparer(resourceGroupName string, namespaceName string, parameters NamespaceCreateOrUpdateParameters, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "namespaceName": autorest.Encode("path", namespaceName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. 
The method will close the +// http.Response Body if it receives an error. +func (client NamespacesClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client NamespacesClient) CreateOrUpdateResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByClosing()) + result.Response = resp + return +} + +// CreateOrUpdateAuthorizationRule creates an authorization rule for a +// namespace +// +// resourceGroupName is the name of the resource group. namespaceName is the +// namespace name. authorizationRuleName is aauthorization Rule Name. +// parameters is the shared access authorization rule. 
+func (client NamespacesClient) CreateOrUpdateAuthorizationRule(resourceGroupName string, namespaceName string, authorizationRuleName string, parameters SharedAccessAuthorizationRuleCreateOrUpdateParameters) (result SharedAccessAuthorizationRuleResource, err error) { + req, err := client.CreateOrUpdateAuthorizationRulePreparer(resourceGroupName, namespaceName, authorizationRuleName, parameters) + if err != nil { + return result, autorest.NewErrorWithError(err, "servicebus.NamespacesClient", "CreateOrUpdateAuthorizationRule", nil, "Failure preparing request") + } + + resp, err := client.CreateOrUpdateAuthorizationRuleSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "servicebus.NamespacesClient", "CreateOrUpdateAuthorizationRule", resp, "Failure sending request") + } + + result, err = client.CreateOrUpdateAuthorizationRuleResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "servicebus.NamespacesClient", "CreateOrUpdateAuthorizationRule", resp, "Failure responding to request") + } + + return +} + +// CreateOrUpdateAuthorizationRulePreparer prepares the CreateOrUpdateAuthorizationRule request. 
+func (client NamespacesClient) CreateOrUpdateAuthorizationRulePreparer(resourceGroupName string, namespaceName string, authorizationRuleName string, parameters SharedAccessAuthorizationRuleCreateOrUpdateParameters) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "authorizationRuleName": autorest.Encode("path", authorizationRuleName), + "namespaceName": autorest.Encode("path", namespaceName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}/AuthorizationRules/{authorizationRuleName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// CreateOrUpdateAuthorizationRuleSender sends the CreateOrUpdateAuthorizationRule request. The method will close the +// http.Response Body if it receives an error. +func (client NamespacesClient) CreateOrUpdateAuthorizationRuleSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// CreateOrUpdateAuthorizationRuleResponder handles the response to the CreateOrUpdateAuthorizationRule request. The method always +// closes the http.Response Body. 
+func (client NamespacesClient) CreateOrUpdateAuthorizationRuleResponder(resp *http.Response) (result SharedAccessAuthorizationRuleResource, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete deletes an existing namespace. This operation also removes all +// associated resources under the namespace. This method may poll for +// completion. Polling can be canceled by passing the cancel channel +// argument. The channel will be used to cancel polling and any outstanding +// HTTP requests. +// +// resourceGroupName is the name of the resource group. namespaceName is the +// namespace name. +func (client NamespacesClient) Delete(resourceGroupName string, namespaceName string, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.DeletePreparer(resourceGroupName, namespaceName, cancel) + if err != nil { + return result, autorest.NewErrorWithError(err, "servicebus.NamespacesClient", "Delete", nil, "Failure preparing request") + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "servicebus.NamespacesClient", "Delete", resp, "Failure sending request") + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "servicebus.NamespacesClient", "Delete", resp, "Failure responding to request") + } + + return +} + +// DeletePreparer prepares the Delete request. 
+func (client NamespacesClient) DeletePreparer(resourceGroupName string, namespaceName string, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "namespaceName": autorest.Encode("path", namespaceName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client NamespacesClient) DeleteSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client NamespacesClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByClosing()) + result.Response = resp + return +} + +// DeleteAuthorizationRule deletes a namespace authorization rule +// +// resourceGroupName is the name of the resource group. namespaceName is the +// namespace name. authorizationRuleName is authorization Rule Name. 
+func (client NamespacesClient) DeleteAuthorizationRule(resourceGroupName string, namespaceName string, authorizationRuleName string) (result autorest.Response, err error) { + req, err := client.DeleteAuthorizationRulePreparer(resourceGroupName, namespaceName, authorizationRuleName) + if err != nil { + return result, autorest.NewErrorWithError(err, "servicebus.NamespacesClient", "DeleteAuthorizationRule", nil, "Failure preparing request") + } + + resp, err := client.DeleteAuthorizationRuleSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "servicebus.NamespacesClient", "DeleteAuthorizationRule", resp, "Failure sending request") + } + + result, err = client.DeleteAuthorizationRuleResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "servicebus.NamespacesClient", "DeleteAuthorizationRule", resp, "Failure responding to request") + } + + return +} + +// DeleteAuthorizationRulePreparer prepares the DeleteAuthorizationRule request. 
+func (client NamespacesClient) DeleteAuthorizationRulePreparer(resourceGroupName string, namespaceName string, authorizationRuleName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "authorizationRuleName": autorest.Encode("path", authorizationRuleName), + "namespaceName": autorest.Encode("path", namespaceName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}/AuthorizationRules/{authorizationRuleName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// DeleteAuthorizationRuleSender sends the DeleteAuthorizationRule request. The method will close the +// http.Response Body if it receives an error. +func (client NamespacesClient) DeleteAuthorizationRuleSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// DeleteAuthorizationRuleResponder handles the response to the DeleteAuthorizationRule request. The method always +// closes the http.Response Body. +func (client NamespacesClient) DeleteAuthorizationRuleResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get returns the description for the specified namespace. +// +// resourceGroupName is the name of the resource group. namespaceName is the +// namespace name. 
+func (client NamespacesClient) Get(resourceGroupName string, namespaceName string) (result NamespaceResource, err error) { + req, err := client.GetPreparer(resourceGroupName, namespaceName) + if err != nil { + return result, autorest.NewErrorWithError(err, "servicebus.NamespacesClient", "Get", nil, "Failure preparing request") + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "servicebus.NamespacesClient", "Get", resp, "Failure sending request") + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "servicebus.NamespacesClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client NamespacesClient) GetPreparer(resourceGroupName string, namespaceName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "namespaceName": autorest.Encode("path", namespaceName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client NamespacesClient) GetSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. 
+func (client NamespacesClient) GetResponder(resp *http.Response) (result NamespaceResource, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// GetAuthorizationRule gets an authorization rule for a namespace by name. +// +// resourceGroupName is the name of the resource group. namespaceName is the +// namespace name authorizationRuleName is authorization rule name. +func (client NamespacesClient) GetAuthorizationRule(resourceGroupName string, namespaceName string, authorizationRuleName string) (result SharedAccessAuthorizationRuleResource, err error) { + req, err := client.GetAuthorizationRulePreparer(resourceGroupName, namespaceName, authorizationRuleName) + if err != nil { + return result, autorest.NewErrorWithError(err, "servicebus.NamespacesClient", "GetAuthorizationRule", nil, "Failure preparing request") + } + + resp, err := client.GetAuthorizationRuleSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "servicebus.NamespacesClient", "GetAuthorizationRule", resp, "Failure sending request") + } + + result, err = client.GetAuthorizationRuleResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "servicebus.NamespacesClient", "GetAuthorizationRule", resp, "Failure responding to request") + } + + return +} + +// GetAuthorizationRulePreparer prepares the GetAuthorizationRule request. 
+func (client NamespacesClient) GetAuthorizationRulePreparer(resourceGroupName string, namespaceName string, authorizationRuleName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "authorizationRuleName": autorest.Encode("path", authorizationRuleName), + "namespaceName": autorest.Encode("path", namespaceName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}/AuthorizationRules/{authorizationRuleName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetAuthorizationRuleSender sends the GetAuthorizationRule request. The method will close the +// http.Response Body if it receives an error. +func (client NamespacesClient) GetAuthorizationRuleSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetAuthorizationRuleResponder handles the response to the GetAuthorizationRule request. The method always +// closes the http.Response Body. +func (client NamespacesClient) GetAuthorizationRuleResponder(resp *http.Response) (result SharedAccessAuthorizationRuleResource, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListAuthorizationRules gets the authorization rules for a namespace. +// +// resourceGroupName is the name of the resource group. 
namespaceName is the +// namespace name +func (client NamespacesClient) ListAuthorizationRules(resourceGroupName string, namespaceName string) (result SharedAccessAuthorizationRuleListResult, err error) { + req, err := client.ListAuthorizationRulesPreparer(resourceGroupName, namespaceName) + if err != nil { + return result, autorest.NewErrorWithError(err, "servicebus.NamespacesClient", "ListAuthorizationRules", nil, "Failure preparing request") + } + + resp, err := client.ListAuthorizationRulesSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "servicebus.NamespacesClient", "ListAuthorizationRules", resp, "Failure sending request") + } + + result, err = client.ListAuthorizationRulesResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "servicebus.NamespacesClient", "ListAuthorizationRules", resp, "Failure responding to request") + } + + return +} + +// ListAuthorizationRulesPreparer prepares the ListAuthorizationRules request. +func (client NamespacesClient) ListAuthorizationRulesPreparer(resourceGroupName string, namespaceName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "namespaceName": autorest.Encode("path", namespaceName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}/AuthorizationRules", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListAuthorizationRulesSender sends the ListAuthorizationRules request. 
The method will close the +// http.Response Body if it receives an error. +func (client NamespacesClient) ListAuthorizationRulesSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListAuthorizationRulesResponder handles the response to the ListAuthorizationRules request. The method always +// closes the http.Response Body. +func (client NamespacesClient) ListAuthorizationRulesResponder(resp *http.Response) (result SharedAccessAuthorizationRuleListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListAuthorizationRulesNextResults retrieves the next set of results, if any. +func (client NamespacesClient) ListAuthorizationRulesNextResults(lastResults SharedAccessAuthorizationRuleListResult) (result SharedAccessAuthorizationRuleListResult, err error) { + req, err := lastResults.SharedAccessAuthorizationRuleListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "servicebus.NamespacesClient", "ListAuthorizationRules", nil, "Failure preparing next results request request") + } + if req == nil { + return + } + + resp, err := client.ListAuthorizationRulesSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "servicebus.NamespacesClient", "ListAuthorizationRules", resp, "Failure sending next results request request") + } + + result, err = client.ListAuthorizationRulesResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "servicebus.NamespacesClient", "ListAuthorizationRules", resp, "Failure responding to next results request request") + } + + return +} + +// ListByResourceGroup lists the available namespaces within a resourceGroup. 
+// +// resourceGroupName is the name of the resource group. If resourceGroupName +// value is null the method lists all the namespaces within subscription +func (client NamespacesClient) ListByResourceGroup(resourceGroupName string) (result NamespaceListResult, err error) { + req, err := client.ListByResourceGroupPreparer(resourceGroupName) + if err != nil { + return result, autorest.NewErrorWithError(err, "servicebus.NamespacesClient", "ListByResourceGroup", nil, "Failure preparing request") + } + + resp, err := client.ListByResourceGroupSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "servicebus.NamespacesClient", "ListByResourceGroup", resp, "Failure sending request") + } + + result, err = client.ListByResourceGroupResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "servicebus.NamespacesClient", "ListByResourceGroup", resp, "Failure responding to request") + } + + return +} + +// ListByResourceGroupPreparer prepares the ListByResourceGroup request. +func (client NamespacesClient) ListByResourceGroupPreparer(resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the +// http.Response Body if it receives an error. 
+func (client NamespacesClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always +// closes the http.Response Body. +func (client NamespacesClient) ListByResourceGroupResponder(resp *http.Response) (result NamespaceListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListByResourceGroupNextResults retrieves the next set of results, if any. +func (client NamespacesClient) ListByResourceGroupNextResults(lastResults NamespaceListResult) (result NamespaceListResult, err error) { + req, err := lastResults.NamespaceListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "servicebus.NamespacesClient", "ListByResourceGroup", nil, "Failure preparing next results request request") + } + if req == nil { + return + } + + resp, err := client.ListByResourceGroupSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "servicebus.NamespacesClient", "ListByResourceGroup", resp, "Failure sending next results request request") + } + + result, err = client.ListByResourceGroupResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "servicebus.NamespacesClient", "ListByResourceGroup", resp, "Failure responding to next results request request") + } + + return +} + +// ListBySubscription lists all the available namespaces within the +// subscription irrespective of the resourceGroups. 
+func (client NamespacesClient) ListBySubscription() (result NamespaceListResult, err error) { + req, err := client.ListBySubscriptionPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "servicebus.NamespacesClient", "ListBySubscription", nil, "Failure preparing request") + } + + resp, err := client.ListBySubscriptionSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "servicebus.NamespacesClient", "ListBySubscription", resp, "Failure sending request") + } + + result, err = client.ListBySubscriptionResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "servicebus.NamespacesClient", "ListBySubscription", resp, "Failure responding to request") + } + + return +} + +// ListBySubscriptionPreparer prepares the ListBySubscription request. +func (client NamespacesClient) ListBySubscriptionPreparer() (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.ServiceBus/namespaces", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListBySubscriptionSender sends the ListBySubscription request. The method will close the +// http.Response Body if it receives an error. +func (client NamespacesClient) ListBySubscriptionSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListBySubscriptionResponder handles the response to the ListBySubscription request. The method always +// closes the http.Response Body. 
+func (client NamespacesClient) ListBySubscriptionResponder(resp *http.Response) (result NamespaceListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListBySubscriptionNextResults retrieves the next set of results, if any. +func (client NamespacesClient) ListBySubscriptionNextResults(lastResults NamespaceListResult) (result NamespaceListResult, err error) { + req, err := lastResults.NamespaceListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "servicebus.NamespacesClient", "ListBySubscription", nil, "Failure preparing next results request request") + } + if req == nil { + return + } + + resp, err := client.ListBySubscriptionSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "servicebus.NamespacesClient", "ListBySubscription", resp, "Failure sending next results request request") + } + + result, err = client.ListBySubscriptionResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "servicebus.NamespacesClient", "ListBySubscription", resp, "Failure responding to next results request request") + } + + return +} + +// ListKeys gets the Primary and Secondary ConnectionStrings to the namespace +// +// resourceGroupName is the name of the resource group. namespaceName is the +// namespace name. authorizationRuleName is the connection string of the +// namespace for the specified authorizationRule. 
+func (client NamespacesClient) ListKeys(resourceGroupName string, namespaceName string, authorizationRuleName string) (result ResourceListKeys, err error) { + req, err := client.ListKeysPreparer(resourceGroupName, namespaceName, authorizationRuleName) + if err != nil { + return result, autorest.NewErrorWithError(err, "servicebus.NamespacesClient", "ListKeys", nil, "Failure preparing request") + } + + resp, err := client.ListKeysSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "servicebus.NamespacesClient", "ListKeys", resp, "Failure sending request") + } + + result, err = client.ListKeysResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "servicebus.NamespacesClient", "ListKeys", resp, "Failure responding to request") + } + + return +} + +// ListKeysPreparer prepares the ListKeys request. +func (client NamespacesClient) ListKeysPreparer(resourceGroupName string, namespaceName string, authorizationRuleName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "authorizationRuleName": autorest.Encode("path", authorizationRuleName), + "namespaceName": autorest.Encode("path", namespaceName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}/AuthorizationRules/{authorizationRuleName}/listKeys", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListKeysSender sends the ListKeys request. 
The method will close the +// http.Response Body if it receives an error. +func (client NamespacesClient) ListKeysSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListKeysResponder handles the response to the ListKeys request. The method always +// closes the http.Response Body. +func (client NamespacesClient) ListKeysResponder(resp *http.Response) (result ResourceListKeys, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// RegenerateKeys gets the Primary and Secondary ConnectionStrings to the +// namespace +// +// resourceGroupName is the name of the resource group. namespaceName is the +// namespace name. authorizationRuleName is the connection string of the +// namespace for the specified authorizationRule. parameters is parameters +// supplied to regenerate Auth Rule. 
+func (client NamespacesClient) RegenerateKeys(resourceGroupName string, namespaceName string, authorizationRuleName string, parameters RegenerateKeysParameters) (result ResourceListKeys, err error) { + req, err := client.RegenerateKeysPreparer(resourceGroupName, namespaceName, authorizationRuleName, parameters) + if err != nil { + return result, autorest.NewErrorWithError(err, "servicebus.NamespacesClient", "RegenerateKeys", nil, "Failure preparing request") + } + + resp, err := client.RegenerateKeysSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "servicebus.NamespacesClient", "RegenerateKeys", resp, "Failure sending request") + } + + result, err = client.RegenerateKeysResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "servicebus.NamespacesClient", "RegenerateKeys", resp, "Failure responding to request") + } + + return +} + +// RegenerateKeysPreparer prepares the RegenerateKeys request. 
+func (client NamespacesClient) RegenerateKeysPreparer(resourceGroupName string, namespaceName string, authorizationRuleName string, parameters RegenerateKeysParameters) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "authorizationRuleName": autorest.Encode("path", authorizationRuleName), + "namespaceName": autorest.Encode("path", namespaceName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}/AuthorizationRules/{authorizationRuleName}/regenerateKeys", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// RegenerateKeysSender sends the RegenerateKeys request. The method will close the +// http.Response Body if it receives an error. +func (client NamespacesClient) RegenerateKeysSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// RegenerateKeysResponder handles the response to the RegenerateKeys request. The method always +// closes the http.Response Body. 
+func (client NamespacesClient) RegenerateKeysResponder(resp *http.Response) (result ResourceListKeys, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/servicebus/version.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/servicebus/version.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/servicebus/version.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/servicebus/version.go 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,43 @@ +package servicebus + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "fmt" +) + +const ( + major = "3" + minor = "1" + patch = "0" + // Always begin a "tag" with a dash (as per http://semver.org) + tag = "-beta" + semVerFormat = "%s.%s.%s%s" + userAgentFormat = "Azure-SDK-for-Go/%s arm-%s/%s" +) + +// UserAgent returns the UserAgent string to use when sending http.Requests. 
+func UserAgent() string { + return fmt.Sprintf(userAgentFormat, Version(), "servicebus", "2014-09-01") +} + +// Version returns the semantic version (see http://semver.org) of the client. +func Version() string { + return fmt.Sprintf(semVerFormat, major, minor, patch, tag) +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/sql/client.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/sql/client.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/sql/client.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/sql/client.go 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,61 @@ +// Package sql implements the Azure ARM Sql service API version +// 2015-05-01-preview. +// +// The Azure SQL Database management API provides a RESTful set of web +// services that interact with Azure SQL Database services to manage your +// databases. The API enables users to create, retrieve, update, and delete +// Databases, Servers and related resources. +package sql + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. 
+ +import ( + "github.com/Azure/go-autorest/autorest" +) + +const ( + // APIVersion is the version of the Sql + APIVersion = "2015-05-01-preview" + + // DefaultBaseURI is the default URI used for the service Sql + DefaultBaseURI = "https://management.azure.com" +) + +// ManagementClient is the base client for Sql. +type ManagementClient struct { + autorest.Client + BaseURI string + APIVersion string + SubscriptionID string +} + +// New creates an instance of the ManagementClient client. +func New(subscriptionID string) ManagementClient { + return NewWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewWithBaseURI creates an instance of the ManagementClient client. +func NewWithBaseURI(baseURI string, subscriptionID string) ManagementClient { + return ManagementClient{ + Client: autorest.NewClientWithUserAgent(UserAgent()), + BaseURI: baseURI, + APIVersion: APIVersion, + SubscriptionID: subscriptionID, + } +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/sql/models.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/sql/models.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/sql/models.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/sql/models.go 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,81 @@ +package sql + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" +) + +// EmailAccountAdmins enumerates the values for email account admins. +type EmailAccountAdmins string + +const ( + // Disabled specifies the disabled state for email account admins. + Disabled EmailAccountAdmins = "Disabled" + // Enabled specifies the enabled state for email account admins. + Enabled EmailAccountAdmins = "Enabled" +) + +// State enumerates the values for state. +type State string + +const ( + // StateDisabled specifies the state disabled state for state. + StateDisabled State = "Disabled" + // StateEnabled specifies the state enabled state for state. + StateEnabled State = "Enabled" + // StateNew specifies the state new state for state. + StateNew State = "New" +) + +// Resource is +type Resource struct { + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` +} + +// ServerSecurityAlertPolicy is represents an Azure SQL Server Security Alert +// Policy. +type ServerSecurityAlertPolicy struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` + Properties *ServerSecurityAlertPolicyProperties `json:"properties,omitempty"` +} + +// ServerSecurityAlertPolicyCreateOrUpdateParameters is create or update +// server security alert policy parameters. 
+type ServerSecurityAlertPolicyCreateOrUpdateParameters struct { + Properties *ServerSecurityAlertPolicyProperties `json:"properties,omitempty"` +} + +// ServerSecurityAlertPolicyProperties is represents the properties of an +// Azure SQL Security alert policy. +type ServerSecurityAlertPolicyProperties struct { + State State `json:"state,omitempty"` + DisabledAlerts *string `json:"disabledAlerts,omitempty"` + EmailAddresses *string `json:"emailAddresses,omitempty"` + EmailAccountAdmins EmailAccountAdmins `json:"emailAccountAdmins,omitempty"` +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/sql/securityalertpolicy.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/sql/securityalertpolicy.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/sql/securityalertpolicy.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/sql/securityalertpolicy.go 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,180 @@ +package sql + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. 
+ +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "net/http" +) + +// SecurityAlertPolicyClient is the the Azure SQL Database management API +// provides a RESTful set of web services that interact with Azure SQL +// Database services to manage your databases. The API enables users to +// create, retrieve, update, and delete Databases, Servers and related +// resources. +type SecurityAlertPolicyClient struct { + ManagementClient +} + +// NewSecurityAlertPolicyClient creates an instance of the +// SecurityAlertPolicyClient client. +func NewSecurityAlertPolicyClient(subscriptionID string) SecurityAlertPolicyClient { + return NewSecurityAlertPolicyClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewSecurityAlertPolicyClientWithBaseURI creates an instance of the +// SecurityAlertPolicyClient client. +func NewSecurityAlertPolicyClientWithBaseURI(baseURI string, subscriptionID string) SecurityAlertPolicyClient { + return SecurityAlertPolicyClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate creates or updates an Azure SQL Server security alert +// policy. This method may poll for completion. Polling can be canceled by +// passing the cancel channel argument. The channel will be used to cancel +// polling and any outstanding HTTP requests. +// +// parameters is the required parameters for creating or updating a Azure SQL +// Server security alert policy. resourceGroupName is the name of the +// Resource Group to which the server belongs. serverName is the name of the +// Azure SQL Server. 
+func (client SecurityAlertPolicyClient) CreateOrUpdate(parameters ServerSecurityAlertPolicyCreateOrUpdateParameters, resourceGroupName string, serverName string, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.CreateOrUpdatePreparer(parameters, resourceGroupName, serverName, cancel) + if err != nil { + return result, autorest.NewErrorWithError(err, "sql.SecurityAlertPolicyClient", "CreateOrUpdate", nil, "Failure preparing request") + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "sql.SecurityAlertPolicyClient", "CreateOrUpdate", resp, "Failure sending request") + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "sql.SecurityAlertPolicyClient", "CreateOrUpdate", resp, "Failure responding to request") + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. +func (client SecurityAlertPolicyClient) CreateOrUpdatePreparer(parameters ServerSecurityAlertPolicyCreateOrUpdateParameters, resourceGroupName string, serverName string, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "serverName": autorest.Encode("path", serverName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/securityAlertPolicies/Default", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// 
CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client SecurityAlertPolicyClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client SecurityAlertPolicyClient) CreateOrUpdateResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get returns an Azure SQL Server security alert policy. +// +// resourceGroupName is the name of the Resource Group to which the server +// belongs. serverName is the name of the Azure SQL Server. +func (client SecurityAlertPolicyClient) Get(resourceGroupName string, serverName string) (result ServerSecurityAlertPolicy, err error) { + req, err := client.GetPreparer(resourceGroupName, serverName) + if err != nil { + return result, autorest.NewErrorWithError(err, "sql.SecurityAlertPolicyClient", "Get", nil, "Failure preparing request") + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "sql.SecurityAlertPolicyClient", "Get", resp, "Failure sending request") + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "sql.SecurityAlertPolicyClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. 
+func (client SecurityAlertPolicyClient) GetPreparer(resourceGroupName string, serverName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "serverName": autorest.Encode("path", serverName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/securityAlertPolicies/Default", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client SecurityAlertPolicyClient) GetSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. 
+func (client SecurityAlertPolicyClient) GetResponder(resp *http.Response) (result ServerSecurityAlertPolicy, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/sql/version.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/sql/version.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/sql/version.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/sql/version.go 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,43 @@ +package sql + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "fmt" +) + +const ( + major = "3" + minor = "1" + patch = "0" + // Always begin a "tag" with a dash (as per http://semver.org) + tag = "-beta" + semVerFormat = "%s.%s.%s%s" + userAgentFormat = "Azure-SDK-for-Go/%s arm-%s/%s" +) + +// UserAgent returns the UserAgent string to use when sending http.Requests. 
+func UserAgent() string { + return fmt.Sprintf(userAgentFormat, Version(), "sql", "2015-05-01-preview") +} + +// Version returns the semantic version (see http://semver.org) of the client. +func Version() string { + return fmt.Sprintf(semVerFormat, major, minor, patch, tag) +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/storage/accounts.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/storage/accounts.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/storage/accounts.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/storage/accounts.go 2016-10-13 14:32:06.000000000 +0000 @@ -14,14 +14,14 @@ // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 // Changes may cause incorrect behavior and will be lost if the code is // regenerated. import ( - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" "net/http" - "net/url" ) // AccountsClient is the the Storage Management Client. @@ -45,21 +45,21 @@ // accountName is the name of the storage account within the specified // resource group. Storage account names must be between 3 and 24 characters // in length and use numbers and lower-case letters only. 
-func (client AccountsClient) CheckNameAvailability(accountName AccountCheckNameAvailabilityParameters) (result CheckNameAvailabilityResult, ae error) { +func (client AccountsClient) CheckNameAvailability(accountName AccountCheckNameAvailabilityParameters) (result CheckNameAvailabilityResult, err error) { req, err := client.CheckNameAvailabilityPreparer(accountName) if err != nil { - return result, autorest.NewErrorWithError(err, "storage/AccountsClient", "CheckNameAvailability", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "storage.AccountsClient", "CheckNameAvailability", nil, "Failure preparing request") } resp, err := client.CheckNameAvailabilitySender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "storage/AccountsClient", "CheckNameAvailability", "Failure sending request") + return result, autorest.NewErrorWithError(err, "storage.AccountsClient", "CheckNameAvailability", resp, "Failure sending request") } result, err = client.CheckNameAvailabilityResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "storage/AccountsClient", "CheckNameAvailability", "Failure responding to request") + err = autorest.NewErrorWithError(err, "storage.AccountsClient", "CheckNameAvailability", resp, "Failure responding to request") } return @@ -68,27 +68,27 @@ // CheckNameAvailabilityPreparer prepares the CheckNameAvailability request. 
func (client AccountsClient) CheckNameAvailabilityPreparer(accountName AccountCheckNameAvailabilityParameters) (*http.Request, error) { pathParameters := map[string]interface{}{ - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, + preparer := autorest.CreatePreparer( autorest.AsJSON(), autorest.AsPost(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/providers/Microsoft.Storage/checkNameAvailability"), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Storage/checkNameAvailability", pathParameters), autorest.WithJSON(accountName), - autorest.WithPathParameters(pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // CheckNameAvailabilitySender sends the CheckNameAvailability request. The method will close the // http.Response Body if it receives an error. func (client AccountsClient) CheckNameAvailabilitySender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // CheckNameAvailabilityResponder handles the response to the CheckNameAvailability request. The method always @@ -97,7 +97,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -105,74 +105,78 @@ } // Create asynchronously creates a new storage account with the specified -// parameters. Existing accounts cannot be updated with this API and should -// instead use the Update Storage Account API. 
If an account is already -// created and subsequent PUT request is issued with exact same set of -// properties, then HTTP 200 would be returned. +// parameters. If an account is already created and subsequent create request +// is issued with different properties, the account properties will be +// updated. If an account is already created and subsequent create or update +// request is issued with exact same set of properties, the request will +// succeed. This method may poll for completion. Polling can be canceled by +// passing the cancel channel argument. The channel will be used to cancel +// polling and any outstanding HTTP requests. // // resourceGroupName is the name of the resource group within the user's // subscription. accountName is the name of the storage account within the // specified resource group. Storage account names must be between 3 and 24 // characters in length and use numbers and lower-case letters only. // parameters is the parameters to provide for the created account. 
-func (client AccountsClient) Create(resourceGroupName string, accountName string, parameters AccountCreateParameters) (result Account, ae error) { - req, err := client.CreatePreparer(resourceGroupName, accountName, parameters) +func (client AccountsClient) Create(resourceGroupName string, accountName string, parameters AccountCreateParameters, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.CreatePreparer(resourceGroupName, accountName, parameters, cancel) if err != nil { - return result, autorest.NewErrorWithError(err, "storage/AccountsClient", "Create", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "storage.AccountsClient", "Create", nil, "Failure preparing request") } resp, err := client.CreateSender(req) if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "storage/AccountsClient", "Create", "Failure sending request") + result.Response = resp + return result, autorest.NewErrorWithError(err, "storage.AccountsClient", "Create", resp, "Failure sending request") } result, err = client.CreateResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "storage/AccountsClient", "Create", "Failure responding to request") + err = autorest.NewErrorWithError(err, "storage.AccountsClient", "Create", resp, "Failure responding to request") } return } // CreatePreparer prepares the Create request. 
-func (client AccountsClient) CreatePreparer(resourceGroupName string, accountName string, parameters AccountCreateParameters) (*http.Request, error) { +func (client AccountsClient) CreatePreparer(resourceGroupName string, accountName string, parameters AccountCreateParameters, cancel <-chan struct{}) (*http.Request, error) { pathParameters := map[string]interface{}{ - "accountName": url.QueryEscape(accountName), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "accountName": autorest.Encode("path", accountName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, + preparer := autorest.CreatePreparer( autorest.AsJSON(), autorest.AsPut(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}"), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}", pathParameters), autorest.WithJSON(parameters), - autorest.WithPathParameters(pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) } // CreateSender sends the Create request. The method will close the // http.Response Body if it receives an error. func (client AccountsClient) CreateSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK, http.StatusAccepted) + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) } // CreateResponder handles the response to the Create request. The method always // closes the http.Response Body. 
-func (client AccountsClient) CreateResponder(resp *http.Response) (result Account, err error) { +func (client AccountsClient) CreateResponder(resp *http.Response) (result autorest.Response, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), - autorest.ByUnmarshallingJSON(&result), + azure.WithErrorUnlessStatusCode(http.StatusAccepted, http.StatusOK), autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} + result.Response = resp return } @@ -182,21 +186,21 @@ // subscription. accountName is the name of the storage account within the // specified resource group. Storage account names must be between 3 and 24 // characters in length and use numbers and lower-case letters only. -func (client AccountsClient) Delete(resourceGroupName string, accountName string) (result autorest.Response, ae error) { +func (client AccountsClient) Delete(resourceGroupName string, accountName string) (result autorest.Response, err error) { req, err := client.DeletePreparer(resourceGroupName, accountName) if err != nil { - return result, autorest.NewErrorWithError(err, "storage/AccountsClient", "Delete", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "storage.AccountsClient", "Delete", nil, "Failure preparing request") } resp, err := client.DeleteSender(req) if err != nil { result.Response = resp - return result, autorest.NewErrorWithError(err, "storage/AccountsClient", "Delete", "Failure sending request") + return result, autorest.NewErrorWithError(err, "storage.AccountsClient", "Delete", resp, "Failure sending request") } result, err = client.DeleteResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "storage/AccountsClient", "Delete", "Failure responding to request") + err = autorest.NewErrorWithError(err, "storage.AccountsClient", "Delete", resp, "Failure responding to request") } return @@ -205,28 +209,27 @@ // DeletePreparer 
prepares the Delete request. func (client AccountsClient) DeletePreparer(resourceGroupName string, accountName string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "accountName": url.QueryEscape(accountName), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "accountName": autorest.Encode("path", accountName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsDelete(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // DeleteSender sends the Delete request. The method will close the // http.Response Body if it receives an error. func (client AccountsClient) DeleteSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK, http.StatusNoContent) + return autorest.SendWithSender(client, req) } // DeleteResponder handles the response to the Delete request. The method always @@ -235,7 +238,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), autorest.ByClosing()) result.Response = resp return @@ -249,21 +252,21 @@ // subscription. 
accountName is the name of the storage account within the // specified resource group. Storage account names must be between 3 and 24 // characters in length and use numbers and lower-case letters only. -func (client AccountsClient) GetProperties(resourceGroupName string, accountName string) (result Account, ae error) { +func (client AccountsClient) GetProperties(resourceGroupName string, accountName string) (result Account, err error) { req, err := client.GetPropertiesPreparer(resourceGroupName, accountName) if err != nil { - return result, autorest.NewErrorWithError(err, "storage/AccountsClient", "GetProperties", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "storage.AccountsClient", "GetProperties", nil, "Failure preparing request") } resp, err := client.GetPropertiesSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "storage/AccountsClient", "GetProperties", "Failure sending request") + return result, autorest.NewErrorWithError(err, "storage.AccountsClient", "GetProperties", resp, "Failure sending request") } result, err = client.GetPropertiesResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "storage/AccountsClient", "GetProperties", "Failure responding to request") + err = autorest.NewErrorWithError(err, "storage.AccountsClient", "GetProperties", resp, "Failure responding to request") } return @@ -272,28 +275,27 @@ // GetPropertiesPreparer prepares the GetProperties request. 
func (client AccountsClient) GetPropertiesPreparer(resourceGroupName string, accountName string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "accountName": url.QueryEscape(accountName), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "accountName": autorest.Encode("path", accountName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetPropertiesSender sends the GetProperties request. The method will close the // http.Response Body if it receives an error. func (client AccountsClient) GetPropertiesSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetPropertiesResponder handles the response to the GetProperties request. 
The method always @@ -302,7 +304,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -311,21 +313,21 @@ // List lists all the storage accounts available under the subscription. Note // that storage keys are not returned; use the ListKeys operation for this. -func (client AccountsClient) List() (result AccountListResult, ae error) { +func (client AccountsClient) List() (result AccountListResult, err error) { req, err := client.ListPreparer() if err != nil { - return result, autorest.NewErrorWithError(err, "storage/AccountsClient", "List", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "storage.AccountsClient", "List", nil, "Failure preparing request") } resp, err := client.ListSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "storage/AccountsClient", "List", "Failure sending request") + return result, autorest.NewErrorWithError(err, "storage.AccountsClient", "List", resp, "Failure sending request") } result, err = client.ListResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "storage/AccountsClient", "List", "Failure responding to request") + err = autorest.NewErrorWithError(err, "storage.AccountsClient", "List", resp, "Failure responding to request") } return @@ -334,26 +336,25 @@ // ListPreparer prepares the List request. 
func (client AccountsClient) ListPreparer() (*http.Request, error) { pathParameters := map[string]interface{}{ - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/providers/Microsoft.Storage/storageAccounts"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Storage/storageAccounts", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client AccountsClient) ListSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // ListResponder handles the response to the List request. The method always @@ -362,7 +363,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -375,21 +376,21 @@ // // resourceGroupName is the name of the resource group within the user's // subscription. 
-func (client AccountsClient) ListByResourceGroup(resourceGroupName string) (result AccountListResult, ae error) { +func (client AccountsClient) ListByResourceGroup(resourceGroupName string) (result AccountListResult, err error) { req, err := client.ListByResourceGroupPreparer(resourceGroupName) if err != nil { - return result, autorest.NewErrorWithError(err, "storage/AccountsClient", "ListByResourceGroup", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "storage.AccountsClient", "ListByResourceGroup", nil, "Failure preparing request") } resp, err := client.ListByResourceGroupSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "storage/AccountsClient", "ListByResourceGroup", "Failure sending request") + return result, autorest.NewErrorWithError(err, "storage.AccountsClient", "ListByResourceGroup", resp, "Failure sending request") } result, err = client.ListByResourceGroupResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "storage/AccountsClient", "ListByResourceGroup", "Failure responding to request") + err = autorest.NewErrorWithError(err, "storage.AccountsClient", "ListByResourceGroup", resp, "Failure responding to request") } return @@ -398,27 +399,26 @@ // ListByResourceGroupPreparer prepares the ListByResourceGroup request. 
func (client AccountsClient) ListByResourceGroupPreparer(resourceGroupName string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the // http.Response Body if it receives an error. func (client AccountsClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always @@ -427,7 +427,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -438,21 +438,21 @@ // // resourceGroupName is the name of the resource group. accountName is the // name of the storage account. 
-func (client AccountsClient) ListKeys(resourceGroupName string, accountName string) (result AccountKeys, ae error) { +func (client AccountsClient) ListKeys(resourceGroupName string, accountName string) (result AccountListKeysResult, err error) { req, err := client.ListKeysPreparer(resourceGroupName, accountName) if err != nil { - return result, autorest.NewErrorWithError(err, "storage/AccountsClient", "ListKeys", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "storage.AccountsClient", "ListKeys", nil, "Failure preparing request") } resp, err := client.ListKeysSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "storage/AccountsClient", "ListKeys", "Failure sending request") + return result, autorest.NewErrorWithError(err, "storage.AccountsClient", "ListKeys", resp, "Failure sending request") } result, err = client.ListKeysResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "storage/AccountsClient", "ListKeys", "Failure responding to request") + err = autorest.NewErrorWithError(err, "storage.AccountsClient", "ListKeys", resp, "Failure responding to request") } return @@ -461,37 +461,36 @@ // ListKeysPreparer prepares the ListKeys request. 
func (client AccountsClient) ListKeysPreparer(resourceGroupName string, accountName string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "accountName": url.QueryEscape(accountName), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "accountName": autorest.Encode("path", accountName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsPost(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/listKeys"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/listKeys", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // ListKeysSender sends the ListKeys request. The method will close the // http.Response Body if it receives an error. func (client AccountsClient) ListKeysSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // ListKeysResponder handles the response to the ListKeys request. The method always // closes the http.Response Body. 
-func (client AccountsClient) ListKeysResponder(resp *http.Response) (result AccountKeys, err error) { +func (client AccountsClient) ListKeysResponder(resp *http.Response) (result AccountListKeysResult, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -506,21 +505,21 @@ // characters in length and use numbers and lower-case letters only. // regenerateKey is specifies name of the key which should be regenerated. // key1 or key2 for the default keys -func (client AccountsClient) RegenerateKey(resourceGroupName string, accountName string, regenerateKey AccountRegenerateKeyParameters) (result AccountKeys, ae error) { +func (client AccountsClient) RegenerateKey(resourceGroupName string, accountName string, regenerateKey AccountRegenerateKeyParameters) (result AccountListKeysResult, err error) { req, err := client.RegenerateKeyPreparer(resourceGroupName, accountName, regenerateKey) if err != nil { - return result, autorest.NewErrorWithError(err, "storage/AccountsClient", "RegenerateKey", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "storage.AccountsClient", "RegenerateKey", nil, "Failure preparing request") } resp, err := client.RegenerateKeySender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "storage/AccountsClient", "RegenerateKey", "Failure sending request") + return result, autorest.NewErrorWithError(err, "storage.AccountsClient", "RegenerateKey", resp, "Failure sending request") } result, err = client.RegenerateKeyResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "storage/AccountsClient", "RegenerateKey", "Failure responding to request") + err = autorest.NewErrorWithError(err, "storage.AccountsClient", 
"RegenerateKey", resp, "Failure responding to request") } return @@ -529,78 +528,75 @@ // RegenerateKeyPreparer prepares the RegenerateKey request. func (client AccountsClient) RegenerateKeyPreparer(resourceGroupName string, accountName string, regenerateKey AccountRegenerateKeyParameters) (*http.Request, error) { pathParameters := map[string]interface{}{ - "accountName": url.QueryEscape(accountName), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "accountName": autorest.Encode("path", accountName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, + preparer := autorest.CreatePreparer( autorest.AsJSON(), autorest.AsPost(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/regenerateKey"), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/regenerateKey", pathParameters), autorest.WithJSON(regenerateKey), - autorest.WithPathParameters(pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // RegenerateKeySender sends the RegenerateKey request. The method will close the // http.Response Body if it receives an error. func (client AccountsClient) RegenerateKeySender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // RegenerateKeyResponder handles the response to the RegenerateKey request. The method always // closes the http.Response Body. 
-func (client AccountsClient) RegenerateKeyResponder(resp *http.Response) (result AccountKeys, err error) { +func (client AccountsClient) RegenerateKeyResponder(resp *http.Response) (result AccountListKeysResult, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} return } -// Update updates the account type or tags for a storage account. It can also -// be used to add a custom domain (note that custom domains cannot be added -// via the Create operation). Only one custom domain is supported per storage -// account. In order to replace a custom domain, the old value must be -// cleared before a new value may be set. To clear a custom domain, simply -// update the custom domain with empty string. Then call update again with -// the new cutsom domain name. The update API can only be used to update one -// of tags, accountType, or customDomain per call. To update multiple of -// these properties, call the API multiple times with one change per call. -// This call does not change the storage keys for the account. If you want to -// change storage account keys, use the RegenerateKey operation. The location -// and name of the storage account cannot be changed after creation. +// Update the update operation can be used to update the account type, +// encryption, or tags for a storage account. It can also be used to map the +// account to a custom domain. Only one custom domain is supported per +// storage account and. replacement/change of custom domain is not supported. +// In order to replace an old custom domain, the old value must be +// cleared/unregistered before a new value may be set. Update of multiple +// properties is supported. This call does not change the storage keys for +// the account. 
If you want to change storage account keys, use the +// regenerate keys operation. The location and name of the storage account +// cannot be changed after creation. // // resourceGroupName is the name of the resource group within the user's // subscription. accountName is the name of the storage account within the // specified resource group. Storage account names must be between 3 and 24 // characters in length and use numbers and lower-case letters only. -// parameters is the parameters to update on the account. Note that only one -// property can be changed at a time using this API. -func (client AccountsClient) Update(resourceGroupName string, accountName string, parameters AccountUpdateParameters) (result Account, ae error) { +// parameters is the parameters to provide for the updated account. +func (client AccountsClient) Update(resourceGroupName string, accountName string, parameters AccountUpdateParameters) (result Account, err error) { req, err := client.UpdatePreparer(resourceGroupName, accountName, parameters) if err != nil { - return result, autorest.NewErrorWithError(err, "storage/AccountsClient", "Update", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "storage.AccountsClient", "Update", nil, "Failure preparing request") } resp, err := client.UpdateSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "storage/AccountsClient", "Update", "Failure sending request") + return result, autorest.NewErrorWithError(err, "storage.AccountsClient", "Update", resp, "Failure sending request") } result, err = client.UpdateResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "storage/AccountsClient", "Update", "Failure responding to request") + err = autorest.NewErrorWithError(err, "storage.AccountsClient", "Update", resp, "Failure responding to request") } return @@ -609,29 +605,29 @@ // UpdatePreparer prepares the Update request. 
func (client AccountsClient) UpdatePreparer(resourceGroupName string, accountName string, parameters AccountUpdateParameters) (*http.Request, error) { pathParameters := map[string]interface{}{ - "accountName": url.QueryEscape(accountName), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "accountName": autorest.Encode("path", accountName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, + preparer := autorest.CreatePreparer( autorest.AsJSON(), autorest.AsPatch(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}"), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}", pathParameters), autorest.WithJSON(parameters), - autorest.WithPathParameters(pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // UpdateSender sends the Update request. The method will close the // http.Response Body if it receives an error. func (client AccountsClient) UpdateSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // UpdateResponder handles the response to the Update request. 
The method always @@ -640,7 +636,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/storage/client.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/storage/client.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/storage/client.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/storage/client.go 2016-10-13 14:32:06.000000000 +0000 @@ -1,3 +1,7 @@ +// Package storage implements the Azure ARM Storage service API version +// 2016-01-01. +// +// The Storage Management Client. package storage // Copyright (c) Microsoft and contributors. All rights reserved. @@ -14,26 +18,27 @@ // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 // Changes may cause incorrect behavior and will be lost if the code is // regenerated. import ( - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest" ) const ( // APIVersion is the version of the Storage - APIVersion = "2015-06-15" + APIVersion = "2016-01-01" // DefaultBaseURI is the default URI used for the service Storage DefaultBaseURI = "https://management.azure.com" ) -// ManagementClient is the the Storage Management Client. +// ManagementClient is the base client for Storage. 
type ManagementClient struct { autorest.Client BaseURI string + APIVersion string SubscriptionID string } @@ -47,6 +52,7 @@ return ManagementClient{ Client: autorest.NewClientWithUserAgent(UserAgent()), BaseURI: baseURI, + APIVersion: APIVersion, SubscriptionID: subscriptionID, } } diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/storage/models.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/storage/models.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/storage/models.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/storage/models.go 2016-10-13 14:32:06.000000000 +0000 @@ -14,13 +14,23 @@ // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 // Changes may cause incorrect behavior and will be lost if the code is // regenerated. import ( - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/date" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/date" +) + +// AccessTier enumerates the values for access tier. +type AccessTier string + +const ( + // Cool specifies the cool state for access tier. + Cool AccessTier = "Cool" + // Hot specifies the hot state for access tier. + Hot AccessTier = "Hot" ) // AccountStatus enumerates the values for account status. @@ -33,20 +43,24 @@ Unavailable AccountStatus = "Unavailable" ) -// AccountType enumerates the values for account type. -type AccountType string +// KeyPermission enumerates the values for key permission. +type KeyPermission string const ( - // PremiumLRS specifies the premium lrs state for account type. 
- PremiumLRS AccountType = "Premium_LRS" - // StandardGRS specifies the standard grs state for account type. - StandardGRS AccountType = "Standard_GRS" - // StandardLRS specifies the standard lrs state for account type. - StandardLRS AccountType = "Standard_LRS" - // StandardRAGRS specifies the standard ragrs state for account type. - StandardRAGRS AccountType = "Standard_RAGRS" - // StandardZRS specifies the standard zrs state for account type. - StandardZRS AccountType = "Standard_ZRS" + // FULL specifies the full state for key permission. + FULL KeyPermission = "FULL" + // READ specifies the read state for key permission. + READ KeyPermission = "READ" +) + +// Kind enumerates the values for kind. +type Kind string + +const ( + // BlobStorage specifies the blob storage state for kind. + BlobStorage Kind = "BlobStorage" + // Storage specifies the storage state for kind. + Storage Kind = "Storage" ) // ProvisioningState enumerates the values for provisioning state. @@ -71,6 +85,32 @@ AlreadyExists Reason = "AlreadyExists" ) +// SkuName enumerates the values for sku name. +type SkuName string + +const ( + // PremiumLRS specifies the premium lrs state for sku name. + PremiumLRS SkuName = "Premium_LRS" + // StandardGRS specifies the standard grs state for sku name. + StandardGRS SkuName = "Standard_GRS" + // StandardLRS specifies the standard lrs state for sku name. + StandardLRS SkuName = "Standard_LRS" + // StandardRAGRS specifies the standard ragrs state for sku name. + StandardRAGRS SkuName = "Standard_RAGRS" + // StandardZRS specifies the standard zrs state for sku name. + StandardZRS SkuName = "Standard_ZRS" +) + +// SkuTier enumerates the values for sku tier. +type SkuTier string + +const ( + // Premium specifies the premium state for sku tier. + Premium SkuTier = "Premium" + // Standard specifies the standard state for sku tier. + Standard SkuTier = "Standard" +) + // UsageUnit enumerates the values for usage unit. 
type UsageUnit string @@ -97,6 +137,8 @@ Type *string `json:"type,omitempty"` Location *string `json:"location,omitempty"` Tags *map[string]*string `json:"tags,omitempty"` + Sku *Sku `json:"sku,omitempty"` + Kind Kind `json:"kind,omitempty"` Properties *AccountProperties `json:"properties,omitempty"` } @@ -108,19 +150,24 @@ // AccountCreateParameters is the parameters to provide for the account. type AccountCreateParameters struct { - ID *string `json:"id,omitempty"` - Name *string `json:"name,omitempty"` - Type *string `json:"type,omitempty"` + Sku *Sku `json:"sku,omitempty"` + Kind Kind `json:"kind,omitempty"` Location *string `json:"location,omitempty"` Tags *map[string]*string `json:"tags,omitempty"` Properties *AccountPropertiesCreateParameters `json:"properties,omitempty"` } -// AccountKeys is the access keys for the storage account. -type AccountKeys struct { +// AccountKey is an access key for the storage account. +type AccountKey struct { + KeyName *string `json:"keyName,omitempty"` + Value *string `json:"value,omitempty"` + Permissions KeyPermission `json:"permissions,omitempty"` +} + +// AccountListKeysResult is the ListKeys operation response. +type AccountListKeysResult struct { autorest.Response `json:"-"` - Key1 *string `json:"key1,omitempty"` - Key2 *string `json:"key2,omitempty"` + Keys *[]AccountKey `json:"keys,omitempty"` } // AccountListResult is the list storage accounts operation response. 
@@ -132,7 +179,6 @@ // AccountProperties is type AccountProperties struct { ProvisioningState ProvisioningState `json:"provisioningState,omitempty"` - AccountType AccountType `json:"accountType,omitempty"` PrimaryEndpoints *Endpoints `json:"primaryEndpoints,omitempty"` PrimaryLocation *string `json:"primaryLocation,omitempty"` StatusOfPrimary AccountStatus `json:"statusOfPrimary,omitempty"` @@ -142,17 +188,22 @@ CreationTime *date.Time `json:"creationTime,omitempty"` CustomDomain *CustomDomain `json:"customDomain,omitempty"` SecondaryEndpoints *Endpoints `json:"secondaryEndpoints,omitempty"` + Encryption *Encryption `json:"encryption,omitempty"` + AccessTier AccessTier `json:"accessTier,omitempty"` } // AccountPropertiesCreateParameters is type AccountPropertiesCreateParameters struct { - AccountType AccountType `json:"accountType,omitempty"` + CustomDomain *CustomDomain `json:"customDomain,omitempty"` + Encryption *Encryption `json:"encryption,omitempty"` + AccessTier AccessTier `json:"accessTier,omitempty"` } // AccountPropertiesUpdateParameters is type AccountPropertiesUpdateParameters struct { - AccountType AccountType `json:"accountType,omitempty"` CustomDomain *CustomDomain `json:"customDomain,omitempty"` + Encryption *Encryption `json:"encryption,omitempty"` + AccessTier AccessTier `json:"accessTier,omitempty"` } // AccountRegenerateKeyParameters is @@ -160,12 +211,9 @@ KeyName *string `json:"keyName,omitempty"` } -// AccountUpdateParameters is the parameters to update on the account. +// AccountUpdateParameters is the parameters to provide for the account. 
type AccountUpdateParameters struct { - ID *string `json:"id,omitempty"` - Name *string `json:"name,omitempty"` - Type *string `json:"type,omitempty"` - Location *string `json:"location,omitempty"` + Sku *Sku `json:"sku,omitempty"` Tags *map[string]*string `json:"tags,omitempty"` Properties *AccountPropertiesUpdateParameters `json:"properties,omitempty"` } @@ -185,6 +233,23 @@ UseSubDomain *bool `json:"useSubDomain,omitempty"` } +// Encryption is the encryption settings on the account. +type Encryption struct { + Services *EncryptionServices `json:"services,omitempty"` + KeySource *string `json:"keySource,omitempty"` +} + +// EncryptionService is an encrypted service. +type EncryptionService struct { + Enabled *bool `json:"enabled,omitempty"` + LastEnabledTime *date.Time `json:"lastEnabledTime,omitempty"` +} + +// EncryptionServices is the encrypted services. +type EncryptionServices struct { + Blob *EncryptionService `json:"blob,omitempty"` +} + // Endpoints is the URIs that are used to perform a retrieval of a public // blob, queue or table object. type Endpoints struct { @@ -203,16 +268,17 @@ Tags *map[string]*string `json:"tags,omitempty"` } -// SubResource is -type SubResource struct { - ID *string `json:"id,omitempty"` +// Sku is the SKU of the storage account. +type Sku struct { + Name SkuName `json:"name,omitempty"` + Tier SkuTier `json:"tier,omitempty"` } // Usage is describes Storage Resource Usage. 
type Usage struct { Unit UsageUnit `json:"unit,omitempty"` - CurrentValue *int `json:"currentValue,omitempty"` - Limit *int `json:"limit,omitempty"` + CurrentValue *int32 `json:"currentValue,omitempty"` + Limit *int32 `json:"limit,omitempty"` Name *UsageName `json:"name,omitempty"` } diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/storage/usageoperations.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/storage/usageoperations.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/storage/usageoperations.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/storage/usageoperations.go 2016-10-13 14:32:06.000000000 +0000 @@ -14,14 +14,14 @@ // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 // Changes may cause incorrect behavior and will be lost if the code is // regenerated. import ( - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" "net/http" - "net/url" ) // UsageOperationsClient is the the Storage Management Client. @@ -43,50 +43,48 @@ // List gets the current usage count and the limit for the resources under the // subscription. 
-// -func (client UsageOperationsClient) List(apiVersion string) (result UsageListResult, ae error) { - req, err := client.ListPreparer(apiVersion) +func (client UsageOperationsClient) List() (result UsageListResult, err error) { + req, err := client.ListPreparer() if err != nil { - return result, autorest.NewErrorWithError(err, "storage/UsageOperationsClient", "List", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "storage.UsageOperationsClient", "List", nil, "Failure preparing request") } resp, err := client.ListSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "storage/UsageOperationsClient", "List", "Failure sending request") + return result, autorest.NewErrorWithError(err, "storage.UsageOperationsClient", "List", resp, "Failure sending request") } result, err = client.ListResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "storage/UsageOperationsClient", "List", "Failure responding to request") + err = autorest.NewErrorWithError(err, "storage.UsageOperationsClient", "List", resp, "Failure responding to request") } return } // ListPreparer prepares the List request. 
-func (client UsageOperationsClient) ListPreparer(apiVersion string) (*http.Request, error) { +func (client UsageOperationsClient) ListPreparer() (*http.Request, error) { pathParameters := map[string]interface{}{ - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/providers/Microsoft.Storage/usages"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Storage/usages", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client UsageOperationsClient) ListSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // ListResponder handles the response to the List request. 
The method always @@ -95,7 +93,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/storage/version.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/storage/version.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/storage/version.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/storage/version.go 2016-10-13 14:32:06.000000000 +0000 @@ -14,7 +14,7 @@ // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 // Changes may cause incorrect behavior and will be lost if the code is // regenerated. @@ -23,18 +23,18 @@ ) const ( - major = "0" - minor = "3" + major = "3" + minor = "1" patch = "0" // Always begin a "tag" with a dash (as per http://semver.org) tag = "-beta" semVerFormat = "%s.%s.%s%s" - userAgentFormat = "Azure-SDK-for-Go/%s;Package arm/%s;API %s" + userAgentFormat = "Azure-SDK-for-Go/%s arm-%s/%s" ) // UserAgent returns the UserAgent string to use when sending http.Requests. func UserAgent() string { - return fmt.Sprintf(userAgentFormat, Version(), "storage", "2015-06-15") + return fmt.Sprintf(userAgentFormat, Version(), "storage", "2016-01-01") } // Version returns the semantic version (see http://semver.org) of the client. 
diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/subscriptions/client.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/subscriptions/client.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/subscriptions/client.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/subscriptions/client.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,259 +0,0 @@ -package subscriptions - -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. -// -// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 -// Changes may cause incorrect behavior and will be lost if the code is -// regenerated. - -import ( - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" - "net/http" - "net/url" -) - -const ( - // APIVersion is the version of the Subscriptions - APIVersion = "2014-04-01-preview" - - // DefaultBaseURI is the default URI used for the service Subscriptions - DefaultBaseURI = "https://management.azure.com" -) - -// ManagementClient is the base client for Subscriptions. -type ManagementClient struct { - autorest.Client - BaseURI string - SubscriptionID string -} - -// New creates an instance of the ManagementClient client. 
-func New(subscriptionID string) ManagementClient { - return NewWithBaseURI(DefaultBaseURI, subscriptionID) -} - -// NewWithBaseURI creates an instance of the ManagementClient client. -func NewWithBaseURI(baseURI string, subscriptionID string) ManagementClient { - return ManagementClient{ - Client: autorest.NewClientWithUserAgent(UserAgent()), - BaseURI: baseURI, - SubscriptionID: subscriptionID, - } -} - -// Get gets details about particular subscription. -// -// subscriptionID is id of the subscription. -func (client ManagementClient) Get(subscriptionID string) (result Subscription, ae error) { - req, err := client.GetPreparer(subscriptionID) - if err != nil { - return result, autorest.NewErrorWithError(err, "subscriptions/ManagementClient", "Get", "Failure preparing request") - } - - resp, err := client.GetSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "subscriptions/ManagementClient", "Get", "Failure sending request") - } - - result, err = client.GetResponder(resp) - if err != nil { - ae = autorest.NewErrorWithError(err, "subscriptions/ManagementClient", "Get", "Failure responding to request") - } - - return -} - -// GetPreparer prepares the Get request. -func (client ManagementClient) GetPreparer(subscriptionID string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "subscriptionId": url.QueryEscape(subscriptionID), - } - - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}"), - autorest.WithPathParameters(pathParameters), - autorest.WithQueryParameters(queryParameters)) -} - -// GetSender sends the Get request. The method will close the -// http.Response Body if it receives an error. 
-func (client ManagementClient) GetSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) -} - -// GetResponder handles the response to the Get request. The method always -// closes the http.Response Body. -func (client ManagementClient) GetResponder(resp *http.Response) (result Subscription, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// List gets a list of the subscriptionIds. -func (client ManagementClient) List() (result SubscriptionListResult, ae error) { - req, err := client.ListPreparer() - if err != nil { - return result, autorest.NewErrorWithError(err, "subscriptions/ManagementClient", "List", "Failure preparing request") - } - - resp, err := client.ListSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "subscriptions/ManagementClient", "List", "Failure sending request") - } - - result, err = client.ListResponder(resp) - if err != nil { - ae = autorest.NewErrorWithError(err, "subscriptions/ManagementClient", "List", "Failure responding to request") - } - - return -} - -// ListPreparer prepares the List request. -func (client ManagementClient) ListPreparer() (*http.Request, error) { - pathParameters := map[string]interface{}{ - "subscriptionId": url.QueryEscape(client.SubscriptionID), - } - - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions"), - autorest.WithPathParameters(pathParameters), - autorest.WithQueryParameters(queryParameters)) -} - -// ListSender sends the List request. 
The method will close the -// http.Response Body if it receives an error. -func (client ManagementClient) ListSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) -} - -// ListResponder handles the response to the List request. The method always -// closes the http.Response Body. -func (client ManagementClient) ListResponder(resp *http.Response) (result SubscriptionListResult, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// ListNextResults retrieves the next set of results, if any. -func (client ManagementClient) ListNextResults(lastResults SubscriptionListResult) (result SubscriptionListResult, ae error) { - req, err := lastResults.SubscriptionListResultPreparer() - if err != nil { - return result, autorest.NewErrorWithError(err, "subscriptions/ManagementClient", "List", "Failure preparing next results request request") - } - if req == nil { - return - } - - resp, err := client.ListSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "subscriptions/ManagementClient", "List", "Failure sending next results request request") - } - - result, err = client.ListResponder(resp) - if err != nil { - ae = autorest.NewErrorWithError(err, "subscriptions/ManagementClient", "List", "Failure responding to next results request request") - } - - return -} - -// ListLocations gets a list of the subscription locations. 
-// -// subscriptionID is id of the subscription -func (client ManagementClient) ListLocations(subscriptionID string) (result LocationListResult, ae error) { - req, err := client.ListLocationsPreparer(subscriptionID) - if err != nil { - return result, autorest.NewErrorWithError(err, "subscriptions/ManagementClient", "ListLocations", "Failure preparing request") - } - - resp, err := client.ListLocationsSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "subscriptions/ManagementClient", "ListLocations", "Failure sending request") - } - - result, err = client.ListLocationsResponder(resp) - if err != nil { - ae = autorest.NewErrorWithError(err, "subscriptions/ManagementClient", "ListLocations", "Failure responding to request") - } - - return -} - -// ListLocationsPreparer prepares the ListLocations request. -func (client ManagementClient) ListLocationsPreparer(subscriptionID string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "subscriptionId": url.QueryEscape(subscriptionID), - } - - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/locations"), - autorest.WithPathParameters(pathParameters), - autorest.WithQueryParameters(queryParameters)) -} - -// ListLocationsSender sends the ListLocations request. The method will close the -// http.Response Body if it receives an error. -func (client ManagementClient) ListLocationsSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) -} - -// ListLocationsResponder handles the response to the ListLocations request. The method always -// closes the http.Response Body. 
-func (client ManagementClient) ListLocationsResponder(resp *http.Response) (result LocationListResult, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/subscriptions/models.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/subscriptions/models.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/subscriptions/models.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/subscriptions/models.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,126 +0,0 @@ -package subscriptions - -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. -// -// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 -// Changes may cause incorrect behavior and will be lost if the code is -// regenerated. - -import ( - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/to" - "net/http" -) - -// DeploymentExtendedFilter is deployment filter. 
-type DeploymentExtendedFilter struct { - ProvisioningState *string `json:"provisioningState,omitempty"` -} - -// GenericResourceFilter is resource filter. -type GenericResourceFilter struct { - ResourceType *string `json:"resourceType,omitempty"` - Tagname *string `json:"tagname,omitempty"` - Tagvalue *string `json:"tagvalue,omitempty"` -} - -// Location is location information. -type Location struct { - ID *string `json:"id,omitempty"` - SubscriptionID *string `json:"subscriptionId,omitempty"` - Name *string `json:"name,omitempty"` - DisplayName *string `json:"displayName,omitempty"` - Latitude *string `json:"latitude,omitempty"` - Longitude *string `json:"longitude,omitempty"` -} - -// LocationListResult is location list operation response. -type LocationListResult struct { - autorest.Response `json:"-"` - Value *[]Location `json:"value,omitempty"` -} - -// Resource is -type Resource struct { - ID *string `json:"id,omitempty"` - Name *string `json:"name,omitempty"` - Type *string `json:"type,omitempty"` - Location *string `json:"location,omitempty"` - Tags *map[string]*string `json:"tags,omitempty"` -} - -// ResourceGroupFilter is resource group filter. -type ResourceGroupFilter struct { - TagName *string `json:"tagName,omitempty"` - TagValue *string `json:"tagValue,omitempty"` -} - -// SubResource is -type SubResource struct { - ID *string `json:"id,omitempty"` -} - -// Subscription is subscription information. -type Subscription struct { - autorest.Response `json:"-"` - ID *string `json:"id,omitempty"` - SubscriptionID *string `json:"subscriptionId,omitempty"` - DisplayName *string `json:"displayName,omitempty"` - State *string `json:"state,omitempty"` -} - -// SubscriptionListResult is subscription list operation response. 
-type SubscriptionListResult struct { - autorest.Response `json:"-"` - Value *[]Subscription `json:"value,omitempty"` - NextLink *string `json:"nextLink,omitempty"` -} - -// SubscriptionListResultPreparer prepares a request to retrieve the next set of results. It returns -// nil if no more results exist. -func (client SubscriptionListResult) SubscriptionListResultPreparer() (*http.Request, error) { - if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { - return nil, nil - } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(to.String(client.NextLink))) -} - -// TenantIDDescription is tenant Id information -type TenantIDDescription struct { - ID *string `json:"id,omitempty"` - TenantID *string `json:"tenantId,omitempty"` -} - -// TenantListResult is tenant Ids information. -type TenantListResult struct { - autorest.Response `json:"-"` - Value *[]TenantIDDescription `json:"value,omitempty"` - NextLink *string `json:"nextLink,omitempty"` -} - -// TenantListResultPreparer prepares a request to retrieve the next set of results. It returns -// nil if no more results exist. 
-func (client TenantListResult) TenantListResultPreparer() (*http.Request, error) { - if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { - return nil, nil - } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(to.String(client.NextLink))) -} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/subscriptions/subscriptions.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/subscriptions/subscriptions.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/subscriptions/subscriptions.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/subscriptions/subscriptions.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,246 +0,0 @@ -package subscriptions - -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. -// -// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 -// Changes may cause incorrect behavior and will be lost if the code is -// regenerated. - -import ( - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" - "net/http" - "net/url" -) - -// Client is the client for the Subscriptions methods of the Subscriptions -// service. -type Client struct { - ManagementClient -} - -// NewClient creates an instance of the Client client. 
-func NewClient(subscriptionID string) Client { - return NewClientWithBaseURI(DefaultBaseURI, subscriptionID) -} - -// NewClientWithBaseURI creates an instance of the Client client. -func NewClientWithBaseURI(baseURI string, subscriptionID string) Client { - return Client{NewWithBaseURI(baseURI, subscriptionID)} -} - -// Get gets details about particular subscription. -// -// subscriptionID is id of the subscription. -func (client Client) Get(subscriptionID string) (result Subscription, ae error) { - req, err := client.GetPreparer(subscriptionID) - if err != nil { - return result, autorest.NewErrorWithError(err, "subscriptions/Client", "Get", "Failure preparing request") - } - - resp, err := client.GetSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "subscriptions/Client", "Get", "Failure sending request") - } - - result, err = client.GetResponder(resp) - if err != nil { - ae = autorest.NewErrorWithError(err, "subscriptions/Client", "Get", "Failure responding to request") - } - - return -} - -// GetPreparer prepares the Get request. -func (client Client) GetPreparer(subscriptionID string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "subscriptionId": url.QueryEscape(subscriptionID), - } - - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}"), - autorest.WithPathParameters(pathParameters), - autorest.WithQueryParameters(queryParameters)) -} - -// GetSender sends the Get request. The method will close the -// http.Response Body if it receives an error. -func (client Client) GetSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) -} - -// GetResponder handles the response to the Get request. 
The method always -// closes the http.Response Body. -func (client Client) GetResponder(resp *http.Response) (result Subscription, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// List gets a list of the subscriptionIds. -func (client Client) List() (result SubscriptionListResult, ae error) { - req, err := client.ListPreparer() - if err != nil { - return result, autorest.NewErrorWithError(err, "subscriptions/Client", "List", "Failure preparing request") - } - - resp, err := client.ListSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "subscriptions/Client", "List", "Failure sending request") - } - - result, err = client.ListResponder(resp) - if err != nil { - ae = autorest.NewErrorWithError(err, "subscriptions/Client", "List", "Failure responding to request") - } - - return -} - -// ListPreparer prepares the List request. -func (client Client) ListPreparer() (*http.Request, error) { - pathParameters := map[string]interface{}{ - "subscriptionId": url.QueryEscape(client.SubscriptionID), - } - - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions"), - autorest.WithPathParameters(pathParameters), - autorest.WithQueryParameters(queryParameters)) -} - -// ListSender sends the List request. The method will close the -// http.Response Body if it receives an error. -func (client Client) ListSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) -} - -// ListResponder handles the response to the List request. The method always -// closes the http.Response Body. 
-func (client Client) ListResponder(resp *http.Response) (result SubscriptionListResult, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// ListNextResults retrieves the next set of results, if any. -func (client Client) ListNextResults(lastResults SubscriptionListResult) (result SubscriptionListResult, ae error) { - req, err := lastResults.SubscriptionListResultPreparer() - if err != nil { - return result, autorest.NewErrorWithError(err, "subscriptions/Client", "List", "Failure preparing next results request request") - } - if req == nil { - return - } - - resp, err := client.ListSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "subscriptions/Client", "List", "Failure sending next results request request") - } - - result, err = client.ListResponder(resp) - if err != nil { - ae = autorest.NewErrorWithError(err, "subscriptions/Client", "List", "Failure responding to next results request request") - } - - return -} - -// ListLocations gets a list of the subscription locations. 
-// -// subscriptionID is id of the subscription -func (client Client) ListLocations(subscriptionID string) (result LocationListResult, ae error) { - req, err := client.ListLocationsPreparer(subscriptionID) - if err != nil { - return result, autorest.NewErrorWithError(err, "subscriptions/Client", "ListLocations", "Failure preparing request") - } - - resp, err := client.ListLocationsSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "subscriptions/Client", "ListLocations", "Failure sending request") - } - - result, err = client.ListLocationsResponder(resp) - if err != nil { - ae = autorest.NewErrorWithError(err, "subscriptions/Client", "ListLocations", "Failure responding to request") - } - - return -} - -// ListLocationsPreparer prepares the ListLocations request. -func (client Client) ListLocationsPreparer(subscriptionID string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "subscriptionId": url.QueryEscape(subscriptionID), - } - - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/locations"), - autorest.WithPathParameters(pathParameters), - autorest.WithQueryParameters(queryParameters)) -} - -// ListLocationsSender sends the ListLocations request. The method will close the -// http.Response Body if it receives an error. -func (client Client) ListLocationsSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) -} - -// ListLocationsResponder handles the response to the ListLocations request. The method always -// closes the http.Response Body. 
-func (client Client) ListLocationsResponder(resp *http.Response) (result LocationListResult, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/subscriptions/tenants.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/subscriptions/tenants.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/subscriptions/tenants.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/subscriptions/tenants.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,124 +0,0 @@ -package subscriptions - -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. -// -// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 -// Changes may cause incorrect behavior and will be lost if the code is -// regenerated. - -import ( - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" - "net/http" - "net/url" -) - -// TenantsClient is the client for the Tenants methods of the Subscriptions -// service. -type TenantsClient struct { - ManagementClient -} - -// NewTenantsClient creates an instance of the TenantsClient client. 
-func NewTenantsClient(subscriptionID string) TenantsClient { - return NewTenantsClientWithBaseURI(DefaultBaseURI, subscriptionID) -} - -// NewTenantsClientWithBaseURI creates an instance of the TenantsClient client. -func NewTenantsClientWithBaseURI(baseURI string, subscriptionID string) TenantsClient { - return TenantsClient{NewWithBaseURI(baseURI, subscriptionID)} -} - -// List gets a list of the tenantIds. -func (client TenantsClient) List() (result TenantListResult, ae error) { - req, err := client.ListPreparer() - if err != nil { - return result, autorest.NewErrorWithError(err, "subscriptions/TenantsClient", "List", "Failure preparing request") - } - - resp, err := client.ListSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "subscriptions/TenantsClient", "List", "Failure sending request") - } - - result, err = client.ListResponder(resp) - if err != nil { - ae = autorest.NewErrorWithError(err, "subscriptions/TenantsClient", "List", "Failure responding to request") - } - - return -} - -// ListPreparer prepares the List request. -func (client TenantsClient) ListPreparer() (*http.Request, error) { - pathParameters := map[string]interface{}{ - "subscriptionId": url.QueryEscape(client.SubscriptionID), - } - - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/tenants"), - autorest.WithPathParameters(pathParameters), - autorest.WithQueryParameters(queryParameters)) -} - -// ListSender sends the List request. The method will close the -// http.Response Body if it receives an error. -func (client TenantsClient) ListSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) -} - -// ListResponder handles the response to the List request. 
The method always -// closes the http.Response Body. -func (client TenantsClient) ListResponder(resp *http.Response) (result TenantListResult, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// ListNextResults retrieves the next set of results, if any. -func (client TenantsClient) ListNextResults(lastResults TenantListResult) (result TenantListResult, ae error) { - req, err := lastResults.TenantListResultPreparer() - if err != nil { - return result, autorest.NewErrorWithError(err, "subscriptions/TenantsClient", "List", "Failure preparing next results request request") - } - if req == nil { - return - } - - resp, err := client.ListSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "subscriptions/TenantsClient", "List", "Failure sending next results request request") - } - - result, err = client.ListResponder(resp) - if err != nil { - ae = autorest.NewErrorWithError(err, "subscriptions/TenantsClient", "List", "Failure responding to next results request request") - } - - return -} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/subscriptions/version.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/subscriptions/version.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/subscriptions/version.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/subscriptions/version.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,43 +0,0 @@ -package subscriptions - -// Copyright (c) Microsoft and contributors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. -// -// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 -// Changes may cause incorrect behavior and will be lost if the code is -// regenerated. - -import ( - "fmt" -) - -const ( - major = "0" - minor = "3" - patch = "0" - // Always begin a "tag" with a dash (as per http://semver.org) - tag = "-beta" - semVerFormat = "%s.%s.%s%s" - userAgentFormat = "Azure-SDK-for-Go/%s;Package arm/%s;API %s" -) - -// UserAgent returns the UserAgent string to use when sending http.Requests. -func UserAgent() string { - return fmt.Sprintf(userAgentFormat, Version(), "subscriptions", "2014-04-01-preview") -} - -// Version returns the semantic version (see http://semver.org) of the client. -func Version() string { - return fmt.Sprintf(semVerFormat, major, minor, patch, tag) -} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/trafficmanager/client.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/trafficmanager/client.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/trafficmanager/client.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/trafficmanager/client.go 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,57 @@ +// Package trafficmanager implements the Azure ARM Trafficmanager service API +// version 2015-11-01. +// +package trafficmanager + +// Copyright (c) Microsoft and contributors. All rights reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" +) + +const ( + // APIVersion is the version of the Trafficmanager + APIVersion = "2015-11-01" + + // DefaultBaseURI is the default URI used for the service Trafficmanager + DefaultBaseURI = "https://management.azure.com" +) + +// ManagementClient is the base client for Trafficmanager. +type ManagementClient struct { + autorest.Client + BaseURI string + APIVersion string + SubscriptionID string +} + +// New creates an instance of the ManagementClient client. +func New(subscriptionID string) ManagementClient { + return NewWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewWithBaseURI creates an instance of the ManagementClient client. 
+func NewWithBaseURI(baseURI string, subscriptionID string) ManagementClient { + return ManagementClient{ + Client: autorest.NewClientWithUserAgent(UserAgent()), + BaseURI: baseURI, + APIVersion: APIVersion, + SubscriptionID: subscriptionID, + } +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/trafficmanager/endpoints.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/trafficmanager/endpoints.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/trafficmanager/endpoints.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/trafficmanager/endpoints.go 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,318 @@ +package trafficmanager + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "net/http" +) + +// EndpointsClient is the client for the Endpoints methods of the +// Trafficmanager service. +type EndpointsClient struct { + ManagementClient +} + +// NewEndpointsClient creates an instance of the EndpointsClient client. 
+func NewEndpointsClient(subscriptionID string) EndpointsClient { + return NewEndpointsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewEndpointsClientWithBaseURI creates an instance of the EndpointsClient +// client. +func NewEndpointsClientWithBaseURI(baseURI string, subscriptionID string) EndpointsClient { + return EndpointsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate create or update a Traffic Manager endpoint. +// +// resourceGroupName is the name of the resource group containing the Traffic +// Manager endpoint to be created or updated. profileName is the name of the +// Traffic Manager profile. endpointType is the type of the Traffic Manager +// endpoint to be created or updated. endpointName is the name of the Traffic +// Manager endpoint to be created or updated. parameters is the Traffic +// Manager endpoint parameters supplied to the CreateOrUpdate operation. +func (client EndpointsClient) CreateOrUpdate(resourceGroupName string, profileName string, endpointType string, endpointName string, parameters Endpoint) (result Endpoint, err error) { + req, err := client.CreateOrUpdatePreparer(resourceGroupName, profileName, endpointType, endpointName, parameters) + if err != nil { + return result, autorest.NewErrorWithError(err, "trafficmanager.EndpointsClient", "CreateOrUpdate", nil, "Failure preparing request") + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "trafficmanager.EndpointsClient", "CreateOrUpdate", resp, "Failure sending request") + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "trafficmanager.EndpointsClient", "CreateOrUpdate", resp, "Failure responding to request") + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. 
+func (client EndpointsClient) CreateOrUpdatePreparer(resourceGroupName string, profileName string, endpointType string, endpointName string, parameters Endpoint) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "endpointName": autorest.Encode("path", endpointName), + "endpointType": autorest.Encode("path", endpointType), + "profileName": autorest.Encode("path", profileName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/trafficmanagerprofiles/{profileName}/{endpointType}/{endpointName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client EndpointsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client EndpointsClient) CreateOrUpdateResponder(resp *http.Response) (result Endpoint, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete deletes a Traffic Manager endpoint. 
+// +// resourceGroupName is the name of the resource group containing the Traffic +// Manager endpoint to be deleted. profileName is the name of the Traffic +// Manager profile. endpointType is the type of the Traffic Manager endpoint +// to be deleted. endpointName is the name of the Traffic Manager endpoint to +// be deleted. +func (client EndpointsClient) Delete(resourceGroupName string, profileName string, endpointType string, endpointName string) (result autorest.Response, err error) { + req, err := client.DeletePreparer(resourceGroupName, profileName, endpointType, endpointName) + if err != nil { + return result, autorest.NewErrorWithError(err, "trafficmanager.EndpointsClient", "Delete", nil, "Failure preparing request") + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "trafficmanager.EndpointsClient", "Delete", resp, "Failure sending request") + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "trafficmanager.EndpointsClient", "Delete", resp, "Failure responding to request") + } + + return +} + +// DeletePreparer prepares the Delete request. 
+func (client EndpointsClient) DeletePreparer(resourceGroupName string, profileName string, endpointType string, endpointName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "endpointName": autorest.Encode("path", endpointName), + "endpointType": autorest.Encode("path", endpointType), + "profileName": autorest.Encode("path", profileName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/trafficmanagerprofiles/{profileName}/{endpointType}/{endpointName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client EndpointsClient) DeleteSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client EndpointsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get gets a Traffic Manager endpoint. +// +// resourceGroupName is the name of the resource group containing the Traffic +// Manager endpoint. profileName is the name of the Traffic Manager profile. +// endpointType is the type of the Traffic Manager endpoint. 
endpointName is +// the name of the Traffic Manager endpoint. +func (client EndpointsClient) Get(resourceGroupName string, profileName string, endpointType string, endpointName string) (result Endpoint, err error) { + req, err := client.GetPreparer(resourceGroupName, profileName, endpointType, endpointName) + if err != nil { + return result, autorest.NewErrorWithError(err, "trafficmanager.EndpointsClient", "Get", nil, "Failure preparing request") + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "trafficmanager.EndpointsClient", "Get", resp, "Failure sending request") + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "trafficmanager.EndpointsClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client EndpointsClient) GetPreparer(resourceGroupName string, profileName string, endpointType string, endpointName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "endpointName": autorest.Encode("path", endpointName), + "endpointType": autorest.Encode("path", endpointType), + "profileName": autorest.Encode("path", profileName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/trafficmanagerprofiles/{profileName}/{endpointType}/{endpointName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetSender sends the Get request. 
The method will close the +// http.Response Body if it receives an error. +func (client EndpointsClient) GetSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client EndpointsClient) GetResponder(resp *http.Response) (result Endpoint, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Update update a Traffic Manager endpoint. +// +// resourceGroupName is the name of the resource group containing the Traffic +// Manager endpoint to be updated. profileName is the name of the Traffic +// Manager profile. endpointType is the type of the Traffic Manager endpoint +// to be updated. endpointName is the name of the Traffic Manager endpoint to +// be updated. parameters is the Traffic Manager endpoint parameters supplied +// to the Update operation. 
+func (client EndpointsClient) Update(resourceGroupName string, profileName string, endpointType string, endpointName string, parameters Endpoint) (result Endpoint, err error) { + req, err := client.UpdatePreparer(resourceGroupName, profileName, endpointType, endpointName, parameters) + if err != nil { + return result, autorest.NewErrorWithError(err, "trafficmanager.EndpointsClient", "Update", nil, "Failure preparing request") + } + + resp, err := client.UpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "trafficmanager.EndpointsClient", "Update", resp, "Failure sending request") + } + + result, err = client.UpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "trafficmanager.EndpointsClient", "Update", resp, "Failure responding to request") + } + + return +} + +// UpdatePreparer prepares the Update request. +func (client EndpointsClient) UpdatePreparer(resourceGroupName string, profileName string, endpointType string, endpointName string, parameters Endpoint) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "endpointName": autorest.Encode("path", endpointName), + "endpointType": autorest.Encode("path", endpointType), + "profileName": autorest.Encode("path", profileName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPatch(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/trafficmanagerprofiles/{profileName}/{endpointType}/{endpointName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return 
preparer.Prepare(&http.Request{}) +} + +// UpdateSender sends the Update request. The method will close the +// http.Response Body if it receives an error. +func (client EndpointsClient) UpdateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// UpdateResponder handles the response to the Update request. The method always +// closes the http.Response Body. +func (client EndpointsClient) UpdateResponder(resp *http.Response) (result Endpoint, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/trafficmanager/models.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/trafficmanager/models.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/trafficmanager/models.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/trafficmanager/models.go 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,120 @@ +package trafficmanager + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" +) + +// CheckTrafficManagerRelativeDNSNameAvailabilityParameters is parameters +// supplied to check Traffic Manager name operation. +type CheckTrafficManagerRelativeDNSNameAvailabilityParameters struct { + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` +} + +// DNSConfig is class containing DNS settings in a Traffic Manager profile. +type DNSConfig struct { + RelativeName *string `json:"relativeName,omitempty"` + Fqdn *string `json:"fqdn,omitempty"` + TTL *int64 `json:"ttl,omitempty"` +} + +// Endpoint is class respresenting a Traffic Manager endpoint. +type Endpoint struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Properties *EndpointProperties `json:"properties,omitempty"` +} + +// EndpointProperties is class respresenting a Traffic Manager endpoint +// properties. +type EndpointProperties struct { + TargetResourceID *string `json:"targetResourceId,omitempty"` + Target *string `json:"target,omitempty"` + EndpointStatus *string `json:"endpointStatus,omitempty"` + Weight *int64 `json:"weight,omitempty"` + Priority *int64 `json:"priority,omitempty"` + EndpointLocation *string `json:"endpointLocation,omitempty"` + EndpointMonitorStatus *string `json:"endpointMonitorStatus,omitempty"` + MinChildEndpoints *int64 `json:"minChildEndpoints,omitempty"` +} + +// MonitorConfig is class containing endpoint monitoring settings in a Traffic +// Manager profile. 
+type MonitorConfig struct { + ProfileMonitorStatus *string `json:"profileMonitorStatus,omitempty"` + Protocol *string `json:"protocol,omitempty"` + Port *int64 `json:"port,omitempty"` + Path *string `json:"path,omitempty"` +} + +// NameAvailability is class representing a Traffic Manager Name Availability +// response. +type NameAvailability struct { + autorest.Response `json:"-"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + NameAvailable *bool `json:"nameAvailable,omitempty"` + Reason *string `json:"reason,omitempty"` + Message *string `json:"message,omitempty"` +} + +// Profile is class representing a Traffic Manager profile. +type Profile struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` + Properties *ProfileProperties `json:"properties,omitempty"` +} + +// ProfileListResult is the list Traffic Manager profiles operation response. +type ProfileListResult struct { + autorest.Response `json:"-"` + Value *[]Profile `json:"value,omitempty"` +} + +// ProfileProperties is class representing the Traffic Manager profile +// properties. 
+type ProfileProperties struct { + ProfileStatus *string `json:"profileStatus,omitempty"` + TrafficRoutingMethod *string `json:"trafficRoutingMethod,omitempty"` + DNSConfig *DNSConfig `json:"dnsConfig,omitempty"` + MonitorConfig *MonitorConfig `json:"monitorConfig,omitempty"` + Endpoints *[]Endpoint `json:"endpoints,omitempty"` +} + +// Resource is +type Resource struct { + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` +} + +// SubResource is +type SubResource struct { + ID *string `json:"id,omitempty"` +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/trafficmanager/profiles.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/trafficmanager/profiles.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/trafficmanager/profiles.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/trafficmanager/profiles.go 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,487 @@ +package trafficmanager + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. 
+ +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "net/http" +) + +// ProfilesClient is the client for the Profiles methods of the Trafficmanager +// service. +type ProfilesClient struct { + ManagementClient +} + +// NewProfilesClient creates an instance of the ProfilesClient client. +func NewProfilesClient(subscriptionID string) ProfilesClient { + return NewProfilesClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewProfilesClientWithBaseURI creates an instance of the ProfilesClient +// client. +func NewProfilesClientWithBaseURI(baseURI string, subscriptionID string) ProfilesClient { + return ProfilesClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CheckTrafficManagerRelativeDNSNameAvailability checks the availability of a +// Traffic Manager Relative DNS name. +// +// parameters is the Traffic Manager name parameters supplied to the +// CheckTrafficManagerNameAvailability operation. +func (client ProfilesClient) CheckTrafficManagerRelativeDNSNameAvailability(parameters CheckTrafficManagerRelativeDNSNameAvailabilityParameters) (result NameAvailability, err error) { + req, err := client.CheckTrafficManagerRelativeDNSNameAvailabilityPreparer(parameters) + if err != nil { + return result, autorest.NewErrorWithError(err, "trafficmanager.ProfilesClient", "CheckTrafficManagerRelativeDNSNameAvailability", nil, "Failure preparing request") + } + + resp, err := client.CheckTrafficManagerRelativeDNSNameAvailabilitySender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "trafficmanager.ProfilesClient", "CheckTrafficManagerRelativeDNSNameAvailability", resp, "Failure sending request") + } + + result, err = client.CheckTrafficManagerRelativeDNSNameAvailabilityResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "trafficmanager.ProfilesClient", "CheckTrafficManagerRelativeDNSNameAvailability", resp, "Failure 
responding to request") + } + + return +} + +// CheckTrafficManagerRelativeDNSNameAvailabilityPreparer prepares the CheckTrafficManagerRelativeDNSNameAvailability request. +func (client ProfilesClient) CheckTrafficManagerRelativeDNSNameAvailabilityPreparer(parameters CheckTrafficManagerRelativeDNSNameAvailabilityParameters) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/providers/Microsoft.Network/checkTrafficManagerNameAvailability", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// CheckTrafficManagerRelativeDNSNameAvailabilitySender sends the CheckTrafficManagerRelativeDNSNameAvailability request. The method will close the +// http.Response Body if it receives an error. +func (client ProfilesClient) CheckTrafficManagerRelativeDNSNameAvailabilitySender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// CheckTrafficManagerRelativeDNSNameAvailabilityResponder handles the response to the CheckTrafficManagerRelativeDNSNameAvailability request. The method always +// closes the http.Response Body. +func (client ProfilesClient) CheckTrafficManagerRelativeDNSNameAvailabilityResponder(resp *http.Response) (result NameAvailability, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// CreateOrUpdate create or update a Traffic Manager profile. 
+// +// resourceGroupName is the name of the resource group containing the Traffic +// Manager profile. profileName is the name of the Traffic Manager profile. +// parameters is the Traffic Manager profile parameters supplied to the +// CreateOrUpdate operation. +func (client ProfilesClient) CreateOrUpdate(resourceGroupName string, profileName string, parameters Profile) (result Profile, err error) { + req, err := client.CreateOrUpdatePreparer(resourceGroupName, profileName, parameters) + if err != nil { + return result, autorest.NewErrorWithError(err, "trafficmanager.ProfilesClient", "CreateOrUpdate", nil, "Failure preparing request") + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "trafficmanager.ProfilesClient", "CreateOrUpdate", resp, "Failure sending request") + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "trafficmanager.ProfilesClient", "CreateOrUpdate", resp, "Failure responding to request") + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. 
+func (client ProfilesClient) CreateOrUpdatePreparer(resourceGroupName string, profileName string, parameters Profile) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "profileName": autorest.Encode("path", profileName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/trafficmanagerprofiles/{profileName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client ProfilesClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client ProfilesClient) CreateOrUpdateResponder(resp *http.Response) (result Profile, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete deletes a Traffic Manager profile. +// +// resourceGroupName is the name of the resource group containing the Traffic +// Manager profile to be deleted. profileName is the name of the Traffic +// Manager profile to be deleted. 
+func (client ProfilesClient) Delete(resourceGroupName string, profileName string) (result autorest.Response, err error) { + req, err := client.DeletePreparer(resourceGroupName, profileName) + if err != nil { + return result, autorest.NewErrorWithError(err, "trafficmanager.ProfilesClient", "Delete", nil, "Failure preparing request") + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "trafficmanager.ProfilesClient", "Delete", resp, "Failure sending request") + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "trafficmanager.ProfilesClient", "Delete", resp, "Failure responding to request") + } + + return +} + +// DeletePreparer prepares the Delete request. +func (client ProfilesClient) DeletePreparer(resourceGroupName string, profileName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "profileName": autorest.Encode("path", profileName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/trafficmanagerprofiles/{profileName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client ProfilesClient) DeleteSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// DeleteResponder handles the response to the Delete request. 
The method always +// closes the http.Response Body. +func (client ProfilesClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get gets a Traffic Manager profile. +// +// resourceGroupName is the name of the resource group containing the Traffic +// Manager profile. profileName is the name of the Traffic Manager profile. +func (client ProfilesClient) Get(resourceGroupName string, profileName string) (result Profile, err error) { + req, err := client.GetPreparer(resourceGroupName, profileName) + if err != nil { + return result, autorest.NewErrorWithError(err, "trafficmanager.ProfilesClient", "Get", nil, "Failure preparing request") + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "trafficmanager.ProfilesClient", "Get", resp, "Failure sending request") + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "trafficmanager.ProfilesClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. 
+func (client ProfilesClient) GetPreparer(resourceGroupName string, profileName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "profileName": autorest.Encode("path", profileName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/trafficmanagerprofiles/{profileName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client ProfilesClient) GetSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client ProfilesClient) GetResponder(resp *http.Response) (result Profile, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListAll lists all Traffic Manager profiles within a subscription. 
+func (client ProfilesClient) ListAll() (result ProfileListResult, err error) { + req, err := client.ListAllPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "trafficmanager.ProfilesClient", "ListAll", nil, "Failure preparing request") + } + + resp, err := client.ListAllSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "trafficmanager.ProfilesClient", "ListAll", resp, "Failure sending request") + } + + result, err = client.ListAllResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "trafficmanager.ProfilesClient", "ListAll", resp, "Failure responding to request") + } + + return +} + +// ListAllPreparer prepares the ListAll request. +func (client ProfilesClient) ListAllPreparer() (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Network/trafficmanagerprofiles", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListAllSender sends the ListAll request. The method will close the +// http.Response Body if it receives an error. +func (client ProfilesClient) ListAllSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListAllResponder handles the response to the ListAll request. The method always +// closes the http.Response Body. 
+func (client ProfilesClient) ListAllResponder(resp *http.Response) (result ProfileListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListAllInResourceGroup lists all Traffic Manager profiles within a resource +// group. +// +// resourceGroupName is the name of the resource group containing the Traffic +// Manager profiles to be listed. +func (client ProfilesClient) ListAllInResourceGroup(resourceGroupName string) (result ProfileListResult, err error) { + req, err := client.ListAllInResourceGroupPreparer(resourceGroupName) + if err != nil { + return result, autorest.NewErrorWithError(err, "trafficmanager.ProfilesClient", "ListAllInResourceGroup", nil, "Failure preparing request") + } + + resp, err := client.ListAllInResourceGroupSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "trafficmanager.ProfilesClient", "ListAllInResourceGroup", resp, "Failure sending request") + } + + result, err = client.ListAllInResourceGroupResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "trafficmanager.ProfilesClient", "ListAllInResourceGroup", resp, "Failure responding to request") + } + + return +} + +// ListAllInResourceGroupPreparer prepares the ListAllInResourceGroup request. 
+func (client ProfilesClient) ListAllInResourceGroupPreparer(resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/trafficmanagerprofiles", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListAllInResourceGroupSender sends the ListAllInResourceGroup request. The method will close the +// http.Response Body if it receives an error. +func (client ProfilesClient) ListAllInResourceGroupSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListAllInResourceGroupResponder handles the response to the ListAllInResourceGroup request. The method always +// closes the http.Response Body. +func (client ProfilesClient) ListAllInResourceGroupResponder(resp *http.Response) (result ProfileListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Update update a Traffic Manager profile. +// +// resourceGroupName is the name of the resource group containing the Traffic +// Manager profile. profileName is the name of the Traffic Manager profile. +// parameters is the Traffic Manager profile parameters supplied to the +// Update operation. 
+func (client ProfilesClient) Update(resourceGroupName string, profileName string, parameters Profile) (result Profile, err error) { + req, err := client.UpdatePreparer(resourceGroupName, profileName, parameters) + if err != nil { + return result, autorest.NewErrorWithError(err, "trafficmanager.ProfilesClient", "Update", nil, "Failure preparing request") + } + + resp, err := client.UpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "trafficmanager.ProfilesClient", "Update", resp, "Failure sending request") + } + + result, err = client.UpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "trafficmanager.ProfilesClient", "Update", resp, "Failure responding to request") + } + + return +} + +// UpdatePreparer prepares the Update request. +func (client ProfilesClient) UpdatePreparer(resourceGroupName string, profileName string, parameters Profile) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "profileName": autorest.Encode("path", profileName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPatch(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/trafficmanagerprofiles/{profileName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// UpdateSender sends the Update request. The method will close the +// http.Response Body if it receives an error. 
+func (client ProfilesClient) UpdateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// UpdateResponder handles the response to the Update request. The method always +// closes the http.Response Body. +func (client ProfilesClient) UpdateResponder(resp *http.Response) (result Profile, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/trafficmanager/version.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/trafficmanager/version.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/trafficmanager/version.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/trafficmanager/version.go 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,43 @@ +package trafficmanager + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. 
+ +import ( + "fmt" +) + +const ( + major = "3" + minor = "1" + patch = "0" + // Always begin a "tag" with a dash (as per http://semver.org) + tag = "-beta" + semVerFormat = "%s.%s.%s%s" + userAgentFormat = "Azure-SDK-for-Go/%s arm-%s/%s" +) + +// UserAgent returns the UserAgent string to use when sending http.Requests. +func UserAgent() string { + return fmt.Sprintf(userAgentFormat, Version(), "trafficmanager", "2015-11-01") +} + +// Version returns the semantic version (see http://semver.org) of the client. +func Version() string { + return fmt.Sprintf(semVerFormat, major, minor, patch, tag) +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/web/certificateorders.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/web/certificateorders.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/web/certificateorders.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/web/certificateorders.go 2016-10-13 14:32:06.000000000 +0000 @@ -14,14 +14,14 @@ // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 // Changes may cause incorrect behavior and will be lost if the code is // regenerated. 
import ( - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" "net/http" - "net/url" ) // CertificateOrdersClient is the use these APIs to manage Azure Websites @@ -53,21 +53,21 @@ // resourceGroupName is azure resource group name certificateOrderName is // certificate name name is certificate name keyVaultCertificate is key Vault // secret csm Id -func (client CertificateOrdersClient) CreateOrUpdateCertificate(resourceGroupName string, certificateOrderName string, name string, keyVaultCertificate CertificateOrderCertificate) (result CertificateOrderCertificate, ae error) { +func (client CertificateOrdersClient) CreateOrUpdateCertificate(resourceGroupName string, certificateOrderName string, name string, keyVaultCertificate CertificateOrderCertificate) (result CertificateOrderCertificate, err error) { req, err := client.CreateOrUpdateCertificatePreparer(resourceGroupName, certificateOrderName, name, keyVaultCertificate) if err != nil { - return result, autorest.NewErrorWithError(err, "web/CertificateOrdersClient", "CreateOrUpdateCertificate", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.CertificateOrdersClient", "CreateOrUpdateCertificate", nil, "Failure preparing request") } resp, err := client.CreateOrUpdateCertificateSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/CertificateOrdersClient", "CreateOrUpdateCertificate", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.CertificateOrdersClient", "CreateOrUpdateCertificate", resp, "Failure sending request") } result, err = client.CreateOrUpdateCertificateResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/CertificateOrdersClient", "CreateOrUpdateCertificate", "Failure responding to request") + err = 
autorest.NewErrorWithError(err, "web.CertificateOrdersClient", "CreateOrUpdateCertificate", resp, "Failure responding to request") } return @@ -76,30 +76,30 @@ // CreateOrUpdateCertificatePreparer prepares the CreateOrUpdateCertificate request. func (client CertificateOrdersClient) CreateOrUpdateCertificatePreparer(resourceGroupName string, certificateOrderName string, name string, keyVaultCertificate CertificateOrderCertificate) (*http.Request, error) { pathParameters := map[string]interface{}{ - "certificateOrderName": url.QueryEscape(certificateOrderName), - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "certificateOrderName": autorest.Encode("path", certificateOrderName), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, + preparer := autorest.CreatePreparer( autorest.AsJSON(), autorest.AsPut(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{certificateOrderName}/certificates/{name}"), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{certificateOrderName}/certificates/{name}", pathParameters), autorest.WithJSON(keyVaultCertificate), - autorest.WithPathParameters(pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // CreateOrUpdateCertificateSender sends the CreateOrUpdateCertificate request. The method will close the // http.Response Body if it receives an error. 
func (client CertificateOrdersClient) CreateOrUpdateCertificateSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // CreateOrUpdateCertificateResponder handles the response to the CreateOrUpdateCertificate request. The method always @@ -108,7 +108,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -121,21 +121,21 @@ // resourceGroupName is azure resource group name name is certificate name // certificateDistinguishedName is distinguished name to be used for // purchasing certificate -func (client CertificateOrdersClient) CreateOrUpdateCertificateOrder(resourceGroupName string, name string, certificateDistinguishedName CertificateOrder) (result CertificateOrder, ae error) { +func (client CertificateOrdersClient) CreateOrUpdateCertificateOrder(resourceGroupName string, name string, certificateDistinguishedName CertificateOrder) (result CertificateOrder, err error) { req, err := client.CreateOrUpdateCertificateOrderPreparer(resourceGroupName, name, certificateDistinguishedName) if err != nil { - return result, autorest.NewErrorWithError(err, "web/CertificateOrdersClient", "CreateOrUpdateCertificateOrder", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.CertificateOrdersClient", "CreateOrUpdateCertificateOrder", nil, "Failure preparing request") } resp, err := client.CreateOrUpdateCertificateOrderSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/CertificateOrdersClient", "CreateOrUpdateCertificateOrder", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.CertificateOrdersClient", "CreateOrUpdateCertificateOrder", 
resp, "Failure sending request") } result, err = client.CreateOrUpdateCertificateOrderResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/CertificateOrdersClient", "CreateOrUpdateCertificateOrder", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.CertificateOrdersClient", "CreateOrUpdateCertificateOrder", resp, "Failure responding to request") } return @@ -144,29 +144,29 @@ // CreateOrUpdateCertificateOrderPreparer prepares the CreateOrUpdateCertificateOrder request. func (client CertificateOrdersClient) CreateOrUpdateCertificateOrderPreparer(resourceGroupName string, name string, certificateDistinguishedName CertificateOrder) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, + preparer := autorest.CreatePreparer( autorest.AsJSON(), autorest.AsPut(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{name}"), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{name}", pathParameters), autorest.WithJSON(certificateDistinguishedName), - autorest.WithPathParameters(pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // CreateOrUpdateCertificateOrderSender sends the CreateOrUpdateCertificateOrder request. 
The method will close the // http.Response Body if it receives an error. func (client CertificateOrdersClient) CreateOrUpdateCertificateOrderSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // CreateOrUpdateCertificateOrderResponder handles the response to the CreateOrUpdateCertificateOrder request. The method always @@ -175,7 +175,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -186,21 +186,21 @@ // // resourceGroupName is azure resource group name certificateOrderName is // certificate name name is certificate name -func (client CertificateOrdersClient) DeleteCertificate(resourceGroupName string, certificateOrderName string, name string) (result ObjectSet, ae error) { +func (client CertificateOrdersClient) DeleteCertificate(resourceGroupName string, certificateOrderName string, name string) (result SetObject, err error) { req, err := client.DeleteCertificatePreparer(resourceGroupName, certificateOrderName, name) if err != nil { - return result, autorest.NewErrorWithError(err, "web/CertificateOrdersClient", "DeleteCertificate", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.CertificateOrdersClient", "DeleteCertificate", nil, "Failure preparing request") } resp, err := client.DeleteCertificateSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/CertificateOrdersClient", "DeleteCertificate", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.CertificateOrdersClient", "DeleteCertificate", resp, "Failure sending request") } result, err = client.DeleteCertificateResponder(resp) if err != nil { - ae = 
autorest.NewErrorWithError(err, "web/CertificateOrdersClient", "DeleteCertificate", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.CertificateOrdersClient", "DeleteCertificate", resp, "Failure responding to request") } return @@ -209,38 +209,37 @@ // DeleteCertificatePreparer prepares the DeleteCertificate request. func (client CertificateOrdersClient) DeleteCertificatePreparer(resourceGroupName string, certificateOrderName string, name string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "certificateOrderName": url.QueryEscape(certificateOrderName), - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "certificateOrderName": autorest.Encode("path", certificateOrderName), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsDelete(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{certificateOrderName}/certificates/{name}"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{certificateOrderName}/certificates/{name}", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // DeleteCertificateSender sends the DeleteCertificate request. The method will close the // http.Response Body if it receives an error. 
func (client CertificateOrdersClient) DeleteCertificateSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // DeleteCertificateResponder handles the response to the DeleteCertificate request. The method always // closes the http.Response Body. -func (client CertificateOrdersClient) DeleteCertificateResponder(resp *http.Response) (result ObjectSet, err error) { +func (client CertificateOrdersClient) DeleteCertificateResponder(resp *http.Response) (result SetObject, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result.Value), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -250,21 +249,21 @@ // DeleteCertificateOrder sends the delete certificate order request. // // resourceGroupName is azure resource group name name is certificate name -func (client CertificateOrdersClient) DeleteCertificateOrder(resourceGroupName string, name string) (result ObjectSet, ae error) { +func (client CertificateOrdersClient) DeleteCertificateOrder(resourceGroupName string, name string) (result SetObject, err error) { req, err := client.DeleteCertificateOrderPreparer(resourceGroupName, name) if err != nil { - return result, autorest.NewErrorWithError(err, "web/CertificateOrdersClient", "DeleteCertificateOrder", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.CertificateOrdersClient", "DeleteCertificateOrder", nil, "Failure preparing request") } resp, err := client.DeleteCertificateOrderSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/CertificateOrdersClient", "DeleteCertificateOrder", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.CertificateOrdersClient", 
"DeleteCertificateOrder", resp, "Failure sending request") } result, err = client.DeleteCertificateOrderResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/CertificateOrdersClient", "DeleteCertificateOrder", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.CertificateOrdersClient", "DeleteCertificateOrder", resp, "Failure responding to request") } return @@ -273,37 +272,36 @@ // DeleteCertificateOrderPreparer prepares the DeleteCertificateOrder request. func (client CertificateOrdersClient) DeleteCertificateOrderPreparer(resourceGroupName string, name string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsDelete(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{name}"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{name}", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // DeleteCertificateOrderSender sends the DeleteCertificateOrder request. The method will close the // http.Response Body if it receives an error. 
func (client CertificateOrdersClient) DeleteCertificateOrderSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // DeleteCertificateOrderResponder handles the response to the DeleteCertificateOrder request. The method always // closes the http.Response Body. -func (client CertificateOrdersClient) DeleteCertificateOrderResponder(resp *http.Response) (result ObjectSet, err error) { +func (client CertificateOrdersClient) DeleteCertificateOrderResponder(resp *http.Response) (result SetObject, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result.Value), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -314,21 +312,21 @@ // // resourceGroupName is azure resource group name certificateOrderName is // certificate name name is certificate name -func (client CertificateOrdersClient) GetCertificate(resourceGroupName string, certificateOrderName string, name string) (result CertificateOrderCertificate, ae error) { +func (client CertificateOrdersClient) GetCertificate(resourceGroupName string, certificateOrderName string, name string) (result CertificateOrderCertificate, err error) { req, err := client.GetCertificatePreparer(resourceGroupName, certificateOrderName, name) if err != nil { - return result, autorest.NewErrorWithError(err, "web/CertificateOrdersClient", "GetCertificate", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.CertificateOrdersClient", "GetCertificate", nil, "Failure preparing request") } resp, err := client.GetCertificateSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/CertificateOrdersClient", "GetCertificate", "Failure sending request") + return result, 
autorest.NewErrorWithError(err, "web.CertificateOrdersClient", "GetCertificate", resp, "Failure sending request") } result, err = client.GetCertificateResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/CertificateOrdersClient", "GetCertificate", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.CertificateOrdersClient", "GetCertificate", resp, "Failure responding to request") } return @@ -337,29 +335,28 @@ // GetCertificatePreparer prepares the GetCertificate request. func (client CertificateOrdersClient) GetCertificatePreparer(resourceGroupName string, certificateOrderName string, name string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "certificateOrderName": url.QueryEscape(certificateOrderName), - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "certificateOrderName": autorest.Encode("path", certificateOrderName), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{certificateOrderName}/certificates/{name}"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{certificateOrderName}/certificates/{name}", pathParameters), autorest.WithQueryParameters(queryParameters)) + return 
preparer.Prepare(&http.Request{}) } // GetCertificateSender sends the GetCertificate request. The method will close the // http.Response Body if it receives an error. func (client CertificateOrdersClient) GetCertificateSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetCertificateResponder handles the response to the GetCertificate request. The method always @@ -368,7 +365,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -378,21 +375,21 @@ // GetCertificateOrder sends the get certificate order request. // // resourceGroupName is azure resource group name name is certificate name -func (client CertificateOrdersClient) GetCertificateOrder(resourceGroupName string, name string) (result CertificateOrder, ae error) { +func (client CertificateOrdersClient) GetCertificateOrder(resourceGroupName string, name string) (result CertificateOrder, err error) { req, err := client.GetCertificateOrderPreparer(resourceGroupName, name) if err != nil { - return result, autorest.NewErrorWithError(err, "web/CertificateOrdersClient", "GetCertificateOrder", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.CertificateOrdersClient", "GetCertificateOrder", nil, "Failure preparing request") } resp, err := client.GetCertificateOrderSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/CertificateOrdersClient", "GetCertificateOrder", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.CertificateOrdersClient", "GetCertificateOrder", resp, "Failure sending request") } result, err = client.GetCertificateOrderResponder(resp) if err != nil 
{ - ae = autorest.NewErrorWithError(err, "web/CertificateOrdersClient", "GetCertificateOrder", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.CertificateOrdersClient", "GetCertificateOrder", resp, "Failure responding to request") } return @@ -401,28 +398,27 @@ // GetCertificateOrderPreparer prepares the GetCertificateOrder request. func (client CertificateOrdersClient) GetCertificateOrderPreparer(resourceGroupName string, name string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{name}"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{name}", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetCertificateOrderSender sends the GetCertificateOrder request. The method will close the // http.Response Body if it receives an error. 
func (client CertificateOrdersClient) GetCertificateOrderSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetCertificateOrderResponder handles the response to the GetCertificateOrder request. The method always @@ -431,7 +427,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -441,21 +437,21 @@ // GetCertificateOrders sends the get certificate orders request. // // resourceGroupName is azure resource group name -func (client CertificateOrdersClient) GetCertificateOrders(resourceGroupName string) (result CertificateOrderCollection, ae error) { +func (client CertificateOrdersClient) GetCertificateOrders(resourceGroupName string) (result CertificateOrderCollection, err error) { req, err := client.GetCertificateOrdersPreparer(resourceGroupName) if err != nil { - return result, autorest.NewErrorWithError(err, "web/CertificateOrdersClient", "GetCertificateOrders", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.CertificateOrdersClient", "GetCertificateOrders", nil, "Failure preparing request") } resp, err := client.GetCertificateOrdersSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/CertificateOrdersClient", "GetCertificateOrders", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.CertificateOrdersClient", "GetCertificateOrders", resp, "Failure sending request") } result, err = client.GetCertificateOrdersResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/CertificateOrdersClient", "GetCertificateOrders", "Failure responding to request") + err = autorest.NewErrorWithError(err, 
"web.CertificateOrdersClient", "GetCertificateOrders", resp, "Failure responding to request") } return @@ -464,27 +460,26 @@ // GetCertificateOrdersPreparer prepares the GetCertificateOrders request. func (client CertificateOrdersClient) GetCertificateOrdersPreparer(resourceGroupName string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetCertificateOrdersSender sends the GetCertificateOrders request. The method will close the // http.Response Body if it receives an error. func (client CertificateOrdersClient) GetCertificateOrdersSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetCertificateOrdersResponder handles the response to the GetCertificateOrders request. 
The method always @@ -493,7 +488,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -504,21 +499,21 @@ // // resourceGroupName is azure resource group name certificateOrderName is // certificate name -func (client CertificateOrdersClient) GetCertificates(resourceGroupName string, certificateOrderName string) (result CertificateOrderCertificateCollection, ae error) { +func (client CertificateOrdersClient) GetCertificates(resourceGroupName string, certificateOrderName string) (result CertificateOrderCertificateCollection, err error) { req, err := client.GetCertificatesPreparer(resourceGroupName, certificateOrderName) if err != nil { - return result, autorest.NewErrorWithError(err, "web/CertificateOrdersClient", "GetCertificates", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.CertificateOrdersClient", "GetCertificates", nil, "Failure preparing request") } resp, err := client.GetCertificatesSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/CertificateOrdersClient", "GetCertificates", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.CertificateOrdersClient", "GetCertificates", resp, "Failure sending request") } result, err = client.GetCertificatesResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/CertificateOrdersClient", "GetCertificates", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.CertificateOrdersClient", "GetCertificates", resp, "Failure responding to request") } return @@ -527,28 +522,27 @@ // GetCertificatesPreparer prepares the GetCertificates request. 
func (client CertificateOrdersClient) GetCertificatesPreparer(resourceGroupName string, certificateOrderName string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "certificateOrderName": url.QueryEscape(certificateOrderName), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "certificateOrderName": autorest.Encode("path", certificateOrderName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{certificateOrderName}/certificates"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{certificateOrderName}/certificates", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetCertificatesSender sends the GetCertificates request. The method will close the // http.Response Body if it receives an error. func (client CertificateOrdersClient) GetCertificatesSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetCertificatesResponder handles the response to the GetCertificates request. 
The method always @@ -557,33 +551,353 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} return } +// ReissueCertificateOrder sends the reissue certificate order request. +// +// resourceGroupName is azure resource group name name is certificate name +// reissueCertificateOrderRequest is reissue parameters +func (client CertificateOrdersClient) ReissueCertificateOrder(resourceGroupName string, name string, reissueCertificateOrderRequest ReissueCertificateOrderRequest) (result SetObject, err error) { + req, err := client.ReissueCertificateOrderPreparer(resourceGroupName, name, reissueCertificateOrderRequest) + if err != nil { + return result, autorest.NewErrorWithError(err, "web.CertificateOrdersClient", "ReissueCertificateOrder", nil, "Failure preparing request") + } + + resp, err := client.ReissueCertificateOrderSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "web.CertificateOrdersClient", "ReissueCertificateOrder", resp, "Failure sending request") + } + + result, err = client.ReissueCertificateOrderResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "web.CertificateOrdersClient", "ReissueCertificateOrder", resp, "Failure responding to request") + } + + return +} + +// ReissueCertificateOrderPreparer prepares the ReissueCertificateOrder request. 
+func (client CertificateOrdersClient) ReissueCertificateOrderPreparer(resourceGroupName string, name string, reissueCertificateOrderRequest ReissueCertificateOrderRequest) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{name}/reissue", pathParameters), + autorest.WithJSON(reissueCertificateOrderRequest), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ReissueCertificateOrderSender sends the ReissueCertificateOrder request. The method will close the +// http.Response Body if it receives an error. +func (client CertificateOrdersClient) ReissueCertificateOrderSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ReissueCertificateOrderResponder handles the response to the ReissueCertificateOrder request. The method always +// closes the http.Response Body. +func (client CertificateOrdersClient) ReissueCertificateOrderResponder(resp *http.Response) (result SetObject, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result.Value), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// RenewCertificateOrder sends the renew certificate order request. 
+// +// resourceGroupName is azure resource group name name is certificate name +// renewCertificateOrderRequest is renew parameters +func (client CertificateOrdersClient) RenewCertificateOrder(resourceGroupName string, name string, renewCertificateOrderRequest RenewCertificateOrderRequest) (result SetObject, err error) { + req, err := client.RenewCertificateOrderPreparer(resourceGroupName, name, renewCertificateOrderRequest) + if err != nil { + return result, autorest.NewErrorWithError(err, "web.CertificateOrdersClient", "RenewCertificateOrder", nil, "Failure preparing request") + } + + resp, err := client.RenewCertificateOrderSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "web.CertificateOrdersClient", "RenewCertificateOrder", resp, "Failure sending request") + } + + result, err = client.RenewCertificateOrderResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "web.CertificateOrdersClient", "RenewCertificateOrder", resp, "Failure responding to request") + } + + return +} + +// RenewCertificateOrderPreparer prepares the RenewCertificateOrder request. 
+func (client CertificateOrdersClient) RenewCertificateOrderPreparer(resourceGroupName string, name string, renewCertificateOrderRequest RenewCertificateOrderRequest) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{name}/renew", pathParameters), + autorest.WithJSON(renewCertificateOrderRequest), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// RenewCertificateOrderSender sends the RenewCertificateOrder request. The method will close the +// http.Response Body if it receives an error. +func (client CertificateOrdersClient) RenewCertificateOrderSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// RenewCertificateOrderResponder handles the response to the RenewCertificateOrder request. The method always +// closes the http.Response Body. +func (client CertificateOrdersClient) RenewCertificateOrderResponder(resp *http.Response) (result SetObject, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result.Value), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ResendCertificateEmail sends the resend certificate email request. 
+// +// resourceGroupName is azure resource group name name is certificate order +// name +func (client CertificateOrdersClient) ResendCertificateEmail(resourceGroupName string, name string) (result SetObject, err error) { + req, err := client.ResendCertificateEmailPreparer(resourceGroupName, name) + if err != nil { + return result, autorest.NewErrorWithError(err, "web.CertificateOrdersClient", "ResendCertificateEmail", nil, "Failure preparing request") + } + + resp, err := client.ResendCertificateEmailSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "web.CertificateOrdersClient", "ResendCertificateEmail", resp, "Failure sending request") + } + + result, err = client.ResendCertificateEmailResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "web.CertificateOrdersClient", "ResendCertificateEmail", resp, "Failure responding to request") + } + + return +} + +// ResendCertificateEmailPreparer prepares the ResendCertificateEmail request. +func (client CertificateOrdersClient) ResendCertificateEmailPreparer(resourceGroupName string, name string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{name}/resendEmail", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ResendCertificateEmailSender sends the ResendCertificateEmail request. 
The method will close the +// http.Response Body if it receives an error. +func (client CertificateOrdersClient) ResendCertificateEmailSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ResendCertificateEmailResponder handles the response to the ResendCertificateEmail request. The method always +// closes the http.Response Body. +func (client CertificateOrdersClient) ResendCertificateEmailResponder(resp *http.Response) (result SetObject, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result.Value), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// RetrieveCertificateActions sends the retrieve certificate actions request. +// +// resourceGroupName is azure resource group name name is certificate order +// name +func (client CertificateOrdersClient) RetrieveCertificateActions(resourceGroupName string, name string) (result ListCertificateOrderAction, err error) { + req, err := client.RetrieveCertificateActionsPreparer(resourceGroupName, name) + if err != nil { + return result, autorest.NewErrorWithError(err, "web.CertificateOrdersClient", "RetrieveCertificateActions", nil, "Failure preparing request") + } + + resp, err := client.RetrieveCertificateActionsSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "web.CertificateOrdersClient", "RetrieveCertificateActions", resp, "Failure sending request") + } + + result, err = client.RetrieveCertificateActionsResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "web.CertificateOrdersClient", "RetrieveCertificateActions", resp, "Failure responding to request") + } + + return +} + +// RetrieveCertificateActionsPreparer prepares the RetrieveCertificateActions request. 
+func (client CertificateOrdersClient) RetrieveCertificateActionsPreparer(resourceGroupName string, name string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{name}/retrieveCertificateActions", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// RetrieveCertificateActionsSender sends the RetrieveCertificateActions request. The method will close the +// http.Response Body if it receives an error. +func (client CertificateOrdersClient) RetrieveCertificateActionsSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// RetrieveCertificateActionsResponder handles the response to the RetrieveCertificateActions request. The method always +// closes the http.Response Body. +func (client CertificateOrdersClient) RetrieveCertificateActionsResponder(resp *http.Response) (result ListCertificateOrderAction, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result.Value), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// RetrieveCertificateEmailHistory sends the retrieve certificate email +// history request. 
+// +// resourceGroupName is azure resource group name name is certificate order +// name +func (client CertificateOrdersClient) RetrieveCertificateEmailHistory(resourceGroupName string, name string) (result ListCertificateEmail, err error) { + req, err := client.RetrieveCertificateEmailHistoryPreparer(resourceGroupName, name) + if err != nil { + return result, autorest.NewErrorWithError(err, "web.CertificateOrdersClient", "RetrieveCertificateEmailHistory", nil, "Failure preparing request") + } + + resp, err := client.RetrieveCertificateEmailHistorySender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "web.CertificateOrdersClient", "RetrieveCertificateEmailHistory", resp, "Failure sending request") + } + + result, err = client.RetrieveCertificateEmailHistoryResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "web.CertificateOrdersClient", "RetrieveCertificateEmailHistory", resp, "Failure responding to request") + } + + return +} + +// RetrieveCertificateEmailHistoryPreparer prepares the RetrieveCertificateEmailHistory request. 
+func (client CertificateOrdersClient) RetrieveCertificateEmailHistoryPreparer(resourceGroupName string, name string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{name}/retrieveEmailHistory", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// RetrieveCertificateEmailHistorySender sends the RetrieveCertificateEmailHistory request. The method will close the +// http.Response Body if it receives an error. +func (client CertificateOrdersClient) RetrieveCertificateEmailHistorySender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// RetrieveCertificateEmailHistoryResponder handles the response to the RetrieveCertificateEmailHistory request. The method always +// closes the http.Response Body. +func (client CertificateOrdersClient) RetrieveCertificateEmailHistoryResponder(resp *http.Response) (result ListCertificateEmail, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result.Value), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + // UpdateCertificate sends the update certificate request. 
// // resourceGroupName is azure resource group name certificateOrderName is // certificate name name is certificate name keyVaultCertificate is key Vault // secret csm Id -func (client CertificateOrdersClient) UpdateCertificate(resourceGroupName string, certificateOrderName string, name string, keyVaultCertificate CertificateOrderCertificate) (result CertificateOrderCertificate, ae error) { +func (client CertificateOrdersClient) UpdateCertificate(resourceGroupName string, certificateOrderName string, name string, keyVaultCertificate CertificateOrderCertificate) (result CertificateOrderCertificate, err error) { req, err := client.UpdateCertificatePreparer(resourceGroupName, certificateOrderName, name, keyVaultCertificate) if err != nil { - return result, autorest.NewErrorWithError(err, "web/CertificateOrdersClient", "UpdateCertificate", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.CertificateOrdersClient", "UpdateCertificate", nil, "Failure preparing request") } resp, err := client.UpdateCertificateSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/CertificateOrdersClient", "UpdateCertificate", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.CertificateOrdersClient", "UpdateCertificate", resp, "Failure sending request") } result, err = client.UpdateCertificateResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/CertificateOrdersClient", "UpdateCertificate", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.CertificateOrdersClient", "UpdateCertificate", resp, "Failure responding to request") } return @@ -592,30 +906,30 @@ // UpdateCertificatePreparer prepares the UpdateCertificate request. 
func (client CertificateOrdersClient) UpdateCertificatePreparer(resourceGroupName string, certificateOrderName string, name string, keyVaultCertificate CertificateOrderCertificate) (*http.Request, error) { pathParameters := map[string]interface{}{ - "certificateOrderName": url.QueryEscape(certificateOrderName), - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "certificateOrderName": autorest.Encode("path", certificateOrderName), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, + preparer := autorest.CreatePreparer( autorest.AsJSON(), autorest.AsPatch(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{certificateOrderName}/certificates/{name}"), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{certificateOrderName}/certificates/{name}", pathParameters), autorest.WithJSON(keyVaultCertificate), - autorest.WithPathParameters(pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // UpdateCertificateSender sends the UpdateCertificate request. The method will close the // http.Response Body if it receives an error. func (client CertificateOrdersClient) UpdateCertificateSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // UpdateCertificateResponder handles the response to the UpdateCertificate request. 
The method always @@ -624,7 +938,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -636,21 +950,21 @@ // resourceGroupName is azure resource group name name is certificate name // certificateDistinguishedName is distinguished name to be used for // purchasing certificate -func (client CertificateOrdersClient) UpdateCertificateOrder(resourceGroupName string, name string, certificateDistinguishedName CertificateOrder) (result CertificateOrder, ae error) { +func (client CertificateOrdersClient) UpdateCertificateOrder(resourceGroupName string, name string, certificateDistinguishedName CertificateOrder) (result CertificateOrder, err error) { req, err := client.UpdateCertificateOrderPreparer(resourceGroupName, name, certificateDistinguishedName) if err != nil { - return result, autorest.NewErrorWithError(err, "web/CertificateOrdersClient", "UpdateCertificateOrder", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.CertificateOrdersClient", "UpdateCertificateOrder", nil, "Failure preparing request") } resp, err := client.UpdateCertificateOrderSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/CertificateOrdersClient", "UpdateCertificateOrder", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.CertificateOrdersClient", "UpdateCertificateOrder", resp, "Failure sending request") } result, err = client.UpdateCertificateOrderResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/CertificateOrdersClient", "UpdateCertificateOrder", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.CertificateOrdersClient", "UpdateCertificateOrder", resp, "Failure responding to 
request") } return @@ -659,29 +973,29 @@ // UpdateCertificateOrderPreparer prepares the UpdateCertificateOrder request. func (client CertificateOrdersClient) UpdateCertificateOrderPreparer(resourceGroupName string, name string, certificateDistinguishedName CertificateOrder) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, + preparer := autorest.CreatePreparer( autorest.AsJSON(), autorest.AsPatch(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{name}"), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{name}", pathParameters), autorest.WithJSON(certificateDistinguishedName), - autorest.WithPathParameters(pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // UpdateCertificateOrderSender sends the UpdateCertificateOrder request. The method will close the // http.Response Body if it receives an error. func (client CertificateOrdersClient) UpdateCertificateOrderSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // UpdateCertificateOrderResponder handles the response to the UpdateCertificateOrder request. 
The method always @@ -690,9 +1004,72 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} return +} + +// VerifyDomainOwnership sends the verify domain ownership request. +// +// resourceGroupName is azure resource group name name is certificate order +// name +func (client CertificateOrdersClient) VerifyDomainOwnership(resourceGroupName string, name string) (result SetObject, err error) { + req, err := client.VerifyDomainOwnershipPreparer(resourceGroupName, name) + if err != nil { + return result, autorest.NewErrorWithError(err, "web.CertificateOrdersClient", "VerifyDomainOwnership", nil, "Failure preparing request") + } + + resp, err := client.VerifyDomainOwnershipSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "web.CertificateOrdersClient", "VerifyDomainOwnership", resp, "Failure sending request") + } + + result, err = client.VerifyDomainOwnershipResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "web.CertificateOrdersClient", "VerifyDomainOwnership", resp, "Failure responding to request") + } + + return +} + +// VerifyDomainOwnershipPreparer prepares the VerifyDomainOwnership request. 
+func (client CertificateOrdersClient) VerifyDomainOwnershipPreparer(resourceGroupName string, name string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{name}/verifyDomainOwnership", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// VerifyDomainOwnershipSender sends the VerifyDomainOwnership request. The method will close the +// http.Response Body if it receives an error. +func (client CertificateOrdersClient) VerifyDomainOwnershipSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// VerifyDomainOwnershipResponder handles the response to the VerifyDomainOwnership request. The method always +// closes the http.Response Body. 
+func (client CertificateOrdersClient) VerifyDomainOwnershipResponder(resp *http.Response) (result SetObject, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result.Value), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return } diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/web/certificates.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/web/certificates.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/web/certificates.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/web/certificates.go 2016-10-13 14:32:06.000000000 +0000 @@ -14,14 +14,14 @@ // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 // Changes may cause incorrect behavior and will be lost if the code is // regenerated. import ( - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" "net/http" - "net/url" ) // CertificatesClient is the use these APIs to manage Azure Websites resources @@ -52,21 +52,21 @@ // resourceGroupName is name of the resource group name is name of the // certificate. certificateEnvelope is details of certificate if it exists // already. 
-func (client CertificatesClient) CreateOrUpdateCertificate(resourceGroupName string, name string, certificateEnvelope Certificate) (result Certificate, ae error) { +func (client CertificatesClient) CreateOrUpdateCertificate(resourceGroupName string, name string, certificateEnvelope Certificate) (result Certificate, err error) { req, err := client.CreateOrUpdateCertificatePreparer(resourceGroupName, name, certificateEnvelope) if err != nil { - return result, autorest.NewErrorWithError(err, "web/CertificatesClient", "CreateOrUpdateCertificate", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.CertificatesClient", "CreateOrUpdateCertificate", nil, "Failure preparing request") } resp, err := client.CreateOrUpdateCertificateSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/CertificatesClient", "CreateOrUpdateCertificate", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.CertificatesClient", "CreateOrUpdateCertificate", resp, "Failure sending request") } result, err = client.CreateOrUpdateCertificateResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/CertificatesClient", "CreateOrUpdateCertificate", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.CertificatesClient", "CreateOrUpdateCertificate", resp, "Failure responding to request") } return @@ -75,29 +75,29 @@ // CreateOrUpdateCertificatePreparer prepares the CreateOrUpdateCertificate request. 
func (client CertificatesClient) CreateOrUpdateCertificatePreparer(resourceGroupName string, name string, certificateEnvelope Certificate) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, + preparer := autorest.CreatePreparer( autorest.AsJSON(), autorest.AsPut(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/certificates/{name}"), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/certificates/{name}", pathParameters), autorest.WithJSON(certificateEnvelope), - autorest.WithPathParameters(pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // CreateOrUpdateCertificateSender sends the CreateOrUpdateCertificate request. The method will close the // http.Response Body if it receives an error. func (client CertificatesClient) CreateOrUpdateCertificateSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // CreateOrUpdateCertificateResponder handles the response to the CreateOrUpdateCertificate request. 
The method always @@ -106,7 +106,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -118,21 +118,21 @@ // resourceGroupName is name of the resource group name is name of the // certificate. csrEnvelope is details of certificate signing request if it // exists already. -func (client CertificatesClient) CreateOrUpdateCsr(resourceGroupName string, name string, csrEnvelope Csr) (result Csr, ae error) { +func (client CertificatesClient) CreateOrUpdateCsr(resourceGroupName string, name string, csrEnvelope Csr) (result Csr, err error) { req, err := client.CreateOrUpdateCsrPreparer(resourceGroupName, name, csrEnvelope) if err != nil { - return result, autorest.NewErrorWithError(err, "web/CertificatesClient", "CreateOrUpdateCsr", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.CertificatesClient", "CreateOrUpdateCsr", nil, "Failure preparing request") } resp, err := client.CreateOrUpdateCsrSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/CertificatesClient", "CreateOrUpdateCsr", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.CertificatesClient", "CreateOrUpdateCsr", resp, "Failure sending request") } result, err = client.CreateOrUpdateCsrResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/CertificatesClient", "CreateOrUpdateCsr", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.CertificatesClient", "CreateOrUpdateCsr", resp, "Failure responding to request") } return @@ -141,29 +141,29 @@ // CreateOrUpdateCsrPreparer prepares the CreateOrUpdateCsr request. 
func (client CertificatesClient) CreateOrUpdateCsrPreparer(resourceGroupName string, name string, csrEnvelope Csr) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, + preparer := autorest.CreatePreparer( autorest.AsJSON(), autorest.AsPut(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/csrs/{name}"), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/csrs/{name}", pathParameters), autorest.WithJSON(csrEnvelope), - autorest.WithPathParameters(pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // CreateOrUpdateCsrSender sends the CreateOrUpdateCsr request. The method will close the // http.Response Body if it receives an error. func (client CertificatesClient) CreateOrUpdateCsrSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // CreateOrUpdateCsrResponder handles the response to the CreateOrUpdateCsr request. 
The method always @@ -172,7 +172,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -183,21 +183,21 @@ // // resourceGroupName is name of the resource group name is name of the // certificate to be deleted. -func (client CertificatesClient) DeleteCertificate(resourceGroupName string, name string) (result ObjectSet, ae error) { +func (client CertificatesClient) DeleteCertificate(resourceGroupName string, name string) (result SetObject, err error) { req, err := client.DeleteCertificatePreparer(resourceGroupName, name) if err != nil { - return result, autorest.NewErrorWithError(err, "web/CertificatesClient", "DeleteCertificate", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.CertificatesClient", "DeleteCertificate", nil, "Failure preparing request") } resp, err := client.DeleteCertificateSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/CertificatesClient", "DeleteCertificate", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.CertificatesClient", "DeleteCertificate", resp, "Failure sending request") } result, err = client.DeleteCertificateResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/CertificatesClient", "DeleteCertificate", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.CertificatesClient", "DeleteCertificate", resp, "Failure responding to request") } return @@ -206,37 +206,36 @@ // DeleteCertificatePreparer prepares the DeleteCertificate request. 
func (client CertificatesClient) DeleteCertificatePreparer(resourceGroupName string, name string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsDelete(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/certificates/{name}"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/certificates/{name}", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // DeleteCertificateSender sends the DeleteCertificate request. The method will close the // http.Response Body if it receives an error. func (client CertificatesClient) DeleteCertificateSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // DeleteCertificateResponder handles the response to the DeleteCertificate request. The method always // closes the http.Response Body. 
-func (client CertificatesClient) DeleteCertificateResponder(resp *http.Response) (result ObjectSet, err error) { +func (client CertificatesClient) DeleteCertificateResponder(resp *http.Response) (result SetObject, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result.Value), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -247,21 +246,21 @@ // // resourceGroupName is name of the resource group name is name of the // certificate signing request. -func (client CertificatesClient) DeleteCsr(resourceGroupName string, name string) (result ObjectSet, ae error) { +func (client CertificatesClient) DeleteCsr(resourceGroupName string, name string) (result SetObject, err error) { req, err := client.DeleteCsrPreparer(resourceGroupName, name) if err != nil { - return result, autorest.NewErrorWithError(err, "web/CertificatesClient", "DeleteCsr", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.CertificatesClient", "DeleteCsr", nil, "Failure preparing request") } resp, err := client.DeleteCsrSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/CertificatesClient", "DeleteCsr", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.CertificatesClient", "DeleteCsr", resp, "Failure sending request") } result, err = client.DeleteCsrResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/CertificatesClient", "DeleteCsr", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.CertificatesClient", "DeleteCsr", resp, "Failure responding to request") } return @@ -270,37 +269,36 @@ // DeleteCsrPreparer prepares the DeleteCsr request. 
func (client CertificatesClient) DeleteCsrPreparer(resourceGroupName string, name string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsDelete(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/csrs/{name}"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/csrs/{name}", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // DeleteCsrSender sends the DeleteCsr request. The method will close the // http.Response Body if it receives an error. func (client CertificatesClient) DeleteCsrSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // DeleteCsrResponder handles the response to the DeleteCsr request. The method always // closes the http.Response Body. 
-func (client CertificatesClient) DeleteCsrResponder(resp *http.Response) (result ObjectSet, err error) { +func (client CertificatesClient) DeleteCsrResponder(resp *http.Response) (result SetObject, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result.Value), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -311,21 +309,21 @@ // // resourceGroupName is name of the resource group name is name of the // certificate. -func (client CertificatesClient) GetCertificate(resourceGroupName string, name string) (result Certificate, ae error) { +func (client CertificatesClient) GetCertificate(resourceGroupName string, name string) (result Certificate, err error) { req, err := client.GetCertificatePreparer(resourceGroupName, name) if err != nil { - return result, autorest.NewErrorWithError(err, "web/CertificatesClient", "GetCertificate", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.CertificatesClient", "GetCertificate", nil, "Failure preparing request") } resp, err := client.GetCertificateSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/CertificatesClient", "GetCertificate", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.CertificatesClient", "GetCertificate", resp, "Failure sending request") } result, err = client.GetCertificateResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/CertificatesClient", "GetCertificate", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.CertificatesClient", "GetCertificate", resp, "Failure responding to request") } return @@ -334,28 +332,27 @@ // GetCertificatePreparer prepares the GetCertificate request. 
func (client CertificatesClient) GetCertificatePreparer(resourceGroupName string, name string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/certificates/{name}"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/certificates/{name}", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetCertificateSender sends the GetCertificate request. The method will close the // http.Response Body if it receives an error. func (client CertificatesClient) GetCertificateSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetCertificateResponder handles the response to the GetCertificate request. The method always @@ -364,7 +361,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -374,21 +371,21 @@ // GetCertificates sends the get certificates request. 
// // resourceGroupName is name of the resource group -func (client CertificatesClient) GetCertificates(resourceGroupName string) (result CertificateCollection, ae error) { +func (client CertificatesClient) GetCertificates(resourceGroupName string) (result CertificateCollection, err error) { req, err := client.GetCertificatesPreparer(resourceGroupName) if err != nil { - return result, autorest.NewErrorWithError(err, "web/CertificatesClient", "GetCertificates", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.CertificatesClient", "GetCertificates", nil, "Failure preparing request") } resp, err := client.GetCertificatesSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/CertificatesClient", "GetCertificates", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.CertificatesClient", "GetCertificates", resp, "Failure sending request") } result, err = client.GetCertificatesResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/CertificatesClient", "GetCertificates", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.CertificatesClient", "GetCertificates", resp, "Failure responding to request") } return @@ -397,27 +394,26 @@ // GetCertificatesPreparer prepares the GetCertificates request. 
func (client CertificatesClient) GetCertificatesPreparer(resourceGroupName string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/certificates"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/certificates", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetCertificatesSender sends the GetCertificates request. The method will close the // http.Response Body if it receives an error. func (client CertificatesClient) GetCertificatesSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetCertificatesResponder handles the response to the GetCertificates request. The method always @@ -426,7 +422,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -437,21 +433,21 @@ // // resourceGroupName is name of the resource group name is name of the // certificate. 
-func (client CertificatesClient) GetCsr(resourceGroupName string, name string) (result Csr, ae error) { +func (client CertificatesClient) GetCsr(resourceGroupName string, name string) (result Csr, err error) { req, err := client.GetCsrPreparer(resourceGroupName, name) if err != nil { - return result, autorest.NewErrorWithError(err, "web/CertificatesClient", "GetCsr", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.CertificatesClient", "GetCsr", nil, "Failure preparing request") } resp, err := client.GetCsrSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/CertificatesClient", "GetCsr", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.CertificatesClient", "GetCsr", resp, "Failure sending request") } result, err = client.GetCsrResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/CertificatesClient", "GetCsr", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.CertificatesClient", "GetCsr", resp, "Failure responding to request") } return @@ -460,28 +456,27 @@ // GetCsrPreparer prepares the GetCsr request. 
func (client CertificatesClient) GetCsrPreparer(resourceGroupName string, name string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/csrs/{name}"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/csrs/{name}", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetCsrSender sends the GetCsr request. The method will close the // http.Response Body if it receives an error. func (client CertificatesClient) GetCsrSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetCsrResponder handles the response to the GetCsr request. The method always @@ -490,7 +485,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -500,21 +495,21 @@ // GetCsrs sends the get csrs request. 
// // resourceGroupName is name of the resource group -func (client CertificatesClient) GetCsrs(resourceGroupName string) (result CsrList, ae error) { +func (client CertificatesClient) GetCsrs(resourceGroupName string) (result ListCsr, err error) { req, err := client.GetCsrsPreparer(resourceGroupName) if err != nil { - return result, autorest.NewErrorWithError(err, "web/CertificatesClient", "GetCsrs", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.CertificatesClient", "GetCsrs", nil, "Failure preparing request") } resp, err := client.GetCsrsSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/CertificatesClient", "GetCsrs", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.CertificatesClient", "GetCsrs", resp, "Failure sending request") } result, err = client.GetCsrsResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/CertificatesClient", "GetCsrs", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.CertificatesClient", "GetCsrs", resp, "Failure responding to request") } return @@ -523,36 +518,35 @@ // GetCsrsPreparer prepares the GetCsrs request. 
func (client CertificatesClient) GetCsrsPreparer(resourceGroupName string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/csrs"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/csrs", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetCsrsSender sends the GetCsrs request. The method will close the // http.Response Body if it receives an error. func (client CertificatesClient) GetCsrsSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetCsrsResponder handles the response to the GetCsrs request. The method always // closes the http.Response Body. 
-func (client CertificatesClient) GetCsrsResponder(resp *http.Response) (result CsrList, err error) { +func (client CertificatesClient) GetCsrsResponder(resp *http.Response) (result ListCsr, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result.Value), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -564,21 +558,21 @@ // resourceGroupName is name of the resource group name is name of the // certificate. certificateEnvelope is details of certificate if it exists // already. -func (client CertificatesClient) UpdateCertificate(resourceGroupName string, name string, certificateEnvelope Certificate) (result Certificate, ae error) { +func (client CertificatesClient) UpdateCertificate(resourceGroupName string, name string, certificateEnvelope Certificate) (result Certificate, err error) { req, err := client.UpdateCertificatePreparer(resourceGroupName, name, certificateEnvelope) if err != nil { - return result, autorest.NewErrorWithError(err, "web/CertificatesClient", "UpdateCertificate", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.CertificatesClient", "UpdateCertificate", nil, "Failure preparing request") } resp, err := client.UpdateCertificateSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/CertificatesClient", "UpdateCertificate", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.CertificatesClient", "UpdateCertificate", resp, "Failure sending request") } result, err = client.UpdateCertificateResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/CertificatesClient", "UpdateCertificate", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.CertificatesClient", "UpdateCertificate", resp, "Failure 
responding to request") } return @@ -587,29 +581,29 @@ // UpdateCertificatePreparer prepares the UpdateCertificate request. func (client CertificatesClient) UpdateCertificatePreparer(resourceGroupName string, name string, certificateEnvelope Certificate) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, + preparer := autorest.CreatePreparer( autorest.AsJSON(), autorest.AsPatch(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/certificates/{name}"), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/certificates/{name}", pathParameters), autorest.WithJSON(certificateEnvelope), - autorest.WithPathParameters(pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // UpdateCertificateSender sends the UpdateCertificate request. The method will close the // http.Response Body if it receives an error. func (client CertificatesClient) UpdateCertificateSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // UpdateCertificateResponder handles the response to the UpdateCertificate request. 
The method always @@ -618,7 +612,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -630,21 +624,21 @@ // resourceGroupName is name of the resource group name is name of the // certificate. csrEnvelope is details of certificate signing request if it // exists already. -func (client CertificatesClient) UpdateCsr(resourceGroupName string, name string, csrEnvelope Csr) (result Csr, ae error) { +func (client CertificatesClient) UpdateCsr(resourceGroupName string, name string, csrEnvelope Csr) (result Csr, err error) { req, err := client.UpdateCsrPreparer(resourceGroupName, name, csrEnvelope) if err != nil { - return result, autorest.NewErrorWithError(err, "web/CertificatesClient", "UpdateCsr", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.CertificatesClient", "UpdateCsr", nil, "Failure preparing request") } resp, err := client.UpdateCsrSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/CertificatesClient", "UpdateCsr", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.CertificatesClient", "UpdateCsr", resp, "Failure sending request") } result, err = client.UpdateCsrResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/CertificatesClient", "UpdateCsr", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.CertificatesClient", "UpdateCsr", resp, "Failure responding to request") } return @@ -653,29 +647,29 @@ // UpdateCsrPreparer prepares the UpdateCsr request. 
func (client CertificatesClient) UpdateCsrPreparer(resourceGroupName string, name string, csrEnvelope Csr) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, + preparer := autorest.CreatePreparer( autorest.AsJSON(), autorest.AsPatch(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/csrs/{name}"), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/csrs/{name}", pathParameters), autorest.WithJSON(csrEnvelope), - autorest.WithPathParameters(pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // UpdateCsrSender sends the UpdateCsr request. The method will close the // http.Response Body if it receives an error. func (client CertificatesClient) UpdateCsrSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // UpdateCsrResponder handles the response to the UpdateCsr request. 
The method always @@ -684,7 +678,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/web/classicmobileservices.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/web/classicmobileservices.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/web/classicmobileservices.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/web/classicmobileservices.go 2016-10-13 14:32:06.000000000 +0000 @@ -14,14 +14,14 @@ // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 // Changes may cause incorrect behavior and will be lost if the code is // regenerated. import ( - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" "net/http" - "net/url" ) // ClassicMobileServicesClient is the use these APIs to manage Azure Websites @@ -51,21 +51,21 @@ // DeleteClassicMobileService sends the delete classic mobile service request. 
// // resourceGroupName is name of resource group name is name of mobile service -func (client ClassicMobileServicesClient) DeleteClassicMobileService(resourceGroupName string, name string) (result ObjectSet, ae error) { +func (client ClassicMobileServicesClient) DeleteClassicMobileService(resourceGroupName string, name string) (result SetObject, err error) { req, err := client.DeleteClassicMobileServicePreparer(resourceGroupName, name) if err != nil { - return result, autorest.NewErrorWithError(err, "web/ClassicMobileServicesClient", "DeleteClassicMobileService", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.ClassicMobileServicesClient", "DeleteClassicMobileService", nil, "Failure preparing request") } resp, err := client.DeleteClassicMobileServiceSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/ClassicMobileServicesClient", "DeleteClassicMobileService", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.ClassicMobileServicesClient", "DeleteClassicMobileService", resp, "Failure sending request") } result, err = client.DeleteClassicMobileServiceResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/ClassicMobileServicesClient", "DeleteClassicMobileService", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.ClassicMobileServicesClient", "DeleteClassicMobileService", resp, "Failure responding to request") } return @@ -74,37 +74,36 @@ // DeleteClassicMobileServicePreparer prepares the DeleteClassicMobileService request. 
func (client ClassicMobileServicesClient) DeleteClassicMobileServicePreparer(resourceGroupName string, name string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsDelete(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/classicMobileServices/{name}"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/classicMobileServices/{name}", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // DeleteClassicMobileServiceSender sends the DeleteClassicMobileService request. The method will close the // http.Response Body if it receives an error. func (client ClassicMobileServicesClient) DeleteClassicMobileServiceSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // DeleteClassicMobileServiceResponder handles the response to the DeleteClassicMobileService request. The method always // closes the http.Response Body. 
-func (client ClassicMobileServicesClient) DeleteClassicMobileServiceResponder(resp *http.Response) (result ObjectSet, err error) { +func (client ClassicMobileServicesClient) DeleteClassicMobileServiceResponder(resp *http.Response) (result SetObject, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result.Value), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -114,21 +113,21 @@ // GetClassicMobileService sends the get classic mobile service request. // // resourceGroupName is name of resource group name is name of mobile service -func (client ClassicMobileServicesClient) GetClassicMobileService(resourceGroupName string, name string) (result ClassicMobileService, ae error) { +func (client ClassicMobileServicesClient) GetClassicMobileService(resourceGroupName string, name string) (result ClassicMobileService, err error) { req, err := client.GetClassicMobileServicePreparer(resourceGroupName, name) if err != nil { - return result, autorest.NewErrorWithError(err, "web/ClassicMobileServicesClient", "GetClassicMobileService", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.ClassicMobileServicesClient", "GetClassicMobileService", nil, "Failure preparing request") } resp, err := client.GetClassicMobileServiceSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/ClassicMobileServicesClient", "GetClassicMobileService", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.ClassicMobileServicesClient", "GetClassicMobileService", resp, "Failure sending request") } result, err = client.GetClassicMobileServiceResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/ClassicMobileServicesClient", "GetClassicMobileService", "Failure responding to 
request") + err = autorest.NewErrorWithError(err, "web.ClassicMobileServicesClient", "GetClassicMobileService", resp, "Failure responding to request") } return @@ -137,28 +136,27 @@ // GetClassicMobileServicePreparer prepares the GetClassicMobileService request. func (client ClassicMobileServicesClient) GetClassicMobileServicePreparer(resourceGroupName string, name string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/classicMobileServices/{name}"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/classicMobileServices/{name}", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetClassicMobileServiceSender sends the GetClassicMobileService request. The method will close the // http.Response Body if it receives an error. func (client ClassicMobileServicesClient) GetClassicMobileServiceSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetClassicMobileServiceResponder handles the response to the GetClassicMobileService request. 
The method always @@ -167,7 +165,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -177,21 +175,21 @@ // GetClassicMobileServices sends the get classic mobile services request. // // resourceGroupName is name of resource group -func (client ClassicMobileServicesClient) GetClassicMobileServices(resourceGroupName string) (result ClassicMobileServiceCollection, ae error) { +func (client ClassicMobileServicesClient) GetClassicMobileServices(resourceGroupName string) (result ClassicMobileServiceCollection, err error) { req, err := client.GetClassicMobileServicesPreparer(resourceGroupName) if err != nil { - return result, autorest.NewErrorWithError(err, "web/ClassicMobileServicesClient", "GetClassicMobileServices", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.ClassicMobileServicesClient", "GetClassicMobileServices", nil, "Failure preparing request") } resp, err := client.GetClassicMobileServicesSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/ClassicMobileServicesClient", "GetClassicMobileServices", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.ClassicMobileServicesClient", "GetClassicMobileServices", resp, "Failure sending request") } result, err = client.GetClassicMobileServicesResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/ClassicMobileServicesClient", "GetClassicMobileServices", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.ClassicMobileServicesClient", "GetClassicMobileServices", resp, "Failure responding to request") } return @@ -200,27 +198,26 @@ // GetClassicMobileServicesPreparer prepares the GetClassicMobileServices 
request. func (client ClassicMobileServicesClient) GetClassicMobileServicesPreparer(resourceGroupName string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/classicMobileServices"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/classicMobileServices", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetClassicMobileServicesSender sends the GetClassicMobileServices request. The method will close the // http.Response Body if it receives an error. func (client ClassicMobileServicesClient) GetClassicMobileServicesSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetClassicMobileServicesResponder handles the response to the GetClassicMobileServices request. 
The method always @@ -229,7 +226,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/web/client.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/web/client.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/web/client.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/web/client.go 2016-10-13 14:32:06.000000000 +0000 @@ -1,3 +1,12 @@ +// Package web implements the Azure ARM Web service API version 2015-08-01. +// +// Use these APIs to manage Azure Websites resources through the Azure +// Resource Manager. All task operations conform to the HTTP/1.1 protocol +// specification and each operation returns an x-ms-request-id header that +// can be used to obtain information about the request. You must make sure +// that requests made to these resources are secure. For more information, +// see Authenticating Azure Resource Manager requests +// (https://msdn.microsoft.com/en-us/library/azure/dn790557.aspx). package web // Copyright (c) Microsoft and contributors. All rights reserved. @@ -14,12 +23,12 @@ // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 // Changes may cause incorrect behavior and will be lost if the code is // regenerated. 
import ( - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest" ) const ( @@ -30,17 +39,11 @@ DefaultBaseURI = "https://management.azure.com" ) -// ManagementClient is the use these APIs to manage Azure Websites resources -// through the Azure Resource Manager. All task operations conform to the -// HTTP/1.1 protocol specification and each operation returns an -// x-ms-request-id header that can be used to obtain information about the -// request. You must make sure that requests made to these resources are -// secure. For more information, see Authenticating -// Azure Resource Manager requests. +// ManagementClient is the base client for Web. type ManagementClient struct { autorest.Client BaseURI string + APIVersion string SubscriptionID string } @@ -54,6 +57,7 @@ return ManagementClient{ Client: autorest.NewClientWithUserAgent(UserAgent()), BaseURI: baseURI, + APIVersion: APIVersion, SubscriptionID: subscriptionID, } } diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/web/domains.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/web/domains.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/web/domains.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/web/domains.go 2016-10-13 14:32:06.000000000 +0000 @@ -14,14 +14,14 @@ // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 // Changes may cause incorrect behavior and will be lost if the code is // regenerated. 
import ( - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" "net/http" - "net/url" ) // DomainsClient is the use these APIs to manage Azure Websites resources @@ -50,21 +50,21 @@ // // resourceGroupName is >Name of the resource group domainName is name of // the domain domain is domain registration information -func (client DomainsClient) CreateOrUpdateDomain(resourceGroupName string, domainName string, domain Domain) (result Domain, ae error) { +func (client DomainsClient) CreateOrUpdateDomain(resourceGroupName string, domainName string, domain Domain) (result Domain, err error) { req, err := client.CreateOrUpdateDomainPreparer(resourceGroupName, domainName, domain) if err != nil { - return result, autorest.NewErrorWithError(err, "web/DomainsClient", "CreateOrUpdateDomain", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.DomainsClient", "CreateOrUpdateDomain", nil, "Failure preparing request") } resp, err := client.CreateOrUpdateDomainSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/DomainsClient", "CreateOrUpdateDomain", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.DomainsClient", "CreateOrUpdateDomain", resp, "Failure sending request") } result, err = client.CreateOrUpdateDomainResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/DomainsClient", "CreateOrUpdateDomain", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.DomainsClient", "CreateOrUpdateDomain", resp, "Failure responding to request") } return @@ -73,29 +73,29 @@ // CreateOrUpdateDomainPreparer prepares the CreateOrUpdateDomain request. 
func (client DomainsClient) CreateOrUpdateDomainPreparer(resourceGroupName string, domainName string, domain Domain) (*http.Request, error) { pathParameters := map[string]interface{}{ - "domainName": url.QueryEscape(domainName), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "domainName": autorest.Encode("path", domainName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, + preparer := autorest.CreatePreparer( autorest.AsJSON(), autorest.AsPut(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DomainRegistration/domains/{domainName}"), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DomainRegistration/domains/{domainName}", pathParameters), autorest.WithJSON(domain), - autorest.WithPathParameters(pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // CreateOrUpdateDomainSender sends the CreateOrUpdateDomain request. The method will close the // http.Response Body if it receives an error. func (client DomainsClient) CreateOrUpdateDomainSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusAccepted, http.StatusOK) + return autorest.SendWithSender(client, req) } // CreateOrUpdateDomainResponder handles the response to the CreateOrUpdateDomain request. 
The method always @@ -104,7 +104,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusAccepted, http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusAccepted, http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -116,21 +116,21 @@ // resourceGroupName is name of the resource group domainName is name of the // domain forceHardDeleteDomain is if true then the domain will be deleted // immediately instead of after 24 hours -func (client DomainsClient) DeleteDomain(resourceGroupName string, domainName string, forceHardDeleteDomain *bool) (result ObjectSet, ae error) { +func (client DomainsClient) DeleteDomain(resourceGroupName string, domainName string, forceHardDeleteDomain *bool) (result SetObject, err error) { req, err := client.DeleteDomainPreparer(resourceGroupName, domainName, forceHardDeleteDomain) if err != nil { - return result, autorest.NewErrorWithError(err, "web/DomainsClient", "DeleteDomain", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.DomainsClient", "DeleteDomain", nil, "Failure preparing request") } resp, err := client.DeleteDomainSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/DomainsClient", "DeleteDomain", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.DomainsClient", "DeleteDomain", resp, "Failure sending request") } result, err = client.DeleteDomainResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/DomainsClient", "DeleteDomain", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.DomainsClient", "DeleteDomain", resp, "Failure responding to request") } return @@ -139,40 +139,39 @@ // DeleteDomainPreparer prepares the DeleteDomain request. 
func (client DomainsClient) DeleteDomainPreparer(resourceGroupName string, domainName string, forceHardDeleteDomain *bool) (*http.Request, error) { pathParameters := map[string]interface{}{ - "domainName": url.QueryEscape(domainName), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "domainName": autorest.Encode("path", domainName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } if forceHardDeleteDomain != nil { - queryParameters["forceHardDeleteDomain"] = forceHardDeleteDomain + queryParameters["forceHardDeleteDomain"] = autorest.Encode("query", *forceHardDeleteDomain) } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsDelete(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DomainRegistration/domains/{domainName}"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DomainRegistration/domains/{domainName}", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // DeleteDomainSender sends the DeleteDomain request. The method will close the // http.Response Body if it receives an error. func (client DomainsClient) DeleteDomainSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK, http.StatusNoContent) + return autorest.SendWithSender(client, req) } // DeleteDomainResponder handles the response to the DeleteDomain request. The method always // closes the http.Response Body. 
-func (client DomainsClient) DeleteDomainResponder(resp *http.Response) (result ObjectSet, err error) { +func (client DomainsClient) DeleteDomainResponder(resp *http.Response) (result SetObject, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), autorest.ByUnmarshallingJSON(&result.Value), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -183,21 +182,21 @@ // // resourceGroupName is name of the resource group domainName is name of the // domain -func (client DomainsClient) GetDomain(resourceGroupName string, domainName string) (result Domain, ae error) { +func (client DomainsClient) GetDomain(resourceGroupName string, domainName string) (result Domain, err error) { req, err := client.GetDomainPreparer(resourceGroupName, domainName) if err != nil { - return result, autorest.NewErrorWithError(err, "web/DomainsClient", "GetDomain", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.DomainsClient", "GetDomain", nil, "Failure preparing request") } resp, err := client.GetDomainSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/DomainsClient", "GetDomain", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.DomainsClient", "GetDomain", resp, "Failure sending request") } result, err = client.GetDomainResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/DomainsClient", "GetDomain", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.DomainsClient", "GetDomain", resp, "Failure responding to request") } return @@ -206,28 +205,27 @@ // GetDomainPreparer prepares the GetDomain request. 
func (client DomainsClient) GetDomainPreparer(resourceGroupName string, domainName string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "domainName": url.QueryEscape(domainName), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "domainName": autorest.Encode("path", domainName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DomainRegistration/domains/{domainName}"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DomainRegistration/domains/{domainName}", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetDomainSender sends the GetDomain request. The method will close the // http.Response Body if it receives an error. func (client DomainsClient) GetDomainSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetDomainResponder handles the response to the GetDomain request. 
The method always @@ -236,7 +234,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -247,21 +245,21 @@ // // resourceGroupName is name of the resource group domainName is name of the // domain operationID is domain purchase operation Id -func (client DomainsClient) GetDomainOperation(resourceGroupName string, domainName string, operationID string) (result Domain, ae error) { +func (client DomainsClient) GetDomainOperation(resourceGroupName string, domainName string, operationID string) (result Domain, err error) { req, err := client.GetDomainOperationPreparer(resourceGroupName, domainName, operationID) if err != nil { - return result, autorest.NewErrorWithError(err, "web/DomainsClient", "GetDomainOperation", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.DomainsClient", "GetDomainOperation", nil, "Failure preparing request") } resp, err := client.GetDomainOperationSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/DomainsClient", "GetDomainOperation", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.DomainsClient", "GetDomainOperation", resp, "Failure sending request") } result, err = client.GetDomainOperationResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/DomainsClient", "GetDomainOperation", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.DomainsClient", "GetDomainOperation", resp, "Failure responding to request") } return @@ -270,29 +268,28 @@ // GetDomainOperationPreparer prepares the GetDomainOperation request. 
func (client DomainsClient) GetDomainOperationPreparer(resourceGroupName string, domainName string, operationID string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "domainName": url.QueryEscape(domainName), - "operationId": url.QueryEscape(operationID), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "domainName": autorest.Encode("path", domainName), + "operationId": autorest.Encode("path", operationID), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DomainRegistration/domains/{domainName}/operationresults/{operationId}"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DomainRegistration/domains/{domainName}/operationresults/{operationId}", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetDomainOperationSender sends the GetDomainOperation request. The method will close the // http.Response Body if it receives an error. func (client DomainsClient) GetDomainOperationSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusAccepted, http.StatusOK, http.StatusInternalServerError) + return autorest.SendWithSender(client, req) } // GetDomainOperationResponder handles the response to the GetDomainOperation request. 
The method always @@ -301,7 +298,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusAccepted, http.StatusOK, http.StatusInternalServerError), + azure.WithErrorUnlessStatusCode(http.StatusAccepted, http.StatusOK, http.StatusInternalServerError), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -311,21 +308,21 @@ // GetDomains sends the get domains request. // // resourceGroupName is name of the resource group -func (client DomainsClient) GetDomains(resourceGroupName string) (result DomainCollection, ae error) { +func (client DomainsClient) GetDomains(resourceGroupName string) (result DomainCollection, err error) { req, err := client.GetDomainsPreparer(resourceGroupName) if err != nil { - return result, autorest.NewErrorWithError(err, "web/DomainsClient", "GetDomains", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.DomainsClient", "GetDomains", nil, "Failure preparing request") } resp, err := client.GetDomainsSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/DomainsClient", "GetDomains", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.DomainsClient", "GetDomains", resp, "Failure sending request") } result, err = client.GetDomainsResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/DomainsClient", "GetDomains", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.DomainsClient", "GetDomains", resp, "Failure responding to request") } return @@ -334,27 +331,26 @@ // GetDomainsPreparer prepares the GetDomains request. 
func (client DomainsClient) GetDomainsPreparer(resourceGroupName string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DomainRegistration/domains"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DomainRegistration/domains", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetDomainsSender sends the GetDomains request. The method will close the // http.Response Body if it receives an error. func (client DomainsClient) GetDomainsSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetDomainsResponder handles the response to the GetDomains request. 
The method always @@ -363,7 +359,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -374,21 +370,21 @@ // // resourceGroupName is >Name of the resource group domainName is name of // the domain domain is domain registration information -func (client DomainsClient) UpdateDomain(resourceGroupName string, domainName string, domain Domain) (result Domain, ae error) { +func (client DomainsClient) UpdateDomain(resourceGroupName string, domainName string, domain Domain) (result Domain, err error) { req, err := client.UpdateDomainPreparer(resourceGroupName, domainName, domain) if err != nil { - return result, autorest.NewErrorWithError(err, "web/DomainsClient", "UpdateDomain", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.DomainsClient", "UpdateDomain", nil, "Failure preparing request") } resp, err := client.UpdateDomainSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/DomainsClient", "UpdateDomain", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.DomainsClient", "UpdateDomain", resp, "Failure sending request") } result, err = client.UpdateDomainResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/DomainsClient", "UpdateDomain", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.DomainsClient", "UpdateDomain", resp, "Failure responding to request") } return @@ -397,29 +393,29 @@ // UpdateDomainPreparer prepares the UpdateDomain request. 
func (client DomainsClient) UpdateDomainPreparer(resourceGroupName string, domainName string, domain Domain) (*http.Request, error) { pathParameters := map[string]interface{}{ - "domainName": url.QueryEscape(domainName), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "domainName": autorest.Encode("path", domainName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, + preparer := autorest.CreatePreparer( autorest.AsJSON(), autorest.AsPatch(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DomainRegistration/domains/{domainName}"), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DomainRegistration/domains/{domainName}", pathParameters), autorest.WithJSON(domain), - autorest.WithPathParameters(pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // UpdateDomainSender sends the UpdateDomain request. The method will close the // http.Response Body if it receives an error. func (client DomainsClient) UpdateDomainSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusAccepted, http.StatusOK) + return autorest.SendWithSender(client, req) } // UpdateDomainResponder handles the response to the UpdateDomain request. 
The method always @@ -428,7 +424,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusAccepted, http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusAccepted, http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/web/globalcertificateorder.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/web/globalcertificateorder.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/web/globalcertificateorder.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/web/globalcertificateorder.go 2016-10-13 14:32:06.000000000 +0000 @@ -14,14 +14,14 @@ // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 // Changes may cause incorrect behavior and will be lost if the code is // regenerated. import ( - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" "net/http" - "net/url" ) // GlobalCertificateOrderClient is the use these APIs to manage Azure Websites @@ -49,21 +49,21 @@ } // GetAllCertificateOrders sends the get all certificate orders request. 
-func (client GlobalCertificateOrderClient) GetAllCertificateOrders() (result CertificateOrderCollection, ae error) { +func (client GlobalCertificateOrderClient) GetAllCertificateOrders() (result CertificateOrderCollection, err error) { req, err := client.GetAllCertificateOrdersPreparer() if err != nil { - return result, autorest.NewErrorWithError(err, "web/GlobalCertificateOrderClient", "GetAllCertificateOrders", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.GlobalCertificateOrderClient", "GetAllCertificateOrders", nil, "Failure preparing request") } resp, err := client.GetAllCertificateOrdersSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/GlobalCertificateOrderClient", "GetAllCertificateOrders", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.GlobalCertificateOrderClient", "GetAllCertificateOrders", resp, "Failure sending request") } result, err = client.GetAllCertificateOrdersResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/GlobalCertificateOrderClient", "GetAllCertificateOrders", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.GlobalCertificateOrderClient", "GetAllCertificateOrders", resp, "Failure responding to request") } return @@ -72,26 +72,25 @@ // GetAllCertificateOrdersPreparer prepares the GetAllCertificateOrders request. 
func (client GlobalCertificateOrderClient) GetAllCertificateOrdersPreparer() (*http.Request, error) { pathParameters := map[string]interface{}{ - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/providers/Microsoft.CertificateRegistration/certificateOrders"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.CertificateRegistration/certificateOrders", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetAllCertificateOrdersSender sends the GetAllCertificateOrders request. The method will close the // http.Response Body if it receives an error. func (client GlobalCertificateOrderClient) GetAllCertificateOrdersSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetAllCertificateOrdersResponder handles the response to the GetAllCertificateOrders request. The method always @@ -100,9 +99,72 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} return +} + +// ValidateCertificatePurchaseInformation sends the validate certificate +// purchase information request. 
+// +// certificateOrder is certificate order +func (client GlobalCertificateOrderClient) ValidateCertificatePurchaseInformation(certificateOrder CertificateOrder) (result SetObject, err error) { + req, err := client.ValidateCertificatePurchaseInformationPreparer(certificateOrder) + if err != nil { + return result, autorest.NewErrorWithError(err, "web.GlobalCertificateOrderClient", "ValidateCertificatePurchaseInformation", nil, "Failure preparing request") + } + + resp, err := client.ValidateCertificatePurchaseInformationSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "web.GlobalCertificateOrderClient", "ValidateCertificatePurchaseInformation", resp, "Failure sending request") + } + + result, err = client.ValidateCertificatePurchaseInformationResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "web.GlobalCertificateOrderClient", "ValidateCertificatePurchaseInformation", resp, "Failure responding to request") + } + + return +} + +// ValidateCertificatePurchaseInformationPreparer prepares the ValidateCertificatePurchaseInformation request. 
+func (client GlobalCertificateOrderClient) ValidateCertificatePurchaseInformationPreparer(certificateOrder CertificateOrder) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.CertificateRegistration/validateCertificateRegistrationInformation", pathParameters), + autorest.WithJSON(certificateOrder), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ValidateCertificatePurchaseInformationSender sends the ValidateCertificatePurchaseInformation request. The method will close the +// http.Response Body if it receives an error. +func (client GlobalCertificateOrderClient) ValidateCertificatePurchaseInformationSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ValidateCertificatePurchaseInformationResponder handles the response to the ValidateCertificatePurchaseInformation request. The method always +// closes the http.Response Body. 
+func (client GlobalCertificateOrderClient) ValidateCertificatePurchaseInformationResponder(resp *http.Response) (result SetObject, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result.Value), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return } diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/web/globaldomainregistration.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/web/globaldomainregistration.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/web/globaldomainregistration.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/web/globaldomainregistration.go 2016-10-13 14:32:06.000000000 +0000 @@ -14,14 +14,14 @@ // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 // Changes may cause incorrect behavior and will be lost if the code is // regenerated. import ( - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" "net/http" - "net/url" ) // GlobalDomainRegistrationClient is the use these APIs to manage Azure @@ -51,21 +51,21 @@ // CheckDomainAvailability sends the check domain availability request. 
// // identifier is name of the domain -func (client GlobalDomainRegistrationClient) CheckDomainAvailability(identifier NameIdentifier) (result DomainAvailablilityCheckResult, ae error) { +func (client GlobalDomainRegistrationClient) CheckDomainAvailability(identifier NameIdentifier) (result DomainAvailablilityCheckResult, err error) { req, err := client.CheckDomainAvailabilityPreparer(identifier) if err != nil { - return result, autorest.NewErrorWithError(err, "web/GlobalDomainRegistrationClient", "CheckDomainAvailability", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.GlobalDomainRegistrationClient", "CheckDomainAvailability", nil, "Failure preparing request") } resp, err := client.CheckDomainAvailabilitySender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/GlobalDomainRegistrationClient", "CheckDomainAvailability", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.GlobalDomainRegistrationClient", "CheckDomainAvailability", resp, "Failure sending request") } result, err = client.CheckDomainAvailabilityResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/GlobalDomainRegistrationClient", "CheckDomainAvailability", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.GlobalDomainRegistrationClient", "CheckDomainAvailability", resp, "Failure responding to request") } return @@ -74,27 +74,27 @@ // CheckDomainAvailabilityPreparer prepares the CheckDomainAvailability request. 
func (client GlobalDomainRegistrationClient) CheckDomainAvailabilityPreparer(identifier NameIdentifier) (*http.Request, error) { pathParameters := map[string]interface{}{ - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, + preparer := autorest.CreatePreparer( autorest.AsJSON(), autorest.AsPost(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/providers/Microsoft.DomainRegistration/checkDomainAvailability"), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.DomainRegistration/checkDomainAvailability", pathParameters), autorest.WithJSON(identifier), - autorest.WithPathParameters(pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // CheckDomainAvailabilitySender sends the CheckDomainAvailability request. The method will close the // http.Response Body if it receives an error. func (client GlobalDomainRegistrationClient) CheckDomainAvailabilitySender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // CheckDomainAvailabilityResponder handles the response to the CheckDomainAvailability request. The method always @@ -103,7 +103,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -111,21 +111,21 @@ } // GetAllDomains sends the get all domains request. 
-func (client GlobalDomainRegistrationClient) GetAllDomains() (result DomainCollection, ae error) { +func (client GlobalDomainRegistrationClient) GetAllDomains() (result DomainCollection, err error) { req, err := client.GetAllDomainsPreparer() if err != nil { - return result, autorest.NewErrorWithError(err, "web/GlobalDomainRegistrationClient", "GetAllDomains", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.GlobalDomainRegistrationClient", "GetAllDomains", nil, "Failure preparing request") } resp, err := client.GetAllDomainsSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/GlobalDomainRegistrationClient", "GetAllDomains", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.GlobalDomainRegistrationClient", "GetAllDomains", resp, "Failure sending request") } result, err = client.GetAllDomainsResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/GlobalDomainRegistrationClient", "GetAllDomains", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.GlobalDomainRegistrationClient", "GetAllDomains", resp, "Failure responding to request") } return @@ -134,26 +134,25 @@ // GetAllDomainsPreparer prepares the GetAllDomains request. 
func (client GlobalDomainRegistrationClient) GetAllDomainsPreparer() (*http.Request, error) { pathParameters := map[string]interface{}{ - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/providers/Microsoft.DomainRegistration/domains"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.DomainRegistration/domains", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetAllDomainsSender sends the GetAllDomains request. The method will close the // http.Response Body if it receives an error. func (client GlobalDomainRegistrationClient) GetAllDomainsSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetAllDomainsResponder handles the response to the GetAllDomains request. The method always @@ -162,7 +161,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -171,21 +170,21 @@ // GetDomainControlCenterSsoRequest sends the get domain control center sso // request request. 
-func (client GlobalDomainRegistrationClient) GetDomainControlCenterSsoRequest() (result DomainControlCenterSsoRequest, ae error) { +func (client GlobalDomainRegistrationClient) GetDomainControlCenterSsoRequest() (result DomainControlCenterSsoRequest, err error) { req, err := client.GetDomainControlCenterSsoRequestPreparer() if err != nil { - return result, autorest.NewErrorWithError(err, "web/GlobalDomainRegistrationClient", "GetDomainControlCenterSsoRequest", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.GlobalDomainRegistrationClient", "GetDomainControlCenterSsoRequest", nil, "Failure preparing request") } resp, err := client.GetDomainControlCenterSsoRequestSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/GlobalDomainRegistrationClient", "GetDomainControlCenterSsoRequest", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.GlobalDomainRegistrationClient", "GetDomainControlCenterSsoRequest", resp, "Failure sending request") } result, err = client.GetDomainControlCenterSsoRequestResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/GlobalDomainRegistrationClient", "GetDomainControlCenterSsoRequest", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.GlobalDomainRegistrationClient", "GetDomainControlCenterSsoRequest", resp, "Failure responding to request") } return @@ -194,26 +193,25 @@ // GetDomainControlCenterSsoRequestPreparer prepares the GetDomainControlCenterSsoRequest request. 
func (client GlobalDomainRegistrationClient) GetDomainControlCenterSsoRequestPreparer() (*http.Request, error) { pathParameters := map[string]interface{}{ - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsPost(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/providers/Microsoft.DomainRegistration/generateSsoRequest"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.DomainRegistration/generateSsoRequest", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetDomainControlCenterSsoRequestSender sends the GetDomainControlCenterSsoRequest request. The method will close the // http.Response Body if it receives an error. func (client GlobalDomainRegistrationClient) GetDomainControlCenterSsoRequestSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetDomainControlCenterSsoRequestResponder handles the response to the GetDomainControlCenterSsoRequest request. The method always @@ -222,7 +220,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -232,21 +230,21 @@ // ListDomainRecommendations sends the list domain recommendations request. 
// // parameters is domain recommendation search parameters -func (client GlobalDomainRegistrationClient) ListDomainRecommendations(parameters DomainRecommendationSearchParameters) (result NameIdentifierCollection, ae error) { +func (client GlobalDomainRegistrationClient) ListDomainRecommendations(parameters DomainRecommendationSearchParameters) (result NameIdentifierCollection, err error) { req, err := client.ListDomainRecommendationsPreparer(parameters) if err != nil { - return result, autorest.NewErrorWithError(err, "web/GlobalDomainRegistrationClient", "ListDomainRecommendations", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.GlobalDomainRegistrationClient", "ListDomainRecommendations", nil, "Failure preparing request") } resp, err := client.ListDomainRecommendationsSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/GlobalDomainRegistrationClient", "ListDomainRecommendations", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.GlobalDomainRegistrationClient", "ListDomainRecommendations", resp, "Failure sending request") } result, err = client.ListDomainRecommendationsResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/GlobalDomainRegistrationClient", "ListDomainRecommendations", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.GlobalDomainRegistrationClient", "ListDomainRecommendations", resp, "Failure responding to request") } return @@ -255,27 +253,27 @@ // ListDomainRecommendationsPreparer prepares the ListDomainRecommendations request. 
func (client GlobalDomainRegistrationClient) ListDomainRecommendationsPreparer(parameters DomainRecommendationSearchParameters) (*http.Request, error) { pathParameters := map[string]interface{}{ - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, + preparer := autorest.CreatePreparer( autorest.AsJSON(), autorest.AsPost(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/providers/Microsoft.DomainRegistration/listDomainRecommendations"), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.DomainRegistration/listDomainRecommendations", pathParameters), autorest.WithJSON(parameters), - autorest.WithPathParameters(pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // ListDomainRecommendationsSender sends the ListDomainRecommendations request. The method will close the // http.Response Body if it receives an error. func (client GlobalDomainRegistrationClient) ListDomainRecommendationsSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // ListDomainRecommendationsResponder handles the response to the ListDomainRecommendations request. The method always @@ -284,7 +282,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -295,21 +293,21 @@ // information request. 
// // domainRegistrationInput is domain registration information -func (client GlobalDomainRegistrationClient) ValidateDomainPurchaseInformation(domainRegistrationInput DomainRegistrationInput) (result ObjectSet, ae error) { +func (client GlobalDomainRegistrationClient) ValidateDomainPurchaseInformation(domainRegistrationInput DomainRegistrationInput) (result SetObject, err error) { req, err := client.ValidateDomainPurchaseInformationPreparer(domainRegistrationInput) if err != nil { - return result, autorest.NewErrorWithError(err, "web/GlobalDomainRegistrationClient", "ValidateDomainPurchaseInformation", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.GlobalDomainRegistrationClient", "ValidateDomainPurchaseInformation", nil, "Failure preparing request") } resp, err := client.ValidateDomainPurchaseInformationSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/GlobalDomainRegistrationClient", "ValidateDomainPurchaseInformation", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.GlobalDomainRegistrationClient", "ValidateDomainPurchaseInformation", resp, "Failure sending request") } result, err = client.ValidateDomainPurchaseInformationResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/GlobalDomainRegistrationClient", "ValidateDomainPurchaseInformation", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.GlobalDomainRegistrationClient", "ValidateDomainPurchaseInformation", resp, "Failure responding to request") } return @@ -318,36 +316,36 @@ // ValidateDomainPurchaseInformationPreparer prepares the ValidateDomainPurchaseInformation request. 
func (client GlobalDomainRegistrationClient) ValidateDomainPurchaseInformationPreparer(domainRegistrationInput DomainRegistrationInput) (*http.Request, error) { pathParameters := map[string]interface{}{ - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, + preparer := autorest.CreatePreparer( autorest.AsJSON(), autorest.AsPost(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/providers/Microsoft.DomainRegistration/validateDomainRegistrationInformation"), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.DomainRegistration/validateDomainRegistrationInformation", pathParameters), autorest.WithJSON(domainRegistrationInput), - autorest.WithPathParameters(pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // ValidateDomainPurchaseInformationSender sends the ValidateDomainPurchaseInformation request. The method will close the // http.Response Body if it receives an error. func (client GlobalDomainRegistrationClient) ValidateDomainPurchaseInformationSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // ValidateDomainPurchaseInformationResponder handles the response to the ValidateDomainPurchaseInformation request. The method always // closes the http.Response Body. 
-func (client GlobalDomainRegistrationClient) ValidateDomainPurchaseInformationResponder(resp *http.Response) (result ObjectSet, err error) { +func (client GlobalDomainRegistrationClient) ValidateDomainPurchaseInformationResponder(resp *http.Response) (result SetObject, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result.Value), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/web/global.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/web/global.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/web/global.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/web/global.go 2016-10-13 14:32:06.000000000 +0000 @@ -14,14 +14,14 @@ // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 // Changes may cause incorrect behavior and will be lost if the code is // regenerated. import ( - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" "net/http" - "net/url" ) // GlobalClient is the use these APIs to manage Azure Websites resources @@ -49,21 +49,21 @@ // CheckNameAvailability sends the check name availability request. 
// // request is name availability request -func (client GlobalClient) CheckNameAvailability(request ResourceNameAvailabilityRequest) (result ResourceNameAvailability, ae error) { +func (client GlobalClient) CheckNameAvailability(request ResourceNameAvailabilityRequest) (result ResourceNameAvailability, err error) { req, err := client.CheckNameAvailabilityPreparer(request) if err != nil { - return result, autorest.NewErrorWithError(err, "web/GlobalClient", "CheckNameAvailability", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.GlobalClient", "CheckNameAvailability", nil, "Failure preparing request") } resp, err := client.CheckNameAvailabilitySender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/GlobalClient", "CheckNameAvailability", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.GlobalClient", "CheckNameAvailability", resp, "Failure sending request") } result, err = client.CheckNameAvailabilityResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/GlobalClient", "CheckNameAvailability", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.GlobalClient", "CheckNameAvailability", resp, "Failure responding to request") } return @@ -72,27 +72,27 @@ // CheckNameAvailabilityPreparer prepares the CheckNameAvailability request. 
func (client GlobalClient) CheckNameAvailabilityPreparer(request ResourceNameAvailabilityRequest) (*http.Request, error) { pathParameters := map[string]interface{}{ - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, + preparer := autorest.CreatePreparer( autorest.AsJSON(), autorest.AsPost(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/providers/Microsoft.Web/checknameavailability"), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Web/checknameavailability", pathParameters), autorest.WithJSON(request), - autorest.WithPathParameters(pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // CheckNameAvailabilitySender sends the CheckNameAvailability request. The method will close the // http.Response Body if it receives an error. func (client GlobalClient) CheckNameAvailabilitySender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // CheckNameAvailabilityResponder handles the response to the CheckNameAvailability request. The method always @@ -101,7 +101,65 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// GetAllCertificates sends the get all certificates request. 
+func (client GlobalClient) GetAllCertificates() (result CertificateCollection, err error) { + req, err := client.GetAllCertificatesPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "web.GlobalClient", "GetAllCertificates", nil, "Failure preparing request") + } + + resp, err := client.GetAllCertificatesSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "web.GlobalClient", "GetAllCertificates", resp, "Failure sending request") + } + + result, err = client.GetAllCertificatesResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "web.GlobalClient", "GetAllCertificates", resp, "Failure responding to request") + } + + return +} + +// GetAllCertificatesPreparer prepares the GetAllCertificates request. +func (client GlobalClient) GetAllCertificatesPreparer() (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Web/certificates", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetAllCertificatesSender sends the GetAllCertificates request. The method will close the +// http.Response Body if it receives an error. +func (client GlobalClient) GetAllCertificatesSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetAllCertificatesResponder handles the response to the GetAllCertificates request. The method always +// closes the http.Response Body. 
+func (client GlobalClient) GetAllCertificatesResponder(resp *http.Response) (result CertificateCollection, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -110,21 +168,21 @@ // GetAllClassicMobileServices sends the get all classic mobile services // request. -func (client GlobalClient) GetAllClassicMobileServices() (result ClassicMobileServiceCollection, ae error) { +func (client GlobalClient) GetAllClassicMobileServices() (result ClassicMobileServiceCollection, err error) { req, err := client.GetAllClassicMobileServicesPreparer() if err != nil { - return result, autorest.NewErrorWithError(err, "web/GlobalClient", "GetAllClassicMobileServices", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.GlobalClient", "GetAllClassicMobileServices", nil, "Failure preparing request") } resp, err := client.GetAllClassicMobileServicesSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/GlobalClient", "GetAllClassicMobileServices", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.GlobalClient", "GetAllClassicMobileServices", resp, "Failure sending request") } result, err = client.GetAllClassicMobileServicesResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/GlobalClient", "GetAllClassicMobileServices", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.GlobalClient", "GetAllClassicMobileServices", resp, "Failure responding to request") } return @@ -133,26 +191,25 @@ // GetAllClassicMobileServicesPreparer prepares the GetAllClassicMobileServices request. 
func (client GlobalClient) GetAllClassicMobileServicesPreparer() (*http.Request, error) { pathParameters := map[string]interface{}{ - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/providers/Microsoft.Web/classicMobileServices"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Web/classicMobileServices", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetAllClassicMobileServicesSender sends the GetAllClassicMobileServices request. The method will close the // http.Response Body if it receives an error. func (client GlobalClient) GetAllClassicMobileServicesSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetAllClassicMobileServicesResponder handles the response to the GetAllClassicMobileServices request. The method always @@ -161,7 +218,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -169,21 +226,21 @@ } // GetAllHostingEnvironments sends the get all hosting environments request. 
-func (client GlobalClient) GetAllHostingEnvironments() (result HostingEnvironmentCollection, ae error) { +func (client GlobalClient) GetAllHostingEnvironments() (result HostingEnvironmentCollection, err error) { req, err := client.GetAllHostingEnvironmentsPreparer() if err != nil { - return result, autorest.NewErrorWithError(err, "web/GlobalClient", "GetAllHostingEnvironments", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.GlobalClient", "GetAllHostingEnvironments", nil, "Failure preparing request") } resp, err := client.GetAllHostingEnvironmentsSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/GlobalClient", "GetAllHostingEnvironments", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.GlobalClient", "GetAllHostingEnvironments", resp, "Failure sending request") } result, err = client.GetAllHostingEnvironmentsResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/GlobalClient", "GetAllHostingEnvironments", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.GlobalClient", "GetAllHostingEnvironments", resp, "Failure responding to request") } return @@ -192,26 +249,25 @@ // GetAllHostingEnvironmentsPreparer prepares the GetAllHostingEnvironments request. 
func (client GlobalClient) GetAllHostingEnvironmentsPreparer() (*http.Request, error) { pathParameters := map[string]interface{}{ - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/providers/Microsoft.Web/hostingEnvironments"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Web/hostingEnvironments", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetAllHostingEnvironmentsSender sends the GetAllHostingEnvironments request. The method will close the // http.Response Body if it receives an error. func (client GlobalClient) GetAllHostingEnvironmentsSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetAllHostingEnvironmentsResponder handles the response to the GetAllHostingEnvironments request. The method always @@ -220,7 +276,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -229,21 +285,21 @@ // GetAllManagedHostingEnvironments sends the get all managed hosting // environments request. 
-func (client GlobalClient) GetAllManagedHostingEnvironments() (result ManagedHostingEnvironmentCollection, ae error) { +func (client GlobalClient) GetAllManagedHostingEnvironments() (result ManagedHostingEnvironmentCollection, err error) { req, err := client.GetAllManagedHostingEnvironmentsPreparer() if err != nil { - return result, autorest.NewErrorWithError(err, "web/GlobalClient", "GetAllManagedHostingEnvironments", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.GlobalClient", "GetAllManagedHostingEnvironments", nil, "Failure preparing request") } resp, err := client.GetAllManagedHostingEnvironmentsSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/GlobalClient", "GetAllManagedHostingEnvironments", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.GlobalClient", "GetAllManagedHostingEnvironments", resp, "Failure sending request") } result, err = client.GetAllManagedHostingEnvironmentsResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/GlobalClient", "GetAllManagedHostingEnvironments", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.GlobalClient", "GetAllManagedHostingEnvironments", resp, "Failure responding to request") } return @@ -252,26 +308,25 @@ // GetAllManagedHostingEnvironmentsPreparer prepares the GetAllManagedHostingEnvironments request. 
func (client GlobalClient) GetAllManagedHostingEnvironmentsPreparer() (*http.Request, error) { pathParameters := map[string]interface{}{ - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/providers/Microsoft.Web/managedHostingEnvironments"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Web/managedHostingEnvironments", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetAllManagedHostingEnvironmentsSender sends the GetAllManagedHostingEnvironments request. The method will close the // http.Response Body if it receives an error. func (client GlobalClient) GetAllManagedHostingEnvironmentsSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetAllManagedHostingEnvironmentsResponder handles the response to the GetAllManagedHostingEnvironments request. The method always @@ -280,7 +335,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -292,21 +347,21 @@ // detailed is false to return a subset of App Service Plan properties, true // to return all of the properties. // Retrieval of all properties may increase the API latency. 
-func (client GlobalClient) GetAllServerFarms(detailed *bool) (result ServerFarmCollection, ae error) { +func (client GlobalClient) GetAllServerFarms(detailed *bool) (result ServerFarmCollection, err error) { req, err := client.GetAllServerFarmsPreparer(detailed) if err != nil { - return result, autorest.NewErrorWithError(err, "web/GlobalClient", "GetAllServerFarms", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.GlobalClient", "GetAllServerFarms", nil, "Failure preparing request") } resp, err := client.GetAllServerFarmsSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/GlobalClient", "GetAllServerFarms", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.GlobalClient", "GetAllServerFarms", resp, "Failure sending request") } result, err = client.GetAllServerFarmsResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/GlobalClient", "GetAllServerFarms", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.GlobalClient", "GetAllServerFarms", resp, "Failure responding to request") } return @@ -315,29 +370,28 @@ // GetAllServerFarmsPreparer prepares the GetAllServerFarms request. 
func (client GlobalClient) GetAllServerFarmsPreparer(detailed *bool) (*http.Request, error) { pathParameters := map[string]interface{}{ - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } if detailed != nil { - queryParameters["detailed"] = detailed + queryParameters["detailed"] = autorest.Encode("query", *detailed) } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/providers/Microsoft.Web/serverfarms"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Web/serverfarms", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetAllServerFarmsSender sends the GetAllServerFarms request. The method will close the // http.Response Body if it receives an error. func (client GlobalClient) GetAllServerFarmsSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetAllServerFarmsResponder handles the response to the GetAllServerFarms request. The method always @@ -346,7 +400,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -354,21 +408,21 @@ } // GetAllSites sends the get all sites request. 
-func (client GlobalClient) GetAllSites() (result SiteCollection, ae error) { +func (client GlobalClient) GetAllSites() (result SiteCollection, err error) { req, err := client.GetAllSitesPreparer() if err != nil { - return result, autorest.NewErrorWithError(err, "web/GlobalClient", "GetAllSites", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.GlobalClient", "GetAllSites", nil, "Failure preparing request") } resp, err := client.GetAllSitesSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/GlobalClient", "GetAllSites", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.GlobalClient", "GetAllSites", resp, "Failure sending request") } result, err = client.GetAllSitesResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/GlobalClient", "GetAllSites", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.GlobalClient", "GetAllSites", resp, "Failure responding to request") } return @@ -377,26 +431,25 @@ // GetAllSitesPreparer prepares the GetAllSites request. 
func (client GlobalClient) GetAllSitesPreparer() (*http.Request, error) { pathParameters := map[string]interface{}{ - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/providers/Microsoft.Web/sites"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Web/sites", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetAllSitesSender sends the GetAllSites request. The method will close the // http.Response Body if it receives an error. func (client GlobalClient) GetAllSitesSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetAllSitesResponder handles the response to the GetAllSites request. The method always @@ -405,73 +458,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// GetAllWebHostingPlans sends the get all web hosting plans request. -// -// detailed is false to return a subset of App Service Plan properties, true -// to return all of the properties. -// Retrieval of all properties may increase the API latency. 
-func (client GlobalClient) GetAllWebHostingPlans(detailed *bool) (result ServerFarmCollection, ae error) { - req, err := client.GetAllWebHostingPlansPreparer(detailed) - if err != nil { - return result, autorest.NewErrorWithError(err, "web/GlobalClient", "GetAllWebHostingPlans", "Failure preparing request") - } - - resp, err := client.GetAllWebHostingPlansSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/GlobalClient", "GetAllWebHostingPlans", "Failure sending request") - } - - result, err = client.GetAllWebHostingPlansResponder(resp) - if err != nil { - ae = autorest.NewErrorWithError(err, "web/GlobalClient", "GetAllWebHostingPlans", "Failure responding to request") - } - - return -} - -// GetAllWebHostingPlansPreparer prepares the GetAllWebHostingPlans request. -func (client GlobalClient) GetAllWebHostingPlansPreparer(detailed *bool) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "subscriptionId": url.QueryEscape(client.SubscriptionID), - } - - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - if detailed != nil { - queryParameters["detailed"] = detailed - } - - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/providers/Microsoft.Web/webhostingplans"), - autorest.WithPathParameters(pathParameters), - autorest.WithQueryParameters(queryParameters)) -} - -// GetAllWebHostingPlansSender sends the GetAllWebHostingPlans request. The method will close the -// http.Response Body if it receives an error. -func (client GlobalClient) GetAllWebHostingPlansSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) -} - -// GetAllWebHostingPlansResponder handles the response to the GetAllWebHostingPlans request. The method always -// closes the http.Response Body. 
-func (client GlobalClient) GetAllWebHostingPlansResponder(resp *http.Response) (result ServerFarmCollection, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -479,49 +466,53 @@ } // GetSubscriptionGeoRegions sends the get subscription geo regions request. -func (client GlobalClient) GetSubscriptionGeoRegions() (result GeoRegionCollection, ae error) { - req, err := client.GetSubscriptionGeoRegionsPreparer() +// +// sku is filter only to regions that support this sku +func (client GlobalClient) GetSubscriptionGeoRegions(sku string) (result GeoRegionCollection, err error) { + req, err := client.GetSubscriptionGeoRegionsPreparer(sku) if err != nil { - return result, autorest.NewErrorWithError(err, "web/GlobalClient", "GetSubscriptionGeoRegions", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.GlobalClient", "GetSubscriptionGeoRegions", nil, "Failure preparing request") } resp, err := client.GetSubscriptionGeoRegionsSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/GlobalClient", "GetSubscriptionGeoRegions", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.GlobalClient", "GetSubscriptionGeoRegions", resp, "Failure sending request") } result, err = client.GetSubscriptionGeoRegionsResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/GlobalClient", "GetSubscriptionGeoRegions", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.GlobalClient", "GetSubscriptionGeoRegions", resp, "Failure responding to request") } return } // GetSubscriptionGeoRegionsPreparer prepares the GetSubscriptionGeoRegions request. 
-func (client GlobalClient) GetSubscriptionGeoRegionsPreparer() (*http.Request, error) { +func (client GlobalClient) GetSubscriptionGeoRegionsPreparer(sku string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, + } + if len(sku) > 0 { + queryParameters["sku"] = autorest.Encode("query", sku) } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/providers/Microsoft.Web/geoRegions"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Web/geoRegions", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetSubscriptionGeoRegionsSender sends the GetSubscriptionGeoRegions request. The method will close the // http.Response Body if it receives an error. func (client GlobalClient) GetSubscriptionGeoRegionsSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetSubscriptionGeoRegionsResponder handles the response to the GetSubscriptionGeoRegions request. The method always @@ -530,7 +521,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -539,21 +530,21 @@ // GetSubscriptionPublishingCredentials sends the get subscription publishing // credentials request. 
-func (client GlobalClient) GetSubscriptionPublishingCredentials() (result User, ae error) { +func (client GlobalClient) GetSubscriptionPublishingCredentials() (result User, err error) { req, err := client.GetSubscriptionPublishingCredentialsPreparer() if err != nil { - return result, autorest.NewErrorWithError(err, "web/GlobalClient", "GetSubscriptionPublishingCredentials", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.GlobalClient", "GetSubscriptionPublishingCredentials", nil, "Failure preparing request") } resp, err := client.GetSubscriptionPublishingCredentialsSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/GlobalClient", "GetSubscriptionPublishingCredentials", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.GlobalClient", "GetSubscriptionPublishingCredentials", resp, "Failure sending request") } result, err = client.GetSubscriptionPublishingCredentialsResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/GlobalClient", "GetSubscriptionPublishingCredentials", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.GlobalClient", "GetSubscriptionPublishingCredentials", resp, "Failure responding to request") } return @@ -562,26 +553,25 @@ // GetSubscriptionPublishingCredentialsPreparer prepares the GetSubscriptionPublishingCredentials request. 
func (client GlobalClient) GetSubscriptionPublishingCredentialsPreparer() (*http.Request, error) { pathParameters := map[string]interface{}{ - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/providers/Microsoft.Web/publishingCredentials"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Web/publishingCredentials", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetSubscriptionPublishingCredentialsSender sends the GetSubscriptionPublishingCredentials request. The method will close the // http.Response Body if it receives an error. func (client GlobalClient) GetSubscriptionPublishingCredentialsSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetSubscriptionPublishingCredentialsResponder handles the response to the GetSubscriptionPublishingCredentials request. The method always @@ -590,7 +580,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -601,21 +591,21 @@ // available request. 
// // name is hosting environment name -func (client GlobalClient) IsHostingEnvironmentNameAvailable(name string) (result ObjectSet, ae error) { +func (client GlobalClient) IsHostingEnvironmentNameAvailable(name string) (result SetObject, err error) { req, err := client.IsHostingEnvironmentNameAvailablePreparer(name) if err != nil { - return result, autorest.NewErrorWithError(err, "web/GlobalClient", "IsHostingEnvironmentNameAvailable", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.GlobalClient", "IsHostingEnvironmentNameAvailable", nil, "Failure preparing request") } resp, err := client.IsHostingEnvironmentNameAvailableSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/GlobalClient", "IsHostingEnvironmentNameAvailable", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.GlobalClient", "IsHostingEnvironmentNameAvailable", resp, "Failure sending request") } result, err = client.IsHostingEnvironmentNameAvailableResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/GlobalClient", "IsHostingEnvironmentNameAvailable", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.GlobalClient", "IsHostingEnvironmentNameAvailable", resp, "Failure responding to request") } return @@ -624,36 +614,35 @@ // IsHostingEnvironmentNameAvailablePreparer prepares the IsHostingEnvironmentNameAvailable request. 
func (client GlobalClient) IsHostingEnvironmentNameAvailablePreparer(name string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, - "name": name, + "api-version": client.APIVersion, + "name": autorest.Encode("query", name), } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/providers/Microsoft.Web/ishostingenvironmentnameavailable"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Web/ishostingenvironmentnameavailable", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // IsHostingEnvironmentNameAvailableSender sends the IsHostingEnvironmentNameAvailable request. The method will close the // http.Response Body if it receives an error. func (client GlobalClient) IsHostingEnvironmentNameAvailableSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // IsHostingEnvironmentNameAvailableResponder handles the response to the IsHostingEnvironmentNameAvailable request. The method always // closes the http.Response Body. 
-func (client GlobalClient) IsHostingEnvironmentNameAvailableResponder(resp *http.Response) (result ObjectSet, err error) { +func (client GlobalClient) IsHostingEnvironmentNameAvailableResponder(resp *http.Response) (result SetObject, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result.Value), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -664,21 +653,21 @@ // environment with legacy name available request. // // name is hosting environment name -func (client GlobalClient) IsHostingEnvironmentWithLegacyNameAvailable(name string) (result ObjectSet, ae error) { +func (client GlobalClient) IsHostingEnvironmentWithLegacyNameAvailable(name string) (result SetObject, err error) { req, err := client.IsHostingEnvironmentWithLegacyNameAvailablePreparer(name) if err != nil { - return result, autorest.NewErrorWithError(err, "web/GlobalClient", "IsHostingEnvironmentWithLegacyNameAvailable", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.GlobalClient", "IsHostingEnvironmentWithLegacyNameAvailable", nil, "Failure preparing request") } resp, err := client.IsHostingEnvironmentWithLegacyNameAvailableSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/GlobalClient", "IsHostingEnvironmentWithLegacyNameAvailable", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.GlobalClient", "IsHostingEnvironmentWithLegacyNameAvailable", resp, "Failure sending request") } result, err = client.IsHostingEnvironmentWithLegacyNameAvailableResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/GlobalClient", "IsHostingEnvironmentWithLegacyNameAvailable", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.GlobalClient", 
"IsHostingEnvironmentWithLegacyNameAvailable", resp, "Failure responding to request") } return @@ -687,36 +676,35 @@ // IsHostingEnvironmentWithLegacyNameAvailablePreparer prepares the IsHostingEnvironmentWithLegacyNameAvailable request. func (client GlobalClient) IsHostingEnvironmentWithLegacyNameAvailablePreparer(name string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/providers/Microsoft.Web/ishostingenvironmentnameavailable/{name}"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Web/ishostingenvironmentnameavailable/{name}", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // IsHostingEnvironmentWithLegacyNameAvailableSender sends the IsHostingEnvironmentWithLegacyNameAvailable request. The method will close the // http.Response Body if it receives an error. func (client GlobalClient) IsHostingEnvironmentWithLegacyNameAvailableSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // IsHostingEnvironmentWithLegacyNameAvailableResponder handles the response to the IsHostingEnvironmentWithLegacyNameAvailable request. The method always // closes the http.Response Body. 
-func (client GlobalClient) IsHostingEnvironmentWithLegacyNameAvailableResponder(resp *http.Response) (result ObjectSet, err error) { +func (client GlobalClient) IsHostingEnvironmentWithLegacyNameAvailableResponder(resp *http.Response) (result SetObject, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result.Value), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -724,21 +712,21 @@ } // ListPremierAddOnOffers sends the list premier add on offers request. -func (client GlobalClient) ListPremierAddOnOffers() (result ObjectSet, ae error) { +func (client GlobalClient) ListPremierAddOnOffers() (result SetObject, err error) { req, err := client.ListPremierAddOnOffersPreparer() if err != nil { - return result, autorest.NewErrorWithError(err, "web/GlobalClient", "ListPremierAddOnOffers", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.GlobalClient", "ListPremierAddOnOffers", nil, "Failure preparing request") } resp, err := client.ListPremierAddOnOffersSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/GlobalClient", "ListPremierAddOnOffers", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.GlobalClient", "ListPremierAddOnOffers", resp, "Failure sending request") } result, err = client.ListPremierAddOnOffersResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/GlobalClient", "ListPremierAddOnOffers", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.GlobalClient", "ListPremierAddOnOffers", resp, "Failure responding to request") } return @@ -747,35 +735,34 @@ // ListPremierAddOnOffersPreparer prepares the ListPremierAddOnOffers request. 
func (client GlobalClient) ListPremierAddOnOffersPreparer() (*http.Request, error) { pathParameters := map[string]interface{}{ - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/providers/Microsoft.Web/premieraddonoffers"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Web/premieraddonoffers", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // ListPremierAddOnOffersSender sends the ListPremierAddOnOffers request. The method will close the // http.Response Body if it receives an error. func (client GlobalClient) ListPremierAddOnOffersSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // ListPremierAddOnOffersResponder handles the response to the ListPremierAddOnOffers request. The method always // closes the http.Response Body. -func (client GlobalClient) ListPremierAddOnOffersResponder(resp *http.Response) (result ObjectSet, err error) { +func (client GlobalClient) ListPremierAddOnOffersResponder(resp *http.Response) (result SetObject, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result.Value), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -786,21 +773,21 @@ // publishing credentials request. 
// // requestMessage is requestMessage with new publishing credentials -func (client GlobalClient) UpdateSubscriptionPublishingCredentials(requestMessage User) (result User, ae error) { +func (client GlobalClient) UpdateSubscriptionPublishingCredentials(requestMessage User) (result User, err error) { req, err := client.UpdateSubscriptionPublishingCredentialsPreparer(requestMessage) if err != nil { - return result, autorest.NewErrorWithError(err, "web/GlobalClient", "UpdateSubscriptionPublishingCredentials", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.GlobalClient", "UpdateSubscriptionPublishingCredentials", nil, "Failure preparing request") } resp, err := client.UpdateSubscriptionPublishingCredentialsSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/GlobalClient", "UpdateSubscriptionPublishingCredentials", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.GlobalClient", "UpdateSubscriptionPublishingCredentials", resp, "Failure sending request") } result, err = client.UpdateSubscriptionPublishingCredentialsResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/GlobalClient", "UpdateSubscriptionPublishingCredentials", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.GlobalClient", "UpdateSubscriptionPublishingCredentials", resp, "Failure responding to request") } return @@ -809,27 +796,27 @@ // UpdateSubscriptionPublishingCredentialsPreparer prepares the UpdateSubscriptionPublishingCredentials request. 
func (client GlobalClient) UpdateSubscriptionPublishingCredentialsPreparer(requestMessage User) (*http.Request, error) { pathParameters := map[string]interface{}{ - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, + preparer := autorest.CreatePreparer( autorest.AsJSON(), autorest.AsPut(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/providers/Microsoft.Web/publishingCredentials"), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Web/publishingCredentials", pathParameters), autorest.WithJSON(requestMessage), - autorest.WithPathParameters(pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // UpdateSubscriptionPublishingCredentialsSender sends the UpdateSubscriptionPublishingCredentials request. The method will close the // http.Response Body if it receives an error. func (client GlobalClient) UpdateSubscriptionPublishingCredentialsSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // UpdateSubscriptionPublishingCredentialsResponder handles the response to the UpdateSubscriptionPublishingCredentials request. 
The method always @@ -838,7 +825,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/web/globalresourcegroups.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/web/globalresourcegroups.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/web/globalresourcegroups.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/web/globalresourcegroups.go 2016-10-13 14:32:06.000000000 +0000 @@ -14,14 +14,14 @@ // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 // Changes may cause incorrect behavior and will be lost if the code is // regenerated. import ( - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" "net/http" - "net/url" ) // GlobalResourceGroupsClient is the use these APIs to manage Azure Websites @@ -50,21 +50,21 @@ // MoveResources sends the move resources request. 
// -func (client GlobalResourceGroupsClient) MoveResources(resourceGroupName string, moveResourceEnvelope CsmMoveResourceEnvelope) (result autorest.Response, ae error) { +func (client GlobalResourceGroupsClient) MoveResources(resourceGroupName string, moveResourceEnvelope CsmMoveResourceEnvelope) (result autorest.Response, err error) { req, err := client.MoveResourcesPreparer(resourceGroupName, moveResourceEnvelope) if err != nil { - return result, autorest.NewErrorWithError(err, "web/GlobalResourceGroupsClient", "MoveResources", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.GlobalResourceGroupsClient", "MoveResources", nil, "Failure preparing request") } resp, err := client.MoveResourcesSender(req) if err != nil { result.Response = resp - return result, autorest.NewErrorWithError(err, "web/GlobalResourceGroupsClient", "MoveResources", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.GlobalResourceGroupsClient", "MoveResources", resp, "Failure sending request") } result, err = client.MoveResourcesResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/GlobalResourceGroupsClient", "MoveResources", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.GlobalResourceGroupsClient", "MoveResources", resp, "Failure responding to request") } return @@ -73,28 +73,28 @@ // MoveResourcesPreparer prepares the MoveResources request. 
func (client GlobalResourceGroupsClient) MoveResourcesPreparer(resourceGroupName string, moveResourceEnvelope CsmMoveResourceEnvelope) (*http.Request, error) { pathParameters := map[string]interface{}{ - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, + preparer := autorest.CreatePreparer( autorest.AsJSON(), autorest.AsPost(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/moveResources"), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/moveResources", pathParameters), autorest.WithJSON(moveResourceEnvelope), - autorest.WithPathParameters(pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // MoveResourcesSender sends the MoveResources request. The method will close the // http.Response Body if it receives an error. func (client GlobalResourceGroupsClient) MoveResourcesSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK, http.StatusNoContent) + return autorest.SendWithSender(client, req) } // MoveResourcesResponder handles the response to the MoveResources request. 
The method always @@ -103,7 +103,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), autorest.ByClosing()) result.Response = resp return diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/web/hostingenvironments.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/web/hostingenvironments.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/web/hostingenvironments.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/web/hostingenvironments.go 2016-10-13 14:32:06.000000000 +0000 @@ -14,14 +14,14 @@ // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 // Changes may cause incorrect behavior and will be lost if the code is // regenerated. import ( - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" "net/http" - "net/url" ) // HostingEnvironmentsClient is the use these APIs to manage Azure Websites @@ -49,271 +49,284 @@ } // CreateOrUpdateHostingEnvironment sends the create or update hosting -// environment request. +// environment request. This method may poll for completion. Polling can be +// canceled by passing the cancel channel argument. The channel will be used +// to cancel polling and any outstanding HTTP requests. 
// // resourceGroupName is name of resource group name is name of // hostingEnvironment (App Service Environment) hostingEnvironmentEnvelope is // properties of hostingEnvironment (App Service Environment) -func (client HostingEnvironmentsClient) CreateOrUpdateHostingEnvironment(resourceGroupName string, name string, hostingEnvironmentEnvelope HostingEnvironment) (result HostingEnvironment, ae error) { - req, err := client.CreateOrUpdateHostingEnvironmentPreparer(resourceGroupName, name, hostingEnvironmentEnvelope) +func (client HostingEnvironmentsClient) CreateOrUpdateHostingEnvironment(resourceGroupName string, name string, hostingEnvironmentEnvelope HostingEnvironment, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.CreateOrUpdateHostingEnvironmentPreparer(resourceGroupName, name, hostingEnvironmentEnvelope, cancel) if err != nil { - return result, autorest.NewErrorWithError(err, "web/HostingEnvironmentsClient", "CreateOrUpdateHostingEnvironment", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.HostingEnvironmentsClient", "CreateOrUpdateHostingEnvironment", nil, "Failure preparing request") } resp, err := client.CreateOrUpdateHostingEnvironmentSender(req) if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/HostingEnvironmentsClient", "CreateOrUpdateHostingEnvironment", "Failure sending request") + result.Response = resp + return result, autorest.NewErrorWithError(err, "web.HostingEnvironmentsClient", "CreateOrUpdateHostingEnvironment", resp, "Failure sending request") } result, err = client.CreateOrUpdateHostingEnvironmentResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/HostingEnvironmentsClient", "CreateOrUpdateHostingEnvironment", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.HostingEnvironmentsClient", "CreateOrUpdateHostingEnvironment", resp, "Failure 
responding to request") } return } // CreateOrUpdateHostingEnvironmentPreparer prepares the CreateOrUpdateHostingEnvironment request. -func (client HostingEnvironmentsClient) CreateOrUpdateHostingEnvironmentPreparer(resourceGroupName string, name string, hostingEnvironmentEnvelope HostingEnvironment) (*http.Request, error) { +func (client HostingEnvironmentsClient) CreateOrUpdateHostingEnvironmentPreparer(resourceGroupName string, name string, hostingEnvironmentEnvelope HostingEnvironment, cancel <-chan struct{}) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, + preparer := autorest.CreatePreparer( autorest.AsJSON(), autorest.AsPut(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}"), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}", pathParameters), autorest.WithJSON(hostingEnvironmentEnvelope), - autorest.WithPathParameters(pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) } // CreateOrUpdateHostingEnvironmentSender sends the CreateOrUpdateHostingEnvironment request. The method will close the // http.Response Body if it receives an error. 
func (client HostingEnvironmentsClient) CreateOrUpdateHostingEnvironmentSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK, http.StatusAccepted, http.StatusBadRequest, http.StatusNotFound, http.StatusConflict) + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) } // CreateOrUpdateHostingEnvironmentResponder handles the response to the CreateOrUpdateHostingEnvironment request. The method always // closes the http.Response Body. -func (client HostingEnvironmentsClient) CreateOrUpdateHostingEnvironmentResponder(resp *http.Response) (result HostingEnvironment, err error) { +func (client HostingEnvironmentsClient) CreateOrUpdateHostingEnvironmentResponder(resp *http.Response) (result autorest.Response, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusBadRequest, http.StatusNotFound, http.StatusConflict), - autorest.ByUnmarshallingJSON(&result), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusBadRequest, http.StatusNotFound, http.StatusConflict), autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} + result.Response = resp return } // CreateOrUpdateMultiRolePool sends the create or update multi role pool -// request. +// request. This method may poll for completion. Polling can be canceled by +// passing the cancel channel argument. The channel will be used to cancel +// polling and any outstanding HTTP requests. 
// // resourceGroupName is name of resource group name is name of // hostingEnvironment (App Service Environment) multiRolePoolEnvelope is // properties of multiRole pool -func (client HostingEnvironmentsClient) CreateOrUpdateMultiRolePool(resourceGroupName string, name string, multiRolePoolEnvelope WorkerPool) (result WorkerPool, ae error) { - req, err := client.CreateOrUpdateMultiRolePoolPreparer(resourceGroupName, name, multiRolePoolEnvelope) +func (client HostingEnvironmentsClient) CreateOrUpdateMultiRolePool(resourceGroupName string, name string, multiRolePoolEnvelope WorkerPool, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.CreateOrUpdateMultiRolePoolPreparer(resourceGroupName, name, multiRolePoolEnvelope, cancel) if err != nil { - return result, autorest.NewErrorWithError(err, "web/HostingEnvironmentsClient", "CreateOrUpdateMultiRolePool", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.HostingEnvironmentsClient", "CreateOrUpdateMultiRolePool", nil, "Failure preparing request") } resp, err := client.CreateOrUpdateMultiRolePoolSender(req) if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/HostingEnvironmentsClient", "CreateOrUpdateMultiRolePool", "Failure sending request") + result.Response = resp + return result, autorest.NewErrorWithError(err, "web.HostingEnvironmentsClient", "CreateOrUpdateMultiRolePool", resp, "Failure sending request") } result, err = client.CreateOrUpdateMultiRolePoolResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/HostingEnvironmentsClient", "CreateOrUpdateMultiRolePool", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.HostingEnvironmentsClient", "CreateOrUpdateMultiRolePool", resp, "Failure responding to request") } return } // CreateOrUpdateMultiRolePoolPreparer prepares the CreateOrUpdateMultiRolePool request. 
-func (client HostingEnvironmentsClient) CreateOrUpdateMultiRolePoolPreparer(resourceGroupName string, name string, multiRolePoolEnvelope WorkerPool) (*http.Request, error) { +func (client HostingEnvironmentsClient) CreateOrUpdateMultiRolePoolPreparer(resourceGroupName string, name string, multiRolePoolEnvelope WorkerPool, cancel <-chan struct{}) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, + preparer := autorest.CreatePreparer( autorest.AsJSON(), autorest.AsPut(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/multiRolePools/default"), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/multiRolePools/default", pathParameters), autorest.WithJSON(multiRolePoolEnvelope), - autorest.WithPathParameters(pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) } // CreateOrUpdateMultiRolePoolSender sends the CreateOrUpdateMultiRolePool request. The method will close the // http.Response Body if it receives an error. 
func (client HostingEnvironmentsClient) CreateOrUpdateMultiRolePoolSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK, http.StatusAccepted, http.StatusBadRequest, http.StatusNotFound, http.StatusConflict) + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) } // CreateOrUpdateMultiRolePoolResponder handles the response to the CreateOrUpdateMultiRolePool request. The method always // closes the http.Response Body. -func (client HostingEnvironmentsClient) CreateOrUpdateMultiRolePoolResponder(resp *http.Response) (result WorkerPool, err error) { +func (client HostingEnvironmentsClient) CreateOrUpdateMultiRolePoolResponder(resp *http.Response) (result autorest.Response, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusBadRequest, http.StatusNotFound, http.StatusConflict), - autorest.ByUnmarshallingJSON(&result), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusBadRequest, http.StatusNotFound, http.StatusConflict), autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} + result.Response = resp return } // CreateOrUpdateWorkerPool sends the create or update worker pool request. +// This method may poll for completion. Polling can be canceled by passing +// the cancel channel argument. The channel will be used to cancel polling +// and any outstanding HTTP requests. 
// // resourceGroupName is name of resource group name is name of // hostingEnvironment (App Service Environment) workerPoolName is name of // worker pool workerPoolEnvelope is properties of worker pool -func (client HostingEnvironmentsClient) CreateOrUpdateWorkerPool(resourceGroupName string, name string, workerPoolName string, workerPoolEnvelope WorkerPool) (result WorkerPool, ae error) { - req, err := client.CreateOrUpdateWorkerPoolPreparer(resourceGroupName, name, workerPoolName, workerPoolEnvelope) +func (client HostingEnvironmentsClient) CreateOrUpdateWorkerPool(resourceGroupName string, name string, workerPoolName string, workerPoolEnvelope WorkerPool, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.CreateOrUpdateWorkerPoolPreparer(resourceGroupName, name, workerPoolName, workerPoolEnvelope, cancel) if err != nil { - return result, autorest.NewErrorWithError(err, "web/HostingEnvironmentsClient", "CreateOrUpdateWorkerPool", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.HostingEnvironmentsClient", "CreateOrUpdateWorkerPool", nil, "Failure preparing request") } resp, err := client.CreateOrUpdateWorkerPoolSender(req) if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/HostingEnvironmentsClient", "CreateOrUpdateWorkerPool", "Failure sending request") + result.Response = resp + return result, autorest.NewErrorWithError(err, "web.HostingEnvironmentsClient", "CreateOrUpdateWorkerPool", resp, "Failure sending request") } result, err = client.CreateOrUpdateWorkerPoolResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/HostingEnvironmentsClient", "CreateOrUpdateWorkerPool", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.HostingEnvironmentsClient", "CreateOrUpdateWorkerPool", resp, "Failure responding to request") } return } // CreateOrUpdateWorkerPoolPreparer prepares the 
CreateOrUpdateWorkerPool request. -func (client HostingEnvironmentsClient) CreateOrUpdateWorkerPoolPreparer(resourceGroupName string, name string, workerPoolName string, workerPoolEnvelope WorkerPool) (*http.Request, error) { +func (client HostingEnvironmentsClient) CreateOrUpdateWorkerPoolPreparer(resourceGroupName string, name string, workerPoolName string, workerPoolEnvelope WorkerPool, cancel <-chan struct{}) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - "workerPoolName": url.QueryEscape(workerPoolName), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "workerPoolName": autorest.Encode("path", workerPoolName), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, + preparer := autorest.CreatePreparer( autorest.AsJSON(), autorest.AsPut(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/workerPools/{workerPoolName}"), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/workerPools/{workerPoolName}", pathParameters), autorest.WithJSON(workerPoolEnvelope), - autorest.WithPathParameters(pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) } // CreateOrUpdateWorkerPoolSender sends the CreateOrUpdateWorkerPool request. The method will close the // http.Response Body if it receives an error. 
func (client HostingEnvironmentsClient) CreateOrUpdateWorkerPoolSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK, http.StatusAccepted, http.StatusBadRequest, http.StatusNotFound, http.StatusConflict) + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) } // CreateOrUpdateWorkerPoolResponder handles the response to the CreateOrUpdateWorkerPool request. The method always // closes the http.Response Body. -func (client HostingEnvironmentsClient) CreateOrUpdateWorkerPoolResponder(resp *http.Response) (result WorkerPool, err error) { +func (client HostingEnvironmentsClient) CreateOrUpdateWorkerPoolResponder(resp *http.Response) (result autorest.Response, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusBadRequest, http.StatusNotFound, http.StatusConflict), - autorest.ByUnmarshallingJSON(&result), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusBadRequest, http.StatusNotFound, http.StatusConflict), autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} + result.Response = resp return } -// DeleteHostingEnvironment sends the delete hosting environment request. +// DeleteHostingEnvironment sends the delete hosting environment request. This +// method may poll for completion. Polling can be canceled by passing the +// cancel channel argument. The channel will be used to cancel polling and +// any outstanding HTTP requests. 
// // resourceGroupName is name of resource group name is name of // hostingEnvironment (App Service Environment) forceDelete is delete even if // the hostingEnvironment (App Service Environment) contains resources -func (client HostingEnvironmentsClient) DeleteHostingEnvironment(resourceGroupName string, name string, forceDelete *bool) (result ObjectSet, ae error) { - req, err := client.DeleteHostingEnvironmentPreparer(resourceGroupName, name, forceDelete) +func (client HostingEnvironmentsClient) DeleteHostingEnvironment(resourceGroupName string, name string, forceDelete *bool, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.DeleteHostingEnvironmentPreparer(resourceGroupName, name, forceDelete, cancel) if err != nil { - return result, autorest.NewErrorWithError(err, "web/HostingEnvironmentsClient", "DeleteHostingEnvironment", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.HostingEnvironmentsClient", "DeleteHostingEnvironment", nil, "Failure preparing request") } resp, err := client.DeleteHostingEnvironmentSender(req) if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/HostingEnvironmentsClient", "DeleteHostingEnvironment", "Failure sending request") + result.Response = resp + return result, autorest.NewErrorWithError(err, "web.HostingEnvironmentsClient", "DeleteHostingEnvironment", resp, "Failure sending request") } result, err = client.DeleteHostingEnvironmentResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/HostingEnvironmentsClient", "DeleteHostingEnvironment", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.HostingEnvironmentsClient", "DeleteHostingEnvironment", resp, "Failure responding to request") } return } // DeleteHostingEnvironmentPreparer prepares the DeleteHostingEnvironment request. 
-func (client HostingEnvironmentsClient) DeleteHostingEnvironmentPreparer(resourceGroupName string, name string, forceDelete *bool) (*http.Request, error) { +func (client HostingEnvironmentsClient) DeleteHostingEnvironmentPreparer(resourceGroupName string, name string, forceDelete *bool, cancel <-chan struct{}) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } if forceDelete != nil { - queryParameters["forceDelete"] = forceDelete + queryParameters["forceDelete"] = autorest.Encode("query", *forceDelete) } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsDelete(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) } // DeleteHostingEnvironmentSender sends the DeleteHostingEnvironment request. The method will close the // http.Response Body if it receives an error. 
func (client HostingEnvironmentsClient) DeleteHostingEnvironmentSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK, http.StatusAccepted, http.StatusBadRequest, http.StatusNotFound, http.StatusConflict) + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) } // DeleteHostingEnvironmentResponder handles the response to the DeleteHostingEnvironment request. The method always // closes the http.Response Body. -func (client HostingEnvironmentsClient) DeleteHostingEnvironmentResponder(resp *http.Response) (result ObjectSet, err error) { +func (client HostingEnvironmentsClient) DeleteHostingEnvironmentResponder(resp *http.Response) (result autorest.Response, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusBadRequest, http.StatusNotFound, http.StatusConflict), - autorest.ByUnmarshallingJSON(&result.Value), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusBadRequest, http.StatusNotFound, http.StatusConflict), autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} + result.Response = resp return } @@ -321,21 +334,21 @@ // // resourceGroupName is name of resource group name is name of // hostingEnvironment (App Service Environment) -func (client HostingEnvironmentsClient) GetHostingEnvironment(resourceGroupName string, name string) (result HostingEnvironment, ae error) { +func (client HostingEnvironmentsClient) GetHostingEnvironment(resourceGroupName string, name string) (result HostingEnvironment, err error) { req, err := client.GetHostingEnvironmentPreparer(resourceGroupName, name) if err != nil { - return result, autorest.NewErrorWithError(err, "web/HostingEnvironmentsClient", "GetHostingEnvironment", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.HostingEnvironmentsClient", "GetHostingEnvironment", 
nil, "Failure preparing request") } resp, err := client.GetHostingEnvironmentSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/HostingEnvironmentsClient", "GetHostingEnvironment", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.HostingEnvironmentsClient", "GetHostingEnvironment", resp, "Failure sending request") } result, err = client.GetHostingEnvironmentResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/HostingEnvironmentsClient", "GetHostingEnvironment", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.HostingEnvironmentsClient", "GetHostingEnvironment", resp, "Failure responding to request") } return @@ -344,28 +357,27 @@ // GetHostingEnvironmentPreparer prepares the GetHostingEnvironment request. func (client HostingEnvironmentsClient) GetHostingEnvironmentPreparer(resourceGroupName string, name string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}", 
pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetHostingEnvironmentSender sends the GetHostingEnvironment request. The method will close the // http.Response Body if it receives an error. func (client HostingEnvironmentsClient) GetHostingEnvironmentSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetHostingEnvironmentResponder handles the response to the GetHostingEnvironment request. The method always @@ -374,7 +386,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -386,21 +398,21 @@ // // resourceGroupName is name of resource group name is name of // hostingEnvironment (App Service Environment) -func (client HostingEnvironmentsClient) GetHostingEnvironmentCapacities(resourceGroupName string, name string) (result StampCapacityCollection, ae error) { +func (client HostingEnvironmentsClient) GetHostingEnvironmentCapacities(resourceGroupName string, name string) (result StampCapacityCollection, err error) { req, err := client.GetHostingEnvironmentCapacitiesPreparer(resourceGroupName, name) if err != nil { - return result, autorest.NewErrorWithError(err, "web/HostingEnvironmentsClient", "GetHostingEnvironmentCapacities", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.HostingEnvironmentsClient", "GetHostingEnvironmentCapacities", nil, "Failure preparing request") } resp, err := client.GetHostingEnvironmentCapacitiesSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/HostingEnvironmentsClient", "GetHostingEnvironmentCapacities", "Failure sending request") + return result, 
autorest.NewErrorWithError(err, "web.HostingEnvironmentsClient", "GetHostingEnvironmentCapacities", resp, "Failure sending request") } result, err = client.GetHostingEnvironmentCapacitiesResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/HostingEnvironmentsClient", "GetHostingEnvironmentCapacities", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.HostingEnvironmentsClient", "GetHostingEnvironmentCapacities", resp, "Failure responding to request") } return @@ -409,28 +421,27 @@ // GetHostingEnvironmentCapacitiesPreparer prepares the GetHostingEnvironmentCapacities request. func (client HostingEnvironmentsClient) GetHostingEnvironmentCapacitiesPreparer(resourceGroupName string, name string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/capacities/compute"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/capacities/compute", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetHostingEnvironmentCapacitiesSender sends the GetHostingEnvironmentCapacities request. 
The method will close the // http.Response Body if it receives an error. func (client HostingEnvironmentsClient) GetHostingEnvironmentCapacitiesSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetHostingEnvironmentCapacitiesResponder handles the response to the GetHostingEnvironmentCapacities request. The method always @@ -439,7 +450,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -451,21 +462,21 @@ // // resourceGroupName is name of resource group name is name of // hostingEnvironment (App Service Environment) -func (client HostingEnvironmentsClient) GetHostingEnvironmentDiagnostics(resourceGroupName string, name string) (result HostingEnvironmentDiagnosticsList, ae error) { +func (client HostingEnvironmentsClient) GetHostingEnvironmentDiagnostics(resourceGroupName string, name string) (result ListHostingEnvironmentDiagnostics, err error) { req, err := client.GetHostingEnvironmentDiagnosticsPreparer(resourceGroupName, name) if err != nil { - return result, autorest.NewErrorWithError(err, "web/HostingEnvironmentsClient", "GetHostingEnvironmentDiagnostics", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.HostingEnvironmentsClient", "GetHostingEnvironmentDiagnostics", nil, "Failure preparing request") } resp, err := client.GetHostingEnvironmentDiagnosticsSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/HostingEnvironmentsClient", "GetHostingEnvironmentDiagnostics", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.HostingEnvironmentsClient", "GetHostingEnvironmentDiagnostics", resp, "Failure sending 
request") } result, err = client.GetHostingEnvironmentDiagnosticsResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/HostingEnvironmentsClient", "GetHostingEnvironmentDiagnostics", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.HostingEnvironmentsClient", "GetHostingEnvironmentDiagnostics", resp, "Failure responding to request") } return @@ -474,37 +485,36 @@ // GetHostingEnvironmentDiagnosticsPreparer prepares the GetHostingEnvironmentDiagnostics request. func (client HostingEnvironmentsClient) GetHostingEnvironmentDiagnosticsPreparer(resourceGroupName string, name string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/diagnostics"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/diagnostics", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetHostingEnvironmentDiagnosticsSender sends the GetHostingEnvironmentDiagnostics request. The method will close the // http.Response Body if it receives an error. 
func (client HostingEnvironmentsClient) GetHostingEnvironmentDiagnosticsSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetHostingEnvironmentDiagnosticsResponder handles the response to the GetHostingEnvironmentDiagnostics request. The method always // closes the http.Response Body. -func (client HostingEnvironmentsClient) GetHostingEnvironmentDiagnosticsResponder(resp *http.Response) (result HostingEnvironmentDiagnosticsList, err error) { +func (client HostingEnvironmentsClient) GetHostingEnvironmentDiagnosticsResponder(resp *http.Response) (result ListHostingEnvironmentDiagnostics, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result.Value), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -517,21 +527,21 @@ // resourceGroupName is name of resource group name is name of // hostingEnvironment (App Service Environment) diagnosticsName is name of // the diagnostics -func (client HostingEnvironmentsClient) GetHostingEnvironmentDiagnosticsItem(resourceGroupName string, name string, diagnosticsName string) (result HostingEnvironmentDiagnostics, ae error) { +func (client HostingEnvironmentsClient) GetHostingEnvironmentDiagnosticsItem(resourceGroupName string, name string, diagnosticsName string) (result HostingEnvironmentDiagnostics, err error) { req, err := client.GetHostingEnvironmentDiagnosticsItemPreparer(resourceGroupName, name, diagnosticsName) if err != nil { - return result, autorest.NewErrorWithError(err, "web/HostingEnvironmentsClient", "GetHostingEnvironmentDiagnosticsItem", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.HostingEnvironmentsClient", "GetHostingEnvironmentDiagnosticsItem", nil, "Failure preparing request") } resp, err := 
client.GetHostingEnvironmentDiagnosticsItemSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/HostingEnvironmentsClient", "GetHostingEnvironmentDiagnosticsItem", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.HostingEnvironmentsClient", "GetHostingEnvironmentDiagnosticsItem", resp, "Failure sending request") } result, err = client.GetHostingEnvironmentDiagnosticsItemResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/HostingEnvironmentsClient", "GetHostingEnvironmentDiagnosticsItem", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.HostingEnvironmentsClient", "GetHostingEnvironmentDiagnosticsItem", resp, "Failure responding to request") } return @@ -540,29 +550,28 @@ // GetHostingEnvironmentDiagnosticsItemPreparer prepares the GetHostingEnvironmentDiagnosticsItem request. func (client HostingEnvironmentsClient) GetHostingEnvironmentDiagnosticsItemPreparer(resourceGroupName string, name string, diagnosticsName string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "diagnosticsName": url.QueryEscape(diagnosticsName), - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "diagnosticsName": autorest.Encode("path", diagnosticsName), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - 
autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/diagnostics/{diagnosticsName}"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/diagnostics/{diagnosticsName}", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetHostingEnvironmentDiagnosticsItemSender sends the GetHostingEnvironmentDiagnosticsItem request. The method will close the // http.Response Body if it receives an error. func (client HostingEnvironmentsClient) GetHostingEnvironmentDiagnosticsItemSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetHostingEnvironmentDiagnosticsItemResponder handles the response to the GetHostingEnvironmentDiagnosticsItem request. 
The method always @@ -571,7 +580,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -583,21 +592,21 @@ // // resourceGroupName is name of resource group name is name of // hostingEnvironment (App Service Environment) -func (client HostingEnvironmentsClient) GetHostingEnvironmentMetricDefinitions(resourceGroupName string, name string) (result MetricDefinition, ae error) { +func (client HostingEnvironmentsClient) GetHostingEnvironmentMetricDefinitions(resourceGroupName string, name string) (result MetricDefinition, err error) { req, err := client.GetHostingEnvironmentMetricDefinitionsPreparer(resourceGroupName, name) if err != nil { - return result, autorest.NewErrorWithError(err, "web/HostingEnvironmentsClient", "GetHostingEnvironmentMetricDefinitions", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.HostingEnvironmentsClient", "GetHostingEnvironmentMetricDefinitions", nil, "Failure preparing request") } resp, err := client.GetHostingEnvironmentMetricDefinitionsSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/HostingEnvironmentsClient", "GetHostingEnvironmentMetricDefinitions", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.HostingEnvironmentsClient", "GetHostingEnvironmentMetricDefinitions", resp, "Failure sending request") } result, err = client.GetHostingEnvironmentMetricDefinitionsResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/HostingEnvironmentsClient", "GetHostingEnvironmentMetricDefinitions", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.HostingEnvironmentsClient", "GetHostingEnvironmentMetricDefinitions", resp, "Failure 
responding to request") } return @@ -606,28 +615,27 @@ // GetHostingEnvironmentMetricDefinitionsPreparer prepares the GetHostingEnvironmentMetricDefinitions request. func (client HostingEnvironmentsClient) GetHostingEnvironmentMetricDefinitionsPreparer(resourceGroupName string, name string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/metricdefinitions"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/metricdefinitions", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetHostingEnvironmentMetricDefinitionsSender sends the GetHostingEnvironmentMetricDefinitions request. The method will close the // http.Response Body if it receives an error. func (client HostingEnvironmentsClient) GetHostingEnvironmentMetricDefinitionsSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetHostingEnvironmentMetricDefinitionsResponder handles the response to the GetHostingEnvironmentMetricDefinitions request. 
The method always @@ -636,7 +644,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -653,21 +661,21 @@ // or name.value eq 'Metric2') and startTime eq '2014-01-01T00:00:00Z' and // endTime eq '2014-12-31T23:59:59Z' and timeGrain eq // duration'[Hour|Minute|Day]'. -func (client HostingEnvironmentsClient) GetHostingEnvironmentMetrics(resourceGroupName string, name string, details *bool, filter string) (result ResourceMetricCollection, ae error) { +func (client HostingEnvironmentsClient) GetHostingEnvironmentMetrics(resourceGroupName string, name string, details *bool, filter string) (result ResourceMetricCollection, err error) { req, err := client.GetHostingEnvironmentMetricsPreparer(resourceGroupName, name, details, filter) if err != nil { - return result, autorest.NewErrorWithError(err, "web/HostingEnvironmentsClient", "GetHostingEnvironmentMetrics", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.HostingEnvironmentsClient", "GetHostingEnvironmentMetrics", nil, "Failure preparing request") } resp, err := client.GetHostingEnvironmentMetricsSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/HostingEnvironmentsClient", "GetHostingEnvironmentMetrics", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.HostingEnvironmentsClient", "GetHostingEnvironmentMetrics", resp, "Failure sending request") } result, err = client.GetHostingEnvironmentMetricsResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/HostingEnvironmentsClient", "GetHostingEnvironmentMetrics", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.HostingEnvironmentsClient", 
"GetHostingEnvironmentMetrics", resp, "Failure responding to request") } return @@ -676,34 +684,33 @@ // GetHostingEnvironmentMetricsPreparer prepares the GetHostingEnvironmentMetrics request. func (client HostingEnvironmentsClient) GetHostingEnvironmentMetricsPreparer(resourceGroupName string, name string, details *bool, filter string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } if details != nil { - queryParameters["details"] = details + queryParameters["details"] = autorest.Encode("query", *details) } if len(filter) > 0 { - queryParameters["$filter"] = filter + queryParameters["$filter"] = autorest.Encode("query", filter) } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/metrics"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/metrics", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetHostingEnvironmentMetricsSender sends the GetHostingEnvironmentMetrics request. The method will close the // http.Response Body if it receives an error. 
func (client HostingEnvironmentsClient) GetHostingEnvironmentMetricsSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetHostingEnvironmentMetricsResponder handles the response to the GetHostingEnvironmentMetrics request. The method always @@ -712,7 +719,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -724,21 +731,21 @@ // // resourceGroupName is name of resource group name is name of // hostingEnvironment (App Service Environment) -func (client HostingEnvironmentsClient) GetHostingEnvironmentMultiRoleMetricDefinitions(resourceGroupName string, name string) (result MetricDefinitionCollection, ae error) { +func (client HostingEnvironmentsClient) GetHostingEnvironmentMultiRoleMetricDefinitions(resourceGroupName string, name string) (result MetricDefinitionCollection, err error) { req, err := client.GetHostingEnvironmentMultiRoleMetricDefinitionsPreparer(resourceGroupName, name) if err != nil { - return result, autorest.NewErrorWithError(err, "web/HostingEnvironmentsClient", "GetHostingEnvironmentMultiRoleMetricDefinitions", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.HostingEnvironmentsClient", "GetHostingEnvironmentMultiRoleMetricDefinitions", nil, "Failure preparing request") } resp, err := client.GetHostingEnvironmentMultiRoleMetricDefinitionsSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/HostingEnvironmentsClient", "GetHostingEnvironmentMultiRoleMetricDefinitions", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.HostingEnvironmentsClient", "GetHostingEnvironmentMultiRoleMetricDefinitions", 
resp, "Failure sending request") } result, err = client.GetHostingEnvironmentMultiRoleMetricDefinitionsResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/HostingEnvironmentsClient", "GetHostingEnvironmentMultiRoleMetricDefinitions", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.HostingEnvironmentsClient", "GetHostingEnvironmentMultiRoleMetricDefinitions", resp, "Failure responding to request") } return @@ -747,28 +754,27 @@ // GetHostingEnvironmentMultiRoleMetricDefinitionsPreparer prepares the GetHostingEnvironmentMultiRoleMetricDefinitions request. func (client HostingEnvironmentsClient) GetHostingEnvironmentMultiRoleMetricDefinitionsPreparer(resourceGroupName string, name string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/multiRolePools/default/metricdefinitions"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/multiRolePools/default/metricdefinitions", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // 
GetHostingEnvironmentMultiRoleMetricDefinitionsSender sends the GetHostingEnvironmentMultiRoleMetricDefinitions request. The method will close the // http.Response Body if it receives an error. func (client HostingEnvironmentsClient) GetHostingEnvironmentMultiRoleMetricDefinitionsSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetHostingEnvironmentMultiRoleMetricDefinitionsResponder handles the response to the GetHostingEnvironmentMultiRoleMetricDefinitions request. The method always @@ -777,7 +783,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -795,21 +801,21 @@ // odata syntax. Example: $filter=(name.value eq 'Metric1' or name.value eq // 'Metric2') and startTime eq '2014-01-01T00:00:00Z' and endTime eq // '2014-12-31T23:59:59Z' and timeGrain eq duration'[Hour|Minute|Day]'. 
-func (client HostingEnvironmentsClient) GetHostingEnvironmentMultiRoleMetrics(resourceGroupName string, name string, startTime string, endTime string, timeGrain string, details *bool, filter string) (result ResourceMetricCollection, ae error) { +func (client HostingEnvironmentsClient) GetHostingEnvironmentMultiRoleMetrics(resourceGroupName string, name string, startTime string, endTime string, timeGrain string, details *bool, filter string) (result ResourceMetricCollection, err error) { req, err := client.GetHostingEnvironmentMultiRoleMetricsPreparer(resourceGroupName, name, startTime, endTime, timeGrain, details, filter) if err != nil { - return result, autorest.NewErrorWithError(err, "web/HostingEnvironmentsClient", "GetHostingEnvironmentMultiRoleMetrics", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.HostingEnvironmentsClient", "GetHostingEnvironmentMultiRoleMetrics", nil, "Failure preparing request") } resp, err := client.GetHostingEnvironmentMultiRoleMetricsSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/HostingEnvironmentsClient", "GetHostingEnvironmentMultiRoleMetrics", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.HostingEnvironmentsClient", "GetHostingEnvironmentMultiRoleMetrics", resp, "Failure sending request") } result, err = client.GetHostingEnvironmentMultiRoleMetricsResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/HostingEnvironmentsClient", "GetHostingEnvironmentMultiRoleMetrics", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.HostingEnvironmentsClient", "GetHostingEnvironmentMultiRoleMetrics", resp, "Failure responding to request") } return @@ -818,43 +824,42 @@ // GetHostingEnvironmentMultiRoleMetricsPreparer prepares the GetHostingEnvironmentMultiRoleMetrics request. 
func (client HostingEnvironmentsClient) GetHostingEnvironmentMultiRoleMetricsPreparer(resourceGroupName string, name string, startTime string, endTime string, timeGrain string, details *bool, filter string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } if len(startTime) > 0 { - queryParameters["startTime"] = startTime + queryParameters["startTime"] = autorest.Encode("query", startTime) } if len(endTime) > 0 { - queryParameters["endTime"] = endTime + queryParameters["endTime"] = autorest.Encode("query", endTime) } if len(timeGrain) > 0 { - queryParameters["timeGrain"] = timeGrain + queryParameters["timeGrain"] = autorest.Encode("query", timeGrain) } if details != nil { - queryParameters["details"] = details + queryParameters["details"] = autorest.Encode("query", *details) } if len(filter) > 0 { - queryParameters["$filter"] = filter + queryParameters["$filter"] = autorest.Encode("query", filter) } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/multiRolePools/default/metrics"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/multiRolePools/default/metrics", pathParameters), autorest.WithQueryParameters(queryParameters)) + 
return preparer.Prepare(&http.Request{}) } // GetHostingEnvironmentMultiRoleMetricsSender sends the GetHostingEnvironmentMultiRoleMetrics request. The method will close the // http.Response Body if it receives an error. func (client HostingEnvironmentsClient) GetHostingEnvironmentMultiRoleMetricsSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetHostingEnvironmentMultiRoleMetricsResponder handles the response to the GetHostingEnvironmentMultiRoleMetrics request. The method always @@ -863,7 +868,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -875,21 +880,21 @@ // // resourceGroupName is name of resource group name is name of // hostingEnvironment (App Service Environment) -func (client HostingEnvironmentsClient) GetHostingEnvironmentMultiRoleUsages(resourceGroupName string, name string) (result UsageCollection, ae error) { +func (client HostingEnvironmentsClient) GetHostingEnvironmentMultiRoleUsages(resourceGroupName string, name string) (result UsageCollection, err error) { req, err := client.GetHostingEnvironmentMultiRoleUsagesPreparer(resourceGroupName, name) if err != nil { - return result, autorest.NewErrorWithError(err, "web/HostingEnvironmentsClient", "GetHostingEnvironmentMultiRoleUsages", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.HostingEnvironmentsClient", "GetHostingEnvironmentMultiRoleUsages", nil, "Failure preparing request") } resp, err := client.GetHostingEnvironmentMultiRoleUsagesSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/HostingEnvironmentsClient", "GetHostingEnvironmentMultiRoleUsages", "Failure 
sending request") + return result, autorest.NewErrorWithError(err, "web.HostingEnvironmentsClient", "GetHostingEnvironmentMultiRoleUsages", resp, "Failure sending request") } result, err = client.GetHostingEnvironmentMultiRoleUsagesResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/HostingEnvironmentsClient", "GetHostingEnvironmentMultiRoleUsages", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.HostingEnvironmentsClient", "GetHostingEnvironmentMultiRoleUsages", resp, "Failure responding to request") } return @@ -898,28 +903,27 @@ // GetHostingEnvironmentMultiRoleUsagesPreparer prepares the GetHostingEnvironmentMultiRoleUsages request. func (client HostingEnvironmentsClient) GetHostingEnvironmentMultiRoleUsagesPreparer(resourceGroupName string, name string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/multiRolePools/default/usages"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/multiRolePools/default/usages", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } 
// GetHostingEnvironmentMultiRoleUsagesSender sends the GetHostingEnvironmentMultiRoleUsages request. The method will close the // http.Response Body if it receives an error. func (client HostingEnvironmentsClient) GetHostingEnvironmentMultiRoleUsagesSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetHostingEnvironmentMultiRoleUsagesResponder handles the response to the GetHostingEnvironmentMultiRoleUsages request. The method always @@ -928,7 +932,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -941,21 +945,21 @@ // resourceGroupName is name of resource group name is name of // hostingEnvironment (App Service Environment) operationID is operation // identifier GUID -func (client HostingEnvironmentsClient) GetHostingEnvironmentOperation(resourceGroupName string, name string, operationID string) (result ObjectSet, ae error) { +func (client HostingEnvironmentsClient) GetHostingEnvironmentOperation(resourceGroupName string, name string, operationID string) (result SetObject, err error) { req, err := client.GetHostingEnvironmentOperationPreparer(resourceGroupName, name, operationID) if err != nil { - return result, autorest.NewErrorWithError(err, "web/HostingEnvironmentsClient", "GetHostingEnvironmentOperation", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.HostingEnvironmentsClient", "GetHostingEnvironmentOperation", nil, "Failure preparing request") } resp, err := client.GetHostingEnvironmentOperationSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/HostingEnvironmentsClient", "GetHostingEnvironmentOperation", "Failure sending 
request") + return result, autorest.NewErrorWithError(err, "web.HostingEnvironmentsClient", "GetHostingEnvironmentOperation", resp, "Failure sending request") } result, err = client.GetHostingEnvironmentOperationResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/HostingEnvironmentsClient", "GetHostingEnvironmentOperation", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.HostingEnvironmentsClient", "GetHostingEnvironmentOperation", resp, "Failure responding to request") } return @@ -964,38 +968,37 @@ // GetHostingEnvironmentOperationPreparer prepares the GetHostingEnvironmentOperation request. func (client HostingEnvironmentsClient) GetHostingEnvironmentOperationPreparer(resourceGroupName string, name string, operationID string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "operationId": url.QueryEscape(operationID), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "operationId": autorest.Encode("path", operationID), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/operations/{operationId}"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/operations/{operationId}", pathParameters), 
autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetHostingEnvironmentOperationSender sends the GetHostingEnvironmentOperation request. The method will close the // http.Response Body if it receives an error. func (client HostingEnvironmentsClient) GetHostingEnvironmentOperationSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK, http.StatusAccepted, http.StatusNotFound, http.StatusInternalServerError) + return autorest.SendWithSender(client, req) } // GetHostingEnvironmentOperationResponder handles the response to the GetHostingEnvironmentOperation request. The method always // closes the http.Response Body. -func (client HostingEnvironmentsClient) GetHostingEnvironmentOperationResponder(resp *http.Response) (result ObjectSet, err error) { +func (client HostingEnvironmentsClient) GetHostingEnvironmentOperationResponder(resp *http.Response) (result SetObject, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNotFound, http.StatusInternalServerError), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNotFound, http.StatusInternalServerError), autorest.ByUnmarshallingJSON(&result.Value), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -1007,21 +1010,21 @@ // // resourceGroupName is name of resource group name is name of // hostingEnvironment (App Service Environment) -func (client HostingEnvironmentsClient) GetHostingEnvironmentOperations(resourceGroupName string, name string) (result ObjectSet, ae error) { +func (client HostingEnvironmentsClient) GetHostingEnvironmentOperations(resourceGroupName string, name string) (result SetObject, err error) { req, err := client.GetHostingEnvironmentOperationsPreparer(resourceGroupName, name) if err != nil { - return result, autorest.NewErrorWithError(err, 
"web/HostingEnvironmentsClient", "GetHostingEnvironmentOperations", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.HostingEnvironmentsClient", "GetHostingEnvironmentOperations", nil, "Failure preparing request") } resp, err := client.GetHostingEnvironmentOperationsSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/HostingEnvironmentsClient", "GetHostingEnvironmentOperations", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.HostingEnvironmentsClient", "GetHostingEnvironmentOperations", resp, "Failure sending request") } result, err = client.GetHostingEnvironmentOperationsResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/HostingEnvironmentsClient", "GetHostingEnvironmentOperations", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.HostingEnvironmentsClient", "GetHostingEnvironmentOperations", resp, "Failure responding to request") } return @@ -1030,37 +1033,36 @@ // GetHostingEnvironmentOperationsPreparer prepares the GetHostingEnvironmentOperations request. 
func (client HostingEnvironmentsClient) GetHostingEnvironmentOperationsPreparer(resourceGroupName string, name string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/operations"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/operations", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetHostingEnvironmentOperationsSender sends the GetHostingEnvironmentOperations request. The method will close the // http.Response Body if it receives an error. func (client HostingEnvironmentsClient) GetHostingEnvironmentOperationsSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetHostingEnvironmentOperationsResponder handles the response to the GetHostingEnvironmentOperations request. The method always // closes the http.Response Body. 
-func (client HostingEnvironmentsClient) GetHostingEnvironmentOperationsResponder(resp *http.Response) (result ObjectSet, err error) { +func (client HostingEnvironmentsClient) GetHostingEnvironmentOperationsResponder(resp *http.Response) (result SetObject, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result.Value), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -1070,21 +1072,21 @@ // GetHostingEnvironments sends the get hosting environments request. // // resourceGroupName is name of resource group -func (client HostingEnvironmentsClient) GetHostingEnvironments(resourceGroupName string) (result HostingEnvironmentCollection, ae error) { +func (client HostingEnvironmentsClient) GetHostingEnvironments(resourceGroupName string) (result HostingEnvironmentCollection, err error) { req, err := client.GetHostingEnvironmentsPreparer(resourceGroupName) if err != nil { - return result, autorest.NewErrorWithError(err, "web/HostingEnvironmentsClient", "GetHostingEnvironments", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.HostingEnvironmentsClient", "GetHostingEnvironments", nil, "Failure preparing request") } resp, err := client.GetHostingEnvironmentsSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/HostingEnvironmentsClient", "GetHostingEnvironments", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.HostingEnvironmentsClient", "GetHostingEnvironments", resp, "Failure sending request") } result, err = client.GetHostingEnvironmentsResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/HostingEnvironmentsClient", "GetHostingEnvironments", "Failure responding to request") + err = autorest.NewErrorWithError(err, 
"web.HostingEnvironmentsClient", "GetHostingEnvironments", resp, "Failure responding to request") } return @@ -1093,27 +1095,26 @@ // GetHostingEnvironmentsPreparer prepares the GetHostingEnvironments request. func (client HostingEnvironmentsClient) GetHostingEnvironmentsPreparer(resourceGroupName string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetHostingEnvironmentsSender sends the GetHostingEnvironments request. The method will close the // http.Response Body if it receives an error. func (client HostingEnvironmentsClient) GetHostingEnvironmentsSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetHostingEnvironmentsResponder handles the response to the GetHostingEnvironments request. 
The method always @@ -1122,7 +1123,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -1134,21 +1135,21 @@ // // resourceGroupName is name of resource group name is name of // hostingEnvironment (App Service Environment) -func (client HostingEnvironmentsClient) GetHostingEnvironmentServerFarms(resourceGroupName string, name string) (result ServerFarmCollection, ae error) { +func (client HostingEnvironmentsClient) GetHostingEnvironmentServerFarms(resourceGroupName string, name string) (result ServerFarmCollection, err error) { req, err := client.GetHostingEnvironmentServerFarmsPreparer(resourceGroupName, name) if err != nil { - return result, autorest.NewErrorWithError(err, "web/HostingEnvironmentsClient", "GetHostingEnvironmentServerFarms", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.HostingEnvironmentsClient", "GetHostingEnvironmentServerFarms", nil, "Failure preparing request") } resp, err := client.GetHostingEnvironmentServerFarmsSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/HostingEnvironmentsClient", "GetHostingEnvironmentServerFarms", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.HostingEnvironmentsClient", "GetHostingEnvironmentServerFarms", resp, "Failure sending request") } result, err = client.GetHostingEnvironmentServerFarmsResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/HostingEnvironmentsClient", "GetHostingEnvironmentServerFarms", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.HostingEnvironmentsClient", "GetHostingEnvironmentServerFarms", resp, "Failure responding to request") } return @@ -1157,28 +1158,27 @@ // 
GetHostingEnvironmentServerFarmsPreparer prepares the GetHostingEnvironmentServerFarms request. func (client HostingEnvironmentsClient) GetHostingEnvironmentServerFarmsPreparer(resourceGroupName string, name string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/serverfarms"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/serverfarms", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetHostingEnvironmentServerFarmsSender sends the GetHostingEnvironmentServerFarms request. The method will close the // http.Response Body if it receives an error. func (client HostingEnvironmentsClient) GetHostingEnvironmentServerFarmsSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetHostingEnvironmentServerFarmsResponder handles the response to the GetHostingEnvironmentServerFarms request. 
The method always @@ -1187,7 +1187,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -1199,21 +1199,21 @@ // resourceGroupName is name of resource group name is name of // hostingEnvironment (App Service Environment) propertiesToInclude is comma // separated list of site properties to include -func (client HostingEnvironmentsClient) GetHostingEnvironmentSites(resourceGroupName string, name string, propertiesToInclude string) (result SiteCollection, ae error) { +func (client HostingEnvironmentsClient) GetHostingEnvironmentSites(resourceGroupName string, name string, propertiesToInclude string) (result SiteCollection, err error) { req, err := client.GetHostingEnvironmentSitesPreparer(resourceGroupName, name, propertiesToInclude) if err != nil { - return result, autorest.NewErrorWithError(err, "web/HostingEnvironmentsClient", "GetHostingEnvironmentSites", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.HostingEnvironmentsClient", "GetHostingEnvironmentSites", nil, "Failure preparing request") } resp, err := client.GetHostingEnvironmentSitesSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/HostingEnvironmentsClient", "GetHostingEnvironmentSites", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.HostingEnvironmentsClient", "GetHostingEnvironmentSites", resp, "Failure sending request") } result, err = client.GetHostingEnvironmentSitesResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/HostingEnvironmentsClient", "GetHostingEnvironmentSites", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.HostingEnvironmentsClient", "GetHostingEnvironmentSites", 
resp, "Failure responding to request") } return @@ -1222,31 +1222,30 @@ // GetHostingEnvironmentSitesPreparer prepares the GetHostingEnvironmentSites request. func (client HostingEnvironmentsClient) GetHostingEnvironmentSitesPreparer(resourceGroupName string, name string, propertiesToInclude string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } if len(propertiesToInclude) > 0 { - queryParameters["propertiesToInclude"] = propertiesToInclude + queryParameters["propertiesToInclude"] = autorest.Encode("query", propertiesToInclude) } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/sites"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/sites", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetHostingEnvironmentSitesSender sends the GetHostingEnvironmentSites request. The method will close the // http.Response Body if it receives an error. 
func (client HostingEnvironmentsClient) GetHostingEnvironmentSitesSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetHostingEnvironmentSitesResponder handles the response to the GetHostingEnvironmentSites request. The method always @@ -1255,7 +1254,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -1271,21 +1270,21 @@ // Example: $filter=(name.value eq 'Metric1' or name.value eq 'Metric2') and // startTime eq '2014-01-01T00:00:00Z' and endTime eq '2014-12-31T23:59:59Z' // and timeGrain eq duration'[Hour|Minute|Day]'. -func (client HostingEnvironmentsClient) GetHostingEnvironmentUsages(resourceGroupName string, name string, filter string) (result CsmUsageQuotaCollection, ae error) { +func (client HostingEnvironmentsClient) GetHostingEnvironmentUsages(resourceGroupName string, name string, filter string) (result CsmUsageQuotaCollection, err error) { req, err := client.GetHostingEnvironmentUsagesPreparer(resourceGroupName, name, filter) if err != nil { - return result, autorest.NewErrorWithError(err, "web/HostingEnvironmentsClient", "GetHostingEnvironmentUsages", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.HostingEnvironmentsClient", "GetHostingEnvironmentUsages", nil, "Failure preparing request") } resp, err := client.GetHostingEnvironmentUsagesSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/HostingEnvironmentsClient", "GetHostingEnvironmentUsages", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.HostingEnvironmentsClient", "GetHostingEnvironmentUsages", resp, "Failure sending request") } 
result, err = client.GetHostingEnvironmentUsagesResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/HostingEnvironmentsClient", "GetHostingEnvironmentUsages", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.HostingEnvironmentsClient", "GetHostingEnvironmentUsages", resp, "Failure responding to request") } return @@ -1294,31 +1293,30 @@ // GetHostingEnvironmentUsagesPreparer prepares the GetHostingEnvironmentUsages request. func (client HostingEnvironmentsClient) GetHostingEnvironmentUsagesPreparer(resourceGroupName string, name string, filter string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } if len(filter) > 0 { - queryParameters["$filter"] = filter + queryParameters["$filter"] = autorest.Encode("query", filter) } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/usages"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/usages", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetHostingEnvironmentUsagesSender sends the GetHostingEnvironmentUsages request. 
The method will close the // http.Response Body if it receives an error. func (client HostingEnvironmentsClient) GetHostingEnvironmentUsagesSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetHostingEnvironmentUsagesResponder handles the response to the GetHostingEnvironmentUsages request. The method always @@ -1327,7 +1325,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -1338,21 +1336,21 @@ // // resourceGroupName is name of resource group name is name of // hostingEnvironment (App Service Environment) -func (client HostingEnvironmentsClient) GetHostingEnvironmentVips(resourceGroupName string, name string) (result AddressResponse, ae error) { +func (client HostingEnvironmentsClient) GetHostingEnvironmentVips(resourceGroupName string, name string) (result AddressResponse, err error) { req, err := client.GetHostingEnvironmentVipsPreparer(resourceGroupName, name) if err != nil { - return result, autorest.NewErrorWithError(err, "web/HostingEnvironmentsClient", "GetHostingEnvironmentVips", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.HostingEnvironmentsClient", "GetHostingEnvironmentVips", nil, "Failure preparing request") } resp, err := client.GetHostingEnvironmentVipsSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/HostingEnvironmentsClient", "GetHostingEnvironmentVips", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.HostingEnvironmentsClient", "GetHostingEnvironmentVips", resp, "Failure sending request") } result, err = client.GetHostingEnvironmentVipsResponder(resp) if err != nil { - ae = 
autorest.NewErrorWithError(err, "web/HostingEnvironmentsClient", "GetHostingEnvironmentVips", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.HostingEnvironmentsClient", "GetHostingEnvironmentVips", resp, "Failure responding to request") } return @@ -1361,28 +1359,27 @@ // GetHostingEnvironmentVipsPreparer prepares the GetHostingEnvironmentVips request. func (client HostingEnvironmentsClient) GetHostingEnvironmentVipsPreparer(resourceGroupName string, name string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/capacities/virtualip"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/capacities/virtualip", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetHostingEnvironmentVipsSender sends the GetHostingEnvironmentVips request. The method will close the // http.Response Body if it receives an error. 
func (client HostingEnvironmentsClient) GetHostingEnvironmentVipsSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetHostingEnvironmentVipsResponder handles the response to the GetHostingEnvironmentVips request. The method always @@ -1391,7 +1388,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -1403,21 +1400,21 @@ // // resourceGroupName is name of resource group name is name of // hostingEnvironment (App Service Environment) -func (client HostingEnvironmentsClient) GetHostingEnvironmentWebHostingPlans(resourceGroupName string, name string) (result ServerFarmCollection, ae error) { +func (client HostingEnvironmentsClient) GetHostingEnvironmentWebHostingPlans(resourceGroupName string, name string) (result ServerFarmCollection, err error) { req, err := client.GetHostingEnvironmentWebHostingPlansPreparer(resourceGroupName, name) if err != nil { - return result, autorest.NewErrorWithError(err, "web/HostingEnvironmentsClient", "GetHostingEnvironmentWebHostingPlans", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.HostingEnvironmentsClient", "GetHostingEnvironmentWebHostingPlans", nil, "Failure preparing request") } resp, err := client.GetHostingEnvironmentWebHostingPlansSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/HostingEnvironmentsClient", "GetHostingEnvironmentWebHostingPlans", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.HostingEnvironmentsClient", "GetHostingEnvironmentWebHostingPlans", resp, "Failure sending request") } result, err = 
client.GetHostingEnvironmentWebHostingPlansResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/HostingEnvironmentsClient", "GetHostingEnvironmentWebHostingPlans", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.HostingEnvironmentsClient", "GetHostingEnvironmentWebHostingPlans", resp, "Failure responding to request") } return @@ -1426,28 +1423,27 @@ // GetHostingEnvironmentWebHostingPlansPreparer prepares the GetHostingEnvironmentWebHostingPlans request. func (client HostingEnvironmentsClient) GetHostingEnvironmentWebHostingPlansPreparer(resourceGroupName string, name string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/webhostingplans"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/webhostingplans", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetHostingEnvironmentWebHostingPlansSender sends the GetHostingEnvironmentWebHostingPlans request. The method will close the // http.Response Body if it receives an error. 
func (client HostingEnvironmentsClient) GetHostingEnvironmentWebHostingPlansSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetHostingEnvironmentWebHostingPlansResponder handles the response to the GetHostingEnvironmentWebHostingPlans request. The method always @@ -1456,7 +1452,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -1469,21 +1465,21 @@ // resourceGroupName is name of resource group name is name of // hostingEnvironment (App Service Environment) workerPoolName is name of // worker pool -func (client HostingEnvironmentsClient) GetHostingEnvironmentWebWorkerMetricDefinitions(resourceGroupName string, name string, workerPoolName string) (result MetricDefinitionCollection, ae error) { +func (client HostingEnvironmentsClient) GetHostingEnvironmentWebWorkerMetricDefinitions(resourceGroupName string, name string, workerPoolName string) (result MetricDefinitionCollection, err error) { req, err := client.GetHostingEnvironmentWebWorkerMetricDefinitionsPreparer(resourceGroupName, name, workerPoolName) if err != nil { - return result, autorest.NewErrorWithError(err, "web/HostingEnvironmentsClient", "GetHostingEnvironmentWebWorkerMetricDefinitions", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.HostingEnvironmentsClient", "GetHostingEnvironmentWebWorkerMetricDefinitions", nil, "Failure preparing request") } resp, err := client.GetHostingEnvironmentWebWorkerMetricDefinitionsSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/HostingEnvironmentsClient", "GetHostingEnvironmentWebWorkerMetricDefinitions", "Failure sending request") + 
return result, autorest.NewErrorWithError(err, "web.HostingEnvironmentsClient", "GetHostingEnvironmentWebWorkerMetricDefinitions", resp, "Failure sending request") } result, err = client.GetHostingEnvironmentWebWorkerMetricDefinitionsResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/HostingEnvironmentsClient", "GetHostingEnvironmentWebWorkerMetricDefinitions", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.HostingEnvironmentsClient", "GetHostingEnvironmentWebWorkerMetricDefinitions", resp, "Failure responding to request") } return @@ -1492,29 +1488,28 @@ // GetHostingEnvironmentWebWorkerMetricDefinitionsPreparer prepares the GetHostingEnvironmentWebWorkerMetricDefinitions request. func (client HostingEnvironmentsClient) GetHostingEnvironmentWebWorkerMetricDefinitionsPreparer(resourceGroupName string, name string, workerPoolName string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - "workerPoolName": url.QueryEscape(workerPoolName), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "workerPoolName": autorest.Encode("path", workerPoolName), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/workerPools/{workerPoolName}/metricdefinitions"), - autorest.WithPathParameters(pathParameters), + 
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/workerPools/{workerPoolName}/metricdefinitions", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetHostingEnvironmentWebWorkerMetricDefinitionsSender sends the GetHostingEnvironmentWebWorkerMetricDefinitions request. The method will close the // http.Response Body if it receives an error. func (client HostingEnvironmentsClient) GetHostingEnvironmentWebWorkerMetricDefinitionsSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetHostingEnvironmentWebWorkerMetricDefinitionsResponder handles the response to the GetHostingEnvironmentWebWorkerMetricDefinitions request. The method always @@ -1523,7 +1518,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -1540,21 +1535,21 @@ // Example: $filter=(name.value eq 'Metric1' or name.value eq 'Metric2') and // startTime eq '2014-01-01T00:00:00Z' and endTime eq '2014-12-31T23:59:59Z' // and timeGrain eq duration'[Hour|Minute|Day]'. 
-func (client HostingEnvironmentsClient) GetHostingEnvironmentWebWorkerMetrics(resourceGroupName string, name string, workerPoolName string, details *bool, filter string) (result ResourceMetricCollection, ae error) { +func (client HostingEnvironmentsClient) GetHostingEnvironmentWebWorkerMetrics(resourceGroupName string, name string, workerPoolName string, details *bool, filter string) (result ResourceMetricCollection, err error) { req, err := client.GetHostingEnvironmentWebWorkerMetricsPreparer(resourceGroupName, name, workerPoolName, details, filter) if err != nil { - return result, autorest.NewErrorWithError(err, "web/HostingEnvironmentsClient", "GetHostingEnvironmentWebWorkerMetrics", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.HostingEnvironmentsClient", "GetHostingEnvironmentWebWorkerMetrics", nil, "Failure preparing request") } resp, err := client.GetHostingEnvironmentWebWorkerMetricsSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/HostingEnvironmentsClient", "GetHostingEnvironmentWebWorkerMetrics", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.HostingEnvironmentsClient", "GetHostingEnvironmentWebWorkerMetrics", resp, "Failure sending request") } result, err = client.GetHostingEnvironmentWebWorkerMetricsResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/HostingEnvironmentsClient", "GetHostingEnvironmentWebWorkerMetrics", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.HostingEnvironmentsClient", "GetHostingEnvironmentWebWorkerMetrics", resp, "Failure responding to request") } return @@ -1563,35 +1558,34 @@ // GetHostingEnvironmentWebWorkerMetricsPreparer prepares the GetHostingEnvironmentWebWorkerMetrics request. 
func (client HostingEnvironmentsClient) GetHostingEnvironmentWebWorkerMetricsPreparer(resourceGroupName string, name string, workerPoolName string, details *bool, filter string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - "workerPoolName": url.QueryEscape(workerPoolName), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "workerPoolName": autorest.Encode("path", workerPoolName), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } if details != nil { - queryParameters["details"] = details + queryParameters["details"] = autorest.Encode("query", *details) } if len(filter) > 0 { - queryParameters["$filter"] = filter + queryParameters["$filter"] = autorest.Encode("query", filter) } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/workerPools/{workerPoolName}/metrics"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/workerPools/{workerPoolName}/metrics", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetHostingEnvironmentWebWorkerMetricsSender sends the GetHostingEnvironmentWebWorkerMetrics request. The method will close the // http.Response Body if it receives an error. 
func (client HostingEnvironmentsClient) GetHostingEnvironmentWebWorkerMetricsSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetHostingEnvironmentWebWorkerMetricsResponder handles the response to the GetHostingEnvironmentWebWorkerMetrics request. The method always @@ -1600,7 +1594,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -1613,21 +1607,21 @@ // resourceGroupName is name of resource group name is name of // hostingEnvironment (App Service Environment) workerPoolName is name of // worker pool -func (client HostingEnvironmentsClient) GetHostingEnvironmentWebWorkerUsages(resourceGroupName string, name string, workerPoolName string) (result UsageCollection, ae error) { +func (client HostingEnvironmentsClient) GetHostingEnvironmentWebWorkerUsages(resourceGroupName string, name string, workerPoolName string) (result UsageCollection, err error) { req, err := client.GetHostingEnvironmentWebWorkerUsagesPreparer(resourceGroupName, name, workerPoolName) if err != nil { - return result, autorest.NewErrorWithError(err, "web/HostingEnvironmentsClient", "GetHostingEnvironmentWebWorkerUsages", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.HostingEnvironmentsClient", "GetHostingEnvironmentWebWorkerUsages", nil, "Failure preparing request") } resp, err := client.GetHostingEnvironmentWebWorkerUsagesSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/HostingEnvironmentsClient", "GetHostingEnvironmentWebWorkerUsages", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.HostingEnvironmentsClient", 
"GetHostingEnvironmentWebWorkerUsages", resp, "Failure sending request") } result, err = client.GetHostingEnvironmentWebWorkerUsagesResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/HostingEnvironmentsClient", "GetHostingEnvironmentWebWorkerUsages", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.HostingEnvironmentsClient", "GetHostingEnvironmentWebWorkerUsages", resp, "Failure responding to request") } return @@ -1636,29 +1630,28 @@ // GetHostingEnvironmentWebWorkerUsagesPreparer prepares the GetHostingEnvironmentWebWorkerUsages request. func (client HostingEnvironmentsClient) GetHostingEnvironmentWebWorkerUsagesPreparer(resourceGroupName string, name string, workerPoolName string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - "workerPoolName": url.QueryEscape(workerPoolName), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "workerPoolName": autorest.Encode("path", workerPoolName), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/workerPools/{workerPoolName}/usages"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/workerPools/{workerPoolName}/usages", pathParameters), 
autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetHostingEnvironmentWebWorkerUsagesSender sends the GetHostingEnvironmentWebWorkerUsages request. The method will close the // http.Response Body if it receives an error. func (client HostingEnvironmentsClient) GetHostingEnvironmentWebWorkerUsagesSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetHostingEnvironmentWebWorkerUsagesResponder handles the response to the GetHostingEnvironmentWebWorkerUsages request. The method always @@ -1667,7 +1660,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -1678,21 +1671,21 @@ // // resourceGroupName is name of resource group name is name of // hostingEnvironment (App Service Environment) -func (client HostingEnvironmentsClient) GetMultiRolePool(resourceGroupName string, name string) (result WorkerPool, ae error) { +func (client HostingEnvironmentsClient) GetMultiRolePool(resourceGroupName string, name string) (result WorkerPool, err error) { req, err := client.GetMultiRolePoolPreparer(resourceGroupName, name) if err != nil { - return result, autorest.NewErrorWithError(err, "web/HostingEnvironmentsClient", "GetMultiRolePool", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.HostingEnvironmentsClient", "GetMultiRolePool", nil, "Failure preparing request") } resp, err := client.GetMultiRolePoolSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/HostingEnvironmentsClient", "GetMultiRolePool", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.HostingEnvironmentsClient", 
"GetMultiRolePool", resp, "Failure sending request") } result, err = client.GetMultiRolePoolResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/HostingEnvironmentsClient", "GetMultiRolePool", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.HostingEnvironmentsClient", "GetMultiRolePool", resp, "Failure responding to request") } return @@ -1701,28 +1694,27 @@ // GetMultiRolePoolPreparer prepares the GetMultiRolePool request. func (client HostingEnvironmentsClient) GetMultiRolePoolPreparer(resourceGroupName string, name string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/multiRolePools/default"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/multiRolePools/default", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetMultiRolePoolSender sends the GetMultiRolePool request. The method will close the // http.Response Body if it receives an error. 
func (client HostingEnvironmentsClient) GetMultiRolePoolSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetMultiRolePoolResponder handles the response to the GetMultiRolePool request. The method always @@ -1731,7 +1723,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -1744,21 +1736,21 @@ // resourceGroupName is name of resource group name is name of // hostingEnvironment (App Service Environment) instance is name of instance // in the multiRole pool> -func (client HostingEnvironmentsClient) GetMultiRolePoolInstanceMetricDefinitions(resourceGroupName string, name string, instance string) (result ObjectSet, ae error) { +func (client HostingEnvironmentsClient) GetMultiRolePoolInstanceMetricDefinitions(resourceGroupName string, name string, instance string) (result SetObject, err error) { req, err := client.GetMultiRolePoolInstanceMetricDefinitionsPreparer(resourceGroupName, name, instance) if err != nil { - return result, autorest.NewErrorWithError(err, "web/HostingEnvironmentsClient", "GetMultiRolePoolInstanceMetricDefinitions", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.HostingEnvironmentsClient", "GetMultiRolePoolInstanceMetricDefinitions", nil, "Failure preparing request") } resp, err := client.GetMultiRolePoolInstanceMetricDefinitionsSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/HostingEnvironmentsClient", "GetMultiRolePoolInstanceMetricDefinitions", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.HostingEnvironmentsClient", "GetMultiRolePoolInstanceMetricDefinitions", resp, "Failure 
sending request") } result, err = client.GetMultiRolePoolInstanceMetricDefinitionsResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/HostingEnvironmentsClient", "GetMultiRolePoolInstanceMetricDefinitions", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.HostingEnvironmentsClient", "GetMultiRolePoolInstanceMetricDefinitions", resp, "Failure responding to request") } return @@ -1767,38 +1759,37 @@ // GetMultiRolePoolInstanceMetricDefinitionsPreparer prepares the GetMultiRolePoolInstanceMetricDefinitions request. func (client HostingEnvironmentsClient) GetMultiRolePoolInstanceMetricDefinitionsPreparer(resourceGroupName string, name string, instance string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "instance": url.QueryEscape(instance), - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "instance": autorest.Encode("path", instance), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/multiRolePools/default/instances/{instance}/metricdefinitions"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/multiRolePools/default/instances/{instance}/metricdefinitions", pathParameters), 
autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetMultiRolePoolInstanceMetricDefinitionsSender sends the GetMultiRolePoolInstanceMetricDefinitions request. The method will close the // http.Response Body if it receives an error. func (client HostingEnvironmentsClient) GetMultiRolePoolInstanceMetricDefinitionsSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetMultiRolePoolInstanceMetricDefinitionsResponder handles the response to the GetMultiRolePoolInstanceMetricDefinitions request. The method always // closes the http.Response Body. -func (client HostingEnvironmentsClient) GetMultiRolePoolInstanceMetricDefinitionsResponder(resp *http.Response) (result ObjectSet, err error) { +func (client HostingEnvironmentsClient) GetMultiRolePoolInstanceMetricDefinitionsResponder(resp *http.Response) (result SetObject, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result.Value), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -1811,21 +1802,21 @@ // resourceGroupName is name of resource group name is name of // hostingEnvironment (App Service Environment) instance is name of instance // in the multiRole pool details is include instance details -func (client HostingEnvironmentsClient) GetMultiRolePoolInstanceMetrics(resourceGroupName string, name string, instance string, details *bool) (result ObjectSet, ae error) { +func (client HostingEnvironmentsClient) GetMultiRolePoolInstanceMetrics(resourceGroupName string, name string, instance string, details *bool) (result SetObject, err error) { req, err := client.GetMultiRolePoolInstanceMetricsPreparer(resourceGroupName, name, instance, details) if err != nil { - return result, autorest.NewErrorWithError(err, 
"web/HostingEnvironmentsClient", "GetMultiRolePoolInstanceMetrics", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.HostingEnvironmentsClient", "GetMultiRolePoolInstanceMetrics", nil, "Failure preparing request") } resp, err := client.GetMultiRolePoolInstanceMetricsSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/HostingEnvironmentsClient", "GetMultiRolePoolInstanceMetrics", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.HostingEnvironmentsClient", "GetMultiRolePoolInstanceMetrics", resp, "Failure sending request") } result, err = client.GetMultiRolePoolInstanceMetricsResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/HostingEnvironmentsClient", "GetMultiRolePoolInstanceMetrics", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.HostingEnvironmentsClient", "GetMultiRolePoolInstanceMetrics", resp, "Failure responding to request") } return @@ -1834,41 +1825,40 @@ // GetMultiRolePoolInstanceMetricsPreparer prepares the GetMultiRolePoolInstanceMetrics request. 
func (client HostingEnvironmentsClient) GetMultiRolePoolInstanceMetricsPreparer(resourceGroupName string, name string, instance string, details *bool) (*http.Request, error) { pathParameters := map[string]interface{}{ - "instance": url.QueryEscape(instance), - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "instance": autorest.Encode("path", instance), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } if details != nil { - queryParameters["details"] = details + queryParameters["details"] = autorest.Encode("query", *details) } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/multiRolePools/default/instances/{instance}/metrics"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/multiRolePools/default/instances/{instance}/metrics", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetMultiRolePoolInstanceMetricsSender sends the GetMultiRolePoolInstanceMetrics request. The method will close the // http.Response Body if it receives an error. 
func (client HostingEnvironmentsClient) GetMultiRolePoolInstanceMetricsSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetMultiRolePoolInstanceMetricsResponder handles the response to the GetMultiRolePoolInstanceMetrics request. The method always // closes the http.Response Body. -func (client HostingEnvironmentsClient) GetMultiRolePoolInstanceMetricsResponder(resp *http.Response) (result ObjectSet, err error) { +func (client HostingEnvironmentsClient) GetMultiRolePoolInstanceMetricsResponder(resp *http.Response) (result SetObject, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result.Value), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -1879,21 +1869,21 @@ // // resourceGroupName is name of resource group name is name of // hostingEnvironment (App Service Environment) -func (client HostingEnvironmentsClient) GetMultiRolePools(resourceGroupName string, name string) (result WorkerPoolCollection, ae error) { +func (client HostingEnvironmentsClient) GetMultiRolePools(resourceGroupName string, name string) (result WorkerPoolCollection, err error) { req, err := client.GetMultiRolePoolsPreparer(resourceGroupName, name) if err != nil { - return result, autorest.NewErrorWithError(err, "web/HostingEnvironmentsClient", "GetMultiRolePools", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.HostingEnvironmentsClient", "GetMultiRolePools", nil, "Failure preparing request") } resp, err := client.GetMultiRolePoolsSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/HostingEnvironmentsClient", "GetMultiRolePools", "Failure sending request") + return result, autorest.NewErrorWithError(err, 
"web.HostingEnvironmentsClient", "GetMultiRolePools", resp, "Failure sending request") } result, err = client.GetMultiRolePoolsResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/HostingEnvironmentsClient", "GetMultiRolePools", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.HostingEnvironmentsClient", "GetMultiRolePools", resp, "Failure responding to request") } return @@ -1902,28 +1892,27 @@ // GetMultiRolePoolsPreparer prepares the GetMultiRolePools request. func (client HostingEnvironmentsClient) GetMultiRolePoolsPreparer(resourceGroupName string, name string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/multiRolePools"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/multiRolePools", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetMultiRolePoolsSender sends the GetMultiRolePools request. The method will close the // http.Response Body if it receives an error. 
func (client HostingEnvironmentsClient) GetMultiRolePoolsSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetMultiRolePoolsResponder handles the response to the GetMultiRolePools request. The method always @@ -1932,7 +1921,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -1943,21 +1932,21 @@ // // resourceGroupName is name of resource group name is name of // hostingEnvironment (App Service Environment) -func (client HostingEnvironmentsClient) GetMultiRolePoolSkus(resourceGroupName string, name string) (result SkuInfoCollection, ae error) { +func (client HostingEnvironmentsClient) GetMultiRolePoolSkus(resourceGroupName string, name string) (result SkuInfoCollection, err error) { req, err := client.GetMultiRolePoolSkusPreparer(resourceGroupName, name) if err != nil { - return result, autorest.NewErrorWithError(err, "web/HostingEnvironmentsClient", "GetMultiRolePoolSkus", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.HostingEnvironmentsClient", "GetMultiRolePoolSkus", nil, "Failure preparing request") } resp, err := client.GetMultiRolePoolSkusSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/HostingEnvironmentsClient", "GetMultiRolePoolSkus", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.HostingEnvironmentsClient", "GetMultiRolePoolSkus", resp, "Failure sending request") } result, err = client.GetMultiRolePoolSkusResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/HostingEnvironmentsClient", "GetMultiRolePoolSkus", "Failure responding to request") + err = 
autorest.NewErrorWithError(err, "web.HostingEnvironmentsClient", "GetMultiRolePoolSkus", resp, "Failure responding to request") } return @@ -1966,28 +1955,27 @@ // GetMultiRolePoolSkusPreparer prepares the GetMultiRolePoolSkus request. func (client HostingEnvironmentsClient) GetMultiRolePoolSkusPreparer(resourceGroupName string, name string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/multiRolePools/default/skus"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/multiRolePools/default/skus", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetMultiRolePoolSkusSender sends the GetMultiRolePoolSkus request. The method will close the // http.Response Body if it receives an error. func (client HostingEnvironmentsClient) GetMultiRolePoolSkusSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetMultiRolePoolSkusResponder handles the response to the GetMultiRolePoolSkus request. 
The method always @@ -1996,7 +1984,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -2008,21 +1996,21 @@ // resourceGroupName is name of resource group name is name of // hostingEnvironment (App Service Environment) workerPoolName is name of // worker pool -func (client HostingEnvironmentsClient) GetWorkerPool(resourceGroupName string, name string, workerPoolName string) (result WorkerPool, ae error) { +func (client HostingEnvironmentsClient) GetWorkerPool(resourceGroupName string, name string, workerPoolName string) (result WorkerPool, err error) { req, err := client.GetWorkerPoolPreparer(resourceGroupName, name, workerPoolName) if err != nil { - return result, autorest.NewErrorWithError(err, "web/HostingEnvironmentsClient", "GetWorkerPool", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.HostingEnvironmentsClient", "GetWorkerPool", nil, "Failure preparing request") } resp, err := client.GetWorkerPoolSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/HostingEnvironmentsClient", "GetWorkerPool", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.HostingEnvironmentsClient", "GetWorkerPool", resp, "Failure sending request") } result, err = client.GetWorkerPoolResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/HostingEnvironmentsClient", "GetWorkerPool", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.HostingEnvironmentsClient", "GetWorkerPool", resp, "Failure responding to request") } return @@ -2031,29 +2019,28 @@ // GetWorkerPoolPreparer prepares the GetWorkerPool request. 
func (client HostingEnvironmentsClient) GetWorkerPoolPreparer(resourceGroupName string, name string, workerPoolName string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - "workerPoolName": url.QueryEscape(workerPoolName), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "workerPoolName": autorest.Encode("path", workerPoolName), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/workerPools/{workerPoolName}"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/workerPools/{workerPoolName}", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetWorkerPoolSender sends the GetWorkerPool request. The method will close the // http.Response Body if it receives an error. func (client HostingEnvironmentsClient) GetWorkerPoolSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetWorkerPoolResponder handles the response to the GetWorkerPool request. 
The method always @@ -2062,7 +2049,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -2075,21 +2062,21 @@ // resourceGroupName is name of resource group name is name of // hostingEnvironment (App Service Environment) workerPoolName is name of // worker pool instance is name of instance in the worker pool -func (client HostingEnvironmentsClient) GetWorkerPoolInstanceMetricDefinitions(resourceGroupName string, name string, workerPoolName string, instance string) (result ObjectSet, ae error) { +func (client HostingEnvironmentsClient) GetWorkerPoolInstanceMetricDefinitions(resourceGroupName string, name string, workerPoolName string, instance string) (result SetObject, err error) { req, err := client.GetWorkerPoolInstanceMetricDefinitionsPreparer(resourceGroupName, name, workerPoolName, instance) if err != nil { - return result, autorest.NewErrorWithError(err, "web/HostingEnvironmentsClient", "GetWorkerPoolInstanceMetricDefinitions", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.HostingEnvironmentsClient", "GetWorkerPoolInstanceMetricDefinitions", nil, "Failure preparing request") } resp, err := client.GetWorkerPoolInstanceMetricDefinitionsSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/HostingEnvironmentsClient", "GetWorkerPoolInstanceMetricDefinitions", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.HostingEnvironmentsClient", "GetWorkerPoolInstanceMetricDefinitions", resp, "Failure sending request") } result, err = client.GetWorkerPoolInstanceMetricDefinitionsResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/HostingEnvironmentsClient", 
"GetWorkerPoolInstanceMetricDefinitions", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.HostingEnvironmentsClient", "GetWorkerPoolInstanceMetricDefinitions", resp, "Failure responding to request") } return @@ -2098,39 +2085,38 @@ // GetWorkerPoolInstanceMetricDefinitionsPreparer prepares the GetWorkerPoolInstanceMetricDefinitions request. func (client HostingEnvironmentsClient) GetWorkerPoolInstanceMetricDefinitionsPreparer(resourceGroupName string, name string, workerPoolName string, instance string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "instance": url.QueryEscape(instance), - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - "workerPoolName": url.QueryEscape(workerPoolName), + "instance": autorest.Encode("path", instance), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "workerPoolName": autorest.Encode("path", workerPoolName), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/workerPools/{workerPoolName}/instances/{instance}/metricdefinitions"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/workerPools/{workerPoolName}/instances/{instance}/metricdefinitions", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // 
GetWorkerPoolInstanceMetricDefinitionsSender sends the GetWorkerPoolInstanceMetricDefinitions request. The method will close the // http.Response Body if it receives an error. func (client HostingEnvironmentsClient) GetWorkerPoolInstanceMetricDefinitionsSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetWorkerPoolInstanceMetricDefinitionsResponder handles the response to the GetWorkerPoolInstanceMetricDefinitions request. The method always // closes the http.Response Body. -func (client HostingEnvironmentsClient) GetWorkerPoolInstanceMetricDefinitionsResponder(resp *http.Response) (result ObjectSet, err error) { +func (client HostingEnvironmentsClient) GetWorkerPoolInstanceMetricDefinitionsResponder(resp *http.Response) (result SetObject, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result.Value), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -2148,21 +2134,21 @@ // eq 'Metric1' or name.value eq 'Metric2') and startTime eq // '2014-01-01T00:00:00Z' and endTime eq '2014-12-31T23:59:59Z' and timeGrain // eq duration'[Hour|Minute|Day]'. 
-func (client HostingEnvironmentsClient) GetWorkerPoolInstanceMetrics(resourceGroupName string, name string, workerPoolName string, instance string, details *bool, filter string) (result ObjectSet, ae error) { +func (client HostingEnvironmentsClient) GetWorkerPoolInstanceMetrics(resourceGroupName string, name string, workerPoolName string, instance string, details *bool, filter string) (result SetObject, err error) { req, err := client.GetWorkerPoolInstanceMetricsPreparer(resourceGroupName, name, workerPoolName, instance, details, filter) if err != nil { - return result, autorest.NewErrorWithError(err, "web/HostingEnvironmentsClient", "GetWorkerPoolInstanceMetrics", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.HostingEnvironmentsClient", "GetWorkerPoolInstanceMetrics", nil, "Failure preparing request") } resp, err := client.GetWorkerPoolInstanceMetricsSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/HostingEnvironmentsClient", "GetWorkerPoolInstanceMetrics", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.HostingEnvironmentsClient", "GetWorkerPoolInstanceMetrics", resp, "Failure sending request") } result, err = client.GetWorkerPoolInstanceMetricsResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/HostingEnvironmentsClient", "GetWorkerPoolInstanceMetrics", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.HostingEnvironmentsClient", "GetWorkerPoolInstanceMetrics", resp, "Failure responding to request") } return @@ -2171,45 +2157,44 @@ // GetWorkerPoolInstanceMetricsPreparer prepares the GetWorkerPoolInstanceMetrics request. 
func (client HostingEnvironmentsClient) GetWorkerPoolInstanceMetricsPreparer(resourceGroupName string, name string, workerPoolName string, instance string, details *bool, filter string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "instance": url.QueryEscape(instance), - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - "workerPoolName": url.QueryEscape(workerPoolName), + "instance": autorest.Encode("path", instance), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "workerPoolName": autorest.Encode("path", workerPoolName), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } if details != nil { - queryParameters["details"] = details + queryParameters["details"] = autorest.Encode("query", *details) } if len(filter) > 0 { - queryParameters["$filter"] = filter + queryParameters["$filter"] = autorest.Encode("query", filter) } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/workerPools/{workerPoolName}/instances/{instance}/metrics"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/workerPools/{workerPoolName}/instances/{instance}/metrics", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetWorkerPoolInstanceMetricsSender sends the GetWorkerPoolInstanceMetrics request. 
The method will close the // http.Response Body if it receives an error. func (client HostingEnvironmentsClient) GetWorkerPoolInstanceMetricsSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetWorkerPoolInstanceMetricsResponder handles the response to the GetWorkerPoolInstanceMetrics request. The method always // closes the http.Response Body. -func (client HostingEnvironmentsClient) GetWorkerPoolInstanceMetricsResponder(resp *http.Response) (result ObjectSet, err error) { +func (client HostingEnvironmentsClient) GetWorkerPoolInstanceMetricsResponder(resp *http.Response) (result SetObject, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result.Value), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -2220,21 +2205,21 @@ // // resourceGroupName is name of resource group name is name of // hostingEnvironment (App Service Environment) -func (client HostingEnvironmentsClient) GetWorkerPools(resourceGroupName string, name string) (result WorkerPoolCollection, ae error) { +func (client HostingEnvironmentsClient) GetWorkerPools(resourceGroupName string, name string) (result WorkerPoolCollection, err error) { req, err := client.GetWorkerPoolsPreparer(resourceGroupName, name) if err != nil { - return result, autorest.NewErrorWithError(err, "web/HostingEnvironmentsClient", "GetWorkerPools", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.HostingEnvironmentsClient", "GetWorkerPools", nil, "Failure preparing request") } resp, err := client.GetWorkerPoolsSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/HostingEnvironmentsClient", "GetWorkerPools", "Failure sending request") + return result, 
autorest.NewErrorWithError(err, "web.HostingEnvironmentsClient", "GetWorkerPools", resp, "Failure sending request") } result, err = client.GetWorkerPoolsResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/HostingEnvironmentsClient", "GetWorkerPools", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.HostingEnvironmentsClient", "GetWorkerPools", resp, "Failure responding to request") } return @@ -2243,28 +2228,27 @@ // GetWorkerPoolsPreparer prepares the GetWorkerPools request. func (client HostingEnvironmentsClient) GetWorkerPoolsPreparer(resourceGroupName string, name string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/workerPools"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/workerPools", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetWorkerPoolsSender sends the GetWorkerPools request. The method will close the // http.Response Body if it receives an error. 
func (client HostingEnvironmentsClient) GetWorkerPoolsSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetWorkerPoolsResponder handles the response to the GetWorkerPools request. The method always @@ -2273,7 +2257,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -2285,21 +2269,21 @@ // resourceGroupName is name of resource group name is name of // hostingEnvironment (App Service Environment) workerPoolName is name of // worker pool -func (client HostingEnvironmentsClient) GetWorkerPoolSkus(resourceGroupName string, name string, workerPoolName string) (result SkuInfoCollection, ae error) { +func (client HostingEnvironmentsClient) GetWorkerPoolSkus(resourceGroupName string, name string, workerPoolName string) (result SkuInfoCollection, err error) { req, err := client.GetWorkerPoolSkusPreparer(resourceGroupName, name, workerPoolName) if err != nil { - return result, autorest.NewErrorWithError(err, "web/HostingEnvironmentsClient", "GetWorkerPoolSkus", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.HostingEnvironmentsClient", "GetWorkerPoolSkus", nil, "Failure preparing request") } resp, err := client.GetWorkerPoolSkusSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/HostingEnvironmentsClient", "GetWorkerPoolSkus", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.HostingEnvironmentsClient", "GetWorkerPoolSkus", resp, "Failure sending request") } result, err = client.GetWorkerPoolSkusResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/HostingEnvironmentsClient", 
"GetWorkerPoolSkus", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.HostingEnvironmentsClient", "GetWorkerPoolSkus", resp, "Failure responding to request") } return @@ -2308,29 +2292,28 @@ // GetWorkerPoolSkusPreparer prepares the GetWorkerPoolSkus request. func (client HostingEnvironmentsClient) GetWorkerPoolSkusPreparer(resourceGroupName string, name string, workerPoolName string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - "workerPoolName": url.QueryEscape(workerPoolName), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "workerPoolName": autorest.Encode("path", workerPoolName), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/workerPools/{workerPoolName}/skus"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/workerPools/{workerPoolName}/skus", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetWorkerPoolSkusSender sends the GetWorkerPoolSkus request. The method will close the // http.Response Body if it receives an error. 
func (client HostingEnvironmentsClient) GetWorkerPoolSkusSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetWorkerPoolSkusResponder handles the response to the GetWorkerPoolSkus request. The method always @@ -2339,7 +2322,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -2350,21 +2333,21 @@ // // resourceGroupName is name of resource group name is name of // hostingEnvironment (App Service Environment) -func (client HostingEnvironmentsClient) RebootHostingEnvironment(resourceGroupName string, name string) (result ObjectSet, ae error) { +func (client HostingEnvironmentsClient) RebootHostingEnvironment(resourceGroupName string, name string) (result SetObject, err error) { req, err := client.RebootHostingEnvironmentPreparer(resourceGroupName, name) if err != nil { - return result, autorest.NewErrorWithError(err, "web/HostingEnvironmentsClient", "RebootHostingEnvironment", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.HostingEnvironmentsClient", "RebootHostingEnvironment", nil, "Failure preparing request") } resp, err := client.RebootHostingEnvironmentSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/HostingEnvironmentsClient", "RebootHostingEnvironment", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.HostingEnvironmentsClient", "RebootHostingEnvironment", resp, "Failure sending request") } result, err = client.RebootHostingEnvironmentResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/HostingEnvironmentsClient", "RebootHostingEnvironment", "Failure responding to request") + 
err = autorest.NewErrorWithError(err, "web.HostingEnvironmentsClient", "RebootHostingEnvironment", resp, "Failure responding to request") } return @@ -2373,167 +2356,172 @@ // RebootHostingEnvironmentPreparer prepares the RebootHostingEnvironment request. func (client HostingEnvironmentsClient) RebootHostingEnvironmentPreparer(resourceGroupName string, name string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsPost(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/reboot"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/reboot", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // RebootHostingEnvironmentSender sends the RebootHostingEnvironment request. The method will close the // http.Response Body if it receives an error. 
func (client HostingEnvironmentsClient) RebootHostingEnvironmentSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK, http.StatusAccepted, http.StatusBadRequest, http.StatusNotFound, http.StatusConflict) + return autorest.SendWithSender(client, req) } // RebootHostingEnvironmentResponder handles the response to the RebootHostingEnvironment request. The method always // closes the http.Response Body. -func (client HostingEnvironmentsClient) RebootHostingEnvironmentResponder(resp *http.Response) (result ObjectSet, err error) { +func (client HostingEnvironmentsClient) RebootHostingEnvironmentResponder(resp *http.Response) (result SetObject, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusBadRequest, http.StatusNotFound, http.StatusConflict), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusBadRequest, http.StatusNotFound, http.StatusConflict), autorest.ByUnmarshallingJSON(&result.Value), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} return } -// ResumeHostingEnvironment sends the resume hosting environment request. +// ResumeHostingEnvironment sends the resume hosting environment request. This +// method may poll for completion. Polling can be canceled by passing the +// cancel channel argument. The channel will be used to cancel polling and +// any outstanding HTTP requests. 
// // resourceGroupName is name of resource group name is name of // hostingEnvironment (App Service Environment) -func (client HostingEnvironmentsClient) ResumeHostingEnvironment(resourceGroupName string, name string) (result SiteCollection, ae error) { - req, err := client.ResumeHostingEnvironmentPreparer(resourceGroupName, name) +func (client HostingEnvironmentsClient) ResumeHostingEnvironment(resourceGroupName string, name string, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.ResumeHostingEnvironmentPreparer(resourceGroupName, name, cancel) if err != nil { - return result, autorest.NewErrorWithError(err, "web/HostingEnvironmentsClient", "ResumeHostingEnvironment", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.HostingEnvironmentsClient", "ResumeHostingEnvironment", nil, "Failure preparing request") } resp, err := client.ResumeHostingEnvironmentSender(req) if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/HostingEnvironmentsClient", "ResumeHostingEnvironment", "Failure sending request") + result.Response = resp + return result, autorest.NewErrorWithError(err, "web.HostingEnvironmentsClient", "ResumeHostingEnvironment", resp, "Failure sending request") } result, err = client.ResumeHostingEnvironmentResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/HostingEnvironmentsClient", "ResumeHostingEnvironment", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.HostingEnvironmentsClient", "ResumeHostingEnvironment", resp, "Failure responding to request") } return } // ResumeHostingEnvironmentPreparer prepares the ResumeHostingEnvironment request. 
-func (client HostingEnvironmentsClient) ResumeHostingEnvironmentPreparer(resourceGroupName string, name string) (*http.Request, error) { +func (client HostingEnvironmentsClient) ResumeHostingEnvironmentPreparer(resourceGroupName string, name string, cancel <-chan struct{}) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsPost(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/resume"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/resume", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) } // ResumeHostingEnvironmentSender sends the ResumeHostingEnvironment request. The method will close the // http.Response Body if it receives an error. func (client HostingEnvironmentsClient) ResumeHostingEnvironmentSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK, http.StatusAccepted) + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) } // ResumeHostingEnvironmentResponder handles the response to the ResumeHostingEnvironment request. 
The method always // closes the http.Response Body. -func (client HostingEnvironmentsClient) ResumeHostingEnvironmentResponder(resp *http.Response) (result SiteCollection, err error) { +func (client HostingEnvironmentsClient) ResumeHostingEnvironmentResponder(resp *http.Response) (result autorest.Response, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), - autorest.ByUnmarshallingJSON(&result), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} + result.Response = resp return } // SuspendHostingEnvironment sends the suspend hosting environment request. +// This method may poll for completion. Polling can be canceled by passing +// the cancel channel argument. The channel will be used to cancel polling +// and any outstanding HTTP requests. // // resourceGroupName is name of resource group name is name of // hostingEnvironment (App Service Environment) -func (client HostingEnvironmentsClient) SuspendHostingEnvironment(resourceGroupName string, name string) (result SiteCollection, ae error) { - req, err := client.SuspendHostingEnvironmentPreparer(resourceGroupName, name) +func (client HostingEnvironmentsClient) SuspendHostingEnvironment(resourceGroupName string, name string, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.SuspendHostingEnvironmentPreparer(resourceGroupName, name, cancel) if err != nil { - return result, autorest.NewErrorWithError(err, "web/HostingEnvironmentsClient", "SuspendHostingEnvironment", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.HostingEnvironmentsClient", "SuspendHostingEnvironment", nil, "Failure preparing request") } resp, err := client.SuspendHostingEnvironmentSender(req) if err != nil { - result.Response = autorest.Response{Response: resp} - return result, 
autorest.NewErrorWithError(err, "web/HostingEnvironmentsClient", "SuspendHostingEnvironment", "Failure sending request") + result.Response = resp + return result, autorest.NewErrorWithError(err, "web.HostingEnvironmentsClient", "SuspendHostingEnvironment", resp, "Failure sending request") } result, err = client.SuspendHostingEnvironmentResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/HostingEnvironmentsClient", "SuspendHostingEnvironment", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.HostingEnvironmentsClient", "SuspendHostingEnvironment", resp, "Failure responding to request") } return } // SuspendHostingEnvironmentPreparer prepares the SuspendHostingEnvironment request. -func (client HostingEnvironmentsClient) SuspendHostingEnvironmentPreparer(resourceGroupName string, name string) (*http.Request, error) { +func (client HostingEnvironmentsClient) SuspendHostingEnvironmentPreparer(resourceGroupName string, name string, cancel <-chan struct{}) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsPost(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/suspend"), - autorest.WithPathParameters(pathParameters), + 
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/suspend", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) } // SuspendHostingEnvironmentSender sends the SuspendHostingEnvironment request. The method will close the // http.Response Body if it receives an error. func (client HostingEnvironmentsClient) SuspendHostingEnvironmentSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK, http.StatusAccepted) + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) } // SuspendHostingEnvironmentResponder handles the response to the SuspendHostingEnvironment request. The method always // closes the http.Response Body. -func (client HostingEnvironmentsClient) SuspendHostingEnvironmentResponder(resp *http.Response) (result SiteCollection, err error) { +func (client HostingEnvironmentsClient) SuspendHostingEnvironmentResponder(resp *http.Response) (result autorest.Response, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), - autorest.ByUnmarshallingJSON(&result), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} + result.Response = resp return } diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/web/managedhostingenvironments.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/web/managedhostingenvironments.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/web/managedhostingenvironments.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/web/managedhostingenvironments.go 2016-10-13 14:32:06.000000000 +0000 @@ -14,14 +14,14 @@ // See the License for 
the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 // Changes may cause incorrect behavior and will be lost if the code is // regenerated. import ( - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" "net/http" - "net/url" ) // ManagedHostingEnvironmentsClient is the use these APIs to manage Azure @@ -49,138 +49,143 @@ } // CreateOrUpdateManagedHostingEnvironment sends the create or update managed -// hosting environment request. +// hosting environment request. This method may poll for completion. Polling +// can be canceled by passing the cancel channel argument. The channel will +// be used to cancel polling and any outstanding HTTP requests. // // resourceGroupName is name of resource group name is name of managed hosting // environment managedHostingEnvironmentEnvelope is properties of managed // hosting environment -func (client ManagedHostingEnvironmentsClient) CreateOrUpdateManagedHostingEnvironment(resourceGroupName string, name string, managedHostingEnvironmentEnvelope HostingEnvironment) (result HostingEnvironment, ae error) { - req, err := client.CreateOrUpdateManagedHostingEnvironmentPreparer(resourceGroupName, name, managedHostingEnvironmentEnvelope) +func (client ManagedHostingEnvironmentsClient) CreateOrUpdateManagedHostingEnvironment(resourceGroupName string, name string, managedHostingEnvironmentEnvelope HostingEnvironment, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.CreateOrUpdateManagedHostingEnvironmentPreparer(resourceGroupName, name, managedHostingEnvironmentEnvelope, cancel) if err != nil { - return result, autorest.NewErrorWithError(err, "web/ManagedHostingEnvironmentsClient", 
"CreateOrUpdateManagedHostingEnvironment", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.ManagedHostingEnvironmentsClient", "CreateOrUpdateManagedHostingEnvironment", nil, "Failure preparing request") } resp, err := client.CreateOrUpdateManagedHostingEnvironmentSender(req) if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/ManagedHostingEnvironmentsClient", "CreateOrUpdateManagedHostingEnvironment", "Failure sending request") + result.Response = resp + return result, autorest.NewErrorWithError(err, "web.ManagedHostingEnvironmentsClient", "CreateOrUpdateManagedHostingEnvironment", resp, "Failure sending request") } result, err = client.CreateOrUpdateManagedHostingEnvironmentResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/ManagedHostingEnvironmentsClient", "CreateOrUpdateManagedHostingEnvironment", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.ManagedHostingEnvironmentsClient", "CreateOrUpdateManagedHostingEnvironment", resp, "Failure responding to request") } return } // CreateOrUpdateManagedHostingEnvironmentPreparer prepares the CreateOrUpdateManagedHostingEnvironment request. 
-func (client ManagedHostingEnvironmentsClient) CreateOrUpdateManagedHostingEnvironmentPreparer(resourceGroupName string, name string, managedHostingEnvironmentEnvelope HostingEnvironment) (*http.Request, error) { +func (client ManagedHostingEnvironmentsClient) CreateOrUpdateManagedHostingEnvironmentPreparer(resourceGroupName string, name string, managedHostingEnvironmentEnvelope HostingEnvironment, cancel <-chan struct{}) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, + preparer := autorest.CreatePreparer( autorest.AsJSON(), autorest.AsPut(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/managedHostingEnvironments/{name}"), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/managedHostingEnvironments/{name}", pathParameters), autorest.WithJSON(managedHostingEnvironmentEnvelope), - autorest.WithPathParameters(pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) } // CreateOrUpdateManagedHostingEnvironmentSender sends the CreateOrUpdateManagedHostingEnvironment request. The method will close the // http.Response Body if it receives an error. 
func (client ManagedHostingEnvironmentsClient) CreateOrUpdateManagedHostingEnvironmentSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK, http.StatusAccepted, http.StatusBadRequest, http.StatusNotFound, http.StatusConflict) + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) } // CreateOrUpdateManagedHostingEnvironmentResponder handles the response to the CreateOrUpdateManagedHostingEnvironment request. The method always // closes the http.Response Body. -func (client ManagedHostingEnvironmentsClient) CreateOrUpdateManagedHostingEnvironmentResponder(resp *http.Response) (result HostingEnvironment, err error) { +func (client ManagedHostingEnvironmentsClient) CreateOrUpdateManagedHostingEnvironmentResponder(resp *http.Response) (result autorest.Response, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusBadRequest, http.StatusNotFound, http.StatusConflict), - autorest.ByUnmarshallingJSON(&result), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusBadRequest, http.StatusNotFound, http.StatusConflict), autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} + result.Response = resp return } // DeleteManagedHostingEnvironment sends the delete managed hosting -// environment request. +// environment request. This method may poll for completion. Polling can be +// canceled by passing the cancel channel argument. The channel will be used +// to cancel polling and any outstanding HTTP requests. 
// // resourceGroupName is name of resource group name is name of managed hosting // environment forceDelete is delete even if the managed hosting environment // contains resources -func (client ManagedHostingEnvironmentsClient) DeleteManagedHostingEnvironment(resourceGroupName string, name string, forceDelete *bool) (result ObjectSet, ae error) { - req, err := client.DeleteManagedHostingEnvironmentPreparer(resourceGroupName, name, forceDelete) +func (client ManagedHostingEnvironmentsClient) DeleteManagedHostingEnvironment(resourceGroupName string, name string, forceDelete *bool, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.DeleteManagedHostingEnvironmentPreparer(resourceGroupName, name, forceDelete, cancel) if err != nil { - return result, autorest.NewErrorWithError(err, "web/ManagedHostingEnvironmentsClient", "DeleteManagedHostingEnvironment", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.ManagedHostingEnvironmentsClient", "DeleteManagedHostingEnvironment", nil, "Failure preparing request") } resp, err := client.DeleteManagedHostingEnvironmentSender(req) if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/ManagedHostingEnvironmentsClient", "DeleteManagedHostingEnvironment", "Failure sending request") + result.Response = resp + return result, autorest.NewErrorWithError(err, "web.ManagedHostingEnvironmentsClient", "DeleteManagedHostingEnvironment", resp, "Failure sending request") } result, err = client.DeleteManagedHostingEnvironmentResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/ManagedHostingEnvironmentsClient", "DeleteManagedHostingEnvironment", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.ManagedHostingEnvironmentsClient", "DeleteManagedHostingEnvironment", resp, "Failure responding to request") } return } // DeleteManagedHostingEnvironmentPreparer 
prepares the DeleteManagedHostingEnvironment request. -func (client ManagedHostingEnvironmentsClient) DeleteManagedHostingEnvironmentPreparer(resourceGroupName string, name string, forceDelete *bool) (*http.Request, error) { +func (client ManagedHostingEnvironmentsClient) DeleteManagedHostingEnvironmentPreparer(resourceGroupName string, name string, forceDelete *bool, cancel <-chan struct{}) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } if forceDelete != nil { - queryParameters["forceDelete"] = forceDelete + queryParameters["forceDelete"] = autorest.Encode("query", *forceDelete) } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsDelete(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/managedHostingEnvironments/{name}"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/managedHostingEnvironments/{name}", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) } // DeleteManagedHostingEnvironmentSender sends the DeleteManagedHostingEnvironment request. The method will close the // http.Response Body if it receives an error. 
func (client ManagedHostingEnvironmentsClient) DeleteManagedHostingEnvironmentSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK, http.StatusAccepted, http.StatusBadRequest, http.StatusNotFound, http.StatusConflict) + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) } // DeleteManagedHostingEnvironmentResponder handles the response to the DeleteManagedHostingEnvironment request. The method always // closes the http.Response Body. -func (client ManagedHostingEnvironmentsClient) DeleteManagedHostingEnvironmentResponder(resp *http.Response) (result ObjectSet, err error) { +func (client ManagedHostingEnvironmentsClient) DeleteManagedHostingEnvironmentResponder(resp *http.Response) (result autorest.Response, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusBadRequest, http.StatusNotFound, http.StatusConflict), - autorest.ByUnmarshallingJSON(&result.Value), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusBadRequest, http.StatusNotFound, http.StatusConflict), autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} + result.Response = resp return } @@ -189,21 +194,21 @@ // // resourceGroupName is name of resource group name is name of managed hosting // environment -func (client ManagedHostingEnvironmentsClient) GetManagedHostingEnvironment(resourceGroupName string, name string) (result ManagedHostingEnvironment, ae error) { +func (client ManagedHostingEnvironmentsClient) GetManagedHostingEnvironment(resourceGroupName string, name string) (result ManagedHostingEnvironment, err error) { req, err := client.GetManagedHostingEnvironmentPreparer(resourceGroupName, name) if err != nil { - return result, autorest.NewErrorWithError(err, "web/ManagedHostingEnvironmentsClient", "GetManagedHostingEnvironment", "Failure preparing request") + 
return result, autorest.NewErrorWithError(err, "web.ManagedHostingEnvironmentsClient", "GetManagedHostingEnvironment", nil, "Failure preparing request") } resp, err := client.GetManagedHostingEnvironmentSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/ManagedHostingEnvironmentsClient", "GetManagedHostingEnvironment", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.ManagedHostingEnvironmentsClient", "GetManagedHostingEnvironment", resp, "Failure sending request") } result, err = client.GetManagedHostingEnvironmentResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/ManagedHostingEnvironmentsClient", "GetManagedHostingEnvironment", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.ManagedHostingEnvironmentsClient", "GetManagedHostingEnvironment", resp, "Failure responding to request") } return @@ -212,28 +217,27 @@ // GetManagedHostingEnvironmentPreparer prepares the GetManagedHostingEnvironment request. 
func (client ManagedHostingEnvironmentsClient) GetManagedHostingEnvironmentPreparer(resourceGroupName string, name string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/managedHostingEnvironments/{name}"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/managedHostingEnvironments/{name}", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetManagedHostingEnvironmentSender sends the GetManagedHostingEnvironment request. The method will close the // http.Response Body if it receives an error. func (client ManagedHostingEnvironmentsClient) GetManagedHostingEnvironmentSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetManagedHostingEnvironmentResponder handles the response to the GetManagedHostingEnvironment request. 
The method always @@ -242,7 +246,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -254,21 +258,21 @@ // // resourceGroupName is name of resource group name is name of managed hosting // environment operationID is operation identifier GUID -func (client ManagedHostingEnvironmentsClient) GetManagedHostingEnvironmentOperation(resourceGroupName string, name string, operationID string) (result ObjectSet, ae error) { +func (client ManagedHostingEnvironmentsClient) GetManagedHostingEnvironmentOperation(resourceGroupName string, name string, operationID string) (result SetObject, err error) { req, err := client.GetManagedHostingEnvironmentOperationPreparer(resourceGroupName, name, operationID) if err != nil { - return result, autorest.NewErrorWithError(err, "web/ManagedHostingEnvironmentsClient", "GetManagedHostingEnvironmentOperation", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.ManagedHostingEnvironmentsClient", "GetManagedHostingEnvironmentOperation", nil, "Failure preparing request") } resp, err := client.GetManagedHostingEnvironmentOperationSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/ManagedHostingEnvironmentsClient", "GetManagedHostingEnvironmentOperation", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.ManagedHostingEnvironmentsClient", "GetManagedHostingEnvironmentOperation", resp, "Failure sending request") } result, err = client.GetManagedHostingEnvironmentOperationResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/ManagedHostingEnvironmentsClient", "GetManagedHostingEnvironmentOperation", "Failure responding to request") + err = 
autorest.NewErrorWithError(err, "web.ManagedHostingEnvironmentsClient", "GetManagedHostingEnvironmentOperation", resp, "Failure responding to request") } return @@ -277,38 +281,37 @@ // GetManagedHostingEnvironmentOperationPreparer prepares the GetManagedHostingEnvironmentOperation request. func (client ManagedHostingEnvironmentsClient) GetManagedHostingEnvironmentOperationPreparer(resourceGroupName string, name string, operationID string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "operationId": url.QueryEscape(operationID), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "operationId": autorest.Encode("path", operationID), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/managedHostingEnvironments/{name}/operations/{operationId}"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/managedHostingEnvironments/{name}/operations/{operationId}", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetManagedHostingEnvironmentOperationSender sends the GetManagedHostingEnvironmentOperation request. The method will close the // http.Response Body if it receives an error. 
func (client ManagedHostingEnvironmentsClient) GetManagedHostingEnvironmentOperationSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK, http.StatusAccepted, http.StatusNotFound, http.StatusInternalServerError) + return autorest.SendWithSender(client, req) } // GetManagedHostingEnvironmentOperationResponder handles the response to the GetManagedHostingEnvironmentOperation request. The method always // closes the http.Response Body. -func (client ManagedHostingEnvironmentsClient) GetManagedHostingEnvironmentOperationResponder(resp *http.Response) (result ObjectSet, err error) { +func (client ManagedHostingEnvironmentsClient) GetManagedHostingEnvironmentOperationResponder(resp *http.Response) (result SetObject, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNotFound, http.StatusInternalServerError), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNotFound, http.StatusInternalServerError), autorest.ByUnmarshallingJSON(&result.Value), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -319,21 +322,21 @@ // request. 
// // resourceGroupName is name of resource group -func (client ManagedHostingEnvironmentsClient) GetManagedHostingEnvironments(resourceGroupName string) (result HostingEnvironmentCollection, ae error) { +func (client ManagedHostingEnvironmentsClient) GetManagedHostingEnvironments(resourceGroupName string) (result HostingEnvironmentCollection, err error) { req, err := client.GetManagedHostingEnvironmentsPreparer(resourceGroupName) if err != nil { - return result, autorest.NewErrorWithError(err, "web/ManagedHostingEnvironmentsClient", "GetManagedHostingEnvironments", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.ManagedHostingEnvironmentsClient", "GetManagedHostingEnvironments", nil, "Failure preparing request") } resp, err := client.GetManagedHostingEnvironmentsSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/ManagedHostingEnvironmentsClient", "GetManagedHostingEnvironments", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.ManagedHostingEnvironmentsClient", "GetManagedHostingEnvironments", resp, "Failure sending request") } result, err = client.GetManagedHostingEnvironmentsResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/ManagedHostingEnvironmentsClient", "GetManagedHostingEnvironments", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.ManagedHostingEnvironmentsClient", "GetManagedHostingEnvironments", resp, "Failure responding to request") } return @@ -342,27 +345,26 @@ // GetManagedHostingEnvironmentsPreparer prepares the GetManagedHostingEnvironments request. 
func (client ManagedHostingEnvironmentsClient) GetManagedHostingEnvironmentsPreparer(resourceGroupName string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/managedHostingEnvironments"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/managedHostingEnvironments", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetManagedHostingEnvironmentsSender sends the GetManagedHostingEnvironments request. The method will close the // http.Response Body if it receives an error. func (client ManagedHostingEnvironmentsClient) GetManagedHostingEnvironmentsSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetManagedHostingEnvironmentsResponder handles the response to the GetManagedHostingEnvironments request. 
The method always @@ -371,7 +373,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -383,21 +385,21 @@ // // resourceGroupName is name of resource group name is name of managed hosting // environment -func (client ManagedHostingEnvironmentsClient) GetManagedHostingEnvironmentServerFarms(resourceGroupName string, name string) (result ServerFarmCollection, ae error) { +func (client ManagedHostingEnvironmentsClient) GetManagedHostingEnvironmentServerFarms(resourceGroupName string, name string) (result ServerFarmCollection, err error) { req, err := client.GetManagedHostingEnvironmentServerFarmsPreparer(resourceGroupName, name) if err != nil { - return result, autorest.NewErrorWithError(err, "web/ManagedHostingEnvironmentsClient", "GetManagedHostingEnvironmentServerFarms", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.ManagedHostingEnvironmentsClient", "GetManagedHostingEnvironmentServerFarms", nil, "Failure preparing request") } resp, err := client.GetManagedHostingEnvironmentServerFarmsSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/ManagedHostingEnvironmentsClient", "GetManagedHostingEnvironmentServerFarms", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.ManagedHostingEnvironmentsClient", "GetManagedHostingEnvironmentServerFarms", resp, "Failure sending request") } result, err = client.GetManagedHostingEnvironmentServerFarmsResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/ManagedHostingEnvironmentsClient", "GetManagedHostingEnvironmentServerFarms", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.ManagedHostingEnvironmentsClient", 
"GetManagedHostingEnvironmentServerFarms", resp, "Failure responding to request") } return @@ -406,28 +408,27 @@ // GetManagedHostingEnvironmentServerFarmsPreparer prepares the GetManagedHostingEnvironmentServerFarms request. func (client ManagedHostingEnvironmentsClient) GetManagedHostingEnvironmentServerFarmsPreparer(resourceGroupName string, name string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/managedHostingEnvironments/{name}/serverfarms"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/managedHostingEnvironments/{name}/serverfarms", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetManagedHostingEnvironmentServerFarmsSender sends the GetManagedHostingEnvironmentServerFarms request. The method will close the // http.Response Body if it receives an error. 
func (client ManagedHostingEnvironmentsClient) GetManagedHostingEnvironmentServerFarmsSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetManagedHostingEnvironmentServerFarmsResponder handles the response to the GetManagedHostingEnvironmentServerFarms request. The method always @@ -436,7 +437,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -449,21 +450,21 @@ // resourceGroupName is name of resource group name is name of managed hosting // environment propertiesToInclude is comma separated list of site properties // to include -func (client ManagedHostingEnvironmentsClient) GetManagedHostingEnvironmentSites(resourceGroupName string, name string, propertiesToInclude string) (result SiteCollection, ae error) { +func (client ManagedHostingEnvironmentsClient) GetManagedHostingEnvironmentSites(resourceGroupName string, name string, propertiesToInclude string) (result SiteCollection, err error) { req, err := client.GetManagedHostingEnvironmentSitesPreparer(resourceGroupName, name, propertiesToInclude) if err != nil { - return result, autorest.NewErrorWithError(err, "web/ManagedHostingEnvironmentsClient", "GetManagedHostingEnvironmentSites", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.ManagedHostingEnvironmentsClient", "GetManagedHostingEnvironmentSites", nil, "Failure preparing request") } resp, err := client.GetManagedHostingEnvironmentSitesSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/ManagedHostingEnvironmentsClient", "GetManagedHostingEnvironmentSites", "Failure sending request") + return result, 
autorest.NewErrorWithError(err, "web.ManagedHostingEnvironmentsClient", "GetManagedHostingEnvironmentSites", resp, "Failure sending request") } result, err = client.GetManagedHostingEnvironmentSitesResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/ManagedHostingEnvironmentsClient", "GetManagedHostingEnvironmentSites", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.ManagedHostingEnvironmentsClient", "GetManagedHostingEnvironmentSites", resp, "Failure responding to request") } return @@ -472,31 +473,30 @@ // GetManagedHostingEnvironmentSitesPreparer prepares the GetManagedHostingEnvironmentSites request. func (client ManagedHostingEnvironmentsClient) GetManagedHostingEnvironmentSitesPreparer(resourceGroupName string, name string, propertiesToInclude string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } if len(propertiesToInclude) > 0 { - queryParameters["propertiesToInclude"] = propertiesToInclude + queryParameters["propertiesToInclude"] = autorest.Encode("query", propertiesToInclude) } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/managedHostingEnvironments/{name}/sites"), - autorest.WithPathParameters(pathParameters), + 
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/managedHostingEnvironments/{name}/sites", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetManagedHostingEnvironmentSitesSender sends the GetManagedHostingEnvironmentSites request. The method will close the // http.Response Body if it receives an error. func (client ManagedHostingEnvironmentsClient) GetManagedHostingEnvironmentSitesSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetManagedHostingEnvironmentSitesResponder handles the response to the GetManagedHostingEnvironmentSites request. The method always @@ -505,7 +505,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -517,21 +517,21 @@ // // resourceGroupName is name of resource group name is name of managed hosting // environment -func (client ManagedHostingEnvironmentsClient) GetManagedHostingEnvironmentVips(resourceGroupName string, name string) (result AddressResponse, ae error) { +func (client ManagedHostingEnvironmentsClient) GetManagedHostingEnvironmentVips(resourceGroupName string, name string) (result AddressResponse, err error) { req, err := client.GetManagedHostingEnvironmentVipsPreparer(resourceGroupName, name) if err != nil { - return result, autorest.NewErrorWithError(err, "web/ManagedHostingEnvironmentsClient", "GetManagedHostingEnvironmentVips", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.ManagedHostingEnvironmentsClient", "GetManagedHostingEnvironmentVips", nil, "Failure preparing request") } resp, err := client.GetManagedHostingEnvironmentVipsSender(req) 
if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/ManagedHostingEnvironmentsClient", "GetManagedHostingEnvironmentVips", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.ManagedHostingEnvironmentsClient", "GetManagedHostingEnvironmentVips", resp, "Failure sending request") } result, err = client.GetManagedHostingEnvironmentVipsResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/ManagedHostingEnvironmentsClient", "GetManagedHostingEnvironmentVips", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.ManagedHostingEnvironmentsClient", "GetManagedHostingEnvironmentVips", resp, "Failure responding to request") } return @@ -540,28 +540,27 @@ // GetManagedHostingEnvironmentVipsPreparer prepares the GetManagedHostingEnvironmentVips request. func (client ManagedHostingEnvironmentsClient) GetManagedHostingEnvironmentVipsPreparer(resourceGroupName string, name string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/managedHostingEnvironments/{name}/capacities/virtualip"), - autorest.WithPathParameters(pathParameters), + 
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/managedHostingEnvironments/{name}/capacities/virtualip", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetManagedHostingEnvironmentVipsSender sends the GetManagedHostingEnvironmentVips request. The method will close the // http.Response Body if it receives an error. func (client ManagedHostingEnvironmentsClient) GetManagedHostingEnvironmentVipsSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetManagedHostingEnvironmentVipsResponder handles the response to the GetManagedHostingEnvironmentVips request. The method always @@ -570,7 +569,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -582,21 +581,21 @@ // // resourceGroupName is name of resource group name is name of managed hosting // environment -func (client ManagedHostingEnvironmentsClient) GetManagedHostingEnvironmentWebHostingPlans(resourceGroupName string, name string) (result ServerFarmCollection, ae error) { +func (client ManagedHostingEnvironmentsClient) GetManagedHostingEnvironmentWebHostingPlans(resourceGroupName string, name string) (result ServerFarmCollection, err error) { req, err := client.GetManagedHostingEnvironmentWebHostingPlansPreparer(resourceGroupName, name) if err != nil { - return result, autorest.NewErrorWithError(err, "web/ManagedHostingEnvironmentsClient", "GetManagedHostingEnvironmentWebHostingPlans", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.ManagedHostingEnvironmentsClient", "GetManagedHostingEnvironmentWebHostingPlans", nil, "Failure preparing 
request") } resp, err := client.GetManagedHostingEnvironmentWebHostingPlansSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/ManagedHostingEnvironmentsClient", "GetManagedHostingEnvironmentWebHostingPlans", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.ManagedHostingEnvironmentsClient", "GetManagedHostingEnvironmentWebHostingPlans", resp, "Failure sending request") } result, err = client.GetManagedHostingEnvironmentWebHostingPlansResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/ManagedHostingEnvironmentsClient", "GetManagedHostingEnvironmentWebHostingPlans", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.ManagedHostingEnvironmentsClient", "GetManagedHostingEnvironmentWebHostingPlans", resp, "Failure responding to request") } return @@ -605,28 +604,27 @@ // GetManagedHostingEnvironmentWebHostingPlansPreparer prepares the GetManagedHostingEnvironmentWebHostingPlans request. 
func (client ManagedHostingEnvironmentsClient) GetManagedHostingEnvironmentWebHostingPlansPreparer(resourceGroupName string, name string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/managedHostingEnvironments/{name}/webhostingplans"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/managedHostingEnvironments/{name}/webhostingplans", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetManagedHostingEnvironmentWebHostingPlansSender sends the GetManagedHostingEnvironmentWebHostingPlans request. The method will close the // http.Response Body if it receives an error. func (client ManagedHostingEnvironmentsClient) GetManagedHostingEnvironmentWebHostingPlansSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetManagedHostingEnvironmentWebHostingPlansResponder handles the response to the GetManagedHostingEnvironmentWebHostingPlans request. 
The method always @@ -635,7 +633,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/web/models.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/web/models.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/web/models.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/web/models.go 2016-10-13 14:32:06.000000000 +0000 @@ -14,14 +14,15 @@ // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 // Changes may cause incorrect behavior and will be lost if the code is // regenerated. import ( - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/date" - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/to" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/date" + "github.com/Azure/go-autorest/autorest/to" + "io" "net/http" ) @@ -123,14 +124,37 @@ Twitter BuiltInAuthenticationProvider = "Twitter" ) -// CertificateAction enumerates the values for certificate action. -type CertificateAction string - -const ( - // Rekey specifies the rekey state for certificate action. - Rekey CertificateAction = "Rekey" - // Renew specifies the renew state for certificate action. - Renew CertificateAction = "Renew" +// CertificateOrderActionType enumerates the values for certificate order +// action type. 
+type CertificateOrderActionType string + +const ( + // CertificateIssued specifies the certificate issued state for + // certificate order action type. + CertificateIssued CertificateOrderActionType = "CertificateIssued" + // CertificateOrderCanceled specifies the certificate order canceled state + // for certificate order action type. + CertificateOrderCanceled CertificateOrderActionType = "CertificateOrderCanceled" + // CertificateOrderCreated specifies the certificate order created state + // for certificate order action type. + CertificateOrderCreated CertificateOrderActionType = "CertificateOrderCreated" + // CertificateRevoked specifies the certificate revoked state for + // certificate order action type. + CertificateRevoked CertificateOrderActionType = "CertificateRevoked" + // DomainValidationComplete specifies the domain validation complete state + // for certificate order action type. + DomainValidationComplete CertificateOrderActionType = "DomainValidationComplete" + // FraudDetected specifies the fraud detected state for certificate order + // action type. + FraudDetected CertificateOrderActionType = "FraudDetected" + // OrgNameChange specifies the org name change state for certificate order + // action type. + OrgNameChange CertificateOrderActionType = "OrgNameChange" + // OrgValidationComplete specifies the org validation complete state for + // certificate order action type. + OrgValidationComplete CertificateOrderActionType = "OrgValidationComplete" + // SanDrop specifies the san drop state for certificate order action type. + SanDrop CertificateOrderActionType = "SanDrop" ) // CertificateOrderStatus enumerates the values for certificate order status. @@ -175,12 +199,41 @@ StandardDomainValidatedWildCardSsl CertificateProductType = "StandardDomainValidatedWildCardSsl" ) +// Channels enumerates the values for channels. +type Channels string + +const ( + // All specifies the all state for channels. 
+ All Channels = "All" + // API specifies the api state for channels. + API Channels = "Api" + // Email specifies the email state for channels. + Email Channels = "Email" + // Notification specifies the notification state for channels. + Notification Channels = "Notification" +) + +// CloneAbilityResult enumerates the values for clone ability result. +type CloneAbilityResult string + +const ( + // Cloneable specifies the cloneable state for clone ability result. + Cloneable CloneAbilityResult = "Cloneable" + // NotCloneable specifies the not cloneable state for clone ability result. + NotCloneable CloneAbilityResult = "NotCloneable" + // PartiallyCloneable specifies the partially cloneable state for clone + // ability result. + PartiallyCloneable CloneAbilityResult = "PartiallyCloneable" +) + // ComputeModeOptions enumerates the values for compute mode options. type ComputeModeOptions string const ( // Dedicated specifies the dedicated state for compute mode options. Dedicated ComputeModeOptions = "Dedicated" + // Dynamic specifies the dynamic state for compute mode options. + Dynamic ComputeModeOptions = "Dynamic" // Shared specifies the shared state for compute mode options. Shared ComputeModeOptions = "Shared" ) @@ -423,6 +476,45 @@ Integrated ManagedPipelineMode = "Integrated" ) +// NotificationLevel enumerates the values for notification level. +type NotificationLevel string + +const ( + // NotificationLevelCritical specifies the notification level critical + // state for notification level. + NotificationLevelCritical NotificationLevel = "Critical" + // NotificationLevelInformation specifies the notification level + // information state for notification level. + NotificationLevelInformation NotificationLevel = "Information" + // NotificationLevelNonUrgentSuggestion specifies the notification level + // non urgent suggestion state for notification level. 
+ NotificationLevelNonUrgentSuggestion NotificationLevel = "NonUrgentSuggestion" + // NotificationLevelWarning specifies the notification level warning state + // for notification level. + NotificationLevelWarning NotificationLevel = "Warning" +) + +// ProvisioningState enumerates the values for provisioning state. +type ProvisioningState string + +const ( + // ProvisioningStateCanceled specifies the provisioning state canceled + // state for provisioning state. + ProvisioningStateCanceled ProvisioningState = "Canceled" + // ProvisioningStateDeleting specifies the provisioning state deleting + // state for provisioning state. + ProvisioningStateDeleting ProvisioningState = "Deleting" + // ProvisioningStateFailed specifies the provisioning state failed state + // for provisioning state. + ProvisioningStateFailed ProvisioningState = "Failed" + // ProvisioningStateInProgress specifies the provisioning state in + // progress state for provisioning state. + ProvisioningStateInProgress ProvisioningState = "InProgress" + // ProvisioningStateSucceeded specifies the provisioning state succeeded + // state for provisioning state. + ProvisioningStateSucceeded ProvisioningState = "Succeeded" +) + // SiteAvailabilityState enumerates the values for site availability state. type SiteAvailabilityState string @@ -508,12 +600,18 @@ type WorkerSizeOptions string const ( - // Large specifies the large state for worker size options. - Large WorkerSizeOptions = "Large" - // Medium specifies the medium state for worker size options. - Medium WorkerSizeOptions = "Medium" - // Small specifies the small state for worker size options. - Small WorkerSizeOptions = "Small" + // WorkerSizeOptionsDefault specifies the worker size options default + // state for worker size options. + WorkerSizeOptionsDefault WorkerSizeOptions = "Default" + // WorkerSizeOptionsLarge specifies the worker size options large state + // for worker size options. 
+ WorkerSizeOptionsLarge WorkerSizeOptions = "Large" + // WorkerSizeOptionsMedium specifies the worker size options medium state + // for worker size options. + WorkerSizeOptionsMedium WorkerSizeOptions = "Medium" + // WorkerSizeOptionsSmall specifies the worker size options small state + // for worker size options. + WorkerSizeOptionsSmall WorkerSizeOptions = "Small" ) // Address is address information for domain registration @@ -535,6 +633,12 @@ VipMappings *[]VirtualIPMapping `json:"vipMappings,omitempty"` } +// APIDefinitionInfo is information about the formal API definition for the +// web app. +type APIDefinitionInfo struct { + URL *string `json:"url,omitempty"` +} + // ApplicationLogsConfig is application logs configuration type ApplicationLogsConfig struct { FileSystem *FileSystemApplicationLogsConfig `json:"fileSystem,omitempty"` @@ -554,8 +658,9 @@ // AutoHealActions is autoHealActions - Describes the actions which can be // taken by the auto-heal module when a rule is triggered. type AutoHealActions struct { - ActionType AutoHealActionType `json:"actionType,omitempty"` - CustomAction *AutoHealCustomAction `json:"customAction,omitempty"` + ActionType AutoHealActionType `json:"actionType,omitempty"` + CustomAction *AutoHealCustomAction `json:"customAction,omitempty"` + MinProcessExecutionTime *string `json:"minProcessExecutionTime,omitempty"` } // AutoHealCustomAction is autoHealCustomAction - Describes the custom action @@ -576,7 +681,7 @@ // AutoHealTriggers is autoHealTriggers - describes the triggers for auto-heal. 
type AutoHealTriggers struct { Requests *RequestsBasedTrigger `json:"requests,omitempty"` - PrivateBytesInKB *int `json:"privateBytesInKB,omitempty"` + PrivateBytesInKB *int32 `json:"privateBytesInKB,omitempty"` StatusCodes *[]StatusCodesBasedTrigger `json:"statusCodes,omitempty"` SlowRequests *SlowRequestsBasedTrigger `json:"slowRequests,omitempty"` } @@ -586,14 +691,14 @@ type AzureBlobStorageApplicationLogsConfig struct { Level LogLevel `json:"level,omitempty"` SasURL *string `json:"sasUrl,omitempty"` - RetentionInDays *int `json:"retentionInDays,omitempty"` + RetentionInDays *int32 `json:"retentionInDays,omitempty"` } // AzureBlobStorageHTTPLogsConfig is http logs to azure blob storage // configuration type AzureBlobStorageHTTPLogsConfig struct { SasURL *string `json:"sasUrl,omitempty"` - RetentionInDays *int `json:"retentionInDays,omitempty"` + RetentionInDays *int32 `json:"retentionInDays,omitempty"` Enabled *bool `json:"enabled,omitempty"` } @@ -609,6 +714,7 @@ autorest.Response `json:"-"` ID *string `json:"id,omitempty"` Name *string `json:"name,omitempty"` + Kind *string `json:"kind,omitempty"` Location *string `json:"location,omitempty"` Type *string `json:"type,omitempty"` Tags *map[string]*string `json:"tags,omitempty"` @@ -624,11 +730,12 @@ // BackupItemProperties is type BackupItemProperties struct { + ID *int32 `json:"id,omitempty"` StorageAccountURL *string `json:"storageAccountUrl,omitempty"` BlobName *string `json:"blobName,omitempty"` Name *string `json:"name,omitempty"` Status BackupItemStatus `json:"status,omitempty"` - SizeInBytes *int32 `json:"sizeInBytes,omitempty"` + SizeInBytes *int64 `json:"sizeInBytes,omitempty"` Created *date.Time `json:"created,omitempty"` Log *string `json:"log,omitempty"` Databases *[]DatabaseBackupSetting `json:"databases,omitempty"` @@ -636,7 +743,7 @@ LastRestoreTimeStamp *date.Time `json:"lastRestoreTimeStamp,omitempty"` FinishedTimeStamp *date.Time `json:"finishedTimeStamp,omitempty"` CorrelationID *string 
`json:"correlationId,omitempty"` - WebsiteSizeInBytes *int32 `json:"websiteSizeInBytes,omitempty"` + WebsiteSizeInBytes *int64 `json:"websiteSizeInBytes,omitempty"` } // BackupRequest is description of a backup which will be performed @@ -644,6 +751,7 @@ autorest.Response `json:"-"` ID *string `json:"id,omitempty"` Name *string `json:"name,omitempty"` + Kind *string `json:"kind,omitempty"` Location *string `json:"location,omitempty"` Type *string `json:"type,omitempty"` Tags *map[string]*string `json:"tags,omitempty"` @@ -663,10 +771,10 @@ // BackupSchedule is description of a backup schedule. Describes how often // should be the backup performed and what should be the retention policy. type BackupSchedule struct { - FrequencyInterval *int `json:"frequencyInterval,omitempty"` + FrequencyInterval *int32 `json:"frequencyInterval,omitempty"` FrequencyUnit FrequencyUnit `json:"frequencyUnit,omitempty"` KeepAtLeastOneBackup *bool `json:"keepAtLeastOneBackup,omitempty"` - RetentionPeriodInDays *int `json:"retentionPeriodInDays,omitempty"` + RetentionPeriodInDays *int32 `json:"retentionPeriodInDays,omitempty"` StartTime *date.Time `json:"startTime,omitempty"` LastExecutionTime *date.Time `json:"lastExecutionTime,omitempty"` } @@ -676,6 +784,7 @@ autorest.Response `json:"-"` ID *string `json:"id,omitempty"` Name *string `json:"name,omitempty"` + Kind *string `json:"kind,omitempty"` Location *string `json:"location,omitempty"` Type *string `json:"type,omitempty"` Tags *map[string]*string `json:"tags,omitempty"` @@ -689,23 +798,83 @@ NextLink *string `json:"nextLink,omitempty"` } +// CertificateDetails is certificate Details +type CertificateDetails struct { + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Kind *string `json:"kind,omitempty"` + Location *string `json:"location,omitempty"` + Type *string `json:"type,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` + Properties *CertificateDetailsProperties `json:"properties,omitempty"` 
+} + +// CertificateDetailsProperties is +type CertificateDetailsProperties struct { + Version *int32 `json:"version,omitempty"` + SerialNumber *string `json:"serialNumber,omitempty"` + Thumbprint *string `json:"thumbprint,omitempty"` + Subject *string `json:"subject,omitempty"` + NotBefore *date.Time `json:"notBefore,omitempty"` + NotAfter *date.Time `json:"notAfter,omitempty"` + SignatureAlgorithm *string `json:"signatureAlgorithm,omitempty"` + Issuer *string `json:"issuer,omitempty"` + RawData *string `json:"rawData,omitempty"` +} + +// CertificateEmail is certificate Email +type CertificateEmail struct { + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Kind *string `json:"kind,omitempty"` + Location *string `json:"location,omitempty"` + Type *string `json:"type,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` + Properties *CertificateEmailProperties `json:"properties,omitempty"` +} + +// CertificateEmailProperties is +type CertificateEmailProperties struct { + EmailID *string `json:"emailId,omitempty"` + TimeStamp *date.Time `json:"timeStamp,omitempty"` +} + // CertificateOrder is certificate purchase order type CertificateOrder struct { autorest.Response `json:"-"` ID *string `json:"id,omitempty"` Name *string `json:"name,omitempty"` + Kind *string `json:"kind,omitempty"` Location *string `json:"location,omitempty"` Type *string `json:"type,omitempty"` Tags *map[string]*string `json:"tags,omitempty"` Properties *CertificateOrderProperties `json:"properties,omitempty"` } +// CertificateOrderAction is represents a certificate action +type CertificateOrderAction struct { + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Kind *string `json:"kind,omitempty"` + Location *string `json:"location,omitempty"` + Type *string `json:"type,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` + Properties *CertificateOrderActionProperties `json:"properties,omitempty"` +} + +// 
CertificateOrderActionProperties is +type CertificateOrderActionProperties struct { + Type CertificateOrderActionType `json:"type,omitempty"` + CreatedAt *date.Time `json:"createdAt,omitempty"` +} + // CertificateOrderCertificate is class representing the Key Vault container // for certificate purchased through Azure type CertificateOrderCertificate struct { autorest.Response `json:"-"` ID *string `json:"id,omitempty"` Name *string `json:"name,omitempty"` + Kind *string `json:"kind,omitempty"` Location *string `json:"location,omitempty"` Type *string `json:"type,omitempty"` Tags *map[string]*string `json:"tags,omitempty"` @@ -722,10 +891,9 @@ // CertificateOrderCertificateProperties is type CertificateOrderCertificateProperties struct { - KeyVaultCsmID *string `json:"keyVaultCsmId,omitempty"` + KeyVaultID *string `json:"keyVaultId,omitempty"` KeyVaultSecretName *string `json:"keyVaultSecretName,omitempty"` ProvisioningState KeyVaultSecretStatus `json:"provisioningState,omitempty"` - Thumbprint *string `json:"thumbprint,omitempty"` } // CertificateOrderCollection is collection of ceritificate orders @@ -737,21 +905,22 @@ // CertificateOrderProperties is type CertificateOrderProperties struct { - Certificates *map[string]*CertificateOrderCertificate `json:"certificates,omitempty"` - DistinguishedName *string `json:"distinguishedName,omitempty"` - DomainVerificationToken *string `json:"domainVerificationToken,omitempty"` - ValidityInYears *int `json:"validityInYears,omitempty"` - KeySize *int `json:"keySize,omitempty"` - ProductType CertificateProductType `json:"productType,omitempty"` - Status CertificateOrderStatus `json:"status,omitempty"` - SignedCertificate *string `json:"signedCertificate,omitempty"` - Csr *string `json:"csr,omitempty"` - Intermediate *string `json:"intermediate,omitempty"` - Root *string `json:"root,omitempty"` - SerialNumber *string `json:"serialNumber,omitempty"` - Action CertificateAction `json:"action,omitempty"` - KeyVaultCsmID *string 
`json:"keyVaultCsmId,omitempty"` - DelayExistingRevokeInHours *int `json:"delayExistingRevokeInHours,omitempty"` + Certificates *map[string]*CertificateOrderCertificate `json:"certificates,omitempty"` + DistinguishedName *string `json:"distinguishedName,omitempty"` + DomainVerificationToken *string `json:"domainVerificationToken,omitempty"` + ValidityInYears *int32 `json:"validityInYears,omitempty"` + KeySize *int32 `json:"keySize,omitempty"` + ProductType CertificateProductType `json:"productType,omitempty"` + AutoRenew *bool `json:"autoRenew,omitempty"` + ProvisioningState ProvisioningState `json:"provisioningState,omitempty"` + Status CertificateOrderStatus `json:"status,omitempty"` + SignedCertificate *CertificateDetails `json:"signedCertificate,omitempty"` + Csr *string `json:"csr,omitempty"` + Intermediate *CertificateDetails `json:"intermediate,omitempty"` + Root *CertificateDetails `json:"root,omitempty"` + SerialNumber *string `json:"serialNumber,omitempty"` + LastCertificateIssuanceTime *date.Time `json:"lastCertificateIssuanceTime,omitempty"` + ExpirationTime *date.Time `json:"expirationTime,omitempty"` } // CertificateProperties is @@ -771,8 +940,6 @@ CerBlob *string `json:"cerBlob,omitempty"` PublicKeyHash *string `json:"publicKeyHash,omitempty"` HostingEnvironmentProfile *HostingEnvironmentProfile `json:"hostingEnvironmentProfile,omitempty"` - KeyVaultCsmID *string `json:"keyVaultCsmId,omitempty"` - KeyVaultSecretName *string `json:"keyVaultSecretName,omitempty"` } // ClassicMobileService is a mobile service @@ -780,6 +947,7 @@ autorest.Response `json:"-"` ID *string `json:"id,omitempty"` Name *string `json:"name,omitempty"` + Kind *string `json:"kind,omitempty"` Location *string `json:"location,omitempty"` Type *string `json:"type,omitempty"` Tags *map[string]*string `json:"tags,omitempty"` @@ -817,6 +985,7 @@ autorest.Response `json:"-"` ID *string `json:"id,omitempty"` Name *string `json:"name,omitempty"` + Kind *string `json:"kind,omitempty"` 
Location *string `json:"location,omitempty"` Type *string `json:"type,omitempty"` Tags *map[string]*string `json:"tags,omitempty"` @@ -851,6 +1020,12 @@ Phone *string `json:"phone,omitempty"` } +// CorsSettings is cross-Origin Resource Sharing (CORS) settings for the web +// app. +type CorsSettings struct { + AllowedOrigins *[]string `json:"allowedOrigins,omitempty"` +} + // CsmMoveResourceEnvelope is class containing a list of the resources that // need to be moved and the resource group they should be moved to type CsmMoveResourceEnvelope struct { @@ -866,9 +1041,10 @@ // CsmSiteRecoveryEntity is class containting details about site recovery // operation. type CsmSiteRecoveryEntity struct { - SnapshotTime *date.Time `json:"snapshotTime,omitempty"` - SiteName *string `json:"siteName,omitempty"` - SlotName *string `json:"slotName,omitempty"` + SnapshotTime *date.Time `json:"snapshotTime,omitempty"` + RecoverConfig *bool `json:"recoverConfig,omitempty"` + SiteName *string `json:"siteName,omitempty"` + SlotName *string `json:"slotName,omitempty"` } // CsmSlotEntity is class containing deployment slot parameters @@ -881,8 +1057,8 @@ type CsmUsageQuota struct { Unit *string `json:"unit,omitempty"` NextResetTime *date.Time `json:"nextResetTime,omitempty"` - CurrentValue *int32 `json:"currentValue,omitempty"` - Limit *int32 `json:"limit,omitempty"` + CurrentValue *int64 `json:"currentValue,omitempty"` + Limit *int64 `json:"limit,omitempty"` Name *LocalizableString `json:"name,omitempty"` } @@ -898,18 +1074,13 @@ autorest.Response `json:"-"` ID *string `json:"id,omitempty"` Name *string `json:"name,omitempty"` + Kind *string `json:"kind,omitempty"` Location *string `json:"location,omitempty"` Type *string `json:"type,omitempty"` Tags *map[string]*string `json:"tags,omitempty"` Properties *CsrProperties `json:"properties,omitempty"` } -// CsrList is -type CsrList struct { - autorest.Response `json:"-"` - Value *[]Csr `json:"value,omitempty"` -} - // CsrProperties is type 
CsrProperties struct { Name *string `json:"name,omitempty"` @@ -937,6 +1108,7 @@ type DeletedSite struct { ID *string `json:"id,omitempty"` Name *string `json:"name,omitempty"` + Kind *string `json:"kind,omitempty"` Location *string `json:"location,omitempty"` Type *string `json:"type,omitempty"` Tags *map[string]*string `json:"tags,omitempty"` @@ -974,8 +1146,47 @@ GatewaySiteName *string `json:"gatewaySiteName,omitempty"` ClientAffinityEnabled *bool `json:"clientAffinityEnabled,omitempty"` ClientCertEnabled *bool `json:"clientCertEnabled,omitempty"` + HostNamesDisabled *bool `json:"hostNamesDisabled,omitempty"` OutboundIPAddresses *string `json:"outboundIpAddresses,omitempty"` + ContainerSize *int32 `json:"containerSize,omitempty"` + MaxNumberOfWorkers *int32 `json:"maxNumberOfWorkers,omitempty"` CloningInfo *CloningInfo `json:"cloningInfo,omitempty"` + ResourceGroup *string `json:"resourceGroup,omitempty"` + IsDefaultContainer *bool `json:"isDefaultContainer,omitempty"` + DefaultHostName *string `json:"defaultHostName,omitempty"` +} + +// Deployment is represents user crendentials used for publishing activity +type Deployment struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Kind *string `json:"kind,omitempty"` + Location *string `json:"location,omitempty"` + Type *string `json:"type,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` + Properties *DeploymentProperties `json:"properties,omitempty"` +} + +// DeploymentCollection is collection of app deployments +type DeploymentCollection struct { + autorest.Response `json:"-"` + Value *[]Deployment `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// DeploymentProperties is +type DeploymentProperties struct { + ID *string `json:"id,omitempty"` + Status *int32 `json:"status,omitempty"` + Message *string `json:"message,omitempty"` + Author *string `json:"author,omitempty"` + Deployer *string 
`json:"deployer,omitempty"` + AuthorEmail *string `json:"author_email,omitempty"` + StartTime *date.Time `json:"start_time,omitempty"` + EndTime *date.Time `json:"end_time,omitempty"` + Active *bool `json:"active,omitempty"` + Details *string `json:"details,omitempty"` } // Domain is represents a domain @@ -983,6 +1194,7 @@ autorest.Response `json:"-"` ID *string `json:"id,omitempty"` Name *string `json:"name,omitempty"` + Kind *string `json:"kind,omitempty"` Location *string `json:"location,omitempty"` Type *string `json:"type,omitempty"` Tags *map[string]*string `json:"tags,omitempty"` @@ -1020,6 +1232,7 @@ ContactRegistrant *Contact `json:"contactRegistrant,omitempty"` ContactTech *Contact `json:"contactTech,omitempty"` RegistrationStatus DomainStatus `json:"registrationStatus,omitempty"` + ProvisioningState ProvisioningState `json:"provisioningState,omitempty"` NameServers *[]string `json:"nameServers,omitempty"` Privacy *bool `json:"privacy,omitempty"` CreatedTime *date.Time `json:"createdTime,omitempty"` @@ -1029,6 +1242,7 @@ ReadyForDNSRecordManagement *bool `json:"readyForDnsRecordManagement,omitempty"` ManagedHostNames *[]HostName `json:"managedHostNames,omitempty"` Consent *DomainPurchaseConsent `json:"consent,omitempty"` + DomainNotRenewableReasons *[]string `json:"domainNotRenewableReasons,omitempty"` } // DomainPurchaseConsent is domain purchase consent object representing @@ -1043,13 +1257,14 @@ // parameters type DomainRecommendationSearchParameters struct { Keywords *string `json:"keywords,omitempty"` - MaxDomainRecommendations *int `json:"maxDomainRecommendations,omitempty"` + MaxDomainRecommendations *int32 `json:"maxDomainRecommendations,omitempty"` } // DomainRegistrationInput is domain registration input for validation Api type DomainRegistrationInput struct { ID *string `json:"id,omitempty"` Name *string `json:"name,omitempty"` + Kind *string `json:"kind,omitempty"` Location *string `json:"location,omitempty"` Type *string 
`json:"type,omitempty"` Tags *map[string]*string `json:"tags,omitempty"` @@ -1064,6 +1279,7 @@ ContactRegistrant *Contact `json:"contactRegistrant,omitempty"` ContactTech *Contact `json:"contactTech,omitempty"` RegistrationStatus DomainStatus `json:"registrationStatus,omitempty"` + ProvisioningState ProvisioningState `json:"provisioningState,omitempty"` NameServers *[]string `json:"nameServers,omitempty"` Privacy *bool `json:"privacy,omitempty"` CreatedTime *date.Time `json:"createdTime,omitempty"` @@ -1073,6 +1289,7 @@ ReadyForDNSRecordManagement *bool `json:"readyForDnsRecordManagement,omitempty"` ManagedHostNames *[]HostName `json:"managedHostNames,omitempty"` Consent *DomainPurchaseConsent `json:"consent,omitempty"` + DomainNotRenewableReasons *[]string `json:"domainNotRenewableReasons,omitempty"` } // EnabledConfig is enabled configuration @@ -1093,15 +1310,16 @@ // FileSystemHTTPLogsConfig is http logs to file system configuration type FileSystemHTTPLogsConfig struct { - RetentionInMb *int `json:"retentionInMb,omitempty"` - RetentionInDays *int `json:"retentionInDays,omitempty"` - Enabled *bool `json:"enabled,omitempty"` + RetentionInMb *int32 `json:"retentionInMb,omitempty"` + RetentionInDays *int32 `json:"retentionInDays,omitempty"` + Enabled *bool `json:"enabled,omitempty"` } // GeoRegion is geographical region type GeoRegion struct { ID *string `json:"id,omitempty"` Name *string `json:"name,omitempty"` + Kind *string `json:"kind,omitempty"` Location *string `json:"location,omitempty"` Type *string `json:"type,omitempty"` Tags *map[string]*string `json:"tags,omitempty"` @@ -1138,6 +1356,7 @@ autorest.Response `json:"-"` ID *string `json:"id,omitempty"` Name *string `json:"name,omitempty"` + Kind *string `json:"kind,omitempty"` Location *string `json:"location,omitempty"` Type *string `json:"type,omitempty"` Tags *map[string]*string `json:"tags,omitempty"` @@ -1160,12 +1379,6 @@ DiagnosicsOutput *string `json:"diagnosicsOutput,omitempty"` } -// 
HostingEnvironmentDiagnosticsList is -type HostingEnvironmentDiagnosticsList struct { - autorest.Response `json:"-"` - Value *[]HostingEnvironmentDiagnostics `json:"value,omitempty"` -} - // HostingEnvironmentProfile is specification for a hostingEnvironment (App // Service Environment) to use for this resource type HostingEnvironmentProfile struct { @@ -1178,6 +1391,7 @@ type HostingEnvironmentProperties struct { Name *string `json:"name,omitempty"` Location *string `json:"location,omitempty"` + ProvisioningState ProvisioningState `json:"provisioningState,omitempty"` Status HostingEnvironmentStatus `json:"status,omitempty"` VnetName *string `json:"vnetName,omitempty"` VnetResourceGroupName *string `json:"vnetResourceGroupName,omitempty"` @@ -1185,19 +1399,19 @@ VirtualNetwork *VirtualNetworkProfile `json:"virtualNetwork,omitempty"` InternalLoadBalancingMode InternalLoadBalancingMode `json:"internalLoadBalancingMode,omitempty"` MultiSize *string `json:"multiSize,omitempty"` - MultiRoleCount *int `json:"multiRoleCount,omitempty"` + MultiRoleCount *int32 `json:"multiRoleCount,omitempty"` WorkerPools *[]WorkerPool `json:"workerPools,omitempty"` - IpsslAddressCount *int `json:"ipsslAddressCount,omitempty"` + IpsslAddressCount *int32 `json:"ipsslAddressCount,omitempty"` DatabaseEdition *string `json:"databaseEdition,omitempty"` DatabaseServiceObjective *string `json:"databaseServiceObjective,omitempty"` - UpgradeDomains *int `json:"upgradeDomains,omitempty"` + UpgradeDomains *int32 `json:"upgradeDomains,omitempty"` SubscriptionID *string `json:"subscriptionId,omitempty"` DNSSuffix *string `json:"dnsSuffix,omitempty"` LastAction *string `json:"lastAction,omitempty"` LastActionResult *string `json:"lastActionResult,omitempty"` AllowedMultiSizes *string `json:"allowedMultiSizes,omitempty"` AllowedWorkerSizes *string `json:"allowedWorkerSizes,omitempty"` - MaximumNumberOfMachines *int `json:"maximumNumberOfMachines,omitempty"` + MaximumNumberOfMachines *int32 
`json:"maximumNumberOfMachines,omitempty"` VipMappings *[]VirtualIPMapping `json:"vipMappings,omitempty"` EnvironmentCapacities *[]StampCapacity `json:"environmentCapacities,omitempty"` NetworkAccessControlList *[]NetworkAccessControlEntry `json:"networkAccessControlList,omitempty"` @@ -1206,6 +1420,7 @@ ResourceGroup *string `json:"resourceGroup,omitempty"` APIManagementAccountID *string `json:"apiManagementAccountId,omitempty"` Suspended *bool `json:"suspended,omitempty"` + ClusterSettings *[]NameValuePair `json:"clusterSettings,omitempty"` } // HostName is details of a hostname derived from a domain @@ -1223,6 +1438,7 @@ autorest.Response `json:"-"` ID *string `json:"id,omitempty"` Name *string `json:"name,omitempty"` + Kind *string `json:"kind,omitempty"` Location *string `json:"location,omitempty"` Type *string `json:"type,omitempty"` Tags *map[string]*string `json:"tags,omitempty"` @@ -1249,12 +1465,11 @@ // HostNameSslState is object that represents a SSL-enabled host name. type HostNameSslState struct { - Name *string `json:"name,omitempty"` - SslState SslState `json:"sslState,omitempty"` - VirtualIP *string `json:"virtualIP,omitempty"` - Thumbprint *string `json:"thumbprint,omitempty"` - ToUpdate *bool `json:"toUpdate,omitempty"` - ToUpdateIPBasedSsl *bool `json:"toUpdateIpBasedSsl,omitempty"` + Name *string `json:"name,omitempty"` + SslState SslState `json:"sslState,omitempty"` + VirtualIP *string `json:"virtualIP,omitempty"` + Thumbprint *string `json:"thumbprint,omitempty"` + ToUpdate *bool `json:"toUpdate,omitempty"` } // HTTPLogsConfig is http logs configuration @@ -1263,12 +1478,60 @@ AzureBlobStorage *AzureBlobStorageHTTPLogsConfig `json:"azureBlobStorage,omitempty"` } +// IPSecurityRestriction is represents an ip security restriction on a web app. 
+type IPSecurityRestriction struct { + IPAddress *string `json:"ipAddress,omitempty"` + SubnetMask *string `json:"subnetMask,omitempty"` +} + // KeyValuePairStringString is type KeyValuePairStringString struct { Key *string `json:"key,omitempty"` Value *string `json:"value,omitempty"` } +// ListCertificateEmail is +type ListCertificateEmail struct { + autorest.Response `json:"-"` + Value *[]CertificateEmail `json:"value,omitempty"` +} + +// ListCertificateOrderAction is +type ListCertificateOrderAction struct { + autorest.Response `json:"-"` + Value *[]CertificateOrderAction `json:"value,omitempty"` +} + +// ListCsr is +type ListCsr struct { + autorest.Response `json:"-"` + Value *[]Csr `json:"value,omitempty"` +} + +// ListHostingEnvironmentDiagnostics is +type ListHostingEnvironmentDiagnostics struct { + autorest.Response `json:"-"` + Value *[]HostingEnvironmentDiagnostics `json:"value,omitempty"` +} + +// ListRecommendation is +type ListRecommendation struct { + autorest.Response `json:"-"` + Value *[]Recommendation `json:"value,omitempty"` +} + +// ListVnetInfo is +type ListVnetInfo struct { + autorest.Response `json:"-"` + Value *[]VnetInfo `json:"value,omitempty"` +} + +// ListVnetRoute is +type ListVnetRoute struct { + autorest.Response `json:"-"` + Value *[]VnetRoute `json:"value,omitempty"` +} + // LocalizableString is localizableString object containing the name and a // localized value. 
type LocalizableString struct { @@ -1281,6 +1544,7 @@ autorest.Response `json:"-"` ID *string `json:"id,omitempty"` Name *string `json:"name,omitempty"` + Kind *string `json:"kind,omitempty"` Location *string `json:"location,omitempty"` Type *string `json:"type,omitempty"` Tags *map[string]*string `json:"tags,omitempty"` @@ -1301,13 +1565,14 @@ Location *string `json:"location,omitempty"` Status ManagedHostingEnvironmentStatus `json:"status,omitempty"` VirtualNetwork *VirtualNetworkProfile `json:"virtualNetwork,omitempty"` - IpsslAddressCount *int `json:"ipsslAddressCount,omitempty"` + IpsslAddressCount *int32 `json:"ipsslAddressCount,omitempty"` DNSSuffix *string `json:"dnsSuffix,omitempty"` SubscriptionID *string `json:"subscriptionId,omitempty"` ResourceGroup *string `json:"resourceGroup,omitempty"` EnvironmentIsHealthy *bool `json:"environmentIsHealthy,omitempty"` EnvironmentStatus *string `json:"environmentStatus,omitempty"` Suspended *bool `json:"suspended,omitempty"` + APIManagementAccount *string `json:"apiManagementAccount,omitempty"` } // MetricAvailabilily is class repesenting metrics availability and retention @@ -1321,6 +1586,7 @@ autorest.Response `json:"-"` ID *string `json:"id,omitempty"` Name *string `json:"name,omitempty"` + Kind *string `json:"kind,omitempty"` Location *string `json:"location,omitempty"` Type *string `json:"type,omitempty"` Tags *map[string]*string `json:"tags,omitempty"` @@ -1365,7 +1631,7 @@ type NetworkAccessControlEntry struct { Action AccessControlEntryAction `json:"action,omitempty"` Description *string `json:"description,omitempty"` - Order *int `json:"order,omitempty"` + Order *int32 `json:"order,omitempty"` RemoteSubnet *string `json:"remoteSubnet,omitempty"` } @@ -1376,6 +1642,7 @@ autorest.Response `json:"-"` ID *string `json:"id,omitempty"` Name *string `json:"name,omitempty"` + Kind *string `json:"kind,omitempty"` Location *string `json:"location,omitempty"` Type *string `json:"type,omitempty"` Tags 
*map[string]*string `json:"tags,omitempty"` @@ -1389,12 +1656,6 @@ HybridConnections *[]RelayServiceConnectionEntity `json:"hybridConnections,omitempty"` } -// ObjectSet is -type ObjectSet struct { - autorest.Response `json:"-"` - Value *map[string]interface{} `json:"value,omitempty"` -} - // PremierAddOnRequest is type PremierAddOnRequest struct { Location *string `json:"location,omitempty"` @@ -1411,19 +1672,83 @@ ActionHostName *string `json:"actionHostName,omitempty"` ReroutePercentage *float64 `json:"reroutePercentage,omitempty"` ChangeStep *float64 `json:"changeStep,omitempty"` - ChangeIntervalInMinutes *int `json:"changeIntervalInMinutes,omitempty"` + ChangeIntervalInMinutes *int32 `json:"changeIntervalInMinutes,omitempty"` MinReroutePercentage *float64 `json:"minReroutePercentage,omitempty"` MaxReroutePercentage *float64 `json:"maxReroutePercentage,omitempty"` ChangeDecisionCallbackURL *string `json:"changeDecisionCallbackUrl,omitempty"` Name *string `json:"name,omitempty"` } +// ReadCloser is +type ReadCloser struct { + autorest.Response `json:"-"` + Value *io.ReadCloser `json:"value,omitempty"` +} + +// Recommendation is represents a recommendation result generated by the +// recommendation engine +type Recommendation struct { + CreationTime *date.Time `json:"creationTime,omitempty"` + RecommendationID *string `json:"recommendationId,omitempty"` + ResourceID *string `json:"resourceId,omitempty"` + ResourceScope *string `json:"resourceScope,omitempty"` + RuleName *string `json:"ruleName,omitempty"` + DisplayName *string `json:"displayName,omitempty"` + Message *string `json:"message,omitempty"` + Level NotificationLevel `json:"level,omitempty"` + Channels Channels `json:"channels,omitempty"` + Tags *[]string `json:"tags,omitempty"` + ActionName *string `json:"actionName,omitempty"` + Enabled *int32 `json:"enabled,omitempty"` + StartTime *date.Time `json:"startTime,omitempty"` + EndTime *date.Time `json:"endTime,omitempty"` + NextNotificationTime *date.Time 
`json:"nextNotificationTime,omitempty"` + NotificationExpirationTime *date.Time `json:"notificationExpirationTime,omitempty"` + NotifiedTime *date.Time `json:"notifiedTime,omitempty"` + Score *float64 `json:"score,omitempty"` +} + +// RecommendationRule is represents a recommendation rule that the +// recommendation engine can perform +type RecommendationRule struct { + autorest.Response `json:"-"` + Name *string `json:"name,omitempty"` + DisplayName *string `json:"displayName,omitempty"` + Message *string `json:"message,omitempty"` + RecommendationID *string `json:"recommendationId,omitempty"` + Description *string `json:"description,omitempty"` + ActionName *string `json:"actionName,omitempty"` + Enabled *int32 `json:"enabled,omitempty"` + Level NotificationLevel `json:"level,omitempty"` + Channels Channels `json:"channels,omitempty"` + Tags *[]string `json:"tags,omitempty"` +} + +// ReissueCertificateOrderRequest is class representing certificate reissue +// request +type ReissueCertificateOrderRequest struct { + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Kind *string `json:"kind,omitempty"` + Location *string `json:"location,omitempty"` + Type *string `json:"type,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` + Properties *ReissueCertificateOrderRequestProperties `json:"properties,omitempty"` +} + +// ReissueCertificateOrderRequestProperties is +type ReissueCertificateOrderRequestProperties struct { + KeySize *int32 `json:"keySize,omitempty"` + DelayExistingRevokeInHours *int32 `json:"delayExistingRevokeInHours,omitempty"` +} + // RelayServiceConnectionEntity is class that represents a Biztalk Hybrid // Connection type RelayServiceConnectionEntity struct { autorest.Response `json:"-"` ID *string `json:"id,omitempty"` Name *string `json:"name,omitempty"` + Kind *string `json:"kind,omitempty"` Location *string `json:"location,omitempty"` Type *string `json:"type,omitempty"` Tags *map[string]*string 
`json:"tags,omitempty"` @@ -1437,13 +1762,29 @@ ResourceType *string `json:"resourceType,omitempty"` ResourceConnectionString *string `json:"resourceConnectionString,omitempty"` Hostname *string `json:"hostname,omitempty"` - Port *int `json:"port,omitempty"` + Port *int32 `json:"port,omitempty"` BiztalkURI *string `json:"biztalkUri,omitempty"` } +// RenewCertificateOrderRequest is class representing certificate renew request +type RenewCertificateOrderRequest struct { + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Kind *string `json:"kind,omitempty"` + Location *string `json:"location,omitempty"` + Type *string `json:"type,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` + Properties *RenewCertificateOrderRequestProperties `json:"properties,omitempty"` +} + +// RenewCertificateOrderRequestProperties is +type RenewCertificateOrderRequestProperties struct { + KeySize *int32 `json:"keySize,omitempty"` +} + // RequestsBasedTrigger is requestsBasedTrigger type RequestsBasedTrigger struct { - Count *int `json:"count,omitempty"` + Count *int32 `json:"count,omitempty"` TimeInterval *string `json:"timeInterval,omitempty"` } @@ -1451,6 +1792,7 @@ type Resource struct { ID *string `json:"id,omitempty"` Name *string `json:"name,omitempty"` + Kind *string `json:"kind,omitempty"` Location *string `json:"location,omitempty"` Type *string `json:"type,omitempty"` Tags *map[string]*string `json:"tags,omitempty"` @@ -1512,6 +1854,7 @@ autorest.Response `json:"-"` ID *string `json:"id,omitempty"` Name *string `json:"name,omitempty"` + Kind *string `json:"kind,omitempty"` Location *string `json:"location,omitempty"` Type *string `json:"type,omitempty"` Tags *map[string]*string `json:"tags,omitempty"` @@ -1536,6 +1879,7 @@ autorest.Response `json:"-"` ID *string `json:"id,omitempty"` Name *string `json:"name,omitempty"` + Kind *string `json:"kind,omitempty"` Location *string `json:"location,omitempty"` Type *string `json:"type,omitempty"` Tags 
*map[string]*string `json:"tags,omitempty"` @@ -1564,6 +1908,7 @@ autorest.Response `json:"-"` ID *string `json:"id,omitempty"` Name *string `json:"name,omitempty"` + Kind *string `json:"kind,omitempty"` Location *string `json:"location,omitempty"` Type *string `json:"type,omitempty"` Tags *map[string]*string `json:"tags,omitempty"` @@ -1574,22 +1919,30 @@ // ServerFarmWithRichSkuProperties is type ServerFarmWithRichSkuProperties struct { Name *string `json:"name,omitempty"` + WorkerTierName *string `json:"workerTierName,omitempty"` Status StatusOptions `json:"status,omitempty"` Subscription *string `json:"subscription,omitempty"` AdminSiteName *string `json:"adminSiteName,omitempty"` HostingEnvironmentProfile *HostingEnvironmentProfile `json:"hostingEnvironmentProfile,omitempty"` - MaximumNumberOfWorkers *int `json:"maximumNumberOfWorkers,omitempty"` + MaximumNumberOfWorkers *int32 `json:"maximumNumberOfWorkers,omitempty"` GeoRegion *string `json:"geoRegion,omitempty"` PerSiteScaling *bool `json:"perSiteScaling,omitempty"` - NumberOfSites *int `json:"numberOfSites,omitempty"` + NumberOfSites *int32 `json:"numberOfSites,omitempty"` ResourceGroup *string `json:"resourceGroup,omitempty"` } +// SetObject is +type SetObject struct { + autorest.Response `json:"-"` + Value *map[string]interface{} `json:"value,omitempty"` +} + // Site is represents a web app type Site struct { autorest.Response `json:"-"` ID *string `json:"id,omitempty"` Name *string `json:"name,omitempty"` + Kind *string `json:"kind,omitempty"` Location *string `json:"location,omitempty"` Type *string `json:"type,omitempty"` Tags *map[string]*string `json:"tags,omitempty"` @@ -1606,10 +1959,12 @@ TokenStoreEnabled *bool `json:"tokenStoreEnabled,omitempty"` AllowedExternalRedirectUrls *[]string `json:"allowedExternalRedirectUrls,omitempty"` DefaultProvider BuiltInAuthenticationProvider `json:"defaultProvider,omitempty"` + TokenRefreshExtensionHours *float64 `json:"tokenRefreshExtensionHours,omitempty"` 
ClientID *string `json:"clientId,omitempty"` ClientSecret *string `json:"clientSecret,omitempty"` Issuer *string `json:"issuer,omitempty"` AllowedAudiences *[]string `json:"allowedAudiences,omitempty"` + AdditionalLoginParams *[]string `json:"additionalLoginParams,omitempty"` AadClientID *string `json:"aadClientId,omitempty"` OpenIDIssuer *string `json:"openIdIssuer,omitempty"` GoogleClientID *string `json:"googleClientId,omitempty"` @@ -1625,6 +1980,21 @@ MicrosoftAccountOAuthScopes *[]string `json:"microsoftAccountOAuthScopes,omitempty"` } +// SiteCloneability is represents whether or not a web app is cloneable +type SiteCloneability struct { + autorest.Response `json:"-"` + Result CloneAbilityResult `json:"result,omitempty"` + BlockingFeatures *[]SiteCloneabilityCriterion `json:"blockingFeatures,omitempty"` + UnsupportedFeatures *[]SiteCloneabilityCriterion `json:"unsupportedFeatures,omitempty"` + BlockingCharacteristics *[]SiteCloneabilityCriterion `json:"blockingCharacteristics,omitempty"` +} + +// SiteCloneabilityCriterion is represents a site cloneability criterion +type SiteCloneabilityCriterion struct { + Name *string `json:"name,omitempty"` + Description *string `json:"description,omitempty"` +} + // SiteCollection is collection of sites type SiteCollection struct { autorest.Response `json:"-"` @@ -1649,6 +2019,7 @@ autorest.Response `json:"-"` ID *string `json:"id,omitempty"` Name *string `json:"name,omitempty"` + Kind *string `json:"kind,omitempty"` Location *string `json:"location,omitempty"` Type *string `json:"type,omitempty"` Tags *map[string]*string `json:"tags,omitempty"` @@ -1657,48 +2028,53 @@ // SiteConfigProperties is type SiteConfigProperties struct { - NumberOfWorkers *int `json:"numberOfWorkers,omitempty"` - DefaultDocuments *[]string `json:"defaultDocuments,omitempty"` - NetFrameworkVersion *string `json:"netFrameworkVersion,omitempty"` - PhpVersion *string `json:"phpVersion,omitempty"` - PythonVersion *string 
`json:"pythonVersion,omitempty"` - RequestTracingEnabled *bool `json:"requestTracingEnabled,omitempty"` - RequestTracingExpirationTime *date.Time `json:"requestTracingExpirationTime,omitempty"` - RemoteDebuggingEnabled *bool `json:"remoteDebuggingEnabled,omitempty"` - RemoteDebuggingVersion *string `json:"remoteDebuggingVersion,omitempty"` - HTTPLoggingEnabled *bool `json:"httpLoggingEnabled,omitempty"` - LogsDirectorySizeLimit *int `json:"logsDirectorySizeLimit,omitempty"` - DetailedErrorLoggingEnabled *bool `json:"detailedErrorLoggingEnabled,omitempty"` - PublishingUsername *string `json:"publishingUsername,omitempty"` - PublishingPassword *string `json:"publishingPassword,omitempty"` - AppSettings *[]NameValuePair `json:"appSettings,omitempty"` - Metadata *[]NameValuePair `json:"metadata,omitempty"` - ConnectionStrings *[]ConnStringInfo `json:"connectionStrings,omitempty"` - HandlerMappings *[]HandlerMapping `json:"handlerMappings,omitempty"` - DocumentRoot *string `json:"documentRoot,omitempty"` - ScmType *string `json:"scmType,omitempty"` - Use32BitWorkerProcess *bool `json:"use32BitWorkerProcess,omitempty"` - WebSocketsEnabled *bool `json:"webSocketsEnabled,omitempty"` - AlwaysOn *bool `json:"alwaysOn,omitempty"` - JavaVersion *string `json:"javaVersion,omitempty"` - JavaContainer *string `json:"javaContainer,omitempty"` - JavaContainerVersion *string `json:"javaContainerVersion,omitempty"` - ManagedPipelineMode ManagedPipelineMode `json:"managedPipelineMode,omitempty"` - VirtualApplications *[]VirtualApplication `json:"virtualApplications,omitempty"` - LoadBalancing SiteLoadBalancing `json:"loadBalancing,omitempty"` - Experiments *Experiments `json:"experiments,omitempty"` - Limits *SiteLimits `json:"limits,omitempty"` - AutoHealEnabled *bool `json:"autoHealEnabled,omitempty"` - AutoHealRules *AutoHealRules `json:"autoHealRules,omitempty"` - TracingOptions *string `json:"tracingOptions,omitempty"` - VnetName *string `json:"vnetName,omitempty"` - 
AutoSwapSlotName *string `json:"autoSwapSlotName,omitempty"` + NumberOfWorkers *int32 `json:"numberOfWorkers,omitempty"` + DefaultDocuments *[]string `json:"defaultDocuments,omitempty"` + NetFrameworkVersion *string `json:"netFrameworkVersion,omitempty"` + PhpVersion *string `json:"phpVersion,omitempty"` + PythonVersion *string `json:"pythonVersion,omitempty"` + RequestTracingEnabled *bool `json:"requestTracingEnabled,omitempty"` + RequestTracingExpirationTime *date.Time `json:"requestTracingExpirationTime,omitempty"` + RemoteDebuggingEnabled *bool `json:"remoteDebuggingEnabled,omitempty"` + RemoteDebuggingVersion *string `json:"remoteDebuggingVersion,omitempty"` + HTTPLoggingEnabled *bool `json:"httpLoggingEnabled,omitempty"` + LogsDirectorySizeLimit *int32 `json:"logsDirectorySizeLimit,omitempty"` + DetailedErrorLoggingEnabled *bool `json:"detailedErrorLoggingEnabled,omitempty"` + PublishingUsername *string `json:"publishingUsername,omitempty"` + PublishingPassword *string `json:"publishingPassword,omitempty"` + AppSettings *[]NameValuePair `json:"appSettings,omitempty"` + Metadata *[]NameValuePair `json:"metadata,omitempty"` + ConnectionStrings *[]ConnStringInfo `json:"connectionStrings,omitempty"` + HandlerMappings *[]HandlerMapping `json:"handlerMappings,omitempty"` + DocumentRoot *string `json:"documentRoot,omitempty"` + ScmType *string `json:"scmType,omitempty"` + Use32BitWorkerProcess *bool `json:"use32BitWorkerProcess,omitempty"` + WebSocketsEnabled *bool `json:"webSocketsEnabled,omitempty"` + AlwaysOn *bool `json:"alwaysOn,omitempty"` + JavaVersion *string `json:"javaVersion,omitempty"` + JavaContainer *string `json:"javaContainer,omitempty"` + JavaContainerVersion *string `json:"javaContainerVersion,omitempty"` + ManagedPipelineMode ManagedPipelineMode `json:"managedPipelineMode,omitempty"` + VirtualApplications *[]VirtualApplication `json:"virtualApplications,omitempty"` + LoadBalancing SiteLoadBalancing `json:"loadBalancing,omitempty"` + Experiments 
*Experiments `json:"experiments,omitempty"` + Limits *SiteLimits `json:"limits,omitempty"` + AutoHealEnabled *bool `json:"autoHealEnabled,omitempty"` + AutoHealRules *AutoHealRules `json:"autoHealRules,omitempty"` + TracingOptions *string `json:"tracingOptions,omitempty"` + VnetName *string `json:"vnetName,omitempty"` + Cors *CorsSettings `json:"cors,omitempty"` + APIDefinition *APIDefinitionInfo `json:"apiDefinition,omitempty"` + AutoSwapSlotName *string `json:"autoSwapSlotName,omitempty"` + LocalMySQLEnabled *bool `json:"localMySqlEnabled,omitempty"` + IPSecurityRestrictions *[]IPSecurityRestriction `json:"ipSecurityRestrictions,omitempty"` } // SiteInstance is instance of a web app type SiteInstance struct { ID *string `json:"id,omitempty"` Name *string `json:"name,omitempty"` + Kind *string `json:"kind,omitempty"` Location *string `json:"location,omitempty"` Type *string `json:"type,omitempty"` Tags *map[string]*string `json:"tags,omitempty"` @@ -1720,8 +2096,8 @@ // SiteLimits is represents metric limits set on a web app. 
type SiteLimits struct { MaxPercentageCPU *float64 `json:"maxPercentageCpu,omitempty"` - MaxMemoryInMb *int32 `json:"maxMemoryInMb,omitempty"` - MaxDiskSizeInMb *int32 `json:"maxDiskSizeInMb,omitempty"` + MaxMemoryInMb *int64 `json:"maxMemoryInMb,omitempty"` + MaxDiskSizeInMb *int64 `json:"maxDiskSizeInMb,omitempty"` } // SiteLogsConfig is configuration of Azure web site @@ -1729,6 +2105,7 @@ autorest.Response `json:"-"` ID *string `json:"id,omitempty"` Name *string `json:"name,omitempty"` + Kind *string `json:"kind,omitempty"` Location *string `json:"location,omitempty"` Type *string `json:"type,omitempty"` Tags *map[string]*string `json:"tags,omitempty"` @@ -1766,8 +2143,14 @@ GatewaySiteName *string `json:"gatewaySiteName,omitempty"` ClientAffinityEnabled *bool `json:"clientAffinityEnabled,omitempty"` ClientCertEnabled *bool `json:"clientCertEnabled,omitempty"` + HostNamesDisabled *bool `json:"hostNamesDisabled,omitempty"` OutboundIPAddresses *string `json:"outboundIpAddresses,omitempty"` + ContainerSize *int32 `json:"containerSize,omitempty"` + MaxNumberOfWorkers *int32 `json:"maxNumberOfWorkers,omitempty"` CloningInfo *CloningInfo `json:"cloningInfo,omitempty"` + ResourceGroup *string `json:"resourceGroup,omitempty"` + IsDefaultContainer *bool `json:"isDefaultContainer,omitempty"` + DefaultHostName *string `json:"defaultHostName,omitempty"` } // SitePropertiesModel is @@ -1782,6 +2165,7 @@ autorest.Response `json:"-"` ID *string `json:"id,omitempty"` Name *string `json:"name,omitempty"` + Kind *string `json:"kind,omitempty"` Location *string `json:"location,omitempty"` Type *string `json:"type,omitempty"` Tags *map[string]*string `json:"tags,omitempty"` @@ -1799,9 +2183,9 @@ // SkuCapacity is description of the App Service Plan scale options type SkuCapacity struct { - Minimum *int `json:"minimum,omitempty"` - Maximum *int `json:"maximum,omitempty"` - Default *int `json:"default,omitempty"` + Minimum *int32 `json:"minimum,omitempty"` + Maximum *int32 
`json:"maximum,omitempty"` + Default *int32 `json:"default,omitempty"` ScaleType *string `json:"scaleType,omitempty"` } @@ -1811,7 +2195,7 @@ Tier *string `json:"tier,omitempty"` Size *string `json:"size,omitempty"` Family *string `json:"family,omitempty"` - Capacity *int `json:"capacity,omitempty"` + Capacity *int32 `json:"capacity,omitempty"` } // SkuInfo is sku discovery information @@ -1842,6 +2226,7 @@ autorest.Response `json:"-"` ID *string `json:"id,omitempty"` Name *string `json:"name,omitempty"` + Kind *string `json:"kind,omitempty"` Location *string `json:"location,omitempty"` Type *string `json:"type,omitempty"` Tags *map[string]*string `json:"tags,omitempty"` @@ -1859,6 +2244,7 @@ type SlotDifference struct { ID *string `json:"id,omitempty"` Name *string `json:"name,omitempty"` + Kind *string `json:"kind,omitempty"` Location *string `json:"location,omitempty"` Type *string `json:"type,omitempty"` Tags *map[string]*string `json:"tags,omitempty"` @@ -1886,7 +2272,7 @@ // SlowRequestsBasedTrigger is slowRequestsBasedTrigger type SlowRequestsBasedTrigger struct { TimeTaken *string `json:"timeTaken,omitempty"` - Count *int `json:"count,omitempty"` + Count *int32 `json:"count,omitempty"` TimeInterval *string `json:"timeInterval,omitempty"` } @@ -1895,6 +2281,7 @@ autorest.Response `json:"-"` ID *string `json:"id,omitempty"` Name *string `json:"name,omitempty"` + Kind *string `json:"kind,omitempty"` Location *string `json:"location,omitempty"` Type *string `json:"type,omitempty"` Tags *map[string]*string `json:"tags,omitempty"` @@ -1920,12 +2307,12 @@ // StampCapacity is class containing stamp capacity information type StampCapacity struct { Name *string `json:"name,omitempty"` - AvailableCapacity *int32 `json:"availableCapacity,omitempty"` - TotalCapacity *int32 `json:"totalCapacity,omitempty"` + AvailableCapacity *int64 `json:"availableCapacity,omitempty"` + TotalCapacity *int64 `json:"totalCapacity,omitempty"` Unit *string `json:"unit,omitempty"` 
ComputeMode ComputeModeOptions `json:"computeMode,omitempty"` WorkerSize WorkerSizeOptions `json:"workerSize,omitempty"` - WorkerSizeID *int `json:"workerSizeId,omitempty"` + WorkerSizeID *int32 `json:"workerSizeId,omitempty"` ExcludeFromCapacityAllocation *bool `json:"excludeFromCapacityAllocation,omitempty"` IsApplicableForAllComputeModes *bool `json:"isApplicableForAllComputeModes,omitempty"` SiteMode *string `json:"siteMode,omitempty"` @@ -1940,10 +2327,10 @@ // StatusCodesBasedTrigger is statusCodeBasedTrigger type StatusCodesBasedTrigger struct { - Status *int `json:"status,omitempty"` - SubStatus *int `json:"subStatus,omitempty"` - Win32Status *int `json:"win32Status,omitempty"` - Count *int `json:"count,omitempty"` + Status *int32 `json:"status,omitempty"` + SubStatus *int32 `json:"subStatus,omitempty"` + Win32Status *int32 `json:"win32Status,omitempty"` + Count *int32 `json:"count,omitempty"` TimeInterval *string `json:"timeInterval,omitempty"` } @@ -1952,6 +2339,7 @@ autorest.Response `json:"-"` ID *string `json:"id,omitempty"` Name *string `json:"name,omitempty"` + Kind *string `json:"kind,omitempty"` Location *string `json:"location,omitempty"` Type *string `json:"type,omitempty"` Tags *map[string]*string `json:"tags,omitempty"` @@ -1978,6 +2366,7 @@ autorest.Response `json:"-"` ID *string `json:"id,omitempty"` Name *string `json:"name,omitempty"` + Kind *string `json:"kind,omitempty"` Location *string `json:"location,omitempty"` Type *string `json:"type,omitempty"` Tags *map[string]*string `json:"tags,omitempty"` @@ -2007,6 +2396,7 @@ type Usage struct { ID *string `json:"id,omitempty"` Name *string `json:"name,omitempty"` + Kind *string `json:"kind,omitempty"` Location *string `json:"location,omitempty"` Type *string `json:"type,omitempty"` Tags *map[string]*string `json:"tags,omitempty"` @@ -2026,8 +2416,8 @@ Name *string `json:"name,omitempty"` ResourceName *string `json:"resourceName,omitempty"` Unit *string `json:"unit,omitempty"` - CurrentValue 
*int32 `json:"currentValue,omitempty"` - Limit *int32 `json:"limit,omitempty"` + CurrentValue *int64 `json:"currentValue,omitempty"` + Limit *int64 `json:"limit,omitempty"` NextResetTime *date.Time `json:"nextResetTime,omitempty"` ComputeMode ComputeModeOptions `json:"computeMode,omitempty"` SiteMode *string `json:"siteMode,omitempty"` @@ -2038,6 +2428,7 @@ autorest.Response `json:"-"` ID *string `json:"id,omitempty"` Name *string `json:"name,omitempty"` + Kind *string `json:"kind,omitempty"` Location *string `json:"location,omitempty"` Type *string `json:"type,omitempty"` Tags *map[string]*string `json:"tags,omitempty"` @@ -2046,13 +2437,10 @@ // UserProperties is type UserProperties struct { - Name *string `json:"name,omitempty"` - PublishingUserName *string `json:"publishingUserName,omitempty"` - PublishingPassword *string `json:"publishingPassword,omitempty"` - LastUpdatedTime *date.Time `json:"lastUpdatedTime,omitempty"` - Metadata *string `json:"metadata,omitempty"` - IsDeleted *bool `json:"isDeleted,omitempty"` - ScmURI *string `json:"scmUri,omitempty"` + Name *string `json:"name,omitempty"` + PublishingUserName *string `json:"publishingUserName,omitempty"` + PublishingPassword *string `json:"publishingPassword,omitempty"` + ScmURI *string `json:"scmUri,omitempty"` } // VirtualApplication is @@ -2072,8 +2460,8 @@ // VirtualIPMapping is class that represents a VIP mapping type VirtualIPMapping struct { VirtualIP *string `json:"virtualIP,omitempty"` - InternalHTTPPort *int `json:"internalHttpPort,omitempty"` - InternalHTTPSPort *int `json:"internalHttpsPort,omitempty"` + InternalHTTPPort *int32 `json:"internalHttpPort,omitempty"` + InternalHTTPSPort *int32 `json:"internalHttpsPort,omitempty"` InUse *bool `json:"inUse,omitempty"` } @@ -2091,6 +2479,7 @@ autorest.Response `json:"-"` ID *string `json:"id,omitempty"` Name *string `json:"name,omitempty"` + Kind *string `json:"kind,omitempty"` Location *string `json:"location,omitempty"` Type *string 
`json:"type,omitempty"` Tags *map[string]*string `json:"tags,omitempty"` @@ -2109,24 +2498,21 @@ autorest.Response `json:"-"` ID *string `json:"id,omitempty"` Name *string `json:"name,omitempty"` + Kind *string `json:"kind,omitempty"` Location *string `json:"location,omitempty"` Type *string `json:"type,omitempty"` Tags *map[string]*string `json:"tags,omitempty"` Properties *VnetInfoProperties `json:"properties,omitempty"` } -// VnetInfoList is -type VnetInfoList struct { - autorest.Response `json:"-"` - Value *[]VnetInfo `json:"value,omitempty"` -} - // VnetInfoProperties is type VnetInfoProperties struct { VnetResourceID *string `json:"vnetResourceId,omitempty"` CertThumbprint *string `json:"certThumbprint,omitempty"` CertBlob *string `json:"certBlob,omitempty"` Routes *[]VnetRoute `json:"routes,omitempty"` + ResyncRequired *bool `json:"resyncRequired,omitempty"` + DNSServers *string `json:"dnsServers,omitempty"` } // VnetRoute is vnetRoute contract used to pass routing information for a vnet. 
@@ -2134,18 +2520,13 @@ autorest.Response `json:"-"` ID *string `json:"id,omitempty"` Name *string `json:"name,omitempty"` + Kind *string `json:"kind,omitempty"` Location *string `json:"location,omitempty"` Type *string `json:"type,omitempty"` Tags *map[string]*string `json:"tags,omitempty"` Properties *VnetRouteProperties `json:"properties,omitempty"` } -// VnetRouteList is -type VnetRouteList struct { - autorest.Response `json:"-"` - Value *[]VnetRoute `json:"value,omitempty"` -} - // VnetRouteProperties is type VnetRouteProperties struct { Name *string `json:"name,omitempty"` @@ -2159,6 +2540,7 @@ autorest.Response `json:"-"` ID *string `json:"id,omitempty"` Name *string `json:"name,omitempty"` + Kind *string `json:"kind,omitempty"` Location *string `json:"location,omitempty"` Type *string `json:"type,omitempty"` Tags *map[string]*string `json:"tags,omitempty"` @@ -2175,9 +2557,9 @@ // WorkerPoolProperties is type WorkerPoolProperties struct { - WorkerSizeID *int `json:"workerSizeId,omitempty"` + WorkerSizeID *int32 `json:"workerSizeId,omitempty"` ComputeMode ComputeModeOptions `json:"computeMode,omitempty"` WorkerSize *string `json:"workerSize,omitempty"` - WorkerCount *int `json:"workerCount,omitempty"` + WorkerCount *int32 `json:"workerCount,omitempty"` InstanceNames *[]string `json:"instanceNames,omitempty"` } diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/web/provider.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/web/provider.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/web/provider.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/web/provider.go 2016-10-13 14:32:06.000000000 +0000 @@ -14,14 +14,14 @@ // See the License for the specific language governing permissions and // limitations under the License. 
// -// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 // Changes may cause incorrect behavior and will be lost if the code is // regenerated. import ( - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" "net/http" - "net/url" ) // ProviderClient is the use these APIs to manage Azure Websites resources @@ -48,21 +48,21 @@ } // GetPublishingUser sends the get publishing user request. -func (client ProviderClient) GetPublishingUser() (result User, ae error) { +func (client ProviderClient) GetPublishingUser() (result User, err error) { req, err := client.GetPublishingUserPreparer() if err != nil { - return result, autorest.NewErrorWithError(err, "web/ProviderClient", "GetPublishingUser", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.ProviderClient", "GetPublishingUser", nil, "Failure preparing request") } resp, err := client.GetPublishingUserSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/ProviderClient", "GetPublishingUser", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.ProviderClient", "GetPublishingUser", resp, "Failure sending request") } result, err = client.GetPublishingUserResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/ProviderClient", "GetPublishingUser", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.ProviderClient", "GetPublishingUser", resp, "Failure responding to request") } return @@ -71,21 +71,21 @@ // GetPublishingUserPreparer prepares the GetPublishingUser request. 
func (client ProviderClient) GetPublishingUserPreparer() (*http.Request, error) { queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), autorest.WithPath("/providers/Microsoft.Web/publishingUsers/web"), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetPublishingUserSender sends the GetPublishingUser request. The method will close the // http.Response Body if it receives an error. func (client ProviderClient) GetPublishingUserSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetPublishingUserResponder handles the response to the GetPublishingUser request. The method always @@ -94,7 +94,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -104,21 +104,21 @@ // GetSourceControl sends the get source control request. 
// // sourceControlType is type of source control -func (client ProviderClient) GetSourceControl(sourceControlType string) (result SourceControl, ae error) { +func (client ProviderClient) GetSourceControl(sourceControlType string) (result SourceControl, err error) { req, err := client.GetSourceControlPreparer(sourceControlType) if err != nil { - return result, autorest.NewErrorWithError(err, "web/ProviderClient", "GetSourceControl", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.ProviderClient", "GetSourceControl", nil, "Failure preparing request") } resp, err := client.GetSourceControlSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/ProviderClient", "GetSourceControl", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.ProviderClient", "GetSourceControl", resp, "Failure sending request") } result, err = client.GetSourceControlResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/ProviderClient", "GetSourceControl", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.ProviderClient", "GetSourceControl", resp, "Failure responding to request") } return @@ -127,26 +127,25 @@ // GetSourceControlPreparer prepares the GetSourceControl request. 
func (client ProviderClient) GetSourceControlPreparer(sourceControlType string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "sourceControlType": url.QueryEscape(sourceControlType), + "sourceControlType": autorest.Encode("path", sourceControlType), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/providers/Microsoft.Web/sourcecontrols/{sourceControlType}"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/providers/Microsoft.Web/sourcecontrols/{sourceControlType}", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetSourceControlSender sends the GetSourceControl request. The method will close the // http.Response Body if it receives an error. func (client ProviderClient) GetSourceControlSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetSourceControlResponder handles the response to the GetSourceControl request. The method always @@ -155,7 +154,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -163,21 +162,21 @@ } // GetSourceControls sends the get source controls request. 
-func (client ProviderClient) GetSourceControls() (result SourceControlCollection, ae error) { +func (client ProviderClient) GetSourceControls() (result SourceControlCollection, err error) { req, err := client.GetSourceControlsPreparer() if err != nil { - return result, autorest.NewErrorWithError(err, "web/ProviderClient", "GetSourceControls", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.ProviderClient", "GetSourceControls", nil, "Failure preparing request") } resp, err := client.GetSourceControlsSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/ProviderClient", "GetSourceControls", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.ProviderClient", "GetSourceControls", resp, "Failure sending request") } result, err = client.GetSourceControlsResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/ProviderClient", "GetSourceControls", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.ProviderClient", "GetSourceControls", resp, "Failure responding to request") } return @@ -186,21 +185,21 @@ // GetSourceControlsPreparer prepares the GetSourceControls request. func (client ProviderClient) GetSourceControlsPreparer() (*http.Request, error) { queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), autorest.WithPath("/providers/Microsoft.Web/sourcecontrols"), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetSourceControlsSender sends the GetSourceControls request. The method will close the // http.Response Body if it receives an error. 
func (client ProviderClient) GetSourceControlsSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetSourceControlsResponder handles the response to the GetSourceControls request. The method always @@ -209,7 +208,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -219,21 +218,21 @@ // UpdatePublishingUser sends the update publishing user request. // // requestMessage is details of publishing user -func (client ProviderClient) UpdatePublishingUser(requestMessage User) (result User, ae error) { +func (client ProviderClient) UpdatePublishingUser(requestMessage User) (result User, err error) { req, err := client.UpdatePublishingUserPreparer(requestMessage) if err != nil { - return result, autorest.NewErrorWithError(err, "web/ProviderClient", "UpdatePublishingUser", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.ProviderClient", "UpdatePublishingUser", nil, "Failure preparing request") } resp, err := client.UpdatePublishingUserSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/ProviderClient", "UpdatePublishingUser", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.ProviderClient", "UpdatePublishingUser", resp, "Failure sending request") } result, err = client.UpdatePublishingUserResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/ProviderClient", "UpdatePublishingUser", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.ProviderClient", "UpdatePublishingUser", resp, "Failure responding to request") } return @@ -242,22 +241,23 @@ // 
UpdatePublishingUserPreparer prepares the UpdatePublishingUser request. func (client ProviderClient) UpdatePublishingUserPreparer(requestMessage User) (*http.Request, error) { queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, + preparer := autorest.CreatePreparer( autorest.AsJSON(), autorest.AsPut(), autorest.WithBaseURL(client.BaseURI), autorest.WithPath("/providers/Microsoft.Web/publishingUsers/web"), autorest.WithJSON(requestMessage), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // UpdatePublishingUserSender sends the UpdatePublishingUser request. The method will close the // http.Response Body if it receives an error. func (client ProviderClient) UpdatePublishingUserSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // UpdatePublishingUserResponder handles the response to the UpdatePublishingUser request. 
The method always @@ -266,7 +266,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -277,21 +277,21 @@ // // sourceControlType is type of source control requestMessage is source // control token information -func (client ProviderClient) UpdateSourceControl(sourceControlType string, requestMessage SourceControl) (result SourceControl, ae error) { +func (client ProviderClient) UpdateSourceControl(sourceControlType string, requestMessage SourceControl) (result SourceControl, err error) { req, err := client.UpdateSourceControlPreparer(sourceControlType, requestMessage) if err != nil { - return result, autorest.NewErrorWithError(err, "web/ProviderClient", "UpdateSourceControl", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.ProviderClient", "UpdateSourceControl", nil, "Failure preparing request") } resp, err := client.UpdateSourceControlSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/ProviderClient", "UpdateSourceControl", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.ProviderClient", "UpdateSourceControl", resp, "Failure sending request") } result, err = client.UpdateSourceControlResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/ProviderClient", "UpdateSourceControl", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.ProviderClient", "UpdateSourceControl", resp, "Failure responding to request") } return @@ -300,27 +300,27 @@ // UpdateSourceControlPreparer prepares the UpdateSourceControl request. 
func (client ProviderClient) UpdateSourceControlPreparer(sourceControlType string, requestMessage SourceControl) (*http.Request, error) { pathParameters := map[string]interface{}{ - "sourceControlType": url.QueryEscape(sourceControlType), + "sourceControlType": autorest.Encode("path", sourceControlType), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, + preparer := autorest.CreatePreparer( autorest.AsJSON(), autorest.AsPut(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/providers/Microsoft.Web/sourcecontrols/{sourceControlType}"), + autorest.WithPathParameters("/providers/Microsoft.Web/sourcecontrols/{sourceControlType}", pathParameters), autorest.WithJSON(requestMessage), - autorest.WithPathParameters(pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // UpdateSourceControlSender sends the UpdateSourceControl request. The method will close the // http.Response Body if it receives an error. func (client ProviderClient) UpdateSourceControlSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // UpdateSourceControlResponder handles the response to the UpdateSourceControl request. 
The method always @@ -329,7 +329,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/web/recommendations.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/web/recommendations.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/web/recommendations.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/web/recommendations.go 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,332 @@ +package web + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "net/http" +) + +// RecommendationsClient is the use these APIs to manage Azure Websites +// resources through the Azure Resource Manager. All task operations conform +// to the HTTP/1.1 protocol specification and each operation returns an +// x-ms-request-id header that can be used to obtain information about the +// request. 
You must make sure that requests made to these resources are +// secure. For more information, see Authenticating +// Azure Resource Manager requests. +type RecommendationsClient struct { + ManagementClient +} + +// NewRecommendationsClient creates an instance of the RecommendationsClient +// client. +func NewRecommendationsClient(subscriptionID string) RecommendationsClient { + return NewRecommendationsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewRecommendationsClientWithBaseURI creates an instance of the +// RecommendationsClient client. +func NewRecommendationsClientWithBaseURI(baseURI string, subscriptionID string) RecommendationsClient { + return RecommendationsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// GetRecommendationBySubscription sends the get recommendation by +// subscription request. +// +// featured is if set, this API returns only the most critical recommendation +// among the others. Otherwise this API returns all recommendations available +// filter is return only channels specified in the filter. Filter is +// specified by using OData syntax. 
Example: $filter=channels eq 'Api' or +// channel eq 'Notification' +func (client RecommendationsClient) GetRecommendationBySubscription(featured *bool, filter string) (result ListRecommendation, err error) { + req, err := client.GetRecommendationBySubscriptionPreparer(featured, filter) + if err != nil { + return result, autorest.NewErrorWithError(err, "web.RecommendationsClient", "GetRecommendationBySubscription", nil, "Failure preparing request") + } + + resp, err := client.GetRecommendationBySubscriptionSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "web.RecommendationsClient", "GetRecommendationBySubscription", resp, "Failure sending request") + } + + result, err = client.GetRecommendationBySubscriptionResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "web.RecommendationsClient", "GetRecommendationBySubscription", resp, "Failure responding to request") + } + + return +} + +// GetRecommendationBySubscriptionPreparer prepares the GetRecommendationBySubscription request. 
+func (client RecommendationsClient) GetRecommendationBySubscriptionPreparer(featured *bool, filter string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + if featured != nil { + queryParameters["featured"] = autorest.Encode("query", *featured) + } + if len(filter) > 0 { + queryParameters["$filter"] = autorest.Encode("query", filter) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Web/recommendations", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetRecommendationBySubscriptionSender sends the GetRecommendationBySubscription request. The method will close the +// http.Response Body if it receives an error. +func (client RecommendationsClient) GetRecommendationBySubscriptionSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetRecommendationBySubscriptionResponder handles the response to the GetRecommendationBySubscription request. The method always +// closes the http.Response Body. +func (client RecommendationsClient) GetRecommendationBySubscriptionResponder(resp *http.Response) (result ListRecommendation, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result.Value), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// GetRecommendationHistoryForSite sends the get recommendation history for +// site request. +// +// resourceGroupName is resource group name siteName is site name startTime is +// the start time of a time range to query, e.g. 
$filter=startTime eq +// '2015-01-01T00:00:00Z' and endTime eq '2015-01-02T00:00:00Z' endTime is +// the end time of a time range to query, e.g. $filter=startTime eq +// '2015-01-01T00:00:00Z' and endTime eq '2015-01-02T00:00:00Z' +func (client RecommendationsClient) GetRecommendationHistoryForSite(resourceGroupName string, siteName string, startTime string, endTime string) (result ListRecommendation, err error) { + req, err := client.GetRecommendationHistoryForSitePreparer(resourceGroupName, siteName, startTime, endTime) + if err != nil { + return result, autorest.NewErrorWithError(err, "web.RecommendationsClient", "GetRecommendationHistoryForSite", nil, "Failure preparing request") + } + + resp, err := client.GetRecommendationHistoryForSiteSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "web.RecommendationsClient", "GetRecommendationHistoryForSite", resp, "Failure sending request") + } + + result, err = client.GetRecommendationHistoryForSiteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "web.RecommendationsClient", "GetRecommendationHistoryForSite", resp, "Failure responding to request") + } + + return +} + +// GetRecommendationHistoryForSitePreparer prepares the GetRecommendationHistoryForSite request. 
+func (client RecommendationsClient) GetRecommendationHistoryForSitePreparer(resourceGroupName string, siteName string, startTime string, endTime string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "siteName": autorest.Encode("path", siteName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + if len(startTime) > 0 { + queryParameters["startTime"] = autorest.Encode("query", startTime) + } + if len(endTime) > 0 { + queryParameters["endTime"] = autorest.Encode("query", endTime) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{siteName}/recommendationHistory", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetRecommendationHistoryForSiteSender sends the GetRecommendationHistoryForSite request. The method will close the +// http.Response Body if it receives an error. +func (client RecommendationsClient) GetRecommendationHistoryForSiteSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetRecommendationHistoryForSiteResponder handles the response to the GetRecommendationHistoryForSite request. The method always +// closes the http.Response Body. 
+func (client RecommendationsClient) GetRecommendationHistoryForSiteResponder(resp *http.Response) (result ListRecommendation, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result.Value), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// GetRecommendedRulesForSite sends the get recommended rules for site request. +// +// resourceGroupName is resource group name siteName is site name featured is +// if set, this API returns only the most critical recommendation among the +// others. Otherwise this API returns all recommendations available siteSku +// is the name of site SKU. numSlots is the number of site slots associated +// to the site +func (client RecommendationsClient) GetRecommendedRulesForSite(resourceGroupName string, siteName string, featured *bool, siteSku string, numSlots *int32) (result ListRecommendation, err error) { + req, err := client.GetRecommendedRulesForSitePreparer(resourceGroupName, siteName, featured, siteSku, numSlots) + if err != nil { + return result, autorest.NewErrorWithError(err, "web.RecommendationsClient", "GetRecommendedRulesForSite", nil, "Failure preparing request") + } + + resp, err := client.GetRecommendedRulesForSiteSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "web.RecommendationsClient", "GetRecommendedRulesForSite", resp, "Failure sending request") + } + + result, err = client.GetRecommendedRulesForSiteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "web.RecommendationsClient", "GetRecommendedRulesForSite", resp, "Failure responding to request") + } + + return +} + +// GetRecommendedRulesForSitePreparer prepares the GetRecommendedRulesForSite request. 
+func (client RecommendationsClient) GetRecommendedRulesForSitePreparer(resourceGroupName string, siteName string, featured *bool, siteSku string, numSlots *int32) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "siteName": autorest.Encode("path", siteName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + if featured != nil { + queryParameters["featured"] = autorest.Encode("query", *featured) + } + if len(siteSku) > 0 { + queryParameters["siteSku"] = autorest.Encode("query", siteSku) + } + if numSlots != nil { + queryParameters["numSlots"] = autorest.Encode("query", *numSlots) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{siteName}/recommendations", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetRecommendedRulesForSiteSender sends the GetRecommendedRulesForSite request. The method will close the +// http.Response Body if it receives an error. +func (client RecommendationsClient) GetRecommendedRulesForSiteSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetRecommendedRulesForSiteResponder handles the response to the GetRecommendedRulesForSite request. The method always +// closes the http.Response Body. 
+func (client RecommendationsClient) GetRecommendedRulesForSiteResponder(resp *http.Response) (result ListRecommendation, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result.Value), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// GetRuleDetailsBySiteName sends the get rule details by site name request. +// +// resourceGroupName is resource group name siteName is site name name is +// recommendation rule name +func (client RecommendationsClient) GetRuleDetailsBySiteName(resourceGroupName string, siteName string, name string) (result RecommendationRule, err error) { + req, err := client.GetRuleDetailsBySiteNamePreparer(resourceGroupName, siteName, name) + if err != nil { + return result, autorest.NewErrorWithError(err, "web.RecommendationsClient", "GetRuleDetailsBySiteName", nil, "Failure preparing request") + } + + resp, err := client.GetRuleDetailsBySiteNameSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "web.RecommendationsClient", "GetRuleDetailsBySiteName", resp, "Failure sending request") + } + + result, err = client.GetRuleDetailsBySiteNameResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "web.RecommendationsClient", "GetRuleDetailsBySiteName", resp, "Failure responding to request") + } + + return +} + +// GetRuleDetailsBySiteNamePreparer prepares the GetRuleDetailsBySiteName request. 
+func (client RecommendationsClient) GetRuleDetailsBySiteNamePreparer(resourceGroupName string, siteName string, name string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "siteName": autorest.Encode("path", siteName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{siteName}/recommendations/{name}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetRuleDetailsBySiteNameSender sends the GetRuleDetailsBySiteName request. The method will close the +// http.Response Body if it receives an error. +func (client RecommendationsClient) GetRuleDetailsBySiteNameSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetRuleDetailsBySiteNameResponder handles the response to the GetRuleDetailsBySiteName request. The method always +// closes the http.Response Body. 
+func (client RecommendationsClient) GetRuleDetailsBySiteNameResponder(resp *http.Response) (result RecommendationRule, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/web/serverfarms.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/web/serverfarms.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/web/serverfarms.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/web/serverfarms.go 2016-10-13 14:32:06.000000000 +0000 @@ -14,14 +14,14 @@ // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 // Changes may cause incorrect behavior and will be lost if the code is // regenerated. import ( - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" "net/http" - "net/url" ) // ServerFarmsClient is the use these APIs to manage Azure Websites resources @@ -48,71 +48,75 @@ } // CreateOrUpdateServerFarm sends the create or update server farm request. +// This method may poll for completion. Polling can be canceled by passing +// the cancel channel argument. The channel will be used to cancel polling +// and any outstanding HTTP requests. 
// // resourceGroupName is name of resource group name is name of App Service // Plan serverFarmEnvelope is details of App Service Plan allowPendingState // is oBSOLETE: If true, allow pending state for App Service Plan -func (client ServerFarmsClient) CreateOrUpdateServerFarm(resourceGroupName string, name string, serverFarmEnvelope ServerFarmWithRichSku, allowPendingState *bool) (result ServerFarmWithRichSku, ae error) { - req, err := client.CreateOrUpdateServerFarmPreparer(resourceGroupName, name, serverFarmEnvelope, allowPendingState) +func (client ServerFarmsClient) CreateOrUpdateServerFarm(resourceGroupName string, name string, serverFarmEnvelope ServerFarmWithRichSku, allowPendingState *bool, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.CreateOrUpdateServerFarmPreparer(resourceGroupName, name, serverFarmEnvelope, allowPendingState, cancel) if err != nil { - return result, autorest.NewErrorWithError(err, "web/ServerFarmsClient", "CreateOrUpdateServerFarm", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.ServerFarmsClient", "CreateOrUpdateServerFarm", nil, "Failure preparing request") } resp, err := client.CreateOrUpdateServerFarmSender(req) if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/ServerFarmsClient", "CreateOrUpdateServerFarm", "Failure sending request") + result.Response = resp + return result, autorest.NewErrorWithError(err, "web.ServerFarmsClient", "CreateOrUpdateServerFarm", resp, "Failure sending request") } result, err = client.CreateOrUpdateServerFarmResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/ServerFarmsClient", "CreateOrUpdateServerFarm", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.ServerFarmsClient", "CreateOrUpdateServerFarm", resp, "Failure responding to request") } return } // CreateOrUpdateServerFarmPreparer prepares the 
CreateOrUpdateServerFarm request. -func (client ServerFarmsClient) CreateOrUpdateServerFarmPreparer(resourceGroupName string, name string, serverFarmEnvelope ServerFarmWithRichSku, allowPendingState *bool) (*http.Request, error) { +func (client ServerFarmsClient) CreateOrUpdateServerFarmPreparer(resourceGroupName string, name string, serverFarmEnvelope ServerFarmWithRichSku, allowPendingState *bool, cancel <-chan struct{}) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } if allowPendingState != nil { - queryParameters["allowPendingState"] = allowPendingState + queryParameters["allowPendingState"] = autorest.Encode("query", *allowPendingState) } - return autorest.Prepare(&http.Request{}, + preparer := autorest.CreatePreparer( autorest.AsJSON(), autorest.AsPut(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/serverfarms/{name}"), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/serverfarms/{name}", pathParameters), autorest.WithJSON(serverFarmEnvelope), - autorest.WithPathParameters(pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) } // CreateOrUpdateServerFarmSender sends the CreateOrUpdateServerFarm request. The method will close the // http.Response Body if it receives an error. 
func (client ServerFarmsClient) CreateOrUpdateServerFarmSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK, http.StatusAccepted) + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) } // CreateOrUpdateServerFarmResponder handles the response to the CreateOrUpdateServerFarm request. The method always // closes the http.Response Body. -func (client ServerFarmsClient) CreateOrUpdateServerFarmResponder(resp *http.Response) (result ServerFarmWithRichSku, err error) { +func (client ServerFarmsClient) CreateOrUpdateServerFarmResponder(resp *http.Response) (result autorest.Response, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), - autorest.ByUnmarshallingJSON(&result), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} + result.Response = resp return } @@ -121,21 +125,21 @@ // resourceGroupName is name of resource group name is name of App Service // Plan vnetName is name of virtual network routeName is name of the virtual // network route route is the route object -func (client ServerFarmsClient) CreateOrUpdateVnetRoute(resourceGroupName string, name string, vnetName string, routeName string, route VnetRoute) (result VnetRoute, ae error) { +func (client ServerFarmsClient) CreateOrUpdateVnetRoute(resourceGroupName string, name string, vnetName string, routeName string, route VnetRoute) (result VnetRoute, err error) { req, err := client.CreateOrUpdateVnetRoutePreparer(resourceGroupName, name, vnetName, routeName, route) if err != nil { - return result, autorest.NewErrorWithError(err, "web/ServerFarmsClient", "CreateOrUpdateVnetRoute", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.ServerFarmsClient", "CreateOrUpdateVnetRoute", nil, "Failure preparing request") } 
resp, err := client.CreateOrUpdateVnetRouteSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/ServerFarmsClient", "CreateOrUpdateVnetRoute", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.ServerFarmsClient", "CreateOrUpdateVnetRoute", resp, "Failure sending request") } result, err = client.CreateOrUpdateVnetRouteResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/ServerFarmsClient", "CreateOrUpdateVnetRoute", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.ServerFarmsClient", "CreateOrUpdateVnetRoute", resp, "Failure responding to request") } return @@ -144,31 +148,31 @@ // CreateOrUpdateVnetRoutePreparer prepares the CreateOrUpdateVnetRoute request. func (client ServerFarmsClient) CreateOrUpdateVnetRoutePreparer(resourceGroupName string, name string, vnetName string, routeName string, route VnetRoute) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "routeName": url.QueryEscape(routeName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - "vnetName": url.QueryEscape(vnetName), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "routeName": autorest.Encode("path", routeName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vnetName": autorest.Encode("path", vnetName), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, + preparer := autorest.CreatePreparer( autorest.AsJSON(), autorest.AsPut(), autorest.WithBaseURL(client.BaseURI), - 
autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/serverfarms/{name}/virtualNetworkConnections/{vnetName}/routes/{routeName}"), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/serverfarms/{name}/virtualNetworkConnections/{vnetName}/routes/{routeName}", pathParameters), autorest.WithJSON(route), - autorest.WithPathParameters(pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // CreateOrUpdateVnetRouteSender sends the CreateOrUpdateVnetRoute request. The method will close the // http.Response Body if it receives an error. func (client ServerFarmsClient) CreateOrUpdateVnetRouteSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK, http.StatusBadRequest, http.StatusNotFound) + return autorest.SendWithSender(client, req) } // CreateOrUpdateVnetRouteResponder handles the response to the CreateOrUpdateVnetRoute request. 
The method always @@ -177,7 +181,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusBadRequest, http.StatusNotFound), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusBadRequest, http.StatusNotFound), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -188,21 +192,21 @@ // // resourceGroupName is name of resource group name is name of App Service // Plan -func (client ServerFarmsClient) DeleteServerFarm(resourceGroupName string, name string) (result ObjectSet, ae error) { +func (client ServerFarmsClient) DeleteServerFarm(resourceGroupName string, name string) (result SetObject, err error) { req, err := client.DeleteServerFarmPreparer(resourceGroupName, name) if err != nil { - return result, autorest.NewErrorWithError(err, "web/ServerFarmsClient", "DeleteServerFarm", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.ServerFarmsClient", "DeleteServerFarm", nil, "Failure preparing request") } resp, err := client.DeleteServerFarmSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/ServerFarmsClient", "DeleteServerFarm", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.ServerFarmsClient", "DeleteServerFarm", resp, "Failure sending request") } result, err = client.DeleteServerFarmResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/ServerFarmsClient", "DeleteServerFarm", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.ServerFarmsClient", "DeleteServerFarm", resp, "Failure responding to request") } return @@ -211,37 +215,36 @@ // DeleteServerFarmPreparer prepares the DeleteServerFarm request. 
func (client ServerFarmsClient) DeleteServerFarmPreparer(resourceGroupName string, name string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsDelete(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/serverfarms/{name}"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/serverfarms/{name}", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // DeleteServerFarmSender sends the DeleteServerFarm request. The method will close the // http.Response Body if it receives an error. func (client ServerFarmsClient) DeleteServerFarmSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // DeleteServerFarmResponder handles the response to the DeleteServerFarm request. The method always // closes the http.Response Body. 
-func (client ServerFarmsClient) DeleteServerFarmResponder(resp *http.Response) (result ObjectSet, err error) { +func (client ServerFarmsClient) DeleteServerFarmResponder(resp *http.Response) (result SetObject, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result.Value), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -253,21 +256,21 @@ // resourceGroupName is name of resource group name is name of App Service // Plan vnetName is name of virtual network routeName is name of the virtual // network route -func (client ServerFarmsClient) DeleteVnetRoute(resourceGroupName string, name string, vnetName string, routeName string) (result ObjectSet, ae error) { +func (client ServerFarmsClient) DeleteVnetRoute(resourceGroupName string, name string, vnetName string, routeName string) (result SetObject, err error) { req, err := client.DeleteVnetRoutePreparer(resourceGroupName, name, vnetName, routeName) if err != nil { - return result, autorest.NewErrorWithError(err, "web/ServerFarmsClient", "DeleteVnetRoute", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.ServerFarmsClient", "DeleteVnetRoute", nil, "Failure preparing request") } resp, err := client.DeleteVnetRouteSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/ServerFarmsClient", "DeleteVnetRoute", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.ServerFarmsClient", "DeleteVnetRoute", resp, "Failure sending request") } result, err = client.DeleteVnetRouteResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/ServerFarmsClient", "DeleteVnetRoute", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.ServerFarmsClient", "DeleteVnetRoute", resp, 
"Failure responding to request") } return @@ -276,39 +279,38 @@ // DeleteVnetRoutePreparer prepares the DeleteVnetRoute request. func (client ServerFarmsClient) DeleteVnetRoutePreparer(resourceGroupName string, name string, vnetName string, routeName string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "routeName": url.QueryEscape(routeName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - "vnetName": url.QueryEscape(vnetName), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "routeName": autorest.Encode("path", routeName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vnetName": autorest.Encode("path", vnetName), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsDelete(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/serverfarms/{name}/virtualNetworkConnections/{vnetName}/routes/{routeName}"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/serverfarms/{name}/virtualNetworkConnections/{vnetName}/routes/{routeName}", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // DeleteVnetRouteSender sends the DeleteVnetRoute request. The method will close the // http.Response Body if it receives an error. 
func (client ServerFarmsClient) DeleteVnetRouteSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK, http.StatusNotFound) + return autorest.SendWithSender(client, req) } // DeleteVnetRouteResponder handles the response to the DeleteVnetRoute request. The method always // closes the http.Response Body. -func (client ServerFarmsClient) DeleteVnetRouteResponder(resp *http.Response) (result ObjectSet, err error) { +func (client ServerFarmsClient) DeleteVnetRouteResponder(resp *http.Response) (result SetObject, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNotFound), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNotFound), autorest.ByUnmarshallingJSON(&result.Value), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -320,21 +322,21 @@ // resourceGroupName is name of resource group name is name of App Service // Plan vnetName is name of virtual network routeName is name of the virtual // network route -func (client ServerFarmsClient) GetRouteForVnet(resourceGroupName string, name string, vnetName string, routeName string) (result VnetRouteList, ae error) { +func (client ServerFarmsClient) GetRouteForVnet(resourceGroupName string, name string, vnetName string, routeName string) (result ListVnetRoute, err error) { req, err := client.GetRouteForVnetPreparer(resourceGroupName, name, vnetName, routeName) if err != nil { - return result, autorest.NewErrorWithError(err, "web/ServerFarmsClient", "GetRouteForVnet", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.ServerFarmsClient", "GetRouteForVnet", nil, "Failure preparing request") } resp, err := client.GetRouteForVnetSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/ServerFarmsClient", "GetRouteForVnet", "Failure sending request") + return result, 
autorest.NewErrorWithError(err, "web.ServerFarmsClient", "GetRouteForVnet", resp, "Failure sending request") } result, err = client.GetRouteForVnetResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/ServerFarmsClient", "GetRouteForVnet", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.ServerFarmsClient", "GetRouteForVnet", resp, "Failure responding to request") } return @@ -343,39 +345,38 @@ // GetRouteForVnetPreparer prepares the GetRouteForVnet request. func (client ServerFarmsClient) GetRouteForVnetPreparer(resourceGroupName string, name string, vnetName string, routeName string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "routeName": url.QueryEscape(routeName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - "vnetName": url.QueryEscape(vnetName), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "routeName": autorest.Encode("path", routeName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vnetName": autorest.Encode("path", vnetName), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/serverfarms/{name}/virtualNetworkConnections/{vnetName}/routes/{routeName}"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/serverfarms/{name}/virtualNetworkConnections/{vnetName}/routes/{routeName}", pathParameters), autorest.WithQueryParameters(queryParameters)) + 
return preparer.Prepare(&http.Request{}) } // GetRouteForVnetSender sends the GetRouteForVnet request. The method will close the // http.Response Body if it receives an error. func (client ServerFarmsClient) GetRouteForVnetSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK, http.StatusNotFound) + return autorest.SendWithSender(client, req) } // GetRouteForVnetResponder handles the response to the GetRouteForVnet request. The method always // closes the http.Response Body. -func (client ServerFarmsClient) GetRouteForVnetResponder(resp *http.Response) (result VnetRouteList, err error) { +func (client ServerFarmsClient) GetRouteForVnetResponder(resp *http.Response) (result ListVnetRoute, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNotFound), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNotFound), autorest.ByUnmarshallingJSON(&result.Value), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -386,21 +387,21 @@ // // resourceGroupName is name of resource group name is name of App Service // Plan vnetName is name of virtual network -func (client ServerFarmsClient) GetRoutesForVnet(resourceGroupName string, name string, vnetName string) (result VnetRouteList, ae error) { +func (client ServerFarmsClient) GetRoutesForVnet(resourceGroupName string, name string, vnetName string) (result ListVnetRoute, err error) { req, err := client.GetRoutesForVnetPreparer(resourceGroupName, name, vnetName) if err != nil { - return result, autorest.NewErrorWithError(err, "web/ServerFarmsClient", "GetRoutesForVnet", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.ServerFarmsClient", "GetRoutesForVnet", nil, "Failure preparing request") } resp, err := client.GetRoutesForVnetSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, 
autorest.NewErrorWithError(err, "web/ServerFarmsClient", "GetRoutesForVnet", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.ServerFarmsClient", "GetRoutesForVnet", resp, "Failure sending request") } result, err = client.GetRoutesForVnetResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/ServerFarmsClient", "GetRoutesForVnet", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.ServerFarmsClient", "GetRoutesForVnet", resp, "Failure responding to request") } return @@ -409,38 +410,37 @@ // GetRoutesForVnetPreparer prepares the GetRoutesForVnet request. func (client ServerFarmsClient) GetRoutesForVnetPreparer(resourceGroupName string, name string, vnetName string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - "vnetName": url.QueryEscape(vnetName), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vnetName": autorest.Encode("path", vnetName), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/serverfarms/{name}/virtualNetworkConnections/{vnetName}/routes"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/serverfarms/{name}/virtualNetworkConnections/{vnetName}/routes", pathParameters), autorest.WithQueryParameters(queryParameters)) + return 
preparer.Prepare(&http.Request{}) } // GetRoutesForVnetSender sends the GetRoutesForVnet request. The method will close the // http.Response Body if it receives an error. func (client ServerFarmsClient) GetRoutesForVnetSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetRoutesForVnetResponder handles the response to the GetRoutesForVnet request. The method always // closes the http.Response Body. -func (client ServerFarmsClient) GetRoutesForVnetResponder(resp *http.Response) (result VnetRouteList, err error) { +func (client ServerFarmsClient) GetRoutesForVnetResponder(resp *http.Response) (result ListVnetRoute, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result.Value), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -451,21 +451,21 @@ // // resourceGroupName is name of resource group name is name of App Service // Plan -func (client ServerFarmsClient) GetServerFarm(resourceGroupName string, name string) (result ServerFarmWithRichSku, ae error) { +func (client ServerFarmsClient) GetServerFarm(resourceGroupName string, name string) (result ServerFarmWithRichSku, err error) { req, err := client.GetServerFarmPreparer(resourceGroupName, name) if err != nil { - return result, autorest.NewErrorWithError(err, "web/ServerFarmsClient", "GetServerFarm", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.ServerFarmsClient", "GetServerFarm", nil, "Failure preparing request") } resp, err := client.GetServerFarmSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/ServerFarmsClient", "GetServerFarm", "Failure sending request") + return result, autorest.NewErrorWithError(err, 
"web.ServerFarmsClient", "GetServerFarm", resp, "Failure sending request") } result, err = client.GetServerFarmResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/ServerFarmsClient", "GetServerFarm", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.ServerFarmsClient", "GetServerFarm", resp, "Failure responding to request") } return @@ -474,28 +474,27 @@ // GetServerFarmPreparer prepares the GetServerFarm request. func (client ServerFarmsClient) GetServerFarmPreparer(resourceGroupName string, name string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/serverfarms/{name}"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/serverfarms/{name}", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetServerFarmSender sends the GetServerFarm request. The method will close the // http.Response Body if it receives an error. 
func (client ServerFarmsClient) GetServerFarmSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetServerFarmResponder handles the response to the GetServerFarm request. The method always @@ -504,7 +503,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -516,21 +515,21 @@ // // resourceGroupName is name of resource group name is name of App Service // Plan -func (client ServerFarmsClient) GetServerFarmMetricDefintions(resourceGroupName string, name string) (result MetricDefinitionCollection, ae error) { +func (client ServerFarmsClient) GetServerFarmMetricDefintions(resourceGroupName string, name string) (result MetricDefinitionCollection, err error) { req, err := client.GetServerFarmMetricDefintionsPreparer(resourceGroupName, name) if err != nil { - return result, autorest.NewErrorWithError(err, "web/ServerFarmsClient", "GetServerFarmMetricDefintions", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.ServerFarmsClient", "GetServerFarmMetricDefintions", nil, "Failure preparing request") } resp, err := client.GetServerFarmMetricDefintionsSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/ServerFarmsClient", "GetServerFarmMetricDefintions", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.ServerFarmsClient", "GetServerFarmMetricDefintions", resp, "Failure sending request") } result, err = client.GetServerFarmMetricDefintionsResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/ServerFarmsClient", "GetServerFarmMetricDefintions", "Failure responding to request") + err = 
autorest.NewErrorWithError(err, "web.ServerFarmsClient", "GetServerFarmMetricDefintions", resp, "Failure responding to request") } return @@ -539,28 +538,27 @@ // GetServerFarmMetricDefintionsPreparer prepares the GetServerFarmMetricDefintions request. func (client ServerFarmsClient) GetServerFarmMetricDefintionsPreparer(resourceGroupName string, name string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/serverfarms/{name}/metricdefinitions"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/serverfarms/{name}/metricdefinitions", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetServerFarmMetricDefintionsSender sends the GetServerFarmMetricDefintions request. The method will close the // http.Response Body if it receives an error. func (client ServerFarmsClient) GetServerFarmMetricDefintionsSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetServerFarmMetricDefintionsResponder handles the response to the GetServerFarmMetricDefintions request. 
The method always @@ -569,7 +567,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -585,21 +583,21 @@ // or name.value eq 'Metric2') and startTime eq '2014-01-01T00:00:00Z' and // endTime eq '2014-12-31T23:59:59Z' and timeGrain eq // duration'[Hour|Minute|Day]'. -func (client ServerFarmsClient) GetServerFarmMetrics(resourceGroupName string, name string, details *bool, filter string) (result ResourceMetricCollection, ae error) { +func (client ServerFarmsClient) GetServerFarmMetrics(resourceGroupName string, name string, details *bool, filter string) (result ResourceMetricCollection, err error) { req, err := client.GetServerFarmMetricsPreparer(resourceGroupName, name, details, filter) if err != nil { - return result, autorest.NewErrorWithError(err, "web/ServerFarmsClient", "GetServerFarmMetrics", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.ServerFarmsClient", "GetServerFarmMetrics", nil, "Failure preparing request") } resp, err := client.GetServerFarmMetricsSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/ServerFarmsClient", "GetServerFarmMetrics", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.ServerFarmsClient", "GetServerFarmMetrics", resp, "Failure sending request") } result, err = client.GetServerFarmMetricsResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/ServerFarmsClient", "GetServerFarmMetrics", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.ServerFarmsClient", "GetServerFarmMetrics", resp, "Failure responding to request") } return @@ -608,34 +606,33 @@ // GetServerFarmMetricsPreparer prepares the GetServerFarmMetrics 
request. func (client ServerFarmsClient) GetServerFarmMetricsPreparer(resourceGroupName string, name string, details *bool, filter string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } if details != nil { - queryParameters["details"] = details + queryParameters["details"] = autorest.Encode("query", *details) } if len(filter) > 0 { - queryParameters["$filter"] = filter + queryParameters["$filter"] = autorest.Encode("query", filter) } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/serverfarms/{name}/metrics"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/serverfarms/{name}/metrics", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetServerFarmMetricsSender sends the GetServerFarmMetrics request. The method will close the // http.Response Body if it receives an error. func (client ServerFarmsClient) GetServerFarmMetricsSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetServerFarmMetricsResponder handles the response to the GetServerFarmMetrics request. 
The method always @@ -644,7 +641,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -655,21 +652,21 @@ // // resourceGroupName is name of resource group name is name of server farm // operationID is id of Server farm operation"> -func (client ServerFarmsClient) GetServerFarmOperation(resourceGroupName string, name string, operationID string) (result ServerFarmWithRichSku, ae error) { +func (client ServerFarmsClient) GetServerFarmOperation(resourceGroupName string, name string, operationID string) (result ServerFarmWithRichSku, err error) { req, err := client.GetServerFarmOperationPreparer(resourceGroupName, name, operationID) if err != nil { - return result, autorest.NewErrorWithError(err, "web/ServerFarmsClient", "GetServerFarmOperation", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.ServerFarmsClient", "GetServerFarmOperation", nil, "Failure preparing request") } resp, err := client.GetServerFarmOperationSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/ServerFarmsClient", "GetServerFarmOperation", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.ServerFarmsClient", "GetServerFarmOperation", resp, "Failure sending request") } result, err = client.GetServerFarmOperationResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/ServerFarmsClient", "GetServerFarmOperation", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.ServerFarmsClient", "GetServerFarmOperation", resp, "Failure responding to request") } return @@ -678,29 +675,28 @@ // GetServerFarmOperationPreparer prepares the GetServerFarmOperation request. 
func (client ServerFarmsClient) GetServerFarmOperationPreparer(resourceGroupName string, name string, operationID string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "operationId": url.QueryEscape(operationID), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "operationId": autorest.Encode("path", operationID), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/serverfarms/{name}/operationresults/{operationId}"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/serverfarms/{name}/operationresults/{operationId}", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetServerFarmOperationSender sends the GetServerFarmOperation request. The method will close the // http.Response Body if it receives an error. func (client ServerFarmsClient) GetServerFarmOperationSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetServerFarmOperationResponder handles the response to the GetServerFarmOperation request. 
The method always @@ -709,7 +705,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -719,21 +715,21 @@ // GetServerFarms sends the get server farms request. // // resourceGroupName is name of resource group -func (client ServerFarmsClient) GetServerFarms(resourceGroupName string) (result ServerFarmCollection, ae error) { +func (client ServerFarmsClient) GetServerFarms(resourceGroupName string) (result ServerFarmCollection, err error) { req, err := client.GetServerFarmsPreparer(resourceGroupName) if err != nil { - return result, autorest.NewErrorWithError(err, "web/ServerFarmsClient", "GetServerFarms", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.ServerFarmsClient", "GetServerFarms", nil, "Failure preparing request") } resp, err := client.GetServerFarmsSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/ServerFarmsClient", "GetServerFarms", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.ServerFarmsClient", "GetServerFarms", resp, "Failure sending request") } result, err = client.GetServerFarmsResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/ServerFarmsClient", "GetServerFarms", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.ServerFarmsClient", "GetServerFarms", resp, "Failure responding to request") } return @@ -742,27 +738,26 @@ // GetServerFarmsPreparer prepares the GetServerFarms request. 
func (client ServerFarmsClient) GetServerFarmsPreparer(resourceGroupName string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/serverfarms"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/serverfarms", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetServerFarmsSender sends the GetServerFarms request. The method will close the // http.Response Body if it receives an error. func (client ServerFarmsClient) GetServerFarmsSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetServerFarmsResponder handles the response to the GetServerFarms request. The method always @@ -771,7 +766,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -787,21 +782,21 @@ // the list filter is supported filter: $filter=state eq running. Returns // only web apps that are currently running top is list page size. If // specified, results are paged. 
-func (client ServerFarmsClient) GetServerFarmSites(resourceGroupName string, name string, skipToken string, filter string, top string) (result SiteCollection, ae error) { +func (client ServerFarmsClient) GetServerFarmSites(resourceGroupName string, name string, skipToken string, filter string, top string) (result SiteCollection, err error) { req, err := client.GetServerFarmSitesPreparer(resourceGroupName, name, skipToken, filter, top) if err != nil { - return result, autorest.NewErrorWithError(err, "web/ServerFarmsClient", "GetServerFarmSites", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.ServerFarmsClient", "GetServerFarmSites", nil, "Failure preparing request") } resp, err := client.GetServerFarmSitesSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/ServerFarmsClient", "GetServerFarmSites", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.ServerFarmsClient", "GetServerFarmSites", resp, "Failure sending request") } result, err = client.GetServerFarmSitesResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/ServerFarmsClient", "GetServerFarmSites", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.ServerFarmsClient", "GetServerFarmSites", resp, "Failure responding to request") } return @@ -810,37 +805,36 @@ // GetServerFarmSitesPreparer prepares the GetServerFarmSites request. 
func (client ServerFarmsClient) GetServerFarmSitesPreparer(resourceGroupName string, name string, skipToken string, filter string, top string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } if len(skipToken) > 0 { - queryParameters["$skipToken"] = skipToken + queryParameters["$skipToken"] = autorest.Encode("query", skipToken) } if len(filter) > 0 { - queryParameters["$filter"] = filter + queryParameters["$filter"] = autorest.Encode("query", filter) } if len(top) > 0 { - queryParameters["$top"] = top + queryParameters["$top"] = autorest.Encode("query", top) } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/serverfarms/{name}/sites"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/serverfarms/{name}/sites", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetServerFarmSitesSender sends the GetServerFarmSites request. The method will close the // http.Response Body if it receives an error. 
func (client ServerFarmsClient) GetServerFarmSitesSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetServerFarmSitesResponder handles the response to the GetServerFarmSites request. The method always @@ -849,7 +843,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -857,10 +851,10 @@ } // GetServerFarmSitesNextResults retrieves the next set of results, if any. -func (client ServerFarmsClient) GetServerFarmSitesNextResults(lastResults SiteCollection) (result SiteCollection, ae error) { +func (client ServerFarmsClient) GetServerFarmSitesNextResults(lastResults SiteCollection) (result SiteCollection, err error) { req, err := lastResults.SiteCollectionPreparer() if err != nil { - return result, autorest.NewErrorWithError(err, "web/ServerFarmsClient", "GetServerFarmSites", "Failure preparing next results request request") + return result, autorest.NewErrorWithError(err, "web.ServerFarmsClient", "GetServerFarmSites", nil, "Failure preparing next results request request") } if req == nil { return @@ -869,12 +863,12 @@ resp, err := client.GetServerFarmSitesSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/ServerFarmsClient", "GetServerFarmSites", "Failure sending next results request request") + return result, autorest.NewErrorWithError(err, "web.ServerFarmsClient", "GetServerFarmSites", resp, "Failure sending next results request request") } result, err = client.GetServerFarmSitesResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/ServerFarmsClient", "GetServerFarmSites", "Failure responding to next results request request") + err = 
autorest.NewErrorWithError(err, "web.ServerFarmsClient", "GetServerFarmSites", resp, "Failure responding to next results request request") } return @@ -885,21 +879,21 @@ // resourceGroupName is name of resource group name is name of the App Service // Plan vnetName is name of the virtual network gatewayName is name of the // gateway. Only the 'primary' gateway is supported. -func (client ServerFarmsClient) GetServerFarmVnetGateway(resourceGroupName string, name string, vnetName string, gatewayName string) (result VnetGateway, ae error) { +func (client ServerFarmsClient) GetServerFarmVnetGateway(resourceGroupName string, name string, vnetName string, gatewayName string) (result VnetGateway, err error) { req, err := client.GetServerFarmVnetGatewayPreparer(resourceGroupName, name, vnetName, gatewayName) if err != nil { - return result, autorest.NewErrorWithError(err, "web/ServerFarmsClient", "GetServerFarmVnetGateway", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.ServerFarmsClient", "GetServerFarmVnetGateway", nil, "Failure preparing request") } resp, err := client.GetServerFarmVnetGatewaySender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/ServerFarmsClient", "GetServerFarmVnetGateway", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.ServerFarmsClient", "GetServerFarmVnetGateway", resp, "Failure sending request") } result, err = client.GetServerFarmVnetGatewayResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/ServerFarmsClient", "GetServerFarmVnetGateway", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.ServerFarmsClient", "GetServerFarmVnetGateway", resp, "Failure responding to request") } return @@ -908,30 +902,29 @@ // GetServerFarmVnetGatewayPreparer prepares the GetServerFarmVnetGateway request. 
func (client ServerFarmsClient) GetServerFarmVnetGatewayPreparer(resourceGroupName string, name string, vnetName string, gatewayName string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "gatewayName": url.QueryEscape(gatewayName), - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - "vnetName": url.QueryEscape(vnetName), + "gatewayName": autorest.Encode("path", gatewayName), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vnetName": autorest.Encode("path", vnetName), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/serverfarms/{name}/virtualNetworkConnections/{vnetName}/gateways/{gatewayName}"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/serverfarms/{name}/virtualNetworkConnections/{vnetName}/gateways/{gatewayName}", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetServerFarmVnetGatewaySender sends the GetServerFarmVnetGateway request. The method will close the // http.Response Body if it receives an error. func (client ServerFarmsClient) GetServerFarmVnetGatewaySender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetServerFarmVnetGatewayResponder handles the response to the GetServerFarmVnetGateway request. 
The method always @@ -940,7 +933,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -951,21 +944,21 @@ // // resourceGroupName is name of resource group name is name of App Service // Plan vnetName is name of virtual network -func (client ServerFarmsClient) GetVnetFromServerFarm(resourceGroupName string, name string, vnetName string) (result VnetInfo, ae error) { +func (client ServerFarmsClient) GetVnetFromServerFarm(resourceGroupName string, name string, vnetName string) (result VnetInfo, err error) { req, err := client.GetVnetFromServerFarmPreparer(resourceGroupName, name, vnetName) if err != nil { - return result, autorest.NewErrorWithError(err, "web/ServerFarmsClient", "GetVnetFromServerFarm", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.ServerFarmsClient", "GetVnetFromServerFarm", nil, "Failure preparing request") } resp, err := client.GetVnetFromServerFarmSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/ServerFarmsClient", "GetVnetFromServerFarm", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.ServerFarmsClient", "GetVnetFromServerFarm", resp, "Failure sending request") } result, err = client.GetVnetFromServerFarmResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/ServerFarmsClient", "GetVnetFromServerFarm", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.ServerFarmsClient", "GetVnetFromServerFarm", resp, "Failure responding to request") } return @@ -974,29 +967,28 @@ // GetVnetFromServerFarmPreparer prepares the GetVnetFromServerFarm request. 
func (client ServerFarmsClient) GetVnetFromServerFarmPreparer(resourceGroupName string, name string, vnetName string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - "vnetName": url.QueryEscape(vnetName), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vnetName": autorest.Encode("path", vnetName), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/serverfarms/{name}/virtualNetworkConnections/{vnetName}"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/serverfarms/{name}/virtualNetworkConnections/{vnetName}", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetVnetFromServerFarmSender sends the GetVnetFromServerFarm request. The method will close the // http.Response Body if it receives an error. func (client ServerFarmsClient) GetVnetFromServerFarmSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK, http.StatusNotFound) + return autorest.SendWithSender(client, req) } // GetVnetFromServerFarmResponder handles the response to the GetVnetFromServerFarm request. 
The method always @@ -1005,7 +997,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNotFound), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNotFound), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -1016,21 +1008,21 @@ // // resourceGroupName is name of resource group name is name of App Service // Plan -func (client ServerFarmsClient) GetVnetsForServerFarm(resourceGroupName string, name string) (result VnetInfoList, ae error) { +func (client ServerFarmsClient) GetVnetsForServerFarm(resourceGroupName string, name string) (result ListVnetInfo, err error) { req, err := client.GetVnetsForServerFarmPreparer(resourceGroupName, name) if err != nil { - return result, autorest.NewErrorWithError(err, "web/ServerFarmsClient", "GetVnetsForServerFarm", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.ServerFarmsClient", "GetVnetsForServerFarm", nil, "Failure preparing request") } resp, err := client.GetVnetsForServerFarmSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/ServerFarmsClient", "GetVnetsForServerFarm", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.ServerFarmsClient", "GetVnetsForServerFarm", resp, "Failure sending request") } result, err = client.GetVnetsForServerFarmResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/ServerFarmsClient", "GetVnetsForServerFarm", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.ServerFarmsClient", "GetVnetsForServerFarm", resp, "Failure responding to request") } return @@ -1039,37 +1031,36 @@ // GetVnetsForServerFarmPreparer prepares the GetVnetsForServerFarm request. 
func (client ServerFarmsClient) GetVnetsForServerFarmPreparer(resourceGroupName string, name string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/serverfarms/{name}/virtualNetworkConnections"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/serverfarms/{name}/virtualNetworkConnections", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetVnetsForServerFarmSender sends the GetVnetsForServerFarm request. The method will close the // http.Response Body if it receives an error. func (client ServerFarmsClient) GetVnetsForServerFarmSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetVnetsForServerFarmResponder handles the response to the GetVnetsForServerFarm request. The method always // closes the http.Response Body. 
-func (client ServerFarmsClient) GetVnetsForServerFarmResponder(resp *http.Response) (result VnetInfoList, err error) { +func (client ServerFarmsClient) GetVnetsForServerFarmResponder(resp *http.Response) (result ListVnetInfo, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result.Value), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -1079,22 +1070,22 @@ // RebootWorkerForServerFarm sends the reboot worker for server farm request. // // resourceGroupName is name of resource group name is name of server farm -// workerName is name of worker machine, typically IP address -func (client ServerFarmsClient) RebootWorkerForServerFarm(resourceGroupName string, name string, workerName string) (result ObjectSet, ae error) { +// workerName is name of worker machine, typically starts with RD +func (client ServerFarmsClient) RebootWorkerForServerFarm(resourceGroupName string, name string, workerName string) (result SetObject, err error) { req, err := client.RebootWorkerForServerFarmPreparer(resourceGroupName, name, workerName) if err != nil { - return result, autorest.NewErrorWithError(err, "web/ServerFarmsClient", "RebootWorkerForServerFarm", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.ServerFarmsClient", "RebootWorkerForServerFarm", nil, "Failure preparing request") } resp, err := client.RebootWorkerForServerFarmSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/ServerFarmsClient", "RebootWorkerForServerFarm", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.ServerFarmsClient", "RebootWorkerForServerFarm", resp, "Failure sending request") } result, err = client.RebootWorkerForServerFarmResponder(resp) if err != nil { - ae = 
autorest.NewErrorWithError(err, "web/ServerFarmsClient", "RebootWorkerForServerFarm", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.ServerFarmsClient", "RebootWorkerForServerFarm", resp, "Failure responding to request") } return @@ -1103,38 +1094,37 @@ // RebootWorkerForServerFarmPreparer prepares the RebootWorkerForServerFarm request. func (client ServerFarmsClient) RebootWorkerForServerFarmPreparer(resourceGroupName string, name string, workerName string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - "workerName": url.QueryEscape(workerName), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "workerName": autorest.Encode("path", workerName), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsPost(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/serverfarms/{name}/workers/{workerName}"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/serverfarms/{name}/workers/{workerName}/reboot", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // RebootWorkerForServerFarmSender sends the RebootWorkerForServerFarm request. The method will close the // http.Response Body if it receives an error. 
func (client ServerFarmsClient) RebootWorkerForServerFarmSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // RebootWorkerForServerFarmResponder handles the response to the RebootWorkerForServerFarm request. The method always // closes the http.Response Body. -func (client ServerFarmsClient) RebootWorkerForServerFarmResponder(resp *http.Response) (result ObjectSet, err error) { +func (client ServerFarmsClient) RebootWorkerForServerFarmResponder(resp *http.Response) (result SetObject, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result.Value), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -1147,21 +1137,21 @@ // Plan softRestart is soft restart applies the configuration settings and // restarts the apps if necessary. 
Hard restart always restarts and // reprovisions the apps -func (client ServerFarmsClient) RestartSitesForServerFarm(resourceGroupName string, name string, softRestart *bool) (result ObjectSet, ae error) { +func (client ServerFarmsClient) RestartSitesForServerFarm(resourceGroupName string, name string, softRestart *bool) (result SetObject, err error) { req, err := client.RestartSitesForServerFarmPreparer(resourceGroupName, name, softRestart) if err != nil { - return result, autorest.NewErrorWithError(err, "web/ServerFarmsClient", "RestartSitesForServerFarm", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.ServerFarmsClient", "RestartSitesForServerFarm", nil, "Failure preparing request") } resp, err := client.RestartSitesForServerFarmSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/ServerFarmsClient", "RestartSitesForServerFarm", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.ServerFarmsClient", "RestartSitesForServerFarm", resp, "Failure sending request") } result, err = client.RestartSitesForServerFarmResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/ServerFarmsClient", "RestartSitesForServerFarm", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.ServerFarmsClient", "RestartSitesForServerFarm", resp, "Failure responding to request") } return @@ -1170,40 +1160,39 @@ // RestartSitesForServerFarmPreparer prepares the RestartSitesForServerFarm request. 
func (client ServerFarmsClient) RestartSitesForServerFarmPreparer(resourceGroupName string, name string, softRestart *bool) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } if softRestart != nil { - queryParameters["softRestart"] = softRestart + queryParameters["softRestart"] = autorest.Encode("query", *softRestart) } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsPost(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/serverfarms/{name}/restartSites"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/serverfarms/{name}/restartSites", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // RestartSitesForServerFarmSender sends the RestartSitesForServerFarm request. The method will close the // http.Response Body if it receives an error. func (client ServerFarmsClient) RestartSitesForServerFarmSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // RestartSitesForServerFarmResponder handles the response to the RestartSitesForServerFarm request. The method always // closes the http.Response Body. 
-func (client ServerFarmsClient) RestartSitesForServerFarmResponder(resp *http.Response) (result ObjectSet, err error) { +func (client ServerFarmsClient) RestartSitesForServerFarmResponder(resp *http.Response) (result SetObject, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result.Value), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -1217,21 +1206,21 @@ // Plan vnetName is the name of the virtual network gatewayName is the name // of the gateway. Only 'primary' is supported. connectionEnvelope is the // gateway entity. -func (client ServerFarmsClient) UpdateServerFarmVnetGateway(resourceGroupName string, name string, vnetName string, gatewayName string, connectionEnvelope VnetGateway) (result VnetGateway, ae error) { +func (client ServerFarmsClient) UpdateServerFarmVnetGateway(resourceGroupName string, name string, vnetName string, gatewayName string, connectionEnvelope VnetGateway) (result VnetGateway, err error) { req, err := client.UpdateServerFarmVnetGatewayPreparer(resourceGroupName, name, vnetName, gatewayName, connectionEnvelope) if err != nil { - return result, autorest.NewErrorWithError(err, "web/ServerFarmsClient", "UpdateServerFarmVnetGateway", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.ServerFarmsClient", "UpdateServerFarmVnetGateway", nil, "Failure preparing request") } resp, err := client.UpdateServerFarmVnetGatewaySender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/ServerFarmsClient", "UpdateServerFarmVnetGateway", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.ServerFarmsClient", "UpdateServerFarmVnetGateway", resp, "Failure sending request") } result, err = client.UpdateServerFarmVnetGatewayResponder(resp) if err 
!= nil { - ae = autorest.NewErrorWithError(err, "web/ServerFarmsClient", "UpdateServerFarmVnetGateway", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.ServerFarmsClient", "UpdateServerFarmVnetGateway", resp, "Failure responding to request") } return @@ -1240,31 +1229,31 @@ // UpdateServerFarmVnetGatewayPreparer prepares the UpdateServerFarmVnetGateway request. func (client ServerFarmsClient) UpdateServerFarmVnetGatewayPreparer(resourceGroupName string, name string, vnetName string, gatewayName string, connectionEnvelope VnetGateway) (*http.Request, error) { pathParameters := map[string]interface{}{ - "gatewayName": url.QueryEscape(gatewayName), - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - "vnetName": url.QueryEscape(vnetName), + "gatewayName": autorest.Encode("path", gatewayName), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vnetName": autorest.Encode("path", vnetName), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, + preparer := autorest.CreatePreparer( autorest.AsJSON(), autorest.AsPut(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/serverfarms/{name}/virtualNetworkConnections/{vnetName}/gateways/{gatewayName}"), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/serverfarms/{name}/virtualNetworkConnections/{vnetName}/gateways/{gatewayName}", pathParameters), autorest.WithJSON(connectionEnvelope), - autorest.WithPathParameters(pathParameters), autorest.WithQueryParameters(queryParameters)) + return 
preparer.Prepare(&http.Request{}) } // UpdateServerFarmVnetGatewaySender sends the UpdateServerFarmVnetGateway request. The method will close the // http.Response Body if it receives an error. func (client ServerFarmsClient) UpdateServerFarmVnetGatewaySender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // UpdateServerFarmVnetGatewayResponder handles the response to the UpdateServerFarmVnetGateway request. The method always @@ -1273,7 +1262,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -1285,21 +1274,21 @@ // resourceGroupName is name of resource group name is name of App Service // Plan vnetName is name of virtual network routeName is name of the virtual // network route route is the route object -func (client ServerFarmsClient) UpdateVnetRoute(resourceGroupName string, name string, vnetName string, routeName string, route VnetRoute) (result VnetRoute, ae error) { +func (client ServerFarmsClient) UpdateVnetRoute(resourceGroupName string, name string, vnetName string, routeName string, route VnetRoute) (result VnetRoute, err error) { req, err := client.UpdateVnetRoutePreparer(resourceGroupName, name, vnetName, routeName, route) if err != nil { - return result, autorest.NewErrorWithError(err, "web/ServerFarmsClient", "UpdateVnetRoute", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.ServerFarmsClient", "UpdateVnetRoute", nil, "Failure preparing request") } resp, err := client.UpdateVnetRouteSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/ServerFarmsClient", "UpdateVnetRoute", "Failure sending request") + return result, 
autorest.NewErrorWithError(err, "web.ServerFarmsClient", "UpdateVnetRoute", resp, "Failure sending request") } result, err = client.UpdateVnetRouteResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/ServerFarmsClient", "UpdateVnetRoute", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.ServerFarmsClient", "UpdateVnetRoute", resp, "Failure responding to request") } return @@ -1308,31 +1297,31 @@ // UpdateVnetRoutePreparer prepares the UpdateVnetRoute request. func (client ServerFarmsClient) UpdateVnetRoutePreparer(resourceGroupName string, name string, vnetName string, routeName string, route VnetRoute) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "routeName": url.QueryEscape(routeName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - "vnetName": url.QueryEscape(vnetName), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "routeName": autorest.Encode("path", routeName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vnetName": autorest.Encode("path", vnetName), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, + preparer := autorest.CreatePreparer( autorest.AsJSON(), autorest.AsPatch(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/serverfarms/{name}/virtualNetworkConnections/{vnetName}/routes/{routeName}"), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/serverfarms/{name}/virtualNetworkConnections/{vnetName}/routes/{routeName}", pathParameters), autorest.WithJSON(route), - autorest.WithPathParameters(pathParameters), 
autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // UpdateVnetRouteSender sends the UpdateVnetRoute request. The method will close the // http.Response Body if it receives an error. func (client ServerFarmsClient) UpdateVnetRouteSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK, http.StatusBadRequest, http.StatusNotFound) + return autorest.SendWithSender(client, req) } // UpdateVnetRouteResponder handles the response to the UpdateVnetRoute request. The method always @@ -1341,7 +1330,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusBadRequest, http.StatusNotFound), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusBadRequest, http.StatusNotFound), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/web/sites.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/web/sites.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/web/sites.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/web/sites.go 2016-10-13 14:32:06.000000000 +0000 @@ -14,14 +14,14 @@ // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 // Changes may cause incorrect behavior and will be lost if the code is // regenerated. 
import ( - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" "net/http" - "net/url" ) // SitesClient is the use these APIs to manage Azure Websites resources @@ -48,21 +48,21 @@ // AddSitePremierAddOn sends the add site premier add on request. // -func (client SitesClient) AddSitePremierAddOn(resourceGroupName string, name string, premierAddOnName string, premierAddOn PremierAddOnRequest) (result ObjectSet, ae error) { +func (client SitesClient) AddSitePremierAddOn(resourceGroupName string, name string, premierAddOnName string, premierAddOn PremierAddOnRequest) (result SetObject, err error) { req, err := client.AddSitePremierAddOnPreparer(resourceGroupName, name, premierAddOnName, premierAddOn) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "AddSitePremierAddOn", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "AddSitePremierAddOn", nil, "Failure preparing request") } resp, err := client.AddSitePremierAddOnSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "AddSitePremierAddOn", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "AddSitePremierAddOn", resp, "Failure sending request") } result, err = client.AddSitePremierAddOnResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "AddSitePremierAddOn", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", "AddSitePremierAddOn", resp, "Failure responding to request") } return @@ -71,39 +71,39 @@ // AddSitePremierAddOnPreparer prepares the AddSitePremierAddOn request. 
func (client SitesClient) AddSitePremierAddOnPreparer(resourceGroupName string, name string, premierAddOnName string, premierAddOn PremierAddOnRequest) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "premierAddOnName": url.QueryEscape(premierAddOnName), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "premierAddOnName": autorest.Encode("path", premierAddOnName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, + preparer := autorest.CreatePreparer( autorest.AsJSON(), autorest.AsPut(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/premieraddons/{premierAddOnName}"), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/premieraddons/{premierAddOnName}", pathParameters), autorest.WithJSON(premierAddOn), - autorest.WithPathParameters(pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // AddSitePremierAddOnSender sends the AddSitePremierAddOn request. The method will close the // http.Response Body if it receives an error. func (client SitesClient) AddSitePremierAddOnSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // AddSitePremierAddOnResponder handles the response to the AddSitePremierAddOn request. The method always // closes the http.Response Body. 
-func (client SitesClient) AddSitePremierAddOnResponder(resp *http.Response) (result ObjectSet, err error) { +func (client SitesClient) AddSitePremierAddOnResponder(resp *http.Response) (result SetObject, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result.Value), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -112,21 +112,21 @@ // AddSitePremierAddOnSlot sends the add site premier add on slot request. // -func (client SitesClient) AddSitePremierAddOnSlot(resourceGroupName string, name string, premierAddOnName string, premierAddOn PremierAddOnRequest, slot string) (result ObjectSet, ae error) { +func (client SitesClient) AddSitePremierAddOnSlot(resourceGroupName string, name string, premierAddOnName string, premierAddOn PremierAddOnRequest, slot string) (result SetObject, err error) { req, err := client.AddSitePremierAddOnSlotPreparer(resourceGroupName, name, premierAddOnName, premierAddOn, slot) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "AddSitePremierAddOnSlot", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "AddSitePremierAddOnSlot", nil, "Failure preparing request") } resp, err := client.AddSitePremierAddOnSlotSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "AddSitePremierAddOnSlot", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "AddSitePremierAddOnSlot", resp, "Failure sending request") } result, err = client.AddSitePremierAddOnSlotResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "AddSitePremierAddOnSlot", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", 
"AddSitePremierAddOnSlot", resp, "Failure responding to request") } return @@ -135,40 +135,40 @@ // AddSitePremierAddOnSlotPreparer prepares the AddSitePremierAddOnSlot request. func (client SitesClient) AddSitePremierAddOnSlotPreparer(resourceGroupName string, name string, premierAddOnName string, premierAddOn PremierAddOnRequest, slot string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "premierAddOnName": url.QueryEscape(premierAddOnName), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "slot": url.QueryEscape(slot), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "premierAddOnName": autorest.Encode("path", premierAddOnName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "slot": autorest.Encode("path", slot), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, + preparer := autorest.CreatePreparer( autorest.AsJSON(), autorest.AsPut(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/premieraddons/{premierAddOnName}"), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/premieraddons/{premierAddOnName}", pathParameters), autorest.WithJSON(premierAddOn), - autorest.WithPathParameters(pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // AddSitePremierAddOnSlotSender sends the AddSitePremierAddOnSlot request. The method will close the // http.Response Body if it receives an error. 
func (client SitesClient) AddSitePremierAddOnSlotSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // AddSitePremierAddOnSlotResponder handles the response to the AddSitePremierAddOnSlot request. The method always // closes the http.Response Body. -func (client SitesClient) AddSitePremierAddOnSlotResponder(resp *http.Response) (result ObjectSet, err error) { +func (client SitesClient) AddSitePremierAddOnSlotResponder(resp *http.Response) (result SetObject, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result.Value), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -182,21 +182,21 @@ // Settings from that slot will be applied on the source slot slot is name of // the source slot. Settings from the target slot will be applied onto this // slot -func (client SitesClient) ApplySlotConfigSlot(resourceGroupName string, name string, slotSwapEntity CsmSlotEntity, slot string) (result ObjectSet, ae error) { +func (client SitesClient) ApplySlotConfigSlot(resourceGroupName string, name string, slotSwapEntity CsmSlotEntity, slot string) (result SetObject, err error) { req, err := client.ApplySlotConfigSlotPreparer(resourceGroupName, name, slotSwapEntity, slot) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "ApplySlotConfigSlot", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "ApplySlotConfigSlot", nil, "Failure preparing request") } resp, err := client.ApplySlotConfigSlotSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "ApplySlotConfigSlot", "Failure sending request") + return result, autorest.NewErrorWithError(err, 
"web.SitesClient", "ApplySlotConfigSlot", resp, "Failure sending request") } result, err = client.ApplySlotConfigSlotResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "ApplySlotConfigSlot", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", "ApplySlotConfigSlot", resp, "Failure responding to request") } return @@ -205,39 +205,39 @@ // ApplySlotConfigSlotPreparer prepares the ApplySlotConfigSlot request. func (client SitesClient) ApplySlotConfigSlotPreparer(resourceGroupName string, name string, slotSwapEntity CsmSlotEntity, slot string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "slot": url.QueryEscape(slot), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "slot": autorest.Encode("path", slot), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, + preparer := autorest.CreatePreparer( autorest.AsJSON(), autorest.AsPost(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/applySlotConfig"), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/applySlotConfig", pathParameters), autorest.WithJSON(slotSwapEntity), - autorest.WithPathParameters(pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // ApplySlotConfigSlotSender sends the ApplySlotConfigSlot request. 
The method will close the // http.Response Body if it receives an error. func (client SitesClient) ApplySlotConfigSlotSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // ApplySlotConfigSlotResponder handles the response to the ApplySlotConfigSlot request. The method always // closes the http.Response Body. -func (client SitesClient) ApplySlotConfigSlotResponder(resp *http.Response) (result ObjectSet, err error) { +func (client SitesClient) ApplySlotConfigSlotResponder(resp *http.Response) (result SetObject, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result.Value), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -250,21 +250,21 @@ // resourceGroupName is name of resource group name is name of web app // slotSwapEntity is request body that contains the target slot name. 
// Settings from that slot will be applied on the source slot -func (client SitesClient) ApplySlotConfigToProduction(resourceGroupName string, name string, slotSwapEntity CsmSlotEntity) (result ObjectSet, ae error) { +func (client SitesClient) ApplySlotConfigToProduction(resourceGroupName string, name string, slotSwapEntity CsmSlotEntity) (result SetObject, err error) { req, err := client.ApplySlotConfigToProductionPreparer(resourceGroupName, name, slotSwapEntity) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "ApplySlotConfigToProduction", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "ApplySlotConfigToProduction", nil, "Failure preparing request") } resp, err := client.ApplySlotConfigToProductionSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "ApplySlotConfigToProduction", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "ApplySlotConfigToProduction", resp, "Failure sending request") } result, err = client.ApplySlotConfigToProductionResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "ApplySlotConfigToProduction", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", "ApplySlotConfigToProduction", resp, "Failure responding to request") } return @@ -273,38 +273,38 @@ // ApplySlotConfigToProductionPreparer prepares the ApplySlotConfigToProduction request. 
func (client SitesClient) ApplySlotConfigToProductionPreparer(resourceGroupName string, name string, slotSwapEntity CsmSlotEntity) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, + preparer := autorest.CreatePreparer( autorest.AsJSON(), autorest.AsPost(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/applySlotConfig"), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/applySlotConfig", pathParameters), autorest.WithJSON(slotSwapEntity), - autorest.WithPathParameters(pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // ApplySlotConfigToProductionSender sends the ApplySlotConfigToProduction request. The method will close the // http.Response Body if it receives an error. func (client SitesClient) ApplySlotConfigToProductionSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // ApplySlotConfigToProductionResponder handles the response to the ApplySlotConfigToProduction request. The method always // closes the http.Response Body. 
-func (client SitesClient) ApplySlotConfigToProductionResponder(resp *http.Response) (result ObjectSet, err error) { +func (client SitesClient) ApplySlotConfigToProductionResponder(resp *http.Response) (result SetObject, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result.Value), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -315,21 +315,21 @@ // // resourceGroupName is name of resource group name is name of web app request // is information on backup request -func (client SitesClient) BackupSite(resourceGroupName string, name string, request BackupRequest) (result BackupItem, ae error) { +func (client SitesClient) BackupSite(resourceGroupName string, name string, request BackupRequest) (result BackupItem, err error) { req, err := client.BackupSitePreparer(resourceGroupName, name, request) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "BackupSite", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "BackupSite", nil, "Failure preparing request") } resp, err := client.BackupSiteSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "BackupSite", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "BackupSite", resp, "Failure sending request") } result, err = client.BackupSiteResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "BackupSite", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", "BackupSite", resp, "Failure responding to request") } return @@ -338,29 +338,29 @@ // BackupSitePreparer prepares the BackupSite request. 
func (client SitesClient) BackupSitePreparer(resourceGroupName string, name string, request BackupRequest) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, + preparer := autorest.CreatePreparer( autorest.AsJSON(), autorest.AsPost(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/backup"), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/backup", pathParameters), autorest.WithJSON(request), - autorest.WithPathParameters(pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // BackupSiteSender sends the BackupSite request. The method will close the // http.Response Body if it receives an error. func (client SitesClient) BackupSiteSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // BackupSiteResponder handles the response to the BackupSite request. The method always @@ -369,213 +369,357 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} return } -// BackupSiteDeprecated sends the backup site deprecated request. 
+// BackupSiteSlot sends the backup site slot request. // // resourceGroupName is name of resource group name is name of web app request -// is information on backup request -func (client SitesClient) BackupSiteDeprecated(resourceGroupName string, name string, request BackupRequest) (result BackupItem, ae error) { - req, err := client.BackupSiteDeprecatedPreparer(resourceGroupName, name, request) +// is information on backup request slot is name of web app slot. If not +// specified then will default to production slot. +func (client SitesClient) BackupSiteSlot(resourceGroupName string, name string, request BackupRequest, slot string) (result BackupItem, err error) { + req, err := client.BackupSiteSlotPreparer(resourceGroupName, name, request, slot) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "BackupSiteDeprecated", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "BackupSiteSlot", nil, "Failure preparing request") } - resp, err := client.BackupSiteDeprecatedSender(req) + resp, err := client.BackupSiteSlotSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "BackupSiteDeprecated", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "BackupSiteSlot", resp, "Failure sending request") } - result, err = client.BackupSiteDeprecatedResponder(resp) + result, err = client.BackupSiteSlotResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "BackupSiteDeprecated", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", "BackupSiteSlot", resp, "Failure responding to request") } return } -// BackupSiteDeprecatedPreparer prepares the BackupSiteDeprecated request. 
-func (client SitesClient) BackupSiteDeprecatedPreparer(resourceGroupName string, name string, request BackupRequest) (*http.Request, error) { +// BackupSiteSlotPreparer prepares the BackupSiteSlot request. +func (client SitesClient) BackupSiteSlotPreparer(resourceGroupName string, name string, request BackupRequest, slot string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "slot": autorest.Encode("path", slot), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, + preparer := autorest.CreatePreparer( autorest.AsJSON(), - autorest.AsPut(), + autorest.AsPost(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/backup"), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/backup", pathParameters), autorest.WithJSON(request), - autorest.WithPathParameters(pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } -// BackupSiteDeprecatedSender sends the BackupSiteDeprecated request. The method will close the +// BackupSiteSlotSender sends the BackupSiteSlot request. The method will close the // http.Response Body if it receives an error. 
-func (client SitesClient) BackupSiteDeprecatedSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) +func (client SitesClient) BackupSiteSlotSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) } -// BackupSiteDeprecatedResponder handles the response to the BackupSiteDeprecated request. The method always +// BackupSiteSlotResponder handles the response to the BackupSiteSlot request. The method always // closes the http.Response Body. -func (client SitesClient) BackupSiteDeprecatedResponder(resp *http.Response) (result BackupItem, err error) { +func (client SitesClient) BackupSiteSlotResponder(resp *http.Response) (result BackupItem, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} return } -// BackupSiteDeprecatedSlot sends the backup site deprecated slot request. +// CreateDeployment sends the create deployment request. // -// resourceGroupName is name of resource group name is name of web app request -// is information on backup request slot is name of web app slot. If not -// specified then will default to production slot. 
-func (client SitesClient) BackupSiteDeprecatedSlot(resourceGroupName string, name string, request BackupRequest, slot string) (result BackupItem, ae error) { - req, err := client.BackupSiteDeprecatedSlotPreparer(resourceGroupName, name, request, slot) +// resourceGroupName is name of resource group name is name of web app id is +// id of the deployment deployment is details of deployment +func (client SitesClient) CreateDeployment(resourceGroupName string, name string, id string, deployment Deployment) (result Deployment, err error) { + req, err := client.CreateDeploymentPreparer(resourceGroupName, name, id, deployment) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "BackupSiteDeprecatedSlot", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "CreateDeployment", nil, "Failure preparing request") } - resp, err := client.BackupSiteDeprecatedSlotSender(req) + resp, err := client.CreateDeploymentSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "BackupSiteDeprecatedSlot", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "CreateDeployment", resp, "Failure sending request") } - result, err = client.BackupSiteDeprecatedSlotResponder(resp) + result, err = client.CreateDeploymentResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "BackupSiteDeprecatedSlot", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", "CreateDeployment", resp, "Failure responding to request") } return } -// BackupSiteDeprecatedSlotPreparer prepares the BackupSiteDeprecatedSlot request. 
-func (client SitesClient) BackupSiteDeprecatedSlotPreparer(resourceGroupName string, name string, request BackupRequest, slot string) (*http.Request, error) { +// CreateDeploymentPreparer prepares the CreateDeployment request. +func (client SitesClient) CreateDeploymentPreparer(resourceGroupName string, name string, id string, deployment Deployment) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "slot": url.QueryEscape(slot), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "id": autorest.Encode("path", id), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, + preparer := autorest.CreatePreparer( autorest.AsJSON(), autorest.AsPut(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/backup"), - autorest.WithJSON(request), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/deployments/{id}", pathParameters), + autorest.WithJSON(deployment), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } -// BackupSiteDeprecatedSlotSender sends the BackupSiteDeprecatedSlot request. The method will close the +// CreateDeploymentSender sends the CreateDeployment request. The method will close the // http.Response Body if it receives an error. 
-func (client SitesClient) BackupSiteDeprecatedSlotSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) +func (client SitesClient) CreateDeploymentSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) } -// BackupSiteDeprecatedSlotResponder handles the response to the BackupSiteDeprecatedSlot request. The method always +// CreateDeploymentResponder handles the response to the CreateDeployment request. The method always // closes the http.Response Body. -func (client SitesClient) BackupSiteDeprecatedSlotResponder(resp *http.Response) (result BackupItem, err error) { +func (client SitesClient) CreateDeploymentResponder(resp *http.Response) (result Deployment, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} return } -// BackupSiteSlot sends the backup site slot request. +// CreateDeploymentSlot sends the create deployment slot request. // -// resourceGroupName is name of resource group name is name of web app request -// is information on backup request slot is name of web app slot. If not -// specified then will default to production slot. -func (client SitesClient) BackupSiteSlot(resourceGroupName string, name string, request BackupRequest, slot string) (result BackupItem, ae error) { - req, err := client.BackupSiteSlotPreparer(resourceGroupName, name, request, slot) +// resourceGroupName is name of resource group name is name of web app id is +// id of the deployment slot is name of web app slot. If not specified then +// will default to production slot. 
deployment is details of deployment +func (client SitesClient) CreateDeploymentSlot(resourceGroupName string, name string, id string, slot string, deployment Deployment) (result Deployment, err error) { + req, err := client.CreateDeploymentSlotPreparer(resourceGroupName, name, id, slot, deployment) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "BackupSiteSlot", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "CreateDeploymentSlot", nil, "Failure preparing request") } - resp, err := client.BackupSiteSlotSender(req) + resp, err := client.CreateDeploymentSlotSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "BackupSiteSlot", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "CreateDeploymentSlot", resp, "Failure sending request") } - result, err = client.BackupSiteSlotResponder(resp) + result, err = client.CreateDeploymentSlotResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "BackupSiteSlot", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", "CreateDeploymentSlot", resp, "Failure responding to request") } return } -// BackupSiteSlotPreparer prepares the BackupSiteSlot request. -func (client SitesClient) BackupSiteSlotPreparer(resourceGroupName string, name string, request BackupRequest, slot string) (*http.Request, error) { +// CreateDeploymentSlotPreparer prepares the CreateDeploymentSlot request. 
+func (client SitesClient) CreateDeploymentSlotPreparer(resourceGroupName string, name string, id string, slot string, deployment Deployment) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "slot": url.QueryEscape(slot), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "id": autorest.Encode("path", id), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "slot": autorest.Encode("path", slot), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, + preparer := autorest.CreatePreparer( autorest.AsJSON(), - autorest.AsPost(), + autorest.AsPut(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/backup"), - autorest.WithJSON(request), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/deployments/{id}", pathParameters), + autorest.WithJSON(deployment), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } -// BackupSiteSlotSender sends the BackupSiteSlot request. The method will close the +// CreateDeploymentSlotSender sends the CreateDeploymentSlot request. The method will close the // http.Response Body if it receives an error. 
-func (client SitesClient) BackupSiteSlotSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) +func (client SitesClient) CreateDeploymentSlotSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) } -// BackupSiteSlotResponder handles the response to the BackupSiteSlot request. The method always +// CreateDeploymentSlotResponder handles the response to the CreateDeploymentSlot request. The method always // closes the http.Response Body. -func (client SitesClient) BackupSiteSlotResponder(resp *http.Response) (result BackupItem, err error) { +func (client SitesClient) CreateDeploymentSlotResponder(resp *http.Response) (result Deployment, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// CreateInstanceDeployment sends the create instance deployment request. 
+// +// resourceGroupName is name of resource group name is name of web app id is +// id of the deployment instanceID is id of web app instance deployment is +// details of deployment +func (client SitesClient) CreateInstanceDeployment(resourceGroupName string, name string, id string, instanceID string, deployment Deployment) (result Deployment, err error) { + req, err := client.CreateInstanceDeploymentPreparer(resourceGroupName, name, id, instanceID, deployment) + if err != nil { + return result, autorest.NewErrorWithError(err, "web.SitesClient", "CreateInstanceDeployment", nil, "Failure preparing request") + } + + resp, err := client.CreateInstanceDeploymentSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "web.SitesClient", "CreateInstanceDeployment", resp, "Failure sending request") + } + + result, err = client.CreateInstanceDeploymentResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "web.SitesClient", "CreateInstanceDeployment", resp, "Failure responding to request") + } + + return +} + +// CreateInstanceDeploymentPreparer prepares the CreateInstanceDeployment request. 
+func (client SitesClient) CreateInstanceDeploymentPreparer(resourceGroupName string, name string, id string, instanceID string, deployment Deployment) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "id": autorest.Encode("path", id), + "instanceId": autorest.Encode("path", instanceID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/instances/{instanceId}/deployments/{id}", pathParameters), + autorest.WithJSON(deployment), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// CreateInstanceDeploymentSender sends the CreateInstanceDeployment request. The method will close the +// http.Response Body if it receives an error. +func (client SitesClient) CreateInstanceDeploymentSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// CreateInstanceDeploymentResponder handles the response to the CreateInstanceDeployment request. The method always +// closes the http.Response Body. +func (client SitesClient) CreateInstanceDeploymentResponder(resp *http.Response) (result Deployment, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// CreateInstanceDeploymentSlot sends the create instance deployment slot +// request. 
+// +// resourceGroupName is name of resource group name is name of web app id is +// id of the deployment slot is name of web app slot. If not specified then +// will default to production slot. instanceID is id of web app instance +// deployment is details of deployment +func (client SitesClient) CreateInstanceDeploymentSlot(resourceGroupName string, name string, id string, slot string, instanceID string, deployment Deployment) (result Deployment, err error) { + req, err := client.CreateInstanceDeploymentSlotPreparer(resourceGroupName, name, id, slot, instanceID, deployment) + if err != nil { + return result, autorest.NewErrorWithError(err, "web.SitesClient", "CreateInstanceDeploymentSlot", nil, "Failure preparing request") + } + + resp, err := client.CreateInstanceDeploymentSlotSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "web.SitesClient", "CreateInstanceDeploymentSlot", resp, "Failure sending request") + } + + result, err = client.CreateInstanceDeploymentSlotResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "web.SitesClient", "CreateInstanceDeploymentSlot", resp, "Failure responding to request") + } + + return +} + +// CreateInstanceDeploymentSlotPreparer prepares the CreateInstanceDeploymentSlot request. 
+func (client SitesClient) CreateInstanceDeploymentSlotPreparer(resourceGroupName string, name string, id string, slot string, instanceID string, deployment Deployment) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "id": autorest.Encode("path", id), + "instanceId": autorest.Encode("path", instanceID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "slot": autorest.Encode("path", slot), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/instances/{instanceId}/deployments/{id}", pathParameters), + autorest.WithJSON(deployment), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// CreateInstanceDeploymentSlotSender sends the CreateInstanceDeploymentSlot request. The method will close the +// http.Response Body if it receives an error. +func (client SitesClient) CreateInstanceDeploymentSlotSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// CreateInstanceDeploymentSlotResponder handles the response to the CreateInstanceDeploymentSlot request. The method always +// closes the http.Response Body. 
+func (client SitesClient) CreateInstanceDeploymentSlotResponder(resp *http.Response) (result Deployment, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} return } -// CreateOrUpdateSite sends the create or update site request. +// CreateOrUpdateSite sends the create or update site request. This method may +// poll for completion. Polling can be canceled by passing the cancel channel +// argument. The channel will be used to cancel polling and any outstanding +// HTTP requests. // // resourceGroupName is name of the resource group name is name of the web app // siteEnvelope is details of web app if it exists already @@ -586,76 +730,77 @@ // not verified. forceDNSRegistration is if true, web app hostname is force // registered with DNS ttlInSeconds is time to live in seconds for web app's // default domain name -func (client SitesClient) CreateOrUpdateSite(resourceGroupName string, name string, siteEnvelope Site, skipDNSRegistration string, skipCustomDomainVerification string, forceDNSRegistration string, ttlInSeconds string) (result Site, ae error) { - req, err := client.CreateOrUpdateSitePreparer(resourceGroupName, name, siteEnvelope, skipDNSRegistration, skipCustomDomainVerification, forceDNSRegistration, ttlInSeconds) +func (client SitesClient) CreateOrUpdateSite(resourceGroupName string, name string, siteEnvelope Site, skipDNSRegistration string, skipCustomDomainVerification string, forceDNSRegistration string, ttlInSeconds string, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.CreateOrUpdateSitePreparer(resourceGroupName, name, siteEnvelope, skipDNSRegistration, skipCustomDomainVerification, forceDNSRegistration, ttlInSeconds, cancel) if err != nil { - return result, 
autorest.NewErrorWithError(err, "web/SitesClient", "CreateOrUpdateSite", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "CreateOrUpdateSite", nil, "Failure preparing request") } resp, err := client.CreateOrUpdateSiteSender(req) if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "CreateOrUpdateSite", "Failure sending request") + result.Response = resp + return result, autorest.NewErrorWithError(err, "web.SitesClient", "CreateOrUpdateSite", resp, "Failure sending request") } result, err = client.CreateOrUpdateSiteResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "CreateOrUpdateSite", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", "CreateOrUpdateSite", resp, "Failure responding to request") } return } // CreateOrUpdateSitePreparer prepares the CreateOrUpdateSite request. -func (client SitesClient) CreateOrUpdateSitePreparer(resourceGroupName string, name string, siteEnvelope Site, skipDNSRegistration string, skipCustomDomainVerification string, forceDNSRegistration string, ttlInSeconds string) (*http.Request, error) { +func (client SitesClient) CreateOrUpdateSitePreparer(resourceGroupName string, name string, siteEnvelope Site, skipDNSRegistration string, skipCustomDomainVerification string, forceDNSRegistration string, ttlInSeconds string, cancel <-chan struct{}) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": 
client.APIVersion, } if len(skipDNSRegistration) > 0 { - queryParameters["skipDnsRegistration"] = skipDNSRegistration + queryParameters["skipDnsRegistration"] = autorest.Encode("query", skipDNSRegistration) } if len(skipCustomDomainVerification) > 0 { - queryParameters["skipCustomDomainVerification"] = skipCustomDomainVerification + queryParameters["skipCustomDomainVerification"] = autorest.Encode("query", skipCustomDomainVerification) } if len(forceDNSRegistration) > 0 { - queryParameters["forceDnsRegistration"] = forceDNSRegistration + queryParameters["forceDnsRegistration"] = autorest.Encode("query", forceDNSRegistration) } if len(ttlInSeconds) > 0 { - queryParameters["ttlInSeconds"] = ttlInSeconds + queryParameters["ttlInSeconds"] = autorest.Encode("query", ttlInSeconds) } - return autorest.Prepare(&http.Request{}, + preparer := autorest.CreatePreparer( autorest.AsJSON(), autorest.AsPut(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}"), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}", pathParameters), autorest.WithJSON(siteEnvelope), - autorest.WithPathParameters(pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) } // CreateOrUpdateSiteSender sends the CreateOrUpdateSite request. The method will close the // http.Response Body if it receives an error. func (client SitesClient) CreateOrUpdateSiteSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK, http.StatusAccepted) + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) } // CreateOrUpdateSiteResponder handles the response to the CreateOrUpdateSite request. The method always // closes the http.Response Body. 
-func (client SitesClient) CreateOrUpdateSiteResponder(resp *http.Response) (result Site, err error) { +func (client SitesClient) CreateOrUpdateSiteResponder(resp *http.Response) (result autorest.Response, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), - autorest.ByUnmarshallingJSON(&result), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} + result.Response = resp return } @@ -664,21 +809,21 @@ // resourceGroupName is name of resource group name is name of web app // siteConfig is request body that contains the configuraiton setting for the // web app -func (client SitesClient) CreateOrUpdateSiteConfig(resourceGroupName string, name string, siteConfig SiteConfig) (result SiteConfig, ae error) { +func (client SitesClient) CreateOrUpdateSiteConfig(resourceGroupName string, name string, siteConfig SiteConfig) (result SiteConfig, err error) { req, err := client.CreateOrUpdateSiteConfigPreparer(resourceGroupName, name, siteConfig) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "CreateOrUpdateSiteConfig", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "CreateOrUpdateSiteConfig", nil, "Failure preparing request") } resp, err := client.CreateOrUpdateSiteConfigSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "CreateOrUpdateSiteConfig", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "CreateOrUpdateSiteConfig", resp, "Failure sending request") } result, err = client.CreateOrUpdateSiteConfigResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "CreateOrUpdateSiteConfig", "Failure responding to request") + err = 
autorest.NewErrorWithError(err, "web.SitesClient", "CreateOrUpdateSiteConfig", resp, "Failure responding to request") } return @@ -687,29 +832,29 @@ // CreateOrUpdateSiteConfigPreparer prepares the CreateOrUpdateSiteConfig request. func (client SitesClient) CreateOrUpdateSiteConfigPreparer(resourceGroupName string, name string, siteConfig SiteConfig) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, + preparer := autorest.CreatePreparer( autorest.AsJSON(), autorest.AsPut(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/config/web"), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/config/web", pathParameters), autorest.WithJSON(siteConfig), - autorest.WithPathParameters(pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // CreateOrUpdateSiteConfigSender sends the CreateOrUpdateSiteConfig request. The method will close the // http.Response Body if it receives an error. func (client SitesClient) CreateOrUpdateSiteConfigSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // CreateOrUpdateSiteConfigResponder handles the response to the CreateOrUpdateSiteConfig request. 
The method always @@ -718,7 +863,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -732,21 +877,21 @@ // siteConfig is request body that contains the configuraiton setting for the // web app slot is name of web app slot. If not specified then will default // to production slot. -func (client SitesClient) CreateOrUpdateSiteConfigSlot(resourceGroupName string, name string, siteConfig SiteConfig, slot string) (result SiteConfig, ae error) { +func (client SitesClient) CreateOrUpdateSiteConfigSlot(resourceGroupName string, name string, siteConfig SiteConfig, slot string) (result SiteConfig, err error) { req, err := client.CreateOrUpdateSiteConfigSlotPreparer(resourceGroupName, name, siteConfig, slot) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "CreateOrUpdateSiteConfigSlot", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "CreateOrUpdateSiteConfigSlot", nil, "Failure preparing request") } resp, err := client.CreateOrUpdateSiteConfigSlotSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "CreateOrUpdateSiteConfigSlot", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "CreateOrUpdateSiteConfigSlot", resp, "Failure sending request") } result, err = client.CreateOrUpdateSiteConfigSlotResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "CreateOrUpdateSiteConfigSlot", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", "CreateOrUpdateSiteConfigSlot", resp, "Failure responding to request") } return @@ -755,30 +900,30 @@ // 
CreateOrUpdateSiteConfigSlotPreparer prepares the CreateOrUpdateSiteConfigSlot request. func (client SitesClient) CreateOrUpdateSiteConfigSlotPreparer(resourceGroupName string, name string, siteConfig SiteConfig, slot string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "slot": url.QueryEscape(slot), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "slot": autorest.Encode("path", slot), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, + preparer := autorest.CreatePreparer( autorest.AsJSON(), autorest.AsPut(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/config/web"), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/config/web", pathParameters), autorest.WithJSON(siteConfig), - autorest.WithPathParameters(pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // CreateOrUpdateSiteConfigSlotSender sends the CreateOrUpdateSiteConfigSlot request. The method will close the // http.Response Body if it receives an error. func (client SitesClient) CreateOrUpdateSiteConfigSlotSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // CreateOrUpdateSiteConfigSlotResponder handles the response to the CreateOrUpdateSiteConfigSlot request. 
The method always @@ -787,7 +932,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -799,21 +944,21 @@ // // resourceGroupName is name of resource group name is name of web app // hostName is name of host hostNameBinding is host name binding information -func (client SitesClient) CreateOrUpdateSiteHostNameBinding(resourceGroupName string, name string, hostName string, hostNameBinding HostNameBinding) (result HostNameBinding, ae error) { +func (client SitesClient) CreateOrUpdateSiteHostNameBinding(resourceGroupName string, name string, hostName string, hostNameBinding HostNameBinding) (result HostNameBinding, err error) { req, err := client.CreateOrUpdateSiteHostNameBindingPreparer(resourceGroupName, name, hostName, hostNameBinding) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "CreateOrUpdateSiteHostNameBinding", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "CreateOrUpdateSiteHostNameBinding", nil, "Failure preparing request") } resp, err := client.CreateOrUpdateSiteHostNameBindingSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "CreateOrUpdateSiteHostNameBinding", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "CreateOrUpdateSiteHostNameBinding", resp, "Failure sending request") } result, err = client.CreateOrUpdateSiteHostNameBindingResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "CreateOrUpdateSiteHostNameBinding", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", "CreateOrUpdateSiteHostNameBinding", resp, "Failure responding 
to request") } return @@ -822,30 +967,30 @@ // CreateOrUpdateSiteHostNameBindingPreparer prepares the CreateOrUpdateSiteHostNameBinding request. func (client SitesClient) CreateOrUpdateSiteHostNameBindingPreparer(resourceGroupName string, name string, hostName string, hostNameBinding HostNameBinding) (*http.Request, error) { pathParameters := map[string]interface{}{ - "hostName": url.QueryEscape(hostName), - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "hostName": autorest.Encode("path", hostName), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, + preparer := autorest.CreatePreparer( autorest.AsJSON(), autorest.AsPut(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/hostNameBindings/{hostName}"), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/hostNameBindings/{hostName}", pathParameters), autorest.WithJSON(hostNameBinding), - autorest.WithPathParameters(pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // CreateOrUpdateSiteHostNameBindingSender sends the CreateOrUpdateSiteHostNameBinding request. The method will close the // http.Response Body if it receives an error. 
func (client SitesClient) CreateOrUpdateSiteHostNameBindingSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // CreateOrUpdateSiteHostNameBindingResponder handles the response to the CreateOrUpdateSiteHostNameBinding request. The method always @@ -854,7 +999,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -868,21 +1013,21 @@ // hostName is name of host hostNameBinding is host name binding information // slot is name of web app slot. If not specified then will default to // production slot. -func (client SitesClient) CreateOrUpdateSiteHostNameBindingSlot(resourceGroupName string, name string, hostName string, hostNameBinding HostNameBinding, slot string) (result HostNameBinding, ae error) { +func (client SitesClient) CreateOrUpdateSiteHostNameBindingSlot(resourceGroupName string, name string, hostName string, hostNameBinding HostNameBinding, slot string) (result HostNameBinding, err error) { req, err := client.CreateOrUpdateSiteHostNameBindingSlotPreparer(resourceGroupName, name, hostName, hostNameBinding, slot) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "CreateOrUpdateSiteHostNameBindingSlot", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "CreateOrUpdateSiteHostNameBindingSlot", nil, "Failure preparing request") } resp, err := client.CreateOrUpdateSiteHostNameBindingSlotSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "CreateOrUpdateSiteHostNameBindingSlot", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", 
"CreateOrUpdateSiteHostNameBindingSlot", resp, "Failure sending request") } result, err = client.CreateOrUpdateSiteHostNameBindingSlotResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "CreateOrUpdateSiteHostNameBindingSlot", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", "CreateOrUpdateSiteHostNameBindingSlot", resp, "Failure responding to request") } return @@ -891,31 +1036,31 @@ // CreateOrUpdateSiteHostNameBindingSlotPreparer prepares the CreateOrUpdateSiteHostNameBindingSlot request. func (client SitesClient) CreateOrUpdateSiteHostNameBindingSlotPreparer(resourceGroupName string, name string, hostName string, hostNameBinding HostNameBinding, slot string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "hostName": url.QueryEscape(hostName), - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "slot": url.QueryEscape(slot), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "hostName": autorest.Encode("path", hostName), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "slot": autorest.Encode("path", slot), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, + preparer := autorest.CreatePreparer( autorest.AsJSON(), autorest.AsPut(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/hostNameBindings/{hostName}"), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/hostNameBindings/{hostName}", pathParameters), autorest.WithJSON(hostNameBinding), - 
autorest.WithPathParameters(pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // CreateOrUpdateSiteHostNameBindingSlotSender sends the CreateOrUpdateSiteHostNameBindingSlot request. The method will close the // http.Response Body if it receives an error. func (client SitesClient) CreateOrUpdateSiteHostNameBindingSlotSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // CreateOrUpdateSiteHostNameBindingSlotResponder handles the response to the CreateOrUpdateSiteHostNameBindingSlot request. The method always @@ -924,7 +1069,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -937,21 +1082,21 @@ // resourceGroupName is the resource group name name is the name of the web // app entityName is the name by which the Hybrid Connection is identified // connectionEnvelope is the details of the Hybrid Connection -func (client SitesClient) CreateOrUpdateSiteRelayServiceConnection(resourceGroupName string, name string, entityName string, connectionEnvelope RelayServiceConnectionEntity) (result RelayServiceConnectionEntity, ae error) { +func (client SitesClient) CreateOrUpdateSiteRelayServiceConnection(resourceGroupName string, name string, entityName string, connectionEnvelope RelayServiceConnectionEntity) (result RelayServiceConnectionEntity, err error) { req, err := client.CreateOrUpdateSiteRelayServiceConnectionPreparer(resourceGroupName, name, entityName, connectionEnvelope) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "CreateOrUpdateSiteRelayServiceConnection", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", 
"CreateOrUpdateSiteRelayServiceConnection", nil, "Failure preparing request") } resp, err := client.CreateOrUpdateSiteRelayServiceConnectionSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "CreateOrUpdateSiteRelayServiceConnection", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "CreateOrUpdateSiteRelayServiceConnection", resp, "Failure sending request") } result, err = client.CreateOrUpdateSiteRelayServiceConnectionResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "CreateOrUpdateSiteRelayServiceConnection", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", "CreateOrUpdateSiteRelayServiceConnection", resp, "Failure responding to request") } return @@ -960,30 +1105,30 @@ // CreateOrUpdateSiteRelayServiceConnectionPreparer prepares the CreateOrUpdateSiteRelayServiceConnection request. 
func (client SitesClient) CreateOrUpdateSiteRelayServiceConnectionPreparer(resourceGroupName string, name string, entityName string, connectionEnvelope RelayServiceConnectionEntity) (*http.Request, error) { pathParameters := map[string]interface{}{ - "entityName": url.QueryEscape(entityName), - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "entityName": autorest.Encode("path", entityName), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, + preparer := autorest.CreatePreparer( autorest.AsJSON(), autorest.AsPut(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/hybridconnection/{entityName}"), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/hybridconnection/{entityName}", pathParameters), autorest.WithJSON(connectionEnvelope), - autorest.WithPathParameters(pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // CreateOrUpdateSiteRelayServiceConnectionSender sends the CreateOrUpdateSiteRelayServiceConnection request. The method will close the // http.Response Body if it receives an error. func (client SitesClient) CreateOrUpdateSiteRelayServiceConnectionSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // CreateOrUpdateSiteRelayServiceConnectionResponder handles the response to the CreateOrUpdateSiteRelayServiceConnection request. 
The method always @@ -992,7 +1137,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -1006,21 +1151,21 @@ // app entityName is the name by which the Hybrid Connection is identified // connectionEnvelope is the details of the Hybrid Connection slot is the // name of the slot for the web app. -func (client SitesClient) CreateOrUpdateSiteRelayServiceConnectionSlot(resourceGroupName string, name string, entityName string, connectionEnvelope RelayServiceConnectionEntity, slot string) (result RelayServiceConnectionEntity, ae error) { +func (client SitesClient) CreateOrUpdateSiteRelayServiceConnectionSlot(resourceGroupName string, name string, entityName string, connectionEnvelope RelayServiceConnectionEntity, slot string) (result RelayServiceConnectionEntity, err error) { req, err := client.CreateOrUpdateSiteRelayServiceConnectionSlotPreparer(resourceGroupName, name, entityName, connectionEnvelope, slot) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "CreateOrUpdateSiteRelayServiceConnectionSlot", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "CreateOrUpdateSiteRelayServiceConnectionSlot", nil, "Failure preparing request") } resp, err := client.CreateOrUpdateSiteRelayServiceConnectionSlotSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "CreateOrUpdateSiteRelayServiceConnectionSlot", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "CreateOrUpdateSiteRelayServiceConnectionSlot", resp, "Failure sending request") } result, err = client.CreateOrUpdateSiteRelayServiceConnectionSlotResponder(resp) if err != nil { - ae = 
autorest.NewErrorWithError(err, "web/SitesClient", "CreateOrUpdateSiteRelayServiceConnectionSlot", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", "CreateOrUpdateSiteRelayServiceConnectionSlot", resp, "Failure responding to request") } return @@ -1029,31 +1174,31 @@ // CreateOrUpdateSiteRelayServiceConnectionSlotPreparer prepares the CreateOrUpdateSiteRelayServiceConnectionSlot request. func (client SitesClient) CreateOrUpdateSiteRelayServiceConnectionSlotPreparer(resourceGroupName string, name string, entityName string, connectionEnvelope RelayServiceConnectionEntity, slot string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "entityName": url.QueryEscape(entityName), - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "slot": url.QueryEscape(slot), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "entityName": autorest.Encode("path", entityName), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "slot": autorest.Encode("path", slot), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, + preparer := autorest.CreatePreparer( autorest.AsJSON(), autorest.AsPut(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/hybridconnection/{entityName}"), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/hybridconnection/{entityName}", pathParameters), autorest.WithJSON(connectionEnvelope), - autorest.WithPathParameters(pathParameters), autorest.WithQueryParameters(queryParameters)) + return 
preparer.Prepare(&http.Request{}) } // CreateOrUpdateSiteRelayServiceConnectionSlotSender sends the CreateOrUpdateSiteRelayServiceConnectionSlot request. The method will close the // http.Response Body if it receives an error. func (client SitesClient) CreateOrUpdateSiteRelayServiceConnectionSlotSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // CreateOrUpdateSiteRelayServiceConnectionSlotResponder handles the response to the CreateOrUpdateSiteRelayServiceConnectionSlot request. The method always @@ -1062,14 +1207,17 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} return } -// CreateOrUpdateSiteSlot sends the create or update site slot request. +// CreateOrUpdateSiteSlot sends the create or update site slot request. This +// method may poll for completion. Polling can be canceled by passing the +// cancel channel argument. The channel will be used to cancel polling and +// any outstanding HTTP requests. // // resourceGroupName is name of the resource group name is name of the web app // siteEnvelope is details of web app if it exists already slot is name of @@ -1081,77 +1229,78 @@ // not verified. 
forceDNSRegistration is if true, web app hostname is force // registered with DNS ttlInSeconds is time to live in seconds for web app's // default domain name -func (client SitesClient) CreateOrUpdateSiteSlot(resourceGroupName string, name string, siteEnvelope Site, slot string, skipDNSRegistration string, skipCustomDomainVerification string, forceDNSRegistration string, ttlInSeconds string) (result Site, ae error) { - req, err := client.CreateOrUpdateSiteSlotPreparer(resourceGroupName, name, siteEnvelope, slot, skipDNSRegistration, skipCustomDomainVerification, forceDNSRegistration, ttlInSeconds) +func (client SitesClient) CreateOrUpdateSiteSlot(resourceGroupName string, name string, siteEnvelope Site, slot string, skipDNSRegistration string, skipCustomDomainVerification string, forceDNSRegistration string, ttlInSeconds string, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.CreateOrUpdateSiteSlotPreparer(resourceGroupName, name, siteEnvelope, slot, skipDNSRegistration, skipCustomDomainVerification, forceDNSRegistration, ttlInSeconds, cancel) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "CreateOrUpdateSiteSlot", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "CreateOrUpdateSiteSlot", nil, "Failure preparing request") } resp, err := client.CreateOrUpdateSiteSlotSender(req) if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "CreateOrUpdateSiteSlot", "Failure sending request") + result.Response = resp + return result, autorest.NewErrorWithError(err, "web.SitesClient", "CreateOrUpdateSiteSlot", resp, "Failure sending request") } result, err = client.CreateOrUpdateSiteSlotResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "CreateOrUpdateSiteSlot", "Failure responding to request") + err = autorest.NewErrorWithError(err, 
"web.SitesClient", "CreateOrUpdateSiteSlot", resp, "Failure responding to request") } return } // CreateOrUpdateSiteSlotPreparer prepares the CreateOrUpdateSiteSlot request. -func (client SitesClient) CreateOrUpdateSiteSlotPreparer(resourceGroupName string, name string, siteEnvelope Site, slot string, skipDNSRegistration string, skipCustomDomainVerification string, forceDNSRegistration string, ttlInSeconds string) (*http.Request, error) { +func (client SitesClient) CreateOrUpdateSiteSlotPreparer(resourceGroupName string, name string, siteEnvelope Site, slot string, skipDNSRegistration string, skipCustomDomainVerification string, forceDNSRegistration string, ttlInSeconds string, cancel <-chan struct{}) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "slot": url.QueryEscape(slot), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "slot": autorest.Encode("path", slot), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } if len(skipDNSRegistration) > 0 { - queryParameters["skipDnsRegistration"] = skipDNSRegistration + queryParameters["skipDnsRegistration"] = autorest.Encode("query", skipDNSRegistration) } if len(skipCustomDomainVerification) > 0 { - queryParameters["skipCustomDomainVerification"] = skipCustomDomainVerification + queryParameters["skipCustomDomainVerification"] = autorest.Encode("query", skipCustomDomainVerification) } if len(forceDNSRegistration) > 0 { - queryParameters["forceDnsRegistration"] = forceDNSRegistration + queryParameters["forceDnsRegistration"] = autorest.Encode("query", forceDNSRegistration) } if len(ttlInSeconds) > 0 { - queryParameters["ttlInSeconds"] = ttlInSeconds + 
queryParameters["ttlInSeconds"] = autorest.Encode("query", ttlInSeconds) } - return autorest.Prepare(&http.Request{}, + preparer := autorest.CreatePreparer( autorest.AsJSON(), autorest.AsPut(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}"), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}", pathParameters), autorest.WithJSON(siteEnvelope), - autorest.WithPathParameters(pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) } // CreateOrUpdateSiteSlotSender sends the CreateOrUpdateSiteSlot request. The method will close the // http.Response Body if it receives an error. func (client SitesClient) CreateOrUpdateSiteSlotSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK, http.StatusAccepted) + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) } // CreateOrUpdateSiteSlotResponder handles the response to the CreateOrUpdateSiteSlot request. The method always // closes the http.Response Body. 
-func (client SitesClient) CreateOrUpdateSiteSlotResponder(resp *http.Response) (result Site, err error) { +func (client SitesClient) CreateOrUpdateSiteSlotResponder(resp *http.Response) (result autorest.Response, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), - autorest.ByUnmarshallingJSON(&result), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} + result.Response = resp return } @@ -1161,21 +1310,21 @@ // resourceGroupName is name of resource group name is name of web app // siteSourceControl is request body that contains the source control // parameters -func (client SitesClient) CreateOrUpdateSiteSourceControl(resourceGroupName string, name string, siteSourceControl SiteSourceControl) (result SiteSourceControl, ae error) { +func (client SitesClient) CreateOrUpdateSiteSourceControl(resourceGroupName string, name string, siteSourceControl SiteSourceControl) (result SiteSourceControl, err error) { req, err := client.CreateOrUpdateSiteSourceControlPreparer(resourceGroupName, name, siteSourceControl) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "CreateOrUpdateSiteSourceControl", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "CreateOrUpdateSiteSourceControl", nil, "Failure preparing request") } resp, err := client.CreateOrUpdateSiteSourceControlSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "CreateOrUpdateSiteSourceControl", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "CreateOrUpdateSiteSourceControl", resp, "Failure sending request") } result, err = client.CreateOrUpdateSiteSourceControlResponder(resp) if err != nil { - ae = 
autorest.NewErrorWithError(err, "web/SitesClient", "CreateOrUpdateSiteSourceControl", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", "CreateOrUpdateSiteSourceControl", resp, "Failure responding to request") } return @@ -1184,29 +1333,29 @@ // CreateOrUpdateSiteSourceControlPreparer prepares the CreateOrUpdateSiteSourceControl request. func (client SitesClient) CreateOrUpdateSiteSourceControlPreparer(resourceGroupName string, name string, siteSourceControl SiteSourceControl) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, + preparer := autorest.CreatePreparer( autorest.AsJSON(), autorest.AsPut(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/sourcecontrols/web"), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/sourcecontrols/web", pathParameters), autorest.WithJSON(siteSourceControl), - autorest.WithPathParameters(pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // CreateOrUpdateSiteSourceControlSender sends the CreateOrUpdateSiteSourceControl request. The method will close the // http.Response Body if it receives an error. 
func (client SitesClient) CreateOrUpdateSiteSourceControlSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // CreateOrUpdateSiteSourceControlResponder handles the response to the CreateOrUpdateSiteSourceControl request. The method always @@ -1215,7 +1364,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -1229,21 +1378,21 @@ // siteSourceControl is request body that contains the source control // parameters slot is name of web app slot. If not specified then will // default to production slot. -func (client SitesClient) CreateOrUpdateSiteSourceControlSlot(resourceGroupName string, name string, siteSourceControl SiteSourceControl, slot string) (result SiteSourceControl, ae error) { +func (client SitesClient) CreateOrUpdateSiteSourceControlSlot(resourceGroupName string, name string, siteSourceControl SiteSourceControl, slot string) (result SiteSourceControl, err error) { req, err := client.CreateOrUpdateSiteSourceControlSlotPreparer(resourceGroupName, name, siteSourceControl, slot) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "CreateOrUpdateSiteSourceControlSlot", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "CreateOrUpdateSiteSourceControlSlot", nil, "Failure preparing request") } resp, err := client.CreateOrUpdateSiteSourceControlSlotSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "CreateOrUpdateSiteSourceControlSlot", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "CreateOrUpdateSiteSourceControlSlot", resp, "Failure 
sending request") } result, err = client.CreateOrUpdateSiteSourceControlSlotResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "CreateOrUpdateSiteSourceControlSlot", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", "CreateOrUpdateSiteSourceControlSlot", resp, "Failure responding to request") } return @@ -1252,30 +1401,30 @@ // CreateOrUpdateSiteSourceControlSlotPreparer prepares the CreateOrUpdateSiteSourceControlSlot request. func (client SitesClient) CreateOrUpdateSiteSourceControlSlotPreparer(resourceGroupName string, name string, siteSourceControl SiteSourceControl, slot string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "slot": url.QueryEscape(slot), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "slot": autorest.Encode("path", slot), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, + preparer := autorest.CreatePreparer( autorest.AsJSON(), autorest.AsPut(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/sourcecontrols/web"), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/sourcecontrols/web", pathParameters), autorest.WithJSON(siteSourceControl), - autorest.WithPathParameters(pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // CreateOrUpdateSiteSourceControlSlotSender sends the 
CreateOrUpdateSiteSourceControlSlot request. The method will close the // http.Response Body if it receives an error. func (client SitesClient) CreateOrUpdateSiteSourceControlSlotSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // CreateOrUpdateSiteSourceControlSlotResponder handles the response to the CreateOrUpdateSiteSourceControlSlot request. The method always @@ -1284,7 +1433,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -1297,21 +1446,21 @@ // resourceGroupName is the resource group name name is the name of the web // app vnetName is the name of the Virtual Network connectionEnvelope is the // properties of this Virtual Network Connection -func (client SitesClient) CreateOrUpdateSiteVNETConnection(resourceGroupName string, name string, vnetName string, connectionEnvelope VnetInfo) (result VnetInfo, ae error) { +func (client SitesClient) CreateOrUpdateSiteVNETConnection(resourceGroupName string, name string, vnetName string, connectionEnvelope VnetInfo) (result VnetInfo, err error) { req, err := client.CreateOrUpdateSiteVNETConnectionPreparer(resourceGroupName, name, vnetName, connectionEnvelope) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "CreateOrUpdateSiteVNETConnection", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "CreateOrUpdateSiteVNETConnection", nil, "Failure preparing request") } resp, err := client.CreateOrUpdateSiteVNETConnectionSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "CreateOrUpdateSiteVNETConnection", "Failure sending request") + 
return result, autorest.NewErrorWithError(err, "web.SitesClient", "CreateOrUpdateSiteVNETConnection", resp, "Failure sending request") } result, err = client.CreateOrUpdateSiteVNETConnectionResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "CreateOrUpdateSiteVNETConnection", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", "CreateOrUpdateSiteVNETConnection", resp, "Failure responding to request") } return @@ -1320,30 +1469,30 @@ // CreateOrUpdateSiteVNETConnectionPreparer prepares the CreateOrUpdateSiteVNETConnection request. func (client SitesClient) CreateOrUpdateSiteVNETConnectionPreparer(resourceGroupName string, name string, vnetName string, connectionEnvelope VnetInfo) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - "vnetName": url.QueryEscape(vnetName), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vnetName": autorest.Encode("path", vnetName), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, + preparer := autorest.CreatePreparer( autorest.AsJSON(), autorest.AsPut(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/virtualNetworkConnections/{vnetName}"), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/virtualNetworkConnections/{vnetName}", pathParameters), autorest.WithJSON(connectionEnvelope), - autorest.WithPathParameters(pathParameters), 
autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // CreateOrUpdateSiteVNETConnectionSender sends the CreateOrUpdateSiteVNETConnection request. The method will close the // http.Response Body if it receives an error. func (client SitesClient) CreateOrUpdateSiteVNETConnectionSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // CreateOrUpdateSiteVNETConnectionResponder handles the response to the CreateOrUpdateSiteVNETConnection request. The method always @@ -1352,7 +1501,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -1366,21 +1515,21 @@ // app vnetName is the name of the Virtual Network gatewayName is the name of // the gateway. The only gateway that exists presently is "primary" // connectionEnvelope is the properties to update this gateway with. 
-func (client SitesClient) CreateOrUpdateSiteVNETConnectionGateway(resourceGroupName string, name string, vnetName string, gatewayName string, connectionEnvelope VnetGateway) (result VnetGateway, ae error) { +func (client SitesClient) CreateOrUpdateSiteVNETConnectionGateway(resourceGroupName string, name string, vnetName string, gatewayName string, connectionEnvelope VnetGateway) (result VnetGateway, err error) { req, err := client.CreateOrUpdateSiteVNETConnectionGatewayPreparer(resourceGroupName, name, vnetName, gatewayName, connectionEnvelope) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "CreateOrUpdateSiteVNETConnectionGateway", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "CreateOrUpdateSiteVNETConnectionGateway", nil, "Failure preparing request") } resp, err := client.CreateOrUpdateSiteVNETConnectionGatewaySender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "CreateOrUpdateSiteVNETConnectionGateway", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "CreateOrUpdateSiteVNETConnectionGateway", resp, "Failure sending request") } result, err = client.CreateOrUpdateSiteVNETConnectionGatewayResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "CreateOrUpdateSiteVNETConnectionGateway", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", "CreateOrUpdateSiteVNETConnectionGateway", resp, "Failure responding to request") } return @@ -1389,31 +1538,31 @@ // CreateOrUpdateSiteVNETConnectionGatewayPreparer prepares the CreateOrUpdateSiteVNETConnectionGateway request. 
func (client SitesClient) CreateOrUpdateSiteVNETConnectionGatewayPreparer(resourceGroupName string, name string, vnetName string, gatewayName string, connectionEnvelope VnetGateway) (*http.Request, error) { pathParameters := map[string]interface{}{ - "gatewayName": url.QueryEscape(gatewayName), - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - "vnetName": url.QueryEscape(vnetName), + "gatewayName": autorest.Encode("path", gatewayName), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vnetName": autorest.Encode("path", vnetName), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, + preparer := autorest.CreatePreparer( autorest.AsJSON(), autorest.AsPut(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/virtualNetworkConnections/{vnetName}/gateways/{gatewayName}"), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/virtualNetworkConnections/{vnetName}/gateways/{gatewayName}", pathParameters), autorest.WithJSON(connectionEnvelope), - autorest.WithPathParameters(pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // CreateOrUpdateSiteVNETConnectionGatewaySender sends the CreateOrUpdateSiteVNETConnectionGateway request. The method will close the // http.Response Body if it receives an error. 
func (client SitesClient) CreateOrUpdateSiteVNETConnectionGatewaySender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // CreateOrUpdateSiteVNETConnectionGatewayResponder handles the response to the CreateOrUpdateSiteVNETConnectionGateway request. The method always @@ -1422,7 +1571,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -1437,21 +1586,21 @@ // the gateway. The only gateway that exists presently is "primary" // connectionEnvelope is the properties to update this gateway with. slot is // the name of the slot for this web app. -func (client SitesClient) CreateOrUpdateSiteVNETConnectionGatewaySlot(resourceGroupName string, name string, vnetName string, gatewayName string, connectionEnvelope VnetGateway, slot string) (result VnetGateway, ae error) { +func (client SitesClient) CreateOrUpdateSiteVNETConnectionGatewaySlot(resourceGroupName string, name string, vnetName string, gatewayName string, connectionEnvelope VnetGateway, slot string) (result VnetGateway, err error) { req, err := client.CreateOrUpdateSiteVNETConnectionGatewaySlotPreparer(resourceGroupName, name, vnetName, gatewayName, connectionEnvelope, slot) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "CreateOrUpdateSiteVNETConnectionGatewaySlot", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "CreateOrUpdateSiteVNETConnectionGatewaySlot", nil, "Failure preparing request") } resp, err := client.CreateOrUpdateSiteVNETConnectionGatewaySlotSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", 
"CreateOrUpdateSiteVNETConnectionGatewaySlot", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "CreateOrUpdateSiteVNETConnectionGatewaySlot", resp, "Failure sending request") } result, err = client.CreateOrUpdateSiteVNETConnectionGatewaySlotResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "CreateOrUpdateSiteVNETConnectionGatewaySlot", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", "CreateOrUpdateSiteVNETConnectionGatewaySlot", resp, "Failure responding to request") } return @@ -1460,32 +1609,32 @@ // CreateOrUpdateSiteVNETConnectionGatewaySlotPreparer prepares the CreateOrUpdateSiteVNETConnectionGatewaySlot request. func (client SitesClient) CreateOrUpdateSiteVNETConnectionGatewaySlotPreparer(resourceGroupName string, name string, vnetName string, gatewayName string, connectionEnvelope VnetGateway, slot string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "gatewayName": url.QueryEscape(gatewayName), - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "slot": url.QueryEscape(slot), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - "vnetName": url.QueryEscape(vnetName), + "gatewayName": autorest.Encode("path", gatewayName), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "slot": autorest.Encode("path", slot), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vnetName": autorest.Encode("path", vnetName), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, + preparer := autorest.CreatePreparer( autorest.AsJSON(), autorest.AsPut(), autorest.WithBaseURL(client.BaseURI), - 
autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/virtualNetworkConnections/{vnetName}/gateways/{gatewayName}"), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/virtualNetworkConnections/{vnetName}/gateways/{gatewayName}", pathParameters), autorest.WithJSON(connectionEnvelope), - autorest.WithPathParameters(pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // CreateOrUpdateSiteVNETConnectionGatewaySlotSender sends the CreateOrUpdateSiteVNETConnectionGatewaySlot request. The method will close the // http.Response Body if it receives an error. func (client SitesClient) CreateOrUpdateSiteVNETConnectionGatewaySlotSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // CreateOrUpdateSiteVNETConnectionGatewaySlotResponder handles the response to the CreateOrUpdateSiteVNETConnectionGatewaySlot request. The method always @@ -1494,7 +1643,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -1508,21 +1657,21 @@ // app vnetName is the name of the Virtual Network connectionEnvelope is the // properties of this Virtual Network Connection slot is the name of the slot // for this web app. 
-func (client SitesClient) CreateOrUpdateSiteVNETConnectionSlot(resourceGroupName string, name string, vnetName string, connectionEnvelope VnetInfo, slot string) (result VnetInfo, ae error) { +func (client SitesClient) CreateOrUpdateSiteVNETConnectionSlot(resourceGroupName string, name string, vnetName string, connectionEnvelope VnetInfo, slot string) (result VnetInfo, err error) { req, err := client.CreateOrUpdateSiteVNETConnectionSlotPreparer(resourceGroupName, name, vnetName, connectionEnvelope, slot) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "CreateOrUpdateSiteVNETConnectionSlot", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "CreateOrUpdateSiteVNETConnectionSlot", nil, "Failure preparing request") } resp, err := client.CreateOrUpdateSiteVNETConnectionSlotSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "CreateOrUpdateSiteVNETConnectionSlot", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "CreateOrUpdateSiteVNETConnectionSlot", resp, "Failure sending request") } result, err = client.CreateOrUpdateSiteVNETConnectionSlotResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "CreateOrUpdateSiteVNETConnectionSlot", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", "CreateOrUpdateSiteVNETConnectionSlot", resp, "Failure responding to request") } return @@ -1531,31 +1680,31 @@ // CreateOrUpdateSiteVNETConnectionSlotPreparer prepares the CreateOrUpdateSiteVNETConnectionSlot request. 
func (client SitesClient) CreateOrUpdateSiteVNETConnectionSlotPreparer(resourceGroupName string, name string, vnetName string, connectionEnvelope VnetInfo, slot string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "slot": url.QueryEscape(slot), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - "vnetName": url.QueryEscape(vnetName), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "slot": autorest.Encode("path", slot), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vnetName": autorest.Encode("path", vnetName), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, + preparer := autorest.CreatePreparer( autorest.AsJSON(), autorest.AsPut(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/virtualNetworkConnections/{vnetName}"), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/virtualNetworkConnections/{vnetName}", pathParameters), autorest.WithJSON(connectionEnvelope), - autorest.WithPathParameters(pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // CreateOrUpdateSiteVNETConnectionSlotSender sends the CreateOrUpdateSiteVNETConnectionSlot request. The method will close the // http.Response Body if it receives an error. 
func (client SitesClient) CreateOrUpdateSiteVNETConnectionSlotSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // CreateOrUpdateSiteVNETConnectionSlotResponder handles the response to the CreateOrUpdateSiteVNETConnectionSlot request. The method always @@ -1564,7 +1713,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -1575,21 +1724,21 @@ // // resourceGroupName is name of resource group name is name of web app // backupID is id of backup -func (client SitesClient) DeleteBackup(resourceGroupName string, name string, backupID string) (result BackupItem, ae error) { +func (client SitesClient) DeleteBackup(resourceGroupName string, name string, backupID string) (result BackupItem, err error) { req, err := client.DeleteBackupPreparer(resourceGroupName, name, backupID) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "DeleteBackup", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "DeleteBackup", nil, "Failure preparing request") } resp, err := client.DeleteBackupSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "DeleteBackup", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "DeleteBackup", resp, "Failure sending request") } result, err = client.DeleteBackupResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "DeleteBackup", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", "DeleteBackup", resp, "Failure responding to request") } return @@ -1598,29 +1747,28 
@@ // DeleteBackupPreparer prepares the DeleteBackup request. func (client SitesClient) DeleteBackupPreparer(resourceGroupName string, name string, backupID string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "backupId": url.QueryEscape(backupID), - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "backupId": autorest.Encode("path", backupID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsDelete(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/backups/{backupId}"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/backups/{backupId}", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // DeleteBackupSender sends the DeleteBackup request. The method will close the // http.Response Body if it receives an error. func (client SitesClient) DeleteBackupSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // DeleteBackupResponder handles the response to the DeleteBackup request. 
The method always @@ -1629,7 +1777,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -1641,21 +1789,21 @@ // resourceGroupName is name of resource group name is name of web app // backupID is id of backup slot is name of web app slot. If not specified // then will default to production slot. -func (client SitesClient) DeleteBackupSlot(resourceGroupName string, name string, backupID string, slot string) (result BackupItem, ae error) { +func (client SitesClient) DeleteBackupSlot(resourceGroupName string, name string, backupID string, slot string) (result BackupItem, err error) { req, err := client.DeleteBackupSlotPreparer(resourceGroupName, name, backupID, slot) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "DeleteBackupSlot", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "DeleteBackupSlot", nil, "Failure preparing request") } resp, err := client.DeleteBackupSlotSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "DeleteBackupSlot", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "DeleteBackupSlot", resp, "Failure sending request") } result, err = client.DeleteBackupSlotResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "DeleteBackupSlot", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", "DeleteBackupSlot", resp, "Failure responding to request") } return @@ -1664,30 +1812,29 @@ // DeleteBackupSlotPreparer prepares the DeleteBackupSlot request. 
func (client SitesClient) DeleteBackupSlotPreparer(resourceGroupName string, name string, backupID string, slot string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "backupId": url.QueryEscape(backupID), - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "slot": url.QueryEscape(slot), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "backupId": autorest.Encode("path", backupID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "slot": autorest.Encode("path", slot), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsDelete(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/backups/{backupId}"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/backups/{backupId}", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // DeleteBackupSlotSender sends the DeleteBackupSlot request. The method will close the // http.Response Body if it receives an error. func (client SitesClient) DeleteBackupSlotSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // DeleteBackupSlotResponder handles the response to the DeleteBackupSlot request. 
The method always @@ -1696,441 +1843,698 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} return } -// DeleteSite sends the delete site request. +// DeleteDeployment sends the delete deployment request. // -// resourceGroupName is name of resource group name is name of web app -// deleteMetrics is if true, web app metrics are also deleted -// deleteEmptyServerFarm is if true and App Service Plan is empty after web -// app deletion, App Service Plan is also deleted skipDNSRegistration is if -// true, DNS registration is skipped deleteAllSlots is if true, all slots -// associated with web app are also deleted -func (client SitesClient) DeleteSite(resourceGroupName string, name string, deleteMetrics string, deleteEmptyServerFarm string, skipDNSRegistration string, deleteAllSlots string) (result ObjectSet, ae error) { - req, err := client.DeleteSitePreparer(resourceGroupName, name, deleteMetrics, deleteEmptyServerFarm, skipDNSRegistration, deleteAllSlots) +// resourceGroupName is name of resource group name is name of web app id is +// id of the deployment +func (client SitesClient) DeleteDeployment(resourceGroupName string, name string, id string) (result SetObject, err error) { + req, err := client.DeleteDeploymentPreparer(resourceGroupName, name, id) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "DeleteSite", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "DeleteDeployment", nil, "Failure preparing request") } - resp, err := client.DeleteSiteSender(req) + resp, err := client.DeleteDeploymentSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "DeleteSite", "Failure 
sending request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "DeleteDeployment", resp, "Failure sending request") } - result, err = client.DeleteSiteResponder(resp) + result, err = client.DeleteDeploymentResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "DeleteSite", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", "DeleteDeployment", resp, "Failure responding to request") } return } -// DeleteSitePreparer prepares the DeleteSite request. -func (client SitesClient) DeleteSitePreparer(resourceGroupName string, name string, deleteMetrics string, deleteEmptyServerFarm string, skipDNSRegistration string, deleteAllSlots string) (*http.Request, error) { +// DeleteDeploymentPreparer prepares the DeleteDeployment request. +func (client SitesClient) DeleteDeploymentPreparer(resourceGroupName string, name string, id string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "id": autorest.Encode("path", id), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - if len(deleteMetrics) > 0 { - queryParameters["deleteMetrics"] = deleteMetrics - } - if len(deleteEmptyServerFarm) > 0 { - queryParameters["deleteEmptyServerFarm"] = deleteEmptyServerFarm - } - if len(skipDNSRegistration) > 0 { - queryParameters["skipDnsRegistration"] = skipDNSRegistration - } - if len(deleteAllSlots) > 0 { - queryParameters["deleteAllSlots"] = deleteAllSlots + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsDelete(), 
autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/deployments/{id}", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } -// DeleteSiteSender sends the DeleteSite request. The method will close the +// DeleteDeploymentSender sends the DeleteDeployment request. The method will close the // http.Response Body if it receives an error. -func (client SitesClient) DeleteSiteSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) +func (client SitesClient) DeleteDeploymentSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) } -// DeleteSiteResponder handles the response to the DeleteSite request. The method always +// DeleteDeploymentResponder handles the response to the DeleteDeployment request. The method always // closes the http.Response Body. -func (client SitesClient) DeleteSiteResponder(resp *http.Response) (result ObjectSet, err error) { +func (client SitesClient) DeleteDeploymentResponder(resp *http.Response) (result SetObject, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result.Value), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} return } -// DeleteSiteHostNameBinding sends the delete site host name binding request. +// DeleteDeploymentSlot sends the delete deployment slot request. 
// -// resourceGroupName is name of resource group name is name of web app -// hostName is name of host -func (client SitesClient) DeleteSiteHostNameBinding(resourceGroupName string, name string, hostName string) (result ObjectSet, ae error) { - req, err := client.DeleteSiteHostNameBindingPreparer(resourceGroupName, name, hostName) +// resourceGroupName is name of resource group name is name of web app id is +// id of the deployment slot is name of web app slot. If not specified then +// will default to production slot. +func (client SitesClient) DeleteDeploymentSlot(resourceGroupName string, name string, id string, slot string) (result SetObject, err error) { + req, err := client.DeleteDeploymentSlotPreparer(resourceGroupName, name, id, slot) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "DeleteSiteHostNameBinding", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "DeleteDeploymentSlot", nil, "Failure preparing request") } - resp, err := client.DeleteSiteHostNameBindingSender(req) + resp, err := client.DeleteDeploymentSlotSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "DeleteSiteHostNameBinding", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "DeleteDeploymentSlot", resp, "Failure sending request") } - result, err = client.DeleteSiteHostNameBindingResponder(resp) + result, err = client.DeleteDeploymentSlotResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "DeleteSiteHostNameBinding", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", "DeleteDeploymentSlot", resp, "Failure responding to request") } return } -// DeleteSiteHostNameBindingPreparer prepares the DeleteSiteHostNameBinding request. 
-func (client SitesClient) DeleteSiteHostNameBindingPreparer(resourceGroupName string, name string, hostName string) (*http.Request, error) { +// DeleteDeploymentSlotPreparer prepares the DeleteDeploymentSlot request. +func (client SitesClient) DeleteDeploymentSlotPreparer(resourceGroupName string, name string, id string, slot string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "hostName": url.QueryEscape(hostName), - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "id": autorest.Encode("path", id), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "slot": autorest.Encode("path", slot), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsDelete(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/hostNameBindings/{hostName}"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/deployments/{id}", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } -// DeleteSiteHostNameBindingSender sends the DeleteSiteHostNameBinding request. The method will close the +// DeleteDeploymentSlotSender sends the DeleteDeploymentSlot request. The method will close the // http.Response Body if it receives an error. 
-func (client SitesClient) DeleteSiteHostNameBindingSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) +func (client SitesClient) DeleteDeploymentSlotSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) } -// DeleteSiteHostNameBindingResponder handles the response to the DeleteSiteHostNameBinding request. The method always +// DeleteDeploymentSlotResponder handles the response to the DeleteDeploymentSlot request. The method always // closes the http.Response Body. -func (client SitesClient) DeleteSiteHostNameBindingResponder(resp *http.Response) (result ObjectSet, err error) { +func (client SitesClient) DeleteDeploymentSlotResponder(resp *http.Response) (result SetObject, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result.Value), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} return } -// DeleteSiteHostNameBindingSlot sends the delete site host name binding slot -// request. +// DeleteInstanceDeployment sends the delete instance deployment request. // -// resourceGroupName is name of resource group name is name of web app slot is -// name of web app slot. If not specified then will default to production -// slot. 
hostName is name of host -func (client SitesClient) DeleteSiteHostNameBindingSlot(resourceGroupName string, name string, slot string, hostName string) (result ObjectSet, ae error) { - req, err := client.DeleteSiteHostNameBindingSlotPreparer(resourceGroupName, name, slot, hostName) +// resourceGroupName is name of resource group name is name of web app id is +// id of the deployment instanceID is id of web app instance +func (client SitesClient) DeleteInstanceDeployment(resourceGroupName string, name string, id string, instanceID string) (result SetObject, err error) { + req, err := client.DeleteInstanceDeploymentPreparer(resourceGroupName, name, id, instanceID) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "DeleteSiteHostNameBindingSlot", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "DeleteInstanceDeployment", nil, "Failure preparing request") } - resp, err := client.DeleteSiteHostNameBindingSlotSender(req) + resp, err := client.DeleteInstanceDeploymentSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "DeleteSiteHostNameBindingSlot", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "DeleteInstanceDeployment", resp, "Failure sending request") } - result, err = client.DeleteSiteHostNameBindingSlotResponder(resp) + result, err = client.DeleteInstanceDeploymentResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "DeleteSiteHostNameBindingSlot", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", "DeleteInstanceDeployment", resp, "Failure responding to request") } return } -// DeleteSiteHostNameBindingSlotPreparer prepares the DeleteSiteHostNameBindingSlot request. 
-func (client SitesClient) DeleteSiteHostNameBindingSlotPreparer(resourceGroupName string, name string, slot string, hostName string) (*http.Request, error) { +// DeleteInstanceDeploymentPreparer prepares the DeleteInstanceDeployment request. +func (client SitesClient) DeleteInstanceDeploymentPreparer(resourceGroupName string, name string, id string, instanceID string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "hostName": url.QueryEscape(hostName), - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "slot": url.QueryEscape(slot), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "id": autorest.Encode("path", id), + "instanceId": autorest.Encode("path", instanceID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsDelete(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/hostNameBindings/{hostName}"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/instances/{instanceId}/deployments/{id}", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } -// DeleteSiteHostNameBindingSlotSender sends the DeleteSiteHostNameBindingSlot request. The method will close the +// DeleteInstanceDeploymentSender sends the DeleteInstanceDeployment request. The method will close the // http.Response Body if it receives an error. 
-func (client SitesClient) DeleteSiteHostNameBindingSlotSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) +func (client SitesClient) DeleteInstanceDeploymentSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) } -// DeleteSiteHostNameBindingSlotResponder handles the response to the DeleteSiteHostNameBindingSlot request. The method always +// DeleteInstanceDeploymentResponder handles the response to the DeleteInstanceDeployment request. The method always // closes the http.Response Body. -func (client SitesClient) DeleteSiteHostNameBindingSlotResponder(resp *http.Response) (result ObjectSet, err error) { +func (client SitesClient) DeleteInstanceDeploymentResponder(resp *http.Response) (result SetObject, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result.Value), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} return } -// DeleteSitePremierAddOn sends the delete site premier add on request. +// DeleteInstanceDeploymentSlot sends the delete instance deployment slot +// request. // -func (client SitesClient) DeleteSitePremierAddOn(resourceGroupName string, name string, premierAddOnName string) (result ObjectSet, ae error) { - req, err := client.DeleteSitePremierAddOnPreparer(resourceGroupName, name, premierAddOnName) +// resourceGroupName is name of resource group name is name of web app id is +// id of the deployment slot is name of web app slot. If not specified then +// will default to production slot. 
instanceID is id of web app instance +func (client SitesClient) DeleteInstanceDeploymentSlot(resourceGroupName string, name string, id string, slot string, instanceID string) (result SetObject, err error) { + req, err := client.DeleteInstanceDeploymentSlotPreparer(resourceGroupName, name, id, slot, instanceID) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "DeleteSitePremierAddOn", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "DeleteInstanceDeploymentSlot", nil, "Failure preparing request") } - resp, err := client.DeleteSitePremierAddOnSender(req) + resp, err := client.DeleteInstanceDeploymentSlotSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "DeleteSitePremierAddOn", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "DeleteInstanceDeploymentSlot", resp, "Failure sending request") } - result, err = client.DeleteSitePremierAddOnResponder(resp) + result, err = client.DeleteInstanceDeploymentSlotResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "DeleteSitePremierAddOn", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", "DeleteInstanceDeploymentSlot", resp, "Failure responding to request") } return } -// DeleteSitePremierAddOnPreparer prepares the DeleteSitePremierAddOn request. -func (client SitesClient) DeleteSitePremierAddOnPreparer(resourceGroupName string, name string, premierAddOnName string) (*http.Request, error) { +// DeleteInstanceDeploymentSlotPreparer prepares the DeleteInstanceDeploymentSlot request. 
+func (client SitesClient) DeleteInstanceDeploymentSlotPreparer(resourceGroupName string, name string, id string, slot string, instanceID string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "premierAddOnName": url.QueryEscape(premierAddOnName), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "id": autorest.Encode("path", id), + "instanceId": autorest.Encode("path", instanceID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "slot": autorest.Encode("path", slot), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsDelete(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/premieraddons/{premierAddOnName}"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/instances/{instanceId}/deployments/{id}", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } -// DeleteSitePremierAddOnSender sends the DeleteSitePremierAddOn request. The method will close the +// DeleteInstanceDeploymentSlotSender sends the DeleteInstanceDeploymentSlot request. The method will close the // http.Response Body if it receives an error. 
-func (client SitesClient) DeleteSitePremierAddOnSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) +func (client SitesClient) DeleteInstanceDeploymentSlotSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) } -// DeleteSitePremierAddOnResponder handles the response to the DeleteSitePremierAddOn request. The method always +// DeleteInstanceDeploymentSlotResponder handles the response to the DeleteInstanceDeploymentSlot request. The method always // closes the http.Response Body. -func (client SitesClient) DeleteSitePremierAddOnResponder(resp *http.Response) (result ObjectSet, err error) { +func (client SitesClient) DeleteInstanceDeploymentSlotResponder(resp *http.Response) (result SetObject, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result.Value), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} return } -// DeleteSitePremierAddOnSlot sends the delete site premier add on slot -// request. +// DeleteSite sends the delete site request. 
// -func (client SitesClient) DeleteSitePremierAddOnSlot(resourceGroupName string, name string, premierAddOnName string, slot string) (result ObjectSet, ae error) { - req, err := client.DeleteSitePremierAddOnSlotPreparer(resourceGroupName, name, premierAddOnName, slot) +// resourceGroupName is name of resource group name is name of web app +// deleteMetrics is if true, web app metrics are also deleted +// deleteEmptyServerFarm is if true and App Service Plan is empty after web +// app deletion, App Service Plan is also deleted skipDNSRegistration is if +// true, DNS registration is skipped deleteAllSlots is if true, all slots +// associated with web app are also deleted +func (client SitesClient) DeleteSite(resourceGroupName string, name string, deleteMetrics string, deleteEmptyServerFarm string, skipDNSRegistration string, deleteAllSlots string) (result SetObject, err error) { + req, err := client.DeleteSitePreparer(resourceGroupName, name, deleteMetrics, deleteEmptyServerFarm, skipDNSRegistration, deleteAllSlots) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "DeleteSitePremierAddOnSlot", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "DeleteSite", nil, "Failure preparing request") } - resp, err := client.DeleteSitePremierAddOnSlotSender(req) + resp, err := client.DeleteSiteSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "DeleteSitePremierAddOnSlot", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "DeleteSite", resp, "Failure sending request") } - result, err = client.DeleteSitePremierAddOnSlotResponder(resp) + result, err = client.DeleteSiteResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "DeleteSitePremierAddOnSlot", "Failure responding to request") + err = autorest.NewErrorWithError(err, 
"web.SitesClient", "DeleteSite", resp, "Failure responding to request") } return } -// DeleteSitePremierAddOnSlotPreparer prepares the DeleteSitePremierAddOnSlot request. -func (client SitesClient) DeleteSitePremierAddOnSlotPreparer(resourceGroupName string, name string, premierAddOnName string, slot string) (*http.Request, error) { +// DeleteSitePreparer prepares the DeleteSite request. +func (client SitesClient) DeleteSitePreparer(resourceGroupName string, name string, deleteMetrics string, deleteEmptyServerFarm string, skipDNSRegistration string, deleteAllSlots string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "premierAddOnName": url.QueryEscape(premierAddOnName), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "slot": url.QueryEscape(slot), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, + } + if len(deleteMetrics) > 0 { + queryParameters["deleteMetrics"] = autorest.Encode("query", deleteMetrics) + } + if len(deleteEmptyServerFarm) > 0 { + queryParameters["deleteEmptyServerFarm"] = autorest.Encode("query", deleteEmptyServerFarm) + } + if len(skipDNSRegistration) > 0 { + queryParameters["skipDnsRegistration"] = autorest.Encode("query", skipDNSRegistration) + } + if len(deleteAllSlots) > 0 { + queryParameters["deleteAllSlots"] = autorest.Encode("query", deleteAllSlots) } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsDelete(), autorest.WithBaseURL(client.BaseURI), - 
autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/premieraddons/{premierAddOnName}"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } -// DeleteSitePremierAddOnSlotSender sends the DeleteSitePremierAddOnSlot request. The method will close the +// DeleteSiteSender sends the DeleteSite request. The method will close the // http.Response Body if it receives an error. -func (client SitesClient) DeleteSitePremierAddOnSlotSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) +func (client SitesClient) DeleteSiteSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) } -// DeleteSitePremierAddOnSlotResponder handles the response to the DeleteSitePremierAddOnSlot request. The method always +// DeleteSiteResponder handles the response to the DeleteSite request. The method always // closes the http.Response Body. -func (client SitesClient) DeleteSitePremierAddOnSlotResponder(resp *http.Response) (result ObjectSet, err error) { +func (client SitesClient) DeleteSiteResponder(resp *http.Response) (result SetObject, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result.Value), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} return } -// DeleteSiteRelayServiceConnection sends the delete site relay service -// connection request. +// DeleteSiteHostNameBinding sends the delete site host name binding request. 
// -// resourceGroupName is the resource group name name is the name of the web -// app entityName is the name by which the Hybrid Connection is identified -func (client SitesClient) DeleteSiteRelayServiceConnection(resourceGroupName string, name string, entityName string) (result ObjectSet, ae error) { - req, err := client.DeleteSiteRelayServiceConnectionPreparer(resourceGroupName, name, entityName) +// resourceGroupName is name of resource group name is name of web app +// hostName is name of host +func (client SitesClient) DeleteSiteHostNameBinding(resourceGroupName string, name string, hostName string) (result SetObject, err error) { + req, err := client.DeleteSiteHostNameBindingPreparer(resourceGroupName, name, hostName) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "DeleteSiteRelayServiceConnection", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "DeleteSiteHostNameBinding", nil, "Failure preparing request") } - resp, err := client.DeleteSiteRelayServiceConnectionSender(req) + resp, err := client.DeleteSiteHostNameBindingSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "DeleteSiteRelayServiceConnection", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "DeleteSiteHostNameBinding", resp, "Failure sending request") } - result, err = client.DeleteSiteRelayServiceConnectionResponder(resp) + result, err = client.DeleteSiteHostNameBindingResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "DeleteSiteRelayServiceConnection", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", "DeleteSiteHostNameBinding", resp, "Failure responding to request") } return } -// DeleteSiteRelayServiceConnectionPreparer prepares the DeleteSiteRelayServiceConnection request. 
-func (client SitesClient) DeleteSiteRelayServiceConnectionPreparer(resourceGroupName string, name string, entityName string) (*http.Request, error) { +// DeleteSiteHostNameBindingPreparer prepares the DeleteSiteHostNameBinding request. +func (client SitesClient) DeleteSiteHostNameBindingPreparer(resourceGroupName string, name string, hostName string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "entityName": url.QueryEscape(entityName), - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "hostName": autorest.Encode("path", hostName), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsDelete(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/hybridconnection/{entityName}"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/hostNameBindings/{hostName}", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } -// DeleteSiteRelayServiceConnectionSender sends the DeleteSiteRelayServiceConnection request. The method will close the +// DeleteSiteHostNameBindingSender sends the DeleteSiteHostNameBinding request. The method will close the // http.Response Body if it receives an error. 
-func (client SitesClient) DeleteSiteRelayServiceConnectionSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) +func (client SitesClient) DeleteSiteHostNameBindingSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) } -// DeleteSiteRelayServiceConnectionResponder handles the response to the DeleteSiteRelayServiceConnection request. The method always +// DeleteSiteHostNameBindingResponder handles the response to the DeleteSiteHostNameBinding request. The method always // closes the http.Response Body. -func (client SitesClient) DeleteSiteRelayServiceConnectionResponder(resp *http.Response) (result ObjectSet, err error) { +func (client SitesClient) DeleteSiteHostNameBindingResponder(resp *http.Response) (result SetObject, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result.Value), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} return } -// DeleteSiteRelayServiceConnectionSlot sends the delete site relay service -// connection slot request. +// DeleteSiteHostNameBindingSlot sends the delete site host name binding slot +// request. // -// resourceGroupName is the resource group name name is the name of the web -// app entityName is the name by which the Hybrid Connection is identified -// slot is the name of the slot for the web app. 
-func (client SitesClient) DeleteSiteRelayServiceConnectionSlot(resourceGroupName string, name string, entityName string, slot string) (result ObjectSet, ae error) { - req, err := client.DeleteSiteRelayServiceConnectionSlotPreparer(resourceGroupName, name, entityName, slot) - if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "DeleteSiteRelayServiceConnectionSlot", "Failure preparing request") - } - +// resourceGroupName is name of resource group name is name of web app slot is +// name of web app slot. If not specified then will default to production +// slot. hostName is name of host +func (client SitesClient) DeleteSiteHostNameBindingSlot(resourceGroupName string, name string, slot string, hostName string) (result SetObject, err error) { + req, err := client.DeleteSiteHostNameBindingSlotPreparer(resourceGroupName, name, slot, hostName) + if err != nil { + return result, autorest.NewErrorWithError(err, "web.SitesClient", "DeleteSiteHostNameBindingSlot", nil, "Failure preparing request") + } + + resp, err := client.DeleteSiteHostNameBindingSlotSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "web.SitesClient", "DeleteSiteHostNameBindingSlot", resp, "Failure sending request") + } + + result, err = client.DeleteSiteHostNameBindingSlotResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "web.SitesClient", "DeleteSiteHostNameBindingSlot", resp, "Failure responding to request") + } + + return +} + +// DeleteSiteHostNameBindingSlotPreparer prepares the DeleteSiteHostNameBindingSlot request. 
+func (client SitesClient) DeleteSiteHostNameBindingSlotPreparer(resourceGroupName string, name string, slot string, hostName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "hostName": autorest.Encode("path", hostName), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "slot": autorest.Encode("path", slot), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/hostNameBindings/{hostName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// DeleteSiteHostNameBindingSlotSender sends the DeleteSiteHostNameBindingSlot request. The method will close the +// http.Response Body if it receives an error. +func (client SitesClient) DeleteSiteHostNameBindingSlotSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// DeleteSiteHostNameBindingSlotResponder handles the response to the DeleteSiteHostNameBindingSlot request. The method always +// closes the http.Response Body. +func (client SitesClient) DeleteSiteHostNameBindingSlotResponder(resp *http.Response) (result SetObject, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result.Value), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// DeleteSitePremierAddOn sends the delete site premier add on request. 
+// +func (client SitesClient) DeleteSitePremierAddOn(resourceGroupName string, name string, premierAddOnName string) (result SetObject, err error) { + req, err := client.DeleteSitePremierAddOnPreparer(resourceGroupName, name, premierAddOnName) + if err != nil { + return result, autorest.NewErrorWithError(err, "web.SitesClient", "DeleteSitePremierAddOn", nil, "Failure preparing request") + } + + resp, err := client.DeleteSitePremierAddOnSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "web.SitesClient", "DeleteSitePremierAddOn", resp, "Failure sending request") + } + + result, err = client.DeleteSitePremierAddOnResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "web.SitesClient", "DeleteSitePremierAddOn", resp, "Failure responding to request") + } + + return +} + +// DeleteSitePremierAddOnPreparer prepares the DeleteSitePremierAddOn request. +func (client SitesClient) DeleteSitePremierAddOnPreparer(resourceGroupName string, name string, premierAddOnName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "name": autorest.Encode("path", name), + "premierAddOnName": autorest.Encode("path", premierAddOnName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/premieraddons/{premierAddOnName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// DeleteSitePremierAddOnSender sends the DeleteSitePremierAddOn request. 
The method will close the +// http.Response Body if it receives an error. +func (client SitesClient) DeleteSitePremierAddOnSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// DeleteSitePremierAddOnResponder handles the response to the DeleteSitePremierAddOn request. The method always +// closes the http.Response Body. +func (client SitesClient) DeleteSitePremierAddOnResponder(resp *http.Response) (result SetObject, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result.Value), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// DeleteSitePremierAddOnSlot sends the delete site premier add on slot +// request. +// +func (client SitesClient) DeleteSitePremierAddOnSlot(resourceGroupName string, name string, premierAddOnName string, slot string) (result SetObject, err error) { + req, err := client.DeleteSitePremierAddOnSlotPreparer(resourceGroupName, name, premierAddOnName, slot) + if err != nil { + return result, autorest.NewErrorWithError(err, "web.SitesClient", "DeleteSitePremierAddOnSlot", nil, "Failure preparing request") + } + + resp, err := client.DeleteSitePremierAddOnSlotSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "web.SitesClient", "DeleteSitePremierAddOnSlot", resp, "Failure sending request") + } + + result, err = client.DeleteSitePremierAddOnSlotResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "web.SitesClient", "DeleteSitePremierAddOnSlot", resp, "Failure responding to request") + } + + return +} + +// DeleteSitePremierAddOnSlotPreparer prepares the DeleteSitePremierAddOnSlot request. 
+func (client SitesClient) DeleteSitePremierAddOnSlotPreparer(resourceGroupName string, name string, premierAddOnName string, slot string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "name": autorest.Encode("path", name), + "premierAddOnName": autorest.Encode("path", premierAddOnName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "slot": autorest.Encode("path", slot), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/premieraddons/{premierAddOnName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// DeleteSitePremierAddOnSlotSender sends the DeleteSitePremierAddOnSlot request. The method will close the +// http.Response Body if it receives an error. +func (client SitesClient) DeleteSitePremierAddOnSlotSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// DeleteSitePremierAddOnSlotResponder handles the response to the DeleteSitePremierAddOnSlot request. The method always +// closes the http.Response Body. +func (client SitesClient) DeleteSitePremierAddOnSlotResponder(resp *http.Response) (result SetObject, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result.Value), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// DeleteSiteRelayServiceConnection sends the delete site relay service +// connection request. 
+// +// resourceGroupName is the resource group name name is the name of the web +// app entityName is the name by which the Hybrid Connection is identified +func (client SitesClient) DeleteSiteRelayServiceConnection(resourceGroupName string, name string, entityName string) (result SetObject, err error) { + req, err := client.DeleteSiteRelayServiceConnectionPreparer(resourceGroupName, name, entityName) + if err != nil { + return result, autorest.NewErrorWithError(err, "web.SitesClient", "DeleteSiteRelayServiceConnection", nil, "Failure preparing request") + } + + resp, err := client.DeleteSiteRelayServiceConnectionSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "web.SitesClient", "DeleteSiteRelayServiceConnection", resp, "Failure sending request") + } + + result, err = client.DeleteSiteRelayServiceConnectionResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "web.SitesClient", "DeleteSiteRelayServiceConnection", resp, "Failure responding to request") + } + + return +} + +// DeleteSiteRelayServiceConnectionPreparer prepares the DeleteSiteRelayServiceConnection request. 
+func (client SitesClient) DeleteSiteRelayServiceConnectionPreparer(resourceGroupName string, name string, entityName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "entityName": autorest.Encode("path", entityName), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/hybridconnection/{entityName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// DeleteSiteRelayServiceConnectionSender sends the DeleteSiteRelayServiceConnection request. The method will close the +// http.Response Body if it receives an error. +func (client SitesClient) DeleteSiteRelayServiceConnectionSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// DeleteSiteRelayServiceConnectionResponder handles the response to the DeleteSiteRelayServiceConnection request. The method always +// closes the http.Response Body. +func (client SitesClient) DeleteSiteRelayServiceConnectionResponder(resp *http.Response) (result SetObject, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result.Value), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// DeleteSiteRelayServiceConnectionSlot sends the delete site relay service +// connection slot request. 
+// +// resourceGroupName is the resource group name name is the name of the web +// app entityName is the name by which the Hybrid Connection is identified +// slot is the name of the slot for the web app. +func (client SitesClient) DeleteSiteRelayServiceConnectionSlot(resourceGroupName string, name string, entityName string, slot string) (result SetObject, err error) { + req, err := client.DeleteSiteRelayServiceConnectionSlotPreparer(resourceGroupName, name, entityName, slot) + if err != nil { + return result, autorest.NewErrorWithError(err, "web.SitesClient", "DeleteSiteRelayServiceConnectionSlot", nil, "Failure preparing request") + } + resp, err := client.DeleteSiteRelayServiceConnectionSlotSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "DeleteSiteRelayServiceConnectionSlot", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "DeleteSiteRelayServiceConnectionSlot", resp, "Failure sending request") } result, err = client.DeleteSiteRelayServiceConnectionSlotResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "DeleteSiteRelayServiceConnectionSlot", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", "DeleteSiteRelayServiceConnectionSlot", resp, "Failure responding to request") } return @@ -2139,39 +2543,38 @@ // DeleteSiteRelayServiceConnectionSlotPreparer prepares the DeleteSiteRelayServiceConnectionSlot request. 
func (client SitesClient) DeleteSiteRelayServiceConnectionSlotPreparer(resourceGroupName string, name string, entityName string, slot string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "entityName": url.QueryEscape(entityName), - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "slot": url.QueryEscape(slot), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "entityName": autorest.Encode("path", entityName), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "slot": autorest.Encode("path", slot), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsDelete(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/hybridconnection/{entityName}"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/hybridconnection/{entityName}", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // DeleteSiteRelayServiceConnectionSlotSender sends the DeleteSiteRelayServiceConnectionSlot request. The method will close the // http.Response Body if it receives an error. func (client SitesClient) DeleteSiteRelayServiceConnectionSlotSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // DeleteSiteRelayServiceConnectionSlotResponder handles the response to the DeleteSiteRelayServiceConnectionSlot request. 
The method always // closes the http.Response Body. -func (client SitesClient) DeleteSiteRelayServiceConnectionSlotResponder(resp *http.Response) (result ObjectSet, err error) { +func (client SitesClient) DeleteSiteRelayServiceConnectionSlotResponder(resp *http.Response) (result SetObject, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result.Value), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -2187,21 +2590,21 @@ // app deletion, App Service Plan is also deleted skipDNSRegistration is if // true, DNS registration is skipped deleteAllSlots is if true, all slots // associated with web app are also deleted -func (client SitesClient) DeleteSiteSlot(resourceGroupName string, name string, slot string, deleteMetrics string, deleteEmptyServerFarm string, skipDNSRegistration string, deleteAllSlots string) (result ObjectSet, ae error) { +func (client SitesClient) DeleteSiteSlot(resourceGroupName string, name string, slot string, deleteMetrics string, deleteEmptyServerFarm string, skipDNSRegistration string, deleteAllSlots string) (result SetObject, err error) { req, err := client.DeleteSiteSlotPreparer(resourceGroupName, name, slot, deleteMetrics, deleteEmptyServerFarm, skipDNSRegistration, deleteAllSlots) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "DeleteSiteSlot", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "DeleteSiteSlot", nil, "Failure preparing request") } resp, err := client.DeleteSiteSlotSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "DeleteSiteSlot", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "DeleteSiteSlot", resp, "Failure sending 
request") } result, err = client.DeleteSiteSlotResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "DeleteSiteSlot", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", "DeleteSiteSlot", resp, "Failure responding to request") } return @@ -2210,50 +2613,49 @@ // DeleteSiteSlotPreparer prepares the DeleteSiteSlot request. func (client SitesClient) DeleteSiteSlotPreparer(resourceGroupName string, name string, slot string, deleteMetrics string, deleteEmptyServerFarm string, skipDNSRegistration string, deleteAllSlots string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "slot": url.QueryEscape(slot), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "slot": autorest.Encode("path", slot), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } if len(deleteMetrics) > 0 { - queryParameters["deleteMetrics"] = deleteMetrics + queryParameters["deleteMetrics"] = autorest.Encode("query", deleteMetrics) } if len(deleteEmptyServerFarm) > 0 { - queryParameters["deleteEmptyServerFarm"] = deleteEmptyServerFarm + queryParameters["deleteEmptyServerFarm"] = autorest.Encode("query", deleteEmptyServerFarm) } if len(skipDNSRegistration) > 0 { - queryParameters["skipDnsRegistration"] = skipDNSRegistration + queryParameters["skipDnsRegistration"] = autorest.Encode("query", skipDNSRegistration) } if len(deleteAllSlots) > 0 { - queryParameters["deleteAllSlots"] = deleteAllSlots + queryParameters["deleteAllSlots"] = autorest.Encode("query", deleteAllSlots) } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( 
autorest.AsDelete(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // DeleteSiteSlotSender sends the DeleteSiteSlot request. The method will close the // http.Response Body if it receives an error. func (client SitesClient) DeleteSiteSlotSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // DeleteSiteSlotResponder handles the response to the DeleteSiteSlot request. The method always // closes the http.Response Body. -func (client SitesClient) DeleteSiteSlotResponder(resp *http.Response) (result ObjectSet, err error) { +func (client SitesClient) DeleteSiteSlotResponder(resp *http.Response) (result SetObject, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result.Value), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -2263,21 +2665,21 @@ // DeleteSiteSourceControl sends the delete site source control request. 
// // resourceGroupName is name of resource group name is name of web app -func (client SitesClient) DeleteSiteSourceControl(resourceGroupName string, name string) (result ObjectSet, ae error) { +func (client SitesClient) DeleteSiteSourceControl(resourceGroupName string, name string) (result SetObject, err error) { req, err := client.DeleteSiteSourceControlPreparer(resourceGroupName, name) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "DeleteSiteSourceControl", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "DeleteSiteSourceControl", nil, "Failure preparing request") } resp, err := client.DeleteSiteSourceControlSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "DeleteSiteSourceControl", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "DeleteSiteSourceControl", resp, "Failure sending request") } result, err = client.DeleteSiteSourceControlResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "DeleteSiteSourceControl", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", "DeleteSiteSourceControl", resp, "Failure responding to request") } return @@ -2286,966 +2688,1211 @@ // DeleteSiteSourceControlPreparer prepares the DeleteSiteSourceControl request. 
func (client SitesClient) DeleteSiteSourceControlPreparer(resourceGroupName string, name string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/sourcecontrols/web", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// DeleteSiteSourceControlSender sends the DeleteSiteSourceControl request. The method will close the +// http.Response Body if it receives an error. +func (client SitesClient) DeleteSiteSourceControlSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// DeleteSiteSourceControlResponder handles the response to the DeleteSiteSourceControl request. The method always +// closes the http.Response Body. +func (client SitesClient) DeleteSiteSourceControlResponder(resp *http.Response) (result SetObject, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result.Value), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// DeleteSiteSourceControlSlot sends the delete site source control slot +// request. +// +// resourceGroupName is name of resource group name is name of web app slot is +// name of web app slot. 
If not specified then will default to production +// slot. +func (client SitesClient) DeleteSiteSourceControlSlot(resourceGroupName string, name string, slot string) (result SetObject, err error) { + req, err := client.DeleteSiteSourceControlSlotPreparer(resourceGroupName, name, slot) + if err != nil { + return result, autorest.NewErrorWithError(err, "web.SitesClient", "DeleteSiteSourceControlSlot", nil, "Failure preparing request") + } + + resp, err := client.DeleteSiteSourceControlSlotSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "web.SitesClient", "DeleteSiteSourceControlSlot", resp, "Failure sending request") + } + + result, err = client.DeleteSiteSourceControlSlotResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "web.SitesClient", "DeleteSiteSourceControlSlot", resp, "Failure responding to request") + } + + return +} + +// DeleteSiteSourceControlSlotPreparer prepares the DeleteSiteSourceControlSlot request. 
+func (client SitesClient) DeleteSiteSourceControlSlotPreparer(resourceGroupName string, name string, slot string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "slot": autorest.Encode("path", slot), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/sourcecontrols/web", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// DeleteSiteSourceControlSlotSender sends the DeleteSiteSourceControlSlot request. The method will close the +// http.Response Body if it receives an error. +func (client SitesClient) DeleteSiteSourceControlSlotSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// DeleteSiteSourceControlSlotResponder handles the response to the DeleteSiteSourceControlSlot request. The method always +// closes the http.Response Body. +func (client SitesClient) DeleteSiteSourceControlSlotResponder(resp *http.Response) (result SetObject, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result.Value), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// DeleteSiteVNETConnection sends the delete site vnet connection request. 
+// +// resourceGroupName is the resource group name name is the name of the web +// app vnetName is the name of the Virtual Network +func (client SitesClient) DeleteSiteVNETConnection(resourceGroupName string, name string, vnetName string) (result SetObject, err error) { + req, err := client.DeleteSiteVNETConnectionPreparer(resourceGroupName, name, vnetName) + if err != nil { + return result, autorest.NewErrorWithError(err, "web.SitesClient", "DeleteSiteVNETConnection", nil, "Failure preparing request") + } + + resp, err := client.DeleteSiteVNETConnectionSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "web.SitesClient", "DeleteSiteVNETConnection", resp, "Failure sending request") + } + + result, err = client.DeleteSiteVNETConnectionResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "web.SitesClient", "DeleteSiteVNETConnection", resp, "Failure responding to request") + } + + return +} + +// DeleteSiteVNETConnectionPreparer prepares the DeleteSiteVNETConnection request. 
+func (client SitesClient) DeleteSiteVNETConnectionPreparer(resourceGroupName string, name string, vnetName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vnetName": autorest.Encode("path", vnetName), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/virtualNetworkConnections/{vnetName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// DeleteSiteVNETConnectionSender sends the DeleteSiteVNETConnection request. The method will close the +// http.Response Body if it receives an error. +func (client SitesClient) DeleteSiteVNETConnectionSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// DeleteSiteVNETConnectionResponder handles the response to the DeleteSiteVNETConnection request. The method always +// closes the http.Response Body. +func (client SitesClient) DeleteSiteVNETConnectionResponder(resp *http.Response) (result SetObject, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result.Value), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// DeleteSiteVNETConnectionSlot sends the delete site vnet connection slot +// request. +// +// resourceGroupName is the resource group name name is the name of the web +// app vnetName is the name of the Virtual Network slot is the name of the +// slot for this web app. 
+func (client SitesClient) DeleteSiteVNETConnectionSlot(resourceGroupName string, name string, vnetName string, slot string) (result SetObject, err error) { + req, err := client.DeleteSiteVNETConnectionSlotPreparer(resourceGroupName, name, vnetName, slot) + if err != nil { + return result, autorest.NewErrorWithError(err, "web.SitesClient", "DeleteSiteVNETConnectionSlot", nil, "Failure preparing request") + } + + resp, err := client.DeleteSiteVNETConnectionSlotSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "web.SitesClient", "DeleteSiteVNETConnectionSlot", resp, "Failure sending request") + } + + result, err = client.DeleteSiteVNETConnectionSlotResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "web.SitesClient", "DeleteSiteVNETConnectionSlot", resp, "Failure responding to request") + } + + return +} + +// DeleteSiteVNETConnectionSlotPreparer prepares the DeleteSiteVNETConnectionSlot request. 
+func (client SitesClient) DeleteSiteVNETConnectionSlotPreparer(resourceGroupName string, name string, vnetName string, slot string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "slot": autorest.Encode("path", slot), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vnetName": autorest.Encode("path", vnetName), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/virtualNetworkConnections/{vnetName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// DeleteSiteVNETConnectionSlotSender sends the DeleteSiteVNETConnectionSlot request. The method will close the +// http.Response Body if it receives an error. +func (client SitesClient) DeleteSiteVNETConnectionSlotSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// DeleteSiteVNETConnectionSlotResponder handles the response to the DeleteSiteVNETConnectionSlot request. The method always +// closes the http.Response Body. +func (client SitesClient) DeleteSiteVNETConnectionSlotResponder(resp *http.Response) (result SetObject, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result.Value), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// DiscoverSiteRestore sends the discover site restore request. 
+// +// resourceGroupName is name of resource group name is name of web app request +// is information on restore request +func (client SitesClient) DiscoverSiteRestore(resourceGroupName string, name string, request RestoreRequest) (result RestoreRequest, err error) { + req, err := client.DiscoverSiteRestorePreparer(resourceGroupName, name, request) + if err != nil { + return result, autorest.NewErrorWithError(err, "web.SitesClient", "DiscoverSiteRestore", nil, "Failure preparing request") + } + + resp, err := client.DiscoverSiteRestoreSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "web.SitesClient", "DiscoverSiteRestore", resp, "Failure sending request") + } + + result, err = client.DiscoverSiteRestoreResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "web.SitesClient", "DiscoverSiteRestore", resp, "Failure responding to request") + } + + return +} + +// DiscoverSiteRestorePreparer prepares the DiscoverSiteRestore request. 
+func (client SitesClient) DiscoverSiteRestorePreparer(resourceGroupName string, name string, request RestoreRequest) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, + preparer := autorest.CreatePreparer( autorest.AsJSON(), - autorest.AsDelete(), + autorest.AsPut(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/sourcecontrols/web"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/backups/discover", pathParameters), + autorest.WithJSON(request), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } -// DeleteSiteSourceControlSender sends the DeleteSiteSourceControl request. The method will close the +// DiscoverSiteRestoreSender sends the DiscoverSiteRestore request. The method will close the // http.Response Body if it receives an error. -func (client SitesClient) DeleteSiteSourceControlSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) +func (client SitesClient) DiscoverSiteRestoreSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) } -// DeleteSiteSourceControlResponder handles the response to the DeleteSiteSourceControl request. The method always +// DiscoverSiteRestoreResponder handles the response to the DiscoverSiteRestore request. The method always // closes the http.Response Body. 
-func (client SitesClient) DeleteSiteSourceControlResponder(resp *http.Response) (result ObjectSet, err error) { +func (client SitesClient) DiscoverSiteRestoreResponder(resp *http.Response) (result RestoreRequest, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result.Value), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} return } -// DeleteSiteSourceControlSlot sends the delete site source control slot -// request. +// DiscoverSiteRestoreSlot sends the discover site restore slot request. // -// resourceGroupName is name of resource group name is name of web app slot is -// name of web app slot. If not specified then will default to production -// slot. -func (client SitesClient) DeleteSiteSourceControlSlot(resourceGroupName string, name string, slot string) (result ObjectSet, ae error) { - req, err := client.DeleteSiteSourceControlSlotPreparer(resourceGroupName, name, slot) +// resourceGroupName is name of resource group name is name of web app request +// is information on restore request slot is name of web app slot. If not +// specified then will default to production slot. 
+func (client SitesClient) DiscoverSiteRestoreSlot(resourceGroupName string, name string, request RestoreRequest, slot string) (result RestoreRequest, err error) { + req, err := client.DiscoverSiteRestoreSlotPreparer(resourceGroupName, name, request, slot) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "DeleteSiteSourceControlSlot", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "DiscoverSiteRestoreSlot", nil, "Failure preparing request") } - resp, err := client.DeleteSiteSourceControlSlotSender(req) + resp, err := client.DiscoverSiteRestoreSlotSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "DeleteSiteSourceControlSlot", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "DiscoverSiteRestoreSlot", resp, "Failure sending request") } - result, err = client.DeleteSiteSourceControlSlotResponder(resp) + result, err = client.DiscoverSiteRestoreSlotResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "DeleteSiteSourceControlSlot", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", "DiscoverSiteRestoreSlot", resp, "Failure responding to request") } return } -// DeleteSiteSourceControlSlotPreparer prepares the DeleteSiteSourceControlSlot request. -func (client SitesClient) DeleteSiteSourceControlSlotPreparer(resourceGroupName string, name string, slot string) (*http.Request, error) { +// DiscoverSiteRestoreSlotPreparer prepares the DiscoverSiteRestoreSlot request. 
+func (client SitesClient) DiscoverSiteRestoreSlotPreparer(resourceGroupName string, name string, request RestoreRequest, slot string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "slot": url.QueryEscape(slot), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "slot": autorest.Encode("path", slot), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, + preparer := autorest.CreatePreparer( autorest.AsJSON(), - autorest.AsDelete(), + autorest.AsPut(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/sourcecontrols/web"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/backups/discover", pathParameters), + autorest.WithJSON(request), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } -// DeleteSiteSourceControlSlotSender sends the DeleteSiteSourceControlSlot request. The method will close the +// DiscoverSiteRestoreSlotSender sends the DiscoverSiteRestoreSlot request. The method will close the // http.Response Body if it receives an error. 
-func (client SitesClient) DeleteSiteSourceControlSlotSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) +func (client SitesClient) DiscoverSiteRestoreSlotSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) } -// DeleteSiteSourceControlSlotResponder handles the response to the DeleteSiteSourceControlSlot request. The method always +// DiscoverSiteRestoreSlotResponder handles the response to the DiscoverSiteRestoreSlot request. The method always // closes the http.Response Body. -func (client SitesClient) DeleteSiteSourceControlSlotResponder(resp *http.Response) (result ObjectSet, err error) { +func (client SitesClient) DiscoverSiteRestoreSlotResponder(resp *http.Response) (result RestoreRequest, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result.Value), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} return } -// DeleteSiteVNETConnection sends the delete site vnet connection request. +// GenerateNewSitePublishingPassword sends the generate new site publishing +// password request. 
// -// resourceGroupName is the resource group name name is the name of the web -// app vnetName is the name of the Virtual Network -func (client SitesClient) DeleteSiteVNETConnection(resourceGroupName string, name string, vnetName string) (result ObjectSet, ae error) { - req, err := client.DeleteSiteVNETConnectionPreparer(resourceGroupName, name, vnetName) +// resourceGroupName is name of resource group name is name of web app +func (client SitesClient) GenerateNewSitePublishingPassword(resourceGroupName string, name string) (result SetObject, err error) { + req, err := client.GenerateNewSitePublishingPasswordPreparer(resourceGroupName, name) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "DeleteSiteVNETConnection", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "GenerateNewSitePublishingPassword", nil, "Failure preparing request") } - resp, err := client.DeleteSiteVNETConnectionSender(req) + resp, err := client.GenerateNewSitePublishingPasswordSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "DeleteSiteVNETConnection", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "GenerateNewSitePublishingPassword", resp, "Failure sending request") } - result, err = client.DeleteSiteVNETConnectionResponder(resp) + result, err = client.GenerateNewSitePublishingPasswordResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "DeleteSiteVNETConnection", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", "GenerateNewSitePublishingPassword", resp, "Failure responding to request") } return } -// DeleteSiteVNETConnectionPreparer prepares the DeleteSiteVNETConnection request. 
-func (client SitesClient) DeleteSiteVNETConnectionPreparer(resourceGroupName string, name string, vnetName string) (*http.Request, error) { +// GenerateNewSitePublishingPasswordPreparer prepares the GenerateNewSitePublishingPassword request. +func (client SitesClient) GenerateNewSitePublishingPasswordPreparer(resourceGroupName string, name string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - "vnetName": url.QueryEscape(vnetName), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsDelete(), + preparer := autorest.CreatePreparer( + autorest.AsPost(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/virtualNetworkConnections/{vnetName}"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/newpassword", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } -// DeleteSiteVNETConnectionSender sends the DeleteSiteVNETConnection request. The method will close the +// GenerateNewSitePublishingPasswordSender sends the GenerateNewSitePublishingPassword request. The method will close the // http.Response Body if it receives an error. 
-func (client SitesClient) DeleteSiteVNETConnectionSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) +func (client SitesClient) GenerateNewSitePublishingPasswordSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) } -// DeleteSiteVNETConnectionResponder handles the response to the DeleteSiteVNETConnection request. The method always +// GenerateNewSitePublishingPasswordResponder handles the response to the GenerateNewSitePublishingPassword request. The method always // closes the http.Response Body. -func (client SitesClient) DeleteSiteVNETConnectionResponder(resp *http.Response) (result ObjectSet, err error) { +func (client SitesClient) GenerateNewSitePublishingPasswordResponder(resp *http.Response) (result SetObject, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result.Value), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} return } -// DeleteSiteVNETConnectionSlot sends the delete site vnet connection slot -// request. +// GenerateNewSitePublishingPasswordSlot sends the generate new site +// publishing password slot request. // -// resourceGroupName is the resource group name name is the name of the web -// app vnetName is the name of the Virtual Network slot is the name of the -// slot for this web app. -func (client SitesClient) DeleteSiteVNETConnectionSlot(resourceGroupName string, name string, vnetName string, slot string) (result ObjectSet, ae error) { - req, err := client.DeleteSiteVNETConnectionSlotPreparer(resourceGroupName, name, vnetName, slot) +// resourceGroupName is name of resource group name is name of web app slot is +// name of web app slot. If not specified then will default to production +// slot. 
+func (client SitesClient) GenerateNewSitePublishingPasswordSlot(resourceGroupName string, name string, slot string) (result SetObject, err error) { + req, err := client.GenerateNewSitePublishingPasswordSlotPreparer(resourceGroupName, name, slot) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "DeleteSiteVNETConnectionSlot", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "GenerateNewSitePublishingPasswordSlot", nil, "Failure preparing request") } - resp, err := client.DeleteSiteVNETConnectionSlotSender(req) + resp, err := client.GenerateNewSitePublishingPasswordSlotSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "DeleteSiteVNETConnectionSlot", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "GenerateNewSitePublishingPasswordSlot", resp, "Failure sending request") } - result, err = client.DeleteSiteVNETConnectionSlotResponder(resp) + result, err = client.GenerateNewSitePublishingPasswordSlotResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "DeleteSiteVNETConnectionSlot", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", "GenerateNewSitePublishingPasswordSlot", resp, "Failure responding to request") } return } -// DeleteSiteVNETConnectionSlotPreparer prepares the DeleteSiteVNETConnectionSlot request. -func (client SitesClient) DeleteSiteVNETConnectionSlotPreparer(resourceGroupName string, name string, vnetName string, slot string) (*http.Request, error) { +// GenerateNewSitePublishingPasswordSlotPreparer prepares the GenerateNewSitePublishingPasswordSlot request. 
+func (client SitesClient) GenerateNewSitePublishingPasswordSlotPreparer(resourceGroupName string, name string, slot string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "slot": url.QueryEscape(slot), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - "vnetName": url.QueryEscape(vnetName), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "slot": autorest.Encode("path", slot), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsDelete(), + preparer := autorest.CreatePreparer( + autorest.AsPost(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/virtualNetworkConnections/{vnetName}"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/newpassword", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } -// DeleteSiteVNETConnectionSlotSender sends the DeleteSiteVNETConnectionSlot request. The method will close the +// GenerateNewSitePublishingPasswordSlotSender sends the GenerateNewSitePublishingPasswordSlot request. The method will close the // http.Response Body if it receives an error. 
-func (client SitesClient) DeleteSiteVNETConnectionSlotSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) +func (client SitesClient) GenerateNewSitePublishingPasswordSlotSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) } -// DeleteSiteVNETConnectionSlotResponder handles the response to the DeleteSiteVNETConnectionSlot request. The method always +// GenerateNewSitePublishingPasswordSlotResponder handles the response to the GenerateNewSitePublishingPasswordSlot request. The method always // closes the http.Response Body. -func (client SitesClient) DeleteSiteVNETConnectionSlotResponder(resp *http.Response) (result ObjectSet, err error) { +func (client SitesClient) GenerateNewSitePublishingPasswordSlotResponder(resp *http.Response) (result SetObject, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result.Value), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} return } -// DiscoverSiteRestore sends the discover site restore request. +// GetDeletedSites sends the get deleted sites request. 
// -// resourceGroupName is name of resource group name is name of web app request -// is information on restore request -func (client SitesClient) DiscoverSiteRestore(resourceGroupName string, name string, request RestoreRequest) (result RestoreRequest, ae error) { - req, err := client.DiscoverSiteRestorePreparer(resourceGroupName, name, request) +// resourceGroupName is name of resource group propertiesToInclude is +// additional web app properties included in the response includeSiteTypes is +// types of apps included in the response +func (client SitesClient) GetDeletedSites(resourceGroupName string, propertiesToInclude string, includeSiteTypes string) (result DeletedSiteCollection, err error) { + req, err := client.GetDeletedSitesPreparer(resourceGroupName, propertiesToInclude, includeSiteTypes) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "DiscoverSiteRestore", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "GetDeletedSites", nil, "Failure preparing request") } - resp, err := client.DiscoverSiteRestoreSender(req) + resp, err := client.GetDeletedSitesSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "DiscoverSiteRestore", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "GetDeletedSites", resp, "Failure sending request") } - result, err = client.DiscoverSiteRestoreResponder(resp) + result, err = client.GetDeletedSitesResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "DiscoverSiteRestore", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", "GetDeletedSites", resp, "Failure responding to request") } return } -// DiscoverSiteRestorePreparer prepares the DiscoverSiteRestore request. 
-func (client SitesClient) DiscoverSiteRestorePreparer(resourceGroupName string, name string, request RestoreRequest) (*http.Request, error) { +// GetDeletedSitesPreparer prepares the GetDeletedSites request. +func (client SitesClient) GetDeletedSitesPreparer(resourceGroupName string, propertiesToInclude string, includeSiteTypes string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, + } + if len(propertiesToInclude) > 0 { + queryParameters["propertiesToInclude"] = autorest.Encode("query", propertiesToInclude) + } + if len(includeSiteTypes) > 0 { + queryParameters["includeSiteTypes"] = autorest.Encode("query", includeSiteTypes) } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsPut(), + preparer := autorest.CreatePreparer( + autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/backups/discover"), - autorest.WithJSON(request), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/deletedSites", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } -// DiscoverSiteRestoreSender sends the DiscoverSiteRestore request. The method will close the +// GetDeletedSitesSender sends the GetDeletedSites request. The method will close the // http.Response Body if it receives an error. 
-func (client SitesClient) DiscoverSiteRestoreSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) +func (client SitesClient) GetDeletedSitesSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) } -// DiscoverSiteRestoreResponder handles the response to the DiscoverSiteRestore request. The method always +// GetDeletedSitesResponder handles the response to the GetDeletedSites request. The method always // closes the http.Response Body. -func (client SitesClient) DiscoverSiteRestoreResponder(resp *http.Response) (result RestoreRequest, err error) { +func (client SitesClient) GetDeletedSitesResponder(resp *http.Response) (result DeletedSiteCollection, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} return } -// DiscoverSiteRestoreDeprecated sends the discover site restore deprecated -// request. +// GetDeployment sends the get deployment request. 
// -// resourceGroupName is name of resource group name is name of web app request -// is information on restore request -func (client SitesClient) DiscoverSiteRestoreDeprecated(resourceGroupName string, name string, request RestoreRequest) (result RestoreRequest, ae error) { - req, err := client.DiscoverSiteRestoreDeprecatedPreparer(resourceGroupName, name, request) +// resourceGroupName is name of resource group name is name of web app id is +// id of the deployment +func (client SitesClient) GetDeployment(resourceGroupName string, name string, id string) (result Deployment, err error) { + req, err := client.GetDeploymentPreparer(resourceGroupName, name, id) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "DiscoverSiteRestoreDeprecated", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "GetDeployment", nil, "Failure preparing request") } - resp, err := client.DiscoverSiteRestoreDeprecatedSender(req) + resp, err := client.GetDeploymentSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "DiscoverSiteRestoreDeprecated", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "GetDeployment", resp, "Failure sending request") } - result, err = client.DiscoverSiteRestoreDeprecatedResponder(resp) + result, err = client.GetDeploymentResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "DiscoverSiteRestoreDeprecated", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", "GetDeployment", resp, "Failure responding to request") } return } -// DiscoverSiteRestoreDeprecatedPreparer prepares the DiscoverSiteRestoreDeprecated request. 
-func (client SitesClient) DiscoverSiteRestoreDeprecatedPreparer(resourceGroupName string, name string, request RestoreRequest) (*http.Request, error) { +// GetDeploymentPreparer prepares the GetDeployment request. +func (client SitesClient) GetDeploymentPreparer(resourceGroupName string, name string, id string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "id": autorest.Encode("path", id), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsPut(), + preparer := autorest.CreatePreparer( + autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/restore/discover"), - autorest.WithJSON(request), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/deployments/{id}", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } -// DiscoverSiteRestoreDeprecatedSender sends the DiscoverSiteRestoreDeprecated request. The method will close the +// GetDeploymentSender sends the GetDeployment request. The method will close the // http.Response Body if it receives an error. 
-func (client SitesClient) DiscoverSiteRestoreDeprecatedSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) +func (client SitesClient) GetDeploymentSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) } -// DiscoverSiteRestoreDeprecatedResponder handles the response to the DiscoverSiteRestoreDeprecated request. The method always +// GetDeploymentResponder handles the response to the GetDeployment request. The method always // closes the http.Response Body. -func (client SitesClient) DiscoverSiteRestoreDeprecatedResponder(resp *http.Response) (result RestoreRequest, err error) { +func (client SitesClient) GetDeploymentResponder(resp *http.Response) (result Deployment, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} return } -// DiscoverSiteRestoreDeprecatedSlot sends the discover site restore -// deprecated slot request. +// GetDeployments sends the get deployments request. // -// resourceGroupName is name of resource group name is name of web app request -// is information on restore request slot is name of web app slot. If not -// specified then will default to production slot. 
-func (client SitesClient) DiscoverSiteRestoreDeprecatedSlot(resourceGroupName string, name string, request RestoreRequest, slot string) (result RestoreRequest, ae error) { - req, err := client.DiscoverSiteRestoreDeprecatedSlotPreparer(resourceGroupName, name, request, slot) +// resourceGroupName is name of resource group name is name of web app +func (client SitesClient) GetDeployments(resourceGroupName string, name string) (result DeploymentCollection, err error) { + req, err := client.GetDeploymentsPreparer(resourceGroupName, name) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "DiscoverSiteRestoreDeprecatedSlot", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "GetDeployments", nil, "Failure preparing request") } - resp, err := client.DiscoverSiteRestoreDeprecatedSlotSender(req) + resp, err := client.GetDeploymentsSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "DiscoverSiteRestoreDeprecatedSlot", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "GetDeployments", resp, "Failure sending request") } - result, err = client.DiscoverSiteRestoreDeprecatedSlotResponder(resp) + result, err = client.GetDeploymentsResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "DiscoverSiteRestoreDeprecatedSlot", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", "GetDeployments", resp, "Failure responding to request") } return } -// DiscoverSiteRestoreDeprecatedSlotPreparer prepares the DiscoverSiteRestoreDeprecatedSlot request. -func (client SitesClient) DiscoverSiteRestoreDeprecatedSlotPreparer(resourceGroupName string, name string, request RestoreRequest, slot string) (*http.Request, error) { +// GetDeploymentsPreparer prepares the GetDeployments request. 
+func (client SitesClient) GetDeploymentsPreparer(resourceGroupName string, name string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "slot": url.QueryEscape(slot), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsPut(), + preparer := autorest.CreatePreparer( + autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/restore/discover"), - autorest.WithJSON(request), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/deployments", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } -// DiscoverSiteRestoreDeprecatedSlotSender sends the DiscoverSiteRestoreDeprecatedSlot request. The method will close the +// GetDeploymentsSender sends the GetDeployments request. The method will close the // http.Response Body if it receives an error. -func (client SitesClient) DiscoverSiteRestoreDeprecatedSlotSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) +func (client SitesClient) GetDeploymentsSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) } -// DiscoverSiteRestoreDeprecatedSlotResponder handles the response to the DiscoverSiteRestoreDeprecatedSlot request. 
The method always +// GetDeploymentsResponder handles the response to the GetDeployments request. The method always // closes the http.Response Body. -func (client SitesClient) DiscoverSiteRestoreDeprecatedSlotResponder(resp *http.Response) (result RestoreRequest, err error) { +func (client SitesClient) GetDeploymentsResponder(resp *http.Response) (result DeploymentCollection, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} return } -// DiscoverSiteRestoreSlot sends the discover site restore slot request. +// GetDeploymentSlot sends the get deployment slot request. // -// resourceGroupName is name of resource group name is name of web app request -// is information on restore request slot is name of web app slot. If not -// specified then will default to production slot. -func (client SitesClient) DiscoverSiteRestoreSlot(resourceGroupName string, name string, request RestoreRequest, slot string) (result RestoreRequest, ae error) { - req, err := client.DiscoverSiteRestoreSlotPreparer(resourceGroupName, name, request, slot) +// resourceGroupName is name of resource group name is name of web app id is +// id of the deployment slot is name of web app slot. If not specified then +// will default to production slot. 
+func (client SitesClient) GetDeploymentSlot(resourceGroupName string, name string, id string, slot string) (result Deployment, err error) { + req, err := client.GetDeploymentSlotPreparer(resourceGroupName, name, id, slot) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "DiscoverSiteRestoreSlot", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "GetDeploymentSlot", nil, "Failure preparing request") } - resp, err := client.DiscoverSiteRestoreSlotSender(req) + resp, err := client.GetDeploymentSlotSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "DiscoverSiteRestoreSlot", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "GetDeploymentSlot", resp, "Failure sending request") } - result, err = client.DiscoverSiteRestoreSlotResponder(resp) + result, err = client.GetDeploymentSlotResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "DiscoverSiteRestoreSlot", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", "GetDeploymentSlot", resp, "Failure responding to request") } return } -// DiscoverSiteRestoreSlotPreparer prepares the DiscoverSiteRestoreSlot request. -func (client SitesClient) DiscoverSiteRestoreSlotPreparer(resourceGroupName string, name string, request RestoreRequest, slot string) (*http.Request, error) { +// GetDeploymentSlotPreparer prepares the GetDeploymentSlot request. 
+func (client SitesClient) GetDeploymentSlotPreparer(resourceGroupName string, name string, id string, slot string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "slot": url.QueryEscape(slot), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "id": autorest.Encode("path", id), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "slot": autorest.Encode("path", slot), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsPut(), + preparer := autorest.CreatePreparer( + autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/backups/discover"), - autorest.WithJSON(request), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/deployments/{id}", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } -// DiscoverSiteRestoreSlotSender sends the DiscoverSiteRestoreSlot request. The method will close the +// GetDeploymentSlotSender sends the GetDeploymentSlot request. The method will close the // http.Response Body if it receives an error. 
-func (client SitesClient) DiscoverSiteRestoreSlotSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) +func (client SitesClient) GetDeploymentSlotSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) } -// DiscoverSiteRestoreSlotResponder handles the response to the DiscoverSiteRestoreSlot request. The method always +// GetDeploymentSlotResponder handles the response to the GetDeploymentSlot request. The method always // closes the http.Response Body. -func (client SitesClient) DiscoverSiteRestoreSlotResponder(resp *http.Response) (result RestoreRequest, err error) { +func (client SitesClient) GetDeploymentSlotResponder(resp *http.Response) (result Deployment, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} return } -// GenerateNewSitePublishingPassword sends the generate new site publishing -// password request. +// GetDeploymentsSlot sends the get deployments slot request. // -// resourceGroupName is name of resource group name is name of web app -func (client SitesClient) GenerateNewSitePublishingPassword(resourceGroupName string, name string) (result ObjectSet, ae error) { - req, err := client.GenerateNewSitePublishingPasswordPreparer(resourceGroupName, name) +// resourceGroupName is name of resource group name is name of web app slot is +// name of web app slot. If not specified then will default to production +// slot. 
+func (client SitesClient) GetDeploymentsSlot(resourceGroupName string, name string, slot string) (result DeploymentCollection, err error) { + req, err := client.GetDeploymentsSlotPreparer(resourceGroupName, name, slot) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "GenerateNewSitePublishingPassword", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "GetDeploymentsSlot", nil, "Failure preparing request") } - resp, err := client.GenerateNewSitePublishingPasswordSender(req) + resp, err := client.GetDeploymentsSlotSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "GenerateNewSitePublishingPassword", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "GetDeploymentsSlot", resp, "Failure sending request") } - result, err = client.GenerateNewSitePublishingPasswordResponder(resp) + result, err = client.GetDeploymentsSlotResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "GenerateNewSitePublishingPassword", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", "GetDeploymentsSlot", resp, "Failure responding to request") } return } -// GenerateNewSitePublishingPasswordPreparer prepares the GenerateNewSitePublishingPassword request. -func (client SitesClient) GenerateNewSitePublishingPasswordPreparer(resourceGroupName string, name string) (*http.Request, error) { +// GetDeploymentsSlotPreparer prepares the GetDeploymentsSlot request. 
+func (client SitesClient) GetDeploymentsSlotPreparer(resourceGroupName string, name string, slot string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "slot": autorest.Encode("path", slot), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsPost(), + preparer := autorest.CreatePreparer( + autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/newpassword"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/deployments", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } -// GenerateNewSitePublishingPasswordSender sends the GenerateNewSitePublishingPassword request. The method will close the +// GetDeploymentsSlotSender sends the GetDeploymentsSlot request. The method will close the // http.Response Body if it receives an error. -func (client SitesClient) GenerateNewSitePublishingPasswordSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) +func (client SitesClient) GetDeploymentsSlotSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) } -// GenerateNewSitePublishingPasswordResponder handles the response to the GenerateNewSitePublishingPassword request. 
The method always +// GetDeploymentsSlotResponder handles the response to the GetDeploymentsSlot request. The method always // closes the http.Response Body. -func (client SitesClient) GenerateNewSitePublishingPasswordResponder(resp *http.Response) (result ObjectSet, err error) { +func (client SitesClient) GetDeploymentsSlotResponder(resp *http.Response) (result DeploymentCollection, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result.Value), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} return } -// GenerateNewSitePublishingPasswordSlot sends the generate new site -// publishing password slot request. +// GetInstanceDeployment sends the get instance deployment request. // -// resourceGroupName is name of resource group name is name of web app slot is -// name of web app slot. If not specified then will default to production -// slot. 
-func (client SitesClient) GenerateNewSitePublishingPasswordSlot(resourceGroupName string, name string, slot string) (result ObjectSet, ae error) { - req, err := client.GenerateNewSitePublishingPasswordSlotPreparer(resourceGroupName, name, slot) +// resourceGroupName is name of resource group name is name of web app id is +// id of the deployment instanceID is id of web app instance +func (client SitesClient) GetInstanceDeployment(resourceGroupName string, name string, id string, instanceID string) (result Deployment, err error) { + req, err := client.GetInstanceDeploymentPreparer(resourceGroupName, name, id, instanceID) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "GenerateNewSitePublishingPasswordSlot", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "GetInstanceDeployment", nil, "Failure preparing request") } - resp, err := client.GenerateNewSitePublishingPasswordSlotSender(req) + resp, err := client.GetInstanceDeploymentSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "GenerateNewSitePublishingPasswordSlot", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "GetInstanceDeployment", resp, "Failure sending request") } - result, err = client.GenerateNewSitePublishingPasswordSlotResponder(resp) + result, err = client.GetInstanceDeploymentResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "GenerateNewSitePublishingPasswordSlot", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", "GetInstanceDeployment", resp, "Failure responding to request") } return } -// GenerateNewSitePublishingPasswordSlotPreparer prepares the GenerateNewSitePublishingPasswordSlot request. 
-func (client SitesClient) GenerateNewSitePublishingPasswordSlotPreparer(resourceGroupName string, name string, slot string) (*http.Request, error) { +// GetInstanceDeploymentPreparer prepares the GetInstanceDeployment request. +func (client SitesClient) GetInstanceDeploymentPreparer(resourceGroupName string, name string, id string, instanceID string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "slot": url.QueryEscape(slot), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "id": autorest.Encode("path", id), + "instanceId": autorest.Encode("path", instanceID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsPost(), + preparer := autorest.CreatePreparer( + autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/newpassword"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/instances/{instanceId}/deployments/{id}", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } -// GenerateNewSitePublishingPasswordSlotSender sends the GenerateNewSitePublishingPasswordSlot request. The method will close the +// GetInstanceDeploymentSender sends the GetInstanceDeployment request. The method will close the // http.Response Body if it receives an error. 
-func (client SitesClient) GenerateNewSitePublishingPasswordSlotSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) +func (client SitesClient) GetInstanceDeploymentSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) } -// GenerateNewSitePublishingPasswordSlotResponder handles the response to the GenerateNewSitePublishingPasswordSlot request. The method always +// GetInstanceDeploymentResponder handles the response to the GetInstanceDeployment request. The method always // closes the http.Response Body. -func (client SitesClient) GenerateNewSitePublishingPasswordSlotResponder(resp *http.Response) (result ObjectSet, err error) { +func (client SitesClient) GetInstanceDeploymentResponder(resp *http.Response) (result Deployment, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result.Value), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} return } -// GetDeletedSites sends the get deleted sites request. +// GetInstanceDeployments sends the get instance deployments request. 
// -// resourceGroupName is name of resource group propertiesToInclude is -// additional web app properties included in the response includeSiteTypes is -// types of apps included in the response -func (client SitesClient) GetDeletedSites(resourceGroupName string, propertiesToInclude string, includeSiteTypes string) (result DeletedSiteCollection, ae error) { - req, err := client.GetDeletedSitesPreparer(resourceGroupName, propertiesToInclude, includeSiteTypes) +// resourceGroupName is name of resource group name is name of web app +// instanceID is id of web app instance +func (client SitesClient) GetInstanceDeployments(resourceGroupName string, name string, instanceID string) (result DeploymentCollection, err error) { + req, err := client.GetInstanceDeploymentsPreparer(resourceGroupName, name, instanceID) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "GetDeletedSites", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "GetInstanceDeployments", nil, "Failure preparing request") } - resp, err := client.GetDeletedSitesSender(req) + resp, err := client.GetInstanceDeploymentsSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "GetDeletedSites", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "GetInstanceDeployments", resp, "Failure sending request") } - result, err = client.GetDeletedSitesResponder(resp) + result, err = client.GetInstanceDeploymentsResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "GetDeletedSites", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", "GetInstanceDeployments", resp, "Failure responding to request") } return } -// GetDeletedSitesPreparer prepares the GetDeletedSites request. 
-func (client SitesClient) GetDeletedSitesPreparer(resourceGroupName string, propertiesToInclude string, includeSiteTypes string) (*http.Request, error) { +// GetInstanceDeploymentsPreparer prepares the GetInstanceDeployments request. +func (client SitesClient) GetInstanceDeploymentsPreparer(resourceGroupName string, name string, instanceID string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "instanceId": autorest.Encode("path", instanceID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - if len(propertiesToInclude) > 0 { - queryParameters["propertiesToInclude"] = propertiesToInclude - } - if len(includeSiteTypes) > 0 { - queryParameters["includeSiteTypes"] = includeSiteTypes + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/deletedSites"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/instances/{instanceId}/deployments", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } -// GetDeletedSitesSender sends the GetDeletedSites request. The method will close the +// GetInstanceDeploymentsSender sends the GetInstanceDeployments request. The method will close the // http.Response Body if it receives an error. 
-func (client SitesClient) GetDeletedSitesSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) +func (client SitesClient) GetInstanceDeploymentsSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) } -// GetDeletedSitesResponder handles the response to the GetDeletedSites request. The method always +// GetInstanceDeploymentsResponder handles the response to the GetInstanceDeployments request. The method always // closes the http.Response Body. -func (client SitesClient) GetDeletedSitesResponder(resp *http.Response) (result DeletedSiteCollection, err error) { +func (client SitesClient) GetInstanceDeploymentsResponder(resp *http.Response) (result DeploymentCollection, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} return } -// GetSite sends the get site request. +// GetInstanceDeploymentSlot sends the get instance deployment slot request. // -// resourceGroupName is name of resource group name is name of web app -// propertiesToInclude is additional web app properties included in the -// response -func (client SitesClient) GetSite(resourceGroupName string, name string, propertiesToInclude string) (result Site, ae error) { - req, err := client.GetSitePreparer(resourceGroupName, name, propertiesToInclude) +// resourceGroupName is name of resource group name is name of web app id is +// id of the deployment slot is name of web app slot. If not specified then +// will default to production slot. 
instanceID is id of web app instance +func (client SitesClient) GetInstanceDeploymentSlot(resourceGroupName string, name string, id string, slot string, instanceID string) (result Deployment, err error) { + req, err := client.GetInstanceDeploymentSlotPreparer(resourceGroupName, name, id, slot, instanceID) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "GetSite", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "GetInstanceDeploymentSlot", nil, "Failure preparing request") } - resp, err := client.GetSiteSender(req) + resp, err := client.GetInstanceDeploymentSlotSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "GetSite", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "GetInstanceDeploymentSlot", resp, "Failure sending request") } - result, err = client.GetSiteResponder(resp) + result, err = client.GetInstanceDeploymentSlotResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "GetSite", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", "GetInstanceDeploymentSlot", resp, "Failure responding to request") } return } -// GetSitePreparer prepares the GetSite request. -func (client SitesClient) GetSitePreparer(resourceGroupName string, name string, propertiesToInclude string) (*http.Request, error) { +// GetInstanceDeploymentSlotPreparer prepares the GetInstanceDeploymentSlot request. 
+func (client SitesClient) GetInstanceDeploymentSlotPreparer(resourceGroupName string, name string, id string, slot string, instanceID string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "id": autorest.Encode("path", id), + "instanceId": autorest.Encode("path", instanceID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "slot": autorest.Encode("path", slot), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - if len(propertiesToInclude) > 0 { - queryParameters["propertiesToInclude"] = propertiesToInclude + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/instances/{instanceId}/deployments/{id}", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } -// GetSiteSender sends the GetSite request. The method will close the +// GetInstanceDeploymentSlotSender sends the GetInstanceDeploymentSlot request. The method will close the // http.Response Body if it receives an error. 
-func (client SitesClient) GetSiteSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) +func (client SitesClient) GetInstanceDeploymentSlotSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) } -// GetSiteResponder handles the response to the GetSite request. The method always +// GetInstanceDeploymentSlotResponder handles the response to the GetInstanceDeploymentSlot request. The method always // closes the http.Response Body. -func (client SitesClient) GetSiteResponder(resp *http.Response) (result Site, err error) { +func (client SitesClient) GetInstanceDeploymentSlotResponder(resp *http.Response) (result Deployment, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} return } -// GetSiteBackupConfiguration sends the get site backup configuration request. +// GetInstanceDeploymentsSlot sends the get instance deployments slot request. // -// resourceGroupName is name of resource group name is name of web app -func (client SitesClient) GetSiteBackupConfiguration(resourceGroupName string, name string) (result BackupRequest, ae error) { - req, err := client.GetSiteBackupConfigurationPreparer(resourceGroupName, name) +// resourceGroupName is name of resource group name is name of web app slot is +// name of web app slot. If not specified then will default to production +// slot. 
instanceID is id of web app instance +func (client SitesClient) GetInstanceDeploymentsSlot(resourceGroupName string, name string, slot string, instanceID string) (result DeploymentCollection, err error) { + req, err := client.GetInstanceDeploymentsSlotPreparer(resourceGroupName, name, slot, instanceID) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "GetSiteBackupConfiguration", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "GetInstanceDeploymentsSlot", nil, "Failure preparing request") } - resp, err := client.GetSiteBackupConfigurationSender(req) + resp, err := client.GetInstanceDeploymentsSlotSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "GetSiteBackupConfiguration", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "GetInstanceDeploymentsSlot", resp, "Failure sending request") } - result, err = client.GetSiteBackupConfigurationResponder(resp) + result, err = client.GetInstanceDeploymentsSlotResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "GetSiteBackupConfiguration", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", "GetInstanceDeploymentsSlot", resp, "Failure responding to request") } return } -// GetSiteBackupConfigurationPreparer prepares the GetSiteBackupConfiguration request. -func (client SitesClient) GetSiteBackupConfigurationPreparer(resourceGroupName string, name string) (*http.Request, error) { +// GetInstanceDeploymentsSlotPreparer prepares the GetInstanceDeploymentsSlot request. 
+func (client SitesClient) GetInstanceDeploymentsSlotPreparer(resourceGroupName string, name string, slot string, instanceID string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "instanceId": autorest.Encode("path", instanceID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "slot": autorest.Encode("path", slot), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsPost(), + preparer := autorest.CreatePreparer( + autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/config/backup/list"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/instances/{instanceId}/deployments", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } -// GetSiteBackupConfigurationSender sends the GetSiteBackupConfiguration request. The method will close the +// GetInstanceDeploymentsSlotSender sends the GetInstanceDeploymentsSlot request. The method will close the // http.Response Body if it receives an error. 
-func (client SitesClient) GetSiteBackupConfigurationSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) +func (client SitesClient) GetInstanceDeploymentsSlotSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) } -// GetSiteBackupConfigurationResponder handles the response to the GetSiteBackupConfiguration request. The method always +// GetInstanceDeploymentsSlotResponder handles the response to the GetInstanceDeploymentsSlot request. The method always // closes the http.Response Body. -func (client SitesClient) GetSiteBackupConfigurationResponder(resp *http.Response) (result BackupRequest, err error) { +func (client SitesClient) GetInstanceDeploymentsSlotResponder(resp *http.Response) (result DeploymentCollection, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} return } -// GetSiteBackupConfigurationDeprecated sends the get site backup -// configuration deprecated request. +// GetSite sends the get site request. 
// // resourceGroupName is name of resource group name is name of web app -func (client SitesClient) GetSiteBackupConfigurationDeprecated(resourceGroupName string, name string) (result BackupRequest, ae error) { - req, err := client.GetSiteBackupConfigurationDeprecatedPreparer(resourceGroupName, name) +// propertiesToInclude is additional web app properties included in the +// response +func (client SitesClient) GetSite(resourceGroupName string, name string, propertiesToInclude string) (result Site, err error) { + req, err := client.GetSitePreparer(resourceGroupName, name, propertiesToInclude) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "GetSiteBackupConfigurationDeprecated", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "GetSite", nil, "Failure preparing request") } - resp, err := client.GetSiteBackupConfigurationDeprecatedSender(req) + resp, err := client.GetSiteSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "GetSiteBackupConfigurationDeprecated", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "GetSite", resp, "Failure sending request") } - result, err = client.GetSiteBackupConfigurationDeprecatedResponder(resp) + result, err = client.GetSiteResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "GetSiteBackupConfigurationDeprecated", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", "GetSite", resp, "Failure responding to request") } return } -// GetSiteBackupConfigurationDeprecatedPreparer prepares the GetSiteBackupConfigurationDeprecated request. -func (client SitesClient) GetSiteBackupConfigurationDeprecatedPreparer(resourceGroupName string, name string) (*http.Request, error) { +// GetSitePreparer prepares the GetSite request. 
+func (client SitesClient) GetSitePreparer(resourceGroupName string, name string, propertiesToInclude string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, + } + if len(propertiesToInclude) > 0 { + queryParameters["propertiesToInclude"] = autorest.Encode("query", propertiesToInclude) } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/backup/config"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } -// GetSiteBackupConfigurationDeprecatedSender sends the GetSiteBackupConfigurationDeprecated request. The method will close the +// GetSiteSender sends the GetSite request. The method will close the // http.Response Body if it receives an error. 
-func (client SitesClient) GetSiteBackupConfigurationDeprecatedSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) +func (client SitesClient) GetSiteSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) } -// GetSiteBackupConfigurationDeprecatedResponder handles the response to the GetSiteBackupConfigurationDeprecated request. The method always +// GetSiteResponder handles the response to the GetSite request. The method always // closes the http.Response Body. -func (client SitesClient) GetSiteBackupConfigurationDeprecatedResponder(resp *http.Response) (result BackupRequest, err error) { +func (client SitesClient) GetSiteResponder(resp *http.Response) (result Site, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} return } -// GetSiteBackupConfigurationDeprecatedSlot sends the get site backup -// configuration deprecated slot request. +// GetSiteBackupConfiguration sends the get site backup configuration request. // -// resourceGroupName is name of resource group name is name of web app slot is -// name of web app slot. If not specified then will default to production -// slot. 
-func (client SitesClient) GetSiteBackupConfigurationDeprecatedSlot(resourceGroupName string, name string, slot string) (result BackupRequest, ae error) { - req, err := client.GetSiteBackupConfigurationDeprecatedSlotPreparer(resourceGroupName, name, slot) +// resourceGroupName is name of resource group name is name of web app +func (client SitesClient) GetSiteBackupConfiguration(resourceGroupName string, name string) (result BackupRequest, err error) { + req, err := client.GetSiteBackupConfigurationPreparer(resourceGroupName, name) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "GetSiteBackupConfigurationDeprecatedSlot", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "GetSiteBackupConfiguration", nil, "Failure preparing request") } - resp, err := client.GetSiteBackupConfigurationDeprecatedSlotSender(req) + resp, err := client.GetSiteBackupConfigurationSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "GetSiteBackupConfigurationDeprecatedSlot", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "GetSiteBackupConfiguration", resp, "Failure sending request") } - result, err = client.GetSiteBackupConfigurationDeprecatedSlotResponder(resp) + result, err = client.GetSiteBackupConfigurationResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "GetSiteBackupConfigurationDeprecatedSlot", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", "GetSiteBackupConfiguration", resp, "Failure responding to request") } return } -// GetSiteBackupConfigurationDeprecatedSlotPreparer prepares the GetSiteBackupConfigurationDeprecatedSlot request. 
-func (client SitesClient) GetSiteBackupConfigurationDeprecatedSlotPreparer(resourceGroupName string, name string, slot string) (*http.Request, error) { +// GetSiteBackupConfigurationPreparer prepares the GetSiteBackupConfiguration request. +func (client SitesClient) GetSiteBackupConfigurationPreparer(resourceGroupName string, name string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "slot": url.QueryEscape(slot), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsGet(), + preparer := autorest.CreatePreparer( + autorest.AsPost(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/backup/config"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/config/backup/list", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } -// GetSiteBackupConfigurationDeprecatedSlotSender sends the GetSiteBackupConfigurationDeprecatedSlot request. The method will close the +// GetSiteBackupConfigurationSender sends the GetSiteBackupConfiguration request. The method will close the // http.Response Body if it receives an error. 
-func (client SitesClient) GetSiteBackupConfigurationDeprecatedSlotSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) +func (client SitesClient) GetSiteBackupConfigurationSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) } -// GetSiteBackupConfigurationDeprecatedSlotResponder handles the response to the GetSiteBackupConfigurationDeprecatedSlot request. The method always +// GetSiteBackupConfigurationResponder handles the response to the GetSiteBackupConfiguration request. The method always // closes the http.Response Body. -func (client SitesClient) GetSiteBackupConfigurationDeprecatedSlotResponder(resp *http.Response) (result BackupRequest, err error) { +func (client SitesClient) GetSiteBackupConfigurationResponder(resp *http.Response) (result BackupRequest, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -3258,21 +3905,21 @@ // resourceGroupName is name of resource group name is name of web app slot is // name of web app slot. If not specified then will default to production // slot. 
-func (client SitesClient) GetSiteBackupConfigurationSlot(resourceGroupName string, name string, slot string) (result BackupRequest, ae error) { +func (client SitesClient) GetSiteBackupConfigurationSlot(resourceGroupName string, name string, slot string) (result BackupRequest, err error) { req, err := client.GetSiteBackupConfigurationSlotPreparer(resourceGroupName, name, slot) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "GetSiteBackupConfigurationSlot", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "GetSiteBackupConfigurationSlot", nil, "Failure preparing request") } resp, err := client.GetSiteBackupConfigurationSlotSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "GetSiteBackupConfigurationSlot", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "GetSiteBackupConfigurationSlot", resp, "Failure sending request") } result, err = client.GetSiteBackupConfigurationSlotResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "GetSiteBackupConfigurationSlot", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", "GetSiteBackupConfigurationSlot", resp, "Failure responding to request") } return @@ -3281,29 +3928,28 @@ // GetSiteBackupConfigurationSlotPreparer prepares the GetSiteBackupConfigurationSlot request. 
func (client SitesClient) GetSiteBackupConfigurationSlotPreparer(resourceGroupName string, name string, slot string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "slot": url.QueryEscape(slot), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "slot": autorest.Encode("path", slot), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsPost(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/config/backup/list"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/config/backup/list", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetSiteBackupConfigurationSlotSender sends the GetSiteBackupConfigurationSlot request. The method will close the // http.Response Body if it receives an error. func (client SitesClient) GetSiteBackupConfigurationSlotSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetSiteBackupConfigurationSlotResponder handles the response to the GetSiteBackupConfigurationSlot request. 
The method always @@ -3312,7 +3958,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -3323,21 +3969,21 @@ // // resourceGroupName is name of resource group name is name of web app // backupID is id of backup -func (client SitesClient) GetSiteBackupStatus(resourceGroupName string, name string, backupID string) (result BackupItem, ae error) { +func (client SitesClient) GetSiteBackupStatus(resourceGroupName string, name string, backupID string) (result BackupItem, err error) { req, err := client.GetSiteBackupStatusPreparer(resourceGroupName, name, backupID) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "GetSiteBackupStatus", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "GetSiteBackupStatus", nil, "Failure preparing request") } resp, err := client.GetSiteBackupStatusSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "GetSiteBackupStatus", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "GetSiteBackupStatus", resp, "Failure sending request") } result, err = client.GetSiteBackupStatusResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "GetSiteBackupStatus", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", "GetSiteBackupStatus", resp, "Failure responding to request") } return @@ -3346,29 +3992,28 @@ // GetSiteBackupStatusPreparer prepares the GetSiteBackupStatus request. 
func (client SitesClient) GetSiteBackupStatusPreparer(resourceGroupName string, name string, backupID string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "backupId": url.QueryEscape(backupID), - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "backupId": autorest.Encode("path", backupID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/backups/{backupId}"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/backups/{backupId}", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetSiteBackupStatusSender sends the GetSiteBackupStatus request. The method will close the // http.Response Body if it receives an error. func (client SitesClient) GetSiteBackupStatusSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetSiteBackupStatusResponder handles the response to the GetSiteBackupStatus request. 
The method always @@ -3377,7 +4022,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -3388,21 +4033,21 @@ // // resourceGroupName is name of resource group name is name of web app // backupID is id of backup request is information on backup request -func (client SitesClient) GetSiteBackupStatusSecrets(resourceGroupName string, name string, backupID string, request BackupRequest) (result BackupItem, ae error) { +func (client SitesClient) GetSiteBackupStatusSecrets(resourceGroupName string, name string, backupID string, request BackupRequest) (result BackupItem, err error) { req, err := client.GetSiteBackupStatusSecretsPreparer(resourceGroupName, name, backupID, request) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "GetSiteBackupStatusSecrets", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "GetSiteBackupStatusSecrets", nil, "Failure preparing request") } resp, err := client.GetSiteBackupStatusSecretsSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "GetSiteBackupStatusSecrets", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "GetSiteBackupStatusSecrets", resp, "Failure sending request") } result, err = client.GetSiteBackupStatusSecretsResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "GetSiteBackupStatusSecrets", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", "GetSiteBackupStatusSecrets", resp, "Failure responding to request") } return @@ -3411,30 +4056,30 @@ // GetSiteBackupStatusSecretsPreparer prepares the 
GetSiteBackupStatusSecrets request. func (client SitesClient) GetSiteBackupStatusSecretsPreparer(resourceGroupName string, name string, backupID string, request BackupRequest) (*http.Request, error) { pathParameters := map[string]interface{}{ - "backupId": url.QueryEscape(backupID), - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "backupId": autorest.Encode("path", backupID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, + preparer := autorest.CreatePreparer( autorest.AsJSON(), autorest.AsPost(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/backups/{backupId}/list"), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/backups/{backupId}/list", pathParameters), autorest.WithJSON(request), - autorest.WithPathParameters(pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetSiteBackupStatusSecretsSender sends the GetSiteBackupStatusSecrets request. The method will close the // http.Response Body if it receives an error. func (client SitesClient) GetSiteBackupStatusSecretsSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetSiteBackupStatusSecretsResponder handles the response to the GetSiteBackupStatusSecrets request. 
The method always @@ -3443,7 +4088,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -3457,21 +4102,21 @@ // backupID is id of backup request is information on backup request slot is // name of web app slot. If not specified then will default to production // slot. -func (client SitesClient) GetSiteBackupStatusSecretsSlot(resourceGroupName string, name string, backupID string, request BackupRequest, slot string) (result BackupItem, ae error) { +func (client SitesClient) GetSiteBackupStatusSecretsSlot(resourceGroupName string, name string, backupID string, request BackupRequest, slot string) (result BackupItem, err error) { req, err := client.GetSiteBackupStatusSecretsSlotPreparer(resourceGroupName, name, backupID, request, slot) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "GetSiteBackupStatusSecretsSlot", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "GetSiteBackupStatusSecretsSlot", nil, "Failure preparing request") } resp, err := client.GetSiteBackupStatusSecretsSlotSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "GetSiteBackupStatusSecretsSlot", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "GetSiteBackupStatusSecretsSlot", resp, "Failure sending request") } result, err = client.GetSiteBackupStatusSecretsSlotResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "GetSiteBackupStatusSecretsSlot", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", "GetSiteBackupStatusSecretsSlot", resp, "Failure responding to request") } return @@ 
-3480,31 +4125,31 @@ // GetSiteBackupStatusSecretsSlotPreparer prepares the GetSiteBackupStatusSecretsSlot request. func (client SitesClient) GetSiteBackupStatusSecretsSlotPreparer(resourceGroupName string, name string, backupID string, request BackupRequest, slot string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "backupId": url.QueryEscape(backupID), - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "slot": url.QueryEscape(slot), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "backupId": autorest.Encode("path", backupID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "slot": autorest.Encode("path", slot), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, + preparer := autorest.CreatePreparer( autorest.AsJSON(), autorest.AsPost(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/backups/{backupId}/list"), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/backups/{backupId}/list", pathParameters), autorest.WithJSON(request), - autorest.WithPathParameters(pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetSiteBackupStatusSecretsSlotSender sends the GetSiteBackupStatusSecretsSlot request. The method will close the // http.Response Body if it receives an error. 
func (client SitesClient) GetSiteBackupStatusSecretsSlotSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetSiteBackupStatusSecretsSlotResponder handles the response to the GetSiteBackupStatusSecretsSlot request. The method always @@ -3513,7 +4158,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -3525,21 +4170,21 @@ // resourceGroupName is name of resource group name is name of web app // backupID is id of backup slot is name of web app slot. If not specified // then will default to production slot. -func (client SitesClient) GetSiteBackupStatusSlot(resourceGroupName string, name string, backupID string, slot string) (result BackupItem, ae error) { +func (client SitesClient) GetSiteBackupStatusSlot(resourceGroupName string, name string, backupID string, slot string) (result BackupItem, err error) { req, err := client.GetSiteBackupStatusSlotPreparer(resourceGroupName, name, backupID, slot) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "GetSiteBackupStatusSlot", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "GetSiteBackupStatusSlot", nil, "Failure preparing request") } resp, err := client.GetSiteBackupStatusSlotSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "GetSiteBackupStatusSlot", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "GetSiteBackupStatusSlot", resp, "Failure sending request") } result, err = client.GetSiteBackupStatusSlotResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, 
"web/SitesClient", "GetSiteBackupStatusSlot", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", "GetSiteBackupStatusSlot", resp, "Failure responding to request") } return @@ -3548,30 +4193,29 @@ // GetSiteBackupStatusSlotPreparer prepares the GetSiteBackupStatusSlot request. func (client SitesClient) GetSiteBackupStatusSlotPreparer(resourceGroupName string, name string, backupID string, slot string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "backupId": url.QueryEscape(backupID), - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "slot": url.QueryEscape(slot), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "backupId": autorest.Encode("path", backupID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "slot": autorest.Encode("path", slot), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/backups/{backupId}"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/backups/{backupId}", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetSiteBackupStatusSlotSender sends the GetSiteBackupStatusSlot request. The method will close the // http.Response Body if it receives an error. 
func (client SitesClient) GetSiteBackupStatusSlotSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetSiteBackupStatusSlotResponder handles the response to the GetSiteBackupStatusSlot request. The method always @@ -3580,7 +4224,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -3590,21 +4234,21 @@ // GetSiteConfig sends the get site config request. // // resourceGroupName is name of resource group name is name of web app -func (client SitesClient) GetSiteConfig(resourceGroupName string, name string) (result SiteConfig, ae error) { +func (client SitesClient) GetSiteConfig(resourceGroupName string, name string) (result SiteConfig, err error) { req, err := client.GetSiteConfigPreparer(resourceGroupName, name) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "GetSiteConfig", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "GetSiteConfig", nil, "Failure preparing request") } resp, err := client.GetSiteConfigSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "GetSiteConfig", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "GetSiteConfig", resp, "Failure sending request") } result, err = client.GetSiteConfigResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "GetSiteConfig", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", "GetSiteConfig", resp, "Failure responding to request") } return @@ -3613,28 +4257,27 @@ // GetSiteConfigPreparer prepares the 
GetSiteConfig request. func (client SitesClient) GetSiteConfigPreparer(resourceGroupName string, name string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/config/web"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/config/web", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetSiteConfigSender sends the GetSiteConfig request. The method will close the // http.Response Body if it receives an error. func (client SitesClient) GetSiteConfigSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetSiteConfigResponder handles the response to the GetSiteConfig request. 
The method always @@ -3643,7 +4286,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -3655,21 +4298,21 @@ // resourceGroupName is name of resource group name is name of web app slot is // name of web app slot. If not specified then will default to production // slot. -func (client SitesClient) GetSiteConfigSlot(resourceGroupName string, name string, slot string) (result SiteConfig, ae error) { +func (client SitesClient) GetSiteConfigSlot(resourceGroupName string, name string, slot string) (result SiteConfig, err error) { req, err := client.GetSiteConfigSlotPreparer(resourceGroupName, name, slot) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "GetSiteConfigSlot", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "GetSiteConfigSlot", nil, "Failure preparing request") } resp, err := client.GetSiteConfigSlotSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "GetSiteConfigSlot", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "GetSiteConfigSlot", resp, "Failure sending request") } result, err = client.GetSiteConfigSlotResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "GetSiteConfigSlot", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", "GetSiteConfigSlot", resp, "Failure responding to request") } return @@ -3678,29 +4321,28 @@ // GetSiteConfigSlotPreparer prepares the GetSiteConfigSlot request. 
func (client SitesClient) GetSiteConfigSlotPreparer(resourceGroupName string, name string, slot string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "slot": url.QueryEscape(slot), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "slot": autorest.Encode("path", slot), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/config/web"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/config/web", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetSiteConfigSlotSender sends the GetSiteConfigSlot request. The method will close the // http.Response Body if it receives an error. func (client SitesClient) GetSiteConfigSlotSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetSiteConfigSlotResponder handles the response to the GetSiteConfigSlot request. 
The method always @@ -3709,7 +4351,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -3720,21 +4362,21 @@ // // resourceGroupName is name of resource group name is name of web app // hostName is name of host -func (client SitesClient) GetSiteHostNameBinding(resourceGroupName string, name string, hostName string) (result HostNameBinding, ae error) { +func (client SitesClient) GetSiteHostNameBinding(resourceGroupName string, name string, hostName string) (result HostNameBinding, err error) { req, err := client.GetSiteHostNameBindingPreparer(resourceGroupName, name, hostName) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "GetSiteHostNameBinding", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "GetSiteHostNameBinding", nil, "Failure preparing request") } resp, err := client.GetSiteHostNameBindingSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "GetSiteHostNameBinding", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "GetSiteHostNameBinding", resp, "Failure sending request") } result, err = client.GetSiteHostNameBindingResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "GetSiteHostNameBinding", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", "GetSiteHostNameBinding", resp, "Failure responding to request") } return @@ -3743,29 +4385,28 @@ // GetSiteHostNameBindingPreparer prepares the GetSiteHostNameBinding request. 
func (client SitesClient) GetSiteHostNameBindingPreparer(resourceGroupName string, name string, hostName string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "hostName": url.QueryEscape(hostName), - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "hostName": autorest.Encode("path", hostName), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/hostNameBindings/{hostName}"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/hostNameBindings/{hostName}", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetSiteHostNameBindingSender sends the GetSiteHostNameBinding request. The method will close the // http.Response Body if it receives an error. func (client SitesClient) GetSiteHostNameBindingSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetSiteHostNameBindingResponder handles the response to the GetSiteHostNameBinding request. 
The method always @@ -3774,7 +4415,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -3784,21 +4425,21 @@ // GetSiteHostNameBindings sends the get site host name bindings request. // // resourceGroupName is name of resource group name is name of web app -func (client SitesClient) GetSiteHostNameBindings(resourceGroupName string, name string) (result HostNameBindingCollection, ae error) { +func (client SitesClient) GetSiteHostNameBindings(resourceGroupName string, name string) (result HostNameBindingCollection, err error) { req, err := client.GetSiteHostNameBindingsPreparer(resourceGroupName, name) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "GetSiteHostNameBindings", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "GetSiteHostNameBindings", nil, "Failure preparing request") } resp, err := client.GetSiteHostNameBindingsSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "GetSiteHostNameBindings", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "GetSiteHostNameBindings", resp, "Failure sending request") } result, err = client.GetSiteHostNameBindingsResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "GetSiteHostNameBindings", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", "GetSiteHostNameBindings", resp, "Failure responding to request") } return @@ -3807,28 +4448,27 @@ // GetSiteHostNameBindingsPreparer prepares the GetSiteHostNameBindings request. 
func (client SitesClient) GetSiteHostNameBindingsPreparer(resourceGroupName string, name string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/hostNameBindings"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/hostNameBindings", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetSiteHostNameBindingsSender sends the GetSiteHostNameBindings request. The method will close the // http.Response Body if it receives an error. func (client SitesClient) GetSiteHostNameBindingsSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetSiteHostNameBindingsResponder handles the response to the GetSiteHostNameBindings request. 
The method always @@ -3837,7 +4477,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -3850,21 +4490,21 @@ // resourceGroupName is name of resource group name is name of web app slot is // name of web app slot. If not specified then will default to production // slot. hostName is name of host -func (client SitesClient) GetSiteHostNameBindingSlot(resourceGroupName string, name string, slot string, hostName string) (result HostNameBinding, ae error) { +func (client SitesClient) GetSiteHostNameBindingSlot(resourceGroupName string, name string, slot string, hostName string) (result HostNameBinding, err error) { req, err := client.GetSiteHostNameBindingSlotPreparer(resourceGroupName, name, slot, hostName) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "GetSiteHostNameBindingSlot", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "GetSiteHostNameBindingSlot", nil, "Failure preparing request") } resp, err := client.GetSiteHostNameBindingSlotSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "GetSiteHostNameBindingSlot", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "GetSiteHostNameBindingSlot", resp, "Failure sending request") } result, err = client.GetSiteHostNameBindingSlotResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "GetSiteHostNameBindingSlot", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", "GetSiteHostNameBindingSlot", resp, "Failure responding to request") } return @@ -3873,30 +4513,29 @@ // GetSiteHostNameBindingSlotPreparer 
prepares the GetSiteHostNameBindingSlot request. func (client SitesClient) GetSiteHostNameBindingSlotPreparer(resourceGroupName string, name string, slot string, hostName string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "hostName": url.QueryEscape(hostName), - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "slot": url.QueryEscape(slot), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "hostName": autorest.Encode("path", hostName), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "slot": autorest.Encode("path", slot), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/hostNameBindings/{hostName}"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/hostNameBindings/{hostName}", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetSiteHostNameBindingSlotSender sends the GetSiteHostNameBindingSlot request. The method will close the // http.Response Body if it receives an error. func (client SitesClient) GetSiteHostNameBindingSlotSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetSiteHostNameBindingSlotResponder handles the response to the GetSiteHostNameBindingSlot request. 
The method always @@ -3905,7 +4544,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -3918,21 +4557,21 @@ // resourceGroupName is name of resource group name is name of web app slot is // name of web app slot. If not specified then will default to production // slot. -func (client SitesClient) GetSiteHostNameBindingsSlot(resourceGroupName string, name string, slot string) (result HostNameBindingCollection, ae error) { +func (client SitesClient) GetSiteHostNameBindingsSlot(resourceGroupName string, name string, slot string) (result HostNameBindingCollection, err error) { req, err := client.GetSiteHostNameBindingsSlotPreparer(resourceGroupName, name, slot) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "GetSiteHostNameBindingsSlot", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "GetSiteHostNameBindingsSlot", nil, "Failure preparing request") } resp, err := client.GetSiteHostNameBindingsSlotSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "GetSiteHostNameBindingsSlot", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "GetSiteHostNameBindingsSlot", resp, "Failure sending request") } result, err = client.GetSiteHostNameBindingsSlotResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "GetSiteHostNameBindingsSlot", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", "GetSiteHostNameBindingsSlot", resp, "Failure responding to request") } return @@ -3941,29 +4580,28 @@ // GetSiteHostNameBindingsSlotPreparer prepares the GetSiteHostNameBindingsSlot 
request. func (client SitesClient) GetSiteHostNameBindingsSlotPreparer(resourceGroupName string, name string, slot string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "slot": url.QueryEscape(slot), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "slot": autorest.Encode("path", slot), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/hostNameBindings"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/hostNameBindings", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetSiteHostNameBindingsSlotSender sends the GetSiteHostNameBindingsSlot request. The method will close the // http.Response Body if it receives an error. func (client SitesClient) GetSiteHostNameBindingsSlotSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetSiteHostNameBindingsSlotResponder handles the response to the GetSiteHostNameBindingsSlot request. 
The method always @@ -3972,7 +4610,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -3982,21 +4620,21 @@ // GetSiteInstanceIdentifiers sends the get site instance identifiers request. // // resourceGroupName is name of resource group name is name of web app -func (client SitesClient) GetSiteInstanceIdentifiers(resourceGroupName string, name string) (result SiteInstanceCollection, ae error) { +func (client SitesClient) GetSiteInstanceIdentifiers(resourceGroupName string, name string) (result SiteInstanceCollection, err error) { req, err := client.GetSiteInstanceIdentifiersPreparer(resourceGroupName, name) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "GetSiteInstanceIdentifiers", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "GetSiteInstanceIdentifiers", nil, "Failure preparing request") } resp, err := client.GetSiteInstanceIdentifiersSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "GetSiteInstanceIdentifiers", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "GetSiteInstanceIdentifiers", resp, "Failure sending request") } result, err = client.GetSiteInstanceIdentifiersResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "GetSiteInstanceIdentifiers", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", "GetSiteInstanceIdentifiers", resp, "Failure responding to request") } return @@ -4005,28 +4643,27 @@ // GetSiteInstanceIdentifiersPreparer prepares the GetSiteInstanceIdentifiers request. 
func (client SitesClient) GetSiteInstanceIdentifiersPreparer(resourceGroupName string, name string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/instances"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/instances", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetSiteInstanceIdentifiersSender sends the GetSiteInstanceIdentifiers request. The method will close the // http.Response Body if it receives an error. func (client SitesClient) GetSiteInstanceIdentifiersSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetSiteInstanceIdentifiersResponder handles the response to the GetSiteInstanceIdentifiers request. 
The method always @@ -4035,7 +4672,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -4048,21 +4685,21 @@ // resourceGroupName is name of resource group name is name of web app slot is // name of web app slot. If not specified then will default to production // slot. -func (client SitesClient) GetSiteInstanceIdentifiersSlot(resourceGroupName string, name string, slot string) (result SiteInstanceCollection, ae error) { +func (client SitesClient) GetSiteInstanceIdentifiersSlot(resourceGroupName string, name string, slot string) (result SiteInstanceCollection, err error) { req, err := client.GetSiteInstanceIdentifiersSlotPreparer(resourceGroupName, name, slot) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "GetSiteInstanceIdentifiersSlot", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "GetSiteInstanceIdentifiersSlot", nil, "Failure preparing request") } resp, err := client.GetSiteInstanceIdentifiersSlotSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "GetSiteInstanceIdentifiersSlot", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "GetSiteInstanceIdentifiersSlot", resp, "Failure sending request") } result, err = client.GetSiteInstanceIdentifiersSlotResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "GetSiteInstanceIdentifiersSlot", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", "GetSiteInstanceIdentifiersSlot", resp, "Failure responding to request") } return @@ -4071,29 +4708,28 @@ // GetSiteInstanceIdentifiersSlotPreparer prepares 
the GetSiteInstanceIdentifiersSlot request. func (client SitesClient) GetSiteInstanceIdentifiersSlotPreparer(resourceGroupName string, name string, slot string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "slot": url.QueryEscape(slot), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "slot": autorest.Encode("path", slot), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/instances"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/instances", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetSiteInstanceIdentifiersSlotSender sends the GetSiteInstanceIdentifiersSlot request. The method will close the // http.Response Body if it receives an error. func (client SitesClient) GetSiteInstanceIdentifiersSlotSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetSiteInstanceIdentifiersSlotResponder handles the response to the GetSiteInstanceIdentifiersSlot request. 
The method always @@ -4102,7 +4738,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -4112,21 +4748,21 @@ // GetSiteLogsConfig sends the get site logs config request. // // resourceGroupName is name of resource group name is name of web app -func (client SitesClient) GetSiteLogsConfig(resourceGroupName string, name string) (result SiteLogsConfig, ae error) { +func (client SitesClient) GetSiteLogsConfig(resourceGroupName string, name string) (result SiteLogsConfig, err error) { req, err := client.GetSiteLogsConfigPreparer(resourceGroupName, name) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "GetSiteLogsConfig", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "GetSiteLogsConfig", nil, "Failure preparing request") } resp, err := client.GetSiteLogsConfigSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "GetSiteLogsConfig", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "GetSiteLogsConfig", resp, "Failure sending request") } result, err = client.GetSiteLogsConfigResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "GetSiteLogsConfig", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", "GetSiteLogsConfig", resp, "Failure responding to request") } return @@ -4135,28 +4771,27 @@ // GetSiteLogsConfigPreparer prepares the GetSiteLogsConfig request. 
func (client SitesClient) GetSiteLogsConfigPreparer(resourceGroupName string, name string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/config/logs"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/config/logs", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetSiteLogsConfigSender sends the GetSiteLogsConfig request. The method will close the // http.Response Body if it receives an error. func (client SitesClient) GetSiteLogsConfigSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetSiteLogsConfigResponder handles the response to the GetSiteLogsConfig request. 
The method always @@ -4165,7 +4800,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -4177,21 +4812,21 @@ // resourceGroupName is name of resource group name is name of web app slot is // name of web app slot. If not specified then will default to production // slot. -func (client SitesClient) GetSiteLogsConfigSlot(resourceGroupName string, name string, slot string) (result SiteLogsConfig, ae error) { +func (client SitesClient) GetSiteLogsConfigSlot(resourceGroupName string, name string, slot string) (result SiteLogsConfig, err error) { req, err := client.GetSiteLogsConfigSlotPreparer(resourceGroupName, name, slot) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "GetSiteLogsConfigSlot", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "GetSiteLogsConfigSlot", nil, "Failure preparing request") } resp, err := client.GetSiteLogsConfigSlotSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "GetSiteLogsConfigSlot", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "GetSiteLogsConfigSlot", resp, "Failure sending request") } result, err = client.GetSiteLogsConfigSlotResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "GetSiteLogsConfigSlot", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", "GetSiteLogsConfigSlot", resp, "Failure responding to request") } return @@ -4200,29 +4835,28 @@ // GetSiteLogsConfigSlotPreparer prepares the GetSiteLogsConfigSlot request. 
func (client SitesClient) GetSiteLogsConfigSlotPreparer(resourceGroupName string, name string, slot string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "slot": url.QueryEscape(slot), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "slot": autorest.Encode("path", slot), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/config/logs"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/config/logs", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetSiteLogsConfigSlotSender sends the GetSiteLogsConfigSlot request. The method will close the // http.Response Body if it receives an error. func (client SitesClient) GetSiteLogsConfigSlotSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetSiteLogsConfigSlotResponder handles the response to the GetSiteLogsConfigSlot request. 
The method always @@ -4231,7 +4865,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -4241,21 +4875,21 @@ // GetSiteMetricDefinitions sends the get site metric definitions request. // // resourceGroupName is name of resource group name is name of web app -func (client SitesClient) GetSiteMetricDefinitions(resourceGroupName string, name string) (result MetricDefinitionCollection, ae error) { +func (client SitesClient) GetSiteMetricDefinitions(resourceGroupName string, name string) (result MetricDefinitionCollection, err error) { req, err := client.GetSiteMetricDefinitionsPreparer(resourceGroupName, name) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "GetSiteMetricDefinitions", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "GetSiteMetricDefinitions", nil, "Failure preparing request") } resp, err := client.GetSiteMetricDefinitionsSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "GetSiteMetricDefinitions", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "GetSiteMetricDefinitions", resp, "Failure sending request") } result, err = client.GetSiteMetricDefinitionsResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "GetSiteMetricDefinitions", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", "GetSiteMetricDefinitions", resp, "Failure responding to request") } return @@ -4264,28 +4898,27 @@ // GetSiteMetricDefinitionsPreparer prepares the GetSiteMetricDefinitions request. 
func (client SitesClient) GetSiteMetricDefinitionsPreparer(resourceGroupName string, name string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/metricdefinitions"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/metricdefinitions", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetSiteMetricDefinitionsSender sends the GetSiteMetricDefinitions request. The method will close the // http.Response Body if it receives an error. func (client SitesClient) GetSiteMetricDefinitionsSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetSiteMetricDefinitionsResponder handles the response to the GetSiteMetricDefinitions request. 
The method always @@ -4294,7 +4927,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -4307,21 +4940,21 @@ // resourceGroupName is name of resource group name is name of web app slot is // name of web app slot. If not specified then will default to production // slot. -func (client SitesClient) GetSiteMetricDefinitionsSlot(resourceGroupName string, name string, slot string) (result MetricDefinitionCollection, ae error) { +func (client SitesClient) GetSiteMetricDefinitionsSlot(resourceGroupName string, name string, slot string) (result MetricDefinitionCollection, err error) { req, err := client.GetSiteMetricDefinitionsSlotPreparer(resourceGroupName, name, slot) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "GetSiteMetricDefinitionsSlot", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "GetSiteMetricDefinitionsSlot", nil, "Failure preparing request") } resp, err := client.GetSiteMetricDefinitionsSlotSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "GetSiteMetricDefinitionsSlot", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "GetSiteMetricDefinitionsSlot", resp, "Failure sending request") } result, err = client.GetSiteMetricDefinitionsSlotResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "GetSiteMetricDefinitionsSlot", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", "GetSiteMetricDefinitionsSlot", resp, "Failure responding to request") } return @@ -4330,29 +4963,28 @@ // GetSiteMetricDefinitionsSlotPreparer prepares the 
GetSiteMetricDefinitionsSlot request. func (client SitesClient) GetSiteMetricDefinitionsSlotPreparer(resourceGroupName string, name string, slot string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "slot": url.QueryEscape(slot), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "slot": autorest.Encode("path", slot), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/metricdefinitions"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/metricdefinitions", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetSiteMetricDefinitionsSlotSender sends the GetSiteMetricDefinitionsSlot request. The method will close the // http.Response Body if it receives an error. func (client SitesClient) GetSiteMetricDefinitionsSlotSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetSiteMetricDefinitionsSlotResponder handles the response to the GetSiteMetricDefinitionsSlot request. 
The method always @@ -4361,7 +4993,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -4376,21 +5008,21 @@ // Example: $filter=(name.value eq 'Metric1' or name.value eq 'Metric2') and // startTime eq '2014-01-01T00:00:00Z' and endTime eq '2014-12-31T23:59:59Z' // and timeGrain eq duration'[Hour|Minute|Day]'. -func (client SitesClient) GetSiteMetrics(resourceGroupName string, name string, details *bool, filter string) (result ResourceMetricCollection, ae error) { +func (client SitesClient) GetSiteMetrics(resourceGroupName string, name string, details *bool, filter string) (result ResourceMetricCollection, err error) { req, err := client.GetSiteMetricsPreparer(resourceGroupName, name, details, filter) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "GetSiteMetrics", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "GetSiteMetrics", nil, "Failure preparing request") } resp, err := client.GetSiteMetricsSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "GetSiteMetrics", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "GetSiteMetrics", resp, "Failure sending request") } result, err = client.GetSiteMetricsResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "GetSiteMetrics", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", "GetSiteMetrics", resp, "Failure responding to request") } return @@ -4399,34 +5031,33 @@ // GetSiteMetricsPreparer prepares the GetSiteMetrics request. 
func (client SitesClient) GetSiteMetricsPreparer(resourceGroupName string, name string, details *bool, filter string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } if details != nil { - queryParameters["details"] = details + queryParameters["details"] = autorest.Encode("query", *details) } if len(filter) > 0 { - queryParameters["$filter"] = filter + queryParameters["$filter"] = autorest.Encode("query", filter) } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/metrics"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/metrics", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetSiteMetricsSender sends the GetSiteMetrics request. The method will close the // http.Response Body if it receives an error. func (client SitesClient) GetSiteMetricsSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetSiteMetricsResponder handles the response to the GetSiteMetrics request. 
The method always @@ -4435,7 +5066,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -4451,21 +5082,21 @@ // odata syntax. Example: $filter=(name.value eq 'Metric1' or name.value eq // 'Metric2') and startTime eq '2014-01-01T00:00:00Z' and endTime eq // '2014-12-31T23:59:59Z' and timeGrain eq duration'[Hour|Minute|Day]'. -func (client SitesClient) GetSiteMetricsSlot(resourceGroupName string, name string, slot string, details *bool, filter string) (result ResourceMetricCollection, ae error) { +func (client SitesClient) GetSiteMetricsSlot(resourceGroupName string, name string, slot string, details *bool, filter string) (result ResourceMetricCollection, err error) { req, err := client.GetSiteMetricsSlotPreparer(resourceGroupName, name, slot, details, filter) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "GetSiteMetricsSlot", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "GetSiteMetricsSlot", nil, "Failure preparing request") } resp, err := client.GetSiteMetricsSlotSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "GetSiteMetricsSlot", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "GetSiteMetricsSlot", resp, "Failure sending request") } result, err = client.GetSiteMetricsSlotResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "GetSiteMetricsSlot", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", "GetSiteMetricsSlot", resp, "Failure responding to request") } return @@ -4474,35 +5105,34 @@ // GetSiteMetricsSlotPreparer prepares the 
GetSiteMetricsSlot request. func (client SitesClient) GetSiteMetricsSlotPreparer(resourceGroupName string, name string, slot string, details *bool, filter string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "slot": url.QueryEscape(slot), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "slot": autorest.Encode("path", slot), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } if details != nil { - queryParameters["details"] = details + queryParameters["details"] = autorest.Encode("query", *details) } if len(filter) > 0 { - queryParameters["$filter"] = filter + queryParameters["$filter"] = autorest.Encode("query", filter) } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/metrics"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/metrics", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetSiteMetricsSlotSender sends the GetSiteMetricsSlot request. The method will close the // http.Response Body if it receives an error. 
func (client SitesClient) GetSiteMetricsSlotSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetSiteMetricsSlotResponder handles the response to the GetSiteMetricsSlot request. The method always @@ -4511,7 +5141,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -4522,21 +5152,21 @@ // // resourceGroupName is the resource group name name is the name of the web // app view is the type of view. This can either be "summary" or "detailed". -func (client SitesClient) GetSiteNetworkFeatures(resourceGroupName string, name string, view string) (result NetworkFeatures, ae error) { +func (client SitesClient) GetSiteNetworkFeatures(resourceGroupName string, name string, view string) (result NetworkFeatures, err error) { req, err := client.GetSiteNetworkFeaturesPreparer(resourceGroupName, name, view) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "GetSiteNetworkFeatures", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "GetSiteNetworkFeatures", nil, "Failure preparing request") } resp, err := client.GetSiteNetworkFeaturesSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "GetSiteNetworkFeatures", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "GetSiteNetworkFeatures", resp, "Failure sending request") } result, err = client.GetSiteNetworkFeaturesResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "GetSiteNetworkFeatures", "Failure responding to request") + err = autorest.NewErrorWithError(err, 
"web.SitesClient", "GetSiteNetworkFeatures", resp, "Failure responding to request") } return @@ -4545,29 +5175,28 @@ // GetSiteNetworkFeaturesPreparer prepares the GetSiteNetworkFeatures request. func (client SitesClient) GetSiteNetworkFeaturesPreparer(resourceGroupName string, name string, view string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - "view": url.QueryEscape(view), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "view": autorest.Encode("path", view), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/networkFeatures/{view}"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/networkFeatures/{view}", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetSiteNetworkFeaturesSender sends the GetSiteNetworkFeatures request. The method will close the // http.Response Body if it receives an error. func (client SitesClient) GetSiteNetworkFeaturesSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK, http.StatusNotFound) + return autorest.SendWithSender(client, req) } // GetSiteNetworkFeaturesResponder handles the response to the GetSiteNetworkFeatures request. 
The method always @@ -4576,7 +5205,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNotFound), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNotFound), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -4588,21 +5217,21 @@ // resourceGroupName is the resource group name name is the name of the web // app view is the type of view. This can either be "summary" or "detailed". // slot is the name of the slot for this web app. -func (client SitesClient) GetSiteNetworkFeaturesSlot(resourceGroupName string, name string, view string, slot string) (result NetworkFeatures, ae error) { +func (client SitesClient) GetSiteNetworkFeaturesSlot(resourceGroupName string, name string, view string, slot string) (result NetworkFeatures, err error) { req, err := client.GetSiteNetworkFeaturesSlotPreparer(resourceGroupName, name, view, slot) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "GetSiteNetworkFeaturesSlot", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "GetSiteNetworkFeaturesSlot", nil, "Failure preparing request") } resp, err := client.GetSiteNetworkFeaturesSlotSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "GetSiteNetworkFeaturesSlot", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "GetSiteNetworkFeaturesSlot", resp, "Failure sending request") } result, err = client.GetSiteNetworkFeaturesSlotResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "GetSiteNetworkFeaturesSlot", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", "GetSiteNetworkFeaturesSlot", resp, "Failure responding to request") } return @@ -4611,30 
+5240,29 @@ // GetSiteNetworkFeaturesSlotPreparer prepares the GetSiteNetworkFeaturesSlot request. func (client SitesClient) GetSiteNetworkFeaturesSlotPreparer(resourceGroupName string, name string, view string, slot string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "slot": url.QueryEscape(slot), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - "view": url.QueryEscape(view), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "slot": autorest.Encode("path", slot), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "view": autorest.Encode("path", view), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/networkFeatures/{view}"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/networkFeatures/{view}", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetSiteNetworkFeaturesSlotSender sends the GetSiteNetworkFeaturesSlot request. The method will close the // http.Response Body if it receives an error. 
func (client SitesClient) GetSiteNetworkFeaturesSlotSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK, http.StatusNotFound) + return autorest.SendWithSender(client, req) } // GetSiteNetworkFeaturesSlotResponder handles the response to the GetSiteNetworkFeaturesSlot request. The method always @@ -4643,7 +5271,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNotFound), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNotFound), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -4654,21 +5282,21 @@ // // resourceGroupName is name of resource group name is name of web app // operationID is id of an operation -func (client SitesClient) GetSiteOperation(resourceGroupName string, name string, operationID string) (result ObjectSet, ae error) { +func (client SitesClient) GetSiteOperation(resourceGroupName string, name string, operationID string) (result SetObject, err error) { req, err := client.GetSiteOperationPreparer(resourceGroupName, name, operationID) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "GetSiteOperation", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "GetSiteOperation", nil, "Failure preparing request") } resp, err := client.GetSiteOperationSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "GetSiteOperation", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "GetSiteOperation", resp, "Failure sending request") } result, err = client.GetSiteOperationResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "GetSiteOperation", "Failure responding to request") + err = autorest.NewErrorWithError(err, 
"web.SitesClient", "GetSiteOperation", resp, "Failure responding to request") } return @@ -4677,38 +5305,37 @@ // GetSiteOperationPreparer prepares the GetSiteOperation request. func (client SitesClient) GetSiteOperationPreparer(resourceGroupName string, name string, operationID string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "operationId": url.QueryEscape(operationID), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "operationId": autorest.Encode("path", operationID), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/operationresults/{operationId}"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/operationresults/{operationId}", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetSiteOperationSender sends the GetSiteOperation request. The method will close the // http.Response Body if it receives an error. func (client SitesClient) GetSiteOperationSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetSiteOperationResponder handles the response to the GetSiteOperation request. The method always // closes the http.Response Body. 
-func (client SitesClient) GetSiteOperationResponder(resp *http.Response) (result ObjectSet, err error) { +func (client SitesClient) GetSiteOperationResponder(resp *http.Response) (result SetObject, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result.Value), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -4720,21 +5347,21 @@ // resourceGroupName is name of resource group name is name of web app // operationID is id of an operation slot is name of web app slot. If not // specified then will default to production slot. -func (client SitesClient) GetSiteOperationSlot(resourceGroupName string, name string, operationID string, slot string) (result ObjectSet, ae error) { +func (client SitesClient) GetSiteOperationSlot(resourceGroupName string, name string, operationID string, slot string) (result SetObject, err error) { req, err := client.GetSiteOperationSlotPreparer(resourceGroupName, name, operationID, slot) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "GetSiteOperationSlot", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "GetSiteOperationSlot", nil, "Failure preparing request") } resp, err := client.GetSiteOperationSlotSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "GetSiteOperationSlot", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "GetSiteOperationSlot", resp, "Failure sending request") } result, err = client.GetSiteOperationSlotResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "GetSiteOperationSlot", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", 
"GetSiteOperationSlot", resp, "Failure responding to request") } return @@ -4743,39 +5370,38 @@ // GetSiteOperationSlotPreparer prepares the GetSiteOperationSlot request. func (client SitesClient) GetSiteOperationSlotPreparer(resourceGroupName string, name string, operationID string, slot string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "operationId": url.QueryEscape(operationID), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "slot": url.QueryEscape(slot), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "operationId": autorest.Encode("path", operationID), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "slot": autorest.Encode("path", slot), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/operationresults/{operationId}"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/operationresults/{operationId}", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetSiteOperationSlotSender sends the GetSiteOperationSlot request. The method will close the // http.Response Body if it receives an error. 
func (client SitesClient) GetSiteOperationSlotSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetSiteOperationSlotResponder handles the response to the GetSiteOperationSlot request. The method always // closes the http.Response Body. -func (client SitesClient) GetSiteOperationSlotResponder(resp *http.Response) (result ObjectSet, err error) { +func (client SitesClient) GetSiteOperationSlotResponder(resp *http.Response) (result SetObject, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result.Value), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -4784,21 +5410,21 @@ // GetSitePremierAddOn sends the get site premier add on request. // -func (client SitesClient) GetSitePremierAddOn(resourceGroupName string, name string, premierAddOnName string) (result ObjectSet, ae error) { +func (client SitesClient) GetSitePremierAddOn(resourceGroupName string, name string, premierAddOnName string) (result SetObject, err error) { req, err := client.GetSitePremierAddOnPreparer(resourceGroupName, name, premierAddOnName) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "GetSitePremierAddOn", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "GetSitePremierAddOn", nil, "Failure preparing request") } resp, err := client.GetSitePremierAddOnSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "GetSitePremierAddOn", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "GetSitePremierAddOn", resp, "Failure sending request") } result, err = client.GetSitePremierAddOnResponder(resp) if err != nil { - ae = 
autorest.NewErrorWithError(err, "web/SitesClient", "GetSitePremierAddOn", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", "GetSitePremierAddOn", resp, "Failure responding to request") } return @@ -4807,38 +5433,37 @@ // GetSitePremierAddOnPreparer prepares the GetSitePremierAddOn request. func (client SitesClient) GetSitePremierAddOnPreparer(resourceGroupName string, name string, premierAddOnName string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "premierAddOnName": url.QueryEscape(premierAddOnName), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "premierAddOnName": autorest.Encode("path", premierAddOnName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/premieraddons/{premierAddOnName}"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/premieraddons/{premierAddOnName}", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetSitePremierAddOnSender sends the GetSitePremierAddOn request. The method will close the // http.Response Body if it receives an error. 
func (client SitesClient) GetSitePremierAddOnSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetSitePremierAddOnResponder handles the response to the GetSitePremierAddOn request. The method always // closes the http.Response Body. -func (client SitesClient) GetSitePremierAddOnResponder(resp *http.Response) (result ObjectSet, err error) { +func (client SitesClient) GetSitePremierAddOnResponder(resp *http.Response) (result SetObject, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result.Value), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -4847,21 +5472,21 @@ // GetSitePremierAddOnSlot sends the get site premier add on slot request. // -func (client SitesClient) GetSitePremierAddOnSlot(resourceGroupName string, name string, premierAddOnName string, slot string) (result ObjectSet, ae error) { +func (client SitesClient) GetSitePremierAddOnSlot(resourceGroupName string, name string, premierAddOnName string, slot string) (result SetObject, err error) { req, err := client.GetSitePremierAddOnSlotPreparer(resourceGroupName, name, premierAddOnName, slot) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "GetSitePremierAddOnSlot", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "GetSitePremierAddOnSlot", nil, "Failure preparing request") } resp, err := client.GetSitePremierAddOnSlotSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "GetSitePremierAddOnSlot", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "GetSitePremierAddOnSlot", resp, "Failure sending request") } result, err 
= client.GetSitePremierAddOnSlotResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "GetSitePremierAddOnSlot", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", "GetSitePremierAddOnSlot", resp, "Failure responding to request") } return @@ -4870,39 +5495,38 @@ // GetSitePremierAddOnSlotPreparer prepares the GetSitePremierAddOnSlot request. func (client SitesClient) GetSitePremierAddOnSlotPreparer(resourceGroupName string, name string, premierAddOnName string, slot string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "premierAddOnName": url.QueryEscape(premierAddOnName), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "slot": url.QueryEscape(slot), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "premierAddOnName": autorest.Encode("path", premierAddOnName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "slot": autorest.Encode("path", slot), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/premieraddons/{premierAddOnName}"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/premieraddons/{premierAddOnName}", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetSitePremierAddOnSlotSender sends the GetSitePremierAddOnSlot 
request. The method will close the // http.Response Body if it receives an error. func (client SitesClient) GetSitePremierAddOnSlotSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetSitePremierAddOnSlotResponder handles the response to the GetSitePremierAddOnSlot request. The method always // closes the http.Response Body. -func (client SitesClient) GetSitePremierAddOnSlotResponder(resp *http.Response) (result ObjectSet, err error) { +func (client SitesClient) GetSitePremierAddOnSlotResponder(resp *http.Response) (result SetObject, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result.Value), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -4914,21 +5538,21 @@ // // resourceGroupName is the resource group name name is the name of the web // app entityName is the name by which the Hybrid Connection is identified -func (client SitesClient) GetSiteRelayServiceConnection(resourceGroupName string, name string, entityName string) (result RelayServiceConnectionEntity, ae error) { +func (client SitesClient) GetSiteRelayServiceConnection(resourceGroupName string, name string, entityName string) (result RelayServiceConnectionEntity, err error) { req, err := client.GetSiteRelayServiceConnectionPreparer(resourceGroupName, name, entityName) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "GetSiteRelayServiceConnection", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "GetSiteRelayServiceConnection", nil, "Failure preparing request") } resp, err := client.GetSiteRelayServiceConnectionSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, 
"web/SitesClient", "GetSiteRelayServiceConnection", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "GetSiteRelayServiceConnection", resp, "Failure sending request") } result, err = client.GetSiteRelayServiceConnectionResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "GetSiteRelayServiceConnection", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", "GetSiteRelayServiceConnection", resp, "Failure responding to request") } return @@ -4937,29 +5561,28 @@ // GetSiteRelayServiceConnectionPreparer prepares the GetSiteRelayServiceConnection request. func (client SitesClient) GetSiteRelayServiceConnectionPreparer(resourceGroupName string, name string, entityName string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "entityName": url.QueryEscape(entityName), - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "entityName": autorest.Encode("path", entityName), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/hybridconnection/{entityName}"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/hybridconnection/{entityName}", pathParameters), autorest.WithQueryParameters(queryParameters)) 
+ return preparer.Prepare(&http.Request{}) } // GetSiteRelayServiceConnectionSender sends the GetSiteRelayServiceConnection request. The method will close the // http.Response Body if it receives an error. func (client SitesClient) GetSiteRelayServiceConnectionSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetSiteRelayServiceConnectionResponder handles the response to the GetSiteRelayServiceConnection request. The method always @@ -4968,7 +5591,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -4981,21 +5604,21 @@ // resourceGroupName is the resource group name name is the name of the web // app entityName is the name by which the Hybrid Connection is identified // slot is the name of the slot for the web app. 
-func (client SitesClient) GetSiteRelayServiceConnectionSlot(resourceGroupName string, name string, entityName string, slot string) (result RelayServiceConnectionEntity, ae error) { +func (client SitesClient) GetSiteRelayServiceConnectionSlot(resourceGroupName string, name string, entityName string, slot string) (result RelayServiceConnectionEntity, err error) { req, err := client.GetSiteRelayServiceConnectionSlotPreparer(resourceGroupName, name, entityName, slot) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "GetSiteRelayServiceConnectionSlot", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "GetSiteRelayServiceConnectionSlot", nil, "Failure preparing request") } resp, err := client.GetSiteRelayServiceConnectionSlotSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "GetSiteRelayServiceConnectionSlot", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "GetSiteRelayServiceConnectionSlot", resp, "Failure sending request") } result, err = client.GetSiteRelayServiceConnectionSlotResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "GetSiteRelayServiceConnectionSlot", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", "GetSiteRelayServiceConnectionSlot", resp, "Failure responding to request") } return @@ -5004,30 +5627,29 @@ // GetSiteRelayServiceConnectionSlotPreparer prepares the GetSiteRelayServiceConnectionSlot request. 
func (client SitesClient) GetSiteRelayServiceConnectionSlotPreparer(resourceGroupName string, name string, entityName string, slot string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "entityName": url.QueryEscape(entityName), - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "slot": url.QueryEscape(slot), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "entityName": autorest.Encode("path", entityName), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "slot": autorest.Encode("path", slot), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/hybridconnection/{entityName}"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/hybridconnection/{entityName}", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetSiteRelayServiceConnectionSlotSender sends the GetSiteRelayServiceConnectionSlot request. The method will close the // http.Response Body if it receives an error. func (client SitesClient) GetSiteRelayServiceConnectionSlotSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetSiteRelayServiceConnectionSlotResponder handles the response to the GetSiteRelayServiceConnectionSlot request. 
The method always @@ -5036,7 +5658,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -5049,21 +5671,21 @@ // additional web app properties included in the response includeSiteTypes is // types of apps included in the response includeSlots is whether or not to // include deployments slots in results -func (client SitesClient) GetSites(resourceGroupName string, propertiesToInclude string, includeSiteTypes string, includeSlots *bool) (result SiteCollection, ae error) { +func (client SitesClient) GetSites(resourceGroupName string, propertiesToInclude string, includeSiteTypes string, includeSlots *bool) (result SiteCollection, err error) { req, err := client.GetSitesPreparer(resourceGroupName, propertiesToInclude, includeSiteTypes, includeSlots) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "GetSites", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "GetSites", nil, "Failure preparing request") } resp, err := client.GetSitesSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "GetSites", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "GetSites", resp, "Failure sending request") } result, err = client.GetSitesResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "GetSites", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", "GetSites", resp, "Failure responding to request") } return @@ -5072,36 +5694,35 @@ // GetSitesPreparer prepares the GetSites request. 
func (client SitesClient) GetSitesPreparer(resourceGroupName string, propertiesToInclude string, includeSiteTypes string, includeSlots *bool) (*http.Request, error) { pathParameters := map[string]interface{}{ - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } if len(propertiesToInclude) > 0 { - queryParameters["propertiesToInclude"] = propertiesToInclude + queryParameters["propertiesToInclude"] = autorest.Encode("query", propertiesToInclude) } if len(includeSiteTypes) > 0 { - queryParameters["includeSiteTypes"] = includeSiteTypes + queryParameters["includeSiteTypes"] = autorest.Encode("query", includeSiteTypes) } if includeSlots != nil { - queryParameters["includeSlots"] = includeSlots + queryParameters["includeSlots"] = autorest.Encode("query", *includeSlots) } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetSitesSender sends the GetSites request. The method will close the // http.Response Body if it receives an error. 
func (client SitesClient) GetSitesSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetSitesResponder handles the response to the GetSites request. The method always @@ -5110,7 +5731,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -5123,21 +5744,21 @@ // name of web app slot. If not specified then will default to production // slot. propertiesToInclude is additional web app properties included in the // response -func (client SitesClient) GetSiteSlot(resourceGroupName string, name string, slot string, propertiesToInclude string) (result Site, ae error) { +func (client SitesClient) GetSiteSlot(resourceGroupName string, name string, slot string, propertiesToInclude string) (result Site, err error) { req, err := client.GetSiteSlotPreparer(resourceGroupName, name, slot, propertiesToInclude) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "GetSiteSlot", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "GetSiteSlot", nil, "Failure preparing request") } resp, err := client.GetSiteSlotSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "GetSiteSlot", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "GetSiteSlot", resp, "Failure sending request") } result, err = client.GetSiteSlotResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "GetSiteSlot", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", "GetSiteSlot", resp, "Failure responding to request") } return 
@@ -5146,32 +5767,31 @@ // GetSiteSlotPreparer prepares the GetSiteSlot request. func (client SitesClient) GetSiteSlotPreparer(resourceGroupName string, name string, slot string, propertiesToInclude string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "slot": url.QueryEscape(slot), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "slot": autorest.Encode("path", slot), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } if len(propertiesToInclude) > 0 { - queryParameters["propertiesToInclude"] = propertiesToInclude + queryParameters["propertiesToInclude"] = autorest.Encode("query", propertiesToInclude) } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetSiteSlotSender sends the GetSiteSlot request. The method will close the // http.Response Body if it receives an error. func (client SitesClient) GetSiteSlotSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetSiteSlotResponder handles the response to the GetSiteSlot request. 
The method always @@ -5180,7 +5800,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -5191,21 +5811,21 @@ // // resourceGroupName is name of resource group name is name of web app // propertiesToInclude is list of app properties to include in the response -func (client SitesClient) GetSiteSlots(resourceGroupName string, name string, propertiesToInclude string) (result SiteCollection, ae error) { +func (client SitesClient) GetSiteSlots(resourceGroupName string, name string, propertiesToInclude string) (result SiteCollection, err error) { req, err := client.GetSiteSlotsPreparer(resourceGroupName, name, propertiesToInclude) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "GetSiteSlots", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "GetSiteSlots", nil, "Failure preparing request") } resp, err := client.GetSiteSlotsSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "GetSiteSlots", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "GetSiteSlots", resp, "Failure sending request") } result, err = client.GetSiteSlotsResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "GetSiteSlots", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", "GetSiteSlots", resp, "Failure responding to request") } return @@ -5214,31 +5834,30 @@ // GetSiteSlotsPreparer prepares the GetSiteSlots request. 
func (client SitesClient) GetSiteSlotsPreparer(resourceGroupName string, name string, propertiesToInclude string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } if len(propertiesToInclude) > 0 { - queryParameters["propertiesToInclude"] = propertiesToInclude + queryParameters["propertiesToInclude"] = autorest.Encode("query", propertiesToInclude) } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetSiteSlotsSender sends the GetSiteSlots request. The method will close the // http.Response Body if it receives an error. func (client SitesClient) GetSiteSlotsSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetSiteSlotsResponder handles the response to the GetSiteSlots request. 
The method always @@ -5247,7 +5866,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -5256,197 +5875,60 @@ // GetSiteSnapshots sends the get site snapshots request. // -// subscriptionName is azure subscription webspaceName is webspace name is -// website Name -func (client SitesClient) GetSiteSnapshots(subscriptionName string, webspaceName string, name string, resourceGroupName string) (result ObjectSet, ae error) { - req, err := client.GetSiteSnapshotsPreparer(subscriptionName, webspaceName, name, resourceGroupName) +// resourceGroupName is webspace name is website Name +func (client SitesClient) GetSiteSnapshots(resourceGroupName string, name string) (result SetObject, err error) { + req, err := client.GetSiteSnapshotsPreparer(resourceGroupName, name) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "GetSiteSnapshots", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "GetSiteSnapshots", nil, "Failure preparing request") } resp, err := client.GetSiteSnapshotsSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "GetSiteSnapshots", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "GetSiteSnapshots", resp, "Failure sending request") } result, err = client.GetSiteSnapshotsResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "GetSiteSnapshots", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", "GetSiteSnapshots", resp, "Failure responding to request") } return } // GetSiteSnapshotsPreparer prepares the GetSiteSnapshots request. 
-func (client SitesClient) GetSiteSnapshotsPreparer(subscriptionName string, webspaceName string, name string, resourceGroupName string) (*http.Request, error) { +func (client SitesClient) GetSiteSnapshotsPreparer(resourceGroupName string, name string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, - "subscriptionName": subscriptionName, - "webspaceName": webspaceName, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsPut(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/snapshots"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/snapshots", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetSiteSnapshotsSender sends the GetSiteSnapshots request. The method will close the // http.Response Body if it receives an error. func (client SitesClient) GetSiteSnapshotsSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetSiteSnapshotsResponder handles the response to the GetSiteSnapshots request. The method always // closes the http.Response Body. 
-func (client SitesClient) GetSiteSnapshotsResponder(resp *http.Response) (result ObjectSet, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result.Value), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// GetSiteSnapshotsOnSku sends the get site snapshots on sku request. -// -// subscriptionName is azure subscription webspaceName is webspace name is -// website Name -func (client SitesClient) GetSiteSnapshotsOnSku(subscriptionName string, webspaceName string, name string, resourceGroupName string) (result ObjectSet, ae error) { - req, err := client.GetSiteSnapshotsOnSkuPreparer(subscriptionName, webspaceName, name, resourceGroupName) - if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "GetSiteSnapshotsOnSku", "Failure preparing request") - } - - resp, err := client.GetSiteSnapshotsOnSkuSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "GetSiteSnapshotsOnSku", "Failure sending request") - } - - result, err = client.GetSiteSnapshotsOnSkuResponder(resp) - if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "GetSiteSnapshotsOnSku", "Failure responding to request") - } - - return -} - -// GetSiteSnapshotsOnSkuPreparer prepares the GetSiteSnapshotsOnSku request. 
-func (client SitesClient) GetSiteSnapshotsOnSkuPreparer(subscriptionName string, webspaceName string, name string, resourceGroupName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - } - - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - "subscriptionName": subscriptionName, - "webspaceName": webspaceName, - } - - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsPut(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/restorableSnapshots"), - autorest.WithPathParameters(pathParameters), - autorest.WithQueryParameters(queryParameters)) -} - -// GetSiteSnapshotsOnSkuSender sends the GetSiteSnapshotsOnSku request. The method will close the -// http.Response Body if it receives an error. -func (client SitesClient) GetSiteSnapshotsOnSkuSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) -} - -// GetSiteSnapshotsOnSkuResponder handles the response to the GetSiteSnapshotsOnSku request. The method always -// closes the http.Response Body. -func (client SitesClient) GetSiteSnapshotsOnSkuResponder(resp *http.Response) (result ObjectSet, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result.Value), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// GetSiteSnapshotsOnSkuSlot sends the get site snapshots on sku slot request. 
-// -// subscriptionName is azure subscription webspaceName is webspace name is -// website Name -func (client SitesClient) GetSiteSnapshotsOnSkuSlot(subscriptionName string, webspaceName string, name string, resourceGroupName string, slot string) (result ObjectSet, ae error) { - req, err := client.GetSiteSnapshotsOnSkuSlotPreparer(subscriptionName, webspaceName, name, resourceGroupName, slot) - if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "GetSiteSnapshotsOnSkuSlot", "Failure preparing request") - } - - resp, err := client.GetSiteSnapshotsOnSkuSlotSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "GetSiteSnapshotsOnSkuSlot", "Failure sending request") - } - - result, err = client.GetSiteSnapshotsOnSkuSlotResponder(resp) - if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "GetSiteSnapshotsOnSkuSlot", "Failure responding to request") - } - - return -} - -// GetSiteSnapshotsOnSkuSlotPreparer prepares the GetSiteSnapshotsOnSkuSlot request. 
-func (client SitesClient) GetSiteSnapshotsOnSkuSlotPreparer(subscriptionName string, webspaceName string, name string, resourceGroupName string, slot string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "slot": url.QueryEscape(slot), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - } - - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - "subscriptionName": subscriptionName, - "webspaceName": webspaceName, - } - - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsPut(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/restorableSnapshots"), - autorest.WithPathParameters(pathParameters), - autorest.WithQueryParameters(queryParameters)) -} - -// GetSiteSnapshotsOnSkuSlotSender sends the GetSiteSnapshotsOnSkuSlot request. The method will close the -// http.Response Body if it receives an error. -func (client SitesClient) GetSiteSnapshotsOnSkuSlotSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) -} - -// GetSiteSnapshotsOnSkuSlotResponder handles the response to the GetSiteSnapshotsOnSkuSlot request. The method always -// closes the http.Response Body. 
-func (client SitesClient) GetSiteSnapshotsOnSkuSlotResponder(resp *http.Response) (result ObjectSet, err error) { +func (client SitesClient) GetSiteSnapshotsResponder(resp *http.Response) (result SetObject, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result.Value), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -5455,65 +5937,61 @@ // GetSiteSnapshotsSlot sends the get site snapshots slot request. // -// subscriptionName is azure subscription webspaceName is webspace name is -// website Name -func (client SitesClient) GetSiteSnapshotsSlot(subscriptionName string, webspaceName string, name string, resourceGroupName string, slot string) (result ObjectSet, ae error) { - req, err := client.GetSiteSnapshotsSlotPreparer(subscriptionName, webspaceName, name, resourceGroupName, slot) +// resourceGroupName is webspace name is website Name slot is website Slot +func (client SitesClient) GetSiteSnapshotsSlot(resourceGroupName string, name string, slot string) (result SetObject, err error) { + req, err := client.GetSiteSnapshotsSlotPreparer(resourceGroupName, name, slot) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "GetSiteSnapshotsSlot", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "GetSiteSnapshotsSlot", nil, "Failure preparing request") } resp, err := client.GetSiteSnapshotsSlotSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "GetSiteSnapshotsSlot", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "GetSiteSnapshotsSlot", resp, "Failure sending request") } result, err = client.GetSiteSnapshotsSlotResponder(resp) if err != nil { - ae = 
autorest.NewErrorWithError(err, "web/SitesClient", "GetSiteSnapshotsSlot", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", "GetSiteSnapshotsSlot", resp, "Failure responding to request") } return } // GetSiteSnapshotsSlotPreparer prepares the GetSiteSnapshotsSlot request. -func (client SitesClient) GetSiteSnapshotsSlotPreparer(subscriptionName string, webspaceName string, name string, resourceGroupName string, slot string) (*http.Request, error) { +func (client SitesClient) GetSiteSnapshotsSlotPreparer(resourceGroupName string, name string, slot string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "slot": url.QueryEscape(slot), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "slot": autorest.Encode("path", slot), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, - "subscriptionName": subscriptionName, - "webspaceName": webspaceName, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsPut(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/snapshots"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/snapshots", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetSiteSnapshotsSlotSender sends the GetSiteSnapshotsSlot request. 
The method will close the // http.Response Body if it receives an error. func (client SitesClient) GetSiteSnapshotsSlotSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetSiteSnapshotsSlotResponder handles the response to the GetSiteSnapshotsSlot request. The method always // closes the http.Response Body. -func (client SitesClient) GetSiteSnapshotsSlotResponder(resp *http.Response) (result ObjectSet, err error) { +func (client SitesClient) GetSiteSnapshotsSlotResponder(resp *http.Response) (result SetObject, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result.Value), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -5523,21 +6001,21 @@ // GetSiteSourceControl sends the get site source control request. // // resourceGroupName is name of resource group name is name of web app -func (client SitesClient) GetSiteSourceControl(resourceGroupName string, name string) (result SiteSourceControl, ae error) { +func (client SitesClient) GetSiteSourceControl(resourceGroupName string, name string) (result SiteSourceControl, err error) { req, err := client.GetSiteSourceControlPreparer(resourceGroupName, name) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "GetSiteSourceControl", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "GetSiteSourceControl", nil, "Failure preparing request") } resp, err := client.GetSiteSourceControlSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "GetSiteSourceControl", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "GetSiteSourceControl", resp, "Failure 
sending request") } result, err = client.GetSiteSourceControlResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "GetSiteSourceControl", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", "GetSiteSourceControl", resp, "Failure responding to request") } return @@ -5546,28 +6024,27 @@ // GetSiteSourceControlPreparer prepares the GetSiteSourceControl request. func (client SitesClient) GetSiteSourceControlPreparer(resourceGroupName string, name string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/sourcecontrols/web"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/sourcecontrols/web", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetSiteSourceControlSender sends the GetSiteSourceControl request. The method will close the // http.Response Body if it receives an error. 
func (client SitesClient) GetSiteSourceControlSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetSiteSourceControlResponder handles the response to the GetSiteSourceControl request. The method always @@ -5576,7 +6053,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -5588,21 +6065,21 @@ // resourceGroupName is name of resource group name is name of web app slot is // name of web app slot. If not specified then will default to production // slot. -func (client SitesClient) GetSiteSourceControlSlot(resourceGroupName string, name string, slot string) (result SiteSourceControl, ae error) { +func (client SitesClient) GetSiteSourceControlSlot(resourceGroupName string, name string, slot string) (result SiteSourceControl, err error) { req, err := client.GetSiteSourceControlSlotPreparer(resourceGroupName, name, slot) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "GetSiteSourceControlSlot", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "GetSiteSourceControlSlot", nil, "Failure preparing request") } resp, err := client.GetSiteSourceControlSlotSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "GetSiteSourceControlSlot", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "GetSiteSourceControlSlot", resp, "Failure sending request") } result, err = client.GetSiteSourceControlSlotResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "GetSiteSourceControlSlot", "Failure responding to request") + err = 
autorest.NewErrorWithError(err, "web.SitesClient", "GetSiteSourceControlSlot", resp, "Failure responding to request") } return @@ -5611,29 +6088,28 @@ // GetSiteSourceControlSlotPreparer prepares the GetSiteSourceControlSlot request. func (client SitesClient) GetSiteSourceControlSlotPreparer(resourceGroupName string, name string, slot string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "slot": url.QueryEscape(slot), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "slot": autorest.Encode("path", slot), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/sourcecontrols/web"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/sourcecontrols/web", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetSiteSourceControlSlotSender sends the GetSiteSourceControlSlot request. The method will close the // http.Response Body if it receives an error. 
func (client SitesClient) GetSiteSourceControlSlotSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetSiteSourceControlSlotResponder handles the response to the GetSiteSourceControlSlot request. The method always @@ -5642,7 +6118,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -5657,21 +6133,21 @@ // name.value eq 'Metric2') and startTime eq '2014-01-01T00:00:00Z' and // endTime eq '2014-12-31T23:59:59Z' and timeGrain eq // duration'[Hour|Minute|Day]'. -func (client SitesClient) GetSiteUsages(resourceGroupName string, name string, filter string) (result CsmUsageQuotaCollection, ae error) { +func (client SitesClient) GetSiteUsages(resourceGroupName string, name string, filter string) (result CsmUsageQuotaCollection, err error) { req, err := client.GetSiteUsagesPreparer(resourceGroupName, name, filter) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "GetSiteUsages", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "GetSiteUsages", nil, "Failure preparing request") } resp, err := client.GetSiteUsagesSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "GetSiteUsages", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "GetSiteUsages", resp, "Failure sending request") } result, err = client.GetSiteUsagesResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "GetSiteUsages", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", "GetSiteUsages", resp, "Failure 
responding to request") } return @@ -5680,31 +6156,30 @@ // GetSiteUsagesPreparer prepares the GetSiteUsages request. func (client SitesClient) GetSiteUsagesPreparer(resourceGroupName string, name string, filter string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } if len(filter) > 0 { - queryParameters["$filter"] = filter + queryParameters["$filter"] = autorest.Encode("query", filter) } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/usages"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/usages", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetSiteUsagesSender sends the GetSiteUsages request. The method will close the // http.Response Body if it receives an error. func (client SitesClient) GetSiteUsagesSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetSiteUsagesResponder handles the response to the GetSiteUsages request. 
The method always @@ -5713,7 +6188,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -5729,21 +6204,21 @@ // or name.value eq 'Metric2') and startTime eq '2014-01-01T00:00:00Z' and // endTime eq '2014-12-31T23:59:59Z' and timeGrain eq // duration'[Hour|Minute|Day]'. -func (client SitesClient) GetSiteUsagesSlot(resourceGroupName string, name string, slot string, filter string) (result CsmUsageQuotaCollection, ae error) { +func (client SitesClient) GetSiteUsagesSlot(resourceGroupName string, name string, slot string, filter string) (result CsmUsageQuotaCollection, err error) { req, err := client.GetSiteUsagesSlotPreparer(resourceGroupName, name, slot, filter) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "GetSiteUsagesSlot", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "GetSiteUsagesSlot", nil, "Failure preparing request") } resp, err := client.GetSiteUsagesSlotSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "GetSiteUsagesSlot", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "GetSiteUsagesSlot", resp, "Failure sending request") } result, err = client.GetSiteUsagesSlotResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "GetSiteUsagesSlot", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", "GetSiteUsagesSlot", resp, "Failure responding to request") } return @@ -5752,32 +6227,31 @@ // GetSiteUsagesSlotPreparer prepares the GetSiteUsagesSlot request. 
func (client SitesClient) GetSiteUsagesSlotPreparer(resourceGroupName string, name string, slot string, filter string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "slot": url.QueryEscape(slot), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "slot": autorest.Encode("path", slot), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } if len(filter) > 0 { - queryParameters["$filter"] = filter + queryParameters["$filter"] = autorest.Encode("query", filter) } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/usages"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/usages", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetSiteUsagesSlotSender sends the GetSiteUsagesSlot request. The method will close the // http.Response Body if it receives an error. func (client SitesClient) GetSiteUsagesSlotSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetSiteUsagesSlotResponder handles the response to the GetSiteUsagesSlot request. 
The method always @@ -5786,7 +6260,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -5797,21 +6271,21 @@ // // resourceGroupName is the resource group name name is the name of the web // app vnetName is the name of the Virtual Network -func (client SitesClient) GetSiteVNETConnection(resourceGroupName string, name string, vnetName string) (result VnetInfo, ae error) { +func (client SitesClient) GetSiteVNETConnection(resourceGroupName string, name string, vnetName string) (result VnetInfo, err error) { req, err := client.GetSiteVNETConnectionPreparer(resourceGroupName, name, vnetName) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "GetSiteVNETConnection", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "GetSiteVNETConnection", nil, "Failure preparing request") } resp, err := client.GetSiteVNETConnectionSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "GetSiteVNETConnection", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "GetSiteVNETConnection", resp, "Failure sending request") } result, err = client.GetSiteVNETConnectionResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "GetSiteVNETConnection", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", "GetSiteVNETConnection", resp, "Failure responding to request") } return @@ -5820,29 +6294,28 @@ // GetSiteVNETConnectionPreparer prepares the GetSiteVNETConnection request. 
func (client SitesClient) GetSiteVNETConnectionPreparer(resourceGroupName string, name string, vnetName string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - "vnetName": url.QueryEscape(vnetName), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vnetName": autorest.Encode("path", vnetName), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/virtualNetworkConnections/{vnetName}"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/virtualNetworkConnections/{vnetName}", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetSiteVNETConnectionSender sends the GetSiteVNETConnection request. The method will close the // http.Response Body if it receives an error. func (client SitesClient) GetSiteVNETConnectionSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetSiteVNETConnectionResponder handles the response to the GetSiteVNETConnection request. 
The method always @@ -5851,7 +6324,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -5862,21 +6335,21 @@ // // resourceGroupName is the resource group name name is the name of the web // app -func (client SitesClient) GetSiteVNETConnections(resourceGroupName string, name string) (result VnetInfoList, ae error) { +func (client SitesClient) GetSiteVNETConnections(resourceGroupName string, name string) (result ListVnetInfo, err error) { req, err := client.GetSiteVNETConnectionsPreparer(resourceGroupName, name) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "GetSiteVNETConnections", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "GetSiteVNETConnections", nil, "Failure preparing request") } resp, err := client.GetSiteVNETConnectionsSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "GetSiteVNETConnections", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "GetSiteVNETConnections", resp, "Failure sending request") } result, err = client.GetSiteVNETConnectionsResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "GetSiteVNETConnections", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", "GetSiteVNETConnections", resp, "Failure responding to request") } return @@ -5885,37 +6358,36 @@ // GetSiteVNETConnectionsPreparer prepares the GetSiteVNETConnections request. 
func (client SitesClient) GetSiteVNETConnectionsPreparer(resourceGroupName string, name string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/virtualNetworkConnections"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/virtualNetworkConnections", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetSiteVNETConnectionsSender sends the GetSiteVNETConnections request. The method will close the // http.Response Body if it receives an error. func (client SitesClient) GetSiteVNETConnectionsSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetSiteVNETConnectionsResponder handles the response to the GetSiteVNETConnections request. The method always // closes the http.Response Body. 
-func (client SitesClient) GetSiteVNETConnectionsResponder(resp *http.Response) (result VnetInfoList, err error) { +func (client SitesClient) GetSiteVNETConnectionsResponder(resp *http.Response) (result ListVnetInfo, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result.Value), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -5927,21 +6399,21 @@ // resourceGroupName is the resource group name name is the name of the web // app vnetName is the name of the Virtual Network slot is the name of the // slot for this web app. -func (client SitesClient) GetSiteVNETConnectionSlot(resourceGroupName string, name string, vnetName string, slot string) (result VnetInfo, ae error) { +func (client SitesClient) GetSiteVNETConnectionSlot(resourceGroupName string, name string, vnetName string, slot string) (result VnetInfo, err error) { req, err := client.GetSiteVNETConnectionSlotPreparer(resourceGroupName, name, vnetName, slot) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "GetSiteVNETConnectionSlot", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "GetSiteVNETConnectionSlot", nil, "Failure preparing request") } resp, err := client.GetSiteVNETConnectionSlotSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "GetSiteVNETConnectionSlot", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "GetSiteVNETConnectionSlot", resp, "Failure sending request") } result, err = client.GetSiteVNETConnectionSlotResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "GetSiteVNETConnectionSlot", "Failure responding to request") + err = autorest.NewErrorWithError(err, 
"web.SitesClient", "GetSiteVNETConnectionSlot", resp, "Failure responding to request") } return @@ -5950,30 +6422,29 @@ // GetSiteVNETConnectionSlotPreparer prepares the GetSiteVNETConnectionSlot request. func (client SitesClient) GetSiteVNETConnectionSlotPreparer(resourceGroupName string, name string, vnetName string, slot string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "slot": url.QueryEscape(slot), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - "vnetName": url.QueryEscape(vnetName), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "slot": autorest.Encode("path", slot), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vnetName": autorest.Encode("path", vnetName), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/virtualNetworkConnections/{vnetName}"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/virtualNetworkConnections/{vnetName}", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetSiteVNETConnectionSlotSender sends the GetSiteVNETConnectionSlot request. The method will close the // http.Response Body if it receives an error. 
func (client SitesClient) GetSiteVNETConnectionSlotSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetSiteVNETConnectionSlotResponder handles the response to the GetSiteVNETConnectionSlot request. The method always @@ -5982,7 +6453,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -5993,21 +6464,21 @@ // // resourceGroupName is the resource group name name is the name of the web // app slot is the name of the slot for this web app. -func (client SitesClient) GetSiteVNETConnectionsSlot(resourceGroupName string, name string, slot string) (result VnetInfoList, ae error) { +func (client SitesClient) GetSiteVNETConnectionsSlot(resourceGroupName string, name string, slot string) (result ListVnetInfo, err error) { req, err := client.GetSiteVNETConnectionsSlotPreparer(resourceGroupName, name, slot) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "GetSiteVNETConnectionsSlot", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "GetSiteVNETConnectionsSlot", nil, "Failure preparing request") } resp, err := client.GetSiteVNETConnectionsSlotSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "GetSiteVNETConnectionsSlot", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "GetSiteVNETConnectionsSlot", resp, "Failure sending request") } result, err = client.GetSiteVNETConnectionsSlotResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "GetSiteVNETConnectionsSlot", "Failure responding to request") + err = 
autorest.NewErrorWithError(err, "web.SitesClient", "GetSiteVNETConnectionsSlot", resp, "Failure responding to request") } return @@ -6016,38 +6487,37 @@ // GetSiteVNETConnectionsSlotPreparer prepares the GetSiteVNETConnectionsSlot request. func (client SitesClient) GetSiteVNETConnectionsSlotPreparer(resourceGroupName string, name string, slot string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "slot": url.QueryEscape(slot), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "slot": autorest.Encode("path", slot), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/virtualNetworkConnections"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/virtualNetworkConnections", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetSiteVNETConnectionsSlotSender sends the GetSiteVNETConnectionsSlot request. The method will close the // http.Response Body if it receives an error. 
func (client SitesClient) GetSiteVNETConnectionsSlotSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetSiteVNETConnectionsSlotResponder handles the response to the GetSiteVNETConnectionsSlot request. The method always // closes the http.Response Body. -func (client SitesClient) GetSiteVNETConnectionsSlotResponder(resp *http.Response) (result VnetInfoList, err error) { +func (client SitesClient) GetSiteVNETConnectionsSlotResponder(resp *http.Response) (result ListVnetInfo, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result.Value), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -6059,21 +6529,21 @@ // resourceGroupName is the resource group name name is the name of the web // app vnetName is the name of the Virtual Network gatewayName is the name of // the gateway. 
The only gateway that exists presently is "primary" -func (client SitesClient) GetSiteVnetGateway(resourceGroupName string, name string, vnetName string, gatewayName string) (result ObjectSet, ae error) { +func (client SitesClient) GetSiteVnetGateway(resourceGroupName string, name string, vnetName string, gatewayName string) (result SetObject, err error) { req, err := client.GetSiteVnetGatewayPreparer(resourceGroupName, name, vnetName, gatewayName) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "GetSiteVnetGateway", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "GetSiteVnetGateway", nil, "Failure preparing request") } resp, err := client.GetSiteVnetGatewaySender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "GetSiteVnetGateway", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "GetSiteVnetGateway", resp, "Failure sending request") } result, err = client.GetSiteVnetGatewayResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "GetSiteVnetGateway", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", "GetSiteVnetGateway", resp, "Failure responding to request") } return @@ -6082,39 +6552,38 @@ // GetSiteVnetGatewayPreparer prepares the GetSiteVnetGateway request. 
func (client SitesClient) GetSiteVnetGatewayPreparer(resourceGroupName string, name string, vnetName string, gatewayName string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "gatewayName": url.QueryEscape(gatewayName), - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - "vnetName": url.QueryEscape(vnetName), + "gatewayName": autorest.Encode("path", gatewayName), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vnetName": autorest.Encode("path", vnetName), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/virtualNetworkConnections/{vnetName}/gateways/{gatewayName}"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/virtualNetworkConnections/{vnetName}/gateways/{gatewayName}", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetSiteVnetGatewaySender sends the GetSiteVnetGateway request. The method will close the // http.Response Body if it receives an error. func (client SitesClient) GetSiteVnetGatewaySender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK, http.StatusNotFound) + return autorest.SendWithSender(client, req) } // GetSiteVnetGatewayResponder handles the response to the GetSiteVnetGateway request. 
The method always // closes the http.Response Body. -func (client SitesClient) GetSiteVnetGatewayResponder(resp *http.Response) (result ObjectSet, err error) { +func (client SitesClient) GetSiteVnetGatewayResponder(resp *http.Response) (result SetObject, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNotFound), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNotFound), autorest.ByUnmarshallingJSON(&result.Value), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -6127,21 +6596,21 @@ // app vnetName is the name of the Virtual Network gatewayName is the name of // the gateway. The only gateway that exists presently is "primary" slot is // the name of the slot for this web app. -func (client SitesClient) GetSiteVnetGatewaySlot(resourceGroupName string, name string, vnetName string, gatewayName string, slot string) (result ObjectSet, ae error) { +func (client SitesClient) GetSiteVnetGatewaySlot(resourceGroupName string, name string, vnetName string, gatewayName string, slot string) (result SetObject, err error) { req, err := client.GetSiteVnetGatewaySlotPreparer(resourceGroupName, name, vnetName, gatewayName, slot) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "GetSiteVnetGatewaySlot", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "GetSiteVnetGatewaySlot", nil, "Failure preparing request") } resp, err := client.GetSiteVnetGatewaySlotSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "GetSiteVnetGatewaySlot", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "GetSiteVnetGatewaySlot", resp, "Failure sending request") } result, err = client.GetSiteVnetGatewaySlotResponder(resp) if err != nil { - ae = 
autorest.NewErrorWithError(err, "web/SitesClient", "GetSiteVnetGatewaySlot", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", "GetSiteVnetGatewaySlot", resp, "Failure responding to request") } return @@ -6150,40 +6619,39 @@ // GetSiteVnetGatewaySlotPreparer prepares the GetSiteVnetGatewaySlot request. func (client SitesClient) GetSiteVnetGatewaySlotPreparer(resourceGroupName string, name string, vnetName string, gatewayName string, slot string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "gatewayName": url.QueryEscape(gatewayName), - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "slot": url.QueryEscape(slot), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - "vnetName": url.QueryEscape(vnetName), + "gatewayName": autorest.Encode("path", gatewayName), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "slot": autorest.Encode("path", slot), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vnetName": autorest.Encode("path", vnetName), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/virtualNetworkConnections/{vnetName}/gateways/{gatewayName}"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/virtualNetworkConnections/{vnetName}/gateways/{gatewayName}", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // 
GetSiteVnetGatewaySlotSender sends the GetSiteVnetGatewaySlot request. The method will close the // http.Response Body if it receives an error. func (client SitesClient) GetSiteVnetGatewaySlotSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK, http.StatusNotFound) + return autorest.SendWithSender(client, req) } // GetSiteVnetGatewaySlotResponder handles the response to the GetSiteVnetGatewaySlot request. The method always // closes the http.Response Body. -func (client SitesClient) GetSiteVnetGatewaySlotResponder(resp *http.Response) (result ObjectSet, err error) { +func (client SitesClient) GetSiteVnetGatewaySlotResponder(resp *http.Response) (result SetObject, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNotFound), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNotFound), autorest.ByUnmarshallingJSON(&result.Value), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -6193,21 +6661,21 @@ // GetSlotConfigNames sends the get slot config names request. 
// // resourceGroupName is name of resource group name is name of web app -func (client SitesClient) GetSlotConfigNames(resourceGroupName string, name string) (result SlotConfigNamesResource, ae error) { +func (client SitesClient) GetSlotConfigNames(resourceGroupName string, name string) (result SlotConfigNamesResource, err error) { req, err := client.GetSlotConfigNamesPreparer(resourceGroupName, name) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "GetSlotConfigNames", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "GetSlotConfigNames", nil, "Failure preparing request") } resp, err := client.GetSlotConfigNamesSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "GetSlotConfigNames", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "GetSlotConfigNames", resp, "Failure sending request") } result, err = client.GetSlotConfigNamesResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "GetSlotConfigNames", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", "GetSlotConfigNames", resp, "Failure responding to request") } return @@ -6216,28 +6684,27 @@ // GetSlotConfigNamesPreparer prepares the GetSlotConfigNames request. 
func (client SitesClient) GetSlotConfigNamesPreparer(resourceGroupName string, name string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/config/slotConfigNames"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/config/slotConfigNames", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetSlotConfigNamesSender sends the GetSlotConfigNames request. The method will close the // http.Response Body if it receives an error. func (client SitesClient) GetSlotConfigNamesSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetSlotConfigNamesResponder handles the response to the GetSlotConfigNames request. 
The method always @@ -6246,7 +6713,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -6258,21 +6725,21 @@ // // resourceGroupName is name of resource group name is name of web app // slotSwapEntity is request body that contains the target slot name -func (client SitesClient) GetSlotsDifferencesFromProduction(resourceGroupName string, name string, slotSwapEntity CsmSlotEntity) (result SlotDifferenceCollection, ae error) { +func (client SitesClient) GetSlotsDifferencesFromProduction(resourceGroupName string, name string, slotSwapEntity CsmSlotEntity) (result SlotDifferenceCollection, err error) { req, err := client.GetSlotsDifferencesFromProductionPreparer(resourceGroupName, name, slotSwapEntity) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "GetSlotsDifferencesFromProduction", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "GetSlotsDifferencesFromProduction", nil, "Failure preparing request") } resp, err := client.GetSlotsDifferencesFromProductionSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "GetSlotsDifferencesFromProduction", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "GetSlotsDifferencesFromProduction", resp, "Failure sending request") } result, err = client.GetSlotsDifferencesFromProductionResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "GetSlotsDifferencesFromProduction", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", "GetSlotsDifferencesFromProduction", resp, "Failure responding to request") } return @@ -6281,29 
+6748,29 @@ // GetSlotsDifferencesFromProductionPreparer prepares the GetSlotsDifferencesFromProduction request. func (client SitesClient) GetSlotsDifferencesFromProductionPreparer(resourceGroupName string, name string, slotSwapEntity CsmSlotEntity) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, + preparer := autorest.CreatePreparer( autorest.AsJSON(), autorest.AsPost(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slotsdiffs"), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slotsdiffs", pathParameters), autorest.WithJSON(slotSwapEntity), - autorest.WithPathParameters(pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetSlotsDifferencesFromProductionSender sends the GetSlotsDifferencesFromProduction request. The method will close the // http.Response Body if it receives an error. func (client SitesClient) GetSlotsDifferencesFromProductionSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetSlotsDifferencesFromProductionResponder handles the response to the GetSlotsDifferencesFromProduction request. 
The method always @@ -6312,7 +6779,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -6324,21 +6791,21 @@ // resourceGroupName is name of resource group name is name of web app // slotSwapEntity is request body that contains the target slot name slot is // name of the source slot -func (client SitesClient) GetSlotsDifferencesSlot(resourceGroupName string, name string, slotSwapEntity CsmSlotEntity, slot string) (result SlotDifferenceCollection, ae error) { +func (client SitesClient) GetSlotsDifferencesSlot(resourceGroupName string, name string, slotSwapEntity CsmSlotEntity, slot string) (result SlotDifferenceCollection, err error) { req, err := client.GetSlotsDifferencesSlotPreparer(resourceGroupName, name, slotSwapEntity, slot) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "GetSlotsDifferencesSlot", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "GetSlotsDifferencesSlot", nil, "Failure preparing request") } resp, err := client.GetSlotsDifferencesSlotSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "GetSlotsDifferencesSlot", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "GetSlotsDifferencesSlot", resp, "Failure sending request") } result, err = client.GetSlotsDifferencesSlotResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "GetSlotsDifferencesSlot", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", "GetSlotsDifferencesSlot", resp, "Failure responding to request") } return @@ -6347,30 +6814,30 @@ // GetSlotsDifferencesSlotPreparer 
prepares the GetSlotsDifferencesSlot request. func (client SitesClient) GetSlotsDifferencesSlotPreparer(resourceGroupName string, name string, slotSwapEntity CsmSlotEntity, slot string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "slot": url.QueryEscape(slot), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "slot": autorest.Encode("path", slot), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, + preparer := autorest.CreatePreparer( autorest.AsJSON(), autorest.AsPost(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/slotsdiffs"), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/slotsdiffs", pathParameters), autorest.WithJSON(slotSwapEntity), - autorest.WithPathParameters(pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetSlotsDifferencesSlotSender sends the GetSlotsDifferencesSlot request. The method will close the // http.Response Body if it receives an error. func (client SitesClient) GetSlotsDifferencesSlotSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetSlotsDifferencesSlotResponder handles the response to the GetSlotsDifferencesSlot request. 
The method always @@ -6379,589 +6846,450 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// ListSiteAppSettings sends the list site app settings request. -// -// resourceGroupName is name of resource group name is name of web app -func (client SitesClient) ListSiteAppSettings(resourceGroupName string, name string) (result StringDictionary, ae error) { - req, err := client.ListSiteAppSettingsPreparer(resourceGroupName, name) - if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "ListSiteAppSettings", "Failure preparing request") - } - - resp, err := client.ListSiteAppSettingsSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "ListSiteAppSettings", "Failure sending request") - } - - result, err = client.ListSiteAppSettingsResponder(resp) - if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "ListSiteAppSettings", "Failure responding to request") - } - - return -} - -// ListSiteAppSettingsPreparer prepares the ListSiteAppSettings request. 
-func (client SitesClient) ListSiteAppSettingsPreparer(resourceGroupName string, name string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - } - - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsPost(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/config/appsettings/list"), - autorest.WithPathParameters(pathParameters), - autorest.WithQueryParameters(queryParameters)) -} - -// ListSiteAppSettingsSender sends the ListSiteAppSettings request. The method will close the -// http.Response Body if it receives an error. -func (client SitesClient) ListSiteAppSettingsSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) -} - -// ListSiteAppSettingsResponder handles the response to the ListSiteAppSettings request. The method always -// closes the http.Response Body. -func (client SitesClient) ListSiteAppSettingsResponder(resp *http.Response) (result StringDictionary, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// ListSiteAppSettingsSlot sends the list site app settings slot request. -// -// resourceGroupName is name of resource group name is name of web app slot is -// name of web app slot. If not specified then will default to production -// slot. 
-func (client SitesClient) ListSiteAppSettingsSlot(resourceGroupName string, name string, slot string) (result StringDictionary, ae error) { - req, err := client.ListSiteAppSettingsSlotPreparer(resourceGroupName, name, slot) - if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "ListSiteAppSettingsSlot", "Failure preparing request") - } - - resp, err := client.ListSiteAppSettingsSlotSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "ListSiteAppSettingsSlot", "Failure sending request") - } - - result, err = client.ListSiteAppSettingsSlotResponder(resp) - if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "ListSiteAppSettingsSlot", "Failure responding to request") - } - - return -} - -// ListSiteAppSettingsSlotPreparer prepares the ListSiteAppSettingsSlot request. -func (client SitesClient) ListSiteAppSettingsSlotPreparer(resourceGroupName string, name string, slot string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "slot": url.QueryEscape(slot), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - } - - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsPost(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/config/appsettings/list"), - autorest.WithPathParameters(pathParameters), - autorest.WithQueryParameters(queryParameters)) -} - -// ListSiteAppSettingsSlotSender sends the ListSiteAppSettingsSlot request. The method will close the -// http.Response Body if it receives an error. 
-func (client SitesClient) ListSiteAppSettingsSlotSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) -} - -// ListSiteAppSettingsSlotResponder handles the response to the ListSiteAppSettingsSlot request. The method always -// closes the http.Response Body. -func (client SitesClient) ListSiteAppSettingsSlotResponder(resp *http.Response) (result StringDictionary, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} return } -// ListSiteAuthSettings sends the list site auth settings request. +// IsSiteCloneable sends the is site cloneable request. // -// resourceGroupName is name of resource group name is name of web app -func (client SitesClient) ListSiteAuthSettings(resourceGroupName string, name string) (result SiteAuthSettings, ae error) { - req, err := client.ListSiteAuthSettingsPreparer(resourceGroupName, name) +// resourceGroupName is name of the resource group name is name of the web app +func (client SitesClient) IsSiteCloneable(resourceGroupName string, name string) (result SiteCloneability, err error) { + req, err := client.IsSiteCloneablePreparer(resourceGroupName, name) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "ListSiteAuthSettings", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "IsSiteCloneable", nil, "Failure preparing request") } - resp, err := client.ListSiteAuthSettingsSender(req) + resp, err := client.IsSiteCloneableSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "ListSiteAuthSettings", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", 
"IsSiteCloneable", resp, "Failure sending request") } - result, err = client.ListSiteAuthSettingsResponder(resp) + result, err = client.IsSiteCloneableResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "ListSiteAuthSettings", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", "IsSiteCloneable", resp, "Failure responding to request") } return } -// ListSiteAuthSettingsPreparer prepares the ListSiteAuthSettings request. -func (client SitesClient) ListSiteAuthSettingsPreparer(resourceGroupName string, name string) (*http.Request, error) { +// IsSiteCloneablePreparer prepares the IsSiteCloneable request. +func (client SitesClient) IsSiteCloneablePreparer(resourceGroupName string, name string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsPost(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/config/authsettings/list"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/iscloneable", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } -// ListSiteAuthSettingsSender sends the ListSiteAuthSettings request. 
The method will close the +// IsSiteCloneableSender sends the IsSiteCloneable request. The method will close the // http.Response Body if it receives an error. -func (client SitesClient) ListSiteAuthSettingsSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) +func (client SitesClient) IsSiteCloneableSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) } -// ListSiteAuthSettingsResponder handles the response to the ListSiteAuthSettings request. The method always +// IsSiteCloneableResponder handles the response to the IsSiteCloneable request. The method always // closes the http.Response Body. -func (client SitesClient) ListSiteAuthSettingsResponder(resp *http.Response) (result SiteAuthSettings, err error) { +func (client SitesClient) IsSiteCloneableResponder(resp *http.Response) (result SiteCloneability, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} return } -// ListSiteAuthSettingsSlot sends the list site auth settings slot request. +// IsSiteCloneableSlot sends the is site cloneable slot request. // -// resourceGroupName is name of resource group name is name of web app slot is -// name of web app slot. If not specified then will default to production -// slot. -func (client SitesClient) ListSiteAuthSettingsSlot(resourceGroupName string, name string, slot string) (result SiteAuthSettings, ae error) { - req, err := client.ListSiteAuthSettingsSlotPreparer(resourceGroupName, name, slot) +// resourceGroupName is name of the resource group name is name of the web app +// slot is name of web app slot. If not specified then will default to +// production slot. 
+func (client SitesClient) IsSiteCloneableSlot(resourceGroupName string, name string, slot string) (result SiteCloneability, err error) { + req, err := client.IsSiteCloneableSlotPreparer(resourceGroupName, name, slot) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "ListSiteAuthSettingsSlot", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "IsSiteCloneableSlot", nil, "Failure preparing request") } - resp, err := client.ListSiteAuthSettingsSlotSender(req) + resp, err := client.IsSiteCloneableSlotSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "ListSiteAuthSettingsSlot", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "IsSiteCloneableSlot", resp, "Failure sending request") } - result, err = client.ListSiteAuthSettingsSlotResponder(resp) + result, err = client.IsSiteCloneableSlotResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "ListSiteAuthSettingsSlot", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", "IsSiteCloneableSlot", resp, "Failure responding to request") } return } -// ListSiteAuthSettingsSlotPreparer prepares the ListSiteAuthSettingsSlot request. -func (client SitesClient) ListSiteAuthSettingsSlotPreparer(resourceGroupName string, name string, slot string) (*http.Request, error) { +// IsSiteCloneableSlotPreparer prepares the IsSiteCloneableSlot request. 
+func (client SitesClient) IsSiteCloneableSlotPreparer(resourceGroupName string, name string, slot string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "slot": url.QueryEscape(slot), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "slot": autorest.Encode("path", slot), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsPost(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/config/authsettings/list"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/iscloneable", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } -// ListSiteAuthSettingsSlotSender sends the ListSiteAuthSettingsSlot request. The method will close the +// IsSiteCloneableSlotSender sends the IsSiteCloneableSlot request. The method will close the // http.Response Body if it receives an error. -func (client SitesClient) ListSiteAuthSettingsSlotSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) +func (client SitesClient) IsSiteCloneableSlotSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) } -// ListSiteAuthSettingsSlotResponder handles the response to the ListSiteAuthSettingsSlot request. 
The method always +// IsSiteCloneableSlotResponder handles the response to the IsSiteCloneableSlot request. The method always // closes the http.Response Body. -func (client SitesClient) ListSiteAuthSettingsSlotResponder(resp *http.Response) (result SiteAuthSettings, err error) { +func (client SitesClient) IsSiteCloneableSlotResponder(resp *http.Response) (result SiteCloneability, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} return } -// ListSiteBackupConfigurationDeprecated sends the list site backup -// configuration deprecated request. +// ListSiteAppSettings sends the list site app settings request. // // resourceGroupName is name of resource group name is name of web app -func (client SitesClient) ListSiteBackupConfigurationDeprecated(resourceGroupName string, name string) (result BackupRequest, ae error) { - req, err := client.ListSiteBackupConfigurationDeprecatedPreparer(resourceGroupName, name) +func (client SitesClient) ListSiteAppSettings(resourceGroupName string, name string) (result StringDictionary, err error) { + req, err := client.ListSiteAppSettingsPreparer(resourceGroupName, name) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "ListSiteBackupConfigurationDeprecated", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "ListSiteAppSettings", nil, "Failure preparing request") } - resp, err := client.ListSiteBackupConfigurationDeprecatedSender(req) + resp, err := client.ListSiteAppSettingsSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "ListSiteBackupConfigurationDeprecated", "Failure sending request") + return result, 
autorest.NewErrorWithError(err, "web.SitesClient", "ListSiteAppSettings", resp, "Failure sending request") } - result, err = client.ListSiteBackupConfigurationDeprecatedResponder(resp) + result, err = client.ListSiteAppSettingsResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "ListSiteBackupConfigurationDeprecated", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", "ListSiteAppSettings", resp, "Failure responding to request") } return } -// ListSiteBackupConfigurationDeprecatedPreparer prepares the ListSiteBackupConfigurationDeprecated request. -func (client SitesClient) ListSiteBackupConfigurationDeprecatedPreparer(resourceGroupName string, name string) (*http.Request, error) { +// ListSiteAppSettingsPreparer prepares the ListSiteAppSettings request. +func (client SitesClient) ListSiteAppSettingsPreparer(resourceGroupName string, name string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsPost(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/backup/config"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/config/appsettings/list", pathParameters), 
autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } -// ListSiteBackupConfigurationDeprecatedSender sends the ListSiteBackupConfigurationDeprecated request. The method will close the +// ListSiteAppSettingsSender sends the ListSiteAppSettings request. The method will close the // http.Response Body if it receives an error. -func (client SitesClient) ListSiteBackupConfigurationDeprecatedSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) +func (client SitesClient) ListSiteAppSettingsSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) } -// ListSiteBackupConfigurationDeprecatedResponder handles the response to the ListSiteBackupConfigurationDeprecated request. The method always +// ListSiteAppSettingsResponder handles the response to the ListSiteAppSettings request. The method always // closes the http.Response Body. -func (client SitesClient) ListSiteBackupConfigurationDeprecatedResponder(resp *http.Response) (result BackupRequest, err error) { +func (client SitesClient) ListSiteAppSettingsResponder(resp *http.Response) (result StringDictionary, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} return } -// ListSiteBackupConfigurationDeprecatedSlot sends the list site backup -// configuration deprecated slot request. +// ListSiteAppSettingsSlot sends the list site app settings slot request. // // resourceGroupName is name of resource group name is name of web app slot is // name of web app slot. If not specified then will default to production // slot. 
-func (client SitesClient) ListSiteBackupConfigurationDeprecatedSlot(resourceGroupName string, name string, slot string) (result BackupRequest, ae error) { - req, err := client.ListSiteBackupConfigurationDeprecatedSlotPreparer(resourceGroupName, name, slot) +func (client SitesClient) ListSiteAppSettingsSlot(resourceGroupName string, name string, slot string) (result StringDictionary, err error) { + req, err := client.ListSiteAppSettingsSlotPreparer(resourceGroupName, name, slot) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "ListSiteBackupConfigurationDeprecatedSlot", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "ListSiteAppSettingsSlot", nil, "Failure preparing request") } - resp, err := client.ListSiteBackupConfigurationDeprecatedSlotSender(req) + resp, err := client.ListSiteAppSettingsSlotSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "ListSiteBackupConfigurationDeprecatedSlot", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "ListSiteAppSettingsSlot", resp, "Failure sending request") } - result, err = client.ListSiteBackupConfigurationDeprecatedSlotResponder(resp) + result, err = client.ListSiteAppSettingsSlotResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "ListSiteBackupConfigurationDeprecatedSlot", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", "ListSiteAppSettingsSlot", resp, "Failure responding to request") } return } -// ListSiteBackupConfigurationDeprecatedSlotPreparer prepares the ListSiteBackupConfigurationDeprecatedSlot request. 
-func (client SitesClient) ListSiteBackupConfigurationDeprecatedSlotPreparer(resourceGroupName string, name string, slot string) (*http.Request, error) { +// ListSiteAppSettingsSlotPreparer prepares the ListSiteAppSettingsSlot request. +func (client SitesClient) ListSiteAppSettingsSlotPreparer(resourceGroupName string, name string, slot string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "slot": url.QueryEscape(slot), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "slot": autorest.Encode("path", slot), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsPost(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/backup/config"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/config/appsettings/list", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } -// ListSiteBackupConfigurationDeprecatedSlotSender sends the ListSiteBackupConfigurationDeprecatedSlot request. The method will close the +// ListSiteAppSettingsSlotSender sends the ListSiteAppSettingsSlot request. The method will close the // http.Response Body if it receives an error. 
-func (client SitesClient) ListSiteBackupConfigurationDeprecatedSlotSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) +func (client SitesClient) ListSiteAppSettingsSlotSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) } -// ListSiteBackupConfigurationDeprecatedSlotResponder handles the response to the ListSiteBackupConfigurationDeprecatedSlot request. The method always +// ListSiteAppSettingsSlotResponder handles the response to the ListSiteAppSettingsSlot request. The method always // closes the http.Response Body. -func (client SitesClient) ListSiteBackupConfigurationDeprecatedSlotResponder(resp *http.Response) (result BackupRequest, err error) { +func (client SitesClient) ListSiteAppSettingsSlotResponder(resp *http.Response) (result StringDictionary, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} return } -// ListSiteBackups sends the list site backups request. +// ListSiteAuthSettings sends the list site auth settings request. 
// // resourceGroupName is name of resource group name is name of web app -func (client SitesClient) ListSiteBackups(resourceGroupName string, name string) (result BackupItemCollection, ae error) { - req, err := client.ListSiteBackupsPreparer(resourceGroupName, name) +func (client SitesClient) ListSiteAuthSettings(resourceGroupName string, name string) (result SiteAuthSettings, err error) { + req, err := client.ListSiteAuthSettingsPreparer(resourceGroupName, name) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "ListSiteBackups", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "ListSiteAuthSettings", nil, "Failure preparing request") } - resp, err := client.ListSiteBackupsSender(req) + resp, err := client.ListSiteAuthSettingsSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "ListSiteBackups", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "ListSiteAuthSettings", resp, "Failure sending request") } - result, err = client.ListSiteBackupsResponder(resp) + result, err = client.ListSiteAuthSettingsResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "ListSiteBackups", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", "ListSiteAuthSettings", resp, "Failure responding to request") } return } -// ListSiteBackupsPreparer prepares the ListSiteBackups request. -func (client SitesClient) ListSiteBackupsPreparer(resourceGroupName string, name string) (*http.Request, error) { +// ListSiteAuthSettingsPreparer prepares the ListSiteAuthSettings request. 
+func (client SitesClient) ListSiteAuthSettingsPreparer(resourceGroupName string, name string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsGet(), + preparer := autorest.CreatePreparer( + autorest.AsPost(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/backups"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/config/authsettings/list", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } -// ListSiteBackupsSender sends the ListSiteBackups request. The method will close the +// ListSiteAuthSettingsSender sends the ListSiteAuthSettings request. The method will close the // http.Response Body if it receives an error. -func (client SitesClient) ListSiteBackupsSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) +func (client SitesClient) ListSiteAuthSettingsSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) } -// ListSiteBackupsResponder handles the response to the ListSiteBackups request. The method always +// ListSiteAuthSettingsResponder handles the response to the ListSiteAuthSettings request. The method always // closes the http.Response Body. 
-func (client SitesClient) ListSiteBackupsResponder(resp *http.Response) (result BackupItemCollection, err error) { +func (client SitesClient) ListSiteAuthSettingsResponder(resp *http.Response) (result SiteAuthSettings, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} return } -// ListSiteBackupsDeprecated sends the list site backups deprecated request. +// ListSiteAuthSettingsSlot sends the list site auth settings slot request. // -// resourceGroupName is name of resource group name is name of web app -func (client SitesClient) ListSiteBackupsDeprecated(resourceGroupName string, name string) (result BackupItemCollection, ae error) { - req, err := client.ListSiteBackupsDeprecatedPreparer(resourceGroupName, name) +// resourceGroupName is name of resource group name is name of web app slot is +// name of web app slot. If not specified then will default to production +// slot. 
+func (client SitesClient) ListSiteAuthSettingsSlot(resourceGroupName string, name string, slot string) (result SiteAuthSettings, err error) { + req, err := client.ListSiteAuthSettingsSlotPreparer(resourceGroupName, name, slot) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "ListSiteBackupsDeprecated", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "ListSiteAuthSettingsSlot", nil, "Failure preparing request") } - resp, err := client.ListSiteBackupsDeprecatedSender(req) + resp, err := client.ListSiteAuthSettingsSlotSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "ListSiteBackupsDeprecated", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "ListSiteAuthSettingsSlot", resp, "Failure sending request") } - result, err = client.ListSiteBackupsDeprecatedResponder(resp) + result, err = client.ListSiteAuthSettingsSlotResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "ListSiteBackupsDeprecated", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", "ListSiteAuthSettingsSlot", resp, "Failure responding to request") } return } -// ListSiteBackupsDeprecatedPreparer prepares the ListSiteBackupsDeprecated request. -func (client SitesClient) ListSiteBackupsDeprecatedPreparer(resourceGroupName string, name string) (*http.Request, error) { +// ListSiteAuthSettingsSlotPreparer prepares the ListSiteAuthSettingsSlot request. 
+func (client SitesClient) ListSiteAuthSettingsSlotPreparer(resourceGroupName string, name string, slot string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "slot": autorest.Encode("path", slot), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsGet(), + preparer := autorest.CreatePreparer( + autorest.AsPost(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/restore"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/config/authsettings/list", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } -// ListSiteBackupsDeprecatedSender sends the ListSiteBackupsDeprecated request. The method will close the +// ListSiteAuthSettingsSlotSender sends the ListSiteAuthSettingsSlot request. The method will close the // http.Response Body if it receives an error. -func (client SitesClient) ListSiteBackupsDeprecatedSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) +func (client SitesClient) ListSiteAuthSettingsSlotSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) } -// ListSiteBackupsDeprecatedResponder handles the response to the ListSiteBackupsDeprecated request. 
The method always +// ListSiteAuthSettingsSlotResponder handles the response to the ListSiteAuthSettingsSlot request. The method always // closes the http.Response Body. -func (client SitesClient) ListSiteBackupsDeprecatedResponder(resp *http.Response) (result BackupItemCollection, err error) { +func (client SitesClient) ListSiteAuthSettingsSlotResponder(resp *http.Response) (result SiteAuthSettings, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} return } -// ListSiteBackupsDeprecatedSlot sends the list site backups deprecated slot -// request. +// ListSiteBackups sends the list site backups request. // -// resourceGroupName is name of resource group name is name of web app slot is -// name of web app slot. If not specified then will default to production -// slot. 
-func (client SitesClient) ListSiteBackupsDeprecatedSlot(resourceGroupName string, name string, slot string) (result BackupItemCollection, ae error) { - req, err := client.ListSiteBackupsDeprecatedSlotPreparer(resourceGroupName, name, slot) +// resourceGroupName is name of resource group name is name of web app +func (client SitesClient) ListSiteBackups(resourceGroupName string, name string) (result BackupItemCollection, err error) { + req, err := client.ListSiteBackupsPreparer(resourceGroupName, name) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "ListSiteBackupsDeprecatedSlot", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "ListSiteBackups", nil, "Failure preparing request") } - resp, err := client.ListSiteBackupsDeprecatedSlotSender(req) + resp, err := client.ListSiteBackupsSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "ListSiteBackupsDeprecatedSlot", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "ListSiteBackups", resp, "Failure sending request") } - result, err = client.ListSiteBackupsDeprecatedSlotResponder(resp) + result, err = client.ListSiteBackupsResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "ListSiteBackupsDeprecatedSlot", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", "ListSiteBackups", resp, "Failure responding to request") } return } -// ListSiteBackupsDeprecatedSlotPreparer prepares the ListSiteBackupsDeprecatedSlot request. -func (client SitesClient) ListSiteBackupsDeprecatedSlotPreparer(resourceGroupName string, name string, slot string) (*http.Request, error) { +// ListSiteBackupsPreparer prepares the ListSiteBackups request. 
+func (client SitesClient) ListSiteBackupsPreparer(resourceGroupName string, name string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "slot": url.QueryEscape(slot), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/restore"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/backups", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } -// ListSiteBackupsDeprecatedSlotSender sends the ListSiteBackupsDeprecatedSlot request. The method will close the +// ListSiteBackupsSender sends the ListSiteBackups request. The method will close the // http.Response Body if it receives an error. -func (client SitesClient) ListSiteBackupsDeprecatedSlotSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) +func (client SitesClient) ListSiteBackupsSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) } -// ListSiteBackupsDeprecatedSlotResponder handles the response to the ListSiteBackupsDeprecatedSlot request. The method always +// ListSiteBackupsResponder handles the response to the ListSiteBackups request. 
The method always // closes the http.Response Body. -func (client SitesClient) ListSiteBackupsDeprecatedSlotResponder(resp *http.Response) (result BackupItemCollection, err error) { +func (client SitesClient) ListSiteBackupsResponder(resp *http.Response) (result BackupItemCollection, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -6973,21 +7301,21 @@ // resourceGroupName is name of resource group name is name of web app slot is // name of web app slot. If not specified then will default to production // slot. -func (client SitesClient) ListSiteBackupsSlot(resourceGroupName string, name string, slot string) (result BackupItemCollection, ae error) { +func (client SitesClient) ListSiteBackupsSlot(resourceGroupName string, name string, slot string) (result BackupItemCollection, err error) { req, err := client.ListSiteBackupsSlotPreparer(resourceGroupName, name, slot) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "ListSiteBackupsSlot", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "ListSiteBackupsSlot", nil, "Failure preparing request") } resp, err := client.ListSiteBackupsSlotSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "ListSiteBackupsSlot", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "ListSiteBackupsSlot", resp, "Failure sending request") } result, err = client.ListSiteBackupsSlotResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "ListSiteBackupsSlot", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", 
"ListSiteBackupsSlot", resp, "Failure responding to request") } return @@ -6996,29 +7324,28 @@ // ListSiteBackupsSlotPreparer prepares the ListSiteBackupsSlot request. func (client SitesClient) ListSiteBackupsSlotPreparer(resourceGroupName string, name string, slot string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "slot": url.QueryEscape(slot), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "slot": autorest.Encode("path", slot), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/backups"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/backups", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // ListSiteBackupsSlotSender sends the ListSiteBackupsSlot request. The method will close the // http.Response Body if it receives an error. func (client SitesClient) ListSiteBackupsSlotSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // ListSiteBackupsSlotResponder handles the response to the ListSiteBackupsSlot request. 
The method always @@ -7027,7 +7354,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -7037,21 +7364,21 @@ // ListSiteConnectionStrings sends the list site connection strings request. // // resourceGroupName is name of resource group name is name of web app -func (client SitesClient) ListSiteConnectionStrings(resourceGroupName string, name string) (result ConnectionStringDictionary, ae error) { +func (client SitesClient) ListSiteConnectionStrings(resourceGroupName string, name string) (result ConnectionStringDictionary, err error) { req, err := client.ListSiteConnectionStringsPreparer(resourceGroupName, name) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "ListSiteConnectionStrings", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "ListSiteConnectionStrings", nil, "Failure preparing request") } resp, err := client.ListSiteConnectionStringsSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "ListSiteConnectionStrings", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "ListSiteConnectionStrings", resp, "Failure sending request") } result, err = client.ListSiteConnectionStringsResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "ListSiteConnectionStrings", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", "ListSiteConnectionStrings", resp, "Failure responding to request") } return @@ -7060,28 +7387,27 @@ // ListSiteConnectionStringsPreparer prepares the ListSiteConnectionStrings request. 
func (client SitesClient) ListSiteConnectionStringsPreparer(resourceGroupName string, name string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsPost(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/config/connectionstrings/list"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/config/connectionstrings/list", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // ListSiteConnectionStringsSender sends the ListSiteConnectionStrings request. The method will close the // http.Response Body if it receives an error. func (client SitesClient) ListSiteConnectionStringsSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // ListSiteConnectionStringsResponder handles the response to the ListSiteConnectionStrings request. 
The method always @@ -7090,7 +7416,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -7103,21 +7429,21 @@ // resourceGroupName is name of resource group name is name of web app slot is // name of web app slot. If not specified then will default to production // slot. -func (client SitesClient) ListSiteConnectionStringsSlot(resourceGroupName string, name string, slot string) (result ConnectionStringDictionary, ae error) { +func (client SitesClient) ListSiteConnectionStringsSlot(resourceGroupName string, name string, slot string) (result ConnectionStringDictionary, err error) { req, err := client.ListSiteConnectionStringsSlotPreparer(resourceGroupName, name, slot) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "ListSiteConnectionStringsSlot", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "ListSiteConnectionStringsSlot", nil, "Failure preparing request") } resp, err := client.ListSiteConnectionStringsSlotSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "ListSiteConnectionStringsSlot", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "ListSiteConnectionStringsSlot", resp, "Failure sending request") } result, err = client.ListSiteConnectionStringsSlotResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "ListSiteConnectionStringsSlot", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", "ListSiteConnectionStringsSlot", resp, "Failure responding to request") } return @@ -7126,29 +7452,28 @@ // ListSiteConnectionStringsSlotPreparer prepares the 
ListSiteConnectionStringsSlot request. func (client SitesClient) ListSiteConnectionStringsSlotPreparer(resourceGroupName string, name string, slot string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "slot": url.QueryEscape(slot), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "slot": autorest.Encode("path", slot), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsPost(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/config/connectionstrings/list"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/config/connectionstrings/list", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // ListSiteConnectionStringsSlotSender sends the ListSiteConnectionStringsSlot request. The method will close the // http.Response Body if it receives an error. func (client SitesClient) ListSiteConnectionStringsSlotSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // ListSiteConnectionStringsSlotResponder handles the response to the ListSiteConnectionStringsSlot request. 
The method always @@ -7157,7 +7482,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -7167,21 +7492,21 @@ // ListSiteMetadata sends the list site metadata request. // // resourceGroupName is name of resource group name is name of web app -func (client SitesClient) ListSiteMetadata(resourceGroupName string, name string) (result StringDictionary, ae error) { +func (client SitesClient) ListSiteMetadata(resourceGroupName string, name string) (result StringDictionary, err error) { req, err := client.ListSiteMetadataPreparer(resourceGroupName, name) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "ListSiteMetadata", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "ListSiteMetadata", nil, "Failure preparing request") } resp, err := client.ListSiteMetadataSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "ListSiteMetadata", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "ListSiteMetadata", resp, "Failure sending request") } result, err = client.ListSiteMetadataResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "ListSiteMetadata", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", "ListSiteMetadata", resp, "Failure responding to request") } return @@ -7190,28 +7515,27 @@ // ListSiteMetadataPreparer prepares the ListSiteMetadata request. 
func (client SitesClient) ListSiteMetadataPreparer(resourceGroupName string, name string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsPost(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/config/metadata/list"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/config/metadata/list", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // ListSiteMetadataSender sends the ListSiteMetadata request. The method will close the // http.Response Body if it receives an error. func (client SitesClient) ListSiteMetadataSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // ListSiteMetadataResponder handles the response to the ListSiteMetadata request. 
The method always @@ -7220,7 +7544,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -7232,21 +7556,21 @@ // resourceGroupName is name of resource group name is name of web app slot is // name of web app slot. If not specified then will default to production // slot. -func (client SitesClient) ListSiteMetadataSlot(resourceGroupName string, name string, slot string) (result StringDictionary, ae error) { +func (client SitesClient) ListSiteMetadataSlot(resourceGroupName string, name string, slot string) (result StringDictionary, err error) { req, err := client.ListSiteMetadataSlotPreparer(resourceGroupName, name, slot) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "ListSiteMetadataSlot", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "ListSiteMetadataSlot", nil, "Failure preparing request") } resp, err := client.ListSiteMetadataSlotSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "ListSiteMetadataSlot", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "ListSiteMetadataSlot", resp, "Failure sending request") } result, err = client.ListSiteMetadataSlotResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "ListSiteMetadataSlot", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", "ListSiteMetadataSlot", resp, "Failure responding to request") } return @@ -7255,29 +7579,28 @@ // ListSiteMetadataSlotPreparer prepares the ListSiteMetadataSlot request. 
func (client SitesClient) ListSiteMetadataSlotPreparer(resourceGroupName string, name string, slot string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "slot": url.QueryEscape(slot), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "slot": autorest.Encode("path", slot), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsPost(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/config/metadata/list"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/config/metadata/list", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // ListSiteMetadataSlotSender sends the ListSiteMetadataSlot request. The method will close the // http.Response Body if it receives an error. func (client SitesClient) ListSiteMetadataSlotSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // ListSiteMetadataSlotResponder handles the response to the ListSiteMetadataSlot request. 
The method always @@ -7286,7 +7609,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -7295,21 +7618,21 @@ // ListSitePremierAddOns sends the list site premier add ons request. // -func (client SitesClient) ListSitePremierAddOns(resourceGroupName string, name string) (result ObjectSet, ae error) { +func (client SitesClient) ListSitePremierAddOns(resourceGroupName string, name string) (result SetObject, err error) { req, err := client.ListSitePremierAddOnsPreparer(resourceGroupName, name) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "ListSitePremierAddOns", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "ListSitePremierAddOns", nil, "Failure preparing request") } resp, err := client.ListSitePremierAddOnsSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "ListSitePremierAddOns", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "ListSitePremierAddOns", resp, "Failure sending request") } result, err = client.ListSitePremierAddOnsResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "ListSitePremierAddOns", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", "ListSitePremierAddOns", resp, "Failure responding to request") } return @@ -7318,37 +7641,36 @@ // ListSitePremierAddOnsPreparer prepares the ListSitePremierAddOns request. 
func (client SitesClient) ListSitePremierAddOnsPreparer(resourceGroupName string, name string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/premieraddons"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/premieraddons", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // ListSitePremierAddOnsSender sends the ListSitePremierAddOns request. The method will close the // http.Response Body if it receives an error. func (client SitesClient) ListSitePremierAddOnsSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // ListSitePremierAddOnsResponder handles the response to the ListSitePremierAddOns request. The method always // closes the http.Response Body. 
-func (client SitesClient) ListSitePremierAddOnsResponder(resp *http.Response) (result ObjectSet, err error) { +func (client SitesClient) ListSitePremierAddOnsResponder(resp *http.Response) (result SetObject, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result.Value), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -7357,21 +7679,21 @@ // ListSitePremierAddOnsSlot sends the list site premier add ons slot request. // -func (client SitesClient) ListSitePremierAddOnsSlot(resourceGroupName string, name string, slot string) (result ObjectSet, ae error) { +func (client SitesClient) ListSitePremierAddOnsSlot(resourceGroupName string, name string, slot string) (result SetObject, err error) { req, err := client.ListSitePremierAddOnsSlotPreparer(resourceGroupName, name, slot) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "ListSitePremierAddOnsSlot", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "ListSitePremierAddOnsSlot", nil, "Failure preparing request") } resp, err := client.ListSitePremierAddOnsSlotSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "ListSitePremierAddOnsSlot", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "ListSitePremierAddOnsSlot", resp, "Failure sending request") } result, err = client.ListSitePremierAddOnsSlotResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "ListSitePremierAddOnsSlot", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", "ListSitePremierAddOnsSlot", resp, "Failure responding to request") } return @@ -7380,38 +7702,37 @@ // 
ListSitePremierAddOnsSlotPreparer prepares the ListSitePremierAddOnsSlot request. func (client SitesClient) ListSitePremierAddOnsSlotPreparer(resourceGroupName string, name string, slot string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "slot": url.QueryEscape(slot), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "slot": autorest.Encode("path", slot), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/premieraddons"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/premieraddons", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // ListSitePremierAddOnsSlotSender sends the ListSitePremierAddOnsSlot request. The method will close the // http.Response Body if it receives an error. func (client SitesClient) ListSitePremierAddOnsSlotSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // ListSitePremierAddOnsSlotResponder handles the response to the ListSitePremierAddOnsSlot request. The method always // closes the http.Response Body. 
-func (client SitesClient) ListSitePremierAddOnsSlotResponder(resp *http.Response) (result ObjectSet, err error) { +func (client SitesClient) ListSitePremierAddOnsSlotResponder(resp *http.Response) (result SetObject, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result.Value), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -7419,133 +7740,137 @@ } // ListSitePublishingCredentials sends the list site publishing credentials -// request. +// request. This method may poll for completion. Polling can be canceled by +// passing the cancel channel argument. The channel will be used to cancel +// polling and any outstanding HTTP requests. // // resourceGroupName is name of resource group name is name of web app -func (client SitesClient) ListSitePublishingCredentials(resourceGroupName string, name string) (result User, ae error) { - req, err := client.ListSitePublishingCredentialsPreparer(resourceGroupName, name) +func (client SitesClient) ListSitePublishingCredentials(resourceGroupName string, name string, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.ListSitePublishingCredentialsPreparer(resourceGroupName, name, cancel) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "ListSitePublishingCredentials", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "ListSitePublishingCredentials", nil, "Failure preparing request") } resp, err := client.ListSitePublishingCredentialsSender(req) if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "ListSitePublishingCredentials", "Failure sending request") + result.Response = resp + return result, autorest.NewErrorWithError(err, "web.SitesClient", 
"ListSitePublishingCredentials", resp, "Failure sending request") } result, err = client.ListSitePublishingCredentialsResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "ListSitePublishingCredentials", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", "ListSitePublishingCredentials", resp, "Failure responding to request") } return } // ListSitePublishingCredentialsPreparer prepares the ListSitePublishingCredentials request. -func (client SitesClient) ListSitePublishingCredentialsPreparer(resourceGroupName string, name string) (*http.Request, error) { +func (client SitesClient) ListSitePublishingCredentialsPreparer(resourceGroupName string, name string, cancel <-chan struct{}) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsPost(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/config/publishingcredentials/list"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/config/publishingcredentials/list", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) } // ListSitePublishingCredentialsSender sends the 
ListSitePublishingCredentials request. The method will close the // http.Response Body if it receives an error. func (client SitesClient) ListSitePublishingCredentialsSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) } // ListSitePublishingCredentialsResponder handles the response to the ListSitePublishingCredentials request. The method always // closes the http.Response Body. -func (client SitesClient) ListSitePublishingCredentialsResponder(resp *http.Response) (result User, err error) { +func (client SitesClient) ListSitePublishingCredentialsResponder(resp *http.Response) (result autorest.Response, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} + result.Response = resp return } // ListSitePublishingCredentialsSlot sends the list site publishing -// credentials slot request. +// credentials slot request. This method may poll for completion. Polling can +// be canceled by passing the cancel channel argument. The channel will be +// used to cancel polling and any outstanding HTTP requests. // // resourceGroupName is name of resource group name is name of web app slot is // name of web app slot. If not specified then will default to production // slot. 
-func (client SitesClient) ListSitePublishingCredentialsSlot(resourceGroupName string, name string, slot string) (result User, ae error) { - req, err := client.ListSitePublishingCredentialsSlotPreparer(resourceGroupName, name, slot) +func (client SitesClient) ListSitePublishingCredentialsSlot(resourceGroupName string, name string, slot string, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.ListSitePublishingCredentialsSlotPreparer(resourceGroupName, name, slot, cancel) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "ListSitePublishingCredentialsSlot", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "ListSitePublishingCredentialsSlot", nil, "Failure preparing request") } resp, err := client.ListSitePublishingCredentialsSlotSender(req) if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "ListSitePublishingCredentialsSlot", "Failure sending request") + result.Response = resp + return result, autorest.NewErrorWithError(err, "web.SitesClient", "ListSitePublishingCredentialsSlot", resp, "Failure sending request") } result, err = client.ListSitePublishingCredentialsSlotResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "ListSitePublishingCredentialsSlot", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", "ListSitePublishingCredentialsSlot", resp, "Failure responding to request") } return } // ListSitePublishingCredentialsSlotPreparer prepares the ListSitePublishingCredentialsSlot request. 
-func (client SitesClient) ListSitePublishingCredentialsSlotPreparer(resourceGroupName string, name string, slot string) (*http.Request, error) { +func (client SitesClient) ListSitePublishingCredentialsSlotPreparer(resourceGroupName string, name string, slot string, cancel <-chan struct{}) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "slot": url.QueryEscape(slot), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "slot": autorest.Encode("path", slot), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsPost(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/config/publishingcredentials/list"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/config/publishingcredentials/list", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) } // ListSitePublishingCredentialsSlotSender sends the ListSitePublishingCredentialsSlot request. The method will close the // http.Response Body if it receives an error. 
func (client SitesClient) ListSitePublishingCredentialsSlotSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) } // ListSitePublishingCredentialsSlotResponder handles the response to the ListSitePublishingCredentialsSlot request. The method always // closes the http.Response Body. -func (client SitesClient) ListSitePublishingCredentialsSlotResponder(resp *http.Response) (result User, err error) { +func (client SitesClient) ListSitePublishingCredentialsSlotResponder(resp *http.Response) (result autorest.Response, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} + result.Response = resp return } @@ -7555,21 +7880,21 @@ // resourceGroupName is name of resource group name is name of web app options // is specifies options for publishing profile. Pass // CsmPublishingProfileOptions.Format=FileZilla3 for FileZilla FTP format. 
-func (client SitesClient) ListSitePublishingProfileXML(resourceGroupName string, name string, options CsmPublishingProfileOptions) (result autorest.Response, ae error) { +func (client SitesClient) ListSitePublishingProfileXML(resourceGroupName string, name string, options CsmPublishingProfileOptions) (result ReadCloser, err error) { req, err := client.ListSitePublishingProfileXMLPreparer(resourceGroupName, name, options) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "ListSitePublishingProfileXML", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "ListSitePublishingProfileXML", nil, "Failure preparing request") } resp, err := client.ListSitePublishingProfileXMLSender(req) if err != nil { - result.Response = resp - return result, autorest.NewErrorWithError(err, "web/SitesClient", "ListSitePublishingProfileXML", "Failure sending request") + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "web.SitesClient", "ListSitePublishingProfileXML", resp, "Failure sending request") } result, err = client.ListSitePublishingProfileXMLResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "ListSitePublishingProfileXML", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", "ListSitePublishingProfileXML", resp, "Failure responding to request") } return @@ -7578,39 +7903,40 @@ // ListSitePublishingProfileXMLPreparer prepares the ListSitePublishingProfileXML request. 
func (client SitesClient) ListSitePublishingProfileXMLPreparer(resourceGroupName string, name string, options CsmPublishingProfileOptions) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, + preparer := autorest.CreatePreparer( autorest.AsJSON(), autorest.AsPost(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/publishxml"), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/publishxml", pathParameters), autorest.WithJSON(options), - autorest.WithPathParameters(pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // ListSitePublishingProfileXMLSender sends the ListSitePublishingProfileXML request. The method will close the // http.Response Body if it receives an error. func (client SitesClient) ListSitePublishingProfileXMLSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // ListSitePublishingProfileXMLResponder handles the response to the ListSitePublishingProfileXML request. The method always // closes the http.Response Body. 
-func (client SitesClient) ListSitePublishingProfileXMLResponder(resp *http.Response) (result autorest.Response, err error) { +func (client SitesClient) ListSitePublishingProfileXMLResponder(resp *http.Response) (result ReadCloser, err error) { + result.Value = &resp.Body err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK)) - result.Response = resp + azure.WithErrorUnlessStatusCode(http.StatusOK)) + result.Response = autorest.Response{Response: resp} return } @@ -7622,21 +7948,21 @@ // CsmPublishingProfileOptions.Format=FileZilla3 for FileZilla FTP format. // slot is name of web app slot. If not specified then will default to // production slot. -func (client SitesClient) ListSitePublishingProfileXMLSlot(resourceGroupName string, name string, options CsmPublishingProfileOptions, slot string) (result autorest.Response, ae error) { +func (client SitesClient) ListSitePublishingProfileXMLSlot(resourceGroupName string, name string, options CsmPublishingProfileOptions, slot string) (result ReadCloser, err error) { req, err := client.ListSitePublishingProfileXMLSlotPreparer(resourceGroupName, name, options, slot) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "ListSitePublishingProfileXMLSlot", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "ListSitePublishingProfileXMLSlot", nil, "Failure preparing request") } resp, err := client.ListSitePublishingProfileXMLSlotSender(req) if err != nil { - result.Response = resp - return result, autorest.NewErrorWithError(err, "web/SitesClient", "ListSitePublishingProfileXMLSlot", "Failure sending request") + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "web.SitesClient", "ListSitePublishingProfileXMLSlot", resp, "Failure sending request") } result, err = client.ListSitePublishingProfileXMLSlotResponder(resp) if err != nil { - ae = 
autorest.NewErrorWithError(err, "web/SitesClient", "ListSitePublishingProfileXMLSlot", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", "ListSitePublishingProfileXMLSlot", resp, "Failure responding to request") } return @@ -7645,40 +7971,41 @@ // ListSitePublishingProfileXMLSlotPreparer prepares the ListSitePublishingProfileXMLSlot request. func (client SitesClient) ListSitePublishingProfileXMLSlotPreparer(resourceGroupName string, name string, options CsmPublishingProfileOptions, slot string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "slot": url.QueryEscape(slot), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "slot": autorest.Encode("path", slot), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, + preparer := autorest.CreatePreparer( autorest.AsJSON(), autorest.AsPost(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/publishxml"), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/publishxml", pathParameters), autorest.WithJSON(options), - autorest.WithPathParameters(pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // ListSitePublishingProfileXMLSlotSender sends the ListSitePublishingProfileXMLSlot request. The method will close the // http.Response Body if it receives an error. 
func (client SitesClient) ListSitePublishingProfileXMLSlotSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // ListSitePublishingProfileXMLSlotResponder handles the response to the ListSitePublishingProfileXMLSlot request. The method always // closes the http.Response Body. -func (client SitesClient) ListSitePublishingProfileXMLSlotResponder(resp *http.Response) (result autorest.Response, err error) { +func (client SitesClient) ListSitePublishingProfileXMLSlotResponder(resp *http.Response) (result ReadCloser, err error) { + result.Value = &resp.Body err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK)) - result.Response = resp + azure.WithErrorUnlessStatusCode(http.StatusOK)) + result.Response = autorest.Response{Response: resp} return } @@ -7687,21 +8014,21 @@ // // resourceGroupName is the resource group name name is the name of the web // app -func (client SitesClient) ListSiteRelayServiceConnections(resourceGroupName string, name string) (result RelayServiceConnectionEntity, ae error) { +func (client SitesClient) ListSiteRelayServiceConnections(resourceGroupName string, name string) (result RelayServiceConnectionEntity, err error) { req, err := client.ListSiteRelayServiceConnectionsPreparer(resourceGroupName, name) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "ListSiteRelayServiceConnections", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "ListSiteRelayServiceConnections", nil, "Failure preparing request") } resp, err := client.ListSiteRelayServiceConnectionsSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "ListSiteRelayServiceConnections", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", 
"ListSiteRelayServiceConnections", resp, "Failure sending request") } result, err = client.ListSiteRelayServiceConnectionsResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "ListSiteRelayServiceConnections", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", "ListSiteRelayServiceConnections", resp, "Failure responding to request") } return @@ -7710,28 +8037,27 @@ // ListSiteRelayServiceConnectionsPreparer prepares the ListSiteRelayServiceConnections request. func (client SitesClient) ListSiteRelayServiceConnectionsPreparer(resourceGroupName string, name string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/hybridconnection"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/hybridconnection", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // ListSiteRelayServiceConnectionsSender sends the ListSiteRelayServiceConnections request. The method will close the // http.Response Body if it receives an error. 
func (client SitesClient) ListSiteRelayServiceConnectionsSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // ListSiteRelayServiceConnectionsResponder handles the response to the ListSiteRelayServiceConnections request. The method always @@ -7740,7 +8066,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -7752,21 +8078,21 @@ // // resourceGroupName is the resource group name name is the name of the web // app slot is the name of the slot for the web app. -func (client SitesClient) ListSiteRelayServiceConnectionsSlot(resourceGroupName string, name string, slot string) (result RelayServiceConnectionEntity, ae error) { +func (client SitesClient) ListSiteRelayServiceConnectionsSlot(resourceGroupName string, name string, slot string) (result RelayServiceConnectionEntity, err error) { req, err := client.ListSiteRelayServiceConnectionsSlotPreparer(resourceGroupName, name, slot) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "ListSiteRelayServiceConnectionsSlot", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "ListSiteRelayServiceConnectionsSlot", nil, "Failure preparing request") } resp, err := client.ListSiteRelayServiceConnectionsSlotSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "ListSiteRelayServiceConnectionsSlot", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "ListSiteRelayServiceConnectionsSlot", resp, "Failure sending request") } result, err = client.ListSiteRelayServiceConnectionsSlotResponder(resp) if err != nil { - ae 
= autorest.NewErrorWithError(err, "web/SitesClient", "ListSiteRelayServiceConnectionsSlot", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", "ListSiteRelayServiceConnectionsSlot", resp, "Failure responding to request") } return @@ -7775,29 +8101,28 @@ // ListSiteRelayServiceConnectionsSlotPreparer prepares the ListSiteRelayServiceConnectionsSlot request. func (client SitesClient) ListSiteRelayServiceConnectionsSlotPreparer(resourceGroupName string, name string, slot string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "slot": url.QueryEscape(slot), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "slot": autorest.Encode("path", slot), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/hybridconnection"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/hybridconnection", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // ListSiteRelayServiceConnectionsSlotSender sends the ListSiteRelayServiceConnectionsSlot request. The method will close the // http.Response Body if it receives an error. 
func (client SitesClient) ListSiteRelayServiceConnectionsSlotSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // ListSiteRelayServiceConnectionsSlotResponder handles the response to the ListSiteRelayServiceConnectionsSlot request. The method always @@ -7806,165 +8131,175 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} return } -// RecoverSite sends the recover site request. +// RecoverSite sends the recover site request. This method may poll for +// completion. Polling can be canceled by passing the cancel channel +// argument. The channel will be used to cancel polling and any outstanding +// HTTP requests. // // resourceGroupName is name of resource group name is name of web app -// snapshot is snapshot data used for web app recovery. Snapshot information -// can be obtained by call GetDeletedSites API. -func (client SitesClient) RecoverSite(resourceGroupName string, name string, snapshot CsmSiteRecoveryEntity) (result ObjectSet, ae error) { - req, err := client.RecoverSitePreparer(resourceGroupName, name, snapshot) +// recoveryEntity is snapshot data used for web app recovery. Snapshot +// information can be obtained by calling GetDeletedSites or GetSiteSnapshots +// API. 
+func (client SitesClient) RecoverSite(resourceGroupName string, name string, recoveryEntity CsmSiteRecoveryEntity, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.RecoverSitePreparer(resourceGroupName, name, recoveryEntity, cancel) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "RecoverSite", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "RecoverSite", nil, "Failure preparing request") } resp, err := client.RecoverSiteSender(req) if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "RecoverSite", "Failure sending request") + result.Response = resp + return result, autorest.NewErrorWithError(err, "web.SitesClient", "RecoverSite", resp, "Failure sending request") } result, err = client.RecoverSiteResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "RecoverSite", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", "RecoverSite", resp, "Failure responding to request") } return } // RecoverSitePreparer prepares the RecoverSite request. 
-func (client SitesClient) RecoverSitePreparer(resourceGroupName string, name string, snapshot CsmSiteRecoveryEntity) (*http.Request, error) { +func (client SitesClient) RecoverSitePreparer(resourceGroupName string, name string, recoveryEntity CsmSiteRecoveryEntity, cancel <-chan struct{}) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, + preparer := autorest.CreatePreparer( autorest.AsJSON(), autorest.AsPost(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/recover"), - autorest.WithJSON(snapshot), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/recover", pathParameters), + autorest.WithJSON(recoveryEntity), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) } // RecoverSiteSender sends the RecoverSite request. The method will close the // http.Response Body if it receives an error. func (client SitesClient) RecoverSiteSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) } // RecoverSiteResponder handles the response to the RecoverSite request. The method always // closes the http.Response Body. 
-func (client SitesClient) RecoverSiteResponder(resp *http.Response) (result ObjectSet, err error) { +func (client SitesClient) RecoverSiteResponder(resp *http.Response) (result autorest.Response, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result.Value), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNotFound), autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} + result.Response = resp return } -// RecoverSiteSlot sends the recover site slot request. +// RecoverSiteSlot sends the recover site slot request. This method may poll +// for completion. Polling can be canceled by passing the cancel channel +// argument. The channel will be used to cancel polling and any outstanding +// HTTP requests. // // resourceGroupName is name of resource group name is name of web app -// snapshot is snapshot data used for web app recovery. Snapshot information -// can be obtained by call GetDeletedSites API. slot is name of web app slot. -// If not specified then will default to production slot. -func (client SitesClient) RecoverSiteSlot(resourceGroupName string, name string, snapshot CsmSiteRecoveryEntity, slot string) (result ObjectSet, ae error) { - req, err := client.RecoverSiteSlotPreparer(resourceGroupName, name, snapshot, slot) +// recoveryEntity is snapshot data used for web app recovery. Snapshot +// information can be obtained by calling GetDeletedSites or GetSiteSnapshots +// API. slot is name of web app slot. If not specified then will default to +// production slot. 
+func (client SitesClient) RecoverSiteSlot(resourceGroupName string, name string, recoveryEntity CsmSiteRecoveryEntity, slot string, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.RecoverSiteSlotPreparer(resourceGroupName, name, recoveryEntity, slot, cancel) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "RecoverSiteSlot", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "RecoverSiteSlot", nil, "Failure preparing request") } resp, err := client.RecoverSiteSlotSender(req) if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "RecoverSiteSlot", "Failure sending request") + result.Response = resp + return result, autorest.NewErrorWithError(err, "web.SitesClient", "RecoverSiteSlot", resp, "Failure sending request") } result, err = client.RecoverSiteSlotResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "RecoverSiteSlot", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", "RecoverSiteSlot", resp, "Failure responding to request") } return } // RecoverSiteSlotPreparer prepares the RecoverSiteSlot request. 
-func (client SitesClient) RecoverSiteSlotPreparer(resourceGroupName string, name string, snapshot CsmSiteRecoveryEntity, slot string) (*http.Request, error) { +func (client SitesClient) RecoverSiteSlotPreparer(resourceGroupName string, name string, recoveryEntity CsmSiteRecoveryEntity, slot string, cancel <-chan struct{}) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "slot": url.QueryEscape(slot), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "slot": autorest.Encode("path", slot), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, + preparer := autorest.CreatePreparer( autorest.AsJSON(), autorest.AsPost(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/recover"), - autorest.WithJSON(snapshot), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/recover", pathParameters), + autorest.WithJSON(recoveryEntity), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) } // RecoverSiteSlotSender sends the RecoverSiteSlot request. The method will close the // http.Response Body if it receives an error. 
func (client SitesClient) RecoverSiteSlotSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) } // RecoverSiteSlotResponder handles the response to the RecoverSiteSlot request. The method always // closes the http.Response Body. -func (client SitesClient) RecoverSiteSlotResponder(resp *http.Response) (result ObjectSet, err error) { +func (client SitesClient) RecoverSiteSlotResponder(resp *http.Response) (result autorest.Response, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result.Value), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNotFound), autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} + result.Response = resp return } // ResetProductionSlotConfig sends the reset production slot config request. 
// // resourceGroupName is name of resource group name is name of web app -func (client SitesClient) ResetProductionSlotConfig(resourceGroupName string, name string) (result ObjectSet, ae error) { +func (client SitesClient) ResetProductionSlotConfig(resourceGroupName string, name string) (result SetObject, err error) { req, err := client.ResetProductionSlotConfigPreparer(resourceGroupName, name) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "ResetProductionSlotConfig", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "ResetProductionSlotConfig", nil, "Failure preparing request") } resp, err := client.ResetProductionSlotConfigSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "ResetProductionSlotConfig", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "ResetProductionSlotConfig", resp, "Failure sending request") } result, err = client.ResetProductionSlotConfigResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "ResetProductionSlotConfig", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", "ResetProductionSlotConfig", resp, "Failure responding to request") } return @@ -7973,37 +8308,36 @@ // ResetProductionSlotConfigPreparer prepares the ResetProductionSlotConfig request. 
func (client SitesClient) ResetProductionSlotConfigPreparer(resourceGroupName string, name string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsPost(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/resetSlotConfig"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/resetSlotConfig", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // ResetProductionSlotConfigSender sends the ResetProductionSlotConfig request. The method will close the // http.Response Body if it receives an error. func (client SitesClient) ResetProductionSlotConfigSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // ResetProductionSlotConfigResponder handles the response to the ResetProductionSlotConfig request. The method always // closes the http.Response Body. 
-func (client SitesClient) ResetProductionSlotConfigResponder(resp *http.Response) (result ObjectSet, err error) { +func (client SitesClient) ResetProductionSlotConfigResponder(resp *http.Response) (result SetObject, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result.Value), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -8015,21 +8349,21 @@ // resourceGroupName is name of resource group name is name of web app slot is // name of web app slot. If not specified then will default to production // slot. -func (client SitesClient) ResetSlotConfigSlot(resourceGroupName string, name string, slot string) (result ObjectSet, ae error) { +func (client SitesClient) ResetSlotConfigSlot(resourceGroupName string, name string, slot string) (result SetObject, err error) { req, err := client.ResetSlotConfigSlotPreparer(resourceGroupName, name, slot) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "ResetSlotConfigSlot", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "ResetSlotConfigSlot", nil, "Failure preparing request") } resp, err := client.ResetSlotConfigSlotSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "ResetSlotConfigSlot", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "ResetSlotConfigSlot", resp, "Failure sending request") } result, err = client.ResetSlotConfigSlotResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "ResetSlotConfigSlot", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", "ResetSlotConfigSlot", resp, "Failure responding to request") } return @@ -8038,38 +8372,37 @@ // 
ResetSlotConfigSlotPreparer prepares the ResetSlotConfigSlot request. func (client SitesClient) ResetSlotConfigSlotPreparer(resourceGroupName string, name string, slot string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "slot": url.QueryEscape(slot), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "slot": autorest.Encode("path", slot), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsPost(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/resetSlotConfig"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/resetSlotConfig", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // ResetSlotConfigSlotSender sends the ResetSlotConfigSlot request. The method will close the // http.Response Body if it receives an error. func (client SitesClient) ResetSlotConfigSlotSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // ResetSlotConfigSlotResponder handles the response to the ResetSlotConfigSlot request. The method always // closes the http.Response Body. 
-func (client SitesClient) ResetSlotConfigSlotResponder(resp *http.Response) (result ObjectSet, err error) { +func (client SitesClient) ResetSlotConfigSlotResponder(resp *http.Response) (result SetObject, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result.Value), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -8083,21 +8416,21 @@ // restarts the app if necessary. Hard restart always restarts and // reprovisions the app synchronous is if true then the API will block until // the app has been restarted -func (client SitesClient) RestartSite(resourceGroupName string, name string, softRestart *bool, synchronous *bool) (result ObjectSet, ae error) { +func (client SitesClient) RestartSite(resourceGroupName string, name string, softRestart *bool, synchronous *bool) (result SetObject, err error) { req, err := client.RestartSitePreparer(resourceGroupName, name, softRestart, synchronous) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "RestartSite", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "RestartSite", nil, "Failure preparing request") } resp, err := client.RestartSiteSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "RestartSite", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "RestartSite", resp, "Failure sending request") } result, err = client.RestartSiteResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "RestartSite", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", "RestartSite", resp, "Failure responding to request") } return @@ -8106,43 +8439,42 @@ // 
RestartSitePreparer prepares the RestartSite request. func (client SitesClient) RestartSitePreparer(resourceGroupName string, name string, softRestart *bool, synchronous *bool) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } if softRestart != nil { - queryParameters["softRestart"] = softRestart + queryParameters["softRestart"] = autorest.Encode("query", *softRestart) } if synchronous != nil { - queryParameters["synchronous"] = synchronous + queryParameters["synchronous"] = autorest.Encode("query", *synchronous) } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsPost(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/restart"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/restart", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // RestartSiteSender sends the RestartSite request. The method will close the // http.Response Body if it receives an error. func (client SitesClient) RestartSiteSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // RestartSiteResponder handles the response to the RestartSite request. 
The method always // closes the http.Response Body. -func (client SitesClient) RestartSiteResponder(resp *http.Response) (result ObjectSet, err error) { +func (client SitesClient) RestartSiteResponder(resp *http.Response) (result SetObject, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result.Value), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -8157,21 +8489,21 @@ // restarts the app if necessary. Hard restart always restarts and // reprovisions the app synchronous is if true then the API will block until // the app has been restarted -func (client SitesClient) RestartSiteSlot(resourceGroupName string, name string, slot string, softRestart *bool, synchronous *bool) (result ObjectSet, ae error) { +func (client SitesClient) RestartSiteSlot(resourceGroupName string, name string, slot string, softRestart *bool, synchronous *bool) (result SetObject, err error) { req, err := client.RestartSiteSlotPreparer(resourceGroupName, name, slot, softRestart, synchronous) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "RestartSiteSlot", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "RestartSiteSlot", nil, "Failure preparing request") } resp, err := client.RestartSiteSlotSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "RestartSiteSlot", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "RestartSiteSlot", resp, "Failure sending request") } result, err = client.RestartSiteSlotResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "RestartSiteSlot", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", 
"RestartSiteSlot", resp, "Failure responding to request") } return @@ -8180,204 +8512,211 @@ // RestartSiteSlotPreparer prepares the RestartSiteSlot request. func (client SitesClient) RestartSiteSlotPreparer(resourceGroupName string, name string, slot string, softRestart *bool, synchronous *bool) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "slot": url.QueryEscape(slot), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "slot": autorest.Encode("path", slot), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } if softRestart != nil { - queryParameters["softRestart"] = softRestart + queryParameters["softRestart"] = autorest.Encode("query", *softRestart) } if synchronous != nil { - queryParameters["synchronous"] = synchronous + queryParameters["synchronous"] = autorest.Encode("query", *synchronous) } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsPost(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/restart"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/restart", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // RestartSiteSlotSender sends the RestartSiteSlot request. The method will close the // http.Response Body if it receives an error. 
func (client SitesClient) RestartSiteSlotSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // RestartSiteSlotResponder handles the response to the RestartSiteSlot request. The method always // closes the http.Response Body. -func (client SitesClient) RestartSiteSlotResponder(resp *http.Response) (result ObjectSet, err error) { +func (client SitesClient) RestartSiteSlotResponder(resp *http.Response) (result SetObject, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result.Value), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} return } -// RestoreSite sends the restore site request. +// RestoreSite sends the restore site request. This method may poll for +// completion. Polling can be canceled by passing the cancel channel +// argument. The channel will be used to cancel polling and any outstanding +// HTTP requests. 
// // resourceGroupName is name of resource group name is name of web app // backupID is id of backup to restore request is information on restore // request -func (client SitesClient) RestoreSite(resourceGroupName string, name string, backupID string, request RestoreRequest) (result RestoreResponse, ae error) { - req, err := client.RestoreSitePreparer(resourceGroupName, name, backupID, request) +func (client SitesClient) RestoreSite(resourceGroupName string, name string, backupID string, request RestoreRequest, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.RestoreSitePreparer(resourceGroupName, name, backupID, request, cancel) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "RestoreSite", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "RestoreSite", nil, "Failure preparing request") } resp, err := client.RestoreSiteSender(req) if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "RestoreSite", "Failure sending request") + result.Response = resp + return result, autorest.NewErrorWithError(err, "web.SitesClient", "RestoreSite", resp, "Failure sending request") } result, err = client.RestoreSiteResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "RestoreSite", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", "RestoreSite", resp, "Failure responding to request") } return } // RestoreSitePreparer prepares the RestoreSite request. 
-func (client SitesClient) RestoreSitePreparer(resourceGroupName string, name string, backupID string, request RestoreRequest) (*http.Request, error) { +func (client SitesClient) RestoreSitePreparer(resourceGroupName string, name string, backupID string, request RestoreRequest, cancel <-chan struct{}) (*http.Request, error) { pathParameters := map[string]interface{}{ - "backupId": url.QueryEscape(backupID), - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "backupId": autorest.Encode("path", backupID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, + preparer := autorest.CreatePreparer( autorest.AsJSON(), autorest.AsPost(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/backups/{backupId}/restore"), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/backups/{backupId}/restore", pathParameters), autorest.WithJSON(request), - autorest.WithPathParameters(pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) } // RestoreSiteSender sends the RestoreSite request. The method will close the // http.Response Body if it receives an error. 
func (client SitesClient) RestoreSiteSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) } // RestoreSiteResponder handles the response to the RestoreSite request. The method always // closes the http.Response Body. -func (client SitesClient) RestoreSiteResponder(resp *http.Response) (result RestoreResponse, err error) { +func (client SitesClient) RestoreSiteResponder(resp *http.Response) (result autorest.Response, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} + result.Response = resp return } -// RestoreSiteSlot sends the restore site slot request. +// RestoreSiteSlot sends the restore site slot request. This method may poll +// for completion. Polling can be canceled by passing the cancel channel +// argument. The channel will be used to cancel polling and any outstanding +// HTTP requests. // // resourceGroupName is name of resource group name is name of web app // backupID is id of backup to restore request is information on restore // request slot is name of web app slot. If not specified then will default // to production slot. 
-func (client SitesClient) RestoreSiteSlot(resourceGroupName string, name string, backupID string, request RestoreRequest, slot string) (result RestoreResponse, ae error) { - req, err := client.RestoreSiteSlotPreparer(resourceGroupName, name, backupID, request, slot) +func (client SitesClient) RestoreSiteSlot(resourceGroupName string, name string, backupID string, request RestoreRequest, slot string, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.RestoreSiteSlotPreparer(resourceGroupName, name, backupID, request, slot, cancel) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "RestoreSiteSlot", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "RestoreSiteSlot", nil, "Failure preparing request") } resp, err := client.RestoreSiteSlotSender(req) if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "RestoreSiteSlot", "Failure sending request") + result.Response = resp + return result, autorest.NewErrorWithError(err, "web.SitesClient", "RestoreSiteSlot", resp, "Failure sending request") } result, err = client.RestoreSiteSlotResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "RestoreSiteSlot", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", "RestoreSiteSlot", resp, "Failure responding to request") } return } // RestoreSiteSlotPreparer prepares the RestoreSiteSlot request. 
-func (client SitesClient) RestoreSiteSlotPreparer(resourceGroupName string, name string, backupID string, request RestoreRequest, slot string) (*http.Request, error) { +func (client SitesClient) RestoreSiteSlotPreparer(resourceGroupName string, name string, backupID string, request RestoreRequest, slot string, cancel <-chan struct{}) (*http.Request, error) { pathParameters := map[string]interface{}{ - "backupId": url.QueryEscape(backupID), - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "slot": url.QueryEscape(slot), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "backupId": autorest.Encode("path", backupID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "slot": autorest.Encode("path", slot), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, + preparer := autorest.CreatePreparer( autorest.AsJSON(), autorest.AsPost(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/backups/{backupId}/restore"), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/backups/{backupId}/restore", pathParameters), autorest.WithJSON(request), - autorest.WithPathParameters(pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) } // RestoreSiteSlotSender sends the RestoreSiteSlot request. The method will close the // http.Response Body if it receives an error. 
func (client SitesClient) RestoreSiteSlotSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) } // RestoreSiteSlotResponder handles the response to the RestoreSiteSlot request. The method always // closes the http.Response Body. -func (client SitesClient) RestoreSiteSlotResponder(resp *http.Response) (result RestoreResponse, err error) { +func (client SitesClient) RestoreSiteSlotResponder(resp *http.Response) (result autorest.Response, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} + result.Response = resp return } // StartSite sends the start site request. // // resourceGroupName is name of resource group name is name of web app -func (client SitesClient) StartSite(resourceGroupName string, name string) (result ObjectSet, ae error) { +func (client SitesClient) StartSite(resourceGroupName string, name string) (result SetObject, err error) { req, err := client.StartSitePreparer(resourceGroupName, name) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "StartSite", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "StartSite", nil, "Failure preparing request") } resp, err := client.StartSiteSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "StartSite", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "StartSite", resp, "Failure sending request") } result, err = client.StartSiteResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", 
"StartSite", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", "StartSite", resp, "Failure responding to request") } return @@ -8386,37 +8725,36 @@ // StartSitePreparer prepares the StartSite request. func (client SitesClient) StartSitePreparer(resourceGroupName string, name string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsPost(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/start"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/start", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // StartSiteSender sends the StartSite request. The method will close the // http.Response Body if it receives an error. func (client SitesClient) StartSiteSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // StartSiteResponder handles the response to the StartSite request. The method always // closes the http.Response Body. 
-func (client SitesClient) StartSiteResponder(resp *http.Response) (result ObjectSet, err error) { +func (client SitesClient) StartSiteResponder(resp *http.Response) (result SetObject, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result.Value), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -8428,21 +8766,21 @@ // resourceGroupName is name of resource group name is name of web app slot is // name of web app slot. If not specified then will default to production // slot. -func (client SitesClient) StartSiteSlot(resourceGroupName string, name string, slot string) (result ObjectSet, ae error) { +func (client SitesClient) StartSiteSlot(resourceGroupName string, name string, slot string) (result SetObject, err error) { req, err := client.StartSiteSlotPreparer(resourceGroupName, name, slot) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "StartSiteSlot", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "StartSiteSlot", nil, "Failure preparing request") } resp, err := client.StartSiteSlotSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "StartSiteSlot", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "StartSiteSlot", resp, "Failure sending request") } result, err = client.StartSiteSlotResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "StartSiteSlot", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", "StartSiteSlot", resp, "Failure responding to request") } return @@ -8451,38 +8789,37 @@ // StartSiteSlotPreparer prepares the StartSiteSlot request. 
func (client SitesClient) StartSiteSlotPreparer(resourceGroupName string, name string, slot string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "slot": url.QueryEscape(slot), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "slot": autorest.Encode("path", slot), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsPost(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/start"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/start", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // StartSiteSlotSender sends the StartSiteSlot request. The method will close the // http.Response Body if it receives an error. func (client SitesClient) StartSiteSlotSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // StartSiteSlotResponder handles the response to the StartSiteSlot request. The method always // closes the http.Response Body. 
-func (client SitesClient) StartSiteSlotResponder(resp *http.Response) (result ObjectSet, err error) { +func (client SitesClient) StartSiteSlotResponder(resp *http.Response) (result SetObject, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result.Value), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -8492,21 +8829,21 @@ // StopSite sends the stop site request. // // resourceGroupName is name of resource group name is name of web app -func (client SitesClient) StopSite(resourceGroupName string, name string) (result ObjectSet, ae error) { +func (client SitesClient) StopSite(resourceGroupName string, name string) (result SetObject, err error) { req, err := client.StopSitePreparer(resourceGroupName, name) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "StopSite", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "StopSite", nil, "Failure preparing request") } resp, err := client.StopSiteSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "StopSite", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "StopSite", resp, "Failure sending request") } result, err = client.StopSiteResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "StopSite", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", "StopSite", resp, "Failure responding to request") } return @@ -8515,37 +8852,36 @@ // StopSitePreparer prepares the StopSite request. 
func (client SitesClient) StopSitePreparer(resourceGroupName string, name string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsPost(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/stop"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/stop", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // StopSiteSender sends the StopSite request. The method will close the // http.Response Body if it receives an error. func (client SitesClient) StopSiteSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // StopSiteResponder handles the response to the StopSite request. The method always // closes the http.Response Body. 
-func (client SitesClient) StopSiteResponder(resp *http.Response) (result ObjectSet, err error) { +func (client SitesClient) StopSiteResponder(resp *http.Response) (result SetObject, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result.Value), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -8557,21 +8893,21 @@ // resourceGroupName is name of resource group name is name of web app slot is // name of web app slot. If not specified then will default to production // slot. -func (client SitesClient) StopSiteSlot(resourceGroupName string, name string, slot string) (result ObjectSet, ae error) { +func (client SitesClient) StopSiteSlot(resourceGroupName string, name string, slot string) (result SetObject, err error) { req, err := client.StopSiteSlotPreparer(resourceGroupName, name, slot) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "StopSiteSlot", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "StopSiteSlot", nil, "Failure preparing request") } resp, err := client.StopSiteSlotSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "StopSiteSlot", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "StopSiteSlot", resp, "Failure sending request") } result, err = client.StopSiteSlotResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "StopSiteSlot", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", "StopSiteSlot", resp, "Failure responding to request") } return @@ -8580,193 +8916,200 @@ // StopSiteSlotPreparer prepares the StopSiteSlot request. 
func (client SitesClient) StopSiteSlotPreparer(resourceGroupName string, name string, slot string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "slot": url.QueryEscape(slot), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "slot": autorest.Encode("path", slot), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsPost(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/stop"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/stop", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // StopSiteSlotSender sends the StopSiteSlot request. The method will close the // http.Response Body if it receives an error. func (client SitesClient) StopSiteSlotSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // StopSiteSlotResponder handles the response to the StopSiteSlot request. The method always // closes the http.Response Body. 
-func (client SitesClient) StopSiteSlotResponder(resp *http.Response) (result ObjectSet, err error) { +func (client SitesClient) StopSiteSlotResponder(resp *http.Response) (result SetObject, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result.Value), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} return } -// SwapSlotsSlot sends the swap slots slot request. +// SwapSlotsSlot sends the swap slots slot request. This method may poll for +// completion. Polling can be canceled by passing the cancel channel +// argument. The channel will be used to cancel polling and any outstanding +// HTTP requests. // // resourceGroupName is name of resource group name is name of web app // slotSwapEntity is request body that contains the target slot name slot is // name of source slot for the swap -func (client SitesClient) SwapSlotsSlot(resourceGroupName string, name string, slotSwapEntity CsmSlotEntity, slot string) (result ObjectSet, ae error) { - req, err := client.SwapSlotsSlotPreparer(resourceGroupName, name, slotSwapEntity, slot) +func (client SitesClient) SwapSlotsSlot(resourceGroupName string, name string, slotSwapEntity CsmSlotEntity, slot string, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.SwapSlotsSlotPreparer(resourceGroupName, name, slotSwapEntity, slot, cancel) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "SwapSlotsSlot", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "SwapSlotsSlot", nil, "Failure preparing request") } resp, err := client.SwapSlotsSlotSender(req) if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "SwapSlotsSlot", "Failure sending request") + result.Response = 
resp + return result, autorest.NewErrorWithError(err, "web.SitesClient", "SwapSlotsSlot", resp, "Failure sending request") } result, err = client.SwapSlotsSlotResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "SwapSlotsSlot", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", "SwapSlotsSlot", resp, "Failure responding to request") } return } // SwapSlotsSlotPreparer prepares the SwapSlotsSlot request. -func (client SitesClient) SwapSlotsSlotPreparer(resourceGroupName string, name string, slotSwapEntity CsmSlotEntity, slot string) (*http.Request, error) { +func (client SitesClient) SwapSlotsSlotPreparer(resourceGroupName string, name string, slotSwapEntity CsmSlotEntity, slot string, cancel <-chan struct{}) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "slot": url.QueryEscape(slot), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "slot": autorest.Encode("path", slot), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, + preparer := autorest.CreatePreparer( autorest.AsJSON(), autorest.AsPost(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/slotsswap"), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/slotsswap", pathParameters), autorest.WithJSON(slotSwapEntity), - autorest.WithPathParameters(pathParameters), autorest.WithQueryParameters(queryParameters)) + 
return preparer.Prepare(&http.Request{Cancel: cancel}) } // SwapSlotsSlotSender sends the SwapSlotsSlot request. The method will close the // http.Response Body if it receives an error. func (client SitesClient) SwapSlotsSlotSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK, http.StatusAccepted) + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) } // SwapSlotsSlotResponder handles the response to the SwapSlotsSlot request. The method always // closes the http.Response Body. -func (client SitesClient) SwapSlotsSlotResponder(resp *http.Response) (result ObjectSet, err error) { +func (client SitesClient) SwapSlotsSlotResponder(resp *http.Response) (result autorest.Response, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), - autorest.ByUnmarshallingJSON(&result.Value), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} + result.Response = resp return } -// SwapSlotWithProduction sends the swap slot with production request. +// SwapSlotWithProduction sends the swap slot with production request. This +// method may poll for completion. Polling can be canceled by passing the +// cancel channel argument. The channel will be used to cancel polling and +// any outstanding HTTP requests. 
// // resourceGroupName is name of resource group name is name of web app // slotSwapEntity is request body that contains the target slot name -func (client SitesClient) SwapSlotWithProduction(resourceGroupName string, name string, slotSwapEntity CsmSlotEntity) (result ObjectSet, ae error) { - req, err := client.SwapSlotWithProductionPreparer(resourceGroupName, name, slotSwapEntity) +func (client SitesClient) SwapSlotWithProduction(resourceGroupName string, name string, slotSwapEntity CsmSlotEntity, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.SwapSlotWithProductionPreparer(resourceGroupName, name, slotSwapEntity, cancel) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "SwapSlotWithProduction", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "SwapSlotWithProduction", nil, "Failure preparing request") } resp, err := client.SwapSlotWithProductionSender(req) if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "SwapSlotWithProduction", "Failure sending request") + result.Response = resp + return result, autorest.NewErrorWithError(err, "web.SitesClient", "SwapSlotWithProduction", resp, "Failure sending request") } result, err = client.SwapSlotWithProductionResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "SwapSlotWithProduction", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", "SwapSlotWithProduction", resp, "Failure responding to request") } return } // SwapSlotWithProductionPreparer prepares the SwapSlotWithProduction request. 
-func (client SitesClient) SwapSlotWithProductionPreparer(resourceGroupName string, name string, slotSwapEntity CsmSlotEntity) (*http.Request, error) { +func (client SitesClient) SwapSlotWithProductionPreparer(resourceGroupName string, name string, slotSwapEntity CsmSlotEntity, cancel <-chan struct{}) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, + preparer := autorest.CreatePreparer( autorest.AsJSON(), autorest.AsPost(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slotsswap"), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slotsswap", pathParameters), autorest.WithJSON(slotSwapEntity), - autorest.WithPathParameters(pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) } // SwapSlotWithProductionSender sends the SwapSlotWithProduction request. The method will close the // http.Response Body if it receives an error. func (client SitesClient) SwapSlotWithProductionSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK, http.StatusAccepted) + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) } // SwapSlotWithProductionResponder handles the response to the SwapSlotWithProduction request. 
The method always // closes the http.Response Body. -func (client SitesClient) SwapSlotWithProductionResponder(resp *http.Response) (result ObjectSet, err error) { +func (client SitesClient) SwapSlotWithProductionResponder(resp *http.Response) (result autorest.Response, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), - autorest.ByUnmarshallingJSON(&result.Value), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} + result.Response = resp return } // SyncSiteRepository sends the sync site repository request. // -func (client SitesClient) SyncSiteRepository(resourceGroupName string, name string) (result ObjectSet, ae error) { +func (client SitesClient) SyncSiteRepository(resourceGroupName string, name string) (result SetObject, err error) { req, err := client.SyncSiteRepositoryPreparer(resourceGroupName, name) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "SyncSiteRepository", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "SyncSiteRepository", nil, "Failure preparing request") } resp, err := client.SyncSiteRepositorySender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "SyncSiteRepository", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "SyncSiteRepository", resp, "Failure sending request") } result, err = client.SyncSiteRepositoryResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "SyncSiteRepository", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", "SyncSiteRepository", resp, "Failure responding to request") } return @@ -8775,37 +9118,36 @@ // SyncSiteRepositoryPreparer 
prepares the SyncSiteRepository request. func (client SitesClient) SyncSiteRepositoryPreparer(resourceGroupName string, name string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsPost(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/sync"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/sync", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // SyncSiteRepositorySender sends the SyncSiteRepository request. The method will close the // http.Response Body if it receives an error. func (client SitesClient) SyncSiteRepositorySender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // SyncSiteRepositoryResponder handles the response to the SyncSiteRepository request. The method always // closes the http.Response Body. 
-func (client SitesClient) SyncSiteRepositoryResponder(resp *http.Response) (result ObjectSet, err error) { +func (client SitesClient) SyncSiteRepositoryResponder(resp *http.Response) (result SetObject, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result.Value), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -8814,21 +9156,21 @@ // SyncSiteRepositorySlot sends the sync site repository slot request. // -func (client SitesClient) SyncSiteRepositorySlot(resourceGroupName string, name string, slot string) (result ObjectSet, ae error) { +func (client SitesClient) SyncSiteRepositorySlot(resourceGroupName string, name string, slot string) (result SetObject, err error) { req, err := client.SyncSiteRepositorySlotPreparer(resourceGroupName, name, slot) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "SyncSiteRepositorySlot", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "SyncSiteRepositorySlot", nil, "Failure preparing request") } resp, err := client.SyncSiteRepositorySlotSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "SyncSiteRepositorySlot", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "SyncSiteRepositorySlot", resp, "Failure sending request") } result, err = client.SyncSiteRepositorySlotResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "SyncSiteRepositorySlot", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", "SyncSiteRepositorySlot", resp, "Failure responding to request") } return @@ -8837,38 +9179,37 @@ // SyncSiteRepositorySlotPreparer prepares the SyncSiteRepositorySlot 
request. func (client SitesClient) SyncSiteRepositorySlotPreparer(resourceGroupName string, name string, slot string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "slot": url.QueryEscape(slot), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "slot": autorest.Encode("path", slot), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsPost(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/sync"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/sync", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // SyncSiteRepositorySlotSender sends the SyncSiteRepositorySlot request. The method will close the // http.Response Body if it receives an error. func (client SitesClient) SyncSiteRepositorySlotSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // SyncSiteRepositorySlotResponder handles the response to the SyncSiteRepositorySlot request. The method always // closes the http.Response Body. 
-func (client SitesClient) SyncSiteRepositorySlotResponder(resp *http.Response) (result ObjectSet, err error) { +func (client SitesClient) SyncSiteRepositorySlotResponder(resp *http.Response) (result SetObject, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result.Value), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -8879,21 +9220,21 @@ // // resourceGroupName is name of resource group name is name of web app // appSettings is application settings of web app -func (client SitesClient) UpdateSiteAppSettings(resourceGroupName string, name string, appSettings StringDictionary) (result StringDictionary, ae error) { +func (client SitesClient) UpdateSiteAppSettings(resourceGroupName string, name string, appSettings StringDictionary) (result StringDictionary, err error) { req, err := client.UpdateSiteAppSettingsPreparer(resourceGroupName, name, appSettings) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "UpdateSiteAppSettings", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "UpdateSiteAppSettings", nil, "Failure preparing request") } resp, err := client.UpdateSiteAppSettingsSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "UpdateSiteAppSettings", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "UpdateSiteAppSettings", resp, "Failure sending request") } result, err = client.UpdateSiteAppSettingsResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "UpdateSiteAppSettings", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", "UpdateSiteAppSettings", resp, "Failure responding to request") } 
return @@ -8902,29 +9243,29 @@ // UpdateSiteAppSettingsPreparer prepares the UpdateSiteAppSettings request. func (client SitesClient) UpdateSiteAppSettingsPreparer(resourceGroupName string, name string, appSettings StringDictionary) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, + preparer := autorest.CreatePreparer( autorest.AsJSON(), autorest.AsPut(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/config/appsettings"), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/config/appsettings", pathParameters), autorest.WithJSON(appSettings), - autorest.WithPathParameters(pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // UpdateSiteAppSettingsSender sends the UpdateSiteAppSettings request. The method will close the // http.Response Body if it receives an error. func (client SitesClient) UpdateSiteAppSettingsSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // UpdateSiteAppSettingsResponder handles the response to the UpdateSiteAppSettings request. 
The method always @@ -8933,7 +9274,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -8945,21 +9286,21 @@ // resourceGroupName is name of resource group name is name of web app // appSettings is application settings of web app slot is name of web app // slot. If not specified then will default to production slot. -func (client SitesClient) UpdateSiteAppSettingsSlot(resourceGroupName string, name string, appSettings StringDictionary, slot string) (result StringDictionary, ae error) { +func (client SitesClient) UpdateSiteAppSettingsSlot(resourceGroupName string, name string, appSettings StringDictionary, slot string) (result StringDictionary, err error) { req, err := client.UpdateSiteAppSettingsSlotPreparer(resourceGroupName, name, appSettings, slot) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "UpdateSiteAppSettingsSlot", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "UpdateSiteAppSettingsSlot", nil, "Failure preparing request") } resp, err := client.UpdateSiteAppSettingsSlotSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "UpdateSiteAppSettingsSlot", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "UpdateSiteAppSettingsSlot", resp, "Failure sending request") } result, err = client.UpdateSiteAppSettingsSlotResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "UpdateSiteAppSettingsSlot", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", "UpdateSiteAppSettingsSlot", resp, "Failure responding to request") } return @@ -8968,30 +9309,30 @@ 
// UpdateSiteAppSettingsSlotPreparer prepares the UpdateSiteAppSettingsSlot request. func (client SitesClient) UpdateSiteAppSettingsSlotPreparer(resourceGroupName string, name string, appSettings StringDictionary, slot string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "slot": url.QueryEscape(slot), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "slot": autorest.Encode("path", slot), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, + preparer := autorest.CreatePreparer( autorest.AsJSON(), autorest.AsPut(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/config/appsettings"), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/config/appsettings", pathParameters), autorest.WithJSON(appSettings), - autorest.WithPathParameters(pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // UpdateSiteAppSettingsSlotSender sends the UpdateSiteAppSettingsSlot request. The method will close the // http.Response Body if it receives an error. func (client SitesClient) UpdateSiteAppSettingsSlotSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // UpdateSiteAppSettingsSlotResponder handles the response to the UpdateSiteAppSettingsSlot request. 
The method always @@ -9000,7 +9341,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -9011,21 +9352,21 @@ // // resourceGroupName is name of resource group name is name of web app // siteAuthSettings is auth settings associated with web app -func (client SitesClient) UpdateSiteAuthSettings(resourceGroupName string, name string, siteAuthSettings SiteAuthSettings) (result SiteAuthSettings, ae error) { +func (client SitesClient) UpdateSiteAuthSettings(resourceGroupName string, name string, siteAuthSettings SiteAuthSettings) (result SiteAuthSettings, err error) { req, err := client.UpdateSiteAuthSettingsPreparer(resourceGroupName, name, siteAuthSettings) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "UpdateSiteAuthSettings", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "UpdateSiteAuthSettings", nil, "Failure preparing request") } resp, err := client.UpdateSiteAuthSettingsSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "UpdateSiteAuthSettings", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "UpdateSiteAuthSettings", resp, "Failure sending request") } result, err = client.UpdateSiteAuthSettingsResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "UpdateSiteAuthSettings", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", "UpdateSiteAuthSettings", resp, "Failure responding to request") } return @@ -9034,29 +9375,29 @@ // UpdateSiteAuthSettingsPreparer prepares the UpdateSiteAuthSettings request. 
func (client SitesClient) UpdateSiteAuthSettingsPreparer(resourceGroupName string, name string, siteAuthSettings SiteAuthSettings) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, + preparer := autorest.CreatePreparer( autorest.AsJSON(), autorest.AsPut(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/config/authsettings"), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/config/authsettings", pathParameters), autorest.WithJSON(siteAuthSettings), - autorest.WithPathParameters(pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // UpdateSiteAuthSettingsSender sends the UpdateSiteAuthSettings request. The method will close the // http.Response Body if it receives an error. func (client SitesClient) UpdateSiteAuthSettingsSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // UpdateSiteAuthSettingsResponder handles the response to the UpdateSiteAuthSettings request. 
The method always @@ -9065,7 +9406,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -9077,21 +9418,21 @@ // resourceGroupName is name of resource group name is name of web app // siteAuthSettings is auth settings associated with web app slot is name of // web app slot. If not specified then will default to production slot. -func (client SitesClient) UpdateSiteAuthSettingsSlot(resourceGroupName string, name string, siteAuthSettings SiteAuthSettings, slot string) (result SiteAuthSettings, ae error) { +func (client SitesClient) UpdateSiteAuthSettingsSlot(resourceGroupName string, name string, siteAuthSettings SiteAuthSettings, slot string) (result SiteAuthSettings, err error) { req, err := client.UpdateSiteAuthSettingsSlotPreparer(resourceGroupName, name, siteAuthSettings, slot) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "UpdateSiteAuthSettingsSlot", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "UpdateSiteAuthSettingsSlot", nil, "Failure preparing request") } resp, err := client.UpdateSiteAuthSettingsSlotSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "UpdateSiteAuthSettingsSlot", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "UpdateSiteAuthSettingsSlot", resp, "Failure sending request") } result, err = client.UpdateSiteAuthSettingsSlotResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "UpdateSiteAuthSettingsSlot", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", "UpdateSiteAuthSettingsSlot", resp, "Failure responding to 
request") } return @@ -9100,30 +9441,30 @@ // UpdateSiteAuthSettingsSlotPreparer prepares the UpdateSiteAuthSettingsSlot request. func (client SitesClient) UpdateSiteAuthSettingsSlotPreparer(resourceGroupName string, name string, siteAuthSettings SiteAuthSettings, slot string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "slot": url.QueryEscape(slot), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "slot": autorest.Encode("path", slot), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, + preparer := autorest.CreatePreparer( autorest.AsJSON(), autorest.AsPut(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/config/authsettings"), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/config/authsettings", pathParameters), autorest.WithJSON(siteAuthSettings), - autorest.WithPathParameters(pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // UpdateSiteAuthSettingsSlotSender sends the UpdateSiteAuthSettingsSlot request. The method will close the // http.Response Body if it receives an error. func (client SitesClient) UpdateSiteAuthSettingsSlotSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // UpdateSiteAuthSettingsSlotResponder handles the response to the UpdateSiteAuthSettingsSlot request. 
The method always @@ -9132,7 +9473,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -9144,21 +9485,21 @@ // // resourceGroupName is name of resource group name is name of web app request // is information on backup request -func (client SitesClient) UpdateSiteBackupConfiguration(resourceGroupName string, name string, request BackupRequest) (result BackupRequest, ae error) { +func (client SitesClient) UpdateSiteBackupConfiguration(resourceGroupName string, name string, request BackupRequest) (result BackupRequest, err error) { req, err := client.UpdateSiteBackupConfigurationPreparer(resourceGroupName, name, request) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "UpdateSiteBackupConfiguration", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "UpdateSiteBackupConfiguration", nil, "Failure preparing request") } resp, err := client.UpdateSiteBackupConfigurationSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "UpdateSiteBackupConfiguration", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "UpdateSiteBackupConfiguration", resp, "Failure sending request") } result, err = client.UpdateSiteBackupConfigurationResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "UpdateSiteBackupConfiguration", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", "UpdateSiteBackupConfiguration", resp, "Failure responding to request") } return @@ -9167,29 +9508,29 @@ // UpdateSiteBackupConfigurationPreparer prepares the UpdateSiteBackupConfiguration request. 
func (client SitesClient) UpdateSiteBackupConfigurationPreparer(resourceGroupName string, name string, request BackupRequest) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, + preparer := autorest.CreatePreparer( autorest.AsJSON(), autorest.AsPut(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/config/backup"), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/config/backup", pathParameters), autorest.WithJSON(request), - autorest.WithPathParameters(pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // UpdateSiteBackupConfigurationSender sends the UpdateSiteBackupConfiguration request. The method will close the // http.Response Body if it receives an error. func (client SitesClient) UpdateSiteBackupConfigurationSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // UpdateSiteBackupConfigurationResponder handles the response to the UpdateSiteBackupConfiguration request. 
The method always @@ -9198,141 +9539,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// UpdateSiteBackupConfigurationDeprecated sends the update site backup -// configuration deprecated request. -// -// resourceGroupName is name of resource group name is name of web app request -// is information on backup request -func (client SitesClient) UpdateSiteBackupConfigurationDeprecated(resourceGroupName string, name string, request BackupRequest) (result BackupRequest, ae error) { - req, err := client.UpdateSiteBackupConfigurationDeprecatedPreparer(resourceGroupName, name, request) - if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "UpdateSiteBackupConfigurationDeprecated", "Failure preparing request") - } - - resp, err := client.UpdateSiteBackupConfigurationDeprecatedSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "UpdateSiteBackupConfigurationDeprecated", "Failure sending request") - } - - result, err = client.UpdateSiteBackupConfigurationDeprecatedResponder(resp) - if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "UpdateSiteBackupConfigurationDeprecated", "Failure responding to request") - } - - return -} - -// UpdateSiteBackupConfigurationDeprecatedPreparer prepares the UpdateSiteBackupConfigurationDeprecated request. 
-func (client SitesClient) UpdateSiteBackupConfigurationDeprecatedPreparer(resourceGroupName string, name string, request BackupRequest) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - } - - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsPut(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/backup/config"), - autorest.WithJSON(request), - autorest.WithPathParameters(pathParameters), - autorest.WithQueryParameters(queryParameters)) -} - -// UpdateSiteBackupConfigurationDeprecatedSender sends the UpdateSiteBackupConfigurationDeprecated request. The method will close the -// http.Response Body if it receives an error. -func (client SitesClient) UpdateSiteBackupConfigurationDeprecatedSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) -} - -// UpdateSiteBackupConfigurationDeprecatedResponder handles the response to the UpdateSiteBackupConfigurationDeprecated request. The method always -// closes the http.Response Body. -func (client SitesClient) UpdateSiteBackupConfigurationDeprecatedResponder(resp *http.Response) (result BackupRequest, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// UpdateSiteBackupConfigurationDeprecatedSlot sends the update site backup -// configuration deprecated slot request. 
-// -// resourceGroupName is name of resource group name is name of web app request -// is information on backup request slot is name of web app slot. If not -// specified then will default to production slot. -func (client SitesClient) UpdateSiteBackupConfigurationDeprecatedSlot(resourceGroupName string, name string, request BackupRequest, slot string) (result BackupRequest, ae error) { - req, err := client.UpdateSiteBackupConfigurationDeprecatedSlotPreparer(resourceGroupName, name, request, slot) - if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "UpdateSiteBackupConfigurationDeprecatedSlot", "Failure preparing request") - } - - resp, err := client.UpdateSiteBackupConfigurationDeprecatedSlotSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "UpdateSiteBackupConfigurationDeprecatedSlot", "Failure sending request") - } - - result, err = client.UpdateSiteBackupConfigurationDeprecatedSlotResponder(resp) - if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "UpdateSiteBackupConfigurationDeprecatedSlot", "Failure responding to request") - } - - return -} - -// UpdateSiteBackupConfigurationDeprecatedSlotPreparer prepares the UpdateSiteBackupConfigurationDeprecatedSlot request. 
-func (client SitesClient) UpdateSiteBackupConfigurationDeprecatedSlotPreparer(resourceGroupName string, name string, request BackupRequest, slot string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "slot": url.QueryEscape(slot), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - } - - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), - autorest.AsPut(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/backup/config"), - autorest.WithJSON(request), - autorest.WithPathParameters(pathParameters), - autorest.WithQueryParameters(queryParameters)) -} - -// UpdateSiteBackupConfigurationDeprecatedSlotSender sends the UpdateSiteBackupConfigurationDeprecatedSlot request. The method will close the -// http.Response Body if it receives an error. -func (client SitesClient) UpdateSiteBackupConfigurationDeprecatedSlotSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) -} - -// UpdateSiteBackupConfigurationDeprecatedSlotResponder handles the response to the UpdateSiteBackupConfigurationDeprecatedSlot request. The method always -// closes the http.Response Body. 
-func (client SitesClient) UpdateSiteBackupConfigurationDeprecatedSlotResponder(resp *http.Response) (result BackupRequest, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -9345,21 +9552,21 @@ // resourceGroupName is name of resource group name is name of web app request // is information on backup request slot is name of web app slot. If not // specified then will default to production slot. -func (client SitesClient) UpdateSiteBackupConfigurationSlot(resourceGroupName string, name string, request BackupRequest, slot string) (result BackupRequest, ae error) { +func (client SitesClient) UpdateSiteBackupConfigurationSlot(resourceGroupName string, name string, request BackupRequest, slot string) (result BackupRequest, err error) { req, err := client.UpdateSiteBackupConfigurationSlotPreparer(resourceGroupName, name, request, slot) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "UpdateSiteBackupConfigurationSlot", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "UpdateSiteBackupConfigurationSlot", nil, "Failure preparing request") } resp, err := client.UpdateSiteBackupConfigurationSlotSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "UpdateSiteBackupConfigurationSlot", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "UpdateSiteBackupConfigurationSlot", resp, "Failure sending request") } result, err = client.UpdateSiteBackupConfigurationSlotResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "UpdateSiteBackupConfigurationSlot", "Failure responding to request") + err 
= autorest.NewErrorWithError(err, "web.SitesClient", "UpdateSiteBackupConfigurationSlot", resp, "Failure responding to request") } return @@ -9368,30 +9575,30 @@ // UpdateSiteBackupConfigurationSlotPreparer prepares the UpdateSiteBackupConfigurationSlot request. func (client SitesClient) UpdateSiteBackupConfigurationSlotPreparer(resourceGroupName string, name string, request BackupRequest, slot string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "slot": url.QueryEscape(slot), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "slot": autorest.Encode("path", slot), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, + preparer := autorest.CreatePreparer( autorest.AsJSON(), autorest.AsPut(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/config/backup"), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/config/backup", pathParameters), autorest.WithJSON(request), - autorest.WithPathParameters(pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // UpdateSiteBackupConfigurationSlotSender sends the UpdateSiteBackupConfigurationSlot request. The method will close the // http.Response Body if it receives an error. 
func (client SitesClient) UpdateSiteBackupConfigurationSlotSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // UpdateSiteBackupConfigurationSlotResponder handles the response to the UpdateSiteBackupConfigurationSlot request. The method always @@ -9400,7 +9607,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -9412,21 +9619,21 @@ // resourceGroupName is name of resource group name is name of web app // siteConfig is request body that contains the configuraiton setting for the // web app -func (client SitesClient) UpdateSiteConfig(resourceGroupName string, name string, siteConfig SiteConfig) (result SiteConfig, ae error) { +func (client SitesClient) UpdateSiteConfig(resourceGroupName string, name string, siteConfig SiteConfig) (result SiteConfig, err error) { req, err := client.UpdateSiteConfigPreparer(resourceGroupName, name, siteConfig) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "UpdateSiteConfig", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "UpdateSiteConfig", nil, "Failure preparing request") } resp, err := client.UpdateSiteConfigSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "UpdateSiteConfig", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "UpdateSiteConfig", resp, "Failure sending request") } result, err = client.UpdateSiteConfigResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "UpdateSiteConfig", "Failure responding to request") + err = autorest.NewErrorWithError(err, 
"web.SitesClient", "UpdateSiteConfig", resp, "Failure responding to request") } return @@ -9435,29 +9642,29 @@ // UpdateSiteConfigPreparer prepares the UpdateSiteConfig request. func (client SitesClient) UpdateSiteConfigPreparer(resourceGroupName string, name string, siteConfig SiteConfig) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, + preparer := autorest.CreatePreparer( autorest.AsJSON(), autorest.AsPatch(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/config/web"), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/config/web", pathParameters), autorest.WithJSON(siteConfig), - autorest.WithPathParameters(pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // UpdateSiteConfigSender sends the UpdateSiteConfig request. The method will close the // http.Response Body if it receives an error. func (client SitesClient) UpdateSiteConfigSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // UpdateSiteConfigResponder handles the response to the UpdateSiteConfig request. 
The method always @@ -9466,7 +9673,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -9479,21 +9686,21 @@ // siteConfig is request body that contains the configuraiton setting for the // web app slot is name of web app slot. If not specified then will default // to production slot. -func (client SitesClient) UpdateSiteConfigSlot(resourceGroupName string, name string, siteConfig SiteConfig, slot string) (result SiteConfig, ae error) { +func (client SitesClient) UpdateSiteConfigSlot(resourceGroupName string, name string, siteConfig SiteConfig, slot string) (result SiteConfig, err error) { req, err := client.UpdateSiteConfigSlotPreparer(resourceGroupName, name, siteConfig, slot) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "UpdateSiteConfigSlot", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "UpdateSiteConfigSlot", nil, "Failure preparing request") } resp, err := client.UpdateSiteConfigSlotSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "UpdateSiteConfigSlot", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "UpdateSiteConfigSlot", resp, "Failure sending request") } result, err = client.UpdateSiteConfigSlotResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "UpdateSiteConfigSlot", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", "UpdateSiteConfigSlot", resp, "Failure responding to request") } return @@ -9502,30 +9709,30 @@ // UpdateSiteConfigSlotPreparer prepares the UpdateSiteConfigSlot request. 
func (client SitesClient) UpdateSiteConfigSlotPreparer(resourceGroupName string, name string, siteConfig SiteConfig, slot string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "slot": url.QueryEscape(slot), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "slot": autorest.Encode("path", slot), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, + preparer := autorest.CreatePreparer( autorest.AsJSON(), autorest.AsPatch(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/config/web"), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/config/web", pathParameters), autorest.WithJSON(siteConfig), - autorest.WithPathParameters(pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // UpdateSiteConfigSlotSender sends the UpdateSiteConfigSlot request. The method will close the // http.Response Body if it receives an error. func (client SitesClient) UpdateSiteConfigSlotSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // UpdateSiteConfigSlotResponder handles the response to the UpdateSiteConfigSlot request. 
The method always @@ -9534,7 +9741,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -9546,21 +9753,21 @@ // // resourceGroupName is name of resource group name is name of web app // connectionStrings is connection strings associated with web app -func (client SitesClient) UpdateSiteConnectionStrings(resourceGroupName string, name string, connectionStrings ConnectionStringDictionary) (result ConnectionStringDictionary, ae error) { +func (client SitesClient) UpdateSiteConnectionStrings(resourceGroupName string, name string, connectionStrings ConnectionStringDictionary) (result ConnectionStringDictionary, err error) { req, err := client.UpdateSiteConnectionStringsPreparer(resourceGroupName, name, connectionStrings) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "UpdateSiteConnectionStrings", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "UpdateSiteConnectionStrings", nil, "Failure preparing request") } resp, err := client.UpdateSiteConnectionStringsSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "UpdateSiteConnectionStrings", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "UpdateSiteConnectionStrings", resp, "Failure sending request") } result, err = client.UpdateSiteConnectionStringsResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "UpdateSiteConnectionStrings", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", "UpdateSiteConnectionStrings", resp, "Failure responding to request") } return @@ -9569,29 +9776,29 @@ // 
UpdateSiteConnectionStringsPreparer prepares the UpdateSiteConnectionStrings request. func (client SitesClient) UpdateSiteConnectionStringsPreparer(resourceGroupName string, name string, connectionStrings ConnectionStringDictionary) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, + preparer := autorest.CreatePreparer( autorest.AsJSON(), autorest.AsPut(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/config/connectionstrings"), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/config/connectionstrings", pathParameters), autorest.WithJSON(connectionStrings), - autorest.WithPathParameters(pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // UpdateSiteConnectionStringsSender sends the UpdateSiteConnectionStrings request. The method will close the // http.Response Body if it receives an error. func (client SitesClient) UpdateSiteConnectionStringsSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // UpdateSiteConnectionStringsResponder handles the response to the UpdateSiteConnectionStrings request. 
The method always @@ -9600,7 +9807,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -9614,21 +9821,21 @@ // connectionStrings is connection strings associated with web app slot is // name of web app slot. If not specified then will default to production // slot. -func (client SitesClient) UpdateSiteConnectionStringsSlot(resourceGroupName string, name string, connectionStrings ConnectionStringDictionary, slot string) (result ConnectionStringDictionary, ae error) { +func (client SitesClient) UpdateSiteConnectionStringsSlot(resourceGroupName string, name string, connectionStrings ConnectionStringDictionary, slot string) (result ConnectionStringDictionary, err error) { req, err := client.UpdateSiteConnectionStringsSlotPreparer(resourceGroupName, name, connectionStrings, slot) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "UpdateSiteConnectionStringsSlot", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "UpdateSiteConnectionStringsSlot", nil, "Failure preparing request") } resp, err := client.UpdateSiteConnectionStringsSlotSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "UpdateSiteConnectionStringsSlot", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "UpdateSiteConnectionStringsSlot", resp, "Failure sending request") } result, err = client.UpdateSiteConnectionStringsSlotResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "UpdateSiteConnectionStringsSlot", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", 
"UpdateSiteConnectionStringsSlot", resp, "Failure responding to request") } return @@ -9637,30 +9844,30 @@ // UpdateSiteConnectionStringsSlotPreparer prepares the UpdateSiteConnectionStringsSlot request. func (client SitesClient) UpdateSiteConnectionStringsSlotPreparer(resourceGroupName string, name string, connectionStrings ConnectionStringDictionary, slot string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "slot": url.QueryEscape(slot), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "slot": autorest.Encode("path", slot), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, + preparer := autorest.CreatePreparer( autorest.AsJSON(), autorest.AsPut(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/config/connectionstrings"), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/config/connectionstrings", pathParameters), autorest.WithJSON(connectionStrings), - autorest.WithPathParameters(pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // UpdateSiteConnectionStringsSlotSender sends the UpdateSiteConnectionStringsSlot request. The method will close the // http.Response Body if it receives an error. 
func (client SitesClient) UpdateSiteConnectionStringsSlotSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // UpdateSiteConnectionStringsSlotResponder handles the response to the UpdateSiteConnectionStringsSlot request. The method always @@ -9669,7 +9876,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -9680,21 +9887,21 @@ // // resourceGroupName is name of resource group name is name of web app // siteLogsConfig is site logs configuration -func (client SitesClient) UpdateSiteLogsConfig(resourceGroupName string, name string, siteLogsConfig SiteLogsConfig) (result SiteLogsConfig, ae error) { +func (client SitesClient) UpdateSiteLogsConfig(resourceGroupName string, name string, siteLogsConfig SiteLogsConfig) (result SiteLogsConfig, err error) { req, err := client.UpdateSiteLogsConfigPreparer(resourceGroupName, name, siteLogsConfig) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "UpdateSiteLogsConfig", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "UpdateSiteLogsConfig", nil, "Failure preparing request") } resp, err := client.UpdateSiteLogsConfigSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "UpdateSiteLogsConfig", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "UpdateSiteLogsConfig", resp, "Failure sending request") } result, err = client.UpdateSiteLogsConfigResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "UpdateSiteLogsConfig", "Failure responding to request") + err = 
autorest.NewErrorWithError(err, "web.SitesClient", "UpdateSiteLogsConfig", resp, "Failure responding to request") } return @@ -9703,29 +9910,29 @@ // UpdateSiteLogsConfigPreparer prepares the UpdateSiteLogsConfig request. func (client SitesClient) UpdateSiteLogsConfigPreparer(resourceGroupName string, name string, siteLogsConfig SiteLogsConfig) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, + preparer := autorest.CreatePreparer( autorest.AsJSON(), autorest.AsPut(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/config/logs"), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/config/logs", pathParameters), autorest.WithJSON(siteLogsConfig), - autorest.WithPathParameters(pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // UpdateSiteLogsConfigSender sends the UpdateSiteLogsConfig request. The method will close the // http.Response Body if it receives an error. func (client SitesClient) UpdateSiteLogsConfigSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // UpdateSiteLogsConfigResponder handles the response to the UpdateSiteLogsConfig request. 
The method always @@ -9734,7 +9941,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -9746,21 +9953,21 @@ // resourceGroupName is name of resource group name is name of web app // siteLogsConfig is site logs configuration slot is name of web app slot. If // not specified then will default to production slot. -func (client SitesClient) UpdateSiteLogsConfigSlot(resourceGroupName string, name string, siteLogsConfig SiteLogsConfig, slot string) (result SiteLogsConfig, ae error) { +func (client SitesClient) UpdateSiteLogsConfigSlot(resourceGroupName string, name string, siteLogsConfig SiteLogsConfig, slot string) (result SiteLogsConfig, err error) { req, err := client.UpdateSiteLogsConfigSlotPreparer(resourceGroupName, name, siteLogsConfig, slot) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "UpdateSiteLogsConfigSlot", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "UpdateSiteLogsConfigSlot", nil, "Failure preparing request") } resp, err := client.UpdateSiteLogsConfigSlotSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "UpdateSiteLogsConfigSlot", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "UpdateSiteLogsConfigSlot", resp, "Failure sending request") } result, err = client.UpdateSiteLogsConfigSlotResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "UpdateSiteLogsConfigSlot", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", "UpdateSiteLogsConfigSlot", resp, "Failure responding to request") } return @@ -9769,30 +9976,30 @@ // 
UpdateSiteLogsConfigSlotPreparer prepares the UpdateSiteLogsConfigSlot request. func (client SitesClient) UpdateSiteLogsConfigSlotPreparer(resourceGroupName string, name string, siteLogsConfig SiteLogsConfig, slot string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "slot": url.QueryEscape(slot), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "slot": autorest.Encode("path", slot), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, + preparer := autorest.CreatePreparer( autorest.AsJSON(), autorest.AsPut(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/config/logs"), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/config/logs", pathParameters), autorest.WithJSON(siteLogsConfig), - autorest.WithPathParameters(pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // UpdateSiteLogsConfigSlotSender sends the UpdateSiteLogsConfigSlot request. The method will close the // http.Response Body if it receives an error. func (client SitesClient) UpdateSiteLogsConfigSlotSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // UpdateSiteLogsConfigSlotResponder handles the response to the UpdateSiteLogsConfigSlot request. 
The method always @@ -9801,7 +10008,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -9812,21 +10019,21 @@ // // resourceGroupName is name of resource group name is name of web app // metadata is meta data of web app -func (client SitesClient) UpdateSiteMetadata(resourceGroupName string, name string, metadata StringDictionary) (result StringDictionary, ae error) { +func (client SitesClient) UpdateSiteMetadata(resourceGroupName string, name string, metadata StringDictionary) (result StringDictionary, err error) { req, err := client.UpdateSiteMetadataPreparer(resourceGroupName, name, metadata) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "UpdateSiteMetadata", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "UpdateSiteMetadata", nil, "Failure preparing request") } resp, err := client.UpdateSiteMetadataSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "UpdateSiteMetadata", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "UpdateSiteMetadata", resp, "Failure sending request") } result, err = client.UpdateSiteMetadataResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "UpdateSiteMetadata", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", "UpdateSiteMetadata", resp, "Failure responding to request") } return @@ -9835,29 +10042,29 @@ // UpdateSiteMetadataPreparer prepares the UpdateSiteMetadata request. 
func (client SitesClient) UpdateSiteMetadataPreparer(resourceGroupName string, name string, metadata StringDictionary) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, + preparer := autorest.CreatePreparer( autorest.AsJSON(), autorest.AsPut(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/config/metadata"), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/config/metadata", pathParameters), autorest.WithJSON(metadata), - autorest.WithPathParameters(pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // UpdateSiteMetadataSender sends the UpdateSiteMetadata request. The method will close the // http.Response Body if it receives an error. func (client SitesClient) UpdateSiteMetadataSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // UpdateSiteMetadataResponder handles the response to the UpdateSiteMetadata request. 
The method always @@ -9866,7 +10073,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -9878,21 +10085,21 @@ // resourceGroupName is name of resource group name is name of web app // metadata is meta data of web app slot is name of web app slot. If not // specified then will default to production slot. -func (client SitesClient) UpdateSiteMetadataSlot(resourceGroupName string, name string, metadata StringDictionary, slot string) (result StringDictionary, ae error) { +func (client SitesClient) UpdateSiteMetadataSlot(resourceGroupName string, name string, metadata StringDictionary, slot string) (result StringDictionary, err error) { req, err := client.UpdateSiteMetadataSlotPreparer(resourceGroupName, name, metadata, slot) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "UpdateSiteMetadataSlot", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "UpdateSiteMetadataSlot", nil, "Failure preparing request") } resp, err := client.UpdateSiteMetadataSlotSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "UpdateSiteMetadataSlot", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "UpdateSiteMetadataSlot", resp, "Failure sending request") } result, err = client.UpdateSiteMetadataSlotResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "UpdateSiteMetadataSlot", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", "UpdateSiteMetadataSlot", resp, "Failure responding to request") } return @@ -9901,30 +10108,30 @@ // UpdateSiteMetadataSlotPreparer prepares the 
UpdateSiteMetadataSlot request. func (client SitesClient) UpdateSiteMetadataSlotPreparer(resourceGroupName string, name string, metadata StringDictionary, slot string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "slot": url.QueryEscape(slot), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "slot": autorest.Encode("path", slot), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, + preparer := autorest.CreatePreparer( autorest.AsJSON(), autorest.AsPut(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/config/metadata"), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/config/metadata", pathParameters), autorest.WithJSON(metadata), - autorest.WithPathParameters(pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // UpdateSiteMetadataSlotSender sends the UpdateSiteMetadataSlot request. The method will close the // http.Response Body if it receives an error. func (client SitesClient) UpdateSiteMetadataSlotSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // UpdateSiteMetadataSlotResponder handles the response to the UpdateSiteMetadataSlot request. 
The method always @@ -9933,7 +10140,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -9946,21 +10153,21 @@ // resourceGroupName is the resource group name name is the name of the web // app entityName is the name by which the Hybrid Connection is identified // connectionEnvelope is the details of the Hybrid Connection -func (client SitesClient) UpdateSiteRelayServiceConnection(resourceGroupName string, name string, entityName string, connectionEnvelope RelayServiceConnectionEntity) (result RelayServiceConnectionEntity, ae error) { +func (client SitesClient) UpdateSiteRelayServiceConnection(resourceGroupName string, name string, entityName string, connectionEnvelope RelayServiceConnectionEntity) (result RelayServiceConnectionEntity, err error) { req, err := client.UpdateSiteRelayServiceConnectionPreparer(resourceGroupName, name, entityName, connectionEnvelope) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "UpdateSiteRelayServiceConnection", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "UpdateSiteRelayServiceConnection", nil, "Failure preparing request") } resp, err := client.UpdateSiteRelayServiceConnectionSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "UpdateSiteRelayServiceConnection", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "UpdateSiteRelayServiceConnection", resp, "Failure sending request") } result, err = client.UpdateSiteRelayServiceConnectionResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "UpdateSiteRelayServiceConnection", "Failure responding to 
request") + err = autorest.NewErrorWithError(err, "web.SitesClient", "UpdateSiteRelayServiceConnection", resp, "Failure responding to request") } return @@ -9969,30 +10176,30 @@ // UpdateSiteRelayServiceConnectionPreparer prepares the UpdateSiteRelayServiceConnection request. func (client SitesClient) UpdateSiteRelayServiceConnectionPreparer(resourceGroupName string, name string, entityName string, connectionEnvelope RelayServiceConnectionEntity) (*http.Request, error) { pathParameters := map[string]interface{}{ - "entityName": url.QueryEscape(entityName), - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "entityName": autorest.Encode("path", entityName), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, + preparer := autorest.CreatePreparer( autorest.AsJSON(), autorest.AsPatch(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/hybridconnection/{entityName}"), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/hybridconnection/{entityName}", pathParameters), autorest.WithJSON(connectionEnvelope), - autorest.WithPathParameters(pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // UpdateSiteRelayServiceConnectionSender sends the UpdateSiteRelayServiceConnection request. The method will close the // http.Response Body if it receives an error. 
func (client SitesClient) UpdateSiteRelayServiceConnectionSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // UpdateSiteRelayServiceConnectionResponder handles the response to the UpdateSiteRelayServiceConnection request. The method always @@ -10001,7 +10208,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -10015,21 +10222,21 @@ // app entityName is the name by which the Hybrid Connection is identified // connectionEnvelope is the details of the Hybrid Connection slot is the // name of the slot for the web app. -func (client SitesClient) UpdateSiteRelayServiceConnectionSlot(resourceGroupName string, name string, entityName string, connectionEnvelope RelayServiceConnectionEntity, slot string) (result RelayServiceConnectionEntity, ae error) { +func (client SitesClient) UpdateSiteRelayServiceConnectionSlot(resourceGroupName string, name string, entityName string, connectionEnvelope RelayServiceConnectionEntity, slot string) (result RelayServiceConnectionEntity, err error) { req, err := client.UpdateSiteRelayServiceConnectionSlotPreparer(resourceGroupName, name, entityName, connectionEnvelope, slot) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "UpdateSiteRelayServiceConnectionSlot", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "UpdateSiteRelayServiceConnectionSlot", nil, "Failure preparing request") } resp, err := client.UpdateSiteRelayServiceConnectionSlotSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "UpdateSiteRelayServiceConnectionSlot", "Failure sending 
request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "UpdateSiteRelayServiceConnectionSlot", resp, "Failure sending request") } result, err = client.UpdateSiteRelayServiceConnectionSlotResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "UpdateSiteRelayServiceConnectionSlot", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", "UpdateSiteRelayServiceConnectionSlot", resp, "Failure responding to request") } return @@ -10038,31 +10245,31 @@ // UpdateSiteRelayServiceConnectionSlotPreparer prepares the UpdateSiteRelayServiceConnectionSlot request. func (client SitesClient) UpdateSiteRelayServiceConnectionSlotPreparer(resourceGroupName string, name string, entityName string, connectionEnvelope RelayServiceConnectionEntity, slot string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "entityName": url.QueryEscape(entityName), - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "slot": url.QueryEscape(slot), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "entityName": autorest.Encode("path", entityName), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "slot": autorest.Encode("path", slot), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, + preparer := autorest.CreatePreparer( autorest.AsJSON(), autorest.AsPatch(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/hybridconnection/{entityName}"), + 
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/hybridconnection/{entityName}", pathParameters), autorest.WithJSON(connectionEnvelope), - autorest.WithPathParameters(pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // UpdateSiteRelayServiceConnectionSlotSender sends the UpdateSiteRelayServiceConnectionSlot request. The method will close the // http.Response Body if it receives an error. func (client SitesClient) UpdateSiteRelayServiceConnectionSlotSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // UpdateSiteRelayServiceConnectionSlotResponder handles the response to the UpdateSiteRelayServiceConnectionSlot request. The method always @@ -10071,7 +10278,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -10083,21 +10290,21 @@ // resourceGroupName is name of resource group name is name of web app // siteSourceControl is request body that contains the source control // parameters -func (client SitesClient) UpdateSiteSourceControl(resourceGroupName string, name string, siteSourceControl SiteSourceControl) (result SiteSourceControl, ae error) { +func (client SitesClient) UpdateSiteSourceControl(resourceGroupName string, name string, siteSourceControl SiteSourceControl) (result SiteSourceControl, err error) { req, err := client.UpdateSiteSourceControlPreparer(resourceGroupName, name, siteSourceControl) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "UpdateSiteSourceControl", "Failure preparing request") + return result, autorest.NewErrorWithError(err, 
"web.SitesClient", "UpdateSiteSourceControl", nil, "Failure preparing request") } resp, err := client.UpdateSiteSourceControlSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "UpdateSiteSourceControl", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "UpdateSiteSourceControl", resp, "Failure sending request") } result, err = client.UpdateSiteSourceControlResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "UpdateSiteSourceControl", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", "UpdateSiteSourceControl", resp, "Failure responding to request") } return @@ -10106,29 +10313,29 @@ // UpdateSiteSourceControlPreparer prepares the UpdateSiteSourceControl request. func (client SitesClient) UpdateSiteSourceControlPreparer(resourceGroupName string, name string, siteSourceControl SiteSourceControl) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, + preparer := autorest.CreatePreparer( autorest.AsJSON(), autorest.AsPatch(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/sourcecontrols/web"), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/sourcecontrols/web", 
pathParameters), autorest.WithJSON(siteSourceControl), - autorest.WithPathParameters(pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // UpdateSiteSourceControlSender sends the UpdateSiteSourceControl request. The method will close the // http.Response Body if it receives an error. func (client SitesClient) UpdateSiteSourceControlSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // UpdateSiteSourceControlResponder handles the response to the UpdateSiteSourceControl request. The method always @@ -10137,7 +10344,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -10151,21 +10358,21 @@ // siteSourceControl is request body that contains the source control // parameters slot is name of web app slot. If not specified then will // default to production slot. 
-func (client SitesClient) UpdateSiteSourceControlSlot(resourceGroupName string, name string, siteSourceControl SiteSourceControl, slot string) (result SiteSourceControl, ae error) { +func (client SitesClient) UpdateSiteSourceControlSlot(resourceGroupName string, name string, siteSourceControl SiteSourceControl, slot string) (result SiteSourceControl, err error) { req, err := client.UpdateSiteSourceControlSlotPreparer(resourceGroupName, name, siteSourceControl, slot) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "UpdateSiteSourceControlSlot", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "UpdateSiteSourceControlSlot", nil, "Failure preparing request") } resp, err := client.UpdateSiteSourceControlSlotSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "UpdateSiteSourceControlSlot", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "UpdateSiteSourceControlSlot", resp, "Failure sending request") } result, err = client.UpdateSiteSourceControlSlotResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "UpdateSiteSourceControlSlot", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", "UpdateSiteSourceControlSlot", resp, "Failure responding to request") } return @@ -10174,30 +10381,30 @@ // UpdateSiteSourceControlSlotPreparer prepares the UpdateSiteSourceControlSlot request. 
func (client SitesClient) UpdateSiteSourceControlSlotPreparer(resourceGroupName string, name string, siteSourceControl SiteSourceControl, slot string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "slot": url.QueryEscape(slot), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "slot": autorest.Encode("path", slot), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, + preparer := autorest.CreatePreparer( autorest.AsJSON(), autorest.AsPatch(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/sourcecontrols/web"), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/sourcecontrols/web", pathParameters), autorest.WithJSON(siteSourceControl), - autorest.WithPathParameters(pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // UpdateSiteSourceControlSlotSender sends the UpdateSiteSourceControlSlot request. The method will close the // http.Response Body if it receives an error. func (client SitesClient) UpdateSiteSourceControlSlotSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // UpdateSiteSourceControlSlotResponder handles the response to the UpdateSiteSourceControlSlot request. 
The method always @@ -10206,7 +10413,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -10218,21 +10425,21 @@ // resourceGroupName is the resource group name name is the name of the web // app vnetName is the name of the Virtual Network connectionEnvelope is the // properties of this Virtual Network Connection -func (client SitesClient) UpdateSiteVNETConnection(resourceGroupName string, name string, vnetName string, connectionEnvelope VnetInfo) (result VnetInfo, ae error) { +func (client SitesClient) UpdateSiteVNETConnection(resourceGroupName string, name string, vnetName string, connectionEnvelope VnetInfo) (result VnetInfo, err error) { req, err := client.UpdateSiteVNETConnectionPreparer(resourceGroupName, name, vnetName, connectionEnvelope) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "UpdateSiteVNETConnection", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "UpdateSiteVNETConnection", nil, "Failure preparing request") } resp, err := client.UpdateSiteVNETConnectionSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "UpdateSiteVNETConnection", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "UpdateSiteVNETConnection", resp, "Failure sending request") } result, err = client.UpdateSiteVNETConnectionResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "UpdateSiteVNETConnection", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", "UpdateSiteVNETConnection", resp, "Failure responding to request") } return @@ -10241,30 +10448,30 @@ // 
UpdateSiteVNETConnectionPreparer prepares the UpdateSiteVNETConnection request. func (client SitesClient) UpdateSiteVNETConnectionPreparer(resourceGroupName string, name string, vnetName string, connectionEnvelope VnetInfo) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - "vnetName": url.QueryEscape(vnetName), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vnetName": autorest.Encode("path", vnetName), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, + preparer := autorest.CreatePreparer( autorest.AsJSON(), autorest.AsPatch(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/virtualNetworkConnections/{vnetName}"), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/virtualNetworkConnections/{vnetName}", pathParameters), autorest.WithJSON(connectionEnvelope), - autorest.WithPathParameters(pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // UpdateSiteVNETConnectionSender sends the UpdateSiteVNETConnection request. The method will close the // http.Response Body if it receives an error. func (client SitesClient) UpdateSiteVNETConnectionSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // UpdateSiteVNETConnectionResponder handles the response to the UpdateSiteVNETConnection request. 
The method always @@ -10273,7 +10480,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -10287,21 +10494,21 @@ // app vnetName is the name of the Virtual Network gatewayName is the name of // the gateway. The only gateway that exists presently is "primary" // connectionEnvelope is the properties to update this gateway with. -func (client SitesClient) UpdateSiteVNETConnectionGateway(resourceGroupName string, name string, vnetName string, gatewayName string, connectionEnvelope VnetGateway) (result VnetGateway, ae error) { +func (client SitesClient) UpdateSiteVNETConnectionGateway(resourceGroupName string, name string, vnetName string, gatewayName string, connectionEnvelope VnetGateway) (result VnetGateway, err error) { req, err := client.UpdateSiteVNETConnectionGatewayPreparer(resourceGroupName, name, vnetName, gatewayName, connectionEnvelope) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "UpdateSiteVNETConnectionGateway", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "UpdateSiteVNETConnectionGateway", nil, "Failure preparing request") } resp, err := client.UpdateSiteVNETConnectionGatewaySender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "UpdateSiteVNETConnectionGateway", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "UpdateSiteVNETConnectionGateway", resp, "Failure sending request") } result, err = client.UpdateSiteVNETConnectionGatewayResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "UpdateSiteVNETConnectionGateway", "Failure responding to request") + err = 
autorest.NewErrorWithError(err, "web.SitesClient", "UpdateSiteVNETConnectionGateway", resp, "Failure responding to request") } return @@ -10310,31 +10517,31 @@ // UpdateSiteVNETConnectionGatewayPreparer prepares the UpdateSiteVNETConnectionGateway request. func (client SitesClient) UpdateSiteVNETConnectionGatewayPreparer(resourceGroupName string, name string, vnetName string, gatewayName string, connectionEnvelope VnetGateway) (*http.Request, error) { pathParameters := map[string]interface{}{ - "gatewayName": url.QueryEscape(gatewayName), - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - "vnetName": url.QueryEscape(vnetName), + "gatewayName": autorest.Encode("path", gatewayName), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vnetName": autorest.Encode("path", vnetName), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, + preparer := autorest.CreatePreparer( autorest.AsJSON(), autorest.AsPatch(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/virtualNetworkConnections/{vnetName}/gateways/{gatewayName}"), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/virtualNetworkConnections/{vnetName}/gateways/{gatewayName}", pathParameters), autorest.WithJSON(connectionEnvelope), - autorest.WithPathParameters(pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // UpdateSiteVNETConnectionGatewaySender sends the UpdateSiteVNETConnectionGateway request. 
The method will close the // http.Response Body if it receives an error. func (client SitesClient) UpdateSiteVNETConnectionGatewaySender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // UpdateSiteVNETConnectionGatewayResponder handles the response to the UpdateSiteVNETConnectionGateway request. The method always @@ -10343,7 +10550,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -10358,21 +10565,21 @@ // the gateway. The only gateway that exists presently is "primary" // connectionEnvelope is the properties to update this gateway with. slot is // the name of the slot for this web app. -func (client SitesClient) UpdateSiteVNETConnectionGatewaySlot(resourceGroupName string, name string, vnetName string, gatewayName string, connectionEnvelope VnetGateway, slot string) (result VnetGateway, ae error) { +func (client SitesClient) UpdateSiteVNETConnectionGatewaySlot(resourceGroupName string, name string, vnetName string, gatewayName string, connectionEnvelope VnetGateway, slot string) (result VnetGateway, err error) { req, err := client.UpdateSiteVNETConnectionGatewaySlotPreparer(resourceGroupName, name, vnetName, gatewayName, connectionEnvelope, slot) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "UpdateSiteVNETConnectionGatewaySlot", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "UpdateSiteVNETConnectionGatewaySlot", nil, "Failure preparing request") } resp, err := client.UpdateSiteVNETConnectionGatewaySlotSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", 
"UpdateSiteVNETConnectionGatewaySlot", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "UpdateSiteVNETConnectionGatewaySlot", resp, "Failure sending request") } result, err = client.UpdateSiteVNETConnectionGatewaySlotResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "UpdateSiteVNETConnectionGatewaySlot", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", "UpdateSiteVNETConnectionGatewaySlot", resp, "Failure responding to request") } return @@ -10381,32 +10588,32 @@ // UpdateSiteVNETConnectionGatewaySlotPreparer prepares the UpdateSiteVNETConnectionGatewaySlot request. func (client SitesClient) UpdateSiteVNETConnectionGatewaySlotPreparer(resourceGroupName string, name string, vnetName string, gatewayName string, connectionEnvelope VnetGateway, slot string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "gatewayName": url.QueryEscape(gatewayName), - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "slot": url.QueryEscape(slot), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - "vnetName": url.QueryEscape(vnetName), + "gatewayName": autorest.Encode("path", gatewayName), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "slot": autorest.Encode("path", slot), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vnetName": autorest.Encode("path", vnetName), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, + preparer := autorest.CreatePreparer( autorest.AsJSON(), autorest.AsPatch(), autorest.WithBaseURL(client.BaseURI), - 
autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/virtualNetworkConnections/{vnetName}/gateways/{gatewayName}"), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/virtualNetworkConnections/{vnetName}/gateways/{gatewayName}", pathParameters), autorest.WithJSON(connectionEnvelope), - autorest.WithPathParameters(pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // UpdateSiteVNETConnectionGatewaySlotSender sends the UpdateSiteVNETConnectionGatewaySlot request. The method will close the // http.Response Body if it receives an error. func (client SitesClient) UpdateSiteVNETConnectionGatewaySlotSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // UpdateSiteVNETConnectionGatewaySlotResponder handles the response to the UpdateSiteVNETConnectionGatewaySlot request. The method always @@ -10415,7 +10622,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -10429,21 +10636,21 @@ // app vnetName is the name of the Virtual Network connectionEnvelope is the // properties of this Virtual Network Connection slot is the name of the slot // for this web app. 
-func (client SitesClient) UpdateSiteVNETConnectionSlot(resourceGroupName string, name string, vnetName string, connectionEnvelope VnetInfo, slot string) (result VnetInfo, ae error) { +func (client SitesClient) UpdateSiteVNETConnectionSlot(resourceGroupName string, name string, vnetName string, connectionEnvelope VnetInfo, slot string) (result VnetInfo, err error) { req, err := client.UpdateSiteVNETConnectionSlotPreparer(resourceGroupName, name, vnetName, connectionEnvelope, slot) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "UpdateSiteVNETConnectionSlot", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "UpdateSiteVNETConnectionSlot", nil, "Failure preparing request") } resp, err := client.UpdateSiteVNETConnectionSlotSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "UpdateSiteVNETConnectionSlot", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "UpdateSiteVNETConnectionSlot", resp, "Failure sending request") } result, err = client.UpdateSiteVNETConnectionSlotResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/SitesClient", "UpdateSiteVNETConnectionSlot", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", "UpdateSiteVNETConnectionSlot", resp, "Failure responding to request") } return @@ -10452,31 +10659,31 @@ // UpdateSiteVNETConnectionSlotPreparer prepares the UpdateSiteVNETConnectionSlot request. 
func (client SitesClient) UpdateSiteVNETConnectionSlotPreparer(resourceGroupName string, name string, vnetName string, connectionEnvelope VnetInfo, slot string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "slot": url.QueryEscape(slot), - "subscriptionId": url.QueryEscape(client.SubscriptionID), - "vnetName": url.QueryEscape(vnetName), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "slot": autorest.Encode("path", slot), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vnetName": autorest.Encode("path", vnetName), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, + preparer := autorest.CreatePreparer( autorest.AsJSON(), autorest.AsPatch(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/virtualNetworkConnections/{vnetName}"), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/slots/{slot}/virtualNetworkConnections/{vnetName}", pathParameters), autorest.WithJSON(connectionEnvelope), - autorest.WithPathParameters(pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // UpdateSiteVNETConnectionSlotSender sends the UpdateSiteVNETConnectionSlot request. The method will close the // http.Response Body if it receives an error. 
func (client SitesClient) UpdateSiteVNETConnectionSlotSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // UpdateSiteVNETConnectionSlotResponder handles the response to the UpdateSiteVNETConnectionSlot request. The method always @@ -10485,7 +10692,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -10497,21 +10704,21 @@ // resourceGroupName is name of resource group name is name of web app // slotConfigNames is request body containing the names of application // settings and connection strings -func (client SitesClient) UpdateSlotConfigNames(resourceGroupName string, name string, slotConfigNames SlotConfigNamesResource) (result SlotConfigNamesResource, ae error) { +func (client SitesClient) UpdateSlotConfigNames(resourceGroupName string, name string, slotConfigNames SlotConfigNamesResource) (result SlotConfigNamesResource, err error) { req, err := client.UpdateSlotConfigNamesPreparer(resourceGroupName, name, slotConfigNames) if err != nil { - return result, autorest.NewErrorWithError(err, "web/SitesClient", "UpdateSlotConfigNames", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "UpdateSlotConfigNames", nil, "Failure preparing request") } resp, err := client.UpdateSlotConfigNamesSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/SitesClient", "UpdateSlotConfigNames", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.SitesClient", "UpdateSlotConfigNames", resp, "Failure sending request") } result, err = client.UpdateSlotConfigNamesResponder(resp) if err != nil { - ae = 
autorest.NewErrorWithError(err, "web/SitesClient", "UpdateSlotConfigNames", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.SitesClient", "UpdateSlotConfigNames", resp, "Failure responding to request") } return @@ -10520,29 +10727,29 @@ // UpdateSlotConfigNamesPreparer prepares the UpdateSlotConfigNames request. func (client SitesClient) UpdateSlotConfigNamesPreparer(resourceGroupName string, name string, slotConfigNames SlotConfigNamesResource) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, + preparer := autorest.CreatePreparer( autorest.AsJSON(), autorest.AsPut(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/config/slotConfigNames"), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/sites/{name}/config/slotConfigNames", pathParameters), autorest.WithJSON(slotConfigNames), - autorest.WithPathParameters(pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // UpdateSlotConfigNamesSender sends the UpdateSlotConfigNames request. The method will close the // http.Response Body if it receives an error. 
func (client SitesClient) UpdateSlotConfigNamesSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // UpdateSlotConfigNamesResponder handles the response to the UpdateSlotConfigNames request. The method always @@ -10551,7 +10758,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/web/topleveldomains.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/web/topleveldomains.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/web/topleveldomains.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/web/topleveldomains.go 2016-10-13 14:32:06.000000000 +0000 @@ -14,14 +14,14 @@ // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 // Changes may cause incorrect behavior and will be lost if the code is // regenerated. import ( - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" "net/http" - "net/url" ) // TopLevelDomainsClient is the use these APIs to manage Azure Websites @@ -49,21 +49,21 @@ } // GetGetTopLevelDomains sends the get get top level domains request. 
-func (client TopLevelDomainsClient) GetGetTopLevelDomains() (result TopLevelDomainCollection, ae error) { +func (client TopLevelDomainsClient) GetGetTopLevelDomains() (result TopLevelDomainCollection, err error) { req, err := client.GetGetTopLevelDomainsPreparer() if err != nil { - return result, autorest.NewErrorWithError(err, "web/TopLevelDomainsClient", "GetGetTopLevelDomains", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.TopLevelDomainsClient", "GetGetTopLevelDomains", nil, "Failure preparing request") } resp, err := client.GetGetTopLevelDomainsSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/TopLevelDomainsClient", "GetGetTopLevelDomains", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.TopLevelDomainsClient", "GetGetTopLevelDomains", resp, "Failure sending request") } result, err = client.GetGetTopLevelDomainsResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/TopLevelDomainsClient", "GetGetTopLevelDomains", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.TopLevelDomainsClient", "GetGetTopLevelDomains", resp, "Failure responding to request") } return @@ -72,26 +72,25 @@ // GetGetTopLevelDomainsPreparer prepares the GetGetTopLevelDomains request. 
func (client TopLevelDomainsClient) GetGetTopLevelDomainsPreparer() (*http.Request, error) { pathParameters := map[string]interface{}{ - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/providers/Microsoft.DomainRegistration/topLevelDomains"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.DomainRegistration/topLevelDomains", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetGetTopLevelDomainsSender sends the GetGetTopLevelDomains request. The method will close the // http.Response Body if it receives an error. func (client TopLevelDomainsClient) GetGetTopLevelDomainsSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetGetTopLevelDomainsResponder handles the response to the GetGetTopLevelDomains request. The method always @@ -100,7 +99,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -110,21 +109,21 @@ // GetTopLevelDomain sends the get top level domain request. 
// // name is name of the top level domain -func (client TopLevelDomainsClient) GetTopLevelDomain(name string) (result TopLevelDomain, ae error) { +func (client TopLevelDomainsClient) GetTopLevelDomain(name string) (result TopLevelDomain, err error) { req, err := client.GetTopLevelDomainPreparer(name) if err != nil { - return result, autorest.NewErrorWithError(err, "web/TopLevelDomainsClient", "GetTopLevelDomain", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.TopLevelDomainsClient", "GetTopLevelDomain", nil, "Failure preparing request") } resp, err := client.GetTopLevelDomainSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/TopLevelDomainsClient", "GetTopLevelDomain", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.TopLevelDomainsClient", "GetTopLevelDomain", resp, "Failure sending request") } result, err = client.GetTopLevelDomainResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/TopLevelDomainsClient", "GetTopLevelDomain", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.TopLevelDomainsClient", "GetTopLevelDomain", resp, "Failure responding to request") } return @@ -133,27 +132,26 @@ // GetTopLevelDomainPreparer prepares the GetTopLevelDomain request. 
func (client TopLevelDomainsClient) GetTopLevelDomainPreparer(name string) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/providers/Microsoft.DomainRegistration/topLevelDomains/{name}"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.DomainRegistration/topLevelDomains/{name}", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetTopLevelDomainSender sends the GetTopLevelDomain request. The method will close the // http.Response Body if it receives an error. func (client TopLevelDomainsClient) GetTopLevelDomainSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetTopLevelDomainResponder handles the response to the GetTopLevelDomain request. 
The method always @@ -162,7 +160,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -174,21 +172,21 @@ // // name is name of the top level domain agreementOption is domain agreement // options -func (client TopLevelDomainsClient) ListTopLevelDomainAgreements(name string, agreementOption TopLevelDomainAgreementOption) (result TldLegalAgreementCollection, ae error) { +func (client TopLevelDomainsClient) ListTopLevelDomainAgreements(name string, agreementOption TopLevelDomainAgreementOption) (result TldLegalAgreementCollection, err error) { req, err := client.ListTopLevelDomainAgreementsPreparer(name, agreementOption) if err != nil { - return result, autorest.NewErrorWithError(err, "web/TopLevelDomainsClient", "ListTopLevelDomainAgreements", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.TopLevelDomainsClient", "ListTopLevelDomainAgreements", nil, "Failure preparing request") } resp, err := client.ListTopLevelDomainAgreementsSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/TopLevelDomainsClient", "ListTopLevelDomainAgreements", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.TopLevelDomainsClient", "ListTopLevelDomainAgreements", resp, "Failure sending request") } result, err = client.ListTopLevelDomainAgreementsResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/TopLevelDomainsClient", "ListTopLevelDomainAgreements", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.TopLevelDomainsClient", "ListTopLevelDomainAgreements", resp, "Failure responding to request") } return @@ -197,28 +195,28 @@ // ListTopLevelDomainAgreementsPreparer prepares the 
ListTopLevelDomainAgreements request. func (client TopLevelDomainsClient) ListTopLevelDomainAgreementsPreparer(name string, agreementOption TopLevelDomainAgreementOption) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": url.QueryEscape(name), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "name": autorest.Encode("path", name), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, + "api-version": client.APIVersion, } - return autorest.Prepare(&http.Request{}, + preparer := autorest.CreatePreparer( autorest.AsJSON(), autorest.AsPost(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/providers/Microsoft.DomainRegistration/topLevelDomains/{name}/listAgreements"), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.DomainRegistration/topLevelDomains/{name}/listAgreements", pathParameters), autorest.WithJSON(agreementOption), - autorest.WithPathParameters(pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // ListTopLevelDomainAgreementsSender sends the ListTopLevelDomainAgreements request. The method will close the // http.Response Body if it receives an error. func (client TopLevelDomainsClient) ListTopLevelDomainAgreementsSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // ListTopLevelDomainAgreementsResponder handles the response to the ListTopLevelDomainAgreements request. 
The method always @@ -227,7 +225,7 @@ err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/web/usageoperations.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/web/usageoperations.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/web/usageoperations.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/web/usageoperations.go 2016-10-13 14:32:06.000000000 +0000 @@ -14,14 +14,14 @@ // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 // Changes may cause incorrect behavior and will be lost if the code is // regenerated. import ( - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" "net/http" - "net/url" ) // UsageOperationsClient is the use these APIs to manage Azure Websites @@ -53,62 +53,61 @@ // resourceGroupName is name of resource group environmentName is environment // name lastID is last marker that was returned from the batch batchSize is // size of the batch to be returned. 
-func (client UsageOperationsClient) GetUsage(resourceGroupName string, environmentName string, lastID string, batchSize int) (result ObjectSet, ae error) { +func (client UsageOperationsClient) GetUsage(resourceGroupName string, environmentName string, lastID string, batchSize int32) (result SetObject, err error) { req, err := client.GetUsagePreparer(resourceGroupName, environmentName, lastID, batchSize) if err != nil { - return result, autorest.NewErrorWithError(err, "web/UsageOperationsClient", "GetUsage", "Failure preparing request") + return result, autorest.NewErrorWithError(err, "web.UsageOperationsClient", "GetUsage", nil, "Failure preparing request") } resp, err := client.GetUsageSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "web/UsageOperationsClient", "GetUsage", "Failure sending request") + return result, autorest.NewErrorWithError(err, "web.UsageOperationsClient", "GetUsage", resp, "Failure sending request") } result, err = client.GetUsageResponder(resp) if err != nil { - ae = autorest.NewErrorWithError(err, "web/UsageOperationsClient", "GetUsage", "Failure responding to request") + err = autorest.NewErrorWithError(err, "web.UsageOperationsClient", "GetUsage", resp, "Failure responding to request") } return } // GetUsagePreparer prepares the GetUsage request. 
-func (client UsageOperationsClient) GetUsagePreparer(resourceGroupName string, environmentName string, lastID string, batchSize int) (*http.Request, error) { +func (client UsageOperationsClient) GetUsagePreparer(resourceGroupName string, environmentName string, lastID string, batchSize int32) (*http.Request, error) { pathParameters := map[string]interface{}{ - "environmentName": url.QueryEscape(environmentName), - "resourceGroupName": url.QueryEscape(resourceGroupName), - "subscriptionId": url.QueryEscape(client.SubscriptionID), + "environmentName": autorest.Encode("path", environmentName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } queryParameters := map[string]interface{}{ - "api-version": APIVersion, - "batchSize": batchSize, - "lastId": lastID, + "api-version": client.APIVersion, + "batchSize": autorest.Encode("query", batchSize), + "lastId": autorest.Encode("query", lastID), } - return autorest.Prepare(&http.Request{}, - autorest.AsJSON(), + preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web.Admin/environments/{environmentName}/usage"), - autorest.WithPathParameters(pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web.Admin/environments/{environmentName}/usage", pathParameters), autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) } // GetUsageSender sends the GetUsage request. The method will close the // http.Response Body if it receives an error. 
func (client UsageOperationsClient) GetUsageSender(req *http.Request) (*http.Response, error) { - return client.Send(req, http.StatusOK) + return autorest.SendWithSender(client, req) } // GetUsageResponder handles the response to the GetUsage request. The method always // closes the http.Response Body. -func (client UsageOperationsClient) GetUsageResponder(resp *http.Response) (result ObjectSet, err error) { +func (client UsageOperationsClient) GetUsageResponder(resp *http.Response) (result SetObject, err error) { err = autorest.Respond( resp, client.ByInspecting(), - autorest.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result.Value), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/web/version.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/web/version.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/arm/web/version.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/arm/web/version.go 2016-10-13 14:32:06.000000000 +0000 @@ -14,7 +14,7 @@ // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 0.12.0.0 +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 // Changes may cause incorrect behavior and will be lost if the code is // regenerated. @@ -23,13 +23,13 @@ ) const ( - major = "0" - minor = "3" + major = "3" + minor = "1" patch = "0" // Always begin a "tag" with a dash (as per http://semver.org) tag = "-beta" semVerFormat = "%s.%s.%s%s" - userAgentFormat = "Azure-SDK-for-Go/%s;Package arm/%s;API %s" + userAgentFormat = "Azure-SDK-for-Go/%s arm-%s/%s" ) // UserAgent returns the UserAgent string to use when sending http.Requests. 
diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/CHANGELOG.md juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/CHANGELOG.md --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/CHANGELOG.md 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/CHANGELOG.md 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,205 @@ +# CHANGELOG + +----- + +## `v3.1.0-beta` + +- Added a new arm/compute/containerservice (2016-03-30) package +- Reintroduced NewxxClientWithBaseURI method. +- Uses go-autorest version - v7.0.7. + + +## `v3.0.0-beta` + +This release brings the Go SDK ARM packages up-to-date with Azure ARM Swagger files for most +services. Since the underlying [Swagger files](https://github.com/Azure/azure-rest-api-specs) +continue to change substantially, the ARM packages are still in *beta* status. + +The ARM packages now align with the following API versions (*highlighted* packages are new or +updated in this release): + +| api | version | note | +|:----------------------------|:--------------------|:----------| +| arm/authorization | 2015-07-01 | no change | +| arm/intune | 2015-01-14-preview | no change | +| arm/notificationhubs | 2014-09-01 | no change | +| arm/resources/features | 2015-12-01 | no change | +| arm/resources/subscriptions | 2015-11-01 | no change | +| arm/web | 2015-08-01 | no change | +| arm/cdn | 2016-04-02 | updated | +| arm/compute | 2016-03-30 | updated | +| arm/dns | 2016-04-01 | updated | +| arm/logic | 2015-08-01-preview | updated | +| arm/network | 2016-03-30 | updated | +| arm/redis | 2016-04-01 | updated | +| arm/resources/resources | 2016-02-01 | updated | +| arm/resources/policy | 2015-10-01-preview | updated | +| arm/resources/locks | 2015-01-01 | updated (resources/authorization earlier)| +| arm/scheduler | 2016-03-01 | updated | +| arm/storage | 2016-01-01 | updated | +| arm/search | 2015-02-28 | updated | +| arm/batch | 2015-12-01 | new | +| arm/cognitiveservices | 
2016-02-01-preview | new | +| arm/devtestlabs | 2016-05-15 | new | +| arm/machinelearning | 2016-05-01-preview | new | +| arm/powerbiembedded | 2016-01-29 | new | +| arm/mobileengagement | 2014-12-01 | new | +| arm/servicebus | 2014-09-01 | new | +| arm/sql | 2015-05-01 | new | +| arm/trafficmanager | 2015-11-01 | new | + + +Below are some design changes. +- Removed Api version from method arguments. +- Removed New...ClientWithBaseURI() method in all clients. BaseURI value is set in client.go. +- Uses go-autorest version v7.0.6. + + +## `v2.2.0-beta` + +- Uses go-autorest version v7.0.5. +- Update version of pacakges "jwt-go" and "crypto" in glide.lock. + + +## `v2.1.1-beta` + +- arm: Better error messages for long running operation failures (Uses go-autorest version v7.0.4). + + +## `v2.1.0-beta` + +- arm: Uses go-autorest v7.0.3 (polling related updates). +- arm: Cancel channel argument added in long-running calls. +- storage: Allow caller to provide headers for DeleteBlob methods. +- storage: Enables connection sharing with http keepalive. +- storage: Add BlobPrefixes and Delimiter to BlobListResponse + + +## `v2.0.0-beta` + +- Uses go-autorest v6.0.0 (Polling and Asynchronous requests related changes). + + +## `v0.5.0-beta` + +Updated following packages to new API versions: +- arm/resources/features 2015-12-01 +- arm/resources/resources 2015-11-01 +- arm/resources/subscriptions 2015-11-01 + + +### Changes + + - SDK now uses go-autorest v3.0.0. + + + +## `v0.4.0-beta` + +This release brings the Go SDK ARM packages up-to-date with Azure ARM Swagger files for most +services. Since the underlying [Swagger files](https://github.com/Azure/azure-rest-api-specs) +continue to change substantially, the ARM packages are still in *beta* status. 
+ +The ARM packages now align with the following API versions (*highlighted* packages are new or +updated in this release): + +- *arm/authorization 2015-07-01* +- *arm/cdn 2015-06-01* +- arm/compute 2015-06-15 +- arm/dns 2015-05-04-preview +- *arm/intune 2015-01-14-preview* +- arm/logic 2015-02-01-preview +- *arm/network 2015-06-15* +- *arm/notificationhubs 2014-09-01* +- arm/redis 2015-08-01 +- *arm/resources/authorization 2015-01-01* +- *arm/resources/features 2014-08-01-preview* +- *arm/resources/resources 2014-04-01-preview* +- *arm/resources/subscriptions 2014-04-01-preview* +- *arm/scheduler 2016-01-01* +- arm/storage 2015-06-15 +- arm/web 2015-08-01 + +### Changes + +- Moved the arm/authorization, arm/features, arm/resources, and arm/subscriptions packages under a new, resources, package (to reflect the corresponding Swagger structure) +- Added a new arm/authoriation (2015-07-01) package +- Added a new arm/cdn (2015-06-01) package +- Added a new arm/intune (2015-01-14-preview) package +- Udated arm/network (2015-06-01) +- Added a new arm/notificationhubs (2014-09-01) package +- Updated arm/scheduler (2016-01-01) package + + +----- + +## `v0.3.0-beta` + +- Corrected unintentional struct field renaming and client renaming in v0.2.0-beta + +----- + +## `v0.2.0-beta` + +- Added support for DNS, Redis, and Web site services +- Updated Storage service to API version 2015-06-15 +- Updated Network to include routing table support +- Address https://github.com/Azure/azure-sdk-for-go/issues/232 +- Address https://github.com/Azure/azure-sdk-for-go/issues/231 +- Address https://github.com/Azure/azure-sdk-for-go/issues/230 +- Address https://github.com/Azure/azure-sdk-for-go/issues/224 +- Address https://github.com/Azure/azure-sdk-for-go/issues/184 +- Address https://github.com/Azure/azure-sdk-for-go/issues/183 + +------ + +## `v0.1.1-beta` + +- Improves the UserAgent string to disambiguate arm packages from others in the SDK +- Improves setting the http.Response into 
generated results (reduces likelihood of a nil reference) +- Adds gofmt, golint, and govet to Travis CI for the arm packages + +##### Fixed Issues + +- https://github.com/Azure/azure-sdk-for-go/issues/196 +- https://github.com/Azure/azure-sdk-for-go/issues/213 + +------ + +## v0.1.0-beta + +This release addresses the issues raised against the alpha release and adds more features. Most +notably, to address the challenges of encoding JSON +(see the [comments](https://github.com/Azure/go-autorest#handling-empty-values) in the +[go-autorest](https://github.com/Azure/go-autorest) package) by using pointers for *all* structure +fields (with the exception of enumerations). The +[go-autorest/autorest/to](https://github.com/Azure/go-autorest/tree/master/autorest/to) package +provides helpers to convert to / from pointers. The examples demonstrate their usage. + +Additionally, the packages now align with Go coding standards and pass both `golint` and `govet`. +Accomplishing this required renaming various fields and parameters (such as changing Url to URL). + +##### Changes + +- Changed request / response structures to use pointer fields. +- Changed methods to return `error` instead of `autorest.Error`. +- Re-divided methods to ease asynchronous requests. +- Added paged results support. +- Added a UserAgent string. +- Added changes necessary to pass golint and govet. +- Updated README.md with details on asynchronous requests and paging. +- Saved package dependencies through Godep (for the entire SDK). + +##### Fixed Issues: + +- https://github.com/Azure/azure-sdk-for-go/issues/205 +- https://github.com/Azure/azure-sdk-for-go/issues/206 +- https://github.com/Azure/azure-sdk-for-go/issues/211 +- https://github.com/Azure/azure-sdk-for-go/issues/212 + +----- + +## v0.1.0-alpha + +This release introduces the Azure Resource Manager packages generated from the corresponding +[Swagger API](http://swagger.io) [definitions](https://github.com/Azure/azure-rest-api-specs). 
diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/core/http/cgi/child.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/core/http/cgi/child.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/core/http/cgi/child.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/core/http/cgi/child.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,206 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// This file implements CGI from the perspective of a child -// process. - -package cgi - -import ( - "bufio" - "crypto/tls" - "errors" - "fmt" - "io" - "io/ioutil" - "net" - "net/http" - "net/url" - "os" - "strconv" - "strings" -) - -// Request returns the HTTP request as represented in the current -// environment. This assumes the current program is being run -// by a web server in a CGI environment. -// The returned Request's Body is populated, if applicable. -func Request() (*http.Request, error) { - r, err := RequestFromMap(envMap(os.Environ())) - if err != nil { - return nil, err - } - if r.ContentLength > 0 { - r.Body = ioutil.NopCloser(io.LimitReader(os.Stdin, r.ContentLength)) - } - return r, nil -} - -func envMap(env []string) map[string]string { - m := make(map[string]string) - for _, kv := range env { - if idx := strings.Index(kv, "="); idx != -1 { - m[kv[:idx]] = kv[idx+1:] - } - } - return m -} - -// RequestFromMap creates an http.Request from CGI variables. -// The returned Request's Body field is not populated. 
-func RequestFromMap(params map[string]string) (*http.Request, error) { - r := new(http.Request) - r.Method = params["REQUEST_METHOD"] - if r.Method == "" { - return nil, errors.New("cgi: no REQUEST_METHOD in environment") - } - - r.Proto = params["SERVER_PROTOCOL"] - var ok bool - r.ProtoMajor, r.ProtoMinor, ok = http.ParseHTTPVersion(r.Proto) - if !ok { - return nil, errors.New("cgi: invalid SERVER_PROTOCOL version") - } - - r.Close = true - r.Trailer = http.Header{} - r.Header = http.Header{} - - r.Host = params["HTTP_HOST"] - - if lenstr := params["CONTENT_LENGTH"]; lenstr != "" { - clen, err := strconv.ParseInt(lenstr, 10, 64) - if err != nil { - return nil, errors.New("cgi: bad CONTENT_LENGTH in environment: " + lenstr) - } - r.ContentLength = clen - } - - if ct := params["CONTENT_TYPE"]; ct != "" { - r.Header.Set("Content-Type", ct) - } - - // Copy "HTTP_FOO_BAR" variables to "Foo-Bar" Headers - for k, v := range params { - if !strings.HasPrefix(k, "HTTP_") || k == "HTTP_HOST" { - continue - } - r.Header.Add(strings.Replace(k[5:], "_", "-", -1), v) - } - - // TODO: cookies. parsing them isn't exported, though. - - uriStr := params["REQUEST_URI"] - if uriStr == "" { - // Fallback to SCRIPT_NAME, PATH_INFO and QUERY_STRING. - uriStr = params["SCRIPT_NAME"] + params["PATH_INFO"] - s := params["QUERY_STRING"] - if s != "" { - uriStr += "?" + s - } - } - - // There's apparently a de-facto standard for this. - // http://docstore.mik.ua/orelly/linux/cgi/ch03_02.htm#ch03-35636 - if s := params["HTTPS"]; s == "on" || s == "ON" || s == "1" { - r.TLS = &tls.ConnectionState{HandshakeComplete: true} - } - - if r.Host != "" { - // Hostname is provided, so we can reasonably construct a URL. 
- rawurl := r.Host + uriStr - if r.TLS == nil { - rawurl = "http://" + rawurl - } else { - rawurl = "https://" + rawurl - } - url, err := url.Parse(rawurl) - if err != nil { - return nil, errors.New("cgi: failed to parse host and REQUEST_URI into a URL: " + rawurl) - } - r.URL = url - } - // Fallback logic if we don't have a Host header or the URL - // failed to parse - if r.URL == nil { - url, err := url.Parse(uriStr) - if err != nil { - return nil, errors.New("cgi: failed to parse REQUEST_URI into a URL: " + uriStr) - } - r.URL = url - } - - // Request.RemoteAddr has its port set by Go's standard http - // server, so we do here too. We don't have one, though, so we - // use a dummy one. - r.RemoteAddr = net.JoinHostPort(params["REMOTE_ADDR"], "0") - - return r, nil -} - -// Serve executes the provided Handler on the currently active CGI -// request, if any. If there's no current CGI environment -// an error is returned. The provided handler may be nil to use -// http.DefaultServeMux. -func Serve(handler http.Handler) error { - req, err := Request() - if err != nil { - return err - } - if handler == nil { - handler = http.DefaultServeMux - } - rw := &response{ - req: req, - header: make(http.Header), - bufw: bufio.NewWriter(os.Stdout), - } - handler.ServeHTTP(rw, req) - rw.Write(nil) // make sure a response is sent - if err = rw.bufw.Flush(); err != nil { - return err - } - return nil -} - -type response struct { - req *http.Request - header http.Header - bufw *bufio.Writer - headerSent bool -} - -func (r *response) Flush() { - r.bufw.Flush() -} - -func (r *response) Header() http.Header { - return r.header -} - -func (r *response) Write(p []byte) (n int, err error) { - if !r.headerSent { - r.WriteHeader(http.StatusOK) - } - return r.bufw.Write(p) -} - -func (r *response) WriteHeader(code int) { - if r.headerSent { - // Note: explicitly using Stderr, as Stdout is our HTTP output. 
- fmt.Fprintf(os.Stderr, "CGI attempted to write header twice on request for %s", r.req.URL) - return - } - r.headerSent = true - fmt.Fprintf(r.bufw, "Status: %d %s\r\n", code, http.StatusText(code)) - - // Set a default Content-Type - if _, hasType := r.header["Content-Type"]; !hasType { - r.header.Add("Content-Type", "text/html; charset=utf-8") - } - - r.header.Write(r.bufw) - r.bufw.WriteString("\r\n") - r.bufw.Flush() -} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/core/http/cgi/child_test.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/core/http/cgi/child_test.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/core/http/cgi/child_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/core/http/cgi/child_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,131 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// Tests for CGI (the child process perspective) - -package cgi - -import ( - "testing" -) - -func TestRequest(t *testing.T) { - env := map[string]string{ - "SERVER_PROTOCOL": "HTTP/1.1", - "REQUEST_METHOD": "GET", - "HTTP_HOST": "example.com", - "HTTP_REFERER": "elsewhere", - "HTTP_USER_AGENT": "goclient", - "HTTP_FOO_BAR": "baz", - "REQUEST_URI": "/path?a=b", - "CONTENT_LENGTH": "123", - "CONTENT_TYPE": "text/xml", - "REMOTE_ADDR": "5.6.7.8", - } - req, err := RequestFromMap(env) - if err != nil { - t.Fatalf("RequestFromMap: %v", err) - } - if g, e := req.UserAgent(), "goclient"; e != g { - t.Errorf("expected UserAgent %q; got %q", e, g) - } - if g, e := req.Method, "GET"; e != g { - t.Errorf("expected Method %q; got %q", e, g) - } - if g, e := req.Header.Get("Content-Type"), "text/xml"; e != g { - t.Errorf("expected Content-Type %q; got %q", e, g) - } - if g, e := req.ContentLength, int64(123); e != g { - t.Errorf("expected ContentLength %d; got %d", e, g) - } - if g, e := req.Referer(), "elsewhere"; e != g { - t.Errorf("expected Referer %q; got %q", e, g) - } - if req.Header == nil { - t.Fatalf("unexpected nil Header") - } - if g, e := req.Header.Get("Foo-Bar"), "baz"; e != g { - t.Errorf("expected Foo-Bar %q; got %q", e, g) - } - if g, e := req.URL.String(), "http://example.com/path?a=b"; e != g { - t.Errorf("expected URL %q; got %q", e, g) - } - if g, e := req.FormValue("a"), "b"; e != g { - t.Errorf("expected FormValue(a) %q; got %q", e, g) - } - if req.Trailer == nil { - t.Errorf("unexpected nil Trailer") - } - if req.TLS != nil { - t.Errorf("expected nil TLS") - } - if e, g := "5.6.7.8:0", req.RemoteAddr; e != g { - t.Errorf("RemoteAddr: got %q; want %q", g, e) - } -} - -func TestRequestWithTLS(t *testing.T) { - env := map[string]string{ - "SERVER_PROTOCOL": "HTTP/1.1", - "REQUEST_METHOD": "GET", - "HTTP_HOST": "example.com", - "HTTP_REFERER": "elsewhere", - "REQUEST_URI": "/path?a=b", - "CONTENT_TYPE": "text/xml", - "HTTPS": "1", - "REMOTE_ADDR": 
"5.6.7.8", - } - req, err := RequestFromMap(env) - if err != nil { - t.Fatalf("RequestFromMap: %v", err) - } - if g, e := req.URL.String(), "https://example.com/path?a=b"; e != g { - t.Errorf("expected URL %q; got %q", e, g) - } - if req.TLS == nil { - t.Errorf("expected non-nil TLS") - } -} - -func TestRequestWithoutHost(t *testing.T) { - env := map[string]string{ - "SERVER_PROTOCOL": "HTTP/1.1", - "HTTP_HOST": "", - "REQUEST_METHOD": "GET", - "REQUEST_URI": "/path?a=b", - "CONTENT_LENGTH": "123", - } - req, err := RequestFromMap(env) - if err != nil { - t.Fatalf("RequestFromMap: %v", err) - } - if req.URL == nil { - t.Fatalf("unexpected nil URL") - } - if g, e := req.URL.String(), "/path?a=b"; e != g { - t.Errorf("URL = %q; want %q", g, e) - } -} - -func TestRequestWithoutRequestURI(t *testing.T) { - env := map[string]string{ - "SERVER_PROTOCOL": "HTTP/1.1", - "HTTP_HOST": "example.com", - "REQUEST_METHOD": "GET", - "SCRIPT_NAME": "/dir/scriptname", - "PATH_INFO": "/p1/p2", - "QUERY_STRING": "a=1&b=2", - "CONTENT_LENGTH": "123", - } - req, err := RequestFromMap(env) - if err != nil { - t.Fatalf("RequestFromMap: %v", err) - } - if req.URL == nil { - t.Fatalf("unexpected nil URL") - } - if g, e := req.URL.String(), "http://example.com/dir/scriptname/p1/p2?a=1&b=2"; e != g { - t.Errorf("URL = %q; want %q", g, e) - } -} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/core/http/cgi/host.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/core/http/cgi/host.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/core/http/cgi/host.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/core/http/cgi/host.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,377 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// This file implements the host side of CGI (being the webserver -// parent process). - -// Package cgi implements CGI (Common Gateway Interface) as specified -// in RFC 3875. -// -// Note that using CGI means starting a new process to handle each -// request, which is typically less efficient than using a -// long-running server. This package is intended primarily for -// compatibility with existing systems. -package cgi - -import ( - "bufio" - "fmt" - "io" - "log" - "net/http" - "os" - "os/exec" - "path/filepath" - "regexp" - "runtime" - "strconv" - "strings" -) - -var trailingPort = regexp.MustCompile(`:([0-9]+)$`) - -var osDefaultInheritEnv = map[string][]string{ - "darwin": {"DYLD_LIBRARY_PATH"}, - "freebsd": {"LD_LIBRARY_PATH"}, - "hpux": {"LD_LIBRARY_PATH", "SHLIB_PATH"}, - "irix": {"LD_LIBRARY_PATH", "LD_LIBRARYN32_PATH", "LD_LIBRARY64_PATH"}, - "linux": {"LD_LIBRARY_PATH"}, - "openbsd": {"LD_LIBRARY_PATH"}, - "solaris": {"LD_LIBRARY_PATH", "LD_LIBRARY_PATH_32", "LD_LIBRARY_PATH_64"}, - "windows": {"SystemRoot", "COMSPEC", "PATHEXT", "WINDIR"}, -} - -// Handler runs an executable in a subprocess with a CGI environment. -type Handler struct { - Path string // path to the CGI executable - Root string // root URI prefix of handler or empty for "/" - - // Dir specifies the CGI executable's working directory. - // If Dir is empty, the base directory of Path is used. - // If Path has no base directory, the current working - // directory is used. 
- Dir string - - Env []string // extra environment variables to set, if any, as "key=value" - InheritEnv []string // environment variables to inherit from host, as "key" - Logger *log.Logger // optional log for errors or nil to use log.Print - Args []string // optional arguments to pass to child process - - // PathLocationHandler specifies the root http Handler that - // should handle internal redirects when the CGI process - // returns a Location header value starting with a "/", as - // specified in RFC 3875 § 6.3.2. This will likely be - // http.DefaultServeMux. - // - // If nil, a CGI response with a local URI path is instead sent - // back to the client and not redirected internally. - PathLocationHandler http.Handler -} - -// removeLeadingDuplicates remove leading duplicate in environments. -// It's possible to override environment like following. -// cgi.Handler{ -// ... -// Env: []string{"SCRIPT_FILENAME=foo.php"}, -// } -func removeLeadingDuplicates(env []string) (ret []string) { - n := len(env) - for i := 0; i < n; i++ { - e := env[i] - s := strings.SplitN(e, "=", 2)[0] - found := false - for j := i + 1; j < n; j++ { - if s == strings.SplitN(env[j], "=", 2)[0] { - found = true - break - } - } - if !found { - ret = append(ret, e) - } - } - return -} - -func (h *Handler) ServeHTTP(rw http.ResponseWriter, req *http.Request) { - root := h.Root - if root == "" { - root = "/" - } - - if len(req.TransferEncoding) > 0 && req.TransferEncoding[0] == "chunked" { - rw.WriteHeader(http.StatusBadRequest) - rw.Write([]byte("Chunked request bodies are not supported by CGI.")) - return - } - - pathInfo := req.URL.Path - if root != "/" && strings.HasPrefix(pathInfo, root) { - pathInfo = pathInfo[len(root):] - } - - port := "80" - if matches := trailingPort.FindStringSubmatch(req.Host); len(matches) != 0 { - port = matches[1] - } - - env := []string{ - "SERVER_SOFTWARE=go", - "SERVER_NAME=" + req.Host, - "SERVER_PROTOCOL=HTTP/1.1", - "HTTP_HOST=" + req.Host, - 
"GATEWAY_INTERFACE=CGI/1.1", - "REQUEST_METHOD=" + req.Method, - "QUERY_STRING=" + req.URL.RawQuery, - "REQUEST_URI=" + req.URL.RequestURI(), - "PATH_INFO=" + pathInfo, - "SCRIPT_NAME=" + root, - "SCRIPT_FILENAME=" + h.Path, - "REMOTE_ADDR=" + req.RemoteAddr, - "REMOTE_HOST=" + req.RemoteAddr, - "SERVER_PORT=" + port, - } - - if req.TLS != nil { - env = append(env, "HTTPS=on") - } - - for k, v := range req.Header { - k = strings.Map(upperCaseAndUnderscore, k) - joinStr := ", " - if k == "COOKIE" { - joinStr = "; " - } - env = append(env, "HTTP_"+k+"="+strings.Join(v, joinStr)) - } - - if req.ContentLength > 0 { - env = append(env, fmt.Sprintf("CONTENT_LENGTH=%d", req.ContentLength)) - } - if ctype := req.Header.Get("Content-Type"); ctype != "" { - env = append(env, "CONTENT_TYPE="+ctype) - } - - if h.Env != nil { - env = append(env, h.Env...) - } - - envPath := os.Getenv("PATH") - if envPath == "" { - envPath = "/bin:/usr/bin:/usr/ucb:/usr/bsd:/usr/local/bin" - } - env = append(env, "PATH="+envPath) - - for _, e := range h.InheritEnv { - if v := os.Getenv(e); v != "" { - env = append(env, e+"="+v) - } - } - - for _, e := range osDefaultInheritEnv[runtime.GOOS] { - if v := os.Getenv(e); v != "" { - env = append(env, e+"="+v) - } - } - - env = removeLeadingDuplicates(env) - - var cwd, path string - if h.Dir != "" { - path = h.Path - cwd = h.Dir - } else { - cwd, path = filepath.Split(h.Path) - } - if cwd == "" { - cwd = "." 
- } - - internalError := func(err error) { - rw.WriteHeader(http.StatusInternalServerError) - h.printf("CGI error: %v", err) - } - - cmd := &exec.Cmd{ - Path: path, - Args: append([]string{h.Path}, h.Args...), - Dir: cwd, - Env: env, - Stderr: os.Stderr, // for now - } - if req.ContentLength != 0 { - cmd.Stdin = req.Body - } - stdoutRead, err := cmd.StdoutPipe() - if err != nil { - internalError(err) - return - } - - err = cmd.Start() - if err != nil { - internalError(err) - return - } - if hook := testHookStartProcess; hook != nil { - hook(cmd.Process) - } - defer cmd.Wait() - defer stdoutRead.Close() - - linebody := bufio.NewReaderSize(stdoutRead, 1024) - headers := make(http.Header) - statusCode := 0 - headerLines := 0 - sawBlankLine := false - for { - line, isPrefix, err := linebody.ReadLine() - if isPrefix { - rw.WriteHeader(http.StatusInternalServerError) - h.printf("cgi: long header line from subprocess.") - return - } - if err == io.EOF { - break - } - if err != nil { - rw.WriteHeader(http.StatusInternalServerError) - h.printf("cgi: error reading headers: %v", err) - return - } - if len(line) == 0 { - sawBlankLine = true - break - } - headerLines++ - parts := strings.SplitN(string(line), ":", 2) - if len(parts) < 2 { - h.printf("cgi: bogus header line: %s", string(line)) - continue - } - header, val := parts[0], parts[1] - header = strings.TrimSpace(header) - val = strings.TrimSpace(val) - switch { - case header == "Status": - if len(val) < 3 { - h.printf("cgi: bogus status (short): %q", val) - return - } - code, err := strconv.Atoi(val[0:3]) - if err != nil { - h.printf("cgi: bogus status: %q", val) - h.printf("cgi: line was %q", line) - return - } - statusCode = code - default: - headers.Add(header, val) - } - } - if headerLines == 0 || !sawBlankLine { - rw.WriteHeader(http.StatusInternalServerError) - h.printf("cgi: no headers") - return - } - - if loc := headers.Get("Location"); loc != "" { - if strings.HasPrefix(loc, "/") && h.PathLocationHandler != 
nil { - h.handleInternalRedirect(rw, req, loc) - return - } - if statusCode == 0 { - statusCode = http.StatusFound - } - } - - if statusCode == 0 && headers.Get("Content-Type") == "" { - rw.WriteHeader(http.StatusInternalServerError) - h.printf("cgi: missing required Content-Type in headers") - return - } - - if statusCode == 0 { - statusCode = http.StatusOK - } - - // Copy headers to rw's headers, after we've decided not to - // go into handleInternalRedirect, which won't want its rw - // headers to have been touched. - for k, vv := range headers { - for _, v := range vv { - rw.Header().Add(k, v) - } - } - - rw.WriteHeader(statusCode) - - _, err = io.Copy(rw, linebody) - if err != nil { - h.printf("cgi: copy error: %v", err) - // And kill the child CGI process so we don't hang on - // the deferred cmd.Wait above if the error was just - // the client (rw) going away. If it was a read error - // (because the child died itself), then the extra - // kill of an already-dead process is harmless (the PID - // won't be reused until the Wait above). - cmd.Process.Kill() - } -} - -func (h *Handler) printf(format string, v ...interface{}) { - if h.Logger != nil { - h.Logger.Printf(format, v...) - } else { - log.Printf(format, v...) - } -} - -func (h *Handler) handleInternalRedirect(rw http.ResponseWriter, req *http.Request, path string) { - url, err := req.URL.Parse(path) - if err != nil { - rw.WriteHeader(http.StatusInternalServerError) - h.printf("cgi: error resolving local URI path %q: %v", path, err) - return - } - // TODO: RFC 3875 isn't clear if only GET is supported, but it - // suggests so: "Note that any message-body attached to the - // request (such as for a POST request) may not be available - // to the resource that is the target of the redirect." We - // should do some tests against Apache to see how it handles - // POST, HEAD, etc. Does the internal redirect get the same - // method or just GET? What about incoming headers? - // (e.g. 
Cookies) Which headers, if any, are copied into the - // second request? - newReq := &http.Request{ - Method: "GET", - URL: url, - Proto: "HTTP/1.1", - ProtoMajor: 1, - ProtoMinor: 1, - Header: make(http.Header), - Host: url.Host, - RemoteAddr: req.RemoteAddr, - TLS: req.TLS, - } - h.PathLocationHandler.ServeHTTP(rw, newReq) -} - -func upperCaseAndUnderscore(r rune) rune { - switch { - case r >= 'a' && r <= 'z': - return r - ('a' - 'A') - case r == '-': - return '_' - case r == '=': - // Maybe not part of the CGI 'spec' but would mess up - // the environment in any case, as Go represents the - // environment as a slice of "key=value" strings. - return '_' - } - // TODO: other transformations in spec or practice? - return r -} - -var testHookStartProcess func(*os.Process) // nil except for some tests diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/core/http/cgi/host_test.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/core/http/cgi/host_test.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/core/http/cgi/host_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/core/http/cgi/host_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,461 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// Tests for package cgi - -package cgi - -import ( - "bufio" - "fmt" - "io" - "net" - "net/http" - "net/http/httptest" - "os" - "os/exec" - "path/filepath" - "runtime" - "strconv" - "strings" - "testing" - "time" -) - -func newRequest(httpreq string) *http.Request { - buf := bufio.NewReader(strings.NewReader(httpreq)) - req, err := http.ReadRequest(buf) - if err != nil { - panic("cgi: bogus http request in test: " + httpreq) - } - req.RemoteAddr = "1.2.3.4" - return req -} - -func runCgiTest(t *testing.T, h *Handler, httpreq string, expectedMap map[string]string) *httptest.ResponseRecorder { - rw := httptest.NewRecorder() - req := newRequest(httpreq) - h.ServeHTTP(rw, req) - - // Make a map to hold the test map that the CGI returns. - m := make(map[string]string) - m["_body"] = rw.Body.String() - linesRead := 0 -readlines: - for { - line, err := rw.Body.ReadString('\n') - switch { - case err == io.EOF: - break readlines - case err != nil: - t.Fatalf("unexpected error reading from CGI: %v", err) - } - linesRead++ - trimmedLine := strings.TrimRight(line, "\r\n") - split := strings.SplitN(trimmedLine, "=", 2) - if len(split) != 2 { - t.Fatalf("Unexpected %d parts from invalid line number %v: %q; existing map=%v", - len(split), linesRead, line, m) - } - m[split[0]] = split[1] - } - - for key, expected := range expectedMap { - got := m[key] - if key == "cwd" { - // For Windows. golang.org/issue/4645. - fi1, _ := os.Stat(got) - fi2, _ := os.Stat(expected) - if os.SameFile(fi1, fi2) { - got = expected - } - } - if got != expected { - t.Errorf("for key %q got %q; expected %q", key, got, expected) - } - } - return rw -} - -var cgiTested, cgiWorks bool - -func check(t *testing.T) { - if !cgiTested { - cgiTested = true - cgiWorks = exec.Command("./testdata/test.cgi").Run() == nil - } - if !cgiWorks { - // No Perl on Windows, needed by test.cgi - // TODO: make the child process be Go, not Perl. 
- t.Skip("Skipping test: test.cgi failed.") - } -} - -func TestCGIBasicGet(t *testing.T) { - check(t) - h := &Handler{ - Path: "testdata/test.cgi", - Root: "/test.cgi", - } - expectedMap := map[string]string{ - "test": "Hello CGI", - "param-a": "b", - "param-foo": "bar", - "env-GATEWAY_INTERFACE": "CGI/1.1", - "env-HTTP_HOST": "example.com", - "env-PATH_INFO": "", - "env-QUERY_STRING": "foo=bar&a=b", - "env-REMOTE_ADDR": "1.2.3.4", - "env-REMOTE_HOST": "1.2.3.4", - "env-REQUEST_METHOD": "GET", - "env-REQUEST_URI": "/test.cgi?foo=bar&a=b", - "env-SCRIPT_FILENAME": "testdata/test.cgi", - "env-SCRIPT_NAME": "/test.cgi", - "env-SERVER_NAME": "example.com", - "env-SERVER_PORT": "80", - "env-SERVER_SOFTWARE": "go", - } - replay := runCgiTest(t, h, "GET /test.cgi?foo=bar&a=b HTTP/1.0\nHost: example.com\n\n", expectedMap) - - if expected, got := "text/html", replay.Header().Get("Content-Type"); got != expected { - t.Errorf("got a Content-Type of %q; expected %q", got, expected) - } - if expected, got := "X-Test-Value", replay.Header().Get("X-Test-Header"); got != expected { - t.Errorf("got a X-Test-Header of %q; expected %q", got, expected) - } -} - -func TestCGIBasicGetAbsPath(t *testing.T) { - check(t) - pwd, err := os.Getwd() - if err != nil { - t.Fatalf("getwd error: %v", err) - } - h := &Handler{ - Path: pwd + "/testdata/test.cgi", - Root: "/test.cgi", - } - expectedMap := map[string]string{ - "env-REQUEST_URI": "/test.cgi?foo=bar&a=b", - "env-SCRIPT_FILENAME": pwd + "/testdata/test.cgi", - "env-SCRIPT_NAME": "/test.cgi", - } - runCgiTest(t, h, "GET /test.cgi?foo=bar&a=b HTTP/1.0\nHost: example.com\n\n", expectedMap) -} - -func TestPathInfo(t *testing.T) { - check(t) - h := &Handler{ - Path: "testdata/test.cgi", - Root: "/test.cgi", - } - expectedMap := map[string]string{ - "param-a": "b", - "env-PATH_INFO": "/extrapath", - "env-QUERY_STRING": "a=b", - "env-REQUEST_URI": "/test.cgi/extrapath?a=b", - "env-SCRIPT_FILENAME": "testdata/test.cgi", - "env-SCRIPT_NAME": 
"/test.cgi", - } - runCgiTest(t, h, "GET /test.cgi/extrapath?a=b HTTP/1.0\nHost: example.com\n\n", expectedMap) -} - -func TestPathInfoDirRoot(t *testing.T) { - check(t) - h := &Handler{ - Path: "testdata/test.cgi", - Root: "/myscript/", - } - expectedMap := map[string]string{ - "env-PATH_INFO": "bar", - "env-QUERY_STRING": "a=b", - "env-REQUEST_URI": "/myscript/bar?a=b", - "env-SCRIPT_FILENAME": "testdata/test.cgi", - "env-SCRIPT_NAME": "/myscript/", - } - runCgiTest(t, h, "GET /myscript/bar?a=b HTTP/1.0\nHost: example.com\n\n", expectedMap) -} - -func TestDupHeaders(t *testing.T) { - check(t) - h := &Handler{ - Path: "testdata/test.cgi", - } - expectedMap := map[string]string{ - "env-REQUEST_URI": "/myscript/bar?a=b", - "env-SCRIPT_FILENAME": "testdata/test.cgi", - "env-HTTP_COOKIE": "nom=NOM; yum=YUM", - "env-HTTP_X_FOO": "val1, val2", - } - runCgiTest(t, h, "GET /myscript/bar?a=b HTTP/1.0\n"+ - "Cookie: nom=NOM\n"+ - "Cookie: yum=YUM\n"+ - "X-Foo: val1\n"+ - "X-Foo: val2\n"+ - "Host: example.com\n\n", - expectedMap) -} - -func TestPathInfoNoRoot(t *testing.T) { - check(t) - h := &Handler{ - Path: "testdata/test.cgi", - Root: "", - } - expectedMap := map[string]string{ - "env-PATH_INFO": "/bar", - "env-QUERY_STRING": "a=b", - "env-REQUEST_URI": "/bar?a=b", - "env-SCRIPT_FILENAME": "testdata/test.cgi", - "env-SCRIPT_NAME": "/", - } - runCgiTest(t, h, "GET /bar?a=b HTTP/1.0\nHost: example.com\n\n", expectedMap) -} - -func TestCGIBasicPost(t *testing.T) { - check(t) - postReq := `POST /test.cgi?a=b HTTP/1.0 -Host: example.com -Content-Type: application/x-www-form-urlencoded -Content-Length: 15 - -postfoo=postbar` - h := &Handler{ - Path: "testdata/test.cgi", - Root: "/test.cgi", - } - expectedMap := map[string]string{ - "test": "Hello CGI", - "param-postfoo": "postbar", - "env-REQUEST_METHOD": "POST", - "env-CONTENT_LENGTH": "15", - "env-REQUEST_URI": "/test.cgi?a=b", - } - runCgiTest(t, h, postReq, expectedMap) -} - -func chunk(s string) string { - return 
fmt.Sprintf("%x\r\n%s\r\n", len(s), s) -} - -// The CGI spec doesn't allow chunked requests. -func TestCGIPostChunked(t *testing.T) { - check(t) - postReq := `POST /test.cgi?a=b HTTP/1.1 -Host: example.com -Content-Type: application/x-www-form-urlencoded -Transfer-Encoding: chunked - -` + chunk("postfoo") + chunk("=") + chunk("postbar") + chunk("") - - h := &Handler{ - Path: "testdata/test.cgi", - Root: "/test.cgi", - } - expectedMap := map[string]string{} - resp := runCgiTest(t, h, postReq, expectedMap) - if got, expected := resp.Code, http.StatusBadRequest; got != expected { - t.Fatalf("Expected %v response code from chunked request body; got %d", - expected, got) - } -} - -func TestRedirect(t *testing.T) { - check(t) - h := &Handler{ - Path: "testdata/test.cgi", - Root: "/test.cgi", - } - rec := runCgiTest(t, h, "GET /test.cgi?loc=http://foo.com/ HTTP/1.0\nHost: example.com\n\n", nil) - if e, g := 302, rec.Code; e != g { - t.Errorf("expected status code %d; got %d", e, g) - } - if e, g := "http://foo.com/", rec.Header().Get("Location"); e != g { - t.Errorf("expected Location header of %q; got %q", e, g) - } -} - -func TestInternalRedirect(t *testing.T) { - check(t) - baseHandler := http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) { - fmt.Fprintf(rw, "basepath=%s\n", req.URL.Path) - fmt.Fprintf(rw, "remoteaddr=%s\n", req.RemoteAddr) - }) - h := &Handler{ - Path: "testdata/test.cgi", - Root: "/test.cgi", - PathLocationHandler: baseHandler, - } - expectedMap := map[string]string{ - "basepath": "/foo", - "remoteaddr": "1.2.3.4", - } - runCgiTest(t, h, "GET /test.cgi?loc=/foo HTTP/1.0\nHost: example.com\n\n", expectedMap) -} - -// TestCopyError tests that we kill the process if there's an error copying -// its output. 
(for example, from the client having gone away) -func TestCopyError(t *testing.T) { - check(t) - if runtime.GOOS == "windows" { - t.Skipf("skipping test on %q", runtime.GOOS) - } - h := &Handler{ - Path: "testdata/test.cgi", - Root: "/test.cgi", - } - ts := httptest.NewServer(h) - defer ts.Close() - - conn, err := net.Dial("tcp", ts.Listener.Addr().String()) - if err != nil { - t.Fatal(err) - } - req, _ := http.NewRequest("GET", "http://example.com/test.cgi?bigresponse=1", nil) - err = req.Write(conn) - if err != nil { - t.Fatalf("Write: %v", err) - } - - res, err := http.ReadResponse(bufio.NewReader(conn), req) - if err != nil { - t.Fatalf("ReadResponse: %v", err) - } - - pidstr := res.Header.Get("X-CGI-Pid") - if pidstr == "" { - t.Fatalf("expected an X-CGI-Pid header in response") - } - pid, err := strconv.Atoi(pidstr) - if err != nil { - t.Fatalf("invalid X-CGI-Pid value") - } - - var buf [5000]byte - n, err := io.ReadFull(res.Body, buf[:]) - if err != nil { - t.Fatalf("ReadFull: %d bytes, %v", n, err) - } - - childRunning := func() bool { - return isProcessRunning(t, pid) - } - - if !childRunning() { - t.Fatalf("pre-conn.Close, expected child to be running") - } - conn.Close() - - tries := 0 - for tries < 25 && childRunning() { - time.Sleep(50 * time.Millisecond * time.Duration(tries)) - tries++ - } - if childRunning() { - t.Fatalf("post-conn.Close, expected child to be gone") - } -} - -func TestDirUnix(t *testing.T) { - check(t) - if runtime.GOOS == "windows" { - t.Skipf("skipping test on %q", runtime.GOOS) - } - cwd, _ := os.Getwd() - h := &Handler{ - Path: "testdata/test.cgi", - Root: "/test.cgi", - Dir: cwd, - } - expectedMap := map[string]string{ - "cwd": cwd, - } - runCgiTest(t, h, "GET /test.cgi HTTP/1.0\nHost: example.com\n\n", expectedMap) - - cwd, _ = os.Getwd() - cwd = filepath.Join(cwd, "testdata") - h = &Handler{ - Path: "testdata/test.cgi", - Root: "/test.cgi", - } - expectedMap = map[string]string{ - "cwd": cwd, - } - runCgiTest(t, h, "GET 
/test.cgi HTTP/1.0\nHost: example.com\n\n", expectedMap) -} - -func TestDirWindows(t *testing.T) { - if runtime.GOOS != "windows" { - t.Skip("Skipping windows specific test.") - } - - cgifile, _ := filepath.Abs("testdata/test.cgi") - - var perl string - var err error - perl, err = exec.LookPath("perl") - if err != nil { - t.Skip("Skipping test: perl not found.") - } - perl, _ = filepath.Abs(perl) - - cwd, _ := os.Getwd() - h := &Handler{ - Path: perl, - Root: "/test.cgi", - Dir: cwd, - Args: []string{cgifile}, - Env: []string{"SCRIPT_FILENAME=" + cgifile}, - } - expectedMap := map[string]string{ - "cwd": cwd, - } - runCgiTest(t, h, "GET /test.cgi HTTP/1.0\nHost: example.com\n\n", expectedMap) - - // If not specify Dir on windows, working directory should be - // base directory of perl. - cwd, _ = filepath.Split(perl) - if cwd != "" && cwd[len(cwd)-1] == filepath.Separator { - cwd = cwd[:len(cwd)-1] - } - h = &Handler{ - Path: perl, - Root: "/test.cgi", - Args: []string{cgifile}, - Env: []string{"SCRIPT_FILENAME=" + cgifile}, - } - expectedMap = map[string]string{ - "cwd": cwd, - } - runCgiTest(t, h, "GET /test.cgi HTTP/1.0\nHost: example.com\n\n", expectedMap) -} - -func TestEnvOverride(t *testing.T) { - cgifile, _ := filepath.Abs("testdata/test.cgi") - - var perl string - var err error - perl, err = exec.LookPath("perl") - if err != nil { - t.Skipf("Skipping test: perl not found.") - } - perl, _ = filepath.Abs(perl) - - cwd, _ := os.Getwd() - h := &Handler{ - Path: perl, - Root: "/test.cgi", - Dir: cwd, - Args: []string{cgifile}, - Env: []string{ - "SCRIPT_FILENAME=" + cgifile, - "REQUEST_URI=/foo/bar"}, - } - expectedMap := map[string]string{ - "cwd": cwd, - "env-SCRIPT_FILENAME": cgifile, - "env-REQUEST_URI": "/foo/bar", - } - runCgiTest(t, h, "GET /test.cgi HTTP/1.0\nHost: example.com\n\n", expectedMap) -} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/core/http/cgi/matryoshka_test.go 
juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/core/http/cgi/matryoshka_test.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/core/http/cgi/matryoshka_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/core/http/cgi/matryoshka_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,228 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Tests a Go CGI program running under a Go CGI host process. -// Further, the two programs are the same binary, just checking -// their environment to figure out what mode to run in. - -package cgi - -import ( - "bytes" - "errors" - "fmt" - "io" - "net/http" - "net/http/httptest" - "os" - "runtime" - "testing" - "time" -) - -// This test is a CGI host (testing host.go) that runs its own binary -// as a child process testing the other half of CGI (child.go). -func TestHostingOurselves(t *testing.T) { - if runtime.GOOS == "nacl" { - t.Skip("skipping on nacl") - } - - h := &Handler{ - Path: os.Args[0], - Root: "/test.go", - Args: []string{"-test.run=TestBeChildCGIProcess"}, - } - expectedMap := map[string]string{ - "test": "Hello CGI-in-CGI", - "param-a": "b", - "param-foo": "bar", - "env-GATEWAY_INTERFACE": "CGI/1.1", - "env-HTTP_HOST": "example.com", - "env-PATH_INFO": "", - "env-QUERY_STRING": "foo=bar&a=b", - "env-REMOTE_ADDR": "1.2.3.4", - "env-REMOTE_HOST": "1.2.3.4", - "env-REQUEST_METHOD": "GET", - "env-REQUEST_URI": "/test.go?foo=bar&a=b", - "env-SCRIPT_FILENAME": os.Args[0], - "env-SCRIPT_NAME": "/test.go", - "env-SERVER_NAME": "example.com", - "env-SERVER_PORT": "80", - "env-SERVER_SOFTWARE": "go", - } - replay := runCgiTest(t, h, "GET /test.go?foo=bar&a=b HTTP/1.0\nHost: example.com\n\n", expectedMap) - - if expected, got := "text/html; charset=utf-8", replay.Header().Get("Content-Type"); got != expected { - t.Errorf("got a 
Content-Type of %q; expected %q", got, expected) - } - if expected, got := "X-Test-Value", replay.Header().Get("X-Test-Header"); got != expected { - t.Errorf("got a X-Test-Header of %q; expected %q", got, expected) - } -} - -type customWriterRecorder struct { - w io.Writer - *httptest.ResponseRecorder -} - -func (r *customWriterRecorder) Write(p []byte) (n int, err error) { - return r.w.Write(p) -} - -type limitWriter struct { - w io.Writer - n int -} - -func (w *limitWriter) Write(p []byte) (n int, err error) { - if len(p) > w.n { - p = p[:w.n] - } - if len(p) > 0 { - n, err = w.w.Write(p) - w.n -= n - } - if w.n == 0 { - err = errors.New("past write limit") - } - return -} - -// If there's an error copying the child's output to the parent, test -// that we kill the child. -func TestKillChildAfterCopyError(t *testing.T) { - if runtime.GOOS == "nacl" { - t.Skip("skipping on nacl") - } - - defer func() { testHookStartProcess = nil }() - proc := make(chan *os.Process, 1) - testHookStartProcess = func(p *os.Process) { - proc <- p - } - - h := &Handler{ - Path: os.Args[0], - Root: "/test.go", - Args: []string{"-test.run=TestBeChildCGIProcess"}, - } - req, _ := http.NewRequest("GET", "http://example.com/test.cgi?write-forever=1", nil) - rec := httptest.NewRecorder() - var out bytes.Buffer - const writeLen = 50 << 10 - rw := &customWriterRecorder{&limitWriter{&out, writeLen}, rec} - - donec := make(chan bool, 1) - go func() { - h.ServeHTTP(rw, req) - donec <- true - }() - - select { - case <-donec: - if out.Len() != writeLen || out.Bytes()[0] != 'a' { - t.Errorf("unexpected output: %q", out.Bytes()) - } - case <-time.After(5 * time.Second): - t.Errorf("timeout. ServeHTTP hung and didn't kill the child process?") - select { - case p := <-proc: - p.Kill() - t.Logf("killed process") - default: - t.Logf("didn't kill process") - } - } -} - -// Test that a child handler writing only headers works. 
-// golang.org/issue/7196 -func TestChildOnlyHeaders(t *testing.T) { - if runtime.GOOS == "nacl" { - t.Skip("skipping on nacl") - } - - h := &Handler{ - Path: os.Args[0], - Root: "/test.go", - Args: []string{"-test.run=TestBeChildCGIProcess"}, - } - expectedMap := map[string]string{ - "_body": "", - } - replay := runCgiTest(t, h, "GET /test.go?no-body=1 HTTP/1.0\nHost: example.com\n\n", expectedMap) - if expected, got := "X-Test-Value", replay.Header().Get("X-Test-Header"); got != expected { - t.Errorf("got a X-Test-Header of %q; expected %q", got, expected) - } -} - -// golang.org/issue/7198 -func Test500WithNoHeaders(t *testing.T) { want500Test(t, "/immediate-disconnect") } -func Test500WithNoContentType(t *testing.T) { want500Test(t, "/no-content-type") } -func Test500WithEmptyHeaders(t *testing.T) { want500Test(t, "/empty-headers") } - -func want500Test(t *testing.T, path string) { - h := &Handler{ - Path: os.Args[0], - Root: "/test.go", - Args: []string{"-test.run=TestBeChildCGIProcess"}, - } - expectedMap := map[string]string{ - "_body": "", - } - replay := runCgiTest(t, h, "GET "+path+" HTTP/1.0\nHost: example.com\n\n", expectedMap) - if replay.Code != 500 { - t.Errorf("Got code %d; want 500", replay.Code) - } -} - -type neverEnding byte - -func (b neverEnding) Read(p []byte) (n int, err error) { - for i := range p { - p[i] = byte(b) - } - return len(p), nil -} - -// Note: not actually a test. -func TestBeChildCGIProcess(t *testing.T) { - if os.Getenv("REQUEST_METHOD") == "" { - // Not in a CGI environment; skipping test. 
- return - } - switch os.Getenv("REQUEST_URI") { - case "/immediate-disconnect": - os.Exit(0) - case "/no-content-type": - fmt.Printf("Content-Length: 6\n\nHello\n") - os.Exit(0) - case "/empty-headers": - fmt.Printf("\nHello") - os.Exit(0) - } - Serve(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) { - rw.Header().Set("X-Test-Header", "X-Test-Value") - req.ParseForm() - if req.FormValue("no-body") == "1" { - return - } - if req.FormValue("write-forever") == "1" { - io.Copy(rw, neverEnding('a')) - for { - time.Sleep(5 * time.Second) // hang forever, until killed - } - } - fmt.Fprintf(rw, "test=Hello CGI-in-CGI\n") - for k, vv := range req.Form { - for _, v := range vv { - fmt.Fprintf(rw, "param-%s=%s\n", k, v) - } - } - for _, kv := range os.Environ() { - fmt.Fprintf(rw, "env-%s\n", kv) - } - })) - os.Exit(0) -} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/core/http/cgi/plan9_test.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/core/http/cgi/plan9_test.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/core/http/cgi/plan9_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/core/http/cgi/plan9_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,18 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build plan9 - -package cgi - -import ( - "os" - "strconv" - "testing" -) - -func isProcessRunning(t *testing.T, pid int) bool { - _, err := os.Stat("/proc/" + strconv.Itoa(pid)) - return err == nil -} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/core/http/cgi/posix_test.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/core/http/cgi/posix_test.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/core/http/cgi/posix_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/core/http/cgi/posix_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,21 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !plan9 - -package cgi - -import ( - "os" - "syscall" - "testing" -) - -func isProcessRunning(t *testing.T, pid int) bool { - p, err := os.FindProcess(pid) - if err != nil { - return false - } - return p.Signal(syscall.Signal(0)) == nil -} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/core/http/cgi/testdata/test.cgi juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/core/http/cgi/testdata/test.cgi --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/core/http/cgi/testdata/test.cgi 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/core/http/cgi/testdata/test.cgi 1970-01-01 00:00:00.000000000 +0000 @@ -1,91 +0,0 @@ -#!/usr/bin/perl -# Copyright 2011 The Go Authors. All rights reserved. -# Use of this source code is governed by a BSD-style -# license that can be found in the LICENSE file. 
-# -# Test script run as a child process under cgi_test.go - -use strict; -use Cwd; - -binmode STDOUT; - -my $q = MiniCGI->new; -my $params = $q->Vars; - -if ($params->{"loc"}) { - print "Location: $params->{loc}\r\n\r\n"; - exit(0); -} - -print "Content-Type: text/html\r\n"; -print "X-CGI-Pid: $$\r\n"; -print "X-Test-Header: X-Test-Value\r\n"; -print "\r\n"; - -if ($params->{"bigresponse"}) { - # 17 MB, for OS X: golang.org/issue/4958 - for (1..(17 * 1024)) { - print "A" x 1024, "\r\n"; - } - exit 0; -} - -print "test=Hello CGI\r\n"; - -foreach my $k (sort keys %$params) { - print "param-$k=$params->{$k}\r\n"; -} - -foreach my $k (sort keys %ENV) { - my $clean_env = $ENV{$k}; - $clean_env =~ s/[\n\r]//g; - print "env-$k=$clean_env\r\n"; -} - -# NOTE: msys perl returns /c/go/src/... not C:\go\.... -my $dir = getcwd(); -if ($^O eq 'MSWin32' || $^O eq 'msys') { - if ($dir =~ /^.:/) { - $dir =~ s!/!\\!g; - } else { - my $cmd = $ENV{'COMSPEC'} || 'c:\\windows\\system32\\cmd.exe'; - $cmd =~ s!\\!/!g; - $dir = `$cmd /c cd`; - chomp $dir; - } -} -print "cwd=$dir\r\n"; - -# A minimal version of CGI.pm, for people without the perl-modules -# package installed. (CGI.pm used to be part of the Perl core, but -# some distros now bundle perl-base and perl-modules separately...) 
-package MiniCGI; - -sub new { - my $class = shift; - return bless {}, $class; -} - -sub Vars { - my $self = shift; - my $pairs; - if ($ENV{CONTENT_LENGTH}) { - $pairs = do { local $/; }; - } else { - $pairs = $ENV{QUERY_STRING}; - } - my $vars = {}; - foreach my $kv (split(/&/, $pairs)) { - my ($k, $v) = split(/=/, $kv, 2); - $vars->{_urldecode($k)} = _urldecode($v); - } - return $vars; -} - -sub _urldecode { - my $v = shift; - $v =~ tr/+/ /; - $v =~ s/%([a-fA-F0-9][a-fA-F0-9])/pack("C", hex($1))/eg; - return $v; -} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/core/http/cookiejar/jar.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/core/http/cookiejar/jar.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/core/http/cookiejar/jar.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/core/http/cookiejar/jar.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,497 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package cookiejar implements an in-memory RFC 6265-compliant http.CookieJar. -package cookiejar - -import ( - "errors" - "fmt" - "net" - "net/http" - "net/url" - "sort" - "strings" - "sync" - "time" -) - -// PublicSuffixList provides the public suffix of a domain. For example: -// - the public suffix of "example.com" is "com", -// - the public suffix of "foo1.foo2.foo3.co.uk" is "co.uk", and -// - the public suffix of "bar.pvt.k12.ma.us" is "pvt.k12.ma.us". -// -// Implementations of PublicSuffixList must be safe for concurrent use by -// multiple goroutines. -// -// An implementation that always returns "" is valid and may be useful for -// testing but it is not secure: it means that the HTTP server for foo.com can -// set a cookie for bar.com. -// -// A public suffix list implementation is in the package -// code.google.com/p/go.net/publicsuffix. 
-type PublicSuffixList interface { - // PublicSuffix returns the public suffix of domain. - // - // TODO: specify which of the caller and callee is responsible for IP - // addresses, for leading and trailing dots, for case sensitivity, and - // for IDN/Punycode. - PublicSuffix(domain string) string - - // String returns a description of the source of this public suffix - // list. The description will typically contain something like a time - // stamp or version number. - String() string -} - -// Options are the options for creating a new Jar. -type Options struct { - // PublicSuffixList is the public suffix list that determines whether - // an HTTP server can set a cookie for a domain. - // - // A nil value is valid and may be useful for testing but it is not - // secure: it means that the HTTP server for foo.co.uk can set a cookie - // for bar.co.uk. - PublicSuffixList PublicSuffixList -} - -// Jar implements the http.CookieJar interface from the net/http package. -type Jar struct { - psList PublicSuffixList - - // mu locks the remaining fields. - mu sync.Mutex - - // entries is a set of entries, keyed by their eTLD+1 and subkeyed by - // their name/domain/path. - entries map[string]map[string]entry - - // nextSeqNum is the next sequence number assigned to a new cookie - // created SetCookies. - nextSeqNum uint64 -} - -// New returns a new cookie jar. A nil *Options is equivalent to a zero -// Options. -func New(o *Options) (*Jar, error) { - jar := &Jar{ - entries: make(map[string]map[string]entry), - } - if o != nil { - jar.psList = o.PublicSuffixList - } - return jar, nil -} - -// entry is the internal representation of a cookie. -// -// This struct type is not used outside of this package per se, but the exported -// fields are those of RFC 6265. 
-type entry struct { - Name string - Value string - Domain string - Path string - Secure bool - HttpOnly bool - Persistent bool - HostOnly bool - Expires time.Time - Creation time.Time - LastAccess time.Time - - // seqNum is a sequence number so that Cookies returns cookies in a - // deterministic order, even for cookies that have equal Path length and - // equal Creation time. This simplifies testing. - seqNum uint64 -} - -// Id returns the domain;path;name triple of e as an id. -func (e *entry) id() string { - return fmt.Sprintf("%s;%s;%s", e.Domain, e.Path, e.Name) -} - -// shouldSend determines whether e's cookie qualifies to be included in a -// request to host/path. It is the caller's responsibility to check if the -// cookie is expired. -func (e *entry) shouldSend(https bool, host, path string) bool { - return e.domainMatch(host) && e.pathMatch(path) && (https || !e.Secure) -} - -// domainMatch implements "domain-match" of RFC 6265 section 5.1.3. -func (e *entry) domainMatch(host string) bool { - if e.Domain == host { - return true - } - return !e.HostOnly && hasDotSuffix(host, e.Domain) -} - -// pathMatch implements "path-match" according to RFC 6265 section 5.1.4. -func (e *entry) pathMatch(requestPath string) bool { - if requestPath == e.Path { - return true - } - if strings.HasPrefix(requestPath, e.Path) { - if e.Path[len(e.Path)-1] == '/' { - return true // The "/any/" matches "/any/path" case. - } else if requestPath[len(e.Path)] == '/' { - return true // The "/any" matches "/any/path" case. - } - } - return false -} - -// hasDotSuffix reports whether s ends in "."+suffix. -func hasDotSuffix(s, suffix string) bool { - return len(s) > len(suffix) && s[len(s)-len(suffix)-1] == '.' && s[len(s)-len(suffix):] == suffix -} - -// byPathLength is a []entry sort.Interface that sorts according to RFC 6265 -// section 5.4 point 2: by longest path and then by earliest creation time. 
-type byPathLength []entry - -func (s byPathLength) Len() int { return len(s) } - -func (s byPathLength) Less(i, j int) bool { - if len(s[i].Path) != len(s[j].Path) { - return len(s[i].Path) > len(s[j].Path) - } - if !s[i].Creation.Equal(s[j].Creation) { - return s[i].Creation.Before(s[j].Creation) - } - return s[i].seqNum < s[j].seqNum -} - -func (s byPathLength) Swap(i, j int) { s[i], s[j] = s[j], s[i] } - -// Cookies implements the Cookies method of the http.CookieJar interface. -// -// It returns an empty slice if the URL's scheme is not HTTP or HTTPS. -func (j *Jar) Cookies(u *url.URL) (cookies []*http.Cookie) { - return j.cookies(u, time.Now()) -} - -// cookies is like Cookies but takes the current time as a parameter. -func (j *Jar) cookies(u *url.URL, now time.Time) (cookies []*http.Cookie) { - if u.Scheme != "http" && u.Scheme != "https" { - return cookies - } - host, err := canonicalHost(u.Host) - if err != nil { - return cookies - } - key := jarKey(host, j.psList) - - j.mu.Lock() - defer j.mu.Unlock() - - submap := j.entries[key] - if submap == nil { - return cookies - } - - https := u.Scheme == "https" - path := u.Path - if path == "" { - path = "/" - } - - modified := false - var selected []entry - for id, e := range submap { - if e.Persistent && !e.Expires.After(now) { - delete(submap, id) - modified = true - continue - } - if !e.shouldSend(https, host, path) { - continue - } - e.LastAccess = now - submap[id] = e - selected = append(selected, e) - modified = true - } - if modified { - if len(submap) == 0 { - delete(j.entries, key) - } else { - j.entries[key] = submap - } - } - - sort.Sort(byPathLength(selected)) - for _, e := range selected { - cookies = append(cookies, &http.Cookie{Name: e.Name, Value: e.Value}) - } - - return cookies -} - -// SetCookies implements the SetCookies method of the http.CookieJar interface. -// -// It does nothing if the URL's scheme is not HTTP or HTTPS. 
-func (j *Jar) SetCookies(u *url.URL, cookies []*http.Cookie) { - j.setCookies(u, cookies, time.Now()) -} - -// setCookies is like SetCookies but takes the current time as parameter. -func (j *Jar) setCookies(u *url.URL, cookies []*http.Cookie, now time.Time) { - if len(cookies) == 0 { - return - } - if u.Scheme != "http" && u.Scheme != "https" { - return - } - host, err := canonicalHost(u.Host) - if err != nil { - return - } - key := jarKey(host, j.psList) - defPath := defaultPath(u.Path) - - j.mu.Lock() - defer j.mu.Unlock() - - submap := j.entries[key] - - modified := false - for _, cookie := range cookies { - e, remove, err := j.newEntry(cookie, now, defPath, host) - if err != nil { - continue - } - id := e.id() - if remove { - if submap != nil { - if _, ok := submap[id]; ok { - delete(submap, id) - modified = true - } - } - continue - } - if submap == nil { - submap = make(map[string]entry) - } - - if old, ok := submap[id]; ok { - e.Creation = old.Creation - e.seqNum = old.seqNum - } else { - e.Creation = now - e.seqNum = j.nextSeqNum - j.nextSeqNum++ - } - e.LastAccess = now - submap[id] = e - modified = true - } - - if modified { - if len(submap) == 0 { - delete(j.entries, key) - } else { - j.entries[key] = submap - } - } -} - -// canonicalHost strips port from host if present and returns the canonicalized -// host name. -func canonicalHost(host string) (string, error) { - var err error - host = strings.ToLower(host) - if hasPort(host) { - host, _, err = net.SplitHostPort(host) - if err != nil { - return "", err - } - } - if strings.HasSuffix(host, ".") { - // Strip trailing dot from fully qualified domain names. - host = host[:len(host)-1] - } - return toASCII(host) -} - -// hasPort reports whether host contains a port number. host may be a host -// name, an IPv4 or an IPv6 address. 
-func hasPort(host string) bool { - colons := strings.Count(host, ":") - if colons == 0 { - return false - } - if colons == 1 { - return true - } - return host[0] == '[' && strings.Contains(host, "]:") -} - -// jarKey returns the key to use for a jar. -func jarKey(host string, psl PublicSuffixList) string { - if isIP(host) { - return host - } - - var i int - if psl == nil { - i = strings.LastIndex(host, ".") - if i == -1 { - return host - } - } else { - suffix := psl.PublicSuffix(host) - if suffix == host { - return host - } - i = len(host) - len(suffix) - if i <= 0 || host[i-1] != '.' { - // The provided public suffix list psl is broken. - // Storing cookies under host is a safe stopgap. - return host - } - } - prevDot := strings.LastIndex(host[:i-1], ".") - return host[prevDot+1:] -} - -// isIP reports whether host is an IP address. -func isIP(host string) bool { - return net.ParseIP(host) != nil -} - -// defaultPath returns the directory part of an URL's path according to -// RFC 6265 section 5.1.4. -func defaultPath(path string) string { - if len(path) == 0 || path[0] != '/' { - return "/" // Path is empty or malformed. - } - - i := strings.LastIndex(path, "/") // Path starts with "/", so i != -1. - if i == 0 { - return "/" // Path has the form "/abc". - } - return path[:i] // Path is either of form "/abc/xyz" or "/abc/xyz/". -} - -// newEntry creates an entry from a http.Cookie c. now is the current time and -// is compared to c.Expires to determine deletion of c. defPath and host are the -// default-path and the canonical host name of the URL c was received from. -// -// remove records whether the jar should delete this cookie, as it has already -// expired with respect to now. In this case, e may be incomplete, but it will -// be valid to call e.id (which depends on e's Name, Domain and Path). -// -// A malformed c.Domain will result in an error. 
-func (j *Jar) newEntry(c *http.Cookie, now time.Time, defPath, host string) (e entry, remove bool, err error) { - e.Name = c.Name - - if c.Path == "" || c.Path[0] != '/' { - e.Path = defPath - } else { - e.Path = c.Path - } - - e.Domain, e.HostOnly, err = j.domainAndType(host, c.Domain) - if err != nil { - return e, false, err - } - - // MaxAge takes precedence over Expires. - if c.MaxAge < 0 { - return e, true, nil - } else if c.MaxAge > 0 { - e.Expires = now.Add(time.Duration(c.MaxAge) * time.Second) - e.Persistent = true - } else { - if c.Expires.IsZero() { - e.Expires = endOfTime - e.Persistent = false - } else { - if !c.Expires.After(now) { - return e, true, nil - } - e.Expires = c.Expires - e.Persistent = true - } - } - - e.Value = c.Value - e.Secure = c.Secure - e.HttpOnly = c.HttpOnly - - return e, false, nil -} - -var ( - errIllegalDomain = errors.New("cookiejar: illegal cookie domain attribute") - errMalformedDomain = errors.New("cookiejar: malformed cookie domain attribute") - errNoHostname = errors.New("cookiejar: no host name available (IP only)") -) - -// endOfTime is the time when session (non-persistent) cookies expire. -// This instant is representable in most date/time formats (not just -// Go's time.Time) and should be far enough in the future. -var endOfTime = time.Date(9999, 12, 31, 23, 59, 59, 0, time.UTC) - -// domainAndType determines the cookie's domain and hostOnly attribute. -func (j *Jar) domainAndType(host, domain string) (string, bool, error) { - if domain == "" { - // No domain attribute in the SetCookie header indicates a - // host cookie. - return host, true, nil - } - - if isIP(host) { - // According to RFC 6265 domain-matching includes not being - // an IP address. - // TODO: This might be relaxed as in common browsers. - return "", false, errNoHostname - } - - // From here on: If the cookie is valid, it is a domain cookie (with - // the one exception of a public suffix below). - // See RFC 6265 section 5.2.3. 
- if domain[0] == '.' { - domain = domain[1:] - } - - if len(domain) == 0 || domain[0] == '.' { - // Received either "Domain=." or "Domain=..some.thing", - // both are illegal. - return "", false, errMalformedDomain - } - domain = strings.ToLower(domain) - - if domain[len(domain)-1] == '.' { - // We received stuff like "Domain=www.example.com.". - // Browsers do handle such stuff (actually differently) but - // RFC 6265 seems to be clear here (e.g. section 4.1.2.3) in - // requiring a reject. 4.1.2.3 is not normative, but - // "Domain Matching" (5.1.3) and "Canonicalized Host Names" - // (5.1.2) are. - return "", false, errMalformedDomain - } - - // See RFC 6265 section 5.3 #5. - if j.psList != nil { - if ps := j.psList.PublicSuffix(domain); ps != "" && !hasDotSuffix(domain, ps) { - if host == domain { - // This is the one exception in which a cookie - // with a domain attribute is a host cookie. - return host, true, nil - } - return "", false, errIllegalDomain - } - } - - // The domain must domain-match host: www.mycompany.com cannot - // set cookies for .ourcompetitors.com. - if host != domain && !hasDotSuffix(host, domain) { - return "", false, errIllegalDomain - } - - return domain, false, nil -} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/core/http/cookiejar/jar_test.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/core/http/cookiejar/jar_test.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/core/http/cookiejar/jar_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/core/http/cookiejar/jar_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,1267 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package cookiejar - -import ( - "fmt" - "net/http" - "net/url" - "sort" - "strings" - "testing" - "time" -) - -// tNow is the synthetic current time used as now during testing. -var tNow = time.Date(2013, 1, 1, 12, 0, 0, 0, time.UTC) - -// testPSL implements PublicSuffixList with just two rules: "co.uk" -// and the default rule "*". -type testPSL struct{} - -func (testPSL) String() string { - return "testPSL" -} -func (testPSL) PublicSuffix(d string) string { - if d == "co.uk" || strings.HasSuffix(d, ".co.uk") { - return "co.uk" - } - return d[strings.LastIndex(d, ".")+1:] -} - -// newTestJar creates an empty Jar with testPSL as the public suffix list. -func newTestJar() *Jar { - jar, err := New(&Options{PublicSuffixList: testPSL{}}) - if err != nil { - panic(err) - } - return jar -} - -var hasDotSuffixTests = [...]struct { - s, suffix string -}{ - {"", ""}, - {"", "."}, - {"", "x"}, - {".", ""}, - {".", "."}, - {".", ".."}, - {".", "x"}, - {".", "x."}, - {".", ".x"}, - {".", ".x."}, - {"x", ""}, - {"x", "."}, - {"x", ".."}, - {"x", "x"}, - {"x", "x."}, - {"x", ".x"}, - {"x", ".x."}, - {".x", ""}, - {".x", "."}, - {".x", ".."}, - {".x", "x"}, - {".x", "x."}, - {".x", ".x"}, - {".x", ".x."}, - {"x.", ""}, - {"x.", "."}, - {"x.", ".."}, - {"x.", "x"}, - {"x.", "x."}, - {"x.", ".x"}, - {"x.", ".x."}, - {"com", ""}, - {"com", "m"}, - {"com", "om"}, - {"com", "com"}, - {"com", ".com"}, - {"com", "x.com"}, - {"com", "xcom"}, - {"com", "xorg"}, - {"com", "org"}, - {"com", "rg"}, - {"foo.com", ""}, - {"foo.com", "m"}, - {"foo.com", "om"}, - {"foo.com", "com"}, - {"foo.com", ".com"}, - {"foo.com", "o.com"}, - {"foo.com", "oo.com"}, - {"foo.com", "foo.com"}, - {"foo.com", ".foo.com"}, - {"foo.com", "x.foo.com"}, - {"foo.com", "xfoo.com"}, - {"foo.com", "xfoo.org"}, - {"foo.com", "foo.org"}, - {"foo.com", "oo.org"}, - {"foo.com", "o.org"}, - {"foo.com", ".org"}, - {"foo.com", "org"}, - {"foo.com", "rg"}, -} - -func TestHasDotSuffix(t *testing.T) { - for _, tc := range 
hasDotSuffixTests { - got := hasDotSuffix(tc.s, tc.suffix) - want := strings.HasSuffix(tc.s, "."+tc.suffix) - if got != want { - t.Errorf("s=%q, suffix=%q: got %v, want %v", tc.s, tc.suffix, got, want) - } - } -} - -var canonicalHostTests = map[string]string{ - "www.example.com": "www.example.com", - "WWW.EXAMPLE.COM": "www.example.com", - "wWw.eXAmple.CoM": "www.example.com", - "www.example.com:80": "www.example.com", - "192.168.0.10": "192.168.0.10", - "192.168.0.5:8080": "192.168.0.5", - "2001:4860:0:2001::68": "2001:4860:0:2001::68", - "[2001:4860:0:::68]:8080": "2001:4860:0:::68", - "www.bücher.de": "www.xn--bcher-kva.de", - "www.example.com.": "www.example.com", - "[bad.unmatched.bracket:": "error", -} - -func TestCanonicalHost(t *testing.T) { - for h, want := range canonicalHostTests { - got, err := canonicalHost(h) - if want == "error" { - if err == nil { - t.Errorf("%q: got nil error, want non-nil", h) - } - continue - } - if err != nil { - t.Errorf("%q: %v", h, err) - continue - } - if got != want { - t.Errorf("%q: got %q, want %q", h, got, want) - continue - } - } -} - -var hasPortTests = map[string]bool{ - "www.example.com": false, - "www.example.com:80": true, - "127.0.0.1": false, - "127.0.0.1:8080": true, - "2001:4860:0:2001::68": false, - "[2001::0:::68]:80": true, -} - -func TestHasPort(t *testing.T) { - for host, want := range hasPortTests { - if got := hasPort(host); got != want { - t.Errorf("%q: got %t, want %t", host, got, want) - } - } -} - -var jarKeyTests = map[string]string{ - "foo.www.example.com": "example.com", - "www.example.com": "example.com", - "example.com": "example.com", - "com": "com", - "foo.www.bbc.co.uk": "bbc.co.uk", - "www.bbc.co.uk": "bbc.co.uk", - "bbc.co.uk": "bbc.co.uk", - "co.uk": "co.uk", - "uk": "uk", - "192.168.0.5": "192.168.0.5", -} - -func TestJarKey(t *testing.T) { - for host, want := range jarKeyTests { - if got := jarKey(host, testPSL{}); got != want { - t.Errorf("%q: got %q, want %q", host, got, want) - } - } 
-} - -var jarKeyNilPSLTests = map[string]string{ - "foo.www.example.com": "example.com", - "www.example.com": "example.com", - "example.com": "example.com", - "com": "com", - "foo.www.bbc.co.uk": "co.uk", - "www.bbc.co.uk": "co.uk", - "bbc.co.uk": "co.uk", - "co.uk": "co.uk", - "uk": "uk", - "192.168.0.5": "192.168.0.5", -} - -func TestJarKeyNilPSL(t *testing.T) { - for host, want := range jarKeyNilPSLTests { - if got := jarKey(host, nil); got != want { - t.Errorf("%q: got %q, want %q", host, got, want) - } - } -} - -var isIPTests = map[string]bool{ - "127.0.0.1": true, - "1.2.3.4": true, - "2001:4860:0:2001::68": true, - "example.com": false, - "1.1.1.300": false, - "www.foo.bar.net": false, - "123.foo.bar.net": false, -} - -func TestIsIP(t *testing.T) { - for host, want := range isIPTests { - if got := isIP(host); got != want { - t.Errorf("%q: got %t, want %t", host, got, want) - } - } -} - -var defaultPathTests = map[string]string{ - "/": "/", - "/abc": "/", - "/abc/": "/abc", - "/abc/xyz": "/abc", - "/abc/xyz/": "/abc/xyz", - "/a/b/c.html": "/a/b", - "": "/", - "strange": "/", - "//": "/", - "/a//b": "/a/", - "/a/./b": "/a/.", - "/a/../b": "/a/..", -} - -func TestDefaultPath(t *testing.T) { - for path, want := range defaultPathTests { - if got := defaultPath(path); got != want { - t.Errorf("%q: got %q, want %q", path, got, want) - } - } -} - -var domainAndTypeTests = [...]struct { - host string // host Set-Cookie header was received from - domain string // domain attribute in Set-Cookie header - wantDomain string // expected domain of cookie - wantHostOnly bool // expected host-cookie flag - wantErr error // expected error -}{ - {"www.example.com", "", "www.example.com", true, nil}, - {"127.0.0.1", "", "127.0.0.1", true, nil}, - {"2001:4860:0:2001::68", "", "2001:4860:0:2001::68", true, nil}, - {"www.example.com", "example.com", "example.com", false, nil}, - {"www.example.com", ".example.com", "example.com", false, nil}, - {"www.example.com", "www.example.com", 
"www.example.com", false, nil}, - {"www.example.com", ".www.example.com", "www.example.com", false, nil}, - {"foo.sso.example.com", "sso.example.com", "sso.example.com", false, nil}, - {"bar.co.uk", "bar.co.uk", "bar.co.uk", false, nil}, - {"foo.bar.co.uk", ".bar.co.uk", "bar.co.uk", false, nil}, - {"127.0.0.1", "127.0.0.1", "", false, errNoHostname}, - {"2001:4860:0:2001::68", "2001:4860:0:2001::68", "2001:4860:0:2001::68", false, errNoHostname}, - {"www.example.com", ".", "", false, errMalformedDomain}, - {"www.example.com", "..", "", false, errMalformedDomain}, - {"www.example.com", "other.com", "", false, errIllegalDomain}, - {"www.example.com", "com", "", false, errIllegalDomain}, - {"www.example.com", ".com", "", false, errIllegalDomain}, - {"foo.bar.co.uk", ".co.uk", "", false, errIllegalDomain}, - {"127.www.0.0.1", "127.0.0.1", "", false, errIllegalDomain}, - {"com", "", "com", true, nil}, - {"com", "com", "com", true, nil}, - {"com", ".com", "com", true, nil}, - {"co.uk", "", "co.uk", true, nil}, - {"co.uk", "co.uk", "co.uk", true, nil}, - {"co.uk", ".co.uk", "co.uk", true, nil}, -} - -func TestDomainAndType(t *testing.T) { - jar := newTestJar() - for _, tc := range domainAndTypeTests { - domain, hostOnly, err := jar.domainAndType(tc.host, tc.domain) - if err != tc.wantErr { - t.Errorf("%q/%q: got %q error, want %q", - tc.host, tc.domain, err, tc.wantErr) - continue - } - if err != nil { - continue - } - if domain != tc.wantDomain || hostOnly != tc.wantHostOnly { - t.Errorf("%q/%q: got %q/%t want %q/%t", - tc.host, tc.domain, domain, hostOnly, - tc.wantDomain, tc.wantHostOnly) - } - } -} - -// expiresIn creates an expires attribute delta seconds from tNow. -func expiresIn(delta int) string { - t := tNow.Add(time.Duration(delta) * time.Second) - return "expires=" + t.Format(time.RFC1123) -} - -// mustParseURL parses s to an URL and panics on error. 
-func mustParseURL(s string) *url.URL { - u, err := url.Parse(s) - if err != nil || u.Scheme == "" || u.Host == "" { - panic(fmt.Sprintf("Unable to parse URL %s.", s)) - } - return u -} - -// jarTest encapsulates the following actions on a jar: -// 1. Perform SetCookies with fromURL and the cookies from setCookies. -// (Done at time tNow + 0 ms.) -// 2. Check that the entries in the jar matches content. -// (Done at time tNow + 1001 ms.) -// 3. For each query in tests: Check that Cookies with toURL yields the -// cookies in want. -// (Query n done at tNow + (n+2)*1001 ms.) -type jarTest struct { - description string // The description of what this test is supposed to test - fromURL string // The full URL of the request from which Set-Cookie headers where received - setCookies []string // All the cookies received from fromURL - content string // The whole (non-expired) content of the jar - queries []query // Queries to test the Jar.Cookies method -} - -// query contains one test of the cookies returned from Jar.Cookies. -type query struct { - toURL string // the URL in the Cookies call - want string // the expected list of cookies (order matters) -} - -// run runs the jarTest. -func (test jarTest) run(t *testing.T, jar *Jar) { - now := tNow - - // Populate jar with cookies. - setCookies := make([]*http.Cookie, len(test.setCookies)) - for i, cs := range test.setCookies { - cookies := (&http.Response{Header: http.Header{"Set-Cookie": {cs}}}).Cookies() - if len(cookies) != 1 { - panic(fmt.Sprintf("Wrong cookie line %q: %#v", cs, cookies)) - } - setCookies[i] = cookies[0] - } - jar.setCookies(mustParseURL(test.fromURL), setCookies, now) - now = now.Add(1001 * time.Millisecond) - - // Serialize non-expired entries in the form "name1=val1 name2=val2". 
- var cs []string - for _, submap := range jar.entries { - for _, cookie := range submap { - if !cookie.Expires.After(now) { - continue - } - cs = append(cs, cookie.Name+"="+cookie.Value) - } - } - sort.Strings(cs) - got := strings.Join(cs, " ") - - // Make sure jar content matches our expectations. - if got != test.content { - t.Errorf("Test %q Content\ngot %q\nwant %q", - test.description, got, test.content) - } - - // Test different calls to Cookies. - for i, query := range test.queries { - now = now.Add(1001 * time.Millisecond) - var s []string - for _, c := range jar.cookies(mustParseURL(query.toURL), now) { - s = append(s, c.Name+"="+c.Value) - } - if got := strings.Join(s, " "); got != query.want { - t.Errorf("Test %q #%d\ngot %q\nwant %q", test.description, i, got, query.want) - } - } -} - -// basicsTests contains fundamental tests. Each jarTest has to be performed on -// a fresh, empty Jar. -var basicsTests = [...]jarTest{ - { - "Retrieval of a plain host cookie.", - "http://www.host.test/", - []string{"A=a"}, - "A=a", - []query{ - {"http://www.host.test", "A=a"}, - {"http://www.host.test/", "A=a"}, - {"http://www.host.test/some/path", "A=a"}, - {"https://www.host.test", "A=a"}, - {"https://www.host.test/", "A=a"}, - {"https://www.host.test/some/path", "A=a"}, - {"ftp://www.host.test", ""}, - {"ftp://www.host.test/", ""}, - {"ftp://www.host.test/some/path", ""}, - {"http://www.other.org", ""}, - {"http://sibling.host.test", ""}, - {"http://deep.www.host.test", ""}, - }, - }, - { - "Secure cookies are not returned to http.", - "http://www.host.test/", - []string{"A=a; secure"}, - "A=a", - []query{ - {"http://www.host.test", ""}, - {"http://www.host.test/", ""}, - {"http://www.host.test/some/path", ""}, - {"https://www.host.test", "A=a"}, - {"https://www.host.test/", "A=a"}, - {"https://www.host.test/some/path", "A=a"}, - }, - }, - { - "Explicit path.", - "http://www.host.test/", - []string{"A=a; path=/some/path"}, - "A=a", - []query{ - 
{"http://www.host.test", ""}, - {"http://www.host.test/", ""}, - {"http://www.host.test/some", ""}, - {"http://www.host.test/some/", ""}, - {"http://www.host.test/some/path", "A=a"}, - {"http://www.host.test/some/paths", ""}, - {"http://www.host.test/some/path/foo", "A=a"}, - {"http://www.host.test/some/path/foo/", "A=a"}, - }, - }, - { - "Implicit path #1: path is a directory.", - "http://www.host.test/some/path/", - []string{"A=a"}, - "A=a", - []query{ - {"http://www.host.test", ""}, - {"http://www.host.test/", ""}, - {"http://www.host.test/some", ""}, - {"http://www.host.test/some/", ""}, - {"http://www.host.test/some/path", "A=a"}, - {"http://www.host.test/some/paths", ""}, - {"http://www.host.test/some/path/foo", "A=a"}, - {"http://www.host.test/some/path/foo/", "A=a"}, - }, - }, - { - "Implicit path #2: path is not a directory.", - "http://www.host.test/some/path/index.html", - []string{"A=a"}, - "A=a", - []query{ - {"http://www.host.test", ""}, - {"http://www.host.test/", ""}, - {"http://www.host.test/some", ""}, - {"http://www.host.test/some/", ""}, - {"http://www.host.test/some/path", "A=a"}, - {"http://www.host.test/some/paths", ""}, - {"http://www.host.test/some/path/foo", "A=a"}, - {"http://www.host.test/some/path/foo/", "A=a"}, - }, - }, - { - "Implicit path #3: no path in URL at all.", - "http://www.host.test", - []string{"A=a"}, - "A=a", - []query{ - {"http://www.host.test", "A=a"}, - {"http://www.host.test/", "A=a"}, - {"http://www.host.test/some/path", "A=a"}, - }, - }, - { - "Cookies are sorted by path length.", - "http://www.host.test/", - []string{ - "A=a; path=/foo/bar", - "B=b; path=/foo/bar/baz/qux", - "C=c; path=/foo/bar/baz", - "D=d; path=/foo"}, - "A=a B=b C=c D=d", - []query{ - {"http://www.host.test/foo/bar/baz/qux", "B=b C=c A=a D=d"}, - {"http://www.host.test/foo/bar/baz/", "C=c A=a D=d"}, - {"http://www.host.test/foo/bar", "A=a D=d"}, - }, - }, - { - "Creation time determines sorting on same length paths.", - "http://www.host.test/", 
- []string{ - "A=a; path=/foo/bar", - "X=x; path=/foo/bar", - "Y=y; path=/foo/bar/baz/qux", - "B=b; path=/foo/bar/baz/qux", - "C=c; path=/foo/bar/baz", - "W=w; path=/foo/bar/baz", - "Z=z; path=/foo", - "D=d; path=/foo"}, - "A=a B=b C=c D=d W=w X=x Y=y Z=z", - []query{ - {"http://www.host.test/foo/bar/baz/qux", "Y=y B=b C=c W=w A=a X=x Z=z D=d"}, - {"http://www.host.test/foo/bar/baz/", "C=c W=w A=a X=x Z=z D=d"}, - {"http://www.host.test/foo/bar", "A=a X=x Z=z D=d"}, - }, - }, - { - "Sorting of same-name cookies.", - "http://www.host.test/", - []string{ - "A=1; path=/", - "A=2; path=/path", - "A=3; path=/quux", - "A=4; path=/path/foo", - "A=5; domain=.host.test; path=/path", - "A=6; domain=.host.test; path=/quux", - "A=7; domain=.host.test; path=/path/foo", - }, - "A=1 A=2 A=3 A=4 A=5 A=6 A=7", - []query{ - {"http://www.host.test/path", "A=2 A=5 A=1"}, - {"http://www.host.test/path/foo", "A=4 A=7 A=2 A=5 A=1"}, - }, - }, - { - "Disallow domain cookie on public suffix.", - "http://www.bbc.co.uk", - []string{ - "a=1", - "b=2; domain=co.uk", - }, - "a=1", - []query{{"http://www.bbc.co.uk", "a=1"}}, - }, - { - "Host cookie on IP.", - "http://192.168.0.10", - []string{"a=1"}, - "a=1", - []query{{"http://192.168.0.10", "a=1"}}, - }, - { - "Port is ignored #1.", - "http://www.host.test/", - []string{"a=1"}, - "a=1", - []query{ - {"http://www.host.test", "a=1"}, - {"http://www.host.test:8080/", "a=1"}, - }, - }, - { - "Port is ignored #2.", - "http://www.host.test:8080/", - []string{"a=1"}, - "a=1", - []query{ - {"http://www.host.test", "a=1"}, - {"http://www.host.test:8080/", "a=1"}, - {"http://www.host.test:1234/", "a=1"}, - }, - }, -} - -func TestBasics(t *testing.T) { - for _, test := range basicsTests { - jar := newTestJar() - test.run(t, jar) - } -} - -// updateAndDeleteTests contains jarTests which must be performed on the same -// Jar. 
-var updateAndDeleteTests = [...]jarTest{ - { - "Set initial cookies.", - "http://www.host.test", - []string{ - "a=1", - "b=2; secure", - "c=3; httponly", - "d=4; secure; httponly"}, - "a=1 b=2 c=3 d=4", - []query{ - {"http://www.host.test", "a=1 c=3"}, - {"https://www.host.test", "a=1 b=2 c=3 d=4"}, - }, - }, - { - "Update value via http.", - "http://www.host.test", - []string{ - "a=w", - "b=x; secure", - "c=y; httponly", - "d=z; secure; httponly"}, - "a=w b=x c=y d=z", - []query{ - {"http://www.host.test", "a=w c=y"}, - {"https://www.host.test", "a=w b=x c=y d=z"}, - }, - }, - { - "Clear Secure flag from a http.", - "http://www.host.test/", - []string{ - "b=xx", - "d=zz; httponly"}, - "a=w b=xx c=y d=zz", - []query{{"http://www.host.test", "a=w b=xx c=y d=zz"}}, - }, - { - "Delete all.", - "http://www.host.test/", - []string{ - "a=1; max-Age=-1", // delete via MaxAge - "b=2; " + expiresIn(-10), // delete via Expires - "c=2; max-age=-1; " + expiresIn(-10), // delete via both - "d=4; max-age=-1; " + expiresIn(10)}, // MaxAge takes precedence - "", - []query{{"http://www.host.test", ""}}, - }, - { - "Refill #1.", - "http://www.host.test", - []string{ - "A=1", - "A=2; path=/foo", - "A=3; domain=.host.test", - "A=4; path=/foo; domain=.host.test"}, - "A=1 A=2 A=3 A=4", - []query{{"http://www.host.test/foo", "A=2 A=4 A=1 A=3"}}, - }, - { - "Refill #2.", - "http://www.google.com", - []string{ - "A=6", - "A=7; path=/foo", - "A=8; domain=.google.com", - "A=9; path=/foo; domain=.google.com"}, - "A=1 A=2 A=3 A=4 A=6 A=7 A=8 A=9", - []query{ - {"http://www.host.test/foo", "A=2 A=4 A=1 A=3"}, - {"http://www.google.com/foo", "A=7 A=9 A=6 A=8"}, - }, - }, - { - "Delete A7.", - "http://www.google.com", - []string{"A=; path=/foo; max-age=-1"}, - "A=1 A=2 A=3 A=4 A=6 A=8 A=9", - []query{ - {"http://www.host.test/foo", "A=2 A=4 A=1 A=3"}, - {"http://www.google.com/foo", "A=9 A=6 A=8"}, - }, - }, - { - "Delete A4.", - "http://www.host.test", - []string{"A=; path=/foo; 
domain=host.test; max-age=-1"}, - "A=1 A=2 A=3 A=6 A=8 A=9", - []query{ - {"http://www.host.test/foo", "A=2 A=1 A=3"}, - {"http://www.google.com/foo", "A=9 A=6 A=8"}, - }, - }, - { - "Delete A6.", - "http://www.google.com", - []string{"A=; max-age=-1"}, - "A=1 A=2 A=3 A=8 A=9", - []query{ - {"http://www.host.test/foo", "A=2 A=1 A=3"}, - {"http://www.google.com/foo", "A=9 A=8"}, - }, - }, - { - "Delete A3.", - "http://www.host.test", - []string{"A=; domain=host.test; max-age=-1"}, - "A=1 A=2 A=8 A=9", - []query{ - {"http://www.host.test/foo", "A=2 A=1"}, - {"http://www.google.com/foo", "A=9 A=8"}, - }, - }, - { - "No cross-domain delete.", - "http://www.host.test", - []string{ - "A=; domain=google.com; max-age=-1", - "A=; path=/foo; domain=google.com; max-age=-1"}, - "A=1 A=2 A=8 A=9", - []query{ - {"http://www.host.test/foo", "A=2 A=1"}, - {"http://www.google.com/foo", "A=9 A=8"}, - }, - }, - { - "Delete A8 and A9.", - "http://www.google.com", - []string{ - "A=; domain=google.com; max-age=-1", - "A=; path=/foo; domain=google.com; max-age=-1"}, - "A=1 A=2", - []query{ - {"http://www.host.test/foo", "A=2 A=1"}, - {"http://www.google.com/foo", ""}, - }, - }, -} - -func TestUpdateAndDelete(t *testing.T) { - jar := newTestJar() - for _, test := range updateAndDeleteTests { - test.run(t, jar) - } -} - -func TestExpiration(t *testing.T) { - jar := newTestJar() - jarTest{ - "Expiration.", - "http://www.host.test", - []string{ - "a=1", - "b=2; max-age=3", - "c=3; " + expiresIn(3), - "d=4; max-age=5", - "e=5; " + expiresIn(5), - "f=6; max-age=100", - }, - "a=1 b=2 c=3 d=4 e=5 f=6", // executed at t0 + 1001 ms - []query{ - {"http://www.host.test", "a=1 b=2 c=3 d=4 e=5 f=6"}, // t0 + 2002 ms - {"http://www.host.test", "a=1 d=4 e=5 f=6"}, // t0 + 3003 ms - {"http://www.host.test", "a=1 d=4 e=5 f=6"}, // t0 + 4004 ms - {"http://www.host.test", "a=1 f=6"}, // t0 + 5005 ms - {"http://www.host.test", "a=1 f=6"}, // t0 + 6006 ms - }, - }.run(t, jar) -} - -// -// Tests derived from 
Chromium's cookie_store_unittest.h. -// - -// See http://src.chromium.org/viewvc/chrome/trunk/src/net/cookies/cookie_store_unittest.h?revision=159685&content-type=text/plain -// Some of the original tests are in a bad condition (e.g. -// DomainWithTrailingDotTest) or are not RFC 6265 conforming (e.g. -// TestNonDottedAndTLD #1 and #6) and have not been ported. - -// chromiumBasicsTests contains fundamental tests. Each jarTest has to be -// performed on a fresh, empty Jar. -var chromiumBasicsTests = [...]jarTest{ - { - "DomainWithTrailingDotTest.", - "http://www.google.com/", - []string{ - "a=1; domain=.www.google.com.", - "b=2; domain=.www.google.com.."}, - "", - []query{ - {"http://www.google.com", ""}, - }, - }, - { - "ValidSubdomainTest #1.", - "http://a.b.c.d.com", - []string{ - "a=1; domain=.a.b.c.d.com", - "b=2; domain=.b.c.d.com", - "c=3; domain=.c.d.com", - "d=4; domain=.d.com"}, - "a=1 b=2 c=3 d=4", - []query{ - {"http://a.b.c.d.com", "a=1 b=2 c=3 d=4"}, - {"http://b.c.d.com", "b=2 c=3 d=4"}, - {"http://c.d.com", "c=3 d=4"}, - {"http://d.com", "d=4"}, - }, - }, - { - "ValidSubdomainTest #2.", - "http://a.b.c.d.com", - []string{ - "a=1; domain=.a.b.c.d.com", - "b=2; domain=.b.c.d.com", - "c=3; domain=.c.d.com", - "d=4; domain=.d.com", - "X=bcd; domain=.b.c.d.com", - "X=cd; domain=.c.d.com"}, - "X=bcd X=cd a=1 b=2 c=3 d=4", - []query{ - {"http://b.c.d.com", "b=2 c=3 d=4 X=bcd X=cd"}, - {"http://c.d.com", "c=3 d=4 X=cd"}, - }, - }, - { - "InvalidDomainTest #1.", - "http://foo.bar.com", - []string{ - "a=1; domain=.yo.foo.bar.com", - "b=2; domain=.foo.com", - "c=3; domain=.bar.foo.com", - "d=4; domain=.foo.bar.com.net", - "e=5; domain=ar.com", - "f=6; domain=.", - "g=7; domain=/", - "h=8; domain=http://foo.bar.com", - "i=9; domain=..foo.bar.com", - "j=10; domain=..bar.com", - "k=11; domain=.foo.bar.com?blah", - "l=12; domain=.foo.bar.com/blah", - "m=12; domain=.foo.bar.com:80", - "n=14; domain=.foo.bar.com:", - "o=15; domain=.foo.bar.com#sup", - }, - "", // Jar 
is empty. - []query{{"http://foo.bar.com", ""}}, - }, - { - "InvalidDomainTest #2.", - "http://foo.com.com", - []string{"a=1; domain=.foo.com.com.com"}, - "", - []query{{"http://foo.bar.com", ""}}, - }, - { - "DomainWithoutLeadingDotTest #1.", - "http://manage.hosted.filefront.com", - []string{"a=1; domain=filefront.com"}, - "a=1", - []query{{"http://www.filefront.com", "a=1"}}, - }, - { - "DomainWithoutLeadingDotTest #2.", - "http://www.google.com", - []string{"a=1; domain=www.google.com"}, - "a=1", - []query{ - {"http://www.google.com", "a=1"}, - {"http://sub.www.google.com", "a=1"}, - {"http://something-else.com", ""}, - }, - }, - { - "CaseInsensitiveDomainTest.", - "http://www.google.com", - []string{ - "a=1; domain=.GOOGLE.COM", - "b=2; domain=.www.gOOgLE.coM"}, - "a=1 b=2", - []query{{"http://www.google.com", "a=1 b=2"}}, - }, - { - "TestIpAddress #1.", - "http://1.2.3.4/foo", - []string{"a=1; path=/"}, - "a=1", - []query{{"http://1.2.3.4/foo", "a=1"}}, - }, - { - "TestIpAddress #2.", - "http://1.2.3.4/foo", - []string{ - "a=1; domain=.1.2.3.4", - "b=2; domain=.3.4"}, - "", - []query{{"http://1.2.3.4/foo", ""}}, - }, - { - "TestIpAddress #3.", - "http://1.2.3.4/foo", - []string{"a=1; domain=1.2.3.4"}, - "", - []query{{"http://1.2.3.4/foo", ""}}, - }, - { - "TestNonDottedAndTLD #2.", - "http://com./index.html", - []string{"a=1"}, - "a=1", - []query{ - {"http://com./index.html", "a=1"}, - {"http://no-cookies.com./index.html", ""}, - }, - }, - { - "TestNonDottedAndTLD #3.", - "http://a.b", - []string{ - "a=1; domain=.b", - "b=2; domain=b"}, - "", - []query{{"http://bar.foo", ""}}, - }, - { - "TestNonDottedAndTLD #4.", - "http://google.com", - []string{ - "a=1; domain=.com", - "b=2; domain=com"}, - "", - []query{{"http://google.com", ""}}, - }, - { - "TestNonDottedAndTLD #5.", - "http://google.co.uk", - []string{ - "a=1; domain=.co.uk", - "b=2; domain=.uk"}, - "", - []query{ - {"http://google.co.uk", ""}, - {"http://else.co.com", ""}, - {"http://else.uk", ""}, - 
}, - }, - { - "TestHostEndsWithDot.", - "http://www.google.com", - []string{ - "a=1", - "b=2; domain=.www.google.com."}, - "a=1", - []query{{"http://www.google.com", "a=1"}}, - }, - { - "PathTest", - "http://www.google.izzle", - []string{"a=1; path=/wee"}, - "a=1", - []query{ - {"http://www.google.izzle/wee", "a=1"}, - {"http://www.google.izzle/wee/", "a=1"}, - {"http://www.google.izzle/wee/war", "a=1"}, - {"http://www.google.izzle/wee/war/more/more", "a=1"}, - {"http://www.google.izzle/weehee", ""}, - {"http://www.google.izzle/", ""}, - }, - }, -} - -func TestChromiumBasics(t *testing.T) { - for _, test := range chromiumBasicsTests { - jar := newTestJar() - test.run(t, jar) - } -} - -// chromiumDomainTests contains jarTests which must be executed all on the -// same Jar. -var chromiumDomainTests = [...]jarTest{ - { - "Fill #1.", - "http://www.google.izzle", - []string{"A=B"}, - "A=B", - []query{{"http://www.google.izzle", "A=B"}}, - }, - { - "Fill #2.", - "http://www.google.izzle", - []string{"C=D; domain=.google.izzle"}, - "A=B C=D", - []query{{"http://www.google.izzle", "A=B C=D"}}, - }, - { - "Verify A is a host cookie and not accessible from subdomain.", - "http://unused.nil", - []string{}, - "A=B C=D", - []query{{"http://foo.www.google.izzle", "C=D"}}, - }, - { - "Verify domain cookies are found on proper domain.", - "http://www.google.izzle", - []string{"E=F; domain=.www.google.izzle"}, - "A=B C=D E=F", - []query{{"http://www.google.izzle", "A=B C=D E=F"}}, - }, - { - "Leading dots in domain attributes are optional.", - "http://www.google.izzle", - []string{"G=H; domain=www.google.izzle"}, - "A=B C=D E=F G=H", - []query{{"http://www.google.izzle", "A=B C=D E=F G=H"}}, - }, - { - "Verify domain enforcement works #1.", - "http://www.google.izzle", - []string{"K=L; domain=.bar.www.google.izzle"}, - "A=B C=D E=F G=H", - []query{{"http://bar.www.google.izzle", "C=D E=F G=H"}}, - }, - { - "Verify domain enforcement works #2.", - "http://unused.nil", - []string{}, 
- "A=B C=D E=F G=H", - []query{{"http://www.google.izzle", "A=B C=D E=F G=H"}}, - }, -} - -func TestChromiumDomain(t *testing.T) { - jar := newTestJar() - for _, test := range chromiumDomainTests { - test.run(t, jar) - } - -} - -// chromiumDeletionTests must be performed all on the same Jar. -var chromiumDeletionTests = [...]jarTest{ - { - "Create session cookie a1.", - "http://www.google.com", - []string{"a=1"}, - "a=1", - []query{{"http://www.google.com", "a=1"}}, - }, - { - "Delete sc a1 via MaxAge.", - "http://www.google.com", - []string{"a=1; max-age=-1"}, - "", - []query{{"http://www.google.com", ""}}, - }, - { - "Create session cookie b2.", - "http://www.google.com", - []string{"b=2"}, - "b=2", - []query{{"http://www.google.com", "b=2"}}, - }, - { - "Delete sc b2 via Expires.", - "http://www.google.com", - []string{"b=2; " + expiresIn(-10)}, - "", - []query{{"http://www.google.com", ""}}, - }, - { - "Create persistent cookie c3.", - "http://www.google.com", - []string{"c=3; max-age=3600"}, - "c=3", - []query{{"http://www.google.com", "c=3"}}, - }, - { - "Delete pc c3 via MaxAge.", - "http://www.google.com", - []string{"c=3; max-age=-1"}, - "", - []query{{"http://www.google.com", ""}}, - }, - { - "Create persistent cookie d4.", - "http://www.google.com", - []string{"d=4; max-age=3600"}, - "d=4", - []query{{"http://www.google.com", "d=4"}}, - }, - { - "Delete pc d4 via Expires.", - "http://www.google.com", - []string{"d=4; " + expiresIn(-10)}, - "", - []query{{"http://www.google.com", ""}}, - }, -} - -func TestChromiumDeletion(t *testing.T) { - jar := newTestJar() - for _, test := range chromiumDeletionTests { - test.run(t, jar) - } -} - -// domainHandlingTests tests and documents the rules for domain handling. -// Each test must be performed on an empty new Jar. 
-var domainHandlingTests = [...]jarTest{ - { - "Host cookie", - "http://www.host.test", - []string{"a=1"}, - "a=1", - []query{ - {"http://www.host.test", "a=1"}, - {"http://host.test", ""}, - {"http://bar.host.test", ""}, - {"http://foo.www.host.test", ""}, - {"http://other.test", ""}, - {"http://test", ""}, - }, - }, - { - "Domain cookie #1", - "http://www.host.test", - []string{"a=1; domain=host.test"}, - "a=1", - []query{ - {"http://www.host.test", "a=1"}, - {"http://host.test", "a=1"}, - {"http://bar.host.test", "a=1"}, - {"http://foo.www.host.test", "a=1"}, - {"http://other.test", ""}, - {"http://test", ""}, - }, - }, - { - "Domain cookie #2", - "http://www.host.test", - []string{"a=1; domain=.host.test"}, - "a=1", - []query{ - {"http://www.host.test", "a=1"}, - {"http://host.test", "a=1"}, - {"http://bar.host.test", "a=1"}, - {"http://foo.www.host.test", "a=1"}, - {"http://other.test", ""}, - {"http://test", ""}, - }, - }, - { - "Host cookie on IDNA domain #1", - "http://www.bücher.test", - []string{"a=1"}, - "a=1", - []query{ - {"http://www.bücher.test", "a=1"}, - {"http://www.xn--bcher-kva.test", "a=1"}, - {"http://bücher.test", ""}, - {"http://xn--bcher-kva.test", ""}, - {"http://bar.bücher.test", ""}, - {"http://bar.xn--bcher-kva.test", ""}, - {"http://foo.www.bücher.test", ""}, - {"http://foo.www.xn--bcher-kva.test", ""}, - {"http://other.test", ""}, - {"http://test", ""}, - }, - }, - { - "Host cookie on IDNA domain #2", - "http://www.xn--bcher-kva.test", - []string{"a=1"}, - "a=1", - []query{ - {"http://www.bücher.test", "a=1"}, - {"http://www.xn--bcher-kva.test", "a=1"}, - {"http://bücher.test", ""}, - {"http://xn--bcher-kva.test", ""}, - {"http://bar.bücher.test", ""}, - {"http://bar.xn--bcher-kva.test", ""}, - {"http://foo.www.bücher.test", ""}, - {"http://foo.www.xn--bcher-kva.test", ""}, - {"http://other.test", ""}, - {"http://test", ""}, - }, - }, - { - "Domain cookie on IDNA domain #1", - "http://www.bücher.test", - []string{"a=1; 
domain=xn--bcher-kva.test"}, - "a=1", - []query{ - {"http://www.bücher.test", "a=1"}, - {"http://www.xn--bcher-kva.test", "a=1"}, - {"http://bücher.test", "a=1"}, - {"http://xn--bcher-kva.test", "a=1"}, - {"http://bar.bücher.test", "a=1"}, - {"http://bar.xn--bcher-kva.test", "a=1"}, - {"http://foo.www.bücher.test", "a=1"}, - {"http://foo.www.xn--bcher-kva.test", "a=1"}, - {"http://other.test", ""}, - {"http://test", ""}, - }, - }, - { - "Domain cookie on IDNA domain #2", - "http://www.xn--bcher-kva.test", - []string{"a=1; domain=xn--bcher-kva.test"}, - "a=1", - []query{ - {"http://www.bücher.test", "a=1"}, - {"http://www.xn--bcher-kva.test", "a=1"}, - {"http://bücher.test", "a=1"}, - {"http://xn--bcher-kva.test", "a=1"}, - {"http://bar.bücher.test", "a=1"}, - {"http://bar.xn--bcher-kva.test", "a=1"}, - {"http://foo.www.bücher.test", "a=1"}, - {"http://foo.www.xn--bcher-kva.test", "a=1"}, - {"http://other.test", ""}, - {"http://test", ""}, - }, - }, - { - "Host cookie on TLD.", - "http://com", - []string{"a=1"}, - "a=1", - []query{ - {"http://com", "a=1"}, - {"http://any.com", ""}, - {"http://any.test", ""}, - }, - }, - { - "Domain cookie on TLD becomes a host cookie.", - "http://com", - []string{"a=1; domain=com"}, - "a=1", - []query{ - {"http://com", "a=1"}, - {"http://any.com", ""}, - {"http://any.test", ""}, - }, - }, - { - "Host cookie on public suffix.", - "http://co.uk", - []string{"a=1"}, - "a=1", - []query{ - {"http://co.uk", "a=1"}, - {"http://uk", ""}, - {"http://some.co.uk", ""}, - {"http://foo.some.co.uk", ""}, - {"http://any.uk", ""}, - }, - }, - { - "Domain cookie on public suffix is ignored.", - "http://some.co.uk", - []string{"a=1; domain=co.uk"}, - "", - []query{ - {"http://co.uk", ""}, - {"http://uk", ""}, - {"http://some.co.uk", ""}, - {"http://foo.some.co.uk", ""}, - {"http://any.uk", ""}, - }, - }, -} - -func TestDomainHandling(t *testing.T) { - for _, test := range domainHandlingTests { - jar := newTestJar() - test.run(t, jar) - } -} diff -Nru 
juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/core/http/cookiejar/punycode.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/core/http/cookiejar/punycode.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/core/http/cookiejar/punycode.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/core/http/cookiejar/punycode.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,159 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cookiejar - -// This file implements the Punycode algorithm from RFC 3492. - -import ( - "fmt" - "strings" - "unicode/utf8" -) - -// These parameter values are specified in section 5. -// -// All computation is done with int32s, so that overflow behavior is identical -// regardless of whether int is 32-bit or 64-bit. -const ( - base int32 = 36 - damp int32 = 700 - initialBias int32 = 72 - initialN int32 = 128 - skew int32 = 38 - tmax int32 = 26 - tmin int32 = 1 -) - -// encode encodes a string as specified in section 6.3 and prepends prefix to -// the result. -// -// The "while h < length(input)" line in the specification becomes "for -// remaining != 0" in the Go code, because len(s) in Go is in bytes, not runes. 
-func encode(prefix, s string) (string, error) { - output := make([]byte, len(prefix), len(prefix)+1+2*len(s)) - copy(output, prefix) - delta, n, bias := int32(0), initialN, initialBias - b, remaining := int32(0), int32(0) - for _, r := range s { - if r < 0x80 { - b++ - output = append(output, byte(r)) - } else { - remaining++ - } - } - h := b - if b > 0 { - output = append(output, '-') - } - for remaining != 0 { - m := int32(0x7fffffff) - for _, r := range s { - if m > r && r >= n { - m = r - } - } - delta += (m - n) * (h + 1) - if delta < 0 { - return "", fmt.Errorf("cookiejar: invalid label %q", s) - } - n = m - for _, r := range s { - if r < n { - delta++ - if delta < 0 { - return "", fmt.Errorf("cookiejar: invalid label %q", s) - } - continue - } - if r > n { - continue - } - q := delta - for k := base; ; k += base { - t := k - bias - if t < tmin { - t = tmin - } else if t > tmax { - t = tmax - } - if q < t { - break - } - output = append(output, encodeDigit(t+(q-t)%(base-t))) - q = (q - t) / (base - t) - } - output = append(output, encodeDigit(q)) - bias = adapt(delta, h+1, h == b) - delta = 0 - h++ - remaining-- - } - delta++ - n++ - } - return string(output), nil -} - -func encodeDigit(digit int32) byte { - switch { - case 0 <= digit && digit < 26: - return byte(digit + 'a') - case 26 <= digit && digit < 36: - return byte(digit + ('0' - 26)) - } - panic("cookiejar: internal error in punycode encoding") -} - -// adapt is the bias adaptation function specified in section 6.1. -func adapt(delta, numPoints int32, firstTime bool) int32 { - if firstTime { - delta /= damp - } else { - delta /= 2 - } - delta += delta / numPoints - k := int32(0) - for delta > ((base-tmin)*tmax)/2 { - delta /= base - tmin - k += base - } - return k + (base-tmin+1)*delta/(delta+skew) -} - -// Strictly speaking, the remaining code below deals with IDNA (RFC 5890 and -// friends) and not Punycode (RFC 3492) per se. - -// acePrefix is the ASCII Compatible Encoding prefix. 
-const acePrefix = "xn--" - -// toASCII converts a domain or domain label to its ASCII form. For example, -// toASCII("bücher.example.com") is "xn--bcher-kva.example.com", and -// toASCII("golang") is "golang". -func toASCII(s string) (string, error) { - if ascii(s) { - return s, nil - } - labels := strings.Split(s, ".") - for i, label := range labels { - if !ascii(label) { - a, err := encode(acePrefix, label) - if err != nil { - return "", err - } - labels[i] = a - } - } - return strings.Join(labels, "."), nil -} - -func ascii(s string) bool { - for i := 0; i < len(s); i++ { - if s[i] >= utf8.RuneSelf { - return false - } - } - return true -} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/core/http/cookiejar/punycode_test.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/core/http/cookiejar/punycode_test.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/core/http/cookiejar/punycode_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/core/http/cookiejar/punycode_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,161 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cookiejar - -import ( - "testing" -) - -var punycodeTestCases = [...]struct { - s, encoded string -}{ - {"", ""}, - {"-", "--"}, - {"-a", "-a-"}, - {"-a-", "-a--"}, - {"a", "a-"}, - {"a-", "a--"}, - {"a-b", "a-b-"}, - {"books", "books-"}, - {"bücher", "bcher-kva"}, - {"Hello世界", "Hello-ck1hg65u"}, - {"ü", "tda"}, - {"üý", "tdac"}, - - // The test cases below come from RFC 3492 section 7.1 with Errata 3026. - { - // (A) Arabic (Egyptian). - "\u0644\u064A\u0647\u0645\u0627\u0628\u062A\u0643\u0644" + - "\u0645\u0648\u0634\u0639\u0631\u0628\u064A\u061F", - "egbpdaj6bu4bxfgehfvwxn", - }, - { - // (B) Chinese (simplified). 
- "\u4ED6\u4EEC\u4E3A\u4EC0\u4E48\u4E0D\u8BF4\u4E2D\u6587", - "ihqwcrb4cv8a8dqg056pqjye", - }, - { - // (C) Chinese (traditional). - "\u4ED6\u5011\u7232\u4EC0\u9EBD\u4E0D\u8AAA\u4E2D\u6587", - "ihqwctvzc91f659drss3x8bo0yb", - }, - { - // (D) Czech. - "\u0050\u0072\u006F\u010D\u0070\u0072\u006F\u0073\u0074" + - "\u011B\u006E\u0065\u006D\u006C\u0075\u0076\u00ED\u010D" + - "\u0065\u0073\u006B\u0079", - "Proprostnemluvesky-uyb24dma41a", - }, - { - // (E) Hebrew. - "\u05DC\u05DE\u05D4\u05D4\u05DD\u05E4\u05E9\u05D5\u05D8" + - "\u05DC\u05D0\u05DE\u05D3\u05D1\u05E8\u05D9\u05DD\u05E2" + - "\u05D1\u05E8\u05D9\u05EA", - "4dbcagdahymbxekheh6e0a7fei0b", - }, - { - // (F) Hindi (Devanagari). - "\u092F\u0939\u0932\u094B\u0917\u0939\u093F\u0928\u094D" + - "\u0926\u0940\u0915\u094D\u092F\u094B\u0902\u0928\u0939" + - "\u0940\u0902\u092C\u094B\u0932\u0938\u0915\u0924\u0947" + - "\u0939\u0948\u0902", - "i1baa7eci9glrd9b2ae1bj0hfcgg6iyaf8o0a1dig0cd", - }, - { - // (G) Japanese (kanji and hiragana). - "\u306A\u305C\u307F\u3093\u306A\u65E5\u672C\u8A9E\u3092" + - "\u8A71\u3057\u3066\u304F\u308C\u306A\u3044\u306E\u304B", - "n8jok5ay5dzabd5bym9f0cm5685rrjetr6pdxa", - }, - { - // (H) Korean (Hangul syllables). - "\uC138\uACC4\uC758\uBAA8\uB4E0\uC0AC\uB78C\uB4E4\uC774" + - "\uD55C\uAD6D\uC5B4\uB97C\uC774\uD574\uD55C\uB2E4\uBA74" + - "\uC5BC\uB9C8\uB098\uC88B\uC744\uAE4C", - "989aomsvi5e83db1d2a355cv1e0vak1dwrv93d5xbh15a0dt30a5j" + - "psd879ccm6fea98c", - }, - { - // (I) Russian (Cyrillic). - "\u043F\u043E\u0447\u0435\u043C\u0443\u0436\u0435\u043E" + - "\u043D\u0438\u043D\u0435\u0433\u043E\u0432\u043E\u0440" + - "\u044F\u0442\u043F\u043E\u0440\u0443\u0441\u0441\u043A" + - "\u0438", - "b1abfaaepdrnnbgefbadotcwatmq2g4l", - }, - { - // (J) Spanish. 
- "\u0050\u006F\u0072\u0071\u0075\u00E9\u006E\u006F\u0070" + - "\u0075\u0065\u0064\u0065\u006E\u0073\u0069\u006D\u0070" + - "\u006C\u0065\u006D\u0065\u006E\u0074\u0065\u0068\u0061" + - "\u0062\u006C\u0061\u0072\u0065\u006E\u0045\u0073\u0070" + - "\u0061\u00F1\u006F\u006C", - "PorqunopuedensimplementehablarenEspaol-fmd56a", - }, - { - // (K) Vietnamese. - "\u0054\u1EA1\u0069\u0073\u0061\u006F\u0068\u1ECD\u006B" + - "\u0068\u00F4\u006E\u0067\u0074\u0068\u1EC3\u0063\u0068" + - "\u1EC9\u006E\u00F3\u0069\u0074\u0069\u1EBF\u006E\u0067" + - "\u0056\u0069\u1EC7\u0074", - "TisaohkhngthchnitingVit-kjcr8268qyxafd2f1b9g", - }, - { - // (L) 3B. - "\u0033\u5E74\u0042\u7D44\u91D1\u516B\u5148\u751F", - "3B-ww4c5e180e575a65lsy2b", - }, - { - // (M) -with-SUPER-MONKEYS. - "\u5B89\u5BA4\u5948\u7F8E\u6075\u002D\u0077\u0069\u0074" + - "\u0068\u002D\u0053\u0055\u0050\u0045\u0052\u002D\u004D" + - "\u004F\u004E\u004B\u0045\u0059\u0053", - "-with-SUPER-MONKEYS-pc58ag80a8qai00g7n9n", - }, - { - // (N) Hello-Another-Way-. - "\u0048\u0065\u006C\u006C\u006F\u002D\u0041\u006E\u006F" + - "\u0074\u0068\u0065\u0072\u002D\u0057\u0061\u0079\u002D" + - "\u305D\u308C\u305E\u308C\u306E\u5834\u6240", - "Hello-Another-Way--fc4qua05auwb3674vfr0b", - }, - { - // (O) 2. 
- "\u3072\u3068\u3064\u5C4B\u6839\u306E\u4E0B\u0032", - "2-u9tlzr9756bt3uc0v", - }, - { - // (P) MajiKoi5 - "\u004D\u0061\u006A\u0069\u3067\u004B\u006F\u0069\u3059" + - "\u308B\u0035\u79D2\u524D", - "MajiKoi5-783gue6qz075azm5e", - }, - { - // (Q) de - "\u30D1\u30D5\u30A3\u30FC\u0064\u0065\u30EB\u30F3\u30D0", - "de-jg4avhby1noc0d", - }, - { - // (R) - "\u305D\u306E\u30B9\u30D4\u30FC\u30C9\u3067", - "d9juau41awczczp", - }, - { - // (S) -> $1.00 <- - "\u002D\u003E\u0020\u0024\u0031\u002E\u0030\u0030\u0020" + - "\u003C\u002D", - "-> $1.00 <--", - }, -} - -func TestPunycode(t *testing.T) { - for _, tc := range punycodeTestCases { - if got, err := encode("", tc.s); err != nil { - t.Errorf(`encode("", %q): %v`, tc.s, err) - } else if got != tc.encoded { - t.Errorf(`encode("", %q): got %q, want %q`, tc.s, got, tc.encoded) - } - } -} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/core/http/fcgi/child.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/core/http/fcgi/child.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/core/http/fcgi/child.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/core/http/fcgi/child.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,305 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package fcgi - -// This file implements FastCGI from the perspective of a child process. - -import ( - "errors" - "fmt" - "io" - "io/ioutil" - "net" - "net/http" - "net/http/cgi" - "os" - "strings" - "sync" - "time" -) - -// request holds the state for an in-progress request. As soon as it's complete, -// it's converted to an http.Request. 
-type request struct { - pw *io.PipeWriter - reqId uint16 - params map[string]string - buf [1024]byte - rawParams []byte - keepConn bool -} - -func newRequest(reqId uint16, flags uint8) *request { - r := &request{ - reqId: reqId, - params: map[string]string{}, - keepConn: flags&flagKeepConn != 0, - } - r.rawParams = r.buf[:0] - return r -} - -// parseParams reads an encoded []byte into Params. -func (r *request) parseParams() { - text := r.rawParams - r.rawParams = nil - for len(text) > 0 { - keyLen, n := readSize(text) - if n == 0 { - return - } - text = text[n:] - valLen, n := readSize(text) - if n == 0 { - return - } - text = text[n:] - key := readString(text, keyLen) - text = text[keyLen:] - val := readString(text, valLen) - text = text[valLen:] - r.params[key] = val - } -} - -// response implements http.ResponseWriter. -type response struct { - req *request - header http.Header - w *bufWriter - wroteHeader bool -} - -func newResponse(c *child, req *request) *response { - return &response{ - req: req, - header: http.Header{}, - w: newWriter(c.conn, typeStdout, req.reqId), - } -} - -func (r *response) Header() http.Header { - return r.header -} - -func (r *response) Write(data []byte) (int, error) { - if !r.wroteHeader { - r.WriteHeader(http.StatusOK) - } - return r.w.Write(data) -} - -func (r *response) WriteHeader(code int) { - if r.wroteHeader { - return - } - r.wroteHeader = true - if code == http.StatusNotModified { - // Must not have body. 
- r.header.Del("Content-Type") - r.header.Del("Content-Length") - r.header.Del("Transfer-Encoding") - } else if r.header.Get("Content-Type") == "" { - r.header.Set("Content-Type", "text/html; charset=utf-8") - } - - if r.header.Get("Date") == "" { - r.header.Set("Date", time.Now().UTC().Format(http.TimeFormat)) - } - - fmt.Fprintf(r.w, "Status: %d %s\r\n", code, http.StatusText(code)) - r.header.Write(r.w) - r.w.WriteString("\r\n") -} - -func (r *response) Flush() { - if !r.wroteHeader { - r.WriteHeader(http.StatusOK) - } - r.w.Flush() -} - -func (r *response) Close() error { - r.Flush() - return r.w.Close() -} - -type child struct { - conn *conn - handler http.Handler - - mu sync.Mutex // protects requests: - requests map[uint16]*request // keyed by request ID -} - -func newChild(rwc io.ReadWriteCloser, handler http.Handler) *child { - return &child{ - conn: newConn(rwc), - handler: handler, - requests: make(map[uint16]*request), - } -} - -func (c *child) serve() { - defer c.conn.Close() - var rec record - for { - if err := rec.read(c.conn.rwc); err != nil { - return - } - if err := c.handleRecord(&rec); err != nil { - return - } - } -} - -var errCloseConn = errors.New("fcgi: connection should be closed") - -var emptyBody = ioutil.NopCloser(strings.NewReader("")) - -func (c *child) handleRecord(rec *record) error { - c.mu.Lock() - req, ok := c.requests[rec.h.Id] - c.mu.Unlock() - if !ok && rec.h.Type != typeBeginRequest && rec.h.Type != typeGetValues { - // The spec says to ignore unknown request IDs. - return nil - } - - switch rec.h.Type { - case typeBeginRequest: - if req != nil { - // The server is trying to begin a request with the same ID - // as an in-progress request. This is an error. 
- return errors.New("fcgi: received ID that is already in-flight") - } - - var br beginRequest - if err := br.read(rec.content()); err != nil { - return err - } - if br.role != roleResponder { - c.conn.writeEndRequest(rec.h.Id, 0, statusUnknownRole) - return nil - } - req = newRequest(rec.h.Id, br.flags) - c.mu.Lock() - c.requests[rec.h.Id] = req - c.mu.Unlock() - return nil - case typeParams: - // NOTE(eds): Technically a key-value pair can straddle the boundary - // between two packets. We buffer until we've received all parameters. - if len(rec.content()) > 0 { - req.rawParams = append(req.rawParams, rec.content()...) - return nil - } - req.parseParams() - return nil - case typeStdin: - content := rec.content() - if req.pw == nil { - var body io.ReadCloser - if len(content) > 0 { - // body could be an io.LimitReader, but it shouldn't matter - // as long as both sides are behaving. - body, req.pw = io.Pipe() - } else { - body = emptyBody - } - go c.serveRequest(req, body) - } - if len(content) > 0 { - // TODO(eds): This blocks until the handler reads from the pipe. - // If the handler takes a long time, it might be a problem. - req.pw.Write(content) - } else if req.pw != nil { - req.pw.Close() - } - return nil - case typeGetValues: - values := map[string]string{"FCGI_MPXS_CONNS": "1"} - c.conn.writePairs(typeGetValuesResult, 0, values) - return nil - case typeData: - // If the filter role is implemented, read the data stream here. 
- return nil - case typeAbortRequest: - println("abort") - c.mu.Lock() - delete(c.requests, rec.h.Id) - c.mu.Unlock() - c.conn.writeEndRequest(rec.h.Id, 0, statusRequestComplete) - if !req.keepConn { - // connection will close upon return - return errCloseConn - } - return nil - default: - b := make([]byte, 8) - b[0] = byte(rec.h.Type) - c.conn.writeRecord(typeUnknownType, 0, b) - return nil - } -} - -func (c *child) serveRequest(req *request, body io.ReadCloser) { - r := newResponse(c, req) - httpReq, err := cgi.RequestFromMap(req.params) - if err != nil { - // there was an error reading the request - r.WriteHeader(http.StatusInternalServerError) - c.conn.writeRecord(typeStderr, req.reqId, []byte(err.Error())) - } else { - httpReq.Body = body - c.handler.ServeHTTP(r, httpReq) - } - r.Close() - c.mu.Lock() - delete(c.requests, req.reqId) - c.mu.Unlock() - c.conn.writeEndRequest(req.reqId, 0, statusRequestComplete) - - // Consume the entire body, so the host isn't still writing to - // us when we close the socket below in the !keepConn case, - // otherwise we'd send a RST. (golang.org/issue/4183) - // TODO(bradfitz): also bound this copy in time. Or send - // some sort of abort request to the host, so the host - // can properly cut off the client sending all the data. - // For now just bound it a little and - io.CopyN(ioutil.Discard, body, 100<<20) - body.Close() - - if !req.keepConn { - c.conn.Close() - } -} - -// Serve accepts incoming FastCGI connections on the listener l, creating a new -// goroutine for each. The goroutine reads requests and then calls handler -// to reply to them. -// If l is nil, Serve accepts connections from os.Stdin. -// If handler is nil, http.DefaultServeMux is used. 
-func Serve(l net.Listener, handler http.Handler) error { - if l == nil { - var err error - l, err = net.FileListener(os.Stdin) - if err != nil { - return err - } - defer l.Close() - } - if handler == nil { - handler = http.DefaultServeMux - } - for { - rw, err := l.Accept() - if err != nil { - return err - } - c := newChild(rw, handler) - go c.serve() - } -} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/core/http/fcgi/fcgi.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/core/http/fcgi/fcgi.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/core/http/fcgi/fcgi.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/core/http/fcgi/fcgi.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,274 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package fcgi implements the FastCGI protocol. -// Currently only the responder role is supported. -// The protocol is defined at http://www.fastcgi.com/drupal/node/6?q=node/22 -package fcgi - -// This file defines the raw protocol and some utilities used by the child and -// the host. 
- -import ( - "bufio" - "bytes" - "encoding/binary" - "errors" - "io" - "sync" -) - -// recType is a record type, as defined by -// http://www.fastcgi.com/devkit/doc/fcgi-spec.html#S8 -type recType uint8 - -const ( - typeBeginRequest recType = 1 - typeAbortRequest recType = 2 - typeEndRequest recType = 3 - typeParams recType = 4 - typeStdin recType = 5 - typeStdout recType = 6 - typeStderr recType = 7 - typeData recType = 8 - typeGetValues recType = 9 - typeGetValuesResult recType = 10 - typeUnknownType recType = 11 -) - -// keep the connection between web-server and responder open after request -const flagKeepConn = 1 - -const ( - maxWrite = 65535 // maximum record body - maxPad = 255 -) - -const ( - roleResponder = iota + 1 // only Responders are implemented. - roleAuthorizer - roleFilter -) - -const ( - statusRequestComplete = iota - statusCantMultiplex - statusOverloaded - statusUnknownRole -) - -const headerLen = 8 - -type header struct { - Version uint8 - Type recType - Id uint16 - ContentLength uint16 - PaddingLength uint8 - Reserved uint8 -} - -type beginRequest struct { - role uint16 - flags uint8 - reserved [5]uint8 -} - -func (br *beginRequest) read(content []byte) error { - if len(content) != 8 { - return errors.New("fcgi: invalid begin request record") - } - br.role = binary.BigEndian.Uint16(content) - br.flags = content[2] - return nil -} - -// for padding so we don't have to allocate all the time -// not synchronized because we don't care what the contents are -var pad [maxPad]byte - -func (h *header) init(recType recType, reqId uint16, contentLength int) { - h.Version = 1 - h.Type = recType - h.Id = reqId - h.ContentLength = uint16(contentLength) - h.PaddingLength = uint8(-contentLength & 7) -} - -// conn sends records over rwc -type conn struct { - mutex sync.Mutex - rwc io.ReadWriteCloser - - // to avoid allocations - buf bytes.Buffer - h header -} - -func newConn(rwc io.ReadWriteCloser) *conn { - return &conn{rwc: rwc} -} - -func (c *conn) 
Close() error { - c.mutex.Lock() - defer c.mutex.Unlock() - return c.rwc.Close() -} - -type record struct { - h header - buf [maxWrite + maxPad]byte -} - -func (rec *record) read(r io.Reader) (err error) { - if err = binary.Read(r, binary.BigEndian, &rec.h); err != nil { - return err - } - if rec.h.Version != 1 { - return errors.New("fcgi: invalid header version") - } - n := int(rec.h.ContentLength) + int(rec.h.PaddingLength) - if _, err = io.ReadFull(r, rec.buf[:n]); err != nil { - return err - } - return nil -} - -func (r *record) content() []byte { - return r.buf[:r.h.ContentLength] -} - -// writeRecord writes and sends a single record. -func (c *conn) writeRecord(recType recType, reqId uint16, b []byte) error { - c.mutex.Lock() - defer c.mutex.Unlock() - c.buf.Reset() - c.h.init(recType, reqId, len(b)) - if err := binary.Write(&c.buf, binary.BigEndian, c.h); err != nil { - return err - } - if _, err := c.buf.Write(b); err != nil { - return err - } - if _, err := c.buf.Write(pad[:c.h.PaddingLength]); err != nil { - return err - } - _, err := c.rwc.Write(c.buf.Bytes()) - return err -} - -func (c *conn) writeBeginRequest(reqId uint16, role uint16, flags uint8) error { - b := [8]byte{byte(role >> 8), byte(role), flags} - return c.writeRecord(typeBeginRequest, reqId, b[:]) -} - -func (c *conn) writeEndRequest(reqId uint16, appStatus int, protocolStatus uint8) error { - b := make([]byte, 8) - binary.BigEndian.PutUint32(b, uint32(appStatus)) - b[4] = protocolStatus - return c.writeRecord(typeEndRequest, reqId, b) -} - -func (c *conn) writePairs(recType recType, reqId uint16, pairs map[string]string) error { - w := newWriter(c, recType, reqId) - b := make([]byte, 8) - for k, v := range pairs { - n := encodeSize(b, uint32(len(k))) - n += encodeSize(b[n:], uint32(len(v))) - if _, err := w.Write(b[:n]); err != nil { - return err - } - if _, err := w.WriteString(k); err != nil { - return err - } - if _, err := w.WriteString(v); err != nil { - return err - } - } - w.Close() 
- return nil -} - -func readSize(s []byte) (uint32, int) { - if len(s) == 0 { - return 0, 0 - } - size, n := uint32(s[0]), 1 - if size&(1<<7) != 0 { - if len(s) < 4 { - return 0, 0 - } - n = 4 - size = binary.BigEndian.Uint32(s) - size &^= 1 << 31 - } - return size, n -} - -func readString(s []byte, size uint32) string { - if size > uint32(len(s)) { - return "" - } - return string(s[:size]) -} - -func encodeSize(b []byte, size uint32) int { - if size > 127 { - size |= 1 << 31 - binary.BigEndian.PutUint32(b, size) - return 4 - } - b[0] = byte(size) - return 1 -} - -// bufWriter encapsulates bufio.Writer but also closes the underlying stream when -// Closed. -type bufWriter struct { - closer io.Closer - *bufio.Writer -} - -func (w *bufWriter) Close() error { - if err := w.Writer.Flush(); err != nil { - w.closer.Close() - return err - } - return w.closer.Close() -} - -func newWriter(c *conn, recType recType, reqId uint16) *bufWriter { - s := &streamWriter{c: c, recType: recType, reqId: reqId} - w := bufio.NewWriterSize(s, maxWrite) - return &bufWriter{s, w} -} - -// streamWriter abstracts out the separation of a stream into discrete records. -// It only writes maxWrite bytes at a time. 
-type streamWriter struct { - c *conn - recType recType - reqId uint16 -} - -func (w *streamWriter) Write(p []byte) (int, error) { - nn := 0 - for len(p) > 0 { - n := len(p) - if n > maxWrite { - n = maxWrite - } - if err := w.c.writeRecord(w.recType, w.reqId, p[:n]); err != nil { - return nn, err - } - nn += n - p = p[n:] - } - return nn, nil -} - -func (w *streamWriter) Close() error { - // send empty record to close the stream - return w.c.writeRecord(w.recType, w.reqId, nil) -} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/core/http/fcgi/fcgi_test.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/core/http/fcgi/fcgi_test.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/core/http/fcgi/fcgi_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/core/http/fcgi/fcgi_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,150 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package fcgi - -import ( - "bytes" - "errors" - "io" - "testing" -) - -var sizeTests = []struct { - size uint32 - bytes []byte -}{ - {0, []byte{0x00}}, - {127, []byte{0x7F}}, - {128, []byte{0x80, 0x00, 0x00, 0x80}}, - {1000, []byte{0x80, 0x00, 0x03, 0xE8}}, - {33554431, []byte{0x81, 0xFF, 0xFF, 0xFF}}, -} - -func TestSize(t *testing.T) { - b := make([]byte, 4) - for i, test := range sizeTests { - n := encodeSize(b, test.size) - if !bytes.Equal(b[:n], test.bytes) { - t.Errorf("%d expected %x, encoded %x", i, test.bytes, b) - } - size, n := readSize(test.bytes) - if size != test.size { - t.Errorf("%d expected %d, read %d", i, test.size, size) - } - if len(test.bytes) != n { - t.Errorf("%d did not consume all the bytes", i) - } - } -} - -var streamTests = []struct { - desc string - recType recType - reqId uint16 - content []byte - raw []byte -}{ - {"single record", typeStdout, 1, nil, - []byte{1, byte(typeStdout), 0, 1, 0, 0, 0, 0}, - }, - // this data will have to be split into two records - {"two records", typeStdin, 300, make([]byte, 66000), - bytes.Join([][]byte{ - // header for the first record - {1, byte(typeStdin), 0x01, 0x2C, 0xFF, 0xFF, 1, 0}, - make([]byte, 65536), - // header for the second - {1, byte(typeStdin), 0x01, 0x2C, 0x01, 0xD1, 7, 0}, - make([]byte, 472), - // header for the empty record - {1, byte(typeStdin), 0x01, 0x2C, 0, 0, 0, 0}, - }, - nil), - }, -} - -type nilCloser struct { - io.ReadWriter -} - -func (c *nilCloser) Close() error { return nil } - -func TestStreams(t *testing.T) { - var rec record -outer: - for _, test := range streamTests { - buf := bytes.NewBuffer(test.raw) - var content []byte - for buf.Len() > 0 { - if err := rec.read(buf); err != nil { - t.Errorf("%s: error reading record: %v", test.desc, err) - continue outer - } - content = append(content, rec.content()...) 
- } - if rec.h.Type != test.recType { - t.Errorf("%s: got type %d expected %d", test.desc, rec.h.Type, test.recType) - continue - } - if rec.h.Id != test.reqId { - t.Errorf("%s: got request ID %d expected %d", test.desc, rec.h.Id, test.reqId) - continue - } - if !bytes.Equal(content, test.content) { - t.Errorf("%s: read wrong content", test.desc) - continue - } - buf.Reset() - c := newConn(&nilCloser{buf}) - w := newWriter(c, test.recType, test.reqId) - if _, err := w.Write(test.content); err != nil { - t.Errorf("%s: error writing record: %v", test.desc, err) - continue - } - if err := w.Close(); err != nil { - t.Errorf("%s: error closing stream: %v", test.desc, err) - continue - } - if !bytes.Equal(buf.Bytes(), test.raw) { - t.Errorf("%s: wrote wrong content", test.desc) - } - } -} - -type writeOnlyConn struct { - buf []byte -} - -func (c *writeOnlyConn) Write(p []byte) (int, error) { - c.buf = append(c.buf, p...) - return len(p), nil -} - -func (c *writeOnlyConn) Read(p []byte) (int, error) { - return 0, errors.New("conn is write-only") -} - -func (c *writeOnlyConn) Close() error { - return nil -} - -func TestGetValues(t *testing.T) { - var rec record - rec.h.Type = typeGetValues - - wc := new(writeOnlyConn) - c := newChild(wc, nil) - err := c.handleRecord(&rec) - if err != nil { - t.Fatalf("handleRecord: %v", err) - } - - const want = "\x01\n\x00\x00\x00\x12\x06\x00" + - "\x0f\x01FCGI_MPXS_CONNS1" + - "\x00\x00\x00\x00\x00\x00\x01\n\x00\x00\x00\x00\x00\x00" - if got := string(wc.buf); got != want { - t.Errorf(" got: %q\nwant: %q\n", got, want) - } -} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/core/http/httptest/example_test.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/core/http/httptest/example_test.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/core/http/httptest/example_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/core/http/httptest/example_test.go 
1970-01-01 00:00:00.000000000 +0000 @@ -1,50 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package httptest_test - -import ( - "fmt" - "io/ioutil" - "log" - "net/http" - "net/http/httptest" -) - -func ExampleResponseRecorder() { - handler := func(w http.ResponseWriter, r *http.Request) { - http.Error(w, "something failed", http.StatusInternalServerError) - } - - req, err := http.NewRequest("GET", "http://example.com/foo", nil) - if err != nil { - log.Fatal(err) - } - - w := httptest.NewRecorder() - handler(w, req) - - fmt.Printf("%d - %s", w.Code, w.Body.String()) - // Output: 500 - something failed -} - -func ExampleServer() { - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - fmt.Fprintln(w, "Hello, client") - })) - defer ts.Close() - - res, err := http.Get(ts.URL) - if err != nil { - log.Fatal(err) - } - greeting, err := ioutil.ReadAll(res.Body) - res.Body.Close() - if err != nil { - log.Fatal(err) - } - - fmt.Printf("%s", greeting) - // Output: Hello, client -} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/core/http/httptest/recorder.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/core/http/httptest/recorder.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/core/http/httptest/recorder.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/core/http/httptest/recorder.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,72 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package httptest provides utilities for HTTP testing. 
-package httptest - -import ( - "bytes" - "net/http" -) - -// ResponseRecorder is an implementation of http.ResponseWriter that -// records its mutations for later inspection in tests. -type ResponseRecorder struct { - Code int // the HTTP response code from WriteHeader - HeaderMap http.Header // the HTTP response headers - Body *bytes.Buffer // if non-nil, the bytes.Buffer to append written data to - Flushed bool - - wroteHeader bool -} - -// NewRecorder returns an initialized ResponseRecorder. -func NewRecorder() *ResponseRecorder { - return &ResponseRecorder{ - HeaderMap: make(http.Header), - Body: new(bytes.Buffer), - Code: 200, - } -} - -// DefaultRemoteAddr is the default remote address to return in RemoteAddr if -// an explicit DefaultRemoteAddr isn't set on ResponseRecorder. -const DefaultRemoteAddr = "1.2.3.4" - -// Header returns the response headers. -func (rw *ResponseRecorder) Header() http.Header { - m := rw.HeaderMap - if m == nil { - m = make(http.Header) - rw.HeaderMap = m - } - return m -} - -// Write always succeeds and writes to rw.Body, if not nil. -func (rw *ResponseRecorder) Write(buf []byte) (int, error) { - if !rw.wroteHeader { - rw.WriteHeader(200) - } - if rw.Body != nil { - rw.Body.Write(buf) - } - return len(buf), nil -} - -// WriteHeader sets rw.Code. -func (rw *ResponseRecorder) WriteHeader(code int) { - if !rw.wroteHeader { - rw.Code = code - } - rw.wroteHeader = true -} - -// Flush sets rw.Flushed to true. 
-func (rw *ResponseRecorder) Flush() { - if !rw.wroteHeader { - rw.WriteHeader(200) - } - rw.Flushed = true -} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/core/http/httptest/recorder_test.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/core/http/httptest/recorder_test.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/core/http/httptest/recorder_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/core/http/httptest/recorder_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,90 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package httptest - -import ( - "fmt" - "net/http" - "testing" -) - -func TestRecorder(t *testing.T) { - type checkFunc func(*ResponseRecorder) error - check := func(fns ...checkFunc) []checkFunc { return fns } - - hasStatus := func(wantCode int) checkFunc { - return func(rec *ResponseRecorder) error { - if rec.Code != wantCode { - return fmt.Errorf("Status = %d; want %d", rec.Code, wantCode) - } - return nil - } - } - hasContents := func(want string) checkFunc { - return func(rec *ResponseRecorder) error { - if rec.Body.String() != want { - return fmt.Errorf("wrote = %q; want %q", rec.Body.String(), want) - } - return nil - } - } - hasFlush := func(want bool) checkFunc { - return func(rec *ResponseRecorder) error { - if rec.Flushed != want { - return fmt.Errorf("Flushed = %v; want %v", rec.Flushed, want) - } - return nil - } - } - - tests := []struct { - name string - h func(w http.ResponseWriter, r *http.Request) - checks []checkFunc - }{ - { - "200 default", - func(w http.ResponseWriter, r *http.Request) {}, - check(hasStatus(200), hasContents("")), - }, - { - "first code only", - func(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(201) - w.WriteHeader(202) - w.Write([]byte("hi")) - }, - check(hasStatus(201), 
hasContents("hi")), - }, - { - "write sends 200", - func(w http.ResponseWriter, r *http.Request) { - w.Write([]byte("hi first")) - w.WriteHeader(201) - w.WriteHeader(202) - }, - check(hasStatus(200), hasContents("hi first"), hasFlush(false)), - }, - { - "flush", - func(w http.ResponseWriter, r *http.Request) { - w.(http.Flusher).Flush() // also sends a 200 - w.WriteHeader(201) - }, - check(hasStatus(200), hasFlush(true)), - }, - } - r, _ := http.NewRequest("GET", "http://foo.com/", nil) - for _, tt := range tests { - h := http.HandlerFunc(tt.h) - rec := NewRecorder() - h.ServeHTTP(rec, r) - for _, check := range tt.checks { - if err := check(rec); err != nil { - t.Errorf("%s: %v", tt.name, err) - } - } - } -} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/core/http/httptest/server.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/core/http/httptest/server.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/core/http/httptest/server.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/core/http/httptest/server.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,228 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Implementation of Server - -package httptest - -import ( - "crypto/tls" - "flag" - "fmt" - "net" - "net/http" - "os" - "sync" -) - -// A Server is an HTTP server listening on a system-chosen port on the -// local loopback interface, for use in end-to-end HTTP tests. -type Server struct { - URL string // base URL of form http://ipaddr:port with no trailing slash - Listener net.Listener - - // TLS is the optional TLS configuration, populated with a new config - // after TLS is started. If set on an unstarted server before StartTLS - // is called, existing fields are copied into the new config. 
- TLS *tls.Config - - // Config may be changed after calling NewUnstartedServer and - // before Start or StartTLS. - Config *http.Server - - // wg counts the number of outstanding HTTP requests on this server. - // Close blocks until all requests are finished. - wg sync.WaitGroup -} - -// historyListener keeps track of all connections that it's ever -// accepted. -type historyListener struct { - net.Listener - sync.Mutex // protects history - history []net.Conn -} - -func (hs *historyListener) Accept() (c net.Conn, err error) { - c, err = hs.Listener.Accept() - if err == nil { - hs.Lock() - hs.history = append(hs.history, c) - hs.Unlock() - } - return -} - -func newLocalListener() net.Listener { - if *serve != "" { - l, err := net.Listen("tcp", *serve) - if err != nil { - panic(fmt.Sprintf("httptest: failed to listen on %v: %v", *serve, err)) - } - return l - } - l, err := net.Listen("tcp", "127.0.0.1:0") - if err != nil { - if l, err = net.Listen("tcp6", "[::1]:0"); err != nil { - panic(fmt.Sprintf("httptest: failed to listen on a port: %v", err)) - } - } - return l -} - -// When debugging a particular http server-based test, -// this flag lets you run -// go test -run=BrokenTest -httptest.serve=127.0.0.1:8000 -// to start the broken server so you can interact with it manually. -var serve = flag.String("httptest.serve", "", "if non-empty, httptest.NewServer serves on this address and blocks") - -// NewServer starts and returns a new Server. -// The caller should call Close when finished, to shut it down. -func NewServer(handler http.Handler) *Server { - ts := NewUnstartedServer(handler) - ts.Start() - return ts -} - -// NewUnstartedServer returns a new Server but doesn't start it. -// -// After changing its configuration, the caller should call Start or -// StartTLS. -// -// The caller should call Close when finished, to shut it down. 
-func NewUnstartedServer(handler http.Handler) *Server { - return &Server{ - Listener: newLocalListener(), - Config: &http.Server{Handler: handler}, - } -} - -// Start starts a server from NewUnstartedServer. -func (s *Server) Start() { - if s.URL != "" { - panic("Server already started") - } - s.Listener = &historyListener{Listener: s.Listener} - s.URL = "http://" + s.Listener.Addr().String() - s.wrapHandler() - go s.Config.Serve(s.Listener) - if *serve != "" { - fmt.Fprintln(os.Stderr, "httptest: serving on", s.URL) - select {} - } -} - -// StartTLS starts TLS on a server from NewUnstartedServer. -func (s *Server) StartTLS() { - if s.URL != "" { - panic("Server already started") - } - cert, err := tls.X509KeyPair(localhostCert, localhostKey) - if err != nil { - panic(fmt.Sprintf("httptest: NewTLSServer: %v", err)) - } - - existingConfig := s.TLS - s.TLS = new(tls.Config) - if existingConfig != nil { - *s.TLS = *existingConfig - } - if s.TLS.NextProtos == nil { - s.TLS.NextProtos = []string{"http/1.1"} - } - if len(s.TLS.Certificates) == 0 { - s.TLS.Certificates = []tls.Certificate{cert} - } - tlsListener := tls.NewListener(s.Listener, s.TLS) - - s.Listener = &historyListener{Listener: tlsListener} - s.URL = "https://" + s.Listener.Addr().String() - s.wrapHandler() - go s.Config.Serve(s.Listener) -} - -func (s *Server) wrapHandler() { - h := s.Config.Handler - if h == nil { - h = http.DefaultServeMux - } - s.Config.Handler = &waitGroupHandler{ - s: s, - h: h, - } -} - -// NewTLSServer starts and returns a new Server using TLS. -// The caller should call Close when finished, to shut it down. -func NewTLSServer(handler http.Handler) *Server { - ts := NewUnstartedServer(handler) - ts.StartTLS() - return ts -} - -// Close shuts down the server and blocks until all outstanding -// requests on this server have completed. 
-func (s *Server) Close() { - s.Listener.Close() - s.wg.Wait() - s.CloseClientConnections() - if t, ok := http.DefaultTransport.(*http.Transport); ok { - t.CloseIdleConnections() - } -} - -// CloseClientConnections closes any currently open HTTP connections -// to the test Server. -func (s *Server) CloseClientConnections() { - hl, ok := s.Listener.(*historyListener) - if !ok { - return - } - hl.Lock() - for _, conn := range hl.history { - conn.Close() - } - hl.Unlock() -} - -// waitGroupHandler wraps a handler, incrementing and decrementing a -// sync.WaitGroup on each request, to enable Server.Close to block -// until outstanding requests are finished. -type waitGroupHandler struct { - s *Server - h http.Handler // non-nil -} - -func (h *waitGroupHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - h.s.wg.Add(1) - defer h.s.wg.Done() // a defer, in case ServeHTTP below panics - h.h.ServeHTTP(w, r) -} - -// localhostCert is a PEM-encoded TLS cert with SAN IPs -// "127.0.0.1" and "[::1]", expiring at the last second of 2049 (the end -// of ASN.1 time). -// generated from src/pkg/crypto/tls: -// go run generate_cert.go --rsa-bits 512 --host 127.0.0.1,::1,example.com --ca --start-date "Jan 1 00:00:00 1970" --duration=1000000h -var localhostCert = []byte(`-----BEGIN CERTIFICATE----- -MIIBdzCCASOgAwIBAgIBADALBgkqhkiG9w0BAQUwEjEQMA4GA1UEChMHQWNtZSBD -bzAeFw03MDAxMDEwMDAwMDBaFw00OTEyMzEyMzU5NTlaMBIxEDAOBgNVBAoTB0Fj -bWUgQ28wWjALBgkqhkiG9w0BAQEDSwAwSAJBAN55NcYKZeInyTuhcCwFMhDHCmwa -IUSdtXdcbItRB/yfXGBhiex00IaLXQnSU+QZPRZWYqeTEbFSgihqi1PUDy8CAwEA -AaNoMGYwDgYDVR0PAQH/BAQDAgCkMBMGA1UdJQQMMAoGCCsGAQUFBwMBMA8GA1Ud -EwEB/wQFMAMBAf8wLgYDVR0RBCcwJYILZXhhbXBsZS5jb22HBH8AAAGHEAAAAAAA -AAAAAAAAAAAAAAEwCwYJKoZIhvcNAQEFA0EAAoQn/ytgqpiLcZu9XKbCJsJcvkgk -Se6AbGXgSlq+ZCEVo0qIwSgeBqmsJxUu7NCSOwVJLYNEBO2DtIxoYVk+MA== ------END CERTIFICATE-----`) - -// localhostKey is the private key for localhostCert. 
-var localhostKey = []byte(`-----BEGIN RSA PRIVATE KEY----- -MIIBPAIBAAJBAN55NcYKZeInyTuhcCwFMhDHCmwaIUSdtXdcbItRB/yfXGBhiex0 -0IaLXQnSU+QZPRZWYqeTEbFSgihqi1PUDy8CAwEAAQJBAQdUx66rfh8sYsgfdcvV -NoafYpnEcB5s4m/vSVe6SU7dCK6eYec9f9wpT353ljhDUHq3EbmE4foNzJngh35d -AekCIQDhRQG5Li0Wj8TM4obOnnXUXf1jRv0UkzE9AHWLG5q3AwIhAPzSjpYUDjVW -MCUXgckTpKCuGwbJk7424Nb8bLzf3kllAiA5mUBgjfr/WtFSJdWcPQ4Zt9KTMNKD -EUO0ukpTwEIl6wIhAMbGqZK3zAAFdq8DD2jPx+UJXnh0rnOkZBzDtJ6/iN69AiEA -1Aq8MJgTaYsDQWyU/hDq5YkDJc9e9DSCvUIzqxQWMQE= ------END RSA PRIVATE KEY-----`) diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/core/http/httptest/server_test.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/core/http/httptest/server_test.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/core/http/httptest/server_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/core/http/httptest/server_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,52 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package httptest - -import ( - "io/ioutil" - "net/http" - "testing" - "time" -) - -func TestServer(t *testing.T) { - ts := NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Write([]byte("hello")) - })) - defer ts.Close() - res, err := http.Get(ts.URL) - if err != nil { - t.Fatal(err) - } - got, err := ioutil.ReadAll(res.Body) - if err != nil { - t.Fatal(err) - } - if string(got) != "hello" { - t.Errorf("got %q, want hello", string(got)) - } -} - -func TestIssue7264(t *testing.T) { - for i := 0; i < 1000; i++ { - func() { - inHandler := make(chan bool, 1) - ts := NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - inHandler <- true - })) - defer ts.Close() - tr := &http.Transport{ - ResponseHeaderTimeout: time.Nanosecond, - } - defer tr.CloseIdleConnections() - c := &http.Client{Transport: tr} - res, err := c.Get(ts.URL) - <-inHandler - if err == nil { - res.Body.Close() - } - }() - } -} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/core/http/httputil/chunked.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/core/http/httputil/chunked.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/core/http/httputil/chunked.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/core/http/httputil/chunked.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,203 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// The wire protocol for HTTP's "chunked" Transfer-Encoding. - -// This code is duplicated in net/http and net/http/httputil. -// Please make any changes in both files. 
- -package httputil - -import ( - "bufio" - "bytes" - "errors" - "fmt" - "io" -) - -const maxLineLength = 4096 // assumed <= bufio.defaultBufSize - -var ErrLineTooLong = errors.New("header line too long") - -// newChunkedReader returns a new chunkedReader that translates the data read from r -// out of HTTP "chunked" format before returning it. -// The chunkedReader returns io.EOF when the final 0-length chunk is read. -// -// newChunkedReader is not needed by normal applications. The http package -// automatically decodes chunking when reading response bodies. -func newChunkedReader(r io.Reader) io.Reader { - br, ok := r.(*bufio.Reader) - if !ok { - br = bufio.NewReader(r) - } - return &chunkedReader{r: br} -} - -type chunkedReader struct { - r *bufio.Reader - n uint64 // unread bytes in chunk - err error - buf [2]byte -} - -func (cr *chunkedReader) beginChunk() { - // chunk-size CRLF - var line []byte - line, cr.err = readLine(cr.r) - if cr.err != nil { - return - } - cr.n, cr.err = parseHexUint(line) - if cr.err != nil { - return - } - if cr.n == 0 { - cr.err = io.EOF - } -} - -func (cr *chunkedReader) chunkHeaderAvailable() bool { - n := cr.r.Buffered() - if n > 0 { - peek, _ := cr.r.Peek(n) - return bytes.IndexByte(peek, '\n') >= 0 - } - return false -} - -func (cr *chunkedReader) Read(b []uint8) (n int, err error) { - for cr.err == nil { - if cr.n == 0 { - if n > 0 && !cr.chunkHeaderAvailable() { - // We've read enough. Don't potentially block - // reading a new chunk header. - break - } - cr.beginChunk() - continue - } - if len(b) == 0 { - break - } - rbuf := b - if uint64(len(rbuf)) > cr.n { - rbuf = rbuf[:cr.n] - } - var n0 int - n0, cr.err = cr.r.Read(rbuf) - n += n0 - b = b[n0:] - cr.n -= uint64(n0) - // If we're at the end of a chunk, read the next two - // bytes to verify they are "\r\n". 
- if cr.n == 0 && cr.err == nil { - if _, cr.err = io.ReadFull(cr.r, cr.buf[:2]); cr.err == nil { - if cr.buf[0] != '\r' || cr.buf[1] != '\n' { - cr.err = errors.New("malformed chunked encoding") - } - } - } - } - return n, cr.err -} - -// Read a line of bytes (up to \n) from b. -// Give up if the line exceeds maxLineLength. -// The returned bytes are a pointer into storage in -// the bufio, so they are only valid until the next bufio read. -func readLine(b *bufio.Reader) (p []byte, err error) { - if p, err = b.ReadSlice('\n'); err != nil { - // We always know when EOF is coming. - // If the caller asked for a line, there should be a line. - if err == io.EOF { - err = io.ErrUnexpectedEOF - } else if err == bufio.ErrBufferFull { - err = ErrLineTooLong - } - return nil, err - } - if len(p) >= maxLineLength { - return nil, ErrLineTooLong - } - return trimTrailingWhitespace(p), nil -} - -func trimTrailingWhitespace(b []byte) []byte { - for len(b) > 0 && isASCIISpace(b[len(b)-1]) { - b = b[:len(b)-1] - } - return b -} - -func isASCIISpace(b byte) bool { - return b == ' ' || b == '\t' || b == '\n' || b == '\r' -} - -// newChunkedWriter returns a new chunkedWriter that translates writes into HTTP -// "chunked" format before writing them to w. Closing the returned chunkedWriter -// sends the final 0-length chunk that marks the end of the stream. -// -// newChunkedWriter is not needed by normal applications. The http -// package adds chunking automatically if handlers don't set a -// Content-Length header. Using newChunkedWriter inside a handler -// would result in double chunking or chunking with a Content-Length -// length, both of which are wrong. -func newChunkedWriter(w io.Writer) io.WriteCloser { - return &chunkedWriter{w} -} - -// Writing to chunkedWriter translates to writing in HTTP chunked Transfer -// Encoding wire format to the underlying Wire chunkedWriter. -type chunkedWriter struct { - Wire io.Writer -} - -// Write the contents of data as one chunk to Wire. 
-// NOTE: Note that the corresponding chunk-writing procedure in Conn.Write has -// a bug since it does not check for success of io.WriteString -func (cw *chunkedWriter) Write(data []byte) (n int, err error) { - - // Don't send 0-length data. It looks like EOF for chunked encoding. - if len(data) == 0 { - return 0, nil - } - - if _, err = fmt.Fprintf(cw.Wire, "%x\r\n", len(data)); err != nil { - return 0, err - } - if n, err = cw.Wire.Write(data); err != nil { - return - } - if n != len(data) { - err = io.ErrShortWrite - return - } - _, err = io.WriteString(cw.Wire, "\r\n") - - return -} - -func (cw *chunkedWriter) Close() error { - _, err := io.WriteString(cw.Wire, "0\r\n") - return err -} - -func parseHexUint(v []byte) (n uint64, err error) { - for _, b := range v { - n <<= 4 - switch { - case '0' <= b && b <= '9': - b = b - '0' - case 'a' <= b && b <= 'f': - b = b - 'a' + 10 - case 'A' <= b && b <= 'F': - b = b - 'A' + 10 - default: - return 0, errors.New("invalid byte in chunk length") - } - n |= uint64(b) - } - return -} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/core/http/httputil/chunked_test.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/core/http/httputil/chunked_test.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/core/http/httputil/chunked_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/core/http/httputil/chunked_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,159 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// This code is duplicated in net/http and net/http/httputil. -// Please make any changes in both files. 
- -package httputil - -import ( - "bufio" - "bytes" - "fmt" - "io" - "io/ioutil" - "strings" - "testing" -) - -func TestChunk(t *testing.T) { - var b bytes.Buffer - - w := newChunkedWriter(&b) - const chunk1 = "hello, " - const chunk2 = "world! 0123456789abcdef" - w.Write([]byte(chunk1)) - w.Write([]byte(chunk2)) - w.Close() - - if g, e := b.String(), "7\r\nhello, \r\n17\r\nworld! 0123456789abcdef\r\n0\r\n"; g != e { - t.Fatalf("chunk writer wrote %q; want %q", g, e) - } - - r := newChunkedReader(&b) - data, err := ioutil.ReadAll(r) - if err != nil { - t.Logf(`data: "%s"`, data) - t.Fatalf("ReadAll from reader: %v", err) - } - if g, e := string(data), chunk1+chunk2; g != e { - t.Errorf("chunk reader read %q; want %q", g, e) - } -} - -func TestChunkReadMultiple(t *testing.T) { - // Bunch of small chunks, all read together. - { - var b bytes.Buffer - w := newChunkedWriter(&b) - w.Write([]byte("foo")) - w.Write([]byte("bar")) - w.Close() - - r := newChunkedReader(&b) - buf := make([]byte, 10) - n, err := r.Read(buf) - if n != 6 || err != io.EOF { - t.Errorf("Read = %d, %v; want 6, EOF", n, err) - } - buf = buf[:n] - if string(buf) != "foobar" { - t.Errorf("Read = %q; want %q", buf, "foobar") - } - } - - // One big chunk followed by a little chunk, but the small bufio.Reader size - // should prevent the second chunk header from being read. - { - var b bytes.Buffer - w := newChunkedWriter(&b) - // fillBufChunk is 11 bytes + 3 bytes header + 2 bytes footer = 16 bytes, - // the same as the bufio ReaderSize below (the minimum), so even - // though we're going to try to Read with a buffer larger enough to also - // receive "foo", the second chunk header won't be read yet. 
- const fillBufChunk = "0123456789a" - const shortChunk = "foo" - w.Write([]byte(fillBufChunk)) - w.Write([]byte(shortChunk)) - w.Close() - - r := newChunkedReader(bufio.NewReaderSize(&b, 16)) - buf := make([]byte, len(fillBufChunk)+len(shortChunk)) - n, err := r.Read(buf) - if n != len(fillBufChunk) || err != nil { - t.Errorf("Read = %d, %v; want %d, nil", n, err, len(fillBufChunk)) - } - buf = buf[:n] - if string(buf) != fillBufChunk { - t.Errorf("Read = %q; want %q", buf, fillBufChunk) - } - - n, err = r.Read(buf) - if n != len(shortChunk) || err != io.EOF { - t.Errorf("Read = %d, %v; want %d, EOF", n, err, len(shortChunk)) - } - } - - // And test that we see an EOF chunk, even though our buffer is already full: - { - r := newChunkedReader(bufio.NewReader(strings.NewReader("3\r\nfoo\r\n0\r\n"))) - buf := make([]byte, 3) - n, err := r.Read(buf) - if n != 3 || err != io.EOF { - t.Errorf("Read = %d, %v; want 3, EOF", n, err) - } - if string(buf) != "foo" { - t.Errorf("buf = %q; want foo", buf) - } - } -} - -func TestChunkReaderAllocs(t *testing.T) { - if testing.Short() { - t.Skip("skipping in short mode") - } - var buf bytes.Buffer - w := newChunkedWriter(&buf) - a, b, c := []byte("aaaaaa"), []byte("bbbbbbbbbbbb"), []byte("cccccccccccccccccccccccc") - w.Write(a) - w.Write(b) - w.Write(c) - w.Close() - - readBuf := make([]byte, len(a)+len(b)+len(c)+1) - byter := bytes.NewReader(buf.Bytes()) - bufr := bufio.NewReader(byter) - mallocs := testing.AllocsPerRun(100, func() { - byter.Seek(0, 0) - bufr.Reset(byter) - r := newChunkedReader(bufr) - n, err := io.ReadFull(r, readBuf) - if n != len(readBuf)-1 { - t.Fatalf("read %d bytes; want %d", n, len(readBuf)-1) - } - if err != io.ErrUnexpectedEOF { - t.Fatalf("read error = %v; want ErrUnexpectedEOF", err) - } - }) - if mallocs > 1.5 { - t.Errorf("mallocs = %v; want 1", mallocs) - } -} - -func TestParseHexUint(t *testing.T) { - for i := uint64(0); i <= 1234; i++ { - line := []byte(fmt.Sprintf("%x", i)) - got, err := 
parseHexUint(line) - if err != nil { - t.Fatalf("on %d: %v", i, err) - } - if got != i { - t.Errorf("for input %q = %d; want %d", line, got, i) - } - } - _, err := parseHexUint([]byte("bogus")) - if err == nil { - t.Error("expected error on bogus input") - } -} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/core/http/httputil/dump.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/core/http/httputil/dump.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/core/http/httputil/dump.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/core/http/httputil/dump.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,276 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package httputil - -import ( - "bufio" - "bytes" - "errors" - "fmt" - "io" - "io/ioutil" - "net" - "net/http" - "net/url" - "strings" - "time" -) - -// One of the copies, say from b to r2, could be avoided by using a more -// elaborate trick where the other copy is made during Request/Response.Write. -// This would complicate things too much, given that these functions are for -// debugging only. 
-func drainBody(b io.ReadCloser) (r1, r2 io.ReadCloser, err error) { - var buf bytes.Buffer - if _, err = buf.ReadFrom(b); err != nil { - return nil, nil, err - } - if err = b.Close(); err != nil { - return nil, nil, err - } - return ioutil.NopCloser(&buf), ioutil.NopCloser(bytes.NewReader(buf.Bytes())), nil -} - -// dumpConn is a net.Conn which writes to Writer and reads from Reader -type dumpConn struct { - io.Writer - io.Reader -} - -func (c *dumpConn) Close() error { return nil } -func (c *dumpConn) LocalAddr() net.Addr { return nil } -func (c *dumpConn) RemoteAddr() net.Addr { return nil } -func (c *dumpConn) SetDeadline(t time.Time) error { return nil } -func (c *dumpConn) SetReadDeadline(t time.Time) error { return nil } -func (c *dumpConn) SetWriteDeadline(t time.Time) error { return nil } - -type neverEnding byte - -func (b neverEnding) Read(p []byte) (n int, err error) { - for i := range p { - p[i] = byte(b) - } - return len(p), nil -} - -// DumpRequestOut is like DumpRequest but includes -// headers that the standard http.Transport adds, -// such as User-Agent. -func DumpRequestOut(req *http.Request, body bool) ([]byte, error) { - save := req.Body - dummyBody := false - if !body || req.Body == nil { - req.Body = nil - if req.ContentLength != 0 { - req.Body = ioutil.NopCloser(io.LimitReader(neverEnding('x'), req.ContentLength)) - dummyBody = true - } - } else { - var err error - save, req.Body, err = drainBody(req.Body) - if err != nil { - return nil, err - } - } - - // Since we're using the actual Transport code to write the request, - // switch to http so the Transport doesn't try to do an SSL - // negotiation with our dumpConn and its bytes.Buffer & pipe. - // The wire format for https and http are the same, anyway. 
- reqSend := req - if req.URL.Scheme == "https" { - reqSend = new(http.Request) - *reqSend = *req - reqSend.URL = new(url.URL) - *reqSend.URL = *req.URL - reqSend.URL.Scheme = "http" - } - - // Use the actual Transport code to record what we would send - // on the wire, but not using TCP. Use a Transport with a - // custom dialer that returns a fake net.Conn that waits - // for the full input (and recording it), and then responds - // with a dummy response. - var buf bytes.Buffer // records the output - pr, pw := io.Pipe() - dr := &delegateReader{c: make(chan io.Reader)} - // Wait for the request before replying with a dummy response: - go func() { - http.ReadRequest(bufio.NewReader(pr)) - dr.c <- strings.NewReader("HTTP/1.1 204 No Content\r\n\r\n") - }() - - t := &http.Transport{ - Dial: func(net, addr string) (net.Conn, error) { - return &dumpConn{io.MultiWriter(&buf, pw), dr}, nil - }, - } - defer t.CloseIdleConnections() - - _, err := t.RoundTrip(reqSend) - - req.Body = save - if err != nil { - return nil, err - } - dump := buf.Bytes() - - // If we used a dummy body above, remove it now. - // TODO: if the req.ContentLength is large, we allocate memory - // unnecessarily just to slice it off here. But this is just - // a debug function, so this is acceptable for now. We could - // discard the body earlier if this matters. - if dummyBody { - if i := bytes.Index(dump, []byte("\r\n\r\n")); i >= 0 { - dump = dump[:i+4] - } - } - return dump, nil -} - -// delegateReader is a reader that delegates to another reader, -// once it arrives on a channel. -type delegateReader struct { - c chan io.Reader - r io.Reader // nil until received from c -} - -func (r *delegateReader) Read(p []byte) (int, error) { - if r.r == nil { - r.r = <-r.c - } - return r.r.Read(p) -} - -// Return value if nonempty, def otherwise. 
-func valueOrDefault(value, def string) string { - if value != "" { - return value - } - return def -} - -var reqWriteExcludeHeaderDump = map[string]bool{ - "Host": true, // not in Header map anyway - "Content-Length": true, - "Transfer-Encoding": true, - "Trailer": true, -} - -// dumpAsReceived writes req to w in the form as it was received, or -// at least as accurately as possible from the information retained in -// the request. -func dumpAsReceived(req *http.Request, w io.Writer) error { - return nil -} - -// DumpRequest returns the as-received wire representation of req, -// optionally including the request body, for debugging. -// DumpRequest is semantically a no-op, but in order to -// dump the body, it reads the body data into memory and -// changes req.Body to refer to the in-memory copy. -// The documentation for http.Request.Write details which fields -// of req are used. -func DumpRequest(req *http.Request, body bool) (dump []byte, err error) { - save := req.Body - if !body || req.Body == nil { - req.Body = nil - } else { - save, req.Body, err = drainBody(req.Body) - if err != nil { - return - } - } - - var b bytes.Buffer - - fmt.Fprintf(&b, "%s %s HTTP/%d.%d\r\n", valueOrDefault(req.Method, "GET"), - req.URL.RequestURI(), req.ProtoMajor, req.ProtoMinor) - - host := req.Host - if host == "" && req.URL != nil { - host = req.URL.Host - } - if host != "" { - fmt.Fprintf(&b, "Host: %s\r\n", host) - } - - chunked := len(req.TransferEncoding) > 0 && req.TransferEncoding[0] == "chunked" - if len(req.TransferEncoding) > 0 { - fmt.Fprintf(&b, "Transfer-Encoding: %s\r\n", strings.Join(req.TransferEncoding, ",")) - } - if req.Close { - fmt.Fprintf(&b, "Connection: close\r\n") - } - - err = req.Header.WriteSubset(&b, reqWriteExcludeHeaderDump) - if err != nil { - return - } - - io.WriteString(&b, "\r\n") - - if req.Body != nil { - var dest io.Writer = &b - if chunked { - dest = NewChunkedWriter(dest) - } - _, err = io.Copy(dest, req.Body) - if chunked { - 
dest.(io.Closer).Close() - io.WriteString(&b, "\r\n") - } - } - - req.Body = save - if err != nil { - return - } - dump = b.Bytes() - return -} - -// errNoBody is a sentinel error value used by failureToReadBody so we can detect -// that the lack of body was intentional. -var errNoBody = errors.New("sentinel error value") - -// failureToReadBody is a io.ReadCloser that just returns errNoBody on -// Read. It's swapped in when we don't actually want to consume the -// body, but need a non-nil one, and want to distinguish the error -// from reading the dummy body. -type failureToReadBody struct{} - -func (failureToReadBody) Read([]byte) (int, error) { return 0, errNoBody } -func (failureToReadBody) Close() error { return nil } - -var emptyBody = ioutil.NopCloser(strings.NewReader("")) - -// DumpResponse is like DumpRequest but dumps a response. -func DumpResponse(resp *http.Response, body bool) (dump []byte, err error) { - var b bytes.Buffer - save := resp.Body - savecl := resp.ContentLength - - if !body { - resp.Body = failureToReadBody{} - } else if resp.Body == nil { - resp.Body = emptyBody - } else { - save, resp.Body, err = drainBody(resp.Body) - if err != nil { - return - } - } - err = resp.Write(&b) - if err == errNoBody { - err = nil - } - resp.Body = save - resp.ContentLength = savecl - if err != nil { - return nil, err - } - return b.Bytes(), nil -} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/core/http/httputil/dump_test.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/core/http/httputil/dump_test.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/core/http/httputil/dump_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/core/http/httputil/dump_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,263 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package httputil - -import ( - "bytes" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/url" - "runtime" - "strings" - "testing" -) - -type dumpTest struct { - Req http.Request - Body interface{} // optional []byte or func() io.ReadCloser to populate Req.Body - - WantDump string - WantDumpOut string - NoBody bool // if true, set DumpRequest{,Out} body to false -} - -var dumpTests = []dumpTest{ - - // HTTP/1.1 => chunked coding; body; empty trailer - { - Req: http.Request{ - Method: "GET", - URL: &url.URL{ - Scheme: "http", - Host: "www.google.com", - Path: "/search", - }, - ProtoMajor: 1, - ProtoMinor: 1, - TransferEncoding: []string{"chunked"}, - }, - - Body: []byte("abcdef"), - - WantDump: "GET /search HTTP/1.1\r\n" + - "Host: www.google.com\r\n" + - "Transfer-Encoding: chunked\r\n\r\n" + - chunk("abcdef") + chunk(""), - }, - - // Verify that DumpRequest preserves the HTTP version number, doesn't add a Host, - // and doesn't add a User-Agent. - { - Req: http.Request{ - Method: "GET", - URL: mustParseURL("/foo"), - ProtoMajor: 1, - ProtoMinor: 0, - Header: http.Header{ - "X-Foo": []string{"X-Bar"}, - }, - }, - - WantDump: "GET /foo HTTP/1.0\r\n" + - "X-Foo: X-Bar\r\n\r\n", - }, - - { - Req: *mustNewRequest("GET", "http://example.com/foo", nil), - - WantDumpOut: "GET /foo HTTP/1.1\r\n" + - "Host: example.com\r\n" + - "User-Agent: Go 1.1 package http\r\n" + - "Accept-Encoding: gzip\r\n\r\n", - }, - - // Test that an https URL doesn't try to do an SSL negotiation - // with a bytes.Buffer and hang with all goroutines not - // runnable. - { - Req: *mustNewRequest("GET", "https://example.com/foo", nil), - - WantDumpOut: "GET /foo HTTP/1.1\r\n" + - "Host: example.com\r\n" + - "User-Agent: Go 1.1 package http\r\n" + - "Accept-Encoding: gzip\r\n\r\n", - }, - - // Request with Body, but Dump requested without it. 
- { - Req: http.Request{ - Method: "POST", - URL: &url.URL{ - Scheme: "http", - Host: "post.tld", - Path: "/", - }, - ContentLength: 6, - ProtoMajor: 1, - ProtoMinor: 1, - }, - - Body: []byte("abcdef"), - - WantDumpOut: "POST / HTTP/1.1\r\n" + - "Host: post.tld\r\n" + - "User-Agent: Go 1.1 package http\r\n" + - "Content-Length: 6\r\n" + - "Accept-Encoding: gzip\r\n\r\n", - - NoBody: true, - }, -} - -func TestDumpRequest(t *testing.T) { - numg0 := runtime.NumGoroutine() - for i, tt := range dumpTests { - setBody := func() { - if tt.Body == nil { - return - } - switch b := tt.Body.(type) { - case []byte: - tt.Req.Body = ioutil.NopCloser(bytes.NewReader(b)) - case func() io.ReadCloser: - tt.Req.Body = b() - } - } - setBody() - if tt.Req.Header == nil { - tt.Req.Header = make(http.Header) - } - - if tt.WantDump != "" { - setBody() - dump, err := DumpRequest(&tt.Req, !tt.NoBody) - if err != nil { - t.Errorf("DumpRequest #%d: %s", i, err) - continue - } - if string(dump) != tt.WantDump { - t.Errorf("DumpRequest %d, expecting:\n%s\nGot:\n%s\n", i, tt.WantDump, string(dump)) - continue - } - } - - if tt.WantDumpOut != "" { - setBody() - dump, err := DumpRequestOut(&tt.Req, !tt.NoBody) - if err != nil { - t.Errorf("DumpRequestOut #%d: %s", i, err) - continue - } - if string(dump) != tt.WantDumpOut { - t.Errorf("DumpRequestOut %d, expecting:\n%s\nGot:\n%s\n", i, tt.WantDumpOut, string(dump)) - continue - } - } - } - if dg := runtime.NumGoroutine() - numg0; dg > 4 { - t.Errorf("Unexpectedly large number of new goroutines: %d new", dg) - } -} - -func chunk(s string) string { - return fmt.Sprintf("%x\r\n%s\r\n", len(s), s) -} - -func mustParseURL(s string) *url.URL { - u, err := url.Parse(s) - if err != nil { - panic(fmt.Sprintf("Error parsing URL %q: %v", s, err)) - } - return u -} - -func mustNewRequest(method, url string, body io.Reader) *http.Request { - req, err := http.NewRequest(method, url, body) - if err != nil { - panic(fmt.Sprintf("NewRequest(%q, %q, %p) err = %v", 
method, url, body, err)) - } - return req -} - -var dumpResTests = []struct { - res *http.Response - body bool - want string -}{ - { - res: &http.Response{ - Status: "200 OK", - StatusCode: 200, - Proto: "HTTP/1.1", - ProtoMajor: 1, - ProtoMinor: 1, - ContentLength: 50, - Header: http.Header{ - "Foo": []string{"Bar"}, - }, - Body: ioutil.NopCloser(strings.NewReader("foo")), // shouldn't be used - }, - body: false, // to verify we see 50, not empty or 3. - want: `HTTP/1.1 200 OK -Content-Length: 50 -Foo: Bar`, - }, - - { - res: &http.Response{ - Status: "200 OK", - StatusCode: 200, - Proto: "HTTP/1.1", - ProtoMajor: 1, - ProtoMinor: 1, - ContentLength: 3, - Body: ioutil.NopCloser(strings.NewReader("foo")), - }, - body: true, - want: `HTTP/1.1 200 OK -Content-Length: 3 - -foo`, - }, - - { - res: &http.Response{ - Status: "200 OK", - StatusCode: 200, - Proto: "HTTP/1.1", - ProtoMajor: 1, - ProtoMinor: 1, - ContentLength: -1, - Body: ioutil.NopCloser(strings.NewReader("foo")), - TransferEncoding: []string{"chunked"}, - }, - body: true, - want: `HTTP/1.1 200 OK -Transfer-Encoding: chunked - -3 -foo -0`, - }, -} - -func TestDumpResponse(t *testing.T) { - for i, tt := range dumpResTests { - gotb, err := DumpResponse(tt.res, tt.body) - if err != nil { - t.Errorf("%d. 
DumpResponse = %v", i, err) - continue - } - got := string(gotb) - got = strings.TrimSpace(got) - got = strings.Replace(got, "\r", "", -1) - - if got != tt.want { - t.Errorf("%d.\nDumpResponse got:\n%s\n\nWant:\n%s\n", i, got, tt.want) - } - } -} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/core/http/httputil/httputil.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/core/http/httputil/httputil.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/core/http/httputil/httputil.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/core/http/httputil/httputil.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,32 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package httputil provides HTTP utility functions, complementing the -// more common ones in the net/http package. -package httputil - -import "io" - -// NewChunkedReader returns a new chunkedReader that translates the data read from r -// out of HTTP "chunked" format before returning it. -// The chunkedReader returns io.EOF when the final 0-length chunk is read. -// -// NewChunkedReader is not needed by normal applications. The http package -// automatically decodes chunking when reading response bodies. -func NewChunkedReader(r io.Reader) io.Reader { - return newChunkedReader(r) -} - -// NewChunkedWriter returns a new chunkedWriter that translates writes into HTTP -// "chunked" format before writing them to w. Closing the returned chunkedWriter -// sends the final 0-length chunk that marks the end of the stream. -// -// NewChunkedWriter is not needed by normal applications. The http -// package adds chunking automatically if handlers don't set a -// Content-Length header. 
Using NewChunkedWriter inside a handler -// would result in double chunking or chunking with a Content-Length -// length, both of which are wrong. -func NewChunkedWriter(w io.Writer) io.WriteCloser { - return newChunkedWriter(w) -} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/core/http/httputil/persist.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/core/http/httputil/persist.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/core/http/httputil/persist.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/core/http/httputil/persist.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,429 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package httputil - -import ( - "bufio" - "errors" - "io" - "net" - "net/http" - "net/textproto" - "sync" -) - -var ( - ErrPersistEOF = &http.ProtocolError{ErrorString: "persistent connection closed"} - ErrClosed = &http.ProtocolError{ErrorString: "connection closed by user"} - ErrPipeline = &http.ProtocolError{ErrorString: "pipeline error"} -) - -// This is an API usage error - the local side is closed. -// ErrPersistEOF (above) reports that the remote side is closed. -var errClosed = errors.New("i/o operation on closed connection") - -// A ServerConn reads requests and sends responses over an underlying -// connection, until the HTTP keepalive logic commands an end. ServerConn -// also allows hijacking the underlying connection by calling Hijack -// to regain control over the connection. ServerConn supports pipe-lining, -// i.e. requests can be read out of sync (but in the same order) while the -// respective responses are sent. -// -// ServerConn is low-level and old. Applications should instead use Server -// in the net/http package. 
-type ServerConn struct { - lk sync.Mutex // read-write protects the following fields - c net.Conn - r *bufio.Reader - re, we error // read/write errors - lastbody io.ReadCloser - nread, nwritten int - pipereq map[*http.Request]uint - - pipe textproto.Pipeline -} - -// NewServerConn returns a new ServerConn reading and writing c. If r is not -// nil, it is the buffer to use when reading c. -// -// ServerConn is low-level and old. Applications should instead use Server -// in the net/http package. -func NewServerConn(c net.Conn, r *bufio.Reader) *ServerConn { - if r == nil { - r = bufio.NewReader(c) - } - return &ServerConn{c: c, r: r, pipereq: make(map[*http.Request]uint)} -} - -// Hijack detaches the ServerConn and returns the underlying connection as well -// as the read-side bufio which may have some left over data. Hijack may be -// called before Read has signaled the end of the keep-alive logic. The user -// should not call Hijack while Read or Write is in progress. -func (sc *ServerConn) Hijack() (c net.Conn, r *bufio.Reader) { - sc.lk.Lock() - defer sc.lk.Unlock() - c = sc.c - r = sc.r - sc.c = nil - sc.r = nil - return -} - -// Close calls Hijack and then also closes the underlying connection -func (sc *ServerConn) Close() error { - c, _ := sc.Hijack() - if c != nil { - return c.Close() - } - return nil -} - -// Read returns the next request on the wire. An ErrPersistEOF is returned if -// it is gracefully determined that there are no more requests (e.g. after the -// first request on an HTTP/1.0 connection, or after a Connection:close on a -// HTTP/1.1 connection). 
-func (sc *ServerConn) Read() (req *http.Request, err error) { - - // Ensure ordered execution of Reads and Writes - id := sc.pipe.Next() - sc.pipe.StartRequest(id) - defer func() { - sc.pipe.EndRequest(id) - if req == nil { - sc.pipe.StartResponse(id) - sc.pipe.EndResponse(id) - } else { - // Remember the pipeline id of this request - sc.lk.Lock() - sc.pipereq[req] = id - sc.lk.Unlock() - } - }() - - sc.lk.Lock() - if sc.we != nil { // no point receiving if write-side broken or closed - defer sc.lk.Unlock() - return nil, sc.we - } - if sc.re != nil { - defer sc.lk.Unlock() - return nil, sc.re - } - if sc.r == nil { // connection closed by user in the meantime - defer sc.lk.Unlock() - return nil, errClosed - } - r := sc.r - lastbody := sc.lastbody - sc.lastbody = nil - sc.lk.Unlock() - - // Make sure body is fully consumed, even if user does not call body.Close - if lastbody != nil { - // body.Close is assumed to be idempotent and multiple calls to - // it should return the error that its first invocation - // returned. - err = lastbody.Close() - if err != nil { - sc.lk.Lock() - defer sc.lk.Unlock() - sc.re = err - return nil, err - } - } - - req, err = http.ReadRequest(r) - sc.lk.Lock() - defer sc.lk.Unlock() - if err != nil { - if err == io.ErrUnexpectedEOF { - // A close from the opposing client is treated as a - // graceful close, even if there was some unparse-able - // data before the close. - sc.re = ErrPersistEOF - return nil, sc.re - } else { - sc.re = err - return req, err - } - } - sc.lastbody = req.Body - sc.nread++ - if req.Close { - sc.re = ErrPersistEOF - return req, sc.re - } - return req, err -} - -// Pending returns the number of unanswered requests -// that have been received on the connection. -func (sc *ServerConn) Pending() int { - sc.lk.Lock() - defer sc.lk.Unlock() - return sc.nread - sc.nwritten -} - -// Write writes resp in response to req. To close the connection gracefully, set the -// Response.Close field to true. 
Write should be considered operational until -// it returns an error, regardless of any errors returned on the Read side. -func (sc *ServerConn) Write(req *http.Request, resp *http.Response) error { - - // Retrieve the pipeline ID of this request/response pair - sc.lk.Lock() - id, ok := sc.pipereq[req] - delete(sc.pipereq, req) - if !ok { - sc.lk.Unlock() - return ErrPipeline - } - sc.lk.Unlock() - - // Ensure pipeline order - sc.pipe.StartResponse(id) - defer sc.pipe.EndResponse(id) - - sc.lk.Lock() - if sc.we != nil { - defer sc.lk.Unlock() - return sc.we - } - if sc.c == nil { // connection closed by user in the meantime - defer sc.lk.Unlock() - return ErrClosed - } - c := sc.c - if sc.nread <= sc.nwritten { - defer sc.lk.Unlock() - return errors.New("persist server pipe count") - } - if resp.Close { - // After signaling a keep-alive close, any pipelined unread - // requests will be lost. It is up to the user to drain them - // before signaling. - sc.re = ErrPersistEOF - } - sc.lk.Unlock() - - err := resp.Write(c) - sc.lk.Lock() - defer sc.lk.Unlock() - if err != nil { - sc.we = err - return err - } - sc.nwritten++ - - return nil -} - -// A ClientConn sends request and receives headers over an underlying -// connection, while respecting the HTTP keepalive logic. ClientConn -// supports hijacking the connection calling Hijack to -// regain control of the underlying net.Conn and deal with it as desired. -// -// ClientConn is low-level and old. Applications should instead use -// Client or Transport in the net/http package. -type ClientConn struct { - lk sync.Mutex // read-write protects the following fields - c net.Conn - r *bufio.Reader - re, we error // read/write errors - lastbody io.ReadCloser - nread, nwritten int - pipereq map[*http.Request]uint - - pipe textproto.Pipeline - writeReq func(*http.Request, io.Writer) error -} - -// NewClientConn returns a new ClientConn reading and writing c. If r is not -// nil, it is the buffer to use when reading c. 
-// -// ClientConn is low-level and old. Applications should use Client or -// Transport in the net/http package. -func NewClientConn(c net.Conn, r *bufio.Reader) *ClientConn { - if r == nil { - r = bufio.NewReader(c) - } - return &ClientConn{ - c: c, - r: r, - pipereq: make(map[*http.Request]uint), - writeReq: (*http.Request).Write, - } -} - -// NewProxyClientConn works like NewClientConn but writes Requests -// using Request's WriteProxy method. -// -// New code should not use NewProxyClientConn. See Client or -// Transport in the net/http package instead. -func NewProxyClientConn(c net.Conn, r *bufio.Reader) *ClientConn { - cc := NewClientConn(c, r) - cc.writeReq = (*http.Request).WriteProxy - return cc -} - -// Hijack detaches the ClientConn and returns the underlying connection as well -// as the read-side bufio which may have some left over data. Hijack may be -// called before the user or Read have signaled the end of the keep-alive -// logic. The user should not call Hijack while Read or Write is in progress. -func (cc *ClientConn) Hijack() (c net.Conn, r *bufio.Reader) { - cc.lk.Lock() - defer cc.lk.Unlock() - c = cc.c - r = cc.r - cc.c = nil - cc.r = nil - return -} - -// Close calls Hijack and then also closes the underlying connection -func (cc *ClientConn) Close() error { - c, _ := cc.Hijack() - if c != nil { - return c.Close() - } - return nil -} - -// Write writes a request. An ErrPersistEOF error is returned if the connection -// has been closed in an HTTP keepalive sense. If req.Close equals true, the -// keepalive connection is logically closed after this request and the opposing -// server is informed. An ErrUnexpectedEOF indicates the remote closed the -// underlying TCP connection, which is usually considered as graceful close. 
-func (cc *ClientConn) Write(req *http.Request) (err error) { - - // Ensure ordered execution of Writes - id := cc.pipe.Next() - cc.pipe.StartRequest(id) - defer func() { - cc.pipe.EndRequest(id) - if err != nil { - cc.pipe.StartResponse(id) - cc.pipe.EndResponse(id) - } else { - // Remember the pipeline id of this request - cc.lk.Lock() - cc.pipereq[req] = id - cc.lk.Unlock() - } - }() - - cc.lk.Lock() - if cc.re != nil { // no point sending if read-side closed or broken - defer cc.lk.Unlock() - return cc.re - } - if cc.we != nil { - defer cc.lk.Unlock() - return cc.we - } - if cc.c == nil { // connection closed by user in the meantime - defer cc.lk.Unlock() - return errClosed - } - c := cc.c - if req.Close { - // We write the EOF to the write-side error, because there - // still might be some pipelined reads - cc.we = ErrPersistEOF - } - cc.lk.Unlock() - - err = cc.writeReq(req, c) - cc.lk.Lock() - defer cc.lk.Unlock() - if err != nil { - cc.we = err - return err - } - cc.nwritten++ - - return nil -} - -// Pending returns the number of unanswered requests -// that have been sent on the connection. -func (cc *ClientConn) Pending() int { - cc.lk.Lock() - defer cc.lk.Unlock() - return cc.nwritten - cc.nread -} - -// Read reads the next response from the wire. A valid response might be -// returned together with an ErrPersistEOF, which means that the remote -// requested that this be the last request serviced. Read can be called -// concurrently with Write, but not with another Read. 
-func (cc *ClientConn) Read(req *http.Request) (resp *http.Response, err error) { - // Retrieve the pipeline ID of this request/response pair - cc.lk.Lock() - id, ok := cc.pipereq[req] - delete(cc.pipereq, req) - if !ok { - cc.lk.Unlock() - return nil, ErrPipeline - } - cc.lk.Unlock() - - // Ensure pipeline order - cc.pipe.StartResponse(id) - defer cc.pipe.EndResponse(id) - - cc.lk.Lock() - if cc.re != nil { - defer cc.lk.Unlock() - return nil, cc.re - } - if cc.r == nil { // connection closed by user in the meantime - defer cc.lk.Unlock() - return nil, errClosed - } - r := cc.r - lastbody := cc.lastbody - cc.lastbody = nil - cc.lk.Unlock() - - // Make sure body is fully consumed, even if user does not call body.Close - if lastbody != nil { - // body.Close is assumed to be idempotent and multiple calls to - // it should return the error that its first invocation - // returned. - err = lastbody.Close() - if err != nil { - cc.lk.Lock() - defer cc.lk.Unlock() - cc.re = err - return nil, err - } - } - - resp, err = http.ReadResponse(r, req) - cc.lk.Lock() - defer cc.lk.Unlock() - if err != nil { - cc.re = err - return resp, err - } - cc.lastbody = resp.Body - - cc.nread++ - - if resp.Close { - cc.re = ErrPersistEOF // don't send any more requests - return resp, cc.re - } - return resp, err -} - -// Do is convenience method that writes a request and reads a response. 
-func (cc *ClientConn) Do(req *http.Request) (resp *http.Response, err error) { - err = cc.Write(req) - if err != nil { - return - } - return cc.Read(req) -} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/core/http/httputil/reverseproxy.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/core/http/httputil/reverseproxy.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/core/http/httputil/reverseproxy.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/core/http/httputil/reverseproxy.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,211 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// HTTP reverse proxy handler - -package httputil - -import ( - "io" - "log" - "net" - "net/http" - "net/url" - "strings" - "sync" - "time" -) - -// onExitFlushLoop is a callback set by tests to detect the state of the -// flushLoop() goroutine. -var onExitFlushLoop func() - -// ReverseProxy is an HTTP Handler that takes an incoming request and -// sends it to another server, proxying the response back to the -// client. -type ReverseProxy struct { - // Director must be a function which modifies - // the request into a new request to be sent - // using Transport. Its response is then copied - // back to the original client unmodified. - Director func(*http.Request) - - // The transport used to perform proxy requests. - // If nil, http.DefaultTransport is used. - Transport http.RoundTripper - - // FlushInterval specifies the flush interval - // to flush to the client while copying the - // response body. - // If zero, no periodic flushing is done. 
- FlushInterval time.Duration -} - -func singleJoiningSlash(a, b string) string { - aslash := strings.HasSuffix(a, "/") - bslash := strings.HasPrefix(b, "/") - switch { - case aslash && bslash: - return a + b[1:] - case !aslash && !bslash: - return a + "/" + b - } - return a + b -} - -// NewSingleHostReverseProxy returns a new ReverseProxy that rewrites -// URLs to the scheme, host, and base path provided in target. If the -// target's path is "/base" and the incoming request was for "/dir", -// the target request will be for /base/dir. -func NewSingleHostReverseProxy(target *url.URL) *ReverseProxy { - targetQuery := target.RawQuery - director := func(req *http.Request) { - req.URL.Scheme = target.Scheme - req.URL.Host = target.Host - req.URL.Path = singleJoiningSlash(target.Path, req.URL.Path) - if targetQuery == "" || req.URL.RawQuery == "" { - req.URL.RawQuery = targetQuery + req.URL.RawQuery - } else { - req.URL.RawQuery = targetQuery + "&" + req.URL.RawQuery - } - } - return &ReverseProxy{Director: director} -} - -func copyHeader(dst, src http.Header) { - for k, vv := range src { - for _, v := range vv { - dst.Add(k, v) - } - } -} - -// Hop-by-hop headers. These are removed when sent to the backend. -// http://www.w3.org/Protocols/rfc2616/rfc2616-sec13.html -var hopHeaders = []string{ - "Connection", - "Keep-Alive", - "Proxy-Authenticate", - "Proxy-Authorization", - "Te", // canonicalized version of "TE" - "Trailers", - "Transfer-Encoding", - "Upgrade", -} - -func (p *ReverseProxy) ServeHTTP(rw http.ResponseWriter, req *http.Request) { - transport := p.Transport - if transport == nil { - transport = http.DefaultTransport - } - - outreq := new(http.Request) - *outreq = *req // includes shallow copies of maps, but okay - - p.Director(outreq) - outreq.Proto = "HTTP/1.1" - outreq.ProtoMajor = 1 - outreq.ProtoMinor = 1 - outreq.Close = false - - // Remove hop-by-hop headers to the backend. 
Especially - // important is "Connection" because we want a persistent - // connection, regardless of what the client sent to us. This - // is modifying the same underlying map from req (shallow - // copied above) so we only copy it if necessary. - copiedHeaders := false - for _, h := range hopHeaders { - if outreq.Header.Get(h) != "" { - if !copiedHeaders { - outreq.Header = make(http.Header) - copyHeader(outreq.Header, req.Header) - copiedHeaders = true - } - outreq.Header.Del(h) - } - } - - if clientIP, _, err := net.SplitHostPort(req.RemoteAddr); err == nil { - // If we aren't the first proxy retain prior - // X-Forwarded-For information as a comma+space - // separated list and fold multiple headers into one. - if prior, ok := outreq.Header["X-Forwarded-For"]; ok { - clientIP = strings.Join(prior, ", ") + ", " + clientIP - } - outreq.Header.Set("X-Forwarded-For", clientIP) - } - - res, err := transport.RoundTrip(outreq) - if err != nil { - log.Printf("http: proxy error: %v", err) - rw.WriteHeader(http.StatusInternalServerError) - return - } - defer res.Body.Close() - - for _, h := range hopHeaders { - res.Header.Del(h) - } - - copyHeader(rw.Header(), res.Header) - - rw.WriteHeader(res.StatusCode) - p.copyResponse(rw, res.Body) -} - -func (p *ReverseProxy) copyResponse(dst io.Writer, src io.Reader) { - if p.FlushInterval != 0 { - if wf, ok := dst.(writeFlusher); ok { - mlw := &maxLatencyWriter{ - dst: wf, - latency: p.FlushInterval, - done: make(chan bool), - } - go mlw.flushLoop() - defer mlw.stop() - dst = mlw - } - } - - io.Copy(dst, src) -} - -type writeFlusher interface { - io.Writer - http.Flusher -} - -type maxLatencyWriter struct { - dst writeFlusher - latency time.Duration - - lk sync.Mutex // protects Write + Flush - done chan bool -} - -func (m *maxLatencyWriter) Write(p []byte) (int, error) { - m.lk.Lock() - defer m.lk.Unlock() - return m.dst.Write(p) -} - -func (m *maxLatencyWriter) flushLoop() { - t := time.NewTicker(m.latency) - defer t.Stop() - 
for { - select { - case <-m.done: - if onExitFlushLoop != nil { - onExitFlushLoop() - } - return - case <-t.C: - m.lk.Lock() - m.dst.Flush() - m.lk.Unlock() - } - } -} - -func (m *maxLatencyWriter) stop() { m.done <- true } diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/core/http/httputil/reverseproxy_test.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/core/http/httputil/reverseproxy_test.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/core/http/httputil/reverseproxy_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/core/http/httputil/reverseproxy_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,213 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Reverse proxy tests. - -package httputil - -import ( - "io/ioutil" - "net/http" - "net/http/httptest" - "net/url" - "strings" - "testing" - "time" -) - -const fakeHopHeader = "X-Fake-Hop-Header-For-Test" - -func init() { - hopHeaders = append(hopHeaders, fakeHopHeader) -} - -func TestReverseProxy(t *testing.T) { - const backendResponse = "I am the backend" - const backendStatus = 404 - backend := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if len(r.TransferEncoding) > 0 { - t.Errorf("backend got unexpected TransferEncoding: %v", r.TransferEncoding) - } - if r.Header.Get("X-Forwarded-For") == "" { - t.Errorf("didn't get X-Forwarded-For header") - } - if c := r.Header.Get("Connection"); c != "" { - t.Errorf("handler got Connection header value %q", c) - } - if c := r.Header.Get("Upgrade"); c != "" { - t.Errorf("handler got Upgrade header value %q", c) - } - if g, e := r.Host, "some-name"; g != e { - t.Errorf("backend got Host header %q, want %q", g, e) - } - w.Header().Set("X-Foo", "bar") - w.Header().Set("Upgrade", "foo") - w.Header().Set(fakeHopHeader, 
"foo") - w.Header().Add("X-Multi-Value", "foo") - w.Header().Add("X-Multi-Value", "bar") - http.SetCookie(w, &http.Cookie{Name: "flavor", Value: "chocolateChip"}) - w.WriteHeader(backendStatus) - w.Write([]byte(backendResponse)) - })) - defer backend.Close() - backendURL, err := url.Parse(backend.URL) - if err != nil { - t.Fatal(err) - } - proxyHandler := NewSingleHostReverseProxy(backendURL) - frontend := httptest.NewServer(proxyHandler) - defer frontend.Close() - - getReq, _ := http.NewRequest("GET", frontend.URL, nil) - getReq.Host = "some-name" - getReq.Header.Set("Connection", "close") - getReq.Header.Set("Upgrade", "foo") - getReq.Close = true - res, err := http.DefaultClient.Do(getReq) - if err != nil { - t.Fatalf("Get: %v", err) - } - if g, e := res.StatusCode, backendStatus; g != e { - t.Errorf("got res.StatusCode %d; expected %d", g, e) - } - if g, e := res.Header.Get("X-Foo"), "bar"; g != e { - t.Errorf("got X-Foo %q; expected %q", g, e) - } - if c := res.Header.Get(fakeHopHeader); c != "" { - t.Errorf("got %s header value %q", fakeHopHeader, c) - } - if g, e := len(res.Header["X-Multi-Value"]), 2; g != e { - t.Errorf("got %d X-Multi-Value header values; expected %d", g, e) - } - if g, e := len(res.Header["Set-Cookie"]), 1; g != e { - t.Fatalf("got %d SetCookies, want %d", g, e) - } - if cookie := res.Cookies()[0]; cookie.Name != "flavor" { - t.Errorf("unexpected cookie %q", cookie.Name) - } - bodyBytes, _ := ioutil.ReadAll(res.Body) - if g, e := string(bodyBytes), backendResponse; g != e { - t.Errorf("got body %q; expected %q", g, e) - } -} - -func TestXForwardedFor(t *testing.T) { - const prevForwardedFor = "client ip" - const backendResponse = "I am the backend" - const backendStatus = 404 - backend := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if r.Header.Get("X-Forwarded-For") == "" { - t.Errorf("didn't get X-Forwarded-For header") - } - if !strings.Contains(r.Header.Get("X-Forwarded-For"), prevForwardedFor) 
{ - t.Errorf("X-Forwarded-For didn't contain prior data") - } - w.WriteHeader(backendStatus) - w.Write([]byte(backendResponse)) - })) - defer backend.Close() - backendURL, err := url.Parse(backend.URL) - if err != nil { - t.Fatal(err) - } - proxyHandler := NewSingleHostReverseProxy(backendURL) - frontend := httptest.NewServer(proxyHandler) - defer frontend.Close() - - getReq, _ := http.NewRequest("GET", frontend.URL, nil) - getReq.Host = "some-name" - getReq.Header.Set("Connection", "close") - getReq.Header.Set("X-Forwarded-For", prevForwardedFor) - getReq.Close = true - res, err := http.DefaultClient.Do(getReq) - if err != nil { - t.Fatalf("Get: %v", err) - } - if g, e := res.StatusCode, backendStatus; g != e { - t.Errorf("got res.StatusCode %d; expected %d", g, e) - } - bodyBytes, _ := ioutil.ReadAll(res.Body) - if g, e := string(bodyBytes), backendResponse; g != e { - t.Errorf("got body %q; expected %q", g, e) - } -} - -var proxyQueryTests = []struct { - baseSuffix string // suffix to add to backend URL - reqSuffix string // suffix to add to frontend's request URL - want string // what backend should see for final request URL (without ?) -}{ - {"", "", ""}, - {"?sta=tic", "?us=er", "sta=tic&us=er"}, - {"", "?us=er", "us=er"}, - {"?sta=tic", "", "sta=tic"}, -} - -func TestReverseProxyQuery(t *testing.T) { - backend := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("X-Got-Query", r.URL.RawQuery) - w.Write([]byte("hi")) - })) - defer backend.Close() - - for i, tt := range proxyQueryTests { - backendURL, err := url.Parse(backend.URL + tt.baseSuffix) - if err != nil { - t.Fatal(err) - } - frontend := httptest.NewServer(NewSingleHostReverseProxy(backendURL)) - req, _ := http.NewRequest("GET", frontend.URL+tt.reqSuffix, nil) - req.Close = true - res, err := http.DefaultClient.Do(req) - if err != nil { - t.Fatalf("%d. Get: %v", i, err) - } - if g, e := res.Header.Get("X-Got-Query"), tt.want; g != e { - t.Errorf("%d. 
got query %q; expected %q", i, g, e) - } - res.Body.Close() - frontend.Close() - } -} - -func TestReverseProxyFlushInterval(t *testing.T) { - const expected = "hi" - backend := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Write([]byte(expected)) - })) - defer backend.Close() - - backendURL, err := url.Parse(backend.URL) - if err != nil { - t.Fatal(err) - } - - proxyHandler := NewSingleHostReverseProxy(backendURL) - proxyHandler.FlushInterval = time.Microsecond - - done := make(chan bool) - onExitFlushLoop = func() { done <- true } - defer func() { onExitFlushLoop = nil }() - - frontend := httptest.NewServer(proxyHandler) - defer frontend.Close() - - req, _ := http.NewRequest("GET", frontend.URL, nil) - req.Close = true - res, err := http.DefaultClient.Do(req) - if err != nil { - t.Fatalf("Get: %v", err) - } - defer res.Body.Close() - if bodyBytes, _ := ioutil.ReadAll(res.Body); string(bodyBytes) != expected { - t.Errorf("got body %q; expected %q", bodyBytes, expected) - } - - select { - case <-done: - // OK - case <-time.After(5 * time.Second): - t.Error("maxLatencyWriter flushLoop() never exited") - } -} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/core/http/pprof/pprof.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/core/http/pprof/pprof.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/core/http/pprof/pprof.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/core/http/pprof/pprof.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,205 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package pprof serves via its HTTP server runtime profiling data -// in the format expected by the pprof visualization tool. -// For more information about pprof, see -// http://code.google.com/p/google-perftools/. 
-// -// The package is typically only imported for the side effect of -// registering its HTTP handlers. -// The handled paths all begin with /debug/pprof/. -// -// To use pprof, link this package into your program: -// import _ "net/http/pprof" -// -// If your application is not already running an http server, you -// need to start one. Add "net/http" and "log" to your imports and -// the following code to your main function: -// -// go func() { -// log.Println(http.ListenAndServe("localhost:6060", nil)) -// }() -// -// Then use the pprof tool to look at the heap profile: -// -// go tool pprof http://localhost:6060/debug/pprof/heap -// -// Or to look at a 30-second CPU profile: -// -// go tool pprof http://localhost:6060/debug/pprof/profile -// -// Or to look at the goroutine blocking profile: -// -// go tool pprof http://localhost:6060/debug/pprof/block -// -// To view all available profiles, open http://localhost:6060/debug/pprof/ -// in your browser. -// -// For a study of the facility in action, visit -// -// http://blog.golang.org/2011/06/profiling-go-programs.html -// -package pprof - -import ( - "bufio" - "bytes" - "fmt" - "html/template" - "io" - "log" - "net/http" - "os" - "runtime" - "runtime/pprof" - "strconv" - "strings" - "time" -) - -func init() { - http.Handle("/debug/pprof/", http.HandlerFunc(Index)) - http.Handle("/debug/pprof/cmdline", http.HandlerFunc(Cmdline)) - http.Handle("/debug/pprof/profile", http.HandlerFunc(Profile)) - http.Handle("/debug/pprof/symbol", http.HandlerFunc(Symbol)) -} - -// Cmdline responds with the running program's -// command line, with arguments separated by NUL bytes. -// The package initialization registers it as /debug/pprof/cmdline. -func Cmdline(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "text/plain; charset=utf-8") - fmt.Fprintf(w, strings.Join(os.Args, "\x00")) -} - -// Profile responds with the pprof-formatted cpu profile. 
-// The package initialization registers it as /debug/pprof/profile. -func Profile(w http.ResponseWriter, r *http.Request) { - sec, _ := strconv.ParseInt(r.FormValue("seconds"), 10, 64) - if sec == 0 { - sec = 30 - } - - // Set Content Type assuming StartCPUProfile will work, - // because if it does it starts writing. - w.Header().Set("Content-Type", "application/octet-stream") - if err := pprof.StartCPUProfile(w); err != nil { - // StartCPUProfile failed, so no writes yet. - // Can change header back to text content - // and send error code. - w.Header().Set("Content-Type", "text/plain; charset=utf-8") - w.WriteHeader(http.StatusInternalServerError) - fmt.Fprintf(w, "Could not enable CPU profiling: %s\n", err) - return - } - time.Sleep(time.Duration(sec) * time.Second) - pprof.StopCPUProfile() -} - -// Symbol looks up the program counters listed in the request, -// responding with a table mapping program counters to function names. -// The package initialization registers it as /debug/pprof/symbol. -func Symbol(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "text/plain; charset=utf-8") - - // We have to read the whole POST body before - // writing any output. Buffer the output here. - var buf bytes.Buffer - - // We don't know how many symbols we have, but we - // do have symbol information. Pprof only cares whether - // this number is 0 (no symbols available) or > 0. - fmt.Fprintf(&buf, "num_symbols: 1\n") - - var b *bufio.Reader - if r.Method == "POST" { - b = bufio.NewReader(r.Body) - } else { - b = bufio.NewReader(strings.NewReader(r.URL.RawQuery)) - } - - for { - word, err := b.ReadSlice('+') - if err == nil { - word = word[0 : len(word)-1] // trim + - } - pc, _ := strconv.ParseUint(string(word), 0, 64) - if pc != 0 { - f := runtime.FuncForPC(uintptr(pc)) - if f != nil { - fmt.Fprintf(&buf, "%#x %s\n", pc, f.Name()) - } - } - - // Wait until here to check for err; the last - // symbol will have an err because it doesn't end in +. 
- if err != nil { - if err != io.EOF { - fmt.Fprintf(&buf, "reading request: %v\n", err) - } - break - } - } - - w.Write(buf.Bytes()) -} - -// Handler returns an HTTP handler that serves the named profile. -func Handler(name string) http.Handler { - return handler(name) -} - -type handler string - -func (name handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "text/plain; charset=utf-8") - debug, _ := strconv.Atoi(r.FormValue("debug")) - p := pprof.Lookup(string(name)) - if p == nil { - w.WriteHeader(404) - fmt.Fprintf(w, "Unknown profile: %s\n", name) - return - } - p.WriteTo(w, debug) - return -} - -// Index responds with the pprof-formatted profile named by the request. -// For example, "/debug/pprof/heap" serves the "heap" profile. -// Index responds to a request for "/debug/pprof/" with an HTML page -// listing the available profiles. -func Index(w http.ResponseWriter, r *http.Request) { - if strings.HasPrefix(r.URL.Path, "/debug/pprof/") { - name := strings.TrimPrefix(r.URL.Path, "/debug/pprof/") - if name != "" { - handler(name).ServeHTTP(w, r) - return - } - } - - profiles := pprof.Profiles() - if err := indexTmpl.Execute(w, profiles); err != nil { - log.Print(err) - } -} - -var indexTmpl = template.Must(template.New("index").Parse(` - -/debug/pprof/ - -/debug/pprof/
-
- -profiles:
- -{{range .}} -
{{.Count}}{{.Name}} -{{end}} -
-
-full goroutine stack dump
- - -`)) diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/core/http/testdata/file juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/core/http/testdata/file --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/core/http/testdata/file 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/core/http/testdata/file 1970-01-01 00:00:00.000000000 +0000 @@ -1 +0,0 @@ -0123456789 diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/core/http/testdata/index.html juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/core/http/testdata/index.html --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/core/http/testdata/index.html 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/core/http/testdata/index.html 1970-01-01 00:00:00.000000000 +0000 @@ -1 +0,0 @@ -index.html says hello diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/core/http/testdata/style.css juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/core/http/testdata/style.css --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/core/http/testdata/style.css 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/core/http/testdata/style.css 1970-01-01 00:00:00.000000000 +0000 @@ -1 +0,0 @@ -body {} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/.gitignore juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/.gitignore --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/.gitignore 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/.gitignore 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,32 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go 
+_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof + +# Editor swap files +*.swp +*~ +.DS_Store + +# ignore vendor/ +vendor/ diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/glide.lock juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/glide.lock --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/glide.lock 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/glide.lock 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,20 @@ +hash: 7407050cee9bb9ce89e23ef26bce4051cce63d558338a4937f027a18b789e3a1 +updated: 2016-07-19T16:20:11.6189576-07:00 +imports: +- name: github.com/Azure/go-autorest + version: 6f40a8acfe03270d792cb8155e2942c09d7cff95 + subpackages: + - autorest + - autorest/azure + - autorest/date + - autorest/to +- name: github.com/dgrijalva/jwt-go + version: 01aeca54ebda6e0fbfafd0a524d234159c05ec20 +- name: golang.org/x/crypto + version: 911fafb28f4ee7c7bd483539a6c96190bbbccc3f + subpackages: + - pkcs12 + - pkcs12/internal/rc2 +- name: gopkg.in/check.v1 + version: 4f90aeace3a26ad7021961c297b22c42160c7b25 +testImports: [] diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/glide.yaml juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/glide.yaml --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/glide.yaml 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/glide.yaml 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,12 @@ +package: github.com/Azure/azure-sdk-for-go +import: +- package: github.com/Azure/go-autorest + subpackages: + - /autorest + - autorest/azure + - autorest/date + - autorest/to +- package: golang.org/x/crypto + subpackages: + - /pkcs12 +- package: gopkg.in/check.v1 \ No newline at end of file diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/Godeps.json juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/Godeps.json --- 
juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/Godeps.json 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/Godeps.json 1970-01-01 00:00:00.000000000 +0000 @@ -1,27 +0,0 @@ -{ - "ImportPath": "github.com/Azure/azure-sdk-for-go", - "GoVersion": "go1.5.1", - "Packages": [ - "./..." - ], - "Deps": [ - { - "ImportPath": "github.com/Azure/go-autorest/autorest", - "Comment": "v1.1.0-6-ga3998c5", - "Rev": "a3998c5cde825491a61b5dfdb92a098e6155fef0" - }, - { - "ImportPath": "github.com/dgrijalva/jwt-go", - "Comment": "v2.4.0", - "Rev": "f164e17f59b82642a3895ba065c385db6c547344" - }, - { - "ImportPath": "golang.org/x/crypto/pkcs12", - "Rev": "346896d57731cb5670b36c6178fc5519f3225980" - }, - { - "ImportPath": "gopkg.in/check.v1", - "Rev": "11d3bc7aa68e238947792f30573146a3231fc0f1" - } - ] -} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/Readme juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/Readme --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/Readme 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/Readme 1970-01-01 00:00:00.000000000 +0000 @@ -1,5 +0,0 @@ -This directory tree is generated automatically by godep. - -Please do not edit. - -See https://github.com/tools/godep for more information. 
diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/autorest.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/autorest.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/autorest.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/autorest.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,163 +0,0 @@ -/* -Package autorest implements an HTTP request pipeline suitable for use across multiple go-routines -and provides the shared routines relied on by AutoRest (see https://github.com/Azure/autorest/) -generated Go code. - -The package breaks sending and responding to HTTP requests into three phases: Preparing, Sending, -and Responding. A typical pattern is: - - req, err := Prepare(&http.Request{}, - token.WithAuthorization()) - - resp, err := Send(req, - WithLogging(logger), - DoErrorIfStatusCode(http.StatusInternalServerError), - DoCloseIfError(), - DoRetryForAttempts(5, time.Second)) - - err = Respond(resp, - ByClosing()) - -Each phase relies on decorators to modify and / or manage processing. Decorators may first modify -and then pass the data along, pass the data first and then modify the result, or wrap themselves -around passing the data (such as a logger might do). Decorators run in the order provided. For -example, the following: - - req, err := Prepare(&http.Request{}, - WithBaseURL("https://microsoft.com/"), - WithPath("a"), - WithPath("b"), - WithPath("c")) - -will set the URL to: - - https://microsoft.com/a/b/c - -Preparers and Responders may be shared and re-used (assuming the underlying decorators support -sharing and re-use). 
Performant use is obtained by creating one or more Preparers and Responders -shared among multiple go-routines, and a single Sender shared among multiple sending go-routines, -all bound together by means of input / output channels. - -Decorators hold their passed state within a closure (such as the path components in the example -above). Be careful to share Preparers and Responders only in a context where such held state -applies. For example, it may not make sense to share a Preparer that applies a query string from a -fixed set of values. Similarly, sharing a Responder that reads the response body into a passed -struct (e.g., ByUnmarshallingJson) is likely incorrect. - -Lastly, the Swagger specification (https://swagger.io) that drives AutoRest -(https://github.com/Azure/autorest/) precisely defines two date forms: date and date-time. The -github.com/Azure/go-autorest/autorest/date package provides time.Time derivations to ensure -correct parsing and formatting. - -Errors raised by autorest objects and methods will conform to the autorest.Error interface. - -See the included examples for more detail. For details on the suggested use of this package by -generated clients, see the Client described below. -*/ -package autorest - -import ( - "net/http" - "time" -) - -const ( - headerLocation = "Location" - headerRetryAfter = "Retry-After" -) - -// ResponseHasStatusCode returns true if the status code in the HTTP Response is in the passed set -// and false otherwise. -func ResponseHasStatusCode(resp *http.Response, codes ...int) bool { - return containsInt(codes, resp.StatusCode) -} - -// ResponseRequiresPolling returns true if the passed http.Response requires polling follow-up -// request (as determined by the status code being in the passed set, which defaults to HTTP 202 -// Accepted). 
-func ResponseRequiresPolling(resp *http.Response, codes ...int) bool { - if resp.StatusCode == http.StatusOK { - return false - } - - if len(codes) == 0 { - codes = []int{http.StatusAccepted} - } - - return ResponseHasStatusCode(resp, codes...) -} - -// NewPollingRequest allocates and returns a new http.Request to poll for the passed response. If -// it successfully creates the request, it will also close the body of the passed response, -// otherwise the body remains open. -func NewPollingRequest(resp *http.Response, authorizer Authorizer) (*http.Request, error) { - location := GetPollingLocation(resp) - if location == "" { - return nil, NewError("autorest", "NewPollingRequest", "Location header missing from response that requires polling") - } - - req, err := Prepare(&http.Request{}, - AsGet(), - WithBaseURL(location), - authorizer.WithAuthorization()) - if err != nil { - return nil, NewErrorWithError(err, "autorest", "NewPollingRequest", "Failure creating poll request to %s", location) - } - - Respond(resp, - ByClosing()) - - return req, nil -} - -// GetPollingDelay extracts the polling delay from the Retry-After header of the passed response. If -// the header is absent or is malformed, it will return the supplied default delay time.Duration. -func GetPollingDelay(resp *http.Response, defaultDelay time.Duration) time.Duration { - retry := resp.Header.Get(headerRetryAfter) - if retry == "" { - return defaultDelay - } - - d, err := time.ParseDuration(retry + "s") - if err != nil { - return defaultDelay - } - - return d -} - -// GetPollingLocation retrieves the polling URL from the Location header of the passed response. -func GetPollingLocation(resp *http.Response) string { - return resp.Header.Get(headerLocation) -} - -// PollForAttempts will retry the passed http.Request until it receives an HTTP status code outside -// the passed set or has made the specified number of attempts. The set of status codes defaults to -// HTTP 202 Accepted. 
-func PollForAttempts(s Sender, req *http.Request, defaultDelay time.Duration, attempts int, codes ...int) (*http.Response, error) { - return SendWithSender( - decorateForPolling(s, defaultDelay, codes...), - req, - DoRetryForAttempts(attempts, time.Duration(0))) -} - -// PollForDuration will retry the passed http.Request until it receives an HTTP status code outside -// the passed set or the total time meets or exceeds the specified duration. The set of status codes -// defaults to HTTP 202 Accepted. -func PollForDuration(s Sender, req *http.Request, defaultDelay time.Duration, total time.Duration, codes ...int) (*http.Response, error) { - return SendWithSender( - decorateForPolling(s, defaultDelay, codes...), - req, - DoRetryForDuration(total, time.Duration(0))) -} - -func decorateForPolling(s Sender, defaultDelay time.Duration, codes ...int) Sender { - if len(codes) == 0 { - codes = []int{http.StatusAccepted} - } - - return DecorateSender(s, - AfterRetryDelay(defaultDelay), - DoErrorIfStatusCode(codes...), - DoCloseIfError()) -} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/autorest_test.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/autorest_test.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/autorest_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/autorest_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,348 +0,0 @@ -package autorest - -import ( - "net/http" - "testing" - "time" - - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/mocks" -) - -func TestResponseHasStatusCode(t *testing.T) { - codes := []int{http.StatusOK, http.StatusAccepted} - resp := 
&http.Response{StatusCode: http.StatusAccepted} - if !ResponseHasStatusCode(resp, codes...) { - t.Errorf("autorest: ResponseHasStatusCode failed to find %v in %v", resp.StatusCode, codes) - } -} - -func TestResponseHasStatusCodeNotPresent(t *testing.T) { - codes := []int{http.StatusOK, http.StatusAccepted} - resp := &http.Response{StatusCode: http.StatusInternalServerError} - if ResponseHasStatusCode(resp, codes...) { - t.Errorf("autorest: ResponseHasStatusCode unexpectedly found %v in %v", resp.StatusCode, codes) - } -} - -func TestResponseRequiresPollingIgnoresSuccess(t *testing.T) { - resp := mocks.NewResponse() - - if ResponseRequiresPolling(resp) { - t.Error("autorest: ResponseRequiresPolling did not ignore a successful response") - } -} - -func TestResponseRequiresPollingLeavesBodyOpen(t *testing.T) { - resp := mocks.NewResponse() - - ResponseRequiresPolling(resp) - if !resp.Body.(*mocks.Body).IsOpen() { - t.Error("autorest: ResponseRequiresPolling closed the responise body while ignoring a successful response") - } -} - -func TestResponseRequiresPollingDefaultsToAcceptedStatusCode(t *testing.T) { - resp := mocks.NewResponseWithStatus("202 Accepted", http.StatusAccepted) - mocks.SetAcceptedHeaders(resp) - - if !ResponseRequiresPolling(resp) { - t.Error("autorest: ResponseRequiresPolling failed to create a request for default 202 Accepted status code") - } -} - -func TestResponseRequiresPollingReturnsFalseForUnexpectedStatusCodes(t *testing.T) { - resp := mocks.NewResponseWithStatus("500 InternalServerError", http.StatusInternalServerError) - mocks.SetAcceptedHeaders(resp) - - if ResponseRequiresPolling(resp) { - t.Error("autorest: ResponseRequiresPolling did not return false when ignoring a status code") - } -} - -func TestNewPollingRequestLeavesBodyOpenWhenLocationHeaderIsMissing(t *testing.T) { - resp := mocks.NewResponseWithStatus("500 InternalServerError", http.StatusInternalServerError) - - NewPollingRequest(resp, NullAuthorizer{}) - if 
!resp.Body.(*mocks.Body).IsOpen() { - t.Error("autorest: NewPollingRequest closed the http.Request Body when the Location header was missing") - } -} - -func TestNewPollingRequestDoesNotReturnARequestWhenLocationHeaderIsMissing(t *testing.T) { - resp := mocks.NewResponseWithStatus("500 InternalServerError", http.StatusInternalServerError) - - req, _ := NewPollingRequest(resp, NullAuthorizer{}) - if req != nil { - t.Error("autorest: NewPollingRequest returned an http.Request when the Location header was missing") - } -} - -func TestNewPollingRequestReturnsAnErrorWhenPrepareFails(t *testing.T) { - resp := mocks.NewResponseWithStatus("202 Accepted", http.StatusAccepted) - mocks.SetAcceptedHeaders(resp) - - _, err := NewPollingRequest(resp, mockFailingAuthorizer{}) - if err == nil { - t.Error("autorest: NewPollingRequest failed to return an error when Prepare fails") - } -} - -func TestNewPollingRequestLeavesBodyOpenWhenPrepareFails(t *testing.T) { - resp := mocks.NewResponseWithStatus("202 Accepted", http.StatusAccepted) - mocks.SetAcceptedHeaders(resp) - resp.Header.Set(http.CanonicalHeaderKey(headerLocation), testBadURL) - - _, err := NewPollingRequest(resp, NullAuthorizer{}) - if !resp.Body.(*mocks.Body).IsOpen() { - t.Errorf("autorest: NewPollingRequest closed the http.Request Body when Prepare returned an error (%v)", err) - } -} - -func TestNewPollingRequestDoesNotReturnARequestWhenPrepareFails(t *testing.T) { - resp := mocks.NewResponseWithStatus("202 Accepted", http.StatusAccepted) - mocks.SetAcceptedHeaders(resp) - resp.Header.Set(http.CanonicalHeaderKey(headerLocation), testBadURL) - - req, _ := NewPollingRequest(resp, NullAuthorizer{}) - if req != nil { - t.Error("autorest: NewPollingRequest returned an http.Request when Prepare failed") - } -} - -func TestNewPollingRequestClosesTheResponseBody(t *testing.T) { - resp := mocks.NewResponseWithStatus("202 Accepted", http.StatusAccepted) - mocks.SetAcceptedHeaders(resp) - - NewPollingRequest(resp, 
NullAuthorizer{}) - if resp.Body.(*mocks.Body).IsOpen() { - t.Error("autorest: NewPollingRequest failed to close the response body when creating a new request") - } -} - -func TestNewPollingRequestReturnsAGetRequest(t *testing.T) { - resp := mocks.NewResponseWithStatus("202 Accepted", http.StatusAccepted) - mocks.SetAcceptedHeaders(resp) - - req, _ := NewPollingRequest(resp, NullAuthorizer{}) - if req.Method != "GET" { - t.Errorf("autorest: NewPollingRequest did not create an HTTP GET request -- actual method %v", req.Method) - } -} - -func TestNewPollingRequestProvidesTheURL(t *testing.T) { - resp := mocks.NewResponseWithStatus("202 Accepted", http.StatusAccepted) - mocks.SetAcceptedHeaders(resp) - - req, _ := NewPollingRequest(resp, NullAuthorizer{}) - if req.URL.String() != mocks.TestURL { - t.Errorf("autorest: NewPollingRequest did not create an HTTP with the expected URL -- received %v, expected %v", req.URL, mocks.TestURL) - } -} - -func TestNewPollingRequestAppliesAuthorization(t *testing.T) { - resp := mocks.NewResponseWithStatus("202 Accepted", http.StatusAccepted) - mocks.SetAcceptedHeaders(resp) - - req, _ := NewPollingRequest(resp, mockAuthorizer{}) - if req.Header.Get(http.CanonicalHeaderKey(headerAuthorization)) != testAuthorizationHeader { - t.Errorf("autorest: NewPollingRequest did not apply authorization -- received %v, expected %v", - req.Header.Get(http.CanonicalHeaderKey(headerAuthorization)), testAuthorizationHeader) - } -} - -func TestGetPollingLocation(t *testing.T) { - resp := mocks.NewResponseWithStatus("202 Accepted", http.StatusAccepted) - mocks.SetAcceptedHeaders(resp) - - l := GetPollingLocation(resp) - if len(l) == 0 { - t.Errorf("autorest: GetPollingLocation failed to return Location header -- expected %v, received %v", mocks.TestURL, l) - } -} - -func TestGetPollingLocationReturnsEmptyStringForMissingLocation(t *testing.T) { - resp := mocks.NewResponseWithStatus("202 Accepted", http.StatusAccepted) - - l := GetPollingLocation(resp) - 
if len(l) != 0 { - t.Errorf("autorest: GetPollingLocation return a value without a Location header -- received %v", l) - } -} - -func TestGetPollingDelay(t *testing.T) { - resp := mocks.NewResponseWithStatus("202 Accepted", http.StatusAccepted) - mocks.SetAcceptedHeaders(resp) - - d := GetPollingDelay(resp, DefaultPollingDelay) - if d != mocks.TestDelay { - t.Errorf("autorest: GetPollingDelay failed to returned the expected delay -- expected %v, received %v", mocks.TestDelay, d) - } -} - -func TestGetPollingDelayReturnsDefaultDelayIfRetryHeaderIsMissing(t *testing.T) { - resp := mocks.NewResponseWithStatus("202 Accepted", http.StatusAccepted) - - d := GetPollingDelay(resp, DefaultPollingDelay) - if d != DefaultPollingDelay { - t.Errorf("autorest: GetPollingDelay failed to returned the default delay for a missing Retry-After header -- expected %v, received %v", - DefaultPollingDelay, d) - } -} - -func TestGetPollingDelayReturnsDefaultDelayIfRetryHeaderIsMalformed(t *testing.T) { - resp := mocks.NewResponseWithStatus("202 Accepted", http.StatusAccepted) - mocks.SetAcceptedHeaders(resp) - resp.Header.Set(http.CanonicalHeaderKey(headerRetryAfter), "a very bad non-integer value") - - d := GetPollingDelay(resp, DefaultPollingDelay) - if d != DefaultPollingDelay { - t.Errorf("autorest: GetPollingDelay failed to returned the default delay for a malformed Retry-After header -- expected %v, received %v", - DefaultPollingDelay, d) - } -} - -func TestPollForAttemptsStops(t *testing.T) { - client := mocks.NewSender() - client.EmitErrors(-1) - - resp := mocks.NewResponseWithStatus("202 Accepted", http.StatusAccepted) - mocks.SetAcceptedHeaders(resp) - - req, _ := NewPollingRequest(resp, NullAuthorizer{}) - - PollForAttempts(client, req, time.Duration(0), 5) - if client.Attempts() < 5 || client.Attempts() > 5 { - t.Errorf("autorest: PollForAttempts stopped incorrectly -- expected %v attempts, actual attempts were %v", 5, client.Attempts()) - } -} - -func 
TestPollForDurationsStops(t *testing.T) { - client := mocks.NewSender() - client.EmitErrors(-1) - - d := 10 * time.Millisecond - resp := mocks.NewResponseWithStatus("202 Accepted", http.StatusAccepted) - mocks.SetAcceptedHeaders(resp) - - req, _ := NewPollingRequest(resp, NullAuthorizer{}) - - start := time.Now() - PollForDuration(client, req, time.Duration(0), d) - if time.Now().Sub(start) < d { - t.Error("autorest: PollForDuration stopped too soon") - } -} - -func TestPollForDurationsStopsWithinReason(t *testing.T) { - client := mocks.NewSender() - client.EmitErrors(-1) - - d := 10 * time.Millisecond - resp := mocks.NewResponseWithStatus("202 Accepted", http.StatusAccepted) - mocks.SetAcceptedHeaders(resp) - mocks.SetRetryHeader(resp, d) - client.SetResponse(resp) - - req, _ := NewPollingRequest(resp, NullAuthorizer{}) - - start := time.Now() - PollForDuration(client, req, time.Duration(0), d) - if time.Now().Sub(start) > (time.Duration(5.0) * d) { - t.Error("autorest: PollForDuration took too long to stop -- exceeded 5 times expected duration") - } -} - -func TestPollingHonorsDelay(t *testing.T) { - client := mocks.NewSender() - client.EmitErrors(-1) - - d1 := 10 * time.Millisecond - resp := mocks.NewResponseWithStatus("202 Accepted", http.StatusAccepted) - mocks.SetAcceptedHeaders(resp) - mocks.SetRetryHeader(resp, d1) - client.SetResponse(resp) - - req, _ := NewPollingRequest(resp, NullAuthorizer{}) - - start := time.Now() - PollForAttempts(client, req, time.Duration(0), 2) - d2 := time.Now().Sub(start) - if d2 < d1 { - t.Errorf("autorest: Polling failed to honor delay -- expected %v, actual %v", d1.Seconds(), d2.Seconds()) - } -} - -func TestPollingReturnsErrorForExpectedStatusCode(t *testing.T) { - client := mocks.NewSender() - - resp := mocks.NewResponseWithStatus("202 Accepted", http.StatusAccepted) - mocks.SetAcceptedHeaders(resp) - client.SetResponse(resp) - - req, _ := NewPollingRequest(resp, NullAuthorizer{}) - - resp, err := PollForAttempts(client, 
req, time.Duration(0), 1, http.StatusAccepted) - if err == nil { - t.Error("autorest: Polling failed to emit error for known status code") - } -} - -func TestPollingReturnsNoErrorForUnexpectedStatusCode(t *testing.T) { - client := mocks.NewSender() - client.EmitStatus("500 InternalServerError", http.StatusInternalServerError) - - resp := mocks.NewResponseWithStatus("202 Accepted", http.StatusAccepted) - mocks.SetAcceptedHeaders(resp) - - req, _ := NewPollingRequest(resp, NullAuthorizer{}) - - resp, err := PollForAttempts(client, req, time.Duration(0), 1, http.StatusAccepted) - if err != nil { - t.Error("autorest: Polling emitted error for unknown status code") - } -} - -func TestPollingReturnsDefaultsToAcceptedStatusCode(t *testing.T) { - client := mocks.NewSender() - - resp := mocks.NewResponseWithStatus("202 Accepted", http.StatusAccepted) - mocks.SetAcceptedHeaders(resp) - client.SetResponse(resp) - - req, _ := NewPollingRequest(resp, NullAuthorizer{}) - - resp, err := PollForAttempts(client, req, time.Duration(0), 1) - if err == nil { - t.Error("autorest: Polling failed to default to HTTP 202") - } -} - -func TestPollingLeavesFinalBodyOpen(t *testing.T) { - client := mocks.NewSender() - client.EmitStatus("500 InternalServerError", http.StatusInternalServerError) - - resp := mocks.NewResponseWithStatus("202 Accepted", http.StatusAccepted) - mocks.SetAcceptedHeaders(resp) - - req, _ := NewPollingRequest(resp, NullAuthorizer{}) - - resp, _ = PollForAttempts(client, req, time.Duration(0), 1) - if !resp.Body.(*mocks.Body).IsOpen() { - t.Error("autorest: Polling unexpectedly closed the response body") - } -} - -func TestDecorateForPollingCloseBodyOnEachAttempt(t *testing.T) { - client := mocks.NewSender() - - resp := mocks.NewResponseWithStatus("202 Accepted", http.StatusAccepted) - mocks.SetAcceptedHeaders(resp) - client.SetResponse(resp) - - req, _ := NewPollingRequest(resp, NullAuthorizer{}) - resp, _ = PollForAttempts(client, req, time.Duration(0), 5) - if 
resp.Body.(*mocks.Body).CloseAttempts() < 5 { - t.Errorf("autorest: decorateForPolling failed to close the response Body between requests -- expected %v, received %v", - 5, resp.Body.(*mocks.Body).CloseAttempts()) - } -} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/azure/azure.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/azure/azure.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/azure/azure.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/azure/azure.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,72 +0,0 @@ -/* -Package azure provides Azure-specific implementations used with AutoRest. - -See the included examples for more detail. -*/ -package azure - -import ( - "net/http" - "strconv" - - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" -) - -const ( - // HeaderClientID is the Azure extension header to set a user-specified request ID. - HeaderClientID = "x-ms-client-request-id" - - // HeaderReturnClientID is the Azure extension header to set if the user-specified request ID - // should be included in the response. - HeaderReturnClientID = "x-ms-return-client-request-id" - - // HeaderRequestID is the Azure extension header of the service generated request ID returned - // in the response. - HeaderRequestID = "x-ms-request-id" -) - -// WithReturningClientID returns a PrepareDecorator that adds an HTTP extension header of -// x-ms-client-request-id whose value is the passed, undecorated UUID (e.g., -// "0F39878C-5F76-4DB8-A25D-61D2C193C3CA"). It also sets the x-ms-return-client-request-id -// header to true such that UUID accompanies the http.Response. 
-func WithReturningClientID(uuid string) autorest.PrepareDecorator { - preparer := autorest.CreatePreparer( - WithClientID(uuid), - WithReturnClientID(true)) - - return func(p autorest.Preparer) autorest.Preparer { - return autorest.PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err != nil { - return r, err - } - return preparer.Prepare(r) - }) - } -} - -// WithClientID returns a PrepareDecorator that adds an HTTP extension header of -// x-ms-client-request-id whose value is passed, undecorated UUID (e.g., -// "0F39878C-5F76-4DB8-A25D-61D2C193C3CA"). -func WithClientID(uuid string) autorest.PrepareDecorator { - return autorest.WithHeader(HeaderClientID, uuid) -} - -// WithReturnClientID returns a PrepareDecorator that adds an HTTP extension header of -// x-ms-return-client-request-id whose boolean value indicates if the value of the -// x-ms-client-request-id header should be included in the http.Response. -func WithReturnClientID(b bool) autorest.PrepareDecorator { - return autorest.WithHeader(HeaderReturnClientID, strconv.FormatBool(b)) -} - -// ExtractClientID extracts the client identifier from the x-ms-client-request-id header set on the -// http.Request sent to the service (and returned in the http.Response) -func ExtractClientID(resp *http.Response) string { - return autorest.ExtractHeaderValue(HeaderClientID, resp) -} - -// ExtractRequestID extracts the Azure server generated request identifier from the -// x-ms-request-id header. 
-func ExtractRequestID(resp *http.Response) string { - return autorest.ExtractHeaderValue(HeaderRequestID, resp) -} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/azure/azure_test.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/azure/azure_test.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/azure/azure_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/azure/azure_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,97 +0,0 @@ -package azure - -import ( - "fmt" - "net/http" - "strconv" - "testing" - - . "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/mocks" -) - -// Use a Client Inspector to set the request identifier. 
-func ExampleWithClientID() { - uuid := "71FDB9F4-5E49-4C12-B266-DE7B4FD999A6" - req, _ := Prepare(&http.Request{}, - AsGet(), - WithBaseURL("https://microsoft.com/a/b/c/")) - - c := Client{Sender: mocks.NewSender()} - c.RequestInspector = WithReturningClientID(uuid) - - c.Send(req) - fmt.Printf("Inspector added the %s header with the value %s\n", - HeaderClientID, req.Header.Get(HeaderClientID)) - fmt.Printf("Inspector added the %s header with the value %s\n", - HeaderReturnClientID, req.Header.Get(HeaderReturnClientID)) - // Output: - // Inspector added the x-ms-client-request-id header with the value 71FDB9F4-5E49-4C12-B266-DE7B4FD999A6 - // Inspector added the x-ms-return-client-request-id header with the value true -} - -func TestWithReturningClientIDReturnsError(t *testing.T) { - var errIn error - uuid := "71FDB9F4-5E49-4C12-B266-DE7B4FD999A6" - _, errOut := Prepare(&http.Request{}, - withErrorPrepareDecorator(&errIn), - WithReturningClientID(uuid)) - - if errOut == nil || errIn != errOut { - t.Errorf("azure: WithReturningClientID failed to exit early when receiving an error -- expected (%v), received (%v)", - errIn, errOut) - } -} - -func TestWithClientID(t *testing.T) { - uuid := "71FDB9F4-5E49-4C12-B266-DE7B4FD999A6" - req, _ := Prepare(&http.Request{}, - WithClientID(uuid)) - - if req.Header.Get(HeaderClientID) != uuid { - t.Errorf("azure: WithClientID failed to set %s -- expected %s, received %s", - HeaderClientID, uuid, req.Header.Get(HeaderClientID)) - } -} - -func TestWithReturnClientID(t *testing.T) { - b := false - req, _ := Prepare(&http.Request{}, - WithReturnClientID(b)) - - if req.Header.Get(HeaderReturnClientID) != strconv.FormatBool(b) { - t.Errorf("azure: WithReturnClientID failed to set %s -- expected %s, received %s", - HeaderClientID, strconv.FormatBool(b), req.Header.Get(HeaderClientID)) - } -} - -func TestExtractClientID(t *testing.T) { - uuid := "71FDB9F4-5E49-4C12-B266-DE7B4FD999A6" - resp := mocks.NewResponse() - 
mocks.SetResponseHeader(resp, HeaderClientID, uuid) - - if ExtractClientID(resp) != uuid { - t.Errorf("azure: ExtractClientID failed to extract the %s -- expected %s, received %s", - HeaderClientID, uuid, ExtractClientID(resp)) - } -} - -func TestExtractRequestID(t *testing.T) { - uuid := "71FDB9F4-5E49-4C12-B266-DE7B4FD999A6" - resp := mocks.NewResponse() - mocks.SetResponseHeader(resp, HeaderRequestID, uuid) - - if ExtractRequestID(resp) != uuid { - t.Errorf("azure: ExtractRequestID failed to extract the %s -- expected %s, received %s", - HeaderRequestID, uuid, ExtractRequestID(resp)) - } -} - -func withErrorPrepareDecorator(e *error) PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - *e = fmt.Errorf("autorest: Faux Prepare Error") - return r, *e - }) - } -} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/azure/example/main.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/azure/example/main.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/azure/example/main.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/azure/example/main.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,107 +0,0 @@ -package main - -import ( - "flag" - "io/ioutil" - "log" - "net/http" - "strings" - - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/azure" -) - -const resourceGroupURLTemplate = "https://management.azure.com/subscriptions/{subscription-id}/resourcegroups" -const apiVersion = "2015-01-01" - -var ( - certificatePath string - applicationID 
string - tenantID string - subscriptionID string -) - -func init() { - flag.StringVar(&certificatePath, "certificatePath", "", "path to pk12/pfx certificate") - flag.StringVar(&applicationID, "applicationId", "", "application id") - flag.StringVar(&tenantID, "tenantId", "", "tenant id") - flag.StringVar(&subscriptionID, "subscriptionId", "", "subscription id") - flag.Parse() - - log.Println("Using these settings:") - log.Println("* certificatePath:", certificatePath) - log.Println("* applicationID:", applicationID) - log.Println("* tenantID:", tenantID) - log.Println("* subscriptionID:", subscriptionID) - - if strings.Trim(certificatePath, " ") == "" || - strings.Trim(applicationID, " ") == "" || - strings.Trim(tenantID, " ") == "" || - strings.Trim(subscriptionID, " ") == "" { - log.Fatalln("Bad usage. Please specify all four parameters") - } -} - -func main() { - log.Println("loading certificate... ") - certData, err := ioutil.ReadFile(certificatePath) - if err != nil { - log.Fatalln("failed", err) - } - - log.Println("retrieve oauth token... ") - spt, err := azure.NewServicePrincipalTokenFromCertificate( - applicationID, - certData, - "", - tenantID, - azure.AzureResourceManagerScope) - if err != nil { - log.Fatalln("failed", err) - panic(err) - } - - client := &autorest.Client{} - client.Authorizer = spt - - log.Println("querying the list of resource groups... 
") - groupsAsString, err := getResourceGroups(client) - if err != nil { - log.Fatalln("failed", err) - } - - log.Println("") - log.Println("Groups:", *groupsAsString) -} - -func getResourceGroups(client *autorest.Client) (*string, error) { - var p map[string]interface{} - var req *http.Request - p = map[string]interface{}{ - "subscription-id": subscriptionID, - } - q := map[string]interface{}{ - "api-version": apiVersion, - } - - req, _ = autorest.Prepare(&http.Request{}, - autorest.AsGet(), - autorest.WithBaseURL(resourceGroupURLTemplate), - autorest.WithPathParameters(p), - autorest.WithQueryParameters(q)) - - resp, err := client.Send(req, http.StatusOK) - if err != nil { - return nil, err - } - - defer resp.Body.Close() - contents, err := ioutil.ReadAll(resp.Body) - if err != nil { - return nil, err - } - - contentsString := string(contents) - - return &contentsString, nil -} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/azure/example/README.md juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/azure/example/README.md --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/azure/example/README.md 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/azure/example/README.md 1970-01-01 00:00:00.000000000 +0000 @@ -1,115 +0,0 @@ -# autorest azure example - -## Usage - -This example covers how to make an authenticated call to the Azure Resource Manager APIs, using certificate-based authentication. - -0. Export some required variables - - ``` - export SUBSCRIPTION_ID="aff271ee-e9be-4441-b9bb-42f5af4cbaeb" - export TENANT_ID="13de0a15-b5db-44b9-b682-b4ba82afbd29" - export RESOURCE_GROUP="someresourcegroup" - ``` - - * replace both values with your own - -1. 
Create a private key - - ``` - openssl genrsa -out "example.key" 2048 - ``` - - - -2. Create the certificate - - ``` - openssl req -new -key "example.key" -subj "/CN=example" -out "example.csr" - - openssl x509 -req -in "example.csr" -signkey "example.key" -out "example.crt" -days 10000 - ``` - - - -3. Create the PKCS12 version of the certificate (with no password) - - ``` - openssl pkcs12 -export -out "example.pfx" -inkey "example.key" -in "example.crt" -passout pass: - ``` - - - -4. Register a new Azure AD Application with the certificate contents - - ``` - certificateContents="$(tail -n+2 "example.key" | head -n-1)" - - azure ad app create \ - --name "example-azuread-app" \ - --home-page="http://example-azuread-app/home" \ - --identifier-uris "http://example-azuread-app/app" \ - --key-usage "Verify" \ - --end-date "2020-01-01" \ - --key-value "${certificateContents}" - ``` - - - -5. Create a new service principal using the "Application Id" from the previous step - - ``` - azure ad sp create "APPLICATION_ID" - ``` - - * Replace APPLICATION_ID with the "Application Id" returned in step 4 - - - -6. Grant your service principal necessary permissions - - ``` - azure role assignment create \ - --resource-group "${RESOURCE_GROUP}" \ - --roleName "Contributor" \ - --subscription "${SUBSCRIPTION_ID}" \ - --spn "http://example-azuread-app/app" - ``` - - * Replace SUBSCRIPTION_ID with your subscription id - * Replace RESOURCE_GROUP with the resource group for the assignment - * Ensure that the `spn` parameter matches an `identifier-url` from Step 4 - - - -7. 
Run this example app to see your resource groups - - ``` - go run main.go \ - --tenantId="${TENANT_ID}" \ - --subscriptionId="${SUBSCRIPTION_ID}" \ - --applicationId="http://example-azuread-app/app" \ - --certificatePath="certificate.pfx" - ``` - - -You should see something like this as output: - -``` -2015/11/08 18:28:39 Using these settings: -2015/11/08 18:28:39 * certificatePath: certificate.pfx -2015/11/08 18:28:39 * applicationID: http://example-azuread-app/app -2015/11/08 18:28:39 * tenantID: 13de0a15-b5db-44b9-b682-b4ba82afbd29 -2015/11/08 18:28:39 * subscriptionID: aff271ee-e9be-4441-b9bb-42f5af4cbaeb -2015/11/08 18:28:39 loading certificate... -2015/11/08 18:28:39 retrieve oauth token... -2015/11/08 18:28:39 querying the list of resource groups... -2015/11/08 18:28:50 -2015/11/08 18:28:50 Groups: {"value":[{"id":"/subscriptions/aff271ee-e9be-4441-b9bb-42f5af4cbaeb/resourceGroups/kube-66f30810","name":"kube-66f30810","location":"westus","tags":{},"properties":{"provisioningState":"Succeeded"}}]} -``` - - - -## Notes - -You may need to wait sometime between executing step 4, step 5 and step 6. If you issue those requests too quickly, you might hit an AD server that is not consistent with the server where the resource was created. 
diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/azure/token.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/azure/token.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/azure/token.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/azure/token.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,308 +0,0 @@ -package azure - -import ( - "crypto/rand" - "crypto/rsa" - "crypto/sha1" - "encoding/base64" - "fmt" - "net/http" - "net/url" - "strconv" - "time" - - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go" - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/pkcs12" -) - -const ( - defaultRefresh = 5 * time.Minute - oauthURL = "https://login.microsoftonline.com/{tenantID}/oauth2/{requestType}?api-version=1.0" - tokenBaseDate = "1970-01-01T00:00:00Z" - - jwtAudienceTemplate = "https://login.microsoftonline.com/%s/oauth2/token" - - // AzureResourceManagerScope is the OAuth scope for the Azure Resource Manager. - AzureResourceManagerScope = "https://management.azure.com/" -) - -var expirationBase time.Time - -func init() { - expirationBase, _ = time.Parse(time.RFC3339, tokenBaseDate) -} - -// Token encapsulates the access token used to authorize Azure requests. -type Token struct { - AccessToken string `json:"access_token"` - - ExpiresIn string `json:"expires_in"` - ExpiresOn string `json:"expires_on"` - NotBefore string `json:"not_before"` - - Resource string `json:"resource"` - Type string `json:"token_type"` -} - -// Expires returns the time.Time when the Token expires. 
-func (t Token) Expires() time.Time { - s, err := strconv.Atoi(t.ExpiresOn) - if err != nil { - s = -3600 - } - return expirationBase.Add(time.Duration(s) * time.Second).UTC() -} - -// IsExpired returns true if the Token is expired, false otherwise. -func (t Token) IsExpired() bool { - return t.WillExpireIn(0) -} - -// WillExpireIn returns true if the Token will expire after the passed time.Duration interval -// from now, false otherwise. -func (t Token) WillExpireIn(d time.Duration) bool { - return !t.Expires().After(time.Now().Add(d)) -} - -// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose -// value is "Bearer " followed by the AccessToken of the Token. -func (t *Token) WithAuthorization() autorest.PrepareDecorator { - return func(p autorest.Preparer) autorest.Preparer { - return autorest.PreparerFunc(func(r *http.Request) (*http.Request, error) { - return (autorest.WithBearerAuthorization(t.AccessToken)(p)).Prepare(r) - }) - } -} - -// ServicePrincipalSecret is an interface that allows various secret mechanism to fill the form -// that is submitted when acquiring an oAuth token. -type ServicePrincipalSecret interface { - SetAuthenticationValues(spt *ServicePrincipalToken, values *url.Values) error -} - -// ServicePrincipalTokenSecret implements ServicePrincipalSecret for client_secret type authorization. -type ServicePrincipalTokenSecret struct { - ClientSecret string -} - -// SetAuthenticationValues is a method of the interface ServicePrincipalTokenSecret. -// It will populate the form submitted during oAuth Token Acquisition using the client_secret. -func (tokenSecret *ServicePrincipalTokenSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { - v.Set("client_secret", tokenSecret.ClientSecret) - return nil -} - -// ServicePrincipalCertificateSecret implements ServicePrincipalSecret for certificate auth with signed JWTs. 
-type ServicePrincipalCertificateSecret struct { - Pkcs12 []byte - Password string -} - -// SignJwt returns the JWT signed with the certificate's private key. -func (secret *ServicePrincipalCertificateSecret) SignJwt(spt *ServicePrincipalToken) (string, error) { - privateKey, cert, err := pkcs12.Decode(secret.Pkcs12, secret.Password) - if err != nil { - return "", err - } - - rsaPrivateKey, isRsaKey := privateKey.(*rsa.PrivateKey) - if !isRsaKey { - return "", fmt.Errorf("PKCS12 certificate must contain an RSA private key") - } - - hasher := sha1.New() - _, err = hasher.Write(cert.Raw) - if err != nil { - return "", err - } - - thumbprint := base64.URLEncoding.EncodeToString(hasher.Sum(nil)) - - // The jti (JWT ID) claim provides a unique identifier for the JWT. - jti := make([]byte, 20) - _, err = rand.Read(jti) - if err != nil { - return "", err - } - - token := jwt.New(jwt.SigningMethodRS256) - token.Header["x5t"] = thumbprint - token.Claims = map[string]interface{}{ - "aud": fmt.Sprintf(jwtAudienceTemplate, spt.tenantID), - "iss": spt.clientID, - "sub": spt.clientID, - "jti": base64.URLEncoding.EncodeToString(jti), - "nbf": time.Now().Unix(), - "exp": time.Now().Add(time.Hour * 24).Unix(), - } - - signedString, err := token.SignedString(rsaPrivateKey) - return signedString, nil -} - -// SetAuthenticationValues is a method of the interface ServicePrincipalTokenSecret. -// It will populate the form submitted during oAuth Token Acquisition using a JWT signed with a certificate. -func (secret *ServicePrincipalCertificateSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { - jwt, err := secret.SignJwt(spt) - if err != nil { - return err - } - - v.Set("client_assertion", jwt) - v.Set("client_assertion_type", "urn:ietf:params:oauth:client-assertion-type:jwt-bearer") - return nil -} - -// ServicePrincipalToken encapsulates a Token created for a Service Principal. 
-type ServicePrincipalToken struct { - Token - - secret ServicePrincipalSecret - clientID string - tenantID string - resource string - autoRefresh bool - refreshWithin time.Duration - sender autorest.Sender -} - -// NewServicePrincipalTokenWithSecret create a ServicePrincipalToken using the supplied ServicePrincipalSecret implementation. -func NewServicePrincipalTokenWithSecret(id string, tenantID string, resource string, secret ServicePrincipalSecret) (*ServicePrincipalToken, error) { - spt := &ServicePrincipalToken{ - secret: secret, - clientID: id, - resource: resource, - tenantID: tenantID, - autoRefresh: true, - refreshWithin: defaultRefresh, - sender: &http.Client{}, - } - return spt, nil -} - -// NewServicePrincipalToken creates a ServicePrincipalToken from the supplied Service Principal -// credentials scoped to the named resource. -func NewServicePrincipalToken(id string, secret string, tenantID string, resource string) (*ServicePrincipalToken, error) { - return NewServicePrincipalTokenWithSecret( - id, - tenantID, - resource, - &ServicePrincipalTokenSecret{ - ClientSecret: secret, - }, - ) -} - -// NewServicePrincipalTokenFromCertificate create a ServicePrincipalToken from the supplied pkcs12 bytes. -func NewServicePrincipalTokenFromCertificate(id string, pkcs12 []byte, password string, tenantID string, resource string) (*ServicePrincipalToken, error) { - return NewServicePrincipalTokenWithSecret( - id, - tenantID, - resource, - &ServicePrincipalCertificateSecret{ - Pkcs12: pkcs12, - Password: password, - }, - ) -} - -// EnsureFresh will refresh the token if it will expire within the refresh window (as set by -// RefreshWithin). -func (spt *ServicePrincipalToken) EnsureFresh() error { - if spt.WillExpireIn(spt.refreshWithin) { - return spt.Refresh() - } - return nil -} - -// Refresh obtains a fresh token for the Service Principal. 
-func (spt *ServicePrincipalToken) Refresh() error { - p := map[string]interface{}{ - "tenantID": spt.tenantID, - "requestType": "token", - } - - v := url.Values{} - v.Set("client_id", spt.clientID) - v.Set("grant_type", "client_credentials") - v.Set("resource", spt.resource) - - err := spt.secret.SetAuthenticationValues(spt, &v) - if err != nil { - return err - } - - req, err := autorest.Prepare(&http.Request{}, - autorest.AsPost(), - autorest.AsFormURLEncoded(), - autorest.WithBaseURL(oauthURL), - autorest.WithPathParameters(p), - autorest.WithFormData(v)) - if err != nil { - return err - } - - resp, err := autorest.SendWithSender(spt.sender, req) - if err != nil { - return autorest.NewErrorWithError(err, - "azure.ServicePrincipalToken", "Refresh", "Failure sending request for Service Principal %s", - spt.clientID) - } - - var newToken Token - - err = autorest.Respond(resp, - autorest.WithErrorUnlessOK(), - autorest.ByUnmarshallingJSON(&newToken), - autorest.ByClosing()) - if err != nil { - return autorest.NewErrorWithError(err, - "azure.ServicePrincipalToken", "Refresh", "Failure handling response to Service Principal %s request", - spt.clientID) - } - - spt.Token = newToken - - return nil -} - -// SetAutoRefresh enables or disables automatic refreshing of stale tokens. -func (spt *ServicePrincipalToken) SetAutoRefresh(autoRefresh bool) { - spt.autoRefresh = autoRefresh -} - -// SetRefreshWithin sets the interval within which if the token will expire, EnsureFresh will -// refresh the token. -func (spt *ServicePrincipalToken) SetRefreshWithin(d time.Duration) { - spt.refreshWithin = d - return -} - -// SetSender sets the autorest.Sender used when obtaining the Service Principal token. An -// undecorated http.Client is used by default. 
-func (spt *ServicePrincipalToken) SetSender(s autorest.Sender) { - spt.sender = s -} - -// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose -// value is "Bearer " followed by the AccessToken of the ServicePrincipalToken. -// -// By default, the token will automatically refresh if nearly expired (as determined by the -// RefreshWithin interval). Use the AutoRefresh method to enable or disable automatically refreshing -// tokens. -func (spt *ServicePrincipalToken) WithAuthorization() autorest.PrepareDecorator { - return func(p autorest.Preparer) autorest.Preparer { - return autorest.PreparerFunc(func(r *http.Request) (*http.Request, error) { - if spt.autoRefresh { - err := spt.EnsureFresh() - if err != nil { - return r, autorest.NewErrorWithError(err, - "azure.ServicePrincipalToken", "WithAuthorization", "Failed to refresh Service Principal Token for request to %s", - r.URL) - } - } - return (autorest.WithBearerAuthorization(spt.AccessToken)(p)).Prepare(r) - }) - } -} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/azure/token_test.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/azure/token_test.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/azure/token_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/azure/token_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,384 +0,0 @@ -package azure - -import ( - "fmt" - "io/ioutil" - "net/http" - "reflect" - "strconv" - "testing" - "time" - - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/mocks" -) - -const 
( - defaultFormData = "client_id=id&client_secret=secret&grant_type=client_credentials&resource=resource" -) - -func TestTokenExpires(t *testing.T) { - tt := time.Now().Add(5 * time.Second) - tk := newTokenExpiresAt(tt) - - if tk.Expires().Equal(tt) { - t.Errorf("azure: Token#Expires miscalculated expiration time -- received %v, expected %v", tk.Expires(), tt) - } -} - -func TestTokenIsExpired(t *testing.T) { - tk := newTokenExpiresAt(time.Now().Add(-5 * time.Second)) - - if !tk.IsExpired() { - t.Errorf("azure: Token#IsExpired failed to mark a stale token as expired -- now %v, token expires at %v", - time.Now().UTC(), tk.Expires()) - } -} - -func TestTokenIsExpiredUninitialized(t *testing.T) { - tk := &Token{} - - if !tk.IsExpired() { - t.Errorf("azure: An uninitialized Token failed to mark itself as expired (expiration time %v)", tk.Expires()) - } -} - -func TestTokenIsNoExpired(t *testing.T) { - tk := newTokenExpiresAt(time.Now().Add(1000 * time.Second)) - - if tk.IsExpired() { - t.Errorf("azure: Token marked a fresh token as expired -- now %v, token expires at %v", time.Now().UTC(), tk.Expires()) - } -} - -func TestTokenWillExpireIn(t *testing.T) { - d := 5 * time.Second - tk := newTokenExpiresIn(d) - - if !tk.WillExpireIn(d) { - t.Error("azure: Token#WillExpireIn mismeasured expiration time") - } -} - -func TestTokenWithAuthorization(t *testing.T) { - tk := newToken() - - req, err := autorest.Prepare(&http.Request{}, tk.WithAuthorization()) - if err != nil { - t.Errorf("azure: Token#WithAuthorization returned an error (%v)", err) - } else if req.Header.Get(http.CanonicalHeaderKey("Authorization")) != fmt.Sprintf("Bearer %s", tk.AccessToken) { - t.Error("azure: Token#WithAuthorization failed to set Authorization header") - } -} - -func TestServicePrincipalTokenSetAutoRefresh(t *testing.T) { - spt := newServicePrincipalToken() - - if !spt.autoRefresh { - t.Error("azure: ServicePrincipalToken did not default to automatic token refreshing") - } - - 
spt.SetAutoRefresh(false) - if spt.autoRefresh { - t.Error("azure: ServicePrincipalToken#SetAutoRefresh did not disable automatic token refreshing") - } -} - -func TestServicePrincipalTokenSetRefreshWithin(t *testing.T) { - spt := newServicePrincipalToken() - - if spt.refreshWithin != defaultRefresh { - t.Error("azure: ServicePrincipalToken did not correctly set the default refresh interval") - } - - spt.SetRefreshWithin(2 * defaultRefresh) - if spt.refreshWithin != 2*defaultRefresh { - t.Error("azure: ServicePrincipalToken#SetRefreshWithin did not set the refresh interval") - } -} - -func TestServicePrincipalTokenSetSender(t *testing.T) { - spt := newServicePrincipalToken() - - var s autorest.Sender - s = mocks.NewSender() - spt.SetSender(s) - if !reflect.DeepEqual(s, spt.sender) { - t.Error("azure: ServicePrincipalToken#SetSender did not set the sender") - } -} - -func TestServicePrincipalTokenRefreshUsesPOST(t *testing.T) { - spt := newServicePrincipalToken() - - c := mocks.NewSender() - s := autorest.DecorateSender(c, - (func() autorest.SendDecorator { - return func(s autorest.Sender) autorest.Sender { - return autorest.SenderFunc(func(r *http.Request) (*http.Response, error) { - if r.Method != "POST" { - t.Errorf("azure: ServicePrincipalToken#Refresh did not correctly set HTTP method -- expected %v, received %v", "POST", r.Method) - } - return mocks.NewResponse(), nil - }) - } - })()) - spt.SetSender(s) - spt.Refresh() -} - -func TestServicePrincipalTokenRefreshSetsMimeType(t *testing.T) { - spt := newServicePrincipalToken() - - c := mocks.NewSender() - s := autorest.DecorateSender(c, - (func() autorest.SendDecorator { - return func(s autorest.Sender) autorest.Sender { - return autorest.SenderFunc(func(r *http.Request) (*http.Response, error) { - if r.Header.Get(http.CanonicalHeaderKey("Content-Type")) != "application/x-www-form-urlencoded" { - t.Errorf("azure: ServicePrincipalToken#Refresh did not correctly set Content-Type -- expected %v, received %v", - 
"application/x-form-urlencoded", - r.Header.Get(http.CanonicalHeaderKey("Content-Type"))) - } - return mocks.NewResponse(), nil - }) - } - })()) - spt.SetSender(s) - spt.Refresh() -} - -func TestServicePrincipalTokenRefreshSetsURL(t *testing.T) { - spt := newServicePrincipalToken() - - c := mocks.NewSender() - s := autorest.DecorateSender(c, - (func() autorest.SendDecorator { - return func(s autorest.Sender) autorest.Sender { - return autorest.SenderFunc(func(r *http.Request) (*http.Response, error) { - u := fmt.Sprintf("https://login.microsoftonline.com/%s/oauth2/token?api-version=1.0", spt.tenantID) - if r.URL.String() != u { - t.Errorf("azure: ServicePrincipalToken#Refresh did not correctly set the URL -- expected %v, received %v", - u, r.URL) - } - return mocks.NewResponse(), nil - }) - } - })()) - spt.SetSender(s) - spt.Refresh() -} - -func TestServicePrincipalTokenRefreshSetsBody(t *testing.T) { - spt := newServicePrincipalToken() - - c := mocks.NewSender() - s := autorest.DecorateSender(c, - (func() autorest.SendDecorator { - return func(s autorest.Sender) autorest.Sender { - return autorest.SenderFunc(func(r *http.Request) (*http.Response, error) { - b, err := ioutil.ReadAll(r.Body) - if err != nil { - t.Errorf("azure: Failed to read body of Service Principal token request (%v)", err) - } else if string(b) != defaultFormData { - t.Errorf("azure: ServicePrincipalToken#Refresh did not correctly set the HTTP Request Body -- expected %v, received %v", - defaultFormData, string(b)) - } - return mocks.NewResponse(), nil - }) - } - })()) - spt.SetSender(s) - spt.Refresh() -} - -func TestServicePrincipalTokenRefreshClosesRequestBody(t *testing.T) { - spt := newServicePrincipalToken() - - resp := mocks.NewResponse() - c := mocks.NewSender() - s := autorest.DecorateSender(c, - (func() autorest.SendDecorator { - return func(s autorest.Sender) autorest.Sender { - return autorest.SenderFunc(func(r *http.Request) (*http.Response, error) { - return resp, nil - }) - } - 
})()) - spt.SetSender(s) - spt.Refresh() - - if resp.Body.(*mocks.Body).IsOpen() { - t.Error("azure: ServicePrincipalToken#Refresh failed to close the HTTP Response Body") - } -} - -func TestServicePrincipalTokenRefreshPropagatesErrors(t *testing.T) { - spt := newServicePrincipalToken() - - c := mocks.NewSender() - c.EmitErrors(1) - spt.SetSender(c) - - err := spt.Refresh() - if err == nil { - t.Error("azure: Failed to propagate the request error") - } -} - -func TestServicePrincipalTokenRefreshReturnsErrorIfNotOk(t *testing.T) { - spt := newServicePrincipalToken() - - c := mocks.NewSender() - c.EmitStatus("401 NotAuthorized", 401) - spt.SetSender(c) - - err := spt.Refresh() - if err == nil { - t.Error("azure: Failed to return an when receiving a status code other than HTTP 200") - } -} - -func TestServicePrincipalTokenRefreshUnmarshals(t *testing.T) { - spt := newServicePrincipalToken() - - expiresOn := strconv.Itoa(int(time.Now().Add(3600 * time.Second).Sub(expirationBase).Seconds())) - j := newTokenJSON(expiresOn, "resource") - resp := mocks.NewResponseWithContent(j) - c := mocks.NewSender() - s := autorest.DecorateSender(c, - (func() autorest.SendDecorator { - return func(s autorest.Sender) autorest.Sender { - return autorest.SenderFunc(func(r *http.Request) (*http.Response, error) { - return resp, nil - }) - } - })()) - spt.SetSender(s) - - err := spt.Refresh() - if err != nil { - t.Errorf("azure: ServicePrincipalToken#Refresh returned an unexpected error (%v)", err) - } else if spt.AccessToken != "accessToken" || - spt.ExpiresIn != "3600" || - spt.ExpiresOn != expiresOn || - spt.NotBefore != expiresOn || - spt.Resource != "resource" || - spt.Type != "Bearer" { - t.Errorf("azure: ServicePrincipalToken#Refresh failed correctly unmarshal the JSON -- expected %v, received %v", - j, *spt) - } -} - -func TestServicePrincipalTokenEnsureFreshRefreshes(t *testing.T) { - spt := newServicePrincipalToken() - expireToken(&spt.Token) - - f := false - c := mocks.NewSender() 
- s := autorest.DecorateSender(c, - (func() autorest.SendDecorator { - return func(s autorest.Sender) autorest.Sender { - return autorest.SenderFunc(func(r *http.Request) (*http.Response, error) { - f = true - return mocks.NewResponse(), nil - }) - } - })()) - spt.SetSender(s) - spt.EnsureFresh() - if !f { - t.Error("azure: ServicePrincipalToken#EnsureFresh failed to call Refresh for stale token") - } -} - -func TestServicePrincipalTokenEnsureFreshSkipsIfFresh(t *testing.T) { - spt := newServicePrincipalToken() - setTokenToExpireIn(&spt.Token, 1000*time.Second) - - f := false - c := mocks.NewSender() - s := autorest.DecorateSender(c, - (func() autorest.SendDecorator { - return func(s autorest.Sender) autorest.Sender { - return autorest.SenderFunc(func(r *http.Request) (*http.Response, error) { - f = true - return mocks.NewResponse(), nil - }) - } - })()) - spt.SetSender(s) - spt.EnsureFresh() - if f { - t.Error("azure: ServicePrincipalToken#EnsureFresh invoked Refresh for fresh token") - } -} - -func TestServicePrincipalTokenWithAuthorization(t *testing.T) { - spt := newServicePrincipalToken() - setTokenToExpireIn(&spt.Token, 1000*time.Second) - - req, err := autorest.Prepare(&http.Request{}, spt.WithAuthorization()) - if err != nil { - t.Errorf("azure: ServicePrincipalToken#WithAuthorization returned an error (%v)", err) - } else if req.Header.Get(http.CanonicalHeaderKey("Authorization")) != fmt.Sprintf("Bearer %s", spt.AccessToken) { - t.Error("azure: ServicePrincipalToken#WithAuthorization failed to set Authorization header") - } -} - -func TestServicePrincipalTokenWithAuthorizationReturnsErrorIfCannotRefresh(t *testing.T) { - spt := newServicePrincipalToken() - - _, err := autorest.Prepare(&http.Request{}, spt.WithAuthorization()) - if err == nil { - t.Error("azure: ServicePrincipalToken#WithAuthorization failed to return an error when refresh fails") - } -} - -func newToken() *Token { - return &Token{ - AccessToken: "ASECRETVALUE", - Resource: 
"https://azure.microsoft.com/", - Type: "Bearer", - } -} - -func newTokenJSON(expiresOn string, resource string) string { - return fmt.Sprintf(`{ - "access_token" : "accessToken", - "expires_in" : "3600", - "expires_on" : "%s", - "not_before" : "%s", - "resource" : "%s", - "token_type" : "Bearer" - }`, - expiresOn, expiresOn, resource) -} - -func newTokenExpiresIn(expireIn time.Duration) *Token { - return setTokenToExpireIn(newToken(), expireIn) -} - -func newTokenExpiresAt(expireAt time.Time) *Token { - return setTokenToExpireAt(newToken(), expireAt) -} - -func expireToken(t *Token) *Token { - return setTokenToExpireIn(t, 0) -} - -func setTokenToExpireAt(t *Token, expireAt time.Time) *Token { - t.ExpiresIn = "3600" - t.ExpiresOn = strconv.Itoa(int(expireAt.Sub(expirationBase).Seconds())) - t.NotBefore = t.ExpiresOn - return t -} - -func setTokenToExpireIn(t *Token, expireIn time.Duration) *Token { - return setTokenToExpireAt(t, time.Now().Add(expireIn)) -} - -func newServicePrincipalToken() *ServicePrincipalToken { - spt, _ := NewServicePrincipalToken("id", "secret", "tenentId", "resource") - return spt -} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/client.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/client.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/client.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/client.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,288 +0,0 @@ -package autorest - -import ( - "bytes" - "io" - "io/ioutil" - "log" - "net/http" - "time" -) - -const ( - // DefaultPollingDelay is the default delay between polling requests (only used if the - // http.Request lacks a well-formed Retry-After header). 
- DefaultPollingDelay = 60 * time.Second - - // DefaultPollingDuration is the default total polling duration. - DefaultPollingDuration = 15 * time.Minute -) - -// PollingMode sets how, if at all, clients composed with Client will poll. -type PollingMode string - -const ( - // PollUntilAttempts polling mode polls until reaching a maximum number of attempts. - PollUntilAttempts PollingMode = "poll-until-attempts" - - // PollUntilDuration polling mode polls until a specified time.Duration has passed. - PollUntilDuration PollingMode = "poll-until-duration" - - // DoNotPoll disables polling. - DoNotPoll PollingMode = "not-at-all" -) - -const ( - requestFormat = `HTTP Request Begin =================================================== -%s -===================================================== HTTP Request End -` - responseFormat = `HTTP Response Begin =================================================== -%s -===================================================== HTTP Response End -` -) - -// LoggingInspector implements request and response inspectors that log the full request and -// response to a supplied log. -type LoggingInspector struct { - Logger *log.Logger -} - -// WithInspection returns a PrepareDecorator that emits the http.Request to the supplied logger. The -// body is restored after being emitted. -// -// Note: Since it reads the entire Body, this decorator should not be used where body streaming is -// important. It is best used to trace JSON or similar body values. 
-func (li LoggingInspector) WithInspection() PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - var body, b bytes.Buffer - - defer r.Body.Close() - - r.Body = ioutil.NopCloser(io.TeeReader(r.Body, &body)) - r.Write(&b) - - li.Logger.Printf(requestFormat, b.String()) - - r.Body = ioutil.NopCloser(&body) - return p.Prepare(r) - }) - } -} - -// ByInspecting returns a RespondDecorator that emits the http.Response to the supplied logger. The -// body is restored after being emitted. -// -// Note: Since it reads the entire Body, this decorator should not be used where body streaming is -// important. It is best used to trace JSON or similar body values. -func (li LoggingInspector) ByInspecting() RespondDecorator { - return func(r Responder) Responder { - return ResponderFunc(func(resp *http.Response) error { - var body, b bytes.Buffer - - defer resp.Body.Close() - - resp.Body = ioutil.NopCloser(io.TeeReader(resp.Body, &body)) - resp.Write(&b) - - li.Logger.Printf(responseFormat, b.String()) - - resp.Body = ioutil.NopCloser(&body) - return r.Respond(resp) - }) - } -} - -var ( - // DefaultClient is the base from which generated clients should create a Client instance. Users - // can then established widely used Client defaults by replacing or modifying the DefaultClient - // before instantiating a generated client. - DefaultClient = Client{PollingMode: PollUntilDuration, PollingDuration: DefaultPollingDuration} -) - -// Client is the base for autorest generated clients. It provides default, "do nothing" -// implementations of an Authorizer, RequestInspector, and ResponseInspector. It also returns the -// standard, undecorated http.Client as a default Sender. Lastly, it supports basic request polling, -// limited to a maximum number of attempts or a specified duration. 
-// -// Generated clients should also use Error (see NewError and NewErrorWithError) for errors and -// return responses that compose with Response. -// -// Most customization of generated clients is best achieved by supplying a custom Authorizer, custom -// RequestInspector, and / or custom ResponseInspector. Users may log requests, implement circuit -// breakers (see https://msdn.microsoft.com/en-us/library/dn589784.aspx) or otherwise influence -// sending the request by providing a decorated Sender. -type Client struct { - Authorizer Authorizer - Sender Sender - RequestInspector PrepareDecorator - ResponseInspector RespondDecorator - - PollingMode PollingMode - PollingAttempts int - PollingDuration time.Duration - - // UserAgent, if not empty, will be set as the HTTP User-Agent header on all requests sent - // through the Do method. - UserAgent string -} - -// NewClientWithUserAgent returns an instance of the DefaultClient with the UserAgent set to the -// passed string. -func NewClientWithUserAgent(ua string) Client { - c := DefaultClient - c.UserAgent = ua - return c -} - -// IsPollingAllowed returns an error if the client allows polling and the passed http.Response -// requires it, otherwise it returns nil. -func (c Client) IsPollingAllowed(resp *http.Response, codes ...int) error { - if c.DoNotPoll() && ResponseRequiresPolling(resp, codes...) { - return NewError("autorest/Client", "IsPollingAllowed", "Response to %s requires polling but polling is disabled", - resp.Request.URL) - } - return nil -} - -// PollAsNeeded is a convenience method that will poll if the passed http.Response requires it. -func (c Client) PollAsNeeded(resp *http.Response, codes ...int) (*http.Response, error) { - if !ResponseRequiresPolling(resp, codes...) 
{ - return resp, nil - } - - if c.DoNotPoll() { - return resp, NewError("autorest/Client", "PollAsNeeded", "Polling for %s is required, but polling is disabled", - resp.Request.URL) - } - - req, err := NewPollingRequest(resp, c) - if err != nil { - return resp, NewErrorWithError(err, "autorest/Client", "PollAsNeeded", "Unable to create polling request for response to %s", - resp.Request.URL) - } - - Prepare(req, - c.WithInspection()) - - if c.PollForAttempts() { - return PollForAttempts(c, req, DefaultPollingDelay, c.PollingAttempts, codes...) - } - return PollForDuration(c, req, DefaultPollingDelay, c.PollingDuration, codes...) -} - -// DoNotPoll returns true if the client should not poll, false otherwise. -func (c Client) DoNotPoll() bool { - return len(c.PollingMode) == 0 || c.PollingMode == DoNotPoll -} - -// PollForAttempts returns true if the PollingMode is set to ForAttempts, false otherwise. -func (c Client) PollForAttempts() bool { - return c.PollingMode == PollUntilAttempts -} - -// PollForDuration return true if the PollingMode is set to ForDuration, false otherwise. -func (c Client) PollForDuration() bool { - return c.PollingMode == PollUntilDuration -} - -// Send sends the passed http.Request after applying authorization. It will poll if the client -// allows polling and the http.Response status code requires it. It will close the http.Response -// Body if the request returns an error. 
-func (c Client) Send(req *http.Request, codes ...int) (*http.Response, error) { - if len(codes) == 0 { - codes = []int{http.StatusOK} - } - - req, err := Prepare(req, - c.WithAuthorization(), - c.WithInspection()) - if err != nil { - return nil, NewErrorWithError(err, "autorest/Client", "Send", "Preparing request failed") - } - - resp, err := SendWithSender(c, req, - DoErrorUnlessStatusCode(codes...)) - if err == nil { - err = c.IsPollingAllowed(resp) - if err == nil { - resp, err = c.PollAsNeeded(resp) - } - } - - if err != nil { - Respond(resp, - ByClosing()) - } - - return resp, err -} - -// Do implements the Sender interface by invoking the active Sender. If Sender is not set, it uses -// a new instance of http.Client. In both cases it will, if UserAgent is set, apply set the -// User-Agent header. -func (c Client) Do(r *http.Request) (*http.Response, error) { - if len(c.UserAgent) > 0 { - r, _ = Prepare(r, WithUserAgent(c.UserAgent)) - } - return c.sender().Do(r) -} - -// sender returns the Sender to which to send requests. -func (c Client) sender() Sender { - if c.Sender == nil { - return &http.Client{} - } - return c.Sender -} - -// WithAuthorization is a convenience method that returns the WithAuthorization PrepareDecorator -// from the current Authorizer. If not Authorizer is set, it uses the NullAuthorizer. -func (c Client) WithAuthorization() PrepareDecorator { - return c.authorizer().WithAuthorization() -} - -// authorizer returns the Authorizer to use. -func (c Client) authorizer() Authorizer { - if c.Authorizer == nil { - return NullAuthorizer{} - } - return c.Authorizer -} - -// WithInspection is a convenience method that passes the request to the supplied RequestInspector, -// if present, or returns the WithNothing PrepareDecorator otherwise. 
-func (c Client) WithInspection() PrepareDecorator { - if c.RequestInspector == nil { - return WithNothing() - } - return c.RequestInspector -} - -// ByInspecting is a convenience method that passes the response to the supplied ResponseInspector, -// if present, or returns the ByIgnoring RespondDecorator otherwise. -func (c Client) ByInspecting() RespondDecorator { - if c.ResponseInspector == nil { - return ByIgnoring() - } - return c.ResponseInspector -} - -// Response serves as the base for all responses from generated clients. It provides access to the -// last http.Response. -type Response struct { - *http.Response `json:"-"` -} - -// GetPollingDelay extracts the polling delay from the Retry-After header of the response. If -// the header is absent or is malformed, it will return the supplied default delay time.Duration. -func (r Response) GetPollingDelay(defaultDelay time.Duration) time.Duration { - return GetPollingDelay(r.Response, defaultDelay) -} - -// GetPollingLocation retrieves the polling URL from the Location header of the response. 
-func (r Response) GetPollingLocation() string { - return GetPollingLocation(r.Response) -} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/client_test.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/client_test.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/client_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/client_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,621 +0,0 @@ -package autorest - -import ( - "bytes" - "fmt" - "io/ioutil" - "log" - "net/http" - "reflect" - "testing" - "time" - - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/mocks" -) - -func TestLoggingInspectorWithInspection(t *testing.T) { - b := bytes.Buffer{} - c := Client{} - li := LoggingInspector{Logger: log.New(&b, "", 0)} - c.RequestInspector = li.WithInspection() - - Prepare(mocks.NewRequestWithContent("Content"), - c.WithInspection()) - - if len(b.String()) <= 0 { - t.Error("autorest: LoggingInspector#WithInspection did not record Request to the log") - } -} - -func TestLoggingInspectorWithInspectionEmitsErrors(t *testing.T) { - b := bytes.Buffer{} - c := Client{} - r := mocks.NewRequestWithContent("Content") - li := LoggingInspector{Logger: log.New(&b, "", 0)} - c.RequestInspector = li.WithInspection() - - r.Body.Close() - Prepare(r, - c.WithInspection()) - - if len(b.String()) <= 0 { - t.Error("autorest: LoggingInspector#WithInspection did not record Request to the log") - } -} - -func TestLoggingInspectorWithInspectionRestoresBody(t *testing.T) { - b := bytes.Buffer{} - c := Client{} - r := mocks.NewRequestWithContent("Content") - li := LoggingInspector{Logger: log.New(&b, "", 0)} - 
c.RequestInspector = li.WithInspection() - - Prepare(r, - c.WithInspection()) - - s, _ := ioutil.ReadAll(r.Body) - if len(s) <= 0 { - t.Error("autorest: LoggingInspector#WithInspection did not restore the Request body") - } -} - -func TestLoggingInspectorByInspecting(t *testing.T) { - b := bytes.Buffer{} - c := Client{} - li := LoggingInspector{Logger: log.New(&b, "", 0)} - c.ResponseInspector = li.ByInspecting() - - Respond(mocks.NewResponseWithContent("Content"), - c.ByInspecting()) - - if len(b.String()) <= 0 { - t.Error("autorest: LoggingInspector#ByInspection did not record Response to the log") - } -} - -func TestLoggingInspectorByInspectingEmitsErrors(t *testing.T) { - b := bytes.Buffer{} - c := Client{} - r := mocks.NewResponseWithContent("Content") - li := LoggingInspector{Logger: log.New(&b, "", 0)} - c.ResponseInspector = li.ByInspecting() - - r.Body.Close() - Respond(r, - c.ByInspecting()) - - if len(b.String()) <= 0 { - t.Error("autorest: LoggingInspector#ByInspection did not record Response to the log") - } -} - -func TestLoggingInspectorByInspectingRestoresBody(t *testing.T) { - b := bytes.Buffer{} - c := Client{} - r := mocks.NewResponseWithContent("Content") - li := LoggingInspector{Logger: log.New(&b, "", 0)} - c.ResponseInspector = li.ByInspecting() - - Respond(r, - c.ByInspecting()) - - s, _ := ioutil.ReadAll(r.Body) - if len(s) <= 0 { - t.Error("autorest: LoggingInspector#ByInspecting did not restore the Response body") - } -} - -func TestNewClientWithUserAgent(t *testing.T) { - ua := "UserAgent" - c := NewClientWithUserAgent(ua) - - if c.UserAgent != ua { - t.Errorf("autorest: NewClientWithUserAgent failed to set the UserAgent -- expected %s, received %s", - ua, c.UserAgent) - } -} - -func TestClientIsPollingAllowed(t *testing.T) { - c := Client{PollingMode: PollUntilAttempts} - r := mocks.NewResponseWithStatus("202 Accepted", http.StatusAccepted) - - err := c.IsPollingAllowed(r) - if err != nil { - t.Errorf("autorest: Client#IsPollingAllowed 
returned an error for an http.Response that requires polling (%v)", err) - } -} - -func TestClientIsPollingAllowedIgnoresOk(t *testing.T) { - c := Client{PollingMode: PollUntilAttempts} - r := mocks.NewResponse() - - err := c.IsPollingAllowed(r) - if err != nil { - t.Error("autorest: Client#IsPollingAllowed returned an error for an http.Response that does not require polling") - } -} - -func TestClientIsPollingAllowedIgnoresDisabledForIgnoredStatusCode(t *testing.T) { - c := Client{PollingMode: PollUntilAttempts} - r := mocks.NewResponseWithStatus("400 BadRequest", http.StatusBadRequest) - - err := c.IsPollingAllowed(r) - if err != nil { - t.Errorf("autorest: Client#IsPollingAllowed returned an error for an http.Response that requires polling (%v)", err) - } -} - -func TestClientIsPollingAllowedIgnoredPollingMode(t *testing.T) { - c := Client{PollingMode: DoNotPoll} - r := mocks.NewResponseWithStatus("202 Accepted", http.StatusAccepted) - - err := c.IsPollingAllowed(r) - if err == nil { - t.Error("autorest: Client#IsPollingAllowed failed to return an error when polling is disabled") - } -} - -func TestClientPollAsNeededIgnoresOk(t *testing.T) { - c := Client{} - s := mocks.NewSender() - c.Sender = s - r := mocks.NewResponse() - - resp, err := c.PollAsNeeded(r) - if err != nil { - t.Errorf("autorest: Client#PollAsNeeded failed when given a successful HTTP request (%v)", err) - } - if s.Attempts() > 0 { - t.Error("autorest: Client#PollAsNeeded attempted to poll a successful HTTP request") - } - - Respond(resp, - ByClosing()) -} - -func TestClientPollAsNeededLeavesBodyOpen(t *testing.T) { - c := Client{} - c.Sender = mocks.NewSender() - r := mocks.NewResponse() - - resp, err := c.PollAsNeeded(r) - if err != nil { - t.Errorf("autorest: Client#PollAsNeeded failed when given a successful HTTP request (%v)", err) - } - if !resp.Body.(*mocks.Body).IsOpen() { - t.Error("autorest: Client#PollAsNeeded unexpectedly closed the response body") - } - - Respond(resp, - 
ByClosing()) -} - -func TestClientPollAsNeededReturnsErrorWhenPollingDisabled(t *testing.T) { - c := Client{} - c.Sender = mocks.NewSender() - c.PollingMode = DoNotPoll - - r := mocks.NewResponseWithStatus("202 Accepted", http.StatusAccepted) - mocks.SetAcceptedHeaders(r) - - _, err := c.PollAsNeeded(r) - if err == nil { - t.Error("autorest: Client#PollAsNeeded failed to return an error when polling was disabled but required") - } - - Respond(r, - ByClosing()) -} - -func TestClientPollAsNeededAllowsInspectionOfRequest(t *testing.T) { - c := Client{} - c.Sender = mocks.NewSender() - c.PollingMode = PollUntilAttempts - c.PollingAttempts = 1 - - mi := &mockInspector{} - c.RequestInspector = mi.WithInspection() - - r := mocks.NewResponseWithStatus("202 Accepted", http.StatusAccepted) - mocks.SetAcceptedHeaders(r) - - c.PollAsNeeded(r) - if !mi.wasInvoked { - t.Error("autorest: Client#PollAsNeeded failed to allow inspection of polling request") - } - - Respond(r, - ByClosing()) -} - -func TestClientPollAsNeededReturnsErrorIfUnableToCreateRequest(t *testing.T) { - c := Client{} - c.Authorizer = mockFailingAuthorizer{} - c.Sender = mocks.NewSender() - c.PollingMode = PollUntilAttempts - c.PollingAttempts = 1 - - r := mocks.NewResponseWithStatus("202 Accepted", http.StatusAccepted) - mocks.SetAcceptedHeaders(r) - - _, err := c.PollAsNeeded(r) - if err == nil { - t.Error("autorest: Client#PollAsNeeded failed to return error when unable to create request") - } - - Respond(r, - ByClosing()) -} - -func TestClientPollAsNeededPollsForAttempts(t *testing.T) { - c := Client{} - c.PollingMode = PollUntilAttempts - c.PollingAttempts = 5 - - s := mocks.NewSender() - s.EmitStatus("202 Accepted", http.StatusAccepted) - c.Sender = s - - r := mocks.NewResponseWithStatus("202 Accepted", http.StatusAccepted) - mocks.SetAcceptedHeaders(r) - s.SetResponse(r) - - resp, _ := c.PollAsNeeded(r) - if s.Attempts() != 5 { - t.Errorf("autorest: Client#PollAsNeeded did not poll the expected number of 
attempts -- expected %v, actual %v", - c.PollingAttempts, s.Attempts()) - } - - Respond(resp, - ByClosing()) -} - -func TestClientPollAsNeededPollsForDuration(t *testing.T) { - c := Client{} - c.PollingMode = PollUntilDuration - c.PollingDuration = 10 * time.Millisecond - - s := mocks.NewSender() - s.EmitStatus("202 Accepted", http.StatusAccepted) - c.Sender = s - - r := mocks.NewResponseWithStatus("202 Accepted", http.StatusAccepted) - mocks.SetAcceptedHeaders(r) - s.SetResponse(r) - - d1 := 10 * time.Millisecond - start := time.Now() - resp, _ := c.PollAsNeeded(r) - d2 := time.Now().Sub(start) - if d2 < d1 { - t.Errorf("autorest: Client#PollAsNeeded did not poll for the expected duration -- expected %v, actual %v", - d1.Seconds(), d2.Seconds()) - } - - Respond(resp, - ByClosing()) -} - -func TestClientDoNotPoll(t *testing.T) { - c := Client{} - - if !c.DoNotPoll() { - t.Errorf("autorest: Client requested polling by default, expected no polling (%v)", c.PollingMode) - } -} - -func TestClientDoNotPollForAttempts(t *testing.T) { - c := Client{} - c.PollingMode = PollUntilAttempts - - if c.DoNotPoll() { - t.Errorf("autorest: Client failed to request polling after polling mode set to %s", c.PollingMode) - } -} - -func TestClientDoNotPollForDuration(t *testing.T) { - c := Client{} - c.PollingMode = PollUntilDuration - - if c.DoNotPoll() { - t.Errorf("autorest: Client failed to request polling after polling mode set to %s", c.PollingMode) - } -} - -func TestClientPollForAttempts(t *testing.T) { - c := Client{} - c.PollingMode = PollUntilAttempts - - if !c.PollForAttempts() { - t.Errorf("autorest: Client#SetPollingMode failed to set polling by attempts -- polling mode set to %s", c.PollingMode) - } -} - -func TestClientPollForDuration(t *testing.T) { - c := Client{} - c.PollingMode = PollUntilDuration - - if !c.PollForDuration() { - t.Errorf("autorest: Client#SetPollingMode failed to set polling for duration -- polling mode set to %s", c.PollingMode) - } -} - -func 
TestClientSenderReturnsHttpClientByDefault(t *testing.T) { - c := Client{} - - if fmt.Sprintf("%T", c.sender()) != "*http.Client" { - t.Error("autorest: Client#sender failed to return http.Client by default") - } -} - -func TestClientSendSetsAuthorization(t *testing.T) { - r := mocks.NewRequest() - s := mocks.NewSender() - c := Client{Authorizer: mockAuthorizer{}, Sender: s} - - c.Send(r) - if len(r.Header.Get(http.CanonicalHeaderKey(headerAuthorization))) <= 0 { - t.Errorf("autorest: Client#Send failed to set Authorization header -- %s=%s", - http.CanonicalHeaderKey(headerAuthorization), - r.Header.Get(http.CanonicalHeaderKey(headerAuthorization))) - } -} - -func TestClientSendInvokesInspector(t *testing.T) { - r := mocks.NewRequest() - s := mocks.NewSender() - i := &mockInspector{} - c := Client{RequestInspector: i.WithInspection(), Sender: s} - - c.Send(r) - if !i.wasInvoked { - t.Error("autorest: Client#Send failed to invoke the RequestInspector") - } -} - -func TestClientSendReturnsPrepareError(t *testing.T) { - r := mocks.NewRequest() - s := mocks.NewSender() - c := Client{Authorizer: mockFailingAuthorizer{}, Sender: s} - - _, err := c.Send(r) - if err == nil { - t.Error("autorest: Client#Send failed to return an error the Prepare error") - } -} - -func TestClientSendSends(t *testing.T) { - r := mocks.NewRequest() - s := mocks.NewSender() - c := Client{Sender: s} - - c.Send(r) - if s.Attempts() <= 0 { - t.Error("autorest: Client#Send failed to invoke the Sender") - } -} - -func TestClientSendDefaultsToUsingStatusCodeOK(t *testing.T) { - r := mocks.NewRequest() - s := mocks.NewSender() - c := Client{Authorizer: mockAuthorizer{}, Sender: s} - - _, err := c.Send(r) - if err != nil { - t.Errorf("autorest: Client#Send returned an error for Status Code OK -- %v", - err) - } -} - -func TestClientSendClosesReponseBodyWhenReturningError(t *testing.T) { - s := mocks.NewSender() - r := mocks.NewResponseWithStatus("500 InternalServerError", 
http.StatusInternalServerError) - s.SetResponse(r) - c := Client{Sender: s} - - c.Send(mocks.NewRequest()) - if r.Body.(*mocks.Body).IsOpen() { - t.Error("autorest: Client#Send failed to close the response body when returning an error") - } -} - -func TestClientSendReturnsErrorWithUnexpectedStatusCode(t *testing.T) { - r := mocks.NewRequest() - s := mocks.NewSender() - s.EmitStatus("500 InternalServerError", http.StatusInternalServerError) - c := Client{Sender: s} - - _, err := c.Send(r) - if err == nil { - t.Error("autorest: Client#Send failed to return an error for an unexpected Status Code") - } -} - -func TestClientSendDoesNotReturnErrorForExpectedStatusCode(t *testing.T) { - r := mocks.NewRequest() - s := mocks.NewSender() - s.EmitStatus("500 InternalServerError", http.StatusInternalServerError) - c := Client{Sender: s} - - _, err := c.Send(r, http.StatusInternalServerError) - if err != nil { - t.Errorf("autorest: Client#Send returned an error for an expected Status Code -- %v", - err) - } -} - -func TestClientSendPollsIfNeeded(t *testing.T) { - r := mocks.NewRequest() - s := mocks.NewSender() - s.SetPollAttempts(5) - c := Client{Sender: s, PollingMode: PollUntilAttempts, PollingAttempts: 10} - - c.Send(r, http.StatusOK, http.StatusAccepted) - if s.Attempts() != (5 + 1) { - t.Errorf("autorest: Client#Send failed to poll the expected number of times -- attempts %d", - s.Attempts()) - } -} - -func TestClientSendDoesNotPollIfUnnecessary(t *testing.T) { - r := mocks.NewRequest() - s := mocks.NewSender() - c := Client{Sender: s, PollingMode: PollUntilAttempts, PollingAttempts: 10} - - c.Send(r, http.StatusOK, http.StatusAccepted) - if s.Attempts() != 1 { - t.Errorf("autorest: Client#Send unexpectedly polled -- attempts %d", - s.Attempts()) - } -} - -func TestClientSenderReturnsSetSender(t *testing.T) { - c := Client{} - - s := mocks.NewSender() - c.Sender = s - - if c.sender() != s { - t.Error("autorest: Client#sender failed to return set Sender") - } -} - -func 
TestClientDoInvokesSender(t *testing.T) { - c := Client{} - - s := mocks.NewSender() - c.Sender = s - - c.Do(&http.Request{}) - if s.Attempts() != 1 { - t.Error("autorest: Client#Do failed to invoke the Sender") - } -} - -func TestClientDoSetsUserAgent(t *testing.T) { - c := Client{UserAgent: "UserAgent"} - r := mocks.NewRequest() - - c.Do(r) - - if r.Header.Get(http.CanonicalHeaderKey(headerUserAgent)) != "UserAgent" { - t.Errorf("autorest: Client#Do failed to correctly set User-Agent header: %s=%s", - http.CanonicalHeaderKey(headerUserAgent), - r.Header.Get(http.CanonicalHeaderKey(headerUserAgent))) - } -} - -func TestClientAuthorizerReturnsNullAuthorizerByDefault(t *testing.T) { - c := Client{} - - if fmt.Sprintf("%T", c.authorizer()) != "autorest.NullAuthorizer" { - t.Error("autorest: Client#authorizer failed to return the NullAuthorizer by default") - } -} - -func TestClientAuthorizerReturnsSetAuthorizer(t *testing.T) { - c := Client{} - c.Authorizer = mockAuthorizer{} - - if fmt.Sprintf("%T", c.authorizer()) != "autorest.mockAuthorizer" { - t.Error("autorest: Client#authorizer failed to return the set Authorizer") - } -} - -func TestClientWithAuthorizer(t *testing.T) { - c := Client{} - c.Authorizer = mockAuthorizer{} - - req, _ := Prepare(&http.Request{}, - c.WithAuthorization()) - - if req.Header.Get(headerAuthorization) == "" { - t.Error("autorest: Client#WithAuthorizer failed to return the WithAuthorizer from the active Authorizer") - } -} - -func TestClientWithInspection(t *testing.T) { - c := Client{} - r := &mockInspector{} - c.RequestInspector = r.WithInspection() - - Prepare(&http.Request{}, - c.WithInspection()) - - if !r.wasInvoked { - t.Error("autorest: Client#WithInspection failed to invoke RequestInspector") - } -} - -func TestClientWithInspectionSetsDefault(t *testing.T) { - c := Client{} - - r1 := &http.Request{} - r2, _ := Prepare(r1, - c.WithInspection()) - - if !reflect.DeepEqual(r1, r2) { - t.Error("autorest: Client#WithInspection failed 
to provide a default RequestInspector") - } -} - -func TestClientByInspecting(t *testing.T) { - c := Client{} - r := &mockInspector{} - c.ResponseInspector = r.ByInspecting() - - Respond(&http.Response{}, - c.ByInspecting()) - - if !r.wasInvoked { - t.Error("autorest: Client#ByInspecting failed to invoke ResponseInspector") - } -} - -func TestClientByInspectingSetsDefault(t *testing.T) { - c := Client{} - - r := &http.Response{} - Respond(r, - c.ByInspecting()) - - if !reflect.DeepEqual(r, &http.Response{}) { - t.Error("autorest: Client#ByInspecting failed to provide a default ResponseInspector") - } -} - -func TestResponseGetPollingDelay(t *testing.T) { - d1 := 10 * time.Second - - r := mocks.NewResponse() - mocks.SetRetryHeader(r, d1) - ar := Response{Response: r} - - d2 := ar.GetPollingDelay(time.Duration(0)) - if d1 != d2 { - t.Errorf("autorest: Response#GetPollingDelay failed to return the correct delay -- expected %v, received %v", - d1, d2) - } -} - -func TestResponseGetPollingDelayReturnsDefault(t *testing.T) { - ar := Response{Response: mocks.NewResponse()} - - d1 := 10 * time.Second - d2 := ar.GetPollingDelay(d1) - if d1 != d2 { - t.Errorf("autorest: Response#GetPollingDelay failed to return the default delay -- expected %v, received %v", - d1, d2) - } -} - -func TestResponseGetPollingLocation(t *testing.T) { - r := mocks.NewResponse() - mocks.SetLocationHeader(r, mocks.TestURL) - ar := Response{Response: r} - - if ar.GetPollingLocation() != mocks.TestURL { - t.Errorf("autorest: Response#GetPollingLocation failed to return correct URL -- expected %v, received %v", - mocks.TestURL, ar.GetPollingLocation()) - } -} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/date/date.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/date/date.go --- 
juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/date/date.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/date/date.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,87 +0,0 @@ -/* -Package date provides time.Time derivatives that conform to the Swagger.io (https://swagger.io/) -defined date formats: Date and DateTime. Both types may, in most cases, be used in lieu of -time.Time types. And both convert to time.Time through a ToTime method. -*/ -package date - -import ( - "fmt" - "time" -) - -const ( - rfc3339FullDate = "2006-01-02" - dateFormat = "%4d-%02d-%02d" - jsonFormat = `"%4d-%02d-%02d"` -) - -// Date defines a type similar to time.Time but assumes a layout of RFC3339 full-date (i.e., -// 2006-01-02). -type Date struct { - time.Time -} - -// ParseDate create a new Date from the passed string. -func ParseDate(date string) (d Date, err error) { - d = Date{} - d.Time, err = time.Parse(rfc3339FullDate, date) - return d, err -} - -// MarshalBinary preserves the Date as a byte array conforming to RFC3339 full-date (i.e., -// 2006-01-02). -func (d Date) MarshalBinary() ([]byte, error) { - return d.MarshalText() -} - -// UnmarshalBinary reconstitutes a Date saved as a byte array conforming to RFC3339 full-date (i.e., -// 2006-01-02). -func (d *Date) UnmarshalBinary(data []byte) error { - return d.UnmarshalText(data) -} - -// MarshalJSON preserves the Date as a JSON string conforming to RFC3339 full-date (i.e., -// 2006-01-02). -func (d Date) MarshalJSON() (json []byte, err error) { - return []byte(fmt.Sprintf(jsonFormat, d.Year(), d.Month(), d.Day())), nil -} - -// UnmarshalJSON reconstitutes the Date from a JSON string conforming to RFC3339 full-date (i.e., -// 2006-01-02). 
-func (d *Date) UnmarshalJSON(data []byte) (err error) { - if data[0] == '"' { - data = data[1 : len(data)-1] - } - d.Time, err = time.Parse(rfc3339FullDate, string(data)) - if err != nil { - return err - } - return nil -} - -// MarshalText preserves the Date as a byte array conforming to RFC3339 full-date (i.e., -// 2006-01-02). -func (d Date) MarshalText() (text []byte, err error) { - return []byte(fmt.Sprintf(dateFormat, d.Year(), d.Month(), d.Day())), nil -} - -// UnmarshalText reconstitutes a Date saved as a byte array conforming to RFC3339 full-date (i.e., -// 2006-01-02). -func (d *Date) UnmarshalText(data []byte) (err error) { - d.Time, err = time.Parse(rfc3339FullDate, string(data)) - if err != nil { - return err - } - return nil -} - -// String returns the Date formatted as an RFC3339 full-date string (i.e., 2006-01-02). -func (d Date) String() string { - return fmt.Sprintf(dateFormat, d.Year(), d.Month(), d.Day()) -} - -// ToTime returns a Date as a time.Time -func (d Date) ToTime() time.Time { - return d.Time -} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/date/date_test.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/date/date_test.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/date/date_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/date/date_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,201 +0,0 @@ -package date - -import ( - "encoding/json" - "fmt" - "reflect" - "testing" - "time" -) - -func ExampleParseDate() { - d, _ := ParseDate("2001-02-03") - fmt.Println(d) - // Output: 2001-02-03 -} - -func ExampleDate() { - d, _ := ParseDate("2001-02-03") - t, _ := time.Parse(time.RFC3339, "2001-02-04T00:00:00Z") - 
- // Date acts as time.Time when the receiver - if d.Before(t) { - fmt.Printf("Before ") - } else { - fmt.Printf("After ") - } - - // Convert Date when needing a time.Time - if t.After(d.ToTime()) { - fmt.Printf("After") - } else { - fmt.Printf("Before") - } - // Output: Before After -} - -func ExampleDate_MarshalBinary() { - d, _ := ParseDate("2001-02-03") - t, _ := d.MarshalBinary() - fmt.Println(string(t)) - // Output: 2001-02-03 -} - -func ExampleDate_UnmarshalBinary() { - d := Date{} - t := "2001-02-03" - - err := d.UnmarshalBinary([]byte(t)) - if err != nil { - fmt.Println(err) - } - fmt.Println(d) - // Output: 2001-02-03 -} - -func ExampleDate_MarshalJSON() { - d, _ := ParseDate("2001-02-03") - j, _ := json.Marshal(d) - fmt.Println(string(j)) - // Output: "2001-02-03" -} - -func ExampleDate_UnmarshalJSON() { - var d struct { - Date Date `json:"date"` - } - j := `{ - "date" : "2001-02-03" - }` - - err := json.Unmarshal([]byte(j), &d) - if err != nil { - fmt.Println(err) - } - fmt.Println(d.Date) - // Output: 2001-02-03 -} - -func ExampleDate_MarshalText() { - d, _ := ParseDate("2001-02-03") - t, _ := d.MarshalText() - fmt.Println(string(t)) - // Output: 2001-02-03 -} - -func ExampleDate_UnmarshalText() { - d := Date{} - t := "2001-02-03" - - err := d.UnmarshalText([]byte(t)) - if err != nil { - fmt.Println(err) - } - fmt.Println(d) - // Output: 2001-02-03 -} - -func TestDateString(t *testing.T) { - d, _ := ParseDate("2001-02-03") - if d.String() != "2001-02-03" { - t.Errorf("date: String failed (%v)", d.String()) - } -} - -func TestDateBinaryRoundTrip(t *testing.T) { - d1, err := ParseDate("2001-02-03") - t1, err := d1.MarshalBinary() - if err != nil { - t.Errorf("date: MarshalBinary failed (%v)", err) - } - - d2 := Date{} - err = d2.UnmarshalBinary(t1) - if err != nil { - t.Errorf("date: UnmarshalBinary failed (%v)", err) - } - - if !reflect.DeepEqual(d1, d2) { - t.Errorf("date: Round-trip Binary failed (%v, %v)", d1, d2) - } -} - -func 
TestDateJSONRoundTrip(t *testing.T) { - type s struct { - Date Date `json:"date"` - } - var err error - d1 := s{} - d1.Date, err = ParseDate("2001-02-03") - j, err := json.Marshal(d1) - if err != nil { - t.Errorf("date: MarshalJSON failed (%v)", err) - } - - d2 := s{} - err = json.Unmarshal(j, &d2) - if err != nil { - t.Errorf("date: UnmarshalJSON failed (%v)", err) - } - - if !reflect.DeepEqual(d1, d2) { - t.Errorf("date: Round-trip JSON failed (%v, %v)", d1, d2) - } -} - -func TestDateTextRoundTrip(t *testing.T) { - d1, err := ParseDate("2001-02-03") - t1, err := d1.MarshalText() - if err != nil { - t.Errorf("date: MarshalText failed (%v)", err) - } - - d2 := Date{} - err = d2.UnmarshalText(t1) - if err != nil { - t.Errorf("date: UnmarshalText failed (%v)", err) - } - - if !reflect.DeepEqual(d1, d2) { - t.Errorf("date: Round-trip Text failed (%v, %v)", d1, d2) - } -} - -func TestDateToTime(t *testing.T) { - var d Date - d, err := ParseDate("2001-02-03") - if err != nil { - t.Errorf("date: ParseDate failed (%v)", err) - } - var v interface{} = d.ToTime() - switch v.(type) { - case time.Time: - return - default: - t.Errorf("date: ToTime failed to return a time.Time") - } -} - -func TestDateUnmarshalJSONReturnsError(t *testing.T) { - var d struct { - Date Date `json:"date"` - } - j := `{ - "date" : "February 3, 2001" - }` - - err := json.Unmarshal([]byte(j), &d) - if err == nil { - t.Error("date: Date failed to return error for malformed JSON date") - } -} - -func TestDateUnmarshalTextReturnsError(t *testing.T) { - d := Date{} - txt := "February 3, 2001" - - err := d.UnmarshalText([]byte(txt)) - if err == nil { - t.Error("date: Date failed to return error for malformed Text date") - } -} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/date/time.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/date/time.go --- 
juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/date/time.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/date/time.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,70 +0,0 @@ -package date - -import ( - "time" -) - -// Time defines a type similar to time.Time but assumes a layout of RFC3339 date-time (i.e., -// 2006-01-02T15:04:05Z). -type Time struct { - time.Time -} - -// ParseTime creates a new Time from the passed string. -func ParseTime(date string) (d Time, err error) { - d = Time{} - d.Time, err = time.Parse(time.RFC3339, date) - return d, err -} - -// MarshalBinary preserves the Time as a byte array conforming to RFC3339 date-time (i.e., -// 2006-01-02T15:04:05Z). -func (d Time) MarshalBinary() ([]byte, error) { - return d.Time.MarshalText() -} - -// UnmarshalBinary reconstitutes a Time saved as a byte array conforming to RFC3339 date-time -// (i.e., 2006-01-02T15:04:05Z). -func (d *Time) UnmarshalBinary(data []byte) error { - return d.Time.UnmarshalText(data) -} - -// MarshalJSON preserves the Time as a JSON string conforming to RFC3339 date-time (i.e., -// 2006-01-02T15:04:05Z). -func (d Time) MarshalJSON() (json []byte, err error) { - return d.Time.MarshalJSON() -} - -// UnmarshalJSON reconstitutes the Time from a JSON string conforming to RFC3339 date-time -// (i.e., 2006-01-02T15:04:05Z). -func (d *Time) UnmarshalJSON(data []byte) (err error) { - return d.Time.UnmarshalJSON(data) -} - -// MarshalText preserves the Time as a byte array conforming to RFC3339 date-time (i.e., -// 2006-01-02T15:04:05Z). -func (d Time) MarshalText() (text []byte, err error) { - return d.Time.MarshalText() -} - -// UnmarshalText reconstitutes a Time saved as a byte array conforming to RFC3339 date-time -// (i.e., 2006-01-02T15:04:05Z). 
-func (d *Time) UnmarshalText(data []byte) (err error) { - return d.Time.UnmarshalText(data) -} - -// String returns the Time formatted as an RFC3339 date-time string (i.e., -// 2006-01-02T15:04:05Z). -func (d Time) String() string { - // Note: time.Time.String does not return an RFC3339 compliant string, time.Time.MarshalText does. - b, err := d.Time.MarshalText() - if err != nil { - return "" - } - return string(b) -} - -// ToTime returns a Time as a time.Time -func (d Time) ToTime() time.Time { - return d.Time -} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/date/time_test.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/date/time_test.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/date/time_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/date/time_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,164 +0,0 @@ -package date - -import ( - "encoding/json" - "fmt" - "reflect" - "testing" - "time" -) - -func ExampleParseTime() { - d, _ := ParseTime("2001-02-03T04:05:06Z") - fmt.Println(d) - // Output: 2001-02-03T04:05:06Z -} - -func ExampleTime_MarshalBinary() { - d, _ := ParseTime("2001-02-03T04:05:06Z") - t, _ := d.MarshalBinary() - fmt.Println(string(t)) - // Output: 2001-02-03T04:05:06Z -} - -func ExampleTime_UnmarshalBinary() { - d := Time{} - t := "2001-02-03T04:05:06Z" - - err := d.UnmarshalBinary([]byte(t)) - if err != nil { - fmt.Println(err) - } - fmt.Println(d) - // Output: 2001-02-03T04:05:06Z -} - -func ExampleTime_MarshalJSON() { - d, _ := ParseTime("2001-02-03T04:05:06Z") - j, _ := json.Marshal(d) - fmt.Println(string(j)) - // Output: "2001-02-03T04:05:06Z" -} - -func ExampleTime_UnmarshalJSON() { - var d struct { 
- Time Time `json:"datetime"` - } - j := `{ - "datetime" : "2001-02-03T04:05:06Z" - }` - - err := json.Unmarshal([]byte(j), &d) - if err != nil { - fmt.Println(err) - } - fmt.Println(d.Time) - // Output: 2001-02-03T04:05:06Z -} - -func ExampleTime_MarshalText() { - d, _ := ParseTime("2001-02-03T04:05:06Z") - t, _ := d.MarshalText() - fmt.Println(string(t)) - // Output: 2001-02-03T04:05:06Z -} - -func ExampleTime_UnmarshalText() { - d := Time{} - t := "2001-02-03T04:05:06Z" - - err := d.UnmarshalText([]byte(t)) - if err != nil { - fmt.Println(err) - } - fmt.Println(d) - // Output: 2001-02-03T04:05:06Z -} - -func TestTimeString(t *testing.T) { - d, _ := ParseTime("2001-02-03T04:05:06Z") - if d.String() != "2001-02-03T04:05:06Z" { - t.Errorf("date: String failed (%v)", d.String()) - } -} - -func TestTimeStringReturnsEmptyStringForError(t *testing.T) { - d := Time{Time: time.Date(20000, 01, 01, 01, 01, 01, 01, time.UTC)} - if d.String() != "" { - t.Errorf("date: Time#String failed empty string for an error") - } -} - -func TestTimeBinaryRoundTrip(t *testing.T) { - d1, err := ParseTime("2001-02-03T04:05:06Z") - t1, err := d1.MarshalBinary() - if err != nil { - t.Errorf("datetime: MarshalBinary failed (%v)", err) - } - - d2 := Time{} - err = d2.UnmarshalBinary(t1) - if err != nil { - t.Errorf("datetime: UnmarshalBinary failed (%v)", err) - } - - if !reflect.DeepEqual(d1, d2) { - t.Errorf("datetime: Round-trip Binary failed (%v, %v)", d1, d2) - } -} - -func TestTimeJSONRoundTrip(t *testing.T) { - type s struct { - Time Time `json:"datetime"` - } - var err error - d1 := s{} - d1.Time, err = ParseTime("2001-02-03T04:05:06Z") - j, err := json.Marshal(d1) - if err != nil { - t.Errorf("datetime: MarshalJSON failed (%v)", err) - } - - d2 := s{} - err = json.Unmarshal(j, &d2) - if err != nil { - t.Errorf("datetime: UnmarshalJSON failed (%v)", err) - } - - if !reflect.DeepEqual(d1, d2) { - t.Errorf("datetime: Round-trip JSON failed (%v, %v)", d1, d2) - } -} - -func 
TestTimeTextRoundTrip(t *testing.T) { - d1, err := ParseTime("2001-02-03T04:05:06Z") - t1, err := d1.MarshalText() - if err != nil { - t.Errorf("datetime: MarshalText failed (%v)", err) - } - - d2 := Time{} - err = d2.UnmarshalText(t1) - if err != nil { - t.Errorf("datetime: UnmarshalText failed (%v)", err) - } - - if !reflect.DeepEqual(d1, d2) { - t.Errorf("datetime: Round-trip Text failed (%v, %v)", d1, d2) - } -} - -func TestTimeToTime(t *testing.T) { - var d Time - d, err := ParseTime("2001-02-03T04:05:06Z") - if err != nil { - t.Errorf("datetime: ParseTime failed (%v)", err) - } - var v interface{} = d.ToTime() - switch v.(type) { - case time.Time: - return - default: - t.Errorf("datetime: ToTime failed to return a time.Time") - } -} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/error.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/error.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/error.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/error.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,93 +0,0 @@ -package autorest - -import ( - "fmt" -) - -// Error describes the methods implemented by autorest errors. -type Error interface { - error - - // PackageType should return the package type of the object emitting the error. For types, the - // value should match that produced the the '%T' format specifier of the fmt package. For other - // elements, such as functions, it returns just the package name (e.g., "autorest"). - PackageType() string - - // Method should return the name of the method raising the error. - Method() string - - // Message should return the error message. 
- Message() string - - // String should return a formatted containing all available details (i.e., PackageType, Method, - // Message, and original error (if any)). - String() string - - // Original should return the original error, if any, and nil otherwise. - Original() error -} - -type baseError struct { - packageType string - method string - message string - - original error -} - -// NewError creates a new Error conforming object from the passed packageType, method, and -// message. message is treated as a format string to which the optional args apply. -func NewError(packageType string, method string, message string, args ...interface{}) Error { - return NewErrorWithError(nil, packageType, method, message, args...) -} - -// NewErrorWithError creates a new Error conforming object from the passed packageType, method, -// message, and original error. message is treated as a format string to which the optional args -// apply. -func NewErrorWithError(original error, packageType string, method string, message string, args ...interface{}) Error { - if _, ok := original.(Error); ok { - return original.(Error) - } - return baseError{ - packageType: packageType, - method: method, - message: fmt.Sprintf(message, args...), - original: original, - } -} - -// PackageType returns the package type of the object emitting the error. For types, the value -// matches that produced the the '%T' format specifier of the fmt package. For other elements, -// such as functions, it returns just the package name (e.g., "autorest"). -func (be baseError) PackageType() string { - return be.packageType -} - -// Method returns the name of the method raising the error. -func (be baseError) Method() string { - return be.method -} - -// Message is the error message. -func (be baseError) Message() string { - return be.message -} - -// Original returns the original error, if any, and nil otherwise. 
-func (be baseError) Original() error { - return be.original -} - -// Error returns the same formatted string as String. -func (be baseError) Error() string { - return be.String() -} - -// String returns a formatted containing all available details (i.e., PackageType, Method, -// Message, and original error (if any)). -func (be baseError) String() string { - if be.original == nil { - return fmt.Sprintf("%s:%s %s", be.packageType, be.method, be.message) - } - return fmt.Sprintf("%s:%s %s -- Original Error: %v", be.packageType, be.method, be.message, be.original) -} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/error_test.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/error_test.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/error_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/error_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,114 +0,0 @@ -package autorest - -import ( - "fmt" - "reflect" - "regexp" - "testing" -) - -func TestNewErrorWithErrorAssignsPackageType(t *testing.T) { - e := NewErrorWithError(fmt.Errorf("original"), "packageType", "method", "message") - - if e.PackageType() != "packageType" { - t.Errorf("autorest: Error failed to set package type -- expected %v, received %v", "packageType", e.PackageType()) - } -} - -func TestNewErrorWithErrorAssignsMethod(t *testing.T) { - e := NewErrorWithError(fmt.Errorf("original"), "packageType", "method", "message") - - if e.Method() != "method" { - t.Errorf("autorest: Error failed to set package type -- expected %v, received %v", "method", e.Method()) - } -} - -func TestNewErrorWithErrorAssignsMessage(t *testing.T) { - e := NewErrorWithError(fmt.Errorf("original"), 
"packageType", "method", "message") - - if e.Message() != "message" { - t.Errorf("autorest: Error failed to set package type -- expected %v, received %v", "message", e.Message()) - } -} - -func TestNewErrorWithErrorAcceptsArgs(t *testing.T) { - e := NewErrorWithError(fmt.Errorf("original"), "packageType", "method", "message %s", "arg") - - if matched, _ := regexp.MatchString(`.*arg.*`, e.Message()); !matched { - t.Errorf("autorest: Error failed to apply message arguments -- expected %v, received %v", - `.*arg.*`, e.Message()) - } -} - -func TestNewErrorWithErrorAssignsError(t *testing.T) { - err := fmt.Errorf("original") - e := NewErrorWithError(err, "packageType", "method", "message") - - if e.Original() != err { - t.Errorf("autorest: Error failed to set package type -- expected %v, received %v", err, e.Original()) - } -} - -func TestNewErrorForwards(t *testing.T) { - e1 := NewError("packageType", "method", "message %s", "arg") - e2 := NewErrorWithError(nil, "packageType", "method", "message %s", "arg") - - if !reflect.DeepEqual(e1, e2) { - t.Error("autorest: NewError did not return an error equivelent to NewErrorWithError") - } -} - -func TestErrorError(t *testing.T) { - err := fmt.Errorf("original") - e := NewErrorWithError(err, "packageType", "method", "message") - - if matched, _ := regexp.MatchString(`.*original.*`, e.Error()); !matched { - t.Errorf("autorest: Error#Error failed to return original error message -- expected %v, received %v", - `.*original.*`, e.Error()) - } -} - -func TestErrorStringConstainsPackageType(t *testing.T) { - e := NewErrorWithError(fmt.Errorf("original"), "packageType", "method", "message") - - if matched, _ := regexp.MatchString(`.*packageType.*`, e.String()); !matched { - t.Errorf("autorest: Error#String failed to include PackageType -- expected %v, received %v", - `.*packageType.*`, e.String()) - } -} - -func TestErrorStringConstainsMethod(t *testing.T) { - e := NewErrorWithError(fmt.Errorf("original"), "packageType", "method", 
"message") - - if matched, _ := regexp.MatchString(`.*method.*`, e.String()); !matched { - t.Errorf("autorest: Error#String failed to include Method -- expected %v, received %v", - `.*method.*`, e.String()) - } -} - -func TestErrorStringConstainsMessage(t *testing.T) { - e := NewErrorWithError(fmt.Errorf("original"), "packageType", "method", "message") - - if matched, _ := regexp.MatchString(`.*message.*`, e.String()); !matched { - t.Errorf("autorest: Error#String failed to include Message -- expected %v, received %v", - `.*message.*`, e.String()) - } -} - -func TestErrorStringConstainsOriginal(t *testing.T) { - e := NewErrorWithError(fmt.Errorf("original"), "packageType", "method", "message") - - if matched, _ := regexp.MatchString(`.*original.*`, e.String()); !matched { - t.Errorf("autorest: Error#String failed to include Original error -- expected %v, received %v", - `.*original.*`, e.String()) - } -} - -func TestErrorStringSkipsOriginal(t *testing.T) { - e := NewError("packageType", "method", "message") - - if matched, _ := regexp.MatchString(`.*Original.*`, e.String()); matched { - t.Errorf("autorest: Error#String included missing Original error -- unexpected %v, received %v", - `.*Original.*`, e.String()) - } -} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/mocks/helpers.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/mocks/helpers.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/mocks/helpers.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/mocks/helpers.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,104 +0,0 @@ -package mocks - -import ( - "fmt" - "net/http" - "time" -) - -const ( - // TestDelay is the Retry-After delay used in 
tests. - TestDelay = 0 * time.Second - - // TestHeader is the header used in tests. - TestHeader = "x-test-header" - - // TestURL is the URL used in tests. - TestURL = "https://microsoft.com/a/b/c/" -) - -const ( - headerLocation = "Location" - headerRetryAfter = "Retry-After" -) - -// NewRequest instantiates a new request. -func NewRequest() *http.Request { - return NewRequestWithContent("") -} - -// NewRequestWithContent instantiates a new request using the passed string for the body content. -func NewRequestWithContent(c string) *http.Request { - r, _ := http.NewRequest("GET", "https://microsoft.com/a/b/c/", NewBody(c)) - return r -} - -// NewRequestForURL instantiates a new request using the passed URL. -func NewRequestForURL(u string) *http.Request { - r, err := http.NewRequest("GET", u, NewBody("")) - if err != nil { - panic(fmt.Sprintf("mocks: ERROR (%v) parsing testing URL %s", err, u)) - } - return r -} - -// NewResponse instantiates a new response. -func NewResponse() *http.Response { - return NewResponseWithContent("") -} - -// NewResponseWithContent instantiates a new response with the passed string as the body content. -func NewResponseWithContent(c string) *http.Response { - return &http.Response{ - Status: "200 OK", - StatusCode: 200, - Proto: "HTTP/1.0", - ProtoMajor: 1, - ProtoMinor: 0, - Body: NewBody(c), - Request: NewRequest(), - } -} - -// NewResponseWithStatus instantiates a new response using the passed string and integer as the -// status and status code. -func NewResponseWithStatus(s string, c int) *http.Response { - resp := NewResponse() - resp.Status = s - resp.StatusCode = c - return resp -} - -// SetResponseHeader adds a header to the passed response. -func SetResponseHeader(resp *http.Response, h string, v string) { - if resp.Header == nil { - resp.Header = make(http.Header) - } - resp.Header.Set(h, v) -} - -// SetResponseHeaderValues adds a header containing all the passed string values. 
-func SetResponseHeaderValues(resp *http.Response, h string, values []string) { - if resp.Header == nil { - resp.Header = make(http.Header) - } - for _, v := range values { - resp.Header.Add(h, v) - } -} - -// SetAcceptedHeaders adds the headers usually associated with a 202 Accepted response. -func SetAcceptedHeaders(resp *http.Response) { - SetLocationHeader(resp, TestURL) - SetRetryHeader(resp, TestDelay) -} - -// SetLocationHeader adds the Location header. -func SetLocationHeader(resp *http.Response, location string) { - SetResponseHeader(resp, http.CanonicalHeaderKey(headerLocation), location) -} - -// SetRetryHeader adds the Retry-After header. -func SetRetryHeader(resp *http.Response, delay time.Duration) { - SetResponseHeader(resp, http.CanonicalHeaderKey(headerRetryAfter), fmt.Sprintf("%v", delay.Seconds())) -} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/mocks/helpers_test.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/mocks/helpers_test.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/mocks/helpers_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/mocks/helpers_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1 +0,0 @@ -package mocks diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/mocks/mocks.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/mocks/mocks.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/mocks/mocks.go 2016-08-16 08:56:25.000000000 +0000 +++ 
juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/mocks/mocks.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,165 +0,0 @@ -/* -Package mocks provides mocks and helpers used in testing. -*/ -package mocks - -import ( - "fmt" - "io" - "net/http" -) - -// Body implements acceptable body over a string. -type Body struct { - s string - b []byte - isOpen bool - closeAttempts int -} - -// NewBody creates a new instance of Body. -func NewBody(s string) *Body { - return (&Body{s: s}).reset() -} - -// Read reads into the passed byte slice and returns the bytes read. -func (body *Body) Read(b []byte) (n int, err error) { - if !body.IsOpen() { - return 0, fmt.Errorf("ERROR: Body has been closed\n") - } - if len(body.b) == 0 { - return 0, io.EOF - } - n = copy(b, body.b) - body.b = body.b[n:] - return n, nil -} - -// Close closes the body. -func (body *Body) Close() error { - if body.isOpen { - body.isOpen = false - body.closeAttempts++ - } - return nil -} - -// CloseAttempts returns the number of times Close was called. -func (body *Body) CloseAttempts() int { - return body.closeAttempts -} - -// IsOpen returns true if the Body has not been closed, false otherwise. -func (body *Body) IsOpen() bool { - return body.isOpen -} - -func (body *Body) reset() *Body { - body.isOpen = true - body.b = []byte(body.s) - return body -} - -// Sender implements a simple null sender. -type Sender struct { - attempts int - pollAttempts int - content string - reuseResponse bool - resp *http.Response - status string - statusCode int - emitErrors int - err error -} - -// NewSender creates a new instance of Sender. -func NewSender() *Sender { - return &Sender{status: "200 OK", statusCode: 200} -} - -// Do accepts the passed request and, based on settings, emits a response and possible error. 
-func (c *Sender) Do(r *http.Request) (*http.Response, error) { - c.attempts++ - - if !c.reuseResponse || c.resp == nil { - resp := NewResponse() - resp.Request = r - resp.Body = NewBody(c.content) - resp.Status = c.status - resp.StatusCode = c.statusCode - c.resp = resp - } else { - c.resp.Body.(*Body).reset() - } - - if c.pollAttempts > 0 { - c.pollAttempts-- - c.resp.Status = "Accepted" - c.resp.StatusCode = http.StatusAccepted - SetAcceptedHeaders(c.resp) - } - - if c.emitErrors > 0 || c.emitErrors < 0 { - c.emitErrors-- - if c.err == nil { - return c.resp, fmt.Errorf("Faux Error") - } - return c.resp, c.err - } - return c.resp, nil -} - -// Attempts returns the number of times Do was called. -func (c *Sender) Attempts() int { - return c.attempts -} - -// EmitErrors sets the number times Do should emit an error. -func (c *Sender) EmitErrors(emit int) { - c.emitErrors = emit -} - -// SetError sets the error Do should return. -func (c *Sender) SetError(err error) { - c.err = err -} - -// ClearError clears the error Do emits. -func (c *Sender) ClearError() { - c.SetError(nil) -} - -// EmitContent sets the content to be returned by Do in the response body. -func (c *Sender) EmitContent(s string) { - c.content = s -} - -// EmitStatus sets the status of the response Do emits. -func (c *Sender) EmitStatus(status string, code int) { - c.status = status - c.statusCode = code -} - -// SetPollAttempts sets the number of times the returned response emits the default polling -// status code (i.e., 202 Accepted). -func (c *Sender) SetPollAttempts(pa int) { - c.pollAttempts = pa -} - -// ReuseResponse sets if the just one response object should be reused by all calls to Do. -func (c *Sender) ReuseResponse(reuseResponse bool) { - c.reuseResponse = reuseResponse -} - -// SetResponse sets the response from Do. -func (c *Sender) SetResponse(resp *http.Response) { - c.resp = resp - c.reuseResponse = true -} - -// T is a simple testing struct. 
-type T struct { - Name string `json:"name"` - Age int `json:"age"` -} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/mocks/mocks_test.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/mocks/mocks_test.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/mocks/mocks_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/mocks/mocks_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1 +0,0 @@ -package mocks diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/preparer.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/preparer.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/preparer.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/preparer.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,311 +0,0 @@ -package autorest - -import ( - "bytes" - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "net/url" - "strings" -) - -const ( - mimeTypeJSON = "application/json" - mimeTypeFormPost = "application/x-www-form-urlencoded" - - headerAuthorization = "Authorization" - headerContentType = "Content-Type" - headerUserAgent = "User-Agent" -) - -// Preparer is the interface that wraps the Prepare method. -// -// Prepare accepts and possibly modifies an http.Request (e.g., adding Headers). Implementations -// must ensure to not share or hold per-invocation state since Preparers may be shared and re-used. 
-type Preparer interface { - Prepare(*http.Request) (*http.Request, error) -} - -// PreparerFunc is a method that implements the Preparer interface. -type PreparerFunc func(*http.Request) (*http.Request, error) - -// Prepare implements the Preparer interface on PreparerFunc. -func (pf PreparerFunc) Prepare(r *http.Request) (*http.Request, error) { - return pf(r) -} - -// PrepareDecorator takes and possibly decorates, by wrapping, a Preparer. Decorators may affect the -// http.Request and pass it along or, first, pass the http.Request along then affect the result. -type PrepareDecorator func(Preparer) Preparer - -// CreatePreparer creates, decorates, and returns a Preparer. -// Without decorators, the returned Preparer returns the passed http.Request unmodified. -// Preparers are safe to share and re-use. -func CreatePreparer(decorators ...PrepareDecorator) Preparer { - return DecoratePreparer( - Preparer(PreparerFunc(func(r *http.Request) (*http.Request, error) { return r, nil })), - decorators...) -} - -// DecoratePreparer accepts a Preparer and a, possibly empty, set of PrepareDecorators, which it -// applies to the Preparer. Decorators are applied in the order received, but their affect upon the -// request depends on whether they are a pre-decorator (change the http.Request and then pass it -// along) or a post-decorator (pass the http.Request along and alter it on return). -func DecoratePreparer(p Preparer, decorators ...PrepareDecorator) Preparer { - for _, decorate := range decorators { - p = decorate(p) - } - return p -} - -// Prepare accepts an http.Request and a, possibly empty, set of PrepareDecorators. -// It creates a Preparer from the decorators which it then applies to the passed http.Request. 
-func Prepare(r *http.Request, decorators ...PrepareDecorator) (*http.Request, error) { - if r == nil { - return nil, NewError("autorest", "Prepare", "Invoked without an http.Request") - } - return CreatePreparer(decorators...).Prepare(r) -} - -// WithNothing returns a "do nothing" PrepareDecorator that makes no changes to the passed -// http.Request. -func WithNothing() PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - return p.Prepare(r) - }) - } -} - -// WithHeader returns a PrepareDecorator that sets the specified HTTP header of the http.Request to -// the passed value. It canonicalizes the passed header name (via http.CanonicalHeaderKey) before -// adding the header. -func WithHeader(header string, value string) PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err == nil { - if r.Header == nil { - r.Header = make(http.Header) - } - r.Header.Set(http.CanonicalHeaderKey(header), value) - } - return r, err - }) - } -} - -// WithBearerAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose -// value is "Bearer " followed by the supplied token. -func WithBearerAuthorization(token string) PrepareDecorator { - return WithHeader(headerAuthorization, fmt.Sprintf("Bearer %s", token)) -} - -// AsContentType returns a PrepareDecorator that adds an HTTP Content-Type header whose value -// is the passed contentType. -func AsContentType(contentType string) PrepareDecorator { - return WithHeader(headerContentType, contentType) -} - -// WithUserAgent returns a PrepareDecorator that adds an HTTP User-Agent header whose value is the -// passed string. 
-func WithUserAgent(ua string) PrepareDecorator { - return WithHeader(headerUserAgent, ua) -} - -// AsFormURLEncoded returns a PrepareDecorator that adds an HTTP Content-Type header whose value is -// "application/x-www-form-urlencoded". -func AsFormURLEncoded() PrepareDecorator { - return AsContentType(mimeTypeFormPost) -} - -// AsJSON returns a PrepareDecorator that adds an HTTP Content-Type header whose value is -// "application/json". -func AsJSON() PrepareDecorator { - return AsContentType(mimeTypeJSON) -} - -// WithMethod returns a PrepareDecorator that sets the HTTP method of the passed request. The -// decorator does not validate that the passed method string is a known HTTP method. -func WithMethod(method string) PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r.Method = method - return p.Prepare(r) - }) - } -} - -// AsDelete returns a PrepareDecorator that sets the HTTP method to DELETE. -func AsDelete() PrepareDecorator { return WithMethod("DELETE") } - -// AsGet returns a PrepareDecorator that sets the HTTP method to GET. -func AsGet() PrepareDecorator { return WithMethod("GET") } - -// AsHead returns a PrepareDecorator that sets the HTTP method to HEAD. -func AsHead() PrepareDecorator { return WithMethod("HEAD") } - -// AsOptions returns a PrepareDecorator that sets the HTTP method to OPTIONS. -func AsOptions() PrepareDecorator { return WithMethod("OPTIONS") } - -// AsPatch returns a PrepareDecorator that sets the HTTP method to PATCH. -func AsPatch() PrepareDecorator { return WithMethod("PATCH") } - -// AsPost returns a PrepareDecorator that sets the HTTP method to POST. -func AsPost() PrepareDecorator { return WithMethod("POST") } - -// AsPut returns a PrepareDecorator that sets the HTTP method to PUT. 
-func AsPut() PrepareDecorator { return WithMethod("PUT") } - -// WithBaseURL returns a PrepareDecorator that populates the http.Request with a url.URL constructed -// from the supplied baseUrl. -func WithBaseURL(baseURL string) PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err == nil { - u, err := url.Parse(baseURL) - if err == nil { - r.URL = u - } - } - return r, err - }) - } -} - -// WithFormData returns a PrepareDecoratore that "URL encodes" (e.g., bar=baz&foo=quux) into the -// http.Request body. -func WithFormData(v url.Values) PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err == nil { - s := v.Encode() - r.ContentLength = int64(len(s)) - r.Body = ioutil.NopCloser(strings.NewReader(s)) - } - return r, err - }) - } -} - -// WithJSON returns a PrepareDecorator that encodes the data passed as JSON into the body of the -// request and sets the Content-Length header. -func WithJSON(v interface{}) PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err == nil { - b, err := json.Marshal(v) - if err == nil { - r.ContentLength = int64(len(b)) - r.Body = ioutil.NopCloser(bytes.NewReader(b)) - } - } - return r, err - }) - } -} - -// WithPath returns a PrepareDecorator that adds the supplied path to the request URL. If the path -// is absolute (that is, it begins with a "/"), it replaces the existing path. 
-func WithPath(path string) PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err == nil { - if r.URL == nil { - return r, NewError("autorest", "WithPath", "Invoked with a nil URL") - } - u := r.URL - u.Path = strings.TrimRight(u.Path, "/") - if strings.HasPrefix(path, "/") { - u.Path = path - } else { - u.Path += "/" + path - } - } - return r, err - }) - } -} - -// WithEscapedPathParameters returns a PrepareDecorator that replaces brace-enclosed keys within the -// request path (i.e., http.Request.URL.Path) with the corresponding values from the passed map. The -// values will be escaped (aka URL encoded) before insertion into the path. -func WithEscapedPathParameters(pathParameters map[string]interface{}) PrepareDecorator { - parameters := escapeValueStrings(ensureValueStrings(pathParameters)) - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err == nil { - if r.URL == nil { - return r, NewError("autorest", "WithEscapedPathParameters", "Invoked with a nil URL") - } - for key, value := range parameters { - r.URL.Path = strings.Replace(r.URL.Path, "{"+key+"}", value, -1) - } - } - return r, err - }) - } -} - -// WithPathParameters returns a PrepareDecorator that replaces brace-enclosed keys within the -// request path (i.e., http.Request.URL.Path) with the corresponding values from the passed map. 
-func WithPathParameters(pathParameters map[string]interface{}) PrepareDecorator { - parameters := ensureValueStrings(pathParameters) - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err == nil { - if r.URL == nil { - return r, NewError("autorest", "WithPathParameters", "Invoked with a nil URL") - } - for key, value := range parameters { - r.URL.Path = strings.Replace(r.URL.Path, "{"+key+"}", value, -1) - } - } - return r, err - }) - } -} - -// WithQueryParameters returns a PrepareDecorators that encodes and applies the query parameters -// given in the supplied map (i.e., key=value). -func WithQueryParameters(queryParameters map[string]interface{}) PrepareDecorator { - parameters := ensureValueStrings(queryParameters) - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err == nil { - if r.URL == nil { - return r, NewError("autorest", "WithQueryParameters", "Invoked with a nil URL") - } - v := r.URL.Query() - for key, value := range parameters { - v.Add(key, value) - } - r.URL.RawQuery = v.Encode() - } - return r, err - }) - } -} - -// Authorizer is the interface that provides a PrepareDecorator used to supply request -// authorization. Most often, the Authorizer decorator runs last so it has access to the full -// state of the formed HTTP request. -type Authorizer interface { - WithAuthorization() PrepareDecorator -} - -// NullAuthorizer implements a default, "do nothing" Authorizer. -type NullAuthorizer struct{} - -// WithAuthorization returns a PrepareDecorator that does nothing. 
-func (na NullAuthorizer) WithAuthorization() PrepareDecorator { - return WithNothing() -} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/preparer_test.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/preparer_test.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/preparer_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/preparer_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,496 +0,0 @@ -package autorest - -import ( - "fmt" - "io/ioutil" - "net/http" - "net/url" - "reflect" - "testing" - - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/mocks" -) - -// PrepareDecorators wrap and invoke a Preparer. Most often, the decorator invokes the passed -// Preparer and decorates the response. -func ExamplePrepareDecorator() { - path := "a/b/c/" - pd := func() PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err == nil { - if r.URL == nil { - return r, fmt.Errorf("ERROR: URL is not set") - } - r.URL.Path += path - } - return r, err - }) - } - } - - r, _ := Prepare(&http.Request{}, - WithBaseURL("https://microsoft.com/"), - pd()) - - fmt.Printf("Path is %s\n", r.URL) - // Output: Path is https://microsoft.com/a/b/c/ -} - -// PrepareDecorators may also modify and then invoke the Preparer. 
-func ExamplePrepareDecorator_pre() { - pd := func() PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r.Header.Add(http.CanonicalHeaderKey("ContentType"), "application/json") - return p.Prepare(r) - }) - } - } - - r, _ := Prepare(&http.Request{Header: http.Header{}}, - pd()) - - fmt.Printf("ContentType is %s\n", r.Header.Get("ContentType")) - // Output: ContentType is application/json -} - -// Create a sequence of three Preparers that build up the URL path. -func ExampleCreatePreparer() { - p := CreatePreparer( - WithBaseURL("https://microsoft.com/"), - WithPath("a"), - WithPath("b"), - WithPath("c")) - r, err := p.Prepare(&http.Request{}) - if err != nil { - fmt.Printf("ERROR: %v\n", err) - } else { - fmt.Println(r.URL) - } - // Output: https://microsoft.com/a/b/c -} - -// Create and apply separate Preparers -func ExampleCreatePreparer_multiple() { - params := map[string]interface{}{ - "param1": "a", - "param2": "c", - } - - p1 := CreatePreparer(WithBaseURL("https://microsoft.com/"), WithPath("/{param1}/b/{param2}/")) - p2 := CreatePreparer(WithPathParameters(params)) - - r, err := p1.Prepare(&http.Request{}) - if err != nil { - fmt.Printf("ERROR: %v\n", err) - } - - r, err = p2.Prepare(r) - if err != nil { - fmt.Printf("ERROR: %v\n", err) - } else { - fmt.Println(r.URL) - } - // Output: https://microsoft.com/a/b/c/ -} - -// Create and chain separate Preparers -func ExampleCreatePreparer_chain() { - params := map[string]interface{}{ - "param1": "a", - "param2": "c", - } - - p := CreatePreparer(WithBaseURL("https://microsoft.com/"), WithPath("/{param1}/b/{param2}/")) - p = DecoratePreparer(p, WithPathParameters(params)) - - r, err := p.Prepare(&http.Request{}) - if err != nil { - fmt.Printf("ERROR: %v\n", err) - } else { - fmt.Println(r.URL) - } - // Output: https://microsoft.com/a/b/c/ -} - -// Create and prepare an http.Request in one call -func ExamplePrepare() { - r, err := 
Prepare(&http.Request{}, - AsGet(), - WithBaseURL("https://microsoft.com/"), - WithPath("a/b/c/")) - if err != nil { - fmt.Printf("ERROR: %v\n", err) - } else { - fmt.Printf("%s %s", r.Method, r.URL) - } - // Output: GET https://microsoft.com/a/b/c/ -} - -// Create a request for a supplied base URL and path -func ExampleWithBaseURL() { - r, err := Prepare(&http.Request{}, - WithBaseURL("https://microsoft.com/a/b/c/")) - if err != nil { - fmt.Printf("ERROR: %v\n", err) - } else { - fmt.Println(r.URL) - } - // Output: https://microsoft.com/a/b/c/ -} - -// Create a request with a custom HTTP header -func ExampleWithHeader() { - r, err := Prepare(&http.Request{}, - WithBaseURL("https://microsoft.com/a/b/c/"), - WithHeader("x-foo", "bar")) - if err != nil { - fmt.Printf("ERROR: %v\n", err) - } else { - fmt.Printf("Header %s=%s\n", "x-foo", r.Header.Get("x-foo")) - } - // Output: Header x-foo=bar -} - -// Create a request whose Body is the JSON encoding of a structure -func ExampleWithFormData() { - v := url.Values{} - v.Add("name", "Rob Pike") - v.Add("age", "42") - - r, err := Prepare(&http.Request{}, - WithFormData(v)) - if err != nil { - fmt.Printf("ERROR: %v\n", err) - } - - b, err := ioutil.ReadAll(r.Body) - if err != nil { - fmt.Printf("ERROR: %v\n", err) - } else { - fmt.Printf("Request Body contains %s\n", string(b)) - } - // Output: Request Body contains age=42&name=Rob+Pike -} - -// Create a request whose Body is the JSON encoding of a structure -func ExampleWithJSON() { - t := mocks.T{Name: "Rob Pike", Age: 42} - - r, err := Prepare(&http.Request{}, - WithJSON(&t)) - if err != nil { - fmt.Printf("ERROR: %v\n", err) - } - - b, err := ioutil.ReadAll(r.Body) - if err != nil { - fmt.Printf("ERROR: %v\n", err) - } else { - fmt.Printf("Request Body contains %s\n", string(b)) - } - // Output: Request Body contains {"name":"Rob Pike","age":42} -} - -// Create a request from a path with escaped parameters -func ExampleWithEscapedPathParameters() { - params := 
map[string]interface{}{ - "param1": "a b c", - "param2": "d e f", - } - r, err := Prepare(&http.Request{}, - WithBaseURL("https://microsoft.com/"), - WithPath("/{param1}/b/{param2}/"), - WithEscapedPathParameters(params)) - if err != nil { - fmt.Printf("ERROR: %v\n", err) - } else { - fmt.Println(r.URL) - } - // Output: https://microsoft.com/a+b+c/b/d+e+f/ -} - -// Create a request from a path with parameters -func ExampleWithPathParameters() { - params := map[string]interface{}{ - "param1": "a", - "param2": "c", - } - r, err := Prepare(&http.Request{}, - WithBaseURL("https://microsoft.com/"), - WithPath("/{param1}/b/{param2}/"), - WithPathParameters(params)) - if err != nil { - fmt.Printf("ERROR: %v\n", err) - } else { - fmt.Println(r.URL) - } - // Output: https://microsoft.com/a/b/c/ -} - -// Create a request with query parameters -func ExampleWithQueryParameters() { - params := map[string]interface{}{ - "q1": "value1", - "q2": "value2", - } - r, err := Prepare(&http.Request{}, - WithBaseURL("https://microsoft.com/"), - WithPath("/a/b/c/"), - WithQueryParameters(params)) - if err != nil { - fmt.Printf("ERROR: %v\n", err) - } else { - fmt.Println(r.URL) - } - // Output: https://microsoft.com/a/b/c/?q1=value1&q2=value2 -} - -func TestCreatePreparerDoesNotModify(t *testing.T) { - r1 := &http.Request{} - p := CreatePreparer() - r2, err := p.Prepare(r1) - if err != nil { - t.Errorf("autorest: CreatePreparer failed (%v)", err) - } - if !reflect.DeepEqual(r1, r2) { - t.Errorf("autorest: CreatePreparer without decorators modified the request") - } -} - -func TestCreatePreparerRunsDecoratorsInOrder(t *testing.T) { - p := CreatePreparer(WithBaseURL("https://microsoft.com/"), WithPath("1"), WithPath("2"), WithPath("3")) - r, err := p.Prepare(&http.Request{}) - if err != nil { - t.Errorf("autorest: CreatePreparer failed (%v)", err) - } - if r.URL.String() != "https://microsoft.com/1/2/3" { - t.Errorf("autorest: CreatePreparer failed to run decorators in order") - } -} - 
-func TestAsContentType(t *testing.T) { - r, err := Prepare(mocks.NewRequest(), AsContentType("application/text")) - if err != nil { - fmt.Printf("ERROR: %v", err) - } - if r.Header.Get(headerContentType) != "application/text" { - t.Errorf("autorest: AsContentType failed to add header (%s=%s)", headerContentType, r.Header.Get(headerContentType)) - } -} - -func TestAsFormURLEncoded(t *testing.T) { - r, err := Prepare(mocks.NewRequest(), AsFormURLEncoded()) - if err != nil { - fmt.Printf("ERROR: %v", err) - } - if r.Header.Get(headerContentType) != mimeTypeFormPost { - t.Errorf("autorest: AsFormURLEncoded failed to add header (%s=%s)", headerContentType, r.Header.Get(headerContentType)) - } -} - -func TestAsJSON(t *testing.T) { - r, err := Prepare(mocks.NewRequest(), AsJSON()) - if err != nil { - fmt.Printf("ERROR: %v", err) - } - if r.Header.Get(headerContentType) != mimeTypeJSON { - t.Errorf("autorest: AsJSON failed to add header (%s=%s)", headerContentType, r.Header.Get(headerContentType)) - } -} - -func TestWithNothing(t *testing.T) { - r1 := mocks.NewRequest() - r2, err := Prepare(r1, WithNothing()) - if err != nil { - t.Errorf("autorest: WithNothing returned an unexpected error (%v)", err) - } - - if !reflect.DeepEqual(r1, r2) { - t.Error("azure: WithNothing modified the passed HTTP Request") - } -} - -func TestWithBearerAuthorization(t *testing.T) { - r, err := Prepare(mocks.NewRequest(), WithBearerAuthorization("SOME-TOKEN")) - if err != nil { - fmt.Printf("ERROR: %v", err) - } - if r.Header.Get(headerAuthorization) != "Bearer SOME-TOKEN" { - t.Errorf("autorest: WithBearerAuthorization failed to add header (%s=%s)", headerAuthorization, r.Header.Get(headerAuthorization)) - } -} - -func TestWithUserAgent(t *testing.T) { - r, err := Prepare(mocks.NewRequest(), WithUserAgent("User Agent Go")) - if err != nil { - fmt.Printf("ERROR: %v", err) - } - if r.Header.Get(headerUserAgent) != "User Agent Go" { - t.Errorf("autorest: WithUserAgent failed to add header 
(%s=%s)", headerUserAgent, r.Header.Get(headerUserAgent)) - } -} - -func TestWithMethod(t *testing.T) { - r, _ := Prepare(mocks.NewRequest(), WithMethod("HEAD")) - if r.Method != "HEAD" { - t.Error("autorest: WithMethod failed to set HTTP method header") - } -} - -func TestAsDelete(t *testing.T) { - r, _ := Prepare(mocks.NewRequest(), AsDelete()) - if r.Method != "DELETE" { - t.Error("autorest: AsDelete failed to set HTTP method header to DELETE") - } -} - -func TestAsGet(t *testing.T) { - r, _ := Prepare(mocks.NewRequest(), AsGet()) - if r.Method != "GET" { - t.Error("autorest: AsGet failed to set HTTP method header to GET") - } -} - -func TestAsHead(t *testing.T) { - r, _ := Prepare(mocks.NewRequest(), AsHead()) - if r.Method != "HEAD" { - t.Error("autorest: AsHead failed to set HTTP method header to HEAD") - } -} - -func TestAsOptions(t *testing.T) { - r, _ := Prepare(mocks.NewRequest(), AsOptions()) - if r.Method != "OPTIONS" { - t.Error("autorest: AsOptions failed to set HTTP method header to OPTIONS") - } -} - -func TestAsPatch(t *testing.T) { - r, _ := Prepare(mocks.NewRequest(), AsPatch()) - if r.Method != "PATCH" { - t.Error("autorest: AsPatch failed to set HTTP method header to PATCH") - } -} - -func TestAsPost(t *testing.T) { - r, _ := Prepare(mocks.NewRequest(), AsPost()) - if r.Method != "POST" { - t.Error("autorest: AsPost failed to set HTTP method header to POST") - } -} - -func TestAsPut(t *testing.T) { - r, _ := Prepare(mocks.NewRequest(), AsPut()) - if r.Method != "PUT" { - t.Error("autorest: AsPut failed to set HTTP method header to PUT") - } -} - -func TestPrepareWithNullRequest(t *testing.T) { - _, err := Prepare(nil) - if err == nil { - t.Error("autorest: Prepare failed to return an error when given a null http.Request") - } -} - -func TestWithFormDataSetsContentLength(t *testing.T) { - v := url.Values{} - v.Add("name", "Rob Pike") - v.Add("age", "42") - - r, err := Prepare(&http.Request{}, - WithFormData(v)) - if err != nil { - 
t.Errorf("autorest: WithFormData failed with error (%v)", err) - } - - b, err := ioutil.ReadAll(r.Body) - if err != nil { - t.Errorf("autorest: WithFormData failed with error (%v)", err) - } - - if r.ContentLength != int64(len(b)) { - t.Errorf("autorest:WithFormData set Content-Length to %v, expected %v", r.ContentLength, len(b)) - } -} - -func TestWithJSONSetsContentLength(t *testing.T) { - r, err := Prepare(&http.Request{}, - WithJSON(&mocks.T{Name: "Rob Pike", Age: 42})) - if err != nil { - t.Errorf("autorest: WithJSON failed with error (%v)", err) - } - - b, err := ioutil.ReadAll(r.Body) - if err != nil { - t.Errorf("autorest: WithJSON failed with error (%v)", err) - } - - if r.ContentLength != int64(len(b)) { - t.Errorf("autorest:WithJSON set Content-Length to %v, expected %v", r.ContentLength, len(b)) - } -} - -func TestWithHeaderAllocatesHeaders(t *testing.T) { - r, err := Prepare(mocks.NewRequest(), WithHeader("x-foo", "bar")) - if err != nil { - t.Errorf("autorest: WithHeader failed (%v)", err) - } - if r.Header.Get("x-foo") != "bar" { - t.Errorf("autorest: WithHeader failed to add header (%s=%s)", "x-foo", r.Header.Get("x-foo")) - } -} - -func TestWithPathCatchesNilURL(t *testing.T) { - _, err := Prepare(&http.Request{}, WithPath("a")) - if err == nil { - t.Errorf("autorest: WithPath failed to catch a nil URL") - } -} - -func TestWithEscapedPathParametersCatchesNilURL(t *testing.T) { - _, err := Prepare(&http.Request{}, WithEscapedPathParameters(map[string]interface{}{"foo": "bar"})) - if err == nil { - t.Errorf("autorest: WithEscapedPathParameters failed to catch a nil URL") - } -} - -func TestWithPathParametersCatchesNilURL(t *testing.T) { - _, err := Prepare(&http.Request{}, WithPathParameters(map[string]interface{}{"foo": "bar"})) - if err == nil { - t.Errorf("autorest: WithPathParameters failed to catch a nil URL") - } -} - -func TestWithQueryParametersCatchesNilURL(t *testing.T) { - _, err := Prepare(&http.Request{}, 
WithQueryParameters(map[string]interface{}{"foo": "bar"})) - if err == nil { - t.Errorf("autorest: WithQueryParameters failed to catch a nil URL") - } -} - -func TestModifyingExistingRequest(t *testing.T) { - r, err := Prepare(mocks.NewRequestForURL("https://bing.com"), WithPath("search"), WithQueryParameters(map[string]interface{}{"q": "golang"})) - if err != nil { - t.Errorf("autorest: Preparing an existing request returned an error (%v)", err) - } - if r.URL.String() != "https://bing.com/search?q=golang" { - t.Errorf("autorest: Preparing an existing request failed (%s)", r.URL) - } -} - -func TestWithAuthorizer(t *testing.T) { - r1 := mocks.NewRequest() - - na := &NullAuthorizer{} - r2, err := Prepare(r1, - na.WithAuthorization()) - if err != nil { - t.Errorf("autorest: NullAuthorizer#WithAuthorization returned an unexpected error (%v)", err) - } else if !reflect.DeepEqual(r1, r2) { - t.Errorf("autorest: NullAuthorizer#WithAuthorization modified the request -- received %v, expected %v", r2, r1) - } -} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/responder.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/responder.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/responder.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/responder.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,163 +0,0 @@ -package autorest - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "net/http" -) - -// Responder is the interface that wraps the Respond method. -// -// Respond accepts and reacts to an http.Response. Implementations must ensure to not share or hold -// state since Responders may be shared and re-used. 
-type Responder interface { - Respond(*http.Response) error -} - -// ResponderFunc is a method that implements the Responder interface. -type ResponderFunc func(*http.Response) error - -// Respond implements the Responder interface on ResponderFunc. -func (rf ResponderFunc) Respond(r *http.Response) error { - return rf(r) -} - -// RespondDecorator takes and possibly decorates, by wrapping, a Responder. Decorators may react to -// the http.Response and pass it along or, first, pass the http.Response along then react. -type RespondDecorator func(Responder) Responder - -// CreateResponder creates, decorates, and returns a Responder. Without decorators, the returned -// Responder returns the passed http.Response unmodified. Responders may or may not be safe to share -// and re-used: It depends on the applied decorators. For example, a standard decorator that closes -// the response body is fine to share whereas a decorator that reads the body into a passed struct -// is not. -// -// To prevent memory leaks, ensure that at least one Responder closes the response body. -func CreateResponder(decorators ...RespondDecorator) Responder { - return DecorateResponder( - Responder(ResponderFunc(func(r *http.Response) error { return nil })), - decorators...) -} - -// DecorateResponder accepts a Responder and a, possibly empty, set of RespondDecorators, which it -// applies to the Responder. Decorators are applied in the order received, but their affect upon the -// request depends on whether they are a pre-decorator (react to the http.Response and then pass it -// along) or a post-decorator (pass the http.Response along and then react). -func DecorateResponder(r Responder, decorators ...RespondDecorator) Responder { - for _, decorate := range decorators { - r = decorate(r) - } - return r -} - -// Respond accepts an http.Response and a, possibly empty, set of RespondDecorators. -// It creates a Responder from the decorators it then applies to the passed http.Response. 
-func Respond(r *http.Response, decorators ...RespondDecorator) error { - if r == nil { - return nil - } - return CreateResponder(decorators...).Respond(r) -} - -// ByIgnoring returns a RespondDecorator that ignores the passed http.Response passing it unexamined -// to the next RespondDecorator. -func ByIgnoring() RespondDecorator { - return func(r Responder) Responder { - return ResponderFunc(func(resp *http.Response) error { - return r.Respond(resp) - }) - } -} - -// ByClosing returns a RespondDecorator that first invokes the passed Responder after which it -// closes the response body. Since the passed Responder is invoked prior to closing the response -// body, the decorator may occur anywhere within the set. -func ByClosing() RespondDecorator { - return func(r Responder) Responder { - return ResponderFunc(func(resp *http.Response) error { - err := r.Respond(resp) - if resp != nil && resp.Body != nil { - resp.Body.Close() - } - return err - }) - } -} - -// ByClosingIfError returns a RespondDecorator that first invokes the passed Responder after which -// it closes the response if the passed Responder returns an error and the response body exists. -func ByClosingIfError() RespondDecorator { - return func(r Responder) Responder { - return ResponderFunc(func(resp *http.Response) error { - err := r.Respond(resp) - if err != nil && resp != nil && resp.Body != nil { - resp.Body.Close() - } - return err - }) - } -} - -// ByUnmarshallingJSON returns a RespondDecorator that decodes a JSON document returned in the -// response Body into the value pointed to by v. 
-func ByUnmarshallingJSON(v interface{}) RespondDecorator { - return func(r Responder) Responder { - return ResponderFunc(func(resp *http.Response) error { - err := r.Respond(resp) - if err == nil { - b := bytes.Buffer{} - d := json.NewDecoder(io.TeeReader(resp.Body, &b)) - err = d.Decode(v) - if err != nil { - err = fmt.Errorf("Error (%v) occurred decoding JSON (\"%s\")", err, b.String()) - } - } - return err - }) - } -} - -// WithErrorUnlessStatusCode returns a RespondDecorator that emits an error unless the response -// StatusCode is among the set passed. Since these are artificial errors, the response body -// may still require closing. -func WithErrorUnlessStatusCode(codes ...int) RespondDecorator { - return func(r Responder) Responder { - return ResponderFunc(func(resp *http.Response) error { - err := r.Respond(resp) - if err == nil && !ResponseHasStatusCode(resp, codes...) { - err = NewError("autorest", "WithErrorUnlessStatusCode", "%v %v failed with %s", - resp.Request.Method, - resp.Request.URL, - resp.Status) - } - return err - }) - } -} - -// WithErrorUnlessOK returns a RespondDecorator that emits an error if the response StatusCode is -// anything other than HTTP 200. -func WithErrorUnlessOK() RespondDecorator { - return WithErrorUnlessStatusCode(http.StatusOK) -} - -// ExtractHeader extracts all values of the specified header from the http.Response. It returns an -// empty string slice if the passed http.Response is nil or the header does not exist. -func ExtractHeader(header string, resp *http.Response) []string { - if resp != nil && resp.Header != nil { - return resp.Header[http.CanonicalHeaderKey(header)] - } - return nil -} - -// ExtractHeaderValue extracts the first value of the specified header from the http.Response. It -// returns an empty string if the passed http.Response is nil or the header does not exist. 
-func ExtractHeaderValue(header string, resp *http.Response) string { - h := ExtractHeader(header, resp) - if len(h) > 0 { - return h[0] - } - return "" -} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/responder_test.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/responder_test.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/responder_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/responder_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,387 +0,0 @@ -package autorest - -import ( - "fmt" - "net/http" - "reflect" - "strings" - "testing" - - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/mocks" -) - -func ExampleWithErrorUnlessOK() { - r := mocks.NewResponse() - r.Request = mocks.NewRequest() - - // Respond and leave the response body open (for a subsequent responder to close) - err := Respond(r, - WithErrorUnlessOK(), - ByClosingIfError()) - - if err == nil { - fmt.Printf("%s of %s returned HTTP 200", r.Request.Method, r.Request.URL) - - // Complete handling the response and close the body - Respond(r, - ByClosing()) - } - // Output: GET of https://microsoft.com/a/b/c/ returned HTTP 200 -} - -func ExampleByUnmarshallingJSON() { - c := ` - { - "name" : "Rob Pike", - "age" : 42 - } - ` - - type V struct { - Name string `json:"name"` - Age int `json:"age"` - } - - v := &V{} - - Respond(mocks.NewResponseWithContent(c), - ByUnmarshallingJSON(v), - ByClosing()) - - fmt.Printf("%s is %d years old\n", v.Name, v.Age) - // Output: Rob Pike is 42 years old -} - -func TestCreateResponderDoesNotModify(t *testing.T) { - r1 := mocks.NewResponse() - r2 := mocks.NewResponse() - p := 
CreateResponder() - err := p.Respond(r1) - if err != nil { - t.Errorf("autorest: CreateResponder failed (%v)", err) - } - if !reflect.DeepEqual(r1, r2) { - t.Errorf("autorest: CreateResponder without decorators modified the response") - } -} - -func TestCreateResponderRunsDecoratorsInOrder(t *testing.T) { - s := "" - - d := func(n int) RespondDecorator { - return func(r Responder) Responder { - return ResponderFunc(func(resp *http.Response) error { - err := r.Respond(resp) - if err == nil { - s += fmt.Sprintf("%d", n) - } - return err - }) - } - } - - p := CreateResponder(d(1), d(2), d(3)) - err := p.Respond(&http.Response{}) - if err != nil { - t.Errorf("autorest: Respond failed (%v)", err) - } - - if s != "123" { - t.Errorf("autorest: CreateResponder invoked decorators in an incorrect order; expected '123', received '%s'", s) - } -} - -func TestByIgnoring(t *testing.T) { - r := mocks.NewResponse() - - Respond(r, - (func() RespondDecorator { - return func(r Responder) Responder { - return ResponderFunc(func(r2 *http.Response) error { - r1 := mocks.NewResponse() - if !reflect.DeepEqual(r1, r2) { - t.Errorf("autorest: ByIgnoring modified the HTTP Response -- received %v, expected %v", r2, r1) - } - return nil - }) - } - })(), - ByIgnoring(), - ByClosing()) -} - -func TestByClosing(t *testing.T) { - r := mocks.NewResponse() - err := Respond(r, ByClosing()) - if err != nil { - t.Errorf("autorest: ByClosing failed (%v)", err) - } - if r.Body.(*mocks.Body).IsOpen() { - t.Errorf("autorest: ByClosing did not close the response body") - } -} - -func TestByClosingAcceptsNilResponse(t *testing.T) { - r := mocks.NewResponse() - - Respond(r, - (func() RespondDecorator { - return func(r Responder) Responder { - return ResponderFunc(func(resp *http.Response) error { - resp.Body.Close() - r.Respond(nil) - return nil - }) - } - })(), - ByClosing()) -} - -func TestByClosingAcceptsNilBody(t *testing.T) { - r := mocks.NewResponse() - - Respond(r, - (func() RespondDecorator { - return 
func(r Responder) Responder { - return ResponderFunc(func(resp *http.Response) error { - resp.Body.Close() - resp.Body = nil - r.Respond(resp) - return nil - }) - } - })(), - ByClosing()) -} - -func TestByClosingClosesEvenAfterErrors(t *testing.T) { - var e error - - r := mocks.NewResponse() - Respond(r, - withErrorRespondDecorator(&e), - ByClosing()) - - if r.Body.(*mocks.Body).IsOpen() { - t.Errorf("autorest: ByClosing did not close the response body after an error occurred") - } -} - -func TestByClosingClosesReturnsNestedErrors(t *testing.T) { - var e error - - r := mocks.NewResponse() - err := Respond(r, - withErrorRespondDecorator(&e), - ByClosing()) - - if err == nil || !reflect.DeepEqual(e, err) { - t.Errorf("autorest: ByClosing failed to return a nested error") - } -} - -func TestByClosingIfErrorAcceptsNilResponse(t *testing.T) { - var e error - - r := mocks.NewResponse() - - Respond(r, - withErrorRespondDecorator(&e), - (func() RespondDecorator { - return func(r Responder) Responder { - return ResponderFunc(func(resp *http.Response) error { - resp.Body.Close() - r.Respond(nil) - return nil - }) - } - })(), - ByClosingIfError()) -} - -func TestByClosingIfErrorAcceptsNilBody(t *testing.T) { - var e error - - r := mocks.NewResponse() - - Respond(r, - withErrorRespondDecorator(&e), - (func() RespondDecorator { - return func(r Responder) Responder { - return ResponderFunc(func(resp *http.Response) error { - resp.Body.Close() - resp.Body = nil - r.Respond(resp) - return nil - }) - } - })(), - ByClosingIfError()) -} - -func TestByClosingIfErrorClosesIfAnErrorOccurs(t *testing.T) { - var e error - - r := mocks.NewResponse() - Respond(r, - withErrorRespondDecorator(&e), - ByClosingIfError()) - - if r.Body.(*mocks.Body).IsOpen() { - t.Errorf("autorest: ByClosingIfError did not close the response body after an error occurred") - } -} - -func TestByClosingIfErrorDoesNotClosesIfNoErrorOccurs(t *testing.T) { - r := mocks.NewResponse() - Respond(r, - ByClosingIfError()) 
- - if !r.Body.(*mocks.Body).IsOpen() { - t.Errorf("autorest: ByClosingIfError closed the response body even though no error occurred") - } -} - -func TestByUnmarhallingJSON(t *testing.T) { - v := &mocks.T{} - r := mocks.NewResponseWithContent(jsonT) - err := Respond(r, - ByUnmarshallingJSON(v), - ByClosing()) - if err != nil { - t.Errorf("autorest: ByUnmarshallingJSON failed (%v)", err) - } - if v.Name != "Rob Pike" || v.Age != 42 { - t.Errorf("autorest: ByUnmarshallingJSON failed to properly unmarshal") - } -} - -func TestByUnmarhallingJSONIncludesJSONInErrors(t *testing.T) { - v := &mocks.T{} - j := jsonT[0 : len(jsonT)-2] - r := mocks.NewResponseWithContent(j) - err := Respond(r, - ByUnmarshallingJSON(v), - ByClosing()) - if err == nil || !strings.Contains(err.Error(), j) { - t.Errorf("autorest: ByUnmarshallingJSON failed to return JSON in error (%v)", err) - } -} - -func TestRespondAcceptsNullResponse(t *testing.T) { - err := Respond(nil) - if err != nil { - t.Errorf("autorest: Respond returned an unexpected error when given a null Response (%v)", err) - } -} - -func TestWithErrorUnlessStatusCode(t *testing.T) { - r := mocks.NewResponse() - r.Request = mocks.NewRequest() - r.Status = "400 BadRequest" - r.StatusCode = http.StatusBadRequest - - err := Respond(r, - WithErrorUnlessStatusCode(http.StatusBadRequest, http.StatusUnauthorized, http.StatusInternalServerError), - ByClosingIfError()) - - if err != nil { - t.Errorf("autorest: WithErrorUnlessStatusCode returned an error (%v) for an acceptable status code (%s)", err, r.Status) - } -} - -func TestWithErrorUnlessStatusCodeEmitsErrorForUnacceptableStatusCode(t *testing.T) { - r := mocks.NewResponse() - r.Request = mocks.NewRequest() - r.Status = "400 BadRequest" - r.StatusCode = http.StatusBadRequest - - err := Respond(r, - WithErrorUnlessStatusCode(http.StatusOK, http.StatusUnauthorized, http.StatusInternalServerError), - ByClosingIfError()) - - if err == nil { - t.Errorf("autorest: WithErrorUnlessStatusCode 
failed to return an error for an unacceptable status code (%s)", r.Status) - } -} - -func TestWithErrorUnlessOK(t *testing.T) { - r := mocks.NewResponse() - r.Request = mocks.NewRequest() - - err := Respond(r, - WithErrorUnlessOK(), - ByClosingIfError()) - - if err != nil { - t.Errorf("autorest: WithErrorUnlessOK returned an error for OK status code (%v)", err) - } -} - -func TestWithErrorUnlessOKEmitsErrorIfNotOK(t *testing.T) { - r := mocks.NewResponse() - r.Request = mocks.NewRequest() - r.Status = "400 BadRequest" - r.StatusCode = http.StatusBadRequest - - err := Respond(r, - WithErrorUnlessOK(), - ByClosingIfError()) - - if err == nil { - t.Errorf("autorest: WithErrorUnlessOK failed to return an error for a non-OK status code (%v)", err) - } -} - -func TestExtractHeader(t *testing.T) { - r := mocks.NewResponse() - v := []string{"v1", "v2", "v3"} - mocks.SetResponseHeaderValues(r, mocks.TestHeader, v) - - if !reflect.DeepEqual(ExtractHeader(mocks.TestHeader, r), v) { - t.Errorf("autorest: ExtractHeader failed to retrieve the expected header -- expected [%s]%v, received [%s]%v", - mocks.TestHeader, v, mocks.TestHeader, ExtractHeader(mocks.TestHeader, r)) - } -} - -func TestExtractHeaderHandlesMissingHeader(t *testing.T) { - var v []string - r := mocks.NewResponse() - - if !reflect.DeepEqual(ExtractHeader(mocks.TestHeader, r), v) { - t.Errorf("autorest: ExtractHeader failed to handle a missing header -- expected %v, received %v", - v, ExtractHeader(mocks.TestHeader, r)) - } -} - -func TestExtractHeaderValue(t *testing.T) { - r := mocks.NewResponse() - v := "v1" - mocks.SetResponseHeader(r, mocks.TestHeader, v) - - if ExtractHeaderValue(mocks.TestHeader, r) != v { - t.Errorf("autorest: ExtractHeader failed to retrieve the expected header -- expected [%s]%v, received [%s]%v", - mocks.TestHeader, v, mocks.TestHeader, ExtractHeaderValue(mocks.TestHeader, r)) - } -} - -func TestExtractHeaderValueHandlesMissingHeader(t *testing.T) { - r := mocks.NewResponse() - v := "" 
- - if ExtractHeaderValue(mocks.TestHeader, r) != v { - t.Errorf("autorest: ExtractHeader failed to retrieve the expected header -- expected [%s]%v, received [%s]%v", - mocks.TestHeader, v, mocks.TestHeader, ExtractHeaderValue(mocks.TestHeader, r)) - } -} - -func TestExtractHeaderValueRetrievesFirstValue(t *testing.T) { - r := mocks.NewResponse() - v := []string{"v1", "v2", "v3"} - mocks.SetResponseHeaderValues(r, mocks.TestHeader, v) - - if ExtractHeaderValue(mocks.TestHeader, r) != v[0] { - t.Errorf("autorest: ExtractHeader failed to retrieve the expected header -- expected [%s]%v, received [%s]%v", - mocks.TestHeader, v[0], mocks.TestHeader, ExtractHeaderValue(mocks.TestHeader, r)) - } -} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/sender.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/sender.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/sender.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/sender.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,211 +0,0 @@ -package autorest - -import ( - "log" - "math" - "net/http" - "time" -) - -// Sender is the interface that wraps the Do method to send HTTP requests. -// -// The standard http.Client conforms to this interface. -type Sender interface { - Do(*http.Request) (*http.Response, error) -} - -// SenderFunc is a method that implements the Sender interface. -type SenderFunc func(*http.Request) (*http.Response, error) - -// Do implements the Sender interface on SenderFunc. -func (sf SenderFunc) Do(r *http.Request) (*http.Response, error) { - return sf(r) -} - -// SendDecorator takes and possibily decorates, by wrapping, a Sender. 
Decorators may affect the -// http.Request and pass it along or, first, pass the http.Request along then react to the -// http.Response result. -type SendDecorator func(Sender) Sender - -// CreateSender creates, decorates, and returns, as a Sender, the default http.Client. -func CreateSender(decorators ...SendDecorator) Sender { - return DecorateSender(&http.Client{}, decorators...) -} - -// DecorateSender accepts a Sender and a, possibly empty, set of SendDecorators, which is applies to -// the Sender. Decorators are applied in the order received, but their affect upon the request -// depends on whether they are a pre-decorator (change the http.Request and then pass it along) or a -// post-decorator (pass the http.Request along and react to the results in http.Response). -func DecorateSender(s Sender, decorators ...SendDecorator) Sender { - for _, decorate := range decorators { - s = decorate(s) - } - return s -} - -// Send sends, by means of the default http.Client, the passed http.Request, returning the -// http.Response and possible error. It also accepts a, possibly empty, set of SendDecorators which -// it will apply the http.Client before invoking the Do method. -// -// Send is a convenience method and not recommended for production. Advanced users should use -// SendWithSender, passing and sharing their own Sender (e.g., instance of http.Client). -// -// Send will not poll or retry requests. -func Send(r *http.Request, decorators ...SendDecorator) (*http.Response, error) { - return SendWithSender(&http.Client{}, r, decorators...) -} - -// SendWithSender sends the passed http.Request, through the provided Sender, returning the -// http.Response and possible error. It also accepts a, possibly empty, set of SendDecorators which -// it will apply the http.Client before invoking the Do method. -// -// SendWithSender will not poll or retry requests. 
-func SendWithSender(s Sender, r *http.Request, decorators ...SendDecorator) (*http.Response, error) { - return DecorateSender(s, decorators...).Do(r) -} - -// AfterDelay returns a SendDecorator that delays for the passed time.Duration before -// invoking the Sender. -func AfterDelay(d time.Duration) SendDecorator { - return func(s Sender) Sender { - return SenderFunc(func(r *http.Request) (*http.Response, error) { - time.Sleep(d) - return s.Do(r) - }) - } -} - -// AfterRetryDelay returns a SendDecorator that delays for the number of seconds specified in the -// Retry-After header of the prior response when polling is required. -func AfterRetryDelay(defaultDelay time.Duration, codes ...int) SendDecorator { - delay := time.Duration(0) - return func(s Sender) Sender { - return SenderFunc(func(r *http.Request) (*http.Response, error) { - if delay > time.Duration(0) { - time.Sleep(delay) - } - resp, err := s.Do(r) - if ResponseRequiresPolling(resp, codes...) { - delay = GetPollingDelay(resp, defaultDelay) - } else { - delay = time.Duration(0) - } - return resp, err - }) - } -} - -// AsIs returns a SendDecorator that invokes the passed Sender without modifying the http.Request. -func AsIs() SendDecorator { - return func(s Sender) Sender { - return SenderFunc(func(r *http.Request) (*http.Response, error) { - return s.Do(r) - }) - } -} - -// WithLogging returns a SendDecorator that implements simple before and after logging of the -// request. -func WithLogging(logger *log.Logger) SendDecorator { - return func(s Sender) Sender { - return SenderFunc(func(r *http.Request) (*http.Response, error) { - logger.Printf("Sending %s %s\n", r.Method, r.URL) - resp, err := s.Do(r) - logger.Printf("%s %s received %s\n", r.Method, r.URL, resp.Status) - return resp, err - }) - } -} - -// DoCloseIfError returns a SendDecorator that first invokes the passed Sender after which -// it closes the response if the passed Sender returns an error and the response body exists. 
-func DoCloseIfError() SendDecorator { - return func(s Sender) Sender { - return SenderFunc(func(r *http.Request) (*http.Response, error) { - resp, err := s.Do(r) - if err != nil { - Respond(resp, ByClosing()) - } - return resp, err - }) - } -} - -// DoErrorIfStatusCode returns a SendDecorator that emits an error if the response StatusCode is -// among the set passed. Since these are artificial errors, the response body may still require -// closing. -func DoErrorIfStatusCode(codes ...int) SendDecorator { - return func(s Sender) Sender { - return SenderFunc(func(r *http.Request) (*http.Response, error) { - resp, err := s.Do(r) - if err == nil && ResponseHasStatusCode(resp, codes...) { - err = NewError("autorest", "DoErrorIfStatusCode", "%v %v failed with %s", - resp.Request.Method, - resp.Request.URL, - resp.Status) - } - return resp, err - }) - } -} - -// DoErrorUnlessStatusCode returns a SendDecorator that emits an error unless the response -// StatusCode is among the set passed. Since these are artificial errors, the response body -// may still require closing. -func DoErrorUnlessStatusCode(codes ...int) SendDecorator { - return func(s Sender) Sender { - return SenderFunc(func(r *http.Request) (*http.Response, error) { - resp, err := s.Do(r) - if err == nil && !ResponseHasStatusCode(resp, codes...) { - err = NewError("autorest", "DoErrorUnlessStatusCode", "%v %v failed with %s", - resp.Request.Method, - resp.Request.URL, - resp.Status) - } - return resp, err - }) - } -} - -// DoRetryForAttempts returns a SendDecorator that retries the request for up to the specified -// number of attempts, exponentially backing off between requests using the supplied backoff -// time.Duration (which may be zero). 
-func DoRetryForAttempts(attempts int, backoff time.Duration) SendDecorator { - return func(s Sender) Sender { - return SenderFunc(func(r *http.Request) (resp *http.Response, err error) { - for attempt := 0; attempt < attempts; attempt++ { - resp, err = s.Do(r) - if err == nil { - return resp, err - } - DelayForBackoff(backoff, attempt) - } - return resp, err - }) - } -} - -// DoRetryForDuration returns a SendDecorator that retries the request until the total time is equal -// to or greater than the specified duration, exponentially backing off between requests using the -// supplied backoff time.Duration (which may be zero). -func DoRetryForDuration(d time.Duration, backoff time.Duration) SendDecorator { - return func(s Sender) Sender { - return SenderFunc(func(r *http.Request) (resp *http.Response, err error) { - end := time.Now().Add(d) - for attempt := 0; time.Now().Before(end); attempt++ { - resp, err = s.Do(r) - if err == nil { - return resp, err - } - DelayForBackoff(backoff, attempt) - } - return resp, err - }) - } -} - -// DelayForBackoff invokes time.Sleep for the supplied backoff duration raised to the power of -// passed attempt (i.e., an exponential backoff delay). Backoff may be zero. 
-func DelayForBackoff(backoff time.Duration, attempt int) { - time.Sleep(time.Duration(math.Pow(float64(backoff), float64(attempt)))) -} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/sender_test.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/sender_test.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/sender_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/sender_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,504 +0,0 @@ -package autorest - -import ( - "fmt" - "log" - "net/http" - "os" - "reflect" - "testing" - "time" - - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/mocks" -) - -func ExampleSendWithSender() { - client := mocks.NewSender() - client.EmitStatus("202 Accepted", http.StatusAccepted) - - logger := log.New(os.Stdout, "autorest: ", 0) - na := NullAuthorizer{} - - req, _ := Prepare(&http.Request{}, - AsGet(), - WithBaseURL("https://microsoft.com/a/b/c/"), - na.WithAuthorization()) - - r, _ := SendWithSender(client, req, - WithLogging(logger), - DoErrorIfStatusCode(http.StatusAccepted), - DoCloseIfError(), - DoRetryForAttempts(5, time.Duration(0))) - - Respond(r, - ByClosing()) - - // Output: - // autorest: Sending GET https://microsoft.com/a/b/c/ - // autorest: GET https://microsoft.com/a/b/c/ received 202 Accepted - // autorest: Sending GET https://microsoft.com/a/b/c/ - // autorest: GET https://microsoft.com/a/b/c/ received 202 Accepted - // autorest: Sending GET https://microsoft.com/a/b/c/ - // autorest: GET https://microsoft.com/a/b/c/ received 202 Accepted - // autorest: Sending GET https://microsoft.com/a/b/c/ - // autorest: GET https://microsoft.com/a/b/c/ 
received 202 Accepted - // autorest: Sending GET https://microsoft.com/a/b/c/ - // autorest: GET https://microsoft.com/a/b/c/ received 202 Accepted -} - -func ExampleDoRetryForAttempts() { - client := mocks.NewSender() - client.EmitErrors(10) - - // Retry with backoff -- ensure returned Bodies are closed - r, _ := SendWithSender(client, mocks.NewRequest(), - DoCloseIfError(), - DoRetryForAttempts(5, time.Duration(0))) - - Respond(r, - ByClosing()) - - fmt.Printf("Retry stopped after %d attempts", client.Attempts()) - // Output: Retry stopped after 5 attempts -} - -func ExampleDoErrorIfStatusCode() { - client := mocks.NewSender() - client.EmitStatus("204 NoContent", http.StatusNoContent) - - // Chain decorators to retry the request, up to five times, if the status code is 204 - r, _ := SendWithSender(client, mocks.NewRequest(), - DoErrorIfStatusCode(http.StatusNoContent), - DoCloseIfError(), - DoRetryForAttempts(5, time.Duration(0))) - - Respond(r, - ByClosing()) - - fmt.Printf("Retry stopped after %d attempts with code %s", client.Attempts(), r.Status) - // Output: Retry stopped after 5 attempts with code 204 NoContent -} - -func TestSendWithSenderRunsDecoratorsInOrder(t *testing.T) { - client := mocks.NewSender() - s := "" - - r, err := SendWithSender(client, mocks.NewRequest(), - withMessage(&s, "a"), - withMessage(&s, "b"), - withMessage(&s, "c")) - if err != nil { - t.Errorf("autorest: SendWithSender returned an error (%v)", err) - } - - Respond(r, - ByClosing()) - - if s != "abc" { - t.Errorf("autorest: SendWithSender invoke decorators out of order; expected 'abc', received '%s'", s) - } -} - -func TestCreateSender(t *testing.T) { - f := false - - s := CreateSender( - (func() SendDecorator { - return func(s Sender) Sender { - return SenderFunc(func(r *http.Request) (*http.Response, error) { - f = true - return nil, nil - }) - } - })()) - s.Do(&http.Request{}) - - if !f { - t.Error("autorest: CreateSender failed to apply supplied decorator") - } -} - -func 
TestSend(t *testing.T) { - f := false - - Send(&http.Request{}, - (func() SendDecorator { - return func(s Sender) Sender { - return SenderFunc(func(r *http.Request) (*http.Response, error) { - f = true - return nil, nil - }) - } - })()) - - if !f { - t.Error("autorest: Send failed to apply supplied decorator") - } -} - -func TestAfterDelayWaits(t *testing.T) { - client := mocks.NewSender() - - d := 10 * time.Millisecond - - tt := time.Now() - r, _ := SendWithSender(client, mocks.NewRequest(), - AfterDelay(d)) - s := time.Since(tt) - if s < d { - t.Error("autorest: AfterDelay failed to wait for at least the specified duration") - } - - Respond(r, - ByClosing()) -} - -func TestAfterRetryDelayWaits(t *testing.T) { - client := mocks.NewSender() - client.EmitErrors(-1) - - d := 10 * time.Millisecond - - resp := mocks.NewResponseWithStatus("202 Accepted", http.StatusAccepted) - mocks.SetAcceptedHeaders(resp) - mocks.SetRetryHeader(resp, d) - client.SetResponse(resp) - - tt := time.Now() - r, _ := SendWithSender(client, mocks.NewRequest(), - AfterRetryDelay(d), - DoRetryForAttempts(2, time.Duration(0))) - s := time.Since(tt) - if s < d { - t.Error("autorest: AfterRetryDelay failed to wait for at least the specified duration") - } - - Respond(r, - ByClosing()) -} - -// Disable test for TravisCI -// func TestAfterDelayDoesNotWaitTooLong(t *testing.T) { -// client := mocks.NewSender() - -// // Establish a baseline and then set the wait to 10x that amount -// // -- Waiting 10x the baseline should be long enough for a real test while not slowing the -// // tests down too much -// tt := time.Now() -// SendWithSender(client, mocks.NewRequest()) -// d := 10 * time.Since(tt) - -// tt = time.Now() -// r, _ := SendWithSender(client, mocks.NewRequest(), -// AfterDelay(d)) -// s := time.Since(tt) -// if s > 5*d { -// t.Error("autorest: AfterDelay waited too long (more than five times the specified duration") -// } - -// Respond(r, -// ByClosing()) -// } - -func TestAsIs(t *testing.T) 
{ - client := mocks.NewSender() - - r1 := mocks.NewResponse() - r2, err := SendWithSender(client, mocks.NewRequest(), - (func() SendDecorator { - return func(s Sender) Sender { - return SenderFunc(func(r *http.Request) (*http.Response, error) { - return r1, nil - }) - } - })(), - AsIs()) - if err != nil { - t.Errorf("autorest: AsIs returned an unexpected error (%v)", err) - } else if !reflect.DeepEqual(r1, r2) { - t.Errorf("autorest: AsIs modified the response -- received %v, expected %v", r2, r1) - } - - Respond(r1, - ByClosing()) - Respond(r2, - ByClosing()) -} - -func TestDoCloseIfError(t *testing.T) { - client := mocks.NewSender() - client.EmitStatus("400 BadRequest", http.StatusBadRequest) - - r, _ := SendWithSender(client, mocks.NewRequest(), - DoErrorIfStatusCode(http.StatusBadRequest), - DoCloseIfError()) - - if r.Body.(*mocks.Body).IsOpen() { - t.Error("autorest: Expected DoCloseIfError to close response body -- it was left open") - } - - Respond(r, - ByClosing()) -} - -func TestDoCloseIfErrorAcceptsNilResponse(t *testing.T) { - client := mocks.NewSender() - - SendWithSender(client, mocks.NewRequest(), - (func() SendDecorator { - return func(s Sender) Sender { - return SenderFunc(func(r *http.Request) (*http.Response, error) { - resp, err := s.Do(r) - if err != nil { - resp.Body.Close() - } - return nil, fmt.Errorf("Faux Error") - }) - } - })(), - DoCloseIfError()) -} - -func TestDoCloseIfErrorAcceptsNilBody(t *testing.T) { - client := mocks.NewSender() - - SendWithSender(client, mocks.NewRequest(), - (func() SendDecorator { - return func(s Sender) Sender { - return SenderFunc(func(r *http.Request) (*http.Response, error) { - resp, err := s.Do(r) - if err != nil { - resp.Body.Close() - } - resp.Body = nil - return resp, fmt.Errorf("Faux Error") - }) - } - })(), - DoCloseIfError()) -} - -func TestDoErrorIfStatusCode(t *testing.T) { - client := mocks.NewSender() - client.EmitStatus("400 BadRequest", http.StatusBadRequest) - - r, err := SendWithSender(client, 
mocks.NewRequest(), - DoErrorIfStatusCode(http.StatusBadRequest), - DoCloseIfError()) - if err == nil { - t.Error("autorest: DoErrorIfStatusCode failed to emit an error for passed code") - } - - Respond(r, - ByClosing()) -} - -func TestDoErrorIfStatusCodeIgnoresStatusCodes(t *testing.T) { - client := mocks.NewSender() - client.EmitStatus("202 Accepted", http.StatusAccepted) - - r, err := SendWithSender(client, mocks.NewRequest(), - DoErrorIfStatusCode(http.StatusBadRequest), - DoCloseIfError()) - if err != nil { - t.Error("autorest: DoErrorIfStatusCode failed to ignore a status code") - } - - Respond(r, - ByClosing()) -} - -func TestDoErrorUnlessStatusCode(t *testing.T) { - client := mocks.NewSender() - client.EmitStatus("400 BadRequest", http.StatusBadRequest) - - r, err := SendWithSender(client, mocks.NewRequest(), - DoErrorUnlessStatusCode(http.StatusAccepted), - DoCloseIfError()) - if err == nil { - t.Error("autorest: DoErrorUnlessStatusCode failed to emit an error for an unknown status code") - } - - Respond(r, - ByClosing()) -} - -func TestDoErrorUnlessStatusCodeIgnoresStatusCodes(t *testing.T) { - client := mocks.NewSender() - client.EmitStatus("202 Accepted", http.StatusAccepted) - - r, err := SendWithSender(client, mocks.NewRequest(), - DoErrorUnlessStatusCode(http.StatusAccepted), - DoCloseIfError()) - if err != nil { - t.Error("autorest: DoErrorUnlessStatusCode emitted an error for a knonwn status code") - } - - Respond(r, - ByClosing()) -} - -func TestDoRetryForAttemptsStopsAfterSuccess(t *testing.T) { - client := mocks.NewSender() - - r, err := SendWithSender(client, mocks.NewRequest(), - DoRetryForAttempts(5, time.Duration(0))) - if client.Attempts() != 1 { - t.Errorf("autorest: DoRetryForAttempts failed to stop after success -- expected attempts %v, actual %v", - 1, client.Attempts()) - } - if err != nil { - t.Errorf("autorest: DoRetryForAttempts returned an unexpected error (%v)", err) - } - - Respond(r, - ByClosing()) -} - -func 
TestDoRetryForAttemptsStopsAfterAttempts(t *testing.T) { - client := mocks.NewSender() - client.EmitErrors(10) - - r, err := SendWithSender(client, mocks.NewRequest(), - DoRetryForAttempts(5, time.Duration(0)), - DoCloseIfError()) - if err == nil { - t.Error("autorest: Mock client failed to emit errors") - } - - Respond(r, - ByClosing()) - - if client.Attempts() != 5 { - t.Error("autorest: DoRetryForAttempts failed to stop after specified number of attempts") - } -} - -func TestDoRetryForAttemptsReturnsResponse(t *testing.T) { - client := mocks.NewSender() - client.EmitErrors(1) - - r, err := SendWithSender(client, mocks.NewRequest(), - DoRetryForAttempts(1, time.Duration(0))) - if err == nil { - t.Error("autorest: Mock client failed to emit errors") - } - - if r == nil { - t.Error("autorest: DoRetryForAttempts failed to return the underlying response") - } - - Respond(r, - ByClosing()) -} - -func TestDoRetryForDurationStopsAfterSuccess(t *testing.T) { - client := mocks.NewSender() - - r, err := SendWithSender(client, mocks.NewRequest(), - DoRetryForDuration(10*time.Millisecond, time.Duration(0))) - if client.Attempts() != 1 { - t.Errorf("autorest: DoRetryForDuration failed to stop after success -- expected attempts %v, actual %v", - 1, client.Attempts()) - } - if err != nil { - t.Errorf("autorest: DoRetryForDuration returned an unexpected error (%v)", err) - } - - Respond(r, - ByClosing()) -} - -func TestDoRetryForDurationStopsAfterDuration(t *testing.T) { - client := mocks.NewSender() - client.EmitErrors(-1) - - d := 10 * time.Millisecond - start := time.Now() - r, err := SendWithSender(client, mocks.NewRequest(), - DoRetryForDuration(d, time.Duration(0)), - DoCloseIfError()) - if err == nil { - t.Error("autorest: Mock client failed to emit errors") - } - - Respond(r, - ByClosing()) - - if time.Now().Sub(start) < d { - t.Error("autorest: DoRetryForDuration failed stopped too soon") - } -} - -func TestDoRetryForDurationStopsWithinReason(t *testing.T) { - client := 
mocks.NewSender() - client.EmitErrors(-1) - - d := 10 * time.Millisecond - start := time.Now() - r, err := SendWithSender(client, mocks.NewRequest(), - DoRetryForDuration(d, time.Duration(0)), - DoCloseIfError()) - if err == nil { - t.Error("autorest: Mock client failed to emit errors") - } - - Respond(r, - ByClosing()) - - if time.Now().Sub(start) > (5 * d) { - t.Error("autorest: DoRetryForDuration failed stopped soon enough (exceeded 5 times specified duration)") - } -} - -func TestDoRetryForDurationReturnsResponse(t *testing.T) { - client := mocks.NewSender() - client.EmitErrors(-1) - - r, err := SendWithSender(client, mocks.NewRequest(), - DoRetryForDuration(10*time.Millisecond, time.Duration(0)), - DoCloseIfError()) - if err == nil { - t.Error("autorest: Mock client failed to emit errors") - } - - if r == nil { - t.Error("autorest: DoRetryForDuration failed to return the underlying response") - } - - Respond(r, - ByClosing()) -} - -func TestDelayForBackoff(t *testing.T) { - - // Establish a baseline and then set the wait to 10x that amount - // -- Waiting 10x the baseline should be long enough for a real test while not slowing the - // tests down too much - tt := time.Now() - DelayForBackoff(time.Millisecond, 0) - d := 10 * time.Since(tt) - - start := time.Now() - DelayForBackoff(d, 1) - if time.Now().Sub(start) < d { - t.Error("autorest: DelayForBackoff did not delay as long as expected") - } -} - -// Disable test for TravisCI -// func TestDelayForBackoffWithinReason(t *testing.T) { - -// // Establish a baseline and then set the wait to 10x that amount -// // -- Waiting 10x the baseline should be long enough for a real test while not slowing the -// // tests down too much -// tt := time.Now() -// DelayForBackoff(time.Millisecond, 0) -// d := 10 * time.Since(tt) - -// start := time.Now() -// DelayForBackoff(d, 1) -// if time.Now().Sub(start) > (time.Duration(5.0) * d) { -// t.Error("autorest: DelayForBackoff delayed too long (exceeded 5 times the specified 
duration)") -// } -// } diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/to/convert.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/to/convert.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/to/convert.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/to/convert.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,119 +0,0 @@ -/* -Package to provides helpers to ease working with pointer values of marshalled structures. -*/ -package to - -// String returns a string value for the passed string pointer. It returns the empty string if the -// pointer is nil. -func String(s *string) string { - if s != nil { - return *s - } - return "" -} - -// StringPtr returns a pointer to the passed string. -func StringPtr(s string) *string { - return &s -} - -// StringMap returns a map of strings built from the map of string pointers. The empty string is -// used for nil pointers. -func StringMap(msp map[string]*string) map[string]string { - ms := make(map[string]string, len(msp)) - for k, sp := range msp { - if sp != nil { - ms[k] = *sp - } else { - ms[k] = "" - } - } - return ms -} - -// StringMapPtr returns a map of string pointers built from the passed map of strings. -func StringMapPtr(ms map[string]string) map[string]*string { - msp := make(map[string]*string, len(ms)) - for k, s := range ms { - msp[k] = StringPtr(s) - } - return msp -} - -// Bool returns a bool value for the passed bool pointer. It returns false if the pointer is nil. -func Bool(b *bool) bool { - if b != nil { - return *b - } - return false -} - -// BoolPtr returns a pointer to the passed bool. 
-func BoolPtr(b bool) *bool { - return &b -} - -// Int returns an int value for the passed int pointer. It returns 0 if the pointer is nil. -func Int(i *int) int { - if i != nil { - return *i - } - return 0 -} - -// IntPtr returns a pointer to the passed int. -func IntPtr(i int) *int { - return &i -} - -// Int32 returns an int value for the passed int pointer. It returns 0 if the pointer is nil. -func Int32(i *int32) int32 { - if i != nil { - return *i - } - return 0 -} - -// Int32Ptr returns a pointer to the passed int32. -func Int32Ptr(i int32) *int32 { - return &i -} - -// Int64 returns an int value for the passed int pointer. It returns 0 if the pointer is nil. -func Int64(i *int64) int64 { - if i != nil { - return *i - } - return 0 -} - -// Int64Ptr returns a pointer to the passed int64. -func Int64Ptr(i int64) *int64 { - return &i -} - -// Float32 returns an int value for the passed int pointer. It returns 0.0 if the pointer is nil. -func Float32(i *float32) float32 { - if i != nil { - return *i - } - return 0.0 -} - -// Float32Ptr returns a pointer to the passed float32. -func Float32Ptr(i float32) *float32 { - return &i -} - -// Float64 returns an int value for the passed int pointer. It returns 0.0 if the pointer is nil. -func Float64(i *float64) float64 { - if i != nil { - return *i - } - return 0.0 -} - -// Float64Ptr returns a pointer to the passed float64. 
-func Float64Ptr(i float64) *float64 { - return &i -} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/to/convert_test.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/to/convert_test.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/to/convert_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/to/convert_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,196 +0,0 @@ -package to - -import ( - "testing" -) - -func TestString(t *testing.T) { - v := "" - if String(&v) != v { - t.Errorf("to: String failed to return the correct string -- expected %v, received %v", - v, String(&v)) - } -} - -func TestStringHandlesNil(t *testing.T) { - if String(nil) != "" { - t.Errorf("to: String failed to correctly convert nil -- expected %v, received %v", - "", String(nil)) - } -} - -func TestStringPtr(t *testing.T) { - v := "" - if *StringPtr(v) != v { - t.Errorf("to: StringPtr failed to return the correct string -- expected %v, received %v", - v, *StringPtr(v)) - } -} - -func TestStringMap(t *testing.T) { - msp := map[string]*string{"foo": StringPtr("foo"), "bar": StringPtr("bar"), "baz": StringPtr("baz")} - for k, v := range StringMap(msp) { - if *msp[k] != v { - t.Errorf("to: StringMap incorrectly converted an entry -- expected [%s]%v, received[%s]%v", - k, v, k, *msp[k]) - } - } -} - -func TestStringMapHandlesNil(t *testing.T) { - msp := map[string]*string{"foo": StringPtr("foo"), "bar": nil, "baz": StringPtr("baz")} - for k, v := range StringMap(msp) { - if msp[k] == nil && v != "" { - t.Errorf("to: StringMap incorrectly converted a nil entry -- expected [%s]%v, received[%s]%v", - k, v, k, *msp[k]) - } - } -} - -func TestStringMapPtr(t *testing.T) { - 
ms := map[string]string{"foo": "foo", "bar": "bar", "baz": "baz"} - for k, msp := range StringMapPtr(ms) { - if ms[k] != *msp { - t.Errorf("to: StringMapPtr incorrectly converted an entry -- expected [%s]%v, received[%s]%v", - k, ms[k], k, *msp) - } - } -} - -func TestBool(t *testing.T) { - v := false - if Bool(&v) != v { - t.Errorf("to: Bool failed to return the correct string -- expected %v, received %v", - v, Bool(&v)) - } -} - -func TestBoolHandlesNil(t *testing.T) { - if Bool(nil) != false { - t.Errorf("to: Bool failed to correctly convert nil -- expected %v, received %v", - false, Bool(nil)) - } -} - -func TestBoolPtr(t *testing.T) { - v := false - if *BoolPtr(v) != v { - t.Errorf("to: BoolPtr failed to return the correct string -- expected %v, received %v", - v, *BoolPtr(v)) - } -} - -func TestInt(t *testing.T) { - v := 0 - if Int(&v) != v { - t.Errorf("to: Int failed to return the correct string -- expected %v, received %v", - v, Int(&v)) - } -} - -func TestIntHandlesNil(t *testing.T) { - if Int(nil) != 0 { - t.Errorf("to: Int failed to correctly convert nil -- expected %v, received %v", - 0, Int(nil)) - } -} - -func TestIntPtr(t *testing.T) { - v := 0 - if *IntPtr(v) != v { - t.Errorf("to: IntPtr failed to return the correct string -- expected %v, received %v", - v, *IntPtr(v)) - } -} - -func TestInt32(t *testing.T) { - v := int32(0) - if Int32(&v) != v { - t.Errorf("to: Int32 failed to return the correct string -- expected %v, received %v", - v, Int32(&v)) - } -} - -func TestInt32HandlesNil(t *testing.T) { - if Int32(nil) != int32(0) { - t.Errorf("to: Int32 failed to correctly convert nil -- expected %v, received %v", - 0, Int32(nil)) - } -} - -func TestInt32Ptr(t *testing.T) { - v := int32(0) - if *Int32Ptr(v) != v { - t.Errorf("to: Int32Ptr failed to return the correct string -- expected %v, received %v", - v, *Int32Ptr(v)) - } -} - -func TestInt64(t *testing.T) { - v := int64(0) - if Int64(&v) != v { - t.Errorf("to: Int64 failed to return the correct 
string -- expected %v, received %v", - v, Int64(&v)) - } -} - -func TestInt64HandlesNil(t *testing.T) { - if Int64(nil) != int64(0) { - t.Errorf("to: Int64 failed to correctly convert nil -- expected %v, received %v", - 0, Int64(nil)) - } -} - -func TestInt64Ptr(t *testing.T) { - v := int64(0) - if *Int64Ptr(v) != v { - t.Errorf("to: Int64Ptr failed to return the correct string -- expected %v, received %v", - v, *Int64Ptr(v)) - } -} - -func TestFloat32(t *testing.T) { - v := float32(0) - if Float32(&v) != v { - t.Errorf("to: Float32 failed to return the correct string -- expected %v, received %v", - v, Float32(&v)) - } -} - -func TestFloat32HandlesNil(t *testing.T) { - if Float32(nil) != float32(0) { - t.Errorf("to: Float32 failed to correctly convert nil -- expected %v, received %v", - 0, Float32(nil)) - } -} - -func TestFloat32Ptr(t *testing.T) { - v := float32(0) - if *Float32Ptr(v) != v { - t.Errorf("to: Float32Ptr failed to return the correct string -- expected %v, received %v", - v, *Float32Ptr(v)) - } -} - -func TestFloat64(t *testing.T) { - v := float64(0) - if Float64(&v) != v { - t.Errorf("to: Float64 failed to return the correct string -- expected %v, received %v", - v, Float64(&v)) - } -} - -func TestFloat64HandlesNil(t *testing.T) { - if Float64(nil) != float64(0) { - t.Errorf("to: Float64 failed to correctly convert nil -- expected %v, received %v", - 0, Float64(nil)) - } -} - -func TestFloat64Ptr(t *testing.T) { - v := float64(0) - if *Float64Ptr(v) != v { - t.Errorf("to: Float64Ptr failed to return the correct string -- expected %v, received %v", - v, *Float64Ptr(v)) - } -} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/utility.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/utility.go --- 
juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/utility.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/utility.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,42 +0,0 @@ -package autorest - -import ( - "fmt" - "net/url" -) - -func containsInt(ints []int, n int) bool { - for _, i := range ints { - if i == n { - return true - } - } - return false -} - -func escapeValueStrings(m map[string]string) map[string]string { - for key, value := range m { - m[key] = url.QueryEscape(value) - } - return m -} - -func ensureValueStrings(mapOfInterface map[string]interface{}) map[string]string { - mapOfStrings := make(map[string]string) - for key, value := range mapOfInterface { - mapOfStrings[key] = ensureValueString(value) - } - return mapOfStrings -} - -func ensureValueString(value interface{}) string { - if value == nil { - return "" - } - switch v := value.(type) { - case string: - return v - default: - return fmt.Sprintf("%v", v) - } -} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/utility_test.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/utility_test.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/utility_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/utility_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,145 +0,0 @@ -package autorest - -import ( - "fmt" - "net/http" - "reflect" - "testing" - - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/mocks" -) - -const ( - testAuthorizationHeader = "BEARER SECRETTOKEN" 
- testBadURL = "" - jsonT = ` - { - "name":"Rob Pike", - "age":42 - }` -) - -func TestContainsIntFindsValue(t *testing.T) { - ints := []int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9} - v := 5 - if !containsInt(ints, v) { - t.Errorf("autorest: containsInt failed to find %v in %v", v, ints) - } -} - -func TestContainsIntDoesNotFindValue(t *testing.T) { - ints := []int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9} - v := 42 - if containsInt(ints, v) { - t.Errorf("autorest: containsInt unexpectedly found %v in %v", v, ints) - } -} - -func TestEscapeStrings(t *testing.T) { - m := map[string]string{ - "string": "a long string with = odd characters", - "int": "42", - "nil": "", - } - r := map[string]string{ - "string": "a+long+string+with+%3D+odd+characters", - "int": "42", - "nil": "", - } - v := escapeValueStrings(m) - if !reflect.DeepEqual(v, r) { - t.Errorf("autorest: ensureValueStrings returned %v\n", v) - } -} - -func TestEnsureStrings(t *testing.T) { - m := map[string]interface{}{ - "string": "string", - "int": 42, - "nil": nil, - } - r := map[string]string{ - "string": "string", - "int": "42", - "nil": "", - } - v := ensureValueStrings(m) - if !reflect.DeepEqual(v, r) { - t.Errorf("autorest: ensureValueStrings returned %v\n", v) - } -} - -func doEnsureBodyClosed(t *testing.T) SendDecorator { - return func(s Sender) Sender { - return SenderFunc(func(r *http.Request) (*http.Response, error) { - resp, err := s.Do(r) - if resp != nil && resp.Body != nil && resp.Body.(*mocks.Body).IsOpen() { - t.Error("autorest: Expected Body to be closed -- it was left open") - } - return resp, err - }) - } -} - -type mockAuthorizer struct{} - -func (ma mockAuthorizer) WithAuthorization() PrepareDecorator { - return WithHeader(headerAuthorization, testAuthorizationHeader) -} - -type mockFailingAuthorizer struct{} - -func (mfa mockFailingAuthorizer) WithAuthorization() PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - return r, 
fmt.Errorf("ERROR: mockFailingAuthorizer returned expected error") - }) - } -} - -func withMessage(output *string, msg string) SendDecorator { - return func(s Sender) Sender { - return SenderFunc(func(r *http.Request) (*http.Response, error) { - resp, err := s.Do(r) - if err == nil { - *output += msg - } - return resp, err - }) - } -} - -type mockInspector struct { - wasInvoked bool -} - -func (mi *mockInspector) WithInspection() PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - mi.wasInvoked = true - return p.Prepare(r) - }) - } -} - -func (mi *mockInspector) ByInspecting() RespondDecorator { - return func(r Responder) Responder { - return ResponderFunc(func(resp *http.Response) error { - mi.wasInvoked = true - return r.Respond(resp) - }) - } -} - -func withErrorRespondDecorator(e *error) RespondDecorator { - return func(r Responder) Responder { - return ResponderFunc(func(resp *http.Response) error { - err := r.Respond(resp) - if err != nil { - return err - } - *e = fmt.Errorf("autorest: Faux Respond Error") - return *e - }) - } -} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/version.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/version.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/version.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/version.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,18 +0,0 @@ -package autorest - -import ( - "fmt" -) - -const ( - major = "1" - minor = "1" - patch = "1" - tag = "" - semVerFormat = "%s.%s.%s%s" -) - -// Version returns the semantic version (see http://semver.org). 
-func Version() string { - return fmt.Sprintf(semVerFormat, major, minor, patch, tag) -} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/version_test.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/version_test.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/version_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/version_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,13 +0,0 @@ -package autorest - -import ( - "testing" -) - -func TestVersion(t *testing.T) { - v := "1.1.1" - if Version() != v { - t.Errorf("autorest: Version failed to return the expected version -- expected %s, received %s", - v, Version()) - } -} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/LICENSE juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/LICENSE --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/LICENSE 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/LICENSE 1970-01-01 00:00:00.000000000 +0000 @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. 
- - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of 
the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - Copyright 2015 Microsoft Corporation - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/cmd/jwt/app.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/cmd/jwt/app.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/cmd/jwt/app.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/cmd/jwt/app.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,210 +0,0 @@ -// A useful example app. You can use this to debug your tokens on the command line. -// This is also a great place to look at how you might use this library. -// -// Example usage: -// The following will create and sign a token, then verify it and output the original claims. 
-// echo {\"foo\":\"bar\"} | bin/jwt -key test/sample_key -alg RS256 -sign - | bin/jwt -key test/sample_key.pub -verify - -package main - -import ( - "encoding/json" - "flag" - "fmt" - "io" - "io/ioutil" - "os" - "regexp" - "strings" - - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go" -) - -var ( - // Options - flagAlg = flag.String("alg", "", "signing algorithm identifier") - flagKey = flag.String("key", "", "path to key file or '-' to read from stdin") - flagCompact = flag.Bool("compact", false, "output compact JSON") - flagDebug = flag.Bool("debug", false, "print out all kinds of debug data") - - // Modes - exactly one of these is required - flagSign = flag.String("sign", "", "path to claims object to sign or '-' to read from stdin") - flagVerify = flag.String("verify", "", "path to JWT token to verify or '-' to read from stdin") -) - -func main() { - // Usage message if you ask for -help or if you mess up inputs. - flag.Usage = func() { - fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0]) - fmt.Fprintf(os.Stderr, " One of the following flags is required: sign, verify\n") - flag.PrintDefaults() - } - - // Parse command line options - flag.Parse() - - // Do the thing. If something goes wrong, print error to stderr - // and exit with a non-zero status code - if err := start(); err != nil { - fmt.Fprintf(os.Stderr, "Error: %v\n", err) - os.Exit(1) - } -} - -// Figure out which thing to do and then do that -func start() error { - if *flagSign != "" { - return signToken() - } else if *flagVerify != "" { - return verifyToken() - } else { - flag.Usage() - return fmt.Errorf("None of the required flags are present. 
What do you want me to do?") - } -} - -// Helper func: Read input from specified file or stdin -func loadData(p string) ([]byte, error) { - if p == "" { - return nil, fmt.Errorf("No path specified") - } - - var rdr io.Reader - if p == "-" { - rdr = os.Stdin - } else { - if f, err := os.Open(p); err == nil { - rdr = f - defer f.Close() - } else { - return nil, err - } - } - return ioutil.ReadAll(rdr) -} - -// Print a json object in accordance with the prophecy (or the command line options) -func printJSON(j interface{}) error { - var out []byte - var err error - - if *flagCompact == false { - out, err = json.MarshalIndent(j, "", " ") - } else { - out, err = json.Marshal(j) - } - - if err == nil { - fmt.Println(string(out)) - } - - return err -} - -// Verify a token and output the claims. This is a great example -// of how to verify and view a token. -func verifyToken() error { - // get the token - tokData, err := loadData(*flagVerify) - if err != nil { - return fmt.Errorf("Couldn't read token: %v", err) - } - - // trim possible whitespace from token - tokData = regexp.MustCompile(`\s*$`).ReplaceAll(tokData, []byte{}) - if *flagDebug { - fmt.Fprintf(os.Stderr, "Token len: %v bytes\n", len(tokData)) - } - - // Parse the token. Load the key from command line option - token, err := jwt.Parse(string(tokData), func(t *jwt.Token) (interface{}, error) { - data, err := loadData(*flagKey) - if err != nil { - return nil, err - } - if isEs() { - return jwt.ParseECPublicKeyFromPEM(data) - } - return data, nil - }) - - // Print some debug data - if *flagDebug && token != nil { - fmt.Fprintf(os.Stderr, "Header:\n%v\n", token.Header) - fmt.Fprintf(os.Stderr, "Claims:\n%v\n", token.Claims) - } - - // Print an error if we can't parse for some reason - if err != nil { - return fmt.Errorf("Couldn't parse token: %v", err) - } - - // Is token invalid? 
- if !token.Valid { - return fmt.Errorf("Token is invalid") - } - - // Print the token details - if err := printJSON(token.Claims); err != nil { - return fmt.Errorf("Failed to output claims: %v", err) - } - - return nil -} - -// Create, sign, and output a token. This is a great, simple example of -// how to use this library to create and sign a token. -func signToken() error { - // get the token data from command line arguments - tokData, err := loadData(*flagSign) - if err != nil { - return fmt.Errorf("Couldn't read token: %v", err) - } else if *flagDebug { - fmt.Fprintf(os.Stderr, "Token: %v bytes", len(tokData)) - } - - // parse the JSON of the claims - var claims map[string]interface{} - if err := json.Unmarshal(tokData, &claims); err != nil { - return fmt.Errorf("Couldn't parse claims JSON: %v", err) - } - - // get the key - var key interface{} - key, err = loadData(*flagKey) - if err != nil { - return fmt.Errorf("Couldn't read key: %v", err) - } - - // get the signing alg - alg := jwt.GetSigningMethod(*flagAlg) - if alg == nil { - return fmt.Errorf("Couldn't find signing method: %v", *flagAlg) - } - - // create a new token - token := jwt.New(alg) - token.Claims = claims - - if isEs() { - if k, ok := key.([]byte); !ok { - return fmt.Errorf("Couldn't convert key data to key") - } else { - key, err = jwt.ParseECPrivateKeyFromPEM(k) - if err != nil { - return err - } - } - } - - if out, err := token.SignedString(key); err == nil { - fmt.Println(out) - } else { - return fmt.Errorf("Error signing token: %v", err) - } - - return nil -} - -func isEs() bool { - return strings.HasPrefix(*flagAlg, "ES") -} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/doc.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/doc.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/doc.go 2016-08-16 
08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/doc.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,4 +0,0 @@ -// Package jwt is a Go implementation of JSON Web Tokens: http://self-issued.info/docs/draft-jones-json-web-token.html -// -// See README.md for more info. -package jwt diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/ecdsa.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/ecdsa.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/ecdsa.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/ecdsa.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,147 +0,0 @@ -package jwt - -import ( - "crypto" - "crypto/ecdsa" - "crypto/rand" - "errors" - "math/big" -) - -var ( - // Sadly this is missing from crypto/ecdsa compared to crypto/rsa - ErrECDSAVerification = errors.New("crypto/ecdsa: verification error") -) - -// Implements the ECDSA family of signing methods signing methods -type SigningMethodECDSA struct { - Name string - Hash crypto.Hash - KeySize int - CurveBits int -} - -// Specific instances for EC256 and company -var ( - SigningMethodES256 *SigningMethodECDSA - SigningMethodES384 *SigningMethodECDSA - SigningMethodES512 *SigningMethodECDSA -) - -func init() { - // ES256 - SigningMethodES256 = &SigningMethodECDSA{"ES256", crypto.SHA256, 32, 256} - RegisterSigningMethod(SigningMethodES256.Alg(), func() SigningMethod { - return SigningMethodES256 - }) - - // ES384 - SigningMethodES384 = &SigningMethodECDSA{"ES384", crypto.SHA384, 48, 384} - RegisterSigningMethod(SigningMethodES384.Alg(), func() SigningMethod { - return SigningMethodES384 - }) - - // ES512 - SigningMethodES512 = &SigningMethodECDSA{"ES512", 
crypto.SHA512, 66, 521} - RegisterSigningMethod(SigningMethodES512.Alg(), func() SigningMethod { - return SigningMethodES512 - }) -} - -func (m *SigningMethodECDSA) Alg() string { - return m.Name -} - -// Implements the Verify method from SigningMethod -// For this verify method, key must be an ecdsa.PublicKey struct -func (m *SigningMethodECDSA) Verify(signingString, signature string, key interface{}) error { - var err error - - // Decode the signature - var sig []byte - if sig, err = DecodeSegment(signature); err != nil { - return err - } - - // Get the key - var ecdsaKey *ecdsa.PublicKey - switch k := key.(type) { - case *ecdsa.PublicKey: - ecdsaKey = k - default: - return ErrInvalidKey - } - - if len(sig) != 2*m.KeySize { - return ErrECDSAVerification - } - - r := big.NewInt(0).SetBytes(sig[:m.KeySize]) - s := big.NewInt(0).SetBytes(sig[m.KeySize:]) - - // Create hasher - if !m.Hash.Available() { - return ErrHashUnavailable - } - hasher := m.Hash.New() - hasher.Write([]byte(signingString)) - - // Verify the signature - if verifystatus := ecdsa.Verify(ecdsaKey, hasher.Sum(nil), r, s); verifystatus == true { - return nil - } else { - return ErrECDSAVerification - } -} - -// Implements the Sign method from SigningMethod -// For this signing method, key must be an ecdsa.PrivateKey struct -func (m *SigningMethodECDSA) Sign(signingString string, key interface{}) (string, error) { - // Get the key - var ecdsaKey *ecdsa.PrivateKey - switch k := key.(type) { - case *ecdsa.PrivateKey: - ecdsaKey = k - default: - return "", ErrInvalidKey - } - - // Create the hasher - if !m.Hash.Available() { - return "", ErrHashUnavailable - } - - hasher := m.Hash.New() - hasher.Write([]byte(signingString)) - - // Sign the string and return r, s - if r, s, err := ecdsa.Sign(rand.Reader, ecdsaKey, hasher.Sum(nil)); err == nil { - curveBits := ecdsaKey.Curve.Params().BitSize - - if m.CurveBits != curveBits { - return "", ErrInvalidKey - } - - keyBytes := curveBits / 8 - if curveBits%8 > 0 
{ - keyBytes += 1 - } - - // We serialize the outpus (r and s) into big-endian byte arrays and pad - // them with zeros on the left to make sure the sizes work out. Both arrays - // must be keyBytes long, and the output must be 2*keyBytes long. - rBytes := r.Bytes() - rBytesPadded := make([]byte, keyBytes) - copy(rBytesPadded[keyBytes-len(rBytes):], rBytes) - - sBytes := s.Bytes() - sBytesPadded := make([]byte, keyBytes) - copy(sBytesPadded[keyBytes-len(sBytes):], sBytes) - - out := append(rBytesPadded, sBytesPadded...) - - return EncodeSegment(out), nil - } else { - return "", err - } -} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/ecdsa_test.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/ecdsa_test.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/ecdsa_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/ecdsa_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,100 +0,0 @@ -package jwt_test - -import ( - "crypto/ecdsa" - "io/ioutil" - "strings" - "testing" - - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go" -) - -var ecdsaTestData = []struct { - name string - keys map[string]string - tokenString string - alg string - claims map[string]interface{} - valid bool -}{ - { - "Basic ES256", - map[string]string{"private": "test/ec256-private.pem", "public": "test/ec256-public.pem"}, - "eyJ0eXAiOiJKV1QiLCJhbGciOiJFUzI1NiJ9.eyJmb28iOiJiYXIifQ.feG39E-bn8HXAKhzDZq7yEAPWYDhZlwTn3sePJnU9VrGMmwdXAIEyoOnrjreYlVM_Z4N13eK9-TmMTWyfKJtHQ", - "ES256", - map[string]interface{}{"foo": "bar"}, - true, - }, - { - "Basic ES384", - map[string]string{"private": "test/ec384-private.pem", "public": "test/ec384-public.pem"}, - 
"eyJ0eXAiOiJKV1QiLCJhbGciOiJFUzM4NCJ9.eyJmb28iOiJiYXIifQ.ngAfKMbJUh0WWubSIYe5GMsA-aHNKwFbJk_wq3lq23aPp8H2anb1rRILIzVR0gUf4a8WzDtrzmiikuPWyCS6CN4-PwdgTk-5nehC7JXqlaBZU05p3toM3nWCwm_LXcld", - "ES384", - map[string]interface{}{"foo": "bar"}, - true, - }, - { - "Basic ES512", - map[string]string{"private": "test/ec512-private.pem", "public": "test/ec512-public.pem"}, - "eyJ0eXAiOiJKV1QiLCJhbGciOiJFUzUxMiJ9.eyJmb28iOiJiYXIifQ.AAU0TvGQOcdg2OvrwY73NHKgfk26UDekh9Prz-L_iWuTBIBqOFCWwwLsRiHB1JOddfKAls5do1W0jR_F30JpVd-6AJeTjGKA4C1A1H6gIKwRY0o_tFDIydZCl_lMBMeG5VNFAjO86-WCSKwc3hqaGkq1MugPRq_qrF9AVbuEB4JPLyL5", - "ES512", - map[string]interface{}{"foo": "bar"}, - true, - }, - { - "basic ES256 invalid: foo => bar", - map[string]string{"private": "test/ec256-private.pem", "public": "test/ec256-public.pem"}, - "eyJhbGciOiJFUzI1NiIsInR5cCI6IkpXVCJ9.eyJmb28iOiJiYXIifQ.MEQCIHoSJnmGlPaVQDqacx_2XlXEhhqtWceVopjomc2PJLtdAiAUTeGPoNYxZw0z8mgOnnIcjoxRuNDVZvybRZF3wR1l8W", - "ES256", - map[string]interface{}{"foo": "bar"}, - false, - }, -} - -func TestECDSAVerify(t *testing.T) { - for _, data := range ecdsaTestData { - var err error - - key, _ := ioutil.ReadFile(data.keys["public"]) - - var ecdsaKey *ecdsa.PublicKey - if ecdsaKey, err = jwt.ParseECPublicKeyFromPEM(key); err != nil { - t.Errorf("Unable to parse ECDSA public key: %v", err) - } - - parts := strings.Split(data.tokenString, ".") - - method := jwt.GetSigningMethod(data.alg) - err = method.Verify(strings.Join(parts[0:2], "."), parts[2], ecdsaKey) - if data.valid && err != nil { - t.Errorf("[%v] Error while verifying key: %v", data.name, err) - } - if !data.valid && err == nil { - t.Errorf("[%v] Invalid key passed validation", data.name) - } - } -} - -func TestECDSASign(t *testing.T) { - for _, data := range ecdsaTestData { - var err error - key, _ := ioutil.ReadFile(data.keys["private"]) - - var ecdsaKey *ecdsa.PrivateKey - if ecdsaKey, err = jwt.ParseECPrivateKeyFromPEM(key); err != nil { - t.Errorf("Unable to parse ECDSA private 
key: %v", err) - } - - if data.valid { - parts := strings.Split(data.tokenString, ".") - method := jwt.GetSigningMethod(data.alg) - sig, err := method.Sign(strings.Join(parts[0:2], "."), ecdsaKey) - if err != nil { - t.Errorf("[%v] Error signing token: %v", data.name, err) - } - if sig == parts[2] { - t.Errorf("[%v] Identical signatures\nbefore:\n%v\nafter:\n%v", data.name, parts[2], sig) - } - } - } -} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/ecdsa_utils.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/ecdsa_utils.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/ecdsa_utils.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/ecdsa_utils.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,67 +0,0 @@ -package jwt - -import ( - "crypto/ecdsa" - "crypto/x509" - "encoding/pem" - "errors" -) - -var ( - ErrNotECPublicKey = errors.New("Key is not a valid ECDSA public key") - ErrNotECPrivateKey = errors.New("Key is not a valid ECDSA private key") -) - -// Parse PEM encoded Elliptic Curve Private Key Structure -func ParseECPrivateKeyFromPEM(key []byte) (*ecdsa.PrivateKey, error) { - var err error - - // Parse PEM block - var block *pem.Block - if block, _ = pem.Decode(key); block == nil { - return nil, ErrKeyMustBePEMEncoded - } - - // Parse the key - var parsedKey interface{} - if parsedKey, err = x509.ParseECPrivateKey(block.Bytes); err != nil { - return nil, err - } - - var pkey *ecdsa.PrivateKey - var ok bool - if pkey, ok = parsedKey.(*ecdsa.PrivateKey); !ok { - return nil, ErrNotECPrivateKey - } - - return pkey, nil -} - -// Parse PEM encoded PKCS1 or PKCS8 public key -func ParseECPublicKeyFromPEM(key []byte) (*ecdsa.PublicKey, error) { - var err error - - // Parse PEM block - var 
block *pem.Block - if block, _ = pem.Decode(key); block == nil { - return nil, ErrKeyMustBePEMEncoded - } - - // Parse the key - var parsedKey interface{} - if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil { - if cert, err := x509.ParseCertificate(block.Bytes); err == nil { - parsedKey = cert.PublicKey - } else { - return nil, err - } - } - - var pkey *ecdsa.PublicKey - var ok bool - if pkey, ok = parsedKey.(*ecdsa.PublicKey); !ok { - return nil, ErrNotECPublicKey - } - - return pkey, nil -} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/errors.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/errors.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/errors.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/errors.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,43 +0,0 @@ -package jwt - -import ( - "errors" -) - -// Error constants -var ( - ErrInvalidKey = errors.New("key is invalid or of invalid type") - ErrHashUnavailable = errors.New("the requested hash function is unavailable") - ErrNoTokenInRequest = errors.New("no token present in request") -) - -// The errors that might occur when parsing and validating a token -const ( - ValidationErrorMalformed uint32 = 1 << iota // Token is malformed - ValidationErrorUnverifiable // Token could not be verified because of signing problems - ValidationErrorSignatureInvalid // Signature validation failed - ValidationErrorExpired // Exp validation failed - ValidationErrorNotValidYet // NBF validation failed -) - -// The error from Parse if token is not valid -type ValidationError struct { - err string - Errors uint32 // bitfield. see ValidationError... 
constants -} - -// Validation error is an error type -func (e ValidationError) Error() string { - if e.err == "" { - return "token is invalid" - } - return e.err -} - -// No errors -func (e *ValidationError) valid() bool { - if e.Errors > 0 { - return false - } - return true -} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/example_test.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/example_test.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/example_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/example_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,52 +0,0 @@ -package jwt_test - -import ( - "fmt" - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go" - "time" -) - -func ExampleParse(myToken string, myLookupKey func(interface{}) (interface{}, error)) { - token, err := jwt.Parse(myToken, func(token *jwt.Token) (interface{}, error) { - return myLookupKey(token.Header["kid"]) - }) - - if err == nil && token.Valid { - fmt.Println("Your token is valid. I like your style.") - } else { - fmt.Println("This token is terrible! 
I cannot accept this.") - } -} - -func ExampleNew(mySigningKey []byte) (string, error) { - // Create the token - token := jwt.New(jwt.SigningMethodHS256) - // Set some claims - token.Claims["foo"] = "bar" - token.Claims["exp"] = time.Now().Add(time.Hour * 72).Unix() - // Sign and get the complete encoded token as a string - tokenString, err := token.SignedString(mySigningKey) - return tokenString, err -} - -func ExampleParse_errorChecking(myToken string, myLookupKey func(interface{}) (interface{}, error)) { - token, err := jwt.Parse(myToken, func(token *jwt.Token) (interface{}, error) { - return myLookupKey(token.Header["kid"]) - }) - - if token.Valid { - fmt.Println("You look nice today") - } else if ve, ok := err.(*jwt.ValidationError); ok { - if ve.Errors&jwt.ValidationErrorMalformed != 0 { - fmt.Println("That's not even a token") - } else if ve.Errors&(jwt.ValidationErrorExpired|jwt.ValidationErrorNotValidYet) != 0 { - // Token is either expired or not active yet - fmt.Println("Timing is everything") - } else { - fmt.Println("Couldn't handle this token:", err) - } - } else { - fmt.Println("Couldn't handle this token:", err) - } - -} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/hmac.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/hmac.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/hmac.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/hmac.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,94 +0,0 @@ -package jwt - -import ( - "crypto" - "crypto/hmac" - "errors" -) - -// Implements the HMAC-SHA family of signing methods signing methods -type SigningMethodHMAC struct { - Name string - Hash crypto.Hash -} - -// Specific instances for HS256 and company -var ( - 
SigningMethodHS256 *SigningMethodHMAC - SigningMethodHS384 *SigningMethodHMAC - SigningMethodHS512 *SigningMethodHMAC - ErrSignatureInvalid = errors.New("signature is invalid") -) - -func init() { - // HS256 - SigningMethodHS256 = &SigningMethodHMAC{"HS256", crypto.SHA256} - RegisterSigningMethod(SigningMethodHS256.Alg(), func() SigningMethod { - return SigningMethodHS256 - }) - - // HS384 - SigningMethodHS384 = &SigningMethodHMAC{"HS384", crypto.SHA384} - RegisterSigningMethod(SigningMethodHS384.Alg(), func() SigningMethod { - return SigningMethodHS384 - }) - - // HS512 - SigningMethodHS512 = &SigningMethodHMAC{"HS512", crypto.SHA512} - RegisterSigningMethod(SigningMethodHS512.Alg(), func() SigningMethod { - return SigningMethodHS512 - }) -} - -func (m *SigningMethodHMAC) Alg() string { - return m.Name -} - -// Verify the signature of HSXXX tokens. Returns nil if the signature is valid. -func (m *SigningMethodHMAC) Verify(signingString, signature string, key interface{}) error { - // Verify the key is the right type - keyBytes, ok := key.([]byte) - if !ok { - return ErrInvalidKey - } - - // Decode signature, for comparison - sig, err := DecodeSegment(signature) - if err != nil { - return err - } - - // Can we use the specified hashing method? - if !m.Hash.Available() { - return ErrHashUnavailable - } - - // This signing method is symmetric, so we validate the signature - // by reproducing the signature from the signing string and key, then - // comparing that against the provided signature. - hasher := hmac.New(m.Hash.New, keyBytes) - hasher.Write([]byte(signingString)) - if !hmac.Equal(sig, hasher.Sum(nil)) { - return ErrSignatureInvalid - } - - // No validation errors. Signature is good. - return nil -} - -// Implements the Sign method from SigningMethod for this signing method. 
-// Key must be []byte -func (m *SigningMethodHMAC) Sign(signingString string, key interface{}) (string, error) { - if keyBytes, ok := key.([]byte); ok { - if !m.Hash.Available() { - return "", ErrHashUnavailable - } - - hasher := hmac.New(m.Hash.New, keyBytes) - hasher.Write([]byte(signingString)) - - return EncodeSegment(hasher.Sum(nil)), nil - } - - return "", ErrInvalidKey -} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/hmac_test.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/hmac_test.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/hmac_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/hmac_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,91 +0,0 @@ -package jwt_test - -import ( - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go" - "io/ioutil" - "strings" - "testing" -) - -var hmacTestData = []struct { - name string - tokenString string - alg string - claims map[string]interface{} - valid bool -}{ - { - "web sample", - "eyJ0eXAiOiJKV1QiLA0KICJhbGciOiJIUzI1NiJ9.eyJpc3MiOiJqb2UiLA0KICJleHAiOjEzMDA4MTkzODAsDQogImh0dHA6Ly9leGFtcGxlLmNvbS9pc19yb290Ijp0cnVlfQ.dBjftJeZ4CVP-mB92K27uhbUJU1p1r_wW1gFWFOEjXk", - "HS256", - map[string]interface{}{"iss": "joe", "exp": 1300819380, "http://example.com/is_root": true}, - true, - }, - { - "HS384", - "eyJhbGciOiJIUzM4NCIsInR5cCI6IkpXVCJ9.eyJleHAiOjEuMzAwODE5MzhlKzA5LCJodHRwOi8vZXhhbXBsZS5jb20vaXNfcm9vdCI6dHJ1ZSwiaXNzIjoiam9lIn0.KWZEuOD5lbBxZ34g7F-SlVLAQ_r5KApWNWlZIIMyQVz5Zs58a7XdNzj5_0EcNoOy", - "HS384", - map[string]interface{}{"iss": "joe", "exp": 1300819380, "http://example.com/is_root": true}, - true, - }, - { - "HS512", - 
"eyJhbGciOiJIUzUxMiIsInR5cCI6IkpXVCJ9.eyJleHAiOjEuMzAwODE5MzhlKzA5LCJodHRwOi8vZXhhbXBsZS5jb20vaXNfcm9vdCI6dHJ1ZSwiaXNzIjoiam9lIn0.CN7YijRX6Aw1n2jyI2Id1w90ja-DEMYiWixhYCyHnrZ1VfJRaFQz1bEbjjA5Fn4CLYaUG432dEYmSbS4Saokmw", - "HS512", - map[string]interface{}{"iss": "joe", "exp": 1300819380, "http://example.com/is_root": true}, - true, - }, - { - "web sample: invalid", - "eyJ0eXAiOiJKV1QiLA0KICJhbGciOiJIUzI1NiJ9.eyJpc3MiOiJqb2UiLA0KICJleHAiOjEzMDA4MTkzODAsDQogImh0dHA6Ly9leGFtcGxlLmNvbS9pc19yb290Ijp0cnVlfQ.dBjftJeZ4CVP-mB92K27uhbUJU1p1r_wW1gFWFOEjXo", - "HS256", - map[string]interface{}{"iss": "joe", "exp": 1300819380, "http://example.com/is_root": true}, - false, - }, -} - -// Sample data from http://tools.ietf.org/html/draft-jones-json-web-signature-04#appendix-A.1 -var hmacTestKey, _ = ioutil.ReadFile("test/hmacTestKey") - -func TestHMACVerify(t *testing.T) { - for _, data := range hmacTestData { - parts := strings.Split(data.tokenString, ".") - - method := jwt.GetSigningMethod(data.alg) - err := method.Verify(strings.Join(parts[0:2], "."), parts[2], hmacTestKey) - if data.valid && err != nil { - t.Errorf("[%v] Error while verifying key: %v", data.name, err) - } - if !data.valid && err == nil { - t.Errorf("[%v] Invalid key passed validation", data.name) - } - } -} - -func TestHMACSign(t *testing.T) { - for _, data := range hmacTestData { - if data.valid { - parts := strings.Split(data.tokenString, ".") - method := jwt.GetSigningMethod(data.alg) - sig, err := method.Sign(strings.Join(parts[0:2], "."), hmacTestKey) - if err != nil { - t.Errorf("[%v] Error signing token: %v", data.name, err) - } - if sig != parts[2] { - t.Errorf("[%v] Incorrect signature.\nwas:\n%v\nexpecting:\n%v", data.name, sig, parts[2]) - } - } - } -} - -func BenchmarkHS256Signing(b *testing.B) { - benchmarkSigning(b, jwt.SigningMethodHS256, hmacTestKey) -} - -func BenchmarkHS384Signing(b *testing.B) { - benchmarkSigning(b, jwt.SigningMethodHS384, hmacTestKey) -} - -func BenchmarkHS512Signing(b 
*testing.B) { - benchmarkSigning(b, jwt.SigningMethodHS512, hmacTestKey) -} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/LICENSE juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/LICENSE --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/LICENSE 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/LICENSE 1970-01-01 00:00:00.000000000 +0000 @@ -1,8 +0,0 @@ -Copyright (c) 2012 Dave Grijalva - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
- diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/parser.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/parser.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/parser.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/parser.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,113 +0,0 @@ -package jwt - -import ( - "bytes" - "encoding/json" - "fmt" - "strings" -) - -type Parser struct { - ValidMethods []string // If populated, only these methods will be considered valid - UseJSONNumber bool // Use JSON Number format in JSON decoder -} - -// Parse, validate, and return a token. -// keyFunc will receive the parsed token and should return the key for validating. -// If everything is kosher, err will be nil -func (p *Parser) Parse(tokenString string, keyFunc Keyfunc) (*Token, error) { - parts := strings.Split(tokenString, ".") - if len(parts) != 3 { - return nil, &ValidationError{err: "token contains an invalid number of segments", Errors: ValidationErrorMalformed} - } - - var err error - token := &Token{Raw: tokenString} - // parse Header - var headerBytes []byte - if headerBytes, err = DecodeSegment(parts[0]); err != nil { - return token, &ValidationError{err: err.Error(), Errors: ValidationErrorMalformed} - } - if err = json.Unmarshal(headerBytes, &token.Header); err != nil { - return token, &ValidationError{err: err.Error(), Errors: ValidationErrorMalformed} - } - - // parse Claims - var claimBytes []byte - if claimBytes, err = DecodeSegment(parts[1]); err != nil { - return token, &ValidationError{err: err.Error(), Errors: ValidationErrorMalformed} - } - dec := json.NewDecoder(bytes.NewBuffer(claimBytes)) - if p.UseJSONNumber { - dec.UseNumber() - } - if err = 
dec.Decode(&token.Claims); err != nil { - return token, &ValidationError{err: err.Error(), Errors: ValidationErrorMalformed} - } - - // Lookup signature method - if method, ok := token.Header["alg"].(string); ok { - if token.Method = GetSigningMethod(method); token.Method == nil { - return token, &ValidationError{err: "signing method (alg) is unavailable.", Errors: ValidationErrorUnverifiable} - } - } else { - return token, &ValidationError{err: "signing method (alg) is unspecified.", Errors: ValidationErrorUnverifiable} - } - - // Verify signing method is in the required set - if p.ValidMethods != nil { - var signingMethodValid = false - var alg = token.Method.Alg() - for _, m := range p.ValidMethods { - if m == alg { - signingMethodValid = true - break - } - } - if !signingMethodValid { - // signing method is not in the listed set - return token, &ValidationError{err: fmt.Sprintf("signing method %v is invalid", alg), Errors: ValidationErrorSignatureInvalid} - } - } - - // Lookup key - var key interface{} - if keyFunc == nil { - // keyFunc was not provided. 
short circuiting validation - return token, &ValidationError{err: "no Keyfunc was provided.", Errors: ValidationErrorUnverifiable} - } - if key, err = keyFunc(token); err != nil { - // keyFunc returned an error - return token, &ValidationError{err: err.Error(), Errors: ValidationErrorUnverifiable} - } - - // Check expiration times - vErr := &ValidationError{} - now := TimeFunc().Unix() - if exp, ok := token.Claims["exp"].(float64); ok { - if now > int64(exp) { - vErr.err = "token is expired" - vErr.Errors |= ValidationErrorExpired - } - } - if nbf, ok := token.Claims["nbf"].(float64); ok { - if now < int64(nbf) { - vErr.err = "token is not valid yet" - vErr.Errors |= ValidationErrorNotValidYet - } - } - - // Perform validation - token.Signature = parts[2] - if err = token.Method.Verify(strings.Join(parts[0:2], "."), token.Signature, key); err != nil { - vErr.err = err.Error() - vErr.Errors |= ValidationErrorSignatureInvalid - } - - if vErr.valid() { - token.Valid = true - return token, nil - } - - return token, vErr -} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/parser_test.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/parser_test.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/parser_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/parser_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,239 +0,0 @@ -package jwt_test - -import ( - "encoding/json" - "fmt" - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go" - "io/ioutil" - "net/http" - "reflect" - "testing" - "time" -) - -var ( - jwtTestDefaultKey []byte - defaultKeyFunc jwt.Keyfunc = func(t *jwt.Token) (interface{}, error) { return jwtTestDefaultKey, nil } - emptyKeyFunc 
jwt.Keyfunc = func(t *jwt.Token) (interface{}, error) { return nil, nil } - errorKeyFunc jwt.Keyfunc = func(t *jwt.Token) (interface{}, error) { return nil, fmt.Errorf("error loading key") } - nilKeyFunc jwt.Keyfunc = nil -) - -var jwtTestData = []struct { - name string - tokenString string - keyfunc jwt.Keyfunc - claims map[string]interface{} - valid bool - errors uint32 - parser *jwt.Parser -}{ - { - "basic", - "eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiJ9.eyJmb28iOiJiYXIifQ.FhkiHkoESI_cG3NPigFrxEk9Z60_oXrOT2vGm9Pn6RDgYNovYORQmmA0zs1AoAOf09ly2Nx2YAg6ABqAYga1AcMFkJljwxTT5fYphTuqpWdy4BELeSYJx5Ty2gmr8e7RonuUztrdD5WfPqLKMm1Ozp_T6zALpRmwTIW0QPnaBXaQD90FplAg46Iy1UlDKr-Eupy0i5SLch5Q-p2ZpaL_5fnTIUDlxC3pWhJTyx_71qDI-mAA_5lE_VdroOeflG56sSmDxopPEG3bFlSu1eowyBfxtu0_CuVd-M42RU75Zc4Gsj6uV77MBtbMrf4_7M_NUTSgoIF3fRqxrj0NzihIBg", - defaultKeyFunc, - map[string]interface{}{"foo": "bar"}, - true, - 0, - nil, - }, - { - "basic expired", - "", // autogen - defaultKeyFunc, - map[string]interface{}{"foo": "bar", "exp": float64(time.Now().Unix() - 100)}, - false, - jwt.ValidationErrorExpired, - nil, - }, - { - "basic nbf", - "", // autogen - defaultKeyFunc, - map[string]interface{}{"foo": "bar", "nbf": float64(time.Now().Unix() + 100)}, - false, - jwt.ValidationErrorNotValidYet, - nil, - }, - { - "expired and nbf", - "", // autogen - defaultKeyFunc, - map[string]interface{}{"foo": "bar", "nbf": float64(time.Now().Unix() + 100), "exp": float64(time.Now().Unix() - 100)}, - false, - jwt.ValidationErrorNotValidYet | jwt.ValidationErrorExpired, - nil, - }, - { - "basic invalid", - "eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiJ9.eyJmb28iOiJiYXIifQ.EhkiHkoESI_cG3NPigFrxEk9Z60_oXrOT2vGm9Pn6RDgYNovYORQmmA0zs1AoAOf09ly2Nx2YAg6ABqAYga1AcMFkJljwxTT5fYphTuqpWdy4BELeSYJx5Ty2gmr8e7RonuUztrdD5WfPqLKMm1Ozp_T6zALpRmwTIW0QPnaBXaQD90FplAg46Iy1UlDKr-Eupy0i5SLch5Q-p2ZpaL_5fnTIUDlxC3pWhJTyx_71qDI-mAA_5lE_VdroOeflG56sSmDxopPEG3bFlSu1eowyBfxtu0_CuVd-M42RU75Zc4Gsj6uV77MBtbMrf4_7M_NUTSgoIF3fRqxrj0NzihIBg", - defaultKeyFunc, - 
map[string]interface{}{"foo": "bar"}, - false, - jwt.ValidationErrorSignatureInvalid, - nil, - }, - { - "basic nokeyfunc", - "eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiJ9.eyJmb28iOiJiYXIifQ.FhkiHkoESI_cG3NPigFrxEk9Z60_oXrOT2vGm9Pn6RDgYNovYORQmmA0zs1AoAOf09ly2Nx2YAg6ABqAYga1AcMFkJljwxTT5fYphTuqpWdy4BELeSYJx5Ty2gmr8e7RonuUztrdD5WfPqLKMm1Ozp_T6zALpRmwTIW0QPnaBXaQD90FplAg46Iy1UlDKr-Eupy0i5SLch5Q-p2ZpaL_5fnTIUDlxC3pWhJTyx_71qDI-mAA_5lE_VdroOeflG56sSmDxopPEG3bFlSu1eowyBfxtu0_CuVd-M42RU75Zc4Gsj6uV77MBtbMrf4_7M_NUTSgoIF3fRqxrj0NzihIBg", - nilKeyFunc, - map[string]interface{}{"foo": "bar"}, - false, - jwt.ValidationErrorUnverifiable, - nil, - }, - { - "basic nokey", - "eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiJ9.eyJmb28iOiJiYXIifQ.FhkiHkoESI_cG3NPigFrxEk9Z60_oXrOT2vGm9Pn6RDgYNovYORQmmA0zs1AoAOf09ly2Nx2YAg6ABqAYga1AcMFkJljwxTT5fYphTuqpWdy4BELeSYJx5Ty2gmr8e7RonuUztrdD5WfPqLKMm1Ozp_T6zALpRmwTIW0QPnaBXaQD90FplAg46Iy1UlDKr-Eupy0i5SLch5Q-p2ZpaL_5fnTIUDlxC3pWhJTyx_71qDI-mAA_5lE_VdroOeflG56sSmDxopPEG3bFlSu1eowyBfxtu0_CuVd-M42RU75Zc4Gsj6uV77MBtbMrf4_7M_NUTSgoIF3fRqxrj0NzihIBg", - emptyKeyFunc, - map[string]interface{}{"foo": "bar"}, - false, - jwt.ValidationErrorSignatureInvalid, - nil, - }, - { - "basic errorkey", - "eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiJ9.eyJmb28iOiJiYXIifQ.FhkiHkoESI_cG3NPigFrxEk9Z60_oXrOT2vGm9Pn6RDgYNovYORQmmA0zs1AoAOf09ly2Nx2YAg6ABqAYga1AcMFkJljwxTT5fYphTuqpWdy4BELeSYJx5Ty2gmr8e7RonuUztrdD5WfPqLKMm1Ozp_T6zALpRmwTIW0QPnaBXaQD90FplAg46Iy1UlDKr-Eupy0i5SLch5Q-p2ZpaL_5fnTIUDlxC3pWhJTyx_71qDI-mAA_5lE_VdroOeflG56sSmDxopPEG3bFlSu1eowyBfxtu0_CuVd-M42RU75Zc4Gsj6uV77MBtbMrf4_7M_NUTSgoIF3fRqxrj0NzihIBg", - errorKeyFunc, - map[string]interface{}{"foo": "bar"}, - false, - jwt.ValidationErrorUnverifiable, - nil, - }, - { - "invalid signing method", - "", - defaultKeyFunc, - map[string]interface{}{"foo": "bar"}, - false, - jwt.ValidationErrorSignatureInvalid, - &jwt.Parser{ValidMethods: []string{"HS256"}}, - }, - { - "valid signing method", - "", - defaultKeyFunc, - 
map[string]interface{}{"foo": "bar"}, - true, - 0, - &jwt.Parser{ValidMethods: []string{"RS256", "HS256"}}, - }, - { - "JSON Number", - "", - defaultKeyFunc, - map[string]interface{}{"foo": json.Number("123.4")}, - true, - 0, - &jwt.Parser{UseJSONNumber: true}, - }, -} - -func init() { - var e error - if jwtTestDefaultKey, e = ioutil.ReadFile("test/sample_key.pub"); e != nil { - panic(e) - } -} - -func makeSample(c map[string]interface{}) string { - key, e := ioutil.ReadFile("test/sample_key") - if e != nil { - panic(e.Error()) - } - - token := jwt.New(jwt.SigningMethodRS256) - token.Claims = c - s, e := token.SignedString(key) - - if e != nil { - panic(e.Error()) - } - - return s -} - -func TestParser_Parse(t *testing.T) { - for _, data := range jwtTestData { - if data.tokenString == "" { - data.tokenString = makeSample(data.claims) - } - - var token *jwt.Token - var err error - if data.parser != nil { - token, err = data.parser.Parse(data.tokenString, data.keyfunc) - } else { - token, err = jwt.Parse(data.tokenString, data.keyfunc) - } - - if !reflect.DeepEqual(data.claims, token.Claims) { - t.Errorf("[%v] Claims mismatch. Expecting: %v Got: %v", data.name, data.claims, token.Claims) - } - if data.valid && err != nil { - t.Errorf("[%v] Error while verifying token: %T:%v", data.name, err, err) - } - if !data.valid && err == nil { - t.Errorf("[%v] Invalid token passed validation", data.name) - } - if data.errors != 0 { - if err == nil { - t.Errorf("[%v] Expecting error. Didn't get one.", data.name) - } else { - // compare the bitfield part of the error - if e := err.(*jwt.ValidationError).Errors; e != data.errors { - t.Errorf("[%v] Errors don't match expectation. 
%v != %v", data.name, e, data.errors) - } - } - } - if data.valid && token.Signature == "" { - t.Errorf("[%v] Signature is left unpopulated after parsing", data.name) - } - } -} - -func TestParseRequest(t *testing.T) { - // Bearer token request - for _, data := range jwtTestData { - // FIXME: custom parsers are not supported by this helper. skip tests that require them - if data.parser != nil { - t.Logf("Skipping [%v]. Custom parsers are not supported by ParseRequest", data.name) - continue - } - - if data.tokenString == "" { - data.tokenString = makeSample(data.claims) - } - - r, _ := http.NewRequest("GET", "/", nil) - r.Header.Set("Authorization", fmt.Sprintf("Bearer %v", data.tokenString)) - token, err := jwt.ParseFromRequest(r, data.keyfunc) - - if token == nil { - t.Errorf("[%v] Token was not found: %v", data.name, err) - continue - } - if !reflect.DeepEqual(data.claims, token.Claims) { - t.Errorf("[%v] Claims mismatch. Expecting: %v Got: %v", data.name, data.claims, token.Claims) - } - if data.valid && err != nil { - t.Errorf("[%v] Error while verifying token: %v", data.name, err) - } - if !data.valid && err == nil { - t.Errorf("[%v] Invalid token passed validation", data.name) - } - } -} - -// Helper method for benchmarking various methods -func benchmarkSigning(b *testing.B, method jwt.SigningMethod, key interface{}) { - t := jwt.New(method) - b.RunParallel(func(pb *testing.PB) { - for pb.Next() { - if _, err := t.SignedString(key); err != nil { - b.Fatal(err) - } - } - }) - -} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/README.md juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/README.md --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/README.md 2016-08-16 08:56:25.000000000 +0000 +++ 
juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/README.md 1970-01-01 00:00:00.000000000 +0000 @@ -1,69 +0,0 @@ -A [go](http://www.golang.org) (or 'golang' for search engine friendliness) implementation of [JSON Web Tokens](http://self-issued.info/docs/draft-jones-json-web-token.html) - -[![Build Status](https://travis-ci.org/dgrijalva/jwt-go.svg?branch=master)](https://travis-ci.org/dgrijalva/jwt-go) - -**NOTICE:** A vulnerability in JWT was [recently published](https://auth0.com/blog/2015/03/31/critical-vulnerabilities-in-json-web-token-libraries/). As this library doesn't force users to validate the `alg` is what they expected, it's possible your usage is effected. There will be an update soon to remedy this, and it will likey require backwards-incompatible changes to the API. In the short term, please make sure your implementation verifies the `alg` is what you expect. - -## What the heck is a JWT? - -In short, it's a signed JSON object that does something useful (for example, authentication). It's commonly used for `Bearer` tokens in Oauth 2. A token is made of three parts, separated by `.`'s. The first two parts are JSON objects, that have been [base64url](http://tools.ietf.org/html/rfc4648) encoded. The last part is the signature, encoded the same way. - -The first part is called the header. It contains the necessary information for verifying the last part, the signature. For example, which encryption method was used for signing and what key was used. - -The part in the middle is the interesting bit. It's called the Claims and contains the actual stuff you care about. Refer to [the RFC](http://self-issued.info/docs/draft-jones-json-web-token.html) for information about reserved keys and the proper way to add your own. - -## What's in the box? - -This library supports the parsing and verification as well as the generation and signing of JWTs. 
Current supported signing algorithms are RSA256 and HMAC SHA256, though hooks are present for adding your own. - -## Parse and Verify - -Parsing and verifying tokens is pretty straight forward. You pass in the token and a function for looking up the key. This is done as a callback since you may need to parse the token to find out what signing method and key was used. - -```go - token, err := jwt.Parse(myToken, func(token *jwt.Token) (interface{}, error) { - // Don't forget to validate the alg is what you expect: - if _, ok := token.Method.(*jwt.SigningMethodRSA); !ok { - return nil, fmt.Errorf("Unexpected signing method: %v", token.Header["alg"]) - } - return myLookupKey(token.Header["kid"]) - }) - - if err == nil && token.Valid { - deliverGoodness("!") - } else { - deliverUtterRejection(":(") - } -``` - -## Create a token - -```go - // Create the token - token := jwt.New(jwt.SigningMethodHS256) - // Set some claims - token.Claims["foo"] = "bar" - token.Claims["exp"] = time.Now().Add(time.Hour * 72).Unix() - // Sign and get the complete encoded token as a string - tokenString, err := token.SignedString(mySigningKey) -``` - -## Extensions - -This library publishes all the necessary components for adding your own signing methods. Simply implement the `SigningMethod` interface and register a factory method using `RegisterSigningMethod`. - -Here's an example of an extension that integrates with the Google App Engine signing tools: https://github.com/someone1/gcp-jwt-go - -## Project Status & Versioning - -This library is considered production ready. Feedback and feature requests are appreciated. The API should be considered stable. There should be very few backwards-incompatible changes outside of major version updates (and only with good reason). - -This project uses [Semantic Versioning 2.0.0](http://semver.org). Accepted pull requests will land on `master`. Periodically, versions will be tagged from `master`. 
You can find all the releases on [the project releases page](https://github.com/dgrijalva/jwt-go/releases). - -While we try to make it obvious when we make breaking changes, there isn't a great mechanism for pushing announcements out to users. You may want to use this alternative package include: `gopkg.in/dgrijalva/jwt-go.v2`. It will do the right thing WRT semantic versioning. - -## More - -Documentation can be found [on godoc.org](http://godoc.org/github.com/dgrijalva/jwt-go). - -The command line utility included in this project (cmd/jwt) provides a straightforward example of token creation and parsing as well as a useful tool for debugging your own integration. For a more http centric example, see [this gist](https://gist.github.com/cryptix/45c33ecf0ae54828e63b). diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/rsa.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/rsa.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/rsa.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/rsa.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,114 +0,0 @@ -package jwt - -import ( - "crypto" - "crypto/rand" - "crypto/rsa" -) - -// Implements the RSA family of signing methods signing methods -type SigningMethodRSA struct { - Name string - Hash crypto.Hash -} - -// Specific instances for RS256 and company -var ( - SigningMethodRS256 *SigningMethodRSA - SigningMethodRS384 *SigningMethodRSA - SigningMethodRS512 *SigningMethodRSA -) - -func init() { - // RS256 - SigningMethodRS256 = &SigningMethodRSA{"RS256", crypto.SHA256} - RegisterSigningMethod(SigningMethodRS256.Alg(), func() SigningMethod { - return SigningMethodRS256 - }) - - // RS384 - SigningMethodRS384 = &SigningMethodRSA{"RS384", crypto.SHA384} - 
RegisterSigningMethod(SigningMethodRS384.Alg(), func() SigningMethod { - return SigningMethodRS384 - }) - - // RS512 - SigningMethodRS512 = &SigningMethodRSA{"RS512", crypto.SHA512} - RegisterSigningMethod(SigningMethodRS512.Alg(), func() SigningMethod { - return SigningMethodRS512 - }) -} - -func (m *SigningMethodRSA) Alg() string { - return m.Name -} - -// Implements the Verify method from SigningMethod -// For this signing method, must be either a PEM encoded PKCS1 or PKCS8 RSA public key as -// []byte, or an rsa.PublicKey structure. -func (m *SigningMethodRSA) Verify(signingString, signature string, key interface{}) error { - var err error - - // Decode the signature - var sig []byte - if sig, err = DecodeSegment(signature); err != nil { - return err - } - - var rsaKey *rsa.PublicKey - - switch k := key.(type) { - case []byte: - if rsaKey, err = ParseRSAPublicKeyFromPEM(k); err != nil { - return err - } - case *rsa.PublicKey: - rsaKey = k - default: - return ErrInvalidKey - } - - // Create hasher - if !m.Hash.Available() { - return ErrHashUnavailable - } - hasher := m.Hash.New() - hasher.Write([]byte(signingString)) - - // Verify the signature - return rsa.VerifyPKCS1v15(rsaKey, m.Hash, hasher.Sum(nil), sig) -} - -// Implements the Sign method from SigningMethod -// For this signing method, must be either a PEM encoded PKCS1 or PKCS8 RSA private key as -// []byte, or an rsa.PrivateKey structure. 
-func (m *SigningMethodRSA) Sign(signingString string, key interface{}) (string, error) { - var err error - var rsaKey *rsa.PrivateKey - - switch k := key.(type) { - case []byte: - if rsaKey, err = ParseRSAPrivateKeyFromPEM(k); err != nil { - return "", err - } - case *rsa.PrivateKey: - rsaKey = k - default: - return "", ErrInvalidKey - } - - // Create the hasher - if !m.Hash.Available() { - return "", ErrHashUnavailable - } - - hasher := m.Hash.New() - hasher.Write([]byte(signingString)) - - // Sign the string and return the encoded bytes - if sigBytes, err := rsa.SignPKCS1v15(rand.Reader, rsaKey, m.Hash, hasher.Sum(nil)); err == nil { - return EncodeSegment(sigBytes), nil - } else { - return "", err - } -} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/rsa_pss.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/rsa_pss.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/rsa_pss.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/rsa_pss.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,126 +0,0 @@ -// +build go1.4 - -package jwt - -import ( - "crypto" - "crypto/rand" - "crypto/rsa" -) - -// Implements the RSAPSS family of signing methods signing methods -type SigningMethodRSAPSS struct { - *SigningMethodRSA - Options *rsa.PSSOptions -} - -// Specific instances for RS/PS and company -var ( - SigningMethodPS256 *SigningMethodRSAPSS - SigningMethodPS384 *SigningMethodRSAPSS - SigningMethodPS512 *SigningMethodRSAPSS -) - -func init() { - // PS256 - SigningMethodPS256 = &SigningMethodRSAPSS{ - &SigningMethodRSA{ - Name: "PS256", - Hash: crypto.SHA256, - }, - &rsa.PSSOptions{ - SaltLength: rsa.PSSSaltLengthAuto, - Hash: crypto.SHA256, - }, - } - RegisterSigningMethod(SigningMethodPS256.Alg(), 
func() SigningMethod { - return SigningMethodPS256 - }) - - // PS384 - SigningMethodPS384 = &SigningMethodRSAPSS{ - &SigningMethodRSA{ - Name: "PS384", - Hash: crypto.SHA384, - }, - &rsa.PSSOptions{ - SaltLength: rsa.PSSSaltLengthAuto, - Hash: crypto.SHA384, - }, - } - RegisterSigningMethod(SigningMethodPS384.Alg(), func() SigningMethod { - return SigningMethodPS384 - }) - - // PS512 - SigningMethodPS512 = &SigningMethodRSAPSS{ - &SigningMethodRSA{ - Name: "PS512", - Hash: crypto.SHA512, - }, - &rsa.PSSOptions{ - SaltLength: rsa.PSSSaltLengthAuto, - Hash: crypto.SHA512, - }, - } - RegisterSigningMethod(SigningMethodPS512.Alg(), func() SigningMethod { - return SigningMethodPS512 - }) -} - -// Implements the Verify method from SigningMethod -// For this verify method, key must be an rsa.PublicKey struct -func (m *SigningMethodRSAPSS) Verify(signingString, signature string, key interface{}) error { - var err error - - // Decode the signature - var sig []byte - if sig, err = DecodeSegment(signature); err != nil { - return err - } - - var rsaKey *rsa.PublicKey - switch k := key.(type) { - case *rsa.PublicKey: - rsaKey = k - default: - return ErrInvalidKey - } - - // Create hasher - if !m.Hash.Available() { - return ErrHashUnavailable - } - hasher := m.Hash.New() - hasher.Write([]byte(signingString)) - - return rsa.VerifyPSS(rsaKey, m.Hash, hasher.Sum(nil), sig, m.Options) -} - -// Implements the Sign method from SigningMethod -// For this signing method, key must be an rsa.PrivateKey struct -func (m *SigningMethodRSAPSS) Sign(signingString string, key interface{}) (string, error) { - var rsaKey *rsa.PrivateKey - - switch k := key.(type) { - case *rsa.PrivateKey: - rsaKey = k - default: - return "", ErrInvalidKey - } - - // Create the hasher - if !m.Hash.Available() { - return "", ErrHashUnavailable - } - - hasher := m.Hash.New() - hasher.Write([]byte(signingString)) - - // Sign the string and return the encoded bytes - if sigBytes, err := rsa.SignPSS(rand.Reader, 
rsaKey, m.Hash, hasher.Sum(nil), m.Options); err == nil { - return EncodeSegment(sigBytes), nil - } else { - return "", err - } -} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/rsa_pss_test.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/rsa_pss_test.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/rsa_pss_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/rsa_pss_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,96 +0,0 @@ -// +build go1.4 - -package jwt_test - -import ( - "crypto/rsa" - "io/ioutil" - "strings" - "testing" - - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go" -) - -var rsaPSSTestData = []struct { - name string - tokenString string - alg string - claims map[string]interface{} - valid bool -}{ - { - "Basic PS256", - "eyJhbGciOiJQUzI1NiIsInR5cCI6IkpXVCJ9.eyJmb28iOiJiYXIifQ.PPG4xyDVY8ffp4CcxofNmsTDXsrVG2npdQuibLhJbv4ClyPTUtR5giNSvuxo03kB6I8VXVr0Y9X7UxhJVEoJOmULAwRWaUsDnIewQa101cVhMa6iR8X37kfFoiZ6NkS-c7henVkkQWu2HtotkEtQvN5hFlk8IevXXPmvZlhQhwzB1sGzGYnoi1zOfuL98d3BIjUjtlwii5w6gYG2AEEzp7HnHCsb3jIwUPdq86Oe6hIFjtBwduIK90ca4UqzARpcfwxHwVLMpatKask00AgGVI0ysdk0BLMjmLutquD03XbThHScC2C2_Pp4cHWgMzvbgLU2RYYZcZRKr46QeNgz9w", - "PS256", - map[string]interface{}{"foo": "bar"}, - true, - }, - { - "Basic PS384", - "eyJhbGciOiJQUzM4NCIsInR5cCI6IkpXVCJ9.eyJmb28iOiJiYXIifQ.w7-qqgj97gK4fJsq_DCqdYQiylJjzWONvD0qWWWhqEOFk2P1eDULPnqHRnjgTXoO4HAw4YIWCsZPet7nR3Xxq4ZhMqvKW8b7KlfRTb9cH8zqFvzMmybQ4jv2hKc3bXYqVow3AoR7hN_CWXI3Dv6Kd2X5xhtxRHI6IL39oTVDUQ74LACe-9t4c3QRPuj6Pq1H4FAT2E2kW_0KOc6EQhCLWEhm2Z2__OZskDC8AiPpP8Kv4k2vB7l0IKQu8Pr4RcNBlqJdq8dA5D3hk5TLxP8V5nG1Ib80MOMMqoS3FQvSLyolFX-R_jZ3-zfq6Ebsqr0yEb0AH2CfsECF7935Pa0FKQ", - "PS384", - 
map[string]interface{}{"foo": "bar"}, - true, - }, - { - "Basic PS512", - "eyJhbGciOiJQUzUxMiIsInR5cCI6IkpXVCJ9.eyJmb28iOiJiYXIifQ.GX1HWGzFaJevuSLavqqFYaW8_TpvcjQ8KfC5fXiSDzSiT9UD9nB_ikSmDNyDILNdtjZLSvVKfXxZJqCfefxAtiozEDDdJthZ-F0uO4SPFHlGiXszvKeodh7BuTWRI2wL9-ZO4mFa8nq3GMeQAfo9cx11i7nfN8n2YNQ9SHGovG7_T_AvaMZB_jT6jkDHpwGR9mz7x1sycckEo6teLdHRnH_ZdlHlxqknmyTu8Odr5Xh0sJFOL8BepWbbvIIn-P161rRHHiDWFv6nhlHwZnVzjx7HQrWSGb6-s2cdLie9QL_8XaMcUpjLkfOMKkDOfHo6AvpL7Jbwi83Z2ZTHjJWB-A", - "PS512", - map[string]interface{}{"foo": "bar"}, - true, - }, - { - "basic PS256 invalid: foo => bar", - "eyJhbGciOiJQUzI1NiIsInR5cCI6IkpXVCJ9.eyJmb28iOiJiYXIifQ.PPG4xyDVY8ffp4CcxofNmsTDXsrVG2npdQuibLhJbv4ClyPTUtR5giNSvuxo03kB6I8VXVr0Y9X7UxhJVEoJOmULAwRWaUsDnIewQa101cVhMa6iR8X37kfFoiZ6NkS-c7henVkkQWu2HtotkEtQvN5hFlk8IevXXPmvZlhQhwzB1sGzGYnoi1zOfuL98d3BIjUjtlwii5w6gYG2AEEzp7HnHCsb3jIwUPdq86Oe6hIFjtBwduIK90ca4UqzARpcfwxHwVLMpatKask00AgGVI0ysdk0BLMjmLutquD03XbThHScC2C2_Pp4cHWgMzvbgLU2RYYZcZRKr46QeNgz9W", - "PS256", - map[string]interface{}{"foo": "bar"}, - false, - }, -} - -func TestRSAPSSVerify(t *testing.T) { - var err error - - key, _ := ioutil.ReadFile("test/sample_key.pub") - var rsaPSSKey *rsa.PublicKey - if rsaPSSKey, err = jwt.ParseRSAPublicKeyFromPEM(key); err != nil { - t.Errorf("Unable to parse RSA public key: %v", err) - } - - for _, data := range rsaPSSTestData { - parts := strings.Split(data.tokenString, ".") - - method := jwt.GetSigningMethod(data.alg) - err := method.Verify(strings.Join(parts[0:2], "."), parts[2], rsaPSSKey) - if data.valid && err != nil { - t.Errorf("[%v] Error while verifying key: %v", data.name, err) - } - if !data.valid && err == nil { - t.Errorf("[%v] Invalid key passed validation", data.name) - } - } -} - -func TestRSAPSSSign(t *testing.T) { - var err error - - key, _ := ioutil.ReadFile("test/sample_key") - var rsaPSSKey *rsa.PrivateKey - if rsaPSSKey, err = jwt.ParseRSAPrivateKeyFromPEM(key); err != nil { - t.Errorf("Unable to parse RSA private key: %v", err) 
- } - - for _, data := range rsaPSSTestData { - if data.valid { - parts := strings.Split(data.tokenString, ".") - method := jwt.GetSigningMethod(data.alg) - sig, err := method.Sign(strings.Join(parts[0:2], "."), rsaPSSKey) - if err != nil { - t.Errorf("[%v] Error signing token: %v", data.name, err) - } - if sig == parts[2] { - t.Errorf("[%v] Signatures shouldn't match\nnew:\n%v\noriginal:\n%v", data.name, sig, parts[2]) - } - } - } -} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/rsa_test.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/rsa_test.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/rsa_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/rsa_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,174 +0,0 @@ -package jwt_test - -import ( - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go" - "io/ioutil" - "strings" - "testing" -) - -var rsaTestData = []struct { - name string - tokenString string - alg string - claims map[string]interface{} - valid bool -}{ - { - "Basic RS256", - "eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiJ9.eyJmb28iOiJiYXIifQ.FhkiHkoESI_cG3NPigFrxEk9Z60_oXrOT2vGm9Pn6RDgYNovYORQmmA0zs1AoAOf09ly2Nx2YAg6ABqAYga1AcMFkJljwxTT5fYphTuqpWdy4BELeSYJx5Ty2gmr8e7RonuUztrdD5WfPqLKMm1Ozp_T6zALpRmwTIW0QPnaBXaQD90FplAg46Iy1UlDKr-Eupy0i5SLch5Q-p2ZpaL_5fnTIUDlxC3pWhJTyx_71qDI-mAA_5lE_VdroOeflG56sSmDxopPEG3bFlSu1eowyBfxtu0_CuVd-M42RU75Zc4Gsj6uV77MBtbMrf4_7M_NUTSgoIF3fRqxrj0NzihIBg", - "RS256", - map[string]interface{}{"foo": "bar"}, - true, - }, - { - "Basic RS384", - 
"eyJhbGciOiJSUzM4NCIsInR5cCI6IkpXVCJ9.eyJmb28iOiJiYXIifQ.W-jEzRfBigtCWsinvVVuldiuilzVdU5ty0MvpLaSaqK9PlAWWlDQ1VIQ_qSKzwL5IXaZkvZFJXT3yL3n7OUVu7zCNJzdwznbC8Z-b0z2lYvcklJYi2VOFRcGbJtXUqgjk2oGsiqUMUMOLP70TTefkpsgqDxbRh9CDUfpOJgW-dU7cmgaoswe3wjUAUi6B6G2YEaiuXC0XScQYSYVKIzgKXJV8Zw-7AN_DBUI4GkTpsvQ9fVVjZM9csQiEXhYekyrKu1nu_POpQonGd8yqkIyXPECNmmqH5jH4sFiF67XhD7_JpkvLziBpI-uh86evBUadmHhb9Otqw3uV3NTaXLzJw", - "RS384", - map[string]interface{}{"foo": "bar"}, - true, - }, - { - "Basic RS512", - "eyJhbGciOiJSUzUxMiIsInR5cCI6IkpXVCJ9.eyJmb28iOiJiYXIifQ.zBlLlmRrUxx4SJPUbV37Q1joRcI9EW13grnKduK3wtYKmDXbgDpF1cZ6B-2Jsm5RB8REmMiLpGms-EjXhgnyh2TSHE-9W2gA_jvshegLWtwRVDX40ODSkTb7OVuaWgiy9y7llvcknFBTIg-FnVPVpXMmeV_pvwQyhaz1SSwSPrDyxEmksz1hq7YONXhXPpGaNbMMeDTNP_1oj8DZaqTIL9TwV8_1wb2Odt_Fy58Ke2RVFijsOLdnyEAjt2n9Mxihu9i3PhNBkkxa2GbnXBfq3kzvZ_xxGGopLdHhJjcGWXO-NiwI9_tiu14NRv4L2xC0ItD9Yz68v2ZIZEp_DuzwRQ", - "RS512", - map[string]interface{}{"foo": "bar"}, - true, - }, - { - "basic invalid: foo => bar", - "eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiJ9.eyJmb28iOiJiYXIifQ.EhkiHkoESI_cG3NPigFrxEk9Z60_oXrOT2vGm9Pn6RDgYNovYORQmmA0zs1AoAOf09ly2Nx2YAg6ABqAYga1AcMFkJljwxTT5fYphTuqpWdy4BELeSYJx5Ty2gmr8e7RonuUztrdD5WfPqLKMm1Ozp_T6zALpRmwTIW0QPnaBXaQD90FplAg46Iy1UlDKr-Eupy0i5SLch5Q-p2ZpaL_5fnTIUDlxC3pWhJTyx_71qDI-mAA_5lE_VdroOeflG56sSmDxopPEG3bFlSu1eowyBfxtu0_CuVd-M42RU75Zc4Gsj6uV77MBtbMrf4_7M_NUTSgoIF3fRqxrj0NzihIBg", - "RS256", - map[string]interface{}{"foo": "bar"}, - false, - }, -} - -func TestRSAVerify(t *testing.T) { - key, _ := ioutil.ReadFile("test/sample_key.pub") - - for _, data := range rsaTestData { - parts := strings.Split(data.tokenString, ".") - - method := jwt.GetSigningMethod(data.alg) - err := method.Verify(strings.Join(parts[0:2], "."), parts[2], key) - if data.valid && err != nil { - t.Errorf("[%v] Error while verifying key: %v", data.name, err) - } - if !data.valid && err == nil { - t.Errorf("[%v] Invalid key passed validation", data.name) - } - } -} - -func TestRSASign(t *testing.T) { - key, 
_ := ioutil.ReadFile("test/sample_key") - - for _, data := range rsaTestData { - if data.valid { - parts := strings.Split(data.tokenString, ".") - method := jwt.GetSigningMethod(data.alg) - sig, err := method.Sign(strings.Join(parts[0:2], "."), key) - if err != nil { - t.Errorf("[%v] Error signing token: %v", data.name, err) - } - if sig != parts[2] { - t.Errorf("[%v] Incorrect signature.\nwas:\n%v\nexpecting:\n%v", data.name, sig, parts[2]) - } - } - } -} - -func TestRSAVerifyWithPreParsedPrivateKey(t *testing.T) { - key, _ := ioutil.ReadFile("test/sample_key.pub") - parsedKey, err := jwt.ParseRSAPublicKeyFromPEM(key) - if err != nil { - t.Fatal(err) - } - testData := rsaTestData[0] - parts := strings.Split(testData.tokenString, ".") - err = jwt.SigningMethodRS256.Verify(strings.Join(parts[0:2], "."), parts[2], parsedKey) - if err != nil { - t.Errorf("[%v] Error while verifying key: %v", testData.name, err) - } -} - -func TestRSAWithPreParsedPrivateKey(t *testing.T) { - key, _ := ioutil.ReadFile("test/sample_key") - parsedKey, err := jwt.ParseRSAPrivateKeyFromPEM(key) - if err != nil { - t.Fatal(err) - } - testData := rsaTestData[0] - parts := strings.Split(testData.tokenString, ".") - sig, err := jwt.SigningMethodRS256.Sign(strings.Join(parts[0:2], "."), parsedKey) - if err != nil { - t.Errorf("[%v] Error signing token: %v", testData.name, err) - } - if sig != parts[2] { - t.Errorf("[%v] Incorrect signature.\nwas:\n%v\nexpecting:\n%v", testData.name, sig, parts[2]) - } -} - -func TestRSAKeyParsing(t *testing.T) { - key, _ := ioutil.ReadFile("test/sample_key") - pubKey, _ := ioutil.ReadFile("test/sample_key.pub") - badKey := []byte("All your base are belong to key") - - // Test parsePrivateKey - if _, e := jwt.ParseRSAPrivateKeyFromPEM(key); e != nil { - t.Errorf("Failed to parse valid private key: %v", e) - } - - if k, e := jwt.ParseRSAPrivateKeyFromPEM(pubKey); e == nil { - t.Errorf("Parsed public key as valid private key: %v", k) - } - - if k, e := 
jwt.ParseRSAPrivateKeyFromPEM(badKey); e == nil { - t.Errorf("Parsed invalid key as valid private key: %v", k) - } - - // Test parsePublicKey - if _, e := jwt.ParseRSAPublicKeyFromPEM(pubKey); e != nil { - t.Errorf("Failed to parse valid public key: %v", e) - } - - if k, e := jwt.ParseRSAPublicKeyFromPEM(key); e == nil { - t.Errorf("Parsed private key as valid public key: %v", k) - } - - if k, e := jwt.ParseRSAPublicKeyFromPEM(badKey); e == nil { - t.Errorf("Parsed invalid key as valid private key: %v", k) - } - -} - -func BenchmarkRS256Signing(b *testing.B) { - key, _ := ioutil.ReadFile("test/sample_key") - parsedKey, err := jwt.ParseRSAPrivateKeyFromPEM(key) - if err != nil { - b.Fatal(err) - } - - benchmarkSigning(b, jwt.SigningMethodRS256, parsedKey) -} - -func BenchmarkRS384Signing(b *testing.B) { - key, _ := ioutil.ReadFile("test/sample_key") - parsedKey, err := jwt.ParseRSAPrivateKeyFromPEM(key) - if err != nil { - b.Fatal(err) - } - - benchmarkSigning(b, jwt.SigningMethodRS384, parsedKey) -} - -func BenchmarkRS512Signing(b *testing.B) { - key, _ := ioutil.ReadFile("test/sample_key") - parsedKey, err := jwt.ParseRSAPrivateKeyFromPEM(key) - if err != nil { - b.Fatal(err) - } - - benchmarkSigning(b, jwt.SigningMethodRS512, parsedKey) -} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/rsa_utils.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/rsa_utils.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/rsa_utils.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/rsa_utils.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,68 +0,0 @@ -package jwt - -import ( - "crypto/rsa" - "crypto/x509" - "encoding/pem" - "errors" -) - -var ( - ErrKeyMustBePEMEncoded = errors.New("Invalid Key: Key 
must be PEM encoded PKCS1 or PKCS8 private key") - ErrNotRSAPrivateKey = errors.New("Key is not a valid RSA private key") -) - -// Parse PEM encoded PKCS1 or PKCS8 private key -func ParseRSAPrivateKeyFromPEM(key []byte) (*rsa.PrivateKey, error) { - var err error - - // Parse PEM block - var block *pem.Block - if block, _ = pem.Decode(key); block == nil { - return nil, ErrKeyMustBePEMEncoded - } - - var parsedKey interface{} - if parsedKey, err = x509.ParsePKCS1PrivateKey(block.Bytes); err != nil { - if parsedKey, err = x509.ParsePKCS8PrivateKey(block.Bytes); err != nil { - return nil, err - } - } - - var pkey *rsa.PrivateKey - var ok bool - if pkey, ok = parsedKey.(*rsa.PrivateKey); !ok { - return nil, ErrNotRSAPrivateKey - } - - return pkey, nil -} - -// Parse PEM encoded PKCS1 or PKCS8 public key -func ParseRSAPublicKeyFromPEM(key []byte) (*rsa.PublicKey, error) { - var err error - - // Parse PEM block - var block *pem.Block - if block, _ = pem.Decode(key); block == nil { - return nil, ErrKeyMustBePEMEncoded - } - - // Parse the key - var parsedKey interface{} - if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil { - if cert, err := x509.ParseCertificate(block.Bytes); err == nil { - parsedKey = cert.PublicKey - } else { - return nil, err - } - } - - var pkey *rsa.PublicKey - var ok bool - if pkey, ok = parsedKey.(*rsa.PublicKey); !ok { - return nil, ErrNotRSAPrivateKey - } - - return pkey, nil -} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/signing_method.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/signing_method.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/signing_method.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/signing_method.go 
1970-01-01 00:00:00.000000000 +0000 @@ -1,24 +0,0 @@ -package jwt - -var signingMethods = map[string]func() SigningMethod{} - -// Implement SigningMethod to add new methods for signing or verifying tokens. -type SigningMethod interface { - Verify(signingString, signature string, key interface{}) error // Returns nil if signature is valid - Sign(signingString string, key interface{}) (string, error) // Returns encoded signature or error - Alg() string // returns the alg identifier for this method (example: 'HS256') -} - -// Register the "alg" name and a factory function for signing method. -// This is typically done during init() in the method's implementation -func RegisterSigningMethod(alg string, f func() SigningMethod) { - signingMethods[alg] = f -} - -// Get a signing method from an "alg" string -func GetSigningMethod(alg string) (method SigningMethod) { - if methodF, ok := signingMethods[alg]; ok { - method = methodF() - } - return -} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/test/ec256-private.pem juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/test/ec256-private.pem --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/test/ec256-private.pem 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/test/ec256-private.pem 1970-01-01 00:00:00.000000000 +0000 @@ -1,5 +0,0 @@ ------BEGIN EC PRIVATE KEY----- -MHcCAQEEIAh5qA3rmqQQuu0vbKV/+zouz/y/Iy2pLpIcWUSyImSwoAoGCCqGSM49 -AwEHoUQDQgAEYD54V/vp+54P9DXarYqx4MPcm+HKRIQzNasYSoRQHQ/6S6Ps8tpM -cT+KvIIC8W/e9k0W7Cm72M1P9jU7SLf/vg== ------END EC PRIVATE KEY----- diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/test/ec256-public.pem 
juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/test/ec256-public.pem --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/test/ec256-public.pem 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/test/ec256-public.pem 1970-01-01 00:00:00.000000000 +0000 @@ -1,4 +0,0 @@ ------BEGIN PUBLIC KEY----- -MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEYD54V/vp+54P9DXarYqx4MPcm+HK -RIQzNasYSoRQHQ/6S6Ps8tpMcT+KvIIC8W/e9k0W7Cm72M1P9jU7SLf/vg== ------END PUBLIC KEY----- diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/test/ec384-private.pem juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/test/ec384-private.pem --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/test/ec384-private.pem 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/test/ec384-private.pem 1970-01-01 00:00:00.000000000 +0000 @@ -1,6 +0,0 @@ ------BEGIN EC PRIVATE KEY----- -MIGkAgEBBDCaCvMHKhcG/qT7xsNLYnDT7sE/D+TtWIol1ROdaK1a564vx5pHbsRy -SEKcIxISi1igBwYFK4EEACKhZANiAATYa7rJaU7feLMqrAx6adZFNQOpaUH/Uylb -ZLriOLON5YFVwtVUpO1FfEXZUIQpptRPtc5ixIPY658yhBSb6irfIJUSP9aYTflJ -GKk/mDkK4t8mWBzhiD5B6jg9cEGhGgA= ------END EC PRIVATE KEY----- diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/test/ec384-public.pem juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/test/ec384-public.pem --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/test/ec384-public.pem 2016-08-16 
08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/test/ec384-public.pem 1970-01-01 00:00:00.000000000 +0000 @@ -1,5 +0,0 @@ ------BEGIN PUBLIC KEY----- -MHYwEAYHKoZIzj0CAQYFK4EEACIDYgAE2Gu6yWlO33izKqwMemnWRTUDqWlB/1Mp -W2S64jizjeWBVcLVVKTtRXxF2VCEKabUT7XOYsSD2OufMoQUm+oq3yCVEj/WmE35 -SRipP5g5CuLfJlgc4Yg+Qeo4PXBBoRoA ------END PUBLIC KEY----- diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/test/ec512-private.pem juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/test/ec512-private.pem --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/test/ec512-private.pem 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/test/ec512-private.pem 1970-01-01 00:00:00.000000000 +0000 @@ -1,7 +0,0 @@ ------BEGIN EC PRIVATE KEY----- -MIHcAgEBBEIB0pE4uFaWRx7t03BsYlYvF1YvKaBGyvoakxnodm9ou0R9wC+sJAjH -QZZJikOg4SwNqgQ/hyrOuDK2oAVHhgVGcYmgBwYFK4EEACOhgYkDgYYABAAJXIuw -12MUzpHggia9POBFYXSxaOGKGbMjIyDI+6q7wi7LMw3HgbaOmgIqFG72o8JBQwYN -4IbXHf+f86CRY1AA2wHzbHvt6IhkCXTNxBEffa1yMUgu8n9cKKF2iLgyQKcKqW33 -8fGOw/n3Rm2Yd/EB56u2rnD29qS+nOM9eGS+gy39OQ== ------END EC PRIVATE KEY----- diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/test/ec512-public.pem juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/test/ec512-public.pem --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/test/ec512-public.pem 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/test/ec512-public.pem 
1970-01-01 00:00:00.000000000 +0000 @@ -1,6 +0,0 @@ ------BEGIN PUBLIC KEY----- -MIGbMBAGByqGSM49AgEGBSuBBAAjA4GGAAQACVyLsNdjFM6R4IImvTzgRWF0sWjh -ihmzIyMgyPuqu8IuyzMNx4G2jpoCKhRu9qPCQUMGDeCG1x3/n/OgkWNQANsB82x7 -7eiIZAl0zcQRH32tcjFILvJ/XCihdoi4MkCnCqlt9/HxjsP590ZtmHfxAeertq5w -9vakvpzjPXhkvoMt/Tk= ------END PUBLIC KEY----- diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/test/hmacTestKey juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/test/hmacTestKey --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/test/hmacTestKey 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/test/hmacTestKey 1970-01-01 00:00:00.000000000 +0000 @@ -1 +0,0 @@ -#5K+¥¼ƒ~ew{¦Z³(æðTÉ(©„²ÒP.¿ÓûZ’ÒGï–Š´Ãwb="=.!r.OÀÍšõgЀ£ \ No newline at end of file diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/test/sample_key juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/test/sample_key --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/test/sample_key 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/test/sample_key 1970-01-01 00:00:00.000000000 +0000 @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEowIBAAKCAQEA4f5wg5l2hKsTeNem/V41fGnJm6gOdrj8ym3rFkEU/wT8RDtn -SgFEZOQpHEgQ7JL38xUfU0Y3g6aYw9QT0hJ7mCpz9Er5qLaMXJwZxzHzAahlfA0i -cqabvJOMvQtzD6uQv6wPEyZtDTWiQi9AXwBpHssPnpYGIn20ZZuNlX2BrClciHhC -PUIIZOQn/MmqTD31jSyjoQoV7MhhMTATKJx2XrHhR+1DcKJzQBSTAGnpYVaqpsAR -ap+nwRipr3nUTuxyGohBTSmjJ2usSeQXHI3bODIRe1AuTyHceAbewn8b462yEWKA 
-Rdpd9AjQW5SIVPfdsz5B6GlYQ5LdYKtznTuy7wIDAQABAoIBAQCwia1k7+2oZ2d3 -n6agCAbqIE1QXfCmh41ZqJHbOY3oRQG3X1wpcGH4Gk+O+zDVTV2JszdcOt7E5dAy -MaomETAhRxB7hlIOnEN7WKm+dGNrKRvV0wDU5ReFMRHg31/Lnu8c+5BvGjZX+ky9 -POIhFFYJqwCRlopGSUIxmVj5rSgtzk3iWOQXr+ah1bjEXvlxDOWkHN6YfpV5ThdE -KdBIPGEVqa63r9n2h+qazKrtiRqJqGnOrHzOECYbRFYhexsNFz7YT02xdfSHn7gM -IvabDDP/Qp0PjE1jdouiMaFHYnLBbgvlnZW9yuVf/rpXTUq/njxIXMmvmEyyvSDn -FcFikB8pAoGBAPF77hK4m3/rdGT7X8a/gwvZ2R121aBcdPwEaUhvj/36dx596zvY -mEOjrWfZhF083/nYWE2kVquj2wjs+otCLfifEEgXcVPTnEOPO9Zg3uNSL0nNQghj -FuD3iGLTUBCtM66oTe0jLSslHe8gLGEQqyMzHOzYxNqibxcOZIe8Qt0NAoGBAO+U -I5+XWjWEgDmvyC3TrOSf/KCGjtu0TSv30ipv27bDLMrpvPmD/5lpptTFwcxvVhCs -2b+chCjlghFSWFbBULBrfci2FtliClOVMYrlNBdUSJhf3aYSG2Doe6Bgt1n2CpNn -/iu37Y3NfemZBJA7hNl4dYe+f+uzM87cdQ214+jrAoGAXA0XxX8ll2+ToOLJsaNT -OvNB9h9Uc5qK5X5w+7G7O998BN2PC/MWp8H+2fVqpXgNENpNXttkRm1hk1dych86 -EunfdPuqsX+as44oCyJGFHVBnWpm33eWQw9YqANRI+pCJzP08I5WK3osnPiwshd+ -hR54yjgfYhBFNI7B95PmEQkCgYBzFSz7h1+s34Ycr8SvxsOBWxymG5zaCsUbPsL0 -4aCgLScCHb9J+E86aVbbVFdglYa5Id7DPTL61ixhl7WZjujspeXZGSbmq0Kcnckb -mDgqkLECiOJW2NHP/j0McAkDLL4tysF8TLDO8gvuvzNC+WQ6drO2ThrypLVZQ+ry -eBIPmwKBgEZxhqa0gVvHQG/7Od69KWj4eJP28kq13RhKay8JOoN0vPmspXJo1HY3 -CKuHRG+AP579dncdUnOMvfXOtkdM4vk0+hWASBQzM9xzVcztCa+koAugjVaLS9A+ -9uQoqEeVNTckxx0S2bYevRy7hGQmUJTyQm3j1zEUR5jpdbL83Fbq ------END RSA PRIVATE KEY----- diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/test/sample_key.pub juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/test/sample_key.pub --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/test/sample_key.pub 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/test/sample_key.pub 1970-01-01 00:00:00.000000000 +0000 @@ -1,9 +0,0 @@ ------BEGIN PUBLIC KEY----- 
-MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA4f5wg5l2hKsTeNem/V41 -fGnJm6gOdrj8ym3rFkEU/wT8RDtnSgFEZOQpHEgQ7JL38xUfU0Y3g6aYw9QT0hJ7 -mCpz9Er5qLaMXJwZxzHzAahlfA0icqabvJOMvQtzD6uQv6wPEyZtDTWiQi9AXwBp -HssPnpYGIn20ZZuNlX2BrClciHhCPUIIZOQn/MmqTD31jSyjoQoV7MhhMTATKJx2 -XrHhR+1DcKJzQBSTAGnpYVaqpsARap+nwRipr3nUTuxyGohBTSmjJ2usSeQXHI3b -ODIRe1AuTyHceAbewn8b462yEWKARdpd9AjQW5SIVPfdsz5B6GlYQ5LdYKtznTuy -7wIDAQAB ------END PUBLIC KEY----- diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/token.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/token.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/token.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/token.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,126 +0,0 @@ -package jwt - -import ( - "encoding/base64" - "encoding/json" - "net/http" - "strings" - "time" -) - -// TimeFunc provides the current time when parsing token to validate "exp" claim (expiration time). -// You can override it to use another time value. This is useful for testing or if your -// server uses a different time zone than your tokens. -var TimeFunc = time.Now - -// Parse methods use this callback function to supply -// the key for verification. The function receives the parsed, -// but unverified Token. This allows you to use propries in the -// Header of the token (such as `kid`) to identify which key to use. -type Keyfunc func(*Token) (interface{}, error) - -// A JWT Token. Different fields will be used depending on whether you're -// creating or parsing/verifying a token. -type Token struct { - Raw string // The raw token. 
Populated when you Parse a token - Method SigningMethod // The signing method used or to be used - Header map[string]interface{} // The first segment of the token - Claims map[string]interface{} // The second segment of the token - Signature string // The third segment of the token. Populated when you Parse a token - Valid bool // Is the token valid? Populated when you Parse/Verify a token -} - -// Create a new Token. Takes a signing method -func New(method SigningMethod) *Token { - return &Token{ - Header: map[string]interface{}{ - "typ": "JWT", - "alg": method.Alg(), - }, - Claims: make(map[string]interface{}), - Method: method, - } -} - -// Get the complete, signed token -func (t *Token) SignedString(key interface{}) (string, error) { - var sig, sstr string - var err error - if sstr, err = t.SigningString(); err != nil { - return "", err - } - if sig, err = t.Method.Sign(sstr, key); err != nil { - return "", err - } - return strings.Join([]string{sstr, sig}, "."), nil -} - -// Generate the signing string. This is the -// most expensive part of the whole deal. Unless you -// need this for something special, just go straight for -// the SignedString. -func (t *Token) SigningString() (string, error) { - var err error - parts := make([]string, 2) - for i, _ := range parts { - var source map[string]interface{} - if i == 0 { - source = t.Header - } else { - source = t.Claims - } - - var jsonValue []byte - if jsonValue, err = json.Marshal(source); err != nil { - return "", err - } - - parts[i] = EncodeSegment(jsonValue) - } - return strings.Join(parts, "."), nil -} - -// Parse, validate, and return a token. -// keyFunc will receive the parsed token and should return the key for validating. -// If everything is kosher, err will be nil -func Parse(tokenString string, keyFunc Keyfunc) (*Token, error) { - return new(Parser).Parse(tokenString, keyFunc) -} - -// Try to find the token in an http.Request. 
-// This method will call ParseMultipartForm if there's no token in the header. -// Currently, it looks in the Authorization header as well as -// looking for an 'access_token' request parameter in req.Form. -func ParseFromRequest(req *http.Request, keyFunc Keyfunc) (token *Token, err error) { - - // Look for an Authorization header - if ah := req.Header.Get("Authorization"); ah != "" { - // Should be a bearer token - if len(ah) > 6 && strings.ToUpper(ah[0:6]) == "BEARER" { - return Parse(ah[7:], keyFunc) - } - } - - // Look for "access_token" parameter - req.ParseMultipartForm(10e6) - if tokStr := req.Form.Get("access_token"); tokStr != "" { - return Parse(tokStr, keyFunc) - } - - return nil, ErrNoTokenInRequest - -} - -// Encode JWT specific base64url encoding with padding stripped -func EncodeSegment(seg []byte) string { - return strings.TrimRight(base64.URLEncoding.EncodeToString(seg), "=") -} - -// Decode JWT specific base64url encoding with padding stripped -func DecodeSegment(seg string) ([]byte, error) { - if l := len(seg) % 4; l > 0 { - seg += strings.Repeat("=", 4-l) - } - - return base64.URLEncoding.DecodeString(seg) -} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/.travis.yml juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/.travis.yml --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/.travis.yml 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/.travis.yml 1970-01-01 00:00:00.000000000 +0000 @@ -1,7 +0,0 @@ -language: go - -go: - - 1.3.3 - - 1.4.2 - - 1.5 - - tip diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/VERSION_HISTORY.md 
juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/VERSION_HISTORY.md --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/VERSION_HISTORY.md 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/dgrijalva/jwt-go/VERSION_HISTORY.md 1970-01-01 00:00:00.000000000 +0000 @@ -1,67 +0,0 @@ -## `jwt-go` Version History - -#### 2.4.0 - -* Added new type, Parser, to allow for configuration of various parsing parameters - * You can now specify a list of valid signing methods. Anything outside this set will be rejected. - * You can now opt to use the `json.Number` type instead of `float64` when parsing token JSON -* Added support for [Travis CI](https://travis-ci.org/dgrijalva/jwt-go) -* Fixed some bugs with ECDSA parsing - -#### 2.3.0 - -* Added support for ECDSA signing methods -* Added support for RSA PSS signing methods (requires go v1.4) - -#### 2.2.0 - -* Gracefully handle a `nil` `Keyfunc` being passed to `Parse`. Result will now be the parsed token and an error, instead of a panic. - -#### 2.1.0 - -Backwards compatible API change that was missed in 2.0.0. - -* The `SignedString` method on `Token` now takes `interface{}` instead of `[]byte` - -#### 2.0.0 - -There were two major reasons for breaking backwards compatibility with this update. The first was a refactor required to expand the width of the RSA and HMAC-SHA signing implementations. There will likely be no required code changes to support this change. - -The second update, while unfortunately requiring a small change in integration, is required to open up this library to other signing methods. Not all keys used for all signing methods have a single standard on-disk representation. Requiring `[]byte` as the type for all keys proved too limiting. 
Additionally, this implementation allows for pre-parsed tokens to be reused, which might matter in an application that parses a high volume of tokens with a small set of keys. Backwards compatibilty has been maintained for passing `[]byte` to the RSA signing methods, but they will also accept `*rsa.PublicKey` and `*rsa.PrivateKey`. - -It is likely the only integration change required here will be to change `func(t *jwt.Token) ([]byte, error)` to `func(t *jwt.Token) (interface{}, error)` when calling `Parse`. - -* **Compatibility Breaking Changes** - * `SigningMethodHS256` is now `*SigningMethodHMAC` instead of `type struct` - * `SigningMethodRS256` is now `*SigningMethodRSA` instead of `type struct` - * `KeyFunc` now returns `interface{}` instead of `[]byte` - * `SigningMethod.Sign` now takes `interface{}` instead of `[]byte` for the key - * `SigningMethod.Verify` now takes `interface{}` instead of `[]byte` for the key -* Renamed type `SigningMethodHS256` to `SigningMethodHMAC`. Specific sizes are now just instances of this type. - * Added public package global `SigningMethodHS256` - * Added public package global `SigningMethodHS384` - * Added public package global `SigningMethodHS512` -* Renamed type `SigningMethodRS256` to `SigningMethodRSA`. Specific sizes are now just instances of this type. - * Added public package global `SigningMethodRS256` - * Added public package global `SigningMethodRS384` - * Added public package global `SigningMethodRS512` -* Moved sample private key for HMAC tests from an inline value to a file on disk. Value is unchanged. -* Refactored the RSA implementation to be easier to read -* Exposed helper methods `ParseRSAPrivateKeyFromPEM` and `ParseRSAPublicKeyFromPEM` - -#### 1.0.2 - -* Fixed bug in parsing public keys from certificates -* Added more tests around the parsing of keys for RS256 -* Code refactoring in RS256 implementation. 
No functional changes - -#### 1.0.1 - -* Fixed panic if RS256 signing method was passed an invalid key - -#### 1.0.0 - -* First versioned release -* API stabilized -* Supports creating, signing, parsing, and validating JWT tokens -* Supports RS256 and HS256 signing methods \ No newline at end of file diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/LICENSE juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/LICENSE --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/LICENSE 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/LICENSE 1970-01-01 00:00:00.000000000 +0000 @@ -1,27 +0,0 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/PATENTS juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/PATENTS --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/PATENTS 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/PATENTS 1970-01-01 00:00:00.000000000 +0000 @@ -1,22 +0,0 @@ -Additional IP Rights Grant (Patents) - -"This implementation" means the copyrightable works distributed by -Google as part of the Go project. - -Google hereby grants to You a perpetual, worldwide, non-exclusive, -no-charge, royalty-free, irrevocable (except as stated in this section) -patent license to make, have made, use, offer to sell, sell, import, -transfer and otherwise run, modify and propagate the contents of this -implementation of Go, where such license applies only to those patent -claims, both currently owned or controlled by Google and acquired in -the future, licensable by Google that are necessarily infringed by this -implementation of Go. This grant does not include claims that would be -infringed only as a consequence of further modification of this -implementation. 
If you or your agent or exclusive licensee institute or -order or agree to the institution of patent litigation against any -entity (including a cross-claim or counterclaim in a lawsuit) alleging -that this implementation of Go or any code incorporated within this -implementation of Go constitutes direct or contributory patent -infringement, or inducement of patent infringement, then any patent -rights granted to you under this License for this implementation of Go -shall terminate as of the date such litigation is filed. diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/bmp-string.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/bmp-string.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/bmp-string.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/bmp-string.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,50 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package pkcs12 - -import ( - "errors" - "unicode/utf16" -) - -// bmpString returns s encoded in UCS-2 with a zero terminator. -func bmpString(s string) ([]byte, error) { - // References: - // https://tools.ietf.org/html/rfc7292#appendix-B.1 - // http://en.wikipedia.org/wiki/Plane_(Unicode)#Basic_Multilingual_Plane - // - non-BMP characters are encoded in UTF 16 by using a surrogate pair of 16-bit codes - // EncodeRune returns 0xfffd if the rune does not need special encoding - // - the above RFC provides the info that BMPStrings are NULL terminated. 
- - ret := make([]byte, 0, 2*len(s)+2) - - for _, r := range s { - if t, _ := utf16.EncodeRune(r); t != 0xfffd { - return nil, errors.New("pkcs12: string contains characters that cannot be encoded in UCS-2") - } - ret = append(ret, byte(r/256), byte(r%256)) - } - - return append(ret, 0, 0), nil -} - -func decodeBMPString(bmpString []byte) (string, error) { - if len(bmpString)%2 != 0 { - return "", errors.New("pkcs12: odd-length BMP string") - } - - // strip terminator if present - if l := len(bmpString); l >= 2 && bmpString[l-1] == 0 && bmpString[l-2] == 0 { - bmpString = bmpString[:l-2] - } - - s := make([]uint16, 0, len(bmpString)/2) - for len(bmpString) > 0 { - s = append(s, uint16(bmpString[0])<<8+uint16(bmpString[1])) - bmpString = bmpString[2:] - } - - return string(utf16.Decode(s)), nil -} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/bmp-string_test.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/bmp-string_test.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/bmp-string_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/bmp-string_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,63 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package pkcs12 - -import ( - "bytes" - "encoding/hex" - "testing" -) - -var bmpStringTests = []struct { - in string - expectedHex string - shouldFail bool -}{ - {"", "0000", false}, - // Example from https://tools.ietf.org/html/rfc7292#appendix-B. - {"Beavis", "0042006500610076006900730000", false}, - // Some characters from the "Letterlike Symbols Unicode block". 
- {"\u2115 - Double-struck N", "21150020002d00200044006f00750062006c0065002d00730074007200750063006b0020004e0000", false}, - // any character outside the BMP should trigger an error. - {"\U0001f000 East wind (Mahjong)", "", true}, -} - -func TestBMPString(t *testing.T) { - for i, test := range bmpStringTests { - expected, err := hex.DecodeString(test.expectedHex) - if err != nil { - t.Fatalf("#%d: failed to decode expectation", i) - } - - out, err := bmpString(test.in) - if err == nil && test.shouldFail { - t.Errorf("#%d: expected to fail, but produced %x", i, out) - continue - } - - if err != nil && !test.shouldFail { - t.Errorf("#%d: failed unexpectedly: %s", i, err) - continue - } - - if !test.shouldFail { - if !bytes.Equal(out, expected) { - t.Errorf("#%d: expected %s, got %x", i, test.expectedHex, out) - continue - } - - roundTrip, err := decodeBMPString(out) - if err != nil { - t.Errorf("#%d: decoding output gave an error: %s", i, err) - continue - } - - if roundTrip != test.in { - t.Errorf("#%d: decoding output resulted in %q, but it should have been %q", i, roundTrip, test.in) - continue - } - } - } -} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/crypto.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/crypto.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/crypto.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/crypto.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,131 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package pkcs12 - -import ( - "bytes" - "crypto/cipher" - "crypto/des" - "crypto/x509/pkix" - "encoding/asn1" - "errors" - - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/internal/rc2" -) - -var ( - oidPBEWithSHAAnd3KeyTripleDESCBC = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 12, 1, 3}) - oidPBEWithSHAAnd40BitRC2CBC = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 12, 1, 6}) -) - -// pbeCipher is an abstraction of a PKCS#12 cipher. -type pbeCipher interface { - // create returns a cipher.Block given a key. - create(key []byte) (cipher.Block, error) - // deriveKey returns a key derived from the given password and salt. - deriveKey(salt, password []byte, iterations int) []byte - // deriveKey returns an IV derived from the given password and salt. - deriveIV(salt, password []byte, iterations int) []byte -} - -type shaWithTripleDESCBC struct{} - -func (shaWithTripleDESCBC) create(key []byte) (cipher.Block, error) { - return des.NewTripleDESCipher(key) -} - -func (shaWithTripleDESCBC) deriveKey(salt, password []byte, iterations int) []byte { - return pbkdf(sha1Sum, 20, 64, salt, password, iterations, 1, 24) -} - -func (shaWithTripleDESCBC) deriveIV(salt, password []byte, iterations int) []byte { - return pbkdf(sha1Sum, 20, 64, salt, password, iterations, 2, 8) -} - -type shaWith40BitRC2CBC struct{} - -func (shaWith40BitRC2CBC) create(key []byte) (cipher.Block, error) { - return rc2.New(key, len(key)*8) -} - -func (shaWith40BitRC2CBC) deriveKey(salt, password []byte, iterations int) []byte { - return pbkdf(sha1Sum, 20, 64, salt, password, iterations, 1, 5) -} - -func (shaWith40BitRC2CBC) deriveIV(salt, password []byte, iterations int) []byte { - return pbkdf(sha1Sum, 20, 64, salt, password, iterations, 2, 8) -} - -type pbeParams struct { - Salt []byte - Iterations int -} - -func pbDecrypterFor(algorithm pkix.AlgorithmIdentifier, password []byte) (cipher.BlockMode, int, error) { - var cipherType pbeCipher - - switch { - 
case algorithm.Algorithm.Equal(oidPBEWithSHAAnd3KeyTripleDESCBC): - cipherType = shaWithTripleDESCBC{} - case algorithm.Algorithm.Equal(oidPBEWithSHAAnd40BitRC2CBC): - cipherType = shaWith40BitRC2CBC{} - default: - return nil, 0, NotImplementedError("algorithm " + algorithm.Algorithm.String() + " is not supported") - } - - var params pbeParams - if err := unmarshal(algorithm.Parameters.FullBytes, ¶ms); err != nil { - return nil, 0, err - } - - key := cipherType.deriveKey(params.Salt, password, params.Iterations) - iv := cipherType.deriveIV(params.Salt, password, params.Iterations) - - block, err := cipherType.create(key) - if err != nil { - return nil, 0, err - } - - return cipher.NewCBCDecrypter(block, iv), block.BlockSize(), nil -} - -func pbDecrypt(info decryptable, password []byte) (decrypted []byte, err error) { - cbc, blockSize, err := pbDecrypterFor(info.Algorithm(), password) - if err != nil { - return nil, err - } - - encrypted := info.Data() - if len(encrypted) == 0 { - return nil, errors.New("pkcs12: empty encrypted data") - } - if len(encrypted)%blockSize != 0 { - return nil, errors.New("pkcs12: input is not a multiple of the block size") - } - decrypted = make([]byte, len(encrypted)) - cbc.CryptBlocks(decrypted, encrypted) - - psLen := int(decrypted[len(decrypted)-1]) - if psLen == 0 || psLen > blockSize { - return nil, ErrDecryption - } - - if len(decrypted) < psLen { - return nil, ErrDecryption - } - ps := decrypted[len(decrypted)-psLen:] - decrypted = decrypted[:len(decrypted)-psLen] - if bytes.Compare(ps, bytes.Repeat([]byte{byte(psLen)}, psLen)) != 0 { - return nil, ErrDecryption - } - - return -} - -// decryptable abstracts a object that contains ciphertext. 
-type decryptable interface { - Algorithm() pkix.AlgorithmIdentifier - Data() []byte -} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/crypto_test.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/crypto_test.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/crypto_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/crypto_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,125 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package pkcs12 - -import ( - "bytes" - "crypto/x509/pkix" - "encoding/asn1" - "testing" -) - -var sha1WithTripleDES = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 12, 1, 3}) - -func TestPbDecrypterFor(t *testing.T) { - params, _ := asn1.Marshal(pbeParams{ - Salt: []byte{1, 2, 3, 4, 5, 6, 7, 8}, - Iterations: 2048, - }) - alg := pkix.AlgorithmIdentifier{ - Algorithm: asn1.ObjectIdentifier([]int{1, 2, 3}), - Parameters: asn1.RawValue{ - FullBytes: params, - }, - } - - pass, _ := bmpString("Sesame open") - - _, _, err := pbDecrypterFor(alg, pass) - if _, ok := err.(NotImplementedError); !ok { - t.Errorf("expected not implemented error, got: %T %s", err, err) - } - - alg.Algorithm = sha1WithTripleDES - cbc, blockSize, err := pbDecrypterFor(alg, pass) - if err != nil { - t.Errorf("unexpected error from pbDecrypterFor %v", err) - } - if blockSize != 8 { - t.Errorf("unexpected block size %d, wanted 8", blockSize) - } - - plaintext := []byte{1, 2, 3, 4, 5, 6, 7, 8} - expectedCiphertext := []byte{185, 73, 135, 249, 137, 1, 122, 247} - ciphertext := make([]byte, len(plaintext)) - cbc.CryptBlocks(ciphertext, plaintext) - - if 
bytes.Compare(ciphertext, expectedCiphertext) != 0 { - t.Errorf("bad ciphertext, got %x but wanted %x", ciphertext, expectedCiphertext) - } -} - -var pbDecryptTests = []struct { - in []byte - expected []byte - expectedError error -}{ - { - []byte("\x33\x73\xf3\x9f\xda\x49\xae\xfc\xa0\x9a\xdf\x5a\x58\xa0\xea\x46"), // 7 padding bytes - []byte("A secret!"), - nil, - }, - { - []byte("\x33\x73\xf3\x9f\xda\x49\xae\xfc\x96\x24\x2f\x71\x7e\x32\x3f\xe7"), // 8 padding bytes - []byte("A secret"), - nil, - }, - { - []byte("\x35\x0c\xc0\x8d\xab\xa9\x5d\x30\x7f\x9a\xec\x6a\xd8\x9b\x9c\xd9"), // 9 padding bytes, incorrect - nil, - ErrDecryption, - }, - { - []byte("\xb2\xf9\x6e\x06\x60\xae\x20\xcf\x08\xa0\x7b\xd9\x6b\x20\xef\x41"), // incorrect padding bytes: [ ... 0x04 0x02 ] - nil, - ErrDecryption, - }, -} - -func TestPbDecrypt(t *testing.T) { - for i, test := range pbDecryptTests { - decryptable := testDecryptable{ - data: test.in, - algorithm: pkix.AlgorithmIdentifier{ - Algorithm: sha1WithTripleDES, - Parameters: pbeParams{ - Salt: []byte("\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8"), - Iterations: 4096, - }.RawASN1(), - }, - } - password, _ := bmpString("sesame") - - plaintext, err := pbDecrypt(decryptable, password) - if err != test.expectedError { - t.Errorf("#%d: got error %q, but wanted %q", i, err, test.expectedError) - continue - } - - if !bytes.Equal(plaintext, test.expected) { - t.Errorf("#%d: got %x, but wanted %x", i, plaintext, test.expected) - } - } -} - -type testDecryptable struct { - data []byte - algorithm pkix.AlgorithmIdentifier -} - -func (d testDecryptable) Algorithm() pkix.AlgorithmIdentifier { return d.algorithm } -func (d testDecryptable) Data() []byte { return d.data } - -func (params pbeParams) RawASN1() (raw asn1.RawValue) { - asn1Bytes, err := asn1.Marshal(params) - if err != nil { - panic(err) - } - _, err = asn1.Unmarshal(asn1Bytes, &raw) - if err != nil { - panic(err) - } - return -} diff -Nru 
juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/errors.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/errors.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/errors.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/errors.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,23 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package pkcs12 - -import "errors" - -var ( - // ErrDecryption represents a failure to decrypt the input. - ErrDecryption = errors.New("pkcs12: decryption error, incorrect padding") - - // ErrIncorrectPassword is returned when an incorrect password is detected. - // Usually, P12/PFX data is signed to be able to verify the password. - ErrIncorrectPassword = errors.New("pkcs12: decryption password incorrect") -) - -// NotImplementedError indicates that the input is not currently supported. 
-type NotImplementedError string - -func (e NotImplementedError) Error() string { - return "pkcs12: " + string(e) -} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/internal/rc2/bench_test.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/internal/rc2/bench_test.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/internal/rc2/bench_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/internal/rc2/bench_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,27 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package rc2 - -import ( - "testing" -) - -func BenchmarkEncrypt(b *testing.B) { - r, _ := New([]byte{0, 0, 0, 0, 0, 0, 0, 0}, 64) - b.ResetTimer() - var src [8]byte - for i := 0; i < b.N; i++ { - r.Encrypt(src[:], src[:]) - } -} - -func BenchmarkDecrypt(b *testing.B) { - r, _ := New([]byte{0, 0, 0, 0, 0, 0, 0, 0}, 64) - b.ResetTimer() - var src [8]byte - for i := 0; i < b.N; i++ { - r.Decrypt(src[:], src[:]) - } -} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/internal/rc2/rc2.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/internal/rc2/rc2.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/internal/rc2/rc2.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/internal/rc2/rc2.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,274 +0,0 @@ -// Copyright 2015 The Go Authors. 
All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package rc2 implements the RC2 cipher -/* -https://www.ietf.org/rfc/rfc2268.txt -http://people.csail.mit.edu/rivest/pubs/KRRR98.pdf - -This code is licensed under the MIT license. -*/ -package rc2 - -import ( - "crypto/cipher" - "encoding/binary" -) - -// The rc2 block size in bytes -const BlockSize = 8 - -type rc2Cipher struct { - k [64]uint16 -} - -// New returns a new rc2 cipher with the given key and effective key length t1 -func New(key []byte, t1 int) (cipher.Block, error) { - // TODO(dgryski): error checking for key length - return &rc2Cipher{ - k: expandKey(key, t1), - }, nil -} - -func (*rc2Cipher) BlockSize() int { return BlockSize } - -var piTable = [256]byte{ - 0xd9, 0x78, 0xf9, 0xc4, 0x19, 0xdd, 0xb5, 0xed, 0x28, 0xe9, 0xfd, 0x79, 0x4a, 0xa0, 0xd8, 0x9d, - 0xc6, 0x7e, 0x37, 0x83, 0x2b, 0x76, 0x53, 0x8e, 0x62, 0x4c, 0x64, 0x88, 0x44, 0x8b, 0xfb, 0xa2, - 0x17, 0x9a, 0x59, 0xf5, 0x87, 0xb3, 0x4f, 0x13, 0x61, 0x45, 0x6d, 0x8d, 0x09, 0x81, 0x7d, 0x32, - 0xbd, 0x8f, 0x40, 0xeb, 0x86, 0xb7, 0x7b, 0x0b, 0xf0, 0x95, 0x21, 0x22, 0x5c, 0x6b, 0x4e, 0x82, - 0x54, 0xd6, 0x65, 0x93, 0xce, 0x60, 0xb2, 0x1c, 0x73, 0x56, 0xc0, 0x14, 0xa7, 0x8c, 0xf1, 0xdc, - 0x12, 0x75, 0xca, 0x1f, 0x3b, 0xbe, 0xe4, 0xd1, 0x42, 0x3d, 0xd4, 0x30, 0xa3, 0x3c, 0xb6, 0x26, - 0x6f, 0xbf, 0x0e, 0xda, 0x46, 0x69, 0x07, 0x57, 0x27, 0xf2, 0x1d, 0x9b, 0xbc, 0x94, 0x43, 0x03, - 0xf8, 0x11, 0xc7, 0xf6, 0x90, 0xef, 0x3e, 0xe7, 0x06, 0xc3, 0xd5, 0x2f, 0xc8, 0x66, 0x1e, 0xd7, - 0x08, 0xe8, 0xea, 0xde, 0x80, 0x52, 0xee, 0xf7, 0x84, 0xaa, 0x72, 0xac, 0x35, 0x4d, 0x6a, 0x2a, - 0x96, 0x1a, 0xd2, 0x71, 0x5a, 0x15, 0x49, 0x74, 0x4b, 0x9f, 0xd0, 0x5e, 0x04, 0x18, 0xa4, 0xec, - 0xc2, 0xe0, 0x41, 0x6e, 0x0f, 0x51, 0xcb, 0xcc, 0x24, 0x91, 0xaf, 0x50, 0xa1, 0xf4, 0x70, 0x39, - 0x99, 0x7c, 0x3a, 0x85, 0x23, 0xb8, 0xb4, 0x7a, 0xfc, 0x02, 0x36, 0x5b, 0x25, 0x55, 0x97, 0x31, - 0x2d, 
0x5d, 0xfa, 0x98, 0xe3, 0x8a, 0x92, 0xae, 0x05, 0xdf, 0x29, 0x10, 0x67, 0x6c, 0xba, 0xc9, - 0xd3, 0x00, 0xe6, 0xcf, 0xe1, 0x9e, 0xa8, 0x2c, 0x63, 0x16, 0x01, 0x3f, 0x58, 0xe2, 0x89, 0xa9, - 0x0d, 0x38, 0x34, 0x1b, 0xab, 0x33, 0xff, 0xb0, 0xbb, 0x48, 0x0c, 0x5f, 0xb9, 0xb1, 0xcd, 0x2e, - 0xc5, 0xf3, 0xdb, 0x47, 0xe5, 0xa5, 0x9c, 0x77, 0x0a, 0xa6, 0x20, 0x68, 0xfe, 0x7f, 0xc1, 0xad, -} - -func expandKey(key []byte, t1 int) [64]uint16 { - - l := make([]byte, 128) - copy(l, key) - - var t = len(key) - var t8 = (t1 + 7) / 8 - var tm = byte(255 % uint(1<<(8+uint(t1)-8*uint(t8)))) - - for i := len(key); i < 128; i++ { - l[i] = piTable[l[i-1]+l[uint8(i-t)]] - } - - l[128-t8] = piTable[l[128-t8]&tm] - - for i := 127 - t8; i >= 0; i-- { - l[i] = piTable[l[i+1]^l[i+t8]] - } - - var k [64]uint16 - - for i := range k { - k[i] = uint16(l[2*i]) + uint16(l[2*i+1])*256 - } - - return k -} - -func rotl16(x uint16, b uint) uint16 { - return (x >> (16 - b)) | (x << b) -} - -func (c *rc2Cipher) Encrypt(dst, src []byte) { - - r0 := binary.LittleEndian.Uint16(src[0:]) - r1 := binary.LittleEndian.Uint16(src[2:]) - r2 := binary.LittleEndian.Uint16(src[4:]) - r3 := binary.LittleEndian.Uint16(src[6:]) - - var j int - - for j <= 16 { - // mix r0 - r0 = r0 + c.k[j] + (r3 & r2) + ((^r3) & r1) - r0 = rotl16(r0, 1) - j++ - - // mix r1 - r1 = r1 + c.k[j] + (r0 & r3) + ((^r0) & r2) - r1 = rotl16(r1, 2) - j++ - - // mix r2 - r2 = r2 + c.k[j] + (r1 & r0) + ((^r1) & r3) - r2 = rotl16(r2, 3) - j++ - - // mix r3 - r3 = r3 + c.k[j] + (r2 & r1) + ((^r2) & r0) - r3 = rotl16(r3, 5) - j++ - - } - - r0 = r0 + c.k[r3&63] - r1 = r1 + c.k[r0&63] - r2 = r2 + c.k[r1&63] - r3 = r3 + c.k[r2&63] - - for j <= 40 { - - // mix r0 - r0 = r0 + c.k[j] + (r3 & r2) + ((^r3) & r1) - r0 = rotl16(r0, 1) - j++ - - // mix r1 - r1 = r1 + c.k[j] + (r0 & r3) + ((^r0) & r2) - r1 = rotl16(r1, 2) - j++ - - // mix r2 - r2 = r2 + c.k[j] + (r1 & r0) + ((^r1) & r3) - r2 = rotl16(r2, 3) - j++ - - // mix r3 - r3 = r3 + c.k[j] + (r2 & r1) + 
((^r2) & r0) - r3 = rotl16(r3, 5) - j++ - - } - - r0 = r0 + c.k[r3&63] - r1 = r1 + c.k[r0&63] - r2 = r2 + c.k[r1&63] - r3 = r3 + c.k[r2&63] - - for j <= 60 { - - // mix r0 - r0 = r0 + c.k[j] + (r3 & r2) + ((^r3) & r1) - r0 = rotl16(r0, 1) - j++ - - // mix r1 - r1 = r1 + c.k[j] + (r0 & r3) + ((^r0) & r2) - r1 = rotl16(r1, 2) - j++ - - // mix r2 - r2 = r2 + c.k[j] + (r1 & r0) + ((^r1) & r3) - r2 = rotl16(r2, 3) - j++ - - // mix r3 - r3 = r3 + c.k[j] + (r2 & r1) + ((^r2) & r0) - r3 = rotl16(r3, 5) - j++ - } - - binary.LittleEndian.PutUint16(dst[0:], r0) - binary.LittleEndian.PutUint16(dst[2:], r1) - binary.LittleEndian.PutUint16(dst[4:], r2) - binary.LittleEndian.PutUint16(dst[6:], r3) -} - -func (c *rc2Cipher) Decrypt(dst, src []byte) { - - r0 := binary.LittleEndian.Uint16(src[0:]) - r1 := binary.LittleEndian.Uint16(src[2:]) - r2 := binary.LittleEndian.Uint16(src[4:]) - r3 := binary.LittleEndian.Uint16(src[6:]) - - j := 63 - - for j >= 44 { - // unmix r3 - r3 = rotl16(r3, 16-5) - r3 = r3 - c.k[j] - (r2 & r1) - ((^r2) & r0) - j-- - - // unmix r2 - r2 = rotl16(r2, 16-3) - r2 = r2 - c.k[j] - (r1 & r0) - ((^r1) & r3) - j-- - - // unmix r1 - r1 = rotl16(r1, 16-2) - r1 = r1 - c.k[j] - (r0 & r3) - ((^r0) & r2) - j-- - - // unmix r0 - r0 = rotl16(r0, 16-1) - r0 = r0 - c.k[j] - (r3 & r2) - ((^r3) & r1) - j-- - } - - r3 = r3 - c.k[r2&63] - r2 = r2 - c.k[r1&63] - r1 = r1 - c.k[r0&63] - r0 = r0 - c.k[r3&63] - - for j >= 20 { - // unmix r3 - r3 = rotl16(r3, 16-5) - r3 = r3 - c.k[j] - (r2 & r1) - ((^r2) & r0) - j-- - - // unmix r2 - r2 = rotl16(r2, 16-3) - r2 = r2 - c.k[j] - (r1 & r0) - ((^r1) & r3) - j-- - - // unmix r1 - r1 = rotl16(r1, 16-2) - r1 = r1 - c.k[j] - (r0 & r3) - ((^r0) & r2) - j-- - - // unmix r0 - r0 = rotl16(r0, 16-1) - r0 = r0 - c.k[j] - (r3 & r2) - ((^r3) & r1) - j-- - - } - - r3 = r3 - c.k[r2&63] - r2 = r2 - c.k[r1&63] - r1 = r1 - c.k[r0&63] - r0 = r0 - c.k[r3&63] - - for j >= 0 { - - // unmix r3 - r3 = rotl16(r3, 16-5) - r3 = r3 - c.k[j] - (r2 & r1) - ((^r2) & 
r0) - j-- - - // unmix r2 - r2 = rotl16(r2, 16-3) - r2 = r2 - c.k[j] - (r1 & r0) - ((^r1) & r3) - j-- - - // unmix r1 - r1 = rotl16(r1, 16-2) - r1 = r1 - c.k[j] - (r0 & r3) - ((^r0) & r2) - j-- - - // unmix r0 - r0 = rotl16(r0, 16-1) - r0 = r0 - c.k[j] - (r3 & r2) - ((^r3) & r1) - j-- - - } - - binary.LittleEndian.PutUint16(dst[0:], r0) - binary.LittleEndian.PutUint16(dst[2:], r1) - binary.LittleEndian.PutUint16(dst[4:], r2) - binary.LittleEndian.PutUint16(dst[6:], r3) -} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/internal/rc2/rc2_test.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/internal/rc2/rc2_test.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/internal/rc2/rc2_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/internal/rc2/rc2_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,93 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package rc2 - -import ( - "bytes" - "encoding/hex" - "testing" -) - -func TestEncryptDecrypt(t *testing.T) { - - // TODO(dgryski): add the rest of the test vectors from the RFC - var tests = []struct { - key string - plain string - cipher string - t1 int - }{ - { - "0000000000000000", - "0000000000000000", - "ebb773f993278eff", - 63, - }, - { - "ffffffffffffffff", - "ffffffffffffffff", - "278b27e42e2f0d49", - 64, - }, - { - "3000000000000000", - "1000000000000001", - "30649edf9be7d2c2", - 64, - }, - { - "88", - "0000000000000000", - "61a8a244adacccf0", - 64, - }, - { - "88bca90e90875a", - "0000000000000000", - "6ccf4308974c267f", - 64, - }, - { - "88bca90e90875a7f0f79c384627bafb2", - "0000000000000000", - "1a807d272bbe5db1", - 64, - }, - { - "88bca90e90875a7f0f79c384627bafb2", - "0000000000000000", - "2269552ab0f85ca6", - 128, - }, - { - "88bca90e90875a7f0f79c384627bafb216f80a6f85920584c42fceb0be255daf1e", - "0000000000000000", - "5b78d3a43dfff1f1", - 129, - }, - } - - for _, tt := range tests { - k, _ := hex.DecodeString(tt.key) - p, _ := hex.DecodeString(tt.plain) - c, _ := hex.DecodeString(tt.cipher) - - b, _ := New(k, tt.t1) - - var dst [8]byte - - b.Encrypt(dst[:], p) - - if !bytes.Equal(dst[:], c) { - t.Errorf("encrypt failed: got % 2x wanted % 2x\n", dst, c) - } - - b.Decrypt(dst[:], c) - - if !bytes.Equal(dst[:], p) { - t.Errorf("decrypt failed: got % 2x wanted % 2x\n", dst, p) - } - } -} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/mac.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/mac.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/mac.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/mac.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,45 +0,0 @@ -// Copyright 2015 
The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package pkcs12 - -import ( - "crypto/hmac" - "crypto/sha1" - "crypto/x509/pkix" - "encoding/asn1" -) - -type macData struct { - Mac digestInfo - MacSalt []byte - Iterations int `asn1:"optional,default:1"` -} - -// from PKCS#7: -type digestInfo struct { - Algorithm pkix.AlgorithmIdentifier - Digest []byte -} - -var ( - oidSHA1 = asn1.ObjectIdentifier([]int{1, 3, 14, 3, 2, 26}) -) - -func verifyMac(macData *macData, message, password []byte) error { - if !macData.Mac.Algorithm.Algorithm.Equal(oidSHA1) { - return NotImplementedError("unknown digest algorithm: " + macData.Mac.Algorithm.Algorithm.String()) - } - - key := pbkdf(sha1Sum, 20, 64, macData.MacSalt, password, macData.Iterations, 3, 20) - - mac := hmac.New(sha1.New, key) - mac.Write(message) - expectedMAC := mac.Sum(nil) - - if !hmac.Equal(macData.Mac.Digest, expectedMAC) { - return ErrIncorrectPassword - } - return nil -} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/mac_test.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/mac_test.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/mac_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/mac_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,42 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package pkcs12 - -import ( - "encoding/asn1" - "testing" -) - -func TestVerifyMac(t *testing.T) { - td := macData{ - Mac: digestInfo{ - Digest: []byte{0x18, 0x20, 0x3d, 0xff, 0x1e, 0x16, 0xf4, 0x92, 0xf2, 0xaf, 0xc8, 0x91, 0xa9, 0xba, 0xd6, 0xca, 0x9d, 0xee, 0x51, 0x93}, - }, - MacSalt: []byte{1, 2, 3, 4, 5, 6, 7, 8}, - Iterations: 2048, - } - - message := []byte{11, 12, 13, 14, 15} - password, _ := bmpString("") - - td.Mac.Algorithm.Algorithm = asn1.ObjectIdentifier([]int{1, 2, 3}) - err := verifyMac(&td, message, password) - if _, ok := err.(NotImplementedError); !ok { - t.Errorf("err: %v", err) - } - - td.Mac.Algorithm.Algorithm = asn1.ObjectIdentifier([]int{1, 3, 14, 3, 2, 26}) - err = verifyMac(&td, message, password) - if err != ErrIncorrectPassword { - t.Errorf("Expected incorrect password, got err: %v", err) - } - - password, _ = bmpString("Sesame open") - err = verifyMac(&td, message, password) - if err != nil { - t.Errorf("err: %v", err) - } - -} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/pbkdf.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/pbkdf.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/pbkdf.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/pbkdf.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,170 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package pkcs12 - -import ( - "bytes" - "crypto/sha1" - "math/big" -) - -var ( - one = big.NewInt(1) -) - -// sha1Sum returns the SHA-1 hash of in. 
-func sha1Sum(in []byte) []byte { - sum := sha1.Sum(in) - return sum[:] -} - -// fillWithRepeats returns v*ceiling(len(pattern) / v) bytes consisting of -// repeats of pattern. -func fillWithRepeats(pattern []byte, v int) []byte { - if len(pattern) == 0 { - return nil - } - outputLen := v * ((len(pattern) + v - 1) / v) - return bytes.Repeat(pattern, (outputLen+len(pattern)-1)/len(pattern))[:outputLen] -} - -func pbkdf(hash func([]byte) []byte, u, v int, salt, password []byte, r int, ID byte, size int) (key []byte) { - // implementation of https://tools.ietf.org/html/rfc7292#appendix-B.2 , RFC text verbatim in comments - - // Let H be a hash function built around a compression function f: - - // Z_2^u x Z_2^v -> Z_2^u - - // (that is, H has a chaining variable and output of length u bits, and - // the message input to the compression function of H is v bits). The - // values for u and v are as follows: - - // HASH FUNCTION VALUE u VALUE v - // MD2, MD5 128 512 - // SHA-1 160 512 - // SHA-224 224 512 - // SHA-256 256 512 - // SHA-384 384 1024 - // SHA-512 512 1024 - // SHA-512/224 224 1024 - // SHA-512/256 256 1024 - - // Furthermore, let r be the iteration count. - - // We assume here that u and v are both multiples of 8, as are the - // lengths of the password and salt strings (which we denote by p and s, - // respectively) and the number n of pseudorandom bits required. In - // addition, u and v are of course non-zero. - - // For information on security considerations for MD5 [19], see [25] and - // [1], and on those for MD2, see [18]. - - // The following procedure can be used to produce pseudorandom bits for - // a particular "purpose" that is identified by a byte called "ID". - // This standard specifies 3 different values for the ID byte: - - // 1. If ID=1, then the pseudorandom bits being produced are to be used - // as key material for performing encryption or decryption. - - // 2. 
If ID=2, then the pseudorandom bits being produced are to be used - // as an IV (Initial Value) for encryption or decryption. - - // 3. If ID=3, then the pseudorandom bits being produced are to be used - // as an integrity key for MACing. - - // 1. Construct a string, D (the "diversifier"), by concatenating v/8 - // copies of ID. - var D []byte - for i := 0; i < v; i++ { - D = append(D, ID) - } - - // 2. Concatenate copies of the salt together to create a string S of - // length v(ceiling(s/v)) bits (the final copy of the salt may be - // truncated to create S). Note that if the salt is the empty - // string, then so is S. - - S := fillWithRepeats(salt, v) - - // 3. Concatenate copies of the password together to create a string P - // of length v(ceiling(p/v)) bits (the final copy of the password - // may be truncated to create P). Note that if the password is the - // empty string, then so is P. - - P := fillWithRepeats(password, v) - - // 4. Set I=S||P to be the concatenation of S and P. - I := append(S, P...) - - // 5. Set c=ceiling(n/u). - c := (size + u - 1) / u - - // 6. For i=1, 2, ..., c, do the following: - A := make([]byte, c*20) - var IjBuf []byte - for i := 0; i < c; i++ { - // A. Set A2=H^r(D||I). (i.e., the r-th hash of D||1, - // H(H(H(... H(D||I)))) - Ai := hash(append(D, I...)) - for j := 1; j < r; j++ { - Ai = hash(Ai) - } - copy(A[i*20:], Ai[:]) - - if i < c-1 { // skip on last iteration - // B. Concatenate copies of Ai to create a string B of length v - // bits (the final copy of Ai may be truncated to create B). - var B []byte - for len(B) < v { - B = append(B, Ai[:]...) - } - B = B[:v] - - // C. Treating I as a concatenation I_0, I_1, ..., I_(k-1) of v-bit - // blocks, where k=ceiling(s/v)+ceiling(p/v), modify I by - // setting I_j=(I_j+B+1) mod 2^v for each j. 
- { - Bbi := new(big.Int).SetBytes(B) - Ij := new(big.Int) - - for j := 0; j < len(I)/v; j++ { - Ij.SetBytes(I[j*v : (j+1)*v]) - Ij.Add(Ij, Bbi) - Ij.Add(Ij, one) - Ijb := Ij.Bytes() - // We expect Ijb to be exactly v bytes, - // if it is longer or shorter we must - // adjust it accordingly. - if len(Ijb) > v { - Ijb = Ijb[len(Ijb)-v:] - } - if len(Ijb) < v { - if IjBuf == nil { - IjBuf = make([]byte, v) - } - bytesShort := v - len(Ijb) - for i := 0; i < bytesShort; i++ { - IjBuf[i] = 0 - } - copy(IjBuf[bytesShort:], Ijb) - Ijb = IjBuf - } - copy(I[j*v:(j+1)*v], Ijb) - } - } - } - } - // 7. Concatenate A_1, A_2, ..., A_c together to form a pseudorandom - // bit string, A. - - // 8. Use the first n bits of A as the output of this entire process. - return A[:size] - - // If the above process is being used to generate a DES key, the process - // should be used to create 64 random bits, and the key's parity bits - // should be set after the 64 bits have been produced. Similar concerns - // hold for 2-key and 3-key triple-DES keys, for CDMF keys, and for any - // similar keys with parity bits "built into them". -} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/pbkdf_test.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/pbkdf_test.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/pbkdf_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/pbkdf_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,34 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package pkcs12 - -import ( - "bytes" - "testing" -) - -func TestThatPBKDFWorksCorrectlyForLongKeys(t *testing.T) { - cipherInfo := shaWithTripleDESCBC{} - - salt := []byte("\xff\xff\xff\xff\xff\xff\xff\xff") - password, _ := bmpString("sesame") - key := cipherInfo.deriveKey(salt, password, 2048) - - if expected := []byte("\x7c\xd9\xfd\x3e\x2b\x3b\xe7\x69\x1a\x44\xe3\xbe\xf0\xf9\xea\x0f\xb9\xb8\x97\xd4\xe3\x25\xd9\xd1"); bytes.Compare(key, expected) != 0 { - t.Fatalf("expected key '%x', but found '%x'", expected, key) - } -} - -func TestThatPBKDFHandlesLeadingZeros(t *testing.T) { - // This test triggers a case where I_j (in step 6C) ends up with leading zero - // byte, meaning that len(Ijb) < v (leading zeros get stripped by big.Int). - // This was previously causing bug whereby certain inputs would break the - // derivation and produce the wrong output. - key := pbkdf(sha1Sum, 20, 64, []byte("\xf3\x7e\x05\xb5\x18\x32\x4b\x4b"), []byte("\x00\x00"), 2048, 1, 24) - expected := []byte("\x00\xf7\x59\xff\x47\xd1\x4d\xd0\x36\x65\xd5\x94\x3c\xb3\xc4\xa3\x9a\x25\x55\xc0\x2a\xed\x66\xe1") - if bytes.Compare(key, expected) != 0 { - t.Fatalf("expected key '%x', but found '%x'", expected, key) - } -} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/pkcs12.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/pkcs12.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/pkcs12.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/pkcs12.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,342 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package pkcs12 implements some of PKCS#12. 
-// -// This implementation is distilled from https://tools.ietf.org/html/rfc7292 -// and referenced documents. It is intended for decoding P12/PFX-stored -// certificates and keys for use with the crypto/tls package. -package pkcs12 - -import ( - "crypto/ecdsa" - "crypto/rsa" - "crypto/x509" - "crypto/x509/pkix" - "encoding/asn1" - "encoding/hex" - "encoding/pem" - "errors" -) - -var ( - oidDataContentType = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 7, 1}) - oidEncryptedDataContentType = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 7, 6}) - - oidFriendlyName = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 9, 20}) - oidLocalKeyID = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 9, 21}) - oidMicrosoftCSPName = asn1.ObjectIdentifier([]int{1, 3, 6, 1, 4, 1, 311, 17, 1}) -) - -type pfxPdu struct { - Version int - AuthSafe contentInfo - MacData macData `asn1:"optional"` -} - -type contentInfo struct { - ContentType asn1.ObjectIdentifier - Content asn1.RawValue `asn1:"tag:0,explicit,optional"` -} - -type encryptedData struct { - Version int - EncryptedContentInfo encryptedContentInfo -} - -type encryptedContentInfo struct { - ContentType asn1.ObjectIdentifier - ContentEncryptionAlgorithm pkix.AlgorithmIdentifier - EncryptedContent []byte `asn1:"tag:0,optional"` -} - -func (i encryptedContentInfo) Algorithm() pkix.AlgorithmIdentifier { - return i.ContentEncryptionAlgorithm -} - -func (i encryptedContentInfo) Data() []byte { return i.EncryptedContent } - -type safeBag struct { - Id asn1.ObjectIdentifier - Value asn1.RawValue `asn1:"tag:0,explicit"` - Attributes []pkcs12Attribute `asn1:"set,optional"` -} - -type pkcs12Attribute struct { - Id asn1.ObjectIdentifier - Value asn1.RawValue `ans1:"set"` -} - -type encryptedPrivateKeyInfo struct { - AlgorithmIdentifier pkix.AlgorithmIdentifier - EncryptedData []byte -} - -func (i encryptedPrivateKeyInfo) Algorithm() pkix.AlgorithmIdentifier { - return i.AlgorithmIdentifier -} - -func (i encryptedPrivateKeyInfo) 
Data() []byte { - return i.EncryptedData -} - -// PEM block types -const ( - certificateType = "CERTIFICATE" - privateKeyType = "PRIVATE KEY" -) - -// unmarshal calls asn1.Unmarshal, but also returns an error if there is any -// trailing data after unmarshaling. -func unmarshal(in []byte, out interface{}) error { - trailing, err := asn1.Unmarshal(in, out) - if err != nil { - return err - } - if len(trailing) != 0 { - return errors.New("pkcs12: trailing data found") - } - return nil -} - -// ConvertToPEM converts all "safe bags" contained in pfxData to PEM blocks. -func ToPEM(pfxData []byte, password string) ([]*pem.Block, error) { - encodedPassword, err := bmpString(password) - if err != nil { - return nil, ErrIncorrectPassword - } - - bags, encodedPassword, err := getSafeContents(pfxData, encodedPassword) - - blocks := make([]*pem.Block, 0, len(bags)) - for _, bag := range bags { - block, err := convertBag(&bag, encodedPassword) - if err != nil { - return nil, err - } - blocks = append(blocks, block) - } - - return blocks, nil -} - -func convertBag(bag *safeBag, password []byte) (*pem.Block, error) { - block := &pem.Block{ - Headers: make(map[string]string), - } - - for _, attribute := range bag.Attributes { - k, v, err := convertAttribute(&attribute) - if err != nil { - return nil, err - } - block.Headers[k] = v - } - - switch { - case bag.Id.Equal(oidCertBag): - block.Type = certificateType - certsData, err := decodeCertBag(bag.Value.Bytes) - if err != nil { - return nil, err - } - block.Bytes = certsData - case bag.Id.Equal(oidPKCS8ShroundedKeyBag): - block.Type = privateKeyType - - key, err := decodePkcs8ShroudedKeyBag(bag.Value.Bytes, password) - if err != nil { - return nil, err - } - - switch key := key.(type) { - case *rsa.PrivateKey: - block.Bytes = x509.MarshalPKCS1PrivateKey(key) - case *ecdsa.PrivateKey: - block.Bytes, err = x509.MarshalECPrivateKey(key) - if err != nil { - return nil, err - } - default: - return nil, errors.New("found unknown private 
key type in PKCS#8 wrapping") - } - default: - return nil, errors.New("don't know how to convert a safe bag of type " + bag.Id.String()) - } - return block, nil -} - -func convertAttribute(attribute *pkcs12Attribute) (key, value string, err error) { - isString := false - - switch { - case attribute.Id.Equal(oidFriendlyName): - key = "friendlyName" - isString = true - case attribute.Id.Equal(oidLocalKeyID): - key = "localKeyId" - case attribute.Id.Equal(oidMicrosoftCSPName): - // This key is chosen to match OpenSSL. - key = "Microsoft CSP Name" - isString = true - default: - return "", "", errors.New("pkcs12: unknown attribute with OID " + attribute.Id.String()) - } - - if isString { - if err := unmarshal(attribute.Value.Bytes, &attribute.Value); err != nil { - return "", "", err - } - if value, err = decodeBMPString(attribute.Value.Bytes); err != nil { - return "", "", err - } - } else { - var id []byte - if err := unmarshal(attribute.Value.Bytes, &id); err != nil { - return "", "", err - } - value = hex.EncodeToString(id) - } - - return key, value, nil -} - -// Decode extracts a certificate and private key from pfxData. This function -// assumes that there is only one certificate and only one private key in the -// pfxData. 
-func Decode(pfxData []byte, password string) (privateKey interface{}, certificate *x509.Certificate, err error) { - encodedPassword, err := bmpString(password) - if err != nil { - return nil, nil, err - } - - bags, encodedPassword, err := getSafeContents(pfxData, encodedPassword) - if err != nil { - return nil, nil, err - } - - if len(bags) != 2 { - err = errors.New("pkcs12: expected exactly two safe bags in the PFX PDU") - return - } - - for _, bag := range bags { - switch { - case bag.Id.Equal(oidCertBag): - if certificate != nil { - err = errors.New("pkcs12: expected exactly one certificate bag") - } - - certsData, err := decodeCertBag(bag.Value.Bytes) - if err != nil { - return nil, nil, err - } - certs, err := x509.ParseCertificates(certsData) - if err != nil { - return nil, nil, err - } - if len(certs) != 1 { - err = errors.New("pkcs12: expected exactly one certificate in the certBag") - return nil, nil, err - } - certificate = certs[0] - - case bag.Id.Equal(oidPKCS8ShroundedKeyBag): - if privateKey != nil { - err = errors.New("pkcs12: expected exactly one key bag") - } - - if privateKey, err = decodePkcs8ShroudedKeyBag(bag.Value.Bytes, encodedPassword); err != nil { - return nil, nil, err - } - } - } - - if certificate == nil { - return nil, nil, errors.New("pkcs12: certificate missing") - } - if privateKey == nil { - return nil, nil, errors.New("pkcs12: private key missing") - } - - return -} - -func getSafeContents(p12Data, password []byte) (bags []safeBag, updatedPassword []byte, err error) { - pfx := new(pfxPdu) - if err := unmarshal(p12Data, pfx); err != nil { - return nil, nil, errors.New("pkcs12: error reading P12 data: " + err.Error()) - } - - if pfx.Version != 3 { - return nil, nil, NotImplementedError("can only decode v3 PFX PDU's") - } - - if !pfx.AuthSafe.ContentType.Equal(oidDataContentType) { - return nil, nil, NotImplementedError("only password-protected PFX is implemented") - } - - // unmarshal the explicit bytes in the content for type 
'data' - if err := unmarshal(pfx.AuthSafe.Content.Bytes, &pfx.AuthSafe.Content); err != nil { - return nil, nil, err - } - - if len(pfx.MacData.Mac.Algorithm.Algorithm) == 0 { - return nil, nil, errors.New("pkcs12: no MAC in data") - } - - if err := verifyMac(&pfx.MacData, pfx.AuthSafe.Content.Bytes, password); err != nil { - if err == ErrIncorrectPassword && len(password) == 2 && password[0] == 0 && password[1] == 0 { - // some implementations use an empty byte array - // for the empty string password try one more - // time with empty-empty password - password = nil - err = verifyMac(&pfx.MacData, pfx.AuthSafe.Content.Bytes, password) - } - if err != nil { - return nil, nil, err - } - } - - var authenticatedSafe []contentInfo - if err := unmarshal(pfx.AuthSafe.Content.Bytes, &authenticatedSafe); err != nil { - return nil, nil, err - } - - if len(authenticatedSafe) != 2 { - return nil, nil, NotImplementedError("expected exactly two items in the authenticated safe") - } - - for _, ci := range authenticatedSafe { - var data []byte - - switch { - case ci.ContentType.Equal(oidDataContentType): - if err := unmarshal(ci.Content.Bytes, &data); err != nil { - return nil, nil, err - } - case ci.ContentType.Equal(oidEncryptedDataContentType): - var encryptedData encryptedData - if err := unmarshal(ci.Content.Bytes, &encryptedData); err != nil { - return nil, nil, err - } - if encryptedData.Version != 0 { - return nil, nil, NotImplementedError("only version 0 of EncryptedData is supported") - } - if data, err = pbDecrypt(encryptedData.EncryptedContentInfo, password); err != nil { - return nil, nil, err - } - default: - return nil, nil, NotImplementedError("only data and encryptedData content types are supported in authenticated safe") - } - - var safeContents []safeBag - if err := unmarshal(data, &safeContents); err != nil { - return nil, nil, err - } - bags = append(bags, safeContents...) 
- } - - return bags, password, nil -} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/pkcs12_test.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/pkcs12_test.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/pkcs12_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/pkcs12_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,138 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package pkcs12 - -import ( - "crypto/rsa" - "crypto/tls" - "encoding/base64" - "encoding/pem" - "testing" -) - -func TestPfx(t *testing.T) { - for commonName, base64P12 := range testdata { - p12, _ := base64.StdEncoding.DecodeString(base64P12) - - priv, cert, err := Decode(p12, "") - if err != nil { - t.Fatal(err) - } - - if err := priv.(*rsa.PrivateKey).Validate(); err != nil { - t.Errorf("error while validating private key: %v", err) - } - - if cert.Subject.CommonName != commonName { - t.Errorf("expected common name to be %q, but found %q", commonName, cert.Subject.CommonName) - } - } -} - -func TestPEM(t *testing.T) { - for commonName, base64P12 := range testdata { - p12, _ := base64.StdEncoding.DecodeString(base64P12) - - blocks, err := ToPEM(p12, "") - if err != nil { - t.Fatalf("error while converting to PEM: %s", err) - } - - var pemData []byte - for _, b := range blocks { - pemData = append(pemData, pem.EncodeToMemory(b)...) 
- } - - cert, err := tls.X509KeyPair(pemData, pemData) - if err != nil { - t.Errorf("err while converting to key pair: %v", err) - } - config := tls.Config{ - Certificates: []tls.Certificate{cert}, - } - config.BuildNameToCertificate() - - if _, exists := config.NameToCertificate[commonName]; !exists { - t.Errorf("did not find our cert in PEM?: %v", config.NameToCertificate) - } - } -} - -func ExampleToPEM() { - p12, _ := base64.StdEncoding.DecodeString(`MIIJzgIBAzCCCZQGCS ... CA+gwggPk==`) - - blocks, err := ToPEM(p12, "password") - if err != nil { - panic(err) - } - - var pemData []byte - for _, b := range blocks { - pemData = append(pemData, pem.EncodeToMemory(b)...) - } - - // then use PEM data for tls to construct tls certificate: - cert, err := tls.X509KeyPair(pemData, pemData) - if err != nil { - panic(err) - } - - config := &tls.Config{ - Certificates: []tls.Certificate{cert}, - } - - _ = config -} - -var testdata = map[string]string{ - // 'null' password test case - "Windows Azure Tools": 
`MIIKDAIBAzCCCcwGCSqGSIb3DQEHAaCCCb0Eggm5MIIJtTCCBe4GCSqGSIb3DQEHAaCCBd8EggXbMIIF1zCCBdMGCyqGSIb3DQEMCgECoIIE7jCCBOowHAYKKoZIhvcNAQwBAzAOBAhStUNnlTGV+gICB9AEggTIJ81JIossF6boFWpPtkiQRPtI6DW6e9QD4/WvHAVrM2bKdpMzSMsCML5NyuddANTKHBVq00Jc9keqGNAqJPKkjhSUebzQFyhe0E1oI9T4zY5UKr/I8JclOeccH4QQnsySzYUG2SnniXnQ+JrG3juetli7EKth9h6jLc6xbubPadY5HMB3wL/eG/kJymiXwU2KQ9Mgd4X6jbcV+NNCE/8jbZHvSTCPeYTJIjxfeX61Sj5kFKUCzERbsnpyevhY3X0eYtEDezZQarvGmXtMMdzf8HJHkWRdk9VLDLgjk8uiJif/+X4FohZ37ig0CpgC2+dP4DGugaZZ51hb8tN9GeCKIsrmWogMXDIVd0OACBp/EjJVmFB6y0kUCXxUE0TZt0XA1tjAGJcjDUpBvTntZjPsnH/4ZySy+s2d9OOhJ6pzRQBRm360TzkFdSwk9DLiLdGfv4pwMMu/vNGBlqjP/1sQtj+jprJiD1sDbCl4AdQZVoMBQHadF2uSD4/o17XG/Ci0r2h6Htc2yvZMAbEY4zMjjIn2a+vqIxD6onexaek1R3zbkS9j19D6EN9EWn8xgz80YRCyW65znZk8xaIhhvlU/mg7sTxeyuqroBZNcq6uDaQTehDpyH7bY2l4zWRpoj10a6JfH2q5shYz8Y6UZC/kOTfuGqbZDNZWro/9pYquvNNW0M847E5t9bsf9VkAAMHRGBbWoVoU9VpI0UnoXSfvpOo+aXa2DSq5sHHUTVY7A9eov3z5IqT+pligx11xcs+YhDWcU8di3BTJisohKvv5Y8WSkm/rloiZd4ig269k0jTRk1olP/vCksPli4wKG2wdsd5o42nX1yL7mFfXocOANZbB+5qMkiwdyoQSk+Vq+C8nAZx2bbKhUq2MbrORGMzOe0Hh0x2a0PeObycN1Bpyv7Mp3ZI9h5hBnONKCnqMhtyQHUj/nNvbJUnDVYNfoOEqDiEqqEwB7YqWzAKz8KW0OIqdlM8uiQ4JqZZlFllnWJUfaiDrdFM3lYSnFQBkzeVlts6GpDOOBjCYd7dcCNS6kq6pZC6p6HN60Twu0JnurZD6RT7rrPkIGE8vAenFt4iGe/yF52fahCSY8Ws4K0UTwN7bAS+4xRHVCWvE8sMRZsRCHizb5laYsVrPZJhE6+hux6OBb6w8kwPYXc+ud5v6UxawUWgt6uPwl8mlAtU9Z7Miw4Nn/wtBkiLL/ke1UI1gqJtcQXgHxx6mzsjh41+nAgTvdbsSEyU6vfOmxGj3Rwc1eOrIhJUqn5YjOWfzzsz/D5DzWKmwXIwdspt1p+u+kol1N3f2wT9fKPnd/RGCb4g/1hc3Aju4DQYgGY782l89CEEdalpQ/35bQczMFk6Fje12HykakWEXd/bGm9Unh82gH84USiRpeOfQvBDYoqEyrY3zkFZzBjhDqa+jEcAj41tcGx47oSfDq3iVYCdL7HSIjtnyEktVXd7mISZLoMt20JACFcMw+mrbjlug+eU7o2GR7T+LwtOp/p4LZqyLa7oQJDwde1BNZtm3TCK2P1mW94QDL0nDUps5KLtr1DaZXEkRbjSJub2ZE9WqDHyU3KA8G84Tq/rN1IoNu/if45jacyPje1Npj9IftUZSP22nV7HMwZtwQ4P4MYHRMBMGCSqGSIb3DQEJFTEGBAQBAAAAMFsGCSqGSIb3DQEJFDFOHkwAewBCADQAQQA0AEYARQBCADAALQBBADEAOABBAC0ANAA0AEIAQgAtAEIANQBGADIALQA0ADkAMQBFAEYAMQA1ADIAQgBBADEANgB9MF0GCSsGAQQBgjcRATFQHk4ATQBpAGMAcgBvAHMAbwBmAHQAIAB
TAG8AZgB0AHcAYQByAGUAIABLAGUAeQAgAFMAdABvAHIAYQBnAGUAIABQAHIAbwB2AGkAZABlAHIwggO/BgkqhkiG9w0BBwagggOwMIIDrAIBADCCA6UGCSqGSIb3DQEHATAcBgoqhkiG9w0BDAEGMA4ECEBk5ZAYpu0WAgIH0ICCA3hik4mQFGpw9Ha8TQPtk+j2jwWdxfF0+sTk6S8PTsEfIhB7wPltjiCK92Uv2tCBQnodBUmatIfkpnRDEySmgmdglmOCzj204lWAMRs94PoALGn3JVBXbO1vIDCbAPOZ7Z0Hd0/1t2hmk8v3//QJGUg+qr59/4y/MuVfIg4qfkPcC2QSvYWcK3oTf6SFi5rv9B1IOWFgN5D0+C+x/9Lb/myPYX+rbOHrwtJ4W1fWKoz9g7wwmGFA9IJ2DYGuH8ifVFbDFT1Vcgsvs8arSX7oBsJVW0qrP7XkuDRe3EqCmKW7rBEwYrFznhxZcRDEpMwbFoSvgSIZ4XhFY9VKYglT+JpNH5iDceYEBOQL4vBLpxNUk3l5jKaBNxVa14AIBxq18bVHJ+STInhLhad4u10v/Xbx7wIL3f9DX1yLAkPrpBYbNHS2/ew6H/ySDJnoIDxkw2zZ4qJ+qUJZ1S0lbZVG+VT0OP5uF6tyOSpbMlcGkdl3z254n6MlCrTifcwkzscysDsgKXaYQw06rzrPW6RDub+t+hXzGny799fS9jhQMLDmOggaQ7+LA4oEZsfT89HLMWxJYDqjo3gIfjciV2mV54R684qLDS+AO09U49e6yEbwGlq8lpmO/pbXCbpGbB1b3EomcQbxdWxW2WEkkEd/VBn81K4M3obmywwXJkw+tPXDXfBmzzaqqCR+onMQ5ME1nMkY8ybnfoCc1bDIupjVWsEL2Wvq752RgI6KqzVNr1ew1IdqV5AWN2fOfek+0vi3Jd9FHF3hx8JMwjJL9dZsETV5kHtYJtE7wJ23J68BnCt2eI0GEuwXcCf5EdSKN/xXCTlIokc4Qk/gzRdIZsvcEJ6B1lGovKG54X4IohikqTjiepjbsMWj38yxDmK3mtENZ9ci8FPfbbvIEcOCZIinuY3qFUlRSbx7VUerEoV1IP3clUwexVQo4lHFee2jd7ocWsdSqSapW7OWUupBtDzRkqVhE7tGria+i1W2d6YLlJ21QTjyapWJehAMO637OdbJCCzDs1cXbodRRE7bsP492ocJy8OX66rKdhYbg8srSFNKdb3pF3UDNbN9jhI/t8iagRhNBhlQtTr1me2E/c86Q18qcRXl4bcXTt6acgCeffK6Y26LcVlrgjlD33AEYRRUeyC+rpxbT0aMjdFderlndKRIyG23mSp0HaUwNzAfMAcGBSsOAwIaBBRlviCbIyRrhIysg2dc/KbLFTc2vQQUg4rfwHMM4IKYRD/fsd1x6dda+wQ=`, - // empty string password test case - "testing@example.com": `MIIJzgIBAzCCCZQGCSqGSIb3DQEHAaCCCYUEggmBMIIJfTCCA/cGCSqGSIb3DQEHBqCCA+gwggPk -AgEAMIID3QYJKoZIhvcNAQcBMBwGCiqGSIb3DQEMAQYwDgQIIszfRGqcmPcCAggAgIIDsOZ9Eg1L -s5Wx8JhYoV3HAL4aRnkAWvTYB5NISZOgSgIQTssmt/3A7134dibTmaT/93LikkL3cTKLnQzJ4wDf -YZ1bprpVJvUqz+HFT79m27bP9zYXFrvxWBJbxjYKTSjQMgz+h8LAEpXXGajCmxMJ1oCOtdXkhhzc -LdZN6SAYgtmtyFnCdMEDskSggGuLb3fw84QEJ/Sj6FAULXunW/CPaS7Ce0TMsKmNU/jfFWj3yXXw -ro0kwjKiVLpVFlnBlHo2OoVU7hmkm59YpGhLgS7nxLD3n7nBroQ0ID1+8R01NnV9XLGoGzxMm1te 
-6UyTCkr5mj+kEQ8EP1Ys7g/TC411uhVWySMt/rcpkx7Vz1r9kYEAzJpONAfr6cuEVkPKrxpq4Fh0 -2fzlKBky0i/hrfIEUmngh+ERHUb/Mtv/fkv1j5w9suESbhsMLLiCXAlsP1UWMX+3bNizi3WVMEts -FM2k9byn+p8IUD/A8ULlE4kEaWeoc+2idkCNQkLGuIdGUXUFVm58se0auUkVRoRJx8x4CkMesT8j -b1H831W66YRWoEwwDQp2kK1lA2vQXxdVHWlFevMNxJeromLzj3ayiaFrfByeUXhR2S+Hpm+c0yNR -4UVU9WED2kacsZcpRm9nlEa5sr28mri5JdBrNa/K02OOhvKCxr5ZGmbOVzUQKla2z4w+Ku9k8POm -dfDNU/fGx1b5hcFWtghXe3msWVsSJrQihnN6q1ughzNiYZlJUGcHdZDRtiWwCFI0bR8h/Dmg9uO9 -4rawQQrjIRT7B8yF3UbkZyAqs8Ppb1TsMeNPHh1rxEfGVQknh/48ouJYsmtbnzugTUt3mJCXXiL+ -XcPMV6bBVAUu4aaVKSmg9+yJtY4/VKv10iw88ktv29fViIdBe3t6l/oPuvQgbQ8dqf4T8w0l/uKZ -9lS1Na9jfT1vCoS7F5TRi+tmyj1vL5kr/amEIW6xKEP6oeAMvCMtbPAzVEj38zdJ1R22FfuIBxkh -f0Zl7pdVbmzRxl/SBx9iIBJSqAvcXItiT0FIj8HxQ+0iZKqMQMiBuNWJf5pYOLWGrIyntCWwHuaQ -wrx0sTGuEL9YXLEAsBDrsvzLkx/56E4INGZFrH8G7HBdW6iGqb22IMI4GHltYSyBRKbB0gadYTyv -abPEoqww8o7/85aPSzOTJ/53ozD438Q+d0u9SyDuOb60SzCD/zPuCEd78YgtXJwBYTuUNRT27FaM -3LGMX8Hz+6yPNRnmnA2XKPn7dx/IlaqAjIs8MIIFfgYJKoZIhvcNAQcBoIIFbwSCBWswggVnMIIF -YwYLKoZIhvcNAQwKAQKgggTuMIIE6jAcBgoqhkiG9w0BDAEDMA4ECJr0cClYqOlcAgIIAASCBMhe -OQSiP2s0/46ONXcNeVAkz2ksW3u/+qorhSiskGZ0b3dFa1hhgBU2Q7JVIkc4Hf7OXaT1eVQ8oqND -uhqsNz83/kqYo70+LS8Hocj49jFgWAKrf/yQkdyP1daHa2yzlEw4mkpqOfnIORQHvYCa8nEApspZ -wVu8y6WVuLHKU67mel7db2xwstQp7PRuSAYqGjTfAylElog8ASdaqqYbYIrCXucF8iF9oVgmb/Qo -xrXshJ9aSLO4MuXlTPELmWgj07AXKSb90FKNihE+y0bWb9LPVFY1Sly3AX9PfrtkSXIZwqW3phpv -MxGxQl/R6mr1z+hlTfY9Wdpb5vlKXPKA0L0Rt8d2pOesylFi6esJoS01QgP1kJILjbrV731kvDc0 -Jsd+Oxv4BMwA7ClG8w1EAOInc/GrV1MWFGw/HeEqj3CZ/l/0jv9bwkbVeVCiIhoL6P6lVx9pXq4t -KZ0uKg/tk5TVJmG2vLcMLvezD0Yk3G2ZOMrywtmskrwoF7oAUpO9e87szoH6fEvUZlkDkPVW1NV4 -cZk3DBSQiuA3VOOg8qbo/tx/EE3H59P0axZWno2GSB0wFPWd1aj+b//tJEJHaaNR6qPRj4IWj9ru -Qbc8eRAcVWleHg8uAehSvUXlFpyMQREyrnpvMGddpiTC8N4UMrrBRhV7+UbCOWhxPCbItnInBqgl -1JpSZIP7iUtsIMdu3fEC2cdbXMTRul+4rdzUR7F9OaezV3jjvcAbDvgbK1CpyC+MJ1Mxm/iTgk9V -iUArydhlR8OniN84GyGYoYCW9O/KUwb6ASmeFOu/msx8x6kAsSQHIkKqMKv0TUR3kZnkxUvdpBGP 
-KTl4YCTvNGX4dYALBqrAETRDhua2KVBD/kEttDHwBNVbN2xi81+Mc7ml461aADfk0c66R/m2sjHB -2tN9+wG12OIWFQjL6wF/UfJMYamxx2zOOExiId29Opt57uYiNVLOO4ourPewHPeH0u8Gz35aero7 -lkt7cZAe1Q0038JUuE/QGlnK4lESK9UkSIQAjSaAlTsrcfwtQxB2EjoOoLhwH5mvxUEmcNGNnXUc -9xj3M5BD3zBz3Ft7G3YMMDwB1+zC2l+0UG0MGVjMVaeoy32VVNvxgX7jk22OXG1iaOB+PY9kdk+O -X+52BGSf/rD6X0EnqY7XuRPkMGgjtpZeAYxRQnFtCZgDY4wYheuxqSSpdF49yNczSPLkgB3CeCfS -+9NTKN7aC6hBbmW/8yYh6OvSiCEwY0lFS/T+7iaVxr1loE4zI1y/FFp4Pe1qfLlLttVlkygga2UU -SCunTQ8UB/M5IXWKkhMOO11dP4niWwb39Y7pCWpau7mwbXOKfRPX96cgHnQJK5uG+BesDD1oYnX0 -6frN7FOnTSHKruRIwuI8KnOQ/I+owmyz71wiv5LMQt+yM47UrEjB/EZa5X8dpEwOZvkdqL7utcyo -l0XH5kWMXdW856LL/FYftAqJIDAmtX1TXF/rbP6mPyN/IlDC0gjP84Uzd/a2UyTIWr+wk49Ek3vQ -/uDamq6QrwAxVmNh5Tset5Vhpc1e1kb7mRMZIzxSP8JcTuYd45oFKi98I8YjvueHVZce1g7OudQP -SbFQoJvdT46iBg1TTatlltpOiH2mFaxWVS0xYjAjBgkqhkiG9w0BCRUxFgQUdA9eVqvETX4an/c8 -p8SsTugkit8wOwYJKoZIhvcNAQkUMS4eLABGAHIAaQBlAG4AZABsAHkAIABuAGEAbQBlACAAZgBv -AHIAIABjAGUAcgB0MDEwITAJBgUrDgMCGgUABBRFsNz3Zd1O1GI8GTuFwCWuDOjEEwQIuBEfIcAy -HQ8CAggA`, -} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/safebags.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/safebags.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/safebags.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/safebags.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,57 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package pkcs12 - -import ( - "crypto/x509" - "encoding/asn1" - "errors" -) - -var ( - // see https://tools.ietf.org/html/rfc7292#appendix-D - oidCertTypeX509Certificate = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 9, 22, 1}) - oidPKCS8ShroundedKeyBag = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 12, 10, 1, 2}) - oidCertBag = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 12, 10, 1, 3}) -) - -type certBag struct { - Id asn1.ObjectIdentifier - Data []byte `asn1:"tag:0,explicit"` -} - -func decodePkcs8ShroudedKeyBag(asn1Data, password []byte) (privateKey interface{}, err error) { - pkinfo := new(encryptedPrivateKeyInfo) - if err = unmarshal(asn1Data, pkinfo); err != nil { - return nil, errors.New("pkcs12: error decoding PKCS#8 shrouded key bag: " + err.Error()) - } - - pkData, err := pbDecrypt(pkinfo, password) - if err != nil { - return nil, errors.New("pkcs12: error decrypting PKCS#8 shrouded key bag: " + err.Error()) - } - - ret := new(asn1.RawValue) - if err = unmarshal(pkData, ret); err != nil { - return nil, errors.New("pkcs12: error unmarshaling decrypted private key: " + err.Error()) - } - - if privateKey, err = x509.ParsePKCS8PrivateKey(pkData); err != nil { - return nil, errors.New("pkcs12: error parsing PKCS#8 private key: " + err.Error()) - } - - return privateKey, nil -} - -func decodeCertBag(asn1Data []byte) (x509Certificates []byte, err error) { - bag := new(certBag) - if err := unmarshal(asn1Data, bag); err != nil { - return nil, errors.New("pkcs12: error decoding cert bag: " + err.Error()) - } - if !bag.Id.Equal(oidCertTypeX509Certificate) { - return nil, NotImplementedError("only X509 certificates are supported") - } - return bag.Data, nil -} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/gopkg.in/check.v1/benchmark.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/gopkg.in/check.v1/benchmark.go --- 
juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/gopkg.in/check.v1/benchmark.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/gopkg.in/check.v1/benchmark.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,187 +0,0 @@ -// Copyright (c) 2012 The Go Authors. All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -package check - -import ( - "fmt" - "runtime" - "time" -) - -var memStats runtime.MemStats - -// testingB is a type passed to Benchmark functions to manage benchmark -// timing and to specify the number of iterations to run. -type timer struct { - start time.Time // Time test or benchmark started - duration time.Duration - N int - bytes int64 - timerOn bool - benchTime time.Duration - // The initial states of memStats.Mallocs and memStats.TotalAlloc. - startAllocs uint64 - startBytes uint64 - // The net total of this test after being run. - netAllocs uint64 - netBytes uint64 -} - -// StartTimer starts timing a test. This function is called automatically -// before a benchmark starts, but it can also used to resume timing after -// a call to StopTimer. -func (c *C) StartTimer() { - if !c.timerOn { - c.start = time.Now() - c.timerOn = true - - runtime.ReadMemStats(&memStats) - c.startAllocs = memStats.Mallocs - c.startBytes = memStats.TotalAlloc - } -} - -// StopTimer stops timing a test. This can be used to pause the timer -// while performing complex initialization that you don't -// want to measure. -func (c *C) StopTimer() { - if c.timerOn { - c.duration += time.Now().Sub(c.start) - c.timerOn = false - runtime.ReadMemStats(&memStats) - c.netAllocs += memStats.Mallocs - c.startAllocs - c.netBytes += memStats.TotalAlloc - c.startBytes - } -} - -// ResetTimer sets the elapsed benchmark time to zero. -// It does not affect whether the timer is running. -func (c *C) ResetTimer() { - if c.timerOn { - c.start = time.Now() - runtime.ReadMemStats(&memStats) - c.startAllocs = memStats.Mallocs - c.startBytes = memStats.TotalAlloc - } - c.duration = 0 - c.netAllocs = 0 - c.netBytes = 0 -} - -// SetBytes informs the number of bytes that the benchmark processes -// on each iteration. If this is called in a benchmark it will also -// report MB/s. 
-func (c *C) SetBytes(n int64) { - c.bytes = n -} - -func (c *C) nsPerOp() int64 { - if c.N <= 0 { - return 0 - } - return c.duration.Nanoseconds() / int64(c.N) -} - -func (c *C) mbPerSec() float64 { - if c.bytes <= 0 || c.duration <= 0 || c.N <= 0 { - return 0 - } - return (float64(c.bytes) * float64(c.N) / 1e6) / c.duration.Seconds() -} - -func (c *C) timerString() string { - if c.N <= 0 { - return fmt.Sprintf("%3.3fs", float64(c.duration.Nanoseconds())/1e9) - } - mbs := c.mbPerSec() - mb := "" - if mbs != 0 { - mb = fmt.Sprintf("\t%7.2f MB/s", mbs) - } - nsop := c.nsPerOp() - ns := fmt.Sprintf("%10d ns/op", nsop) - if c.N > 0 && nsop < 100 { - // The format specifiers here make sure that - // the ones digits line up for all three possible formats. - if nsop < 10 { - ns = fmt.Sprintf("%13.2f ns/op", float64(c.duration.Nanoseconds())/float64(c.N)) - } else { - ns = fmt.Sprintf("%12.1f ns/op", float64(c.duration.Nanoseconds())/float64(c.N)) - } - } - memStats := "" - if c.benchMem { - allocedBytes := fmt.Sprintf("%8d B/op", int64(c.netBytes)/int64(c.N)) - allocs := fmt.Sprintf("%8d allocs/op", int64(c.netAllocs)/int64(c.N)) - memStats = fmt.Sprintf("\t%s\t%s", allocedBytes, allocs) - } - return fmt.Sprintf("%8d\t%s%s%s", c.N, ns, mb, memStats) -} - -func min(x, y int) int { - if x > y { - return y - } - return x -} - -func max(x, y int) int { - if x < y { - return y - } - return x -} - -// roundDown10 rounds a number down to the nearest power of 10. -func roundDown10(n int) int { - var tens = 0 - // tens = floor(log_10(n)) - for n > 10 { - n = n / 10 - tens++ - } - // result = 10^tens - result := 1 - for i := 0; i < tens; i++ { - result *= 10 - } - return result -} - -// roundUp rounds x up to a number of the form [1eX, 2eX, 5eX]. 
-func roundUp(n int) int { - base := roundDown10(n) - if n < (2 * base) { - return 2 * base - } - if n < (5 * base) { - return 5 * base - } - return 10 * base -} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/gopkg.in/check.v1/benchmark_test.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/gopkg.in/check.v1/benchmark_test.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/gopkg.in/check.v1/benchmark_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/gopkg.in/check.v1/benchmark_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,91 +0,0 @@ -// These tests verify the test running logic. - -package check_test - -import ( - . "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/gopkg.in/check.v1" - "time" -) - -var benchmarkS = Suite(&BenchmarkS{}) - -type BenchmarkS struct{} - -func (s *BenchmarkS) TestCountSuite(c *C) { - suitesRun += 1 -} - -func (s *BenchmarkS) TestBasicTestTiming(c *C) { - helper := FixtureHelper{sleepOn: "Test1", sleep: 1000000 * time.Nanosecond} - output := String{} - runConf := RunConf{Output: &output, Verbose: true} - Run(&helper, &runConf) - - expected := "PASS: check_test\\.go:[0-9]+: FixtureHelper\\.Test1\t0\\.001s\n" + - "PASS: check_test\\.go:[0-9]+: FixtureHelper\\.Test2\t0\\.000s\n" - c.Assert(output.value, Matches, expected) -} - -func (s *BenchmarkS) TestStreamTestTiming(c *C) { - helper := FixtureHelper{sleepOn: "SetUpSuite", sleep: 1000000 * time.Nanosecond} - output := String{} - runConf := RunConf{Output: &output, Stream: true} - Run(&helper, &runConf) - - expected := "(?s).*\nPASS: check_test\\.go:[0-9]+: FixtureHelper\\.SetUpSuite\t *0\\.001s\n.*" - c.Assert(output.value, Matches, expected) -} - -func (s *BenchmarkS) TestBenchmark(c *C) { - helper := FixtureHelper{sleep: 100000} - output := String{} - runConf := RunConf{ - Output: &output, - 
Benchmark: true, - BenchmarkTime: 10000000, - Filter: "Benchmark1", - } - Run(&helper, &runConf) - c.Check(helper.calls[0], Equals, "SetUpSuite") - c.Check(helper.calls[1], Equals, "SetUpTest") - c.Check(helper.calls[2], Equals, "Benchmark1") - c.Check(helper.calls[3], Equals, "TearDownTest") - c.Check(helper.calls[4], Equals, "SetUpTest") - c.Check(helper.calls[5], Equals, "Benchmark1") - c.Check(helper.calls[6], Equals, "TearDownTest") - // ... and more. - - expected := "PASS: check_test\\.go:[0-9]+: FixtureHelper\\.Benchmark1\t *100\t *[12][0-9]{5} ns/op\n" - c.Assert(output.value, Matches, expected) -} - -func (s *BenchmarkS) TestBenchmarkBytes(c *C) { - helper := FixtureHelper{sleep: 100000} - output := String{} - runConf := RunConf{ - Output: &output, - Benchmark: true, - BenchmarkTime: 10000000, - Filter: "Benchmark2", - } - Run(&helper, &runConf) - - expected := "PASS: check_test\\.go:[0-9]+: FixtureHelper\\.Benchmark2\t *100\t *[12][0-9]{5} ns/op\t *[4-9]\\.[0-9]{2} MB/s\n" - c.Assert(output.value, Matches, expected) -} - -func (s *BenchmarkS) TestBenchmarkMem(c *C) { - helper := FixtureHelper{sleep: 100000} - output := String{} - runConf := RunConf{ - Output: &output, - Benchmark: true, - BenchmarkMem: true, - BenchmarkTime: 10000000, - Filter: "Benchmark3", - } - Run(&helper, &runConf) - - expected := "PASS: check_test\\.go:[0-9]+: FixtureHelper\\.Benchmark3\t *100\t *[12][0-9]{5} ns/op\t *[0-9]+ B/op\t *[1-9] allocs/op\n" - c.Assert(output.value, Matches, expected) -} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/gopkg.in/check.v1/bootstrap_test.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/gopkg.in/check.v1/bootstrap_test.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/gopkg.in/check.v1/bootstrap_test.go 2016-08-16 08:56:25.000000000 +0000 +++ 
juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/gopkg.in/check.v1/bootstrap_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,82 +0,0 @@ -// These initial tests are for bootstrapping. They verify that we can -// basically use the testing infrastructure itself to check if the test -// system is working. -// -// These tests use will break down the test runner badly in case of -// errors because if they simply fail, we can't be sure the developer -// will ever see anything (because failing means the failing system -// somehow isn't working! :-) -// -// Do not assume *any* internal functionality works as expected besides -// what's actually tested here. - -package check_test - -import ( - "fmt" - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/gopkg.in/check.v1" - "strings" -) - -type BootstrapS struct{} - -var boostrapS = check.Suite(&BootstrapS{}) - -func (s *BootstrapS) TestCountSuite(c *check.C) { - suitesRun += 1 -} - -func (s *BootstrapS) TestFailedAndFail(c *check.C) { - if c.Failed() { - critical("c.Failed() must be false first!") - } - c.Fail() - if !c.Failed() { - critical("c.Fail() didn't put the test in a failed state!") - } - c.Succeed() -} - -func (s *BootstrapS) TestFailedAndSucceed(c *check.C) { - c.Fail() - c.Succeed() - if c.Failed() { - critical("c.Succeed() didn't put the test back in a non-failed state") - } -} - -func (s *BootstrapS) TestLogAndGetTestLog(c *check.C) { - c.Log("Hello there!") - log := c.GetTestLog() - if log != "Hello there!\n" { - critical(fmt.Sprintf("Log() or GetTestLog() is not working! Got: %#v", log)) - } -} - -func (s *BootstrapS) TestLogfAndGetTestLog(c *check.C) { - c.Logf("Hello %v", "there!") - log := c.GetTestLog() - if log != "Hello there!\n" { - critical(fmt.Sprintf("Logf() or GetTestLog() is not working! 
Got: %#v", log)) - } -} - -func (s *BootstrapS) TestRunShowsErrors(c *check.C) { - output := String{} - check.Run(&FailHelper{}, &check.RunConf{Output: &output}) - if strings.Index(output.value, "Expected failure!") == -1 { - critical(fmt.Sprintf("RunWithWriter() output did not contain the "+ - "expected failure! Got: %#v", - output.value)) - } -} - -func (s *BootstrapS) TestRunDoesntShowSuccesses(c *check.C) { - output := String{} - check.Run(&SuccessHelper{}, &check.RunConf{Output: &output}) - if strings.Index(output.value, "Expected success!") != -1 { - critical(fmt.Sprintf("RunWithWriter() output contained a successful "+ - "test! Got: %#v", - output.value)) - } -} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/gopkg.in/check.v1/checkers.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/gopkg.in/check.v1/checkers.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/gopkg.in/check.v1/checkers.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/gopkg.in/check.v1/checkers.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,458 +0,0 @@ -package check - -import ( - "fmt" - "reflect" - "regexp" -) - -// ----------------------------------------------------------------------- -// CommentInterface and Commentf helper, to attach extra information to checks. - -type comment struct { - format string - args []interface{} -} - -// Commentf returns an infomational value to use with Assert or Check calls. -// If the checker test fails, the provided arguments will be passed to -// fmt.Sprintf, and will be presented next to the logged failure. 
-// -// For example: -// -// c.Assert(v, Equals, 42, Commentf("Iteration #%d failed.", i)) -// -// Note that if the comment is constant, a better option is to -// simply use a normal comment right above or next to the line, as -// it will also get printed with any errors: -// -// c.Assert(l, Equals, 8192) // Ensure buffer size is correct (bug #123) -// -func Commentf(format string, args ...interface{}) CommentInterface { - return &comment{format, args} -} - -// CommentInterface must be implemented by types that attach extra -// information to failed checks. See the Commentf function for details. -type CommentInterface interface { - CheckCommentString() string -} - -func (c *comment) CheckCommentString() string { - return fmt.Sprintf(c.format, c.args...) -} - -// ----------------------------------------------------------------------- -// The Checker interface. - -// The Checker interface must be provided by checkers used with -// the Assert and Check verification methods. -type Checker interface { - Info() *CheckerInfo - Check(params []interface{}, names []string) (result bool, error string) -} - -// See the Checker interface. -type CheckerInfo struct { - Name string - Params []string -} - -func (info *CheckerInfo) Info() *CheckerInfo { - return info -} - -// ----------------------------------------------------------------------- -// Not checker logic inverter. - -// The Not checker inverts the logic of the provided checker. The -// resulting checker will succeed where the original one failed, and -// vice-versa. 
-// -// For example: -// -// c.Assert(a, Not(Equals), b) -// -func Not(checker Checker) Checker { - return ¬Checker{checker} -} - -type notChecker struct { - sub Checker -} - -func (checker *notChecker) Info() *CheckerInfo { - info := *checker.sub.Info() - info.Name = "Not(" + info.Name + ")" - return &info -} - -func (checker *notChecker) Check(params []interface{}, names []string) (result bool, error string) { - result, error = checker.sub.Check(params, names) - result = !result - return -} - -// ----------------------------------------------------------------------- -// IsNil checker. - -type isNilChecker struct { - *CheckerInfo -} - -// The IsNil checker tests whether the obtained value is nil. -// -// For example: -// -// c.Assert(err, IsNil) -// -var IsNil Checker = &isNilChecker{ - &CheckerInfo{Name: "IsNil", Params: []string{"value"}}, -} - -func (checker *isNilChecker) Check(params []interface{}, names []string) (result bool, error string) { - return isNil(params[0]), "" -} - -func isNil(obtained interface{}) (result bool) { - if obtained == nil { - result = true - } else { - switch v := reflect.ValueOf(obtained); v.Kind() { - case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: - return v.IsNil() - } - } - return -} - -// ----------------------------------------------------------------------- -// NotNil checker. Alias for Not(IsNil), since it's so common. - -type notNilChecker struct { - *CheckerInfo -} - -// The NotNil checker verifies that the obtained value is not nil. -// -// For example: -// -// c.Assert(iface, NotNil) -// -// This is an alias for Not(IsNil), made available since it's a -// fairly common check. 
-// -var NotNil Checker = ¬NilChecker{ - &CheckerInfo{Name: "NotNil", Params: []string{"value"}}, -} - -func (checker *notNilChecker) Check(params []interface{}, names []string) (result bool, error string) { - return !isNil(params[0]), "" -} - -// ----------------------------------------------------------------------- -// Equals checker. - -type equalsChecker struct { - *CheckerInfo -} - -// The Equals checker verifies that the obtained value is equal to -// the expected value, according to usual Go semantics for ==. -// -// For example: -// -// c.Assert(value, Equals, 42) -// -var Equals Checker = &equalsChecker{ - &CheckerInfo{Name: "Equals", Params: []string{"obtained", "expected"}}, -} - -func (checker *equalsChecker) Check(params []interface{}, names []string) (result bool, error string) { - defer func() { - if v := recover(); v != nil { - result = false - error = fmt.Sprint(v) - } - }() - return params[0] == params[1], "" -} - -// ----------------------------------------------------------------------- -// DeepEquals checker. - -type deepEqualsChecker struct { - *CheckerInfo -} - -// The DeepEquals checker verifies that the obtained value is deep-equal to -// the expected value. The check will work correctly even when facing -// slices, interfaces, and values of different types (which always fail -// the test). -// -// For example: -// -// c.Assert(value, DeepEquals, 42) -// c.Assert(array, DeepEquals, []string{"hi", "there"}) -// -var DeepEquals Checker = &deepEqualsChecker{ - &CheckerInfo{Name: "DeepEquals", Params: []string{"obtained", "expected"}}, -} - -func (checker *deepEqualsChecker) Check(params []interface{}, names []string) (result bool, error string) { - return reflect.DeepEqual(params[0], params[1]), "" -} - -// ----------------------------------------------------------------------- -// HasLen checker. - -type hasLenChecker struct { - *CheckerInfo -} - -// The HasLen checker verifies that the obtained value has the -// provided length. 
In many cases this is superior to using Equals -// in conjuction with the len function because in case the check -// fails the value itself will be printed, instead of its length, -// providing more details for figuring the problem. -// -// For example: -// -// c.Assert(list, HasLen, 5) -// -var HasLen Checker = &hasLenChecker{ - &CheckerInfo{Name: "HasLen", Params: []string{"obtained", "n"}}, -} - -func (checker *hasLenChecker) Check(params []interface{}, names []string) (result bool, error string) { - n, ok := params[1].(int) - if !ok { - return false, "n must be an int" - } - value := reflect.ValueOf(params[0]) - switch value.Kind() { - case reflect.Map, reflect.Array, reflect.Slice, reflect.Chan, reflect.String: - default: - return false, "obtained value type has no length" - } - return value.Len() == n, "" -} - -// ----------------------------------------------------------------------- -// ErrorMatches checker. - -type errorMatchesChecker struct { - *CheckerInfo -} - -// The ErrorMatches checker verifies that the error value -// is non nil and matches the regular expression provided. -// -// For example: -// -// c.Assert(err, ErrorMatches, "perm.*denied") -// -var ErrorMatches Checker = errorMatchesChecker{ - &CheckerInfo{Name: "ErrorMatches", Params: []string{"value", "regex"}}, -} - -func (checker errorMatchesChecker) Check(params []interface{}, names []string) (result bool, errStr string) { - if params[0] == nil { - return false, "Error value is nil" - } - err, ok := params[0].(error) - if !ok { - return false, "Value is not an error" - } - params[0] = err.Error() - names[0] = "error" - return matches(params[0], params[1]) -} - -// ----------------------------------------------------------------------- -// Matches checker. - -type matchesChecker struct { - *CheckerInfo -} - -// The Matches checker verifies that the string provided as the obtained -// value (or the string resulting from obtained.String()) matches the -// regular expression provided. 
-// -// For example: -// -// c.Assert(err, Matches, "perm.*denied") -// -var Matches Checker = &matchesChecker{ - &CheckerInfo{Name: "Matches", Params: []string{"value", "regex"}}, -} - -func (checker *matchesChecker) Check(params []interface{}, names []string) (result bool, error string) { - return matches(params[0], params[1]) -} - -func matches(value, regex interface{}) (result bool, error string) { - reStr, ok := regex.(string) - if !ok { - return false, "Regex must be a string" - } - valueStr, valueIsStr := value.(string) - if !valueIsStr { - if valueWithStr, valueHasStr := value.(fmt.Stringer); valueHasStr { - valueStr, valueIsStr = valueWithStr.String(), true - } - } - if valueIsStr { - matches, err := regexp.MatchString("^"+reStr+"$", valueStr) - if err != nil { - return false, "Can't compile regex: " + err.Error() - } - return matches, "" - } - return false, "Obtained value is not a string and has no .String()" -} - -// ----------------------------------------------------------------------- -// Panics checker. - -type panicsChecker struct { - *CheckerInfo -} - -// The Panics checker verifies that calling the provided zero-argument -// function will cause a panic which is deep-equal to the provided value. -// -// For example: -// -// c.Assert(func() { f(1, 2) }, Panics, &SomeErrorType{"BOOM"}). -// -// -var Panics Checker = &panicsChecker{ - &CheckerInfo{Name: "Panics", Params: []string{"function", "expected"}}, -} - -func (checker *panicsChecker) Check(params []interface{}, names []string) (result bool, error string) { - f := reflect.ValueOf(params[0]) - if f.Kind() != reflect.Func || f.Type().NumIn() != 0 { - return false, "Function must take zero arguments" - } - defer func() { - // If the function has not panicked, then don't do the check. 
- if error != "" { - return - } - params[0] = recover() - names[0] = "panic" - result = reflect.DeepEqual(params[0], params[1]) - }() - f.Call(nil) - return false, "Function has not panicked" -} - -type panicMatchesChecker struct { - *CheckerInfo -} - -// The PanicMatches checker verifies that calling the provided zero-argument -// function will cause a panic with an error value matching -// the regular expression provided. -// -// For example: -// -// c.Assert(func() { f(1, 2) }, PanicMatches, `open.*: no such file or directory`). -// -// -var PanicMatches Checker = &panicMatchesChecker{ - &CheckerInfo{Name: "PanicMatches", Params: []string{"function", "expected"}}, -} - -func (checker *panicMatchesChecker) Check(params []interface{}, names []string) (result bool, errmsg string) { - f := reflect.ValueOf(params[0]) - if f.Kind() != reflect.Func || f.Type().NumIn() != 0 { - return false, "Function must take zero arguments" - } - defer func() { - // If the function has not panicked, then don't do the check. - if errmsg != "" { - return - } - obtained := recover() - names[0] = "panic" - if e, ok := obtained.(error); ok { - params[0] = e.Error() - } else if _, ok := obtained.(string); ok { - params[0] = obtained - } else { - errmsg = "Panic value is not a string or an error" - return - } - result, errmsg = matches(params[0], params[1]) - }() - f.Call(nil) - return false, "Function has not panicked" -} - -// ----------------------------------------------------------------------- -// FitsTypeOf checker. - -type fitsTypeChecker struct { - *CheckerInfo -} - -// The FitsTypeOf checker verifies that the obtained value is -// assignable to a variable with the same type as the provided -// sample value. 
-// -// For example: -// -// c.Assert(value, FitsTypeOf, int64(0)) -// c.Assert(value, FitsTypeOf, os.Error(nil)) -// -var FitsTypeOf Checker = &fitsTypeChecker{ - &CheckerInfo{Name: "FitsTypeOf", Params: []string{"obtained", "sample"}}, -} - -func (checker *fitsTypeChecker) Check(params []interface{}, names []string) (result bool, error string) { - obtained := reflect.ValueOf(params[0]) - sample := reflect.ValueOf(params[1]) - if !obtained.IsValid() { - return false, "" - } - if !sample.IsValid() { - return false, "Invalid sample value" - } - return obtained.Type().AssignableTo(sample.Type()), "" -} - -// ----------------------------------------------------------------------- -// Implements checker. - -type implementsChecker struct { - *CheckerInfo -} - -// The Implements checker verifies that the obtained value -// implements the interface specified via a pointer to an interface -// variable. -// -// For example: -// -// var e os.Error -// c.Assert(err, Implements, &e) -// -var Implements Checker = &implementsChecker{ - &CheckerInfo{Name: "Implements", Params: []string{"obtained", "ifaceptr"}}, -} - -func (checker *implementsChecker) Check(params []interface{}, names []string) (result bool, error string) { - obtained := reflect.ValueOf(params[0]) - ifaceptr := reflect.ValueOf(params[1]) - if !obtained.IsValid() { - return false, "" - } - if !ifaceptr.IsValid() || ifaceptr.Kind() != reflect.Ptr || ifaceptr.Elem().Kind() != reflect.Interface { - return false, "ifaceptr should be a pointer to an interface variable" - } - return obtained.Type().Implements(ifaceptr.Elem().Type()), "" -} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/gopkg.in/check.v1/checkers_test.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/gopkg.in/check.v1/checkers_test.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/gopkg.in/check.v1/checkers_test.go 2016-08-16 08:56:25.000000000 
+0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/gopkg.in/check.v1/checkers_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,272 +0,0 @@ -package check_test - -import ( - "errors" - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/gopkg.in/check.v1" - "reflect" - "runtime" -) - -type CheckersS struct{} - -var _ = check.Suite(&CheckersS{}) - -func testInfo(c *check.C, checker check.Checker, name string, paramNames []string) { - info := checker.Info() - if info.Name != name { - c.Fatalf("Got name %s, expected %s", info.Name, name) - } - if !reflect.DeepEqual(info.Params, paramNames) { - c.Fatalf("Got param names %#v, expected %#v", info.Params, paramNames) - } -} - -func testCheck(c *check.C, checker check.Checker, result bool, error string, params ...interface{}) ([]interface{}, []string) { - info := checker.Info() - if len(params) != len(info.Params) { - c.Fatalf("unexpected param count in test; expected %d got %d", len(info.Params), len(params)) - } - names := append([]string{}, info.Params...) 
- result_, error_ := checker.Check(params, names) - if result_ != result || error_ != error { - c.Fatalf("%s.Check(%#v) returned (%#v, %#v) rather than (%#v, %#v)", - info.Name, params, result_, error_, result, error) - } - return params, names -} - -func (s *CheckersS) TestComment(c *check.C) { - bug := check.Commentf("a %d bc", 42) - comment := bug.CheckCommentString() - if comment != "a 42 bc" { - c.Fatalf("Commentf returned %#v", comment) - } -} - -func (s *CheckersS) TestIsNil(c *check.C) { - testInfo(c, check.IsNil, "IsNil", []string{"value"}) - - testCheck(c, check.IsNil, true, "", nil) - testCheck(c, check.IsNil, false, "", "a") - - testCheck(c, check.IsNil, true, "", (chan int)(nil)) - testCheck(c, check.IsNil, false, "", make(chan int)) - testCheck(c, check.IsNil, true, "", (error)(nil)) - testCheck(c, check.IsNil, false, "", errors.New("")) - testCheck(c, check.IsNil, true, "", ([]int)(nil)) - testCheck(c, check.IsNil, false, "", make([]int, 1)) - testCheck(c, check.IsNil, false, "", int(0)) -} - -func (s *CheckersS) TestNotNil(c *check.C) { - testInfo(c, check.NotNil, "NotNil", []string{"value"}) - - testCheck(c, check.NotNil, false, "", nil) - testCheck(c, check.NotNil, true, "", "a") - - testCheck(c, check.NotNil, false, "", (chan int)(nil)) - testCheck(c, check.NotNil, true, "", make(chan int)) - testCheck(c, check.NotNil, false, "", (error)(nil)) - testCheck(c, check.NotNil, true, "", errors.New("")) - testCheck(c, check.NotNil, false, "", ([]int)(nil)) - testCheck(c, check.NotNil, true, "", make([]int, 1)) -} - -func (s *CheckersS) TestNot(c *check.C) { - testInfo(c, check.Not(check.IsNil), "Not(IsNil)", []string{"value"}) - - testCheck(c, check.Not(check.IsNil), false, "", nil) - testCheck(c, check.Not(check.IsNil), true, "", "a") -} - -type simpleStruct struct { - i int -} - -func (s *CheckersS) TestEquals(c *check.C) { - testInfo(c, check.Equals, "Equals", []string{"obtained", "expected"}) - - // The simplest. 
- testCheck(c, check.Equals, true, "", 42, 42) - testCheck(c, check.Equals, false, "", 42, 43) - - // Different native types. - testCheck(c, check.Equals, false, "", int32(42), int64(42)) - - // With nil. - testCheck(c, check.Equals, false, "", 42, nil) - - // Slices - testCheck(c, check.Equals, false, "runtime error: comparing uncomparable type []uint8", []byte{1, 2}, []byte{1, 2}) - - // Struct values - testCheck(c, check.Equals, true, "", simpleStruct{1}, simpleStruct{1}) - testCheck(c, check.Equals, false, "", simpleStruct{1}, simpleStruct{2}) - - // Struct pointers - testCheck(c, check.Equals, false, "", &simpleStruct{1}, &simpleStruct{1}) - testCheck(c, check.Equals, false, "", &simpleStruct{1}, &simpleStruct{2}) -} - -func (s *CheckersS) TestDeepEquals(c *check.C) { - testInfo(c, check.DeepEquals, "DeepEquals", []string{"obtained", "expected"}) - - // The simplest. - testCheck(c, check.DeepEquals, true, "", 42, 42) - testCheck(c, check.DeepEquals, false, "", 42, 43) - - // Different native types. - testCheck(c, check.DeepEquals, false, "", int32(42), int64(42)) - - // With nil. 
- testCheck(c, check.DeepEquals, false, "", 42, nil) - - // Slices - testCheck(c, check.DeepEquals, true, "", []byte{1, 2}, []byte{1, 2}) - testCheck(c, check.DeepEquals, false, "", []byte{1, 2}, []byte{1, 3}) - - // Struct values - testCheck(c, check.DeepEquals, true, "", simpleStruct{1}, simpleStruct{1}) - testCheck(c, check.DeepEquals, false, "", simpleStruct{1}, simpleStruct{2}) - - // Struct pointers - testCheck(c, check.DeepEquals, true, "", &simpleStruct{1}, &simpleStruct{1}) - testCheck(c, check.DeepEquals, false, "", &simpleStruct{1}, &simpleStruct{2}) -} - -func (s *CheckersS) TestHasLen(c *check.C) { - testInfo(c, check.HasLen, "HasLen", []string{"obtained", "n"}) - - testCheck(c, check.HasLen, true, "", "abcd", 4) - testCheck(c, check.HasLen, true, "", []int{1, 2}, 2) - testCheck(c, check.HasLen, false, "", []int{1, 2}, 3) - - testCheck(c, check.HasLen, false, "n must be an int", []int{1, 2}, "2") - testCheck(c, check.HasLen, false, "obtained value type has no length", nil, 2) -} - -func (s *CheckersS) TestErrorMatches(c *check.C) { - testInfo(c, check.ErrorMatches, "ErrorMatches", []string{"value", "regex"}) - - testCheck(c, check.ErrorMatches, false, "Error value is nil", nil, "some error") - testCheck(c, check.ErrorMatches, false, "Value is not an error", 1, "some error") - testCheck(c, check.ErrorMatches, true, "", errors.New("some error"), "some error") - testCheck(c, check.ErrorMatches, true, "", errors.New("some error"), "so.*or") - - // Verify params mutation - params, names := testCheck(c, check.ErrorMatches, false, "", errors.New("some error"), "other error") - c.Assert(params[0], check.Equals, "some error") - c.Assert(names[0], check.Equals, "error") -} - -func (s *CheckersS) TestMatches(c *check.C) { - testInfo(c, check.Matches, "Matches", []string{"value", "regex"}) - - // Simple matching - testCheck(c, check.Matches, true, "", "abc", "abc") - testCheck(c, check.Matches, true, "", "abc", "a.c") - - // Must match fully - testCheck(c, 
check.Matches, false, "", "abc", "ab") - testCheck(c, check.Matches, false, "", "abc", "bc") - - // String()-enabled values accepted - testCheck(c, check.Matches, true, "", reflect.ValueOf("abc"), "a.c") - testCheck(c, check.Matches, false, "", reflect.ValueOf("abc"), "a.d") - - // Some error conditions. - testCheck(c, check.Matches, false, "Obtained value is not a string and has no .String()", 1, "a.c") - testCheck(c, check.Matches, false, "Can't compile regex: error parsing regexp: missing closing ]: `[c$`", "abc", "a[c") -} - -func (s *CheckersS) TestPanics(c *check.C) { - testInfo(c, check.Panics, "Panics", []string{"function", "expected"}) - - // Some errors. - testCheck(c, check.Panics, false, "Function has not panicked", func() bool { return false }, "BOOM") - testCheck(c, check.Panics, false, "Function must take zero arguments", 1, "BOOM") - - // Plain strings. - testCheck(c, check.Panics, true, "", func() { panic("BOOM") }, "BOOM") - testCheck(c, check.Panics, false, "", func() { panic("KABOOM") }, "BOOM") - testCheck(c, check.Panics, true, "", func() bool { panic("BOOM") }, "BOOM") - - // Error values. 
- testCheck(c, check.Panics, true, "", func() { panic(errors.New("BOOM")) }, errors.New("BOOM")) - testCheck(c, check.Panics, false, "", func() { panic(errors.New("KABOOM")) }, errors.New("BOOM")) - - type deep struct{ i int } - // Deep value - testCheck(c, check.Panics, true, "", func() { panic(&deep{99}) }, &deep{99}) - - // Verify params/names mutation - params, names := testCheck(c, check.Panics, false, "", func() { panic(errors.New("KABOOM")) }, errors.New("BOOM")) - c.Assert(params[0], check.ErrorMatches, "KABOOM") - c.Assert(names[0], check.Equals, "panic") - - // Verify a nil panic - testCheck(c, check.Panics, true, "", func() { panic(nil) }, nil) - testCheck(c, check.Panics, false, "", func() { panic(nil) }, "NOPE") -} - -func (s *CheckersS) TestPanicMatches(c *check.C) { - testInfo(c, check.PanicMatches, "PanicMatches", []string{"function", "expected"}) - - // Error matching. - testCheck(c, check.PanicMatches, true, "", func() { panic(errors.New("BOOM")) }, "BO.M") - testCheck(c, check.PanicMatches, false, "", func() { panic(errors.New("KABOOM")) }, "BO.M") - - // Some errors. - testCheck(c, check.PanicMatches, false, "Function has not panicked", func() bool { return false }, "BOOM") - testCheck(c, check.PanicMatches, false, "Function must take zero arguments", 1, "BOOM") - - // Plain strings. 
- testCheck(c, check.PanicMatches, true, "", func() { panic("BOOM") }, "BO.M") - testCheck(c, check.PanicMatches, false, "", func() { panic("KABOOM") }, "BOOM") - testCheck(c, check.PanicMatches, true, "", func() bool { panic("BOOM") }, "BO.M") - - // Verify params/names mutation - params, names := testCheck(c, check.PanicMatches, false, "", func() { panic(errors.New("KABOOM")) }, "BOOM") - c.Assert(params[0], check.Equals, "KABOOM") - c.Assert(names[0], check.Equals, "panic") - - // Verify a nil panic - testCheck(c, check.PanicMatches, false, "Panic value is not a string or an error", func() { panic(nil) }, "") -} - -func (s *CheckersS) TestFitsTypeOf(c *check.C) { - testInfo(c, check.FitsTypeOf, "FitsTypeOf", []string{"obtained", "sample"}) - - // Basic types - testCheck(c, check.FitsTypeOf, true, "", 1, 0) - testCheck(c, check.FitsTypeOf, false, "", 1, int64(0)) - - // Aliases - testCheck(c, check.FitsTypeOf, false, "", 1, errors.New("")) - testCheck(c, check.FitsTypeOf, false, "", "error", errors.New("")) - testCheck(c, check.FitsTypeOf, true, "", errors.New("error"), errors.New("")) - - // Structures - testCheck(c, check.FitsTypeOf, false, "", 1, simpleStruct{}) - testCheck(c, check.FitsTypeOf, false, "", simpleStruct{42}, &simpleStruct{}) - testCheck(c, check.FitsTypeOf, true, "", simpleStruct{42}, simpleStruct{}) - testCheck(c, check.FitsTypeOf, true, "", &simpleStruct{42}, &simpleStruct{}) - - // Some bad values - testCheck(c, check.FitsTypeOf, false, "Invalid sample value", 1, interface{}(nil)) - testCheck(c, check.FitsTypeOf, false, "", interface{}(nil), 0) -} - -func (s *CheckersS) TestImplements(c *check.C) { - testInfo(c, check.Implements, "Implements", []string{"obtained", "ifaceptr"}) - - var e error - var re runtime.Error - testCheck(c, check.Implements, true, "", errors.New(""), &e) - testCheck(c, check.Implements, false, "", errors.New(""), &re) - - // Some bad values - testCheck(c, check.Implements, false, "ifaceptr should be a pointer to an 
interface variable", 0, errors.New("")) - testCheck(c, check.Implements, false, "ifaceptr should be a pointer to an interface variable", 0, interface{}(nil)) - testCheck(c, check.Implements, false, "", interface{}(nil), &e) -} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/gopkg.in/check.v1/check.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/gopkg.in/check.v1/check.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/gopkg.in/check.v1/check.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/gopkg.in/check.v1/check.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,954 +0,0 @@ -// Package check is a rich testing extension for Go's testing package. -// -// For details about the project, see: -// -// http://labix.org/gocheck -// -package check - -import ( - "bytes" - "errors" - "fmt" - "io" - "math/rand" - "os" - "path" - "path/filepath" - "reflect" - "regexp" - "runtime" - "strconv" - "strings" - "sync" - "sync/atomic" - "time" -) - -// ----------------------------------------------------------------------- -// Internal type which deals with suite method calling. - -const ( - fixtureKd = iota - testKd -) - -type funcKind int - -const ( - succeededSt = iota - failedSt - skippedSt - panickedSt - fixturePanickedSt - missedSt -) - -type funcStatus uint32 - -// A method value can't reach its own Method structure. 
-type methodType struct { - reflect.Value - Info reflect.Method -} - -func newMethod(receiver reflect.Value, i int) *methodType { - return &methodType{receiver.Method(i), receiver.Type().Method(i)} -} - -func (method *methodType) PC() uintptr { - return method.Info.Func.Pointer() -} - -func (method *methodType) suiteName() string { - t := method.Info.Type.In(0) - if t.Kind() == reflect.Ptr { - t = t.Elem() - } - return t.Name() -} - -func (method *methodType) String() string { - return method.suiteName() + "." + method.Info.Name -} - -func (method *methodType) matches(re *regexp.Regexp) bool { - return (re.MatchString(method.Info.Name) || - re.MatchString(method.suiteName()) || - re.MatchString(method.String())) -} - -type C struct { - method *methodType - kind funcKind - testName string - _status funcStatus - logb *logger - logw io.Writer - done chan *C - reason string - mustFail bool - tempDir *tempDir - benchMem bool - startTime time.Time - timer -} - -func (c *C) status() funcStatus { - return funcStatus(atomic.LoadUint32((*uint32)(&c._status))) -} - -func (c *C) setStatus(s funcStatus) { - atomic.StoreUint32((*uint32)(&c._status), uint32(s)) -} - -func (c *C) stopNow() { - runtime.Goexit() -} - -// logger is a concurrency safe byte.Buffer -type logger struct { - sync.Mutex - writer bytes.Buffer -} - -func (l *logger) Write(buf []byte) (int, error) { - l.Lock() - defer l.Unlock() - return l.writer.Write(buf) -} - -func (l *logger) WriteTo(w io.Writer) (int64, error) { - l.Lock() - defer l.Unlock() - return l.writer.WriteTo(w) -} - -func (l *logger) String() string { - l.Lock() - defer l.Unlock() - return l.writer.String() -} - -// ----------------------------------------------------------------------- -// Handling of temporary files and directories. 
- -type tempDir struct { - sync.Mutex - path string - counter int -} - -func (td *tempDir) newPath() string { - td.Lock() - defer td.Unlock() - if td.path == "" { - var err error - for i := 0; i != 100; i++ { - path := fmt.Sprintf("%s%ccheck-%d", os.TempDir(), os.PathSeparator, rand.Int()) - if err = os.Mkdir(path, 0700); err == nil { - td.path = path - break - } - } - if td.path == "" { - panic("Couldn't create temporary directory: " + err.Error()) - } - } - result := filepath.Join(td.path, strconv.Itoa(td.counter)) - td.counter += 1 - return result -} - -func (td *tempDir) removeAll() { - td.Lock() - defer td.Unlock() - if td.path != "" { - err := os.RemoveAll(td.path) - if err != nil { - fmt.Fprintf(os.Stderr, "WARNING: Error cleaning up temporaries: "+err.Error()) - } - } -} - -// Create a new temporary directory which is automatically removed after -// the suite finishes running. -func (c *C) MkDir() string { - path := c.tempDir.newPath() - if err := os.Mkdir(path, 0700); err != nil { - panic(fmt.Sprintf("Couldn't create temporary directory %s: %s", path, err.Error())) - } - return path -} - -// ----------------------------------------------------------------------- -// Low-level logging functions. - -func (c *C) log(args ...interface{}) { - c.writeLog([]byte(fmt.Sprint(args...) + "\n")) -} - -func (c *C) logf(format string, args ...interface{}) { - c.writeLog([]byte(fmt.Sprintf(format+"\n", args...))) -} - -func (c *C) logNewLine() { - c.writeLog([]byte{'\n'}) -} - -func (c *C) writeLog(buf []byte) { - c.logb.Write(buf) - if c.logw != nil { - c.logw.Write(buf) - } -} - -func hasStringOrError(x interface{}) (ok bool) { - _, ok = x.(fmt.Stringer) - if ok { - return - } - _, ok = x.(error) - return -} - -func (c *C) logValue(label string, value interface{}) { - if label == "" { - if hasStringOrError(value) { - c.logf("... %#v (%q)", value, value) - } else { - c.logf("... %#v", value) - } - } else if value == nil { - c.logf("... 
%s = nil", label) - } else { - if hasStringOrError(value) { - fv := fmt.Sprintf("%#v", value) - qv := fmt.Sprintf("%q", value) - if fv != qv { - c.logf("... %s %s = %s (%s)", label, reflect.TypeOf(value), fv, qv) - return - } - } - if s, ok := value.(string); ok && isMultiLine(s) { - c.logf(`... %s %s = "" +`, label, reflect.TypeOf(value)) - c.logMultiLine(s) - } else { - c.logf("... %s %s = %#v", label, reflect.TypeOf(value), value) - } - } -} - -func (c *C) logMultiLine(s string) { - b := make([]byte, 0, len(s)*2) - i := 0 - n := len(s) - for i < n { - j := i + 1 - for j < n && s[j-1] != '\n' { - j++ - } - b = append(b, "... "...) - b = strconv.AppendQuote(b, s[i:j]) - if j < n { - b = append(b, " +"...) - } - b = append(b, '\n') - i = j - } - c.writeLog(b) -} - -func isMultiLine(s string) bool { - for i := 0; i+1 < len(s); i++ { - if s[i] == '\n' { - return true - } - } - return false -} - -func (c *C) logString(issue string) { - c.log("... ", issue) -} - -func (c *C) logCaller(skip int) { - // This is a bit heavier than it ought to be. - skip += 1 // Our own frame. - pc, callerFile, callerLine, ok := runtime.Caller(skip) - if !ok { - return - } - var testFile string - var testLine int - testFunc := runtime.FuncForPC(c.method.PC()) - if runtime.FuncForPC(pc) != testFunc { - for { - skip += 1 - if pc, file, line, ok := runtime.Caller(skip); ok { - // Note that the test line may be different on - // distinct calls for the same test. Showing - // the "internal" line is helpful when debugging. - if runtime.FuncForPC(pc) == testFunc { - testFile, testLine = file, line - break - } - } else { - break - } - } - } - if testFile != "" && (testFile != callerFile || testLine != callerLine) { - c.logCode(testFile, testLine) - } - c.logCode(callerFile, callerLine) -} - -func (c *C) logCode(path string, line int) { - c.logf("%s:%d:", nicePath(path), line) - code, err := printLine(path, line) - if code == "" { - code = "..." // XXX Open the file and take the raw line. 
- if err != nil { - code += err.Error() - } - } - c.log(indent(code, " ")) -} - -var valueGo = filepath.Join("reflect", "value.go") -var asmGo = filepath.Join("runtime", "asm_") - -func (c *C) logPanic(skip int, value interface{}) { - skip++ // Our own frame. - initialSkip := skip - for ; ; skip++ { - if pc, file, line, ok := runtime.Caller(skip); ok { - if skip == initialSkip { - c.logf("... Panic: %s (PC=0x%X)\n", value, pc) - } - name := niceFuncName(pc) - path := nicePath(file) - if strings.Contains(path, "/gopkg.in/check.v") { - continue - } - if name == "Value.call" && strings.HasSuffix(path, valueGo) { - continue - } - if (name == "call16" || name == "call32") && strings.Contains(path, asmGo) { - continue - } - c.logf("%s:%d\n in %s", nicePath(file), line, name) - } else { - break - } - } -} - -func (c *C) logSoftPanic(issue string) { - c.log("... Panic: ", issue) -} - -func (c *C) logArgPanic(method *methodType, expectedType string) { - c.logf("... Panic: %s argument should be %s", - niceFuncName(method.PC()), expectedType) -} - -// ----------------------------------------------------------------------- -// Some simple formatting helpers. 
- -var initWD, initWDErr = os.Getwd() - -func init() { - if initWDErr == nil { - initWD = strings.Replace(initWD, "\\", "/", -1) + "/" - } -} - -func nicePath(path string) string { - if initWDErr == nil { - if strings.HasPrefix(path, initWD) { - return path[len(initWD):] - } - } - return path -} - -func niceFuncPath(pc uintptr) string { - function := runtime.FuncForPC(pc) - if function != nil { - filename, line := function.FileLine(pc) - return fmt.Sprintf("%s:%d", nicePath(filename), line) - } - return "" -} - -func niceFuncName(pc uintptr) string { - function := runtime.FuncForPC(pc) - if function != nil { - name := path.Base(function.Name()) - if i := strings.Index(name, "."); i > 0 { - name = name[i+1:] - } - if strings.HasPrefix(name, "(*") { - if i := strings.Index(name, ")"); i > 0 { - name = name[2:i] + name[i+1:] - } - } - if i := strings.LastIndex(name, ".*"); i != -1 { - name = name[:i] + "." + name[i+2:] - } - if i := strings.LastIndex(name, "·"); i != -1 { - name = name[:i] + "." + name[i+2:] - } - return name - } - return "" -} - -// ----------------------------------------------------------------------- -// Result tracker to aggregate call results. - -type Result struct { - Succeeded int - Failed int - Skipped int - Panicked int - FixturePanicked int - ExpectedFailures int - Missed int // Not even tried to run, related to a panic in the fixture. - RunError error // Houston, we've got a problem. 
- WorkDir string // If KeepWorkDir is true -} - -type resultTracker struct { - result Result - _lastWasProblem bool - _waiting int - _missed int - _expectChan chan *C - _doneChan chan *C - _stopChan chan bool -} - -func newResultTracker() *resultTracker { - return &resultTracker{_expectChan: make(chan *C), // Synchronous - _doneChan: make(chan *C, 32), // Asynchronous - _stopChan: make(chan bool)} // Synchronous -} - -func (tracker *resultTracker) start() { - go tracker._loopRoutine() -} - -func (tracker *resultTracker) waitAndStop() { - <-tracker._stopChan -} - -func (tracker *resultTracker) expectCall(c *C) { - tracker._expectChan <- c -} - -func (tracker *resultTracker) callDone(c *C) { - tracker._doneChan <- c -} - -func (tracker *resultTracker) _loopRoutine() { - for { - var c *C - if tracker._waiting > 0 { - // Calls still running. Can't stop. - select { - // XXX Reindent this (not now to make diff clear) - case c = <-tracker._expectChan: - tracker._waiting += 1 - case c = <-tracker._doneChan: - tracker._waiting -= 1 - switch c.status() { - case succeededSt: - if c.kind == testKd { - if c.mustFail { - tracker.result.ExpectedFailures++ - } else { - tracker.result.Succeeded++ - } - } - case failedSt: - tracker.result.Failed++ - case panickedSt: - if c.kind == fixtureKd { - tracker.result.FixturePanicked++ - } else { - tracker.result.Panicked++ - } - case fixturePanickedSt: - // Track it as missed, since the panic - // was on the fixture, not on the test. - tracker.result.Missed++ - case missedSt: - tracker.result.Missed++ - case skippedSt: - if c.kind == testKd { - tracker.result.Skipped++ - } - } - } - } else { - // No calls. Can stop, but no done calls here. 
- select { - case tracker._stopChan <- true: - return - case c = <-tracker._expectChan: - tracker._waiting += 1 - case c = <-tracker._doneChan: - panic("Tracker got an unexpected done call.") - } - } - } -} - -// ----------------------------------------------------------------------- -// The underlying suite runner. - -type suiteRunner struct { - suite interface{} - setUpSuite, tearDownSuite *methodType - setUpTest, tearDownTest *methodType - tests []*methodType - tracker *resultTracker - tempDir *tempDir - keepDir bool - output *outputWriter - reportedProblemLast bool - benchTime time.Duration - benchMem bool -} - -type RunConf struct { - Output io.Writer - Stream bool - Verbose bool - Filter string - Benchmark bool - BenchmarkTime time.Duration // Defaults to 1 second - BenchmarkMem bool - KeepWorkDir bool -} - -// Create a new suiteRunner able to run all methods in the given suite. -func newSuiteRunner(suite interface{}, runConf *RunConf) *suiteRunner { - var conf RunConf - if runConf != nil { - conf = *runConf - } - if conf.Output == nil { - conf.Output = os.Stdout - } - if conf.Benchmark { - conf.Verbose = true - } - - suiteType := reflect.TypeOf(suite) - suiteNumMethods := suiteType.NumMethod() - suiteValue := reflect.ValueOf(suite) - - runner := &suiteRunner{ - suite: suite, - output: newOutputWriter(conf.Output, conf.Stream, conf.Verbose), - tracker: newResultTracker(), - benchTime: conf.BenchmarkTime, - benchMem: conf.BenchmarkMem, - tempDir: &tempDir{}, - keepDir: conf.KeepWorkDir, - tests: make([]*methodType, 0, suiteNumMethods), - } - if runner.benchTime == 0 { - runner.benchTime = 1 * time.Second - } - - var filterRegexp *regexp.Regexp - if conf.Filter != "" { - if regexp, err := regexp.Compile(conf.Filter); err != nil { - msg := "Bad filter expression: " + err.Error() - runner.tracker.result.RunError = errors.New(msg) - return runner - } else { - filterRegexp = regexp - } - } - - for i := 0; i != suiteNumMethods; i++ { - method := 
newMethod(suiteValue, i) - switch method.Info.Name { - case "SetUpSuite": - runner.setUpSuite = method - case "TearDownSuite": - runner.tearDownSuite = method - case "SetUpTest": - runner.setUpTest = method - case "TearDownTest": - runner.tearDownTest = method - default: - prefix := "Test" - if conf.Benchmark { - prefix = "Benchmark" - } - if !strings.HasPrefix(method.Info.Name, prefix) { - continue - } - if filterRegexp == nil || method.matches(filterRegexp) { - runner.tests = append(runner.tests, method) - } - } - } - return runner -} - -// Run all methods in the given suite. -func (runner *suiteRunner) run() *Result { - if runner.tracker.result.RunError == nil && len(runner.tests) > 0 { - runner.tracker.start() - if runner.checkFixtureArgs() { - c := runner.runFixture(runner.setUpSuite, "", nil) - if c == nil || c.status() == succeededSt { - for i := 0; i != len(runner.tests); i++ { - c := runner.runTest(runner.tests[i]) - if c.status() == fixturePanickedSt { - runner.skipTests(missedSt, runner.tests[i+1:]) - break - } - } - } else if c != nil && c.status() == skippedSt { - runner.skipTests(skippedSt, runner.tests) - } else { - runner.skipTests(missedSt, runner.tests) - } - runner.runFixture(runner.tearDownSuite, "", nil) - } else { - runner.skipTests(missedSt, runner.tests) - } - runner.tracker.waitAndStop() - if runner.keepDir { - runner.tracker.result.WorkDir = runner.tempDir.path - } else { - runner.tempDir.removeAll() - } - } - return &runner.tracker.result -} - -// Create a call object with the given suite method, and fork a -// goroutine with the provided dispatcher for running it. 
-func (runner *suiteRunner) forkCall(method *methodType, kind funcKind, testName string, logb *logger, dispatcher func(c *C)) *C { - var logw io.Writer - if runner.output.Stream { - logw = runner.output - } - if logb == nil { - logb = new(logger) - } - c := &C{ - method: method, - kind: kind, - testName: testName, - logb: logb, - logw: logw, - tempDir: runner.tempDir, - done: make(chan *C, 1), - timer: timer{benchTime: runner.benchTime}, - startTime: time.Now(), - benchMem: runner.benchMem, - } - runner.tracker.expectCall(c) - go (func() { - runner.reportCallStarted(c) - defer runner.callDone(c) - dispatcher(c) - })() - return c -} - -// Same as forkCall(), but wait for call to finish before returning. -func (runner *suiteRunner) runFunc(method *methodType, kind funcKind, testName string, logb *logger, dispatcher func(c *C)) *C { - c := runner.forkCall(method, kind, testName, logb, dispatcher) - <-c.done - return c -} - -// Handle a finished call. If there were any panics, update the call status -// accordingly. Then, mark the call as done and report to the tracker. -func (runner *suiteRunner) callDone(c *C) { - value := recover() - if value != nil { - switch v := value.(type) { - case *fixturePanic: - if v.status == skippedSt { - c.setStatus(skippedSt) - } else { - c.logSoftPanic("Fixture has panicked (see related PANIC)") - c.setStatus(fixturePanickedSt) - } - default: - c.logPanic(1, value) - c.setStatus(panickedSt) - } - } - if c.mustFail { - switch c.status() { - case failedSt: - c.setStatus(succeededSt) - case succeededSt: - c.setStatus(failedSt) - c.logString("Error: Test succeeded, but was expected to fail") - c.logString("Reason: " + c.reason) - } - } - - runner.reportCallDone(c) - c.done <- c -} - -// Runs a fixture call synchronously. The fixture will still be run in a -// goroutine like all suite methods, but this method will not return -// while the fixture goroutine is not done, because the fixture must be -// run in a desired order. 
-func (runner *suiteRunner) runFixture(method *methodType, testName string, logb *logger) *C { - if method != nil { - c := runner.runFunc(method, fixtureKd, testName, logb, func(c *C) { - c.ResetTimer() - c.StartTimer() - defer c.StopTimer() - c.method.Call([]reflect.Value{reflect.ValueOf(c)}) - }) - return c - } - return nil -} - -// Run the fixture method with runFixture(), but panic with a fixturePanic{} -// in case the fixture method panics. This makes it easier to track the -// fixture panic together with other call panics within forkTest(). -func (runner *suiteRunner) runFixtureWithPanic(method *methodType, testName string, logb *logger, skipped *bool) *C { - if skipped != nil && *skipped { - return nil - } - c := runner.runFixture(method, testName, logb) - if c != nil && c.status() != succeededSt { - if skipped != nil { - *skipped = c.status() == skippedSt - } - panic(&fixturePanic{c.status(), method}) - } - return c -} - -type fixturePanic struct { - status funcStatus - method *methodType -} - -// Run the suite test method, together with the test-specific fixture, -// asynchronously. -func (runner *suiteRunner) forkTest(method *methodType) *C { - testName := method.String() - return runner.forkCall(method, testKd, testName, nil, func(c *C) { - var skipped bool - defer runner.runFixtureWithPanic(runner.tearDownTest, testName, nil, &skipped) - defer c.StopTimer() - benchN := 1 - for { - runner.runFixtureWithPanic(runner.setUpTest, testName, c.logb, &skipped) - mt := c.method.Type() - if mt.NumIn() != 1 || mt.In(0) != reflect.TypeOf(c) { - // Rather than a plain panic, provide a more helpful message when - // the argument type is incorrect. 
- c.setStatus(panickedSt) - c.logArgPanic(c.method, "*check.C") - return - } - if strings.HasPrefix(c.method.Info.Name, "Test") { - c.ResetTimer() - c.StartTimer() - c.method.Call([]reflect.Value{reflect.ValueOf(c)}) - return - } - if !strings.HasPrefix(c.method.Info.Name, "Benchmark") { - panic("unexpected method prefix: " + c.method.Info.Name) - } - - runtime.GC() - c.N = benchN - c.ResetTimer() - c.StartTimer() - c.method.Call([]reflect.Value{reflect.ValueOf(c)}) - c.StopTimer() - if c.status() != succeededSt || c.duration >= c.benchTime || benchN >= 1e9 { - return - } - perOpN := int(1e9) - if c.nsPerOp() != 0 { - perOpN = int(c.benchTime.Nanoseconds() / c.nsPerOp()) - } - - // Logic taken from the stock testing package: - // - Run more iterations than we think we'll need for a second (1.5x). - // - Don't grow too fast in case we had timing errors previously. - // - Be sure to run at least one more than last time. - benchN = max(min(perOpN+perOpN/2, 100*benchN), benchN+1) - benchN = roundUp(benchN) - - skipped = true // Don't run the deferred one if this panics. - runner.runFixtureWithPanic(runner.tearDownTest, testName, nil, nil) - skipped = false - } - }) -} - -// Same as forkTest(), but wait for the test to finish before returning. -func (runner *suiteRunner) runTest(method *methodType) *C { - c := runner.forkTest(method) - <-c.done - return c -} - -// Helper to mark tests as skipped or missed. A bit heavy for what -// it does, but it enables homogeneous handling of tracking, including -// nice verbose output. -func (runner *suiteRunner) skipTests(status funcStatus, methods []*methodType) { - for _, method := range methods { - runner.runFunc(method, testKd, "", nil, func(c *C) { - c.setStatus(status) - }) - } -} - -// Verify if the fixture arguments are *check.C. In case of errors, -// log the error as a panic in the fixture method call, and return false. 
-func (runner *suiteRunner) checkFixtureArgs() bool { - succeeded := true - argType := reflect.TypeOf(&C{}) - for _, method := range []*methodType{runner.setUpSuite, runner.tearDownSuite, runner.setUpTest, runner.tearDownTest} { - if method != nil { - mt := method.Type() - if mt.NumIn() != 1 || mt.In(0) != argType { - succeeded = false - runner.runFunc(method, fixtureKd, "", nil, func(c *C) { - c.logArgPanic(method, "*check.C") - c.setStatus(panickedSt) - }) - } - } - } - return succeeded -} - -func (runner *suiteRunner) reportCallStarted(c *C) { - runner.output.WriteCallStarted("START", c) -} - -func (runner *suiteRunner) reportCallDone(c *C) { - runner.tracker.callDone(c) - switch c.status() { - case succeededSt: - if c.mustFail { - runner.output.WriteCallSuccess("FAIL EXPECTED", c) - } else { - runner.output.WriteCallSuccess("PASS", c) - } - case skippedSt: - runner.output.WriteCallSuccess("SKIP", c) - case failedSt: - runner.output.WriteCallProblem("FAIL", c) - case panickedSt: - runner.output.WriteCallProblem("PANIC", c) - case fixturePanickedSt: - // That's a testKd call reporting that its fixture - // has panicked. The fixture call which caused the - // panic itself was tracked above. We'll report to - // aid debugging. - runner.output.WriteCallProblem("PANIC", c) - case missedSt: - runner.output.WriteCallSuccess("MISS", c) - } -} - -// ----------------------------------------------------------------------- -// Output writer manages atomic output writing according to settings. 
- -type outputWriter struct { - m sync.Mutex - writer io.Writer - wroteCallProblemLast bool - Stream bool - Verbose bool -} - -func newOutputWriter(writer io.Writer, stream, verbose bool) *outputWriter { - return &outputWriter{writer: writer, Stream: stream, Verbose: verbose} -} - -func (ow *outputWriter) Write(content []byte) (n int, err error) { - ow.m.Lock() - n, err = ow.writer.Write(content) - ow.m.Unlock() - return -} - -func (ow *outputWriter) WriteCallStarted(label string, c *C) { - if ow.Stream { - header := renderCallHeader(label, c, "", "\n") - ow.m.Lock() - ow.writer.Write([]byte(header)) - ow.m.Unlock() - } -} - -func (ow *outputWriter) WriteCallProblem(label string, c *C) { - var prefix string - if !ow.Stream { - prefix = "\n-----------------------------------" + - "-----------------------------------\n" - } - header := renderCallHeader(label, c, prefix, "\n\n") - ow.m.Lock() - ow.wroteCallProblemLast = true - ow.writer.Write([]byte(header)) - if !ow.Stream { - c.logb.WriteTo(ow.writer) - } - ow.m.Unlock() -} - -func (ow *outputWriter) WriteCallSuccess(label string, c *C) { - if ow.Stream || (ow.Verbose && c.kind == testKd) { - // TODO Use a buffer here. - var suffix string - if c.reason != "" { - suffix = " (" + c.reason + ")" - } - if c.status() == succeededSt { - suffix += "\t" + c.timerString() - } - suffix += "\n" - if ow.Stream { - suffix += "\n" - } - header := renderCallHeader(label, c, "", suffix) - ow.m.Lock() - // Resist temptation of using line as prefix above due to race. 
- if !ow.Stream && ow.wroteCallProblemLast { - header = "\n-----------------------------------" + - "-----------------------------------\n" + - header - } - ow.wroteCallProblemLast = false - ow.writer.Write([]byte(header)) - ow.m.Unlock() - } -} - -func renderCallHeader(label string, c *C, prefix, suffix string) string { - pc := c.method.PC() - return fmt.Sprintf("%s%s: %s: %s%s", prefix, label, niceFuncPath(pc), - niceFuncName(pc), suffix) -} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/gopkg.in/check.v1/check_test.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/gopkg.in/check.v1/check_test.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/gopkg.in/check.v1/check_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/gopkg.in/check.v1/check_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,207 +0,0 @@ -// This file contains just a few generic helpers which are used by the -// other test files. - -package check_test - -import ( - "flag" - "fmt" - "os" - "regexp" - "runtime" - "testing" - "time" - - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/gopkg.in/check.v1" -) - -// We count the number of suites run at least to get a vague hint that the -// test suite is behaving as it should. Otherwise a bug introduced at the -// very core of the system could go unperceived. -const suitesRunExpected = 8 - -var suitesRun int = 0 - -func Test(t *testing.T) { - check.TestingT(t) - if suitesRun != suitesRunExpected && flag.Lookup("check.f").Value.String() == "" { - critical(fmt.Sprintf("Expected %d suites to run rather than %d", - suitesRunExpected, suitesRun)) - } -} - -// ----------------------------------------------------------------------- -// Helper functions. - -// Break down badly. This is used in test cases which can't yet assume -// that the fundamental bits are working. 
-func critical(error string) { - fmt.Fprintln(os.Stderr, "CRITICAL: "+error) - os.Exit(1) -} - -// Return the file line where it's called. -func getMyLine() int { - if _, _, line, ok := runtime.Caller(1); ok { - return line - } - return -1 -} - -// ----------------------------------------------------------------------- -// Helper type implementing a basic io.Writer for testing output. - -// Type implementing the io.Writer interface for analyzing output. -type String struct { - value string -} - -// The only function required by the io.Writer interface. Will append -// written data to the String.value string. -func (s *String) Write(p []byte) (n int, err error) { - s.value += string(p) - return len(p), nil -} - -// Trivial wrapper to test errors happening on a different file -// than the test itself. -func checkEqualWrapper(c *check.C, obtained, expected interface{}) (result bool, line int) { - return c.Check(obtained, check.Equals, expected), getMyLine() -} - -// ----------------------------------------------------------------------- -// Helper suite for testing basic fail behavior. - -type FailHelper struct { - testLine int -} - -func (s *FailHelper) TestLogAndFail(c *check.C) { - s.testLine = getMyLine() - 1 - c.Log("Expected failure!") - c.Fail() -} - -// ----------------------------------------------------------------------- -// Helper suite for testing basic success behavior. - -type SuccessHelper struct{} - -func (s *SuccessHelper) TestLogAndSucceed(c *check.C) { - c.Log("Expected success!") -} - -// ----------------------------------------------------------------------- -// Helper suite for testing ordering and behavior of fixture. 
- -type FixtureHelper struct { - calls []string - panicOn string - skip bool - skipOnN int - sleepOn string - sleep time.Duration - bytes int64 -} - -func (s *FixtureHelper) trace(name string, c *check.C) { - s.calls = append(s.calls, name) - if name == s.panicOn { - panic(name) - } - if s.sleep > 0 && s.sleepOn == name { - time.Sleep(s.sleep) - } - if s.skip && s.skipOnN == len(s.calls)-1 { - c.Skip("skipOnN == n") - } -} - -func (s *FixtureHelper) SetUpSuite(c *check.C) { - s.trace("SetUpSuite", c) -} - -func (s *FixtureHelper) TearDownSuite(c *check.C) { - s.trace("TearDownSuite", c) -} - -func (s *FixtureHelper) SetUpTest(c *check.C) { - s.trace("SetUpTest", c) -} - -func (s *FixtureHelper) TearDownTest(c *check.C) { - s.trace("TearDownTest", c) -} - -func (s *FixtureHelper) Test1(c *check.C) { - s.trace("Test1", c) -} - -func (s *FixtureHelper) Test2(c *check.C) { - s.trace("Test2", c) -} - -func (s *FixtureHelper) Benchmark1(c *check.C) { - s.trace("Benchmark1", c) - for i := 0; i < c.N; i++ { - time.Sleep(s.sleep) - } -} - -func (s *FixtureHelper) Benchmark2(c *check.C) { - s.trace("Benchmark2", c) - c.SetBytes(1024) - for i := 0; i < c.N; i++ { - time.Sleep(s.sleep) - } -} - -func (s *FixtureHelper) Benchmark3(c *check.C) { - var x []int64 - s.trace("Benchmark3", c) - for i := 0; i < c.N; i++ { - time.Sleep(s.sleep) - x = make([]int64, 5) - _ = x - } -} - -// ----------------------------------------------------------------------- -// Helper which checks the state of the test and ensures that it matches -// the given expectations. Depends on c.Errorf() working, so shouldn't -// be used to test this one function. - -type expectedState struct { - name string - result interface{} - failed bool - log string -} - -// Verify the state of the test. Note that since this also verifies if -// the test is supposed to be in a failed state, no other checks should -// be done in addition to what is being tested. 
-func checkState(c *check.C, result interface{}, expected *expectedState) { - failed := c.Failed() - c.Succeed() - log := c.GetTestLog() - matched, matchError := regexp.MatchString("^"+expected.log+"$", log) - if matchError != nil { - c.Errorf("Error in matching expression used in testing %s", - expected.name) - } else if !matched { - c.Errorf("%s logged:\n----------\n%s----------\n\nExpected:\n----------\n%s\n----------", - expected.name, log, expected.log) - } - if result != expected.result { - c.Errorf("%s returned %#v rather than %#v", - expected.name, result, expected.result) - } - if failed != expected.failed { - if failed { - c.Errorf("%s has failed when it shouldn't", expected.name) - } else { - c.Errorf("%s has not failed when it should", expected.name) - } - } -} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/gopkg.in/check.v1/export_test.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/gopkg.in/check.v1/export_test.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/gopkg.in/check.v1/export_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/gopkg.in/check.v1/export_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,9 +0,0 @@ -package check - -func PrintLine(filename string, line int) (string, error) { - return printLine(filename, line) -} - -func Indent(s, with string) string { - return indent(s, with) -} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/gopkg.in/check.v1/fixture_test.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/gopkg.in/check.v1/fixture_test.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/gopkg.in/check.v1/fixture_test.go 2016-08-16 08:56:25.000000000 +0000 +++ 
juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/gopkg.in/check.v1/fixture_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,484 +0,0 @@ -// Tests for the behavior of the test fixture system. - -package check_test - -import ( - . "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/gopkg.in/check.v1" -) - -// ----------------------------------------------------------------------- -// Fixture test suite. - -type FixtureS struct{} - -var fixtureS = Suite(&FixtureS{}) - -func (s *FixtureS) TestCountSuite(c *C) { - suitesRun += 1 -} - -// ----------------------------------------------------------------------- -// Basic fixture ordering verification. - -func (s *FixtureS) TestOrder(c *C) { - helper := FixtureHelper{} - Run(&helper, nil) - c.Check(helper.calls[0], Equals, "SetUpSuite") - c.Check(helper.calls[1], Equals, "SetUpTest") - c.Check(helper.calls[2], Equals, "Test1") - c.Check(helper.calls[3], Equals, "TearDownTest") - c.Check(helper.calls[4], Equals, "SetUpTest") - c.Check(helper.calls[5], Equals, "Test2") - c.Check(helper.calls[6], Equals, "TearDownTest") - c.Check(helper.calls[7], Equals, "TearDownSuite") - c.Check(len(helper.calls), Equals, 8) -} - -// ----------------------------------------------------------------------- -// Check the behavior when panics occur within tests and fixtures. 
- -func (s *FixtureS) TestPanicOnTest(c *C) { - helper := FixtureHelper{panicOn: "Test1"} - output := String{} - Run(&helper, &RunConf{Output: &output}) - c.Check(helper.calls[0], Equals, "SetUpSuite") - c.Check(helper.calls[1], Equals, "SetUpTest") - c.Check(helper.calls[2], Equals, "Test1") - c.Check(helper.calls[3], Equals, "TearDownTest") - c.Check(helper.calls[4], Equals, "SetUpTest") - c.Check(helper.calls[5], Equals, "Test2") - c.Check(helper.calls[6], Equals, "TearDownTest") - c.Check(helper.calls[7], Equals, "TearDownSuite") - c.Check(len(helper.calls), Equals, 8) - - expected := "^\n-+\n" + - "PANIC: check_test\\.go:[0-9]+: FixtureHelper.Test1\n\n" + - "\\.\\.\\. Panic: Test1 \\(PC=[xA-F0-9]+\\)\n\n" + - ".+:[0-9]+\n" + - " in (go)?panic\n" + - ".*check_test.go:[0-9]+\n" + - " in FixtureHelper.trace\n" + - ".*check_test.go:[0-9]+\n" + - " in FixtureHelper.Test1\n" + - "(.|\n)*$" - - c.Check(output.value, Matches, expected) -} - -func (s *FixtureS) TestPanicOnSetUpTest(c *C) { - helper := FixtureHelper{panicOn: "SetUpTest"} - output := String{} - Run(&helper, &RunConf{Output: &output}) - c.Check(helper.calls[0], Equals, "SetUpSuite") - c.Check(helper.calls[1], Equals, "SetUpTest") - c.Check(helper.calls[2], Equals, "TearDownTest") - c.Check(helper.calls[3], Equals, "TearDownSuite") - c.Check(len(helper.calls), Equals, 4) - - expected := "^\n-+\n" + - "PANIC: check_test\\.go:[0-9]+: " + - "FixtureHelper\\.SetUpTest\n\n" + - "\\.\\.\\. Panic: SetUpTest \\(PC=[xA-F0-9]+\\)\n\n" + - ".+:[0-9]+\n" + - " in (go)?panic\n" + - ".*check_test.go:[0-9]+\n" + - " in FixtureHelper.trace\n" + - ".*check_test.go:[0-9]+\n" + - " in FixtureHelper.SetUpTest\n" + - "(.|\n)*" + - "\n-+\n" + - "PANIC: check_test\\.go:[0-9]+: " + - "FixtureHelper\\.Test1\n\n" + - "\\.\\.\\. 
Panic: Fixture has panicked " + - "\\(see related PANIC\\)\n$" - - c.Check(output.value, Matches, expected) -} - -func (s *FixtureS) TestPanicOnTearDownTest(c *C) { - helper := FixtureHelper{panicOn: "TearDownTest"} - output := String{} - Run(&helper, &RunConf{Output: &output}) - c.Check(helper.calls[0], Equals, "SetUpSuite") - c.Check(helper.calls[1], Equals, "SetUpTest") - c.Check(helper.calls[2], Equals, "Test1") - c.Check(helper.calls[3], Equals, "TearDownTest") - c.Check(helper.calls[4], Equals, "TearDownSuite") - c.Check(len(helper.calls), Equals, 5) - - expected := "^\n-+\n" + - "PANIC: check_test\\.go:[0-9]+: " + - "FixtureHelper.TearDownTest\n\n" + - "\\.\\.\\. Panic: TearDownTest \\(PC=[xA-F0-9]+\\)\n\n" + - ".+:[0-9]+\n" + - " in (go)?panic\n" + - ".*check_test.go:[0-9]+\n" + - " in FixtureHelper.trace\n" + - ".*check_test.go:[0-9]+\n" + - " in FixtureHelper.TearDownTest\n" + - "(.|\n)*" + - "\n-+\n" + - "PANIC: check_test\\.go:[0-9]+: " + - "FixtureHelper\\.Test1\n\n" + - "\\.\\.\\. Panic: Fixture has panicked " + - "\\(see related PANIC\\)\n$" - - c.Check(output.value, Matches, expected) -} - -func (s *FixtureS) TestPanicOnSetUpSuite(c *C) { - helper := FixtureHelper{panicOn: "SetUpSuite"} - output := String{} - Run(&helper, &RunConf{Output: &output}) - c.Check(helper.calls[0], Equals, "SetUpSuite") - c.Check(helper.calls[1], Equals, "TearDownSuite") - c.Check(len(helper.calls), Equals, 2) - - expected := "^\n-+\n" + - "PANIC: check_test\\.go:[0-9]+: " + - "FixtureHelper.SetUpSuite\n\n" + - "\\.\\.\\. 
Panic: SetUpSuite \\(PC=[xA-F0-9]+\\)\n\n" + - ".+:[0-9]+\n" + - " in (go)?panic\n" + - ".*check_test.go:[0-9]+\n" + - " in FixtureHelper.trace\n" + - ".*check_test.go:[0-9]+\n" + - " in FixtureHelper.SetUpSuite\n" + - "(.|\n)*$" - - c.Check(output.value, Matches, expected) -} - -func (s *FixtureS) TestPanicOnTearDownSuite(c *C) { - helper := FixtureHelper{panicOn: "TearDownSuite"} - output := String{} - Run(&helper, &RunConf{Output: &output}) - c.Check(helper.calls[0], Equals, "SetUpSuite") - c.Check(helper.calls[1], Equals, "SetUpTest") - c.Check(helper.calls[2], Equals, "Test1") - c.Check(helper.calls[3], Equals, "TearDownTest") - c.Check(helper.calls[4], Equals, "SetUpTest") - c.Check(helper.calls[5], Equals, "Test2") - c.Check(helper.calls[6], Equals, "TearDownTest") - c.Check(helper.calls[7], Equals, "TearDownSuite") - c.Check(len(helper.calls), Equals, 8) - - expected := "^\n-+\n" + - "PANIC: check_test\\.go:[0-9]+: " + - "FixtureHelper.TearDownSuite\n\n" + - "\\.\\.\\. Panic: TearDownSuite \\(PC=[xA-F0-9]+\\)\n\n" + - ".+:[0-9]+\n" + - " in (go)?panic\n" + - ".*check_test.go:[0-9]+\n" + - " in FixtureHelper.trace\n" + - ".*check_test.go:[0-9]+\n" + - " in FixtureHelper.TearDownSuite\n" + - "(.|\n)*$" - - c.Check(output.value, Matches, expected) -} - -// ----------------------------------------------------------------------- -// A wrong argument on a test or fixture will produce a nice error. 
- -func (s *FixtureS) TestPanicOnWrongTestArg(c *C) { - helper := WrongTestArgHelper{} - output := String{} - Run(&helper, &RunConf{Output: &output}) - c.Check(helper.calls[0], Equals, "SetUpSuite") - c.Check(helper.calls[1], Equals, "SetUpTest") - c.Check(helper.calls[2], Equals, "TearDownTest") - c.Check(helper.calls[3], Equals, "SetUpTest") - c.Check(helper.calls[4], Equals, "Test2") - c.Check(helper.calls[5], Equals, "TearDownTest") - c.Check(helper.calls[6], Equals, "TearDownSuite") - c.Check(len(helper.calls), Equals, 7) - - expected := "^\n-+\n" + - "PANIC: fixture_test\\.go:[0-9]+: " + - "WrongTestArgHelper\\.Test1\n\n" + - "\\.\\.\\. Panic: WrongTestArgHelper\\.Test1 argument " + - "should be \\*check\\.C\n" - - c.Check(output.value, Matches, expected) -} - -func (s *FixtureS) TestPanicOnWrongSetUpTestArg(c *C) { - helper := WrongSetUpTestArgHelper{} - output := String{} - Run(&helper, &RunConf{Output: &output}) - c.Check(len(helper.calls), Equals, 0) - - expected := - "^\n-+\n" + - "PANIC: fixture_test\\.go:[0-9]+: " + - "WrongSetUpTestArgHelper\\.SetUpTest\n\n" + - "\\.\\.\\. Panic: WrongSetUpTestArgHelper\\.SetUpTest argument " + - "should be \\*check\\.C\n" - - c.Check(output.value, Matches, expected) -} - -func (s *FixtureS) TestPanicOnWrongSetUpSuiteArg(c *C) { - helper := WrongSetUpSuiteArgHelper{} - output := String{} - Run(&helper, &RunConf{Output: &output}) - c.Check(len(helper.calls), Equals, 0) - - expected := - "^\n-+\n" + - "PANIC: fixture_test\\.go:[0-9]+: " + - "WrongSetUpSuiteArgHelper\\.SetUpSuite\n\n" + - "\\.\\.\\. Panic: WrongSetUpSuiteArgHelper\\.SetUpSuite argument " + - "should be \\*check\\.C\n" - - c.Check(output.value, Matches, expected) -} - -// ----------------------------------------------------------------------- -// Nice errors also when tests or fixture have wrong arg count. 
- -func (s *FixtureS) TestPanicOnWrongTestArgCount(c *C) { - helper := WrongTestArgCountHelper{} - output := String{} - Run(&helper, &RunConf{Output: &output}) - c.Check(helper.calls[0], Equals, "SetUpSuite") - c.Check(helper.calls[1], Equals, "SetUpTest") - c.Check(helper.calls[2], Equals, "TearDownTest") - c.Check(helper.calls[3], Equals, "SetUpTest") - c.Check(helper.calls[4], Equals, "Test2") - c.Check(helper.calls[5], Equals, "TearDownTest") - c.Check(helper.calls[6], Equals, "TearDownSuite") - c.Check(len(helper.calls), Equals, 7) - - expected := "^\n-+\n" + - "PANIC: fixture_test\\.go:[0-9]+: " + - "WrongTestArgCountHelper\\.Test1\n\n" + - "\\.\\.\\. Panic: WrongTestArgCountHelper\\.Test1 argument " + - "should be \\*check\\.C\n" - - c.Check(output.value, Matches, expected) -} - -func (s *FixtureS) TestPanicOnWrongSetUpTestArgCount(c *C) { - helper := WrongSetUpTestArgCountHelper{} - output := String{} - Run(&helper, &RunConf{Output: &output}) - c.Check(len(helper.calls), Equals, 0) - - expected := - "^\n-+\n" + - "PANIC: fixture_test\\.go:[0-9]+: " + - "WrongSetUpTestArgCountHelper\\.SetUpTest\n\n" + - "\\.\\.\\. Panic: WrongSetUpTestArgCountHelper\\.SetUpTest argument " + - "should be \\*check\\.C\n" - - c.Check(output.value, Matches, expected) -} - -func (s *FixtureS) TestPanicOnWrongSetUpSuiteArgCount(c *C) { - helper := WrongSetUpSuiteArgCountHelper{} - output := String{} - Run(&helper, &RunConf{Output: &output}) - c.Check(len(helper.calls), Equals, 0) - - expected := - "^\n-+\n" + - "PANIC: fixture_test\\.go:[0-9]+: " + - "WrongSetUpSuiteArgCountHelper\\.SetUpSuite\n\n" + - "\\.\\.\\. Panic: WrongSetUpSuiteArgCountHelper" + - "\\.SetUpSuite argument should be \\*check\\.C\n" - - c.Check(output.value, Matches, expected) -} - -// ----------------------------------------------------------------------- -// Helper test suites with wrong function arguments. 
- -type WrongTestArgHelper struct { - FixtureHelper -} - -func (s *WrongTestArgHelper) Test1(t int) { -} - -type WrongSetUpTestArgHelper struct { - FixtureHelper -} - -func (s *WrongSetUpTestArgHelper) SetUpTest(t int) { -} - -type WrongSetUpSuiteArgHelper struct { - FixtureHelper -} - -func (s *WrongSetUpSuiteArgHelper) SetUpSuite(t int) { -} - -type WrongTestArgCountHelper struct { - FixtureHelper -} - -func (s *WrongTestArgCountHelper) Test1(c *C, i int) { -} - -type WrongSetUpTestArgCountHelper struct { - FixtureHelper -} - -func (s *WrongSetUpTestArgCountHelper) SetUpTest(c *C, i int) { -} - -type WrongSetUpSuiteArgCountHelper struct { - FixtureHelper -} - -func (s *WrongSetUpSuiteArgCountHelper) SetUpSuite(c *C, i int) { -} - -// ----------------------------------------------------------------------- -// Ensure fixture doesn't run without tests. - -type NoTestsHelper struct { - hasRun bool -} - -func (s *NoTestsHelper) SetUpSuite(c *C) { - s.hasRun = true -} - -func (s *NoTestsHelper) TearDownSuite(c *C) { - s.hasRun = true -} - -func (s *FixtureS) TestFixtureDoesntRunWithoutTests(c *C) { - helper := NoTestsHelper{} - output := String{} - Run(&helper, &RunConf{Output: &output}) - c.Check(helper.hasRun, Equals, false) -} - -// ----------------------------------------------------------------------- -// Verify that checks and assertions work correctly inside the fixture. - -type FixtureCheckHelper struct { - fail string - completed bool -} - -func (s *FixtureCheckHelper) SetUpSuite(c *C) { - switch s.fail { - case "SetUpSuiteAssert": - c.Assert(false, Equals, true) - case "SetUpSuiteCheck": - c.Check(false, Equals, true) - } - s.completed = true -} - -func (s *FixtureCheckHelper) SetUpTest(c *C) { - switch s.fail { - case "SetUpTestAssert": - c.Assert(false, Equals, true) - case "SetUpTestCheck": - c.Check(false, Equals, true) - } - s.completed = true -} - -func (s *FixtureCheckHelper) Test(c *C) { - // Do nothing. 
-} - -func (s *FixtureS) TestSetUpSuiteCheck(c *C) { - helper := FixtureCheckHelper{fail: "SetUpSuiteCheck"} - output := String{} - Run(&helper, &RunConf{Output: &output}) - c.Assert(output.value, Matches, - "\n---+\n"+ - "FAIL: fixture_test\\.go:[0-9]+: "+ - "FixtureCheckHelper\\.SetUpSuite\n\n"+ - "fixture_test\\.go:[0-9]+:\n"+ - " c\\.Check\\(false, Equals, true\\)\n"+ - "\\.+ obtained bool = false\n"+ - "\\.+ expected bool = true\n\n") - c.Assert(helper.completed, Equals, true) -} - -func (s *FixtureS) TestSetUpSuiteAssert(c *C) { - helper := FixtureCheckHelper{fail: "SetUpSuiteAssert"} - output := String{} - Run(&helper, &RunConf{Output: &output}) - c.Assert(output.value, Matches, - "\n---+\n"+ - "FAIL: fixture_test\\.go:[0-9]+: "+ - "FixtureCheckHelper\\.SetUpSuite\n\n"+ - "fixture_test\\.go:[0-9]+:\n"+ - " c\\.Assert\\(false, Equals, true\\)\n"+ - "\\.+ obtained bool = false\n"+ - "\\.+ expected bool = true\n\n") - c.Assert(helper.completed, Equals, false) -} - -// ----------------------------------------------------------------------- -// Verify that logging within SetUpTest() persists within the test log itself. - -type FixtureLogHelper struct { - c *C -} - -func (s *FixtureLogHelper) SetUpTest(c *C) { - s.c = c - c.Log("1") -} - -func (s *FixtureLogHelper) Test(c *C) { - c.Log("2") - s.c.Log("3") - c.Log("4") - c.Fail() -} - -func (s *FixtureLogHelper) TearDownTest(c *C) { - s.c.Log("5") -} - -func (s *FixtureS) TestFixtureLogging(c *C) { - helper := FixtureLogHelper{} - output := String{} - Run(&helper, &RunConf{Output: &output}) - c.Assert(output.value, Matches, - "\n---+\n"+ - "FAIL: fixture_test\\.go:[0-9]+: "+ - "FixtureLogHelper\\.Test\n\n"+ - "1\n2\n3\n4\n5\n") -} - -// ----------------------------------------------------------------------- -// Skip() within fixture methods. 
- -func (s *FixtureS) TestSkipSuite(c *C) { - helper := FixtureHelper{skip: true, skipOnN: 0} - output := String{} - result := Run(&helper, &RunConf{Output: &output}) - c.Assert(output.value, Equals, "") - c.Assert(helper.calls[0], Equals, "SetUpSuite") - c.Assert(helper.calls[1], Equals, "TearDownSuite") - c.Assert(len(helper.calls), Equals, 2) - c.Assert(result.Skipped, Equals, 2) -} - -func (s *FixtureS) TestSkipTest(c *C) { - helper := FixtureHelper{skip: true, skipOnN: 1} - output := String{} - result := Run(&helper, &RunConf{Output: &output}) - c.Assert(helper.calls[0], Equals, "SetUpSuite") - c.Assert(helper.calls[1], Equals, "SetUpTest") - c.Assert(helper.calls[2], Equals, "SetUpTest") - c.Assert(helper.calls[3], Equals, "Test2") - c.Assert(helper.calls[4], Equals, "TearDownTest") - c.Assert(helper.calls[5], Equals, "TearDownSuite") - c.Assert(len(helper.calls), Equals, 6) - c.Assert(result.Skipped, Equals, 1) -} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/gopkg.in/check.v1/foundation_test.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/gopkg.in/check.v1/foundation_test.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/gopkg.in/check.v1/foundation_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/gopkg.in/check.v1/foundation_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,335 +0,0 @@ -// These tests check that the foundations of gocheck are working properly. -// They already assume that fundamental failing is working already, though, -// since this was tested in bootstrap_test.go. Even then, some care may -// still have to be taken when using external functions, since they should -// of course not rely on functionality tested here. 
- -package check_test - -import ( - "fmt" - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/gopkg.in/check.v1" - "log" - "os" - "regexp" - "strings" -) - -// ----------------------------------------------------------------------- -// Foundation test suite. - -type FoundationS struct{} - -var foundationS = check.Suite(&FoundationS{}) - -func (s *FoundationS) TestCountSuite(c *check.C) { - suitesRun += 1 -} - -func (s *FoundationS) TestErrorf(c *check.C) { - // Do not use checkState() here. It depends on Errorf() working. - expectedLog := fmt.Sprintf("foundation_test.go:%d:\n"+ - " c.Errorf(\"Error %%v!\", \"message\")\n"+ - "... Error: Error message!\n\n", - getMyLine()+1) - c.Errorf("Error %v!", "message") - failed := c.Failed() - c.Succeed() - if log := c.GetTestLog(); log != expectedLog { - c.Logf("Errorf() logged %#v rather than %#v", log, expectedLog) - c.Fail() - } - if !failed { - c.Logf("Errorf() didn't put the test in a failed state") - c.Fail() - } -} - -func (s *FoundationS) TestError(c *check.C) { - expectedLog := fmt.Sprintf("foundation_test.go:%d:\n"+ - " c\\.Error\\(\"Error \", \"message!\"\\)\n"+ - "\\.\\.\\. 
Error: Error message!\n\n", - getMyLine()+1) - c.Error("Error ", "message!") - checkState(c, nil, - &expectedState{ - name: "Error(`Error `, `message!`)", - failed: true, - log: expectedLog, - }) -} - -func (s *FoundationS) TestFailNow(c *check.C) { - defer (func() { - if !c.Failed() { - c.Error("FailNow() didn't fail the test") - } else { - c.Succeed() - if c.GetTestLog() != "" { - c.Error("Something got logged:\n" + c.GetTestLog()) - } - } - })() - - c.FailNow() - c.Log("FailNow() didn't stop the test") -} - -func (s *FoundationS) TestSucceedNow(c *check.C) { - defer (func() { - if c.Failed() { - c.Error("SucceedNow() didn't succeed the test") - } - if c.GetTestLog() != "" { - c.Error("Something got logged:\n" + c.GetTestLog()) - } - })() - - c.Fail() - c.SucceedNow() - c.Log("SucceedNow() didn't stop the test") -} - -func (s *FoundationS) TestFailureHeader(c *check.C) { - output := String{} - failHelper := FailHelper{} - check.Run(&failHelper, &check.RunConf{Output: &output}) - header := fmt.Sprintf(""+ - "\n-----------------------------------"+ - "-----------------------------------\n"+ - "FAIL: check_test.go:%d: FailHelper.TestLogAndFail\n", - failHelper.testLine) - if strings.Index(output.value, header) == -1 { - c.Errorf(""+ - "Failure didn't print a proper header.\n"+ - "... Got:\n%s... Expected something with:\n%s", - output.value, header) - } -} - -func (s *FoundationS) TestFatal(c *check.C) { - var line int - defer (func() { - if !c.Failed() { - c.Error("Fatal() didn't fail the test") - } else { - c.Succeed() - expected := fmt.Sprintf("foundation_test.go:%d:\n"+ - " c.Fatal(\"Die \", \"now!\")\n"+ - "... 
Error: Die now!\n\n", - line) - if c.GetTestLog() != expected { - c.Error("Incorrect log:", c.GetTestLog()) - } - } - })() - - line = getMyLine() + 1 - c.Fatal("Die ", "now!") - c.Log("Fatal() didn't stop the test") -} - -func (s *FoundationS) TestFatalf(c *check.C) { - var line int - defer (func() { - if !c.Failed() { - c.Error("Fatalf() didn't fail the test") - } else { - c.Succeed() - expected := fmt.Sprintf("foundation_test.go:%d:\n"+ - " c.Fatalf(\"Die %%s!\", \"now\")\n"+ - "... Error: Die now!\n\n", - line) - if c.GetTestLog() != expected { - c.Error("Incorrect log:", c.GetTestLog()) - } - } - })() - - line = getMyLine() + 1 - c.Fatalf("Die %s!", "now") - c.Log("Fatalf() didn't stop the test") -} - -func (s *FoundationS) TestCallerLoggingInsideTest(c *check.C) { - log := fmt.Sprintf(""+ - "foundation_test.go:%d:\n"+ - " result := c.Check\\(10, check.Equals, 20\\)\n"+ - "\\.\\.\\. obtained int = 10\n"+ - "\\.\\.\\. expected int = 20\n\n", - getMyLine()+1) - result := c.Check(10, check.Equals, 20) - checkState(c, result, - &expectedState{ - name: "Check(10, Equals, 20)", - result: false, - failed: true, - log: log, - }) -} - -func (s *FoundationS) TestCallerLoggingInDifferentFile(c *check.C) { - result, line := checkEqualWrapper(c, 10, 20) - testLine := getMyLine() - 1 - log := fmt.Sprintf(""+ - "foundation_test.go:%d:\n"+ - " result, line := checkEqualWrapper\\(c, 10, 20\\)\n"+ - "check_test.go:%d:\n"+ - " return c.Check\\(obtained, check.Equals, expected\\), getMyLine\\(\\)\n"+ - "\\.\\.\\. obtained int = 10\n"+ - "\\.\\.\\. expected int = 20\n\n", - testLine, line) - checkState(c, result, - &expectedState{ - name: "Check(10, Equals, 20)", - result: false, - failed: true, - log: log, - }) -} - -// ----------------------------------------------------------------------- -// ExpectFailure() inverts the logic of failure. 
- -type ExpectFailureSucceedHelper struct{} - -func (s *ExpectFailureSucceedHelper) TestSucceed(c *check.C) { - c.ExpectFailure("It booms!") - c.Error("Boom!") -} - -type ExpectFailureFailHelper struct{} - -func (s *ExpectFailureFailHelper) TestFail(c *check.C) { - c.ExpectFailure("Bug #XYZ") -} - -func (s *FoundationS) TestExpectFailureFail(c *check.C) { - helper := ExpectFailureFailHelper{} - output := String{} - result := check.Run(&helper, &check.RunConf{Output: &output}) - - expected := "" + - "^\n-+\n" + - "FAIL: foundation_test\\.go:[0-9]+:" + - " ExpectFailureFailHelper\\.TestFail\n\n" + - "\\.\\.\\. Error: Test succeeded, but was expected to fail\n" + - "\\.\\.\\. Reason: Bug #XYZ\n$" - - matched, err := regexp.MatchString(expected, output.value) - if err != nil { - c.Error("Bad expression: ", expected) - } else if !matched { - c.Error("ExpectFailure() didn't log properly:\n", output.value) - } - - c.Assert(result.ExpectedFailures, check.Equals, 0) -} - -func (s *FoundationS) TestExpectFailureSucceed(c *check.C) { - helper := ExpectFailureSucceedHelper{} - output := String{} - result := check.Run(&helper, &check.RunConf{Output: &output}) - - c.Assert(output.value, check.Equals, "") - c.Assert(result.ExpectedFailures, check.Equals, 1) -} - -func (s *FoundationS) TestExpectFailureSucceedVerbose(c *check.C) { - helper := ExpectFailureSucceedHelper{} - output := String{} - result := check.Run(&helper, &check.RunConf{Output: &output, Verbose: true}) - - expected := "" + - "FAIL EXPECTED: foundation_test\\.go:[0-9]+:" + - " ExpectFailureSucceedHelper\\.TestSucceed \\(It booms!\\)\t *[.0-9]+s\n" - - matched, err := regexp.MatchString(expected, output.value) - if err != nil { - c.Error("Bad expression: ", expected) - } else if !matched { - c.Error("ExpectFailure() didn't log properly:\n", output.value) - } - - c.Assert(result.ExpectedFailures, check.Equals, 1) -} - -// ----------------------------------------------------------------------- -// Skip() allows 
stopping a test without positive/negative results. - -type SkipTestHelper struct{} - -func (s *SkipTestHelper) TestFail(c *check.C) { - c.Skip("Wrong platform or whatever") - c.Error("Boom!") -} - -func (s *FoundationS) TestSkip(c *check.C) { - helper := SkipTestHelper{} - output := String{} - check.Run(&helper, &check.RunConf{Output: &output}) - - if output.value != "" { - c.Error("Skip() logged something:\n", output.value) - } -} - -func (s *FoundationS) TestSkipVerbose(c *check.C) { - helper := SkipTestHelper{} - output := String{} - check.Run(&helper, &check.RunConf{Output: &output, Verbose: true}) - - expected := "SKIP: foundation_test\\.go:[0-9]+: SkipTestHelper\\.TestFail" + - " \\(Wrong platform or whatever\\)" - matched, err := regexp.MatchString(expected, output.value) - if err != nil { - c.Error("Bad expression: ", expected) - } else if !matched { - c.Error("Skip() didn't log properly:\n", output.value) - } -} - -// ----------------------------------------------------------------------- -// Check minimum *log.Logger interface provided by *check.C. - -type minLogger interface { - Output(calldepth int, s string) error -} - -func (s *BootstrapS) TestMinLogger(c *check.C) { - var logger minLogger - logger = log.New(os.Stderr, "", 0) - logger = c - logger.Output(0, "Hello there") - expected := `\[LOG\] [0-9]+:[0-9][0-9]\.[0-9][0-9][0-9] +Hello there\n` - output := c.GetTestLog() - c.Assert(output, check.Matches, expected) -} - -// ----------------------------------------------------------------------- -// Ensure that suites with embedded types are working fine, including the -// the workaround for issue 906. 
- -type EmbeddedInternalS struct { - called bool -} - -type EmbeddedS struct { - EmbeddedInternalS -} - -var embeddedS = check.Suite(&EmbeddedS{}) - -func (s *EmbeddedS) TestCountSuite(c *check.C) { - suitesRun += 1 -} - -func (s *EmbeddedInternalS) TestMethod(c *check.C) { - c.Error("TestMethod() of the embedded type was called!?") -} - -func (s *EmbeddedS) TestMethod(c *check.C) { - // http://code.google.com/p/go/issues/detail?id=906 - c.Check(s.called, check.Equals, false) // Go issue 906 is affecting the runner? - s.called = true -} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/gopkg.in/check.v1/helpers.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/gopkg.in/check.v1/helpers.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/gopkg.in/check.v1/helpers.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/gopkg.in/check.v1/helpers.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,231 +0,0 @@ -package check - -import ( - "fmt" - "strings" - "time" -) - -// TestName returns the current test name in the form "SuiteName.TestName" -func (c *C) TestName() string { - return c.testName -} - -// ----------------------------------------------------------------------- -// Basic succeeding/failing logic. - -// Failed returns whether the currently running test has already failed. -func (c *C) Failed() bool { - return c.status() == failedSt -} - -// Fail marks the currently running test as failed. -// -// Something ought to have been previously logged so the developer can tell -// what went wrong. The higher level helper functions will fail the test -// and do the logging properly. -func (c *C) Fail() { - c.setStatus(failedSt) -} - -// FailNow marks the currently running test as failed and stops running it. -// Something ought to have been previously logged so the developer can tell -// what went wrong. 
The higher level helper functions will fail the test -// and do the logging properly. -func (c *C) FailNow() { - c.Fail() - c.stopNow() -} - -// Succeed marks the currently running test as succeeded, undoing any -// previous failures. -func (c *C) Succeed() { - c.setStatus(succeededSt) -} - -// SucceedNow marks the currently running test as succeeded, undoing any -// previous failures, and stops running the test. -func (c *C) SucceedNow() { - c.Succeed() - c.stopNow() -} - -// ExpectFailure informs that the running test is knowingly broken for -// the provided reason. If the test does not fail, an error will be reported -// to raise attention to this fact. This method is useful to temporarily -// disable tests which cover well known problems until a better time to -// fix the problem is found, without forgetting about the fact that a -// failure still exists. -func (c *C) ExpectFailure(reason string) { - if reason == "" { - panic("Missing reason why the test is expected to fail") - } - c.mustFail = true - c.reason = reason -} - -// Skip skips the running test for the provided reason. If run from within -// SetUpTest, the individual test being set up will be skipped, and if run -// from within SetUpSuite, the whole suite is skipped. -func (c *C) Skip(reason string) { - if reason == "" { - panic("Missing reason why the test is being skipped") - } - c.reason = reason - c.setStatus(skippedSt) - c.stopNow() -} - -// ----------------------------------------------------------------------- -// Basic logging. - -// GetTestLog returns the current test error output. -func (c *C) GetTestLog() string { - return c.logb.String() -} - -// Log logs some information into the test error output. -// The provided arguments are assembled together into a string with fmt.Sprint. -func (c *C) Log(args ...interface{}) { - c.log(args...) -} - -// Log logs some information into the test error output. -// The provided arguments are assembled together into a string with fmt.Sprintf. 
-func (c *C) Logf(format string, args ...interface{}) { - c.logf(format, args...) -} - -// Output enables *C to be used as a logger in functions that require only -// the minimum interface of *log.Logger. -func (c *C) Output(calldepth int, s string) error { - d := time.Now().Sub(c.startTime) - msec := d / time.Millisecond - sec := d / time.Second - min := d / time.Minute - - c.Logf("[LOG] %d:%02d.%03d %s", min, sec%60, msec%1000, s) - return nil -} - -// Error logs an error into the test error output and marks the test as failed. -// The provided arguments are assembled together into a string with fmt.Sprint. -func (c *C) Error(args ...interface{}) { - c.logCaller(1) - c.logString(fmt.Sprint("Error: ", fmt.Sprint(args...))) - c.logNewLine() - c.Fail() -} - -// Errorf logs an error into the test error output and marks the test as failed. -// The provided arguments are assembled together into a string with fmt.Sprintf. -func (c *C) Errorf(format string, args ...interface{}) { - c.logCaller(1) - c.logString(fmt.Sprintf("Error: "+format, args...)) - c.logNewLine() - c.Fail() -} - -// Fatal logs an error into the test error output, marks the test as failed, and -// stops the test execution. The provided arguments are assembled together into -// a string with fmt.Sprint. -func (c *C) Fatal(args ...interface{}) { - c.logCaller(1) - c.logString(fmt.Sprint("Error: ", fmt.Sprint(args...))) - c.logNewLine() - c.FailNow() -} - -// Fatlaf logs an error into the test error output, marks the test as failed, and -// stops the test execution. The provided arguments are assembled together into -// a string with fmt.Sprintf. -func (c *C) Fatalf(format string, args ...interface{}) { - c.logCaller(1) - c.logString(fmt.Sprint("Error: ", fmt.Sprintf(format, args...))) - c.logNewLine() - c.FailNow() -} - -// ----------------------------------------------------------------------- -// Generic checks and assertions based on checkers. 
- -// Check verifies if the first value matches the expected value according -// to the provided checker. If they do not match, an error is logged, the -// test is marked as failed, and the test execution continues. -// -// Some checkers may not need the expected argument (e.g. IsNil). -// -// Extra arguments provided to the function are logged next to the reported -// problem when the matching fails. -func (c *C) Check(obtained interface{}, checker Checker, args ...interface{}) bool { - return c.internalCheck("Check", obtained, checker, args...) -} - -// Assert ensures that the first value matches the expected value according -// to the provided checker. If they do not match, an error is logged, the -// test is marked as failed, and the test execution stops. -// -// Some checkers may not need the expected argument (e.g. IsNil). -// -// Extra arguments provided to the function are logged next to the reported -// problem when the matching fails. -func (c *C) Assert(obtained interface{}, checker Checker, args ...interface{}) { - if !c.internalCheck("Assert", obtained, checker, args...) { - c.stopNow() - } -} - -func (c *C) internalCheck(funcName string, obtained interface{}, checker Checker, args ...interface{}) bool { - if checker == nil { - c.logCaller(2) - c.logString(fmt.Sprintf("%s(obtained, nil!?, ...):", funcName)) - c.logString("Oops.. you've provided a nil checker!") - c.logNewLine() - c.Fail() - return false - } - - // If the last argument is a bug info, extract it out. - var comment CommentInterface - if len(args) > 0 { - if c, ok := args[len(args)-1].(CommentInterface); ok { - comment = c - args = args[:len(args)-1] - } - } - - params := append([]interface{}{obtained}, args...) - info := checker.Info() - - if len(params) != len(info.Params) { - names := append([]string{info.Params[0], info.Name}, info.Params[1:]...) 
- c.logCaller(2) - c.logString(fmt.Sprintf("%s(%s):", funcName, strings.Join(names, ", "))) - c.logString(fmt.Sprintf("Wrong number of parameters for %s: want %d, got %d", info.Name, len(names), len(params)+1)) - c.logNewLine() - c.Fail() - return false - } - - // Copy since it may be mutated by Check. - names := append([]string{}, info.Params...) - - // Do the actual check. - result, error := checker.Check(params, names) - if !result || error != "" { - c.logCaller(2) - for i := 0; i != len(params); i++ { - c.logValue(names[i], params[i]) - } - if comment != nil { - c.logString(comment.CheckCommentString()) - } - if error != "" { - c.logString(error) - } - c.logNewLine() - c.Fail() - return false - } - return true -} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/gopkg.in/check.v1/helpers_test.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/gopkg.in/check.v1/helpers_test.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/gopkg.in/check.v1/helpers_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/gopkg.in/check.v1/helpers_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,519 +0,0 @@ -// These tests verify the inner workings of the helper methods associated -// with check.T. - -package check_test - -import ( - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/gopkg.in/check.v1" - "os" - "reflect" - "runtime" - "sync" -) - -var helpersS = check.Suite(&HelpersS{}) - -type HelpersS struct{} - -func (s *HelpersS) TestCountSuite(c *check.C) { - suitesRun += 1 -} - -// ----------------------------------------------------------------------- -// Fake checker and bug info to verify the behavior of Assert() and Check(). 
- -type MyChecker struct { - info *check.CheckerInfo - params []interface{} - names []string - result bool - error string -} - -func (checker *MyChecker) Info() *check.CheckerInfo { - if checker.info == nil { - return &check.CheckerInfo{Name: "MyChecker", Params: []string{"myobtained", "myexpected"}} - } - return checker.info -} - -func (checker *MyChecker) Check(params []interface{}, names []string) (bool, string) { - rparams := checker.params - rnames := checker.names - checker.params = append([]interface{}{}, params...) - checker.names = append([]string{}, names...) - if rparams != nil { - copy(params, rparams) - } - if rnames != nil { - copy(names, rnames) - } - return checker.result, checker.error -} - -type myCommentType string - -func (c myCommentType) CheckCommentString() string { - return string(c) -} - -func myComment(s string) myCommentType { - return myCommentType(s) -} - -// ----------------------------------------------------------------------- -// Ensure a real checker actually works fine. - -func (s *HelpersS) TestCheckerInterface(c *check.C) { - testHelperSuccess(c, "Check(1, Equals, 1)", true, func() interface{} { - return c.Check(1, check.Equals, 1) - }) -} - -// ----------------------------------------------------------------------- -// Tests for Check(), mostly the same as for Assert() following these. 
- -func (s *HelpersS) TestCheckSucceedWithExpected(c *check.C) { - checker := &MyChecker{result: true} - testHelperSuccess(c, "Check(1, checker, 2)", true, func() interface{} { - return c.Check(1, checker, 2) - }) - if !reflect.DeepEqual(checker.params, []interface{}{1, 2}) { - c.Fatalf("Bad params for check: %#v", checker.params) - } -} - -func (s *HelpersS) TestCheckSucceedWithoutExpected(c *check.C) { - checker := &MyChecker{result: true, info: &check.CheckerInfo{Params: []string{"myvalue"}}} - testHelperSuccess(c, "Check(1, checker)", true, func() interface{} { - return c.Check(1, checker) - }) - if !reflect.DeepEqual(checker.params, []interface{}{1}) { - c.Fatalf("Bad params for check: %#v", checker.params) - } -} - -func (s *HelpersS) TestCheckFailWithExpected(c *check.C) { - checker := &MyChecker{result: false} - log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" + - " return c\\.Check\\(1, checker, 2\\)\n" + - "\\.+ myobtained int = 1\n" + - "\\.+ myexpected int = 2\n\n" - testHelperFailure(c, "Check(1, checker, 2)", false, false, log, - func() interface{} { - return c.Check(1, checker, 2) - }) -} - -func (s *HelpersS) TestCheckFailWithExpectedAndComment(c *check.C) { - checker := &MyChecker{result: false} - log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" + - " return c\\.Check\\(1, checker, 2, myComment\\(\"Hello world!\"\\)\\)\n" + - "\\.+ myobtained int = 1\n" + - "\\.+ myexpected int = 2\n" + - "\\.+ Hello world!\n\n" - testHelperFailure(c, "Check(1, checker, 2, msg)", false, false, log, - func() interface{} { - return c.Check(1, checker, 2, myComment("Hello world!")) - }) -} - -func (s *HelpersS) TestCheckFailWithExpectedAndStaticComment(c *check.C) { - checker := &MyChecker{result: false} - log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" + - " // Nice leading comment\\.\n" + - " return c\\.Check\\(1, checker, 2\\) // Hello there\n" + - "\\.+ myobtained int = 1\n" + - "\\.+ myexpected int = 
2\n\n" - testHelperFailure(c, "Check(1, checker, 2, msg)", false, false, log, - func() interface{} { - // Nice leading comment. - return c.Check(1, checker, 2) // Hello there - }) -} - -func (s *HelpersS) TestCheckFailWithoutExpected(c *check.C) { - checker := &MyChecker{result: false, info: &check.CheckerInfo{Params: []string{"myvalue"}}} - log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" + - " return c\\.Check\\(1, checker\\)\n" + - "\\.+ myvalue int = 1\n\n" - testHelperFailure(c, "Check(1, checker)", false, false, log, - func() interface{} { - return c.Check(1, checker) - }) -} - -func (s *HelpersS) TestCheckFailWithoutExpectedAndMessage(c *check.C) { - checker := &MyChecker{result: false, info: &check.CheckerInfo{Params: []string{"myvalue"}}} - log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" + - " return c\\.Check\\(1, checker, myComment\\(\"Hello world!\"\\)\\)\n" + - "\\.+ myvalue int = 1\n" + - "\\.+ Hello world!\n\n" - testHelperFailure(c, "Check(1, checker, msg)", false, false, log, - func() interface{} { - return c.Check(1, checker, myComment("Hello world!")) - }) -} - -func (s *HelpersS) TestCheckWithMissingExpected(c *check.C) { - checker := &MyChecker{result: true} - log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" + - " return c\\.Check\\(1, checker\\)\n" + - "\\.+ Check\\(myobtained, MyChecker, myexpected\\):\n" + - "\\.+ Wrong number of parameters for MyChecker: " + - "want 3, got 2\n\n" - testHelperFailure(c, "Check(1, checker, !?)", false, false, log, - func() interface{} { - return c.Check(1, checker) - }) -} - -func (s *HelpersS) TestCheckWithTooManyExpected(c *check.C) { - checker := &MyChecker{result: true} - log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" + - " return c\\.Check\\(1, checker, 2, 3\\)\n" + - "\\.+ Check\\(myobtained, MyChecker, myexpected\\):\n" + - "\\.+ Wrong number of parameters for MyChecker: " + - "want 3, got 4\n\n" - 
testHelperFailure(c, "Check(1, checker, 2, 3)", false, false, log, - func() interface{} { - return c.Check(1, checker, 2, 3) - }) -} - -func (s *HelpersS) TestCheckWithError(c *check.C) { - checker := &MyChecker{result: false, error: "Some not so cool data provided!"} - log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" + - " return c\\.Check\\(1, checker, 2\\)\n" + - "\\.+ myobtained int = 1\n" + - "\\.+ myexpected int = 2\n" + - "\\.+ Some not so cool data provided!\n\n" - testHelperFailure(c, "Check(1, checker, 2)", false, false, log, - func() interface{} { - return c.Check(1, checker, 2) - }) -} - -func (s *HelpersS) TestCheckWithNilChecker(c *check.C) { - log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" + - " return c\\.Check\\(1, nil\\)\n" + - "\\.+ Check\\(obtained, nil!\\?, \\.\\.\\.\\):\n" + - "\\.+ Oops\\.\\. you've provided a nil checker!\n\n" - testHelperFailure(c, "Check(obtained, nil)", false, false, log, - func() interface{} { - return c.Check(1, nil) - }) -} - -func (s *HelpersS) TestCheckWithParamsAndNamesMutation(c *check.C) { - checker := &MyChecker{result: false, params: []interface{}{3, 4}, names: []string{"newobtained", "newexpected"}} - log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" + - " return c\\.Check\\(1, checker, 2\\)\n" + - "\\.+ newobtained int = 3\n" + - "\\.+ newexpected int = 4\n\n" - testHelperFailure(c, "Check(1, checker, 2) with mutation", false, false, log, - func() interface{} { - return c.Check(1, checker, 2) - }) -} - -// ----------------------------------------------------------------------- -// Tests for Assert(), mostly the same as for Check() above. 
- -func (s *HelpersS) TestAssertSucceedWithExpected(c *check.C) { - checker := &MyChecker{result: true} - testHelperSuccess(c, "Assert(1, checker, 2)", nil, func() interface{} { - c.Assert(1, checker, 2) - return nil - }) - if !reflect.DeepEqual(checker.params, []interface{}{1, 2}) { - c.Fatalf("Bad params for check: %#v", checker.params) - } -} - -func (s *HelpersS) TestAssertSucceedWithoutExpected(c *check.C) { - checker := &MyChecker{result: true, info: &check.CheckerInfo{Params: []string{"myvalue"}}} - testHelperSuccess(c, "Assert(1, checker)", nil, func() interface{} { - c.Assert(1, checker) - return nil - }) - if !reflect.DeepEqual(checker.params, []interface{}{1}) { - c.Fatalf("Bad params for check: %#v", checker.params) - } -} - -func (s *HelpersS) TestAssertFailWithExpected(c *check.C) { - checker := &MyChecker{result: false} - log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" + - " c\\.Assert\\(1, checker, 2\\)\n" + - "\\.+ myobtained int = 1\n" + - "\\.+ myexpected int = 2\n\n" - testHelperFailure(c, "Assert(1, checker, 2)", nil, true, log, - func() interface{} { - c.Assert(1, checker, 2) - return nil - }) -} - -func (s *HelpersS) TestAssertFailWithExpectedAndMessage(c *check.C) { - checker := &MyChecker{result: false} - log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" + - " c\\.Assert\\(1, checker, 2, myComment\\(\"Hello world!\"\\)\\)\n" + - "\\.+ myobtained int = 1\n" + - "\\.+ myexpected int = 2\n" + - "\\.+ Hello world!\n\n" - testHelperFailure(c, "Assert(1, checker, 2, msg)", nil, true, log, - func() interface{} { - c.Assert(1, checker, 2, myComment("Hello world!")) - return nil - }) -} - -func (s *HelpersS) TestAssertFailWithoutExpected(c *check.C) { - checker := &MyChecker{result: false, info: &check.CheckerInfo{Params: []string{"myvalue"}}} - log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" + - " c\\.Assert\\(1, checker\\)\n" + - "\\.+ myvalue int = 1\n\n" - testHelperFailure(c, 
"Assert(1, checker)", nil, true, log, - func() interface{} { - c.Assert(1, checker) - return nil - }) -} - -func (s *HelpersS) TestAssertFailWithoutExpectedAndMessage(c *check.C) { - checker := &MyChecker{result: false, info: &check.CheckerInfo{Params: []string{"myvalue"}}} - log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" + - " c\\.Assert\\(1, checker, myComment\\(\"Hello world!\"\\)\\)\n" + - "\\.+ myvalue int = 1\n" + - "\\.+ Hello world!\n\n" - testHelperFailure(c, "Assert(1, checker, msg)", nil, true, log, - func() interface{} { - c.Assert(1, checker, myComment("Hello world!")) - return nil - }) -} - -func (s *HelpersS) TestAssertWithMissingExpected(c *check.C) { - checker := &MyChecker{result: true} - log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" + - " c\\.Assert\\(1, checker\\)\n" + - "\\.+ Assert\\(myobtained, MyChecker, myexpected\\):\n" + - "\\.+ Wrong number of parameters for MyChecker: " + - "want 3, got 2\n\n" - testHelperFailure(c, "Assert(1, checker, !?)", nil, true, log, - func() interface{} { - c.Assert(1, checker) - return nil - }) -} - -func (s *HelpersS) TestAssertWithError(c *check.C) { - checker := &MyChecker{result: false, error: "Some not so cool data provided!"} - log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" + - " c\\.Assert\\(1, checker, 2\\)\n" + - "\\.+ myobtained int = 1\n" + - "\\.+ myexpected int = 2\n" + - "\\.+ Some not so cool data provided!\n\n" - testHelperFailure(c, "Assert(1, checker, 2)", nil, true, log, - func() interface{} { - c.Assert(1, checker, 2) - return nil - }) -} - -func (s *HelpersS) TestAssertWithNilChecker(c *check.C) { - log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" + - " c\\.Assert\\(1, nil\\)\n" + - "\\.+ Assert\\(obtained, nil!\\?, \\.\\.\\.\\):\n" + - "\\.+ Oops\\.\\. 
you've provided a nil checker!\n\n" - testHelperFailure(c, "Assert(obtained, nil)", nil, true, log, - func() interface{} { - c.Assert(1, nil) - return nil - }) -} - -// ----------------------------------------------------------------------- -// Ensure that values logged work properly in some interesting cases. - -func (s *HelpersS) TestValueLoggingWithArrays(c *check.C) { - checker := &MyChecker{result: false} - log := "(?s)helpers_test.go:[0-9]+:.*\nhelpers_test.go:[0-9]+:\n" + - " return c\\.Check\\(\\[\\]byte{1, 2}, checker, \\[\\]byte{1, 3}\\)\n" + - "\\.+ myobtained \\[\\]uint8 = \\[\\]byte{0x1, 0x2}\n" + - "\\.+ myexpected \\[\\]uint8 = \\[\\]byte{0x1, 0x3}\n\n" - testHelperFailure(c, "Check([]byte{1}, chk, []byte{3})", false, false, log, - func() interface{} { - return c.Check([]byte{1, 2}, checker, []byte{1, 3}) - }) -} - -func (s *HelpersS) TestValueLoggingWithMultiLine(c *check.C) { - checker := &MyChecker{result: false} - log := "(?s)helpers_test.go:[0-9]+:.*\nhelpers_test.go:[0-9]+:\n" + - " return c\\.Check\\(\"a\\\\nb\\\\n\", checker, \"a\\\\nb\\\\nc\"\\)\n" + - "\\.+ myobtained string = \"\" \\+\n" + - "\\.+ \"a\\\\n\" \\+\n" + - "\\.+ \"b\\\\n\"\n" + - "\\.+ myexpected string = \"\" \\+\n" + - "\\.+ \"a\\\\n\" \\+\n" + - "\\.+ \"b\\\\n\" \\+\n" + - "\\.+ \"c\"\n\n" - testHelperFailure(c, `Check("a\nb\n", chk, "a\nb\nc")`, false, false, log, - func() interface{} { - return c.Check("a\nb\n", checker, "a\nb\nc") - }) -} - -func (s *HelpersS) TestValueLoggingWithMultiLineException(c *check.C) { - // If the newline is at the end of the string, don't log as multi-line. 
- checker := &MyChecker{result: false} - log := "(?s)helpers_test.go:[0-9]+:.*\nhelpers_test.go:[0-9]+:\n" + - " return c\\.Check\\(\"a b\\\\n\", checker, \"a\\\\nb\"\\)\n" + - "\\.+ myobtained string = \"a b\\\\n\"\n" + - "\\.+ myexpected string = \"\" \\+\n" + - "\\.+ \"a\\\\n\" \\+\n" + - "\\.+ \"b\"\n\n" - testHelperFailure(c, `Check("a b\n", chk, "a\nb")`, false, false, log, - func() interface{} { - return c.Check("a b\n", checker, "a\nb") - }) -} - -// ----------------------------------------------------------------------- -// MakeDir() tests. - -type MkDirHelper struct { - path1 string - path2 string - isDir1 bool - isDir2 bool - isDir3 bool - isDir4 bool -} - -func (s *MkDirHelper) SetUpSuite(c *check.C) { - s.path1 = c.MkDir() - s.isDir1 = isDir(s.path1) -} - -func (s *MkDirHelper) Test(c *check.C) { - s.path2 = c.MkDir() - s.isDir2 = isDir(s.path2) -} - -func (s *MkDirHelper) TearDownSuite(c *check.C) { - s.isDir3 = isDir(s.path1) - s.isDir4 = isDir(s.path2) -} - -func (s *HelpersS) TestMkDir(c *check.C) { - helper := MkDirHelper{} - output := String{} - check.Run(&helper, &check.RunConf{Output: &output}) - c.Assert(output.value, check.Equals, "") - c.Check(helper.isDir1, check.Equals, true) - c.Check(helper.isDir2, check.Equals, true) - c.Check(helper.isDir3, check.Equals, true) - c.Check(helper.isDir4, check.Equals, true) - c.Check(helper.path1, check.Not(check.Equals), - helper.path2) - c.Check(isDir(helper.path1), check.Equals, false) - c.Check(isDir(helper.path2), check.Equals, false) -} - -func isDir(path string) bool { - if stat, err := os.Stat(path); err == nil { - return stat.IsDir() - } - return false -} - -// Concurrent logging should not corrupt the underling buffer. -// Use go test -race to detect the race in this test. 
-func (s *HelpersS) TestConcurrentLogging(c *check.C) { - defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(runtime.NumCPU())) - var start, stop sync.WaitGroup - start.Add(1) - for i, n := 0, runtime.NumCPU()*2; i < n; i++ { - stop.Add(1) - go func(i int) { - start.Wait() - for j := 0; j < 30; j++ { - c.Logf("Worker %d: line %d", i, j) - } - stop.Done() - }(i) - } - start.Done() - stop.Wait() -} - -// ----------------------------------------------------------------------- -// Test the TestName function - -type TestNameHelper struct { - name1 string - name2 string - name3 string - name4 string - name5 string -} - -func (s *TestNameHelper) SetUpSuite(c *check.C) { s.name1 = c.TestName() } -func (s *TestNameHelper) SetUpTest(c *check.C) { s.name2 = c.TestName() } -func (s *TestNameHelper) Test(c *check.C) { s.name3 = c.TestName() } -func (s *TestNameHelper) TearDownTest(c *check.C) { s.name4 = c.TestName() } -func (s *TestNameHelper) TearDownSuite(c *check.C) { s.name5 = c.TestName() } - -func (s *HelpersS) TestTestName(c *check.C) { - helper := TestNameHelper{} - output := String{} - check.Run(&helper, &check.RunConf{Output: &output}) - c.Check(helper.name1, check.Equals, "") - c.Check(helper.name2, check.Equals, "TestNameHelper.Test") - c.Check(helper.name3, check.Equals, "TestNameHelper.Test") - c.Check(helper.name4, check.Equals, "TestNameHelper.Test") - c.Check(helper.name5, check.Equals, "") -} - -// ----------------------------------------------------------------------- -// A couple of helper functions to test helper functions. 
:-) - -func testHelperSuccess(c *check.C, name string, expectedResult interface{}, closure func() interface{}) { - var result interface{} - defer (func() { - if err := recover(); err != nil { - panic(err) - } - checkState(c, result, - &expectedState{ - name: name, - result: expectedResult, - failed: false, - log: "", - }) - })() - result = closure() -} - -func testHelperFailure(c *check.C, name string, expectedResult interface{}, shouldStop bool, log string, closure func() interface{}) { - var result interface{} - defer (func() { - if err := recover(); err != nil { - panic(err) - } - checkState(c, result, - &expectedState{ - name: name, - result: expectedResult, - failed: true, - log: log, - }) - })() - result = closure() - if shouldStop { - c.Logf("%s didn't stop when it should", name) - } -} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/gopkg.in/check.v1/LICENSE juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/gopkg.in/check.v1/LICENSE --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/gopkg.in/check.v1/LICENSE 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/gopkg.in/check.v1/LICENSE 1970-01-01 00:00:00.000000000 +0000 @@ -1,25 +0,0 @@ -Gocheck - A rich testing framework for Go - -Copyright (c) 2010-2013 Gustavo Niemeyer - -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -1. Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. -2. Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. 
- -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/gopkg.in/check.v1/printer.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/gopkg.in/check.v1/printer.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/gopkg.in/check.v1/printer.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/gopkg.in/check.v1/printer.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,168 +0,0 @@ -package check - -import ( - "bytes" - "go/ast" - "go/parser" - "go/printer" - "go/token" - "os" -) - -func indent(s, with string) (r string) { - eol := true - for i := 0; i != len(s); i++ { - c := s[i] - switch { - case eol && c == '\n' || c == '\r': - case c == '\n' || c == '\r': - eol = true - case eol: - eol = false - s = s[:i] + with + s[i:] - i += len(with) - } - } - return s -} - -func printLine(filename string, line int) (string, error) { - fset := token.NewFileSet() - file, err := os.Open(filename) - if err != nil { - return "", err - } - fnode, err := parser.ParseFile(fset, filename, file, parser.ParseComments) - if err != nil { - return "", err - } - config := 
&printer.Config{Mode: printer.UseSpaces, Tabwidth: 4} - lp := &linePrinter{fset: fset, fnode: fnode, line: line, config: config} - ast.Walk(lp, fnode) - result := lp.output.Bytes() - // Comments leave \n at the end. - n := len(result) - for n > 0 && result[n-1] == '\n' { - n-- - } - return string(result[:n]), nil -} - -type linePrinter struct { - config *printer.Config - fset *token.FileSet - fnode *ast.File - line int - output bytes.Buffer - stmt ast.Stmt -} - -func (lp *linePrinter) emit() bool { - if lp.stmt != nil { - lp.trim(lp.stmt) - lp.printWithComments(lp.stmt) - lp.stmt = nil - return true - } - return false -} - -func (lp *linePrinter) printWithComments(n ast.Node) { - nfirst := lp.fset.Position(n.Pos()).Line - nlast := lp.fset.Position(n.End()).Line - for _, g := range lp.fnode.Comments { - cfirst := lp.fset.Position(g.Pos()).Line - clast := lp.fset.Position(g.End()).Line - if clast == nfirst-1 && lp.fset.Position(n.Pos()).Column == lp.fset.Position(g.Pos()).Column { - for _, c := range g.List { - lp.output.WriteString(c.Text) - lp.output.WriteByte('\n') - } - } - if cfirst >= nfirst && cfirst <= nlast && n.End() <= g.List[0].Slash { - // The printer will not include the comment if it starts past - // the node itself. Trick it into printing by overlapping the - // slash with the end of the statement. - g.List[0].Slash = n.End() - 1 - } - } - node := &printer.CommentedNode{n, lp.fnode.Comments} - lp.config.Fprint(&lp.output, lp.fset, node) -} - -func (lp *linePrinter) Visit(n ast.Node) (w ast.Visitor) { - if n == nil { - if lp.output.Len() == 0 { - lp.emit() - } - return nil - } - first := lp.fset.Position(n.Pos()).Line - last := lp.fset.Position(n.End()).Line - if first <= lp.line && last >= lp.line { - // Print the innermost statement containing the line. 
- if stmt, ok := n.(ast.Stmt); ok { - if _, ok := n.(*ast.BlockStmt); !ok { - lp.stmt = stmt - } - } - if first == lp.line && lp.emit() { - return nil - } - return lp - } - return nil -} - -func (lp *linePrinter) trim(n ast.Node) bool { - stmt, ok := n.(ast.Stmt) - if !ok { - return true - } - line := lp.fset.Position(n.Pos()).Line - if line != lp.line { - return false - } - switch stmt := stmt.(type) { - case *ast.IfStmt: - stmt.Body = lp.trimBlock(stmt.Body) - case *ast.SwitchStmt: - stmt.Body = lp.trimBlock(stmt.Body) - case *ast.TypeSwitchStmt: - stmt.Body = lp.trimBlock(stmt.Body) - case *ast.CaseClause: - stmt.Body = lp.trimList(stmt.Body) - case *ast.CommClause: - stmt.Body = lp.trimList(stmt.Body) - case *ast.BlockStmt: - stmt.List = lp.trimList(stmt.List) - } - return true -} - -func (lp *linePrinter) trimBlock(stmt *ast.BlockStmt) *ast.BlockStmt { - if !lp.trim(stmt) { - return lp.emptyBlock(stmt) - } - stmt.Rbrace = stmt.Lbrace - return stmt -} - -func (lp *linePrinter) trimList(stmts []ast.Stmt) []ast.Stmt { - for i := 0; i != len(stmts); i++ { - if !lp.trim(stmts[i]) { - stmts[i] = lp.emptyStmt(stmts[i]) - break - } - } - return stmts -} - -func (lp *linePrinter) emptyStmt(n ast.Node) *ast.ExprStmt { - return &ast.ExprStmt{&ast.Ellipsis{n.Pos(), nil}} -} - -func (lp *linePrinter) emptyBlock(n ast.Node) *ast.BlockStmt { - p := n.Pos() - return &ast.BlockStmt{p, []ast.Stmt{lp.emptyStmt(n)}, p} -} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/gopkg.in/check.v1/printer_test.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/gopkg.in/check.v1/printer_test.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/gopkg.in/check.v1/printer_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/gopkg.in/check.v1/printer_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,109 +0,0 @@ -package 
check_test - -import ( - . "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/gopkg.in/check.v1" -) - -var _ = Suite(&PrinterS{}) - -type PrinterS struct{} - -func (s *PrinterS) TestCountSuite(c *C) { - suitesRun += 1 -} - -var printTestFuncLine int - -func init() { - printTestFuncLine = getMyLine() + 3 -} - -func printTestFunc() { - println(1) // Comment1 - if 2 == 2 { // Comment2 - println(3) // Comment3 - } - switch 5 { - case 6: - println(6) // Comment6 - println(7) - } - switch interface{}(9).(type) { // Comment9 - case int: - println(10) - println(11) - } - select { - case <-(chan bool)(nil): - println(14) - println(15) - default: - println(16) - println(17) - } - println(19, - 20) - _ = func() { - println(21) - println(22) - } - println(24, func() { - println(25) - }) - // Leading comment - // with multiple lines. - println(29) // Comment29 -} - -var printLineTests = []struct { - line int - output string -}{ - {1, "println(1) // Comment1"}, - {2, "if 2 == 2 { // Comment2\n ...\n}"}, - {3, "println(3) // Comment3"}, - {5, "switch 5 {\n...\n}"}, - {6, "case 6:\n println(6) // Comment6\n ..."}, - {7, "println(7)"}, - {9, "switch interface{}(9).(type) { // Comment9\n...\n}"}, - {10, "case int:\n println(10)\n ..."}, - {14, "case <-(chan bool)(nil):\n println(14)\n ..."}, - {15, "println(15)"}, - {16, "default:\n println(16)\n ..."}, - {17, "println(17)"}, - {19, "println(19,\n 20)"}, - {20, "println(19,\n 20)"}, - {21, "_ = func() {\n println(21)\n println(22)\n}"}, - {22, "println(22)"}, - {24, "println(24, func() {\n println(25)\n})"}, - {25, "println(25)"}, - {26, "println(24, func() {\n println(25)\n})"}, - {29, "// Leading comment\n// with multiple lines.\nprintln(29) // Comment29"}, -} - -func (s *PrinterS) TestPrintLine(c *C) { - for _, test := range printLineTests { - output, err := PrintLine("printer_test.go", printTestFuncLine+test.line) - c.Assert(err, IsNil) - c.Assert(output, Equals, test.output) - } -} - -var indentTests = []struct { - in, 
out string -}{ - {"", ""}, - {"\n", "\n"}, - {"a", ">>>a"}, - {"a\n", ">>>a\n"}, - {"a\nb", ">>>a\n>>>b"}, - {" ", ">>> "}, -} - -func (s *PrinterS) TestIndent(c *C) { - for _, test := range indentTests { - out := Indent(test.in, ">>>") - c.Assert(out, Equals, test.out) - } - -} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/gopkg.in/check.v1/README.md juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/gopkg.in/check.v1/README.md --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/gopkg.in/check.v1/README.md 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/gopkg.in/check.v1/README.md 1970-01-01 00:00:00.000000000 +0000 @@ -1,20 +0,0 @@ -Instructions -============ - -Install the package with: - - go get gopkg.in/check.v1 - -Import it with: - - import "gopkg.in/check.v1" - -and use _check_ as the package name inside the code. - -For more details, visit the project page: - -* http://labix.org/gocheck - -and the API documentation: - -* https://gopkg.in/check.v1 diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/gopkg.in/check.v1/run.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/gopkg.in/check.v1/run.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/gopkg.in/check.v1/run.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/gopkg.in/check.v1/run.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,175 +0,0 @@ -package check - -import ( - "bufio" - "flag" - "fmt" - "os" - "testing" - "time" -) - -// ----------------------------------------------------------------------- -// Test suite registry. - -var allSuites []interface{} - -// Suite registers the given value as a test suite to be run. 
Any methods -// starting with the Test prefix in the given value will be considered as -// a test method. -func Suite(suite interface{}) interface{} { - allSuites = append(allSuites, suite) - return suite -} - -// ----------------------------------------------------------------------- -// Public running interface. - -var ( - oldFilterFlag = flag.String("gocheck.f", "", "Regular expression selecting which tests and/or suites to run") - oldVerboseFlag = flag.Bool("gocheck.v", false, "Verbose mode") - oldStreamFlag = flag.Bool("gocheck.vv", false, "Super verbose mode (disables output caching)") - oldBenchFlag = flag.Bool("gocheck.b", false, "Run benchmarks") - oldBenchTime = flag.Duration("gocheck.btime", 1*time.Second, "approximate run time for each benchmark") - oldListFlag = flag.Bool("gocheck.list", false, "List the names of all tests that will be run") - oldWorkFlag = flag.Bool("gocheck.work", false, "Display and do not remove the test working directory") - - newFilterFlag = flag.String("check.f", "", "Regular expression selecting which tests and/or suites to run") - newVerboseFlag = flag.Bool("check.v", false, "Verbose mode") - newStreamFlag = flag.Bool("check.vv", false, "Super verbose mode (disables output caching)") - newBenchFlag = flag.Bool("check.b", false, "Run benchmarks") - newBenchTime = flag.Duration("check.btime", 1*time.Second, "approximate run time for each benchmark") - newBenchMem = flag.Bool("check.bmem", false, "Report memory benchmarks") - newListFlag = flag.Bool("check.list", false, "List the names of all tests that will be run") - newWorkFlag = flag.Bool("check.work", false, "Display and do not remove the test working directory") -) - -// TestingT runs all test suites registered with the Suite function, -// printing results to stdout, and reporting any failures back to -// the "testing" package. 
-func TestingT(testingT *testing.T) { - benchTime := *newBenchTime - if benchTime == 1*time.Second { - benchTime = *oldBenchTime - } - conf := &RunConf{ - Filter: *oldFilterFlag + *newFilterFlag, - Verbose: *oldVerboseFlag || *newVerboseFlag, - Stream: *oldStreamFlag || *newStreamFlag, - Benchmark: *oldBenchFlag || *newBenchFlag, - BenchmarkTime: benchTime, - BenchmarkMem: *newBenchMem, - KeepWorkDir: *oldWorkFlag || *newWorkFlag, - } - if *oldListFlag || *newListFlag { - w := bufio.NewWriter(os.Stdout) - for _, name := range ListAll(conf) { - fmt.Fprintln(w, name) - } - w.Flush() - return - } - result := RunAll(conf) - println(result.String()) - if !result.Passed() { - testingT.Fail() - } -} - -// RunAll runs all test suites registered with the Suite function, using the -// provided run configuration. -func RunAll(runConf *RunConf) *Result { - result := Result{} - for _, suite := range allSuites { - result.Add(Run(suite, runConf)) - } - return &result -} - -// Run runs the provided test suite using the provided run configuration. -func Run(suite interface{}, runConf *RunConf) *Result { - runner := newSuiteRunner(suite, runConf) - return runner.run() -} - -// ListAll returns the names of all the test functions registered with the -// Suite function that will be run with the provided run configuration. -func ListAll(runConf *RunConf) []string { - var names []string - for _, suite := range allSuites { - names = append(names, List(suite, runConf)...) - } - return names -} - -// List returns the names of the test functions in the given -// suite that will be run with the provided run configuration. -func List(suite interface{}, runConf *RunConf) []string { - var names []string - runner := newSuiteRunner(suite, runConf) - for _, t := range runner.tests { - names = append(names, t.String()) - } - return names -} - -// ----------------------------------------------------------------------- -// Result methods. 
- -func (r *Result) Add(other *Result) { - r.Succeeded += other.Succeeded - r.Skipped += other.Skipped - r.Failed += other.Failed - r.Panicked += other.Panicked - r.FixturePanicked += other.FixturePanicked - r.ExpectedFailures += other.ExpectedFailures - r.Missed += other.Missed - if r.WorkDir != "" && other.WorkDir != "" { - r.WorkDir += ":" + other.WorkDir - } else if other.WorkDir != "" { - r.WorkDir = other.WorkDir - } -} - -func (r *Result) Passed() bool { - return (r.Failed == 0 && r.Panicked == 0 && - r.FixturePanicked == 0 && r.Missed == 0 && - r.RunError == nil) -} - -func (r *Result) String() string { - if r.RunError != nil { - return "ERROR: " + r.RunError.Error() - } - - var value string - if r.Failed == 0 && r.Panicked == 0 && r.FixturePanicked == 0 && - r.Missed == 0 { - value = "OK: " - } else { - value = "OOPS: " - } - value += fmt.Sprintf("%d passed", r.Succeeded) - if r.Skipped != 0 { - value += fmt.Sprintf(", %d skipped", r.Skipped) - } - if r.ExpectedFailures != 0 { - value += fmt.Sprintf(", %d expected failures", r.ExpectedFailures) - } - if r.Failed != 0 { - value += fmt.Sprintf(", %d FAILED", r.Failed) - } - if r.Panicked != 0 { - value += fmt.Sprintf(", %d PANICKED", r.Panicked) - } - if r.FixturePanicked != 0 { - value += fmt.Sprintf(", %d FIXTURE-PANICKED", r.FixturePanicked) - } - if r.Missed != 0 { - value += fmt.Sprintf(", %d MISSED", r.Missed) - } - if r.WorkDir != "" { - value += "\nWORK=" + r.WorkDir - } - return value -} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/gopkg.in/check.v1/run_test.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/gopkg.in/check.v1/run_test.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/gopkg.in/check.v1/run_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/gopkg.in/check.v1/run_test.go 1970-01-01 00:00:00.000000000 
+0000 @@ -1,419 +0,0 @@ -// These tests verify the test running logic. - -package check_test - -import ( - "errors" - . "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/gopkg.in/check.v1" - "os" - "sync" -) - -var runnerS = Suite(&RunS{}) - -type RunS struct{} - -func (s *RunS) TestCountSuite(c *C) { - suitesRun += 1 -} - -// ----------------------------------------------------------------------- -// Tests ensuring result counting works properly. - -func (s *RunS) TestSuccess(c *C) { - output := String{} - result := Run(&SuccessHelper{}, &RunConf{Output: &output}) - c.Check(result.Succeeded, Equals, 1) - c.Check(result.Failed, Equals, 0) - c.Check(result.Skipped, Equals, 0) - c.Check(result.Panicked, Equals, 0) - c.Check(result.FixturePanicked, Equals, 0) - c.Check(result.Missed, Equals, 0) - c.Check(result.RunError, IsNil) -} - -func (s *RunS) TestFailure(c *C) { - output := String{} - result := Run(&FailHelper{}, &RunConf{Output: &output}) - c.Check(result.Succeeded, Equals, 0) - c.Check(result.Failed, Equals, 1) - c.Check(result.Skipped, Equals, 0) - c.Check(result.Panicked, Equals, 0) - c.Check(result.FixturePanicked, Equals, 0) - c.Check(result.Missed, Equals, 0) - c.Check(result.RunError, IsNil) -} - -func (s *RunS) TestFixture(c *C) { - output := String{} - result := Run(&FixtureHelper{}, &RunConf{Output: &output}) - c.Check(result.Succeeded, Equals, 2) - c.Check(result.Failed, Equals, 0) - c.Check(result.Skipped, Equals, 0) - c.Check(result.Panicked, Equals, 0) - c.Check(result.FixturePanicked, Equals, 0) - c.Check(result.Missed, Equals, 0) - c.Check(result.RunError, IsNil) -} - -func (s *RunS) TestPanicOnTest(c *C) { - output := String{} - helper := &FixtureHelper{panicOn: "Test1"} - result := Run(helper, &RunConf{Output: &output}) - c.Check(result.Succeeded, Equals, 1) - c.Check(result.Failed, Equals, 0) - c.Check(result.Skipped, Equals, 0) - c.Check(result.Panicked, Equals, 1) - c.Check(result.FixturePanicked, Equals, 0) - c.Check(result.Missed, 
Equals, 0) - c.Check(result.RunError, IsNil) -} - -func (s *RunS) TestPanicOnSetUpTest(c *C) { - output := String{} - helper := &FixtureHelper{panicOn: "SetUpTest"} - result := Run(helper, &RunConf{Output: &output}) - c.Check(result.Succeeded, Equals, 0) - c.Check(result.Failed, Equals, 0) - c.Check(result.Skipped, Equals, 0) - c.Check(result.Panicked, Equals, 0) - c.Check(result.FixturePanicked, Equals, 1) - c.Check(result.Missed, Equals, 2) - c.Check(result.RunError, IsNil) -} - -func (s *RunS) TestPanicOnSetUpSuite(c *C) { - output := String{} - helper := &FixtureHelper{panicOn: "SetUpSuite"} - result := Run(helper, &RunConf{Output: &output}) - c.Check(result.Succeeded, Equals, 0) - c.Check(result.Failed, Equals, 0) - c.Check(result.Skipped, Equals, 0) - c.Check(result.Panicked, Equals, 0) - c.Check(result.FixturePanicked, Equals, 1) - c.Check(result.Missed, Equals, 2) - c.Check(result.RunError, IsNil) -} - -// ----------------------------------------------------------------------- -// Check result aggregation. - -func (s *RunS) TestAdd(c *C) { - result := &Result{ - Succeeded: 1, - Skipped: 2, - Failed: 3, - Panicked: 4, - FixturePanicked: 5, - Missed: 6, - ExpectedFailures: 7, - } - result.Add(&Result{ - Succeeded: 10, - Skipped: 20, - Failed: 30, - Panicked: 40, - FixturePanicked: 50, - Missed: 60, - ExpectedFailures: 70, - }) - c.Check(result.Succeeded, Equals, 11) - c.Check(result.Skipped, Equals, 22) - c.Check(result.Failed, Equals, 33) - c.Check(result.Panicked, Equals, 44) - c.Check(result.FixturePanicked, Equals, 55) - c.Check(result.Missed, Equals, 66) - c.Check(result.ExpectedFailures, Equals, 77) - c.Check(result.RunError, IsNil) -} - -// ----------------------------------------------------------------------- -// Check the Passed() method. 
- -func (s *RunS) TestPassed(c *C) { - c.Assert((&Result{}).Passed(), Equals, true) - c.Assert((&Result{Succeeded: 1}).Passed(), Equals, true) - c.Assert((&Result{Skipped: 1}).Passed(), Equals, true) - c.Assert((&Result{Failed: 1}).Passed(), Equals, false) - c.Assert((&Result{Panicked: 1}).Passed(), Equals, false) - c.Assert((&Result{FixturePanicked: 1}).Passed(), Equals, false) - c.Assert((&Result{Missed: 1}).Passed(), Equals, false) - c.Assert((&Result{RunError: errors.New("!")}).Passed(), Equals, false) -} - -// ----------------------------------------------------------------------- -// Check that result printing is working correctly. - -func (s *RunS) TestPrintSuccess(c *C) { - result := &Result{Succeeded: 5} - c.Check(result.String(), Equals, "OK: 5 passed") -} - -func (s *RunS) TestPrintFailure(c *C) { - result := &Result{Failed: 5} - c.Check(result.String(), Equals, "OOPS: 0 passed, 5 FAILED") -} - -func (s *RunS) TestPrintSkipped(c *C) { - result := &Result{Skipped: 5} - c.Check(result.String(), Equals, "OK: 0 passed, 5 skipped") -} - -func (s *RunS) TestPrintExpectedFailures(c *C) { - result := &Result{ExpectedFailures: 5} - c.Check(result.String(), Equals, "OK: 0 passed, 5 expected failures") -} - -func (s *RunS) TestPrintPanicked(c *C) { - result := &Result{Panicked: 5} - c.Check(result.String(), Equals, "OOPS: 0 passed, 5 PANICKED") -} - -func (s *RunS) TestPrintFixturePanicked(c *C) { - result := &Result{FixturePanicked: 5} - c.Check(result.String(), Equals, "OOPS: 0 passed, 5 FIXTURE-PANICKED") -} - -func (s *RunS) TestPrintMissed(c *C) { - result := &Result{Missed: 5} - c.Check(result.String(), Equals, "OOPS: 0 passed, 5 MISSED") -} - -func (s *RunS) TestPrintAll(c *C) { - result := &Result{Succeeded: 1, Skipped: 2, ExpectedFailures: 3, - Panicked: 4, FixturePanicked: 5, Missed: 6} - c.Check(result.String(), Equals, - "OOPS: 1 passed, 2 skipped, 3 expected failures, 4 PANICKED, "+ - "5 FIXTURE-PANICKED, 6 MISSED") -} - -func (s *RunS) 
TestPrintRunError(c *C) { - result := &Result{Succeeded: 1, Failed: 1, - RunError: errors.New("Kaboom!")} - c.Check(result.String(), Equals, "ERROR: Kaboom!") -} - -// ----------------------------------------------------------------------- -// Verify that the method pattern flag works correctly. - -func (s *RunS) TestFilterTestName(c *C) { - helper := FixtureHelper{} - output := String{} - runConf := RunConf{Output: &output, Filter: "Test[91]"} - Run(&helper, &runConf) - c.Check(helper.calls[0], Equals, "SetUpSuite") - c.Check(helper.calls[1], Equals, "SetUpTest") - c.Check(helper.calls[2], Equals, "Test1") - c.Check(helper.calls[3], Equals, "TearDownTest") - c.Check(helper.calls[4], Equals, "TearDownSuite") - c.Check(len(helper.calls), Equals, 5) -} - -func (s *RunS) TestFilterTestNameWithAll(c *C) { - helper := FixtureHelper{} - output := String{} - runConf := RunConf{Output: &output, Filter: ".*"} - Run(&helper, &runConf) - c.Check(helper.calls[0], Equals, "SetUpSuite") - c.Check(helper.calls[1], Equals, "SetUpTest") - c.Check(helper.calls[2], Equals, "Test1") - c.Check(helper.calls[3], Equals, "TearDownTest") - c.Check(helper.calls[4], Equals, "SetUpTest") - c.Check(helper.calls[5], Equals, "Test2") - c.Check(helper.calls[6], Equals, "TearDownTest") - c.Check(helper.calls[7], Equals, "TearDownSuite") - c.Check(len(helper.calls), Equals, 8) -} - -func (s *RunS) TestFilterSuiteName(c *C) { - helper := FixtureHelper{} - output := String{} - runConf := RunConf{Output: &output, Filter: "FixtureHelper"} - Run(&helper, &runConf) - c.Check(helper.calls[0], Equals, "SetUpSuite") - c.Check(helper.calls[1], Equals, "SetUpTest") - c.Check(helper.calls[2], Equals, "Test1") - c.Check(helper.calls[3], Equals, "TearDownTest") - c.Check(helper.calls[4], Equals, "SetUpTest") - c.Check(helper.calls[5], Equals, "Test2") - c.Check(helper.calls[6], Equals, "TearDownTest") - c.Check(helper.calls[7], Equals, "TearDownSuite") - c.Check(len(helper.calls), Equals, 8) -} - -func (s *RunS) 
TestFilterSuiteNameAndTestName(c *C) { - helper := FixtureHelper{} - output := String{} - runConf := RunConf{Output: &output, Filter: "FixtureHelper\\.Test2"} - Run(&helper, &runConf) - c.Check(helper.calls[0], Equals, "SetUpSuite") - c.Check(helper.calls[1], Equals, "SetUpTest") - c.Check(helper.calls[2], Equals, "Test2") - c.Check(helper.calls[3], Equals, "TearDownTest") - c.Check(helper.calls[4], Equals, "TearDownSuite") - c.Check(len(helper.calls), Equals, 5) -} - -func (s *RunS) TestFilterAllOut(c *C) { - helper := FixtureHelper{} - output := String{} - runConf := RunConf{Output: &output, Filter: "NotFound"} - Run(&helper, &runConf) - c.Check(len(helper.calls), Equals, 0) -} - -func (s *RunS) TestRequirePartialMatch(c *C) { - helper := FixtureHelper{} - output := String{} - runConf := RunConf{Output: &output, Filter: "est"} - Run(&helper, &runConf) - c.Check(len(helper.calls), Equals, 8) -} - -func (s *RunS) TestFilterError(c *C) { - helper := FixtureHelper{} - output := String{} - runConf := RunConf{Output: &output, Filter: "]["} - result := Run(&helper, &runConf) - c.Check(result.String(), Equals, - "ERROR: Bad filter expression: error parsing regexp: missing closing ]: `[`") - c.Check(len(helper.calls), Equals, 0) -} - -// ----------------------------------------------------------------------- -// Verify that List works correctly. - -func (s *RunS) TestListFiltered(c *C) { - names := List(&FixtureHelper{}, &RunConf{Filter: "1"}) - c.Assert(names, DeepEquals, []string{ - "FixtureHelper.Test1", - }) -} - -func (s *RunS) TestList(c *C) { - names := List(&FixtureHelper{}, &RunConf{}) - c.Assert(names, DeepEquals, []string{ - "FixtureHelper.Test1", - "FixtureHelper.Test2", - }) -} - -// ----------------------------------------------------------------------- -// Verify that verbose mode prints tests which pass as well. 
- -func (s *RunS) TestVerboseMode(c *C) { - helper := FixtureHelper{} - output := String{} - runConf := RunConf{Output: &output, Verbose: true} - Run(&helper, &runConf) - - expected := "PASS: check_test\\.go:[0-9]+: FixtureHelper\\.Test1\t *[.0-9]+s\n" + - "PASS: check_test\\.go:[0-9]+: FixtureHelper\\.Test2\t *[.0-9]+s\n" - - c.Assert(output.value, Matches, expected) -} - -func (s *RunS) TestVerboseModeWithFailBeforePass(c *C) { - helper := FixtureHelper{panicOn: "Test1"} - output := String{} - runConf := RunConf{Output: &output, Verbose: true} - Run(&helper, &runConf) - - expected := "(?s).*PANIC.*\n-+\n" + // Should have an extra line. - "PASS: check_test\\.go:[0-9]+: FixtureHelper\\.Test2\t *[.0-9]+s\n" - - c.Assert(output.value, Matches, expected) -} - -// ----------------------------------------------------------------------- -// Verify the stream output mode. In this mode there's no output caching. - -type StreamHelper struct { - l2 sync.Mutex - l3 sync.Mutex -} - -func (s *StreamHelper) SetUpSuite(c *C) { - c.Log("0") -} - -func (s *StreamHelper) Test1(c *C) { - c.Log("1") - s.l2.Lock() - s.l3.Lock() - go func() { - s.l2.Lock() // Wait for "2". - c.Log("3") - s.l3.Unlock() - }() -} - -func (s *StreamHelper) Test2(c *C) { - c.Log("2") - s.l2.Unlock() - s.l3.Lock() // Wait for "3". 
- c.Fail() - c.Log("4") -} - -func (s *RunS) TestStreamMode(c *C) { - helper := &StreamHelper{} - output := String{} - runConf := RunConf{Output: &output, Stream: true} - Run(helper, &runConf) - - expected := "START: run_test\\.go:[0-9]+: StreamHelper\\.SetUpSuite\n0\n" + - "PASS: run_test\\.go:[0-9]+: StreamHelper\\.SetUpSuite\t *[.0-9]+s\n\n" + - "START: run_test\\.go:[0-9]+: StreamHelper\\.Test1\n1\n" + - "PASS: run_test\\.go:[0-9]+: StreamHelper\\.Test1\t *[.0-9]+s\n\n" + - "START: run_test\\.go:[0-9]+: StreamHelper\\.Test2\n2\n3\n4\n" + - "FAIL: run_test\\.go:[0-9]+: StreamHelper\\.Test2\n\n" - - c.Assert(output.value, Matches, expected) -} - -type StreamMissHelper struct{} - -func (s *StreamMissHelper) SetUpSuite(c *C) { - c.Log("0") - c.Fail() -} - -func (s *StreamMissHelper) Test1(c *C) { - c.Log("1") -} - -func (s *RunS) TestStreamModeWithMiss(c *C) { - helper := &StreamMissHelper{} - output := String{} - runConf := RunConf{Output: &output, Stream: true} - Run(helper, &runConf) - - expected := "START: run_test\\.go:[0-9]+: StreamMissHelper\\.SetUpSuite\n0\n" + - "FAIL: run_test\\.go:[0-9]+: StreamMissHelper\\.SetUpSuite\n\n" + - "START: run_test\\.go:[0-9]+: StreamMissHelper\\.Test1\n" + - "MISS: run_test\\.go:[0-9]+: StreamMissHelper\\.Test1\n\n" - - c.Assert(output.value, Matches, expected) -} - -// ----------------------------------------------------------------------- -// Verify that that the keep work dir request indeed does so. 
- -type WorkDirSuite struct{} - -func (s *WorkDirSuite) Test(c *C) { - c.MkDir() -} - -func (s *RunS) TestKeepWorkDir(c *C) { - output := String{} - runConf := RunConf{Output: &output, Verbose: true, KeepWorkDir: true} - result := Run(&WorkDirSuite{}, &runConf) - - c.Assert(result.String(), Matches, ".*\nWORK="+result.WorkDir) - - stat, err := os.Stat(result.WorkDir) - c.Assert(err, IsNil) - c.Assert(stat.IsDir(), Equals, true) -} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/gopkg.in/check.v1/TODO juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/gopkg.in/check.v1/TODO --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/gopkg.in/check.v1/TODO 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/gopkg.in/check.v1/TODO 1970-01-01 00:00:00.000000000 +0000 @@ -1,2 +0,0 @@ -- Assert(slice, Contains, item) -- Parallel test support diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/LICENSE juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/LICENSE --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/LICENSE 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/LICENSE 2016-10-13 14:32:06.000000000 +0000 @@ -187,7 +187,7 @@ same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright [yyyy] [name of copyright owner] + Copyright 2016 Microsoft Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/management/http.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/management/http.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/management/http.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/management/http.go 2016-10-13 14:32:06.000000000 +0000 @@ -103,37 +103,58 @@ // HTTP client and parameters. It returns the response from the call or an // error. func (client client) sendRequest(httpClient *http.Client, url, requestType, contentType string, data []byte, numberOfRetries int) (*http.Response, error) { - request, reqErr := client.createAzureRequest(url, requestType, contentType, data) - if reqErr != nil { - return nil, reqErr - } - response, err := httpClient.Do(request) - if err != nil { - if numberOfRetries == 0 { - return nil, err - } + absURI := client.createAzureRequestURI(url) - return client.sendRequest(httpClient, url, requestType, contentType, data, numberOfRetries-1) - } + for { + request, reqErr := client.createAzureRequest(absURI, requestType, contentType, data) + if reqErr != nil { + return nil, reqErr + } - if response.StatusCode >= http.StatusBadRequest { - body, err := getResponseBody(response) + response, err := httpClient.Do(request) if err != nil { - // Failed to read the response body - return nil, err - } - azureErr := getAzureError(body) - if azureErr != nil { if numberOfRetries == 0 { - return nil, azureErr + return nil, err } return client.sendRequest(httpClient, url, requestType, contentType, data, numberOfRetries-1) } + if response.StatusCode == http.StatusTemporaryRedirect { + // ASM's way of moving traffic around, see https://msdn.microsoft.com/en-us/library/azure/ee460801.aspx + // Only handled automatically for GET/HEAD requests. This is for the rest of the http verbs. 
+ u, err := response.Location() + if err != nil { + return response, fmt.Errorf("Redirect requested but location header could not be retrieved: %v", err) + } + absURI = u.String() + continue // re-issue request + } + + if response.StatusCode >= http.StatusBadRequest { + body, err := getResponseBody(response) + if err != nil { + // Failed to read the response body + return nil, err + } + azureErr := getAzureError(body) + if azureErr != nil { + if numberOfRetries == 0 { + return nil, azureErr + } + + return client.sendRequest(httpClient, url, requestType, contentType, data, numberOfRetries-1) + } + } + + return response, nil } +} - return response, nil +// createAzureRequestURI constructs the request uri using the management API endpoint and +// subscription ID associated with the client. +func (client client) createAzureRequestURI(url string) string { + return fmt.Sprintf("%s/%s/%s", client.config.ManagementURL, client.publishSettings.SubscriptionID, url) } // createAzureRequest packages up the request with the correct set of headers and returns @@ -142,7 +163,6 @@ var request *http.Request var err error - url = fmt.Sprintf("%s/%s/%s", client.config.ManagementURL, client.publishSettings.SubscriptionID, url) if data != nil { body := bytes.NewBuffer(data) request, err = http.NewRequest(requestType, url, body) diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/management/publishSettings.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/management/publishSettings.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/management/publishSettings.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/management/publishSettings.go 2016-10-13 14:32:06.000000000 +0000 @@ -7,7 +7,7 @@ "fmt" "io/ioutil" - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/pkcs12" + "golang.org/x/crypto/pkcs12" ) // ClientFromPublishSettingsData unmarshalls the contents of a publish settings 
file diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/management/virtualmachine/client.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/management/virtualmachine/client.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/management/virtualmachine/client.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/management/virtualmachine/client.go 2016-10-13 14:32:06.000000000 +0000 @@ -209,7 +209,7 @@ return vm.client.SendAzurePostRequest(requestURL, startRoleOperationBytes) } -func (vm VirtualMachineClient) ShutdownRole(cloudServiceName, deploymentName, roleName string) (management.OperationID, error) { +func (vm VirtualMachineClient) ShutdownRole(cloudServiceName, deploymentName, roleName string, postaction PostShutdownAction) (management.OperationID, error) { if cloudServiceName == "" { return "", fmt.Errorf(errParamNotSpecified, "cloudServiceName") } @@ -221,7 +221,8 @@ } shutdownRoleOperationBytes, err := xml.Marshal(ShutdownRoleOperation{ - OperationType: "ShutdownRoleOperation", + OperationType: "ShutdownRoleOperation", + PostShutdownAction: postaction, }) if err != nil { return "", err @@ -253,7 +254,7 @@ return vm.client.SendAzurePostRequest(requestURL, restartRoleOperationBytes) } -func (vm VirtualMachineClient) DeleteRole(cloudServiceName, deploymentName, roleName string) (management.OperationID, error) { +func (vm VirtualMachineClient) DeleteRole(cloudServiceName, deploymentName, roleName string, deleteVHD bool) (management.OperationID, error) { if cloudServiceName == "" { return "", fmt.Errorf(errParamNotSpecified, "cloudServiceName") } @@ -265,6 +266,9 @@ } requestURL := fmt.Sprintf(azureRoleURL, cloudServiceName, deploymentName, roleName) + if deleteVHD { + requestURL += "?comp=media" + } return vm.client.SendAzureDeleteRequest(requestURL) } diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/management/virtualmachine/entities.go 
juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/management/virtualmachine/entities.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/management/virtualmachine/entities.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/management/virtualmachine/entities.go 2016-10-13 14:32:06.000000000 +0000 @@ -333,7 +333,7 @@ TimeZone string `xml:",omitempty"` // Optional. Specifies the time zone for the Virtual Machine. DomainJoin *DomainJoin `xml:",omitempty"` // Optional. Contains properties that define a domain to which the Virtual Machine will be joined. StoredCertificateSettings []CertificateSetting `xml:">StoredCertificateSetting,omitempty"` // Optional. Contains a list of service certificates with which to provision to the new Virtual Machine. - WinRMListeners *WinRMListener `xml:"WinRM>Listeners>Listener,omitempty"` // Optional. Contains configuration settings for the Windows Remote Management service on the Virtual Machine. This enables remote Windows PowerShell. + WinRMListeners *[]WinRMListener `xml:"WinRM>Listeners>Listener,omitempty"` // Optional. Contains configuration settings for the Windows Remote Management service on the Virtual Machine. This enables remote Windows PowerShell. AdminUsername string `xml:",omitempty"` // Optional. Specifies the name of the administrator account that is created to access the Virtual Machine. If you are creating a Virtual Machine using an image, you must specify a name of an administrator account to be created by using this element. You must use the AdminPassword element to specify the password of the administrator account that is being created. If you are creating a Virtual Machine using an existing specialized disk, this element is not used because the account should already exist on the disk. 
AdditionalUnattendContent string `xml:",omitempty"` // Specifies additional base-64 encoded XML formatted information that can be included in the Unattend.xml file, which is used by Windows Setup. @@ -451,6 +451,7 @@ // virtual IP address for the Virtual Machine. type PublicIP struct { Name string // Specifies the name of the public IP address. + Address string // Specifies the IP address. IdleTimeoutInMinutes int `xml:",omitempty"` // Specifies the timeout for the TCP idle connection. The value can be set between 4 and 30 minutes. The default value is 4 minutes. This element is only used when the protocol is set to TCP. } @@ -468,10 +469,19 @@ OperationType string } +type PostShutdownAction string + +// Enum values for PostShutdownAction +const ( + PostShutdownActionStopped PostShutdownAction = "Stopped" + PostShutdownActionStoppedDeallocated PostShutdownAction = "StoppedDeallocated" +) + // ShutdownRoleOperation contains the information for shutting down a Role. type ShutdownRoleOperation struct { - XMLName xml.Name `xml:"http://schemas.microsoft.com/windowsazure ShutdownRoleOperation"` - OperationType string + XMLName xml.Name `xml:"http://schemas.microsoft.com/windowsazure ShutdownRoleOperation"` + OperationType string + PostShutdownAction PostShutdownAction } // RestartRoleOperation contains the information for restarting a Role. 
diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/management/virtualmachine/entities_test.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/management/virtualmachine/entities_test.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/management/virtualmachine/entities_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/management/virtualmachine/entities_test.go 2016-10-13 14:32:06.000000000 +0000 @@ -282,4 +282,18 @@ deployment.RoleList[0].VMImageInput.DataDiskConfigurations[0].ResizedSizeInGB) } + // ====== + + winRMlisteners := *deployment.RoleList[0].ConfigurationSets[0].WinRMListeners + if string(winRMlisteners[0].Protocol) != "listener-protocol" { + t.Fatalf("Expected winRMlisteners[0].Protocol to be listener-protocol, but got %s", + string(winRMlisteners[0].Protocol)) + } + + winRMlisteners2 := *deployment.RoleList[0].ConfigurationSets[0].WinRMListeners + if winRMlisteners2[1].CertificateThumbprint != "certificate-thumbprint" { + t.Fatalf("Expected winRMlisteners2[1].CertificateThumbprint to be certificate-thumbprint, but got %s", + winRMlisteners2[1].CertificateThumbprint) + } + } diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/management/virtualmachinedisk/client.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/management/virtualmachinedisk/client.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/management/virtualmachinedisk/client.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/management/virtualmachinedisk/client.go 2016-10-13 14:32:06.000000000 +0000 @@ -98,9 +98,9 @@ // repository that is associated with the specified subscription // // https://msdn.microsoft.com/en-us/library/azure/jj157200.aspx -func (c DiskClient) DeleteDisk(name string, deleteVHD bool) (management.OperationID, error) { +func (c DiskClient) DeleteDisk(name string, deleteVHD bool) error { if name == "" { 
- return "", fmt.Errorf(errParamNotSpecified, "name") + return fmt.Errorf(errParamNotSpecified, "name") } requestURL := fmt.Sprintf(deleteDiskURL, name) @@ -108,7 +108,8 @@ requestURL += "?comp=media" } - return c.client.SendAzureDeleteRequest(requestURL) + _, err := c.client.SendAzureDeleteRequest(requestURL) // request is handled synchronously + return err } // GetDataDisk retrieves the specified data disk from a Virtual Machine diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/management/virtualmachineimage/client.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/management/virtualmachineimage/client.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/management/virtualmachineimage/client.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/management/virtualmachineimage/client.go 2016-10-13 14:32:06.000000000 +0000 @@ -4,14 +4,16 @@ import ( "encoding/xml" "fmt" + "net/url" "github.com/Azure/azure-sdk-for-go/management" ) const ( - azureImageListURL = "services/vmimages" - azureRoleOperationsURL = "services/hostedservices/%s/deployments/%s/roleinstances/%s/operations" - errParamNotSpecified = "Parameter %s is not specified." + azureImageListURL = "services/vmimages" + azureImageDeleteURLformat = "services/vmimages/%s" + azureRoleOperationsURL = "services/hostedservices/%s/deployments/%s/roleinstances/%s/operations" + errParamNotSpecified = "Parameter %s is not specified." ) //NewClient is used to instantiate a new Client from an Azure client @@ -19,10 +21,32 @@ return Client{client} } -func (c Client) ListVirtualMachineImages() (ListVirtualMachineImagesResponse, error) { +//ListVirtualMachineImages lists the available VM images, filtered by the optional parameters. 
+//See https://msdn.microsoft.com/en-us/library/azure/dn499770.aspx +func (c Client) ListVirtualMachineImages(parameters ListParameters) (ListVirtualMachineImagesResponse, error) { var imageList ListVirtualMachineImagesResponse - response, err := c.SendAzureGetRequest(azureImageListURL) + listURL := azureImageListURL + + v := url.Values{} + if parameters.Location != "" { + v.Add("location", parameters.Location) + } + + if parameters.Publisher != "" { + v.Add("publisher", parameters.Publisher) + } + + if parameters.Category != "" { + v.Add("category", parameters.Category) + } + + query := v.Encode() + if query != "" { + listURL = listURL + "?" + query + } + + response, err := c.SendAzureGetRequest(listURL) if err != nil { return imageList, err } @@ -30,6 +54,34 @@ return imageList, err } +//DeleteVirtualMachineImage deletes the named VM image. If deleteVHDs is specified, +//the referenced OS and data disks are also deleted. +//See https://msdn.microsoft.com/en-us/library/azure/dn499769.aspx +func (c Client) DeleteVirtualMachineImage(name string, deleteVHDs bool) error { + if name == "" { + return fmt.Errorf(errParamNotSpecified, "name") + } + + uri := fmt.Sprintf(azureImageDeleteURLformat, name) + + if deleteVHDs { + uri = uri + "?comp=media" + } + + _, err := c.SendAzureDeleteRequest(uri) // delete is synchronous for this operation + return err +} + +type ListParameters struct { + Location string + Publisher string + Category string +} + +const CategoryUser = "User" + +//Capture captures a VM into a VM image. The VM has to be shut down previously. 
+//See https://msdn.microsoft.com/en-us/library/azure/dn499768.aspx func (c Client) Capture(cloudServiceName, deploymentName, roleName string, name, label string, osState OSState, parameters CaptureParameters) (management.OperationID, error) { if cloudServiceName == "" { diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/management/virtualmachineimage/entities.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/management/virtualmachineimage/entities.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/management/virtualmachineimage/entities.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/management/virtualmachineimage/entities.go 2016-10-13 14:32:06.000000000 +0000 @@ -13,8 +13,6 @@ } type ListVirtualMachineImagesResponse struct { - XMLName xml.Name `xml:"VMImages"` - Xmlns string `xml:"xmlns,attr"` VMImages []VMImage `xml:"VMImage"` } @@ -24,7 +22,7 @@ Category string // Specifies the repository classification of the image. All user images have the category User. Description string // Specifies the description of the image. OSDiskConfiguration OSDiskConfiguration // Specifies configuration information for the operating system disk that is associated with the image. - DataDiskConfigurations []DataDiskConfiguration // Specifies configuration information for the data disks that are associated with the image. A VM Image might not have data disks associated with it. + DataDiskConfigurations []DataDiskConfiguration `xml:">DataDiskConfiguration"` // Specifies configuration information for the data disks that are associated with the image. A VM Image might not have data disks associated with it. ServiceName string // Specifies the name of the cloud service that contained the Virtual Machine from which the image was created. DeploymentName string // Specifies the name of the deployment that contained the Virtual Machine from which the image was created. 
RoleName string // Specifies the name of the Virtual Machine from which the image was created. diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/management/virtualmachineimage/entities_test.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/management/virtualmachineimage/entities_test.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/management/virtualmachineimage/entities_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/management/virtualmachineimage/entities_test.go 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,110 @@ +package virtualmachineimage + +import ( + "encoding/xml" + "testing" +) + +const xml1 = ` + + imgName + + User + packer made image + + OSDisk + ReadWrite + Generalized + Linux + https://sa.blob.core.windows.net/images/PackerMade_Ubuntu_Serv14_2015-12-12.vhd + 30 + Standard + + + PkrSrvf3mz03u4mi + PkrVMf3mz03u4mi + PkrVMf3mz03u4mi + Central US + 2015-12-12T08:59:29.1936858Z + 2015-12-12T08:59:29.1936858Z + PackerMade + Small + false + VMImageReadyForUse + StoppedVM + Small +` +const xml2 = ` + + imgName + + User + packer made image + + OSDisk + ReadWrite + Generalized + Linux + https://sa.blob.core.windows.net/images/PackerMade_Ubuntu_Serv14_2015-12-12.vhd + 30 + Standard + + + + DataDisk1 + ReadWrite + https://sa.blob.core.windows.net/images/PackerMade_Ubuntu_Serv14_2015-12-12-dd1.vhd + 31 + Standard + + + DataDisk2 + ReadWrite + https://sa.blob.core.windows.net/images/PackerMade_Ubuntu_Serv14_2015-12-12-dd2.vhd + 32 + Standard + + + PkrSrvf3mz03u4mi + PkrVMf3mz03u4mi + PkrVMf3mz03u4mi + Central US + 2015-12-12T08:59:29.1936858Z + 2015-12-12T08:59:29.1936858Z + PackerMade + Small + false + VMImageReadyForUse + StoppedVM + Small +` + +func Test_NoDataDisksUnmarshal(t *testing.T) { + var image VMImage + if err := xml.Unmarshal([]byte(xml1), &image); err != nil { + t.Fatal(err) + } + + check := checker{t} + check.Equal(0, len(image.DataDiskConfigurations)) 
+} + +func Test_DataDiskCountUnmarshal(t *testing.T) { + var image VMImage + if err := xml.Unmarshal([]byte(xml2), &image); err != nil { + t.Fatal(err) + } + + check := checker{t} + check.Equal(2, len(image.DataDiskConfigurations)) + check.Equal("DataDisk1", image.DataDiskConfigurations[0].Name) + check.Equal("DataDisk2", image.DataDiskConfigurations[1].Name) +} + +type checker struct{ *testing.T } + +func (a *checker) Equal(expected, actual interface{}) { + if expected != actual { + a.T.Fatalf("Expected %q, but got %q", expected, actual) + } +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/management/vmutils/deployment.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/management/vmutils/deployment.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/management/vmutils/deployment.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/management/vmutils/deployment.go 2016-10-13 14:32:06.000000000 +0000 @@ -50,9 +50,9 @@ return nil } -// ConfigureDeploymentFromVMImage configures VM Role to deploy from a previously -// captured VM image. -func ConfigureDeploymentFromVMImage( +// ConfigureDeploymentFromPublishedVMImage configures VM Role to deploy from +// a published (public) VM image. +func ConfigureDeploymentFromPublishedVMImage( role *vm.Role, vmImageName string, mediaLocation string, @@ -67,6 +67,19 @@ return nil } +// ConfigureDeploymentFromUserVMImage configures VM Role to deploy from a previously +// captured (user generated) VM image. +func ConfigureDeploymentFromUserVMImage( + role *vm.Role, + vmImageName string) error { + if role == nil { + return fmt.Errorf(errParamNotSpecified, "role") + } + + role.VMImageName = vmImageName + return nil +} + // ConfigureDeploymentFromExistingOSDisk configures VM Role to deploy from an // existing disk. 'label' is optional. 
func ConfigureDeploymentFromExistingOSDisk(role *vm.Role, osDiskName, label string) error { diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/management/vmutils/integration_test.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/management/vmutils/integration_test.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/management/vmutils/integration_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/management/vmutils/integration_test.go 2016-10-13 14:32:06.000000000 +0000 @@ -34,16 +34,34 @@ testRoleConfiguration(t, client, role, location) } +func TestDeployPlatformWindowsImage(t *testing.T) { + client := testutils.GetTestClient(t) + vmname := GenerateName() + sa := GetTestStorageAccount(t, client) + location := sa.StorageServiceProperties.Location + + role := NewVMConfiguration(vmname, "Standard_D3") + ConfigureDeploymentFromPlatformImage(&role, + GetWindowsTestImage(t, client).Name, + fmt.Sprintf("http://%s.blob.core.windows.net/sdktest/%s.vhd", sa.ServiceName, vmname), + GenerateName()) + ConfigureForWindows(&role, vmname, "azureuser", GeneratePassword(), true, "") + ConfigureWinRMOverHTTP(&role) + ConfigureWinRMOverHTTPS(&role, "") + + testRoleConfiguration(t, client, role, location) +} + func TestVMImageList(t *testing.T) { client := testutils.GetTestClient(t) vmic := vmimage.NewClient(client) - il, _ := vmic.ListVirtualMachineImages() + il, _ := vmic.ListVirtualMachineImages(vmimage.ListParameters{}) for _, im := range il.VMImages { t.Logf("%s -%s", im.Name, im.Description) } } -func TestDeployPlatformCaptureRedeploy(t *testing.T) { +func TestDeployPlatformOSImageCaptureRedeploy(t *testing.T) { client := testutils.GetTestClient(t) vmname := GenerateName() sa := GetTestStorageAccount(t, client) @@ -74,7 +92,7 @@ t.Logf("Shutting down VM: %s", vmname) if err := Await(client, func() (management.OperationID, error) { - return vmc.ShutdownRole(vmname, vmname, vmname) + return 
vmc.ShutdownRole(vmname, vmname, vmname, vm.PostShutdownActionStopped) }); err != nil { t.Error(err) } @@ -84,14 +102,14 @@ } imagename := GenerateName() - t.Logf("Capturing VMImage: %s", imagename) + t.Logf("Capturing OSImage: %s", imagename) if err := Await(client, func() (management.OperationID, error) { return vmc.CaptureRole(vmname, vmname, vmname, imagename, imagename, nil) }); err != nil { t.Error(err) } - im := GetUserImage(t, client, imagename) + im := GetUserOSImage(t, client, imagename) t.Logf("Found image: %+v", im) newvmname := GenerateName() @@ -103,6 +121,73 @@ ConfigureForLinux(&role, newvmname, "azureuser", GeneratePassword()) ConfigureWithPublicSSH(&role) + t.Logf("Deploying new VM from freshly captured OS image: %s", newvmname) + if err := Await(client, func() (management.OperationID, error) { + return vmc.CreateDeployment(role, vmname, vm.CreateDeploymentOptions{}) + }); err != nil { + t.Error(err) + } + + deleteHostedService(t, client, vmname) +} + +func TestDeployPlatformVMImageCaptureRedeploy(t *testing.T) { + client := testutils.GetTestClient(t) + vmname := GenerateName() + sa := GetTestStorageAccount(t, client) + location := sa.StorageServiceProperties.Location + + role := NewVMConfiguration(vmname, "Standard_D3") + ConfigureDeploymentFromPlatformImage(&role, + GetLinuxTestImage(t, client).Name, + fmt.Sprintf("http://%s.blob.core.windows.net/sdktest/%s.vhd", sa.ServiceName, vmname), + GenerateName()) + ConfigureForLinux(&role, "myvm", "azureuser", GeneratePassword()) + ConfigureWithPublicSSH(&role) + + t.Logf("Deploying VM: %s", vmname) + createRoleConfiguration(t, client, role, location) + + t.Logf("Wait for deployment to enter running state") + vmc := vm.NewClient(client) + status := vm.DeploymentStatusDeploying + for status != vm.DeploymentStatusRunning { + deployment, err := vmc.GetDeployment(vmname, vmname) + if err != nil { + t.Error(err) + break + } + status = deployment.Status + } + + t.Logf("Shutting down VM: %s", vmname) + if err 
:= Await(client, func() (management.OperationID, error) { + return vmc.ShutdownRole(vmname, vmname, vmname, vm.PostShutdownActionStopped) + }); err != nil { + t.Error(err) + } + + if err := WaitForDeploymentInstanceStatus(client, vmname, vmname, vm.InstanceStatusStoppedVM); err != nil { + t.Fatal(err) + } + + imagename := GenerateName() + t.Logf("Capturing VMImage: %s", imagename) + if err := Await(client, func() (management.OperationID, error) { + return vmimage.NewClient(client).Capture(vmname, vmname, vmname, imagename, imagename, vmimage.OSStateGeneralized, vmimage.CaptureParameters{}) + }); err != nil { + t.Error(err) + } + + im := GetUserVMImage(t, client, imagename) + t.Logf("Found image: %+v", im) + + newvmname := GenerateName() + role = NewVMConfiguration(newvmname, "Standard_D3") + ConfigureDeploymentFromUserVMImage(&role, im.Name) + ConfigureForLinux(&role, newvmname, "azureuser", GeneratePassword()) + ConfigureWithPublicSSH(&role) + t.Logf("Deploying new VM from freshly captured VM image: %s", newvmname) if err := Await(client, func() (management.OperationID, error) { return vmc.CreateDeployment(role, vmname, vm.CreateDeploymentOptions{}) @@ -113,7 +198,7 @@ deleteHostedService(t, client, vmname) } -func TestDeployFromVmImage(t *testing.T) { +func TestDeployFromPublishedVmImage(t *testing.T) { client := testutils.GetTestClient(t) vmname := GenerateName() sa := GetTestStorageAccount(t, client) @@ -125,7 +210,7 @@ }) role := NewVMConfiguration(vmname, "Standard_D4") - ConfigureDeploymentFromVMImage(&role, im.Name, + ConfigureDeploymentFromPublishedVMImage(&role, im.Name, fmt.Sprintf("http://%s.blob.core.windows.net/%s", sa.ServiceName, vmname), false) ConfigureForWindows(&role, vmname, "azureuser", GeneratePassword(), true, "") ConfigureWithPublicSSH(&role) @@ -150,7 +235,7 @@ vmc := vm.NewClient(client) if err := Await(client, func() (management.OperationID, error) { - return vmc.ShutdownRole(vmname, vmname, vmname) + return vmc.ShutdownRole(vmname, 
vmname, vmname, vm.PostShutdownActionStopped) }); err != nil { t.Error(err) } @@ -253,7 +338,13 @@ }) } -func GetUserImage(t *testing.T, client management.Client, name string) osimage.OSImage { +func GetWindowsTestImage(t *testing.T, client management.Client) osimage.OSImage { + return GetOSImage(t, client, func(im osimage.OSImage) bool { + return im.Category == "Public" && im.ImageFamily == "Windows Server 2012 R2 Datacenter" + }) +} + +func GetUserOSImage(t *testing.T, client management.Client, name string) osimage.OSImage { return GetOSImage(t, client, func(im osimage.OSImage) bool { return im.Category == "User" && im.Name == name }) @@ -290,12 +381,18 @@ return image } +func GetUserVMImage(t *testing.T, client management.Client, name string) vmimage.VMImage { + return GetVMImage(t, client, func(im vmimage.VMImage) bool { + return im.Category == "User" && im.Name == name + }) +} + func GetVMImage( t *testing.T, client management.Client, filter func(vmimage.VMImage) bool) vmimage.VMImage { t.Log("Selecting VM image") - allimages, err := vmimage.NewClient(client).ListVirtualMachineImages() + allimages, err := vmimage.NewClient(client).ListVirtualMachineImages(vmimage.ListParameters{}) if err != nil { t.Fatal(err) } diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/management/vmutils/rolestate.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/management/vmutils/rolestate.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/management/vmutils/rolestate.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/management/vmutils/rolestate.go 2016-10-13 14:32:06.000000000 +0000 @@ -7,7 +7,7 @@ vm "github.com/Azure/azure-sdk-for-go/management/virtualmachine" ) -// WaitForDeploymentPowerState blocks until all role instances in deployment to +// WaitForDeploymentPowerState blocks until all role instances in deployment // reach desired power state. 
func WaitForDeploymentPowerState(client management.Client, cloudServiceName, deploymentName string, desiredPowerstate vm.PowerState) error { for { @@ -28,6 +28,31 @@ return false } } + + return true +} + +// WaitForDeploymentInstanceStatus blocks until all role instances in deployment +// reach desired InstanceStatus. +func WaitForDeploymentInstanceStatus(client management.Client, cloudServiceName, deploymentName string, desiredInstanceStatus vm.InstanceStatus) error { + for { + deployment, err := vm.NewClient(client).GetDeployment(cloudServiceName, deploymentName) + if err != nil { + return err + } + if allInstancesInInstanceStatus(deployment.RoleInstanceList, desiredInstanceStatus) { + return nil + } + time.Sleep(2 * time.Second) + } +} + +func allInstancesInInstanceStatus(instances []vm.RoleInstance, desiredInstancestatus vm.InstanceStatus) bool { + for _, r := range instances { + if r.InstanceStatus != desiredInstancestatus { + return false + } + } return true } diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/management/vmutils/vmutils.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/management/vmutils/vmutils.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/management/vmutils/vmutils.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/management/vmutils/vmutils.go 2016-10-13 14:32:06.000000000 +0000 @@ -99,3 +99,50 @@ return nil } + +func ConfigureWinRMListener(role *vm.Role, protocol vm.WinRMProtocol, certificateThumbprint string) error { + + if role == nil { + return fmt.Errorf(errParamNotSpecified, "role") + } + + winconfig := findConfig(role.ConfigurationSets, vm.ConfigurationSetTypeWindowsProvisioning) + + if winconfig != nil { + + listener := vm.WinRMListener{ + Protocol: protocol, + CertificateThumbprint: certificateThumbprint, + } + + if winconfig.WinRMListeners == nil { + winconfig.WinRMListeners = &[]vm.WinRMListener{} + } + + currentListeners := 
*winconfig.WinRMListeners + + // replace existing listener if it's already configured + for i, existingListener := range currentListeners { + if existingListener.Protocol == protocol { + currentListeners[i] = listener + return nil + } + } + + // otherwise append to list of listeners + newListeners := append(currentListeners, listener) + winconfig.WinRMListeners = &newListeners + + return nil + } + + return fmt.Errorf("WindowsProvisioningConfigurationSet not found in 'role'") +} + +func ConfigureWinRMOverHTTP(role *vm.Role) error { + return ConfigureWinRMListener(role, vm.WinRMProtocolHTTP, "") +} + +func ConfigureWinRMOverHTTPS(role *vm.Role, certificateThumbprint string) error { + return ConfigureWinRMListener(role, vm.WinRMProtocolHTTPS, certificateThumbprint) +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/management/vmutils/vmutils_test.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/management/vmutils/vmutils_test.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/management/vmutils/vmutils_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/management/vmutils/vmutils_test.go 2016-10-13 14:32:06.000000000 +0000 @@ -128,7 +128,7 @@ func TestNewVmFromVMImage(t *testing.T) { role := NewVMConfiguration("restoredbackup", "Standard_D1") - ConfigureDeploymentFromVMImage(&role, "myvm-backup-20150209", + ConfigureDeploymentFromPublishedVMImage(&role, "myvm-backup-20150209", "http://mystorageacct.blob.core.windows.net/vhds/myoldnewvm.vhd", false) bytes, err := xml.MarshalIndent(role, "", " ") @@ -210,6 +210,230 @@ true ` + if string(bytes) != expected { + t.Fatalf("Expected marshalled xml to be %q, but got %q", expected, string(bytes)) + } +} + +func TestWinRMOverHttps(t *testing.T) { + role := NewVMConfiguration("winrmoverhttp", "Standard_D1") + ConfigureForWindows(&role, "WINVM", "azuser", "P2ssw@rd", true, "") + ConfigureWinRMOverHTTPS(&role, "abcdef") + + bytes, err := 
xml.MarshalIndent(role, "", " ") + if err != nil { + t.Fatal(err) + } + + expected := ` + winrmoverhttp + PersistentVMRole + + + WindowsProvisioningConfiguration + WINVM + P2ssw@rd + true + + + + + Https + abcdef + + + + azuser + + + + + + + Standard_D1 + true +` + + if string(bytes) != expected { + t.Fatalf("Expected marshalled xml to be %q, but got %q", expected, string(bytes)) + } +} + +func TestWinRMOverHttpsWithNoThumbprint(t *testing.T) { + role := NewVMConfiguration("winrmoverhttp", "Standard_D1") + ConfigureForWindows(&role, "WINVM", "azuser", "P2ssw@rd", true, "") + ConfigureWinRMOverHTTPS(&role, "") + + bytes, err := xml.MarshalIndent(role, "", " ") + if err != nil { + t.Fatal(err) + } + + expected := ` + winrmoverhttp + PersistentVMRole + + + WindowsProvisioningConfiguration + WINVM + P2ssw@rd + true + + + + + Https + + + + azuser + + + + + + + Standard_D1 + true +` + + if string(bytes) != expected { + t.Fatalf("Expected marshalled xml to be %q, but got %q", expected, string(bytes)) + } +} + +func TestWinRMOverHttp(t *testing.T) { + role := NewVMConfiguration("winrmoverhttp", "Standard_D1") + ConfigureForWindows(&role, "WINVM", "azuser", "P2ssw@rd", true, "") + ConfigureWinRMOverHTTP(&role) + + bytes, err := xml.MarshalIndent(role, "", " ") + if err != nil { + t.Fatal(err) + } + + expected := ` + winrmoverhttp + PersistentVMRole + + + WindowsProvisioningConfiguration + WINVM + P2ssw@rd + true + + + + + Http + + + + azuser + + + + + + + Standard_D1 + true +` + + if string(bytes) != expected { + t.Fatalf("Expected marshalled xml to be %q, but got %q", expected, string(bytes)) + } +} + +func TestSettingWinRMOverHttpTwice(t *testing.T) { + role := NewVMConfiguration("winrmoverhttp", "Standard_D1") + ConfigureForWindows(&role, "WINVM", "azuser", "P2ssw@rd", true, "") + ConfigureWinRMOverHTTP(&role) + ConfigureWinRMOverHTTP(&role) + + bytes, err := xml.MarshalIndent(role, "", " ") + if err != nil { + t.Fatal(err) + } + + expected := ` + winrmoverhttp + 
PersistentVMRole + + + WindowsProvisioningConfiguration + WINVM + P2ssw@rd + true + + + + + Http + + + + azuser + + + + + + + Standard_D1 + true +` + + if string(bytes) != expected { + t.Fatalf("Expected marshalled xml to be %q, but got %q", expected, string(bytes)) + } +} + +func TestSettingWinRMOverHttpAndHttpsTwice(t *testing.T) { + role := NewVMConfiguration("winrmoverhttp", "Standard_D1") + ConfigureForWindows(&role, "WINVM", "azuser", "P2ssw@rd", true, "") + ConfigureWinRMOverHTTP(&role) + ConfigureWinRMOverHTTPS(&role, "") + ConfigureWinRMOverHTTP(&role) + ConfigureWinRMOverHTTPS(&role, "abcdef") + + bytes, err := xml.MarshalIndent(role, "", " ") + if err != nil { + t.Fatal(err) + } + + expected := ` + winrmoverhttp + PersistentVMRole + + + WindowsProvisioningConfiguration + WINVM + P2ssw@rd + true + + + + + Http + + + Https + abcdef + + + + azuser + + + + + + + Standard_D1 + true +` + if string(bytes) != expected { t.Fatalf("Expected marshalled xml to be %q, but got %q", expected, string(bytes)) } diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/README.md juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/README.md --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/README.md 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/README.md 2016-10-13 14:32:06.000000000 +0000 @@ -86,3 +86,6 @@ # License This project is published under [Apache 2.0 License](LICENSE). + +----- +This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments. 
diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/storage/blob.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/storage/blob.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/storage/blob.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/storage/blob.go 2016-10-13 14:32:06.000000000 +0000 @@ -55,7 +55,51 @@ type Blob struct { Name string `xml:"Name"` Properties BlobProperties `xml:"Properties"` - // TODO (ahmetalpbalkan) Metadata + Metadata BlobMetadata `xml:"Metadata"` +} + +// BlobMetadata is a set of custom name/value pairs. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179404.aspx +type BlobMetadata map[string]string + +type blobMetadataEntries struct { + Entries []blobMetadataEntry `xml:",any"` +} +type blobMetadataEntry struct { + XMLName xml.Name + Value string `xml:",chardata"` +} + +// UnmarshalXML converts the xml:Metadata into Metadata map +func (bm *BlobMetadata) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { + var entries blobMetadataEntries + if err := d.DecodeElement(&entries, &start); err != nil { + return err + } + for _, entry := range entries.Entries { + if *bm == nil { + *bm = make(BlobMetadata) + } + (*bm)[strings.ToLower(entry.XMLName.Local)] = entry.Value + } + return nil +} + +// MarshalXML implements the xml.Marshaler interface. It encodes +// metadata name/value pairs as they would appear in an Azure +// ListBlobs response. 
+func (bm BlobMetadata) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { + entries := make([]blobMetadataEntry, 0, len(bm)) + for k, v := range bm { + entries = append(entries, blobMetadataEntry{ + XMLName: xml.Name{Local: http.CanonicalHeaderKey(k)}, + Value: v, + }) + } + return enc.EncodeElement(blobMetadataEntries{ + Entries: entries, + }, start) } // BlobProperties contains various properties of a blob @@ -75,6 +119,7 @@ CopyProgress string `xml:"CopyProgress"` CopyCompletionTime string `xml:"CopyCompletionTime"` CopyStatusDescription string `xml:"CopyStatusDescription"` + LeaseStatus string `xml:"LeaseStatus"` } // BlobListResponse contains the response fields from ListBlobs call. @@ -88,6 +133,16 @@ NextMarker string `xml:"NextMarker"` MaxResults int64 `xml:"MaxResults"` Blobs []Blob `xml:"Blobs>Blob"` + + // BlobPrefix is used to traverse blobs as if it were a file system. + // It is returned if ListBlobsParameters.Delimiter is specified. + // The list here can be thought of as "folders" that may contain + // other folders or blobs. + BlobPrefixes []string `xml:"Blobs>BlobPrefix>Name"` + + // Delimiter is used to traverse blobs as if it were a file system. + // It is returned if ListBlobsParameters.Delimiter is specified. 
+ Delimiter string `xml:"Delimiter"` } // ListContainersParameters defines the set of customizable parameters to make a @@ -167,8 +222,9 @@ // Types of page blobs const ( - BlobTypeBlock BlobType = "BlockBlob" - BlobTypePage BlobType = "PageBlob" + BlobTypeBlock BlobType = "BlockBlob" + BlobTypePage BlobType = "PageBlob" + BlobTypeAppend BlobType = "AppendBlob" ) // PageWriteType defines the type updates that are going to be @@ -330,7 +386,6 @@ uri := b.client.getEndpoint(blobServiceName, pathForContainer(name), url.Values{"restype": {"container"}}) headers := b.client.getStandardHeaders() - headers["Content-Length"] = "0" if access != "" { headers["x-ms-blob-public-access"] = string(access) } @@ -447,7 +502,7 @@ // // See https://msdn.microsoft.com/en-us/library/azure/dd179440.aspx func (b BlobStorageClient) GetBlob(container, name string) (io.ReadCloser, error) { - resp, err := b.getBlobRange(container, name, "") + resp, err := b.getBlobRange(container, name, "", nil) if err != nil { return nil, err } @@ -462,8 +517,8 @@ // string must be in a format like "0-", "10-100" as defined in HTTP 1.1 spec. 
// // See https://msdn.microsoft.com/en-us/library/azure/dd179440.aspx -func (b BlobStorageClient) GetBlobRange(container, name, bytesRange string) (io.ReadCloser, error) { - resp, err := b.getBlobRange(container, name, bytesRange) +func (b BlobStorageClient) GetBlobRange(container, name, bytesRange string, extraHeaders map[string]string) (io.ReadCloser, error) { + resp, err := b.getBlobRange(container, name, bytesRange, extraHeaders) if err != nil { return nil, err } @@ -474,7 +529,7 @@ return resp.body, nil } -func (b BlobStorageClient) getBlobRange(container, name, bytesRange string) (*storageResponse, error) { +func (b BlobStorageClient) getBlobRange(container, name, bytesRange string, extraHeaders map[string]string) (*storageResponse, error) { verb := "GET" uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{}) @@ -482,6 +537,11 @@ if bytesRange != "" { headers["Range"] = fmt.Sprintf("bytes=%s", bytesRange) } + + for k, v := range extraHeaders { + headers[k] = v + } + resp, err := b.client.exec(verb, uri, headers, nil) if err != nil { return nil, err @@ -538,6 +598,7 @@ CopySource: resp.headers.Get("x-ms-copy-source"), CopyStatus: resp.headers.Get("x-ms-copy-status"), BlobType: BlobType(resp.headers.Get("x-ms-blob-type")), + LeaseStatus: resp.headers.Get("x-ms-lease-status"), }, nil } @@ -549,14 +610,17 @@ // applications either. 
// // See https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx -func (b BlobStorageClient) SetBlobMetadata(container, name string, metadata map[string]string) error { +func (b BlobStorageClient) SetBlobMetadata(container, name string, metadata map[string]string, extraHeaders map[string]string) error { params := url.Values{"comp": {"metadata"}} uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), params) headers := b.client.getStandardHeaders() for k, v := range metadata { headers[userDefinedMetadataHeaderPrefix+k] = v } - headers["Content-Length"] = "0" + + for k, v := range extraHeaders { + headers[k] = v + } resp, err := b.client.exec("PUT", uri, headers, nil) if err != nil { @@ -614,7 +678,7 @@ // // See https://msdn.microsoft.com/en-us/library/azure/dd179451.aspx func (b BlobStorageClient) CreateBlockBlob(container, name string) error { - return b.CreateBlockBlobFromReader(container, name, 0, nil) + return b.CreateBlockBlobFromReader(container, name, 0, nil, nil) } // CreateBlockBlobFromReader initializes a block blob using data from @@ -626,13 +690,17 @@ // PutBlock, and PutBlockList. 
// // See https://msdn.microsoft.com/en-us/library/azure/dd179451.aspx -func (b BlobStorageClient) CreateBlockBlobFromReader(container, name string, size uint64, blob io.Reader) error { +func (b BlobStorageClient) CreateBlockBlobFromReader(container, name string, size uint64, blob io.Reader, extraHeaders map[string]string) error { path := fmt.Sprintf("%s/%s", container, name) uri := b.client.getEndpoint(blobServiceName, path, url.Values{}) headers := b.client.getStandardHeaders() headers["x-ms-blob-type"] = string(BlobTypeBlock) headers["Content-Length"] = fmt.Sprintf("%d", size) + for k, v := range extraHeaders { + headers[k] = v + } + resp, err := b.client.exec("PUT", uri, headers, blob) if err != nil { return err @@ -649,7 +717,7 @@ // // See https://msdn.microsoft.com/en-us/library/azure/dd135726.aspx func (b BlobStorageClient) PutBlock(container, name, blockID string, chunk []byte) error { - return b.PutBlockWithLength(container, name, blockID, uint64(len(chunk)), bytes.NewReader(chunk)) + return b.PutBlockWithLength(container, name, blockID, uint64(len(chunk)), bytes.NewReader(chunk), nil) } // PutBlockWithLength saves the given data stream of exactly specified size to @@ -660,16 +728,21 @@ // checked by the SDK). 
// // See https://msdn.microsoft.com/en-us/library/azure/dd135726.aspx -func (b BlobStorageClient) PutBlockWithLength(container, name, blockID string, size uint64, blob io.Reader) error { +func (b BlobStorageClient) PutBlockWithLength(container, name, blockID string, size uint64, blob io.Reader, extraHeaders map[string]string) error { uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{"comp": {"block"}, "blockid": {blockID}}) headers := b.client.getStandardHeaders() headers["x-ms-blob-type"] = string(BlobTypeBlock) headers["Content-Length"] = fmt.Sprintf("%v", size) + for k, v := range extraHeaders { + headers[k] = v + } + resp, err := b.client.exec("PUT", uri, headers, blob) if err != nil { return err } + defer resp.body.Close() return checkRespCode(resp.statusCode, []int{http.StatusCreated}) } @@ -716,13 +789,16 @@ // be created using this method before writing pages. // // See https://msdn.microsoft.com/en-us/library/azure/dd179451.aspx -func (b BlobStorageClient) PutPageBlob(container, name string, size int64) error { +func (b BlobStorageClient) PutPageBlob(container, name string, size int64, extraHeaders map[string]string) error { path := fmt.Sprintf("%s/%s", container, name) uri := b.client.getEndpoint(blobServiceName, path, url.Values{}) headers := b.client.getStandardHeaders() headers["x-ms-blob-type"] = string(BlobTypePage) headers["x-ms-blob-content-length"] = fmt.Sprintf("%v", size) - headers["Content-Length"] = fmt.Sprintf("%v", 0) + + for k, v := range extraHeaders { + headers[k] = v + } resp, err := b.client.exec("PUT", uri, headers, nil) if err != nil { @@ -738,14 +814,16 @@ // with 512-byte boundaries and chunk must be of size multiplies by 512. 
// // See https://msdn.microsoft.com/en-us/library/ee691975.aspx -func (b BlobStorageClient) PutPage(container, name string, startByte, endByte int64, writeType PageWriteType, chunk []byte) error { +func (b BlobStorageClient) PutPage(container, name string, startByte, endByte int64, writeType PageWriteType, chunk []byte, extraHeaders map[string]string) error { path := fmt.Sprintf("%s/%s", container, name) uri := b.client.getEndpoint(blobServiceName, path, url.Values{"comp": {"page"}}) headers := b.client.getStandardHeaders() headers["x-ms-blob-type"] = string(BlobTypePage) headers["x-ms-page-write"] = string(writeType) headers["x-ms-range"] = fmt.Sprintf("bytes=%v-%v", startByte, endByte) - + for k, v := range extraHeaders { + headers[k] = v + } var contentLength int64 var data io.Reader if writeType == PageWriteTypeClear { @@ -788,6 +866,52 @@ return out, err } +// PutAppendBlob initializes an empty append blob with specified name. An +// append blob must be created using this method before appending blocks. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179451.aspx +func (b BlobStorageClient) PutAppendBlob(container, name string, extraHeaders map[string]string) error { + path := fmt.Sprintf("%s/%s", container, name) + uri := b.client.getEndpoint(blobServiceName, path, url.Values{}) + headers := b.client.getStandardHeaders() + headers["x-ms-blob-type"] = string(BlobTypeAppend) + + for k, v := range extraHeaders { + headers[k] = v + } + + resp, err := b.client.exec("PUT", uri, headers, nil) + if err != nil { + return err + } + defer resp.body.Close() + + return checkRespCode(resp.statusCode, []int{http.StatusCreated}) +} + +// AppendBlock appends a block to an append blob. 
+// +// See https://msdn.microsoft.com/en-us/library/azure/mt427365.aspx +func (b BlobStorageClient) AppendBlock(container, name string, chunk []byte, extraHeaders map[string]string) error { + path := fmt.Sprintf("%s/%s", container, name) + uri := b.client.getEndpoint(blobServiceName, path, url.Values{"comp": {"appendblock"}}) + headers := b.client.getStandardHeaders() + headers["x-ms-blob-type"] = string(BlobTypeAppend) + headers["Content-Length"] = fmt.Sprintf("%v", len(chunk)) + + for k, v := range extraHeaders { + headers[k] = v + } + + resp, err := b.client.exec("PUT", uri, headers, bytes.NewReader(chunk)) + if err != nil { + return err + } + defer resp.body.Close() + + return checkRespCode(resp.statusCode, []int{http.StatusCreated}) +} + // CopyBlob starts a blob copy operation and waits for the operation to // complete. sourceBlob parameter must be a canonical URL to the blob (can be // obtained using GetBlobURL method.) There is no SLA on blob copy and therefore @@ -807,7 +931,6 @@ uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{}) headers := b.client.getStandardHeaders() - headers["Content-Length"] = "0" headers["x-ms-copy-source"] = sourceBlob resp, err := b.client.exec("PUT", uri, headers, nil) @@ -856,8 +979,8 @@ // DeleteBlob deletes the given blob from the specified container. // If the blob does not exists at the time of the Delete Blob operation, it // returns error. See https://msdn.microsoft.com/en-us/library/azure/dd179413.aspx -func (b BlobStorageClient) DeleteBlob(container, name string) error { - resp, err := b.deleteBlob(container, name) +func (b BlobStorageClient) DeleteBlob(container, name string, extraHeaders map[string]string) error { + resp, err := b.deleteBlob(container, name, extraHeaders) if err != nil { return err } @@ -869,8 +992,8 @@ // blob is deleted with this call, returns true. Otherwise returns false. 
// // See https://msdn.microsoft.com/en-us/library/azure/dd179413.aspx -func (b BlobStorageClient) DeleteBlobIfExists(container, name string) (bool, error) { - resp, err := b.deleteBlob(container, name) +func (b BlobStorageClient) DeleteBlobIfExists(container, name string, extraHeaders map[string]string) (bool, error) { + resp, err := b.deleteBlob(container, name, extraHeaders) if resp != nil && (resp.statusCode == http.StatusAccepted || resp.statusCode == http.StatusNotFound) { return resp.statusCode == http.StatusAccepted, nil } @@ -878,10 +1001,13 @@ return false, err } -func (b BlobStorageClient) deleteBlob(container, name string) (*storageResponse, error) { +func (b BlobStorageClient) deleteBlob(container, name string, extraHeaders map[string]string) (*storageResponse, error) { verb := "DELETE" uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{}) headers := b.client.getStandardHeaders() + for k, v := range extraHeaders { + headers[k] = v + } return b.client.exec(verb, uri, headers, nil) } @@ -910,7 +1036,7 @@ if err != nil { return "", err } - signedExpiry := expiry.Format(time.RFC3339) + signedExpiry := expiry.UTC().Format(time.RFC3339) signedResource := "b" stringToSign, err := blobSASStringToSign(b.client.apiVersion, canonicalizedResource, signedExpiry, signedPermissions) @@ -938,6 +1064,10 @@ func blobSASStringToSign(signedVersion, canonicalizedResource, signedExpiry, signedPermissions string) (string, error) { var signedStart, signedIdentifier, rscc, rscd, rsce, rscl, rsct string + if signedVersion >= "2015-02-21" { + canonicalizedResource = "/blob" + canonicalizedResource + } + // reference: http://msdn.microsoft.com/en-us/library/azure/dn140255.aspx if signedVersion >= "2013-08-15" { return fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s", signedPermissions, signedStart, signedExpiry, canonicalizedResource, signedIdentifier, signedVersion, rscc, rscd, rsce, rscl, rsct), nil diff -Nru 
juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/storage/blob_test.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/storage/blob_test.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/storage/blob_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/storage/blob_test.go 2016-10-13 14:32:06.000000000 +0000 @@ -4,6 +4,7 @@ "bytes" "crypto/rand" "encoding/base64" + "encoding/xml" "fmt" "io" "io/ioutil" @@ -14,7 +15,7 @@ "testing" "time" - chk "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/gopkg.in/check.v1" + chk "gopkg.in/check.v1" ) type StorageBlobSuite struct{} @@ -218,7 +219,7 @@ c.Assert(cli.CreateContainer(cnt, ContainerAccessTypeBlob), chk.IsNil) defer cli.DeleteContainer(cnt) c.Assert(cli.putSingleBlockBlob(cnt, blob, []byte("Hello!")), chk.IsNil) - defer cli.DeleteBlob(cnt, blob) + defer cli.DeleteBlob(cnt, blob, nil) ok, err := cli.BlobExists(cnt, blob+".foo") c.Assert(err, chk.IsNil) @@ -254,10 +255,10 @@ defer cli.deleteContainer(cnt) c.Assert(cli.putSingleBlockBlob(cnt, src, body), chk.IsNil) - defer cli.DeleteBlob(cnt, src) + defer cli.DeleteBlob(cnt, src, nil) c.Assert(cli.CopyBlob(cnt, dst, cli.GetBlobURL(cnt, src)), chk.IsNil) - defer cli.DeleteBlob(cnt, dst) + defer cli.DeleteBlob(cnt, dst, nil) blobBody, err := cli.GetBlob(cnt, dst) c.Assert(err, chk.IsNil) @@ -273,13 +274,49 @@ blob := randString(20) cli := getBlobClient(c) - c.Assert(cli.DeleteBlob(cnt, blob), chk.NotNil) + c.Assert(cli.DeleteBlob(cnt, blob, nil), chk.NotNil) - ok, err := cli.DeleteBlobIfExists(cnt, blob) + ok, err := cli.DeleteBlobIfExists(cnt, blob, nil) c.Assert(err, chk.IsNil) c.Assert(ok, chk.Equals, false) } +func (s *StorageBlobSuite) TestDeleteBlobWithConditions(c *chk.C) { + cnt := randContainer() + blob := randString(20) + + cli := getBlobClient(c) + + c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) + defer cli.deleteContainer(cnt) + + 
c.Assert(cli.CreateBlockBlob(cnt, blob), chk.IsNil) + oldProps, err := cli.GetBlobProperties(cnt, blob) + c.Assert(err, chk.IsNil) + + // Update metadata, so Etag changes + c.Assert(cli.SetBlobMetadata(cnt, blob, map[string]string{}, nil), chk.IsNil) + newProps, err := cli.GetBlobProperties(cnt, blob) + c.Assert(err, chk.IsNil) + + // "Delete if matches old Etag" should fail without deleting. + err = cli.DeleteBlob(cnt, blob, map[string]string{ + "If-Match": oldProps.Etag, + }) + c.Assert(err, chk.FitsTypeOf, AzureStorageServiceError{}) + c.Assert(err.(AzureStorageServiceError).StatusCode, chk.Equals, http.StatusPreconditionFailed) + _, err = cli.GetBlob(cnt, blob) + c.Assert(err, chk.IsNil) + + // "Delete if matches new Etag" should succeed. + err = cli.DeleteBlob(cnt, blob, map[string]string{ + "If-Match": newProps.Etag, + }) + c.Assert(err, chk.IsNil) + _, err = cli.GetBlob(cnt, blob) + c.Assert(err, chk.Not(chk.IsNil)) +} + func (s *StorageBlobSuite) TestGetBlobProperties(c *chk.C) { cnt := randContainer() blob := randString(20) @@ -344,6 +381,192 @@ c.Assert(seen, chk.DeepEquals, blobs) } +// listBlobsAsFiles is a helper function to list blobs as "folders" and "files". +func listBlobsAsFiles(cli BlobStorageClient, cnt string, parentDir string) (folders []string, files []string, err error) { + var blobParams ListBlobsParameters + var blobListResponse BlobListResponse + + // Top level "folders" + blobParams = ListBlobsParameters{ + Delimiter: "/", + Prefix: parentDir, + } + + blobListResponse, err = cli.ListBlobs(cnt, blobParams) + if err != nil { + return nil, nil, err + } + + // These are treated as "folders" under the parentDir. + folders = blobListResponse.BlobPrefixes + + // "Files" are blobs which are under the parentDir. 
+ files = make([]string, len(blobListResponse.Blobs)) + for i := range blobListResponse.Blobs { + files[i] = blobListResponse.Blobs[i].Name + } + + return folders, files, nil +} + +// TestListBlobsTraversal tests that we can correctly traverse +// blobs in blob storage as if it were a file system by using +// a combination of Prefix, Delimiter, and BlobPrefixes. +// +// Blob storage is flat, but we can *simulate* the file +// system with folders and files using conventions in naming. +// With the blob named "/usr/bin/ls", when we use delimiter '/', +// the "ls" would be a "file"; with "/", "/usr" and "/usr/bin" being +// the "folders". +// +// NOTE: The use of delimiter (e.g. forward slash) is extremely fiddly +// and difficult to get right so some discipline in naming and rules +// when using the API is required to get everything to work as expected. +// +// Assuming our delimiter is a forward slash, the rules are: +// +// - Do use a leading forward slash in blob names to make things +// consistent and simpler (see further). +// Note that doing so will show "" as the only top-level +// folder in the container in Azure portal, which may look strange. +// +// - The "folder names" are returned *with trailing forward slash* as per MSDN. +// +// - The "folder names" will be "absolute paths", e.g. listing things under "/usr/" +// will return folder names "/usr/bin/". +// +// - The "file names" are returned as full blob names, e.g. when listing +// things under "/usr/bin/", the file names will be "/usr/bin/ls" and +// "/usr/bin/cat". +// +// - Everything is returned with case-sensitive order as expected in real file system +// as per MSDN. +// +// - To list things under a "folder" always use trailing forward slash. +// +// Example: to list top level folders we use root folder named "" with +// trailing forward slash, so we use "/". +// +// Example: to list folders under "/usr", we again append forward slash and +// so we use "/usr/". 
+// +// Because we use leading forward slash we don't need to have different +// treatment of "get top-level folders" and "get non-top-level folders" +// scenarios. +func (s *StorageBlobSuite) TestListBlobsTraversal(c *chk.C) { + cli := getBlobClient(c) + cnt := randContainer() + + c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) + defer cli.DeleteContainer(cnt) + + // Note use of leading forward slash as per naming rules. + blobsToCreate := []string{ + "/usr/bin/ls", + "/usr/bin/cat", + "/usr/lib64/libc.so", + "/etc/hosts", + "/etc/init.d/iptables", + } + + // Create the above blobs + for _, blobName := range blobsToCreate { + err := cli.CreateBlockBlob(cnt, blobName) + c.Assert(err, chk.IsNil) + } + + var folders []string + var files []string + var err error + + // Top level folders and files. + folders, files, err = listBlobsAsFiles(cli, cnt, "/") + c.Assert(err, chk.IsNil) + c.Assert(folders, chk.DeepEquals, []string{"/etc/", "/usr/"}) + c.Assert(files, chk.DeepEquals, []string{}) + + // Things under /etc/. Note use of trailing forward slash here as per rules. 
+ folders, files, err = listBlobsAsFiles(cli, cnt, "/etc/") + c.Assert(err, chk.IsNil) + c.Assert(folders, chk.DeepEquals, []string{"/etc/init.d/"}) + c.Assert(files, chk.DeepEquals, []string{"/etc/hosts"}) + + // Things under /etc/init.d/ + folders, files, err = listBlobsAsFiles(cli, cnt, "/etc/init.d/") + c.Assert(err, chk.IsNil) + c.Assert(folders, chk.DeepEquals, []string(nil)) + c.Assert(files, chk.DeepEquals, []string{"/etc/init.d/iptables"}) + + // Things under /usr/ + folders, files, err = listBlobsAsFiles(cli, cnt, "/usr/") + c.Assert(err, chk.IsNil) + c.Assert(folders, chk.DeepEquals, []string{"/usr/bin/", "/usr/lib64/"}) + c.Assert(files, chk.DeepEquals, []string{}) + + // Things under /usr/bin/ + folders, files, err = listBlobsAsFiles(cli, cnt, "/usr/bin/") + c.Assert(err, chk.IsNil) + c.Assert(folders, chk.DeepEquals, []string(nil)) + c.Assert(files, chk.DeepEquals, []string{"/usr/bin/cat", "/usr/bin/ls"}) +} + +func (s *StorageBlobSuite) TestListBlobsWithMetadata(c *chk.C) { + cli := getBlobClient(c) + cnt := randContainer() + + c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) + defer cli.deleteContainer(cnt) + + expectMeta := make(map[string]BlobMetadata) + + // Put 4 blobs with metadata + for i := 0; i < 4; i++ { + name := randString(20) + c.Assert(cli.putSingleBlockBlob(cnt, name, []byte("Hello, world!")), chk.IsNil) + c.Assert(cli.SetBlobMetadata(cnt, name, map[string]string{ + "Foo": name, + "Bar_BAZ": "Waz Qux", + }, nil), chk.IsNil) + expectMeta[name] = BlobMetadata{ + "foo": name, + "bar_baz": "Waz Qux", + } + } + + // Put one more blob with no metadata + blobWithoutMetadata := randString(20) + c.Assert(cli.putSingleBlockBlob(cnt, blobWithoutMetadata, []byte("Hello, world!")), chk.IsNil) + expectMeta[blobWithoutMetadata] = nil + + // Get ListBlobs with include:"metadata" + resp, err := cli.ListBlobs(cnt, ListBlobsParameters{ + MaxResults: 5, + Include: "metadata"}) + c.Assert(err, chk.IsNil) + + respBlobs := 
make(map[string]Blob) + for _, v := range resp.Blobs { + respBlobs[v.Name] = v + } + + // Verify the metadata is as expected + for name := range expectMeta { + c.Check(respBlobs[name].Metadata, chk.DeepEquals, expectMeta[name]) + } +} + +// Ensure it's possible to generate a ListBlobs response with +// metadata, e.g., for a stub server. +func (s *StorageBlobSuite) TestMarshalBlobMetadata(c *chk.C) { + buf, err := xml.Marshal(Blob{ + Name: randString(20), + Properties: BlobProperties{}, + Metadata: BlobMetadata{"foo": "baz < waz"}, + }) + c.Assert(err, chk.IsNil) + c.Assert(string(buf), chk.Matches, `.*baz < waz.*`) +} + func (s *StorageBlobSuite) TestGetAndSetMetadata(c *chk.C) { cli := getBlobClient(c) cnt := randContainer() @@ -364,7 +587,7 @@ "bar_baz": "waz qux", } - err = cli.SetBlobMetadata(cnt, blob, mPut) + err = cli.SetBlobMetadata(cnt, blob, mPut, nil) c.Assert(err, chk.IsNil) m, err = cli.GetBlobMetadata(cnt, blob) @@ -382,7 +605,7 @@ "bar_baz": "different waz qux", } - err = cli.SetBlobMetadata(cnt, blob, mPutUpper) + err = cli.SetBlobMetadata(cnt, blob, mPutUpper, nil) c.Assert(err, chk.IsNil) m, err = cli.GetBlobMetadata(cnt, blob) @@ -390,6 +613,39 @@ c.Check(m, chk.DeepEquals, mExpectLower) } +func (s *StorageBlobSuite) TestSetMetadataWithExtraHeaders(c *chk.C) { + cli := getBlobClient(c) + cnt := randContainer() + + c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) + defer cli.deleteContainer(cnt) + + blob := randString(20) + c.Assert(cli.putSingleBlockBlob(cnt, blob, []byte{}), chk.IsNil) + + mPut := map[string]string{ + "foo": "bar", + "bar_baz": "waz qux", + } + + extraHeaders := map[string]string{ + "If-Match": "incorrect-etag", + } + + // Set with incorrect If-Match in extra headers should result in error + err := cli.SetBlobMetadata(cnt, blob, mPut, extraHeaders) + c.Assert(err, chk.NotNil) + + props, err := cli.GetBlobProperties(cnt, blob) + extraHeaders = map[string]string{ + "If-Match": props.Etag, + } + + // Set 
with matching If-Match in extra headers should succeed + err = cli.SetBlobMetadata(cnt, blob, mPut, extraHeaders) + c.Assert(err, chk.IsNil) +} + func (s *StorageBlobSuite) TestPutEmptyBlockBlob(c *chk.C) { cli := getBlobClient(c) cnt := randContainer() @@ -415,7 +671,7 @@ defer cli.DeleteContainer(cnt) c.Assert(cli.putSingleBlockBlob(cnt, blob, []byte(body)), chk.IsNil) - defer cli.DeleteBlob(cnt, blob) + defer cli.DeleteBlob(cnt, blob, nil) // Read 1-3 for _, r := range []struct { @@ -426,7 +682,7 @@ {"1-3", body[1 : 3+1]}, {"3-", body[3:]}, } { - resp, err := cli.GetBlobRange(cnt, blob, r.rangeStr) + resp, err := cli.GetBlobRange(cnt, blob, r.rangeStr, nil) c.Assert(err, chk.IsNil) blobBody, err := ioutil.ReadAll(resp) c.Assert(err, chk.IsNil) @@ -444,7 +700,7 @@ name := randString(20) data := randBytes(8888) - c.Assert(cli.CreateBlockBlobFromReader(cnt, name, uint64(len(data)), bytes.NewReader(data)), chk.IsNil) + c.Assert(cli.CreateBlockBlobFromReader(cnt, name, uint64(len(data)), bytes.NewReader(data), nil), chk.IsNil) body, err := cli.GetBlob(cnt, name) c.Assert(err, chk.IsNil) @@ -463,7 +719,7 @@ name := randString(20) data := randBytes(8888) - err := cli.CreateBlockBlobFromReader(cnt, name, 9999, bytes.NewReader(data)) + err := cli.CreateBlockBlobFromReader(cnt, name, 9999, bytes.NewReader(data), nil) c.Assert(err, chk.Not(chk.IsNil)) _, err = cli.GetBlob(cnt, name) @@ -495,7 +751,7 @@ // Put one block c.Assert(cli.PutBlock(cnt, blob, blockID, chunk), chk.IsNil) - defer cli.deleteBlob(cnt, blob) + defer cli.deleteBlob(cnt, blob, nil) // Get committed blocks committed, err := cli.GetBlockList(cnt, blob, BlockListTypeCommitted) @@ -549,7 +805,7 @@ blob := randString(20) size := int64(10 * 1024 * 1024) - c.Assert(cli.PutPageBlob(cnt, blob, size), chk.IsNil) + c.Assert(cli.PutPageBlob(cnt, blob, size, nil), chk.IsNil) // Verify props, err := cli.GetBlobProperties(cnt, blob) @@ -566,17 +822,17 @@ blob := randString(20) size := int64(10 * 1024 * 1024) // larger 
than we'll use - c.Assert(cli.PutPageBlob(cnt, blob, size), chk.IsNil) + c.Assert(cli.PutPageBlob(cnt, blob, size, nil), chk.IsNil) chunk1 := []byte(randString(1024)) chunk2 := []byte(randString(512)) // Append chunks - c.Assert(cli.PutPage(cnt, blob, 0, int64(len(chunk1)-1), PageWriteTypeUpdate, chunk1), chk.IsNil) - c.Assert(cli.PutPage(cnt, blob, int64(len(chunk1)), int64(len(chunk1)+len(chunk2)-1), PageWriteTypeUpdate, chunk2), chk.IsNil) + c.Assert(cli.PutPage(cnt, blob, 0, int64(len(chunk1)-1), PageWriteTypeUpdate, chunk1, nil), chk.IsNil) + c.Assert(cli.PutPage(cnt, blob, int64(len(chunk1)), int64(len(chunk1)+len(chunk2)-1), PageWriteTypeUpdate, chunk2, nil), chk.IsNil) // Verify contents - out, err := cli.GetBlobRange(cnt, blob, fmt.Sprintf("%v-%v", 0, len(chunk1)+len(chunk2)-1)) + out, err := cli.GetBlobRange(cnt, blob, fmt.Sprintf("%v-%v", 0, len(chunk1)+len(chunk2)-1), nil) c.Assert(err, chk.IsNil) defer out.Close() blobContents, err := ioutil.ReadAll(out) @@ -586,10 +842,10 @@ // Overwrite first half of chunk1 chunk0 := []byte(randString(512)) - c.Assert(cli.PutPage(cnt, blob, 0, int64(len(chunk0)-1), PageWriteTypeUpdate, chunk0), chk.IsNil) + c.Assert(cli.PutPage(cnt, blob, 0, int64(len(chunk0)-1), PageWriteTypeUpdate, chunk0, nil), chk.IsNil) // Verify contents - out, err = cli.GetBlobRange(cnt, blob, fmt.Sprintf("%v-%v", 0, len(chunk1)+len(chunk2)-1)) + out, err = cli.GetBlobRange(cnt, blob, fmt.Sprintf("%v-%v", 0, len(chunk1)+len(chunk2)-1), nil) c.Assert(err, chk.IsNil) defer out.Close() blobContents, err = ioutil.ReadAll(out) @@ -605,17 +861,17 @@ blob := randString(20) size := int64(10 * 1024 * 1024) // larger than we'll use - c.Assert(cli.PutPageBlob(cnt, blob, size), chk.IsNil) + c.Assert(cli.PutPageBlob(cnt, blob, size, nil), chk.IsNil) // Put 0-2047 chunk := []byte(randString(2048)) - c.Assert(cli.PutPage(cnt, blob, 0, 2047, PageWriteTypeUpdate, chunk), chk.IsNil) + c.Assert(cli.PutPage(cnt, blob, 0, 2047, PageWriteTypeUpdate, chunk, nil), 
chk.IsNil) // Clear 512-1023 - c.Assert(cli.PutPage(cnt, blob, 512, 1023, PageWriteTypeClear, nil), chk.IsNil) + c.Assert(cli.PutPage(cnt, blob, 512, 1023, PageWriteTypeClear, nil, nil), chk.IsNil) // Verify contents - out, err := cli.GetBlobRange(cnt, blob, "0-2047") + out, err := cli.GetBlobRange(cnt, blob, "0-2047", nil) c.Assert(err, chk.IsNil) contents, err := ioutil.ReadAll(out) c.Assert(err, chk.IsNil) @@ -631,7 +887,7 @@ blob := randString(20) size := int64(10 * 1024 * 1024) // larger than we'll use - c.Assert(cli.PutPageBlob(cnt, blob, size), chk.IsNil) + c.Assert(cli.PutPageBlob(cnt, blob, size, nil), chk.IsNil) // Get page ranges on empty blob out, err := cli.GetPageRanges(cnt, blob) @@ -639,20 +895,73 @@ c.Assert(len(out.PageList), chk.Equals, 0) // Add 0-512 page - c.Assert(cli.PutPage(cnt, blob, 0, 511, PageWriteTypeUpdate, []byte(randString(512))), chk.IsNil) + c.Assert(cli.PutPage(cnt, blob, 0, 511, PageWriteTypeUpdate, []byte(randString(512)), nil), chk.IsNil) out, err = cli.GetPageRanges(cnt, blob) c.Assert(err, chk.IsNil) c.Assert(len(out.PageList), chk.Equals, 1) // Add 1024-2048 - c.Assert(cli.PutPage(cnt, blob, 1024, 2047, PageWriteTypeUpdate, []byte(randString(1024))), chk.IsNil) + c.Assert(cli.PutPage(cnt, blob, 1024, 2047, PageWriteTypeUpdate, []byte(randString(1024)), nil), chk.IsNil) out, err = cli.GetPageRanges(cnt, blob) c.Assert(err, chk.IsNil) c.Assert(len(out.PageList), chk.Equals, 2) } +func (s *StorageBlobSuite) TestPutAppendBlob(c *chk.C) { + cli := getBlobClient(c) + cnt := randContainer() + c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) + defer cli.deleteContainer(cnt) + + blob := randString(20) + c.Assert(cli.PutAppendBlob(cnt, blob, nil), chk.IsNil) + + // Verify + props, err := cli.GetBlobProperties(cnt, blob) + c.Assert(err, chk.IsNil) + c.Assert(props.ContentLength, chk.Equals, int64(0)) + c.Assert(props.BlobType, chk.Equals, BlobTypeAppend) +} + +func (s *StorageBlobSuite) 
TestPutAppendBlobAppendBlocks(c *chk.C) { + cli := getBlobClient(c) + cnt := randContainer() + c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) + defer cli.deleteContainer(cnt) + + blob := randString(20) + c.Assert(cli.PutAppendBlob(cnt, blob, nil), chk.IsNil) + + chunk1 := []byte(randString(1024)) + chunk2 := []byte(randString(512)) + + // Append first block + c.Assert(cli.AppendBlock(cnt, blob, chunk1, nil), chk.IsNil) + + // Verify contents + out, err := cli.GetBlobRange(cnt, blob, fmt.Sprintf("%v-%v", 0, len(chunk1)-1), nil) + c.Assert(err, chk.IsNil) + defer out.Close() + blobContents, err := ioutil.ReadAll(out) + c.Assert(err, chk.IsNil) + c.Assert(blobContents, chk.DeepEquals, chunk1) + out.Close() + + // Append second block + c.Assert(cli.AppendBlock(cnt, blob, chunk2, nil), chk.IsNil) + + // Verify contents + out, err = cli.GetBlobRange(cnt, blob, fmt.Sprintf("%v-%v", 0, len(chunk1)+len(chunk2)-1), nil) + c.Assert(err, chk.IsNil) + defer out.Close() + blobContents, err = ioutil.ReadAll(out) + c.Assert(err, chk.IsNil) + c.Assert(blobContents, chk.DeepEquals, append(chunk1, chunk2...)) + out.Close() +} + func deleteTestContainers(cli BlobStorageClient) error { for { resp, err := cli.ListContainers(ListContainersParameters{Prefix: testContainerPrefix}) diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/storage/client.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/storage/client.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/storage/client.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/storage/client.go 2016-10-13 14:32:06.000000000 +0000 @@ -4,6 +4,7 @@ import ( "bytes" "encoding/base64" + "encoding/json" "encoding/xml" "errors" "fmt" @@ -24,7 +25,7 @@ // DefaultAPIVersion is the Azure Storage API version string used when a // basic client is created. 
- DefaultAPIVersion = "2014-02-14" + DefaultAPIVersion = "2015-02-21" defaultUseHTTPS = true @@ -37,6 +38,10 @@ // Client is the object that needs to be constructed to perform // operations on the storage account. type Client struct { + // HTTPClient is the http.Client used to initiate API + // requests. If it is nil, http.DefaultClient is used. + HTTPClient *http.Client + accountName string accountKey []byte useHTTPS bool @@ -50,6 +55,11 @@ body io.ReadCloser } +type odataResponse struct { + storageResponse + odata odataErrorMessage +} + // AzureStorageServiceError contains fields of the error response from // Azure Storage Service REST API. See https://msdn.microsoft.com/en-us/library/azure/dd179382.aspx // Some fields might be specific to certain calls. @@ -64,6 +74,20 @@ RequestID string } +type odataErrorMessageMessage struct { + Lang string `json:"lang"` + Value string `json:"value"` +} + +type odataErrorMessageInternal struct { + Code string `json:"code"` + Message odataErrorMessageMessage `json:"message"` +} + +type odataErrorMessage struct { + Err odataErrorMessageInternal `json:"odata.error"` +} + // UnexpectedStatusCodeError is returned when a storage service responds with neither an error // nor with an HTTP status code indicating success. type UnexpectedStatusCodeError struct { @@ -108,7 +132,7 @@ key, err := base64.StdEncoding.DecodeString(accountKey) if err != nil { - return c, err + return c, fmt.Errorf("azure: malformed storage account key: %v", err) } return Client{ @@ -162,6 +186,12 @@ return QueueServiceClient{c} } +// GetTableService returns a TableServiceClient which can operate on the table +// service of the storage account. +func (c Client) GetTableService() TableServiceClient { + return TableServiceClient{c} +} + // GetFileService returns a FileServiceClient which can operate on the file // service of the storage account. 
func (c Client) GetFileService() FileServiceClient { @@ -224,6 +254,22 @@ return ch } +func (c Client) buildCanonicalizedResourceTable(uri string) (string, error) { + errMsg := "buildCanonicalizedResourceTable error: %s" + u, err := url.Parse(uri) + if err != nil { + return "", fmt.Errorf(errMsg, err.Error()) + } + + cr := "/" + c.accountName + + if len(u.Path) > 0 { + cr += u.Path + } + + return cr, nil +} + func (c Client) buildCanonicalizedResource(uri string) (string, error) { errMsg := "buildCanonicalizedResource error: %s" u, err := url.Parse(uri) @@ -232,6 +278,7 @@ } cr := "/" + c.accountName + if len(u.Path) > 0 { cr += u.Path } @@ -262,15 +309,20 @@ } } } + return cr, nil } func (c Client) buildCanonicalizedString(verb string, headers map[string]string, canonicalizedResource string) string { + contentLength := headers["Content-Length"] + if contentLength == "0" { + contentLength = "" + } canonicalizedString := fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s", verb, headers["Content-Encoding"], headers["Content-Language"], - headers["Content-Length"], + contentLength, headers["Content-MD5"], headers["Content-Type"], headers["Date"], @@ -300,6 +352,7 @@ if err != nil { return nil, errors.New("azure/storage: error creating request: " + err.Error()) } + if clstr, ok := headers["Content-Length"]; ok { // content length header is being signed, but completely ignored by golang. 
// instead we have to use the ContentLength property on the request struct @@ -313,7 +366,11 @@ for k, v := range headers { req.Header.Add(k, v) } - httpClient := http.Client{} + + httpClient := c.HTTPClient + if httpClient == nil { + httpClient = http.DefaultClient + } resp, err := httpClient.Do(req) if err != nil { return nil, err @@ -351,6 +408,70 @@ body: resp.Body}, nil } +func (c Client) execInternalJSON(verb, url string, headers map[string]string, body io.Reader) (*odataResponse, error) { + req, err := http.NewRequest(verb, url, body) + for k, v := range headers { + req.Header.Add(k, v) + } + + httpClient := c.HTTPClient + if httpClient == nil { + httpClient = http.DefaultClient + } + + resp, err := httpClient.Do(req) + if err != nil { + return nil, err + } + + respToRet := &odataResponse{} + respToRet.body = resp.Body + respToRet.statusCode = resp.StatusCode + respToRet.headers = resp.Header + + statusCode := resp.StatusCode + if statusCode >= 400 && statusCode <= 505 { + var respBody []byte + respBody, err = readResponseBody(resp) + if err != nil { + return nil, err + } + + if len(respBody) == 0 { + // no error in response body + err = fmt.Errorf("storage: service returned without a response body (%d)", resp.StatusCode) + return respToRet, err + } + // try unmarshal as odata.error json + err = json.Unmarshal(respBody, &respToRet.odata) + return respToRet, err + } + + return respToRet, nil +} + +func (c Client) createSharedKeyLite(url string, headers map[string]string) (string, error) { + can, err := c.buildCanonicalizedResourceTable(url) + + if err != nil { + return "", err + } + strToSign := headers["x-ms-date"] + "\n" + can + + hmac := c.computeHmac256(strToSign) + return fmt.Sprintf("SharedKeyLite %s:%s", c.accountName, hmac), nil +} + +func (c Client) execTable(verb, url string, headers map[string]string, body io.Reader) (*odataResponse, error) { + var err error + headers["Authorization"], err = c.createSharedKeyLite(url, headers) + if err != nil { + 
return nil, err + } + + return c.execInternalJSON(verb, url, headers, body) +} + func readResponseBody(resp *http.Response) ([]byte, error) { defer resp.Body.Close() out, err := ioutil.ReadAll(resp.Body) @@ -371,7 +492,8 @@ } func (e AzureStorageServiceError) Error() string { - return fmt.Sprintf("storage: service returned error: StatusCode=%d, ErrorCode=%s, ErrorMessage=%s, RequestId=%s", e.StatusCode, e.Code, e.Message, e.RequestID) + return fmt.Sprintf("storage: service returned error: StatusCode=%d, ErrorCode=%s, ErrorMessage=%s, RequestId=%s, QueryParameterName=%s, QueryParameterValue=%s", + e.StatusCode, e.Code, e.Message, e.RequestID, e.QueryParameterName, e.QueryParameterValue) } // checkRespCode returns UnexpectedStatusError if the given response code is not diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/storage/client_test.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/storage/client_test.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/storage/client_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/storage/client_test.go 2016-10-13 14:32:06.000000000 +0000 @@ -6,7 +6,7 @@ "os" "testing" - chk "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/gopkg.in/check.v1" + chk "gopkg.in/check.v1" ) // Hook up gocheck to testing @@ -31,6 +31,11 @@ return cli } +func (s *StorageClientSuite) TestMalformedKeyError(c *chk.C) { + _, err := NewBasicClient("foo", "malformed") + c.Assert(err, chk.ErrorMatches, "azure: malformed storage account key: .*") +} + func (s *StorageClientSuite) TestGetBaseURL_Basic_Https(c *chk.C) { cli, err := NewBasicClient("foo", "YmFy") c.Assert(err, chk.IsNil) diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/storage/file.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/storage/file.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/storage/file.go 2016-08-16 08:56:25.000000000 +0000 +++ 
juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/storage/file.go 2016-10-13 14:32:06.000000000 +0000 @@ -49,7 +49,6 @@ func (f FileServiceClient) createShare(name string) (*storageResponse, error) { uri := f.client.getEndpoint(fileServiceName, pathForFileShare(name), url.Values{"restype": {"share"}}) headers := f.client.getStandardHeaders() - headers["Content-Length"] = "0" return f.client.exec("PUT", uri, headers, nil) } diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/storage/file_test.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/storage/file_test.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/storage/file_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/storage/file_test.go 2016-10-13 14:32:06.000000000 +0000 @@ -1,7 +1,7 @@ package storage import ( - chk "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/gopkg.in/check.v1" + chk "gopkg.in/check.v1" ) type StorageFileSuite struct{} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/storage/queue.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/storage/queue.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/storage/queue.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/storage/queue.go 2016-10-13 14:32:06.000000000 +0000 @@ -134,7 +134,6 @@ func (c QueueServiceClient) SetMetadata(name string, metadata map[string]string) error { uri := c.client.getEndpoint(queueServiceName, pathForQueue(name), url.Values{"comp": []string{"metadata"}}) headers := c.client.getStandardHeaders() - headers["Content-Length"] = "0" for k, v := range metadata { headers[userDefinedMetadataHeaderPrefix+k] = v } @@ -195,7 +194,6 @@ func (c QueueServiceClient) CreateQueue(name string) error { uri := c.client.getEndpoint(queueServiceName, pathForQueue(name), url.Values{}) headers := c.client.getStandardHeaders() - headers["Content-Length"] = 
"0" resp, err := c.client.exec("PUT", uri, headers, nil) if err != nil { return err diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/storage/queue_test.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/storage/queue_test.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/storage/queue_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/storage/queue_test.go 2016-10-13 14:32:06.000000000 +0000 @@ -3,7 +3,7 @@ import ( "time" - chk "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/gopkg.in/check.v1" + chk "gopkg.in/check.v1" ) type StorageQueueSuite struct{} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/storage/table_entities.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/storage/table_entities.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/storage/table_entities.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/storage/table_entities.go 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,351 @@ +package storage + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "net/http" + "net/url" + "reflect" +) + +const ( + partitionKeyNode = "PartitionKey" + rowKeyNode = "RowKey" + tag = "table" + tagIgnore = "-" + continuationTokenPartitionKeyHeader = "X-Ms-Continuation-Nextpartitionkey" + continuationTokenRowHeader = "X-Ms-Continuation-Nextrowkey" + maxTopParameter = 1000 +) + +type queryTablesResponse struct { + TableName []struct { + TableName string `json:"TableName"` + } `json:"value"` +} + +const ( + tableOperationTypeInsert = iota + tableOperationTypeUpdate = iota + tableOperationTypeMerge = iota + tableOperationTypeInsertOrReplace = iota + tableOperationTypeInsertOrMerge = iota +) + +type tableOperation int + +// TableEntity interface specifies +// the functions needed to support +// marshaling and unmarshaling into +// Azure Tables. 
The struct must only contain +// simple types because Azure Tables do not +// support hierarchy. +type TableEntity interface { + PartitionKey() string + RowKey() string + SetPartitionKey(string) error + SetRowKey(string) error +} + +// ContinuationToken is an opaque (ie not useful to inspect) +// struct that Get... methods can return if there are more +// entries to be returned than the ones already +// returned. Just pass it to the same function to continue +// receiving the remaining entries. +type ContinuationToken struct { + NextPartitionKey string + NextRowKey string +} + +type getTableEntriesResponse struct { + Elements []map[string]interface{} `json:"value"` +} + +// QueryTableEntities queries the specified table and returns the unmarshaled +// entities of type retType. +// top parameter limits the returned entries up to top. Maximum top +// allowed by Azure API is 1000. In case there are more than top entries to be +// returned the function will return a non nil *ContinuationToken. You can call the +// same function again passing the received ContinuationToken as previousContToken +// parameter in order to get the following entries. The query parameter +// is the odata query. To retrieve all the entries pass the empty string. +// The function returns a pointer to a TableEntity slice, the *ContinuationToken +// if there are more entries to be returned and an error in case something went +// wrong. +// +// Example: +// entities, cToken, err = tSvc.QueryTableEntities("table", cToken, reflect.TypeOf(entity), 20, "") +func (c *TableServiceClient) QueryTableEntities(tableName AzureTable, previousContToken *ContinuationToken, retType reflect.Type, top int, query string) ([]TableEntity, *ContinuationToken, error) { + if top > maxTopParameter { + return nil, nil, fmt.Errorf("top accepts at maximum %d elements. 
Requested %d instead", maxTopParameter, top) + } + + uri := c.client.getEndpoint(tableServiceName, pathForTable(tableName), url.Values{}) + uri += fmt.Sprintf("?$top=%d", top) + if query != "" { + uri += fmt.Sprintf("&$filter=%s", url.QueryEscape(query)) + } + + if previousContToken != nil { + uri += fmt.Sprintf("&NextPartitionKey=%s&NextRowKey=%s", previousContToken.NextPartitionKey, previousContToken.NextRowKey) + } + + headers := c.getStandardHeaders() + + headers["Content-Length"] = "0" + + resp, err := c.client.execTable("GET", uri, headers, nil) + + contToken := extractContinuationTokenFromHeaders(resp.headers) + + if err != nil { + return nil, contToken, err + } + defer resp.body.Close() + + if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { + return nil, contToken, err + } + + retEntries, err := deserializeEntity(retType, resp.body) + if err != nil { + return nil, contToken, err + } + + return retEntries, contToken, nil +} + +// InsertEntity inserts an entity in the specified table. +// The function fails if there is an entity with the same +// PartitionKey and RowKey in the table. 
+func (c *TableServiceClient) InsertEntity(table AzureTable, entity TableEntity) error { + var err error + + if sc, err := c.execTable(table, entity, false, "POST"); err != nil { + return checkRespCode(sc, []int{http.StatusCreated}) + } + + return err +} + +func (c *TableServiceClient) execTable(table AzureTable, entity TableEntity, specifyKeysInURL bool, method string) (int, error) { + uri := c.client.getEndpoint(tableServiceName, pathForTable(table), url.Values{}) + if specifyKeysInURL { + uri += fmt.Sprintf("(PartitionKey='%s',RowKey='%s')", url.QueryEscape(entity.PartitionKey()), url.QueryEscape(entity.RowKey())) + } + + headers := c.getStandardHeaders() + + var buf bytes.Buffer + + if err := injectPartitionAndRowKeys(entity, &buf); err != nil { + return 0, err + } + + headers["Content-Length"] = fmt.Sprintf("%d", buf.Len()) + + var err error + var resp *odataResponse + + resp, err = c.client.execTable(method, uri, headers, &buf) + + if err != nil { + return 0, err + } + + defer resp.body.Close() + + return resp.statusCode, nil +} + +// UpdateEntity updates the contents of an entity with the +// one passed as parameter. The function fails if there is no entity +// with the same PartitionKey and RowKey in the table. +func (c *TableServiceClient) UpdateEntity(table AzureTable, entity TableEntity) error { + var err error + + if sc, err := c.execTable(table, entity, true, "PUT"); err != nil { + return checkRespCode(sc, []int{http.StatusNoContent}) + } + return err +} + +// MergeEntity merges the contents of an entity with the +// one passed as parameter. +// The function fails if there is no entity +// with the same PartitionKey and RowKey in the table. 
+func (c *TableServiceClient) MergeEntity(table AzureTable, entity TableEntity) error { + var err error + + if sc, err := c.execTable(table, entity, true, "MERGE"); err != nil { + return checkRespCode(sc, []int{http.StatusNoContent}) + } + return err +} + +// DeleteEntityWithoutCheck deletes the entity matching by +// PartitionKey and RowKey. There is no check on IfMatch +// parameter so the entity is always deleted. +// The function fails if there is no entity +// with the same PartitionKey and RowKey in the table. +func (c *TableServiceClient) DeleteEntityWithoutCheck(table AzureTable, entity TableEntity) error { + return c.DeleteEntity(table, entity, "*") +} + +// DeleteEntity deletes the entity matching by +// PartitionKey, RowKey and ifMatch field. +// The function fails if there is no entity +// with the same PartitionKey and RowKey in the table or +// the ifMatch is different. +func (c *TableServiceClient) DeleteEntity(table AzureTable, entity TableEntity, ifMatch string) error { + uri := c.client.getEndpoint(tableServiceName, pathForTable(table), url.Values{}) + uri += fmt.Sprintf("(PartitionKey='%s',RowKey='%s')", url.QueryEscape(entity.PartitionKey()), url.QueryEscape(entity.RowKey())) + + headers := c.getStandardHeaders() + + headers["Content-Length"] = "0" + headers["If-Match"] = ifMatch + + resp, err := c.client.execTable("DELETE", uri, headers, nil) + + if err != nil { + return err + } + defer resp.body.Close() + + if err := checkRespCode(resp.statusCode, []int{http.StatusNoContent}); err != nil { + return err + } + + return nil +} + +// InsertOrReplaceEntity inserts an entity in the specified table +// or replaced the existing one. 
+func (c *TableServiceClient) InsertOrReplaceEntity(table AzureTable, entity TableEntity) error { + var err error + + if sc, err := c.execTable(table, entity, true, "PUT"); err != nil { + return checkRespCode(sc, []int{http.StatusNoContent}) + } + return err +} + +// InsertOrMergeEntity inserts an entity in the specified table +// or merges the existing one. +func (c *TableServiceClient) InsertOrMergeEntity(table AzureTable, entity TableEntity) error { + var err error + + if sc, err := c.execTable(table, entity, true, "MERGE"); err != nil { + return checkRespCode(sc, []int{http.StatusNoContent}) + } + return err +} + +func injectPartitionAndRowKeys(entity TableEntity, buf *bytes.Buffer) error { + if err := json.NewEncoder(buf).Encode(entity); err != nil { + return err + } + + dec := make(map[string]interface{}) + if err := json.NewDecoder(buf).Decode(&dec); err != nil { + return err + } + + // Inject PartitionKey and RowKey + dec[partitionKeyNode] = entity.PartitionKey() + dec[rowKeyNode] = entity.RowKey() + + // Remove tagged fields + // The tag is defined in the const section + // This is useful to avoid storing the PartitionKey and RowKey twice. 
+ numFields := reflect.ValueOf(entity).Elem().NumField() + for i := 0; i < numFields; i++ { + f := reflect.ValueOf(entity).Elem().Type().Field(i) + + if f.Tag.Get(tag) == tagIgnore { + // we must look for its JSON name in the dictionary + // as the user can rename it using a tag + jsonName := f.Name + if f.Tag.Get("json") != "" { + jsonName = f.Tag.Get("json") + } + delete(dec, jsonName) + } + } + + buf.Reset() + + if err := json.NewEncoder(buf).Encode(&dec); err != nil { + return err + } + + return nil +} + +func deserializeEntity(retType reflect.Type, reader io.Reader) ([]TableEntity, error) { + buf := new(bytes.Buffer) + + var ret getTableEntriesResponse + if err := json.NewDecoder(reader).Decode(&ret); err != nil { + return nil, err + } + + tEntries := make([]TableEntity, len(ret.Elements)) + + for i, entry := range ret.Elements { + + buf.Reset() + if err := json.NewEncoder(buf).Encode(entry); err != nil { + return nil, err + } + + dec := make(map[string]interface{}) + if err := json.NewDecoder(buf).Decode(&dec); err != nil { + return nil, err + } + + var pKey, rKey string + // strip pk and rk + for key, val := range dec { + switch key { + case partitionKeyNode: + pKey = val.(string) + case rowKeyNode: + rKey = val.(string) + } + } + + delete(dec, partitionKeyNode) + delete(dec, rowKeyNode) + + buf.Reset() + if err := json.NewEncoder(buf).Encode(dec); err != nil { + return nil, err + } + + // Create a empty retType instance + tEntries[i] = reflect.New(retType.Elem()).Interface().(TableEntity) + // Popolate it with the values + if err := json.NewDecoder(buf).Decode(&tEntries[i]); err != nil { + return nil, err + } + + // Reset PartitionKey and RowKey + tEntries[i].SetPartitionKey(pKey) + tEntries[i].SetRowKey(rKey) + } + + return tEntries, nil +} + +func extractContinuationTokenFromHeaders(h http.Header) *ContinuationToken { + ct := ContinuationToken{h.Get(continuationTokenPartitionKeyHeader), h.Get(continuationTokenRowHeader)} + + if ct.NextPartitionKey != "" 
&& ct.NextRowKey != "" { + return &ct + } + return nil +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/storage/table.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/storage/table.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/storage/table.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/storage/table.go 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,129 @@ +package storage + +import ( + "bytes" + "encoding/json" + "fmt" + "net/http" + "net/url" +) + +// TableServiceClient contains operations for Microsoft Azure Table Storage +// Service. +type TableServiceClient struct { + client Client +} + +// AzureTable is the typedef of the Azure Table name +type AzureTable string + +const ( + tablesURIPath = "/Tables" +) + +type createTableRequest struct { + TableName string `json:"TableName"` +} + +func pathForTable(table AzureTable) string { return fmt.Sprintf("%s", table) } + +func (c *TableServiceClient) getStandardHeaders() map[string]string { + return map[string]string{ + "x-ms-version": "2015-02-21", + "x-ms-date": currentTimeRfc1123Formatted(), + "Accept": "application/json;odata=nometadata", + "Accept-Charset": "UTF-8", + "Content-Type": "application/json", + } +} + +// QueryTables returns the tables created in the +// *TableServiceClient storage account. 
+func (c *TableServiceClient) QueryTables() ([]AzureTable, error) { + uri := c.client.getEndpoint(tableServiceName, tablesURIPath, url.Values{}) + + headers := c.getStandardHeaders() + headers["Content-Length"] = "0" + + resp, err := c.client.execTable("GET", uri, headers, nil) + if err != nil { + return nil, err + } + defer resp.body.Close() + + if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { + return nil, err + } + + buf := new(bytes.Buffer) + buf.ReadFrom(resp.body) + + var respArray queryTablesResponse + if err := json.Unmarshal(buf.Bytes(), &respArray); err != nil { + return nil, err + } + + s := make([]AzureTable, len(respArray.TableName)) + for i, elem := range respArray.TableName { + s[i] = AzureTable(elem.TableName) + } + + return s, nil +} + +// CreateTable creates the table given the specific +// name. This function fails if the name is not compliant +// with the specification or the tables already exists. +func (c *TableServiceClient) CreateTable(table AzureTable) error { + uri := c.client.getEndpoint(tableServiceName, tablesURIPath, url.Values{}) + + headers := c.getStandardHeaders() + + req := createTableRequest{TableName: string(table)} + buf := new(bytes.Buffer) + + if err := json.NewEncoder(buf).Encode(req); err != nil { + return err + } + + headers["Content-Length"] = fmt.Sprintf("%d", buf.Len()) + + resp, err := c.client.execTable("POST", uri, headers, buf) + + if err != nil { + return err + } + defer resp.body.Close() + + if err := checkRespCode(resp.statusCode, []int{http.StatusCreated}); err != nil { + return err + } + + return nil +} + +// DeleteTable deletes the table given the specific +// name. This function fails if the table is not present. +// Be advised: DeleteTable deletes all the entries +// that may be present. 
+func (c *TableServiceClient) DeleteTable(table AzureTable) error { + uri := c.client.getEndpoint(tableServiceName, tablesURIPath, url.Values{}) + uri += fmt.Sprintf("('%s')", string(table)) + + headers := c.getStandardHeaders() + + headers["Content-Length"] = "0" + + resp, err := c.client.execTable("DELETE", uri, headers, nil) + + if err != nil { + return err + } + defer resp.body.Close() + + if err := checkRespCode(resp.statusCode, []int{http.StatusNoContent}); err != nil { + return err + + } + return nil +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/storage/table_test.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/storage/table_test.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/storage/table_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/storage/table_test.go 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,287 @@ +package storage + +import ( + "crypto/rand" + "encoding/base64" + "fmt" + "reflect" + + chk "gopkg.in/check.v1" +) + +type TableClient struct{} + +func getTableClient(c *chk.C) TableServiceClient { + return getBasicClient(c).GetTableService() +} + +type CustomEntity struct { + Name string `json:"name"` + Surname string `json:"surname"` + Number int + PKey string `json:"pk" table:"-"` + RKey string `json:"rk" table:"-"` +} + +type CustomEntityExtended struct { + *CustomEntity + ExtraField string +} + +func (c *CustomEntity) PartitionKey() string { + return c.PKey +} + +func (c *CustomEntity) RowKey() string { + return c.RKey +} + +func (c *CustomEntity) SetPartitionKey(s string) error { + c.PKey = s + return nil +} + +func (c *CustomEntity) SetRowKey(s string) error { + c.RKey = s + return nil +} + +func (s *StorageBlobSuite) Test_SharedKeyLite(c *chk.C) { + cli := getTableClient(c) + + // override the accountKey and accountName + // but make sure to reset when returning + oldAK := cli.client.accountKey + oldAN := cli.client.accountName + + 
defer func() { + cli.client.accountKey = oldAK + cli.client.accountName = oldAN + }() + + // don't worry, I've already changed mine :) + key, err := base64.StdEncoding.DecodeString("zHDHGs7C+Di9pZSDMuarxJJz3xRBzAHBYaobxpLEc7kwTptR/hPEa9j93hIfb2Tbe9IA50MViGmjQ6nUF/OVvA==") + if err != nil { + c.Fail() + } + + cli.client.accountKey = key + cli.client.accountName = "mindgotest" + + headers := map[string]string{ + "Accept-Charset": "UTF-8", + "Content-Type": "application/json", + "x-ms-date": "Wed, 23 Sep 2015 16:40:05 GMT", + "Content-Length": "0", + "x-ms-version": "2015-02-21", + "Accept": "application/json;odata=nometadata", + } + url := "https://mindgotest.table.core.windows.net/tquery()" + + ret, err := cli.client.createSharedKeyLite(url, headers) + if err != nil { + c.Fail() + } + + c.Assert(ret, chk.Equals, "SharedKeyLite mindgotest:+32DTgsPUgXPo/O7RYaTs0DllA6FTXMj3uK4Qst8y/E=") +} + +func (s *StorageBlobSuite) Test_CreateAndDeleteTable(c *chk.C) { + cli := getTableClient(c) + + tn := AzureTable(randTable()) + + err := cli.CreateTable(tn) + c.Assert(err, chk.IsNil) + + err = cli.DeleteTable(tn) + c.Assert(err, chk.IsNil) +} + +func (s *StorageBlobSuite) Test_InsertEntities(c *chk.C) { + cli := getTableClient(c) + + tn := AzureTable(randTable()) + + err := cli.CreateTable(tn) + c.Assert(err, chk.IsNil) + defer cli.DeleteTable(tn) + + ce := &CustomEntity{Name: "Luke", Surname: "Skywalker", Number: 1543, PKey: "pkey"} + + for i := 0; i < 12; i++ { + ce.SetRowKey(fmt.Sprintf("%d", i)) + + err = cli.InsertEntity(tn, ce) + c.Assert(err, chk.IsNil) + } +} + +func (s *StorageBlobSuite) Test_InsertOrReplaceEntities(c *chk.C) { + cli := getTableClient(c) + + tn := AzureTable(randTable()) + + err := cli.CreateTable(tn) + c.Assert(err, chk.IsNil) + defer cli.DeleteTable(tn) + + ce := &CustomEntity{Name: "Darth", Surname: "Skywalker", Number: 60, PKey: "pkey", RKey: "5"} + + err = cli.InsertOrReplaceEntity(tn, ce) + c.Assert(err, chk.IsNil) + + cextra := 
&CustomEntityExtended{&CustomEntity{PKey: "pkey", RKey: "5"}, "extra"} + err = cli.InsertOrReplaceEntity(tn, cextra) + c.Assert(err, chk.IsNil) +} + +func (s *StorageBlobSuite) Test_InsertOrMergeEntities(c *chk.C) { + cli := getTableClient(c) + + tn := AzureTable(randTable()) + + err := cli.CreateTable(tn) + c.Assert(err, chk.IsNil) + defer cli.DeleteTable(tn) + + ce := &CustomEntity{Name: "Darth", Surname: "Skywalker", Number: 60, PKey: "pkey", RKey: "5"} + + err = cli.InsertOrMergeEntity(tn, ce) + c.Assert(err, chk.IsNil) + + cextra := &CustomEntityExtended{&CustomEntity{PKey: "pkey", RKey: "5"}, "extra"} + err = cli.InsertOrReplaceEntity(tn, cextra) + c.Assert(err, chk.IsNil) +} + +func (s *StorageBlobSuite) Test_InsertAndGetEntities(c *chk.C) { + cli := getTableClient(c) + + tn := AzureTable(randTable()) + + err := cli.CreateTable(tn) + c.Assert(err, chk.IsNil) + defer cli.DeleteTable(tn) + + ce := &CustomEntity{Name: "Darth", Surname: "Skywalker", Number: 60, PKey: "pkey", RKey: "100"} + c.Assert(cli.InsertOrReplaceEntity(tn, ce), chk.IsNil) + + ce.SetRowKey("200") + c.Assert(cli.InsertOrReplaceEntity(tn, ce), chk.IsNil) + + entries, _, err := cli.QueryTableEntities(tn, nil, reflect.TypeOf(ce), 10, "") + c.Assert(err, chk.IsNil) + + c.Assert(len(entries), chk.Equals, 2) + + c.Assert(ce.RowKey(), chk.Equals, entries[1].RowKey()) + + c.Assert(entries[1].(*CustomEntity), chk.DeepEquals, ce) +} + +func (s *StorageBlobSuite) Test_InsertAndQueryEntities(c *chk.C) { + cli := getTableClient(c) + + tn := AzureTable(randTable()) + + err := cli.CreateTable(tn) + c.Assert(err, chk.IsNil) + defer cli.DeleteTable(tn) + + ce := &CustomEntity{Name: "Darth", Surname: "Skywalker", Number: 60, PKey: "pkey", RKey: "100"} + c.Assert(cli.InsertOrReplaceEntity(tn, ce), chk.IsNil) + + ce.SetRowKey("200") + c.Assert(cli.InsertOrReplaceEntity(tn, ce), chk.IsNil) + + entries, _, err := cli.QueryTableEntities(tn, nil, reflect.TypeOf(ce), 10, "RowKey eq '200'") + c.Assert(err, chk.IsNil) 
+ + c.Assert(len(entries), chk.Equals, 1) + + c.Assert(ce.RowKey(), chk.Equals, entries[0].RowKey()) +} + +func (s *StorageBlobSuite) Test_InsertAndDeleteEntities(c *chk.C) { + cli := getTableClient(c) + + tn := AzureTable(randTable()) + + err := cli.CreateTable(tn) + c.Assert(err, chk.IsNil) + defer cli.DeleteTable(tn) + + ce := &CustomEntity{Name: "Test", Surname: "Test2", Number: 0, PKey: "pkey", RKey: "r01"} + c.Assert(cli.InsertOrReplaceEntity(tn, ce), chk.IsNil) + + ce.Number = 1 + ce.SetRowKey("r02") + c.Assert(cli.InsertOrReplaceEntity(tn, ce), chk.IsNil) + + entries, _, err := cli.QueryTableEntities(tn, nil, reflect.TypeOf(ce), 10, "Number eq 1") + c.Assert(err, chk.IsNil) + + c.Assert(len(entries), chk.Equals, 1) + + c.Assert(entries[0].(*CustomEntity), chk.DeepEquals, ce) + + c.Assert(cli.DeleteEntityWithoutCheck(tn, entries[0]), chk.IsNil) + + entries, _, err = cli.QueryTableEntities(tn, nil, reflect.TypeOf(ce), 10, "") + c.Assert(err, chk.IsNil) + + // only 1 entry must be present + c.Assert(len(entries), chk.Equals, 1) +} + +func (s *StorageBlobSuite) Test_ContinuationToken(c *chk.C) { + cli := getTableClient(c) + + tn := AzureTable(randTable()) + + err := cli.CreateTable(tn) + c.Assert(err, chk.IsNil) + defer cli.DeleteTable(tn) + + var ce *CustomEntity + var ceList [5]*CustomEntity + + for i := 0; i < 5; i++ { + ce = &CustomEntity{Name: "Test", Surname: "Test2", Number: i, PKey: "pkey", RKey: fmt.Sprintf("r%d", i)} + ceList[i] = ce + c.Assert(cli.InsertOrReplaceEntity(tn, ce), chk.IsNil) + } + + // retrieve using top = 2. 
Should return 2 entries, 2 entries and finally + // 1 entry + entries, contToken, err := cli.QueryTableEntities(tn, nil, reflect.TypeOf(ce), 2, "") + c.Assert(err, chk.IsNil) + c.Assert(len(entries), chk.Equals, 2) + c.Assert(entries[0].(*CustomEntity), chk.DeepEquals, ceList[0]) + c.Assert(entries[1].(*CustomEntity), chk.DeepEquals, ceList[1]) + c.Assert(contToken, chk.NotNil) + + entries, contToken, err = cli.QueryTableEntities(tn, contToken, reflect.TypeOf(ce), 2, "") + c.Assert(err, chk.IsNil) + c.Assert(len(entries), chk.Equals, 2) + c.Assert(entries[0].(*CustomEntity), chk.DeepEquals, ceList[2]) + c.Assert(entries[1].(*CustomEntity), chk.DeepEquals, ceList[3]) + c.Assert(contToken, chk.NotNil) + + entries, contToken, err = cli.QueryTableEntities(tn, contToken, reflect.TypeOf(ce), 2, "") + c.Assert(err, chk.IsNil) + c.Assert(len(entries), chk.Equals, 1) + c.Assert(entries[0].(*CustomEntity), chk.DeepEquals, ceList[4]) + c.Assert(contToken, chk.IsNil) +} + +func randTable() string { + const alphanum = "abcdefghijklmnopqrstuvwxyz" + var bytes = make([]byte, 32) + rand.Read(bytes) + for i, b := range bytes { + bytes[i] = alphanum[b%byte(len(alphanum))] + } + return string(bytes) +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/storage/util_test.go juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/storage/util_test.go --- juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/storage/util_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/storage/util_test.go 2016-10-13 14:32:06.000000000 +0000 @@ -7,7 +7,7 @@ "strings" "time" - chk "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/gopkg.in/check.v1" + chk "gopkg.in/check.v1" ) func (s *StorageClientSuite) Test_timeRfc1123Formatted(c *chk.C) { diff -Nru juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/.travis.yml juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/.travis.yml --- 
juju-core-2.0~beta15/src/github.com/Azure/azure-sdk-for-go/.travis.yml 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/azure-sdk-for-go/.travis.yml 2016-10-13 14:32:06.000000000 +0000 @@ -2,17 +2,21 @@ language: go -before_script: - - go get -u golang.org/x/tools/cmd/vet +go: 1.6 + +install: - go get -u github.com/golang/lint/golint + - go get -u github.com/Masterminds/glide + - go get -u golang.org/x/net/context + - export GO15VENDOREXPERIMENT=1 + - glide install -go: tip script: - test -z "$(gofmt -s -l $(find ./arm/* -type d -print) | tee /dev/stderr)" - test -z "$(gofmt -s -l -w management | tee /dev/stderr)" - test -z "$(gofmt -s -l -w storage | tee /dev/stderr)" - go build -v ./... - - test -z "$(go vet $(find ./arm/* -type d -print) | tee /dev/stderr)" + - test -z "$(go vet $(find ./arm/* -type d -print | grep -v '^./arm/resources$') | tee /dev/stderr)" - test -z "$(golint ./arm/... | tee /dev/stderr)" - go test -v ./storage/... -check.v - test -z "$(golint ./storage/... | tee /dev/stderr)" diff -Nru juju-core-2.0~beta15/src/github.com/Azure/go-autorest/autorest/autorest.go juju-core-2.0.0/src/github.com/Azure/go-autorest/autorest/autorest.go --- juju-core-2.0~beta15/src/github.com/Azure/go-autorest/autorest/autorest.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/go-autorest/autorest/autorest.go 2016-10-13 14:31:55.000000000 +0000 @@ -0,0 +1,114 @@ +/* +Package autorest implements an HTTP request pipeline suitable for use across multiple go-routines +and provides the shared routines relied on by AutoRest (see https://github.com/Azure/autorest/) +generated Go code. + +The package breaks sending and responding to HTTP requests into three phases: Preparing, Sending, +and Responding. 
A typical pattern is: + + req, err := Prepare(&http.Request{}, + token.WithAuthorization()) + + resp, err := Send(req, + WithLogging(logger), + DoErrorIfStatusCode(http.StatusInternalServerError), + DoCloseIfError(), + DoRetryForAttempts(5, time.Second)) + + err = Respond(resp, + ByClosing()) + +Each phase relies on decorators to modify and / or manage processing. Decorators may first modify +and then pass the data along, pass the data first and then modify the result, or wrap themselves +around passing the data (such as a logger might do). Decorators run in the order provided. For +example, the following: + + req, err := Prepare(&http.Request{}, + WithBaseURL("https://microsoft.com/"), + WithPath("a"), + WithPath("b"), + WithPath("c")) + +will set the URL to: + + https://microsoft.com/a/b/c + +Preparers and Responders may be shared and re-used (assuming the underlying decorators support +sharing and re-use). Performant use is obtained by creating one or more Preparers and Responders +shared among multiple go-routines, and a single Sender shared among multiple sending go-routines, +all bound together by means of input / output channels. + +Decorators hold their passed state within a closure (such as the path components in the example +above). Be careful to share Preparers and Responders only in a context where such held state +applies. For example, it may not make sense to share a Preparer that applies a query string from a +fixed set of values. Similarly, sharing a Responder that reads the response body into a passed +struct (e.g., ByUnmarshallingJson) is likely incorrect. + +Lastly, the Swagger specification (https://swagger.io) that drives AutoRest +(https://github.com/Azure/autorest/) precisely defines two date forms: date and date-time. The +github.com/Azure/go-autorest/autorest/date package provides time.Time derivations to ensure +correct parsing and formatting. + +Errors raised by autorest objects and methods will conform to the autorest.Error interface. 
+ +See the included examples for more detail. For details on the suggested use of this package by +generated clients, see the Client described below. +*/ +package autorest + +import ( + "net/http" + "time" +) + +const ( + // HeaderLocation specifies the HTTP Location header. + HeaderLocation = "Location" + + // HeaderRetryAfter specifies the HTTP Retry-After header. + HeaderRetryAfter = "Retry-After" +) + +// ResponseHasStatusCode returns true if the status code in the HTTP Response is in the passed set +// and false otherwise. +func ResponseHasStatusCode(resp *http.Response, codes ...int) bool { + return containsInt(codes, resp.StatusCode) +} + +// GetLocation retrieves the URL from the Location header of the passed response. +func GetLocation(resp *http.Response) string { + return resp.Header.Get(HeaderLocation) +} + +// GetRetryAfter extracts the retry delay from the Retry-After header of the passed response. If +// the header is absent or is malformed, it will return the supplied default delay time.Duration. +func GetRetryAfter(resp *http.Response, defaultDelay time.Duration) time.Duration { + retry := resp.Header.Get(HeaderRetryAfter) + if retry == "" { + return defaultDelay + } + + d, err := time.ParseDuration(retry + "s") + if err != nil { + return defaultDelay + } + + return d +} + +// NewPollingRequest allocates and returns a new http.Request to poll for the passed response. 
+func NewPollingRequest(resp *http.Response, cancel <-chan struct{}) (*http.Request, error) { + location := GetLocation(resp) + if location == "" { + return nil, NewErrorWithResponse("autorest", "NewPollingRequest", resp, "Location header missing from response that requires polling") + } + + req, err := Prepare(&http.Request{Cancel: cancel}, + AsGet(), + WithBaseURL(location)) + if err != nil { + return nil, NewErrorWithError(err, "autorest", "NewPollingRequest", nil, "Failure creating poll request to %s", location) + } + + return req, nil +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/go-autorest/autorest/autorest_test.go juju-core-2.0.0/src/github.com/Azure/go-autorest/autorest/autorest_test.go --- juju-core-2.0~beta15/src/github.com/Azure/go-autorest/autorest/autorest_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/go-autorest/autorest/autorest_test.go 2016-10-13 14:31:55.000000000 +0000 @@ -0,0 +1,126 @@ +package autorest + +import ( + "net/http" + "testing" + + "github.com/Azure/go-autorest/autorest/mocks" +) + +func TestResponseHasStatusCode(t *testing.T) { + codes := []int{http.StatusOK, http.StatusAccepted} + resp := &http.Response{StatusCode: http.StatusAccepted} + if !ResponseHasStatusCode(resp, codes...) { + t.Fatalf("autorest: ResponseHasStatusCode failed to find %v in %v", resp.StatusCode, codes) + } +} + +func TestResponseHasStatusCodeNotPresent(t *testing.T) { + codes := []int{http.StatusOK, http.StatusAccepted} + resp := &http.Response{StatusCode: http.StatusInternalServerError} + if ResponseHasStatusCode(resp, codes...) 
{ + t.Fatalf("autorest: ResponseHasStatusCode unexpectedly found %v in %v", resp.StatusCode, codes) + } +} + +func TestNewPollingRequestDoesNotReturnARequestWhenLocationHeaderIsMissing(t *testing.T) { + resp := mocks.NewResponseWithStatus("500 InternalServerError", http.StatusInternalServerError) + + req, _ := NewPollingRequest(resp, nil) + if req != nil { + t.Fatal("autorest: NewPollingRequest returned an http.Request when the Location header was missing") + } +} + +func TestNewPollingRequestReturnsAnErrorWhenPrepareFails(t *testing.T) { + resp := mocks.NewResponseWithStatus("202 Accepted", http.StatusAccepted) + mocks.SetAcceptedHeaders(resp) + resp.Header.Set(http.CanonicalHeaderKey(HeaderLocation), mocks.TestBadURL) + + _, err := NewPollingRequest(resp, nil) + if err == nil { + t.Fatal("autorest: NewPollingRequest failed to return an error when Prepare fails") + } +} + +func TestNewPollingRequestDoesNotReturnARequestWhenPrepareFails(t *testing.T) { + resp := mocks.NewResponseWithStatus("202 Accepted", http.StatusAccepted) + mocks.SetAcceptedHeaders(resp) + resp.Header.Set(http.CanonicalHeaderKey(HeaderLocation), mocks.TestBadURL) + + req, _ := NewPollingRequest(resp, nil) + if req != nil { + t.Fatal("autorest: NewPollingRequest returned an http.Request when Prepare failed") + } +} + +func TestNewPollingRequestReturnsAGetRequest(t *testing.T) { + resp := mocks.NewResponseWithStatus("202 Accepted", http.StatusAccepted) + mocks.SetAcceptedHeaders(resp) + + req, _ := NewPollingRequest(resp, nil) + if req.Method != "GET" { + t.Fatalf("autorest: NewPollingRequest did not create an HTTP GET request -- actual method %v", req.Method) + } +} + +func TestNewPollingRequestProvidesTheURL(t *testing.T) { + resp := mocks.NewResponseWithStatus("202 Accepted", http.StatusAccepted) + mocks.SetAcceptedHeaders(resp) + + req, _ := NewPollingRequest(resp, nil) + if req.URL.String() != mocks.TestURL { + t.Fatalf("autorest: NewPollingRequest did not create an HTTP with the expected 
URL -- received %v, expected %v", req.URL, mocks.TestURL) + } +} + +func TestGetLocation(t *testing.T) { + resp := mocks.NewResponseWithStatus("202 Accepted", http.StatusAccepted) + mocks.SetAcceptedHeaders(resp) + + l := GetLocation(resp) + if len(l) == 0 { + t.Fatalf("autorest: GetLocation failed to return Location header -- expected %v, received %v", mocks.TestURL, l) + } +} + +func TestGetLocationReturnsEmptyStringForMissingLocation(t *testing.T) { + resp := mocks.NewResponseWithStatus("202 Accepted", http.StatusAccepted) + + l := GetLocation(resp) + if len(l) != 0 { + t.Fatalf("autorest: GetLocation return a value without a Location header -- received %v", l) + } +} + +func TestGetRetryAfter(t *testing.T) { + resp := mocks.NewResponseWithStatus("202 Accepted", http.StatusAccepted) + mocks.SetAcceptedHeaders(resp) + + d := GetRetryAfter(resp, DefaultPollingDelay) + if d != mocks.TestDelay { + t.Fatalf("autorest: GetRetryAfter failed to returned the expected delay -- expected %v, received %v", mocks.TestDelay, d) + } +} + +func TestGetRetryAfterReturnsDefaultDelayIfRetryHeaderIsMissing(t *testing.T) { + resp := mocks.NewResponseWithStatus("202 Accepted", http.StatusAccepted) + + d := GetRetryAfter(resp, DefaultPollingDelay) + if d != DefaultPollingDelay { + t.Fatalf("autorest: GetRetryAfter failed to returned the default delay for a missing Retry-After header -- expected %v, received %v", + DefaultPollingDelay, d) + } +} + +func TestGetRetryAfterReturnsDefaultDelayIfRetryHeaderIsMalformed(t *testing.T) { + resp := mocks.NewResponseWithStatus("202 Accepted", http.StatusAccepted) + mocks.SetAcceptedHeaders(resp) + resp.Header.Set(http.CanonicalHeaderKey(HeaderRetryAfter), "a very bad non-integer value") + + d := GetRetryAfter(resp, DefaultPollingDelay) + if d != DefaultPollingDelay { + t.Fatalf("autorest: GetRetryAfter failed to returned the default delay for a malformed Retry-After header -- expected %v, received %v", + DefaultPollingDelay, d) + } +} diff -Nru 
juju-core-2.0~beta15/src/github.com/Azure/go-autorest/autorest/azure/async.go juju-core-2.0.0/src/github.com/Azure/go-autorest/autorest/azure/async.go --- juju-core-2.0~beta15/src/github.com/Azure/go-autorest/autorest/azure/async.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/go-autorest/autorest/azure/async.go 2016-10-13 14:31:55.000000000 +0000 @@ -0,0 +1,307 @@ +package azure + +import ( + "bytes" + "fmt" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/date" + "io/ioutil" + "net/http" + "strings" + "time" +) + +const ( + headerAsyncOperation = "Azure-AsyncOperation" +) + +const ( + methodDelete = "DELETE" + methodPatch = "PATCH" + methodPost = "POST" + methodPut = "PUT" + methodGet = "GET" + + operationInProgress string = "InProgress" + operationCanceled string = "Canceled" + operationFailed string = "Failed" + operationSucceeded string = "Succeeded" +) + +// DoPollForAsynchronous returns a SendDecorator that polls if the http.Response is for an Azure +// long-running operation. It will delay between requests for the duration specified in the +// RetryAfter header or, if the header is absent, the passed delay. Polling may be canceled by +// closing the optional channel on the http.Request. +func DoPollForAsynchronous(delay time.Duration) autorest.SendDecorator { + return func(s autorest.Sender) autorest.Sender { + return autorest.SenderFunc(func(r *http.Request) (resp *http.Response, err error) { + resp, err = s.Do(r) + if err != nil { + return resp, err + } + pollingCodes := []int{http.StatusAccepted, http.StatusCreated, http.StatusOK} + if !autorest.ResponseHasStatusCode(resp, pollingCodes...) 
{ + return resp, nil + } + + ps := pollingState{} + for err == nil { + err = updatePollingState(resp, &ps) + if err != nil { + break + } + if ps.hasTerminated() { + if !ps.hasSucceeded() { + err = ps + } + break + } + + r, err = newPollingRequest(resp, ps) + if err != nil { + return resp, err + } + + delay = autorest.GetRetryAfter(resp, delay) + resp, err = autorest.SendWithSender(s, r, + autorest.AfterDelay(delay)) + } + + return resp, err + }) + } +} + +func getAsyncOperation(resp *http.Response) string { + return resp.Header.Get(http.CanonicalHeaderKey(headerAsyncOperation)) +} + +func hasSucceeded(state string) bool { + return state == operationSucceeded +} + +func hasTerminated(state string) bool { + switch state { + case operationCanceled, operationFailed, operationSucceeded: + return true + default: + return false + } +} + +func hasFailed(state string) bool { + return state == operationFailed +} + +type provisioningTracker interface { + state() string + hasSucceeded() bool + hasTerminated() bool +} + +type operationResource struct { + // Note: + // The specification states services should return the "id" field. However some return it as + // "operationId". 
+ ID string `json:"id"` + OperationID string `json:"operationId"` + Name string `json:"name"` + Status string `json:"status"` + Properties map[string]interface{} `json:"properties"` + OperationError ServiceError `json:"error"` + StartTime date.Time `json:"startTime"` + EndTime date.Time `json:"endTime"` + PercentComplete float64 `json:"percentComplete"` +} + +func (or operationResource) state() string { + return or.Status +} + +func (or operationResource) hasSucceeded() bool { + return hasSucceeded(or.state()) +} + +func (or operationResource) hasTerminated() bool { + return hasTerminated(or.state()) +} + +type provisioningProperties struct { + ProvisioningState string `json:"provisioningState"` +} + +type provisioningStatus struct { + Properties provisioningProperties `json:"properties,omitempty"` + ProvisioningError ServiceError `json:"error,omitempty"` +} + +func (ps provisioningStatus) state() string { + return ps.Properties.ProvisioningState +} + +func (ps provisioningStatus) hasSucceeded() bool { + return hasSucceeded(ps.state()) +} + +func (ps provisioningStatus) hasTerminated() bool { + return hasTerminated(ps.state()) +} + +func (ps provisioningStatus) hasProvisioningError() bool { + return ps.ProvisioningError != ServiceError{} +} + +type pollingResponseFormat string + +const ( + usesOperationResponse pollingResponseFormat = "OperationResponse" + usesProvisioningStatus pollingResponseFormat = "ProvisioningStatus" + formatIsUnknown pollingResponseFormat = "" +) + +type pollingState struct { + responseFormat pollingResponseFormat + uri string + state string + code string + message string +} + +func (ps pollingState) hasSucceeded() bool { + return hasSucceeded(ps.state) +} + +func (ps pollingState) hasTerminated() bool { + return hasTerminated(ps.state) +} + +func (ps pollingState) hasFailed() bool { + return hasFailed(ps.state) +} + +func (ps pollingState) Error() string { + return fmt.Sprintf("Long running operation terminated with status '%s': Code=%q 
Message=%q", ps.state, ps.code, ps.message) +} + +// updatePollingState maps the operation status -- retrieved from either a provisioningState +// field, the status field of an OperationResource, or inferred from the HTTP status code -- +// into a well-known states. Since the process begins from the initial request, the state +// always comes from either a the provisioningState returned or is inferred from the HTTP +// status code. Subsequent requests will read an Azure OperationResource object if the +// service initially returned the Azure-AsyncOperation header. The responseFormat field notes +// the expected response format. +func updatePollingState(resp *http.Response, ps *pollingState) error { + // Determine the response shape + // -- The first response will always be a provisioningStatus response; only the polling requests, + // depending on the header returned, may be something otherwise. + var pt provisioningTracker + if ps.responseFormat == usesOperationResponse { + pt = &operationResource{} + } else { + pt = &provisioningStatus{} + } + + // If this is the first request (that is, the polling response shape is unknown), determine how + // to poll and what to expect + if ps.responseFormat == formatIsUnknown { + req := resp.Request + if req == nil { + return autorest.NewError("azure", "updatePollingState", "Azure Polling Error - Original HTTP request is missing") + } + + // Prefer the Azure-AsyncOperation header + ps.uri = getAsyncOperation(resp) + if ps.uri != "" { + ps.responseFormat = usesOperationResponse + } else { + ps.responseFormat = usesProvisioningStatus + } + + // Else, use the Location header + if ps.uri == "" { + ps.uri = autorest.GetLocation(resp) + } + + // Lastly, requests against an existing resource, use the last request URI + if ps.uri == "" { + m := strings.ToUpper(req.Method) + if m == methodPatch || m == methodPut || m == methodGet { + ps.uri = req.URL.String() + } + } + } + + // Read and interpret the response (saving the Body in case 
no polling is necessary) + b := &bytes.Buffer{} + err := autorest.Respond(resp, + autorest.ByCopying(b), + autorest.ByUnmarshallingJSON(pt), + autorest.ByClosing()) + resp.Body = ioutil.NopCloser(b) + if err != nil { + return err + } + + // Interpret the results + // -- Terminal states apply regardless + // -- Unknown states are per-service inprogress states + // -- Otherwise, infer state from HTTP status code + if pt.hasTerminated() { + ps.state = pt.state() + } else if pt.state() != "" { + ps.state = operationInProgress + } else { + switch resp.StatusCode { + case http.StatusAccepted: + ps.state = operationInProgress + + case http.StatusNoContent, http.StatusCreated, http.StatusOK: + ps.state = operationSucceeded + + default: + ps.state = operationFailed + } + } + + if ps.state == operationInProgress && ps.uri == "" { + return autorest.NewError("azure", "updatePollingState", "Azure Polling Error - Unable to obtain polling URI for %s %s", resp.Request.Method, resp.Request.URL) + } + + // For failed operation, check for error code and message in + // -- Operation resource + // -- Response + // -- Otherwise, Unknown + if ps.hasFailed() { + if ps.responseFormat == usesOperationResponse { + or := pt.(*operationResource) + ps.code = or.OperationError.Code + ps.message = or.OperationError.Message + } else { + p := pt.(*provisioningStatus) + if p.hasProvisioningError() { + ps.code = p.ProvisioningError.Code + ps.message = p.ProvisioningError.Message + } else { + ps.code = "Unknown" + ps.message = "None" + } + } + } + return nil +} + +func newPollingRequest(resp *http.Response, ps pollingState) (*http.Request, error) { + req := resp.Request + if req == nil { + return nil, autorest.NewError("azure", "newPollingRequest", "Azure Polling Error - Original HTTP request is missing") + } + + reqPoll, err := autorest.Prepare(&http.Request{Cancel: req.Cancel}, + autorest.AsGet(), + autorest.WithBaseURL(ps.uri)) + if err != nil { + return nil, autorest.NewErrorWithError(err, 
"azure", "newPollingRequest", nil, "Failure creating poll request to %s", ps.uri) + } + + return reqPoll, nil +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/go-autorest/autorest/azure/async_test.go juju-core-2.0.0/src/github.com/Azure/go-autorest/autorest/azure/async_test.go --- juju-core-2.0~beta15/src/github.com/Azure/go-autorest/autorest/azure/async_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/go-autorest/autorest/azure/async_test.go 2016-10-13 14:31:55.000000000 +0000 @@ -0,0 +1,1115 @@ +package azure + +import ( + "fmt" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/mocks" + "io/ioutil" + "net/http" + "reflect" + "strings" + "sync" + "testing" + "time" +) + +func TestGetAsyncOperation_ReturnsAzureAsyncOperationHeader(t *testing.T) { + r := newAsynchronousResponse() + + if getAsyncOperation(r) != mocks.TestAzureAsyncURL { + t.Fatalf("azure: getAsyncOperation failed to extract the Azure-AsyncOperation header -- expected %v, received %v", mocks.TestURL, getAsyncOperation(r)) + } +} + +func TestGetAsyncOperation_ReturnsEmptyStringIfHeaderIsAbsent(t *testing.T) { + r := mocks.NewResponse() + + if len(getAsyncOperation(r)) != 0 { + t.Fatalf("azure: getAsyncOperation failed to return empty string when the Azure-AsyncOperation header is absent -- received %v", getAsyncOperation(r)) + } +} + +func TestHasSucceeded_ReturnsTrueForSuccess(t *testing.T) { + if !hasSucceeded(operationSucceeded) { + t.Fatal("azure: hasSucceeded failed to return true for success") + } +} + +func TestHasSucceeded_ReturnsFalseOtherwise(t *testing.T) { + if hasSucceeded("not a success string") { + t.Fatal("azure: hasSucceeded returned true for a non-success") + } +} + +func TestHasTerminated_ReturnsTrueForValidTerminationStates(t *testing.T) { + for _, state := range []string{operationSucceeded, operationCanceled, operationFailed} { + if !hasTerminated(state) { + t.Fatalf("azure: hasTerminated failed to return 
true for the '%s' state", state) + } + } +} + +func TestHasTerminated_ReturnsFalseForUnknownStates(t *testing.T) { + if hasTerminated("not a known state") { + t.Fatal("azure: hasTerminated returned true for an unknown state") + } +} + +func TestOperationError_ErrorReturnsAString(t *testing.T) { + s := (ServiceError{Code: "server code", Message: "server error"}).Error() + if s == "" { + t.Fatalf("azure: operationError#Error failed to return an error") + } + if !strings.Contains(s, "server code") || !strings.Contains(s, "server error") { + t.Fatalf("azure: operationError#Error returned a malformed error -- error='%v'", s) + } +} + +func TestOperationResource_StateReturnsState(t *testing.T) { + if (operationResource{Status: "state"}).state() != "state" { + t.Fatalf("azure: operationResource#state failed to return the correct state") + } +} + +func TestOperationResource_HasSucceededReturnsFalseIfNotSuccess(t *testing.T) { + if (operationResource{Status: "not a success string"}).hasSucceeded() { + t.Fatalf("azure: operationResource#hasSucceeded failed to return false for a canceled operation") + } +} + +func TestOperationResource_HasSucceededReturnsTrueIfSuccessful(t *testing.T) { + if !(operationResource{Status: operationSucceeded}).hasSucceeded() { + t.Fatalf("azure: operationResource#hasSucceeded failed to return true for a successful operation") + } +} + +func TestOperationResource_HasTerminatedReturnsTrueForKnownStates(t *testing.T) { + for _, state := range []string{operationSucceeded, operationCanceled, operationFailed} { + if !(operationResource{Status: state}).hasTerminated() { + t.Fatalf("azure: operationResource#hasTerminated failed to return true for the '%s' state", state) + } + } +} + +func TestOperationResource_HasTerminatedReturnsFalseForUnknownStates(t *testing.T) { + if (operationResource{Status: "not a known state"}).hasTerminated() { + t.Fatalf("azure: operationResource#hasTerminated returned true for a non-terminal operation") + } +} + +func 
TestProvisioningStatus_StateReturnsState(t *testing.T) { + if (provisioningStatus{Properties: provisioningProperties{"state"}}).state() != "state" { + t.Fatalf("azure: provisioningStatus#state failed to return the correct state") + } +} + +func TestProvisioningStatus_HasSucceededReturnsFalseIfNotSuccess(t *testing.T) { + if (provisioningStatus{Properties: provisioningProperties{"not a success string"}}).hasSucceeded() { + t.Fatalf("azure: provisioningStatus#hasSucceeded failed to return false for a canceled operation") + } +} + +func TestProvisioningStatus_HasSucceededReturnsTrueIfSuccessful(t *testing.T) { + if !(provisioningStatus{Properties: provisioningProperties{operationSucceeded}}).hasSucceeded() { + t.Fatalf("azure: provisioningStatus#hasSucceeded failed to return true for a successful operation") + } +} + +func TestProvisioningStatus_HasTerminatedReturnsTrueForKnownStates(t *testing.T) { + for _, state := range []string{operationSucceeded, operationCanceled, operationFailed} { + if !(provisioningStatus{Properties: provisioningProperties{state}}).hasTerminated() { + t.Fatalf("azure: provisioningStatus#hasTerminated failed to return true for the '%s' state", state) + } + } +} + +func TestProvisioningStatus_HasTerminatedReturnsFalseForUnknownStates(t *testing.T) { + if (provisioningStatus{Properties: provisioningProperties{"not a known state"}}).hasTerminated() { + t.Fatalf("azure: provisioningStatus#hasTerminated returned true for a non-terminal operation") + } +} + +func TestPollingState_HasSucceededReturnsFalseIfNotSuccess(t *testing.T) { + if (pollingState{state: "not a success string"}).hasSucceeded() { + t.Fatalf("azure: pollingState#hasSucceeded failed to return false for a canceled operation") + } +} + +func TestPollingState_HasSucceededReturnsTrueIfSuccessful(t *testing.T) { + if !(pollingState{state: operationSucceeded}).hasSucceeded() { + t.Fatalf("azure: pollingState#hasSucceeded failed to return true for a successful operation") + } +} + +func 
TestPollingState_HasTerminatedReturnsTrueForKnownStates(t *testing.T) { + for _, state := range []string{operationSucceeded, operationCanceled, operationFailed} { + if !(pollingState{state: state}).hasTerminated() { + t.Fatalf("azure: pollingState#hasTerminated failed to return true for the '%s' state", state) + } + } +} + +func TestPollingState_HasTerminatedReturnsFalseForUnknownStates(t *testing.T) { + if (pollingState{state: "not a known state"}).hasTerminated() { + t.Fatalf("azure: pollingState#hasTerminated returned true for a non-terminal operation") + } +} + +func TestUpdatePollingState_ReturnsAnErrorIfOneOccurs(t *testing.T) { + resp := mocks.NewResponseWithContent(operationResourceIllegal) + err := updatePollingState(resp, &pollingState{}) + if err == nil { + t.Fatalf("azure: updatePollingState failed to return an error after a JSON parsing error") + } +} + +func TestUpdatePollingState_ReturnsTerminatedForKnownProvisioningStates(t *testing.T) { + for _, state := range []string{operationSucceeded, operationCanceled, operationFailed} { + resp := mocks.NewResponseWithContent(fmt.Sprintf(pollingStateFormat, state)) + resp.StatusCode = 42 + ps := &pollingState{responseFormat: usesProvisioningStatus} + updatePollingState(resp, ps) + if !ps.hasTerminated() { + t.Fatalf("azure: updatePollingState failed to return a terminating pollingState for the '%s' state", state) + } + } +} + +func TestUpdatePollingState_ReturnsSuccessForSuccessfulProvisioningState(t *testing.T) { + resp := mocks.NewResponseWithContent(fmt.Sprintf(pollingStateFormat, operationSucceeded)) + resp.StatusCode = 42 + ps := &pollingState{responseFormat: usesProvisioningStatus} + updatePollingState(resp, ps) + if !ps.hasSucceeded() { + t.Fatalf("azure: updatePollingState failed to return a successful pollingState for the '%s' state", operationSucceeded) + } +} + +func TestUpdatePollingState_ReturnsInProgressForAllOtherProvisioningStates(t *testing.T) { + s := "not a recognized state" + resp := 
mocks.NewResponseWithContent(fmt.Sprintf(pollingStateFormat, s)) + resp.StatusCode = 42 + ps := &pollingState{responseFormat: usesProvisioningStatus} + updatePollingState(resp, ps) + if ps.hasTerminated() { + t.Fatalf("azure: updatePollingState returned terminated for unknown state '%s'", s) + } +} + +func TestUpdatePollingState_ReturnsSuccessWhenProvisioningStateFieldIsAbsentForSuccessStatusCodes(t *testing.T) { + for _, sc := range []int{http.StatusOK, http.StatusCreated, http.StatusNoContent} { + resp := mocks.NewResponseWithContent(pollingStateEmpty) + resp.StatusCode = sc + ps := &pollingState{responseFormat: usesProvisioningStatus} + updatePollingState(resp, ps) + if !ps.hasSucceeded() { + t.Fatalf("azure: updatePollingState failed to return success when the provisionState field is absent for Status Code %d", sc) + } + } +} + +func TestUpdatePollingState_ReturnsInProgressWhenProvisioningStateFieldIsAbsentForAccepted(t *testing.T) { + resp := mocks.NewResponseWithContent(pollingStateEmpty) + resp.StatusCode = http.StatusAccepted + ps := &pollingState{responseFormat: usesProvisioningStatus} + updatePollingState(resp, ps) + if ps.hasTerminated() { + t.Fatalf("azure: updatePollingState returned terminated when the provisionState field is absent for Status Code Accepted") + } +} + +func TestUpdatePollingState_ReturnsFailedWhenProvisioningStateFieldIsAbsentForUnknownStatusCodes(t *testing.T) { + resp := mocks.NewResponseWithContent(pollingStateEmpty) + resp.StatusCode = 42 + ps := &pollingState{responseFormat: usesProvisioningStatus} + updatePollingState(resp, ps) + if !ps.hasTerminated() || ps.hasSucceeded() { + t.Fatalf("azure: updatePollingState did not return failed when the provisionState field is absent for an unknown Status Code") + } +} + +func TestUpdatePollingState_ReturnsTerminatedForKnownOperationResourceStates(t *testing.T) { + for _, state := range []string{operationSucceeded, operationCanceled, operationFailed} { + resp := 
mocks.NewResponseWithContent(fmt.Sprintf(operationResourceFormat, state)) + resp.StatusCode = 42 + ps := &pollingState{responseFormat: usesOperationResponse} + updatePollingState(resp, ps) + if !ps.hasTerminated() { + t.Fatalf("azure: updatePollingState failed to return a terminating pollingState for the '%s' state", state) + } + } +} + +func TestUpdatePollingState_ReturnsSuccessForSuccessfulOperationResourceState(t *testing.T) { + resp := mocks.NewResponseWithContent(fmt.Sprintf(operationResourceFormat, operationSucceeded)) + resp.StatusCode = 42 + ps := &pollingState{responseFormat: usesOperationResponse} + updatePollingState(resp, ps) + if !ps.hasSucceeded() { + t.Fatalf("azure: updatePollingState failed to return a successful pollingState for the '%s' state", operationSucceeded) + } +} + +func TestUpdatePollingState_ReturnsInProgressForAllOtherOperationResourceStates(t *testing.T) { + s := "not a recognized state" + resp := mocks.NewResponseWithContent(fmt.Sprintf(operationResourceFormat, s)) + resp.StatusCode = 42 + ps := &pollingState{responseFormat: usesOperationResponse} + updatePollingState(resp, ps) + if ps.hasTerminated() { + t.Fatalf("azure: updatePollingState returned terminated for unknown state '%s'", s) + } +} + +func TestUpdatePollingState_CopiesTheResponseBody(t *testing.T) { + s := fmt.Sprintf(pollingStateFormat, operationSucceeded) + resp := mocks.NewResponseWithContent(s) + resp.StatusCode = 42 + ps := &pollingState{responseFormat: usesOperationResponse} + updatePollingState(resp, ps) + b, err := ioutil.ReadAll(resp.Body) + if err != nil { + t.Fatalf("azure: updatePollingState failed to replace the http.Response Body -- Error='%v'", err) + } + if string(b) != s { + t.Fatalf("azure: updatePollingState failed to copy the http.Response Body -- Expected='%s' Received='%s'", s, string(b)) + } +} + +func TestUpdatePollingState_ClosesTheOriginalResponseBody(t *testing.T) { + resp := mocks.NewResponse() + b := resp.Body.(*mocks.Body) + ps := 
&pollingState{responseFormat: usesProvisioningStatus} + updatePollingState(resp, ps) + if b.IsOpen() { + t.Fatal("azure: updatePollingState failed to close the original http.Response Body") + } +} + +func TestUpdatePollingState_FailsWhenResponseLacksRequest(t *testing.T) { + resp := newAsynchronousResponse() + resp.Request = nil + + ps := pollingState{} + err := updatePollingState(resp, &ps) + if err == nil { + t.Fatal("azure: updatePollingState failed to return an error when the http.Response lacked the original http.Request") + } +} + +func TestUpdatePollingState_SetsTheResponseFormatWhenUsingTheAzureAsyncOperationHeader(t *testing.T) { + ps := pollingState{} + updatePollingState(newAsynchronousResponse(), &ps) + + if ps.responseFormat != usesOperationResponse { + t.Fatal("azure: updatePollingState failed to set the correct response format when using the Azure-AsyncOperation header") + } +} + +func TestUpdatePollingState_SetsTheResponseFormatWhenUsingTheAzureAsyncOperationHeaderIsMissing(t *testing.T) { + resp := newAsynchronousResponse() + resp.Header.Del(http.CanonicalHeaderKey(headerAsyncOperation)) + + ps := pollingState{} + updatePollingState(resp, &ps) + + if ps.responseFormat != usesProvisioningStatus { + t.Fatal("azure: updatePollingState failed to set the correct response format when the Azure-AsyncOperation header is absent") + } +} + +func TestUpdatePollingState_DoesNotChangeAnExistingReponseFormat(t *testing.T) { + resp := newAsynchronousResponse() + resp.Header.Del(http.CanonicalHeaderKey(headerAsyncOperation)) + + ps := pollingState{responseFormat: usesOperationResponse} + updatePollingState(resp, &ps) + + if ps.responseFormat != usesOperationResponse { + t.Fatal("azure: updatePollingState failed to leave an existing response format setting") + } +} + +func TestUpdatePollingState_PrefersTheAzureAsyncOperationHeader(t *testing.T) { + resp := newAsynchronousResponse() + + ps := pollingState{} + updatePollingState(resp, &ps) + + if ps.uri != 
mocks.TestAzureAsyncURL { + t.Fatal("azure: updatePollingState failed to prefer the Azure-AsyncOperation header") + } +} + +func TestUpdatePollingState_PrefersLocationWhenTheAzureAsyncOperationHeaderMissing(t *testing.T) { + resp := newAsynchronousResponse() + resp.Header.Del(http.CanonicalHeaderKey(headerAsyncOperation)) + + ps := pollingState{} + updatePollingState(resp, &ps) + + if ps.uri != mocks.TestLocationURL { + t.Fatal("azure: updatePollingState failed to prefer the Location header when the Azure-AsyncOperation header is missing") + } +} + +func TestUpdatePollingState_UsesTheObjectLocationIfAsyncHeadersAreMissing(t *testing.T) { + resp := newAsynchronousResponse() + resp.Header.Del(http.CanonicalHeaderKey(headerAsyncOperation)) + resp.Header.Del(http.CanonicalHeaderKey(autorest.HeaderLocation)) + resp.Request.Method = methodPatch + + ps := pollingState{} + updatePollingState(resp, &ps) + + if ps.uri != mocks.TestURL { + t.Fatal("azure: updatePollingState failed to use the Object URL when the asynchronous headers are missing") + } +} + +func TestUpdatePollingState_RecognizesLowerCaseHTTPVerbs(t *testing.T) { + for _, m := range []string{"patch", "put", "get"} { + resp := newAsynchronousResponse() + resp.Header.Del(http.CanonicalHeaderKey(headerAsyncOperation)) + resp.Header.Del(http.CanonicalHeaderKey(autorest.HeaderLocation)) + resp.Request.Method = m + + ps := pollingState{} + updatePollingState(resp, &ps) + + if ps.uri != mocks.TestURL { + t.Fatalf("azure: updatePollingState failed to recognize the lower-case HTTP verb '%s'", m) + } + } +} + +func TestUpdatePollingState_ReturnsAnErrorIfAsyncHeadersAreMissingForANewOrDeletedObject(t *testing.T) { + resp := newAsynchronousResponse() + resp.Header.Del(http.CanonicalHeaderKey(headerAsyncOperation)) + resp.Header.Del(http.CanonicalHeaderKey(autorest.HeaderLocation)) + + for _, m := range []string{methodDelete, methodPost} { + resp.Request.Method = m + err := updatePollingState(resp, &pollingState{}) + if err 
== nil { + t.Fatalf("azure: updatePollingState failed to return an error even though it could not determine the polling URL for Method '%s'", m) + } + } +} + +func TestNewPollingRequest_FailsWhenResponseLacksRequest(t *testing.T) { + resp := newAsynchronousResponse() + resp.Request = nil + + _, err := newPollingRequest(resp, pollingState{}) + if err == nil { + t.Fatal("azure: newPollingRequest failed to return an error when the http.Response lacked the original http.Request") + } +} + +func TestNewPollingRequest_ReturnsAnErrorWhenPrepareFails(t *testing.T) { + _, err := newPollingRequest(newAsynchronousResponse(), pollingState{responseFormat: usesOperationResponse, uri: mocks.TestBadURL}) + if err == nil { + t.Fatal("azure: newPollingRequest failed to return an error when Prepare fails") + } +} + +func TestNewPollingRequest_DoesNotReturnARequestWhenPrepareFails(t *testing.T) { + req, _ := newPollingRequest(newAsynchronousResponse(), pollingState{responseFormat: usesOperationResponse, uri: mocks.TestBadURL}) + if req != nil { + t.Fatal("azure: newPollingRequest returned an http.Request when Prepare failed") + } +} + +func TestNewPollingRequest_ReturnsAGetRequest(t *testing.T) { + req, _ := newPollingRequest(newAsynchronousResponse(), pollingState{responseFormat: usesOperationResponse, uri: mocks.TestAzureAsyncURL}) + if req.Method != "GET" { + t.Fatalf("azure: newPollingRequest did not create an HTTP GET request -- actual method %v", req.Method) + } +} + +func TestDoPollForAsynchronous_IgnoresUnspecifiedStatusCodes(t *testing.T) { + client := mocks.NewSender() + + r, _ := autorest.SendWithSender(client, mocks.NewRequest(), + DoPollForAsynchronous(time.Duration(0))) + + if client.Attempts() != 1 { + t.Fatalf("azure: DoPollForAsynchronous polled for unspecified status code") + } + + autorest.Respond(r, + autorest.ByClosing()) +} + +func TestDoPollForAsynchronous_PollsForSpecifiedStatusCodes(t *testing.T) { + client := mocks.NewSender() + 
client.AppendResponse(newAsynchronousResponse()) + + r, _ := autorest.SendWithSender(client, mocks.NewRequest(), + DoPollForAsynchronous(time.Millisecond)) + + if client.Attempts() != 2 { + t.Fatalf("azure: DoPollForAsynchronous failed to poll for specified status code") + } + + autorest.Respond(r, + autorest.ByClosing()) +} + +func TestDoPollForAsynchronous_CanBeCanceled(t *testing.T) { + cancel := make(chan struct{}) + delay := 5 * time.Second + + r1 := newAsynchronousResponse() + + client := mocks.NewSender() + client.AppendResponse(r1) + client.AppendAndRepeatResponse(newOperationResourceResponse("Busy"), -1) + + var wg sync.WaitGroup + wg.Add(1) + start := time.Now() + go func() { + req := mocks.NewRequest() + req.Cancel = cancel + + wg.Done() + + r, _ := autorest.SendWithSender(client, req, + DoPollForAsynchronous(10*time.Second)) + autorest.Respond(r, + autorest.ByClosing()) + }() + wg.Wait() + close(cancel) + time.Sleep(5 * time.Millisecond) + if time.Since(start) >= delay { + t.Fatalf("azure: DoPollForAsynchronous failed to cancel") + } +} + +func TestDoPollForAsynchronous_ClosesAllNonreturnedResponseBodiesWhenPolling(t *testing.T) { + r1 := newAsynchronousResponse() + b1 := r1.Body.(*mocks.Body) + r2 := newOperationResourceResponse("busy") + b2 := r2.Body.(*mocks.Body) + r3 := newOperationResourceResponse(operationSucceeded) + b3 := r3.Body.(*mocks.Body) + + client := mocks.NewSender() + client.AppendResponse(r1) + client.AppendAndRepeatResponse(r2, 2) + client.AppendResponse(r3) + + r, _ := autorest.SendWithSender(client, mocks.NewRequest(), + DoPollForAsynchronous(time.Millisecond)) + + if b1.IsOpen() || b2.IsOpen() || b3.IsOpen() { + t.Fatalf("azure: DoPollForAsynchronous did not close unreturned response bodies") + } + + autorest.Respond(r, + autorest.ByClosing()) +} + +func TestDoPollForAsynchronous_LeavesLastResponseBodyOpen(t *testing.T) { + r1 := newAsynchronousResponse() + r2 := newOperationResourceResponse("busy") + r3 := 
newOperationResourceResponse(operationSucceeded) + + client := mocks.NewSender() + client.AppendResponse(r1) + client.AppendAndRepeatResponse(r2, 2) + client.AppendResponse(r3) + + r, _ := autorest.SendWithSender(client, mocks.NewRequest(), + DoPollForAsynchronous(time.Millisecond)) + + b, err := ioutil.ReadAll(r.Body) + if len(b) <= 0 || err != nil { + t.Fatalf("azure: DoPollForAsynchronous did not leave open the body of the last response - Error='%v'", err) + } + + autorest.Respond(r, + autorest.ByClosing()) +} + +func TestDoPollForAsynchronous_DoesNotPollIfOriginalRequestReturnedAnError(t *testing.T) { + r1 := newAsynchronousResponse() + r2 := newOperationResourceResponse("busy") + + client := mocks.NewSender() + client.AppendResponse(r1) + client.AppendResponse(r2) + client.SetError(fmt.Errorf("Faux Error")) + + r, _ := autorest.SendWithSender(client, mocks.NewRequest(), + DoPollForAsynchronous(time.Millisecond)) + + if client.Attempts() != 1 { + t.Fatalf("azure: DoPollForAsynchronous tried to poll after receiving an error") + } + + autorest.Respond(r, + autorest.ByClosing()) +} + +func TestDoPollForAsynchronous_DoesNotPollIfCreatingOperationRequestFails(t *testing.T) { + r1 := newAsynchronousResponse() + mocks.SetResponseHeader(r1, http.CanonicalHeaderKey(headerAsyncOperation), mocks.TestBadURL) + r2 := newOperationResourceResponse("busy") + + client := mocks.NewSender() + client.AppendResponse(r1) + client.AppendAndRepeatResponse(r2, 2) + + r, _ := autorest.SendWithSender(client, mocks.NewRequest(), + DoPollForAsynchronous(time.Millisecond)) + + if client.Attempts() > 1 { + t.Fatalf("azure: DoPollForAsynchronous polled with an invalidly formed operation request") + } + + autorest.Respond(r, + autorest.ByClosing()) +} + +func TestDoPollForAsynchronous_StopsPollingAfterAnError(t *testing.T) { + r1 := newAsynchronousResponse() + r2 := newOperationResourceResponse("busy") + + client := mocks.NewSender() + client.AppendResponse(r1) + 
client.AppendAndRepeatResponse(r2, 2) + client.SetError(fmt.Errorf("Faux Error")) + client.SetEmitErrorAfter(2) + + r, _ := autorest.SendWithSender(client, mocks.NewRequest(), + DoPollForAsynchronous(time.Millisecond)) + + if client.Attempts() > 3 { + t.Fatalf("azure: DoPollForAsynchronous failed to stop polling after receiving an error") + } + + autorest.Respond(r, + autorest.ByClosing()) +} + +func TestDoPollForAsynchronous_ReturnsPollingError(t *testing.T) { + client := mocks.NewSender() + client.AppendAndRepeatResponse(newAsynchronousResponse(), 5) + client.SetError(fmt.Errorf("Faux Error")) + client.SetEmitErrorAfter(1) + + r, err := autorest.SendWithSender(client, mocks.NewRequest(), + DoPollForAsynchronous(time.Millisecond)) + + if err == nil { + t.Fatalf("azure: DoPollForAsynchronous failed to return error from polling") + } + + autorest.Respond(r, + autorest.ByClosing()) +} + +func TestDoPollForAsynchronous_PollsForStatusAccepted(t *testing.T) { + r1 := newAsynchronousResponse() + r1.Status = "202 Accepted" + r1.StatusCode = http.StatusAccepted + r2 := newOperationResourceResponse("busy") + r3 := newOperationResourceResponse(operationCanceled) + + client := mocks.NewSender() + client.AppendResponse(r1) + client.AppendAndRepeatResponse(r2, 2) + client.AppendAndRepeatResponse(r3, 1) + + r, _ := autorest.SendWithSender(client, mocks.NewRequest(), + DoPollForAsynchronous(time.Millisecond)) + + if client.Attempts() < 4 { + t.Fatalf("azure: DoPollForAsynchronous stopped polling before receiving a terminated OperationResource") + } + + autorest.Respond(r, + autorest.ByClosing()) +} + +func TestDoPollForAsynchronous_PollsForStatusCreated(t *testing.T) { + r1 := newAsynchronousResponse() + r1.Status = "201 Created" + r1.StatusCode = http.StatusCreated + r2 := newOperationResourceResponse("busy") + r3 := newOperationResourceResponse(operationCanceled) + + client := mocks.NewSender() + client.AppendResponse(r1) + client.AppendAndRepeatResponse(r2, 2) + 
client.AppendAndRepeatResponse(r3, 1) + + r, _ := autorest.SendWithSender(client, mocks.NewRequest(), + DoPollForAsynchronous(time.Millisecond)) + + if client.Attempts() < 4 { + t.Fatalf("azure: DoPollForAsynchronous stopped polling before receiving a terminated OperationResource") + } + + autorest.Respond(r, + autorest.ByClosing()) +} + +func TestDoPollForAsynchronous_PollsUntilProvisioningStatusTerminates(t *testing.T) { + r1 := newAsynchronousResponse() + r1.Header.Del(http.CanonicalHeaderKey(headerAsyncOperation)) + r2 := newProvisioningStatusResponse("busy") + r2.Header.Del(http.CanonicalHeaderKey(headerAsyncOperation)) + r3 := newProvisioningStatusResponse(operationCanceled) + r3.Header.Del(http.CanonicalHeaderKey(headerAsyncOperation)) + + client := mocks.NewSender() + client.AppendResponse(r1) + client.AppendAndRepeatResponse(r2, 2) + client.AppendAndRepeatResponse(r3, 1) + + r, _ := autorest.SendWithSender(client, mocks.NewRequest(), + DoPollForAsynchronous(time.Millisecond)) + + if client.Attempts() < 4 { + t.Fatalf("azure: DoPollForAsynchronous stopped polling before receiving a terminated OperationResource") + } + + autorest.Respond(r, + autorest.ByClosing()) +} + +func TestDoPollForAsynchronous_PollsUntilProvisioningStatusSucceeds(t *testing.T) { + r1 := newAsynchronousResponse() + r1.Header.Del(http.CanonicalHeaderKey(headerAsyncOperation)) + r2 := newProvisioningStatusResponse("busy") + r2.Header.Del(http.CanonicalHeaderKey(headerAsyncOperation)) + r3 := newProvisioningStatusResponse(operationSucceeded) + r3.Header.Del(http.CanonicalHeaderKey(headerAsyncOperation)) + + client := mocks.NewSender() + client.AppendResponse(r1) + client.AppendAndRepeatResponse(r2, 2) + client.AppendAndRepeatResponse(r3, 1) + + r, _ := autorest.SendWithSender(client, mocks.NewRequest(), + DoPollForAsynchronous(time.Millisecond)) + + if client.Attempts() < 4 { + t.Fatalf("azure: DoPollForAsynchronous stopped polling before receiving a terminated OperationResource") + } + + 
autorest.Respond(r, + autorest.ByClosing()) +} + +func TestDoPollForAsynchronous_PollsUntilOperationResourceHasTerminated(t *testing.T) { + r1 := newAsynchronousResponse() + r2 := newOperationResourceResponse("busy") + r3 := newOperationResourceResponse(operationCanceled) + + client := mocks.NewSender() + client.AppendResponse(r1) + client.AppendAndRepeatResponse(r2, 2) + client.AppendAndRepeatResponse(r3, 1) + + r, _ := autorest.SendWithSender(client, mocks.NewRequest(), + DoPollForAsynchronous(time.Millisecond)) + + if client.Attempts() < 4 { + t.Fatalf("azure: DoPollForAsynchronous stopped polling before receiving a terminated OperationResource") + } + + autorest.Respond(r, + autorest.ByClosing()) +} + +func TestDoPollForAsynchronous_PollsUntilOperationResourceHasSucceeded(t *testing.T) { + r1 := newAsynchronousResponse() + r2 := newOperationResourceResponse("busy") + r3 := newOperationResourceResponse(operationSucceeded) + + client := mocks.NewSender() + client.AppendResponse(r1) + client.AppendAndRepeatResponse(r2, 2) + client.AppendAndRepeatResponse(r3, 1) + + r, _ := autorest.SendWithSender(client, mocks.NewRequest(), + DoPollForAsynchronous(time.Millisecond)) + + if client.Attempts() < 4 { + t.Fatalf("azure: DoPollForAsynchronous stopped polling before receiving a terminated OperationResource") + } + + autorest.Respond(r, + autorest.ByClosing()) +} + +func TestDoPollForAsynchronous_StopsPollingWhenOperationResourceHasTerminated(t *testing.T) { + r1 := newAsynchronousResponse() + r2 := newOperationResourceResponse("busy") + r3 := newOperationResourceResponse(operationCanceled) + + client := mocks.NewSender() + client.AppendResponse(r1) + client.AppendAndRepeatResponse(r2, 2) + client.AppendAndRepeatResponse(r3, 2) + + r, _ := autorest.SendWithSender(client, mocks.NewRequest(), + DoPollForAsynchronous(time.Millisecond)) + + if client.Attempts() > 4 { + t.Fatalf("azure: DoPollForAsynchronous failed to stop after receiving a terminated OperationResource") + } + 
+ autorest.Respond(r, + autorest.ByClosing()) +} + +func TestDoPollForAsynchronous_ReturnsAnErrorForCanceledOperations(t *testing.T) { + r1 := newAsynchronousResponse() + r2 := newOperationResourceResponse("busy") + r3 := newOperationResourceErrorResponse(operationCanceled) + + client := mocks.NewSender() + client.AppendResponse(r1) + client.AppendAndRepeatResponse(r2, 2) + client.AppendAndRepeatResponse(r3, 1) + + r, err := autorest.SendWithSender(client, mocks.NewRequest(), + DoPollForAsynchronous(time.Millisecond)) + + if err == nil || !strings.Contains(fmt.Sprintf("%v", err), "Canceled") { + t.Fatalf("azure: DoPollForAsynchronous failed to return an appropriate error for a canceled OperationResource") + } + + autorest.Respond(r, + autorest.ByClosing()) +} + +func TestDoPollForAsynchronous_ReturnsAnErrorForFailedOperations(t *testing.T) { + r1 := newAsynchronousResponse() + r2 := newOperationResourceResponse("busy") + r3 := newOperationResourceErrorResponse(operationFailed) + + client := mocks.NewSender() + client.AppendResponse(r1) + client.AppendAndRepeatResponse(r2, 2) + client.AppendAndRepeatResponse(r3, 1) + + r, err := autorest.SendWithSender(client, mocks.NewRequest(), + DoPollForAsynchronous(time.Millisecond)) + + if err == nil || !strings.Contains(fmt.Sprintf("%v", err), "Failed") { + t.Fatalf("azure: DoPollForAsynchronous failed to return an appropriate error for a canceled OperationResource") + } + + autorest.Respond(r, + autorest.ByClosing()) +} + +func TestDoPollForAsynchronous_WithNilURI(t *testing.T) { + r1 := newAsynchronousResponse() + r1.Header.Del(http.CanonicalHeaderKey(headerAsyncOperation)) + r1.Header.Del(http.CanonicalHeaderKey(autorest.HeaderLocation)) + + r2 := newOperationResourceResponse("busy") + r2.Header.Del(http.CanonicalHeaderKey(headerAsyncOperation)) + r2.Header.Del(http.CanonicalHeaderKey(autorest.HeaderLocation)) + + client := mocks.NewSender() + client.AppendResponse(r1) + client.AppendResponse(r2) + + req, _ := 
http.NewRequest("POST", "https://microsoft.com/a/b/c/", mocks.NewBody("")) + r, err := autorest.SendWithSender(client, req, + DoPollForAsynchronous(time.Millisecond)) + + if err == nil { + t.Fatalf("azure: DoPollForAsynchronous failed to return error for nil URI. got: nil; want: Azure Polling Error - Unable to obtain polling URI for POST") + } + + autorest.Respond(r, + autorest.ByClosing()) +} + +func TestDoPollForAsynchronous_ReturnsAnUnknownErrorForFailedOperations(t *testing.T) { + // Return unknown error if error not present in last response + r1 := newAsynchronousResponse() + r1.Header.Del(http.CanonicalHeaderKey(headerAsyncOperation)) + r2 := newProvisioningStatusResponse("busy") + r2.Header.Del(http.CanonicalHeaderKey(headerAsyncOperation)) + r3 := newProvisioningStatusResponse(operationFailed) + r3.Header.Del(http.CanonicalHeaderKey(headerAsyncOperation)) + + client := mocks.NewSender() + client.AppendResponse(r1) + client.AppendAndRepeatResponse(r2, 2) + client.AppendAndRepeatResponse(r3, 1) + + r, err := autorest.SendWithSender(client, mocks.NewRequest(), + DoPollForAsynchronous(time.Millisecond)) + + expected := makeLongRunningOperationErrorString("Unknown", "None") + if err.Error() != expected { + t.Fatalf("azure: DoPollForAsynchronous failed to return an appropriate error message for an unknown error. 
\n expected=%q \n got=%q", + expected, err.Error()) + } + + autorest.Respond(r, + autorest.ByClosing()) +} + +func TestDoPollForAsynchronous_ReturnsErrorForLastErrorResponse(t *testing.T) { + // Return error code and message if error present in last response + r1 := newAsynchronousResponse() + r1.Header.Del(http.CanonicalHeaderKey(headerAsyncOperation)) + r2 := newProvisioningStatusResponse("busy") + r2.Header.Del(http.CanonicalHeaderKey(headerAsyncOperation)) + r3 := newAsynchronousResponseWithError() + r3.Header.Del(http.CanonicalHeaderKey(headerAsyncOperation)) + + client := mocks.NewSender() + client.AppendResponse(r1) + client.AppendAndRepeatResponse(r2, 2) + client.AppendAndRepeatResponse(r3, 1) + + r, err := autorest.SendWithSender(client, mocks.NewRequest(), + DoPollForAsynchronous(time.Millisecond)) + + expected := makeLongRunningOperationErrorString("InvalidParameter", "tom-service-DISCOVERY-server-base-v1.core.local' is not a valid captured VHD blob name prefix.") + if err.Error() != expected { + t.Fatalf("azure: DoPollForAsynchronous failed to return an appropriate error message for an unknown error. 
\n expected=%q \n got=%q", + expected, err.Error()) + } + + autorest.Respond(r, + autorest.ByClosing()) +} + +func TestDoPollForAsynchronous_ReturnsOperationResourceErrorForFailedOperations(t *testing.T) { + // Return Operation resource response with error code and message in last operation resource response + r1 := newAsynchronousResponse() + r2 := newOperationResourceResponse("busy") + r3 := newOperationResourceErrorResponse(operationFailed) + + client := mocks.NewSender() + client.AppendResponse(r1) + client.AppendAndRepeatResponse(r2, 2) + client.AppendAndRepeatResponse(r3, 1) + + r, err := autorest.SendWithSender(client, mocks.NewRequest(), + DoPollForAsynchronous(time.Millisecond)) + + expected := makeLongRunningOperationErrorString("BadArgument", "The provided database 'foo' has an invalid username.") + if err.Error() != expected { + t.Fatalf("azure: DoPollForAsynchronous failed to return an appropriate error message for a failed Operations. \n expected=%q \n got=%q", + expected, err.Error()) + } + + autorest.Respond(r, + autorest.ByClosing()) +} + +func TestDoPollForAsynchronous_ReturnsErrorForFirstPutRequest(t *testing.T) { + // Return 400 bad response with error code and message in first put + r1 := newAsynchronousResponseWithError() + client := mocks.NewSender() + client.AppendResponse(r1) + + res, err := autorest.SendWithSender(client, mocks.NewRequest(), + DoPollForAsynchronous(time.Millisecond)) + if err != nil { + t.Fatalf("azure: DoPollForAsynchronous failed to return an appropriate error message for a failed Operations. 
\n expected=%q \n got=%q", + errorResponse, err.Error()) + } + + err = autorest.Respond(res, + WithErrorUnlessStatusCode(http.StatusAccepted, http.StatusCreated, http.StatusOK), + autorest.ByClosing()) + + reqError, ok := err.(*RequestError) + if !ok { + t.Fatalf("azure: returned error is not azure.RequestError: %T", err) + } + + expected := &RequestError{ + ServiceError: &ServiceError{ + Code: "InvalidParameter", + Message: "tom-service-DISCOVERY-server-base-v1.core.local' is not a valid captured VHD blob name prefix.", + }, + DetailedError: autorest.DetailedError{ + StatusCode: 400, + }, + } + if !reflect.DeepEqual(reqError, expected) { + t.Fatalf("azure: wrong error. expected=%q\ngot=%q", expected, reqError) + } + + defer res.Body.Close() + b, err := ioutil.ReadAll(res.Body) + if err != nil { + t.Fatal(err) + } + if string(b) != errorResponse { + t.Fatalf("azure: Response body is wrong. got=%q expected=%q", string(b), errorResponse) + } + +} + +func TestDoPollForAsynchronous_ReturnsNoErrorForSuccessfulOperations(t *testing.T) { + r1 := newAsynchronousResponse() + r2 := newOperationResourceResponse("busy") + r3 := newOperationResourceErrorResponse(operationSucceeded) + + client := mocks.NewSender() + client.AppendResponse(r1) + client.AppendAndRepeatResponse(r2, 2) + client.AppendAndRepeatResponse(r3, 1) + + r, err := autorest.SendWithSender(client, mocks.NewRequest(), + DoPollForAsynchronous(time.Millisecond)) + + if err != nil { + t.Fatalf("azure: DoPollForAsynchronous returned an error for a successful OperationResource") + } + + autorest.Respond(r, + autorest.ByClosing()) +} + +func TestDoPollForAsynchronous_StopsPollingIfItReceivesAnInvalidOperationResource(t *testing.T) { + r1 := newAsynchronousResponse() + r2 := newOperationResourceResponse("busy") + r3 := newOperationResourceResponse("busy") + r3.Body = mocks.NewBody(operationResourceIllegal) + r4 := newOperationResourceResponse(operationSucceeded) + + client := mocks.NewSender() + 
client.AppendResponse(r1) + client.AppendAndRepeatResponse(r2, 2) + client.AppendAndRepeatResponse(r3, 1) + client.AppendAndRepeatResponse(r4, 1) + + r, err := autorest.SendWithSender(client, mocks.NewRequest(), + DoPollForAsynchronous(time.Millisecond)) + + if client.Attempts() > 4 { + t.Fatalf("azure: DoPollForAsynchronous failed to stop polling after receiving an invalid OperationResource") + } + if err == nil { + t.Fatalf("azure: DoPollForAsynchronous failed to return an error after receving an invalid OperationResource") + } + + autorest.Respond(r, + autorest.ByClosing()) +} + +const ( + operationResourceIllegal = ` + This is not JSON and should fail...badly. + ` + pollingStateFormat = ` + { + "unused" : { + "somefield" : 42 + }, + "properties" : { + "provisioningState": "%s" + } + } + ` + + errorResponse = ` + { + "error" : { + "code" : "InvalidParameter", + "message" : "tom-service-DISCOVERY-server-base-v1.core.local' is not a valid captured VHD blob name prefix." + } + } + ` + + pollingStateEmpty = ` + { + "unused" : { + "somefield" : 42 + }, + "properties" : { + } + } + ` + + operationResourceFormat = ` + { + "id": "/subscriptions/id/locations/westus/operationsStatus/sameguid", + "name": "sameguid", + "status" : "%s", + "startTime" : "2006-01-02T15:04:05Z", + "endTime" : "2006-01-02T16:04:05Z", + "percentComplete" : 50.00, + + "properties" : {} + } + ` + + operationResourceErrorFormat = ` + { + "id": "/subscriptions/id/locations/westus/operationsStatus/sameguid", + "name": "sameguid", + "status" : "%s", + "startTime" : "2006-01-02T15:04:05Z", + "endTime" : "2006-01-02T16:04:05Z", + "percentComplete" : 50.00, + + "properties" : {}, + "error" : { + "code" : "BadArgument", + "message" : "The provided database 'foo' has an invalid username." 
+ } + } + ` +) + +func newAsynchronousResponse() *http.Response { + r := mocks.NewResponseWithStatus("201 Created", http.StatusCreated) + r.Body = mocks.NewBody(fmt.Sprintf(pollingStateFormat, operationInProgress)) + mocks.SetResponseHeader(r, http.CanonicalHeaderKey(headerAsyncOperation), mocks.TestAzureAsyncURL) + mocks.SetResponseHeader(r, http.CanonicalHeaderKey(autorest.HeaderLocation), mocks.TestLocationURL) + mocks.SetRetryHeader(r, retryDelay) + r.Request = mocks.NewRequestForURL(mocks.TestURL) + return r +} + +func newAsynchronousResponseWithError() *http.Response { + r := mocks.NewResponseWithStatus("400 Bad Request", http.StatusBadRequest) + mocks.SetRetryHeader(r, retryDelay) + r.Request = mocks.NewRequestForURL(mocks.TestURL) + r.Body = mocks.NewBody(errorResponse) + return r +} + +func newOperationResourceResponse(status string) *http.Response { + r := newAsynchronousResponse() + r.Body = mocks.NewBody(fmt.Sprintf(operationResourceFormat, status)) + return r +} + +func newOperationResourceErrorResponse(status string) *http.Response { + r := newAsynchronousResponse() + r.Body = mocks.NewBody(fmt.Sprintf(operationResourceErrorFormat, status)) + return r +} + +func newProvisioningStatusResponse(status string) *http.Response { + r := newAsynchronousResponse() + r.Body = mocks.NewBody(fmt.Sprintf(pollingStateFormat, status)) + return r +} + +func makeLongRunningOperationErrorString(code string, message string) string { + return fmt.Sprintf("Long running operation terminated with status 'Failed': Code=%q Message=%q", code, message) +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/go-autorest/autorest/azure/azure.go juju-core-2.0.0/src/github.com/Azure/go-autorest/autorest/azure/azure.go --- juju-core-2.0~beta15/src/github.com/Azure/go-autorest/autorest/azure/azure.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/go-autorest/autorest/azure/azure.go 2016-10-13 14:31:55.000000000 +0000 @@ -0,0 +1,180 @@ +/* +Package azure 
provides Azure-specific implementations used with AutoRest. + +See the included examples for more detail. +*/ +package azure + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strconv" + + "github.com/Azure/go-autorest/autorest" +) + +const ( + // HeaderClientID is the Azure extension header to set a user-specified request ID. + HeaderClientID = "x-ms-client-request-id" + + // HeaderReturnClientID is the Azure extension header to set if the user-specified request ID + // should be included in the response. + HeaderReturnClientID = "x-ms-return-client-request-id" + + // HeaderRequestID is the Azure extension header of the service generated request ID returned + // in the response. + HeaderRequestID = "x-ms-request-id" +) + +// ServiceError encapsulates the error response from an Azure service. +type ServiceError struct { + Code string `json:"code"` + Message string `json:"message"` + Details *[]interface{} `json:"details"` +} + +func (se ServiceError) Error() string { + if se.Details != nil { + d, err := json.Marshal(*(se.Details)) + if err != nil { + return fmt.Sprintf("Code=%q Message=%q Details=%v", se.Code, se.Message, *se.Details) + } + return fmt.Sprintf("Code=%q Message=%q Details=%v", se.Code, se.Message, string(d)) + } + return fmt.Sprintf("Code=%q Message=%q", se.Code, se.Message) +} + +// RequestError describes an error response returned by Azure service. +type RequestError struct { + autorest.DetailedError + + // The error returned by the Azure service. + ServiceError *ServiceError `json:"error"` + + // The request id (from the x-ms-request-id-header) of the request. + RequestID string +} + +// Error returns a human-friendly error message from service error. +func (e RequestError) Error() string { + return fmt.Sprintf("autorest/azure: Service returned an error. Status=%v %v", + e.StatusCode, e.ServiceError) +} + +// IsAzureError returns true if the passed error is an Azure Service error; false otherwise. 
+func IsAzureError(e error) bool { + _, ok := e.(*RequestError) + return ok +} + +// NewErrorWithError creates a new Error conforming object from the +// passed packageType, method, statusCode of the given resp (UndefinedStatusCode +// if resp is nil), message, and original error. message is treated as a format +// string to which the optional args apply. +func NewErrorWithError(original error, packageType string, method string, resp *http.Response, message string, args ...interface{}) RequestError { + if v, ok := original.(*RequestError); ok { + return *v + } + + statusCode := autorest.UndefinedStatusCode + if resp != nil { + statusCode = resp.StatusCode + } + return RequestError{ + DetailedError: autorest.DetailedError{ + Original: original, + PackageType: packageType, + Method: method, + StatusCode: statusCode, + Message: fmt.Sprintf(message, args...), + }, + } +} + +// WithReturningClientID returns a PrepareDecorator that adds an HTTP extension header of +// x-ms-client-request-id whose value is the passed, undecorated UUID (e.g., +// "0F39878C-5F76-4DB8-A25D-61D2C193C3CA"). It also sets the x-ms-return-client-request-id +// header to true such that UUID accompanies the http.Response. +func WithReturningClientID(uuid string) autorest.PrepareDecorator { + preparer := autorest.CreatePreparer( + WithClientID(uuid), + WithReturnClientID(true)) + + return func(p autorest.Preparer) autorest.Preparer { + return autorest.PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err != nil { + return r, err + } + return preparer.Prepare(r) + }) + } +} + +// WithClientID returns a PrepareDecorator that adds an HTTP extension header of +// x-ms-client-request-id whose value is passed, undecorated UUID (e.g., +// "0F39878C-5F76-4DB8-A25D-61D2C193C3CA"). 
+func WithClientID(uuid string) autorest.PrepareDecorator { + return autorest.WithHeader(HeaderClientID, uuid) +} + +// WithReturnClientID returns a PrepareDecorator that adds an HTTP extension header of +// x-ms-return-client-request-id whose boolean value indicates if the value of the +// x-ms-client-request-id header should be included in the http.Response. +func WithReturnClientID(b bool) autorest.PrepareDecorator { + return autorest.WithHeader(HeaderReturnClientID, strconv.FormatBool(b)) +} + +// ExtractClientID extracts the client identifier from the x-ms-client-request-id header set on the +// http.Request sent to the service (and returned in the http.Response) +func ExtractClientID(resp *http.Response) string { + return autorest.ExtractHeaderValue(HeaderClientID, resp) +} + +// ExtractRequestID extracts the Azure server generated request identifier from the +// x-ms-request-id header. +func ExtractRequestID(resp *http.Response) string { + return autorest.ExtractHeaderValue(HeaderRequestID, resp) +} + +// WithErrorUnlessStatusCode returns a RespondDecorator that emits an +// azure.RequestError by reading the response body unless the response HTTP status code +// is among the set passed. +// +// If there is a chance service may return responses other than the Azure error +// format and the response cannot be parsed into an error, a decoding error will +// be returned containing the response body. In any case, the Responder will +// return an error if the status code is not satisfied. +// +// If this Responder returns an error, the response body will be replaced with +// an in-memory reader, which needs no further closing. +func WithErrorUnlessStatusCode(codes ...int) autorest.RespondDecorator { + return func(r autorest.Responder) autorest.Responder { + return autorest.ResponderFunc(func(resp *http.Response) error { + err := r.Respond(resp) + if err == nil && !autorest.ResponseHasStatusCode(resp, codes...) 
{ + var e RequestError + defer resp.Body.Close() + + // Copy and replace the Body in case it does not contain an error object. + // This will leave the Body available to the caller. + b, decodeErr := autorest.CopyAndDecode(autorest.EncodedAsJSON, resp.Body, &e) + resp.Body = ioutil.NopCloser(&b) + if decodeErr != nil { + return fmt.Errorf("autorest/azure: error response cannot be parsed: %q error: %v", b.String(), decodeErr) + } else if e.ServiceError == nil { + e.ServiceError = &ServiceError{Code: "Unknown", Message: "Unknown service error"} + } + + e.RequestID = ExtractRequestID(resp) + if e.StatusCode == nil { + e.StatusCode = resp.StatusCode + } + err = &e + } + return err + }) + } +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/go-autorest/autorest/azure/azure_test.go juju-core-2.0.0/src/github.com/Azure/go-autorest/autorest/azure/azure_test.go --- juju-core-2.0~beta15/src/github.com/Azure/go-autorest/autorest/azure/azure_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/go-autorest/autorest/azure/azure_test.go 2016-10-13 14:31:55.000000000 +0000 @@ -0,0 +1,431 @@ +package azure + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "reflect" + "strconv" + "testing" + "time" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/mocks" +) + +const ( + headerAuthorization = "Authorization" + longDelay = 5 * time.Second + retryDelay = 10 * time.Millisecond + testLogPrefix = "azure:" +) + +// Use a Client Inspector to set the request identifier. 
+func ExampleWithClientID() { + uuid := "71FDB9F4-5E49-4C12-B266-DE7B4FD999A6" + req, _ := autorest.Prepare(&http.Request{}, + autorest.AsGet(), + autorest.WithBaseURL("https://microsoft.com/a/b/c/")) + + c := autorest.Client{Sender: mocks.NewSender()} + c.RequestInspector = WithReturningClientID(uuid) + + autorest.SendWithSender(c, req) + fmt.Printf("Inspector added the %s header with the value %s\n", + HeaderClientID, req.Header.Get(HeaderClientID)) + fmt.Printf("Inspector added the %s header with the value %s\n", + HeaderReturnClientID, req.Header.Get(HeaderReturnClientID)) + // Output: + // Inspector added the x-ms-client-request-id header with the value 71FDB9F4-5E49-4C12-B266-DE7B4FD999A6 + // Inspector added the x-ms-return-client-request-id header with the value true +} + +func TestWithReturningClientIDReturnsError(t *testing.T) { + var errIn error + uuid := "71FDB9F4-5E49-4C12-B266-DE7B4FD999A6" + _, errOut := autorest.Prepare(&http.Request{}, + withErrorPrepareDecorator(&errIn), + WithReturningClientID(uuid)) + + if errOut == nil || errIn != errOut { + t.Fatalf("azure: WithReturningClientID failed to exit early when receiving an error -- expected (%v), received (%v)", + errIn, errOut) + } +} + +func TestWithClientID(t *testing.T) { + uuid := "71FDB9F4-5E49-4C12-B266-DE7B4FD999A6" + req, _ := autorest.Prepare(&http.Request{}, + WithClientID(uuid)) + + if req.Header.Get(HeaderClientID) != uuid { + t.Fatalf("azure: WithClientID failed to set %s -- expected %s, received %s", + HeaderClientID, uuid, req.Header.Get(HeaderClientID)) + } +} + +func TestWithReturnClientID(t *testing.T) { + b := false + req, _ := autorest.Prepare(&http.Request{}, + WithReturnClientID(b)) + + if req.Header.Get(HeaderReturnClientID) != strconv.FormatBool(b) { + t.Fatalf("azure: WithReturnClientID failed to set %s -- expected %s, received %s", + HeaderClientID, strconv.FormatBool(b), req.Header.Get(HeaderClientID)) + } +} + +func TestExtractClientID(t *testing.T) { + uuid := 
"71FDB9F4-5E49-4C12-B266-DE7B4FD999A6" + resp := mocks.NewResponse() + mocks.SetResponseHeader(resp, HeaderClientID, uuid) + + if ExtractClientID(resp) != uuid { + t.Fatalf("azure: ExtractClientID failed to extract the %s -- expected %s, received %s", + HeaderClientID, uuid, ExtractClientID(resp)) + } +} + +func TestExtractRequestID(t *testing.T) { + uuid := "71FDB9F4-5E49-4C12-B266-DE7B4FD999A6" + resp := mocks.NewResponse() + mocks.SetResponseHeader(resp, HeaderRequestID, uuid) + + if ExtractRequestID(resp) != uuid { + t.Fatalf("azure: ExtractRequestID failed to extract the %s -- expected %s, received %s", + HeaderRequestID, uuid, ExtractRequestID(resp)) + } +} + +func TestIsAzureError_ReturnsTrueForAzureError(t *testing.T) { + if !IsAzureError(&RequestError{}) { + t.Fatalf("azure: IsAzureError failed to return true for an Azure Service error") + } +} + +func TestIsAzureError_ReturnsFalseForNonAzureError(t *testing.T) { + if IsAzureError(fmt.Errorf("An Error")) { + t.Fatalf("azure: IsAzureError return true for an non-Azure Service error") + } +} + +func TestNewErrorWithError_UsesReponseStatusCode(t *testing.T) { + e := NewErrorWithError(fmt.Errorf("Error"), "packageType", "method", mocks.NewResponseWithStatus("Forbidden", http.StatusForbidden), "message") + if e.StatusCode != http.StatusForbidden { + t.Fatalf("azure: NewErrorWithError failed to use the Status Code of the passed Response -- expected %v, received %v", http.StatusForbidden, e.StatusCode) + } +} + +func TestNewErrorWithError_ReturnsUnwrappedError(t *testing.T) { + e1 := RequestError{} + e1.ServiceError = &ServiceError{Code: "42", Message: "A Message"} + e1.StatusCode = 200 + e1.RequestID = "A RequestID" + e2 := NewErrorWithError(&e1, "packageType", "method", nil, "message") + + if !reflect.DeepEqual(e1, e2) { + t.Fatalf("azure: NewErrorWithError wrapped an RequestError -- expected %T, received %T", e1, e2) + } +} + +func TestNewErrorWithError_WrapsAnError(t *testing.T) { + e1 := fmt.Errorf("Inner 
Error") + var e2 interface{} = NewErrorWithError(e1, "packageType", "method", nil, "message") + + if _, ok := e2.(RequestError); !ok { + t.Fatalf("azure: NewErrorWithError failed to wrap a standard error -- received %T", e2) + } +} + +func TestWithErrorUnlessStatusCode_NotAnAzureError(t *testing.T) { + body := ` + + IIS Error page + + Some non-JSON error page + ` + r := mocks.NewResponseWithContent(body) + r.Request = mocks.NewRequest() + r.StatusCode = http.StatusBadRequest + r.Status = http.StatusText(r.StatusCode) + + err := autorest.Respond(r, + WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByClosing()) + ok, _ := err.(*RequestError) + if ok != nil { + t.Fatalf("azure: azure.RequestError returned from malformed response: %v", err) + } + + // the error body should still be there + defer r.Body.Close() + b, err := ioutil.ReadAll(r.Body) + if err != nil { + t.Fatal(err) + } + if string(b) != body { + t.Fatalf("response body is wrong. got=%q exptected=%q", string(b), body) + } +} + +func TestWithErrorUnlessStatusCode_FoundAzureErrorWithoutDetails(t *testing.T) { + j := `{ + "error": { + "code": "InternalError", + "message": "Azure is having trouble right now." + } + }` + uuid := "71FDB9F4-5E49-4C12-B266-DE7B4FD999A6" + r := mocks.NewResponseWithContent(j) + mocks.SetResponseHeader(r, HeaderRequestID, uuid) + r.Request = mocks.NewRequest() + r.StatusCode = http.StatusInternalServerError + r.Status = http.StatusText(r.StatusCode) + + err := autorest.Respond(r, + WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByClosing()) + + if err == nil { + t.Fatalf("azure: returned nil error for proper error response") + } + azErr, ok := err.(*RequestError) + if !ok { + t.Fatalf("azure: returned error is not azure.RequestError: %T", err) + } + + expected := "autorest/azure: Service returned an error. 
Status=500 Code=\"InternalError\" Message=\"Azure is having trouble right now.\"" + if !reflect.DeepEqual(expected, azErr.Error()) { + t.Fatalf("azure: service error is not unmarshaled properly.\nexpected=%v\ngot=%v", expected, azErr.Error()) + } + + if expected := http.StatusInternalServerError; azErr.StatusCode != expected { + t.Fatalf("azure: got wrong StatusCode=%d Expected=%d", azErr.StatusCode, expected) + } + if expected := uuid; azErr.RequestID != expected { + t.Fatalf("azure: wrong request ID in error. expected=%q; got=%q", expected, azErr.RequestID) + } + + _ = azErr.Error() + + // the error body should still be there + defer r.Body.Close() + b, err := ioutil.ReadAll(r.Body) + if err != nil { + t.Fatal(err) + } + if string(b) != j { + t.Fatalf("response body is wrong. got=%q expected=%q", string(b), j) + } + +} + +func TestWithErrorUnlessStatusCode_FoundAzureErrorWithDetails(t *testing.T) { + j := `{ + "error": { + "code": "InternalError", + "message": "Azure is having trouble right now.", + "details": [{"code": "conflict1", "message":"error message1"}, + {"code": "conflict2", "message":"error message2"}] + } + }` + uuid := "71FDB9F4-5E49-4C12-B266-DE7B4FD999A6" + r := mocks.NewResponseWithContent(j) + mocks.SetResponseHeader(r, HeaderRequestID, uuid) + r.Request = mocks.NewRequest() + r.StatusCode = http.StatusInternalServerError + r.Status = http.StatusText(r.StatusCode) + + err := autorest.Respond(r, + WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByClosing()) + + if err == nil { + t.Fatalf("azure: returned nil error for proper error response") + } + azErr, ok := err.(*RequestError) + if !ok { + t.Fatalf("azure: returned error is not azure.RequestError: %T", err) + } + + if expected := "InternalError"; azErr.ServiceError.Code != expected { + t.Fatalf("azure: wrong error code. 
expected=%q; got=%q", expected, azErr.ServiceError.Code) + } + if azErr.ServiceError.Message == "" { + t.Fatalf("azure: error message is not unmarshaled properly") + } + b, _ := json.Marshal(*azErr.ServiceError.Details) + if string(b) != `[{"code":"conflict1","message":"error message1"},{"code":"conflict2","message":"error message2"}]` { + t.Fatalf("azure: error details is not unmarshaled properly") + } + + if expected := http.StatusInternalServerError; azErr.StatusCode != expected { + t.Fatalf("azure: got wrong StatusCode=%v Expected=%d", azErr.StatusCode, expected) + } + if expected := uuid; azErr.RequestID != expected { + t.Fatalf("azure: wrong request ID in error. expected=%q; got=%q", expected, azErr.RequestID) + } + + _ = azErr.Error() + + // the error body should still be there + defer r.Body.Close() + b, err = ioutil.ReadAll(r.Body) + if err != nil { + t.Fatal(err) + } + if string(b) != j { + t.Fatalf("response body is wrong. got=%q expected=%q", string(b), j) + } + +} + +func TestWithErrorUnlessStatusCode_NoAzureError(t *testing.T) { + j := `{ + "Status":"NotFound" + }` + uuid := "71FDB9F4-5E49-4C12-B266-DE7B4FD999A6" + r := mocks.NewResponseWithContent(j) + mocks.SetResponseHeader(r, HeaderRequestID, uuid) + r.Request = mocks.NewRequest() + r.StatusCode = http.StatusInternalServerError + r.Status = http.StatusText(r.StatusCode) + + err := autorest.Respond(r, + WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByClosing()) + if err == nil { + t.Fatalf("azure: returned nil error for proper error response") + } + azErr, ok := err.(*RequestError) + if !ok { + t.Fatalf("azure: returned error is not azure.RequestError: %T", err) + } + + expected := &ServiceError{ + Code: "Unknown", + Message: "Unknown service error", + } + + if !reflect.DeepEqual(expected, azErr.ServiceError) { + t.Fatalf("azure: service error is not unmarshaled properly. 
expected=%q\ngot=%q", expected, azErr.ServiceError) + } + + if expected := http.StatusInternalServerError; azErr.StatusCode != expected { + t.Fatalf("azure: got wrong StatusCode=%v Expected=%d", azErr.StatusCode, expected) + } + if expected := uuid; azErr.RequestID != expected { + t.Fatalf("azure: wrong request ID in error. expected=%q; got=%q", expected, azErr.RequestID) + } + + _ = azErr.Error() + + // the error body should still be there + defer r.Body.Close() + b, err := ioutil.ReadAll(r.Body) + if err != nil { + t.Fatal(err) + } + if string(b) != j { + t.Fatalf("response body is wrong. got=%q expected=%q", string(b), j) + } + +} + +func TestRequestErrorString_WithError(t *testing.T) { + j := `{ + "error": { + "code": "InternalError", + "message": "Conflict", + "details": [{"code": "conflict1", "message":"error message1"}] + } + }` + uuid := "71FDB9F4-5E49-4C12-B266-DE7B4FD999A6" + r := mocks.NewResponseWithContent(j) + mocks.SetResponseHeader(r, HeaderRequestID, uuid) + r.Request = mocks.NewRequest() + r.StatusCode = http.StatusInternalServerError + r.Status = http.StatusText(r.StatusCode) + + err := autorest.Respond(r, + WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByClosing()) + + if err == nil { + t.Fatalf("azure: returned nil error for proper error response") + } + azErr, _ := err.(*RequestError) + expected := "autorest/azure: Service returned an error. 
Status=500 Code=\"InternalError\" Message=\"Conflict\" Details=[{\"code\":\"conflict1\",\"message\":\"error message1\"}]" + if expected != azErr.Error() { + t.Fatalf("azure: send wrong RequestError.\nexpected=%v\ngot=%v", expected, azErr.Error()) + } +} + +func withErrorPrepareDecorator(e *error) autorest.PrepareDecorator { + return func(p autorest.Preparer) autorest.Preparer { + return autorest.PreparerFunc(func(r *http.Request) (*http.Request, error) { + *e = fmt.Errorf("azure: Faux Prepare Error") + return r, *e + }) + } +} + +func withAsyncResponseDecorator(n int) autorest.SendDecorator { + i := 0 + return func(s autorest.Sender) autorest.Sender { + return autorest.SenderFunc(func(r *http.Request) (*http.Response, error) { + resp, err := s.Do(r) + if err == nil { + if i < n { + resp.StatusCode = http.StatusCreated + resp.Header = http.Header{} + resp.Header.Add(http.CanonicalHeaderKey(headerAsyncOperation), mocks.TestURL) + i++ + } else { + resp.StatusCode = http.StatusOK + resp.Header.Del(http.CanonicalHeaderKey(headerAsyncOperation)) + } + } + return resp, err + }) + } +} + +type mockAuthorizer struct{} + +func (ma mockAuthorizer) WithAuthorization() autorest.PrepareDecorator { + return autorest.WithHeader(headerAuthorization, mocks.TestAuthorizationHeader) +} + +type mockFailingAuthorizer struct{} + +func (mfa mockFailingAuthorizer) WithAuthorization() autorest.PrepareDecorator { + return func(p autorest.Preparer) autorest.Preparer { + return autorest.PreparerFunc(func(r *http.Request) (*http.Request, error) { + return r, fmt.Errorf("ERROR: mockFailingAuthorizer returned expected error") + }) + } +} + +type mockInspector struct { + wasInvoked bool +} + +func (mi *mockInspector) WithInspection() autorest.PrepareDecorator { + return func(p autorest.Preparer) autorest.Preparer { + return autorest.PreparerFunc(func(r *http.Request) (*http.Request, error) { + mi.wasInvoked = true + return p.Prepare(r) + }) + } +} + +func (mi *mockInspector) ByInspecting() 
autorest.RespondDecorator { + return func(r autorest.Responder) autorest.Responder { + return autorest.ResponderFunc(func(resp *http.Response) error { + mi.wasInvoked = true + return r.Respond(resp) + }) + } +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/go-autorest/autorest/azure/config.go juju-core-2.0.0/src/github.com/Azure/go-autorest/autorest/azure/config.go --- juju-core-2.0~beta15/src/github.com/Azure/go-autorest/autorest/azure/config.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/go-autorest/autorest/azure/config.go 2016-10-13 14:31:55.000000000 +0000 @@ -0,0 +1,13 @@ +package azure + +import ( + "net/url" +) + +// OAuthConfig represents the endpoints needed +// in OAuth operations +type OAuthConfig struct { + AuthorizeEndpoint url.URL + TokenEndpoint url.URL + DeviceCodeEndpoint url.URL +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/go-autorest/autorest/azure/devicetoken.go juju-core-2.0.0/src/github.com/Azure/go-autorest/autorest/azure/devicetoken.go --- juju-core-2.0~beta15/src/github.com/Azure/go-autorest/autorest/azure/devicetoken.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/go-autorest/autorest/azure/devicetoken.go 2016-10-13 14:31:55.000000000 +0000 @@ -0,0 +1,193 @@ +package azure + +/* + This file is largely based on rjw57/oauth2device's code, with the follow differences: + * scope -> resource, and only allow a single one + * receive "Message" in the DeviceCode struct and show it to users as the prompt + * azure-xplat-cli has the following behavior that this emulates: + - does not send client_secret during the token exchange + - sends resource again in the token exchange request +*/ + +import ( + "fmt" + "net/http" + "net/url" + "time" + + "github.com/Azure/go-autorest/autorest" +) + +const ( + logPrefix = "autorest/azure/devicetoken:" +) + +var ( + // ErrDeviceGeneric represents an unknown error from the token endpoint when using device flow + ErrDeviceGeneric = 
fmt.Errorf("%s Error while retrieving OAuth token: Unknown Error", logPrefix) + + // ErrDeviceAccessDenied represents an access denied error from the token endpoint when using device flow + ErrDeviceAccessDenied = fmt.Errorf("%s Error while retrieving OAuth token: Access Denied", logPrefix) + + // ErrDeviceAuthorizationPending represents the server waiting on the user to complete the device flow + ErrDeviceAuthorizationPending = fmt.Errorf("%s Error while retrieving OAuth token: Authorization Pending", logPrefix) + + // ErrDeviceCodeExpired represents the server timing out and expiring the code during device flow + ErrDeviceCodeExpired = fmt.Errorf("%s Error while retrieving OAuth token: Code Expired", logPrefix) + + // ErrDeviceSlowDown represents the service telling us we're polling too often during device flow + ErrDeviceSlowDown = fmt.Errorf("%s Error while retrieving OAuth token: Slow Down", logPrefix) + + errCodeSendingFails = "Error occurred while sending request for Device Authorization Code" + errCodeHandlingFails = "Error occurred while handling response from the Device Endpoint" + errTokenSendingFails = "Error occurred while sending request with device code for a token" + errTokenHandlingFails = "Error occurred while handling response from the Token Endpoint (during device flow)" +) + +// DeviceCode is the object returned by the device auth endpoint +// It contains information to instruct the user to complete the auth flow +type DeviceCode struct { + DeviceCode *string `json:"device_code,omitempty"` + UserCode *string `json:"user_code,omitempty"` + VerificationURL *string `json:"verification_url,omitempty"` + ExpiresIn *int64 `json:"expires_in,string,omitempty"` + Interval *int64 `json:"interval,string,omitempty"` + + Message *string `json:"message"` // Azure specific + Resource string // store the following, stored when initiating, used when exchanging + OAuthConfig OAuthConfig + ClientID string +} + +// TokenError is the object returned by the token 
exchange endpoint +// when something is amiss +type TokenError struct { + Error *string `json:"error,omitempty"` + ErrorCodes []int `json:"error_codes,omitempty"` + ErrorDescription *string `json:"error_description,omitempty"` + Timestamp *string `json:"timestamp,omitempty"` + TraceID *string `json:"trace_id,omitempty"` +} + +// DeviceToken is the object return by the token exchange endpoint +// It can either look like a Token or an ErrorToken, so put both here +// and check for presence of "Error" to know if we are in error state +type deviceToken struct { + Token + TokenError +} + +// InitiateDeviceAuth initiates a device auth flow. It returns a DeviceCode +// that can be used with CheckForUserCompletion or WaitForUserCompletion. +func InitiateDeviceAuth(client *autorest.Client, oauthConfig OAuthConfig, clientID, resource string) (*DeviceCode, error) { + req, _ := autorest.Prepare( + &http.Request{}, + autorest.AsPost(), + autorest.AsFormURLEncoded(), + autorest.WithBaseURL(oauthConfig.DeviceCodeEndpoint.String()), + autorest.WithFormData(url.Values{ + "client_id": []string{clientID}, + "resource": []string{resource}, + }), + ) + + resp, err := autorest.SendWithSender(client, req) + if err != nil { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeSendingFails, err) + } + + var code DeviceCode + err = autorest.Respond( + resp, + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&code), + autorest.ByClosing()) + if err != nil { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeHandlingFails, err) + } + + code.ClientID = clientID + code.Resource = resource + code.OAuthConfig = oauthConfig + + return &code, nil +} + +// CheckForUserCompletion takes a DeviceCode and checks with the Azure AD OAuth endpoint +// to see if the device flow has: been completed, timed out, or otherwise failed +func CheckForUserCompletion(client *autorest.Client, code *DeviceCode) (*Token, error) { + req, _ := autorest.Prepare( + &http.Request{}, + 
autorest.AsPost(), + autorest.AsFormURLEncoded(), + autorest.WithBaseURL(code.OAuthConfig.TokenEndpoint.String()), + autorest.WithFormData(url.Values{ + "client_id": []string{code.ClientID}, + "code": []string{*code.DeviceCode}, + "grant_type": []string{OAuthGrantTypeDeviceCode}, + "resource": []string{code.Resource}, + }), + ) + + resp, err := autorest.SendWithSender(client, req) + if err != nil { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenSendingFails, err) + } + + var token deviceToken + err = autorest.Respond( + resp, + autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusBadRequest), + autorest.ByUnmarshallingJSON(&token), + autorest.ByClosing()) + if err != nil { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenHandlingFails, err) + } + + if token.Error == nil { + return &token.Token, nil + } + + switch *token.Error { + case "authorization_pending": + return nil, ErrDeviceAuthorizationPending + case "slow_down": + return nil, ErrDeviceSlowDown + case "access_denied": + return nil, ErrDeviceAccessDenied + case "code_expired": + return nil, ErrDeviceCodeExpired + default: + return nil, ErrDeviceGeneric + } +} + +// WaitForUserCompletion calls CheckForUserCompletion repeatedly until a token is granted or an error state occurs. +// This prevents the user from looping and checking against 'ErrDeviceAuthorizationPending'. +func WaitForUserCompletion(client *autorest.Client, code *DeviceCode) (*Token, error) { + intervalDuration := time.Duration(*code.Interval) * time.Second + waitDuration := intervalDuration + + for { + token, err := CheckForUserCompletion(client, code) + + if err == nil { + return token, nil + } + + switch err { + case ErrDeviceSlowDown: + waitDuration += waitDuration + case ErrDeviceAuthorizationPending: + // noop + default: // everything else is "fatal" to us + return nil, err + } + + if waitDuration > (intervalDuration * 3) { + return nil, fmt.Errorf("%s Error waiting for user to complete device flow. 
Server told us to slow_down too much", logPrefix) + } + + time.Sleep(waitDuration) + } +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/go-autorest/autorest/azure/devicetoken_test.go juju-core-2.0.0/src/github.com/Azure/go-autorest/autorest/azure/devicetoken_test.go --- juju-core-2.0~beta15/src/github.com/Azure/go-autorest/autorest/azure/devicetoken_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/go-autorest/autorest/azure/devicetoken_test.go 2016-10-13 14:31:55.000000000 +0000 @@ -0,0 +1,301 @@ +package azure + +import ( + "encoding/json" + "fmt" + "net/http" + "strings" + "testing" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/mocks" +) + +const ( + TestResource = "SomeResource" + TestClientID = "SomeClientID" + TestTenantID = "SomeTenantID" +) + +var ( + testOAuthConfig, _ = PublicCloud.OAuthConfigForTenant(TestTenantID) + TestOAuthConfig = *testOAuthConfig +) + +const MockDeviceCodeResponse = ` +{ + "device_code": "10000-40-1234567890", + "user_code": "ABCDEF", + "verification_url": "http://aka.ms/deviceauth", + "expires_in": "900", + "interval": "0" +} +` + +const MockDeviceTokenResponse = `{ + "access_token": "accessToken", + "refresh_token": "refreshToken", + "expires_in": "1000", + "expires_on": "2000", + "not_before": "3000", + "resource": "resource", + "token_type": "type" +} +` + +func TestDeviceCodeIncludesResource(t *testing.T) { + sender := mocks.NewSender() + sender.AppendResponse(mocks.NewResponseWithContent(MockDeviceCodeResponse)) + client := &autorest.Client{Sender: sender} + + code, err := InitiateDeviceAuth(client, TestOAuthConfig, TestClientID, TestResource) + if err != nil { + t.Fatalf("azure: unexpected error initiating device auth") + } + + if code.Resource != TestResource { + t.Fatalf("azure: InitiateDeviceAuth failed to stash the resource in the DeviceCode struct") + } +} + +func TestDeviceCodeReturnsErrorIfSendingFails(t *testing.T) { + sender := 
mocks.NewSender() + sender.SetError(fmt.Errorf("this is an error")) + client := &autorest.Client{Sender: sender} + + _, err := InitiateDeviceAuth(client, TestOAuthConfig, TestClientID, TestResource) + if err == nil || !strings.Contains(err.Error(), errCodeSendingFails) { + t.Fatalf("azure: failed to get correct error expected(%s) actual(%s)", errCodeSendingFails, err.Error()) + } +} + +func TestDeviceCodeReturnsErrorIfBadRequest(t *testing.T) { + sender := mocks.NewSender() + body := mocks.NewBody("doesn't matter") + sender.AppendResponse(mocks.NewResponseWithBodyAndStatus(body, 400, "Bad Request")) + client := &autorest.Client{Sender: sender} + + _, err := InitiateDeviceAuth(client, TestOAuthConfig, TestClientID, TestResource) + if err == nil || !strings.Contains(err.Error(), errCodeHandlingFails) { + t.Fatalf("azure: failed to get correct error expected(%s) actual(%s)", errCodeHandlingFails, err.Error()) + } + + if body.IsOpen() { + t.Fatalf("response body was left open!") + } +} + +func TestDeviceCodeReturnsErrorIfCannotDeserializeDeviceCode(t *testing.T) { + gibberishJSON := strings.Replace(MockDeviceCodeResponse, "expires_in", "\":, :gibberish", -1) + sender := mocks.NewSender() + body := mocks.NewBody(gibberishJSON) + sender.AppendResponse(mocks.NewResponseWithBodyAndStatus(body, 200, "OK")) + client := &autorest.Client{Sender: sender} + + _, err := InitiateDeviceAuth(client, TestOAuthConfig, TestClientID, TestResource) + if err == nil || !strings.Contains(err.Error(), errCodeHandlingFails) { + t.Fatalf("azure: failed to get correct error expected(%s) actual(%s)", errCodeHandlingFails, err.Error()) + } + + if body.IsOpen() { + t.Fatalf("response body was left open!") + } +} + +func deviceCode() *DeviceCode { + var deviceCode DeviceCode + json.Unmarshal([]byte(MockDeviceCodeResponse), &deviceCode) + deviceCode.Resource = TestResource + deviceCode.ClientID = TestClientID + return &deviceCode +} + +func TestDeviceTokenReturns(t *testing.T) { + sender := 
mocks.NewSender() + body := mocks.NewBody(MockDeviceTokenResponse) + sender.AppendResponse(mocks.NewResponseWithBodyAndStatus(body, 200, "OK")) + client := &autorest.Client{Sender: sender} + + _, err := WaitForUserCompletion(client, deviceCode()) + if err != nil { + t.Fatalf("azure: got error unexpectedly") + } + + if body.IsOpen() { + t.Fatalf("response body was left open!") + } +} + +func TestDeviceTokenReturnsErrorIfSendingFails(t *testing.T) { + sender := mocks.NewSender() + sender.SetError(fmt.Errorf("this is an error")) + client := &autorest.Client{Sender: sender} + + _, err := WaitForUserCompletion(client, deviceCode()) + if err == nil || !strings.Contains(err.Error(), errTokenSendingFails) { + t.Fatalf("azure: failed to get correct error expected(%s) actual(%s)", errTokenSendingFails, err.Error()) + } +} + +func TestDeviceTokenReturnsErrorIfServerError(t *testing.T) { + sender := mocks.NewSender() + body := mocks.NewBody("") + sender.AppendResponse(mocks.NewResponseWithBodyAndStatus(body, 500, "Internal Server Error")) + client := &autorest.Client{Sender: sender} + + _, err := WaitForUserCompletion(client, deviceCode()) + if err == nil || !strings.Contains(err.Error(), errTokenHandlingFails) { + t.Fatalf("azure: failed to get correct error expected(%s) actual(%s)", errTokenHandlingFails, err.Error()) + } + + if body.IsOpen() { + t.Fatalf("response body was left open!") + } +} + +func TestDeviceTokenReturnsErrorIfCannotDeserializeDeviceToken(t *testing.T) { + gibberishJSON := strings.Replace(MockDeviceTokenResponse, "expires_in", ";:\"gibberish", -1) + sender := mocks.NewSender() + body := mocks.NewBody(gibberishJSON) + sender.AppendResponse(mocks.NewResponseWithBodyAndStatus(body, 200, "OK")) + client := &autorest.Client{Sender: sender} + + _, err := WaitForUserCompletion(client, deviceCode()) + if err == nil || !strings.Contains(err.Error(), errTokenHandlingFails) { + t.Fatalf("azure: failed to get correct error expected(%s) actual(%s)", 
errTokenHandlingFails, err.Error()) + } + + if body.IsOpen() { + t.Fatalf("response body was left open!") + } +} + +func errorDeviceTokenResponse(message string) string { + return `{ "error": "` + message + `" }` +} + +func TestDeviceTokenReturnsErrorIfAuthorizationPending(t *testing.T) { + sender := mocks.NewSender() + body := mocks.NewBody(errorDeviceTokenResponse("authorization_pending")) + sender.AppendResponse(mocks.NewResponseWithBodyAndStatus(body, 400, "Bad Request")) + client := &autorest.Client{Sender: sender} + + _, err := CheckForUserCompletion(client, deviceCode()) + if err != ErrDeviceAuthorizationPending { + t.Fatalf("!!!") + } + + if body.IsOpen() { + t.Fatalf("response body was left open!") + } +} + +func TestDeviceTokenReturnsErrorIfSlowDown(t *testing.T) { + sender := mocks.NewSender() + body := mocks.NewBody(errorDeviceTokenResponse("slow_down")) + sender.AppendResponse(mocks.NewResponseWithBodyAndStatus(body, 400, "Bad Request")) + client := &autorest.Client{Sender: sender} + + _, err := CheckForUserCompletion(client, deviceCode()) + if err != ErrDeviceSlowDown { + t.Fatalf("!!!") + } + + if body.IsOpen() { + t.Fatalf("response body was left open!") + } +} + +type deviceTokenSender struct { + errorString string + attempts int +} + +func newDeviceTokenSender(deviceErrorString string) *deviceTokenSender { + return &deviceTokenSender{errorString: deviceErrorString, attempts: 0} +} + +func (s *deviceTokenSender) Do(req *http.Request) (*http.Response, error) { + var resp *http.Response + if s.attempts < 1 { + s.attempts++ + resp = mocks.NewResponseWithContent(errorDeviceTokenResponse(s.errorString)) + } else { + resp = mocks.NewResponseWithContent(MockDeviceTokenResponse) + } + return resp, nil +} + +// since the above only exercise CheckForUserCompletion, we repeat the test here, +// but with the intent of showing that WaitForUserCompletion loops properly. 
+func TestDeviceTokenSucceedsWithIntermediateAuthPending(t *testing.T) { + sender := newDeviceTokenSender("authorization_pending") + client := &autorest.Client{Sender: sender} + + _, err := WaitForUserCompletion(client, deviceCode()) + if err != nil { + t.Fatalf("unexpected error occurred") + } +} + +// same as above but with SlowDown now +func TestDeviceTokenSucceedsWithIntermediateSlowDown(t *testing.T) { + sender := newDeviceTokenSender("slow_down") + client := &autorest.Client{Sender: sender} + + _, err := WaitForUserCompletion(client, deviceCode()) + if err != nil { + t.Fatalf("unexpected error occurred") + } +} + +func TestDeviceTokenReturnsErrorIfAccessDenied(t *testing.T) { + sender := mocks.NewSender() + body := mocks.NewBody(errorDeviceTokenResponse("access_denied")) + sender.AppendResponse(mocks.NewResponseWithBodyAndStatus(body, 400, "Bad Request")) + client := &autorest.Client{Sender: sender} + + _, err := WaitForUserCompletion(client, deviceCode()) + if err != ErrDeviceAccessDenied { + t.Fatalf("azure: got wrong error expected(%s) actual(%s)", ErrDeviceAccessDenied.Error(), err.Error()) + } + + if body.IsOpen() { + t.Fatalf("response body was left open!") + } +} + +func TestDeviceTokenReturnsErrorIfCodeExpired(t *testing.T) { + sender := mocks.NewSender() + body := mocks.NewBody(errorDeviceTokenResponse("code_expired")) + sender.AppendResponse(mocks.NewResponseWithBodyAndStatus(body, 400, "Bad Request")) + client := &autorest.Client{Sender: sender} + + _, err := WaitForUserCompletion(client, deviceCode()) + if err != ErrDeviceCodeExpired { + t.Fatalf("azure: got wrong error expected(%s) actual(%s)", ErrDeviceCodeExpired.Error(), err.Error()) + } + + if body.IsOpen() { + t.Fatalf("response body was left open!") + } +} + +func TestDeviceTokenReturnsErrorForUnknownError(t *testing.T) { + sender := mocks.NewSender() + body := mocks.NewBody(errorDeviceTokenResponse("unknown_error")) + sender.AppendResponse(mocks.NewResponseWithBodyAndStatus(body, 400, "Bad 
Request")) + client := &autorest.Client{Sender: sender} + + _, err := WaitForUserCompletion(client, deviceCode()) + if err == nil { + t.Fatalf("failed to get error") + } + if err != ErrDeviceGeneric { + t.Fatalf("azure: got wrong error expected(%s) actual(%s)", ErrDeviceGeneric.Error(), err.Error()) + } + + if body.IsOpen() { + t.Fatalf("response body was left open!") + } +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/go-autorest/autorest/azure/environments.go juju-core-2.0.0/src/github.com/Azure/go-autorest/autorest/azure/environments.go --- juju-core-2.0~beta15/src/github.com/Azure/go-autorest/autorest/azure/environments.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/go-autorest/autorest/azure/environments.go 2016-10-13 14:31:55.000000000 +0000 @@ -0,0 +1,147 @@ +package azure + +import ( + "fmt" + "net/url" + "strings" +) + +const ( + activeDirectoryAPIVersion = "1.0" +) + +var environments = map[string]Environment{ + "AZURECHINACLOUD": ChinaCloud, + "AZUREGERMANCLOUD": GermanCloud, + "AZUREPUBLICCLOUD": PublicCloud, + "AZUREUSGOVERNMENTCLOUD": USGovernmentCloud, +} + +// Environment represents a set of endpoints for each of Azure's Clouds. 
+type Environment struct { + Name string `json:"name"` + ManagementPortalURL string `json:"managementPortalURL"` + PublishSettingsURL string `json:"publishSettingsURL"` + ServiceManagementEndpoint string `json:"serviceManagementEndpoint"` + ResourceManagerEndpoint string `json:"resourceManagerEndpoint"` + ActiveDirectoryEndpoint string `json:"activeDirectoryEndpoint"` + GalleryEndpoint string `json:"galleryEndpoint"` + KeyVaultEndpoint string `json:"keyVaultEndpoint"` + GraphEndpoint string `json:"graphEndpoint"` + StorageEndpointSuffix string `json:"storageEndpointSuffix"` + SQLDatabaseDNSSuffix string `json:"sqlDatabaseDNSSuffix"` + TrafficManagerDNSSuffix string `json:"trafficManagerDNSSuffix"` + KeyVaultDNSSuffix string `json:"keyVaultDNSSuffix"` + ServiceBusEndpointSuffix string `json:"serviceBusEndpointSuffix"` +} + +var ( + // PublicCloud is the default public Azure cloud environment + PublicCloud = Environment{ + Name: "AzurePublicCloud", + ManagementPortalURL: "https://manage.windowsazure.com/", + PublishSettingsURL: "https://manage.windowsazure.com/publishsettings/index", + ServiceManagementEndpoint: "https://management.core.windows.net/", + ResourceManagerEndpoint: "https://management.azure.com/", + ActiveDirectoryEndpoint: "https://login.microsoftonline.com/", + GalleryEndpoint: "https://gallery.azure.com/", + KeyVaultEndpoint: "https://vault.azure.net/", + GraphEndpoint: "https://graph.windows.net/", + StorageEndpointSuffix: "core.windows.net", + SQLDatabaseDNSSuffix: "database.windows.net", + TrafficManagerDNSSuffix: "trafficmanager.net", + KeyVaultDNSSuffix: "vault.azure.net", + ServiceBusEndpointSuffix: "servicebus.azure.com", + } + + // USGovernmentCloud is the cloud environment for the US Government + USGovernmentCloud = Environment{ + Name: "AzureUSGovernmentCloud", + ManagementPortalURL: "https://manage.windowsazure.us/", + PublishSettingsURL: "https://manage.windowsazure.us/publishsettings/index", + ServiceManagementEndpoint: 
"https://management.core.usgovcloudapi.net/", + ResourceManagerEndpoint: "https://management.usgovcloudapi.net/", + ActiveDirectoryEndpoint: "https://login.microsoftonline.com/", + GalleryEndpoint: "https://gallery.usgovcloudapi.net/", + KeyVaultEndpoint: "https://vault.usgovcloudapi.net/", + GraphEndpoint: "https://graph.usgovcloudapi.net/", + StorageEndpointSuffix: "core.usgovcloudapi.net", + SQLDatabaseDNSSuffix: "database.usgovcloudapi.net", + TrafficManagerDNSSuffix: "usgovtrafficmanager.net", + KeyVaultDNSSuffix: "vault.usgovcloudapi.net", + ServiceBusEndpointSuffix: "servicebus.usgovcloudapi.net", + } + + // ChinaCloud is the cloud environment operated in China + ChinaCloud = Environment{ + Name: "AzureChinaCloud", + ManagementPortalURL: "https://manage.chinacloudapi.com/", + PublishSettingsURL: "https://manage.chinacloudapi.com/publishsettings/index", + ServiceManagementEndpoint: "https://management.core.chinacloudapi.cn/", + ResourceManagerEndpoint: "https://management.chinacloudapi.cn/", + ActiveDirectoryEndpoint: "https://login.chinacloudapi.cn/?api-version=1.0", + GalleryEndpoint: "https://gallery.chinacloudapi.cn/", + KeyVaultEndpoint: "https://vault.azure.cn/", + GraphEndpoint: "https://graph.chinacloudapi.cn/", + StorageEndpointSuffix: "core.chinacloudapi.cn", + SQLDatabaseDNSSuffix: "database.chinacloudapi.cn", + TrafficManagerDNSSuffix: "trafficmanager.cn", + KeyVaultDNSSuffix: "vault.azure.cn", + ServiceBusEndpointSuffix: "servicebus.chinacloudapi.net", + } + + // GermanCloud is the cloud environment operated in Germany + GermanCloud = Environment{ + Name: "AzureGermanCloud", + ManagementPortalURL: "http://portal.microsoftazure.de/", + PublishSettingsURL: "https://manage.microsoftazure.de/publishsettings/index", + ServiceManagementEndpoint: "https://management.core.cloudapi.de/", + ResourceManagerEndpoint: "https://management.microsoftazure.de/", + ActiveDirectoryEndpoint: "https://login.microsoftonline.de/", + GalleryEndpoint: 
"https://gallery.cloudapi.de/", + KeyVaultEndpoint: "https://vault.microsoftazure.de/", + GraphEndpoint: "https://graph.cloudapi.de/", + StorageEndpointSuffix: "core.cloudapi.de", + SQLDatabaseDNSSuffix: "database.cloudapi.de", + TrafficManagerDNSSuffix: "azuretrafficmanager.de", + KeyVaultDNSSuffix: "vault.microsoftazure.de", + ServiceBusEndpointSuffix: "servicebus.cloudapi.de", + } +) + +// EnvironmentFromName returns an Environment based on the common name specified +func EnvironmentFromName(name string) (Environment, error) { + name = strings.ToUpper(name) + env, ok := environments[name] + if !ok { + return env, fmt.Errorf("autorest/azure: There is no cloud environment matching the name %q", name) + } + return env, nil +} + +// OAuthConfigForTenant returns an OAuthConfig with tenant specific urls +func (env Environment) OAuthConfigForTenant(tenantID string) (*OAuthConfig, error) { + template := "%s/oauth2/%s?api-version=%s" + u, err := url.Parse(env.ActiveDirectoryEndpoint) + if err != nil { + return nil, err + } + authorizeURL, err := u.Parse(fmt.Sprintf(template, tenantID, "authorize", activeDirectoryAPIVersion)) + if err != nil { + return nil, err + } + tokenURL, err := u.Parse(fmt.Sprintf(template, tenantID, "token", activeDirectoryAPIVersion)) + if err != nil { + return nil, err + } + deviceCodeURL, err := u.Parse(fmt.Sprintf(template, tenantID, "devicecode", activeDirectoryAPIVersion)) + if err != nil { + return nil, err + } + + return &OAuthConfig{ + AuthorizeEndpoint: *authorizeURL, + TokenEndpoint: *tokenURL, + DeviceCodeEndpoint: *deviceCodeURL, + }, nil +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/go-autorest/autorest/azure/environments_test.go juju-core-2.0.0/src/github.com/Azure/go-autorest/autorest/azure/environments_test.go --- juju-core-2.0~beta15/src/github.com/Azure/go-autorest/autorest/azure/environments_test.go 1970-01-01 00:00:00.000000000 +0000 +++ 
juju-core-2.0.0/src/github.com/Azure/go-autorest/autorest/azure/environments_test.go 2016-10-13 14:31:55.000000000 +0000 @@ -0,0 +1,215 @@ +package azure + +import ( + "encoding/json" + "testing" +) + +func TestOAuthConfigForTenant(t *testing.T) { + az := PublicCloud + + config, err := az.OAuthConfigForTenant("tenant-id-test") + if err != nil { + t.Fatalf("autorest/azure: Unexpected error while retrieving oauth configuration for tenant: %v.", err) + } + + expected := "https://login.microsoftonline.com/tenant-id-test/oauth2/authorize?api-version=1.0" + if config.AuthorizeEndpoint.String() != expected { + t.Fatalf("autorest/azure: Incorrect authorize url for Tenant from Environment. expected(%s). actual(%s).", expected, config.AuthorizeEndpoint) + } + + expected = "https://login.microsoftonline.com/tenant-id-test/oauth2/token?api-version=1.0" + if config.TokenEndpoint.String() != expected { + t.Fatalf("autorest/azure: Incorrect authorize url for Tenant from Environment. expected(%s). actual(%s).", expected, config.TokenEndpoint) + } + + expected = "https://login.microsoftonline.com/tenant-id-test/oauth2/devicecode?api-version=1.0" + if config.DeviceCodeEndpoint.String() != expected { + t.Fatalf("autorest/azure: Incorrect devicecode url for Tenant from Environment. expected(%s). 
actual(%s).", expected, config.DeviceCodeEndpoint) + } +} + +func TestEnvironmentFromName(t *testing.T) { + name := "azurechinacloud" + if env, _ := EnvironmentFromName(name); env != ChinaCloud { + t.Errorf("Expected to get ChinaCloud for %q", name) + } + + name = "AzureChinaCloud" + if env, _ := EnvironmentFromName(name); env != ChinaCloud { + t.Errorf("Expected to get ChinaCloud for %q", name) + } + + name = "azuregermancloud" + if env, _ := EnvironmentFromName(name); env != GermanCloud { + t.Errorf("Expected to get GermanCloud for %q", name) + } + + name = "AzureGermanCloud" + if env, _ := EnvironmentFromName(name); env != GermanCloud { + t.Errorf("Expected to get GermanCloud for %q", name) + } + + name = "azurepubliccloud" + if env, _ := EnvironmentFromName(name); env != PublicCloud { + t.Errorf("Expected to get PublicCloud for %q", name) + } + + name = "AzurePublicCloud" + if env, _ := EnvironmentFromName(name); env != PublicCloud { + t.Errorf("Expected to get PublicCloud for %q", name) + } + + name = "azureusgovernmentcloud" + if env, _ := EnvironmentFromName(name); env != USGovernmentCloud { + t.Errorf("Expected to get USGovernmentCloud for %q", name) + } + + name = "AzureUSGovernmentCloud" + if env, _ := EnvironmentFromName(name); env != USGovernmentCloud { + t.Errorf("Expected to get USGovernmentCloud for %q", name) + } + + name = "thisisnotarealcloudenv" + if _, err := EnvironmentFromName(name); err == nil { + t.Errorf("Expected to get an error for %q", name) + } +} + +func TestDeserializeEnvironment(t *testing.T) { + env := `{ + "name": "--name--", + "ActiveDirectoryEndpoint": "--active-directory-endpoint--", + "galleryEndpoint": "--gallery-endpoint--", + "graphEndpoint": "--graph-endpoint--", + "keyVaultDNSSuffix": "--key-vault-dns-suffix--", + "keyVaultEndpoint": "--key-vault-endpoint--", + "managementPortalURL": "--management-portal-url--", + "publishSettingsURL": "--publish-settings-url--", + "resourceManagerEndpoint": 
"--resource-manager-endpoint--", + "serviceBusEndpointSuffix": "--service-bus-endpoint-suffix--", + "serviceManagementEndpoint": "--service-management-endpoint--", + "sqlDatabaseDNSSuffix": "--sql-database-dns-suffix--", + "storageEndpointSuffix": "--storage-endpoint-suffix--", + "trafficManagerDNSSuffix": "--traffic-manager-dns-suffix--" + }` + + testSubject := Environment{} + err := json.Unmarshal([]byte(env), &testSubject) + if err != nil { + t.Fatalf("failed to unmarshal: %s", err) + } + + if "--name--" != testSubject.Name { + t.Errorf("Expected Name to be \"--name--\", but got %q", testSubject.Name) + } + if "--management-portal-url--" != testSubject.ManagementPortalURL { + t.Errorf("Expected ManagementPortalURL to be \"--management-portal-url--\", but got %q", testSubject.ManagementPortalURL) + } + if "--publish-settings-url--" != testSubject.PublishSettingsURL { + t.Errorf("Expected PublishSettingsURL to be \"--publish-settings-url--\", but got %q", testSubject.PublishSettingsURL) + } + if "--service-management-endpoint--" != testSubject.ServiceManagementEndpoint { + t.Errorf("Expected ServiceManagementEndpoint to be \"--service-management-endpoint--\", but got %q", testSubject.ServiceManagementEndpoint) + } + if "--resource-manager-endpoint--" != testSubject.ResourceManagerEndpoint { + t.Errorf("Expected ResourceManagerEndpoint to be \"--resource-manager-endpoint--\", but got %q", testSubject.ResourceManagerEndpoint) + } + if "--active-directory-endpoint--" != testSubject.ActiveDirectoryEndpoint { + t.Errorf("Expected ActiveDirectoryEndpoint to be \"--active-directory-endpoint--\", but got %q", testSubject.ActiveDirectoryEndpoint) + } + if "--gallery-endpoint--" != testSubject.GalleryEndpoint { + t.Errorf("Expected GalleryEndpoint to be \"--gallery-endpoint--\", but got %q", testSubject.GalleryEndpoint) + } + if "--key-vault-endpoint--" != testSubject.KeyVaultEndpoint { + t.Errorf("Expected KeyVaultEndpoint to be \"--key-vault-endpoint--\", but got %q", 
testSubject.KeyVaultEndpoint) + } + if "--graph-endpoint--" != testSubject.GraphEndpoint { + t.Errorf("Expected GraphEndpoint to be \"--graph-endpoint--\", but got %q", testSubject.GraphEndpoint) + } + if "--storage-endpoint-suffix--" != testSubject.StorageEndpointSuffix { + t.Errorf("Expected StorageEndpointSuffix to be \"--storage-endpoint-suffix--\", but got %q", testSubject.StorageEndpointSuffix) + } + if "--sql-database-dns-suffix--" != testSubject.SQLDatabaseDNSSuffix { + t.Errorf("Expected sql-database-dns-suffix to be \"--sql-database-dns-suffix--\", but got %q", testSubject.SQLDatabaseDNSSuffix) + } + if "--key-vault-dns-suffix--" != testSubject.KeyVaultDNSSuffix { + t.Errorf("Expected StorageEndpointSuffix to be \"--key-vault-dns-suffix--\", but got %q", testSubject.KeyVaultDNSSuffix) + } + if "--service-bus-endpoint-suffix--" != testSubject.ServiceBusEndpointSuffix { + t.Errorf("Expected StorageEndpointSuffix to be \"--service-bus-endpoint-suffix--\", but got %q", testSubject.ServiceBusEndpointSuffix) + } +} + +func TestRoundTripSerialization(t *testing.T) { + env := Environment{ + Name: "--unit-test--", + ManagementPortalURL: "--management-portal-url", + PublishSettingsURL: "--publish-settings-url--", + ServiceManagementEndpoint: "--service-management-endpoint--", + ResourceManagerEndpoint: "--resource-management-endpoint--", + ActiveDirectoryEndpoint: "--active-directory-endpoint--", + GalleryEndpoint: "--gallery-endpoint--", + KeyVaultEndpoint: "--key-vault--endpoint--", + GraphEndpoint: "--graph-endpoint--", + StorageEndpointSuffix: "--storage-endpoint-suffix--", + SQLDatabaseDNSSuffix: "--sql-database-dns-suffix--", + TrafficManagerDNSSuffix: "--traffic-manager-dns-suffix--", + KeyVaultDNSSuffix: "--key-vault-dns-suffix--", + ServiceBusEndpointSuffix: "--service-bus-endpoint-suffix--", + } + + bytes, err := json.Marshal(env) + if err != nil { + t.Fatalf("failed to marshal: %s", err) + } + + testSubject := Environment{} + err = json.Unmarshal(bytes, 
&testSubject) + if err != nil { + t.Fatalf("failed to unmarshal: %s", err) + } + + if env.Name != testSubject.Name { + t.Errorf("Expected Name to be %q, but got %q", env.Name, testSubject.Name) + } + if env.ManagementPortalURL != testSubject.ManagementPortalURL { + t.Errorf("Expected ManagementPortalURL to be %q, but got %q", env.ManagementPortalURL, testSubject.ManagementPortalURL) + } + if env.PublishSettingsURL != testSubject.PublishSettingsURL { + t.Errorf("Expected PublishSettingsURL to be %q, but got %q", env.PublishSettingsURL, testSubject.PublishSettingsURL) + } + if env.ServiceManagementEndpoint != testSubject.ServiceManagementEndpoint { + t.Errorf("Expected ServiceManagementEndpoint to be %q, but got %q", env.ServiceManagementEndpoint, testSubject.ServiceManagementEndpoint) + } + if env.ResourceManagerEndpoint != testSubject.ResourceManagerEndpoint { + t.Errorf("Expected ResourceManagerEndpoint to be %q, but got %q", env.ResourceManagerEndpoint, testSubject.ResourceManagerEndpoint) + } + if env.ActiveDirectoryEndpoint != testSubject.ActiveDirectoryEndpoint { + t.Errorf("Expected ActiveDirectoryEndpoint to be %q, but got %q", env.ActiveDirectoryEndpoint, testSubject.ActiveDirectoryEndpoint) + } + if env.GalleryEndpoint != testSubject.GalleryEndpoint { + t.Errorf("Expected GalleryEndpoint to be %q, but got %q", env.GalleryEndpoint, testSubject.GalleryEndpoint) + } + if env.KeyVaultEndpoint != testSubject.KeyVaultEndpoint { + t.Errorf("Expected KeyVaultEndpoint to be %q, but got %q", env.KeyVaultEndpoint, testSubject.KeyVaultEndpoint) + } + if env.GraphEndpoint != testSubject.GraphEndpoint { + t.Errorf("Expected GraphEndpoint to be %q, but got %q", env.GraphEndpoint, testSubject.GraphEndpoint) + } + if env.StorageEndpointSuffix != testSubject.StorageEndpointSuffix { + t.Errorf("Expected StorageEndpointSuffix to be %q, but got %q", env.StorageEndpointSuffix, testSubject.StorageEndpointSuffix) + } + if env.SQLDatabaseDNSSuffix != 
testSubject.SQLDatabaseDNSSuffix { + t.Errorf("Expected SQLDatabaseDNSSuffix to be %q, but got %q", env.SQLDatabaseDNSSuffix, testSubject.SQLDatabaseDNSSuffix) + } + if env.TrafficManagerDNSSuffix != testSubject.TrafficManagerDNSSuffix { + t.Errorf("Expected TrafficManagerDNSSuffix to be %q, but got %q", env.TrafficManagerDNSSuffix, testSubject.TrafficManagerDNSSuffix) + } + if env.KeyVaultDNSSuffix != testSubject.KeyVaultDNSSuffix { + t.Errorf("Expected KeyVaultDNSSuffix to be %q, but got %q", env.KeyVaultDNSSuffix, testSubject.KeyVaultDNSSuffix) + } + if env.ServiceBusEndpointSuffix != testSubject.ServiceBusEndpointSuffix { + t.Errorf("Expected ServiceBusEndpointSuffix to be %q, but got %q", env.ServiceBusEndpointSuffix, testSubject.ServiceBusEndpointSuffix) + } +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/go-autorest/autorest/azure/example/main.go juju-core-2.0.0/src/github.com/Azure/go-autorest/autorest/azure/example/main.go --- juju-core-2.0~beta15/src/github.com/Azure/go-autorest/autorest/azure/example/main.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/go-autorest/autorest/azure/example/main.go 2016-10-13 14:31:55.000000000 +0000 @@ -0,0 +1,257 @@ +package main + +import ( + "crypto/rsa" + "crypto/x509" + "encoding/json" + "flag" + "fmt" + "io/ioutil" + "log" + "net/http" + "strings" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "golang.org/x/crypto/pkcs12" +) + +const ( + resourceGroupURLTemplate = "https://management.azure.com" + apiVersion = "2015-01-01" + nativeAppClientID = "a87032a7-203c-4bf7-913c-44c50d23409a" + resource = "https://management.core.windows.net/" +) + +var ( + mode string + tenantID string + subscriptionID string + applicationID string + + tokenCachePath string + forceRefresh bool + impatient bool + + certificatePath string +) + +func init() { + flag.StringVar(&mode, "mode", "device", "mode of operation for SPT creation") + 
flag.StringVar(&certificatePath, "certificatePath", "", "path to pk12/pfx certificate") + flag.StringVar(&applicationID, "applicationId", "", "application id") + flag.StringVar(&tenantID, "tenantId", "", "tenant id") + flag.StringVar(&subscriptionID, "subscriptionId", "", "subscription id") + flag.StringVar(&tokenCachePath, "tokenCachePath", "", "location of oauth token cache") + flag.BoolVar(&forceRefresh, "forceRefresh", false, "pass true to force a token refresh") + + flag.Parse() + + log.Printf("mode(%s) certPath(%s) appID(%s) tenantID(%s), subID(%s)\n", + mode, certificatePath, applicationID, tenantID, subscriptionID) + + if mode == "certificate" && + (strings.TrimSpace(tenantID) == "" || strings.TrimSpace(subscriptionID) == "") { + log.Fatalln("Bad usage. Using certificate mode. Please specify tenantID, subscriptionID") + } + + if mode != "certificate" && mode != "device" { + log.Fatalln("Bad usage. Mode must be one of 'certificate' or 'device'.") + } + + if mode == "device" && strings.TrimSpace(applicationID) == "" { + log.Println("Using device mode auth. Will use `azkube` clientID since none was specified on the comand line.") + applicationID = nativeAppClientID + } + + if mode == "certificate" && strings.TrimSpace(certificatePath) == "" { + log.Fatalln("Bad usage. Mode 'certificate' requires the 'certificatePath' argument.") + } + + if strings.TrimSpace(tenantID) == "" || strings.TrimSpace(subscriptionID) == "" || strings.TrimSpace(applicationID) == "" { + log.Fatalln("Bad usage. 
Must specify the 'tenantId', 'subscriptionId' and 'applicationId'") + } +} + +// getSptFromCachedToken restores a previously saved token from tokenCachePath and wraps it in a ServicePrincipalToken so it can be refreshed later. +func getSptFromCachedToken(oauthConfig azure.OAuthConfig, clientID, resource string, callbacks ...azure.TokenRefreshCallback) (*azure.ServicePrincipalToken, error) { + token, err := azure.LoadToken(tokenCachePath) + if err != nil { + return nil, fmt.Errorf("failed to load token from cache: %v", err) + } + + // Propagate the constructor error instead of discarding it with a blank identifier. + spt, err := azure.NewServicePrincipalTokenFromManualToken( + oauthConfig, + clientID, + resource, + *token, + callbacks...) + if err != nil { + return nil, fmt.Errorf("failed to create service principal token from cached token: %v", err) + } + + return spt, nil +} + +// decodePkcs12 splits a PKCS#12 blob into its certificate and RSA private key. +func decodePkcs12(pkcs []byte, password string) (*x509.Certificate, *rsa.PrivateKey, error) { + privateKey, certificate, err := pkcs12.Decode(pkcs, password) + if err != nil { + return nil, nil, err + } + + rsaPrivateKey, isRsaKey := privateKey.(*rsa.PrivateKey) + if !isRsaKey { + return nil, nil, fmt.Errorf("PKCS#12 certificate must contain an RSA private key") + } + + return certificate, rsaPrivateKey, nil +} + +// getSptFromCertificate builds a ServicePrincipalToken from the PKCS#12 certificate at certificatePath. +// The parameter was previously misspelled ("certicatePath") and silently ignored in favor of the +// package-level variable of the same name; it is now used, which is behavior-identical for the +// existing caller (which passes that global). +func getSptFromCertificate(oauthConfig azure.OAuthConfig, clientID, resource, certificatePath string, callbacks ...azure.TokenRefreshCallback) (*azure.ServicePrincipalToken, error) { + certData, err := ioutil.ReadFile(certificatePath) + if err != nil { + return nil, fmt.Errorf("failed to read the certificate file (%s): %v", certificatePath, err) + } + + certificate, rsaPrivateKey, err := decodePkcs12(certData, "") + if err != nil { + return nil, fmt.Errorf("failed to decode pkcs12 certificate while creating spt: %v", err) + } + + spt, _ := azure.NewServicePrincipalTokenFromCertificate( + oauthConfig, + clientID, + certificate, + rsaPrivateKey, + resource, + callbacks...) 
+ + return spt, nil +} + +func getSptFromDeviceFlow(oauthConfig azure.OAuthConfig, clientID, resource string, callbacks ...azure.TokenRefreshCallback) (*azure.ServicePrincipalToken, error) { + oauthClient := &autorest.Client{} + deviceCode, err := azure.InitiateDeviceAuth(oauthClient, oauthConfig, clientID, resource) + if err != nil { + return nil, fmt.Errorf("failed to start device auth flow: %s", err) + } + + fmt.Println(*deviceCode.Message) + + token, err := azure.WaitForUserCompletion(oauthClient, deviceCode) + if err != nil { + return nil, fmt.Errorf("failed to finish device auth flow: %s", err) + } + + spt, err := azure.NewServicePrincipalTokenFromManualToken( + oauthConfig, + clientID, + resource, + *token, + callbacks...) + if err != nil { + return nil, fmt.Errorf("failed to get oauth token from device flow: %v", err) + } + + return spt, nil +} + +func printResourceGroups(client *autorest.Client) error { + p := map[string]interface{}{"subscription-id": subscriptionID} + q := map[string]interface{}{"api-version": apiVersion} + + req, _ := autorest.Prepare(&http.Request{}, + autorest.AsGet(), + autorest.WithBaseURL(resourceGroupURLTemplate), + autorest.WithPathParameters("/subscriptions/{subscription-id}/resourcegroups", p), + autorest.WithQueryParameters(q)) + + resp, err := autorest.SendWithSender(client, req) + if err != nil { + return err + } + + value := struct { + ResourceGroups []struct { + Name string `json:"name"` + } `json:"value"` + }{} + + defer resp.Body.Close() + dec := json.NewDecoder(resp.Body) + err = dec.Decode(&value) + if err != nil { + return err + } + + var groupNames = make([]string, len(value.ResourceGroups)) + for i, name := range value.ResourceGroups { + groupNames[i] = name.Name + } + + log.Println("Groups:", strings.Join(groupNames, ", ")) + return err +} + +func saveToken(spt azure.Token) { + if tokenCachePath != "" { + err := azure.SaveToken(tokenCachePath, 0600, spt) + if err != nil { + log.Println("error saving token", err) + } 
else { + log.Println("saved token to", tokenCachePath) + } + } +} + +func main() { + var spt *azure.ServicePrincipalToken + var err error + + callback := func(t azure.Token) error { + log.Println("refresh callback was called") + saveToken(t) + return nil + } + + oauthConfig, err := azure.PublicCloud.OAuthConfigForTenant(tenantID) + if err != nil { + panic(err) + } + + if tokenCachePath != "" { + log.Println("tokenCachePath specified; attempting to load from", tokenCachePath) + spt, err = getSptFromCachedToken(*oauthConfig, applicationID, resource, callback) + if err != nil { + spt = nil // just in case, this is the condition below + log.Println("loading from cache failed:", err) + } + } + + if spt == nil { + log.Println("authenticating via 'mode'", mode) + switch mode { + case "device": + spt, err = getSptFromDeviceFlow(*oauthConfig, applicationID, resource, callback) + case "certificate": + spt, err = getSptFromCertificate(*oauthConfig, applicationID, resource, certificatePath, callback) + } + if err != nil { + log.Fatalln("failed to retrieve token:", err) + } + + // should save it as soon as you get it since Refresh won't be called for some time + if tokenCachePath != "" { + saveToken(spt.Token) + } + } + + client := &autorest.Client{} + client.Authorizer = spt + + printResourceGroups(client) + + if forceRefresh { + err = spt.Refresh() + if err != nil { + panic(err) + } + printResourceGroups(client) + } +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/go-autorest/autorest/azure/example/README.md juju-core-2.0.0/src/github.com/Azure/go-autorest/autorest/azure/example/README.md --- juju-core-2.0~beta15/src/github.com/Azure/go-autorest/autorest/azure/example/README.md 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/go-autorest/autorest/azure/example/README.md 2016-10-13 14:31:55.000000000 +0000 @@ -0,0 +1,127 @@ +# autorest azure example + +## Usage (device mode) + +This shows how to use the example for device auth. + +1. 
Execute this. It will save your token to /tmp/azure-example-token: + + ``` + ./example -tenantId "13de0a15-b5db-44b9-b682-b4ba82afbd29" -subscriptionId "aff271ee-e9be-4441-b9bb-42f5af4cbaeb" -mode "device" -tokenCachePath "/tmp/azure-example-token" + ``` + +2. Execute it again, it will load the token from cache and not prompt for auth again. + +## Usage (certificate mode) + +This example covers how to make an authenticated call to the Azure Resource Manager APIs, using certificate-based authentication. + +0. Export some required variables + + ``` + export SUBSCRIPTION_ID="aff271ee-e9be-4441-b9bb-42f5af4cbaeb" + export TENANT_ID="13de0a15-b5db-44b9-b682-b4ba82afbd29" + export RESOURCE_GROUP="someresourcegroup" + ``` + + * replace both values with your own + +1. Create a private key + + ``` + openssl genrsa -out "example.key" 2048 + ``` + + + +2. Create the certificate + + ``` + openssl req -new -key "example.key" -subj "/CN=example" -out "example.csr" + + openssl x509 -req -in "example.csr" -signkey "example.key" -out "example.crt" -days 10000 + ``` + + + +3. Create the PKCS12 version of the certificate (with no password) + + ``` + openssl pkcs12 -export -out "example.pfx" -inkey "example.key" -in "example.crt" -passout pass: + ``` + + + +4. Register a new Azure AD Application with the certificate contents + + ``` + certificateContents="$(tail -n+2 "example.key" | head -n-1)" + + azure ad app create \ + --name "example-azuread-app" \ + --home-page="http://example-azuread-app/home" \ + --identifier-uris "http://example-azuread-app/app" \ + --key-usage "Verify" \ + --end-date "2020-01-01" \ + --key-value "${certificateContents}" + ``` + + + +5. Create a new service principal using the "Application Id" from the previous step + + ``` + azure ad sp create "APPLICATION_ID" + ``` + + * Replace APPLICATION_ID with the "Application Id" returned in step 4 + + + +6. 
Grant your service principal necessary permissions + + ``` + azure role assignment create \ + --resource-group "${RESOURCE_GROUP}" \ + --roleName "Contributor" \ + --subscription "${SUBSCRIPTION_ID}" \ + --spn "http://example-azuread-app/app" + ``` + + * Replace SUBSCRIPTION_ID with your subscription id + * Replace RESOURCE_GROUP with the resource group for the assignment + * Ensure that the `spn` parameter matches an `identifier-url` from Step 4 + + + +7. Run this example app to see your resource groups + + ``` + go run main.go \ + --tenantId="${TENANT_ID}" \ + --subscriptionId="${SUBSCRIPTION_ID}" \ + --applicationId="http://example-azuread-app/app" \ + --certificatePath="certificate.pfx" + ``` + + +You should see something like this as output: + +``` +2015/11/08 18:28:39 Using these settings: +2015/11/08 18:28:39 * certificatePath: certificate.pfx +2015/11/08 18:28:39 * applicationID: http://example-azuread-app/app +2015/11/08 18:28:39 * tenantID: 13de0a15-b5db-44b9-b682-b4ba82afbd29 +2015/11/08 18:28:39 * subscriptionID: aff271ee-e9be-4441-b9bb-42f5af4cbaeb +2015/11/08 18:28:39 loading certificate... +2015/11/08 18:28:39 retrieve oauth token... +2015/11/08 18:28:39 querying the list of resource groups... +2015/11/08 18:28:50 +2015/11/08 18:28:50 Groups: {"value":[{"id":"/subscriptions/aff271ee-e9be-4441-b9bb-42f5af4cbaeb/resourceGroups/kube-66f30810","name":"kube-66f30810","location":"westus","tags":{},"properties":{"provisioningState":"Succeeded"}}]} +``` + + + +## Notes + +You may need to wait sometime between executing step 4, step 5 and step 6. If you issue those requests too quickly, you might hit an AD server that is not consistent with the server where the resource was created. 
diff -Nru juju-core-2.0~beta15/src/github.com/Azure/go-autorest/autorest/azure/persist.go juju-core-2.0.0/src/github.com/Azure/go-autorest/autorest/azure/persist.go --- juju-core-2.0~beta15/src/github.com/Azure/go-autorest/autorest/azure/persist.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/go-autorest/autorest/azure/persist.go 2016-10-13 14:31:55.000000000 +0000 @@ -0,0 +1,59 @@ +package azure + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "os" + "path/filepath" +) + +// LoadToken restores a Token object from a file located at 'path'. +func LoadToken(path string) (*Token, error) { + file, err := os.Open(path) + if err != nil { + return nil, fmt.Errorf("failed to open file (%s) while loading token: %v", path, err) + } + defer file.Close() + + var token Token + + dec := json.NewDecoder(file) + if err = dec.Decode(&token); err != nil { + return nil, fmt.Errorf("failed to decode contents of file (%s) into Token representation: %v", path, err) + } + return &token, nil +} + +// SaveToken persists an oauth token at the given location on disk. +// It moves the new file into place so it can safely be used to replace an existing file +// that maybe accessed by multiple processes. 
+func SaveToken(path string, mode os.FileMode, token Token) error { + dir := filepath.Dir(path) + err := os.MkdirAll(dir, os.ModePerm) + if err != nil { + return fmt.Errorf("failed to create directory (%s) to store token in: %v", dir, err) + } + + newFile, err := ioutil.TempFile(dir, "token") + if err != nil { + return fmt.Errorf("failed to create the temp file to write the token: %v", err) + } + tempPath := newFile.Name() + + if err := json.NewEncoder(newFile).Encode(token); err != nil { + return fmt.Errorf("failed to encode token to file (%s) while saving token: %v", tempPath, err) + } + if err := newFile.Close(); err != nil { + return fmt.Errorf("failed to close temp file %s: %v", tempPath, err) + } + + // Atomic replace to avoid multi-writer file corruptions + if err := os.Rename(tempPath, path); err != nil { + return fmt.Errorf("failed to move temporary token to desired output location. src=%s dst=%s: %v", tempPath, path, err) + } + if err := os.Chmod(path, mode); err != nil { + return fmt.Errorf("failed to chmod the token file %s: %v", path, err) + } + return nil +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/go-autorest/autorest/azure/persist_test.go juju-core-2.0.0/src/github.com/Azure/go-autorest/autorest/azure/persist_test.go --- juju-core-2.0~beta15/src/github.com/Azure/go-autorest/autorest/azure/persist_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/go-autorest/autorest/azure/persist_test.go 2016-10-13 14:31:55.000000000 +0000 @@ -0,0 +1,152 @@ +package azure + +import ( + "encoding/json" + "io/ioutil" + "os" + "reflect" + "runtime" + "strings" + "testing" +) + +const MockTokenJSON string = `{ + "access_token": "accessToken", + "refresh_token": "refreshToken", + "expires_in": "1000", + "expires_on": "2000", + "not_before": "3000", + "resource": "resource", + "token_type": "type" +}` + +var TestToken = Token{ + AccessToken: "accessToken", + RefreshToken: "refreshToken", + ExpiresIn: "1000", + ExpiresOn: 
"2000", + NotBefore: "3000", + Resource: "resource", + Type: "type", +} + +func writeTestTokenFile(t *testing.T, suffix string, contents string) *os.File { + f, err := ioutil.TempFile(os.TempDir(), suffix) + if err != nil { + t.Fatalf("azure: unexpected error when creating temp file: %v", err) + } + defer f.Close() + + _, err = f.Write([]byte(contents)) + if err != nil { + t.Fatalf("azure: unexpected error when writing temp test file: %v", err) + } + + return f +} + +func TestLoadToken(t *testing.T) { + f := writeTestTokenFile(t, "testloadtoken", MockTokenJSON) + defer os.Remove(f.Name()) + + expectedToken := TestToken + actualToken, err := LoadToken(f.Name()) + if err != nil { + t.Fatalf("azure: unexpected error loading token from file: %v", err) + } + + if *actualToken != expectedToken { + t.Fatalf("azure: failed to decode properly expected(%v) actual(%v)", expectedToken, *actualToken) + } + + // test that LoadToken closes the file properly + err = SaveToken(f.Name(), 0600, *actualToken) + if err != nil { + t.Fatalf("azure: could not save token after LoadToken: %v", err) + } +} + +func TestLoadTokenFailsBadPath(t *testing.T) { + _, err := LoadToken("/tmp/this_file_should_never_exist_really") + expectedSubstring := "failed to open file" + if err == nil || !strings.Contains(err.Error(), expectedSubstring) { + t.Fatalf("azure: failed to get correct error expected(%s) actual(%s)", expectedSubstring, err.Error()) + } +} + +func TestLoadTokenFailsBadJson(t *testing.T) { + gibberishJSON := strings.Replace(MockTokenJSON, "expires_on", ";:\"gibberish", -1) + f := writeTestTokenFile(t, "testloadtokenfailsbadjson", gibberishJSON) + defer os.Remove(f.Name()) + + _, err := LoadToken(f.Name()) + expectedSubstring := "failed to decode contents of file" + if err == nil || !strings.Contains(err.Error(), expectedSubstring) { + t.Fatalf("azure: failed to get correct error expected(%s) actual(%s)", expectedSubstring, err.Error()) + } +} + +func token() *Token { + var token Token + 
json.Unmarshal([]byte(MockTokenJSON), &token) + return &token +} + +func TestSaveToken(t *testing.T) { + f, err := ioutil.TempFile("", "testloadtoken") + if err != nil { + t.Fatalf("azure: unexpected error when creating temp file: %v", err) + } + defer os.Remove(f.Name()) + f.Close() + + mode := os.ModePerm & 0642 + err = SaveToken(f.Name(), mode, *token()) + if err != nil { + t.Fatalf("azure: unexpected error saving token to file: %v", err) + } + fi, err := os.Stat(f.Name()) // open a new stat as held ones are not fresh + if err != nil { + t.Fatalf("azure: stat failed: %v", err) + } + if runtime.GOOS != "windows" { // permissions don't work on Windows + if perm := fi.Mode().Perm(); perm != mode { + t.Fatalf("azure: wrong file perm. got:%s; expected:%s file :%s", perm, mode, f.Name()) + } + } + + var actualToken Token + var expectedToken Token + + // Unmarshal requires a pointer; passing the value left both tokens zero and made + // the DeepEqual below pass vacuously. + json.Unmarshal([]byte(MockTokenJSON), &expectedToken) + + contents, err := ioutil.ReadFile(f.Name()) + if err != nil { + t.Fatal("!!") + } + json.Unmarshal(contents, &actualToken) + + if !reflect.DeepEqual(actualToken, expectedToken) { + t.Fatal("azure: token was not serialized correctly") + } +} + +func TestSaveTokenFailsNoPermission(t *testing.T) { + pathWhereWeShouldntHavePermission := "/usr/thiswontwork/atall" + if runtime.GOOS == "windows" { + pathWhereWeShouldntHavePermission = "c:\\windows\\system32\\mytokendir\\mytoken" + } + err := SaveToken(pathWhereWeShouldntHavePermission, 0644, *token()) + expectedSubstring := "failed to create directory" + if err == nil || !strings.Contains(err.Error(), expectedSubstring) { + t.Fatalf("azure: failed to get correct error expected(%s) actual(%v)", expectedSubstring, err) + } +} + +func TestSaveTokenFailsCantCreate(t *testing.T) { + err := SaveToken("/thiswontwork", 0644, *token()) + expectedSubstring := "failed to create the temp file to write the token" + if err == nil || !strings.Contains(err.Error(), expectedSubstring) { + t.Fatalf("azure: failed to get correct error 
expected(%s) actual(%v)", expectedSubstring, err) + } +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/go-autorest/autorest/azure/token.go juju-core-2.0.0/src/github.com/Azure/go-autorest/autorest/azure/token.go --- juju-core-2.0~beta15/src/github.com/Azure/go-autorest/autorest/azure/token.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/go-autorest/autorest/azure/token.go 2016-10-13 14:31:55.000000000 +0000 @@ -0,0 +1,363 @@ +package azure + +import ( + "crypto/rand" + "crypto/rsa" + "crypto/sha1" + "crypto/x509" + "encoding/base64" + "fmt" + "net/http" + "net/url" + "strconv" + "time" + + "github.com/Azure/go-autorest/autorest" + "github.com/dgrijalva/jwt-go" +) + +const ( + defaultRefresh = 5 * time.Minute + tokenBaseDate = "1970-01-01T00:00:00Z" + + // OAuthGrantTypeDeviceCode is the "grant_type" identifier used in device flow + OAuthGrantTypeDeviceCode = "device_code" + + // OAuthGrantTypeClientCredentials is the "grant_type" identifier used in credential flows + OAuthGrantTypeClientCredentials = "client_credentials" + + // OAuthGrantTypeRefreshToken is the "grant_type" identifier used in refresh token flows + OAuthGrantTypeRefreshToken = "refresh_token" +) + +var expirationBase time.Time + +func init() { + expirationBase, _ = time.Parse(time.RFC3339, tokenBaseDate) +} + +// TokenRefreshCallback is the type representing callbacks that will be called after +// a successful token refresh +type TokenRefreshCallback func(Token) error + +// Token encapsulates the access token used to authorize Azure requests. +type Token struct { + AccessToken string `json:"access_token"` + RefreshToken string `json:"refresh_token"` + + ExpiresIn string `json:"expires_in"` + ExpiresOn string `json:"expires_on"` + NotBefore string `json:"not_before"` + + Resource string `json:"resource"` + Type string `json:"token_type"` +} + +// Expires returns the time.Time when the Token expires. 
+func (t Token) Expires() time.Time { + s, err := strconv.Atoi(t.ExpiresOn) + if err != nil { + s = -3600 + } + return expirationBase.Add(time.Duration(s) * time.Second).UTC() +} + +// IsExpired returns true if the Token is expired, false otherwise. +func (t Token) IsExpired() bool { + return t.WillExpireIn(0) +} + +// WillExpireIn returns true if the Token will expire after the passed time.Duration interval +// from now, false otherwise. +func (t Token) WillExpireIn(d time.Duration) bool { + return !t.Expires().After(time.Now().Add(d)) +} + +// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose +// value is "Bearer " followed by the AccessToken of the Token. +func (t *Token) WithAuthorization() autorest.PrepareDecorator { + return func(p autorest.Preparer) autorest.Preparer { + return autorest.PreparerFunc(func(r *http.Request) (*http.Request, error) { + return (autorest.WithBearerAuthorization(t.AccessToken)(p)).Prepare(r) + }) + } +} + +// ServicePrincipalNoSecret represents a secret type that contains no secret +// meaning it is not valid for fetching a fresh token. This is used by Manual +type ServicePrincipalNoSecret struct { +} + +// SetAuthenticationValues is a method of the interface ServicePrincipalSecret +// It only returns an error for the ServicePrincipalNoSecret type +func (noSecret *ServicePrincipalNoSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { + return fmt.Errorf("Manually created ServicePrincipalToken does not contain secret material to retrieve a new access token.") +} + +// ServicePrincipalSecret is an interface that allows various secret mechanism to fill the form +// that is submitted when acquiring an oAuth token. +type ServicePrincipalSecret interface { + SetAuthenticationValues(spt *ServicePrincipalToken, values *url.Values) error +} + +// ServicePrincipalTokenSecret implements ServicePrincipalSecret for client_secret type authorization. 
+type ServicePrincipalTokenSecret struct { + ClientSecret string +} + +// SetAuthenticationValues is a method of the interface ServicePrincipalSecret. +// It will populate the form submitted during oAuth Token Acquisition using the client_secret. +func (tokenSecret *ServicePrincipalTokenSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { + v.Set("client_secret", tokenSecret.ClientSecret) + return nil +} + +// ServicePrincipalCertificateSecret implements ServicePrincipalSecret for generic RSA cert auth with signed JWTs. +type ServicePrincipalCertificateSecret struct { + Certificate *x509.Certificate + PrivateKey *rsa.PrivateKey +} + +// SignJwt returns the JWT signed with the certificate's private key. +func (secret *ServicePrincipalCertificateSecret) SignJwt(spt *ServicePrincipalToken) (string, error) { + hasher := sha1.New() + _, err := hasher.Write(secret.Certificate.Raw) + if err != nil { + return "", err + } + + thumbprint := base64.URLEncoding.EncodeToString(hasher.Sum(nil)) + + // The jti (JWT ID) claim provides a unique identifier for the JWT. + jti := make([]byte, 20) + _, err = rand.Read(jti) + if err != nil { + return "", err + } + + token := jwt.New(jwt.SigningMethodRS256) + token.Header["x5t"] = thumbprint + token.Claims = jwt.MapClaims{ + "aud": spt.oauthConfig.TokenEndpoint, + "iss": spt.clientID, + "sub": spt.clientID, + "jti": base64.URLEncoding.EncodeToString(jti), + "nbf": time.Now().Unix(), + "exp": time.Now().Add(time.Hour * 24).Unix(), + } + + signedString, err := token.SignedString(secret.PrivateKey) + return signedString, err +} + +// SetAuthenticationValues is a method of the interface ServicePrincipalSecret. +// It will populate the form submitted during oAuth Token Acquisition using a JWT signed with a certificate. 
+func (secret *ServicePrincipalCertificateSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { + jwt, err := secret.SignJwt(spt) + if err != nil { + return err + } + + v.Set("client_assertion", jwt) + v.Set("client_assertion_type", "urn:ietf:params:oauth:client-assertion-type:jwt-bearer") + return nil +} + +// ServicePrincipalToken encapsulates a Token created for a Service Principal. +type ServicePrincipalToken struct { + Token + + secret ServicePrincipalSecret + oauthConfig OAuthConfig + clientID string + resource string + autoRefresh bool + refreshWithin time.Duration + sender autorest.Sender + + refreshCallbacks []TokenRefreshCallback +} + +// NewServicePrincipalTokenWithSecret create a ServicePrincipalToken using the supplied ServicePrincipalSecret implementation. +func NewServicePrincipalTokenWithSecret(oauthConfig OAuthConfig, id string, resource string, secret ServicePrincipalSecret, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { + spt := &ServicePrincipalToken{ + oauthConfig: oauthConfig, + secret: secret, + clientID: id, + resource: resource, + autoRefresh: true, + refreshWithin: defaultRefresh, + sender: &http.Client{}, + refreshCallbacks: callbacks, + } + return spt, nil +} + +// NewServicePrincipalTokenFromManualToken creates a ServicePrincipalToken using the supplied token +func NewServicePrincipalTokenFromManualToken(oauthConfig OAuthConfig, clientID string, resource string, token Token, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { + spt, err := NewServicePrincipalTokenWithSecret( + oauthConfig, + clientID, + resource, + &ServicePrincipalNoSecret{}, + callbacks...) + if err != nil { + return nil, err + } + + spt.Token = token + + return spt, nil +} + +// NewServicePrincipalToken creates a ServicePrincipalToken from the supplied Service Principal +// credentials scoped to the named resource. 
+func NewServicePrincipalToken(oauthConfig OAuthConfig, clientID string, secret string, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { + return NewServicePrincipalTokenWithSecret( + oauthConfig, + clientID, + resource, + &ServicePrincipalTokenSecret{ + ClientSecret: secret, + }, + callbacks..., + ) +} + +// NewServicePrincipalTokenFromCertificate create a ServicePrincipalToken from the supplied pkcs12 bytes. +func NewServicePrincipalTokenFromCertificate(oauthConfig OAuthConfig, clientID string, certificate *x509.Certificate, privateKey *rsa.PrivateKey, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { + return NewServicePrincipalTokenWithSecret( + oauthConfig, + clientID, + resource, + &ServicePrincipalCertificateSecret{ + PrivateKey: privateKey, + Certificate: certificate, + }, + callbacks..., + ) +} + +// EnsureFresh will refresh the token if it will expire within the refresh window (as set by +// RefreshWithin). +func (spt *ServicePrincipalToken) EnsureFresh() error { + if spt.WillExpireIn(spt.refreshWithin) { + return spt.Refresh() + } + return nil +} + +// InvokeRefreshCallbacks calls any TokenRefreshCallbacks that were added to the SPT during initialization +func (spt *ServicePrincipalToken) InvokeRefreshCallbacks(token Token) error { + if spt.refreshCallbacks != nil { + for _, callback := range spt.refreshCallbacks { + err := callback(spt.Token) + if err != nil { + return autorest.NewErrorWithError(err, + "azure.ServicePrincipalToken", "InvokeRefreshCallbacks", nil, "A TokenRefreshCallback handler returned an error") + } + } + } + return nil +} + +// Refresh obtains a fresh token for the Service Principal. +func (spt *ServicePrincipalToken) Refresh() error { + return spt.refreshInternal(spt.resource) +} + +// RefreshExchange refreshes the token, but for a different resource. 
+func (spt *ServicePrincipalToken) RefreshExchange(resource string) error { + return spt.refreshInternal(resource) +} + +func (spt *ServicePrincipalToken) refreshInternal(resource string) error { + v := url.Values{} + v.Set("client_id", spt.clientID) + v.Set("resource", resource) + + if spt.RefreshToken != "" { + v.Set("grant_type", OAuthGrantTypeRefreshToken) + v.Set("refresh_token", spt.RefreshToken) + } else { + v.Set("grant_type", OAuthGrantTypeClientCredentials) + err := spt.secret.SetAuthenticationValues(spt, &v) + if err != nil { + return err + } + } + + req, _ := autorest.Prepare(&http.Request{}, + autorest.AsPost(), + autorest.AsFormURLEncoded(), + autorest.WithBaseURL(spt.oauthConfig.TokenEndpoint.String()), + autorest.WithFormData(v)) + + resp, err := autorest.SendWithSender(spt.sender, req) + if err != nil { + return autorest.NewErrorWithError(err, + "azure.ServicePrincipalToken", "Refresh", resp, "Failure sending request for Service Principal %s", + spt.clientID) + } + + var newToken Token + err = autorest.Respond(resp, + autorest.WithErrorUnlessOK(), + autorest.ByUnmarshallingJSON(&newToken), + autorest.ByClosing()) + if err != nil { + return autorest.NewErrorWithError(err, + "azure.ServicePrincipalToken", "Refresh", resp, "Failure handling response to Service Principal %s request", + spt.clientID) + } + + spt.Token = newToken + + err = spt.InvokeRefreshCallbacks(newToken) + if err != nil { + // its already wrapped inside InvokeRefreshCallbacks + return err + } + + return nil +} + +// SetAutoRefresh enables or disables automatic refreshing of stale tokens. +func (spt *ServicePrincipalToken) SetAutoRefresh(autoRefresh bool) { + spt.autoRefresh = autoRefresh +} + +// SetRefreshWithin sets the interval within which if the token will expire, EnsureFresh will +// refresh the token. 
+func (spt *ServicePrincipalToken) SetRefreshWithin(d time.Duration) { + spt.refreshWithin = d + return +} + +// SetSender sets the autorest.Sender used when obtaining the Service Principal token. An +// undecorated http.Client is used by default. +func (spt *ServicePrincipalToken) SetSender(s autorest.Sender) { + spt.sender = s +} + +// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose +// value is "Bearer " followed by the AccessToken of the ServicePrincipalToken. +// +// By default, the token will automatically refresh if nearly expired (as determined by the +// RefreshWithin interval). Use the AutoRefresh method to enable or disable automatically refreshing +// tokens. +func (spt *ServicePrincipalToken) WithAuthorization() autorest.PrepareDecorator { + return func(p autorest.Preparer) autorest.Preparer { + return autorest.PreparerFunc(func(r *http.Request) (*http.Request, error) { + if spt.autoRefresh { + err := spt.EnsureFresh() + if err != nil { + return r, autorest.NewErrorWithError(err, + "azure.ServicePrincipalToken", "WithAuthorization", nil, "Failed to refresh Service Principal Token for request to %s", + r.URL) + } + } + return (autorest.WithBearerAuthorization(spt.AccessToken)(p)).Prepare(r) + }) + } +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/go-autorest/autorest/azure/token_test.go juju-core-2.0.0/src/github.com/Azure/go-autorest/autorest/azure/token_test.go --- juju-core-2.0~beta15/src/github.com/Azure/go-autorest/autorest/azure/token_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/go-autorest/autorest/azure/token_test.go 2016-10-13 14:31:55.000000000 +0000 @@ -0,0 +1,506 @@ +package azure + +import ( + "crypto/rand" + "crypto/rsa" + "crypto/x509" + "crypto/x509/pkix" + "fmt" + "io/ioutil" + "math/big" + "net/http" + "net/url" + "reflect" + "strconv" + "strings" + "testing" + "time" + + "github.com/Azure/go-autorest/autorest" + 
"github.com/Azure/go-autorest/autorest/mocks" +) + +const ( + defaultFormData = "client_id=id&client_secret=secret&grant_type=client_credentials&resource=resource" + defaultManualFormData = "client_id=id&grant_type=refresh_token&refresh_token=refreshtoken&resource=resource" +) + +func TestTokenExpires(t *testing.T) { + tt := time.Now().Add(5 * time.Second) + tk := newTokenExpiresAt(tt) + + if tk.Expires().Equal(tt) { + t.Fatalf("azure: Token#Expires miscalculated expiration time -- received %v, expected %v", tk.Expires(), tt) + } +} + +func TestTokenIsExpired(t *testing.T) { + tk := newTokenExpiresAt(time.Now().Add(-5 * time.Second)) + + if !tk.IsExpired() { + t.Fatalf("azure: Token#IsExpired failed to mark a stale token as expired -- now %v, token expires at %v", + time.Now().UTC(), tk.Expires()) + } +} + +func TestTokenIsExpiredUninitialized(t *testing.T) { + tk := &Token{} + + if !tk.IsExpired() { + t.Fatalf("azure: An uninitialized Token failed to mark itself as expired (expiration time %v)", tk.Expires()) + } +} + +func TestTokenIsNoExpired(t *testing.T) { + tk := newTokenExpiresAt(time.Now().Add(1000 * time.Second)) + + if tk.IsExpired() { + t.Fatalf("azure: Token marked a fresh token as expired -- now %v, token expires at %v", time.Now().UTC(), tk.Expires()) + } +} + +func TestTokenWillExpireIn(t *testing.T) { + d := 5 * time.Second + tk := newTokenExpiresIn(d) + + if !tk.WillExpireIn(d) { + t.Fatal("azure: Token#WillExpireIn mismeasured expiration time") + } +} + +func TestTokenWithAuthorization(t *testing.T) { + tk := newToken() + + req, err := autorest.Prepare(&http.Request{}, tk.WithAuthorization()) + if err != nil { + t.Fatalf("azure: Token#WithAuthorization returned an error (%v)", err) + } else if req.Header.Get(http.CanonicalHeaderKey("Authorization")) != fmt.Sprintf("Bearer %s", tk.AccessToken) { + t.Fatal("azure: Token#WithAuthorization failed to set Authorization header") + } +} + +func TestServicePrincipalTokenSetAutoRefresh(t *testing.T) { + spt 
:= newServicePrincipalToken() + + if !spt.autoRefresh { + t.Fatal("azure: ServicePrincipalToken did not default to automatic token refreshing") + } + + spt.SetAutoRefresh(false) + if spt.autoRefresh { + t.Fatal("azure: ServicePrincipalToken#SetAutoRefresh did not disable automatic token refreshing") + } +} + +func TestServicePrincipalTokenSetRefreshWithin(t *testing.T) { + spt := newServicePrincipalToken() + + if spt.refreshWithin != defaultRefresh { + t.Fatal("azure: ServicePrincipalToken did not correctly set the default refresh interval") + } + + spt.SetRefreshWithin(2 * defaultRefresh) + if spt.refreshWithin != 2*defaultRefresh { + t.Fatal("azure: ServicePrincipalToken#SetRefreshWithin did not set the refresh interval") + } +} + +func TestServicePrincipalTokenSetSender(t *testing.T) { + spt := newServicePrincipalToken() + + var s autorest.Sender + s = mocks.NewSender() + spt.SetSender(s) + if !reflect.DeepEqual(s, spt.sender) { + t.Fatal("azure: ServicePrincipalToken#SetSender did not set the sender") + } +} + +func TestServicePrincipalTokenRefreshUsesPOST(t *testing.T) { + spt := newServicePrincipalToken() + + body := mocks.NewBody("") + resp := mocks.NewResponseWithBodyAndStatus(body, 200, "OK") + + c := mocks.NewSender() + s := autorest.DecorateSender(c, + (func() autorest.SendDecorator { + return func(s autorest.Sender) autorest.Sender { + return autorest.SenderFunc(func(r *http.Request) (*http.Response, error) { + if r.Method != "POST" { + t.Fatalf("azure: ServicePrincipalToken#Refresh did not correctly set HTTP method -- expected %v, received %v", "POST", r.Method) + } + return resp, nil + }) + } + })()) + spt.SetSender(s) + spt.Refresh() + + if body.IsOpen() { + t.Fatalf("the response was not closed!") + } +} + +func TestServicePrincipalTokenRefreshSetsMimeType(t *testing.T) { + spt := newServicePrincipalToken() + + c := mocks.NewSender() + s := autorest.DecorateSender(c, + (func() autorest.SendDecorator { + return func(s autorest.Sender) autorest.Sender 
{ + return autorest.SenderFunc(func(r *http.Request) (*http.Response, error) { + if r.Header.Get(http.CanonicalHeaderKey("Content-Type")) != "application/x-www-form-urlencoded" { + t.Fatalf("azure: ServicePrincipalToken#Refresh did not correctly set Content-Type -- expected %v, received %v", + "application/x-form-urlencoded", + r.Header.Get(http.CanonicalHeaderKey("Content-Type"))) + } + return mocks.NewResponse(), nil + }) + } + })()) + spt.SetSender(s) + spt.Refresh() +} + +func TestServicePrincipalTokenRefreshSetsURL(t *testing.T) { + spt := newServicePrincipalToken() + + c := mocks.NewSender() + s := autorest.DecorateSender(c, + (func() autorest.SendDecorator { + return func(s autorest.Sender) autorest.Sender { + return autorest.SenderFunc(func(r *http.Request) (*http.Response, error) { + if r.URL.String() != TestOAuthConfig.TokenEndpoint.String() { + t.Fatalf("azure: ServicePrincipalToken#Refresh did not correctly set the URL -- expected %v, received %v", + TestOAuthConfig.TokenEndpoint, r.URL) + } + return mocks.NewResponse(), nil + }) + } + })()) + spt.SetSender(s) + spt.Refresh() +} + +func testServicePrincipalTokenRefreshSetsBody(t *testing.T, spt *ServicePrincipalToken, f func(*testing.T, []byte)) { + c := mocks.NewSender() + s := autorest.DecorateSender(c, + (func() autorest.SendDecorator { + return func(s autorest.Sender) autorest.Sender { + return autorest.SenderFunc(func(r *http.Request) (*http.Response, error) { + b, err := ioutil.ReadAll(r.Body) + if err != nil { + t.Fatalf("azure: Failed to read body of Service Principal token request (%v)", err) + } + f(t, b) + return mocks.NewResponse(), nil + }) + } + })()) + spt.SetSender(s) + spt.Refresh() +} + +func TestServicePrincipalTokenManualRefreshSetsBody(t *testing.T) { + sptManual := newServicePrincipalTokenManual() + testServicePrincipalTokenRefreshSetsBody(t, sptManual, func(t *testing.T, b []byte) { + if string(b) != defaultManualFormData { + t.Fatalf("azure: ServicePrincipalToken#Refresh did not 
correctly set the HTTP Request Body -- expected %v, received %v", + defaultManualFormData, string(b)) + } + }) +} + +func TestServicePrincipalTokenCertficateRefreshSetsBody(t *testing.T) { + sptCert := newServicePrincipalTokenCertificate(t) + testServicePrincipalTokenRefreshSetsBody(t, sptCert, func(t *testing.T, b []byte) { + body := string(b) + + values, _ := url.ParseQuery(body) + if values["client_assertion_type"][0] != "urn:ietf:params:oauth:client-assertion-type:jwt-bearer" || + values["client_id"][0] != "id" || + values["grant_type"][0] != "client_credentials" || + values["resource"][0] != "resource" { + t.Fatalf("azure: ServicePrincipalTokenCertificate#Refresh did not correctly set the HTTP Request Body.") + } + }) +} + +func TestServicePrincipalTokenSecretRefreshSetsBody(t *testing.T) { + spt := newServicePrincipalToken() + testServicePrincipalTokenRefreshSetsBody(t, spt, func(t *testing.T, b []byte) { + if string(b) != defaultFormData { + t.Fatalf("azure: ServicePrincipalToken#Refresh did not correctly set the HTTP Request Body -- expected %v, received %v", + defaultFormData, string(b)) + } + + }) +} + +func TestServicePrincipalTokenRefreshClosesRequestBody(t *testing.T) { + spt := newServicePrincipalToken() + + resp := mocks.NewResponse() + c := mocks.NewSender() + s := autorest.DecorateSender(c, + (func() autorest.SendDecorator { + return func(s autorest.Sender) autorest.Sender { + return autorest.SenderFunc(func(r *http.Request) (*http.Response, error) { + return resp, nil + }) + } + })()) + spt.SetSender(s) + spt.Refresh() + + if resp.Body.(*mocks.Body).IsOpen() { + t.Fatal("azure: ServicePrincipalToken#Refresh failed to close the HTTP Response Body") + } +} + +func TestServicePrincipalTokenRefreshPropagatesErrors(t *testing.T) { + spt := newServicePrincipalToken() + + c := mocks.NewSender() + c.SetError(fmt.Errorf("Faux Error")) + spt.SetSender(c) + + err := spt.Refresh() + if err == nil { + t.Fatal("azure: Failed to propagate the request error") + } 
+} + +func TestServicePrincipalTokenRefreshReturnsErrorIfNotOk(t *testing.T) { + spt := newServicePrincipalToken() + + c := mocks.NewSender() + c.AppendResponse(mocks.NewResponseWithStatus("401 NotAuthorized", 401)) + spt.SetSender(c) + + err := spt.Refresh() + if err == nil { + t.Fatal("azure: Failed to return an when receiving a status code other than HTTP 200") + } +} + +func TestServicePrincipalTokenRefreshUnmarshals(t *testing.T) { + spt := newServicePrincipalToken() + + expiresOn := strconv.Itoa(int(time.Now().Add(3600 * time.Second).Sub(expirationBase).Seconds())) + j := newTokenJSON(expiresOn, "resource") + resp := mocks.NewResponseWithContent(j) + c := mocks.NewSender() + s := autorest.DecorateSender(c, + (func() autorest.SendDecorator { + return func(s autorest.Sender) autorest.Sender { + return autorest.SenderFunc(func(r *http.Request) (*http.Response, error) { + return resp, nil + }) + } + })()) + spt.SetSender(s) + + err := spt.Refresh() + if err != nil { + t.Fatalf("azure: ServicePrincipalToken#Refresh returned an unexpected error (%v)", err) + } else if spt.AccessToken != "accessToken" || + spt.ExpiresIn != "3600" || + spt.ExpiresOn != expiresOn || + spt.NotBefore != expiresOn || + spt.Resource != "resource" || + spt.Type != "Bearer" { + t.Fatalf("azure: ServicePrincipalToken#Refresh failed correctly unmarshal the JSON -- expected %v, received %v", + j, *spt) + } +} + +func TestServicePrincipalTokenEnsureFreshRefreshes(t *testing.T) { + spt := newServicePrincipalToken() + expireToken(&spt.Token) + + f := false + c := mocks.NewSender() + s := autorest.DecorateSender(c, + (func() autorest.SendDecorator { + return func(s autorest.Sender) autorest.Sender { + return autorest.SenderFunc(func(r *http.Request) (*http.Response, error) { + f = true + return mocks.NewResponse(), nil + }) + } + })()) + spt.SetSender(s) + spt.EnsureFresh() + if !f { + t.Fatal("azure: ServicePrincipalToken#EnsureFresh failed to call Refresh for stale token") + } +} + +func 
TestServicePrincipalTokenEnsureFreshSkipsIfFresh(t *testing.T) { + spt := newServicePrincipalToken() + setTokenToExpireIn(&spt.Token, 1000*time.Second) + + f := false + c := mocks.NewSender() + s := autorest.DecorateSender(c, + (func() autorest.SendDecorator { + return func(s autorest.Sender) autorest.Sender { + return autorest.SenderFunc(func(r *http.Request) (*http.Response, error) { + f = true + return mocks.NewResponse(), nil + }) + } + })()) + spt.SetSender(s) + spt.EnsureFresh() + if f { + t.Fatal("azure: ServicePrincipalToken#EnsureFresh invoked Refresh for fresh token") + } +} + +func TestServicePrincipalTokenWithAuthorization(t *testing.T) { + spt := newServicePrincipalToken() + setTokenToExpireIn(&spt.Token, 1000*time.Second) + + req, err := autorest.Prepare(&http.Request{}, spt.WithAuthorization()) + if err != nil { + t.Fatalf("azure: ServicePrincipalToken#WithAuthorization returned an error (%v)", err) + } else if req.Header.Get(http.CanonicalHeaderKey("Authorization")) != fmt.Sprintf("Bearer %s", spt.AccessToken) { + t.Fatal("azure: ServicePrincipalToken#WithAuthorization failed to set Authorization header") + } +} + +func TestServicePrincipalTokenWithAuthorizationReturnsErrorIfCannotRefresh(t *testing.T) { + spt := newServicePrincipalToken() + + _, err := autorest.Prepare(&http.Request{}, spt.WithAuthorization()) + if err == nil { + t.Fatal("azure: ServicePrincipalToken#WithAuthorization failed to return an error when refresh fails") + } +} + +func TestRefreshCallback(t *testing.T) { + callbackTriggered := false + spt := newServicePrincipalToken(func(Token) error { + callbackTriggered = true + return nil + }) + + expiresOn := strconv.Itoa(int(time.Now().Add(3600 * time.Second).Sub(expirationBase).Seconds())) + + sender := mocks.NewSender() + j := newTokenJSON(expiresOn, "resource") + sender.AppendResponse(mocks.NewResponseWithContent(j)) + spt.SetSender(sender) + spt.Refresh() + + if !callbackTriggered { + t.Fatalf("azure: RefreshCallback failed to 
trigger call callback") + } +} + +func TestRefreshCallbackErrorPropagates(t *testing.T) { + errorText := "this is an error text" + spt := newServicePrincipalToken(func(Token) error { + return fmt.Errorf(errorText) + }) + + expiresOn := strconv.Itoa(int(time.Now().Add(3600 * time.Second).Sub(expirationBase).Seconds())) + + sender := mocks.NewSender() + j := newTokenJSON(expiresOn, "resource") + sender.AppendResponse(mocks.NewResponseWithContent(j)) + spt.SetSender(sender) + err := spt.Refresh() + + if err == nil || !strings.Contains(err.Error(), errorText) { + t.Fatalf("azure: RefreshCallback failed to propagate error") + } +} + +// This demonstrates the danger of manual token without a refresh token +func TestServicePrincipalTokenManualRefreshFailsWithoutRefresh(t *testing.T) { + spt := newServicePrincipalTokenManual() + spt.RefreshToken = "" + err := spt.Refresh() + if err == nil { + t.Fatalf("azure: ServicePrincipalToken#Refresh should have failed with a ManualTokenSecret without a refresh token") + } +} + +func newToken() *Token { + return &Token{ + AccessToken: "ASECRETVALUE", + Resource: "https://azure.microsoft.com/", + Type: "Bearer", + } +} + +func newTokenJSON(expiresOn string, resource string) string { + return fmt.Sprintf(`{ + "access_token" : "accessToken", + "expires_in" : "3600", + "expires_on" : "%s", + "not_before" : "%s", + "resource" : "%s", + "token_type" : "Bearer" + }`, + expiresOn, expiresOn, resource) +} + +func newTokenExpiresIn(expireIn time.Duration) *Token { + return setTokenToExpireIn(newToken(), expireIn) +} + +func newTokenExpiresAt(expireAt time.Time) *Token { + return setTokenToExpireAt(newToken(), expireAt) +} + +func expireToken(t *Token) *Token { + return setTokenToExpireIn(t, 0) +} + +func setTokenToExpireAt(t *Token, expireAt time.Time) *Token { + t.ExpiresIn = "3600" + t.ExpiresOn = strconv.Itoa(int(expireAt.Sub(expirationBase).Seconds())) + t.NotBefore = t.ExpiresOn + return t +} + +func setTokenToExpireIn(t *Token, expireIn 
time.Duration) *Token { + return setTokenToExpireAt(t, time.Now().Add(expireIn)) +} + +func newServicePrincipalToken(callbacks ...TokenRefreshCallback) *ServicePrincipalToken { + spt, _ := NewServicePrincipalToken(TestOAuthConfig, "id", "secret", "resource", callbacks...) + return spt +} + +func newServicePrincipalTokenManual() *ServicePrincipalToken { + token := newToken() + token.RefreshToken = "refreshtoken" + spt, _ := NewServicePrincipalTokenFromManualToken(TestOAuthConfig, "id", "resource", *token) + return spt +} + +func newServicePrincipalTokenCertificate(t *testing.T) *ServicePrincipalToken { + template := x509.Certificate{ + SerialNumber: big.NewInt(0), + Subject: pkix.Name{CommonName: "test"}, + BasicConstraintsValid: true, + } + privateKey, err := rsa.GenerateKey(rand.Reader, 2048) + certificateBytes, err := x509.CreateCertificate(rand.Reader, &template, &template, &privateKey.PublicKey, privateKey) + if err != nil { + t.Fatal(err) + } + certificate, err := x509.ParseCertificate(certificateBytes) + if err != nil { + t.Fatal(err) + } + + spt, _ := NewServicePrincipalTokenFromCertificate(TestOAuthConfig, "id", certificate, privateKey, "resource") + return spt +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/go-autorest/autorest/client.go juju-core-2.0.0/src/github.com/Azure/go-autorest/autorest/client.go --- juju-core-2.0~beta15/src/github.com/Azure/go-autorest/autorest/client.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/go-autorest/autorest/client.go 2016-10-13 14:31:55.000000000 +0000 @@ -0,0 +1,211 @@ +package autorest + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "log" + "net/http" + "net/http/cookiejar" + "time" +) + +const ( + // DefaultPollingDelay is a reasonable delay between polling requests. + DefaultPollingDelay = 60 * time.Second + + // DefaultPollingDuration is a reasonable total polling duration. 
+ DefaultPollingDuration = 15 * time.Minute + + // DefaultRetryAttempts is number of attempts for retry status codes (5xx). + DefaultRetryAttempts = 3 + + // DefaultRetryDuration is a resonable delay for retry. + defaultRetryInterval = 30 * time.Second +) + +var statusCodesForRetry = []int{ + http.StatusRequestTimeout, // 408 + http.StatusInternalServerError, // 500 + http.StatusBadGateway, // 502 + http.StatusServiceUnavailable, // 503 + http.StatusGatewayTimeout, // 504 +} + +const ( + requestFormat = `HTTP Request Begin =================================================== +%s +===================================================== HTTP Request End +` + responseFormat = `HTTP Response Begin =================================================== +%s +===================================================== HTTP Response End +` +) + +// Response serves as the base for all responses from generated clients. It provides access to the +// last http.Response. +type Response struct { + *http.Response `json:"-"` +} + +// LoggingInspector implements request and response inspectors that log the full request and +// response to a supplied log. +type LoggingInspector struct { + Logger *log.Logger +} + +// WithInspection returns a PrepareDecorator that emits the http.Request to the supplied logger. The +// body is restored after being emitted. +// +// Note: Since it reads the entire Body, this decorator should not be used where body streaming is +// important. It is best used to trace JSON or similar body values. 
+func (li LoggingInspector) WithInspection() PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + var body, b bytes.Buffer + + defer r.Body.Close() + + r.Body = ioutil.NopCloser(io.TeeReader(r.Body, &body)) + if err := r.Write(&b); err != nil { + return nil, fmt.Errorf("Failed to write response: %v", err) + } + + li.Logger.Printf(requestFormat, b.String()) + + r.Body = ioutil.NopCloser(&body) + return p.Prepare(r) + }) + } +} + +// ByInspecting returns a RespondDecorator that emits the http.Response to the supplied logger. The +// body is restored after being emitted. +// +// Note: Since it reads the entire Body, this decorator should not be used where body streaming is +// important. It is best used to trace JSON or similar body values. +func (li LoggingInspector) ByInspecting() RespondDecorator { + return func(r Responder) Responder { + return ResponderFunc(func(resp *http.Response) error { + var body, b bytes.Buffer + defer resp.Body.Close() + resp.Body = ioutil.NopCloser(io.TeeReader(resp.Body, &body)) + if err := resp.Write(&b); err != nil { + return fmt.Errorf("Failed to write response: %v", err) + } + + li.Logger.Printf(responseFormat, b.String()) + + resp.Body = ioutil.NopCloser(&body) + return r.Respond(resp) + }) + } +} + +// Client is the base for autorest generated clients. It provides default, "do nothing" +// implementations of an Authorizer, RequestInspector, and ResponseInspector. It also returns the +// standard, undecorated http.Client as a default Sender. +// +// Generated clients should also use Error (see NewError and NewErrorWithError) for errors and +// return responses that compose with Response. +// +// Most customization of generated clients is best achieved by supplying a custom Authorizer, custom +// RequestInspector, and / or custom ResponseInspector. 
Users may log requests, implement circuit +// breakers (see https://msdn.microsoft.com/en-us/library/dn589784.aspx) or otherwise influence +// sending the request by providing a decorated Sender. +type Client struct { + Authorizer Authorizer + Sender Sender + RequestInspector PrepareDecorator + ResponseInspector RespondDecorator + + // PollingDelay sets the polling frequency used in absence of a Retry-After HTTP header + PollingDelay time.Duration + + // PollingDuration sets the maximum polling time after which an error is returned. + PollingDuration time.Duration + + // RetryAttempts sets the default number of retry attempts for client. + RetryAttempts int + + // UserAgent, if not empty, will be set as the HTTP User-Agent header on all requests sent + // through the Do method. + UserAgent string + + Jar http.CookieJar +} + +// NewClientWithUserAgent returns an instance of a Client with the UserAgent set to the passed +// string. +func NewClientWithUserAgent(ua string) Client { + return Client{ + PollingDelay: DefaultPollingDelay, + PollingDuration: DefaultPollingDuration, + RetryAttempts: DefaultRetryAttempts, + UserAgent: ua, + } +} + +// Do implements the Sender interface by invoking the active Sender after applying authorization. +// If Sender is not set, it uses a new instance of http.Client. In both cases it will, if UserAgent +// is set, apply set the User-Agent header. +func (c Client) Do(r *http.Request) (*http.Response, error) { + if r.UserAgent() == "" { + r, _ = Prepare(r, + WithUserAgent(c.UserAgent)) + } + r, err := Prepare(r, + c.WithInspection(), + c.WithAuthorization()) + if err != nil { + return nil, NewErrorWithError(err, "autorest/Client", "Do", nil, "Preparing request failed") + } + resp, err := SendWithSender(c.sender(), r, + DoRetryForStatusCodes(c.RetryAttempts, defaultRetryInterval, statusCodesForRetry...)) + Respond(resp, + c.ByInspecting()) + return resp, err +} + +// sender returns the Sender to which to send requests. 
+func (c Client) sender() Sender { + if c.Sender == nil { + j, _ := cookiejar.New(nil) + return &http.Client{Jar: j} + } + return c.Sender +} + +// WithAuthorization is a convenience method that returns the WithAuthorization PrepareDecorator +// from the current Authorizer. If not Authorizer is set, it uses the NullAuthorizer. +func (c Client) WithAuthorization() PrepareDecorator { + return c.authorizer().WithAuthorization() +} + +// authorizer returns the Authorizer to use. +func (c Client) authorizer() Authorizer { + if c.Authorizer == nil { + return NullAuthorizer{} + } + return c.Authorizer +} + +// WithInspection is a convenience method that passes the request to the supplied RequestInspector, +// if present, or returns the WithNothing PrepareDecorator otherwise. +func (c Client) WithInspection() PrepareDecorator { + if c.RequestInspector == nil { + return WithNothing() + } + return c.RequestInspector +} + +// ByInspecting is a convenience method that passes the response to the supplied ResponseInspector, +// if present, or returns the ByIgnoring RespondDecorator otherwise. 
+func (c Client) ByInspecting() RespondDecorator { + if c.ResponseInspector == nil { + return ByIgnoring() + } + return c.ResponseInspector +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/go-autorest/autorest/client_test.go juju-core-2.0.0/src/github.com/Azure/go-autorest/autorest/client_test.go --- juju-core-2.0~beta15/src/github.com/Azure/go-autorest/autorest/client_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/go-autorest/autorest/client_test.go 2016-10-13 14:31:55.000000000 +0000 @@ -0,0 +1,313 @@ +package autorest + +import ( + "bytes" + "fmt" + "io/ioutil" + "log" + "math/rand" + "net/http" + "reflect" + "testing" + "time" + + "github.com/Azure/go-autorest/autorest/mocks" +) + +func TestLoggingInspectorWithInspection(t *testing.T) { + b := bytes.Buffer{} + c := Client{} + li := LoggingInspector{Logger: log.New(&b, "", 0)} + c.RequestInspector = li.WithInspection() + + Prepare(mocks.NewRequestWithContent("Content"), + c.WithInspection()) + + if len(b.String()) <= 0 { + t.Fatal("autorest: LoggingInspector#WithInspection did not record Request to the log") + } +} + +func TestLoggingInspectorWithInspectionEmitsErrors(t *testing.T) { + b := bytes.Buffer{} + c := Client{} + r := mocks.NewRequestWithContent("Content") + li := LoggingInspector{Logger: log.New(&b, "", 0)} + c.RequestInspector = li.WithInspection() + + if _, err := Prepare(r, + c.WithInspection()); err != nil { + t.Error(err) + } + + if len(b.String()) <= 0 { + t.Fatal("autorest: LoggingInspector#WithInspection did not record Request to the log") + } +} + +func TestLoggingInspectorWithInspectionRestoresBody(t *testing.T) { + b := bytes.Buffer{} + c := Client{} + r := mocks.NewRequestWithContent("Content") + li := LoggingInspector{Logger: log.New(&b, "", 0)} + c.RequestInspector = li.WithInspection() + + Prepare(r, + c.WithInspection()) + + s, _ := ioutil.ReadAll(r.Body) + if len(s) <= 0 { + t.Fatal("autorest: LoggingInspector#WithInspection did not restore 
the Request body") + } +} + +func TestLoggingInspectorByInspecting(t *testing.T) { + b := bytes.Buffer{} + c := Client{} + li := LoggingInspector{Logger: log.New(&b, "", 0)} + c.ResponseInspector = li.ByInspecting() + + Respond(mocks.NewResponseWithContent("Content"), + c.ByInspecting()) + + if len(b.String()) <= 0 { + t.Fatal("autorest: LoggingInspector#ByInspection did not record Response to the log") + } +} + +func TestLoggingInspectorByInspectingEmitsErrors(t *testing.T) { + b := bytes.Buffer{} + c := Client{} + r := mocks.NewResponseWithContent("Content") + li := LoggingInspector{Logger: log.New(&b, "", 0)} + c.ResponseInspector = li.ByInspecting() + + if err := Respond(r, + c.ByInspecting()); err != nil { + t.Fatal(err) + } + + if len(b.String()) <= 0 { + t.Fatal("autorest: LoggingInspector#ByInspection did not record Response to the log") + } +} + +func TestLoggingInspectorByInspectingRestoresBody(t *testing.T) { + b := bytes.Buffer{} + c := Client{} + r := mocks.NewResponseWithContent("Content") + li := LoggingInspector{Logger: log.New(&b, "", 0)} + c.ResponseInspector = li.ByInspecting() + + Respond(r, + c.ByInspecting()) + + s, _ := ioutil.ReadAll(r.Body) + if len(s) <= 0 { + t.Fatal("autorest: LoggingInspector#ByInspecting did not restore the Response body") + } +} + +func TestNewClientWithUserAgent(t *testing.T) { + ua := "UserAgent" + c := NewClientWithUserAgent(ua) + + if c.UserAgent != ua { + t.Fatalf("autorest: NewClientWithUserAgent failed to set the UserAgent -- expected %s, received %s", + ua, c.UserAgent) + } +} + +func TestClientSenderReturnsHttpClientByDefault(t *testing.T) { + c := Client{} + + if fmt.Sprintf("%T", c.sender()) != "*http.Client" { + t.Fatal("autorest: Client#sender failed to return http.Client by default") + } +} + +func TestClientSenderReturnsSetSender(t *testing.T) { + c := Client{} + + s := mocks.NewSender() + c.Sender = s + + if c.sender() != s { + t.Fatal("autorest: Client#sender failed to return set Sender") + } +} + 
+func TestClientDoInvokesSender(t *testing.T) { + c := Client{} + + s := mocks.NewSender() + c.Sender = s + + c.Do(&http.Request{}) + if s.Attempts() != 1 { + t.Fatal("autorest: Client#Do failed to invoke the Sender") + } +} + +func TestClientDoSetsUserAgent(t *testing.T) { + ua := "UserAgent" + c := Client{UserAgent: ua} + r := mocks.NewRequest() + + c.Do(r) + + if r.UserAgent() != ua { + t.Fatalf("autorest: Client#Do failed to correctly set User-Agent header: %s=%s", + http.CanonicalHeaderKey(headerUserAgent), r.UserAgent()) + } +} + +func TestClientDoSetsAuthorization(t *testing.T) { + r := mocks.NewRequest() + s := mocks.NewSender() + c := Client{Authorizer: mockAuthorizer{}, Sender: s} + + c.Do(r) + if len(r.Header.Get(http.CanonicalHeaderKey(headerAuthorization))) <= 0 { + t.Fatalf("autorest: Client#Send failed to set Authorization header -- %s=%s", + http.CanonicalHeaderKey(headerAuthorization), + r.Header.Get(http.CanonicalHeaderKey(headerAuthorization))) + } +} + +func TestClientDoInvokesRequestInspector(t *testing.T) { + r := mocks.NewRequest() + s := mocks.NewSender() + i := &mockInspector{} + c := Client{RequestInspector: i.WithInspection(), Sender: s} + + c.Do(r) + if !i.wasInvoked { + t.Fatal("autorest: Client#Send failed to invoke the RequestInspector") + } +} + +func TestClientDoInvokesResponseInspector(t *testing.T) { + r := mocks.NewRequest() + s := mocks.NewSender() + i := &mockInspector{} + c := Client{ResponseInspector: i.ByInspecting(), Sender: s} + + c.Do(r) + if !i.wasInvoked { + t.Fatal("autorest: Client#Send failed to invoke the ResponseInspector") + } +} + +func TestClientDoReturnsErrorIfPrepareFails(t *testing.T) { + c := Client{} + s := mocks.NewSender() + c.Authorizer = mockFailingAuthorizer{} + c.Sender = s + + _, err := c.Do(&http.Request{}) + if err == nil { + t.Fatalf("autorest: Client#Do failed to return an error when Prepare failed") + } +} + +func TestClientDoDoesNotSendIfPrepareFails(t *testing.T) { + c := Client{} + s := 
mocks.NewSender() + c.Authorizer = mockFailingAuthorizer{} + c.Sender = s + + c.Do(&http.Request{}) + if s.Attempts() > 0 { + t.Fatal("autorest: Client#Do failed to invoke the Sender") + } +} + +func TestClientAuthorizerReturnsNullAuthorizerByDefault(t *testing.T) { + c := Client{} + + if fmt.Sprintf("%T", c.authorizer()) != "autorest.NullAuthorizer" { + t.Fatal("autorest: Client#authorizer failed to return the NullAuthorizer by default") + } +} + +func TestClientAuthorizerReturnsSetAuthorizer(t *testing.T) { + c := Client{} + c.Authorizer = mockAuthorizer{} + + if fmt.Sprintf("%T", c.authorizer()) != "autorest.mockAuthorizer" { + t.Fatal("autorest: Client#authorizer failed to return the set Authorizer") + } +} + +func TestClientWithAuthorizer(t *testing.T) { + c := Client{} + c.Authorizer = mockAuthorizer{} + + req, _ := Prepare(&http.Request{}, + c.WithAuthorization()) + + if req.Header.Get(headerAuthorization) == "" { + t.Fatal("autorest: Client#WithAuthorizer failed to return the WithAuthorizer from the active Authorizer") + } +} + +func TestClientWithInspection(t *testing.T) { + c := Client{} + r := &mockInspector{} + c.RequestInspector = r.WithInspection() + + Prepare(&http.Request{}, + c.WithInspection()) + + if !r.wasInvoked { + t.Fatal("autorest: Client#WithInspection failed to invoke RequestInspector") + } +} + +func TestClientWithInspectionSetsDefault(t *testing.T) { + c := Client{} + + r1 := &http.Request{} + r2, _ := Prepare(r1, + c.WithInspection()) + + if !reflect.DeepEqual(r1, r2) { + t.Fatal("autorest: Client#WithInspection failed to provide a default RequestInspector") + } +} + +func TestClientByInspecting(t *testing.T) { + c := Client{} + r := &mockInspector{} + c.ResponseInspector = r.ByInspecting() + + Respond(&http.Response{}, + c.ByInspecting()) + + if !r.wasInvoked { + t.Fatal("autorest: Client#ByInspecting failed to invoke ResponseInspector") + } +} + +func TestClientByInspectingSetsDefault(t *testing.T) { + c := Client{} + + r := 
&http.Response{} + Respond(r, + c.ByInspecting()) + + if !reflect.DeepEqual(r, &http.Response{}) { + t.Fatal("autorest: Client#ByInspecting failed to provide a default ResponseInspector") + } +} + +func randomString(n int) string { + const chars = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789" + r := rand.New(rand.NewSource(time.Now().UTC().UnixNano())) + s := make([]byte, n) + for i := range s { + s[i] = chars[r.Intn(len(chars))] + } + return string(s) +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/go-autorest/autorest/date/date.go juju-core-2.0.0/src/github.com/Azure/go-autorest/autorest/date/date.go --- juju-core-2.0~beta15/src/github.com/Azure/go-autorest/autorest/date/date.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/go-autorest/autorest/date/date.go 2016-10-13 14:31:55.000000000 +0000 @@ -0,0 +1,82 @@ +/* +Package date provides time.Time derivatives that conform to the Swagger.io (https://swagger.io/) +defined date formats: Date and DateTime. Both types may, in most cases, be used in lieu of +time.Time types. And both convert to time.Time through a ToTime method. +*/ +package date + +import ( + "fmt" + "time" +) + +const ( + fullDate = "2006-01-02" + fullDateJSON = `"2006-01-02"` + dateFormat = "%04d-%02d-%02d" + jsonFormat = `"%04d-%02d-%02d"` +) + +// Date defines a type similar to time.Time but assumes a layout of RFC3339 full-date (i.e., +// 2006-01-02). +type Date struct { + time.Time +} + +// ParseDate create a new Date from the passed string. +func ParseDate(date string) (d Date, err error) { + return parseDate(date, fullDate) +} + +func parseDate(date string, format string) (Date, error) { + d, err := time.Parse(format, date) + return Date{Time: d}, err +} + +// MarshalBinary preserves the Date as a byte array conforming to RFC3339 full-date (i.e., +// 2006-01-02). 
+func (d Date) MarshalBinary() ([]byte, error) { + return d.MarshalText() +} + +// UnmarshalBinary reconstitutes a Date saved as a byte array conforming to RFC3339 full-date (i.e., +// 2006-01-02). +func (d *Date) UnmarshalBinary(data []byte) error { + return d.UnmarshalText(data) +} + +// MarshalJSON preserves the Date as a JSON string conforming to RFC3339 full-date (i.e., +// 2006-01-02). +func (d Date) MarshalJSON() (json []byte, err error) { + return []byte(fmt.Sprintf(jsonFormat, d.Year(), d.Month(), d.Day())), nil +} + +// UnmarshalJSON reconstitutes the Date from a JSON string conforming to RFC3339 full-date (i.e., +// 2006-01-02). +func (d *Date) UnmarshalJSON(data []byte) (err error) { + d.Time, err = time.Parse(fullDateJSON, string(data)) + return err +} + +// MarshalText preserves the Date as a byte array conforming to RFC3339 full-date (i.e., +// 2006-01-02). +func (d Date) MarshalText() (text []byte, err error) { + return []byte(fmt.Sprintf(dateFormat, d.Year(), d.Month(), d.Day())), nil +} + +// UnmarshalText reconstitutes a Date saved as a byte array conforming to RFC3339 full-date (i.e., +// 2006-01-02). +func (d *Date) UnmarshalText(data []byte) (err error) { + d.Time, err = time.Parse(fullDate, string(data)) + return err +} + +// String returns the Date formatted as an RFC3339 full-date string (i.e., 2006-01-02). 
+func (d Date) String() string { + return fmt.Sprintf(dateFormat, d.Year(), d.Month(), d.Day()) +} + +// ToTime returns a Date as a time.Time +func (d Date) ToTime() time.Time { + return d.Time +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/go-autorest/autorest/date/date_test.go juju-core-2.0.0/src/github.com/Azure/go-autorest/autorest/date/date_test.go --- juju-core-2.0~beta15/src/github.com/Azure/go-autorest/autorest/date/date_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/go-autorest/autorest/date/date_test.go 2016-10-13 14:31:55.000000000 +0000 @@ -0,0 +1,223 @@ +package date + +import ( + "encoding/json" + "fmt" + "reflect" + "testing" + "time" +) + +func ExampleParseDate() { + d, err := ParseDate("2001-02-03") + if err != nil { + fmt.Println(err) + } + fmt.Println(d) + // Output: 2001-02-03 +} + +func ExampleDate() { + d, err := ParseDate("2001-02-03") + if err != nil { + fmt.Println(err) + } + + t, err := time.Parse(time.RFC3339, "2001-02-04T00:00:00Z") + if err != nil { + fmt.Println(err) + } + + // Date acts as time.Time when the receiver + if d.Before(t) { + fmt.Printf("Before ") + } else { + fmt.Printf("After ") + } + + // Convert Date when needing a time.Time + if t.After(d.ToTime()) { + fmt.Printf("After") + } else { + fmt.Printf("Before") + } + // Output: Before After +} + +func ExampleDate_MarshalBinary() { + d, err := ParseDate("2001-02-03") + if err != nil { + fmt.Println(err) + } + t, err := d.MarshalBinary() + if err != nil { + fmt.Println(err) + } + fmt.Println(string(t)) + // Output: 2001-02-03 +} + +func ExampleDate_UnmarshalBinary() { + d := Date{} + t := "2001-02-03" + + if err := d.UnmarshalBinary([]byte(t)); err != nil { + fmt.Println(err) + } + fmt.Println(d) + // Output: 2001-02-03 +} + +func ExampleDate_MarshalJSON() { + d, err := ParseDate("2001-02-03") + if err != nil { + fmt.Println(err) + } + j, err := json.Marshal(d) + if err != nil { + fmt.Println(err) + } + fmt.Println(string(j)) + // 
Output: "2001-02-03" +} + +func ExampleDate_UnmarshalJSON() { + var d struct { + Date Date `json:"date"` + } + j := `{"date" : "2001-02-03"}` + + if err := json.Unmarshal([]byte(j), &d); err != nil { + fmt.Println(err) + } + fmt.Println(d.Date) + // Output: 2001-02-03 +} + +func ExampleDate_MarshalText() { + d, err := ParseDate("2001-02-03") + if err != nil { + fmt.Println(err) + } + t, err := d.MarshalText() + if err != nil { + fmt.Println(err) + } + fmt.Println(string(t)) + // Output: 2001-02-03 +} + +func ExampleDate_UnmarshalText() { + d := Date{} + t := "2001-02-03" + + if err := d.UnmarshalText([]byte(t)); err != nil { + fmt.Println(err) + } + fmt.Println(d) + // Output: 2001-02-03 +} + +func TestDateString(t *testing.T) { + d, err := ParseDate("2001-02-03") + if err != nil { + t.Fatalf("date: String failed (%v)", err) + } + if d.String() != "2001-02-03" { + t.Fatalf("date: String failed (%v)", d.String()) + } +} + +func TestDateBinaryRoundTrip(t *testing.T) { + d1, err := ParseDate("2001-02-03") + if err != nil { + t.Fatalf("date: ParseDate failed (%v)", err) + } + t1, err := d1.MarshalBinary() + if err != nil { + t.Fatalf("date: MarshalBinary failed (%v)", err) + } + + d2 := Date{} + if err = d2.UnmarshalBinary(t1); err != nil { + t.Fatalf("date: UnmarshalBinary failed (%v)", err) + } + + if !reflect.DeepEqual(d1, d2) { + t.Fatalf("date: Round-trip Binary failed (%v, %v)", d1, d2) + } +} + +func TestDateJSONRoundTrip(t *testing.T) { + type s struct { + Date Date `json:"date"` + } + var err error + d1 := s{} + d1.Date, err = ParseDate("2001-02-03") + if err != nil { + t.Fatalf("date: ParseDate failed (%v)", err) + } + + j, err := json.Marshal(d1) + if err != nil { + t.Fatalf("date: MarshalJSON failed (%v)", err) + } + + d2 := s{} + if err = json.Unmarshal(j, &d2); err != nil { + t.Fatalf("date: UnmarshalJSON failed (%v)", err) + } + + if !reflect.DeepEqual(d1, d2) { + t.Fatalf("date: Round-trip JSON failed (%v, %v)", d1, d2) + } +} + +func 
TestDateTextRoundTrip(t *testing.T) { + d1, err := ParseDate("2001-02-03") + if err != nil { + t.Fatalf("date: ParseDate failed (%v)", err) + } + t1, err := d1.MarshalText() + if err != nil { + t.Fatalf("date: MarshalText failed (%v)", err) + } + d2 := Date{} + if err = d2.UnmarshalText(t1); err != nil { + t.Fatalf("date: UnmarshalText failed (%v)", err) + } + + if !reflect.DeepEqual(d1, d2) { + t.Fatalf("date: Round-trip Text failed (%v, %v)", d1, d2) + } +} + +func TestDateToTime(t *testing.T) { + var d Date + d, err := ParseDate("2001-02-03") + if err != nil { + t.Fatalf("date: ParseDate failed (%v)", err) + } + var _ time.Time = d.ToTime() +} + +func TestDateUnmarshalJSONReturnsError(t *testing.T) { + var d struct { + Date Date `json:"date"` + } + j := `{"date" : "February 3, 2001"}` + + if err := json.Unmarshal([]byte(j), &d); err == nil { + t.Fatal("date: Date failed to return error for malformed JSON date") + } +} + +func TestDateUnmarshalTextReturnsError(t *testing.T) { + d := Date{} + txt := "February 3, 2001" + + if err := d.UnmarshalText([]byte(txt)); err == nil { + t.Fatal("date: Date failed to return error for malformed Text date") + } +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/go-autorest/autorest/date/time.go juju-core-2.0.0/src/github.com/Azure/go-autorest/autorest/date/time.go --- juju-core-2.0~beta15/src/github.com/Azure/go-autorest/autorest/date/time.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/go-autorest/autorest/date/time.go 2016-10-13 14:31:55.000000000 +0000 @@ -0,0 +1,70 @@ +package date + +import ( + "time" +) + +const ( + rfc3339JSON = `"` + time.RFC3339Nano + `"` + rfc3339 = time.RFC3339Nano +) + +// Time defines a type similar to time.Time but assumes a layout of RFC3339 date-time (i.e., +// 2006-01-02T15:04:05Z). +type Time struct { + time.Time +} + +// MarshalBinary preserves the Time as a byte array conforming to RFC3339 date-time (i.e., +// 2006-01-02T15:04:05Z). 
+func (t Time) MarshalBinary() ([]byte, error) { + return t.Time.MarshalText() +} + +// UnmarshalBinary reconstitutes a Time saved as a byte array conforming to RFC3339 date-time +// (i.e., 2006-01-02T15:04:05Z). +func (t *Time) UnmarshalBinary(data []byte) error { + return t.UnmarshalText(data) +} + +// MarshalJSON preserves the Time as a JSON string conforming to RFC3339 date-time (i.e., +// 2006-01-02T15:04:05Z). +func (t Time) MarshalJSON() (json []byte, err error) { + return t.Time.MarshalJSON() +} + +// UnmarshalJSON reconstitutes the Time from a JSON string conforming to RFC3339 date-time +// (i.e., 2006-01-02T15:04:05Z). +func (t *Time) UnmarshalJSON(data []byte) (err error) { + t.Time, err = ParseTime(rfc3339JSON, string(data)) + return err +} + +// MarshalText preserves the Time as a byte array conforming to RFC3339 date-time (i.e., +// 2006-01-02T15:04:05Z). +func (t Time) MarshalText() (text []byte, err error) { + return t.Time.MarshalText() +} + +// UnmarshalText reconstitutes a Time saved as a byte array conforming to RFC3339 date-time +// (i.e., 2006-01-02T15:04:05Z). +func (t *Time) UnmarshalText(data []byte) (err error) { + t.Time, err = ParseTime(rfc3339, string(data)) + return err +} + +// String returns the Time formatted as an RFC3339 date-time string (i.e., +// 2006-01-02T15:04:05Z). +func (t Time) String() string { + // Note: time.Time.String does not return an RFC3339 compliant string, time.Time.MarshalText does. 
+ b, err := t.MarshalText() + if err != nil { + return "" + } + return string(b) +} + +// ToTime returns a Time as a time.Time +func (t Time) ToTime() time.Time { + return t.Time +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/go-autorest/autorest/date/timerfc1123.go juju-core-2.0.0/src/github.com/Azure/go-autorest/autorest/date/timerfc1123.go --- juju-core-2.0~beta15/src/github.com/Azure/go-autorest/autorest/date/timerfc1123.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/go-autorest/autorest/date/timerfc1123.go 2016-10-13 14:31:55.000000000 +0000 @@ -0,0 +1,86 @@ +package date + +import ( + "errors" + "time" +) + +const ( + rfc1123JSON = `"` + time.RFC1123 + `"` + rfc1123 = time.RFC1123 +) + +// TimeRFC1123 defines a type similar to time.Time but assumes a layout of RFC1123 date-time (i.e., +// Mon, 02 Jan 2006 15:04:05 MST). +type TimeRFC1123 struct { + time.Time +} + +// UnmarshalJSON reconstitutes the Time from a JSON string conforming to RFC1123 date-time +// (i.e., Mon, 02 Jan 2006 15:04:05 MST). +func (t *TimeRFC1123) UnmarshalJSON(data []byte) (err error) { + t.Time, err = ParseTime(rfc1123JSON, string(data)) + if err != nil { + return err + } + return nil +} + +// MarshalJSON preserves the Time as a JSON string conforming to RFC1123 date-time (i.e., +// Mon, 02 Jan 2006 15:04:05 MST). +func (t TimeRFC1123) MarshalJSON() ([]byte, error) { + if y := t.Year(); y < 0 || y >= 10000 { + return nil, errors.New("Time.MarshalJSON: year outside of range [0,9999]") + } + b := []byte(t.Format(rfc1123JSON)) + return b, nil +} + +// MarshalText preserves the Time as a byte array conforming to RFC1123 date-time (i.e., +// Mon, 02 Jan 2006 15:04:05 MST). 
+func (t TimeRFC1123) MarshalText() ([]byte, error) { + if y := t.Year(); y < 0 || y >= 10000 { + return nil, errors.New("Time.MarshalText: year outside of range [0,9999]") + } + + b := []byte(t.Format(rfc1123)) + return b, nil +} + +// UnmarshalText reconstitutes a Time saved as a byte array conforming to RFC1123 date-time +// (i.e., Mon, 02 Jan 2006 15:04:05 MST). +func (t *TimeRFC1123) UnmarshalText(data []byte) (err error) { + t.Time, err = ParseTime(rfc1123, string(data)) + if err != nil { + return err + } + return nil +} + +// MarshalBinary preserves the Time as a byte array conforming to RFC1123 date-time (i.e., +// Mon, 02 Jan 2006 15:04:05 MST). +func (t TimeRFC1123) MarshalBinary() ([]byte, error) { + return t.MarshalText() +} + +// UnmarshalBinary reconstitutes a Time saved as a byte array conforming to RFC1123 date-time +// (i.e., Mon, 02 Jan 2006 15:04:05 MST). +func (t *TimeRFC1123) UnmarshalBinary(data []byte) error { + return t.UnmarshalText(data) +} + +// ToTime returns a Time as a time.Time +func (t TimeRFC1123) ToTime() time.Time { + return t.Time +} + +// String returns the Time formatted as an RFC1123 date-time string (i.e., +// Mon, 02 Jan 2006 15:04:05 MST). +func (t TimeRFC1123) String() string { + // Note: time.Time.String does not return an RFC1123 compliant string, time.Time.MarshalText does. 
+ b, err := t.MarshalText() + if err != nil { + return "" + } + return string(b) +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/go-autorest/autorest/date/timerfc1123_test.go juju-core-2.0.0/src/github.com/Azure/go-autorest/autorest/date/timerfc1123_test.go --- juju-core-2.0~beta15/src/github.com/Azure/go-autorest/autorest/date/timerfc1123_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/go-autorest/autorest/date/timerfc1123_test.go 2016-10-13 14:31:55.000000000 +0000 @@ -0,0 +1,212 @@ +package date + +import ( + "encoding/json" + "fmt" + "reflect" + "testing" + "time" +) + +func ExampleTimeRFC1123() { + d, err := ParseTime(rfc1123, "Mon, 02 Jan 2006 15:04:05 MST") + if err != nil { + fmt.Println(err) + } + fmt.Println(d) + // Output: 2006-01-02 15:04:05 +0000 MST +} + +func ExampleTimeRFC1123_MarshalBinary() { + ti, err := ParseTime(rfc1123, "Mon, 02 Jan 2006 15:04:05 MST") + if err != nil { + fmt.Println(err) + } + d := TimeRFC1123{ti} + b, err := d.MarshalBinary() + if err != nil { + fmt.Println(err) + } + fmt.Println(string(b)) + // Output: Mon, 02 Jan 2006 15:04:05 MST +} + +func ExampleTimeRFC1123_UnmarshalBinary() { + d := TimeRFC1123{} + t := "Mon, 02 Jan 2006 15:04:05 MST" + if err := d.UnmarshalBinary([]byte(t)); err != nil { + fmt.Println(err) + } + fmt.Println(d) + // Output: Mon, 02 Jan 2006 15:04:05 MST +} + +func ExampleTimeRFC1123_MarshalJSON() { + ti, err := ParseTime(rfc1123, "Mon, 02 Jan 2006 15:04:05 MST") + if err != nil { + fmt.Println(err) + } + d := TimeRFC1123{ti} + j, err := json.Marshal(d) + if err != nil { + fmt.Println(err) + } + fmt.Println(string(j)) + // Output: "Mon, 02 Jan 2006 15:04:05 MST" +} + +func TestTimeRFC1123MarshalJSONInvalid(t *testing.T) { + ti := time.Date(20000, 01, 01, 00, 00, 00, 00, time.UTC) + d := TimeRFC1123{ti} + if _, err := json.Marshal(d); err == nil { + t.Fatalf("date: TimeRFC1123#Marshal failed for invalid date") + } +} + +func ExampleTimeRFC1123_UnmarshalJSON() { 
+ var d struct { + Time TimeRFC1123 `json:"datetime"` + } + j := `{"datetime" : "Mon, 02 Jan 2006 15:04:05 MST"}` + + if err := json.Unmarshal([]byte(j), &d); err != nil { + fmt.Println(err) + } + fmt.Println(d.Time) + // Output: Mon, 02 Jan 2006 15:04:05 MST +} + +func ExampleTimeRFC1123_MarshalText() { + ti, err := ParseTime(rfc3339, "2001-02-03T04:05:06Z") + if err != nil { + fmt.Println(err) + } + d := TimeRFC1123{ti} + t, err := d.MarshalText() + if err != nil { + fmt.Println(err) + } + fmt.Println(string(t)) + // Output: Sat, 03 Feb 2001 04:05:06 UTC +} + +func ExampleTimeRFC1123_UnmarshalText() { + d := TimeRFC1123{} + t := "Sat, 03 Feb 2001 04:05:06 UTC" + + if err := d.UnmarshalText([]byte(t)); err != nil { + fmt.Println(err) + } + fmt.Println(d) + // Output: Sat, 03 Feb 2001 04:05:06 UTC +} + +func TestUnmarshalJSONforInvalidDateRfc1123(t *testing.T) { + dt := `"Mon, 02 Jan 2000000 15:05 MST"` + d := TimeRFC1123{} + if err := d.UnmarshalJSON([]byte(dt)); err == nil { + t.Fatalf("date: TimeRFC1123#Unmarshal failed for invalid date") + } +} + +func TestUnmarshalTextforInvalidDateRfc1123(t *testing.T) { + dt := "Mon, 02 Jan 2000000 15:05 MST" + d := TimeRFC1123{} + if err := d.UnmarshalText([]byte(dt)); err == nil { + t.Fatalf("date: TimeRFC1123#Unmarshal failed for invalid date") + } +} + +func TestTimeStringRfc1123(t *testing.T) { + ti, err := ParseTime(rfc1123, "Mon, 02 Jan 2006 15:04:05 MST") + if err != nil { + fmt.Println(err) + } + d := TimeRFC1123{ti} + if d.String() != "Mon, 02 Jan 2006 15:04:05 MST" { + t.Fatalf("date: TimeRFC1123#String failed (%v)", d.String()) + } +} + +func TestTimeStringReturnsEmptyStringForErrorRfc1123(t *testing.T) { + d := TimeRFC1123{Time: time.Date(20000, 01, 01, 01, 01, 01, 01, time.UTC)} + if d.String() != "" { + t.Fatalf("date: TimeRFC1123#String failed empty string for an error") + } +} + +func TestTimeBinaryRoundTripRfc1123(t *testing.T) { + ti, err := ParseTime(rfc3339, "2001-02-03T04:05:06Z") + if err != nil { + 
t.Fatalf("date: TimeRFC1123#ParseTime failed (%v)", err) + } + d1 := TimeRFC1123{ti} + t1, err := d1.MarshalBinary() + if err != nil { + t.Fatalf("date: TimeRFC1123#MarshalBinary failed (%v)", err) + } + + d2 := TimeRFC1123{} + if err = d2.UnmarshalBinary(t1); err != nil { + t.Fatalf("date: TimeRFC1123#UnmarshalBinary failed (%v)", err) + } + + if !reflect.DeepEqual(d1, d2) { + t.Fatalf("date: Round-trip Binary failed (%v, %v)", d1, d2) + } +} + +func TestTimeJSONRoundTripRfc1123(t *testing.T) { + type s struct { + Time TimeRFC1123 `json:"datetime"` + } + var err error + ti, err := ParseTime(rfc1123, "Mon, 02 Jan 2006 15:04:05 MST") + if err != nil { + t.Fatalf("date: TimeRFC1123#ParseTime failed (%v)", err) + } + d1 := s{Time: TimeRFC1123{ti}} + j, err := json.Marshal(d1) + if err != nil { + t.Fatalf("date: TimeRFC1123#MarshalJSON failed (%v)", err) + } + + d2 := s{} + if err = json.Unmarshal(j, &d2); err != nil { + t.Fatalf("date: TimeRFC1123#UnmarshalJSON failed (%v)", err) + } + + if !reflect.DeepEqual(d1, d2) { + t.Fatalf("date: Round-trip JSON failed (%v, %v)", d1, d2) + } +} + +func TestTimeTextRoundTripRfc1123(t *testing.T) { + ti, err := ParseTime(rfc1123, "Mon, 02 Jan 2006 15:04:05 MST") + if err != nil { + t.Fatalf("date: TimeRFC1123#ParseTime failed (%v)", err) + } + d1 := TimeRFC1123{Time: ti} + t1, err := d1.MarshalText() + if err != nil { + t.Fatalf("date: TimeRFC1123#MarshalText failed (%v)", err) + } + + d2 := TimeRFC1123{} + if err = d2.UnmarshalText(t1); err != nil { + t.Fatalf("date: TimeRFC1123#UnmarshalText failed (%v)", err) + } + + if !reflect.DeepEqual(d1, d2) { + t.Fatalf("date: Round-trip Text failed (%v, %v)", d1, d2) + } +} + +func TestTimeToTimeRFC1123(t *testing.T) { + ti, err := ParseTime(rfc1123, "Mon, 02 Jan 2006 15:04:05 MST") + d := TimeRFC1123{ti} + if err != nil { + t.Fatalf("date: TimeRFC1123#ParseTime failed (%v)", err) + } + var _ time.Time = d.ToTime() +} diff -Nru 
juju-core-2.0~beta15/src/github.com/Azure/go-autorest/autorest/date/time_test.go juju-core-2.0.0/src/github.com/Azure/go-autorest/autorest/date/time_test.go --- juju-core-2.0~beta15/src/github.com/Azure/go-autorest/autorest/date/time_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/go-autorest/autorest/date/time_test.go 2016-10-13 14:31:55.000000000 +0000 @@ -0,0 +1,203 @@ +package date + +import ( + "encoding/json" + "fmt" + "reflect" + "testing" + "time" +) + +func ExampleParseTime() { + d, _ := ParseTime(rfc3339, "2001-02-03T04:05:06Z") + fmt.Println(d) + // Output: 2001-02-03 04:05:06 +0000 UTC +} + +func ExampleTime_MarshalBinary() { + ti, err := ParseTime(rfc3339, "2001-02-03T04:05:06Z") + if err != nil { + fmt.Println(err) + } + d := Time{ti} + t, err := d.MarshalBinary() + if err != nil { + fmt.Println(err) + } + fmt.Println(string(t)) + // Output: 2001-02-03T04:05:06Z +} + +func ExampleTime_UnmarshalBinary() { + d := Time{} + t := "2001-02-03T04:05:06Z" + + if err := d.UnmarshalBinary([]byte(t)); err != nil { + fmt.Println(err) + } + fmt.Println(d) + // Output: 2001-02-03T04:05:06Z +} + +func ExampleTime_MarshalJSON() { + d, err := ParseTime(rfc3339, "2001-02-03T04:05:06Z") + if err != nil { + fmt.Println(err) + } + j, err := json.Marshal(d) + if err != nil { + fmt.Println(err) + } + fmt.Println(string(j)) + // Output: "2001-02-03T04:05:06Z" +} + +func ExampleTime_UnmarshalJSON() { + var d struct { + Time Time `json:"datetime"` + } + j := `{"datetime" : "2001-02-03T04:05:06Z"}` + + if err := json.Unmarshal([]byte(j), &d); err != nil { + fmt.Println(err) + } + fmt.Println(d.Time) + // Output: 2001-02-03T04:05:06Z +} + +func ExampleTime_MarshalText() { + d, err := ParseTime(rfc3339, "2001-02-03T04:05:06Z") + if err != nil { + fmt.Println(err) + } + t, err := d.MarshalText() + if err != nil { + fmt.Println(err) + } + fmt.Println(string(t)) + // Output: 2001-02-03T04:05:06Z +} + +func ExampleTime_UnmarshalText() { + d := 
Time{} + t := "2001-02-03T04:05:06Z" + + if err := d.UnmarshalText([]byte(t)); err != nil { + fmt.Println(err) + } + fmt.Println(d) + // Output: 2001-02-03T04:05:06Z +} + +func TestUnmarshalTextforInvalidDate(t *testing.T) { + d := Time{} + dt := "2001-02-03T04:05:06AAA" + + if err := d.UnmarshalText([]byte(dt)); err == nil { + t.Fatalf("date: Time#Unmarshal was expecting error for invalid date") + } +} + +func TestUnmarshalJSONforInvalidDate(t *testing.T) { + d := Time{} + dt := `"2001-02-03T04:05:06AAA"` + + if err := d.UnmarshalJSON([]byte(dt)); err == nil { + t.Fatalf("date: Time#Unmarshal was expecting error for invalid date") + } +} + +func TestTimeString(t *testing.T) { + ti, err := ParseTime(rfc3339, "2001-02-03T04:05:06Z") + if err != nil { + fmt.Println(err) + } + d := Time{ti} + if d.String() != "2001-02-03T04:05:06Z" { + t.Fatalf("date: Time#String failed (%v)", d.String()) + } +} + +func TestTimeStringReturnsEmptyStringForError(t *testing.T) { + d := Time{Time: time.Date(20000, 01, 01, 01, 01, 01, 01, time.UTC)} + if d.String() != "" { + t.Fatalf("date: Time#String failed empty string for an error") + } +} + +func TestTimeBinaryRoundTrip(t *testing.T) { + ti, err := ParseTime(rfc3339, "2001-02-03T04:05:06Z") + if err != nil { + t.Fatalf("date: Time#ParseTime failed (%v)", err) + } + d1 := Time{ti} + t1, err := d1.MarshalBinary() + if err != nil { + t.Fatalf("date: Time#MarshalBinary failed (%v)", err) + } + + d2 := Time{} + if err = d2.UnmarshalBinary(t1); err != nil { + t.Fatalf("date: Time#UnmarshalBinary failed (%v)", err) + } + + if !reflect.DeepEqual(d1, d2) { + t.Fatalf("date:Round-trip Binary failed (%v, %v)", d1, d2) + } +} + +func TestTimeJSONRoundTrip(t *testing.T) { + type s struct { + Time Time `json:"datetime"` + } + + ti, err := ParseTime(rfc3339, "2001-02-03T04:05:06Z") + if err != nil { + t.Fatalf("date: Time#ParseTime failed (%v)", err) + } + + d1 := s{Time: Time{ti}} + j, err := json.Marshal(d1) + if err != nil { + t.Fatalf("date: 
Time#MarshalJSON failed (%v)", err) + } + + d2 := s{} + if err = json.Unmarshal(j, &d2); err != nil { + t.Fatalf("date: Time#UnmarshalJSON failed (%v)", err) + } + + if !reflect.DeepEqual(d1, d2) { + t.Fatalf("date: Round-trip JSON failed (%v, %v)", d1, d2) + } +} + +func TestTimeTextRoundTrip(t *testing.T) { + ti, err := ParseTime(rfc3339, "2001-02-03T04:05:06Z") + if err != nil { + t.Fatalf("date: Time#ParseTime failed (%v)", err) + } + d1 := Time{Time: ti} + t1, err := d1.MarshalText() + if err != nil { + t.Fatalf("date: Time#MarshalText failed (%v)", err) + } + + d2 := Time{} + if err = d2.UnmarshalText(t1); err != nil { + t.Fatalf("date: Time#UnmarshalText failed (%v)", err) + } + + if !reflect.DeepEqual(d1, d2) { + t.Fatalf("date: Round-trip Text failed (%v, %v)", d1, d2) + } +} + +func TestTimeToTime(t *testing.T) { + ti, err := ParseTime(rfc3339, "2001-02-03T04:05:06Z") + d := Time{ti} + if err != nil { + t.Fatalf("date: Time#ParseTime failed (%v)", err) + } + var _ time.Time = d.ToTime() +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/go-autorest/autorest/date/utility.go juju-core-2.0.0/src/github.com/Azure/go-autorest/autorest/date/utility.go --- juju-core-2.0~beta15/src/github.com/Azure/go-autorest/autorest/date/utility.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/go-autorest/autorest/date/utility.go 2016-10-13 14:31:55.000000000 +0000 @@ -0,0 +1,11 @@ +package date + +import ( + "strings" + "time" +) + +// ParseTime to parse Time string to specified format. 
+func ParseTime(format string, t string) (d time.Time, err error) { + return time.Parse(format, strings.ToUpper(t)) +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/go-autorest/autorest/error.go juju-core-2.0.0/src/github.com/Azure/go-autorest/autorest/error.go --- juju-core-2.0~beta15/src/github.com/Azure/go-autorest/autorest/error.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/go-autorest/autorest/error.go 2016-10-13 14:31:55.000000000 +0000 @@ -0,0 +1,77 @@ +package autorest + +import ( + "fmt" + "net/http" +) + +const ( + // UndefinedStatusCode is used when HTTP status code is not available for an error. + UndefinedStatusCode = 0 +) + +// DetailedError encloses a error with details of the package, method, and associated HTTP +// status code (if any). +type DetailedError struct { + Original error + + // PackageType is the package type of the object emitting the error. For types, the value + // matches that produced the the '%T' format specifier of the fmt package. For other elements, + // such as functions, it is just the package name (e.g., "autorest"). + PackageType string + + // Method is the name of the method raising the error. + Method string + + // StatusCode is the HTTP Response StatusCode (if non-zero) that led to the error. + StatusCode interface{} + + // Message is the error message. + Message string +} + +// NewError creates a new Error conforming object from the passed packageType, method, and +// message. message is treated as a format string to which the optional args apply. +func NewError(packageType string, method string, message string, args ...interface{}) DetailedError { + return NewErrorWithError(nil, packageType, method, nil, message, args...) +} + +// NewErrorWithResponse creates a new Error conforming object from the passed +// packageType, method, statusCode of the given resp (UndefinedStatusCode if +// resp is nil), and message. 
message is treated as a format string to which the +// optional args apply. +func NewErrorWithResponse(packageType string, method string, resp *http.Response, message string, args ...interface{}) DetailedError { + return NewErrorWithError(nil, packageType, method, resp, message, args...) +} + +// NewErrorWithError creates a new Error conforming object from the +// passed packageType, method, statusCode of the given resp (UndefinedStatusCode +// if resp is nil), message, and original error. message is treated as a format +// string to which the optional args apply. +func NewErrorWithError(original error, packageType string, method string, resp *http.Response, message string, args ...interface{}) DetailedError { + if v, ok := original.(DetailedError); ok { + return v + } + + statusCode := UndefinedStatusCode + if resp != nil { + statusCode = resp.StatusCode + } + + return DetailedError{ + Original: original, + PackageType: packageType, + Method: method, + StatusCode: statusCode, + Message: fmt.Sprintf(message, args...), + } +} + +// Error returns a formatted containing all available details (i.e., PackageType, Method, +// StatusCode, Message, and original error (if any)). 
+func (e DetailedError) Error() string { + if e.Original == nil { + return fmt.Sprintf("%s#%s: %s: StatusCode=%d", e.PackageType, e.Method, e.Message, e.StatusCode) + } + return fmt.Sprintf("%s#%s: %s: StatusCode=%d -- Original Error: %v", e.PackageType, e.Method, e.Message, e.StatusCode, e.Original) +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/go-autorest/autorest/error_test.go juju-core-2.0.0/src/github.com/Azure/go-autorest/autorest/error_test.go --- juju-core-2.0~beta15/src/github.com/Azure/go-autorest/autorest/error_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/go-autorest/autorest/error_test.go 2016-10-13 14:31:55.000000000 +0000 @@ -0,0 +1,188 @@ +package autorest + +import ( + "fmt" + "net/http" + "reflect" + "regexp" + "testing" +) + +func TestNewErrorWithError_AssignsPackageType(t *testing.T) { + e := NewErrorWithError(fmt.Errorf("original"), "packageType", "method", nil, "message") + + if e.PackageType != "packageType" { + t.Fatalf("autorest: Error failed to set package type -- expected %v, received %v", "packageType", e.PackageType) + } +} + +func TestNewErrorWithError_AssignsMethod(t *testing.T) { + e := NewErrorWithError(fmt.Errorf("original"), "packageType", "method", nil, "message") + + if e.Method != "method" { + t.Fatalf("autorest: Error failed to set method -- expected %v, received %v", "method", e.Method) + } +} + +func TestNewErrorWithError_AssignsMessage(t *testing.T) { + e := NewErrorWithError(fmt.Errorf("original"), "packageType", "method", nil, "message") + + if e.Message != "message" { + t.Fatalf("autorest: Error failed to set message -- expected %v, received %v", "message", e.Message) + } +} + +func TestNewErrorWithError_AssignsUndefinedStatusCodeIfRespNil(t *testing.T) { + e := NewErrorWithError(nil, "packageType", "method", nil, "message") + if e.StatusCode != UndefinedStatusCode { + t.Fatalf("autorest: Error failed to set status code -- expected %v, received %v", UndefinedStatusCode, 
e.StatusCode) + } +} + +func TestNewErrorWithError_AssignsStatusCode(t *testing.T) { + e := NewErrorWithError(fmt.Errorf("original"), "packageType", "method", &http.Response{ + StatusCode: http.StatusBadRequest, + Status: http.StatusText(http.StatusBadRequest)}, "message") + + if e.StatusCode != http.StatusBadRequest { + t.Fatalf("autorest: Error failed to set status code -- expected %v, received %v", http.StatusBadRequest, e.StatusCode) + } +} + +func TestNewErrorWithError_AcceptsArgs(t *testing.T) { + e := NewErrorWithError(fmt.Errorf("original"), "packageType", "method", nil, "message %s", "arg") + + if matched, _ := regexp.MatchString(`.*arg.*`, e.Message); !matched { + t.Fatalf("autorest: Error failed to apply message arguments -- expected %v, received %v", + `.*arg.*`, e.Message) + } +} + +func TestNewErrorWithError_AssignsError(t *testing.T) { + err := fmt.Errorf("original") + e := NewErrorWithError(err, "packageType", "method", nil, "message") + + if e.Original != err { + t.Fatalf("autorest: Error failed to set error -- expected %v, received %v", err, e.Original) + } +} + +func TestNewErrorWithResponse_ContainsStatusCode(t *testing.T) { + e := NewErrorWithResponse("packageType", "method", &http.Response{ + StatusCode: http.StatusBadRequest, + Status: http.StatusText(http.StatusBadRequest)}, "message") + + if e.StatusCode != http.StatusBadRequest { + t.Fatalf("autorest: Error failed to set status code -- expected %v, received %v", http.StatusBadRequest, e.StatusCode) + } +} + +func TestNewErrorWithResponse_nilResponse_ReportsUndefinedStatusCode(t *testing.T) { + e := NewErrorWithResponse("packageType", "method", nil, "message") + + if e.StatusCode != UndefinedStatusCode { + t.Fatalf("autorest: Error failed to set status code -- expected %v, received %v", UndefinedStatusCode, e.StatusCode) + } +} + +func TestNewErrorWithResponse_Forwards(t *testing.T) { + e1 := NewError("packageType", "method", "message %s", "arg") + e2 := NewErrorWithResponse("packageType", 
"method", nil, "message %s", "arg") + + if !reflect.DeepEqual(e1, e2) { + t.Fatal("autorest: NewError did not return an error equivelent to NewErrorWithError") + } +} + +func TestNewErrorWithError_Forwards(t *testing.T) { + e1 := NewError("packageType", "method", "message %s", "arg") + e2 := NewErrorWithError(nil, "packageType", "method", nil, "message %s", "arg") + + if !reflect.DeepEqual(e1, e2) { + t.Fatal("autorest: NewError did not return an error equivelent to NewErrorWithError") + } +} + +func TestNewErrorWithError_DoesNotWrapADetailedError(t *testing.T) { + e1 := NewError("packageType1", "method1", "message1 %s", "arg1") + e2 := NewErrorWithError(e1, "packageType2", "method2", nil, "message2 %s", "arg2") + + if !reflect.DeepEqual(e1, e2) { + t.Fatalf("autorest: NewErrorWithError incorrectly wrapped a DetailedError -- expected %v, received %v", e1, e2) + } +} + +func TestNewErrorWithError_WrapsAnError(t *testing.T) { + e1 := fmt.Errorf("Inner Error") + var e2 interface{} = NewErrorWithError(e1, "packageType", "method", nil, "message") + + if _, ok := e2.(DetailedError); !ok { + t.Fatalf("autorest: NewErrorWithError failed to wrap a standard error -- received %T", e2) + } +} + +func TestDetailedError(t *testing.T) { + err := fmt.Errorf("original") + e := NewErrorWithError(err, "packageType", "method", nil, "message") + + if matched, _ := regexp.MatchString(`.*original.*`, e.Error()); !matched { + t.Fatalf("autorest: Error#Error failed to return original error message -- expected %v, received %v", + `.*original.*`, e.Error()) + } +} + +func TestDetailedErrorConstainsPackageType(t *testing.T) { + e := NewErrorWithError(fmt.Errorf("original"), "packageType", "method", nil, "message") + + if matched, _ := regexp.MatchString(`.*packageType.*`, e.Error()); !matched { + t.Fatalf("autorest: Error#String failed to include PackageType -- expected %v, received %v", + `.*packageType.*`, e.Error()) + } +} + +func TestDetailedErrorConstainsMethod(t *testing.T) { + e := 
NewErrorWithError(fmt.Errorf("original"), "packageType", "method", nil, "message") + + if matched, _ := regexp.MatchString(`.*method.*`, e.Error()); !matched { + t.Fatalf("autorest: Error#String failed to include Method -- expected %v, received %v", + `.*method.*`, e.Error()) + } +} + +func TestDetailedErrorConstainsMessage(t *testing.T) { + e := NewErrorWithError(fmt.Errorf("original"), "packageType", "method", nil, "message") + + if matched, _ := regexp.MatchString(`.*message.*`, e.Error()); !matched { + t.Fatalf("autorest: Error#String failed to include Message -- expected %v, received %v", + `.*message.*`, e.Error()) + } +} + +func TestDetailedErrorConstainsStatusCode(t *testing.T) { + e := NewErrorWithError(fmt.Errorf("original"), "packageType", "method", &http.Response{ + StatusCode: http.StatusBadRequest, + Status: http.StatusText(http.StatusBadRequest)}, "message") + + if matched, _ := regexp.MatchString(`.*400.*`, e.Error()); !matched { + t.Fatalf("autorest: Error#String failed to include Status Code -- expected %v, received %v", + `.*400.*`, e.Error()) + } +} + +func TestDetailedErrorConstainsOriginal(t *testing.T) { + e := NewErrorWithError(fmt.Errorf("original"), "packageType", "method", nil, "message") + + if matched, _ := regexp.MatchString(`.*original.*`, e.Error()); !matched { + t.Fatalf("autorest: Error#String failed to include Original error -- expected %v, received %v", + `.*original.*`, e.Error()) + } +} + +func TestDetailedErrorSkipsOriginal(t *testing.T) { + e := NewError("packageType", "method", "message") + + if matched, _ := regexp.MatchString(`.*Original.*`, e.Error()); matched { + t.Fatalf("autorest: Error#String included missing Original error -- unexpected %v, received %v", + `.*Original.*`, e.Error()) + } +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/go-autorest/autorest/mocks/helpers.go juju-core-2.0.0/src/github.com/Azure/go-autorest/autorest/mocks/helpers.go --- 
juju-core-2.0~beta15/src/github.com/Azure/go-autorest/autorest/mocks/helpers.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/go-autorest/autorest/mocks/helpers.go 2016-10-13 14:31:55.000000000 +0000 @@ -0,0 +1,137 @@ +package mocks + +import ( + "fmt" + "net/http" + "time" +) + +const ( + // TestAuthorizationHeader is a faux HTTP Authorization header value + TestAuthorizationHeader = "BEARER SECRETTOKEN" + + // TestBadURL is a malformed URL + TestBadURL = " " + + // TestDelay is the Retry-After delay used in tests. + TestDelay = 0 * time.Second + + // TestHeader is the header used in tests. + TestHeader = "x-test-header" + + // TestURL is the URL used in tests. + TestURL = "https://microsoft.com/a/b/c/" + + // TestAzureAsyncURL is a URL used in Azure asynchronous tests + TestAzureAsyncURL = "https://microsoft.com/a/b/c/async" + + // TestLocationURL is a URL used in Azure asynchronous tests + TestLocationURL = "https://microsoft.com/a/b/c/location" +) + +const ( + headerLocation = "Location" + headerRetryAfter = "Retry-After" +) + +// NewRequest instantiates a new request. +func NewRequest() *http.Request { + return NewRequestWithContent("") +} + +// NewRequestWithContent instantiates a new request using the passed string for the body content. +func NewRequestWithContent(c string) *http.Request { + r, _ := http.NewRequest("GET", "https://microsoft.com/a/b/c/", NewBody(c)) + return r +} + +// NewRequestWithCloseBody instantiates a new request. +func NewRequestWithCloseBody() *http.Request { + return NewRequestWithCloseBodyContent("request body") +} + +// NewRequestWithCloseBodyContent instantiates a new request using the passed string for the body content. +func NewRequestWithCloseBodyContent(c string) *http.Request { + r, _ := http.NewRequest("GET", "https://microsoft.com/a/b/c/", NewBodyClose(c)) + return r +} + +// NewRequestForURL instantiates a new request using the passed URL. 
+func NewRequestForURL(u string) *http.Request { + r, err := http.NewRequest("GET", u, NewBody("")) + if err != nil { + panic(fmt.Sprintf("mocks: ERROR (%v) parsing testing URL %s", err, u)) + } + return r +} + +// NewResponse instantiates a new response. +func NewResponse() *http.Response { + return NewResponseWithContent("") +} + +// NewResponseWithContent instantiates a new response with the passed string as the body content. +func NewResponseWithContent(c string) *http.Response { + return &http.Response{ + Status: "200 OK", + StatusCode: 200, + Proto: "HTTP/1.0", + ProtoMajor: 1, + ProtoMinor: 0, + Body: NewBody(c), + Request: NewRequest(), + } +} + +// NewResponseWithStatus instantiates a new response using the passed string and integer as the +// status and status code. +func NewResponseWithStatus(s string, c int) *http.Response { + resp := NewResponse() + resp.Status = s + resp.StatusCode = c + return resp +} + +// NewResponseWithBodyAndStatus instantiates a new response using the specified mock body, +// status and status code +func NewResponseWithBodyAndStatus(body *Body, c int, s string) *http.Response { + resp := NewResponse() + resp.Body = body + resp.Status = s + resp.StatusCode = c + return resp +} + +// SetResponseHeader adds a header to the passed response. +func SetResponseHeader(resp *http.Response, h string, v string) { + if resp.Header == nil { + resp.Header = make(http.Header) + } + resp.Header.Set(h, v) +} + +// SetResponseHeaderValues adds a header containing all the passed string values. +func SetResponseHeaderValues(resp *http.Response, h string, values []string) { + if resp.Header == nil { + resp.Header = make(http.Header) + } + for _, v := range values { + resp.Header.Add(h, v) + } +} + +// SetAcceptedHeaders adds the headers usually associated with a 202 Accepted response. 
+func SetAcceptedHeaders(resp *http.Response) { + SetLocationHeader(resp, TestURL) + SetRetryHeader(resp, TestDelay) +} + +// SetLocationHeader adds the Location header. +func SetLocationHeader(resp *http.Response, location string) { + SetResponseHeader(resp, http.CanonicalHeaderKey(headerLocation), location) +} + +// SetRetryHeader adds the Retry-After header. +func SetRetryHeader(resp *http.Response, delay time.Duration) { + SetResponseHeader(resp, http.CanonicalHeaderKey(headerRetryAfter), fmt.Sprintf("%v", delay.Seconds())) +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/go-autorest/autorest/mocks/helpers_test.go juju-core-2.0.0/src/github.com/Azure/go-autorest/autorest/mocks/helpers_test.go --- juju-core-2.0~beta15/src/github.com/Azure/go-autorest/autorest/mocks/helpers_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/go-autorest/autorest/mocks/helpers_test.go 2016-10-13 14:31:55.000000000 +0000 @@ -0,0 +1 @@ +package mocks diff -Nru juju-core-2.0~beta15/src/github.com/Azure/go-autorest/autorest/mocks/mocks.go juju-core-2.0.0/src/github.com/Azure/go-autorest/autorest/mocks/mocks.go --- juju-core-2.0~beta15/src/github.com/Azure/go-autorest/autorest/mocks/mocks.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/go-autorest/autorest/mocks/mocks.go 2016-10-13 14:31:55.000000000 +0000 @@ -0,0 +1,162 @@ +/* +Package mocks provides mocks and helpers used in testing. +*/ +package mocks + +import ( + "fmt" + "io" + "net/http" +) + +// Body implements acceptable body over a string. +type Body struct { + s string + b []byte + isOpen bool + closeAttempts int +} + +// NewBody creates a new instance of Body. +func NewBody(s string) *Body { + return (&Body{s: s}).reset() +} + +// NewBodyClose creates a new instance of Body. +func NewBodyClose(s string) *Body { + return &Body{s: s} +} + +// Read reads into the passed byte slice and returns the bytes read. 
+func (body *Body) Read(b []byte) (n int, err error) { + if !body.IsOpen() { + return 0, fmt.Errorf("ERROR: Body has been closed\n") + } + if len(body.b) == 0 { + return 0, io.EOF + } + n = copy(b, body.b) + body.b = body.b[n:] + return n, nil +} + +// Close closes the body. +func (body *Body) Close() error { + if body.isOpen { + body.isOpen = false + body.closeAttempts++ + } + return nil +} + +// CloseAttempts returns the number of times Close was called. +func (body *Body) CloseAttempts() int { + return body.closeAttempts +} + +// IsOpen returns true if the Body has not been closed, false otherwise. +func (body *Body) IsOpen() bool { + return body.isOpen +} + +func (body *Body) reset() *Body { + body.isOpen = true + body.b = []byte(body.s) + return body +} + +// Sender implements a simple null sender. +type Sender struct { + attempts int + responses []*http.Response + repeatResponse []int + err error + repeatError int + emitErrorAfter int +} + +// NewSender creates a new instance of Sender. +func NewSender() *Sender { + return &Sender{} +} + +// Do accepts the passed request and, based on settings, emits a response and possible error. +func (c *Sender) Do(r *http.Request) (resp *http.Response, err error) { + c.attempts++ + + if len(c.responses) > 0 { + resp = c.responses[0] + if resp != nil { + if b, ok := resp.Body.(*Body); ok { + b.reset() + } + } + c.repeatResponse[0]-- + if c.repeatResponse[0] == 0 { + c.responses = c.responses[1:] + c.repeatResponse = c.repeatResponse[1:] + } + } else { + resp = NewResponse() + } + if resp != nil { + resp.Request = r + } + + if c.emitErrorAfter > 0 { + c.emitErrorAfter-- + } else if c.err != nil { + err = c.err + c.repeatError-- + if c.repeatError == 0 { + c.err = nil + } + } + + return +} + +// AppendResponse adds the passed http.Response to the response stack. 
+func (c *Sender) AppendResponse(resp *http.Response) { + c.AppendAndRepeatResponse(resp, 1) +} + +// AppendAndRepeatResponse adds the passed http.Response to the response stack along with a +// repeat count. A negative repeat count will return the response for all remaining calls to Do. +func (c *Sender) AppendAndRepeatResponse(resp *http.Response, repeat int) { + if c.responses == nil { + c.responses = []*http.Response{resp} + c.repeatResponse = []int{repeat} + } else { + c.responses = append(c.responses, resp) + c.repeatResponse = append(c.repeatResponse, repeat) + } +} + +// Attempts returns the number of times Do was called. +func (c *Sender) Attempts() int { + return c.attempts +} + +// SetError sets the error Do should return. +func (c *Sender) SetError(err error) { + c.SetAndRepeatError(err, 1) +} + +// SetAndRepeatError sets the error Do should return and how many calls to Do will return the error. +// A negative repeat value will return the error for all remaining calls to Do. +func (c *Sender) SetAndRepeatError(err error, repeat int) { + c.err = err + c.repeatError = repeat +} + +// SetEmitErrorAfter sets the number of attempts to be made before errors are emitted. +func (c *Sender) SetEmitErrorAfter(ea int) { + c.emitErrorAfter = ea +} + +// T is a simple testing struct. 
+type T struct { + Name string `json:"name" xml:"Name"` + Age int `json:"age" xml:"Age"` +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/go-autorest/autorest/mocks/mocks_test.go juju-core-2.0.0/src/github.com/Azure/go-autorest/autorest/mocks/mocks_test.go --- juju-core-2.0~beta15/src/github.com/Azure/go-autorest/autorest/mocks/mocks_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/go-autorest/autorest/mocks/mocks_test.go 2016-10-13 14:31:55.000000000 +0000 @@ -0,0 +1 @@ +package mocks diff -Nru juju-core-2.0~beta15/src/github.com/Azure/go-autorest/autorest/preparer.go juju-core-2.0.0/src/github.com/Azure/go-autorest/autorest/preparer.go --- juju-core-2.0~beta15/src/github.com/Azure/go-autorest/autorest/preparer.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/go-autorest/autorest/preparer.go 2016-10-13 14:31:55.000000000 +0000 @@ -0,0 +1,373 @@ +package autorest + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "net/url" + "strings" +) + +const ( + mimeTypeJSON = "application/json" + mimeTypeFormPost = "application/x-www-form-urlencoded" + + headerAuthorization = "Authorization" + headerContentType = "Content-Type" + headerUserAgent = "User-Agent" +) + +// Preparer is the interface that wraps the Prepare method. +// +// Prepare accepts and possibly modifies an http.Request (e.g., adding Headers). Implementations +// must ensure to not share or hold per-invocation state since Preparers may be shared and re-used. +type Preparer interface { + Prepare(*http.Request) (*http.Request, error) +} + +// PreparerFunc is a method that implements the Preparer interface. +type PreparerFunc func(*http.Request) (*http.Request, error) + +// Prepare implements the Preparer interface on PreparerFunc. +func (pf PreparerFunc) Prepare(r *http.Request) (*http.Request, error) { + return pf(r) +} + +// PrepareDecorator takes and possibly decorates, by wrapping, a Preparer. 
Decorators may affect the +// http.Request and pass it along or, first, pass the http.Request along then affect the result. +type PrepareDecorator func(Preparer) Preparer + +// CreatePreparer creates, decorates, and returns a Preparer. +// Without decorators, the returned Preparer returns the passed http.Request unmodified. +// Preparers are safe to share and re-use. +func CreatePreparer(decorators ...PrepareDecorator) Preparer { + return DecoratePreparer( + Preparer(PreparerFunc(func(r *http.Request) (*http.Request, error) { return r, nil })), + decorators...) +} + +// DecoratePreparer accepts a Preparer and a, possibly empty, set of PrepareDecorators, which it +// applies to the Preparer. Decorators are applied in the order received, but their affect upon the +// request depends on whether they are a pre-decorator (change the http.Request and then pass it +// along) or a post-decorator (pass the http.Request along and alter it on return). +func DecoratePreparer(p Preparer, decorators ...PrepareDecorator) Preparer { + for _, decorate := range decorators { + p = decorate(p) + } + return p +} + +// Prepare accepts an http.Request and a, possibly empty, set of PrepareDecorators. +// It creates a Preparer from the decorators which it then applies to the passed http.Request. +func Prepare(r *http.Request, decorators ...PrepareDecorator) (*http.Request, error) { + if r == nil { + return nil, NewError("autorest", "Prepare", "Invoked without an http.Request") + } + return CreatePreparer(decorators...).Prepare(r) +} + +// WithNothing returns a "do nothing" PrepareDecorator that makes no changes to the passed +// http.Request. +func WithNothing() PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + return p.Prepare(r) + }) + } +} + +// WithHeader returns a PrepareDecorator that sets the specified HTTP header of the http.Request to +// the passed value. 
It canonicalizes the passed header name (via http.CanonicalHeaderKey) before +// adding the header. +func WithHeader(header string, value string) PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set(http.CanonicalHeaderKey(header), value) + } + return r, err + }) + } +} + +// WithBearerAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose +// value is "Bearer " followed by the supplied token. +func WithBearerAuthorization(token string) PrepareDecorator { + return WithHeader(headerAuthorization, fmt.Sprintf("Bearer %s", token)) +} + +// AsContentType returns a PrepareDecorator that adds an HTTP Content-Type header whose value +// is the passed contentType. +func AsContentType(contentType string) PrepareDecorator { + return WithHeader(headerContentType, contentType) +} + +// WithUserAgent returns a PrepareDecorator that adds an HTTP User-Agent header whose value is the +// passed string. +func WithUserAgent(ua string) PrepareDecorator { + return WithHeader(headerUserAgent, ua) +} + +// AsFormURLEncoded returns a PrepareDecorator that adds an HTTP Content-Type header whose value is +// "application/x-www-form-urlencoded". +func AsFormURLEncoded() PrepareDecorator { + return AsContentType(mimeTypeFormPost) +} + +// AsJSON returns a PrepareDecorator that adds an HTTP Content-Type header whose value is +// "application/json". +func AsJSON() PrepareDecorator { + return AsContentType(mimeTypeJSON) +} + +// WithMethod returns a PrepareDecorator that sets the HTTP method of the passed request. The +// decorator does not validate that the passed method string is a known HTTP method. 
+func WithMethod(method string) PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r.Method = method + return p.Prepare(r) + }) + } +} + +// AsDelete returns a PrepareDecorator that sets the HTTP method to DELETE. +func AsDelete() PrepareDecorator { return WithMethod("DELETE") } + +// AsGet returns a PrepareDecorator that sets the HTTP method to GET. +func AsGet() PrepareDecorator { return WithMethod("GET") } + +// AsHead returns a PrepareDecorator that sets the HTTP method to HEAD. +func AsHead() PrepareDecorator { return WithMethod("HEAD") } + +// AsOptions returns a PrepareDecorator that sets the HTTP method to OPTIONS. +func AsOptions() PrepareDecorator { return WithMethod("OPTIONS") } + +// AsPatch returns a PrepareDecorator that sets the HTTP method to PATCH. +func AsPatch() PrepareDecorator { return WithMethod("PATCH") } + +// AsPost returns a PrepareDecorator that sets the HTTP method to POST. +func AsPost() PrepareDecorator { return WithMethod("POST") } + +// AsPut returns a PrepareDecorator that sets the HTTP method to PUT. +func AsPut() PrepareDecorator { return WithMethod("PUT") } + +// WithBaseURL returns a PrepareDecorator that populates the http.Request with a url.URL constructed +// from the supplied baseUrl. +func WithBaseURL(baseURL string) PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + var u *url.URL + if u, err = url.Parse(baseURL); err != nil { + return r, err + } + if u.Scheme == "" { + err = fmt.Errorf("autorest: No scheme detected in URL %s", baseURL) + } + if err == nil { + r.URL = u + } + } + return r, err + }) + } +} + +// WithFormData returns a PrepareDecoratore that "URL encodes" (e.g., bar=baz&foo=quux) into the +// http.Request body. 
+func WithFormData(v url.Values) PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + s := v.Encode() + r.ContentLength = int64(len(s)) + r.Body = ioutil.NopCloser(strings.NewReader(s)) + } + return r, err + }) + } +} + +// WithBool returns a PrepareDecorator that encodes the passed bool into the body of the request +// and sets the Content-Length header. +func WithBool(v bool) PrepareDecorator { + return WithString(fmt.Sprintf("%v", v)) +} + +// WithFloat32 returns a PrepareDecorator that encodes the passed float32 into the body of the +// request and sets the Content-Length header. +func WithFloat32(v float32) PrepareDecorator { + return WithString(fmt.Sprintf("%v", v)) +} + +// WithFloat64 returns a PrepareDecorator that encodes the passed float64 into the body of the +// request and sets the Content-Length header. +func WithFloat64(v float64) PrepareDecorator { + return WithString(fmt.Sprintf("%v", v)) +} + +// WithInt32 returns a PrepareDecorator that encodes the passed int32 into the body of the request +// and sets the Content-Length header. +func WithInt32(v int32) PrepareDecorator { + return WithString(fmt.Sprintf("%v", v)) +} + +// WithInt64 returns a PrepareDecorator that encodes the passed int64 into the body of the request +// and sets the Content-Length header. +func WithInt64(v int64) PrepareDecorator { + return WithString(fmt.Sprintf("%v", v)) +} + +// WithString returns a PrepareDecorator that encodes the passed string into the body of the request +// and sets the Content-Length header. 
+func WithString(v string) PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + r.ContentLength = int64(len(v)) + r.Body = ioutil.NopCloser(strings.NewReader(v)) + } + return r, err + }) + } +} + +// WithJSON returns a PrepareDecorator that encodes the data passed as JSON into the body of the +// request and sets the Content-Length header. +func WithJSON(v interface{}) PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + b, err := json.Marshal(v) + if err == nil { + r.ContentLength = int64(len(b)) + r.Body = ioutil.NopCloser(bytes.NewReader(b)) + } + } + return r, err + }) + } +} + +// WithPath returns a PrepareDecorator that adds the supplied path to the request URL. If the path +// is absolute (that is, it begins with a "/"), it replaces the existing path. +func WithPath(path string) PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + if r.URL == nil { + return r, NewError("autorest", "WithPath", "Invoked with a nil URL") + } + if r.URL, err = parseURL(r.URL, path); err != nil { + return r, err + } + } + return r, err + }) + } +} + +// WithEscapedPathParameters returns a PrepareDecorator that replaces brace-enclosed keys within the +// request path (i.e., http.Request.URL.Path) with the corresponding values from the passed map. The +// values will be escaped (aka URL encoded) before insertion into the path. 
+func WithEscapedPathParameters(path string, pathParameters map[string]interface{}) PrepareDecorator { + parameters := escapeValueStrings(ensureValueStrings(pathParameters)) + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + if r.URL == nil { + return r, NewError("autorest", "WithEscapedPathParameters", "Invoked with a nil URL") + } + for key, value := range parameters { + path = strings.Replace(path, "{"+key+"}", value, -1) + } + if r.URL, err = parseURL(r.URL, path); err != nil { + return r, err + } + } + return r, err + }) + } +} + +// WithPathParameters returns a PrepareDecorator that replaces brace-enclosed keys within the +// request path (i.e., http.Request.URL.Path) with the corresponding values from the passed map. +func WithPathParameters(path string, pathParameters map[string]interface{}) PrepareDecorator { + parameters := ensureValueStrings(pathParameters) + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + if r.URL == nil { + return r, NewError("autorest", "WithPathParameters", "Invoked with a nil URL") + } + for key, value := range parameters { + path = strings.Replace(path, "{"+key+"}", value, -1) + } + + if r.URL, err = parseURL(r.URL, path); err != nil { + return r, err + } + } + return r, err + }) + } +} + +func parseURL(u *url.URL, path string) (*url.URL, error) { + p := strings.TrimRight(u.String(), "/") + if !strings.HasPrefix(path, "/") { + path = "/" + path + } + return url.Parse(p + path) +} + +// WithQueryParameters returns a PrepareDecorators that encodes and applies the query parameters +// given in the supplied map (i.e., key=value). 
+func WithQueryParameters(queryParameters map[string]interface{}) PrepareDecorator { + parameters := ensureValueStrings(queryParameters) + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + if r.URL == nil { + return r, NewError("autorest", "WithQueryParameters", "Invoked with a nil URL") + } + v := r.URL.Query() + for key, value := range parameters { + v.Add(key, value) + } + r.URL.RawQuery = createQuery(v) + } + return r, err + }) + } +} + +// Authorizer is the interface that provides a PrepareDecorator used to supply request +// authorization. Most often, the Authorizer decorator runs last so it has access to the full +// state of the formed HTTP request. +type Authorizer interface { + WithAuthorization() PrepareDecorator +} + +// NullAuthorizer implements a default, "do nothing" Authorizer. +type NullAuthorizer struct{} + +// WithAuthorization returns a PrepareDecorator that does nothing. +func (na NullAuthorizer) WithAuthorization() PrepareDecorator { + return WithNothing() +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/go-autorest/autorest/preparer_test.go juju-core-2.0.0/src/github.com/Azure/go-autorest/autorest/preparer_test.go --- juju-core-2.0~beta15/src/github.com/Azure/go-autorest/autorest/preparer_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/go-autorest/autorest/preparer_test.go 2016-10-13 14:31:55.000000000 +0000 @@ -0,0 +1,652 @@ +package autorest + +import ( + "fmt" + "io/ioutil" + "net/http" + "net/url" + "reflect" + "strconv" + "testing" + + "github.com/Azure/go-autorest/autorest/mocks" +) + +// PrepareDecorators wrap and invoke a Preparer. Most often, the decorator invokes the passed +// Preparer and decorates the response. 
+func ExamplePrepareDecorator() { + path := "a/b/c/" + pd := func() PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + if r.URL == nil { + return r, fmt.Errorf("ERROR: URL is not set") + } + r.URL.Path += path + } + return r, err + }) + } + } + + r, _ := Prepare(&http.Request{}, + WithBaseURL("https://microsoft.com/"), + pd()) + + fmt.Printf("Path is %s\n", r.URL) + // Output: Path is https://microsoft.com/a/b/c/ +} + +// PrepareDecorators may also modify and then invoke the Preparer. +func ExamplePrepareDecorator_pre() { + pd := func() PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r.Header.Add(http.CanonicalHeaderKey("ContentType"), "application/json") + return p.Prepare(r) + }) + } + } + + r, _ := Prepare(&http.Request{Header: http.Header{}}, + pd()) + + fmt.Printf("ContentType is %s\n", r.Header.Get("ContentType")) + // Output: ContentType is application/json +} + +// Create a sequence of three Preparers that build up the URL path. 
+func ExampleCreatePreparer() { + p := CreatePreparer( + WithBaseURL("https://microsoft.com/"), + WithPath("a"), + WithPath("b"), + WithPath("c")) + r, err := p.Prepare(&http.Request{}) + if err != nil { + fmt.Printf("ERROR: %v\n", err) + } else { + fmt.Println(r.URL) + } + // Output: https://microsoft.com/a/b/c +} + +// Create and apply separate Preparers +func ExampleCreatePreparer_multiple() { + params := map[string]interface{}{ + "param1": "a", + "param2": "c", + } + + p1 := CreatePreparer(WithBaseURL("https://microsoft.com/")) + p2 := CreatePreparer(WithPathParameters("/{param1}/b/{param2}/", params)) + + r, err := p1.Prepare(&http.Request{}) + if err != nil { + fmt.Printf("ERROR: %v\n", err) + } + + r, err = p2.Prepare(r) + if err != nil { + fmt.Printf("ERROR: %v\n", err) + } else { + fmt.Println(r.URL) + } + // Output: https://microsoft.com/a/b/c/ +} + +// Create and chain separate Preparers +func ExampleCreatePreparer_chain() { + params := map[string]interface{}{ + "param1": "a", + "param2": "c", + } + + p := CreatePreparer(WithBaseURL("https://microsoft.com/")) + p = DecoratePreparer(p, WithPathParameters("/{param1}/b/{param2}/", params)) + + r, err := p.Prepare(&http.Request{}) + if err != nil { + fmt.Printf("ERROR: %v\n", err) + } else { + fmt.Println(r.URL) + } + // Output: https://microsoft.com/a/b/c/ +} + +// Create and prepare an http.Request in one call +func ExamplePrepare() { + r, err := Prepare(&http.Request{}, + AsGet(), + WithBaseURL("https://microsoft.com/"), + WithPath("a/b/c/")) + if err != nil { + fmt.Printf("ERROR: %v\n", err) + } else { + fmt.Printf("%s %s", r.Method, r.URL) + } + // Output: GET https://microsoft.com/a/b/c/ +} + +// Create a request for a supplied base URL and path +func ExampleWithBaseURL() { + r, err := Prepare(&http.Request{}, + WithBaseURL("https://microsoft.com/a/b/c/")) + if err != nil { + fmt.Printf("ERROR: %v\n", err) + } else { + fmt.Println(r.URL) + } + // Output: https://microsoft.com/a/b/c/ +} + +func 
ExampleWithBaseURL_second() { + _, err := Prepare(&http.Request{}, WithBaseURL(":")) + fmt.Println(err) + // Output: parse :: missing protocol scheme +} + +// Create a request with a custom HTTP header +func ExampleWithHeader() { + r, err := Prepare(&http.Request{}, + WithBaseURL("https://microsoft.com/a/b/c/"), + WithHeader("x-foo", "bar")) + if err != nil { + fmt.Printf("ERROR: %v\n", err) + } else { + fmt.Printf("Header %s=%s\n", "x-foo", r.Header.Get("x-foo")) + } + // Output: Header x-foo=bar +} + +// Create a request whose Body is the JSON encoding of a structure +func ExampleWithFormData() { + v := url.Values{} + v.Add("name", "Rob Pike") + v.Add("age", "42") + + r, err := Prepare(&http.Request{}, + WithFormData(v)) + if err != nil { + fmt.Printf("ERROR: %v\n", err) + } + + b, err := ioutil.ReadAll(r.Body) + if err != nil { + fmt.Printf("ERROR: %v\n", err) + } else { + fmt.Printf("Request Body contains %s\n", string(b)) + } + // Output: Request Body contains age=42&name=Rob+Pike +} + +// Create a request whose Body is the JSON encoding of a structure +func ExampleWithJSON() { + t := mocks.T{Name: "Rob Pike", Age: 42} + + r, err := Prepare(&http.Request{}, + WithJSON(&t)) + if err != nil { + fmt.Printf("ERROR: %v\n", err) + } + + b, err := ioutil.ReadAll(r.Body) + if err != nil { + fmt.Printf("ERROR: %v\n", err) + } else { + fmt.Printf("Request Body contains %s\n", string(b)) + } + // Output: Request Body contains {"name":"Rob Pike","age":42} +} + +// Create a request from a path with escaped parameters +func ExampleWithEscapedPathParameters() { + params := map[string]interface{}{ + "param1": "a b c", + "param2": "d e f", + } + r, err := Prepare(&http.Request{}, + WithBaseURL("https://microsoft.com/"), + WithEscapedPathParameters("/{param1}/b/{param2}/", params)) + if err != nil { + fmt.Printf("ERROR: %v\n", err) + } else { + fmt.Println(r.URL) + } + // Output: https://microsoft.com/a+b+c/b/d+e+f/ +} + +// Create a request from a path with parameters +func 
ExampleWithPathParameters() { + params := map[string]interface{}{ + "param1": "a", + "param2": "c", + } + r, err := Prepare(&http.Request{}, + WithBaseURL("https://microsoft.com/"), + WithPathParameters("/{param1}/b/{param2}/", params)) + if err != nil { + fmt.Printf("ERROR: %v\n", err) + } else { + fmt.Println(r.URL) + } + // Output: https://microsoft.com/a/b/c/ +} + +// Create a request with query parameters +func ExampleWithQueryParameters() { + params := map[string]interface{}{ + "q1": "value1", + "q2": "value2", + } + r, err := Prepare(&http.Request{}, + WithBaseURL("https://microsoft.com/"), + WithPath("/a/b/c/"), + WithQueryParameters(params)) + if err != nil { + fmt.Printf("ERROR: %v\n", err) + } else { + fmt.Println(r.URL) + } + // Output: https://microsoft.com/a/b/c/?q1=value1&q2=value2 +} + +func TestWithPathWithInvalidPath(t *testing.T) { + p := "path%2*end" + if _, err := Prepare(&http.Request{}, WithBaseURL("https://microsoft.com/"), WithPath(p)); err == nil { + t.Fatalf("autorest: WithPath should fail for invalid URL escape error for path '%v' ", p) + } + +} + +func TestWithPathParametersWithInvalidPath(t *testing.T) { + p := "path%2*end" + m := map[string]interface{}{ + "path1": p, + } + if _, err := Prepare(&http.Request{}, WithBaseURL("https://microsoft.com/"), WithPathParameters("/{path1}/", m)); err == nil { + t.Fatalf("autorest: WithPath should fail for invalid URL escape for path '%v' ", p) + } + +} + +func TestCreatePreparerDoesNotModify(t *testing.T) { + r1 := &http.Request{} + p := CreatePreparer() + r2, err := p.Prepare(r1) + if err != nil { + t.Fatalf("autorest: CreatePreparer failed (%v)", err) + } + if !reflect.DeepEqual(r1, r2) { + t.Fatalf("autorest: CreatePreparer without decorators modified the request") + } +} + +func TestCreatePreparerRunsDecoratorsInOrder(t *testing.T) { + p := CreatePreparer(WithBaseURL("https://microsoft.com/"), WithPath("1"), WithPath("2"), WithPath("3")) + r, err := p.Prepare(&http.Request{}) + if err != nil 
{ + t.Fatalf("autorest: CreatePreparer failed (%v)", err) + } + if r.URL.String() != "https:/1/2/3" && r.URL.Host != "microsoft.com" { + t.Fatalf("autorest: CreatePreparer failed to run decorators in order") + } +} + +func TestAsContentType(t *testing.T) { + r, err := Prepare(mocks.NewRequest(), AsContentType("application/text")) + if err != nil { + fmt.Printf("ERROR: %v", err) + } + if r.Header.Get(headerContentType) != "application/text" { + t.Fatalf("autorest: AsContentType failed to add header (%s=%s)", headerContentType, r.Header.Get(headerContentType)) + } +} + +func TestAsFormURLEncoded(t *testing.T) { + r, err := Prepare(mocks.NewRequest(), AsFormURLEncoded()) + if err != nil { + fmt.Printf("ERROR: %v", err) + } + if r.Header.Get(headerContentType) != mimeTypeFormPost { + t.Fatalf("autorest: AsFormURLEncoded failed to add header (%s=%s)", headerContentType, r.Header.Get(headerContentType)) + } +} + +func TestAsJSON(t *testing.T) { + r, err := Prepare(mocks.NewRequest(), AsJSON()) + if err != nil { + fmt.Printf("ERROR: %v", err) + } + if r.Header.Get(headerContentType) != mimeTypeJSON { + t.Fatalf("autorest: AsJSON failed to add header (%s=%s)", headerContentType, r.Header.Get(headerContentType)) + } +} + +func TestWithNothing(t *testing.T) { + r1 := mocks.NewRequest() + r2, err := Prepare(r1, WithNothing()) + if err != nil { + t.Fatalf("autorest: WithNothing returned an unexpected error (%v)", err) + } + + if !reflect.DeepEqual(r1, r2) { + t.Fatal("azure: WithNothing modified the passed HTTP Request") + } +} + +func TestWithBearerAuthorization(t *testing.T) { + r, err := Prepare(mocks.NewRequest(), WithBearerAuthorization("SOME-TOKEN")) + if err != nil { + fmt.Printf("ERROR: %v", err) + } + if r.Header.Get(headerAuthorization) != "Bearer SOME-TOKEN" { + t.Fatalf("autorest: WithBearerAuthorization failed to add header (%s=%s)", headerAuthorization, r.Header.Get(headerAuthorization)) + } +} + +func TestWithUserAgent(t *testing.T) { + ua := "User Agent Go" + 
r, err := Prepare(mocks.NewRequest(), WithUserAgent(ua)) + if err != nil { + fmt.Printf("ERROR: %v", err) + } + if r.UserAgent() != ua || r.Header.Get(headerUserAgent) != ua { + t.Fatalf("autorest: WithUserAgent failed to add header (%s=%s)", headerUserAgent, r.Header.Get(headerUserAgent)) + } +} + +func TestWithMethod(t *testing.T) { + r, _ := Prepare(mocks.NewRequest(), WithMethod("HEAD")) + if r.Method != "HEAD" { + t.Fatal("autorest: WithMethod failed to set HTTP method header") + } +} + +func TestAsDelete(t *testing.T) { + r, _ := Prepare(mocks.NewRequest(), AsDelete()) + if r.Method != "DELETE" { + t.Fatal("autorest: AsDelete failed to set HTTP method header to DELETE") + } +} + +func TestAsGet(t *testing.T) { + r, _ := Prepare(mocks.NewRequest(), AsGet()) + if r.Method != "GET" { + t.Fatal("autorest: AsGet failed to set HTTP method header to GET") + } +} + +func TestAsHead(t *testing.T) { + r, _ := Prepare(mocks.NewRequest(), AsHead()) + if r.Method != "HEAD" { + t.Fatal("autorest: AsHead failed to set HTTP method header to HEAD") + } +} + +func TestAsOptions(t *testing.T) { + r, _ := Prepare(mocks.NewRequest(), AsOptions()) + if r.Method != "OPTIONS" { + t.Fatal("autorest: AsOptions failed to set HTTP method header to OPTIONS") + } +} + +func TestAsPatch(t *testing.T) { + r, _ := Prepare(mocks.NewRequest(), AsPatch()) + if r.Method != "PATCH" { + t.Fatal("autorest: AsPatch failed to set HTTP method header to PATCH") + } +} + +func TestAsPost(t *testing.T) { + r, _ := Prepare(mocks.NewRequest(), AsPost()) + if r.Method != "POST" { + t.Fatal("autorest: AsPost failed to set HTTP method header to POST") + } +} + +func TestAsPut(t *testing.T) { + r, _ := Prepare(mocks.NewRequest(), AsPut()) + if r.Method != "PUT" { + t.Fatal("autorest: AsPut failed to set HTTP method header to PUT") + } +} + +func TestPrepareWithNullRequest(t *testing.T) { + _, err := Prepare(nil) + if err == nil { + t.Fatal("autorest: Prepare failed to return an error when given a null 
http.Request") + } +} + +func TestWithFormDataSetsContentLength(t *testing.T) { + v := url.Values{} + v.Add("name", "Rob Pike") + v.Add("age", "42") + + r, err := Prepare(&http.Request{}, + WithFormData(v)) + if err != nil { + t.Fatalf("autorest: WithFormData failed with error (%v)", err) + } + + b, err := ioutil.ReadAll(r.Body) + if err != nil { + t.Fatalf("autorest: WithFormData failed with error (%v)", err) + } + + if r.ContentLength != int64(len(b)) { + t.Fatalf("autorest:WithFormData set Content-Length to %v, expected %v", r.ContentLength, len(b)) + } +} + +func TestWithBool_SetsTheBody(t *testing.T) { + r, err := Prepare(&http.Request{}, + WithBool(false)) + if err != nil { + t.Fatalf("autorest: WithBool failed with error (%v)", err) + } + + s, err := ioutil.ReadAll(r.Body) + if err != nil { + t.Fatalf("autorest: WithBool failed with error (%v)", err) + } + + if r.ContentLength != int64(len(fmt.Sprintf("%v", false))) { + t.Fatalf("autorest: WithBool set Content-Length to %v, expected %v", r.ContentLength, int64(len(fmt.Sprintf("%v", false)))) + } + + v, err := strconv.ParseBool(string(s)) + if err != nil || v { + t.Fatalf("autorest: WithBool incorrectly encoded the boolean as %v", s) + } +} + +func TestWithFloat32_SetsTheBody(t *testing.T) { + r, err := Prepare(&http.Request{}, + WithFloat32(42.0)) + if err != nil { + t.Fatalf("autorest: WithFloat32 failed with error (%v)", err) + } + + s, err := ioutil.ReadAll(r.Body) + if err != nil { + t.Fatalf("autorest: WithFloat32 failed with error (%v)", err) + } + + if r.ContentLength != int64(len(fmt.Sprintf("%v", 42.0))) { + t.Fatalf("autorest: WithFloat32 set Content-Length to %v, expected %v", r.ContentLength, int64(len(fmt.Sprintf("%v", 42.0)))) + } + + v, err := strconv.ParseFloat(string(s), 32) + if err != nil || float32(v) != float32(42.0) { + t.Fatalf("autorest: WithFloat32 incorrectly encoded the boolean as %v", s) + } +} + +func TestWithFloat64_SetsTheBody(t *testing.T) { + r, err := 
Prepare(&http.Request{}, + WithFloat64(42.0)) + if err != nil { + t.Fatalf("autorest: WithFloat64 failed with error (%v)", err) + } + + s, err := ioutil.ReadAll(r.Body) + if err != nil { + t.Fatalf("autorest: WithFloat64 failed with error (%v)", err) + } + + if r.ContentLength != int64(len(fmt.Sprintf("%v", 42.0))) { + t.Fatalf("autorest: WithFloat64 set Content-Length to %v, expected %v", r.ContentLength, int64(len(fmt.Sprintf("%v", 42.0)))) + } + + v, err := strconv.ParseFloat(string(s), 64) + if err != nil || v != float64(42.0) { + t.Fatalf("autorest: WithFloat64 incorrectly encoded the boolean as %v", s) + } +} + +func TestWithInt32_SetsTheBody(t *testing.T) { + r, err := Prepare(&http.Request{}, + WithInt32(42)) + if err != nil { + t.Fatalf("autorest: WithInt32 failed with error (%v)", err) + } + + s, err := ioutil.ReadAll(r.Body) + if err != nil { + t.Fatalf("autorest: WithInt32 failed with error (%v)", err) + } + + if r.ContentLength != int64(len(fmt.Sprintf("%v", 42))) { + t.Fatalf("autorest: WithInt32 set Content-Length to %v, expected %v", r.ContentLength, int64(len(fmt.Sprintf("%v", 42)))) + } + + v, err := strconv.ParseInt(string(s), 10, 32) + if err != nil || int32(v) != int32(42) { + t.Fatalf("autorest: WithInt32 incorrectly encoded the boolean as %v", s) + } +} + +func TestWithInt64_SetsTheBody(t *testing.T) { + r, err := Prepare(&http.Request{}, + WithInt64(42)) + if err != nil { + t.Fatalf("autorest: WithInt64 failed with error (%v)", err) + } + + s, err := ioutil.ReadAll(r.Body) + if err != nil { + t.Fatalf("autorest: WithInt64 failed with error (%v)", err) + } + + if r.ContentLength != int64(len(fmt.Sprintf("%v", 42))) { + t.Fatalf("autorest: WithInt64 set Content-Length to %v, expected %v", r.ContentLength, int64(len(fmt.Sprintf("%v", 42)))) + } + + v, err := strconv.ParseInt(string(s), 10, 64) + if err != nil || v != int64(42) { + t.Fatalf("autorest: WithInt64 incorrectly encoded the boolean as %v", s) + } +} + +func 
TestWithString_SetsTheBody(t *testing.T) { + r, err := Prepare(&http.Request{}, + WithString("value")) + if err != nil { + t.Fatalf("autorest: WithString failed with error (%v)", err) + } + + s, err := ioutil.ReadAll(r.Body) + if err != nil { + t.Fatalf("autorest: WithString failed with error (%v)", err) + } + + if r.ContentLength != int64(len("value")) { + t.Fatalf("autorest: WithString set Content-Length to %v, expected %v", r.ContentLength, int64(len("value"))) + } + + if string(s) != "value" { + t.Fatalf("autorest: WithString incorrectly encoded the string as %v", s) + } +} + +func TestWithJSONSetsContentLength(t *testing.T) { + r, err := Prepare(&http.Request{}, + WithJSON(&mocks.T{Name: "Rob Pike", Age: 42})) + if err != nil { + t.Fatalf("autorest: WithJSON failed with error (%v)", err) + } + + b, err := ioutil.ReadAll(r.Body) + if err != nil { + t.Fatalf("autorest: WithJSON failed with error (%v)", err) + } + + if r.ContentLength != int64(len(b)) { + t.Fatalf("autorest:WithJSON set Content-Length to %v, expected %v", r.ContentLength, len(b)) + } +} + +func TestWithHeaderAllocatesHeaders(t *testing.T) { + r, err := Prepare(mocks.NewRequest(), WithHeader("x-foo", "bar")) + if err != nil { + t.Fatalf("autorest: WithHeader failed (%v)", err) + } + if r.Header.Get("x-foo") != "bar" { + t.Fatalf("autorest: WithHeader failed to add header (%s=%s)", "x-foo", r.Header.Get("x-foo")) + } +} + +func TestWithPathCatchesNilURL(t *testing.T) { + _, err := Prepare(&http.Request{}, WithPath("a")) + if err == nil { + t.Fatalf("autorest: WithPath failed to catch a nil URL") + } +} + +func TestWithEscapedPathParametersCatchesNilURL(t *testing.T) { + _, err := Prepare(&http.Request{}, WithEscapedPathParameters("", map[string]interface{}{"foo": "bar"})) + if err == nil { + t.Fatalf("autorest: WithEscapedPathParameters failed to catch a nil URL") + } +} + +func TestWithPathParametersCatchesNilURL(t *testing.T) { + _, err := Prepare(&http.Request{}, WithPathParameters("", 
map[string]interface{}{"foo": "bar"})) + if err == nil { + t.Fatalf("autorest: WithPathParameters failed to catch a nil URL") + } +} + +func TestWithQueryParametersCatchesNilURL(t *testing.T) { + _, err := Prepare(&http.Request{}, WithQueryParameters(map[string]interface{}{"foo": "bar"})) + if err == nil { + t.Fatalf("autorest: WithQueryParameters failed to catch a nil URL") + } +} + +func TestModifyingExistingRequest(t *testing.T) { + r, err := Prepare(mocks.NewRequestForURL("https://bing.com"), WithPath("search"), WithQueryParameters(map[string]interface{}{"q": "golang"})) + if err != nil { + t.Fatalf("autorest: Preparing an existing request returned an error (%v)", err) + } + if r.URL.String() != "https:/search?q=golang" && r.URL.Host != "bing.com" { + t.Fatalf("autorest: Preparing an existing request failed (%s)", r.URL) + } +} + +func TestWithAuthorizer(t *testing.T) { + r1 := mocks.NewRequest() + + na := &NullAuthorizer{} + r2, err := Prepare(r1, + na.WithAuthorization()) + if err != nil { + t.Fatalf("autorest: NullAuthorizer#WithAuthorization returned an unexpected error (%v)", err) + } else if !reflect.DeepEqual(r1, r2) { + t.Fatalf("autorest: NullAuthorizer#WithAuthorization modified the request -- received %v, expected %v", r2, r1) + } +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/go-autorest/autorest/responder.go juju-core-2.0.0/src/github.com/Azure/go-autorest/autorest/responder.go --- juju-core-2.0~beta15/src/github.com/Azure/go-autorest/autorest/responder.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/go-autorest/autorest/responder.go 2016-10-13 14:31:55.000000000 +0000 @@ -0,0 +1,208 @@ +package autorest + +import ( + "bytes" + "encoding/json" + "encoding/xml" + "fmt" + "io/ioutil" + "net/http" + "strings" +) + +// Responder is the interface that wraps the Respond method. +// +// Respond accepts and reacts to an http.Response. 
Implementations must ensure to not share or hold +// state since Responders may be shared and re-used. +type Responder interface { + Respond(*http.Response) error +} + +// ResponderFunc is a method that implements the Responder interface. +type ResponderFunc func(*http.Response) error + +// Respond implements the Responder interface on ResponderFunc. +func (rf ResponderFunc) Respond(r *http.Response) error { + return rf(r) +} + +// RespondDecorator takes and possibly decorates, by wrapping, a Responder. Decorators may react to +// the http.Response and pass it along or, first, pass the http.Response along then react. +type RespondDecorator func(Responder) Responder + +// CreateResponder creates, decorates, and returns a Responder. Without decorators, the returned +// Responder returns the passed http.Response unmodified. Responders may or may not be safe to share +// and re-used: It depends on the applied decorators. For example, a standard decorator that closes +// the response body is fine to share whereas a decorator that reads the body into a passed struct +// is not. +// +// To prevent memory leaks, ensure that at least one Responder closes the response body. +func CreateResponder(decorators ...RespondDecorator) Responder { + return DecorateResponder( + Responder(ResponderFunc(func(r *http.Response) error { return nil })), + decorators...) +} + +// DecorateResponder accepts a Responder and a, possibly empty, set of RespondDecorators, which it +// applies to the Responder. Decorators are applied in the order received, but their affect upon the +// request depends on whether they are a pre-decorator (react to the http.Response and then pass it +// along) or a post-decorator (pass the http.Response along and then react). +func DecorateResponder(r Responder, decorators ...RespondDecorator) Responder { + for _, decorate := range decorators { + r = decorate(r) + } + return r +} + +// Respond accepts an http.Response and a, possibly empty, set of RespondDecorators. 
+// It creates a Responder from the decorators it then applies to the passed http.Response. +func Respond(r *http.Response, decorators ...RespondDecorator) error { + if r == nil { + return nil + } + return CreateResponder(decorators...).Respond(r) +} + +// ByIgnoring returns a RespondDecorator that ignores the passed http.Response passing it unexamined +// to the next RespondDecorator. +func ByIgnoring() RespondDecorator { + return func(r Responder) Responder { + return ResponderFunc(func(resp *http.Response) error { + return r.Respond(resp) + }) + } +} + +// ByCopying copies the contents of the http.Response Body into the passed bytes.Buffer as +// the Body is read. +func ByCopying(b *bytes.Buffer) RespondDecorator { + return func(r Responder) Responder { + return ResponderFunc(func(resp *http.Response) error { + err := r.Respond(resp) + if err == nil && resp != nil && resp.Body != nil { + resp.Body = TeeReadCloser(resp.Body, b) + } + return err + }) + } +} + +// ByClosing returns a RespondDecorator that first invokes the passed Responder after which it +// closes the response body. Since the passed Responder is invoked prior to closing the response +// body, the decorator may occur anywhere within the set. +func ByClosing() RespondDecorator { + return func(r Responder) Responder { + return ResponderFunc(func(resp *http.Response) error { + err := r.Respond(resp) + if resp != nil && resp.Body != nil { + if err := resp.Body.Close(); err != nil { + return fmt.Errorf("Error closing the response body: %v", err) + } + } + return err + }) + } +} + +// ByClosingIfError returns a RespondDecorator that first invokes the passed Responder after which +// it closes the response if the passed Responder returns an error and the response body exists. 
+func ByClosingIfError() RespondDecorator { + return func(r Responder) Responder { + return ResponderFunc(func(resp *http.Response) error { + err := r.Respond(resp) + if err != nil && resp != nil && resp.Body != nil { + if err := resp.Body.Close(); err != nil { + return fmt.Errorf("Error closing the response body: %v", err) + } + } + return err + }) + } +} + +// ByUnmarshallingJSON returns a RespondDecorator that decodes a JSON document returned in the +// response Body into the value pointed to by v. +func ByUnmarshallingJSON(v interface{}) RespondDecorator { + return func(r Responder) Responder { + return ResponderFunc(func(resp *http.Response) error { + err := r.Respond(resp) + if err == nil { + b, errInner := ioutil.ReadAll(resp.Body) + if errInner != nil { + err = fmt.Errorf("Error occurred reading http.Response#Body - Error = '%v'", errInner) + } else if len(strings.Trim(string(b), " ")) > 0 { + errInner = json.Unmarshal(b, v) + if errInner != nil { + err = fmt.Errorf("Error occurred unmarshalling JSON - Error = '%v' JSON = '%s'", errInner, string(b)) + } + } + } + return err + }) + } +} + +// ByUnmarshallingXML returns a RespondDecorator that decodes a XML document returned in the +// response Body into the value pointed to by v. +func ByUnmarshallingXML(v interface{}) RespondDecorator { + return func(r Responder) Responder { + return ResponderFunc(func(resp *http.Response) error { + err := r.Respond(resp) + if err == nil { + b, errInner := ioutil.ReadAll(resp.Body) + if errInner != nil { + err = fmt.Errorf("Error occurred reading http.Response#Body - Error = '%v'", errInner) + } else { + errInner = xml.Unmarshal(b, v) + if errInner != nil { + err = fmt.Errorf("Error occurred unmarshalling Xml - Error = '%v' Xml = '%s'", errInner, string(b)) + } + } + } + return err + }) + } +} + +// WithErrorUnlessStatusCode returns a RespondDecorator that emits an error unless the response +// StatusCode is among the set passed. 
Since these are artificial errors, the response body +// may still require closing. +func WithErrorUnlessStatusCode(codes ...int) RespondDecorator { + return func(r Responder) Responder { + return ResponderFunc(func(resp *http.Response) error { + err := r.Respond(resp) + if err == nil && !ResponseHasStatusCode(resp, codes...) { + err = NewErrorWithResponse("autorest", "WithErrorUnlessStatusCode", resp, "%v %v failed with %s", + resp.Request.Method, + resp.Request.URL, + resp.Status) + } + return err + }) + } +} + +// WithErrorUnlessOK returns a RespondDecorator that emits an error if the response StatusCode is +// anything other than HTTP 200. +func WithErrorUnlessOK() RespondDecorator { + return WithErrorUnlessStatusCode(http.StatusOK) +} + +// ExtractHeader extracts all values of the specified header from the http.Response. It returns an +// empty string slice if the passed http.Response is nil or the header does not exist. +func ExtractHeader(header string, resp *http.Response) []string { + if resp != nil && resp.Header != nil { + return resp.Header[http.CanonicalHeaderKey(header)] + } + return nil +} + +// ExtractHeaderValue extracts the first value of the specified header from the http.Response. It +// returns an empty string if the passed http.Response is nil or the header does not exist. 
+func ExtractHeaderValue(header string, resp *http.Response) string { + h := ExtractHeader(header, resp) + if len(h) > 0 { + return h[0] + } + return "" +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/go-autorest/autorest/responder_test.go juju-core-2.0.0/src/github.com/Azure/go-autorest/autorest/responder_test.go --- juju-core-2.0~beta15/src/github.com/Azure/go-autorest/autorest/responder_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/go-autorest/autorest/responder_test.go 2016-10-13 14:31:55.000000000 +0000 @@ -0,0 +1,535 @@ +package autorest + +import ( + "bytes" + "fmt" + "net/http" + "reflect" + "strings" + "testing" + + "github.com/Azure/go-autorest/autorest/mocks" +) + +func ExampleWithErrorUnlessOK() { + r := mocks.NewResponse() + r.Request = mocks.NewRequest() + + // Respond and leave the response body open (for a subsequent responder to close) + err := Respond(r, + WithErrorUnlessOK(), + ByClosingIfError()) + + if err == nil { + fmt.Printf("%s of %s returned HTTP 200", r.Request.Method, r.Request.URL) + + // Complete handling the response and close the body + Respond(r, + ByClosing()) + } + // Output: GET of https://microsoft.com/a/b/c/ returned HTTP 200 +} + +func ExampleByUnmarshallingJSON() { + c := ` + { + "name" : "Rob Pike", + "age" : 42 + } + ` + + type V struct { + Name string `json:"name"` + Age int `json:"age"` + } + + v := &V{} + + Respond(mocks.NewResponseWithContent(c), + ByUnmarshallingJSON(v), + ByClosing()) + + fmt.Printf("%s is %d years old\n", v.Name, v.Age) + // Output: Rob Pike is 42 years old +} + +func ExampleByUnmarshallingXML() { + c := ` + + Rob Pike + 42 + ` + + type V struct { + Name string `xml:"Name"` + Age int `xml:"Age"` + } + + v := &V{} + + Respond(mocks.NewResponseWithContent(c), + ByUnmarshallingXML(v), + ByClosing()) + + fmt.Printf("%s is %d years old\n", v.Name, v.Age) + // Output: Rob Pike is 42 years old +} + +func TestCreateResponderDoesNotModify(t *testing.T) { + r1 := 
mocks.NewResponse() + r2 := mocks.NewResponse() + p := CreateResponder() + err := p.Respond(r1) + if err != nil { + t.Fatalf("autorest: CreateResponder failed (%v)", err) + } + if !reflect.DeepEqual(r1, r2) { + t.Fatalf("autorest: CreateResponder without decorators modified the response") + } +} + +func TestCreateResponderRunsDecoratorsInOrder(t *testing.T) { + s := "" + + d := func(n int) RespondDecorator { + return func(r Responder) Responder { + return ResponderFunc(func(resp *http.Response) error { + err := r.Respond(resp) + if err == nil { + s += fmt.Sprintf("%d", n) + } + return err + }) + } + } + + p := CreateResponder(d(1), d(2), d(3)) + err := p.Respond(&http.Response{}) + if err != nil { + t.Fatalf("autorest: Respond failed (%v)", err) + } + + if s != "123" { + t.Fatalf("autorest: CreateResponder invoked decorators in an incorrect order; expected '123', received '%s'", s) + } +} + +func TestByIgnoring(t *testing.T) { + r := mocks.NewResponse() + + Respond(r, + (func() RespondDecorator { + return func(r Responder) Responder { + return ResponderFunc(func(r2 *http.Response) error { + r1 := mocks.NewResponse() + if !reflect.DeepEqual(r1, r2) { + t.Fatalf("autorest: ByIgnoring modified the HTTP Response -- received %v, expected %v", r2, r1) + } + return nil + }) + } + })(), + ByIgnoring(), + ByClosing()) +} + +func TestByCopying_Copies(t *testing.T) { + r := mocks.NewResponseWithContent(jsonT) + b := &bytes.Buffer{} + + err := Respond(r, + ByCopying(b), + ByUnmarshallingJSON(&mocks.T{}), + ByClosing()) + if err != nil { + t.Fatalf("autorest: ByCopying returned an unexpected error -- %v", err) + } + if b.String() != jsonT { + t.Fatalf("autorest: ByCopying failed to copy the bytes read") + } +} + +func TestByCopying_ReturnsNestedErrors(t *testing.T) { + r := mocks.NewResponseWithContent(jsonT) + + r.Body.Close() + err := Respond(r, + ByCopying(&bytes.Buffer{}), + ByUnmarshallingJSON(&mocks.T{}), + ByClosing()) + if err == nil { + t.Fatalf("autorest: ByCopying 
failed to return the expected error") + } +} + +func TestByCopying_AcceptsNilReponse(t *testing.T) { + r := mocks.NewResponse() + + Respond(r, + (func() RespondDecorator { + return func(r Responder) Responder { + return ResponderFunc(func(resp *http.Response) error { + resp.Body.Close() + r.Respond(nil) + return nil + }) + } + })(), + ByCopying(&bytes.Buffer{})) +} + +func TestByCopying_AcceptsNilBody(t *testing.T) { + r := mocks.NewResponse() + + Respond(r, + (func() RespondDecorator { + return func(r Responder) Responder { + return ResponderFunc(func(resp *http.Response) error { + resp.Body.Close() + resp.Body = nil + r.Respond(resp) + return nil + }) + } + })(), + ByCopying(&bytes.Buffer{})) +} + +func TestByClosing(t *testing.T) { + r := mocks.NewResponse() + err := Respond(r, ByClosing()) + if err != nil { + t.Fatalf("autorest: ByClosing failed (%v)", err) + } + if r.Body.(*mocks.Body).IsOpen() { + t.Fatalf("autorest: ByClosing did not close the response body") + } +} + +func TestByClosingAcceptsNilResponse(t *testing.T) { + r := mocks.NewResponse() + + Respond(r, + (func() RespondDecorator { + return func(r Responder) Responder { + return ResponderFunc(func(resp *http.Response) error { + resp.Body.Close() + r.Respond(nil) + return nil + }) + } + })(), + ByClosing()) +} + +func TestByClosingAcceptsNilBody(t *testing.T) { + r := mocks.NewResponse() + + Respond(r, + (func() RespondDecorator { + return func(r Responder) Responder { + return ResponderFunc(func(resp *http.Response) error { + resp.Body.Close() + resp.Body = nil + r.Respond(resp) + return nil + }) + } + })(), + ByClosing()) +} + +func TestByClosingClosesEvenAfterErrors(t *testing.T) { + var e error + + r := mocks.NewResponse() + Respond(r, + withErrorRespondDecorator(&e), + ByClosing()) + + if r.Body.(*mocks.Body).IsOpen() { + t.Fatalf("autorest: ByClosing did not close the response body after an error occurred") + } +} + +func TestByClosingClosesReturnsNestedErrors(t *testing.T) { + var e error + + 
r := mocks.NewResponse() + err := Respond(r, + withErrorRespondDecorator(&e), + ByClosing()) + + if err == nil || !reflect.DeepEqual(e, err) { + t.Fatalf("autorest: ByClosing failed to return a nested error") + } +} + +func TestByClosingIfErrorAcceptsNilResponse(t *testing.T) { + var e error + + r := mocks.NewResponse() + + Respond(r, + withErrorRespondDecorator(&e), + (func() RespondDecorator { + return func(r Responder) Responder { + return ResponderFunc(func(resp *http.Response) error { + resp.Body.Close() + r.Respond(nil) + return nil + }) + } + })(), + ByClosingIfError()) +} + +func TestByClosingIfErrorAcceptsNilBody(t *testing.T) { + var e error + + r := mocks.NewResponse() + + Respond(r, + withErrorRespondDecorator(&e), + (func() RespondDecorator { + return func(r Responder) Responder { + return ResponderFunc(func(resp *http.Response) error { + resp.Body.Close() + resp.Body = nil + r.Respond(resp) + return nil + }) + } + })(), + ByClosingIfError()) +} + +func TestByClosingIfErrorClosesIfAnErrorOccurs(t *testing.T) { + var e error + + r := mocks.NewResponse() + Respond(r, + withErrorRespondDecorator(&e), + ByClosingIfError()) + + if r.Body.(*mocks.Body).IsOpen() { + t.Fatalf("autorest: ByClosingIfError did not close the response body after an error occurred") + } +} + +func TestByClosingIfErrorDoesNotClosesIfNoErrorOccurs(t *testing.T) { + r := mocks.NewResponse() + Respond(r, + ByClosingIfError()) + + if !r.Body.(*mocks.Body).IsOpen() { + t.Fatalf("autorest: ByClosingIfError closed the response body even though no error occurred") + } +} + +func TestByUnmarshallingJSON(t *testing.T) { + v := &mocks.T{} + r := mocks.NewResponseWithContent(jsonT) + err := Respond(r, + ByUnmarshallingJSON(v), + ByClosing()) + if err != nil { + t.Fatalf("autorest: ByUnmarshallingJSON failed (%v)", err) + } + if v.Name != "Rob Pike" || v.Age != 42 { + t.Fatalf("autorest: ByUnmarshallingJSON failed to properly unmarshal") + } +} + +func TestByUnmarshallingJSON_HandlesReadErrors(t 
*testing.T) { + v := &mocks.T{} + r := mocks.NewResponseWithContent(jsonT) + r.Body.(*mocks.Body).Close() + + err := Respond(r, + ByUnmarshallingJSON(v), + ByClosing()) + if err == nil { + t.Fatalf("autorest: ByUnmarshallingJSON failed to receive / respond to read error") + } +} + +func TestByUnmarshallingJSONIncludesJSONInErrors(t *testing.T) { + v := &mocks.T{} + j := jsonT[0 : len(jsonT)-2] + r := mocks.NewResponseWithContent(j) + err := Respond(r, + ByUnmarshallingJSON(v), + ByClosing()) + if err == nil || !strings.Contains(err.Error(), j) { + t.Fatalf("autorest: ByUnmarshallingJSON failed to return JSON in error (%v)", err) + } +} + +func TestByUnmarshallingJSONEmptyInput(t *testing.T) { + v := &mocks.T{} + r := mocks.NewResponseWithContent(``) + err := Respond(r, + ByUnmarshallingJSON(v), + ByClosing()) + if err != nil { + t.Fatalf("autorest: ByUnmarshallingJSON failed to return nil in case of empty JSON (%v)", err) + } +} + +func TestByUnmarshallingXML(t *testing.T) { + v := &mocks.T{} + r := mocks.NewResponseWithContent(xmlT) + err := Respond(r, + ByUnmarshallingXML(v), + ByClosing()) + if err != nil { + t.Fatalf("autorest: ByUnmarshallingXML failed (%v)", err) + } + if v.Name != "Rob Pike" || v.Age != 42 { + t.Fatalf("autorest: ByUnmarshallingXML failed to properly unmarshal") + } +} + +func TestByUnmarshallingXML_HandlesReadErrors(t *testing.T) { + v := &mocks.T{} + r := mocks.NewResponseWithContent(xmlT) + r.Body.(*mocks.Body).Close() + + err := Respond(r, + ByUnmarshallingXML(v), + ByClosing()) + if err == nil { + t.Fatalf("autorest: ByUnmarshallingXML failed to receive / respond to read error") + } +} + +func TestByUnmarshallingXMLIncludesXMLInErrors(t *testing.T) { + v := &mocks.T{} + x := xmlT[0 : len(xmlT)-2] + r := mocks.NewResponseWithContent(x) + err := Respond(r, + ByUnmarshallingXML(v), + ByClosing()) + if err == nil || !strings.Contains(err.Error(), x) { + t.Fatalf("autorest: ByUnmarshallingXML failed to return XML in error (%v)", err) + } +} 
+ +func TestRespondAcceptsNullResponse(t *testing.T) { + err := Respond(nil) + if err != nil { + t.Fatalf("autorest: Respond returned an unexpected error when given a null Response (%v)", err) + } +} + +func TestWithErrorUnlessStatusCode(t *testing.T) { + r := mocks.NewResponse() + r.Request = mocks.NewRequest() + r.Status = "400 BadRequest" + r.StatusCode = http.StatusBadRequest + + err := Respond(r, + WithErrorUnlessStatusCode(http.StatusBadRequest, http.StatusUnauthorized, http.StatusInternalServerError), + ByClosingIfError()) + + if err != nil { + t.Fatalf("autorest: WithErrorUnlessStatusCode returned an error (%v) for an acceptable status code (%s)", err, r.Status) + } +} + +func TestWithErrorUnlessStatusCodeEmitsErrorForUnacceptableStatusCode(t *testing.T) { + r := mocks.NewResponse() + r.Request = mocks.NewRequest() + r.Status = "400 BadRequest" + r.StatusCode = http.StatusBadRequest + + err := Respond(r, + WithErrorUnlessStatusCode(http.StatusOK, http.StatusUnauthorized, http.StatusInternalServerError), + ByClosingIfError()) + + if err == nil { + t.Fatalf("autorest: WithErrorUnlessStatusCode failed to return an error for an unacceptable status code (%s)", r.Status) + } +} + +func TestWithErrorUnlessOK(t *testing.T) { + r := mocks.NewResponse() + r.Request = mocks.NewRequest() + + err := Respond(r, + WithErrorUnlessOK(), + ByClosingIfError()) + + if err != nil { + t.Fatalf("autorest: WithErrorUnlessOK returned an error for OK status code (%v)", err) + } +} + +func TestWithErrorUnlessOKEmitsErrorIfNotOK(t *testing.T) { + r := mocks.NewResponse() + r.Request = mocks.NewRequest() + r.Status = "400 BadRequest" + r.StatusCode = http.StatusBadRequest + + err := Respond(r, + WithErrorUnlessOK(), + ByClosingIfError()) + + if err == nil { + t.Fatalf("autorest: WithErrorUnlessOK failed to return an error for a non-OK status code (%v)", err) + } +} + +func TestExtractHeader(t *testing.T) { + r := mocks.NewResponse() + v := []string{"v1", "v2", "v3"} + 
mocks.SetResponseHeaderValues(r, mocks.TestHeader, v) + + if !reflect.DeepEqual(ExtractHeader(mocks.TestHeader, r), v) { + t.Fatalf("autorest: ExtractHeader failed to retrieve the expected header -- expected [%s]%v, received [%s]%v", + mocks.TestHeader, v, mocks.TestHeader, ExtractHeader(mocks.TestHeader, r)) + } +} + +func TestExtractHeaderHandlesMissingHeader(t *testing.T) { + var v []string + r := mocks.NewResponse() + + if !reflect.DeepEqual(ExtractHeader(mocks.TestHeader, r), v) { + t.Fatalf("autorest: ExtractHeader failed to handle a missing header -- expected %v, received %v", + v, ExtractHeader(mocks.TestHeader, r)) + } +} + +func TestExtractHeaderValue(t *testing.T) { + r := mocks.NewResponse() + v := "v1" + mocks.SetResponseHeader(r, mocks.TestHeader, v) + + if ExtractHeaderValue(mocks.TestHeader, r) != v { + t.Fatalf("autorest: ExtractHeader failed to retrieve the expected header -- expected [%s]%v, received [%s]%v", + mocks.TestHeader, v, mocks.TestHeader, ExtractHeaderValue(mocks.TestHeader, r)) + } +} + +func TestExtractHeaderValueHandlesMissingHeader(t *testing.T) { + r := mocks.NewResponse() + v := "" + + if ExtractHeaderValue(mocks.TestHeader, r) != v { + t.Fatalf("autorest: ExtractHeader failed to retrieve the expected header -- expected [%s]%v, received [%s]%v", + mocks.TestHeader, v, mocks.TestHeader, ExtractHeaderValue(mocks.TestHeader, r)) + } +} + +func TestExtractHeaderValueRetrievesFirstValue(t *testing.T) { + r := mocks.NewResponse() + v := []string{"v1", "v2", "v3"} + mocks.SetResponseHeaderValues(r, mocks.TestHeader, v) + + if ExtractHeaderValue(mocks.TestHeader, r) != v[0] { + t.Fatalf("autorest: ExtractHeader failed to retrieve the expected header -- expected [%s]%v, received [%s]%v", + mocks.TestHeader, v[0], mocks.TestHeader, ExtractHeaderValue(mocks.TestHeader, r)) + } +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/go-autorest/autorest/sender.go juju-core-2.0.0/src/github.com/Azure/go-autorest/autorest/sender.go --- 
juju-core-2.0~beta15/src/github.com/Azure/go-autorest/autorest/sender.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/go-autorest/autorest/sender.go 2016-10-13 14:31:55.000000000 +0000 @@ -0,0 +1,267 @@ +package autorest + +import ( + "bytes" + "fmt" + "io/ioutil" + "log" + "math" + "net/http" + "time" +) + +// Sender is the interface that wraps the Do method to send HTTP requests. +// +// The standard http.Client conforms to this interface. +type Sender interface { + Do(*http.Request) (*http.Response, error) +} + +// SenderFunc is a method that implements the Sender interface. +type SenderFunc func(*http.Request) (*http.Response, error) + +// Do implements the Sender interface on SenderFunc. +func (sf SenderFunc) Do(r *http.Request) (*http.Response, error) { + return sf(r) +} + +// SendDecorator takes and possibly decorates, by wrapping, a Sender. Decorators may affect the +// http.Request and pass it along or, first, pass the http.Request along then react to the +// http.Response result. +type SendDecorator func(Sender) Sender + +// CreateSender creates, decorates, and returns, as a Sender, the default http.Client. +func CreateSender(decorators ...SendDecorator) Sender { + return DecorateSender(&http.Client{}, decorators...) +} + +// DecorateSender accepts a Sender and a, possibly empty, set of SendDecorators, which it applies to +// the Sender. Decorators are applied in the order received, but their effect upon the request +// depends on whether they are a pre-decorator (change the http.Request and then pass it along) or a +// post-decorator (pass the http.Request along and react to the results in http.Response). +func DecorateSender(s Sender, decorators ...SendDecorator) Sender { + for _, decorate := range decorators { + s = decorate(s) + } + return s +} + +// Send sends, by means of the default http.Client, the passed http.Request, returning the +// http.Response and possible error. 
It also accepts a, possibly empty, set of SendDecorators which +// it will apply to the http.Client before invoking the Do method. +// +// Send is a convenience method and not recommended for production. Advanced users should use +// SendWithSender, passing and sharing their own Sender (e.g., instance of http.Client). +// +// Send will not poll or retry requests. +func Send(r *http.Request, decorators ...SendDecorator) (*http.Response, error) { + return SendWithSender(&http.Client{}, r, decorators...) +} + +// SendWithSender sends the passed http.Request, through the provided Sender, returning the +// http.Response and possible error. It also accepts a, possibly empty, set of SendDecorators which +// it will apply to the http.Client before invoking the Do method. +// +// SendWithSender will not poll or retry requests. +func SendWithSender(s Sender, r *http.Request, decorators ...SendDecorator) (*http.Response, error) { + return DecorateSender(s, decorators...).Do(r) +} + +// AfterDelay returns a SendDecorator that delays for the passed time.Duration before +// invoking the Sender. The delay may be terminated by closing the optional channel on the +// http.Request. If canceled, no further Senders are invoked. +func AfterDelay(d time.Duration) SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (*http.Response, error) { + if !DelayForBackoff(d, 1, r.Cancel) { + return nil, fmt.Errorf("autorest: AfterDelay canceled before full delay") + } + return s.Do(r) + }) + } +} + +// AsIs returns a SendDecorator that invokes the passed Sender without modifying the http.Request. +func AsIs() SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (*http.Response, error) { + return s.Do(r) + }) + } +} + +// DoCloseIfError returns a SendDecorator that first invokes the passed Sender after which +// it closes the response if the passed Sender returns an error and the response body exists. 
+func DoCloseIfError() SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (*http.Response, error) { + resp, err := s.Do(r) + if err != nil { + Respond(resp, ByClosing()) + } + return resp, err + }) + } +} + +// DoErrorIfStatusCode returns a SendDecorator that emits an error if the response StatusCode is +// among the set passed. Since these are artificial errors, the response body may still require +// closing. +func DoErrorIfStatusCode(codes ...int) SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (*http.Response, error) { + resp, err := s.Do(r) + if err == nil && ResponseHasStatusCode(resp, codes...) { + err = NewErrorWithResponse("autorest", "DoErrorIfStatusCode", resp, "%v %v failed with %s", + resp.Request.Method, + resp.Request.URL, + resp.Status) + } + return resp, err + }) + } +} + +// DoErrorUnlessStatusCode returns a SendDecorator that emits an error unless the response +// StatusCode is among the set passed. Since these are artificial errors, the response body +// may still require closing. +func DoErrorUnlessStatusCode(codes ...int) SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (*http.Response, error) { + resp, err := s.Do(r) + if err == nil && !ResponseHasStatusCode(resp, codes...) { + err = NewErrorWithResponse("autorest", "DoErrorUnlessStatusCode", resp, "%v %v failed with %s", + resp.Request.Method, + resp.Request.URL, + resp.Status) + } + return resp, err + }) + } +} + +// DoPollForStatusCodes returns a SendDecorator that polls if the http.Response contains one of the +// passed status codes. It expects the http.Response to contain a Location header providing the +// URL at which to poll (using GET) and will poll until the time passed is equal to or greater than +// the supplied duration. It will delay between requests for the duration specified in the +// RetryAfter header or, if the header is absent, the passed delay. 
Polling may be canceled by +// closing the optional channel on the http.Request. +func DoPollForStatusCodes(duration time.Duration, delay time.Duration, codes ...int) SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (resp *http.Response, err error) { + resp, err = s.Do(r) + + if err == nil && ResponseHasStatusCode(resp, codes...) { + r, err = NewPollingRequest(resp, r.Cancel) + + for err == nil && ResponseHasStatusCode(resp, codes...) { + Respond(resp, + ByClosing()) + resp, err = SendWithSender(s, r, + AfterDelay(GetRetryAfter(resp, delay))) + } + } + + return resp, err + }) + } +} + +// DoRetryForAttempts returns a SendDecorator that retries a failed request for up to the specified +// number of attempts, exponentially backing off between requests using the supplied backoff +// time.Duration (which may be zero). Retrying may be canceled by closing the optional channel on +// the http.Request. +func DoRetryForAttempts(attempts int, backoff time.Duration) SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (resp *http.Response, err error) { + for attempt := 0; attempt < attempts; attempt++ { + resp, err = s.Do(r) + if err == nil { + return resp, err + } + DelayForBackoff(backoff, attempt, r.Cancel) + } + return resp, err + }) + } +} + +// DoRetryForStatusCodes returns a SendDecorator that retries for specified statusCodes for up to the specified +// number of attempts, exponentially backing off between requests using the supplied backoff +// time.Duration (which may be zero). Retrying may be canceled by closing the optional channel on +// the http.Request. 
+func DoRetryForStatusCodes(attempts int, backoff time.Duration, codes ...int) SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (resp *http.Response, err error) { + b := []byte{} + if r.Body != nil { + b, err = ioutil.ReadAll(r.Body) + if err != nil { + return resp, err + } + } + + // Increment to add the first call (attempts denotes number of retries) + attempts++ + for attempt := 0; attempt < attempts; attempt++ { + r.Body = ioutil.NopCloser(bytes.NewBuffer(b)) + resp, err = s.Do(r) + if err != nil || !ResponseHasStatusCode(resp, codes...) { + return resp, err + } + DelayForBackoff(backoff, attempt, r.Cancel) + } + return resp, err + }) + } +} + +// DoRetryForDuration returns a SendDecorator that retries the request until the total time is equal +// to or greater than the specified duration, exponentially backing off between requests using the +// supplied backoff time.Duration (which may be zero). Retrying may be canceled by closing the +// optional channel on the http.Request. +func DoRetryForDuration(d time.Duration, backoff time.Duration) SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (resp *http.Response, err error) { + end := time.Now().Add(d) + for attempt := 0; time.Now().Before(end); attempt++ { + resp, err = s.Do(r) + if err == nil { + return resp, err + } + DelayForBackoff(backoff, attempt, r.Cancel) + } + return resp, err + }) + } +} + +// WithLogging returns a SendDecorator that implements simple before and after logging of the +// request. 
+func WithLogging(logger *log.Logger) SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (*http.Response, error) { + logger.Printf("Sending %s %s", r.Method, r.URL) + resp, err := s.Do(r) + if err != nil { + logger.Printf("%s %s received error '%v'", r.Method, r.URL, err) + } else { + logger.Printf("%s %s received %s", r.Method, r.URL, resp.Status) + } + return resp, err + }) + } +} + +// DelayForBackoff invokes time.After for the supplied backoff duration raised to the power of +// passed attempt (i.e., an exponential backoff delay). Backoff duration is in seconds and can be set +// to zero for no delay. The delay may be canceled by closing the passed channel. If terminated early, +// returns false. +func DelayForBackoff(backoff time.Duration, attempt int, cancel <-chan struct{}) bool { + select { + case <-time.After(time.Duration(backoff.Seconds()*math.Pow(2, float64(attempt))) * time.Second): + return true + case <-cancel: + return false + } +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/go-autorest/autorest/sender_test.go juju-core-2.0.0/src/github.com/Azure/go-autorest/autorest/sender_test.go --- juju-core-2.0~beta15/src/github.com/Azure/go-autorest/autorest/sender_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/go-autorest/autorest/sender_test.go 2016-10-13 14:31:55.000000000 +0000 @@ -0,0 +1,732 @@ +package autorest + +import ( + "bytes" + "fmt" + "log" + "net/http" + "os" + "reflect" + "sync" + "testing" + "time" + + "github.com/Azure/go-autorest/autorest/mocks" +) + +func ExampleSendWithSender() { + r := mocks.NewResponseWithStatus("202 Accepted", http.StatusAccepted) + mocks.SetAcceptedHeaders(r) + + client := mocks.NewSender() + client.AppendAndRepeatResponse(r, 10) + + logger := log.New(os.Stdout, "autorest: ", 0) + na := NullAuthorizer{} + + req, _ := Prepare(&http.Request{}, + AsGet(), + WithBaseURL("https://microsoft.com/a/b/c/"), + na.WithAuthorization()) + + r, _ = 
SendWithSender(client, req, + WithLogging(logger), + DoErrorIfStatusCode(http.StatusAccepted), + DoCloseIfError(), + DoRetryForAttempts(5, time.Duration(0))) + + Respond(r, + ByClosing()) + + // Output: + // autorest: Sending GET https://microsoft.com/a/b/c/ + // autorest: GET https://microsoft.com/a/b/c/ received 202 Accepted + // autorest: Sending GET https://microsoft.com/a/b/c/ + // autorest: GET https://microsoft.com/a/b/c/ received 202 Accepted + // autorest: Sending GET https://microsoft.com/a/b/c/ + // autorest: GET https://microsoft.com/a/b/c/ received 202 Accepted + // autorest: Sending GET https://microsoft.com/a/b/c/ + // autorest: GET https://microsoft.com/a/b/c/ received 202 Accepted + // autorest: Sending GET https://microsoft.com/a/b/c/ + // autorest: GET https://microsoft.com/a/b/c/ received 202 Accepted +} + +func ExampleDoRetryForAttempts() { + client := mocks.NewSender() + client.SetAndRepeatError(fmt.Errorf("Faux Error"), 10) + + // Retry with backoff -- ensure returned Bodies are closed + r, _ := SendWithSender(client, mocks.NewRequest(), + DoCloseIfError(), + DoRetryForAttempts(5, time.Duration(0))) + + Respond(r, + ByClosing()) + + fmt.Printf("Retry stopped after %d attempts", client.Attempts()) + // Output: Retry stopped after 5 attempts +} + +func ExampleDoErrorIfStatusCode() { + client := mocks.NewSender() + client.AppendAndRepeatResponse(mocks.NewResponseWithStatus("204 NoContent", http.StatusNoContent), 10) + + // Chain decorators to retry the request, up to five times, if the status code is 204 + r, _ := SendWithSender(client, mocks.NewRequest(), + DoErrorIfStatusCode(http.StatusNoContent), + DoCloseIfError(), + DoRetryForAttempts(5, time.Duration(0))) + + Respond(r, + ByClosing()) + + fmt.Printf("Retry stopped after %d attempts with code %s", client.Attempts(), r.Status) + // Output: Retry stopped after 5 attempts with code 204 NoContent +} + +func TestSendWithSenderRunsDecoratorsInOrder(t *testing.T) { + client := mocks.NewSender() + 
s := "" + + r, err := SendWithSender(client, mocks.NewRequest(), + withMessage(&s, "a"), + withMessage(&s, "b"), + withMessage(&s, "c")) + if err != nil { + t.Fatalf("autorest: SendWithSender returned an error (%v)", err) + } + + Respond(r, + ByClosing()) + + if s != "abc" { + t.Fatalf("autorest: SendWithSender invoke decorators out of order; expected 'abc', received '%s'", s) + } +} + +func TestCreateSender(t *testing.T) { + f := false + + s := CreateSender( + (func() SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (*http.Response, error) { + f = true + return nil, nil + }) + } + })()) + s.Do(&http.Request{}) + + if !f { + t.Fatal("autorest: CreateSender failed to apply supplied decorator") + } +} + +func TestSend(t *testing.T) { + f := false + + Send(&http.Request{}, + (func() SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (*http.Response, error) { + f = true + return nil, nil + }) + } + })()) + + if !f { + t.Fatal("autorest: Send failed to apply supplied decorator") + } +} + +func TestAfterDelayWaits(t *testing.T) { + client := mocks.NewSender() + + d := 2 * time.Second + + tt := time.Now() + r, _ := SendWithSender(client, mocks.NewRequest(), + AfterDelay(d)) + s := time.Since(tt) + if s < d { + t.Fatal("autorest: AfterDelay failed to wait for at least the specified duration") + } + + Respond(r, + ByClosing()) +} + +func TestAfterDelay_Cancels(t *testing.T) { + client := mocks.NewSender() + cancel := make(chan struct{}) + delay := 5 * time.Second + + var wg sync.WaitGroup + wg.Add(1) + tt := time.Now() + go func() { + req := mocks.NewRequest() + req.Cancel = cancel + wg.Done() + SendWithSender(client, req, + AfterDelay(delay)) + }() + wg.Wait() + close(cancel) + time.Sleep(5 * time.Millisecond) + if time.Since(tt) >= delay { + t.Fatal("autorest: AfterDelay failed to cancel") + } +} + +func TestAfterDelayDoesNotWaitTooLong(t *testing.T) { + client := mocks.NewSender() + + d := 
5 * time.Millisecond + start := time.Now() + r, _ := SendWithSender(client, mocks.NewRequest(), + AfterDelay(d)) + + if time.Since(start) > (5 * d) { + t.Fatal("autorest: AfterDelay waited too long (exceeded 5 times specified duration)") + } + + Respond(r, + ByClosing()) +} + +func TestAsIs(t *testing.T) { + client := mocks.NewSender() + + r1 := mocks.NewResponse() + client.AppendResponse(r1) + + r2, err := SendWithSender(client, mocks.NewRequest(), + AsIs()) + if err != nil { + t.Fatalf("autorest: AsIs returned an unexpected error (%v)", err) + } else if !reflect.DeepEqual(r1, r2) { + t.Fatalf("autorest: AsIs modified the response -- received %v, expected %v", r2, r1) + } + + Respond(r1, + ByClosing()) + Respond(r2, + ByClosing()) +} + +func TestDoCloseIfError(t *testing.T) { + client := mocks.NewSender() + client.AppendResponse(mocks.NewResponseWithStatus("400 BadRequest", http.StatusBadRequest)) + + r, _ := SendWithSender(client, mocks.NewRequest(), + DoErrorIfStatusCode(http.StatusBadRequest), + DoCloseIfError()) + + if r.Body.(*mocks.Body).IsOpen() { + t.Fatal("autorest: Expected DoCloseIfError to close response body -- it was left open") + } + + Respond(r, + ByClosing()) +} + +func TestDoCloseIfErrorAcceptsNilResponse(t *testing.T) { + client := mocks.NewSender() + + SendWithSender(client, mocks.NewRequest(), + (func() SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (*http.Response, error) { + resp, err := s.Do(r) + if err != nil { + resp.Body.Close() + } + return nil, fmt.Errorf("Faux Error") + }) + } + })(), + DoCloseIfError()) +} + +func TestDoCloseIfErrorAcceptsNilBody(t *testing.T) { + client := mocks.NewSender() + + SendWithSender(client, mocks.NewRequest(), + (func() SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (*http.Response, error) { + resp, err := s.Do(r) + if err != nil { + resp.Body.Close() + } + resp.Body = nil + return resp, fmt.Errorf("Faux Error") + }) + 
} + })(), + DoCloseIfError()) +} + +func TestDoErrorIfStatusCode(t *testing.T) { + client := mocks.NewSender() + client.AppendResponse(mocks.NewResponseWithStatus("400 BadRequest", http.StatusBadRequest)) + + r, err := SendWithSender(client, mocks.NewRequest(), + DoErrorIfStatusCode(http.StatusBadRequest), + DoCloseIfError()) + if err == nil { + t.Fatal("autorest: DoErrorIfStatusCode failed to emit an error for passed code") + } + + Respond(r, + ByClosing()) +} + +func TestDoErrorIfStatusCodeIgnoresStatusCodes(t *testing.T) { + client := mocks.NewSender() + client.AppendResponse(newAcceptedResponse()) + + r, err := SendWithSender(client, mocks.NewRequest(), + DoErrorIfStatusCode(http.StatusBadRequest), + DoCloseIfError()) + if err != nil { + t.Fatal("autorest: DoErrorIfStatusCode failed to ignore a status code") + } + + Respond(r, + ByClosing()) +} + +func TestDoErrorUnlessStatusCode(t *testing.T) { + client := mocks.NewSender() + client.AppendResponse(mocks.NewResponseWithStatus("400 BadRequest", http.StatusBadRequest)) + + r, err := SendWithSender(client, mocks.NewRequest(), + DoErrorUnlessStatusCode(http.StatusAccepted), + DoCloseIfError()) + if err == nil { + t.Fatal("autorest: DoErrorUnlessStatusCode failed to emit an error for an unknown status code") + } + + Respond(r, + ByClosing()) +} + +func TestDoErrorUnlessStatusCodeIgnoresStatusCodes(t *testing.T) { + client := mocks.NewSender() + client.AppendResponse(newAcceptedResponse()) + + r, err := SendWithSender(client, mocks.NewRequest(), + DoErrorUnlessStatusCode(http.StatusAccepted), + DoCloseIfError()) + if err != nil { + t.Fatal("autorest: DoErrorUnlessStatusCode emitted an error for a knonwn status code") + } + + Respond(r, + ByClosing()) +} + +func TestDoRetryForAttemptsStopsAfterSuccess(t *testing.T) { + client := mocks.NewSender() + + r, err := SendWithSender(client, mocks.NewRequest(), + DoRetryForAttempts(5, time.Duration(0))) + if client.Attempts() != 1 { + t.Fatalf("autorest: DoRetryForAttempts 
failed to stop after success -- expected attempts %v, actual %v", + 1, client.Attempts()) + } + if err != nil { + t.Fatalf("autorest: DoRetryForAttempts returned an unexpected error (%v)", err) + } + + Respond(r, + ByClosing()) +} + +func TestDoRetryForAttemptsStopsAfterAttempts(t *testing.T) { + client := mocks.NewSender() + client.SetAndRepeatError(fmt.Errorf("Faux Error"), 10) + + r, err := SendWithSender(client, mocks.NewRequest(), + DoRetryForAttempts(5, time.Duration(0)), + DoCloseIfError()) + if err == nil { + t.Fatal("autorest: Mock client failed to emit errors") + } + + Respond(r, + ByClosing()) + + if client.Attempts() != 5 { + t.Fatal("autorest: DoRetryForAttempts failed to stop after specified number of attempts") + } +} + +func TestDoRetryForAttemptsReturnsResponse(t *testing.T) { + client := mocks.NewSender() + client.SetError(fmt.Errorf("Faux Error")) + + r, err := SendWithSender(client, mocks.NewRequest(), + DoRetryForAttempts(1, time.Duration(0))) + if err == nil { + t.Fatal("autorest: Mock client failed to emit errors") + } + + if r == nil { + t.Fatal("autorest: DoRetryForAttempts failed to return the underlying response") + } + + Respond(r, + ByClosing()) +} + +func TestDoRetryForDurationStopsAfterSuccess(t *testing.T) { + client := mocks.NewSender() + + r, err := SendWithSender(client, mocks.NewRequest(), + DoRetryForDuration(10*time.Millisecond, time.Duration(0))) + if client.Attempts() != 1 { + t.Fatalf("autorest: DoRetryForDuration failed to stop after success -- expected attempts %v, actual %v", + 1, client.Attempts()) + } + if err != nil { + t.Fatalf("autorest: DoRetryForDuration returned an unexpected error (%v)", err) + } + + Respond(r, + ByClosing()) +} + +func TestDoRetryForDurationStopsAfterDuration(t *testing.T) { + client := mocks.NewSender() + client.SetAndRepeatError(fmt.Errorf("Faux Error"), -1) + + d := 5 * time.Millisecond + start := time.Now() + r, err := SendWithSender(client, mocks.NewRequest(), + DoRetryForDuration(d, 
time.Duration(0)), + DoCloseIfError()) + if err == nil { + t.Fatal("autorest: Mock client failed to emit errors") + } + + if time.Since(start) < d { + t.Fatal("autorest: DoRetryForDuration failed stopped too soon") + } + + Respond(r, + ByClosing()) +} + +func TestDoRetryForDurationStopsWithinReason(t *testing.T) { + client := mocks.NewSender() + client.SetAndRepeatError(fmt.Errorf("Faux Error"), -1) + + d := 5 * time.Second + start := time.Now() + r, err := SendWithSender(client, mocks.NewRequest(), + DoRetryForDuration(d, time.Duration(0)), + DoCloseIfError()) + if err == nil { + t.Fatal("autorest: Mock client failed to emit errors") + } + + if time.Since(start) > (5 * d) { + t.Fatal("autorest: DoRetryForDuration failed stopped soon enough (exceeded 5 times specified duration)") + } + + Respond(r, + ByClosing()) +} + +func TestDoRetryForDurationReturnsResponse(t *testing.T) { + client := mocks.NewSender() + client.SetAndRepeatError(fmt.Errorf("Faux Error"), -1) + + r, err := SendWithSender(client, mocks.NewRequest(), + DoRetryForDuration(10*time.Millisecond, time.Duration(0)), + DoCloseIfError()) + if err == nil { + t.Fatal("autorest: Mock client failed to emit errors") + } + + if r == nil { + t.Fatal("autorest: DoRetryForDuration failed to return the underlying response") + } + + Respond(r, + ByClosing()) +} + +func TestDelayForBackoff(t *testing.T) { + d := 2 * time.Second + start := time.Now() + DelayForBackoff(d, 1, nil) + if time.Since(start) < d { + t.Fatal("autorest: DelayForBackoff did not delay as long as expected") + } +} + +func TestDelayForBackoff_Cancels(t *testing.T) { + cancel := make(chan struct{}) + delay := 5 * time.Second + + var wg sync.WaitGroup + wg.Add(1) + start := time.Now() + go func() { + wg.Done() + DelayForBackoff(delay, 1, cancel) + }() + wg.Wait() + close(cancel) + time.Sleep(5 * time.Millisecond) + if time.Since(start) >= delay { + t.Fatal("autorest: DelayForBackoff failed to cancel") + } +} + +func TestDelayForBackoffWithinReason(t 
*testing.T) { + d := 5 * time.Second + start := time.Now() + DelayForBackoff(d, 1, nil) + if time.Since(start) > (5 * d) { + t.Fatal("autorest: DelayForBackoff delayed too long (exceeded 5 times the specified duration)") + } +} + +func TestDoPollForStatusCodes_IgnoresUnspecifiedStatusCodes(t *testing.T) { + client := mocks.NewSender() + + r, _ := SendWithSender(client, mocks.NewRequest(), + DoPollForStatusCodes(time.Duration(0), time.Duration(0))) + + if client.Attempts() != 1 { + t.Fatalf("autorest: Sender#DoPollForStatusCodes polled for unspecified status code") + } + + Respond(r, + ByClosing()) +} + +func TestDoPollForStatusCodes_PollsForSpecifiedStatusCodes(t *testing.T) { + client := mocks.NewSender() + client.AppendResponse(newAcceptedResponse()) + + r, _ := SendWithSender(client, mocks.NewRequest(), + DoPollForStatusCodes(time.Millisecond, time.Millisecond, http.StatusAccepted)) + + if client.Attempts() != 2 { + t.Fatalf("autorest: Sender#DoPollForStatusCodes failed to poll for specified status code") + } + + Respond(r, + ByClosing()) +} + +func TestDoPollForStatusCodes_CanBeCanceled(t *testing.T) { + cancel := make(chan struct{}) + delay := 5 * time.Second + + r := mocks.NewResponse() + mocks.SetAcceptedHeaders(r) + client := mocks.NewSender() + client.AppendAndRepeatResponse(r, 100) + + var wg sync.WaitGroup + wg.Add(1) + start := time.Now() + go func() { + wg.Done() + r, _ := SendWithSender(client, mocks.NewRequest(), + DoPollForStatusCodes(time.Millisecond, time.Millisecond, http.StatusAccepted)) + Respond(r, + ByClosing()) + }() + wg.Wait() + close(cancel) + time.Sleep(5 * time.Millisecond) + if time.Since(start) >= delay { + t.Fatalf("autorest: Sender#DoPollForStatusCodes failed to cancel") + } +} + +func TestDoPollForStatusCodes_ClosesAllNonreturnedResponseBodiesWhenPolling(t *testing.T) { + resp := newAcceptedResponse() + + client := mocks.NewSender() + client.AppendAndRepeatResponse(resp, 2) + + r, _ := SendWithSender(client, mocks.NewRequest(), + 
DoPollForStatusCodes(time.Millisecond, time.Millisecond, http.StatusAccepted)) + + if resp.Body.(*mocks.Body).IsOpen() || resp.Body.(*mocks.Body).CloseAttempts() < 2 { + t.Fatalf("autorest: Sender#DoPollForStatusCodes did not close unreturned response bodies") + } + + Respond(r, + ByClosing()) +} + +func TestDoPollForStatusCodes_LeavesLastResponseBodyOpen(t *testing.T) { + client := mocks.NewSender() + client.AppendResponse(newAcceptedResponse()) + + r, _ := SendWithSender(client, mocks.NewRequest(), + DoPollForStatusCodes(time.Millisecond, time.Millisecond, http.StatusAccepted)) + + if !r.Body.(*mocks.Body).IsOpen() { + t.Fatalf("autorest: Sender#DoPollForStatusCodes did not leave open the body of the last response") + } + + Respond(r, + ByClosing()) +} + +func TestDoPollForStatusCodes_StopsPollingAfterAnError(t *testing.T) { + client := mocks.NewSender() + client.AppendAndRepeatResponse(newAcceptedResponse(), 5) + client.SetError(fmt.Errorf("Faux Error")) + client.SetEmitErrorAfter(1) + + r, _ := SendWithSender(client, mocks.NewRequest(), + DoPollForStatusCodes(time.Millisecond, time.Millisecond, http.StatusAccepted)) + + if client.Attempts() > 2 { + t.Fatalf("autorest: Sender#DoPollForStatusCodes failed to stop polling after receiving an error") + } + + Respond(r, + ByClosing()) +} + +func TestDoPollForStatusCodes_ReturnsPollingError(t *testing.T) { + client := mocks.NewSender() + client.AppendAndRepeatResponse(newAcceptedResponse(), 5) + client.SetError(fmt.Errorf("Faux Error")) + client.SetEmitErrorAfter(1) + + r, err := SendWithSender(client, mocks.NewRequest(), + DoPollForStatusCodes(time.Millisecond, time.Millisecond, http.StatusAccepted)) + + if err == nil { + t.Fatalf("autorest: Sender#DoPollForStatusCodes failed to return error from polling") + } + + Respond(r, + ByClosing()) +} + +func TestWithLogging_Logs(t *testing.T) { + buf := &bytes.Buffer{} + logger := log.New(buf, "autorest: ", 0) + client := mocks.NewSender() + + r, _ := SendWithSender(client, 
&http.Request{}, + WithLogging(logger)) + + if buf.String() == "" { + t.Fatal("autorest: Sender#WithLogging failed to log the request") + } + + Respond(r, + ByClosing()) +} + +func TestWithLogging_HandlesMissingResponse(t *testing.T) { + buf := &bytes.Buffer{} + logger := log.New(buf, "autorest: ", 0) + client := mocks.NewSender() + client.AppendResponse(nil) + client.SetError(fmt.Errorf("Faux Error")) + + r, err := SendWithSender(client, &http.Request{}, + WithLogging(logger)) + + if r != nil || err == nil { + t.Fatal("autorest: Sender#WithLogging returned a valid response -- expecting nil") + } + if buf.String() == "" { + t.Fatal("autorest: Sender#WithLogging failed to log the request for a nil response") + } + + Respond(r, + ByClosing()) +} + +func TestDoRetryForStatusCodesWithSuccess(t *testing.T) { + client := mocks.NewSender() + client.AppendAndRepeatResponse(mocks.NewResponseWithStatus("408 Request Timeout", http.StatusRequestTimeout), 2) + client.AppendResponse(mocks.NewResponseWithStatus("200 OK", http.StatusOK)) + + r, _ := SendWithSender(client, mocks.NewRequest(), + DoRetryForStatusCodes(5, time.Duration(2*time.Second), http.StatusRequestTimeout), + ) + + Respond(r, + ByClosing()) + + if client.Attempts() != 3 { + t.Fatalf("autorest: Sender#DoRetryForStatusCodes -- Got: StatusCode %v in %v attempts; Want: StatusCode 200 OK in 2 attempts -- ", + r.Status, client.Attempts()-1) + } +} + +func TestDoRetryForStatusCodesWithNoSuccess(t *testing.T) { + client := mocks.NewSender() + client.AppendAndRepeatResponse(mocks.NewResponseWithStatus("504 Gateway Timeout", http.StatusGatewayTimeout), 5) + + r, _ := SendWithSender(client, mocks.NewRequest(), + DoRetryForStatusCodes(2, time.Duration(2*time.Second), http.StatusGatewayTimeout), + ) + Respond(r, + ByClosing()) + + if client.Attempts() != 3 { + t.Fatalf("autorest: Sender#DoRetryForStatusCodes -- Got: failed stop after %v retry attempts; Want: Stop after 2 retry attempts", + client.Attempts()-1) + } +} + +func 
TestDoRetryForStatusCodes_CodeNotInRetryList(t *testing.T) { + client := mocks.NewSender() + client.AppendAndRepeatResponse(mocks.NewResponseWithStatus("204 No Content", http.StatusNoContent), 1) + + r, _ := SendWithSender(client, mocks.NewRequest(), + DoRetryForStatusCodes(6, time.Duration(2*time.Second), http.StatusGatewayTimeout), + ) + + Respond(r, + ByClosing()) + + if client.Attempts() != 1 || r.Status != "204 No Content" { + t.Fatalf("autorest: Sender#DoRetryForStatusCodes -- Got: Retry attempts %v for StatusCode %v; Want: 0 attempts for StatusCode 204", + client.Attempts(), r.Status) + } +} + +func TestDoRetryForStatusCodes_RequestBodyReadError(t *testing.T) { + client := mocks.NewSender() + client.AppendAndRepeatResponse(mocks.NewResponseWithStatus("204 No Content", http.StatusNoContent), 2) + + r, err := SendWithSender(client, mocks.NewRequestWithCloseBody(), + DoRetryForStatusCodes(6, time.Duration(2*time.Second), http.StatusGatewayTimeout), + ) + + Respond(r, + ByClosing()) + + if err == nil || client.Attempts() != 0 { + t.Fatalf("autorest: Sender#DoRetryForStatusCodes -- Got: Not failed for request body read error; Want: Failed for body read error - %v", err) + } +} + +func newAcceptedResponse() *http.Response { + resp := mocks.NewResponseWithStatus("202 Accepted", http.StatusAccepted) + mocks.SetAcceptedHeaders(resp) + return resp +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/go-autorest/autorest/to/convert.go juju-core-2.0.0/src/github.com/Azure/go-autorest/autorest/to/convert.go --- juju-core-2.0~beta15/src/github.com/Azure/go-autorest/autorest/to/convert.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/go-autorest/autorest/to/convert.go 2016-10-13 14:31:55.000000000 +0000 @@ -0,0 +1,133 @@ +/* +Package to provides helpers to ease working with pointer values of marshalled structures. +*/ +package to + +// String returns a string value for the passed string pointer. 
It returns the empty string if the +// pointer is nil. +func String(s *string) string { + if s != nil { + return *s + } + return "" +} + +// StringPtr returns a pointer to the passed string. +func StringPtr(s string) *string { + return &s +} + +// StringSlice returns a string slice value for the passed string slice pointer. It returns a nil +// slice if the pointer is nil. +func StringSlice(s *[]string) []string { + if s != nil { + return *s + } + return nil +} + +// StringSlicePtr returns a pointer to the passed string slice. +func StringSlicePtr(s []string) *[]string { + return &s +} + +// StringMap returns a map of strings built from the map of string pointers. The empty string is +// used for nil pointers. +func StringMap(msp map[string]*string) map[string]string { + ms := make(map[string]string, len(msp)) + for k, sp := range msp { + if sp != nil { + ms[k] = *sp + } else { + ms[k] = "" + } + } + return ms +} + +// StringMapPtr returns a pointer to a map of string pointers built from the passed map of strings. +func StringMapPtr(ms map[string]string) *map[string]*string { + msp := make(map[string]*string, len(ms)) + for k, s := range ms { + msp[k] = StringPtr(s) + } + return &msp +} + +// Bool returns a bool value for the passed bool pointer. It returns false if the pointer is nil. +func Bool(b *bool) bool { + if b != nil { + return *b + } + return false +} + +// BoolPtr returns a pointer to the passed bool. +func BoolPtr(b bool) *bool { + return &b +} + +// Int returns an int value for the passed int pointer. It returns 0 if the pointer is nil. +func Int(i *int) int { + if i != nil { + return *i + } + return 0 +} + +// IntPtr returns a pointer to the passed int. +func IntPtr(i int) *int { + return &i +} + +// Int32 returns an int value for the passed int pointer. It returns 0 if the pointer is nil. +func Int32(i *int32) int32 { + if i != nil { + return *i + } + return 0 +} + +// Int32Ptr returns a pointer to the passed int32. 
+func Int32Ptr(i int32) *int32 { + return &i +} + +// Int64 returns an int value for the passed int pointer. It returns 0 if the pointer is nil. +func Int64(i *int64) int64 { + if i != nil { + return *i + } + return 0 +} + +// Int64Ptr returns a pointer to the passed int64. +func Int64Ptr(i int64) *int64 { + return &i +} + +// Float32 returns an int value for the passed int pointer. It returns 0.0 if the pointer is nil. +func Float32(i *float32) float32 { + if i != nil { + return *i + } + return 0.0 +} + +// Float32Ptr returns a pointer to the passed float32. +func Float32Ptr(i float32) *float32 { + return &i +} + +// Float64 returns an int value for the passed int pointer. It returns 0.0 if the pointer is nil. +func Float64(i *float64) float64 { + if i != nil { + return *i + } + return 0.0 +} + +// Float64Ptr returns a pointer to the passed float64. +func Float64Ptr(i float64) *float64 { + return &i +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/go-autorest/autorest/to/convert_test.go juju-core-2.0.0/src/github.com/Azure/go-autorest/autorest/to/convert_test.go --- juju-core-2.0~beta15/src/github.com/Azure/go-autorest/autorest/to/convert_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/go-autorest/autorest/to/convert_test.go 2016-10-13 14:31:55.000000000 +0000 @@ -0,0 +1,220 @@ +package to + +import ( + "reflect" + "testing" +) + +func TestString(t *testing.T) { + v := "" + if String(&v) != v { + t.Fatalf("to: String failed to return the correct string -- expected %v, received %v", + v, String(&v)) + } +} + +func TestStringHandlesNil(t *testing.T) { + if String(nil) != "" { + t.Fatalf("to: String failed to correctly convert nil -- expected %v, received %v", + "", String(nil)) + } +} + +func TestStringPtr(t *testing.T) { + v := "" + if *StringPtr(v) != v { + t.Fatalf("to: StringPtr failed to return the correct string -- expected %v, received %v", + v, *StringPtr(v)) + } +} + +func TestStringSlice(t *testing.T) { + v := 
[]string{} + if out := StringSlice(&v); !reflect.DeepEqual(out, v) { + t.Fatalf("to: StringSlice failed to return the correct slice -- expected %v, received %v", + v, out) + } +} + +func TestStringSliceHandlesNil(t *testing.T) { + if out := StringSlice(nil); out != nil { + t.Fatalf("to: StringSlice failed to correctly convert nil -- expected %v, received %v", + nil, out) + } +} + +func TestStringSlicePtr(t *testing.T) { + v := []string{"a", "b"} + if out := StringSlicePtr(v); !reflect.DeepEqual(*out, v) { + t.Fatalf("to: StringSlicePtr failed to return the correct slice -- expected %v, received %v", + v, *out) + } +} + +func TestStringMap(t *testing.T) { + msp := map[string]*string{"foo": StringPtr("foo"), "bar": StringPtr("bar"), "baz": StringPtr("baz")} + for k, v := range StringMap(msp) { + if *msp[k] != v { + t.Fatalf("to: StringMap incorrectly converted an entry -- expected [%s]%v, received[%s]%v", + k, v, k, *msp[k]) + } + } +} + +func TestStringMapHandlesNil(t *testing.T) { + msp := map[string]*string{"foo": StringPtr("foo"), "bar": nil, "baz": StringPtr("baz")} + for k, v := range StringMap(msp) { + if msp[k] == nil && v != "" { + t.Fatalf("to: StringMap incorrectly converted a nil entry -- expected [%s]%v, received[%s]%v", + k, v, k, *msp[k]) + } + } +} + +func TestStringMapPtr(t *testing.T) { + ms := map[string]string{"foo": "foo", "bar": "bar", "baz": "baz"} + for k, msp := range *StringMapPtr(ms) { + if ms[k] != *msp { + t.Fatalf("to: StringMapPtr incorrectly converted an entry -- expected [%s]%v, received[%s]%v", + k, ms[k], k, *msp) + } + } +} + +func TestBool(t *testing.T) { + v := false + if Bool(&v) != v { + t.Fatalf("to: Bool failed to return the correct string -- expected %v, received %v", + v, Bool(&v)) + } +} + +func TestBoolHandlesNil(t *testing.T) { + if Bool(nil) != false { + t.Fatalf("to: Bool failed to correctly convert nil -- expected %v, received %v", + false, Bool(nil)) + } +} + +func TestBoolPtr(t *testing.T) { + v := false + if 
*BoolPtr(v) != v { + t.Fatalf("to: BoolPtr failed to return the correct string -- expected %v, received %v", + v, *BoolPtr(v)) + } +} + +func TestInt(t *testing.T) { + v := 0 + if Int(&v) != v { + t.Fatalf("to: Int failed to return the correct string -- expected %v, received %v", + v, Int(&v)) + } +} + +func TestIntHandlesNil(t *testing.T) { + if Int(nil) != 0 { + t.Fatalf("to: Int failed to correctly convert nil -- expected %v, received %v", + 0, Int(nil)) + } +} + +func TestIntPtr(t *testing.T) { + v := 0 + if *IntPtr(v) != v { + t.Fatalf("to: IntPtr failed to return the correct string -- expected %v, received %v", + v, *IntPtr(v)) + } +} + +func TestInt32(t *testing.T) { + v := int32(0) + if Int32(&v) != v { + t.Fatalf("to: Int32 failed to return the correct string -- expected %v, received %v", + v, Int32(&v)) + } +} + +func TestInt32HandlesNil(t *testing.T) { + if Int32(nil) != int32(0) { + t.Fatalf("to: Int32 failed to correctly convert nil -- expected %v, received %v", + 0, Int32(nil)) + } +} + +func TestInt32Ptr(t *testing.T) { + v := int32(0) + if *Int32Ptr(v) != v { + t.Fatalf("to: Int32Ptr failed to return the correct string -- expected %v, received %v", + v, *Int32Ptr(v)) + } +} + +func TestInt64(t *testing.T) { + v := int64(0) + if Int64(&v) != v { + t.Fatalf("to: Int64 failed to return the correct string -- expected %v, received %v", + v, Int64(&v)) + } +} + +func TestInt64HandlesNil(t *testing.T) { + if Int64(nil) != int64(0) { + t.Fatalf("to: Int64 failed to correctly convert nil -- expected %v, received %v", + 0, Int64(nil)) + } +} + +func TestInt64Ptr(t *testing.T) { + v := int64(0) + if *Int64Ptr(v) != v { + t.Fatalf("to: Int64Ptr failed to return the correct string -- expected %v, received %v", + v, *Int64Ptr(v)) + } +} + +func TestFloat32(t *testing.T) { + v := float32(0) + if Float32(&v) != v { + t.Fatalf("to: Float32 failed to return the correct string -- expected %v, received %v", + v, Float32(&v)) + } +} + +func TestFloat32HandlesNil(t 
*testing.T) { + if Float32(nil) != float32(0) { + t.Fatalf("to: Float32 failed to correctly convert nil -- expected %v, received %v", + 0, Float32(nil)) + } +} + +func TestFloat32Ptr(t *testing.T) { + v := float32(0) + if *Float32Ptr(v) != v { + t.Fatalf("to: Float32Ptr failed to return the correct string -- expected %v, received %v", + v, *Float32Ptr(v)) + } +} + +func TestFloat64(t *testing.T) { + v := float64(0) + if Float64(&v) != v { + t.Fatalf("to: Float64 failed to return the correct string -- expected %v, received %v", + v, Float64(&v)) + } +} + +func TestFloat64HandlesNil(t *testing.T) { + if Float64(nil) != float64(0) { + t.Fatalf("to: Float64 failed to correctly convert nil -- expected %v, received %v", + 0, Float64(nil)) + } +} + +func TestFloat64Ptr(t *testing.T) { + v := float64(0) + if *Float64Ptr(v) != v { + t.Fatalf("to: Float64Ptr failed to return the correct string -- expected %v, received %v", + v, *Float64Ptr(v)) + } +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/go-autorest/autorest/utility.go juju-core-2.0.0/src/github.com/Azure/go-autorest/autorest/utility.go --- juju-core-2.0~beta15/src/github.com/Azure/go-autorest/autorest/utility.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/go-autorest/autorest/utility.go 2016-10-13 14:31:55.000000000 +0000 @@ -0,0 +1,161 @@ +package autorest + +import ( + "bytes" + "encoding/json" + "encoding/xml" + "fmt" + "io" + "net/url" + "sort" + "strings" +) + +// EncodedAs is a series of constants specifying various data encodings +type EncodedAs string + +const ( + // EncodedAsJSON states that data is encoded as JSON + EncodedAsJSON EncodedAs = "JSON" + + // EncodedAsXML states that data is encoded as Xml + EncodedAsXML EncodedAs = "XML" +) + +// Decoder defines the decoding method json.Decoder and xml.Decoder share +type Decoder interface { + Decode(v interface{}) error +} + +// NewDecoder creates a new decoder appropriate to the passed encoding. 
+// encodedAs specifies the type of encoding and r supplies the io.Reader containing the +// encoded data. +func NewDecoder(encodedAs EncodedAs, r io.Reader) Decoder { + if encodedAs == EncodedAsJSON { + return json.NewDecoder(r) + } else if encodedAs == EncodedAsXML { + return xml.NewDecoder(r) + } + return nil +} + +// CopyAndDecode decodes the data from the passed io.Reader while making a copy. Having a copy +// is especially useful if there is a chance the data will fail to decode. +// encodedAs specifies the expected encoding, r provides the io.Reader to the data, and v +// is the decoding destination. +func CopyAndDecode(encodedAs EncodedAs, r io.Reader, v interface{}) (bytes.Buffer, error) { + b := bytes.Buffer{} + return b, NewDecoder(encodedAs, io.TeeReader(r, &b)).Decode(v) +} + +// TeeReadCloser returns a ReadCloser that writes to w what it reads from rc. +// It utilizes io.TeeReader to copy the data read and has the same behavior when reading. +// Further, when it is closed, it ensures that rc is closed as well. 
+func TeeReadCloser(rc io.ReadCloser, w io.Writer) io.ReadCloser { + return &teeReadCloser{rc, io.TeeReader(rc, w)} +} + +type teeReadCloser struct { + rc io.ReadCloser + r io.Reader +} + +func (t *teeReadCloser) Read(p []byte) (int, error) { + return t.r.Read(p) +} + +func (t *teeReadCloser) Close() error { + return t.rc.Close() +} + +func containsInt(ints []int, n int) bool { + for _, i := range ints { + if i == n { + return true + } + } + return false +} + +func escapeValueStrings(m map[string]string) map[string]string { + for key, value := range m { + m[key] = url.QueryEscape(value) + } + return m +} + +func ensureValueStrings(mapOfInterface map[string]interface{}) map[string]string { + mapOfStrings := make(map[string]string) + for key, value := range mapOfInterface { + mapOfStrings[key] = ensureValueString(value) + } + return mapOfStrings +} + +func ensureValueString(value interface{}) string { + if value == nil { + return "" + } + switch v := value.(type) { + case string: + return v + case []byte: + return string(v) + default: + return fmt.Sprintf("%v", v) + } +} + +// String method converts interface v to string. If interface is a list, it +// joins list elements using separator. +func String(v interface{}, sep ...string) string { + if len(sep) > 0 { + return ensureValueString(strings.Join(v.([]string), sep[0])) + } + return ensureValueString(v) +} + +// Encode method encodes url path and query parameters. +func Encode(location string, v interface{}, sep ...string) string { + s := String(v, sep...) 
+ switch strings.ToLower(location) { + case "path": + return pathEscape(s) + case "query": + return queryEscape(s) + default: + return s + } +} + +func pathEscape(s string) string { + return strings.Replace(url.QueryEscape(s), "+", "%20", -1) +} + +func queryEscape(s string) string { + return url.QueryEscape(s) +} + +// This method is same as Encode() method of "net/url" go package, +// except it does not encode the query parameters because they +// already come encoded. It formats values map in query format (bar=foo&a=b). +func createQuery(v url.Values) string { + var buf bytes.Buffer + keys := make([]string, 0, len(v)) + for k := range v { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + vs := v[k] + prefix := url.QueryEscape(k) + "=" + for _, v := range vs { + if buf.Len() > 0 { + buf.WriteByte('&') + } + buf.WriteString(prefix) + buf.WriteString(v) + } + } + return buf.String() +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/go-autorest/autorest/utility_test.go juju-core-2.0.0/src/github.com/Azure/go-autorest/autorest/utility_test.go --- juju-core-2.0~beta15/src/github.com/Azure/go-autorest/autorest/utility_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/go-autorest/autorest/utility_test.go 2016-10-13 14:31:55.000000000 +0000 @@ -0,0 +1,315 @@ +package autorest + +import ( + "bytes" + "encoding/json" + "encoding/xml" + "fmt" + "net/http" + "reflect" + "strings" + "testing" + + "github.com/Azure/go-autorest/autorest/mocks" +) + +const ( + jsonT = ` + { + "name":"Rob Pike", + "age":42 + }` + xmlT = ` + + Rob Pike + 42 + ` +) + +func TestNewDecoderCreatesJSONDecoder(t *testing.T) { + d := NewDecoder(EncodedAsJSON, strings.NewReader(jsonT)) + _, ok := d.(*json.Decoder) + if d == nil || !ok { + t.Fatal("autorest: NewDecoder failed to create a JSON decoder when requested") + } +} + +func TestNewDecoderCreatesXMLDecoder(t *testing.T) { + d := NewDecoder(EncodedAsXML, strings.NewReader(xmlT)) + 
_, ok := d.(*xml.Decoder) + if d == nil || !ok { + t.Fatal("autorest: NewDecoder failed to create an XML decoder when requested") + } +} + +func TestNewDecoderReturnsNilForUnknownEncoding(t *testing.T) { + d := NewDecoder("unknown", strings.NewReader(xmlT)) + if d != nil { + t.Fatal("autorest: NewDecoder created a decoder for an unknown encoding") + } +} + +func TestCopyAndDecodeDecodesJSON(t *testing.T) { + _, err := CopyAndDecode(EncodedAsJSON, strings.NewReader(jsonT), &mocks.T{}) + if err != nil { + t.Fatalf("autorest: CopyAndDecode returned an error with valid JSON - %v", err) + } +} + +func TestCopyAndDecodeDecodesXML(t *testing.T) { + _, err := CopyAndDecode(EncodedAsXML, strings.NewReader(xmlT), &mocks.T{}) + if err != nil { + t.Fatalf("autorest: CopyAndDecode returned an error with valid XML - %v", err) + } +} + +func TestCopyAndDecodeReturnsJSONDecodingErrors(t *testing.T) { + _, err := CopyAndDecode(EncodedAsJSON, strings.NewReader(jsonT[0:len(jsonT)-2]), &mocks.T{}) + if err == nil { + t.Fatalf("autorest: CopyAndDecode failed to return an error with invalid JSON") + } +} + +func TestCopyAndDecodeReturnsXMLDecodingErrors(t *testing.T) { + _, err := CopyAndDecode(EncodedAsXML, strings.NewReader(xmlT[0:len(xmlT)-2]), &mocks.T{}) + if err == nil { + t.Fatalf("autorest: CopyAndDecode failed to return an error with invalid XML") + } +} + +func TestCopyAndDecodeAlwaysReturnsACopy(t *testing.T) { + b, _ := CopyAndDecode(EncodedAsJSON, strings.NewReader(jsonT), &mocks.T{}) + if b.String() != jsonT { + t.Fatalf("autorest: CopyAndDecode failed to return a valid copy of the data - %v", b.String()) + } +} + +func TestTeeReadCloser_Copies(t *testing.T) { + v := &mocks.T{} + r := mocks.NewResponseWithContent(jsonT) + b := &bytes.Buffer{} + + r.Body = TeeReadCloser(r.Body, b) + + err := Respond(r, + ByUnmarshallingJSON(v), + ByClosing()) + if err != nil { + t.Fatalf("autorest: TeeReadCloser returned an unexpected error -- %v", err) + } + if b.String() != jsonT { + 
t.Fatalf("autorest: TeeReadCloser failed to copy the bytes read") + } +} + +func TestTeeReadCloser_PassesReadErrors(t *testing.T) { + v := &mocks.T{} + r := mocks.NewResponseWithContent(jsonT) + + r.Body.(*mocks.Body).Close() + r.Body = TeeReadCloser(r.Body, &bytes.Buffer{}) + + err := Respond(r, + ByUnmarshallingJSON(v), + ByClosing()) + if err == nil { + t.Fatalf("autorest: TeeReadCloser failed to return the expected error") + } +} + +func TestTeeReadCloser_ClosesWrappedReader(t *testing.T) { + v := &mocks.T{} + r := mocks.NewResponseWithContent(jsonT) + + b := r.Body.(*mocks.Body) + r.Body = TeeReadCloser(r.Body, &bytes.Buffer{}) + err := Respond(r, + ByUnmarshallingJSON(v), + ByClosing()) + if err != nil { + t.Fatalf("autorest: TeeReadCloser returned an unexpected error -- %v", err) + } + if b.IsOpen() { + t.Fatalf("autorest: TeeReadCloser failed to close the nested io.ReadCloser") + } +} + +func TestContainsIntFindsValue(t *testing.T) { + ints := []int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9} + v := 5 + if !containsInt(ints, v) { + t.Fatalf("autorest: containsInt failed to find %v in %v", v, ints) + } +} + +func TestContainsIntDoesNotFindValue(t *testing.T) { + ints := []int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9} + v := 42 + if containsInt(ints, v) { + t.Fatalf("autorest: containsInt unexpectedly found %v in %v", v, ints) + } +} + +func TestContainsIntAcceptsEmptyList(t *testing.T) { + ints := make([]int, 10) + if containsInt(ints, 42) { + t.Fatalf("autorest: containsInt failed to handle an empty list") + } +} + +func TestContainsIntAcceptsNilList(t *testing.T) { + var ints []int + if containsInt(ints, 42) { + t.Fatalf("autorest: containsInt failed to handle an nil list") + } +} + +func TestEscapeStrings(t *testing.T) { + m := map[string]string{ + "string": "a long string with = odd characters", + "int": "42", + "nil": "", + } + r := map[string]string{ + "string": "a+long+string+with+%3D+odd+characters", + "int": "42", + "nil": "", + } + v := escapeValueStrings(m) + if 
!reflect.DeepEqual(v, r) { + t.Fatalf("autorest: ensureValueStrings returned %v\n", v) + } +} + +func TestEnsureStrings(t *testing.T) { + m := map[string]interface{}{ + "string": "string", + "int": 42, + "nil": nil, + "bytes": []byte{255, 254, 253}, + } + r := map[string]string{ + "string": "string", + "int": "42", + "nil": "", + "bytes": string([]byte{255, 254, 253}), + } + v := ensureValueStrings(m) + if !reflect.DeepEqual(v, r) { + t.Fatalf("autorest: ensureValueStrings returned %v\n", v) + } +} + +func ExampleString() { + m := []string{ + "string1", + "string2", + "string3", + } + + fmt.Println(String(m, ",")) + // Output: string1,string2,string3 +} + +func TestStringWithValidString(t *testing.T) { + i := 123 + if String(i) != "123" { + t.Fatal("autorest: String method failed to convert integer 123 to string") + } +} + +func TestEncodeWithValidPath(t *testing.T) { + s := Encode("Path", "Hello Gopher") + if s != "Hello%20Gopher" { + t.Fatalf("autorest: Encode method failed for valid path encoding. Got: %v; Want: %v", s, "Hello%20Gopher") + } +} + +func TestEncodeWithValidQuery(t *testing.T) { + s := Encode("Query", "Hello Gopher") + if s != "Hello+Gopher" { + t.Fatalf("autorest: Encode method failed for valid query encoding. Got: '%v'; Want: 'Hello+Gopher'", s) + } +} + +func TestEncodeWithValidNotPathQuery(t *testing.T) { + s := Encode("Host", "Hello Gopher") + if s != "Hello Gopher" { + t.Fatalf("autorest: Encode method failed for parameter not query or path. 
Got: '%v'; Want: 'Hello Gopher'", s) + } +} + +func doEnsureBodyClosed(t *testing.T) SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (*http.Response, error) { + resp, err := s.Do(r) + if resp != nil && resp.Body != nil && resp.Body.(*mocks.Body).IsOpen() { + t.Fatal("autorest: Expected Body to be closed -- it was left open") + } + return resp, err + }) + } +} + +type mockAuthorizer struct{} + +func (ma mockAuthorizer) WithAuthorization() PrepareDecorator { + return WithHeader(headerAuthorization, mocks.TestAuthorizationHeader) +} + +type mockFailingAuthorizer struct{} + +func (mfa mockFailingAuthorizer) WithAuthorization() PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + return r, fmt.Errorf("ERROR: mockFailingAuthorizer returned expected error") + }) + } +} + +type mockInspector struct { + wasInvoked bool +} + +func (mi *mockInspector) WithInspection() PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + mi.wasInvoked = true + return p.Prepare(r) + }) + } +} + +func (mi *mockInspector) ByInspecting() RespondDecorator { + return func(r Responder) Responder { + return ResponderFunc(func(resp *http.Response) error { + mi.wasInvoked = true + return r.Respond(resp) + }) + } +} + +func withMessage(output *string, msg string) SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (*http.Response, error) { + resp, err := s.Do(r) + if err == nil { + *output += msg + } + return resp, err + }) + } +} + +func withErrorRespondDecorator(e *error) RespondDecorator { + return func(r Responder) Responder { + return ResponderFunc(func(resp *http.Response) error { + err := r.Respond(resp) + if err != nil { + return err + } + *e = fmt.Errorf("autorest: Faux Respond Error") + return *e + }) + } +} diff -Nru 
juju-core-2.0~beta15/src/github.com/Azure/go-autorest/autorest/version.go juju-core-2.0.0/src/github.com/Azure/go-autorest/autorest/version.go --- juju-core-2.0~beta15/src/github.com/Azure/go-autorest/autorest/version.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/go-autorest/autorest/version.go 2016-10-13 14:31:55.000000000 +0000 @@ -0,0 +1,18 @@ +package autorest + +import ( + "fmt" +) + +const ( + major = "7" + minor = "0" + patch = "0" + tag = "" + semVerFormat = "%s.%s.%s%s" +) + +// Version returns the semantic version (see http://semver.org). +func Version() string { + return fmt.Sprintf(semVerFormat, major, minor, patch, tag) +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/go-autorest/autorest/version_test.go juju-core-2.0.0/src/github.com/Azure/go-autorest/autorest/version_test.go --- juju-core-2.0~beta15/src/github.com/Azure/go-autorest/autorest/version_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/go-autorest/autorest/version_test.go 2016-10-13 14:31:55.000000000 +0000 @@ -0,0 +1,13 @@ +package autorest + +import ( + "testing" +) + +func TestVersion(t *testing.T) { + v := "7.0.0" + if Version() != v { + t.Fatalf("autorest: Version failed to return the expected version -- expected %s, received %s", + v, Version()) + } +} diff -Nru juju-core-2.0~beta15/src/github.com/Azure/go-autorest/CHANGELOG.md juju-core-2.0.0/src/github.com/Azure/go-autorest/CHANGELOG.md --- juju-core-2.0~beta15/src/github.com/Azure/go-autorest/CHANGELOG.md 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/go-autorest/CHANGELOG.md 2016-10-13 14:31:55.000000000 +0000 @@ -0,0 +1,124 @@ +# CHANGELOG + +## v7.0.7 +- Add trailing / to endpoint +- azure: add EnvironmentFromName + +## v7.0.6 +- Add retry logic for 408, 500, 502, 503 and 504 status codes. +- Change url path and query encoding logic. +- Fix DelayForBackoff for proper exponential delay. +- Add CookieJar in Client. 
+ +## v7.0.5 +- Add check to start polling only when status is in [200,201,202]. +- Refactoring for unchecked errors. +- azure/persist changes. +- Fix 'file in use' issue in renewing token in deviceflow. +- Store header RetryAfter for subsequent requests in polling. +- Add attribute details in service error. + +## v7.0.4 +- Better error messages for long running operation failures + +## v7.0.3 +- Corrected DoPollForAsynchronous to properly handle the initial response + +## v7.0.2 +- Corrected DoPollForAsynchronous to continue using the polling method first discovered + +## v7.0.1 +- Fixed empty JSON input error in ByUnmarshallingJSON +- Fixed polling support for GET calls +- Changed format name from TimeRfc1123 to TimeRFC1123 + +## v7.0.0 +- Added ByCopying responder with supporting TeeReadCloser +- Rewrote Azure asynchronous handling +- Reverted to only unmarshalling JSON +- Corrected handling of RFC3339 time strings and added support for Rfc1123 time format + +The `json.Decoder` does not catch bad data as thoroughly as `json.Unmarshal`. Since +`encoding/json` successfully deserializes all core types, and extended types normally provide +their custom JSON serialization handlers, the code has been reverted back to using +`json.Unmarshal`. The original change to use `json.Decode` was made to reduce duplicate +code; there is no loss of function, and there is a gain in accuracy, by reverting. + +Additionally, Azure services indicate requests to be polled by multiple means. The existing code +only checked for one of those (that is, the presence of the `Azure-AsyncOperation` header). +The new code correctly covers all cases and aligns with the other Azure SDKs. + +## v6.1.0 +- Introduced `date.ByUnmarshallingJSONDate` and `date.ByUnmarshallingJSONTime` to enable JSON encoded values. 
+ +## v6.0.0 +- Completely reworked the handling of polled and asynchronous requests +- Removed unnecessary routines +- Reworked `mocks.Sender` to replay a series of `http.Response` objects +- Added `PrepareDecorators` for primitive types (e.g., bool, int32) + +Handling polled and asynchronous requests is no longer part of `Client#Send`. Instead new +`SendDecorators` implement different styles of polled behavior. See`autorest.DoPollForStatusCodes` +and `azure.DoPollForAsynchronous` for examples. + +## v5.0.0 +- Added new RespondDecorators unmarshalling primitive types +- Corrected application of inspection and authorization PrependDecorators + +## v4.0.0 +- Added support for Azure long-running operations. +- Added cancelation support to all decorators and functions that may delay. +- Breaking: `DelayForBackoff` now accepts a channel, which may be nil. + +## v3.1.0 +- Add support for OAuth Device Flow authorization. +- Add support for ServicePrincipalTokens that are backed by an existing token, rather than other secret material. +- Add helpers for persisting and restoring Tokens. +- Increased code coverage in the github.com/Azure/autorest/azure package + +## v3.0.0 +- Breaking: `NewErrorWithError` no longer takes `statusCode int`. +- Breaking: `NewErrorWithStatusCode` is replaced with `NewErrorWithResponse`. +- Breaking: `Client#Send()` no longer takes `codes ...int` argument. +- Add: XML unmarshaling support with `ByUnmarshallingXML()` +- Stopped vending dependencies locally and switched to [Glide](https://github.com/Masterminds/glide). + Applications using this library should either use Glide or vendor dependencies locally some other way. +- Add: `azure.WithErrorUnlessStatusCode()` decorator to handle Azure errors. +- Fix: use `net/http.DefaultClient` as base client. +- Fix: Missing inspection for polling responses added. +- Add: CopyAndDecode helpers. +- Improved `./autorest/to` with `[]string` helpers. +- Removed golint suppressions in .travis.yml. 
+ +## v2.1.0 + +- Added `StatusCode` to `Error` for more easily obtaining the HTTP Reponse StatusCode (if any) + +## v2.0.0 + +- Changed `to.StringMapPtr` method signature to return a pointer +- Changed `ServicePrincipalCertificateSecret` and `NewServicePrincipalTokenFromCertificate` to support generic certificate and private keys + +## v1.0.0 + +- Added Logging inspectors to trace http.Request / Response +- Added support for User-Agent header +- Changed WithHeader PrepareDecorator to use set vs. add +- Added JSON to error when unmarshalling fails +- Added Client#Send method +- Corrected case of "Azure" in package paths +- Added "to" helpers, Azure helpers, and improved ease-of-use +- Corrected golint issues + +## v1.0.1 + +- Added CHANGELOG.md + +## v1.1.0 + +- Added mechanism to retrieve a ServicePrincipalToken using a certificate-signed JWT +- Added an example of creating a certificate-based ServicePrincipal and retrieving an OAuth token using the certificate + +## v1.1.1 + +- Introduce godeps and vendor dependencies introduced in v1.1.1 diff -Nru juju-core-2.0~beta15/src/github.com/Azure/go-autorest/.gitignore juju-core-2.0.0/src/github.com/Azure/go-autorest/.gitignore --- juju-core-2.0~beta15/src/github.com/Azure/go-autorest/.gitignore 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/go-autorest/.gitignore 2016-10-13 14:31:55.000000000 +0000 @@ -0,0 +1,29 @@ +# The standard Go .gitignore file follows. 
(Sourced from: github.com/github/gitignore/master/Go.gitignore) +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof + +# go-autorest specific +vendor/ +autorest/azure/example/example diff -Nru juju-core-2.0~beta15/src/github.com/Azure/go-autorest/glide.lock juju-core-2.0.0/src/github.com/Azure/go-autorest/glide.lock --- juju-core-2.0~beta15/src/github.com/Azure/go-autorest/glide.lock 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/go-autorest/glide.lock 2016-10-13 14:31:55.000000000 +0000 @@ -0,0 +1,15 @@ +hash: fe11759f8785c35a54b84dcce121eab8c8f220a2c05ebe3f36087ebe5bf38528 +updated: 2016-06-22T13:31:27.2502096-07:00 +imports: +- name: github.com/dgrijalva/jwt-go + version: f0777076321ab64f6efc15a82d9d23b98539b943 + subpackages: + - . +- name: golang.org/x/crypto + version: f3241ce8505855877cc8a9717bd61a0f7c4ea83c + repo: https://github.com/golang/crypto.git + vcs: git + subpackages: + - pkcs12 + - pkcs12/internal/rc2 +devImports: [] diff -Nru juju-core-2.0~beta15/src/github.com/Azure/go-autorest/glide.yaml juju-core-2.0.0/src/github.com/Azure/go-autorest/glide.yaml --- juju-core-2.0~beta15/src/github.com/Azure/go-autorest/glide.yaml 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/go-autorest/glide.yaml 2016-10-13 14:31:55.000000000 +0000 @@ -0,0 +1,10 @@ +package: github.com/Azure/go-autorest +import: +- package: github.com/dgrijalva/jwt-go + subpackages: + - . 
+- package: golang.org/x/crypto + vcs: git + repo: https://github.com/golang/crypto.git + subpackages: + - /pkcs12 diff -Nru juju-core-2.0~beta15/src/github.com/Azure/go-autorest/LICENSE juju-core-2.0.0/src/github.com/Azure/go-autorest/LICENSE --- juju-core-2.0~beta15/src/github.com/Azure/go-autorest/LICENSE 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/go-autorest/LICENSE 2016-10-13 14:31:55.000000000 +0000 @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2015 Microsoft Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff -Nru juju-core-2.0~beta15/src/github.com/Azure/go-autorest/README.md juju-core-2.0.0/src/github.com/Azure/go-autorest/README.md --- juju-core-2.0~beta15/src/github.com/Azure/go-autorest/README.md 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/go-autorest/README.md 2016-10-13 14:31:55.000000000 +0000 @@ -0,0 +1,131 @@ +# go-autorest + +[![GoDoc](https://godoc.org/github.com/Azure/go-autorest/autorest?status.png)](https://godoc.org/github.com/Azure/go-autorest/autorest) [![Build Status](https://travis-ci.org/Azure/go-autorest.svg?branch=master)](https://travis-ci.org/Azure/go-autorest) [![Go Report Card](https://goreportcard.com/badge/Azure/go-autorest)](https://goreportcard.com/report/Azure/go-autorest) + +## Usage +Package autorest implements an HTTP request pipeline suitable for use across multiple go-routines +and provides the shared routines relied on by AutoRest (see https://github.com/Azure/autorest/) +generated Go code. + +The package breaks sending and responding to HTTP requests into three phases: Preparing, Sending, +and Responding. A typical pattern is: + +```go + req, err := Prepare(&http.Request{}, + token.WithAuthorization()) + + resp, err := Send(req, + WithLogging(logger), + DoErrorIfStatusCode(http.StatusInternalServerError), + DoCloseIfError(), + DoRetryForAttempts(5, time.Second)) + + err = Respond(resp, + ByClosing()) +``` + +Each phase relies on decorators to modify and / or manage processing. Decorators may first modify +and then pass the data along, pass the data first and then modify the result, or wrap themselves +around passing the data (such as a logger might do). Decorators run in the order provided. 
For +example, the following: + +```go + req, err := Prepare(&http.Request{}, + WithBaseURL("https://microsoft.com/"), + WithPath("a"), + WithPath("b"), + WithPath("c")) +``` + +will set the URL to: + +``` + https://microsoft.com/a/b/c +``` + +Preparers and Responders may be shared and re-used (assuming the underlying decorators support +sharing and re-use). Performant use is obtained by creating one or more Preparers and Responders +shared among multiple go-routines, and a single Sender shared among multiple sending go-routines, +all bound together by means of input / output channels. + +Decorators hold their passed state within a closure (such as the path components in the example +above). Be careful to share Preparers and Responders only in a context where such held state +applies. For example, it may not make sense to share a Preparer that applies a query string from a +fixed set of values. Similarly, sharing a Responder that reads the response body into a passed +struct (e.g., `ByUnmarshallingJson`) is likely incorrect. + +Errors raised by autorest objects and methods will conform to the `autorest.Error` interface. + +See the included examples for more detail. For details on the suggested use of this package by +generated clients, see the Client described below. + +## Helpers + +### Handling Swagger Dates + +The Swagger specification (https://swagger.io) that drives AutoRest +(https://github.com/Azure/autorest/) precisely defines two date forms: date and date-time. The +github.com/Azure/go-autorest/autorest/date package provides time.Time derivations to ensure correct +parsing and formatting. + +### Handling Empty Values + +In JSON, missing values have different semantics than empty values. This is especially true for +services using the HTTP PATCH verb. The JSON submitted with a PATCH request generally contains +only those values to modify. Missing values are to be left unchanged. 
Developers, then, require a +means to both specify an empty value and to leave the value out of the submitted JSON. + +The Go JSON package (`encoding/json`) supports the `omitempty` tag. When specified, it omits +empty values from the rendered JSON. Since Go defines default values for all base types (such as "" +for string and 0 for int) and provides no means to mark a value as actually empty, the JSON package +treats default values as meaning empty, omitting them from the rendered JSON. This means that, using +the Go base types encoded through the default JSON package, it is not possible to create JSON to +clear a value at the server. + +The workaround within the Go community is to use pointers to base types in lieu of base types within +structures that map to JSON. For example, instead of a value of type `string`, the workaround uses +`*string`. While this enables distinguishing empty values from those to be unchanged, creating +pointers to a base type (notably constant, in-line values) requires additional variables. This, for +example, + +```go + s := struct { + S *string + }{ S: &"foo" } +``` +fails, while, this + +```go + v := "foo" + s := struct { + S *string + }{ S: &v } +``` +succeeds. + +To ease using pointers, the subpackage `to` contains helpers that convert to and from pointers for +Go base types which have Swagger analogs. It also provides a helper that converts between +`map[string]string` and `map[string]*string`, enabling the JSON to specify that the value +associated with a key should be cleared. With the helpers, the previous example becomes + +```go + s := struct { + S *string + }{ S: to.StringPtr("foo") } +``` + +## Install + +```bash +go get github.com/Azure/go-autorest/autorest +go get github.com/Azure/go-autorest/autorest/azure +go get github.com/Azure/go-autorest/autorest/date +go get github.com/Azure/go-autorest/autorest/to +``` + +## License + +See LICENSE file. 
+ +----- +This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments. diff -Nru juju-core-2.0~beta15/src/github.com/Azure/go-autorest/.travis.yml juju-core-2.0.0/src/github.com/Azure/go-autorest/.travis.yml --- juju-core-2.0~beta15/src/github.com/Azure/go-autorest/.travis.yml 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/Azure/go-autorest/.travis.yml 2016-10-13 14:31:55.000000000 +0000 @@ -0,0 +1,17 @@ +sudo: false + +language: go + +go: 1.6 + +install: + - go get -u github.com/golang/lint/golint + - go get -u github.com/Masterminds/glide + - glide install + +script: + - test -z "$(gofmt -s -l -w ./autorest/. | tee /dev/stderr)" + - test -z "$(golint ./autorest/... | tee /dev/stderr)" + - go vet ./autorest/... + - go build -v ./autorest/... + - go test -v ./autorest/... 
diff -Nru juju-core-2.0~beta15/src/github.com/beorn7/perks/.gitignore juju-core-2.0.0/src/github.com/beorn7/perks/.gitignore --- juju-core-2.0~beta15/src/github.com/beorn7/perks/.gitignore 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/beorn7/perks/.gitignore 2016-10-13 14:32:07.000000000 +0000 @@ -0,0 +1,2 @@ +*.test +*.prof diff -Nru juju-core-2.0~beta15/src/github.com/beorn7/perks/histogram/bench_test.go juju-core-2.0.0/src/github.com/beorn7/perks/histogram/bench_test.go --- juju-core-2.0~beta15/src/github.com/beorn7/perks/histogram/bench_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/beorn7/perks/histogram/bench_test.go 2016-10-13 14:32:07.000000000 +0000 @@ -0,0 +1,26 @@ +package histogram + +import ( + "math/rand" + "testing" +) + +func BenchmarkInsert10Bins(b *testing.B) { + b.StopTimer() + h := New(10) + b.StartTimer() + for i := 0; i < b.N; i++ { + f := rand.ExpFloat64() + h.Insert(f) + } +} + +func BenchmarkInsert100Bins(b *testing.B) { + b.StopTimer() + h := New(100) + b.StartTimer() + for i := 0; i < b.N; i++ { + f := rand.ExpFloat64() + h.Insert(f) + } +} diff -Nru juju-core-2.0~beta15/src/github.com/beorn7/perks/histogram/histogram.go juju-core-2.0.0/src/github.com/beorn7/perks/histogram/histogram.go --- juju-core-2.0~beta15/src/github.com/beorn7/perks/histogram/histogram.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/beorn7/perks/histogram/histogram.go 2016-10-13 14:32:07.000000000 +0000 @@ -0,0 +1,108 @@ +// Package histogram provides a Go implementation of BigML's histogram package +// for Clojure/Java. It is currently experimental. 
+package histogram + +import ( + "container/heap" + "math" + "sort" +) + +type Bin struct { + Count int + Sum float64 +} + +func (b *Bin) Update(x *Bin) { + b.Count += x.Count + b.Sum += x.Sum +} + +func (b *Bin) Mean() float64 { + return b.Sum / float64(b.Count) +} + +type Bins []*Bin + +func (bs Bins) Len() int { return len(bs) } +func (bs Bins) Less(i, j int) bool { return bs[i].Mean() < bs[j].Mean() } +func (bs Bins) Swap(i, j int) { bs[i], bs[j] = bs[j], bs[i] } + +func (bs *Bins) Push(x interface{}) { + *bs = append(*bs, x.(*Bin)) +} + +func (bs *Bins) Pop() interface{} { + return bs.remove(len(*bs) - 1) +} + +func (bs *Bins) remove(n int) *Bin { + if n < 0 || len(*bs) < n { + return nil + } + x := (*bs)[n] + *bs = append((*bs)[:n], (*bs)[n+1:]...) + return x +} + +type Histogram struct { + res *reservoir +} + +func New(maxBins int) *Histogram { + return &Histogram{res: newReservoir(maxBins)} +} + +func (h *Histogram) Insert(f float64) { + h.res.insert(&Bin{1, f}) + h.res.compress() +} + +func (h *Histogram) Bins() Bins { + return h.res.bins +} + +type reservoir struct { + n int + maxBins int + bins Bins +} + +func newReservoir(maxBins int) *reservoir { + return &reservoir{maxBins: maxBins} +} + +func (r *reservoir) insert(bin *Bin) { + r.n += bin.Count + i := sort.Search(len(r.bins), func(i int) bool { + return r.bins[i].Mean() >= bin.Mean() + }) + if i < 0 || i == r.bins.Len() { + // TODO(blake): Maybe use an .insert(i, bin) instead of + // performing the extra work of a heap.Push. 
+ heap.Push(&r.bins, bin) + return + } + r.bins[i].Update(bin) +} + +func (r *reservoir) compress() { + for r.bins.Len() > r.maxBins { + minGapIndex := -1 + minGap := math.MaxFloat64 + for i := 0; i < r.bins.Len()-1; i++ { + gap := gapWeight(r.bins[i], r.bins[i+1]) + if minGap > gap { + minGap = gap + minGapIndex = i + } + } + prev := r.bins[minGapIndex] + next := r.bins.remove(minGapIndex + 1) + prev.Update(next) + } +} + +func gapWeight(prev, next *Bin) float64 { + return next.Mean() - prev.Mean() +} diff -Nru juju-core-2.0~beta15/src/github.com/beorn7/perks/histogram/histogram_test.go juju-core-2.0.0/src/github.com/beorn7/perks/histogram/histogram_test.go --- juju-core-2.0~beta15/src/github.com/beorn7/perks/histogram/histogram_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/beorn7/perks/histogram/histogram_test.go 2016-10-13 14:32:07.000000000 +0000 @@ -0,0 +1,38 @@ +package histogram + +import ( + "math/rand" + "testing" +) + +func TestHistogram(t *testing.T) { + const numPoints = 1e6 + const maxBins = 3 + + h := New(maxBins) + for i := 0; i < numPoints; i++ { + f := rand.ExpFloat64() + h.Insert(f) + } + + bins := h.Bins() + if g := len(bins); g > maxBins { + t.Fatalf("got %d bins, wanted <= %d", g, maxBins) + } + + for _, b := range bins { + t.Logf("%+v", b) + } + + if g := count(h.Bins()); g != numPoints { + t.Fatalf("binned %d points, wanted %d", g, numPoints) + } +} + +func count(bins Bins) int { + binCounts := 0 + for _, b := range bins { + binCounts += b.Count + } + return binCounts +} diff -Nru juju-core-2.0~beta15/src/github.com/beorn7/perks/LICENSE juju-core-2.0.0/src/github.com/beorn7/perks/LICENSE --- juju-core-2.0~beta15/src/github.com/beorn7/perks/LICENSE 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/beorn7/perks/LICENSE 2016-10-13 14:32:07.000000000 +0000 @@ -0,0 +1,20 @@ +Copyright (C) 2013 Blake Mizerany + +Permission is hereby granted, free of charge, to any person obtaining +a copy of 
this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff -Nru juju-core-2.0~beta15/src/github.com/beorn7/perks/quantile/bench_test.go juju-core-2.0.0/src/github.com/beorn7/perks/quantile/bench_test.go --- juju-core-2.0~beta15/src/github.com/beorn7/perks/quantile/bench_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/beorn7/perks/quantile/bench_test.go 2016-10-13 14:32:07.000000000 +0000 @@ -0,0 +1,63 @@ +package quantile + +import ( + "testing" +) + +func BenchmarkInsertTargeted(b *testing.B) { + b.ReportAllocs() + + s := NewTargeted(Targets) + b.ResetTimer() + for i := float64(0); i < float64(b.N); i++ { + s.Insert(i) + } +} + +func BenchmarkInsertTargetedSmallEpsilon(b *testing.B) { + s := NewTargeted(TargetsSmallEpsilon) + b.ResetTimer() + for i := float64(0); i < float64(b.N); i++ { + s.Insert(i) + } +} + +func BenchmarkInsertBiased(b *testing.B) { + s := NewLowBiased(0.01) + b.ResetTimer() + for i := float64(0); i < float64(b.N); i++ { + s.Insert(i) + } +} + +func BenchmarkInsertBiasedSmallEpsilon(b *testing.B) { + s := NewLowBiased(0.0001) 
+ b.ResetTimer() + for i := float64(0); i < float64(b.N); i++ { + s.Insert(i) + } +} + +func BenchmarkQuery(b *testing.B) { + s := NewTargeted(Targets) + for i := float64(0); i < 1e6; i++ { + s.Insert(i) + } + b.ResetTimer() + n := float64(b.N) + for i := float64(0); i < n; i++ { + s.Query(i / n) + } +} + +func BenchmarkQuerySmallEpsilon(b *testing.B) { + s := NewTargeted(TargetsSmallEpsilon) + for i := float64(0); i < 1e6; i++ { + s.Insert(i) + } + b.ResetTimer() + n := float64(b.N) + for i := float64(0); i < n; i++ { + s.Query(i / n) + } +} diff -Nru juju-core-2.0~beta15/src/github.com/beorn7/perks/quantile/exampledata.txt juju-core-2.0.0/src/github.com/beorn7/perks/quantile/exampledata.txt --- juju-core-2.0~beta15/src/github.com/beorn7/perks/quantile/exampledata.txt 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/beorn7/perks/quantile/exampledata.txt 2016-10-13 14:32:07.000000000 +0000 @@ -0,0 +1,2388 @@ +8 +5 +26 +12 +5 +235 +13 +6 +28 +30 +3 +3 +3 +3 +5 +2 +33 +7 +2 +4 +7 +12 +14 +5 +8 +3 +10 +4 +5 +3 +6 +6 +209 +20 +3 +10 +14 +3 +4 +6 +8 +5 +11 +7 +3 +2 +3 +3 +212 +5 +222 +4 +10 +10 +5 +6 +3 +8 +3 +10 +254 +220 +2 +3 +5 +24 +5 +4 +222 +7 +3 +3 +223 +8 +15 +12 +14 +14 +3 +2 +2 +3 +13 +3 +11 +4 +4 +6 +5 +7 +13 +5 +3 +5 +2 +5 +3 +5 +2 +7 +15 +17 +14 +3 +6 +6 +3 +17 +5 +4 +7 +6 +4 +4 +8 +6 +8 +3 +9 +3 +6 +3 +4 +5 +3 +3 +660 +4 +6 +10 +3 +6 +3 +2 +5 +13 +2 +4 +4 +10 +4 +8 +4 +3 +7 +9 +9 +3 +10 +37 +3 +13 +4 +12 +3 +6 +10 +8 +5 +21 +2 +3 +8 +3 +2 +3 +3 +4 +12 +2 +4 +8 +8 +4 +3 +2 +20 +1 +6 +32 +2 +11 +6 +18 +3 +8 +11 +3 +212 +3 +4 +2 +6 +7 +12 +11 +3 +2 +16 +10 +6 +4 +6 +3 +2 +7 +3 +2 +2 +2 +2 +5 +6 +4 +3 +10 +3 +4 +6 +5 +3 +4 +4 +5 +6 +4 +3 +4 +4 +5 +7 +5 +5 +3 +2 +7 +2 +4 +12 +4 +5 +6 +2 +4 +4 +8 +4 +15 +13 +7 +16 +5 +3 +23 +5 +5 +7 +3 +2 +9 +8 +7 +5 +8 +11 +4 +10 +76 +4 +47 +4 +3 +2 +7 +4 +2 +3 +37 +10 +4 +2 +20 +5 +4 +4 +10 +10 +4 +3 +7 +23 +240 +7 +13 +5 +5 +3 +3 +2 +5 +4 +2 +8 +7 +19 +2 +23 +8 +7 +2 +5 +3 +8 +3 +8 +13 +5 +5 +5 +2 +3 +23 
+4 +9 +8 +4 +3 +3 +5 +220 +2 +3 +4 +6 +14 +3 +53 +6 +2 +5 +18 +6 +3 +219 +6 +5 +2 +5 +3 +6 +5 +15 +4 +3 +17 +3 +2 +4 +7 +2 +3 +3 +4 +4 +3 +2 +664 +6 +3 +23 +5 +5 +16 +5 +8 +2 +4 +2 +24 +12 +3 +2 +3 +5 +8 +3 +5 +4 +3 +14 +3 +5 +8 +2 +3 +7 +9 +4 +2 +3 +6 +8 +4 +3 +4 +6 +5 +3 +3 +6 +3 +19 +4 +4 +6 +3 +6 +3 +5 +22 +5 +4 +4 +3 +8 +11 +4 +9 +7 +6 +13 +4 +4 +4 +6 +17 +9 +3 +3 +3 +4 +3 +221 +5 +11 +3 +4 +2 +12 +6 +3 +5 +7 +5 +7 +4 +9 +7 +14 +37 +19 +217 +16 +3 +5 +2 +2 +7 +19 +7 +6 +7 +4 +24 +5 +11 +4 +7 +7 +9 +13 +3 +4 +3 +6 +28 +4 +4 +5 +5 +2 +5 +6 +4 +4 +6 +10 +5 +4 +3 +2 +3 +3 +6 +5 +5 +4 +3 +2 +3 +7 +4 +6 +18 +16 +8 +16 +4 +5 +8 +6 +9 +13 +1545 +6 +215 +6 +5 +6 +3 +45 +31 +5 +2 +2 +4 +3 +3 +2 +5 +4 +3 +5 +7 +7 +4 +5 +8 +5 +4 +749 +2 +31 +9 +11 +2 +11 +5 +4 +4 +7 +9 +11 +4 +5 +4 +7 +3 +4 +6 +2 +15 +3 +4 +3 +4 +3 +5 +2 +13 +5 +5 +3 +3 +23 +4 +4 +5 +7 +4 +13 +2 +4 +3 +4 +2 +6 +2 +7 +3 +5 +5 +3 +29 +5 +4 +4 +3 +10 +2 +3 +79 +16 +6 +6 +7 +7 +3 +5 +5 +7 +4 +3 +7 +9 +5 +6 +5 +9 +6 +3 +6 +4 +17 +2 +10 +9 +3 +6 +2 +3 +21 +22 +5 +11 +4 +2 +17 +2 +224 +2 +14 +3 +4 +4 +2 +4 +4 +4 +4 +5 +3 +4 +4 +10 +2 +6 +3 +3 +5 +7 +2 +7 +5 +6 +3 +218 +2 +2 +5 +2 +6 +3 +5 +222 +14 +6 +33 +3 +2 +5 +3 +3 +3 +9 +5 +3 +3 +2 +7 +4 +3 +4 +3 +5 +6 +5 +26 +4 +13 +9 +7 +3 +221 +3 +3 +4 +4 +4 +4 +2 +18 +5 +3 +7 +9 +6 +8 +3 +10 +3 +11 +9 +5 +4 +17 +5 +5 +6 +6 +3 +2 +4 +12 +17 +6 +7 +218 +4 +2 +4 +10 +3 +5 +15 +3 +9 +4 +3 +3 +6 +29 +3 +3 +4 +5 +5 +3 +8 +5 +6 +6 +7 +5 +3 +5 +3 +29 +2 +31 +5 +15 +24 +16 +5 +207 +4 +3 +3 +2 +15 +4 +4 +13 +5 +5 +4 +6 +10 +2 +7 +8 +4 +6 +20 +5 +3 +4 +3 +12 +12 +5 +17 +7 +3 +3 +3 +6 +10 +3 +5 +25 +80 +4 +9 +3 +2 +11 +3 +3 +2 +3 +8 +7 +5 +5 +19 +5 +3 +3 +12 +11 +2 +6 +5 +5 +5 +3 +3 +3 +4 +209 +14 +3 +2 +5 +19 +4 +4 +3 +4 +14 +5 +6 +4 +13 +9 +7 +4 +7 +10 +2 +9 +5 +7 +2 +8 +4 +6 +5 +5 +222 +8 +7 +12 +5 +216 +3 +4 +4 +6 +3 +14 +8 +7 +13 +4 +3 +3 +3 +3 +17 +5 +4 +3 +33 +6 +6 +33 +7 +5 +3 +8 +7 +5 +2 +9 +4 +2 +233 +24 +7 +4 +8 +10 +3 +4 +15 +2 +16 +3 +3 +13 +12 +7 +5 +4 +207 +4 +2 +4 
+27 +15 +2 +5 +2 +25 +6 +5 +5 +6 +13 +6 +18 +6 +4 +12 +225 +10 +7 +5 +2 +2 +11 +4 +14 +21 +8 +10 +3 +5 +4 +232 +2 +5 +5 +3 +7 +17 +11 +6 +6 +23 +4 +6 +3 +5 +4 +2 +17 +3 +6 +5 +8 +3 +2 +2 +14 +9 +4 +4 +2 +5 +5 +3 +7 +6 +12 +6 +10 +3 +6 +2 +2 +19 +5 +4 +4 +9 +2 +4 +13 +3 +5 +6 +3 +6 +5 +4 +9 +6 +3 +5 +7 +3 +6 +6 +4 +3 +10 +6 +3 +221 +3 +5 +3 +6 +4 +8 +5 +3 +6 +4 +4 +2 +54 +5 +6 +11 +3 +3 +4 +4 +4 +3 +7 +3 +11 +11 +7 +10 +6 +13 +223 +213 +15 +231 +7 +3 +7 +228 +2 +3 +4 +4 +5 +6 +7 +4 +13 +3 +4 +5 +3 +6 +4 +6 +7 +2 +4 +3 +4 +3 +3 +6 +3 +7 +3 +5 +18 +5 +6 +8 +10 +3 +3 +3 +2 +4 +2 +4 +4 +5 +6 +6 +4 +10 +13 +3 +12 +5 +12 +16 +8 +4 +19 +11 +2 +4 +5 +6 +8 +5 +6 +4 +18 +10 +4 +2 +216 +6 +6 +6 +2 +4 +12 +8 +3 +11 +5 +6 +14 +5 +3 +13 +4 +5 +4 +5 +3 +28 +6 +3 +7 +219 +3 +9 +7 +3 +10 +6 +3 +4 +19 +5 +7 +11 +6 +15 +19 +4 +13 +11 +3 +7 +5 +10 +2 +8 +11 +2 +6 +4 +6 +24 +6 +3 +3 +3 +3 +6 +18 +4 +11 +4 +2 +5 +10 +8 +3 +9 +5 +3 +4 +5 +6 +2 +5 +7 +4 +4 +14 +6 +4 +4 +5 +5 +7 +2 +4 +3 +7 +3 +3 +6 +4 +5 +4 +4 +4 +3 +3 +3 +3 +8 +14 +2 +3 +5 +3 +2 +4 +5 +3 +7 +3 +3 +18 +3 +4 +4 +5 +7 +3 +3 +3 +13 +5 +4 +8 +211 +5 +5 +3 +5 +2 +5 +4 +2 +655 +6 +3 +5 +11 +2 +5 +3 +12 +9 +15 +11 +5 +12 +217 +2 +6 +17 +3 +3 +207 +5 +5 +4 +5 +9 +3 +2 +8 +5 +4 +3 +2 +5 +12 +4 +14 +5 +4 +2 +13 +5 +8 +4 +225 +4 +3 +4 +5 +4 +3 +3 +6 +23 +9 +2 +6 +7 +233 +4 +4 +6 +18 +3 +4 +6 +3 +4 +4 +2 +3 +7 +4 +13 +227 +4 +3 +5 +4 +2 +12 +9 +17 +3 +7 +14 +6 +4 +5 +21 +4 +8 +9 +2 +9 +25 +16 +3 +6 +4 +7 +8 +5 +2 +3 +5 +4 +3 +3 +5 +3 +3 +3 +2 +3 +19 +2 +4 +3 +4 +2 +3 +4 +4 +2 +4 +3 +3 +3 +2 +6 +3 +17 +5 +6 +4 +3 +13 +5 +3 +3 +3 +4 +9 +4 +2 +14 +12 +4 +5 +24 +4 +3 +37 +12 +11 +21 +3 +4 +3 +13 +4 +2 +3 +15 +4 +11 +4 +4 +3 +8 +3 +4 +4 +12 +8 +5 +3 +3 +4 +2 +220 +3 +5 +223 +3 +3 +3 +10 +3 +15 +4 +241 +9 +7 +3 +6 +6 +23 +4 +13 +7 +3 +4 +7 +4 +9 +3 +3 +4 +10 +5 +5 +1 +5 +24 +2 +4 +5 +5 +6 +14 +3 +8 +2 +3 +5 +13 +13 +3 +5 +2 +3 +15 +3 +4 +2 +10 +4 +4 +4 +5 +5 +3 +5 +3 +4 +7 +4 +27 +3 +6 +4 +15 +3 +5 +6 +6 +5 +4 +8 +3 +9 +2 +6 +3 +4 +3 +7 +4 
+18 +3 +11 +3 +3 +8 +9 +7 +24 +3 +219 +7 +10 +4 +5 +9 +12 +2 +5 +4 +4 +4 +3 +3 +19 +5 +8 +16 +8 +6 +22 +3 +23 +3 +242 +9 +4 +3 +3 +5 +7 +3 +3 +5 +8 +3 +7 +5 +14 +8 +10 +3 +4 +3 +7 +4 +6 +7 +4 +10 +4 +3 +11 +3 +7 +10 +3 +13 +6 +8 +12 +10 +5 +7 +9 +3 +4 +7 +7 +10 +8 +30 +9 +19 +4 +3 +19 +15 +4 +13 +3 +215 +223 +4 +7 +4 +8 +17 +16 +3 +7 +6 +5 +5 +4 +12 +3 +7 +4 +4 +13 +4 +5 +2 +5 +6 +5 +6 +6 +7 +10 +18 +23 +9 +3 +3 +6 +5 +2 +4 +2 +7 +3 +3 +2 +5 +5 +14 +10 +224 +6 +3 +4 +3 +7 +5 +9 +3 +6 +4 +2 +5 +11 +4 +3 +3 +2 +8 +4 +7 +4 +10 +7 +3 +3 +18 +18 +17 +3 +3 +3 +4 +5 +3 +3 +4 +12 +7 +3 +11 +13 +5 +4 +7 +13 +5 +4 +11 +3 +12 +3 +6 +4 +4 +21 +4 +6 +9 +5 +3 +10 +8 +4 +6 +4 +4 +6 +5 +4 +8 +6 +4 +6 +4 +4 +5 +9 +6 +3 +4 +2 +9 +3 +18 +2 +4 +3 +13 +3 +6 +6 +8 +7 +9 +3 +2 +16 +3 +4 +6 +3 +2 +33 +22 +14 +4 +9 +12 +4 +5 +6 +3 +23 +9 +4 +3 +5 +5 +3 +4 +5 +3 +5 +3 +10 +4 +5 +5 +8 +4 +4 +6 +8 +5 +4 +3 +4 +6 +3 +3 +3 +5 +9 +12 +6 +5 +9 +3 +5 +3 +2 +2 +2 +18 +3 +2 +21 +2 +5 +4 +6 +4 +5 +10 +3 +9 +3 +2 +10 +7 +3 +6 +6 +4 +4 +8 +12 +7 +3 +7 +3 +3 +9 +3 +4 +5 +4 +4 +5 +5 +10 +15 +4 +4 +14 +6 +227 +3 +14 +5 +216 +22 +5 +4 +2 +2 +6 +3 +4 +2 +9 +9 +4 +3 +28 +13 +11 +4 +5 +3 +3 +2 +3 +3 +5 +3 +4 +3 +5 +23 +26 +3 +4 +5 +6 +4 +6 +3 +5 +5 +3 +4 +3 +2 +2 +2 +7 +14 +3 +6 +7 +17 +2 +2 +15 +14 +16 +4 +6 +7 +13 +6 +4 +5 +6 +16 +3 +3 +28 +3 +6 +15 +3 +9 +2 +4 +6 +3 +3 +22 +4 +12 +6 +7 +2 +5 +4 +10 +3 +16 +6 +9 +2 +5 +12 +7 +5 +5 +5 +5 +2 +11 +9 +17 +4 +3 +11 +7 +3 +5 +15 +4 +3 +4 +211 +8 +7 +5 +4 +7 +6 +7 +6 +3 +6 +5 +6 +5 +3 +4 +4 +26 +4 +6 +10 +4 +4 +3 +2 +3 +3 +4 +5 +9 +3 +9 +4 +4 +5 +5 +8 +2 +4 +2 +3 +8 +4 +11 +19 +5 +8 +6 +3 +5 +6 +12 +3 +2 +4 +16 +12 +3 +4 +4 +8 +6 +5 +6 +6 +219 +8 +222 +6 +16 +3 +13 +19 +5 +4 +3 +11 +6 +10 +4 +7 +7 +12 +5 +3 +3 +5 +6 +10 +3 +8 +2 +5 +4 +7 +2 +4 +4 +2 +12 +9 +6 +4 +2 +40 +2 +4 +10 +4 +223 +4 +2 +20 +6 +7 +24 +5 +4 +5 +2 +20 +16 +6 +5 +13 +2 +3 +3 +19 +3 +2 +4 +5 +6 +7 +11 +12 +5 +6 +7 +7 +3 +5 +3 +5 +3 +14 +3 +4 +4 +2 +11 +1 +7 +3 +9 +6 +11 +12 +5 +8 +6 +221 +4 +2 
+12 +4 +3 +15 +4 +5 +226 +7 +218 +7 +5 +4 +5 +18 +4 +5 +9 +4 +4 +2 +9 +18 +18 +9 +5 +6 +6 +3 +3 +7 +3 +5 +4 +4 +4 +12 +3 +6 +31 +5 +4 +7 +3 +6 +5 +6 +5 +11 +2 +2 +11 +11 +6 +7 +5 +8 +7 +10 +5 +23 +7 +4 +3 +5 +34 +2 +5 +23 +7 +3 +6 +8 +4 +4 +4 +2 +5 +3 +8 +5 +4 +8 +25 +2 +3 +17 +8 +3 +4 +8 +7 +3 +15 +6 +5 +7 +21 +9 +5 +6 +6 +5 +3 +2 +3 +10 +3 +6 +3 +14 +7 +4 +4 +8 +7 +8 +2 +6 +12 +4 +213 +6 +5 +21 +8 +2 +5 +23 +3 +11 +2 +3 +6 +25 +2 +3 +6 +7 +6 +6 +4 +4 +6 +3 +17 +9 +7 +6 +4 +3 +10 +7 +2 +3 +3 +3 +11 +8 +3 +7 +6 +4 +14 +36 +3 +4 +3 +3 +22 +13 +21 +4 +2 +7 +4 +4 +17 +15 +3 +7 +11 +2 +4 +7 +6 +209 +6 +3 +2 +2 +24 +4 +9 +4 +3 +3 +3 +29 +2 +2 +4 +3 +3 +5 +4 +6 +3 +3 +2 +4 diff -Nru juju-core-2.0~beta15/src/github.com/beorn7/perks/quantile/example_test.go juju-core-2.0.0/src/github.com/beorn7/perks/quantile/example_test.go --- juju-core-2.0~beta15/src/github.com/beorn7/perks/quantile/example_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/beorn7/perks/quantile/example_test.go 2016-10-13 14:32:07.000000000 +0000 @@ -0,0 +1,121 @@ +// +build go1.1 + +package quantile_test + +import ( + "bufio" + "fmt" + "log" + "os" + "strconv" + "time" + + "github.com/beorn7/perks/quantile" +) + +func Example_simple() { + ch := make(chan float64) + go sendFloats(ch) + + // Compute the 50th, 90th, and 99th percentile. + q := quantile.NewTargeted(map[float64]float64{ + 0.50: 0.005, + 0.90: 0.001, + 0.99: 0.0001, + }) + for v := range ch { + q.Insert(v) + } + + fmt.Println("perc50:", q.Query(0.50)) + fmt.Println("perc90:", q.Query(0.90)) + fmt.Println("perc99:", q.Query(0.99)) + fmt.Println("count:", q.Count()) + // Output: + // perc50: 5 + // perc90: 16 + // perc99: 223 + // count: 2388 +} + +func Example_mergeMultipleStreams() { + // Scenario: + // We have multiple database shards. 
On each shard, there is a process + // collecting query response times from the database logs and inserting + // them into a Stream (created via NewTargeted(0.90)), much like the + // Simple example. These processes expose a network interface for us to + // ask them to serialize and send us the results of their + // Stream.Samples so we may Merge and Query them. + // + // NOTES: + // * These sample sets are small, allowing us to get them + // across the network much faster than sending the entire list of data + // points. + // + // * For this to work correctly, we must supply the same quantiles + // a priori the process collecting the samples supplied to NewTargeted, + // even if we do not plan to query them all here. + ch := make(chan quantile.Samples) + getDBQuerySamples(ch) + q := quantile.NewTargeted(map[float64]float64{0.90: 0.001}) + for samples := range ch { + q.Merge(samples) + } + fmt.Println("perc90:", q.Query(0.90)) +} + +func Example_window() { + // Scenario: We want the 90th, 95th, and 99th percentiles for each + // minute. + + ch := make(chan float64) + go sendStreamValues(ch) + + tick := time.NewTicker(1 * time.Minute) + q := quantile.NewTargeted(map[float64]float64{ + 0.90: 0.001, + 0.95: 0.0005, + 0.99: 0.0001, + }) + for { + select { + case t := <-tick.C: + flushToDB(t, q.Samples()) + q.Reset() + case v := <-ch: + q.Insert(v) + } + } +} + +func sendStreamValues(ch chan float64) { + // Use your imagination +} + +func flushToDB(t time.Time, samples quantile.Samples) { + // Use your imagination +} + +// This is a stub for the above example. In reality this would hit the remote +// servers via http or something like it. 
+func getDBQuerySamples(ch chan quantile.Samples) {} + +func sendFloats(ch chan<- float64) { + f, err := os.Open("exampledata.txt") + if err != nil { + log.Fatal(err) + } + sc := bufio.NewScanner(f) + for sc.Scan() { + b := sc.Bytes() + v, err := strconv.ParseFloat(string(b), 64) + if err != nil { + log.Fatal(err) + } + ch <- v + } + if sc.Err() != nil { + log.Fatal(sc.Err()) + } + close(ch) +} diff -Nru juju-core-2.0~beta15/src/github.com/beorn7/perks/quantile/stream.go juju-core-2.0.0/src/github.com/beorn7/perks/quantile/stream.go --- juju-core-2.0~beta15/src/github.com/beorn7/perks/quantile/stream.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/beorn7/perks/quantile/stream.go 2016-10-13 14:32:07.000000000 +0000 @@ -0,0 +1,292 @@ +// Package quantile computes approximate quantiles over an unbounded data +// stream within low memory and CPU bounds. +// +// A small amount of accuracy is traded to achieve the above properties. +// +// Multiple streams can be merged before calling Query to generate a single set +// of results. This is meaningful when the streams represent the same type of +// data. See Merge and Samples. +// +// For more detailed information about the algorithm used, see: +// +// Effective Computation of Biased Quantiles over Data Streams +// +// http://www.cs.rutgers.edu/~muthu/bquant.pdf +package quantile + +import ( + "math" + "sort" +) + +// Sample holds an observed value and meta information for compression. JSON +// tags have been added for convenience. +type Sample struct { + Value float64 `json:",string"` + Width float64 `json:",string"` + Delta float64 `json:",string"` +} + +// Samples represents a slice of samples. It implements sort.Interface. 
+type Samples []Sample + +func (a Samples) Len() int { return len(a) } +func (a Samples) Less(i, j int) bool { return a[i].Value < a[j].Value } +func (a Samples) Swap(i, j int) { a[i], a[j] = a[j], a[i] } + +type invariant func(s *stream, r float64) float64 + +// NewLowBiased returns an initialized Stream for low-biased quantiles +// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but +// error guarantees can still be given even for the lower ranks of the data +// distribution. +// +// The provided epsilon is a relative error, i.e. the true quantile of a value +// returned by a query is guaranteed to be within (1±Epsilon)*Quantile. +// +// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error +// properties. +func NewLowBiased(epsilon float64) *Stream { + Æ’ := func(s *stream, r float64) float64 { + return 2 * epsilon * r + } + return newStream(Æ’) +} + +// NewHighBiased returns an initialized Stream for high-biased quantiles +// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but +// error guarantees can still be given even for the higher ranks of the data +// distribution. +// +// The provided epsilon is a relative error, i.e. the true quantile of a value +// returned by a query is guaranteed to be within 1-(1±Epsilon)*(1-Quantile). +// +// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error +// properties. +func NewHighBiased(epsilon float64) *Stream { + Æ’ := func(s *stream, r float64) float64 { + return 2 * epsilon * (s.n - r) + } + return newStream(Æ’) +} + +// NewTargeted returns an initialized Stream concerned with a particular set of +// quantile values that are supplied a priori. Knowing these a priori reduces +// space and computation time. The targets map maps the desired quantiles to +// their absolute errors, i.e. the true quantile of a value returned by a query +// is guaranteed to be within (Quantile±Epsilon). 
+// +// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error properties. +func NewTargeted(targets map[float64]float64) *Stream { + Æ’ := func(s *stream, r float64) float64 { + var m = math.MaxFloat64 + var f float64 + for quantile, epsilon := range targets { + if quantile*s.n <= r { + f = (2 * epsilon * r) / quantile + } else { + f = (2 * epsilon * (s.n - r)) / (1 - quantile) + } + if f < m { + m = f + } + } + return m + } + return newStream(Æ’) +} + +// Stream computes quantiles for a stream of float64s. It is not thread-safe by +// design. Take care when using across multiple goroutines. +type Stream struct { + *stream + b Samples + sorted bool +} + +func newStream(Æ’ invariant) *Stream { + x := &stream{Æ’: Æ’} + return &Stream{x, make(Samples, 0, 500), true} +} + +// Insert inserts v into the stream. +func (s *Stream) Insert(v float64) { + s.insert(Sample{Value: v, Width: 1}) +} + +func (s *Stream) insert(sample Sample) { + s.b = append(s.b, sample) + s.sorted = false + if len(s.b) == cap(s.b) { + s.flush() + } +} + +// Query returns the computed qth percentiles value. If s was created with +// NewTargeted, and q is not in the set of quantiles provided a priori, Query +// will return an unspecified result. +func (s *Stream) Query(q float64) float64 { + if !s.flushed() { + // Fast path when there hasn't been enough data for a flush; + // this also yields better accuracy for small sets of data. + l := len(s.b) + if l == 0 { + return 0 + } + i := int(float64(l) * q) + if i > 0 { + i -= 1 + } + s.maybeSort() + return s.b[i].Value + } + s.flush() + return s.stream.query(q) +} + +// Merge merges samples into the underlying streams samples. This is handy when +// merging multiple streams from separate threads, database shards, etc. +// +// ATTENTION: This method is broken and does not yield correct results. The +// underlying algorithm is not capable of merging streams correctly. 
+func (s *Stream) Merge(samples Samples) { + sort.Sort(samples) + s.stream.merge(samples) +} + +// Reset reinitializes and clears the list reusing the samples buffer memory. +func (s *Stream) Reset() { + s.stream.reset() + s.b = s.b[:0] +} + +// Samples returns stream samples held by s. +func (s *Stream) Samples() Samples { + if !s.flushed() { + return s.b + } + s.flush() + return s.stream.samples() +} + +// Count returns the total number of samples observed in the stream +// since initialization. +func (s *Stream) Count() int { + return len(s.b) + s.stream.count() +} + +func (s *Stream) flush() { + s.maybeSort() + s.stream.merge(s.b) + s.b = s.b[:0] +} + +func (s *Stream) maybeSort() { + if !s.sorted { + s.sorted = true + sort.Sort(s.b) + } +} + +func (s *Stream) flushed() bool { + return len(s.stream.l) > 0 +} + +type stream struct { + n float64 + l []Sample + Æ’ invariant +} + +func (s *stream) reset() { + s.l = s.l[:0] + s.n = 0 +} + +func (s *stream) insert(v float64) { + s.merge(Samples{{v, 1, 0}}) +} + +func (s *stream) merge(samples Samples) { + // TODO(beorn7): This tries to merge not only individual samples, but + // whole summaries. The paper doesn't mention merging summaries at + // all. Unittests show that the merging is inaccurate. Find out how to + // do merges properly. + var r float64 + i := 0 + for _, sample := range samples { + for ; i < len(s.l); i++ { + c := s.l[i] + if c.Value > sample.Value { + // Insert at position i. + s.l = append(s.l, Sample{}) + copy(s.l[i+1:], s.l[i:]) + s.l[i] = Sample{ + sample.Value, + sample.Width, + math.Max(sample.Delta, math.Floor(s.Æ’(s, r))-1), + // TODO(beorn7): How to calculate delta correctly? 
+ } + i++ + goto inserted + } + r += c.Width + } + s.l = append(s.l, Sample{sample.Value, sample.Width, 0}) + i++ + inserted: + s.n += sample.Width + r += sample.Width + } + s.compress() +} + +func (s *stream) count() int { + return int(s.n) +} + +func (s *stream) query(q float64) float64 { + t := math.Ceil(q * s.n) + t += math.Ceil(s.Æ’(s, t) / 2) + p := s.l[0] + var r float64 + for _, c := range s.l[1:] { + r += p.Width + if r+c.Width+c.Delta > t { + return p.Value + } + p = c + } + return p.Value +} + +func (s *stream) compress() { + if len(s.l) < 2 { + return + } + x := s.l[len(s.l)-1] + xi := len(s.l) - 1 + r := s.n - 1 - x.Width + + for i := len(s.l) - 2; i >= 0; i-- { + c := s.l[i] + if c.Width+x.Width+x.Delta <= s.Æ’(s, r) { + x.Width += c.Width + s.l[xi] = x + // Remove element at i. + copy(s.l[i:], s.l[i+1:]) + s.l = s.l[:len(s.l)-1] + xi -= 1 + } else { + x = c + xi = i + } + r -= c.Width + } +} + +func (s *stream) samples() Samples { + samples := make(Samples, len(s.l)) + copy(samples, s.l) + return samples +} diff -Nru juju-core-2.0~beta15/src/github.com/beorn7/perks/quantile/stream_test.go juju-core-2.0.0/src/github.com/beorn7/perks/quantile/stream_test.go --- juju-core-2.0~beta15/src/github.com/beorn7/perks/quantile/stream_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/beorn7/perks/quantile/stream_test.go 2016-10-13 14:32:07.000000000 +0000 @@ -0,0 +1,188 @@ +package quantile + +import ( + "math" + "math/rand" + "sort" + "testing" +) + +var ( + Targets = map[float64]float64{ + 0.01: 0.001, + 0.10: 0.01, + 0.50: 0.05, + 0.90: 0.01, + 0.99: 0.001, + } + TargetsSmallEpsilon = map[float64]float64{ + 0.01: 0.0001, + 0.10: 0.001, + 0.50: 0.005, + 0.90: 0.001, + 0.99: 0.0001, + } + LowQuantiles = []float64{0.01, 0.1, 0.5} + HighQuantiles = []float64{0.99, 0.9, 0.5} +) + +const RelativeEpsilon = 0.01 + +func verifyPercsWithAbsoluteEpsilon(t *testing.T, a []float64, s *Stream) { + sort.Float64s(a) + for quantile, epsilon := 
range Targets { + n := float64(len(a)) + k := int(quantile * n) + lower := int((quantile - epsilon) * n) + if lower < 1 { + lower = 1 + } + upper := int(math.Ceil((quantile + epsilon) * n)) + if upper > len(a) { + upper = len(a) + } + w, min, max := a[k-1], a[lower-1], a[upper-1] + if g := s.Query(quantile); g < min || g > max { + t.Errorf("q=%f: want %v [%f,%f], got %v", quantile, w, min, max, g) + } + } +} + +func verifyLowPercsWithRelativeEpsilon(t *testing.T, a []float64, s *Stream) { + sort.Float64s(a) + for _, qu := range LowQuantiles { + n := float64(len(a)) + k := int(qu * n) + + lowerRank := int((1 - RelativeEpsilon) * qu * n) + upperRank := int(math.Ceil((1 + RelativeEpsilon) * qu * n)) + w, min, max := a[k-1], a[lowerRank-1], a[upperRank-1] + if g := s.Query(qu); g < min || g > max { + t.Errorf("q=%f: want %v [%f,%f], got %v", qu, w, min, max, g) + } + } +} + +func verifyHighPercsWithRelativeEpsilon(t *testing.T, a []float64, s *Stream) { + sort.Float64s(a) + for _, qu := range HighQuantiles { + n := float64(len(a)) + k := int(qu * n) + + lowerRank := int((1 - (1+RelativeEpsilon)*(1-qu)) * n) + upperRank := int(math.Ceil((1 - (1-RelativeEpsilon)*(1-qu)) * n)) + w, min, max := a[k-1], a[lowerRank-1], a[upperRank-1] + if g := s.Query(qu); g < min || g > max { + t.Errorf("q=%f: want %v [%f,%f], got %v", qu, w, min, max, g) + } + } +} + +func populateStream(s *Stream) []float64 { + a := make([]float64, 0, 1e5+100) + for i := 0; i < cap(a); i++ { + v := rand.NormFloat64() + // Add 5% asymmetric outliers. 
+ if i%20 == 0 { + v = v*v + 1 + } + s.Insert(v) + a = append(a, v) + } + return a +} + +func TestTargetedQuery(t *testing.T) { + rand.Seed(42) + s := NewTargeted(Targets) + a := populateStream(s) + verifyPercsWithAbsoluteEpsilon(t, a, s) +} + +func TestLowBiasedQuery(t *testing.T) { + rand.Seed(42) + s := NewLowBiased(RelativeEpsilon) + a := populateStream(s) + verifyLowPercsWithRelativeEpsilon(t, a, s) +} + +func TestHighBiasedQuery(t *testing.T) { + rand.Seed(42) + s := NewHighBiased(RelativeEpsilon) + a := populateStream(s) + verifyHighPercsWithRelativeEpsilon(t, a, s) +} + +// BrokenTestTargetedMerge is broken, see Merge doc comment. +func BrokenTestTargetedMerge(t *testing.T) { + rand.Seed(42) + s1 := NewTargeted(Targets) + s2 := NewTargeted(Targets) + a := populateStream(s1) + a = append(a, populateStream(s2)...) + s1.Merge(s2.Samples()) + verifyPercsWithAbsoluteEpsilon(t, a, s1) +} + +// BrokenTestLowBiasedMerge is broken, see Merge doc comment. +func BrokenTestLowBiasedMerge(t *testing.T) { + rand.Seed(42) + s1 := NewLowBiased(RelativeEpsilon) + s2 := NewLowBiased(RelativeEpsilon) + a := populateStream(s1) + a = append(a, populateStream(s2)...) + s1.Merge(s2.Samples()) + verifyLowPercsWithRelativeEpsilon(t, a, s2) +} + +// BrokenTestHighBiasedMerge is broken, see Merge doc comment. +func BrokenTestHighBiasedMerge(t *testing.T) { + rand.Seed(42) + s1 := NewHighBiased(RelativeEpsilon) + s2 := NewHighBiased(RelativeEpsilon) + a := populateStream(s1) + a = append(a, populateStream(s2)...) + s1.Merge(s2.Samples()) + verifyHighPercsWithRelativeEpsilon(t, a, s2) +} + +func TestUncompressed(t *testing.T) { + q := NewTargeted(Targets) + for i := 100; i > 0; i-- { + q.Insert(float64(i)) + } + if g := q.Count(); g != 100 { + t.Errorf("want count 100, got %d", g) + } + // Before compression, Query should have 100% accuracy. 
+ for quantile := range Targets { + w := quantile * 100 + if g := q.Query(quantile); g != w { + t.Errorf("want %f, got %f", w, g) + } + } +} + +func TestUncompressedSamples(t *testing.T) { + q := NewTargeted(map[float64]float64{0.99: 0.001}) + for i := 1; i <= 100; i++ { + q.Insert(float64(i)) + } + if g := q.Samples().Len(); g != 100 { + t.Errorf("want count 100, got %d", g) + } +} + +func TestUncompressedOne(t *testing.T) { + q := NewTargeted(map[float64]float64{0.99: 0.01}) + q.Insert(3.14) + if g := q.Query(0.90); g != 3.14 { + t.Error("want PI, got", g) + } +} + +func TestDefaults(t *testing.T) { + if g := NewTargeted(map[float64]float64{0.99: 0.001}).Query(0.99); g != 0 { + t.Errorf("want 0, got %f", g) + } +} diff -Nru juju-core-2.0~beta15/src/github.com/beorn7/perks/README.md juju-core-2.0.0/src/github.com/beorn7/perks/README.md --- juju-core-2.0~beta15/src/github.com/beorn7/perks/README.md 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/beorn7/perks/README.md 2016-10-13 14:32:07.000000000 +0000 @@ -0,0 +1,31 @@ +# Perks for Go (golang.org) + +Perks contains the Go package quantile that computes approximate quantiles over +an unbounded data stream within low memory and CPU bounds. + +For more information and examples, see: +http://godoc.org/github.com/bmizerany/perks + +A very special thank you and shout out to Graham Cormode (Rutgers University), +Flip Korn (AT&T Labs–Research), S. Muthukrishnan (Rutgers University), and +Divesh Srivastava (AT&T Labs–Research) for their research and publication of +[Effective Computation of Biased Quantiles over Data Streams](http://www.cs.rutgers.edu/~muthu/bquant.pdf) + +Thank you, also: +* Armon Dadgar (@armon) +* Andrew Gerrand (@nf) +* Brad Fitzpatrick (@bradfitz) +* Keith Rarick (@kr) + +FAQ: + +Q: Why not move the quantile package into the project root? +A: I want to add more packages to perks later. 
+ +Copyright (C) 2013 Blake Mizerany + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff -Nru juju-core-2.0~beta15/src/github.com/beorn7/perks/topk/topk.go juju-core-2.0.0/src/github.com/beorn7/perks/topk/topk.go --- juju-core-2.0~beta15/src/github.com/beorn7/perks/topk/topk.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/beorn7/perks/topk/topk.go 2016-10-13 14:32:07.000000000 +0000 @@ -0,0 +1,90 @@ +package topk + +import ( + "sort" +) + +// http://www.cs.ucsb.edu/research/tech_reports/reports/2005-23.pdf + +type Element struct { + Value string + Count int +} + +type Samples []*Element + +func (sm Samples) Len() int { + return len(sm) +} + +func (sm Samples) Less(i, j int) bool { + return sm[i].Count < sm[j].Count +} + +func (sm Samples) Swap(i, j int) { + sm[i], sm[j] = sm[j], sm[i] +} + +type Stream struct { + k int + mon map[string]*Element + + // the minimum Element + min *Element +} + +func New(k int) *Stream { + s := new(Stream) + s.k = k + s.mon = make(map[string]*Element) + s.min = &Element{} + + // Track k+1 so that less frequenet items contended for that spot, + // resulting in k being more accurate. 
+ return s +} + +func (s *Stream) Insert(x string) { + s.insert(&Element{x, 1}) +} + +func (s *Stream) Merge(sm Samples) { + for _, e := range sm { + s.insert(e) + } +} + +func (s *Stream) insert(in *Element) { + e := s.mon[in.Value] + if e != nil { + e.Count++ + } else { + if len(s.mon) < s.k+1 { + e = &Element{in.Value, in.Count} + s.mon[in.Value] = e + } else { + e = s.min + delete(s.mon, e.Value) + e.Value = in.Value + e.Count += in.Count + s.min = e + } + } + if e.Count < s.min.Count { + s.min = e + } +} + +func (s *Stream) Query() Samples { + var sm Samples + for _, e := range s.mon { + sm = append(sm, e) + } + sort.Sort(sort.Reverse(sm)) + + if len(sm) < s.k { + return sm + } + + return sm[:s.k] +} diff -Nru juju-core-2.0~beta15/src/github.com/beorn7/perks/topk/topk_test.go juju-core-2.0.0/src/github.com/beorn7/perks/topk/topk_test.go --- juju-core-2.0~beta15/src/github.com/beorn7/perks/topk/topk_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/beorn7/perks/topk/topk_test.go 2016-10-13 14:32:07.000000000 +0000 @@ -0,0 +1,57 @@ +package topk + +import ( + "fmt" + "math/rand" + "sort" + "testing" +) + +func TestTopK(t *testing.T) { + stream := New(10) + ss := []*Stream{New(10), New(10), New(10)} + m := make(map[string]int) + for _, s := range ss { + for i := 0; i < 1e6; i++ { + v := fmt.Sprintf("%x", int8(rand.ExpFloat64())) + s.Insert(v) + m[v]++ + } + stream.Merge(s.Query()) + } + + var sm Samples + for x, s := range m { + sm = append(sm, &Element{x, s}) + } + sort.Sort(sort.Reverse(sm)) + + g := stream.Query() + if len(g) != 10 { + t.Fatalf("got %d, want 10", len(g)) + } + for i, e := range g { + if sm[i].Value != e.Value { + t.Errorf("at %d: want %q, got %q", i, sm[i].Value, e.Value) + } + } +} + +func TestQuery(t *testing.T) { + queryTests := []struct { + value string + expected int + }{ + {"a", 1}, + {"b", 2}, + {"c", 2}, + } + + stream := New(2) + for _, tt := range queryTests { + stream.Insert(tt.value) + if n := 
len(stream.Query()); n != tt.expected { + t.Errorf("want %d, got %d", tt.expected, n) + } + } +} diff -Nru juju-core-2.0~beta15/src/github.com/bmizerany/pat/.gitignore juju-core-2.0.0/src/github.com/bmizerany/pat/.gitignore --- juju-core-2.0~beta15/src/github.com/bmizerany/pat/.gitignore 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/bmizerany/pat/.gitignore 2016-10-13 14:32:32.000000000 +0000 @@ -0,0 +1,3 @@ +*.prof +*.out +example/example diff -Nru juju-core-2.0~beta15/src/github.com/dgrijalva/jwt-go/claims.go juju-core-2.0.0/src/github.com/dgrijalva/jwt-go/claims.go --- juju-core-2.0~beta15/src/github.com/dgrijalva/jwt-go/claims.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/dgrijalva/jwt-go/claims.go 2016-10-13 14:32:22.000000000 +0000 @@ -0,0 +1,134 @@ +package jwt + +import ( + "crypto/subtle" + "fmt" + "time" +) + +// For a type to be a Claims object, it must just have a Valid method that determines +// if the token is invalid for any supported reason +type Claims interface { + Valid() error +} + +// Structured version of Claims Section, as referenced at +// https://tools.ietf.org/html/rfc7519#section-4.1 +// See examples for how to use this with your own claim types +type StandardClaims struct { + Audience string `json:"aud,omitempty"` + ExpiresAt int64 `json:"exp,omitempty"` + Id string `json:"jti,omitempty"` + IssuedAt int64 `json:"iat,omitempty"` + Issuer string `json:"iss,omitempty"` + NotBefore int64 `json:"nbf,omitempty"` + Subject string `json:"sub,omitempty"` +} + +// Validates time based claims "exp, iat, nbf". +// There is no accounting for clock skew. +// As well, if any of the above claims are not in the token, it will still +// be considered a valid claim. +func (c StandardClaims) Valid() error { + vErr := new(ValidationError) + now := TimeFunc().Unix() + + // The claims below are optional, by default, so if they are set to the + // default value in Go, let's not fail the verification for them. 
+ if c.VerifyExpiresAt(now, false) == false { + delta := time.Unix(now, 0).Sub(time.Unix(c.ExpiresAt, 0)) + vErr.Inner = fmt.Errorf("token is expired by %v", delta) + vErr.Errors |= ValidationErrorExpired + } + + if c.VerifyIssuedAt(now, false) == false { + vErr.Inner = fmt.Errorf("Token used before issued") + vErr.Errors |= ValidationErrorIssuedAt + } + + if c.VerifyNotBefore(now, false) == false { + vErr.Inner = fmt.Errorf("token is not valid yet") + vErr.Errors |= ValidationErrorNotValidYet + } + + if vErr.valid() { + return nil + } + + return vErr +} + +// Compares the aud claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (c *StandardClaims) VerifyAudience(cmp string, req bool) bool { + return verifyAud(c.Audience, cmp, req) +} + +// Compares the exp claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (c *StandardClaims) VerifyExpiresAt(cmp int64, req bool) bool { + return verifyExp(c.ExpiresAt, cmp, req) +} + +// Compares the iat claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (c *StandardClaims) VerifyIssuedAt(cmp int64, req bool) bool { + return verifyIat(c.IssuedAt, cmp, req) +} + +// Compares the iss claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (c *StandardClaims) VerifyIssuer(cmp string, req bool) bool { + return verifyIss(c.Issuer, cmp, req) +} + +// Compares the nbf claim against cmp. 
+// If required is false, this method will return true if the value matches or is unset +func (c *StandardClaims) VerifyNotBefore(cmp int64, req bool) bool { + return verifyNbf(c.NotBefore, cmp, req) +} + +// ----- helpers + +func verifyAud(aud string, cmp string, required bool) bool { + if aud == "" { + return !required + } + if subtle.ConstantTimeCompare([]byte(aud), []byte(cmp)) != 0 { + return true + } else { + return false + } +} + +func verifyExp(exp int64, now int64, required bool) bool { + if exp == 0 { + return !required + } + return now <= exp +} + +func verifyIat(iat int64, now int64, required bool) bool { + if iat == 0 { + return !required + } + return now >= iat +} + +func verifyIss(iss string, cmp string, required bool) bool { + if iss == "" { + return !required + } + if subtle.ConstantTimeCompare([]byte(iss), []byte(cmp)) != 0 { + return true + } else { + return false + } +} + +func verifyNbf(nbf int64, now int64, required bool) bool { + if nbf == 0 { + return !required + } + return now >= nbf +} diff -Nru juju-core-2.0~beta15/src/github.com/dgrijalva/jwt-go/cmd/jwt/app.go juju-core-2.0.0/src/github.com/dgrijalva/jwt-go/cmd/jwt/app.go --- juju-core-2.0~beta15/src/github.com/dgrijalva/jwt-go/cmd/jwt/app.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/dgrijalva/jwt-go/cmd/jwt/app.go 2016-10-13 14:32:22.000000000 +0000 @@ -0,0 +1,245 @@ +// A useful example app. You can use this to debug your tokens on the command line. +// This is also a great place to look at how you might use this library. +// +// Example usage: +// The following will create and sign a token, then verify it and output the original claims. 
+// echo {\"foo\":\"bar\"} | bin/jwt -key test/sample_key -alg RS256 -sign - | bin/jwt -key test/sample_key.pub -verify - +package main + +import ( + "encoding/json" + "flag" + "fmt" + "io" + "io/ioutil" + "os" + "regexp" + "strings" + + jwt "github.com/dgrijalva/jwt-go" +) + +var ( + // Options + flagAlg = flag.String("alg", "", "signing algorithm identifier") + flagKey = flag.String("key", "", "path to key file or '-' to read from stdin") + flagCompact = flag.Bool("compact", false, "output compact JSON") + flagDebug = flag.Bool("debug", false, "print out all kinds of debug data") + + // Modes - exactly one of these is required + flagSign = flag.String("sign", "", "path to claims object to sign or '-' to read from stdin") + flagVerify = flag.String("verify", "", "path to JWT token to verify or '-' to read from stdin") + flagShow = flag.String("show", "", "path to JWT file or '-' to read from stdin") +) + +func main() { + // Usage message if you ask for -help or if you mess up inputs. + flag.Usage = func() { + fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0]) + fmt.Fprintf(os.Stderr, " One of the following flags is required: sign, verify\n") + flag.PrintDefaults() + } + + // Parse command line options + flag.Parse() + + // Do the thing. If something goes wrong, print error to stderr + // and exit with a non-zero status code + if err := start(); err != nil { + fmt.Fprintf(os.Stderr, "Error: %v\n", err) + os.Exit(1) + } +} + +// Figure out which thing to do and then do that +func start() error { + if *flagSign != "" { + return signToken() + } else if *flagVerify != "" { + return verifyToken() + } else if *flagShow != "" { + return showToken() + } else { + flag.Usage() + return fmt.Errorf("None of the required flags are present. 
What do you want me to do?") + } +} + +// Helper func: Read input from specified file or stdin +func loadData(p string) ([]byte, error) { + if p == "" { + return nil, fmt.Errorf("No path specified") + } + + var rdr io.Reader + if p == "-" { + rdr = os.Stdin + } else { + if f, err := os.Open(p); err == nil { + rdr = f + defer f.Close() + } else { + return nil, err + } + } + return ioutil.ReadAll(rdr) +} + +// Print a json object in accordance with the prophecy (or the command line options) +func printJSON(j interface{}) error { + var out []byte + var err error + + if *flagCompact == false { + out, err = json.MarshalIndent(j, "", " ") + } else { + out, err = json.Marshal(j) + } + + if err == nil { + fmt.Println(string(out)) + } + + return err +} + +// Verify a token and output the claims. This is a great example +// of how to verify and view a token. +func verifyToken() error { + // get the token + tokData, err := loadData(*flagVerify) + if err != nil { + return fmt.Errorf("Couldn't read token: %v", err) + } + + // trim possible whitespace from token + tokData = regexp.MustCompile(`\s*$`).ReplaceAll(tokData, []byte{}) + if *flagDebug { + fmt.Fprintf(os.Stderr, "Token len: %v bytes\n", len(tokData)) + } + + // Parse the token. Load the key from command line option + token, err := jwt.Parse(string(tokData), func(t *jwt.Token) (interface{}, error) { + data, err := loadData(*flagKey) + if err != nil { + return nil, err + } + if isEs() { + return jwt.ParseECPublicKeyFromPEM(data) + } + return data, nil + }) + + // Print some debug data + if *flagDebug && token != nil { + fmt.Fprintf(os.Stderr, "Header:\n%v\n", token.Header) + fmt.Fprintf(os.Stderr, "Claims:\n%v\n", token.Claims) + } + + // Print an error if we can't parse for some reason + if err != nil { + return fmt.Errorf("Couldn't parse token: %v", err) + } + + // Is token invalid? 
+ if !token.Valid { + return fmt.Errorf("Token is invalid") + } + + // Print the token details + if err := printJSON(token.Claims); err != nil { + return fmt.Errorf("Failed to output claims: %v", err) + } + + return nil +} + +// Create, sign, and output a token. This is a great, simple example of +// how to use this library to create and sign a token. +func signToken() error { + // get the token data from command line arguments + tokData, err := loadData(*flagSign) + if err != nil { + return fmt.Errorf("Couldn't read token: %v", err) + } else if *flagDebug { + fmt.Fprintf(os.Stderr, "Token: %v bytes", len(tokData)) + } + + // parse the JSON of the claims + var claims jwt.MapClaims + if err := json.Unmarshal(tokData, &claims); err != nil { + return fmt.Errorf("Couldn't parse claims JSON: %v", err) + } + + // get the key + var key interface{} + key, err = loadData(*flagKey) + if err != nil { + return fmt.Errorf("Couldn't read key: %v", err) + } + + // get the signing alg + alg := jwt.GetSigningMethod(*flagAlg) + if alg == nil { + return fmt.Errorf("Couldn't find signing method: %v", *flagAlg) + } + + // create a new token + token := jwt.NewWithClaims(alg, claims) + + if isEs() { + if k, ok := key.([]byte); !ok { + return fmt.Errorf("Couldn't convert key data to key") + } else { + key, err = jwt.ParseECPrivateKeyFromPEM(k) + if err != nil { + return err + } + } + } + + if out, err := token.SignedString(key); err == nil { + fmt.Println(out) + } else { + return fmt.Errorf("Error signing token: %v", err) + } + + return nil +} + +// showToken pretty-prints the token on the command line. 
+func showToken() error { + // get the token + tokData, err := loadData(*flagShow) + if err != nil { + return fmt.Errorf("Couldn't read token: %v", err) + } + + // trim possible whitespace from token + tokData = regexp.MustCompile(`\s*$`).ReplaceAll(tokData, []byte{}) + if *flagDebug { + fmt.Fprintf(os.Stderr, "Token len: %v bytes\n", len(tokData)) + } + + token, err := jwt.Parse(string(tokData), nil) + if token == nil { + return fmt.Errorf("malformed token: %v", err) + } + + // Print the token details + fmt.Println("Header:") + if err := printJSON(token.Header); err != nil { + return fmt.Errorf("Failed to output header: %v", err) + } + + fmt.Println("Claims:") + if err := printJSON(token.Claims); err != nil { + return fmt.Errorf("Failed to output claims: %v", err) + } + + return nil +} + +func isEs() bool { + return strings.HasPrefix(*flagAlg, "ES") +} diff -Nru juju-core-2.0~beta15/src/github.com/dgrijalva/jwt-go/cmd/jwt/README.md juju-core-2.0.0/src/github.com/dgrijalva/jwt-go/cmd/jwt/README.md --- juju-core-2.0~beta15/src/github.com/dgrijalva/jwt-go/cmd/jwt/README.md 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/dgrijalva/jwt-go/cmd/jwt/README.md 2016-10-13 14:32:22.000000000 +0000 @@ -0,0 +1,13 @@ +`jwt` command-line tool +======================= + +This is a simple tool to sign, verify and show JSON Web Tokens from +the command line. 
+ +The following will create and sign a token, then verify it and output the original claims: + + echo {\"foo\":\"bar\"} | bin/jwt -key test/sample_key -alg RS256 -sign - | bin/jwt -key test/sample_key.pub -verify - + +To simply display a token, use: + + echo $JWT | jwt -show - diff -Nru juju-core-2.0~beta15/src/github.com/dgrijalva/jwt-go/doc.go juju-core-2.0.0/src/github.com/dgrijalva/jwt-go/doc.go --- juju-core-2.0~beta15/src/github.com/dgrijalva/jwt-go/doc.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/dgrijalva/jwt-go/doc.go 2016-10-13 14:32:22.000000000 +0000 @@ -0,0 +1,4 @@ +// Package jwt is a Go implementation of JSON Web Tokens: http://self-issued.info/docs/draft-jones-json-web-token.html +// +// See README.md for more info. +package jwt diff -Nru juju-core-2.0~beta15/src/github.com/dgrijalva/jwt-go/ecdsa.go juju-core-2.0.0/src/github.com/dgrijalva/jwt-go/ecdsa.go --- juju-core-2.0~beta15/src/github.com/dgrijalva/jwt-go/ecdsa.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/dgrijalva/jwt-go/ecdsa.go 2016-10-13 14:32:22.000000000 +0000 @@ -0,0 +1,147 @@ +package jwt + +import ( + "crypto" + "crypto/ecdsa" + "crypto/rand" + "errors" + "math/big" +) + +var ( + // Sadly this is missing from crypto/ecdsa compared to crypto/rsa + ErrECDSAVerification = errors.New("crypto/ecdsa: verification error") +) + +// Implements the ECDSA family of signing methods signing methods +type SigningMethodECDSA struct { + Name string + Hash crypto.Hash + KeySize int + CurveBits int +} + +// Specific instances for EC256 and company +var ( + SigningMethodES256 *SigningMethodECDSA + SigningMethodES384 *SigningMethodECDSA + SigningMethodES512 *SigningMethodECDSA +) + +func init() { + // ES256 + SigningMethodES256 = &SigningMethodECDSA{"ES256", crypto.SHA256, 32, 256} + RegisterSigningMethod(SigningMethodES256.Alg(), func() SigningMethod { + return SigningMethodES256 + }) + + // ES384 + SigningMethodES384 = 
&SigningMethodECDSA{"ES384", crypto.SHA384, 48, 384} + RegisterSigningMethod(SigningMethodES384.Alg(), func() SigningMethod { + return SigningMethodES384 + }) + + // ES512 + SigningMethodES512 = &SigningMethodECDSA{"ES512", crypto.SHA512, 66, 521} + RegisterSigningMethod(SigningMethodES512.Alg(), func() SigningMethod { + return SigningMethodES512 + }) +} + +func (m *SigningMethodECDSA) Alg() string { + return m.Name +} + +// Implements the Verify method from SigningMethod +// For this verify method, key must be an ecdsa.PublicKey struct +func (m *SigningMethodECDSA) Verify(signingString, signature string, key interface{}) error { + var err error + + // Decode the signature + var sig []byte + if sig, err = DecodeSegment(signature); err != nil { + return err + } + + // Get the key + var ecdsaKey *ecdsa.PublicKey + switch k := key.(type) { + case *ecdsa.PublicKey: + ecdsaKey = k + default: + return ErrInvalidKeyType + } + + if len(sig) != 2*m.KeySize { + return ErrECDSAVerification + } + + r := big.NewInt(0).SetBytes(sig[:m.KeySize]) + s := big.NewInt(0).SetBytes(sig[m.KeySize:]) + + // Create hasher + if !m.Hash.Available() { + return ErrHashUnavailable + } + hasher := m.Hash.New() + hasher.Write([]byte(signingString)) + + // Verify the signature + if verifystatus := ecdsa.Verify(ecdsaKey, hasher.Sum(nil), r, s); verifystatus == true { + return nil + } else { + return ErrECDSAVerification + } +} + +// Implements the Sign method from SigningMethod +// For this signing method, key must be an ecdsa.PrivateKey struct +func (m *SigningMethodECDSA) Sign(signingString string, key interface{}) (string, error) { + // Get the key + var ecdsaKey *ecdsa.PrivateKey + switch k := key.(type) { + case *ecdsa.PrivateKey: + ecdsaKey = k + default: + return "", ErrInvalidKeyType + } + + // Create the hasher + if !m.Hash.Available() { + return "", ErrHashUnavailable + } + + hasher := m.Hash.New() + hasher.Write([]byte(signingString)) + + // Sign the string and return r, s + if r, s, err 
:= ecdsa.Sign(rand.Reader, ecdsaKey, hasher.Sum(nil)); err == nil { + curveBits := ecdsaKey.Curve.Params().BitSize + + if m.CurveBits != curveBits { + return "", ErrInvalidKey + } + + keyBytes := curveBits / 8 + if curveBits%8 > 0 { + keyBytes += 1 + } + + // We serialize the outpus (r and s) into big-endian byte arrays and pad + // them with zeros on the left to make sure the sizes work out. Both arrays + // must be keyBytes long, and the output must be 2*keyBytes long. + rBytes := r.Bytes() + rBytesPadded := make([]byte, keyBytes) + copy(rBytesPadded[keyBytes-len(rBytes):], rBytes) + + sBytes := s.Bytes() + sBytesPadded := make([]byte, keyBytes) + copy(sBytesPadded[keyBytes-len(sBytes):], sBytes) + + out := append(rBytesPadded, sBytesPadded...) + + return EncodeSegment(out), nil + } else { + return "", err + } +} diff -Nru juju-core-2.0~beta15/src/github.com/dgrijalva/jwt-go/ecdsa_test.go juju-core-2.0.0/src/github.com/dgrijalva/jwt-go/ecdsa_test.go --- juju-core-2.0~beta15/src/github.com/dgrijalva/jwt-go/ecdsa_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/dgrijalva/jwt-go/ecdsa_test.go 2016-10-13 14:32:22.000000000 +0000 @@ -0,0 +1,100 @@ +package jwt_test + +import ( + "crypto/ecdsa" + "io/ioutil" + "strings" + "testing" + + "github.com/dgrijalva/jwt-go" +) + +var ecdsaTestData = []struct { + name string + keys map[string]string + tokenString string + alg string + claims map[string]interface{} + valid bool +}{ + { + "Basic ES256", + map[string]string{"private": "test/ec256-private.pem", "public": "test/ec256-public.pem"}, + "eyJ0eXAiOiJKV1QiLCJhbGciOiJFUzI1NiJ9.eyJmb28iOiJiYXIifQ.feG39E-bn8HXAKhzDZq7yEAPWYDhZlwTn3sePJnU9VrGMmwdXAIEyoOnrjreYlVM_Z4N13eK9-TmMTWyfKJtHQ", + "ES256", + map[string]interface{}{"foo": "bar"}, + true, + }, + { + "Basic ES384", + map[string]string{"private": "test/ec384-private.pem", "public": "test/ec384-public.pem"}, + 
"eyJ0eXAiOiJKV1QiLCJhbGciOiJFUzM4NCJ9.eyJmb28iOiJiYXIifQ.ngAfKMbJUh0WWubSIYe5GMsA-aHNKwFbJk_wq3lq23aPp8H2anb1rRILIzVR0gUf4a8WzDtrzmiikuPWyCS6CN4-PwdgTk-5nehC7JXqlaBZU05p3toM3nWCwm_LXcld", + "ES384", + map[string]interface{}{"foo": "bar"}, + true, + }, + { + "Basic ES512", + map[string]string{"private": "test/ec512-private.pem", "public": "test/ec512-public.pem"}, + "eyJ0eXAiOiJKV1QiLCJhbGciOiJFUzUxMiJ9.eyJmb28iOiJiYXIifQ.AAU0TvGQOcdg2OvrwY73NHKgfk26UDekh9Prz-L_iWuTBIBqOFCWwwLsRiHB1JOddfKAls5do1W0jR_F30JpVd-6AJeTjGKA4C1A1H6gIKwRY0o_tFDIydZCl_lMBMeG5VNFAjO86-WCSKwc3hqaGkq1MugPRq_qrF9AVbuEB4JPLyL5", + "ES512", + map[string]interface{}{"foo": "bar"}, + true, + }, + { + "basic ES256 invalid: foo => bar", + map[string]string{"private": "test/ec256-private.pem", "public": "test/ec256-public.pem"}, + "eyJhbGciOiJFUzI1NiIsInR5cCI6IkpXVCJ9.eyJmb28iOiJiYXIifQ.MEQCIHoSJnmGlPaVQDqacx_2XlXEhhqtWceVopjomc2PJLtdAiAUTeGPoNYxZw0z8mgOnnIcjoxRuNDVZvybRZF3wR1l8W", + "ES256", + map[string]interface{}{"foo": "bar"}, + false, + }, +} + +func TestECDSAVerify(t *testing.T) { + for _, data := range ecdsaTestData { + var err error + + key, _ := ioutil.ReadFile(data.keys["public"]) + + var ecdsaKey *ecdsa.PublicKey + if ecdsaKey, err = jwt.ParseECPublicKeyFromPEM(key); err != nil { + t.Errorf("Unable to parse ECDSA public key: %v", err) + } + + parts := strings.Split(data.tokenString, ".") + + method := jwt.GetSigningMethod(data.alg) + err = method.Verify(strings.Join(parts[0:2], "."), parts[2], ecdsaKey) + if data.valid && err != nil { + t.Errorf("[%v] Error while verifying key: %v", data.name, err) + } + if !data.valid && err == nil { + t.Errorf("[%v] Invalid key passed validation", data.name) + } + } +} + +func TestECDSASign(t *testing.T) { + for _, data := range ecdsaTestData { + var err error + key, _ := ioutil.ReadFile(data.keys["private"]) + + var ecdsaKey *ecdsa.PrivateKey + if ecdsaKey, err = jwt.ParseECPrivateKeyFromPEM(key); err != nil { + t.Errorf("Unable to parse ECDSA private 
key: %v", err) + } + + if data.valid { + parts := strings.Split(data.tokenString, ".") + method := jwt.GetSigningMethod(data.alg) + sig, err := method.Sign(strings.Join(parts[0:2], "."), ecdsaKey) + if err != nil { + t.Errorf("[%v] Error signing token: %v", data.name, err) + } + if sig == parts[2] { + t.Errorf("[%v] Identical signatures\nbefore:\n%v\nafter:\n%v", data.name, parts[2], sig) + } + } + } +} diff -Nru juju-core-2.0~beta15/src/github.com/dgrijalva/jwt-go/ecdsa_utils.go juju-core-2.0.0/src/github.com/dgrijalva/jwt-go/ecdsa_utils.go --- juju-core-2.0~beta15/src/github.com/dgrijalva/jwt-go/ecdsa_utils.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/dgrijalva/jwt-go/ecdsa_utils.go 2016-10-13 14:32:22.000000000 +0000 @@ -0,0 +1,67 @@ +package jwt + +import ( + "crypto/ecdsa" + "crypto/x509" + "encoding/pem" + "errors" +) + +var ( + ErrNotECPublicKey = errors.New("Key is not a valid ECDSA public key") + ErrNotECPrivateKey = errors.New("Key is not a valid ECDSA private key") +) + +// Parse PEM encoded Elliptic Curve Private Key Structure +func ParseECPrivateKeyFromPEM(key []byte) (*ecdsa.PrivateKey, error) { + var err error + + // Parse PEM block + var block *pem.Block + if block, _ = pem.Decode(key); block == nil { + return nil, ErrKeyMustBePEMEncoded + } + + // Parse the key + var parsedKey interface{} + if parsedKey, err = x509.ParseECPrivateKey(block.Bytes); err != nil { + return nil, err + } + + var pkey *ecdsa.PrivateKey + var ok bool + if pkey, ok = parsedKey.(*ecdsa.PrivateKey); !ok { + return nil, ErrNotECPrivateKey + } + + return pkey, nil +} + +// Parse PEM encoded PKCS1 or PKCS8 public key +func ParseECPublicKeyFromPEM(key []byte) (*ecdsa.PublicKey, error) { + var err error + + // Parse PEM block + var block *pem.Block + if block, _ = pem.Decode(key); block == nil { + return nil, ErrKeyMustBePEMEncoded + } + + // Parse the key + var parsedKey interface{} + if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil 
{ + if cert, err := x509.ParseCertificate(block.Bytes); err == nil { + parsedKey = cert.PublicKey + } else { + return nil, err + } + } + + var pkey *ecdsa.PublicKey + var ok bool + if pkey, ok = parsedKey.(*ecdsa.PublicKey); !ok { + return nil, ErrNotECPublicKey + } + + return pkey, nil +} diff -Nru juju-core-2.0~beta15/src/github.com/dgrijalva/jwt-go/errors.go juju-core-2.0.0/src/github.com/dgrijalva/jwt-go/errors.go --- juju-core-2.0~beta15/src/github.com/dgrijalva/jwt-go/errors.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/dgrijalva/jwt-go/errors.go 2016-10-13 14:32:22.000000000 +0000 @@ -0,0 +1,63 @@ +package jwt + +import ( + "errors" +) + +// Error constants +var ( + ErrInvalidKey = errors.New("key is invalid") + ErrInvalidKeyType = errors.New("key is of invalid type") + ErrHashUnavailable = errors.New("the requested hash function is unavailable") +) + +// The errors that might occur when parsing and validating a token +const ( + ValidationErrorMalformed uint32 = 1 << iota // Token is malformed + ValidationErrorUnverifiable // Token could not be verified because of signing problems + ValidationErrorSignatureInvalid // Signature validation failed + + // Standard Claim validation errors + ValidationErrorAudience // AUD validation failed + ValidationErrorExpired // EXP validation failed + ValidationErrorIssuedAt // IAT validation failed + ValidationErrorIssuer // ISS validation failed + ValidationErrorNotValidYet // NBF validation failed + ValidationErrorId // JTI validation failed + ValidationErrorClaimsInvalid // Generic claims validation error +) + +// Helper for constructing a ValidationError with a string error message +func NewValidationError(errorText string, errorFlags uint32) *ValidationError { + return &ValidationError{ + text: errorText, + Errors: errorFlags, + } +} + +// The error from Parse if token is not valid +type ValidationError struct { + Inner error // stores the error returned by external dependencies, i.e.: 
KeyFunc + Errors uint32 // bitfield. see ValidationError... constants + text string // errors that do not have a valid error just have text +} + +// Validation error is an error type +func (e ValidationError) Error() string { + if e.Inner != nil { + return e.Inner.Error() + } else if e.text != "" { + return e.text + } else { + return "token is invalid" + } + return e.Inner.Error() +} + +// No errors +func (e *ValidationError) valid() bool { + if e.Errors > 0 { + return false + } + return true +} diff -Nru juju-core-2.0~beta15/src/github.com/dgrijalva/jwt-go/example_test.go juju-core-2.0.0/src/github.com/dgrijalva/jwt-go/example_test.go --- juju-core-2.0~beta15/src/github.com/dgrijalva/jwt-go/example_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/dgrijalva/jwt-go/example_test.go 2016-10-13 14:32:22.000000000 +0000 @@ -0,0 +1,114 @@ +package jwt_test + +import ( + "fmt" + "github.com/dgrijalva/jwt-go" + "time" +) + +// Example (atypical) using the StandardClaims type by itself to parse a token. +// The StandardClaims type is designed to be embedded into your custom types +// to provide standard validation features. You can use it alone, but there's +// no way to retrieve other fields after parsing. +// See the CustomClaimsType example for intended usage. +func ExampleNewWithClaims_standardClaims() { + mySigningKey := []byte("AllYourBase") + + // Create the Claims + claims := &jwt.StandardClaims{ + ExpiresAt: 15000, + Issuer: "test", + } + + token := jwt.NewWithClaims(jwt.SigningMethodHS256, claims) + ss, err := token.SignedString(mySigningKey) + fmt.Printf("%v %v", ss, err) + //Output: eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJleHAiOjE1MDAwLCJpc3MiOiJ0ZXN0In0.QsODzZu3lUZMVdhbO76u3Jv02iYCvEHcYVUI1kOWEU0 +} + +// Example creating a token using a custom claims type. The StandardClaim is embedded +// in the custom type to allow for easy encoding, parsing and validation of standard claims. 
+func ExampleNewWithClaims_customClaimsType() { + mySigningKey := []byte("AllYourBase") + + type MyCustomClaims struct { + Foo string `json:"foo"` + jwt.StandardClaims + } + + // Create the Claims + claims := MyCustomClaims{ + "bar", + jwt.StandardClaims{ + ExpiresAt: 15000, + Issuer: "test", + }, + } + + token := jwt.NewWithClaims(jwt.SigningMethodHS256, claims) + ss, err := token.SignedString(mySigningKey) + fmt.Printf("%v %v", ss, err) + //Output: eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJmb28iOiJiYXIiLCJleHAiOjE1MDAwLCJpc3MiOiJ0ZXN0In0.HE7fK0xOQwFEr4WDgRWj4teRPZ6i3GLwD5YCm6Pwu_c +} + +// Example creating a token using a custom claims type. The StandardClaim is embedded +// in the custom type to allow for easy encoding, parsing and validation of standard claims. +func ExampleParseWithClaims_customClaimsType() { + tokenString := "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJmb28iOiJiYXIiLCJleHAiOjE1MDAwLCJpc3MiOiJ0ZXN0In0.HE7fK0xOQwFEr4WDgRWj4teRPZ6i3GLwD5YCm6Pwu_c" + + type MyCustomClaims struct { + Foo string `json:"foo"` + jwt.StandardClaims + } + + // sample token is expired. override time so it parses as valid + at(time.Unix(0, 0), func() { + token, err := jwt.ParseWithClaims(tokenString, &MyCustomClaims{}, func(token *jwt.Token) (interface{}, error) { + return []byte("AllYourBase"), nil + }) + + if claims, ok := token.Claims.(*MyCustomClaims); ok && token.Valid { + fmt.Printf("%v %v", claims.Foo, claims.StandardClaims.ExpiresAt) + } else { + fmt.Println(err) + } + }) + + // Output: bar 15000 +} + +// Override time value for tests. Restore default value after. +func at(t time.Time, f func()) { + jwt.TimeFunc = func() time.Time { + return t + } + f() + jwt.TimeFunc = time.Now +} + +// An example of parsing the error types using bitfield checks +func ExampleParse_errorChecking() { + // Token from another example. 
This token is expired + var tokenString = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJmb28iOiJiYXIiLCJleHAiOjE1MDAwLCJpc3MiOiJ0ZXN0In0.HE7fK0xOQwFEr4WDgRWj4teRPZ6i3GLwD5YCm6Pwu_c" + + token, err := jwt.Parse(tokenString, func(token *jwt.Token) (interface{}, error) { + return []byte("AllYourBase"), nil + }) + + if token.Valid { + fmt.Println("You look nice today") + } else if ve, ok := err.(*jwt.ValidationError); ok { + if ve.Errors&jwt.ValidationErrorMalformed != 0 { + fmt.Println("That's not even a token") + } else if ve.Errors&(jwt.ValidationErrorExpired|jwt.ValidationErrorNotValidYet) != 0 { + // Token is either expired or not active yet + fmt.Println("Timing is everything") + } else { + fmt.Println("Couldn't handle this token:", err) + } + } else { + fmt.Println("Couldn't handle this token:", err) + } + + // Output: Timing is everything +} diff -Nru juju-core-2.0~beta15/src/github.com/dgrijalva/jwt-go/.gitignore juju-core-2.0.0/src/github.com/dgrijalva/jwt-go/.gitignore --- juju-core-2.0~beta15/src/github.com/dgrijalva/jwt-go/.gitignore 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/dgrijalva/jwt-go/.gitignore 2016-10-13 14:32:22.000000000 +0000 @@ -0,0 +1,4 @@ +.DS_Store +bin + + diff -Nru juju-core-2.0~beta15/src/github.com/dgrijalva/jwt-go/hmac_example_test.go juju-core-2.0.0/src/github.com/dgrijalva/jwt-go/hmac_example_test.go --- juju-core-2.0~beta15/src/github.com/dgrijalva/jwt-go/hmac_example_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/dgrijalva/jwt-go/hmac_example_test.go 2016-10-13 14:32:22.000000000 +0000 @@ -0,0 +1,64 @@ +package jwt_test + +import ( + "fmt" + "github.com/dgrijalva/jwt-go" + "io/ioutil" + "time" +) + +// For HMAC signing method, the key can be any []byte. It is recommended to generate +// a key using crypto/rand or something equivalent. You need the same key for signing +// and validating. 
+var hmacSampleSecret []byte + +func init() { + // Load sample key data + if keyData, e := ioutil.ReadFile("test/hmacTestKey"); e == nil { + hmacSampleSecret = keyData + } else { + panic(e) + } +} + +// Example creating, signing, and encoding a JWT token using the HMAC signing method +func ExampleNew_hmac() { + // Create a new token object, specifying signing method and the claims + // you would like it to contain. + token := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{ + "foo": "bar", + "nbf": time.Date(2015, 10, 10, 12, 0, 0, 0, time.UTC).Unix(), + }) + + // Sign and get the complete encoded token as a string using the secret + tokenString, err := token.SignedString(hmacSampleSecret) + + fmt.Println(tokenString, err) + // Output: eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJmb28iOiJiYXIiLCJuYmYiOjE0NDQ0Nzg0MDB9.u1riaD1rW97opCoAuRCTy4w58Br-Zk-bh7vLiRIsrpU +} + +// Example parsing and validating a token using the HMAC signing method +func ExampleParse_hmac() { + // sample token string taken from the New example + tokenString := "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJmb28iOiJiYXIiLCJuYmYiOjE0NDQ0Nzg0MDB9.u1riaD1rW97opCoAuRCTy4w58Br-Zk-bh7vLiRIsrpU" + + // Parse takes the token string and a function for looking up the key. The latter is especially + // useful if you use multiple keys for your application. The standard is to use 'kid' in the + // head of the token to identify which key to use, but the parsed token (head and claims) is provided + // to the callback, providing flexibility. 
+ token, err := jwt.Parse(tokenString, func(token *jwt.Token) (interface{}, error) { + // Don't forget to validate the alg is what you expect: + if _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok { + return nil, fmt.Errorf("Unexpected signing method: %v", token.Header["alg"]) + } + return hmacSampleSecret, nil + }) + + if claims, ok := token.Claims.(jwt.MapClaims); ok && token.Valid { + fmt.Println(claims["foo"], claims["nbf"]) + } else { + fmt.Println(err) + } + + // Output: bar 1.4444784e+09 +} diff -Nru juju-core-2.0~beta15/src/github.com/dgrijalva/jwt-go/hmac.go juju-core-2.0.0/src/github.com/dgrijalva/jwt-go/hmac.go --- juju-core-2.0~beta15/src/github.com/dgrijalva/jwt-go/hmac.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/dgrijalva/jwt-go/hmac.go 2016-10-13 14:32:22.000000000 +0000 @@ -0,0 +1,94 @@ +package jwt + +import ( + "crypto" + "crypto/hmac" + "errors" +) + +// Implements the HMAC-SHA family of signing methods signing methods +type SigningMethodHMAC struct { + Name string + Hash crypto.Hash +} + +// Specific instances for HS256 and company +var ( + SigningMethodHS256 *SigningMethodHMAC + SigningMethodHS384 *SigningMethodHMAC + SigningMethodHS512 *SigningMethodHMAC + ErrSignatureInvalid = errors.New("signature is invalid") +) + +func init() { + // HS256 + SigningMethodHS256 = &SigningMethodHMAC{"HS256", crypto.SHA256} + RegisterSigningMethod(SigningMethodHS256.Alg(), func() SigningMethod { + return SigningMethodHS256 + }) + + // HS384 + SigningMethodHS384 = &SigningMethodHMAC{"HS384", crypto.SHA384} + RegisterSigningMethod(SigningMethodHS384.Alg(), func() SigningMethod { + return SigningMethodHS384 + }) + + // HS512 + SigningMethodHS512 = &SigningMethodHMAC{"HS512", crypto.SHA512} + RegisterSigningMethod(SigningMethodHS512.Alg(), func() SigningMethod { + return SigningMethodHS512 + }) +} + +func (m *SigningMethodHMAC) Alg() string { + return m.Name +} + +// Verify the signature of HSXXX tokens. 
Returns nil if the signature is valid. +func (m *SigningMethodHMAC) Verify(signingString, signature string, key interface{}) error { + // Verify the key is the right type + keyBytes, ok := key.([]byte) + if !ok { + return ErrInvalidKeyType + } + + // Decode signature, for comparison + sig, err := DecodeSegment(signature) + if err != nil { + return err + } + + // Can we use the specified hashing method? + if !m.Hash.Available() { + return ErrHashUnavailable + } + + // This signing method is symmetric, so we validate the signature + // by reproducing the signature from the signing string and key, then + // comparing that against the provided signature. + hasher := hmac.New(m.Hash.New, keyBytes) + hasher.Write([]byte(signingString)) + if !hmac.Equal(sig, hasher.Sum(nil)) { + return ErrSignatureInvalid + } + + // No validation errors. Signature is good. + return nil +} + +// Implements the Sign method from SigningMethod for this signing method. +// Key must be []byte +func (m *SigningMethodHMAC) Sign(signingString string, key interface{}) (string, error) { + if keyBytes, ok := key.([]byte); ok { + if !m.Hash.Available() { + return "", ErrHashUnavailable + } + + hasher := hmac.New(m.Hash.New, keyBytes) + hasher.Write([]byte(signingString)) + + return EncodeSegment(hasher.Sum(nil)), nil + } + + return "", ErrInvalidKey +} diff -Nru juju-core-2.0~beta15/src/github.com/dgrijalva/jwt-go/hmac_test.go juju-core-2.0.0/src/github.com/dgrijalva/jwt-go/hmac_test.go --- juju-core-2.0~beta15/src/github.com/dgrijalva/jwt-go/hmac_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/dgrijalva/jwt-go/hmac_test.go 2016-10-13 14:32:22.000000000 +0000 @@ -0,0 +1,91 @@ +package jwt_test + +import ( + "github.com/dgrijalva/jwt-go" + "io/ioutil" + "strings" + "testing" +) + +var hmacTestData = []struct { + name string + tokenString string + alg string + claims map[string]interface{} + valid bool +}{ + { + "web sample", + 
"eyJ0eXAiOiJKV1QiLA0KICJhbGciOiJIUzI1NiJ9.eyJpc3MiOiJqb2UiLA0KICJleHAiOjEzMDA4MTkzODAsDQogImh0dHA6Ly9leGFtcGxlLmNvbS9pc19yb290Ijp0cnVlfQ.dBjftJeZ4CVP-mB92K27uhbUJU1p1r_wW1gFWFOEjXk", + "HS256", + map[string]interface{}{"iss": "joe", "exp": 1300819380, "http://example.com/is_root": true}, + true, + }, + { + "HS384", + "eyJhbGciOiJIUzM4NCIsInR5cCI6IkpXVCJ9.eyJleHAiOjEuMzAwODE5MzhlKzA5LCJodHRwOi8vZXhhbXBsZS5jb20vaXNfcm9vdCI6dHJ1ZSwiaXNzIjoiam9lIn0.KWZEuOD5lbBxZ34g7F-SlVLAQ_r5KApWNWlZIIMyQVz5Zs58a7XdNzj5_0EcNoOy", + "HS384", + map[string]interface{}{"iss": "joe", "exp": 1300819380, "http://example.com/is_root": true}, + true, + }, + { + "HS512", + "eyJhbGciOiJIUzUxMiIsInR5cCI6IkpXVCJ9.eyJleHAiOjEuMzAwODE5MzhlKzA5LCJodHRwOi8vZXhhbXBsZS5jb20vaXNfcm9vdCI6dHJ1ZSwiaXNzIjoiam9lIn0.CN7YijRX6Aw1n2jyI2Id1w90ja-DEMYiWixhYCyHnrZ1VfJRaFQz1bEbjjA5Fn4CLYaUG432dEYmSbS4Saokmw", + "HS512", + map[string]interface{}{"iss": "joe", "exp": 1300819380, "http://example.com/is_root": true}, + true, + }, + { + "web sample: invalid", + "eyJ0eXAiOiJKV1QiLA0KICJhbGciOiJIUzI1NiJ9.eyJpc3MiOiJqb2UiLA0KICJleHAiOjEzMDA4MTkzODAsDQogImh0dHA6Ly9leGFtcGxlLmNvbS9pc19yb290Ijp0cnVlfQ.dBjftJeZ4CVP-mB92K27uhbUJU1p1r_wW1gFWFOEjXo", + "HS256", + map[string]interface{}{"iss": "joe", "exp": 1300819380, "http://example.com/is_root": true}, + false, + }, +} + +// Sample data from http://tools.ietf.org/html/draft-jones-json-web-signature-04#appendix-A.1 +var hmacTestKey, _ = ioutil.ReadFile("test/hmacTestKey") + +func TestHMACVerify(t *testing.T) { + for _, data := range hmacTestData { + parts := strings.Split(data.tokenString, ".") + + method := jwt.GetSigningMethod(data.alg) + err := method.Verify(strings.Join(parts[0:2], "."), parts[2], hmacTestKey) + if data.valid && err != nil { + t.Errorf("[%v] Error while verifying key: %v", data.name, err) + } + if !data.valid && err == nil { + t.Errorf("[%v] Invalid key passed validation", data.name) + } + } +} + +func TestHMACSign(t *testing.T) { + for _, data := range 
hmacTestData { + if data.valid { + parts := strings.Split(data.tokenString, ".") + method := jwt.GetSigningMethod(data.alg) + sig, err := method.Sign(strings.Join(parts[0:2], "."), hmacTestKey) + if err != nil { + t.Errorf("[%v] Error signing token: %v", data.name, err) + } + if sig != parts[2] { + t.Errorf("[%v] Incorrect signature.\nwas:\n%v\nexpecting:\n%v", data.name, sig, parts[2]) + } + } + } +} + +func BenchmarkHS256Signing(b *testing.B) { + benchmarkSigning(b, jwt.SigningMethodHS256, hmacTestKey) +} + +func BenchmarkHS384Signing(b *testing.B) { + benchmarkSigning(b, jwt.SigningMethodHS384, hmacTestKey) +} + +func BenchmarkHS512Signing(b *testing.B) { + benchmarkSigning(b, jwt.SigningMethodHS512, hmacTestKey) +} diff -Nru juju-core-2.0~beta15/src/github.com/dgrijalva/jwt-go/http_example_test.go juju-core-2.0.0/src/github.com/dgrijalva/jwt-go/http_example_test.go --- juju-core-2.0~beta15/src/github.com/dgrijalva/jwt-go/http_example_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/dgrijalva/jwt-go/http_example_test.go 2016-10-13 14:32:22.000000000 +0000 @@ -0,0 +1,216 @@ +package jwt_test + +// Example HTTP auth using asymmetric crypto/RSA keys +// This is based on a (now outdated) example at https://gist.github.com/cryptix/45c33ecf0ae54828e63b + +import ( + "bytes" + "crypto/rsa" + "fmt" + "github.com/dgrijalva/jwt-go" + "github.com/dgrijalva/jwt-go/request" + "io" + "io/ioutil" + "log" + "net" + "net/http" + "net/url" + "strings" + "time" +) + +// location of the files used for signing and verification +const ( + privKeyPath = "test/sample_key" // openssl genrsa -out app.rsa keysize + pubKeyPath = "test/sample_key.pub" // openssl rsa -in app.rsa -pubout > app.rsa.pub +) + +var ( + verifyKey *rsa.PublicKey + signKey *rsa.PrivateKey + serverPort int + // storing sample username/password pairs + // don't do this on a real server + users = map[string]string{ + "test": "known", + } +) + +// read the key files before starting http 
handlers +func init() { + signBytes, err := ioutil.ReadFile(privKeyPath) + fatal(err) + + signKey, err = jwt.ParseRSAPrivateKeyFromPEM(signBytes) + fatal(err) + + verifyBytes, err := ioutil.ReadFile(pubKeyPath) + fatal(err) + + verifyKey, err = jwt.ParseRSAPublicKeyFromPEM(verifyBytes) + fatal(err) + + http.HandleFunc("/authenticate", authHandler) + http.HandleFunc("/restricted", restrictedHandler) + + // Setup listener + listener, err := net.ListenTCP("tcp", &net.TCPAddr{}) + serverPort = listener.Addr().(*net.TCPAddr).Port + + log.Println("Listening...") + go func() { + fatal(http.Serve(listener, nil)) + }() +} + +var start func() + +func fatal(err error) { + if err != nil { + log.Fatal(err) + } +} + +// Define some custom types were going to use within our tokens +type CustomerInfo struct { + Name string + Kind string +} + +type CustomClaimsExample struct { + *jwt.StandardClaims + TokenType string + CustomerInfo +} + +func Example_getTokenViaHTTP() { + // See func authHandler for an example auth handler that produces a token + res, err := http.PostForm(fmt.Sprintf("http://localhost:%v/authenticate", serverPort), url.Values{ + "user": {"test"}, + "pass": {"known"}, + }) + if err != nil { + fatal(err) + } + + if res.StatusCode != 200 { + fmt.Println("Unexpected status code", res.StatusCode) + } + + // Read the token out of the response body + buf := new(bytes.Buffer) + io.Copy(buf, res.Body) + res.Body.Close() + tokenString := strings.TrimSpace(buf.String()) + + // Parse the token + token, err := jwt.ParseWithClaims(tokenString, &CustomClaimsExample{}, func(token *jwt.Token) (interface{}, error) { + // since we only use the one private key to sign the tokens, + // we also only use its public counter part to verify + return verifyKey, nil + }) + fatal(err) + + claims := token.Claims.(*CustomClaimsExample) + fmt.Println(claims.CustomerInfo.Name) + + //Output: test +} + +func Example_useTokenViaHTTP() { + + // Make a sample token + // In a real world situation, this 
token will have been acquired from + // some other API call (see Example_getTokenViaHTTP) + token, err := createToken("foo") + fatal(err) + + // Make request. See func restrictedHandler for example request processor + req, err := http.NewRequest("GET", fmt.Sprintf("http://localhost:%v/restricted", serverPort), nil) + fatal(err) + req.Header.Set("Authorization", fmt.Sprintf("Bearer %v", token)) + res, err := http.DefaultClient.Do(req) + fatal(err) + + // Read the response body + buf := new(bytes.Buffer) + io.Copy(buf, res.Body) + res.Body.Close() + fmt.Println(buf.String()) + + // Output: Welcome, foo +} + +func createToken(user string) (string, error) { + // create a signer for rsa 256 + t := jwt.New(jwt.GetSigningMethod("RS256")) + + // set our claims + t.Claims = &CustomClaimsExample{ + &jwt.StandardClaims{ + // set the expire time + // see http://tools.ietf.org/html/draft-ietf-oauth-json-web-token-20#section-4.1.4 + ExpiresAt: time.Now().Add(time.Minute * 1).Unix(), + }, + "level1", + CustomerInfo{user, "human"}, + } + + // Creat token string + return t.SignedString(signKey) +} + +// reads the form values, checks them and creates the token +func authHandler(w http.ResponseWriter, r *http.Request) { + // make sure its post + if r.Method != "POST" { + w.WriteHeader(http.StatusBadRequest) + fmt.Fprintln(w, "No POST", r.Method) + return + } + + user := r.FormValue("user") + pass := r.FormValue("pass") + + log.Printf("Authenticate: user[%s] pass[%s]\n", user, pass) + + // check values + if user != "test" || pass != "known" { + w.WriteHeader(http.StatusForbidden) + fmt.Fprintln(w, "Wrong info") + return + } + + tokenString, err := createToken(user) + if err != nil { + w.WriteHeader(http.StatusInternalServerError) + fmt.Fprintln(w, "Sorry, error while Signing Token!") + log.Printf("Token Signing error: %v\n", err) + return + } + + w.Header().Set("Content-Type", "application/jwt") + w.WriteHeader(http.StatusOK) + fmt.Fprintln(w, tokenString) +} + +// only accessible 
with a valid token +func restrictedHandler(w http.ResponseWriter, r *http.Request) { + // Get token from request + token, err := request.ParseFromRequestWithClaims(r, request.OAuth2Extractor, &CustomClaimsExample{}, func(token *jwt.Token) (interface{}, error) { + // since we only use the one private key to sign the tokens, + // we also only use its public counter part to verify + return verifyKey, nil + }) + + // If the token is missing or invalid, return error + if err != nil { + w.WriteHeader(http.StatusUnauthorized) + fmt.Fprintln(w, "Invalid token:", err) + return + } + + // Token is valid + fmt.Fprintln(w, "Welcome,", token.Claims.(*CustomClaimsExample).Name) + return +} diff -Nru juju-core-2.0~beta15/src/github.com/dgrijalva/jwt-go/LICENSE juju-core-2.0.0/src/github.com/dgrijalva/jwt-go/LICENSE --- juju-core-2.0~beta15/src/github.com/dgrijalva/jwt-go/LICENSE 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/dgrijalva/jwt-go/LICENSE 2016-10-13 14:32:22.000000000 +0000 @@ -0,0 +1,8 @@ +Copyright (c) 2012 Dave Grijalva + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + diff -Nru juju-core-2.0~beta15/src/github.com/dgrijalva/jwt-go/map_claims.go juju-core-2.0.0/src/github.com/dgrijalva/jwt-go/map_claims.go --- juju-core-2.0~beta15/src/github.com/dgrijalva/jwt-go/map_claims.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/dgrijalva/jwt-go/map_claims.go 2016-10-13 14:32:22.000000000 +0000 @@ -0,0 +1,94 @@ +package jwt + +import ( + "encoding/json" + "errors" + // "fmt" +) + +// Claims type that uses the map[string]interface{} for JSON decoding +// This is the default claims type if you don't supply one +type MapClaims map[string]interface{} + +// Compares the aud claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (m MapClaims) VerifyAudience(cmp string, req bool) bool { + aud, _ := m["aud"].(string) + return verifyAud(aud, cmp, req) +} + +// Compares the exp claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (m MapClaims) VerifyExpiresAt(cmp int64, req bool) bool { + switch exp := m["exp"].(type) { + case float64: + return verifyExp(int64(exp), cmp, req) + case json.Number: + v, _ := exp.Int64() + return verifyExp(v, cmp, req) + } + return req == false +} + +// Compares the iat claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (m MapClaims) VerifyIssuedAt(cmp int64, req bool) bool { + switch iat := m["iat"].(type) { + case float64: + return verifyIat(int64(iat), cmp, req) + case json.Number: + v, _ := iat.Int64() + return verifyIat(v, cmp, req) + } + return req == false +} + +// Compares the iss claim against cmp. 
+// If required is false, this method will return true if the value matches or is unset +func (m MapClaims) VerifyIssuer(cmp string, req bool) bool { + iss, _ := m["iss"].(string) + return verifyIss(iss, cmp, req) +} + +// Compares the nbf claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (m MapClaims) VerifyNotBefore(cmp int64, req bool) bool { + switch nbf := m["nbf"].(type) { + case float64: + return verifyNbf(int64(nbf), cmp, req) + case json.Number: + v, _ := nbf.Int64() + return verifyNbf(v, cmp, req) + } + return req == false +} + +// Validates time based claims "exp, iat, nbf". +// There is no accounting for clock skew. +// As well, if any of the above claims are not in the token, it will still +// be considered a valid claim. +func (m MapClaims) Valid() error { + vErr := new(ValidationError) + now := TimeFunc().Unix() + + if m.VerifyExpiresAt(now, false) == false { + vErr.Inner = errors.New("Token is expired") + vErr.Errors |= ValidationErrorExpired + } + + if m.VerifyIssuedAt(now, false) == false { + vErr.Inner = errors.New("Token used before issued") + vErr.Errors |= ValidationErrorIssuedAt + } + + if m.VerifyNotBefore(now, false) == false { + vErr.Inner = errors.New("Token is not valid yet") + vErr.Errors |= ValidationErrorNotValidYet + } + + if vErr.valid() { + return nil + } + + return vErr +} diff -Nru juju-core-2.0~beta15/src/github.com/dgrijalva/jwt-go/MIGRATION_GUIDE.md juju-core-2.0.0/src/github.com/dgrijalva/jwt-go/MIGRATION_GUIDE.md --- juju-core-2.0~beta15/src/github.com/dgrijalva/jwt-go/MIGRATION_GUIDE.md 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/dgrijalva/jwt-go/MIGRATION_GUIDE.md 2016-10-13 14:32:22.000000000 +0000 @@ -0,0 +1,96 @@ +## Migration Guide from v2 -> v3 + +Version 3 adds several new, frequently requested features. To do so, it introduces a few breaking changes. We've worked to keep these as minimal as possible. 
This guide explains the breaking changes and how you can quickly update your code. + +### `Token.Claims` is now an interface type + +The most requested feature from the 2.0 version of this library was the ability to provide a custom type to the JSON parser for claims. This was implemented by introducing a new interface, `Claims`, to replace `map[string]interface{}`. We also included two concrete implementations of `Claims`: `MapClaims` and `StandardClaims`. + +`MapClaims` is an alias for `map[string]interface{}` with built in validation behavior. It is the default claims type when using `Parse`. The usage is unchanged except you must type cast the claims property. + +The old example for parsing a token looked like this.. + +```go + if token, err := jwt.Parse(tokenString, keyLookupFunc); err == nil { + fmt.Printf("Token for user %v expires %v", token.Claims["user"], token.Claims["exp"]) + } +``` + +is now directly mapped to... + +```go + if token, err := jwt.Parse(tokenString, keyLookupFunc); err == nil { + claims := token.Claims.(jwt.MapClaims) + fmt.Printf("Token for user %v expires %v", claims["user"], claims["exp"]) + } +``` + +`StandardClaims` is designed to be embedded in your custom type. You can supply a custom claims type with the new `ParseWithClaims` function. Here's an example of using a custom claims type. + +```go + type MyCustomClaims struct { + User string + *StandardClaims + } + + if token, err := jwt.ParseWithClaims(tokenString, &MyCustomClaims{}, keyLookupFunc); err == nil { + claims := token.Claims.(*MyCustomClaims) + fmt.Printf("Token for user %v expires %v", claims.User, claims.StandardClaims.ExpiresAt) + } +``` + +### `ParseFromRequest` has been moved + +To keep this library focused on the tokens without becoming overburdened with complex request processing logic, `ParseFromRequest` and its new companion `ParseFromRequestWithClaims` have been moved to a subpackage, `request`. 
The method signatures have also been augmented to receive a new argument: `Extractor`. + +`Extractors` do the work of picking the token string out of a request. The interface is simple and composable. + +This simple parsing example: + +```go + if token, err := jwt.ParseFromRequest(tokenString, req, keyLookupFunc); err == nil { + fmt.Printf("Token for user %v expires %v", token.Claims["user"], token.Claims["exp"]) + } +``` + +is directly mapped to: + +```go + if token, err := request.ParseFromRequest(tokenString, request.OAuth2Extractor, req, keyLookupFunc); err == nil { + fmt.Printf("Token for user %v expires %v", token.Claims["user"], token.Claims["exp"]) + } +``` + +There are several concrete `Extractor` types provided for your convenience: + +* `HeaderExtractor` will search a list of headers until one contains content. +* `ArgumentExtractor` will search a list of keys in request query and form arguments until one contains content. +* `MultiExtractor` will try a list of `Extractors` in order until one returns content. +* `AuthorizationHeaderExtractor` will look in the `Authorization` header for a `Bearer` token. +* `OAuth2Extractor` searches the places an OAuth2 token would be specified (per the spec): `Authorization` header and `access_token` argument +* `PostExtractionFilter` wraps an `Extractor`, allowing you to process the content before it's parsed. A simple example is stripping the `Bearer ` text from a header + + +### RSA signing methods no longer accept `[]byte` keys + +Due to a [critical vulnerability](https://auth0.com/blog/2015/03/31/critical-vulnerabilities-in-json-web-token-libraries/), we've decided the convenience of accepting `[]byte` instead of `rsa.PublicKey` or `rsa.PrivateKey` isn't worth the risk of misuse. + +To replace this behavior, we've added two helper methods: `ParseRSAPrivateKeyFromPEM(key []byte) (*rsa.PrivateKey, error)` and `ParseRSAPublicKeyFromPEM(key []byte) (*rsa.PublicKey, error)`. 
These are just simple helpers for unpacking PEM encoded PKCS1 and PKCS8 keys. If your keys are encoded any other way, all you need to do is convert them to the `crypto/rsa` package's types. + +```go + func keyLookupFunc(*Token) (interface{}, error) { + // Don't forget to validate the alg is what you expect: + if _, ok := token.Method.(*jwt.SigningMethodRSA); !ok { + return nil, fmt.Errorf("Unexpected signing method: %v", token.Header["alg"]) + } + + // Look up key + key, err := lookupPublicKey(token.Header["kid"]) + if err != nil { + return nil, err + } + + // Unpack key from PEM encoded PKCS8 + return jwt.ParseRSAPublicKeyFromPEM(key) + } +``` diff -Nru juju-core-2.0~beta15/src/github.com/dgrijalva/jwt-go/none.go juju-core-2.0.0/src/github.com/dgrijalva/jwt-go/none.go --- juju-core-2.0~beta15/src/github.com/dgrijalva/jwt-go/none.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/dgrijalva/jwt-go/none.go 2016-10-13 14:32:22.000000000 +0000 @@ -0,0 +1,52 @@ +package jwt + +// Implements the none signing method. This is required by the spec +// but you probably should never use it. 
+var SigningMethodNone *signingMethodNone + +const UnsafeAllowNoneSignatureType unsafeNoneMagicConstant = "none signing method allowed" + +var NoneSignatureTypeDisallowedError error + +type signingMethodNone struct{} +type unsafeNoneMagicConstant string + +func init() { + SigningMethodNone = &signingMethodNone{} + NoneSignatureTypeDisallowedError = NewValidationError("'none' signature type is not allowed", ValidationErrorSignatureInvalid) + + RegisterSigningMethod(SigningMethodNone.Alg(), func() SigningMethod { + return SigningMethodNone + }) +} + +func (m *signingMethodNone) Alg() string { + return "none" +} + +// Only allow 'none' alg type if UnsafeAllowNoneSignatureType is specified as the key +func (m *signingMethodNone) Verify(signingString, signature string, key interface{}) (err error) { + // Key must be UnsafeAllowNoneSignatureType to prevent accidentally + // accepting 'none' signing method + if _, ok := key.(unsafeNoneMagicConstant); !ok { + return NoneSignatureTypeDisallowedError + } + // If signing method is none, signature must be an empty string + if signature != "" { + return NewValidationError( + "'none' signing method with non-empty signature", + ValidationErrorSignatureInvalid, + ) + } + + // Accept 'none' signing method. 
+ return nil +} + +// Only allow 'none' signing if UnsafeAllowNoneSignatureType is specified as the key +func (m *signingMethodNone) Sign(signingString string, key interface{}) (string, error) { + if _, ok := key.(unsafeNoneMagicConstant); ok { + return "", nil + } + return "", NoneSignatureTypeDisallowedError +} diff -Nru juju-core-2.0~beta15/src/github.com/dgrijalva/jwt-go/none_test.go juju-core-2.0.0/src/github.com/dgrijalva/jwt-go/none_test.go --- juju-core-2.0~beta15/src/github.com/dgrijalva/jwt-go/none_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/dgrijalva/jwt-go/none_test.go 2016-10-13 14:32:22.000000000 +0000 @@ -0,0 +1,72 @@ +package jwt_test + +import ( + "github.com/dgrijalva/jwt-go" + "strings" + "testing" +) + +var noneTestData = []struct { + name string + tokenString string + alg string + key interface{} + claims map[string]interface{} + valid bool +}{ + { + "Basic", + "eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiJ9.eyJmb28iOiJiYXIifQ.", + "none", + jwt.UnsafeAllowNoneSignatureType, + map[string]interface{}{"foo": "bar"}, + true, + }, + { + "Basic - no key", + "eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiJ9.eyJmb28iOiJiYXIifQ.", + "none", + nil, + map[string]interface{}{"foo": "bar"}, + false, + }, + { + "Signed", + "eyJhbGciOiJSUzM4NCIsInR5cCI6IkpXVCJ9.eyJmb28iOiJiYXIifQ.W-jEzRfBigtCWsinvVVuldiuilzVdU5ty0MvpLaSaqK9PlAWWlDQ1VIQ_qSKzwL5IXaZkvZFJXT3yL3n7OUVu7zCNJzdwznbC8Z-b0z2lYvcklJYi2VOFRcGbJtXUqgjk2oGsiqUMUMOLP70TTefkpsgqDxbRh9CDUfpOJgW-dU7cmgaoswe3wjUAUi6B6G2YEaiuXC0XScQYSYVKIzgKXJV8Zw-7AN_DBUI4GkTpsvQ9fVVjZM9csQiEXhYekyrKu1nu_POpQonGd8yqkIyXPECNmmqH5jH4sFiF67XhD7_JpkvLziBpI-uh86evBUadmHhb9Otqw3uV3NTaXLzJw", + "none", + jwt.UnsafeAllowNoneSignatureType, + map[string]interface{}{"foo": "bar"}, + false, + }, +} + +func TestNoneVerify(t *testing.T) { + for _, data := range noneTestData { + parts := strings.Split(data.tokenString, ".") + + method := jwt.GetSigningMethod(data.alg) + err := method.Verify(strings.Join(parts[0:2], "."), 
parts[2], data.key) + if data.valid && err != nil { + t.Errorf("[%v] Error while verifying key: %v", data.name, err) + } + if !data.valid && err == nil { + t.Errorf("[%v] Invalid key passed validation", data.name) + } + } +} + +func TestNoneSign(t *testing.T) { + for _, data := range noneTestData { + if data.valid { + parts := strings.Split(data.tokenString, ".") + method := jwt.GetSigningMethod(data.alg) + sig, err := method.Sign(strings.Join(parts[0:2], "."), data.key) + if err != nil { + t.Errorf("[%v] Error signing token: %v", data.name, err) + } + if sig != parts[2] { + t.Errorf("[%v] Incorrect signature.\nwas:\n%v\nexpecting:\n%v", data.name, sig, parts[2]) + } + } + } +} diff -Nru juju-core-2.0~beta15/src/github.com/dgrijalva/jwt-go/parser.go juju-core-2.0.0/src/github.com/dgrijalva/jwt-go/parser.go --- juju-core-2.0~beta15/src/github.com/dgrijalva/jwt-go/parser.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/dgrijalva/jwt-go/parser.go 2016-10-13 14:32:22.000000000 +0000 @@ -0,0 +1,131 @@ +package jwt + +import ( + "bytes" + "encoding/json" + "fmt" + "strings" +) + +type Parser struct { + ValidMethods []string // If populated, only these methods will be considered valid + UseJSONNumber bool // Use JSON Number format in JSON decoder + SkipClaimsValidation bool // Skip claims validation during token parsing +} + +// Parse, validate, and return a token. +// keyFunc will receive the parsed token and should return the key for validating. 
+// If everything is kosher, err will be nil +func (p *Parser) Parse(tokenString string, keyFunc Keyfunc) (*Token, error) { + return p.ParseWithClaims(tokenString, MapClaims{}, keyFunc) +} + +func (p *Parser) ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc) (*Token, error) { + parts := strings.Split(tokenString, ".") + if len(parts) != 3 { + return nil, NewValidationError("token contains an invalid number of segments", ValidationErrorMalformed) + } + + var err error + token := &Token{Raw: tokenString} + + // parse Header + var headerBytes []byte + if headerBytes, err = DecodeSegment(parts[0]); err != nil { + if strings.HasPrefix(strings.ToLower(tokenString), "bearer ") { + return token, NewValidationError("tokenstring should not contain 'bearer '", ValidationErrorMalformed) + } + return token, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} + } + if err = json.Unmarshal(headerBytes, &token.Header); err != nil { + return token, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} + } + + // parse Claims + var claimBytes []byte + token.Claims = claims + + if claimBytes, err = DecodeSegment(parts[1]); err != nil { + return token, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} + } + dec := json.NewDecoder(bytes.NewBuffer(claimBytes)) + if p.UseJSONNumber { + dec.UseNumber() + } + // JSON Decode. 
Special case for map type to avoid weird pointer behavior + if c, ok := token.Claims.(MapClaims); ok { + err = dec.Decode(&c) + } else { + err = dec.Decode(&claims) + } + // Handle decode error + if err != nil { + return token, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} + } + + // Lookup signature method + if method, ok := token.Header["alg"].(string); ok { + if token.Method = GetSigningMethod(method); token.Method == nil { + return token, NewValidationError("signing method (alg) is unavailable.", ValidationErrorUnverifiable) + } + } else { + return token, NewValidationError("signing method (alg) is unspecified.", ValidationErrorUnverifiable) + } + + // Verify signing method is in the required set + if p.ValidMethods != nil { + var signingMethodValid = false + var alg = token.Method.Alg() + for _, m := range p.ValidMethods { + if m == alg { + signingMethodValid = true + break + } + } + if !signingMethodValid { + // signing method is not in the listed set + return token, NewValidationError(fmt.Sprintf("signing method %v is invalid", alg), ValidationErrorSignatureInvalid) + } + } + + // Lookup key + var key interface{} + if keyFunc == nil { + // keyFunc was not provided. 
short circuiting validation + return token, NewValidationError("no Keyfunc was provided.", ValidationErrorUnverifiable) + } + if key, err = keyFunc(token); err != nil { + // keyFunc returned an error + return token, &ValidationError{Inner: err, Errors: ValidationErrorUnverifiable} + } + + vErr := &ValidationError{} + + // Validate Claims + if !p.SkipClaimsValidation { + if err := token.Claims.Valid(); err != nil { + + // If the Claims Valid returned an error, check if it is a validation error, + // If it was another error type, create a ValidationError with a generic ClaimsInvalid flag set + if e, ok := err.(*ValidationError); !ok { + vErr = &ValidationError{Inner: err, Errors: ValidationErrorClaimsInvalid} + } else { + vErr = e + } + } + } + + // Perform validation + token.Signature = parts[2] + if err = token.Method.Verify(strings.Join(parts[0:2], "."), token.Signature, key); err != nil { + vErr.Inner = err + vErr.Errors |= ValidationErrorSignatureInvalid + } + + if vErr.valid() { + token.Valid = true + return token, nil + } + + return token, vErr +} diff -Nru juju-core-2.0~beta15/src/github.com/dgrijalva/jwt-go/parser_test.go juju-core-2.0.0/src/github.com/dgrijalva/jwt-go/parser_test.go --- juju-core-2.0~beta15/src/github.com/dgrijalva/jwt-go/parser_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/dgrijalva/jwt-go/parser_test.go 2016-10-13 14:32:22.000000000 +0000 @@ -0,0 +1,261 @@ +package jwt_test + +import ( + "crypto/rsa" + "encoding/json" + "fmt" + "reflect" + "testing" + "time" + + "github.com/dgrijalva/jwt-go" + "github.com/dgrijalva/jwt-go/test" +) + +var keyFuncError error = fmt.Errorf("error loading key") + +var ( + jwtTestDefaultKey *rsa.PublicKey + defaultKeyFunc jwt.Keyfunc = func(t *jwt.Token) (interface{}, error) { return jwtTestDefaultKey, nil } + emptyKeyFunc jwt.Keyfunc = func(t *jwt.Token) (interface{}, error) { return nil, nil } + errorKeyFunc jwt.Keyfunc = func(t *jwt.Token) (interface{}, error) { return nil, 
keyFuncError } + nilKeyFunc jwt.Keyfunc = nil +) + +func init() { + jwtTestDefaultKey = test.LoadRSAPublicKeyFromDisk("test/sample_key.pub") +} + +var jwtTestData = []struct { + name string + tokenString string + keyfunc jwt.Keyfunc + claims jwt.Claims + valid bool + errors uint32 + parser *jwt.Parser +}{ + { + "basic", + "eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiJ9.eyJmb28iOiJiYXIifQ.FhkiHkoESI_cG3NPigFrxEk9Z60_oXrOT2vGm9Pn6RDgYNovYORQmmA0zs1AoAOf09ly2Nx2YAg6ABqAYga1AcMFkJljwxTT5fYphTuqpWdy4BELeSYJx5Ty2gmr8e7RonuUztrdD5WfPqLKMm1Ozp_T6zALpRmwTIW0QPnaBXaQD90FplAg46Iy1UlDKr-Eupy0i5SLch5Q-p2ZpaL_5fnTIUDlxC3pWhJTyx_71qDI-mAA_5lE_VdroOeflG56sSmDxopPEG3bFlSu1eowyBfxtu0_CuVd-M42RU75Zc4Gsj6uV77MBtbMrf4_7M_NUTSgoIF3fRqxrj0NzihIBg", + defaultKeyFunc, + jwt.MapClaims{"foo": "bar"}, + true, + 0, + nil, + }, + { + "basic expired", + "", // autogen + defaultKeyFunc, + jwt.MapClaims{"foo": "bar", "exp": float64(time.Now().Unix() - 100)}, + false, + jwt.ValidationErrorExpired, + nil, + }, + { + "basic nbf", + "", // autogen + defaultKeyFunc, + jwt.MapClaims{"foo": "bar", "nbf": float64(time.Now().Unix() + 100)}, + false, + jwt.ValidationErrorNotValidYet, + nil, + }, + { + "expired and nbf", + "", // autogen + defaultKeyFunc, + jwt.MapClaims{"foo": "bar", "nbf": float64(time.Now().Unix() + 100), "exp": float64(time.Now().Unix() - 100)}, + false, + jwt.ValidationErrorNotValidYet | jwt.ValidationErrorExpired, + nil, + }, + { + "basic invalid", + "eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiJ9.eyJmb28iOiJiYXIifQ.EhkiHkoESI_cG3NPigFrxEk9Z60_oXrOT2vGm9Pn6RDgYNovYORQmmA0zs1AoAOf09ly2Nx2YAg6ABqAYga1AcMFkJljwxTT5fYphTuqpWdy4BELeSYJx5Ty2gmr8e7RonuUztrdD5WfPqLKMm1Ozp_T6zALpRmwTIW0QPnaBXaQD90FplAg46Iy1UlDKr-Eupy0i5SLch5Q-p2ZpaL_5fnTIUDlxC3pWhJTyx_71qDI-mAA_5lE_VdroOeflG56sSmDxopPEG3bFlSu1eowyBfxtu0_CuVd-M42RU75Zc4Gsj6uV77MBtbMrf4_7M_NUTSgoIF3fRqxrj0NzihIBg", + defaultKeyFunc, + jwt.MapClaims{"foo": "bar"}, + false, + jwt.ValidationErrorSignatureInvalid, + nil, + }, + { + "basic nokeyfunc", + 
"eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiJ9.eyJmb28iOiJiYXIifQ.FhkiHkoESI_cG3NPigFrxEk9Z60_oXrOT2vGm9Pn6RDgYNovYORQmmA0zs1AoAOf09ly2Nx2YAg6ABqAYga1AcMFkJljwxTT5fYphTuqpWdy4BELeSYJx5Ty2gmr8e7RonuUztrdD5WfPqLKMm1Ozp_T6zALpRmwTIW0QPnaBXaQD90FplAg46Iy1UlDKr-Eupy0i5SLch5Q-p2ZpaL_5fnTIUDlxC3pWhJTyx_71qDI-mAA_5lE_VdroOeflG56sSmDxopPEG3bFlSu1eowyBfxtu0_CuVd-M42RU75Zc4Gsj6uV77MBtbMrf4_7M_NUTSgoIF3fRqxrj0NzihIBg", + nilKeyFunc, + jwt.MapClaims{"foo": "bar"}, + false, + jwt.ValidationErrorUnverifiable, + nil, + }, + { + "basic nokey", + "eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiJ9.eyJmb28iOiJiYXIifQ.FhkiHkoESI_cG3NPigFrxEk9Z60_oXrOT2vGm9Pn6RDgYNovYORQmmA0zs1AoAOf09ly2Nx2YAg6ABqAYga1AcMFkJljwxTT5fYphTuqpWdy4BELeSYJx5Ty2gmr8e7RonuUztrdD5WfPqLKMm1Ozp_T6zALpRmwTIW0QPnaBXaQD90FplAg46Iy1UlDKr-Eupy0i5SLch5Q-p2ZpaL_5fnTIUDlxC3pWhJTyx_71qDI-mAA_5lE_VdroOeflG56sSmDxopPEG3bFlSu1eowyBfxtu0_CuVd-M42RU75Zc4Gsj6uV77MBtbMrf4_7M_NUTSgoIF3fRqxrj0NzihIBg", + emptyKeyFunc, + jwt.MapClaims{"foo": "bar"}, + false, + jwt.ValidationErrorSignatureInvalid, + nil, + }, + { + "basic errorkey", + "eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiJ9.eyJmb28iOiJiYXIifQ.FhkiHkoESI_cG3NPigFrxEk9Z60_oXrOT2vGm9Pn6RDgYNovYORQmmA0zs1AoAOf09ly2Nx2YAg6ABqAYga1AcMFkJljwxTT5fYphTuqpWdy4BELeSYJx5Ty2gmr8e7RonuUztrdD5WfPqLKMm1Ozp_T6zALpRmwTIW0QPnaBXaQD90FplAg46Iy1UlDKr-Eupy0i5SLch5Q-p2ZpaL_5fnTIUDlxC3pWhJTyx_71qDI-mAA_5lE_VdroOeflG56sSmDxopPEG3bFlSu1eowyBfxtu0_CuVd-M42RU75Zc4Gsj6uV77MBtbMrf4_7M_NUTSgoIF3fRqxrj0NzihIBg", + errorKeyFunc, + jwt.MapClaims{"foo": "bar"}, + false, + jwt.ValidationErrorUnverifiable, + nil, + }, + { + "invalid signing method", + "", + defaultKeyFunc, + jwt.MapClaims{"foo": "bar"}, + false, + jwt.ValidationErrorSignatureInvalid, + &jwt.Parser{ValidMethods: []string{"HS256"}}, + }, + { + "valid signing method", + "", + defaultKeyFunc, + jwt.MapClaims{"foo": "bar"}, + true, + 0, + &jwt.Parser{ValidMethods: []string{"RS256", "HS256"}}, + }, + { + "JSON Number", + "", + defaultKeyFunc, + jwt.MapClaims{"foo": 
json.Number("123.4")}, + true, + 0, + &jwt.Parser{UseJSONNumber: true}, + }, + { + "Standard Claims", + "", + defaultKeyFunc, + &jwt.StandardClaims{ + ExpiresAt: time.Now().Add(time.Second * 10).Unix(), + }, + true, + 0, + &jwt.Parser{UseJSONNumber: true}, + }, + { + "JSON Number - basic expired", + "", // autogen + defaultKeyFunc, + jwt.MapClaims{"foo": "bar", "exp": json.Number(fmt.Sprintf("%v", time.Now().Unix()-100))}, + false, + jwt.ValidationErrorExpired, + &jwt.Parser{UseJSONNumber: true}, + }, + { + "JSON Number - basic nbf", + "", // autogen + defaultKeyFunc, + jwt.MapClaims{"foo": "bar", "nbf": json.Number(fmt.Sprintf("%v", time.Now().Unix()+100))}, + false, + jwt.ValidationErrorNotValidYet, + &jwt.Parser{UseJSONNumber: true}, + }, + { + "JSON Number - expired and nbf", + "", // autogen + defaultKeyFunc, + jwt.MapClaims{"foo": "bar", "nbf": json.Number(fmt.Sprintf("%v", time.Now().Unix()+100)), "exp": json.Number(fmt.Sprintf("%v", time.Now().Unix()-100))}, + false, + jwt.ValidationErrorNotValidYet | jwt.ValidationErrorExpired, + &jwt.Parser{UseJSONNumber: true}, + }, + { + "SkipClaimsValidation during token parsing", + "", // autogen + defaultKeyFunc, + jwt.MapClaims{"foo": "bar", "nbf": json.Number(fmt.Sprintf("%v", time.Now().Unix()+100))}, + true, + 0, + &jwt.Parser{UseJSONNumber: true, SkipClaimsValidation: true}, + }, +} + +func TestParser_Parse(t *testing.T) { + privateKey := test.LoadRSAPrivateKeyFromDisk("test/sample_key") + + // Iterate over test data set and run tests + for _, data := range jwtTestData { + // If the token string is blank, use helper function to generate string + if data.tokenString == "" { + data.tokenString = test.MakeSampleToken(data.claims, privateKey) + } + + // Parse the token + var token *jwt.Token + var err error + var parser = data.parser + if parser == nil { + parser = new(jwt.Parser) + } + // Figure out correct claims type + switch data.claims.(type) { + case jwt.MapClaims: + token, err = 
parser.ParseWithClaims(data.tokenString, jwt.MapClaims{}, data.keyfunc) + case *jwt.StandardClaims: + token, err = parser.ParseWithClaims(data.tokenString, &jwt.StandardClaims{}, data.keyfunc) + } + + // Verify result matches expectation + if !reflect.DeepEqual(data.claims, token.Claims) { + t.Errorf("[%v] Claims mismatch. Expecting: %v Got: %v", data.name, data.claims, token.Claims) + } + + if data.valid && err != nil { + t.Errorf("[%v] Error while verifying token: %T:%v", data.name, err, err) + } + + if !data.valid && err == nil { + t.Errorf("[%v] Invalid token passed validation", data.name) + } + + if (err == nil && !token.Valid) || (err != nil && token.Valid) { + t.Errorf("[%v] Inconsistent behavior between returned error and token.Valid") + } + + if data.errors != 0 { + if err == nil { + t.Errorf("[%v] Expecting error. Didn't get one.", data.name) + } else { + + ve := err.(*jwt.ValidationError) + // compare the bitfield part of the error + if e := ve.Errors; e != data.errors { + t.Errorf("[%v] Errors don't match expectation. %v != %v", data.name, e, data.errors) + } + + if err.Error() == keyFuncError.Error() && ve.Inner != keyFuncError { + t.Errorf("[%v] Inner error does not match expectation. 
%v != %v", data.name, ve.Inner, keyFuncError) + } + } + } + if data.valid && token.Signature == "" { + t.Errorf("[%v] Signature is left unpopulated after parsing", data.name) + } + } +} + +// Helper method for benchmarking various methods +func benchmarkSigning(b *testing.B, method jwt.SigningMethod, key interface{}) { + t := jwt.New(method) + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + if _, err := t.SignedString(key); err != nil { + b.Fatal(err) + } + } + }) + +} diff -Nru juju-core-2.0~beta15/src/github.com/dgrijalva/jwt-go/README.md juju-core-2.0.0/src/github.com/dgrijalva/jwt-go/README.md --- juju-core-2.0~beta15/src/github.com/dgrijalva/jwt-go/README.md 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/dgrijalva/jwt-go/README.md 2016-10-13 14:32:22.000000000 +0000 @@ -0,0 +1,85 @@ +A [go](http://www.golang.org) (or 'golang' for search engine friendliness) implementation of [JSON Web Tokens](http://self-issued.info/docs/draft-ietf-oauth-json-web-token.html) + +[![Build Status](https://travis-ci.org/dgrijalva/jwt-go.svg?branch=master)](https://travis-ci.org/dgrijalva/jwt-go) + +**BREAKING CHANGES:*** Version 3.0.0 is here. It includes _a lot_ of changes including a few that break the API. We've tried to break as few things as possible, so there should just be a few type signature changes. A full list of breaking changes is available in `VERSION_HISTORY.md`. See `MIGRATION_GUIDE.md` for more information on updating your code. + +**NOTICE:** A vulnerability in JWT was [recently published](https://auth0.com/blog/2015/03/31/critical-vulnerabilities-in-json-web-token-libraries/). As this library doesn't force users to validate the `alg` is what they expected, it's possible your usage is effected. There will be an update soon to remedy this, and it will likey require backwards-incompatible changes to the API. In the short term, please make sure your implementation verifies the `alg` is what you expect. + + +## What the heck is a JWT? 
+ +JWT.io has [a great introduction](https://jwt.io/introduction) to JSON Web Tokens. + +In short, it's a signed JSON object that does something useful (for example, authentication). It's commonly used for `Bearer` tokens in Oauth 2. A token is made of three parts, separated by `.`'s. The first two parts are JSON objects, that have been [base64url](http://tools.ietf.org/html/rfc4648) encoded. The last part is the signature, encoded the same way. + +The first part is called the header. It contains the necessary information for verifying the last part, the signature. For example, which encryption method was used for signing and what key was used. + +The part in the middle is the interesting bit. It's called the Claims and contains the actual stuff you care about. Refer to [the RFC](http://self-issued.info/docs/draft-jones-json-web-token.html) for information about reserved keys and the proper way to add your own. + +## What's in the box? + +This library supports the parsing and verification as well as the generation and signing of JWTs. Current supported signing algorithms are HMAC SHA, RSA, RSA-PSS, and ECDSA, though hooks are present for adding your own. + +## Examples + +See [the project documentation](https://godoc.org/github.com/dgrijalva/jwt-go) for examples of usage: + +* [Simple example of parsing and validating a token](https://godoc.org/github.com/dgrijalva/jwt-go#example-Parse--Hmac) +* [Simple example of building and signing a token](https://godoc.org/github.com/dgrijalva/jwt-go#example-New--Hmac) +* [Directory of Examples](https://godoc.org/github.com/dgrijalva/jwt-go#pkg-examples) + +## Extensions + +This library publishes all the necessary components for adding your own signing methods. Simply implement the `SigningMethod` interface and register a factory method using `RegisterSigningMethod`. 
+ +Here's an example of an extension that integrates with the Google App Engine signing tools: https://github.com/someone1/gcp-jwt-go + +## Compliance + +This library was last reviewed to comply with [RFC 7519](http://www.rfc-editor.org/info/rfc7519) dated May 2015 with a few notable differences: + +* In order to protect against accidental use of [Unsecured JWTs](http://self-issued.info/docs/draft-ietf-oauth-json-web-token.html#UnsecuredJWT), tokens using `alg=none` will only be accepted if the constant `jwt.UnsafeAllowNoneSignatureType` is provided as the key. + +## Project Status & Versioning + +This library is considered production ready. Feedback and feature requests are appreciated. The API should be considered stable. There should be very few backwards-incompatible changes outside of major version updates (and only with good reason). + +This project uses [Semantic Versioning 2.0.0](http://semver.org). Accepted pull requests will land on `master`. Periodically, versions will be tagged from `master`. You can find all the releases on [the project releases page](https://github.com/dgrijalva/jwt-go/releases). + +While we try to make it obvious when we make breaking changes, there isn't a great mechanism for pushing announcements out to users. You may want to use this alternative package include: `gopkg.in/dgrijalva/jwt-go.v2`. It will do the right thing WRT semantic versioning. + +## Usage Tips + +### Signing vs Encryption + +A token is simply a JSON object that is signed by its author. This tells you exactly two things about the data: + +* The author of the token was in the possession of the signing secret +* The data has not been modified since it was signed + +It's important to know that JWT does not provide encryption, which means anyone who has access to the token can read its contents. If you need to protect (encrypt) the data, there is a companion spec, `JWE`, that provides this functionality. JWE is currently outside the scope of this library. 
+ +### Choosing a Signing Method + +There are several signing methods available, and you should probably take the time to learn about the various options before choosing one. The principal design decision is most likely going to be symmetric vs asymmetric. + +Symmetric signing methods, such as HSA, use only a single secret. This is probably the simplest signing method to use since any `[]byte` can be used as a valid secret. They are also slightly computationally faster to use, though this rarely is enough to matter. Symmetric signing methods work the best when both producers and consumers of tokens are trusted, or even the same system. Since the same secret is used to both sign and validate tokens, you can't easily distribute the key for validation. + +Asymmetric signing methods, such as RSA, use different keys for signing and verifying tokens. This makes it possible to produce tokens with a private key, and allow any consumer to access the public key for verification. + +### JWT and OAuth + +It's worth mentioning that OAuth and JWT are not the same thing. A JWT token is simply a signed JSON object. It can be used anywhere such a thing is useful. There is some confusion, though, as JWT is the most common type of bearer token used in OAuth2 authentication. + +Without going too far down the rabbit hole, here's a description of the interaction of these technologies: + +* OAuth is a protocol for allowing an identity provider to be separate from the service a user is logging in to. For example, whenever you use Facebook to log into a different service (Yelp, Spotify, etc), you are using OAuth. +* OAuth defines several options for passing around authentication data. One popular method is called a "bearer token". A bearer token is simply a string that _should_ only be held by an authenticated user. Thus, simply presenting this token proves your identity. You can probably derive from here why a JWT might make a good bearer token. 
+* Because bearer tokens are used for authentication, it's important they're kept secret. This is why transactions that use bearer tokens typically happen over SSL. + +## More + +Documentation can be found [on godoc.org](http://godoc.org/github.com/dgrijalva/jwt-go). + +The command line utility included in this project (cmd/jwt) provides a straightforward example of token creation and parsing as well as a useful tool for debugging your own integration. You'll also find several implementation examples in to documentation. diff -Nru juju-core-2.0~beta15/src/github.com/dgrijalva/jwt-go/request/doc.go juju-core-2.0.0/src/github.com/dgrijalva/jwt-go/request/doc.go --- juju-core-2.0~beta15/src/github.com/dgrijalva/jwt-go/request/doc.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/dgrijalva/jwt-go/request/doc.go 2016-10-13 14:32:22.000000000 +0000 @@ -0,0 +1,7 @@ +// Utility package for extracting JWT tokens from +// HTTP requests. +// +// The main function is ParseFromRequest and it's WithClaims variant. +// See examples for how to use the various Extractor implementations +// or roll your own. 
+package request diff -Nru juju-core-2.0~beta15/src/github.com/dgrijalva/jwt-go/request/extractor_example_test.go juju-core-2.0.0/src/github.com/dgrijalva/jwt-go/request/extractor_example_test.go --- juju-core-2.0~beta15/src/github.com/dgrijalva/jwt-go/request/extractor_example_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/dgrijalva/jwt-go/request/extractor_example_test.go 2016-10-13 14:32:22.000000000 +0000 @@ -0,0 +1,32 @@ +package request + +import ( + "fmt" + "net/url" +) + +const ( + exampleTokenA = "A" +) + +func ExampleHeaderExtractor() { + req := makeExampleRequest("GET", "/", map[string]string{"Token": exampleTokenA}, nil) + tokenString, err := HeaderExtractor{"Token"}.ExtractToken(req) + if err == nil { + fmt.Println(tokenString) + } else { + fmt.Println(err) + } + //Output: A +} + +func ExampleArgumentExtractor() { + req := makeExampleRequest("GET", "/", nil, url.Values{"token": {extractorTestTokenA}}) + tokenString, err := ArgumentExtractor{"token"}.ExtractToken(req) + if err == nil { + fmt.Println(tokenString) + } else { + fmt.Println(err) + } + //Output: A +} diff -Nru juju-core-2.0~beta15/src/github.com/dgrijalva/jwt-go/request/extractor.go juju-core-2.0.0/src/github.com/dgrijalva/jwt-go/request/extractor.go --- juju-core-2.0~beta15/src/github.com/dgrijalva/jwt-go/request/extractor.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/dgrijalva/jwt-go/request/extractor.go 2016-10-13 14:32:22.000000000 +0000 @@ -0,0 +1,81 @@ +package request + +import ( + "errors" + "net/http" +) + +// Errors +var ( + ErrNoTokenInRequest = errors.New("no token present in request") +) + +// Interface for extracting a token from an HTTP request. +// The ExtractToken method should return a token string or an error. +// If no token is present, you must return ErrNoTokenInRequest. +type Extractor interface { + ExtractToken(*http.Request) (string, error) +} + +// Extractor for finding a token in a header. 
Looks at each specified +// header in order until there's a match +type HeaderExtractor []string + +func (e HeaderExtractor) ExtractToken(req *http.Request) (string, error) { + // loop over header names and return the first one that contains data + for _, header := range e { + if ah := req.Header.Get(header); ah != "" { + return ah, nil + } + } + return "", ErrNoTokenInRequest +} + +// Extract token from request arguments. This includes a POSTed form or +// GET URL arguments. Argument names are tried in order until there's a match. +// This extractor calls `ParseMultipartForm` on the request +type ArgumentExtractor []string + +func (e ArgumentExtractor) ExtractToken(req *http.Request) (string, error) { + // Make sure form is parsed + req.ParseMultipartForm(10e6) + + // loop over arg names and return the first one that contains data + for _, arg := range e { + if ah := req.Form.Get(arg); ah != "" { + return ah, nil + } + } + + return "", ErrNoTokenInRequest +} + +// Tries Extractors in order until one returns a token string or an error occurs +type MultiExtractor []Extractor + +func (e MultiExtractor) ExtractToken(req *http.Request) (string, error) { + // loop over header names and return the first one that contains data + for _, extractor := range e { + if tok, err := extractor.ExtractToken(req); tok != "" { + return tok, nil + } else if err != ErrNoTokenInRequest { + return "", err + } + } + return "", ErrNoTokenInRequest +} + +// Wrap an Extractor in this to post-process the value before it's handed off. 
+// See AuthorizationHeaderExtractor for an example +type PostExtractionFilter struct { + Extractor + Filter func(string) (string, error) +} + +func (e *PostExtractionFilter) ExtractToken(req *http.Request) (string, error) { + if tok, err := e.Extractor.ExtractToken(req); tok != "" { + return e.Filter(tok) + } else { + return "", err + } +} diff -Nru juju-core-2.0~beta15/src/github.com/dgrijalva/jwt-go/request/extractor_test.go juju-core-2.0.0/src/github.com/dgrijalva/jwt-go/request/extractor_test.go --- juju-core-2.0~beta15/src/github.com/dgrijalva/jwt-go/request/extractor_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/dgrijalva/jwt-go/request/extractor_test.go 2016-10-13 14:32:22.000000000 +0000 @@ -0,0 +1,91 @@ +package request + +import ( + "fmt" + "net/http" + "net/url" + "testing" +) + +var extractorTestTokenA = "A" +var extractorTestTokenB = "B" + +var extractorTestData = []struct { + name string + extractor Extractor + headers map[string]string + query url.Values + token string + err error +}{ + { + name: "simple header", + extractor: HeaderExtractor{"Foo"}, + headers: map[string]string{"Foo": extractorTestTokenA}, + query: nil, + token: extractorTestTokenA, + err: nil, + }, + { + name: "simple argument", + extractor: ArgumentExtractor{"token"}, + headers: map[string]string{}, + query: url.Values{"token": {extractorTestTokenA}}, + token: extractorTestTokenA, + err: nil, + }, + { + name: "multiple extractors", + extractor: MultiExtractor{ + HeaderExtractor{"Foo"}, + ArgumentExtractor{"token"}, + }, + headers: map[string]string{"Foo": extractorTestTokenA}, + query: url.Values{"token": {extractorTestTokenB}}, + token: extractorTestTokenA, + err: nil, + }, + { + name: "simple miss", + extractor: HeaderExtractor{"This-Header-Is-Not-Set"}, + headers: map[string]string{"Foo": extractorTestTokenA}, + query: nil, + token: "", + err: ErrNoTokenInRequest, + }, + { + name: "filter", + extractor: AuthorizationHeaderExtractor, + headers: 
map[string]string{"Authorization": "Bearer " + extractorTestTokenA}, + query: nil, + token: extractorTestTokenA, + err: nil, + }, +} + +func TestExtractor(t *testing.T) { + // Bearer token request + for _, data := range extractorTestData { + // Make request from test struct + r := makeExampleRequest("GET", "/", data.headers, data.query) + + // Test extractor + token, err := data.extractor.ExtractToken(r) + if token != data.token { + t.Errorf("[%v] Expected token '%v'. Got '%v'", data.name, data.token, token) + continue + } + if err != data.err { + t.Errorf("[%v] Expected error '%v'. Got '%v'", data.name, data.err, err) + continue + } + } +} + +func makeExampleRequest(method, path string, headers map[string]string, urlArgs url.Values) *http.Request { + r, _ := http.NewRequest(method, fmt.Sprintf("%v?%v", path, urlArgs.Encode()), nil) + for k, v := range headers { + r.Header.Set(k, v) + } + return r +} diff -Nru juju-core-2.0~beta15/src/github.com/dgrijalva/jwt-go/request/oauth2.go juju-core-2.0.0/src/github.com/dgrijalva/jwt-go/request/oauth2.go --- juju-core-2.0~beta15/src/github.com/dgrijalva/jwt-go/request/oauth2.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/dgrijalva/jwt-go/request/oauth2.go 2016-10-13 14:32:22.000000000 +0000 @@ -0,0 +1,28 @@ +package request + +import ( + "strings" +) + +// Strips 'Bearer ' prefix from bearer token string +func stripBearerPrefixFromTokenString(tok string) (string, error) { + // Should be a bearer token + if len(tok) > 6 && strings.ToUpper(tok[0:7]) == "BEARER " { + return tok[7:], nil + } + return tok, nil +} + +// Extract bearer token from Authorization header +// Uses PostExtractionFilter to strip "Bearer " prefix from header +var AuthorizationHeaderExtractor = &PostExtractionFilter{ + HeaderExtractor{"Authorization"}, + stripBearerPrefixFromTokenString, +} + +// Extractor for OAuth2 access tokens. Looks in 'Authorization' +// header then 'access_token' argument for a token. 
+var OAuth2Extractor = &MultiExtractor{ + AuthorizationHeaderExtractor, + ArgumentExtractor{"access_token"}, +} diff -Nru juju-core-2.0~beta15/src/github.com/dgrijalva/jwt-go/request/request.go juju-core-2.0.0/src/github.com/dgrijalva/jwt-go/request/request.go --- juju-core-2.0~beta15/src/github.com/dgrijalva/jwt-go/request/request.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/dgrijalva/jwt-go/request/request.go 2016-10-13 14:32:22.000000000 +0000 @@ -0,0 +1,24 @@ +package request + +import ( + "github.com/dgrijalva/jwt-go" + "net/http" +) + +// Extract and parse a JWT token from an HTTP request. +// This behaves the same as Parse, but accepts a request and an extractor +// instead of a token string. The Extractor interface allows you to define +// the logic for extracting a token. Several useful implementations are provided. +func ParseFromRequest(req *http.Request, extractor Extractor, keyFunc jwt.Keyfunc) (token *jwt.Token, err error) { + return ParseFromRequestWithClaims(req, extractor, jwt.MapClaims{}, keyFunc) +} + +// ParseFromRequest but with custom Claims type +func ParseFromRequestWithClaims(req *http.Request, extractor Extractor, claims jwt.Claims, keyFunc jwt.Keyfunc) (token *jwt.Token, err error) { + // Extract token from request + if tokStr, err := extractor.ExtractToken(req); err == nil { + return jwt.ParseWithClaims(tokStr, claims, keyFunc) + } else { + return nil, err + } +} diff -Nru juju-core-2.0~beta15/src/github.com/dgrijalva/jwt-go/request/request_test.go juju-core-2.0.0/src/github.com/dgrijalva/jwt-go/request/request_test.go --- juju-core-2.0~beta15/src/github.com/dgrijalva/jwt-go/request/request_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/dgrijalva/jwt-go/request/request_test.go 2016-10-13 14:32:22.000000000 +0000 @@ -0,0 +1,103 @@ +package request + +import ( + "fmt" + "github.com/dgrijalva/jwt-go" + "github.com/dgrijalva/jwt-go/test" + "net/http" + "net/url" + "reflect" + 
"strings" + "testing" +) + +var requestTestData = []struct { + name string + claims jwt.MapClaims + extractor Extractor + headers map[string]string + query url.Values + valid bool +}{ + { + "authorization bearer token", + jwt.MapClaims{"foo": "bar"}, + AuthorizationHeaderExtractor, + map[string]string{"Authorization": "Bearer %v"}, + url.Values{}, + true, + }, + { + "oauth bearer token - header", + jwt.MapClaims{"foo": "bar"}, + OAuth2Extractor, + map[string]string{"Authorization": "Bearer %v"}, + url.Values{}, + true, + }, + { + "oauth bearer token - url", + jwt.MapClaims{"foo": "bar"}, + OAuth2Extractor, + map[string]string{}, + url.Values{"access_token": {"%v"}}, + true, + }, + { + "url token", + jwt.MapClaims{"foo": "bar"}, + ArgumentExtractor{"token"}, + map[string]string{}, + url.Values{"token": {"%v"}}, + true, + }, +} + +func TestParseRequest(t *testing.T) { + // load keys from disk + privateKey := test.LoadRSAPrivateKeyFromDisk("../test/sample_key") + publicKey := test.LoadRSAPublicKeyFromDisk("../test/sample_key.pub") + keyfunc := func(*jwt.Token) (interface{}, error) { + return publicKey, nil + } + + // Bearer token request + for _, data := range requestTestData { + // Make token from claims + tokenString := test.MakeSampleToken(data.claims, privateKey) + + // Make query string + for k, vv := range data.query { + for i, v := range vv { + if strings.Contains(v, "%v") { + data.query[k][i] = fmt.Sprintf(v, tokenString) + } + } + } + + // Make request from test struct + r, _ := http.NewRequest("GET", fmt.Sprintf("/?%v", data.query.Encode()), nil) + for k, v := range data.headers { + if strings.Contains(v, "%v") { + r.Header.Set(k, fmt.Sprintf(v, tokenString)) + } else { + r.Header.Set(k, tokenString) + } + } + token, err := ParseFromRequestWithClaims(r, data.extractor, jwt.MapClaims{}, keyfunc) + + if token == nil { + t.Errorf("[%v] Token was not found: %v", data.name, err) + continue + } + if !reflect.DeepEqual(data.claims, token.Claims) { + t.Errorf("[%v] 
Claims mismatch. Expecting: %v Got: %v", data.name, data.claims, token.Claims) + } + if data.valid && err != nil { + t.Errorf("[%v] Error while verifying token: %v", data.name, err) + } + if !data.valid && err == nil { + t.Errorf("[%v] Invalid token passed validation", data.name) + } + } +} diff -Nru juju-core-2.0~beta15/src/github.com/dgrijalva/jwt-go/rsa.go juju-core-2.0.0/src/github.com/dgrijalva/jwt-go/rsa.go --- juju-core-2.0~beta15/src/github.com/dgrijalva/jwt-go/rsa.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/dgrijalva/jwt-go/rsa.go 2016-10-13 14:32:22.000000000 +0000 @@ -0,0 +1,100 @@ +package jwt + +import ( + "crypto" + "crypto/rand" + "crypto/rsa" +) + +// Implements the RSA family of signing methods signing methods +type SigningMethodRSA struct { + Name string + Hash crypto.Hash +} + +// Specific instances for RS256 and company +var ( + SigningMethodRS256 *SigningMethodRSA + SigningMethodRS384 *SigningMethodRSA + SigningMethodRS512 *SigningMethodRSA +) + +func init() { + // RS256 + SigningMethodRS256 = &SigningMethodRSA{"RS256", crypto.SHA256} + RegisterSigningMethod(SigningMethodRS256.Alg(), func() SigningMethod { + return SigningMethodRS256 + }) + + // RS384 + SigningMethodRS384 = &SigningMethodRSA{"RS384", crypto.SHA384} + RegisterSigningMethod(SigningMethodRS384.Alg(), func() SigningMethod { + return SigningMethodRS384 + }) + + // RS512 + SigningMethodRS512 = &SigningMethodRSA{"RS512", crypto.SHA512} + RegisterSigningMethod(SigningMethodRS512.Alg(), func() SigningMethod { + return SigningMethodRS512 + }) +} + +func (m *SigningMethodRSA) Alg() string { + return m.Name +} + +// Implements the Verify method from SigningMethod +// For this signing method, must be an rsa.PublicKey structure. 
+func (m *SigningMethodRSA) Verify(signingString, signature string, key interface{}) error { + var err error + + // Decode the signature + var sig []byte + if sig, err = DecodeSegment(signature); err != nil { + return err + } + + var rsaKey *rsa.PublicKey + var ok bool + + if rsaKey, ok = key.(*rsa.PublicKey); !ok { + return ErrInvalidKeyType + } + + // Create hasher + if !m.Hash.Available() { + return ErrHashUnavailable + } + hasher := m.Hash.New() + hasher.Write([]byte(signingString)) + + // Verify the signature + return rsa.VerifyPKCS1v15(rsaKey, m.Hash, hasher.Sum(nil), sig) +} + +// Implements the Sign method from SigningMethod +// For this signing method, must be an rsa.PrivateKey structure. +func (m *SigningMethodRSA) Sign(signingString string, key interface{}) (string, error) { + var rsaKey *rsa.PrivateKey + var ok bool + + // Validate type of key + if rsaKey, ok = key.(*rsa.PrivateKey); !ok { + return "", ErrInvalidKey + } + + // Create the hasher + if !m.Hash.Available() { + return "", ErrHashUnavailable + } + + hasher := m.Hash.New() + hasher.Write([]byte(signingString)) + + // Sign the string and return the encoded bytes + if sigBytes, err := rsa.SignPKCS1v15(rand.Reader, rsaKey, m.Hash, hasher.Sum(nil)); err == nil { + return EncodeSegment(sigBytes), nil + } else { + return "", err + } +} diff -Nru juju-core-2.0~beta15/src/github.com/dgrijalva/jwt-go/rsa_pss.go juju-core-2.0.0/src/github.com/dgrijalva/jwt-go/rsa_pss.go --- juju-core-2.0~beta15/src/github.com/dgrijalva/jwt-go/rsa_pss.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/dgrijalva/jwt-go/rsa_pss.go 2016-10-13 14:32:22.000000000 +0000 @@ -0,0 +1,126 @@ +// +build go1.4 + +package jwt + +import ( + "crypto" + "crypto/rand" + "crypto/rsa" +) + +// Implements the RSAPSS family of signing methods signing methods +type SigningMethodRSAPSS struct { + *SigningMethodRSA + Options *rsa.PSSOptions +} + +// Specific instances for RS/PS and company +var ( + SigningMethodPS256 
*SigningMethodRSAPSS + SigningMethodPS384 *SigningMethodRSAPSS + SigningMethodPS512 *SigningMethodRSAPSS +) + +func init() { + // PS256 + SigningMethodPS256 = &SigningMethodRSAPSS{ + &SigningMethodRSA{ + Name: "PS256", + Hash: crypto.SHA256, + }, + &rsa.PSSOptions{ + SaltLength: rsa.PSSSaltLengthAuto, + Hash: crypto.SHA256, + }, + } + RegisterSigningMethod(SigningMethodPS256.Alg(), func() SigningMethod { + return SigningMethodPS256 + }) + + // PS384 + SigningMethodPS384 = &SigningMethodRSAPSS{ + &SigningMethodRSA{ + Name: "PS384", + Hash: crypto.SHA384, + }, + &rsa.PSSOptions{ + SaltLength: rsa.PSSSaltLengthAuto, + Hash: crypto.SHA384, + }, + } + RegisterSigningMethod(SigningMethodPS384.Alg(), func() SigningMethod { + return SigningMethodPS384 + }) + + // PS512 + SigningMethodPS512 = &SigningMethodRSAPSS{ + &SigningMethodRSA{ + Name: "PS512", + Hash: crypto.SHA512, + }, + &rsa.PSSOptions{ + SaltLength: rsa.PSSSaltLengthAuto, + Hash: crypto.SHA512, + }, + } + RegisterSigningMethod(SigningMethodPS512.Alg(), func() SigningMethod { + return SigningMethodPS512 + }) +} + +// Implements the Verify method from SigningMethod +// For this verify method, key must be an rsa.PublicKey struct +func (m *SigningMethodRSAPSS) Verify(signingString, signature string, key interface{}) error { + var err error + + // Decode the signature + var sig []byte + if sig, err = DecodeSegment(signature); err != nil { + return err + } + + var rsaKey *rsa.PublicKey + switch k := key.(type) { + case *rsa.PublicKey: + rsaKey = k + default: + return ErrInvalidKey + } + + // Create hasher + if !m.Hash.Available() { + return ErrHashUnavailable + } + hasher := m.Hash.New() + hasher.Write([]byte(signingString)) + + return rsa.VerifyPSS(rsaKey, m.Hash, hasher.Sum(nil), sig, m.Options) +} + +// Implements the Sign method from SigningMethod +// For this signing method, key must be an rsa.PrivateKey struct +func (m *SigningMethodRSAPSS) Sign(signingString string, key interface{}) (string, error) { + var 
rsaKey *rsa.PrivateKey + + switch k := key.(type) { + case *rsa.PrivateKey: + rsaKey = k + default: + return "", ErrInvalidKeyType + } + + // Create the hasher + if !m.Hash.Available() { + return "", ErrHashUnavailable + } + + hasher := m.Hash.New() + hasher.Write([]byte(signingString)) + + // Sign the string and return the encoded bytes + if sigBytes, err := rsa.SignPSS(rand.Reader, rsaKey, m.Hash, hasher.Sum(nil), m.Options); err == nil { + return EncodeSegment(sigBytes), nil + } else { + return "", err + } +} diff -Nru juju-core-2.0~beta15/src/github.com/dgrijalva/jwt-go/rsa_pss_test.go juju-core-2.0.0/src/github.com/dgrijalva/jwt-go/rsa_pss_test.go --- juju-core-2.0~beta15/src/github.com/dgrijalva/jwt-go/rsa_pss_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/dgrijalva/jwt-go/rsa_pss_test.go 2016-10-13 14:32:22.000000000 +0000 @@ -0,0 +1,96 @@ +// +build go1.4 + +package jwt_test + +import ( + "crypto/rsa" + "io/ioutil" + "strings" + "testing" + + "github.com/dgrijalva/jwt-go" +) + +var rsaPSSTestData = []struct { + name string + tokenString string + alg string + claims map[string]interface{} + valid bool +}{ + { + "Basic PS256", + "eyJhbGciOiJQUzI1NiIsInR5cCI6IkpXVCJ9.eyJmb28iOiJiYXIifQ.PPG4xyDVY8ffp4CcxofNmsTDXsrVG2npdQuibLhJbv4ClyPTUtR5giNSvuxo03kB6I8VXVr0Y9X7UxhJVEoJOmULAwRWaUsDnIewQa101cVhMa6iR8X37kfFoiZ6NkS-c7henVkkQWu2HtotkEtQvN5hFlk8IevXXPmvZlhQhwzB1sGzGYnoi1zOfuL98d3BIjUjtlwii5w6gYG2AEEzp7HnHCsb3jIwUPdq86Oe6hIFjtBwduIK90ca4UqzARpcfwxHwVLMpatKask00AgGVI0ysdk0BLMjmLutquD03XbThHScC2C2_Pp4cHWgMzvbgLU2RYYZcZRKr46QeNgz9w", + "PS256", + map[string]interface{}{"foo": "bar"}, + true, + }, + { + "Basic PS384", + 
"eyJhbGciOiJQUzM4NCIsInR5cCI6IkpXVCJ9.eyJmb28iOiJiYXIifQ.w7-qqgj97gK4fJsq_DCqdYQiylJjzWONvD0qWWWhqEOFk2P1eDULPnqHRnjgTXoO4HAw4YIWCsZPet7nR3Xxq4ZhMqvKW8b7KlfRTb9cH8zqFvzMmybQ4jv2hKc3bXYqVow3AoR7hN_CWXI3Dv6Kd2X5xhtxRHI6IL39oTVDUQ74LACe-9t4c3QRPuj6Pq1H4FAT2E2kW_0KOc6EQhCLWEhm2Z2__OZskDC8AiPpP8Kv4k2vB7l0IKQu8Pr4RcNBlqJdq8dA5D3hk5TLxP8V5nG1Ib80MOMMqoS3FQvSLyolFX-R_jZ3-zfq6Ebsqr0yEb0AH2CfsECF7935Pa0FKQ", + "PS384", + map[string]interface{}{"foo": "bar"}, + true, + }, + { + "Basic PS512", + "eyJhbGciOiJQUzUxMiIsInR5cCI6IkpXVCJ9.eyJmb28iOiJiYXIifQ.GX1HWGzFaJevuSLavqqFYaW8_TpvcjQ8KfC5fXiSDzSiT9UD9nB_ikSmDNyDILNdtjZLSvVKfXxZJqCfefxAtiozEDDdJthZ-F0uO4SPFHlGiXszvKeodh7BuTWRI2wL9-ZO4mFa8nq3GMeQAfo9cx11i7nfN8n2YNQ9SHGovG7_T_AvaMZB_jT6jkDHpwGR9mz7x1sycckEo6teLdHRnH_ZdlHlxqknmyTu8Odr5Xh0sJFOL8BepWbbvIIn-P161rRHHiDWFv6nhlHwZnVzjx7HQrWSGb6-s2cdLie9QL_8XaMcUpjLkfOMKkDOfHo6AvpL7Jbwi83Z2ZTHjJWB-A", + "PS512", + map[string]interface{}{"foo": "bar"}, + true, + }, + { + "basic PS256 invalid: foo => bar", + "eyJhbGciOiJQUzI1NiIsInR5cCI6IkpXVCJ9.eyJmb28iOiJiYXIifQ.PPG4xyDVY8ffp4CcxofNmsTDXsrVG2npdQuibLhJbv4ClyPTUtR5giNSvuxo03kB6I8VXVr0Y9X7UxhJVEoJOmULAwRWaUsDnIewQa101cVhMa6iR8X37kfFoiZ6NkS-c7henVkkQWu2HtotkEtQvN5hFlk8IevXXPmvZlhQhwzB1sGzGYnoi1zOfuL98d3BIjUjtlwii5w6gYG2AEEzp7HnHCsb3jIwUPdq86Oe6hIFjtBwduIK90ca4UqzARpcfwxHwVLMpatKask00AgGVI0ysdk0BLMjmLutquD03XbThHScC2C2_Pp4cHWgMzvbgLU2RYYZcZRKr46QeNgz9W", + "PS256", + map[string]interface{}{"foo": "bar"}, + false, + }, +} + +func TestRSAPSSVerify(t *testing.T) { + var err error + + key, _ := ioutil.ReadFile("test/sample_key.pub") + var rsaPSSKey *rsa.PublicKey + if rsaPSSKey, err = jwt.ParseRSAPublicKeyFromPEM(key); err != nil { + t.Errorf("Unable to parse RSA public key: %v", err) + } + + for _, data := range rsaPSSTestData { + parts := strings.Split(data.tokenString, ".") + + method := jwt.GetSigningMethod(data.alg) + err := method.Verify(strings.Join(parts[0:2], "."), parts[2], rsaPSSKey) + if data.valid && err != nil { + t.Errorf("[%v] 
Error while verifying key: %v", data.name, err) + } + if !data.valid && err == nil { + t.Errorf("[%v] Invalid key passed validation", data.name) + } + } +} + +func TestRSAPSSSign(t *testing.T) { + var err error + + key, _ := ioutil.ReadFile("test/sample_key") + var rsaPSSKey *rsa.PrivateKey + if rsaPSSKey, err = jwt.ParseRSAPrivateKeyFromPEM(key); err != nil { + t.Errorf("Unable to parse RSA private key: %v", err) + } + + for _, data := range rsaPSSTestData { + if data.valid { + parts := strings.Split(data.tokenString, ".") + method := jwt.GetSigningMethod(data.alg) + sig, err := method.Sign(strings.Join(parts[0:2], "."), rsaPSSKey) + if err != nil { + t.Errorf("[%v] Error signing token: %v", data.name, err) + } + if sig == parts[2] { + t.Errorf("[%v] Signatures shouldn't match\nnew:\n%v\noriginal:\n%v", data.name, sig, parts[2]) + } + } + } +} diff -Nru juju-core-2.0~beta15/src/github.com/dgrijalva/jwt-go/rsa_test.go juju-core-2.0.0/src/github.com/dgrijalva/jwt-go/rsa_test.go --- juju-core-2.0~beta15/src/github.com/dgrijalva/jwt-go/rsa_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/dgrijalva/jwt-go/rsa_test.go 2016-10-13 14:32:22.000000000 +0000 @@ -0,0 +1,176 @@ +package jwt_test + +import ( + "github.com/dgrijalva/jwt-go" + "io/ioutil" + "strings" + "testing" +) + +var rsaTestData = []struct { + name string + tokenString string + alg string + claims map[string]interface{} + valid bool +}{ + { + "Basic RS256", + "eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiJ9.eyJmb28iOiJiYXIifQ.FhkiHkoESI_cG3NPigFrxEk9Z60_oXrOT2vGm9Pn6RDgYNovYORQmmA0zs1AoAOf09ly2Nx2YAg6ABqAYga1AcMFkJljwxTT5fYphTuqpWdy4BELeSYJx5Ty2gmr8e7RonuUztrdD5WfPqLKMm1Ozp_T6zALpRmwTIW0QPnaBXaQD90FplAg46Iy1UlDKr-Eupy0i5SLch5Q-p2ZpaL_5fnTIUDlxC3pWhJTyx_71qDI-mAA_5lE_VdroOeflG56sSmDxopPEG3bFlSu1eowyBfxtu0_CuVd-M42RU75Zc4Gsj6uV77MBtbMrf4_7M_NUTSgoIF3fRqxrj0NzihIBg", + "RS256", + map[string]interface{}{"foo": "bar"}, + true, + }, + { + "Basic RS384", + 
"eyJhbGciOiJSUzM4NCIsInR5cCI6IkpXVCJ9.eyJmb28iOiJiYXIifQ.W-jEzRfBigtCWsinvVVuldiuilzVdU5ty0MvpLaSaqK9PlAWWlDQ1VIQ_qSKzwL5IXaZkvZFJXT3yL3n7OUVu7zCNJzdwznbC8Z-b0z2lYvcklJYi2VOFRcGbJtXUqgjk2oGsiqUMUMOLP70TTefkpsgqDxbRh9CDUfpOJgW-dU7cmgaoswe3wjUAUi6B6G2YEaiuXC0XScQYSYVKIzgKXJV8Zw-7AN_DBUI4GkTpsvQ9fVVjZM9csQiEXhYekyrKu1nu_POpQonGd8yqkIyXPECNmmqH5jH4sFiF67XhD7_JpkvLziBpI-uh86evBUadmHhb9Otqw3uV3NTaXLzJw", + "RS384", + map[string]interface{}{"foo": "bar"}, + true, + }, + { + "Basic RS512", + "eyJhbGciOiJSUzUxMiIsInR5cCI6IkpXVCJ9.eyJmb28iOiJiYXIifQ.zBlLlmRrUxx4SJPUbV37Q1joRcI9EW13grnKduK3wtYKmDXbgDpF1cZ6B-2Jsm5RB8REmMiLpGms-EjXhgnyh2TSHE-9W2gA_jvshegLWtwRVDX40ODSkTb7OVuaWgiy9y7llvcknFBTIg-FnVPVpXMmeV_pvwQyhaz1SSwSPrDyxEmksz1hq7YONXhXPpGaNbMMeDTNP_1oj8DZaqTIL9TwV8_1wb2Odt_Fy58Ke2RVFijsOLdnyEAjt2n9Mxihu9i3PhNBkkxa2GbnXBfq3kzvZ_xxGGopLdHhJjcGWXO-NiwI9_tiu14NRv4L2xC0ItD9Yz68v2ZIZEp_DuzwRQ", + "RS512", + map[string]interface{}{"foo": "bar"}, + true, + }, + { + "basic invalid: foo => bar", + "eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiJ9.eyJmb28iOiJiYXIifQ.EhkiHkoESI_cG3NPigFrxEk9Z60_oXrOT2vGm9Pn6RDgYNovYORQmmA0zs1AoAOf09ly2Nx2YAg6ABqAYga1AcMFkJljwxTT5fYphTuqpWdy4BELeSYJx5Ty2gmr8e7RonuUztrdD5WfPqLKMm1Ozp_T6zALpRmwTIW0QPnaBXaQD90FplAg46Iy1UlDKr-Eupy0i5SLch5Q-p2ZpaL_5fnTIUDlxC3pWhJTyx_71qDI-mAA_5lE_VdroOeflG56sSmDxopPEG3bFlSu1eowyBfxtu0_CuVd-M42RU75Zc4Gsj6uV77MBtbMrf4_7M_NUTSgoIF3fRqxrj0NzihIBg", + "RS256", + map[string]interface{}{"foo": "bar"}, + false, + }, +} + +func TestRSAVerify(t *testing.T) { + keyData, _ := ioutil.ReadFile("test/sample_key.pub") + key, _ := jwt.ParseRSAPublicKeyFromPEM(keyData) + + for _, data := range rsaTestData { + parts := strings.Split(data.tokenString, ".") + + method := jwt.GetSigningMethod(data.alg) + err := method.Verify(strings.Join(parts[0:2], "."), parts[2], key) + if data.valid && err != nil { + t.Errorf("[%v] Error while verifying key: %v", data.name, err) + } + if !data.valid && err == nil { + t.Errorf("[%v] Invalid key passed validation", data.name) 
+ } + } +} + +func TestRSASign(t *testing.T) { + keyData, _ := ioutil.ReadFile("test/sample_key") + key, _ := jwt.ParseRSAPrivateKeyFromPEM(keyData) + + for _, data := range rsaTestData { + if data.valid { + parts := strings.Split(data.tokenString, ".") + method := jwt.GetSigningMethod(data.alg) + sig, err := method.Sign(strings.Join(parts[0:2], "."), key) + if err != nil { + t.Errorf("[%v] Error signing token: %v", data.name, err) + } + if sig != parts[2] { + t.Errorf("[%v] Incorrect signature.\nwas:\n%v\nexpecting:\n%v", data.name, sig, parts[2]) + } + } + } +} + +func TestRSAVerifyWithPreParsedPrivateKey(t *testing.T) { + key, _ := ioutil.ReadFile("test/sample_key.pub") + parsedKey, err := jwt.ParseRSAPublicKeyFromPEM(key) + if err != nil { + t.Fatal(err) + } + testData := rsaTestData[0] + parts := strings.Split(testData.tokenString, ".") + err = jwt.SigningMethodRS256.Verify(strings.Join(parts[0:2], "."), parts[2], parsedKey) + if err != nil { + t.Errorf("[%v] Error while verifying key: %v", testData.name, err) + } +} + +func TestRSAWithPreParsedPrivateKey(t *testing.T) { + key, _ := ioutil.ReadFile("test/sample_key") + parsedKey, err := jwt.ParseRSAPrivateKeyFromPEM(key) + if err != nil { + t.Fatal(err) + } + testData := rsaTestData[0] + parts := strings.Split(testData.tokenString, ".") + sig, err := jwt.SigningMethodRS256.Sign(strings.Join(parts[0:2], "."), parsedKey) + if err != nil { + t.Errorf("[%v] Error signing token: %v", testData.name, err) + } + if sig != parts[2] { + t.Errorf("[%v] Incorrect signature.\nwas:\n%v\nexpecting:\n%v", testData.name, sig, parts[2]) + } +} + +func TestRSAKeyParsing(t *testing.T) { + key, _ := ioutil.ReadFile("test/sample_key") + pubKey, _ := ioutil.ReadFile("test/sample_key.pub") + badKey := []byte("All your base are belong to key") + + // Test parsePrivateKey + if _, e := jwt.ParseRSAPrivateKeyFromPEM(key); e != nil { + t.Errorf("Failed to parse valid private key: %v", e) + } + + if k, e := 
jwt.ParseRSAPrivateKeyFromPEM(pubKey); e == nil { + t.Errorf("Parsed public key as valid private key: %v", k) + } + + if k, e := jwt.ParseRSAPrivateKeyFromPEM(badKey); e == nil { + t.Errorf("Parsed invalid key as valid private key: %v", k) + } + + // Test parsePublicKey + if _, e := jwt.ParseRSAPublicKeyFromPEM(pubKey); e != nil { + t.Errorf("Failed to parse valid public key: %v", e) + } + + if k, e := jwt.ParseRSAPublicKeyFromPEM(key); e == nil { + t.Errorf("Parsed private key as valid public key: %v", k) + } + + if k, e := jwt.ParseRSAPublicKeyFromPEM(badKey); e == nil { + t.Errorf("Parsed invalid key as valid private key: %v", k) + } + +} + +func BenchmarkRS256Signing(b *testing.B) { + key, _ := ioutil.ReadFile("test/sample_key") + parsedKey, err := jwt.ParseRSAPrivateKeyFromPEM(key) + if err != nil { + b.Fatal(err) + } + + benchmarkSigning(b, jwt.SigningMethodRS256, parsedKey) +} + +func BenchmarkRS384Signing(b *testing.B) { + key, _ := ioutil.ReadFile("test/sample_key") + parsedKey, err := jwt.ParseRSAPrivateKeyFromPEM(key) + if err != nil { + b.Fatal(err) + } + + benchmarkSigning(b, jwt.SigningMethodRS384, parsedKey) +} + +func BenchmarkRS512Signing(b *testing.B) { + key, _ := ioutil.ReadFile("test/sample_key") + parsedKey, err := jwt.ParseRSAPrivateKeyFromPEM(key) + if err != nil { + b.Fatal(err) + } + + benchmarkSigning(b, jwt.SigningMethodRS512, parsedKey) +} diff -Nru juju-core-2.0~beta15/src/github.com/dgrijalva/jwt-go/rsa_utils.go juju-core-2.0.0/src/github.com/dgrijalva/jwt-go/rsa_utils.go --- juju-core-2.0~beta15/src/github.com/dgrijalva/jwt-go/rsa_utils.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/dgrijalva/jwt-go/rsa_utils.go 2016-10-13 14:32:22.000000000 +0000 @@ -0,0 +1,69 @@ +package jwt + +import ( + "crypto/rsa" + "crypto/x509" + "encoding/pem" + "errors" +) + +var ( + ErrKeyMustBePEMEncoded = errors.New("Invalid Key: Key must be PEM encoded PKCS1 or PKCS8 private key") + ErrNotRSAPrivateKey = errors.New("Key is not 
a valid RSA private key") + ErrNotRSAPublicKey = errors.New("Key is not a valid RSA public key") +) + +// Parse PEM encoded PKCS1 or PKCS8 private key +func ParseRSAPrivateKeyFromPEM(key []byte) (*rsa.PrivateKey, error) { + var err error + + // Parse PEM block + var block *pem.Block + if block, _ = pem.Decode(key); block == nil { + return nil, ErrKeyMustBePEMEncoded + } + + var parsedKey interface{} + if parsedKey, err = x509.ParsePKCS1PrivateKey(block.Bytes); err != nil { + if parsedKey, err = x509.ParsePKCS8PrivateKey(block.Bytes); err != nil { + return nil, err + } + } + + var pkey *rsa.PrivateKey + var ok bool + if pkey, ok = parsedKey.(*rsa.PrivateKey); !ok { + return nil, ErrNotRSAPrivateKey + } + + return pkey, nil +} + +// Parse PEM encoded PKCS1 or PKCS8 public key +func ParseRSAPublicKeyFromPEM(key []byte) (*rsa.PublicKey, error) { + var err error + + // Parse PEM block + var block *pem.Block + if block, _ = pem.Decode(key); block == nil { + return nil, ErrKeyMustBePEMEncoded + } + + // Parse the key + var parsedKey interface{} + if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil { + if cert, err := x509.ParseCertificate(block.Bytes); err == nil { + parsedKey = cert.PublicKey + } else { + return nil, err + } + } + + var pkey *rsa.PublicKey + var ok bool + if pkey, ok = parsedKey.(*rsa.PublicKey); !ok { + return nil, ErrNotRSAPublicKey + } + + return pkey, nil +} diff -Nru juju-core-2.0~beta15/src/github.com/dgrijalva/jwt-go/signing_method.go juju-core-2.0.0/src/github.com/dgrijalva/jwt-go/signing_method.go --- juju-core-2.0~beta15/src/github.com/dgrijalva/jwt-go/signing_method.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/dgrijalva/jwt-go/signing_method.go 2016-10-13 14:32:22.000000000 +0000 @@ -0,0 +1,35 @@ +package jwt + +import ( + "sync" +) + +var signingMethods = map[string]func() SigningMethod{} +var signingMethodLock = new(sync.RWMutex) + +// Implement SigningMethod to add new methods for signing or 
verifying tokens. +type SigningMethod interface { + Verify(signingString, signature string, key interface{}) error // Returns nil if signature is valid + Sign(signingString string, key interface{}) (string, error) // Returns encoded signature or error + Alg() string // returns the alg identifier for this method (example: 'HS256') +} + +// Register the "alg" name and a factory function for signing method. +// This is typically done during init() in the method's implementation +func RegisterSigningMethod(alg string, f func() SigningMethod) { + signingMethodLock.Lock() + defer signingMethodLock.Unlock() + + signingMethods[alg] = f +} + +// Get a signing method from an "alg" string +func GetSigningMethod(alg string) (method SigningMethod) { + signingMethodLock.RLock() + defer signingMethodLock.RUnlock() + + if methodF, ok := signingMethods[alg]; ok { + method = methodF() + } + return +} diff -Nru juju-core-2.0~beta15/src/github.com/dgrijalva/jwt-go/test/ec256-private.pem juju-core-2.0.0/src/github.com/dgrijalva/jwt-go/test/ec256-private.pem --- juju-core-2.0~beta15/src/github.com/dgrijalva/jwt-go/test/ec256-private.pem 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/dgrijalva/jwt-go/test/ec256-private.pem 2016-10-13 14:32:22.000000000 +0000 @@ -0,0 +1,5 @@ +-----BEGIN EC PRIVATE KEY----- +MHcCAQEEIAh5qA3rmqQQuu0vbKV/+zouz/y/Iy2pLpIcWUSyImSwoAoGCCqGSM49 +AwEHoUQDQgAEYD54V/vp+54P9DXarYqx4MPcm+HKRIQzNasYSoRQHQ/6S6Ps8tpM +cT+KvIIC8W/e9k0W7Cm72M1P9jU7SLf/vg== +-----END EC PRIVATE KEY----- diff -Nru juju-core-2.0~beta15/src/github.com/dgrijalva/jwt-go/test/ec256-public.pem juju-core-2.0.0/src/github.com/dgrijalva/jwt-go/test/ec256-public.pem --- juju-core-2.0~beta15/src/github.com/dgrijalva/jwt-go/test/ec256-public.pem 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/dgrijalva/jwt-go/test/ec256-public.pem 2016-10-13 14:32:22.000000000 +0000 @@ -0,0 +1,4 @@ +-----BEGIN PUBLIC KEY----- 
+MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEYD54V/vp+54P9DXarYqx4MPcm+HK +RIQzNasYSoRQHQ/6S6Ps8tpMcT+KvIIC8W/e9k0W7Cm72M1P9jU7SLf/vg== +-----END PUBLIC KEY----- diff -Nru juju-core-2.0~beta15/src/github.com/dgrijalva/jwt-go/test/ec384-private.pem juju-core-2.0.0/src/github.com/dgrijalva/jwt-go/test/ec384-private.pem --- juju-core-2.0~beta15/src/github.com/dgrijalva/jwt-go/test/ec384-private.pem 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/dgrijalva/jwt-go/test/ec384-private.pem 2016-10-13 14:32:22.000000000 +0000 @@ -0,0 +1,6 @@ +-----BEGIN EC PRIVATE KEY----- +MIGkAgEBBDCaCvMHKhcG/qT7xsNLYnDT7sE/D+TtWIol1ROdaK1a564vx5pHbsRy +SEKcIxISi1igBwYFK4EEACKhZANiAATYa7rJaU7feLMqrAx6adZFNQOpaUH/Uylb +ZLriOLON5YFVwtVUpO1FfEXZUIQpptRPtc5ixIPY658yhBSb6irfIJUSP9aYTflJ +GKk/mDkK4t8mWBzhiD5B6jg9cEGhGgA= +-----END EC PRIVATE KEY----- diff -Nru juju-core-2.0~beta15/src/github.com/dgrijalva/jwt-go/test/ec384-public.pem juju-core-2.0.0/src/github.com/dgrijalva/jwt-go/test/ec384-public.pem --- juju-core-2.0~beta15/src/github.com/dgrijalva/jwt-go/test/ec384-public.pem 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/dgrijalva/jwt-go/test/ec384-public.pem 2016-10-13 14:32:22.000000000 +0000 @@ -0,0 +1,5 @@ +-----BEGIN PUBLIC KEY----- +MHYwEAYHKoZIzj0CAQYFK4EEACIDYgAE2Gu6yWlO33izKqwMemnWRTUDqWlB/1Mp +W2S64jizjeWBVcLVVKTtRXxF2VCEKabUT7XOYsSD2OufMoQUm+oq3yCVEj/WmE35 +SRipP5g5CuLfJlgc4Yg+Qeo4PXBBoRoA +-----END PUBLIC KEY----- diff -Nru juju-core-2.0~beta15/src/github.com/dgrijalva/jwt-go/test/ec512-private.pem juju-core-2.0.0/src/github.com/dgrijalva/jwt-go/test/ec512-private.pem --- juju-core-2.0~beta15/src/github.com/dgrijalva/jwt-go/test/ec512-private.pem 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/dgrijalva/jwt-go/test/ec512-private.pem 2016-10-13 14:32:22.000000000 +0000 @@ -0,0 +1,7 @@ +-----BEGIN EC PRIVATE KEY----- +MIHcAgEBBEIB0pE4uFaWRx7t03BsYlYvF1YvKaBGyvoakxnodm9ou0R9wC+sJAjH 
+QZZJikOg4SwNqgQ/hyrOuDK2oAVHhgVGcYmgBwYFK4EEACOhgYkDgYYABAAJXIuw +12MUzpHggia9POBFYXSxaOGKGbMjIyDI+6q7wi7LMw3HgbaOmgIqFG72o8JBQwYN +4IbXHf+f86CRY1AA2wHzbHvt6IhkCXTNxBEffa1yMUgu8n9cKKF2iLgyQKcKqW33 +8fGOw/n3Rm2Yd/EB56u2rnD29qS+nOM9eGS+gy39OQ== +-----END EC PRIVATE KEY----- diff -Nru juju-core-2.0~beta15/src/github.com/dgrijalva/jwt-go/test/ec512-public.pem juju-core-2.0.0/src/github.com/dgrijalva/jwt-go/test/ec512-public.pem --- juju-core-2.0~beta15/src/github.com/dgrijalva/jwt-go/test/ec512-public.pem 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/dgrijalva/jwt-go/test/ec512-public.pem 2016-10-13 14:32:22.000000000 +0000 @@ -0,0 +1,6 @@ +-----BEGIN PUBLIC KEY----- +MIGbMBAGByqGSM49AgEGBSuBBAAjA4GGAAQACVyLsNdjFM6R4IImvTzgRWF0sWjh +ihmzIyMgyPuqu8IuyzMNx4G2jpoCKhRu9qPCQUMGDeCG1x3/n/OgkWNQANsB82x7 +7eiIZAl0zcQRH32tcjFILvJ/XCihdoi4MkCnCqlt9/HxjsP590ZtmHfxAeertq5w +9vakvpzjPXhkvoMt/Tk= +-----END PUBLIC KEY----- diff -Nru juju-core-2.0~beta15/src/github.com/dgrijalva/jwt-go/test/helpers.go juju-core-2.0.0/src/github.com/dgrijalva/jwt-go/test/helpers.go --- juju-core-2.0~beta15/src/github.com/dgrijalva/jwt-go/test/helpers.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/dgrijalva/jwt-go/test/helpers.go 2016-10-13 14:32:22.000000000 +0000 @@ -0,0 +1,42 @@ +package test + +import ( + "crypto/rsa" + "github.com/dgrijalva/jwt-go" + "io/ioutil" +) + +func LoadRSAPrivateKeyFromDisk(location string) *rsa.PrivateKey { + keyData, e := ioutil.ReadFile(location) + if e != nil { + panic(e.Error()) + } + key, e := jwt.ParseRSAPrivateKeyFromPEM(keyData) + if e != nil { + panic(e.Error()) + } + return key +} + +func LoadRSAPublicKeyFromDisk(location string) *rsa.PublicKey { + keyData, e := ioutil.ReadFile(location) + if e != nil { + panic(e.Error()) + } + key, e := jwt.ParseRSAPublicKeyFromPEM(keyData) + if e != nil { + panic(e.Error()) + } + return key +} + +func MakeSampleToken(c jwt.Claims, key interface{}) string { + token := 
jwt.NewWithClaims(jwt.SigningMethodRS256, c) + s, e := token.SignedString(key) + + if e != nil { + panic(e.Error()) + } + + return s +} diff -Nru juju-core-2.0~beta15/src/github.com/dgrijalva/jwt-go/test/hmacTestKey juju-core-2.0.0/src/github.com/dgrijalva/jwt-go/test/hmacTestKey --- juju-core-2.0~beta15/src/github.com/dgrijalva/jwt-go/test/hmacTestKey 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/dgrijalva/jwt-go/test/hmacTestKey 2016-10-13 14:32:22.000000000 +0000 @@ -0,0 +1 @@ +#5K+¥¼ƒ~ew{¦Z³(æðTÉ(©„²ÒP.¿ÓûZ’ÒGï–Š´Ãwb="=.!r.OÀÍšõgЀ£ \ No newline at end of file diff -Nru juju-core-2.0~beta15/src/github.com/dgrijalva/jwt-go/test/sample_key juju-core-2.0.0/src/github.com/dgrijalva/jwt-go/test/sample_key --- juju-core-2.0~beta15/src/github.com/dgrijalva/jwt-go/test/sample_key 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/dgrijalva/jwt-go/test/sample_key 2016-10-13 14:32:22.000000000 +0000 @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEowIBAAKCAQEA4f5wg5l2hKsTeNem/V41fGnJm6gOdrj8ym3rFkEU/wT8RDtn +SgFEZOQpHEgQ7JL38xUfU0Y3g6aYw9QT0hJ7mCpz9Er5qLaMXJwZxzHzAahlfA0i +cqabvJOMvQtzD6uQv6wPEyZtDTWiQi9AXwBpHssPnpYGIn20ZZuNlX2BrClciHhC +PUIIZOQn/MmqTD31jSyjoQoV7MhhMTATKJx2XrHhR+1DcKJzQBSTAGnpYVaqpsAR +ap+nwRipr3nUTuxyGohBTSmjJ2usSeQXHI3bODIRe1AuTyHceAbewn8b462yEWKA +Rdpd9AjQW5SIVPfdsz5B6GlYQ5LdYKtznTuy7wIDAQABAoIBAQCwia1k7+2oZ2d3 +n6agCAbqIE1QXfCmh41ZqJHbOY3oRQG3X1wpcGH4Gk+O+zDVTV2JszdcOt7E5dAy +MaomETAhRxB7hlIOnEN7WKm+dGNrKRvV0wDU5ReFMRHg31/Lnu8c+5BvGjZX+ky9 +POIhFFYJqwCRlopGSUIxmVj5rSgtzk3iWOQXr+ah1bjEXvlxDOWkHN6YfpV5ThdE +KdBIPGEVqa63r9n2h+qazKrtiRqJqGnOrHzOECYbRFYhexsNFz7YT02xdfSHn7gM +IvabDDP/Qp0PjE1jdouiMaFHYnLBbgvlnZW9yuVf/rpXTUq/njxIXMmvmEyyvSDn +FcFikB8pAoGBAPF77hK4m3/rdGT7X8a/gwvZ2R121aBcdPwEaUhvj/36dx596zvY +mEOjrWfZhF083/nYWE2kVquj2wjs+otCLfifEEgXcVPTnEOPO9Zg3uNSL0nNQghj +FuD3iGLTUBCtM66oTe0jLSslHe8gLGEQqyMzHOzYxNqibxcOZIe8Qt0NAoGBAO+U +I5+XWjWEgDmvyC3TrOSf/KCGjtu0TSv30ipv27bDLMrpvPmD/5lpptTFwcxvVhCs 
+2b+chCjlghFSWFbBULBrfci2FtliClOVMYrlNBdUSJhf3aYSG2Doe6Bgt1n2CpNn +/iu37Y3NfemZBJA7hNl4dYe+f+uzM87cdQ214+jrAoGAXA0XxX8ll2+ToOLJsaNT +OvNB9h9Uc5qK5X5w+7G7O998BN2PC/MWp8H+2fVqpXgNENpNXttkRm1hk1dych86 +EunfdPuqsX+as44oCyJGFHVBnWpm33eWQw9YqANRI+pCJzP08I5WK3osnPiwshd+ +hR54yjgfYhBFNI7B95PmEQkCgYBzFSz7h1+s34Ycr8SvxsOBWxymG5zaCsUbPsL0 +4aCgLScCHb9J+E86aVbbVFdglYa5Id7DPTL61ixhl7WZjujspeXZGSbmq0Kcnckb +mDgqkLECiOJW2NHP/j0McAkDLL4tysF8TLDO8gvuvzNC+WQ6drO2ThrypLVZQ+ry +eBIPmwKBgEZxhqa0gVvHQG/7Od69KWj4eJP28kq13RhKay8JOoN0vPmspXJo1HY3 +CKuHRG+AP579dncdUnOMvfXOtkdM4vk0+hWASBQzM9xzVcztCa+koAugjVaLS9A+ +9uQoqEeVNTckxx0S2bYevRy7hGQmUJTyQm3j1zEUR5jpdbL83Fbq +-----END RSA PRIVATE KEY----- diff -Nru juju-core-2.0~beta15/src/github.com/dgrijalva/jwt-go/test/sample_key.pub juju-core-2.0.0/src/github.com/dgrijalva/jwt-go/test/sample_key.pub --- juju-core-2.0~beta15/src/github.com/dgrijalva/jwt-go/test/sample_key.pub 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/dgrijalva/jwt-go/test/sample_key.pub 2016-10-13 14:32:22.000000000 +0000 @@ -0,0 +1,9 @@ +-----BEGIN PUBLIC KEY----- +MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA4f5wg5l2hKsTeNem/V41 +fGnJm6gOdrj8ym3rFkEU/wT8RDtnSgFEZOQpHEgQ7JL38xUfU0Y3g6aYw9QT0hJ7 +mCpz9Er5qLaMXJwZxzHzAahlfA0icqabvJOMvQtzD6uQv6wPEyZtDTWiQi9AXwBp +HssPnpYGIn20ZZuNlX2BrClciHhCPUIIZOQn/MmqTD31jSyjoQoV7MhhMTATKJx2 +XrHhR+1DcKJzQBSTAGnpYVaqpsARap+nwRipr3nUTuxyGohBTSmjJ2usSeQXHI3b +ODIRe1AuTyHceAbewn8b462yEWKARdpd9AjQW5SIVPfdsz5B6GlYQ5LdYKtznTuy +7wIDAQAB +-----END PUBLIC KEY----- diff -Nru juju-core-2.0~beta15/src/github.com/dgrijalva/jwt-go/token.go juju-core-2.0.0/src/github.com/dgrijalva/jwt-go/token.go --- juju-core-2.0~beta15/src/github.com/dgrijalva/jwt-go/token.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/dgrijalva/jwt-go/token.go 2016-10-13 14:32:22.000000000 +0000 @@ -0,0 +1,108 @@ +package jwt + +import ( + "encoding/base64" + "encoding/json" + "strings" + "time" +) + +// TimeFunc provides the current 
time when parsing token to validate "exp" claim (expiration time). +// You can override it to use another time value. This is useful for testing or if your +// server uses a different time zone than your tokens. +var TimeFunc = time.Now + +// Parse methods use this callback function to supply +// the key for verification. The function receives the parsed, +// but unverified Token. This allows you to use properties in the +// Header of the token (such as `kid`) to identify which key to use. +type Keyfunc func(*Token) (interface{}, error) + +// A JWT Token. Different fields will be used depending on whether you're +// creating or parsing/verifying a token. +type Token struct { + Raw string // The raw token. Populated when you Parse a token + Method SigningMethod // The signing method used or to be used + Header map[string]interface{} // The first segment of the token + Claims Claims // The second segment of the token + Signature string // The third segment of the token. Populated when you Parse a token + Valid bool // Is the token valid? Populated when you Parse/Verify a token +} + +// Create a new Token. Takes a signing method +func New(method SigningMethod) *Token { + return NewWithClaims(method, MapClaims{}) +} + +func NewWithClaims(method SigningMethod, claims Claims) *Token { + return &Token{ + Header: map[string]interface{}{ + "typ": "JWT", + "alg": method.Alg(), + }, + Claims: claims, + Method: method, + } +} + +// Get the complete, signed token +func (t *Token) SignedString(key interface{}) (string, error) { + var sig, sstr string + var err error + if sstr, err = t.SigningString(); err != nil { + return "", err + } + if sig, err = t.Method.Sign(sstr, key); err != nil { + return "", err + } + return strings.Join([]string{sstr, sig}, "."), nil +} + +// Generate the signing string. This is the +// most expensive part of the whole deal. Unless you +// need this for something special, just go straight for +// the SignedString. 
+func (t *Token) SigningString() (string, error) { + var err error + parts := make([]string, 2) + for i, _ := range parts { + var jsonValue []byte + if i == 0 { + if jsonValue, err = json.Marshal(t.Header); err != nil { + return "", err + } + } else { + if jsonValue, err = json.Marshal(t.Claims); err != nil { + return "", err + } + } + + parts[i] = EncodeSegment(jsonValue) + } + return strings.Join(parts, "."), nil +} + +// Parse, validate, and return a token. +// keyFunc will receive the parsed token and should return the key for validating. +// If everything is kosher, err will be nil +func Parse(tokenString string, keyFunc Keyfunc) (*Token, error) { + return new(Parser).Parse(tokenString, keyFunc) +} + +func ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc) (*Token, error) { + return new(Parser).ParseWithClaims(tokenString, claims, keyFunc) +} + +// Encode JWT specific base64url encoding with padding stripped +func EncodeSegment(seg []byte) string { + return strings.TrimRight(base64.URLEncoding.EncodeToString(seg), "=") +} + +// Decode JWT specific base64url encoding with padding stripped +func DecodeSegment(seg string) ([]byte, error) { + if l := len(seg) % 4; l > 0 { + seg += strings.Repeat("=", 4-l) + } + + return base64.URLEncoding.DecodeString(seg) +} diff -Nru juju-core-2.0~beta15/src/github.com/dgrijalva/jwt-go/.travis.yml juju-core-2.0.0/src/github.com/dgrijalva/jwt-go/.travis.yml --- juju-core-2.0~beta15/src/github.com/dgrijalva/jwt-go/.travis.yml 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/dgrijalva/jwt-go/.travis.yml 2016-10-13 14:32:22.000000000 +0000 @@ -0,0 +1,8 @@ +language: go + +go: + - 1.3 + - 1.4 + - 1.5 + - 1.6 + - tip diff -Nru juju-core-2.0~beta15/src/github.com/dgrijalva/jwt-go/VERSION_HISTORY.md juju-core-2.0.0/src/github.com/dgrijalva/jwt-go/VERSION_HISTORY.md --- juju-core-2.0~beta15/src/github.com/dgrijalva/jwt-go/VERSION_HISTORY.md 1970-01-01 00:00:00.000000000 +0000 +++ 
juju-core-2.0.0/src/github.com/dgrijalva/jwt-go/VERSION_HISTORY.md 2016-10-13 14:32:22.000000000 +0000 @@ -0,0 +1,105 @@ +## `jwt-go` Version History + +#### 3.0.0 + +* **Compatibility Breaking Changes**: See MIGRATION_GUIDE.md for tips on updating your code + * Dropped support for `[]byte` keys when using RSA signing methods. This convenience feature could contribute to security vulnerabilities involving mismatched key types with signing methods. + * `ParseFromRequest` has been moved to `request` subpackage and usage has changed + * The `Claims` property on `Token` is now type `Claims` instead of `map[string]interface{}`. The default value is type `MapClaims`, which is an alias to `map[string]interface{}`. This makes it possible to use a custom type when decoding claims. +* Other Additions and Changes + * Added `Claims` interface type to allow users to decode the claims into a custom type + * Added `ParseWithClaims`, which takes a third argument of type `Claims`. Use this function instead of `Parse` if you have a custom type you'd like to decode into. + * Dramatically improved the functionality and flexibility of `ParseFromRequest`, which is now in the `request` subpackage + * Added `ParseFromRequestWithClaims` which is the `FromRequest` equivalent of `ParseWithClaims` + * Added new interface type `Extractor`, which is used for extracting JWT strings from http requests. Used with `ParseFromRequest` and `ParseFromRequestWithClaims`. + * Added several new, more specific, validation errors to error type bitmask + * Moved examples from README to executable example files + * Signing method registry is now thread safe + * Added new property to `ValidationError`, which contains the raw error returned by calls made by parse/verify (such as those returned by keyfunc or json parser) + +#### 2.7.0 + +This will likely be the last backwards compatible release before 3.0.0, excluding essential bug fixes. 
+ +* Added new option `-show` to the `jwt` command that will just output the decoded token without verifying +* Error text for expired tokens includes how long it's been expired +* Fixed incorrect error returned from `ParseRSAPublicKeyFromPEM` +* Documentation updates + +#### 2.6.0 + +* Exposed inner error within ValidationError +* Fixed validation errors when using UseJSONNumber flag +* Added several unit tests + +#### 2.5.0 + +* Added support for signing method none. You shouldn't use this. The API tries to make this clear. +* Updated/fixed some documentation +* Added more helpful error message when trying to parse tokens that begin with `BEARER ` + +#### 2.4.0 + +* Added new type, Parser, to allow for configuration of various parsing parameters + * You can now specify a list of valid signing methods. Anything outside this set will be rejected. + * You can now opt to use the `json.Number` type instead of `float64` when parsing token JSON +* Added support for [Travis CI](https://travis-ci.org/dgrijalva/jwt-go) +* Fixed some bugs with ECDSA parsing + +#### 2.3.0 + +* Added support for ECDSA signing methods +* Added support for RSA PSS signing methods (requires go v1.4) + +#### 2.2.0 + +* Gracefully handle a `nil` `Keyfunc` being passed to `Parse`. Result will now be the parsed token and an error, instead of a panic. + +#### 2.1.0 + +Backwards compatible API change that was missed in 2.0.0. + +* The `SignedString` method on `Token` now takes `interface{}` instead of `[]byte` + +#### 2.0.0 + +There were two major reasons for breaking backwards compatibility with this update. The first was a refactor required to expand the width of the RSA and HMAC-SHA signing implementations. There will likely be no required code changes to support this change. + +The second update, while unfortunately requiring a small change in integration, is required to open up this library to other signing methods. 
Not all keys used for all signing methods have a single standard on-disk representation. Requiring `[]byte` as the type for all keys proved too limiting. Additionally, this implementation allows for pre-parsed tokens to be reused, which might matter in an application that parses a high volume of tokens with a small set of keys. Backwards compatibilty has been maintained for passing `[]byte` to the RSA signing methods, but they will also accept `*rsa.PublicKey` and `*rsa.PrivateKey`. + +It is likely the only integration change required here will be to change `func(t *jwt.Token) ([]byte, error)` to `func(t *jwt.Token) (interface{}, error)` when calling `Parse`. + +* **Compatibility Breaking Changes** + * `SigningMethodHS256` is now `*SigningMethodHMAC` instead of `type struct` + * `SigningMethodRS256` is now `*SigningMethodRSA` instead of `type struct` + * `KeyFunc` now returns `interface{}` instead of `[]byte` + * `SigningMethod.Sign` now takes `interface{}` instead of `[]byte` for the key + * `SigningMethod.Verify` now takes `interface{}` instead of `[]byte` for the key +* Renamed type `SigningMethodHS256` to `SigningMethodHMAC`. Specific sizes are now just instances of this type. + * Added public package global `SigningMethodHS256` + * Added public package global `SigningMethodHS384` + * Added public package global `SigningMethodHS512` +* Renamed type `SigningMethodRS256` to `SigningMethodRSA`. Specific sizes are now just instances of this type. + * Added public package global `SigningMethodRS256` + * Added public package global `SigningMethodRS384` + * Added public package global `SigningMethodRS512` +* Moved sample private key for HMAC tests from an inline value to a file on disk. Value is unchanged. 
+* Refactored the RSA implementation to be easier to read +* Exposed helper methods `ParseRSAPrivateKeyFromPEM` and `ParseRSAPublicKeyFromPEM` + +#### 1.0.2 + +* Fixed bug in parsing public keys from certificates +* Added more tests around the parsing of keys for RS256 +* Code refactoring in RS256 implementation. No functional changes + +#### 1.0.1 + +* Fixed panic if RS256 signing method was passed an invalid key + +#### 1.0.0 + +* First versioned release +* API stabilized +* Supports creating, signing, parsing, and validating JWT tokens +* Supports RS256 and HS256 signing methods \ No newline at end of file diff -Nru juju-core-2.0~beta15/src/github.com/dustin/go-humanize/.gitignore juju-core-2.0.0/src/github.com/dustin/go-humanize/.gitignore --- juju-core-2.0~beta15/src/github.com/dustin/go-humanize/.gitignore 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/dustin/go-humanize/.gitignore 2016-10-13 14:31:52.000000000 +0000 @@ -0,0 +1,6 @@ +#* +*.[568] +*.a +*~ +[568].out +_* diff -Nru juju-core-2.0~beta15/src/github.com/gabriel-samfira/sys/.gitignore juju-core-2.0.0/src/github.com/gabriel-samfira/sys/.gitignore --- juju-core-2.0~beta15/src/github.com/gabriel-samfira/sys/.gitignore 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/gabriel-samfira/sys/.gitignore 2016-10-13 14:32:29.000000000 +0000 @@ -0,0 +1,2 @@ +# Add no patterns to .hgignore except for files generated by the build. +last-change diff -Nru juju-core-2.0~beta15/src/github.com/golang/protobuf/AUTHORS juju-core-2.0.0/src/github.com/golang/protobuf/AUTHORS --- juju-core-2.0~beta15/src/github.com/golang/protobuf/AUTHORS 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/golang/protobuf/AUTHORS 2016-10-13 14:32:09.000000000 +0000 @@ -0,0 +1,3 @@ +# This source code refers to The Go Authors for copyright purposes. +# The master list of authors is in the main Go distribution, +# visible at http://tip.golang.org/AUTHORS. 
diff -Nru juju-core-2.0~beta15/src/github.com/golang/protobuf/CONTRIBUTORS juju-core-2.0.0/src/github.com/golang/protobuf/CONTRIBUTORS --- juju-core-2.0~beta15/src/github.com/golang/protobuf/CONTRIBUTORS 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/golang/protobuf/CONTRIBUTORS 2016-10-13 14:32:09.000000000 +0000 @@ -0,0 +1,3 @@ +# This source code was written by the Go contributors. +# The master list of contributors is in the main Go distribution, +# visible at http://tip.golang.org/CONTRIBUTORS. diff -Nru juju-core-2.0~beta15/src/github.com/golang/protobuf/.gitignore juju-core-2.0.0/src/github.com/golang/protobuf/.gitignore --- juju-core-2.0~beta15/src/github.com/golang/protobuf/.gitignore 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/golang/protobuf/.gitignore 2016-10-13 14:32:09.000000000 +0000 @@ -0,0 +1,17 @@ +.DS_Store +*.[568ao] +*.pb.go +*.ao +*.so +*.pyc +._* +.nfs.* +[568a].out +*~ +*.orig +core +_obj +_test +_testmain.go +compiler/protoc-gen-go +compiler/testdata/extension_test diff -Nru juju-core-2.0~beta15/src/github.com/golang/protobuf/LICENSE juju-core-2.0.0/src/github.com/golang/protobuf/LICENSE --- juju-core-2.0~beta15/src/github.com/golang/protobuf/LICENSE 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/golang/protobuf/LICENSE 2016-10-13 14:32:09.000000000 +0000 @@ -0,0 +1,31 @@ +Go support for Protocol Buffers - Google's data interchange format + +Copyright 2010 The Go Authors. All rights reserved. +https://github.com/golang/protobuf + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + diff -Nru juju-core-2.0~beta15/src/github.com/golang/protobuf/Makefile juju-core-2.0.0/src/github.com/golang/protobuf/Makefile --- juju-core-2.0~beta15/src/github.com/golang/protobuf/Makefile 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/golang/protobuf/Makefile 2016-10-13 14:32:09.000000000 +0000 @@ -0,0 +1,52 @@ +# Go support for Protocol Buffers - Google's data interchange format +# +# Copyright 2010 The Go Authors. All rights reserved. +# https://github.com/golang/protobuf +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. 
+# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +all: install + +install: + go install ./proto + go install ./protoc-gen-go + +test: + go test ./proto + make -C protoc-gen-go/testdata test + +clean: + go clean ./... + +nuke: + go clean -i ./... 
+ +regenerate: + make -C protoc-gen-go/descriptor regenerate + make -C protoc-gen-go/plugin regenerate + make -C proto/testdata regenerate diff -Nru juju-core-2.0~beta15/src/github.com/golang/protobuf/Make.protobuf juju-core-2.0.0/src/github.com/golang/protobuf/Make.protobuf --- juju-core-2.0~beta15/src/github.com/golang/protobuf/Make.protobuf 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/golang/protobuf/Make.protobuf 2016-10-13 14:32:09.000000000 +0000 @@ -0,0 +1,40 @@ +# Go support for Protocol Buffers - Google's data interchange format +# +# Copyright 2010 The Go Authors. All rights reserved. +# https://github.com/golang/protobuf +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# Includable Makefile to add a rule for generating .pb.go files from .proto files +# (Google protocol buffer descriptions). +# Typical use if myproto.proto is a file in package mypackage in this directory: +# +# include $(GOROOT)/src/pkg/github.com/golang/protobuf/Make.protobuf + +%.pb.go: %.proto + protoc --go_out=. $< + diff -Nru juju-core-2.0~beta15/src/github.com/golang/protobuf/proto/all_test.go juju-core-2.0.0/src/github.com/golang/protobuf/proto/all_test.go --- juju-core-2.0~beta15/src/github.com/golang/protobuf/proto/all_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/golang/protobuf/proto/all_test.go 2016-10-13 14:32:09.000000000 +0000 @@ -0,0 +1,2083 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. 
nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto_test + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "math" + "math/rand" + "reflect" + "runtime/debug" + "strings" + "testing" + "time" + + . "github.com/golang/protobuf/proto" + . "github.com/golang/protobuf/proto/testdata" +) + +var globalO *Buffer + +func old() *Buffer { + if globalO == nil { + globalO = NewBuffer(nil) + } + globalO.Reset() + return globalO +} + +func equalbytes(b1, b2 []byte, t *testing.T) { + if len(b1) != len(b2) { + t.Errorf("wrong lengths: 2*%d != %d", len(b1), len(b2)) + return + } + for i := 0; i < len(b1); i++ { + if b1[i] != b2[i] { + t.Errorf("bad byte[%d]:%x %x: %s %s", i, b1[i], b2[i], b1, b2) + } + } +} + +func initGoTestField() *GoTestField { + f := new(GoTestField) + f.Label = String("label") + f.Type = String("type") + return f +} + +// These are all structurally equivalent but the tag numbers differ. +// (It's remarkable that required, optional, and repeated all have +// 8 letters.) 
+func initGoTest_RequiredGroup() *GoTest_RequiredGroup { + return &GoTest_RequiredGroup{ + RequiredField: String("required"), + } +} + +func initGoTest_OptionalGroup() *GoTest_OptionalGroup { + return &GoTest_OptionalGroup{ + RequiredField: String("optional"), + } +} + +func initGoTest_RepeatedGroup() *GoTest_RepeatedGroup { + return &GoTest_RepeatedGroup{ + RequiredField: String("repeated"), + } +} + +func initGoTest(setdefaults bool) *GoTest { + pb := new(GoTest) + if setdefaults { + pb.F_BoolDefaulted = Bool(Default_GoTest_F_BoolDefaulted) + pb.F_Int32Defaulted = Int32(Default_GoTest_F_Int32Defaulted) + pb.F_Int64Defaulted = Int64(Default_GoTest_F_Int64Defaulted) + pb.F_Fixed32Defaulted = Uint32(Default_GoTest_F_Fixed32Defaulted) + pb.F_Fixed64Defaulted = Uint64(Default_GoTest_F_Fixed64Defaulted) + pb.F_Uint32Defaulted = Uint32(Default_GoTest_F_Uint32Defaulted) + pb.F_Uint64Defaulted = Uint64(Default_GoTest_F_Uint64Defaulted) + pb.F_FloatDefaulted = Float32(Default_GoTest_F_FloatDefaulted) + pb.F_DoubleDefaulted = Float64(Default_GoTest_F_DoubleDefaulted) + pb.F_StringDefaulted = String(Default_GoTest_F_StringDefaulted) + pb.F_BytesDefaulted = Default_GoTest_F_BytesDefaulted + pb.F_Sint32Defaulted = Int32(Default_GoTest_F_Sint32Defaulted) + pb.F_Sint64Defaulted = Int64(Default_GoTest_F_Sint64Defaulted) + } + + pb.Kind = GoTest_TIME.Enum() + pb.RequiredField = initGoTestField() + pb.F_BoolRequired = Bool(true) + pb.F_Int32Required = Int32(3) + pb.F_Int64Required = Int64(6) + pb.F_Fixed32Required = Uint32(32) + pb.F_Fixed64Required = Uint64(64) + pb.F_Uint32Required = Uint32(3232) + pb.F_Uint64Required = Uint64(6464) + pb.F_FloatRequired = Float32(3232) + pb.F_DoubleRequired = Float64(6464) + pb.F_StringRequired = String("string") + pb.F_BytesRequired = []byte("bytes") + pb.F_Sint32Required = Int32(-32) + pb.F_Sint64Required = Int64(-64) + pb.Requiredgroup = initGoTest_RequiredGroup() + + return pb +} + +func fail(msg string, b *bytes.Buffer, s string, t 
*testing.T) { + data := b.Bytes() + ld := len(data) + ls := len(s) / 2 + + fmt.Printf("fail %s ld=%d ls=%d\n", msg, ld, ls) + + // find the interesting spot - n + n := ls + if ld < ls { + n = ld + } + j := 0 + for i := 0; i < n; i++ { + bs := hex(s[j])*16 + hex(s[j+1]) + j += 2 + if data[i] == bs { + continue + } + n = i + break + } + l := n - 10 + if l < 0 { + l = 0 + } + h := n + 10 + + // find the interesting spot - n + fmt.Printf("is[%d]:", l) + for i := l; i < h; i++ { + if i >= ld { + fmt.Printf(" --") + continue + } + fmt.Printf(" %.2x", data[i]) + } + fmt.Printf("\n") + + fmt.Printf("sb[%d]:", l) + for i := l; i < h; i++ { + if i >= ls { + fmt.Printf(" --") + continue + } + bs := hex(s[j])*16 + hex(s[j+1]) + j += 2 + fmt.Printf(" %.2x", bs) + } + fmt.Printf("\n") + + t.Fail() + + // t.Errorf("%s: \ngood: %s\nbad: %x", msg, s, b.Bytes()) + // Print the output in a partially-decoded format; can + // be helpful when updating the test. It produces the output + // that is pasted, with minor edits, into the argument to verify(). 
+ // data := b.Bytes() + // nesting := 0 + // for b.Len() > 0 { + // start := len(data) - b.Len() + // var u uint64 + // u, err := DecodeVarint(b) + // if err != nil { + // fmt.Printf("decode error on varint:", err) + // return + // } + // wire := u & 0x7 + // tag := u >> 3 + // switch wire { + // case WireVarint: + // v, err := DecodeVarint(b) + // if err != nil { + // fmt.Printf("decode error on varint:", err) + // return + // } + // fmt.Printf("\t\t\"%x\" // field %d, encoding %d, value %d\n", + // data[start:len(data)-b.Len()], tag, wire, v) + // case WireFixed32: + // v, err := DecodeFixed32(b) + // if err != nil { + // fmt.Printf("decode error on fixed32:", err) + // return + // } + // fmt.Printf("\t\t\"%x\" // field %d, encoding %d, value %d\n", + // data[start:len(data)-b.Len()], tag, wire, v) + // case WireFixed64: + // v, err := DecodeFixed64(b) + // if err != nil { + // fmt.Printf("decode error on fixed64:", err) + // return + // } + // fmt.Printf("\t\t\"%x\" // field %d, encoding %d, value %d\n", + // data[start:len(data)-b.Len()], tag, wire, v) + // case WireBytes: + // nb, err := DecodeVarint(b) + // if err != nil { + // fmt.Printf("decode error on bytes:", err) + // return + // } + // after_tag := len(data) - b.Len() + // str := make([]byte, nb) + // _, err = b.Read(str) + // if err != nil { + // fmt.Printf("decode error on bytes:", err) + // return + // } + // fmt.Printf("\t\t\"%x\" \"%x\" // field %d, encoding %d (FIELD)\n", + // data[start:after_tag], str, tag, wire) + // case WireStartGroup: + // nesting++ + // fmt.Printf("\t\t\"%x\"\t\t// start group field %d level %d\n", + // data[start:len(data)-b.Len()], tag, nesting) + // case WireEndGroup: + // fmt.Printf("\t\t\"%x\"\t\t// end group field %d level %d\n", + // data[start:len(data)-b.Len()], tag, nesting) + // nesting-- + // default: + // fmt.Printf("unrecognized wire type %d\n", wire) + // return + // } + // } +} + +func hex(c uint8) uint8 { + if '0' <= c && c <= '9' { + return c - '0' + } + 
if 'a' <= c && c <= 'f' { + return 10 + c - 'a' + } + if 'A' <= c && c <= 'F' { + return 10 + c - 'A' + } + return 0 +} + +func equal(b []byte, s string, t *testing.T) bool { + if 2*len(b) != len(s) { + // fail(fmt.Sprintf("wrong lengths: 2*%d != %d", len(b), len(s)), b, s, t) + fmt.Printf("wrong lengths: 2*%d != %d\n", len(b), len(s)) + return false + } + for i, j := 0, 0; i < len(b); i, j = i+1, j+2 { + x := hex(s[j])*16 + hex(s[j+1]) + if b[i] != x { + // fail(fmt.Sprintf("bad byte[%d]:%x %x", i, b[i], x), b, s, t) + fmt.Printf("bad byte[%d]:%x %x", i, b[i], x) + return false + } + } + return true +} + +func overify(t *testing.T, pb *GoTest, expected string) { + o := old() + err := o.Marshal(pb) + if err != nil { + fmt.Printf("overify marshal-1 err = %v", err) + o.DebugPrint("", o.Bytes()) + t.Fatalf("expected = %s", expected) + } + if !equal(o.Bytes(), expected, t) { + o.DebugPrint("overify neq 1", o.Bytes()) + t.Fatalf("expected = %s", expected) + } + + // Now test Unmarshal by recreating the original buffer. + pbd := new(GoTest) + err = o.Unmarshal(pbd) + if err != nil { + t.Fatalf("overify unmarshal err = %v", err) + o.DebugPrint("", o.Bytes()) + t.Fatalf("string = %s", expected) + } + o.Reset() + err = o.Marshal(pbd) + if err != nil { + t.Errorf("overify marshal-2 err = %v", err) + o.DebugPrint("", o.Bytes()) + t.Fatalf("string = %s", expected) + } + if !equal(o.Bytes(), expected, t) { + o.DebugPrint("overify neq 2", o.Bytes()) + t.Fatalf("string = %s", expected) + } +} + +// Simple tests for numeric encode/decode primitives (varint, etc.) 
+func TestNumericPrimitives(t *testing.T) { + for i := uint64(0); i < 1e6; i += 111 { + o := old() + if o.EncodeVarint(i) != nil { + t.Error("EncodeVarint") + break + } + x, e := o.DecodeVarint() + if e != nil { + t.Fatal("DecodeVarint") + } + if x != i { + t.Fatal("varint decode fail:", i, x) + } + + o = old() + if o.EncodeFixed32(i) != nil { + t.Fatal("encFixed32") + } + x, e = o.DecodeFixed32() + if e != nil { + t.Fatal("decFixed32") + } + if x != i { + t.Fatal("fixed32 decode fail:", i, x) + } + + o = old() + if o.EncodeFixed64(i*1234567) != nil { + t.Error("encFixed64") + break + } + x, e = o.DecodeFixed64() + if e != nil { + t.Error("decFixed64") + break + } + if x != i*1234567 { + t.Error("fixed64 decode fail:", i*1234567, x) + break + } + + o = old() + i32 := int32(i - 12345) + if o.EncodeZigzag32(uint64(i32)) != nil { + t.Fatal("EncodeZigzag32") + } + x, e = o.DecodeZigzag32() + if e != nil { + t.Fatal("DecodeZigzag32") + } + if x != uint64(uint32(i32)) { + t.Fatal("zigzag32 decode fail:", i32, x) + } + + o = old() + i64 := int64(i - 12345) + if o.EncodeZigzag64(uint64(i64)) != nil { + t.Fatal("EncodeZigzag64") + } + x, e = o.DecodeZigzag64() + if e != nil { + t.Fatal("DecodeZigzag64") + } + if x != uint64(i64) { + t.Fatal("zigzag64 decode fail:", i64, x) + } + } +} + +// fakeMarshaler is a simple struct implementing Marshaler and Message interfaces. +type fakeMarshaler struct { + b []byte + err error +} + +func (f fakeMarshaler) Marshal() ([]byte, error) { + return f.b, f.err +} + +func (f fakeMarshaler) String() string { + return fmt.Sprintf("Bytes: %v Error: %v", f.b, f.err) +} + +func (f fakeMarshaler) ProtoMessage() {} + +func (f fakeMarshaler) Reset() {} + +// Simple tests for proto messages that implement the Marshaler interface. 
+func TestMarshalerEncoding(t *testing.T) { + tests := []struct { + name string + m Message + want []byte + wantErr error + }{ + { + name: "Marshaler that fails", + m: fakeMarshaler{ + err: errors.New("some marshal err"), + b: []byte{5, 6, 7}, + }, + // Since there's an error, nothing should be written to buffer. + want: nil, + wantErr: errors.New("some marshal err"), + }, + { + name: "Marshaler that succeeds", + m: fakeMarshaler{ + b: []byte{0, 1, 2, 3, 4, 127, 255}, + }, + want: []byte{0, 1, 2, 3, 4, 127, 255}, + wantErr: nil, + }, + } + for _, test := range tests { + b := NewBuffer(nil) + err := b.Marshal(test.m) + if !reflect.DeepEqual(test.wantErr, err) { + t.Errorf("%s: got err %v wanted %v", test.name, err, test.wantErr) + } + if !reflect.DeepEqual(test.want, b.Bytes()) { + t.Errorf("%s: got bytes %v wanted %v", test.name, b.Bytes(), test.want) + } + } +} + +// Simple tests for bytes +func TestBytesPrimitives(t *testing.T) { + o := old() + bytes := []byte{'n', 'o', 'w', ' ', 'i', 's', ' ', 't', 'h', 'e', ' ', 't', 'i', 'm', 'e'} + if o.EncodeRawBytes(bytes) != nil { + t.Error("EncodeRawBytes") + } + decb, e := o.DecodeRawBytes(false) + if e != nil { + t.Error("DecodeRawBytes") + } + equalbytes(bytes, decb, t) +} + +// Simple tests for strings +func TestStringPrimitives(t *testing.T) { + o := old() + s := "now is the time" + if o.EncodeStringBytes(s) != nil { + t.Error("enc_string") + } + decs, e := o.DecodeStringBytes() + if e != nil { + t.Error("dec_string") + } + if s != decs { + t.Error("string encode/decode fail:", s, decs) + } +} + +// Do we catch the "required bit not set" case? +func TestRequiredBit(t *testing.T) { + o := old() + pb := new(GoTest) + err := o.Marshal(pb) + if err == nil { + t.Error("did not catch missing required fields") + } else if strings.Index(err.Error(), "Kind") < 0 { + t.Error("wrong error type:", err) + } +} + +// Check that all fields are nil. 
+// Clearly silly, and a residue from a more interesting test with an earlier, +// different initialization property, but it once caught a compiler bug so +// it lives. +func checkInitialized(pb *GoTest, t *testing.T) { + if pb.F_BoolDefaulted != nil { + t.Error("New or Reset did not set boolean:", *pb.F_BoolDefaulted) + } + if pb.F_Int32Defaulted != nil { + t.Error("New or Reset did not set int32:", *pb.F_Int32Defaulted) + } + if pb.F_Int64Defaulted != nil { + t.Error("New or Reset did not set int64:", *pb.F_Int64Defaulted) + } + if pb.F_Fixed32Defaulted != nil { + t.Error("New or Reset did not set fixed32:", *pb.F_Fixed32Defaulted) + } + if pb.F_Fixed64Defaulted != nil { + t.Error("New or Reset did not set fixed64:", *pb.F_Fixed64Defaulted) + } + if pb.F_Uint32Defaulted != nil { + t.Error("New or Reset did not set uint32:", *pb.F_Uint32Defaulted) + } + if pb.F_Uint64Defaulted != nil { + t.Error("New or Reset did not set uint64:", *pb.F_Uint64Defaulted) + } + if pb.F_FloatDefaulted != nil { + t.Error("New or Reset did not set float:", *pb.F_FloatDefaulted) + } + if pb.F_DoubleDefaulted != nil { + t.Error("New or Reset did not set double:", *pb.F_DoubleDefaulted) + } + if pb.F_StringDefaulted != nil { + t.Error("New or Reset did not set string:", *pb.F_StringDefaulted) + } + if pb.F_BytesDefaulted != nil { + t.Error("New or Reset did not set bytes:", string(pb.F_BytesDefaulted)) + } + if pb.F_Sint32Defaulted != nil { + t.Error("New or Reset did not set int32:", *pb.F_Sint32Defaulted) + } + if pb.F_Sint64Defaulted != nil { + t.Error("New or Reset did not set int64:", *pb.F_Sint64Defaulted) + } +} + +// Does Reset() reset? 
+func TestReset(t *testing.T) { + pb := initGoTest(true) + // muck with some values + pb.F_BoolDefaulted = Bool(false) + pb.F_Int32Defaulted = Int32(237) + pb.F_Int64Defaulted = Int64(12346) + pb.F_Fixed32Defaulted = Uint32(32000) + pb.F_Fixed64Defaulted = Uint64(666) + pb.F_Uint32Defaulted = Uint32(323232) + pb.F_Uint64Defaulted = nil + pb.F_FloatDefaulted = nil + pb.F_DoubleDefaulted = Float64(0) + pb.F_StringDefaulted = String("gotcha") + pb.F_BytesDefaulted = []byte("asdfasdf") + pb.F_Sint32Defaulted = Int32(123) + pb.F_Sint64Defaulted = Int64(789) + pb.Reset() + checkInitialized(pb, t) +} + +// All required fields set, no defaults provided. +func TestEncodeDecode1(t *testing.T) { + pb := initGoTest(false) + overify(t, pb, + "0807"+ // field 1, encoding 0, value 7 + "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField) + "5001"+ // field 10, encoding 0, value 1 + "5803"+ // field 11, encoding 0, value 3 + "6006"+ // field 12, encoding 0, value 6 + "6d20000000"+ // field 13, encoding 5, value 0x20 + "714000000000000000"+ // field 14, encoding 1, value 0x40 + "78a019"+ // field 15, encoding 0, value 0xca0 = 3232 + "8001c032"+ // field 16, encoding 0, value 0x1940 = 6464 + "8d0100004a45"+ // field 17, encoding 5, value 3232.0 + "9101000000000040b940"+ // field 18, encoding 1, value 6464.0 + "9a0106"+"737472696e67"+ // field 19, encoding 2, string "string" + "b304"+ // field 70, encoding 3, start group + "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required" + "b404"+ // field 70, encoding 4, end group + "aa0605"+"6279746573"+ // field 101, encoding 2, string "bytes" + "b0063f"+ // field 102, encoding 0, 0x3f zigzag32 + "b8067f") // field 103, encoding 0, 0x7f zigzag64 +} + +// All required fields set, defaults provided. 
+func TestEncodeDecode2(t *testing.T) { + pb := initGoTest(true) + overify(t, pb, + "0807"+ // field 1, encoding 0, value 7 + "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField) + "5001"+ // field 10, encoding 0, value 1 + "5803"+ // field 11, encoding 0, value 3 + "6006"+ // field 12, encoding 0, value 6 + "6d20000000"+ // field 13, encoding 5, value 32 + "714000000000000000"+ // field 14, encoding 1, value 64 + "78a019"+ // field 15, encoding 0, value 3232 + "8001c032"+ // field 16, encoding 0, value 6464 + "8d0100004a45"+ // field 17, encoding 5, value 3232.0 + "9101000000000040b940"+ // field 18, encoding 1, value 6464.0 + "9a0106"+"737472696e67"+ // field 19, encoding 2 string "string" + "c00201"+ // field 40, encoding 0, value 1 + "c80220"+ // field 41, encoding 0, value 32 + "d00240"+ // field 42, encoding 0, value 64 + "dd0240010000"+ // field 43, encoding 5, value 320 + "e1028002000000000000"+ // field 44, encoding 1, value 640 + "e8028019"+ // field 45, encoding 0, value 3200 + "f0028032"+ // field 46, encoding 0, value 6400 + "fd02e0659948"+ // field 47, encoding 5, value 314159.0 + "81030000000050971041"+ // field 48, encoding 1, value 271828.0 + "8a0310"+"68656c6c6f2c2022776f726c6421220a"+ // field 49, encoding 2 string "hello, \"world!\"\n" + "b304"+ // start group field 70 level 1 + "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required" + "b404"+ // end group field 70 level 1 + "aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes" + "b0063f"+ // field 102, encoding 0, 0x3f zigzag32 + "b8067f"+ // field 103, encoding 0, 0x7f zigzag64 + "8a1907"+"4269676e6f7365"+ // field 401, encoding 2, string "Bignose" + "90193f"+ // field 402, encoding 0, value 63 + "98197f") // field 403, encoding 0, value 127 + +} + +// All default fields set to their default value by hand +func TestEncodeDecode3(t *testing.T) { + pb := initGoTest(false) + pb.F_BoolDefaulted = Bool(true) + pb.F_Int32Defaulted = Int32(32) + 
pb.F_Int64Defaulted = Int64(64) + pb.F_Fixed32Defaulted = Uint32(320) + pb.F_Fixed64Defaulted = Uint64(640) + pb.F_Uint32Defaulted = Uint32(3200) + pb.F_Uint64Defaulted = Uint64(6400) + pb.F_FloatDefaulted = Float32(314159) + pb.F_DoubleDefaulted = Float64(271828) + pb.F_StringDefaulted = String("hello, \"world!\"\n") + pb.F_BytesDefaulted = []byte("Bignose") + pb.F_Sint32Defaulted = Int32(-32) + pb.F_Sint64Defaulted = Int64(-64) + + overify(t, pb, + "0807"+ // field 1, encoding 0, value 7 + "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField) + "5001"+ // field 10, encoding 0, value 1 + "5803"+ // field 11, encoding 0, value 3 + "6006"+ // field 12, encoding 0, value 6 + "6d20000000"+ // field 13, encoding 5, value 32 + "714000000000000000"+ // field 14, encoding 1, value 64 + "78a019"+ // field 15, encoding 0, value 3232 + "8001c032"+ // field 16, encoding 0, value 6464 + "8d0100004a45"+ // field 17, encoding 5, value 3232.0 + "9101000000000040b940"+ // field 18, encoding 1, value 6464.0 + "9a0106"+"737472696e67"+ // field 19, encoding 2 string "string" + "c00201"+ // field 40, encoding 0, value 1 + "c80220"+ // field 41, encoding 0, value 32 + "d00240"+ // field 42, encoding 0, value 64 + "dd0240010000"+ // field 43, encoding 5, value 320 + "e1028002000000000000"+ // field 44, encoding 1, value 640 + "e8028019"+ // field 45, encoding 0, value 3200 + "f0028032"+ // field 46, encoding 0, value 6400 + "fd02e0659948"+ // field 47, encoding 5, value 314159.0 + "81030000000050971041"+ // field 48, encoding 1, value 271828.0 + "8a0310"+"68656c6c6f2c2022776f726c6421220a"+ // field 49, encoding 2 string "hello, \"world!\"\n" + "b304"+ // start group field 70 level 1 + "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required" + "b404"+ // end group field 70 level 1 + "aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes" + "b0063f"+ // field 102, encoding 0, 0x3f zigzag32 + "b8067f"+ // field 103, encoding 0, 0x7f zigzag64 + 
"8a1907"+"4269676e6f7365"+ // field 401, encoding 2, string "Bignose" + "90193f"+ // field 402, encoding 0, value 63 + "98197f") // field 403, encoding 0, value 127 + +} + +// All required fields set, defaults provided, all non-defaulted optional fields have values. +func TestEncodeDecode4(t *testing.T) { + pb := initGoTest(true) + pb.Table = String("hello") + pb.Param = Int32(7) + pb.OptionalField = initGoTestField() + pb.F_BoolOptional = Bool(true) + pb.F_Int32Optional = Int32(32) + pb.F_Int64Optional = Int64(64) + pb.F_Fixed32Optional = Uint32(3232) + pb.F_Fixed64Optional = Uint64(6464) + pb.F_Uint32Optional = Uint32(323232) + pb.F_Uint64Optional = Uint64(646464) + pb.F_FloatOptional = Float32(32.) + pb.F_DoubleOptional = Float64(64.) + pb.F_StringOptional = String("hello") + pb.F_BytesOptional = []byte("Bignose") + pb.F_Sint32Optional = Int32(-32) + pb.F_Sint64Optional = Int64(-64) + pb.Optionalgroup = initGoTest_OptionalGroup() + + overify(t, pb, + "0807"+ // field 1, encoding 0, value 7 + "1205"+"68656c6c6f"+ // field 2, encoding 2, string "hello" + "1807"+ // field 3, encoding 0, value 7 + "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField) + "320d"+"0a056c6162656c120474797065"+ // field 6, encoding 2 (GoTestField) + "5001"+ // field 10, encoding 0, value 1 + "5803"+ // field 11, encoding 0, value 3 + "6006"+ // field 12, encoding 0, value 6 + "6d20000000"+ // field 13, encoding 5, value 32 + "714000000000000000"+ // field 14, encoding 1, value 64 + "78a019"+ // field 15, encoding 0, value 3232 + "8001c032"+ // field 16, encoding 0, value 6464 + "8d0100004a45"+ // field 17, encoding 5, value 3232.0 + "9101000000000040b940"+ // field 18, encoding 1, value 6464.0 + "9a0106"+"737472696e67"+ // field 19, encoding 2 string "string" + "f00101"+ // field 30, encoding 0, value 1 + "f80120"+ // field 31, encoding 0, value 32 + "800240"+ // field 32, encoding 0, value 64 + "8d02a00c0000"+ // field 33, encoding 5, value 3232 + 
"91024019000000000000"+ // field 34, encoding 1, value 6464 + "9802a0dd13"+ // field 35, encoding 0, value 323232 + "a002c0ba27"+ // field 36, encoding 0, value 646464 + "ad0200000042"+ // field 37, encoding 5, value 32.0 + "b1020000000000005040"+ // field 38, encoding 1, value 64.0 + "ba0205"+"68656c6c6f"+ // field 39, encoding 2, string "hello" + "c00201"+ // field 40, encoding 0, value 1 + "c80220"+ // field 41, encoding 0, value 32 + "d00240"+ // field 42, encoding 0, value 64 + "dd0240010000"+ // field 43, encoding 5, value 320 + "e1028002000000000000"+ // field 44, encoding 1, value 640 + "e8028019"+ // field 45, encoding 0, value 3200 + "f0028032"+ // field 46, encoding 0, value 6400 + "fd02e0659948"+ // field 47, encoding 5, value 314159.0 + "81030000000050971041"+ // field 48, encoding 1, value 271828.0 + "8a0310"+"68656c6c6f2c2022776f726c6421220a"+ // field 49, encoding 2 string "hello, \"world!\"\n" + "b304"+ // start group field 70 level 1 + "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required" + "b404"+ // end group field 70 level 1 + "d305"+ // start group field 90 level 1 + "da0508"+"6f7074696f6e616c"+ // field 91, encoding 2, string "optional" + "d405"+ // end group field 90 level 1 + "aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes" + "b0063f"+ // field 102, encoding 0, 0x3f zigzag32 + "b8067f"+ // field 103, encoding 0, 0x7f zigzag64 + "ea1207"+"4269676e6f7365"+ // field 301, encoding 2, string "Bignose" + "f0123f"+ // field 302, encoding 0, value 63 + "f8127f"+ // field 303, encoding 0, value 127 + "8a1907"+"4269676e6f7365"+ // field 401, encoding 2, string "Bignose" + "90193f"+ // field 402, encoding 0, value 63 + "98197f") // field 403, encoding 0, value 127 + +} + +// All required fields set, defaults provided, all repeated fields given two values. 
+func TestEncodeDecode5(t *testing.T) { + pb := initGoTest(true) + pb.RepeatedField = []*GoTestField{initGoTestField(), initGoTestField()} + pb.F_BoolRepeated = []bool{false, true} + pb.F_Int32Repeated = []int32{32, 33} + pb.F_Int64Repeated = []int64{64, 65} + pb.F_Fixed32Repeated = []uint32{3232, 3333} + pb.F_Fixed64Repeated = []uint64{6464, 6565} + pb.F_Uint32Repeated = []uint32{323232, 333333} + pb.F_Uint64Repeated = []uint64{646464, 656565} + pb.F_FloatRepeated = []float32{32., 33.} + pb.F_DoubleRepeated = []float64{64., 65.} + pb.F_StringRepeated = []string{"hello", "sailor"} + pb.F_BytesRepeated = [][]byte{[]byte("big"), []byte("nose")} + pb.F_Sint32Repeated = []int32{32, -32} + pb.F_Sint64Repeated = []int64{64, -64} + pb.Repeatedgroup = []*GoTest_RepeatedGroup{initGoTest_RepeatedGroup(), initGoTest_RepeatedGroup()} + + overify(t, pb, + "0807"+ // field 1, encoding 0, value 7 + "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField) + "2a0d"+"0a056c6162656c120474797065"+ // field 5, encoding 2 (GoTestField) + "2a0d"+"0a056c6162656c120474797065"+ // field 5, encoding 2 (GoTestField) + "5001"+ // field 10, encoding 0, value 1 + "5803"+ // field 11, encoding 0, value 3 + "6006"+ // field 12, encoding 0, value 6 + "6d20000000"+ // field 13, encoding 5, value 32 + "714000000000000000"+ // field 14, encoding 1, value 64 + "78a019"+ // field 15, encoding 0, value 3232 + "8001c032"+ // field 16, encoding 0, value 6464 + "8d0100004a45"+ // field 17, encoding 5, value 3232.0 + "9101000000000040b940"+ // field 18, encoding 1, value 6464.0 + "9a0106"+"737472696e67"+ // field 19, encoding 2 string "string" + "a00100"+ // field 20, encoding 0, value 0 + "a00101"+ // field 20, encoding 0, value 1 + "a80120"+ // field 21, encoding 0, value 32 + "a80121"+ // field 21, encoding 0, value 33 + "b00140"+ // field 22, encoding 0, value 64 + "b00141"+ // field 22, encoding 0, value 65 + "bd01a00c0000"+ // field 23, encoding 5, value 3232 + "bd01050d0000"+ // field 
23, encoding 5, value 3333 + "c1014019000000000000"+ // field 24, encoding 1, value 6464 + "c101a519000000000000"+ // field 24, encoding 1, value 6565 + "c801a0dd13"+ // field 25, encoding 0, value 323232 + "c80195ac14"+ // field 25, encoding 0, value 333333 + "d001c0ba27"+ // field 26, encoding 0, value 646464 + "d001b58928"+ // field 26, encoding 0, value 656565 + "dd0100000042"+ // field 27, encoding 5, value 32.0 + "dd0100000442"+ // field 27, encoding 5, value 33.0 + "e1010000000000005040"+ // field 28, encoding 1, value 64.0 + "e1010000000000405040"+ // field 28, encoding 1, value 65.0 + "ea0105"+"68656c6c6f"+ // field 29, encoding 2, string "hello" + "ea0106"+"7361696c6f72"+ // field 29, encoding 2, string "sailor" + "c00201"+ // field 40, encoding 0, value 1 + "c80220"+ // field 41, encoding 0, value 32 + "d00240"+ // field 42, encoding 0, value 64 + "dd0240010000"+ // field 43, encoding 5, value 320 + "e1028002000000000000"+ // field 44, encoding 1, value 640 + "e8028019"+ // field 45, encoding 0, value 3200 + "f0028032"+ // field 46, encoding 0, value 6400 + "fd02e0659948"+ // field 47, encoding 5, value 314159.0 + "81030000000050971041"+ // field 48, encoding 1, value 271828.0 + "8a0310"+"68656c6c6f2c2022776f726c6421220a"+ // field 49, encoding 2 string "hello, \"world!\"\n" + "b304"+ // start group field 70 level 1 + "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required" + "b404"+ // end group field 70 level 1 + "8305"+ // start group field 80 level 1 + "8a0508"+"7265706561746564"+ // field 81, encoding 2, string "repeated" + "8405"+ // end group field 80 level 1 + "8305"+ // start group field 80 level 1 + "8a0508"+"7265706561746564"+ // field 81, encoding 2, string "repeated" + "8405"+ // end group field 80 level 1 + "aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes" + "b0063f"+ // field 102, encoding 0, 0x3f zigzag32 + "b8067f"+ // field 103, encoding 0, 0x7f zigzag64 + "ca0c03"+"626967"+ // field 201, encoding 2, string 
"big" + "ca0c04"+"6e6f7365"+ // field 201, encoding 2, string "nose" + "d00c40"+ // field 202, encoding 0, value 32 + "d00c3f"+ // field 202, encoding 0, value -32 + "d80c8001"+ // field 203, encoding 0, value 64 + "d80c7f"+ // field 203, encoding 0, value -64 + "8a1907"+"4269676e6f7365"+ // field 401, encoding 2, string "Bignose" + "90193f"+ // field 402, encoding 0, value 63 + "98197f") // field 403, encoding 0, value 127 + +} + +// All required fields set, all packed repeated fields given two values. +func TestEncodeDecode6(t *testing.T) { + pb := initGoTest(false) + pb.F_BoolRepeatedPacked = []bool{false, true} + pb.F_Int32RepeatedPacked = []int32{32, 33} + pb.F_Int64RepeatedPacked = []int64{64, 65} + pb.F_Fixed32RepeatedPacked = []uint32{3232, 3333} + pb.F_Fixed64RepeatedPacked = []uint64{6464, 6565} + pb.F_Uint32RepeatedPacked = []uint32{323232, 333333} + pb.F_Uint64RepeatedPacked = []uint64{646464, 656565} + pb.F_FloatRepeatedPacked = []float32{32., 33.} + pb.F_DoubleRepeatedPacked = []float64{64., 65.} + pb.F_Sint32RepeatedPacked = []int32{32, -32} + pb.F_Sint64RepeatedPacked = []int64{64, -64} + + overify(t, pb, + "0807"+ // field 1, encoding 0, value 7 + "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField) + "5001"+ // field 10, encoding 0, value 1 + "5803"+ // field 11, encoding 0, value 3 + "6006"+ // field 12, encoding 0, value 6 + "6d20000000"+ // field 13, encoding 5, value 32 + "714000000000000000"+ // field 14, encoding 1, value 64 + "78a019"+ // field 15, encoding 0, value 3232 + "8001c032"+ // field 16, encoding 0, value 6464 + "8d0100004a45"+ // field 17, encoding 5, value 3232.0 + "9101000000000040b940"+ // field 18, encoding 1, value 6464.0 + "9a0106"+"737472696e67"+ // field 19, encoding 2 string "string" + "9203020001"+ // field 50, encoding 2, 2 bytes, value 0, value 1 + "9a03022021"+ // field 51, encoding 2, 2 bytes, value 32, value 33 + "a203024041"+ // field 52, encoding 2, 2 bytes, value 64, value 65 + "aa0308"+ // 
field 53, encoding 2, 8 bytes + "a00c0000050d0000"+ // value 3232, value 3333 + "b20310"+ // field 54, encoding 2, 16 bytes + "4019000000000000a519000000000000"+ // value 6464, value 6565 + "ba0306"+ // field 55, encoding 2, 6 bytes + "a0dd1395ac14"+ // value 323232, value 333333 + "c20306"+ // field 56, encoding 2, 6 bytes + "c0ba27b58928"+ // value 646464, value 656565 + "ca0308"+ // field 57, encoding 2, 8 bytes + "0000004200000442"+ // value 32.0, value 33.0 + "d20310"+ // field 58, encoding 2, 16 bytes + "00000000000050400000000000405040"+ // value 64.0, value 65.0 + "b304"+ // start group field 70 level 1 + "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required" + "b404"+ // end group field 70 level 1 + "aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes" + "b0063f"+ // field 102, encoding 0, 0x3f zigzag32 + "b8067f"+ // field 103, encoding 0, 0x7f zigzag64 + "b21f02"+ // field 502, encoding 2, 2 bytes + "403f"+ // value 32, value -32 + "ba1f03"+ // field 503, encoding 2, 3 bytes + "80017f") // value 64, value -64 +} + +// Test that we can encode empty bytes fields. +func TestEncodeDecodeBytes1(t *testing.T) { + pb := initGoTest(false) + + // Create our bytes + pb.F_BytesRequired = []byte{} + pb.F_BytesRepeated = [][]byte{{}} + pb.F_BytesOptional = []byte{} + + d, err := Marshal(pb) + if err != nil { + t.Error(err) + } + + pbd := new(GoTest) + if err := Unmarshal(d, pbd); err != nil { + t.Error(err) + } + + if pbd.F_BytesRequired == nil || len(pbd.F_BytesRequired) != 0 { + t.Error("required empty bytes field is incorrect") + } + if pbd.F_BytesRepeated == nil || len(pbd.F_BytesRepeated) == 1 && pbd.F_BytesRepeated[0] == nil { + t.Error("repeated empty bytes field is incorrect") + } + if pbd.F_BytesOptional == nil || len(pbd.F_BytesOptional) != 0 { + t.Error("optional empty bytes field is incorrect") + } +} + +// Test that we encode nil-valued fields of a repeated bytes field correctly. 
+// Since entries in a repeated field cannot be nil, nil must mean empty value. +func TestEncodeDecodeBytes2(t *testing.T) { + pb := initGoTest(false) + + // Create our bytes + pb.F_BytesRepeated = [][]byte{nil} + + d, err := Marshal(pb) + if err != nil { + t.Error(err) + } + + pbd := new(GoTest) + if err := Unmarshal(d, pbd); err != nil { + t.Error(err) + } + + if len(pbd.F_BytesRepeated) != 1 || pbd.F_BytesRepeated[0] == nil { + t.Error("Unexpected value for repeated bytes field") + } +} + +// All required fields set, defaults provided, all repeated fields given two values. +func TestSkippingUnrecognizedFields(t *testing.T) { + o := old() + pb := initGoTestField() + + // Marshal it normally. + o.Marshal(pb) + + // Now new a GoSkipTest record. + skip := &GoSkipTest{ + SkipInt32: Int32(32), + SkipFixed32: Uint32(3232), + SkipFixed64: Uint64(6464), + SkipString: String("skipper"), + Skipgroup: &GoSkipTest_SkipGroup{ + GroupInt32: Int32(75), + GroupString: String("wxyz"), + }, + } + + // Marshal it into same buffer. + o.Marshal(skip) + + pbd := new(GoTestField) + o.Unmarshal(pbd) + + // The __unrecognized field should be a marshaling of GoSkipTest + skipd := new(GoSkipTest) + + o.SetBuf(pbd.XXX_unrecognized) + o.Unmarshal(skipd) + + if *skipd.SkipInt32 != *skip.SkipInt32 { + t.Error("skip int32", skipd.SkipInt32) + } + if *skipd.SkipFixed32 != *skip.SkipFixed32 { + t.Error("skip fixed32", skipd.SkipFixed32) + } + if *skipd.SkipFixed64 != *skip.SkipFixed64 { + t.Error("skip fixed64", skipd.SkipFixed64) + } + if *skipd.SkipString != *skip.SkipString { + t.Error("skip string", *skipd.SkipString) + } + if *skipd.Skipgroup.GroupInt32 != *skip.Skipgroup.GroupInt32 { + t.Error("skip group int32", skipd.Skipgroup.GroupInt32) + } + if *skipd.Skipgroup.GroupString != *skip.Skipgroup.GroupString { + t.Error("skip group string", *skipd.Skipgroup.GroupString) + } +} + +// Check that unrecognized fields of a submessage are preserved. 
+func TestSubmessageUnrecognizedFields(t *testing.T) { + nm := &NewMessage{ + Nested: &NewMessage_Nested{ + Name: String("Nigel"), + FoodGroup: String("carbs"), + }, + } + b, err := Marshal(nm) + if err != nil { + t.Fatalf("Marshal of NewMessage: %v", err) + } + + // Unmarshal into an OldMessage. + om := new(OldMessage) + if err := Unmarshal(b, om); err != nil { + t.Fatalf("Unmarshal to OldMessage: %v", err) + } + exp := &OldMessage{ + Nested: &OldMessage_Nested{ + Name: String("Nigel"), + // normal protocol buffer users should not do this + XXX_unrecognized: []byte("\x12\x05carbs"), + }, + } + if !Equal(om, exp) { + t.Errorf("om = %v, want %v", om, exp) + } + + // Clone the OldMessage. + om = Clone(om).(*OldMessage) + if !Equal(om, exp) { + t.Errorf("Clone(om) = %v, want %v", om, exp) + } + + // Marshal the OldMessage, then unmarshal it into an empty NewMessage. + if b, err = Marshal(om); err != nil { + t.Fatalf("Marshal of OldMessage: %v", err) + } + t.Logf("Marshal(%v) -> %q", om, b) + nm2 := new(NewMessage) + if err := Unmarshal(b, nm2); err != nil { + t.Fatalf("Unmarshal to NewMessage: %v", err) + } + if !Equal(nm, nm2) { + t.Errorf("NewMessage round-trip: %v => %v", nm, nm2) + } +} + +// Check that an int32 field can be upgraded to an int64 field. +func TestNegativeInt32(t *testing.T) { + om := &OldMessage{ + Num: Int32(-1), + } + b, err := Marshal(om) + if err != nil { + t.Fatalf("Marshal of OldMessage: %v", err) + } + + // Check the size. It should be 11 bytes; + // 1 for the field/wire type, and 10 for the negative number. + if len(b) != 11 { + t.Errorf("%v marshaled as %q, wanted 11 bytes", om, b) + } + + // Unmarshal into a NewMessage. + nm := new(NewMessage) + if err := Unmarshal(b, nm); err != nil { + t.Fatalf("Unmarshal to NewMessage: %v", err) + } + want := &NewMessage{ + Num: Int64(-1), + } + if !Equal(nm, want) { + t.Errorf("nm = %v, want %v", nm, want) + } +} + +// Check that we can grow an array (repeated field) to have many elements. 
+// This test doesn't depend only on our encoding; for variety, it makes sure +// we create, encode, and decode the correct contents explicitly. It's therefore +// a bit messier. +// This test also uses (and hence tests) the Marshal/Unmarshal functions +// instead of the methods. +func TestBigRepeated(t *testing.T) { + pb := initGoTest(true) + + // Create the arrays + const N = 50 // Internally the library starts much smaller. + pb.Repeatedgroup = make([]*GoTest_RepeatedGroup, N) + pb.F_Sint64Repeated = make([]int64, N) + pb.F_Sint32Repeated = make([]int32, N) + pb.F_BytesRepeated = make([][]byte, N) + pb.F_StringRepeated = make([]string, N) + pb.F_DoubleRepeated = make([]float64, N) + pb.F_FloatRepeated = make([]float32, N) + pb.F_Uint64Repeated = make([]uint64, N) + pb.F_Uint32Repeated = make([]uint32, N) + pb.F_Fixed64Repeated = make([]uint64, N) + pb.F_Fixed32Repeated = make([]uint32, N) + pb.F_Int64Repeated = make([]int64, N) + pb.F_Int32Repeated = make([]int32, N) + pb.F_BoolRepeated = make([]bool, N) + pb.RepeatedField = make([]*GoTestField, N) + + // Fill in the arrays with checkable values. + igtf := initGoTestField() + igtrg := initGoTest_RepeatedGroup() + for i := 0; i < N; i++ { + pb.Repeatedgroup[i] = igtrg + pb.F_Sint64Repeated[i] = int64(i) + pb.F_Sint32Repeated[i] = int32(i) + s := fmt.Sprint(i) + pb.F_BytesRepeated[i] = []byte(s) + pb.F_StringRepeated[i] = s + pb.F_DoubleRepeated[i] = float64(i) + pb.F_FloatRepeated[i] = float32(i) + pb.F_Uint64Repeated[i] = uint64(i) + pb.F_Uint32Repeated[i] = uint32(i) + pb.F_Fixed64Repeated[i] = uint64(i) + pb.F_Fixed32Repeated[i] = uint32(i) + pb.F_Int64Repeated[i] = int64(i) + pb.F_Int32Repeated[i] = int32(i) + pb.F_BoolRepeated[i] = i%2 == 0 + pb.RepeatedField[i] = igtf + } + + // Marshal. + buf, _ := Marshal(pb) + + // Now test Unmarshal by recreating the original buffer. 
+ pbd := new(GoTest) + Unmarshal(buf, pbd) + + // Check the checkable values + for i := uint64(0); i < N; i++ { + if pbd.Repeatedgroup[i] == nil { // TODO: more checking? + t.Error("pbd.Repeatedgroup bad") + } + var x uint64 + x = uint64(pbd.F_Sint64Repeated[i]) + if x != i { + t.Error("pbd.F_Sint64Repeated bad", x, i) + } + x = uint64(pbd.F_Sint32Repeated[i]) + if x != i { + t.Error("pbd.F_Sint32Repeated bad", x, i) + } + s := fmt.Sprint(i) + equalbytes(pbd.F_BytesRepeated[i], []byte(s), t) + if pbd.F_StringRepeated[i] != s { + t.Error("pbd.F_Sint32Repeated bad", pbd.F_StringRepeated[i], i) + } + x = uint64(pbd.F_DoubleRepeated[i]) + if x != i { + t.Error("pbd.F_DoubleRepeated bad", x, i) + } + x = uint64(pbd.F_FloatRepeated[i]) + if x != i { + t.Error("pbd.F_FloatRepeated bad", x, i) + } + x = pbd.F_Uint64Repeated[i] + if x != i { + t.Error("pbd.F_Uint64Repeated bad", x, i) + } + x = uint64(pbd.F_Uint32Repeated[i]) + if x != i { + t.Error("pbd.F_Uint32Repeated bad", x, i) + } + x = pbd.F_Fixed64Repeated[i] + if x != i { + t.Error("pbd.F_Fixed64Repeated bad", x, i) + } + x = uint64(pbd.F_Fixed32Repeated[i]) + if x != i { + t.Error("pbd.F_Fixed32Repeated bad", x, i) + } + x = uint64(pbd.F_Int64Repeated[i]) + if x != i { + t.Error("pbd.F_Int64Repeated bad", x, i) + } + x = uint64(pbd.F_Int32Repeated[i]) + if x != i { + t.Error("pbd.F_Int32Repeated bad", x, i) + } + if pbd.F_BoolRepeated[i] != (i%2 == 0) { + t.Error("pbd.F_BoolRepeated bad", x, i) + } + if pbd.RepeatedField[i] == nil { // TODO: more checking? + t.Error("pbd.RepeatedField bad") + } + } +} + +// Verify we give a useful message when decoding to the wrong structure type. +func TestTypeMismatch(t *testing.T) { + pb1 := initGoTest(true) + + // Marshal + o := old() + o.Marshal(pb1) + + // Now Unmarshal it to the wrong type. 
+ pb2 := initGoTestField() + err := o.Unmarshal(pb2) + if err == nil { + t.Error("expected error, got no error") + } else if !strings.Contains(err.Error(), "bad wiretype") { + t.Error("expected bad wiretype error, got", err) + } +} + +func encodeDecode(t *testing.T, in, out Message, msg string) { + buf, err := Marshal(in) + if err != nil { + t.Fatalf("failed marshaling %v: %v", msg, err) + } + if err := Unmarshal(buf, out); err != nil { + t.Fatalf("failed unmarshaling %v: %v", msg, err) + } +} + +func TestPackedNonPackedDecoderSwitching(t *testing.T) { + np, p := new(NonPackedTest), new(PackedTest) + + // non-packed -> packed + np.A = []int32{0, 1, 1, 2, 3, 5} + encodeDecode(t, np, p, "non-packed -> packed") + if !reflect.DeepEqual(np.A, p.B) { + t.Errorf("failed non-packed -> packed; np.A=%+v, p.B=%+v", np.A, p.B) + } + + // packed -> non-packed + np.Reset() + p.B = []int32{3, 1, 4, 1, 5, 9} + encodeDecode(t, p, np, "packed -> non-packed") + if !reflect.DeepEqual(p.B, np.A) { + t.Errorf("failed packed -> non-packed; p.B=%+v, np.A=%+v", p.B, np.A) + } +} + +func TestProto1RepeatedGroup(t *testing.T) { + pb := &MessageList{ + Message: []*MessageList_Message{ + { + Name: String("blah"), + Count: Int32(7), + }, + // NOTE: pb.Message[1] is a nil + nil, + }, + } + + o := old() + err := o.Marshal(pb) + if err == nil || !strings.Contains(err.Error(), "repeated field Message has nil") { + t.Fatalf("unexpected or no error when marshaling: %v", err) + } +} + +// Test that enums work. Checks for a bug introduced by making enums +// named types instead of int32: newInt32FromUint64 would crash with +// a type mismatch in reflect.PointTo. 
+func TestEnum(t *testing.T) { + pb := new(GoEnum) + pb.Foo = FOO_FOO1.Enum() + o := old() + if err := o.Marshal(pb); err != nil { + t.Fatal("error encoding enum:", err) + } + pb1 := new(GoEnum) + if err := o.Unmarshal(pb1); err != nil { + t.Fatal("error decoding enum:", err) + } + if *pb1.Foo != FOO_FOO1 { + t.Error("expected 7 but got ", *pb1.Foo) + } +} + +// Enum types have String methods. Check that enum fields can be printed. +// We don't care what the value actually is, just as long as it doesn't crash. +func TestPrintingNilEnumFields(t *testing.T) { + pb := new(GoEnum) + fmt.Sprintf("%+v", pb) +} + +// Verify that absent required fields cause Marshal/Unmarshal to return errors. +func TestRequiredFieldEnforcement(t *testing.T) { + pb := new(GoTestField) + _, err := Marshal(pb) + if err == nil { + t.Error("marshal: expected error, got nil") + } else if strings.Index(err.Error(), "Label") < 0 { + t.Errorf("marshal: bad error type: %v", err) + } + + // A slightly sneaky, yet valid, proto. It encodes the same required field twice, + // so simply counting the required fields is insufficient. + // field 1, encoding 2, value "hi" + buf := []byte("\x0A\x02hi\x0A\x02hi") + err = Unmarshal(buf, pb) + if err == nil { + t.Error("unmarshal: expected error, got nil") + } else if strings.Index(err.Error(), "{Unknown}") < 0 { + t.Errorf("unmarshal: bad error type: %v", err) + } +} + +func TestTypedNilMarshal(t *testing.T) { + // A typed nil should return ErrNil and not crash. + _, err := Marshal((*GoEnum)(nil)) + if err != ErrNil { + t.Errorf("Marshal: got err %v, want ErrNil", err) + } +} + +// A type that implements the Marshaler interface, but is not nillable. 
+type nonNillableInt uint64 + +func (nni nonNillableInt) Marshal() ([]byte, error) { + return EncodeVarint(uint64(nni)), nil +} + +type NNIMessage struct { + nni nonNillableInt +} + +func (*NNIMessage) Reset() {} +func (*NNIMessage) String() string { return "" } +func (*NNIMessage) ProtoMessage() {} + +// A type that implements the Marshaler interface and is nillable. +type nillableMessage struct { + x uint64 +} + +func (nm *nillableMessage) Marshal() ([]byte, error) { + return EncodeVarint(nm.x), nil +} + +type NMMessage struct { + nm *nillableMessage +} + +func (*NMMessage) Reset() {} +func (*NMMessage) String() string { return "" } +func (*NMMessage) ProtoMessage() {} + +// Verify a type that uses the Marshaler interface, but has a nil pointer. +func TestNilMarshaler(t *testing.T) { + // Try a struct with a Marshaler field that is nil. + // It should be directly marshable. + nmm := new(NMMessage) + if _, err := Marshal(nmm); err != nil { + t.Error("unexpected error marshaling nmm: ", err) + } + + // Try a struct with a Marshaler field that is not nillable. + nnim := new(NNIMessage) + nnim.nni = 7 + var _ Marshaler = nnim.nni // verify it is truly a Marshaler + if _, err := Marshal(nnim); err != nil { + t.Error("unexpected error marshaling nnim: ", err) + } +} + +func TestAllSetDefaults(t *testing.T) { + // Exercise SetDefaults with all scalar field types. + m := &Defaults{ + // NaN != NaN, so override that here. 
+ F_Nan: Float32(1.7), + } + expected := &Defaults{ + F_Bool: Bool(true), + F_Int32: Int32(32), + F_Int64: Int64(64), + F_Fixed32: Uint32(320), + F_Fixed64: Uint64(640), + F_Uint32: Uint32(3200), + F_Uint64: Uint64(6400), + F_Float: Float32(314159), + F_Double: Float64(271828), + F_String: String(`hello, "world!"` + "\n"), + F_Bytes: []byte("Bignose"), + F_Sint32: Int32(-32), + F_Sint64: Int64(-64), + F_Enum: Defaults_GREEN.Enum(), + F_Pinf: Float32(float32(math.Inf(1))), + F_Ninf: Float32(float32(math.Inf(-1))), + F_Nan: Float32(1.7), + StrZero: String(""), + } + SetDefaults(m) + if !Equal(m, expected) { + t.Errorf("SetDefaults failed\n got %v\nwant %v", m, expected) + } +} + +func TestSetDefaultsWithSetField(t *testing.T) { + // Check that a set value is not overridden. + m := &Defaults{ + F_Int32: Int32(12), + } + SetDefaults(m) + if v := m.GetF_Int32(); v != 12 { + t.Errorf("m.FInt32 = %v, want 12", v) + } +} + +func TestSetDefaultsWithSubMessage(t *testing.T) { + m := &OtherMessage{ + Key: Int64(123), + Inner: &InnerMessage{ + Host: String("gopher"), + }, + } + expected := &OtherMessage{ + Key: Int64(123), + Inner: &InnerMessage{ + Host: String("gopher"), + Port: Int32(4000), + }, + } + SetDefaults(m) + if !Equal(m, expected) { + t.Errorf("\n got %v\nwant %v", m, expected) + } +} + +func TestSetDefaultsWithRepeatedSubMessage(t *testing.T) { + m := &MyMessage{ + RepInner: []*InnerMessage{{}}, + } + expected := &MyMessage{ + RepInner: []*InnerMessage{{ + Port: Int32(4000), + }}, + } + SetDefaults(m) + if !Equal(m, expected) { + t.Errorf("\n got %v\nwant %v", m, expected) + } +} + +func TestSetDefaultWithRepeatedNonMessage(t *testing.T) { + m := &MyMessage{ + Pet: []string{"turtle", "wombat"}, + } + expected := Clone(m) + SetDefaults(m) + if !Equal(m, expected) { + t.Errorf("\n got %v\nwant %v", m, expected) + } +} + +func TestMaximumTagNumber(t *testing.T) { + m := &MaxTag{ + LastField: String("natural goat essence"), + } + buf, err := Marshal(m) + if err != nil 
{ + t.Fatalf("proto.Marshal failed: %v", err) + } + m2 := new(MaxTag) + if err := Unmarshal(buf, m2); err != nil { + t.Fatalf("proto.Unmarshal failed: %v", err) + } + if got, want := m2.GetLastField(), *m.LastField; got != want { + t.Errorf("got %q, want %q", got, want) + } +} + +func TestJSON(t *testing.T) { + m := &MyMessage{ + Count: Int32(4), + Pet: []string{"bunny", "kitty"}, + Inner: &InnerMessage{ + Host: String("cauchy"), + }, + Bikeshed: MyMessage_GREEN.Enum(), + } + const expected = `{"count":4,"pet":["bunny","kitty"],"inner":{"host":"cauchy"},"bikeshed":1}` + + b, err := json.Marshal(m) + if err != nil { + t.Fatalf("json.Marshal failed: %v", err) + } + s := string(b) + if s != expected { + t.Errorf("got %s\nwant %s", s, expected) + } + + received := new(MyMessage) + if err := json.Unmarshal(b, received); err != nil { + t.Fatalf("json.Unmarshal failed: %v", err) + } + if !Equal(received, m) { + t.Fatalf("got %s, want %s", received, m) + } + + // Test unmarshalling of JSON with symbolic enum name. + const old = `{"count":4,"pet":["bunny","kitty"],"inner":{"host":"cauchy"},"bikeshed":"GREEN"}` + received.Reset() + if err := json.Unmarshal([]byte(old), received); err != nil { + t.Fatalf("json.Unmarshal failed: %v", err) + } + if !Equal(received, m) { + t.Fatalf("got %s, want %s", received, m) + } +} + +func TestBadWireType(t *testing.T) { + b := []byte{7<<3 | 6} // field 7, wire type 6 + pb := new(OtherMessage) + if err := Unmarshal(b, pb); err == nil { + t.Errorf("Unmarshal did not fail") + } else if !strings.Contains(err.Error(), "unknown wire type") { + t.Errorf("wrong error: %v", err) + } +} + +func TestBytesWithInvalidLength(t *testing.T) { + // If a byte sequence has an invalid (negative) length, Unmarshal should not panic. + b := []byte{2<<3 | WireBytes, 0xff, 0xff, 0xff, 0xff, 0xff, 0} + Unmarshal(b, new(MyMessage)) +} + +func TestLengthOverflow(t *testing.T) { + // Overflowing a length should not panic. 
+ b := []byte{2<<3 | WireBytes, 1, 1, 3<<3 | WireBytes, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f, 0x01} + Unmarshal(b, new(MyMessage)) +} + +func TestVarintOverflow(t *testing.T) { + // Overflowing a 64-bit length should not be allowed. + b := []byte{1<<3 | WireVarint, 0x01, 3<<3 | WireBytes, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x01} + if err := Unmarshal(b, new(MyMessage)); err == nil { + t.Fatalf("Overflowed uint64 length without error") + } +} + +func TestUnmarshalFuzz(t *testing.T) { + const N = 1000 + seed := time.Now().UnixNano() + t.Logf("RNG seed is %d", seed) + rng := rand.New(rand.NewSource(seed)) + buf := make([]byte, 20) + for i := 0; i < N; i++ { + for j := range buf { + buf[j] = byte(rng.Intn(256)) + } + fuzzUnmarshal(t, buf) + } +} + +func TestMergeMessages(t *testing.T) { + pb := &MessageList{Message: []*MessageList_Message{{Name: String("x"), Count: Int32(1)}}} + data, err := Marshal(pb) + if err != nil { + t.Fatalf("Marshal: %v", err) + } + + pb1 := new(MessageList) + if err := Unmarshal(data, pb1); err != nil { + t.Fatalf("first Unmarshal: %v", err) + } + if err := Unmarshal(data, pb1); err != nil { + t.Fatalf("second Unmarshal: %v", err) + } + if len(pb1.Message) != 1 { + t.Errorf("two Unmarshals produced %d Messages, want 1", len(pb1.Message)) + } + + pb2 := new(MessageList) + if err := UnmarshalMerge(data, pb2); err != nil { + t.Fatalf("first UnmarshalMerge: %v", err) + } + if err := UnmarshalMerge(data, pb2); err != nil { + t.Fatalf("second UnmarshalMerge: %v", err) + } + if len(pb2.Message) != 2 { + t.Errorf("two UnmarshalMerges produced %d Messages, want 2", len(pb2.Message)) + } +} + +func TestExtensionMarshalOrder(t *testing.T) { + m := &MyMessage{Count: Int(123)} + if err := SetExtension(m, E_Ext_More, &Ext{Data: String("alpha")}); err != nil { + t.Fatalf("SetExtension: %v", err) + } + if err := SetExtension(m, E_Ext_Text, String("aleph")); err != nil { + t.Fatalf("SetExtension: %v", err) + } + if err 
:= SetExtension(m, E_Ext_Number, Int32(1)); err != nil { + t.Fatalf("SetExtension: %v", err) + } + + // Serialize m several times, and check we get the same bytes each time. + var orig []byte + for i := 0; i < 100; i++ { + b, err := Marshal(m) + if err != nil { + t.Fatalf("Marshal: %v", err) + } + if i == 0 { + orig = b + continue + } + if !bytes.Equal(b, orig) { + t.Errorf("Bytes differ on attempt #%d", i) + } + } +} + +// Many extensions, because small maps might not iterate differently on each iteration. +var exts = []*ExtensionDesc{ + E_X201, + E_X202, + E_X203, + E_X204, + E_X205, + E_X206, + E_X207, + E_X208, + E_X209, + E_X210, + E_X211, + E_X212, + E_X213, + E_X214, + E_X215, + E_X216, + E_X217, + E_X218, + E_X219, + E_X220, + E_X221, + E_X222, + E_X223, + E_X224, + E_X225, + E_X226, + E_X227, + E_X228, + E_X229, + E_X230, + E_X231, + E_X232, + E_X233, + E_X234, + E_X235, + E_X236, + E_X237, + E_X238, + E_X239, + E_X240, + E_X241, + E_X242, + E_X243, + E_X244, + E_X245, + E_X246, + E_X247, + E_X248, + E_X249, + E_X250, +} + +func TestMessageSetMarshalOrder(t *testing.T) { + m := &MyMessageSet{} + for _, x := range exts { + if err := SetExtension(m, x, &Empty{}); err != nil { + t.Fatalf("SetExtension: %v", err) + } + } + + buf, err := Marshal(m) + if err != nil { + t.Fatalf("Marshal: %v", err) + } + + // Serialize m several times, and check we get the same bytes each time. 
+ for i := 0; i < 10; i++ { + b1, err := Marshal(m) + if err != nil { + t.Fatalf("Marshal: %v", err) + } + if !bytes.Equal(b1, buf) { + t.Errorf("Bytes differ on re-Marshal #%d", i) + } + + m2 := &MyMessageSet{} + if err := Unmarshal(buf, m2); err != nil { + t.Errorf("Unmarshal: %v", err) + } + b2, err := Marshal(m2) + if err != nil { + t.Errorf("re-Marshal: %v", err) + } + if !bytes.Equal(b2, buf) { + t.Errorf("Bytes differ on round-trip #%d", i) + } + } +} + +func TestUnmarshalMergesMessages(t *testing.T) { + // If a nested message occurs twice in the input, + // the fields should be merged when decoding. + a := &OtherMessage{ + Key: Int64(123), + Inner: &InnerMessage{ + Host: String("polhode"), + Port: Int32(1234), + }, + } + aData, err := Marshal(a) + if err != nil { + t.Fatalf("Marshal(a): %v", err) + } + b := &OtherMessage{ + Weight: Float32(1.2), + Inner: &InnerMessage{ + Host: String("herpolhode"), + Connected: Bool(true), + }, + } + bData, err := Marshal(b) + if err != nil { + t.Fatalf("Marshal(b): %v", err) + } + want := &OtherMessage{ + Key: Int64(123), + Weight: Float32(1.2), + Inner: &InnerMessage{ + Host: String("herpolhode"), + Port: Int32(1234), + Connected: Bool(true), + }, + } + got := new(OtherMessage) + if err := Unmarshal(append(aData, bData...), got); err != nil { + t.Fatalf("Unmarshal: %v", err) + } + if !Equal(got, want) { + t.Errorf("\n got %v\nwant %v", got, want) + } +} + +func TestEncodingSizes(t *testing.T) { + tests := []struct { + m Message + n int + }{ + {&Defaults{F_Int32: Int32(math.MaxInt32)}, 6}, + {&Defaults{F_Int32: Int32(math.MinInt32)}, 11}, + {&Defaults{F_Uint32: Uint32(uint32(math.MaxInt32) + 1)}, 6}, + {&Defaults{F_Uint32: Uint32(math.MaxUint32)}, 6}, + } + for _, test := range tests { + b, err := Marshal(test.m) + if err != nil { + t.Errorf("Marshal(%v): %v", test.m, err) + continue + } + if len(b) != test.n { + t.Errorf("Marshal(%v) yielded %d bytes, want %d bytes", test.m, len(b), test.n) + } + } +} + +func 
TestRequiredNotSetError(t *testing.T) { + pb := initGoTest(false) + pb.RequiredField.Label = nil + pb.F_Int32Required = nil + pb.F_Int64Required = nil + + expected := "0807" + // field 1, encoding 0, value 7 + "2206" + "120474797065" + // field 4, encoding 2 (GoTestField) + "5001" + // field 10, encoding 0, value 1 + "6d20000000" + // field 13, encoding 5, value 0x20 + "714000000000000000" + // field 14, encoding 1, value 0x40 + "78a019" + // field 15, encoding 0, value 0xca0 = 3232 + "8001c032" + // field 16, encoding 0, value 0x1940 = 6464 + "8d0100004a45" + // field 17, encoding 5, value 3232.0 + "9101000000000040b940" + // field 18, encoding 1, value 6464.0 + "9a0106" + "737472696e67" + // field 19, encoding 2, string "string" + "b304" + // field 70, encoding 3, start group + "ba0408" + "7265717569726564" + // field 71, encoding 2, string "required" + "b404" + // field 70, encoding 4, end group + "aa0605" + "6279746573" + // field 101, encoding 2, string "bytes" + "b0063f" + // field 102, encoding 0, 0x3f zigzag32 + "b8067f" // field 103, encoding 0, 0x7f zigzag64 + + o := old() + bytes, err := Marshal(pb) + if _, ok := err.(*RequiredNotSetError); !ok { + fmt.Printf("marshal-1 err = %v, want *RequiredNotSetError", err) + o.DebugPrint("", bytes) + t.Fatalf("expected = %s", expected) + } + if strings.Index(err.Error(), "RequiredField.Label") < 0 { + t.Errorf("marshal-1 wrong err msg: %v", err) + } + if !equal(bytes, expected, t) { + o.DebugPrint("neq 1", bytes) + t.Fatalf("expected = %s", expected) + } + + // Now test Unmarshal by recreating the original buffer. 
+ pbd := new(GoTest) + err = Unmarshal(bytes, pbd) + if _, ok := err.(*RequiredNotSetError); !ok { + t.Fatalf("unmarshal err = %v, want *RequiredNotSetError", err) + o.DebugPrint("", bytes) + t.Fatalf("string = %s", expected) + } + if strings.Index(err.Error(), "RequiredField.{Unknown}") < 0 { + t.Errorf("unmarshal wrong err msg: %v", err) + } + bytes, err = Marshal(pbd) + if _, ok := err.(*RequiredNotSetError); !ok { + t.Errorf("marshal-2 err = %v, want *RequiredNotSetError", err) + o.DebugPrint("", bytes) + t.Fatalf("string = %s", expected) + } + if strings.Index(err.Error(), "RequiredField.Label") < 0 { + t.Errorf("marshal-2 wrong err msg: %v", err) + } + if !equal(bytes, expected, t) { + o.DebugPrint("neq 2", bytes) + t.Fatalf("string = %s", expected) + } +} + +func fuzzUnmarshal(t *testing.T, data []byte) { + defer func() { + if e := recover(); e != nil { + t.Errorf("These bytes caused a panic: %+v", data) + t.Logf("Stack:\n%s", debug.Stack()) + t.FailNow() + } + }() + + pb := new(MyMessage) + Unmarshal(data, pb) +} + +func TestMapFieldMarshal(t *testing.T) { + m := &MessageWithMap{ + NameMapping: map[int32]string{ + 1: "Rob", + 4: "Ian", + 8: "Dave", + }, + } + b, err := Marshal(m) + if err != nil { + t.Fatalf("Marshal: %v", err) + } + + // b should be the concatenation of these three byte sequences in some order. 
+ parts := []string{ + "\n\a\b\x01\x12\x03Rob", + "\n\a\b\x04\x12\x03Ian", + "\n\b\b\x08\x12\x04Dave", + } + ok := false + for i := range parts { + for j := range parts { + if j == i { + continue + } + for k := range parts { + if k == i || k == j { + continue + } + try := parts[i] + parts[j] + parts[k] + if bytes.Equal(b, []byte(try)) { + ok = true + break + } + } + } + } + if !ok { + t.Fatalf("Incorrect Marshal output.\n got %q\nwant %q (or a permutation of that)", b, parts[0]+parts[1]+parts[2]) + } + t.Logf("FYI b: %q", b) + + (new(Buffer)).DebugPrint("Dump of b", b) +} + +func TestMapFieldRoundTrips(t *testing.T) { + m := &MessageWithMap{ + NameMapping: map[int32]string{ + 1: "Rob", + 4: "Ian", + 8: "Dave", + }, + MsgMapping: map[int64]*FloatingPoint{ + 0x7001: &FloatingPoint{F: Float64(2.0)}, + }, + ByteMapping: map[bool][]byte{ + false: []byte("that's not right!"), + true: []byte("aye, 'tis true!"), + }, + } + b, err := Marshal(m) + if err != nil { + t.Fatalf("Marshal: %v", err) + } + t.Logf("FYI b: %q", b) + m2 := new(MessageWithMap) + if err := Unmarshal(b, m2); err != nil { + t.Fatalf("Unmarshal: %v", err) + } + for _, pair := range [][2]interface{}{ + {m.NameMapping, m2.NameMapping}, + {m.MsgMapping, m2.MsgMapping}, + {m.ByteMapping, m2.ByteMapping}, + } { + if !reflect.DeepEqual(pair[0], pair[1]) { + t.Errorf("Map did not survive a round trip.\ninitial: %v\n final: %v", pair[0], pair[1]) + } + } +} + +func TestMapFieldWithNil(t *testing.T) { + m := &MessageWithMap{ + MsgMapping: map[int64]*FloatingPoint{ + 1: nil, + }, + } + b, err := Marshal(m) + if err == nil { + t.Fatalf("Marshal of bad map should have failed, got these bytes: %v", b) + } +} + +// Benchmarks + +func testMsg() *GoTest { + pb := initGoTest(true) + const N = 1000 // Internally the library starts much smaller. 
+ pb.F_Int32Repeated = make([]int32, N) + pb.F_DoubleRepeated = make([]float64, N) + for i := 0; i < N; i++ { + pb.F_Int32Repeated[i] = int32(i) + pb.F_DoubleRepeated[i] = float64(i) + } + return pb +} + +func bytesMsg() *GoTest { + pb := initGoTest(true) + buf := make([]byte, 4000) + for i := range buf { + buf[i] = byte(i) + } + pb.F_BytesDefaulted = buf + return pb +} + +func benchmarkMarshal(b *testing.B, pb Message, marshal func(Message) ([]byte, error)) { + d, _ := marshal(pb) + b.SetBytes(int64(len(d))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + marshal(pb) + } +} + +func benchmarkBufferMarshal(b *testing.B, pb Message) { + p := NewBuffer(nil) + benchmarkMarshal(b, pb, func(pb0 Message) ([]byte, error) { + p.Reset() + err := p.Marshal(pb0) + return p.Bytes(), err + }) +} + +func benchmarkSize(b *testing.B, pb Message) { + benchmarkMarshal(b, pb, func(pb0 Message) ([]byte, error) { + Size(pb) + return nil, nil + }) +} + +func newOf(pb Message) Message { + in := reflect.ValueOf(pb) + if in.IsNil() { + return pb + } + return reflect.New(in.Type().Elem()).Interface().(Message) +} + +func benchmarkUnmarshal(b *testing.B, pb Message, unmarshal func([]byte, Message) error) { + d, _ := Marshal(pb) + b.SetBytes(int64(len(d))) + pbd := newOf(pb) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + unmarshal(d, pbd) + } +} + +func benchmarkBufferUnmarshal(b *testing.B, pb Message) { + p := NewBuffer(nil) + benchmarkUnmarshal(b, pb, func(d []byte, pb0 Message) error { + p.SetBuf(d) + return p.Unmarshal(pb0) + }) +} + +// Benchmark{Marshal,BufferMarshal,Size,Unmarshal,BufferUnmarshal}{,Bytes} + +func BenchmarkMarshal(b *testing.B) { + benchmarkMarshal(b, testMsg(), Marshal) +} + +func BenchmarkBufferMarshal(b *testing.B) { + benchmarkBufferMarshal(b, testMsg()) +} + +func BenchmarkSize(b *testing.B) { + benchmarkSize(b, testMsg()) +} + +func BenchmarkUnmarshal(b *testing.B) { + benchmarkUnmarshal(b, testMsg(), Unmarshal) +} + +func BenchmarkBufferUnmarshal(b *testing.B) 
{ + benchmarkBufferUnmarshal(b, testMsg()) +} + +func BenchmarkMarshalBytes(b *testing.B) { + benchmarkMarshal(b, bytesMsg(), Marshal) +} + +func BenchmarkBufferMarshalBytes(b *testing.B) { + benchmarkBufferMarshal(b, bytesMsg()) +} + +func BenchmarkSizeBytes(b *testing.B) { + benchmarkSize(b, bytesMsg()) +} + +func BenchmarkUnmarshalBytes(b *testing.B) { + benchmarkUnmarshal(b, bytesMsg(), Unmarshal) +} + +func BenchmarkBufferUnmarshalBytes(b *testing.B) { + benchmarkBufferUnmarshal(b, bytesMsg()) +} + +func BenchmarkUnmarshalUnrecognizedFields(b *testing.B) { + b.StopTimer() + pb := initGoTestField() + skip := &GoSkipTest{ + SkipInt32: Int32(32), + SkipFixed32: Uint32(3232), + SkipFixed64: Uint64(6464), + SkipString: String("skipper"), + Skipgroup: &GoSkipTest_SkipGroup{ + GroupInt32: Int32(75), + GroupString: String("wxyz"), + }, + } + + pbd := new(GoTestField) + p := NewBuffer(nil) + p.Marshal(pb) + p.Marshal(skip) + p2 := NewBuffer(nil) + + b.StartTimer() + for i := 0; i < b.N; i++ { + p2.SetBuf(p.Bytes()) + p2.Unmarshal(pbd) + } +} diff -Nru juju-core-2.0~beta15/src/github.com/golang/protobuf/proto/clone.go juju-core-2.0.0/src/github.com/golang/protobuf/proto/clone.go --- juju-core-2.0~beta15/src/github.com/golang/protobuf/proto/clone.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/golang/protobuf/proto/clone.go 2016-10-13 14:32:09.000000000 +0000 @@ -0,0 +1,197 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2011 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Protocol buffer deep copy and merge. +// TODO: MessageSet and RawMessage. + +package proto + +import ( + "log" + "reflect" + "strings" +) + +// Clone returns a deep copy of a protocol buffer. +func Clone(pb Message) Message { + in := reflect.ValueOf(pb) + if in.IsNil() { + return pb + } + + out := reflect.New(in.Type().Elem()) + // out is empty so a merge is a deep copy. + mergeStruct(out.Elem(), in.Elem()) + return out.Interface().(Message) +} + +// Merge merges src into dst. +// Required and optional fields that are set in src will be set to that value in dst. +// Elements of repeated fields will be appended. +// Merge panics if src and dst are not the same type, or if dst is nil. 
+func Merge(dst, src Message) { + in := reflect.ValueOf(src) + out := reflect.ValueOf(dst) + if out.IsNil() { + panic("proto: nil destination") + } + if in.Type() != out.Type() { + // Explicit test prior to mergeStruct so that mistyped nils will fail + panic("proto: type mismatch") + } + if in.IsNil() { + // Merging nil into non-nil is a quiet no-op + return + } + mergeStruct(out.Elem(), in.Elem()) +} + +func mergeStruct(out, in reflect.Value) { + for i := 0; i < in.NumField(); i++ { + f := in.Type().Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + mergeAny(out.Field(i), in.Field(i)) + } + + if emIn, ok := in.Addr().Interface().(extendableProto); ok { + emOut := out.Addr().Interface().(extendableProto) + mergeExtension(emOut.ExtensionMap(), emIn.ExtensionMap()) + } + + uf := in.FieldByName("XXX_unrecognized") + if !uf.IsValid() { + return + } + uin := uf.Bytes() + if len(uin) > 0 { + out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...)) + } +} + +func mergeAny(out, in reflect.Value) { + if in.Type() == protoMessageType { + if !in.IsNil() { + if out.IsNil() { + out.Set(reflect.ValueOf(Clone(in.Interface().(Message)))) + } else { + Merge(out.Interface().(Message), in.Interface().(Message)) + } + } + return + } + switch in.Kind() { + case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, + reflect.String, reflect.Uint32, reflect.Uint64: + out.Set(in) + case reflect.Map: + if in.Len() == 0 { + return + } + if out.IsNil() { + out.Set(reflect.MakeMap(in.Type())) + } + // For maps with value types of *T or []byte we need to deep copy each value. 
+ elemKind := in.Type().Elem().Kind() + for _, key := range in.MapKeys() { + var val reflect.Value + switch elemKind { + case reflect.Ptr: + val = reflect.New(in.Type().Elem().Elem()) + mergeAny(val, in.MapIndex(key)) + case reflect.Slice: + val = in.MapIndex(key) + val = reflect.ValueOf(append([]byte{}, val.Bytes()...)) + default: + val = in.MapIndex(key) + } + out.SetMapIndex(key, val) + } + case reflect.Ptr: + if in.IsNil() { + return + } + if out.IsNil() { + out.Set(reflect.New(in.Elem().Type())) + } + mergeAny(out.Elem(), in.Elem()) + case reflect.Slice: + if in.IsNil() { + return + } + if in.Type().Elem().Kind() == reflect.Uint8 { + // []byte is a scalar bytes field, not a repeated field. + // Make a deep copy. + // Append to []byte{} instead of []byte(nil) so that we never end up + // with a nil result. + out.SetBytes(append([]byte{}, in.Bytes()...)) + return + } + n := in.Len() + if out.IsNil() { + out.Set(reflect.MakeSlice(in.Type(), 0, n)) + } + switch in.Type().Elem().Kind() { + case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, + reflect.String, reflect.Uint32, reflect.Uint64: + out.Set(reflect.AppendSlice(out, in)) + default: + for i := 0; i < n; i++ { + x := reflect.Indirect(reflect.New(in.Type().Elem())) + mergeAny(x, in.Index(i)) + out.Set(reflect.Append(out, x)) + } + } + case reflect.Struct: + mergeStruct(out, in) + default: + // unknown type, so not a protocol buffer + log.Printf("proto: don't know how to copy %v", in) + } +} + +func mergeExtension(out, in map[int32]Extension) { + for extNum, eIn := range in { + eOut := Extension{desc: eIn.desc} + if eIn.value != nil { + v := reflect.New(reflect.TypeOf(eIn.value)).Elem() + mergeAny(v, reflect.ValueOf(eIn.value)) + eOut.value = v.Interface() + } + if eIn.enc != nil { + eOut.enc = make([]byte, len(eIn.enc)) + copy(eOut.enc, eIn.enc) + } + + out[extNum] = eOut + } +} diff -Nru juju-core-2.0~beta15/src/github.com/golang/protobuf/proto/clone_test.go 
juju-core-2.0.0/src/github.com/golang/protobuf/proto/clone_test.go --- juju-core-2.0~beta15/src/github.com/golang/protobuf/proto/clone_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/golang/protobuf/proto/clone_test.go 2016-10-13 14:32:09.000000000 +0000 @@ -0,0 +1,227 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2011 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +package proto_test + +import ( + "testing" + + "github.com/golang/protobuf/proto" + + pb "github.com/golang/protobuf/proto/testdata" +) + +var cloneTestMessage = &pb.MyMessage{ + Count: proto.Int32(42), + Name: proto.String("Dave"), + Pet: []string{"bunny", "kitty", "horsey"}, + Inner: &pb.InnerMessage{ + Host: proto.String("niles"), + Port: proto.Int32(9099), + Connected: proto.Bool(true), + }, + Others: []*pb.OtherMessage{ + { + Value: []byte("some bytes"), + }, + }, + Somegroup: &pb.MyMessage_SomeGroup{ + GroupField: proto.Int32(6), + }, + RepBytes: [][]byte{[]byte("sham"), []byte("wow")}, +} + +func init() { + ext := &pb.Ext{ + Data: proto.String("extension"), + } + if err := proto.SetExtension(cloneTestMessage, pb.E_Ext_More, ext); err != nil { + panic("SetExtension: " + err.Error()) + } +} + +func TestClone(t *testing.T) { + m := proto.Clone(cloneTestMessage).(*pb.MyMessage) + if !proto.Equal(m, cloneTestMessage) { + t.Errorf("Clone(%v) = %v", cloneTestMessage, m) + } + + // Verify it was a deep copy. + *m.Inner.Port++ + if proto.Equal(m, cloneTestMessage) { + t.Error("Mutating clone changed the original") + } + // Byte fields and repeated fields should be copied. 
+ if &m.Pet[0] == &cloneTestMessage.Pet[0] { + t.Error("Pet: repeated field not copied") + } + if &m.Others[0] == &cloneTestMessage.Others[0] { + t.Error("Others: repeated field not copied") + } + if &m.Others[0].Value[0] == &cloneTestMessage.Others[0].Value[0] { + t.Error("Others[0].Value: bytes field not copied") + } + if &m.RepBytes[0] == &cloneTestMessage.RepBytes[0] { + t.Error("RepBytes: repeated field not copied") + } + if &m.RepBytes[0][0] == &cloneTestMessage.RepBytes[0][0] { + t.Error("RepBytes[0]: bytes field not copied") + } +} + +func TestCloneNil(t *testing.T) { + var m *pb.MyMessage + if c := proto.Clone(m); !proto.Equal(m, c) { + t.Errorf("Clone(%v) = %v", m, c) + } +} + +var mergeTests = []struct { + src, dst, want proto.Message +}{ + { + src: &pb.MyMessage{ + Count: proto.Int32(42), + }, + dst: &pb.MyMessage{ + Name: proto.String("Dave"), + }, + want: &pb.MyMessage{ + Count: proto.Int32(42), + Name: proto.String("Dave"), + }, + }, + { + src: &pb.MyMessage{ + Inner: &pb.InnerMessage{ + Host: proto.String("hey"), + Connected: proto.Bool(true), + }, + Pet: []string{"horsey"}, + Others: []*pb.OtherMessage{ + { + Value: []byte("some bytes"), + }, + }, + }, + dst: &pb.MyMessage{ + Inner: &pb.InnerMessage{ + Host: proto.String("niles"), + Port: proto.Int32(9099), + }, + Pet: []string{"bunny", "kitty"}, + Others: []*pb.OtherMessage{ + { + Key: proto.Int64(31415926535), + }, + { + // Explicitly test a src=nil field + Inner: nil, + }, + }, + }, + want: &pb.MyMessage{ + Inner: &pb.InnerMessage{ + Host: proto.String("hey"), + Connected: proto.Bool(true), + Port: proto.Int32(9099), + }, + Pet: []string{"bunny", "kitty", "horsey"}, + Others: []*pb.OtherMessage{ + { + Key: proto.Int64(31415926535), + }, + {}, + { + Value: []byte("some bytes"), + }, + }, + }, + }, + { + src: &pb.MyMessage{ + RepBytes: [][]byte{[]byte("wow")}, + }, + dst: &pb.MyMessage{ + Somegroup: &pb.MyMessage_SomeGroup{ + GroupField: proto.Int32(6), + }, + RepBytes: [][]byte{[]byte("sham")}, + 
}, + want: &pb.MyMessage{ + Somegroup: &pb.MyMessage_SomeGroup{ + GroupField: proto.Int32(6), + }, + RepBytes: [][]byte{[]byte("sham"), []byte("wow")}, + }, + }, + // Check that a scalar bytes field replaces rather than appends. + { + src: &pb.OtherMessage{Value: []byte("foo")}, + dst: &pb.OtherMessage{Value: []byte("bar")}, + want: &pb.OtherMessage{Value: []byte("foo")}, + }, + { + src: &pb.MessageWithMap{ + NameMapping: map[int32]string{6: "Nigel"}, + MsgMapping: map[int64]*pb.FloatingPoint{ + 0x4001: &pb.FloatingPoint{F: proto.Float64(2.0)}, + }, + ByteMapping: map[bool][]byte{true: []byte("wowsa")}, + }, + dst: &pb.MessageWithMap{ + NameMapping: map[int32]string{ + 6: "Bruce", // should be overwritten + 7: "Andrew", + }, + }, + want: &pb.MessageWithMap{ + NameMapping: map[int32]string{ + 6: "Nigel", + 7: "Andrew", + }, + MsgMapping: map[int64]*pb.FloatingPoint{ + 0x4001: &pb.FloatingPoint{F: proto.Float64(2.0)}, + }, + ByteMapping: map[bool][]byte{true: []byte("wowsa")}, + }, + }, +} + +func TestMerge(t *testing.T) { + for _, m := range mergeTests { + got := proto.Clone(m.dst) + proto.Merge(got, m.src) + if !proto.Equal(got, m.want) { + t.Errorf("Merge(%v, %v)\n got %v\nwant %v\n", m.dst, m.src, got, m.want) + } + } +} diff -Nru juju-core-2.0~beta15/src/github.com/golang/protobuf/proto/decode.go juju-core-2.0.0/src/github.com/golang/protobuf/proto/decode.go --- juju-core-2.0~beta15/src/github.com/golang/protobuf/proto/decode.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/golang/protobuf/proto/decode.go 2016-10-13 14:32:09.000000000 +0000 @@ -0,0 +1,827 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. 
+// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Routines for decoding protocol buffer data to construct in-memory representations. + */ + +import ( + "errors" + "fmt" + "io" + "os" + "reflect" +) + +// errOverflow is returned when an integer is too large to be represented. +var errOverflow = errors.New("proto: integer overflow") + +// The fundamental decoders that interpret bytes on the wire. 
+// Those that take integer types all return uint64 and are +// therefore of type valueDecoder. + +// DecodeVarint reads a varint-encoded integer from the slice. +// It returns the integer and the number of bytes consumed, or +// zero if there is not enough. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +func DecodeVarint(buf []byte) (x uint64, n int) { + // x, n already 0 + for shift := uint(0); shift < 64; shift += 7 { + if n >= len(buf) { + return 0, 0 + } + b := uint64(buf[n]) + n++ + x |= (b & 0x7F) << shift + if (b & 0x80) == 0 { + return x, n + } + } + + // The number is too large to represent in a 64-bit value. + return 0, 0 +} + +// DecodeVarint reads a varint-encoded integer from the Buffer. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +func (p *Buffer) DecodeVarint() (x uint64, err error) { + // x, err already 0 + + i := p.index + l := len(p.buf) + + for shift := uint(0); shift < 64; shift += 7 { + if i >= l { + err = io.ErrUnexpectedEOF + return + } + b := p.buf[i] + i++ + x |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + p.index = i + return + } + } + + // The number is too large to represent in a 64-bit value. + err = errOverflow + return +} + +// DecodeFixed64 reads a 64-bit integer from the Buffer. +// This is the format for the +// fixed64, sfixed64, and double protocol buffer types. +func (p *Buffer) DecodeFixed64() (x uint64, err error) { + // x, err already 0 + i := p.index + 8 + if i < 0 || i > len(p.buf) { + err = io.ErrUnexpectedEOF + return + } + p.index = i + + x = uint64(p.buf[i-8]) + x |= uint64(p.buf[i-7]) << 8 + x |= uint64(p.buf[i-6]) << 16 + x |= uint64(p.buf[i-5]) << 24 + x |= uint64(p.buf[i-4]) << 32 + x |= uint64(p.buf[i-3]) << 40 + x |= uint64(p.buf[i-2]) << 48 + x |= uint64(p.buf[i-1]) << 56 + return +} + +// DecodeFixed32 reads a 32-bit integer from the Buffer. 
+// This is the format for the +// fixed32, sfixed32, and float protocol buffer types. +func (p *Buffer) DecodeFixed32() (x uint64, err error) { + // x, err already 0 + i := p.index + 4 + if i < 0 || i > len(p.buf) { + err = io.ErrUnexpectedEOF + return + } + p.index = i + + x = uint64(p.buf[i-4]) + x |= uint64(p.buf[i-3]) << 8 + x |= uint64(p.buf[i-2]) << 16 + x |= uint64(p.buf[i-1]) << 24 + return +} + +// DecodeZigzag64 reads a zigzag-encoded 64-bit integer +// from the Buffer. +// This is the format used for the sint64 protocol buffer type. +func (p *Buffer) DecodeZigzag64() (x uint64, err error) { + x, err = p.DecodeVarint() + if err != nil { + return + } + x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63) + return +} + +// DecodeZigzag32 reads a zigzag-encoded 32-bit integer +// from the Buffer. +// This is the format used for the sint32 protocol buffer type. +func (p *Buffer) DecodeZigzag32() (x uint64, err error) { + x, err = p.DecodeVarint() + if err != nil { + return + } + x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31)) + return +} + +// These are not ValueDecoders: they produce an array of bytes or a string. +// bytes, embedded messages + +// DecodeRawBytes reads a count-delimited byte buffer from the Buffer. +// This is the format used for the bytes protocol buffer +// type and for embedded messages. +func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) { + n, err := p.DecodeVarint() + if err != nil { + return nil, err + } + + nb := int(n) + if nb < 0 { + return nil, fmt.Errorf("proto: bad byte length %d", nb) + } + end := p.index + nb + if end < p.index || end > len(p.buf) { + return nil, io.ErrUnexpectedEOF + } + + if !alloc { + // todo: check if can get more uses of alloc=false + buf = p.buf[p.index:end] + p.index += nb + return + } + + buf = make([]byte, nb) + copy(buf, p.buf[p.index:]) + p.index += nb + return +} + +// DecodeStringBytes reads an encoded string from the Buffer. 
+// This is the format used for the proto2 string type. +func (p *Buffer) DecodeStringBytes() (s string, err error) { + buf, err := p.DecodeRawBytes(false) + if err != nil { + return + } + return string(buf), nil +} + +// Skip the next item in the buffer. Its wire type is decoded and presented as an argument. +// If the protocol buffer has extensions, and the field matches, add it as an extension. +// Otherwise, if the XXX_unrecognized field exists, append the skipped data there. +func (o *Buffer) skipAndSave(t reflect.Type, tag, wire int, base structPointer, unrecField field) error { + oi := o.index + + err := o.skip(t, tag, wire) + if err != nil { + return err + } + + if !unrecField.IsValid() { + return nil + } + + ptr := structPointer_Bytes(base, unrecField) + + // Add the skipped field to struct field + obuf := o.buf + + o.buf = *ptr + o.EncodeVarint(uint64(tag<<3 | wire)) + *ptr = append(o.buf, obuf[oi:o.index]...) + + o.buf = obuf + + return nil +} + +// Skip the next item in the buffer. Its wire type is decoded and presented as an argument. +func (o *Buffer) skip(t reflect.Type, tag, wire int) error { + + var u uint64 + var err error + + switch wire { + case WireVarint: + _, err = o.DecodeVarint() + case WireFixed64: + _, err = o.DecodeFixed64() + case WireBytes: + _, err = o.DecodeRawBytes(false) + case WireFixed32: + _, err = o.DecodeFixed32() + case WireStartGroup: + for { + u, err = o.DecodeVarint() + if err != nil { + break + } + fwire := int(u & 0x7) + if fwire == WireEndGroup { + break + } + ftag := int(u >> 3) + err = o.skip(t, ftag, fwire) + if err != nil { + break + } + } + default: + err = fmt.Errorf("proto: can't skip unknown wire type %d for %s", wire, t) + } + return err +} + +// Unmarshaler is the interface representing objects that can +// unmarshal themselves. The method should reset the receiver before +// decoding starts. 
The argument points to data that may be +// overwritten, so implementations should not keep references to the +// buffer. +type Unmarshaler interface { + Unmarshal([]byte) error +} + +// Unmarshal parses the protocol buffer representation in buf and places the +// decoded result in pb. If the struct underlying pb does not match +// the data in buf, the results can be unpredictable. +// +// Unmarshal resets pb before starting to unmarshal, so any +// existing data in pb is always removed. Use UnmarshalMerge +// to preserve and append to existing data. +func Unmarshal(buf []byte, pb Message) error { + pb.Reset() + return UnmarshalMerge(buf, pb) +} + +// UnmarshalMerge parses the protocol buffer representation in buf and +// writes the decoded result to pb. If the struct underlying pb does not match +// the data in buf, the results can be unpredictable. +// +// UnmarshalMerge merges into existing data in pb. +// Most code should use Unmarshal instead. +func UnmarshalMerge(buf []byte, pb Message) error { + // If the object can unmarshal itself, let it. + if u, ok := pb.(Unmarshaler); ok { + return u.Unmarshal(buf) + } + return NewBuffer(buf).Unmarshal(pb) +} + +// Unmarshal parses the protocol buffer representation in the +// Buffer and places the decoded result in pb. If the struct +// underlying pb does not match the data in the buffer, the results can be +// unpredictable. +func (p *Buffer) Unmarshal(pb Message) error { + // If the object can unmarshal itself, let it. + if u, ok := pb.(Unmarshaler); ok { + err := u.Unmarshal(p.buf[p.index:]) + p.index = len(p.buf) + return err + } + + typ, base, err := getbase(pb) + if err != nil { + return err + } + + err = p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), false, base) + + if collectStats { + stats.Decode++ + } + + return err +} + +// unmarshalType does the work of unmarshaling a structure. 
+func (o *Buffer) unmarshalType(st reflect.Type, prop *StructProperties, is_group bool, base structPointer) error { + var state errorState + required, reqFields := prop.reqCount, uint64(0) + + var err error + for err == nil && o.index < len(o.buf) { + oi := o.index + var u uint64 + u, err = o.DecodeVarint() + if err != nil { + break + } + wire := int(u & 0x7) + if wire == WireEndGroup { + if is_group { + return nil // input is satisfied + } + return fmt.Errorf("proto: %s: wiretype end group for non-group", st) + } + tag := int(u >> 3) + if tag <= 0 { + return fmt.Errorf("proto: %s: illegal tag %d (wire type %d)", st, tag, wire) + } + fieldnum, ok := prop.decoderTags.get(tag) + if !ok { + // Maybe it's an extension? + if prop.extendable { + if e := structPointer_Interface(base, st).(extendableProto); isExtensionField(e, int32(tag)) { + if err = o.skip(st, tag, wire); err == nil { + ext := e.ExtensionMap()[int32(tag)] // may be missing + ext.enc = append(ext.enc, o.buf[oi:o.index]...) + e.ExtensionMap()[int32(tag)] = ext + } + continue + } + } + err = o.skipAndSave(st, tag, wire, base, prop.unrecField) + continue + } + p := prop.Prop[fieldnum] + + if p.dec == nil { + fmt.Fprintf(os.Stderr, "proto: no protobuf decoder for %s.%s\n", st, st.Field(fieldnum).Name) + continue + } + dec := p.dec + if wire != WireStartGroup && wire != p.WireType { + if wire == WireBytes && p.packedDec != nil { + // a packable field + dec = p.packedDec + } else { + err = fmt.Errorf("proto: bad wiretype for field %s.%s: got wiretype %d, want %d", st, st.Field(fieldnum).Name, wire, p.WireType) + continue + } + } + decErr := dec(o, p, base) + if decErr != nil && !state.shouldContinue(decErr, p) { + err = decErr + } + if err == nil && p.Required { + // Successfully decoded a required field. + if tag <= 64 { + // use bitmap for fields 1-64 to catch field reuse. 
+ var mask uint64 = 1 << uint64(tag-1) + if reqFields&mask == 0 { + // new required field + reqFields |= mask + required-- + } + } else { + // This is imprecise. It can be fooled by a required field + // with a tag > 64 that is encoded twice; that's very rare. + // A fully correct implementation would require allocating + // a data structure, which we would like to avoid. + required-- + } + } + } + if err == nil { + if is_group { + return io.ErrUnexpectedEOF + } + if state.err != nil { + return state.err + } + if required > 0 { + // Not enough information to determine the exact field. If we use extra + // CPU, we could determine the field only if the missing required field + // has a tag <= 64 and we check reqFields. + return &RequiredNotSetError{"{Unknown}"} + } + } + return err +} + +// Individual type decoders +// For each, +// u is the decoded value, +// v is a pointer to the field (pointer) in the struct + +// Sizes of the pools to allocate inside the Buffer. +// The goal is modest amortization and allocation +// on at least 16-byte boundaries. +const ( + boolPoolSize = 16 + uint32PoolSize = 8 + uint64PoolSize = 4 +) + +// Decode a bool. +func (o *Buffer) dec_bool(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + if len(o.bools) == 0 { + o.bools = make([]bool, boolPoolSize) + } + o.bools[0] = u != 0 + *structPointer_Bool(base, p.field) = &o.bools[0] + o.bools = o.bools[1:] + return nil +} + +func (o *Buffer) dec_proto3_bool(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + *structPointer_BoolVal(base, p.field) = u != 0 + return nil +} + +// Decode an int32. 
+func (o *Buffer) dec_int32(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + word32_Set(structPointer_Word32(base, p.field), o, uint32(u)) + return nil +} + +func (o *Buffer) dec_proto3_int32(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + word32Val_Set(structPointer_Word32Val(base, p.field), uint32(u)) + return nil +} + +// Decode an int64. +func (o *Buffer) dec_int64(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + word64_Set(structPointer_Word64(base, p.field), o, u) + return nil +} + +func (o *Buffer) dec_proto3_int64(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + word64Val_Set(structPointer_Word64Val(base, p.field), o, u) + return nil +} + +// Decode a string. +func (o *Buffer) dec_string(p *Properties, base structPointer) error { + s, err := o.DecodeStringBytes() + if err != nil { + return err + } + *structPointer_String(base, p.field) = &s + return nil +} + +func (o *Buffer) dec_proto3_string(p *Properties, base structPointer) error { + s, err := o.DecodeStringBytes() + if err != nil { + return err + } + *structPointer_StringVal(base, p.field) = s + return nil +} + +// Decode a slice of bytes ([]byte). +func (o *Buffer) dec_slice_byte(p *Properties, base structPointer) error { + b, err := o.DecodeRawBytes(true) + if err != nil { + return err + } + *structPointer_Bytes(base, p.field) = b + return nil +} + +// Decode a slice of bools ([]bool). +func (o *Buffer) dec_slice_bool(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + v := structPointer_BoolSlice(base, p.field) + *v = append(*v, u != 0) + return nil +} + +// Decode a slice of bools ([]bool) in packed format. 
+func (o *Buffer) dec_slice_packed_bool(p *Properties, base structPointer) error { + v := structPointer_BoolSlice(base, p.field) + + nn, err := o.DecodeVarint() + if err != nil { + return err + } + nb := int(nn) // number of bytes of encoded bools + + y := *v + for i := 0; i < nb; i++ { + u, err := p.valDec(o) + if err != nil { + return err + } + y = append(y, u != 0) + } + + *v = y + return nil +} + +// Decode a slice of int32s ([]int32). +func (o *Buffer) dec_slice_int32(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + structPointer_Word32Slice(base, p.field).Append(uint32(u)) + return nil +} + +// Decode a slice of int32s ([]int32) in packed format. +func (o *Buffer) dec_slice_packed_int32(p *Properties, base structPointer) error { + v := structPointer_Word32Slice(base, p.field) + + nn, err := o.DecodeVarint() + if err != nil { + return err + } + nb := int(nn) // number of bytes of encoded int32s + + fin := o.index + nb + if fin < o.index { + return errOverflow + } + for o.index < fin { + u, err := p.valDec(o) + if err != nil { + return err + } + v.Append(uint32(u)) + } + return nil +} + +// Decode a slice of int64s ([]int64). +func (o *Buffer) dec_slice_int64(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + + structPointer_Word64Slice(base, p.field).Append(u) + return nil +} + +// Decode a slice of int64s ([]int64) in packed format. +func (o *Buffer) dec_slice_packed_int64(p *Properties, base structPointer) error { + v := structPointer_Word64Slice(base, p.field) + + nn, err := o.DecodeVarint() + if err != nil { + return err + } + nb := int(nn) // number of bytes of encoded int64s + + fin := o.index + nb + if fin < o.index { + return errOverflow + } + for o.index < fin { + u, err := p.valDec(o) + if err != nil { + return err + } + v.Append(u) + } + return nil +} + +// Decode a slice of strings ([]string). 
+func (o *Buffer) dec_slice_string(p *Properties, base structPointer) error { + s, err := o.DecodeStringBytes() + if err != nil { + return err + } + v := structPointer_StringSlice(base, p.field) + *v = append(*v, s) + return nil +} + +// Decode a slice of slice of bytes ([][]byte). +func (o *Buffer) dec_slice_slice_byte(p *Properties, base structPointer) error { + b, err := o.DecodeRawBytes(true) + if err != nil { + return err + } + v := structPointer_BytesSlice(base, p.field) + *v = append(*v, b) + return nil +} + +// Decode a map field. +func (o *Buffer) dec_new_map(p *Properties, base structPointer) error { + raw, err := o.DecodeRawBytes(false) + if err != nil { + return err + } + oi := o.index // index at the end of this map entry + o.index -= len(raw) // move buffer back to start of map entry + + mptr := structPointer_Map(base, p.field, p.mtype) // *map[K]V + if mptr.Elem().IsNil() { + mptr.Elem().Set(reflect.MakeMap(mptr.Type().Elem())) + } + v := mptr.Elem() // map[K]V + + // Prepare addressable doubly-indirect placeholders for the key and value types. + // See enc_new_map for why. + keyptr := reflect.New(reflect.PtrTo(p.mtype.Key())).Elem() // addressable *K + keybase := toStructPointer(keyptr.Addr()) // **K + + var valbase structPointer + var valptr reflect.Value + switch p.mtype.Elem().Kind() { + case reflect.Slice: + // []byte + var dummy []byte + valptr = reflect.ValueOf(&dummy) // *[]byte + valbase = toStructPointer(valptr) // *[]byte + case reflect.Ptr: + // message; valptr is **Msg; need to allocate the intermediate pointer + valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V + valptr.Set(reflect.New(valptr.Type().Elem())) + valbase = toStructPointer(valptr) + default: + // everything else + valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V + valbase = toStructPointer(valptr.Addr()) // **V + } + + // Decode. 
+ // This parses a restricted wire format, namely the encoding of a message + // with two fields. See enc_new_map for the format. + for o.index < oi { + // tagcode for key and value properties are always a single byte + // because they have tags 1 and 2. + tagcode := o.buf[o.index] + o.index++ + switch tagcode { + case p.mkeyprop.tagcode[0]: + if err := p.mkeyprop.dec(o, p.mkeyprop, keybase); err != nil { + return err + } + case p.mvalprop.tagcode[0]: + if err := p.mvalprop.dec(o, p.mvalprop, valbase); err != nil { + return err + } + default: + // TODO: Should we silently skip this instead? + return fmt.Errorf("proto: bad map data tag %d", raw[0]) + } + } + keyelem, valelem := keyptr.Elem(), valptr.Elem() + if !keyelem.IsValid() || !valelem.IsValid() { + // We did not decode the key or the value in the map entry. + // Either way, it's an invalid map entry. + return fmt.Errorf("proto: bad map data: missing key/val") + } + + v.SetMapIndex(keyelem, valelem) + return nil +} + +// Decode a group. +func (o *Buffer) dec_struct_group(p *Properties, base structPointer) error { + bas := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(bas) { + // allocate new nested message + bas = toStructPointer(reflect.New(p.stype)) + structPointer_SetStructPointer(base, p.field, bas) + } + return o.unmarshalType(p.stype, p.sprop, true, bas) +} + +// Decode an embedded message. +func (o *Buffer) dec_struct_message(p *Properties, base structPointer) (err error) { + raw, e := o.DecodeRawBytes(false) + if e != nil { + return e + } + + bas := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(bas) { + // allocate new nested message + bas = toStructPointer(reflect.New(p.stype)) + structPointer_SetStructPointer(base, p.field, bas) + } + + // If the object can unmarshal itself, let it. 
+ if p.isUnmarshaler { + iv := structPointer_Interface(bas, p.stype) + return iv.(Unmarshaler).Unmarshal(raw) + } + + obuf := o.buf + oi := o.index + o.buf = raw + o.index = 0 + + err = o.unmarshalType(p.stype, p.sprop, false, bas) + o.buf = obuf + o.index = oi + + return err +} + +// Decode a slice of embedded messages. +func (o *Buffer) dec_slice_struct_message(p *Properties, base structPointer) error { + return o.dec_slice_struct(p, false, base) +} + +// Decode a slice of embedded groups. +func (o *Buffer) dec_slice_struct_group(p *Properties, base structPointer) error { + return o.dec_slice_struct(p, true, base) +} + +// Decode a slice of structs ([]*struct). +func (o *Buffer) dec_slice_struct(p *Properties, is_group bool, base structPointer) error { + v := reflect.New(p.stype) + bas := toStructPointer(v) + structPointer_StructPointerSlice(base, p.field).Append(bas) + + if is_group { + err := o.unmarshalType(p.stype, p.sprop, is_group, bas) + return err + } + + raw, err := o.DecodeRawBytes(false) + if err != nil { + return err + } + + // If the object can unmarshal itself, let it. + if p.isUnmarshaler { + iv := v.Interface() + return iv.(Unmarshaler).Unmarshal(raw) + } + + obuf := o.buf + oi := o.index + o.buf = raw + o.index = 0 + + err = o.unmarshalType(p.stype, p.sprop, is_group, bas) + + o.buf = obuf + o.index = oi + + return err +} diff -Nru juju-core-2.0~beta15/src/github.com/golang/protobuf/proto/encode.go juju-core-2.0.0/src/github.com/golang/protobuf/proto/encode.go --- juju-core-2.0~beta15/src/github.com/golang/protobuf/proto/encode.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/golang/protobuf/proto/encode.go 2016-10-13 14:32:09.000000000 +0000 @@ -0,0 +1,1293 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. 
+// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Routines for encoding data into the wire format for protocol buffers. + */ + +import ( + "errors" + "fmt" + "reflect" + "sort" +) + +// RequiredNotSetError is the error returned if Marshal is called with +// a protocol buffer struct whose required fields have not +// all been initialized. 
It is also the error returned if Unmarshal is +// called with an encoded protocol buffer that does not include all the +// required fields. +// +// When printed, RequiredNotSetError reports the first unset required field in a +// message. If the field cannot be precisely determined, it is reported as +// "{Unknown}". +type RequiredNotSetError struct { + field string +} + +func (e *RequiredNotSetError) Error() string { + return fmt.Sprintf("proto: required field %q not set", e.field) +} + +var ( + // errRepeatedHasNil is the error returned if Marshal is called with + // a struct with a repeated field containing a nil element. + errRepeatedHasNil = errors.New("proto: repeated field has nil element") + + // ErrNil is the error returned if Marshal is called with nil. + ErrNil = errors.New("proto: Marshal called with nil") +) + +// The fundamental encoders that put bytes on the wire. +// Those that take integer types all accept uint64 and are +// therefore of type valueEncoder. + +const maxVarintBytes = 10 // maximum length of a varint + +// EncodeVarint returns the varint encoding of x. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +// Not used by the package itself, but helpful to clients +// wishing to use the same encoding. +func EncodeVarint(x uint64) []byte { + var buf [maxVarintBytes]byte + var n int + for n = 0; x > 127; n++ { + buf[n] = 0x80 | uint8(x&0x7F) + x >>= 7 + } + buf[n] = uint8(x) + n++ + return buf[0:n] +} + +// EncodeVarint writes a varint-encoded integer to the Buffer. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. 
+func (p *Buffer) EncodeVarint(x uint64) error { + for x >= 1<<7 { + p.buf = append(p.buf, uint8(x&0x7f|0x80)) + x >>= 7 + } + p.buf = append(p.buf, uint8(x)) + return nil +} + +func sizeVarint(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} + +// EncodeFixed64 writes a 64-bit integer to the Buffer. +// This is the format for the +// fixed64, sfixed64, and double protocol buffer types. +func (p *Buffer) EncodeFixed64(x uint64) error { + p.buf = append(p.buf, + uint8(x), + uint8(x>>8), + uint8(x>>16), + uint8(x>>24), + uint8(x>>32), + uint8(x>>40), + uint8(x>>48), + uint8(x>>56)) + return nil +} + +func sizeFixed64(x uint64) int { + return 8 +} + +// EncodeFixed32 writes a 32-bit integer to the Buffer. +// This is the format for the +// fixed32, sfixed32, and float protocol buffer types. +func (p *Buffer) EncodeFixed32(x uint64) error { + p.buf = append(p.buf, + uint8(x), + uint8(x>>8), + uint8(x>>16), + uint8(x>>24)) + return nil +} + +func sizeFixed32(x uint64) int { + return 4 +} + +// EncodeZigzag64 writes a zigzag-encoded 64-bit integer +// to the Buffer. +// This is the format used for the sint64 protocol buffer type. +func (p *Buffer) EncodeZigzag64(x uint64) error { + // use signed number to get arithmetic right shift. + return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} + +func sizeZigzag64(x uint64) int { + return sizeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} + +// EncodeZigzag32 writes a zigzag-encoded 32-bit integer +// to the Buffer. +// This is the format used for the sint32 protocol buffer type. +func (p *Buffer) EncodeZigzag32(x uint64) error { + // use signed number to get arithmetic right shift. + return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31)))) +} + +func sizeZigzag32(x uint64) int { + return sizeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31)))) +} + +// EncodeRawBytes writes a count-delimited byte buffer to the Buffer. 
+// This is the format used for the bytes protocol buffer +// type and for embedded messages. +func (p *Buffer) EncodeRawBytes(b []byte) error { + p.EncodeVarint(uint64(len(b))) + p.buf = append(p.buf, b...) + return nil +} + +func sizeRawBytes(b []byte) int { + return sizeVarint(uint64(len(b))) + + len(b) +} + +// EncodeStringBytes writes an encoded string to the Buffer. +// This is the format used for the proto2 string type. +func (p *Buffer) EncodeStringBytes(s string) error { + p.EncodeVarint(uint64(len(s))) + p.buf = append(p.buf, s...) + return nil +} + +func sizeStringBytes(s string) int { + return sizeVarint(uint64(len(s))) + + len(s) +} + +// Marshaler is the interface representing objects that can marshal themselves. +type Marshaler interface { + Marshal() ([]byte, error) +} + +// Marshal takes the protocol buffer +// and encodes it into the wire format, returning the data. +func Marshal(pb Message) ([]byte, error) { + // Can the object marshal itself? + if m, ok := pb.(Marshaler); ok { + return m.Marshal() + } + p := NewBuffer(nil) + err := p.Marshal(pb) + var state errorState + if err != nil && !state.shouldContinue(err, nil) { + return nil, err + } + if p.buf == nil && err == nil { + // Return a non-nil slice on success. + return []byte{}, nil + } + return p.buf, err +} + +// Marshal takes the protocol buffer +// and encodes it into the wire format, writing the result to the +// Buffer. +func (p *Buffer) Marshal(pb Message) error { + // Can the object marshal itself? + if m, ok := pb.(Marshaler); ok { + data, err := m.Marshal() + if err != nil { + return err + } + p.buf = append(p.buf, data...) + return nil + } + + t, base, err := getbase(pb) + if structPointer_IsNil(base) { + return ErrNil + } + if err == nil { + err = p.enc_struct(GetProperties(t.Elem()), base) + } + + if collectStats { + stats.Encode++ + } + + return err +} + +// Size returns the encoded size of a protocol buffer. +func Size(pb Message) (n int) { + // Can the object marshal itself? 
If so, Size is slow. + // TODO: add Size to Marshaler, or add a Sizer interface. + if m, ok := pb.(Marshaler); ok { + b, _ := m.Marshal() + return len(b) + } + + t, base, err := getbase(pb) + if structPointer_IsNil(base) { + return 0 + } + if err == nil { + n = size_struct(GetProperties(t.Elem()), base) + } + + if collectStats { + stats.Size++ + } + + return +} + +// Individual type encoders. + +// Encode a bool. +func (o *Buffer) enc_bool(p *Properties, base structPointer) error { + v := *structPointer_Bool(base, p.field) + if v == nil { + return ErrNil + } + x := 0 + if *v { + x = 1 + } + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, uint64(x)) + return nil +} + +func (o *Buffer) enc_proto3_bool(p *Properties, base structPointer) error { + v := *structPointer_BoolVal(base, p.field) + if !v { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, 1) + return nil +} + +func size_bool(p *Properties, base structPointer) int { + v := *structPointer_Bool(base, p.field) + if v == nil { + return 0 + } + return len(p.tagcode) + 1 // each bool takes exactly one byte +} + +func size_proto3_bool(p *Properties, base structPointer) int { + v := *structPointer_BoolVal(base, p.field) + if !v { + return 0 + } + return len(p.tagcode) + 1 // each bool takes exactly one byte +} + +// Encode an int32. +func (o *Buffer) enc_int32(p *Properties, base structPointer) error { + v := structPointer_Word32(base, p.field) + if word32_IsNil(v) { + return ErrNil + } + x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, uint64(x)) + return nil +} + +func (o *Buffer) enc_proto3_int32(p *Properties, base structPointer) error { + v := structPointer_Word32Val(base, p.field) + x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range + if x == 0 { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) 
+ p.valEnc(o, uint64(x)) + return nil +} + +func size_int32(p *Properties, base structPointer) (n int) { + v := structPointer_Word32(base, p.field) + if word32_IsNil(v) { + return 0 + } + x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range + n += len(p.tagcode) + n += p.valSize(uint64(x)) + return +} + +func size_proto3_int32(p *Properties, base structPointer) (n int) { + v := structPointer_Word32Val(base, p.field) + x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range + if x == 0 { + return 0 + } + n += len(p.tagcode) + n += p.valSize(uint64(x)) + return +} + +// Encode a uint32. +// Exactly the same as int32, except for no sign extension. +func (o *Buffer) enc_uint32(p *Properties, base structPointer) error { + v := structPointer_Word32(base, p.field) + if word32_IsNil(v) { + return ErrNil + } + x := word32_Get(v) + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, uint64(x)) + return nil +} + +func (o *Buffer) enc_proto3_uint32(p *Properties, base structPointer) error { + v := structPointer_Word32Val(base, p.field) + x := word32Val_Get(v) + if x == 0 { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, uint64(x)) + return nil +} + +func size_uint32(p *Properties, base structPointer) (n int) { + v := structPointer_Word32(base, p.field) + if word32_IsNil(v) { + return 0 + } + x := word32_Get(v) + n += len(p.tagcode) + n += p.valSize(uint64(x)) + return +} + +func size_proto3_uint32(p *Properties, base structPointer) (n int) { + v := structPointer_Word32Val(base, p.field) + x := word32Val_Get(v) + if x == 0 { + return 0 + } + n += len(p.tagcode) + n += p.valSize(uint64(x)) + return +} + +// Encode an int64. +func (o *Buffer) enc_int64(p *Properties, base structPointer) error { + v := structPointer_Word64(base, p.field) + if word64_IsNil(v) { + return ErrNil + } + x := word64_Get(v) + o.buf = append(o.buf, p.tagcode...) 
+ p.valEnc(o, x) + return nil +} + +func (o *Buffer) enc_proto3_int64(p *Properties, base structPointer) error { + v := structPointer_Word64Val(base, p.field) + x := word64Val_Get(v) + if x == 0 { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, x) + return nil +} + +func size_int64(p *Properties, base structPointer) (n int) { + v := structPointer_Word64(base, p.field) + if word64_IsNil(v) { + return 0 + } + x := word64_Get(v) + n += len(p.tagcode) + n += p.valSize(x) + return +} + +func size_proto3_int64(p *Properties, base structPointer) (n int) { + v := structPointer_Word64Val(base, p.field) + x := word64Val_Get(v) + if x == 0 { + return 0 + } + n += len(p.tagcode) + n += p.valSize(x) + return +} + +// Encode a string. +func (o *Buffer) enc_string(p *Properties, base structPointer) error { + v := *structPointer_String(base, p.field) + if v == nil { + return ErrNil + } + x := *v + o.buf = append(o.buf, p.tagcode...) + o.EncodeStringBytes(x) + return nil +} + +func (o *Buffer) enc_proto3_string(p *Properties, base structPointer) error { + v := *structPointer_StringVal(base, p.field) + if v == "" { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeStringBytes(v) + return nil +} + +func size_string(p *Properties, base structPointer) (n int) { + v := *structPointer_String(base, p.field) + if v == nil { + return 0 + } + x := *v + n += len(p.tagcode) + n += sizeStringBytes(x) + return +} + +func size_proto3_string(p *Properties, base structPointer) (n int) { + v := *structPointer_StringVal(base, p.field) + if v == "" { + return 0 + } + n += len(p.tagcode) + n += sizeStringBytes(v) + return +} + +// All protocol buffer fields are nillable, but be careful. +func isNil(v reflect.Value) bool { + switch v.Kind() { + case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + return v.IsNil() + } + return false +} + +// Encode a message struct. 
+func (o *Buffer) enc_struct_message(p *Properties, base structPointer) error { + var state errorState + structp := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(structp) { + return ErrNil + } + + // Can the object marshal itself? + if p.isMarshaler { + m := structPointer_Interface(structp, p.stype).(Marshaler) + data, err := m.Marshal() + if err != nil && !state.shouldContinue(err, nil) { + return err + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(data) + return nil + } + + o.buf = append(o.buf, p.tagcode...) + return o.enc_len_struct(p.sprop, structp, &state) +} + +func size_struct_message(p *Properties, base structPointer) int { + structp := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(structp) { + return 0 + } + + // Can the object marshal itself? + if p.isMarshaler { + m := structPointer_Interface(structp, p.stype).(Marshaler) + data, _ := m.Marshal() + n0 := len(p.tagcode) + n1 := sizeRawBytes(data) + return n0 + n1 + } + + n0 := len(p.tagcode) + n1 := size_struct(p.sprop, structp) + n2 := sizeVarint(uint64(n1)) // size of encoded length + return n0 + n1 + n2 +} + +// Encode a group struct. +func (o *Buffer) enc_struct_group(p *Properties, base structPointer) error { + var state errorState + b := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(b) { + return ErrNil + } + + o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup)) + err := o.enc_struct(p.sprop, b) + if err != nil && !state.shouldContinue(err, nil) { + return err + } + o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup)) + return state.err +} + +func size_struct_group(p *Properties, base structPointer) (n int) { + b := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(b) { + return 0 + } + + n += sizeVarint(uint64((p.Tag << 3) | WireStartGroup)) + n += size_struct(p.sprop, b) + n += sizeVarint(uint64((p.Tag << 3) | WireEndGroup)) + return +} + +// Encode a slice of bools ([]bool). 
+func (o *Buffer) enc_slice_bool(p *Properties, base structPointer) error { + s := *structPointer_BoolSlice(base, p.field) + l := len(s) + if l == 0 { + return ErrNil + } + for _, x := range s { + o.buf = append(o.buf, p.tagcode...) + v := uint64(0) + if x { + v = 1 + } + p.valEnc(o, v) + } + return nil +} + +func size_slice_bool(p *Properties, base structPointer) int { + s := *structPointer_BoolSlice(base, p.field) + l := len(s) + if l == 0 { + return 0 + } + return l * (len(p.tagcode) + 1) // each bool takes exactly one byte +} + +// Encode a slice of bools ([]bool) in packed format. +func (o *Buffer) enc_slice_packed_bool(p *Properties, base structPointer) error { + s := *structPointer_BoolSlice(base, p.field) + l := len(s) + if l == 0 { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeVarint(uint64(l)) // each bool takes exactly one byte + for _, x := range s { + v := uint64(0) + if x { + v = 1 + } + p.valEnc(o, v) + } + return nil +} + +func size_slice_packed_bool(p *Properties, base structPointer) (n int) { + s := *structPointer_BoolSlice(base, p.field) + l := len(s) + if l == 0 { + return 0 + } + n += len(p.tagcode) + n += sizeVarint(uint64(l)) + n += l // each bool takes exactly one byte + return +} + +// Encode a slice of bytes ([]byte). +func (o *Buffer) enc_slice_byte(p *Properties, base structPointer) error { + s := *structPointer_Bytes(base, p.field) + if s == nil { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(s) + return nil +} + +func (o *Buffer) enc_proto3_slice_byte(p *Properties, base structPointer) error { + s := *structPointer_Bytes(base, p.field) + if len(s) == 0 { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) 
+ o.EncodeRawBytes(s) + return nil +} + +func size_slice_byte(p *Properties, base structPointer) (n int) { + s := *structPointer_Bytes(base, p.field) + if s == nil { + return 0 + } + n += len(p.tagcode) + n += sizeRawBytes(s) + return +} + +func size_proto3_slice_byte(p *Properties, base structPointer) (n int) { + s := *structPointer_Bytes(base, p.field) + if len(s) == 0 { + return 0 + } + n += len(p.tagcode) + n += sizeRawBytes(s) + return +} + +// Encode a slice of int32s ([]int32). +func (o *Buffer) enc_slice_int32(p *Properties, base structPointer) error { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return ErrNil + } + for i := 0; i < l; i++ { + o.buf = append(o.buf, p.tagcode...) + x := int32(s.Index(i)) // permit sign extension to use full 64-bit range + p.valEnc(o, uint64(x)) + } + return nil +} + +func size_slice_int32(p *Properties, base structPointer) (n int) { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return 0 + } + for i := 0; i < l; i++ { + n += len(p.tagcode) + x := int32(s.Index(i)) // permit sign extension to use full 64-bit range + n += p.valSize(uint64(x)) + } + return +} + +// Encode a slice of int32s ([]int32) in packed format. +func (o *Buffer) enc_slice_packed_int32(p *Properties, base structPointer) error { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return ErrNil + } + // TODO: Reuse a Buffer. + buf := NewBuffer(nil) + for i := 0; i < l; i++ { + x := int32(s.Index(i)) // permit sign extension to use full 64-bit range + p.valEnc(buf, uint64(x)) + } + + o.buf = append(o.buf, p.tagcode...) + o.EncodeVarint(uint64(len(buf.buf))) + o.buf = append(o.buf, buf.buf...) 
+ return nil +} + +func size_slice_packed_int32(p *Properties, base structPointer) (n int) { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return 0 + } + var bufSize int + for i := 0; i < l; i++ { + x := int32(s.Index(i)) // permit sign extension to use full 64-bit range + bufSize += p.valSize(uint64(x)) + } + + n += len(p.tagcode) + n += sizeVarint(uint64(bufSize)) + n += bufSize + return +} + +// Encode a slice of uint32s ([]uint32). +// Exactly the same as int32, except for no sign extension. +func (o *Buffer) enc_slice_uint32(p *Properties, base structPointer) error { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return ErrNil + } + for i := 0; i < l; i++ { + o.buf = append(o.buf, p.tagcode...) + x := s.Index(i) + p.valEnc(o, uint64(x)) + } + return nil +} + +func size_slice_uint32(p *Properties, base structPointer) (n int) { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return 0 + } + for i := 0; i < l; i++ { + n += len(p.tagcode) + x := s.Index(i) + n += p.valSize(uint64(x)) + } + return +} + +// Encode a slice of uint32s ([]uint32) in packed format. +// Exactly the same as int32, except for no sign extension. +func (o *Buffer) enc_slice_packed_uint32(p *Properties, base structPointer) error { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return ErrNil + } + // TODO: Reuse a Buffer. + buf := NewBuffer(nil) + for i := 0; i < l; i++ { + p.valEnc(buf, uint64(s.Index(i))) + } + + o.buf = append(o.buf, p.tagcode...) + o.EncodeVarint(uint64(len(buf.buf))) + o.buf = append(o.buf, buf.buf...) 
+ return nil +} + +func size_slice_packed_uint32(p *Properties, base structPointer) (n int) { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return 0 + } + var bufSize int + for i := 0; i < l; i++ { + bufSize += p.valSize(uint64(s.Index(i))) + } + + n += len(p.tagcode) + n += sizeVarint(uint64(bufSize)) + n += bufSize + return +} + +// Encode a slice of int64s ([]int64). +func (o *Buffer) enc_slice_int64(p *Properties, base structPointer) error { + s := structPointer_Word64Slice(base, p.field) + l := s.Len() + if l == 0 { + return ErrNil + } + for i := 0; i < l; i++ { + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, s.Index(i)) + } + return nil +} + +func size_slice_int64(p *Properties, base structPointer) (n int) { + s := structPointer_Word64Slice(base, p.field) + l := s.Len() + if l == 0 { + return 0 + } + for i := 0; i < l; i++ { + n += len(p.tagcode) + n += p.valSize(s.Index(i)) + } + return +} + +// Encode a slice of int64s ([]int64) in packed format. +func (o *Buffer) enc_slice_packed_int64(p *Properties, base structPointer) error { + s := structPointer_Word64Slice(base, p.field) + l := s.Len() + if l == 0 { + return ErrNil + } + // TODO: Reuse a Buffer. + buf := NewBuffer(nil) + for i := 0; i < l; i++ { + p.valEnc(buf, s.Index(i)) + } + + o.buf = append(o.buf, p.tagcode...) + o.EncodeVarint(uint64(len(buf.buf))) + o.buf = append(o.buf, buf.buf...) + return nil +} + +func size_slice_packed_int64(p *Properties, base structPointer) (n int) { + s := structPointer_Word64Slice(base, p.field) + l := s.Len() + if l == 0 { + return 0 + } + var bufSize int + for i := 0; i < l; i++ { + bufSize += p.valSize(s.Index(i)) + } + + n += len(p.tagcode) + n += sizeVarint(uint64(bufSize)) + n += bufSize + return +} + +// Encode a slice of slice of bytes ([][]byte). 
+func (o *Buffer) enc_slice_slice_byte(p *Properties, base structPointer) error { + ss := *structPointer_BytesSlice(base, p.field) + l := len(ss) + if l == 0 { + return ErrNil + } + for i := 0; i < l; i++ { + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(ss[i]) + } + return nil +} + +func size_slice_slice_byte(p *Properties, base structPointer) (n int) { + ss := *structPointer_BytesSlice(base, p.field) + l := len(ss) + if l == 0 { + return 0 + } + n += l * len(p.tagcode) + for i := 0; i < l; i++ { + n += sizeRawBytes(ss[i]) + } + return +} + +// Encode a slice of strings ([]string). +func (o *Buffer) enc_slice_string(p *Properties, base structPointer) error { + ss := *structPointer_StringSlice(base, p.field) + l := len(ss) + for i := 0; i < l; i++ { + o.buf = append(o.buf, p.tagcode...) + o.EncodeStringBytes(ss[i]) + } + return nil +} + +func size_slice_string(p *Properties, base structPointer) (n int) { + ss := *structPointer_StringSlice(base, p.field) + l := len(ss) + n += l * len(p.tagcode) + for i := 0; i < l; i++ { + n += sizeStringBytes(ss[i]) + } + return +} + +// Encode a slice of message structs ([]*struct). +func (o *Buffer) enc_slice_struct_message(p *Properties, base structPointer) error { + var state errorState + s := structPointer_StructPointerSlice(base, p.field) + l := s.Len() + + for i := 0; i < l; i++ { + structp := s.Index(i) + if structPointer_IsNil(structp) { + return errRepeatedHasNil + } + + // Can the object marshal itself? + if p.isMarshaler { + m := structPointer_Interface(structp, p.stype).(Marshaler) + data, err := m.Marshal() + if err != nil && !state.shouldContinue(err, nil) { + return err + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(data) + continue + } + + o.buf = append(o.buf, p.tagcode...) 
+ err := o.enc_len_struct(p.sprop, structp, &state) + if err != nil && !state.shouldContinue(err, nil) { + if err == ErrNil { + return errRepeatedHasNil + } + return err + } + } + return state.err +} + +func size_slice_struct_message(p *Properties, base structPointer) (n int) { + s := structPointer_StructPointerSlice(base, p.field) + l := s.Len() + n += l * len(p.tagcode) + for i := 0; i < l; i++ { + structp := s.Index(i) + if structPointer_IsNil(structp) { + return // return the size up to this point + } + + // Can the object marshal itself? + if p.isMarshaler { + m := structPointer_Interface(structp, p.stype).(Marshaler) + data, _ := m.Marshal() + n += len(p.tagcode) + n += sizeRawBytes(data) + continue + } + + n0 := size_struct(p.sprop, structp) + n1 := sizeVarint(uint64(n0)) // size of encoded length + n += n0 + n1 + } + return +} + +// Encode a slice of group structs ([]*struct). +func (o *Buffer) enc_slice_struct_group(p *Properties, base structPointer) error { + var state errorState + s := structPointer_StructPointerSlice(base, p.field) + l := s.Len() + + for i := 0; i < l; i++ { + b := s.Index(i) + if structPointer_IsNil(b) { + return errRepeatedHasNil + } + + o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup)) + + err := o.enc_struct(p.sprop, b) + + if err != nil && !state.shouldContinue(err, nil) { + if err == ErrNil { + return errRepeatedHasNil + } + return err + } + + o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup)) + } + return state.err +} + +func size_slice_struct_group(p *Properties, base structPointer) (n int) { + s := structPointer_StructPointerSlice(base, p.field) + l := s.Len() + + n += l * sizeVarint(uint64((p.Tag<<3)|WireStartGroup)) + n += l * sizeVarint(uint64((p.Tag<<3)|WireEndGroup)) + for i := 0; i < l; i++ { + b := s.Index(i) + if structPointer_IsNil(b) { + return // return size up to this point + } + + n += size_struct(p.sprop, b) + } + return +} + +// Encode an extension map. 
+func (o *Buffer) enc_map(p *Properties, base structPointer) error { + v := *structPointer_ExtMap(base, p.field) + if err := encodeExtensionMap(v); err != nil { + return err + } + // Fast-path for common cases: zero or one extensions. + if len(v) <= 1 { + for _, e := range v { + o.buf = append(o.buf, e.enc...) + } + return nil + } + + // Sort keys to provide a deterministic encoding. + keys := make([]int, 0, len(v)) + for k := range v { + keys = append(keys, int(k)) + } + sort.Ints(keys) + + for _, k := range keys { + o.buf = append(o.buf, v[int32(k)].enc...) + } + return nil +} + +func size_map(p *Properties, base structPointer) int { + v := *structPointer_ExtMap(base, p.field) + return sizeExtensionMap(v) +} + +// Encode a map field. +func (o *Buffer) enc_new_map(p *Properties, base structPointer) error { + var state errorState // XXX: or do we need to plumb this through? + + /* + A map defined as + map map_field = N; + is encoded in the same way as + message MapFieldEntry { + key_type key = 1; + value_type value = 2; + } + repeated MapFieldEntry map_field = N; + */ + + v := structPointer_Map(base, p.field, p.mtype).Elem() // map[K]V + if v.Len() == 0 { + return nil + } + + keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype) + + enc := func() error { + if err := p.mkeyprop.enc(o, p.mkeyprop, keybase); err != nil { + return err + } + if err := p.mvalprop.enc(o, p.mvalprop, valbase); err != nil { + return err + } + return nil + } + + keys := v.MapKeys() + sort.Sort(mapKeys(keys)) + for _, key := range keys { + val := v.MapIndex(key) + + // The only illegal map entry values are nil message pointers. + if val.Kind() == reflect.Ptr && val.IsNil() { + return errors.New("proto: map has nil element") + } + + keycopy.Set(key) + valcopy.Set(val) + + o.buf = append(o.buf, p.tagcode...) 
+ if err := o.enc_len_thing(enc, &state); err != nil { + return err + } + } + return nil +} + +func size_new_map(p *Properties, base structPointer) int { + v := structPointer_Map(base, p.field, p.mtype).Elem() // map[K]V + + keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype) + + n := 0 + for _, key := range v.MapKeys() { + val := v.MapIndex(key) + keycopy.Set(key) + valcopy.Set(val) + + // Tag codes for key and val are the responsibility of the sub-sizer. + keysize := p.mkeyprop.size(p.mkeyprop, keybase) + valsize := p.mvalprop.size(p.mvalprop, valbase) + entry := keysize + valsize + // Add on tag code and length of map entry itself. + n += len(p.tagcode) + sizeVarint(uint64(entry)) + entry + } + return n +} + +// mapEncodeScratch returns a new reflect.Value matching the map's value type, +// and a structPointer suitable for passing to an encoder or sizer. +func mapEncodeScratch(mapType reflect.Type) (keycopy, valcopy reflect.Value, keybase, valbase structPointer) { + // Prepare addressable doubly-indirect placeholders for the key and value types. + // This is needed because the element-type encoders expect **T, but the map iteration produces T. + + keycopy = reflect.New(mapType.Key()).Elem() // addressable K + keyptr := reflect.New(reflect.PtrTo(keycopy.Type())).Elem() // addressable *K + keyptr.Set(keycopy.Addr()) // + keybase = toStructPointer(keyptr.Addr()) // **K + + // Value types are more varied and require special handling. + switch mapType.Elem().Kind() { + case reflect.Slice: + // []byte + var dummy []byte + valcopy = reflect.ValueOf(&dummy).Elem() // addressable []byte + valbase = toStructPointer(valcopy.Addr()) + case reflect.Ptr: + // message; the generated field type is map[K]*Msg (so V is *Msg), + // so we only need one level of indirection. 
+ valcopy = reflect.New(mapType.Elem()).Elem() // addressable V + valbase = toStructPointer(valcopy.Addr()) + default: + // everything else + valcopy = reflect.New(mapType.Elem()).Elem() // addressable V + valptr := reflect.New(reflect.PtrTo(valcopy.Type())).Elem() // addressable *V + valptr.Set(valcopy.Addr()) // + valbase = toStructPointer(valptr.Addr()) // **V + } + return +} + +// Encode a struct. +func (o *Buffer) enc_struct(prop *StructProperties, base structPointer) error { + var state errorState + // Encode fields in tag order so that decoders may use optimizations + // that depend on the ordering. + // https://developers.google.com/protocol-buffers/docs/encoding#order + for _, i := range prop.order { + p := prop.Prop[i] + if p.enc != nil { + err := p.enc(o, p, base) + if err != nil { + if err == ErrNil { + if p.Required && state.err == nil { + state.err = &RequiredNotSetError{p.Name} + } + } else if err == errRepeatedHasNil { + // Give more context to nil values in repeated fields. + return errors.New("repeated field " + p.OrigName + " has nil element") + } else if !state.shouldContinue(err, p) { + return err + } + } + } + } + + // Add unrecognized fields at the end. + if prop.unrecField.IsValid() { + v := *structPointer_Bytes(base, prop.unrecField) + if len(v) > 0 { + o.buf = append(o.buf, v...) + } + } + + return state.err +} + +func size_struct(prop *StructProperties, base structPointer) (n int) { + for _, i := range prop.order { + p := prop.Prop[i] + if p.size != nil { + n += p.size(p, base) + } + } + + // Add unrecognized fields at the end. + if prop.unrecField.IsValid() { + v := *structPointer_Bytes(base, prop.unrecField) + n += len(v) + } + + return +} + +var zeroes [20]byte // longer than any conceivable sizeVarint + +// Encode a struct, preceded by its encoded length (as a varint). 
+func (o *Buffer) enc_len_struct(prop *StructProperties, base structPointer, state *errorState) error { + return o.enc_len_thing(func() error { return o.enc_struct(prop, base) }, state) +} + +// Encode something, preceded by its encoded length (as a varint). +func (o *Buffer) enc_len_thing(enc func() error, state *errorState) error { + iLen := len(o.buf) + o.buf = append(o.buf, 0, 0, 0, 0) // reserve four bytes for length + iMsg := len(o.buf) + err := enc() + if err != nil && !state.shouldContinue(err, nil) { + return err + } + lMsg := len(o.buf) - iMsg + lLen := sizeVarint(uint64(lMsg)) + switch x := lLen - (iMsg - iLen); { + case x > 0: // actual length is x bytes larger than the space we reserved + // Move msg x bytes right. + o.buf = append(o.buf, zeroes[:x]...) + copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg]) + case x < 0: // actual length is x bytes smaller than the space we reserved + // Move msg x bytes left. + copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg]) + o.buf = o.buf[:len(o.buf)+x] // x is negative + } + // Encode the length in the reserved space. + o.buf = o.buf[:iLen] + o.EncodeVarint(uint64(lMsg)) + o.buf = o.buf[:len(o.buf)+lMsg] + return state.err +} + +// errorState maintains the first error that occurs and updates that error +// with additional context. +type errorState struct { + err error +} + +// shouldContinue reports whether encoding should continue upon encountering the +// given error. If the error is RequiredNotSetError, shouldContinue returns true +// and, if this is the first appearance of that error, remembers it for future +// reporting. +// +// If prop is not nil, it may update any error with additional context about the +// field with the error. +func (s *errorState) shouldContinue(err error, prop *Properties) bool { + // Ignore unset required fields. + reqNotSet, ok := err.(*RequiredNotSetError) + if !ok { + return false + } + if s.err == nil { + if prop != nil { + err = &RequiredNotSetError{prop.Name + "." 
+ reqNotSet.field} + } + s.err = err + } + return true +} diff -Nru juju-core-2.0~beta15/src/github.com/golang/protobuf/proto/equal.go juju-core-2.0.0/src/github.com/golang/protobuf/proto/equal.go --- juju-core-2.0~beta15/src/github.com/golang/protobuf/proto/equal.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/golang/protobuf/proto/equal.go 2016-10-13 14:32:09.000000000 +0000 @@ -0,0 +1,256 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2011 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Protocol buffer comparison. +// TODO: MessageSet. + +package proto + +import ( + "bytes" + "log" + "reflect" + "strings" +) + +/* +Equal returns true iff protocol buffers a and b are equal. +The arguments must both be pointers to protocol buffer structs. + +Equality is defined in this way: + - Two messages are equal iff they are the same type, + corresponding fields are equal, unknown field sets + are equal, and extensions sets are equal. + - Two set scalar fields are equal iff their values are equal. + If the fields are of a floating-point type, remember that + NaN != x for all x, including NaN. + - Two repeated fields are equal iff their lengths are the same, + and their corresponding elements are equal (a "bytes" field, + although represented by []byte, is not a repeated field) + - Two unset fields are equal. + - Two unknown field sets are equal if their current + encoded state is equal. + - Two extension sets are equal iff they have corresponding + elements that are pairwise equal. + - Every other combination of things are not equal. + +The return value is undefined if a and b are not protocol buffers. 
+*/ +func Equal(a, b Message) bool { + if a == nil || b == nil { + return a == b + } + v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b) + if v1.Type() != v2.Type() { + return false + } + if v1.Kind() == reflect.Ptr { + if v1.IsNil() { + return v2.IsNil() + } + if v2.IsNil() { + return false + } + v1, v2 = v1.Elem(), v2.Elem() + } + if v1.Kind() != reflect.Struct { + return false + } + return equalStruct(v1, v2) +} + +// v1 and v2 are known to have the same type. +func equalStruct(v1, v2 reflect.Value) bool { + for i := 0; i < v1.NumField(); i++ { + f := v1.Type().Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + f1, f2 := v1.Field(i), v2.Field(i) + if f.Type.Kind() == reflect.Ptr { + if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 { + // both unset + continue + } else if n1 != n2 { + // set/unset mismatch + return false + } + b1, ok := f1.Interface().(raw) + if ok { + b2 := f2.Interface().(raw) + // RawMessage + if !bytes.Equal(b1.Bytes(), b2.Bytes()) { + return false + } + continue + } + f1, f2 = f1.Elem(), f2.Elem() + } + if !equalAny(f1, f2) { + return false + } + } + + if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() { + em2 := v2.FieldByName("XXX_extensions") + if !equalExtensions(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) { + return false + } + } + + uf := v1.FieldByName("XXX_unrecognized") + if !uf.IsValid() { + return true + } + + u1 := uf.Bytes() + u2 := v2.FieldByName("XXX_unrecognized").Bytes() + if !bytes.Equal(u1, u2) { + return false + } + + return true +} + +// v1 and v2 are known to have the same type. 
+func equalAny(v1, v2 reflect.Value) bool { + if v1.Type() == protoMessageType { + m1, _ := v1.Interface().(Message) + m2, _ := v2.Interface().(Message) + return Equal(m1, m2) + } + switch v1.Kind() { + case reflect.Bool: + return v1.Bool() == v2.Bool() + case reflect.Float32, reflect.Float64: + return v1.Float() == v2.Float() + case reflect.Int32, reflect.Int64: + return v1.Int() == v2.Int() + case reflect.Map: + if v1.Len() != v2.Len() { + return false + } + for _, key := range v1.MapKeys() { + val2 := v2.MapIndex(key) + if !val2.IsValid() { + // This key was not found in the second map. + return false + } + if !equalAny(v1.MapIndex(key), val2) { + return false + } + } + return true + case reflect.Ptr: + return equalAny(v1.Elem(), v2.Elem()) + case reflect.Slice: + if v1.Type().Elem().Kind() == reflect.Uint8 { + // short circuit: []byte + if v1.IsNil() != v2.IsNil() { + return false + } + return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte)) + } + + if v1.Len() != v2.Len() { + return false + } + for i := 0; i < v1.Len(); i++ { + if !equalAny(v1.Index(i), v2.Index(i)) { + return false + } + } + return true + case reflect.String: + return v1.Interface().(string) == v2.Interface().(string) + case reflect.Struct: + return equalStruct(v1, v2) + case reflect.Uint32, reflect.Uint64: + return v1.Uint() == v2.Uint() + } + + // unknown type, so not a protocol buffer + log.Printf("proto: don't know how to compare %v", v1) + return false +} + +// base is the struct type that the extensions are based on. +// em1 and em2 are extension maps. +func equalExtensions(base reflect.Type, em1, em2 map[int32]Extension) bool { + if len(em1) != len(em2) { + return false + } + + for extNum, e1 := range em1 { + e2, ok := em2[extNum] + if !ok { + return false + } + + m1, m2 := e1.value, e2.value + + if m1 != nil && m2 != nil { + // Both are unencoded. + if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2)) { + return false + } + continue + } + + // At least one is encoded. 
To do a semantically correct comparison + // we need to unmarshal them first. + var desc *ExtensionDesc + if m := extensionMaps[base]; m != nil { + desc = m[extNum] + } + if desc == nil { + log.Printf("proto: don't know how to compare extension %d of %v", extNum, base) + continue + } + var err error + if m1 == nil { + m1, err = decodeExtension(e1.enc, desc) + } + if m2 == nil && err == nil { + m2, err = decodeExtension(e2.enc, desc) + } + if err != nil { + // The encoded form is invalid. + log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err) + return false + } + if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2)) { + return false + } + } + + return true +} diff -Nru juju-core-2.0~beta15/src/github.com/golang/protobuf/proto/equal_test.go juju-core-2.0.0/src/github.com/golang/protobuf/proto/equal_test.go --- juju-core-2.0~beta15/src/github.com/golang/protobuf/proto/equal_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/golang/protobuf/proto/equal_test.go 2016-10-13 14:32:09.000000000 +0000 @@ -0,0 +1,191 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2011 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto_test + +import ( + "testing" + + . "github.com/golang/protobuf/proto" + pb "github.com/golang/protobuf/proto/testdata" +) + +// Four identical base messages. +// The init function adds extensions to some of them. +var messageWithoutExtension = &pb.MyMessage{Count: Int32(7)} +var messageWithExtension1a = &pb.MyMessage{Count: Int32(7)} +var messageWithExtension1b = &pb.MyMessage{Count: Int32(7)} +var messageWithExtension2 = &pb.MyMessage{Count: Int32(7)} + +// Two messages with non-message extensions. +var messageWithInt32Extension1 = &pb.MyMessage{Count: Int32(8)} +var messageWithInt32Extension2 = &pb.MyMessage{Count: Int32(8)} + +func init() { + ext1 := &pb.Ext{Data: String("Kirk")} + ext2 := &pb.Ext{Data: String("Picard")} + + // messageWithExtension1a has ext1, but never marshals it. + if err := SetExtension(messageWithExtension1a, pb.E_Ext_More, ext1); err != nil { + panic("SetExtension on 1a failed: " + err.Error()) + } + + // messageWithExtension1b is the unmarshaled form of messageWithExtension1a. 
+ if err := SetExtension(messageWithExtension1b, pb.E_Ext_More, ext1); err != nil { + panic("SetExtension on 1b failed: " + err.Error()) + } + buf, err := Marshal(messageWithExtension1b) + if err != nil { + panic("Marshal of 1b failed: " + err.Error()) + } + messageWithExtension1b.Reset() + if err := Unmarshal(buf, messageWithExtension1b); err != nil { + panic("Unmarshal of 1b failed: " + err.Error()) + } + + // messageWithExtension2 has ext2. + if err := SetExtension(messageWithExtension2, pb.E_Ext_More, ext2); err != nil { + panic("SetExtension on 2 failed: " + err.Error()) + } + + if err := SetExtension(messageWithInt32Extension1, pb.E_Ext_Number, Int32(23)); err != nil { + panic("SetExtension on Int32-1 failed: " + err.Error()) + } + if err := SetExtension(messageWithInt32Extension1, pb.E_Ext_Number, Int32(24)); err != nil { + panic("SetExtension on Int32-2 failed: " + err.Error()) + } +} + +var EqualTests = []struct { + desc string + a, b Message + exp bool +}{ + {"different types", &pb.GoEnum{}, &pb.GoTestField{}, false}, + {"equal empty", &pb.GoEnum{}, &pb.GoEnum{}, true}, + {"nil vs nil", nil, nil, true}, + {"typed nil vs typed nil", (*pb.GoEnum)(nil), (*pb.GoEnum)(nil), true}, + {"typed nil vs empty", (*pb.GoEnum)(nil), &pb.GoEnum{}, false}, + {"different typed nil", (*pb.GoEnum)(nil), (*pb.GoTestField)(nil), false}, + + {"one set field, one unset field", &pb.GoTestField{Label: String("foo")}, &pb.GoTestField{}, false}, + {"one set field zero, one unset field", &pb.GoTest{Param: Int32(0)}, &pb.GoTest{}, false}, + {"different set fields", &pb.GoTestField{Label: String("foo")}, &pb.GoTestField{Label: String("bar")}, false}, + {"equal set", &pb.GoTestField{Label: String("foo")}, &pb.GoTestField{Label: String("foo")}, true}, + + {"repeated, one set", &pb.GoTest{F_Int32Repeated: []int32{2, 3}}, &pb.GoTest{}, false}, + {"repeated, different length", &pb.GoTest{F_Int32Repeated: []int32{2, 3}}, &pb.GoTest{F_Int32Repeated: []int32{2}}, false}, + {"repeated, 
different value", &pb.GoTest{F_Int32Repeated: []int32{2}}, &pb.GoTest{F_Int32Repeated: []int32{3}}, false}, + {"repeated, equal", &pb.GoTest{F_Int32Repeated: []int32{2, 4}}, &pb.GoTest{F_Int32Repeated: []int32{2, 4}}, true}, + {"repeated, nil equal nil", &pb.GoTest{F_Int32Repeated: nil}, &pb.GoTest{F_Int32Repeated: nil}, true}, + {"repeated, nil equal empty", &pb.GoTest{F_Int32Repeated: nil}, &pb.GoTest{F_Int32Repeated: []int32{}}, true}, + {"repeated, empty equal nil", &pb.GoTest{F_Int32Repeated: []int32{}}, &pb.GoTest{F_Int32Repeated: nil}, true}, + + { + "nested, different", + &pb.GoTest{RequiredField: &pb.GoTestField{Label: String("foo")}}, + &pb.GoTest{RequiredField: &pb.GoTestField{Label: String("bar")}}, + false, + }, + { + "nested, equal", + &pb.GoTest{RequiredField: &pb.GoTestField{Label: String("wow")}}, + &pb.GoTest{RequiredField: &pb.GoTestField{Label: String("wow")}}, + true, + }, + + {"bytes", &pb.OtherMessage{Value: []byte("foo")}, &pb.OtherMessage{Value: []byte("foo")}, true}, + {"bytes, empty", &pb.OtherMessage{Value: []byte{}}, &pb.OtherMessage{Value: []byte{}}, true}, + {"bytes, empty vs nil", &pb.OtherMessage{Value: []byte{}}, &pb.OtherMessage{Value: nil}, false}, + { + "repeated bytes", + &pb.MyMessage{RepBytes: [][]byte{[]byte("sham"), []byte("wow")}}, + &pb.MyMessage{RepBytes: [][]byte{[]byte("sham"), []byte("wow")}}, + true, + }, + + {"extension vs. no extension", messageWithoutExtension, messageWithExtension1a, false}, + {"extension vs. same extension", messageWithExtension1a, messageWithExtension1b, true}, + {"extension vs. different extension", messageWithExtension1a, messageWithExtension2, false}, + + {"int32 extension vs. itself", messageWithInt32Extension1, messageWithInt32Extension1, true}, + {"int32 extension vs. 
a different int32", messageWithInt32Extension1, messageWithInt32Extension2, false}, + + { + "message with group", + &pb.MyMessage{ + Count: Int32(1), + Somegroup: &pb.MyMessage_SomeGroup{ + GroupField: Int32(5), + }, + }, + &pb.MyMessage{ + Count: Int32(1), + Somegroup: &pb.MyMessage_SomeGroup{ + GroupField: Int32(5), + }, + }, + true, + }, + + { + "map same", + &pb.MessageWithMap{NameMapping: map[int32]string{1: "Ken"}}, + &pb.MessageWithMap{NameMapping: map[int32]string{1: "Ken"}}, + true, + }, + { + "map different entry", + &pb.MessageWithMap{NameMapping: map[int32]string{1: "Ken"}}, + &pb.MessageWithMap{NameMapping: map[int32]string{2: "Rob"}}, + false, + }, + { + "map different key only", + &pb.MessageWithMap{NameMapping: map[int32]string{1: "Ken"}}, + &pb.MessageWithMap{NameMapping: map[int32]string{2: "Ken"}}, + false, + }, + { + "map different value only", + &pb.MessageWithMap{NameMapping: map[int32]string{1: "Ken"}}, + &pb.MessageWithMap{NameMapping: map[int32]string{1: "Rob"}}, + false, + }, +} + +func TestEqual(t *testing.T) { + for _, tc := range EqualTests { + if res := Equal(tc.a, tc.b); res != tc.exp { + t.Errorf("%v: Equal(%v, %v) = %v, want %v", tc.desc, tc.a, tc.b, res, tc.exp) + } + } +} diff -Nru juju-core-2.0~beta15/src/github.com/golang/protobuf/proto/extensions.go juju-core-2.0.0/src/github.com/golang/protobuf/proto/extensions.go --- juju-core-2.0~beta15/src/github.com/golang/protobuf/proto/extensions.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/golang/protobuf/proto/extensions.go 2016-10-13 14:32:09.000000000 +0000 @@ -0,0 +1,400 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. 
+// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Types and routines for supporting protocol buffer extensions. + */ + +import ( + "errors" + "fmt" + "reflect" + "strconv" + "sync" +) + +// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message. +var ErrMissingExtension = errors.New("proto: missing extension") + +// ExtensionRange represents a range of message extensions for a protocol buffer. 
+// Used in code generated by the protocol compiler. +type ExtensionRange struct { + Start, End int32 // both inclusive +} + +// extendableProto is an interface implemented by any protocol buffer that may be extended. +type extendableProto interface { + Message + ExtensionRangeArray() []ExtensionRange + ExtensionMap() map[int32]Extension +} + +var extendableProtoType = reflect.TypeOf((*extendableProto)(nil)).Elem() + +// ExtensionDesc represents an extension specification. +// Used in generated code from the protocol compiler. +type ExtensionDesc struct { + ExtendedType Message // nil pointer to the type that is being extended + ExtensionType interface{} // nil pointer to the extension type + Field int32 // field number + Name string // fully-qualified name of extension, for text formatting + Tag string // protobuf tag style +} + +func (ed *ExtensionDesc) repeated() bool { + t := reflect.TypeOf(ed.ExtensionType) + return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 +} + +// Extension represents an extension in a message. +type Extension struct { + // When an extension is stored in a message using SetExtension + // only desc and value are set. When the message is marshaled + // enc will be set to the encoded form of the message. + // + // When a message is unmarshaled and contains extensions, each + // extension will have only enc set. When such an extension is + // accessed using GetExtension (or GetExtensions) desc and value + // will be set. + desc *ExtensionDesc + value interface{} + enc []byte +} + +// SetRawExtension is for testing only. +func SetRawExtension(base extendableProto, id int32, b []byte) { + base.ExtensionMap()[id] = Extension{enc: b} +} + +// isExtensionField returns true iff the given field number is in an extension range. 
+func isExtensionField(pb extendableProto, field int32) bool { + for _, er := range pb.ExtensionRangeArray() { + if er.Start <= field && field <= er.End { + return true + } + } + return false +} + +// checkExtensionTypes checks that the given extension is valid for pb. +func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error { + // Check the extended type. + if a, b := reflect.TypeOf(pb), reflect.TypeOf(extension.ExtendedType); a != b { + return errors.New("proto: bad extended type; " + b.String() + " does not extend " + a.String()) + } + // Check the range. + if !isExtensionField(pb, extension.Field) { + return errors.New("proto: bad extension number; not in declared ranges") + } + return nil +} + +// extPropKey is sufficient to uniquely identify an extension. +type extPropKey struct { + base reflect.Type + field int32 +} + +var extProp = struct { + sync.RWMutex + m map[extPropKey]*Properties +}{ + m: make(map[extPropKey]*Properties), +} + +func extensionProperties(ed *ExtensionDesc) *Properties { + key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field} + + extProp.RLock() + if prop, ok := extProp.m[key]; ok { + extProp.RUnlock() + return prop + } + extProp.RUnlock() + + extProp.Lock() + defer extProp.Unlock() + // Check again. + if prop, ok := extProp.m[key]; ok { + return prop + } + + prop := new(Properties) + prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil) + extProp.m[key] = prop + return prop +} + +// encodeExtensionMap encodes any unmarshaled (unencoded) extensions in m. +func encodeExtensionMap(m map[int32]Extension) error { + for k, e := range m { + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. 
+ + et := reflect.TypeOf(e.desc.ExtensionType) + props := extensionProperties(e.desc) + + p := NewBuffer(nil) + // If e.value has type T, the encoder expects a *struct{ X T }. + // Pass a *T with a zero field and hope it all works out. + x := reflect.New(et) + x.Elem().Set(reflect.ValueOf(e.value)) + if err := props.enc(p, props, toStructPointer(x)); err != nil { + return err + } + e.enc = p.buf + m[k] = e + } + return nil +} + +func sizeExtensionMap(m map[int32]Extension) (n int) { + for _, e := range m { + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + n += len(e.enc) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + et := reflect.TypeOf(e.desc.ExtensionType) + props := extensionProperties(e.desc) + + // If e.value has type T, the encoder expects a *struct{ X T }. + // Pass a *T with a zero field and hope it all works out. + x := reflect.New(et) + x.Elem().Set(reflect.ValueOf(e.value)) + n += props.size(props, toStructPointer(x)) + } + return +} + +// HasExtension returns whether the given extension is present in pb. +func HasExtension(pb extendableProto, extension *ExtensionDesc) bool { + // TODO: Check types, field numbers, etc.? + _, ok := pb.ExtensionMap()[extension.Field] + return ok +} + +// ClearExtension removes the given extension from pb. +func ClearExtension(pb extendableProto, extension *ExtensionDesc) { + // TODO: Check types, field numbers, etc.? + delete(pb.ExtensionMap(), extension.Field) +} + +// GetExtension parses and returns the given extension of pb. +// If the extension is not present and has no default value it returns ErrMissingExtension. 
+func GetExtension(pb extendableProto, extension *ExtensionDesc) (interface{}, error) { + if err := checkExtensionTypes(pb, extension); err != nil { + return nil, err + } + + emap := pb.ExtensionMap() + e, ok := emap[extension.Field] + if !ok { + // defaultExtensionValue returns the default value or + // ErrMissingExtension if there is no default. + return defaultExtensionValue(extension) + } + + if e.value != nil { + // Already decoded. Check the descriptor, though. + if e.desc != extension { + // This shouldn't happen. If it does, it means that + // GetExtension was called twice with two different + // descriptors with the same field number. + return nil, errors.New("proto: descriptor conflict") + } + return e.value, nil + } + + v, err := decodeExtension(e.enc, extension) + if err != nil { + return nil, err + } + + // Remember the decoded version and drop the encoded version. + // That way it is safe to mutate what we return. + e.value = v + e.desc = extension + e.enc = nil + emap[extension.Field] = e + return e.value, nil +} + +// defaultExtensionValue returns the default value for extension. +// If no default for an extension is defined ErrMissingExtension is returned. +func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) { + t := reflect.TypeOf(extension.ExtensionType) + props := extensionProperties(extension) + + sf, _, err := fieldDefault(t, props) + if err != nil { + return nil, err + } + + if sf == nil || sf.value == nil { + // There is no default value. + return nil, ErrMissingExtension + } + + if t.Kind() != reflect.Ptr { + // We do not need to return a Ptr, we can directly return sf.value. + return sf.value, nil + } + + // We need to return an interface{} that is a pointer to sf.value. + value := reflect.New(t).Elem() + value.Set(reflect.New(value.Type().Elem())) + if sf.kind == reflect.Int32 { + // We may have an int32 or an enum, but the underlying data is int32. 
+ // Since we can't set an int32 into a non int32 reflect.value directly + // set it as a int32. + value.Elem().SetInt(int64(sf.value.(int32))) + } else { + value.Elem().Set(reflect.ValueOf(sf.value)) + } + return value.Interface(), nil +} + +// decodeExtension decodes an extension encoded in b. +func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) { + o := NewBuffer(b) + + t := reflect.TypeOf(extension.ExtensionType) + rep := extension.repeated() + + props := extensionProperties(extension) + + // t is a pointer to a struct, pointer to basic type or a slice. + // Allocate a "field" to store the pointer/slice itself; the + // pointer/slice will be stored here. We pass + // the address of this field to props.dec. + // This passes a zero field and a *t and lets props.dec + // interpret it as a *struct{ x t }. + value := reflect.New(t).Elem() + + for { + // Discard wire type and field number varint. It isn't needed. + if _, err := o.DecodeVarint(); err != nil { + return nil, err + } + + if err := props.dec(o, props, toStructPointer(value.Addr())); err != nil { + return nil, err + } + + if !rep || o.index >= len(o.buf) { + break + } + } + return value.Interface(), nil +} + +// GetExtensions returns a slice of the extensions present in pb that are also listed in es. +// The returned slice has the same length as es; missing extensions will appear as nil elements. +func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) { + epb, ok := pb.(extendableProto) + if !ok { + err = errors.New("proto: not an extendable proto") + return + } + extensions = make([]interface{}, len(es)) + for i, e := range es { + extensions[i], err = GetExtension(epb, e) + if err == ErrMissingExtension { + err = nil + } + if err != nil { + return + } + } + return +} + +// SetExtension sets the specified extension of pb to the specified value. 
+func SetExtension(pb extendableProto, extension *ExtensionDesc, value interface{}) error { + if err := checkExtensionTypes(pb, extension); err != nil { + return err + } + typ := reflect.TypeOf(extension.ExtensionType) + if typ != reflect.TypeOf(value) { + return errors.New("proto: bad extension value type") + } + // nil extension values need to be caught early, because the + // encoder can't distinguish an ErrNil due to a nil extension + // from an ErrNil due to a missing field. Extensions are + // always optional, so the encoder would just swallow the error + // and drop all the extensions from the encoded message. + if reflect.ValueOf(value).IsNil() { + return fmt.Errorf("proto: SetExtension called with nil value of type %T", value) + } + + pb.ExtensionMap()[extension.Field] = Extension{desc: extension, value: value} + return nil +} + +// A global registry of extensions. +// The generated code will register the generated descriptors by calling RegisterExtension. + +var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc) + +// RegisterExtension is called from the generated code. +func RegisterExtension(desc *ExtensionDesc) { + st := reflect.TypeOf(desc.ExtendedType).Elem() + m := extensionMaps[st] + if m == nil { + m = make(map[int32]*ExtensionDesc) + extensionMaps[st] = m + } + if _, ok := m[desc.Field]; ok { + panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field))) + } + m[desc.Field] = desc +} + +// RegisteredExtensions returns a map of the registered extensions of a +// protocol buffer struct, indexed by the extension number. +// The argument pb should be a nil pointer to the struct type. 
+func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc { + return extensionMaps[reflect.TypeOf(pb).Elem()] +} diff -Nru juju-core-2.0~beta15/src/github.com/golang/protobuf/proto/extensions_test.go juju-core-2.0.0/src/github.com/golang/protobuf/proto/extensions_test.go --- juju-core-2.0~beta15/src/github.com/golang/protobuf/proto/extensions_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/golang/protobuf/proto/extensions_test.go 2016-10-13 14:32:09.000000000 +0000 @@ -0,0 +1,292 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2014 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto_test + +import ( + "fmt" + "reflect" + "testing" + + "github.com/golang/protobuf/proto" + pb "github.com/golang/protobuf/proto/testdata" +) + +func TestGetExtensionsWithMissingExtensions(t *testing.T) { + msg := &pb.MyMessage{} + ext1 := &pb.Ext{} + if err := proto.SetExtension(msg, pb.E_Ext_More, ext1); err != nil { + t.Fatalf("Could not set ext1: %s", ext1) + } + exts, err := proto.GetExtensions(msg, []*proto.ExtensionDesc{ + pb.E_Ext_More, + pb.E_Ext_Text, + }) + if err != nil { + t.Fatalf("GetExtensions() failed: %s", err) + } + if exts[0] != ext1 { + t.Errorf("ext1 not in returned extensions: %T %v", exts[0], exts[0]) + } + if exts[1] != nil { + t.Errorf("ext2 in returned extensions: %T %v", exts[1], exts[1]) + } +} + +func TestGetExtensionStability(t *testing.T) { + check := func(m *pb.MyMessage) bool { + ext1, err := proto.GetExtension(m, pb.E_Ext_More) + if err != nil { + t.Fatalf("GetExtension() failed: %s", err) + } + ext2, err := proto.GetExtension(m, pb.E_Ext_More) + if err != nil { + t.Fatalf("GetExtension() failed: %s", err) + } + return ext1 == ext2 + } + msg := &pb.MyMessage{Count: proto.Int32(4)} + ext0 := &pb.Ext{} + if err := proto.SetExtension(msg, pb.E_Ext_More, ext0); err != nil { + t.Fatalf("Could not set ext1: %s", ext0) + } + if !check(msg) { + t.Errorf("GetExtension() not stable before marshaling") + } + bb, err := proto.Marshal(msg) + if err != nil { + t.Fatalf("Marshal() failed: %s", err) 
+ } + msg1 := &pb.MyMessage{} + err = proto.Unmarshal(bb, msg1) + if err != nil { + t.Fatalf("Unmarshal() failed: %s", err) + } + if !check(msg1) { + t.Errorf("GetExtension() not stable after unmarshaling") + } +} + +func TestGetExtensionDefaults(t *testing.T) { + var setFloat64 float64 = 1 + var setFloat32 float32 = 2 + var setInt32 int32 = 3 + var setInt64 int64 = 4 + var setUint32 uint32 = 5 + var setUint64 uint64 = 6 + var setBool = true + var setBool2 = false + var setString = "Goodnight string" + var setBytes = []byte("Goodnight bytes") + var setEnum = pb.DefaultsMessage_TWO + + type testcase struct { + ext *proto.ExtensionDesc // Extension we are testing. + want interface{} // Expected value of extension, or nil (meaning that GetExtension will fail). + def interface{} // Expected value of extension after ClearExtension(). + } + tests := []testcase{ + {pb.E_NoDefaultDouble, setFloat64, nil}, + {pb.E_NoDefaultFloat, setFloat32, nil}, + {pb.E_NoDefaultInt32, setInt32, nil}, + {pb.E_NoDefaultInt64, setInt64, nil}, + {pb.E_NoDefaultUint32, setUint32, nil}, + {pb.E_NoDefaultUint64, setUint64, nil}, + {pb.E_NoDefaultSint32, setInt32, nil}, + {pb.E_NoDefaultSint64, setInt64, nil}, + {pb.E_NoDefaultFixed32, setUint32, nil}, + {pb.E_NoDefaultFixed64, setUint64, nil}, + {pb.E_NoDefaultSfixed32, setInt32, nil}, + {pb.E_NoDefaultSfixed64, setInt64, nil}, + {pb.E_NoDefaultBool, setBool, nil}, + {pb.E_NoDefaultBool, setBool2, nil}, + {pb.E_NoDefaultString, setString, nil}, + {pb.E_NoDefaultBytes, setBytes, nil}, + {pb.E_NoDefaultEnum, setEnum, nil}, + {pb.E_DefaultDouble, setFloat64, float64(3.1415)}, + {pb.E_DefaultFloat, setFloat32, float32(3.14)}, + {pb.E_DefaultInt32, setInt32, int32(42)}, + {pb.E_DefaultInt64, setInt64, int64(43)}, + {pb.E_DefaultUint32, setUint32, uint32(44)}, + {pb.E_DefaultUint64, setUint64, uint64(45)}, + {pb.E_DefaultSint32, setInt32, int32(46)}, + {pb.E_DefaultSint64, setInt64, int64(47)}, + {pb.E_DefaultFixed32, setUint32, uint32(48)}, + 
{pb.E_DefaultFixed64, setUint64, uint64(49)}, + {pb.E_DefaultSfixed32, setInt32, int32(50)}, + {pb.E_DefaultSfixed64, setInt64, int64(51)}, + {pb.E_DefaultBool, setBool, true}, + {pb.E_DefaultBool, setBool2, true}, + {pb.E_DefaultString, setString, "Hello, string"}, + {pb.E_DefaultBytes, setBytes, []byte("Hello, bytes")}, + {pb.E_DefaultEnum, setEnum, pb.DefaultsMessage_ONE}, + } + + checkVal := func(test testcase, msg *pb.DefaultsMessage, valWant interface{}) error { + val, err := proto.GetExtension(msg, test.ext) + if err != nil { + if valWant != nil { + return fmt.Errorf("GetExtension(): %s", err) + } + if want := proto.ErrMissingExtension; err != want { + return fmt.Errorf("Unexpected error: got %v, want %v", err, want) + } + return nil + } + + // All proto2 extension values are either a pointer to a value or a slice of values. + ty := reflect.TypeOf(val) + tyWant := reflect.TypeOf(test.ext.ExtensionType) + if got, want := ty, tyWant; got != want { + return fmt.Errorf("unexpected reflect.TypeOf(): got %v want %v", got, want) + } + tye := ty.Elem() + tyeWant := tyWant.Elem() + if got, want := tye, tyeWant; got != want { + return fmt.Errorf("unexpected reflect.TypeOf().Elem(): got %v want %v", got, want) + } + + // Check the name of the type of the value. + // If it is an enum it will be type int32 with the name of the enum. + if got, want := tye.Name(), tye.Name(); got != want { + return fmt.Errorf("unexpected reflect.TypeOf().Elem().Name(): got %v want %v", got, want) + } + + // Check that value is what we expect. + // If we have a pointer in val, get the value it points to. 
+ valExp := val + if ty.Kind() == reflect.Ptr { + valExp = reflect.ValueOf(val).Elem().Interface() + } + if got, want := valExp, valWant; !reflect.DeepEqual(got, want) { + return fmt.Errorf("unexpected reflect.DeepEqual(): got %v want %v", got, want) + } + + return nil + } + + setTo := func(test testcase) interface{} { + setTo := reflect.ValueOf(test.want) + if typ := reflect.TypeOf(test.ext.ExtensionType); typ.Kind() == reflect.Ptr { + setTo = reflect.New(typ).Elem() + setTo.Set(reflect.New(setTo.Type().Elem())) + setTo.Elem().Set(reflect.ValueOf(test.want)) + } + return setTo.Interface() + } + + for _, test := range tests { + msg := &pb.DefaultsMessage{} + name := test.ext.Name + + // Check the initial value. + if err := checkVal(test, msg, test.def); err != nil { + t.Errorf("%s: %v", name, err) + } + + // Set the per-type value and check value. + name = fmt.Sprintf("%s (set to %T %v)", name, test.want, test.want) + if err := proto.SetExtension(msg, test.ext, setTo(test)); err != nil { + t.Errorf("%s: SetExtension(): %v", name, err) + continue + } + if err := checkVal(test, msg, test.want); err != nil { + t.Errorf("%s: %v", name, err) + continue + } + + // Set and check the value. 
+ name += " (cleared)" + proto.ClearExtension(msg, test.ext) + if err := checkVal(test, msg, test.def); err != nil { + t.Errorf("%s: %v", name, err) + } + } +} + +func TestExtensionsRoundTrip(t *testing.T) { + msg := &pb.MyMessage{} + ext1 := &pb.Ext{ + Data: proto.String("hi"), + } + ext2 := &pb.Ext{ + Data: proto.String("there"), + } + exists := proto.HasExtension(msg, pb.E_Ext_More) + if exists { + t.Error("Extension More present unexpectedly") + } + if err := proto.SetExtension(msg, pb.E_Ext_More, ext1); err != nil { + t.Error(err) + } + if err := proto.SetExtension(msg, pb.E_Ext_More, ext2); err != nil { + t.Error(err) + } + e, err := proto.GetExtension(msg, pb.E_Ext_More) + if err != nil { + t.Error(err) + } + x, ok := e.(*pb.Ext) + if !ok { + t.Errorf("e has type %T, expected testdata.Ext", e) + } else if *x.Data != "there" { + t.Errorf("SetExtension failed to overwrite, got %+v, not 'there'", x) + } + proto.ClearExtension(msg, pb.E_Ext_More) + if _, err = proto.GetExtension(msg, pb.E_Ext_More); err != proto.ErrMissingExtension { + t.Errorf("got %v, expected ErrMissingExtension", e) + } + if _, err := proto.GetExtension(msg, pb.E_X215); err == nil { + t.Error("expected bad extension error, got nil") + } + if err := proto.SetExtension(msg, pb.E_X215, 12); err == nil { + t.Error("expected extension err") + } + if err := proto.SetExtension(msg, pb.E_Ext_More, 12); err == nil { + t.Error("expected some sort of type mismatch error, got nil") + } +} + +func TestNilExtension(t *testing.T) { + msg := &pb.MyMessage{ + Count: proto.Int32(1), + } + if err := proto.SetExtension(msg, pb.E_Ext_Text, proto.String("hello")); err != nil { + t.Fatal(err) + } + if err := proto.SetExtension(msg, pb.E_Ext_More, (*pb.Ext)(nil)); err == nil { + t.Error("expected SetExtension to fail due to a nil extension") + } else if want := "proto: SetExtension called with nil value of type *testdata.Ext"; err.Error() != want { + t.Errorf("expected error %v, got %v", want, err) + } + // Note: 
if the behavior of Marshal is ever changed to ignore nil extensions, update + // this test to verify that E_Ext_Text is properly propagated through marshal->unmarshal. +} diff -Nru juju-core-2.0~beta15/src/github.com/golang/protobuf/proto/lib.go juju-core-2.0.0/src/github.com/golang/protobuf/proto/lib.go --- juju-core-2.0~beta15/src/github.com/golang/protobuf/proto/lib.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/golang/protobuf/proto/lib.go 2016-10-13 14:32:09.000000000 +0000 @@ -0,0 +1,796 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +/* +Package proto converts data structures to and from the wire format of +protocol buffers. It works in concert with the Go source code generated +for .proto files by the protocol compiler. + +A summary of the properties of the protocol buffer interface +for a protocol buffer variable v: + + - Names are turned from camel_case to CamelCase for export. + - There are no methods on v to set fields; just treat + them as structure fields. + - There are getters that return a field's value if set, + and return the field's default value if unset. + The getters work even if the receiver is a nil message. + - The zero value for a struct is its correct initialization state. + All desired fields must be set before marshaling. + - A Reset() method will restore a protobuf struct to its zero state. + - Non-repeated fields are pointers to the values; nil means unset. + That is, optional or required field int32 f becomes F *int32. + - Repeated fields are slices. + - Helper functions are available to aid the setting of fields. + msg.Foo = proto.String("hello") // set field + - Constants are defined to hold the default values of all fields that + have them. They have the form Default_StructName_FieldName. + Because the getter methods handle defaulted values, + direct use of these constants should be rare. + - Enums are given type names and maps from names to values. 
+ Enum values are prefixed by the enclosing message's name, or by the + enum's type name if it is a top-level enum. Enum types have a String + method, and a Enum method to assist in message construction. + - Nested messages, groups and enums have type names prefixed with the name of + the surrounding message type. + - Extensions are given descriptor names that start with E_, + followed by an underscore-delimited list of the nested messages + that contain it (if any) followed by the CamelCased name of the + extension field itself. HasExtension, ClearExtension, GetExtension + and SetExtension are functions for manipulating extensions. + - Marshal and Unmarshal are functions to encode and decode the wire format. + +The simplest way to describe this is to see an example. +Given file test.proto, containing + + package example; + + enum FOO { X = 17; } + + message Test { + required string label = 1; + optional int32 type = 2 [default=77]; + repeated int64 reps = 3; + optional group OptionalGroup = 4 { + required string RequiredField = 5; + } + } + +The resulting file, test.pb.go, is: + + package example + + import proto "github.com/golang/protobuf/proto" + import math "math" + + type FOO int32 + const ( + FOO_X FOO = 17 + ) + var FOO_name = map[int32]string{ + 17: "X", + } + var FOO_value = map[string]int32{ + "X": 17, + } + + func (x FOO) Enum() *FOO { + p := new(FOO) + *p = x + return p + } + func (x FOO) String() string { + return proto.EnumName(FOO_name, int32(x)) + } + func (x *FOO) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FOO_value, data) + if err != nil { + return err + } + *x = FOO(value) + return nil + } + + type Test struct { + Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"` + Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"` + Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"` + Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" 
json:"optionalgroup,omitempty"` + XXX_unrecognized []byte `json:"-"` + } + func (m *Test) Reset() { *m = Test{} } + func (m *Test) String() string { return proto.CompactTextString(m) } + func (*Test) ProtoMessage() {} + const Default_Test_Type int32 = 77 + + func (m *Test) GetLabel() string { + if m != nil && m.Label != nil { + return *m.Label + } + return "" + } + + func (m *Test) GetType() int32 { + if m != nil && m.Type != nil { + return *m.Type + } + return Default_Test_Type + } + + func (m *Test) GetOptionalgroup() *Test_OptionalGroup { + if m != nil { + return m.Optionalgroup + } + return nil + } + + type Test_OptionalGroup struct { + RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"` + } + func (m *Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} } + func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) } + + func (m *Test_OptionalGroup) GetRequiredField() string { + if m != nil && m.RequiredField != nil { + return *m.RequiredField + } + return "" + } + + func init() { + proto.RegisterEnum("example.FOO", FOO_name, FOO_value) + } + +To create and play with a Test object: + +package main + + import ( + "log" + + "github.com/golang/protobuf/proto" + pb "./example.pb" + ) + + func main() { + test := &pb.Test{ + Label: proto.String("hello"), + Type: proto.Int32(17), + Optionalgroup: &pb.Test_OptionalGroup{ + RequiredField: proto.String("good bye"), + }, + } + data, err := proto.Marshal(test) + if err != nil { + log.Fatal("marshaling error: ", err) + } + newTest := &pb.Test{} + err = proto.Unmarshal(data, newTest) + if err != nil { + log.Fatal("unmarshaling error: ", err) + } + // Now test and newTest contain the same data. + if test.GetLabel() != newTest.GetLabel() { + log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel()) + } + // etc. 
+ } +*/ +package proto + +import ( + "encoding/json" + "fmt" + "log" + "reflect" + "strconv" + "sync" +) + +// Message is implemented by generated protocol buffer messages. +type Message interface { + Reset() + String() string + ProtoMessage() +} + +// Stats records allocation details about the protocol buffer encoders +// and decoders. Useful for tuning the library itself. +type Stats struct { + Emalloc uint64 // mallocs in encode + Dmalloc uint64 // mallocs in decode + Encode uint64 // number of encodes + Decode uint64 // number of decodes + Chit uint64 // number of cache hits + Cmiss uint64 // number of cache misses + Size uint64 // number of sizes +} + +// Set to true to enable stats collection. +const collectStats = false + +var stats Stats + +// GetStats returns a copy of the global Stats structure. +func GetStats() Stats { return stats } + +// A Buffer is a buffer manager for marshaling and unmarshaling +// protocol buffers. It may be reused between invocations to +// reduce memory usage. It is not necessary to use a Buffer; +// the global functions Marshal and Unmarshal create a +// temporary Buffer and are fine for most applications. +type Buffer struct { + buf []byte // encode/decode byte stream + index int // write point + + // pools of basic types to amortize allocation. + bools []bool + uint32s []uint32 + uint64s []uint64 + + // extra pools, only used with pointer_reflect.go + int32s []int32 + int64s []int64 + float32s []float32 + float64s []float64 +} + +// NewBuffer allocates a new Buffer and initializes its internal data to +// the contents of the argument slice. +func NewBuffer(e []byte) *Buffer { + return &Buffer{buf: e} +} + +// Reset resets the Buffer, ready for marshaling a new protocol buffer. +func (p *Buffer) Reset() { + p.buf = p.buf[0:0] // for reading/writing + p.index = 0 // for reading +} + +// SetBuf replaces the internal buffer with the slice, +// ready for unmarshaling the contents of the slice. 
+func (p *Buffer) SetBuf(s []byte) { + p.buf = s + p.index = 0 +} + +// Bytes returns the contents of the Buffer. +func (p *Buffer) Bytes() []byte { return p.buf } + +/* + * Helper routines for simplifying the creation of optional fields of basic type. + */ + +// Bool is a helper routine that allocates a new bool value +// to store v and returns a pointer to it. +func Bool(v bool) *bool { + return &v +} + +// Int32 is a helper routine that allocates a new int32 value +// to store v and returns a pointer to it. +func Int32(v int32) *int32 { + return &v +} + +// Int is a helper routine that allocates a new int32 value +// to store v and returns a pointer to it, but unlike Int32 +// its argument value is an int. +func Int(v int) *int32 { + p := new(int32) + *p = int32(v) + return p +} + +// Int64 is a helper routine that allocates a new int64 value +// to store v and returns a pointer to it. +func Int64(v int64) *int64 { + return &v +} + +// Float32 is a helper routine that allocates a new float32 value +// to store v and returns a pointer to it. +func Float32(v float32) *float32 { + return &v +} + +// Float64 is a helper routine that allocates a new float64 value +// to store v and returns a pointer to it. +func Float64(v float64) *float64 { + return &v +} + +// Uint32 is a helper routine that allocates a new uint32 value +// to store v and returns a pointer to it. +func Uint32(v uint32) *uint32 { + return &v +} + +// Uint64 is a helper routine that allocates a new uint64 value +// to store v and returns a pointer to it. +func Uint64(v uint64) *uint64 { + return &v +} + +// String is a helper routine that allocates a new string value +// to store v and returns a pointer to it. +func String(v string) *string { + return &v +} + +// EnumName is a helper function to simplify printing protocol buffer enums +// by name. Given an enum map and a value, it returns a useful string. 
+func EnumName(m map[int32]string, v int32) string { + s, ok := m[v] + if ok { + return s + } + return strconv.Itoa(int(v)) +} + +// UnmarshalJSONEnum is a helper function to simplify recovering enum int values +// from their JSON-encoded representation. Given a map from the enum's symbolic +// names to its int values, and a byte buffer containing the JSON-encoded +// value, it returns an int32 that can be cast to the enum type by the caller. +// +// The function can deal with both JSON representations, numeric and symbolic. +func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) { + if data[0] == '"' { + // New style: enums are strings. + var repr string + if err := json.Unmarshal(data, &repr); err != nil { + return -1, err + } + val, ok := m[repr] + if !ok { + return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr) + } + return val, nil + } + // Old style: enums are ints. + var val int32 + if err := json.Unmarshal(data, &val); err != nil { + return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName) + } + return val, nil +} + +// DebugPrint dumps the encoded data in b in a debugging format with a header +// including the string s. Used in testing but made available for general debugging. 
+func (p *Buffer) DebugPrint(s string, b []byte) { + var u uint64 + + obuf := p.buf + index := p.index + p.buf = b + p.index = 0 + depth := 0 + + fmt.Printf("\n--- %s ---\n", s) + +out: + for { + for i := 0; i < depth; i++ { + fmt.Print(" ") + } + + index := p.index + if index == len(p.buf) { + break + } + + op, err := p.DecodeVarint() + if err != nil { + fmt.Printf("%3d: fetching op err %v\n", index, err) + break out + } + tag := op >> 3 + wire := op & 7 + + switch wire { + default: + fmt.Printf("%3d: t=%3d unknown wire=%d\n", + index, tag, wire) + break out + + case WireBytes: + var r []byte + + r, err = p.DecodeRawBytes(false) + if err != nil { + break out + } + fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r)) + if len(r) <= 6 { + for i := 0; i < len(r); i++ { + fmt.Printf(" %.2x", r[i]) + } + } else { + for i := 0; i < 3; i++ { + fmt.Printf(" %.2x", r[i]) + } + fmt.Printf(" ..") + for i := len(r) - 3; i < len(r); i++ { + fmt.Printf(" %.2x", r[i]) + } + } + fmt.Printf("\n") + + case WireFixed32: + u, err = p.DecodeFixed32() + if err != nil { + fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err) + break out + } + fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u) + + case WireFixed64: + u, err = p.DecodeFixed64() + if err != nil { + fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err) + break out + } + fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u) + break + + case WireVarint: + u, err = p.DecodeVarint() + if err != nil { + fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err) + break out + } + fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u) + + case WireStartGroup: + if err != nil { + fmt.Printf("%3d: t=%3d start err %v\n", index, tag, err) + break out + } + fmt.Printf("%3d: t=%3d start\n", index, tag) + depth++ + + case WireEndGroup: + depth-- + if err != nil { + fmt.Printf("%3d: t=%3d end err %v\n", index, tag, err) + break out + } + fmt.Printf("%3d: t=%3d end\n", index, tag) + } + } + + if depth != 0 { + fmt.Printf("%3d: start-end 
not balanced %d\n", p.index, depth) + } + fmt.Printf("\n") + + p.buf = obuf + p.index = index +} + +// SetDefaults sets unset protocol buffer fields to their default values. +// It only modifies fields that are both unset and have defined defaults. +// It recursively sets default values in any non-nil sub-messages. +func SetDefaults(pb Message) { + setDefaults(reflect.ValueOf(pb), true, false) +} + +// v is a pointer to a struct. +func setDefaults(v reflect.Value, recur, zeros bool) { + v = v.Elem() + + defaultMu.RLock() + dm, ok := defaults[v.Type()] + defaultMu.RUnlock() + if !ok { + dm = buildDefaultMessage(v.Type()) + defaultMu.Lock() + defaults[v.Type()] = dm + defaultMu.Unlock() + } + + for _, sf := range dm.scalars { + f := v.Field(sf.index) + if !f.IsNil() { + // field already set + continue + } + dv := sf.value + if dv == nil && !zeros { + // no explicit default, and don't want to set zeros + continue + } + fptr := f.Addr().Interface() // **T + // TODO: Consider batching the allocations we do here. 
+ switch sf.kind { + case reflect.Bool: + b := new(bool) + if dv != nil { + *b = dv.(bool) + } + *(fptr.(**bool)) = b + case reflect.Float32: + f := new(float32) + if dv != nil { + *f = dv.(float32) + } + *(fptr.(**float32)) = f + case reflect.Float64: + f := new(float64) + if dv != nil { + *f = dv.(float64) + } + *(fptr.(**float64)) = f + case reflect.Int32: + // might be an enum + if ft := f.Type(); ft != int32PtrType { + // enum + f.Set(reflect.New(ft.Elem())) + if dv != nil { + f.Elem().SetInt(int64(dv.(int32))) + } + } else { + // int32 field + i := new(int32) + if dv != nil { + *i = dv.(int32) + } + *(fptr.(**int32)) = i + } + case reflect.Int64: + i := new(int64) + if dv != nil { + *i = dv.(int64) + } + *(fptr.(**int64)) = i + case reflect.String: + s := new(string) + if dv != nil { + *s = dv.(string) + } + *(fptr.(**string)) = s + case reflect.Uint8: + // exceptional case: []byte + var b []byte + if dv != nil { + db := dv.([]byte) + b = make([]byte, len(db)) + copy(b, db) + } else { + b = []byte{} + } + *(fptr.(*[]byte)) = b + case reflect.Uint32: + u := new(uint32) + if dv != nil { + *u = dv.(uint32) + } + *(fptr.(**uint32)) = u + case reflect.Uint64: + u := new(uint64) + if dv != nil { + *u = dv.(uint64) + } + *(fptr.(**uint64)) = u + default: + log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind) + } + } + + for _, ni := range dm.nested { + f := v.Field(ni) + // f is *T or []*T or map[T]*T + switch f.Kind() { + case reflect.Ptr: + if f.IsNil() { + continue + } + setDefaults(f, recur, zeros) + + case reflect.Slice: + for i := 0; i < f.Len(); i++ { + e := f.Index(i) + if e.IsNil() { + continue + } + setDefaults(e, recur, zeros) + } + + case reflect.Map: + for _, k := range f.MapKeys() { + e := f.MapIndex(k) + if e.IsNil() { + continue + } + setDefaults(e, recur, zeros) + } + } + } +} + +var ( + // defaults maps a protocol buffer struct type to a slice of the fields, + // with its scalar fields set to their proto-declared non-zero 
default values. + defaultMu sync.RWMutex + defaults = make(map[reflect.Type]defaultMessage) + + int32PtrType = reflect.TypeOf((*int32)(nil)) +) + +// defaultMessage represents information about the default values of a message. +type defaultMessage struct { + scalars []scalarField + nested []int // struct field index of nested messages +} + +type scalarField struct { + index int // struct field index + kind reflect.Kind // element type (the T in *T or []T) + value interface{} // the proto-declared default value, or nil +} + +// t is a struct type. +func buildDefaultMessage(t reflect.Type) (dm defaultMessage) { + sprop := GetProperties(t) + for _, prop := range sprop.Prop { + fi, ok := sprop.decoderTags.get(prop.Tag) + if !ok { + // XXX_unrecognized + continue + } + ft := t.Field(fi).Type + + sf, nested, err := fieldDefault(ft, prop) + switch { + case err != nil: + log.Print(err) + case nested: + dm.nested = append(dm.nested, fi) + case sf != nil: + sf.index = fi + dm.scalars = append(dm.scalars, *sf) + } + } + + return dm +} + +// fieldDefault returns the scalarField for field type ft. +// sf will be nil if the field can not have a default. +// nestedMessage will be true if this is a nested message. +// Note that sf.index is not set on return. 
+func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) { + var canHaveDefault bool + switch ft.Kind() { + case reflect.Ptr: + if ft.Elem().Kind() == reflect.Struct { + nestedMessage = true + } else { + canHaveDefault = true // proto2 scalar field + } + + case reflect.Slice: + switch ft.Elem().Kind() { + case reflect.Ptr: + nestedMessage = true // repeated message + case reflect.Uint8: + canHaveDefault = true // bytes field + } + + case reflect.Map: + if ft.Elem().Kind() == reflect.Ptr { + nestedMessage = true // map with message values + } + } + + if !canHaveDefault { + if nestedMessage { + return nil, true, nil + } + return nil, false, nil + } + + // We now know that ft is a pointer or slice. + sf = &scalarField{kind: ft.Elem().Kind()} + + // scalar fields without defaults + if !prop.HasDefault { + return sf, false, nil + } + + // a scalar field: either *T or []byte + switch ft.Elem().Kind() { + case reflect.Bool: + x, err := strconv.ParseBool(prop.Default) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err) + } + sf.value = x + case reflect.Float32: + x, err := strconv.ParseFloat(prop.Default, 32) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err) + } + sf.value = float32(x) + case reflect.Float64: + x, err := strconv.ParseFloat(prop.Default, 64) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err) + } + sf.value = x + case reflect.Int32: + x, err := strconv.ParseInt(prop.Default, 10, 32) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err) + } + sf.value = int32(x) + case reflect.Int64: + x, err := strconv.ParseInt(prop.Default, 10, 64) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err) + } + sf.value = x + case reflect.String: + sf.value = prop.Default + 
case reflect.Uint8: + // []byte (not *uint8) + sf.value = []byte(prop.Default) + case reflect.Uint32: + x, err := strconv.ParseUint(prop.Default, 10, 32) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err) + } + sf.value = uint32(x) + case reflect.Uint64: + x, err := strconv.ParseUint(prop.Default, 10, 64) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err) + } + sf.value = x + default: + return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind()) + } + + return sf, false, nil +} + +// Map fields may have key types of non-float scalars, strings and enums. +// The easiest way to sort them in some deterministic order is to use fmt. +// If this turns out to be inefficient we can always consider other options, +// such as doing a Schwartzian transform. + +type mapKeys []reflect.Value + +func (s mapKeys) Len() int { return len(s) } +func (s mapKeys) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s mapKeys) Less(i, j int) bool { + return fmt.Sprint(s[i].Interface()) < fmt.Sprint(s[j].Interface()) +} diff -Nru juju-core-2.0~beta15/src/github.com/golang/protobuf/proto/Makefile juju-core-2.0.0/src/github.com/golang/protobuf/proto/Makefile --- juju-core-2.0~beta15/src/github.com/golang/protobuf/proto/Makefile 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/golang/protobuf/proto/Makefile 2016-10-13 14:32:09.000000000 +0000 @@ -0,0 +1,43 @@ +# Go support for Protocol Buffers - Google's data interchange format +# +# Copyright 2010 The Go Authors. All rights reserved. +# https://github.com/golang/protobuf +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. 
+# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +install: + go install + +test: install generate-test-pbs + go test + + +generate-test-pbs: + make install + make -C testdata + protoc --go_out=Mtestdata/test.proto=github.com/golang/protobuf/proto/testdata:. proto3_proto/proto3.proto + make diff -Nru juju-core-2.0~beta15/src/github.com/golang/protobuf/proto/message_set.go juju-core-2.0.0/src/github.com/golang/protobuf/proto/message_set.go --- juju-core-2.0~beta15/src/github.com/golang/protobuf/proto/message_set.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/golang/protobuf/proto/message_set.go 2016-10-13 14:32:09.000000000 +0000 @@ -0,0 +1,287 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. 
+// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Support for message sets. + */ + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "reflect" + "sort" +) + +// ErrNoMessageTypeId occurs when a protocol buffer does not have a message type ID. +// A message type ID is required for storing a protocol buffer in a message set. 
+var ErrNoMessageTypeId = errors.New("proto does not have a message type ID") + +// The first two types (_MessageSet_Item and MessageSet) +// model what the protocol compiler produces for the following protocol message: +// message MessageSet { +// repeated group Item = 1 { +// required int32 type_id = 2; +// required string message = 3; +// }; +// } +// That is the MessageSet wire format. We can't use a proto to generate these +// because that would introduce a circular dependency between it and this package. +// +// When a proto1 proto has a field that looks like: +// optional message info = 3; +// the protocol compiler produces a field in the generated struct that looks like: +// Info *_proto_.MessageSet `protobuf:"bytes,3,opt,name=info"` +// The package is automatically inserted so there is no need for that proto file to +// import this package. + +type _MessageSet_Item struct { + TypeId *int32 `protobuf:"varint,2,req,name=type_id"` + Message []byte `protobuf:"bytes,3,req,name=message"` +} + +type MessageSet struct { + Item []*_MessageSet_Item `protobuf:"group,1,rep"` + XXX_unrecognized []byte + // TODO: caching? +} + +// Make sure MessageSet is a Message. +var _ Message = (*MessageSet)(nil) + +// messageTypeIder is an interface satisfied by a protocol buffer type +// that may be stored in a MessageSet. 
+type messageTypeIder interface { + MessageTypeId() int32 +} + +func (ms *MessageSet) find(pb Message) *_MessageSet_Item { + mti, ok := pb.(messageTypeIder) + if !ok { + return nil + } + id := mti.MessageTypeId() + for _, item := range ms.Item { + if *item.TypeId == id { + return item + } + } + return nil +} + +func (ms *MessageSet) Has(pb Message) bool { + if ms.find(pb) != nil { + return true + } + return false +} + +func (ms *MessageSet) Unmarshal(pb Message) error { + if item := ms.find(pb); item != nil { + return Unmarshal(item.Message, pb) + } + if _, ok := pb.(messageTypeIder); !ok { + return ErrNoMessageTypeId + } + return nil // TODO: return error instead? +} + +func (ms *MessageSet) Marshal(pb Message) error { + msg, err := Marshal(pb) + if err != nil { + return err + } + if item := ms.find(pb); item != nil { + // reuse existing item + item.Message = msg + return nil + } + + mti, ok := pb.(messageTypeIder) + if !ok { + return ErrNoMessageTypeId + } + + mtid := mti.MessageTypeId() + ms.Item = append(ms.Item, &_MessageSet_Item{ + TypeId: &mtid, + Message: msg, + }) + return nil +} + +func (ms *MessageSet) Reset() { *ms = MessageSet{} } +func (ms *MessageSet) String() string { return CompactTextString(ms) } +func (*MessageSet) ProtoMessage() {} + +// Support for the message_set_wire_format message option. + +func skipVarint(buf []byte) []byte { + i := 0 + for ; buf[i]&0x80 != 0; i++ { + } + return buf[i+1:] +} + +// MarshalMessageSet encodes the extension map represented by m in the message set wire format. +// It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option. +func MarshalMessageSet(m map[int32]Extension) ([]byte, error) { + if err := encodeExtensionMap(m); err != nil { + return nil, err + } + + // Sort extension IDs to provide a deterministic encoding. + // See also enc_map in encode.go. 
+ ids := make([]int, 0, len(m)) + for id := range m { + ids = append(ids, int(id)) + } + sort.Ints(ids) + + ms := &MessageSet{Item: make([]*_MessageSet_Item, 0, len(m))} + for _, id := range ids { + e := m[int32(id)] + // Remove the wire type and field number varint, as well as the length varint. + msg := skipVarint(skipVarint(e.enc)) + + ms.Item = append(ms.Item, &_MessageSet_Item{ + TypeId: Int32(int32(id)), + Message: msg, + }) + } + return Marshal(ms) +} + +// UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format. +// It is called by generated Unmarshal methods on protocol buffer messages with the message_set_wire_format option. +func UnmarshalMessageSet(buf []byte, m map[int32]Extension) error { + ms := new(MessageSet) + if err := Unmarshal(buf, ms); err != nil { + return err + } + for _, item := range ms.Item { + id := *item.TypeId + msg := item.Message + + // Restore wire type and field number varint, plus length varint. + // Be careful to preserve duplicate items. + b := EncodeVarint(uint64(id)<<3 | WireBytes) + if ext, ok := m[id]; ok { + // Existing data; rip off the tag and length varint + // so we join the new data correctly. + // We can assume that ext.enc is set because we are unmarshaling. + o := ext.enc[len(b):] // skip wire type and field number + _, n := DecodeVarint(o) // calculate length of length varint + o = o[n:] // skip length varint + msg = append(o, msg...) // join old data and new data + } + b = append(b, EncodeVarint(uint64(len(msg)))...) + b = append(b, msg...) + + m[id] = Extension{enc: b} + } + return nil +} + +// MarshalMessageSetJSON encodes the extension map represented by m in JSON format. +// It is called by generated MarshalJSON methods on protocol buffer messages with the message_set_wire_format option. +func MarshalMessageSetJSON(m map[int32]Extension) ([]byte, error) { + var b bytes.Buffer + b.WriteByte('{') + + // Process the map in key order for deterministic output. 
+ ids := make([]int32, 0, len(m)) + for id := range m { + ids = append(ids, id) + } + sort.Sort(int32Slice(ids)) // int32Slice defined in text.go + + for i, id := range ids { + ext := m[id] + if i > 0 { + b.WriteByte(',') + } + + msd, ok := messageSetMap[id] + if !ok { + // Unknown type; we can't render it, so skip it. + continue + } + fmt.Fprintf(&b, `"[%s]":`, msd.name) + + x := ext.value + if x == nil { + x = reflect.New(msd.t.Elem()).Interface() + if err := Unmarshal(ext.enc, x.(Message)); err != nil { + return nil, err + } + } + d, err := json.Marshal(x) + if err != nil { + return nil, err + } + b.Write(d) + } + b.WriteByte('}') + return b.Bytes(), nil +} + +// UnmarshalMessageSetJSON decodes the extension map encoded in buf in JSON format. +// It is called by generated UnmarshalJSON methods on protocol buffer messages with the message_set_wire_format option. +func UnmarshalMessageSetJSON(buf []byte, m map[int32]Extension) error { + // Common-case fast path. + if len(buf) == 0 || bytes.Equal(buf, []byte("{}")) { + return nil + } + + // This is fairly tricky, and it's not clear that it is needed. + return errors.New("TODO: UnmarshalMessageSetJSON not yet implemented") +} + +// A global registry of types that can be used in a MessageSet. + +var messageSetMap = make(map[int32]messageSetDesc) + +type messageSetDesc struct { + t reflect.Type // pointer to struct + name string +} + +// RegisterMessageSetType is called from the generated code. 
+func RegisterMessageSetType(m Message, fieldNum int32, name string) { + messageSetMap[fieldNum] = messageSetDesc{ + t: reflect.TypeOf(m), + name: name, + } +} diff -Nru juju-core-2.0~beta15/src/github.com/golang/protobuf/proto/message_set_test.go juju-core-2.0.0/src/github.com/golang/protobuf/proto/message_set_test.go --- juju-core-2.0~beta15/src/github.com/golang/protobuf/proto/message_set_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/golang/protobuf/proto/message_set_test.go 2016-10-13 14:32:09.000000000 +0000 @@ -0,0 +1,66 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2014 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "bytes" + "testing" +) + +func TestUnmarshalMessageSetWithDuplicate(t *testing.T) { + // Check that a repeated message set entry will be concatenated. + in := &MessageSet{ + Item: []*_MessageSet_Item{ + {TypeId: Int32(12345), Message: []byte("hoo")}, + {TypeId: Int32(12345), Message: []byte("hah")}, + }, + } + b, err := Marshal(in) + if err != nil { + t.Fatalf("Marshal: %v", err) + } + t.Logf("Marshaled bytes: %q", b) + + m := make(map[int32]Extension) + if err := UnmarshalMessageSet(b, m); err != nil { + t.Fatalf("UnmarshalMessageSet: %v", err) + } + ext, ok := m[12345] + if !ok { + t.Fatalf("Didn't retrieve extension 12345; map is %v", m) + } + // Skip wire type/field number and length varints. + got := skipVarint(skipVarint(ext.enc)) + if want := []byte("hoohah"); !bytes.Equal(got, want) { + t.Errorf("Combined extension is %q, want %q", got, want) + } +} diff -Nru juju-core-2.0~beta15/src/github.com/golang/protobuf/proto/pointer_reflect.go juju-core-2.0.0/src/github.com/golang/protobuf/proto/pointer_reflect.go --- juju-core-2.0~beta15/src/github.com/golang/protobuf/proto/pointer_reflect.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/golang/protobuf/proto/pointer_reflect.go 2016-10-13 14:32:09.000000000 +0000 @@ -0,0 +1,479 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2012 The Go Authors. All rights reserved. 
+// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// +build appengine + +// This file contains an implementation of proto field accesses using package reflect. +// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can +// be used on App Engine. + +package proto + +import ( + "math" + "reflect" +) + +// A structPointer is a pointer to a struct. 
+type structPointer struct { + v reflect.Value +} + +// toStructPointer returns a structPointer equivalent to the given reflect value. +// The reflect value must itself be a pointer to a struct. +func toStructPointer(v reflect.Value) structPointer { + return structPointer{v} +} + +// IsNil reports whether p is nil. +func structPointer_IsNil(p structPointer) bool { + return p.v.IsNil() +} + +// Interface returns the struct pointer as an interface value. +func structPointer_Interface(p structPointer, _ reflect.Type) interface{} { + return p.v.Interface() +} + +// A field identifies a field in a struct, accessible from a structPointer. +// In this implementation, a field is identified by the sequence of field indices +// passed to reflect's FieldByIndex. +type field []int + +// toField returns a field equivalent to the given reflect field. +func toField(f *reflect.StructField) field { + return f.Index +} + +// invalidField is an invalid field identifier. +var invalidField = field(nil) + +// IsValid reports whether the field identifier is valid. +func (f field) IsValid() bool { return f != nil } + +// field returns the given field in the struct as a reflect value. +func structPointer_field(p structPointer, f field) reflect.Value { + // Special case: an extension map entry with a value of type T + // passes a *T to the struct-handling code with a zero field, + // expecting that it will be treated as equivalent to *struct{ X T }, + // which has the same memory layout. We have to handle that case + // specially, because reflect will panic if we call FieldByIndex on a + // non-struct. + if f == nil { + return p.v.Elem() + } + + return p.v.Elem().FieldByIndex(f) +} + +// ifield returns the given field in the struct as an interface value. +func structPointer_ifield(p structPointer, f field) interface{} { + return structPointer_field(p, f).Addr().Interface() +} + +// Bytes returns the address of a []byte field in the struct. 
+func structPointer_Bytes(p structPointer, f field) *[]byte { + return structPointer_ifield(p, f).(*[]byte) +} + +// BytesSlice returns the address of a [][]byte field in the struct. +func structPointer_BytesSlice(p structPointer, f field) *[][]byte { + return structPointer_ifield(p, f).(*[][]byte) +} + +// Bool returns the address of a *bool field in the struct. +func structPointer_Bool(p structPointer, f field) **bool { + return structPointer_ifield(p, f).(**bool) +} + +// BoolVal returns the address of a bool field in the struct. +func structPointer_BoolVal(p structPointer, f field) *bool { + return structPointer_ifield(p, f).(*bool) +} + +// BoolSlice returns the address of a []bool field in the struct. +func structPointer_BoolSlice(p structPointer, f field) *[]bool { + return structPointer_ifield(p, f).(*[]bool) +} + +// String returns the address of a *string field in the struct. +func structPointer_String(p structPointer, f field) **string { + return structPointer_ifield(p, f).(**string) +} + +// StringVal returns the address of a string field in the struct. +func structPointer_StringVal(p structPointer, f field) *string { + return structPointer_ifield(p, f).(*string) +} + +// StringSlice returns the address of a []string field in the struct. +func structPointer_StringSlice(p structPointer, f field) *[]string { + return structPointer_ifield(p, f).(*[]string) +} + +// ExtMap returns the address of an extension map field in the struct. +func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension { + return structPointer_ifield(p, f).(*map[int32]Extension) +} + +// Map returns the reflect.Value for the address of a map field in the struct. +func structPointer_Map(p structPointer, f field, typ reflect.Type) reflect.Value { + return structPointer_field(p, f).Addr() +} + +// SetStructPointer writes a *struct field in the struct. 
+func structPointer_SetStructPointer(p structPointer, f field, q structPointer) { + structPointer_field(p, f).Set(q.v) +} + +// GetStructPointer reads a *struct field in the struct. +func structPointer_GetStructPointer(p structPointer, f field) structPointer { + return structPointer{structPointer_field(p, f)} +} + +// StructPointerSlice the address of a []*struct field in the struct. +func structPointer_StructPointerSlice(p structPointer, f field) structPointerSlice { + return structPointerSlice{structPointer_field(p, f)} +} + +// A structPointerSlice represents the address of a slice of pointers to structs +// (themselves messages or groups). That is, v.Type() is *[]*struct{...}. +type structPointerSlice struct { + v reflect.Value +} + +func (p structPointerSlice) Len() int { return p.v.Len() } +func (p structPointerSlice) Index(i int) structPointer { return structPointer{p.v.Index(i)} } +func (p structPointerSlice) Append(q structPointer) { + p.v.Set(reflect.Append(p.v, q.v)) +} + +var ( + int32Type = reflect.TypeOf(int32(0)) + uint32Type = reflect.TypeOf(uint32(0)) + float32Type = reflect.TypeOf(float32(0)) + int64Type = reflect.TypeOf(int64(0)) + uint64Type = reflect.TypeOf(uint64(0)) + float64Type = reflect.TypeOf(float64(0)) +) + +// A word32 represents a field of type *int32, *uint32, *float32, or *enum. +// That is, v.Type() is *int32, *uint32, *float32, or *enum and v is assignable. +type word32 struct { + v reflect.Value +} + +// IsNil reports whether p is nil. +func word32_IsNil(p word32) bool { + return p.v.IsNil() +} + +// Set sets p to point at a newly allocated word with bits set to x. 
+func word32_Set(p word32, o *Buffer, x uint32) { + t := p.v.Type().Elem() + switch t { + case int32Type: + if len(o.int32s) == 0 { + o.int32s = make([]int32, uint32PoolSize) + } + o.int32s[0] = int32(x) + p.v.Set(reflect.ValueOf(&o.int32s[0])) + o.int32s = o.int32s[1:] + return + case uint32Type: + if len(o.uint32s) == 0 { + o.uint32s = make([]uint32, uint32PoolSize) + } + o.uint32s[0] = x + p.v.Set(reflect.ValueOf(&o.uint32s[0])) + o.uint32s = o.uint32s[1:] + return + case float32Type: + if len(o.float32s) == 0 { + o.float32s = make([]float32, uint32PoolSize) + } + o.float32s[0] = math.Float32frombits(x) + p.v.Set(reflect.ValueOf(&o.float32s[0])) + o.float32s = o.float32s[1:] + return + } + + // must be enum + p.v.Set(reflect.New(t)) + p.v.Elem().SetInt(int64(int32(x))) +} + +// Get gets the bits pointed at by p, as a uint32. +func word32_Get(p word32) uint32 { + elem := p.v.Elem() + switch elem.Kind() { + case reflect.Int32: + return uint32(elem.Int()) + case reflect.Uint32: + return uint32(elem.Uint()) + case reflect.Float32: + return math.Float32bits(float32(elem.Float())) + } + panic("unreachable") +} + +// Word32 returns a reference to a *int32, *uint32, *float32, or *enum field in the struct. +func structPointer_Word32(p structPointer, f field) word32 { + return word32{structPointer_field(p, f)} +} + +// A word32Val represents a field of type int32, uint32, float32, or enum. +// That is, v.Type() is int32, uint32, float32, or enum and v is assignable. +type word32Val struct { + v reflect.Value +} + +// Set sets *p to x. +func word32Val_Set(p word32Val, x uint32) { + switch p.v.Type() { + case int32Type: + p.v.SetInt(int64(x)) + return + case uint32Type: + p.v.SetUint(uint64(x)) + return + case float32Type: + p.v.SetFloat(float64(math.Float32frombits(x))) + return + } + + // must be enum + p.v.SetInt(int64(int32(x))) +} + +// Get gets the bits pointed at by p, as a uint32. 
+func word32Val_Get(p word32Val) uint32 { + elem := p.v + switch elem.Kind() { + case reflect.Int32: + return uint32(elem.Int()) + case reflect.Uint32: + return uint32(elem.Uint()) + case reflect.Float32: + return math.Float32bits(float32(elem.Float())) + } + panic("unreachable") +} + +// Word32Val returns a reference to a int32, uint32, float32, or enum field in the struct. +func structPointer_Word32Val(p structPointer, f field) word32Val { + return word32Val{structPointer_field(p, f)} +} + +// A word32Slice is a slice of 32-bit values. +// That is, v.Type() is []int32, []uint32, []float32, or []enum. +type word32Slice struct { + v reflect.Value +} + +func (p word32Slice) Append(x uint32) { + n, m := p.v.Len(), p.v.Cap() + if n < m { + p.v.SetLen(n + 1) + } else { + t := p.v.Type().Elem() + p.v.Set(reflect.Append(p.v, reflect.Zero(t))) + } + elem := p.v.Index(n) + switch elem.Kind() { + case reflect.Int32: + elem.SetInt(int64(int32(x))) + case reflect.Uint32: + elem.SetUint(uint64(x)) + case reflect.Float32: + elem.SetFloat(float64(math.Float32frombits(x))) + } +} + +func (p word32Slice) Len() int { + return p.v.Len() +} + +func (p word32Slice) Index(i int) uint32 { + elem := p.v.Index(i) + switch elem.Kind() { + case reflect.Int32: + return uint32(elem.Int()) + case reflect.Uint32: + return uint32(elem.Uint()) + case reflect.Float32: + return math.Float32bits(float32(elem.Float())) + } + panic("unreachable") +} + +// Word32Slice returns a reference to a []int32, []uint32, []float32, or []enum field in the struct. +func structPointer_Word32Slice(p structPointer, f field) word32Slice { + return word32Slice{structPointer_field(p, f)} +} + +// word64 is like word32 but for 64-bit values. 
+type word64 struct { + v reflect.Value +} + +func word64_Set(p word64, o *Buffer, x uint64) { + t := p.v.Type().Elem() + switch t { + case int64Type: + if len(o.int64s) == 0 { + o.int64s = make([]int64, uint64PoolSize) + } + o.int64s[0] = int64(x) + p.v.Set(reflect.ValueOf(&o.int64s[0])) + o.int64s = o.int64s[1:] + return + case uint64Type: + if len(o.uint64s) == 0 { + o.uint64s = make([]uint64, uint64PoolSize) + } + o.uint64s[0] = x + p.v.Set(reflect.ValueOf(&o.uint64s[0])) + o.uint64s = o.uint64s[1:] + return + case float64Type: + if len(o.float64s) == 0 { + o.float64s = make([]float64, uint64PoolSize) + } + o.float64s[0] = math.Float64frombits(x) + p.v.Set(reflect.ValueOf(&o.float64s[0])) + o.float64s = o.float64s[1:] + return + } + panic("unreachable") +} + +func word64_IsNil(p word64) bool { + return p.v.IsNil() +} + +func word64_Get(p word64) uint64 { + elem := p.v.Elem() + switch elem.Kind() { + case reflect.Int64: + return uint64(elem.Int()) + case reflect.Uint64: + return elem.Uint() + case reflect.Float64: + return math.Float64bits(elem.Float()) + } + panic("unreachable") +} + +func structPointer_Word64(p structPointer, f field) word64 { + return word64{structPointer_field(p, f)} +} + +// word64Val is like word32Val but for 64-bit values. 
+type word64Val struct { + v reflect.Value +} + +func word64Val_Set(p word64Val, o *Buffer, x uint64) { + switch p.v.Type() { + case int64Type: + p.v.SetInt(int64(x)) + return + case uint64Type: + p.v.SetUint(x) + return + case float64Type: + p.v.SetFloat(math.Float64frombits(x)) + return + } + panic("unreachable") +} + +func word64Val_Get(p word64Val) uint64 { + elem := p.v + switch elem.Kind() { + case reflect.Int64: + return uint64(elem.Int()) + case reflect.Uint64: + return elem.Uint() + case reflect.Float64: + return math.Float64bits(elem.Float()) + } + panic("unreachable") +} + +func structPointer_Word64Val(p structPointer, f field) word64Val { + return word64Val{structPointer_field(p, f)} +} + +type word64Slice struct { + v reflect.Value +} + +func (p word64Slice) Append(x uint64) { + n, m := p.v.Len(), p.v.Cap() + if n < m { + p.v.SetLen(n + 1) + } else { + t := p.v.Type().Elem() + p.v.Set(reflect.Append(p.v, reflect.Zero(t))) + } + elem := p.v.Index(n) + switch elem.Kind() { + case reflect.Int64: + elem.SetInt(int64(int64(x))) + case reflect.Uint64: + elem.SetUint(uint64(x)) + case reflect.Float64: + elem.SetFloat(float64(math.Float64frombits(x))) + } +} + +func (p word64Slice) Len() int { + return p.v.Len() +} + +func (p word64Slice) Index(i int) uint64 { + elem := p.v.Index(i) + switch elem.Kind() { + case reflect.Int64: + return uint64(elem.Int()) + case reflect.Uint64: + return uint64(elem.Uint()) + case reflect.Float64: + return math.Float64bits(float64(elem.Float())) + } + panic("unreachable") +} + +func structPointer_Word64Slice(p structPointer, f field) word64Slice { + return word64Slice{structPointer_field(p, f)} +} diff -Nru juju-core-2.0~beta15/src/github.com/golang/protobuf/proto/pointer_unsafe.go juju-core-2.0.0/src/github.com/golang/protobuf/proto/pointer_unsafe.go --- juju-core-2.0~beta15/src/github.com/golang/protobuf/proto/pointer_unsafe.go 1970-01-01 00:00:00.000000000 +0000 +++ 
juju-core-2.0.0/src/github.com/golang/protobuf/proto/pointer_unsafe.go 2016-10-13 14:32:09.000000000 +0000 @@ -0,0 +1,266 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2012 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// +build !appengine + +// This file contains the implementation of the proto field accesses using package unsafe. 
+ +package proto + +import ( + "reflect" + "unsafe" +) + +// NOTE: These type_Foo functions would more idiomatically be methods, +// but Go does not allow methods on pointer types, and we must preserve +// some pointer type for the garbage collector. We use these +// funcs with clunky names as our poor approximation to methods. +// +// An alternative would be +// type structPointer struct { p unsafe.Pointer } +// but that does not registerize as well. + +// A structPointer is a pointer to a struct. +type structPointer unsafe.Pointer + +// toStructPointer returns a structPointer equivalent to the given reflect value. +func toStructPointer(v reflect.Value) structPointer { + return structPointer(unsafe.Pointer(v.Pointer())) +} + +// IsNil reports whether p is nil. +func structPointer_IsNil(p structPointer) bool { + return p == nil +} + +// Interface returns the struct pointer, assumed to have element type t, +// as an interface value. +func structPointer_Interface(p structPointer, t reflect.Type) interface{} { + return reflect.NewAt(t, unsafe.Pointer(p)).Interface() +} + +// A field identifies a field in a struct, accessible from a structPointer. +// In this implementation, a field is identified by its byte offset from the start of the struct. +type field uintptr + +// toField returns a field equivalent to the given reflect field. +func toField(f *reflect.StructField) field { + return field(f.Offset) +} + +// invalidField is an invalid field identifier. +const invalidField = ^field(0) + +// IsValid reports whether the field identifier is valid. +func (f field) IsValid() bool { + return f != ^field(0) +} + +// Bytes returns the address of a []byte field in the struct. +func structPointer_Bytes(p structPointer, f field) *[]byte { + return (*[]byte)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// BytesSlice returns the address of a [][]byte field in the struct. 
+func structPointer_BytesSlice(p structPointer, f field) *[][]byte { + return (*[][]byte)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// Bool returns the address of a *bool field in the struct. +func structPointer_Bool(p structPointer, f field) **bool { + return (**bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// BoolVal returns the address of a bool field in the struct. +func structPointer_BoolVal(p structPointer, f field) *bool { + return (*bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// BoolSlice returns the address of a []bool field in the struct. +func structPointer_BoolSlice(p structPointer, f field) *[]bool { + return (*[]bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// String returns the address of a *string field in the struct. +func structPointer_String(p structPointer, f field) **string { + return (**string)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// StringVal returns the address of a string field in the struct. +func structPointer_StringVal(p structPointer, f field) *string { + return (*string)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// StringSlice returns the address of a []string field in the struct. +func structPointer_StringSlice(p structPointer, f field) *[]string { + return (*[]string)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// ExtMap returns the address of an extension map field in the struct. +func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension { + return (*map[int32]Extension)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// Map returns the reflect.Value for the address of a map field in the struct. +func structPointer_Map(p structPointer, f field, typ reflect.Type) reflect.Value { + return reflect.NewAt(typ, unsafe.Pointer(uintptr(p)+uintptr(f))) +} + +// SetStructPointer writes a *struct field in the struct. 
+func structPointer_SetStructPointer(p structPointer, f field, q structPointer) { + *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) = q +} + +// GetStructPointer reads a *struct field in the struct. +func structPointer_GetStructPointer(p structPointer, f field) structPointer { + return *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// StructPointerSlice the address of a []*struct field in the struct. +func structPointer_StructPointerSlice(p structPointer, f field) *structPointerSlice { + return (*structPointerSlice)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// A structPointerSlice represents a slice of pointers to structs (themselves submessages or groups). +type structPointerSlice []structPointer + +func (v *structPointerSlice) Len() int { return len(*v) } +func (v *structPointerSlice) Index(i int) structPointer { return (*v)[i] } +func (v *structPointerSlice) Append(p structPointer) { *v = append(*v, p) } + +// A word32 is the address of a "pointer to 32-bit value" field. +type word32 **uint32 + +// IsNil reports whether *v is nil. +func word32_IsNil(p word32) bool { + return *p == nil +} + +// Set sets *v to point at a newly allocated word set to x. +func word32_Set(p word32, o *Buffer, x uint32) { + if len(o.uint32s) == 0 { + o.uint32s = make([]uint32, uint32PoolSize) + } + o.uint32s[0] = x + *p = &o.uint32s[0] + o.uint32s = o.uint32s[1:] +} + +// Get gets the value pointed at by *v. +func word32_Get(p word32) uint32 { + return **p +} + +// Word32 returns the address of a *int32, *uint32, *float32, or *enum field in the struct. +func structPointer_Word32(p structPointer, f field) word32 { + return word32((**uint32)(unsafe.Pointer(uintptr(p) + uintptr(f)))) +} + +// A word32Val is the address of a 32-bit value field. +type word32Val *uint32 + +// Set sets *p to x. +func word32Val_Set(p word32Val, x uint32) { + *p = x +} + +// Get gets the value pointed at by p. 
+func word32Val_Get(p word32Val) uint32 { + return *p +} + +// Word32Val returns the address of a *int32, *uint32, *float32, or *enum field in the struct. +func structPointer_Word32Val(p structPointer, f field) word32Val { + return word32Val((*uint32)(unsafe.Pointer(uintptr(p) + uintptr(f)))) +} + +// A word32Slice is a slice of 32-bit values. +type word32Slice []uint32 + +func (v *word32Slice) Append(x uint32) { *v = append(*v, x) } +func (v *word32Slice) Len() int { return len(*v) } +func (v *word32Slice) Index(i int) uint32 { return (*v)[i] } + +// Word32Slice returns the address of a []int32, []uint32, []float32, or []enum field in the struct. +func structPointer_Word32Slice(p structPointer, f field) *word32Slice { + return (*word32Slice)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// word64 is like word32 but for 64-bit values. +type word64 **uint64 + +func word64_Set(p word64, o *Buffer, x uint64) { + if len(o.uint64s) == 0 { + o.uint64s = make([]uint64, uint64PoolSize) + } + o.uint64s[0] = x + *p = &o.uint64s[0] + o.uint64s = o.uint64s[1:] +} + +func word64_IsNil(p word64) bool { + return *p == nil +} + +func word64_Get(p word64) uint64 { + return **p +} + +func structPointer_Word64(p structPointer, f field) word64 { + return word64((**uint64)(unsafe.Pointer(uintptr(p) + uintptr(f)))) +} + +// word64Val is like word32Val but for 64-bit values. +type word64Val *uint64 + +func word64Val_Set(p word64Val, o *Buffer, x uint64) { + *p = x +} + +func word64Val_Get(p word64Val) uint64 { + return *p +} + +func structPointer_Word64Val(p structPointer, f field) word64Val { + return word64Val((*uint64)(unsafe.Pointer(uintptr(p) + uintptr(f)))) +} + +// word64Slice is like word32Slice but for 64-bit values. 
+type word64Slice []uint64 + +func (v *word64Slice) Append(x uint64) { *v = append(*v, x) } +func (v *word64Slice) Len() int { return len(*v) } +func (v *word64Slice) Index(i int) uint64 { return (*v)[i] } + +func structPointer_Word64Slice(p structPointer, f field) *word64Slice { + return (*word64Slice)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} diff -Nru juju-core-2.0~beta15/src/github.com/golang/protobuf/proto/properties.go juju-core-2.0.0/src/github.com/golang/protobuf/proto/properties.go --- juju-core-2.0~beta15/src/github.com/golang/protobuf/proto/properties.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/golang/protobuf/proto/properties.go 2016-10-13 14:32:09.000000000 +0000 @@ -0,0 +1,742 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Routines for encoding data into the wire format for protocol buffers. + */ + +import ( + "fmt" + "os" + "reflect" + "sort" + "strconv" + "strings" + "sync" +) + +const debug bool = false + +// Constants that identify the encoding of a value on the wire. +const ( + WireVarint = 0 + WireFixed64 = 1 + WireBytes = 2 + WireStartGroup = 3 + WireEndGroup = 4 + WireFixed32 = 5 +) + +const startSize = 10 // initial slice/string sizes + +// Encoders are defined in encode.go +// An encoder outputs the full representation of a field, including its +// tag and encoder type. +type encoder func(p *Buffer, prop *Properties, base structPointer) error + +// A valueEncoder encodes a single integer in a particular encoding. +type valueEncoder func(o *Buffer, x uint64) error + +// Sizers are defined in encode.go +// A sizer returns the encoded size of a field, including its tag and encoder +// type. +type sizer func(prop *Properties, base structPointer) int + +// A valueSizer returns the encoded size of a single integer in a particular +// encoding. +type valueSizer func(x uint64) int + +// Decoders are defined in decode.go +// A decoder creates a value from its wire representation. +// Unrecognized subelements are saved in unrec. +type decoder func(p *Buffer, prop *Properties, base structPointer) error + +// A valueDecoder decodes a single integer in a particular encoding. 
+type valueDecoder func(o *Buffer) (x uint64, err error) + +// tagMap is an optimization over map[int]int for typical protocol buffer +// use-cases. Encoded protocol buffers are often in tag order with small tag +// numbers. +type tagMap struct { + fastTags []int + slowTags map[int]int +} + +// tagMapFastLimit is the upper bound on the tag number that will be stored in +// the tagMap slice rather than its map. +const tagMapFastLimit = 1024 + +func (p *tagMap) get(t int) (int, bool) { + if t > 0 && t < tagMapFastLimit { + if t >= len(p.fastTags) { + return 0, false + } + fi := p.fastTags[t] + return fi, fi >= 0 + } + fi, ok := p.slowTags[t] + return fi, ok +} + +func (p *tagMap) put(t int, fi int) { + if t > 0 && t < tagMapFastLimit { + for len(p.fastTags) < t+1 { + p.fastTags = append(p.fastTags, -1) + } + p.fastTags[t] = fi + return + } + if p.slowTags == nil { + p.slowTags = make(map[int]int) + } + p.slowTags[t] = fi +} + +// StructProperties represents properties for all the fields of a struct. +// decoderTags and decoderOrigNames should only be used by the decoder. +type StructProperties struct { + Prop []*Properties // properties for each field + reqCount int // required count + decoderTags tagMap // map from proto tag to struct field number + decoderOrigNames map[string]int // map from original name to struct field number + order []int // list of struct field numbers in tag order + unrecField field // field id of the XXX_unrecognized []byte field + extendable bool // is this an extendable proto +} + +// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec. +// See encode.go, (*Buffer).enc_struct. 
+ +func (sp *StructProperties) Len() int { return len(sp.order) } +func (sp *StructProperties) Less(i, j int) bool { + return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag +} +func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] } + +// Properties represents the protocol-specific behavior of a single struct field. +type Properties struct { + Name string // name of the field, for error messages + OrigName string // original name before protocol compiler (always set) + Wire string + WireType int + Tag int + Required bool + Optional bool + Repeated bool + Packed bool // relevant for repeated primitives only + Enum string // set for enum types only + proto3 bool // whether this is known to be a proto3 field; set for []byte only + + Default string // default value + HasDefault bool // whether an explicit default was provided + def_uint64 uint64 + + enc encoder + valEnc valueEncoder // set for bool and numeric types only + field field + tagcode []byte // encoding of EncodeVarint((Tag<<3)|WireType) + tagbuf [8]byte + stype reflect.Type // set for struct types only + sprop *StructProperties // set for struct types only + isMarshaler bool + isUnmarshaler bool + + mtype reflect.Type // set for map types only + mkeyprop *Properties // set for map types only + mvalprop *Properties // set for map types only + + size sizer + valSize valueSizer // set for bool and numeric types only + + dec decoder + valDec valueDecoder // set for bool and numeric types only + + // If this is a packable field, this will be the decoder for the packed version of the field. + packedDec decoder +} + +// String formats the properties in the protobuf struct field tag style. 
+func (p *Properties) String() string { + s := p.Wire + s = "," + s += strconv.Itoa(p.Tag) + if p.Required { + s += ",req" + } + if p.Optional { + s += ",opt" + } + if p.Repeated { + s += ",rep" + } + if p.Packed { + s += ",packed" + } + if p.OrigName != p.Name { + s += ",name=" + p.OrigName + } + if p.proto3 { + s += ",proto3" + } + if len(p.Enum) > 0 { + s += ",enum=" + p.Enum + } + if p.HasDefault { + s += ",def=" + p.Default + } + return s +} + +// Parse populates p by parsing a string in the protobuf struct field tag style. +func (p *Properties) Parse(s string) { + // "bytes,49,opt,name=foo,def=hello!" + fields := strings.Split(s, ",") // breaks def=, but handled below. + if len(fields) < 2 { + fmt.Fprintf(os.Stderr, "proto: tag has too few fields: %q\n", s) + return + } + + p.Wire = fields[0] + switch p.Wire { + case "varint": + p.WireType = WireVarint + p.valEnc = (*Buffer).EncodeVarint + p.valDec = (*Buffer).DecodeVarint + p.valSize = sizeVarint + case "fixed32": + p.WireType = WireFixed32 + p.valEnc = (*Buffer).EncodeFixed32 + p.valDec = (*Buffer).DecodeFixed32 + p.valSize = sizeFixed32 + case "fixed64": + p.WireType = WireFixed64 + p.valEnc = (*Buffer).EncodeFixed64 + p.valDec = (*Buffer).DecodeFixed64 + p.valSize = sizeFixed64 + case "zigzag32": + p.WireType = WireVarint + p.valEnc = (*Buffer).EncodeZigzag32 + p.valDec = (*Buffer).DecodeZigzag32 + p.valSize = sizeZigzag32 + case "zigzag64": + p.WireType = WireVarint + p.valEnc = (*Buffer).EncodeZigzag64 + p.valDec = (*Buffer).DecodeZigzag64 + p.valSize = sizeZigzag64 + case "bytes", "group": + p.WireType = WireBytes + // no numeric converter for non-numeric types + default: + fmt.Fprintf(os.Stderr, "proto: tag has unknown wire type: %q\n", s) + return + } + + var err error + p.Tag, err = strconv.Atoi(fields[1]) + if err != nil { + return + } + + for i := 2; i < len(fields); i++ { + f := fields[i] + switch { + case f == "req": + p.Required = true + case f == "opt": + p.Optional = true + case f == "rep": + 
p.Repeated = true + case f == "packed": + p.Packed = true + case strings.HasPrefix(f, "name="): + p.OrigName = f[5:] + case strings.HasPrefix(f, "enum="): + p.Enum = f[5:] + case f == "proto3": + p.proto3 = true + case strings.HasPrefix(f, "def="): + p.HasDefault = true + p.Default = f[4:] // rest of string + if i+1 < len(fields) { + // Commas aren't escaped, and def is always last. + p.Default += "," + strings.Join(fields[i+1:], ",") + break + } + } + } +} + +func logNoSliceEnc(t1, t2 reflect.Type) { + fmt.Fprintf(os.Stderr, "proto: no slice oenc for %T = []%T\n", t1, t2) +} + +var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem() + +// Initialize the fields for encoding and decoding. +func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lockGetProp bool) { + p.enc = nil + p.dec = nil + p.size = nil + + switch t1 := typ; t1.Kind() { + default: + fmt.Fprintf(os.Stderr, "proto: no coders for %v\n", t1) + + // proto3 scalar types + + case reflect.Bool: + p.enc = (*Buffer).enc_proto3_bool + p.dec = (*Buffer).dec_proto3_bool + p.size = size_proto3_bool + case reflect.Int32: + p.enc = (*Buffer).enc_proto3_int32 + p.dec = (*Buffer).dec_proto3_int32 + p.size = size_proto3_int32 + case reflect.Uint32: + p.enc = (*Buffer).enc_proto3_uint32 + p.dec = (*Buffer).dec_proto3_int32 // can reuse + p.size = size_proto3_uint32 + case reflect.Int64, reflect.Uint64: + p.enc = (*Buffer).enc_proto3_int64 + p.dec = (*Buffer).dec_proto3_int64 + p.size = size_proto3_int64 + case reflect.Float32: + p.enc = (*Buffer).enc_proto3_uint32 // can just treat them as bits + p.dec = (*Buffer).dec_proto3_int32 + p.size = size_proto3_uint32 + case reflect.Float64: + p.enc = (*Buffer).enc_proto3_int64 // can just treat them as bits + p.dec = (*Buffer).dec_proto3_int64 + p.size = size_proto3_int64 + case reflect.String: + p.enc = (*Buffer).enc_proto3_string + p.dec = (*Buffer).dec_proto3_string + p.size = size_proto3_string + + case reflect.Ptr: + switch t2 := t1.Elem(); 
t2.Kind() { + default: + fmt.Fprintf(os.Stderr, "proto: no encoder function for %v -> %v\n", t1, t2) + break + case reflect.Bool: + p.enc = (*Buffer).enc_bool + p.dec = (*Buffer).dec_bool + p.size = size_bool + case reflect.Int32: + p.enc = (*Buffer).enc_int32 + p.dec = (*Buffer).dec_int32 + p.size = size_int32 + case reflect.Uint32: + p.enc = (*Buffer).enc_uint32 + p.dec = (*Buffer).dec_int32 // can reuse + p.size = size_uint32 + case reflect.Int64, reflect.Uint64: + p.enc = (*Buffer).enc_int64 + p.dec = (*Buffer).dec_int64 + p.size = size_int64 + case reflect.Float32: + p.enc = (*Buffer).enc_uint32 // can just treat them as bits + p.dec = (*Buffer).dec_int32 + p.size = size_uint32 + case reflect.Float64: + p.enc = (*Buffer).enc_int64 // can just treat them as bits + p.dec = (*Buffer).dec_int64 + p.size = size_int64 + case reflect.String: + p.enc = (*Buffer).enc_string + p.dec = (*Buffer).dec_string + p.size = size_string + case reflect.Struct: + p.stype = t1.Elem() + p.isMarshaler = isMarshaler(t1) + p.isUnmarshaler = isUnmarshaler(t1) + if p.Wire == "bytes" { + p.enc = (*Buffer).enc_struct_message + p.dec = (*Buffer).dec_struct_message + p.size = size_struct_message + } else { + p.enc = (*Buffer).enc_struct_group + p.dec = (*Buffer).dec_struct_group + p.size = size_struct_group + } + } + + case reflect.Slice: + switch t2 := t1.Elem(); t2.Kind() { + default: + logNoSliceEnc(t1, t2) + break + case reflect.Bool: + if p.Packed { + p.enc = (*Buffer).enc_slice_packed_bool + p.size = size_slice_packed_bool + } else { + p.enc = (*Buffer).enc_slice_bool + p.size = size_slice_bool + } + p.dec = (*Buffer).dec_slice_bool + p.packedDec = (*Buffer).dec_slice_packed_bool + case reflect.Int32: + if p.Packed { + p.enc = (*Buffer).enc_slice_packed_int32 + p.size = size_slice_packed_int32 + } else { + p.enc = (*Buffer).enc_slice_int32 + p.size = size_slice_int32 + } + p.dec = (*Buffer).dec_slice_int32 + p.packedDec = (*Buffer).dec_slice_packed_int32 + case reflect.Uint32: + if 
p.Packed { + p.enc = (*Buffer).enc_slice_packed_uint32 + p.size = size_slice_packed_uint32 + } else { + p.enc = (*Buffer).enc_slice_uint32 + p.size = size_slice_uint32 + } + p.dec = (*Buffer).dec_slice_int32 + p.packedDec = (*Buffer).dec_slice_packed_int32 + case reflect.Int64, reflect.Uint64: + if p.Packed { + p.enc = (*Buffer).enc_slice_packed_int64 + p.size = size_slice_packed_int64 + } else { + p.enc = (*Buffer).enc_slice_int64 + p.size = size_slice_int64 + } + p.dec = (*Buffer).dec_slice_int64 + p.packedDec = (*Buffer).dec_slice_packed_int64 + case reflect.Uint8: + p.enc = (*Buffer).enc_slice_byte + p.dec = (*Buffer).dec_slice_byte + p.size = size_slice_byte + // This is a []byte, which is either a bytes field, + // or the value of a map field. In the latter case, + // we always encode an empty []byte, so we should not + // use the proto3 enc/size funcs. + // f == nil iff this is the key/value of a map field. + if p.proto3 && f != nil { + p.enc = (*Buffer).enc_proto3_slice_byte + p.size = size_proto3_slice_byte + } + case reflect.Float32, reflect.Float64: + switch t2.Bits() { + case 32: + // can just treat them as bits + if p.Packed { + p.enc = (*Buffer).enc_slice_packed_uint32 + p.size = size_slice_packed_uint32 + } else { + p.enc = (*Buffer).enc_slice_uint32 + p.size = size_slice_uint32 + } + p.dec = (*Buffer).dec_slice_int32 + p.packedDec = (*Buffer).dec_slice_packed_int32 + case 64: + // can just treat them as bits + if p.Packed { + p.enc = (*Buffer).enc_slice_packed_int64 + p.size = size_slice_packed_int64 + } else { + p.enc = (*Buffer).enc_slice_int64 + p.size = size_slice_int64 + } + p.dec = (*Buffer).dec_slice_int64 + p.packedDec = (*Buffer).dec_slice_packed_int64 + default: + logNoSliceEnc(t1, t2) + break + } + case reflect.String: + p.enc = (*Buffer).enc_slice_string + p.dec = (*Buffer).dec_slice_string + p.size = size_slice_string + case reflect.Ptr: + switch t3 := t2.Elem(); t3.Kind() { + default: + fmt.Fprintf(os.Stderr, "proto: no ptr oenc for %T 
-> %T -> %T\n", t1, t2, t3) + break + case reflect.Struct: + p.stype = t2.Elem() + p.isMarshaler = isMarshaler(t2) + p.isUnmarshaler = isUnmarshaler(t2) + if p.Wire == "bytes" { + p.enc = (*Buffer).enc_slice_struct_message + p.dec = (*Buffer).dec_slice_struct_message + p.size = size_slice_struct_message + } else { + p.enc = (*Buffer).enc_slice_struct_group + p.dec = (*Buffer).dec_slice_struct_group + p.size = size_slice_struct_group + } + } + case reflect.Slice: + switch t2.Elem().Kind() { + default: + fmt.Fprintf(os.Stderr, "proto: no slice elem oenc for %T -> %T -> %T\n", t1, t2, t2.Elem()) + break + case reflect.Uint8: + p.enc = (*Buffer).enc_slice_slice_byte + p.dec = (*Buffer).dec_slice_slice_byte + p.size = size_slice_slice_byte + } + } + + case reflect.Map: + p.enc = (*Buffer).enc_new_map + p.dec = (*Buffer).dec_new_map + p.size = size_new_map + + p.mtype = t1 + p.mkeyprop = &Properties{} + p.mkeyprop.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp) + p.mvalprop = &Properties{} + vtype := p.mtype.Elem() + if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice { + // The value type is not a message (*T) or bytes ([]byte), + // so we need encoders for the pointer to this type. + vtype = reflect.PtrTo(vtype) + } + p.mvalprop.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp) + } + + // precalculate tag code + wire := p.WireType + if p.Packed { + wire = WireBytes + } + x := uint32(p.Tag)<<3 | uint32(wire) + i := 0 + for i = 0; x > 127; i++ { + p.tagbuf[i] = 0x80 | uint8(x&0x7F) + x >>= 7 + } + p.tagbuf[i] = uint8(x) + p.tagcode = p.tagbuf[0 : i+1] + + if p.stype != nil { + if lockGetProp { + p.sprop = GetProperties(p.stype) + } else { + p.sprop = getPropertiesLocked(p.stype) + } + } +} + +var ( + marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem() + unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem() +) + +// isMarshaler reports whether type t implements Marshaler. 
+func isMarshaler(t reflect.Type) bool { + // We're checking for (likely) pointer-receiver methods + // so if t is not a pointer, something is very wrong. + // The calls above only invoke isMarshaler on pointer types. + if t.Kind() != reflect.Ptr { + panic("proto: misuse of isMarshaler") + } + return t.Implements(marshalerType) +} + +// isUnmarshaler reports whether type t implements Unmarshaler. +func isUnmarshaler(t reflect.Type) bool { + // We're checking for (likely) pointer-receiver methods + // so if t is not a pointer, something is very wrong. + // The calls above only invoke isUnmarshaler on pointer types. + if t.Kind() != reflect.Ptr { + panic("proto: misuse of isUnmarshaler") + } + return t.Implements(unmarshalerType) +} + +// Init populates the properties from a protocol buffer struct tag. +func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) { + p.init(typ, name, tag, f, true) +} + +func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) { + // "bytes,49,opt,def=hello!" + p.Name = name + p.OrigName = name + if f != nil { + p.field = toField(f) + } + if tag == "" { + return + } + p.Parse(tag) + p.setEncAndDec(typ, f, lockGetProp) +} + +var ( + propertiesMu sync.RWMutex + propertiesMap = make(map[reflect.Type]*StructProperties) +) + +// GetProperties returns the list of properties for the type represented by t. +// t must represent a generated struct type of a protocol message. +func GetProperties(t reflect.Type) *StructProperties { + if t.Kind() != reflect.Struct { + panic("proto: type must have kind struct") + } + + // Most calls to GetProperties in a long-running program will be + // retrieving details for types we have seen before. 
+ propertiesMu.RLock() + sprop, ok := propertiesMap[t] + propertiesMu.RUnlock() + if ok { + if collectStats { + stats.Chit++ + } + return sprop + } + + propertiesMu.Lock() + sprop = getPropertiesLocked(t) + propertiesMu.Unlock() + return sprop +} + +// getPropertiesLocked requires that propertiesMu is held. +func getPropertiesLocked(t reflect.Type) *StructProperties { + if prop, ok := propertiesMap[t]; ok { + if collectStats { + stats.Chit++ + } + return prop + } + if collectStats { + stats.Cmiss++ + } + + prop := new(StructProperties) + // in case of recursive protos, fill this in now. + propertiesMap[t] = prop + + // build properties + prop.extendable = reflect.PtrTo(t).Implements(extendableProtoType) + prop.unrecField = invalidField + prop.Prop = make([]*Properties, t.NumField()) + prop.order = make([]int, t.NumField()) + + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + p := new(Properties) + name := f.Name + p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false) + + if f.Name == "XXX_extensions" { // special case + p.enc = (*Buffer).enc_map + p.dec = nil // not needed + p.size = size_map + } + if f.Name == "XXX_unrecognized" { // special case + prop.unrecField = toField(&f) + } + prop.Prop[i] = p + prop.order[i] = i + if debug { + print(i, " ", f.Name, " ", t.String(), " ") + if p.Tag > 0 { + print(p.String()) + } + print("\n") + } + if p.enc == nil && !strings.HasPrefix(f.Name, "XXX_") { + fmt.Fprintln(os.Stderr, "proto: no encoder for", f.Name, f.Type.String(), "[GetProperties]") + } + } + + // Re-order prop.order. + sort.Sort(prop) + + // build required counts + // build tags + reqCount := 0 + prop.decoderOrigNames = make(map[string]int) + for i, p := range prop.Prop { + if strings.HasPrefix(p.Name, "XXX_") { + // Internal fields should not appear in tags/origNames maps. + // They are handled specially when encoding and decoding. 
+ continue + } + if p.Required { + reqCount++ + } + prop.decoderTags.put(p.Tag, i) + prop.decoderOrigNames[p.OrigName] = i + } + prop.reqCount = reqCount + + return prop +} + +// Return the Properties object for the x[0]'th field of the structure. +func propByIndex(t reflect.Type, x []int) *Properties { + if len(x) != 1 { + fmt.Fprintf(os.Stderr, "proto: field index dimension %d (not 1) for type %s\n", len(x), t) + return nil + } + prop := GetProperties(t) + return prop.Prop[x[0]] +} + +// Get the address and type of a pointer to a struct from an interface. +func getbase(pb Message) (t reflect.Type, b structPointer, err error) { + if pb == nil { + err = ErrNil + return + } + // get the reflect type of the pointer to the struct. + t = reflect.TypeOf(pb) + // get the address of the struct. + value := reflect.ValueOf(pb) + b = toStructPointer(value) + return +} + +// A global registry of enum types. +// The generated code will register the generated maps by calling RegisterEnum. + +var enumValueMaps = make(map[string]map[string]int32) + +// RegisterEnum is called from the generated code to install the enum descriptor +// maps into the global table to aid parsing text format protocol buffers. +func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) { + if _, ok := enumValueMaps[typeName]; ok { + panic("proto: duplicate enum registered: " + typeName) + } + enumValueMaps[typeName] = valueMap +} diff -Nru juju-core-2.0~beta15/src/github.com/golang/protobuf/proto/proto3_proto/proto3.pb.go juju-core-2.0.0/src/github.com/golang/protobuf/proto/proto3_proto/proto3.pb.go --- juju-core-2.0~beta15/src/github.com/golang/protobuf/proto/proto3_proto/proto3.pb.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/golang/protobuf/proto/proto3_proto/proto3.pb.go 2016-10-13 14:32:09.000000000 +0000 @@ -0,0 +1,122 @@ +// Code generated by protoc-gen-go. +// source: proto3_proto/proto3.proto +// DO NOT EDIT! 
+ +/* +Package proto3_proto is a generated protocol buffer package. + +It is generated from these files: + proto3_proto/proto3.proto + +It has these top-level messages: + Message + Nested + MessageWithMap +*/ +package proto3_proto + +import proto "github.com/golang/protobuf/proto" +import testdata "github.com/golang/protobuf/proto/testdata" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal + +type Message_Humour int32 + +const ( + Message_UNKNOWN Message_Humour = 0 + Message_PUNS Message_Humour = 1 + Message_SLAPSTICK Message_Humour = 2 + Message_BILL_BAILEY Message_Humour = 3 +) + +var Message_Humour_name = map[int32]string{ + 0: "UNKNOWN", + 1: "PUNS", + 2: "SLAPSTICK", + 3: "BILL_BAILEY", +} +var Message_Humour_value = map[string]int32{ + "UNKNOWN": 0, + "PUNS": 1, + "SLAPSTICK": 2, + "BILL_BAILEY": 3, +} + +func (x Message_Humour) String() string { + return proto.EnumName(Message_Humour_name, int32(x)) +} + +type Message struct { + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Hilarity Message_Humour `protobuf:"varint,2,opt,name=hilarity,enum=proto3_proto.Message_Humour" json:"hilarity,omitempty"` + HeightInCm uint32 `protobuf:"varint,3,opt,name=height_in_cm" json:"height_in_cm,omitempty"` + Data []byte `protobuf:"bytes,4,opt,name=data,proto3" json:"data,omitempty"` + ResultCount int64 `protobuf:"varint,7,opt,name=result_count" json:"result_count,omitempty"` + TrueScotsman bool `protobuf:"varint,8,opt,name=true_scotsman" json:"true_scotsman,omitempty"` + Score float32 `protobuf:"fixed32,9,opt,name=score" json:"score,omitempty"` + Key []uint64 `protobuf:"varint,5,rep,name=key" json:"key,omitempty"` + Nested *Nested `protobuf:"bytes,6,opt,name=nested" json:"nested,omitempty"` + Terrain map[string]*Nested `protobuf:"bytes,10,rep,name=terrain" json:"terrain,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + Proto2Field *testdata.SubDefaults 
`protobuf:"bytes,11,opt,name=proto2_field" json:"proto2_field,omitempty"` + Proto2Value map[string]*testdata.SubDefaults `protobuf:"bytes,13,rep,name=proto2_value" json:"proto2_value,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` +} + +func (m *Message) Reset() { *m = Message{} } +func (m *Message) String() string { return proto.CompactTextString(m) } +func (*Message) ProtoMessage() {} + +func (m *Message) GetNested() *Nested { + if m != nil { + return m.Nested + } + return nil +} + +func (m *Message) GetTerrain() map[string]*Nested { + if m != nil { + return m.Terrain + } + return nil +} + +func (m *Message) GetProto2Field() *testdata.SubDefaults { + if m != nil { + return m.Proto2Field + } + return nil +} + +func (m *Message) GetProto2Value() map[string]*testdata.SubDefaults { + if m != nil { + return m.Proto2Value + } + return nil +} + +type Nested struct { + Bunny string `protobuf:"bytes,1,opt,name=bunny" json:"bunny,omitempty"` +} + +func (m *Nested) Reset() { *m = Nested{} } +func (m *Nested) String() string { return proto.CompactTextString(m) } +func (*Nested) ProtoMessage() {} + +type MessageWithMap struct { + ByteMapping map[bool][]byte `protobuf:"bytes,1,rep,name=byte_mapping" json:"byte_mapping,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (m *MessageWithMap) Reset() { *m = MessageWithMap{} } +func (m *MessageWithMap) String() string { return proto.CompactTextString(m) } +func (*MessageWithMap) ProtoMessage() {} + +func (m *MessageWithMap) GetByteMapping() map[bool][]byte { + if m != nil { + return m.ByteMapping + } + return nil +} + +func init() { + proto.RegisterEnum("proto3_proto.Message_Humour", Message_Humour_name, Message_Humour_value) +} diff -Nru juju-core-2.0~beta15/src/github.com/golang/protobuf/proto/proto3_proto/proto3.proto juju-core-2.0.0/src/github.com/golang/protobuf/proto/proto3_proto/proto3.proto --- 
juju-core-2.0~beta15/src/github.com/golang/protobuf/proto/proto3_proto/proto3.proto 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/golang/protobuf/proto/proto3_proto/proto3.proto 2016-10-13 14:32:09.000000000 +0000 @@ -0,0 +1,68 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2014 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +syntax = "proto3"; + +import "testdata/test.proto"; + +package proto3_proto; + +message Message { + enum Humour { + UNKNOWN = 0; + PUNS = 1; + SLAPSTICK = 2; + BILL_BAILEY = 3; + } + + string name = 1; + Humour hilarity = 2; + uint32 height_in_cm = 3; + bytes data = 4; + int64 result_count = 7; + bool true_scotsman = 8; + float score = 9; + + repeated uint64 key = 5; + Nested nested = 6; + + map terrain = 10; + testdata.SubDefaults proto2_field = 11; + map proto2_value = 13; +} + +message Nested { + string bunny = 1; +} + +message MessageWithMap { + map byte_mapping = 1; +} diff -Nru juju-core-2.0~beta15/src/github.com/golang/protobuf/proto/proto3_test.go juju-core-2.0.0/src/github.com/golang/protobuf/proto/proto3_test.go --- juju-core-2.0~beta15/src/github.com/golang/protobuf/proto/proto3_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/golang/protobuf/proto/proto3_test.go 2016-10-13 14:32:09.000000000 +0000 @@ -0,0 +1,125 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2014 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto_test + +import ( + "testing" + + "github.com/golang/protobuf/proto" + pb "github.com/golang/protobuf/proto/proto3_proto" + tpb "github.com/golang/protobuf/proto/testdata" +) + +func TestProto3ZeroValues(t *testing.T) { + tests := []struct { + desc string + m proto.Message + }{ + {"zero message", &pb.Message{}}, + {"empty bytes field", &pb.Message{Data: []byte{}}}, + } + for _, test := range tests { + b, err := proto.Marshal(test.m) + if err != nil { + t.Errorf("%s: proto.Marshal: %v", test.desc, err) + continue + } + if len(b) > 0 { + t.Errorf("%s: Encoding is non-empty: %q", test.desc, b) + } + } +} + +func TestRoundTripProto3(t *testing.T) { + m := &pb.Message{ + Name: "David", // (2 | 1<<3): 0x0a 0x05 "David" + Hilarity: pb.Message_PUNS, // (0 | 2<<3): 0x10 0x01 + HeightInCm: 178, // (0 | 3<<3): 0x18 0xb2 0x01 + Data: []byte("roboto"), // (2 | 4<<3): 0x20 0x06 "roboto" + ResultCount: 47, // (0 | 7<<3): 0x38 0x2f + TrueScotsman: true, // (0 | 8<<3): 0x40 0x01 + Score: 8.1, // (5 | 9<<3): 0x4d <8.1> + + Key: []uint64{1, 0xdeadbeef}, + Nested: &pb.Nested{ + Bunny: "Monty", + }, + } + t.Logf(" m: %v", m) + + b, err := proto.Marshal(m) + if err != nil { + 
t.Fatalf("proto.Marshal: %v", err) + } + t.Logf(" b: %q", b) + + m2 := new(pb.Message) + if err := proto.Unmarshal(b, m2); err != nil { + t.Fatalf("proto.Unmarshal: %v", err) + } + t.Logf("m2: %v", m2) + + if !proto.Equal(m, m2) { + t.Errorf("proto.Equal returned false:\n m: %v\nm2: %v", m, m2) + } +} + +func TestProto3SetDefaults(t *testing.T) { + in := &pb.Message{ + Terrain: map[string]*pb.Nested{ + "meadow": new(pb.Nested), + }, + Proto2Field: new(tpb.SubDefaults), + Proto2Value: map[string]*tpb.SubDefaults{ + "badlands": new(tpb.SubDefaults), + }, + } + + got := proto.Clone(in).(*pb.Message) + proto.SetDefaults(got) + + // There are no defaults in proto3. Everything should be the zero value, but + // we need to remember to set defaults for nested proto2 messages. + want := &pb.Message{ + Terrain: map[string]*pb.Nested{ + "meadow": new(pb.Nested), + }, + Proto2Field: &tpb.SubDefaults{N: proto.Int64(7)}, + Proto2Value: map[string]*tpb.SubDefaults{ + "badlands": &tpb.SubDefaults{N: proto.Int64(7)}, + }, + } + + if !proto.Equal(got, want) { + t.Errorf("with in = %v\nproto.SetDefaults(in) =>\ngot %v\nwant %v", in, got, want) + } +} diff -Nru juju-core-2.0~beta15/src/github.com/golang/protobuf/proto/size2_test.go juju-core-2.0.0/src/github.com/golang/protobuf/proto/size2_test.go --- juju-core-2.0~beta15/src/github.com/golang/protobuf/proto/size2_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/golang/protobuf/proto/size2_test.go 2016-10-13 14:32:09.000000000 +0000 @@ -0,0 +1,63 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2012 The Go Authors. All rights reserved. 
+// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "testing" +) + +// This is a separate file and package from size_test.go because that one uses +// generated messages and thus may not be in package proto without having a circular +// dependency, whereas this file tests unexported details of size.go. + +func TestVarintSize(t *testing.T) { + // Check the edge cases carefully. 
+ testCases := []struct { + n uint64 + size int + }{ + {0, 1}, + {1, 1}, + {127, 1}, + {128, 2}, + {16383, 2}, + {16384, 3}, + {1<<63 - 1, 9}, + {1 << 63, 10}, + } + for _, tc := range testCases { + size := sizeVarint(tc.n) + if size != tc.size { + t.Errorf("sizeVarint(%d) = %d, want %d", tc.n, size, tc.size) + } + } +} diff -Nru juju-core-2.0~beta15/src/github.com/golang/protobuf/proto/size_test.go juju-core-2.0.0/src/github.com/golang/protobuf/proto/size_test.go --- juju-core-2.0~beta15/src/github.com/golang/protobuf/proto/size_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/golang/protobuf/proto/size_test.go 2016-10-13 14:32:09.000000000 +0000 @@ -0,0 +1,142 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2012 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto_test + +import ( + "log" + "strings" + "testing" + + . "github.com/golang/protobuf/proto" + proto3pb "github.com/golang/protobuf/proto/proto3_proto" + pb "github.com/golang/protobuf/proto/testdata" +) + +var messageWithExtension1 = &pb.MyMessage{Count: Int32(7)} + +// messageWithExtension2 is in equal_test.go. +var messageWithExtension3 = &pb.MyMessage{Count: Int32(8)} + +func init() { + if err := SetExtension(messageWithExtension1, pb.E_Ext_More, &pb.Ext{Data: String("Abbott")}); err != nil { + log.Panicf("SetExtension: %v", err) + } + if err := SetExtension(messageWithExtension3, pb.E_Ext_More, &pb.Ext{Data: String("Costello")}); err != nil { + log.Panicf("SetExtension: %v", err) + } + + // Force messageWithExtension3 to have the extension encoded. + Marshal(messageWithExtension3) + +} + +var SizeTests = []struct { + desc string + pb Message +}{ + {"empty", &pb.OtherMessage{}}, + // Basic types. 
+ {"bool", &pb.Defaults{F_Bool: Bool(true)}}, + {"int32", &pb.Defaults{F_Int32: Int32(12)}}, + {"negative int32", &pb.Defaults{F_Int32: Int32(-1)}}, + {"small int64", &pb.Defaults{F_Int64: Int64(1)}}, + {"big int64", &pb.Defaults{F_Int64: Int64(1 << 20)}}, + {"negative int64", &pb.Defaults{F_Int64: Int64(-1)}}, + {"fixed32", &pb.Defaults{F_Fixed32: Uint32(71)}}, + {"fixed64", &pb.Defaults{F_Fixed64: Uint64(72)}}, + {"uint32", &pb.Defaults{F_Uint32: Uint32(123)}}, + {"uint64", &pb.Defaults{F_Uint64: Uint64(124)}}, + {"float", &pb.Defaults{F_Float: Float32(12.6)}}, + {"double", &pb.Defaults{F_Double: Float64(13.9)}}, + {"string", &pb.Defaults{F_String: String("niles")}}, + {"bytes", &pb.Defaults{F_Bytes: []byte("wowsa")}}, + {"bytes, empty", &pb.Defaults{F_Bytes: []byte{}}}, + {"sint32", &pb.Defaults{F_Sint32: Int32(65)}}, + {"sint64", &pb.Defaults{F_Sint64: Int64(67)}}, + {"enum", &pb.Defaults{F_Enum: pb.Defaults_BLUE.Enum()}}, + // Repeated. + {"empty repeated bool", &pb.MoreRepeated{Bools: []bool{}}}, + {"repeated bool", &pb.MoreRepeated{Bools: []bool{false, true, true, false}}}, + {"packed repeated bool", &pb.MoreRepeated{BoolsPacked: []bool{false, true, true, false, true, true, true}}}, + {"repeated int32", &pb.MoreRepeated{Ints: []int32{1, 12203, 1729, -1}}}, + {"repeated int32 packed", &pb.MoreRepeated{IntsPacked: []int32{1, 12203, 1729}}}, + {"repeated int64 packed", &pb.MoreRepeated{Int64SPacked: []int64{ + // Need enough large numbers to verify that the header is counting the number of bytes + // for the field, not the number of elements. + 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, + 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, + }}}, + {"repeated string", &pb.MoreRepeated{Strings: []string{"r", "ken", "gri"}}}, + {"repeated fixed", &pb.MoreRepeated{Fixeds: []uint32{1, 2, 3, 4}}}, + // Nested. 
+ {"nested", &pb.OldMessage{Nested: &pb.OldMessage_Nested{Name: String("whatever")}}}, + {"group", &pb.GroupOld{G: &pb.GroupOld_G{X: Int32(12345)}}}, + // Other things. + {"unrecognized", &pb.MoreRepeated{XXX_unrecognized: []byte{13<<3 | 0, 4}}}, + {"extension (unencoded)", messageWithExtension1}, + {"extension (encoded)", messageWithExtension3}, + // proto3 message + {"proto3 empty", &proto3pb.Message{}}, + {"proto3 bool", &proto3pb.Message{TrueScotsman: true}}, + {"proto3 int64", &proto3pb.Message{ResultCount: 1}}, + {"proto3 uint32", &proto3pb.Message{HeightInCm: 123}}, + {"proto3 float", &proto3pb.Message{Score: 12.6}}, + {"proto3 string", &proto3pb.Message{Name: "Snezana"}}, + {"proto3 bytes", &proto3pb.Message{Data: []byte("wowsa")}}, + {"proto3 bytes, empty", &proto3pb.Message{Data: []byte{}}}, + {"proto3 enum", &proto3pb.Message{Hilarity: proto3pb.Message_PUNS}}, + {"proto3 map field with empty bytes", &proto3pb.MessageWithMap{ByteMapping: map[bool][]byte{false: []byte{}}}}, + + {"map field", &pb.MessageWithMap{NameMapping: map[int32]string{1: "Rob", 7: "Andrew"}}}, + {"map field with message", &pb.MessageWithMap{MsgMapping: map[int64]*pb.FloatingPoint{0x7001: &pb.FloatingPoint{F: Float64(2.0)}}}}, + {"map field with bytes", &pb.MessageWithMap{ByteMapping: map[bool][]byte{true: []byte("this time for sure")}}}, + {"map field with empty bytes", &pb.MessageWithMap{ByteMapping: map[bool][]byte{true: []byte{}}}}, + + {"map field with big entry", &pb.MessageWithMap{NameMapping: map[int32]string{8: strings.Repeat("x", 125)}}}, + {"map field with big key and val", &pb.MessageWithMap{StrToStr: map[string]string{strings.Repeat("x", 70): strings.Repeat("y", 70)}}}, + {"map field with big numeric key", &pb.MessageWithMap{NameMapping: map[int32]string{0xf00d: "om nom nom"}}}, +} + +func TestSize(t *testing.T) { + for _, tc := range SizeTests { + size := Size(tc.pb) + b, err := Marshal(tc.pb) + if err != nil { + t.Errorf("%v: Marshal failed: %v", tc.desc, err) + continue 
+ } + if size != len(b) { + t.Errorf("%v: Size(%v) = %d, want %d", tc.desc, tc.pb, size, len(b)) + t.Logf("%v: bytes: %#v", tc.desc, b) + } + } +} diff -Nru juju-core-2.0~beta15/src/github.com/golang/protobuf/proto/testdata/golden_test.go juju-core-2.0.0/src/github.com/golang/protobuf/proto/testdata/golden_test.go --- juju-core-2.0~beta15/src/github.com/golang/protobuf/proto/testdata/golden_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/golang/protobuf/proto/testdata/golden_test.go 2016-10-13 14:32:09.000000000 +0000 @@ -0,0 +1,86 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2012 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Verify that the compiler output for test.proto is unchanged. + +package testdata + +import ( + "crypto/sha1" + "fmt" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "testing" +) + +// sum returns in string form (for easy comparison) the SHA-1 hash of the named file. +func sum(t *testing.T, name string) string { + data, err := ioutil.ReadFile(name) + if err != nil { + t.Fatal(err) + } + t.Logf("sum(%q): length is %d", name, len(data)) + hash := sha1.New() + _, err = hash.Write(data) + if err != nil { + t.Fatal(err) + } + return fmt.Sprintf("% x", hash.Sum(nil)) +} + +func run(t *testing.T, name string, args ...string) { + cmd := exec.Command(name, args...) + cmd.Stdin = os.Stdin + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + err := cmd.Run() + if err != nil { + t.Fatal(err) + } +} + +func TestGolden(t *testing.T) { + // Compute the original checksum. + goldenSum := sum(t, "test.pb.go") + // Run the proto compiler. + run(t, "protoc", "--go_out="+os.TempDir(), "test.proto") + newFile := filepath.Join(os.TempDir(), "test.pb.go") + defer os.Remove(newFile) + // Compute the new checksum. 
+ newSum := sum(t, newFile) + // Verify + if newSum != goldenSum { + run(t, "diff", "-u", "test.pb.go", newFile) + t.Fatal("Code generated by protoc-gen-go has changed; update test.pb.go") + } +} diff -Nru juju-core-2.0~beta15/src/github.com/golang/protobuf/proto/testdata/Makefile juju-core-2.0.0/src/github.com/golang/protobuf/proto/testdata/Makefile --- juju-core-2.0~beta15/src/github.com/golang/protobuf/proto/testdata/Makefile 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/golang/protobuf/proto/testdata/Makefile 2016-10-13 14:32:09.000000000 +0000 @@ -0,0 +1,50 @@ +# Go support for Protocol Buffers - Google's data interchange format +# +# Copyright 2010 The Go Authors. All rights reserved. +# https://github.com/golang/protobuf +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +include ../../Make.protobuf + +all: regenerate + +regenerate: + rm -f test.pb.go + make test.pb.go + +# The following rules are just aids to development. Not needed for typical testing. + +diff: regenerate + git diff test.pb.go + +restore: + cp test.pb.go.golden test.pb.go + +preserve: + cp test.pb.go test.pb.go.golden diff -Nru juju-core-2.0~beta15/src/github.com/golang/protobuf/proto/testdata/test.pb.go juju-core-2.0.0/src/github.com/golang/protobuf/proto/testdata/test.pb.go --- juju-core-2.0~beta15/src/github.com/golang/protobuf/proto/testdata/test.pb.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/golang/protobuf/proto/testdata/test.pb.go 2016-10-13 14:32:09.000000000 +0000 @@ -0,0 +1,2746 @@ +// Code generated by protoc-gen-go. +// source: test.proto +// DO NOT EDIT! + +/* +Package testdata is a generated protocol buffer package. + +It is generated from these files: + test.proto + +It has these top-level messages: + GoEnum + GoTestField + GoTest + GoSkipTest + NonPackedTest + PackedTest + MaxTag + OldMessage + NewMessage + InnerMessage + OtherMessage + MyMessage + Ext + DefaultsMessage + MyMessageSet + Empty + MessageList + Strings + Defaults + SubDefaults + RepeatedEnum + MoreRepeated + GroupOld + GroupNew + FloatingPoint + MessageWithMap +*/ +package testdata + +import proto "github.com/golang/protobuf/proto" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = math.Inf + +type FOO int32 + +const ( + FOO_FOO1 FOO = 1 +) + +var FOO_name = map[int32]string{ + 1: "FOO1", +} +var FOO_value = map[string]int32{ + "FOO1": 1, +} + +func (x FOO) Enum() *FOO { + p := new(FOO) + *p = x + return p +} +func (x FOO) String() string { + return proto.EnumName(FOO_name, int32(x)) +} +func (x *FOO) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FOO_value, data, "FOO") + if err != nil { + return err + } + *x = FOO(value) + return nil +} + +// An enum, for completeness. +type GoTest_KIND int32 + +const ( + GoTest_VOID GoTest_KIND = 0 + // Basic types + GoTest_BOOL GoTest_KIND = 1 + GoTest_BYTES GoTest_KIND = 2 + GoTest_FINGERPRINT GoTest_KIND = 3 + GoTest_FLOAT GoTest_KIND = 4 + GoTest_INT GoTest_KIND = 5 + GoTest_STRING GoTest_KIND = 6 + GoTest_TIME GoTest_KIND = 7 + // Groupings + GoTest_TUPLE GoTest_KIND = 8 + GoTest_ARRAY GoTest_KIND = 9 + GoTest_MAP GoTest_KIND = 10 + // Table types + GoTest_TABLE GoTest_KIND = 11 + // Functions + GoTest_FUNCTION GoTest_KIND = 12 +) + +var GoTest_KIND_name = map[int32]string{ + 0: "VOID", + 1: "BOOL", + 2: "BYTES", + 3: "FINGERPRINT", + 4: "FLOAT", + 5: "INT", + 6: "STRING", + 7: "TIME", + 8: "TUPLE", + 9: "ARRAY", + 10: "MAP", + 11: "TABLE", + 12: "FUNCTION", +} +var GoTest_KIND_value = map[string]int32{ + "VOID": 0, + "BOOL": 1, + "BYTES": 2, + "FINGERPRINT": 3, + "FLOAT": 4, + "INT": 5, + "STRING": 6, + "TIME": 7, + "TUPLE": 8, + "ARRAY": 9, + "MAP": 10, + "TABLE": 11, + "FUNCTION": 12, +} + +func (x GoTest_KIND) Enum() *GoTest_KIND { + p := new(GoTest_KIND) + *p = x + return p +} +func (x GoTest_KIND) String() string { + return proto.EnumName(GoTest_KIND_name, int32(x)) +} +func (x *GoTest_KIND) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(GoTest_KIND_value, data, "GoTest_KIND") + if err != nil { + return err + } + *x = GoTest_KIND(value) + return nil +} + +type MyMessage_Color int32 + +const ( + MyMessage_RED 
MyMessage_Color = 0 + MyMessage_GREEN MyMessage_Color = 1 + MyMessage_BLUE MyMessage_Color = 2 +) + +var MyMessage_Color_name = map[int32]string{ + 0: "RED", + 1: "GREEN", + 2: "BLUE", +} +var MyMessage_Color_value = map[string]int32{ + "RED": 0, + "GREEN": 1, + "BLUE": 2, +} + +func (x MyMessage_Color) Enum() *MyMessage_Color { + p := new(MyMessage_Color) + *p = x + return p +} +func (x MyMessage_Color) String() string { + return proto.EnumName(MyMessage_Color_name, int32(x)) +} +func (x *MyMessage_Color) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(MyMessage_Color_value, data, "MyMessage_Color") + if err != nil { + return err + } + *x = MyMessage_Color(value) + return nil +} + +type DefaultsMessage_DefaultsEnum int32 + +const ( + DefaultsMessage_ZERO DefaultsMessage_DefaultsEnum = 0 + DefaultsMessage_ONE DefaultsMessage_DefaultsEnum = 1 + DefaultsMessage_TWO DefaultsMessage_DefaultsEnum = 2 +) + +var DefaultsMessage_DefaultsEnum_name = map[int32]string{ + 0: "ZERO", + 1: "ONE", + 2: "TWO", +} +var DefaultsMessage_DefaultsEnum_value = map[string]int32{ + "ZERO": 0, + "ONE": 1, + "TWO": 2, +} + +func (x DefaultsMessage_DefaultsEnum) Enum() *DefaultsMessage_DefaultsEnum { + p := new(DefaultsMessage_DefaultsEnum) + *p = x + return p +} +func (x DefaultsMessage_DefaultsEnum) String() string { + return proto.EnumName(DefaultsMessage_DefaultsEnum_name, int32(x)) +} +func (x *DefaultsMessage_DefaultsEnum) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(DefaultsMessage_DefaultsEnum_value, data, "DefaultsMessage_DefaultsEnum") + if err != nil { + return err + } + *x = DefaultsMessage_DefaultsEnum(value) + return nil +} + +type Defaults_Color int32 + +const ( + Defaults_RED Defaults_Color = 0 + Defaults_GREEN Defaults_Color = 1 + Defaults_BLUE Defaults_Color = 2 +) + +var Defaults_Color_name = map[int32]string{ + 0: "RED", + 1: "GREEN", + 2: "BLUE", +} +var Defaults_Color_value = map[string]int32{ + "RED": 0, + 
"GREEN": 1, + "BLUE": 2, +} + +func (x Defaults_Color) Enum() *Defaults_Color { + p := new(Defaults_Color) + *p = x + return p +} +func (x Defaults_Color) String() string { + return proto.EnumName(Defaults_Color_name, int32(x)) +} +func (x *Defaults_Color) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Defaults_Color_value, data, "Defaults_Color") + if err != nil { + return err + } + *x = Defaults_Color(value) + return nil +} + +type RepeatedEnum_Color int32 + +const ( + RepeatedEnum_RED RepeatedEnum_Color = 1 +) + +var RepeatedEnum_Color_name = map[int32]string{ + 1: "RED", +} +var RepeatedEnum_Color_value = map[string]int32{ + "RED": 1, +} + +func (x RepeatedEnum_Color) Enum() *RepeatedEnum_Color { + p := new(RepeatedEnum_Color) + *p = x + return p +} +func (x RepeatedEnum_Color) String() string { + return proto.EnumName(RepeatedEnum_Color_name, int32(x)) +} +func (x *RepeatedEnum_Color) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(RepeatedEnum_Color_value, data, "RepeatedEnum_Color") + if err != nil { + return err + } + *x = RepeatedEnum_Color(value) + return nil +} + +type GoEnum struct { + Foo *FOO `protobuf:"varint,1,req,name=foo,enum=testdata.FOO" json:"foo,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GoEnum) Reset() { *m = GoEnum{} } +func (m *GoEnum) String() string { return proto.CompactTextString(m) } +func (*GoEnum) ProtoMessage() {} + +func (m *GoEnum) GetFoo() FOO { + if m != nil && m.Foo != nil { + return *m.Foo + } + return FOO_FOO1 +} + +type GoTestField struct { + Label *string `protobuf:"bytes,1,req" json:"Label,omitempty"` + Type *string `protobuf:"bytes,2,req" json:"Type,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GoTestField) Reset() { *m = GoTestField{} } +func (m *GoTestField) String() string { return proto.CompactTextString(m) } +func (*GoTestField) ProtoMessage() {} + +func (m *GoTestField) GetLabel() string { + if m != nil && m.Label != nil { 
+ return *m.Label + } + return "" +} + +func (m *GoTestField) GetType() string { + if m != nil && m.Type != nil { + return *m.Type + } + return "" +} + +type GoTest struct { + // Some typical parameters + Kind *GoTest_KIND `protobuf:"varint,1,req,enum=testdata.GoTest_KIND" json:"Kind,omitempty"` + Table *string `protobuf:"bytes,2,opt" json:"Table,omitempty"` + Param *int32 `protobuf:"varint,3,opt" json:"Param,omitempty"` + // Required, repeated and optional foreign fields. + RequiredField *GoTestField `protobuf:"bytes,4,req" json:"RequiredField,omitempty"` + RepeatedField []*GoTestField `protobuf:"bytes,5,rep" json:"RepeatedField,omitempty"` + OptionalField *GoTestField `protobuf:"bytes,6,opt" json:"OptionalField,omitempty"` + // Required fields of all basic types + F_BoolRequired *bool `protobuf:"varint,10,req,name=F_Bool_required" json:"F_Bool_required,omitempty"` + F_Int32Required *int32 `protobuf:"varint,11,req,name=F_Int32_required" json:"F_Int32_required,omitempty"` + F_Int64Required *int64 `protobuf:"varint,12,req,name=F_Int64_required" json:"F_Int64_required,omitempty"` + F_Fixed32Required *uint32 `protobuf:"fixed32,13,req,name=F_Fixed32_required" json:"F_Fixed32_required,omitempty"` + F_Fixed64Required *uint64 `protobuf:"fixed64,14,req,name=F_Fixed64_required" json:"F_Fixed64_required,omitempty"` + F_Uint32Required *uint32 `protobuf:"varint,15,req,name=F_Uint32_required" json:"F_Uint32_required,omitempty"` + F_Uint64Required *uint64 `protobuf:"varint,16,req,name=F_Uint64_required" json:"F_Uint64_required,omitempty"` + F_FloatRequired *float32 `protobuf:"fixed32,17,req,name=F_Float_required" json:"F_Float_required,omitempty"` + F_DoubleRequired *float64 `protobuf:"fixed64,18,req,name=F_Double_required" json:"F_Double_required,omitempty"` + F_StringRequired *string `protobuf:"bytes,19,req,name=F_String_required" json:"F_String_required,omitempty"` + F_BytesRequired []byte `protobuf:"bytes,101,req,name=F_Bytes_required" json:"F_Bytes_required,omitempty"` + 
F_Sint32Required *int32 `protobuf:"zigzag32,102,req,name=F_Sint32_required" json:"F_Sint32_required,omitempty"` + F_Sint64Required *int64 `protobuf:"zigzag64,103,req,name=F_Sint64_required" json:"F_Sint64_required,omitempty"` + // Repeated fields of all basic types + F_BoolRepeated []bool `protobuf:"varint,20,rep,name=F_Bool_repeated" json:"F_Bool_repeated,omitempty"` + F_Int32Repeated []int32 `protobuf:"varint,21,rep,name=F_Int32_repeated" json:"F_Int32_repeated,omitempty"` + F_Int64Repeated []int64 `protobuf:"varint,22,rep,name=F_Int64_repeated" json:"F_Int64_repeated,omitempty"` + F_Fixed32Repeated []uint32 `protobuf:"fixed32,23,rep,name=F_Fixed32_repeated" json:"F_Fixed32_repeated,omitempty"` + F_Fixed64Repeated []uint64 `protobuf:"fixed64,24,rep,name=F_Fixed64_repeated" json:"F_Fixed64_repeated,omitempty"` + F_Uint32Repeated []uint32 `protobuf:"varint,25,rep,name=F_Uint32_repeated" json:"F_Uint32_repeated,omitempty"` + F_Uint64Repeated []uint64 `protobuf:"varint,26,rep,name=F_Uint64_repeated" json:"F_Uint64_repeated,omitempty"` + F_FloatRepeated []float32 `protobuf:"fixed32,27,rep,name=F_Float_repeated" json:"F_Float_repeated,omitempty"` + F_DoubleRepeated []float64 `protobuf:"fixed64,28,rep,name=F_Double_repeated" json:"F_Double_repeated,omitempty"` + F_StringRepeated []string `protobuf:"bytes,29,rep,name=F_String_repeated" json:"F_String_repeated,omitempty"` + F_BytesRepeated [][]byte `protobuf:"bytes,201,rep,name=F_Bytes_repeated" json:"F_Bytes_repeated,omitempty"` + F_Sint32Repeated []int32 `protobuf:"zigzag32,202,rep,name=F_Sint32_repeated" json:"F_Sint32_repeated,omitempty"` + F_Sint64Repeated []int64 `protobuf:"zigzag64,203,rep,name=F_Sint64_repeated" json:"F_Sint64_repeated,omitempty"` + // Optional fields of all basic types + F_BoolOptional *bool `protobuf:"varint,30,opt,name=F_Bool_optional" json:"F_Bool_optional,omitempty"` + F_Int32Optional *int32 `protobuf:"varint,31,opt,name=F_Int32_optional" json:"F_Int32_optional,omitempty"` + F_Int64Optional 
*int64 `protobuf:"varint,32,opt,name=F_Int64_optional" json:"F_Int64_optional,omitempty"` + F_Fixed32Optional *uint32 `protobuf:"fixed32,33,opt,name=F_Fixed32_optional" json:"F_Fixed32_optional,omitempty"` + F_Fixed64Optional *uint64 `protobuf:"fixed64,34,opt,name=F_Fixed64_optional" json:"F_Fixed64_optional,omitempty"` + F_Uint32Optional *uint32 `protobuf:"varint,35,opt,name=F_Uint32_optional" json:"F_Uint32_optional,omitempty"` + F_Uint64Optional *uint64 `protobuf:"varint,36,opt,name=F_Uint64_optional" json:"F_Uint64_optional,omitempty"` + F_FloatOptional *float32 `protobuf:"fixed32,37,opt,name=F_Float_optional" json:"F_Float_optional,omitempty"` + F_DoubleOptional *float64 `protobuf:"fixed64,38,opt,name=F_Double_optional" json:"F_Double_optional,omitempty"` + F_StringOptional *string `protobuf:"bytes,39,opt,name=F_String_optional" json:"F_String_optional,omitempty"` + F_BytesOptional []byte `protobuf:"bytes,301,opt,name=F_Bytes_optional" json:"F_Bytes_optional,omitempty"` + F_Sint32Optional *int32 `protobuf:"zigzag32,302,opt,name=F_Sint32_optional" json:"F_Sint32_optional,omitempty"` + F_Sint64Optional *int64 `protobuf:"zigzag64,303,opt,name=F_Sint64_optional" json:"F_Sint64_optional,omitempty"` + // Default-valued fields of all basic types + F_BoolDefaulted *bool `protobuf:"varint,40,opt,name=F_Bool_defaulted,def=1" json:"F_Bool_defaulted,omitempty"` + F_Int32Defaulted *int32 `protobuf:"varint,41,opt,name=F_Int32_defaulted,def=32" json:"F_Int32_defaulted,omitempty"` + F_Int64Defaulted *int64 `protobuf:"varint,42,opt,name=F_Int64_defaulted,def=64" json:"F_Int64_defaulted,omitempty"` + F_Fixed32Defaulted *uint32 `protobuf:"fixed32,43,opt,name=F_Fixed32_defaulted,def=320" json:"F_Fixed32_defaulted,omitempty"` + F_Fixed64Defaulted *uint64 `protobuf:"fixed64,44,opt,name=F_Fixed64_defaulted,def=640" json:"F_Fixed64_defaulted,omitempty"` + F_Uint32Defaulted *uint32 `protobuf:"varint,45,opt,name=F_Uint32_defaulted,def=3200" json:"F_Uint32_defaulted,omitempty"` + 
F_Uint64Defaulted *uint64 `protobuf:"varint,46,opt,name=F_Uint64_defaulted,def=6400" json:"F_Uint64_defaulted,omitempty"` + F_FloatDefaulted *float32 `protobuf:"fixed32,47,opt,name=F_Float_defaulted,def=314159" json:"F_Float_defaulted,omitempty"` + F_DoubleDefaulted *float64 `protobuf:"fixed64,48,opt,name=F_Double_defaulted,def=271828" json:"F_Double_defaulted,omitempty"` + F_StringDefaulted *string `protobuf:"bytes,49,opt,name=F_String_defaulted,def=hello, \"world!\"\n" json:"F_String_defaulted,omitempty"` + F_BytesDefaulted []byte `protobuf:"bytes,401,opt,name=F_Bytes_defaulted,def=Bignose" json:"F_Bytes_defaulted,omitempty"` + F_Sint32Defaulted *int32 `protobuf:"zigzag32,402,opt,name=F_Sint32_defaulted,def=-32" json:"F_Sint32_defaulted,omitempty"` + F_Sint64Defaulted *int64 `protobuf:"zigzag64,403,opt,name=F_Sint64_defaulted,def=-64" json:"F_Sint64_defaulted,omitempty"` + // Packed repeated fields (no string or bytes). + F_BoolRepeatedPacked []bool `protobuf:"varint,50,rep,packed,name=F_Bool_repeated_packed" json:"F_Bool_repeated_packed,omitempty"` + F_Int32RepeatedPacked []int32 `protobuf:"varint,51,rep,packed,name=F_Int32_repeated_packed" json:"F_Int32_repeated_packed,omitempty"` + F_Int64RepeatedPacked []int64 `protobuf:"varint,52,rep,packed,name=F_Int64_repeated_packed" json:"F_Int64_repeated_packed,omitempty"` + F_Fixed32RepeatedPacked []uint32 `protobuf:"fixed32,53,rep,packed,name=F_Fixed32_repeated_packed" json:"F_Fixed32_repeated_packed,omitempty"` + F_Fixed64RepeatedPacked []uint64 `protobuf:"fixed64,54,rep,packed,name=F_Fixed64_repeated_packed" json:"F_Fixed64_repeated_packed,omitempty"` + F_Uint32RepeatedPacked []uint32 `protobuf:"varint,55,rep,packed,name=F_Uint32_repeated_packed" json:"F_Uint32_repeated_packed,omitempty"` + F_Uint64RepeatedPacked []uint64 `protobuf:"varint,56,rep,packed,name=F_Uint64_repeated_packed" json:"F_Uint64_repeated_packed,omitempty"` + F_FloatRepeatedPacked []float32 
`protobuf:"fixed32,57,rep,packed,name=F_Float_repeated_packed" json:"F_Float_repeated_packed,omitempty"` + F_DoubleRepeatedPacked []float64 `protobuf:"fixed64,58,rep,packed,name=F_Double_repeated_packed" json:"F_Double_repeated_packed,omitempty"` + F_Sint32RepeatedPacked []int32 `protobuf:"zigzag32,502,rep,packed,name=F_Sint32_repeated_packed" json:"F_Sint32_repeated_packed,omitempty"` + F_Sint64RepeatedPacked []int64 `protobuf:"zigzag64,503,rep,packed,name=F_Sint64_repeated_packed" json:"F_Sint64_repeated_packed,omitempty"` + Requiredgroup *GoTest_RequiredGroup `protobuf:"group,70,req,name=RequiredGroup" json:"requiredgroup,omitempty"` + Repeatedgroup []*GoTest_RepeatedGroup `protobuf:"group,80,rep,name=RepeatedGroup" json:"repeatedgroup,omitempty"` + Optionalgroup *GoTest_OptionalGroup `protobuf:"group,90,opt,name=OptionalGroup" json:"optionalgroup,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GoTest) Reset() { *m = GoTest{} } +func (m *GoTest) String() string { return proto.CompactTextString(m) } +func (*GoTest) ProtoMessage() {} + +const Default_GoTest_F_BoolDefaulted bool = true +const Default_GoTest_F_Int32Defaulted int32 = 32 +const Default_GoTest_F_Int64Defaulted int64 = 64 +const Default_GoTest_F_Fixed32Defaulted uint32 = 320 +const Default_GoTest_F_Fixed64Defaulted uint64 = 640 +const Default_GoTest_F_Uint32Defaulted uint32 = 3200 +const Default_GoTest_F_Uint64Defaulted uint64 = 6400 +const Default_GoTest_F_FloatDefaulted float32 = 314159 +const Default_GoTest_F_DoubleDefaulted float64 = 271828 +const Default_GoTest_F_StringDefaulted string = "hello, \"world!\"\n" + +var Default_GoTest_F_BytesDefaulted []byte = []byte("Bignose") + +const Default_GoTest_F_Sint32Defaulted int32 = -32 +const Default_GoTest_F_Sint64Defaulted int64 = -64 + +func (m *GoTest) GetKind() GoTest_KIND { + if m != nil && m.Kind != nil { + return *m.Kind + } + return GoTest_VOID +} + +func (m *GoTest) GetTable() string { + if m != nil && m.Table != nil { + return 
*m.Table + } + return "" +} + +func (m *GoTest) GetParam() int32 { + if m != nil && m.Param != nil { + return *m.Param + } + return 0 +} + +func (m *GoTest) GetRequiredField() *GoTestField { + if m != nil { + return m.RequiredField + } + return nil +} + +func (m *GoTest) GetRepeatedField() []*GoTestField { + if m != nil { + return m.RepeatedField + } + return nil +} + +func (m *GoTest) GetOptionalField() *GoTestField { + if m != nil { + return m.OptionalField + } + return nil +} + +func (m *GoTest) GetF_BoolRequired() bool { + if m != nil && m.F_BoolRequired != nil { + return *m.F_BoolRequired + } + return false +} + +func (m *GoTest) GetF_Int32Required() int32 { + if m != nil && m.F_Int32Required != nil { + return *m.F_Int32Required + } + return 0 +} + +func (m *GoTest) GetF_Int64Required() int64 { + if m != nil && m.F_Int64Required != nil { + return *m.F_Int64Required + } + return 0 +} + +func (m *GoTest) GetF_Fixed32Required() uint32 { + if m != nil && m.F_Fixed32Required != nil { + return *m.F_Fixed32Required + } + return 0 +} + +func (m *GoTest) GetF_Fixed64Required() uint64 { + if m != nil && m.F_Fixed64Required != nil { + return *m.F_Fixed64Required + } + return 0 +} + +func (m *GoTest) GetF_Uint32Required() uint32 { + if m != nil && m.F_Uint32Required != nil { + return *m.F_Uint32Required + } + return 0 +} + +func (m *GoTest) GetF_Uint64Required() uint64 { + if m != nil && m.F_Uint64Required != nil { + return *m.F_Uint64Required + } + return 0 +} + +func (m *GoTest) GetF_FloatRequired() float32 { + if m != nil && m.F_FloatRequired != nil { + return *m.F_FloatRequired + } + return 0 +} + +func (m *GoTest) GetF_DoubleRequired() float64 { + if m != nil && m.F_DoubleRequired != nil { + return *m.F_DoubleRequired + } + return 0 +} + +func (m *GoTest) GetF_StringRequired() string { + if m != nil && m.F_StringRequired != nil { + return *m.F_StringRequired + } + return "" +} + +func (m *GoTest) GetF_BytesRequired() []byte { + if m != nil { + return 
m.F_BytesRequired + } + return nil +} + +func (m *GoTest) GetF_Sint32Required() int32 { + if m != nil && m.F_Sint32Required != nil { + return *m.F_Sint32Required + } + return 0 +} + +func (m *GoTest) GetF_Sint64Required() int64 { + if m != nil && m.F_Sint64Required != nil { + return *m.F_Sint64Required + } + return 0 +} + +func (m *GoTest) GetF_BoolRepeated() []bool { + if m != nil { + return m.F_BoolRepeated + } + return nil +} + +func (m *GoTest) GetF_Int32Repeated() []int32 { + if m != nil { + return m.F_Int32Repeated + } + return nil +} + +func (m *GoTest) GetF_Int64Repeated() []int64 { + if m != nil { + return m.F_Int64Repeated + } + return nil +} + +func (m *GoTest) GetF_Fixed32Repeated() []uint32 { + if m != nil { + return m.F_Fixed32Repeated + } + return nil +} + +func (m *GoTest) GetF_Fixed64Repeated() []uint64 { + if m != nil { + return m.F_Fixed64Repeated + } + return nil +} + +func (m *GoTest) GetF_Uint32Repeated() []uint32 { + if m != nil { + return m.F_Uint32Repeated + } + return nil +} + +func (m *GoTest) GetF_Uint64Repeated() []uint64 { + if m != nil { + return m.F_Uint64Repeated + } + return nil +} + +func (m *GoTest) GetF_FloatRepeated() []float32 { + if m != nil { + return m.F_FloatRepeated + } + return nil +} + +func (m *GoTest) GetF_DoubleRepeated() []float64 { + if m != nil { + return m.F_DoubleRepeated + } + return nil +} + +func (m *GoTest) GetF_StringRepeated() []string { + if m != nil { + return m.F_StringRepeated + } + return nil +} + +func (m *GoTest) GetF_BytesRepeated() [][]byte { + if m != nil { + return m.F_BytesRepeated + } + return nil +} + +func (m *GoTest) GetF_Sint32Repeated() []int32 { + if m != nil { + return m.F_Sint32Repeated + } + return nil +} + +func (m *GoTest) GetF_Sint64Repeated() []int64 { + if m != nil { + return m.F_Sint64Repeated + } + return nil +} + +func (m *GoTest) GetF_BoolOptional() bool { + if m != nil && m.F_BoolOptional != nil { + return *m.F_BoolOptional + } + return false +} + +func (m *GoTest) 
GetF_Int32Optional() int32 { + if m != nil && m.F_Int32Optional != nil { + return *m.F_Int32Optional + } + return 0 +} + +func (m *GoTest) GetF_Int64Optional() int64 { + if m != nil && m.F_Int64Optional != nil { + return *m.F_Int64Optional + } + return 0 +} + +func (m *GoTest) GetF_Fixed32Optional() uint32 { + if m != nil && m.F_Fixed32Optional != nil { + return *m.F_Fixed32Optional + } + return 0 +} + +func (m *GoTest) GetF_Fixed64Optional() uint64 { + if m != nil && m.F_Fixed64Optional != nil { + return *m.F_Fixed64Optional + } + return 0 +} + +func (m *GoTest) GetF_Uint32Optional() uint32 { + if m != nil && m.F_Uint32Optional != nil { + return *m.F_Uint32Optional + } + return 0 +} + +func (m *GoTest) GetF_Uint64Optional() uint64 { + if m != nil && m.F_Uint64Optional != nil { + return *m.F_Uint64Optional + } + return 0 +} + +func (m *GoTest) GetF_FloatOptional() float32 { + if m != nil && m.F_FloatOptional != nil { + return *m.F_FloatOptional + } + return 0 +} + +func (m *GoTest) GetF_DoubleOptional() float64 { + if m != nil && m.F_DoubleOptional != nil { + return *m.F_DoubleOptional + } + return 0 +} + +func (m *GoTest) GetF_StringOptional() string { + if m != nil && m.F_StringOptional != nil { + return *m.F_StringOptional + } + return "" +} + +func (m *GoTest) GetF_BytesOptional() []byte { + if m != nil { + return m.F_BytesOptional + } + return nil +} + +func (m *GoTest) GetF_Sint32Optional() int32 { + if m != nil && m.F_Sint32Optional != nil { + return *m.F_Sint32Optional + } + return 0 +} + +func (m *GoTest) GetF_Sint64Optional() int64 { + if m != nil && m.F_Sint64Optional != nil { + return *m.F_Sint64Optional + } + return 0 +} + +func (m *GoTest) GetF_BoolDefaulted() bool { + if m != nil && m.F_BoolDefaulted != nil { + return *m.F_BoolDefaulted + } + return Default_GoTest_F_BoolDefaulted +} + +func (m *GoTest) GetF_Int32Defaulted() int32 { + if m != nil && m.F_Int32Defaulted != nil { + return *m.F_Int32Defaulted + } + return Default_GoTest_F_Int32Defaulted 
+} + +func (m *GoTest) GetF_Int64Defaulted() int64 { + if m != nil && m.F_Int64Defaulted != nil { + return *m.F_Int64Defaulted + } + return Default_GoTest_F_Int64Defaulted +} + +func (m *GoTest) GetF_Fixed32Defaulted() uint32 { + if m != nil && m.F_Fixed32Defaulted != nil { + return *m.F_Fixed32Defaulted + } + return Default_GoTest_F_Fixed32Defaulted +} + +func (m *GoTest) GetF_Fixed64Defaulted() uint64 { + if m != nil && m.F_Fixed64Defaulted != nil { + return *m.F_Fixed64Defaulted + } + return Default_GoTest_F_Fixed64Defaulted +} + +func (m *GoTest) GetF_Uint32Defaulted() uint32 { + if m != nil && m.F_Uint32Defaulted != nil { + return *m.F_Uint32Defaulted + } + return Default_GoTest_F_Uint32Defaulted +} + +func (m *GoTest) GetF_Uint64Defaulted() uint64 { + if m != nil && m.F_Uint64Defaulted != nil { + return *m.F_Uint64Defaulted + } + return Default_GoTest_F_Uint64Defaulted +} + +func (m *GoTest) GetF_FloatDefaulted() float32 { + if m != nil && m.F_FloatDefaulted != nil { + return *m.F_FloatDefaulted + } + return Default_GoTest_F_FloatDefaulted +} + +func (m *GoTest) GetF_DoubleDefaulted() float64 { + if m != nil && m.F_DoubleDefaulted != nil { + return *m.F_DoubleDefaulted + } + return Default_GoTest_F_DoubleDefaulted +} + +func (m *GoTest) GetF_StringDefaulted() string { + if m != nil && m.F_StringDefaulted != nil { + return *m.F_StringDefaulted + } + return Default_GoTest_F_StringDefaulted +} + +func (m *GoTest) GetF_BytesDefaulted() []byte { + if m != nil && m.F_BytesDefaulted != nil { + return m.F_BytesDefaulted + } + return append([]byte(nil), Default_GoTest_F_BytesDefaulted...) 
+} + +func (m *GoTest) GetF_Sint32Defaulted() int32 { + if m != nil && m.F_Sint32Defaulted != nil { + return *m.F_Sint32Defaulted + } + return Default_GoTest_F_Sint32Defaulted +} + +func (m *GoTest) GetF_Sint64Defaulted() int64 { + if m != nil && m.F_Sint64Defaulted != nil { + return *m.F_Sint64Defaulted + } + return Default_GoTest_F_Sint64Defaulted +} + +func (m *GoTest) GetF_BoolRepeatedPacked() []bool { + if m != nil { + return m.F_BoolRepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_Int32RepeatedPacked() []int32 { + if m != nil { + return m.F_Int32RepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_Int64RepeatedPacked() []int64 { + if m != nil { + return m.F_Int64RepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_Fixed32RepeatedPacked() []uint32 { + if m != nil { + return m.F_Fixed32RepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_Fixed64RepeatedPacked() []uint64 { + if m != nil { + return m.F_Fixed64RepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_Uint32RepeatedPacked() []uint32 { + if m != nil { + return m.F_Uint32RepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_Uint64RepeatedPacked() []uint64 { + if m != nil { + return m.F_Uint64RepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_FloatRepeatedPacked() []float32 { + if m != nil { + return m.F_FloatRepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_DoubleRepeatedPacked() []float64 { + if m != nil { + return m.F_DoubleRepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_Sint32RepeatedPacked() []int32 { + if m != nil { + return m.F_Sint32RepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_Sint64RepeatedPacked() []int64 { + if m != nil { + return m.F_Sint64RepeatedPacked + } + return nil +} + +func (m *GoTest) GetRequiredgroup() *GoTest_RequiredGroup { + if m != nil { + return m.Requiredgroup + } + return nil +} + +func (m *GoTest) GetRepeatedgroup() []*GoTest_RepeatedGroup { + if m != nil { + return m.Repeatedgroup + } + 
return nil +} + +func (m *GoTest) GetOptionalgroup() *GoTest_OptionalGroup { + if m != nil { + return m.Optionalgroup + } + return nil +} + +// Required, repeated, and optional groups. +type GoTest_RequiredGroup struct { + RequiredField *string `protobuf:"bytes,71,req" json:"RequiredField,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GoTest_RequiredGroup) Reset() { *m = GoTest_RequiredGroup{} } +func (m *GoTest_RequiredGroup) String() string { return proto.CompactTextString(m) } +func (*GoTest_RequiredGroup) ProtoMessage() {} + +func (m *GoTest_RequiredGroup) GetRequiredField() string { + if m != nil && m.RequiredField != nil { + return *m.RequiredField + } + return "" +} + +type GoTest_RepeatedGroup struct { + RequiredField *string `protobuf:"bytes,81,req" json:"RequiredField,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GoTest_RepeatedGroup) Reset() { *m = GoTest_RepeatedGroup{} } +func (m *GoTest_RepeatedGroup) String() string { return proto.CompactTextString(m) } +func (*GoTest_RepeatedGroup) ProtoMessage() {} + +func (m *GoTest_RepeatedGroup) GetRequiredField() string { + if m != nil && m.RequiredField != nil { + return *m.RequiredField + } + return "" +} + +type GoTest_OptionalGroup struct { + RequiredField *string `protobuf:"bytes,91,req" json:"RequiredField,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GoTest_OptionalGroup) Reset() { *m = GoTest_OptionalGroup{} } +func (m *GoTest_OptionalGroup) String() string { return proto.CompactTextString(m) } +func (*GoTest_OptionalGroup) ProtoMessage() {} + +func (m *GoTest_OptionalGroup) GetRequiredField() string { + if m != nil && m.RequiredField != nil { + return *m.RequiredField + } + return "" +} + +// For testing skipping of unrecognized fields. +// Numbers are all big, larger than tag numbers in GoTestField, +// the message used in the corresponding test. 
+type GoSkipTest struct { + SkipInt32 *int32 `protobuf:"varint,11,req,name=skip_int32" json:"skip_int32,omitempty"` + SkipFixed32 *uint32 `protobuf:"fixed32,12,req,name=skip_fixed32" json:"skip_fixed32,omitempty"` + SkipFixed64 *uint64 `protobuf:"fixed64,13,req,name=skip_fixed64" json:"skip_fixed64,omitempty"` + SkipString *string `protobuf:"bytes,14,req,name=skip_string" json:"skip_string,omitempty"` + Skipgroup *GoSkipTest_SkipGroup `protobuf:"group,15,req,name=SkipGroup" json:"skipgroup,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GoSkipTest) Reset() { *m = GoSkipTest{} } +func (m *GoSkipTest) String() string { return proto.CompactTextString(m) } +func (*GoSkipTest) ProtoMessage() {} + +func (m *GoSkipTest) GetSkipInt32() int32 { + if m != nil && m.SkipInt32 != nil { + return *m.SkipInt32 + } + return 0 +} + +func (m *GoSkipTest) GetSkipFixed32() uint32 { + if m != nil && m.SkipFixed32 != nil { + return *m.SkipFixed32 + } + return 0 +} + +func (m *GoSkipTest) GetSkipFixed64() uint64 { + if m != nil && m.SkipFixed64 != nil { + return *m.SkipFixed64 + } + return 0 +} + +func (m *GoSkipTest) GetSkipString() string { + if m != nil && m.SkipString != nil { + return *m.SkipString + } + return "" +} + +func (m *GoSkipTest) GetSkipgroup() *GoSkipTest_SkipGroup { + if m != nil { + return m.Skipgroup + } + return nil +} + +type GoSkipTest_SkipGroup struct { + GroupInt32 *int32 `protobuf:"varint,16,req,name=group_int32" json:"group_int32,omitempty"` + GroupString *string `protobuf:"bytes,17,req,name=group_string" json:"group_string,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GoSkipTest_SkipGroup) Reset() { *m = GoSkipTest_SkipGroup{} } +func (m *GoSkipTest_SkipGroup) String() string { return proto.CompactTextString(m) } +func (*GoSkipTest_SkipGroup) ProtoMessage() {} + +func (m *GoSkipTest_SkipGroup) GetGroupInt32() int32 { + if m != nil && m.GroupInt32 != nil { + return *m.GroupInt32 + } + return 0 +} + +func (m 
*GoSkipTest_SkipGroup) GetGroupString() string { + if m != nil && m.GroupString != nil { + return *m.GroupString + } + return "" +} + +// For testing packed/non-packed decoder switching. +// A serialized instance of one should be deserializable as the other. +type NonPackedTest struct { + A []int32 `protobuf:"varint,1,rep,name=a" json:"a,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *NonPackedTest) Reset() { *m = NonPackedTest{} } +func (m *NonPackedTest) String() string { return proto.CompactTextString(m) } +func (*NonPackedTest) ProtoMessage() {} + +func (m *NonPackedTest) GetA() []int32 { + if m != nil { + return m.A + } + return nil +} + +type PackedTest struct { + B []int32 `protobuf:"varint,1,rep,packed,name=b" json:"b,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *PackedTest) Reset() { *m = PackedTest{} } +func (m *PackedTest) String() string { return proto.CompactTextString(m) } +func (*PackedTest) ProtoMessage() {} + +func (m *PackedTest) GetB() []int32 { + if m != nil { + return m.B + } + return nil +} + +type MaxTag struct { + // Maximum possible tag number. 
+ LastField *string `protobuf:"bytes,536870911,opt,name=last_field" json:"last_field,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MaxTag) Reset() { *m = MaxTag{} } +func (m *MaxTag) String() string { return proto.CompactTextString(m) } +func (*MaxTag) ProtoMessage() {} + +func (m *MaxTag) GetLastField() string { + if m != nil && m.LastField != nil { + return *m.LastField + } + return "" +} + +type OldMessage struct { + Nested *OldMessage_Nested `protobuf:"bytes,1,opt,name=nested" json:"nested,omitempty"` + Num *int32 `protobuf:"varint,2,opt,name=num" json:"num,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *OldMessage) Reset() { *m = OldMessage{} } +func (m *OldMessage) String() string { return proto.CompactTextString(m) } +func (*OldMessage) ProtoMessage() {} + +func (m *OldMessage) GetNested() *OldMessage_Nested { + if m != nil { + return m.Nested + } + return nil +} + +func (m *OldMessage) GetNum() int32 { + if m != nil && m.Num != nil { + return *m.Num + } + return 0 +} + +type OldMessage_Nested struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *OldMessage_Nested) Reset() { *m = OldMessage_Nested{} } +func (m *OldMessage_Nested) String() string { return proto.CompactTextString(m) } +func (*OldMessage_Nested) ProtoMessage() {} + +func (m *OldMessage_Nested) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +// NewMessage is wire compatible with OldMessage; +// imagine it as a future version. +type NewMessage struct { + Nested *NewMessage_Nested `protobuf:"bytes,1,opt,name=nested" json:"nested,omitempty"` + // This is an int32 in OldMessage. 
+ Num *int64 `protobuf:"varint,2,opt,name=num" json:"num,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *NewMessage) Reset() { *m = NewMessage{} } +func (m *NewMessage) String() string { return proto.CompactTextString(m) } +func (*NewMessage) ProtoMessage() {} + +func (m *NewMessage) GetNested() *NewMessage_Nested { + if m != nil { + return m.Nested + } + return nil +} + +func (m *NewMessage) GetNum() int64 { + if m != nil && m.Num != nil { + return *m.Num + } + return 0 +} + +type NewMessage_Nested struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + FoodGroup *string `protobuf:"bytes,2,opt,name=food_group" json:"food_group,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *NewMessage_Nested) Reset() { *m = NewMessage_Nested{} } +func (m *NewMessage_Nested) String() string { return proto.CompactTextString(m) } +func (*NewMessage_Nested) ProtoMessage() {} + +func (m *NewMessage_Nested) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *NewMessage_Nested) GetFoodGroup() string { + if m != nil && m.FoodGroup != nil { + return *m.FoodGroup + } + return "" +} + +type InnerMessage struct { + Host *string `protobuf:"bytes,1,req,name=host" json:"host,omitempty"` + Port *int32 `protobuf:"varint,2,opt,name=port,def=4000" json:"port,omitempty"` + Connected *bool `protobuf:"varint,3,opt,name=connected" json:"connected,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *InnerMessage) Reset() { *m = InnerMessage{} } +func (m *InnerMessage) String() string { return proto.CompactTextString(m) } +func (*InnerMessage) ProtoMessage() {} + +const Default_InnerMessage_Port int32 = 4000 + +func (m *InnerMessage) GetHost() string { + if m != nil && m.Host != nil { + return *m.Host + } + return "" +} + +func (m *InnerMessage) GetPort() int32 { + if m != nil && m.Port != nil { + return *m.Port + } + return Default_InnerMessage_Port +} + +func (m *InnerMessage) 
GetConnected() bool { + if m != nil && m.Connected != nil { + return *m.Connected + } + return false +} + +type OtherMessage struct { + Key *int64 `protobuf:"varint,1,opt,name=key" json:"key,omitempty"` + Value []byte `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` + Weight *float32 `protobuf:"fixed32,3,opt,name=weight" json:"weight,omitempty"` + Inner *InnerMessage `protobuf:"bytes,4,opt,name=inner" json:"inner,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *OtherMessage) Reset() { *m = OtherMessage{} } +func (m *OtherMessage) String() string { return proto.CompactTextString(m) } +func (*OtherMessage) ProtoMessage() {} + +func (m *OtherMessage) GetKey() int64 { + if m != nil && m.Key != nil { + return *m.Key + } + return 0 +} + +func (m *OtherMessage) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +func (m *OtherMessage) GetWeight() float32 { + if m != nil && m.Weight != nil { + return *m.Weight + } + return 0 +} + +func (m *OtherMessage) GetInner() *InnerMessage { + if m != nil { + return m.Inner + } + return nil +} + +type MyMessage struct { + Count *int32 `protobuf:"varint,1,req,name=count" json:"count,omitempty"` + Name *string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"` + Quote *string `protobuf:"bytes,3,opt,name=quote" json:"quote,omitempty"` + Pet []string `protobuf:"bytes,4,rep,name=pet" json:"pet,omitempty"` + Inner *InnerMessage `protobuf:"bytes,5,opt,name=inner" json:"inner,omitempty"` + Others []*OtherMessage `protobuf:"bytes,6,rep,name=others" json:"others,omitempty"` + RepInner []*InnerMessage `protobuf:"bytes,12,rep,name=rep_inner" json:"rep_inner,omitempty"` + Bikeshed *MyMessage_Color `protobuf:"varint,7,opt,name=bikeshed,enum=testdata.MyMessage_Color" json:"bikeshed,omitempty"` + Somegroup *MyMessage_SomeGroup `protobuf:"group,8,opt,name=SomeGroup" json:"somegroup,omitempty"` + // This field becomes [][]byte in the generated code. 
+ RepBytes [][]byte `protobuf:"bytes,10,rep,name=rep_bytes" json:"rep_bytes,omitempty"` + Bigfloat *float64 `protobuf:"fixed64,11,opt,name=bigfloat" json:"bigfloat,omitempty"` + XXX_extensions map[int32]proto.Extension `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MyMessage) Reset() { *m = MyMessage{} } +func (m *MyMessage) String() string { return proto.CompactTextString(m) } +func (*MyMessage) ProtoMessage() {} + +var extRange_MyMessage = []proto.ExtensionRange{ + {100, 536870911}, +} + +func (*MyMessage) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_MyMessage +} +func (m *MyMessage) ExtensionMap() map[int32]proto.Extension { + if m.XXX_extensions == nil { + m.XXX_extensions = make(map[int32]proto.Extension) + } + return m.XXX_extensions +} + +func (m *MyMessage) GetCount() int32 { + if m != nil && m.Count != nil { + return *m.Count + } + return 0 +} + +func (m *MyMessage) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *MyMessage) GetQuote() string { + if m != nil && m.Quote != nil { + return *m.Quote + } + return "" +} + +func (m *MyMessage) GetPet() []string { + if m != nil { + return m.Pet + } + return nil +} + +func (m *MyMessage) GetInner() *InnerMessage { + if m != nil { + return m.Inner + } + return nil +} + +func (m *MyMessage) GetOthers() []*OtherMessage { + if m != nil { + return m.Others + } + return nil +} + +func (m *MyMessage) GetRepInner() []*InnerMessage { + if m != nil { + return m.RepInner + } + return nil +} + +func (m *MyMessage) GetBikeshed() MyMessage_Color { + if m != nil && m.Bikeshed != nil { + return *m.Bikeshed + } + return MyMessage_RED +} + +func (m *MyMessage) GetSomegroup() *MyMessage_SomeGroup { + if m != nil { + return m.Somegroup + } + return nil +} + +func (m *MyMessage) GetRepBytes() [][]byte { + if m != nil { + return m.RepBytes + } + return nil +} + +func (m *MyMessage) GetBigfloat() float64 { + if m != nil && m.Bigfloat != nil { + return 
*m.Bigfloat + } + return 0 +} + +type MyMessage_SomeGroup struct { + GroupField *int32 `protobuf:"varint,9,opt,name=group_field" json:"group_field,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MyMessage_SomeGroup) Reset() { *m = MyMessage_SomeGroup{} } +func (m *MyMessage_SomeGroup) String() string { return proto.CompactTextString(m) } +func (*MyMessage_SomeGroup) ProtoMessage() {} + +func (m *MyMessage_SomeGroup) GetGroupField() int32 { + if m != nil && m.GroupField != nil { + return *m.GroupField + } + return 0 +} + +type Ext struct { + Data *string `protobuf:"bytes,1,opt,name=data" json:"data,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Ext) Reset() { *m = Ext{} } +func (m *Ext) String() string { return proto.CompactTextString(m) } +func (*Ext) ProtoMessage() {} + +func (m *Ext) GetData() string { + if m != nil && m.Data != nil { + return *m.Data + } + return "" +} + +var E_Ext_More = &proto.ExtensionDesc{ + ExtendedType: (*MyMessage)(nil), + ExtensionType: (*Ext)(nil), + Field: 103, + Name: "testdata.Ext.more", + Tag: "bytes,103,opt,name=more", +} + +var E_Ext_Text = &proto.ExtensionDesc{ + ExtendedType: (*MyMessage)(nil), + ExtensionType: (*string)(nil), + Field: 104, + Name: "testdata.Ext.text", + Tag: "bytes,104,opt,name=text", +} + +var E_Ext_Number = &proto.ExtensionDesc{ + ExtendedType: (*MyMessage)(nil), + ExtensionType: (*int32)(nil), + Field: 105, + Name: "testdata.Ext.number", + Tag: "varint,105,opt,name=number", +} + +type DefaultsMessage struct { + XXX_extensions map[int32]proto.Extension `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *DefaultsMessage) Reset() { *m = DefaultsMessage{} } +func (m *DefaultsMessage) String() string { return proto.CompactTextString(m) } +func (*DefaultsMessage) ProtoMessage() {} + +var extRange_DefaultsMessage = []proto.ExtensionRange{ + {100, 536870911}, +} + +func (*DefaultsMessage) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_DefaultsMessage +} 
+func (m *DefaultsMessage) ExtensionMap() map[int32]proto.Extension { + if m.XXX_extensions == nil { + m.XXX_extensions = make(map[int32]proto.Extension) + } + return m.XXX_extensions +} + +type MyMessageSet struct { + XXX_extensions map[int32]proto.Extension `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MyMessageSet) Reset() { *m = MyMessageSet{} } +func (m *MyMessageSet) String() string { return proto.CompactTextString(m) } +func (*MyMessageSet) ProtoMessage() {} + +func (m *MyMessageSet) Marshal() ([]byte, error) { + return proto.MarshalMessageSet(m.ExtensionMap()) +} +func (m *MyMessageSet) Unmarshal(buf []byte) error { + return proto.UnmarshalMessageSet(buf, m.ExtensionMap()) +} +func (m *MyMessageSet) MarshalJSON() ([]byte, error) { + return proto.MarshalMessageSetJSON(m.XXX_extensions) +} +func (m *MyMessageSet) UnmarshalJSON(buf []byte) error { + return proto.UnmarshalMessageSetJSON(buf, m.XXX_extensions) +} + +// ensure MyMessageSet satisfies proto.Marshaler and proto.Unmarshaler +var _ proto.Marshaler = (*MyMessageSet)(nil) +var _ proto.Unmarshaler = (*MyMessageSet)(nil) + +var extRange_MyMessageSet = []proto.ExtensionRange{ + {100, 2147483646}, +} + +func (*MyMessageSet) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_MyMessageSet +} +func (m *MyMessageSet) ExtensionMap() map[int32]proto.Extension { + if m.XXX_extensions == nil { + m.XXX_extensions = make(map[int32]proto.Extension) + } + return m.XXX_extensions +} + +type Empty struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *Empty) Reset() { *m = Empty{} } +func (m *Empty) String() string { return proto.CompactTextString(m) } +func (*Empty) ProtoMessage() {} + +type MessageList struct { + Message []*MessageList_Message `protobuf:"group,1,rep" json:"message,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MessageList) Reset() { *m = MessageList{} } +func (m *MessageList) String() string { return proto.CompactTextString(m) } +func (*MessageList) 
ProtoMessage() {} + +func (m *MessageList) GetMessage() []*MessageList_Message { + if m != nil { + return m.Message + } + return nil +} + +type MessageList_Message struct { + Name *string `protobuf:"bytes,2,req,name=name" json:"name,omitempty"` + Count *int32 `protobuf:"varint,3,req,name=count" json:"count,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MessageList_Message) Reset() { *m = MessageList_Message{} } +func (m *MessageList_Message) String() string { return proto.CompactTextString(m) } +func (*MessageList_Message) ProtoMessage() {} + +func (m *MessageList_Message) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *MessageList_Message) GetCount() int32 { + if m != nil && m.Count != nil { + return *m.Count + } + return 0 +} + +type Strings struct { + StringField *string `protobuf:"bytes,1,opt,name=string_field" json:"string_field,omitempty"` + BytesField []byte `protobuf:"bytes,2,opt,name=bytes_field" json:"bytes_field,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Strings) Reset() { *m = Strings{} } +func (m *Strings) String() string { return proto.CompactTextString(m) } +func (*Strings) ProtoMessage() {} + +func (m *Strings) GetStringField() string { + if m != nil && m.StringField != nil { + return *m.StringField + } + return "" +} + +func (m *Strings) GetBytesField() []byte { + if m != nil { + return m.BytesField + } + return nil +} + +type Defaults struct { + // Default-valued fields of all basic types. + // Same as GoTest, but copied here to make testing easier. 
+ F_Bool *bool `protobuf:"varint,1,opt,def=1" json:"F_Bool,omitempty"` + F_Int32 *int32 `protobuf:"varint,2,opt,def=32" json:"F_Int32,omitempty"` + F_Int64 *int64 `protobuf:"varint,3,opt,def=64" json:"F_Int64,omitempty"` + F_Fixed32 *uint32 `protobuf:"fixed32,4,opt,def=320" json:"F_Fixed32,omitempty"` + F_Fixed64 *uint64 `protobuf:"fixed64,5,opt,def=640" json:"F_Fixed64,omitempty"` + F_Uint32 *uint32 `protobuf:"varint,6,opt,def=3200" json:"F_Uint32,omitempty"` + F_Uint64 *uint64 `protobuf:"varint,7,opt,def=6400" json:"F_Uint64,omitempty"` + F_Float *float32 `protobuf:"fixed32,8,opt,def=314159" json:"F_Float,omitempty"` + F_Double *float64 `protobuf:"fixed64,9,opt,def=271828" json:"F_Double,omitempty"` + F_String *string `protobuf:"bytes,10,opt,def=hello, \"world!\"\n" json:"F_String,omitempty"` + F_Bytes []byte `protobuf:"bytes,11,opt,def=Bignose" json:"F_Bytes,omitempty"` + F_Sint32 *int32 `protobuf:"zigzag32,12,opt,def=-32" json:"F_Sint32,omitempty"` + F_Sint64 *int64 `protobuf:"zigzag64,13,opt,def=-64" json:"F_Sint64,omitempty"` + F_Enum *Defaults_Color `protobuf:"varint,14,opt,enum=testdata.Defaults_Color,def=1" json:"F_Enum,omitempty"` + // More fields with crazy defaults. + F_Pinf *float32 `protobuf:"fixed32,15,opt,def=inf" json:"F_Pinf,omitempty"` + F_Ninf *float32 `protobuf:"fixed32,16,opt,def=-inf" json:"F_Ninf,omitempty"` + F_Nan *float32 `protobuf:"fixed32,17,opt,def=nan" json:"F_Nan,omitempty"` + // Sub-message. + Sub *SubDefaults `protobuf:"bytes,18,opt,name=sub" json:"sub,omitempty"` + // Redundant but explicit defaults. 
+ StrZero *string `protobuf:"bytes,19,opt,name=str_zero,def=" json:"str_zero,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Defaults) Reset() { *m = Defaults{} } +func (m *Defaults) String() string { return proto.CompactTextString(m) } +func (*Defaults) ProtoMessage() {} + +const Default_Defaults_F_Bool bool = true +const Default_Defaults_F_Int32 int32 = 32 +const Default_Defaults_F_Int64 int64 = 64 +const Default_Defaults_F_Fixed32 uint32 = 320 +const Default_Defaults_F_Fixed64 uint64 = 640 +const Default_Defaults_F_Uint32 uint32 = 3200 +const Default_Defaults_F_Uint64 uint64 = 6400 +const Default_Defaults_F_Float float32 = 314159 +const Default_Defaults_F_Double float64 = 271828 +const Default_Defaults_F_String string = "hello, \"world!\"\n" + +var Default_Defaults_F_Bytes []byte = []byte("Bignose") + +const Default_Defaults_F_Sint32 int32 = -32 +const Default_Defaults_F_Sint64 int64 = -64 +const Default_Defaults_F_Enum Defaults_Color = Defaults_GREEN + +var Default_Defaults_F_Pinf float32 = float32(math.Inf(1)) +var Default_Defaults_F_Ninf float32 = float32(math.Inf(-1)) +var Default_Defaults_F_Nan float32 = float32(math.NaN()) + +func (m *Defaults) GetF_Bool() bool { + if m != nil && m.F_Bool != nil { + return *m.F_Bool + } + return Default_Defaults_F_Bool +} + +func (m *Defaults) GetF_Int32() int32 { + if m != nil && m.F_Int32 != nil { + return *m.F_Int32 + } + return Default_Defaults_F_Int32 +} + +func (m *Defaults) GetF_Int64() int64 { + if m != nil && m.F_Int64 != nil { + return *m.F_Int64 + } + return Default_Defaults_F_Int64 +} + +func (m *Defaults) GetF_Fixed32() uint32 { + if m != nil && m.F_Fixed32 != nil { + return *m.F_Fixed32 + } + return Default_Defaults_F_Fixed32 +} + +func (m *Defaults) GetF_Fixed64() uint64 { + if m != nil && m.F_Fixed64 != nil { + return *m.F_Fixed64 + } + return Default_Defaults_F_Fixed64 +} + +func (m *Defaults) GetF_Uint32() uint32 { + if m != nil && m.F_Uint32 != nil { + return *m.F_Uint32 + } + return 
Default_Defaults_F_Uint32 +} + +func (m *Defaults) GetF_Uint64() uint64 { + if m != nil && m.F_Uint64 != nil { + return *m.F_Uint64 + } + return Default_Defaults_F_Uint64 +} + +func (m *Defaults) GetF_Float() float32 { + if m != nil && m.F_Float != nil { + return *m.F_Float + } + return Default_Defaults_F_Float +} + +func (m *Defaults) GetF_Double() float64 { + if m != nil && m.F_Double != nil { + return *m.F_Double + } + return Default_Defaults_F_Double +} + +func (m *Defaults) GetF_String() string { + if m != nil && m.F_String != nil { + return *m.F_String + } + return Default_Defaults_F_String +} + +func (m *Defaults) GetF_Bytes() []byte { + if m != nil && m.F_Bytes != nil { + return m.F_Bytes + } + return append([]byte(nil), Default_Defaults_F_Bytes...) +} + +func (m *Defaults) GetF_Sint32() int32 { + if m != nil && m.F_Sint32 != nil { + return *m.F_Sint32 + } + return Default_Defaults_F_Sint32 +} + +func (m *Defaults) GetF_Sint64() int64 { + if m != nil && m.F_Sint64 != nil { + return *m.F_Sint64 + } + return Default_Defaults_F_Sint64 +} + +func (m *Defaults) GetF_Enum() Defaults_Color { + if m != nil && m.F_Enum != nil { + return *m.F_Enum + } + return Default_Defaults_F_Enum +} + +func (m *Defaults) GetF_Pinf() float32 { + if m != nil && m.F_Pinf != nil { + return *m.F_Pinf + } + return Default_Defaults_F_Pinf +} + +func (m *Defaults) GetF_Ninf() float32 { + if m != nil && m.F_Ninf != nil { + return *m.F_Ninf + } + return Default_Defaults_F_Ninf +} + +func (m *Defaults) GetF_Nan() float32 { + if m != nil && m.F_Nan != nil { + return *m.F_Nan + } + return Default_Defaults_F_Nan +} + +func (m *Defaults) GetSub() *SubDefaults { + if m != nil { + return m.Sub + } + return nil +} + +func (m *Defaults) GetStrZero() string { + if m != nil && m.StrZero != nil { + return *m.StrZero + } + return "" +} + +type SubDefaults struct { + N *int64 `protobuf:"varint,1,opt,name=n,def=7" json:"n,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *SubDefaults) 
Reset() { *m = SubDefaults{} } +func (m *SubDefaults) String() string { return proto.CompactTextString(m) } +func (*SubDefaults) ProtoMessage() {} + +const Default_SubDefaults_N int64 = 7 + +func (m *SubDefaults) GetN() int64 { + if m != nil && m.N != nil { + return *m.N + } + return Default_SubDefaults_N +} + +type RepeatedEnum struct { + Color []RepeatedEnum_Color `protobuf:"varint,1,rep,name=color,enum=testdata.RepeatedEnum_Color" json:"color,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *RepeatedEnum) Reset() { *m = RepeatedEnum{} } +func (m *RepeatedEnum) String() string { return proto.CompactTextString(m) } +func (*RepeatedEnum) ProtoMessage() {} + +func (m *RepeatedEnum) GetColor() []RepeatedEnum_Color { + if m != nil { + return m.Color + } + return nil +} + +type MoreRepeated struct { + Bools []bool `protobuf:"varint,1,rep,name=bools" json:"bools,omitempty"` + BoolsPacked []bool `protobuf:"varint,2,rep,packed,name=bools_packed" json:"bools_packed,omitempty"` + Ints []int32 `protobuf:"varint,3,rep,name=ints" json:"ints,omitempty"` + IntsPacked []int32 `protobuf:"varint,4,rep,packed,name=ints_packed" json:"ints_packed,omitempty"` + Int64SPacked []int64 `protobuf:"varint,7,rep,packed,name=int64s_packed" json:"int64s_packed,omitempty"` + Strings []string `protobuf:"bytes,5,rep,name=strings" json:"strings,omitempty"` + Fixeds []uint32 `protobuf:"fixed32,6,rep,name=fixeds" json:"fixeds,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MoreRepeated) Reset() { *m = MoreRepeated{} } +func (m *MoreRepeated) String() string { return proto.CompactTextString(m) } +func (*MoreRepeated) ProtoMessage() {} + +func (m *MoreRepeated) GetBools() []bool { + if m != nil { + return m.Bools + } + return nil +} + +func (m *MoreRepeated) GetBoolsPacked() []bool { + if m != nil { + return m.BoolsPacked + } + return nil +} + +func (m *MoreRepeated) GetInts() []int32 { + if m != nil { + return m.Ints + } + return nil +} + +func (m *MoreRepeated) 
GetIntsPacked() []int32 { + if m != nil { + return m.IntsPacked + } + return nil +} + +func (m *MoreRepeated) GetInt64SPacked() []int64 { + if m != nil { + return m.Int64SPacked + } + return nil +} + +func (m *MoreRepeated) GetStrings() []string { + if m != nil { + return m.Strings + } + return nil +} + +func (m *MoreRepeated) GetFixeds() []uint32 { + if m != nil { + return m.Fixeds + } + return nil +} + +type GroupOld struct { + G *GroupOld_G `protobuf:"group,101,opt" json:"g,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GroupOld) Reset() { *m = GroupOld{} } +func (m *GroupOld) String() string { return proto.CompactTextString(m) } +func (*GroupOld) ProtoMessage() {} + +func (m *GroupOld) GetG() *GroupOld_G { + if m != nil { + return m.G + } + return nil +} + +type GroupOld_G struct { + X *int32 `protobuf:"varint,2,opt,name=x" json:"x,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GroupOld_G) Reset() { *m = GroupOld_G{} } +func (m *GroupOld_G) String() string { return proto.CompactTextString(m) } +func (*GroupOld_G) ProtoMessage() {} + +func (m *GroupOld_G) GetX() int32 { + if m != nil && m.X != nil { + return *m.X + } + return 0 +} + +type GroupNew struct { + G *GroupNew_G `protobuf:"group,101,opt" json:"g,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GroupNew) Reset() { *m = GroupNew{} } +func (m *GroupNew) String() string { return proto.CompactTextString(m) } +func (*GroupNew) ProtoMessage() {} + +func (m *GroupNew) GetG() *GroupNew_G { + if m != nil { + return m.G + } + return nil +} + +type GroupNew_G struct { + X *int32 `protobuf:"varint,2,opt,name=x" json:"x,omitempty"` + Y *int32 `protobuf:"varint,3,opt,name=y" json:"y,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GroupNew_G) Reset() { *m = GroupNew_G{} } +func (m *GroupNew_G) String() string { return proto.CompactTextString(m) } +func (*GroupNew_G) ProtoMessage() {} + +func (m *GroupNew_G) GetX() int32 { + if m != nil && m.X != nil { + 
return *m.X + } + return 0 +} + +func (m *GroupNew_G) GetY() int32 { + if m != nil && m.Y != nil { + return *m.Y + } + return 0 +} + +type FloatingPoint struct { + F *float64 `protobuf:"fixed64,1,req,name=f" json:"f,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *FloatingPoint) Reset() { *m = FloatingPoint{} } +func (m *FloatingPoint) String() string { return proto.CompactTextString(m) } +func (*FloatingPoint) ProtoMessage() {} + +func (m *FloatingPoint) GetF() float64 { + if m != nil && m.F != nil { + return *m.F + } + return 0 +} + +type MessageWithMap struct { + NameMapping map[int32]string `protobuf:"bytes,1,rep,name=name_mapping" json:"name_mapping,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + MsgMapping map[int64]*FloatingPoint `protobuf:"bytes,2,rep,name=msg_mapping" json:"msg_mapping,omitempty" protobuf_key:"zigzag64,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + ByteMapping map[bool][]byte `protobuf:"bytes,3,rep,name=byte_mapping" json:"byte_mapping,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + StrToStr map[string]string `protobuf:"bytes,4,rep,name=str_to_str" json:"str_to_str,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MessageWithMap) Reset() { *m = MessageWithMap{} } +func (m *MessageWithMap) String() string { return proto.CompactTextString(m) } +func (*MessageWithMap) ProtoMessage() {} + +func (m *MessageWithMap) GetNameMapping() map[int32]string { + if m != nil { + return m.NameMapping + } + return nil +} + +func (m *MessageWithMap) GetMsgMapping() map[int64]*FloatingPoint { + if m != nil { + return m.MsgMapping + } + return nil +} + +func (m *MessageWithMap) GetByteMapping() map[bool][]byte { + if m != nil { + return m.ByteMapping + } + return nil +} + +func (m *MessageWithMap) GetStrToStr() map[string]string { + if m != nil { + return m.StrToStr + 
} + return nil +} + +var E_Greeting = &proto.ExtensionDesc{ + ExtendedType: (*MyMessage)(nil), + ExtensionType: ([]string)(nil), + Field: 106, + Name: "testdata.greeting", + Tag: "bytes,106,rep,name=greeting", +} + +var E_NoDefaultDouble = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*float64)(nil), + Field: 101, + Name: "testdata.no_default_double", + Tag: "fixed64,101,opt,name=no_default_double", +} + +var E_NoDefaultFloat = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*float32)(nil), + Field: 102, + Name: "testdata.no_default_float", + Tag: "fixed32,102,opt,name=no_default_float", +} + +var E_NoDefaultInt32 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*int32)(nil), + Field: 103, + Name: "testdata.no_default_int32", + Tag: "varint,103,opt,name=no_default_int32", +} + +var E_NoDefaultInt64 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*int64)(nil), + Field: 104, + Name: "testdata.no_default_int64", + Tag: "varint,104,opt,name=no_default_int64", +} + +var E_NoDefaultUint32 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*uint32)(nil), + Field: 105, + Name: "testdata.no_default_uint32", + Tag: "varint,105,opt,name=no_default_uint32", +} + +var E_NoDefaultUint64 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*uint64)(nil), + Field: 106, + Name: "testdata.no_default_uint64", + Tag: "varint,106,opt,name=no_default_uint64", +} + +var E_NoDefaultSint32 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*int32)(nil), + Field: 107, + Name: "testdata.no_default_sint32", + Tag: "zigzag32,107,opt,name=no_default_sint32", +} + +var E_NoDefaultSint64 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*int64)(nil), + Field: 108, + Name: "testdata.no_default_sint64", + Tag: 
"zigzag64,108,opt,name=no_default_sint64", +} + +var E_NoDefaultFixed32 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*uint32)(nil), + Field: 109, + Name: "testdata.no_default_fixed32", + Tag: "fixed32,109,opt,name=no_default_fixed32", +} + +var E_NoDefaultFixed64 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*uint64)(nil), + Field: 110, + Name: "testdata.no_default_fixed64", + Tag: "fixed64,110,opt,name=no_default_fixed64", +} + +var E_NoDefaultSfixed32 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*int32)(nil), + Field: 111, + Name: "testdata.no_default_sfixed32", + Tag: "fixed32,111,opt,name=no_default_sfixed32", +} + +var E_NoDefaultSfixed64 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*int64)(nil), + Field: 112, + Name: "testdata.no_default_sfixed64", + Tag: "fixed64,112,opt,name=no_default_sfixed64", +} + +var E_NoDefaultBool = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*bool)(nil), + Field: 113, + Name: "testdata.no_default_bool", + Tag: "varint,113,opt,name=no_default_bool", +} + +var E_NoDefaultString = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*string)(nil), + Field: 114, + Name: "testdata.no_default_string", + Tag: "bytes,114,opt,name=no_default_string", +} + +var E_NoDefaultBytes = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: ([]byte)(nil), + Field: 115, + Name: "testdata.no_default_bytes", + Tag: "bytes,115,opt,name=no_default_bytes", +} + +var E_NoDefaultEnum = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*DefaultsMessage_DefaultsEnum)(nil), + Field: 116, + Name: "testdata.no_default_enum", + Tag: "varint,116,opt,name=no_default_enum,enum=testdata.DefaultsMessage_DefaultsEnum", +} + +var E_DefaultDouble = &proto.ExtensionDesc{ + ExtendedType: 
(*DefaultsMessage)(nil), + ExtensionType: (*float64)(nil), + Field: 201, + Name: "testdata.default_double", + Tag: "fixed64,201,opt,name=default_double,def=3.1415", +} + +var E_DefaultFloat = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*float32)(nil), + Field: 202, + Name: "testdata.default_float", + Tag: "fixed32,202,opt,name=default_float,def=3.14", +} + +var E_DefaultInt32 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*int32)(nil), + Field: 203, + Name: "testdata.default_int32", + Tag: "varint,203,opt,name=default_int32,def=42", +} + +var E_DefaultInt64 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*int64)(nil), + Field: 204, + Name: "testdata.default_int64", + Tag: "varint,204,opt,name=default_int64,def=43", +} + +var E_DefaultUint32 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*uint32)(nil), + Field: 205, + Name: "testdata.default_uint32", + Tag: "varint,205,opt,name=default_uint32,def=44", +} + +var E_DefaultUint64 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*uint64)(nil), + Field: 206, + Name: "testdata.default_uint64", + Tag: "varint,206,opt,name=default_uint64,def=45", +} + +var E_DefaultSint32 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*int32)(nil), + Field: 207, + Name: "testdata.default_sint32", + Tag: "zigzag32,207,opt,name=default_sint32,def=46", +} + +var E_DefaultSint64 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*int64)(nil), + Field: 208, + Name: "testdata.default_sint64", + Tag: "zigzag64,208,opt,name=default_sint64,def=47", +} + +var E_DefaultFixed32 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*uint32)(nil), + Field: 209, + Name: "testdata.default_fixed32", + Tag: "fixed32,209,opt,name=default_fixed32,def=48", +} + +var E_DefaultFixed64 = 
&proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*uint64)(nil), + Field: 210, + Name: "testdata.default_fixed64", + Tag: "fixed64,210,opt,name=default_fixed64,def=49", +} + +var E_DefaultSfixed32 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*int32)(nil), + Field: 211, + Name: "testdata.default_sfixed32", + Tag: "fixed32,211,opt,name=default_sfixed32,def=50", +} + +var E_DefaultSfixed64 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*int64)(nil), + Field: 212, + Name: "testdata.default_sfixed64", + Tag: "fixed64,212,opt,name=default_sfixed64,def=51", +} + +var E_DefaultBool = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*bool)(nil), + Field: 213, + Name: "testdata.default_bool", + Tag: "varint,213,opt,name=default_bool,def=1", +} + +var E_DefaultString = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*string)(nil), + Field: 214, + Name: "testdata.default_string", + Tag: "bytes,214,opt,name=default_string,def=Hello, string", +} + +var E_DefaultBytes = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: ([]byte)(nil), + Field: 215, + Name: "testdata.default_bytes", + Tag: "bytes,215,opt,name=default_bytes,def=Hello, bytes", +} + +var E_DefaultEnum = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*DefaultsMessage_DefaultsEnum)(nil), + Field: 216, + Name: "testdata.default_enum", + Tag: "varint,216,opt,name=default_enum,enum=testdata.DefaultsMessage_DefaultsEnum,def=1", +} + +var E_X201 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 201, + Name: "testdata.x201", + Tag: "bytes,201,opt,name=x201", +} + +var E_X202 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 202, + Name: "testdata.x202", + Tag: "bytes,202,opt,name=x202", +} + 
+var E_X203 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 203, + Name: "testdata.x203", + Tag: "bytes,203,opt,name=x203", +} + +var E_X204 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 204, + Name: "testdata.x204", + Tag: "bytes,204,opt,name=x204", +} + +var E_X205 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 205, + Name: "testdata.x205", + Tag: "bytes,205,opt,name=x205", +} + +var E_X206 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 206, + Name: "testdata.x206", + Tag: "bytes,206,opt,name=x206", +} + +var E_X207 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 207, + Name: "testdata.x207", + Tag: "bytes,207,opt,name=x207", +} + +var E_X208 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 208, + Name: "testdata.x208", + Tag: "bytes,208,opt,name=x208", +} + +var E_X209 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 209, + Name: "testdata.x209", + Tag: "bytes,209,opt,name=x209", +} + +var E_X210 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 210, + Name: "testdata.x210", + Tag: "bytes,210,opt,name=x210", +} + +var E_X211 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 211, + Name: "testdata.x211", + Tag: "bytes,211,opt,name=x211", +} + +var E_X212 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 212, + Name: "testdata.x212", + Tag: "bytes,212,opt,name=x212", +} + +var E_X213 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 213, + Name: "testdata.x213", + Tag: 
"bytes,213,opt,name=x213", +} + +var E_X214 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 214, + Name: "testdata.x214", + Tag: "bytes,214,opt,name=x214", +} + +var E_X215 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 215, + Name: "testdata.x215", + Tag: "bytes,215,opt,name=x215", +} + +var E_X216 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 216, + Name: "testdata.x216", + Tag: "bytes,216,opt,name=x216", +} + +var E_X217 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 217, + Name: "testdata.x217", + Tag: "bytes,217,opt,name=x217", +} + +var E_X218 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 218, + Name: "testdata.x218", + Tag: "bytes,218,opt,name=x218", +} + +var E_X219 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 219, + Name: "testdata.x219", + Tag: "bytes,219,opt,name=x219", +} + +var E_X220 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 220, + Name: "testdata.x220", + Tag: "bytes,220,opt,name=x220", +} + +var E_X221 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 221, + Name: "testdata.x221", + Tag: "bytes,221,opt,name=x221", +} + +var E_X222 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 222, + Name: "testdata.x222", + Tag: "bytes,222,opt,name=x222", +} + +var E_X223 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 223, + Name: "testdata.x223", + Tag: "bytes,223,opt,name=x223", +} + +var E_X224 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 224, + Name: 
"testdata.x224", + Tag: "bytes,224,opt,name=x224", +} + +var E_X225 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 225, + Name: "testdata.x225", + Tag: "bytes,225,opt,name=x225", +} + +var E_X226 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 226, + Name: "testdata.x226", + Tag: "bytes,226,opt,name=x226", +} + +var E_X227 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 227, + Name: "testdata.x227", + Tag: "bytes,227,opt,name=x227", +} + +var E_X228 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 228, + Name: "testdata.x228", + Tag: "bytes,228,opt,name=x228", +} + +var E_X229 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 229, + Name: "testdata.x229", + Tag: "bytes,229,opt,name=x229", +} + +var E_X230 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 230, + Name: "testdata.x230", + Tag: "bytes,230,opt,name=x230", +} + +var E_X231 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 231, + Name: "testdata.x231", + Tag: "bytes,231,opt,name=x231", +} + +var E_X232 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 232, + Name: "testdata.x232", + Tag: "bytes,232,opt,name=x232", +} + +var E_X233 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 233, + Name: "testdata.x233", + Tag: "bytes,233,opt,name=x233", +} + +var E_X234 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 234, + Name: "testdata.x234", + Tag: "bytes,234,opt,name=x234", +} + +var E_X235 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: 
(*Empty)(nil), + Field: 235, + Name: "testdata.x235", + Tag: "bytes,235,opt,name=x235", +} + +var E_X236 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 236, + Name: "testdata.x236", + Tag: "bytes,236,opt,name=x236", +} + +var E_X237 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 237, + Name: "testdata.x237", + Tag: "bytes,237,opt,name=x237", +} + +var E_X238 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 238, + Name: "testdata.x238", + Tag: "bytes,238,opt,name=x238", +} + +var E_X239 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 239, + Name: "testdata.x239", + Tag: "bytes,239,opt,name=x239", +} + +var E_X240 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 240, + Name: "testdata.x240", + Tag: "bytes,240,opt,name=x240", +} + +var E_X241 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 241, + Name: "testdata.x241", + Tag: "bytes,241,opt,name=x241", +} + +var E_X242 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 242, + Name: "testdata.x242", + Tag: "bytes,242,opt,name=x242", +} + +var E_X243 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 243, + Name: "testdata.x243", + Tag: "bytes,243,opt,name=x243", +} + +var E_X244 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 244, + Name: "testdata.x244", + Tag: "bytes,244,opt,name=x244", +} + +var E_X245 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 245, + Name: "testdata.x245", + Tag: "bytes,245,opt,name=x245", +} + +var E_X246 = &proto.ExtensionDesc{ + ExtendedType: 
(*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 246, + Name: "testdata.x246", + Tag: "bytes,246,opt,name=x246", +} + +var E_X247 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 247, + Name: "testdata.x247", + Tag: "bytes,247,opt,name=x247", +} + +var E_X248 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 248, + Name: "testdata.x248", + Tag: "bytes,248,opt,name=x248", +} + +var E_X249 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 249, + Name: "testdata.x249", + Tag: "bytes,249,opt,name=x249", +} + +var E_X250 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 250, + Name: "testdata.x250", + Tag: "bytes,250,opt,name=x250", +} + +func init() { + proto.RegisterEnum("testdata.FOO", FOO_name, FOO_value) + proto.RegisterEnum("testdata.GoTest_KIND", GoTest_KIND_name, GoTest_KIND_value) + proto.RegisterEnum("testdata.MyMessage_Color", MyMessage_Color_name, MyMessage_Color_value) + proto.RegisterEnum("testdata.DefaultsMessage_DefaultsEnum", DefaultsMessage_DefaultsEnum_name, DefaultsMessage_DefaultsEnum_value) + proto.RegisterEnum("testdata.Defaults_Color", Defaults_Color_name, Defaults_Color_value) + proto.RegisterEnum("testdata.RepeatedEnum_Color", RepeatedEnum_Color_name, RepeatedEnum_Color_value) + proto.RegisterExtension(E_Ext_More) + proto.RegisterExtension(E_Ext_Text) + proto.RegisterExtension(E_Ext_Number) + proto.RegisterExtension(E_Greeting) + proto.RegisterExtension(E_NoDefaultDouble) + proto.RegisterExtension(E_NoDefaultFloat) + proto.RegisterExtension(E_NoDefaultInt32) + proto.RegisterExtension(E_NoDefaultInt64) + proto.RegisterExtension(E_NoDefaultUint32) + proto.RegisterExtension(E_NoDefaultUint64) + proto.RegisterExtension(E_NoDefaultSint32) + proto.RegisterExtension(E_NoDefaultSint64) + 
proto.RegisterExtension(E_NoDefaultFixed32) + proto.RegisterExtension(E_NoDefaultFixed64) + proto.RegisterExtension(E_NoDefaultSfixed32) + proto.RegisterExtension(E_NoDefaultSfixed64) + proto.RegisterExtension(E_NoDefaultBool) + proto.RegisterExtension(E_NoDefaultString) + proto.RegisterExtension(E_NoDefaultBytes) + proto.RegisterExtension(E_NoDefaultEnum) + proto.RegisterExtension(E_DefaultDouble) + proto.RegisterExtension(E_DefaultFloat) + proto.RegisterExtension(E_DefaultInt32) + proto.RegisterExtension(E_DefaultInt64) + proto.RegisterExtension(E_DefaultUint32) + proto.RegisterExtension(E_DefaultUint64) + proto.RegisterExtension(E_DefaultSint32) + proto.RegisterExtension(E_DefaultSint64) + proto.RegisterExtension(E_DefaultFixed32) + proto.RegisterExtension(E_DefaultFixed64) + proto.RegisterExtension(E_DefaultSfixed32) + proto.RegisterExtension(E_DefaultSfixed64) + proto.RegisterExtension(E_DefaultBool) + proto.RegisterExtension(E_DefaultString) + proto.RegisterExtension(E_DefaultBytes) + proto.RegisterExtension(E_DefaultEnum) + proto.RegisterExtension(E_X201) + proto.RegisterExtension(E_X202) + proto.RegisterExtension(E_X203) + proto.RegisterExtension(E_X204) + proto.RegisterExtension(E_X205) + proto.RegisterExtension(E_X206) + proto.RegisterExtension(E_X207) + proto.RegisterExtension(E_X208) + proto.RegisterExtension(E_X209) + proto.RegisterExtension(E_X210) + proto.RegisterExtension(E_X211) + proto.RegisterExtension(E_X212) + proto.RegisterExtension(E_X213) + proto.RegisterExtension(E_X214) + proto.RegisterExtension(E_X215) + proto.RegisterExtension(E_X216) + proto.RegisterExtension(E_X217) + proto.RegisterExtension(E_X218) + proto.RegisterExtension(E_X219) + proto.RegisterExtension(E_X220) + proto.RegisterExtension(E_X221) + proto.RegisterExtension(E_X222) + proto.RegisterExtension(E_X223) + proto.RegisterExtension(E_X224) + proto.RegisterExtension(E_X225) + proto.RegisterExtension(E_X226) + proto.RegisterExtension(E_X227) + proto.RegisterExtension(E_X228) + 
proto.RegisterExtension(E_X229) + proto.RegisterExtension(E_X230) + proto.RegisterExtension(E_X231) + proto.RegisterExtension(E_X232) + proto.RegisterExtension(E_X233) + proto.RegisterExtension(E_X234) + proto.RegisterExtension(E_X235) + proto.RegisterExtension(E_X236) + proto.RegisterExtension(E_X237) + proto.RegisterExtension(E_X238) + proto.RegisterExtension(E_X239) + proto.RegisterExtension(E_X240) + proto.RegisterExtension(E_X241) + proto.RegisterExtension(E_X242) + proto.RegisterExtension(E_X243) + proto.RegisterExtension(E_X244) + proto.RegisterExtension(E_X245) + proto.RegisterExtension(E_X246) + proto.RegisterExtension(E_X247) + proto.RegisterExtension(E_X248) + proto.RegisterExtension(E_X249) + proto.RegisterExtension(E_X250) +} diff -Nru juju-core-2.0~beta15/src/github.com/golang/protobuf/proto/testdata/test.proto juju-core-2.0.0/src/github.com/golang/protobuf/proto/testdata/test.proto --- juju-core-2.0~beta15/src/github.com/golang/protobuf/proto/testdata/test.proto 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/golang/protobuf/proto/testdata/test.proto 2016-10-13 14:32:09.000000000 +0000 @@ -0,0 +1,480 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. 
nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// A feature-rich test file for the protocol compiler and libraries. + +syntax = "proto2"; + +package testdata; + +enum FOO { FOO1 = 1; }; + +message GoEnum { + required FOO foo = 1; +} + +message GoTestField { + required string Label = 1; + required string Type = 2; +} + +message GoTest { + // An enum, for completeness. + enum KIND { + VOID = 0; + + // Basic types + BOOL = 1; + BYTES = 2; + FINGERPRINT = 3; + FLOAT = 4; + INT = 5; + STRING = 6; + TIME = 7; + + // Groupings + TUPLE = 8; + ARRAY = 9; + MAP = 10; + + // Table types + TABLE = 11; + + // Functions + FUNCTION = 12; // last tag + }; + + // Some typical parameters + required KIND Kind = 1; + optional string Table = 2; + optional int32 Param = 3; + + // Required, repeated and optional foreign fields. 
+ required GoTestField RequiredField = 4; + repeated GoTestField RepeatedField = 5; + optional GoTestField OptionalField = 6; + + // Required fields of all basic types + required bool F_Bool_required = 10; + required int32 F_Int32_required = 11; + required int64 F_Int64_required = 12; + required fixed32 F_Fixed32_required = 13; + required fixed64 F_Fixed64_required = 14; + required uint32 F_Uint32_required = 15; + required uint64 F_Uint64_required = 16; + required float F_Float_required = 17; + required double F_Double_required = 18; + required string F_String_required = 19; + required bytes F_Bytes_required = 101; + required sint32 F_Sint32_required = 102; + required sint64 F_Sint64_required = 103; + + // Repeated fields of all basic types + repeated bool F_Bool_repeated = 20; + repeated int32 F_Int32_repeated = 21; + repeated int64 F_Int64_repeated = 22; + repeated fixed32 F_Fixed32_repeated = 23; + repeated fixed64 F_Fixed64_repeated = 24; + repeated uint32 F_Uint32_repeated = 25; + repeated uint64 F_Uint64_repeated = 26; + repeated float F_Float_repeated = 27; + repeated double F_Double_repeated = 28; + repeated string F_String_repeated = 29; + repeated bytes F_Bytes_repeated = 201; + repeated sint32 F_Sint32_repeated = 202; + repeated sint64 F_Sint64_repeated = 203; + + // Optional fields of all basic types + optional bool F_Bool_optional = 30; + optional int32 F_Int32_optional = 31; + optional int64 F_Int64_optional = 32; + optional fixed32 F_Fixed32_optional = 33; + optional fixed64 F_Fixed64_optional = 34; + optional uint32 F_Uint32_optional = 35; + optional uint64 F_Uint64_optional = 36; + optional float F_Float_optional = 37; + optional double F_Double_optional = 38; + optional string F_String_optional = 39; + optional bytes F_Bytes_optional = 301; + optional sint32 F_Sint32_optional = 302; + optional sint64 F_Sint64_optional = 303; + + // Default-valued fields of all basic types + optional bool F_Bool_defaulted = 40 [default=true]; + optional int32 
F_Int32_defaulted = 41 [default=32]; + optional int64 F_Int64_defaulted = 42 [default=64]; + optional fixed32 F_Fixed32_defaulted = 43 [default=320]; + optional fixed64 F_Fixed64_defaulted = 44 [default=640]; + optional uint32 F_Uint32_defaulted = 45 [default=3200]; + optional uint64 F_Uint64_defaulted = 46 [default=6400]; + optional float F_Float_defaulted = 47 [default=314159.]; + optional double F_Double_defaulted = 48 [default=271828.]; + optional string F_String_defaulted = 49 [default="hello, \"world!\"\n"]; + optional bytes F_Bytes_defaulted = 401 [default="Bignose"]; + optional sint32 F_Sint32_defaulted = 402 [default = -32]; + optional sint64 F_Sint64_defaulted = 403 [default = -64]; + + // Packed repeated fields (no string or bytes). + repeated bool F_Bool_repeated_packed = 50 [packed=true]; + repeated int32 F_Int32_repeated_packed = 51 [packed=true]; + repeated int64 F_Int64_repeated_packed = 52 [packed=true]; + repeated fixed32 F_Fixed32_repeated_packed = 53 [packed=true]; + repeated fixed64 F_Fixed64_repeated_packed = 54 [packed=true]; + repeated uint32 F_Uint32_repeated_packed = 55 [packed=true]; + repeated uint64 F_Uint64_repeated_packed = 56 [packed=true]; + repeated float F_Float_repeated_packed = 57 [packed=true]; + repeated double F_Double_repeated_packed = 58 [packed=true]; + repeated sint32 F_Sint32_repeated_packed = 502 [packed=true]; + repeated sint64 F_Sint64_repeated_packed = 503 [packed=true]; + + // Required, repeated, and optional groups. + required group RequiredGroup = 70 { + required string RequiredField = 71; + }; + + repeated group RepeatedGroup = 80 { + required string RequiredField = 81; + }; + + optional group OptionalGroup = 90 { + required string RequiredField = 91; + }; +} + +// For testing skipping of unrecognized fields. +// Numbers are all big, larger than tag numbers in GoTestField, +// the message used in the corresponding test. 
+message GoSkipTest { + required int32 skip_int32 = 11; + required fixed32 skip_fixed32 = 12; + required fixed64 skip_fixed64 = 13; + required string skip_string = 14; + required group SkipGroup = 15 { + required int32 group_int32 = 16; + required string group_string = 17; + } +} + +// For testing packed/non-packed decoder switching. +// A serialized instance of one should be deserializable as the other. +message NonPackedTest { + repeated int32 a = 1; +} + +message PackedTest { + repeated int32 b = 1 [packed=true]; +} + +message MaxTag { + // Maximum possible tag number. + optional string last_field = 536870911; +} + +message OldMessage { + message Nested { + optional string name = 1; + } + optional Nested nested = 1; + + optional int32 num = 2; +} + +// NewMessage is wire compatible with OldMessage; +// imagine it as a future version. +message NewMessage { + message Nested { + optional string name = 1; + optional string food_group = 2; + } + optional Nested nested = 1; + + // This is an int32 in OldMessage. + optional int64 num = 2; +} + +// Smaller tests for ASCII formatting. + +message InnerMessage { + required string host = 1; + optional int32 port = 2 [default=4000]; + optional bool connected = 3; +} + +message OtherMessage { + optional int64 key = 1; + optional bytes value = 2; + optional float weight = 3; + optional InnerMessage inner = 4; +} + +message MyMessage { + required int32 count = 1; + optional string name = 2; + optional string quote = 3; + repeated string pet = 4; + optional InnerMessage inner = 5; + repeated OtherMessage others = 6; + repeated InnerMessage rep_inner = 12; + + enum Color { + RED = 0; + GREEN = 1; + BLUE = 2; + }; + optional Color bikeshed = 7; + + optional group SomeGroup = 8 { + optional int32 group_field = 9; + } + + // This field becomes [][]byte in the generated code. 
+ repeated bytes rep_bytes = 10; + + optional double bigfloat = 11; + + extensions 100 to max; +} + +message Ext { + extend MyMessage { + optional Ext more = 103; + optional string text = 104; + optional int32 number = 105; + } + + optional string data = 1; +} + +extend MyMessage { + repeated string greeting = 106; +} + +message DefaultsMessage { + enum DefaultsEnum { + ZERO = 0; + ONE = 1; + TWO = 2; + }; + extensions 100 to max; +} + +extend DefaultsMessage { + optional double no_default_double = 101; + optional float no_default_float = 102; + optional int32 no_default_int32 = 103; + optional int64 no_default_int64 = 104; + optional uint32 no_default_uint32 = 105; + optional uint64 no_default_uint64 = 106; + optional sint32 no_default_sint32 = 107; + optional sint64 no_default_sint64 = 108; + optional fixed32 no_default_fixed32 = 109; + optional fixed64 no_default_fixed64 = 110; + optional sfixed32 no_default_sfixed32 = 111; + optional sfixed64 no_default_sfixed64 = 112; + optional bool no_default_bool = 113; + optional string no_default_string = 114; + optional bytes no_default_bytes = 115; + optional DefaultsMessage.DefaultsEnum no_default_enum = 116; + + optional double default_double = 201 [default = 3.1415]; + optional float default_float = 202 [default = 3.14]; + optional int32 default_int32 = 203 [default = 42]; + optional int64 default_int64 = 204 [default = 43]; + optional uint32 default_uint32 = 205 [default = 44]; + optional uint64 default_uint64 = 206 [default = 45]; + optional sint32 default_sint32 = 207 [default = 46]; + optional sint64 default_sint64 = 208 [default = 47]; + optional fixed32 default_fixed32 = 209 [default = 48]; + optional fixed64 default_fixed64 = 210 [default = 49]; + optional sfixed32 default_sfixed32 = 211 [default = 50]; + optional sfixed64 default_sfixed64 = 212 [default = 51]; + optional bool default_bool = 213 [default = true]; + optional string default_string = 214 [default = "Hello, string"]; + optional bytes default_bytes 
= 215 [default = "Hello, bytes"]; + optional DefaultsMessage.DefaultsEnum default_enum = 216 [default = ONE]; +} + +message MyMessageSet { + option message_set_wire_format = true; + extensions 100 to max; +} + +message Empty { +} + +extend MyMessageSet { + optional Empty x201 = 201; + optional Empty x202 = 202; + optional Empty x203 = 203; + optional Empty x204 = 204; + optional Empty x205 = 205; + optional Empty x206 = 206; + optional Empty x207 = 207; + optional Empty x208 = 208; + optional Empty x209 = 209; + optional Empty x210 = 210; + optional Empty x211 = 211; + optional Empty x212 = 212; + optional Empty x213 = 213; + optional Empty x214 = 214; + optional Empty x215 = 215; + optional Empty x216 = 216; + optional Empty x217 = 217; + optional Empty x218 = 218; + optional Empty x219 = 219; + optional Empty x220 = 220; + optional Empty x221 = 221; + optional Empty x222 = 222; + optional Empty x223 = 223; + optional Empty x224 = 224; + optional Empty x225 = 225; + optional Empty x226 = 226; + optional Empty x227 = 227; + optional Empty x228 = 228; + optional Empty x229 = 229; + optional Empty x230 = 230; + optional Empty x231 = 231; + optional Empty x232 = 232; + optional Empty x233 = 233; + optional Empty x234 = 234; + optional Empty x235 = 235; + optional Empty x236 = 236; + optional Empty x237 = 237; + optional Empty x238 = 238; + optional Empty x239 = 239; + optional Empty x240 = 240; + optional Empty x241 = 241; + optional Empty x242 = 242; + optional Empty x243 = 243; + optional Empty x244 = 244; + optional Empty x245 = 245; + optional Empty x246 = 246; + optional Empty x247 = 247; + optional Empty x248 = 248; + optional Empty x249 = 249; + optional Empty x250 = 250; +} + +message MessageList { + repeated group Message = 1 { + required string name = 2; + required int32 count = 3; + } +} + +message Strings { + optional string string_field = 1; + optional bytes bytes_field = 2; +} + +message Defaults { + enum Color { + RED = 0; + GREEN = 1; + BLUE = 2; + } + 
+ // Default-valued fields of all basic types. + // Same as GoTest, but copied here to make testing easier. + optional bool F_Bool = 1 [default=true]; + optional int32 F_Int32 = 2 [default=32]; + optional int64 F_Int64 = 3 [default=64]; + optional fixed32 F_Fixed32 = 4 [default=320]; + optional fixed64 F_Fixed64 = 5 [default=640]; + optional uint32 F_Uint32 = 6 [default=3200]; + optional uint64 F_Uint64 = 7 [default=6400]; + optional float F_Float = 8 [default=314159.]; + optional double F_Double = 9 [default=271828.]; + optional string F_String = 10 [default="hello, \"world!\"\n"]; + optional bytes F_Bytes = 11 [default="Bignose"]; + optional sint32 F_Sint32 = 12 [default=-32]; + optional sint64 F_Sint64 = 13 [default=-64]; + optional Color F_Enum = 14 [default=GREEN]; + + // More fields with crazy defaults. + optional float F_Pinf = 15 [default=inf]; + optional float F_Ninf = 16 [default=-inf]; + optional float F_Nan = 17 [default=nan]; + + // Sub-message. + optional SubDefaults sub = 18; + + // Redundant but explicit defaults. + optional string str_zero = 19 [default=""]; +} + +message SubDefaults { + optional int64 n = 1 [default=7]; +} + +message RepeatedEnum { + enum Color { + RED = 1; + } + repeated Color color = 1; +} + +message MoreRepeated { + repeated bool bools = 1; + repeated bool bools_packed = 2 [packed=true]; + repeated int32 ints = 3; + repeated int32 ints_packed = 4 [packed=true]; + repeated int64 int64s_packed = 7 [packed=true]; + repeated string strings = 5; + repeated fixed32 fixeds = 6; +} + +// GroupOld and GroupNew have the same wire format. +// GroupNew has a new field inside a group. 
+ +message GroupOld { + optional group G = 101 { + optional int32 x = 2; + } +} + +message GroupNew { + optional group G = 101 { + optional int32 x = 2; + optional int32 y = 3; + } +} + +message FloatingPoint { + required double f = 1; +} + +message MessageWithMap { + map name_mapping = 1; + map msg_mapping = 2; + map byte_mapping = 3; + map str_to_str = 4; +} diff -Nru juju-core-2.0~beta15/src/github.com/golang/protobuf/proto/text.go juju-core-2.0.0/src/github.com/golang/protobuf/proto/text.go --- juju-core-2.0~beta15/src/github.com/golang/protobuf/proto/text.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/golang/protobuf/proto/text.go 2016-10-13 14:32:09.000000000 +0000 @@ -0,0 +1,792 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +// Functions for writing the text protocol buffer format. + +import ( + "bufio" + "bytes" + "encoding" + "fmt" + "io" + "log" + "math" + "os" + "reflect" + "sort" + "strings" +) + +var ( + newline = []byte("\n") + spaces = []byte(" ") + gtNewline = []byte(">\n") + endBraceNewline = []byte("}\n") + backslashN = []byte{'\\', 'n'} + backslashR = []byte{'\\', 'r'} + backslashT = []byte{'\\', 't'} + backslashDQ = []byte{'\\', '"'} + backslashBS = []byte{'\\', '\\'} + posInf = []byte("inf") + negInf = []byte("-inf") + nan = []byte("nan") +) + +type writer interface { + io.Writer + WriteByte(byte) error +} + +// textWriter is an io.Writer that tracks its indentation level. +type textWriter struct { + ind int + complete bool // if the current position is a complete line + compact bool // whether to write out as a one-liner + w writer +} + +func (w *textWriter) WriteString(s string) (n int, err error) { + if !strings.Contains(s, "\n") { + if !w.compact && w.complete { + w.writeIndent() + } + w.complete = false + return io.WriteString(w.w, s) + } + // WriteString is typically called without newlines, so this + // codepath and its copy are rare. We copy to avoid + // duplicating all of Write's logic here. 
+ return w.Write([]byte(s)) +} + +func (w *textWriter) Write(p []byte) (n int, err error) { + newlines := bytes.Count(p, newline) + if newlines == 0 { + if !w.compact && w.complete { + w.writeIndent() + } + n, err = w.w.Write(p) + w.complete = false + return n, err + } + + frags := bytes.SplitN(p, newline, newlines+1) + if w.compact { + for i, frag := range frags { + if i > 0 { + if err := w.w.WriteByte(' '); err != nil { + return n, err + } + n++ + } + nn, err := w.w.Write(frag) + n += nn + if err != nil { + return n, err + } + } + return n, nil + } + + for i, frag := range frags { + if w.complete { + w.writeIndent() + } + nn, err := w.w.Write(frag) + n += nn + if err != nil { + return n, err + } + if i+1 < len(frags) { + if err := w.w.WriteByte('\n'); err != nil { + return n, err + } + n++ + } + } + w.complete = len(frags[len(frags)-1]) == 0 + return n, nil +} + +func (w *textWriter) WriteByte(c byte) error { + if w.compact && c == '\n' { + c = ' ' + } + if !w.compact && w.complete { + w.writeIndent() + } + err := w.w.WriteByte(c) + w.complete = c == '\n' + return err +} + +func (w *textWriter) indent() { w.ind++ } + +func (w *textWriter) unindent() { + if w.ind == 0 { + log.Printf("proto: textWriter unindented too far") + return + } + w.ind-- +} + +func writeName(w *textWriter, props *Properties) error { + if _, err := w.WriteString(props.OrigName); err != nil { + return err + } + if props.Wire != "group" { + return w.WriteByte(':') + } + return nil +} + +var ( + messageSetType = reflect.TypeOf((*MessageSet)(nil)).Elem() +) + +// raw is the interface satisfied by RawMessage. 
+type raw interface { + Bytes() []byte +} + +func writeStruct(w *textWriter, sv reflect.Value) error { + if sv.Type() == messageSetType { + return writeMessageSet(w, sv.Addr().Interface().(*MessageSet)) + } + + st := sv.Type() + sprops := GetProperties(st) + for i := 0; i < sv.NumField(); i++ { + fv := sv.Field(i) + props := sprops.Prop[i] + name := st.Field(i).Name + + if strings.HasPrefix(name, "XXX_") { + // There are two XXX_ fields: + // XXX_unrecognized []byte + // XXX_extensions map[int32]proto.Extension + // The first is handled here; + // the second is handled at the bottom of this function. + if name == "XXX_unrecognized" && !fv.IsNil() { + if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil { + return err + } + } + continue + } + if fv.Kind() == reflect.Ptr && fv.IsNil() { + // Field not filled in. This could be an optional field or + // a required field that wasn't filled in. Either way, there + // isn't anything we can show for it. + continue + } + if fv.Kind() == reflect.Slice && fv.IsNil() { + // Repeated field that is empty, or a bytes field that is unused. + continue + } + + if props.Repeated && fv.Kind() == reflect.Slice { + // Repeated field. + for j := 0; j < fv.Len(); j++ { + if err := writeName(w, props); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + v := fv.Index(j) + if v.Kind() == reflect.Ptr && v.IsNil() { + // A nil message in a repeated field is not valid, + // but we can handle that more gracefully than panicking. + if _, err := w.Write([]byte("\n")); err != nil { + return err + } + continue + } + if err := writeAny(w, v, props); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + } + continue + } + if fv.Kind() == reflect.Map { + // Map fields are rendered as a repeated struct with key/value fields. + keys := fv.MapKeys() // TODO: should we sort these for deterministic output? 
+ sort.Sort(mapKeys(keys)) + for _, key := range keys { + val := fv.MapIndex(key) + if err := writeName(w, props); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + // open struct + if err := w.WriteByte('<'); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte('\n'); err != nil { + return err + } + } + w.indent() + // key + if _, err := w.WriteString("key:"); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + if err := writeAny(w, key, props.mkeyprop); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + // nil values aren't legal, but we can avoid panicking because of them. + if val.Kind() != reflect.Ptr || !val.IsNil() { + // value + if _, err := w.WriteString("value:"); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + if err := writeAny(w, val, props.mvalprop); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + } + // close struct + w.unindent() + if err := w.WriteByte('>'); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + } + continue + } + if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 { + // empty bytes field + continue + } + if fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice { + // proto3 non-repeated scalar field; skip if zero value + switch fv.Kind() { + case reflect.Bool: + if !fv.Bool() { + continue + } + case reflect.Int32, reflect.Int64: + if fv.Int() == 0 { + continue + } + case reflect.Uint32, reflect.Uint64: + if fv.Uint() == 0 { + continue + } + case reflect.Float32, reflect.Float64: + if fv.Float() == 0 { + continue + } + case reflect.String: + if fv.String() == "" { + continue + } + } + } + + if err := writeName(w, props); err != nil { + return err + } + if !w.compact { + if err := 
w.WriteByte(' '); err != nil { + return err + } + } + if b, ok := fv.Interface().(raw); ok { + if err := writeRaw(w, b.Bytes()); err != nil { + return err + } + continue + } + + // Enums have a String method, so writeAny will work fine. + if err := writeAny(w, fv, props); err != nil { + return err + } + + if err := w.WriteByte('\n'); err != nil { + return err + } + } + + // Extensions (the XXX_extensions field). + pv := sv.Addr() + if pv.Type().Implements(extendableProtoType) { + if err := writeExtensions(w, pv); err != nil { + return err + } + } + + return nil +} + +// writeRaw writes an uninterpreted raw message. +func writeRaw(w *textWriter, b []byte) error { + if err := w.WriteByte('<'); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte('\n'); err != nil { + return err + } + } + w.indent() + if err := writeUnknownStruct(w, b); err != nil { + return err + } + w.unindent() + if err := w.WriteByte('>'); err != nil { + return err + } + return nil +} + +// writeAny writes an arbitrary field. +func writeAny(w *textWriter, v reflect.Value, props *Properties) error { + v = reflect.Indirect(v) + + // Floats have special cases. + if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 { + x := v.Float() + var b []byte + switch { + case math.IsInf(x, 1): + b = posInf + case math.IsInf(x, -1): + b = negInf + case math.IsNaN(x): + b = nan + } + if b != nil { + _, err := w.Write(b) + return err + } + // Other values are handled below. + } + + // We don't attempt to serialise every possible value type; only those + // that can occur in protocol buffers. + switch v.Kind() { + case reflect.Slice: + // Should only be a []byte; repeated fields are handled in writeStruct. + if err := writeString(w, string(v.Interface().([]byte))); err != nil { + return err + } + case reflect.String: + if err := writeString(w, v.String()); err != nil { + return err + } + case reflect.Struct: + // Required/optional group/message. 
+ var bra, ket byte = '<', '>' + if props != nil && props.Wire == "group" { + bra, ket = '{', '}' + } + if err := w.WriteByte(bra); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte('\n'); err != nil { + return err + } + } + w.indent() + if tm, ok := v.Interface().(encoding.TextMarshaler); ok { + text, err := tm.MarshalText() + if err != nil { + return err + } + if _, err = w.Write(text); err != nil { + return err + } + } else if err := writeStruct(w, v); err != nil { + return err + } + w.unindent() + if err := w.WriteByte(ket); err != nil { + return err + } + default: + _, err := fmt.Fprint(w, v.Interface()) + return err + } + return nil +} + +// equivalent to C's isprint. +func isprint(c byte) bool { + return c >= 0x20 && c < 0x7f +} + +// writeString writes a string in the protocol buffer text format. +// It is similar to strconv.Quote except we don't use Go escape sequences, +// we treat the string as a byte sequence, and we use octal escapes. +// These differences are to maintain interoperability with the other +// languages' implementations of the text format. +func writeString(w *textWriter, s string) error { + // use WriteByte here to get any needed indent + if err := w.WriteByte('"'); err != nil { + return err + } + // Loop over the bytes, not the runes. + for i := 0; i < len(s); i++ { + var err error + // Divergence from C++: we don't escape apostrophes. + // There's no need to escape them, and the C++ parser + // copes with a naked apostrophe. 
+ switch c := s[i]; c { + case '\n': + _, err = w.w.Write(backslashN) + case '\r': + _, err = w.w.Write(backslashR) + case '\t': + _, err = w.w.Write(backslashT) + case '"': + _, err = w.w.Write(backslashDQ) + case '\\': + _, err = w.w.Write(backslashBS) + default: + if isprint(c) { + err = w.w.WriteByte(c) + } else { + _, err = fmt.Fprintf(w.w, "\\%03o", c) + } + } + if err != nil { + return err + } + } + return w.WriteByte('"') +} + +func writeMessageSet(w *textWriter, ms *MessageSet) error { + for _, item := range ms.Item { + id := *item.TypeId + if msd, ok := messageSetMap[id]; ok { + // Known message set type. + if _, err := fmt.Fprintf(w, "[%s]: <\n", msd.name); err != nil { + return err + } + w.indent() + + pb := reflect.New(msd.t.Elem()) + if err := Unmarshal(item.Message, pb.Interface().(Message)); err != nil { + if _, err := fmt.Fprintf(w, "/* bad message: %v */\n", err); err != nil { + return err + } + } else { + if err := writeStruct(w, pb.Elem()); err != nil { + return err + } + } + } else { + // Unknown type. 
+ if _, err := fmt.Fprintf(w, "[%d]: <\n", id); err != nil { + return err + } + w.indent() + if err := writeUnknownStruct(w, item.Message); err != nil { + return err + } + } + w.unindent() + if _, err := w.Write(gtNewline); err != nil { + return err + } + } + return nil +} + +func writeUnknownStruct(w *textWriter, data []byte) (err error) { + if !w.compact { + if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil { + return err + } + } + b := NewBuffer(data) + for b.index < len(b.buf) { + x, err := b.DecodeVarint() + if err != nil { + _, err := fmt.Fprintf(w, "/* %v */\n", err) + return err + } + wire, tag := x&7, x>>3 + if wire == WireEndGroup { + w.unindent() + if _, err := w.Write(endBraceNewline); err != nil { + return err + } + continue + } + if _, err := fmt.Fprint(w, tag); err != nil { + return err + } + if wire != WireStartGroup { + if err := w.WriteByte(':'); err != nil { + return err + } + } + if !w.compact || wire == WireStartGroup { + if err := w.WriteByte(' '); err != nil { + return err + } + } + switch wire { + case WireBytes: + buf, e := b.DecodeRawBytes(false) + if e == nil { + _, err = fmt.Fprintf(w, "%q", buf) + } else { + _, err = fmt.Fprintf(w, "/* %v */", e) + } + case WireFixed32: + x, err = b.DecodeFixed32() + err = writeUnknownInt(w, x, err) + case WireFixed64: + x, err = b.DecodeFixed64() + err = writeUnknownInt(w, x, err) + case WireStartGroup: + err = w.WriteByte('{') + w.indent() + case WireVarint: + x, err = b.DecodeVarint() + err = writeUnknownInt(w, x, err) + default: + _, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire) + } + if err != nil { + return err + } + if err = w.WriteByte('\n'); err != nil { + return err + } + } + return nil +} + +func writeUnknownInt(w *textWriter, x uint64, err error) error { + if err == nil { + _, err = fmt.Fprint(w, x) + } else { + _, err = fmt.Fprintf(w, "/* %v */", err) + } + return err +} + +type int32Slice []int32 + +func (s int32Slice) Len() int { return len(s) } 
+func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] } +func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// writeExtensions writes all the extensions in pv. +// pv is assumed to be a pointer to a protocol message struct that is extendable. +func writeExtensions(w *textWriter, pv reflect.Value) error { + emap := extensionMaps[pv.Type().Elem()] + ep := pv.Interface().(extendableProto) + + // Order the extensions by ID. + // This isn't strictly necessary, but it will give us + // canonical output, which will also make testing easier. + m := ep.ExtensionMap() + ids := make([]int32, 0, len(m)) + for id := range m { + ids = append(ids, id) + } + sort.Sort(int32Slice(ids)) + + for _, extNum := range ids { + ext := m[extNum] + var desc *ExtensionDesc + if emap != nil { + desc = emap[extNum] + } + if desc == nil { + // Unknown extension. + if err := writeUnknownStruct(w, ext.enc); err != nil { + return err + } + continue + } + + pb, err := GetExtension(ep, desc) + if err != nil { + if _, err := fmt.Fprintln(os.Stderr, "proto: failed getting extension: ", err); err != nil { + return err + } + continue + } + + // Repeated extensions will appear as a slice. 
+ if !desc.repeated() { + if err := writeExtension(w, desc.Name, pb); err != nil { + return err + } + } else { + v := reflect.ValueOf(pb) + for i := 0; i < v.Len(); i++ { + if err := writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil { + return err + } + } + } + } + return nil +} + +func writeExtension(w *textWriter, name string, pb interface{}) error { + if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + if err := writeAny(w, reflect.ValueOf(pb), nil); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + return nil +} + +func (w *textWriter) writeIndent() { + if !w.complete { + return + } + remain := w.ind * 2 + for remain > 0 { + n := remain + if n > len(spaces) { + n = len(spaces) + } + w.w.Write(spaces[:n]) + remain -= n + } + w.complete = false +} + +func marshalText(w io.Writer, pb Message, compact bool) error { + val := reflect.ValueOf(pb) + if pb == nil || val.IsNil() { + w.Write([]byte("")) + return nil + } + var bw *bufio.Writer + ww, ok := w.(writer) + if !ok { + bw = bufio.NewWriter(w) + ww = bw + } + aw := &textWriter{ + w: ww, + complete: true, + compact: compact, + } + + if tm, ok := pb.(encoding.TextMarshaler); ok { + text, err := tm.MarshalText() + if err != nil { + return err + } + if _, err = aw.Write(text); err != nil { + return err + } + if bw != nil { + return bw.Flush() + } + return nil + } + // Dereference the received pointer so we don't have outer < and >. + v := reflect.Indirect(val) + if err := writeStruct(aw, v); err != nil { + return err + } + if bw != nil { + return bw.Flush() + } + return nil +} + +// MarshalText writes a given protocol buffer in text format. +// The only errors returned are from w. +func MarshalText(w io.Writer, pb Message) error { + return marshalText(w, pb, false) +} + +// MarshalTextString is the same as MarshalText, but returns the string directly. 
+func MarshalTextString(pb Message) string { + var buf bytes.Buffer + marshalText(&buf, pb, false) + return buf.String() +} + +// CompactText writes a given protocol buffer in compact text format (one line). +func CompactText(w io.Writer, pb Message) error { return marshalText(w, pb, true) } + +// CompactTextString is the same as CompactText, but returns the string directly. +func CompactTextString(pb Message) string { + var buf bytes.Buffer + marshalText(&buf, pb, true) + return buf.String() +} diff -Nru juju-core-2.0~beta15/src/github.com/golang/protobuf/proto/text_parser.go juju-core-2.0.0/src/github.com/golang/protobuf/proto/text_parser.go --- juju-core-2.0~beta15/src/github.com/golang/protobuf/proto/text_parser.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/golang/protobuf/proto/text_parser.go 2016-10-13 14:32:09.000000000 +0000 @@ -0,0 +1,772 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +// Functions for parsing the Text protocol buffer format. +// TODO: message sets. + +import ( + "encoding" + "errors" + "fmt" + "reflect" + "strconv" + "strings" + "unicode/utf8" +) + +type ParseError struct { + Message string + Line int // 1-based line number + Offset int // 0-based byte offset from start of input +} + +func (p *ParseError) Error() string { + if p.Line == 1 { + // show offset only for first line + return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message) + } + return fmt.Sprintf("line %d: %v", p.Line, p.Message) +} + +type token struct { + value string + err *ParseError + line int // line number + offset int // byte number from start of input, not start of line + unquoted string // the unquoted version of value, if it was a quoted string +} + +func (t *token) String() string { + if t.err == nil { + return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset) + } + return fmt.Sprintf("parse error: %v", t.err) +} + +type textParser struct { + s string // remaining input + done bool // whether the parsing is finished (success or error) + backed bool // whether back() was called + offset, line int + cur token +} + +func 
newTextParser(s string) *textParser { + p := new(textParser) + p.s = s + p.line = 1 + p.cur.line = 1 + return p +} + +func (p *textParser) errorf(format string, a ...interface{}) *ParseError { + pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset} + p.cur.err = pe + p.done = true + return pe +} + +// Numbers and identifiers are matched by [-+._A-Za-z0-9] +func isIdentOrNumberChar(c byte) bool { + switch { + case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z': + return true + case '0' <= c && c <= '9': + return true + } + switch c { + case '-', '+', '.', '_': + return true + } + return false +} + +func isWhitespace(c byte) bool { + switch c { + case ' ', '\t', '\n', '\r': + return true + } + return false +} + +func (p *textParser) skipWhitespace() { + i := 0 + for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') { + if p.s[i] == '#' { + // comment; skip to end of line or input + for i < len(p.s) && p.s[i] != '\n' { + i++ + } + if i == len(p.s) { + break + } + } + if p.s[i] == '\n' { + p.line++ + } + i++ + } + p.offset += i + p.s = p.s[i:len(p.s)] + if len(p.s) == 0 { + p.done = true + } +} + +func (p *textParser) advance() { + // Skip whitespace + p.skipWhitespace() + if p.done { + return + } + + // Start of non-whitespace + p.cur.err = nil + p.cur.offset, p.cur.line = p.offset, p.line + p.cur.unquoted = "" + switch p.s[0] { + case '<', '>', '{', '}', ':', '[', ']', ';', ',': + // Single symbol + p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)] + case '"', '\'': + // Quoted string + i := 1 + for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' { + if p.s[i] == '\\' && i+1 < len(p.s) { + // skip escaped char + i++ + } + i++ + } + if i >= len(p.s) || p.s[i] != p.s[0] { + p.errorf("unmatched quote") + return + } + unq, err := unquoteC(p.s[1:i], rune(p.s[0])) + if err != nil { + p.errorf("invalid quoted string %v", p.s[0:i+1]) + return + } + p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)] + p.cur.unquoted = unq + default: + i := 0 + for i < len(p.s) 
&& isIdentOrNumberChar(p.s[i]) { + i++ + } + if i == 0 { + p.errorf("unexpected byte %#x", p.s[0]) + return + } + p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)] + } + p.offset += len(p.cur.value) +} + +var ( + errBadUTF8 = errors.New("proto: bad UTF-8") + errBadHex = errors.New("proto: bad hexadecimal") +) + +func unquoteC(s string, quote rune) (string, error) { + // This is based on C++'s tokenizer.cc. + // Despite its name, this is *not* parsing C syntax. + // For instance, "\0" is an invalid quoted string. + + // Avoid allocation in trivial cases. + simple := true + for _, r := range s { + if r == '\\' || r == quote { + simple = false + break + } + } + if simple { + return s, nil + } + + buf := make([]byte, 0, 3*len(s)/2) + for len(s) > 0 { + r, n := utf8.DecodeRuneInString(s) + if r == utf8.RuneError && n == 1 { + return "", errBadUTF8 + } + s = s[n:] + if r != '\\' { + if r < utf8.RuneSelf { + buf = append(buf, byte(r)) + } else { + buf = append(buf, string(r)...) + } + continue + } + + ch, tail, err := unescape(s) + if err != nil { + return "", err + } + buf = append(buf, ch...) 
+ s = tail + } + return string(buf), nil +} + +func unescape(s string) (ch string, tail string, err error) { + r, n := utf8.DecodeRuneInString(s) + if r == utf8.RuneError && n == 1 { + return "", "", errBadUTF8 + } + s = s[n:] + switch r { + case 'a': + return "\a", s, nil + case 'b': + return "\b", s, nil + case 'f': + return "\f", s, nil + case 'n': + return "\n", s, nil + case 'r': + return "\r", s, nil + case 't': + return "\t", s, nil + case 'v': + return "\v", s, nil + case '?': + return "?", s, nil // trigraph workaround + case '\'', '"', '\\': + return string(r), s, nil + case '0', '1', '2', '3', '4', '5', '6', '7', 'x', 'X': + if len(s) < 2 { + return "", "", fmt.Errorf(`\%c requires 2 following digits`, r) + } + base := 8 + ss := s[:2] + s = s[2:] + if r == 'x' || r == 'X' { + base = 16 + } else { + ss = string(r) + ss + } + i, err := strconv.ParseUint(ss, base, 8) + if err != nil { + return "", "", err + } + return string([]byte{byte(i)}), s, nil + case 'u', 'U': + n := 4 + if r == 'U' { + n = 8 + } + if len(s) < n { + return "", "", fmt.Errorf(`\%c requires %d digits`, r, n) + } + + bs := make([]byte, n/2) + for i := 0; i < n; i += 2 { + a, ok1 := unhex(s[i]) + b, ok2 := unhex(s[i+1]) + if !ok1 || !ok2 { + return "", "", errBadHex + } + bs[i/2] = a<<4 | b + } + s = s[n:] + return string(bs), s, nil + } + return "", "", fmt.Errorf(`unknown escape \%c`, r) +} + +// Adapted from src/pkg/strconv/quote.go. +func unhex(b byte) (v byte, ok bool) { + switch { + case '0' <= b && b <= '9': + return b - '0', true + case 'a' <= b && b <= 'f': + return b - 'a' + 10, true + case 'A' <= b && b <= 'F': + return b - 'A' + 10, true + } + return 0, false +} + +// Back off the parser by one token. Can only be done between calls to next(). +// It makes the next advance() a no-op. +func (p *textParser) back() { p.backed = true } + +// Advances the parser and returns the new current token. 
+func (p *textParser) next() *token { + if p.backed || p.done { + p.backed = false + return &p.cur + } + p.advance() + if p.done { + p.cur.value = "" + } else if len(p.cur.value) > 0 && p.cur.value[0] == '"' { + // Look for multiple quoted strings separated by whitespace, + // and concatenate them. + cat := p.cur + for { + p.skipWhitespace() + if p.done || p.s[0] != '"' { + break + } + p.advance() + if p.cur.err != nil { + return &p.cur + } + cat.value += " " + p.cur.value + cat.unquoted += p.cur.unquoted + } + p.done = false // parser may have seen EOF, but we want to return cat + p.cur = cat + } + return &p.cur +} + +func (p *textParser) consumeToken(s string) error { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != s { + p.back() + return p.errorf("expected %q, found %q", s, tok.value) + } + return nil +} + +// Return a RequiredNotSetError indicating which required field was not set. +func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError { + st := sv.Type() + sprops := GetProperties(st) + for i := 0; i < st.NumField(); i++ { + if !isNil(sv.Field(i)) { + continue + } + + props := sprops.Prop[i] + if props.Required { + return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)} + } + } + return &RequiredNotSetError{fmt.Sprintf("%v.", st)} // should not happen +} + +// Returns the index in the struct for the named field, as well as the parsed tag properties. +func structFieldByName(st reflect.Type, name string) (int, *Properties, bool) { + sprops := GetProperties(st) + i, ok := sprops.decoderOrigNames[name] + if ok { + return i, sprops.Prop[i], true + } + return -1, nil, false +} + +// Consume a ':' from the input stream (if the next token is a colon), +// returning an error if a colon is needed but not present. 
+func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != ":" { + // Colon is optional when the field is a group or message. + needColon := true + switch props.Wire { + case "group": + needColon = false + case "bytes": + // A "bytes" field is either a message, a string, or a repeated field; + // those three become *T, *string and []T respectively, so we can check for + // this field being a pointer to a non-string. + if typ.Kind() == reflect.Ptr { + // *T or *string + if typ.Elem().Kind() == reflect.String { + break + } + } else if typ.Kind() == reflect.Slice { + // []T or []*T + if typ.Elem().Kind() != reflect.Ptr { + break + } + } else if typ.Kind() == reflect.String { + // The proto3 exception is for a string field, + // which requires a colon. + break + } + needColon = false + } + if needColon { + return p.errorf("expected ':', found %q", tok.value) + } + p.back() + } + return nil +} + +func (p *textParser) readStruct(sv reflect.Value, terminator string) error { + st := sv.Type() + reqCount := GetProperties(st).reqCount + var reqFieldErr error + fieldSet := make(map[string]bool) + // A struct is a sequence of "name: value", terminated by one of + // '>' or '}', or the end of the input. A name may also be + // "[extension]". + for { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value == terminator { + break + } + if tok.value == "[" { + // Looks like an extension. + // + // TODO: Check whether we need to handle + // namespace rooted names (e.g. ".something.Foo"). + tok = p.next() + if tok.err != nil { + return tok.err + } + var desc *ExtensionDesc + // This could be faster, but it's functional. + // TODO: Do something smarter than a linear scan. 
+ for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) { + if d.Name == tok.value { + desc = d + break + } + } + if desc == nil { + return p.errorf("unrecognized extension %q", tok.value) + } + // Check the extension terminator. + tok = p.next() + if tok.err != nil { + return tok.err + } + if tok.value != "]" { + return p.errorf("unrecognized extension terminator %q", tok.value) + } + + props := &Properties{} + props.Parse(desc.Tag) + + typ := reflect.TypeOf(desc.ExtensionType) + if err := p.checkForColon(props, typ); err != nil { + return err + } + + rep := desc.repeated() + + // Read the extension structure, and set it in + // the value we're constructing. + var ext reflect.Value + if !rep { + ext = reflect.New(typ).Elem() + } else { + ext = reflect.New(typ.Elem()).Elem() + } + if err := p.readAny(ext, props); err != nil { + if _, ok := err.(*RequiredNotSetError); !ok { + return err + } + reqFieldErr = err + } + ep := sv.Addr().Interface().(extendableProto) + if !rep { + SetExtension(ep, desc, ext.Interface()) + } else { + old, err := GetExtension(ep, desc) + var sl reflect.Value + if err == nil { + sl = reflect.ValueOf(old) // existing slice + } else { + sl = reflect.MakeSlice(typ, 0, 1) + } + sl = reflect.Append(sl, ext) + SetExtension(ep, desc, sl.Interface()) + } + } else { + // This is a normal, non-extension field. + name := tok.value + fi, props, ok := structFieldByName(st, name) + if !ok { + return p.errorf("unknown field name %q in %v", name, st) + } + + dst := sv.Field(fi) + + if dst.Kind() == reflect.Map { + // Consume any colon. + if err := p.checkForColon(props, dst.Type()); err != nil { + return err + } + + // Construct the map if it doesn't already exist. 
+ if dst.IsNil() { + dst.Set(reflect.MakeMap(dst.Type())) + } + key := reflect.New(dst.Type().Key()).Elem() + val := reflect.New(dst.Type().Elem()).Elem() + + // The map entry should be this sequence of tokens: + // < key : KEY value : VALUE > + // Technically the "key" and "value" could come in any order, + // but in practice they won't. + + tok := p.next() + var terminator string + switch tok.value { + case "<": + terminator = ">" + case "{": + terminator = "}" + default: + return p.errorf("expected '{' or '<', found %q", tok.value) + } + if err := p.consumeToken("key"); err != nil { + return err + } + if err := p.consumeToken(":"); err != nil { + return err + } + if err := p.readAny(key, props.mkeyprop); err != nil { + return err + } + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + if err := p.consumeToken("value"); err != nil { + return err + } + if err := p.checkForColon(props.mvalprop, dst.Type().Elem()); err != nil { + return err + } + if err := p.readAny(val, props.mvalprop); err != nil { + return err + } + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + if err := p.consumeToken(terminator); err != nil { + return err + } + + dst.SetMapIndex(key, val) + continue + } + + // Check that it's not already set if it's not a repeated field. + if !props.Repeated && fieldSet[name] { + return p.errorf("non-repeated field %q was repeated", name) + } + + if err := p.checkForColon(props, st.Field(fi).Type); err != nil { + return err + } + + // Parse into the field. + fieldSet[name] = true + if err := p.readAny(dst, props); err != nil { + if _, ok := err.(*RequiredNotSetError); !ok { + return err + } + reqFieldErr = err + } else if props.Required { + reqCount-- + } + } + + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + + } + + if reqCount > 0 { + return p.missingRequiredFieldError(sv) + } + return reqFieldErr +} + +// consumeOptionalSeparator consumes an optional semicolon or comma. 
+// It is used in readStruct to provide backward compatibility. +func (p *textParser) consumeOptionalSeparator() error { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != ";" && tok.value != "," { + p.back() + } + return nil +} + +func (p *textParser) readAny(v reflect.Value, props *Properties) error { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value == "" { + return p.errorf("unexpected EOF") + } + + switch fv := v; fv.Kind() { + case reflect.Slice: + at := v.Type() + if at.Elem().Kind() == reflect.Uint8 { + // Special case for []byte + if tok.value[0] != '"' && tok.value[0] != '\'' { + // Deliberately written out here, as the error after + // this switch statement would write "invalid []byte: ...", + // which is not as user-friendly. + return p.errorf("invalid string: %v", tok.value) + } + bytes := []byte(tok.unquoted) + fv.Set(reflect.ValueOf(bytes)) + return nil + } + // Repeated field. May already exist. + flen := fv.Len() + if flen == fv.Cap() { + nav := reflect.MakeSlice(at, flen, 2*flen+1) + reflect.Copy(nav, fv) + fv.Set(nav) + } + fv.SetLen(flen + 1) + + // Read one. + p.back() + return p.readAny(fv.Index(flen), props) + case reflect.Bool: + // Either "true", "false", 1 or 0. + switch tok.value { + case "true", "1": + fv.SetBool(true) + return nil + case "false", "0": + fv.SetBool(false) + return nil + } + case reflect.Float32, reflect.Float64: + v := tok.value + // Ignore 'f' for compatibility with output generated by C++, but don't + // remove 'f' when the value is "-inf" or "inf". 
+ if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" { + v = v[:len(v)-1] + } + if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil { + fv.SetFloat(f) + return nil + } + case reflect.Int32: + if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil { + fv.SetInt(x) + return nil + } + + if len(props.Enum) == 0 { + break + } + m, ok := enumValueMaps[props.Enum] + if !ok { + break + } + x, ok := m[tok.value] + if !ok { + break + } + fv.SetInt(int64(x)) + return nil + case reflect.Int64: + if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil { + fv.SetInt(x) + return nil + } + + case reflect.Ptr: + // A basic field (indirected through pointer), or a repeated message/group + p.back() + fv.Set(reflect.New(fv.Type().Elem())) + return p.readAny(fv.Elem(), props) + case reflect.String: + if tok.value[0] == '"' || tok.value[0] == '\'' { + fv.SetString(tok.unquoted) + return nil + } + case reflect.Struct: + var terminator string + switch tok.value { + case "{": + terminator = "}" + case "<": + terminator = ">" + default: + return p.errorf("expected '{' or '<', found %q", tok.value) + } + // TODO: Handle nested messages which implement encoding.TextUnmarshaler. + return p.readStruct(fv, terminator) + case reflect.Uint32: + if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil { + fv.SetUint(uint64(x)) + return nil + } + case reflect.Uint64: + if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil { + fv.SetUint(x) + return nil + } + } + return p.errorf("invalid %v: %v", v.Type(), tok.value) +} + +// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb +// before starting to unmarshal, so any existing data in pb is always removed. +// If a required field is not set and no other error occurs, +// UnmarshalText returns *RequiredNotSetError. 
+func UnmarshalText(s string, pb Message) error { + if um, ok := pb.(encoding.TextUnmarshaler); ok { + err := um.UnmarshalText([]byte(s)) + return err + } + pb.Reset() + v := reflect.ValueOf(pb) + if pe := newTextParser(s).readStruct(v.Elem(), ""); pe != nil { + return pe + } + return nil +} diff -Nru juju-core-2.0~beta15/src/github.com/golang/protobuf/proto/text_parser_test.go juju-core-2.0.0/src/github.com/golang/protobuf/proto/text_parser_test.go --- juju-core-2.0~beta15/src/github.com/golang/protobuf/proto/text_parser_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/golang/protobuf/proto/text_parser_test.go 2016-10-13 14:32:09.000000000 +0000 @@ -0,0 +1,511 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto_test + +import ( + "math" + "reflect" + "testing" + + . "github.com/golang/protobuf/proto" + proto3pb "github.com/golang/protobuf/proto/proto3_proto" + . "github.com/golang/protobuf/proto/testdata" +) + +type UnmarshalTextTest struct { + in string + err string // if "", no error expected + out *MyMessage +} + +func buildExtStructTest(text string) UnmarshalTextTest { + msg := &MyMessage{ + Count: Int32(42), + } + SetExtension(msg, E_Ext_More, &Ext{ + Data: String("Hello, world!"), + }) + return UnmarshalTextTest{in: text, out: msg} +} + +func buildExtDataTest(text string) UnmarshalTextTest { + msg := &MyMessage{ + Count: Int32(42), + } + SetExtension(msg, E_Ext_Text, String("Hello, world!")) + SetExtension(msg, E_Ext_Number, Int32(1729)) + return UnmarshalTextTest{in: text, out: msg} +} + +func buildExtRepStringTest(text string) UnmarshalTextTest { + msg := &MyMessage{ + Count: Int32(42), + } + if err := SetExtension(msg, E_Greeting, []string{"bula", "hola"}); err != nil { + panic(err) + } + return UnmarshalTextTest{in: text, out: msg} +} + +var unMarshalTextTests = []UnmarshalTextTest{ + // Basic + { + in: " count:42\n name:\"Dave\" ", + out: &MyMessage{ + Count: Int32(42), + Name: String("Dave"), + }, + }, + + // Empty quoted string + { + in: `count:42 name:""`, + out: &MyMessage{ + Count: Int32(42), + Name: String(""), + }, + }, + + // Quoted string concatenation + { + in: `count:42 name: "My name is "` + "\n" + 
`"elsewhere"`, + out: &MyMessage{ + Count: Int32(42), + Name: String("My name is elsewhere"), + }, + }, + + // Quoted string with escaped apostrophe + { + in: `count:42 name: "HOLIDAY - New Year\'s Day"`, + out: &MyMessage{ + Count: Int32(42), + Name: String("HOLIDAY - New Year's Day"), + }, + }, + + // Quoted string with single quote + { + in: `count:42 name: 'Roger "The Ramster" Ramjet'`, + out: &MyMessage{ + Count: Int32(42), + Name: String(`Roger "The Ramster" Ramjet`), + }, + }, + + // Quoted string with all the accepted special characters from the C++ test + { + in: `count:42 name: ` + "\"\\\"A string with \\' characters \\n and \\r newlines and \\t tabs and \\001 slashes \\\\ and multiple spaces\"", + out: &MyMessage{ + Count: Int32(42), + Name: String("\"A string with ' characters \n and \r newlines and \t tabs and \001 slashes \\ and multiple spaces"), + }, + }, + + // Quoted string with quoted backslash + { + in: `count:42 name: "\\'xyz"`, + out: &MyMessage{ + Count: Int32(42), + Name: String(`\'xyz`), + }, + }, + + // Quoted string with UTF-8 bytes. 
+ { + in: "count:42 name: '\303\277\302\201\xAB'", + out: &MyMessage{ + Count: Int32(42), + Name: String("\303\277\302\201\xAB"), + }, + }, + + // Bad quoted string + { + in: `inner: < host: "\0" >` + "\n", + err: `line 1.15: invalid quoted string "\0"`, + }, + + // Number too large for int64 + { + in: "count: 1 others { key: 123456789012345678901 }", + err: "line 1.23: invalid int64: 123456789012345678901", + }, + + // Number too large for int32 + { + in: "count: 1234567890123", + err: "line 1.7: invalid int32: 1234567890123", + }, + + // Number in hexadecimal + { + in: "count: 0x2beef", + out: &MyMessage{ + Count: Int32(0x2beef), + }, + }, + + // Number in octal + { + in: "count: 024601", + out: &MyMessage{ + Count: Int32(024601), + }, + }, + + // Floating point number with "f" suffix + { + in: "count: 4 others:< weight: 17.0f >", + out: &MyMessage{ + Count: Int32(4), + Others: []*OtherMessage{ + { + Weight: Float32(17), + }, + }, + }, + }, + + // Floating point positive infinity + { + in: "count: 4 bigfloat: inf", + out: &MyMessage{ + Count: Int32(4), + Bigfloat: Float64(math.Inf(1)), + }, + }, + + // Floating point negative infinity + { + in: "count: 4 bigfloat: -inf", + out: &MyMessage{ + Count: Int32(4), + Bigfloat: Float64(math.Inf(-1)), + }, + }, + + // Number too large for float32 + { + in: "others:< weight: 12345678901234567890123456789012345678901234567890 >", + err: "line 1.17: invalid float32: 12345678901234567890123456789012345678901234567890", + }, + + // Number posing as a quoted string + { + in: `inner: < host: 12 >` + "\n", + err: `line 1.15: invalid string: 12`, + }, + + // Quoted string posing as int32 + { + in: `count: "12"`, + err: `line 1.7: invalid int32: "12"`, + }, + + // Quoted string posing a float32 + { + in: `others:< weight: "17.4" >`, + err: `line 1.17: invalid float32: "17.4"`, + }, + + // Enum + { + in: `count:42 bikeshed: BLUE`, + out: &MyMessage{ + Count: Int32(42), + Bikeshed: MyMessage_BLUE.Enum(), + }, + }, + + // Repeated 
field + { + in: `count:42 pet: "horsey" pet:"bunny"`, + out: &MyMessage{ + Count: Int32(42), + Pet: []string{"horsey", "bunny"}, + }, + }, + + // Repeated message with/without colon and <>/{} + { + in: `count:42 others:{} others{} others:<> others:{}`, + out: &MyMessage{ + Count: Int32(42), + Others: []*OtherMessage{ + {}, + {}, + {}, + {}, + }, + }, + }, + + // Missing colon for inner message + { + in: `count:42 inner < host: "cauchy.syd" >`, + out: &MyMessage{ + Count: Int32(42), + Inner: &InnerMessage{ + Host: String("cauchy.syd"), + }, + }, + }, + + // Missing colon for string field + { + in: `name "Dave"`, + err: `line 1.5: expected ':', found "\"Dave\""`, + }, + + // Missing colon for int32 field + { + in: `count 42`, + err: `line 1.6: expected ':', found "42"`, + }, + + // Missing required field + { + in: `name: "Pawel"`, + err: `proto: required field "testdata.MyMessage.count" not set`, + out: &MyMessage{ + Name: String("Pawel"), + }, + }, + + // Repeated non-repeated field + { + in: `name: "Rob" name: "Russ"`, + err: `line 1.12: non-repeated field "name" was repeated`, + }, + + // Group + { + in: `count: 17 SomeGroup { group_field: 12 }`, + out: &MyMessage{ + Count: Int32(17), + Somegroup: &MyMessage_SomeGroup{ + GroupField: Int32(12), + }, + }, + }, + + // Semicolon between fields + { + in: `count:3;name:"Calvin"`, + out: &MyMessage{ + Count: Int32(3), + Name: String("Calvin"), + }, + }, + // Comma between fields + { + in: `count:4,name:"Ezekiel"`, + out: &MyMessage{ + Count: Int32(4), + Name: String("Ezekiel"), + }, + }, + + // Extension + buildExtStructTest(`count: 42 [testdata.Ext.more]:`), + buildExtStructTest(`count: 42 [testdata.Ext.more] {data:"Hello, world!"}`), + buildExtDataTest(`count: 42 [testdata.Ext.text]:"Hello, world!" 
[testdata.Ext.number]:1729`), + buildExtRepStringTest(`count: 42 [testdata.greeting]:"bula" [testdata.greeting]:"hola"`), + + // Big all-in-one + { + in: "count:42 # Meaning\n" + + `name:"Dave" ` + + `quote:"\"I didn't want to go.\"" ` + + `pet:"bunny" ` + + `pet:"kitty" ` + + `pet:"horsey" ` + + `inner:<` + + ` host:"footrest.syd" ` + + ` port:7001 ` + + ` connected:true ` + + `> ` + + `others:<` + + ` key:3735928559 ` + + ` value:"\x01A\a\f" ` + + `> ` + + `others:<` + + " weight:58.9 # Atomic weight of Co\n" + + ` inner:<` + + ` host:"lesha.mtv" ` + + ` port:8002 ` + + ` >` + + `>`, + out: &MyMessage{ + Count: Int32(42), + Name: String("Dave"), + Quote: String(`"I didn't want to go."`), + Pet: []string{"bunny", "kitty", "horsey"}, + Inner: &InnerMessage{ + Host: String("footrest.syd"), + Port: Int32(7001), + Connected: Bool(true), + }, + Others: []*OtherMessage{ + { + Key: Int64(3735928559), + Value: []byte{0x1, 'A', '\a', '\f'}, + }, + { + Weight: Float32(58.9), + Inner: &InnerMessage{ + Host: String("lesha.mtv"), + Port: Int32(8002), + }, + }, + }, + }, + }, +} + +func TestUnmarshalText(t *testing.T) { + for i, test := range unMarshalTextTests { + pb := new(MyMessage) + err := UnmarshalText(test.in, pb) + if test.err == "" { + // We don't expect failure. + if err != nil { + t.Errorf("Test %d: Unexpected error: %v", i, err) + } else if !reflect.DeepEqual(pb, test.out) { + t.Errorf("Test %d: Incorrect populated \nHave: %v\nWant: %v", + i, pb, test.out) + } + } else { + // We do expect failure. 
+ if err == nil { + t.Errorf("Test %d: Didn't get expected error: %v", i, test.err) + } else if err.Error() != test.err { + t.Errorf("Test %d: Incorrect error.\nHave: %v\nWant: %v", + i, err.Error(), test.err) + } else if _, ok := err.(*RequiredNotSetError); ok && test.out != nil && !reflect.DeepEqual(pb, test.out) { + t.Errorf("Test %d: Incorrect populated \nHave: %v\nWant: %v", + i, pb, test.out) + } + } + } +} + +func TestUnmarshalTextCustomMessage(t *testing.T) { + msg := &textMessage{} + if err := UnmarshalText("custom", msg); err != nil { + t.Errorf("Unexpected error from custom unmarshal: %v", err) + } + if UnmarshalText("not custom", msg) == nil { + t.Errorf("Didn't get expected error from custom unmarshal") + } +} + +// Regression test; this caused a panic. +func TestRepeatedEnum(t *testing.T) { + pb := new(RepeatedEnum) + if err := UnmarshalText("color: RED", pb); err != nil { + t.Fatal(err) + } + exp := &RepeatedEnum{ + Color: []RepeatedEnum_Color{RepeatedEnum_RED}, + } + if !Equal(pb, exp) { + t.Errorf("Incorrect populated \nHave: %v\nWant: %v", pb, exp) + } +} + +func TestProto3TextParsing(t *testing.T) { + m := new(proto3pb.Message) + const in = `name: "Wallace" true_scotsman: true` + want := &proto3pb.Message{ + Name: "Wallace", + TrueScotsman: true, + } + if err := UnmarshalText(in, m); err != nil { + t.Fatal(err) + } + if !Equal(m, want) { + t.Errorf("\n got %v\nwant %v", m, want) + } +} + +func TestMapParsing(t *testing.T) { + m := new(MessageWithMap) + const in = `name_mapping: name_mapping:` + + `msg_mapping:,>` + // separating commas are okay + `msg_mapping>` + // no colon after "value" + `byte_mapping:` + want := &MessageWithMap{ + NameMapping: map[int32]string{ + 1: "Beatles", + 1234: "Feist", + }, + MsgMapping: map[int64]*FloatingPoint{ + -4: {F: Float64(2.0)}, + -2: {F: Float64(4.0)}, + }, + ByteMapping: map[bool][]byte{ + true: []byte("so be it"), + }, + } + if err := UnmarshalText(in, m); err != nil { + t.Fatal(err) + } + if !Equal(m, 
want) { + t.Errorf("\n got %v\nwant %v", m, want) + } +} + +var benchInput string + +func init() { + benchInput = "count: 4\n" + for i := 0; i < 1000; i++ { + benchInput += "pet: \"fido\"\n" + } + + // Check it is valid input. + pb := new(MyMessage) + err := UnmarshalText(benchInput, pb) + if err != nil { + panic("Bad benchmark input: " + err.Error()) + } +} + +func BenchmarkUnmarshalText(b *testing.B) { + pb := new(MyMessage) + for i := 0; i < b.N; i++ { + UnmarshalText(benchInput, pb) + } + b.SetBytes(int64(len(benchInput))) +} diff -Nru juju-core-2.0~beta15/src/github.com/golang/protobuf/proto/text_test.go juju-core-2.0.0/src/github.com/golang/protobuf/proto/text_test.go --- juju-core-2.0~beta15/src/github.com/golang/protobuf/proto/text_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/golang/protobuf/proto/text_test.go 2016-10-13 14:32:09.000000000 +0000 @@ -0,0 +1,441 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto_test + +import ( + "bytes" + "errors" + "io/ioutil" + "math" + "strings" + "testing" + + "github.com/golang/protobuf/proto" + + proto3pb "github.com/golang/protobuf/proto/proto3_proto" + pb "github.com/golang/protobuf/proto/testdata" +) + +// textMessage implements the methods that allow it to marshal and unmarshal +// itself as text. 
+type textMessage struct { +} + +func (*textMessage) MarshalText() ([]byte, error) { + return []byte("custom"), nil +} + +func (*textMessage) UnmarshalText(bytes []byte) error { + if string(bytes) != "custom" { + return errors.New("expected 'custom'") + } + return nil +} + +func (*textMessage) Reset() {} +func (*textMessage) String() string { return "" } +func (*textMessage) ProtoMessage() {} + +func newTestMessage() *pb.MyMessage { + msg := &pb.MyMessage{ + Count: proto.Int32(42), + Name: proto.String("Dave"), + Quote: proto.String(`"I didn't want to go."`), + Pet: []string{"bunny", "kitty", "horsey"}, + Inner: &pb.InnerMessage{ + Host: proto.String("footrest.syd"), + Port: proto.Int32(7001), + Connected: proto.Bool(true), + }, + Others: []*pb.OtherMessage{ + { + Key: proto.Int64(0xdeadbeef), + Value: []byte{1, 65, 7, 12}, + }, + { + Weight: proto.Float32(6.022), + Inner: &pb.InnerMessage{ + Host: proto.String("lesha.mtv"), + Port: proto.Int32(8002), + }, + }, + }, + Bikeshed: pb.MyMessage_BLUE.Enum(), + Somegroup: &pb.MyMessage_SomeGroup{ + GroupField: proto.Int32(8), + }, + // One normally wouldn't do this. + // This is an undeclared tag 13, as a varint (wire type 0) with value 4. + XXX_unrecognized: []byte{13<<3 | 0, 4}, + } + ext := &pb.Ext{ + Data: proto.String("Big gobs for big rats"), + } + if err := proto.SetExtension(msg, pb.E_Ext_More, ext); err != nil { + panic(err) + } + greetings := []string{"adg", "easy", "cow"} + if err := proto.SetExtension(msg, pb.E_Greeting, greetings); err != nil { + panic(err) + } + + // Add an unknown extension. We marshal a pb.Ext, and fake the ID. + b, err := proto.Marshal(&pb.Ext{Data: proto.String("3G skiing")}) + if err != nil { + panic(err) + } + b = append(proto.EncodeVarint(201<<3|proto.WireBytes), b...) + proto.SetRawExtension(msg, 201, b) + + // Extensions can be plain fields, too, so let's test that. 
+ b = append(proto.EncodeVarint(202<<3|proto.WireVarint), 19) + proto.SetRawExtension(msg, 202, b) + + return msg +} + +const text = `count: 42 +name: "Dave" +quote: "\"I didn't want to go.\"" +pet: "bunny" +pet: "kitty" +pet: "horsey" +inner: < + host: "footrest.syd" + port: 7001 + connected: true +> +others: < + key: 3735928559 + value: "\001A\007\014" +> +others: < + weight: 6.022 + inner: < + host: "lesha.mtv" + port: 8002 + > +> +bikeshed: BLUE +SomeGroup { + group_field: 8 +} +/* 2 unknown bytes */ +13: 4 +[testdata.Ext.more]: < + data: "Big gobs for big rats" +> +[testdata.greeting]: "adg" +[testdata.greeting]: "easy" +[testdata.greeting]: "cow" +/* 13 unknown bytes */ +201: "\t3G skiing" +/* 3 unknown bytes */ +202: 19 +` + +func TestMarshalText(t *testing.T) { + buf := new(bytes.Buffer) + if err := proto.MarshalText(buf, newTestMessage()); err != nil { + t.Fatalf("proto.MarshalText: %v", err) + } + s := buf.String() + if s != text { + t.Errorf("Got:\n===\n%v===\nExpected:\n===\n%v===\n", s, text) + } +} + +func TestMarshalTextCustomMessage(t *testing.T) { + buf := new(bytes.Buffer) + if err := proto.MarshalText(buf, &textMessage{}); err != nil { + t.Fatalf("proto.MarshalText: %v", err) + } + s := buf.String() + if s != "custom" { + t.Errorf("Got %q, expected %q", s, "custom") + } +} +func TestMarshalTextNil(t *testing.T) { + want := "" + tests := []proto.Message{nil, (*pb.MyMessage)(nil)} + for i, test := range tests { + buf := new(bytes.Buffer) + if err := proto.MarshalText(buf, test); err != nil { + t.Fatal(err) + } + if got := buf.String(); got != want { + t.Errorf("%d: got %q want %q", i, got, want) + } + } +} + +func TestMarshalTextUnknownEnum(t *testing.T) { + // The Color enum only specifies values 0-2. 
+ m := &pb.MyMessage{Bikeshed: pb.MyMessage_Color(3).Enum()} + got := m.String() + const want = `bikeshed:3 ` + if got != want { + t.Errorf("\n got %q\nwant %q", got, want) + } +} + +func BenchmarkMarshalTextBuffered(b *testing.B) { + buf := new(bytes.Buffer) + m := newTestMessage() + for i := 0; i < b.N; i++ { + buf.Reset() + proto.MarshalText(buf, m) + } +} + +func BenchmarkMarshalTextUnbuffered(b *testing.B) { + w := ioutil.Discard + m := newTestMessage() + for i := 0; i < b.N; i++ { + proto.MarshalText(w, m) + } +} + +func compact(src string) string { + // s/[ \n]+/ /g; s/ $//; + dst := make([]byte, len(src)) + space, comment := false, false + j := 0 + for i := 0; i < len(src); i++ { + if strings.HasPrefix(src[i:], "/*") { + comment = true + i++ + continue + } + if comment && strings.HasPrefix(src[i:], "*/") { + comment = false + i++ + continue + } + if comment { + continue + } + c := src[i] + if c == ' ' || c == '\n' { + space = true + continue + } + if j > 0 && (dst[j-1] == ':' || dst[j-1] == '<' || dst[j-1] == '{') { + space = false + } + if c == '{' { + space = false + } + if space { + dst[j] = ' ' + j++ + space = false + } + dst[j] = c + j++ + } + if space { + dst[j] = ' ' + j++ + } + return string(dst[0:j]) +} + +var compactText = compact(text) + +func TestCompactText(t *testing.T) { + s := proto.CompactTextString(newTestMessage()) + if s != compactText { + t.Errorf("Got:\n===\n%v===\nExpected:\n===\n%v\n===\n", s, compactText) + } +} + +func TestStringEscaping(t *testing.T) { + testCases := []struct { + in *pb.Strings + out string + }{ + { + // Test data from C++ test (TextFormatTest.StringEscape). + // Single divergence: we don't escape apostrophes. 
+ &pb.Strings{StringField: proto.String("\"A string with ' characters \n and \r newlines and \t tabs and \001 slashes \\ and multiple spaces")}, + "string_field: \"\\\"A string with ' characters \\n and \\r newlines and \\t tabs and \\001 slashes \\\\ and multiple spaces\"\n", + }, + { + // Test data from the same C++ test. + &pb.Strings{StringField: proto.String("\350\260\267\346\255\214")}, + "string_field: \"\\350\\260\\267\\346\\255\\214\"\n", + }, + { + // Some UTF-8. + &pb.Strings{StringField: proto.String("\x00\x01\xff\x81")}, + `string_field: "\000\001\377\201"` + "\n", + }, + } + + for i, tc := range testCases { + var buf bytes.Buffer + if err := proto.MarshalText(&buf, tc.in); err != nil { + t.Errorf("proto.MarsalText: %v", err) + continue + } + s := buf.String() + if s != tc.out { + t.Errorf("#%d: Got:\n%s\nExpected:\n%s\n", i, s, tc.out) + continue + } + + // Check round-trip. + pb := new(pb.Strings) + if err := proto.UnmarshalText(s, pb); err != nil { + t.Errorf("#%d: UnmarshalText: %v", i, err) + continue + } + if !proto.Equal(pb, tc.in) { + t.Errorf("#%d: Round-trip failed:\nstart: %v\n end: %v", i, tc.in, pb) + } + } +} + +// A limitedWriter accepts some output before it fails. +// This is a proxy for something like a nearly-full or imminently-failing disk, +// or a network connection that is about to die. +type limitedWriter struct { + b bytes.Buffer + limit int +} + +var outOfSpace = errors.New("proto: insufficient space") + +func (w *limitedWriter) Write(p []byte) (n int, err error) { + var avail = w.limit - w.b.Len() + if avail <= 0 { + return 0, outOfSpace + } + if len(p) <= avail { + return w.b.Write(p) + } + n, _ = w.b.Write(p[:avail]) + return n, outOfSpace +} + +func TestMarshalTextFailing(t *testing.T) { + // Try lots of different sizes to exercise more error code-paths. 
+ for lim := 0; lim < len(text); lim++ { + buf := new(limitedWriter) + buf.limit = lim + err := proto.MarshalText(buf, newTestMessage()) + // We expect a certain error, but also some partial results in the buffer. + if err != outOfSpace { + t.Errorf("Got:\n===\n%v===\nExpected:\n===\n%v===\n", err, outOfSpace) + } + s := buf.b.String() + x := text[:buf.limit] + if s != x { + t.Errorf("Got:\n===\n%v===\nExpected:\n===\n%v===\n", s, x) + } + } +} + +func TestFloats(t *testing.T) { + tests := []struct { + f float64 + want string + }{ + {0, "0"}, + {4.7, "4.7"}, + {math.Inf(1), "inf"}, + {math.Inf(-1), "-inf"}, + {math.NaN(), "nan"}, + } + for _, test := range tests { + msg := &pb.FloatingPoint{F: &test.f} + got := strings.TrimSpace(msg.String()) + want := `f:` + test.want + if got != want { + t.Errorf("f=%f: got %q, want %q", test.f, got, want) + } + } +} + +func TestRepeatedNilText(t *testing.T) { + m := &pb.MessageList{ + Message: []*pb.MessageList_Message{ + nil, + &pb.MessageList_Message{ + Name: proto.String("Horse"), + }, + nil, + }, + } + want := `Message +Message { + name: "Horse" +} +Message +` + if s := proto.MarshalTextString(m); s != want { + t.Errorf(" got: %s\nwant: %s", s, want) + } +} + +func TestProto3Text(t *testing.T) { + tests := []struct { + m proto.Message + want string + }{ + // zero message + {&proto3pb.Message{}, ``}, + // zero message except for an empty byte slice + {&proto3pb.Message{Data: []byte{}}, ``}, + // trivial case + {&proto3pb.Message{Name: "Rob", HeightInCm: 175}, `name:"Rob" height_in_cm:175`}, + // empty map + {&pb.MessageWithMap{}, ``}, + // non-empty map; current map format is the same as a repeated struct + { + &pb.MessageWithMap{NameMapping: map[int32]string{1234: "Feist"}}, + `name_mapping:`, + }, + // map with nil value; not well-defined, but we shouldn't crash + { + &pb.MessageWithMap{MsgMapping: map[int64]*pb.FloatingPoint{7: nil}}, + `msg_mapping:`, + }, + } + for _, test := range tests { + got := 
strings.TrimSpace(test.m.String()) + if got != test.want { + t.Errorf("\n got %s\nwant %s", got, test.want) + } + } +} diff -Nru juju-core-2.0~beta15/src/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go juju-core-2.0.0/src/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go --- juju-core-2.0~beta15/src/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go 2016-10-13 14:32:09.000000000 +0000 @@ -0,0 +1,1635 @@ +// Code generated by protoc-gen-go. +// source: google/protobuf/descriptor.proto +// DO NOT EDIT! + +/* +Package google_protobuf is a generated protocol buffer package. + +It is generated from these files: + google/protobuf/descriptor.proto + +It has these top-level messages: + FileDescriptorSet + FileDescriptorProto + DescriptorProto + FieldDescriptorProto + OneofDescriptorProto + EnumDescriptorProto + EnumValueDescriptorProto + ServiceDescriptorProto + MethodDescriptorProto + FileOptions + MessageOptions + FieldOptions + EnumOptions + EnumValueOptions + ServiceOptions + MethodOptions + UninterpretedOption + SourceCodeInfo +*/ +package descriptor + +import proto "github.com/golang/protobuf/proto" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = math.Inf + +type FieldDescriptorProto_Type int32 + +const ( + // 0 is reserved for errors. + // Order is weird for historical reasons. + FieldDescriptorProto_TYPE_DOUBLE FieldDescriptorProto_Type = 1 + FieldDescriptorProto_TYPE_FLOAT FieldDescriptorProto_Type = 2 + // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT64 if + // negative values are likely. + FieldDescriptorProto_TYPE_INT64 FieldDescriptorProto_Type = 3 + FieldDescriptorProto_TYPE_UINT64 FieldDescriptorProto_Type = 4 + // Not ZigZag encoded. Negative numbers take 10 bytes. 
Use TYPE_SINT32 if + // negative values are likely. + FieldDescriptorProto_TYPE_INT32 FieldDescriptorProto_Type = 5 + FieldDescriptorProto_TYPE_FIXED64 FieldDescriptorProto_Type = 6 + FieldDescriptorProto_TYPE_FIXED32 FieldDescriptorProto_Type = 7 + FieldDescriptorProto_TYPE_BOOL FieldDescriptorProto_Type = 8 + FieldDescriptorProto_TYPE_STRING FieldDescriptorProto_Type = 9 + FieldDescriptorProto_TYPE_GROUP FieldDescriptorProto_Type = 10 + FieldDescriptorProto_TYPE_MESSAGE FieldDescriptorProto_Type = 11 + // New in version 2. + FieldDescriptorProto_TYPE_BYTES FieldDescriptorProto_Type = 12 + FieldDescriptorProto_TYPE_UINT32 FieldDescriptorProto_Type = 13 + FieldDescriptorProto_TYPE_ENUM FieldDescriptorProto_Type = 14 + FieldDescriptorProto_TYPE_SFIXED32 FieldDescriptorProto_Type = 15 + FieldDescriptorProto_TYPE_SFIXED64 FieldDescriptorProto_Type = 16 + FieldDescriptorProto_TYPE_SINT32 FieldDescriptorProto_Type = 17 + FieldDescriptorProto_TYPE_SINT64 FieldDescriptorProto_Type = 18 +) + +var FieldDescriptorProto_Type_name = map[int32]string{ + 1: "TYPE_DOUBLE", + 2: "TYPE_FLOAT", + 3: "TYPE_INT64", + 4: "TYPE_UINT64", + 5: "TYPE_INT32", + 6: "TYPE_FIXED64", + 7: "TYPE_FIXED32", + 8: "TYPE_BOOL", + 9: "TYPE_STRING", + 10: "TYPE_GROUP", + 11: "TYPE_MESSAGE", + 12: "TYPE_BYTES", + 13: "TYPE_UINT32", + 14: "TYPE_ENUM", + 15: "TYPE_SFIXED32", + 16: "TYPE_SFIXED64", + 17: "TYPE_SINT32", + 18: "TYPE_SINT64", +} +var FieldDescriptorProto_Type_value = map[string]int32{ + "TYPE_DOUBLE": 1, + "TYPE_FLOAT": 2, + "TYPE_INT64": 3, + "TYPE_UINT64": 4, + "TYPE_INT32": 5, + "TYPE_FIXED64": 6, + "TYPE_FIXED32": 7, + "TYPE_BOOL": 8, + "TYPE_STRING": 9, + "TYPE_GROUP": 10, + "TYPE_MESSAGE": 11, + "TYPE_BYTES": 12, + "TYPE_UINT32": 13, + "TYPE_ENUM": 14, + "TYPE_SFIXED32": 15, + "TYPE_SFIXED64": 16, + "TYPE_SINT32": 17, + "TYPE_SINT64": 18, +} + +func (x FieldDescriptorProto_Type) Enum() *FieldDescriptorProto_Type { + p := new(FieldDescriptorProto_Type) + *p = x + return p +} +func (x 
FieldDescriptorProto_Type) String() string { + return proto.EnumName(FieldDescriptorProto_Type_name, int32(x)) +} +func (x *FieldDescriptorProto_Type) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FieldDescriptorProto_Type_value, data, "FieldDescriptorProto_Type") + if err != nil { + return err + } + *x = FieldDescriptorProto_Type(value) + return nil +} + +type FieldDescriptorProto_Label int32 + +const ( + // 0 is reserved for errors + FieldDescriptorProto_LABEL_OPTIONAL FieldDescriptorProto_Label = 1 + FieldDescriptorProto_LABEL_REQUIRED FieldDescriptorProto_Label = 2 + FieldDescriptorProto_LABEL_REPEATED FieldDescriptorProto_Label = 3 +) + +var FieldDescriptorProto_Label_name = map[int32]string{ + 1: "LABEL_OPTIONAL", + 2: "LABEL_REQUIRED", + 3: "LABEL_REPEATED", +} +var FieldDescriptorProto_Label_value = map[string]int32{ + "LABEL_OPTIONAL": 1, + "LABEL_REQUIRED": 2, + "LABEL_REPEATED": 3, +} + +func (x FieldDescriptorProto_Label) Enum() *FieldDescriptorProto_Label { + p := new(FieldDescriptorProto_Label) + *p = x + return p +} +func (x FieldDescriptorProto_Label) String() string { + return proto.EnumName(FieldDescriptorProto_Label_name, int32(x)) +} +func (x *FieldDescriptorProto_Label) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FieldDescriptorProto_Label_value, data, "FieldDescriptorProto_Label") + if err != nil { + return err + } + *x = FieldDescriptorProto_Label(value) + return nil +} + +// Generated classes can be optimized for speed or code size. +type FileOptions_OptimizeMode int32 + +const ( + FileOptions_SPEED FileOptions_OptimizeMode = 1 + // etc. 
+ FileOptions_CODE_SIZE FileOptions_OptimizeMode = 2 + FileOptions_LITE_RUNTIME FileOptions_OptimizeMode = 3 +) + +var FileOptions_OptimizeMode_name = map[int32]string{ + 1: "SPEED", + 2: "CODE_SIZE", + 3: "LITE_RUNTIME", +} +var FileOptions_OptimizeMode_value = map[string]int32{ + "SPEED": 1, + "CODE_SIZE": 2, + "LITE_RUNTIME": 3, +} + +func (x FileOptions_OptimizeMode) Enum() *FileOptions_OptimizeMode { + p := new(FileOptions_OptimizeMode) + *p = x + return p +} +func (x FileOptions_OptimizeMode) String() string { + return proto.EnumName(FileOptions_OptimizeMode_name, int32(x)) +} +func (x *FileOptions_OptimizeMode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FileOptions_OptimizeMode_value, data, "FileOptions_OptimizeMode") + if err != nil { + return err + } + *x = FileOptions_OptimizeMode(value) + return nil +} + +type FieldOptions_CType int32 + +const ( + // Default mode. + FieldOptions_STRING FieldOptions_CType = 0 + FieldOptions_CORD FieldOptions_CType = 1 + FieldOptions_STRING_PIECE FieldOptions_CType = 2 +) + +var FieldOptions_CType_name = map[int32]string{ + 0: "STRING", + 1: "CORD", + 2: "STRING_PIECE", +} +var FieldOptions_CType_value = map[string]int32{ + "STRING": 0, + "CORD": 1, + "STRING_PIECE": 2, +} + +func (x FieldOptions_CType) Enum() *FieldOptions_CType { + p := new(FieldOptions_CType) + *p = x + return p +} +func (x FieldOptions_CType) String() string { + return proto.EnumName(FieldOptions_CType_name, int32(x)) +} +func (x *FieldOptions_CType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FieldOptions_CType_value, data, "FieldOptions_CType") + if err != nil { + return err + } + *x = FieldOptions_CType(value) + return nil +} + +// The protocol compiler can output a FileDescriptorSet containing the .proto +// files it parses. 
+type FileDescriptorSet struct { + File []*FileDescriptorProto `protobuf:"bytes,1,rep,name=file" json:"file,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *FileDescriptorSet) Reset() { *m = FileDescriptorSet{} } +func (m *FileDescriptorSet) String() string { return proto.CompactTextString(m) } +func (*FileDescriptorSet) ProtoMessage() {} + +func (m *FileDescriptorSet) GetFile() []*FileDescriptorProto { + if m != nil { + return m.File + } + return nil +} + +// Describes a complete .proto file. +type FileDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Package *string `protobuf:"bytes,2,opt,name=package" json:"package,omitempty"` + // Names of files imported by this file. + Dependency []string `protobuf:"bytes,3,rep,name=dependency" json:"dependency,omitempty"` + // Indexes of the public imported files in the dependency list above. + PublicDependency []int32 `protobuf:"varint,10,rep,name=public_dependency" json:"public_dependency,omitempty"` + // Indexes of the weak imported files in the dependency list. + // For Google-internal migration only. Do not use. + WeakDependency []int32 `protobuf:"varint,11,rep,name=weak_dependency" json:"weak_dependency,omitempty"` + // All top-level definitions in this file. + MessageType []*DescriptorProto `protobuf:"bytes,4,rep,name=message_type" json:"message_type,omitempty"` + EnumType []*EnumDescriptorProto `protobuf:"bytes,5,rep,name=enum_type" json:"enum_type,omitempty"` + Service []*ServiceDescriptorProto `protobuf:"bytes,6,rep,name=service" json:"service,omitempty"` + Extension []*FieldDescriptorProto `protobuf:"bytes,7,rep,name=extension" json:"extension,omitempty"` + Options *FileOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"` + // This field contains optional information about the original source code. 
+ // You may safely remove this entire field without harming runtime + // functionality of the descriptors -- the information is needed only by + // development tools. + SourceCodeInfo *SourceCodeInfo `protobuf:"bytes,9,opt,name=source_code_info" json:"source_code_info,omitempty"` + // The syntax of the proto file. + // The supported values are "proto2" and "proto3". + Syntax *string `protobuf:"bytes,12,opt,name=syntax" json:"syntax,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *FileDescriptorProto) Reset() { *m = FileDescriptorProto{} } +func (m *FileDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*FileDescriptorProto) ProtoMessage() {} + +func (m *FileDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *FileDescriptorProto) GetPackage() string { + if m != nil && m.Package != nil { + return *m.Package + } + return "" +} + +func (m *FileDescriptorProto) GetDependency() []string { + if m != nil { + return m.Dependency + } + return nil +} + +func (m *FileDescriptorProto) GetPublicDependency() []int32 { + if m != nil { + return m.PublicDependency + } + return nil +} + +func (m *FileDescriptorProto) GetWeakDependency() []int32 { + if m != nil { + return m.WeakDependency + } + return nil +} + +func (m *FileDescriptorProto) GetMessageType() []*DescriptorProto { + if m != nil { + return m.MessageType + } + return nil +} + +func (m *FileDescriptorProto) GetEnumType() []*EnumDescriptorProto { + if m != nil { + return m.EnumType + } + return nil +} + +func (m *FileDescriptorProto) GetService() []*ServiceDescriptorProto { + if m != nil { + return m.Service + } + return nil +} + +func (m *FileDescriptorProto) GetExtension() []*FieldDescriptorProto { + if m != nil { + return m.Extension + } + return nil +} + +func (m *FileDescriptorProto) GetOptions() *FileOptions { + if m != nil { + return m.Options + } + return nil +} + +func (m *FileDescriptorProto) 
GetSourceCodeInfo() *SourceCodeInfo { + if m != nil { + return m.SourceCodeInfo + } + return nil +} + +func (m *FileDescriptorProto) GetSyntax() string { + if m != nil && m.Syntax != nil { + return *m.Syntax + } + return "" +} + +// Describes a message type. +type DescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Field []*FieldDescriptorProto `protobuf:"bytes,2,rep,name=field" json:"field,omitempty"` + Extension []*FieldDescriptorProto `protobuf:"bytes,6,rep,name=extension" json:"extension,omitempty"` + NestedType []*DescriptorProto `protobuf:"bytes,3,rep,name=nested_type" json:"nested_type,omitempty"` + EnumType []*EnumDescriptorProto `protobuf:"bytes,4,rep,name=enum_type" json:"enum_type,omitempty"` + ExtensionRange []*DescriptorProto_ExtensionRange `protobuf:"bytes,5,rep,name=extension_range" json:"extension_range,omitempty"` + OneofDecl []*OneofDescriptorProto `protobuf:"bytes,8,rep,name=oneof_decl" json:"oneof_decl,omitempty"` + Options *MessageOptions `protobuf:"bytes,7,opt,name=options" json:"options,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *DescriptorProto) Reset() { *m = DescriptorProto{} } +func (m *DescriptorProto) String() string { return proto.CompactTextString(m) } +func (*DescriptorProto) ProtoMessage() {} + +func (m *DescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *DescriptorProto) GetField() []*FieldDescriptorProto { + if m != nil { + return m.Field + } + return nil +} + +func (m *DescriptorProto) GetExtension() []*FieldDescriptorProto { + if m != nil { + return m.Extension + } + return nil +} + +func (m *DescriptorProto) GetNestedType() []*DescriptorProto { + if m != nil { + return m.NestedType + } + return nil +} + +func (m *DescriptorProto) GetEnumType() []*EnumDescriptorProto { + if m != nil { + return m.EnumType + } + return nil +} + +func (m *DescriptorProto) GetExtensionRange() 
[]*DescriptorProto_ExtensionRange { + if m != nil { + return m.ExtensionRange + } + return nil +} + +func (m *DescriptorProto) GetOneofDecl() []*OneofDescriptorProto { + if m != nil { + return m.OneofDecl + } + return nil +} + +func (m *DescriptorProto) GetOptions() *MessageOptions { + if m != nil { + return m.Options + } + return nil +} + +type DescriptorProto_ExtensionRange struct { + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *DescriptorProto_ExtensionRange) Reset() { *m = DescriptorProto_ExtensionRange{} } +func (m *DescriptorProto_ExtensionRange) String() string { return proto.CompactTextString(m) } +func (*DescriptorProto_ExtensionRange) ProtoMessage() {} + +func (m *DescriptorProto_ExtensionRange) GetStart() int32 { + if m != nil && m.Start != nil { + return *m.Start + } + return 0 +} + +func (m *DescriptorProto_ExtensionRange) GetEnd() int32 { + if m != nil && m.End != nil { + return *m.End + } + return 0 +} + +// Describes a field within a message. +type FieldDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Number *int32 `protobuf:"varint,3,opt,name=number" json:"number,omitempty"` + Label *FieldDescriptorProto_Label `protobuf:"varint,4,opt,name=label,enum=google.protobuf.FieldDescriptorProto_Label" json:"label,omitempty"` + // If type_name is set, this need not be set. If both this and type_name + // are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP. + Type *FieldDescriptorProto_Type `protobuf:"varint,5,opt,name=type,enum=google.protobuf.FieldDescriptorProto_Type" json:"type,omitempty"` + // For message and enum types, this is the name of the type. If the name + // starts with a '.', it is fully-qualified. Otherwise, C++-like scoping + // rules are used to find the type (i.e. 
first the nested types within this + // message are searched, then within the parent, on up to the root + // namespace). + TypeName *string `protobuf:"bytes,6,opt,name=type_name" json:"type_name,omitempty"` + // For extensions, this is the name of the type being extended. It is + // resolved in the same manner as type_name. + Extendee *string `protobuf:"bytes,2,opt,name=extendee" json:"extendee,omitempty"` + // For numeric types, contains the original text representation of the value. + // For booleans, "true" or "false". + // For strings, contains the default text contents (not escaped in any way). + // For bytes, contains the C escaped value. All bytes >= 128 are escaped. + // TODO(kenton): Base-64 encode? + DefaultValue *string `protobuf:"bytes,7,opt,name=default_value" json:"default_value,omitempty"` + // If set, gives the index of a oneof in the containing type's oneof_decl + // list. This field is a member of that oneof. Extensions of a oneof should + // not set this since the oneof to which they belong will be inferred based + // on the extension range containing the extension's field number. 
+ OneofIndex *int32 `protobuf:"varint,9,opt,name=oneof_index" json:"oneof_index,omitempty"` + Options *FieldOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *FieldDescriptorProto) Reset() { *m = FieldDescriptorProto{} } +func (m *FieldDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*FieldDescriptorProto) ProtoMessage() {} + +func (m *FieldDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *FieldDescriptorProto) GetNumber() int32 { + if m != nil && m.Number != nil { + return *m.Number + } + return 0 +} + +func (m *FieldDescriptorProto) GetLabel() FieldDescriptorProto_Label { + if m != nil && m.Label != nil { + return *m.Label + } + return FieldDescriptorProto_LABEL_OPTIONAL +} + +func (m *FieldDescriptorProto) GetType() FieldDescriptorProto_Type { + if m != nil && m.Type != nil { + return *m.Type + } + return FieldDescriptorProto_TYPE_DOUBLE +} + +func (m *FieldDescriptorProto) GetTypeName() string { + if m != nil && m.TypeName != nil { + return *m.TypeName + } + return "" +} + +func (m *FieldDescriptorProto) GetExtendee() string { + if m != nil && m.Extendee != nil { + return *m.Extendee + } + return "" +} + +func (m *FieldDescriptorProto) GetDefaultValue() string { + if m != nil && m.DefaultValue != nil { + return *m.DefaultValue + } + return "" +} + +func (m *FieldDescriptorProto) GetOneofIndex() int32 { + if m != nil && m.OneofIndex != nil { + return *m.OneofIndex + } + return 0 +} + +func (m *FieldDescriptorProto) GetOptions() *FieldOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes a oneof. 
+type OneofDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *OneofDescriptorProto) Reset() { *m = OneofDescriptorProto{} } +func (m *OneofDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*OneofDescriptorProto) ProtoMessage() {} + +func (m *OneofDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +// Describes an enum type. +type EnumDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Value []*EnumValueDescriptorProto `protobuf:"bytes,2,rep,name=value" json:"value,omitempty"` + Options *EnumOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *EnumDescriptorProto) Reset() { *m = EnumDescriptorProto{} } +func (m *EnumDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*EnumDescriptorProto) ProtoMessage() {} + +func (m *EnumDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *EnumDescriptorProto) GetValue() []*EnumValueDescriptorProto { + if m != nil { + return m.Value + } + return nil +} + +func (m *EnumDescriptorProto) GetOptions() *EnumOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes a value within an enum. 
+type EnumValueDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Number *int32 `protobuf:"varint,2,opt,name=number" json:"number,omitempty"` + Options *EnumValueOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *EnumValueDescriptorProto) Reset() { *m = EnumValueDescriptorProto{} } +func (m *EnumValueDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*EnumValueDescriptorProto) ProtoMessage() {} + +func (m *EnumValueDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *EnumValueDescriptorProto) GetNumber() int32 { + if m != nil && m.Number != nil { + return *m.Number + } + return 0 +} + +func (m *EnumValueDescriptorProto) GetOptions() *EnumValueOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes a service. +type ServiceDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Method []*MethodDescriptorProto `protobuf:"bytes,2,rep,name=method" json:"method,omitempty"` + Options *ServiceOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ServiceDescriptorProto) Reset() { *m = ServiceDescriptorProto{} } +func (m *ServiceDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*ServiceDescriptorProto) ProtoMessage() {} + +func (m *ServiceDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *ServiceDescriptorProto) GetMethod() []*MethodDescriptorProto { + if m != nil { + return m.Method + } + return nil +} + +func (m *ServiceDescriptorProto) GetOptions() *ServiceOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes a method of a service. 
+type MethodDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Input and output type names. These are resolved in the same way as + // FieldDescriptorProto.type_name, but must refer to a message type. + InputType *string `protobuf:"bytes,2,opt,name=input_type" json:"input_type,omitempty"` + OutputType *string `protobuf:"bytes,3,opt,name=output_type" json:"output_type,omitempty"` + Options *MethodOptions `protobuf:"bytes,4,opt,name=options" json:"options,omitempty"` + // Identifies if client streams multiple client messages + ClientStreaming *bool `protobuf:"varint,5,opt,name=client_streaming,def=0" json:"client_streaming,omitempty"` + // Identifies if server streams multiple server messages + ServerStreaming *bool `protobuf:"varint,6,opt,name=server_streaming,def=0" json:"server_streaming,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MethodDescriptorProto) Reset() { *m = MethodDescriptorProto{} } +func (m *MethodDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*MethodDescriptorProto) ProtoMessage() {} + +const Default_MethodDescriptorProto_ClientStreaming bool = false +const Default_MethodDescriptorProto_ServerStreaming bool = false + +func (m *MethodDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *MethodDescriptorProto) GetInputType() string { + if m != nil && m.InputType != nil { + return *m.InputType + } + return "" +} + +func (m *MethodDescriptorProto) GetOutputType() string { + if m != nil && m.OutputType != nil { + return *m.OutputType + } + return "" +} + +func (m *MethodDescriptorProto) GetOptions() *MethodOptions { + if m != nil { + return m.Options + } + return nil +} + +func (m *MethodDescriptorProto) GetClientStreaming() bool { + if m != nil && m.ClientStreaming != nil { + return *m.ClientStreaming + } + return Default_MethodDescriptorProto_ClientStreaming +} + +func (m 
*MethodDescriptorProto) GetServerStreaming() bool { + if m != nil && m.ServerStreaming != nil { + return *m.ServerStreaming + } + return Default_MethodDescriptorProto_ServerStreaming +} + +type FileOptions struct { + // Sets the Java package where classes generated from this .proto will be + // placed. By default, the proto package is used, but this is often + // inappropriate because proto packages do not normally start with backwards + // domain names. + JavaPackage *string `protobuf:"bytes,1,opt,name=java_package" json:"java_package,omitempty"` + // If set, all the classes from the .proto file are wrapped in a single + // outer class with the given name. This applies to both Proto1 + // (equivalent to the old "--one_java_file" option) and Proto2 (where + // a .proto always translates to a single class, but you may want to + // explicitly choose the class name). + JavaOuterClassname *string `protobuf:"bytes,8,opt,name=java_outer_classname" json:"java_outer_classname,omitempty"` + // If set true, then the Java code generator will generate a separate .java + // file for each top-level message, enum, and service defined in the .proto + // file. Thus, these types will *not* be nested inside the outer class + // named by java_outer_classname. However, the outer class will still be + // generated to contain the file's getDescriptor() method as well as any + // top-level extensions defined in the file. + JavaMultipleFiles *bool `protobuf:"varint,10,opt,name=java_multiple_files,def=0" json:"java_multiple_files,omitempty"` + // If set true, then the Java code generator will generate equals() and + // hashCode() methods for all messages defined in the .proto file. + // - In the full runtime, this is purely a speed optimization, as the + // AbstractMessage base class includes reflection-based implementations of + // these methods. 
+ // - In the lite runtime, setting this option changes the semantics of + // equals() and hashCode() to more closely match those of the full runtime; + // the generated methods compute their results based on field values rather + // than object identity. (Implementations should not assume that hashcodes + // will be consistent across runtimes or versions of the protocol compiler.) + JavaGenerateEqualsAndHash *bool `protobuf:"varint,20,opt,name=java_generate_equals_and_hash,def=0" json:"java_generate_equals_and_hash,omitempty"` + // If set true, then the Java2 code generator will generate code that + // throws an exception whenever an attempt is made to assign a non-UTF-8 + // byte sequence to a string field. + // Message reflection will do the same. + // However, an extension field still accepts non-UTF-8 byte sequences. + // This option has no effect on when used with the lite runtime. + JavaStringCheckUtf8 *bool `protobuf:"varint,27,opt,name=java_string_check_utf8,def=0" json:"java_string_check_utf8,omitempty"` + OptimizeFor *FileOptions_OptimizeMode `protobuf:"varint,9,opt,name=optimize_for,enum=google.protobuf.FileOptions_OptimizeMode,def=1" json:"optimize_for,omitempty"` + // Sets the Go package where structs generated from this .proto will be + // placed. If omitted, the Go package will be derived from the following: + // - The basename of the package import path, if provided. + // - Otherwise, the package statement in the .proto file, if present. + // - Otherwise, the basename of the .proto file, without extension. + GoPackage *string `protobuf:"bytes,11,opt,name=go_package" json:"go_package,omitempty"` + // Should generic services be generated in each language? "Generic" services + // are not specific to any particular RPC system. They are generated by the + // main code generators in each language (without additional plugins). + // Generic services were the only kind of service generation supported by + // early versions of google.protobuf. 
+ // + // Generic services are now considered deprecated in favor of using plugins + // that generate code specific to your particular RPC system. Therefore, + // these default to false. Old code which depends on generic services should + // explicitly set them to true. + CcGenericServices *bool `protobuf:"varint,16,opt,name=cc_generic_services,def=0" json:"cc_generic_services,omitempty"` + JavaGenericServices *bool `protobuf:"varint,17,opt,name=java_generic_services,def=0" json:"java_generic_services,omitempty"` + PyGenericServices *bool `protobuf:"varint,18,opt,name=py_generic_services,def=0" json:"py_generic_services,omitempty"` + // Is this file deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for everything in the file, or it will be completely ignored; in the very + // least, this is a formalization for deprecating files. + Deprecated *bool `protobuf:"varint,23,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // Enables the use of arenas for the proto messages in this file. This applies + // only to generated classes for C++. + CcEnableArenas *bool `protobuf:"varint,31,opt,name=cc_enable_arenas,def=0" json:"cc_enable_arenas,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option" json:"uninterpreted_option,omitempty"` + XXX_extensions map[int32]proto.Extension `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *FileOptions) Reset() { *m = FileOptions{} } +func (m *FileOptions) String() string { return proto.CompactTextString(m) } +func (*FileOptions) ProtoMessage() {} + +var extRange_FileOptions = []proto.ExtensionRange{ + {1000, 536870911}, +} + +func (*FileOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_FileOptions +} +func (m *FileOptions) ExtensionMap() map[int32]proto.Extension { + if m.XXX_extensions == nil { + m.XXX_extensions = make(map[int32]proto.Extension) + } + return m.XXX_extensions +} + +const Default_FileOptions_JavaMultipleFiles bool = false +const Default_FileOptions_JavaGenerateEqualsAndHash bool = false +const Default_FileOptions_JavaStringCheckUtf8 bool = false +const Default_FileOptions_OptimizeFor FileOptions_OptimizeMode = FileOptions_SPEED +const Default_FileOptions_CcGenericServices bool = false +const Default_FileOptions_JavaGenericServices bool = false +const Default_FileOptions_PyGenericServices bool = false +const Default_FileOptions_Deprecated bool = false +const Default_FileOptions_CcEnableArenas bool = false + +func (m *FileOptions) GetJavaPackage() string { + if m != nil && m.JavaPackage != nil { + return *m.JavaPackage + } + return "" +} + +func (m *FileOptions) GetJavaOuterClassname() string { + if m != nil && m.JavaOuterClassname != nil { + return *m.JavaOuterClassname + } + return "" +} + +func (m *FileOptions) GetJavaMultipleFiles() bool { + if m != nil && m.JavaMultipleFiles != nil { + return *m.JavaMultipleFiles + } + return Default_FileOptions_JavaMultipleFiles +} + +func (m *FileOptions) GetJavaGenerateEqualsAndHash() bool { + if m != nil && m.JavaGenerateEqualsAndHash != nil { + return *m.JavaGenerateEqualsAndHash + } + return 
Default_FileOptions_JavaGenerateEqualsAndHash +} + +func (m *FileOptions) GetJavaStringCheckUtf8() bool { + if m != nil && m.JavaStringCheckUtf8 != nil { + return *m.JavaStringCheckUtf8 + } + return Default_FileOptions_JavaStringCheckUtf8 +} + +func (m *FileOptions) GetOptimizeFor() FileOptions_OptimizeMode { + if m != nil && m.OptimizeFor != nil { + return *m.OptimizeFor + } + return Default_FileOptions_OptimizeFor +} + +func (m *FileOptions) GetGoPackage() string { + if m != nil && m.GoPackage != nil { + return *m.GoPackage + } + return "" +} + +func (m *FileOptions) GetCcGenericServices() bool { + if m != nil && m.CcGenericServices != nil { + return *m.CcGenericServices + } + return Default_FileOptions_CcGenericServices +} + +func (m *FileOptions) GetJavaGenericServices() bool { + if m != nil && m.JavaGenericServices != nil { + return *m.JavaGenericServices + } + return Default_FileOptions_JavaGenericServices +} + +func (m *FileOptions) GetPyGenericServices() bool { + if m != nil && m.PyGenericServices != nil { + return *m.PyGenericServices + } + return Default_FileOptions_PyGenericServices +} + +func (m *FileOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_FileOptions_Deprecated +} + +func (m *FileOptions) GetCcEnableArenas() bool { + if m != nil && m.CcEnableArenas != nil { + return *m.CcEnableArenas + } + return Default_FileOptions_CcEnableArenas +} + +func (m *FileOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type MessageOptions struct { + // Set true to use the old proto1 MessageSet wire format for extensions. + // This is provided for backwards-compatibility with the MessageSet wire + // format. You should not use this for any other reason: It's less + // efficient, has fewer features, and is more complicated. 
+ // + // The message must be defined exactly as follows: + // message Foo { + // option message_set_wire_format = true; + // extensions 4 to max; + // } + // Note that the message cannot have any defined fields; MessageSets only + // have extensions. + // + // All extensions of your type must be singular messages; e.g. they cannot + // be int32s, enums, or repeated messages. + // + // Because this is an option, the above two restrictions are not enforced by + // the protocol compiler. + MessageSetWireFormat *bool `protobuf:"varint,1,opt,name=message_set_wire_format,def=0" json:"message_set_wire_format,omitempty"` + // Disables the generation of the standard "descriptor()" accessor, which can + // conflict with a field of the same name. This is meant to make migration + // from proto1 easier; new code should avoid fields named "descriptor". + NoStandardDescriptorAccessor *bool `protobuf:"varint,2,opt,name=no_standard_descriptor_accessor,def=0" json:"no_standard_descriptor_accessor,omitempty"` + // Is this message deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the message, or it will be completely ignored; in the very least, + // this is a formalization for deprecating messages. + Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // Whether the message is an automatically generated map entry type for the + // maps field. + // + // For maps fields: + // map map_field = 1; + // The parsed descriptor looks like: + // message MapFieldEntry { + // option map_entry = true; + // optional KeyType key = 1; + // optional ValueType value = 2; + // } + // repeated MapFieldEntry map_field = 1; + // + // Implementations may choose not to generate the map_entry=true message, but + // use a native map in the target language to hold the keys and values. + // The reflection APIs in such implementions still need to work as + // if the field is a repeated message field. 
+ // + // NOTE: Do not set the option in .proto files. Always use the maps syntax + // instead. The option should only be implicitly set by the proto compiler + // parser. + MapEntry *bool `protobuf:"varint,7,opt,name=map_entry" json:"map_entry,omitempty"` + // The parser stores options it doesn't recognize here. See above. + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option" json:"uninterpreted_option,omitempty"` + XXX_extensions map[int32]proto.Extension `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MessageOptions) Reset() { *m = MessageOptions{} } +func (m *MessageOptions) String() string { return proto.CompactTextString(m) } +func (*MessageOptions) ProtoMessage() {} + +var extRange_MessageOptions = []proto.ExtensionRange{ + {1000, 536870911}, +} + +func (*MessageOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_MessageOptions +} +func (m *MessageOptions) ExtensionMap() map[int32]proto.Extension { + if m.XXX_extensions == nil { + m.XXX_extensions = make(map[int32]proto.Extension) + } + return m.XXX_extensions +} + +const Default_MessageOptions_MessageSetWireFormat bool = false +const Default_MessageOptions_NoStandardDescriptorAccessor bool = false +const Default_MessageOptions_Deprecated bool = false + +func (m *MessageOptions) GetMessageSetWireFormat() bool { + if m != nil && m.MessageSetWireFormat != nil { + return *m.MessageSetWireFormat + } + return Default_MessageOptions_MessageSetWireFormat +} + +func (m *MessageOptions) GetNoStandardDescriptorAccessor() bool { + if m != nil && m.NoStandardDescriptorAccessor != nil { + return *m.NoStandardDescriptorAccessor + } + return Default_MessageOptions_NoStandardDescriptorAccessor +} + +func (m *MessageOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_MessageOptions_Deprecated +} + +func (m *MessageOptions) GetMapEntry() bool { + if m != nil && m.MapEntry != nil { + 
return *m.MapEntry + } + return false +} + +func (m *MessageOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type FieldOptions struct { + // The ctype option instructs the C++ code generator to use a different + // representation of the field than it normally would. See the specific + // options below. This option is not yet implemented in the open source + // release -- sorry, we'll try to include it in a future version! + Ctype *FieldOptions_CType `protobuf:"varint,1,opt,name=ctype,enum=google.protobuf.FieldOptions_CType,def=0" json:"ctype,omitempty"` + // The packed option can be enabled for repeated primitive fields to enable + // a more efficient representation on the wire. Rather than repeatedly + // writing the tag and type for each element, the entire array is encoded as + // a single length-delimited blob. + Packed *bool `protobuf:"varint,2,opt,name=packed" json:"packed,omitempty"` + // Should this field be parsed lazily? Lazy applies only to message-type + // fields. It means that when the outer message is initially parsed, the + // inner message's contents will not be parsed but instead stored in encoded + // form. The inner message will actually be parsed when it is first accessed. + // + // This is only a hint. Implementations are free to choose whether to use + // eager or lazy parsing regardless of the value of this option. However, + // setting this option true suggests that the protocol author believes that + // using lazy parsing on this field is worth the additional bookkeeping + // overhead typically needed to implement it. + // + // This option does not affect the public interface of any generated code; + // all method signatures remain the same. Furthermore, thread-safety of the + // interface is not affected by this option; const methods remain safe to + // call from multiple threads concurrently, while non-const methods continue + // to require exclusive access. 
+ // + // + // Note that implementations may choose not to check required fields within + // a lazy sub-message. That is, calling IsInitialized() on the outher message + // may return true even if the inner message has missing required fields. + // This is necessary because otherwise the inner message would have to be + // parsed in order to perform the check, defeating the purpose of lazy + // parsing. An implementation which chooses not to check required fields + // must be consistent about it. That is, for any particular sub-message, the + // implementation must either *always* check its required fields, or *never* + // check its required fields, regardless of whether or not the message has + // been parsed. + Lazy *bool `protobuf:"varint,5,opt,name=lazy,def=0" json:"lazy,omitempty"` + // Is this field deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for accessors, or it will be completely ignored; in the very least, this + // is a formalization for deprecating fields. + Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // For Google-internal migration only. Do not use. + Weak *bool `protobuf:"varint,10,opt,name=weak,def=0" json:"weak,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option" json:"uninterpreted_option,omitempty"` + XXX_extensions map[int32]proto.Extension `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *FieldOptions) Reset() { *m = FieldOptions{} } +func (m *FieldOptions) String() string { return proto.CompactTextString(m) } +func (*FieldOptions) ProtoMessage() {} + +var extRange_FieldOptions = []proto.ExtensionRange{ + {1000, 536870911}, +} + +func (*FieldOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_FieldOptions +} +func (m *FieldOptions) ExtensionMap() map[int32]proto.Extension { + if m.XXX_extensions == nil { + m.XXX_extensions = make(map[int32]proto.Extension) + } + return m.XXX_extensions +} + +const Default_FieldOptions_Ctype FieldOptions_CType = FieldOptions_STRING +const Default_FieldOptions_Lazy bool = false +const Default_FieldOptions_Deprecated bool = false +const Default_FieldOptions_Weak bool = false + +func (m *FieldOptions) GetCtype() FieldOptions_CType { + if m != nil && m.Ctype != nil { + return *m.Ctype + } + return Default_FieldOptions_Ctype +} + +func (m *FieldOptions) GetPacked() bool { + if m != nil && m.Packed != nil { + return *m.Packed + } + return false +} + +func (m *FieldOptions) GetLazy() bool { + if m != nil && m.Lazy != nil { + return *m.Lazy + } + return Default_FieldOptions_Lazy +} + +func (m *FieldOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_FieldOptions_Deprecated +} + +func (m *FieldOptions) GetWeak() bool { + if m != nil && m.Weak != nil { + return *m.Weak + } + return Default_FieldOptions_Weak +} + +func (m *FieldOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type EnumOptions struct { + // Set this option to true to allow mapping different tag names to the same + // value. 
+ AllowAlias *bool `protobuf:"varint,2,opt,name=allow_alias" json:"allow_alias,omitempty"` + // Is this enum deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the enum, or it will be completely ignored; in the very least, this + // is a formalization for deprecating enums. + Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // The parser stores options it doesn't recognize here. See above. + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option" json:"uninterpreted_option,omitempty"` + XXX_extensions map[int32]proto.Extension `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *EnumOptions) Reset() { *m = EnumOptions{} } +func (m *EnumOptions) String() string { return proto.CompactTextString(m) } +func (*EnumOptions) ProtoMessage() {} + +var extRange_EnumOptions = []proto.ExtensionRange{ + {1000, 536870911}, +} + +func (*EnumOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_EnumOptions +} +func (m *EnumOptions) ExtensionMap() map[int32]proto.Extension { + if m.XXX_extensions == nil { + m.XXX_extensions = make(map[int32]proto.Extension) + } + return m.XXX_extensions +} + +const Default_EnumOptions_Deprecated bool = false + +func (m *EnumOptions) GetAllowAlias() bool { + if m != nil && m.AllowAlias != nil { + return *m.AllowAlias + } + return false +} + +func (m *EnumOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_EnumOptions_Deprecated +} + +func (m *EnumOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type EnumValueOptions struct { + // Is this enum value deprecated? 
+ // Depending on the target platform, this can emit Deprecated annotations + // for the enum value, or it will be completely ignored; in the very least, + // this is a formalization for deprecating enum values. + Deprecated *bool `protobuf:"varint,1,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // The parser stores options it doesn't recognize here. See above. + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option" json:"uninterpreted_option,omitempty"` + XXX_extensions map[int32]proto.Extension `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *EnumValueOptions) Reset() { *m = EnumValueOptions{} } +func (m *EnumValueOptions) String() string { return proto.CompactTextString(m) } +func (*EnumValueOptions) ProtoMessage() {} + +var extRange_EnumValueOptions = []proto.ExtensionRange{ + {1000, 536870911}, +} + +func (*EnumValueOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_EnumValueOptions +} +func (m *EnumValueOptions) ExtensionMap() map[int32]proto.Extension { + if m.XXX_extensions == nil { + m.XXX_extensions = make(map[int32]proto.Extension) + } + return m.XXX_extensions +} + +const Default_EnumValueOptions_Deprecated bool = false + +func (m *EnumValueOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_EnumValueOptions_Deprecated +} + +func (m *EnumValueOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type ServiceOptions struct { + // Is this service deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the service, or it will be completely ignored; in the very least, + // this is a formalization for deprecating services. + Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option" json:"uninterpreted_option,omitempty"` + XXX_extensions map[int32]proto.Extension `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ServiceOptions) Reset() { *m = ServiceOptions{} } +func (m *ServiceOptions) String() string { return proto.CompactTextString(m) } +func (*ServiceOptions) ProtoMessage() {} + +var extRange_ServiceOptions = []proto.ExtensionRange{ + {1000, 536870911}, +} + +func (*ServiceOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_ServiceOptions +} +func (m *ServiceOptions) ExtensionMap() map[int32]proto.Extension { + if m.XXX_extensions == nil { + m.XXX_extensions = make(map[int32]proto.Extension) + } + return m.XXX_extensions +} + +const Default_ServiceOptions_Deprecated bool = false + +func (m *ServiceOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_ServiceOptions_Deprecated +} + +func (m *ServiceOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type MethodOptions struct { + // Is this method deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the method, or it will be completely ignored; in the very least, + // this is a formalization for deprecating methods. + Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option" json:"uninterpreted_option,omitempty"` + XXX_extensions map[int32]proto.Extension `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MethodOptions) Reset() { *m = MethodOptions{} } +func (m *MethodOptions) String() string { return proto.CompactTextString(m) } +func (*MethodOptions) ProtoMessage() {} + +var extRange_MethodOptions = []proto.ExtensionRange{ + {1000, 536870911}, +} + +func (*MethodOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_MethodOptions +} +func (m *MethodOptions) ExtensionMap() map[int32]proto.Extension { + if m.XXX_extensions == nil { + m.XXX_extensions = make(map[int32]proto.Extension) + } + return m.XXX_extensions +} + +const Default_MethodOptions_Deprecated bool = false + +func (m *MethodOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_MethodOptions_Deprecated +} + +func (m *MethodOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +// A message representing a option the parser does not recognize. This only +// appears in options protos created by the compiler::Parser class. +// DescriptorPool resolves these when building Descriptor objects. Therefore, +// options protos in descriptor objects (e.g. returned by Descriptor::options(), +// or produced by Descriptor::CopyTo()) will never have UninterpretedOptions +// in them. +type UninterpretedOption struct { + Name []*UninterpretedOption_NamePart `protobuf:"bytes,2,rep,name=name" json:"name,omitempty"` + // The value of the uninterpreted option, in whatever type the tokenizer + // identified it as during parsing. Exactly one of these should be set. 
+ IdentifierValue *string `protobuf:"bytes,3,opt,name=identifier_value" json:"identifier_value,omitempty"` + PositiveIntValue *uint64 `protobuf:"varint,4,opt,name=positive_int_value" json:"positive_int_value,omitempty"` + NegativeIntValue *int64 `protobuf:"varint,5,opt,name=negative_int_value" json:"negative_int_value,omitempty"` + DoubleValue *float64 `protobuf:"fixed64,6,opt,name=double_value" json:"double_value,omitempty"` + StringValue []byte `protobuf:"bytes,7,opt,name=string_value" json:"string_value,omitempty"` + AggregateValue *string `protobuf:"bytes,8,opt,name=aggregate_value" json:"aggregate_value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *UninterpretedOption) Reset() { *m = UninterpretedOption{} } +func (m *UninterpretedOption) String() string { return proto.CompactTextString(m) } +func (*UninterpretedOption) ProtoMessage() {} + +func (m *UninterpretedOption) GetName() []*UninterpretedOption_NamePart { + if m != nil { + return m.Name + } + return nil +} + +func (m *UninterpretedOption) GetIdentifierValue() string { + if m != nil && m.IdentifierValue != nil { + return *m.IdentifierValue + } + return "" +} + +func (m *UninterpretedOption) GetPositiveIntValue() uint64 { + if m != nil && m.PositiveIntValue != nil { + return *m.PositiveIntValue + } + return 0 +} + +func (m *UninterpretedOption) GetNegativeIntValue() int64 { + if m != nil && m.NegativeIntValue != nil { + return *m.NegativeIntValue + } + return 0 +} + +func (m *UninterpretedOption) GetDoubleValue() float64 { + if m != nil && m.DoubleValue != nil { + return *m.DoubleValue + } + return 0 +} + +func (m *UninterpretedOption) GetStringValue() []byte { + if m != nil { + return m.StringValue + } + return nil +} + +func (m *UninterpretedOption) GetAggregateValue() string { + if m != nil && m.AggregateValue != nil { + return *m.AggregateValue + } + return "" +} + +// The name of the uninterpreted option. Each string represents a segment in +// a dot-separated name. 
is_extension is true iff a segment represents an +// extension (denoted with parentheses in options specs in .proto files). +// E.g.,{ ["foo", false], ["bar.baz", true], ["qux", false] } represents +// "foo.(bar.baz).qux". +type UninterpretedOption_NamePart struct { + NamePart *string `protobuf:"bytes,1,req,name=name_part" json:"name_part,omitempty"` + IsExtension *bool `protobuf:"varint,2,req,name=is_extension" json:"is_extension,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *UninterpretedOption_NamePart) Reset() { *m = UninterpretedOption_NamePart{} } +func (m *UninterpretedOption_NamePart) String() string { return proto.CompactTextString(m) } +func (*UninterpretedOption_NamePart) ProtoMessage() {} + +func (m *UninterpretedOption_NamePart) GetNamePart() string { + if m != nil && m.NamePart != nil { + return *m.NamePart + } + return "" +} + +func (m *UninterpretedOption_NamePart) GetIsExtension() bool { + if m != nil && m.IsExtension != nil { + return *m.IsExtension + } + return false +} + +// Encapsulates information about the original source file from which a +// FileDescriptorProto was generated. +type SourceCodeInfo struct { + // A Location identifies a piece of source code in a .proto file which + // corresponds to a particular definition. This information is intended + // to be useful to IDEs, code indexers, documentation generators, and similar + // tools. + // + // For example, say we have a file like: + // message Foo { + // optional string foo = 1; + // } + // Let's look at just the field definition: + // optional string foo = 1; + // ^ ^^ ^^ ^ ^^^ + // a bc de f ghi + // We have the following locations: + // span path represents + // [a,i) [ 4, 0, 2, 0 ] The whole field definition. + // [a,b) [ 4, 0, 2, 0, 4 ] The label (optional). + // [c,d) [ 4, 0, 2, 0, 5 ] The type (string). + // [e,f) [ 4, 0, 2, 0, 1 ] The name (foo). + // [g,h) [ 4, 0, 2, 0, 3 ] The number (1). 
+ // + // Notes: + // - A location may refer to a repeated field itself (i.e. not to any + // particular index within it). This is used whenever a set of elements are + // logically enclosed in a single code segment. For example, an entire + // extend block (possibly containing multiple extension definitions) will + // have an outer location whose path refers to the "extensions" repeated + // field without an index. + // - Multiple locations may have the same path. This happens when a single + // logical declaration is spread out across multiple places. The most + // obvious example is the "extend" block again -- there may be multiple + // extend blocks in the same scope, each of which will have the same path. + // - A location's span is not always a subset of its parent's span. For + // example, the "extendee" of an extension declaration appears at the + // beginning of the "extend" block and is shared by all extensions within + // the block. + // - Just because a location's span is a subset of some other location's span + // does not mean that it is a descendent. For example, a "group" defines + // both a type and a field in a single declaration. Thus, the locations + // corresponding to the type and field and their components will overlap. + // - Code which tries to interpret locations should probably be designed to + // ignore those that it doesn't understand, as more types of locations could + // be recorded in the future. 
+ Location []*SourceCodeInfo_Location `protobuf:"bytes,1,rep,name=location" json:"location,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *SourceCodeInfo) Reset() { *m = SourceCodeInfo{} } +func (m *SourceCodeInfo) String() string { return proto.CompactTextString(m) } +func (*SourceCodeInfo) ProtoMessage() {} + +func (m *SourceCodeInfo) GetLocation() []*SourceCodeInfo_Location { + if m != nil { + return m.Location + } + return nil +} + +type SourceCodeInfo_Location struct { + // Identifies which part of the FileDescriptorProto was defined at this + // location. + // + // Each element is a field number or an index. They form a path from + // the root FileDescriptorProto to the place where the definition. For + // example, this path: + // [ 4, 3, 2, 7, 1 ] + // refers to: + // file.message_type(3) // 4, 3 + // .field(7) // 2, 7 + // .name() // 1 + // This is because FileDescriptorProto.message_type has field number 4: + // repeated DescriptorProto message_type = 4; + // and DescriptorProto.field has field number 2: + // repeated FieldDescriptorProto field = 2; + // and FieldDescriptorProto.name has field number 1: + // optional string name = 1; + // + // Thus, the above path gives the location of a field name. If we removed + // the last element: + // [ 4, 3, 2, 7 ] + // this path refers to the whole field declaration (from the beginning + // of the label to the terminating semicolon). + Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"` + // Always has exactly three or four elements: start line, start column, + // end line (optional, otherwise assumed same as start line), end column. + // These are packed into a single field for efficiency. Note that line + // and column numbers are zero-based -- typically you will want to add + // 1 to each before displaying to a user. 
+ Span []int32 `protobuf:"varint,2,rep,packed,name=span" json:"span,omitempty"` + // If this SourceCodeInfo represents a complete declaration, these are any + // comments appearing before and after the declaration which appear to be + // attached to the declaration. + // + // A series of line comments appearing on consecutive lines, with no other + // tokens appearing on those lines, will be treated as a single comment. + // + // Only the comment content is provided; comment markers (e.g. //) are + // stripped out. For block comments, leading whitespace and an asterisk + // will be stripped from the beginning of each line other than the first. + // Newlines are included in the output. + // + // Examples: + // + // optional int32 foo = 1; // Comment attached to foo. + // // Comment attached to bar. + // optional int32 bar = 2; + // + // optional string baz = 3; + // // Comment attached to baz. + // // Another line attached to baz. + // + // // Comment attached to qux. + // // + // // Another line attached to qux. + // optional double qux = 4; + // + // optional string corge = 5; + // /* Block comment attached + // * to corge. Leading asterisks + // * will be removed. */ + // /* Block comment attached to + // * grault. 
*/ + // optional int32 grault = 6; + LeadingComments *string `protobuf:"bytes,3,opt,name=leading_comments" json:"leading_comments,omitempty"` + TrailingComments *string `protobuf:"bytes,4,opt,name=trailing_comments" json:"trailing_comments,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *SourceCodeInfo_Location) Reset() { *m = SourceCodeInfo_Location{} } +func (m *SourceCodeInfo_Location) String() string { return proto.CompactTextString(m) } +func (*SourceCodeInfo_Location) ProtoMessage() {} + +func (m *SourceCodeInfo_Location) GetPath() []int32 { + if m != nil { + return m.Path + } + return nil +} + +func (m *SourceCodeInfo_Location) GetSpan() []int32 { + if m != nil { + return m.Span + } + return nil +} + +func (m *SourceCodeInfo_Location) GetLeadingComments() string { + if m != nil && m.LeadingComments != nil { + return *m.LeadingComments + } + return "" +} + +func (m *SourceCodeInfo_Location) GetTrailingComments() string { + if m != nil && m.TrailingComments != nil { + return *m.TrailingComments + } + return "" +} + +func init() { + proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Type", FieldDescriptorProto_Type_name, FieldDescriptorProto_Type_value) + proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Label", FieldDescriptorProto_Label_name, FieldDescriptorProto_Label_value) + proto.RegisterEnum("google.protobuf.FileOptions_OptimizeMode", FileOptions_OptimizeMode_name, FileOptions_OptimizeMode_value) + proto.RegisterEnum("google.protobuf.FieldOptions_CType", FieldOptions_CType_name, FieldOptions_CType_value) +} diff -Nru juju-core-2.0~beta15/src/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.golden juju-core-2.0.0/src/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.golden --- juju-core-2.0~beta15/src/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.golden 1970-01-01 00:00:00.000000000 +0000 +++ 
juju-core-2.0.0/src/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.golden 2016-10-13 14:32:09.000000000 +0000 @@ -0,0 +1,1024 @@ +// Code generated by protoc-gen-go. +// source: google/protobuf/descriptor.proto +// DO NOT EDIT! + +package google_protobuf + +import proto "github.com/golang/protobuf/proto" +import "math" + +// Reference proto and math imports to suppress error if they are not otherwise used. +var _ = proto.GetString +var _ = math.Inf + +type FieldDescriptorProto_Type int32 + +const ( + FieldDescriptorProto_TYPE_DOUBLE FieldDescriptorProto_Type = 1 + FieldDescriptorProto_TYPE_FLOAT FieldDescriptorProto_Type = 2 + FieldDescriptorProto_TYPE_INT64 FieldDescriptorProto_Type = 3 + FieldDescriptorProto_TYPE_UINT64 FieldDescriptorProto_Type = 4 + FieldDescriptorProto_TYPE_INT32 FieldDescriptorProto_Type = 5 + FieldDescriptorProto_TYPE_FIXED64 FieldDescriptorProto_Type = 6 + FieldDescriptorProto_TYPE_FIXED32 FieldDescriptorProto_Type = 7 + FieldDescriptorProto_TYPE_BOOL FieldDescriptorProto_Type = 8 + FieldDescriptorProto_TYPE_STRING FieldDescriptorProto_Type = 9 + FieldDescriptorProto_TYPE_GROUP FieldDescriptorProto_Type = 10 + FieldDescriptorProto_TYPE_MESSAGE FieldDescriptorProto_Type = 11 + FieldDescriptorProto_TYPE_BYTES FieldDescriptorProto_Type = 12 + FieldDescriptorProto_TYPE_UINT32 FieldDescriptorProto_Type = 13 + FieldDescriptorProto_TYPE_ENUM FieldDescriptorProto_Type = 14 + FieldDescriptorProto_TYPE_SFIXED32 FieldDescriptorProto_Type = 15 + FieldDescriptorProto_TYPE_SFIXED64 FieldDescriptorProto_Type = 16 + FieldDescriptorProto_TYPE_SINT32 FieldDescriptorProto_Type = 17 + FieldDescriptorProto_TYPE_SINT64 FieldDescriptorProto_Type = 18 +) + +var FieldDescriptorProto_Type_name = map[int32]string{ + 1: "TYPE_DOUBLE", + 2: "TYPE_FLOAT", + 3: "TYPE_INT64", + 4: "TYPE_UINT64", + 5: "TYPE_INT32", + 6: "TYPE_FIXED64", + 7: "TYPE_FIXED32", + 8: "TYPE_BOOL", + 9: "TYPE_STRING", + 10: "TYPE_GROUP", + 11: "TYPE_MESSAGE", + 12: "TYPE_BYTES", 
+ 13: "TYPE_UINT32", + 14: "TYPE_ENUM", + 15: "TYPE_SFIXED32", + 16: "TYPE_SFIXED64", + 17: "TYPE_SINT32", + 18: "TYPE_SINT64", +} +var FieldDescriptorProto_Type_value = map[string]int32{ + "TYPE_DOUBLE": 1, + "TYPE_FLOAT": 2, + "TYPE_INT64": 3, + "TYPE_UINT64": 4, + "TYPE_INT32": 5, + "TYPE_FIXED64": 6, + "TYPE_FIXED32": 7, + "TYPE_BOOL": 8, + "TYPE_STRING": 9, + "TYPE_GROUP": 10, + "TYPE_MESSAGE": 11, + "TYPE_BYTES": 12, + "TYPE_UINT32": 13, + "TYPE_ENUM": 14, + "TYPE_SFIXED32": 15, + "TYPE_SFIXED64": 16, + "TYPE_SINT32": 17, + "TYPE_SINT64": 18, +} + +// NewFieldDescriptorProto_Type is deprecated. Use x.Enum() instead. +func NewFieldDescriptorProto_Type(x FieldDescriptorProto_Type) *FieldDescriptorProto_Type { + e := FieldDescriptorProto_Type(x) + return &e +} +func (x FieldDescriptorProto_Type) Enum() *FieldDescriptorProto_Type { + p := new(FieldDescriptorProto_Type) + *p = x + return p +} +func (x FieldDescriptorProto_Type) String() string { + return proto.EnumName(FieldDescriptorProto_Type_name, int32(x)) +} + +type FieldDescriptorProto_Label int32 + +const ( + FieldDescriptorProto_LABEL_OPTIONAL FieldDescriptorProto_Label = 1 + FieldDescriptorProto_LABEL_REQUIRED FieldDescriptorProto_Label = 2 + FieldDescriptorProto_LABEL_REPEATED FieldDescriptorProto_Label = 3 +) + +var FieldDescriptorProto_Label_name = map[int32]string{ + 1: "LABEL_OPTIONAL", + 2: "LABEL_REQUIRED", + 3: "LABEL_REPEATED", +} +var FieldDescriptorProto_Label_value = map[string]int32{ + "LABEL_OPTIONAL": 1, + "LABEL_REQUIRED": 2, + "LABEL_REPEATED": 3, +} + +// NewFieldDescriptorProto_Label is deprecated. Use x.Enum() instead. 
+func NewFieldDescriptorProto_Label(x FieldDescriptorProto_Label) *FieldDescriptorProto_Label { + e := FieldDescriptorProto_Label(x) + return &e +} +func (x FieldDescriptorProto_Label) Enum() *FieldDescriptorProto_Label { + p := new(FieldDescriptorProto_Label) + *p = x + return p +} +func (x FieldDescriptorProto_Label) String() string { + return proto.EnumName(FieldDescriptorProto_Label_name, int32(x)) +} + +type FileOptions_OptimizeMode int32 + +const ( + FileOptions_SPEED FileOptions_OptimizeMode = 1 + FileOptions_CODE_SIZE FileOptions_OptimizeMode = 2 + FileOptions_LITE_RUNTIME FileOptions_OptimizeMode = 3 +) + +var FileOptions_OptimizeMode_name = map[int32]string{ + 1: "SPEED", + 2: "CODE_SIZE", + 3: "LITE_RUNTIME", +} +var FileOptions_OptimizeMode_value = map[string]int32{ + "SPEED": 1, + "CODE_SIZE": 2, + "LITE_RUNTIME": 3, +} + +// NewFileOptions_OptimizeMode is deprecated. Use x.Enum() instead. +func NewFileOptions_OptimizeMode(x FileOptions_OptimizeMode) *FileOptions_OptimizeMode { + e := FileOptions_OptimizeMode(x) + return &e +} +func (x FileOptions_OptimizeMode) Enum() *FileOptions_OptimizeMode { + p := new(FileOptions_OptimizeMode) + *p = x + return p +} +func (x FileOptions_OptimizeMode) String() string { + return proto.EnumName(FileOptions_OptimizeMode_name, int32(x)) +} + +type FieldOptions_CType int32 + +const ( + FieldOptions_STRING FieldOptions_CType = 0 + FieldOptions_CORD FieldOptions_CType = 1 + FieldOptions_STRING_PIECE FieldOptions_CType = 2 +) + +var FieldOptions_CType_name = map[int32]string{ + 0: "STRING", + 1: "CORD", + 2: "STRING_PIECE", +} +var FieldOptions_CType_value = map[string]int32{ + "STRING": 0, + "CORD": 1, + "STRING_PIECE": 2, +} + +// NewFieldOptions_CType is deprecated. Use x.Enum() instead. 
+func NewFieldOptions_CType(x FieldOptions_CType) *FieldOptions_CType { + e := FieldOptions_CType(x) + return &e +} +func (x FieldOptions_CType) Enum() *FieldOptions_CType { + p := new(FieldOptions_CType) + *p = x + return p +} +func (x FieldOptions_CType) String() string { + return proto.EnumName(FieldOptions_CType_name, int32(x)) +} + +type StreamOptions_TokenUnit int32 + +const ( + StreamOptions_MESSAGE StreamOptions_TokenUnit = 0 + StreamOptions_BYTE StreamOptions_TokenUnit = 1 +) + +var StreamOptions_TokenUnit_name = map[int32]string{ + 0: "MESSAGE", + 1: "BYTE", +} +var StreamOptions_TokenUnit_value = map[string]int32{ + "MESSAGE": 0, + "BYTE": 1, +} + +// NewStreamOptions_TokenUnit is deprecated. Use x.Enum() instead. +func NewStreamOptions_TokenUnit(x StreamOptions_TokenUnit) *StreamOptions_TokenUnit { + e := StreamOptions_TokenUnit(x) + return &e +} +func (x StreamOptions_TokenUnit) Enum() *StreamOptions_TokenUnit { + p := new(StreamOptions_TokenUnit) + *p = x + return p +} +func (x StreamOptions_TokenUnit) String() string { + return proto.EnumName(StreamOptions_TokenUnit_name, int32(x)) +} + +type FileDescriptorSet struct { + File []*FileDescriptorProto `protobuf:"bytes,1,rep,name=file" json:"file,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (this *FileDescriptorSet) Reset() { *this = FileDescriptorSet{} } +func (this *FileDescriptorSet) String() string { return proto.CompactTextString(this) } +func (*FileDescriptorSet) ProtoMessage() {} + +type FileDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Package *string `protobuf:"bytes,2,opt,name=package" json:"package,omitempty"` + Dependency []string `protobuf:"bytes,3,rep,name=dependency" json:"dependency,omitempty"` + PublicDependency []int32 `protobuf:"varint,10,rep,name=public_dependency" json:"public_dependency,omitempty"` + WeakDependency []int32 `protobuf:"varint,11,rep,name=weak_dependency" json:"weak_dependency,omitempty"` + MessageType 
[]*DescriptorProto `protobuf:"bytes,4,rep,name=message_type" json:"message_type,omitempty"` + EnumType []*EnumDescriptorProto `protobuf:"bytes,5,rep,name=enum_type" json:"enum_type,omitempty"` + Service []*ServiceDescriptorProto `protobuf:"bytes,6,rep,name=service" json:"service,omitempty"` + Extension []*FieldDescriptorProto `protobuf:"bytes,7,rep,name=extension" json:"extension,omitempty"` + Options *FileOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"` + SourceCodeInfo *SourceCodeInfo `protobuf:"bytes,9,opt,name=source_code_info" json:"source_code_info,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (this *FileDescriptorProto) Reset() { *this = FileDescriptorProto{} } +func (this *FileDescriptorProto) String() string { return proto.CompactTextString(this) } +func (*FileDescriptorProto) ProtoMessage() {} + +func (this *FileDescriptorProto) GetName() string { + if this != nil && this.Name != nil { + return *this.Name + } + return "" +} + +func (this *FileDescriptorProto) GetPackage() string { + if this != nil && this.Package != nil { + return *this.Package + } + return "" +} + +func (this *FileDescriptorProto) GetOptions() *FileOptions { + if this != nil { + return this.Options + } + return nil +} + +func (this *FileDescriptorProto) GetSourceCodeInfo() *SourceCodeInfo { + if this != nil { + return this.SourceCodeInfo + } + return nil +} + +type DescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Field []*FieldDescriptorProto `protobuf:"bytes,2,rep,name=field" json:"field,omitempty"` + Extension []*FieldDescriptorProto `protobuf:"bytes,6,rep,name=extension" json:"extension,omitempty"` + NestedType []*DescriptorProto `protobuf:"bytes,3,rep,name=nested_type" json:"nested_type,omitempty"` + EnumType []*EnumDescriptorProto `protobuf:"bytes,4,rep,name=enum_type" json:"enum_type,omitempty"` + ExtensionRange []*DescriptorProto_ExtensionRange `protobuf:"bytes,5,rep,name=extension_range" 
json:"extension_range,omitempty"` + Options *MessageOptions `protobuf:"bytes,7,opt,name=options" json:"options,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (this *DescriptorProto) Reset() { *this = DescriptorProto{} } +func (this *DescriptorProto) String() string { return proto.CompactTextString(this) } +func (*DescriptorProto) ProtoMessage() {} + +func (this *DescriptorProto) GetName() string { + if this != nil && this.Name != nil { + return *this.Name + } + return "" +} + +func (this *DescriptorProto) GetOptions() *MessageOptions { + if this != nil { + return this.Options + } + return nil +} + +type DescriptorProto_ExtensionRange struct { + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (this *DescriptorProto_ExtensionRange) Reset() { *this = DescriptorProto_ExtensionRange{} } +func (this *DescriptorProto_ExtensionRange) String() string { return proto.CompactTextString(this) } +func (*DescriptorProto_ExtensionRange) ProtoMessage() {} + +func (this *DescriptorProto_ExtensionRange) GetStart() int32 { + if this != nil && this.Start != nil { + return *this.Start + } + return 0 +} + +func (this *DescriptorProto_ExtensionRange) GetEnd() int32 { + if this != nil && this.End != nil { + return *this.End + } + return 0 +} + +type FieldDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Number *int32 `protobuf:"varint,3,opt,name=number" json:"number,omitempty"` + Label *FieldDescriptorProto_Label `protobuf:"varint,4,opt,name=label,enum=proto2.FieldDescriptorProto_Label" json:"label,omitempty"` + Type *FieldDescriptorProto_Type `protobuf:"varint,5,opt,name=type,enum=proto2.FieldDescriptorProto_Type" json:"type,omitempty"` + TypeName *string `protobuf:"bytes,6,opt,name=type_name" json:"type_name,omitempty"` + Extendee *string `protobuf:"bytes,2,opt,name=extendee" 
json:"extendee,omitempty"` + DefaultValue *string `protobuf:"bytes,7,opt,name=default_value" json:"default_value,omitempty"` + Options *FieldOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (this *FieldDescriptorProto) Reset() { *this = FieldDescriptorProto{} } +func (this *FieldDescriptorProto) String() string { return proto.CompactTextString(this) } +func (*FieldDescriptorProto) ProtoMessage() {} + +func (this *FieldDescriptorProto) GetName() string { + if this != nil && this.Name != nil { + return *this.Name + } + return "" +} + +func (this *FieldDescriptorProto) GetNumber() int32 { + if this != nil && this.Number != nil { + return *this.Number + } + return 0 +} + +func (this *FieldDescriptorProto) GetLabel() FieldDescriptorProto_Label { + if this != nil && this.Label != nil { + return *this.Label + } + return 0 +} + +func (this *FieldDescriptorProto) GetType() FieldDescriptorProto_Type { + if this != nil && this.Type != nil { + return *this.Type + } + return 0 +} + +func (this *FieldDescriptorProto) GetTypeName() string { + if this != nil && this.TypeName != nil { + return *this.TypeName + } + return "" +} + +func (this *FieldDescriptorProto) GetExtendee() string { + if this != nil && this.Extendee != nil { + return *this.Extendee + } + return "" +} + +func (this *FieldDescriptorProto) GetDefaultValue() string { + if this != nil && this.DefaultValue != nil { + return *this.DefaultValue + } + return "" +} + +func (this *FieldDescriptorProto) GetOptions() *FieldOptions { + if this != nil { + return this.Options + } + return nil +} + +type EnumDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Value []*EnumValueDescriptorProto `protobuf:"bytes,2,rep,name=value" json:"value,omitempty"` + Options *EnumOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (this *EnumDescriptorProto) 
Reset() { *this = EnumDescriptorProto{} } +func (this *EnumDescriptorProto) String() string { return proto.CompactTextString(this) } +func (*EnumDescriptorProto) ProtoMessage() {} + +func (this *EnumDescriptorProto) GetName() string { + if this != nil && this.Name != nil { + return *this.Name + } + return "" +} + +func (this *EnumDescriptorProto) GetOptions() *EnumOptions { + if this != nil { + return this.Options + } + return nil +} + +type EnumValueDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Number *int32 `protobuf:"varint,2,opt,name=number" json:"number,omitempty"` + Options *EnumValueOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (this *EnumValueDescriptorProto) Reset() { *this = EnumValueDescriptorProto{} } +func (this *EnumValueDescriptorProto) String() string { return proto.CompactTextString(this) } +func (*EnumValueDescriptorProto) ProtoMessage() {} + +func (this *EnumValueDescriptorProto) GetName() string { + if this != nil && this.Name != nil { + return *this.Name + } + return "" +} + +func (this *EnumValueDescriptorProto) GetNumber() int32 { + if this != nil && this.Number != nil { + return *this.Number + } + return 0 +} + +func (this *EnumValueDescriptorProto) GetOptions() *EnumValueOptions { + if this != nil { + return this.Options + } + return nil +} + +type ServiceDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Method []*MethodDescriptorProto `protobuf:"bytes,2,rep,name=method" json:"method,omitempty"` + Stream []*StreamDescriptorProto `protobuf:"bytes,4,rep,name=stream" json:"stream,omitempty"` + Options *ServiceOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (this *ServiceDescriptorProto) Reset() { *this = ServiceDescriptorProto{} } +func (this *ServiceDescriptorProto) String() string { return 
proto.CompactTextString(this) } +func (*ServiceDescriptorProto) ProtoMessage() {} + +func (this *ServiceDescriptorProto) GetName() string { + if this != nil && this.Name != nil { + return *this.Name + } + return "" +} + +func (this *ServiceDescriptorProto) GetOptions() *ServiceOptions { + if this != nil { + return this.Options + } + return nil +} + +type MethodDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + InputType *string `protobuf:"bytes,2,opt,name=input_type" json:"input_type,omitempty"` + OutputType *string `protobuf:"bytes,3,opt,name=output_type" json:"output_type,omitempty"` + Options *MethodOptions `protobuf:"bytes,4,opt,name=options" json:"options,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (this *MethodDescriptorProto) Reset() { *this = MethodDescriptorProto{} } +func (this *MethodDescriptorProto) String() string { return proto.CompactTextString(this) } +func (*MethodDescriptorProto) ProtoMessage() {} + +func (this *MethodDescriptorProto) GetName() string { + if this != nil && this.Name != nil { + return *this.Name + } + return "" +} + +func (this *MethodDescriptorProto) GetInputType() string { + if this != nil && this.InputType != nil { + return *this.InputType + } + return "" +} + +func (this *MethodDescriptorProto) GetOutputType() string { + if this != nil && this.OutputType != nil { + return *this.OutputType + } + return "" +} + +func (this *MethodDescriptorProto) GetOptions() *MethodOptions { + if this != nil { + return this.Options + } + return nil +} + +type StreamDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + ClientMessageType *string `protobuf:"bytes,2,opt,name=client_message_type" json:"client_message_type,omitempty"` + ServerMessageType *string `protobuf:"bytes,3,opt,name=server_message_type" json:"server_message_type,omitempty"` + Options *StreamOptions `protobuf:"bytes,4,opt,name=options" json:"options,omitempty"` + 
XXX_unrecognized []byte `json:"-"` +} + +func (this *StreamDescriptorProto) Reset() { *this = StreamDescriptorProto{} } +func (this *StreamDescriptorProto) String() string { return proto.CompactTextString(this) } +func (*StreamDescriptorProto) ProtoMessage() {} + +func (this *StreamDescriptorProto) GetName() string { + if this != nil && this.Name != nil { + return *this.Name + } + return "" +} + +func (this *StreamDescriptorProto) GetClientMessageType() string { + if this != nil && this.ClientMessageType != nil { + return *this.ClientMessageType + } + return "" +} + +func (this *StreamDescriptorProto) GetServerMessageType() string { + if this != nil && this.ServerMessageType != nil { + return *this.ServerMessageType + } + return "" +} + +func (this *StreamDescriptorProto) GetOptions() *StreamOptions { + if this != nil { + return this.Options + } + return nil +} + +type FileOptions struct { + JavaPackage *string `protobuf:"bytes,1,opt,name=java_package" json:"java_package,omitempty"` + JavaOuterClassname *string `protobuf:"bytes,8,opt,name=java_outer_classname" json:"java_outer_classname,omitempty"` + JavaMultipleFiles *bool `protobuf:"varint,10,opt,name=java_multiple_files,def=0" json:"java_multiple_files,omitempty"` + JavaGenerateEqualsAndHash *bool `protobuf:"varint,20,opt,name=java_generate_equals_and_hash,def=0" json:"java_generate_equals_and_hash,omitempty"` + OptimizeFor *FileOptions_OptimizeMode `protobuf:"varint,9,opt,name=optimize_for,enum=proto2.FileOptions_OptimizeMode,def=1" json:"optimize_for,omitempty"` + GoPackage *string `protobuf:"bytes,11,opt,name=go_package" json:"go_package,omitempty"` + CcGenericServices *bool `protobuf:"varint,16,opt,name=cc_generic_services,def=0" json:"cc_generic_services,omitempty"` + JavaGenericServices *bool `protobuf:"varint,17,opt,name=java_generic_services,def=0" json:"java_generic_services,omitempty"` + PyGenericServices *bool `protobuf:"varint,18,opt,name=py_generic_services,def=0" 
json:"py_generic_services,omitempty"` + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option" json:"uninterpreted_option,omitempty"` + XXX_extensions map[int32]proto.Extension `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (this *FileOptions) Reset() { *this = FileOptions{} } +func (this *FileOptions) String() string { return proto.CompactTextString(this) } +func (*FileOptions) ProtoMessage() {} + +var extRange_FileOptions = []proto.ExtensionRange{ + {1000, 536870911}, +} + +func (*FileOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_FileOptions +} +func (this *FileOptions) ExtensionMap() map[int32]proto.Extension { + if this.XXX_extensions == nil { + this.XXX_extensions = make(map[int32]proto.Extension) + } + return this.XXX_extensions +} + +const Default_FileOptions_JavaMultipleFiles bool = false +const Default_FileOptions_JavaGenerateEqualsAndHash bool = false +const Default_FileOptions_OptimizeFor FileOptions_OptimizeMode = FileOptions_SPEED +const Default_FileOptions_CcGenericServices bool = false +const Default_FileOptions_JavaGenericServices bool = false +const Default_FileOptions_PyGenericServices bool = false + +func (this *FileOptions) GetJavaPackage() string { + if this != nil && this.JavaPackage != nil { + return *this.JavaPackage + } + return "" +} + +func (this *FileOptions) GetJavaOuterClassname() string { + if this != nil && this.JavaOuterClassname != nil { + return *this.JavaOuterClassname + } + return "" +} + +func (this *FileOptions) GetJavaMultipleFiles() bool { + if this != nil && this.JavaMultipleFiles != nil { + return *this.JavaMultipleFiles + } + return Default_FileOptions_JavaMultipleFiles +} + +func (this *FileOptions) GetJavaGenerateEqualsAndHash() bool { + if this != nil && this.JavaGenerateEqualsAndHash != nil { + return *this.JavaGenerateEqualsAndHash + } + return Default_FileOptions_JavaGenerateEqualsAndHash +} + +func (this *FileOptions) GetOptimizeFor() 
FileOptions_OptimizeMode { + if this != nil && this.OptimizeFor != nil { + return *this.OptimizeFor + } + return Default_FileOptions_OptimizeFor +} + +func (this *FileOptions) GetGoPackage() string { + if this != nil && this.GoPackage != nil { + return *this.GoPackage + } + return "" +} + +func (this *FileOptions) GetCcGenericServices() bool { + if this != nil && this.CcGenericServices != nil { + return *this.CcGenericServices + } + return Default_FileOptions_CcGenericServices +} + +func (this *FileOptions) GetJavaGenericServices() bool { + if this != nil && this.JavaGenericServices != nil { + return *this.JavaGenericServices + } + return Default_FileOptions_JavaGenericServices +} + +func (this *FileOptions) GetPyGenericServices() bool { + if this != nil && this.PyGenericServices != nil { + return *this.PyGenericServices + } + return Default_FileOptions_PyGenericServices +} + +type MessageOptions struct { + MessageSetWireFormat *bool `protobuf:"varint,1,opt,name=message_set_wire_format,def=0" json:"message_set_wire_format,omitempty"` + NoStandardDescriptorAccessor *bool `protobuf:"varint,2,opt,name=no_standard_descriptor_accessor,def=0" json:"no_standard_descriptor_accessor,omitempty"` + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option" json:"uninterpreted_option,omitempty"` + XXX_extensions map[int32]proto.Extension `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (this *MessageOptions) Reset() { *this = MessageOptions{} } +func (this *MessageOptions) String() string { return proto.CompactTextString(this) } +func (*MessageOptions) ProtoMessage() {} + +var extRange_MessageOptions = []proto.ExtensionRange{ + {1000, 536870911}, +} + +func (*MessageOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_MessageOptions +} +func (this *MessageOptions) ExtensionMap() map[int32]proto.Extension { + if this.XXX_extensions == nil { + this.XXX_extensions = make(map[int32]proto.Extension) + } + return 
this.XXX_extensions +} + +const Default_MessageOptions_MessageSetWireFormat bool = false +const Default_MessageOptions_NoStandardDescriptorAccessor bool = false + +func (this *MessageOptions) GetMessageSetWireFormat() bool { + if this != nil && this.MessageSetWireFormat != nil { + return *this.MessageSetWireFormat + } + return Default_MessageOptions_MessageSetWireFormat +} + +func (this *MessageOptions) GetNoStandardDescriptorAccessor() bool { + if this != nil && this.NoStandardDescriptorAccessor != nil { + return *this.NoStandardDescriptorAccessor + } + return Default_MessageOptions_NoStandardDescriptorAccessor +} + +type FieldOptions struct { + Ctype *FieldOptions_CType `protobuf:"varint,1,opt,name=ctype,enum=proto2.FieldOptions_CType,def=0" json:"ctype,omitempty"` + Packed *bool `protobuf:"varint,2,opt,name=packed" json:"packed,omitempty"` + Lazy *bool `protobuf:"varint,5,opt,name=lazy,def=0" json:"lazy,omitempty"` + Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + ExperimentalMapKey *string `protobuf:"bytes,9,opt,name=experimental_map_key" json:"experimental_map_key,omitempty"` + Weak *bool `protobuf:"varint,10,opt,name=weak,def=0" json:"weak,omitempty"` + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option" json:"uninterpreted_option,omitempty"` + XXX_extensions map[int32]proto.Extension `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (this *FieldOptions) Reset() { *this = FieldOptions{} } +func (this *FieldOptions) String() string { return proto.CompactTextString(this) } +func (*FieldOptions) ProtoMessage() {} + +var extRange_FieldOptions = []proto.ExtensionRange{ + {1000, 536870911}, +} + +func (*FieldOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_FieldOptions +} +func (this *FieldOptions) ExtensionMap() map[int32]proto.Extension { + if this.XXX_extensions == nil { + this.XXX_extensions = make(map[int32]proto.Extension) + } + return 
this.XXX_extensions +} + +const Default_FieldOptions_Ctype FieldOptions_CType = FieldOptions_STRING +const Default_FieldOptions_Lazy bool = false +const Default_FieldOptions_Deprecated bool = false +const Default_FieldOptions_Weak bool = false + +func (this *FieldOptions) GetCtype() FieldOptions_CType { + if this != nil && this.Ctype != nil { + return *this.Ctype + } + return Default_FieldOptions_Ctype +} + +func (this *FieldOptions) GetPacked() bool { + if this != nil && this.Packed != nil { + return *this.Packed + } + return false +} + +func (this *FieldOptions) GetLazy() bool { + if this != nil && this.Lazy != nil { + return *this.Lazy + } + return Default_FieldOptions_Lazy +} + +func (this *FieldOptions) GetDeprecated() bool { + if this != nil && this.Deprecated != nil { + return *this.Deprecated + } + return Default_FieldOptions_Deprecated +} + +func (this *FieldOptions) GetExperimentalMapKey() string { + if this != nil && this.ExperimentalMapKey != nil { + return *this.ExperimentalMapKey + } + return "" +} + +func (this *FieldOptions) GetWeak() bool { + if this != nil && this.Weak != nil { + return *this.Weak + } + return Default_FieldOptions_Weak +} + +type EnumOptions struct { + AllowAlias *bool `protobuf:"varint,2,opt,name=allow_alias,def=1" json:"allow_alias,omitempty"` + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option" json:"uninterpreted_option,omitempty"` + XXX_extensions map[int32]proto.Extension `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (this *EnumOptions) Reset() { *this = EnumOptions{} } +func (this *EnumOptions) String() string { return proto.CompactTextString(this) } +func (*EnumOptions) ProtoMessage() {} + +var extRange_EnumOptions = []proto.ExtensionRange{ + {1000, 536870911}, +} + +func (*EnumOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_EnumOptions +} +func (this *EnumOptions) ExtensionMap() map[int32]proto.Extension { + if this.XXX_extensions == nil { 
+ this.XXX_extensions = make(map[int32]proto.Extension) + } + return this.XXX_extensions +} + +const Default_EnumOptions_AllowAlias bool = true + +func (this *EnumOptions) GetAllowAlias() bool { + if this != nil && this.AllowAlias != nil { + return *this.AllowAlias + } + return Default_EnumOptions_AllowAlias +} + +type EnumValueOptions struct { + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option" json:"uninterpreted_option,omitempty"` + XXX_extensions map[int32]proto.Extension `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (this *EnumValueOptions) Reset() { *this = EnumValueOptions{} } +func (this *EnumValueOptions) String() string { return proto.CompactTextString(this) } +func (*EnumValueOptions) ProtoMessage() {} + +var extRange_EnumValueOptions = []proto.ExtensionRange{ + {1000, 536870911}, +} + +func (*EnumValueOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_EnumValueOptions +} +func (this *EnumValueOptions) ExtensionMap() map[int32]proto.Extension { + if this.XXX_extensions == nil { + this.XXX_extensions = make(map[int32]proto.Extension) + } + return this.XXX_extensions +} + +type ServiceOptions struct { + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option" json:"uninterpreted_option,omitempty"` + XXX_extensions map[int32]proto.Extension `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (this *ServiceOptions) Reset() { *this = ServiceOptions{} } +func (this *ServiceOptions) String() string { return proto.CompactTextString(this) } +func (*ServiceOptions) ProtoMessage() {} + +var extRange_ServiceOptions = []proto.ExtensionRange{ + {1000, 536870911}, +} + +func (*ServiceOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_ServiceOptions +} +func (this *ServiceOptions) ExtensionMap() map[int32]proto.Extension { + if this.XXX_extensions == nil { + this.XXX_extensions = make(map[int32]proto.Extension) + } + return 
this.XXX_extensions +} + +type MethodOptions struct { + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option" json:"uninterpreted_option,omitempty"` + XXX_extensions map[int32]proto.Extension `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (this *MethodOptions) Reset() { *this = MethodOptions{} } +func (this *MethodOptions) String() string { return proto.CompactTextString(this) } +func (*MethodOptions) ProtoMessage() {} + +var extRange_MethodOptions = []proto.ExtensionRange{ + {1000, 536870911}, +} + +func (*MethodOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_MethodOptions +} +func (this *MethodOptions) ExtensionMap() map[int32]proto.Extension { + if this.XXX_extensions == nil { + this.XXX_extensions = make(map[int32]proto.Extension) + } + return this.XXX_extensions +} + +type StreamOptions struct { + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option" json:"uninterpreted_option,omitempty"` + XXX_extensions map[int32]proto.Extension `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (this *StreamOptions) Reset() { *this = StreamOptions{} } +func (this *StreamOptions) String() string { return proto.CompactTextString(this) } +func (*StreamOptions) ProtoMessage() {} + +var extRange_StreamOptions = []proto.ExtensionRange{ + {1000, 536870911}, +} + +func (*StreamOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_StreamOptions +} +func (this *StreamOptions) ExtensionMap() map[int32]proto.Extension { + if this.XXX_extensions == nil { + this.XXX_extensions = make(map[int32]proto.Extension) + } + return this.XXX_extensions +} + +type UninterpretedOption struct { + Name []*UninterpretedOption_NamePart `protobuf:"bytes,2,rep,name=name" json:"name,omitempty"` + IdentifierValue *string `protobuf:"bytes,3,opt,name=identifier_value" json:"identifier_value,omitempty"` + PositiveIntValue *uint64 
`protobuf:"varint,4,opt,name=positive_int_value" json:"positive_int_value,omitempty"` + NegativeIntValue *int64 `protobuf:"varint,5,opt,name=negative_int_value" json:"negative_int_value,omitempty"` + DoubleValue *float64 `protobuf:"fixed64,6,opt,name=double_value" json:"double_value,omitempty"` + StringValue []byte `protobuf:"bytes,7,opt,name=string_value" json:"string_value,omitempty"` + AggregateValue *string `protobuf:"bytes,8,opt,name=aggregate_value" json:"aggregate_value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (this *UninterpretedOption) Reset() { *this = UninterpretedOption{} } +func (this *UninterpretedOption) String() string { return proto.CompactTextString(this) } +func (*UninterpretedOption) ProtoMessage() {} + +func (this *UninterpretedOption) GetIdentifierValue() string { + if this != nil && this.IdentifierValue != nil { + return *this.IdentifierValue + } + return "" +} + +func (this *UninterpretedOption) GetPositiveIntValue() uint64 { + if this != nil && this.PositiveIntValue != nil { + return *this.PositiveIntValue + } + return 0 +} + +func (this *UninterpretedOption) GetNegativeIntValue() int64 { + if this != nil && this.NegativeIntValue != nil { + return *this.NegativeIntValue + } + return 0 +} + +func (this *UninterpretedOption) GetDoubleValue() float64 { + if this != nil && this.DoubleValue != nil { + return *this.DoubleValue + } + return 0 +} + +func (this *UninterpretedOption) GetStringValue() []byte { + if this != nil { + return this.StringValue + } + return nil +} + +func (this *UninterpretedOption) GetAggregateValue() string { + if this != nil && this.AggregateValue != nil { + return *this.AggregateValue + } + return "" +} + +type UninterpretedOption_NamePart struct { + NamePart *string `protobuf:"bytes,1,req,name=name_part" json:"name_part,omitempty"` + IsExtension *bool `protobuf:"varint,2,req,name=is_extension" json:"is_extension,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (this 
*UninterpretedOption_NamePart) Reset() { *this = UninterpretedOption_NamePart{} } +func (this *UninterpretedOption_NamePart) String() string { return proto.CompactTextString(this) } +func (*UninterpretedOption_NamePart) ProtoMessage() {} + +func (this *UninterpretedOption_NamePart) GetNamePart() string { + if this != nil && this.NamePart != nil { + return *this.NamePart + } + return "" +} + +func (this *UninterpretedOption_NamePart) GetIsExtension() bool { + if this != nil && this.IsExtension != nil { + return *this.IsExtension + } + return false +} + +type SourceCodeInfo struct { + Location []*SourceCodeInfo_Location `protobuf:"bytes,1,rep,name=location" json:"location,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (this *SourceCodeInfo) Reset() { *this = SourceCodeInfo{} } +func (this *SourceCodeInfo) String() string { return proto.CompactTextString(this) } +func (*SourceCodeInfo) ProtoMessage() {} + +type SourceCodeInfo_Location struct { + Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"` + Span []int32 `protobuf:"varint,2,rep,packed,name=span" json:"span,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (this *SourceCodeInfo_Location) Reset() { *this = SourceCodeInfo_Location{} } +func (this *SourceCodeInfo_Location) String() string { return proto.CompactTextString(this) } +func (*SourceCodeInfo_Location) ProtoMessage() {} + +func init() { + proto.RegisterEnum("google_protobuf.FieldDescriptorProto_Type", FieldDescriptorProto_Type_name, FieldDescriptorProto_Type_value) + proto.RegisterEnum("google_protobuf.FieldDescriptorProto_Label", FieldDescriptorProto_Label_name, FieldDescriptorProto_Label_value) + proto.RegisterEnum("google_protobuf.FileOptions_OptimizeMode", FileOptions_OptimizeMode_name, FileOptions_OptimizeMode_value) + proto.RegisterEnum("google_protobuf.FieldOptions_CType", FieldOptions_CType_name, FieldOptions_CType_value) + proto.RegisterEnum("google_protobuf.StreamOptions_TokenUnit", 
StreamOptions_TokenUnit_name, StreamOptions_TokenUnit_value) +} diff -Nru juju-core-2.0~beta15/src/github.com/golang/protobuf/protoc-gen-go/descriptor/Makefile juju-core-2.0.0/src/github.com/golang/protobuf/protoc-gen-go/descriptor/Makefile --- juju-core-2.0~beta15/src/github.com/golang/protobuf/protoc-gen-go/descriptor/Makefile 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/golang/protobuf/protoc-gen-go/descriptor/Makefile 2016-10-13 14:32:09.000000000 +0000 @@ -0,0 +1,45 @@ +# Go support for Protocol Buffers - Google's data interchange format +# +# Copyright 2010 The Go Authors. All rights reserved. +# https://github.com/golang/protobuf +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# Not stored here, but descriptor.proto is in https://github.com/google/protobuf/ +# at src/google/protobuf/descriptor.proto +regenerate: + echo WARNING! THIS RULE IS PROBABLY NOT RIGHT FOR YOUR INSTALLATION + cd $(HOME)/src/protobuf/src && \ + protoc --go_out=. ./google/protobuf/descriptor.proto && \ + sed -i 's,^package google_protobuf,package descriptor,' google/protobuf/descriptor.pb.go && \ + cp ./google/protobuf/descriptor.pb.go $(GOPATH)/src/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go + +restore: + cp descriptor.pb.golden descriptor.pb.go + +preserve: + cp descriptor.pb.go descriptor.pb.golden diff -Nru juju-core-2.0~beta15/src/github.com/golang/protobuf/protoc-gen-go/doc.go juju-core-2.0.0/src/github.com/golang/protobuf/protoc-gen-go/doc.go --- juju-core-2.0~beta15/src/github.com/golang/protobuf/protoc-gen-go/doc.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/golang/protobuf/protoc-gen-go/doc.go 2016-10-13 14:32:09.000000000 +0000 @@ -0,0 +1,51 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. 
+// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +/* + A plugin for the Google protocol buffer compiler to generate Go code. 
+ Run it by building this program and putting it in your path with the name + protoc-gen-go + That word 'go' at the end becomes part of the option string set for the + protocol compiler, so once the protocol compiler (protoc) is installed + you can run + protoc --go_out=output_directory input_directory/file.proto + to generate Go bindings for the protocol defined by file.proto. + With that input, the output will be written to + output_directory/file.pb.go + + The generated code is documented in the package comment for + the library. + + See the README and documentation for protocol buffers to learn more: + https://developers.google.com/protocol-buffers/ + +*/ +package documentation diff -Nru juju-core-2.0~beta15/src/github.com/golang/protobuf/protoc-gen-go/generator/generator.go juju-core-2.0.0/src/github.com/golang/protobuf/protoc-gen-go/generator/generator.go --- juju-core-2.0~beta15/src/github.com/golang/protobuf/protoc-gen-go/generator/generator.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/golang/protobuf/protoc-gen-go/generator/generator.go 2016-10-13 14:32:09.000000000 +0000 @@ -0,0 +1,2071 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. 
nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +/* + The code generator for the plugin for the Google protocol buffer compiler. + It generates Go code from the protocol buffer description files read by the + main routine. +*/ +package generator + +import ( + "bytes" + "fmt" + "go/parser" + "go/printer" + "go/token" + "log" + "os" + "path" + "strconv" + "strings" + "unicode" + "unicode/utf8" + + "github.com/golang/protobuf/proto" + + "github.com/golang/protobuf/protoc-gen-go/descriptor" + plugin "github.com/golang/protobuf/protoc-gen-go/plugin" +) + +// A Plugin provides functionality to add to the output during Go code generation, +// such as to produce RPC stubs. +type Plugin interface { + // Name identifies the plugin. + Name() string + // Init is called once after data structures are built but before + // code generation begins. + Init(g *Generator) + // Generate produces the code generated by the plugin for this file, + // except for the imports, by calling the generator's methods P, In, and Out. 
+ Generate(file *FileDescriptor) + // GenerateImports produces the import declarations for this file. + // It is called after Generate. + GenerateImports(file *FileDescriptor) +} + +var plugins []Plugin + +// RegisterPlugin installs a (second-order) plugin to be run when the Go output is generated. +// It is typically called during initialization. +func RegisterPlugin(p Plugin) { + plugins = append(plugins, p) +} + +// Each type we import as a protocol buffer (other than FileDescriptorProto) needs +// a pointer to the FileDescriptorProto that represents it. These types achieve that +// wrapping by placing each Proto inside a struct with the pointer to its File. The +// structs have the same names as their contents, with "Proto" removed. +// FileDescriptor is used to store the things that it points to. + +// The file and package name method are common to messages and enums. +type common struct { + file *descriptor.FileDescriptorProto // File this object comes from. +} + +// PackageName is name in the package clause in the generated file. +func (c *common) PackageName() string { return uniquePackageOf(c.file) } + +func (c *common) File() *descriptor.FileDescriptorProto { return c.file } + +func fileIsProto3(file *descriptor.FileDescriptorProto) bool { + return file.GetSyntax() == "proto3" +} + +func (c *common) proto3() bool { return fileIsProto3(c.file) } + +// Descriptor represents a protocol buffer message. +type Descriptor struct { + common + *descriptor.DescriptorProto + parent *Descriptor // The containing message, if any. + nested []*Descriptor // Inner messages, if any. + ext []*ExtensionDescriptor // Extensions, if any. + typename []string // Cached typename vector. + index int // The index into the container, whether the file or another message. + path string // The SourceCodeInfo path as comma-separated integers. + group bool +} + +// TypeName returns the elements of the dotted type name. +// The package name is not part of this name. 
+func (d *Descriptor) TypeName() []string { + if d.typename != nil { + return d.typename + } + n := 0 + for parent := d; parent != nil; parent = parent.parent { + n++ + } + s := make([]string, n, n) + for parent := d; parent != nil; parent = parent.parent { + n-- + s[n] = parent.GetName() + } + d.typename = s + return s +} + +// EnumDescriptor describes an enum. If it's at top level, its parent will be nil. +// Otherwise it will be the descriptor of the message in which it is defined. +type EnumDescriptor struct { + common + *descriptor.EnumDescriptorProto + parent *Descriptor // The containing message, if any. + typename []string // Cached typename vector. + index int // The index into the container, whether the file or a message. + path string // The SourceCodeInfo path as comma-separated integers. +} + +// TypeName returns the elements of the dotted type name. +// The package name is not part of this name. +func (e *EnumDescriptor) TypeName() (s []string) { + if e.typename != nil { + return e.typename + } + name := e.GetName() + if e.parent == nil { + s = make([]string, 1) + } else { + pname := e.parent.TypeName() + s = make([]string, len(pname)+1) + copy(s, pname) + } + s[len(s)-1] = name + e.typename = s + return s +} + +// Everything but the last element of the full type name, CamelCased. +// The values of type Foo.Bar are call Foo_value1... not Foo_Bar_value1... . +func (e *EnumDescriptor) prefix() string { + if e.parent == nil { + // If the enum is not part of a message, the prefix is just the type name. + return CamelCase(*e.Name) + "_" + } + typeName := e.TypeName() + return CamelCaseSlice(typeName[0:len(typeName)-1]) + "_" +} + +// The integer value of the named constant in this enumerated type. 
+func (e *EnumDescriptor) integerValueAsString(name string) string { + for _, c := range e.Value { + if c.GetName() == name { + return fmt.Sprint(c.GetNumber()) + } + } + log.Fatal("cannot find value for enum constant") + return "" +} + +// ExtensionDescriptor describes an extension. If it's at top level, its parent will be nil. +// Otherwise it will be the descriptor of the message in which it is defined. +type ExtensionDescriptor struct { + common + *descriptor.FieldDescriptorProto + parent *Descriptor // The containing message, if any. +} + +// TypeName returns the elements of the dotted type name. +// The package name is not part of this name. +func (e *ExtensionDescriptor) TypeName() (s []string) { + name := e.GetName() + if e.parent == nil { + // top-level extension + s = make([]string, 1) + } else { + pname := e.parent.TypeName() + s = make([]string, len(pname)+1) + copy(s, pname) + } + s[len(s)-1] = name + return s +} + +// DescName returns the variable name used for the generated descriptor. +func (e *ExtensionDescriptor) DescName() string { + // The full type name. + typeName := e.TypeName() + // Each scope of the extension is individually CamelCased, and all are joined with "_" with an "E_" prefix. + for i, s := range typeName { + typeName[i] = CamelCase(s) + } + return "E_" + strings.Join(typeName, "_") +} + +// ImportedDescriptor describes a type that has been publicly imported from another file. +type ImportedDescriptor struct { + common + o Object +} + +func (id *ImportedDescriptor) TypeName() []string { return id.o.TypeName() } + +// FileDescriptor describes an protocol buffer descriptor file (.proto). +// It includes slices of all the messages and enums defined within it. +// Those slices are constructed by WrapTypes. +type FileDescriptor struct { + *descriptor.FileDescriptorProto + desc []*Descriptor // All the messages defined in this file. + enum []*EnumDescriptor // All the enums defined in this file. 
+ ext []*ExtensionDescriptor // All the top-level extensions defined in this file. + imp []*ImportedDescriptor // All types defined in files publicly imported by this file. + + // Comments, stored as a map of path (comma-separated integers) to the comment. + comments map[string]*descriptor.SourceCodeInfo_Location + + // The full list of symbols that are exported, + // as a map from the exported object to its symbols. + // This is used for supporting public imports. + exported map[Object][]symbol + + index int // The index of this file in the list of files to generate code for + + proto3 bool // whether to generate proto3 code for this file +} + +// PackageName is the package name we'll use in the generated code to refer to this file. +func (d *FileDescriptor) PackageName() string { return uniquePackageOf(d.FileDescriptorProto) } + +// goPackageName returns the Go package name to use in the +// generated Go file. The result explicit reports whether the name +// came from an option go_package statement. If explicit is false, +// the name was derived from the protocol buffer's package statement +// or the input file name. +func (d *FileDescriptor) goPackageName() (name string, explicit bool) { + // Does the file have a "go_package" option? + if opts := d.Options; opts != nil { + if pkg := opts.GetGoPackage(); pkg != "" { + return pkg, true + } + } + + // Does the file have a package clause? + if pkg := d.GetPackage(); pkg != "" { + return pkg, false + } + // Use the file base name. + return baseName(d.GetName()), false +} + +func (d *FileDescriptor) addExport(obj Object, sym symbol) { + d.exported[obj] = append(d.exported[obj], sym) +} + +// symbol is an interface representing an exported Go symbol. +type symbol interface { + // GenerateAlias should generate an appropriate alias + // for the symbol from the named package. 
+ GenerateAlias(g *Generator, pkg string) +} + +type messageSymbol struct { + sym string + hasExtensions, isMessageSet bool + getters []getterSymbol +} + +type getterSymbol struct { + name string + typ string + typeName string // canonical name in proto world; empty for proto.Message and similar + genType bool // whether typ is a generated type (message/group/enum) +} + +func (ms *messageSymbol) GenerateAlias(g *Generator, pkg string) { + remoteSym := pkg + "." + ms.sym + + g.P("type ", ms.sym, " ", remoteSym) + g.P("func (m *", ms.sym, ") Reset() { (*", remoteSym, ")(m).Reset() }") + g.P("func (m *", ms.sym, ") String() string { return (*", remoteSym, ")(m).String() }") + g.P("func (*", ms.sym, ") ProtoMessage() {}") + if ms.hasExtensions { + g.P("func (*", ms.sym, ") ExtensionRangeArray() []", g.Pkg["proto"], ".ExtensionRange ", + "{ return (*", remoteSym, ")(nil).ExtensionRangeArray() }") + g.P("func (m *", ms.sym, ") ExtensionMap() map[int32]", g.Pkg["proto"], ".Extension ", + "{ return (*", remoteSym, ")(m).ExtensionMap() }") + if ms.isMessageSet { + g.P("func (m *", ms.sym, ") Marshal() ([]byte, error) ", + "{ return (*", remoteSym, ")(m).Marshal() }") + g.P("func (m *", ms.sym, ") Unmarshal(buf []byte) error ", + "{ return (*", remoteSym, ")(m).Unmarshal(buf) }") + } + } + for _, get := range ms.getters { + + if get.typeName != "" { + g.RecordTypeUse(get.typeName) + } + typ := get.typ + val := "(*" + remoteSym + ")(m)." + get.name + "()" + if get.genType { + // typ will be "*pkg.T" (message/group) or "pkg.T" (enum). + // Either of those might have a "[]" prefix if it is repeated. + // Drop the package qualifier since we have hoisted the type into this package. + rep := strings.HasPrefix(typ, "[]") + if rep { + typ = typ[2:] + } + star := typ[0] == '*' + typ = typ[strings.Index(typ, ".")+1:] + if star { + typ = "*" + typ + } + if rep { + // Go does not permit conversion between slice types where both + // element types are named. 
That means we need to generate a bit + // of code in this situation. + // typ is the element type. + // val is the expression to get the slice from the imported type. + + ctyp := typ // conversion type expression; "Foo" or "(*Foo)" + if star { + ctyp = "(" + typ + ")" + } + + g.P("func (m *", ms.sym, ") ", get.name, "() []", typ, " {") + g.In() + g.P("o := ", val) + g.P("if o == nil {") + g.In() + g.P("return nil") + g.Out() + g.P("}") + g.P("s := make([]", typ, ", len(o))") + g.P("for i, x := range o {") + g.In() + g.P("s[i] = ", ctyp, "(x)") + g.Out() + g.P("}") + g.P("return s") + g.Out() + g.P("}") + continue + } + // Convert imported type into the forwarding type. + val = "(" + typ + ")(" + val + ")" + } + + g.P("func (m *", ms.sym, ") ", get.name, "() ", typ, " { return ", val, " }") + } + +} + +type enumSymbol struct { + name string + proto3 bool // Whether this came from a proto3 file. +} + +func (es enumSymbol) GenerateAlias(g *Generator, pkg string) { + s := es.name + g.P("type ", s, " ", pkg, ".", s) + g.P("var ", s, "_name = ", pkg, ".", s, "_name") + g.P("var ", s, "_value = ", pkg, ".", s, "_value") + g.P("func (x ", s, ") String() string { return (", pkg, ".", s, ")(x).String() }") + if !es.proto3 { + g.P("func (x ", s, ") Enum() *", s, "{ return (*", s, ")((", pkg, ".", s, ")(x).Enum()) }") + g.P("func (x *", s, ") UnmarshalJSON(data []byte) error { return (*", pkg, ".", s, ")(x).UnmarshalJSON(data) }") + } +} + +type constOrVarSymbol struct { + sym string + typ string // either "const" or "var" + cast string // if non-empty, a type cast is required (used for enums) +} + +func (cs constOrVarSymbol) GenerateAlias(g *Generator, pkg string) { + v := pkg + "." + cs.sym + if cs.cast != "" { + v = cs.cast + "(" + v + ")" + } + g.P(cs.typ, " ", cs.sym, " = ", v) +} + +// Object is an interface abstracting the abilities shared by enums, messages, extensions and imported objects. 
+type Object interface { + PackageName() string // The name we use in our output (a_b_c), possibly renamed for uniqueness. + TypeName() []string + File() *descriptor.FileDescriptorProto +} + +// Each package name we generate must be unique. The package we're generating +// gets its own name but every other package must have a unique name that does +// not conflict in the code we generate. These names are chosen globally (although +// they don't have to be, it simplifies things to do them globally). +func uniquePackageOf(fd *descriptor.FileDescriptorProto) string { + s, ok := uniquePackageName[fd] + if !ok { + log.Fatal("internal error: no package name defined for " + fd.GetName()) + } + return s +} + +// Generator is the type whose methods generate the output, stored in the associated response structure. +type Generator struct { + *bytes.Buffer + + Request *plugin.CodeGeneratorRequest // The input. + Response *plugin.CodeGeneratorResponse // The output. + + Param map[string]string // Command-line parameters. + PackageImportPath string // Go import path of the package we're generating code for + ImportPrefix string // String to prefix to imported package file names. + ImportMap map[string]string // Mapping from import name to generated name + + Pkg map[string]string // The names under which we import support packages + + packageName string // What we're calling ourselves. + allFiles []*FileDescriptor // All files in the tree + genFiles []*FileDescriptor // Those files we will generate output for. + file *FileDescriptor // The file we are compiling now. + usedPackages map[string]bool // Names of packages used in current file. + typeNameToObject map[string]Object // Key is a fully-qualified name in input syntax. + indent string +} + +// New creates a new generator and allocates the request and response protobufs. 
+func New() *Generator { + g := new(Generator) + g.Buffer = new(bytes.Buffer) + g.Request = new(plugin.CodeGeneratorRequest) + g.Response = new(plugin.CodeGeneratorResponse) + return g +} + +// Error reports a problem, including an error, and exits the program. +func (g *Generator) Error(err error, msgs ...string) { + s := strings.Join(msgs, " ") + ":" + err.Error() + log.Print("protoc-gen-go: error:", s) + os.Exit(1) +} + +// Fail reports a problem and exits the program. +func (g *Generator) Fail(msgs ...string) { + s := strings.Join(msgs, " ") + log.Print("protoc-gen-go: error:", s) + os.Exit(1) +} + +// CommandLineParameters breaks the comma-separated list of key=value pairs +// in the parameter (a member of the request protobuf) into a key/value map. +// It then sets file name mappings defined by those entries. +func (g *Generator) CommandLineParameters(parameter string) { + g.Param = make(map[string]string) + for _, p := range strings.Split(parameter, ",") { + if i := strings.Index(p, "="); i < 0 { + g.Param[p] = "" + } else { + g.Param[p[0:i]] = p[i+1:] + } + } + + g.ImportMap = make(map[string]string) + pluginList := "none" // Default list of plugin names to enable (empty means all). + for k, v := range g.Param { + switch k { + case "import_prefix": + g.ImportPrefix = v + case "import_path": + g.PackageImportPath = v + case "plugins": + pluginList = v + default: + if len(k) > 0 && k[0] == 'M' { + g.ImportMap[k[1:]] = v + } + } + } + + if pluginList != "" { + // Amend the set of plugins. + enabled := make(map[string]bool) + for _, name := range strings.Split(pluginList, "+") { + enabled[name] = true + } + var nplugins []Plugin + for _, p := range plugins { + if enabled[p.Name()] { + nplugins = append(nplugins, p) + } + } + plugins = nplugins + } +} + +// DefaultPackageName returns the package name printed for the object. +// If its file is in a different package, it returns the package name we're using for this file, plus ".". 
+// Otherwise it returns the empty string. +func (g *Generator) DefaultPackageName(obj Object) string { + pkg := obj.PackageName() + if pkg == g.packageName { + return "" + } + return pkg + "." +} + +// For each input file, the unique package name to use, underscored. +var uniquePackageName = make(map[*descriptor.FileDescriptorProto]string) + +// Package names already registered. Key is the name from the .proto file; +// value is the name that appears in the generated code. +var pkgNamesInUse = make(map[string]bool) + +// Create and remember a guaranteed unique package name for this file descriptor. +// Pkg is the candidate name. If f is nil, it's a builtin package like "proto" and +// has no file descriptor. +func RegisterUniquePackageName(pkg string, f *FileDescriptor) string { + // Convert dots to underscores before finding a unique alias. + pkg = strings.Map(badToUnderscore, pkg) + + for i, orig := 1, pkg; pkgNamesInUse[pkg]; i++ { + // It's a duplicate; must rename. + pkg = orig + strconv.Itoa(i) + } + // Install it. + pkgNamesInUse[pkg] = true + if f != nil { + uniquePackageName[f.FileDescriptorProto] = pkg + } + return pkg +} + +var isGoKeyword = map[string]bool{ + "break": true, + "case": true, + "chan": true, + "const": true, + "continue": true, + "default": true, + "else": true, + "defer": true, + "fallthrough": true, + "for": true, + "func": true, + "go": true, + "goto": true, + "if": true, + "import": true, + "interface": true, + "map": true, + "package": true, + "range": true, + "return": true, + "select": true, + "struct": true, + "switch": true, + "type": true, + "var": true, +} + +// defaultGoPackage returns the package name to use, +// derived from the import path of the package we're building code for. 
+func (g *Generator) defaultGoPackage() string { + p := g.PackageImportPath + if i := strings.LastIndex(p, "/"); i >= 0 { + p = p[i+1:] + } + if p == "" { + return "" + } + + p = strings.Map(badToUnderscore, p) + // Identifier must not be keyword: insert _. + if isGoKeyword[p] { + p = "_" + p + } + // Identifier must not begin with digit: insert _. + if r, _ := utf8.DecodeRuneInString(p); unicode.IsDigit(r) { + p = "_" + p + } + return p +} + +// SetPackageNames sets the package name for this run. +// The package name must agree across all files being generated. +// It also defines unique package names for all imported files. +func (g *Generator) SetPackageNames() { + // Register the name for this package. It will be the first name + // registered so is guaranteed to be unmodified. + pkg, explicit := g.genFiles[0].goPackageName() + + // Check all files for an explicit go_package option. + for _, f := range g.genFiles { + thisPkg, thisExplicit := f.goPackageName() + if thisExplicit { + if !explicit { + // Let this file's go_package option serve for all input files. + pkg, explicit = thisPkg, true + } else if thisPkg != pkg { + g.Fail("inconsistent package names:", thisPkg, pkg) + } + } + } + + // If we don't have an explicit go_package option but we have an + // import path, use that. + if !explicit { + p := g.defaultGoPackage() + if p != "" { + pkg, explicit = p, true + } + } + + // If there was no go_package and no import path to use, + // double-check that all the inputs have the same implicit + // Go package name. + if !explicit { + for _, f := range g.genFiles { + thisPkg, _ := f.goPackageName() + if thisPkg != pkg { + g.Fail("inconsistent package names:", thisPkg, pkg) + } + } + } + + g.packageName = RegisterUniquePackageName(pkg, g.genFiles[0]) + + // Register the support package names. They might collide with the + // name of a package we import. 
+ g.Pkg = map[string]string{ + "math": RegisterUniquePackageName("math", nil), + "proto": RegisterUniquePackageName("proto", nil), + } + +AllFiles: + for _, f := range g.allFiles { + for _, genf := range g.genFiles { + if f == genf { + // In this package already. + uniquePackageName[f.FileDescriptorProto] = g.packageName + continue AllFiles + } + } + // The file is a dependency, so we want to ignore its go_package option + // because that is only relevant for its specific generated output. + pkg := f.GetPackage() + if pkg == "" { + pkg = baseName(*f.Name) + } + RegisterUniquePackageName(pkg, f) + } +} + +// WrapTypes walks the incoming data, wrapping DescriptorProtos, EnumDescriptorProtos +// and FileDescriptorProtos into file-referenced objects within the Generator. +// It also creates the list of files to generate and so should be called before GenerateAllFiles. +func (g *Generator) WrapTypes() { + g.allFiles = make([]*FileDescriptor, len(g.Request.ProtoFile)) + for i, f := range g.Request.ProtoFile { + // We must wrap the descriptors before we wrap the enums + descs := wrapDescriptors(f) + g.buildNestedDescriptors(descs) + enums := wrapEnumDescriptors(f, descs) + exts := wrapExtensions(f) + imps := wrapImported(f, g) + fd := &FileDescriptor{ + FileDescriptorProto: f, + desc: descs, + enum: enums, + ext: exts, + imp: imps, + exported: make(map[Object][]symbol), + proto3: fileIsProto3(f), + } + extractComments(fd) + g.allFiles[i] = fd + } + + g.genFiles = make([]*FileDescriptor, len(g.Request.FileToGenerate)) +FindFiles: + for i, fileName := range g.Request.FileToGenerate { + // Search the list. This algorithm is n^2 but n is tiny. + for _, file := range g.allFiles { + if fileName == file.GetName() { + g.genFiles[i] = file + file.index = i + continue FindFiles + } + } + g.Fail("could not find file named", fileName) + } + g.Response.File = make([]*plugin.CodeGeneratorResponse_File, len(g.genFiles)) +} + +// Scan the descriptors in this file. 
For each one, build the slice of nested descriptors +func (g *Generator) buildNestedDescriptors(descs []*Descriptor) { + for _, desc := range descs { + if len(desc.NestedType) != 0 { + desc.nested = make([]*Descriptor, len(desc.NestedType)) + n := 0 + for _, nest := range descs { + if nest.parent == desc { + desc.nested[n] = nest + n++ + } + } + if n != len(desc.NestedType) { + g.Fail("internal error: nesting failure for", desc.GetName()) + } + } + } +} + +// Construct the Descriptor +func newDescriptor(desc *descriptor.DescriptorProto, parent *Descriptor, file *descriptor.FileDescriptorProto, index int) *Descriptor { + d := &Descriptor{ + common: common{file}, + DescriptorProto: desc, + parent: parent, + index: index, + } + if parent == nil { + d.path = fmt.Sprintf("%d,%d", messagePath, index) + } else { + d.path = fmt.Sprintf("%s,%d,%d", parent.path, messageMessagePath, index) + } + + // The only way to distinguish a group from a message is whether + // the containing message has a TYPE_GROUP field that matches. + if parent != nil { + parts := d.TypeName() + if file.Package != nil { + parts = append([]string{*file.Package}, parts...) + } + exp := "." 
+ strings.Join(parts, ".") + for _, field := range parent.Field { + if field.GetType() == descriptor.FieldDescriptorProto_TYPE_GROUP && field.GetTypeName() == exp { + d.group = true + break + } + } + } + + d.ext = make([]*ExtensionDescriptor, len(desc.Extension)) + for i, field := range desc.Extension { + d.ext[i] = &ExtensionDescriptor{common{file}, field, d} + } + + return d +} + +// Return a slice of all the Descriptors defined within this file +func wrapDescriptors(file *descriptor.FileDescriptorProto) []*Descriptor { + sl := make([]*Descriptor, 0, len(file.MessageType)+10) + for i, desc := range file.MessageType { + sl = wrapThisDescriptor(sl, desc, nil, file, i) + } + return sl +} + +// Wrap this Descriptor, recursively +func wrapThisDescriptor(sl []*Descriptor, desc *descriptor.DescriptorProto, parent *Descriptor, file *descriptor.FileDescriptorProto, index int) []*Descriptor { + sl = append(sl, newDescriptor(desc, parent, file, index)) + me := sl[len(sl)-1] + for i, nested := range desc.NestedType { + sl = wrapThisDescriptor(sl, nested, me, file, i) + } + return sl +} + +// Construct the EnumDescriptor +func newEnumDescriptor(desc *descriptor.EnumDescriptorProto, parent *Descriptor, file *descriptor.FileDescriptorProto, index int) *EnumDescriptor { + ed := &EnumDescriptor{ + common: common{file}, + EnumDescriptorProto: desc, + parent: parent, + index: index, + } + if parent == nil { + ed.path = fmt.Sprintf("%d,%d", enumPath, index) + } else { + ed.path = fmt.Sprintf("%s,%d,%d", parent.path, messageEnumPath, index) + } + return ed +} + +// Return a slice of all the EnumDescriptors defined within this file +func wrapEnumDescriptors(file *descriptor.FileDescriptorProto, descs []*Descriptor) []*EnumDescriptor { + sl := make([]*EnumDescriptor, 0, len(file.EnumType)+10) + // Top-level enums. + for i, enum := range file.EnumType { + sl = append(sl, newEnumDescriptor(enum, nil, file, i)) + } + // Enums within messages. 
Enums within embedded messages appear in the outer-most message. + for _, nested := range descs { + for i, enum := range nested.EnumType { + sl = append(sl, newEnumDescriptor(enum, nested, file, i)) + } + } + return sl +} + +// Return a slice of all the top-level ExtensionDescriptors defined within this file. +func wrapExtensions(file *descriptor.FileDescriptorProto) []*ExtensionDescriptor { + sl := make([]*ExtensionDescriptor, len(file.Extension)) + for i, field := range file.Extension { + sl[i] = &ExtensionDescriptor{common{file}, field, nil} + } + return sl +} + +// Return a slice of all the types that are publicly imported into this file. +func wrapImported(file *descriptor.FileDescriptorProto, g *Generator) (sl []*ImportedDescriptor) { + for _, index := range file.PublicDependency { + df := g.fileByName(file.Dependency[index]) + for _, d := range df.desc { + sl = append(sl, &ImportedDescriptor{common{file}, d}) + } + for _, e := range df.enum { + sl = append(sl, &ImportedDescriptor{common{file}, e}) + } + for _, ext := range df.ext { + sl = append(sl, &ImportedDescriptor{common{file}, ext}) + } + } + return +} + +func extractComments(file *FileDescriptor) { + file.comments = make(map[string]*descriptor.SourceCodeInfo_Location) + for _, loc := range file.GetSourceCodeInfo().GetLocation() { + if loc.LeadingComments == nil { + continue + } + var p []string + for _, n := range loc.Path { + p = append(p, strconv.Itoa(int(n))) + } + file.comments[strings.Join(p, ",")] = loc + } +} + +// BuildTypeNameMap builds the map from fully qualified type names to objects. +// The key names for the map come from the input data, which puts a period at the beginning. +// It should be called after SetPackageNames and before GenerateAllFiles. +func (g *Generator) BuildTypeNameMap() { + g.typeNameToObject = make(map[string]Object) + for _, f := range g.allFiles { + // The names in this loop are defined by the proto world, not us, so the + // package name may be empty. 
If so, the dotted package name of X will + // be ".X"; otherwise it will be ".pkg.X". + dottedPkg := "." + f.GetPackage() + if dottedPkg != "." { + dottedPkg += "." + } + for _, enum := range f.enum { + name := dottedPkg + dottedSlice(enum.TypeName()) + g.typeNameToObject[name] = enum + } + for _, desc := range f.desc { + name := dottedPkg + dottedSlice(desc.TypeName()) + g.typeNameToObject[name] = desc + } + } +} + +// ObjectNamed, given a fully-qualified input type name as it appears in the input data, +// returns the descriptor for the message or enum with that name. +func (g *Generator) ObjectNamed(typeName string) Object { + o, ok := g.typeNameToObject[typeName] + if !ok { + g.Fail("can't find object with type", typeName) + } + + // If the file of this object isn't a direct dependency of the current file, + // or in the current file, then this object has been publicly imported into + // a dependency of the current file. + // We should return the ImportedDescriptor object for it instead. + direct := *o.File().Name == *g.file.Name + if !direct { + for _, dep := range g.file.Dependency { + if *g.fileByName(dep).Name == *o.File().Name { + direct = true + break + } + } + } + if !direct { + found := false + Loop: + for _, dep := range g.file.Dependency { + df := g.fileByName(*g.fileByName(dep).Name) + for _, td := range df.imp { + if td.o == o { + // Found it! + o = td + found = true + break Loop + } + } + } + if !found { + log.Printf("protoc-gen-go: WARNING: failed finding publicly imported dependency for %v, used in %v", typeName, *g.file.Name) + } + } + + return o +} + +// P prints the arguments to the generated output. It handles strings and int32s, plus +// handling indirections because they may be *string, etc. 
+func (g *Generator) P(str ...interface{}) { + g.WriteString(g.indent) + for _, v := range str { + switch s := v.(type) { + case string: + g.WriteString(s) + case *string: + g.WriteString(*s) + case bool: + g.WriteString(fmt.Sprintf("%t", s)) + case *bool: + g.WriteString(fmt.Sprintf("%t", *s)) + case int: + g.WriteString(fmt.Sprintf("%d", s)) + case *int32: + g.WriteString(fmt.Sprintf("%d", *s)) + case *int64: + g.WriteString(fmt.Sprintf("%d", *s)) + case float64: + g.WriteString(fmt.Sprintf("%g", s)) + case *float64: + g.WriteString(fmt.Sprintf("%g", *s)) + default: + g.Fail(fmt.Sprintf("unknown type in printer: %T", v)) + } + } + g.WriteByte('\n') +} + +// In Indents the output one tab stop. +func (g *Generator) In() { g.indent += "\t" } + +// Out unindents the output one tab stop. +func (g *Generator) Out() { + if len(g.indent) > 0 { + g.indent = g.indent[1:] + } +} + +// GenerateAllFiles generates the output for all the files we're outputting. +func (g *Generator) GenerateAllFiles() { + // Initialize the plugins + for _, p := range plugins { + p.Init(g) + } + // Generate the output. The generator runs for every file, even the files + // that we don't generate output for, so that we can collate the full list + // of exported symbols to support public imports. + genFileMap := make(map[*FileDescriptor]bool, len(g.genFiles)) + for _, file := range g.genFiles { + genFileMap[file] = true + } + i := 0 + for _, file := range g.allFiles { + g.Reset() + g.generate(file) + if _, ok := genFileMap[file]; !ok { + continue + } + g.Response.File[i] = new(plugin.CodeGeneratorResponse_File) + g.Response.File[i].Name = proto.String(goFileName(*file.Name)) + g.Response.File[i].Content = proto.String(g.String()) + i++ + } +} + +// Run all the plugins associated with the file. +func (g *Generator) runPlugins(file *FileDescriptor) { + for _, p := range plugins { + p.Generate(file) + } +} + +// FileOf return the FileDescriptor for this FileDescriptorProto. 
+func (g *Generator) FileOf(fd *descriptor.FileDescriptorProto) *FileDescriptor { + for _, file := range g.allFiles { + if file.FileDescriptorProto == fd { + return file + } + } + g.Fail("could not find file in table:", fd.GetName()) + return nil +} + +// Fill the response protocol buffer with the generated output for all the files we're +// supposed to generate. +func (g *Generator) generate(file *FileDescriptor) { + g.file = g.FileOf(file.FileDescriptorProto) + g.usedPackages = make(map[string]bool) + + for _, td := range g.file.imp { + g.generateImported(td) + } + for _, enum := range g.file.enum { + g.generateEnum(enum) + } + for _, desc := range g.file.desc { + // Don't generate virtual messages for maps. + if desc.GetOptions().GetMapEntry() { + continue + } + g.generateMessage(desc) + } + for _, ext := range g.file.ext { + g.generateExtension(ext) + } + g.generateInitFunction() + + // Run the plugins before the imports so we know which imports are necessary. + g.runPlugins(file) + + // Generate header and imports last, though they appear first in the output. + rem := g.Buffer + g.Buffer = new(bytes.Buffer) + g.generateHeader() + g.generateImports() + g.Write(rem.Bytes()) + + // Reformat generated code. + fset := token.NewFileSet() + ast, err := parser.ParseFile(fset, "", g, parser.ParseComments) + if err != nil { + g.Fail("bad Go source code was generated:", err.Error()) + return + } + g.Reset() + err = (&printer.Config{Mode: printer.TabIndent | printer.UseSpaces, Tabwidth: 8}).Fprint(g, fset, ast) + if err != nil { + g.Fail("generated Go source code could not be reformatted:", err.Error()) + } +} + +// Generate the header, including package definition +func (g *Generator) generateHeader() { + g.P("// Code generated by protoc-gen-go.") + g.P("// source: ", g.file.Name) + g.P("// DO NOT EDIT!") + g.P() + + name := g.file.PackageName() + + if g.file.index == 0 { + // Generate package docs for the first file in the package. 
+ g.P("/*") + g.P("Package ", name, " is a generated protocol buffer package.") + g.P() + if loc, ok := g.file.comments[strconv.Itoa(packagePath)]; ok { + // not using g.PrintComments because this is a /* */ comment block. + text := strings.TrimSuffix(loc.GetLeadingComments(), "\n") + for _, line := range strings.Split(text, "\n") { + line = strings.TrimPrefix(line, " ") + // ensure we don't escape from the block comment + line = strings.Replace(line, "*/", "* /", -1) + g.P(line) + } + g.P() + } + g.P("It is generated from these files:") + for _, f := range g.genFiles { + g.P("\t", f.Name) + } + g.P() + g.P("It has these top-level messages:") + for _, msg := range g.file.desc { + if msg.parent != nil { + continue + } + g.P("\t", CamelCaseSlice(msg.TypeName())) + } + g.P("*/") + } + + g.P("package ", name) + g.P() +} + +// PrintComments prints any comments from the source .proto file. +// The path is a comma-separated list of integers. +// See descriptor.proto for its format. +func (g *Generator) PrintComments(path string) { + if loc, ok := g.file.comments[path]; ok { + text := strings.TrimSuffix(loc.GetLeadingComments(), "\n") + for _, line := range strings.Split(text, "\n") { + g.P("// ", strings.TrimPrefix(line, " ")) + } + } +} + +func (g *Generator) fileByName(filename string) *FileDescriptor { + for _, fd := range g.allFiles { + if fd.GetName() == filename { + return fd + } + } + return nil +} + +// weak returns whether the ith import of the current file is a weak import. +func (g *Generator) weak(i int32) bool { + for _, j := range g.file.WeakDependency { + if j == i { + return true + } + } + return false +} + +// Generate the imports +func (g *Generator) generateImports() { + // We almost always need a proto import. Rather than computing when we + // do, which is tricky when there's a plugin, just import it and + // reference it later. The same argument applies to the math package, + // for handling bit patterns for floating-point numbers. 
+ g.P("import " + g.Pkg["proto"] + " " + strconv.Quote(g.ImportPrefix+"github.com/golang/protobuf/proto")) + if !g.file.proto3 { + g.P("import " + g.Pkg["math"] + ` "math"`) + } + for i, s := range g.file.Dependency { + fd := g.fileByName(s) + // Do not import our own package. + if fd.PackageName() == g.packageName { + continue + } + filename := goFileName(s) + // By default, import path is the dirname of the Go filename. + importPath := path.Dir(filename) + if substitution, ok := g.ImportMap[s]; ok { + importPath = substitution + } + importPath = g.ImportPrefix + importPath + // Skip weak imports. + if g.weak(int32(i)) { + g.P("// skipping weak import ", fd.PackageName(), " ", strconv.Quote(importPath)) + continue + } + if _, ok := g.usedPackages[fd.PackageName()]; ok { + g.P("import ", fd.PackageName(), " ", strconv.Quote(importPath)) + } else { + // TODO: Re-enable this when we are more feature-complete. + // For instance, some protos use foreign field extensions, which we don't support. + // Until then, this is just annoying spam. + //log.Printf("protoc-gen-go: discarding unused import from %v: %v", *g.file.Name, s) + g.P("// discarding unused import ", fd.PackageName(), " ", strconv.Quote(importPath)) + } + } + g.P() + // TODO: may need to worry about uniqueness across plugins + for _, p := range plugins { + p.GenerateImports(g.file) + g.P() + } + g.P("// Reference imports to suppress errors if they are not otherwise used.") + g.P("var _ = ", g.Pkg["proto"], ".Marshal") + if !g.file.proto3 { + g.P("var _ = ", g.Pkg["math"], ".Inf") + } + g.P() +} + +func (g *Generator) generateImported(id *ImportedDescriptor) { + // Don't generate public import symbols for files that we are generating + // code for, since those symbols will already be in this package. + // We can't simply avoid creating the ImportedDescriptor objects, + // because g.genFiles isn't populated at that stage. 
+ tn := id.TypeName() + sn := tn[len(tn)-1] + df := g.FileOf(id.o.File()) + filename := *df.Name + for _, fd := range g.genFiles { + if *fd.Name == filename { + g.P("// Ignoring public import of ", sn, " from ", filename) + g.P() + return + } + } + g.P("// ", sn, " from public import ", filename) + g.usedPackages[df.PackageName()] = true + + for _, sym := range df.exported[id.o] { + sym.GenerateAlias(g, df.PackageName()) + } + + g.P() +} + +// Generate the enum definitions for this EnumDescriptor. +func (g *Generator) generateEnum(enum *EnumDescriptor) { + // The full type name + typeName := enum.TypeName() + // The full type name, CamelCased. + ccTypeName := CamelCaseSlice(typeName) + ccPrefix := enum.prefix() + + g.PrintComments(enum.path) + g.P("type ", ccTypeName, " int32") + g.file.addExport(enum, enumSymbol{ccTypeName, enum.proto3()}) + g.P("const (") + g.In() + for i, e := range enum.Value { + g.PrintComments(fmt.Sprintf("%s,%d,%d", enum.path, enumValuePath, i)) + + name := ccPrefix + *e.Name + g.P(name, " ", ccTypeName, " = ", e.Number) + g.file.addExport(enum, constOrVarSymbol{name, "const", ccTypeName}) + } + g.Out() + g.P(")") + g.P("var ", ccTypeName, "_name = map[int32]string{") + g.In() + generated := make(map[int32]bool) // avoid duplicate values + for _, e := range enum.Value { + duplicate := "" + if _, present := generated[*e.Number]; present { + duplicate = "// Duplicate value: " + } + g.P(duplicate, e.Number, ": ", strconv.Quote(*e.Name), ",") + generated[*e.Number] = true + } + g.Out() + g.P("}") + g.P("var ", ccTypeName, "_value = map[string]int32{") + g.In() + for _, e := range enum.Value { + g.P(strconv.Quote(*e.Name), ": ", e.Number, ",") + } + g.Out() + g.P("}") + + if !enum.proto3() { + g.P("func (x ", ccTypeName, ") Enum() *", ccTypeName, " {") + g.In() + g.P("p := new(", ccTypeName, ")") + g.P("*p = x") + g.P("return p") + g.Out() + g.P("}") + } + + g.P("func (x ", ccTypeName, ") String() string {") + g.In() + g.P("return ", 
g.Pkg["proto"], ".EnumName(", ccTypeName, "_name, int32(x))") + g.Out() + g.P("}") + + if !enum.proto3() { + g.P("func (x *", ccTypeName, ") UnmarshalJSON(data []byte) error {") + g.In() + g.P("value, err := ", g.Pkg["proto"], ".UnmarshalJSONEnum(", ccTypeName, `_value, data, "`, ccTypeName, `")`) + g.P("if err != nil {") + g.In() + g.P("return err") + g.Out() + g.P("}") + g.P("*x = ", ccTypeName, "(value)") + g.P("return nil") + g.Out() + g.P("}") + } + + g.P() +} + +// The tag is a string like "varint,2,opt,name=fieldname,def=7" that +// identifies details of the field for the protocol buffer marshaling and unmarshaling +// code. The fields are: +// wire encoding +// protocol tag number +// opt,req,rep for optional, required, or repeated +// packed whether the encoding is "packed" (optional; repeated primitives only) +// name= the original declared name +// enum= the name of the enum type if it is an enum-typed field. +// proto3 if this field is in a proto3 message +// def= string representation of the default value, if any. +// The default value must be in a representation that can be used at run-time +// to generate the default value. Thus bools become 0 and 1, for instance. +func (g *Generator) goTag(message *Descriptor, field *descriptor.FieldDescriptorProto, wiretype string) string { + optrepreq := "" + switch { + case isOptional(field): + optrepreq = "opt" + case isRequired(field): + optrepreq = "req" + case isRepeated(field): + optrepreq = "rep" + } + var defaultValue string + if dv := field.DefaultValue; dv != nil { // set means an explicit default + defaultValue = *dv + // Some types need tweaking. + switch *field.Type { + case descriptor.FieldDescriptorProto_TYPE_BOOL: + if defaultValue == "true" { + defaultValue = "1" + } else { + defaultValue = "0" + } + case descriptor.FieldDescriptorProto_TYPE_STRING, + descriptor.FieldDescriptorProto_TYPE_BYTES: + // Nothing to do. Quoting is done for the whole tag. 
+ case descriptor.FieldDescriptorProto_TYPE_ENUM: + // For enums we need to provide the integer constant. + obj := g.ObjectNamed(field.GetTypeName()) + if id, ok := obj.(*ImportedDescriptor); ok { + // It is an enum that was publicly imported. + // We need the underlying type. + obj = id.o + } + enum, ok := obj.(*EnumDescriptor) + if !ok { + log.Printf("obj is a %T", obj) + if id, ok := obj.(*ImportedDescriptor); ok { + log.Printf("id.o is a %T", id.o) + } + g.Fail("unknown enum type", CamelCaseSlice(obj.TypeName())) + } + defaultValue = enum.integerValueAsString(defaultValue) + } + defaultValue = ",def=" + defaultValue + } + enum := "" + if *field.Type == descriptor.FieldDescriptorProto_TYPE_ENUM { + // We avoid using obj.PackageName(), because we want to use the + // original (proto-world) package name. + obj := g.ObjectNamed(field.GetTypeName()) + if id, ok := obj.(*ImportedDescriptor); ok { + obj = id.o + } + enum = ",enum=" + if pkg := obj.File().GetPackage(); pkg != "" { + enum += pkg + "." + } + enum += CamelCaseSlice(obj.TypeName()) + } + packed := "" + if field.Options != nil && field.Options.GetPacked() { + packed = ",packed" + } + fieldName := field.GetName() + name := fieldName + if *field.Type == descriptor.FieldDescriptorProto_TYPE_GROUP { + // We must use the type name for groups instead of + // the field name to preserve capitalization. + // type_name in FieldDescriptorProto is fully-qualified, + // but we only want the local part. + name = *field.TypeName + if i := strings.LastIndex(name, "."); i >= 0 { + name = name[i+1:] + } + } + if name == CamelCase(fieldName) { + name = "" + } else { + name = ",name=" + name + } + if message.proto3() { + // We only need the extra tag for []byte fields; + // no need to add noise for the others. 
+ if *field.Type == descriptor.FieldDescriptorProto_TYPE_BYTES { + name += ",proto3" + } + } + return strconv.Quote(fmt.Sprintf("%s,%d,%s%s%s%s%s", + wiretype, + field.GetNumber(), + optrepreq, + packed, + name, + enum, + defaultValue)) +} + +func needsStar(typ descriptor.FieldDescriptorProto_Type) bool { + switch typ { + case descriptor.FieldDescriptorProto_TYPE_GROUP: + return false + case descriptor.FieldDescriptorProto_TYPE_MESSAGE: + return false + case descriptor.FieldDescriptorProto_TYPE_BYTES: + return false + } + return true +} + +// TypeName is the printed name appropriate for an item. If the object is in the current file, +// TypeName drops the package name and underscores the rest. +// Otherwise the object is from another package; and the result is the underscored +// package name followed by the item name. +// The result always has an initial capital. +func (g *Generator) TypeName(obj Object) string { + return g.DefaultPackageName(obj) + CamelCaseSlice(obj.TypeName()) +} + +// TypeNameWithPackage is like TypeName, but always includes the package +// name even if the object is in our own package. +func (g *Generator) TypeNameWithPackage(obj Object) string { + return obj.PackageName() + CamelCaseSlice(obj.TypeName()) +} + +// GoType returns a string representing the type name, and the wire type +func (g *Generator) GoType(message *Descriptor, field *descriptor.FieldDescriptorProto) (typ string, wire string) { + // TODO: Options. 
+ switch *field.Type { + case descriptor.FieldDescriptorProto_TYPE_DOUBLE: + typ, wire = "float64", "fixed64" + case descriptor.FieldDescriptorProto_TYPE_FLOAT: + typ, wire = "float32", "fixed32" + case descriptor.FieldDescriptorProto_TYPE_INT64: + typ, wire = "int64", "varint" + case descriptor.FieldDescriptorProto_TYPE_UINT64: + typ, wire = "uint64", "varint" + case descriptor.FieldDescriptorProto_TYPE_INT32: + typ, wire = "int32", "varint" + case descriptor.FieldDescriptorProto_TYPE_UINT32: + typ, wire = "uint32", "varint" + case descriptor.FieldDescriptorProto_TYPE_FIXED64: + typ, wire = "uint64", "fixed64" + case descriptor.FieldDescriptorProto_TYPE_FIXED32: + typ, wire = "uint32", "fixed32" + case descriptor.FieldDescriptorProto_TYPE_BOOL: + typ, wire = "bool", "varint" + case descriptor.FieldDescriptorProto_TYPE_STRING: + typ, wire = "string", "bytes" + case descriptor.FieldDescriptorProto_TYPE_GROUP: + desc := g.ObjectNamed(field.GetTypeName()) + typ, wire = "*"+g.TypeName(desc), "group" + case descriptor.FieldDescriptorProto_TYPE_MESSAGE: + desc := g.ObjectNamed(field.GetTypeName()) + typ, wire = "*"+g.TypeName(desc), "bytes" + case descriptor.FieldDescriptorProto_TYPE_BYTES: + typ, wire = "[]byte", "bytes" + case descriptor.FieldDescriptorProto_TYPE_ENUM: + desc := g.ObjectNamed(field.GetTypeName()) + typ, wire = g.TypeName(desc), "varint" + case descriptor.FieldDescriptorProto_TYPE_SFIXED32: + typ, wire = "int32", "fixed32" + case descriptor.FieldDescriptorProto_TYPE_SFIXED64: + typ, wire = "int64", "fixed64" + case descriptor.FieldDescriptorProto_TYPE_SINT32: + typ, wire = "int32", "zigzag32" + case descriptor.FieldDescriptorProto_TYPE_SINT64: + typ, wire = "int64", "zigzag64" + default: + g.Fail("unknown type for", field.GetName()) + } + if isRepeated(field) { + typ = "[]" + typ + } else if message != nil && message.proto3() { + return + } else if needsStar(*field.Type) { + typ = "*" + typ + } + return +} + +func (g *Generator) RecordTypeUse(t string) 
{ + if obj, ok := g.typeNameToObject[t]; ok { + // Call ObjectNamed to get the true object to record the use. + obj = g.ObjectNamed(t) + g.usedPackages[obj.PackageName()] = true + } +} + +// Method names that may be generated. Fields with these names get an +// underscore appended. +var methodNames = [...]string{ + "Reset", + "String", + "ProtoMessage", + "Marshal", + "Unmarshal", + "ExtensionRangeArray", + "ExtensionMap", + "Descriptor", +} + +// Generate the type and default constant definitions for this Descriptor. +func (g *Generator) generateMessage(message *Descriptor) { + // The full type name + typeName := message.TypeName() + // The full type name, CamelCased. + ccTypeName := CamelCaseSlice(typeName) + + usedNames := make(map[string]bool) + for _, n := range methodNames { + usedNames[n] = true + } + fieldNames := make(map[*descriptor.FieldDescriptorProto]string) + fieldGetterNames := make(map[*descriptor.FieldDescriptorProto]string) + mapFieldTypes := make(map[*descriptor.FieldDescriptorProto]string) + + g.PrintComments(message.path) + g.P("type ", ccTypeName, " struct {") + g.In() + + for i, field := range message.Field { + g.PrintComments(fmt.Sprintf("%s,%d,%d", message.path, messageFieldPath, i)) + + fieldName := CamelCase(*field.Name) + for usedNames[fieldName] { + fieldName += "_" + } + fieldGetterName := fieldName + usedNames[fieldName] = true + typename, wiretype := g.GoType(message, field) + jsonName := *field.Name + tag := fmt.Sprintf("protobuf:%s json:%q", g.goTag(message, field, wiretype), jsonName+",omitempty") + + if *field.Type == descriptor.FieldDescriptorProto_TYPE_MESSAGE { + desc := g.ObjectNamed(field.GetTypeName()) + if d, ok := desc.(*Descriptor); ok && d.GetOptions().GetMapEntry() { + // Figure out the Go types and tags for the key and value types. 
+ keyField, valField := d.Field[0], d.Field[1] + keyType, keyWire := g.GoType(d, keyField) + valType, valWire := g.GoType(d, valField) + keyTag, valTag := g.goTag(d, keyField, keyWire), g.goTag(d, valField, valWire) + + // We don't use stars, except for message-typed values. + // Message and enum types are the only two possibly foreign types used in maps, + // so record their use. They are not permitted as map keys. + keyType = strings.TrimPrefix(keyType, "*") + switch *valField.Type { + case descriptor.FieldDescriptorProto_TYPE_ENUM: + valType = strings.TrimPrefix(valType, "*") + g.RecordTypeUse(valField.GetTypeName()) + case descriptor.FieldDescriptorProto_TYPE_MESSAGE: + g.RecordTypeUse(valField.GetTypeName()) + default: + valType = strings.TrimPrefix(valType, "*") + } + + typename = fmt.Sprintf("map[%s]%s", keyType, valType) + mapFieldTypes[field] = typename // record for the getter generation + + tag += fmt.Sprintf(" protobuf_key:%s protobuf_val:%s", keyTag, valTag) + } + } + + fieldNames[field] = fieldName + fieldGetterNames[field] = fieldGetterName + g.P(fieldName, "\t", typename, "\t`", tag, "`") + g.RecordTypeUse(field.GetTypeName()) + } + if len(message.ExtensionRange) > 0 { + g.P("XXX_extensions\t\tmap[int32]", g.Pkg["proto"], ".Extension `json:\"-\"`") + } + if !message.proto3() { + g.P("XXX_unrecognized\t[]byte `json:\"-\"`") + } + g.Out() + g.P("}") + + // Reset, String and ProtoMessage methods. + g.P("func (m *", ccTypeName, ") Reset() { *m = ", ccTypeName, "{} }") + g.P("func (m *", ccTypeName, ") String() string { return ", g.Pkg["proto"], ".CompactTextString(m) }") + g.P("func (*", ccTypeName, ") ProtoMessage() {}") + + // Extension support methods + var hasExtensions, isMessageSet bool + if len(message.ExtensionRange) > 0 { + hasExtensions = true + // message_set_wire_format only makes sense when extensions are defined. 
+ if opts := message.Options; opts != nil && opts.GetMessageSetWireFormat() { + isMessageSet = true + g.P() + g.P("func (m *", ccTypeName, ") Marshal() ([]byte, error) {") + g.In() + g.P("return ", g.Pkg["proto"], ".MarshalMessageSet(m.ExtensionMap())") + g.Out() + g.P("}") + g.P("func (m *", ccTypeName, ") Unmarshal(buf []byte) error {") + g.In() + g.P("return ", g.Pkg["proto"], ".UnmarshalMessageSet(buf, m.ExtensionMap())") + g.Out() + g.P("}") + g.P("func (m *", ccTypeName, ") MarshalJSON() ([]byte, error) {") + g.In() + g.P("return ", g.Pkg["proto"], ".MarshalMessageSetJSON(m.XXX_extensions)") + g.Out() + g.P("}") + g.P("func (m *", ccTypeName, ") UnmarshalJSON(buf []byte) error {") + g.In() + g.P("return ", g.Pkg["proto"], ".UnmarshalMessageSetJSON(buf, m.XXX_extensions)") + g.Out() + g.P("}") + g.P("// ensure ", ccTypeName, " satisfies proto.Marshaler and proto.Unmarshaler") + g.P("var _ ", g.Pkg["proto"], ".Marshaler = (*", ccTypeName, ")(nil)") + g.P("var _ ", g.Pkg["proto"], ".Unmarshaler = (*", ccTypeName, ")(nil)") + } + + g.P() + g.P("var extRange_", ccTypeName, " = []", g.Pkg["proto"], ".ExtensionRange{") + g.In() + for _, r := range message.ExtensionRange { + end := fmt.Sprint(*r.End - 1) // make range inclusive on both ends + g.P("{", r.Start, ", ", end, "},") + } + g.Out() + g.P("}") + g.P("func (*", ccTypeName, ") ExtensionRangeArray() []", g.Pkg["proto"], ".ExtensionRange {") + g.In() + g.P("return extRange_", ccTypeName) + g.Out() + g.P("}") + g.P("func (m *", ccTypeName, ") ExtensionMap() map[int32]", g.Pkg["proto"], ".Extension {") + g.In() + g.P("if m.XXX_extensions == nil {") + g.In() + g.P("m.XXX_extensions = make(map[int32]", g.Pkg["proto"], ".Extension)") + g.Out() + g.P("}") + g.P("return m.XXX_extensions") + g.Out() + g.P("}") + } + + // Default constants + defNames := make(map[*descriptor.FieldDescriptorProto]string) + for _, field := range message.Field { + def := field.GetDefaultValue() + if def == "" { + continue + } + fieldname := 
"Default_" + ccTypeName + "_" + CamelCase(*field.Name) + defNames[field] = fieldname + typename, _ := g.GoType(message, field) + if typename[0] == '*' { + typename = typename[1:] + } + kind := "const " + switch { + case typename == "bool": + case typename == "string": + def = strconv.Quote(def) + case typename == "[]byte": + def = "[]byte(" + strconv.Quote(def) + ")" + kind = "var " + case def == "inf", def == "-inf", def == "nan": + // These names are known to, and defined by, the protocol language. + switch def { + case "inf": + def = "math.Inf(1)" + case "-inf": + def = "math.Inf(-1)" + case "nan": + def = "math.NaN()" + } + if *field.Type == descriptor.FieldDescriptorProto_TYPE_FLOAT { + def = "float32(" + def + ")" + } + kind = "var " + case *field.Type == descriptor.FieldDescriptorProto_TYPE_ENUM: + // Must be an enum. Need to construct the prefixed name. + obj := g.ObjectNamed(field.GetTypeName()) + var enum *EnumDescriptor + if id, ok := obj.(*ImportedDescriptor); ok { + // The enum type has been publicly imported. + enum, _ = id.o.(*EnumDescriptor) + } else { + enum, _ = obj.(*EnumDescriptor) + } + if enum == nil { + log.Printf("don't know how to generate constant for %s", fieldname) + continue + } + def = g.DefaultPackageName(obj) + enum.prefix() + def + } + g.P(kind, fieldname, " ", typename, " = ", def) + g.file.addExport(message, constOrVarSymbol{fieldname, kind, ""}) + } + g.P() + + // Field getters + var getters []getterSymbol + for _, field := range message.Field { + fname := fieldNames[field] + typename, _ := g.GoType(message, field) + if t, ok := mapFieldTypes[field]; ok { + typename = t + } + mname := "Get" + fieldGetterNames[field] + star := "" + if needsStar(*field.Type) && typename[0] == '*' { + typename = typename[1:] + star = "*" + } + + // In proto3, only generate getters for message fields. 
+ if message.proto3() && *field.Type != descriptor.FieldDescriptorProto_TYPE_MESSAGE { + continue + } + + // Only export getter symbols for basic types, + // and for messages and enums in the same package. + // Groups are not exported. + // Foreign types can't be hoisted through a public import because + // the importer may not already be importing the defining .proto. + // As an example, imagine we have an import tree like this: + // A.proto -> B.proto -> C.proto + // If A publicly imports B, we need to generate the getters from B in A's output, + // but if one such getter returns something from C then we cannot do that + // because A is not importing C already. + var getter, genType bool + switch *field.Type { + case descriptor.FieldDescriptorProto_TYPE_GROUP: + getter = false + case descriptor.FieldDescriptorProto_TYPE_MESSAGE, descriptor.FieldDescriptorProto_TYPE_ENUM: + // Only export getter if its return type is in this package. + getter = g.ObjectNamed(field.GetTypeName()).PackageName() == message.PackageName() + genType = true + default: + getter = true + } + if getter { + getters = append(getters, getterSymbol{ + name: mname, + typ: typename, + typeName: field.GetTypeName(), + genType: genType, + }) + } + + g.P("func (m *", ccTypeName, ") "+mname+"() "+typename+" {") + g.In() + def, hasDef := defNames[field] + typeDefaultIsNil := false // whether this field type's default value is a literal nil unless specified + switch *field.Type { + case descriptor.FieldDescriptorProto_TYPE_BYTES: + typeDefaultIsNil = !hasDef + case descriptor.FieldDescriptorProto_TYPE_GROUP, descriptor.FieldDescriptorProto_TYPE_MESSAGE: + typeDefaultIsNil = true + } + if isRepeated(field) { + typeDefaultIsNil = true + } + if typeDefaultIsNil { + // A bytes field with no explicit default needs less generated code, + // as does a message or group field, or a repeated field. + g.P("if m != nil {") + g.In() + g.P("return m." 
+ fname) + g.Out() + g.P("}") + g.P("return nil") + g.Out() + g.P("}") + g.P() + continue + } + g.P("if m != nil && m." + fname + " != nil {") + g.In() + g.P("return " + star + "m." + fname) + g.Out() + g.P("}") + if hasDef { + if *field.Type != descriptor.FieldDescriptorProto_TYPE_BYTES { + g.P("return " + def) + } else { + // The default is a []byte var. + // Make a copy when returning it to be safe. + g.P("return append([]byte(nil), ", def, "...)") + } + } else { + switch *field.Type { + case descriptor.FieldDescriptorProto_TYPE_BOOL: + g.P("return false") + case descriptor.FieldDescriptorProto_TYPE_STRING: + g.P(`return ""`) + case descriptor.FieldDescriptorProto_TYPE_ENUM: + // The default default for an enum is the first value in the enum, + // not zero. + obj := g.ObjectNamed(field.GetTypeName()) + var enum *EnumDescriptor + if id, ok := obj.(*ImportedDescriptor); ok { + // The enum type has been publicly imported. + enum, _ = id.o.(*EnumDescriptor) + } else { + enum, _ = obj.(*EnumDescriptor) + } + if enum == nil { + log.Printf("don't know how to generate getter for %s", field.GetName()) + continue + } + if len(enum.Value) == 0 { + g.P("return 0 // empty enum") + } else { + first := enum.Value[0].GetName() + g.P("return ", g.DefaultPackageName(obj)+enum.prefix()+first) + } + default: + g.P("return 0") + } + } + g.Out() + g.P("}") + g.P() + } + + if !message.group { + ms := &messageSymbol{sym: ccTypeName, hasExtensions: hasExtensions, isMessageSet: isMessageSet, getters: getters} + g.file.addExport(message, ms) + } + + for _, ext := range message.ext { + g.generateExtension(ext) + } + +} + +func (g *Generator) generateExtension(ext *ExtensionDescriptor) { + ccTypeName := ext.DescName() + + extDesc := g.ObjectNamed(*ext.Extendee).(*Descriptor) + extendedType := "*" + g.TypeName(extDesc) + field := ext.FieldDescriptorProto + fieldType, wireType := g.GoType(ext.parent, field) + tag := g.goTag(extDesc, field, wireType) + g.RecordTypeUse(*ext.Extendee) + if n := 
ext.FieldDescriptorProto.TypeName; n != nil { + // foreign extension type + g.RecordTypeUse(*n) + } + + typeName := ext.TypeName() + + // Special case for proto2 message sets: If this extension is extending + // proto2_bridge.MessageSet, and its final name component is "message_set_extension", + // then drop that last component. + mset := false + if extendedType == "*proto2_bridge.MessageSet" && typeName[len(typeName)-1] == "message_set_extension" { + typeName = typeName[:len(typeName)-1] + mset = true + } + + // For text formatting, the package must be exactly what the .proto file declares, + // ignoring overrides such as the go_package option, and with no dot/underscore mapping. + extName := strings.Join(typeName, ".") + if g.file.Package != nil { + extName = *g.file.Package + "." + extName + } + + g.P("var ", ccTypeName, " = &", g.Pkg["proto"], ".ExtensionDesc{") + g.In() + g.P("ExtendedType: (", extendedType, ")(nil),") + g.P("ExtensionType: (", fieldType, ")(nil),") + g.P("Field: ", field.Number, ",") + g.P(`Name: "`, extName, `",`) + g.P("Tag: ", tag, ",") + + g.Out() + g.P("}") + g.P() + + if mset { + // Generate a bit more code to register with message_set.go. + g.P("func init() { ") + g.In() + g.P(g.Pkg["proto"], ".RegisterMessageSetType((", fieldType, ")(nil), ", field.Number, ", \"", extName, "\")") + g.Out() + g.P("}") + } + + g.file.addExport(ext, constOrVarSymbol{ccTypeName, "var", ""}) +} + +func (g *Generator) generateInitFunction() { + g.P("func init() {") + g.In() + for _, enum := range g.file.enum { + g.generateEnumRegistration(enum) + } + for _, d := range g.file.desc { + for _, ext := range d.ext { + g.generateExtensionRegistration(ext) + } + } + for _, ext := range g.file.ext { + g.generateExtensionRegistration(ext) + } + g.Out() + g.P("}") +} + +func (g *Generator) generateEnumRegistration(enum *EnumDescriptor) { + // // We always print the full (proto-world) package name here. + pkg := enum.File().GetPackage() + if pkg != "" { + pkg += "." 
+ } + // The full type name + typeName := enum.TypeName() + // The full type name, CamelCased. + ccTypeName := CamelCaseSlice(typeName) + g.P(g.Pkg["proto"]+".RegisterEnum(", strconv.Quote(pkg+ccTypeName), ", ", ccTypeName+"_name, ", ccTypeName+"_value)") +} + +func (g *Generator) generateExtensionRegistration(ext *ExtensionDescriptor) { + g.P(g.Pkg["proto"]+".RegisterExtension(", ext.DescName(), ")") +} + +// And now lots of helper functions. + +// Is c an ASCII lower-case letter? +func isASCIILower(c byte) bool { + return 'a' <= c && c <= 'z' +} + +// Is c an ASCII digit? +func isASCIIDigit(c byte) bool { + return '0' <= c && c <= '9' +} + +// CamelCase returns the CamelCased name. +// If there is an interior underscore followed by a lower case letter, +// drop the underscore and convert the letter to upper case. +// There is a remote possibility of this rewrite causing a name collision, +// but it's so remote we're prepared to pretend it's nonexistent - since the +// C++ generator lowercases names, it's extremely unlikely to have two fields +// with different capitalizations. +// In short, _my_field_name_2 becomes XMyFieldName_2. +func CamelCase(s string) string { + if s == "" { + return "" + } + t := make([]byte, 0, 32) + i := 0 + if s[0] == '_' { + // Need a capital letter; drop the '_'. + t = append(t, 'X') + i++ + } + // Invariant: if the next letter is lower case, it must be converted + // to upper case. + // That is, we process a word at a time, where words are marked by _ or + // upper case letter. Digits are treated as words. + for ; i < len(s); i++ { + c := s[i] + if c == '_' && i+1 < len(s) && isASCIILower(s[i+1]) { + continue // Skip the underscore in s. + } + if isASCIIDigit(c) { + t = append(t, c) + continue + } + // Assume we have a letter now - if not, it's a bogus identifier. + // The next word is a sequence of characters that must start upper case. + if isASCIILower(c) { + c ^= ' ' // Make it a capital letter. 
+ } + t = append(t, c) // Guaranteed not lower case. + // Accept lower case sequence that follows. + for i+1 < len(s) && isASCIILower(s[i+1]) { + i++ + t = append(t, s[i]) + } + } + return string(t) +} + +// CamelCaseSlice is like CamelCase, but the argument is a slice of strings to +// be joined with "_". +func CamelCaseSlice(elem []string) string { return CamelCase(strings.Join(elem, "_")) } + +// dottedSlice turns a sliced name into a dotted name. +func dottedSlice(elem []string) string { return strings.Join(elem, ".") } + +// Given a .proto file name, return the output name for the generated Go program. +func goFileName(name string) string { + ext := path.Ext(name) + if ext == ".proto" || ext == ".protodevel" { + name = name[0 : len(name)-len(ext)] + } + return name + ".pb.go" +} + +// Is this field optional? +func isOptional(field *descriptor.FieldDescriptorProto) bool { + return field.Label != nil && *field.Label == descriptor.FieldDescriptorProto_LABEL_OPTIONAL +} + +// Is this field required? +func isRequired(field *descriptor.FieldDescriptorProto) bool { + return field.Label != nil && *field.Label == descriptor.FieldDescriptorProto_LABEL_REQUIRED +} + +// Is this field repeated? +func isRepeated(field *descriptor.FieldDescriptorProto) bool { + return field.Label != nil && *field.Label == descriptor.FieldDescriptorProto_LABEL_REPEATED +} + +// badToUnderscore is the mapping function used to generate Go names from package names, +// which can be dotted in the input .proto file. It replaces non-identifier characters such as +// dot or dash with underscore. +func badToUnderscore(r rune) rune { + if unicode.IsLetter(r) || unicode.IsDigit(r) || r == '_' { + return r + } + return '_' +} + +// baseName returns the last path element of the name, with the last dotted suffix removed. 
+func baseName(name string) string { + // First, find the last element + if i := strings.LastIndex(name, "/"); i >= 0 { + name = name[i+1:] + } + // Now drop the suffix + if i := strings.LastIndex(name, "."); i >= 0 { + name = name[0:i] + } + return name +} + +// The SourceCodeInfo message describes the location of elements of a parsed +// .proto file by way of a "path", which is a sequence of integers that +// describe the route from a FileDescriptorProto to the relevant submessage. +// The path alternates between a field number of a repeated field, and an index +// into that repeated field. The constants below define the field numbers that +// are used. +// +// See descriptor.proto for more information about this. +const ( + // tag numbers in FileDescriptorProto + packagePath = 2 // package + messagePath = 4 // message_type + enumPath = 5 // enum_type + // tag numbers in DescriptorProto + messageFieldPath = 2 // field + messageMessagePath = 3 // nested_type + messageEnumPath = 4 // enum_type + // tag numbers in EnumDescriptorProto + enumValuePath = 2 // value +) diff -Nru juju-core-2.0~beta15/src/github.com/golang/protobuf/protoc-gen-go/generator/Makefile juju-core-2.0.0/src/github.com/golang/protobuf/protoc-gen-go/generator/Makefile --- juju-core-2.0~beta15/src/github.com/golang/protobuf/protoc-gen-go/generator/Makefile 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/golang/protobuf/protoc-gen-go/generator/Makefile 2016-10-13 14:32:09.000000000 +0000 @@ -0,0 +1,40 @@ +# Go support for Protocol Buffers - Google's data interchange format +# +# Copyright 2010 The Go Authors. All rights reserved. +# https://github.com/golang/protobuf +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. 
+# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +include $(GOROOT)/src/Make.inc + +TARG=github.com/golang/protobuf/compiler/generator +GOFILES=\ + generator.go\ + +DEPS=../descriptor ../plugin ../../proto + +include $(GOROOT)/src/Make.pkg diff -Nru juju-core-2.0~beta15/src/github.com/golang/protobuf/protoc-gen-go/generator/name_test.go juju-core-2.0.0/src/github.com/golang/protobuf/protoc-gen-go/generator/name_test.go --- juju-core-2.0~beta15/src/github.com/golang/protobuf/protoc-gen-go/generator/name_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/golang/protobuf/protoc-gen-go/generator/name_test.go 2016-10-13 14:32:09.000000000 +0000 @@ -0,0 +1,56 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2013 The Go Authors. All rights reserved. 
+// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +package generator + +import ( + "testing" +) + +func TestCamelCase(t *testing.T) { + tests := []struct { + in, want string + }{ + {"one", "One"}, + {"one_two", "OneTwo"}, + {"_my_field_name_2", "XMyFieldName_2"}, + {"Something_Capped", "Something_Capped"}, + {"my_Name", "My_Name"}, + {"OneTwo", "OneTwo"}, + {"_", "X"}, + {"_a_", "XA_"}, + } + for _, tc := range tests { + if got := CamelCase(tc.in); got != tc.want { + t.Errorf("CamelCase(%q) = %q, want %q", tc.in, got, tc.want) + } + } +} diff -Nru juju-core-2.0~beta15/src/github.com/golang/protobuf/protoc-gen-go/internal/grpc/grpc.go juju-core-2.0.0/src/github.com/golang/protobuf/protoc-gen-go/internal/grpc/grpc.go --- juju-core-2.0~beta15/src/github.com/golang/protobuf/protoc-gen-go/internal/grpc/grpc.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/golang/protobuf/protoc-gen-go/internal/grpc/grpc.go 2016-10-13 14:32:09.000000000 +0000 @@ -0,0 +1,436 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2015 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Package grpc outputs gRPC service descriptions in Go code. +// It runs as a plugin for the Go protocol buffer compiler plugin. +// It is linked in to protoc-gen-go. +package grpc + +import ( + "fmt" + "path" + "strconv" + "strings" + + pb "github.com/golang/protobuf/protoc-gen-go/descriptor" + "github.com/golang/protobuf/protoc-gen-go/generator" +) + +// Paths for packages used by code generated in this file, +// relative to the import_prefix of the generator.Generator. +const ( + contextPkgPath = "golang.org/x/net/context" + grpcPkgPath = "google.golang.org/grpc" +) + +func init() { + generator.RegisterPlugin(new(grpc)) +} + +// grpc is an implementation of the Go protocol buffer compiler's +// plugin architecture. It generates bindings for gRPC support. +type grpc struct { + gen *generator.Generator +} + +// Name returns the name of this plugin, "grpc". +func (g *grpc) Name() string { + return "grpc" +} + +// The names for packages imported in the generated code. +// They may vary from the final path component of the import path +// if the name is used by other packages. +var ( + contextPkg string + grpcPkg string +) + +// Init initializes the plugin. 
+func (g *grpc) Init(gen *generator.Generator) { + g.gen = gen + contextPkg = generator.RegisterUniquePackageName("context", nil) + grpcPkg = generator.RegisterUniquePackageName("grpc", nil) +} + +// Given a type name defined in a .proto, return its object. +// Also record that we're using it, to guarantee the associated import. +func (g *grpc) objectNamed(name string) generator.Object { + g.gen.RecordTypeUse(name) + return g.gen.ObjectNamed(name) +} + +// Given a type name defined in a .proto, return its name as we will print it. +func (g *grpc) typeName(str string) string { + return g.gen.TypeName(g.objectNamed(str)) +} + +// P forwards to g.gen.P. +func (g *grpc) P(args ...interface{}) { g.gen.P(args...) } + +// Generate generates code for the services in the given file. +func (g *grpc) Generate(file *generator.FileDescriptor) { + for i, service := range file.FileDescriptorProto.Service { + g.generateService(file, service, i) + } +} + +// GenerateImports generates the import declaration for this file. +func (g *grpc) GenerateImports(file *generator.FileDescriptor) { + if len(file.FileDescriptorProto.Service) == 0 { + return + } + g.P("import (") + g.P(contextPkg, " ", strconv.Quote(path.Join(g.gen.ImportPrefix, contextPkgPath))) + g.P(grpcPkg, " ", strconv.Quote(path.Join(g.gen.ImportPrefix, grpcPkgPath))) + g.P(")") + g.P() + g.P("// Reference imports to suppress errors if they are not otherwise used.") + g.P("var _ ", contextPkg, ".Context") + g.P("var _ ", grpcPkg, ".ClientConn") + g.P() +} + +// reservedClientName records whether a client name is reserved on the client side. +var reservedClientName = map[string]bool{ +// TODO: do we need any in gRPC? +} + +func unexport(s string) string { return strings.ToLower(s[:1]) + s[1:] } + +// generateService generates all the code for the named service. 
+func (g *grpc) generateService(file *generator.FileDescriptor, service *pb.ServiceDescriptorProto, index int) { + path := fmt.Sprintf("6,%d", index) // 6 means service. + + origServName := service.GetName() + fullServName := file.GetPackage() + "." + origServName + servName := generator.CamelCase(origServName) + + g.P() + g.P("// Client API for ", servName, " service") + g.P() + + // Client interface. + g.P("type ", servName, "Client interface {") + for i, method := range service.Method { + g.gen.PrintComments(fmt.Sprintf("%s,2,%d", path, i)) // 2 means method in a service. + g.P(g.generateClientSignature(servName, method)) + } + g.P("}") + g.P() + + // Client structure. + g.P("type ", unexport(servName), "Client struct {") + g.P("cc *", grpcPkg, ".ClientConn") + g.P("}") + g.P() + + // NewClient factory. + g.P("func New", servName, "Client (cc *", grpcPkg, ".ClientConn) ", servName, "Client {") + g.P("return &", unexport(servName), "Client{cc}") + g.P("}") + g.P() + + var methodIndex, streamIndex int + serviceDescVar := "_" + servName + "_serviceDesc" + // Client method implementations. + for _, method := range service.Method { + var descExpr string + if !method.GetServerStreaming() && !method.GetClientStreaming() { + // Unary RPC method + descExpr = fmt.Sprintf("&%s.Methods[%d]", serviceDescVar, methodIndex) + methodIndex++ + } else { + // Streaming RPC method + descExpr = fmt.Sprintf("&%s.Streams[%d]", serviceDescVar, streamIndex) + streamIndex++ + } + g.generateClientMethod(servName, fullServName, serviceDescVar, method, descExpr) + } + + g.P("// Server API for ", servName, " service") + g.P() + + // Server interface. + serverType := servName + "Server" + g.P("type ", serverType, " interface {") + for i, method := range service.Method { + g.gen.PrintComments(fmt.Sprintf("%s,2,%d", path, i)) // 2 means method in a service. + g.P(g.generateServerSignature(servName, method)) + } + g.P("}") + g.P() + + // Server registration. 
+ g.P("func Register", servName, "Server(s *", grpcPkg, ".Server, srv ", serverType, ") {") + g.P("s.RegisterService(&", serviceDescVar, `, srv)`) + g.P("}") + g.P() + + // Server handler implementations. + var handlerNames []string + for _, method := range service.Method { + hname := g.generateServerMethod(servName, method) + handlerNames = append(handlerNames, hname) + } + + // Service descriptor. + g.P("var ", serviceDescVar, " = ", grpcPkg, ".ServiceDesc {") + g.P("ServiceName: ", strconv.Quote(fullServName), ",") + g.P("HandlerType: (*", serverType, ")(nil),") + g.P("Methods: []", grpcPkg, ".MethodDesc{") + for i, method := range service.Method { + if method.GetServerStreaming() || method.GetClientStreaming() { + continue + } + g.P("{") + g.P("MethodName: ", strconv.Quote(method.GetName()), ",") + g.P("Handler: ", handlerNames[i], ",") + g.P("},") + } + g.P("},") + g.P("Streams: []", grpcPkg, ".StreamDesc{") + for i, method := range service.Method { + if !method.GetServerStreaming() && !method.GetClientStreaming() { + continue + } + g.P("{") + g.P("StreamName: ", strconv.Quote(method.GetName()), ",") + g.P("Handler: ", handlerNames[i], ",") + if method.GetServerStreaming() { + g.P("ServerStreams: true,") + } + if method.GetClientStreaming() { + g.P("ClientStreams: true,") + } + g.P("},") + } + g.P("},") + g.P("}") + g.P() +} + +// generateClientSignature returns the client-side signature for a method. 
+func (g *grpc) generateClientSignature(servName string, method *pb.MethodDescriptorProto) string { + origMethName := method.GetName() + methName := generator.CamelCase(origMethName) + if reservedClientName[methName] { + methName += "_" + } + reqArg := ", in *" + g.typeName(method.GetInputType()) + if method.GetClientStreaming() { + reqArg = "" + } + respName := "*" + g.typeName(method.GetOutputType()) + if method.GetServerStreaming() || method.GetClientStreaming() { + respName = servName + "_" + generator.CamelCase(origMethName) + "Client" + } + return fmt.Sprintf("%s(ctx %s.Context%s, opts ...%s.CallOption) (%s, error)", methName, contextPkg, reqArg, grpcPkg, respName) +} + +func (g *grpc) generateClientMethod(servName, fullServName, serviceDescVar string, method *pb.MethodDescriptorProto, descExpr string) { + sname := fmt.Sprintf("/%s/%s", fullServName, method.GetName()) + methName := generator.CamelCase(method.GetName()) + inType := g.typeName(method.GetInputType()) + outType := g.typeName(method.GetOutputType()) + + g.P("func (c *", unexport(servName), "Client) ", g.generateClientSignature(servName, method), "{") + if !method.GetServerStreaming() && !method.GetClientStreaming() { + g.P("out := new(", outType, ")") + // TODO: Pass descExpr to Invoke. 
+ g.P("err := ", grpcPkg, `.Invoke(ctx, "`, sname, `", in, out, c.cc, opts...)`) + g.P("if err != nil { return nil, err }") + g.P("return out, nil") + g.P("}") + g.P() + return + } + streamType := unexport(servName) + methName + "Client" + g.P("stream, err := ", grpcPkg, ".NewClientStream(ctx, ", descExpr, `, c.cc, "`, sname, `", opts...)`) + g.P("if err != nil { return nil, err }") + g.P("x := &", streamType, "{stream}") + if !method.GetClientStreaming() { + g.P("if err := x.ClientStream.SendMsg(in); err != nil { return nil, err }") + g.P("if err := x.ClientStream.CloseSend(); err != nil { return nil, err }") + } + g.P("return x, nil") + g.P("}") + g.P() + + genSend := method.GetClientStreaming() + genRecv := method.GetServerStreaming() + genCloseAndRecv := !method.GetServerStreaming() + + // Stream auxiliary types and methods. + g.P("type ", servName, "_", methName, "Client interface {") + if genSend { + g.P("Send(*", inType, ") error") + } + if genRecv { + g.P("Recv() (*", outType, ", error)") + } + if genCloseAndRecv { + g.P("CloseAndRecv() (*", outType, ", error)") + } + g.P(grpcPkg, ".ClientStream") + g.P("}") + g.P() + + g.P("type ", streamType, " struct {") + g.P(grpcPkg, ".ClientStream") + g.P("}") + g.P() + + if genSend { + g.P("func (x *", streamType, ") Send(m *", inType, ") error {") + g.P("return x.ClientStream.SendMsg(m)") + g.P("}") + g.P() + } + if genRecv { + g.P("func (x *", streamType, ") Recv() (*", outType, ", error) {") + g.P("m := new(", outType, ")") + g.P("if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err }") + g.P("return m, nil") + g.P("}") + g.P() + } + if genCloseAndRecv { + g.P("func (x *", streamType, ") CloseAndRecv() (*", outType, ", error) {") + g.P("if err := x.ClientStream.CloseSend(); err != nil { return nil, err }") + g.P("m := new(", outType, ")") + g.P("if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err }") + g.P("return m, nil") + g.P("}") + g.P() + } +} + +// generateServerSignature returns 
the server-side signature for a method. +func (g *grpc) generateServerSignature(servName string, method *pb.MethodDescriptorProto) string { + origMethName := method.GetName() + methName := generator.CamelCase(origMethName) + if reservedClientName[methName] { + methName += "_" + } + + var reqArgs []string + ret := "error" + if !method.GetServerStreaming() && !method.GetClientStreaming() { + reqArgs = append(reqArgs, contextPkg+".Context") + ret = "(*" + g.typeName(method.GetOutputType()) + ", error)" + } + if !method.GetClientStreaming() { + reqArgs = append(reqArgs, "*"+g.typeName(method.GetInputType())) + } + if method.GetServerStreaming() || method.GetClientStreaming() { + reqArgs = append(reqArgs, servName+"_"+generator.CamelCase(origMethName)+"Server") + } + + return methName + "(" + strings.Join(reqArgs, ", ") + ") " + ret +} + +func (g *grpc) generateServerMethod(servName string, method *pb.MethodDescriptorProto) string { + methName := generator.CamelCase(method.GetName()) + hname := fmt.Sprintf("_%s_%s_Handler", servName, methName) + inType := g.typeName(method.GetInputType()) + outType := g.typeName(method.GetOutputType()) + + if !method.GetServerStreaming() && !method.GetClientStreaming() { + g.P("func ", hname, "(srv interface{}, ctx ", contextPkg, ".Context, codec ", grpcPkg, ".Codec, buf []byte) (interface{}, error) {") + g.P("in := new(", inType, ")") + g.P("if err := codec.Unmarshal(buf, in); err != nil { return nil, err }") + g.P("out, err := srv.(", servName, "Server).", methName, "(ctx, in)") + g.P("if err != nil { return nil, err }") + g.P("return out, nil") + g.P("}") + g.P() + return hname + } + streamType := unexport(servName) + methName + "Server" + g.P("func ", hname, "(srv interface{}, stream ", grpcPkg, ".ServerStream) error {") + if !method.GetClientStreaming() { + g.P("m := new(", inType, ")") + g.P("if err := stream.RecvMsg(m); err != nil { return err }") + g.P("return srv.(", servName, "Server).", methName, "(m, &", streamType, 
"{stream})") + } else { + g.P("return srv.(", servName, "Server).", methName, "(&", streamType, "{stream})") + } + g.P("}") + g.P() + + genSend := method.GetServerStreaming() + genSendAndClose := !method.GetServerStreaming() + genRecv := method.GetClientStreaming() + + // Stream auxiliary types and methods. + g.P("type ", servName, "_", methName, "Server interface {") + if genSend { + g.P("Send(*", outType, ") error") + } + if genSendAndClose { + g.P("SendAndClose(*", outType, ") error") + } + if genRecv { + g.P("Recv() (*", inType, ", error)") + } + g.P(grpcPkg, ".ServerStream") + g.P("}") + g.P() + + g.P("type ", streamType, " struct {") + g.P(grpcPkg, ".ServerStream") + g.P("}") + g.P() + + if genSend { + g.P("func (x *", streamType, ") Send(m *", outType, ") error {") + g.P("return x.ServerStream.SendMsg(m)") + g.P("}") + g.P() + } + if genSendAndClose { + g.P("func (x *", streamType, ") SendAndClose(m *", outType, ") error {") + g.P("return x.ServerStream.SendMsg(m)") + g.P("}") + g.P() + } + if genRecv { + g.P("func (x *", streamType, ") Recv() (*", inType, ", error) {") + g.P("m := new(", inType, ")") + g.P("if err := x.ServerStream.RecvMsg(m); err != nil { return nil, err }") + g.P("return m, nil") + g.P("}") + g.P() + } + + return hname +} diff -Nru juju-core-2.0~beta15/src/github.com/golang/protobuf/protoc-gen-go/link_grpc.go juju-core-2.0.0/src/github.com/golang/protobuf/protoc-gen-go/link_grpc.go --- juju-core-2.0~beta15/src/github.com/golang/protobuf/protoc-gen-go/link_grpc.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/golang/protobuf/protoc-gen-go/link_grpc.go 2016-10-13 14:32:09.000000000 +0000 @@ -0,0 +1,34 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2015 The Go Authors. All rights reserved. 
+// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +package main + +import _ "github.com/golang/protobuf/protoc-gen-go/internal/grpc" diff -Nru juju-core-2.0~beta15/src/github.com/golang/protobuf/protoc-gen-go/main.go juju-core-2.0.0/src/github.com/golang/protobuf/protoc-gen-go/main.go --- juju-core-2.0~beta15/src/github.com/golang/protobuf/protoc-gen-go/main.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/golang/protobuf/protoc-gen-go/main.go 2016-10-13 14:32:09.000000000 +0000 @@ -0,0 +1,98 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// protoc-gen-go is a plugin for the Google protocol buffer compiler to generate +// Go code. Run it by building this program and putting it in your path with +// the name +// protoc-gen-go +// That word 'go' at the end becomes part of the option string set for the +// protocol compiler, so once the protocol compiler (protoc) is installed +// you can run +// protoc --go_out=output_directory input_directory/file.proto +// to generate Go bindings for the protocol defined by file.proto. +// With that input, the output will be written to +// output_directory/file.pb.go +// +// The generated code is documented in the package comment for +// the library. +// +// See the README and documentation for protocol buffers to learn more: +// https://developers.google.com/protocol-buffers/ +package main + +import ( + "io/ioutil" + "os" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/protoc-gen-go/generator" +) + +func main() { + // Begin by allocating a generator. The request and response structures are stored there + // so we can do error handling easily - the response structure contains the field to + // report failure. 
+ g := generator.New() + + data, err := ioutil.ReadAll(os.Stdin) + if err != nil { + g.Error(err, "reading input") + } + + if err := proto.Unmarshal(data, g.Request); err != nil { + g.Error(err, "parsing input proto") + } + + if len(g.Request.FileToGenerate) == 0 { + g.Fail("no files to generate") + } + + g.CommandLineParameters(g.Request.GetParameter()) + + // Create a wrapped version of the Descriptors and EnumDescriptors that + // point to the file that defines them. + g.WrapTypes() + + g.SetPackageNames() + g.BuildTypeNameMap() + + g.GenerateAllFiles() + + // Send back the results. + data, err = proto.Marshal(g.Response) + if err != nil { + g.Error(err, "failed to marshal output proto") + } + _, err = os.Stdout.Write(data) + if err != nil { + g.Error(err, "failed to write output proto") + } +} diff -Nru juju-core-2.0~beta15/src/github.com/golang/protobuf/protoc-gen-go/Makefile juju-core-2.0.0/src/github.com/golang/protobuf/protoc-gen-go/Makefile --- juju-core-2.0~beta15/src/github.com/golang/protobuf/protoc-gen-go/Makefile 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/golang/protobuf/protoc-gen-go/Makefile 2016-10-13 14:32:09.000000000 +0000 @@ -0,0 +1,35 @@ +# Go support for Protocol Buffers - Google's data interchange format +# +# Copyright 2010 The Go Authors. All rights reserved. +# https://github.com/golang/protobuf +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. 
nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +include $(GOROOT)/src/Make.cmd + +test: + cd testdata && make test diff -Nru juju-core-2.0~beta15/src/github.com/golang/protobuf/protoc-gen-go/plugin/Makefile juju-core-2.0.0/src/github.com/golang/protobuf/protoc-gen-go/plugin/Makefile --- juju-core-2.0~beta15/src/github.com/golang/protobuf/protoc-gen-go/plugin/Makefile 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/golang/protobuf/protoc-gen-go/plugin/Makefile 2016-10-13 14:32:09.000000000 +0000 @@ -0,0 +1,46 @@ +# Go support for Protocol Buffers - Google's data interchange format +# +# Copyright 2010 The Go Authors. All rights reserved. +# https://github.com/golang/protobuf +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. 
+# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# Not stored here, but plugin.proto is in https://github.com/google/protobuf/ +# at src/google/protobuf/compiler/plugin.proto +# Also we need to fix an import. +regenerate: + echo WARNING! THIS RULE IS PROBABLY NOT RIGHT FOR YOUR INSTALLATION + cd $(HOME)/src/protobuf/src && \ + protoc --go_out=Mgoogle/protobuf/descriptor.proto=github.com/golang/protobuf/protoc-gen-go/descriptor:. 
\ + ./google/protobuf/compiler/plugin.proto && \ + cat ./google/protobuf/compiler/plugin.pb.go > $(GOPATH)/src/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.go + +restore: + cp plugin.pb.golden plugin.pb.go + +preserve: + cp plugin.pb.go plugin.pb.golden diff -Nru juju-core-2.0~beta15/src/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.go juju-core-2.0.0/src/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.go --- juju-core-2.0~beta15/src/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.go 2016-10-13 14:32:09.000000000 +0000 @@ -0,0 +1,189 @@ +// Code generated by protoc-gen-go. +// source: google/protobuf/compiler/plugin.proto +// DO NOT EDIT! + +/* +Package google_protobuf_compiler is a generated protocol buffer package. + +It is generated from these files: + google/protobuf/compiler/plugin.proto + +It has these top-level messages: + CodeGeneratorRequest + CodeGeneratorResponse +*/ +package google_protobuf_compiler + +import proto "github.com/golang/protobuf/proto" +import math "math" +import google_protobuf "github.com/golang/protobuf/protoc-gen-go/descriptor" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = math.Inf + +// An encoded CodeGeneratorRequest is written to the plugin's stdin. +type CodeGeneratorRequest struct { + // The .proto files that were explicitly listed on the command-line. The + // code generator should generate code only for these files. Each file's + // descriptor will be included in proto_file, below. + FileToGenerate []string `protobuf:"bytes,1,rep,name=file_to_generate" json:"file_to_generate,omitempty"` + // The generator parameter passed on the command-line. 
+ Parameter *string `protobuf:"bytes,2,opt,name=parameter" json:"parameter,omitempty"` + // FileDescriptorProtos for all files in files_to_generate and everything + // they import. The files will appear in topological order, so each file + // appears before any file that imports it. + // + // protoc guarantees that all proto_files will be written after + // the fields above, even though this is not technically guaranteed by the + // protobuf wire format. This theoretically could allow a plugin to stream + // in the FileDescriptorProtos and handle them one by one rather than read + // the entire set into memory at once. However, as of this writing, this + // is not similarly optimized on protoc's end -- it will store all fields in + // memory at once before sending them to the plugin. + ProtoFile []*google_protobuf.FileDescriptorProto `protobuf:"bytes,15,rep,name=proto_file" json:"proto_file,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CodeGeneratorRequest) Reset() { *m = CodeGeneratorRequest{} } +func (m *CodeGeneratorRequest) String() string { return proto.CompactTextString(m) } +func (*CodeGeneratorRequest) ProtoMessage() {} + +func (m *CodeGeneratorRequest) GetFileToGenerate() []string { + if m != nil { + return m.FileToGenerate + } + return nil +} + +func (m *CodeGeneratorRequest) GetParameter() string { + if m != nil && m.Parameter != nil { + return *m.Parameter + } + return "" +} + +func (m *CodeGeneratorRequest) GetProtoFile() []*google_protobuf.FileDescriptorProto { + if m != nil { + return m.ProtoFile + } + return nil +} + +// The plugin writes an encoded CodeGeneratorResponse to stdout. +type CodeGeneratorResponse struct { + // Error message. If non-empty, code generation failed. The plugin process + // should exit with status code zero even if it reports an error in this way. + // + // This should be used to indicate errors in .proto files which prevent the + // code generator from generating correct code. 
Errors which indicate a + // problem in protoc itself -- such as the input CodeGeneratorRequest being + // unparseable -- should be reported by writing a message to stderr and + // exiting with a non-zero status code. + Error *string `protobuf:"bytes,1,opt,name=error" json:"error,omitempty"` + File []*CodeGeneratorResponse_File `protobuf:"bytes,15,rep,name=file" json:"file,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CodeGeneratorResponse) Reset() { *m = CodeGeneratorResponse{} } +func (m *CodeGeneratorResponse) String() string { return proto.CompactTextString(m) } +func (*CodeGeneratorResponse) ProtoMessage() {} + +func (m *CodeGeneratorResponse) GetError() string { + if m != nil && m.Error != nil { + return *m.Error + } + return "" +} + +func (m *CodeGeneratorResponse) GetFile() []*CodeGeneratorResponse_File { + if m != nil { + return m.File + } + return nil +} + +// Represents a single generated file. +type CodeGeneratorResponse_File struct { + // The file name, relative to the output directory. The name must not + // contain "." or ".." components and must be relative, not be absolute (so, + // the file cannot lie outside the output directory). "/" must be used as + // the path separator, not "\". + // + // If the name is omitted, the content will be appended to the previous + // file. This allows the generator to break large files into small chunks, + // and allows the generated text to be streamed back to protoc so that large + // files need not reside completely in memory at one time. Note that as of + // this writing protoc does not optimize for this -- it will read the entire + // CodeGeneratorResponse before writing files to disk. + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // If non-empty, indicates that the named file should already exist, and the + // content here is to be inserted into that file at a defined insertion + // point. 
This feature allows a code generator to extend the output + // produced by another code generator. The original generator may provide + // insertion points by placing special annotations in the file that look + // like: + // @@protoc_insertion_point(NAME) + // The annotation can have arbitrary text before and after it on the line, + // which allows it to be placed in a comment. NAME should be replaced with + // an identifier naming the point -- this is what other generators will use + // as the insertion_point. Code inserted at this point will be placed + // immediately above the line containing the insertion point (thus multiple + // insertions to the same point will come out in the order they were added). + // The double-@ is intended to make it unlikely that the generated code + // could contain things that look like insertion points by accident. + // + // For example, the C++ code generator places the following line in the + // .pb.h files that it generates: + // // @@protoc_insertion_point(namespace_scope) + // This line appears within the scope of the file's package namespace, but + // outside of any particular class. Another plugin can then specify the + // insertion_point "namespace_scope" to generate additional classes or + // other declarations that should be placed in this scope. + // + // Note that if the line containing the insertion point begins with + // whitespace, the same whitespace will be added to every line of the + // inserted text. This is useful for languages like Python, where + // indentation matters. In these languages, the insertion point comment + // should be indented the same amount as any inserted code will need to be + // in order to work correctly in that context. + // + // The code generator that generates the initial file and the one which + // inserts into it must both run as part of a single invocation of protoc. + // Code generators are executed in the order in which they appear on the + // command line. 
+ // + // If |insertion_point| is present, |name| must also be present. + InsertionPoint *string `protobuf:"bytes,2,opt,name=insertion_point" json:"insertion_point,omitempty"` + // The file contents. + Content *string `protobuf:"bytes,15,opt,name=content" json:"content,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CodeGeneratorResponse_File) Reset() { *m = CodeGeneratorResponse_File{} } +func (m *CodeGeneratorResponse_File) String() string { return proto.CompactTextString(m) } +func (*CodeGeneratorResponse_File) ProtoMessage() {} + +func (m *CodeGeneratorResponse_File) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *CodeGeneratorResponse_File) GetInsertionPoint() string { + if m != nil && m.InsertionPoint != nil { + return *m.InsertionPoint + } + return "" +} + +func (m *CodeGeneratorResponse_File) GetContent() string { + if m != nil && m.Content != nil { + return *m.Content + } + return "" +} + +func init() { +} diff -Nru juju-core-2.0~beta15/src/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.golden juju-core-2.0.0/src/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.golden --- juju-core-2.0~beta15/src/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.golden 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.golden 2016-10-13 14:32:09.000000000 +0000 @@ -0,0 +1,83 @@ +// Code generated by protoc-gen-go. +// source: google/protobuf/compiler/plugin.proto +// DO NOT EDIT! + +package google_protobuf_compiler + +import proto "github.com/golang/protobuf/proto" +import "math" +import google_protobuf "github.com/golang/protobuf/protoc-gen-go/descriptor" + +// Reference proto and math imports to suppress error if they are not otherwise used. 
+var _ = proto.GetString +var _ = math.Inf + +type CodeGeneratorRequest struct { + FileToGenerate []string `protobuf:"bytes,1,rep,name=file_to_generate" json:"file_to_generate,omitempty"` + Parameter *string `protobuf:"bytes,2,opt,name=parameter" json:"parameter,omitempty"` + ProtoFile []*google_protobuf.FileDescriptorProto `protobuf:"bytes,15,rep,name=proto_file" json:"proto_file,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (this *CodeGeneratorRequest) Reset() { *this = CodeGeneratorRequest{} } +func (this *CodeGeneratorRequest) String() string { return proto.CompactTextString(this) } +func (*CodeGeneratorRequest) ProtoMessage() {} + +func (this *CodeGeneratorRequest) GetParameter() string { + if this != nil && this.Parameter != nil { + return *this.Parameter + } + return "" +} + +type CodeGeneratorResponse struct { + Error *string `protobuf:"bytes,1,opt,name=error" json:"error,omitempty"` + File []*CodeGeneratorResponse_File `protobuf:"bytes,15,rep,name=file" json:"file,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (this *CodeGeneratorResponse) Reset() { *this = CodeGeneratorResponse{} } +func (this *CodeGeneratorResponse) String() string { return proto.CompactTextString(this) } +func (*CodeGeneratorResponse) ProtoMessage() {} + +func (this *CodeGeneratorResponse) GetError() string { + if this != nil && this.Error != nil { + return *this.Error + } + return "" +} + +type CodeGeneratorResponse_File struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + InsertionPoint *string `protobuf:"bytes,2,opt,name=insertion_point" json:"insertion_point,omitempty"` + Content *string `protobuf:"bytes,15,opt,name=content" json:"content,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (this *CodeGeneratorResponse_File) Reset() { *this = CodeGeneratorResponse_File{} } +func (this *CodeGeneratorResponse_File) String() string { return proto.CompactTextString(this) } +func (*CodeGeneratorResponse_File) ProtoMessage() 
{} + +func (this *CodeGeneratorResponse_File) GetName() string { + if this != nil && this.Name != nil { + return *this.Name + } + return "" +} + +func (this *CodeGeneratorResponse_File) GetInsertionPoint() string { + if this != nil && this.InsertionPoint != nil { + return *this.InsertionPoint + } + return "" +} + +func (this *CodeGeneratorResponse_File) GetContent() string { + if this != nil && this.Content != nil { + return *this.Content + } + return "" +} + +func init() { +} diff -Nru juju-core-2.0~beta15/src/github.com/golang/protobuf/protoc-gen-go/testdata/extension_base.proto juju-core-2.0.0/src/github.com/golang/protobuf/protoc-gen-go/testdata/extension_base.proto --- juju-core-2.0~beta15/src/github.com/golang/protobuf/protoc-gen-go/testdata/extension_base.proto 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/golang/protobuf/protoc-gen-go/testdata/extension_base.proto 2016-10-13 14:32:09.000000000 +0000 @@ -0,0 +1,46 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto2"; + +package extension_base; + +message BaseMessage { + optional int32 height = 1; + extensions 4 to 9; + extensions 16 to max; +} + +// Another message that may be extended, using message_set_wire_format. +message OldStyleMessage { + option message_set_wire_format = true; + extensions 100 to max; +} diff -Nru juju-core-2.0~beta15/src/github.com/golang/protobuf/protoc-gen-go/testdata/extension_extra.proto juju-core-2.0.0/src/github.com/golang/protobuf/protoc-gen-go/testdata/extension_extra.proto --- juju-core-2.0~beta15/src/github.com/golang/protobuf/protoc-gen-go/testdata/extension_extra.proto 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/golang/protobuf/protoc-gen-go/testdata/extension_extra.proto 2016-10-13 14:32:09.000000000 +0000 @@ -0,0 +1,38 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2011 The Go Authors. All rights reserved. 
+// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +syntax = "proto2"; + +package extension_extra; + +message ExtraMessage { + optional int32 width = 1; +} diff -Nru juju-core-2.0~beta15/src/github.com/golang/protobuf/protoc-gen-go/testdata/extension_test.go juju-core-2.0.0/src/github.com/golang/protobuf/protoc-gen-go/testdata/extension_test.go --- juju-core-2.0~beta15/src/github.com/golang/protobuf/protoc-gen-go/testdata/extension_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/golang/protobuf/protoc-gen-go/testdata/extension_test.go 2016-10-13 14:32:09.000000000 +0000 @@ -0,0 +1,210 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Test that we can use protocol buffers that use extensions. + +package testdata + +/* + +import ( + "bytes" + "regexp" + "testing" + + "github.com/golang/protobuf/proto" + base "extension_base.pb" + user "extension_user.pb" +) + +func TestSingleFieldExtension(t *testing.T) { + bm := &base.BaseMessage{ + Height: proto.Int32(178), + } + + // Use extension within scope of another type. + vol := proto.Uint32(11) + err := proto.SetExtension(bm, user.E_LoudMessage_Volume, vol) + if err != nil { + t.Fatal("Failed setting extension:", err) + } + buf, err := proto.Marshal(bm) + if err != nil { + t.Fatal("Failed encoding message with extension:", err) + } + bm_new := new(base.BaseMessage) + if err := proto.Unmarshal(buf, bm_new); err != nil { + t.Fatal("Failed decoding message with extension:", err) + } + if !proto.HasExtension(bm_new, user.E_LoudMessage_Volume) { + t.Fatal("Decoded message didn't contain extension.") + } + vol_out, err := proto.GetExtension(bm_new, user.E_LoudMessage_Volume) + if err != nil { + t.Fatal("Failed getting extension:", err) + } + if v := vol_out.(*uint32); *v != *vol { + t.Errorf("vol_out = %v, expected %v", *v, *vol) + } + proto.ClearExtension(bm_new, user.E_LoudMessage_Volume) + if proto.HasExtension(bm_new, user.E_LoudMessage_Volume) { + t.Fatal("Failed clearing extension.") + } +} + +func TestMessageExtension(t *testing.T) { + bm := &base.BaseMessage{ + Height: proto.Int32(179), + } + + // Use extension 
that is itself a message. + um := &user.UserMessage{ + Name: proto.String("Dave"), + Rank: proto.String("Major"), + } + err := proto.SetExtension(bm, user.E_LoginMessage_UserMessage, um) + if err != nil { + t.Fatal("Failed setting extension:", err) + } + buf, err := proto.Marshal(bm) + if err != nil { + t.Fatal("Failed encoding message with extension:", err) + } + bm_new := new(base.BaseMessage) + if err := proto.Unmarshal(buf, bm_new); err != nil { + t.Fatal("Failed decoding message with extension:", err) + } + if !proto.HasExtension(bm_new, user.E_LoginMessage_UserMessage) { + t.Fatal("Decoded message didn't contain extension.") + } + um_out, err := proto.GetExtension(bm_new, user.E_LoginMessage_UserMessage) + if err != nil { + t.Fatal("Failed getting extension:", err) + } + if n := um_out.(*user.UserMessage).Name; *n != *um.Name { + t.Errorf("um_out.Name = %q, expected %q", *n, *um.Name) + } + if r := um_out.(*user.UserMessage).Rank; *r != *um.Rank { + t.Errorf("um_out.Rank = %q, expected %q", *r, *um.Rank) + } + proto.ClearExtension(bm_new, user.E_LoginMessage_UserMessage) + if proto.HasExtension(bm_new, user.E_LoginMessage_UserMessage) { + t.Fatal("Failed clearing extension.") + } +} + +func TestTopLevelExtension(t *testing.T) { + bm := &base.BaseMessage{ + Height: proto.Int32(179), + } + + width := proto.Int32(17) + err := proto.SetExtension(bm, user.E_Width, width) + if err != nil { + t.Fatal("Failed setting extension:", err) + } + buf, err := proto.Marshal(bm) + if err != nil { + t.Fatal("Failed encoding message with extension:", err) + } + bm_new := new(base.BaseMessage) + if err := proto.Unmarshal(buf, bm_new); err != nil { + t.Fatal("Failed decoding message with extension:", err) + } + if !proto.HasExtension(bm_new, user.E_Width) { + t.Fatal("Decoded message didn't contain extension.") + } + width_out, err := proto.GetExtension(bm_new, user.E_Width) + if err != nil { + t.Fatal("Failed getting extension:", err) + } + if w := width_out.(*int32); *w != 
*width { + t.Errorf("width_out = %v, expected %v", *w, *width) + } + proto.ClearExtension(bm_new, user.E_Width) + if proto.HasExtension(bm_new, user.E_Width) { + t.Fatal("Failed clearing extension.") + } +} + +func TestMessageSetWireFormat(t *testing.T) { + osm := new(base.OldStyleMessage) + osp := &user.OldStyleParcel{ + Name: proto.String("Dave"), + Height: proto.Int32(178), + } + + err := proto.SetExtension(osm, user.E_OldStyleParcel_MessageSetExtension, osp) + if err != nil { + t.Fatal("Failed setting extension:", err) + } + + buf, err := proto.Marshal(osm) + if err != nil { + t.Fatal("Failed encoding message:", err) + } + + // Data generated from Python implementation. + expected := []byte{ + 11, 16, 209, 15, 26, 9, 10, 4, 68, 97, 118, 101, 16, 178, 1, 12, + } + + if !bytes.Equal(expected, buf) { + t.Errorf("Encoding mismatch.\nwant %+v\n got %+v", expected, buf) + } + + // Check that it is restored correctly. + osm = new(base.OldStyleMessage) + if err := proto.Unmarshal(buf, osm); err != nil { + t.Fatal("Failed decoding message:", err) + } + osp_out, err := proto.GetExtension(osm, user.E_OldStyleParcel_MessageSetExtension) + if err != nil { + t.Fatal("Failed getting extension:", err) + } + osp = osp_out.(*user.OldStyleParcel) + if *osp.Name != "Dave" || *osp.Height != 178 { + t.Errorf("Retrieved extension from decoded message is not correct: %+v", osp) + } +} + +func main() { + // simpler than rigging up gotest + testing.Main(regexp.MatchString, []testing.InternalTest{ + {"TestSingleFieldExtension", TestSingleFieldExtension}, + {"TestMessageExtension", TestMessageExtension}, + {"TestTopLevelExtension", TestTopLevelExtension}, + }, + []testing.InternalBenchmark{}, + []testing.InternalExample{}) +} + +*/ diff -Nru juju-core-2.0~beta15/src/github.com/golang/protobuf/protoc-gen-go/testdata/extension_user.proto juju-core-2.0.0/src/github.com/golang/protobuf/protoc-gen-go/testdata/extension_user.proto --- 
juju-core-2.0~beta15/src/github.com/golang/protobuf/protoc-gen-go/testdata/extension_user.proto 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/golang/protobuf/protoc-gen-go/testdata/extension_user.proto 2016-10-13 14:32:09.000000000 +0000 @@ -0,0 +1,100 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +syntax = "proto2"; + +import "extension_base.proto"; +import "extension_extra.proto"; + +package extension_user; + +message UserMessage { + optional string name = 1; + optional string rank = 2; +} + +// Extend with a message +extend extension_base.BaseMessage { + optional UserMessage user_message = 5; +} + +// Extend with a foreign message +extend extension_base.BaseMessage { + optional extension_extra.ExtraMessage extra_message = 9; +} + +// Extend with some primitive types +extend extension_base.BaseMessage { + optional int32 width = 6; + optional int64 area = 7; +} + +// Extend inside the scope of another type +message LoudMessage { + extend extension_base.BaseMessage { + optional uint32 volume = 8; + } + extensions 100 to max; +} + +// Extend inside the scope of another type, using a message. +message LoginMessage { + extend extension_base.BaseMessage { + optional UserMessage user_message = 16; + } +} + +// Extend with a repeated field +extend extension_base.BaseMessage { + repeated Detail detail = 17; +} + +message Detail { + optional string color = 1; +} + +// An extension of an extension +message Announcement { + optional string words = 1; + extend LoudMessage { + optional Announcement loud_ext = 100; + } +} + +// Something that can be put in a message set. 
+message OldStyleParcel { + extend extension_base.OldStyleMessage { + optional OldStyleParcel message_set_extension = 2001; + } + + required string name = 1; + optional int32 height = 2; +} diff -Nru juju-core-2.0~beta15/src/github.com/golang/protobuf/protoc-gen-go/testdata/golden_test.go juju-core-2.0.0/src/github.com/golang/protobuf/protoc-gen-go/testdata/golden_test.go --- juju-core-2.0~beta15/src/github.com/golang/protobuf/protoc-gen-go/testdata/golden_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/golang/protobuf/protoc-gen-go/testdata/golden_test.go 2016-10-13 14:32:09.000000000 +0000 @@ -0,0 +1,86 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2012 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Verify that the compiler output for test.proto is unchanged. + +package testdata + +import ( + "crypto/sha1" + "fmt" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "testing" +) + +// sum returns in string form (for easy comparison) the SHA-1 hash of the named file. +func sum(t *testing.T, name string) string { + data, err := ioutil.ReadFile(name) + if err != nil { + t.Fatal(err) + } + t.Logf("sum(%q): length is %d", name, len(data)) + hash := sha1.New() + _, err = hash.Write(data) + if err != nil { + t.Fatal(err) + } + return fmt.Sprintf("% x", hash.Sum(nil)) +} + +func run(t *testing.T, name string, args ...string) { + cmd := exec.Command(name, args...) + cmd.Stdin = os.Stdin + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + err := cmd.Run() + if err != nil { + t.Fatal(err) + } +} + +func TestGolden(t *testing.T) { + // Compute the original checksum. + goldenSum := sum(t, "my_test/test.pb.go") + // Run the proto compiler. + run(t, "protoc", "--go_out="+os.TempDir(), "my_test/test.proto") + newFile := filepath.Join(os.TempDir(), "my_test/test.pb.go") + defer os.Remove(newFile) + // Compute the new checksum. 
+ newSum := sum(t, newFile) + // Verify + if newSum != goldenSum { + run(t, "diff", "-u", "my_test/test.pb.go", newFile) + t.Fatal("Code generated by protoc-gen-go has changed; update test.pb.go") + } +} diff -Nru juju-core-2.0~beta15/src/github.com/golang/protobuf/protoc-gen-go/testdata/grpc.proto juju-core-2.0.0/src/github.com/golang/protobuf/protoc-gen-go/testdata/grpc.proto --- juju-core-2.0~beta15/src/github.com/golang/protobuf/protoc-gen-go/testdata/grpc.proto 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/golang/protobuf/protoc-gen-go/testdata/grpc.proto 2016-10-13 14:32:09.000000000 +0000 @@ -0,0 +1,59 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2015 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package grpc.testing; + +message SimpleRequest { +} + +message SimpleResponse { +} + +message StreamMsg { +} + +message StreamMsg2 { +} + +service Test { + rpc UnaryCall(SimpleRequest) returns (SimpleResponse); + + // This RPC streams from the server only. + rpc Downstream(SimpleRequest) returns (stream StreamMsg); + + // This RPC streams from the client. + rpc Upstream(stream StreamMsg) returns (SimpleResponse); + + // This one streams in both directions. + rpc Bidi(stream StreamMsg) returns (stream StreamMsg2); +} diff -Nru juju-core-2.0~beta15/src/github.com/golang/protobuf/protoc-gen-go/testdata/imp2.proto juju-core-2.0.0/src/github.com/golang/protobuf/protoc-gen-go/testdata/imp2.proto --- juju-core-2.0~beta15/src/github.com/golang/protobuf/protoc-gen-go/testdata/imp2.proto 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/golang/protobuf/protoc-gen-go/testdata/imp2.proto 2016-10-13 14:32:09.000000000 +0000 @@ -0,0 +1,43 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2011 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto2"; + +package imp; + +message PubliclyImportedMessage { + optional int64 field = 1; +} + +enum PubliclyImportedEnum { + GLASSES = 1; + HAIR = 2; +} diff -Nru juju-core-2.0~beta15/src/github.com/golang/protobuf/protoc-gen-go/testdata/imp3.proto juju-core-2.0.0/src/github.com/golang/protobuf/protoc-gen-go/testdata/imp3.proto --- juju-core-2.0~beta15/src/github.com/golang/protobuf/protoc-gen-go/testdata/imp3.proto 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/golang/protobuf/protoc-gen-go/testdata/imp3.proto 2016-10-13 14:32:09.000000000 +0000 @@ -0,0 +1,38 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2012 The Go Authors. All rights reserved. 
+// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +syntax = "proto2"; + +package imp; + +message ForeignImportedMessage { + optional string tuber = 1; +} diff -Nru juju-core-2.0~beta15/src/github.com/golang/protobuf/protoc-gen-go/testdata/imp.pb.go.golden juju-core-2.0.0/src/github.com/golang/protobuf/protoc-gen-go/testdata/imp.pb.go.golden --- juju-core-2.0~beta15/src/github.com/golang/protobuf/protoc-gen-go/testdata/imp.pb.go.golden 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/golang/protobuf/protoc-gen-go/testdata/imp.pb.go.golden 2016-10-13 14:32:09.000000000 +0000 @@ -0,0 +1,113 @@ +// Code generated by protoc-gen-go. +// source: imp.proto +// DO NOT EDIT! + +package imp + +import proto "github.com/golang/protobuf/proto" +import "math" +import "os" +import imp1 "imp2.pb" + +// Reference proto & math imports to suppress error if they are not otherwise used. +var _ = proto.GetString +var _ = math.Inf + +// Types from public import imp2.proto +type PubliclyImportedMessage imp1.PubliclyImportedMessage + +func (this *PubliclyImportedMessage) Reset() { (*imp1.PubliclyImportedMessage)(this).Reset() } +func (this *PubliclyImportedMessage) String() string { + return (*imp1.PubliclyImportedMessage)(this).String() +} + +// PubliclyImportedMessage from public import imp.proto + +type ImportedMessage_Owner int32 + +const ( + ImportedMessage_DAVE ImportedMessage_Owner = 1 + ImportedMessage_MIKE ImportedMessage_Owner = 2 +) + +var ImportedMessage_Owner_name = map[int32]string{ + 1: "DAVE", + 2: "MIKE", +} +var ImportedMessage_Owner_value = map[string]int32{ + "DAVE": 1, + "MIKE": 2, +} + +// NewImportedMessage_Owner is deprecated. Use x.Enum() instead. 
+func NewImportedMessage_Owner(x ImportedMessage_Owner) *ImportedMessage_Owner { + e := ImportedMessage_Owner(x) + return &e +} +func (x ImportedMessage_Owner) Enum() *ImportedMessage_Owner { + p := new(ImportedMessage_Owner) + *p = x + return p +} +func (x ImportedMessage_Owner) String() string { + return proto.EnumName(ImportedMessage_Owner_name, int32(x)) +} + +type ImportedMessage struct { + Field *int64 `protobuf:"varint,1,req,name=field" json:"field,omitempty"` + XXX_extensions map[int32][]byte `json:",omitempty"` + XXX_unrecognized []byte `json:",omitempty"` +} + +func (this *ImportedMessage) Reset() { *this = ImportedMessage{} } +func (this *ImportedMessage) String() string { return proto.CompactTextString(this) } + +var extRange_ImportedMessage = []proto.ExtensionRange{ + proto.ExtensionRange{90, 100}, +} + +func (*ImportedMessage) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_ImportedMessage +} +func (this *ImportedMessage) ExtensionMap() map[int32][]byte { + if this.XXX_extensions == nil { + this.XXX_extensions = make(map[int32][]byte) + } + return this.XXX_extensions +} + +type ImportedExtendable struct { + XXX_extensions map[int32][]byte `json:",omitempty"` + XXX_unrecognized []byte `json:",omitempty"` +} + +func (this *ImportedExtendable) Reset() { *this = ImportedExtendable{} } +func (this *ImportedExtendable) String() string { return proto.CompactTextString(this) } + +func (this *ImportedExtendable) Marshal() ([]byte, error) { + return proto.MarshalMessageSet(this.ExtensionMap()) +} +func (this *ImportedExtendable) Unmarshal(buf []byte) error { + return proto.UnmarshalMessageSet(buf, this.ExtensionMap()) +} +// ensure ImportedExtendable satisfies proto.Marshaler and proto.Unmarshaler +var _ proto.Marshaler = (*ImportedExtendable)(nil) +var _ proto.Unmarshaler = (*ImportedExtendable)(nil) + +var extRange_ImportedExtendable = []proto.ExtensionRange{ + proto.ExtensionRange{100, 536870911}, +} + +func (*ImportedExtendable) 
ExtensionRangeArray() []proto.ExtensionRange { + return extRange_ImportedExtendable +} +func (this *ImportedExtendable) ExtensionMap() map[int32][]byte { + if this.XXX_extensions == nil { + this.XXX_extensions = make(map[int32][]byte) + } + return this.XXX_extensions +} + +func init() { + proto.RegisterEnum("imp.ImportedMessage_Owner", ImportedMessage_Owner_name, ImportedMessage_Owner_value) +} diff -Nru juju-core-2.0~beta15/src/github.com/golang/protobuf/protoc-gen-go/testdata/imp.proto juju-core-2.0.0/src/github.com/golang/protobuf/protoc-gen-go/testdata/imp.proto --- juju-core-2.0~beta15/src/github.com/golang/protobuf/protoc-gen-go/testdata/imp.proto 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/golang/protobuf/protoc-gen-go/testdata/imp.proto 2016-10-13 14:32:09.000000000 +0000 @@ -0,0 +1,65 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto2"; + +package imp; + +import "imp2.proto"; +import "imp3.proto"; + +message ImportedMessage { + required int64 field = 1; + + // The forwarded getters for these fields are fiddly to get right. + optional ImportedMessage2 local_msg = 2; + optional ForeignImportedMessage foreign_msg = 3; // in imp3.proto + optional Owner enum_field = 4; + + repeated string name = 5; + repeated Owner boss = 6; + repeated ImportedMessage2 memo = 7; + + enum Owner { + DAVE = 1; + MIKE = 2; + } + + extensions 90 to 100; +} + +message ImportedMessage2 { +} + +message ImportedExtendable { + option message_set_wire_format = true; + extensions 100 to max; +} diff -Nru juju-core-2.0~beta15/src/github.com/golang/protobuf/protoc-gen-go/testdata/main_test.go juju-core-2.0.0/src/github.com/golang/protobuf/protoc-gen-go/testdata/main_test.go --- juju-core-2.0~beta15/src/github.com/golang/protobuf/protoc-gen-go/testdata/main_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/golang/protobuf/protoc-gen-go/testdata/main_test.go 2016-10-13 14:32:09.000000000 +0000 @@ -0,0 +1,46 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. 
+// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// A simple binary to link together the protocol buffers in this test. 
+ +package testdata + +import ( + "testing" + + multipb "./multi" + mytestpb "./my_test" +) + +func TestLink(t *testing.T) { + _ = &multipb.Multi1{} + _ = &mytestpb.Request{} +} diff -Nru juju-core-2.0~beta15/src/github.com/golang/protobuf/protoc-gen-go/testdata/Makefile juju-core-2.0.0/src/github.com/golang/protobuf/protoc-gen-go/testdata/Makefile --- juju-core-2.0~beta15/src/github.com/golang/protobuf/protoc-gen-go/testdata/Makefile 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/golang/protobuf/protoc-gen-go/testdata/Makefile 2016-10-13 14:32:09.000000000 +0000 @@ -0,0 +1,66 @@ +# Go support for Protocol Buffers - Google's data interchange format +# +# Copyright 2010 The Go Authors. All rights reserved. +# https://github.com/golang/protobuf +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +all: + @echo run make test + +include ../../Make.protobuf + +test: golden testbuild + +#test: golden testbuild extension_test +# ./extension_test +# @echo PASS + +golden: + make -B my_test/test.pb.go + diff -w my_test/test.pb.go my_test/test.pb.go.golden + +nuke: clean + +testbuild: buildprotos + go test + +buildprotos: + # Invoke protoc once to generate three independent .pb.go files in the same package. + protoc --go_out=. multi/multi{1,2,3}.proto + +#extension_test: extension_test.$O +# $(LD) -L. -o $@ $< + +#multi.a: multi3.pb.$O multi2.pb.$O multi1.pb.$O +# rm -f multi.a +# $(QUOTED_GOBIN)/gopack grc $@ $< + +#test.pb.go: imp.pb.go +#multi1.pb.go: multi2.pb.go multi3.pb.go +#main.$O: imp.pb.$O test.pb.$O multi.a +#extension_test.$O: extension_base.pb.$O extension_extra.pb.$O extension_user.pb.$O diff -Nru juju-core-2.0~beta15/src/github.com/golang/protobuf/protoc-gen-go/testdata/multi/multi1.proto juju-core-2.0.0/src/github.com/golang/protobuf/protoc-gen-go/testdata/multi/multi1.proto --- juju-core-2.0~beta15/src/github.com/golang/protobuf/protoc-gen-go/testdata/multi/multi1.proto 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/golang/protobuf/protoc-gen-go/testdata/multi/multi1.proto 2016-10-13 14:32:09.000000000 +0000 @@ -0,0 +1,44 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. 
+// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +syntax = "proto2"; + +import "multi/multi2.proto"; +import "multi/multi3.proto"; + +package multitest; + +message Multi1 { + required Multi2 multi2 = 1; + optional Multi2.Color color = 2; + optional Multi3.HatType hat_type = 3; +} + diff -Nru juju-core-2.0~beta15/src/github.com/golang/protobuf/protoc-gen-go/testdata/multi/multi2.proto juju-core-2.0.0/src/github.com/golang/protobuf/protoc-gen-go/testdata/multi/multi2.proto --- juju-core-2.0~beta15/src/github.com/golang/protobuf/protoc-gen-go/testdata/multi/multi2.proto 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/golang/protobuf/protoc-gen-go/testdata/multi/multi2.proto 2016-10-13 14:32:09.000000000 +0000 @@ -0,0 +1,46 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto2"; + +package multitest; + +message Multi2 { + required int32 required_value = 1; + + enum Color { + BLUE = 1; + GREEN = 2; + RED = 3; + }; + optional Color color = 2; +} + diff -Nru juju-core-2.0~beta15/src/github.com/golang/protobuf/protoc-gen-go/testdata/multi/multi3.proto juju-core-2.0.0/src/github.com/golang/protobuf/protoc-gen-go/testdata/multi/multi3.proto --- juju-core-2.0~beta15/src/github.com/golang/protobuf/protoc-gen-go/testdata/multi/multi3.proto 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/golang/protobuf/protoc-gen-go/testdata/multi/multi3.proto 2016-10-13 14:32:09.000000000 +0000 @@ -0,0 +1,43 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. 
nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto2"; + +package multitest; + +message Multi3 { + enum HatType { + FEDORA = 1; + FEZ = 2; + }; + optional HatType hat_type = 1; +} + diff -Nru juju-core-2.0~beta15/src/github.com/golang/protobuf/protoc-gen-go/testdata/my_test/test.pb.go juju-core-2.0.0/src/github.com/golang/protobuf/protoc-gen-go/testdata/my_test/test.pb.go --- juju-core-2.0~beta15/src/github.com/golang/protobuf/protoc-gen-go/testdata/my_test/test.pb.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/golang/protobuf/protoc-gen-go/testdata/my_test/test.pb.go 2016-10-13 14:32:09.000000000 +0000 @@ -0,0 +1,487 @@ +// Code generated by protoc-gen-go. +// source: my_test/test.proto +// DO NOT EDIT! + +/* +Package my_test is a generated protocol buffer package. + +This package holds interesting messages. 
+ +It is generated from these files: + my_test/test.proto + +It has these top-level messages: + Request + Reply + OtherBase + ReplyExtensions + OtherReplyExtensions + OldReply +*/ +package my_test + +import proto "github.com/golang/protobuf/proto" +import math "math" + +// discarding unused import multitest2 "multi" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = math.Inf + +type HatType int32 + +const ( + // deliberately skipping 0 + HatType_FEDORA HatType = 1 + HatType_FEZ HatType = 2 +) + +var HatType_name = map[int32]string{ + 1: "FEDORA", + 2: "FEZ", +} +var HatType_value = map[string]int32{ + "FEDORA": 1, + "FEZ": 2, +} + +func (x HatType) Enum() *HatType { + p := new(HatType) + *p = x + return p +} +func (x HatType) String() string { + return proto.EnumName(HatType_name, int32(x)) +} +func (x *HatType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(HatType_value, data, "HatType") + if err != nil { + return err + } + *x = HatType(value) + return nil +} + +// This enum represents days of the week. 
+type Days int32 + +const ( + Days_MONDAY Days = 1 + Days_TUESDAY Days = 2 + Days_LUNDI Days = 1 +) + +var Days_name = map[int32]string{ + 1: "MONDAY", + 2: "TUESDAY", + // Duplicate value: 1: "LUNDI", +} +var Days_value = map[string]int32{ + "MONDAY": 1, + "TUESDAY": 2, + "LUNDI": 1, +} + +func (x Days) Enum() *Days { + p := new(Days) + *p = x + return p +} +func (x Days) String() string { + return proto.EnumName(Days_name, int32(x)) +} +func (x *Days) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Days_value, data, "Days") + if err != nil { + return err + } + *x = Days(value) + return nil +} + +type Request_Color int32 + +const ( + Request_RED Request_Color = 0 + Request_GREEN Request_Color = 1 + Request_BLUE Request_Color = 2 +) + +var Request_Color_name = map[int32]string{ + 0: "RED", + 1: "GREEN", + 2: "BLUE", +} +var Request_Color_value = map[string]int32{ + "RED": 0, + "GREEN": 1, + "BLUE": 2, +} + +func (x Request_Color) Enum() *Request_Color { + p := new(Request_Color) + *p = x + return p +} +func (x Request_Color) String() string { + return proto.EnumName(Request_Color_name, int32(x)) +} +func (x *Request_Color) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Request_Color_value, data, "Request_Color") + if err != nil { + return err + } + *x = Request_Color(value) + return nil +} + +type Reply_Entry_Game int32 + +const ( + Reply_Entry_FOOTBALL Reply_Entry_Game = 1 + Reply_Entry_TENNIS Reply_Entry_Game = 2 +) + +var Reply_Entry_Game_name = map[int32]string{ + 1: "FOOTBALL", + 2: "TENNIS", +} +var Reply_Entry_Game_value = map[string]int32{ + "FOOTBALL": 1, + "TENNIS": 2, +} + +func (x Reply_Entry_Game) Enum() *Reply_Entry_Game { + p := new(Reply_Entry_Game) + *p = x + return p +} +func (x Reply_Entry_Game) String() string { + return proto.EnumName(Reply_Entry_Game_name, int32(x)) +} +func (x *Reply_Entry_Game) UnmarshalJSON(data []byte) error { + value, err := 
proto.UnmarshalJSONEnum(Reply_Entry_Game_value, data, "Reply_Entry_Game") + if err != nil { + return err + } + *x = Reply_Entry_Game(value) + return nil +} + +// This is a message that might be sent somewhere. +type Request struct { + Key []int64 `protobuf:"varint,1,rep,name=key" json:"key,omitempty"` + // optional imp.ImportedMessage imported_message = 2; + Hue *Request_Color `protobuf:"varint,3,opt,name=hue,enum=my.test.Request_Color" json:"hue,omitempty"` + Hat *HatType `protobuf:"varint,4,opt,name=hat,enum=my.test.HatType,def=1" json:"hat,omitempty"` + // optional imp.ImportedMessage.Owner owner = 6; + Deadline *float32 `protobuf:"fixed32,7,opt,name=deadline,def=inf" json:"deadline,omitempty"` + Somegroup *Request_SomeGroup `protobuf:"group,8,opt,name=SomeGroup" json:"somegroup,omitempty"` + // This is a map field. It will generate map[int32]string. + NameMapping map[int32]string `protobuf:"bytes,14,rep,name=name_mapping" json:"name_mapping,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // This is a map field whose value type is a message. 
+ MsgMapping map[int64]*Reply `protobuf:"bytes,15,rep,name=msg_mapping" json:"msg_mapping,omitempty" protobuf_key:"zigzag64,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + Reset_ *int32 `protobuf:"varint,12,opt,name=reset" json:"reset,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Request) Reset() { *m = Request{} } +func (m *Request) String() string { return proto.CompactTextString(m) } +func (*Request) ProtoMessage() {} + +const Default_Request_Hat HatType = HatType_FEDORA + +var Default_Request_Deadline float32 = float32(math.Inf(1)) + +func (m *Request) GetKey() []int64 { + if m != nil { + return m.Key + } + return nil +} + +func (m *Request) GetHue() Request_Color { + if m != nil && m.Hue != nil { + return *m.Hue + } + return Request_RED +} + +func (m *Request) GetHat() HatType { + if m != nil && m.Hat != nil { + return *m.Hat + } + return Default_Request_Hat +} + +func (m *Request) GetDeadline() float32 { + if m != nil && m.Deadline != nil { + return *m.Deadline + } + return Default_Request_Deadline +} + +func (m *Request) GetSomegroup() *Request_SomeGroup { + if m != nil { + return m.Somegroup + } + return nil +} + +func (m *Request) GetNameMapping() map[int32]string { + if m != nil { + return m.NameMapping + } + return nil +} + +func (m *Request) GetMsgMapping() map[int64]*Reply { + if m != nil { + return m.MsgMapping + } + return nil +} + +func (m *Request) GetReset_() int32 { + if m != nil && m.Reset_ != nil { + return *m.Reset_ + } + return 0 +} + +type Request_SomeGroup struct { + GroupField *int32 `protobuf:"varint,9,opt,name=group_field" json:"group_field,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Request_SomeGroup) Reset() { *m = Request_SomeGroup{} } +func (m *Request_SomeGroup) String() string { return proto.CompactTextString(m) } +func (*Request_SomeGroup) ProtoMessage() {} + +func (m *Request_SomeGroup) GetGroupField() int32 { + if m != nil && m.GroupField != nil { + return *m.GroupField + } + return 
0 +} + +type Reply struct { + Found []*Reply_Entry `protobuf:"bytes,1,rep,name=found" json:"found,omitempty"` + CompactKeys []int32 `protobuf:"varint,2,rep,packed,name=compact_keys" json:"compact_keys,omitempty"` + XXX_extensions map[int32]proto.Extension `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Reply) Reset() { *m = Reply{} } +func (m *Reply) String() string { return proto.CompactTextString(m) } +func (*Reply) ProtoMessage() {} + +var extRange_Reply = []proto.ExtensionRange{ + {100, 536870911}, +} + +func (*Reply) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_Reply +} +func (m *Reply) ExtensionMap() map[int32]proto.Extension { + if m.XXX_extensions == nil { + m.XXX_extensions = make(map[int32]proto.Extension) + } + return m.XXX_extensions +} + +func (m *Reply) GetFound() []*Reply_Entry { + if m != nil { + return m.Found + } + return nil +} + +func (m *Reply) GetCompactKeys() []int32 { + if m != nil { + return m.CompactKeys + } + return nil +} + +type Reply_Entry struct { + KeyThatNeeds_1234Camel_CasIng *int64 `protobuf:"varint,1,req,name=key_that_needs_1234camel_CasIng" json:"key_that_needs_1234camel_CasIng,omitempty"` + Value *int64 `protobuf:"varint,2,opt,name=value,def=7" json:"value,omitempty"` + XMyFieldName_2 *int64 `protobuf:"varint,3,opt,name=_my_field_name_2" json:"_my_field_name_2,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Reply_Entry) Reset() { *m = Reply_Entry{} } +func (m *Reply_Entry) String() string { return proto.CompactTextString(m) } +func (*Reply_Entry) ProtoMessage() {} + +const Default_Reply_Entry_Value int64 = 7 + +func (m *Reply_Entry) GetKeyThatNeeds_1234Camel_CasIng() int64 { + if m != nil && m.KeyThatNeeds_1234Camel_CasIng != nil { + return *m.KeyThatNeeds_1234Camel_CasIng + } + return 0 +} + +func (m *Reply_Entry) GetValue() int64 { + if m != nil && m.Value != nil { + return *m.Value + } + return Default_Reply_Entry_Value +} + +func (m *Reply_Entry) GetXMyFieldName_2() int64 { 
+ if m != nil && m.XMyFieldName_2 != nil { + return *m.XMyFieldName_2 + } + return 0 +} + +type OtherBase struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + XXX_extensions map[int32]proto.Extension `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *OtherBase) Reset() { *m = OtherBase{} } +func (m *OtherBase) String() string { return proto.CompactTextString(m) } +func (*OtherBase) ProtoMessage() {} + +var extRange_OtherBase = []proto.ExtensionRange{ + {100, 536870911}, +} + +func (*OtherBase) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_OtherBase +} +func (m *OtherBase) ExtensionMap() map[int32]proto.Extension { + if m.XXX_extensions == nil { + m.XXX_extensions = make(map[int32]proto.Extension) + } + return m.XXX_extensions +} + +func (m *OtherBase) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +type ReplyExtensions struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *ReplyExtensions) Reset() { *m = ReplyExtensions{} } +func (m *ReplyExtensions) String() string { return proto.CompactTextString(m) } +func (*ReplyExtensions) ProtoMessage() {} + +var E_ReplyExtensions_Time = &proto.ExtensionDesc{ + ExtendedType: (*Reply)(nil), + ExtensionType: (*float64)(nil), + Field: 101, + Name: "my.test.ReplyExtensions.time", + Tag: "fixed64,101,opt,name=time", +} + +var E_ReplyExtensions_Carrot = &proto.ExtensionDesc{ + ExtendedType: (*Reply)(nil), + ExtensionType: (*ReplyExtensions)(nil), + Field: 105, + Name: "my.test.ReplyExtensions.carrot", + Tag: "bytes,105,opt,name=carrot", +} + +var E_ReplyExtensions_Donut = &proto.ExtensionDesc{ + ExtendedType: (*OtherBase)(nil), + ExtensionType: (*ReplyExtensions)(nil), + Field: 101, + Name: "my.test.ReplyExtensions.donut", + Tag: "bytes,101,opt,name=donut", +} + +type OtherReplyExtensions struct { + Key *int32 `protobuf:"varint,1,opt,name=key" json:"key,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m 
*OtherReplyExtensions) Reset() { *m = OtherReplyExtensions{} } +func (m *OtherReplyExtensions) String() string { return proto.CompactTextString(m) } +func (*OtherReplyExtensions) ProtoMessage() {} + +func (m *OtherReplyExtensions) GetKey() int32 { + if m != nil && m.Key != nil { + return *m.Key + } + return 0 +} + +type OldReply struct { + XXX_extensions map[int32]proto.Extension `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *OldReply) Reset() { *m = OldReply{} } +func (m *OldReply) String() string { return proto.CompactTextString(m) } +func (*OldReply) ProtoMessage() {} + +func (m *OldReply) Marshal() ([]byte, error) { + return proto.MarshalMessageSet(m.ExtensionMap()) +} +func (m *OldReply) Unmarshal(buf []byte) error { + return proto.UnmarshalMessageSet(buf, m.ExtensionMap()) +} +func (m *OldReply) MarshalJSON() ([]byte, error) { + return proto.MarshalMessageSetJSON(m.XXX_extensions) +} +func (m *OldReply) UnmarshalJSON(buf []byte) error { + return proto.UnmarshalMessageSetJSON(buf, m.XXX_extensions) +} + +// ensure OldReply satisfies proto.Marshaler and proto.Unmarshaler +var _ proto.Marshaler = (*OldReply)(nil) +var _ proto.Unmarshaler = (*OldReply)(nil) + +var extRange_OldReply = []proto.ExtensionRange{ + {100, 2147483646}, +} + +func (*OldReply) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_OldReply +} +func (m *OldReply) ExtensionMap() map[int32]proto.Extension { + if m.XXX_extensions == nil { + m.XXX_extensions = make(map[int32]proto.Extension) + } + return m.XXX_extensions +} + +var E_Tag = &proto.ExtensionDesc{ + ExtendedType: (*Reply)(nil), + ExtensionType: (*string)(nil), + Field: 103, + Name: "my.test.tag", + Tag: "bytes,103,opt,name=tag", +} + +var E_Donut = &proto.ExtensionDesc{ + ExtendedType: (*Reply)(nil), + ExtensionType: (*OtherReplyExtensions)(nil), + Field: 106, + Name: "my.test.donut", + Tag: "bytes,106,opt,name=donut", +} + +func init() { + proto.RegisterEnum("my.test.HatType", HatType_name, HatType_value) 
+ proto.RegisterEnum("my.test.Days", Days_name, Days_value) + proto.RegisterEnum("my.test.Request_Color", Request_Color_name, Request_Color_value) + proto.RegisterEnum("my.test.Reply_Entry_Game", Reply_Entry_Game_name, Reply_Entry_Game_value) + proto.RegisterExtension(E_ReplyExtensions_Time) + proto.RegisterExtension(E_ReplyExtensions_Carrot) + proto.RegisterExtension(E_ReplyExtensions_Donut) + proto.RegisterExtension(E_Tag) + proto.RegisterExtension(E_Donut) +} diff -Nru juju-core-2.0~beta15/src/github.com/golang/protobuf/protoc-gen-go/testdata/my_test/test.pb.go.golden juju-core-2.0.0/src/github.com/golang/protobuf/protoc-gen-go/testdata/my_test/test.pb.go.golden --- juju-core-2.0~beta15/src/github.com/golang/protobuf/protoc-gen-go/testdata/my_test/test.pb.go.golden 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/golang/protobuf/protoc-gen-go/testdata/my_test/test.pb.go.golden 2016-10-13 14:32:09.000000000 +0000 @@ -0,0 +1,487 @@ +// Code generated by protoc-gen-go. +// source: my_test/test.proto +// DO NOT EDIT! + +/* +Package my_test is a generated protocol buffer package. + +This package holds interesting messages. + +It is generated from these files: + my_test/test.proto + +It has these top-level messages: + Request + Reply + OtherBase + ReplyExtensions + OtherReplyExtensions + OldReply +*/ +package my_test + +import proto "github.com/golang/protobuf/proto" +import math "math" + +// discarding unused import multitest2 "multi" + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = math.Inf + +type HatType int32 + +const ( + // deliberately skipping 0 + HatType_FEDORA HatType = 1 + HatType_FEZ HatType = 2 +) + +var HatType_name = map[int32]string{ + 1: "FEDORA", + 2: "FEZ", +} +var HatType_value = map[string]int32{ + "FEDORA": 1, + "FEZ": 2, +} + +func (x HatType) Enum() *HatType { + p := new(HatType) + *p = x + return p +} +func (x HatType) String() string { + return proto.EnumName(HatType_name, int32(x)) +} +func (x *HatType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(HatType_value, data, "HatType") + if err != nil { + return err + } + *x = HatType(value) + return nil +} + +// This enum represents days of the week. +type Days int32 + +const ( + Days_MONDAY Days = 1 + Days_TUESDAY Days = 2 + Days_LUNDI Days = 1 +) + +var Days_name = map[int32]string{ + 1: "MONDAY", + 2: "TUESDAY", + // Duplicate value: 1: "LUNDI", +} +var Days_value = map[string]int32{ + "MONDAY": 1, + "TUESDAY": 2, + "LUNDI": 1, +} + +func (x Days) Enum() *Days { + p := new(Days) + *p = x + return p +} +func (x Days) String() string { + return proto.EnumName(Days_name, int32(x)) +} +func (x *Days) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Days_value, data, "Days") + if err != nil { + return err + } + *x = Days(value) + return nil +} + +type Request_Color int32 + +const ( + Request_RED Request_Color = 0 + Request_GREEN Request_Color = 1 + Request_BLUE Request_Color = 2 +) + +var Request_Color_name = map[int32]string{ + 0: "RED", + 1: "GREEN", + 2: "BLUE", +} +var Request_Color_value = map[string]int32{ + "RED": 0, + "GREEN": 1, + "BLUE": 2, +} + +func (x Request_Color) Enum() *Request_Color { + p := new(Request_Color) + *p = x + return p +} +func (x Request_Color) String() string { + return proto.EnumName(Request_Color_name, int32(x)) +} +func (x *Request_Color) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Request_Color_value, data, "Request_Color") 
+ if err != nil { + return err + } + *x = Request_Color(value) + return nil +} + +type Reply_Entry_Game int32 + +const ( + Reply_Entry_FOOTBALL Reply_Entry_Game = 1 + Reply_Entry_TENNIS Reply_Entry_Game = 2 +) + +var Reply_Entry_Game_name = map[int32]string{ + 1: "FOOTBALL", + 2: "TENNIS", +} +var Reply_Entry_Game_value = map[string]int32{ + "FOOTBALL": 1, + "TENNIS": 2, +} + +func (x Reply_Entry_Game) Enum() *Reply_Entry_Game { + p := new(Reply_Entry_Game) + *p = x + return p +} +func (x Reply_Entry_Game) String() string { + return proto.EnumName(Reply_Entry_Game_name, int32(x)) +} +func (x *Reply_Entry_Game) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Reply_Entry_Game_value, data, "Reply_Entry_Game") + if err != nil { + return err + } + *x = Reply_Entry_Game(value) + return nil +} + +// This is a message that might be sent somewhere. +type Request struct { + Key []int64 `protobuf:"varint,1,rep,name=key" json:"key,omitempty"` + // optional imp.ImportedMessage imported_message = 2; + Hue *Request_Color `protobuf:"varint,3,opt,name=hue,enum=my.test.Request_Color" json:"hue,omitempty"` + Hat *HatType `protobuf:"varint,4,opt,name=hat,enum=my.test.HatType,def=1" json:"hat,omitempty"` + // optional imp.ImportedMessage.Owner owner = 6; + Deadline *float32 `protobuf:"fixed32,7,opt,name=deadline,def=inf" json:"deadline,omitempty"` + Somegroup *Request_SomeGroup `protobuf:"group,8,opt,name=SomeGroup" json:"somegroup,omitempty"` + // This is a map field. It will generate map[int32]string. + NameMapping map[int32]string `protobuf:"bytes,14,rep,name=name_mapping" json:"name_mapping,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // This is a map field whose value type is a message. 
+ MsgMapping map[int64]*Reply `protobuf:"bytes,15,rep,name=msg_mapping" json:"msg_mapping,omitempty" protobuf_key:"zigzag64,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + Reset_ *int32 `protobuf:"varint,12,opt,name=reset" json:"reset,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Request) Reset() { *m = Request{} } +func (m *Request) String() string { return proto.CompactTextString(m) } +func (*Request) ProtoMessage() {} + +const Default_Request_Hat HatType = HatType_FEDORA + +var Default_Request_Deadline float32 = float32(math.Inf(1)) + +func (m *Request) GetKey() []int64 { + if m != nil { + return m.Key + } + return nil +} + +func (m *Request) GetHue() Request_Color { + if m != nil && m.Hue != nil { + return *m.Hue + } + return Request_RED +} + +func (m *Request) GetHat() HatType { + if m != nil && m.Hat != nil { + return *m.Hat + } + return Default_Request_Hat +} + +func (m *Request) GetDeadline() float32 { + if m != nil && m.Deadline != nil { + return *m.Deadline + } + return Default_Request_Deadline +} + +func (m *Request) GetSomegroup() *Request_SomeGroup { + if m != nil { + return m.Somegroup + } + return nil +} + +func (m *Request) GetNameMapping() map[int32]string { + if m != nil { + return m.NameMapping + } + return nil +} + +func (m *Request) GetMsgMapping() map[int64]*Reply { + if m != nil { + return m.MsgMapping + } + return nil +} + +func (m *Request) GetReset_() int32 { + if m != nil && m.Reset_ != nil { + return *m.Reset_ + } + return 0 +} + +type Request_SomeGroup struct { + GroupField *int32 `protobuf:"varint,9,opt,name=group_field" json:"group_field,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Request_SomeGroup) Reset() { *m = Request_SomeGroup{} } +func (m *Request_SomeGroup) String() string { return proto.CompactTextString(m) } +func (*Request_SomeGroup) ProtoMessage() {} + +func (m *Request_SomeGroup) GetGroupField() int32 { + if m != nil && m.GroupField != nil { + return *m.GroupField + } + return 
0 +} + +type Reply struct { + Found []*Reply_Entry `protobuf:"bytes,1,rep,name=found" json:"found,omitempty"` + CompactKeys []int32 `protobuf:"varint,2,rep,packed,name=compact_keys" json:"compact_keys,omitempty"` + XXX_extensions map[int32]proto.Extension `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Reply) Reset() { *m = Reply{} } +func (m *Reply) String() string { return proto.CompactTextString(m) } +func (*Reply) ProtoMessage() {} + +var extRange_Reply = []proto.ExtensionRange{ + {100, 536870911}, +} + +func (*Reply) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_Reply +} +func (m *Reply) ExtensionMap() map[int32]proto.Extension { + if m.XXX_extensions == nil { + m.XXX_extensions = make(map[int32]proto.Extension) + } + return m.XXX_extensions +} + +func (m *Reply) GetFound() []*Reply_Entry { + if m != nil { + return m.Found + } + return nil +} + +func (m *Reply) GetCompactKeys() []int32 { + if m != nil { + return m.CompactKeys + } + return nil +} + +type Reply_Entry struct { + KeyThatNeeds_1234Camel_CasIng *int64 `protobuf:"varint,1,req,name=key_that_needs_1234camel_CasIng" json:"key_that_needs_1234camel_CasIng,omitempty"` + Value *int64 `protobuf:"varint,2,opt,name=value,def=7" json:"value,omitempty"` + XMyFieldName_2 *int64 `protobuf:"varint,3,opt,name=_my_field_name_2" json:"_my_field_name_2,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Reply_Entry) Reset() { *m = Reply_Entry{} } +func (m *Reply_Entry) String() string { return proto.CompactTextString(m) } +func (*Reply_Entry) ProtoMessage() {} + +const Default_Reply_Entry_Value int64 = 7 + +func (m *Reply_Entry) GetKeyThatNeeds_1234Camel_CasIng() int64 { + if m != nil && m.KeyThatNeeds_1234Camel_CasIng != nil { + return *m.KeyThatNeeds_1234Camel_CasIng + } + return 0 +} + +func (m *Reply_Entry) GetValue() int64 { + if m != nil && m.Value != nil { + return *m.Value + } + return Default_Reply_Entry_Value +} + +func (m *Reply_Entry) GetXMyFieldName_2() int64 { 
+ if m != nil && m.XMyFieldName_2 != nil { + return *m.XMyFieldName_2 + } + return 0 +} + +type OtherBase struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + XXX_extensions map[int32]proto.Extension `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *OtherBase) Reset() { *m = OtherBase{} } +func (m *OtherBase) String() string { return proto.CompactTextString(m) } +func (*OtherBase) ProtoMessage() {} + +var extRange_OtherBase = []proto.ExtensionRange{ + {100, 536870911}, +} + +func (*OtherBase) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_OtherBase +} +func (m *OtherBase) ExtensionMap() map[int32]proto.Extension { + if m.XXX_extensions == nil { + m.XXX_extensions = make(map[int32]proto.Extension) + } + return m.XXX_extensions +} + +func (m *OtherBase) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +type ReplyExtensions struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *ReplyExtensions) Reset() { *m = ReplyExtensions{} } +func (m *ReplyExtensions) String() string { return proto.CompactTextString(m) } +func (*ReplyExtensions) ProtoMessage() {} + +var E_ReplyExtensions_Time = &proto.ExtensionDesc{ + ExtendedType: (*Reply)(nil), + ExtensionType: (*float64)(nil), + Field: 101, + Name: "my.test.ReplyExtensions.time", + Tag: "fixed64,101,opt,name=time", +} + +var E_ReplyExtensions_Carrot = &proto.ExtensionDesc{ + ExtendedType: (*Reply)(nil), + ExtensionType: (*ReplyExtensions)(nil), + Field: 105, + Name: "my.test.ReplyExtensions.carrot", + Tag: "bytes,105,opt,name=carrot", +} + +var E_ReplyExtensions_Donut = &proto.ExtensionDesc{ + ExtendedType: (*OtherBase)(nil), + ExtensionType: (*ReplyExtensions)(nil), + Field: 101, + Name: "my.test.ReplyExtensions.donut", + Tag: "bytes,101,opt,name=donut", +} + +type OtherReplyExtensions struct { + Key *int32 `protobuf:"varint,1,opt,name=key" json:"key,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m 
*OtherReplyExtensions) Reset() { *m = OtherReplyExtensions{} } +func (m *OtherReplyExtensions) String() string { return proto.CompactTextString(m) } +func (*OtherReplyExtensions) ProtoMessage() {} + +func (m *OtherReplyExtensions) GetKey() int32 { + if m != nil && m.Key != nil { + return *m.Key + } + return 0 +} + +type OldReply struct { + XXX_extensions map[int32]proto.Extension `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *OldReply) Reset() { *m = OldReply{} } +func (m *OldReply) String() string { return proto.CompactTextString(m) } +func (*OldReply) ProtoMessage() {} + +func (m *OldReply) Marshal() ([]byte, error) { + return proto.MarshalMessageSet(m.ExtensionMap()) +} +func (m *OldReply) Unmarshal(buf []byte) error { + return proto.UnmarshalMessageSet(buf, m.ExtensionMap()) +} +func (m *OldReply) MarshalJSON() ([]byte, error) { + return proto.MarshalMessageSetJSON(m.XXX_extensions) +} +func (m *OldReply) UnmarshalJSON(buf []byte) error { + return proto.UnmarshalMessageSetJSON(buf, m.XXX_extensions) +} + +// ensure OldReply satisfies proto.Marshaler and proto.Unmarshaler +var _ proto.Marshaler = (*OldReply)(nil) +var _ proto.Unmarshaler = (*OldReply)(nil) + +var extRange_OldReply = []proto.ExtensionRange{ + {100, 2147483646}, +} + +func (*OldReply) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_OldReply +} +func (m *OldReply) ExtensionMap() map[int32]proto.Extension { + if m.XXX_extensions == nil { + m.XXX_extensions = make(map[int32]proto.Extension) + } + return m.XXX_extensions +} + +var E_Tag = &proto.ExtensionDesc{ + ExtendedType: (*Reply)(nil), + ExtensionType: (*string)(nil), + Field: 103, + Name: "my.test.tag", + Tag: "bytes,103,opt,name=tag", +} + +var E_Donut = &proto.ExtensionDesc{ + ExtendedType: (*Reply)(nil), + ExtensionType: (*OtherReplyExtensions)(nil), + Field: 106, + Name: "my.test.donut", + Tag: "bytes,106,opt,name=donut", +} + +func init() { + proto.RegisterEnum("my.test.HatType", HatType_name, HatType_value) 
+ proto.RegisterEnum("my.test.Days", Days_name, Days_value) + proto.RegisterEnum("my.test.Request_Color", Request_Color_name, Request_Color_value) + proto.RegisterEnum("my.test.Reply_Entry_Game", Reply_Entry_Game_name, Reply_Entry_Game_value) + proto.RegisterExtension(E_ReplyExtensions_Time) + proto.RegisterExtension(E_ReplyExtensions_Carrot) + proto.RegisterExtension(E_ReplyExtensions_Donut) + proto.RegisterExtension(E_Tag) + proto.RegisterExtension(E_Donut) +} diff -Nru juju-core-2.0~beta15/src/github.com/golang/protobuf/protoc-gen-go/testdata/my_test/test.proto juju-core-2.0.0/src/github.com/golang/protobuf/protoc-gen-go/testdata/my_test/test.proto --- juju-core-2.0~beta15/src/github.com/golang/protobuf/protoc-gen-go/testdata/my_test/test.proto 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/golang/protobuf/protoc-gen-go/testdata/my_test/test.proto 2016-10-13 14:32:09.000000000 +0000 @@ -0,0 +1,132 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto2"; + +// This package holds interesting messages. +package my.test; // dotted package name + +//import "imp.proto"; +import "multi/multi1.proto"; // unused import + +enum HatType { + // deliberately skipping 0 + FEDORA = 1; + FEZ = 2; +} + +// This enum represents days of the week. +enum Days { + option allow_alias = true; + + MONDAY = 1; + TUESDAY = 2; + LUNDI = 1; // same value as MONDAY +} + +// This is a message that might be sent somewhere. +message Request { + enum Color { + RED = 0; + GREEN = 1; + BLUE = 2; + } + repeated int64 key = 1; +// optional imp.ImportedMessage imported_message = 2; + optional Color hue = 3; // no default + optional HatType hat = 4 [default=FEDORA]; +// optional imp.ImportedMessage.Owner owner = 6; + optional float deadline = 7 [default=inf]; + optional group SomeGroup = 8 { + optional int32 group_field = 9; + } + + // These foreign types are in imp2.proto, + // which is publicly imported by imp.proto. +// optional imp.PubliclyImportedMessage pub = 10; +// optional imp.PubliclyImportedEnum pub_enum = 13 [default=HAIR]; + + + // This is a map field. It will generate map[int32]string. 
+ map name_mapping = 14; + // This is a map field whose value type is a message. + map msg_mapping = 15; + + optional int32 reset = 12; +} + +message Reply { + message Entry { + required int64 key_that_needs_1234camel_CasIng = 1; + optional int64 value = 2 [default=7]; + optional int64 _my_field_name_2 = 3; + enum Game { + FOOTBALL = 1; + TENNIS = 2; + } + } + repeated Entry found = 1; + repeated int32 compact_keys = 2 [packed=true]; + extensions 100 to max; +} + +message OtherBase { + optional string name = 1; + extensions 100 to max; +} + +message ReplyExtensions { + extend Reply { + optional double time = 101; + optional ReplyExtensions carrot = 105; + } + extend OtherBase { + optional ReplyExtensions donut = 101; + } +} + +message OtherReplyExtensions { + optional int32 key = 1; +} + +// top-level extension +extend Reply { + optional string tag = 103; + optional OtherReplyExtensions donut = 106; +// optional imp.ImportedMessage elephant = 107; // extend with message from another file. +} + +message OldReply { + // Extensions will be encoded in MessageSet wire format. + option message_set_wire_format = true; + extensions 100 to max; +} + diff -Nru juju-core-2.0~beta15/src/github.com/golang/protobuf/protoc-gen-go/testdata/proto3.proto juju-core-2.0.0/src/github.com/golang/protobuf/protoc-gen-go/testdata/proto3.proto --- juju-core-2.0~beta15/src/github.com/golang/protobuf/protoc-gen-go/testdata/proto3.proto 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/golang/protobuf/protoc-gen-go/testdata/proto3.proto 2016-10-13 14:32:09.000000000 +0000 @@ -0,0 +1,52 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2014 The Go Authors. All rights reserved. 
+// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +syntax = "proto3"; + +package proto3; + +message Request { + enum Flavour { + SWEET = 0; + SOUR = 1; + UMAMI = 2; + GOPHERLICIOUS = 3; + } + string name = 1; // "optional" may be omitted + repeated int64 key = 2; + optional Flavour taste = 3; + Book book = 4; +} + +message Book { + string title = 1; + bytes raw_data = 2; +} diff -Nru juju-core-2.0~beta15/src/github.com/golang/protobuf/README juju-core-2.0.0/src/github.com/golang/protobuf/README --- juju-core-2.0~beta15/src/github.com/golang/protobuf/README 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/golang/protobuf/README 2016-10-13 14:32:09.000000000 +0000 @@ -0,0 +1,148 @@ +Go support for Protocol Buffers - Google's data interchange format +Copyright 2010 The Go Authors. +https://github.com/golang/protobuf + +This package and the code it generates requires at least Go 1.2. + +This software implements Go bindings for protocol buffers. For +information about protocol buffers themselves, see + https://developers.google.com/protocol-buffers/ +To use this software, you must first install the standard C++ +implementation of protocol buffers from + https://developers.google.com/protocol-buffers/ +And of course you must also install the Go compiler and tools from + https://golang.org/ +See + https://golang.org/doc/install +for details or, if you are using gccgo, follow the instructions at + https://golang.org/doc/install/gccgo + +This software has two parts: a 'protocol compiler plugin' that +generates Go source files that, once compiled, can access and manage +protocol buffers; and a library that implements run-time support for +encoding (marshaling), decoding (unmarshaling), and accessing protocol +buffers. + +There is no support for RPC in Go using protocol buffers. It may come +once a standard RPC protocol develops for protobufs. + +There are no insertion points in the plugin. + +To install this code: + +The simplest way is to run go get. 
+ + # Grab the code from the repository and install the proto package. + go get -u github.com/golang/protobuf/{proto,protoc-gen-go} + +The compiler plugin, protoc-gen-go, will be installed in $GOBIN, +defaulting to $GOPATH/bin. It must be in your $PATH for the protocol +compiler, protoc, to find it. + +Once the software is installed, there are two steps to using it. +First you must compile the protocol buffer definitions and then import +them, with the support library, into your program. + +To compile the protocol buffer definition, run protoc with the --go_out +parameter set to the directory you want to output the Go code to. + + protoc --go_out=. *.proto + +The generated files will be suffixed .pb.go. See the Test code below +for an example using such a file. + + +The package comment for the proto library contains text describing +the interface provided in Go for protocol buffers. Here is an edited +version. + +========== + +The proto package converts data structures to and from the +wire format of protocol buffers. It works in concert with the +Go source code generated for .proto files by the protocol compiler. + +A summary of the properties of the protocol buffer interface +for a protocol buffer variable v: + + - Names are turned from camel_case to CamelCase for export. + - There are no methods on v to set fields; just treat + them as structure fields. + - There are getters that return a field's value if set, + and return the field's default value if unset. + The getters work even if the receiver is a nil message. + - The zero value for a struct is its correct initialization state. + All desired fields must be set before marshaling. + - A Reset() method will restore a protobuf struct to its zero state. + - Non-repeated fields are pointers to the values; nil means unset. + That is, optional or required field int32 f becomes F *int32. + - Repeated fields are slices. + - Helper functions are available to aid the setting of fields. 
+ Helpers for getting values are superseded by the + GetFoo methods and their use is deprecated. + msg.Foo = proto.String("hello") // set field + - Constants are defined to hold the default values of all fields that + have them. They have the form Default_StructName_FieldName. + Because the getter methods handle defaulted values, + direct use of these constants should be rare. + - Enums are given type names and maps from names to values. + Enum values are prefixed with the enum's type name. Enum types have + a String method, and an Enum method to assist in message construction. + - Nested groups and enums have type names prefixed with the name of + the surrounding message type. + - Extensions are given descriptor names that start with E_, + followed by an underscore-delimited list of the nested messages + that contain it (if any) followed by the CamelCased name of the + extension field itself. HasExtension, ClearExtension, GetExtension + and SetExtension are functions for manipulating extensions. + - Marshal and Unmarshal are functions to encode and decode the wire format. 
+ +Consider file test.proto, containing + + package example; + + enum FOO { X = 17; }; + + message Test { + required string label = 1; + optional int32 type = 2 [default=77]; + repeated int64 reps = 3; + optional group OptionalGroup = 4 { + required string RequiredField = 5; + } + } + +To create and play with a Test object from the example package, + + package main + + import ( + "log" + + "github.com/golang/protobuf/proto" + "path/to/example" + ) + + func main() { + test := &example.Test { + Label: proto.String("hello"), + Type: proto.Int32(17), + Optionalgroup: &example.Test_OptionalGroup { + RequiredField: proto.String("good bye"), + }, + } + data, err := proto.Marshal(test) + if err != nil { + log.Fatal("marshaling error: ", err) + } + newTest := &example.Test{} + err = proto.Unmarshal(data, newTest) + if err != nil { + log.Fatal("unmarshaling error: ", err) + } + // Now test and newTest contain the same data. + if test.GetLabel() != newTest.GetLabel() { + log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel()) + } + // etc. 
+ } diff -Nru juju-core-2.0~beta15/src/github.com/google/go-querystring/.gitignore juju-core-2.0.0/src/github.com/google/go-querystring/.gitignore --- juju-core-2.0~beta15/src/github.com/google/go-querystring/.gitignore 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/google/go-querystring/.gitignore 2016-10-13 14:32:17.000000000 +0000 @@ -0,0 +1 @@ +*.test diff -Nru juju-core-2.0~beta15/src/github.com/gorilla/websocket/.gitignore juju-core-2.0.0/src/github.com/gorilla/websocket/.gitignore --- juju-core-2.0~beta15/src/github.com/gorilla/websocket/.gitignore 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/gorilla/websocket/.gitignore 2016-10-13 14:32:11.000000000 +0000 @@ -0,0 +1,22 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe diff -Nru juju-core-2.0~beta15/src/github.com/joyent/gocommon/.gitignore juju-core-2.0.0/src/github.com/joyent/gocommon/.gitignore --- juju-core-2.0~beta15/src/github.com/joyent/gocommon/.gitignore 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/joyent/gocommon/.gitignore 2016-10-13 14:31:57.000000000 +0000 @@ -0,0 +1,26 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe + +# IntelliJ files +.idea +*.iml \ No newline at end of file diff -Nru juju-core-2.0~beta15/src/github.com/joyent/gosdc/.gitignore juju-core-2.0.0/src/github.com/joyent/gosdc/.gitignore --- juju-core-2.0~beta15/src/github.com/joyent/gosdc/.gitignore 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/joyent/gosdc/.gitignore 2016-10-13 
14:32:10.000000000 +0000 @@ -0,0 +1,26 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe + +# IntelliJ files +.idea +*.iml \ No newline at end of file diff -Nru juju-core-2.0~beta15/src/github.com/joyent/gosign/.gitignore juju-core-2.0.0/src/github.com/joyent/gosign/.gitignore --- juju-core-2.0~beta15/src/github.com/joyent/gosign/.gitignore 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/joyent/gosign/.gitignore 2016-10-13 14:31:58.000000000 +0000 @@ -0,0 +1,26 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe + +# IntelliJ files +.idea +*.iml \ No newline at end of file diff -Nru juju-core-2.0~beta15/src/github.com/juju/ansiterm/attribute.go juju-core-2.0.0/src/github.com/juju/ansiterm/attribute.go --- juju-core-2.0~beta15/src/github.com/juju/ansiterm/attribute.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/ansiterm/attribute.go 2016-10-13 14:31:57.000000000 +0000 @@ -0,0 +1,50 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the LGPLv3, see LICENCE file for details. + +package ansiterm + +import ( + "fmt" + "sort" + "strings" +) + +type attribute int + +const ( + unknownAttribute attribute = -1 + reset attribute = 0 +) + +// sgr returns the escape sequence for the Select Graphic Rendition +// for the attribute. 
+func (a attribute) sgr() string { + if a < 0 { + return "" + } + return fmt.Sprintf("\x1b[%dm", a) +} + +type attributes []attribute + +func (a attributes) Len() int { return len(a) } +func (a attributes) Less(i, j int) bool { return a[i] < a[j] } +func (a attributes) Swap(i, j int) { a[i], a[j] = a[j], a[i] } + +// sgr returns the combined escape sequence for the Select Graphic Rendition +// for the sequence of attributes. +func (a attributes) sgr() string { + switch len(a) { + case 0: + return "" + case 1: + return a[0].sgr() + default: + sort.Sort(a) + var values []string + for _, attr := range a { + values = append(values, fmt.Sprint(attr)) + } + return fmt.Sprintf("\x1b[%sm", strings.Join(values, ";")) + } +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/ansiterm/attribute_test.go juju-core-2.0.0/src/github.com/juju/ansiterm/attribute_test.go --- juju-core-2.0~beta15/src/github.com/juju/ansiterm/attribute_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/ansiterm/attribute_test.go 2016-10-13 14:31:57.000000000 +0000 @@ -0,0 +1,30 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the LGPLv3, see LICENCE file for details. + +package ansiterm + +import gc "gopkg.in/check.v1" + +type attributeSuite struct{} + +var _ = gc.Suite(&attributeSuite{}) + +func (*attributeSuite) TestSGR(c *gc.C) { + c.Check(unknownAttribute.sgr(), gc.Equals, "") + c.Check(reset.sgr(), gc.Equals, "\x1b[0m") + var yellow attribute = 33 + c.Check(yellow.sgr(), gc.Equals, "\x1b[33m") +} + +func (*attributeSuite) TestAttributes(c *gc.C) { + var a attributes + c.Check(a.sgr(), gc.Equals, "") + a = append(a, Yellow.foreground()) + c.Check(a.sgr(), gc.Equals, "\x1b[33m") + a = append(a, Blue.background()) + c.Check(a.sgr(), gc.Equals, "\x1b[33;44m") + + // Add bold to the end to show sorting of the attributes. 
+ a = append(a, Bold.enable()) + c.Check(a.sgr(), gc.Equals, "\x1b[1;33;44m") +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/ansiterm/color.go juju-core-2.0.0/src/github.com/juju/ansiterm/color.go --- juju-core-2.0~beta15/src/github.com/juju/ansiterm/color.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/ansiterm/color.go 2016-10-13 14:31:57.000000000 +0000 @@ -0,0 +1,119 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the LGPLv3, see LICENCE file for details. + +package ansiterm + +const ( + _ Color = iota + Default + Black + Red + Green + Yellow + Blue + Magenta + Cyan + Gray + DarkGray + BrightRed + BrightGreen + BrightYellow + BrightBlue + BrightMagenta + BrightCyan + White +) + +// Color represents one of the standard 16 ANSI colors. +type Color int + +// String returns the name of the color. +func (c Color) String() string { + switch c { + case Default: + return "default" + case Black: + return "black" + case Red: + return "red" + case Green: + return "green" + case Yellow: + return "yellow" + case Blue: + return "blue" + case Magenta: + return "magenta" + case Cyan: + return "cyan" + case Gray: + return "gray" + case DarkGray: + return "darkgray" + case BrightRed: + return "brightred" + case BrightGreen: + return "brightgreen" + case BrightYellow: + return "brightyellow" + case BrightBlue: + return "brightblue" + case BrightMagenta: + return "brightmagenta" + case BrightCyan: + return "brightcyan" + case White: + return "white" + default: + return "" + } +} + +func (c Color) foreground() attribute { + switch c { + case Default: + return 39 + case Black: + return 30 + case Red: + return 31 + case Green: + return 32 + case Yellow: + return 33 + case Blue: + return 34 + case Magenta: + return 35 + case Cyan: + return 36 + case Gray: + return 37 + case DarkGray: + return 90 + case BrightRed: + return 91 + case BrightGreen: + return 92 + case BrightYellow: + return 93 + case BrightBlue: + return 94 + case 
BrightMagenta: + return 95 + case BrightCyan: + return 96 + case White: + return 97 + default: + return unknownAttribute + } +} + +func (c Color) background() attribute { + value := c.foreground() + if value != unknownAttribute { + return value + 10 + } + return value +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/ansiterm/color_test.go juju-core-2.0.0/src/github.com/juju/ansiterm/color_test.go --- juju-core-2.0~beta15/src/github.com/juju/ansiterm/color_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/ansiterm/color_test.go 2016-10-13 14:31:57.000000000 +0000 @@ -0,0 +1,22 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the LGPLv3, see LICENCE file for details. + +package ansiterm + +import gc "gopkg.in/check.v1" + +type colorSuite struct{} + +var _ = gc.Suite(&colorSuite{}) + +func (*colorSuite) TestString(c *gc.C) { + c.Check(Default.String(), gc.Equals, "default") + c.Check(Yellow.String(), gc.Equals, "yellow") + c.Check(BrightMagenta.String(), gc.Equals, "brightmagenta") + var blank Color + c.Check(blank.String(), gc.Equals, "") + var huge Color = 1234 + c.Check(huge.String(), gc.Equals, "") + var negative Color = -1 + c.Check(negative.String(), gc.Equals, "") +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/ansiterm/context.go juju-core-2.0.0/src/github.com/juju/ansiterm/context.go --- juju-core-2.0~beta15/src/github.com/juju/ansiterm/context.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/ansiterm/context.go 2016-10-13 14:31:57.000000000 +0000 @@ -0,0 +1,95 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the LGPLv3, see LICENCE file for details. + +package ansiterm + +import ( + "fmt" + "io" +) + +// Context provides a way to specify both foreground and background colors +// along with other styles and write text to a Writer with those colors and +// styles. 
+type Context struct { + Foreground Color + Background Color + Styles []Style +} + +// Foreground is a convenience function that creates a Context with the +// specified color as the foreground color. +func Foreground(color Color) *Context { + return &Context{Foreground: color} +} + +// Background is a convenience function that creates a Context with the +// specified color as the background color. +func Background(color Color) *Context { + return &Context{Background: color} +} + +// Styles is a convenience function that creates a Context with the +// specified styles set. +func Styles(styles ...Style) *Context { + return &Context{Styles: styles} +} + +// SetForeground sets the foreground to the specified color. +func (c *Context) SetForeground(color Color) *Context { + c.Foreground = color + return c +} + +// SetBackground sets the background to the specified color. +func (c *Context) SetBackground(color Color) *Context { + c.Background = color + return c +} + +// SetStyle replaces the styles with the new values. +func (c *Context) SetStyle(styles ...Style) *Context { + c.Styles = styles + return c +} + +type sgrWriter interface { + io.Writer + writeSGR(value sgr) +} + +// Fprintf will set the sgr values of the writer to the specified +// foreground, background and styles, then write the formatted string, +// then reset the writer. +func (c *Context) Fprintf(w sgrWriter, format string, args ...interface{}) { + w.writeSGR(c) + fmt.Fprintf(w, format, args...) + w.writeSGR(reset) +} + +// Fprint will set the sgr values of the writer to the specified foreground, +// background and styles, then formats using the default formats for its +// operands and writes to w. Spaces are added between operands when neither is +// a string. It returns the number of bytes written and any write error +// encountered. +func (c *Context) Fprint(w sgrWriter, args ...interface{}) { + w.writeSGR(c) + fmt.Fprint(w, args...) 
+ w.writeSGR(reset) +} + +func (c *Context) sgr() string { + var values attributes + if foreground := c.Foreground.foreground(); foreground != unknownAttribute { + values = append(values, foreground) + } + if background := c.Background.background(); background != unknownAttribute { + values = append(values, background) + } + for _, style := range c.Styles { + if value := style.enable(); value != unknownAttribute { + values = append(values, value) + } + } + return values.sgr() +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/ansiterm/context_test.go juju-core-2.0.0/src/github.com/juju/ansiterm/context_test.go --- juju-core-2.0~beta15/src/github.com/juju/ansiterm/context_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/ansiterm/context_test.go 2016-10-13 14:31:57.000000000 +0000 @@ -0,0 +1,104 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the LGPLv3, see LICENCE file for details. + +package ansiterm + +import ( + "bytes" + + gc "gopkg.in/check.v1" +) + +type contextSuite struct{} + +var _ = gc.Suite(&contextSuite{}) + +func (*contextSuite) newWriter() (*bytes.Buffer, *Writer) { + buff := &bytes.Buffer{} + writer := NewWriter(buff) + writer.noColor = false + return buff, writer +} + +func (*contextSuite) TestBlank(c *gc.C) { + var context Context + c.Assert(context.sgr(), gc.Equals, "") +} + +func (*contextSuite) TestAllUnknown(c *gc.C) { + context := Context{ + Foreground: 123, + Background: 432, + Styles: []Style{456, 99}, + } + c.Assert(context.sgr(), gc.Equals, "") +} + +func (*contextSuite) TestForeground(c *gc.C) { + context := Foreground(Yellow) + c.Assert(context.sgr(), gc.Equals, "\x1b[33m") +} + +func (*contextSuite) TestBackground(c *gc.C) { + context := Background(Blue) + c.Assert(context.sgr(), gc.Equals, "\x1b[44m") +} + +func (*contextSuite) TestStyles(c *gc.C) { + context := Styles(Bold, Italic) + c.Assert(context.sgr(), gc.Equals, "\x1b[1;3m") +} + +func (*contextSuite) TestValid(c *gc.C) { + context 
:= Context{ + Foreground: Yellow, + Background: Blue, + Styles: []Style{Bold, Italic}, + } + c.Assert(context.sgr(), gc.Equals, "\x1b[1;3;33;44m") +} + +func (*contextSuite) TestSetForeground(c *gc.C) { + var context Context + context.SetForeground(Yellow) + c.Assert(context.sgr(), gc.Equals, "\x1b[33m") +} + +func (*contextSuite) TestSetBackground(c *gc.C) { + var context Context + context.SetBackground(Blue) + c.Assert(context.sgr(), gc.Equals, "\x1b[44m") +} + +func (*contextSuite) TestSetStyles(c *gc.C) { + var context Context + context.SetStyle(Bold, Italic) + c.Assert(context.sgr(), gc.Equals, "\x1b[1;3m") +} + +func (s *contextSuite) TestFprintfNoColor(c *gc.C) { + buff, writer := s.newWriter() + writer.noColor = true + + context := Context{ + Foreground: Yellow, + Background: Blue, + Styles: []Style{Bold, Italic}, + } + + context.Fprintf(writer, "hello %s, %d", "world", 42) + c.Assert(buff.String(), gc.Equals, "hello world, 42") +} + +func (s *contextSuite) TestFprintfColor(c *gc.C) { + buff, writer := s.newWriter() + + context := Context{ + Foreground: Yellow, + Background: Blue, + Styles: []Style{Bold, Italic}, + } + + context.Fprintf(writer, "hello %s, %d", "world", 42) + c.Assert(buff.String(), gc.Equals, "\x1b[1;3;33;44mhello world, 42\x1b[0m") +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/ansiterm/doc.go juju-core-2.0.0/src/github.com/juju/ansiterm/doc.go --- juju-core-2.0~beta15/src/github.com/juju/ansiterm/doc.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/ansiterm/doc.go 2016-10-13 14:31:57.000000000 +0000 @@ -0,0 +1,6 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the LGPLv3, see LICENCE file for details. + +// Package ansiterm provides a Writer that writes out the ANSI escape +// codes for color and styles. 
+package ansiterm diff -Nru juju-core-2.0~beta15/src/github.com/juju/ansiterm/LICENSE juju-core-2.0.0/src/github.com/juju/ansiterm/LICENSE --- juju-core-2.0~beta15/src/github.com/juju/ansiterm/LICENSE 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/ansiterm/LICENSE 2016-10-13 14:31:57.000000000 +0000 @@ -0,0 +1,191 @@ +All files in this repository are licensed as follows. If you contribute +to this repository, it is assumed that you license your contribution +under the same license unless you state otherwise. + +All files Copyright (C) 2015 Canonical Ltd. unless otherwise specified in the file. + +This software is licensed under the LGPLv3, included below. + +As a special exception to the GNU Lesser General Public License version 3 +("LGPL3"), the copyright holders of this Library give you permission to +convey to a third party a Combined Work that links statically or dynamically +to this Library without providing any Minimal Corresponding Source or +Minimal Application Code as set out in 4d or providing the installation +information set out in section 4e, provided that you comply with the other +provisions of LGPL3 and provided that you meet, for the Application the +terms and conditions of the license(s) which apply to the Application. + +Except as stated in this special exception, the provisions of LGPL3 will +continue to comply in full to this Library. If you modify this Library, you +may apply this exception to your version of this Library, but you are not +obliged to do so. If you do not wish to do so, delete this exception +statement from your version. This exception does not (and cannot) modify any +license terms which apply to the Application, with which you must still +comply. + + + GNU LESSER GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. 
+ + + This version of the GNU Lesser General Public License incorporates +the terms and conditions of version 3 of the GNU General Public +License, supplemented by the additional permissions listed below. + + 0. Additional Definitions. + + As used herein, "this License" refers to version 3 of the GNU Lesser +General Public License, and the "GNU GPL" refers to version 3 of the GNU +General Public License. + + "The Library" refers to a covered work governed by this License, +other than an Application or a Combined Work as defined below. + + An "Application" is any work that makes use of an interface provided +by the Library, but which is not otherwise based on the Library. +Defining a subclass of a class defined by the Library is deemed a mode +of using an interface provided by the Library. + + A "Combined Work" is a work produced by combining or linking an +Application with the Library. The particular version of the Library +with which the Combined Work was made is also called the "Linked +Version". + + The "Minimal Corresponding Source" for a Combined Work means the +Corresponding Source for the Combined Work, excluding any source code +for portions of the Combined Work that, considered in isolation, are +based on the Application, and not on the Linked Version. + + The "Corresponding Application Code" for a Combined Work means the +object code and/or source code for the Application, including any data +and utility programs needed for reproducing the Combined Work from the +Application, but excluding the System Libraries of the Combined Work. + + 1. Exception to Section 3 of the GNU GPL. + + You may convey a covered work under sections 3 and 4 of this License +without being bound by section 3 of the GNU GPL. + + 2. Conveying Modified Versions. 
+ + If you modify a copy of the Library, and, in your modifications, a +facility refers to a function or data to be supplied by an Application +that uses the facility (other than as an argument passed when the +facility is invoked), then you may convey a copy of the modified +version: + + a) under this License, provided that you make a good faith effort to + ensure that, in the event an Application does not supply the + function or data, the facility still operates, and performs + whatever part of its purpose remains meaningful, or + + b) under the GNU GPL, with none of the additional permissions of + this License applicable to that copy. + + 3. Object Code Incorporating Material from Library Header Files. + + The object code form of an Application may incorporate material from +a header file that is part of the Library. You may convey such object +code under terms of your choice, provided that, if the incorporated +material is not limited to numerical parameters, data structure +layouts and accessors, or small macros, inline functions and templates +(ten or fewer lines in length), you do both of the following: + + a) Give prominent notice with each copy of the object code that the + Library is used in it and that the Library and its use are + covered by this License. + + b) Accompany the object code with a copy of the GNU GPL and this license + document. + + 4. Combined Works. + + You may convey a Combined Work under terms of your choice that, +taken together, effectively do not restrict modification of the +portions of the Library contained in the Combined Work and reverse +engineering for debugging such modifications, if you also do each of +the following: + + a) Give prominent notice with each copy of the Combined Work that + the Library is used in it and that the Library and its use are + covered by this License. + + b) Accompany the Combined Work with a copy of the GNU GPL and this license + document. 
+ + c) For a Combined Work that displays copyright notices during + execution, include the copyright notice for the Library among + these notices, as well as a reference directing the user to the + copies of the GNU GPL and this license document. + + d) Do one of the following: + + 0) Convey the Minimal Corresponding Source under the terms of this + License, and the Corresponding Application Code in a form + suitable for, and under terms that permit, the user to + recombine or relink the Application with a modified version of + the Linked Version to produce a modified Combined Work, in the + manner specified by section 6 of the GNU GPL for conveying + Corresponding Source. + + 1) Use a suitable shared library mechanism for linking with the + Library. A suitable mechanism is one that (a) uses at run time + a copy of the Library already present on the user's computer + system, and (b) will operate properly with a modified version + of the Library that is interface-compatible with the Linked + Version. + + e) Provide Installation Information, but only if you would otherwise + be required to provide such information under section 6 of the + GNU GPL, and only to the extent that such information is + necessary to install and execute a modified version of the + Combined Work produced by recombining or relinking the + Application with a modified version of the Linked Version. (If + you use option 4d0, the Installation Information must accompany + the Minimal Corresponding Source and Corresponding Application + Code. If you use option 4d1, you must provide the Installation + Information in the manner specified by section 6 of the GNU GPL + for conveying Corresponding Source.) + + 5. Combined Libraries. 
+ + You may place library facilities that are a work based on the +Library side by side in a single library together with other library +facilities that are not Applications and are not covered by this +License, and convey such a combined library under terms of your +choice, if you do both of the following: + + a) Accompany the combined library with a copy of the same work based + on the Library, uncombined with any other library facilities, + conveyed under the terms of this License. + + b) Give prominent notice with the combined library that part of it + is a work based on the Library, and explaining where to find the + accompanying uncombined form of the same work. + + 6. Revised Versions of the GNU Lesser General Public License. + + The Free Software Foundation may publish revised and/or new versions +of the GNU Lesser General Public License from time to time. Such new +versions will be similar in spirit to the present version, but may +differ in detail to address new problems or concerns. + + Each version is given a distinguishing version number. If the +Library as you received it specifies that a certain numbered version +of the GNU Lesser General Public License "or any later version" +applies to it, you have the option of following the terms and +conditions either of that published version or of any later version +published by the Free Software Foundation. If the Library as you +received it does not specify a version number of the GNU Lesser +General Public License, you may choose any version of the GNU Lesser +General Public License ever published by the Free Software Foundation. + + If the Library as you received it specifies that a proxy can decide +whether future versions of the GNU Lesser General Public License shall +apply, that proxy's public statement of acceptance of any version is +permanent authorization for you to choose that version for the +Library. 
diff -Nru juju-core-2.0~beta15/src/github.com/juju/ansiterm/Makefile juju-core-2.0.0/src/github.com/juju/ansiterm/Makefile --- juju-core-2.0~beta15/src/github.com/juju/ansiterm/Makefile 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/ansiterm/Makefile 2016-10-13 14:31:57.000000000 +0000 @@ -0,0 +1,14 @@ +# Copyright 2016 Canonical Ltd. +# Licensed under the LGPLv3, see LICENCE file for details. + +default: check + +check: + go test + +docs: + godoc2md github.com/juju/ansiterm > README.md + sed -i 's|\[godoc-link-here\]|[![GoDoc](https://godoc.org/github.com/juju/ansiterm?status.svg)](https://godoc.org/github.com/juju/ansiterm)|' README.md + + +.PHONY: default check docs diff -Nru juju-core-2.0~beta15/src/github.com/juju/ansiterm/package_test.go juju-core-2.0.0/src/github.com/juju/ansiterm/package_test.go --- juju-core-2.0~beta15/src/github.com/juju/ansiterm/package_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/ansiterm/package_test.go 2016-10-13 14:31:57.000000000 +0000 @@ -0,0 +1,14 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the LGPLv3, see LICENCE file for details. + +package ansiterm + +import ( + "testing" + + gc "gopkg.in/check.v1" +) + +func Test(t *testing.T) { + gc.TestingT(t) +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/ansiterm/README.md juju-core-2.0.0/src/github.com/juju/ansiterm/README.md --- juju-core-2.0~beta15/src/github.com/juju/ansiterm/README.md 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/ansiterm/README.md 2016-10-13 14:31:57.000000000 +0000 @@ -0,0 +1,323 @@ + +# ansiterm + import "github.com/juju/ansiterm" + +Package ansiterm provides a Writer that writes out the ANSI escape +codes for color and styles. + + + + + + + +## type Color +``` go +type Color int +``` +Color represents one of the standard 16 ANSI colors. 
+ + + +``` go +const ( + Default Color + Black + Red + Green + Yellow + Blue + Magenta + Cyan + Gray + DarkGray + BrightRed + BrightGreen + BrightYellow + BrightBlue + BrightMagenta + BrightCyan + White +) +``` + + + + + + + + +### func (Color) String +``` go +func (c Color) String() string +``` +String returns the name of the color. + + + +## type Context +``` go +type Context struct { + Foreground Color + Background Color + Styles []Style +} +``` +Context provides a way to specify both foreground and background colors +along with other styles and write text to a Writer with those colors and +styles. + + + + + + + + + +### func Background +``` go +func Background(color Color) *Context +``` +Background is a convenience function that creates a Context with the +specified color as the background color. + + +### func Foreground +``` go +func Foreground(color Color) *Context +``` +Foreground is a convenience function that creates a Context with the +specified color as the foreground color. + + +### func Styles +``` go +func Styles(styles ...Style) *Context +``` +Styles is a convenience function that creates a Context with the +specified styles set. + + + + +### func (\*Context) Fprint +``` go +func (c *Context) Fprint(w sgrWriter, args ...interface{}) +``` +Fprint will set the sgr values of the writer to the specified foreground, +background and styles, then formats using the default formats for its +operands and writes to w. Spaces are added between operands when neither is +a string. It returns the number of bytes written and any write error +encountered. + + + +### func (\*Context) Fprintf +``` go +func (c *Context) Fprintf(w sgrWriter, format string, args ...interface{}) +``` +Fprintf will set the sgr values of the writer to the specified +foreground, background and styles, then write the formatted string, +then reset the writer. 
+ + + +### func (\*Context) SetBackground +``` go +func (c *Context) SetBackground(color Color) *Context +``` +SetBackground sets the background to the specified color. + + + +### func (\*Context) SetForeground +``` go +func (c *Context) SetForeground(color Color) *Context +``` +SetForeground sets the foreground to the specified color. + + + +### func (\*Context) SetStyle +``` go +func (c *Context) SetStyle(styles ...Style) *Context +``` +SetStyle replaces the styles with the new values. + + + +## type Style +``` go +type Style int +``` + + +``` go +const ( + Bold Style + Faint + Italic + Underline + Blink + Reverse + Strikethrough + Conceal +) +``` + + + + + + + + +### func (Style) String +``` go +func (s Style) String() string +``` + + +## type TabWriter +``` go +type TabWriter struct { + Writer + // contains filtered or unexported fields +} +``` +TabWriter is a filter that inserts padding around tab-delimited +columns in its input to align them in the output. + +It also allows setting of colors and styles over and above the standard +tabwriter package. + + + + + + + + + +### func NewTabWriter +``` go +func NewTabWriter(output io.Writer, minwidth, tabwidth, padding int, padchar byte, flags uint) *TabWriter +``` +NewTabWriter returns a writer that is able to set colors and styles. +The ansi escape codes are stripped for width calculations. + + + + +### func (\*TabWriter) Flush +``` go +func (t *TabWriter) Flush() error +``` +Flush should be called after the last call to Write to ensure +that any data buffered in the Writer is written to output. Any +incomplete escape sequence at the end is considered +complete for formatting purposes. + + + +### func (\*TabWriter) Init +``` go +func (t *TabWriter) Init(output io.Writer, minwidth, tabwidth, padding int, padchar byte, flags uint) *TabWriter +``` +A Writer must be initialized with a call to Init. The first parameter (output) +specifies the filter output. 
The remaining parameters control the formatting: + + + minwidth minimal cell width including any padding + tabwidth width of tab characters (equivalent number of spaces) + padding padding added to a cell before computing its width + padchar ASCII char used for padding + if padchar == '\t', the Writer will assume that the + width of a '\t' in the formatted output is tabwidth, + and cells are left-aligned independent of align_left + (for correct-looking results, tabwidth must correspond + to the tab width in the viewer displaying the result) + flags formatting control + + + +## type Writer +``` go +type Writer struct { + io.Writer + // contains filtered or unexported fields +} +``` +Writer allows colors and styles to be specified. If the io.Writer +is not a terminal capable of color, all attempts to set colors or +styles are no-ops. + + + + + + + + + +### func NewWriter +``` go +func NewWriter(w io.Writer) *Writer +``` +NewWriter returns a Writer that allows the caller to specify colors and +styles. If the io.Writer is not a terminal capable of color, all attempts +to set colors or styles are no-ops. + + + + +### func (\*Writer) ClearStyle +``` go +func (w *Writer) ClearStyle(s Style) +``` +ClearStyle clears the text style. + + + +### func (\*Writer) Reset +``` go +func (w *Writer) Reset() +``` +Reset returns the default foreground and background colors with no styles. + + + +### func (\*Writer) SetBackground +``` go +func (w *Writer) SetBackground(c Color) +``` +SetBackground sets the background color. + + + +### func (\*Writer) SetForeground +``` go +func (w *Writer) SetForeground(c Color) +``` +SetForeground sets the foreground color. + + + +### func (\*Writer) SetStyle +``` go +func (w *Writer) SetStyle(s Style) +``` +SetStyle sets the text style. 
+ + + + + + + + + +- - - +Generated by [godoc2md](http://godoc.org/github.com/davecheney/godoc2md) \ No newline at end of file diff -Nru juju-core-2.0~beta15/src/github.com/juju/ansiterm/style.go juju-core-2.0.0/src/github.com/juju/ansiterm/style.go --- juju-core-2.0~beta15/src/github.com/juju/ansiterm/style.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/ansiterm/style.go 2016-10-13 14:31:57.000000000 +0000 @@ -0,0 +1,72 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the LGPLv3, see LICENCE file for details. + +package ansiterm + +const ( + _ Style = iota + Bold + Faint + Italic + Underline + Blink + Reverse + Strikethrough + Conceal +) + +type Style int + +func (s Style) String() string { + switch s { + case Bold: + return "bold" + case Faint: + return "faint" + case Italic: + return "italic" + case Underline: + return "underline" + case Blink: + return "blink" + case Reverse: + return "reverse" + case Strikethrough: + return "strikethrough" + case Conceal: + return "conceal" + default: + return "" + } +} + +func (s Style) enable() attribute { + switch s { + case Bold: + return 1 + case Faint: + return 2 + case Italic: + return 3 + case Underline: + return 4 + case Blink: + return 5 + case Reverse: + return 7 + case Conceal: + return 8 + case Strikethrough: + return 9 + default: + return unknownAttribute + } +} + +func (s Style) disable() attribute { + value := s.enable() + if value != unknownAttribute { + return value + 20 + } + return value +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/ansiterm/style_test.go juju-core-2.0.0/src/github.com/juju/ansiterm/style_test.go --- juju-core-2.0~beta15/src/github.com/juju/ansiterm/style_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/ansiterm/style_test.go 2016-10-13 14:31:57.000000000 +0000 @@ -0,0 +1,21 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the LGPLv3, see LICENCE file for details. 
+ +package ansiterm + +import gc "gopkg.in/check.v1" + +type styleSuite struct{} + +var _ = gc.Suite(&styleSuite{}) + +func (*styleSuite) TestString(c *gc.C) { + c.Check(Bold.String(), gc.Equals, "bold") + c.Check(Strikethrough.String(), gc.Equals, "strikethrough") + var blank Style + c.Check(blank.String(), gc.Equals, "") + var huge Style = 1234 + c.Check(huge.String(), gc.Equals, "") + var negative Style = -1 + c.Check(negative.String(), gc.Equals, "") +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/ansiterm/tabwriter/tabwriter.go juju-core-2.0.0/src/github.com/juju/ansiterm/tabwriter/tabwriter.go --- juju-core-2.0~beta15/src/github.com/juju/ansiterm/tabwriter/tabwriter.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/ansiterm/tabwriter/tabwriter.go 2016-10-13 14:31:57.000000000 +0000 @@ -0,0 +1,587 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file is mostly a copy of the go standard library text/tabwriter. With +// the additional stripping of ansi control characters for width calculations. + +// Package tabwriter implements a write filter (tabwriter.Writer) that +// translates tabbed columns in input into properly aligned text. +// +// The package is using the Elastic Tabstops algorithm described at +// http://nickgravgaard.com/elastictabstops/index.html. +// +package tabwriter + +import ( + "bytes" + "io" + "unicode/utf8" + + "github.com/lunixbochs/vtclean" +) + +// ---------------------------------------------------------------------------- +// Filter implementation + +// A cell represents a segment of text terminated by tabs or line breaks. +// The text itself is stored in a separate buffer; cell only describes the +// segment's size in bytes, its width in runes, and whether it's an htab +// ('\t') terminated cell. 
+// +type cell struct { + size int // cell size in bytes + width int // cell width in runes + htab bool // true if the cell is terminated by an htab ('\t') +} + +// A Writer is a filter that inserts padding around tab-delimited +// columns in its input to align them in the output. +// +// The Writer treats incoming bytes as UTF-8 encoded text consisting +// of cells terminated by (horizontal or vertical) tabs or line +// breaks (newline or formfeed characters). Cells in adjacent lines +// constitute a column. The Writer inserts padding as needed to +// make all cells in a column have the same width, effectively +// aligning the columns. It assumes that all characters have the +// same width except for tabs for which a tabwidth must be specified. +// Note that cells are tab-terminated, not tab-separated: trailing +// non-tab text at the end of a line does not form a column cell. +// +// The Writer assumes that all Unicode code points have the same width; +// this may not be true in some fonts. +// +// If DiscardEmptyColumns is set, empty columns that are terminated +// entirely by vertical (or "soft") tabs are discarded. Columns +// terminated by horizontal (or "hard") tabs are not affected by +// this flag. +// +// If a Writer is configured to filter HTML, HTML tags and entities +// are passed through. The widths of tags and entities are +// assumed to be zero (tags) and one (entities) for formatting purposes. +// +// A segment of text may be escaped by bracketing it with Escape +// characters. The tabwriter passes escaped text segments through +// unchanged. In particular, it does not interpret any tabs or line +// breaks within the segment. If the StripEscape flag is set, the +// Escape characters are stripped from the output; otherwise they +// are passed through as well. For the purpose of formatting, the +// width of the escaped text is always computed excluding the Escape +// characters. 
+// +// The formfeed character ('\f') acts like a newline but it also +// terminates all columns in the current line (effectively calling +// Flush). Cells in the next line start new columns. Unless found +// inside an HTML tag or inside an escaped text segment, formfeed +// characters appear as newlines in the output. +// +// The Writer must buffer input internally, because proper spacing +// of one line may depend on the cells in future lines. Clients must +// call Flush when done calling Write. +// +type Writer struct { + // configuration + output io.Writer + minwidth int + tabwidth int + padding int + padbytes [8]byte + flags uint + + // current state + buf bytes.Buffer // collected text excluding tabs or line breaks + pos int // buffer position up to which cell.width of incomplete cell has been computed + cell cell // current incomplete cell; cell.width is up to buf[pos] excluding ignored sections + endChar byte // terminating char of escaped sequence (Escape for escapes, '>', ';' for HTML tags/entities, or 0) + lines [][]cell // list of lines; each line is a list of cells + widths []int // list of column widths in runes - re-used during formatting + alignment map[int]uint // column alignment +} + +func (b *Writer) addLine() { b.lines = append(b.lines, []cell{}) } + +// Reset the current state. 
+func (b *Writer) reset() { + b.buf.Reset() + b.pos = 0 + b.cell = cell{} + b.endChar = 0 + b.lines = b.lines[0:0] + b.widths = b.widths[0:0] + b.alignment = make(map[int]uint) + b.addLine() +} + +// Internal representation (current state): +// +// - all text written is appended to buf; tabs and line breaks are stripped away +// - at any given time there is a (possibly empty) incomplete cell at the end +// (the cell starts after a tab or line break) +// - cell.size is the number of bytes belonging to the cell so far +// - cell.width is text width in runes of that cell from the start of the cell to +// position pos; html tags and entities are excluded from this width if html +// filtering is enabled +// - the sizes and widths of processed text are kept in the lines list +// which contains a list of cells for each line +// - the widths list is a temporary list with current widths used during +// formatting; it is kept in Writer because it's re-used +// +// |<---------- size ---------->| +// | | +// |<- width ->|<- ignored ->| | +// | | | | +// [---processed---tab------------......] +// ^ ^ ^ +// | | | +// buf start of incomplete cell pos + +// Formatting can be controlled with these flags. +const ( + // Ignore html tags and treat entities (starting with '&' + // and ending in ';') as single characters (width = 1). + FilterHTML uint = 1 << iota + + // Strip Escape characters bracketing escaped text segments + // instead of passing them through unchanged with the text. + StripEscape + + // Force right-alignment of cell content. + // Default is left-alignment. + AlignRight + + // Handle empty columns as if they were not present in + // the input in the first place. + DiscardEmptyColumns + + // Always use tabs for indentation columns (i.e., padding of + // leading empty cells on the left) independent of padchar. + TabIndent + + // Print a vertical bar ('|') between columns (after formatting). + // Discarded columns appear as zero-width columns ("||"). 
+ Debug +) + +// A Writer must be initialized with a call to Init. The first parameter (output) +// specifies the filter output. The remaining parameters control the formatting: +// +// minwidth minimal cell width including any padding +// tabwidth width of tab characters (equivalent number of spaces) +// padding padding added to a cell before computing its width +// padchar ASCII char used for padding +// if padchar == '\t', the Writer will assume that the +// width of a '\t' in the formatted output is tabwidth, +// and cells are left-aligned independent of align_left +// (for correct-looking results, tabwidth must correspond +// to the tab width in the viewer displaying the result) +// flags formatting control +// +func (b *Writer) Init(output io.Writer, minwidth, tabwidth, padding int, padchar byte, flags uint) *Writer { + if minwidth < 0 || tabwidth < 0 || padding < 0 { + panic("negative minwidth, tabwidth, or padding") + } + b.output = output + b.minwidth = minwidth + b.tabwidth = tabwidth + b.padding = padding + for i := range b.padbytes { + b.padbytes[i] = padchar + } + if padchar == '\t' { + // tab padding enforces left-alignment + flags &^= AlignRight + } + b.flags = flags + + b.reset() + + return b +} + +// debugging support (keep code around) +func (b *Writer) dump() { + pos := 0 + for i, line := range b.lines { + print("(", i, ") ") + for _, c := range line { + print("[", string(b.buf.Bytes()[pos:pos+c.size]), "]") + pos += c.size + } + print("\n") + } + print("\n") +} + +// local error wrapper so we can distinguish errors we want to return +// as errors from genuine panics (which we don't want to return as errors) +type osError struct { + err error +} + +func (b *Writer) write0(buf []byte) { + n, err := b.output.Write(buf) + if n != len(buf) && err == nil { + err = io.ErrShortWrite + } + if err != nil { + panic(osError{err}) + } +} + +func (b *Writer) writeN(src []byte, n int) { + for n > len(src) { + b.write0(src) + n -= len(src) + } + 
b.write0(src[0:n]) +} + +var ( + newline = []byte{'\n'} + tabs = []byte("\t\t\t\t\t\t\t\t") +) + +func (b *Writer) writePadding(textw, cellw int, useTabs bool) { + if b.padbytes[0] == '\t' || useTabs { + // padding is done with tabs + if b.tabwidth == 0 { + return // tabs have no width - can't do any padding + } + // make cellw the smallest multiple of b.tabwidth + cellw = (cellw + b.tabwidth - 1) / b.tabwidth * b.tabwidth + n := cellw - textw // amount of padding + if n < 0 { + panic("internal error") + } + b.writeN(tabs, (n+b.tabwidth-1)/b.tabwidth) + return + } + + // padding is done with non-tab characters + b.writeN(b.padbytes[0:], cellw-textw) +} + +var vbar = []byte{'|'} + +func (b *Writer) writeLines(pos0 int, line0, line1 int) (pos int) { + pos = pos0 + for i := line0; i < line1; i++ { + line := b.lines[i] + + // if TabIndent is set, use tabs to pad leading empty cells + useTabs := b.flags&TabIndent != 0 + + for j, c := range line { + if j > 0 && b.flags&Debug != 0 { + // indicate column break + b.write0(vbar) + } + + if c.size == 0 { + // empty cell + if j < len(b.widths) { + b.writePadding(c.width, b.widths[j], useTabs) + } + } else { + // non-empty cell + useTabs = false + alignColumnRight := b.alignment[j] == AlignRight + if (b.flags&AlignRight == 0) && !alignColumnRight { // align left + b.write0(b.buf.Bytes()[pos : pos+c.size]) + pos += c.size + if j < len(b.widths) { + b.writePadding(c.width, b.widths[j], false) + } + } else if alignColumnRight { + // just this column + internalSize := b.widths[j] - b.padding + if j < len(b.widths) { + b.writePadding(c.width, internalSize, false) + } + b.write0(b.buf.Bytes()[pos : pos+c.size]) + if b.padding > 0 { + b.writePadding(0, b.padding, false) + } + pos += c.size + } else { // align right + if j < len(b.widths) { + b.writePadding(c.width, b.widths[j], false) + } + b.write0(b.buf.Bytes()[pos : pos+c.size]) + pos += c.size + } + } + } + + if i+1 == len(b.lines) { + // last buffered line - we don't have a 
newline, so just write + // any outstanding buffered data + b.write0(b.buf.Bytes()[pos : pos+b.cell.size]) + pos += b.cell.size + } else { + // not the last line - write newline + b.write0(newline) + } + } + return +} + +// Format the text between line0 and line1 (excluding line1); pos +// is the buffer position corresponding to the beginning of line0. +// Returns the buffer position corresponding to the beginning of +// line1 and an error, if any. +// +func (b *Writer) format(pos0 int, line0, line1 int) (pos int) { + pos = pos0 + column := len(b.widths) + for this := line0; this < line1; this++ { + line := b.lines[this] + + if column < len(line)-1 { + // cell exists in this column => this line + // has more cells than the previous line + // (the last cell per line is ignored because cells are + // tab-terminated; the last cell per line describes the + // text before the newline/formfeed and does not belong + // to a column) + + // print unprinted lines until beginning of block + pos = b.writeLines(pos, line0, this) + line0 = this + + // column block begin + width := b.minwidth // minimal column width + discardable := true // true if all cells in this column are empty and "soft" + for ; this < line1; this++ { + line = b.lines[this] + if column < len(line)-1 { + // cell exists in this column + c := line[column] + // update width + if w := c.width + b.padding; w > width { + width = w + } + // update discardable + if c.width > 0 || c.htab { + discardable = false + } + } else { + break + } + } + // column block end + + // discard empty columns if necessary + if discardable && b.flags&DiscardEmptyColumns != 0 { + width = 0 + } + + // format and print all columns to the right of this column + // (we know the widths of this column and all columns to the left) + b.widths = append(b.widths, width) // push width + pos = b.format(pos, line0, this) + b.widths = b.widths[0 : len(b.widths)-1] // pop width + line0 = this + } + } + + // print unprinted lines until end + return 
b.writeLines(pos, line0, line1) +} + +// Append text to current cell. +func (b *Writer) append(text []byte) { + b.buf.Write(text) + b.cell.size += len(text) +} + +// Update the cell width. +func (b *Writer) updateWidth() { + // ---- Changes here ----- + newChars := b.buf.Bytes()[b.pos:b.buf.Len()] + cleaned := vtclean.Clean(string(newChars), false) // false to strip colors + b.cell.width += utf8.RuneCount([]byte(cleaned)) + // --- end of changes ---- + b.pos = b.buf.Len() +} + +// To escape a text segment, bracket it with Escape characters. +// For instance, the tab in this string "Ignore this tab: \xff\t\xff" +// does not terminate a cell and constitutes a single character of +// width one for formatting purposes. +// +// The value 0xff was chosen because it cannot appear in a valid UTF-8 sequence. +// +const Escape = '\xff' + +// Start escaped mode. +func (b *Writer) startEscape(ch byte) { + switch ch { + case Escape: + b.endChar = Escape + case '<': + b.endChar = '>' + case '&': + b.endChar = ';' + } +} + +// Terminate escaped mode. If the escaped text was an HTML tag, its width +// is assumed to be zero for formatting purposes; if it was an HTML entity, +// its width is assumed to be one. In all other cases, the width is the +// unicode width of the text. +// +func (b *Writer) endEscape() { + switch b.endChar { + case Escape: + b.updateWidth() + if b.flags&StripEscape == 0 { + b.cell.width -= 2 // don't count the Escape chars + } + case '>': // tag of zero width + case ';': + b.cell.width++ // entity, count as one rune + } + b.pos = b.buf.Len() + b.endChar = 0 +} + +// Terminate the current cell by adding it to the list of cells of the +// current line. Returns the number of cells in that line. 
+// +func (b *Writer) terminateCell(htab bool) int { + b.cell.htab = htab + line := &b.lines[len(b.lines)-1] + *line = append(*line, b.cell) + b.cell = cell{} + return len(*line) +} + +func handlePanic(err *error, op string) { + if e := recover(); e != nil { + if nerr, ok := e.(osError); ok { + *err = nerr.err + return + } + panic("tabwriter: panic during " + op) + } +} + +// Flush should be called after the last call to Write to ensure +// that any data buffered in the Writer is written to output. Any +// incomplete escape sequence at the end is considered +// complete for formatting purposes. +// +func (b *Writer) Flush() (err error) { + defer b.reset() // even in the presence of errors + defer handlePanic(&err, "Flush") + + // add current cell if not empty + if b.cell.size > 0 { + if b.endChar != 0 { + // inside escape - terminate it even if incomplete + b.endEscape() + } + b.terminateCell(false) + } + + // format contents of buffer + b.format(0, 0, len(b.lines)) + + return +} + +var hbar = []byte("---\n") + +// SetColumnAlignRight will mark a particular column as align right. +// This is reset on the next flush. +func (b *Writer) SetColumnAlignRight(column int) { + b.alignment[column] = AlignRight +} + +// Write writes buf to the writer b. +// The only errors returned are ones encountered +// while writing to the underlying output stream. +// +func (b *Writer) Write(buf []byte) (n int, err error) { + defer handlePanic(&err, "Write") + + // split text into cells + n = 0 + for i, ch := range buf { + if b.endChar == 0 { + // outside escape + switch ch { + case '\t', '\v', '\n', '\f': + // end of cell + b.append(buf[n:i]) + b.updateWidth() + n = i + 1 // ch consumed + ncells := b.terminateCell(ch == '\t') + if ch == '\n' || ch == '\f' { + // terminate line + b.addLine() + if ch == '\f' || ncells == 1 { + // A '\f' always forces a flush. 
Otherwise, if the previous + // line has only one cell which does not have an impact on + // the formatting of the following lines (the last cell per + // line is ignored by format()), thus we can flush the + // Writer contents. + if err = b.Flush(); err != nil { + return + } + if ch == '\f' && b.flags&Debug != 0 { + // indicate section break + b.write0(hbar) + } + } + } + + case Escape: + // start of escaped sequence + b.append(buf[n:i]) + b.updateWidth() + n = i + if b.flags&StripEscape != 0 { + n++ // strip Escape + } + b.startEscape(Escape) + + case '<', '&': + // possibly an html tag/entity + if b.flags&FilterHTML != 0 { + // begin of tag/entity + b.append(buf[n:i]) + b.updateWidth() + n = i + b.startEscape(ch) + } + } + + } else { + // inside escape + if ch == b.endChar { + // end of tag/entity + j := i + 1 + if ch == Escape && b.flags&StripEscape != 0 { + j = i // strip Escape + } + b.append(buf[n:j]) + n = i + 1 // ch consumed + b.endEscape() + } + } + } + + // append leftover text + b.append(buf[n:]) + n = len(buf) + return +} + +// NewWriter allocates and initializes a new tabwriter.Writer. +// The parameters are the same as for the Init function. +// +func NewWriter(output io.Writer, minwidth, tabwidth, padding int, padchar byte, flags uint) *Writer { + return new(Writer).Init(output, minwidth, tabwidth, padding, padchar, flags) +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/ansiterm/tabwriter.go juju-core-2.0.0/src/github.com/juju/ansiterm/tabwriter.go --- juju-core-2.0~beta15/src/github.com/juju/ansiterm/tabwriter.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/ansiterm/tabwriter.go 2016-10-13 14:31:57.000000000 +0000 @@ -0,0 +1,64 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the LGPLv3, see LICENCE file for details. + +package ansiterm + +import ( + "io" + + "github.com/juju/ansiterm/tabwriter" +) + +// NewTabWriter returns a writer that is able to set colors and styels. 
+// The ansi escape codes are stripped for width calculations.
+func NewTabWriter(output io.Writer, minwidth, tabwidth, padding int, padchar byte, flags uint) *TabWriter {
+	return new(TabWriter).Init(output, minwidth, tabwidth, padding, padchar, flags)
+}
+
+// TabWriter is a filter that inserts padding around tab-delimited
+// columns in its input to align them in the output.
+//
+// It also allows setting of colors and styles over and above the standard
+// tabwriter package.
+type TabWriter struct {
+	Writer
+	tw tabwriter.Writer
+}
+
+// Flush should be called after the last call to Write to ensure
+// that any data buffered in the Writer is written to output. Any
+// incomplete escape sequence at the end is considered
+// complete for formatting purposes.
+//
+func (t *TabWriter) Flush() error {
+	return t.tw.Flush()
+}
+
+// SetColumnAlignRight will mark a particular column as align right.
+// This is reset on the next flush.
+func (t *TabWriter) SetColumnAlignRight(column int) {
+	t.tw.SetColumnAlignRight(column)
+}
+
+// A Writer must be initialized with a call to Init. The first parameter (output)
+// specifies the filter output.
The remaining parameters control the formatting: +// +// minwidth minimal cell width including any padding +// tabwidth width of tab characters (equivalent number of spaces) +// padding padding added to a cell before computing its width +// padchar ASCII char used for padding +// if padchar == '\t', the Writer will assume that the +// width of a '\t' in the formatted output is tabwidth, +// and cells are left-aligned independent of align_left +// (for correct-looking results, tabwidth must correspond +// to the tab width in the viewer displaying the result) +// flags formatting control +// +func (t *TabWriter) Init(output io.Writer, minwidth, tabwidth, padding int, padchar byte, flags uint) *TabWriter { + writer, colorCapable := colorEnabledWriter(output) + t.Writer = Writer{ + Writer: t.tw.Init(writer, minwidth, tabwidth, padding, padchar, flags), + noColor: !colorCapable, + } + return t +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/ansiterm/terminal.go juju-core-2.0.0/src/github.com/juju/ansiterm/terminal.go --- juju-core-2.0~beta15/src/github.com/juju/ansiterm/terminal.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/ansiterm/terminal.go 2016-10-13 14:31:57.000000000 +0000 @@ -0,0 +1,32 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the LGPLv3, see LICENCE file for details. + +package ansiterm + +import ( + "io" + "os" + + "github.com/mattn/go-colorable" + "github.com/mattn/go-isatty" +) + +// colorEnabledWriter returns a writer that can handle the ansi color codes +// and true if the writer passed in is a terminal capable of color. If the +// TERM environment variable is set to "dumb", the terminal is not considered +// color capable. +func colorEnabledWriter(w io.Writer) (io.Writer, bool) { + f, ok := w.(*os.File) + if !ok { + return w, false + } + // Check the TERM environment variable specifically + // to check for "dumb" terminals. 
+ if os.Getenv("TERM") == "dumb" { + return w, false + } + if !isatty.IsTerminal(f.Fd()) { + return w, false + } + return colorable.NewColorable(f), true +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/ansiterm/writer.go juju-core-2.0.0/src/github.com/juju/ansiterm/writer.go --- juju-core-2.0~beta15/src/github.com/juju/ansiterm/writer.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/ansiterm/writer.go 2016-10-13 14:31:57.000000000 +0000 @@ -0,0 +1,74 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the LGPLv3, see LICENCE file for details. + +package ansiterm + +import ( + "fmt" + "io" +) + +// Writer allows colors and styles to be specified. If the io.Writer +// is not a terminal capable of color, all attempts to set colors or +// styles are no-ops. +type Writer struct { + io.Writer + + noColor bool +} + +// NewWriter returns a Writer that allows the caller to specify colors and +// styles. If the io.Writer is not a terminal capable of color, all attempts +// to set colors or styles are no-ops. +func NewWriter(w io.Writer) *Writer { + writer, colorCapable := colorEnabledWriter(w) + return &Writer{ + Writer: writer, + noColor: !colorCapable, + } +} + +// SetColorCapable forces the writer to either write the ANSI escape color +// if capable is true, or to not write them if capable is false. +func (w *Writer) SetColorCapable(capable bool) { + w.noColor = !capable +} + +// SetForeground sets the foreground color. +func (w *Writer) SetForeground(c Color) { + w.writeSGR(c.foreground()) +} + +// SetBackground sets the background color. +func (w *Writer) SetBackground(c Color) { + w.writeSGR(c.background()) +} + +// SetStyle sets the text style. +func (w *Writer) SetStyle(s Style) { + w.writeSGR(s.enable()) +} + +// ClearStyle clears the text style. +func (w *Writer) ClearStyle(s Style) { + w.writeSGR(s.disable()) +} + +// Reset returns the default foreground and background colors with no styles. 
+func (w *Writer) Reset() {
+	w.writeSGR(reset)
+}
+
+type sgr interface {
+	// sgr returns the combined escape sequence for the Select Graphic Rendition.
+	sgr() string
+}
+
+// writeSGR takes the appropriate integer SGR parameters
+// and writes out the ANSI escape code.
+func (w *Writer) writeSGR(value sgr) {
+	if w.noColor {
+		return
+	}
+	fmt.Fprint(w, value.sgr())
+}
diff -Nru juju-core-2.0~beta15/src/github.com/juju/ansiterm/writer_test.go juju-core-2.0.0/src/github.com/juju/ansiterm/writer_test.go
--- juju-core-2.0~beta15/src/github.com/juju/ansiterm/writer_test.go	1970-01-01 00:00:00.000000000 +0000
+++ juju-core-2.0.0/src/github.com/juju/ansiterm/writer_test.go	2016-10-13 14:31:57.000000000 +0000
@@ -0,0 +1,77 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package ansiterm
+
+import (
+	"bytes"
+
+	gc "gopkg.in/check.v1"
+)
+
+type writerSuite struct{}
+
+var _ = gc.Suite(&writerSuite{})
+
+func (*writerSuite) TestNoColor(c *gc.C) {
+	buff := &bytes.Buffer{}
+	writer := NewWriter(buff)
+	c.Check(writer.noColor, gc.Equals, true)
+
+	writer.SetForeground(Yellow)
+	writer.SetBackground(Blue)
+	writer.SetStyle(Bold)
+	writer.ClearStyle(Bold)
+	writer.Reset()
+
+	c.Check(buff.String(), gc.Equals, "")
+}
+
+func (*writerSuite) TestSetColorCapable(c *gc.C) {
+	buff := &bytes.Buffer{}
+	writer := NewWriter(buff)
+	c.Check(writer.noColor, gc.Equals, true)
+
+	writer.SetColorCapable(true)
+	c.Check(writer.noColor, gc.Equals, false)
+
+	writer.SetColorCapable(false)
+	c.Check(writer.noColor, gc.Equals, true)
+}
+
+func (*writerSuite) newWriter() (*bytes.Buffer, *Writer) {
+	buff := &bytes.Buffer{}
+	writer := NewWriter(buff)
+	writer.noColor = false
+	return buff, writer
+}
+
+func (s *writerSuite) TestSetForeground(c *gc.C) {
+	buff, writer := s.newWriter()
+	writer.SetForeground(Yellow)
+	c.Check(buff.String(), gc.Equals, "\x1b[33m")
+}
+
+func (s *writerSuite) TestSetBackground(c *gc.C) {
+	buff, writer :=
s.newWriter() + writer.SetBackground(Blue) + c.Check(buff.String(), gc.Equals, "\x1b[44m") +} + +func (s *writerSuite) TestSetStyle(c *gc.C) { + buff, writer := s.newWriter() + writer.SetStyle(Bold) + c.Check(buff.String(), gc.Equals, "\x1b[1m") +} + +func (s *writerSuite) TestClearStyle(c *gc.C) { + buff, writer := s.newWriter() + writer.ClearStyle(Bold) + c.Check(buff.String(), gc.Equals, "\x1b[21m") +} + +func (s *writerSuite) TestReset(c *gc.C) { + buff, writer := s.newWriter() + writer.Reset() + c.Check(buff.String(), gc.Equals, "\x1b[0m") +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/bundlechanges/.gitignore juju-core-2.0.0/src/github.com/juju/bundlechanges/.gitignore --- juju-core-2.0~beta15/src/github.com/juju/bundlechanges/.gitignore 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/bundlechanges/.gitignore 2016-10-13 14:31:56.000000000 +0000 @@ -0,0 +1,24 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof diff -Nru juju-core-2.0~beta15/src/github.com/juju/cmd/args.go juju-core-2.0.0/src/github.com/juju/cmd/args.go --- juju-core-2.0~beta15/src/github.com/juju/cmd/args.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/cmd/args.go 2016-10-13 14:32:34.000000000 +0000 @@ -6,7 +6,7 @@ import ( "strings" - "launchpad.net/gnuflag" + "github.com/juju/gnuflag" ) // StringsValue implements gnuflag.Value for a comma separated list of diff -Nru juju-core-2.0~beta15/src/github.com/juju/cmd/args_test.go juju-core-2.0.0/src/github.com/juju/cmd/args_test.go --- juju-core-2.0~beta15/src/github.com/juju/cmd/args_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/cmd/args_test.go 2016-10-13 14:32:34.000000000 +0000 @@ -7,9 +7,9 @@ "fmt" 
"io/ioutil" + "github.com/juju/gnuflag" "github.com/juju/testing" gc "gopkg.in/check.v1" - "launchpad.net/gnuflag" "github.com/juju/cmd" ) diff -Nru juju-core-2.0~beta15/src/github.com/juju/cmd/cmd.go juju-core-2.0.0/src/github.com/juju/cmd/cmd.go --- juju-core-2.0~beta15/src/github.com/juju/cmd/cmd.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/cmd/cmd.go 2016-10-13 14:32:34.000000000 +0000 @@ -14,7 +14,7 @@ "path/filepath" "strings" - "launchpad.net/gnuflag" + "github.com/juju/gnuflag" ) // RcPassthroughError indicates that a Juju plugin command exited with a diff -Nru juju-core-2.0~beta15/src/github.com/juju/cmd/cmd_test.go juju-core-2.0.0/src/github.com/juju/cmd/cmd_test.go --- juju-core-2.0~beta15/src/github.com/juju/cmd/cmd_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/cmd/cmd_test.go 2016-10-13 14:32:34.000000000 +0000 @@ -9,7 +9,7 @@ "os" "path/filepath" - "launchpad.net/gnuflag" + "github.com/juju/gnuflag" gc "gopkg.in/check.v1" diff -Nru juju-core-2.0~beta15/src/github.com/juju/cmd/cmdtesting/cmd.go juju-core-2.0.0/src/github.com/juju/cmd/cmdtesting/cmd.go --- juju-core-2.0~beta15/src/github.com/juju/cmd/cmdtesting/cmd.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/cmd/cmdtesting/cmd.go 2016-10-13 14:32:34.000000000 +0000 @@ -7,8 +7,8 @@ "bytes" "io/ioutil" + "github.com/juju/gnuflag" gc "gopkg.in/check.v1" - "launchpad.net/gnuflag" "github.com/juju/cmd" ) diff -Nru juju-core-2.0~beta15/src/github.com/juju/cmd/filevar_test.go juju-core-2.0.0/src/github.com/juju/cmd/filevar_test.go --- juju-core-2.0~beta15/src/github.com/juju/cmd/filevar_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/cmd/filevar_test.go 2016-10-13 14:32:34.000000000 +0000 @@ -10,11 +10,11 @@ "os" "path/filepath" + "github.com/juju/gnuflag" gitjujutesting "github.com/juju/testing" jc "github.com/juju/testing/checkers" "github.com/juju/utils" gc 
"gopkg.in/check.v1" - "launchpad.net/gnuflag" "github.com/juju/cmd" "github.com/juju/cmd/cmdtesting" diff -Nru juju-core-2.0~beta15/src/github.com/juju/cmd/help.go juju-core-2.0.0/src/github.com/juju/cmd/help.go --- juju-core-2.0~beta15/src/github.com/juju/cmd/help.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/cmd/help.go 2016-10-13 14:32:34.000000000 +0000 @@ -9,7 +9,7 @@ "sort" "strings" - "launchpad.net/gnuflag" + "github.com/juju/gnuflag" ) type helpCommand struct { diff -Nru juju-core-2.0~beta15/src/github.com/juju/cmd/logging.go juju-core-2.0.0/src/github.com/juju/cmd/logging.go --- juju-core-2.0~beta15/src/github.com/juju/cmd/logging.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/cmd/logging.go 2016-10-13 14:32:34.000000000 +0000 @@ -8,8 +8,9 @@ "io" "os" + "github.com/juju/ansiterm" + "github.com/juju/gnuflag" "github.com/juju/loggo" - "launchpad.net/gnuflag" ) // Log supplies the necessary functionality for Commands that wish to set up @@ -34,7 +35,7 @@ if l.NewWriter != nil { return l.NewWriter(target) } - return loggo.NewSimpleWriter(target, loggo.DefaultFormatter) + return loggo.NewColorWriter(target) } // AddFlags adds appropriate flags to f. @@ -92,8 +93,7 @@ loggo.RemoveWriter("default") // Create a simple writer that doesn't show filenames, or timestamps, // and only shows warning or above. - writer := loggo.NewSimpleWriter(ctx.Stderr, warningFormatter) - writer = loggo.NewMinimumLevelWriter(writer, loggo.WARNING) + writer := NewWarningWriter(ctx.Stderr) err := loggo.RegisterWriter("warning", writer) if err != nil { return err @@ -107,12 +107,6 @@ return nil } -// warningFormatter is a simple loggo formatter that produces something like: -// WARNING The message... -func warningFormatter(entry loggo.Entry) string { - return fmt.Sprintf("%s %s", entry.Level, entry.Message) -} - // NewCommandLogWriter creates a loggo writer for registration // by the callers of a command. 
This way the logged output can also // be displayed otherwise, e.g. on the screen. @@ -137,3 +131,21 @@ } } } + +type warningWriter struct { + writer *ansiterm.Writer +} + +// NewColorWriter will write out colored severity levels if the writer is +// outputting to a terminal. +func NewWarningWriter(writer io.Writer) loggo.Writer { + w := &warningWriter{ansiterm.NewWriter(writer)} + return loggo.NewMinimumLevelWriter(w, loggo.WARNING) +} + +// Write implements Writer. +// WARNING The message... +func (w *warningWriter) Write(entry loggo.Entry) { + loggo.SeverityColor[entry.Level].Fprintf(w.writer, entry.Level.String()) + fmt.Fprintf(w.writer, " %s\n", entry.Message) +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/cmd/output.go juju-core-2.0.0/src/github.com/juju/cmd/output.go --- juju-core-2.0~beta15/src/github.com/juju/cmd/output.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/cmd/output.go 2016-10-13 14:32:34.000000000 +0000 @@ -13,21 +13,21 @@ "strconv" "strings" + "github.com/juju/gnuflag" goyaml "gopkg.in/yaml.v2" - "launchpad.net/gnuflag" ) -// Formatter converts an arbitrary object into a []byte. -type Formatter func(value interface{}) ([]byte, error) +// Formatter writes the arbitrary object into the writer. +type Formatter func(writer io.Writer, value interface{}) error -// FormatYaml marshals value to a yaml-formatted []byte, unless value is nil. -func FormatYaml(value interface{}) ([]byte, error) { +// FormatYaml writes out value as yaml to the writer, unless value is nil. 
+func FormatYaml(writer io.Writer, value interface{}) error { if value == nil { - return nil, nil + return nil } result, err := goyaml.Marshal(value) if err != nil { - return nil, err + return err } for i := len(result) - 1; i > 0; i-- { if result[i] != '\n' { @@ -35,11 +35,25 @@ } result = result[:i] } - return result, nil + + if len(result) > 0 { + result = append(result, '\n') + _, err = writer.Write(result) + return err + } + return nil } -// FormatJson marshals value to a json-formatted []byte. -var FormatJson = json.Marshal +// FormatJson writes out value as json. +func FormatJson(writer io.Writer, value interface{}) error { + result, err := json.Marshal(value) + if err != nil { + return err + } + result = append(result, '\n') + _, err = writer.Write(result) + return err +} // FormatSmart marshals value into a []byte according to the following rules: // * string: untouched @@ -47,39 +61,52 @@ // * int or float: converted to sensible strings // * []string: joined by `\n`s into a single string // * anything else: delegate to FormatYaml -func FormatSmart(value interface{}) ([]byte, error) { +func FormatSmart(writer io.Writer, value interface{}) error { if value == nil { - return nil, nil + return nil } v := reflect.ValueOf(value) switch kind := v.Kind(); kind { case reflect.String: - return []byte(value.(string)), nil + if value == "" { + return nil + } + _, err := fmt.Fprintln(writer, value) + return err case reflect.Array: if v.Type().Elem().Kind() == reflect.String { slice := reflect.MakeSlice(reflect.TypeOf([]string(nil)), v.Len(), v.Len()) reflect.Copy(slice, v) - return []byte(strings.Join(slice.Interface().([]string), "\n")), nil + _, err := fmt.Fprintln(writer, strings.Join(slice.Interface().([]string), "\n")) + return err } case reflect.Slice: if v.Type().Elem().Kind() == reflect.String { - return []byte(strings.Join(value.([]string), "\n")), nil + out := strings.Join(value.([]string), "\n") + if out != "" { + out += "\n" + } + _, err := 
fmt.Fprint(writer, out) + return err } case reflect.Bool: + result := "False" if value.(bool) { - return []byte("True"), nil + result = "True" } - return []byte("False"), nil + _, err := fmt.Fprintln(writer, result) + return err case reflect.Float32, reflect.Float64: sv := strconv.FormatFloat(value.(float64), 'f', -1, 64) - return []byte(sv), nil + _, err := fmt.Fprintln(writer, sv) + return err case reflect.Map: case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: default: - return nil, fmt.Errorf("cannot marshal %#v", value) + return fmt.Errorf("cannot marshal %#v", value) } - return FormatYaml(value) + return FormatYaml(writer, value) } // DefaultFormatters holds the formatters that can be @@ -133,8 +160,8 @@ } // format runs the chosen formatter on value. -func (v *formatterValue) format(value interface{}) ([]byte, error) { - return v.formatters[v.name](value) +func (v *formatterValue) format(writer io.Writer, value interface{}) error { + return v.formatters[v.name](writer, value) } // Output is responsible for interpreting output-related command line flags @@ -167,15 +194,14 @@ defer f.Close() target = f } - bytes, err := c.formatter.format(value) - if err != nil { + + if err = c.formatter.format(target, value); err != nil { return } - if len(bytes) > 0 { - _, err = target.Write(bytes) - if err == nil { - _, err = target.Write([]byte{'\n'}) - } + // If the formatter is not one of the default ones, add a new line at the end. + // This keeps consistent behaviour with the current code. 
+ if _, found := DefaultFormatters[c.formatter.name]; !found { + fmt.Fprintln(target) } return } diff -Nru juju-core-2.0~beta15/src/github.com/juju/cmd/output_test.go juju-core-2.0.0/src/github.com/juju/cmd/output_test.go --- juju-core-2.0~beta15/src/github.com/juju/cmd/output_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/cmd/output_test.go 2016-10-13 14:32:34.000000000 +0000 @@ -4,8 +4,8 @@ package cmd_test import ( + "github.com/juju/gnuflag" gc "gopkg.in/check.v1" - "launchpad.net/gnuflag" "github.com/juju/cmd" "github.com/juju/cmd/cmdtesting" @@ -61,6 +61,7 @@ {"hello", "hello\n"}, {"\n\n\n", "\n\n\n\n"}, {"foo: bar", "foo: bar\n"}, + {[]string{}, ""}, {[]string{"blam", "dink"}, "blam\ndink\n"}, {map[interface{}]interface{}{"foo": "bar"}, "foo: bar\n"}, }, @@ -76,6 +77,7 @@ {"hello", "hello\n"}, {"\n\n\n", "\n\n\n\n"}, {"foo: bar", "foo: bar\n"}, + {[]string{}, ""}, {[]string{"blam", "dink"}, "blam\ndink\n"}, {[2]string{"blam", "dink"}, "blam\ndink\n"}, {map[interface{}]interface{}{"foo": "bar"}, "foo: bar\n"}, @@ -108,6 +110,7 @@ {"hello", "hello\n"}, {"\n\n\n", "|2+\n"}, {"foo: bar", "'foo: bar'\n"}, + {[]string{}, "[]\n"}, {[]string{"blam", "dink"}, "- blam\n- dink\n"}, {defaultValue, "juju: 1\npuppet: false\n"}, }, diff -Nru juju-core-2.0~beta15/src/github.com/juju/cmd/supercommand.go juju-core-2.0.0/src/github.com/juju/cmd/supercommand.go --- juju-core-2.0~beta15/src/github.com/juju/cmd/supercommand.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/cmd/supercommand.go 2016-10-13 14:32:34.000000000 +0000 @@ -10,8 +10,8 @@ "strings" "github.com/juju/errors" + "github.com/juju/gnuflag" "github.com/juju/loggo" - "launchpad.net/gnuflag" ) var logger = loggo.GetLogger("cmd") diff -Nru juju-core-2.0~beta15/src/github.com/juju/cmd/supercommand_test.go juju-core-2.0.0/src/github.com/juju/cmd/supercommand_test.go --- juju-core-2.0~beta15/src/github.com/juju/cmd/supercommand_test.go 2016-08-16 
08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/cmd/supercommand_test.go 2016-10-13 14:32:34.000000000 +0000 @@ -10,9 +10,9 @@ "path/filepath" "strings" + "github.com/juju/gnuflag" gitjujutesting "github.com/juju/testing" gc "gopkg.in/check.v1" - "launchpad.net/gnuflag" "github.com/juju/cmd" "github.com/juju/cmd/cmdtesting" diff -Nru juju-core-2.0~beta15/src/github.com/juju/cmd/util_test.go juju-core-2.0.0/src/github.com/juju/cmd/util_test.go --- juju-core-2.0~beta15/src/github.com/juju/cmd/util_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/cmd/util_test.go 2016-10-13 14:32:34.000000000 +0000 @@ -9,7 +9,7 @@ "fmt" "io" - "launchpad.net/gnuflag" + "github.com/juju/gnuflag" "github.com/juju/cmd" ) diff -Nru juju-core-2.0~beta15/src/github.com/juju/cmd/version.go juju-core-2.0.0/src/github.com/juju/cmd/version.go --- juju-core-2.0~beta15/src/github.com/juju/cmd/version.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/cmd/version.go 2016-10-13 14:32:34.000000000 +0000 @@ -4,7 +4,7 @@ package cmd import ( - "launchpad.net/gnuflag" + "github.com/juju/gnuflag" ) // versionCommand is a cmd.Command that prints the current version. 
diff -Nru juju-core-2.0~beta15/src/github.com/juju/errors/.gitignore juju-core-2.0.0/src/github.com/juju/errors/.gitignore --- juju-core-2.0~beta15/src/github.com/juju/errors/.gitignore 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/errors/.gitignore 2016-10-13 14:31:53.000000000 +0000 @@ -0,0 +1,23 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test diff -Nru juju-core-2.0~beta15/src/github.com/juju/gnuflag/export_test.go juju-core-2.0.0/src/github.com/juju/gnuflag/export_test.go --- juju-core-2.0~beta15/src/github.com/juju/gnuflag/export_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/gnuflag/export_test.go 2016-10-13 14:32:32.000000000 +0000 @@ -0,0 +1,19 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gnuflag + +import ( + "os" +) + +// Additional routines compiled into the package only during testing. + +// ResetForTesting clears all flag state and sets the usage function as directed. +// After calling ResetForTesting, parse errors in flag handling will not +// exit the program. +func ResetForTesting(usage func()) { + CommandLine = NewFlagSet(os.Args[0], ContinueOnError) + Usage = usage +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/gnuflag/flag.go juju-core-2.0.0/src/github.com/juju/gnuflag/flag.go --- juju-core-2.0~beta15/src/github.com/juju/gnuflag/flag.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/gnuflag/flag.go 2016-10-13 14:32:32.000000000 +0000 @@ -0,0 +1,936 @@ +// Copyright 2009 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* + Package flag implements command-line flag parsing in the GNU style. + It is almost exactly the same as the standard flag package, + the only difference being the extra argument to Parse. + + Command line flag syntax: + -f // single letter flag + -fg // two single letter flags together + --flag // multiple letter flag + --flag x // non-boolean flags only + -f x // non-boolean flags only + -fx // if f is a non-boolean flag, x is its argument. + + The last three forms are not permitted for boolean flags because the + meaning of the command + cmd -f * + will change if there is a file called 0, false, etc. There is currently + no way to turn off a boolean flag. + + Flag parsing stops after the terminator "--", or just before the first + non-flag argument ("-" is a non-flag argument) if the interspersed + argument to Parse is false. +*/ +package gnuflag + +import ( + "bytes" + "errors" + "fmt" + "io" + "os" + "sort" + "strconv" + "strings" + "time" + "unicode/utf8" +) + +// ErrHelp is the error returned if the -help or -h flag is invoked +// but no such flag is defined. 
+var ErrHelp = errors.New("flag: help requested") + +// -- bool Value +type boolValue bool + +func newBoolValue(val bool, p *bool) *boolValue { + *p = val + return (*boolValue)(p) +} + +func (b *boolValue) Set(s string) error { + v, err := strconv.ParseBool(s) + *b = boolValue(v) + return err +} + +func (b *boolValue) Get() interface{} { return bool(*b) } + +func (b *boolValue) String() string { return fmt.Sprintf("%v", *b) } + +func (b *boolValue) IsBoolFlag() bool { return true } + +// optional interface to indicate boolean flags that can be +// supplied without "=value" text +type boolFlag interface { + Value + IsBoolFlag() bool +} + +// -- int Value +type intValue int + +func newIntValue(val int, p *int) *intValue { + *p = val + return (*intValue)(p) +} + +func (i *intValue) Set(s string) error { + v, err := strconv.ParseInt(s, 0, 64) + *i = intValue(v) + return err +} + +func (i *intValue) Get() interface{} { return int(*i) } + +func (i *intValue) String() string { return fmt.Sprintf("%v", *i) } + +// -- int64 Value +type int64Value int64 + +func newInt64Value(val int64, p *int64) *int64Value { + *p = val + return (*int64Value)(p) +} + +func (i *int64Value) Set(s string) error { + v, err := strconv.ParseInt(s, 0, 64) + *i = int64Value(v) + return err +} + +func (i *int64Value) Get() interface{} { return int64(*i) } + +func (i *int64Value) String() string { return fmt.Sprintf("%v", *i) } + +// -- uint Value +type uintValue uint + +func newUintValue(val uint, p *uint) *uintValue { + *p = val + return (*uintValue)(p) +} + +func (i *uintValue) Set(s string) error { + v, err := strconv.ParseUint(s, 0, 64) + *i = uintValue(v) + return err +} + +func (i *uintValue) Get() interface{} { return uint(*i) } + +func (i *uintValue) String() string { return fmt.Sprintf("%v", *i) } + +// -- uint64 Value +type uint64Value uint64 + +func newUint64Value(val uint64, p *uint64) *uint64Value { + *p = val + return (*uint64Value)(p) +} + +func (i *uint64Value) Set(s string) error { + 
v, err := strconv.ParseUint(s, 0, 64) + *i = uint64Value(v) + return err +} + +func (i *uint64Value) Get() interface{} { return uint64(*i) } + +func (i *uint64Value) String() string { return fmt.Sprintf("%v", *i) } + +// -- string Value +type stringValue string + +func newStringValue(val string, p *string) *stringValue { + *p = val + return (*stringValue)(p) +} + +func (s *stringValue) Set(val string) error { + *s = stringValue(val) + return nil +} + +func (s *stringValue) Get() interface{} { return string(*s) } + +func (s *stringValue) String() string { return fmt.Sprintf("%s", *s) } + +// -- float64 Value +type float64Value float64 + +func newFloat64Value(val float64, p *float64) *float64Value { + *p = val + return (*float64Value)(p) +} + +func (f *float64Value) Set(s string) error { + v, err := strconv.ParseFloat(s, 64) + *f = float64Value(v) + return err +} + +func (f *float64Value) Get() interface{} { return float64(*f) } + +func (f *float64Value) String() string { return fmt.Sprintf("%v", *f) } + +// -- time.Duration Value +type durationValue time.Duration + +func newDurationValue(val time.Duration, p *time.Duration) *durationValue { + *p = val + return (*durationValue)(p) +} + +func (d *durationValue) Set(s string) error { + v, err := time.ParseDuration(s) + *d = durationValue(v) + return err +} + +func (d *durationValue) Get() interface{} { return time.Duration(*d) } + +func (d *durationValue) String() string { return (*time.Duration)(d).String() } + +// Value is the interface to the dynamic value stored in a flag. +// (The default value is represented as a string.) +type Value interface { + String() string + Set(string) error +} + +// Getter is an interface that allows the contents of a Value to be retrieved. +// It wraps the Value interface, rather than being part of it, because it +// appeared after Go 1 and its compatibility rules. All Value types provided +// by this package satisfy the Getter interface. 
+type Getter interface { + Value + Get() interface{} +} + +// ErrorHandling defines how to handle flag parsing errors. +type ErrorHandling int + +const ( + ContinueOnError ErrorHandling = iota + ExitOnError + PanicOnError +) + +// A FlagSet represents a set of defined flags. +type FlagSet struct { + // Usage is the function called when an error occurs while parsing flags. + // The field is a function (not a method) that may be changed to point to + // a custom error handler. + Usage func() + + name string + parsed bool + actual map[string]*Flag + formal map[string]*Flag + args []string // arguments after flags + procArgs []string // arguments being processed (gnu only) + procFlag string // flag being processed (gnu only) + allowIntersperse bool // (gnu only) + exitOnError bool // does the program exit if there's an error? + errorHandling ErrorHandling + output io.Writer // nil means stderr; use out() accessor +} + +// A Flag represents the state of a flag. +type Flag struct { + Name string // name as it appears on command line + Usage string // help message + Value Value // value as set + DefValue string // default value (as text); for usage message +} + +// sortFlags returns the flags as a slice in lexicographical sorted order. +func sortFlags(flags map[string]*Flag) []*Flag { + list := make(sort.StringSlice, len(flags)) + i := 0 + for _, f := range flags { + list[i] = f.Name + i++ + } + list.Sort() + result := make([]*Flag, len(list)) + for i, name := range list { + result[i] = flags[name] + } + return result +} + +func (f *FlagSet) out() io.Writer { + if f.output == nil { + return os.Stderr + } + return f.output +} + +// SetOutput sets the destination for usage and error messages. +// If output is nil, os.Stderr is used. +func (f *FlagSet) SetOutput(output io.Writer) { + f.output = output +} + +// VisitAll visits the flags in lexicographical order, calling fn for each. +// It visits all flags, even those not set. 
+func (f *FlagSet) VisitAll(fn func(*Flag)) { + for _, flag := range sortFlags(f.formal) { + fn(flag) + } +} + +// VisitAll visits the command-line flags in lexicographical order, calling +// fn for each. It visits all flags, even those not set. +func VisitAll(fn func(*Flag)) { + CommandLine.VisitAll(fn) +} + +// Visit visits the flags in lexicographical order, calling fn for each. +// It visits only those flags that have been set. +func (f *FlagSet) Visit(fn func(*Flag)) { + for _, flag := range sortFlags(f.actual) { + fn(flag) + } +} + +// Visit visits the command-line flags in lexicographical order, calling fn +// for each. It visits only those flags that have been set. +func Visit(fn func(*Flag)) { + CommandLine.Visit(fn) +} + +// Lookup returns the Flag structure of the named flag, returning nil if none exists. +func (f *FlagSet) Lookup(name string) *Flag { + return f.formal[name] +} + +// Lookup returns the Flag structure of the named command-line flag, +// returning nil if none exists. +func Lookup(name string) *Flag { + return CommandLine.formal[name] +} + +// Set sets the value of the named flag. +func (f *FlagSet) Set(name, value string) error { + flag, ok := f.formal[name] + if !ok { + return fmt.Errorf("no such flag -%v", name) + } + err := flag.Value.Set(value) + if err != nil { + return err + } + if f.actual == nil { + f.actual = make(map[string]*Flag) + } + f.actual[name] = flag + return nil +} + +// Set sets the value of the named command-line flag. +func Set(name, value string) error { + return CommandLine.Set(name, value) +} + +// flagsByLength is a slice of flags implementing sort.Interface, +// sorting primarily by the length of the flag, and secondarily +// alphabetically. 
+type flagsByLength []*Flag + +func (f flagsByLength) Less(i, j int) bool { + s1, s2 := f[i].Name, f[j].Name + if len(s1) != len(s2) { + return len(s1) < len(s2) + } + return s1 < s2 +} +func (f flagsByLength) Swap(i, j int) { + f[i], f[j] = f[j], f[i] +} +func (f flagsByLength) Len() int { + return len(f) +} + +// flagsByName is a slice of slices of flags implementing sort.Interface, +// alphabetically sorting by the name of the first flag in each slice. +type flagsByName [][]*Flag + +func (f flagsByName) Less(i, j int) bool { + return f[i][0].Name < f[j][0].Name +} +func (f flagsByName) Swap(i, j int) { + f[i], f[j] = f[j], f[i] +} +func (f flagsByName) Len() int { + return len(f) +} + +// PrintDefaults prints, to standard error unless configured +// otherwise, the default values of all defined flags in the set. +// If there is more than one name for a given flag, the usage information and +// default value from the shortest will be printed (or the least alphabetically +// if there are several equally short flag names). +func (f *FlagSet) PrintDefaults() { + // group together all flags for a given value + flags := make(map[interface{}]flagsByLength) + f.VisitAll(func(f *Flag) { + flags[f.Value] = append(flags[f.Value], f) + }) + + // sort the output flags by shortest name for each group. + var byName flagsByName + for _, f := range flags { + sort.Sort(f) + byName = append(byName, f) + } + sort.Sort(byName) + + var line bytes.Buffer + for _, fs := range byName { + line.Reset() + for i, f := range fs { + if i > 0 { + line.WriteString(", ") + } + line.WriteString(flagWithMinus(f.Name)) + } + format := "%s (= %s)\n %s\n" + if _, ok := fs[0].Value.(*stringValue); ok { + // put quotes on the value + format = "%s (= %q)\n %s\n" + } + fmt.Fprintf(f.out(), format, line.Bytes(), fs[0].DefValue, fs[0].Usage) + } +} + +// PrintDefaults prints to standard error the default values of all defined command-line flags. 
+func PrintDefaults() { + CommandLine.PrintDefaults() +} + +// defaultUsage is the default function to print a usage message. +func defaultUsage(f *FlagSet) { + if f.name == "" { + fmt.Fprintf(f.out(), "Usage:\n") + } else { + fmt.Fprintf(f.out(), "Usage of %s:\n", f.name) + } + f.PrintDefaults() +} + +// NOTE: Usage is not just defaultUsage(CommandLine) +// because it serves (via godoc flag Usage) as the example +// for how to write your own usage function. + +// Usage prints to standard error a usage message documenting all defined command-line flags. +// The function is a variable that may be changed to point to a custom function. +var Usage = func() { + fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0]) + PrintDefaults() +} + +// NFlag returns the number of flags that have been set. +func (f *FlagSet) NFlag() int { return len(f.actual) } + +// NFlag returns the number of command-line flags that have been set. +func NFlag() int { return len(CommandLine.actual) } + +// Arg returns the i'th argument. Arg(0) is the first remaining argument +// after flags have been processed. +func (f *FlagSet) Arg(i int) string { + if i < 0 || i >= len(f.args) { + return "" + } + return f.args[i] +} + +// Arg returns the i'th command-line argument. Arg(0) is the first remaining argument +// after flags have been processed. +func Arg(i int) string { + return CommandLine.Arg(i) +} + +// NArg is the number of arguments remaining after flags have been processed. +func (f *FlagSet) NArg() int { return len(f.args) } + +// NArg is the number of arguments remaining after flags have been processed. +func NArg() int { return len(CommandLine.args) } + +// Args returns the non-flag arguments. +func (f *FlagSet) Args() []string { return f.args } + +// Args returns the non-flag command-line arguments. +func Args() []string { return CommandLine.args } + +// BoolVar defines a bool flag with specified name, default value, and usage string. 
+// The argument p points to a bool variable in which to store the value of the flag. +func (f *FlagSet) BoolVar(p *bool, name string, value bool, usage string) { + f.Var(newBoolValue(value, p), name, usage) +} + +// BoolVar defines a bool flag with specified name, default value, and usage string. +// The argument p points to a bool variable in which to store the value of the flag. +func BoolVar(p *bool, name string, value bool, usage string) { + CommandLine.Var(newBoolValue(value, p), name, usage) +} + +// Bool defines a bool flag with specified name, default value, and usage string. +// The return value is the address of a bool variable that stores the value of the flag. +func (f *FlagSet) Bool(name string, value bool, usage string) *bool { + p := new(bool) + f.BoolVar(p, name, value, usage) + return p +} + +// Bool defines a bool flag with specified name, default value, and usage string. +// The return value is the address of a bool variable that stores the value of the flag. +func Bool(name string, value bool, usage string) *bool { + return CommandLine.Bool(name, value, usage) +} + +// IntVar defines an int flag with specified name, default value, and usage string. +// The argument p points to an int variable in which to store the value of the flag. +func (f *FlagSet) IntVar(p *int, name string, value int, usage string) { + f.Var(newIntValue(value, p), name, usage) +} + +// IntVar defines an int flag with specified name, default value, and usage string. +// The argument p points to an int variable in which to store the value of the flag. +func IntVar(p *int, name string, value int, usage string) { + CommandLine.Var(newIntValue(value, p), name, usage) +} + +// Int defines an int flag with specified name, default value, and usage string. +// The return value is the address of an int variable that stores the value of the flag. 
+func (f *FlagSet) Int(name string, value int, usage string) *int { + p := new(int) + f.IntVar(p, name, value, usage) + return p +} + +// Int defines an int flag with specified name, default value, and usage string. +// The return value is the address of an int variable that stores the value of the flag. +func Int(name string, value int, usage string) *int { + return CommandLine.Int(name, value, usage) +} + +// Int64Var defines an int64 flag with specified name, default value, and usage string. +// The argument p points to an int64 variable in which to store the value of the flag. +func (f *FlagSet) Int64Var(p *int64, name string, value int64, usage string) { + f.Var(newInt64Value(value, p), name, usage) +} + +// Int64Var defines an int64 flag with specified name, default value, and usage string. +// The argument p points to an int64 variable in which to store the value of the flag. +func Int64Var(p *int64, name string, value int64, usage string) { + CommandLine.Var(newInt64Value(value, p), name, usage) +} + +// Int64 defines an int64 flag with specified name, default value, and usage string. +// The return value is the address of an int64 variable that stores the value of the flag. +func (f *FlagSet) Int64(name string, value int64, usage string) *int64 { + p := new(int64) + f.Int64Var(p, name, value, usage) + return p +} + +// Int64 defines an int64 flag with specified name, default value, and usage string. +// The return value is the address of an int64 variable that stores the value of the flag. +func Int64(name string, value int64, usage string) *int64 { + return CommandLine.Int64(name, value, usage) +} + +// UintVar defines a uint flag with specified name, default value, and usage string. +// The argument p points to a uint variable in which to store the value of the flag. 
+func (f *FlagSet) UintVar(p *uint, name string, value uint, usage string) { + f.Var(newUintValue(value, p), name, usage) +} + +// UintVar defines a uint flag with specified name, default value, and usage string. +// The argument p points to a uint variable in which to store the value of the flag. +func UintVar(p *uint, name string, value uint, usage string) { + CommandLine.Var(newUintValue(value, p), name, usage) +} + +// Uint defines a uint flag with specified name, default value, and usage string. +// The return value is the address of a uint variable that stores the value of the flag. +func (f *FlagSet) Uint(name string, value uint, usage string) *uint { + p := new(uint) + f.UintVar(p, name, value, usage) + return p +} + +// Uint defines a uint flag with specified name, default value, and usage string. +// The return value is the address of a uint variable that stores the value of the flag. +func Uint(name string, value uint, usage string) *uint { + return CommandLine.Uint(name, value, usage) +} + +// Uint64Var defines a uint64 flag with specified name, default value, and usage string. +// The argument p points to a uint64 variable in which to store the value of the flag. +func (f *FlagSet) Uint64Var(p *uint64, name string, value uint64, usage string) { + f.Var(newUint64Value(value, p), name, usage) +} + +// Uint64Var defines a uint64 flag with specified name, default value, and usage string. +// The argument p points to a uint64 variable in which to store the value of the flag. +func Uint64Var(p *uint64, name string, value uint64, usage string) { + CommandLine.Var(newUint64Value(value, p), name, usage) +} + +// Uint64 defines a uint64 flag with specified name, default value, and usage string. +// The return value is the address of a uint64 variable that stores the value of the flag. 
+func (f *FlagSet) Uint64(name string, value uint64, usage string) *uint64 { + p := new(uint64) + f.Uint64Var(p, name, value, usage) + return p +} + +// Uint64 defines a uint64 flag with specified name, default value, and usage string. +// The return value is the address of a uint64 variable that stores the value of the flag. +func Uint64(name string, value uint64, usage string) *uint64 { + return CommandLine.Uint64(name, value, usage) +} + +// StringVar defines a string flag with specified name, default value, and usage string. +// The argument p points to a string variable in which to store the value of the flag. +func (f *FlagSet) StringVar(p *string, name string, value string, usage string) { + f.Var(newStringValue(value, p), name, usage) +} + +// StringVar defines a string flag with specified name, default value, and usage string. +// The argument p points to a string variable in which to store the value of the flag. +func StringVar(p *string, name string, value string, usage string) { + CommandLine.Var(newStringValue(value, p), name, usage) +} + +// String defines a string flag with specified name, default value, and usage string. +// The return value is the address of a string variable that stores the value of the flag. +func (f *FlagSet) String(name string, value string, usage string) *string { + p := new(string) + f.StringVar(p, name, value, usage) + return p +} + +// String defines a string flag with specified name, default value, and usage string. +// The return value is the address of a string variable that stores the value of the flag. +func String(name string, value string, usage string) *string { + return CommandLine.String(name, value, usage) +} + +// Float64Var defines a float64 flag with specified name, default value, and usage string. +// The argument p points to a float64 variable in which to store the value of the flag. 
+func (f *FlagSet) Float64Var(p *float64, name string, value float64, usage string) { + f.Var(newFloat64Value(value, p), name, usage) +} + +// Float64Var defines a float64 flag with specified name, default value, and usage string. +// The argument p points to a float64 variable in which to store the value of the flag. +func Float64Var(p *float64, name string, value float64, usage string) { + CommandLine.Var(newFloat64Value(value, p), name, usage) +} + +// Float64 defines a float64 flag with specified name, default value, and usage string. +// The return value is the address of a float64 variable that stores the value of the flag. +func (f *FlagSet) Float64(name string, value float64, usage string) *float64 { + p := new(float64) + f.Float64Var(p, name, value, usage) + return p +} + +// Float64 defines a float64 flag with specified name, default value, and usage string. +// The return value is the address of a float64 variable that stores the value of the flag. +func Float64(name string, value float64, usage string) *float64 { + return CommandLine.Float64(name, value, usage) +} + +// DurationVar defines a time.Duration flag with specified name, default value, and usage string. +// The argument p points to a time.Duration variable in which to store the value of the flag. +func (f *FlagSet) DurationVar(p *time.Duration, name string, value time.Duration, usage string) { + f.Var(newDurationValue(value, p), name, usage) +} + +// DurationVar defines a time.Duration flag with specified name, default value, and usage string. +// The argument p points to a time.Duration variable in which to store the value of the flag. +func DurationVar(p *time.Duration, name string, value time.Duration, usage string) { + CommandLine.Var(newDurationValue(value, p), name, usage) +} + +// Duration defines a time.Duration flag with specified name, default value, and usage string. +// The return value is the address of a time.Duration variable that stores the value of the flag. 
+func (f *FlagSet) Duration(name string, value time.Duration, usage string) *time.Duration { + p := new(time.Duration) + f.DurationVar(p, name, value, usage) + return p +} + +// Duration defines a time.Duration flag with specified name, default value, and usage string. +// The return value is the address of a time.Duration variable that stores the value of the flag. +func Duration(name string, value time.Duration, usage string) *time.Duration { + return CommandLine.Duration(name, value, usage) +} + +// Var defines a flag with the specified name and usage string. The type and +// value of the flag are represented by the first argument, of type Value, which +// typically holds a user-defined implementation of Value. For instance, the +// caller could create a flag that turns a comma-separated string into a slice +// of strings by giving the slice the methods of Value; in particular, Set would +// decompose the comma-separated string into the slice. +func (f *FlagSet) Var(value Value, name string, usage string) { + // Remember the default value as a string; it won't change. + flag := &Flag{name, usage, value, value.String()} + _, alreadythere := f.formal[name] + if alreadythere { + fmt.Fprintf(f.out(), "%s flag redefined: %s\n", f.name, name) + panic("flag redefinition") // Happens only if flags are declared with identical names + } + if f.formal == nil { + f.formal = make(map[string]*Flag) + } + f.formal[name] = flag +} + +// Var defines a flag with the specified name and usage string. The type and +// value of the flag are represented by the first argument, of type Value, which +// typically holds a user-defined implementation of Value. For instance, the +// caller could create a flag that turns a comma-separated string into a slice +// of strings by giving the slice the methods of Value; in particular, Set would +// decompose the comma-separated string into the slice. 
+func Var(value Value, name string, usage string) { + CommandLine.Var(value, name, usage) +} + +// failf prints to standard error a formatted error and usage message and +// returns the error. +func (f *FlagSet) failf(format string, a ...interface{}) error { + err := fmt.Errorf(format, a...) + fmt.Fprintln(f.out(), err) + f.usage() + return err +} + +// usage calls the Usage method for the flag set, or the usage function if +// the flag set is CommandLine. +func (f *FlagSet) usage() { + if f.Usage == nil { + if f == CommandLine { + Usage() + } else { + defaultUsage(f) + } + } else { + f.Usage() + } +} + +func (f *FlagSet) parseOne() (flagName string, long, finished bool, err error) { + if len(f.procArgs) == 0 { + finished = true + return + } + + // processing previously encountered single-rune flag + if flag := f.procFlag; len(flag) > 0 { + _, n := utf8.DecodeRuneInString(flag) + f.procFlag = flag[n:] + flagName = flag[0:n] + return + } + + a := f.procArgs[0] + + // one non-flag argument + if a == "-" || a == "" || a[0] != '-' { + if f.allowIntersperse { + f.args = append(f.args, a) + f.procArgs = f.procArgs[1:] + return + } + f.args = append(f.args, f.procArgs...) + f.procArgs = nil + finished = true + return + } + + // end of flags + if f.procArgs[0] == "--" { + f.args = append(f.args, f.procArgs[1:]...) 
+ f.procArgs = nil + finished = true + return + } + + // long flag signified with "--" prefix + if a[1] == '-' { + long = true + i := strings.Index(a, "=") + if i < 0 { + f.procArgs = f.procArgs[1:] + flagName = a[2:] + return + } + flagName = a[2:i] + if flagName == "" { + err = fmt.Errorf("empty flag in argument %q", a) + return + } + f.procArgs = f.procArgs[1:] + f.procFlag = a[i:] + return + } + + // some number of single-rune flags + a = a[1:] + _, n := utf8.DecodeRuneInString(a) + flagName = a[0:n] + f.procFlag = a[n:] + f.procArgs = f.procArgs[1:] + return +} + +func flagWithMinus(name string) string { + if len(name) > 1 { + return "--" + name + } + return "-" + name +} + +func (f *FlagSet) parseFlagArg(name string, long bool) (finished bool, err error) { + m := f.formal + flag, alreadythere := m[name] // BUG + if !alreadythere { + if name == "help" || name == "h" { // special case for nice help message. + f.usage() + return false, ErrHelp + } + // TODO print --xxx when flag is more than one rune. + return false, f.failf("flag provided but not defined: %s", flagWithMinus(name)) + } + if fv, ok := flag.Value.(boolFlag); ok && fv.IsBoolFlag() && !strings.HasPrefix(f.procFlag, "=") { + // special case: doesn't need an arg, and an arg hasn't + // been provided explicitly. + if err := fv.Set("true"); err != nil { + return false, f.failf("invalid boolean flag %s: %v", name, err) + } + } else { + // It must have a value, which might be the next argument. 
+ var hasValue bool + var value string + if f.procFlag != "" { + // value directly follows flag + value = f.procFlag + if long { + if value[0] != '=' { + panic("no leading '=' in long flag") + } + value = value[1:] + } + hasValue = true + f.procFlag = "" + } + if !hasValue && len(f.procArgs) > 0 { + // value is the next arg + hasValue = true + value, f.procArgs = f.procArgs[0], f.procArgs[1:] + } + if !hasValue { + return false, f.failf("flag needs an argument: %s", flagWithMinus(name)) + } + if err := flag.Value.Set(value); err != nil { + return false, f.failf("invalid value %q for flag %s: %v", value, flagWithMinus(name), err) + } + } + if f.actual == nil { + f.actual = make(map[string]*Flag) + } + f.actual[name] = flag + return +} + +// Parse parses flag definitions from the argument list, which should not +// include the command name. Must be called after all flags in the FlagSet +// are defined and before flags are accessed by the program. +// The return value will be ErrHelp if --help or -h was set but not defined. +// If allowIntersperse is set, arguments and flags can be interspersed, that +// is flags can follow positional arguments. +func (f *FlagSet) Parse(allowIntersperse bool, arguments []string) error { + f.parsed = true + f.procArgs = arguments + f.procFlag = "" + f.args = nil + f.allowIntersperse = allowIntersperse + for { + name, long, finished, err := f.parseOne() + if !finished { + if name != "" { + finished, err = f.parseFlagArg(name, long) + } + } + if err != nil { + switch f.errorHandling { + case ContinueOnError: + return err + case ExitOnError: + os.Exit(2) + case PanicOnError: + panic(err) + } + } + if !finished { + continue + } + if err == nil { + break + } + } + return nil +} + +// Parsed reports whether f.Parse has been called. +func (f *FlagSet) Parsed() bool { + return f.parsed +} + +// Parse parses the command-line flags from os.Args[1:]. Must be called +// after all flags are defined and before flags are accessed by the program. 
+// If allowIntersperse is set, arguments and flags can be interspersed, that +// is flags can follow positional arguments. +func Parse(allowIntersperse bool) { + // Ignore errors; CommandLine is set for ExitOnError. + CommandLine.Parse(allowIntersperse, os.Args[1:]) +} + +// Parsed returns true if the command-line flags have been parsed. +func Parsed() bool { + return CommandLine.Parsed() +} + +// CommandLine is the default set of command-line flags, parsed from os.Args. +// The top-level functions such as BoolVar, Arg, and so on are wrappers for the +// methods of CommandLine. +var CommandLine = NewFlagSet(os.Args[0], ExitOnError) + +// NewFlagSet returns a new, empty flag set with the specified name and +// error handling property. +func NewFlagSet(name string, errorHandling ErrorHandling) *FlagSet { + f := &FlagSet{ + name: name, + errorHandling: errorHandling, + } + return f +} + +// Init sets the name and error handling property for a flag set. +// By default, the zero FlagSet uses an empty name and the +// ContinueOnError error handling policy. +func (f *FlagSet) Init(name string, errorHandling ErrorHandling) { + f.name = name + f.errorHandling = errorHandling +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/gnuflag/flag_test.go juju-core-2.0.0/src/github.com/juju/gnuflag/flag_test.go --- juju-core-2.0~beta15/src/github.com/juju/gnuflag/flag_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/gnuflag/flag_test.go 2016-10-13 14:32:32.000000000 +0000 @@ -0,0 +1,643 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gnuflag_test + +import ( + "bytes" + "fmt" + . 
"github.com/juju/gnuflag" + "os" + "reflect" + "sort" + "strings" + "testing" + "time" +) + +var ( + test_bool = Bool("test_bool", false, "bool value") + test_int = Int("test_int", 0, "int value") + test_int64 = Int64("test_int64", 0, "int64 value") + test_uint = Uint("test_uint", 0, "uint value") + test_uint64 = Uint64("test_uint64", 0, "uint64 value") + test_string = String("test_string", "0", "string value") + test_float64 = Float64("test_float64", 0, "float64 value") + test_duration = Duration("test_duration", 0, "time.Duration value") +) + +func boolString(s string) string { + if s == "0" { + return "false" + } + return "true" +} + +func TestEverything(t *testing.T) { + m := make(map[string]*Flag) + desired := "0" + visitor := func(f *Flag) { + if len(f.Name) > 5 && f.Name[0:5] == "test_" { + m[f.Name] = f + ok := false + switch { + case f.Value.String() == desired: + ok = true + case f.Name == "test_bool" && f.Value.String() == boolString(desired): + ok = true + case f.Name == "test_duration" && f.Value.String() == desired+"s": + ok = true + } + if !ok { + t.Error("Visit: bad value", f.Value.String(), "for", f.Name) + } + } + } + VisitAll(visitor) + if len(m) != 8 { + t.Error("VisitAll misses some flags") + for k, v := range m { + t.Log(k, *v) + } + } + m = make(map[string]*Flag) + Visit(visitor) + if len(m) != 0 { + t.Errorf("Visit sees unset flags") + for k, v := range m { + t.Log(k, *v) + } + } + // Now set all flags + Set("test_bool", "true") + Set("test_int", "1") + Set("test_int64", "1") + Set("test_uint", "1") + Set("test_uint64", "1") + Set("test_string", "1") + Set("test_float64", "1") + Set("test_duration", "1s") + desired = "1" + Visit(visitor) + if len(m) != 8 { + t.Error("Visit fails after set") + for k, v := range m { + t.Log(k, *v) + } + } + // Now test they're visited in sort order. 
+ var flagNames []string + Visit(func(f *Flag) { flagNames = append(flagNames, f.Name) }) + if !sort.StringsAreSorted(flagNames) { + t.Errorf("flag names not sorted: %v", flagNames) + } +} + +func TestGet(t *testing.T) { + ResetForTesting(nil) + Bool("test_bool", true, "bool value") + Int("test_int", 1, "int value") + Int64("test_int64", 2, "int64 value") + Uint("test_uint", 3, "uint value") + Uint64("test_uint64", 4, "uint64 value") + String("test_string", "5", "string value") + Float64("test_float64", 6, "float64 value") + Duration("test_duration", 7, "time.Duration value") + + visitor := func(f *Flag) { + if len(f.Name) > 5 && f.Name[0:5] == "test_" { + g, ok := f.Value.(Getter) + if !ok { + t.Errorf("Visit: value does not satisfy Getter: %T", f.Value) + return + } + switch f.Name { + case "test_bool": + ok = g.Get() == true + case "test_int": + ok = g.Get() == int(1) + case "test_int64": + ok = g.Get() == int64(2) + case "test_uint": + ok = g.Get() == uint(3) + case "test_uint64": + ok = g.Get() == uint64(4) + case "test_string": + ok = g.Get() == "5" + case "test_float64": + ok = g.Get() == float64(6) + case "test_duration": + ok = g.Get() == time.Duration(7) + } + if !ok { + t.Errorf("Visit: bad value %T(%v) for %s", g.Get(), g.Get(), f.Name) + } + } + } + VisitAll(visitor) +} + +func TestUsage(t *testing.T) { + called := false + ResetForTesting(func() { called = true }) + f := CommandLine + f.SetOutput(nullWriter{}) + if f.Parse(true, []string{"-x"}) == nil { + t.Error("parse did not fail for unknown flag") + } + if !called { + t.Error("did not call Usage for unknown flag") + } +} + +var parseTests = []struct { + about string + intersperse bool + args []string + vals map[string]interface{} + remaining []string + error string +}{{ + about: "regular args", + intersperse: true, + args: []string{ + "--bool2", + "--int", "22", + "--int64", "0x23", + "--uint", "24", + "--uint64", "25", + "--string", "hello", + "--float64", "2718e28", + "--duration", "2m", + "one - 
extra - argument", + }, + vals: map[string]interface{}{ + "bool": false, + "bool2": true, + "int": 22, + "int64": int64(0x23), + "uint": uint(24), + "uint64": uint64(25), + "string": "hello", + "float64": 2718e28, + "duration": 2 * 60 * time.Second, + }, + remaining: []string{ + "one - extra - argument", + }, +}, { + about: "playing with -", + intersperse: true, + args: []string{ + "-a", + "-", + "-bc", + "2", + "-de1s", + "-f2s", + "-g", "3s", + "--h", + "--long", + "--long2", "-4s", + "3", + "4", + "--", "-5", + }, + vals: map[string]interface{}{ + "a": true, + "b": true, + "c": true, + "d": true, + "e": "1s", + "f": "2s", + "g": "3s", + "h": true, + "long": true, + "long2": "-4s", + "z": "default", + "www": 99, + }, + remaining: []string{ + "-", + "2", + "3", + "4", + "-5", + }, +}, { + about: "flag after explicit --", + intersperse: true, + args: []string{ + "-a", + "--", + "-b", + }, + vals: map[string]interface{}{ + "a": true, + "b": false, + }, + remaining: []string{ + "-b", + }, +}, { + about: "flag after end", + args: []string{ + "-a", + "foo", + "-b", + }, + vals: map[string]interface{}{ + "a": true, + "b": false, + }, + remaining: []string{ + "foo", + "-b", + }, +}, { + about: "arg and flag after explicit end", + args: []string{ + "-a", + "--", + "foo", + "-b", + }, + vals: map[string]interface{}{ + "a": true, + "b": false, + }, + remaining: []string{ + "foo", + "-b", + }, +}, { + about: "boolean args, explicitly and non-explicitly given", + args: []string{ + "--a=false", + "--b=true", + "--c", + }, + vals: map[string]interface{}{ + "a": false, + "b": true, + "c": true, + }, +}, { + about: "using =", + args: []string{ + "--arble=bar", + "--bletch=", + "--a=something", + "-b=other", + "-cdand more", + "--curdle=--milk", + "--sandwich", "=", + "--darn=", + "=arg", + }, + vals: map[string]interface{}{ + "arble": "bar", + "bletch": "", + "a": "something", + "b": "=other", + "c": true, + "d": "and more", + "curdle": "--milk", + "sandwich": "=", + "darn": "", 
+ }, + remaining: []string{"=arg"}, +}, { + about: "empty flag #1", + args: []string{ + "--=bar", + }, + error: `empty flag in argument "--=bar"`, +}, { + about: "single-letter equals", + args: []string{ + "-=bar", + }, + error: `flag provided but not defined: -=`, +}, { + about: "empty flag #2", + args: []string{ + "--=", + }, + error: `empty flag in argument "--="`, +}, { + about: "no equals", + args: []string{ + "-=", + }, + error: `flag provided but not defined: -=`, +}, { + args: []string{ + "-a=true", + }, + vals: map[string]interface{}{ + "a": true, + }, + error: `invalid value "=true" for flag -a: strconv.ParseBool: parsing "=true": invalid syntax`, +}, { + intersperse: true, + args: []string{ + "-a", + "-b", + }, + vals: map[string]interface{}{ + "a": true, + }, + error: "flag provided but not defined: -b", +}, { + intersperse: true, + args: []string{ + "-a", + }, + vals: map[string]interface{}{ + "a": "default", + }, + error: "flag needs an argument: -a", +}, { + intersperse: true, + args: []string{ + "-a", "b", + }, + vals: map[string]interface{}{ + "a": 0, + }, + error: `invalid value "b" for flag -a: strconv.ParseInt: parsing "b": invalid syntax`, +}, +} + +func testParse(newFlagSet func() *FlagSet, t *testing.T) { + for i, g := range parseTests { + t.Logf("test %d. 
%s", i, g.about)
+		f := newFlagSet()
+		flags := make(map[string]interface{})
+		for name, val := range g.vals {
+			switch val.(type) {
+			case bool:
+				flags[name] = f.Bool(name, false, "bool value "+name)
+			case string:
+				flags[name] = f.String(name, "default", "string value "+name)
+			case int:
+				flags[name] = f.Int(name, 99, "int value "+name)
+			case uint:
+				flags[name] = f.Uint(name, 0, "uint value")
+			case uint64:
+				flags[name] = f.Uint64(name, 0, "uint64 value")
+			case int64:
+				flags[name] = f.Int64(name, 0, "int64 value")
+			case float64:
+				flags[name] = f.Float64(name, 0, "float64 value")
+			case time.Duration:
+				flags[name] = f.Duration(name, 5*time.Second, "duration value")
+			default:
+				t.Fatalf("unhandled type %T", val)
+			}
+		}
+		err := f.Parse(g.intersperse, g.args)
+		if g.error != "" {
+			if err == nil {
+				t.Errorf("expected error %q got nil", g.error)
+			} else if err.Error() != g.error {
+				t.Errorf("expected error %q got %q", g.error, err.Error())
+			}
+			continue
+		}
+		for name, val := range g.vals {
+			actual := reflect.ValueOf(flags[name]).Elem().Interface()
+			if val != actual {
+				t.Errorf("flag %q, expected %v got %v", name, val, actual)
+			}
+		}
+		if len(f.Args()) != len(g.remaining) {
+			t.Fatalf("remaining args, expected %q got %q", g.remaining, f.Args())
+		}
+		for j, a := range f.Args() {
+			if a != g.remaining[j] {
+				t.Errorf("arg %d, expected %q got %q", j, g.remaining[j], a)
+			}
+		}
+	}
+}
+
+func TestParse(t *testing.T) {
+	testParse(func() *FlagSet {
+		ResetForTesting(func() {})
+		CommandLine.SetOutput(nullWriter{})
+		return CommandLine
+	}, t)
+}
+
+func TestFlagSetParse(t *testing.T) {
+	testParse(func() *FlagSet {
+		f := NewFlagSet("test", ContinueOnError)
+		f.SetOutput(nullWriter{})
+		return f
+	}, t)
+}
+
+// Declare a user-defined flag type.
+type flagVar []string + +func (f *flagVar) String() string { + return fmt.Sprint([]string(*f)) +} + +func (f *flagVar) Set(value string) error { + *f = append(*f, value) + return nil +} + +func TestUserDefined(t *testing.T) { + var flags FlagSet + flags.Init("test", ContinueOnError) + var v flagVar + flags.Var(&v, "v", "usage") + if err := flags.Parse(true, []string{"-v", "1", "-v", "2", "-v3"}); err != nil { + t.Error(err) + } + if len(v) != 3 { + t.Fatal("expected 3 args; got ", len(v)) + } + expect := "[1 2 3]" + if v.String() != expect { + t.Errorf("expected value %q got %q", expect, v.String()) + } +} + +func TestUserDefinedForCommandLine(t *testing.T) { + const help = "HELP" + var result string + ResetForTesting(func() { result = help }) + Usage() + if result != help { + t.Fatalf("got %q; expected %q", result, help) + } +} + +// Declare a user-defined boolean flag type. +type boolFlagVar struct { + count int +} + +func (b *boolFlagVar) String() string { + return fmt.Sprintf("%d", b.count) +} + +func (b *boolFlagVar) Set(value string) error { + if value == "true" { + b.count++ + } + return nil +} + +func (b *boolFlagVar) IsBoolFlag() bool { + return b.count < 4 +} + +func TestUserDefinedBool(t *testing.T) { + var flags FlagSet + flags.Init("test", ContinueOnError) + var b boolFlagVar + var err error + flags.Var(&b, "b", "usage") + if err = flags.Parse(true, []string{"-b", "-b", "-b", "-b=true", "-b=false", "-b", "barg", "-b"}); err != nil { + if b.count < 4 { + t.Error(err) + } + } + + if b.count != 4 { + t.Errorf("want: %d; got: %d", 4, b.count) + } + + if err == nil { + t.Error("expected error; got none") + } +} + +func TestSetOutput(t *testing.T) { + var flags FlagSet + var buf bytes.Buffer + flags.SetOutput(&buf) + flags.Init("test", ContinueOnError) + flags.Parse(true, []string{"-unknown"}) + if out := buf.String(); !strings.Contains(out, "-unknown") { + t.Logf("expected output mentioning unknown; got %q", out) + } +} + +// This tests that one can reset 
the flags. This still works but not well, and is +// superseded by FlagSet. +func TestChangingArgs(t *testing.T) { + ResetForTesting(func() { t.Fatal("bad parse") }) + oldArgs := os.Args + defer func() { os.Args = oldArgs }() + os.Args = []string{"cmd", "--before", "subcmd", "--after", "args"} + before := Bool("before", false, "") + if err := CommandLine.Parse(false, os.Args[1:]); err != nil { + t.Fatal(err) + } + cmd := Arg(0) + os.Args = Args() + after := Bool("after", false, "") + Parse(false) + args := Args() + + if !*before || cmd != "subcmd" || !*after || len(args) != 1 || args[0] != "args" { + t.Fatalf("expected true subcmd true [args] got %v %v %v %v", *before, cmd, *after, args) + } +} + +// Test that -help invokes the usage message and returns ErrHelp. +func TestHelp(t *testing.T) { + var helpCalled = false + fs := NewFlagSet("help test", ContinueOnError) + fs.SetOutput(nullWriter{}) + fs.Usage = func() { helpCalled = true } + var flag bool + fs.BoolVar(&flag, "flag", false, "regular flag") + // Regular flag invocation should work + err := fs.Parse(true, []string{"--flag"}) + if err != nil { + t.Fatal("expected no error; got ", err) + } + if !flag { + t.Error("flag was not set by --flag") + } + if helpCalled { + t.Error("help called for regular flag") + helpCalled = false // reset for next test + } + // Help flag should work as expected. + err = fs.Parse(true, []string{"--help"}) + if err == nil { + t.Fatal("error expected") + } + if err != ErrHelp { + t.Fatal("expected ErrHelp; got ", err) + } + if !helpCalled { + t.Fatal("help was not called") + } + // If we define a help flag, that should override. 
+ var help bool + fs.BoolVar(&help, "help", false, "help flag") + helpCalled = false + err = fs.Parse(true, []string{"--help"}) + if err != nil { + t.Fatal("expected no error for defined --help; got ", err) + } + if helpCalled { + t.Fatal("help was called; should not have been for defined help flag") + } +} + +type nullWriter struct{} + +func (nullWriter) Write(buf []byte) (int, error) { + return len(buf), nil +} + +func TestPrintDefaults(t *testing.T) { + f := NewFlagSet("print test", ContinueOnError) + f.SetOutput(nullWriter{}) + var b bool + var c int + var d string + var e float64 + f.IntVar(&c, "trapclap", 99, "usage not shown") + f.IntVar(&c, "c", 99, "c usage") + + f.BoolVar(&b, "bal", false, "usage not shown") + f.BoolVar(&b, "x", false, "usage not shown") + f.BoolVar(&b, "b", false, "b usage") + f.BoolVar(&b, "balalaika", false, "usage not shown") + + f.StringVar(&d, "d", "d default", "d usage") + + f.Float64Var(&e, "elephant", 3.14, "elephant usage") + + var buf bytes.Buffer + f.SetOutput(&buf) + f.PrintDefaults() + f.SetOutput(nullWriter{}) + + expect := + `-b, -x, --bal, --balalaika (= false) + b usage +-c, --trapclap (= 99) + c usage +-d (= "d default") + d usage +--elephant (= 3.14) + elephant usage +` + if buf.String() != expect { + t.Errorf("expect %q got %q", expect, buf.String()) + } +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/gnuflag/LICENSE juju-core-2.0.0/src/github.com/juju/gnuflag/LICENSE --- juju-core-2.0~beta15/src/github.com/juju/gnuflag/LICENSE 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/gnuflag/LICENSE 2016-10-13 14:32:32.000000000 +0000 @@ -0,0 +1,27 @@ +Copyright (c) 2012 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff -Nru juju-core-2.0~beta15/src/github.com/juju/gnuflag/README.md juju-core-2.0.0/src/github.com/juju/gnuflag/README.md --- juju-core-2.0~beta15/src/github.com/juju/gnuflag/README.md 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/gnuflag/README.md 2016-10-13 14:32:32.000000000 +0000 @@ -0,0 +1,10 @@ +Gnuflag +----- + +The gnuflag package is a fork of the Go standard library +package that supports GNU-compatible flag syntax. + +In particular, it supports `--longflag` and `-l` single-character +flag syntax. + +Full documentation can be found here: https://godoc.org/github.com/juju/gnuflag. 
diff -Nru juju-core-2.0~beta15/src/github.com/juju/go4/.gitignore juju-core-2.0.0/src/github.com/juju/go4/.gitignore --- juju-core-2.0~beta15/src/github.com/juju/go4/.gitignore 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/go4/.gitignore 2016-10-13 14:32:27.000000000 +0000 @@ -0,0 +1,24 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof diff -Nru juju-core-2.0~beta15/src/github.com/juju/go4/lock/.gitignore juju-core-2.0.0/src/github.com/juju/go4/lock/.gitignore --- juju-core-2.0~beta15/src/github.com/juju/go4/lock/.gitignore 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/go4/lock/.gitignore 2016-10-13 14:32:27.000000000 +0000 @@ -0,0 +1 @@ +*~ diff -Nru juju-core-2.0~beta15/src/github.com/juju/gojsonschema/.gitignore juju-core-2.0.0/src/github.com/juju/gojsonschema/.gitignore --- juju-core-2.0~beta15/src/github.com/juju/gojsonschema/.gitignore 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/gojsonschema/.gitignore 2016-10-13 14:31:58.000000000 +0000 @@ -0,0 +1 @@ +*.sw[nop] diff -Nru juju-core-2.0~beta15/src/github.com/juju/gomaasapi/controller.go juju-core-2.0.0/src/github.com/juju/gomaasapi/controller.go --- juju-core-2.0~beta15/src/github.com/juju/gomaasapi/controller.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/gomaasapi/controller.go 2016-10-13 14:32:07.000000000 +0000 @@ -252,6 +252,7 @@ Domain string Zone string AgentName string + OwnerData map[string]string } // Machines implements Controller. 
@@ -263,6 +264,8 @@ params.MaybeAdd("domain", args.Domain) params.MaybeAdd("zone", args.Zone) params.MaybeAdd("agent_name", args.AgentName) + // At the moment the MAAS API doesn't support filtering by owner + // data so we do that ourselves below. source, err := c.getQuery("machines", params.Values) if err != nil { return nil, NewUnexpectedError(err) @@ -274,11 +277,22 @@ var result []Machine for _, m := range machines { m.controller = c - result = append(result, m) + if ownerDataMatches(m.ownerData, args.OwnerData) { + result = append(result, m) + } } return result, nil } +func ownerDataMatches(ownerData, filter map[string]string) bool { + for key, value := range filter { + if ownerData[key] != value { + return false + } + } + return true +} + // StorageSpec represents one element of storage constraints necessary // to be satisfied to allocate a machine. type StorageSpec struct { diff -Nru juju-core-2.0~beta15/src/github.com/juju/gomaasapi/controller_test.go juju-core-2.0.0/src/github.com/juju/gomaasapi/controller_test.go --- juju-core-2.0~beta15/src/github.com/juju/gomaasapi/controller_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/gomaasapi/controller_test.go 2016-10-13 14:32:07.000000000 +0000 @@ -246,6 +246,44 @@ c.Assert(machines[0].Hostname(), gc.Equals, "untasted-markita") } +func (s *controllerSuite) TestMachinesFilterWithOwnerData(c *gc.C) { + controller := s.getController(c) + machines, err := controller.Machines(MachinesArgs{ + Hostnames: []string{"untasted-markita"}, + OwnerData: map[string]string{ + "fez": "jim crawford", + }, + }) + c.Assert(err, jc.ErrorIsNil) + c.Assert(machines, gc.HasLen, 0) +} + +func (s *controllerSuite) TestMachinesFilterWithOwnerData_MultipleMatches(c *gc.C) { + controller := s.getController(c) + machines, err := controller.Machines(MachinesArgs{ + OwnerData: map[string]string{ + "braid": "jonathan blow", + }, + }) + c.Assert(err, jc.ErrorIsNil) + c.Assert(machines, gc.HasLen, 2) + 
c.Assert(machines[0].Hostname(), gc.Equals, "lowlier-glady") + c.Assert(machines[1].Hostname(), gc.Equals, "icier-nina") +} + +func (s *controllerSuite) TestMachinesFilterWithOwnerData_RequiresAllMatch(c *gc.C) { + controller := s.getController(c) + machines, err := controller.Machines(MachinesArgs{ + OwnerData: map[string]string{ + "braid": "jonathan blow", + "frog-fractions": "jim crawford", + }, + }) + c.Assert(err, jc.ErrorIsNil) + c.Assert(machines, gc.HasLen, 1) + c.Assert(machines[0].Hostname(), gc.Equals, "lowlier-glady") +} + func (s *controllerSuite) TestMachinesArgs(c *gc.C) { controller := s.getController(c) // This will fail with a 404 due to the test server not having something at diff -Nru juju-core-2.0~beta15/src/github.com/juju/gomaasapi/.gitignore juju-core-2.0.0/src/github.com/juju/gomaasapi/.gitignore --- juju-core-2.0~beta15/src/github.com/juju/gomaasapi/.gitignore 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/gomaasapi/.gitignore 2016-10-13 14:32:07.000000000 +0000 @@ -0,0 +1,2 @@ +*.sw[nop] +example/[^.]* diff -Nru juju-core-2.0~beta15/src/github.com/juju/gomaasapi/interfaces.go juju-core-2.0.0/src/github.com/juju/gomaasapi/interfaces.go --- juju-core-2.0~beta15/src/github.com/juju/gomaasapi/interfaces.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/gomaasapi/interfaces.go 2016-10-13 14:32:07.000000000 +0000 @@ -177,6 +177,8 @@ // Machine represents a physical machine. type Machine interface { + OwnerDataHolder + SystemID() string Hostname() string FQDN() string @@ -343,3 +345,18 @@ // There are some other attributes for block devices, but we can // expose them on an as needed basis. } + +// OwnerDataHolder represents any MAAS object that can store key/value +// data. +type OwnerDataHolder interface { + // OwnerData returns a copy of the key/value data stored for this + // object. 
+ OwnerData() map[string]string + + // SetOwnerData updates the key/value data stored for this object + // with the values passed in. Existing keys that aren't specified + // in the map passed in will be left in place; to clear a key set + // its value to "". All owner data is cleared when the object is + // released. + SetOwnerData(map[string]string) error +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/gomaasapi/interface_test.go juju-core-2.0.0/src/github.com/juju/gomaasapi/interface_test.go --- juju-core-2.0~beta15/src/github.com/juju/gomaasapi/interface_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/gomaasapi/interface_test.go 2016-10-13 14:32:07.000000000 +0000 @@ -152,13 +152,23 @@ type fakeSubnet struct { Subnet - id int + id int + cidr string + vlan VLAN } func (f *fakeSubnet) ID() int { return f.id } +func (f *fakeSubnet) CIDR() string { + return f.cidr +} + +func (f *fakeSubnet) VLAN() VLAN { + return f.vlan +} + func (s *interfaceSuite) TestLinkSubnetArgs(c *gc.C) { for i, test := range []struct { args LinkSubnetArgs diff -Nru juju-core-2.0~beta15/src/github.com/juju/gomaasapi/machine.go juju-core-2.0.0/src/github.com/juju/gomaasapi/machine.go --- juju-core-2.0~beta15/src/github.com/juju/gomaasapi/machine.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/gomaasapi/machine.go 2016-10-13 14:32:07.000000000 +0000 @@ -4,7 +4,9 @@ package gomaasapi import ( + "fmt" "net/http" + "net/url" "github.com/juju/errors" "github.com/juju/schema" @@ -16,10 +18,11 @@ resourceURI string - systemID string - hostname string - fqdn string - tags []string + systemID string + hostname string + fqdn string + tags []string + ownerData map[string]string operatingSystem string distroSeries string @@ -57,6 +60,8 @@ m.statusName = other.statusName m.statusMessage = other.statusMessage m.zone = other.zone + m.tags = other.tags + m.ownerData = other.ownerData } // SystemID implements Machine. 
@@ -248,12 +253,15 @@ } // CreateMachineDeviceArgs is an argument structure for Machine.CreateDevice. -// All fields except Hostname are required. +// Only InterfaceName and MACAddress fields are required, the others are only +// used if set. If Subnet and VLAN are both set, Subnet.VLAN() must match the +// given VLAN. On failure, returns an error satisfying errors.IsNotValid(). type CreateMachineDeviceArgs struct { Hostname string InterfaceName string MACAddress string Subnet Subnet + VLAN VLAN } // Validate ensures that all required values are non-emtpy. @@ -261,12 +269,19 @@ if a.InterfaceName == "" { return errors.NotValidf("missing InterfaceName") } + if a.MACAddress == "" { return errors.NotValidf("missing MACAddress") } - if a.Subnet == nil { - return errors.NotValidf("missing Subnet") + + if a.Subnet != nil && a.VLAN != nil && a.Subnet.VLAN() != a.VLAN { + msg := fmt.Sprintf( + "given subnet %q on VLAN %d does not match given VLAN %d", + a.Subnet.CIDR(), a.Subnet.VLAN().ID(), a.VLAN.ID(), + ) + return errors.NewNotValid(nil, msg) } + return nil } @@ -284,46 +299,101 @@ return nil, errors.Trace(err) } - defer func() { + defer func(err *error) { // If there is an error return, at least try to delete the device we just created. - if err != nil { + if *err != nil { if innerErr := device.Delete(); innerErr != nil { logger.Warningf("could not delete device %q", device.SystemID()) } } - }() + }(&err) - // There should be one interface created for each MAC Address, and since - // we only specified one, there should just be one response. + // Update the VLAN to use for the interface, if given. + vlanToUse := args.VLAN + if vlanToUse == nil && args.Subnet != nil { + vlanToUse = args.Subnet.VLAN() + } + + // There should be one interface created for each MAC Address, and since we + // only specified one, there should just be one response. 
interfaces := device.InterfaceSet() if count := len(interfaces); count != 1 { err := errors.Errorf("unexpected interface count for device: %d", count) return nil, NewUnexpectedError(err) } iface := interfaces[0] + nameToUse := args.InterfaceName - // Now update the name and vlan of interface that was created… - updateArgs := UpdateInterfaceArgs{} - if iface.Name() != args.InterfaceName { - updateArgs.Name = args.InterfaceName + if err := m.updateDeviceInterface(iface, nameToUse, vlanToUse); err != nil { + return nil, errors.Trace(err) } - if iface.VLAN().ID() != args.Subnet.VLAN().ID() { - updateArgs.VLAN = args.Subnet.VLAN() + + if args.Subnet == nil { + // Nothing further to update. + return device, nil } - err = iface.Update(updateArgs) - if err != nil { + + if err := m.linkDeviceInterfaceToSubnet(iface, args.Subnet); err != nil { return nil, errors.Trace(err) } - err = iface.LinkSubnet(LinkSubnetArgs{ + return device, nil +} + +func (m *machine) updateDeviceInterface(iface Interface, nameToUse string, vlanToUse VLAN) error { + updateArgs := UpdateInterfaceArgs{} + updateArgs.Name = nameToUse + + if vlanToUse != nil { + updateArgs.VLAN = vlanToUse + } + + if err := iface.Update(updateArgs); err != nil { + return errors.Annotatef(err, "updating device interface %q failed", iface.Name()) + } + + return nil +} + +func (m *machine) linkDeviceInterfaceToSubnet(iface Interface, subnetToUse Subnet) error { + err := iface.LinkSubnet(LinkSubnetArgs{ Mode: LinkModeStatic, - Subnet: args.Subnet, + Subnet: subnetToUse, }) if err != nil { - return nil, errors.Trace(err) + return errors.Annotatef( + err, "linking device interface %q to subnet %q failed", + iface.Name(), subnetToUse.CIDR()) } - return device, nil + return nil +} + +// OwnerData implements OwnerDataHolder. 
+func (m *machine) OwnerData() map[string]string { + result := make(map[string]string) + for key, value := range m.ownerData { + result[key] = value + } + return result +} + +// SetOwnerData implements OwnerDataHolder. +func (m *machine) SetOwnerData(ownerData map[string]string) error { + params := make(url.Values) + for key, value := range ownerData { + params.Add(key, value) + } + result, err := m.controller.post(m.resourceURI, "set_owner_data", params) + if err != nil { + return errors.Trace(err) + } + machine, err := readMachine(m.controller.apiVersion, result) + if err != nil { + return errors.Trace(err) + } + m.updateFrom(machine) + return nil } func readMachine(controllerVersion version.Number, source interface{}) (*machine, error) { @@ -395,10 +465,11 @@ fields := schema.Fields{ "resource_uri": schema.String(), - "system_id": schema.String(), - "hostname": schema.String(), - "fqdn": schema.String(), - "tag_names": schema.List(schema.String()), + "system_id": schema.String(), + "hostname": schema.String(), + "fqdn": schema.String(), + "tag_names": schema.List(schema.String()), + "owner_data": schema.StringMap(schema.String()), "osystem": schema.String(), "distro_series": schema.String(), @@ -459,10 +530,11 @@ result := &machine{ resourceURI: valid["resource_uri"].(string), - systemID: valid["system_id"].(string), - hostname: valid["hostname"].(string), - fqdn: valid["fqdn"].(string), - tags: convertToStringSlice(valid["tag_names"]), + systemID: valid["system_id"].(string), + hostname: valid["hostname"].(string), + fqdn: valid["fqdn"].(string), + tags: convertToStringSlice(valid["tag_names"]), + ownerData: convertToStringMap(valid["owner_data"]), operatingSystem: valid["osystem"].(string), distroSeries: valid["distro_series"].(string), @@ -496,3 +568,17 @@ } return result } + +func convertToStringMap(field interface{}) map[string]string { + if field == nil { + return nil + } + // This function is only called after a schema Coerce, so it's + // safe. 
+ fieldMap := field.(map[string]interface{}) + result := make(map[string]string) + for key, value := range fieldMap { + result[key] = value.(string) + } + return result +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/gomaasapi/machine_test.go juju-core-2.0.0/src/github.com/juju/gomaasapi/machine_test.go --- juju-core-2.0~beta15/src/github.com/juju/gomaasapi/machine_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/gomaasapi/machine_test.go 2016-10-13 14:32:07.000000000 +0000 @@ -4,6 +4,7 @@ package gomaasapi import ( + "fmt" "net/http" "github.com/juju/errors" @@ -52,6 +53,10 @@ c.Check(machine.Hostname(), gc.Equals, "untasted-markita") c.Check(machine.FQDN(), gc.Equals, "untasted-markita.maas") c.Check(machine.Tags(), jc.DeepEquals, []string{"virtual", "magic"}) + c.Check(machine.OwnerData(), jc.DeepEquals, map[string]string{ + "fez": "phil fish", + "frog-fractions": "jim crawford", + }) c.Check(machine.IPAddresses(), jc.DeepEquals, []string{"192.168.100.4"}) c.Check(machine.Memory(), gc.Equals, 1024) @@ -124,6 +129,7 @@ c.Assert(err, jc.ErrorIsNil) c.Check(machines, gc.HasLen, 1) machine := machines[0].(*machine) + server.ResetRequests() return server, machine } @@ -230,20 +236,34 @@ args: CreateMachineDeviceArgs{ InterfaceName: "eth1", MACAddress: "something", + Subnet: &fakeSubnet{ + cidr: "1.2.3.4/5", + vlan: &fakeVLAN{id: 42}, + }, + VLAN: &fakeVLAN{id: 10}, }, - errText: `missing Subnet not valid`, + errText: `given subnet "1.2.3.4/5" on VLAN 42 does not match given VLAN 10`, }, { args: CreateMachineDeviceArgs{ + Hostname: "is-optional", InterfaceName: "eth1", MACAddress: "something", - Subnet: &fakeSubnet{}, + Subnet: nil, + VLAN: &fakeVLAN{}, }, }, { args: CreateMachineDeviceArgs{ - Hostname: "is-optional", InterfaceName: "eth1", MACAddress: "something", Subnet: &fakeSubnet{}, + VLAN: nil, + }, + }, { + args: CreateMachineDeviceArgs{ + InterfaceName: "eth1", + MACAddress: "something", + Subnet: nil, + VLAN: nil, }, 
}} { c.Logf("test %d", i) @@ -284,12 +304,66 @@ InterfaceName: "eth4", MACAddress: "fake-mac-address", Subnet: subnet, + VLAN: subnet.VLAN(), }) c.Assert(err, jc.ErrorIsNil) c.Assert(device.InterfaceSet()[0].Name(), gc.Equals, "eth4") c.Assert(device.InterfaceSet()[0].VLAN().ID(), gc.Equals, subnet.VLAN().ID()) } +func (s *machineSuite) TestCreateDeviceWithoutSubnetOrVLAN(c *gc.C) { + server, machine := s.getServerAndMachine(c) + // The createDeviceResponse returns a single interface with the name "eth0". + server.AddPostResponse("/api/2.0/devices/?op=", http.StatusOK, createDeviceResponse) + updateInterfaceResponse := updateJSONMap(c, interfaceResponse, map[string]interface{}{ + "name": "eth4", + "links": []interface{}{}, + "resource_uri": "/MAAS/api/2.0/nodes/4y3haf/interfaces/48/", + }) + server.AddPutResponse("/MAAS/api/2.0/nodes/4y3haf/interfaces/48/", http.StatusOK, updateInterfaceResponse) + device, err := machine.CreateDevice(CreateMachineDeviceArgs{ + InterfaceName: "eth4", + MACAddress: "fake-mac-address", + Subnet: nil, + VLAN: nil, + }) + c.Assert(err, jc.ErrorIsNil) + c.Assert(device.InterfaceSet()[0].Name(), gc.Equals, "eth4") + // No specifc subnet or VLAN should be set. + c.Assert(device.InterfaceSet()[0].VLAN().ID(), gc.Equals, 1) // set in interfaceResponse + c.Assert(device.InterfaceSet()[0].Links(), gc.HasLen, 0) // set above +} + +func (s *machineSuite) TestCreateDeviceWithVLANOnly(c *gc.C) { + server, machine := s.getServerAndMachine(c) + // The createDeviceResponse returns a single interface with the name "eth0". 
+ server.AddPostResponse("/api/2.0/devices/?op=", http.StatusOK, createDeviceResponse) + updateInterfaceResponse := updateJSONMap(c, interfaceResponse, map[string]interface{}{ + "name": "eth4", + "vlan": map[string]interface{}{ + "id": 42, + "resource_uri": "/MAAS/api/2.0/vlans/42/", + "vid": 1234, + "fabric": "live", + "dhcp_on": false, + "mtu": 9001, + }, + "links": []interface{}{}, + "resource_uri": "/MAAS/api/2.0/nodes/4y3haf/interfaces/48/", + }) + server.AddPutResponse("/MAAS/api/2.0/nodes/4y3haf/interfaces/48/", http.StatusOK, updateInterfaceResponse) + device, err := machine.CreateDevice(CreateMachineDeviceArgs{ + InterfaceName: "eth4", + MACAddress: "fake-mac-address", + Subnet: nil, + VLAN: &fakeVLAN{id: 42}, + }) + c.Assert(err, jc.ErrorIsNil) + c.Assert(device.InterfaceSet()[0].Name(), gc.Equals, "eth4") + // VLAN should be set. + c.Assert(device.InterfaceSet()[0].VLAN().ID(), gc.Equals, 42) +} + func (s *machineSuite) TestCreateDeviceTriesToDeleteDeviceOnError(c *gc.C) { server, machine := s.getServerAndMachine(c) // The createDeviceResponse returns a single interface with the name "eth0". @@ -315,8 +389,35 @@ c.Assert(request.RequestURI, gc.Equals, "/MAAS/api/2.0/devices/4y3haf/") } +func (s *machineSuite) TestOwnerDataCopies(c *gc.C) { + machine := machine{ownerData: make(map[string]string)} + ownerData := machine.OwnerData() + ownerData["sad"] = "children" + c.Assert(machine.OwnerData(), gc.DeepEquals, map[string]string{}) +} + +func (s *machineSuite) TestSetOwnerData(c *gc.C) { + server, machine := s.getServerAndMachine(c) + server.AddPostResponse(machine.resourceURI+"?op=set_owner_data", 200, machineWithOwnerData(`{"returned": "data"}`)) + err := machine.SetOwnerData(map[string]string{ + "draco": "malfoy", + "empty": "", // Check that empty strings get passed along. 
+ }) + c.Assert(err, jc.ErrorIsNil) + c.Assert(machine.OwnerData(), gc.DeepEquals, map[string]string{"returned": "data"}) + form := server.LastRequest().PostForm + // Looking at the map directly so we can tell the difference + // between no value and an explicit empty string. + c.Check(form["draco"], gc.DeepEquals, []string{"malfoy"}) + c.Check(form["empty"], gc.DeepEquals, []string{""}) +} + +func machineWithOwnerData(data string) string { + return fmt.Sprintf(machineOwnerDataTemplate, data) +} + const ( - machineResponse = ` + machineOwnerDataTemplate = ` { "netboot": false, "system_id": "4y3ha3", @@ -590,7 +691,7 @@ "power_type": "virsh", "distro_series": "trusty", "tag_names": [ - "virtual", "magic" + "virtual", "magic" ], "disable_ipv4": false, "status_message": "From 'Deploying' to 'Deployed'", @@ -694,9 +795,77 @@ "resource_record_count": 0, "ttl": null, "authoritative": true - } + }, + "owner_data": %s } ` + + createDeviceResponse = ` +{ + "zone": { + "description": "", + "resource_uri": "/MAAS/api/2.0/zones/default/", + "name": "default" + }, + "domain": { + "resource_record_count": 0, + "resource_uri": "/MAAS/api/2.0/domains/0/", + "authoritative": true, + "name": "maas", + "ttl": null, + "id": 0 + }, + "node_type_name": "Device", + "address_ttl": null, + "hostname": "furnacelike-brittney", + "node_type": 1, + "resource_uri": "/MAAS/api/2.0/devices/4y3haf/", + "ip_addresses": ["192.168.100.11"], + "owner": "thumper", + "tag_names": [], + "fqdn": "furnacelike-brittney.maas", + "system_id": "4y3haf", + "parent": "4y3ha3", + "interface_set": [ + { + "resource_uri": "/MAAS/api/2.0/nodes/4y3haf/interfaces/48/", + "type": "physical", + "mac_address": "78:f0:f1:16:a7:46", + "params": "", + "discovered": null, + "effective_mtu": 1500, + "id": 48, + "children": [], + "links": [], + "name": "eth0", + "vlan": { + "secondary_rack": null, + "dhcp_on": true, + "fabric": "fabric-0", + "mtu": 1500, + "primary_rack": "4y3h7n", + "resource_uri": "/MAAS/api/2.0/vlans/1/", + 
"external_dhcp": null, + "name": "untagged", + "id": 1, + "vid": 0 + }, + "tags": [], + "parents": [], + "enabled": true + } + ] +} +` +) + +var ( + machineResponse = machineWithOwnerData(`{ + "fez": "phil fish", + "frog-fractions": "jim crawford" + } +`) + machinesResponse = "[" + machineResponse + `, { "netboot": true, @@ -938,6 +1107,10 @@ "resource_record_count": 0, "ttl": null, "authoritative": true + }, + "owner_data": { + "braid": "jonathan blow", + "frog-fractions": "jim crawford" } }, { @@ -1180,66 +1353,12 @@ "resource_record_count": 0, "ttl": null, "authoritative": true + }, + "owner_data": { + "braid": "jonathan blow", + "fez": "phil fish" } } ] ` - - createDeviceResponse = ` -{ - "zone": { - "description": "", - "resource_uri": "/MAAS/api/2.0/zones/default/", - "name": "default" - }, - "domain": { - "resource_record_count": 0, - "resource_uri": "/MAAS/api/2.0/domains/0/", - "authoritative": true, - "name": "maas", - "ttl": null, - "id": 0 - }, - "node_type_name": "Device", - "address_ttl": null, - "hostname": "furnacelike-brittney", - "node_type": 1, - "resource_uri": "/MAAS/api/2.0/devices/4y3haf/", - "ip_addresses": ["192.168.100.11"], - "owner": "thumper", - "tag_names": [], - "fqdn": "furnacelike-brittney.maas", - "system_id": "4y3haf", - "parent": "4y3ha3", - "interface_set": [ - { - "resource_uri": "/MAAS/api/2.0/nodes/4y3haf/interfaces/48/", - "type": "physical", - "mac_address": "78:f0:f1:16:a7:46", - "params": "", - "discovered": null, - "effective_mtu": 1500, - "id": 48, - "children": [], - "links": [], - "name": "eth0", - "vlan": { - "secondary_rack": null, - "dhcp_on": true, - "fabric": "fabric-0", - "mtu": 1500, - "primary_rack": "4y3h7n", - "resource_uri": "/MAAS/api/2.0/vlans/1/", - "external_dhcp": null, - "name": "untagged", - "id": 1, - "vid": 0 - }, - "tags": [], - "parents": [], - "enabled": true - } - ] -} -` ) diff -Nru juju-core-2.0~beta15/src/github.com/juju/gomaasapi/testing.go 
juju-core-2.0.0/src/github.com/juju/gomaasapi/testing.go --- juju-core-2.0~beta15/src/github.com/juju/gomaasapi/testing.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/gomaasapi/testing.go 2016-10-13 14:32:07.000000000 +0000 @@ -145,10 +145,22 @@ return s.requests[pos] } +func (s *SimpleTestServer) LastNRequests(n int) []*http.Request { + start := len(s.requests) - n + if start < 0 { + start = 0 + } + return s.requests[start:] +} + func (s *SimpleTestServer) RequestCount() int { return len(s.requests) } +func (s *SimpleTestServer) ResetRequests() { + s.requests = nil +} + func (s *SimpleTestServer) handler(writer http.ResponseWriter, request *http.Request) { method := request.Method var ( diff -Nru juju-core-2.0~beta15/src/github.com/juju/gomaasapi/testservice.go juju-core-2.0.0/src/github.com/juju/gomaasapi/testservice.go --- juju-core-2.0~beta15/src/github.com/juju/gomaasapi/testservice.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/gomaasapi/testservice.go 2016-10-13 14:32:07.000000000 +0000 @@ -108,11 +108,11 @@ } type TestDevice struct { - IPAddresses []string - SystemId string - MACAddress string - Parent string - Hostname string + IPAddresses []string + SystemId string + MACAddresses []string + Parent string + Hostname string // Not part of the device definition but used by the template. APIVersion string @@ -663,11 +663,8 @@ } } -func macMatches(device *TestDevice, macs []string, hasMac bool) bool { - if !hasMac { - return true - } - return contains(macs, device.MACAddress) +func macMatches(mac string, device *TestDevice) bool { + return contains(device.MACAddresses, mac) } // deviceListingHandler handles requests for '/devices/'. 
@@ -676,13 +673,25 @@ checkError(err) // TODO(mfoord): support filtering by hostname and id macs, hasMac := values["mac_address"] - var matchedDevices []string - for _, device := range server.devices { - if macMatches(device, macs, hasMac) { - matchedDevices = append(matchedDevices, renderDevice(device)) + var matchedDevices []*TestDevice + if !hasMac { + for _, device := range server.devices { + matchedDevices = append(matchedDevices, device) + } + } else { + for _, mac := range macs { + for _, device := range server.devices { + if macMatches(mac, device) { + matchedDevices = append(matchedDevices, device) + } + } } } - json := fmt.Sprintf("[%v]", strings.Join(matchedDevices, ", ")) + deviceChunks := make([]string, len(matchedDevices)) + for i := range matchedDevices { + deviceChunks[i] = renderDevice(matchedDevices[i]) + } + json := fmt.Sprintf("[%v]", strings.Join(deviceChunks, ", ")) w.WriteHeader(http.StatusOK) fmt.Fprint(w, json) @@ -696,16 +705,31 @@ } return strings.Join(pieces, ", ") }, + "last": func(items []string) []string { + if len(items) == 0 { + return []string{} + } + return items[len(items)-1:] + }, + "allButLast": func(items []string) []string { + if len(items) < 2 { + return []string{} + } + return items[0 : len(items)-1] + }, } const ( // The json template for generating new devices. 
// TODO(mfoord): set resource_uri in MAC addresses deviceTemplate = `{ - "macaddress_set": [ + "macaddress_set": [{{range .MACAddresses | allButLast}} { - "mac_address": "{{.MACAddress}}" - } + "mac_address": "{{.}}" + },{{end}}{{range .MACAddresses | last}} + { + "mac_address": "{{.}}" + }{{end}} ], "zone": { "resource_uri": "/MAAS/api/{{.APIVersion}}/zones/default/", @@ -741,6 +765,23 @@ return result[0], true } +func getValues(values url.Values, key string) ([]string, bool) { + result, hasResult := values[key] + if !hasResult { + return nil, false + } + var output []string + for _, val := range result { + if val != "" { + output = append(output, val) + } + } + if len(output) == 0 { + return nil, false + } + return output, true +} + // newDeviceHandler creates, stores and returns new devices. func newDeviceHandler(server *TestServer, w http.ResponseWriter, r *http.Request) { err := r.ParseForm() @@ -753,23 +794,23 @@ systemId := fmt.Sprintf("node-%v", uuid) // At least one MAC address must be specified. // TODO(mfoord) we only support a single MAC in the test server. - mac, hasMac := getValue(values, "mac_addresses") + macs, hasMacs := getValues(values, "mac_addresses") // hostname and parent are optional. // TODO(mfoord): we require both to be set in the test server. 
hostname, hasHostname := getValue(values, "hostname") parent, hasParent := getValue(values, "parent") - if !hasHostname || !hasMac || !hasParent { + if !hasHostname || !hasMacs || !hasParent { w.WriteHeader(http.StatusBadRequest) return } device := &TestDevice{ - MACAddress: mac, - APIVersion: server.version, - Parent: parent, - Hostname: hostname, - SystemId: systemId, + MACAddresses: macs, + APIVersion: server.version, + Parent: parent, + Hostname: hostname, + SystemId: systemId, } deviceJSON := renderDevice(device) diff -Nru juju-core-2.0~beta15/src/github.com/juju/gomaasapi/testservice_test.go juju-core-2.0.0/src/github.com/juju/gomaasapi/testservice_test.go --- juju-core-2.0~beta15/src/github.com/juju/gomaasapi/testservice_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/gomaasapi/testservice_test.go 2016-10-13 14:32:07.000000000 +0000 @@ -66,10 +66,12 @@ c.Assert(string(content), Equals, capabilities) } -func (suite *TestServerSuite) createDevice(c *C, mac, hostname, parent string) string { +func (suite *TestServerSuite) createDevice(c *C, macs, hostname, parent string) string { devicesURL := fmt.Sprintf("/api/%s/devices/", suite.server.version) + "?op=new" values := url.Values{} - values.Add("mac_addresses", mac) + for _, mac := range strings.Split(macs, ",") { + values.Add("mac_addresses", mac) + } values.Add("hostname", hostname) values.Add("parent", parent) result := suite.post(c, devicesURL, values) @@ -110,15 +112,19 @@ return result } -func checkDevice(c *C, device map[string]JSONObject, mac, hostname, parent string) { +func checkDevice(c *C, device map[string]JSONObject, macs, hostname, parent string) { + macSlice := strings.Split(macs, ",") macArray, err := device["macaddress_set"].GetArray() c.Assert(err, IsNil) - c.Assert(macArray, HasLen, 1) - macMap, err := macArray[0].GetMap() - c.Assert(err, IsNil) + c.Assert(macArray, HasLen, len(macSlice)) - actualMac := getString(c, macMap, "mac_address") - 
c.Assert(actualMac, Equals, mac) + for i := range macArray { + macMap, err := macArray[i].GetMap() + c.Assert(err, IsNil) + + actualMac := getString(c, macMap, "mac_address") + c.Check(actualMac, Equals, macSlice[i]) + } actualParent := getString(c, device, "parent") c.Assert(actualParent, Equals, parent) @@ -205,6 +211,18 @@ c.Assert(actualId, Equals, systemId) } +func (suite *TestServerSuite) TestGetDeviceWithMultipleMacs(c *C) { + systemId := suite.createDevice(c, "foo,boo", "bar", "baz") + deviceURL := fmt.Sprintf("/api/%v/devices/%v/", suite.server.version, systemId) + + result := suite.get(c, deviceURL) + resultMap, err := result.GetMap() + c.Assert(err, IsNil) + checkDevice(c, resultMap, "foo,boo", "bar", "baz") + actualId, err := resultMap["system_id"].GetString() + c.Assert(actualId, Equals, systemId) +} + func (suite *TestServerSuite) TestDevicesList(c *C) { firstId := suite.createDevice(c, "foo", "bar", "baz") c.Assert(firstId, Not(Equals), "") @@ -252,6 +270,27 @@ checkDevice(c, deviceMap, "foo", "bar", "baz") } +func (suite *TestServerSuite) TestDevicesListMacFilteringMultipleAddresses(c *C) { + firstId := suite.createDevice(c, "foo,boo", "bar", "baz") + c.Assert(firstId, Not(Equals), "") + secondId := suite.createDevice(c, "bam,boom", "bing", "bong") + c.Assert(secondId, Not(Equals), "") + + op := "?op=list&mac_address=foo&mac_address=boo" + devicesURL := fmt.Sprintf("/api/%s/devices/", suite.server.version) + op + result := suite.get(c, devicesURL) + + devicesArray, err := result.GetArray() + c.Assert(err, IsNil) + c.Assert(devicesArray, HasLen, 2) + deviceMap, err := devicesArray[0].GetMap() + c.Assert(err, IsNil) + checkDevice(c, deviceMap, "foo,boo", "bar", "baz") + deviceMap, err = devicesArray[1].GetMap() + c.Assert(err, IsNil) + checkDevice(c, deviceMap, "foo,boo", "bar", "baz") +} + func (suite *TestServerSuite) TestDeviceClaimStickyIPRequiresAddress(c *C) { systemId := suite.createDevice(c, "foo", "bar", "baz") op := 
"?op=claim_sticky_ip_address" diff -Nru juju-core-2.0~beta15/src/github.com/juju/govmomi/contrib/vagrant/vcsa/.gitignore juju-core-2.0.0/src/github.com/juju/govmomi/contrib/vagrant/vcsa/.gitignore --- juju-core-2.0~beta15/src/github.com/juju/govmomi/contrib/vagrant/vcsa/.gitignore 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/govmomi/contrib/vagrant/vcsa/.gitignore 2016-10-13 14:32:07.000000000 +0000 @@ -0,0 +1,3 @@ +*.box +*.ova +.vagrant diff -Nru juju-core-2.0~beta15/src/github.com/juju/govmomi/govc/.gitignore juju-core-2.0.0/src/github.com/juju/govmomi/govc/.gitignore --- juju-core-2.0~beta15/src/github.com/juju/govmomi/govc/.gitignore 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/govmomi/govc/.gitignore 2016-10-13 14:32:07.000000000 +0000 @@ -0,0 +1 @@ +/govc* diff -Nru juju-core-2.0~beta15/src/github.com/juju/govmomi/govc/test/.gitignore juju-core-2.0.0/src/github.com/juju/govmomi/govc/test/.gitignore --- juju-core-2.0~beta15/src/github.com/juju/govmomi/govc/test/.gitignore 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/govmomi/govc/test/.gitignore 2016-10-13 14:32:07.000000000 +0000 @@ -0,0 +1 @@ +.vagrant diff -Nru juju-core-2.0~beta15/src/github.com/juju/govmomi/govc/test/images/.gitignore juju-core-2.0.0/src/github.com/juju/govmomi/govc/test/images/.gitignore --- juju-core-2.0~beta15/src/github.com/juju/govmomi/govc/test/images/.gitignore 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/govmomi/govc/test/images/.gitignore 2016-10-13 14:32:07.000000000 +0000 @@ -0,0 +1,3 @@ +ttylinux-* +floppybird.img + diff -Nru juju-core-2.0~beta15/src/github.com/juju/httprequest/checkisjson.go juju-core-2.0.0/src/github.com/juju/httprequest/checkisjson.go --- juju-core-2.0~beta15/src/github.com/juju/httprequest/checkisjson.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/httprequest/checkisjson.go 1970-01-01 00:00:00.000000000 
+0000 @@ -1,155 +0,0 @@ -package httprequest - -import ( - "bytes" - "fmt" - "io" - "io/ioutil" - "mime" - "net/http" - "unicode" - - "golang.org/x/net/html" - "golang.org/x/net/html/atom" - "gopkg.in/errgo.v1" -) - -// maxErrorBodySize holds the maximum amount of body that -// we try to read for an error before extracting text from it. -// It's reasonably large because: -// a) HTML often has large embedded scripts which we want -// to skip and -// b) it should be an relatively unusual case so the size -// shouldn't harm. -// -// It's defined as a variable so that it can be redefined in tests. -var maxErrorBodySize = 200 * 1024 - -// checkIsJSON checks that the content type of the given header implies -// that the content is JSON. If it is not, then reads from the body to -// try to make a useful error message. -func checkIsJSON(header http.Header, body io.Reader) error { - contentType := header.Get("Content-Type") - mediaType, _, err := mime.ParseMediaType(contentType) - if mediaType == "application/json" { - return nil - } - if err != nil { - // Even if there's no media type, we want to see something useful. - mediaType = fmt.Sprintf("%q", contentType) - } - // TODO use charset.NewReader to convert from non-utf8 content? - // Read the body ignoring any errors - we'll just make do with what we've got. - bodyData, _ := ioutil.ReadAll(io.LimitReader(noErrorReader{body}, int64(maxErrorBodySize))) - switch mediaType { - case "text/html": - text, err := htmlToText(bytes.NewReader(bodyData)) - if err != nil { - // Note: it seems that this can never actually - // happen - the only way that the HTML parser - // can fail is if there's a read error and we've - // removed that possibility by using - // noErrorReader above. 
- return errgo.Notef(err, "unexpected (and invalid) content text/html; want application/json; content: %q", sizeLimit(bodyData)) - } - if len(text) == 0 { - return errgo.Newf(`unexpected content type text/html; want application/json; content: %q`, sizeLimit(bodyData)) - } - return errgo.Newf(`unexpected content type text/html; want application/json; content: %s`, sizeLimit(text)) - case "text/plain": - return errgo.Newf(`unexpected content type text/plain; want application/json; content: %s`, sizeLimit(sanitizeText(string(bodyData), true))) - default: - return errgo.Newf(`unexpected content type %s; want application/json; content: %q`, mediaType, sizeLimit(bodyData)) - } -} - -// noErrorReader wraps a reader, turning any errors into io.EOF -// so that we can extract some content even if we get an io error. -type noErrorReader struct { - r io.Reader -} - -func (r noErrorReader) Read(buf []byte) (int, error) { - n, err := r.r.Read(buf) - if err != nil { - err = io.EOF - } - return n, err -} - -func sizeLimit(data []byte) []byte { - const max = 1024 - if len(data) < max { - return data - } - return append(data[0:max], fmt.Sprintf(" ... [%d bytes omitted]", len(data)-max)...) -} - -// htmlToText attempts to return some relevant textual content -// from the HTML content in the given reader, formatted -// as a single line. 
-func htmlToText(r io.Reader) ([]byte, error) { - n, err := html.Parse(r) - if err != nil { - return nil, err - } - var buf bytes.Buffer - htmlNodeToText(&buf, n) - return buf.Bytes(), nil -} - -func htmlNodeToText(w *bytes.Buffer, n *html.Node) { - for ; n != nil; n = n.NextSibling { - switch n.Type { - case html.TextNode: - data := sanitizeText(n.Data, false) - if len(data) == 0 { - break - } - if w.Len() > 0 { - w.WriteString("; ") - } - w.Write(data) - case html.ElementNode: - if n.DataAtom != atom.Script { - htmlNodeToText(w, n.FirstChild) - } - case html.DocumentNode: - htmlNodeToText(w, n.FirstChild) - } - } -} - -// sanitizeText tries to make the given string easier to read when presented -// as a single line. It squashes each run of white space into a single -// space, trims leading and trailing white space and trailing full -// stops. If newlineSemi is true, any newlines will be replaced with a -// semicolon. -func sanitizeText(s string, newlineSemi bool) []byte { - out := make([]byte, 0, len(s)) - prevWhite := false - for _, r := range s { - if newlineSemi && r == '\n' && len(out) > 0 { - out = append(out, ';') - prevWhite = true - continue - } - if unicode.IsSpace(r) { - if len(out) > 0 { - prevWhite = true - } - continue - } - if prevWhite { - out = append(out, ' ') - prevWhite = false - } - out = append(out, string(r)...) - } - // Remove final space, any full stops and any final semicolon - // we might have added. - out = bytes.TrimRightFunc(out, func(r rune) bool { - return r == '.' 
|| r == ' ' || r == ';' - }) - return out -} diff -Nru juju-core-2.0~beta15/src/github.com/juju/httprequest/checkisjson_test.go juju-core-2.0.0/src/github.com/juju/httprequest/checkisjson_test.go --- juju-core-2.0~beta15/src/github.com/juju/httprequest/checkisjson_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/httprequest/checkisjson_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,455 +0,0 @@ -package httprequest_test - -import ( - "net/http" - "strings" - - gc "gopkg.in/check.v1" - - "github.com/juju/httprequest" -) - -type checkIsJSONSuite struct{} - -var _ = gc.Suite(&checkIsJSONSuite{}) - -var checkIsJSONTests = []struct { - about string - contentType string - body string - expectError string -}{{ - about: "simple json", - contentType: "application/json", - body: "not json but unread", -}, { - about: "simple json with charset", - contentType: "application/json; charset=UTF-8", - body: "not json but unread", -}, { - about: "plain text", - contentType: "text/plain; charset=UTF-8", - body: " some\n text\t\n", - expectError: `unexpected content type text/plain; want application/json; content: some; text`, -}, { - about: "plain text with leading newline", - contentType: "text/plain; charset=UTF-8", - body: "\nsome text", - expectError: `unexpected content type text/plain; want application/json; content: some text`, -}, { - about: "unknown content type", - contentType: "something", - body: "some \nstuff", - expectError: `unexpected content type something; want application/json; content: "some \\nstuff"`, -}, { - about: "bad content type", - contentType: "/; charset=foo", - body: `some stuff`, - expectError: `unexpected content type "/; charset=foo"; want application/json; content: "some stuff"`, -}, { - about: "large text body", - contentType: "text/plain", - body: strings.Repeat("x", 1024+300), - expectError: `unexpected content type text/plain; want application/json; content: ` + strings.Repeat("x", 1024) + ` \.\.\. 
\[300 bytes omitted]`, -}, { - about: "html with no text", - contentType: "text/html", - body: "\n", - expectError: `unexpected content type text/html; want application/json; content: "\\n"`, -}, { - about: "non-utf8 text", - contentType: "text/plain; charset=iso8859-1", - body: "Pepp\xe9\n", - // It would be nice to make this better, but we don't - // really want to drag in all the charsets for this. - expectError: "unexpected content type text/plain; want application/json; content: Pepp\uFFFD", -}, { - about: "actual html error message from proxy", - contentType: "text/html; charset=UTF-8", - body: ` - -502 Proxy Error - -

Proxy Error

-

The proxy server received an invalid -response from an upstream server.
-The proxy server could not handle the request GET /identity/v1/wait.

-Reason: Error reading from remote server

-
-
Apache/2.4.7 (Ubuntu) Server at api.jujucharms.com Port 443
-`, - expectError: `unexpected content type text/html; want application/json; content: 502 Proxy Error; Proxy Error; The proxy server received an invalid response from an upstream server; The proxy server could not handle the request; GET /identity/v1/wait; Reason:; Error reading from remote server; Apache/2\.4\.7 \(Ubuntu\) Server at api.jujucharms.com Port 443`, -}, { - about: "actual html error message web page", - contentType: "text/html; charset=UTF-8", - body: ` - - - - - - -Page not found | Juju - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- - - - -
- - - -
- - -
- -
-
- - - - - - -
-
-

404: Sorry, we couldn’t find the page.

- -

Try a different URL, try searching for solutions or learn how to create your own solution.

- - -
-
- - - -
- -
- - - - - - - - - - - - - - - - - - - - - - - -`, - expectError: `unexpected content type text/html; want application/json; content: Page not found | Juju; Jump to content; Store; Demo; About; Features; Community; Docs; Get started; ☰; Create; \+; 404: Sorry, we couldn’t find the page; Try a different URL, try searching for solutions or learn how to; create your own solution; Browse the store; All bundles; All charms; Submit a bug; Browse the store ›; Back to the top; Demo; About; Features; Docs; Get Started; Juju on Google+; Ubuntu Cloud on Twitter; Ubuntu Cloud on Facebook; © 2015 Canonical Ltd. Ubuntu and Canonical are registered trademarks of Canonical Ltd; Legal information; Report a bug on this site; Got to the top of the page`, -}} - -func (checkIsJSONSuite) TestCheckIsJSON(c *gc.C) { - *httprequest.MaxErrorBodySize = 16 * 1024 - for i, test := range checkIsJSONTests { - c.Logf("test %d: %s", i, test.about) - r := strings.NewReader(test.body) - err := httprequest.CheckIsJSON(http.Header{ - "Content-Type": {test.contentType}, - }, r) - if test.expectError == "" { - c.Assert(err, gc.IsNil) - c.Assert(r.Len(), gc.Equals, len(test.body)) - continue - } - c.Assert(err, gc.ErrorMatches, test.expectError) - if len(test.body) > *httprequest.MaxErrorBodySize { - c.Assert(r.Len(), gc.Equals, *httprequest.MaxErrorBodySize-len(test.body)) - } - } -} diff -Nru juju-core-2.0~beta15/src/github.com/juju/httprequest/client.go juju-core-2.0.0/src/github.com/juju/httprequest/client.go --- juju-core-2.0~beta15/src/github.com/juju/httprequest/client.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/httprequest/client.go 2016-10-13 14:32:12.000000000 +0000 @@ -81,6 +81,13 @@ // // Any error that c.UnmarshalError or c.Doer returns will not // have its cause masked. 
+// +// If the request returns a response with a status code signifying +// success, but the response could not be unmarshaled, a +// *DecodeResponseError will be returned holding the response. Note that if +// the request returns an error status code, the Client.UnmarshalError +// function is responsible for doing this if desired (the default error +// unmarshal functions do). func (c *Client) Call(params, resp interface{}) error { return c.CallURL(c.BaseURL, params, resp) } @@ -143,6 +150,10 @@ // // If req.URL does not have a host part it will be treated as relative to // c.BaseURL. req.URL will be updated to the actual URL used. +// +// If the response cannot by unmarshaled, a *DecodeResponseError +// will be returned holding the response from the request. +// the entire response body. func (c *Client) Do(req *http.Request, body io.ReadSeeker, resp interface{}) error { if req.URL.Host == "" { var err error @@ -207,7 +218,7 @@ } } -// unmarshalResponse unmarshals +// unmarshalResponse unmarshals an HTTP response into the given value. 
func (c *Client) unmarshalResponse(httpResp *http.Response, resp interface{}) error { if 200 <= httpResp.StatusCode && httpResp.StatusCode < 300 { if respPt, ok := resp.(**http.Response); ok { @@ -216,7 +227,7 @@ } defer httpResp.Body.Close() if err := UnmarshalJSONResponse(httpResp, resp); err != nil { - return errgo.Notef(err, "%s %s", httpResp.Request.Method, httpResp.Request.URL) + return errgo.NoteMask(err, fmt.Sprintf("%s %s", httpResp.Request.Method, httpResp.Request.URL), isDecodeResponseError) } return nil } @@ -227,15 +238,30 @@ } err := errUnmarshaler(httpResp) if err == nil { - err = errgo.Newf("unexpected HTTP response status: %s", httpResp.Status) + err = errgo.Newf( + "%s %s: unexpected HTTP response status: %s", + httpResp.Request.Method, + httpResp.Request.URL.String(), + httpResp.Status, + ) + } + if isDecodeResponseError(err) || isDecodeResponseError(errgo.Cause(err)) { + return errgo.NoteMask( + err, + httpResp.Request.Method+" "+httpResp.Request.URL.String(), + errgo.Any, + ) } - return errgo.NoteMask(err, httpResp.Request.Method+" "+httpResp.Request.URL.String(), errgo.Any) + return err } // ErrorUnmarshaler returns a function which will unmarshal error // responses into new values of the same type as template. The argument // must be a pointer. A new instance of it is created every time the // returned function is called. +// +// If the error cannot by unmarshaled, the function will return an +// *HTTPResponseError holding the response from the request. func ErrorUnmarshaler(template error) func(*http.Response) error { t := reflect.TypeOf(template) if t.Kind() != reflect.Ptr { @@ -246,11 +272,11 @@ if 300 <= resp.StatusCode && resp.StatusCode < 400 { // It's a redirection error. 
loc, _ := resp.Location() - return fmt.Errorf("unexpected redirect (status %s) from %q to %q", resp.Status, resp.Request.URL, loc) + return newDecodeResponseError(resp, nil, fmt.Errorf("unexpected redirect (status %s) from %q to %q", resp.Status, resp.Request.URL, loc)) } errv := reflect.New(t) if err := UnmarshalJSONResponse(resp, errv.Interface()); err != nil { - return fmt.Errorf("cannot unmarshal error response (status %s): %v", resp.Status, err) + return errgo.NoteMask(err, fmt.Sprintf("cannot unmarshal error response (status %s)", resp.Status), isDecodeResponseError) } return errv.Interface().(error) } @@ -259,23 +285,46 @@ // UnmarshalJSONResponse unmarshals the given HTTP response // into x, which should be a pointer to the result to be // unmarshaled into. +// +// If the response cannot be unmarshaled, an error of type +// *DecodeResponseError will be returned. func UnmarshalJSONResponse(resp *http.Response, x interface{}) error { - // Try to read all the body so that we can reuse the - // connection, but don't try *too* hard. - defer io.Copy(ioutil.Discard, io.LimitReader(resp.Body, 8*1024)) if x == nil { return nil } - if err := checkIsJSON(resp.Header, resp.Body); err != nil { - return errgo.Mask(err) + if !isJSONMediaType(resp.Header) { + fancyErr := newFancyDecodeError(resp.Header, resp.Body) + return newDecodeResponseError(resp, fancyErr.body, fancyErr) + } + // Read enough data that we can produce a plausible-looking + // possibly-truncated response body in the error. + var buf bytes.Buffer + n, err := io.Copy(&buf, io.LimitReader(resp.Body, int64(maxErrorBodySize))) + + bodyData := buf.Bytes() + if err != nil { + return newDecodeResponseError(resp, bodyData, errgo.Notef(err, "error reading response body")) } - // Decode only a single JSON value, and then - // discard the rest of the body so that we can - // reuse the connection even if some foolish server - // has put garbage on the end. 
- dec := json.NewDecoder(resp.Body) + if n < int64(maxErrorBodySize) { + // We've read all the data; unmarshal it. + if err := json.Unmarshal(bodyData, x); err != nil { + return newDecodeResponseError(resp, bodyData, err) + } + return nil + } + // The response is longer than maxErrorBodySize; stitch the read + // bytes together with the body so that we can still read + // bodies larger than maxErrorBodySize. + dec := json.NewDecoder(io.MultiReader(&buf, resp.Body)) + + // Try to read all the body so that we can reuse the + // connection, but don't try *too* hard. Note that the + // usual number of additional bytes is 1 (a single newline + // after the JSON). + defer io.Copy(ioutil.Discard, io.LimitReader(resp.Body, 8*1024)) + if err := dec.Decode(x); err != nil { - return errgo.Mask(err) + return newDecodeResponseError(resp, bodyData, err) } return nil } @@ -299,7 +348,7 @@ if e.Message == "" { return "httprequest: no error message found" } - return "httprequest: " + e.Message + return e.Message } // appendURL returns the result of combining the diff -Nru juju-core-2.0~beta15/src/github.com/juju/httprequest/client_test.go juju-core-2.0.0/src/github.com/juju/httprequest/client_test.go --- juju-core-2.0~beta15/src/github.com/juju/httprequest/client_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/httprequest/client_test.go 2016-10-13 14:32:12.000000000 +0000 @@ -9,6 +9,7 @@ "reflect" "strings" + "github.com/juju/testing" jc "github.com/juju/testing/checkers" "github.com/julienschmidt/httprouter" gc "gopkg.in/check.v1" @@ -17,7 +18,9 @@ "github.com/juju/httprequest" ) -type clientSuite struct{} +type clientSuite struct { + testing.CleanupSuite +} var _ = gc.Suite(&clientSuite{}) @@ -26,7 +29,7 @@ client httprequest.Client req interface{} expectError string - expectCause interface{} + assertError func(c *gc.C, err error) expectResp interface{} }{{ about: "GET success", @@ -56,10 +59,12 @@ P: "hello", Body: struct{ I bool }{true}, }, - 
expectError: `POST http://.*/m2/hello: httprequest: cannot unmarshal parameters: cannot unmarshal into field: cannot unmarshal request body: json: cannot unmarshal bool into Go value of type int`, - expectCause: &httprequest.RemoteError{ - Message: `cannot unmarshal parameters: cannot unmarshal into field: cannot unmarshal request body: json: cannot unmarshal bool into Go value of type int`, - Code: "bad request", + expectError: `cannot unmarshal parameters: cannot unmarshal into field: cannot unmarshal request body: json: cannot unmarshal bool into Go value of type int`, + assertError: func(c *gc.C, err error) { + c.Assert(errgo.Cause(err), jc.DeepEquals, &httprequest.RemoteError{ + Message: `cannot unmarshal parameters: cannot unmarshal into field: cannot unmarshal request body: json: cannot unmarshal bool into Go value of type int`, + Code: "bad request", + }) }, }, { about: "error unmarshaler returns nil", @@ -109,6 +114,30 @@ req: &chM4Req{}, expectResp: new(int), expectError: `GET http://.*/m4: unexpected content type text/plain; want application/json; content: bad response`, + assertError: func(c *gc.C, err error) { + c.Assert(errgo.Cause(err), gc.FitsTypeOf, (*httprequest.DecodeResponseError)(nil)) + + err1 := errgo.Cause(err).(*httprequest.DecodeResponseError) + c.Assert(err1.Response, gc.NotNil) + data, err := ioutil.ReadAll(err1.Response.Body) + c.Assert(err, gc.IsNil) + c.Assert(string(data), gc.Equals, "bad response") + }, +}, { + about: "bad content in error response", + req: &chM5Req{}, + expectResp: new(int), + expectError: `GET http://.*/m5: cannot unmarshal error response \(status 418 I'm a teapot\): unexpected content type text/plain; want application/json; content: bad error value`, + assertError: func(c *gc.C, err error) { + c.Assert(errgo.Cause(err), gc.FitsTypeOf, (*httprequest.DecodeResponseError)(nil)) + + err1 := errgo.Cause(err).(*httprequest.DecodeResponseError) + c.Assert(err1.Response, gc.NotNil) + data, err := 
ioutil.ReadAll(err1.Response.Body) + c.Assert(err, gc.IsNil) + c.Assert(string(data), gc.Equals, "bad error value") + c.Assert(err1.Response.StatusCode, gc.Equals, http.StatusTeapot) + }, }} func (s *clientSuite) TestCall(c *gc.C) { @@ -125,9 +154,9 @@ client.BaseURL = srv.URL err := client.Call(test.req, resp) if test.expectError != "" { - c.Assert(err, gc.ErrorMatches, test.expectError) - if test.expectCause != nil { - c.Assert(errgo.Cause(err), jc.DeepEquals, test.expectCause) + c.Check(err, gc.ErrorMatches, test.expectError) + if test.assertError != nil { + test.assertError(c, err) } continue } @@ -308,7 +337,7 @@ var resp *http.Response err := client.Get("/m3", &resp) c.Assert(resp, gc.IsNil) - c.Assert(err, gc.ErrorMatches, `GET http://.*/m3: httprequest: m3 error`) + c.Assert(err, gc.ErrorMatches, `m3 error`) c.Assert(doer.openedBodies, gc.Equals, 1) c.Assert(doer.closedBodies, gc.Equals, 1) } @@ -396,7 +425,7 @@ } func (s *clientSuite) TestDoDoesNotReadRequestBodyAfterReturning(c *gc.C) { - body := &largeReader{total: 300 * 1024} + body := &largeReader{byte: 'a', total: 300 * 1024} // Closing the body will cause a panic under the race // detector if the Do method reads after returning. 
defer body.Close() @@ -436,6 +465,113 @@ c.Assert(resp, jc.DeepEquals, chM1Resp{"foo"}) } +func (s *clientSuite) TestUnmarshalJSONResponseWithBodyReadError(c *gc.C) { + resp := &http.Response{ + Header: http.Header{ + "Content-Type": {"application/json"}, + }, + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(io.MultiReader( + strings.NewReader(`{"one": "two"}`), + errorReader("some bad read"), + )), + } + var val map[string]string + err := httprequest.UnmarshalJSONResponse(resp, &val) + c.Assert(err, gc.ErrorMatches, `error reading response body: some bad read`) + c.Assert(val, gc.IsNil) + assertDecodeResponseError(c, err, http.StatusOK, `{"one": "two"}`) +} + +func (s *clientSuite) TestUnmarshalJSONResponseWithBadContentType(c *gc.C) { + resp := &http.Response{ + Header: http.Header{ + "Content-Type": {"foo/bar"}, + }, + StatusCode: http.StatusTeapot, + Body: ioutil.NopCloser(strings.NewReader(`something or other`)), + } + var val map[string]string + err := httprequest.UnmarshalJSONResponse(resp, &val) + c.Assert(err, gc.ErrorMatches, `unexpected content type foo/bar; want application/json; content: "something or other"`) + c.Assert(val, gc.IsNil) + assertDecodeResponseError(c, err, http.StatusTeapot, `something or other`) +} + +func (s *clientSuite) TestUnmarshalJSONResponseWithErrorAndLargeBody(c *gc.C) { + s.PatchValue(httprequest.MaxErrorBodySize, 11) + + resp := &http.Response{ + Header: http.Header{ + "Content-Type": {"foo/bar"}, + }, + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(strings.NewReader(`123456789 123456789`)), + } + var val map[string]string + err := httprequest.UnmarshalJSONResponse(resp, &val) + c.Assert(err, gc.ErrorMatches, `unexpected content type foo/bar; want application/json; content: "123456789 1"`) + c.Assert(val, gc.IsNil) + assertDecodeResponseError(c, err, http.StatusOK, `123456789 1`) +} + +func (s *clientSuite) TestUnmarshalJSONResponseWithLargeBody(c *gc.C) { + s.PatchValue(httprequest.MaxErrorBodySize, 11) + + resp 
:= &http.Response{ + Header: http.Header{ + "Content-Type": {"application/json"}, + }, + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(strings.NewReader(`"23456789 123456789"`)), + } + var val string + err := httprequest.UnmarshalJSONResponse(resp, &val) + c.Assert(err, gc.IsNil) + c.Assert(val, gc.Equals, "23456789 123456789") +} + +func (s *clientSuite) TestUnmarshalJSONWithDecodeError(c *gc.C) { + resp := &http.Response{ + Header: http.Header{ + "Content-Type": {"application/json"}, + }, + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(strings.NewReader(`{"one": "two"}`)), + } + var val chan string + err := httprequest.UnmarshalJSONResponse(resp, &val) + c.Assert(err, gc.ErrorMatches, `json: cannot unmarshal object into Go value of type chan string`) + c.Assert(val, gc.IsNil) + assertDecodeResponseError(c, err, http.StatusOK, `{"one": "two"}`) +} + +func (s *clientSuite) TestUnmarshalJSONWithDecodeErrorAndLargeBody(c *gc.C) { + s.PatchValue(httprequest.MaxErrorBodySize, 11) + + resp := &http.Response{ + Header: http.Header{ + "Content-Type": {"application/json"}, + }, + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(strings.NewReader(`"23456789 123456789"`)), + } + var val chan string + err := httprequest.UnmarshalJSONResponse(resp, &val) + c.Assert(err, gc.ErrorMatches, `json: cannot unmarshal string into Go value of type chan string`) + c.Assert(val, gc.IsNil) + assertDecodeResponseError(c, err, http.StatusOK, `"23456789 1`) +} + +func assertDecodeResponseError(c *gc.C, err error, status int, body string) { + c.Assert(errgo.Cause(err), gc.FitsTypeOf, (*httprequest.DecodeResponseError)(nil)) + err1 := errgo.Cause(err).(*httprequest.DecodeResponseError) + data, err := ioutil.ReadAll(err1.Response.Body) + c.Assert(err, gc.IsNil) + c.Assert(err1.Response.StatusCode, gc.Equals, status) + c.Assert(string(data), gc.Equals, body) +} + func (*clientSuite) newServer() *httptest.Server { f := func(p httprequest.Params) (clientHandlers, error) { return 
clientHandlers{}, nil @@ -574,6 +710,15 @@ p.Response.Write([]byte("bad response")) } +type chM5Req struct { + httprequest.Route `httprequest:"GET /m5"` +} + +func (clientHandlers) M5(p httprequest.Params, _ *chM5Req) { + p.Response.WriteHeader(http.StatusTeapot) + p.Response.Write([]byte("bad error value")) +} + type chContentLengthReq struct { httprequest.Route `httprequest:"PUT /content-length"` } @@ -640,6 +785,7 @@ // largeReader implements a reader that produces up to total bytes // in 1 byte reads. type largeReader struct { + byte byte total int n int } @@ -649,7 +795,7 @@ return 0, io.EOF } r.n++ - return copy(buf, []byte("a")), nil + return copy(buf, []byte{r.byte}), nil } func (r *largeReader) Seek(offset int64, whence int) (int64, error) { diff -Nru juju-core-2.0~beta15/src/github.com/juju/httprequest/dependencies.tsv juju-core-2.0.0/src/github.com/juju/httprequest/dependencies.tsv --- juju-core-2.0~beta15/src/github.com/juju/httprequest/dependencies.tsv 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/httprequest/dependencies.tsv 2016-10-13 14:32:12.000000000 +0000 @@ -1,8 +1,12 @@ +github.com/juju/errors git 1b5e39b83d1835fa480e0c2ddefb040ee82d58b3 2015-09-16T12:56:42Z github.com/juju/loggo git 8477fc936adf0e382d680310047ca27e128a309a 2015-05-27T03:58:39Z github.com/juju/testing git 162fafccebf20a4207ab93d63b986c230e3f4d2e 2016-04-04T09:43:17Z +github.com/juju/utils git 6219812829a3542c827c76cc75f416d4e6c94335 2016-07-08T10:00:56Z github.com/julienschmidt/httprouter git 77a895ad01ebc98a4dc95d8355bc825ce80a56f6 2015-10-13T22:55:20Z +golang.org/x/crypto git aedad9a179ec1ea11b7064c57cbc6dc30d7724ec 2015-08-30T18:06:42Z golang.org/x/net git ea47fc708ee3e20177f3ca3716217c4ab75942cb 2015-08-29T23:03:18Z golang.org/x/tools git 1f1b3322f67af76803c942fd237291538ec68262 2016-04-27T05:26:01Z gopkg.in/check.v1 git 4f90aeace3a26ad7021961c297b22c42160c7b25 2016-01-05T16:49:36Z gopkg.in/errgo.v1 git 66cb46252b94c1f3d65646f54ee8043ab38d766c 
2015-10-07T15:31:57Z +gopkg.in/mgo.v2 git 29cc868a5ca65f401ff318143f9408d02f4799cc 2016-06-09T18:00:28Z gopkg.in/yaml.v2 git a83829b6f1293c91addabc89d0571c246397bbf4 2016-03-01T20:40:22Z diff -Nru juju-core-2.0~beta15/src/github.com/juju/httprequest/export_test.go juju-core-2.0.0/src/github.com/juju/httprequest/export_test.go --- juju-core-2.0~beta15/src/github.com/juju/httprequest/export_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/httprequest/export_test.go 2016-10-13 14:32:12.000000000 +0000 @@ -1,5 +1,4 @@ package httprequest var AppendURL = appendURL -var CheckIsJSON = checkIsJSON var MaxErrorBodySize = &maxErrorBodySize diff -Nru juju-core-2.0~beta15/src/github.com/juju/httprequest/fancyerror.go juju-core-2.0.0/src/github.com/juju/httprequest/fancyerror.go --- juju-core-2.0~beta15/src/github.com/juju/httprequest/fancyerror.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/httprequest/fancyerror.go 2016-10-13 14:32:12.000000000 +0000 @@ -0,0 +1,260 @@ +package httprequest + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "mime" + "net/http" + "unicode" + + "golang.org/x/net/html" + "golang.org/x/net/html/atom" + "gopkg.in/errgo.v1" +) + +func isDecodeResponseError(err error) bool { + _, ok := err.(*DecodeResponseError) + return ok +} + +// DecodeResponseError represents an error when an HTTP +// response could not be decoded. +type DecodeResponseError struct { + // Response holds the problematic HTTP response. + // The body of this does not need to be closed + // and may be truncated if the response is large. + Response *http.Response + + // DecodeError holds the error that was encountered + // when decoding. + DecodeError error +} + +func (e *DecodeResponseError) Error() string { + return e.DecodeError.Error() +} + +// newDecodeResponseError returns a new DecodeResponseError that +// uses the given error for its message. The Response field +// holds a copy of req. 
If bodyData is non-nil, it +// will be used as the data in the Response.Body field; +// otherwise body data will be read from req.Body. +func newDecodeResponseError(resp *http.Response, bodyData []byte, err error) *DecodeResponseError { + if bodyData == nil { + bodyData = readBodyForError(resp.Body) + } + resp1 := *resp + resp1.Body = ioutil.NopCloser(bytes.NewReader(bodyData)) + + return &DecodeResponseError{ + Response: &resp1, + DecodeError: errgo.Mask(err, errgo.Any), + } +} + +// newDecodeRequestError returns a new DecodeRequestError that +// uses the given error for its message. The Request field +// holds a copy of req. If bodyData is non-nil, it +// will be used as the data in the Request.Body field; +// otherwise body data will be read from req.Body. +func newDecodeRequestError(req *http.Request, bodyData []byte, err error) *DecodeRequestError { + if bodyData == nil { + bodyData = readBodyForError(req.Body) + } + req1 := *req + req1.Body = ioutil.NopCloser(bytes.NewReader(bodyData)) + + return &DecodeRequestError{ + Request: &req1, + DecodeError: errgo.Mask(err, errgo.Any), + } +} + +// DecodeRequestError represents an error when an HTTP +// request could not be decoded. +type DecodeRequestError struct { + // Request holds the problematic HTTP request. + // The body of this does not need to be closed + // and may be truncated if the response is large. + Request *http.Request + + // DecodeError holds the error that was encountered + // when decoding. + DecodeError error +} + +func (e *DecodeRequestError) Error() string { + return e.DecodeError.Error() +} + +// fancyDecodeError is an error type that tries to +// produce a nice error message when the content +// type of a request or response is wrong. +type fancyDecodeError struct { + // contentType holds the contentType of the request or response. + contentType string + + // body holds up to maxErrorBodySize saved bytes of the + // request or response body. 
+ body []byte +} + +func newFancyDecodeError(h http.Header, body io.Reader) *fancyDecodeError { + return &fancyDecodeError{ + contentType: h.Get("Content-Type"), + body: readBodyForError(body), + } +} + +func readBodyForError(r io.Reader) []byte { + data, _ := ioutil.ReadAll(io.LimitReader(noErrorReader{r}, int64(maxErrorBodySize))) + return data +} + +// maxErrorBodySize holds the maximum amount of body that +// we try to read for an error before extracting text from it. +// It's reasonably large because: +// a) HTML often has large embedded scripts which we want +// to skip and +// b) it should be an relatively unusual case so the size +// shouldn't harm. +// +// It's defined as a variable so that it can be redefined in tests. +var maxErrorBodySize = 200 * 1024 + +// isJSONMediaType reports whether the content type of the given header implies +// that the content is JSON. +func isJSONMediaType(header http.Header) bool { + contentType := header.Get("Content-Type") + mediaType, _, _ := mime.ParseMediaType(contentType) + return mediaType == "application/json" +} + +// Error implements error.Error by trying to produce a decent +// error message derived from the body content. +func (e *fancyDecodeError) Error() string { + mediaType, _, err := mime.ParseMediaType(e.contentType) + if err != nil { + // Even if there's no media type, we want to see something useful. + mediaType = fmt.Sprintf("%q", e.contentType) + } + + // TODO use charset.NewReader to convert from non-utf8 content? + switch mediaType { + case "text/html": + text, err := htmlToText(bytes.NewReader(e.body)) + if err != nil { + // Note: it seems that this can never actually + // happen - the only way that the HTML parser + // can fail is if there's a read error and we've + // removed that possibility by using + // noErrorReader above. 
+ return fmt.Sprintf("unexpected (and invalid) content text/html; want application/json; content: %q", sizeLimit(e.body)) + } + if len(text) == 0 { + return fmt.Sprintf(`unexpected content type text/html; want application/json; content: %q`, sizeLimit(e.body)) + } + return fmt.Sprintf(`unexpected content type text/html; want application/json; content: %s`, sizeLimit(text)) + case "text/plain": + return fmt.Sprintf(`unexpected content type text/plain; want application/json; content: %s`, sizeLimit(sanitizeText(string(e.body), true))) + default: + return fmt.Sprintf(`unexpected content type %s; want application/json; content: %q`, mediaType, sizeLimit(e.body)) + } +} + +// noErrorReader wraps a reader, turning any errors into io.EOF +// so that we can extract some content even if we get an io error. +type noErrorReader struct { + r io.Reader +} + +func (r noErrorReader) Read(buf []byte) (int, error) { + n, err := r.r.Read(buf) + if err != nil { + err = io.EOF + } + return n, err +} + +func sizeLimit(data []byte) []byte { + const max = 1024 + if len(data) < max { + return data + } + return append(data[0:max], fmt.Sprintf(" ... [%d bytes omitted]", len(data)-max)...) +} + +// htmlToText attempts to return some relevant textual content +// from the HTML content in the given reader, formatted +// as a single line. +func htmlToText(r io.Reader) ([]byte, error) { + n, err := html.Parse(r) + if err != nil { + return nil, err + } + var buf bytes.Buffer + htmlNodeToText(&buf, n) + return buf.Bytes(), nil +} + +// htmlNodeToText tries to extract some text from an arbitrary HTML +// page. It doesn't try to avoid looking in the header, because the +// title is in the header and is often the most succinct description of +// the page. 
+func htmlNodeToText(w *bytes.Buffer, n *html.Node) { + for ; n != nil; n = n.NextSibling { + switch n.Type { + case html.TextNode: + data := sanitizeText(n.Data, false) + if len(data) == 0 { + break + } + if w.Len() > 0 { + w.WriteString("; ") + } + w.Write(data) + case html.ElementNode: + if n.DataAtom != atom.Script { + htmlNodeToText(w, n.FirstChild) + } + case html.DocumentNode: + htmlNodeToText(w, n.FirstChild) + } + } +} + +// sanitizeText tries to make the given string easier to read when presented +// as a single line. It squashes each run of white space into a single +// space, trims leading and trailing white space and trailing full +// stops. If newlineSemi is true, any newlines will be replaced with a +// semicolon. +func sanitizeText(s string, newlineSemi bool) []byte { + out := make([]byte, 0, len(s)) + prevWhite := false + for _, r := range s { + if newlineSemi && r == '\n' && len(out) > 0 { + out = append(out, ';') + prevWhite = true + continue + } + if unicode.IsSpace(r) { + if len(out) > 0 { + prevWhite = true + } + continue + } + if prevWhite { + out = append(out, ' ') + prevWhite = false + } + out = append(out, string(r)...) + } + // Remove final space, any full stops and any final semicolon + // we might have added. + out = bytes.TrimRightFunc(out, func(r rune) bool { + return r == '.' 
|| r == ' ' || r == ';' + }) + return out +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/httprequest/fancyerror_test.go juju-core-2.0.0/src/github.com/juju/httprequest/fancyerror_test.go --- juju-core-2.0~beta15/src/github.com/juju/httprequest/fancyerror_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/httprequest/fancyerror_test.go 2016-10-13 14:32:12.000000000 +0000 @@ -0,0 +1,435 @@ +package httprequest + +import ( + "strings" + + gc "gopkg.in/check.v1" +) + +type checkIsJSONSuite struct{} + +var _ = gc.Suite(&checkIsJSONSuite{}) + +var fancyDecodeErrorTests = []struct { + about string + contentType string + body string + expectError string +}{{ + about: "plain text", + contentType: "text/plain; charset=UTF-8", + body: " some\n text\t\n", + expectError: `unexpected content type text/plain; want application/json; content: some; text`, +}, { + about: "plain text with leading newline", + contentType: "text/plain; charset=UTF-8", + body: "\nsome text", + expectError: `unexpected content type text/plain; want application/json; content: some text`, +}, { + about: "unknown content type", + contentType: "something", + body: "some \nstuff", + expectError: `unexpected content type something; want application/json; content: "some \\nstuff"`, +}, { + about: "bad content type", + contentType: "/; charset=foo", + body: `some stuff`, + expectError: `unexpected content type "/; charset=foo"; want application/json; content: "some stuff"`, +}, { + about: "large text body", + contentType: "text/plain", + body: strings.Repeat("x", 1024+300), + expectError: `unexpected content type text/plain; want application/json; content: ` + strings.Repeat("x", 1024) + ` \.\.\. 
\[300 bytes omitted]`, +}, { + about: "html with no text", + contentType: "text/html", + body: "\n", + expectError: `unexpected content type text/html; want application/json; content: "\\n"`, +}, { + about: "non-utf8 text", + contentType: "text/plain; charset=iso8859-1", + body: "Pepp\xe9\n", + // It would be nice to make this better, but we don't + // really want to drag in all the charsets for this. + expectError: "unexpected content type text/plain; want application/json; content: Pepp\uFFFD", +}, { + about: "actual html error message from proxy", + contentType: "text/html; charset=UTF-8", + body: ` + +502 Proxy Error + +

Proxy Error

+

The proxy server received an invalid +response from an upstream server.
+The proxy server could not handle the request GET /identity/v1/wait.

+Reason: Error reading from remote server

+
+
Apache/2.4.7 (Ubuntu) Server at api.jujucharms.com Port 443
+`, + expectError: `unexpected content type text/html; want application/json; content: 502 Proxy Error; Proxy Error; The proxy server received an invalid response from an upstream server; The proxy server could not handle the request; GET /identity/v1/wait; Reason:; Error reading from remote server; Apache/2\.4\.7 \(Ubuntu\) Server at api.jujucharms.com Port 443`, +}, { + about: "actual html error message web page", + contentType: "text/html; charset=UTF-8", + body: ` + + + + + + +Page not found | Juju + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+ + + +
+ + +
+ +
+
+ + + + + + +
+
+

404: Sorry, we couldn’t find the page.

+ +

Try a different URL, try searching for solutions or learn how to create your own solution.

+ + +
+
+ + + +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + +`, + expectError: `unexpected content type text/html; want application/json; content: Page not found | Juju; Jump to content; Store; Demo; About; Features; Community; Docs; Get started; ☰; Create; \+; 404: Sorry, we couldn’t find the page; Try a different URL, try searching for solutions or learn how to; create your own solution; Browse the store; All bundles; All charms; Submit a bug; Browse the store ›; Back to the top; Demo; About; Features; Docs; Get Started; Juju on Google+; Ubuntu Cloud on Twitter; Ubuntu Cloud on Facebook; © 2015 Canonical Ltd. Ubuntu and Canonical are registered trademarks of Canonical Ltd; Legal information; Report a bug on this site; Got to the top of the page`, +}} + +func (checkIsJSONSuite) TestFancyDecodeError(c *gc.C) { + for i, test := range fancyDecodeErrorTests { + c.Logf("test %d: %s", i, test.about) + err := &fancyDecodeError{ + contentType: test.contentType, + body: []byte(test.body), + } + c.Assert(err, gc.ErrorMatches, test.expectError) + } +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/httprequest/handler.go juju-core-2.0.0/src/github.com/juju/httprequest/handler.go --- juju-core-2.0~beta15/src/github.com/juju/httprequest/handler.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/httprequest/handler.go 2016-10-13 14:32:12.000000000 +0000 @@ -5,6 +5,7 @@ import ( "encoding/json" + "fmt" "io" "net/http" "reflect" @@ -402,16 +403,17 @@ Request: req, PathVar: p, }); err != nil { - // We write the error only if the header hasn't - // already been written, because if it has, then - // we will not be able to set the appropriate error - // response code, and there's a danger that we - // may be corrupting output by appending - // a JSON error message to it. 
- if !w1.headerWritten { - e.WriteError(w, err) + if w1.headerWritten { + // The header has already been written, + // so we can't set the appropriate error + // response code and there's a danger + // that we may be corrupting the + // response by appending a JSON error + // message to it. + // TODO log an error in this case. + return } - // TODO log the error? + e.WriteError(w, err) } } } @@ -425,7 +427,22 @@ // HeaderSetter. func (e ErrorMapper) WriteError(w http.ResponseWriter, err error) { status, resp := e(err) - WriteJSON(w, status, resp) + err1 := WriteJSON(w, status, resp) + if err1 == nil { + return + } + // TODO log an error ? + + // JSON-marshaling the original error failed, so try to send that + // error instead; if that fails, give up and go home. + status1, resp1 := e(errgo.Notef(err1, "cannot marshal error response %q", err)) + err2 := WriteJSON(w, status1, resp1) + if err2 == nil { + return + } + + w.WriteHeader(http.StatusInternalServerError) + w.Write([]byte(fmt.Sprintf("really cannot marshal error response %q: %v", err, err1))) } // WriteJSON writes the given value to the ResponseWriter @@ -442,9 +459,6 @@ // con: if there's an error after the first write, it will be lost. data, err := json.Marshal(val) if err != nil { - // TODO(rog) log an error if this fails and lose the - // error return, because most callers will need - // to do that anyway. 
return errgo.Mask(err) } w.Header().Set("content-type", "application/json") diff -Nru juju-core-2.0~beta15/src/github.com/juju/httprequest/handler_test.go juju-core-2.0.0/src/github.com/juju/httprequest/handler_test.go --- juju-core-2.0~beta15/src/github.com/juju/httprequest/handler_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/httprequest/handler_test.go 2016-10-13 14:32:12.000000000 +0000 @@ -341,14 +341,14 @@ }, { about: "return type that can't be marshaled as JSON", f: func(c *gc.C) interface{} { - return func(p httprequest.Params, s *struct{}) (map[int]int, error) { - return map[int]int{0: 1}, nil + return func(p httprequest.Params, s *struct{}) (chan int, error) { + return make(chan int), nil } }, req: &http.Request{}, pathVar: httprouter.Params{}, expectBody: httprequest.RemoteError{ - Message: "json: unsupported type: map[int]int", + Message: "json: unsupported type: chan int", }, expectStatus: http.StatusInternalServerError, }, { @@ -785,11 +785,12 @@ } var ( - errUnauth = errors.New("unauth") - errBadReq = errors.New("bad request") - errOther = errors.New("other") - errCustomHeaders = errors.New("custom headers") - errNil = errors.New("nil result") + errUnauth = errors.New("unauth") + errBadReq = errors.New("bad request") + errOther = errors.New("other") + errCustomHeaders = errors.New("custom headers") + errUnmarshalableError = errors.New("unmarshalable error") + errNil = errors.New("nil result") ) type HeaderNumber struct { @@ -833,6 +834,8 @@ h.Set("Acceptability", "not at all") }, } + case errUnmarshalableError: + return http.StatusTeapot, make(chan int) case errNil: return status, nil } @@ -876,6 +879,12 @@ expectHeader: http.Header{ "Acceptability": {"not at all"}, }, +}, { + err: errUnmarshalableError, + expectStatus: http.StatusInternalServerError, + expectResp: &httprequest.RemoteError{ + Message: `cannot marshal error response "unmarshalable error": json: unsupported type: chan int`, + }, }} func (s 
*handlerSuite) TestWriteError(c *gc.C) { diff -Nru juju-core-2.0~beta15/src/github.com/juju/httprequest/unmarshal.go juju-core-2.0.0/src/github.com/juju/httprequest/unmarshal.go --- juju-core-2.0~beta15/src/github.com/juju/httprequest/unmarshal.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/httprequest/unmarshal.go 2016-10-13 14:32:12.000000000 +0000 @@ -157,8 +157,10 @@ // unmarshalBody unmarshals the http request body // into the given value. func unmarshalBody(v reflect.Value, p Params, makeResult resultMaker) error { - if err := checkIsJSON(p.Request.Header, p.Request.Body); err != nil { - return errgo.Mask(err) + if !isJSONMediaType(p.Request.Header) { + fancyErr := newFancyDecodeError(p.Request.Header, p.Request.Body) + + return newDecodeRequestError(p.Request, fancyErr.body, fancyErr) } data, err := ioutil.ReadAll(p.Request.Body) if err != nil { diff -Nru juju-core-2.0~beta15/src/github.com/juju/idmclient/.gitignore juju-core-2.0.0/src/github.com/juju/idmclient/.gitignore --- juju-core-2.0~beta15/src/github.com/juju/idmclient/.gitignore 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/idmclient/.gitignore 2016-10-13 14:32:32.000000000 +0000 @@ -0,0 +1 @@ +*.swp diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/agent/agentbootstrap/bootstrap.go juju-core-2.0.0/src/github.com/juju/juju/agent/agentbootstrap/bootstrap.go --- juju-core-2.0~beta15/src/github.com/juju/juju/agent/agentbootstrap/bootstrap.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/agent/agentbootstrap/bootstrap.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,9 +4,12 @@ package agentbootstrap import ( + "fmt" + "github.com/juju/errors" "github.com/juju/loggo" "github.com/juju/utils" + "github.com/juju/utils/clock" "github.com/juju/utils/series" "gopkg.in/juju/names.v2" @@ -90,20 +93,28 @@ return nil, nil, errors.Annotate(err, "failed to initialize mongo admin user") } - cloudCredentials := 
make(map[string]cloud.Credential) - if args.ControllerCloudCredential != nil { - cloudCredentials[args.ControllerCloudCredentialName] = *args.ControllerCloudCredential + cloudCredentials := make(map[names.CloudCredentialTag]cloud.Credential) + var cloudCredentialTag names.CloudCredentialTag + if args.ControllerCloudCredential != nil && args.ControllerCloudCredentialName != "" { + cloudCredentialTag = names.NewCloudCredentialTag(fmt.Sprintf( + "%s/%s/%s", + args.ControllerCloudName, + adminUser.Id(), + args.ControllerCloudCredentialName, + )) + cloudCredentials[cloudCredentialTag] = *args.ControllerCloudCredential } logger.Debugf("initializing address %v", info.Addrs) st, err := state.Initialize(state.InitializeParams{ + Clock: clock.WallClock, ControllerModelArgs: state.ModelArgs{ Owner: adminUser, Config: args.ControllerModelConfig, Constraints: args.ModelConstraints, CloudName: args.ControllerCloudName, CloudRegion: args.ControllerCloudRegion, - CloudCredential: args.ControllerCloudCredentialName, + CloudCredential: cloudCredentialTag, StorageProviderRegistry: args.StorageProviderRegistry, }, CloudName: args.ControllerCloudName, @@ -111,6 +122,7 @@ CloudCredentials: cloudCredentials, ControllerConfig: args.ControllerConfig, ControllerInheritedConfig: args.ControllerInheritedConfig, + RegionInheritedConfig: args.RegionInheritedConfig, MongoInfo: info, MongoDialOpts: dialOpts, NewPolicy: newPolicy, @@ -163,16 +175,10 @@ return nil, nil, errors.Trace(err) } - // TODO(axw) we shouldn't be adding credentials to model config. 
- if args.ControllerCloudCredential != nil { - for k, v := range args.ControllerCloudCredential.Attributes() { - attrs[k] = v - } - } controllerUUID := args.ControllerConfig.ControllerUUID() creator := modelmanager.ModelConfigCreator{Provider: args.Provider} hostedModelConfig, err := creator.NewModelConfig( - cloudSpec, controllerUUID, args.ControllerModelConfig, attrs, + cloudSpec, args.ControllerModelConfig, attrs, ) if err != nil { return nil, nil, errors.Annotate(err, "creating hosted model config") @@ -200,7 +206,7 @@ Constraints: args.ModelConstraints, CloudName: args.ControllerCloudName, CloudRegion: args.ControllerCloudRegion, - CloudCredential: args.ControllerCloudCredentialName, + CloudCredential: cloudCredentialTag, StorageProviderRegistry: args.StorageProviderRegistry, }) if err != nil { diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/agent/agentbootstrap/bootstrap_test.go juju-core-2.0.0/src/github.com/juju/juju/agent/agentbootstrap/bootstrap_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/agent/agentbootstrap/bootstrap_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/agent/agentbootstrap/bootstrap_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -97,6 +97,7 @@ StateAddresses: []string{s.mgoInst.Addr()}, CACert: testing.CACert, Password: testing.DefaultMongoPassword, + Controller: testing.ControllerTag, Model: testing.ModelTag, } servingInfo := params.StateServingInfo{ @@ -148,8 +149,13 @@ } controllerInheritedConfig := map[string]interface{}{ "apt-mirror": "http://mirror", + "no-proxy": "value", + } + regionConfig := cloud.RegionConfig{ + "some-region": cloud.Attrs{ + "no-proxy": "a-value", + }, } - var envProvider fakeProvider args := agentbootstrap.InitializeStateParams{ StateInitializationParams: instancecfg.StateInitializationParams{ @@ -157,12 +163,13 @@ BootstrapMachineInstanceId: "i-bootstrap", BootstrapMachineHardwareCharacteristics: &expectHW, ControllerCloud: cloud.Cloud{ - Type: 
"dummy", - AuthTypes: []cloud.AuthType{cloud.EmptyAuthType}, - Regions: []cloud.Region{{Name: "some-region"}}, + Type: "dummy", + AuthTypes: []cloud.AuthType{cloud.EmptyAuthType}, + Regions: []cloud.Region{{Name: "dummy-region"}}, + RegionConfig: regionConfig, }, ControllerCloudName: "dummy", - ControllerCloudRegion: "some-region", + ControllerCloudRegion: "dummy-region", ControllerConfig: controllerCfg, ControllerModelConfig: modelCfg, ModelConstraints: expectModelConstraints, @@ -196,7 +203,8 @@ // Check that initial admin user has been set up correctly. modelTag := model.Tag().(names.ModelTag) - s.assertCanLogInAsAdmin(c, modelTag, testing.DefaultMongoPassword) + controllerTag := names.NewControllerTag(controllerCfg.ControllerUUID()) + s.assertCanLogInAsAdmin(c, modelTag, controllerTag, testing.DefaultMongoPassword) user, err := st.User(model.Owner()) c.Assert(err, jc.ErrorIsNil) c.Assert(user.PasswordValid(testing.DefaultMongoPassword), jc.IsTrue) @@ -205,7 +213,7 @@ controllerCfg, err = st.ControllerConfig() c.Assert(err, jc.ErrorIsNil) c.Assert(controllerCfg, jc.DeepEquals, controller.Config{ - "controller-uuid": testing.ModelTag.Id(), + "controller-uuid": testing.ControllerTag.Id(), "ca-cert": testing.CACert, "state-port": 1234, "api-port": 17777, @@ -221,6 +229,7 @@ c.Assert(err, jc.ErrorIsNil) expectedAttrs := expectedCfg.AllAttrs() expectedAttrs["apt-mirror"] = "http://mirror" + expectedAttrs["no-proxy"] = "value" c.Assert(newModelCfg.AllAttrs(), jc.DeepEquals, expectedAttrs) gotModelConstraints, err := st.ModelConstraints() @@ -239,7 +248,7 @@ hostedModel, err := hostedModelSt.Model() c.Assert(err, jc.ErrorIsNil) c.Assert(hostedModel.Name(), gc.Equals, "hosted") - c.Assert(hostedModel.CloudRegion(), gc.Equals, "some-region") + c.Assert(hostedModel.CloudRegion(), gc.Equals, "dummy-region") hostedCfg, err := hostedModelSt.ModelConfig() c.Assert(err, jc.ErrorIsNil) _, hasUnexpected := hostedCfg.AllAttrs()["not-for-hosted"] @@ -289,27 +298,26 @@ info, ok := 
cfg.MongoInfo() c.Assert(ok, jc.IsTrue) c.Assert(info.Password, gc.Not(gc.Equals), testing.DefaultMongoPassword) - st1, err := state.Open(newCfg.Model(), info, mongotest.DialOpts(), nil) + st1, err := state.Open(newCfg.Model(), newCfg.Controller(), info, mongotest.DialOpts(), nil) c.Assert(err, jc.ErrorIsNil) defer st1.Close() // Make sure that the hosted model Environ's Create method is called. envProvider.CheckCallNames(c, - "RestrictedConfigAttributes", "PrepareConfig", "Validate", "Open", "Create", ) - envProvider.CheckCall(c, 3, "Open", environs.OpenParams{ + envProvider.CheckCall(c, 2, "Open", environs.OpenParams{ Cloud: environs.CloudSpec{ Type: "dummy", Name: "dummy", - Region: "some-region", + Region: "dummy-region", }, Config: hostedCfg, }) - envProvider.CheckCall(c, 4, "Create", environs.CreateParams{ + envProvider.CheckCall(c, 3, "Create", environs.CreateParams{ ControllerUUID: controllerCfg.ControllerUUID(), }) } @@ -322,6 +330,7 @@ StateAddresses: []string{s.mgoInst.Addr()}, CACert: testing.CACert, Password: "fake", + Controller: testing.ControllerTag, Model: testing.ModelTag, } cfg, err := agent.NewAgentConfig(configParams) @@ -348,6 +357,7 @@ StateAddresses: []string{s.mgoInst.Addr()}, CACert: testing.CACert, Password: testing.DefaultMongoPassword, + Controller: testing.ControllerTag, Model: testing.ModelTag, } cfg, err := agent.NewAgentConfig(configParams) @@ -432,7 +442,7 @@ } } -func (s *bootstrapSuite) assertCanLogInAsAdmin(c *gc.C, modelTag names.ModelTag, password string) { +func (s *bootstrapSuite) assertCanLogInAsAdmin(c *gc.C, modelTag names.ModelTag, controllerTag names.ControllerTag, password string) { info := &mongo.MongoInfo{ Info: mongo.Info{ Addrs: []string{s.mgoInst.Addr()}, @@ -441,7 +451,7 @@ Tag: nil, // admin user Password: password, } - st, err := state.Open(modelTag, info, mongotest.DialOpts(), state.NewPolicyFunc(nil)) + st, err := state.Open(modelTag, controllerTag, info, mongotest.DialOpts(), state.NewPolicyFunc(nil)) 
c.Assert(err, jc.ErrorIsNil) defer st.Close() _, err = st.Machine("0") @@ -453,11 +463,6 @@ gitjujutesting.Stub } -func (p *fakeProvider) RestrictedConfigAttributes() []string { - p.MethodCall(p, "RestrictedConfigAttributes") - return []string{} -} - func (p *fakeProvider) PrepareConfig(args environs.PrepareConfigParams) (*config.Config, error) { p.MethodCall(p, "PrepareConfig", args) return args.Config, p.NextErr() diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/agent/agent.go juju-core-2.0.0/src/github.com/juju/juju/agent/agent.go --- juju-core-2.0~beta15/src/github.com/juju/juju/agent/agent.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/agent/agent.go 2016-10-13 14:31:49.000000000 +0000 @@ -166,7 +166,7 @@ Namespace = "NAMESPACE" AgentServiceName = "AGENT_SERVICE_NAME" MongoOplogSize = "MONGO_OPLOG_SIZE" - NumaCtlPreference = "NUMA_CTL_PREFERENCE" + NUMACtlPreference = "NUMA_CTL_PREFERENCE" ) // The Config interface is the sole way that the agent gets access to the @@ -246,6 +246,9 @@ // Model returns the tag for the model that the agent belongs to. Model() names.ModelTag + // Controller returns the tag for the controller that the agent belongs to. + Controller() names.ControllerTag + // MetricsSpoolDir returns the spool directory where workloads store // collected metrics. MetricsSpoolDir() string @@ -283,22 +286,6 @@ // SetCACert sets the CA cert used for validating API connections. SetCACert(string) - // Migrate takes an existing agent config and applies the given - // parameters to change it. - // - // Only non-empty fields in newParams are used - // to change existing config settings. All changes are written - // atomically. UpgradedToVersion cannot be changed here, because - // Migrate is most likely called during an upgrade, so it will be - // changed at the end of the upgrade anyway, if successful. - // - // Migrate does not actually write the new configuration. 
- // - // Note that if the configuration file moves location, - // (if DataDir is set), the the caller is responsible for removing - // the old configuration. - Migrate(MigrateParams) error - // SetStateServingInfo sets the information needed // to run a controller SetStateServingInfo(info params.StateServingInfo) @@ -330,17 +317,6 @@ ConfigWriter } -// MigrateParams holds agent config values to change in a -// Migrate call. Empty fields will be ignored. DeleteValues -// specifies a list of keys to delete. -type MigrateParams struct { - Paths Paths - Jobs []multiwatcher.MachineJob - DeleteValues []string - Values map[string]string - Model names.ModelTag -} - // Ensure that the configInternal struct implements the Config interface. var _ Config = (*configInternal)(nil) @@ -363,6 +339,7 @@ paths Paths tag names.Tag nonce string + controller names.ControllerTag model names.ModelTag jobs []multiwatcher.MachineJob upgradedToVersion version.Number @@ -384,6 +361,7 @@ Tag names.Tag Password string Nonce string + Controller names.ControllerTag Model names.ModelTag StateAddresses []string APIAddresses []string @@ -413,6 +391,11 @@ if configParams.Password == "" { return nil, errors.Trace(requiredError("password")) } + if uuid := configParams.Controller.Id(); uuid == "" { + return nil, errors.Trace(requiredError("controller")) + } else if !names.IsValidController(uuid) { + return nil, errors.Errorf("%q is not a valid controller uuid", uuid) + } if uuid := configParams.Model.Id(); uuid == "" { return nil, errors.Trace(requiredError("model")) } else if !names.IsValidModel(uuid) { @@ -433,6 +416,7 @@ upgradedToVersion: configParams.UpgradedToVersion, tag: configParams.Tag, nonce: configParams.Nonce, + controller: configParams.Controller, model: configParams.Model, caCert: configParams.CACert, oldPassword: configParams.Password, @@ -538,32 +522,11 @@ for key, val := range c0.values { c1.values[key] = val } - return &c1 -} - -func (config *configInternal) Migrate(newParams 
MigrateParams) error { - config.paths.Migrate(newParams.Paths) - config.configFilePath = ConfigPath(config.paths.DataDir, config.tag) - if len(newParams.Jobs) > 0 { - config.jobs = make([]multiwatcher.MachineJob, len(newParams.Jobs)) - copy(config.jobs, newParams.Jobs) - } - for _, key := range newParams.DeleteValues { - delete(config.values, key) - } - for key, value := range newParams.Values { - if config.values == nil { - config.values = make(map[string]string) - } - config.values[key] = value - } - if newParams.Model.Id() != "" { - config.model = newParams.Model - } - if err := config.check(); err != nil { - return fmt.Errorf("migrated agent config is invalid: %v", err) + if c0.servingInfo != nil { + info := *c0.servingInfo + c1.servingInfo = &info } - return nil + return &c1 } func (c *configInternal) SetUpgradedToVersion(newVersion version.Number) { @@ -695,6 +658,10 @@ return c.model } +func (c *configInternal) Controller() names.ControllerTag { + return c.controller +} + func (c *configInternal) Dir() string { return Dir(c.paths.DataDir, c.tag) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/agent/agent_test.go juju-core-2.0.0/src/github.com/juju/juju/agent/agent_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/agent/agent_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/agent/agent_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -7,7 +7,6 @@ import ( "fmt" "path/filepath" - "reflect" jc "github.com/juju/testing/checkers" "github.com/juju/version" @@ -19,7 +18,6 @@ "github.com/juju/juju/apiserver/params" "github.com/juju/juju/mongo" "github.com/juju/juju/network" - "github.com/juju/juju/state/multiwatcher" "github.com/juju/juju/testing" jujuversion "github.com/juju/juju/version" ) @@ -66,6 +64,7 @@ Tag: names.NewMachineTag("1"), UpgradedToVersion: jujuversion.Current, Password: "sekrit", + Controller: testing.ControllerTag, }, checkErr: "model not found in configuration", }, { @@ -75,16 +74,39 @@ 
Tag: names.NewMachineTag("1"), UpgradedToVersion: jujuversion.Current, Password: "sekrit", + Controller: testing.ControllerTag, Model: names.NewModelTag("uuid"), }, checkErr: `"uuid" is not a valid model uuid`, }, { + about: "missing controller tag", + params: agent.AgentConfigParams{ + Paths: agent.Paths{DataDir: "/data/dir"}, + Tag: names.NewMachineTag("1"), + UpgradedToVersion: jujuversion.Current, + Password: "sekrit", + Model: testing.ModelTag, + }, + checkErr: "controller not found in configuration", +}, { + about: "invalid controller tag", + params: agent.AgentConfigParams{ + Paths: agent.Paths{DataDir: "/data/dir"}, + Tag: names.NewMachineTag("1"), + UpgradedToVersion: jujuversion.Current, + Password: "sekrit", + Controller: names.NewControllerTag("uuid"), + Model: testing.ModelTag, + }, + checkErr: `"uuid" is not a valid controller uuid`, +}, { about: "missing CA cert", params: agent.AgentConfigParams{ Paths: agent.Paths{DataDir: "/data/dir"}, Tag: names.NewMachineTag("1"), UpgradedToVersion: jujuversion.Current, Password: "sekrit", + Controller: testing.ControllerTag, Model: testing.ModelTag, }, checkErr: "CA certificate not found in configuration", @@ -96,6 +118,7 @@ UpgradedToVersion: jujuversion.Current, Password: "sekrit", CACert: "ca cert", + Controller: testing.ControllerTag, Model: testing.ModelTag, }, checkErr: "state or API addresses not found in configuration", @@ -107,6 +130,7 @@ UpgradedToVersion: jujuversion.Current, Password: "sekrit", CACert: "ca cert", + Controller: testing.ControllerTag, Model: testing.ModelTag, StateAddresses: []string{"localhost:8080", "bad-address"}, }, @@ -119,6 +143,7 @@ UpgradedToVersion: jujuversion.Current, Password: "sekrit", CACert: "ca cert", + Controller: testing.ControllerTag, Model: testing.ModelTag, APIAddresses: []string{"localhost:8080", "bad-address"}, }, @@ -131,6 +156,7 @@ UpgradedToVersion: jujuversion.Current, Password: "sekrit", CACert: "ca cert", + Controller: testing.ControllerTag, Model: 
testing.ModelTag, StateAddresses: []string{"localhost:1234"}, }, @@ -142,6 +168,7 @@ UpgradedToVersion: jujuversion.Current, Password: "sekrit", CACert: "ca cert", + Controller: testing.ControllerTag, Model: testing.ModelTag, APIAddresses: []string{"localhost:1234"}, }, @@ -153,6 +180,7 @@ UpgradedToVersion: jujuversion.Current, Password: "sekrit", CACert: "ca cert", + Controller: testing.ControllerTag, Model: testing.ModelTag, StateAddresses: []string{"localhost:1234"}, APIAddresses: []string{"localhost:1235"}, @@ -165,6 +193,7 @@ Password: "sekrit", UpgradedToVersion: jujuversion.Current, CACert: "ca cert", + Controller: testing.ControllerTag, Model: testing.ModelTag, StateAddresses: []string{"localhost:1234"}, APIAddresses: []string{"localhost:1235"}, @@ -178,6 +207,7 @@ Password: "sekrit", UpgradedToVersion: jujuversion.Current, CACert: "ca cert", + Controller: testing.ControllerTag, Model: testing.ModelTag, StateAddresses: []string{"localhost:1234"}, APIAddresses: []string{"localhost:1235"}, @@ -194,6 +224,7 @@ Password: "sekrit", UpgradedToVersion: jujuversion.Current, CACert: "ca cert", + Controller: testing.ControllerTag, Model: testing.ModelTag, StateAddresses: []string{"localhost:1234"}, APIAddresses: []string{"localhost:1235"}, @@ -213,6 +244,7 @@ Password: "sekrit", UpgradedToVersion: jujuversion.Current, CACert: "ca cert", + Controller: testing.ControllerTag, Model: testing.ModelTag, StateAddresses: []string{"localhost:1234"}, APIAddresses: []string{"localhost:1235"}, @@ -237,6 +269,7 @@ Tag: names.NewUnitTag("ubuntu/1"), Password: "sekrit", UpgradedToVersion: jujuversion.Current, + Controller: testing.ControllerTag, Model: testing.ModelTag, CACert: "ca cert", StateAddresses: []string{"localhost:1234"}, @@ -262,136 +295,6 @@ } } -func (*suite) TestMigrate(c *gc.C) { - initialParams := agent.AgentConfigParams{ - Paths: agent.Paths{ - DataDir: c.MkDir(), - LogDir: c.MkDir(), - }, - Tag: names.NewMachineTag("1"), - Nonce: "nonce", - Password: "secret", - 
UpgradedToVersion: version.MustParse("1.16.5"), - Jobs: []multiwatcher.MachineJob{ - multiwatcher.JobManageModel, - multiwatcher.JobHostUnits, - }, - CACert: "ca cert", - Model: testing.ModelTag, - StateAddresses: []string{"localhost:1234"}, - APIAddresses: []string{"localhost:4321"}, - Values: map[string]string{ - "key1": "value1", - "key2": "value2", - "key3": "value3", - }, - } - - migrateTests := []struct { - comment string - fields []string - newParams agent.MigrateParams - expectValues map[string]string - expectErr string - }{{ - comment: "nothing to change", - fields: nil, - newParams: agent.MigrateParams{}, - }, { - fields: []string{"Paths"}, - newParams: agent.MigrateParams{ - Paths: agent.Paths{DataDir: c.MkDir()}, - }, - }, { - fields: []string{"Paths"}, - newParams: agent.MigrateParams{ - Paths: agent.Paths{ - DataDir: c.MkDir(), - LogDir: c.MkDir(), - }, - }, - }, { - fields: []string{"Jobs"}, - newParams: agent.MigrateParams{ - Jobs: []multiwatcher.MachineJob{multiwatcher.JobHostUnits}, - }, - }, { - comment: "invalid/immutable field specified", - fields: []string{"InvalidField"}, - newParams: agent.MigrateParams{}, - expectErr: `unknown field "InvalidField"`, - }, { - comment: "Values can be added, changed or removed", - fields: []string{"Values", "DeleteValues"}, - newParams: agent.MigrateParams{ - DeleteValues: []string{"key2", "key3"}, // delete - Values: map[string]string{ - "key1": "new value1", // change - "new key3": "value3", // add - "empty": "", // add empty val - }, - }, - expectValues: map[string]string{ - "key1": "new value1", - "new key3": "value3", - "empty": "", - }, - }} - for i, test := range migrateTests { - summary := "migrate fields" - if test.comment != "" { - summary += " (" + test.comment + ") " - } - c.Logf("test %d: %s %v", i, summary, test.fields) - - initialConfig, err := agent.NewAgentConfig(initialParams) - c.Assert(err, jc.ErrorIsNil) - - newConfig, err := agent.NewAgentConfig(initialParams) - c.Assert(err, 
jc.ErrorIsNil) - - c.Assert(initialConfig.Write(), gc.IsNil) - c.Assert(agent.ConfigFileExists(initialConfig), jc.IsTrue) - - err = newConfig.Migrate(test.newParams) - c.Assert(err, jc.ErrorIsNil) - err = newConfig.Write() - c.Assert(err, jc.ErrorIsNil) - c.Assert(agent.ConfigFileExists(newConfig), jc.IsTrue) - - // Make sure we can read it back successfully and it - // matches what we wrote. - configPath := agent.ConfigPath(newConfig.DataDir(), newConfig.Tag()) - c.Logf("new config path: %v", configPath) - readConfig, err := agent.ReadConfig(configPath) - c.Check(err, jc.ErrorIsNil) - c.Check(newConfig, jc.DeepEquals, readConfig) - - // Make sure only the specified fields were changed and - // the rest matches. - for _, field := range test.fields { - switch field { - case "Values": - err = agent.PatchConfig(initialConfig, field, test.expectValues) - c.Check(err, jc.ErrorIsNil) - case "DeleteValues": - err = agent.PatchConfig(initialConfig, field, test.newParams.DeleteValues) - c.Check(err, jc.ErrorIsNil) - default: - value := reflect.ValueOf(test.newParams).FieldByName(field) - if value.IsValid() && test.expectErr == "" { - err = agent.PatchConfig(initialConfig, field, value.Interface()) - c.Check(err, jc.ErrorIsNil) - } else { - err = agent.PatchConfig(initialConfig, field, value) - c.Check(err, gc.ErrorMatches, test.expectErr) - } - } - } - c.Check(newConfig, jc.DeepEquals, initialConfig) - } -} - func stateServingInfo() params.StateServingInfo { return params.StateServingInfo{ Cert: "cert", @@ -480,6 +383,7 @@ StateAddresses: []string{"localhost:1234"}, APIAddresses: []string{"localhost:1235"}, Nonce: "a nonce", + Controller: testing.ControllerTag, Model: testing.ModelTag, } @@ -567,8 +471,8 @@ c.Assert(ok, jc.IsTrue) c.Check(apiinfo.Addrs, gc.HasLen, len(attrParams.APIAddresses)+1) localhostAddressFound := false - for _, eachApiAddress := range apiinfo.Addrs { - if eachApiAddress == "localhost:47" { + for _, eachAPIAddress := range apiinfo.Addrs { + if 
eachAPIAddress == "localhost:47" { localhostAddressFound = true break } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/agent/format-2.0.go juju-core-2.0.0/src/github.com/juju/juju/agent/format-2.0.go --- juju-core-2.0~beta15/src/github.com/juju/juju/agent/format-2.0.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/agent/format-2.0.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,7 +4,6 @@ package agent import ( - "fmt" "net" "strconv" @@ -40,6 +39,7 @@ StateAddresses []string `yaml:"stateaddresses,omitempty"` StatePassword string `yaml:"statepassword,omitempty"` + Controller string `yaml:"controller,omitempty"` Model string `yaml:"model,omitempty"` APIAddresses []string `yaml:"apiaddresses,omitempty"` APIPassword string `yaml:"apipassword,omitempty"` @@ -77,12 +77,13 @@ if err != nil { return nil, err } - var modelTag names.ModelTag - if format.Model != "" { - modelTag, err = names.ParseModelTag(format.Model) - if err != nil { - return nil, errors.Trace(err) - } + controllerTag, err := names.ParseControllerTag(format.Controller) + if err != nil { + return nil, errors.Trace(err) + } + modelTag, err := names.ParseModelTag(format.Model) + if err != nil { + return nil, errors.Trace(err) } config := &configInternal{ tag: tag, @@ -94,6 +95,7 @@ jobs: format.Jobs, upgradedToVersion: *format.UpgradedToVersion, nonce: format.Nonce, + controller: controllerTag, model: modelTag, caCert: format.CACert, oldPassword: format.OldPassword, @@ -124,7 +126,7 @@ // If private key is not present, infer it from the ports in the state addresses. 
if config.servingInfo.StatePort == 0 { if len(format.StateAddresses) == 0 { - return nil, fmt.Errorf("server key found but no state port") + return nil, errors.New("server key found but no state port") } _, portString, err := net.SplitHostPort(format.StateAddresses[0]) @@ -153,10 +155,8 @@ } func (formatter_2_0) marshal(config *configInternal) ([]byte, error) { - var modelTag string - if config.model.Id() != "" { - modelTag = config.model.String() - } + controllerTag := config.controller.String() + modelTag := config.model.String() format := &format_2_0Serialization{ Tag: config.tag.String(), DataDir: config.paths.DataDir, @@ -165,6 +165,7 @@ Jobs: config.jobs, UpgradedToVersion: &config.upgradedToVersion, Nonce: config.nonce, + Controller: controllerTag, Model: modelTag, CACert: string(config.caCert), OldPassword: config.oldPassword, diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/agent/format-2.0_whitebox_test.go juju-core-2.0.0/src/github.com/juju/juju/agent/format-2.0_whitebox_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/agent/format-2.0_whitebox_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/agent/format-2.0_whitebox_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -51,6 +51,8 @@ var agentConfig2_0Contents = ` # format 2.0 +controller: controller-deadbeef-1bad-500d-9000-4b1d0d06f00d +model: model-deadbeef-0bad-400d-8000-4b1d0d06f00d tag: machine-0 datadir: /home/user/.local/share/juju/local logdir: /var/log/juju-user-local @@ -199,6 +201,8 @@ var agentConfig2_0NotStateMachine = ` # format 2.0 +controller: controller-deadbeef-1bad-500d-9000-4b1d0d06f00d +model: model-deadbeef-0bad-400d-8000-4b1d0d06f00d tag: machine-1 datadir: /home/user/.local/share/juju/local logdir: /var/log/juju-user-local diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/agent/format_whitebox_test.go juju-core-2.0.0/src/github.com/juju/juju/agent/format_whitebox_test.go --- 
juju-core-2.0~beta15/src/github.com/juju/juju/agent/format_whitebox_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/agent/format_whitebox_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -38,6 +38,7 @@ APIAddresses: []string{"localhost:1235"}, Nonce: "a nonce", Model: testing.ModelTag, + Controller: testing.ControllerTag, } func newTestConfig(c *gc.C) *configInternal { diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/agent/identity_test.go juju-core-2.0.0/src/github.com/juju/juju/agent/identity_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/agent/identity_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/agent/identity_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -34,6 +34,7 @@ StateAddresses: []string{"localhost:1234"}, APIAddresses: []string{"localhost:1235"}, Nonce: "a nonce", + Controller: testing.ControllerTag, Model: testing.ModelTag, } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/agent/tools/toolsdir.go juju-core-2.0.0/src/github.com/juju/juju/agent/tools/toolsdir.go --- juju-core-2.0~beta15/src/github.com/juju/juju/agent/tools/toolsdir.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/agent/tools/toolsdir.go 2016-10-13 14:31:49.000000000 +0000 @@ -73,11 +73,8 @@ return err } defer os.Remove(f.Name()) - // TODO(wallyworld) - 2013-09-24 bug=1229512 - // When we can ensure all tools records have valid checksums recorded, - // we can remove this test short circuit. 
gzipSHA256 := fmt.Sprintf("%x", sha256hash.Sum(nil)) - if tools.SHA256 != "" && tools.SHA256 != gzipSHA256 { + if tools.SHA256 != gzipSHA256 { return fmt.Errorf("tarball sha256 mismatch, expected %s, got %s", tools.SHA256, gzipSHA256) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/api/agent/machine_test.go juju-core-2.0.0/src/github.com/juju/juju/api/agent/machine_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/api/agent/machine_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/api/agent/machine_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -53,7 +53,8 @@ APIPort: ssi.APIPort, StatePort: ssi.StatePort, } - s.State.SetStateServingInfo(ssi) + err := s.State.SetStateServingInfo(ssi) + c.Assert(err, jc.ErrorIsNil) info, err := apiagent.NewState(st).StateServingInfo() c.Assert(err, jc.ErrorIsNil) c.Assert(info, jc.DeepEquals, expected) @@ -157,7 +158,7 @@ c.Assert(err, jc.ErrorIsNil) info.Tag = tag info.Password = "foo-12345678901234567890" - err = tryOpenState(s.State.ModelTag(), info) + err = tryOpenState(s.State.ModelTag(), s.State.ControllerTag(), info) c.Assert(errors.Cause(err), jc.Satisfies, errors.IsUnauthorized) } @@ -179,8 +180,8 @@ c.Assert(rFlag, jc.IsFalse) } -func tryOpenState(modelTag names.ModelTag, info *mongo.MongoInfo) error { - st, err := state.Open(modelTag, info, mongotest.DialOpts(), nil) +func tryOpenState(modelTag names.ModelTag, controllerTag names.ControllerTag, info *mongo.MongoInfo) error { + st, err := state.Open(modelTag, controllerTag, info, mongotest.DialOpts(), nil) if err == nil { st.Close() } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/api/agent/model_test.go juju-core-2.0.0/src/github.com/juju/juju/api/agent/model_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/api/agent/model_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/api/agent/model_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -27,5 +27,6 @@ 
c.Assert(agentAPI, gc.NotNil) s.ModelWatcherTests = apitesting.NewModelWatcherTests( - agentAPI, s.BackingState, apitesting.NoSecrets) + agentAPI, s.BackingState, + ) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/api/agent/state.go juju-core-2.0.0/src/github.com/juju/juju/api/agent/state.go --- juju-core-2.0~beta15/src/github.com/juju/juju/api/agent/state.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/api/agent/state.go 2016-10-13 14:31:49.000000000 +0000 @@ -6,14 +6,17 @@ import ( "fmt" + "github.com/juju/errors" "gopkg.in/juju/names.v2" "github.com/juju/juju/api/base" "github.com/juju/juju/api/common" "github.com/juju/juju/api/common/cloudspec" + apiwatcher "github.com/juju/juju/api/watcher" "github.com/juju/juju/apiserver/params" "github.com/juju/juju/instance" "github.com/juju/juju/state/multiwatcher" + "github.com/juju/juju/watcher" ) // State provides access to an agent's view of the state. @@ -72,6 +75,20 @@ return results.Master, err } +// WatchCredential returns a watcher which reports when the specified +// credential has changed. 
+func (c *State) WatchCredential(tag names.CloudCredentialTag) (watcher.NotifyWatcher, error) { + var result params.NotifyWatchResult + err := c.facade.FacadeCall("WatchCredential", nil, &result) + if err != nil { + return nil, errors.Trace(err) + } + if result.Error != nil { + return nil, result.Error + } + return apiwatcher.NewNotifyWatcher(c.facade.RawAPICaller(), result), nil +} + type Entity struct { st *State tag names.Tag diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/api/apiclient.go juju-core-2.0.0/src/github.com/juju/juju/api/apiclient.go --- juju-core-2.0~beta15/src/github.com/juju/juju/api/apiclient.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/api/apiclient.go 2016-10-13 14:31:49.000000000 +0000 @@ -36,22 +36,19 @@ "github.com/juju/juju/rpc/jsoncodec" ) -var logger = loggo.GetLogger("juju.api") +// PingPeriod defines how often the internal connection health check +// will run. +const PingPeriod = 1 * time.Minute + +// pingTimeout defines how long a health check can take before we +// consider it to have failed. +const pingTimeout = 30 * time.Second -// TODO(fwereade): we should be injecting a Clock; and injecting these values; -// across the board, instead of using these global variables. -var ( - // PingPeriod defines how often the internal connection health check - // will run. - PingPeriod = 1 * time.Minute - - // PingTimeout defines how long a health check can take before we - // consider it to have failed. - PingTimeout = 30 * time.Second -) +var logger = loggo.GetLogger("juju.api") type rpcConnection interface { Call(req rpc.Request, params, response interface{}) error + Dead() <-chan struct{} Close() error } @@ -68,13 +65,12 @@ // will be associated with (specifically macaroon auth cookies). cookieURL *url.URL - // modelTag holds the model tag once we're connected - modelTag string + // modelTag holds the model tag. + // It is empty if there is no model tag associated with the connection. 
+ modelTag names.ModelTag - // controllerTag holds the controller tag once we're connected. - // This is only set with newer apiservers where they are using - // the v1 login mechansim. - controllerTag string + // controllerTag holds the controller's tag once we're connected. + controllerTag names.ControllerTag // serverVersion holds the version of the API server that we are // connected to. It is possible that this version is 0 if the @@ -89,12 +85,19 @@ // Login facadeVersions map[string][]int + // pingerFacadeVersion is the version to use for the pinger. This is lazily + // set at initialization to avoid a race in our tests. See + // http://pad.lv/1614732 for more details regarding the race. + pingerFacadeVersion int + // authTag holds the authenticated entity's tag after login. authTag names.Tag - // readOnly holds whether the user has read-only access for the - // connected model. - readOnly bool + // modelAccess holds the access level of the user to the connected model. + modelAccess string + + // controllerAccess holds the access level of the user to the connected controller. + controllerAccess string // broken is a channel that gets closed when the connection is // broken. @@ -161,19 +164,16 @@ // // See Connect for details of the connection mechanics. func Open(info *Info, opts DialOpts) (Connection, error) { - return open(info, opts, clock.WallClock, (*state).Login) + return open(info, opts, clock.WallClock) } -// This unexported open method is used both directly above in the Open -// function, and also the OpenWithVersion function below to explicitly cause -// the API server to think that the client is older than it really is. +// open is the unexported version of open that also includes +// an explicit clock instance argument.
func open( info *Info, opts DialOpts, clock clock.Clock, - loginFunc func(st *state, tag names.Tag, pwd, nonce string, ms []macaroon.Slice) error, ) (Connection, error) { - if err := info.Validate(); err != nil { return nil, errors.Annotate(err, "validating info for opening an API connection") } @@ -218,8 +218,9 @@ Host: conn.Config().Location.Host, Path: "/", }, - serverScheme: "https", - serverRootAddress: conn.Config().Location.Host, + pingerFacadeVersion: facadeVersions["Pinger"], + serverScheme: "https", + serverRootAddress: conn.Config().Location.Host, // We populate the username and password before // login because, when doing HTTP requests, we'll want // to use the same username and password for authenticating @@ -230,16 +231,27 @@ nonce: info.Nonce, tlsConfig: tlsConfig, bakeryClient: bakeryClient, + modelTag: info.ModelTag, } if !info.SkipLogin { - if err := loginFunc(st, info.Tag, info.Password, info.Nonce, info.Macaroons); err != nil { + if err := st.Login(info.Tag, info.Password, info.Nonce, info.Macaroons); err != nil { conn.Close() return nil, errors.Trace(err) } } + st.broken = make(chan struct{}) st.closed = make(chan struct{}) - go st.heartbeatMonitor() + + go (&monitor{ + clock: clock, + ping: st.Ping, + pingPeriod: PingPeriod, + pingTimeout: pingTimeout, + closed: st.closed, + dead: client.Dead(), + broken: st.broken, + }).run() return st, nil } @@ -263,22 +275,6 @@ return t.fallback.RoundTrip(req) } -// OpenWithVersion uses an explicit version of the Admin facade to call Login -// on. This allows the caller to pretend to be an older client, and is used -// only in testing. 
-func OpenWithVersion(info *Info, opts DialOpts, loginVersion int) (Connection, error) { - var loginFunc func(st *state, tag names.Tag, pwd, nonce string, ms []macaroon.Slice) error - switch loginVersion { - case 2: - loginFunc = (*state).loginV2 - case 3: - loginFunc = (*state).loginV3 - default: - return nil, errors.NotSupportedf("loginVersion %d", loginVersion) - } - return open(info, opts, clock.WallClock, loginFunc) -} - // connectWebsocket establishes a websocket connection to the RPC // API websocket on the API server using Info. If multiple API addresses // are provided in Info they will be tried concurrently - the first successful @@ -302,9 +298,9 @@ } tlsConfig.RootCAs = certPool } - path := "/" - if info.ModelTag.Id() != "" { - path = apiPath(info.ModelTag, "/api") + path, err := apiPath(info.ModelTag, "/api") + if err != nil { + return nil, nil, errors.Trace(err) } conn, err := dialWebSocket(info.Addrs, path, tlsConfig, opts) if err != nil { @@ -343,7 +339,7 @@ return result.(*websocket.Conn), nil } -// ConnectStream implements Connection.ConnectStream, whatever that is.. +// ConnectStream implements StreamConnector.ConnectStream. func (st *state) ConnectStream(path string, attrs url.Values) (base.Stream, error) { if !st.isLoggedIn() { return nil, errors.New("cannot use ConnectStream without logging in") @@ -376,18 +372,9 @@ // ConnectStream only in that it will not retry the connection if it encounters // discharge-required error. func (st *state) connectStream(path string, attrs url.Values) (base.Stream, error) { - if !strings.HasPrefix(path, "/") { - return nil, errors.New(`path must start with "/"`) - } - if _, ok := st.ServerVersion(); ok { - // If the server version is set, then we know the server is capable of - // serving streams at the model path. We also fully expect - // that the server has returned a valid model tag. 
- modelTag, err := st.ModelTag() - if err != nil { - return nil, errors.Annotate(err, "cannot get model tag, perhaps connected to system not model") - } - path = apiPath(modelTag, path) + path, err := apiPath(st.modelTag, path) + if err != nil { + return nil, errors.Trace(err) } target := url.URL{ Scheme: "wss", @@ -459,17 +446,11 @@ } // apiEndpoint returns a URL that refers to the given API slash-prefixed -// endpoint path and query parameters. Note that the caller -// is responsible for ensuring that the path *is* prefixed with a slash. +// endpoint path and query parameters. func (st *state) apiEndpoint(path, query string) (*url.URL, error) { - if _, err := st.ControllerTag(); err == nil { - // The controller tag is set, so the agent version is >= 1.23, - // so we can use the model endpoint. - modelTag, err := st.ModelTag() - if err != nil { - return nil, errors.Annotate(err, "cannot get API endpoint address") - } - path = apiPath(modelTag, path) + path, err := apiPath(st.modelTag, path) + if err != nil { + return nil, errors.Trace(err) } return &url.URL{ Scheme: st.serverScheme, @@ -479,21 +460,22 @@ }, nil } +// Ping implements api.Connection. +func (s *state) Ping() error { + return s.APICall("Pinger", s.pingerFacadeVersion, "", "Ping", nil, nil) +} + // apiPath returns the given API endpoint path relative -// to the given model tag. The caller is responsible -// for ensuring that the model tag is valid and -// that the path is slash-prefixed. -func apiPath(modelTag names.ModelTag, path string) string { +// to the given model tag. 
+func apiPath(modelTag names.ModelTag, path string) (string, error) { if !strings.HasPrefix(path, "/") { - panic(fmt.Sprintf("apiPath called with non-slash-prefixed path %q", path)) + return "", errors.Errorf("cannot make API path from non-slash-prefixed path %q", path) } - if modelTag.Id() == "" { - panic("apiPath called with empty model tag") + modelUUID := modelTag.Id() + if modelUUID == "" { + return path, nil } - if modelUUID := modelTag.Id(); modelUUID != "" { - return "/model/" + modelUUID + path - } - return path + return "/model/" + modelUUID + path, nil } // tagToString returns the value of a tag's String method, or "" if the tag is nil. @@ -575,42 +557,6 @@ return false } -func callWithTimeout(f func() error, timeout time.Duration) bool { - result := make(chan error, 1) - go func() { - // Note that result is buffered so that we don't leak this - // goroutine when a timeout happens. - result <- f() - }() - select { - case err := <-result: - if err != nil { - logger.Debugf("health ping failed: %v", err) - } - return err == nil - case <-time.After(timeout): - logger.Errorf("health ping timed out after %s", timeout) - return false - } -} - -func (s *state) heartbeatMonitor() { - for { - if !callWithTimeout(s.Ping, PingTimeout) { - close(s.broken) - return - } - select { - case <-time.After(PingPeriod): - case <-s.closed: - } - } -} - -func (s *state) Ping() error { - return s.APICall("Pinger", s.BestFacadeVersion("Pinger"), "", "Ping", nil, nil) -} - type hasErrorCode interface { ErrorCode() string } @@ -631,6 +577,7 @@ }, args, response) }, IsFatalError: func(err error) bool { + err = errors.Cause(err) ec, ok := err.(hasErrorCode) if !ok { return true @@ -658,24 +605,38 @@ return err } -// Broken returns a channel that's closed when the connection is broken. +// Broken implements api.Connection. func (s *state) Broken() <-chan struct{} { return s.broken } +// IsBroken implements api.Connection. 
+func (s *state) IsBroken() bool { + select { + case <-s.broken: + return true + default: + } + if err := s.Ping(); err != nil { + logger.Debugf("connection ping failed: %v", err) + return true + } + return false +} + // Addr returns the address used to connect to the API server. func (s *state) Addr() string { return s.addr } -// ModelTag returns the tag of the model we are connected to. -func (s *state) ModelTag() (names.ModelTag, error) { - return names.ParseModelTag(s.modelTag) +// ModelTag implements base.APICaller.ModelTag. +func (s *state) ModelTag() (names.ModelTag, bool) { + return s.modelTag, s.modelTag.Id() != "" } -// ControllerTag returns the tag of the server we are connected to. -func (s *state) ControllerTag() (names.ModelTag, error) { - return names.ParseModelTag(s.controllerTag) +// ControllerTag implements base.APICaller.ControllerTag. +func (s *state) ControllerTag() names.ControllerTag { + return s.controllerTag } // APIHostPorts returns addresses that may be used to connect diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/api/apiclient_test.go juju-core-2.0.0/src/github.com/juju/juju/api/apiclient_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/api/apiclient_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/api/apiclient_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -106,8 +106,8 @@ defer st.Close() c.Assert(st.Addr(), gc.Equals, info.Addrs[0]) - modelTag, err := st.ModelTag() - c.Assert(err, jc.ErrorIsNil) + modelTag, ok := st.ModelTag() + c.Assert(ok, jc.IsTrue) c.Assert(modelTag, gc.Equals, s.State.ModelTag()) remoteVersion, versionSet := st.ServerVersion() @@ -213,7 +213,7 @@ func (s *apiclientSuite) TestAPICallNoError(c *gc.C) { clock := &fakeClock{} conn := api.NewTestingState(api.TestingStateParams{ - RPCConnection: &fakeRPCConnection{}, + RPCConnection: newRPCConnection(), Clock: clock, }) @@ -225,10 +225,8 @@ func (s *apiclientSuite) TestAPICallError(c *gc.C) { clock := 
&fakeClock{} conn := api.NewTestingState(api.TestingStateParams{ - RPCConnection: &fakeRPCConnection{ - errors: []error{errors.BadRequestf("boom")}, - }, - Clock: clock, + RPCConnection: newRPCConnection(errors.BadRequestf("boom")), + Clock: clock, }) err := conn.APICall("facade", 1, "id", "method", nil, nil) @@ -240,14 +238,13 @@ func (s *apiclientSuite) TestAPICallRetries(c *gc.C) { clock := &fakeClock{} conn := api.NewTestingState(api.TestingStateParams{ - RPCConnection: &fakeRPCConnection{ - errors: []error{ + RPCConnection: newRPCConnection( + errors.Trace( &rpc.RequestError{ Message: "hmm...", Code: params.CodeRetry, - }, - }, - }, + }), + ), Clock: clock, }) @@ -258,16 +255,14 @@ func (s *apiclientSuite) TestAPICallRetriesLimit(c *gc.C) { clock := &fakeClock{} - retryError := &rpc.RequestError{Message: "hmm...", Code: params.CodeRetry} + retryError := errors.Trace(&rpc.RequestError{Message: "hmm...", Code: params.CodeRetry}) var errors []error for i := 0; i < 10; i++ { errors = append(errors, retryError) } conn := api.NewTestingState(api.TestingStateParams{ - RPCConnection: &fakeRPCConnection{ - errors: errors, - }, - Clock: clock, + RPCConnection: newRPCConnection(errors...), + Clock: clock, }) err := conn.APICall("facade", 1, "id", "method", nil, nil) @@ -286,6 +281,47 @@ }) } +func (s *apiclientSuite) TestPing(c *gc.C) { + clock := &fakeClock{} + rpcConn := newRPCConnection() + conn := api.NewTestingState(api.TestingStateParams{ + RPCConnection: rpcConn, + Clock: clock, + }) + err := conn.Ping() + c.Assert(err, jc.ErrorIsNil) + rpcConn.stub.CheckCalls(c, []testing.StubCall{{ + "Pinger.Ping", []interface{}{0, nil}, + }}) +} + +func (s *apiclientSuite) TestIsBrokenOk(c *gc.C) { + conn := api.NewTestingState(api.TestingStateParams{ + RPCConnection: newRPCConnection(), + Clock: new(fakeClock), + }) + c.Assert(conn.IsBroken(), jc.IsFalse) +} + +func (s *apiclientSuite) TestIsBrokenChannelClosed(c *gc.C) { + broken := make(chan struct{}) + close(broken) + conn 
:= api.NewTestingState(api.TestingStateParams{ + RPCConnection: newRPCConnection(), + Clock: new(fakeClock), + Broken: broken, + }) + c.Assert(conn.IsBroken(), jc.IsTrue) +} + +func (s *apiclientSuite) TestIsBrokenPingFailed(c *gc.C) { + conn := api.NewTestingState(api.TestingStateParams{ + RPCConnection: newRPCConnection(errors.New("no biscuit")), + Clock: new(fakeClock), + }) + c.Assert(conn.IsBroken(), jc.IsTrue) +} + type fakeClock struct { clock.Clock @@ -306,9 +342,18 @@ return time.After(0) } +func newRPCConnection(errs ...error) *fakeRPCConnection { + conn := new(fakeRPCConnection) + conn.stub.SetErrors(errs...) + return conn +} + type fakeRPCConnection struct { - pos int - errors []error + stub testing.Stub +} + +func (f *fakeRPCConnection) Dead() <-chan struct{} { + return nil } func (f *fakeRPCConnection) Close() error { @@ -316,12 +361,8 @@ } func (f *fakeRPCConnection) Call(req rpc.Request, params, response interface{}) error { - if f.pos >= len(f.errors) { - return nil - } - err := f.errors[f.pos] - f.pos++ - return err + f.stub.AddCall(req.Type+"."+req.Action, req.Version, params) + return f.stub.NextErr() } type redirectAPI struct { @@ -339,12 +380,12 @@ r *redirectAPI } -func (a *redirectAPIAdmin) Login(req params.LoginRequest) (params.LoginResultV1, error) { +func (a *redirectAPIAdmin) Login(req params.LoginRequest) (params.LoginResult, error) { if a.r.modelUUID != "beef1beef1-0000-0000-000011112222" { - return params.LoginResultV1{}, errors.New("logged into unexpected model") + return params.LoginResult{}, errors.New("logged into unexpected model") } a.r.redirected = true - return params.LoginResultV1{}, params.Error{ + return params.LoginResult{}, params.Error{ Message: "redirect", Code: params.CodeRedirect, } @@ -369,5 +410,5 @@ } func assertConnAddrForRoot(c *gc.C, conn *websocket.Conn, addr string) { - c.Assert(conn.RemoteAddr(), gc.Matches, "^wss://"+addr+"/$") + c.Assert(conn.RemoteAddr(), gc.Matches, "^wss://"+addr+"/api$") } diff -Nru 
juju-core-2.0~beta15/src/github.com/juju/juju/api/application/client.go juju-core-2.0.0/src/github.com/juju/juju/api/application/client.go --- juju-core-2.0~beta15/src/github.com/juju/juju/api/application/client.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/api/application/client.go 2016-10-13 14:31:49.000000000 +0000 @@ -52,39 +52,49 @@ // ModelUUID returns the model UUID from the client connection. func (c *Client) ModelUUID() string { - tag, err := c.st.ModelTag() - if err != nil { - logger.Warningf("model tag not an model: %v", err) - return "" + tag, ok := c.st.ModelTag() + if !ok { + logger.Warningf("controller-only API connection has no model tag") } return tag.Id() } // DeployArgs holds the arguments to be sent to Client.ServiceDeploy. type DeployArgs struct { + // CharmID identifies the charm to deploy. CharmID charmstore.CharmID + // ApplicationName is the name to give the application. ApplicationName string + // Series to be used for the machine. Series string + // NumUnits is the number of units to deploy. NumUnits int + // ConfigYAML is a string that overrides the default config.yml. ConfigYAML string - // Cons contains constraints on where units of this application may be - // placed. + + // Cons contains constraints on where units of this application + // may be placed. Cons constraints.Value + // Placement directives on where the machines for the unit must be // created. Placement []*instance.Placement + // Storage contains Constraints specifying how storage should be // handled. Storage map[string]storage.Constraints + // EndpointBindings EndpointBindings map[string]string - // Collection of resource names for the application, with the value being the - // unique ID of a pre-uploaded resources in storage. + + // Collection of resource names for the application, with the + // value being the unique ID of a pre-uploaded resources in + // storage. 
Resources map[string]string } @@ -96,7 +106,7 @@ Applications: []params.ApplicationDeploy{{ ApplicationName: args.ApplicationName, Series: args.Series, - CharmUrl: args.CharmID.URL.String(), + CharmURL: args.CharmID.URL.String(), Channel: string(args.CharmID.Channel), NumUnits: args.NumUnits, ConfigYAML: args.ConfigYAML, @@ -111,9 +121,9 @@ var err error err = c.facade.FacadeCall("Deploy", deployArgs, &results) if err != nil { - return err + return errors.Trace(err) } - return results.OneError() + return errors.Trace(results.OneError()) } // GetCharmURL returns the charm URL the given service is @@ -123,10 +133,10 @@ args := params.ApplicationGet{ApplicationName: serviceName} err := c.facade.FacadeCall("GetCharmURL", args, result) if err != nil { - return nil, err + return nil, errors.Trace(err) } if result.Error != nil { - return nil, result.Error + return nil, errors.Trace(result.Error) } return charm.ParseURL(result.Result) } @@ -136,27 +146,69 @@ type SetCharmConfig struct { // ApplicationName is the name of the application to set the charm on. ApplicationName string + // CharmID identifies the charm. CharmID charmstore.CharmID + + // ConfigSettings is the charm settings to set during the upgrade. + // This field is only understood by Application facade version 2 + // and greater. + ConfigSettings map[string]string `json:"config-settings,omitempty"` + + // ConfigSettingsYAML is the charm settings in YAML format to set + // during the upgrade. If this is non-empty, it will take precedence + // over ConfigSettings. This field is only understood by Application + // facade version 2 + ConfigSettingsYAML string `json:"config-settings-yaml,omitempty"` + // ForceSeries forces the use of the charm even if it doesn't match the // series of the unit. ForceSeries bool + // ForceUnits forces the upgrade on units in an error state. ForceUnits bool + // ResourceIDs is a map of resource names to resource IDs to activate during // the upgrade. 
ResourceIDs map[string]string + + // StorageConstraints is a map of storage names to storage constraints to + // update during the upgrade. This field is only understood by Application + // facade version 2 and greater. + StorageConstraints map[string]storage.Constraints `json:"storage-constraints,omitempty"` } // SetCharm sets the charm for a given service. func (c *Client) SetCharm(cfg SetCharmConfig) error { + var storageConstraints map[string]params.StorageConstraints + if len(cfg.StorageConstraints) > 0 { + storageConstraints = make(map[string]params.StorageConstraints) + for name, cons := range cfg.StorageConstraints { + size, count := cons.Size, cons.Count + var sizePtr, countPtr *uint64 + if size > 0 { + sizePtr = &size + } + if count > 0 { + countPtr = &count + } + storageConstraints[name] = params.StorageConstraints{ + Pool: cons.Pool, + Size: sizePtr, + Count: countPtr, + } + } + } args := params.ApplicationSetCharm{ - ApplicationName: cfg.ApplicationName, - CharmUrl: cfg.CharmID.URL.String(), - Channel: string(cfg.CharmID.Channel), - ForceSeries: cfg.ForceSeries, - ForceUnits: cfg.ForceUnits, - ResourceIDs: cfg.ResourceIDs, + ApplicationName: cfg.ApplicationName, + CharmURL: cfg.CharmID.URL.String(), + Channel: string(cfg.CharmID.Channel), + ConfigSettings: cfg.ConfigSettings, + ConfigSettingsYAML: cfg.ConfigSettingsYAML, + ForceSeries: cfg.ForceSeries, + ForceUnits: cfg.ForceUnits, + ResourceIDs: cfg.ResourceIDs, + StorageConstraints: storageConstraints, } return c.facade.FacadeCall("SetCharm", args, nil) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/api/application/client_test.go juju-core-2.0.0/src/github.com/juju/juju/api/application/client_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/api/application/client_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/api/application/client_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -84,7 +84,7 @@ args, ok := a.(params.ApplicationsDeploy) 
c.Assert(ok, jc.IsTrue) c.Assert(args.Applications, gc.HasLen, 1) - c.Assert(args.Applications[0].CharmUrl, gc.Equals, "cs:trusty/a-charm-1") + c.Assert(args.Applications[0].CharmURL, gc.Equals, "cs:trusty/a-charm-1") c.Assert(args.Applications[0].ApplicationName, gc.Equals, "serviceA") c.Assert(args.Applications[0].Series, gc.Equals, "series") c.Assert(args.Applications[0].NumUnits, gc.Equals, 2) @@ -140,15 +140,29 @@ func (s *serviceSuite) TestServiceSetCharm(c *gc.C) { var called bool + toUint64Ptr := func(v uint64) *uint64 { + return &v + } application.PatchFacadeCall(s, s.client, func(request string, a, response interface{}) error { called = true c.Assert(request, gc.Equals, "SetCharm") args, ok := a.(params.ApplicationSetCharm) c.Assert(ok, jc.IsTrue) c.Assert(args.ApplicationName, gc.Equals, "application") - c.Assert(args.CharmUrl, gc.Equals, "cs:trusty/application-1") + c.Assert(args.CharmURL, gc.Equals, "cs:trusty/application-1") + c.Assert(args.ConfigSettings, jc.DeepEquals, map[string]string{ + "a": "b", + "c": "d", + }) + c.Assert(args.ConfigSettingsYAML, gc.Equals, "yaml") c.Assert(args.ForceSeries, gc.Equals, true) c.Assert(args.ForceUnits, gc.Equals, true) + c.Assert(args.StorageConstraints, jc.DeepEquals, map[string]params.StorageConstraints{ + "a": {Pool: "radiant"}, + "b": {Count: toUint64Ptr(123)}, + "c": {Size: toUint64Ptr(123)}, + }) + return nil }) cfg := application.SetCharmConfig{ @@ -156,8 +170,18 @@ CharmID: charmstore.CharmID{ URL: charm.MustParseURL("trusty/application-1"), }, - ForceSeries: true, - ForceUnits: true, + ConfigSettings: map[string]string{ + "a": "b", + "c": "d", + }, + ConfigSettingsYAML: "yaml", + ForceSeries: true, + ForceUnits: true, + StorageConstraints: map[string]storage.Constraints{ + "a": {Pool: "radiant"}, + "b": {Count: 123}, + "c": {Size: 123}, + }, } err := s.client.SetCharm(cfg) c.Assert(err, jc.ErrorIsNil) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/api/authentication/package_test.go 
juju-core-2.0.0/src/github.com/juju/juju/api/authentication/package_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/api/authentication/package_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/api/authentication/package_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,14 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package authentication_test + +import ( + "testing" + + gc "gopkg.in/check.v1" +) + +func TestPackage(t *testing.T) { + gc.TestingT(t) +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/api/authentication/visitor.go juju-core-2.0.0/src/github.com/juju/juju/api/authentication/visitor.go --- juju-core-2.0~beta15/src/github.com/juju/juju/api/authentication/visitor.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/api/authentication/visitor.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,64 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package authentication + +import ( + "encoding/json" + "net/http" + "net/url" + + "github.com/juju/errors" + + "gopkg.in/macaroon-bakery.v1/httpbakery" +) + +const authMethod = "juju_userpass" + +// Visitor is a httpbakery.Visitor that will login directly +// to the Juju controller using password authentication. This +// only applies when logging in as a local user. +type Visitor struct { + username string + getPassword func(string) (string, error) +} + +// NewVisitor returns a new Visitor. +func NewVisitor(username string, getPassword func(string) (string, error)) *Visitor { + return &Visitor{ + username: username, + getPassword: getPassword, + } +} + +// VisitWebPage is part of the httpbakery.Visitor interface. 
+func (v *Visitor) VisitWebPage(client *httpbakery.Client, methodURLs map[string]*url.URL) error { + methodURL := methodURLs[authMethod] + if methodURL == nil { + return httpbakery.ErrMethodNotSupported + } + + password, err := v.getPassword(v.username) + if err != nil { + return err + } + + // POST to the URL with username and password. + resp, err := client.PostForm(methodURL.String(), url.Values{ + "user": {v.username}, + "password": {password}, + }) + if err != nil { + return err + } + defer resp.Body.Close() + + if resp.StatusCode == http.StatusOK { + return nil + } + var jsonError httpbakery.Error + if err := json.NewDecoder(resp.Body).Decode(&jsonError); err != nil { + return errors.Annotate(err, "unmarshalling error") + } + return &jsonError +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/api/authentication/visitor_test.go juju-core-2.0.0/src/github.com/juju/juju/api/authentication/visitor_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/api/authentication/visitor_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/api/authentication/visitor_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,88 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+ +package authentication_test + +import ( + "net/http" + "net/http/cookiejar" + "net/http/httptest" + "net/url" + + "github.com/juju/juju/api/authentication" + "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + "gopkg.in/macaroon-bakery.v1/httpbakery" +) + +type VisitorSuite struct { + testing.IsolationSuite + + jar *cookiejar.Jar + client *httpbakery.Client + server *httptest.Server + handler http.Handler +} + +var _ = gc.Suite(&VisitorSuite{}) + +func (s *VisitorSuite) SetUpTest(c *gc.C) { + s.IsolationSuite.SetUpTest(c) + var err error + s.jar, err = cookiejar.New(nil) + c.Assert(err, jc.ErrorIsNil) + s.client = httpbakery.NewClient() + s.client.Jar = s.jar + s.handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {}) + s.server = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + s.handler.ServeHTTP(w, r) + })) + s.AddCleanup(func(c *gc.C) { s.server.Close() }) +} + +func (s *VisitorSuite) TestVisitWebPage(c *gc.C) { + v := authentication.NewVisitor("bob", func(username string) (string, error) { + c.Assert(username, gc.Equals, "bob") + return "hunter2", nil + }) + var formUser, formPassword string + s.handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + r.ParseForm() + formUser = r.Form.Get("user") + formPassword = r.Form.Get("password") + }) + err := v.VisitWebPage(s.client, map[string]*url.URL{ + "juju_userpass": mustParseURL(s.server.URL), + }) + c.Assert(err, jc.ErrorIsNil) + c.Assert(formUser, gc.Equals, "bob") + c.Assert(formPassword, gc.Equals, "hunter2") +} + +func (s *VisitorSuite) TestVisitWebPageMethodNotSupported(c *gc.C) { + v := authentication.NewVisitor("bob", nil) + err := v.VisitWebPage(s.client, map[string]*url.URL{}) + c.Assert(err, gc.Equals, httpbakery.ErrMethodNotSupported) +} + +func (s *VisitorSuite) TestVisitWebPageErrorResult(c *gc.C) { + v := authentication.NewVisitor("bob", func(username string) (string, error) { + 
return "hunter2", nil + }) + s.handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + http.Error(w, `{"Message":"bleh"}`, http.StatusInternalServerError) + }) + err := v.VisitWebPage(s.client, map[string]*url.URL{ + "juju_userpass": mustParseURL(s.server.URL), + }) + c.Assert(err, gc.ErrorMatches, "bleh") +} + +func mustParseURL(s string) *url.URL { + u, err := url.Parse(s) + if err != nil { + panic(err) + } + return u +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/api/backups/download_test.go juju-core-2.0.0/src/github.com/juju/juju/api/backups/download_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/api/backups/download_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/api/backups/download_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -46,7 +46,7 @@ func (s *downloadSuite) TestFailedRequest(c *gc.C) { resultArchive, err := s.client.Download("unknown") - c.Assert(err, gc.ErrorMatches, `GET https://.*/model/.*/backups: backup metadata "unknown" not found`) + c.Assert(err, gc.ErrorMatches, `.*backup metadata "unknown" not found$`) c.Assert(err, jc.Satisfies, params.IsCodeNotFound) c.Assert(resultArchive, gc.Equals, nil) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/api/backups/upload_test.go juju-core-2.0.0/src/github.com/juju/juju/api/backups/upload_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/api/backups/upload_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/api/backups/upload_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -61,6 +61,6 @@ meta.Model = "" id, err := s.client.Upload(archive, meta) - c.Assert(err, gc.ErrorMatches, `PUT https://.*/model/.*/backups: while storing backup archive: missing Model`) + c.Assert(err, gc.ErrorMatches, `.*while storing backup archive: missing Model$`) c.Assert(id, gc.Equals, "") } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/api/base/caller.go 
juju-core-2.0.0/src/github.com/juju/juju/api/base/caller.go --- juju-core-2.0~beta15/src/github.com/juju/juju/api/base/caller.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/api/base/caller.go 2016-10-13 14:31:49.000000000 +0000 @@ -26,9 +26,9 @@ // client can use with the current API server. BestFacadeVersion(facade string) int - // ModelTag returns the tag of the model the client is - // connected to. - ModelTag() (names.ModelTag, error) + // ModelTag returns the tag of the model the client is connected + // to if there is one. It returns false for a controller-only connection. + ModelTag() (names.ModelTag, bool) // HTTPClient returns an httprequest.Client that can be used // to make HTTP requests to the API. URLs passed to the client diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/api/base/testing/apicaller.go juju-core-2.0.0/src/github.com/juju/juju/api/base/testing/apicaller.go --- juju-core-2.0~beta15/src/github.com/juju/juju/api/base/testing/apicaller.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/api/base/testing/apicaller.go 2016-10-13 14:31:49.000000000 +0000 @@ -32,8 +32,8 @@ return 0 } -func (APICallerFunc) ModelTag() (names.ModelTag, error) { - return coretesting.ModelTag, nil +func (APICallerFunc) ModelTag() (names.ModelTag, bool) { + return coretesting.ModelTag, true } func (APICallerFunc) Close() error { diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/api/base/types.go juju-core-2.0.0/src/github.com/juju/juju/api/base/types.go --- juju-core-2.0~beta15/src/github.com/juju/juju/api/base/types.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/api/base/types.go 2016-10-13 14:31:49.000000000 +0000 @@ -6,7 +6,8 @@ import ( "time" - "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/instance" + "github.com/juju/juju/status" ) // UserModel holds information about a model and the last @@ -22,8 +23,54 @@ // ModelStatus holds 
information about the status of a juju model. type ModelStatus struct { UUID string - Life params.Life + Life string Owner string + TotalMachineCount int + CoreCount int HostedMachineCount int ServiceCount int + Machines []Machine +} + +// Machine holds information about a machine in a juju model. +type Machine struct { + Id string + InstanceId string + HasVote bool + WantsVote bool + Status string + Hardware *instance.HardwareCharacteristics +} + +// ModelInfo holds information about a model. +type ModelInfo struct { + Name string + UUID string + ControllerUUID string + ProviderType string + DefaultSeries string + Cloud string + CloudRegion string + CloudCredential string + Owner string + Life string + Status Status + Users []UserInfo + Machines []Machine +} + +// Status represents the status of a machine, application, or unit. +type Status struct { + Status status.Status + Info string + Data map[string]interface{} + Since *time.Time +} + +// UserInfo holds information about a user in a juju model. 
+type UserInfo struct { + UserName string + DisplayName string + LastConnection *time.Time + Access string } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/api/charms/client.go juju-core-2.0.0/src/github.com/juju/juju/api/charms/client.go --- juju-core-2.0~beta15/src/github.com/juju/juju/api/charms/client.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/api/charms/client.go 2016-10-13 14:31:49.000000000 +0000 @@ -249,9 +249,14 @@ } return &charm.Metrics{ Metrics: convertCharmMetricMap(metrics.Metrics), + Plan: convertCharmPlan(metrics.Plan), } } +func convertCharmPlan(plan params.CharmPlan) *charm.Plan { + return &charm.Plan{Required: plan.Required} +} + func convertCharmMetricMap(metrics map[string]params.CharmMetric) map[string]charm.Metric { if len(metrics) == 0 { return nil diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/api/client.go juju-core-2.0.0/src/github.com/juju/juju/api/client.go --- juju-core-2.0~beta15/src/github.com/juju/juju/api/client.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/api/client.go 2016-10-13 14:31:49.000000000 +0000 @@ -12,6 +12,7 @@ "os" "strconv" "strings" + "time" "github.com/juju/errors" "github.com/juju/loggo" @@ -184,20 +185,14 @@ return c.facade.FacadeCall("SetModelConstraints", params, nil) } -// ModelInfo returns details about the Juju model. -func (c *Client) ModelInfo() (params.ModelInfo, error) { - var info params.ModelInfo - err := c.facade.FacadeCall("ModelInfo", nil, &info) - return info, err -} - -// ModelUUID returns the model UUID from the client connection. -func (c *Client) ModelUUID() (string, error) { - tag, err := c.st.ModelTag() - if err != nil { - return "", errors.Annotate(err, "model tag not an model") +// ModelUUID returns the model UUID from the client connection +// and reports whether it is valud. 
+func (c *Client) ModelUUID() (string, bool) { + tag, ok := c.st.ModelTag() + if !ok { + return "", false } - return tag.Id(), nil + return tag.Id(), true } // ModelUserInfo returns information on all users in the model. @@ -593,19 +588,19 @@ return attrs } -// WatchDebugLog returns a ReadCloser that the caller can read the log -// lines from. Only log lines that match the filtering specified in -// the DebugLogParams are returned. It returns an error that satisfies -// errors.IsNotImplemented when the API server does not support the -// end-point. -func (c *Client) WatchDebugLog(args DebugLogParams) (io.ReadCloser, error) { - // The websocket connection just hangs if the server doesn't have the log - // end point. So do a version check, as version was added at the same time - // as the remote end point. - _, err := c.AgentVersion() - if err != nil { - return nil, errors.NotSupportedf("WatchDebugLog") - } +// LogMessage is a structured logging entry. +type LogMessage struct { + Entity string + Timestamp time.Time + Severity string + Module string + Location string + Message string +} + +// WatchDebugLog returns a channel of structured Log Messages. Only log entries +// that match the filtering specified in the DebugLogParams are returned. +func (c *Client) WatchDebugLog(args DebugLogParams) (<-chan LogMessage, error) { // Prepare URL query attributes. 
attrs := args.URLQuery() @@ -613,5 +608,27 @@ if err != nil { return nil, errors.Trace(err) } - return connection, nil + + messages := make(chan LogMessage) + go func() { + defer close(messages) + + for { + var msg params.LogMessage + err := connection.ReadJSON(&msg) + if err != nil { + return + } + messages <- LogMessage{ + Entity: msg.Entity, + Timestamp: msg.Timestamp, + Severity: msg.Severity, + Module: msg.Module, + Location: msg.Location, + Message: msg.Message, + } + } + }() + + return messages, nil } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/api/client_macaroon_test.go juju-core-2.0.0/src/github.com/juju/juju/api/client_macaroon_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/api/client_macaroon_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/api/client_macaroon_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -12,6 +12,7 @@ "github.com/juju/juju/api" apitesting "github.com/juju/juju/api/testing" + "github.com/juju/juju/permission" "github.com/juju/juju/testcharms" ) @@ -28,9 +29,11 @@ func (s *clientMacaroonSuite) SetUpTest(c *gc.C) { s.MacaroonSuite.SetUpTest(c) - s.AddModelUser(c, "testuser@somewhere") + const username = "testuser@somewhere" + s.AddModelUser(c, username) + s.AddControllerUser(c, username, permission.LoginAccess) s.cookieJar = apitesting.NewClearableCookieJar() - s.DischargerLogin = func() string { return "testuser@somewhere" } + s.DischargerLogin = func() string { return username } s.client = s.OpenAPI(c, nil, s.cookieJar).Client() // Even though we've logged into the API, we want @@ -76,5 +79,5 @@ ) // Upload an archive with its original revision. 
_, err := s.client.AddLocalCharm(curl, charmArchive) - c.Assert(err, gc.ErrorMatches, `POST https://.+: invalid entity name or password`) + c.Assert(err, gc.ErrorMatches, `.*invalid entity name or password$`) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/api/client_test.go juju-core-2.0.0/src/github.com/juju/juju/api/client_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/api/client_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/api/client_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,7 +4,6 @@ package api_test import ( - "bufio" "bytes" "fmt" "io" @@ -54,8 +53,8 @@ c.Assert(client.Close(), gc.IsNil) } -func (s *clientSuite) TestUploadToolsOtherEnvironment(c *gc.C) { - otherSt, otherAPISt := s.otherEnviron(c) +func (s *clientSuite) TestUploadToolsOtherModel(c *gc.C) { + otherSt, otherAPISt := s.otherModel(c) defer otherSt.Close() defer otherAPISt.Close() client := otherAPISt.Client() @@ -118,13 +117,13 @@ c.Assert(savedURL.String(), gc.Equals, curl.WithRevision(43).String()) } -func (s *clientSuite) TestAddLocalCharmOtherEnvironment(c *gc.C) { +func (s *clientSuite) TestAddLocalCharmOtherModel(c *gc.C) { charmArchive := testcharms.Repo.CharmArchive(c.MkDir(), "dummy") curl := charm.MustParseURL( fmt.Sprintf("local:quantal/%s-%d", charmArchive.Meta().Name, charmArchive.Revision()), ) - otherSt, otherAPISt := s.otherEnviron(c) + otherSt, otherAPISt := s.otherModel(c) defer otherSt.Close() defer otherAPISt.Close() client := otherAPISt.Client() @@ -139,7 +138,7 @@ c.Assert(charm.String(), gc.Equals, curl.String()) } -func (s *clientSuite) otherEnviron(c *gc.C) (*state.State, api.Connection) { +func (s *clientSuite) otherModel(c *gc.C) (*state.State, api.Connection) { otherSt := s.Factory.MakeModel(c, nil) info := s.APIInfo(c) info.ModelTag = otherSt.ModelTag() @@ -168,7 +167,7 @@ ) _, err := client.AddLocalCharm(curl, charmArchive) - c.Assert(err, gc.ErrorMatches, `POST http://.+: the POST method is not 
allowed`) + c.Assert(err, gc.ErrorMatches, `.*the POST method is not allowed$`) } func (s *clientSuite) TestMinVersionLocalCharm(c *gc.C) { @@ -253,7 +252,7 @@ func (s *clientSuite) TestOpenURIError(c *gc.C) { client := s.APIState.Client() _, err := client.OpenURI("/tools/foobar", nil) - c.Assert(err, gc.ErrorMatches, ".+error parsing version.+") + c.Assert(err, gc.ErrorMatches, ".*error parsing version.+") } func (s *clientSuite) TestOpenCharmFound(c *gc.C) { @@ -277,7 +276,7 @@ _, err := client.OpenCharm(curl) - c.Check(err, gc.ErrorMatches, `.*unable to retrieve and save the charm: cannot get charm from state: charm "cs:quantal/spam-3" not found`) + c.Check(err, gc.ErrorMatches, `.*cannot get charm from state: charm "cs:quantal/spam-3" not found`) } func addLocalCharm(c *gc.C, client *api.Client, name string) (*charm.URL, *charm.CharmArchive) { @@ -307,22 +306,22 @@ // envEndpoint returns "/model//" func envEndpoint(c *gc.C, apiState api.Connection, destination string) string { - modelTag, err := apiState.ModelTag() - c.Assert(err, jc.ErrorIsNil) + modelTag, ok := apiState.ModelTag() + c.Assert(ok, jc.IsTrue) return path.Join("/model", modelTag.Id(), destination) } -func (s *clientSuite) TestClientEnvironmentUUID(c *gc.C) { - environ, err := s.State.Model() +func (s *clientSuite) TestClientModelUUID(c *gc.C) { + model, err := s.State.Model() c.Assert(err, jc.ErrorIsNil) client := s.APIState.Client() - uuid, err := client.ModelUUID() - c.Assert(err, jc.ErrorIsNil) - c.Assert(uuid, gc.Equals, environ.Tag().Id()) + uuid, ok := client.ModelUUID() + c.Assert(ok, jc.IsTrue) + c.Assert(uuid, gc.Equals, model.Tag().Id()) } -func (s *clientSuite) TestClientEnvironmentUsers(c *gc.C) { +func (s *clientSuite) TestClientModelUsers(c *gc.C) { client := s.APIState.Client() cleanup := api.PatchClientFacadeCall(client, func(request string, paramsIn interface{}, response interface{}) error { @@ -357,15 +356,14 @@ // Use the no tail option so we don't try to start a tailing cursor 
// on the oplog when there is no oplog configured in mongo as the tests // don't set up mongo in replicaset mode. - reader, err := client.WatchDebugLog(api.DebugLogParams{NoTail: true}) + messages, err := client.WatchDebugLog(api.DebugLogParams{NoTail: true}) c.Assert(err, jc.ErrorIsNil) - c.Assert(reader, gc.NotNil) - reader.Close() + c.Assert(messages, gc.NotNil) } func (s *clientSuite) TestConnectStreamRequiresSlashPathPrefix(c *gc.C) { reader, err := s.APIState.ConnectStream("foo", nil) - c.Assert(err, gc.ErrorMatches, `path must start with "/"`) + c.Assert(err, gc.ErrorMatches, `cannot make API path from non-slash-prefixed path "foo"`) c.Assert(reader, gc.Equals, nil) } @@ -407,7 +405,8 @@ } func (s *clientSuite) TestWatchDebugLogParamsEncoded(c *gc.C) { - s.PatchValue(api.WebsocketDialConfig, echoURL(c)) + catcher := urlCatcher{} + s.PatchValue(api.WebsocketDialConfig, catcher.recordLocation) params := api.DebugLogParams{ IncludeEntity: []string{"a", "b"}, @@ -422,10 +421,10 @@ } client := s.APIState.Client() - reader, err := client.WatchDebugLog(params) + _, err := client.WatchDebugLog(params) c.Assert(err, jc.ErrorIsNil) - connectURL := connectURLFromReader(c, reader) + connectURL := catcher.location values := connectURL.Query() c.Assert(values, jc.DeepEquals, url.Values{ "includeEntity": params.IncludeEntity, @@ -441,28 +440,28 @@ } func (s *clientSuite) TestConnectStreamAtUUIDPath(c *gc.C) { - s.PatchValue(api.WebsocketDialConfig, echoURL(c)) - // If the server supports it, we should log at "/model/UUID/log" - environ, err := s.State.Model() + catcher := urlCatcher{} + s.PatchValue(api.WebsocketDialConfig, catcher.recordLocation) + model, err := s.State.Model() c.Assert(err, jc.ErrorIsNil) info := s.APIInfo(c) - info.ModelTag = environ.ModelTag() + info.ModelTag = model.ModelTag() apistate, err := api.Open(info, api.DialOpts{}) c.Assert(err, jc.ErrorIsNil) defer apistate.Close() - reader, err := apistate.ConnectStream("/path", nil) + _, err = 
apistate.ConnectStream("/path", nil) c.Assert(err, jc.ErrorIsNil) - connectURL := connectURLFromReader(c, reader) - c.Assert(connectURL.Path, gc.Matches, fmt.Sprintf("/model/%s/path", environ.UUID())) + connectURL := catcher.location + c.Assert(connectURL.Path, gc.Matches, fmt.Sprintf("/model/%s/path", model.UUID())) } func (s *clientSuite) TestOpenUsesModelUUIDPaths(c *gc.C) { info := s.APIInfo(c) // Passing in the correct model UUID should work - environ, err := s.State.Model() + model, err := s.State.Model() c.Assert(err, jc.ErrorIsNil) - info.ModelTag = environ.ModelTag() + info.ModelTag = model.ModelTag() apistate, err := api.Open(info, api.DialOpts{}) c.Assert(err, jc.ErrorIsNil) apistate.Close() @@ -529,25 +528,17 @@ return 0, r.err } -func echoURL(c *gc.C) func(*websocket.Config) (base.Stream, error) { - return func(config *websocket.Config) (base.Stream, error) { - pr, pw := io.Pipe() - go func() { - fmt.Fprintf(pw, "null\n") - fmt.Fprintf(pw, "%s\n", config.Location) - }() - return fakeStreamReader{pr}, nil - } +type urlCatcher struct { + location *url.URL } -func connectURLFromReader(c *gc.C, rc io.ReadCloser) *url.URL { - bufReader := bufio.NewReader(rc) - location, err := bufReader.ReadString('\n') - c.Assert(err, jc.ErrorIsNil) - connectURL, err := url.Parse(strings.TrimSpace(location)) - c.Assert(err, jc.ErrorIsNil) - rc.Close() - return connectURL +func (u *urlCatcher) recordLocation(config *websocket.Config) (base.Stream, error) { + u.location = config.Location + pr, pw := io.Pipe() + go func() { + fmt.Fprintf(pw, "null\n") + }() + return fakeStreamReader{pr}, nil } type fakeStreamReader struct { @@ -562,13 +553,13 @@ } func (s fakeStreamReader) Write([]byte) (int, error) { - panic("not implemented") + return 0, errors.NotImplementedf("Write") } func (s fakeStreamReader) ReadJSON(v interface{}) error { - panic("not implemented") + return errors.NotImplementedf("ReadJSON") } func (s fakeStreamReader) WriteJSON(v interface{}) error { - panic("not 
implemented") + return errors.NotImplementedf("WriteJSON") } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/api/cloud/cloud.go juju-core-2.0.0/src/github.com/juju/juju/api/cloud/cloud.go --- juju-core-2.0~beta15/src/github.com/juju/juju/api/cloud/cloud.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/api/cloud/cloud.go 2016-10-13 14:31:49.000000000 +0000 @@ -26,6 +26,23 @@ return &Client{ClientFacade: frontend, facade: backend} } +// Clouds returns the details of all clouds supported by the controller. +func (c *Client) Clouds() (map[names.CloudTag]jujucloud.Cloud, error) { + var result params.CloudsResult + if err := c.facade.FacadeCall("Clouds", nil, &result); err != nil { + return nil, errors.Trace(err) + } + clouds := make(map[names.CloudTag]jujucloud.Cloud) + for tagString, cloud := range result.Clouds { + tag, err := names.ParseCloudTag(tagString) + if err != nil { + return nil, errors.Trace(err) + } + clouds[tag] = cloudFromParams(cloud) + } + return clouds, nil +} + // Cloud returns the details of the cloud with the given tag. 
func (c *Client) Cloud(tag names.CloudTag) (jujucloud.Cloud, error) { var results params.CloudResults @@ -39,61 +56,58 @@ if results.Results[0].Error != nil { return jujucloud.Cloud{}, results.Results[0].Error } - result := results.Results[0].Cloud - authTypes := make([]jujucloud.AuthType, len(result.AuthTypes)) - for i, authType := range result.AuthTypes { + return cloudFromParams(*results.Results[0].Cloud), nil +} + +func cloudFromParams(p params.Cloud) jujucloud.Cloud { + authTypes := make([]jujucloud.AuthType, len(p.AuthTypes)) + for i, authType := range p.AuthTypes { authTypes[i] = jujucloud.AuthType(authType) } - regions := make([]jujucloud.Region, len(result.Regions)) - for i, region := range result.Regions { + regions := make([]jujucloud.Region, len(p.Regions)) + for i, region := range p.Regions { regions[i] = jujucloud.Region{ - Name: region.Name, - Endpoint: region.Endpoint, - StorageEndpoint: region.StorageEndpoint, + Name: region.Name, + Endpoint: region.Endpoint, + IdentityEndpoint: region.IdentityEndpoint, + StorageEndpoint: region.StorageEndpoint, } } return jujucloud.Cloud{ - Type: result.Type, - AuthTypes: authTypes, - Endpoint: result.Endpoint, - StorageEndpoint: result.StorageEndpoint, - Regions: regions, - }, nil + Type: p.Type, + AuthTypes: authTypes, + Endpoint: p.Endpoint, + IdentityEndpoint: p.IdentityEndpoint, + StorageEndpoint: p.StorageEndpoint, + Regions: regions, + } } -// CloudDefaults returns the cloud defaults for the given users. -func (c *Client) CloudDefaults(user names.UserTag) (jujucloud.Defaults, error) { - var results params.CloudDefaultsResults - args := params.Entities{[]params.Entity{{user.String()}}} - if err := c.facade.FacadeCall("CloudDefaults", args, &results); err != nil { - return jujucloud.Defaults{}, errors.Trace(err) +// DefaultCloud returns the tag of the cloud that models will be +// created in by default. 
+func (c *Client) DefaultCloud() (names.CloudTag, error) { + var result params.StringResult + if err := c.facade.FacadeCall("DefaultCloud", nil, &result); err != nil { + return names.CloudTag{}, errors.Trace(err) } - if len(results.Results) != 1 { - return jujucloud.Defaults{}, errors.Errorf("expected 1 result, got %d", len(results.Results)) + if result.Error != nil { + return names.CloudTag{}, result.Error } - if results.Results[0].Error != nil { - return jujucloud.Defaults{}, results.Results[0].Error - } - result := results.Results[0].Result - cloudTag, err := names.ParseCloudTag(result.CloudTag) + cloudTag, err := names.ParseCloudTag(result.Result) if err != nil { - return jujucloud.Defaults{}, errors.Trace(err) + return names.CloudTag{}, errors.Trace(err) } - return jujucloud.Defaults{ - Cloud: cloudTag.Id(), - Region: result.CloudRegion, - Credential: result.CloudCredential, - }, nil + return cloudTag, nil } -// Credentials returns the cloud credentials for the user and cloud with -// the given tags. -func (c *Client) Credentials(user names.UserTag, cloud names.CloudTag) (map[string]jujucloud.Credential, error) { - var results params.CloudCredentialsResults +// UserCredentials returns the tags for cloud credentials available to a user for +// use with a specific cloud. 
+func (c *Client) UserCredentials(user names.UserTag, cloud names.CloudTag) ([]names.CloudCredentialTag, error) { + var results params.StringsResults args := params.UserClouds{[]params.UserCloud{ {UserTag: user.String(), CloudTag: cloud.String()}, }} - if err := c.facade.FacadeCall("Credentials", args, &results); err != nil { + if err := c.facade.FacadeCall("UserCredentials", args, &results); err != nil { return nil, errors.Trace(err) } if len(results.Results) != 1 { @@ -102,41 +116,64 @@ if results.Results[0].Error != nil { return nil, results.Results[0].Error } - credentials := make(map[string]jujucloud.Credential) - for name, credential := range results.Results[0].Credentials { - credentials[name] = jujucloud.NewCredential( - jujucloud.AuthType(credential.AuthType), - credential.Attributes, - ) - } - return credentials, nil + tags := make([]names.CloudCredentialTag, len(results.Results[0].Result)) + for i, s := range results.Results[0].Result { + tag, err := names.ParseCloudCredentialTag(s) + if err != nil { + return nil, errors.Trace(err) + } + tags[i] = tag + } + return tags, nil } -// UpdateCredentials updates the cloud credentials for the user and cloud with -// the given tags. Exiting credentials that are not named in the map will be -// untouched. -func (c *Client) UpdateCredentials(user names.UserTag, cloud names.CloudTag, credentials map[string]jujucloud.Credential) error { +// UpdateCredential updates a cloud credentials. 
+func (c *Client) UpdateCredential(tag names.CloudCredentialTag, credential jujucloud.Credential) error { var results params.ErrorResults - paramsCredentials := make(map[string]params.CloudCredential) - for name, credential := range credentials { - paramsCredentials[name] = params.CloudCredential{ - AuthType: string(credential.AuthType()), - Attributes: credential.Attributes(), - } + args := params.UpdateCloudCredentials{ + Credentials: []params.UpdateCloudCredential{{ + Tag: tag.String(), + Credential: params.CloudCredential{ + AuthType: string(credential.AuthType()), + Attributes: credential.Attributes(), + }, + }}, } - args := params.UsersCloudCredentials{[]params.UserCloudCredentials{{ - UserTag: user.String(), - CloudTag: cloud.String(), - Credentials: paramsCredentials, - }}} if err := c.facade.FacadeCall("UpdateCredentials", args, &results); err != nil { return errors.Trace(err) } - if len(results.Results) != 1 { - return errors.Errorf("expected 1 result, got %d", len(results.Results)) + return results.OneError() +} + +// RevokeCredential revokes/deletes a cloud credential. +func (c *Client) RevokeCredential(tag names.CloudCredentialTag) error { + var results params.ErrorResults + args := params.Entities{ + Entities: []params.Entity{{ + Tag: tag.String(), + }}, } - if results.Results[0].Error != nil { - return results.Results[0].Error + if err := c.facade.FacadeCall("RevokeCredentials", args, &results); err != nil { + return errors.Trace(err) + } + return results.OneError() +} + +// Credentials return a slice of credential values for the specified tags. +// Secrets are excluded from the credential attributes. 
+func (c *Client) Credentials(tags ...names.CloudCredentialTag) ([]params.CloudCredentialResult, error) { + if len(tags) == 0 { + return []params.CloudCredentialResult{}, nil + } + var results params.CloudCredentialResults + args := params.Entities{ + Entities: make([]params.Entity, len(tags)), + } + for i, tag := range tags { + args.Entities[i].Tag = tag.String() + } + if err := c.facade.FacadeCall("Credential", args, &results); err != nil { + return nil, errors.Trace(err) } - return nil + return results.Results, nil } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/api/cloud/cloud_test.go juju-core-2.0.0/src/github.com/juju/juju/api/cloud/cloud_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/api/cloud/cloud_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/api/cloud/cloud_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -32,7 +32,7 @@ c.Check(id, gc.Equals, "") c.Check(request, gc.Equals, "Cloud") c.Check(a, jc.DeepEquals, params.Entities{ - []params.Entity{{"cloud-foo"}}, + Entities: []params.Entity{{Tag: "cloud-foo"}}, }) c.Assert(result, gc.FitsTypeOf, ¶ms.CloudResults{}) results := result.(*params.CloudResults) @@ -57,43 +57,49 @@ }) } -func (s *cloudSuite) TestCloudDefaults(c *gc.C) { +func (s *cloudSuite) TestClouds(c *gc.C) { apiCaller := basetesting.APICallerFunc( func(objType string, version int, id, request string, - a, result interface{}, + a, result_ interface{}, ) error { c.Check(objType, gc.Equals, "Cloud") c.Check(id, gc.Equals, "") - c.Check(request, gc.Equals, "CloudDefaults") - c.Check(a, jc.DeepEquals, params.Entities{ - []params.Entity{{"user-bob"}}, - }) - c.Assert(result, gc.FitsTypeOf, ¶ms.CloudDefaultsResults{}) - results := result.(*params.CloudDefaultsResults) - results.Results = append(results.Results, params.CloudDefaultsResult{ - Result: ¶ms.CloudDefaults{ - CloudTag: "cloud-foo", - CloudRegion: "some-region", - CloudCredential: "some-credential", + c.Check(request, gc.Equals, 
"Clouds") + c.Check(a, gc.IsNil) + c.Assert(result_, gc.FitsTypeOf, ¶ms.CloudsResult{}) + result := result_.(*params.CloudsResult) + result.Clouds = map[string]params.Cloud{ + "cloud-foo": { + Type: "bar", }, - }) + "cloud-baz": { + Type: "qux", + AuthTypes: []string{"empty", "userpass"}, + Regions: []params.CloudRegion{{Name: "nether", Endpoint: "endpoint"}}, + }, + } return nil }, ) client := cloudapi.NewClient(apiCaller) - result, err := client.CloudDefaults(names.NewUserTag("bob")) + clouds, err := client.Clouds() c.Assert(err, jc.ErrorIsNil) - c.Assert(result, jc.DeepEquals, cloud.Defaults{ - Cloud: "foo", - Region: "some-region", - Credential: "some-credential", + c.Assert(clouds, jc.DeepEquals, map[names.CloudTag]cloud.Cloud{ + names.NewCloudTag("foo"): { + Type: "bar", + }, + names.NewCloudTag("baz"): { + Type: "qux", + AuthTypes: []cloud.AuthType{cloud.EmptyAuthType, cloud.UserPassAuthType}, + Regions: []cloud.Region{{Name: "nether", Endpoint: "endpoint"}}, + }, }) } -func (s *cloudSuite) TestCredentials(c *gc.C) { +func (s *cloudSuite) TestDefaultCloud(c *gc.C) { apiCaller := basetesting.APICallerFunc( func(objType string, version int, @@ -102,25 +108,40 @@ ) error { c.Check(objType, gc.Equals, "Cloud") c.Check(id, gc.Equals, "") - c.Check(request, gc.Equals, "Credentials") - c.Assert(result, gc.FitsTypeOf, ¶ms.CloudCredentialsResults{}) - c.Assert(a, jc.DeepEquals, params.UserClouds{[]params.UserCloud{{ - UserTag: "user-bob@local", + c.Check(request, gc.Equals, "DefaultCloud") + c.Assert(result, gc.FitsTypeOf, ¶ms.StringResult{}) + results := result.(*params.StringResult) + results.Result = "cloud-foo" + return nil + }, + ) + + client := cloudapi.NewClient(apiCaller) + result, err := client.DefaultCloud() + c.Assert(err, jc.ErrorIsNil) + c.Assert(result, jc.DeepEquals, names.NewCloudTag("foo")) +} + +func (s *cloudSuite) TestUserCredentials(c *gc.C) { + apiCaller := basetesting.APICallerFunc( + func(objType string, + version int, + id, request string, + 
a, result interface{}, + ) error { + c.Check(objType, gc.Equals, "Cloud") + c.Check(id, gc.Equals, "") + c.Check(request, gc.Equals, "UserCredentials") + c.Assert(result, gc.FitsTypeOf, ¶ms.StringsResults{}) + c.Assert(a, jc.DeepEquals, params.UserClouds{UserClouds: []params.UserCloud{{ + UserTag: "user-bob", CloudTag: "cloud-foo", }}}) - *result.(*params.CloudCredentialsResults) = params.CloudCredentialsResults{ - Results: []params.CloudCredentialsResult{{ - Credentials: map[string]params.CloudCredential{ - "one": { - AuthType: "empty", - }, - "two": { - AuthType: "userpass", - Attributes: map[string]string{ - "username": "admin", - "password": "adm1n", - }, - }, + *result.(*params.StringsResults) = params.StringsResults{ + Results: []params.StringsResult{{ + Result: []string{ + "cloudcred-foo_bob_one", + "cloudcred-foo_bob_two", }, }}, } @@ -129,14 +150,11 @@ ) client := cloudapi.NewClient(apiCaller) - result, err := client.Credentials(names.NewUserTag("bob@local"), names.NewCloudTag("foo")) + result, err := client.UserCredentials(names.NewUserTag("bob"), names.NewCloudTag("foo")) c.Assert(err, jc.ErrorIsNil) - c.Assert(result, jc.DeepEquals, map[string]cloud.Credential{ - "one": cloud.NewEmptyCredential(), - "two": cloud.NewCredential(cloud.UserPassAuthType, map[string]string{ - "username": "admin", - "password": "adm1n", - }), + c.Assert(result, jc.SameContents, []names.CloudCredentialTag{ + names.NewCloudCredentialTag("foo/bob/one"), + names.NewCloudCredentialTag("foo/bob/two"), }) } @@ -152,19 +170,13 @@ c.Check(id, gc.Equals, "") c.Check(request, gc.Equals, "UpdateCredentials") c.Assert(result, gc.FitsTypeOf, ¶ms.ErrorResults{}) - c.Assert(a, jc.DeepEquals, params.UsersCloudCredentials{[]params.UserCloudCredentials{{ - UserTag: "user-bob@local", - CloudTag: "cloud-foo", - Credentials: map[string]params.CloudCredential{ - "one": { - AuthType: "empty", - }, - "two": { - AuthType: "userpass", - Attributes: map[string]string{ - "username": "admin", - "password": 
"adm1n", - }, + c.Assert(a, jc.DeepEquals, params.UpdateCloudCredentials{Credentials: []params.UpdateCloudCredential{{ + Tag: "cloudcred-foo_bob_bar", + Credential: params.CloudCredential{ + AuthType: "userpass", + Attributes: map[string]string{ + "username": "admin", + "password": "adm1n", }, }, }}}) @@ -177,13 +189,97 @@ ) client := cloudapi.NewClient(apiCaller) - err := client.UpdateCredentials(names.NewUserTag("bob@local"), names.NewCloudTag("foo"), map[string]cloud.Credential{ - "one": cloud.NewEmptyCredential(), - "two": cloud.NewCredential(cloud.UserPassAuthType, map[string]string{ - "username": "admin", - "password": "adm1n", - }), - }) + tag := names.NewCloudCredentialTag("foo/bob/bar") + err := client.UpdateCredential(tag, cloud.NewCredential(cloud.UserPassAuthType, map[string]string{ + "username": "admin", + "password": "adm1n", + })) + c.Assert(err, jc.ErrorIsNil) + c.Assert(called, jc.IsTrue) +} + +func (s *cloudSuite) TestRevokeCredential(c *gc.C) { + var called bool + apiCaller := basetesting.APICallerFunc( + func(objType string, + version int, + id, request string, + a, result interface{}, + ) error { + c.Check(objType, gc.Equals, "Cloud") + c.Check(id, gc.Equals, "") + c.Check(request, gc.Equals, "RevokeCredentials") + c.Assert(result, gc.FitsTypeOf, ¶ms.ErrorResults{}) + c.Assert(a, jc.DeepEquals, params.Entities{Entities: []params.Entity{{ + Tag: "cloudcred-foo_bob_bar", + }}}) + *result.(*params.ErrorResults) = params.ErrorResults{ + Results: []params.ErrorResult{{}}, + } + called = true + return nil + }, + ) + + client := cloudapi.NewClient(apiCaller) + tag := names.NewCloudCredentialTag("foo/bob/bar") + err := client.RevokeCredential(tag) c.Assert(err, jc.ErrorIsNil) c.Assert(called, jc.IsTrue) } + +func (s *cloudSuite) TestCredentials(c *gc.C) { + apiCaller := basetesting.APICallerFunc( + func(objType string, + version int, + id, request string, + a, result interface{}, + ) error { + c.Check(objType, gc.Equals, "Cloud") + c.Check(id, 
gc.Equals, "") + c.Check(request, gc.Equals, "Credential") + c.Assert(result, gc.FitsTypeOf, ¶ms.CloudCredentialResults{}) + c.Assert(a, jc.DeepEquals, params.Entities{Entities: []params.Entity{{ + Tag: "cloudcred-foo_bob_bar", + }}}) + *result.(*params.CloudCredentialResults) = params.CloudCredentialResults{ + Results: []params.CloudCredentialResult{ + { + Result: ¶ms.CloudCredential{ + AuthType: "userpass", + Attributes: map[string]string{"username": "fred"}, + Redacted: []string{"password"}, + }, + }, { + Result: ¶ms.CloudCredential{ + AuthType: "userpass", + Attributes: map[string]string{"username": "mary"}, + Redacted: []string{"password"}, + }, + }, + }, + } + return nil + }, + ) + + client := cloudapi.NewClient(apiCaller) + tag := names.NewCloudCredentialTag("foo/bob/bar") + result, err := client.Credentials(tag) + c.Assert(err, jc.ErrorIsNil) + c.Assert(result, jc.DeepEquals, []params.CloudCredentialResult{ + { + Result: ¶ms.CloudCredential{ + AuthType: "userpass", + Attributes: map[string]string{"username": "fred"}, + Redacted: []string{"password"}, + }, + }, { + Result: ¶ms.CloudCredential{ + AuthType: "userpass", + Attributes: map[string]string{"username": "mary"}, + Redacted: []string{"password"}, + }, + }, + }) +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/api/common/cloudspec/cloudspec.go juju-core-2.0.0/src/github.com/juju/juju/api/common/cloudspec/cloudspec.go --- juju-core-2.0~beta15/src/github.com/juju/juju/api/common/cloudspec/cloudspec.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/api/common/cloudspec/cloudspec.go 2016-10-13 14:31:49.000000000 +0000 @@ -41,21 +41,31 @@ if result.Error != nil { return environs.CloudSpec{}, errors.Annotate(result.Error, "API request failed") } + return api.MakeCloudSpec(result.Result) +} + +// MakeCloudSpec creates an environs.CloudSpec from a params.CloudSpec +// that has been returned from the apiserver. 
+func (api *CloudSpecAPI) MakeCloudSpec(pSpec *params.CloudSpec) (environs.CloudSpec, error) { + if pSpec == nil { + return environs.CloudSpec{}, errors.NotValidf("nil value") + } var credential *cloud.Credential - if result.Result.Credential != nil { + if pSpec.Credential != nil { credentialValue := cloud.NewCredential( - cloud.AuthType(result.Result.Credential.AuthType), - result.Result.Credential.Attributes, + cloud.AuthType(pSpec.Credential.AuthType), + pSpec.Credential.Attributes, ) credential = &credentialValue } spec := environs.CloudSpec{ - Type: result.Result.Type, - Name: result.Result.Name, - Region: result.Result.Region, - Endpoint: result.Result.Endpoint, - StorageEndpoint: result.Result.StorageEndpoint, - Credential: credential, + Type: pSpec.Type, + Name: pSpec.Name, + Region: pSpec.Region, + Endpoint: pSpec.Endpoint, + IdentityEndpoint: pSpec.IdentityEndpoint, + StorageEndpoint: pSpec.StorageEndpoint, + Credential: credential, } if err := spec.Validate(); err != nil { return environs.CloudSpec{}, errors.Annotate(err, "validating CloudSpec") diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/api/common/cloudspec/cloudspec_test.go juju-core-2.0.0/src/github.com/juju/juju/api/common/cloudspec/cloudspec_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/api/common/cloudspec/cloudspec_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/api/common/cloudspec/cloudspec_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -39,11 +39,12 @@ *(response.(*params.CloudSpecResults)) = params.CloudSpecResults{ []params.CloudSpecResult{{ Result: ¶ms.CloudSpec{ - Type: "type", - Name: "name", - Region: "region", - Endpoint: "endpoint", - StorageEndpoint: "storage-endpoint", + Type: "type", + Name: "name", + Region: "region", + Endpoint: "endpoint", + IdentityEndpoint: "identity-endpoint", + StorageEndpoint: "storage-endpoint", Credential: ¶ms.CloudCredential{ AuthType: "auth-type", Attributes: map[string]string{"k": "v"}, @@ 
-62,12 +63,13 @@ map[string]string{"k": "v"}, ) c.Assert(cloudSpec, jc.DeepEquals, environs.CloudSpec{ - Type: "type", - Name: "name", - Region: "region", - Endpoint: "endpoint", - StorageEndpoint: "storage-endpoint", - Credential: &credential, + Type: "type", + Name: "name", + Region: "region", + Endpoint: "endpoint", + IdentityEndpoint: "identity-endpoint", + StorageEndpoint: "storage-endpoint", + Credential: &credential, }) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/api/common/modelstatus.go juju-core-2.0.0/src/github.com/juju/juju/api/common/modelstatus.go --- juju-core-2.0~beta15/src/github.com/juju/juju/api/common/modelstatus.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/api/common/modelstatus.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,74 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package common + +import ( + "github.com/juju/errors" + "gopkg.in/juju/names.v2" + + "github.com/juju/juju/api/base" + "github.com/juju/juju/apiserver/params" +) + +// ModelStatusAPI provides common client-side API functions +// to call into apiserver.common.ModelStatusAPI. +type ModelStatusAPI struct { + facade base.FacadeCaller +} + +// NewModelStatusAPI creates a ModelStatusAPI on the specified facade, +// and uses this name when calling through the caller. +func NewModelStatusAPI(facade base.FacadeCaller) *ModelStatusAPI { + return &ModelStatusAPI{facade} +} + +// ModelStatus returns a status summary for each model tag passed in. 
+func (c *ModelStatusAPI) ModelStatus(tags ...names.ModelTag) ([]base.ModelStatus, error) { + result := params.ModelStatusResults{} + models := make([]params.Entity, len(tags)) + for i, tag := range tags { + models[i] = params.Entity{Tag: tag.String()} + } + req := params.Entities{ + Entities: models, + } + if err := c.facade.FacadeCall("ModelStatus", req, &result); err != nil { + return nil, err + } + + results := make([]base.ModelStatus, len(result.Results)) + for i, r := range result.Results { + model, err := names.ParseModelTag(r.ModelTag) + if err != nil { + return nil, errors.Annotatef(err, "ModelTag %q at position %d", r.ModelTag, i) + } + owner, err := names.ParseUserTag(r.OwnerTag) + if err != nil { + return nil, errors.Annotatef(err, "OwnerTag %q at position %d", r.OwnerTag, i) + } + + results[i] = base.ModelStatus{ + UUID: model.Id(), + Life: string(r.Life), + Owner: owner.Id(), + HostedMachineCount: r.HostedMachineCount, + ServiceCount: r.ApplicationCount, + TotalMachineCount: len(r.Machines), + } + results[i].Machines = make([]base.Machine, len(r.Machines)) + for j, mm := range r.Machines { + if mm.Hardware != nil && mm.Hardware.Cores != nil { + results[i].CoreCount += int(*mm.Hardware.Cores) + } + results[i].Machines[j] = base.Machine{ + Id: mm.Id, + InstanceId: mm.InstanceId, + HasVote: mm.HasVote, + WantsVote: mm.WantsVote, + Status: mm.Status, + } + } + } + return results, nil +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/api/controller/controller.go juju-core-2.0.0/src/github.com/juju/juju/api/controller/controller.go --- juju-core-2.0~beta15/src/github.com/juju/juju/api/controller/controller.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/api/controller/controller.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,14 +4,19 @@ package controller import ( + "encoding/json" + "github.com/juju/errors" "gopkg.in/juju/names.v2" + "gopkg.in/macaroon.v1" "github.com/juju/juju/api" "github.com/juju/juju/api/base" 
"github.com/juju/juju/api/common" "github.com/juju/juju/api/common/cloudspec" "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/environs" + "github.com/juju/juju/permission" ) // Client provides methods that the Juju client command uses to interact @@ -20,6 +25,7 @@ base.ClientFacade facade base.FacadeCaller *common.ControllerConfigAPI + *common.ModelStatusAPI *cloudspec.CloudSpecAPI } @@ -31,6 +37,7 @@ ClientFacade: frontend, facade: backend, ControllerConfigAPI: common.NewControllerConfig(backend), + ModelStatusAPI: common.NewModelStatusAPI(backend), CloudSpecAPI: cloudspec.NewCloudSpecAPI(backend), } } @@ -52,7 +59,7 @@ result[i] = base.UserModel{ Name: model.Name, UUID: model.UUID, - Owner: owner.Canonical(), + Owner: owner.Id(), LastConnection: model.LastConnection, } } @@ -71,6 +78,51 @@ return values, err } +// HostedConfig contains the model config and the cloud spec for that +// model such that direct access to the provider can be used. +type HostedConfig struct { + Name string + Owner names.UserTag + Config map[string]interface{} + CloudSpec environs.CloudSpec + Error error +} + +// HostedModelsConfig returns all model settings for the +// controller model. +func (c *Client) HostedModelConfigs() ([]HostedConfig, error) { + result := params.HostedModelConfigsResults{} + err := c.facade.FacadeCall("HostedModelConfigs", nil, &result) + if err != nil { + return nil, errors.Trace(err) + } + // If we get to here, we have some values. Each value may or + // may not have an error, but it should at least have a name + // and owner. 
+ hostedConfigs := make([]HostedConfig, len(result.Models)) + for i, modelConfig := range result.Models { + hostedConfigs[i].Name = modelConfig.Name + tag, err := names.ParseUserTag(modelConfig.OwnerTag) + if err != nil { + hostedConfigs[i].Error = errors.Trace(err) + continue + } + hostedConfigs[i].Owner = tag + if modelConfig.Error != nil { + hostedConfigs[i].Error = errors.Trace(modelConfig.Error) + continue + } + hostedConfigs[i].Config = modelConfig.Config + spec, err := c.MakeCloudSpec(modelConfig.CloudSpec) + if err != nil { + hostedConfigs[i].Error = errors.Trace(err) + continue + } + hostedConfigs[i].CloudSpec = spec + } + return hostedConfigs, err +} + // DestroyController puts the controller model into a "dying" state, // and removes all non-manager machine instances. Underlying DestroyModel // calls will fail if there are any manually-provisioned non-manager machines @@ -106,57 +158,79 @@ return api.NewAllModelWatcher(c.facade.RawAPICaller(), &info.AllWatcherId), nil } -// ModelStatus returns a status summary for each model tag passed in. -func (c *Client) ModelStatus(tags ...names.ModelTag) ([]base.ModelStatus, error) { - result := params.ModelStatusResults{} - models := make([]params.Entity, len(tags)) - for i, tag := range tags { - models[i] = params.Entity{Tag: tag.String()} +// GrantController grants a user access to the controller. +func (c *Client) GrantController(user, access string) error { + return c.modifyControllerUser(params.GrantControllerAccess, user, access) +} + +// RevokeController revokes a user's access to the controller. 
+func (c *Client) RevokeController(user, access string) error { + return c.modifyControllerUser(params.RevokeControllerAccess, user, access) +} + +func (c *Client) modifyControllerUser(action params.ControllerAction, user, access string) error { + var args params.ModifyControllerAccessRequest + + if !names.IsValidUser(user) { + return errors.Errorf("invalid username: %q", user) } - req := params.Entities{ - Entities: models, + userTag := names.NewUserTag(user) + + args.Changes = []params.ModifyControllerAccess{{ + UserTag: userTag.String(), + Action: action, + Access: access, + }} + + var result params.ErrorResults + err := c.facade.FacadeCall("ModifyControllerAccess", args, &result) + if err != nil { + return errors.Trace(err) } - if err := c.facade.FacadeCall("ModelStatus", req, &result); err != nil { - return nil, err + if len(result.Results) != len(args.Changes) { + return errors.Errorf("expected %d results, got %d", len(args.Changes), len(result.Results)) } - results := make([]base.ModelStatus, len(result.Results)) - for i, r := range result.Results { - model, err := names.ParseModelTag(r.ModelTag) - if err != nil { - return nil, errors.Annotatef(err, "ModelTag %q at position %d", r.ModelTag, i) - } - owner, err := names.ParseUserTag(r.OwnerTag) - if err != nil { - return nil, errors.Annotatef(err, "OwnerTag %q at position %d", r.OwnerTag, i) - } - - results[i] = base.ModelStatus{ - UUID: model.Id(), - Life: r.Life, - Owner: owner.Canonical(), - HostedMachineCount: r.HostedMachineCount, - ServiceCount: r.ApplicationCount, - } + return result.Combine() +} +// GetControllerAccess returns the access level the user has on the controller. 
+func (c *Client) GetControllerAccess(user string) (permission.Access, error) { + if !names.IsValidUser(user) { + return "", errors.Errorf("invalid username: %q", user) + } + entities := params.Entities{Entities: []params.Entity{{names.NewUserTag(user).String()}}} + var results params.UserAccessResults + err := c.facade.FacadeCall("GetControllerAccess", entities, &results) + if err != nil { + return "", errors.Trace(err) + } + if len(results.Results) != 1 { + return "", errors.Errorf("expected 1 result, got %d", len(results.Results)) } - return results, nil + if err := results.Results[0].Error; err != nil { + return "", errors.Trace(err) + } + return permission.Access(results.Results[0].Result.Access), nil } -// ModelMigrationSpec holds the details required to start the -// migration of a single model. -type ModelMigrationSpec struct { +// MigrationSpec holds the details required to start the migration of +// a single model. +type MigrationSpec struct { ModelUUID string TargetControllerUUID string TargetAddrs []string TargetCACert string TargetUser string TargetPassword string + TargetMacaroons []macaroon.Slice + ExternalControl bool + SkipInitialPrechecks bool } // Validate performs sanity checks on the migration configuration it // holds. -func (s *ModelMigrationSpec) Validate() error { +func (s *MigrationSpec) Validate() error { if !names.IsValidModel(s.ModelUUID) { return errors.NotValidf("model UUID") } @@ -172,36 +246,45 @@ if !names.IsValidUser(s.TargetUser) { return errors.NotValidf("target user") } - if s.TargetPassword == "" { - return errors.NotValidf("empty target password") + if s.TargetPassword == "" && len(s.TargetMacaroons) == 0 { + return errors.NotValidf("missing authentication secrets") } return nil } -// InitiateModelMigration attempts to start a migration for the -// specified model, returning the migration's ID. +// InitiateMigration attempts to start a migration for the specified +// model, returning the migration's ID. 
// // The API server supports starting multiple migrations in one request // but we don't need that at the client side yet (and may never) so // this call just supports starting one migration at a time. -func (c *Client) InitiateModelMigration(spec ModelMigrationSpec) (string, error) { +func (c *Client) InitiateMigration(spec MigrationSpec) (string, error) { if err := spec.Validate(); err != nil { return "", errors.Trace(err) } - args := params.InitiateModelMigrationArgs{ - Specs: []params.ModelMigrationSpec{{ + + macsJSON, err := macaroonsToJSON(spec.TargetMacaroons) + if err != nil { + return "", errors.Trace(err) + } + + args := params.InitiateMigrationArgs{ + Specs: []params.MigrationSpec{{ ModelTag: names.NewModelTag(spec.ModelUUID).String(), - TargetInfo: params.ModelMigrationTargetInfo{ - ControllerTag: names.NewModelTag(spec.TargetControllerUUID).String(), + TargetInfo: params.MigrationTargetInfo{ + ControllerTag: names.NewControllerTag(spec.TargetControllerUUID).String(), Addrs: spec.TargetAddrs, CACert: spec.TargetCACert, AuthTag: names.NewUserTag(spec.TargetUser).String(), Password: spec.TargetPassword, + Macaroons: string(macsJSON), }, + ExternalControl: spec.ExternalControl, + SkipInitialPrechecks: spec.SkipInitialPrechecks, }}, } - response := params.InitiateModelMigrationResults{} - if err := c.facade.FacadeCall("InitiateModelMigration", args, &response); err != nil { + response := params.InitiateMigrationResults{} + if err := c.facade.FacadeCall("InitiateMigration", args, &response); err != nil { return "", errors.Trace(err) } if len(response.Results) != 1 { @@ -213,3 +296,14 @@ } return result.MigrationId, nil } + +func macaroonsToJSON(macs []macaroon.Slice) (string, error) { + if len(macs) == 0 { + return "", nil + } + out, err := json.Marshal(macs) + if err != nil { + return "", errors.Annotate(err, "marshalling macaroons") + } + return string(out), nil +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/api/controller/controller_test.go 
juju-core-2.0.0/src/github.com/juju/juju/api/controller/controller_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/api/controller/controller_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/api/controller/controller_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -1,223 +1,229 @@ -// Copyright 2015 Canonical Ltd. +// Copyright 2016 Canonical Ltd. // Licensed under the AGPLv3, see LICENCE file for details. package controller_test import ( - "fmt" - "time" + "encoding/json" + "errors" - "github.com/juju/errors" jc "github.com/juju/testing/checkers" - "github.com/juju/utils" gc "gopkg.in/check.v1" "gopkg.in/juju/names.v2" + "gopkg.in/macaroon.v1" - "github.com/juju/juju/api/base" + apitesting "github.com/juju/juju/api/base/testing" "github.com/juju/juju/api/controller" - commontesting "github.com/juju/juju/apiserver/common/testing" + "github.com/juju/juju/apiserver/common" "github.com/juju/juju/apiserver/params" - jujutesting "github.com/juju/juju/juju/testing" - "github.com/juju/juju/state" - "github.com/juju/juju/state/multiwatcher" - "github.com/juju/juju/testing" - "github.com/juju/juju/testing/factory" + "github.com/juju/juju/environs" + jujutesting "github.com/juju/testing" + "github.com/juju/utils" ) -type controllerSuite struct { - jujutesting.JujuConnSuite - commontesting.BlockHelper +type Suite struct { + jujutesting.IsolationSuite } -var _ = gc.Suite(&controllerSuite{}) +var _ = gc.Suite(&Suite{}) -func (s *controllerSuite) SetUpTest(c *gc.C) { - s.JujuConnSuite.SetUpTest(c) +func (s *Suite) TestInitiateMigration(c *gc.C) { + s.checkInitiateMigration(c, makeSpec()) } -func (s *controllerSuite) OpenAPI(c *gc.C) *controller.Client { - return controller.NewClient(s.APIState) +func (s *Suite) TestInitiateMigrationExternalControl(c *gc.C) { + spec := makeSpec() + spec.ExternalControl = true + s.checkInitiateMigration(c, spec) } -func (s *controllerSuite) TestAllModels(c *gc.C) { - owner := 
names.NewUserTag("user@remote") - s.Factory.MakeModel(c, &factory.ModelParams{ - Name: "first", Owner: owner}).Close() - s.Factory.MakeModel(c, &factory.ModelParams{ - Name: "second", Owner: owner}).Close() - - sysManager := s.OpenAPI(c) - envs, err := sysManager.AllModels() - c.Assert(err, jc.ErrorIsNil) - c.Assert(envs, gc.HasLen, 3) - - var obtained []string - for _, env := range envs { - obtained = append(obtained, fmt.Sprintf("%s/%s", env.Owner, env.Name)) - } - expected := []string{ - "admin@local/controller", - "user@remote/first", - "user@remote/second", - } - c.Assert(obtained, jc.SameContents, expected) +func (s *Suite) TestInitiateMigrationSkipPrechecks(c *gc.C) { + spec := makeSpec() + spec.SkipInitialPrechecks = true + s.checkInitiateMigration(c, spec) } -func (s *controllerSuite) TestModelConfig(c *gc.C) { - sysManager := s.OpenAPI(c) - cfg, err := sysManager.ModelConfig() +func (s *Suite) checkInitiateMigration(c *gc.C, spec controller.MigrationSpec) { + client, stub := makeClient(params.InitiateMigrationResults{ + Results: []params.InitiateMigrationResult{{ + MigrationId: "id", + }}, + }) + id, err := client.InitiateMigration(spec) c.Assert(err, jc.ErrorIsNil) - c.Assert(cfg["name"], gc.Equals, "controller") + c.Check(id, gc.Equals, "id") + stub.CheckCalls(c, []jujutesting.StubCall{ + {"Controller.InitiateMigration", []interface{}{specToArgs(spec)}}, + }) } -func (s *controllerSuite) TestControllerConfig(c *gc.C) { - sysManager := s.OpenAPI(c) - cfg, err := sysManager.ControllerConfig() - c.Assert(err, jc.ErrorIsNil) - cfgFromDB, err := s.State.ControllerConfig() - c.Assert(err, jc.ErrorIsNil) - c.Assert(cfg["controller-uuid"], gc.Equals, cfgFromDB.ControllerUUID()) - c.Assert(int(cfg["state-port"].(float64)), gc.Equals, cfgFromDB.StatePort()) - c.Assert(int(cfg["api-port"].(float64)), gc.Equals, cfgFromDB.APIPort()) +func specToArgs(spec controller.MigrationSpec) params.InitiateMigrationArgs { + var macsJSON []byte + if len(spec.TargetMacaroons) > 
0 { + var err error + macsJSON, err = json.Marshal(spec.TargetMacaroons) + if err != nil { + panic(err) + } + } + return params.InitiateMigrationArgs{ + Specs: []params.MigrationSpec{{ + ModelTag: names.NewModelTag(spec.ModelUUID).String(), + TargetInfo: params.MigrationTargetInfo{ + ControllerTag: names.NewControllerTag(spec.TargetControllerUUID).String(), + Addrs: spec.TargetAddrs, + CACert: spec.TargetCACert, + AuthTag: names.NewUserTag(spec.TargetUser).String(), + Password: spec.TargetPassword, + Macaroons: string(macsJSON), + }, + ExternalControl: spec.ExternalControl, + SkipInitialPrechecks: spec.SkipInitialPrechecks, + }}, + } } -func (s *controllerSuite) TestDestroyController(c *gc.C) { - st := s.Factory.MakeModel(c, &factory.ModelParams{Name: "foo"}) - factory.NewFactory(st).MakeMachine(c, nil) // make it non-empty - st.Close() - - sysManager := s.OpenAPI(c) - err := sysManager.DestroyController(false) - c.Assert(err, gc.ErrorMatches, `failed to destroy model: hosting 1 other models \(controller has hosted models\)`) +func (s *Suite) TestInitiateMigrationError(c *gc.C) { + client, _ := makeClient(params.InitiateMigrationResults{ + Results: []params.InitiateMigrationResult{{ + Error: common.ServerError(errors.New("boom")), + }}, + }) + id, err := client.InitiateMigration(makeSpec()) + c.Check(id, gc.Equals, "") + c.Check(err, gc.ErrorMatches, "boom") } -func (s *controllerSuite) TestListBlockedModels(c *gc.C) { - err := s.State.SwitchBlockOn(state.ChangeBlock, "change block for controller") - err = s.State.SwitchBlockOn(state.DestroyBlock, "destroy block for controller") - c.Assert(err, jc.ErrorIsNil) - - sysManager := s.OpenAPI(c) - results, err := sysManager.ListBlockedModels() - c.Assert(err, jc.ErrorIsNil) - c.Assert(results, jc.DeepEquals, []params.ModelBlockInfo{ - { - Name: "controller", - UUID: s.State.ModelUUID(), - OwnerTag: s.AdminUserTag(c).String(), - Blocks: []string{ - "BlockChange", - "BlockDestroy", - }, +func (s *Suite) 
TestInitiateMigrationResultMismatch(c *gc.C) { + client, _ := makeClient(params.InitiateMigrationResults{ + Results: []params.InitiateMigrationResult{ + {MigrationId: "id"}, + {MigrationId: "wtf"}, }, }) + id, err := client.InitiateMigration(makeSpec()) + c.Check(id, gc.Equals, "") + c.Check(err, gc.ErrorMatches, "unexpected number of results returned") } -func (s *controllerSuite) TestRemoveBlocks(c *gc.C) { - s.State.SwitchBlockOn(state.DestroyBlock, "TestBlockDestroyModel") - s.State.SwitchBlockOn(state.ChangeBlock, "TestChangeBlock") - - sysManager := s.OpenAPI(c) - err := sysManager.RemoveBlocks() - c.Assert(err, jc.ErrorIsNil) - - blocks, err := s.State.AllBlocksForController() - c.Assert(err, jc.ErrorIsNil) - c.Assert(blocks, gc.HasLen, 0) +func (s *Suite) TestInitiateMigrationCallError(c *gc.C) { + apiCaller := apitesting.APICallerFunc(func(string, int, string, string, interface{}, interface{}) error { + return errors.New("boom") + }) + client := controller.NewClient(apiCaller) + id, err := client.InitiateMigration(makeSpec()) + c.Check(id, gc.Equals, "") + c.Check(err, gc.ErrorMatches, "boom") } -func (s *controllerSuite) TestWatchAllModels(c *gc.C) { - // The WatchAllModels infrastructure is comprehensively tested - // else. This test just ensure that the API calls work end-to-end. 
- sysManager := s.OpenAPI(c) - - w, err := sysManager.WatchAllModels() - c.Assert(err, jc.ErrorIsNil) - defer func() { - err := w.Stop() - c.Assert(err, jc.ErrorIsNil) - }() - - deltasC := make(chan []multiwatcher.Delta) - go func() { - deltas, err := w.Next() - c.Assert(err, jc.ErrorIsNil) - deltasC <- deltas - }() - - select { - case deltas := <-deltasC: - c.Assert(deltas, gc.HasLen, 1) - modelInfo := deltas[0].Entity.(*multiwatcher.ModelInfo) - - env, err := s.State.Model() - c.Assert(err, jc.ErrorIsNil) - - c.Assert(modelInfo.ModelUUID, gc.Equals, env.UUID()) - c.Assert(modelInfo.Name, gc.Equals, env.Name()) - c.Assert(modelInfo.Life, gc.Equals, multiwatcher.Life("alive")) - c.Assert(modelInfo.Owner, gc.Equals, env.Owner().Id()) - c.Assert(modelInfo.ControllerUUID, gc.Equals, env.ControllerUUID()) - case <-time.After(testing.LongWait): - c.Fatal("timed out") - } +func (s *Suite) TestInitiateMigrationValidationError(c *gc.C) { + client, stub := makeClient(params.InitiateMigrationResults{}) + spec := makeSpec() + spec.ModelUUID = "not-a-uuid" + id, err := client.InitiateMigration(spec) + c.Check(id, gc.Equals, "") + c.Check(err, gc.ErrorMatches, "model UUID not valid") + c.Check(stub.Calls(), gc.HasLen, 0) // API call shouldn't have happened } -func (s *controllerSuite) TestModelStatus(c *gc.C) { - controller := s.OpenAPI(c) - modelTag := s.State.ModelTag() - results, err := controller.ModelStatus(modelTag) - c.Assert(err, jc.ErrorIsNil) - c.Assert(results, jc.DeepEquals, []base.ModelStatus{{ - UUID: modelTag.Id(), - HostedMachineCount: 0, - ServiceCount: 0, - Owner: "admin@local", - Life: params.Alive, - }}) -} - -func (s *controllerSuite) TestInitiateModelMigration(c *gc.C) { - st := s.Factory.MakeModel(c, nil) - defer st.Close() - - _, err := st.LatestModelMigration() - c.Assert(errors.IsNotFound(err), jc.IsTrue) - - spec := controller.ModelMigrationSpec{ - ModelUUID: st.ModelUUID(), - TargetControllerUUID: randomUUID(), - TargetAddrs: []string{"1.2.3.4:5"}, - 
TargetCACert: "cert", - TargetUser: "someone", - TargetPassword: "secret", - } - - controller := s.OpenAPI(c) - id, err := controller.InitiateModelMigration(spec) - c.Assert(err, jc.ErrorIsNil) - expectedId := st.ModelUUID() + ":0" - c.Check(id, gc.Equals, expectedId) - - // Check database. - mig, err := st.LatestModelMigration() - c.Assert(err, jc.ErrorIsNil) - c.Check(mig.Id(), gc.Equals, expectedId) +func (s *Suite) TestHostedModelConfigs_CallError(c *gc.C) { + apiCaller := apitesting.APICallerFunc(func(string, int, string, string, interface{}, interface{}) error { + return errors.New("boom") + }) + client := controller.NewClient(apiCaller) + config, err := client.HostedModelConfigs() + c.Check(config, gc.HasLen, 0) + c.Check(err, gc.ErrorMatches, "boom") +} + +func (s *Suite) TestHostedModelConfigs_FormatResults(c *gc.C) { + apiCaller := apitesting.APICallerFunc(func(objType string, version int, id, request string, arg, result interface{}) error { + c.Assert(objType, gc.Equals, "Controller") + c.Assert(request, gc.Equals, "HostedModelConfigs") + c.Assert(arg, gc.IsNil) + out := result.(*params.HostedModelConfigsResults) + c.Assert(out, gc.NotNil) + *out = params.HostedModelConfigsResults{ + Models: []params.HostedModelConfig{ + { + Name: "first", + OwnerTag: "user-foo@bar", + Config: map[string]interface{}{ + "name": "first", + }, + CloudSpec: ¶ms.CloudSpec{ + Type: "magic", + Name: "first", + }, + }, { + Name: "second", + OwnerTag: "bad-tag", + }, { + Name: "third", + OwnerTag: "user-foo@bar", + Config: map[string]interface{}{ + "name": "third", + }, + CloudSpec: ¶ms.CloudSpec{ + Name: "third", + }, + }, + }, + } + return nil + }) + client := controller.NewClient(apiCaller) + config, err := client.HostedModelConfigs() + c.Assert(config, gc.HasLen, 3) + c.Assert(err, jc.ErrorIsNil) + first := config[0] + c.Assert(first.Name, gc.Equals, "first") + c.Assert(first.Owner, gc.Equals, names.NewUserTag("foo@bar")) + c.Assert(first.Config, gc.DeepEquals, 
map[string]interface{}{ + "name": "first", + }) + c.Assert(first.CloudSpec, gc.DeepEquals, environs.CloudSpec{ + Type: "magic", + Name: "first", + }) + second := config[1] + c.Assert(second.Name, gc.Equals, "second") + c.Assert(second.Error.Error(), gc.Equals, `"bad-tag" is not a valid tag`) + third := config[2] + c.Assert(third.Name, gc.Equals, "third") + c.Assert(third.Error.Error(), gc.Equals, "validating CloudSpec: empty Type not valid") +} + +func makeClient(results params.InitiateMigrationResults) ( + *controller.Client, *jujutesting.Stub, +) { + var stub jujutesting.Stub + apiCaller := apitesting.APICallerFunc( + func(objType string, version int, id, request string, arg, result interface{}) error { + stub.AddCall(objType+"."+request, arg) + out := result.(*params.InitiateMigrationResults) + *out = results + return nil + }, + ) + client := controller.NewClient(apiCaller) + return client, &stub } -func (s *controllerSuite) TestInitiateModelMigrationError(c *gc.C) { - spec := controller.ModelMigrationSpec{ - ModelUUID: randomUUID(), // Model doesn't exist. 
+func makeSpec() controller.MigrationSpec { + mac, err := macaroon.New([]byte("secret"), "id", "location") + if err != nil { + panic(err) + } + return controller.MigrationSpec{ + ModelUUID: randomUUID(), TargetControllerUUID: randomUUID(), TargetAddrs: []string{"1.2.3.4:5"}, TargetCACert: "cert", TargetUser: "someone", TargetPassword: "secret", + TargetMacaroons: []macaroon.Slice{{mac}}, } - - controller := s.OpenAPI(c) - id, err := controller.InitiateModelMigration(spec) - c.Check(id, gc.Equals, "") - c.Check(err, gc.ErrorMatches, "unable to read model: .+") } func randomUUID() string { diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/api/controller/gui.go juju-core-2.0.0/src/github.com/juju/juju/api/controller/gui.go --- juju-core-2.0~beta15/src/github.com/juju/juju/api/controller/gui.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/api/controller/gui.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,89 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package controller + +import ( + "bytes" + "encoding/json" + "io" + "net/http" + "net/url" + + "github.com/juju/errors" + "github.com/juju/version" + + "github.com/juju/juju/apiserver/params" +) + +const ( + guiArchivePath = "/gui-archive" + guiVersionPath = "/gui-version" +) + +// GUIArchives retrieves information about Juju GUI archives currently present +// in the Juju controller. 
+func (c *Client) GUIArchives() ([]params.GUIArchiveVersion, error) { + httpClient, err := c.facade.RawAPICaller().HTTPClient() + if err != nil { + return nil, errors.Annotate(err, "cannot retrieve HTTP client") + } + var resp params.GUIArchiveResponse + if err = httpClient.Get(guiArchivePath, &resp); err != nil { + return nil, errors.Annotate(err, "cannot retrieve GUI archives info") + } + return resp.Versions, nil +} + +// UploadGUIArchive uploads a GUI archive to the controller over HTTPS, and +// reports about whether the upload updated the current GUI served by Juju. +func (c *Client) UploadGUIArchive(r io.ReadSeeker, hash string, size int64, vers version.Number) (current bool, err error) { + // Prepare the request. + v := url.Values{} + v.Set("version", vers.String()) + v.Set("hash", hash) + req, err := http.NewRequest("POST", guiArchivePath+"?"+v.Encode(), nil) + if err != nil { + return false, errors.Annotate(err, "cannot create upload request") + } + req.Header.Set("Content-Type", "application/x-tar-bzip2") + req.ContentLength = size + + // Retrieve a client and send the request. + httpClient, err := c.facade.RawAPICaller().HTTPClient() + if err != nil { + return false, errors.Annotate(err, "cannot retrieve HTTP client") + } + var resp params.GUIArchiveVersion + if err = httpClient.Do(req, r, &resp); err != nil { + return false, errors.Annotate(err, "cannot upload the GUI archive") + } + return resp.Current, nil +} + +// SelectGUIVersion selects which version of the Juju GUI is served by the +// controller. +func (c *Client) SelectGUIVersion(vers version.Number) error { + // Prepare the request. 
+ req, err := http.NewRequest("PUT", guiVersionPath, nil) + if err != nil { + return errors.Annotate(err, "cannot create PUT request") + } + req.Header.Set("Content-Type", params.ContentTypeJSON) + content, err := json.Marshal(params.GUIVersionRequest{ + Version: vers, + }) + if err != nil { + errors.Annotate(err, "cannot marshal request body") + } + + // Retrieve a client and send the request. + httpClient, err := c.facade.RawAPICaller().HTTPClient() + if err != nil { + return errors.Annotate(err, "cannot retrieve HTTP client") + } + if err = httpClient.Do(req, bytes.NewReader(content), nil); err != nil { + return errors.Annotate(err, "cannot select GUI version") + } + return nil +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/api/controller/gui_test.go juju-core-2.0.0/src/github.com/juju/juju/api/controller/gui_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/api/controller/gui_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/api/controller/gui_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,148 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package controller_test + +import ( + "bytes" + "encoding/json" + "io/ioutil" + "net/http" + + "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + "github.com/juju/version" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/api/base" + "github.com/juju/juju/api/controller" + "github.com/juju/juju/apiserver/params" +) + +// sendJSONResponse encodes the given content as JSON and writes it to the +// given response writer. 
+func sendJSONResponse(c *gc.C, w http.ResponseWriter, content interface{}) { + w.Header().Set("Content-Type", params.ContentTypeJSON) + encoder := json.NewEncoder(w) + err := encoder.Encode(content) + c.Assert(err, jc.ErrorIsNil) +} + +// withHTTPClient sets up a fixture with the given address and handle, then +// runs the given test and checks that the HTTP handler has been called with +// the given method. +func withHTTPClient(c *gc.C, address, expectMethod string, handle func(http.ResponseWriter, *http.Request), test func(*controller.Client)) { + fix := newHTTPFixture(address, handle) + stub := fix.run(c, func(ac base.APICallCloser) { + client := controller.NewClient(ac) + test(client) + }) + stub.CheckCalls(c, []testing.StubCall{{expectMethod, nil}}) +} + +func (s *Suite) TestGUIArchives(c *gc.C) { + response := params.GUIArchiveResponse{ + Versions: []params.GUIArchiveVersion{{ + Version: version.MustParse("1.0.0"), + SHA256: "hash1", + Current: false, + }, { + Version: version.MustParse("2.0.0"), + SHA256: "hash2", + Current: true, + }}, + } + withHTTPClient(c, "/gui-archive", "GET", func(w http.ResponseWriter, req *http.Request) { + defer req.Body.Close() + sendJSONResponse(c, w, response) + }, func(client *controller.Client) { + // Retrieve the GUI archive versions. + versions, err := client.GUIArchives() + c.Assert(err, jc.ErrorIsNil) + c.Assert(versions, jc.DeepEquals, response.Versions) + }) +} + +func (s *Suite) TestGUIArchivesError(c *gc.C) { + withHTTPClient(c, "/gui-archive", "GET", func(w http.ResponseWriter, req *http.Request) { + defer req.Body.Close() + w.WriteHeader(http.StatusBadRequest) + }, func(client *controller.Client) { + // Call to get GUI archive versions. 
+ versions, err := client.GUIArchives() + c.Assert(err, gc.ErrorMatches, "cannot retrieve GUI archives info: .*") + c.Assert(versions, gc.IsNil) + }) +} + +func (s *Suite) TestUploadGUIArchive(c *gc.C) { + archive := []byte("archive content") + hash, size, vers := "archive-hash", int64(len(archive)), version.MustParse("2.1.0") + withHTTPClient(c, "/gui-archive", "POST", func(w http.ResponseWriter, req *http.Request) { + defer req.Body.Close() + err := req.ParseForm() + c.Assert(err, jc.ErrorIsNil) + // Check version and content length. + c.Assert(req.Form.Get("version"), gc.Equals, vers.String()) + c.Assert(req.ContentLength, gc.Equals, size) + // Check request body. + obtainedArchive, err := ioutil.ReadAll(req.Body) + c.Assert(err, jc.ErrorIsNil) + c.Assert(obtainedArchive, gc.DeepEquals, archive) + // Check hash. + h := req.Form.Get("hash") + c.Assert(h, gc.Equals, hash) + // Send the response. + sendJSONResponse(c, w, params.GUIArchiveVersion{ + Current: true, + }) + }, func(client *controller.Client) { + // Upload a new Juju GUI archive. + current, err := client.UploadGUIArchive(bytes.NewReader(archive), hash, size, vers) + c.Assert(err, jc.ErrorIsNil) + c.Assert(current, jc.IsTrue) + }) +} + +func (s *Suite) TestUploadGUIArchiveError(c *gc.C) { + archive := []byte("archive content") + hash, size, vers := "archive-hash", int64(len(archive)), version.MustParse("2.1.0") + withHTTPClient(c, "/gui-archive", "POST", func(w http.ResponseWriter, req *http.Request) { + defer req.Body.Close() + w.WriteHeader(http.StatusBadRequest) + }, func(client *controller.Client) { + // Call to upload a new Juju GUI archive. 
+ current, err := client.UploadGUIArchive(bytes.NewReader(archive), hash, size, vers) + c.Assert(err, gc.ErrorMatches, "cannot upload the GUI archive: .*") + c.Assert(current, jc.IsFalse) + }) +} + +func (s *Suite) TestSelectGUIVersion(c *gc.C) { + vers := version.MustParse("2.0.42") + withHTTPClient(c, "/gui-version", "PUT", func(w http.ResponseWriter, req *http.Request) { + defer req.Body.Close() + // Check request body. + var request params.GUIVersionRequest + decoder := json.NewDecoder(req.Body) + err := decoder.Decode(&request) + c.Assert(err, jc.ErrorIsNil) + c.Assert(request.Version, gc.Equals, vers) + }, func(client *controller.Client) { + // Switch to a specific Juju GUI version. + err := client.SelectGUIVersion(vers) + c.Assert(err, jc.ErrorIsNil) + }) +} + +func (s *Suite) TestSelectGUIVersionError(c *gc.C) { + vers := version.MustParse("2.0.42") + withHTTPClient(c, "/gui-version", "PUT", func(w http.ResponseWriter, req *http.Request) { + defer req.Body.Close() + w.WriteHeader(http.StatusBadRequest) + }, func(client *controller.Client) { + // Call to select a Juju GUI version. + err := client.SelectGUIVersion(vers) + c.Assert(err, gc.ErrorMatches, "cannot select GUI version: .*") + }) +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/api/controller/http_test.go juju-core-2.0.0/src/github.com/juju/juju/api/controller/http_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/api/controller/http_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/api/controller/http_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,86 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+ +package controller_test + +import ( + "net" + "net/http" + "net/url" + + "github.com/juju/httprequest" + "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/api/base" +) + +// newHTTPFixture creates and returns an HTTP fixture to be used in order to +// mock controller HTTP requests to the given controller address. +// Use it like in the following example: +// fix := newHTTPFixture("/my/controller/path", func(w http.ResponseWriter, req *http.Request) { +// // Simulate what's returned by the server. +// }) +// stub := fix.run(c, func(ac base.APICallCloser) { +// // Do something with the API caller. +// }) +// At this point the stub, if the handler has been called, includes one call +// with the HTTP method requested while running the test function. +func newHTTPFixture(address string, handle func(http.ResponseWriter, *http.Request)) *httpFixture { + return &httpFixture{ + address: address, + handle: handle, + } +} + +// httpFixture is used to mock controller HTTP API calls. +type httpFixture struct { + address string + handle func(http.ResponseWriter, *http.Request) +} + +// run sets up the fixture and run the given test. See newHTTPFixture for an +// example of how to use this. +func (f *httpFixture) run(c *gc.C, test func(base.APICallCloser)) *testing.Stub { + stub := &testing.Stub{} + lis, err := net.Listen("tcp", "127.0.0.1:0") + c.Assert(err, jc.ErrorIsNil) + defer lis.Close() + mux := http.NewServeMux() + mux.HandleFunc(f.address, func(w http.ResponseWriter, r *http.Request) { + stub.AddCall(r.Method) + f.handle(w, r) + }) + go func() { + http.Serve(lis, mux) + }() + test(&httpAPICallCloser{ + url: &url.URL{ + Scheme: "http", + Host: lis.Addr().String(), + }, + }) + return stub +} + +var _ base.APICallCloser = (*httpAPICallCloser)(nil) + +// httpAPICallCloser implements base.APICallCloser. 
+type httpAPICallCloser struct { + base.APICallCloser + url *url.URL +} + +// BestFacadeVersion implements base.APICallCloser. +func (*httpAPICallCloser) BestFacadeVersion(facade string) int { + return 42 +} + +// HTTPClient implements base.APICallCloser. The returned HTTP client can be +// used to send requests to the testing server set up in httpFixture.run(). +func (ac *httpAPICallCloser) HTTPClient() (*httprequest.Client, error) { + return &httprequest.Client{ + BaseURL: ac.url.String(), + }, nil +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/api/controller/legacy_test.go juju-core-2.0.0/src/github.com/juju/juju/api/controller/legacy_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/api/controller/legacy_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/api/controller/legacy_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,276 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package controller_test + +import ( + "fmt" + "net" + "time" + + jc "github.com/juju/testing/checkers" + "github.com/juju/utils/clock" + gc "gopkg.in/check.v1" + "gopkg.in/juju/names.v2" + + "github.com/juju/juju/api" + "github.com/juju/juju/api/base" + "github.com/juju/juju/api/controller" + "github.com/juju/juju/apiserver" + commontesting "github.com/juju/juju/apiserver/common/testing" + "github.com/juju/juju/apiserver/observer" + "github.com/juju/juju/apiserver/observer/fakeobserver" + "github.com/juju/juju/apiserver/params" + jujutesting "github.com/juju/juju/juju/testing" + "github.com/juju/juju/permission" + "github.com/juju/juju/state" + "github.com/juju/juju/state/multiwatcher" + "github.com/juju/juju/testing" + "github.com/juju/juju/testing/factory" +) + +// legacySuite has the tests for the controller client-side facade +// which use JujuConnSuite. The plan is to gradually move these tests +// to Suite in controller_test.go. 
+type legacySuite struct { + jujutesting.JujuConnSuite + commontesting.BlockHelper +} + +var _ = gc.Suite(&legacySuite{}) + +func (s *legacySuite) OpenAPI(c *gc.C) *controller.Client { + return controller.NewClient(s.OpenControllerAPI(c)) +} + +func (s *legacySuite) TestAllModels(c *gc.C) { + owner := names.NewUserTag("user@remote") + s.Factory.MakeModel(c, &factory.ModelParams{ + Name: "first", Owner: owner}).Close() + s.Factory.MakeModel(c, &factory.ModelParams{ + Name: "second", Owner: owner}).Close() + + sysManager := s.OpenAPI(c) + defer sysManager.Close() + envs, err := sysManager.AllModels() + c.Assert(err, jc.ErrorIsNil) + c.Assert(envs, gc.HasLen, 3) + + var obtained []string + for _, env := range envs { + obtained = append(obtained, fmt.Sprintf("%s/%s", env.Owner, env.Name)) + } + expected := []string{ + "admin/controller", + "user@remote/first", + "user@remote/second", + } + c.Assert(obtained, jc.SameContents, expected) +} + +func (s *legacySuite) TestModelConfig(c *gc.C) { + sysManager := s.OpenAPI(c) + defer sysManager.Close() + cfg, err := sysManager.ModelConfig() + c.Assert(err, jc.ErrorIsNil) + c.Assert(cfg["name"], gc.Equals, "controller") +} + +func (s *legacySuite) TestControllerConfig(c *gc.C) { + sysManager := s.OpenAPI(c) + defer sysManager.Close() + cfg, err := sysManager.ControllerConfig() + c.Assert(err, jc.ErrorIsNil) + cfgFromDB, err := s.State.ControllerConfig() + c.Assert(err, jc.ErrorIsNil) + c.Assert(cfg["controller-uuid"], gc.Equals, cfgFromDB.ControllerUUID()) + c.Assert(int(cfg["state-port"].(float64)), gc.Equals, cfgFromDB.StatePort()) + c.Assert(int(cfg["api-port"].(float64)), gc.Equals, cfgFromDB.APIPort()) +} + +func (s *legacySuite) TestDestroyController(c *gc.C) { + st := s.Factory.MakeModel(c, &factory.ModelParams{Name: "foo"}) + factory.NewFactory(st).MakeMachine(c, nil) // make it non-empty + st.Close() + + sysManager := s.OpenAPI(c) + defer sysManager.Close() + err := sysManager.DestroyController(false) + c.Assert(err, 
gc.ErrorMatches, `failed to destroy model: hosting 1 other models \(controller has hosted models\)`) +} + +func (s *legacySuite) TestListBlockedModels(c *gc.C) { + err := s.State.SwitchBlockOn(state.ChangeBlock, "change block for controller") + err = s.State.SwitchBlockOn(state.DestroyBlock, "destroy block for controller") + c.Assert(err, jc.ErrorIsNil) + + sysManager := s.OpenAPI(c) + defer sysManager.Close() + results, err := sysManager.ListBlockedModels() + c.Assert(err, jc.ErrorIsNil) + c.Assert(results, jc.DeepEquals, []params.ModelBlockInfo{ + { + Name: "controller", + UUID: s.State.ModelUUID(), + OwnerTag: s.AdminUserTag(c).String(), + Blocks: []string{ + "BlockChange", + "BlockDestroy", + }, + }, + }) +} + +func (s *legacySuite) TestRemoveBlocks(c *gc.C) { + s.State.SwitchBlockOn(state.DestroyBlock, "TestBlockDestroyModel") + s.State.SwitchBlockOn(state.ChangeBlock, "TestChangeBlock") + + sysManager := s.OpenAPI(c) + defer sysManager.Close() + err := sysManager.RemoveBlocks() + c.Assert(err, jc.ErrorIsNil) + + blocks, err := s.State.AllBlocksForController() + c.Assert(err, jc.ErrorIsNil) + c.Assert(blocks, gc.HasLen, 0) +} + +func (s *legacySuite) TestWatchAllModels(c *gc.C) { + // The WatchAllModels infrastructure is comprehensively tested + // else. This test just ensure that the API calls work end-to-end. 
+ sysManager := s.OpenAPI(c) + defer sysManager.Close() + + w, err := sysManager.WatchAllModels() + c.Assert(err, jc.ErrorIsNil) + defer func() { + err := w.Stop() + c.Assert(err, jc.ErrorIsNil) + }() + + deltasC := make(chan []multiwatcher.Delta) + go func() { + deltas, err := w.Next() + c.Assert(err, jc.ErrorIsNil) + deltasC <- deltas + }() + + select { + case deltas := <-deltasC: + c.Assert(deltas, gc.HasLen, 1) + modelInfo := deltas[0].Entity.(*multiwatcher.ModelInfo) + + env, err := s.State.Model() + c.Assert(err, jc.ErrorIsNil) + + c.Assert(modelInfo.ModelUUID, gc.Equals, env.UUID()) + c.Assert(modelInfo.Name, gc.Equals, env.Name()) + c.Assert(modelInfo.Life, gc.Equals, multiwatcher.Life("alive")) + c.Assert(modelInfo.Owner, gc.Equals, env.Owner().Id()) + c.Assert(modelInfo.ControllerUUID, gc.Equals, env.ControllerUUID()) + case <-time.After(testing.LongWait): + c.Fatal("timed out") + } +} + +func (s *legacySuite) TestAPIServerCanShutdownWithOutstandingNext(c *gc.C) { + + lis, err := net.Listen("tcp", "localhost:0") + c.Assert(err, jc.ErrorIsNil) + + srv, err := apiserver.NewServer(s.State, lis, apiserver.ServerConfig{ + Clock: clock.WallClock, + Cert: testing.ServerCert, + Key: testing.ServerKey, + Tag: names.NewMachineTag("0"), + DataDir: c.MkDir(), + LogDir: c.MkDir(), + NewObserver: func() observer.Observer { return &fakeobserver.Instance{} }, + AutocertURL: "https://0.1.2.3/no-autocert-here", + }) + c.Assert(err, gc.IsNil) + + // Connect to the API server we've just started. 
+ apiInfo := s.APIInfo(c) + apiInfo.Addrs = []string{lis.Addr().String()} + apiInfo.ModelTag = names.ModelTag{} + apiState, err := api.Open(apiInfo, api.DialOpts{}) + sysManager := controller.NewClient(apiState) + defer sysManager.Close() + + w, err := sysManager.WatchAllModels() + c.Assert(err, jc.ErrorIsNil) + defer w.Stop() + + deltasC := make(chan struct{}, 2) + go func() { + defer close(deltasC) + for { + _, err := w.Next() + if err != nil { + return + } + deltasC <- struct{}{} + } + }() + // Read the first event. + select { + case <-deltasC: + case <-time.After(testing.LongWait): + c.Fatal("timed out") + } + // Wait a little while for the Next call to actually arrive. + time.Sleep(testing.ShortWait) + + // We should be able to close the server instance + // even when there's an outstanding Next call. + srvStopped := make(chan struct{}) + go func() { + srv.Stop() + close(srvStopped) + }() + + select { + case <-srvStopped: + case <-time.After(testing.LongWait): + c.Fatal("timed out waiting for server to stop") + } + + // Check that the Next call has returned too. 
+ select { + case _, ok := <-deltasC: + if ok { + c.Fatalf("got unexpected event from deltasC") + } + case <-time.After(testing.LongWait): + c.Fatal("timed out") + } +} + +func (s *legacySuite) TestModelStatus(c *gc.C) { + sysManager := s.OpenAPI(c) + defer sysManager.Close() + s.Factory.MakeMachine(c, nil) + modelTag := s.State.ModelTag() + results, err := sysManager.ModelStatus(modelTag) + c.Assert(err, jc.ErrorIsNil) + c.Assert(results, jc.DeepEquals, []base.ModelStatus{{ + UUID: modelTag.Id(), + TotalMachineCount: 1, + HostedMachineCount: 1, + ServiceCount: 0, + Owner: "admin", + Life: string(params.Alive), + Machines: []base.Machine{{Id: "0", InstanceId: "id-2", Status: "pending"}}, + }}) +} + +func (s *legacySuite) TestGetControllerAccess(c *gc.C) { + controller := s.OpenAPI(c) + defer controller.Close() + err := controller.GrantController("fred@external", "add-model") + c.Assert(err, jc.ErrorIsNil) + access, err := controller.GetControllerAccess("fred@external") + c.Assert(err, jc.ErrorIsNil) + c.Assert(access, gc.Equals, permission.Access("add-model")) +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/api/deployer/deployer_test.go juju-core-2.0.0/src/github.com/juju/juju/api/deployer/deployer_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/api/deployer/deployer_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/api/deployer/deployer_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -17,6 +17,7 @@ "github.com/juju/juju/juju/testing" "github.com/juju/juju/network" "github.com/juju/juju/state" + "github.com/juju/juju/status" coretesting "github.com/juju/juju/testing" "github.com/juju/juju/watcher/watchertest" ) @@ -237,3 +238,21 @@ c.Assert(err, jc.ErrorIsNil) c.Assert(addresses, gc.DeepEquals, stateAddresses) } + +func (s *deployerSuite) TestUnitSetStatus(c *gc.C) { + unit, err := s.st.Unit(s.principal.Tag().(names.UnitTag)) + c.Assert(err, jc.ErrorIsNil) + err = unit.SetStatus(status.Blocked, "waiting", 
map[string]interface{}{"foo": "bar"}) + c.Assert(err, jc.ErrorIsNil) + + stateUnit, err := s.BackingState.Unit(unit.Name()) + c.Assert(err, jc.ErrorIsNil) + sInfo, err := stateUnit.Status() + c.Assert(err, jc.ErrorIsNil) + sInfo.Since = nil + c.Assert(sInfo, jc.DeepEquals, status.StatusInfo{ + Status: status.Blocked, + Message: "waiting", + Data: map[string]interface{}{"foo": "bar"}, + }) +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/api/deployer/unit.go juju-core-2.0.0/src/github.com/juju/juju/api/deployer/unit.go --- juju-core-2.0~beta15/src/github.com/juju/juju/api/deployer/unit.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/api/deployer/unit.go 2016-10-13 14:31:49.000000000 +0000 @@ -8,6 +8,7 @@ "github.com/juju/juju/api/common" "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/status" ) // Unit represents a juju unit as seen by the deployer worker. @@ -68,5 +69,20 @@ if err != nil { return err } + return result.OneError() +} + +// SetStatus sets the status of the unit. 
+func (u *Unit) SetStatus(unitStatus status.Status, info string, data map[string]interface{}) error { + var result params.ErrorResults + args := params.SetStatus{ + Entities: []params.EntityStatusArgs{ + {Tag: u.tag.String(), Status: unitStatus.String(), Info: info, Data: data}, + }, + } + err := u.st.facade.FacadeCall("SetStatus", args, &result) + if err != nil { + return err + } return result.OneError() } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/api/export_test.go juju-core-2.0.0/src/github.com/juju/juju/api/export_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/api/export_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/api/export_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -8,6 +8,7 @@ "github.com/juju/juju/api/base" "github.com/juju/juju/network" "github.com/juju/utils/clock" + "gopkg.in/juju/names.v2" ) var ( @@ -47,21 +48,31 @@ ServerRoot string RPCConnection RPCConnection Clock clock.Clock + Broken chan struct{} } // NewTestingState creates an api.State object that can be used for testing. It // isn't backed onto an actual API server, so actual RPC methods can't be // called on it. But it can be used for testing general behaviour. 
func NewTestingState(params TestingStateParams) Connection { + var modelTag names.ModelTag + if params.ModelTag != "" { + t, err := names.ParseModelTag(params.ModelTag) + if err != nil { + panic("invalid model tag") + } + modelTag = t + } st := &state{ client: params.RPCConnection, clock: params.Clock, addr: params.Address, - modelTag: params.ModelTag, + modelTag: modelTag, hostPorts: params.APIHostPorts, facadeVersions: params.FacadeVersions, serverScheme: params.ServerScheme, serverRootAddress: params.ServerRoot, + broken: params.Broken, } return st } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/api/facadeversions.go juju-core-2.0.0/src/github.com/juju/juju/api/facadeversions.go --- juju-core-2.0~beta15/src/github.com/juju/juju/api/facadeversions.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/api/facadeversions.go 2016-10-13 14:31:49.000000000 +0000 @@ -17,10 +17,11 @@ "AllModelWatcher": 2, "AllWatcher": 1, "Annotations": 2, - "Application": 1, + "Application": 2, "ApplicationScaler": 1, "Backups": 1, "Block": 2, + "Bundle": 1, "CharmRevisionUpdater": 2, "Charms": 2, "Cleaner": 2, @@ -46,6 +47,7 @@ "Logger": 1, "MachineActions": 1, "MachineManager": 2, + "MachineUndertaker": 1, "Machiner": 1, "MeterStatus": 1, "MetricsAdder": 2, diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/api/firewaller/firewaller.go juju-core-2.0.0/src/github.com/juju/juju/api/firewaller/firewaller.go --- juju-core-2.0~beta15/src/github.com/juju/juju/api/firewaller/firewaller.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/api/firewaller/firewaller.go 2016-10-13 14:31:49.000000000 +0000 @@ -41,7 +41,7 @@ } // ModelTag returns the current model's tag. 
-func (st *State) ModelTag() (names.ModelTag, error) { +func (st *State) ModelTag() (names.ModelTag, bool) { return st.facade.RawAPICaller().ModelTag() } @@ -96,16 +96,15 @@ // WatchOpenedPorts returns a StringsWatcher that notifies of // changes to the opened ports for the current model. func (st *State) WatchOpenedPorts() (watcher.StringsWatcher, error) { - modelTag, err := st.ModelTag() - if err != nil { - return nil, errors.Annotatef(err, "invalid model tag") + modelTag, ok := st.ModelTag() + if !ok { + return nil, errors.New("API connection is controller-only (should never happen)") } var results params.StringsWatchResults args := params.Entities{ Entities: []params.Entity{{Tag: modelTag.String()}}, } - err = st.facade.FacadeCall("WatchOpenedPorts", args, &results) - if err != nil { + if err := st.facade.FacadeCall("WatchOpenedPorts", args, &results); err != nil { return nil, err } if len(results.Results) != 1 { diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/api/firewaller/state_test.go juju-core-2.0.0/src/github.com/juju/juju/api/firewaller/state_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/api/firewaller/state_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/api/firewaller/state_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -22,7 +22,7 @@ func (s *stateSuite) SetUpTest(c *gc.C) { s.firewallerSuite.SetUpTest(c) - s.ModelWatcherTests = apitesting.NewModelWatcherTests(s.firewaller, s.BackingState, true) + s.ModelWatcherTests = apitesting.NewModelWatcherTests(s.firewaller, s.BackingState) } func (s *stateSuite) TearDownTest(c *gc.C) { diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/api/gui.go juju-core-2.0.0/src/github.com/juju/juju/api/gui.go --- juju-core-2.0~beta15/src/github.com/juju/juju/api/gui.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/api/gui.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,89 +0,0 @@ -// Copyright 2016 Canonical Ltd. 
-// Licensed under the AGPLv3, see LICENCE file for details. - -package api - -import ( - "bytes" - "encoding/json" - "io" - "net/http" - "net/url" - - "github.com/juju/errors" - "github.com/juju/version" - - "github.com/juju/juju/apiserver/params" -) - -const ( - guiArchivePath = "/gui-archive" - guiVersionPath = "/gui-version" -) - -// GUIArchives retrieves information about Juju GUI archives currently present -// in the Juju controller. -func (c *Client) GUIArchives() ([]params.GUIArchiveVersion, error) { - httpClient, err := c.st.RootHTTPClient() - if err != nil { - return nil, errors.Annotate(err, "cannot retrieve HTTP client") - } - var resp params.GUIArchiveResponse - if err = httpClient.Get(guiArchivePath, &resp); err != nil { - return nil, errors.Annotate(err, "cannot retrieve GUI archives info") - } - return resp.Versions, nil -} - -// UploadGUIArchive uploads a GUI archive to the controller over HTTPS, and -// reports about whether the upload updated the current GUI served by Juju. -func (c *Client) UploadGUIArchive(r io.ReadSeeker, hash string, size int64, vers version.Number) (current bool, err error) { - // Prepare the request. - v := url.Values{} - v.Set("version", vers.String()) - v.Set("hash", hash) - req, err := http.NewRequest("POST", guiArchivePath+"?"+v.Encode(), nil) - if err != nil { - return false, errors.Annotate(err, "cannot create upload request") - } - req.Header.Set("Content-Type", "application/x-tar-bzip2") - req.ContentLength = size - - // Retrieve a client and send the request. - httpClient, err := c.st.RootHTTPClient() - if err != nil { - return false, errors.Annotate(err, "cannot retrieve HTTP client") - } - var resp params.GUIArchiveVersion - if err = httpClient.Do(req, r, &resp); err != nil { - return false, errors.Annotate(err, "cannot upload the GUI archive") - } - return resp.Current, nil -} - -// SelectGUIVersion selects which version of the Juju GUI is served by the -// controller. 
-func (c *Client) SelectGUIVersion(vers version.Number) error { - // Prepare the request. - req, err := http.NewRequest("PUT", guiVersionPath, nil) - if err != nil { - return errors.Annotate(err, "cannot create PUT request") - } - req.Header.Set("Content-Type", params.ContentTypeJSON) - content, err := json.Marshal(params.GUIVersionRequest{ - Version: vers, - }) - if err != nil { - errors.Annotate(err, "cannot marshal request body") - } - - // Retrieve a client and send the request. - httpClient, err := c.st.RootHTTPClient() - if err != nil { - return errors.Annotate(err, "cannot retrieve HTTP client") - } - if err = httpClient.Do(req, bytes.NewReader(content), nil); err != nil { - return errors.Annotate(err, "cannot select GUI version") - } - return nil -} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/api/gui_test.go juju-core-2.0.0/src/github.com/juju/juju/api/gui_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/api/gui_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/api/gui_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,166 +0,0 @@ -// Copyright 2016 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package api_test - -import ( - "bytes" - "encoding/json" - "io/ioutil" - "net/http" - - jc "github.com/juju/testing/checkers" - "github.com/juju/version" - gc "gopkg.in/check.v1" - - "github.com/juju/juju/apiserver/params" -) - -// sendJSONResponse encodes the given content as JSON and writes it to the -// given response writer. 
-func sendJSONResponse(c *gc.C, w http.ResponseWriter, content interface{}) { - w.Header().Set("Content-Type", params.ContentTypeJSON) - encoder := json.NewEncoder(w) - err := encoder.Encode(content) - c.Assert(err, jc.ErrorIsNil) -} - -func (s *clientSuite) TestGUIArchives(c *gc.C) { - client := s.APIState.Client() - called := false - response := params.GUIArchiveResponse{ - Versions: []params.GUIArchiveVersion{{ - Version: version.MustParse("1.0.0"), - SHA256: "hash1", - Current: false, - }, { - Version: version.MustParse("2.0.0"), - SHA256: "hash2", - Current: true, - }}, - } - - // Set up a fake endpoint for tests. - defer fakeAPIEndpoint(c, client, "/gui-archive", "GET", - func(w http.ResponseWriter, req *http.Request) { - defer req.Body.Close() - called = true - sendJSONResponse(c, w, response) - }, - ).Close() - - versions, err := client.GUIArchives() - c.Assert(err, jc.ErrorIsNil) - c.Assert(versions, jc.DeepEquals, response.Versions) - c.Assert(called, jc.IsTrue) -} - -func (s *clientSuite) TestGUIArchivesError(c *gc.C) { - client := s.APIState.Client() - - // Set up a fake endpoint for tests. - defer fakeAPIEndpoint(c, client, "/gui-archive", "GET", - func(w http.ResponseWriter, req *http.Request) { - defer req.Body.Close() - w.WriteHeader(http.StatusBadRequest) - }, - ).Close() - - versions, err := client.GUIArchives() - c.Assert(err, gc.ErrorMatches, "cannot retrieve GUI archives info: .*") - c.Assert(versions, gc.IsNil) -} - -func (s *clientSuite) TestUploadGUIArchive(c *gc.C) { - client := s.APIState.Client() - called := false - archive := []byte("archive content") - hash, size, vers := "archive-hash", int64(len(archive)), version.MustParse("2.1.0") - - // Set up a fake endpoint for tests. - defer fakeAPIEndpoint(c, client, "/gui-archive", "POST", - func(w http.ResponseWriter, req *http.Request) { - defer req.Body.Close() - called = true - err := req.ParseForm() - c.Assert(err, jc.ErrorIsNil) - // Check version and content length. 
- c.Assert(req.Form.Get("version"), gc.Equals, vers.String()) - c.Assert(req.ContentLength, gc.Equals, size) - // Check request body. - obtainedArchive, err := ioutil.ReadAll(req.Body) - c.Assert(err, jc.ErrorIsNil) - c.Assert(obtainedArchive, gc.DeepEquals, archive) - // Check hash. - h := req.Form.Get("hash") - c.Assert(h, gc.Equals, hash) - // Send the response. - sendJSONResponse(c, w, params.GUIArchiveVersion{ - Current: true, - }) - }, - ).Close() - - current, err := client.UploadGUIArchive(bytes.NewReader(archive), hash, size, vers) - c.Assert(err, jc.ErrorIsNil) - c.Assert(current, jc.IsTrue) - c.Assert(called, jc.IsTrue) -} - -func (s *clientSuite) TestUploadGUIArchiveError(c *gc.C) { - client := s.APIState.Client() - archive := []byte("archive content") - hash, size, vers := "archive-hash", int64(len(archive)), version.MustParse("2.1.0") - - // Set up a fake endpoint for tests. - defer fakeAPIEndpoint(c, client, "/gui-archive", "POST", - func(w http.ResponseWriter, req *http.Request) { - defer req.Body.Close() - w.WriteHeader(http.StatusBadRequest) - }, - ).Close() - - current, err := client.UploadGUIArchive(bytes.NewReader(archive), hash, size, vers) - c.Assert(err, gc.ErrorMatches, "cannot upload the GUI archive: .*") - c.Assert(current, jc.IsFalse) -} - -func (s *clientSuite) TestSelectGUIVersion(c *gc.C) { - client := s.APIState.Client() - called := false - vers := version.MustParse("2.0.42") - - // Set up a fake endpoint for tests. - defer fakeAPIEndpoint(c, client, "/gui-version", "PUT", - func(w http.ResponseWriter, req *http.Request) { - defer req.Body.Close() - called = true - // Check request body. 
- var request params.GUIVersionRequest - decoder := json.NewDecoder(req.Body) - err := decoder.Decode(&request) - c.Assert(err, jc.ErrorIsNil) - c.Assert(request.Version, gc.Equals, vers) - }, - ).Close() - - err := client.SelectGUIVersion(vers) - c.Assert(err, jc.ErrorIsNil) - c.Assert(called, jc.IsTrue) -} - -func (s *clientSuite) TestSelectGUIVersionError(c *gc.C) { - client := s.APIState.Client() - vers := version.MustParse("2.0.42") - - // Set up a fake endpoint for tests. - defer fakeAPIEndpoint(c, client, "/gui-version", "PUT", - func(w http.ResponseWriter, req *http.Request) { - defer req.Body.Close() - w.WriteHeader(http.StatusBadRequest) - }, - ).Close() - - err := client.SelectGUIVersion(vers) - c.Assert(err, gc.ErrorMatches, "cannot select GUI version: .*") -} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/api/highavailability/client.go juju-core-2.0.0/src/github.com/juju/juju/api/highavailability/client.go --- juju-core-2.0~beta15/src/github.com/juju/juju/api/highavailability/client.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/api/highavailability/client.go 2016-10-13 14:31:49.000000000 +0000 @@ -7,7 +7,6 @@ "github.com/juju/errors" "github.com/juju/loggo" "github.com/juju/replicaset" - "gopkg.in/juju/names.v2" "github.com/juju/juju/api/base" "github.com/juju/juju/apiserver/params" @@ -20,18 +19,13 @@ // Client provides access to the high availability service, used to manage controllers. type Client struct { base.ClientFacade - facade base.FacadeCaller - modelTag names.ModelTag + facade base.FacadeCaller } // NewClient returns a new HighAvailability client. 
func NewClient(caller base.APICallCloser) *Client { - modelTag, err := caller.ModelTag() - if err != nil { - logger.Errorf("ignoring invalid model tag: %v", err) - } frontend, backend := base.NewClientFacade(caller, "HighAvailability") - return &Client{ClientFacade: frontend, facade: backend, modelTag: modelTag} + return &Client{ClientFacade: frontend, facade: backend} } // EnableHA ensures the availability of Juju controllers. @@ -42,7 +36,6 @@ var results params.ControllersChangeResults arg := params.ControllersSpecs{ Specs: []params.ControllersSpec{{ - ModelTag: c.modelTag.String(), NumControllers: numControllers, Constraints: cons, Placement: placement, @@ -66,7 +59,12 @@ // to shut down their mongo server. func (c *Client) MongoUpgradeMode(v mongo.Version) (params.MongoUpgradeResults, error) { arg := params.UpgradeMongoParams{ - Target: v, + Target: params.MongoVersion{ + Major: v.Major, + Minor: v.Minor, + Patch: v.Patch, + StorageEngine: string(v.StorageEngine), + }, } results := params.MongoUpgradeResults{} if err := c.facade.FacadeCall("StopHAReplicationForUpgrade", arg, &results); err != nil { diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/api/http_test.go juju-core-2.0.0/src/github.com/juju/juju/api/http_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/api/http_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/api/http_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -110,7 +110,7 @@ }, }) }, - expectError: `GET http://.*/: some error`, + expectError: `.*some error$`, expectErrorCode: params.CodeBadRequest, expectErrorInfo: ¶ms.ErrorInfo{ MacaroonPath: "foo", diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/api/instancepoller/machine_test.go juju-core-2.0.0/src/github.com/juju/juju/api/instancepoller/machine_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/api/instancepoller/machine_test.go 2016-08-16 08:56:25.000000000 +0000 +++ 
juju-core-2.0.0/src/github.com/juju/juju/api/instancepoller/machine_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -208,14 +208,14 @@ var called int results := params.StatusResults{ Results: []params.StatusResult{{ - Status: status.StatusProvisioning.String(), + Status: status.Provisioning.String(), }}, } apiCaller := successAPICaller(c, "InstanceStatus", entitiesArgs, results, &called) machine := instancepoller.NewMachine(apiCaller, s.tag, params.Alive) statusResult, err := machine.InstanceStatus() c.Check(err, jc.ErrorIsNil) - c.Check(statusResult.Status, gc.DeepEquals, status.StatusProvisioning.String()) + c.Check(statusResult.Status, gc.DeepEquals, status.Provisioning.String()) c.Check(called, gc.Equals, 1) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/api/interface.go juju-core-2.0.0/src/github.com/juju/juju/api/interface.go --- juju-core-2.0~beta15/src/github.com/juju/juju/api/interface.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/api/interface.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,6 +4,7 @@ package api import ( + "net/url" "time" "github.com/juju/errors" @@ -42,7 +43,8 @@ CACert string // ModelTag holds the model tag for the model we are - // trying to connect to. + // trying to connect to. If this is empty, a controller-only + // login will be made. ModelTag names.ModelTag // ...but this block of fields is all about the authentication mechanism @@ -117,7 +119,7 @@ Timeout time.Duration // RetryDelay is the amount of time to wait between - // unsucssful connection attempts. + // unsuccessful connection attempts. RetryDelay time.Duration // BakeryClient is the httpbakery Client, which @@ -148,7 +150,7 @@ type OpenFunc func(*Info, DialOpts) (Connection, error) // Connection exists purely to make api-opening funcs mockable. 
It's just a -// dumb copy of all the methods on api.Connection; we can and should be extracting +// dumb copy of all the methods on api.state; we can and should be extracting // smaller and more relevant interfaces (and dropping some of them too). // Connection represents a connection to a Juju API server. @@ -156,10 +158,19 @@ // This first block of methods is pretty close to a sane Connection interface. Close() error - Broken() <-chan struct{} Addr() string APIHostPorts() [][]network.HostPort + // Broken returns a channel which will be closed if the connection + // is detected to be broken, either because the underlying + // connection has closed or because API pings have failed. + Broken() <-chan struct{} + + // IsBroken returns whether the connection is broken. It checks + // the Broken channel and if that is open, attempts a connection + // ping. + IsBroken() bool + // These are a bit off -- ServerVersion is apparently not known until after // Login()? Maybe evidence of need for a separate AuthenticatedConnection..? Login(name names.Tag, password, nonce string, ms []macaroon.Slice) error @@ -169,19 +180,16 @@ // This should not be used outside the api/* packages or tests. base.APICaller - // ControllerTag returns the model tag of the controller - // (as opposed to the model tag of the currently connected - // model inside that controller). + // ControllerTag returns the tag of the controller. // This could be defined on base.APICaller. - ControllerTag() (names.ModelTag, error) + ControllerTag() names.ControllerTag // All the rest are strange and questionable and deserve extra attention // and/or discussion. - // Something-or-other expects Ping to exist, and *maybe* the heartbeat - // *should* be handled outside the State type, but it's also handled - // inside it as well. We should figure this out sometime -- we should - // either expose Ping() or Broken() but not both. + // Ping makes an API request which checks if the connection is + // still functioning. 
+ // NOTE: This method is deprecated. Please use IsBroken or Broken instead. Ping() error // I think this is actually dead code. It's tested, at least, so I'm @@ -192,9 +200,15 @@ // connection. AuthTag() names.Tag - // ReadOnly returns whether the authorized user is connected to the model - // in read-only mode. - ReadOnly() bool + // ModelAccess returns the access level of authorized user to the model. + ModelAccess() string + + // ControllerAccess returns the access level of authorized user to the controller. + ControllerAccess() string + + // CookieURL returns the URL that HTTP cookies for the API will be + // associated with. + CookieURL() *url.URL // These methods expose a bunch of worker-specific facades, and basically // just should not exist; but removing them is too noisy for a single CL. diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/api/keymanager/client_test.go juju-core-2.0.0/src/github.com/juju/juju/api/keymanager/client_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/api/keymanager/client_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/api/keymanager/client_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -139,7 +139,7 @@ {Error: nil}, {Error: clientError("invalid ssh key: missing")}, }) - s.assertModelKeys(c, []string{"invalid", key3}) + s.assertModelKeys(c, []string{key3, "invalid"}) } func (s *keymanagerSuite) TestImportKeys(c *gc.C) { diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/api/machiner/machiner_test.go juju-core-2.0.0/src/github.com/juju/juju/api/machiner/machiner_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/api/machiner/machiner_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/api/machiner/machiner_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -71,16 +71,16 @@ statusInfo, err := s.machine.Status() c.Assert(err, jc.ErrorIsNil) - c.Assert(statusInfo.Status, gc.Equals, status.StatusPending) + c.Assert(statusInfo.Status, 
gc.Equals, status.Pending) c.Assert(statusInfo.Message, gc.Equals, "") c.Assert(statusInfo.Data, gc.HasLen, 0) - err = machine.SetStatus(status.StatusStarted, "blah", nil) + err = machine.SetStatus(status.Started, "blah", nil) c.Assert(err, jc.ErrorIsNil) statusInfo, err = s.machine.Status() c.Assert(err, jc.ErrorIsNil) - c.Assert(statusInfo.Status, gc.Equals, status.StatusStarted) + c.Assert(statusInfo.Status, gc.Equals, status.Started) c.Assert(statusInfo.Message, gc.Equals, "blah") c.Assert(statusInfo.Data, gc.HasLen, 0) c.Assert(statusInfo.Since, gc.NotNil) @@ -193,7 +193,7 @@ // Change something other than the lifecycle and make sure it's // not detected. - err = machine.SetStatus(status.StatusStarted, "not really", nil) + err = machine.SetStatus(status.Started, "not really", nil) c.Assert(err, jc.ErrorIsNil) wc.AssertNoChange() diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/api/machineundertaker/package_test.go juju-core-2.0.0/src/github.com/juju/juju/api/machineundertaker/package_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/api/machineundertaker/package_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/api/machineundertaker/package_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,14 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package machineundertaker_test + +import ( + "testing" + + gc "gopkg.in/check.v1" +) + +func TestPackage(t *testing.T) { + gc.TestingT(t) +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/api/machineundertaker/undertaker.go juju-core-2.0.0/src/github.com/juju/juju/api/machineundertaker/undertaker.go --- juju-core-2.0~beta15/src/github.com/juju/juju/api/machineundertaker/undertaker.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/api/machineundertaker/undertaker.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,121 @@ +// Copyright 2016 Canonical Ltd. 
+// Licensed under the AGPLv3, see LICENCE file for details. + +package machineundertaker + +import ( + "github.com/juju/errors" + "gopkg.in/juju/names.v2" + + "github.com/juju/juju/api/base" + "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/network" + "github.com/juju/juju/watcher" +) + +// NewWatcherFunc exists to let us test WatchMachineRemovals. +type NewWatcherFunc func(base.APICaller, params.NotifyWatchResult) watcher.NotifyWatcher + +// API provides access to the machine undertaker API facade. +type API struct { + facade base.FacadeCaller + modelTag names.ModelTag + newWatcher NewWatcherFunc +} + +// NewAPI creates a new client-side machine undertaker facade. +func NewAPI(caller base.APICaller, newWatcher NewWatcherFunc) (*API, error) { + modelTag, ok := caller.ModelTag() + if !ok { + return nil, errors.New("machine undertaker client requires a model API connection") + } + api := API{ + facade: base.NewFacadeCaller(caller, "MachineUndertaker"), + modelTag: modelTag, + newWatcher: newWatcher, + } + return &api, nil +} + +// AllMachineRemovals returns all the machines that have been marked +// ready to clean up. +func (api *API) AllMachineRemovals() ([]names.MachineTag, error) { + var results params.EntitiesResults + args := wrapEntities(api.modelTag) + err := api.facade.FacadeCall("AllMachineRemovals", &args, &results) + if err != nil { + return nil, errors.Trace(err) + } + if len(results.Results) != 1 { + return nil, errors.Errorf("expected one result, got %d", len(results.Results)) + } + result := results.Results[0] + if result.Error != nil { + return nil, errors.Trace(result.Error) + } + machines := make([]names.MachineTag, len(result.Entities)) + for i, entity := range result.Entities { + tag, err := names.ParseMachineTag(entity.Tag) + if err != nil { + return nil, errors.Trace(err) + } + machines[i] = tag + } + return machines, nil +} + +// GetProviderInterfaceInfo gets the provider details for all of the +// interfaces for one machine. 
+func (api *API) GetProviderInterfaceInfo(machine names.MachineTag) ([]network.ProviderInterfaceInfo, error) { + var result params.ProviderInterfaceInfoResults + args := wrapEntities(machine) + err := api.facade.FacadeCall("GetMachineProviderInterfaceInfo", &args, &result) + if err != nil { + return nil, errors.Trace(err) + } + if len(result.Results) != 1 { + return nil, errors.Errorf("expected one result, got %d", len(result.Results)) + } + item := result.Results[0] + if item.MachineTag != machine.String() { + return nil, errors.Errorf("expected interface info for %s but got %s", machine, item.MachineTag) + } + infos := make([]network.ProviderInterfaceInfo, len(item.Interfaces)) + for i, info := range item.Interfaces { + infos[i].InterfaceName = info.InterfaceName + infos[i].MACAddress = info.MACAddress + infos[i].ProviderId = network.Id(info.ProviderId) + } + return infos, nil +} + +// CompleteRemoval finishes the removal of the machine in the database +// after any provider resources are cleaned up. +func (api *API) CompleteRemoval(machine names.MachineTag) error { + args := wrapEntities(machine) + return api.facade.FacadeCall("CompleteMachineRemovals", &args, nil) +} + +// WatchMachineRemovals registers to be notified when a machine +// removal is requested. 
+func (api *API) WatchMachineRemovals() (watcher.NotifyWatcher, error) { + var results params.NotifyWatchResults + args := wrapEntities(api.modelTag) + err := api.facade.FacadeCall("WatchMachineRemovals", &args, &results) + if err != nil { + return nil, errors.Trace(err) + } + if len(results.Results) != 1 { + return nil, errors.Errorf("expected one result, got %d", len(results.Results)) + } + result := results.Results[0] + if err := result.Error; err != nil { + return nil, errors.Trace(result.Error) + } + w := api.newWatcher(api.facade.RawAPICaller(), result) + return w, nil +} + +func wrapEntities(tag names.Tag) params.Entities { + return params.Entities{Entities: []params.Entity{{Tag: tag.String()}}} +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/api/machineundertaker/undertaker_test.go juju-core-2.0.0/src/github.com/juju/juju/api/machineundertaker/undertaker_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/api/machineundertaker/undertaker_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/api/machineundertaker/undertaker_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,349 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+ +package machineundertaker_test + +import ( + "github.com/juju/errors" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + "gopkg.in/juju/names.v2" + + "github.com/juju/juju/api/base" + "github.com/juju/juju/api/base/testing" + "github.com/juju/juju/api/machineundertaker" + "github.com/juju/juju/apiserver/common" + "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/network" + coretesting "github.com/juju/juju/testing" + "github.com/juju/juju/watcher" +) + +type undertakerSuite struct { + coretesting.BaseSuite +} + +var _ = gc.Suite(&undertakerSuite{}) + +func (s *undertakerSuite) TestRequiresModelConnection(c *gc.C) { + api, err := machineundertaker.NewAPI(&fakeAPICaller{hasModelTag: false}, nil) + c.Assert(err, gc.ErrorMatches, "machine undertaker client requires a model API connection") + c.Assert(api, gc.IsNil) + api, err = machineundertaker.NewAPI(&fakeAPICaller{hasModelTag: true}, nil) + c.Assert(err, jc.ErrorIsNil) + c.Assert(api, gc.NotNil) +} + +func (s *undertakerSuite) TestAllMachineRemovals(c *gc.C) { + caller := func(facade string, version int, id, request string, arg, result interface{}) error { + c.Check(facade, gc.Equals, "MachineUndertaker") + c.Check(request, gc.Equals, "AllMachineRemovals") + c.Check(version, gc.Equals, 0) + c.Check(id, gc.Equals, "") + c.Check(arg, gc.DeepEquals, wrapEntities(coretesting.ModelTag.String())) + c.Assert(result, gc.FitsTypeOf, ¶ms.EntitiesResults{}) + *result.(*params.EntitiesResults) = *wrapEntitiesResults("machine-23", "machine-42") + return nil + } + api := makeAPI(c, caller) + results, err := api.AllMachineRemovals() + c.Assert(err, jc.ErrorIsNil) + c.Assert(results, gc.DeepEquals, []names.MachineTag{ + names.NewMachineTag("23"), + names.NewMachineTag("42"), + }) +} + +func (s *undertakerSuite) TestAllMachineRemovals_Error(c *gc.C) { + caller := func(facade string, version int, id, request string, arg, result interface{}) error { + return errors.New("restless year") + } + api := 
makeAPI(c, caller) + results, err := api.AllMachineRemovals() + c.Assert(err, gc.ErrorMatches, "restless year") + c.Assert(results, gc.IsNil) +} + +func (s *undertakerSuite) TestAllMachineRemovals_BadTag(c *gc.C) { + caller := func(facade string, version int, id, request string, arg, result interface{}) error { + c.Assert(result, gc.FitsTypeOf, ¶ms.EntitiesResults{}) + *result.(*params.EntitiesResults) = *wrapEntitiesResults("machine-23", "application-burp") + return nil + } + api := makeAPI(c, caller) + results, err := api.AllMachineRemovals() + c.Assert(err, gc.ErrorMatches, `"application-burp" is not a valid machine tag`) + c.Assert(results, gc.IsNil) +} + +func (s *undertakerSuite) TestAllMachineRemovals_ErrorResult(c *gc.C) { + caller := func(facade string, version int, id, request string, arg, result interface{}) error { + c.Assert(result, gc.FitsTypeOf, ¶ms.EntitiesResults{}) + *result.(*params.EntitiesResults) = params.EntitiesResults{ + Results: []params.EntitiesResult{{ + Error: common.ServerError(errors.New("everythingisterrible")), + }}, + } + return nil + } + api := makeAPI(c, caller) + results, err := api.AllMachineRemovals() + c.Assert(err, gc.ErrorMatches, "everythingisterrible") + c.Assert(results, gc.IsNil) +} + +func (s *undertakerSuite) TestAllMachineRemovals_TooManyResults(c *gc.C) { + caller := func(facade string, version int, id, request string, arg, result interface{}) error { + c.Assert(result, gc.FitsTypeOf, ¶ms.EntitiesResults{}) + *result.(*params.EntitiesResults) = params.EntitiesResults{ + Results: []params.EntitiesResult{{ + Entities: []params.Entity{{Tag: "machine-1"}}, + }, { + Entities: []params.Entity{{Tag: "machine-2"}}, + }}, + } + return nil + } + api := makeAPI(c, caller) + results, err := api.AllMachineRemovals() + c.Assert(err, gc.ErrorMatches, "expected one result, got 2") + c.Assert(results, gc.IsNil) +} + +func (s *undertakerSuite) TestAllMachineRemovals_TooFewResults(c *gc.C) { + caller := func(facade string, version 
int, id, request string, arg, result interface{}) error { + c.Assert(result, gc.FitsTypeOf, ¶ms.EntitiesResults{}) + *result.(*params.EntitiesResults) = params.EntitiesResults{} + return nil + } + api := makeAPI(c, caller) + results, err := api.AllMachineRemovals() + c.Assert(err, gc.ErrorMatches, "expected one result, got 0") + c.Assert(results, gc.IsNil) +} + +func (*undertakerSuite) TestGetInfo(c *gc.C) { + caller := func(facade string, version int, id, request string, arg, result interface{}) error { + c.Check(facade, gc.Equals, "MachineUndertaker") + c.Check(request, gc.Equals, "GetMachineProviderInterfaceInfo") + c.Check(version, gc.Equals, 0) + c.Check(id, gc.Equals, "") + c.Check(arg, gc.DeepEquals, wrapEntities("machine-100")) + c.Assert(result, gc.FitsTypeOf, ¶ms.ProviderInterfaceInfoResults{}) + *result.(*params.ProviderInterfaceInfoResults) = params.ProviderInterfaceInfoResults{ + Results: []params.ProviderInterfaceInfoResult{{ + MachineTag: "machine-100", + Interfaces: []params.ProviderInterfaceInfo{{ + InterfaceName: "hamster huey", + MACAddress: "calvin", + ProviderId: "1234", + }, { + InterfaceName: "happy hamster hop", + MACAddress: "hobbes", + ProviderId: "1235", + }}, + }}, + } + return nil + } + api := makeAPI(c, caller) + results, err := api.GetProviderInterfaceInfo(names.NewMachineTag("100")) + c.Assert(err, jc.ErrorIsNil) + c.Assert(results, gc.DeepEquals, []network.ProviderInterfaceInfo{{ + InterfaceName: "hamster huey", + MACAddress: "calvin", + ProviderId: "1234", + }, { + InterfaceName: "happy hamster hop", + MACAddress: "hobbes", + ProviderId: "1235", + }}) +} + +func (*undertakerSuite) TestGetInfo_GenericError(c *gc.C) { + caller := func(facade string, version int, id, request string, arg, result interface{}) error { + return errors.New("gooey kablooey") + } + api := makeAPI(c, caller) + results, err := api.GetProviderInterfaceInfo(names.NewMachineTag("100")) + c.Assert(err, gc.ErrorMatches, "gooey kablooey") + c.Assert(results, 
gc.IsNil) +} + +func (*undertakerSuite) TestGetInfo_TooMany(c *gc.C) { + caller := func(facade string, version int, id, request string, arg, result interface{}) error { + c.Assert(result, gc.FitsTypeOf, ¶ms.ProviderInterfaceInfoResults{}) + *result.(*params.ProviderInterfaceInfoResults) = params.ProviderInterfaceInfoResults{ + Results: []params.ProviderInterfaceInfoResult{{ + MachineTag: "machine-100", + Interfaces: []params.ProviderInterfaceInfo{{ + InterfaceName: "hamster huey", + MACAddress: "calvin", + ProviderId: "1234", + }}, + }, { + MachineTag: "machine-101", + Interfaces: []params.ProviderInterfaceInfo{{ + InterfaceName: "hamster huey", + MACAddress: "calvin", + ProviderId: "1234", + }}, + }}, + } + return nil + } + api := makeAPI(c, caller) + results, err := api.GetProviderInterfaceInfo(names.NewMachineTag("100")) + c.Assert(err, gc.ErrorMatches, "expected one result, got 2") + c.Assert(results, gc.IsNil) +} + +func (*undertakerSuite) TestGetInfo_BadMachine(c *gc.C) { + caller := func(facade string, version int, id, request string, arg, result interface{}) error { + c.Assert(result, gc.FitsTypeOf, ¶ms.ProviderInterfaceInfoResults{}) + *result.(*params.ProviderInterfaceInfoResults) = params.ProviderInterfaceInfoResults{ + Results: []params.ProviderInterfaceInfoResult{{ + MachineTag: "machine-101", + Interfaces: []params.ProviderInterfaceInfo{{ + InterfaceName: "hamster huey", + MACAddress: "calvin", + ProviderId: "1234", + }}, + }}, + } + return nil + } + api := makeAPI(c, caller) + results, err := api.GetProviderInterfaceInfo(names.NewMachineTag("100")) + c.Assert(err, gc.ErrorMatches, "expected interface info for machine-100 but got machine-101") + c.Assert(results, gc.IsNil) +} + +func (*undertakerSuite) TestCompleteRemoval(c *gc.C) { + caller := func(facade string, version int, id, request string, arg, result interface{}) error { + c.Check(facade, gc.Equals, "MachineUndertaker") + c.Check(request, gc.Equals, "CompleteMachineRemovals") + 
c.Check(version, gc.Equals, 0) + c.Check(id, gc.Equals, "") + c.Check(arg, gc.DeepEquals, wrapEntities("machine-100")) + c.Check(result, gc.DeepEquals, nil) + return errors.New("gooey kablooey") + } + api := makeAPI(c, caller) + err := api.CompleteRemoval(names.NewMachineTag("100")) + c.Assert(err, gc.ErrorMatches, "gooey kablooey") +} + +func (*undertakerSuite) TestWatchMachineRemovals_CallFailed(c *gc.C) { + caller := func(facade string, version int, id, request string, arg, result interface{}) error { + c.Check(facade, gc.Equals, "MachineUndertaker") + c.Check(request, gc.Equals, "WatchMachineRemovals") + c.Check(version, gc.Equals, 0) + c.Check(id, gc.Equals, "") + c.Check(arg, gc.DeepEquals, wrapEntities(coretesting.ModelTag.String())) + return errors.New("oopsy") + } + api := makeAPI(c, caller) + w, err := api.WatchMachineRemovals() + c.Check(w, gc.IsNil) + c.Assert(err, gc.ErrorMatches, "oopsy") +} + +func (*undertakerSuite) TestWatchMachineRemovals_ErrorInWatcher(c *gc.C) { + caller := func(facade string, version int, id, request string, arg, result interface{}) error { + c.Assert(result, gc.FitsTypeOf, ¶ms.NotifyWatchResults{}) + *result.(*params.NotifyWatchResults) = params.NotifyWatchResults{ + Results: []params.NotifyWatchResult{{ + Error: ¶ms.Error{Message: "blammo"}, + }}, + } + return nil + } + api := makeAPI(c, caller) + w, err := api.WatchMachineRemovals() + c.Check(w, gc.IsNil) + c.Assert(err, gc.ErrorMatches, "blammo") +} + +func (*undertakerSuite) TestWatchMachineRemovals_TooMany(c *gc.C) { + caller := func(facade string, version int, id, request string, arg, result interface{}) error { + c.Assert(result, gc.FitsTypeOf, ¶ms.NotifyWatchResults{}) + *result.(*params.NotifyWatchResults) = params.NotifyWatchResults{ + Results: []params.NotifyWatchResult{{ + NotifyWatcherId: "2", + }, { + NotifyWatcherId: "3", + }}, + } + return nil + } + api := makeAPI(c, caller) + w, err := api.WatchMachineRemovals() + c.Check(w, gc.IsNil) + c.Assert(err, 
gc.ErrorMatches, "expected one result, got 2") +} + +func (*undertakerSuite) TestWatchMachineRemovals_Success(c *gc.C) { + caller := func(facade string, version int, id, request string, arg, result interface{}) error { + c.Assert(result, gc.FitsTypeOf, ¶ms.NotifyWatchResults{}) + *result.(*params.NotifyWatchResults) = params.NotifyWatchResults{ + Results: []params.NotifyWatchResult{{ + NotifyWatcherId: "2", + }}, + } + return nil + } + expectWatcher := &struct{ watcher.NotifyWatcher }{} + newWatcher := func(wcaller base.APICaller, result params.NotifyWatchResult) watcher.NotifyWatcher { + c.Check(wcaller, gc.NotNil) // not comparable + c.Check(result, gc.DeepEquals, params.NotifyWatchResult{ + NotifyWatcherId: "2", + }) + return expectWatcher + } + + api, err := machineundertaker.NewAPI(testing.APICallerFunc(caller), newWatcher) + c.Check(err, jc.ErrorIsNil) + w, err := api.WatchMachineRemovals() + c.Check(err, jc.ErrorIsNil) + c.Check(w, gc.Equals, expectWatcher) +} + +func makeAPI(c *gc.C, caller testing.APICallerFunc) *machineundertaker.API { + api, err := machineundertaker.NewAPI(caller, nil) + c.Assert(err, jc.ErrorIsNil) + return api +} + +func wrapEntities(tags ...string) *params.Entities { + return ¶ms.Entities{Entities: makeEntitySlice(tags...)} +} + +func makeEntitySlice(tags ...string) []params.Entity { + results := make([]params.Entity, len(tags)) + for i := range tags { + results[i].Tag = tags[i] + } + return results +} + +func wrapEntitiesResults(tags ...string) *params.EntitiesResults { + return ¶ms.EntitiesResults{ + Results: []params.EntitiesResult{{ + Entities: makeEntitySlice(tags...), + }}, + } +} + +type fakeAPICaller struct { + base.APICaller + hasModelTag bool +} + +func (c *fakeAPICaller) ModelTag() (names.ModelTag, bool) { + return names.ModelTag{}, c.hasModelTag +} + +func (c *fakeAPICaller) BestFacadeVersion(string) int { + return 0 +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/api/metricsdebug/client.go 
juju-core-2.0.0/src/github.com/juju/juju/api/metricsdebug/client.go --- juju-core-2.0~beta15/src/github.com/juju/juju/api/metricsdebug/client.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/api/metricsdebug/client.go 2016-10-13 14:31:49.000000000 +0000 @@ -22,8 +22,11 @@ // MetricsDebugClient defines the methods on the metricsdebug API end point. type MetricsDebugClient interface { - // GetMetrics will receive metrics collected by the given entity tag - GetMetrics(tag string) ([]params.MetricResult, error) + // GetMetrics will receive metrics collected by the given entity tags + // The tags act as a filter over what is to be returned. If no tags are + // supplied GetMetrics will return all the metrics recorded in the + // current model. + GetMetrics(tags ...string) ([]params.MetricResult, error) } // MeterStatusClient defines methods on the metricsdebug API end point. @@ -42,10 +45,12 @@ } // GetMetrics will receive metrics collected by the given entity -func (c *Client) GetMetrics(tag string) ([]params.MetricResult, error) { - p := params.Entities{Entities: []params.Entity{ - {tag}, - }} +func (c *Client) GetMetrics(tags ...string) ([]params.MetricResult, error) { + entities := make([]params.Entity, len(tags)) + for i, tag := range tags { + entities[i] = params.Entity{Tag: tag} + } + p := params.Entities{Entities: entities} results := new(params.MetricResults) if err := c.facade.FacadeCall("GetMetrics", p, results); err != nil { return nil, errors.Trace(err) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/api/metricsdebug/client_test.go juju-core-2.0.0/src/github.com/juju/juju/api/metricsdebug/client_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/api/metricsdebug/client_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/api/metricsdebug/client_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -100,6 +100,58 @@ c.Assert(called, jc.IsTrue) } +func (s *metricsdebugSuiteMock) 
TestGetMetricsForModel(c *gc.C) { + var called bool + now := time.Now() + apiCaller := basetesting.APICallerFunc( + func(objType string, + version int, + id, request string, + requestParam, response interface{}, + ) error { + c.Assert(request, gc.Equals, "GetMetrics") + entities := requestParam.(params.Entities) + c.Assert(entities, gc.DeepEquals, params.Entities{Entities: []params.Entity{}}) + result := response.(*params.MetricResults) + result.Results = []params.EntityMetrics{{ + Metrics: []params.MetricResult{{ + Key: "pings", + Value: "5", + Time: now, + }}, + Error: nil, + }} + called = true + return nil + }) + client := metricsdebug.NewClient(apiCaller) + metrics, err := client.GetMetrics() + c.Assert(err, jc.ErrorIsNil) + c.Assert(called, jc.IsTrue) + c.Assert(metrics, gc.HasLen, 1) + c.Assert(metrics[0].Key, gc.Equals, "pings") + c.Assert(metrics[0].Value, gc.Equals, "5") + c.Assert(metrics[0].Time, gc.Equals, now) +} + +func (s *metricsdebugSuiteMock) TestGetMetricsForModelFails(c *gc.C) { + var called bool + apiCaller := basetesting.APICallerFunc( + func(objType string, + version int, + id, request string, + requestParam, response interface{}, + ) error { + called = true + return errors.New("an error") + }) + client := metricsdebug.NewClient(apiCaller) + metrics, err := client.GetMetrics() + c.Assert(called, jc.IsTrue) + c.Assert(metrics, gc.IsNil) + c.Assert(err, gc.ErrorMatches, "an error") +} + func (s *metricsdebugSuiteMock) TestSetMeterStatus(c *gc.C) { var called bool apiCaller := basetesting.APICallerFunc( @@ -229,6 +281,32 @@ c.Assert(err, jc.ErrorIsNil) c.Assert(metrics1, gc.HasLen, 1) assertSameMetric(c, metrics1[0], metricUnit1) + + metrics2, err := s.manager.GetMetrics("unit-metered/0", "unit-metered/1") + c.Assert(err, jc.ErrorIsNil) + c.Assert(metrics2, gc.HasLen, 2) +} + +func (s *metricsdebugSuite) TestFeatureGetMetricsForModel(c *gc.C) { + meteredCharm := s.Factory.MakeCharm(c, &factory.CharmParams{Name: "metered", URL: 
"local:quantal/metered"}) + meteredService := s.Factory.MakeApplication(c, &factory.ApplicationParams{ + Charm: meteredCharm, + }) + unit0 := s.Factory.MakeUnit(c, &factory.UnitParams{Application: meteredService, SetCharmURL: true}) + unit1 := s.Factory.MakeUnit(c, &factory.UnitParams{Application: meteredService, SetCharmURL: true}) + + metricUnit0 := s.Factory.MakeMetric(c, &factory.MetricParams{ + Unit: unit0, + }) + metricUnit1 := s.Factory.MakeMetric(c, &factory.MetricParams{ + Unit: unit1, + }) + + metrics, err := s.manager.GetMetrics() + c.Assert(err, jc.ErrorIsNil) + c.Assert(metrics, gc.HasLen, 2) + assertSameMetric(c, metrics[0], metricUnit0) + assertSameMetric(c, metrics[1], metricUnit1) } func (s *metricsdebugSuite) TestFeatureGetMultipleMetricsWithService(c *gc.C) { diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/api/metricsmanager/client.go juju-core-2.0.0/src/github.com/juju/juju/api/metricsmanager/client.go --- juju-core-2.0~beta15/src/github.com/juju/juju/api/metricsmanager/client.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/api/metricsmanager/client.go 2016-10-13 14:31:49.000000000 +0000 @@ -29,9 +29,10 @@ // NewClient creates a new client for accessing the metricsmanager api func NewClient(apiCaller base.APICaller) (*Client, error) { - modelTag, err := apiCaller.ModelTag() - if err != nil { - return nil, errors.Trace(err) + modelTag, ok := apiCaller.ModelTag() + if !ok { + return nil, errors.New("metricsmanager client is not appropriate for controller-only API") + } facade := base.NewFacadeCaller(apiCaller, "MetricsManager") return &Client{ diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/api/migrationmaster/client.go juju-core-2.0.0/src/github.com/juju/juju/api/migrationmaster/client.go --- juju-core-2.0~beta15/src/github.com/juju/juju/api/migrationmaster/client.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/api/migrationmaster/client.go 2016-10-13 
14:31:49.000000000 +0000 @@ -4,9 +4,12 @@ package migrationmaster import ( + "encoding/json" + "github.com/juju/errors" "github.com/juju/version" "gopkg.in/juju/names.v2" + "gopkg.in/macaroon.v1" "github.com/juju/juju/api/base" "github.com/juju/juju/apiserver/params" @@ -46,12 +49,12 @@ return c.newWatcher(c.caller.RawAPICaller(), result), nil } -// GetMigrationStatus returns the details and progress of the latest +// MigrationStatus returns the details and progress of the latest // model migration. -func (c *Client) GetMigrationStatus() (migration.MigrationStatus, error) { +func (c *Client) MigrationStatus() (migration.MigrationStatus, error) { var empty migration.MigrationStatus var status params.MasterMigrationStatus - err := c.caller.FacadeCall("GetMigrationStatus", nil, &status) + err := c.caller.FacadeCall("MigrationStatus", nil, &status) if err != nil { return empty, errors.Trace(err) } @@ -67,7 +70,7 @@ } target := status.Spec.TargetInfo - controllerTag, err := names.ParseModelTag(target.ControllerTag) + controllerTag, err := names.ParseControllerTag(target.ControllerTag) if err != nil { return empty, errors.Annotatef(err, "parsing controller tag") } @@ -77,9 +80,17 @@ return empty, errors.Annotatef(err, "unable to parse auth tag") } + var macs []macaroon.Slice + if target.Macaroons != "" { + if err := json.Unmarshal([]byte(target.Macaroons), &macs); err != nil { + return empty, errors.Annotatef(err, "unmarshalling macaroon") + } + } + return migration.MigrationStatus{ MigrationId: status.MigrationId, ModelUUID: modelTag.Id(), + ExternalControl: status.Spec.ExternalControl, Phase: phase, PhaseChangedTime: status.PhaseChangedTime, TargetInfo: migration.TargetInfo{ @@ -88,6 +99,7 @@ CACert: target.CACert, AuthTag: authTag, Password: target.Password, + Macaroons: macs, }, }, nil } @@ -109,6 +121,31 @@ return c.caller.FacadeCall("SetStatusMessage", args, nil) } +// ModelInfo returns basic information about the model to be migrated. 
+func (c *Client) ModelInfo() (migration.ModelInfo, error) { + var info params.MigrationModelInfo + err := c.caller.FacadeCall("ModelInfo", nil, &info) + if err != nil { + return migration.ModelInfo{}, errors.Trace(err) + } + owner, err := names.ParseUserTag(info.OwnerTag) + if err != nil { + return migration.ModelInfo{}, errors.Trace(err) + } + return migration.ModelInfo{ + UUID: info.UUID, + Name: info.Name, + Owner: owner, + AgentVersion: info.AgentVersion, + }, nil +} + +// Prechecks verifies that the source controller and model are healthy +// and able to participate in a migration. +func (c *Client) Prechecks() error { + return c.caller.FacadeCall("Prechecks", nil, nil) +} + // Export returns a serialized representation of the model associated // with the API connection. The charms used by the model are also // returned. @@ -156,13 +193,13 @@ return c.newWatcher(c.caller.RawAPICaller(), result), nil } -// GetMinionReports returns details of the reports made by migration +// MinionReports returns details of the reports made by migration // minions to the controller for the current migration phase. 
-func (c *Client) GetMinionReports() (migration.MinionReports, error) { +func (c *Client) MinionReports() (migration.MinionReports, error) { var in params.MinionReports var out migration.MinionReports - err := c.caller.FacadeCall("GetMinionReports", nil, &in) + err := c.caller.FacadeCall("MinionReports", nil, &in) if err != nil { return out, errors.Trace(err) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/api/migrationmaster/client_test.go juju-core-2.0.0/src/github.com/juju/juju/api/migrationmaster/client_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/api/migrationmaster/client_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/api/migrationmaster/client_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,6 +4,7 @@ package migrationmaster_test import ( + "encoding/json" "time" "github.com/juju/errors" @@ -13,6 +14,7 @@ "github.com/juju/version" gc "gopkg.in/check.v1" "gopkg.in/juju/names.v2" + "gopkg.in/macaroon.v1" "github.com/juju/juju/api/base" apitesting "github.com/juju/juju/api/base/testing" @@ -60,44 +62,55 @@ c.Assert(err, gc.ErrorMatches, "boom") } -func (s *ClientSuite) TestGetMigrationStatus(c *gc.C) { +func (s *ClientSuite) TestMigrationStatus(c *gc.C) { + mac, err := macaroon.New([]byte("secret"), "id", "location") + c.Assert(err, jc.ErrorIsNil) + macs := []macaroon.Slice{{mac}} + macsJSON, err := json.Marshal(macs) + c.Assert(err, jc.ErrorIsNil) + modelUUID := utils.MustNewUUID().String() controllerUUID := utils.MustNewUUID().String() + controllerTag := names.NewControllerTag(controllerUUID) timestamp := time.Date(2016, 6, 22, 16, 42, 44, 0, time.UTC) apiCaller := apitesting.APICallerFunc(func(_ string, _ int, _, _ string, _, result interface{}) error { out := result.(*params.MasterMigrationStatus) *out = params.MasterMigrationStatus{ - Spec: params.ModelMigrationSpec{ + Spec: params.MigrationSpec{ ModelTag: names.NewModelTag(modelUUID).String(), - TargetInfo: params.ModelMigrationTargetInfo{ 
- ControllerTag: names.NewModelTag(controllerUUID).String(), + TargetInfo: params.MigrationTargetInfo{ + ControllerTag: controllerTag.String(), Addrs: []string{"2.2.2.2:2"}, CACert: "cert", AuthTag: names.NewUserTag("admin").String(), Password: "secret", + Macaroons: string(macsJSON), }, + ExternalControl: true, }, MigrationId: "id", - Phase: "PRECHECK", + Phase: "IMPORT", PhaseChangedTime: timestamp, } return nil }) client := migrationmaster.NewClient(apiCaller, nil) - status, err := client.GetMigrationStatus() + status, err := client.MigrationStatus() c.Assert(err, jc.ErrorIsNil) c.Assert(status, gc.DeepEquals, migration.MigrationStatus{ MigrationId: "id", ModelUUID: modelUUID, - Phase: migration.PRECHECK, + Phase: migration.IMPORT, PhaseChangedTime: timestamp, + ExternalControl: true, TargetInfo: migration.TargetInfo{ - ControllerTag: names.NewModelTag(controllerUUID), + ControllerTag: controllerTag, Addrs: []string{"2.2.2.2:2"}, CACert: "cert", AuthTag: names.NewUserTag("admin"), Password: "secret", + Macaroons: macs, }, }) } @@ -150,6 +163,47 @@ c.Assert(err, gc.ErrorMatches, "boom") } +func (s *ClientSuite) TestModelInfo(c *gc.C) { + var stub jujutesting.Stub + owner := names.NewUserTag("owner") + apiCaller := apitesting.APICallerFunc(func(objType string, v int, id, request string, arg, result interface{}) error { + stub.AddCall(objType+"."+request, id, arg) + *(result.(*params.MigrationModelInfo)) = params.MigrationModelInfo{ + UUID: "uuid", + Name: "name", + OwnerTag: owner.String(), + AgentVersion: version.MustParse("1.2.3"), + } + return nil + }) + client := migrationmaster.NewClient(apiCaller, nil) + model, err := client.ModelInfo() + stub.CheckCalls(c, []jujutesting.StubCall{ + {"MigrationMaster.ModelInfo", []interface{}{"", nil}}, + }) + c.Check(err, jc.ErrorIsNil) + c.Check(model, jc.DeepEquals, migration.ModelInfo{ + UUID: "uuid", + Name: "name", + Owner: owner, + AgentVersion: version.MustParse("1.2.3"), + }) +} + +func (s *ClientSuite) 
TestPrechecks(c *gc.C) { + var stub jujutesting.Stub + apiCaller := apitesting.APICallerFunc(func(objType string, version int, id, request string, arg, result interface{}) error { + stub.AddCall(objType+"."+request, id, arg) + return errors.New("blam") + }) + client := migrationmaster.NewClient(apiCaller, nil) + err := client.Prechecks() + c.Check(err, gc.ErrorMatches, "blam") + stub.CheckCalls(c, []jujutesting.StubCall{ + {"MigrationMaster.Prechecks", []interface{}{"", nil}}, + }) +} + func (s *ClientSuite) TestExport(c *gc.C) { var stub jujutesting.Stub apiCaller := apitesting.APICallerFunc(func(objType string, version int, id, request string, arg, result interface{}) error { @@ -245,14 +299,14 @@ c.Assert(err, gc.ErrorMatches, "boom") } -func (s *ClientSuite) TestGetMinionReports(c *gc.C) { +func (s *ClientSuite) TestMinionReports(c *gc.C) { var stub jujutesting.Stub apiCaller := apitesting.APICallerFunc(func(objType string, version int, id, request string, arg, result interface{}) error { stub.AddCall(objType+"."+request, id, arg) out := result.(*params.MinionReports) *out = params.MinionReports{ MigrationId: "id", - Phase: "PRECHECK", + Phase: "IMPORT", SuccessCount: 4, UnknownCount: 3, UnknownSample: []string{ @@ -269,14 +323,14 @@ return nil }) client := migrationmaster.NewClient(apiCaller, nil) - out, err := client.GetMinionReports() + out, err := client.MinionReports() c.Assert(err, jc.ErrorIsNil) stub.CheckCalls(c, []jujutesting.StubCall{ - {"MigrationMaster.GetMinionReports", []interface{}{"", nil}}, + {"MigrationMaster.MinionReports", []interface{}{"", nil}}, }) c.Assert(out, gc.DeepEquals, migration.MinionReports{ MigrationId: "id", - Phase: migration.PRECHECK, + Phase: migration.IMPORT, SuccessCount: 4, UnknownCount: 3, SomeUnknownMachines: []string{"3", "4"}, @@ -286,16 +340,16 @@ }) } -func (s *ClientSuite) TestGetMinionReportsFailedCall(c *gc.C) { +func (s *ClientSuite) TestMinionReportsFailedCall(c *gc.C) { apiCaller := 
apitesting.APICallerFunc(func(string, int, string, string, interface{}, interface{}) error { return errors.New("blam") }) client := migrationmaster.NewClient(apiCaller, nil) - _, err := client.GetMinionReports() + _, err := client.MinionReports() c.Assert(err, gc.ErrorMatches, "blam") } -func (s *ClientSuite) TestGetMinionReportsInvalidPhase(c *gc.C) { +func (s *ClientSuite) TestMinionReportsInvalidPhase(c *gc.C) { apiCaller := apitesting.APICallerFunc(func(_ string, _ int, _ string, _ string, _ interface{}, result interface{}) error { out := result.(*params.MinionReports) *out = params.MinionReports{ @@ -304,34 +358,34 @@ return nil }) client := migrationmaster.NewClient(apiCaller, nil) - _, err := client.GetMinionReports() + _, err := client.MinionReports() c.Assert(err, gc.ErrorMatches, `invalid phase: "BLARGH"`) } -func (s *ClientSuite) TestGetMinionReportsBadUnknownTag(c *gc.C) { +func (s *ClientSuite) TestMinionReportsBadUnknownTag(c *gc.C) { apiCaller := apitesting.APICallerFunc(func(_ string, _ int, _ string, _ string, _ interface{}, result interface{}) error { out := result.(*params.MinionReports) *out = params.MinionReports{ - Phase: "PRECHECK", + Phase: "IMPORT", UnknownSample: []string{"carl"}, } return nil }) client := migrationmaster.NewClient(apiCaller, nil) - _, err := client.GetMinionReports() + _, err := client.MinionReports() c.Assert(err, gc.ErrorMatches, `processing unknown agents: "carl" is not a valid tag`) } -func (s *ClientSuite) TestGetMinionReportsBadFailedTag(c *gc.C) { +func (s *ClientSuite) TestMinionReportsBadFailedTag(c *gc.C) { apiCaller := apitesting.APICallerFunc(func(_ string, _ int, _ string, _ string, _ interface{}, result interface{}) error { out := result.(*params.MinionReports) *out = params.MinionReports{ - Phase: "PRECHECK", + Phase: "IMPORT", Failed: []string{"dave"}, } return nil }) client := migrationmaster.NewClient(apiCaller, nil) - _, err := client.GetMinionReports() + _, err := client.MinionReports() c.Assert(err, 
gc.ErrorMatches, `processing failed agents: "dave" is not a valid tag`) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/api/migrationtarget/client.go juju-core-2.0.0/src/github.com/juju/juju/api/migrationtarget/client.go --- juju-core-2.0~beta15/src/github.com/juju/juju/api/migrationtarget/client.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/api/migrationtarget/client.go 2016-10-13 14:31:49.000000000 +0000 @@ -8,48 +8,46 @@ "github.com/juju/juju/api/base" "github.com/juju/juju/apiserver/params" + coremigration "github.com/juju/juju/core/migration" ) -// Client describes the client side API for the MigrationTarget -// facade. It is called by the migration master worker to talk to the -// target controller during a migration. -type Client interface { - // Import takes a serialized model and imports it into the target - // controller. - Import([]byte) error - - // Abort removes all data relating to a previously imported - // model. - Abort(string) error - - // Activate marks a migrated model as being ready to use. - Activate(string) error -} - // NewClient returns a new Client based on an existing API connection. -func NewClient(caller base.APICaller) Client { - return &client{base.NewFacadeCaller(caller, "MigrationTarget")} +func NewClient(caller base.APICaller) *Client { + return &Client{base.NewFacadeCaller(caller, "MigrationTarget")} } -// client implements Client. -type client struct { +// Client is the client-side API for the MigrationTarget facade. It is +// used by the migrationmaster worker when talking to the target +// controller during a migration. +type Client struct { caller base.FacadeCaller } -// Import implements Client. 
-func (c *client) Import(bytes []byte) error { +func (c *Client) Prechecks(model coremigration.ModelInfo) error { + args := params.MigrationModelInfo{ + UUID: model.UUID, + Name: model.Name, + OwnerTag: model.Owner.String(), + AgentVersion: model.AgentVersion, + } + return c.caller.FacadeCall("Prechecks", args, nil) +} + +// Import takes a serialized model and imports it into the target +// controller. +func (c *Client) Import(bytes []byte) error { serialized := params.SerializedModel{Bytes: bytes} return c.caller.FacadeCall("Import", serialized, nil) } -// Abort implements Client. -func (c *client) Abort(modelUUID string) error { +// Abort removes all data relating to a previously imported model. +func (c *Client) Abort(modelUUID string) error { args := params.ModelArgs{ModelTag: names.NewModelTag(modelUUID).String()} return c.caller.FacadeCall("Abort", args, nil) } -// Activate implements Client. -func (c *client) Activate(modelUUID string) error { +// Activate marks a migrated model as being ready to use. 
+func (c *Client) Activate(modelUUID string) error { args := params.ModelArgs{ModelTag: names.NewModelTag(modelUUID).String()} return c.caller.FacadeCall("Activate", args, nil) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/api/migrationtarget/client_test.go juju-core-2.0.0/src/github.com/juju/juju/api/migrationtarget/client_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/api/migrationtarget/client_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/api/migrationtarget/client_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -6,12 +6,14 @@ import ( "github.com/juju/errors" jujutesting "github.com/juju/testing" + "github.com/juju/version" gc "gopkg.in/check.v1" "gopkg.in/juju/names.v2" apitesting "github.com/juju/juju/api/base/testing" "github.com/juju/juju/api/migrationtarget" "github.com/juju/juju/apiserver/params" + coremigration "github.com/juju/juju/core/migration" ) type ClientSuite struct { @@ -20,7 +22,7 @@ var _ = gc.Suite(&ClientSuite{}) -func (s *ClientSuite) getClientAndStub(c *gc.C) (migrationtarget.Client, *jujutesting.Stub) { +func (s *ClientSuite) getClientAndStub(c *gc.C) (*migrationtarget.Client, *jujutesting.Stub) { var stub jujutesting.Stub apiCaller := apitesting.APICallerFunc(func(objType string, version int, id, request string, arg, result interface{}) error { stub.AddCall(objType+"."+request, id, arg) @@ -30,6 +32,31 @@ return client, &stub } +func (s *ClientSuite) TestPrechecks(c *gc.C) { + client, stub := s.getClientAndStub(c) + + ownerTag := names.NewUserTag("owner") + vers := version.MustParse("1.2.3") + + err := client.Prechecks(coremigration.ModelInfo{ + UUID: "uuid", + Owner: ownerTag, + Name: "name", + AgentVersion: vers, + }) + c.Assert(err, gc.ErrorMatches, "boom") + + expectedArg := params.MigrationModelInfo{ + UUID: "uuid", + Name: "name", + OwnerTag: ownerTag.String(), + AgentVersion: vers, + } + stub.CheckCalls(c, []jujutesting.StubCall{ + {"MigrationTarget.Prechecks", 
[]interface{}{"", expectedArg}}, + }) +} + func (s *ClientSuite) TestImport(c *gc.C) { client, stub := s.getClientAndStub(c) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/api/modelconfig/modelconfig.go juju-core-2.0.0/src/github.com/juju/juju/api/modelconfig/modelconfig.go --- juju-core-2.0~beta15/src/github.com/juju/juju/api/modelconfig/modelconfig.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/api/modelconfig/modelconfig.go 2016-10-13 14:31:49.000000000 +0000 @@ -73,32 +73,3 @@ args := params.ModelUnset{Keys: keys} return c.facade.FacadeCall("ModelUnset", args, nil) } - -// ModelDefaults returns the default config values used when creating a new model. -func (c *Client) ModelDefaults() (config.ConfigValues, error) { - result := params.ModelConfigResults{} - err := c.facade.FacadeCall("ModelDefaults", nil, &result) - if err != nil { - return nil, errors.Trace(err) - } - values := make(config.ConfigValues) - for name, val := range result.Config { - values[name] = config.ConfigValue{ - Value: val.Value, - Source: val.Source, - } - } - return values, nil -} - -// SetModelDefaults updates the specified default model config values. -func (c *Client) SetModelDefaults(config map[string]interface{}) error { - args := params.ModelSet{Config: config} - return c.facade.FacadeCall("SetModelDefaults", args, nil) -} - -// UnsetModelDefaults removes the specified default model config values. 
-func (c *Client) UnsetModelDefaults(keys ...string) error { - args := params.ModelUnset{Keys: keys} - return c.facade.FacadeCall("UnsetModelDefaults", args, nil) -} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/api/modelconfig/modelconfig_test.go juju-core-2.0.0/src/github.com/juju/juju/api/modelconfig/modelconfig_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/api/modelconfig/modelconfig_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/api/modelconfig/modelconfig_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -127,84 +127,3 @@ c.Assert(err, jc.ErrorIsNil) c.Assert(called, jc.IsTrue) } - -func (s *modelconfigSuite) TestModelDefaults(c *gc.C) { - apiCaller := basetesting.APICallerFunc( - func(objType string, - version int, - id, request string, - a, result interface{}, - ) error { - c.Check(objType, gc.Equals, "ModelConfig") - c.Check(id, gc.Equals, "") - c.Check(request, gc.Equals, "ModelDefaults") - c.Check(a, gc.IsNil) - c.Assert(result, gc.FitsTypeOf, ¶ms.ModelConfigResults{}) - results := result.(*params.ModelConfigResults) - results.Config = map[string]params.ConfigValue{ - "foo": {"bar", "model"}, - } - return nil - }, - ) - client := modelconfig.NewClient(apiCaller) - result, err := client.ModelDefaults() - c.Assert(err, jc.ErrorIsNil) - c.Assert(result, jc.DeepEquals, config.ConfigValues{ - "foo": {"bar", "model"}, - }) -} - -func (s *modelconfigSuite) TestSetModelDefaults(c *gc.C) { - called := false - apiCaller := basetesting.APICallerFunc( - func(objType string, - version int, - id, request string, - a, result interface{}, - ) error { - c.Check(objType, gc.Equals, "ModelConfig") - c.Check(id, gc.Equals, "") - c.Check(request, gc.Equals, "SetModelDefaults") - c.Check(a, jc.DeepEquals, params.ModelSet{ - Config: map[string]interface{}{ - "some-name": "value", - "other-name": true, - }, - }) - called = true - return nil - }, - ) - client := modelconfig.NewClient(apiCaller) - err := 
client.SetModelDefaults(map[string]interface{}{ - "some-name": "value", - "other-name": true, - }) - c.Assert(err, jc.ErrorIsNil) - c.Assert(called, jc.IsTrue) -} - -func (s *modelconfigSuite) TestUnsetModelDefaults(c *gc.C) { - called := false - apiCaller := basetesting.APICallerFunc( - func(objType string, - version int, - id, request string, - a, result interface{}, - ) error { - c.Check(objType, gc.Equals, "ModelConfig") - c.Check(id, gc.Equals, "") - c.Check(request, gc.Equals, "UnsetModelDefaults") - c.Check(a, jc.DeepEquals, params.ModelUnset{ - Keys: []string{"foo", "bar"}, - }) - called = true - return nil - }, - ) - client := modelconfig.NewClient(apiCaller) - err := client.UnsetModelDefaults("foo", "bar") - c.Assert(err, jc.ErrorIsNil) - c.Assert(called, jc.IsTrue) -} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/api/modelmanager/modelmanager.go juju-core-2.0.0/src/github.com/juju/juju/api/modelmanager/modelmanager.go --- juju-core-2.0~beta15/src/github.com/juju/juju/api/modelmanager/modelmanager.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/api/modelmanager/modelmanager.go 2016-10-13 14:31:49.000000000 +0000 @@ -9,8 +9,11 @@ "gopkg.in/juju/names.v2" "github.com/juju/juju/api/base" + "github.com/juju/juju/api/common" "github.com/juju/juju/apiserver/params" - "github.com/juju/juju/juju/permission" + "github.com/juju/juju/environs/config" + "github.com/juju/juju/instance" + "github.com/juju/juju/permission" ) var logger = loggo.GetLogger("juju.api.modelmanager") @@ -20,13 +23,18 @@ type Client struct { base.ClientFacade facade base.FacadeCaller + *common.ModelStatusAPI } // NewClient creates a new `Client` based on an existing authenticated API // connection. 
func NewClient(st base.APICallCloser) *Client { frontend, backend := base.NewClientFacade(st, "ModelManager") - return &Client{ClientFacade: frontend, facade: backend} + return &Client{ + ClientFacade: frontend, + facade: backend, + ModelStatusAPI: common.NewModelStatusAPI(backend), + } } // Close closes the api connection. @@ -37,23 +45,110 @@ // CreateModel creates a new model using the model config, // cloud region and credential specified in the args. func (c *Client) CreateModel( - name, owner, cloudRegion, cloudCredential string, config map[string]interface{}, -) (params.ModelInfo, error) { - var result params.ModelInfo + name, owner, cloud, cloudRegion string, + cloudCredential names.CloudCredentialTag, + config map[string]interface{}, +) (base.ModelInfo, error) { + var result base.ModelInfo if !names.IsValidUser(owner) { return result, errors.Errorf("invalid owner name %q", owner) } + var cloudTag string + if cloud != "" { + if !names.IsValidCloud(cloud) { + return result, errors.Errorf("invalid cloud name %q", cloud) + } + cloudTag = names.NewCloudTag(cloud).String() + } + var cloudCredentialTag string + if cloudCredential != (names.CloudCredentialTag{}) { + cloudCredentialTag = cloudCredential.String() + } createArgs := params.ModelCreateArgs{ - Name: name, - OwnerTag: names.NewUserTag(owner).String(), - Config: config, - CloudRegion: cloudRegion, - CloudCredential: cloudCredential, + Name: name, + OwnerTag: names.NewUserTag(owner).String(), + Config: config, + CloudTag: cloudTag, + CloudRegion: cloudRegion, + CloudCredentialTag: cloudCredentialTag, } - err := c.facade.FacadeCall("CreateModel", createArgs, &result) + var modelInfo params.ModelInfo + err := c.facade.FacadeCall("CreateModel", createArgs, &modelInfo) if err != nil { return result, errors.Trace(err) } + return convertParamsModelInfo(modelInfo) +} + +func convertParamsModelInfo(modelInfo params.ModelInfo) (base.ModelInfo, error) { + cloud, err := names.ParseCloudTag(modelInfo.CloudTag) + if 
err != nil { + return base.ModelInfo{}, err + } + var credential string + if modelInfo.CloudCredentialTag != "" { + credTag, err := names.ParseCloudCredentialTag(modelInfo.CloudCredentialTag) + if err != nil { + return base.ModelInfo{}, err + } + credential = credTag.Id() + } + ownerTag, err := names.ParseUserTag(modelInfo.OwnerTag) + if err != nil { + return base.ModelInfo{}, err + } + result := base.ModelInfo{ + Name: modelInfo.Name, + UUID: modelInfo.UUID, + ControllerUUID: modelInfo.ControllerUUID, + ProviderType: modelInfo.ProviderType, + DefaultSeries: modelInfo.DefaultSeries, + Cloud: cloud.Id(), + CloudRegion: modelInfo.CloudRegion, + CloudCredential: credential, + Owner: ownerTag.Id(), + Life: string(modelInfo.Life), + } + result.Status = base.Status{ + Status: modelInfo.Status.Status, + Info: modelInfo.Status.Info, + Data: make(map[string]interface{}), + Since: modelInfo.Status.Since, + } + for k, v := range modelInfo.Status.Data { + result.Status.Data[k] = v + } + result.Users = make([]base.UserInfo, len(modelInfo.Users)) + for i, u := range modelInfo.Users { + result.Users[i] = base.UserInfo{ + UserName: u.UserName, + DisplayName: u.DisplayName, + Access: string(u.Access), + LastConnection: u.LastConnection, + } + } + result.Machines = make([]base.Machine, len(modelInfo.Machines)) + for i, m := range modelInfo.Machines { + machine := base.Machine{ + Id: m.Id, + InstanceId: m.InstanceId, + HasVote: m.HasVote, + WantsVote: m.WantsVote, + Status: m.Status, + } + if m.Hardware != nil { + machine.Hardware = &instance.HardwareCharacteristics{ + Arch: m.Hardware.Arch, + Mem: m.Hardware.Mem, + RootDisk: m.Hardware.RootDisk, + CpuCores: m.Hardware.Cores, + CpuPower: m.Hardware.CpuPower, + Tags: m.Hardware.Tags, + AvailabilityZone: m.Hardware.AvailabilityZone, + } + } + result.Machines[i] = machine + } return result, nil } @@ -80,7 +175,7 @@ result[i] = base.UserModel{ Name: model.Name, UUID: model.UUID, - Owner: owner.Canonical(), + Owner: owner.Id(), 
LastConnection: model.LastConnection, } } @@ -126,35 +221,45 @@ return result.Result, nil } -// DestroyModel puts the model into a "dying" state, -// and removes all non-manager machine instances. DestroyModel -// will fail if there are any manually-provisioned non-manager machines -// in state. -func (c *Client) DestroyModel() error { - return c.facade.FacadeCall("DestroyModel", nil, nil) -} - -// ParseModelAccess parses an access permission argument into -// a type suitable for making an API facade call. -func ParseModelAccess(access string) (params.UserAccessPermission, error) { - var fail params.UserAccessPermission - - modelAccess, err := permission.ParseModelAccess(access) - if err != nil { - return fail, errors.Trace(err) - } - var accessPermission params.UserAccessPermission - switch modelAccess { - case permission.ModelReadAccess: - accessPermission = params.ModelReadAccess - case permission.ModelWriteAccess: - accessPermission = params.ModelWriteAccess - case permission.ModelAdminAccess: - accessPermission = params.ModelAdminAccess - default: - return fail, errors.Errorf("unsupported model access permission %v", modelAccess) +// DumpModelDB returns all relevant mongo documents for the model. +func (c *Client) DumpModelDB(model names.ModelTag) (map[string]interface{}, error) { + var results params.MapResults + entities := params.Entities{ + Entities: []params.Entity{{Tag: model.String()}}, + } + + err := c.facade.FacadeCall("DumpModelsDB", entities, &results) + if err != nil { + return nil, errors.Trace(err) + } + if count := len(results.Results); count != 1 { + return nil, errors.Errorf("unexpected result count: %d", count) + } + result := results.Results[0] + if result.Error != nil { + return nil, result.Error } - return accessPermission, nil + return result.Result, nil +} + +// DestroyModel puts the specified model into a "dying" state, which will +// cause the model's resources to be cleaned up, after which the model will +// be removed. 
+func (c *Client) DestroyModel(tag names.ModelTag) error { + var results params.ErrorResults + entities := params.Entities{ + Entities: []params.Entity{{Tag: tag.String()}}, + } + if err := c.facade.FacadeCall("DestroyModels", entities, &results); err != nil { + return errors.Trace(err) + } + if n := len(results.Results); n != 1 { + return errors.Errorf("expected 1 result, got %d", n) + } + if err := results.Results[0].Error; err != nil { + return errors.Trace(err) + } + return nil } // GrantModel grants a user access to the specified models. @@ -175,8 +280,8 @@ } userTag := names.NewUserTag(user) - accessPermission, err := ParseModelAccess(access) - if err != nil { + modelAccess := permission.Access(access) + if err := permission.ValidateModelAccess(modelAccess); err != nil { return errors.Trace(err) } for _, model := range modelUUIDs { @@ -187,13 +292,13 @@ args.Changes = append(args.Changes, params.ModifyModelAccess{ UserTag: userTag.String(), Action: action, - Access: accessPermission, + Access: params.UserAccessPermission(modelAccess), ModelTag: modelTag.String(), }) } var result params.ErrorResults - err = c.facade.FacadeCall("ModifyModelAccess", args, &result) + err := c.facade.FacadeCall("ModifyModelAccess", args, &result) if err != nil { return errors.Trace(err) } @@ -203,9 +308,75 @@ for i, r := range result.Results { if r.Error != nil && r.Error.Code == params.CodeAlreadyExists { - logger.Warningf("model %q is already shared with %q", modelUUIDs[i], userTag.Canonical()) + logger.Warningf("model %q is already shared with %q", modelUUIDs[i], userTag.Id()) result.Results[i].Error = nil } } return result.Combine() } + +// ModelDefaults returns the default values for various sources used when +// creating a new model. 
+func (c *Client) ModelDefaults() (config.ModelDefaultAttributes, error) { + result := params.ModelDefaultsResult{} + err := c.facade.FacadeCall("ModelDefaults", nil, &result) + if err != nil { + return nil, errors.Trace(err) + } + values := make(config.ModelDefaultAttributes) + for name, val := range result.Config { + setting := config.AttributeDefaultValues{ + Default: val.Default, + Controller: val.Controller, + } + for _, region := range val.Regions { + setting.Regions = append(setting.Regions, config.RegionDefaultValue{ + Name: region.RegionName, + Value: region.Value}) + } + values[name] = setting + } + return values, nil +} + +// SetModelDefaults updates the specified default model config values. +func (c *Client) SetModelDefaults(cloud, region string, config map[string]interface{}) error { + var cloudTag string + if cloud != "" { + cloudTag = names.NewCloudTag(cloud).String() + } + args := params.SetModelDefaults{ + Config: []params.ModelDefaultValues{{ + Config: config, + CloudTag: cloudTag, + CloudRegion: region, + }}, + } + var result params.ErrorResults + err := c.facade.FacadeCall("SetModelDefaults", args, &result) + if err != nil { + return err + } + return result.OneError() +} + +// UnsetModelDefaults removes the specified default model config values. 
+func (c *Client) UnsetModelDefaults(cloud, region string, keys ...string) error { + var cloudTag string + if cloud != "" { + cloudTag = names.NewCloudTag(cloud).String() + } + args := params.UnsetModelDefaults{ + Keys: []params.ModelUnsetKeys{{ + Keys: keys, + CloudTag: cloudTag, + CloudRegion: region, + }}, + } + var result params.ErrorResults + err := c.facade.FacadeCall("UnsetModelDefaults", args, &result) + if err != nil { + return err + } + return result.OneError() +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/api/modelmanager/modelmanager_test.go juju-core-2.0.0/src/github.com/juju/juju/api/modelmanager/modelmanager_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/api/modelmanager/modelmanager_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/api/modelmanager/modelmanager_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -9,9 +9,11 @@ gc "gopkg.in/check.v1" "gopkg.in/juju/names.v2" + "github.com/juju/juju/api/base" basetesting "github.com/juju/juju/api/base/testing" "github.com/juju/juju/api/modelmanager" "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/environs/config" jujutesting "github.com/juju/juju/juju/testing" "github.com/juju/juju/testing" "github.com/juju/juju/testing/factory" @@ -28,33 +30,48 @@ } func (s *modelmanagerSuite) OpenAPI(c *gc.C) *modelmanager.Client { - return modelmanager.NewClient(s.APIState) + return modelmanager.NewClient(s.OpenControllerAPI(c)) } func (s *modelmanagerSuite) TestCreateModelBadUser(c *gc.C) { modelManager := s.OpenAPI(c) - _, err := modelManager.CreateModel("mymodel", "not a user", "", "", nil) + defer modelManager.Close() + _, err := modelManager.CreateModel("mymodel", "not a user", "", "", names.CloudCredentialTag{}, nil) c.Assert(err, gc.ErrorMatches, `invalid owner name "not a user"`) } func (s *modelmanagerSuite) TestCreateModel(c *gc.C) { + s.testCreateModel(c, "dummy", "dummy-region") +} + +func (s *modelmanagerSuite) 
TestCreateModelCloudDefaultRegion(c *gc.C) { + s.testCreateModel(c, "dummy", "") +} + +func (s *modelmanagerSuite) TestCreateModelDefaultCloudAndRegion(c *gc.C) { + s.testCreateModel(c, "", "") +} + +func (s *modelmanagerSuite) testCreateModel(c *gc.C, cloud, region string) { modelManager := s.OpenAPI(c) + defer modelManager.Close() user := s.Factory.MakeUser(c, nil) - owner := user.UserTag().Canonical() - newModel, err := modelManager.CreateModel("new-model", owner, "", "", map[string]interface{}{ + owner := user.UserTag().Id() + newModel, err := modelManager.CreateModel("new-model", owner, cloud, region, names.CloudCredentialTag{}, map[string]interface{}{ "authorized-keys": "ssh-key", // dummy needs controller "controller": false, }) c.Assert(err, jc.ErrorIsNil) c.Assert(newModel.Name, gc.Equals, "new-model") - c.Assert(newModel.OwnerTag, gc.Equals, user.Tag().String()) - c.Assert(newModel.CloudRegion, gc.Equals, "") + c.Assert(newModel.Owner, gc.Equals, user.String()) + c.Assert(newModel.CloudRegion, gc.Equals, "dummy-region") c.Assert(utils.IsValidUUIDString(newModel.UUID), jc.IsTrue) } func (s *modelmanagerSuite) TestListModelsBadUser(c *gc.C) { modelManager := s.OpenAPI(c) + defer modelManager.Close() _, err := modelManager.ListModels("not a user") c.Assert(err, gc.ErrorMatches, `invalid user name "not a user"`) } @@ -67,6 +84,7 @@ Name: "second", Owner: owner}).Close() modelManager := s.OpenAPI(c) + defer modelManager.Close() models, err := modelManager.ListModels("user@remote") c.Assert(err, jc.ErrorIsNil) c.Assert(models, gc.HasLen, 2) @@ -77,21 +95,149 @@ c.Assert(ownerNames, jc.DeepEquals, []string{"user@remote", "user@remote"}) } -func (s *modelmanagerSuite) TestDestroyEnvironment(c *gc.C) { - modelManagerClient := s.OpenAPI(c) +func (s *modelmanagerSuite) TestDestroyModel(c *gc.C) { + modelManager := s.OpenAPI(c) + defer modelManager.Close() var called bool - modelmanager.PatchFacadeCall(&s.CleanupSuite, modelManagerClient, + 
modelmanager.PatchFacadeCall(&s.CleanupSuite, modelManager, func(req string, args interface{}, resp interface{}) error { - c.Assert(req, gc.Equals, "DestroyModel") + c.Assert(req, gc.Equals, "DestroyModels") + c.Assert(args, jc.DeepEquals, params.Entities{ + Entities: []params.Entity{{testing.ModelTag.String()}}, + }) + results := resp.(*params.ErrorResults) + *results = params.ErrorResults{ + Results: []params.ErrorResult{{}}, + } called = true return nil }) - err := modelManagerClient.DestroyModel() + err := modelManager.DestroyModel(testing.ModelTag) + c.Assert(err, jc.ErrorIsNil) + c.Assert(called, jc.IsTrue) +} + +func (s *modelmanagerSuite) TestModelDefaults(c *gc.C) { + apiCaller := basetesting.APICallerFunc( + func(objType string, + version int, + id, request string, + a, result interface{}, + ) error { + c.Check(objType, gc.Equals, "ModelManager") + c.Check(id, gc.Equals, "") + c.Check(request, gc.Equals, "ModelDefaults") + c.Check(a, gc.IsNil) + c.Assert(result, gc.FitsTypeOf, ¶ms.ModelDefaultsResult{}) + results := result.(*params.ModelDefaultsResult) + results.Config = map[string]params.ModelDefaults{ + "foo": {"bar", "model", []params.RegionDefaults{{ + "dummy-region", + "dummy-value"}}}, + } + return nil + }, + ) + client := modelmanager.NewClient(apiCaller) + result, err := client.ModelDefaults() + c.Assert(err, jc.ErrorIsNil) + + c.Assert(result, jc.DeepEquals, config.ModelDefaultAttributes{ + "foo": {"bar", "model", []config.RegionDefaultValue{{ + "dummy-region", + "dummy-value"}}}, + }) +} + +func (s *modelmanagerSuite) TestSetModelDefaults(c *gc.C) { + called := false + apiCaller := basetesting.APICallerFunc( + func(objType string, + version int, + id, request string, + a, result interface{}, + ) error { + c.Check(objType, gc.Equals, "ModelManager") + c.Check(id, gc.Equals, "") + c.Check(request, gc.Equals, "SetModelDefaults") + c.Check(a, jc.DeepEquals, params.SetModelDefaults{ + Config: []params.ModelDefaultValues{{ + CloudTag: "cloud-mycloud", 
+ CloudRegion: "region", + Config: map[string]interface{}{ + "some-name": "value", + "other-name": true, + }, + }}}) + c.Assert(result, gc.FitsTypeOf, ¶ms.ErrorResults{}) + *(result.(*params.ErrorResults)) = params.ErrorResults{ + Results: []params.ErrorResult{{Error: nil}}, + } + called = true + return nil + }, + ) + client := modelmanager.NewClient(apiCaller) + err := client.SetModelDefaults("mycloud", "region", map[string]interface{}{ + "some-name": "value", + "other-name": true, + }) c.Assert(err, jc.ErrorIsNil) c.Assert(called, jc.IsTrue) } +func (s *modelmanagerSuite) TestUnsetModelDefaults(c *gc.C) { + called := false + apiCaller := basetesting.APICallerFunc( + func(objType string, + version int, + id, request string, + a, result interface{}, + ) error { + c.Check(objType, gc.Equals, "ModelManager") + c.Check(id, gc.Equals, "") + c.Check(request, gc.Equals, "UnsetModelDefaults") + c.Check(a, jc.DeepEquals, params.UnsetModelDefaults{ + Keys: []params.ModelUnsetKeys{{ + CloudTag: "cloud-mycloud", + CloudRegion: "region", + Keys: []string{"foo", "bar"}, + }}}) + c.Assert(result, gc.FitsTypeOf, ¶ms.ErrorResults{}) + *(result.(*params.ErrorResults)) = params.ErrorResults{ + Results: []params.ErrorResult{{Error: nil}}, + } + called = true + return nil + }, + ) + client := modelmanager.NewClient(apiCaller) + err := client.UnsetModelDefaults("mycloud", "region", "foo", "bar") + c.Assert(err, jc.ErrorIsNil) + c.Assert(called, jc.IsTrue) +} + +func (s *modelmanagerSuite) TestModelStatus(c *gc.C) { + sysManager := s.OpenAPI(c) + defer sysManager.Close() + m := s.Factory.MakeMachine(c, nil) + id, err := m.InstanceId() + c.Assert(err, jc.ErrorIsNil) + modelTag := s.State.ModelTag() + results, err := sysManager.ModelStatus(modelTag) + c.Assert(err, jc.ErrorIsNil) + c.Assert(results, jc.DeepEquals, []base.ModelStatus{{ + UUID: modelTag.Id(), + TotalMachineCount: 1, + HostedMachineCount: 1, + ServiceCount: 0, + Owner: "admin", + Life: string(params.Alive), + Machines: 
[]base.Machine{{Id: "0", InstanceId: string(id), Status: "pending"}}, + }}) +} + type dumpModelSuite struct { testing.BaseSuite } @@ -140,3 +286,52 @@ c.Assert(err, gc.ErrorMatches, "fake error") c.Assert(out, gc.IsNil) } + +func (s *dumpModelSuite) TestDumpModelDB(c *gc.C) { + expected := map[string]interface{}{ + "models": []map[string]interface{}{{ + "name": "admin", + "uuid": "some-uuid", + }}, + "machines": []map[string]interface{}{{ + "id": "0", + "life": 0, + }}, + } + results := params.MapResults{Results: []params.MapResult{{ + Result: expected, + }}} + apiCaller := basetesting.APICallerFunc( + func(objType string, version int, id, request string, args, result interface{}) error { + c.Check(objType, gc.Equals, "ModelManager") + c.Check(request, gc.Equals, "DumpModelsDB") + in, ok := args.(params.Entities) + c.Assert(ok, jc.IsTrue) + c.Assert(in, gc.DeepEquals, params.Entities{[]params.Entity{{testing.ModelTag.String()}}}) + res, ok := result.(*params.MapResults) + c.Assert(ok, jc.IsTrue) + *res = results + return nil + }) + client := modelmanager.NewClient(apiCaller) + out, err := client.DumpModelDB(testing.ModelTag) + c.Assert(err, jc.ErrorIsNil) + c.Assert(out, jc.DeepEquals, expected) +} + +func (s *dumpModelSuite) TestDumpModelDBError(c *gc.C) { + results := params.MapResults{Results: []params.MapResult{{ + Error: ¶ms.Error{Message: "fake error"}, + }}} + apiCaller := basetesting.APICallerFunc( + func(objType string, version int, id, request string, args, result interface{}) error { + res, ok := result.(*params.MapResults) + c.Assert(ok, jc.IsTrue) + *res = results + return nil + }) + client := modelmanager.NewClient(apiCaller) + out, err := client.DumpModelDB(testing.ModelTag) + c.Assert(err, gc.ErrorMatches, "fake error") + c.Assert(out, gc.IsNil) +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/api/monitor.go juju-core-2.0.0/src/github.com/juju/juju/api/monitor.go --- juju-core-2.0~beta15/src/github.com/juju/juju/api/monitor.go 1970-01-01 
00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/api/monitor.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,62 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package api + +import ( + "time" + + "github.com/juju/utils/clock" +) + +// monitor performs regular pings of an API connection as well as +// monitoring the connection closed channel and the underlying +// rpc.Conn's dead channel. It will close `broken` if pings fail, or +// if `closed` or `dead` are closed. +type monitor struct { + clock clock.Clock + + ping func() error + pingPeriod time.Duration + pingTimeout time.Duration + + closed <-chan struct{} + dead <-chan struct{} + broken chan<- struct{} +} + +func (m *monitor) run() { + defer close(m.broken) + for { + select { + case <-m.closed: + return + case <-m.dead: + logger.Debugf("RPC connection died") + return + case <-m.clock.After(m.pingPeriod): + if !m.pingWithTimeout() { + return + } + } + } +} + +func (m *monitor) pingWithTimeout() bool { + result := make(chan error, 1) + go func() { + // Note that result is buffered so that we don't leak this + // goroutine when a timeout happens. + result <- m.ping() + }() + select { + case err := <-result: + if err != nil { + logger.Debugf("health ping failed: %v", err) + } + return err == nil + case <-m.clock.After(m.pingTimeout): + logger.Errorf("health ping timed out after %s", m.pingTimeout) + return false + } +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/api/monitor_internal_test.go juju-core-2.0.0/src/github.com/juju/juju/api/monitor_internal_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/api/monitor_internal_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/api/monitor_internal_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,114 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+ +package api + +import ( + "errors" + "time" + + "github.com/juju/testing" + gc "gopkg.in/check.v1" + + jjtesting "github.com/juju/juju/testing" +) + +var _ = gc.Suite(&MonitorSuite{}) + +type MonitorSuite struct { + testing.IsolationSuite + clock *testing.Clock + closed chan (struct{}) + dead chan (struct{}) + broken chan (struct{}) + monitor *monitor +} + +const testPingPeriod = 30 * time.Second +const testPingTimeout = time.Second + +func (s *MonitorSuite) SetUpTest(c *gc.C) { + s.IsolationSuite.SetUpTest(c) + s.clock = testing.NewClock(time.Time{}) + s.closed = make(chan struct{}) + s.dead = make(chan struct{}) + s.broken = make(chan struct{}) + s.monitor = &monitor{ + clock: s.clock, + ping: func() error { return nil }, + pingPeriod: testPingPeriod, + pingTimeout: testPingTimeout, + closed: s.closed, + dead: s.dead, + broken: s.broken, + } +} + +func (s *MonitorSuite) TestClose(c *gc.C) { + go s.monitor.run() + s.waitForClock(c) + close(s.closed) + assertEvent(c, s.broken) +} + +func (s *MonitorSuite) TestDead(c *gc.C) { + go s.monitor.run() + s.waitForClock(c) + close(s.dead) + assertEvent(c, s.broken) +} + +func (s *MonitorSuite) TestFirstPingFails(c *gc.C) { + s.monitor.ping = func() error { return errors.New("boom") } + go s.monitor.run() + + s.waitThenAdvance(c, testPingPeriod) + assertEvent(c, s.broken) +} + +func (s *MonitorSuite) TestLaterPingFails(c *gc.C) { + pings := 0 + s.monitor.ping = func() error { + if pings > 0 { + return errors.New("boom") + } + pings++ + return nil + } + go s.monitor.run() + + s.waitThenAdvance(c, testPingPeriod) // in run + s.waitForClock(c) // in pingWithTimeout + s.waitThenAdvance(c, testPingPeriod) // in run + s.waitForClock(c) // in pingWithTimeout + assertEvent(c, s.broken) +} + +func (s *MonitorSuite) TestPingsTimesOut(c *gc.C) { + s.monitor.ping = func() error { + // Advance the clock only once this ping call is being waited on. 
+ s.waitThenAdvance(c, testPingTimeout) + return nil + } + go s.monitor.run() + + s.waitThenAdvance(c, testPingPeriod) + assertEvent(c, s.broken) +} + +func (s *MonitorSuite) waitForClock(c *gc.C) { + assertEvent(c, s.clock.Alarms()) +} + +func (s *MonitorSuite) waitThenAdvance(c *gc.C, d time.Duration) { + s.waitForClock(c) + s.clock.Advance(d) +} + +func assertEvent(c *gc.C, ch <-chan struct{}) { + select { + case <-ch: + case <-time.After(jjtesting.LongWait): + c.Fatal("timed out waiting for channel event") + } +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/api/provisioner/machine.go juju-core-2.0.0/src/github.com/juju/juju/api/provisioner/machine.go --- juju-core-2.0~beta15/src/github.com/juju/juju/api/provisioner/machine.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/api/provisioner/machine.go 2016-10-13 14:31:49.000000000 +0000 @@ -168,6 +168,20 @@ return result.OneError() } +// MarkForRemoval indicates that the machine is ready to have any +// provider-level resources cleaned up and be removed. +func (m *Machine) MarkForRemoval() error { + var result params.ErrorResults + args := params.Entities{ + Entities: []params.Entity{{Tag: m.tag.String()}}, + } + err := m.st.facade.FacadeCall("MarkMachinesForRemoval", args, &result) + if err != nil { + return err + } + return result.OneError() +} + // Series returns the operating system series running on the machine. 
// // NOTE: Unlike state.Machine.Series(), this method returns an error diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/api/provisioner/provisioner_test.go juju-core-2.0.0/src/github.com/juju/juju/api/provisioner/provisioner_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/api/provisioner/provisioner_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/api/provisioner/provisioner_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -74,7 +74,7 @@ s.provisioner = provisioner.NewState(s.st) c.Assert(s.provisioner, gc.NotNil) - s.ModelWatcherTests = apitesting.NewModelWatcherTests(s.provisioner, s.BackingState, apitesting.HasSecrets) + s.ModelWatcherTests = apitesting.NewModelWatcherTests(s.provisioner, s.BackingState) s.APIAddresserTests = apitesting.NewAPIAddresserTests(s.provisioner, s.BackingState) } @@ -97,15 +97,15 @@ machineStatus, info, err := apiMachine.Status() c.Assert(err, jc.ErrorIsNil) - c.Assert(machineStatus, gc.Equals, status.StatusPending) + c.Assert(machineStatus, gc.Equals, status.Pending) c.Assert(info, gc.Equals, "") - err = apiMachine.SetStatus(status.StatusStarted, "blah", nil) + err = apiMachine.SetStatus(status.Started, "blah", nil) c.Assert(err, jc.ErrorIsNil) machineStatus, info, err = apiMachine.Status() c.Assert(err, jc.ErrorIsNil) - c.Assert(machineStatus, gc.Equals, status.StatusStarted) + c.Assert(machineStatus, gc.Equals, status.Started) c.Assert(info, gc.Equals, "blah") statusInfo, err := s.machine.Status() c.Assert(err, jc.ErrorIsNil) @@ -118,13 +118,13 @@ instanceStatus, info, err := apiMachine.InstanceStatus() c.Assert(err, jc.ErrorIsNil) - c.Assert(instanceStatus, gc.Equals, status.StatusPending) + c.Assert(instanceStatus, gc.Equals, status.Pending) c.Assert(info, gc.Equals, "") - err = apiMachine.SetInstanceStatus(status.StatusStarted, "blah", nil) + err = apiMachine.SetInstanceStatus(status.Started, "blah", nil) c.Assert(err, jc.ErrorIsNil) instanceStatus, info, err = 
apiMachine.InstanceStatus() c.Assert(err, jc.ErrorIsNil) - c.Assert(instanceStatus, gc.Equals, status.StatusStarted) + c.Assert(instanceStatus, gc.Equals, status.Started) c.Assert(info, gc.Equals, "blah") statusInfo, err := s.machine.InstanceStatus() c.Assert(err, jc.ErrorIsNil) @@ -135,12 +135,12 @@ apiMachine, err := s.provisioner.Machine(s.machine.Tag().(names.MachineTag)) c.Assert(err, jc.ErrorIsNil) - err = apiMachine.SetStatus(status.StatusError, "blah", map[string]interface{}{"foo": "bar"}) + err = apiMachine.SetStatus(status.Error, "blah", map[string]interface{}{"foo": "bar"}) c.Assert(err, jc.ErrorIsNil) machineStatus, info, err := apiMachine.Status() c.Assert(err, jc.ErrorIsNil) - c.Assert(machineStatus, gc.Equals, status.StatusError) + c.Assert(machineStatus, gc.Equals, status.Error) c.Assert(info, gc.Equals, "blah") statusInfo, err := s.machine.Status() c.Assert(err, jc.ErrorIsNil) @@ -152,7 +152,7 @@ c.Assert(err, jc.ErrorIsNil) now := time.Now() sInfo := status.StatusInfo{ - Status: status.StatusError, + Status: status.Error, Message: "blah", Data: map[string]interface{}{"transient": true}, Since: &now, @@ -213,6 +213,27 @@ c.Assert(err, gc.ErrorMatches, "machine 0 is required by the model") } +func (s *provisionerSuite) TestMarkForRemoval(c *gc.C) { + machine, err := s.State.AddMachine("xenial", state.JobHostUnits) + c.Assert(err, jc.ErrorIsNil) + + apiMachine, err := s.provisioner.Machine(machine.Tag().(names.MachineTag)) + c.Assert(err, jc.ErrorIsNil) + + err = apiMachine.MarkForRemoval() + c.Assert(err, gc.ErrorMatches, "cannot remove machine 1: machine is not dead") + + err = machine.EnsureDead() + c.Assert(err, jc.ErrorIsNil) + + err = apiMachine.MarkForRemoval() + c.Assert(err, jc.ErrorIsNil) + + removals, err := s.State.AllMachineRemovals() + c.Assert(err, jc.ErrorIsNil) + c.Assert(removals, jc.SameContents, []string{"1"}) +} + func (s *provisionerSuite) TestRefreshAndLife(c *gc.C) { // Create a fresh machine to test the complete scenario. 
otherMachine, err := s.State.AddMachine("quantal", state.JobHostUnits) @@ -259,7 +280,7 @@ c.Assert(err, gc.ErrorMatches, "machine 1 not provisioned") c.Assert(instanceId, gc.Equals, instance.Id("")) - hwChars := instance.MustParseHardware("cpu-cores=123", "mem=4G") + hwChars := instance.MustParseHardware("cores=123", "mem=4G") volumes := []params.Volume{{ VolumeTag: "volume-1-0", @@ -394,7 +415,7 @@ SpaceName: "{{if (lt . 2)}}space1{{else}}space2{{end}}", }) - cons := constraints.MustParse("cpu-cores=12 mem=8G spaces=^space1,space2") + cons := constraints.MustParse("cores=12 mem=8G spaces=^space1,space2") template := state.MachineTemplate{ Series: "quantal", Jobs: []state.MachineJob{state.JobHostUnits}, @@ -453,7 +474,7 @@ // Change something other than the containers and make sure it's // not detected. - err = apiMachine.SetStatus(status.StatusStarted, "not really", nil) + err = apiMachine.SetStatus(status.Started, "not really", nil) c.Assert(err, jc.ErrorIsNil) wc.AssertNoChange() diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/api/singular/api.go juju-core-2.0.0/src/github.com/juju/juju/api/singular/api.go --- juju-core-2.0~beta15/src/github.com/juju/juju/api/singular/api.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/api/singular/api.go 2016-10-13 14:31:49.000000000 +0000 @@ -22,9 +22,9 @@ if !names.IsValidMachine(controllerId) { return nil, errors.NotValidf("controller tag") } - modelTag, err := apiCaller.ModelTag() - if err != nil { - return nil, errors.Trace(err) + modelTag, ok := apiCaller.ModelTag() + if !ok { + return nil, errors.New("cannot use singular API on controller-only connection") } facadeCaller := base.NewFacadeCaller(apiCaller, "Singular") return &API{ diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/api/singular/api_test.go juju-core-2.0.0/src/github.com/juju/juju/api/singular/api_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/api/singular/api_test.go 2016-08-16 08:56:25.000000000 
+0000 +++ juju-core-2.0.0/src/github.com/juju/juju/api/singular/api_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -37,10 +37,10 @@ c.Check(err, gc.ErrorMatches, "controller tag not valid") } -func (s *APISuite) TestBadModelTag(c *gc.C) { +func (s *APISuite) TestControllerOnlyAPI(c *gc.C) { api, err := singular.NewAPI(mockAPICaller{}, machine123) c.Check(api, gc.IsNil) - c.Check(err, gc.ErrorMatches, "no tags for you") + c.Check(err, gc.ErrorMatches, `cannot use singular API on controller-only connection`) } func (s *APISuite) TestNoCalls(c *gc.C) { @@ -181,6 +181,6 @@ base.APICaller } -func (mockAPICaller) ModelTag() (names.ModelTag, error) { - return names.ModelTag{}, errors.New("no tags for you") +func (mockAPICaller) ModelTag() (names.ModelTag, bool) { + return names.ModelTag{}, false } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/api/state.go juju-core-2.0.0/src/github.com/juju/juju/api/state.go --- juju-core-2.0~beta15/src/github.com/juju/juju/api/state.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/api/state.go 2016-10-13 14:31:49.000000000 +0000 @@ -5,6 +5,7 @@ import ( "net" + "net/url" "strconv" "github.com/juju/errors" @@ -32,36 +33,22 @@ // or macaroons. Subsequent requests on the state will act as that entity. // This method is usually called automatically by Open. The machine nonce // should be empty unless logging in as a machine agent. -func (st *state) Login(tag names.Tag, password, nonce string, ms []macaroon.Slice) error { - err := st.loginV3(tag, password, nonce, ms) - return errors.Trace(err) -} - -// loginV2 is retained for testing logins from older clients. 
-func (st *state) loginV2(tag names.Tag, password, nonce string, ms []macaroon.Slice) error { - return st.loginForVersion(tag, password, nonce, ms, 2) -} - -func (st *state) loginV3(tag names.Tag, password, nonce string, ms []macaroon.Slice) error { - return st.loginForVersion(tag, password, nonce, ms, 3) -} - -func (st *state) loginForVersion(tag names.Tag, password, nonce string, macaroons []macaroon.Slice, vers int) error { - var result params.LoginResultV1 +func (st *state) Login(tag names.Tag, password, nonce string, macaroons []macaroon.Slice) error { + var result params.LoginResult request := ¶ms.LoginRequest{ AuthTag: tagToString(tag), Credentials: password, Nonce: nonce, Macaroons: macaroons, } - if tag == nil { + if password == "" { // Add any macaroons from the cookie jar that might work for // authenticating the login request. request.Macaroons = append(request.Macaroons, httpbakery.MacaroonsForURL(st.bakeryClient.Client.Jar, st.cookieURL)..., ) } - err := st.APICall("Admin", vers, "", "Login", request, &result) + err := st.APICall("Admin", 3, "", "Login", request, &result) if err != nil { var resp params.RedirectInfoResult if params.IsRedirect(err) { @@ -95,12 +82,19 @@ MacaroonPath: "/", }, }); err != nil { + cause := errors.Cause(err) + if httpbakery.IsInteractionError(cause) { + // Just inform the user of the reason for the + // failure, e.g. because the username/password + // they presented was invalid. + err = cause.(*httpbakery.InteractionError).Reason + } return errors.Trace(err) } // Add the macaroons that have been saved by HandleError to our login request. 
request.Macaroons = httpbakery.MacaroonsForURL(st.bakeryClient.Client.Jar, st.cookieURL) - result = params.LoginResultV1{} // zero result - err = st.APICall("Admin", vers, "", "Login", request, &result) + result = params.LoginResult{} // zero result + err = st.APICall("Admin", 3, "", "Login", request, &result) if err != nil { return errors.Trace(err) } @@ -109,22 +103,25 @@ } } - var readOnly bool + var controllerAccess string + var modelAccess string if result.UserInfo != nil { tag, err = names.ParseTag(result.UserInfo.Identity) if err != nil { return errors.Trace(err) } - readOnly = result.UserInfo.ReadOnly + controllerAccess = result.UserInfo.ControllerAccess + modelAccess = result.UserInfo.ModelAccess } servers := params.NetworkHostsPorts(result.Servers) if err = st.setLoginResult(loginResultParams{ - tag: tag, - modelTag: result.ModelTag, - controllerTag: result.ControllerTag, - servers: servers, - facades: result.Facades, - readOnly: readOnly, + tag: tag, + modelTag: result.ModelTag, + controllerTag: result.ControllerTag, + servers: servers, + facades: result.Facades, + modelAccess: modelAccess, + controllerAccess: controllerAccess, }); err != nil { return errors.Trace(err) } @@ -136,19 +133,35 @@ } type loginResultParams struct { - tag names.Tag - modelTag string - controllerTag string - readOnly bool - servers [][]network.HostPort - facades []params.FacadeVersions + tag names.Tag + modelTag string + controllerTag string + modelAccess string + controllerAccess string + servers [][]network.HostPort + facades []params.FacadeVersions } func (st *state) setLoginResult(p loginResultParams) error { st.authTag = p.tag - st.modelTag = p.modelTag - st.controllerTag = p.controllerTag - st.readOnly = p.readOnly + var modelTag names.ModelTag + if p.modelTag != "" { + var err error + modelTag, err = names.ParseModelTag(p.modelTag) + if err != nil { + return errors.Annotatef(err, "invalid model tag in login result") + } + } + if modelTag.Id() != st.modelTag.Id() { + 
return errors.Errorf("mismatched model tag in login result (got %q want %q)", modelTag.Id(), st.modelTag.Id()) + } + ctag, err := names.ParseControllerTag(p.controllerTag) + if err != nil { + return errors.Annotatef(err, "invalid controller tag %q returned from login", p.controllerTag) + } + st.controllerTag = ctag + st.controllerAccess = p.controllerAccess + st.modelAccess = p.modelAccess hostPorts, err := addAddress(p.servers, st.addr) if err != nil { @@ -173,10 +186,21 @@ return st.authTag } -// ReadOnly returns whether the authorized user is connected to the model in -// read-only mode. -func (st *state) ReadOnly() bool { - return st.readOnly +// ModelAccess returns the access level of authorized user to the model. +func (st *state) ModelAccess() string { + return st.modelAccess +} + +// ControllerAccess returns the access level of authorized user to the model. +func (st *state) ControllerAccess() string { + return st.controllerAccess +} + +// CookieURL returns the URL that HTTP cookies for the API will be +// associated with. 
+func (st *state) CookieURL() *url.URL { + copy := *st.cookieURL + return © } // slideAddressToFront moves the address at the location (serverIndex, addrIndex) to be diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/api/state_macaroon_test.go juju-core-2.0.0/src/github.com/juju/juju/api/state_macaroon_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/api/state_macaroon_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/api/state_macaroon_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -12,6 +12,7 @@ "github.com/juju/juju/api" apitesting "github.com/juju/juju/api/testing" + "github.com/juju/juju/permission" "github.com/juju/juju/rpc" ) @@ -27,6 +28,7 @@ func (s *macaroonLoginSuite) SetUpTest(c *gc.C) { s.MacaroonSuite.SetUpTest(c) s.AddModelUser(c, testUserName) + s.AddControllerUser(c, testUserName, permission.LoginAccess) info := s.APIInfo(c) info.SkipLogin = true s.client = s.OpenAPI(c, info, nil) @@ -60,7 +62,8 @@ } func (s *macaroonLoginSuite) TestConnectStream(c *gc.C) { - s.PatchValue(api.WebsocketDialConfig, echoURL(c)) + catcher := urlCatcher{} + s.PatchValue(api.WebsocketDialConfig, catcher.recordLocation) dischargeCount := 0 s.DischargerLogin = func() string { @@ -77,13 +80,14 @@ conn, err := s.client.ConnectStream("/path", nil) c.Assert(err, gc.IsNil) defer conn.Close() - connectURL := connectURLFromReader(c, conn) + connectURL := catcher.location c.Assert(connectURL.Path, gc.Equals, "/model/"+s.State.ModelTag().Id()+"/path") c.Assert(dischargeCount, gc.Equals, 1) } func (s *macaroonLoginSuite) TestConnectStreamWithoutLogin(c *gc.C) { - s.PatchValue(api.WebsocketDialConfig, echoURL(c)) + catcher := urlCatcher{} + s.PatchValue(api.WebsocketDialConfig, catcher.recordLocation) conn, err := s.client.ConnectStream("/path", nil) c.Assert(err, gc.ErrorMatches, `cannot use ConnectStream without logging in`) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/api/state_test.go 
juju-core-2.0.0/src/github.com/juju/juju/api/state_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/api/state_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/api/state_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -12,6 +12,7 @@ "gopkg.in/macaroon.v1" "github.com/juju/juju/api" + "github.com/juju/juju/api/modelmanager" "github.com/juju/juju/api/usermanager" jujutesting "github.com/juju/juju/juju/testing" "github.com/juju/juju/network" @@ -77,51 +78,60 @@ }) } -func (s *stateSuite) TestLoginSetsModelTag(c *gc.C) { - env, err := s.State.Model() +func (s *stateSuite) TestTags(c *gc.C) { + model, err := s.State.Model() c.Assert(err, jc.ErrorIsNil) apistate, tag, password := s.OpenAPIWithoutLogin(c) defer apistate.Close() - // We haven't called Login yet, so the ModelTag shouldn't be set. - modelTag, err := apistate.ModelTag() - c.Check(err, gc.ErrorMatches, `"" is not a valid tag`) - c.Check(modelTag, gc.Equals, names.ModelTag{}) + // Even though we haven't called Login, the model tag should + // still be set. + modelTag, ok := apistate.ModelTag() + c.Check(ok, jc.IsTrue) + c.Check(modelTag, gc.Equals, model.ModelTag()) err = apistate.Login(tag, password, "", nil) c.Assert(err, jc.ErrorIsNil) - // Now that we've logged in, ModelTag should be updated correctly. - modelTag, err = apistate.ModelTag() - c.Check(err, jc.ErrorIsNil) - c.Check(modelTag, gc.Equals, env.ModelTag()) - // The controller tag is also set, and since the model is the - // controller model, the uuid is the same. - controllerTag, err := apistate.ControllerTag() - c.Check(err, jc.ErrorIsNil) - c.Check(controllerTag, gc.Equals, env.ModelTag()) -} - -func (s *stateSuite) TestLoginMacaroon(c *gc.C) { - apistate, tag, _ := s.OpenAPIWithoutLogin(c) - defer apistate.Close() - // Use s.APIState, because we can't get at UserManager without logging in. 
- mac, err := usermanager.NewClient(s.APIState).CreateLocalLoginMacaroon(tag.(names.UserTag)) + // Now that we've logged in, ModelTag should still be the same. + modelTag, ok = apistate.ModelTag() + c.Check(ok, jc.IsTrue) + c.Check(modelTag, gc.Equals, model.ModelTag()) + controllerTag := apistate.ControllerTag() + c.Check(controllerTag, gc.Equals, coretesting.ControllerTag) +} + +func (s *stateSuite) TestLoginSetsModelAccess(c *gc.C) { + // The default user has admin access. + c.Assert(s.APIState.ModelAccess(), gc.Equals, "admin") + + manager := usermanager.NewClient(s.OpenControllerAPI(c)) + defer manager.Close() + usertag, _, err := manager.AddUser("ro", "ro", "ro-password") + c.Assert(err, jc.ErrorIsNil) + mmanager := modelmanager.NewClient(s.OpenControllerAPI(c)) + defer mmanager.Close() + modeltag, ok := s.APIState.ModelTag() + c.Assert(ok, jc.IsTrue) + err = mmanager.GrantModel(usertag.Id(), "read", modeltag.Id()) c.Assert(err, jc.ErrorIsNil) - err = apistate.Login(tag, "", "", []macaroon.Slice{{mac}}) - c.Assert(err, jc.ErrorIsNil) - c.Assert(apistate.AuthTag(), gc.Equals, tag) + conn := s.OpenAPIAs(c, usertag, "ro-password") + c.Assert(conn.ModelAccess(), gc.Equals, "read") } -func (s *stateSuite) TestLoginReadOnly(c *gc.C) { - // The default user has read and write access. - c.Assert(s.APIState.ReadOnly(), jc.IsFalse) - - // Check with an user in read-only mode. - modeltag, err := s.APIState.ModelTag() - c.Assert(err, jc.ErrorIsNil) - manager := usermanager.NewClient(s.APIState) - usertag, _, err := manager.AddUser("ro", "ro", "ro-password", "read", modeltag.Id()) +func (s *stateSuite) TestLoginSetsControllerAccess(c *gc.C) { + // The default user has admin access. 
+ c.Assert(s.APIState.ControllerAccess(), gc.Equals, "superuser") + + manager := usermanager.NewClient(s.OpenControllerAPI(c)) + defer manager.Close() + usertag, _, err := manager.AddUser("ro", "ro", "ro-password") + c.Assert(err, jc.ErrorIsNil) + mmanager := modelmanager.NewClient(s.OpenControllerAPI(c)) + defer mmanager.Close() + modeltag, ok := s.APIState.ModelTag() + c.Assert(ok, jc.IsTrue) + err = mmanager.GrantModel(usertag.Id(), "read", modeltag.Id()) c.Assert(err, jc.ErrorIsNil) conn := s.OpenAPIAs(c, usertag, "ro-password") - c.Assert(conn.ReadOnly(), jc.IsTrue) + c.Assert(conn.ControllerAccess(), gc.Equals, "login") } func (s *stateSuite) TestLoginMacaroonInvalidId(c *gc.C) { @@ -130,17 +140,7 @@ mac, err := macaroon.New([]byte("root-key"), "id", "juju") c.Assert(err, jc.ErrorIsNil) err = apistate.Login(tag, "", "", []macaroon.Slice{{mac}}) - c.Assert(err, gc.ErrorMatches, "invalid entity name or password \\(unauthorized access\\)") -} - -func (s *stateSuite) TestLoginMacaroonInvalidUser(c *gc.C) { - apistate, tag, _ := s.OpenAPIWithoutLogin(c) - defer apistate.Close() - // Use s.APIState, because we can't get at UserManager without logging in. 
- mac, err := usermanager.NewClient(s.APIState).CreateLocalLoginMacaroon(tag.(names.UserTag)) - c.Assert(err, jc.ErrorIsNil) - err = apistate.Login(names.NewUserTag("bob@local"), "", "", []macaroon.Slice{{mac}}) - c.Assert(err, gc.ErrorMatches, "invalid entity name or password \\(unauthorized access\\)") + c.Assert(err, gc.ErrorMatches, "interaction required but not possible") } func (s *stateSuite) TestLoginTracksFacadeVersions(c *gc.C) { diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/api/testing/environwatcher.go juju-core-2.0.0/src/github.com/juju/juju/api/testing/environwatcher.go --- juju-core-2.0~beta15/src/github.com/juju/juju/api/testing/environwatcher.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/api/testing/environwatcher.go 2016-10-13 14:31:49.000000000 +0000 @@ -7,38 +7,29 @@ jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" - "github.com/juju/juju/environs" "github.com/juju/juju/environs/config" "github.com/juju/juju/state" - "github.com/juju/juju/state/stateenvirons" "github.com/juju/juju/watcher" "github.com/juju/juju/watcher/watchertest" ) -const ( - HasSecrets = true - NoSecrets = false -) - type ModelWatcherFacade interface { WatchForModelConfigChanges() (watcher.NotifyWatcher, error) ModelConfig() (*config.Config, error) } type ModelWatcherTests struct { - facade ModelWatcherFacade - state *state.State - hasSecrets bool + facade ModelWatcherFacade + state *state.State } func NewModelWatcherTests( facade ModelWatcherFacade, st *state.State, - hasSecrets bool) *ModelWatcherTests { +) *ModelWatcherTests { return &ModelWatcherTests{ - facade: facade, - state: st, - hasSecrets: hasSecrets, + facade: facade, + state: st, } } @@ -49,21 +40,6 @@ conf, err := s.facade.ModelConfig() c.Assert(err, jc.ErrorIsNil) - // If the facade doesn't have secrets, we need to replace the config - // values in our model to compare against with the secrets replaced. 
- if !s.hasSecrets { - env, err := stateenvirons.GetNewEnvironFunc(environs.New)(s.state) - c.Assert(err, jc.ErrorIsNil) - secretAttrs, err := env.Provider().SecretAttrs(envConfig) - c.Assert(err, jc.ErrorIsNil) - secrets := make(map[string]interface{}) - for key := range secretAttrs { - secrets[key] = "not available" - } - envConfig, err = envConfig.Apply(secrets) - c.Assert(err, jc.ErrorIsNil) - } - c.Assert(conf, jc.DeepEquals, envConfig) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/api/testing/macaroonsuite.go juju-core-2.0.0/src/github.com/juju/juju/api/testing/macaroonsuite.go --- juju-core-2.0~beta15/src/github.com/juju/juju/api/testing/macaroonsuite.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/api/testing/macaroonsuite.go 2016-10-13 14:31:49.000000000 +0000 @@ -9,6 +9,7 @@ "net/url" "github.com/juju/errors" + jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" "gopkg.in/juju/names.v2" "gopkg.in/macaroon-bakery.v1/bakery/checkers" @@ -18,6 +19,8 @@ "github.com/juju/juju/api" "github.com/juju/juju/controller" jujutesting "github.com/juju/juju/juju/testing" + "github.com/juju/juju/permission" + "github.com/juju/juju/state" "github.com/juju/juju/testing/factory" ) @@ -75,6 +78,17 @@ }) } +// AddControllerUser is a convenience funcation that adds +// a controller user with the specified access. +func (s *MacaroonSuite) AddControllerUser(c *gc.C, username string, access permission.Access) { + _, err := s.State.AddControllerUser(state.UserAccessSpec{ + User: names.NewUserTag(username), + CreatedBy: s.AdminUserTag(c), + Access: access, + }) + c.Assert(err, jc.ErrorIsNil) +} + // OpenAPI opens a connection to the API using the given information. // and empty DialOpts. If info is nil, s.APIInfo(c) is used. 
// If jar is non-nil, it will be used as the store for the cookies created diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/api/undertaker/undertaker.go juju-core-2.0.0/src/github.com/juju/juju/api/undertaker/undertaker.go --- juju-core-2.0~beta15/src/github.com/juju/juju/api/undertaker/undertaker.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/api/undertaker/undertaker.go 2016-10-13 14:31:49.000000000 +0000 @@ -25,9 +25,9 @@ // NewClient creates a new client for accessing the undertaker API. func NewClient(caller base.APICaller, newWatcher NewWatcherFunc) (*Client, error) { - modelTag, err := caller.ModelTag() - if err != nil { - return nil, errors.Trace(err) + modelTag, ok := caller.ModelTag() + if !ok { + return nil, errors.New("undertaker client is not appropriate for controller-only API") } return &Client{ modelTag: modelTag, diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/api/unitassigner/unitassigner_test.go juju-core-2.0.0/src/github.com/juju/juju/api/unitassigner/unitassigner_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/api/unitassigner/unitassigner_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/api/unitassigner/unitassigner_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -115,11 +115,20 @@ func (f *fakeWatchCaller) APICall(objType string, version int, id, request string, param, response interface{}) error { f.Lock() defer f.Unlock() - f.request = request - f.params = param - _, ok := response.(*params.StringsWatchResult) - if !ok { - f.c.Errorf("Expected *params.StringsWatchResult as response, but was %#v", response) + + // We only care for the first request as that is all the tests + // assert on. The watcher (StringsWatcher) is continuously + // running and this function gets called repeatedly + // overwriting f.request leading to intermittent failures. 
+ // Fixes: https://bugs.launchpad.net/juju/+bug/1606302 + + if f.request == "" { + f.request = request + f.params = param + _, ok := response.(*params.StringsWatchResult) + if !ok { + f.c.Errorf("Expected *params.StringsWatchResult as response, but was %#v", response) + } } return f.err } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/api/uniter/application_test.go juju-core-2.0.0/src/github.com/juju/juju/api/uniter/application_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/api/uniter/application_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/api/uniter/application_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -126,20 +126,20 @@ message := "a test message" stat, err := s.wordpressService.Status() c.Assert(err, jc.ErrorIsNil) - c.Assert(stat.Status, gc.Not(gc.Equals), status.StatusActive) + c.Assert(stat.Status, gc.Not(gc.Equals), status.Active) c.Assert(stat.Message, gc.Not(gc.Equals), message) - err = s.apiService.SetStatus(s.wordpressUnit.Name(), status.StatusActive, message, map[string]interface{}{}) + err = s.apiService.SetStatus(s.wordpressUnit.Name(), status.Active, message, map[string]interface{}{}) c.Check(err, gc.ErrorMatches, `"wordpress/0" is not leader of "wordpress"`) s.claimLeadership(c, s.wordpressUnit, s.wordpressService) - err = s.apiService.SetStatus(s.wordpressUnit.Name(), status.StatusActive, message, map[string]interface{}{}) + err = s.apiService.SetStatus(s.wordpressUnit.Name(), status.Active, message, map[string]interface{}{}) c.Check(err, jc.ErrorIsNil) stat, err = s.wordpressService.Status() c.Check(err, jc.ErrorIsNil) - c.Check(stat.Status, gc.Equals, status.StatusActive) + c.Check(stat.Status, gc.Equals, status.Active) c.Check(stat.Message, gc.Equals, message) } @@ -147,12 +147,12 @@ message := "a test message" stat, err := s.wordpressService.Status() c.Assert(err, jc.ErrorIsNil) - c.Assert(stat.Status, gc.Not(gc.Equals), status.StatusActive) + c.Assert(stat.Status, 
gc.Not(gc.Equals), status.Active) c.Assert(stat.Message, gc.Not(gc.Equals), message) now := time.Now() sInfo := status.StatusInfo{ - Status: status.StatusActive, + Status: status.Active, Message: message, Data: map[string]interface{}{}, Since: &now, @@ -162,7 +162,7 @@ stat, err = s.wordpressService.Status() c.Check(err, jc.ErrorIsNil) - c.Check(stat.Status, gc.Equals, status.StatusActive) + c.Check(stat.Status, gc.Equals, status.Active) c.Check(stat.Message, gc.Equals, message) result, err := s.apiService.Status(s.wordpressUnit.Name()) @@ -171,7 +171,7 @@ s.claimLeadership(c, s.wordpressUnit, s.wordpressService) result, err = s.apiService.Status(s.wordpressUnit.Name()) c.Check(err, jc.ErrorIsNil) - c.Check(result.Application.Status, gc.Equals, status.StatusActive.String()) + c.Check(result.Application.Status, gc.Equals, status.Active.String()) } func (s *serviceSuite) claimLeadership(c *gc.C, unit *state.Unit, service *state.Application) { diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/api/uniter/leadership.go juju-core-2.0.0/src/github.com/juju/juju/api/uniter/leadership.go --- juju-core-2.0~beta15/src/github.com/juju/juju/api/uniter/leadership.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/api/uniter/leadership.go 2016-10-13 14:31:49.000000000 +0000 @@ -15,21 +15,21 @@ func NewLeadershipSettingsAccessor( caller FacadeCallFn, newWatcher NewNotifyWatcherFn, - checkApiVersion CheckApiVersionFn, + checkAPIVersion CheckAPIVersionFn, ) *LeadershipSettingsAccessor { - return &LeadershipSettingsAccessor{caller, newWatcher, checkApiVersion} + return &LeadershipSettingsAccessor{caller, newWatcher, checkAPIVersion} } type FacadeCallFn func(request string, params, response interface{}) error type NewNotifyWatcherFn func(params.NotifyWatchResult) watcher.NotifyWatcher -type CheckApiVersionFn func(functionName string) error +type CheckAPIVersionFn func(functionName string) error // LeadershipSettingsAccessor provides a type that 
can make RPC calls // to a service which can read, write, and watch leadership settings. type LeadershipSettingsAccessor struct { facadeCaller FacadeCallFn newNotifyWatcher NewNotifyWatcherFn - checkApiVersion CheckApiVersionFn + checkAPIVersion CheckAPIVersionFn } // Merge merges the provided settings into the leadership settings for @@ -37,7 +37,7 @@ // this operation. func (lsa *LeadershipSettingsAccessor) Merge(serviceId string, settings map[string]string) error { - if err := lsa.checkApiVersion("Merge"); err != nil { + if err := lsa.checkAPIVersion("Merge"); err != nil { return errors.Annotatef(err, "cannot access leadership api") } @@ -58,7 +58,7 @@ // ID. Anyone may perform this operation. func (lsa *LeadershipSettingsAccessor) Read(serviceId string) (map[string]string, error) { - if err := lsa.checkApiVersion("Read"); err != nil { + if err := lsa.checkAPIVersion("Read"); err != nil { return nil, errors.Annotatef(err, "cannot access leadership api") } @@ -79,7 +79,7 @@ // for leadership settings changes to be made for a given service ID. 
func (lsa *LeadershipSettingsAccessor) WatchLeadershipSettings(serviceId string) (watcher.NotifyWatcher, error) { - if err := lsa.checkApiVersion("WatchLeadershipSettings"); err != nil { + if err := lsa.checkAPIVersion("WatchLeadershipSettings"); err != nil { return nil, errors.Annotatef(err, "cannot access leadership api") } var results params.NotifyWatchResults diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/api/uniter/state_test.go juju-core-2.0.0/src/github.com/juju/juju/api/uniter/state_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/api/uniter/state_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/api/uniter/state_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -24,7 +24,7 @@ func (s *stateSuite) SetUpTest(c *gc.C) { s.uniterSuite.SetUpTest(c) s.APIAddresserTests = apitesting.NewAPIAddresserTests(s.uniter, s.BackingState) - s.ModelWatcherTests = apitesting.NewModelWatcherTests(s.uniter, s.BackingState, apitesting.NoSecrets) + s.ModelWatcherTests = apitesting.NewModelWatcherTests(s.uniter, s.BackingState) } func (s *stateSuite) TestProviderType(c *gc.C) { diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/api/uniter/uniter.go juju-core-2.0.0/src/github.com/juju/juju/api/uniter/uniter.go --- juju-core-2.0~beta15/src/github.com/juju/juju/api/uniter/uniter.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/api/uniter/uniter.go 2016-10-13 14:31:49.000000000 +0000 @@ -382,9 +382,9 @@ // ErrIfNotVersionFn returns a function which can be used to check for // the minimum supported version, and, if appropriate, generate an // error. -func ErrIfNotVersionFn(minVersion int, bestApiVersion int) func(string) error { +func ErrIfNotVersionFn(minVersion int, bestAPIVersion int) func(string) error { return func(fnName string) error { - if minVersion <= bestApiVersion { + if minVersion <= bestAPIVersion { return nil } return errors.NotImplementedf("%s(...) 
requires v%d+", fnName, minVersion) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/api/uniter/unit_test.go juju-core-2.0.0/src/github.com/juju/juju/api/uniter/unit_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/api/uniter/unit_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/api/uniter/unit_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -63,22 +63,22 @@ func (s *unitSuite) TestSetAgentStatus(c *gc.C) { statusInfo, err := s.wordpressUnit.AgentStatus() c.Assert(err, jc.ErrorIsNil) - c.Assert(statusInfo.Status, gc.Equals, status.StatusAllocating) + c.Assert(statusInfo.Status, gc.Equals, status.Allocating) c.Assert(statusInfo.Message, gc.Equals, "") c.Assert(statusInfo.Data, gc.HasLen, 0) unitStatusInfo, err := s.wordpressUnit.Status() c.Assert(err, jc.ErrorIsNil) - c.Assert(unitStatusInfo.Status, gc.Equals, status.StatusUnknown) - c.Assert(unitStatusInfo.Message, gc.Equals, "Waiting for agent initialization to finish") + c.Assert(unitStatusInfo.Status, gc.Equals, status.Waiting) + c.Assert(unitStatusInfo.Message, gc.Equals, "waiting for machine") c.Assert(unitStatusInfo.Data, gc.HasLen, 0) - err = s.apiUnit.SetAgentStatus(status.StatusIdle, "blah", nil) + err = s.apiUnit.SetAgentStatus(status.Idle, "blah", nil) c.Assert(err, jc.ErrorIsNil) statusInfo, err = s.wordpressUnit.AgentStatus() c.Assert(err, jc.ErrorIsNil) - c.Assert(statusInfo.Status, gc.Equals, status.StatusIdle) + c.Assert(statusInfo.Status, gc.Equals, status.Idle) c.Assert(statusInfo.Message, gc.Equals, "blah") c.Assert(statusInfo.Data, gc.HasLen, 0) c.Assert(statusInfo.Since, gc.NotNil) @@ -86,30 +86,30 @@ // Ensure that unit has not changed. 
unitStatusInfo, err = s.wordpressUnit.Status() c.Assert(err, jc.ErrorIsNil) - c.Assert(unitStatusInfo.Status, gc.Equals, status.StatusUnknown) - c.Assert(unitStatusInfo.Message, gc.Equals, "Waiting for agent initialization to finish") + c.Assert(unitStatusInfo.Status, gc.Equals, status.Waiting) + c.Assert(unitStatusInfo.Message, gc.Equals, "waiting for machine") c.Assert(unitStatusInfo.Data, gc.HasLen, 0) } func (s *unitSuite) TestSetUnitStatus(c *gc.C) { statusInfo, err := s.wordpressUnit.Status() c.Assert(err, jc.ErrorIsNil) - c.Assert(statusInfo.Status, gc.Equals, status.StatusUnknown) - c.Assert(statusInfo.Message, gc.Equals, "Waiting for agent initialization to finish") + c.Assert(statusInfo.Status, gc.Equals, status.Waiting) + c.Assert(statusInfo.Message, gc.Equals, "waiting for machine") c.Assert(statusInfo.Data, gc.HasLen, 0) agentStatusInfo, err := s.wordpressUnit.AgentStatus() c.Assert(err, jc.ErrorIsNil) - c.Assert(agentStatusInfo.Status, gc.Equals, status.StatusAllocating) + c.Assert(agentStatusInfo.Status, gc.Equals, status.Allocating) c.Assert(agentStatusInfo.Message, gc.Equals, "") c.Assert(agentStatusInfo.Data, gc.HasLen, 0) - err = s.apiUnit.SetUnitStatus(status.StatusActive, "blah", nil) + err = s.apiUnit.SetUnitStatus(status.Active, "blah", nil) c.Assert(err, jc.ErrorIsNil) statusInfo, err = s.wordpressUnit.Status() c.Assert(err, jc.ErrorIsNil) - c.Assert(statusInfo.Status, gc.Equals, status.StatusActive) + c.Assert(statusInfo.Status, gc.Equals, status.Active) c.Assert(statusInfo.Message, gc.Equals, "blah") c.Assert(statusInfo.Data, gc.HasLen, 0) c.Assert(statusInfo.Since, gc.NotNil) @@ -117,7 +117,7 @@ // Ensure unit's agent has not changed. 
agentStatusInfo, err = s.wordpressUnit.AgentStatus() c.Assert(err, jc.ErrorIsNil) - c.Assert(agentStatusInfo.Status, gc.Equals, status.StatusAllocating) + c.Assert(agentStatusInfo.Status, gc.Equals, status.Allocating) c.Assert(agentStatusInfo.Message, gc.Equals, "") c.Assert(agentStatusInfo.Data, gc.HasLen, 0) } @@ -125,7 +125,7 @@ func (s *unitSuite) TestUnitStatus(c *gc.C) { now := time.Now() sInfo := status.StatusInfo{ - Status: status.StatusMaintenance, + Status: status.Maintenance, Message: "blah", Since: &now, } @@ -137,7 +137,7 @@ c.Assert(result.Since, gc.NotNil) result.Since = nil c.Assert(result, gc.DeepEquals, params.StatusResult{ - Status: status.StatusMaintenance.String(), + Status: status.Maintenance.String(), Info: "blah", Data: map[string]interface{}{}, }) @@ -229,7 +229,7 @@ // Change something other than the lifecycle and make sure it's // not detected. - err = s.apiUnit.SetAgentStatus(status.StatusIdle, "not really", nil) + err = s.apiUnit.SetAgentStatus(status.Idle, "not really", nil) c.Assert(err, jc.ErrorIsNil) wc.AssertNoChange() diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/api/usermanager/client.go juju-core-2.0.0/src/github.com/juju/juju/api/usermanager/client.go --- juju-core-2.0~beta15/src/github.com/juju/juju/api/usermanager/client.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/api/usermanager/client.go 2016-10-13 14:31:49.000000000 +0000 @@ -7,14 +7,11 @@ "fmt" "strings" - "gopkg.in/macaroon.v1" - "github.com/juju/errors" "github.com/juju/loggo" "gopkg.in/juju/names.v2" "github.com/juju/juju/api/base" - "github.com/juju/juju/api/modelmanager" "github.com/juju/juju/apiserver/params" ) @@ -36,35 +33,21 @@ // AddUser creates a new local user in the controller, sharing with that user any specified models. 
func (c *Client) AddUser( - username, displayName, password, access string, modelUUIDs ...string, + username, displayName, password string, ) (_ names.UserTag, secretKey []byte, _ error) { if !names.IsValidUser(username) { return names.UserTag{}, nil, fmt.Errorf("invalid user name %q", username) } - modelTags := make([]string, len(modelUUIDs)) - for i, uuid := range modelUUIDs { - modelTags[i] = names.NewModelTag(uuid).String() - } - - var accessPermission params.UserAccessPermission - var err error - if len(modelTags) > 0 { - accessPermission, err = modelmanager.ParseModelAccess(access) - if err != nil { - return names.UserTag{}, nil, errors.Trace(err) - } - } userArgs := params.AddUsers{ Users: []params.AddUser{{ - Username: username, - DisplayName: displayName, - Password: password, - SharedModelTags: modelTags, - ModelAccess: accessPermission}}, + Username: username, + DisplayName: displayName, + Password: password, + }}, } var results params.AddUserResults - err = c.facade.FacadeCall("AddUser", userArgs, &results) + err := c.facade.FacadeCall("AddUser", userArgs, &results) if err != nil { return names.UserTag{}, nil, errors.Trace(err) } @@ -163,6 +146,8 @@ errorStrings = append(errorStrings, annotated.Error()) } } + // TODO(wallyworld) - we should return these errors to the caller so that any + // users which are successfully found can be handled. if len(errorStrings) > 0 { return nil, errors.New(strings.Join(errorStrings, ", ")) } @@ -195,22 +180,3 @@ } return results.OneError() } - -// CreateLocalLoginMacaroon creates a local login macaroon for the -// authenticated user. 
-func (c *Client) CreateLocalLoginMacaroon(tag names.UserTag) (*macaroon.Macaroon, error) { - args := params.Entities{Entities: []params.Entity{{tag.String()}}} - var results params.MacaroonResults - if err := c.facade.FacadeCall("CreateLocalLoginMacaroon", args, &results); err != nil { - return nil, errors.Trace(err) - } - if n := len(results.Results); n != 1 { - logger.Errorf("expected 1 result, got %#v", results) - return nil, errors.Errorf("expected 1 result, got %d", n) - } - result := results.Results[0] - if result.Error != nil { - return nil, errors.Trace(result.Error) - } - return result.Result, nil -} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/api/usermanager/client_test.go juju-core-2.0.0/src/github.com/juju/juju/api/usermanager/client_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/api/usermanager/client_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/api/usermanager/client_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -7,11 +7,9 @@ "github.com/juju/errors" jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" - "gopkg.in/juju/names.v2" "github.com/juju/juju/api/usermanager" "github.com/juju/juju/apiserver/params" - "github.com/juju/juju/core/description" jujutesting "github.com/juju/juju/juju/testing" "github.com/juju/juju/testing/factory" ) @@ -26,12 +24,17 @@ func (s *usermanagerSuite) SetUpTest(c *gc.C) { s.JujuConnSuite.SetUpTest(c) - s.usermanager = usermanager.NewClient(s.APIState) + s.usermanager = usermanager.NewClient(s.OpenControllerAPI(c)) c.Assert(s.usermanager, gc.NotNil) } +func (s *usermanagerSuite) TearDownTest(c *gc.C) { + s.usermanager.Close() + s.JujuConnSuite.TearDownTest(c) +} + func (s *usermanagerSuite) TestAddUser(c *gc.C) { - tag, _, err := s.usermanager.AddUser("foobar", "Foo Bar", "password", "") + tag, _, err := s.usermanager.AddUser("foobar", "Foo Bar", "password") c.Assert(err, jc.ErrorIsNil) user, err := s.State.User(tag) @@ -41,42 +44,10 @@ 
c.Assert(user.PasswordValid("password"), jc.IsTrue) } -func (s *usermanagerSuite) TestAddUserWithModelAccess(c *gc.C) { - sharedModelState := s.Factory.MakeModel(c, nil) - defer sharedModelState.Close() - - foobarTag, _, err := s.usermanager.AddUser("foobar", "Foo Bar", "password", "read", sharedModelState.ModelUUID()) - c.Assert(err, jc.ErrorIsNil) - - altAdminTag, _, err := s.usermanager.AddUser("altadmin", "Alt Admin", "password", "write", sharedModelState.ModelUUID()) - c.Assert(err, jc.ErrorIsNil) - - // Check model is shared with expected users. - sharedModel, err := sharedModelState.Model() - c.Assert(err, jc.ErrorIsNil) - users, err := sharedModel.Users() - c.Assert(err, jc.ErrorIsNil) - c.Assert(users, gc.HasLen, 3) - var modelUserTags = make([]names.UserTag, len(users)) - for i, u := range users { - modelUserTags[i] = u.UserTag - if u.UserTag.Name() == "foobar" { - c.Assert(u.Access, gc.Equals, description.ReadAccess) - } else { - c.Assert(u.Access, gc.Not(gc.Equals), description.ReadAccess) - } - } - c.Assert(modelUserTags, jc.SameContents, []names.UserTag{ - foobarTag, - altAdminTag, - names.NewLocalUserTag("admin"), - }) -} - func (s *usermanagerSuite) TestAddExistingUser(c *gc.C) { s.Factory.MakeUser(c, &factory.UserParams{Name: "foobar"}) - _, _, err := s.usermanager.AddUser("foobar", "Foo Bar", "password", "read") + _, _, err := s.usermanager.AddUser("foobar", "Foo Bar", "password") c.Assert(err, gc.ErrorMatches, "failed to create user: user already exists") } @@ -86,7 +57,7 @@ return errors.New("call error") }, ) - _, _, err := s.usermanager.AddUser("foobar", "Foo Bar", "password", "write") + _, _, err := s.usermanager.AddUser("foobar", "Foo Bar", "password") c.Assert(err, gc.ErrorMatches, "call error") } @@ -100,12 +71,12 @@ return errors.New("wrong result type") }, ) - _, _, err := s.usermanager.AddUser("foobar", "Foo Bar", "password", "read") + _, _, err := s.usermanager.AddUser("foobar", "Foo Bar", "password") c.Assert(err, gc.ErrorMatches, 
"expected 1 result, got 2") } func (s *usermanagerSuite) TestRemoveUser(c *gc.C) { - tag, _, err := s.usermanager.AddUser("jjam", "Jimmy Jam", "password", "") + tag, _, err := s.usermanager.AddUser("jjam", "Jimmy Jam", "password") c.Assert(err, jc.ErrorIsNil) // Ensure the user exists. @@ -174,6 +145,7 @@ { Username: "foobar", DisplayName: "Foo Bar", + Access: "login", CreatedBy: s.AdminUserTag(c).Name(), DateCreated: user.DateCreated(), }, @@ -232,7 +204,7 @@ func (s *usermanagerSuite) TestSetUserPasswordCanonical(c *gc.C) { tag := s.AdminUserTag(c) - err := s.usermanager.SetPassword(tag.Canonical(), "new-password") + err := s.usermanager.SetPassword(tag.Id(), "new-password") c.Assert(err, jc.ErrorIsNil) user, err := s.State.User(tag) c.Assert(err, jc.ErrorIsNil) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/api/watcher/watcher.go juju-core-2.0.0/src/github.com/juju/juju/api/watcher/watcher.go --- juju-core-2.0~beta15/src/github.com/juju/juju/api/watcher/watcher.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/api/watcher/watcher.go 2016-10-13 14:31:49.000000000 +0000 @@ -8,7 +8,7 @@ "github.com/juju/errors" "github.com/juju/loggo" - "launchpad.net/tomb" + "gopkg.in/tomb.v1" "github.com/juju/juju/api/base" "github.com/juju/juju/apiserver/params" diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/api/watcher/watcher_test.go juju-core-2.0.0/src/github.com/juju/juju/api/watcher/watcher_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/api/watcher/watcher_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/api/watcher/watcher_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -320,17 +320,17 @@ assertChange("", migration.NONE) // Now create a migration, should trigger watcher. 
- spec := state.ModelMigrationSpec{ + spec := state.MigrationSpec{ InitiatedBy: names.NewUserTag("someone"), TargetInfo: migration.TargetInfo{ - ControllerTag: names.NewModelTag(utils.MustNewUUID().String()), + ControllerTag: names.NewControllerTag(utils.MustNewUUID().String()), Addrs: []string{"1.2.3.4:5"}, CACert: "cert", AuthTag: names.NewUserTag("dog"), Password: "sekret", }, } - mig, err := hostedState.CreateModelMigration(spec) + mig, err := hostedState.CreateMigration(spec) c.Assert(err, jc.ErrorIsNil) assertChange(mig.Id(), migration.QUIESCE) @@ -341,7 +341,7 @@ assertChange(mig.Id(), migration.ABORTDONE) // Start a new migration, this should also trigger. - mig2, err := hostedState.CreateModelMigration(spec) + mig2, err := hostedState.CreateMigration(spec) c.Assert(err, jc.ErrorIsNil) assertChange(mig2.Id(), migration.QUIESCE) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/action/action.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/action/action.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/action/action.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/action/action.go 2016-10-13 14:31:49.000000000 +0000 @@ -10,6 +10,7 @@ "github.com/juju/juju/apiserver/common" "github.com/juju/juju/apiserver/facade" "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/permission" "github.com/juju/juju/state" ) @@ -39,9 +40,46 @@ }, nil } +func (a *ActionAPI) checkCanRead() error { + canRead, err := a.authorizer.HasPermission(permission.ReadAccess, a.state.ModelTag()) + if err != nil { + return errors.Trace(err) + } + if !canRead { + return common.ErrPerm + } + return nil +} + +func (a *ActionAPI) checkCanWrite() error { + canWrite, err := a.authorizer.HasPermission(permission.WriteAccess, a.state.ModelTag()) + if err != nil { + return errors.Trace(err) + } + if !canWrite { + return common.ErrPerm + } + return nil +} + +func (a *ActionAPI) checkCanAdmin() error { + 
canAdmin, err := a.authorizer.HasPermission(permission.AdminAccess, a.state.ModelTag()) + if err != nil { + return errors.Trace(err) + } + if !canAdmin { + return common.ErrPerm + } + return nil +} + // Actions takes a list of ActionTags, and returns the full Action for // each ID. func (a *ActionAPI) Actions(arg params.Entities) (params.ActionResults, error) { + if err := a.checkCanRead(); err != nil { + return params.ActionResults{}, errors.Trace(err) + } + response := params.ActionResults{Results: make([]params.ActionResult, len(arg.Entities))} for i, entity := range arg.Entities { currentResult := &response.Results[i] @@ -73,6 +111,10 @@ // FindActionTagsByPrefix takes a list of string prefixes and finds // corresponding ActionTags that match that prefix. func (a *ActionAPI) FindActionTagsByPrefix(arg params.FindTags) (params.FindTagsResults, error) { + if err := a.checkCanRead(); err != nil { + return params.FindTagsResults{}, errors.Trace(err) + } + response := params.FindTagsResults{Matches: make(map[string][]params.Entity)} for _, prefix := range arg.Prefixes { found := a.state.FindActionTagsByPrefix(prefix) @@ -86,6 +128,10 @@ } func (a *ActionAPI) FindActionsByNames(arg params.FindActionsByNames) (params.ActionsByNames, error) { + if err := a.checkCanWrite(); err != nil { + return params.ActionsByNames{}, errors.Trace(err) + } + response := params.ActionsByNames{Actions: make([]params.ActionsByName, len(arg.ActionNames))} for i, name := range arg.ActionNames { currentResult := &response.Actions[i] @@ -114,6 +160,10 @@ // enqueued Action, or an error if there was a problem enqueueing the // Action. 
func (a *ActionAPI) Enqueue(arg params.Actions) (params.ActionResults, error) { + if err := a.checkCanWrite(); err != nil { + return params.ActionResults{}, errors.Trace(err) + } + if err := a.check.ChangeAllowed(); err != nil { return params.ActionResults{}, errors.Trace(err) } @@ -142,6 +192,10 @@ // returns all of the Actions that have been enqueued or run by each of // those Entities. func (a *ActionAPI) ListAll(arg params.Entities) (params.ActionsByReceivers, error) { + if err := a.checkCanRead(); err != nil { + return params.ActionsByReceivers{}, errors.Trace(err) + } + return a.internalList(arg, combine(pendingActions, runningActions, completedActions)) } @@ -149,6 +203,10 @@ // and returns all of the Actions that are enqueued for each of those // Entities. func (a *ActionAPI) ListPending(arg params.Entities) (params.ActionsByReceivers, error) { + if err := a.checkCanRead(); err != nil { + return params.ActionsByReceivers{}, errors.Trace(err) + } + return a.internalList(arg, pendingActions) } @@ -156,6 +214,10 @@ // returns all of the Actions that have are running on each of those // Entities. func (a *ActionAPI) ListRunning(arg params.Entities) (params.ActionsByReceivers, error) { + if err := a.checkCanRead(); err != nil { + return params.ActionsByReceivers{}, errors.Trace(err) + } + return a.internalList(arg, runningActions) } @@ -163,11 +225,19 @@ // and returns all of the Actions that have been run on each of those // Entities. func (a *ActionAPI) ListCompleted(arg params.Entities) (params.ActionsByReceivers, error) { + if err := a.checkCanRead(); err != nil { + return params.ActionsByReceivers{}, errors.Trace(err) + } + return a.internalList(arg, completedActions) } // Cancel attempts to cancel enqueued Actions from running. 
func (a *ActionAPI) Cancel(arg params.Entities) (params.ActionResults, error) { + if err := a.checkCanWrite(); err != nil { + return params.ActionResults{}, errors.Trace(err) + } + if err := a.check.ChangeAllowed(); err != nil { return params.ActionResults{}, errors.Trace(err) } @@ -210,6 +280,10 @@ // services. func (a *ActionAPI) ApplicationsCharmsActions(args params.Entities) (params.ApplicationsCharmActionsResults, error) { result := params.ApplicationsCharmActionsResults{Results: make([]params.ApplicationCharmActionsResult, len(args.Entities))} + if err := a.checkCanWrite(); err != nil { + return result, errors.Trace(err) + } + for i, entity := range args.Entities { currentResult := &result.Results[i] svcTag, err := names.ParseApplicationTag(entity.Tag) @@ -280,7 +354,7 @@ for _, fn := range funcs { items, err := fn(ar) if err != nil { - return result, err + return result, errors.Trace(err) } result = append(result, items...) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/action/run.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/action/run.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/action/run.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/action/run.go 2016-10-13 14:31:49.000000000 +0000 @@ -44,6 +44,9 @@ // Run the commands specified on the machines identified through the // list of machines, units and services. func (a *ActionAPI) Run(run params.RunParams) (results params.ActionResults, err error) { + if err := a.checkCanAdmin(); err != nil { + return results, err + } if err := a.check.ChangeAllowed(); err != nil { return results, errors.Trace(err) } @@ -68,6 +71,10 @@ // RunOnAllMachines attempts to run the specified command on all the machines. 
func (a *ActionAPI) RunOnAllMachines(run params.RunParams) (results params.ActionResults, err error) { + if err := a.checkCanAdmin(); err != nil { + return results, err + } + if err := a.check.ChangeAllowed(); err != nil { return results, errors.Trace(err) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/action/run_test.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/action/run_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/action/run_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/action/run_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -7,8 +7,10 @@ "github.com/juju/errors" jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" + "gopkg.in/juju/names.v2" "github.com/juju/juju/apiserver/action" + "github.com/juju/juju/apiserver/common" commontesting "github.com/juju/juju/apiserver/common/testing" "github.com/juju/juju/apiserver/params" apiservertesting "github.com/juju/juju/apiserver/testing" @@ -240,3 +242,39 @@ }) c.Assert(called, jc.IsTrue) } + +func (s *runSuite) TestRunRequiresAdmin(c *gc.C) { + alpha := names.NewUserTag("alpha@bravo") + auth := apiservertesting.FakeAuthorizer{ + Tag: alpha, + HasWriteTag: alpha, + } + client, err := action.NewActionAPI(s.State, nil, auth) + c.Assert(err, jc.ErrorIsNil) + _, err = client.Run(params.RunParams{}) + c.Assert(errors.Cause(err), gc.Equals, common.ErrPerm) + + auth.AdminTag = alpha + client, err = action.NewActionAPI(s.State, nil, auth) + c.Assert(err, jc.ErrorIsNil) + _, err = client.Run(params.RunParams{}) + c.Assert(err, jc.ErrorIsNil) +} + +func (s *runSuite) TestRunOnAllMachinesRequiresAdmin(c *gc.C) { + alpha := names.NewUserTag("alpha@bravo") + auth := apiservertesting.FakeAuthorizer{ + Tag: alpha, + HasWriteTag: alpha, + } + client, err := action.NewActionAPI(s.State, nil, auth) + c.Assert(err, jc.ErrorIsNil) + _, err = client.RunOnAllMachines(params.RunParams{}) + c.Assert(errors.Cause(err), gc.Equals, 
common.ErrPerm) + + auth.AdminTag = alpha + client, err = action.NewActionAPI(s.State, nil, auth) + c.Assert(err, jc.ErrorIsNil) + _, err = client.RunOnAllMachines(params.RunParams{}) + c.Assert(err, jc.ErrorIsNil) +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/admin.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/admin.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/admin.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/admin.go 2016-10-13 14:31:49.000000000 +0000 @@ -16,7 +16,7 @@ "github.com/juju/juju/apiserver/observer" "github.com/juju/juju/apiserver/params" "github.com/juju/juju/apiserver/presence" - "github.com/juju/juju/core/description" + "github.com/juju/juju/permission" "github.com/juju/juju/rpc" "github.com/juju/juju/rpc/rpcreflect" "github.com/juju/juju/state" @@ -24,7 +24,7 @@ jujuversion "github.com/juju/juju/version" ) -type adminApiFactory func(*Server, *apiHandler, observer.Observer) interface{} +type adminAPIFactory func(*Server, *apiHandler, observer.Observer) interface{} // admin is the only object that unlogged-in clients can access. It holds any // methods that are needed to log in. @@ -42,8 +42,9 @@ var MaintenanceNoLoginError = errors.New("login failed - maintenance in progress") var errAlreadyLoggedIn = errors.New("already logged in") -func (a *admin) doLogin(req params.LoginRequest, loginVersion int) (params.LoginResultV1, error) { - var fail params.LoginResultV1 +// login is the internal version of the Login API call. +func (a *admin) login(req params.LoginRequest, loginVersion int) (params.LoginResult, error) { + var fail params.LoginResult a.mu.Lock() defer a.mu.Unlock() @@ -52,19 +53,19 @@ return fail, errAlreadyLoggedIn } - // authedApi is the API method finder we'll use after getting logged in. 
- var authedApi rpc.MethodFinder = newApiRoot(a.root.state, a.root.resources, a.root) + // apiRoot is the API root exposed to the client after authentication. + var apiRoot rpc.Root = newAPIRoot(a.root.state, a.root.resources, a.root) // Use the login validation function, if one was specified. if a.srv.validator != nil { err := a.srv.validator(req) switch err { case params.UpgradeInProgressError: - authedApi = newUpgradingRoot(authedApi) + apiRoot = restrictRoot(apiRoot, upgradeMethodsOnly) case AboutToRestoreError: - authedApi = newAboutToRestoreRoot(authedApi) + apiRoot = restrictRoot(apiRoot, aboutToRestoreMethodsOnly) case RestoreInProgressError: - authedApi = newRestoreInProgressRoot(authedApi) + apiRoot = restrictAll(apiRoot, restoreInProgressError) case nil: // in this case no need to wrap authed api so we do nothing default: @@ -88,13 +89,13 @@ } } - serverOnlyLogin := a.root.modelUUID == "" + controllerOnlyLogin := a.root.modelUUID == "" controllerMachineLogin := false - entity, lastConnection, err := doCheckCreds(a.root.state, req, !serverOnlyLogin, a.srv.authCtxt) + entity, lastConnection, err := a.checkCreds(req, isUser) if err != nil { if err, ok := errors.Cause(err).(*common.DischargeRequiredError); ok { - loginResult := params.LoginResultV1{ + loginResult := params.LoginResult{ DischargeRequired: err.Macaroon, DischargeRequiredReason: err.Error(), } @@ -121,6 +122,9 @@ if kind != names.MachineTagKind { return fail, errors.Trace(err) } + if errors.Cause(err) != common.ErrBadCreds { + return fail, err + } entity, err = a.checkControllerMachineCreds(req) if err != nil { return fail, errors.Trace(err) @@ -132,7 +136,6 @@ controllerMachineLogin = true } a.root.entity = entity - a.apiObserver.Login(entity.Tag(), a.root.state.ModelTag(), controllerMachineLogin, req.UserData) // We have authenticated the user; enable the appropriate API @@ -140,26 +143,25 @@ a.loggedIn = true if !controllerMachineLogin { - if err := startPingerIfAgent(a.root, entity); err != 
nil { + if err := startPingerIfAgent(a.srv.pingClock, a.root, entity); err != nil { return fail, errors.Trace(err) } } var maybeUserInfo *params.AuthUserInfo - var modelUser description.UserAccess // Send back user info if user - if isUser && !serverOnlyLogin { - maybeUserInfo = ¶ms.AuthUserInfo{ - Identity: entity.Tag().String(), - LastConnection: lastConnection, - } - modelUser, err = a.root.state.UserAccess(entity.Tag().(names.UserTag), a.root.state.ModelTag()) + if isUser { + userTag := entity.Tag().(names.UserTag) + maybeUserInfo, err = a.checkUserPermissions(userTag, controllerOnlyLogin) if err != nil { - return fail, errors.Annotatef(err, "missing ModelUser for logged in user %s", entity.Tag()) + return fail, errors.Trace(err) } - maybeUserInfo.ReadOnly = modelUser.Access == description.ReadAccess - if maybeUserInfo.ReadOnly { - logger.Debugf("model user %s is READ ONLY", entity.Tag()) + maybeUserInfo.LastConnection = lastConnection + } else { + if controllerOnlyLogin { + logger.Debugf("controller login: %s", entity.Tag()) + } else { + logger.Debugf("model login: %s for %s", entity.Tag(), a.root.state.ModelTag().Id()) } } @@ -168,50 +170,121 @@ if err != nil { return fail, errors.Trace(err) } - logger.Debugf("hostPorts: %v", hostPorts) - environ, err := a.root.state.Model() + model, err := a.root.state.Model() if err != nil { return fail, errors.Trace(err) } - loginResult := params.LoginResultV1{ + if isUser && model.MigrationMode() == state.MigrationModeImporting { + apiRoot = restrictAll(apiRoot, errors.New("migration in progress, model is importing")) + } + + loginResult := params.LoginResult{ Servers: params.FromNetworkHostsPorts(hostPorts), - ModelTag: environ.Tag().String(), - ControllerTag: environ.ControllerTag().String(), - Facades: DescribeFacades(), + ControllerTag: model.ControllerTag().String(), UserInfo: maybeUserInfo, ServerVersion: jujuversion.Current.String(), } - // For sufficiently modern login versions, stop serving the - // controller 
model at the root of the API. - if serverOnlyLogin { - authedApi = newRestrictedRoot(authedApi) - // Remove the ModelTag from the response as there is no - // model here. - loginResult.ModelTag = "" - // Strip out the facades that are not supported from the result. - var facades []params.FacadeVersions - for _, facade := range loginResult.Facades { - if restrictedRootNames.Contains(facade.Name) { - facades = append(facades, facade) - } + if controllerOnlyLogin { + loginResult.Facades = filterFacades(isControllerFacade) + apiRoot = restrictRoot(apiRoot, controllerFacadesOnly) + } else { + loginResult.ModelTag = model.Tag().String() + loginResult.Facades = filterFacades(isModelFacade) + apiRoot = restrictRoot(apiRoot, modelFacadesOnly) + } + + a.root.rpcConn.ServeRoot(apiRoot, serverError) + + return loginResult, nil +} + +func (a *admin) checkUserPermissions(userTag names.UserTag, controllerOnlyLogin bool) (*params.AuthUserInfo, error) { + + modelAccess := permission.NoAccess + if !controllerOnlyLogin { + // Only grab modelUser permissions if this is not a controller only + // login. In all situations, if the model user is not found, they have + // no authorisation to access this model. + modelUser, err := a.root.state.UserAccess(userTag, a.root.state.ModelTag()) + if err != nil { + return nil, errors.Wrap(err, common.ErrPerm) + } + modelAccess = modelUser.Access + } + + // TODO(perrito666) remove the following section about everyone group + // when groups are implemented, this accounts only for the lack of a local + // ControllerUser when logging in from an external user that has not been granted + // permissions on the controller but there are permissions for the special + // everyone group. 
+ everyoneGroupAccess := permission.NoAccess + if !userTag.IsLocal() { + everyoneTag := names.NewUserTag(common.EveryoneTagName) + everyoneGroupUser, err := state.ControllerAccess(a.root.state, everyoneTag) + if err != nil && !errors.IsNotFound(err) { + return nil, errors.Annotatef(err, "obtaining ControllerUser for everyone group") + } + everyoneGroupAccess = everyoneGroupUser.Access + } + + controllerAccess := permission.NoAccess + if controllerUser, err := state.ControllerAccess(a.root.state, userTag); err == nil { + controllerAccess = controllerUser.Access + } else if errors.IsNotFound(err) { + controllerAccess = everyoneGroupAccess + } else { + return nil, errors.Annotatef(err, "obtaining ControllerUser for logged in user %s", userTag.Id()) + } + // It is possible that the everyoneGroup permissions are more capable than an + // individuals. If they are, use them. + if everyoneGroupAccess.GreaterControllerAccessThan(controllerAccess) { + controllerAccess = everyoneGroupAccess + } + if controllerOnlyLogin || !a.srv.allowModelAccess { + // We're either explicitly logging into the controller or + // we must check that the user has access to the controller + // even though they're logging into a model. 
+ if controllerAccess == permission.NoAccess { + return nil, errors.Trace(common.ErrPerm) } - loginResult.Facades = facades } - emptyUserAccess := description.UserAccess{} - if modelUser != emptyUserAccess { - authedApi = newClientAuthRoot(authedApi, modelUser) + if controllerOnlyLogin { + logger.Debugf("controller login: user %s has %q access", userTag.Id(), controllerAccess) + } else { + logger.Debugf("model login: user %s has %q for controller; %q for model %s", + userTag.Id(), controllerAccess, modelAccess, a.root.state.ModelTag().Id()) } + return ¶ms.AuthUserInfo{ + Identity: userTag.String(), + ControllerAccess: string(controllerAccess), + ModelAccess: string(modelAccess), + }, nil +} - a.root.rpcConn.ServeFinder(authedApi, serverError) +func filterFacades(allowFacade func(name string) bool) []params.FacadeVersions { + allFacades := DescribeFacades() + out := make([]params.FacadeVersions, 0, len(allFacades)) + for _, facade := range allFacades { + if allowFacade(facade.Name) { + out = append(out, facade) + } + } + return out +} - return loginResult, nil +func (a *admin) checkCreds(req params.LoginRequest, lookForModelUser bool) (state.Entity, *time.Time, error) { + return doCheckCreds(a.root.state, req, lookForModelUser, a.authenticator()) } func (a *admin) checkControllerMachineCreds(req params.LoginRequest) (state.Entity, error) { - return checkControllerMachineCreds(a.srv.state, req, a.srv.authCtxt) + return checkControllerMachineCreds(a.srv.state, req, a.authenticator()) +} + +func (a *admin) authenticator() authentication.EntityAuthenticator { + return a.srv.authCtxt.authenticator(a.root.serverHost) } func (a *admin) maintenanceInProgress() bool { @@ -237,7 +310,7 @@ // checkCreds validates the entities credentials in the current model. // If the entity is a user, and lookForModelUser is true, a model user must exist -// for the model. In the case of a user logging in to the server, but +// for the model. 
In the case of a user logging in to the controller, but // not a model, there is no env user needed. While we have the env // user, if we do have it, update the last login time. // @@ -324,18 +397,20 @@ if !ok { return f.st.FindEntity(tag) } - modelUser, err := f.st.UserAccess(utag, f.st.ModelTag()) + + modelUser, controllerUser, err := common.UserAccess(f.st, utag) if err != nil { - return nil, err + return nil, errors.Trace(err) } u := &modelUserEntity{ - st: f.st, - modelUser: modelUser, + st: f.st, + modelUser: modelUser, + controllerUser: controllerUser, } if utag.IsLocal() { user, err := f.st.User(utag) if err != nil { - return nil, err + return nil, errors.Trace(err) } u.user = user } @@ -352,8 +427,9 @@ type modelUserEntity struct { st *state.State - modelUser description.UserAccess - user *state.User + controllerUser permission.UserAccess + modelUser permission.UserAccess + user *state.User } // Refresh implements state.Authenticator.Refresh. @@ -383,15 +459,28 @@ // Tag implements state.Entity.Tag. func (u *modelUserEntity) Tag() names.Tag { - return u.modelUser.UserTag + if u.user != nil { + return u.user.UserTag() + } + if !permission.IsEmptyUserAccess(u.modelUser) { + return u.modelUser.UserTag + } + return u.controllerUser.UserTag + } // LastLogin implements loginEntity.LastLogin. func (u *modelUserEntity) LastLogin() (time.Time, error) { // The last connection for the model takes precedence over // the local user last login time. - t, err := u.st.LastModelConnection(u.modelUser.UserTag) - if state.IsNeverConnectedError(err) { + var err error + var t time.Time + if !permission.IsEmptyUserAccess(u.modelUser) { + t, err = u.st.LastModelConnection(u.modelUser.UserTag) + } else { + err = state.NeverConnectedError("controller user") + } + if state.IsNeverConnectedError(err) || permission.IsEmptyUserAccess(u.modelUser) { if u.user != nil { // There's a global user, so use that login time instead. 
return u.user.LastLogin() @@ -400,25 +489,31 @@ // to implement LastLogin error semantics too. err = state.NeverLoggedInError(err.Error()) } - return t, err + return t, errors.Trace(err) } // UpdateLastLogin implements loginEntity.UpdateLastLogin. func (u *modelUserEntity) UpdateLastLogin() error { + var err error + + if !permission.IsEmptyUserAccess(u.modelUser) { + if u.modelUser.Object.Kind() != names.ModelTagKind { + return errors.NotValidf("%s as model user", u.modelUser.Object.Kind()) + } - if u.modelUser.Object.Kind() != names.ModelTagKind { - return errors.NotValidf("%s as model user", u.modelUser.Object.Kind()) + err = u.st.UpdateLastModelConnection(u.modelUser.UserTag) } - err := u.st.UpdateLastModelConnection(u.modelUser.UserTag) if u.user != nil { err1 := u.user.UpdateLastLogin() if err == nil { return err1 } } - - return err + if err != nil { + return errors.Trace(err) + } + return nil } // presenceShim exists to represent a statepresence.Agent in a form @@ -440,7 +535,7 @@ return pinger, nil } -func startPingerIfAgent(root *apiHandler, entity state.Entity) error { +func startPingerIfAgent(clock clock.Clock, root *apiHandler, entity state.Entity) error { // worker runs presence.Pingers -- absence of which will cause // embarrassing "agent is lost" messages to show up in status -- // until it's stopped. 
It's stored in resources purely for the @@ -454,7 +549,7 @@ worker, err := presence.New(presence.Config{ Identity: entity.Tag(), Start: presenceShim{agent}.Start, - Clock: clock.WallClock, + Clock: clock, RetryDelay: 3 * time.Second, }) if err != nil { @@ -478,7 +573,7 @@ logger.Errorf("error closing the RPC connection: %v", err) } } - pingTimeout := newPingTimeout(action, clock.WallClock, maxClientPingInterval) + pingTimeout := newPingTimeout(action, clock, maxClientPingInterval) return root.getResources().RegisterNamed("pingTimeout", pingTimeout) } @@ -493,3 +588,6 @@ func (r *errRoot) FindMethod(rootName string, version int, methodName string) (rpcreflect.MethodCaller, error) { return nil, r.err } + +func (r *errRoot) Kill() { +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/admin_test.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/admin_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/admin_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/admin_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -6,6 +6,7 @@ import ( "fmt" "net" + "net/url" "strconv" "sync" "time" @@ -16,16 +17,19 @@ "github.com/juju/utils" gc "gopkg.in/check.v1" "gopkg.in/juju/names.v2" + "gopkg.in/macaroon-bakery.v1/httpbakery" "github.com/juju/juju/api" apimachiner "github.com/juju/juju/api/machiner" apitesting "github.com/juju/juju/api/testing" "github.com/juju/juju/apiserver" - "github.com/juju/juju/apiserver/observer" - "github.com/juju/juju/apiserver/observer/fakeobserver" + "github.com/juju/juju/apiserver/common" + "github.com/juju/juju/apiserver/controller" "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/constraints" jujutesting "github.com/juju/juju/juju/testing" "github.com/juju/juju/network" + "github.com/juju/juju/permission" "github.com/juju/juju/rpc" "github.com/juju/juju/state" coretesting "github.com/juju/juju/testing" @@ -34,7 +38,7 @@ type baseLoginSuite struct { 
jujutesting.JujuConnSuite - setAdminApi func(*apiserver.Server) + setAdminAPI func(*apiserver.Server) } type loginSuite struct { @@ -43,8 +47,8 @@ var _ = gc.Suite(&loginSuite{ baseLoginSuite{ - setAdminApi: func(srv *apiserver.Server) { - apiserver.SetAdminApiVersions(srv, 3) + setAdminAPI: func(srv *apiserver.Server) { + apiserver.SetAdminAPIVersions(srv, 3) }, }, }) @@ -54,28 +58,14 @@ loggo.GetLogger("juju.apiserver").SetLogLevel(loggo.TRACE) } -func (s *baseLoginSuite) setupServer(c *gc.C) (api.Connection, func()) { - return s.setupServerForModel(c, s.State.ModelTag()) -} - -func (s *baseLoginSuite) setupServerForModel(c *gc.C, modelTag names.ModelTag) (api.Connection, func()) { - info, cleanup := s.setupServerForEnvironmentWithValidator(c, modelTag, nil) - st, err := api.Open(info, fastDialOpts) - c.Assert(err, jc.ErrorIsNil) - return st, func() { - st.Close() - cleanup() - } -} - -func (s *baseLoginSuite) setupMachineAndServer(c *gc.C) (*api.Info, func()) { +func (s *baseLoginSuite) newMachineAndServer(c *gc.C) (*api.Info, *apiserver.Server) { machine, password := s.Factory.MakeMachineReturningPassword( c, &factory.MachineParams{Nonce: "fake_nonce"}) - info, cleanup := s.setupServerWithValidator(c, nil) + info, srv := newServer(c, s.State) info.Tag = machine.Tag() info.Password = password info.Nonce = "fake_nonce" - return info, cleanup + return info, srv } func (s *loginSuite) TestLoginWithInvalidTag(c *gc.C) { @@ -83,7 +73,6 @@ info.Tag = nil info.Password = "" st := s.openAPIWithoutLogin(c, info) - defer st.Close() request := ¶ms.LoginRequest{ AuthTag: "bar", @@ -99,8 +88,9 @@ // Start our own server so we can control when the first login // happens. Otherwise in JujuConnSuite.SetUpTest api.Open is // called with user-admin permissions automatically. 
- info, cleanup := s.setupServerWithValidator(c, nil) - defer cleanup() + info, srv := newServer(c, s.State) + defer assertStop(c, srv) + info.ModelTag = s.State.ModelTag() adminUser := s.AdminUserTag(c) @@ -131,7 +121,6 @@ // Open the API without logging in, so we can perform // operations on the connection before calling Login. st := s.openAPIWithoutLogin(c, info) - defer st.Close() _, err := apimachiner.NewState(st).Machine(names.NewMachineTag("0")) c.Assert(errors.Cause(err), gc.DeepEquals, &rpc.RequestError{ @@ -154,11 +143,11 @@ } func (s *loginSuite) TestLoginAsDeactivatedUser(c *gc.C) { - info, cleanup := s.setupServerWithValidator(c, nil) - defer cleanup() + info, srv := newServer(c, s.State) + defer assertStop(c, srv) + info.ModelTag = s.State.ModelTag() st := s.openAPIWithoutLogin(c, info) - defer st.Close() password := "password" u := s.Factory.MakeUser(c, &factory.UserParams{Password: password, Disabled: true}) @@ -170,10 +159,7 @@ // Since these are user login tests, the nonce is empty. 
err = st.Login(u.Tag(), password, "", nil) - c.Assert(errors.Cause(err), gc.DeepEquals, &rpc.RequestError{ - Message: "invalid entity name or password", - Code: "unauthorized access", - }) + assertInvalidEntityPassword(c, err) _, err = st.Client().Status([]string{}) c.Assert(errors.Cause(err), gc.DeepEquals, &rpc.RequestError{ @@ -183,8 +169,8 @@ } func (s *baseLoginSuite) runLoginSetsLogIdentifier(c *gc.C) { - info, cleanup := s.setupServerWithValidator(c, nil) - defer cleanup() + info, srv := newServer(c, s.State) + defer assertStop(c, srv) machine, password := s.Factory.MakeMachineReturningPassword( c, &factory.MachineParams{Nonce: "fake_nonce"}) @@ -203,8 +189,8 @@ } func (s *loginSuite) TestLoginAddrs(c *gc.C) { - info, cleanup := s.setupMachineAndServer(c) - defer cleanup() + info, srv := s.newMachineAndServer(c) + defer assertStop(c, srv) err := s.State.SetAPIHostPorts(nil) c.Assert(err, jc.ErrorIsNil) @@ -284,8 +270,8 @@ } func (s *loginSuite) TestDelayLogins(c *gc.C) { - info, cleanup := s.setupMachineAndServer(c) - defer cleanup() + info, srv := s.newMachineAndServer(c) + defer assertStop(c, srv) delayChan, cleanup := apiserver.DelayLogins() defer cleanup() @@ -342,8 +328,8 @@ } func (s *loginSuite) TestLoginRateLimited(c *gc.C) { - info, cleanup := s.setupMachineAndServer(c) - defer cleanup() + info, srv := s.newMachineAndServer(c) + defer assertStop(c, srv) delayChan, cleanup := apiserver.DelayLogins() defer cleanup() @@ -395,8 +381,8 @@ } func (s *loginSuite) TestUsersLoginWhileRateLimited(c *gc.C) { - info, cleanup := s.setupMachineAndServer(c) - defer cleanup() + info, srv := s.newMachineAndServer(c) + defer assertStop(c, srv) delayChan, cleanup := apiserver.DelayLogins() defer cleanup() @@ -443,10 +429,13 @@ } func (s *loginSuite) TestUsersAreNotRateLimited(c *gc.C) { - info, cleanup := s.setupServerWithValidator(c, nil) + info, srv := newServer(c, s.State) + defer assertStop(c, srv) + info.Tag = s.AdminUserTag(c) info.Password = "dummy-secret" - 
defer cleanup() + info.ModelTag = s.State.ModelTag() + delayChan, cleanup := apiserver.DelayLogins() defer cleanup() // We can login more than LoginRateLimit users @@ -469,17 +458,18 @@ } } -func (s *loginSuite) TestNonEnvironUserLoginFails(c *gc.C) { - info, cleanup := s.setupServerWithValidator(c, nil) - defer cleanup() +func (s *loginSuite) TestNonModelUserLoginFails(c *gc.C) { + info, srv := newServer(c, s.State) + defer assertStop(c, srv) + info.ModelTag = s.State.ModelTag() user := s.Factory.MakeUser(c, &factory.UserParams{Password: "dummy-password", NoModelUser: true}) + ctag := names.NewControllerTag(s.State.ControllerUUID()) + err := s.State.RemoveUserAccess(user.UserTag(), ctag) + c.Assert(err, jc.ErrorIsNil) info.Password = "dummy-password" info.Tag = user.UserTag() - _, err := api.Open(info, fastDialOpts) - c.Assert(errors.Cause(err), gc.DeepEquals, &rpc.RequestError{ - Message: "invalid entity name or password", - Code: "unauthorized access", - }) + _, err = api.Open(info, fastDialOpts) + assertInvalidEntityPassword(c, err) } func (s *loginSuite) TestLoginValidationSuccess(c *gc.C) { @@ -526,15 +516,16 @@ } func (s *loginSuite) TestFailedLoginDuringMaintenance(c *gc.C) { - validator := func(params.LoginRequest) error { + cfg := defaultServerConfig(c) + cfg.Validator = func(params.LoginRequest) error { return errors.New("something") } - info, cleanup := s.setupServerWithValidator(c, validator) - defer cleanup() + info, srv := newServerWithConfig(c, s.State, cfg) + defer assertStop(c, srv) + info.ModelTag = s.State.ModelTag() checkLogin := func(tag names.Tag) { st := s.openAPIWithoutLogin(c, info) - defer st.Close() err := st.Login(tag, "dummy-secret", "nonce", nil) c.Assert(err, gc.ErrorMatches, "something") } @@ -545,11 +536,13 @@ type validationChecker func(c *gc.C, err error, st api.Connection) func (s *baseLoginSuite) checkLoginWithValidator(c *gc.C, validator apiserver.LoginValidator, checker validationChecker) { - info, cleanup := 
s.setupServerWithValidator(c, validator) - defer cleanup() + cfg := defaultServerConfig(c) + cfg.Validator = validator + info, srv := newServerWithConfig(c, s.State, cfg) + defer assertStop(c, srv) + info.ModelTag = s.State.ModelTag() st := s.openAPIWithoutLogin(c, info) - defer st.Close() // Ensure not already logged in. _, err := apimachiner.NewState(st).Machine(names.NewMachineTag("0")) @@ -565,59 +558,23 @@ checker(c, err, st) } -func (s *baseLoginSuite) setupServerWithValidator(c *gc.C, validator apiserver.LoginValidator) (*api.Info, func()) { - env, err := s.State.Model() - c.Assert(err, jc.ErrorIsNil) - return s.setupServerForEnvironmentWithValidator(c, env.ModelTag(), validator) -} - -func (s *baseLoginSuite) setupServerForEnvironmentWithValidator(c *gc.C, modelTag names.ModelTag, validator apiserver.LoginValidator) (*api.Info, func()) { - listener, err := net.Listen("tcp", "127.0.0.1:0") - c.Assert(err, jc.ErrorIsNil) - srv, err := apiserver.NewServer( - s.State, - listener, - apiserver.ServerConfig{ - Cert: []byte(coretesting.ServerCert), - Key: []byte(coretesting.ServerKey), - Validator: validator, - Tag: names.NewMachineTag("0"), - LogDir: c.MkDir(), - NewObserver: func() observer.Observer { return &fakeobserver.Instance{} }, - }, - ) - c.Assert(err, jc.ErrorIsNil) - c.Assert(s.setAdminApi, gc.NotNil) - s.setAdminApi(srv) - info := &api.Info{ - Tag: nil, - Password: "", - ModelTag: modelTag, - Addrs: []string{srv.Addr().String()}, - CACert: coretesting.CACert, - } - return info, func() { - err := srv.Stop() - c.Assert(err, jc.ErrorIsNil) - } -} - -func (s *baseLoginSuite) openAPIWithoutLogin(c *gc.C, info *api.Info) api.Connection { +func (s *baseLoginSuite) openAPIWithoutLogin(c *gc.C, info0 *api.Info) api.Connection { + info := *info0 info.Tag = nil info.Password = "" info.SkipLogin = true - st, err := api.Open(info, fastDialOpts) + st, err := api.Open(&info, fastDialOpts) c.Assert(err, jc.ErrorIsNil) + s.AddCleanup(func(*gc.C) { st.Close() }) return 
st } func (s *loginSuite) TestControllerModel(c *gc.C) { - info, cleanup := s.setupServerWithValidator(c, nil) - defer cleanup() + info, srv := newServer(c, s.State) + defer assertStop(c, srv) - c.Assert(info.ModelTag, gc.Equals, s.State.ModelTag()) + info.ModelTag = s.State.ModelTag() st := s.openAPIWithoutLogin(c, info) - defer st.Close() adminUser := s.AdminUserTag(c) err := st.Login(adminUser, "dummy-secret", "", nil) @@ -627,30 +584,25 @@ } func (s *loginSuite) TestControllerModelBadCreds(c *gc.C) { - info, cleanup := s.setupServerWithValidator(c, nil) - defer cleanup() + info, srv := newServer(c, s.State) + defer assertStop(c, srv) - c.Assert(info.ModelTag, gc.Equals, s.State.ModelTag()) + info.ModelTag = s.State.ModelTag() st := s.openAPIWithoutLogin(c, info) - defer st.Close() adminUser := s.AdminUserTag(c) err := st.Login(adminUser, "bad-password", "", nil) - c.Assert(errors.Cause(err), gc.DeepEquals, &rpc.RequestError{ - Message: `invalid entity name or password`, - Code: "unauthorized access", - }) + assertInvalidEntityPassword(c, err) } func (s *loginSuite) TestNonExistentModel(c *gc.C) { - info, cleanup := s.setupServerWithValidator(c, nil) - defer cleanup() + info, srv := newServer(c, s.State) + defer assertStop(c, srv) uuid, err := utils.NewUUID() c.Assert(err, jc.ErrorIsNil) info.ModelTag = names.NewModelTag(uuid.String()) st := s.openAPIWithoutLogin(c, info) - defer st.Close() adminUser := s.AdminUserTag(c) err = st.Login(adminUser, "dummy-secret", "", nil) @@ -661,12 +613,11 @@ } func (s *loginSuite) TestInvalidModel(c *gc.C) { - info, cleanup := s.setupServerWithValidator(c, nil) - defer cleanup() - + info, srv := newServer(c, s.State) + defer assertStop(c, srv) info.ModelTag = names.NewModelTag("rubbish") + st := s.openAPIWithoutLogin(c, info) - defer st.Close() adminUser := s.AdminUserTag(c) err := st.Login(adminUser, "dummy-secret", "", nil) @@ -677,8 +628,8 @@ } func (s *loginSuite) TestOtherModel(c *gc.C) { - info, cleanup := 
s.setupServerWithValidator(c, nil) - defer cleanup() + info, srv := newServer(c, s.State) + defer assertStop(c, srv) envOwner := s.Factory.MakeUser(c, nil) envState := s.Factory.MakeModel(c, &factory.ModelParams{ @@ -687,7 +638,6 @@ defer envState.Close() info.ModelTag = envState.ModelTag() st := s.openAPIWithoutLogin(c, info) - defer st.Close() err := st.Login(envOwner.UserTag(), "password", "", nil) c.Assert(err, jc.ErrorIsNil) @@ -699,8 +649,8 @@ // Machine credentials are checked against environment specific // machines, so this makes sure that the credential checking is // using the correct state connection. - info, cleanup := s.setupServerWithValidator(c, nil) - defer cleanup() + info, srv := newServer(c, s.State) + defer assertStop(c, srv) envOwner := s.Factory.MakeUser(c, nil) envState := s.Factory.MakeModel(c, &factory.ModelParams{ @@ -718,15 +668,41 @@ info.ModelTag = envState.ModelTag() st := s.openAPIWithoutLogin(c, info) - defer st.Close() err := st.Login(machine.Tag(), password, "nonce", nil) c.Assert(err, jc.ErrorIsNil) } +func (s *loginSuite) TestMachineLoginOtherModelNotProvisioned(c *gc.C) { + info, srv := newServer(c, s.State) + defer assertStop(c, srv) + + envOwner := s.Factory.MakeUser(c, nil) + envState := s.Factory.MakeModel(c, &factory.ModelParams{ + Owner: envOwner.UserTag(), + ConfigAttrs: map[string]interface{}{ + "controller": false, + }, + }) + defer envState.Close() + + f2 := factory.NewFactory(envState) + machine, password := f2.MakeUnprovisionedMachineReturningPassword(c, &factory.MachineParams{}) + + info.ModelTag = envState.ModelTag() + st := s.openAPIWithoutLogin(c, info) + + // If the agent attempts Login before the provisioner has recorded + // the machine's nonce in state, then the agent should get back an + // error with code "not provisioned". 
+ err := st.Login(machine.Tag(), password, "nonce", nil) + c.Assert(err, gc.ErrorMatches, `machine 0 not provisioned \(not provisioned\)`) + c.Assert(err, jc.Satisfies, params.IsCodeNotProvisioned) +} + func (s *loginSuite) TestOtherEnvironmentFromController(c *gc.C) { - info, cleanup := s.setupServerWithValidator(c, nil) - defer cleanup() + info, srv := newServer(c, s.State) + defer assertStop(c, srv) machine, password := s.Factory.MakeMachineReturningPassword(c, &factory.MachineParams{ Jobs: []state.MachineJob{state.JobManageModel}, @@ -736,15 +712,40 @@ defer envState.Close() info.ModelTag = envState.ModelTag() st := s.openAPIWithoutLogin(c, info) - defer st.Close() err := st.Login(machine.Tag(), password, "nonce", nil) c.Assert(err, jc.ErrorIsNil) } +func (s *loginSuite) TestOtherEnvironmentFromControllerOtherNotProvisioned(c *gc.C) { + info, srv := newServer(c, s.State) + defer assertStop(c, srv) + + managerMachine, password := s.Factory.MakeMachineReturningPassword(c, &factory.MachineParams{ + Jobs: []state.MachineJob{state.JobManageModel}, + }) + + // Create a hosted model with an unprovisioned machine that has the + // same tag as the manager machine. + hostedModelState := s.Factory.MakeModel(c, nil) + defer hostedModelState.Close() + f2 := factory.NewFactory(hostedModelState) + workloadMachine, _ := f2.MakeUnprovisionedMachineReturningPassword(c, &factory.MachineParams{}) + c.Assert(managerMachine.Tag(), gc.Equals, workloadMachine.Tag()) + + info.ModelTag = hostedModelState.ModelTag() + st := s.openAPIWithoutLogin(c, info) + + // The fact that the machine with the same tag in the hosted + // model is unprovisioned should not cause the login to fail + // with "not provisioned", because the passwords don't match. 
+ err := st.Login(managerMachine.Tag(), password, "nonce", nil) + c.Assert(err, jc.ErrorIsNil) +} + func (s *loginSuite) TestOtherEnvironmentWhenNotController(c *gc.C) { - info, cleanup := s.setupServerWithValidator(c, nil) - defer cleanup() + info, srv := newServer(c, s.State) + defer assertStop(c, srv) machine, password := s.Factory.MakeMachineReturningPassword(c, nil) @@ -752,38 +753,84 @@ defer envState.Close() info.ModelTag = envState.ModelTag() st := s.openAPIWithoutLogin(c, info) - defer st.Close() err := st.Login(machine.Tag(), password, "nonce", nil) - c.Assert(errors.Cause(err), gc.DeepEquals, &rpc.RequestError{ - Message: "permission denied", - Code: "unauthorized access", + assertPermissionDenied(c, err) +} + +func (s *loginSuite) loginLocalUser(c *gc.C, info *api.Info) (*state.User, params.LoginResult) { + password := "shhh..." + user := s.Factory.MakeUser(c, &factory.UserParams{ + Password: password, }) + conn := s.openAPIWithoutLogin(c, info) + + var result params.LoginResult + request := ¶ms.LoginRequest{ + AuthTag: user.Tag().String(), + Credentials: password, + } + err := conn.APICall("Admin", 3, "", "Login", request, &result) + c.Assert(err, jc.ErrorIsNil) + c.Assert(result.UserInfo, gc.NotNil) + return user, result +} + +func (s *loginSuite) TestLoginResultLocalUser(c *gc.C) { + info, srv := newServer(c, s.State) + defer assertStop(c, srv) + info.ModelTag = s.State.ModelTag() + + user, result := s.loginLocalUser(c, info) + c.Check(result.UserInfo.Identity, gc.Equals, user.Tag().String()) + c.Check(result.UserInfo.ControllerAccess, gc.Equals, "login") + c.Check(result.UserInfo.ModelAccess, gc.Equals, "admin") } -func (s *loginSuite) assertRemoteModel(c *gc.C, st api.Connection, expected names.ModelTag) { +func (s *loginSuite) TestLoginResultLocalUserEveryoneCreateOnlyNonLocal(c *gc.C) { + info, srv := newServer(c, s.State) + defer assertStop(c, srv) + info.ModelTag = s.State.ModelTag() + + setEveryoneAccess(c, s.State, s.AdminUserTag(c), 
permission.AddModelAccess) + + user, result := s.loginLocalUser(c, info) + c.Check(result.UserInfo.Identity, gc.Equals, user.Tag().String()) + c.Check(result.UserInfo.ControllerAccess, gc.Equals, "login") + c.Check(result.UserInfo.ModelAccess, gc.Equals, "admin") +} + +func (s *loginSuite) assertRemoteModel(c *gc.C, api api.Connection, expected names.ModelTag) { // Look at what the api thinks it has. - tag, err := st.ModelTag() - c.Assert(err, jc.ErrorIsNil) + tag, ok := api.ModelTag() + c.Assert(ok, jc.IsTrue) c.Assert(tag, gc.Equals, expected) // Look at what the api Client thinks it has. - client := st.Client() + client := api.Client() // ModelUUID looks at the env tag on the api state connection. - uuid, err := client.ModelUUID() - c.Assert(err, jc.ErrorIsNil) + uuid, ok := client.ModelUUID() + c.Assert(ok, jc.IsTrue) c.Assert(uuid, gc.Equals, expected.Id()) - // ModelInfo calls a remote method that looks up the environment. - info, err := client.ModelInfo() + // The code below is to verify that the API connection is operating on + // the expected model. We make a change in state on that model, and + // then check that it is picked up by a call to the API. + + st, err := s.State.ForModel(tag) + c.Assert(err, jc.ErrorIsNil) + defer st.Close() + + expectedCons := constraints.MustParse("mem=8G") + err = st.SetModelConstraints(expectedCons) + c.Assert(err, jc.ErrorIsNil) + + cons, err := client.GetModelConstraints() c.Assert(err, jc.ErrorIsNil) - c.Assert(info.UUID, gc.Equals, expected.Id()) + c.Assert(cons, jc.DeepEquals, expectedCons) } func (s *loginSuite) TestLoginUpdatesLastLoginAndConnection(c *gc.C) { - _, cleanup := s.setupServerWithValidator(c, nil) - defer cleanup() - // Since the login and connection times truncate time to the second, // we need to make sure our start time is just before now. 
startTime := time.Now().Add(-time.Second) @@ -838,24 +885,189 @@ info.ModelTag = names.ModelTag{} client, err := api.Open(info, api.DialOpts{}) - c.Assert(errors.Cause(err), gc.DeepEquals, &rpc.RequestError{ - Message: "invalid entity name or password", - Code: "unauthorized access", - }) + assertInvalidEntityPassword(c, err) c.Assert(client, gc.Equals, nil) } +func (s *macaroonLoginSuite) login(c *gc.C, info *api.Info) (params.LoginResult, error) { + info.SkipLogin = true + + cookieJar := apitesting.NewClearableCookieJar() + + client := s.OpenAPI(c, info, cookieJar) + defer client.Close() + + var ( + // Remote users start with an empty login request. + request params.LoginRequest + result params.LoginResult + ) + err := client.APICall("Admin", 3, "", "Login", &request, &result) + c.Assert(err, jc.ErrorIsNil) + + cookieURL := &url.URL{ + Scheme: "https", + Host: "localhost", + Path: "/", + } + + bakeryClient := httpbakery.NewClient() + + err = bakeryClient.HandleError(cookieURL, &httpbakery.Error{ + Message: result.DischargeRequiredReason, + Code: httpbakery.ErrDischargeRequired, + Info: &httpbakery.ErrorInfo{ + Macaroon: result.DischargeRequired, + MacaroonPath: "/", + }, + }) + c.Assert(err, jc.ErrorIsNil) + // Add the macaroons that have been saved by HandleError to our login request. + request.Macaroons = httpbakery.MacaroonsForURL(bakeryClient.Client.Jar, cookieURL) + + err = client.APICall("Admin", 3, "", "Login", &request, &result) + return result, err +} + +func (s *macaroonLoginSuite) TestRemoteUserLoginToControllerNoAccess(c *gc.C) { + s.DischargerLogin = func() string { + return "test@somewhere" + } + info := s.APIInfo(c) + // Log in to the controller, not the model. 
+ info.ModelTag = names.ModelTag{} + + _, err := s.login(c, info) + assertInvalidEntityPassword(c, err) +} + +func (s *macaroonLoginSuite) TestRemoteUserLoginToControllerLoginAccess(c *gc.C) { + setEveryoneAccess(c, s.State, s.AdminUserTag(c), permission.LoginAccess) + const remoteUser = "test@somewhere" + var remoteUserTag = names.NewUserTag(remoteUser) + + s.DischargerLogin = func() string { + return remoteUser + } + info := s.APIInfo(c) + // Log in to the controller, not the model. + info.ModelTag = names.ModelTag{} + + result, err := s.login(c, info) + c.Check(err, jc.ErrorIsNil) + c.Assert(result.UserInfo, gc.NotNil) + c.Check(result.UserInfo.Identity, gc.Equals, remoteUserTag.String()) + c.Check(result.UserInfo.ControllerAccess, gc.Equals, "login") + c.Check(result.UserInfo.ModelAccess, gc.Equals, "") +} + +func (s *macaroonLoginSuite) TestRemoteUserLoginToControllerAddModelAccess(c *gc.C) { + setEveryoneAccess(c, s.State, s.AdminUserTag(c), permission.AddModelAccess) + const remoteUser = "test@somewhere" + var remoteUserTag = names.NewUserTag(remoteUser) + + s.DischargerLogin = func() string { + return remoteUser + } + info := s.APIInfo(c) + // Log in to the controller, not the model. + info.ModelTag = names.ModelTag{} + + result, err := s.login(c, info) + c.Check(err, jc.ErrorIsNil) + c.Assert(result.UserInfo, gc.NotNil) + c.Check(result.UserInfo.Identity, gc.Equals, remoteUserTag.String()) + c.Check(result.UserInfo.ControllerAccess, gc.Equals, "add-model") + c.Check(result.UserInfo.ModelAccess, gc.Equals, "") +} + +func (s *macaroonLoginSuite) TestRemoteUserLoginToModelNoExplicitAccess(c *gc.C) { + // If we have a remote user which the controller knows nothing about, + // and the macaroon is discharged successfully, and the user is attempting + // to log into a model, that is permission denied. 
+ setEveryoneAccess(c, s.State, s.AdminUserTag(c), permission.LoginAccess) + s.DischargerLogin = func() string { + return "test@somewhere" + } + info := s.APIInfo(c) + + _, err := s.login(c, info) + assertPermissionDenied(c, err) +} + +func (s *macaroonLoginSuite) TestRemoteUserLoginToModelWithExplicitAccess(c *gc.C) { + s.testRemoteUserLoginToModelWithExplicitAccess(c, false) +} + +func (s *macaroonLoginSuite) TestRemoteUserLoginToModelWithExplicitAccessAndAllowModelAccess(c *gc.C) { + s.testRemoteUserLoginToModelWithExplicitAccess(c, true) +} + +func (s *macaroonLoginSuite) testRemoteUserLoginToModelWithExplicitAccess(c *gc.C, allowModelAccess bool) { + cfg := defaultServerConfig(c) + cfg.AllowModelAccess = allowModelAccess + + info, srv := newServerWithConfig(c, s.State, cfg) + defer assertStop(c, srv) + info.ModelTag = s.State.ModelTag() + + // If we have a remote user which has explict model access, but neither + // controller access nor 'everyone' access, the user will have access + // only if the AllowModelAccess configuration flag is true. 
+ const remoteUser = "test@somewhere" + s.Factory.MakeModelUser(c, &factory.ModelUserParams{ + User: remoteUser, + + Access: permission.WriteAccess, + }) + s.DischargerLogin = func() string { + return remoteUser + } + + _, err := s.login(c, info) + if allowModelAccess { + c.Assert(err, jc.ErrorIsNil) + } else { + assertPermissionDenied(c, err) + } +} + +func (s *macaroonLoginSuite) TestRemoteUserLoginToModelWithControllerAccess(c *gc.C) { + const remoteUser = "test@somewhere" + var remoteUserTag = names.NewUserTag(remoteUser) + s.Factory.MakeModelUser(c, &factory.ModelUserParams{ + User: remoteUser, + Access: permission.WriteAccess, + }) + s.AddControllerUser(c, remoteUser, permission.AddModelAccess) + + s.DischargerLogin = func() string { + return remoteUser + } + info := s.APIInfo(c) + + result, err := s.login(c, info) + c.Check(err, jc.ErrorIsNil) + c.Assert(result.UserInfo, gc.NotNil) + c.Check(result.UserInfo.Identity, gc.Equals, remoteUserTag.String()) + c.Check(result.UserInfo.ControllerAccess, gc.Equals, "add-model") + c.Check(result.UserInfo.ModelAccess, gc.Equals, "write") +} + func (s *macaroonLoginSuite) TestLoginToEnvironmentSuccess(c *gc.C) { - s.AddModelUser(c, "test@somewhere") + const remoteUser = "test@somewhere" + s.AddModelUser(c, remoteUser) + s.AddControllerUser(c, remoteUser, permission.LoginAccess) s.DischargerLogin = func() string { return "test@somewhere" } + loggo.GetLogger("juju.apiserver").SetLogLevel(loggo.TRACE) client, err := api.Open(s.APIInfo(c), api.DialOpts{}) c.Assert(err, jc.ErrorIsNil) defer client.Close() // The auth tag has been correctly returned by the server. 
- c.Assert(client.AuthTag(), gc.Equals, names.NewUserTag("test@somewhere")) + c.Assert(client.AuthTag(), gc.Equals, names.NewUserTag(remoteUser)) } func (s *macaroonLoginSuite) TestFailedToObtainDischargeLogin(c *gc.C) { @@ -872,9 +1084,62 @@ return "testUnknown@somewhere" } client, err := api.Open(s.APIInfo(c), api.DialOpts{}) + assertInvalidEntityPassword(c, err) + c.Assert(client, gc.Equals, nil) +} + +func assertInvalidEntityPassword(c *gc.C, err error) { c.Assert(errors.Cause(err), gc.DeepEquals, &rpc.RequestError{ Message: "invalid entity name or password", Code: "unauthorized access", }) - c.Assert(client, gc.Equals, nil) +} + +func assertPermissionDenied(c *gc.C, err error) { + c.Assert(errors.Cause(err), gc.DeepEquals, &rpc.RequestError{ + Message: "permission denied", + Code: "unauthorized access", + }) +} + +func setEveryoneAccess(c *gc.C, st *state.State, adminUser names.UserTag, access permission.Access) { + err := controller.ChangeControllerAccess( + st, adminUser, names.NewUserTag(common.EveryoneTagName), + params.GrantControllerAccess, access) + c.Assert(err, jc.ErrorIsNil) +} + +var _ = gc.Suite(&migrationSuite{ + baseLoginSuite{ + setAdminAPI: func(srv *apiserver.Server) { + apiserver.SetAdminAPIVersions(srv, 3) + }, + }, +}) + +type migrationSuite struct { + baseLoginSuite +} + +func (s *migrationSuite) TestImportingModel(c *gc.C) { + m, password := s.Factory.MakeMachineReturningPassword(c, &factory.MachineParams{ + Nonce: "nonce", + }) + model, err := s.State.Model() + c.Assert(err, jc.ErrorIsNil) + err = model.SetMigrationMode(state.MigrationModeImporting) + c.Assert(err, jc.ErrorIsNil) + + // Users should be able to log in but RPC requests should fail. + info := s.APIInfo(c) + userConn := s.OpenAPIAs(c, info.Tag, info.Password) + defer userConn.Close() + _, err = userConn.Client().Status(nil) + c.Check(err, gc.ErrorMatches, "migration in progress, model is importing") + + // Machines should be able to use the API. 
+ machineConn := s.OpenAPIAsMachine(c, m.Tag(), password, "nonce") + defer machineConn.Close() + _, err = apimachiner.NewState(machineConn).Machine(m.MachineTag()) + c.Check(err, jc.ErrorIsNil) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/adminv3.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/adminv3.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/adminv3.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/adminv3.go 2016-10-13 14:31:49.000000000 +0000 @@ -11,12 +11,12 @@ "github.com/juju/juju/apiserver/params" ) -type adminApiV3 struct { +type adminAPIV3 struct { *admin } -func newAdminApiV3(srv *Server, root *apiHandler, apiObserver observer.Observer) interface{} { - return &adminApiV3{ +func newAdminAPIV3(srv *Server, root *apiHandler, apiObserver observer.Observer) interface{} { + return &adminAPIV3{ &admin{ srv: srv, root: root, @@ -27,7 +27,7 @@ // Admin returns an object that provides API access to methods that can be // called even when not authenticated. -func (r *adminApiV3) Admin(id string) (*adminApiV3, error) { +func (r *adminAPIV3) Admin(id string) (*adminAPIV3, error) { if id != "" { // Safeguard id for possible future use. return nil, common.ErrBadId @@ -37,13 +37,13 @@ // Login logs in with the provided credentials. All subsequent requests on the // connection will act as the authenticated user. -func (a *adminApiV3) Login(req params.LoginRequest) (params.LoginResultV1, error) { - return a.doLogin(req, 3) +func (a *adminAPIV3) Login(req params.LoginRequest) (params.LoginResult, error) { + return a.login(req, 3) } // RedirectInfo returns redirected host information for the model. // In Juju it always returns an error because the Juju controller // does not multiplex controllers. 
-func (a *adminApiV3) RedirectInfo() (params.RedirectInfoResult, error) { +func (a *adminAPIV3) RedirectInfo() (params.RedirectInfoResult, error) { return params.RedirectInfoResult{}, fmt.Errorf("not redirected") } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/adminv3_test.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/adminv3_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/adminv3_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/adminv3_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -22,17 +22,14 @@ var _ = gc.Suite(&loginV3Suite{ loginSuite{ baseLoginSuite{ - setAdminApi: func(srv *apiserver.Server) { - apiserver.SetAdminApiVersions(srv, 3) + setAdminAPI: func(srv *apiserver.Server) { + apiserver.SetAdminAPIVersions(srv, 3) }, }, }, }) func (s *loginV3Suite) TestClientLoginToEnvironment(c *gc.C) { - _, cleanup := s.setupServerWithValidator(c, nil) - defer cleanup() - info := s.APIInfo(c) apiState, err := api.Open(info, api.DialOpts{}) c.Assert(err, jc.ErrorIsNil) @@ -44,9 +41,6 @@ } func (s *loginV3Suite) TestClientLoginToServer(c *gc.C) { - _, cleanup := s.setupServerWithValidator(c, nil) - defer cleanup() - info := s.APIInfo(c) info.ModelTag = names.ModelTag{} apiState, err := api.Open(info, api.DialOpts{}) @@ -56,15 +50,12 @@ client := apiState.Client() _, err = client.GetModelConstraints() c.Assert(errors.Cause(err), gc.DeepEquals, &rpc.RequestError{ - Message: `logged in to server, no model, "Client" not supported`, + Message: `facade "Client" not supported for controller API connection`, Code: "not supported", }) } func (s *loginV3Suite) TestClientLoginToServerNoAccessToControllerEnv(c *gc.C) { - _, cleanup := s.setupServerWithValidator(c, nil) - defer cleanup() - password := "shhh..." 
user := s.Factory.MakeUser(c, &factory.UserParams{ NoModelUser: true, @@ -87,11 +78,14 @@ } func (s *loginV3Suite) TestClientLoginToRootOldClient(c *gc.C) { - _, cleanup := s.setupServerWithValidator(c, nil) - defer cleanup() - info := s.APIInfo(c) + info.Tag = nil + info.Password = "" info.ModelTag = names.ModelTag{} - _, err := api.OpenWithVersion(info, api.DialOpts{}, 2) + info.SkipLogin = true + apiState, err := api.Open(info, api.DialOpts{}) + c.Assert(err, jc.ErrorIsNil) + + err = apiState.APICall("Admin", 2, "", "Login", struct{}{}, nil) c.Assert(err, gc.ErrorMatches, ".*this version of Juju does not support login from old clients.*") } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/agent/agent.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/agent/agent.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/agent/agent.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/agent/agent.go 2016-10-13 14:31:49.000000000 +0000 @@ -18,6 +18,7 @@ "github.com/juju/juju/state" "github.com/juju/juju/state/multiwatcher" "github.com/juju/juju/state/stateenvirons" + "github.com/juju/juju/state/watcher" ) func init() { @@ -32,8 +33,9 @@ *common.ControllerConfigAPI cloudspec.CloudSpecAPI - st *state.State - auth facade.Authorizer + st *state.State + auth facade.Authorizer + resources facade.Resources } // NewAgentAPIV2 returns an object implementing version 2 of the Agent API @@ -55,6 +57,7 @@ CloudSpecAPI: cloudspec.NewCloudSpec(environConfigGetter.CloudSpec, common.AuthFuncForTag(st.ModelTag())), st: st, auth: auth, + resources: resources, }, nil } @@ -154,3 +157,32 @@ } return pjobs } + +// WatchCredentials watches for changes to the specified credentials. 
+func (api *AgentAPIV2) WatchCredentials(args params.Entities) (params.NotifyWatchResults, error) { + if !api.auth.AuthModelManager() { + return params.NotifyWatchResults{}, common.ErrPerm + } + + results := params.NotifyWatchResults{ + Results: make([]params.NotifyWatchResult, len(args.Entities)), + } + for i, entity := range args.Entities { + credentialTag, err := names.ParseCloudCredentialTag(entity.Tag) + if err != nil { + results.Results[i].Error = common.ServerError(err) + continue + } + watch := api.st.WatchCredential(credentialTag) + // Consume the initial event. Technically, API calls to Watch + // 'transmit' the initial event in the Watch response. But + // NotifyWatchers have no state to transmit. + if _, ok := <-watch.Changes(); ok { + results.Results[i].NotifyWatcherId = api.resources.Register(watch) + } else { + err = watcher.EnsureErr(watch) + results.Results[i].Error = common.ServerError(err) + } + } + return results, nil +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/agent/agent_test.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/agent/agent_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/agent/agent_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/agent/agent_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -14,10 +14,12 @@ "github.com/juju/juju/apiserver/common" "github.com/juju/juju/apiserver/params" apiservertesting "github.com/juju/juju/apiserver/testing" + "github.com/juju/juju/cloud" "github.com/juju/juju/instance" jujutesting "github.com/juju/juju/juju/testing" "github.com/juju/juju/state" "github.com/juju/juju/state/multiwatcher" + statetesting "github.com/juju/juju/state/testing" coretesting "github.com/juju/juju/testing" ) @@ -238,3 +240,39 @@ c.Assert(err, jc.ErrorIsNil) c.Assert(rFlag, jc.IsFalse) } + +func (s *agentSuite) TestWatchCredentials(c *gc.C) { + authorizer := apiservertesting.FakeAuthorizer{ + Tag: names.NewMachineTag("0"), + 
EnvironManager: true, + } + api, err := agent.NewAgentAPIV2(s.State, s.resources, authorizer) + c.Assert(err, jc.ErrorIsNil) + tag := names.NewCloudCredentialTag("dummy/fred/default") + result, err := api.WatchCredentials(params.Entities{Entities: []params.Entity{{Tag: tag.String()}}}) + c.Assert(err, jc.ErrorIsNil) + c.Assert(result, jc.DeepEquals, params.NotifyWatchResults{Results: []params.NotifyWatchResult{{"1", nil}}}) + c.Assert(s.resources.Count(), gc.Equals, 1) + + w := s.resources.Get("1") + defer statetesting.AssertStop(c, w) + + // Check that the Watch has consumed the initial events ("returned" in the Watch call) + wc := statetesting.NewNotifyWatcherC(c, s.State, w.(state.NotifyWatcher)) + wc.AssertNoChange() + + s.State.UpdateCloudCredential(tag, cloud.NewCredential(cloud.UserPassAuthType, nil)) + wc.AssertOneChange() +} + +func (s *agentSuite) TestWatchAuthError(c *gc.C) { + authorizer := apiservertesting.FakeAuthorizer{ + Tag: names.NewMachineTag("1"), + EnvironManager: false, + } + api, err := agent.NewAgentAPIV2(s.State, s.resources, authorizer) + c.Assert(err, jc.ErrorIsNil) + _, err = api.WatchCredentials(params.Entities{}) + c.Assert(err, gc.ErrorMatches, "permission denied") + c.Assert(s.resources.Count(), gc.Equals, 0) +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/agent/model_test.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/agent/model_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/agent/model_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/agent/model_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -48,5 +48,6 @@ ) c.Assert(err, jc.ErrorIsNil) s.ModelWatcherTest = commontesting.NewModelWatcherTest( - s.api, s.State, s.resources, commontesting.NoSecrets) + s.api, s.State, s.resources, + ) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/allfacades.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/allfacades.go --- 
juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/allfacades.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/allfacades.go 2016-10-13 14:31:49.000000000 +0000 @@ -10,30 +10,31 @@ // TODO(fwereade): this is silly. We should be declaring our full API in *one* // place, not scattering it across packages and depending on magic import lists. import ( - _ "github.com/juju/juju/apiserver/action" + _ "github.com/juju/juju/apiserver/action" // ModelUser Write _ "github.com/juju/juju/apiserver/agent" _ "github.com/juju/juju/apiserver/agenttools" - _ "github.com/juju/juju/apiserver/annotations" - _ "github.com/juju/juju/apiserver/application" + _ "github.com/juju/juju/apiserver/annotations" // ModelUser Write + _ "github.com/juju/juju/apiserver/application" // ModelUser Write _ "github.com/juju/juju/apiserver/applicationscaler" - _ "github.com/juju/juju/apiserver/backups" - _ "github.com/juju/juju/apiserver/block" + _ "github.com/juju/juju/apiserver/backups" // ModelUser Write + _ "github.com/juju/juju/apiserver/block" // ModelUser Write + _ "github.com/juju/juju/apiserver/bundle" _ "github.com/juju/juju/apiserver/charmrevisionupdater" - _ "github.com/juju/juju/apiserver/charms" + _ "github.com/juju/juju/apiserver/charms" // ModelUser Write _ "github.com/juju/juju/apiserver/cleaner" - _ "github.com/juju/juju/apiserver/client" - _ "github.com/juju/juju/apiserver/cloud" - _ "github.com/juju/juju/apiserver/controller" + _ "github.com/juju/juju/apiserver/client" // ModelUser Write + _ "github.com/juju/juju/apiserver/cloud" // ModelUser Read + _ "github.com/juju/juju/apiserver/controller" // ModelUser Admin (although some methods check for read only) _ "github.com/juju/juju/apiserver/deployer" _ "github.com/juju/juju/apiserver/discoverspaces" _ "github.com/juju/juju/apiserver/diskmanager" _ "github.com/juju/juju/apiserver/firewaller" - _ "github.com/juju/juju/apiserver/highavailability" + _ 
"github.com/juju/juju/apiserver/highavailability" // ModelUser Write _ "github.com/juju/juju/apiserver/hostkeyreporter" - _ "github.com/juju/juju/apiserver/imagemanager" + _ "github.com/juju/juju/apiserver/imagemanager" // ModelUser Write _ "github.com/juju/juju/apiserver/imagemetadata" _ "github.com/juju/juju/apiserver/instancepoller" - _ "github.com/juju/juju/apiserver/keymanager" + _ "github.com/juju/juju/apiserver/keymanager" // ModelUser Write _ "github.com/juju/juju/apiserver/keyupdater" _ "github.com/juju/juju/apiserver/leadership" _ "github.com/juju/juju/apiserver/lifeflag" @@ -41,27 +42,28 @@ _ "github.com/juju/juju/apiserver/logger" _ "github.com/juju/juju/apiserver/machine" _ "github.com/juju/juju/apiserver/machineactions" - _ "github.com/juju/juju/apiserver/machinemanager" + _ "github.com/juju/juju/apiserver/machinemanager" // ModelUser Write + _ "github.com/juju/juju/apiserver/machineundertaker" _ "github.com/juju/juju/apiserver/meterstatus" _ "github.com/juju/juju/apiserver/metricsadder" - _ "github.com/juju/juju/apiserver/metricsdebug" + _ "github.com/juju/juju/apiserver/metricsdebug" // ModelUser Write _ "github.com/juju/juju/apiserver/metricsmanager" _ "github.com/juju/juju/apiserver/migrationflag" _ "github.com/juju/juju/apiserver/migrationmaster" _ "github.com/juju/juju/apiserver/migrationminion" - _ "github.com/juju/juju/apiserver/migrationtarget" - _ "github.com/juju/juju/apiserver/modelconfig" - _ "github.com/juju/juju/apiserver/modelmanager" + _ "github.com/juju/juju/apiserver/migrationtarget" // ModelUser Write + _ "github.com/juju/juju/apiserver/modelconfig" // ModelUser Write + _ "github.com/juju/juju/apiserver/modelmanager" // ModelUser Write _ "github.com/juju/juju/apiserver/provisioner" _ "github.com/juju/juju/apiserver/proxyupdater" _ "github.com/juju/juju/apiserver/reboot" _ "github.com/juju/juju/apiserver/resumer" _ "github.com/juju/juju/apiserver/retrystrategy" _ "github.com/juju/juju/apiserver/singular" - _ 
"github.com/juju/juju/apiserver/spaces" - _ "github.com/juju/juju/apiserver/sshclient" + _ "github.com/juju/juju/apiserver/spaces" // ModelUser Write + _ "github.com/juju/juju/apiserver/sshclient" // ModelUser Write _ "github.com/juju/juju/apiserver/statushistory" - _ "github.com/juju/juju/apiserver/storage" + _ "github.com/juju/juju/apiserver/storage" // ModelUser Write _ "github.com/juju/juju/apiserver/storageprovisioner" _ "github.com/juju/juju/apiserver/subnets" _ "github.com/juju/juju/apiserver/undertaker" diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/annotations/client.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/annotations/client.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/annotations/client.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/annotations/client.go 2016-10-13 14:31:49.000000000 +0000 @@ -10,6 +10,7 @@ "github.com/juju/juju/apiserver/common" "github.com/juju/juju/apiserver/facade" "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/permission" "github.com/juju/juju/state" ) @@ -40,6 +41,7 @@ resources facade.Resources, authorizer facade.Authorizer, ) (*API, error) { + if !authorizer.AuthClient() { return nil, common.ErrPerm } @@ -50,10 +52,40 @@ }, nil } +func (api *API) checkCanRead() error { + canRead, err := api.authorizer.HasPermission(permission.ReadAccess, api.access.ModelTag()) + if err != nil { + return errors.Trace(err) + } + if !canRead { + return common.ErrPerm + } + return nil +} + +func (api *API) checkCanWrite() error { + canWrite, err := api.authorizer.HasPermission(permission.WriteAccess, api.access.ModelTag()) + if err != nil { + return errors.Trace(err) + } + if !canWrite { + return common.ErrPerm + } + return nil +} + // Get returns annotations for given entities. // If annotations cannot be retrieved for a given entity, an error is returned. 
// Each entity is treated independently and, hence, will fail or succeed independently. func (api *API) Get(args params.Entities) params.AnnotationsGetResults { + if err := api.checkCanRead(); err != nil { + result := make([]params.AnnotationsGetResult, len(args.Entities)) + for i := range result { + result[i].Error = params.ErrorResult{Error: common.ServerError(err)} + } + return params.AnnotationsGetResults{Results: result} + } + entityResults := []params.AnnotationsGetResult{} for _, entity := range args.Entities { anEntityResult := params.AnnotationsGetResult{EntityTag: entity.Tag} @@ -69,6 +101,13 @@ // Set stores annotations for given entities func (api *API) Set(args params.AnnotationsSet) params.ErrorResults { + if err := api.checkCanWrite(); err != nil { + errorResults := make([]params.ErrorResult, len(args.Annotations)) + for i := range errorResults { + errorResults[i].Error = common.ServerError(err) + } + return params.ErrorResults{Results: errorResults} + } setErrors := []params.ErrorResult{} for _, entityAnnotation := range args.Annotations { err := api.setEntityAnnotations(entityAnnotation.EntityTag, entityAnnotation.Annotations) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/annotations/client_test.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/annotations/client_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/annotations/client_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/annotations/client_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -22,7 +22,7 @@ // TODO(anastasiamac) mock to remove JujuConnSuite jujutesting.JujuConnSuite - annotationsApi *annotations.API + annotationsAPI *annotations.API authorizer apiservertesting.FakeAuthorizer } @@ -34,7 +34,7 @@ Tag: s.AdminUserTag(c), } var err error - s.annotationsApi, err = annotations.NewAPI(s.State, nil, s.authorizer) + s.annotationsAPI, err = annotations.NewAPI(s.State, nil, s.authorizer) c.Assert(err, 
jc.ErrorIsNil) } @@ -79,7 +79,7 @@ func (s *annotationSuite) assertAnnotationsRemoval(c *gc.C, tag names.Tag) { entity := tag.String() entities := params.Entities{[]params.Entity{{entity}}} - ann := s.annotationsApi.Get(entities) + ann := s.annotationsAPI.Get(entities) c.Assert(ann.Results, gc.HasLen, 1) aResult := ann.Results[0] @@ -92,11 +92,11 @@ entities := params.Entities{[]params.Entity{{entity}}} annotations := map[string]string{"mykey": "myvalue"} - setResult := s.annotationsApi.Set( + setResult := s.annotationsAPI.Set( params.AnnotationsSet{Annotations: constructSetParameters([]string{entity}, annotations)}) c.Assert(setResult.OneError().Error(), gc.Matches, ".*permission denied.*") - got := s.annotationsApi.Get(entities) + got := s.annotationsAPI.Get(entities) c.Assert(got.Results, gc.HasLen, 1) aResult := got.Results[0] @@ -161,11 +161,11 @@ entities := params.Entities{[]params.Entity{entity}} annotations := map[string]string{"mykey": "myvalue"} - setResult := s.annotationsApi.Set( + setResult := s.annotationsAPI.Set( params.AnnotationsSet{Annotations: constructSetParameters([]string{tag}, annotations)}) c.Assert(setResult.OneError().Error(), gc.Matches, ".*does not support annotations.*") - got := s.annotationsApi.Get(entities) + got := s.annotationsAPI.Get(entities) c.Assert(got.Results, gc.HasLen, 1) aResult := got.Results[0] @@ -201,7 +201,7 @@ } annotations := map[string]string{"mykey": "myvalue"} - setResult := s.annotationsApi.Set( + setResult := s.annotationsAPI.Set( params.AnnotationsSet{Annotations: constructSetParameters(entities, annotations)}) c.Assert(setResult.Results, gc.HasLen, 1) @@ -210,7 +210,7 @@ c.Assert(oneError, gc.Matches, fmt.Sprintf(".*%q.*", rTag)) c.Assert(oneError, gc.Matches, ".*does not support annotations.*") - got := s.annotationsApi.Get(params.Entities{[]params.Entity{ + got := s.annotationsAPI.Get(params.Entities{[]params.Entity{ {rEntity}, {sEntity}}}) c.Assert(got.Results, gc.HasLen, 2) @@ -251,7 +251,7 @@ entities 
[]string, initialAnnotations map[string]string) { if initialAnnotations != nil { - initialResult := s.annotationsApi.Set( + initialResult := s.annotationsAPI.Set( params.AnnotationsSet{ Annotations: constructSetParameters(entities, initialAnnotations)}) c.Assert(initialResult.Combine(), jc.ErrorIsNil) @@ -262,7 +262,7 @@ entities []string, annotations map[string]string, expectedError string) { - setResult := s.annotationsApi.Set( + setResult := s.annotationsAPI.Set( params.AnnotationsSet{Annotations: constructSetParameters(entities, annotations)}) if expectedError != "" { c.Assert(setResult.OneError().Error(), gc.Matches, expectedError) @@ -275,7 +275,7 @@ entities params.Entities, entity string, expected map[string]string) params.AnnotationsGetResult { - got := s.annotationsApi.Get(entities) + got := s.annotationsAPI.Get(entities) c.Assert(got.Results, gc.HasLen, 1) aResult := got.Results[0] @@ -291,7 +291,7 @@ for key := range aResult.Annotations { cleanup[key] = "" } - cleanupResult := s.annotationsApi.Set( + cleanupResult := s.annotationsAPI.Set( params.AnnotationsSet{Annotations: constructSetParameters(entities, cleanup)}) c.Assert(cleanupResult.Combine(), jc.ErrorIsNil) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/annotations/state.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/annotations/state.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/annotations/state.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/annotations/state.go 2016-10-13 14:31:49.000000000 +0000 @@ -13,6 +13,7 @@ FindEntity(tag names.Tag) (state.Entity, error) GetAnnotations(entity state.GlobalEntity) (map[string]string, error) SetAnnotations(entity state.GlobalEntity, annotations map[string]string) error + ModelTag() names.ModelTag } type stateShim struct { @@ -30,3 +31,7 @@ func (s stateShim) SetAnnotations(entity state.GlobalEntity, annotations map[string]string) error { return 
s.state.SetAnnotations(entity, annotations) } + +func (s stateShim) ModelTag() names.ModelTag { + return s.state.ModelTag() +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/apiserver.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/apiserver.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/apiserver.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/apiserver.go 2016-10-13 14:31:49.000000000 +0000 @@ -11,16 +11,20 @@ "strings" "sync" "sync/atomic" - "time" "github.com/bmizerany/pat" "github.com/juju/errors" "github.com/juju/loggo" "github.com/juju/utils" + "github.com/juju/utils/clock" + "golang.org/x/crypto/acme" + "golang.org/x/crypto/acme/autocert" "golang.org/x/net/websocket" "gopkg.in/juju/names.v2" - "launchpad.net/tomb" + "gopkg.in/macaroon-bakery.v1/httpbakery" + "gopkg.in/tomb.v1" + "github.com/juju/juju/apiserver/authentication" "github.com/juju/juju/apiserver/common" "github.com/juju/juju/apiserver/common/apihttp" "github.com/juju/juju/apiserver/observer" @@ -39,6 +43,8 @@ // Server holds the server side of the API. type Server struct { tomb tomb.Tomb + clock clock.Clock + pingClock clock.Clock wg sync.WaitGroup state *state.State statePool *state.StatePool @@ -48,12 +54,24 @@ logDir string limiter utils.Limiter validator LoginValidator - adminApiFactories map[int]adminApiFactory + adminAPIFactories map[int]adminAPIFactory modelUUID string authCtxt *authContext lastConnectionID uint64 newObserver observer.ObserverFactory connCount int64 + certChanged <-chan params.StateServingInfo + tlsConfig *tls.Config + allowModelAccess bool + + // mu guards the fields below it. + mu sync.Mutex + + // cert holds the current certificate used for tls.Config. + cert *tls.Certificate + + // certDNSNames holds the DNS names associated with cert. 
+ certDNSNames []string } // LoginValidator functions are used to decide whether login requests @@ -63,13 +81,30 @@ // ServerConfig holds parameters required to set up an API server. type ServerConfig struct { - Cert []byte - Key []byte + Clock clock.Clock + PingClock clock.Clock + Cert string + Key string Tag names.Tag DataDir string LogDir string Validator LoginValidator - CertChanged chan params.StateServingInfo + CertChanged <-chan params.StateServingInfo + + // AutocertDNSName holds the DNS name for which + // official TLS certificates will be obtained. If this is + // empty, no certificates will be requested. + AutocertDNSName string + + // AutocertURL holds the URL from which official + // TLS certificates will be obtained. By default, + // acme.LetsEncryptURL will be used. + AutocertURL string + + // AllowModelAccess holds whether users will be allowed to + // access models that they have access rights to even when + // they don't have access to the controller. + AllowModelAccess bool // NewObserver is a function which will return an observer. This // is used per-connection to instantiate a new observer to be @@ -81,6 +116,9 @@ } func (c *ServerConfig) Validate() error { + if c.Clock == nil { + return errors.NotValidf("missing Clock") + } if c.NewObserver == nil { return errors.NotValidf("missing NewObserver") } @@ -88,91 +126,11 @@ return nil } -// changeCertListener wraps a TLS net.Listener. -// It allows connection handshakes to be -// blocked while the TLS certificate is updated. -type changeCertListener struct { - net.Listener - tomb tomb.Tomb - - // A mutex used to block accept operations. - m sync.Mutex - - // A channel used to pass in new certificate information. - certChanged <-chan params.StateServingInfo - - // The config to update with any new certificate. 
- config *tls.Config -} - -func newChangeCertListener(lis net.Listener, certChanged <-chan params.StateServingInfo, config *tls.Config) *changeCertListener { - cl := &changeCertListener{ - Listener: lis, - certChanged: certChanged, - config: config, - } - go func() { - defer cl.tomb.Done() - cl.tomb.Kill(cl.processCertChanges()) - }() - return cl -} - -// Accept waits for and returns the next connection to the listener. -func (cl *changeCertListener) Accept() (net.Conn, error) { - conn, err := cl.Listener.Accept() - if err != nil { - return nil, err - } - cl.m.Lock() - defer cl.m.Unlock() - - // make a copy of cl.config so that update certificate does not mutate - // the config passed to the tls.Server for this conn. - config := *cl.config - return tls.Server(conn, &config), nil -} - -// Close closes the listener. -func (cl *changeCertListener) Close() error { - cl.tomb.Kill(nil) - return cl.Listener.Close() -} - -// processCertChanges receives new certificate information and -// calls a method to update the listener's certificate. -func (cl *changeCertListener) processCertChanges() error { - for { - select { - case info := <-cl.certChanged: - if info.Cert != "" { - cl.updateCertificate([]byte(info.Cert), []byte(info.PrivateKey)) - } - case <-cl.tomb.Dying(): - return tomb.ErrDying - } - } -} - -// updateCertificate generates a new TLS certificate and assigns it -// to the TLS listener. 
-func (cl *changeCertListener) updateCertificate(cert, key []byte) { - cl.m.Lock() - defer cl.m.Unlock() - if tlsCert, err := tls.X509KeyPair(cert, key); err != nil { - logger.Errorf("cannot create new TLS certificate: %v", err) - } else { - logger.Infof("updating api server certificate") - x509Cert, err := x509.ParseCertificate(tlsCert.Certificate[0]) - if err == nil { - var addr []string - for _, ip := range x509Cert.IPAddresses { - addr = append(addr, ip.String()) - } - logger.Infof("new certificate addresses: %v", strings.Join(addr, ", ")) - } - cl.config.Certificates = []tls.Certificate{tlsCert} +func (c *ServerConfig) pingClock() clock.Clock { + if c.PingClock == nil { + return c.Clock } + return c.PingClock } // NewServer serves the given state by accepting requests on the given @@ -190,11 +148,7 @@ // server needs to run before mongo upgrades have happened and // any state manipulation may be be relying on features of the // database added by upgrades. Here be dragons. - l, ok := lis.(*net.TCPListener) - if !ok { - return nil, errors.Errorf("listener is not of type *net.TCPListener: %T", lis) - } - srv, err := newServer(s, l, cfg) + srv, err := newServer(s, lis, cfg) if err != nil { // There is no running server around to close the listener. lis.Close() @@ -203,43 +157,84 @@ return srv, nil } -func newServer(s *state.State, lis *net.TCPListener, cfg ServerConfig) (_ *Server, err error) { - tlsCert, err := tls.X509KeyPair(cfg.Cert, cfg.Key) - if err != nil { - return nil, err - } - // TODO(rog) check that *srvRoot is a valid type for using - // as an RPC server. 
- tlsConfig := utils.SecureTLSConfig() - tlsConfig.Certificates = []tls.Certificate{tlsCert} - +func newServer(s *state.State, lis net.Listener, cfg ServerConfig) (_ *Server, err error) { stPool := cfg.StatePool if stPool == nil { stPool = state.NewStatePool(s) } srv := &Server{ + clock: cfg.Clock, + pingClock: cfg.pingClock(), + lis: lis, newObserver: cfg.NewObserver, state: s, statePool: stPool, - lis: newChangeCertListener(lis, cfg.CertChanged, tlsConfig), tag: cfg.Tag, dataDir: cfg.DataDir, logDir: cfg.LogDir, limiter: utils.NewLimiter(loginRateLimit), validator: cfg.Validator, - adminApiFactories: map[int]adminApiFactory{ - 3: newAdminApiV3, + adminAPIFactories: map[int]adminAPIFactory{ + 3: newAdminAPIV3, }, + certChanged: cfg.CertChanged, + allowModelAccess: cfg.AllowModelAccess, } + + srv.tlsConfig = srv.newTLSConfig(cfg) + srv.lis = tls.NewListener(lis, srv.tlsConfig) + srv.authCtxt, err = newAuthContext(s) if err != nil { return nil, errors.Trace(err) } + if err := srv.updateCertificate(cfg.Cert, cfg.Key); err != nil { + return nil, errors.Annotatef(err, "cannot set initial certificate") + } go srv.run() return srv, nil } +func (srv *Server) newTLSConfig(cfg ServerConfig) *tls.Config { + tlsConfig := utils.SecureTLSConfig() + if cfg.AutocertDNSName == "" { + // No official DNS name, no certificate. 
+ tlsConfig.GetCertificate = func(clientHello *tls.ClientHelloInfo) (*tls.Certificate, error) { + cert, _ := srv.localCertificate(clientHello.ServerName) + return cert, nil + } + return tlsConfig + } + m := autocert.Manager{ + Prompt: autocert.AcceptTOS, + Cache: srv.state.AutocertCache(), + HostPolicy: autocert.HostWhitelist(cfg.AutocertDNSName), + } + if cfg.AutocertURL != "" { + m.Client = &acme.Client{ + DirectoryURL: cfg.AutocertURL, + } + } + tlsConfig.GetCertificate = func(clientHello *tls.ClientHelloInfo) (*tls.Certificate, error) { + logger.Infof("getting certificate for server name %q", clientHello.ServerName) + // Get the locally created certificate and whether it's appropriate + // for the SNI name. If not, we'll try to get an acme cert and + // fall back to the local certificate if that fails. + cert, shouldUse := srv.localCertificate(clientHello.ServerName) + if shouldUse { + return cert, nil + } + acmeCert, err := m.GetCertificate(clientHello) + if err == nil { + return acmeCert, nil + } + logger.Errorf("cannot get autocert certificate for %q: %v", clientHello.ServerName, err) + return cert, nil + } + return tlsConfig +} + func (srv *Server) ConnectionCount() int64 { return atomic.LoadInt64(&srv.connCount) } @@ -274,8 +269,11 @@ err := srv.lis.Close() logger.Infof("closed listening socket %q with final error: %v", addr, err) - srv.state.HackLeadership() // Break deadlocks caused by BlockUntil... calls. - srv.wg.Wait() // wait for any outstanding requests to complete. + // Break deadlocks caused by leadership BlockUntil... calls. + srv.statePool.KillWorkers() + srv.state.KillWorkers() + + srv.wg.Wait() // wait for any outstanding requests to complete. 
srv.tomb.Done() srv.statePool.Close() srv.state.Close() @@ -287,6 +285,24 @@ srv.tomb.Kill(srv.mongoPinger()) }() + srv.wg.Add(1) + go func() { + defer srv.wg.Done() + srv.tomb.Kill(srv.expireLocalLoginInteractions()) + }() + + srv.wg.Add(1) + go func() { + defer srv.wg.Done() + srv.tomb.Kill(srv.processCertChanges()) + }() + + srv.wg.Add(1) + go func() { + defer srv.wg.Done() + srv.tomb.Kill(srv.processModelRemovals()) + }() + // for pat based handlers, they are matched in-order of being // registered, first match wins. So more specific ones have to be // registered first. @@ -296,10 +312,13 @@ } go func() { - addr := srv.lis.Addr() // not valid after addr closed - logger.Debugf("Starting API http server on address %q", addr) - err := http.Serve(srv.lis, mux) - // normally logging an error at debug level would be grounds for a beating, + logger.Debugf("Starting API http server on address %q", srv.lis.Addr()) + httpSrv := &http.Server{ + Handler: mux, + TLSConfig: srv.tlsConfig, + } + err := httpSrv.Serve(srv.lis) + // Normally logging an error at debug level would be grounds for a beating, // however in this case the error is *expected* to be non nil, and does not // affect the operation of the apiserver, but for completeness log it anyway. 
logger.Debugf("API http server exited, final error was: %v", err) @@ -343,11 +362,16 @@ add("/model/:modeluuid/logsink", logSinkHandler) add("/model/:modeluuid/logstream", logStreamHandler) add("/model/:modeluuid/log", debugLogHandler) - add("/model/:modeluuid/charms", - &charmsHandler{ - ctxt: httpCtxt, - dataDir: srv.dataDir}, - ) + + charmsHandler := &charmsHandler{ + ctxt: httpCtxt, + dataDir: srv.dataDir, + } + charmsServer := &CharmsHTTPHandler{ + PostHandler: charmsHandler.ServePost, + GetHandler: charmsHandler.ServeGet, + } + add("/model/:modeluuid/charms", charmsServer) add("/model/:modeluuid/tools", &toolsUploadHandler{ ctxt: httpCtxt, @@ -376,12 +400,7 @@ // For backwards compatibility we register all the old paths add("/log", debugLogHandler) - add("/charms", - &charmsHandler{ - ctxt: httpCtxt, - dataDir: srv.dataDir, - }, - ) + add("/charms", charmsServer) add("/tools", &toolsUploadHandler{ ctxt: httpCtxt, @@ -394,15 +413,52 @@ ) add("/register", ®isterUserHandler{ - httpCtxt, - srv.authCtxt.userAuth.CreateLocalLoginMacaroon, + ctxt: httpCtxt, }, ) + add("/api", mainAPIHandler) + // Serve the API at / (only) for backward compatiblity. Note that the + // pat muxer special-cases / so that it does not serve all + // possible endpoints, but only / itself. add("/", mainAPIHandler) + // Add HTTP handlers for local-user macaroon authentication. 
+ localLoginHandlers := &localLoginHandlers{srv.authCtxt, srv.state} + dischargeMux := http.NewServeMux() + httpbakery.AddDischargeHandler( + dischargeMux, + localUserIdentityLocationPath, + localLoginHandlers.authCtxt.localUserThirdPartyBakeryService, + localLoginHandlers.checkThirdPartyCaveat, + ) + dischargeMux.Handle( + localUserIdentityLocationPath+"/login", + makeHandler(handleJSON(localLoginHandlers.serveLogin)), + ) + dischargeMux.Handle( + localUserIdentityLocationPath+"/wait", + makeHandler(handleJSON(localLoginHandlers.serveWait)), + ) + add(localUserIdentityLocationPath+"/discharge", dischargeMux) + add(localUserIdentityLocationPath+"/publickey", dischargeMux) + add(localUserIdentityLocationPath+"/login", dischargeMux) + add(localUserIdentityLocationPath+"/wait", dischargeMux) + return endpoints } +func (srv *Server) expireLocalLoginInteractions() error { + for { + select { + case <-srv.tomb.Dying(): + return tomb.ErrDying + case <-srv.clock.After(authentication.LocalLoginInteractionTimeout): + now := srv.authCtxt.clock.Now() + srv.authCtxt.localUserInteractions.Expire(now) + } + } +} + func (srv *Server) newHandlerArgs(spec apihttp.HandlerConstraints) apihttp.NewHandlerArgs { ctxt := httpContext{ srv: srv, @@ -492,7 +548,7 @@ Handler: func(conn *websocket.Conn) { modelUUID := req.URL.Query().Get(":modeluuid") logger.Tracef("got a request for model %q", modelUUID) - if err := srv.serveConn(conn, modelUUID, apiObserver); err != nil { + if err := srv.serveConn(conn, modelUUID, apiObserver, req.Host); err != nil { logger.Errorf("error serving RPCs: %v", err) } }, @@ -500,30 +556,11 @@ wsServer.ServeHTTP(w, req) } -func (srv *Server) serveConn(wsConn *websocket.Conn, modelUUID string, apiObserver observer.Observer) error { +func (srv *Server) serveConn(wsConn *websocket.Conn, modelUUID string, apiObserver observer.Observer, host string) error { codec := jsoncodec.NewWebsocket(wsConn) conn := rpc.NewConn(codec, apiObserver) - h, err := 
srv.newAPIHandler(conn, modelUUID) - if err != nil { - conn.ServeFinder(&errRoot{err}, serverError) - } else { - adminApis := make(map[int]interface{}) - for apiVersion, factory := range srv.adminApiFactories { - adminApis[apiVersion] = factory(srv, h, apiObserver) - } - conn.ServeFinder(newAnonRoot(h, adminApis), serverError) - } - conn.Start() - select { - case <-conn.Dead(): - case <-srv.tomb.Dying(): - } - return conn.Close() -} - -func (srv *Server) newAPIHandler(conn *rpc.Conn, modelUUID string) (*apiHandler, error) { // Note that we don't overwrite modelUUID here because // newAPIHandler treats an empty modelUUID as signifying // the API version used. @@ -531,33 +568,123 @@ statePool: srv.statePool, modelUUID: modelUUID, }) - if err != nil { - return nil, errors.Trace(err) + var ( + st *state.State + h *apiHandler + ) + if err == nil { + st, err = srv.statePool.Get(resolvedModelUUID) } - st, err := srv.statePool.Get(resolvedModelUUID) + + if err == nil { + defer func() { + err := srv.statePool.Release(resolvedModelUUID) + if err != nil { + logger.Errorf("error releasing %v back into the state pool:", err) + } + }() + h, err = newAPIHandler(srv, st, conn, modelUUID, host) + } + if err != nil { - return nil, errors.Trace(err) + conn.ServeRoot(&errRoot{errors.Trace(err)}, serverError) + } else { + adminAPIs := make(map[int]interface{}) + for apiVersion, factory := range srv.adminAPIFactories { + adminAPIs[apiVersion] = factory(srv, h, apiObserver) + } + conn.ServeRoot(newAnonRoot(h, adminAPIs), serverError) } - return newApiHandler(srv, st, conn, modelUUID) + conn.Start() + select { + case <-conn.Dead(): + case <-srv.tomb.Dying(): + } + return conn.Close() } func (srv *Server) mongoPinger() error { - // TODO(fwereade): 2016-03-17 lp:1558657 - timer := time.NewTimer(0) session := srv.state.MongoSession().Copy() defer session.Close() for { + if err := session.Ping(); err != nil { + logger.Infof("got error pinging mongo: %v", err) + return errors.Annotate(err, 
"error pinging mongo") + } select { - case <-timer.C: + case <-srv.clock.After(mongoPingInterval): case <-srv.tomb.Dying(): return tomb.ErrDying } - if err := session.Ping(); err != nil { - logger.Infof("got error pinging mongo: %v", err) - return errors.Annotate(err, "error pinging mongo") + } +} + +// localCertificate returns the local server certificate and reports +// whether it should be used to serve a connection addressed to the +// given server name. +func (srv *Server) localCertificate(serverName string) (*tls.Certificate, bool) { + srv.mu.Lock() + defer srv.mu.Unlock() + if net.ParseIP(serverName) != nil { + // IP address connections always use the local certificate. + return srv.cert, true + } + if !strings.Contains(serverName, ".") { + // If the server name doesn't contain a period there's no + // way we can obtain a certificate for it. + // This applies to the common case where "juju-apiserver" is + // used as the server name. + return srv.cert, true + } + // Perhaps the server name is explicitly mentioned by the server certificate. + for _, name := range srv.certDNSNames { + if name == serverName { + return srv.cert, true } - timer.Reset(mongoPingInterval) } + return srv.cert, false +} + +// processCertChanges receives new certificate information and +// calls a method to update the listener's certificate. +func (srv *Server) processCertChanges() error { + for { + select { + case info := <-srv.certChanged: + if info.Cert == "" { + break + } + logger.Infof("received API server certificate") + if err := srv.updateCertificate(info.Cert, info.PrivateKey); err != nil { + logger.Errorf("cannot update certificate: %v", err) + } + case <-srv.tomb.Dying(): + return tomb.ErrDying + } + } +} + +// updateCertificate updates the current CA certificate and key +// from the given cert and key. 
+func (srv *Server) updateCertificate(cert, key string) error { + srv.mu.Lock() + defer srv.mu.Unlock() + tlsCert, err := tls.X509KeyPair([]byte(cert), []byte(key)) + if err != nil { + return errors.Annotatef(err, "cannot create new TLS certificate") + } + x509Cert, err := x509.ParseCertificate(tlsCert.Certificate[0]) + if err != nil { + return errors.Annotatef(err, "parsing x509 cert") + } + var addr []string + for _, ip := range x509Cert.IPAddresses { + addr = append(addr, ip.String()) + } + logger.Infof("new certificate addresses: %v", strings.Join(addr, ", ")) + srv.cert = &tlsCert + srv.certDNSNames = x509Cert.DNSNames + return nil } func serverError(err error) error { @@ -566,3 +693,35 @@ } return nil } + +func (srv *Server) processModelRemovals() error { + w := srv.state.WatchModels() + defer w.Stop() + for { + select { + case <-srv.tomb.Dying(): + return tomb.ErrDying + case modelUUIDs := <-w.Changes(): + for _, modelUUID := range modelUUIDs { + model, err := srv.state.GetModel(names.NewModelTag(modelUUID)) + gone := errors.IsNotFound(err) + dead := err == nil && model.Life() == state.Dead + if err != nil && !gone { + return errors.Trace(err) + } + if !dead && !gone { + continue + } + + logger.Debugf("removing model %v from the state pool", modelUUID) + // Model's gone away - ensure that it gets removed + // from from the state pool once people are finished + // with it. + err = srv.statePool.Remove(modelUUID) + if err != nil { + return errors.Trace(err) + } + } + } + } +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/apiserver_test.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/apiserver_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/apiserver_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/apiserver_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,129 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+ +package apiserver_test + +import ( + "fmt" + "net" + + jc "github.com/juju/testing/checkers" + "github.com/juju/utils" + "github.com/juju/utils/clock" + gc "gopkg.in/check.v1" + "gopkg.in/juju/names.v2" + + "github.com/juju/juju/api" + "github.com/juju/juju/apiserver" + "github.com/juju/juju/apiserver/observer" + "github.com/juju/juju/apiserver/observer/fakeobserver" + "github.com/juju/juju/state" + statetesting "github.com/juju/juju/state/testing" + coretesting "github.com/juju/juju/testing" + "github.com/juju/juju/worker/workertest" +) + +const ( + ownerPassword = "very very secret" +) + +type apiserverBaseSuite struct { + statetesting.StateSuite +} + +func (s *apiserverBaseSuite) SetUpTest(c *gc.C) { + s.StateSuite.SetUpTest(c) + u, err := s.State.User(s.Owner) + c.Assert(err, jc.ErrorIsNil) + err = u.SetPassword(ownerPassword) + c.Assert(err, jc.ErrorIsNil) +} + +func (s *apiserverBaseSuite) sampleConfig(c *gc.C) apiserver.ServerConfig { + return apiserver.ServerConfig{ + Clock: clock.WallClock, + Cert: coretesting.ServerCert, + Key: coretesting.ServerKey, + Tag: names.NewMachineTag("0"), + LogDir: c.MkDir(), + NewObserver: func() observer.Observer { return &fakeobserver.Instance{} }, + AutocertURL: "https://0.1.2.3/no-autocert-here", + } +} + +func (s *apiserverBaseSuite) newServerNoCleanup(c *gc.C, config apiserver.ServerConfig) *apiserver.Server { + listener, err := net.Listen("tcp", ":0") + c.Assert(err, jc.ErrorIsNil) + srv, err := apiserver.NewServer(s.State, listener, config) + c.Assert(err, jc.ErrorIsNil) + return srv +} + +func (s *apiserverBaseSuite) newServer(c *gc.C, config apiserver.ServerConfig) *apiserver.Server { + srv := s.newServerNoCleanup(c, config) + s.AddCleanup(func(c *gc.C) { + workertest.CleanKill(c, srv) + }) + return srv +} + +func (s *apiserverBaseSuite) newServerDirtyKill(c *gc.C, config apiserver.ServerConfig) *apiserver.Server { + srv := s.newServerNoCleanup(c, config) + s.AddCleanup(func(c *gc.C) { + workertest.DirtyKill(c, 
srv) + }) + return srv +} + +// APIInfo returns an info struct that has the server's address and ca-cert +// populated. +func (s *apiserverBaseSuite) APIInfo(server *apiserver.Server) *api.Info { + address := fmt.Sprintf("localhost:%d", server.Addr().Port) + return &api.Info{ + Addrs: []string{address}, + CACert: coretesting.CACert, + } +} + +func (s *apiserverBaseSuite) openAPIAs(c *gc.C, srv *apiserver.Server, tag names.Tag, password, nonce string, controllerOnly bool) api.Connection { + apiInfo := s.APIInfo(srv) + apiInfo.Tag = tag + apiInfo.Password = password + apiInfo.Nonce = nonce + if !controllerOnly { + apiInfo.ModelTag = s.State.ModelTag() + } + conn, err := api.Open(apiInfo, api.DialOpts{}) + c.Assert(err, jc.ErrorIsNil) + c.Assert(conn, gc.NotNil) + s.AddCleanup(func(c *gc.C) { + conn.Close() + }) + return conn +} + +// OpenAPIAsNewMachine creates a new client connection logging in as the +// controller owner. The returned api.Connection should not be closed by the +// caller as a cleanup function has been registered to do that. +func (s *apiserverBaseSuite) OpenAPIAsAdmin(c *gc.C, srv *apiserver.Server) api.Connection { + return s.openAPIAs(c, srv, s.Owner, ownerPassword, "", false) +} + +// OpenAPIAsNewMachine creates a new machine entry that lives in system state, +// and then uses that to open the API. The returned api.Connection should not be +// closed by the caller as a cleanup function has been registered to do that. +// The machine will run the supplied jobs; if none are given, JobHostUnits is assumed. +func (s *apiserverBaseSuite) OpenAPIAsNewMachine(c *gc.C, srv *apiserver.Server, jobs ...state.MachineJob) (api.Connection, *state.Machine) { + if len(jobs) == 0 { + jobs = []state.MachineJob{state.JobHostUnits} + } + machine, err := s.State.AddMachine("quantal", jobs...) 
+ c.Assert(err, jc.ErrorIsNil) + password, err := utils.RandomPassword() + c.Assert(err, jc.ErrorIsNil) + err = machine.SetPassword(password) + c.Assert(err, jc.ErrorIsNil) + err = machine.SetProvisioned("foo", "fake_nonce", nil) + c.Assert(err, jc.ErrorIsNil) + return s.openAPIAs(c, srv, machine.Tag(), password, "fake_nonce", false), machine +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/application/application.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/application/application.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/application/application.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/application/application.go 2016-10-13 14:31:49.000000000 +0000 @@ -18,6 +18,7 @@ "github.com/juju/juju/apiserver/params" "github.com/juju/juju/instance" jjj "github.com/juju/juju/juju" + "github.com/juju/juju/permission" "github.com/juju/juju/state" statestorage "github.com/juju/juju/state/storage" ) @@ -29,41 +30,88 @@ ) func init() { - common.RegisterStandardFacade("Application", 1, NewAPI) -} + common.RegisterStandardFacade("Application", 1, newAPI) -// Application defines the methods on the application API end point. -type Application interface { - SetMetricCredentials(args params.ApplicationMetricCredentials) (params.ErrorResults, error) + // Facade version 2 adds support for the ConfigSettings + // and StorageConstraints fields in SetCharm. + common.RegisterStandardFacade("Application", 2, newAPI) } // API implements the application interface and is the concrete // implementation of the api end point. type API struct { - check *common.BlockChecker - state *state.State + backend Backend authorizer facade.Authorizer + check BlockChecker + + // TODO(axw) stateCharm only exists because I ran out + // of time unwinding all of the tendrils of state. We + // should pass a charm.Charm and charm.URL back into + // state wherever we pass in a state.Charm currently. 
+ stateCharm func(Charm) *state.Charm } -// NewAPI returns a new application API facade. -func NewAPI( +func newAPI( st *state.State, resources facade.Resources, authorizer facade.Authorizer, ) (*API, error) { + backend := NewStateBackend(st) + blockChecker := common.NewBlockChecker(st) + stateCharm := CharmToStateCharm + return NewAPI( + backend, + authorizer, + blockChecker, + stateCharm, + ) +} + +// NewAPI returns a new application API facade. +func NewAPI( + backend Backend, + authorizer facade.Authorizer, + blockChecker BlockChecker, + stateCharm func(Charm) *state.Charm, +) (*API, error) { if !authorizer.AuthClient() { return nil, common.ErrPerm } - return &API{ - state: st, + backend: backend, authorizer: authorizer, - check: common.NewBlockChecker(st), + check: blockChecker, + stateCharm: stateCharm, }, nil } +func (api *API) checkCanRead() error { + canRead, err := api.authorizer.HasPermission(permission.ReadAccess, api.backend.ModelTag()) + if err != nil { + return errors.Trace(err) + } + if !canRead { + return common.ErrPerm + } + return nil +} + +func (api *API) checkCanWrite() error { + canWrite, err := api.authorizer.HasPermission(permission.WriteAccess, api.backend.ModelTag()) + if err != nil { + return errors.Trace(err) + } + if !canWrite { + return common.ErrPerm + } + return nil +} + // SetMetricCredentials sets credentials on the application. 
func (api *API) SetMetricCredentials(args params.ApplicationMetricCredentials) (params.ErrorResults, error) { + if err := api.checkCanWrite(); err != nil { + return params.ErrorResults{}, errors.Trace(err) + } result := params.ErrorResults{ Results: make([]params.ErrorResult, len(args.Creds)), } @@ -71,7 +119,7 @@ return result, nil } for i, a := range args.Creds { - application, err := api.state.Application(a.ApplicationName) + application, err := api.backend.Application(a.ApplicationName) if err != nil { result.Results[i].Error = common.ServerError(err) continue @@ -87,6 +135,9 @@ // Deploy fetches the charms from the charm store and deploys them // using the specified placement directives. func (api *API) Deploy(args params.ApplicationsDeploy) (params.ErrorResults, error) { + if err := api.checkCanWrite(); err != nil { + return params.ErrorResults{}, errors.Trace(err) + } result := params.ErrorResults{ Results: make([]params.ErrorResult, len(args.Applications)), } @@ -94,7 +145,7 @@ return result, errors.Trace(err) } for i, arg := range args.Applications { - err := deployApplication(api.state, arg) + err := deployApplication(api.backend, api.stateCharm, arg) result.Results[i].Error = common.ServerError(err) } return result, nil @@ -103,8 +154,12 @@ // deployApplication fetches the charm from the charm store and deploys it. // The logic has been factored out into a common function which is called by // both the legacy API on the client facade, as well as the new application facade. 
-func deployApplication(st *state.State, args params.ApplicationDeploy) error { - curl, err := charm.ParseURL(args.CharmUrl) +func deployApplication( + backend Backend, + stateCharm func(Charm) *state.Charm, + args params.ApplicationDeploy, +) error { + curl, err := charm.ParseURL(args.CharmURL) if err != nil { return errors.Trace(err) } @@ -117,14 +172,14 @@ if p.Scope != instance.MachineScope { continue } - _, err = st.Machine(p.Directive) + _, err = backend.Machine(p.Directive) if err != nil { return errors.Annotatef(err, `cannot deploy "%v" to machine %v`, args.ApplicationName, p.Directive) } } // Try to find the charm URL in state first. - ch, err := st.Charm(curl) + ch, err := backend.Charm(curl) if err != nil { return errors.Trace(err) } @@ -138,7 +193,7 @@ settings, err = ch.Config().ParseSettingsYAML([]byte(args.ConfigYAML), args.ApplicationName) } else if len(args.Config) > 0 { // Parse config in a compatible way (see function comment). - settings, err = parseSettingsCompatible(ch, args.Config) + settings, err = parseSettingsCompatible(ch.Config(), args.Config) } if err != nil { return errors.Trace(err) @@ -146,11 +201,11 @@ channel := csparams.Channel(args.Channel) - _, err = jjj.DeployApplication(st, + _, err = jjj.DeployApplication(backend, jjj.DeployApplicationParams{ ApplicationName: args.ApplicationName, Series: args.Series, - Charm: ch, + Charm: stateCharm(ch), Channel: channel, NumUnits: args.NumUnits, ConfigSettings: settings, @@ -165,13 +220,13 @@ // ApplicationSetSettingsStrings updates the settings for the given application, // taking the configuration from a map of strings. -func ApplicationSetSettingsStrings(application *state.Application, settings map[string]string) error { +func ApplicationSetSettingsStrings(application Application, settings map[string]string) error { ch, _, err := application.Charm() if err != nil { return errors.Trace(err) } // Parse config in a compatible way (see function comment). 
- changes, err := parseSettingsCompatible(ch, settings) + changes, err := parseSettingsCompatible(ch.Config(), settings) if err != nil { return errors.Trace(err) } @@ -184,7 +239,7 @@ // string caused it to reset to the default value. We now allow // empty strings as actual values, but we want to preserve the API // behavior. -func parseSettingsCompatible(ch *state.Charm, settings map[string]string) (charm.Settings, error) { +func parseSettingsCompatible(charmConfig *charm.Config, settings map[string]string) (charm.Settings, error) { setSettings := map[string]string{} unsetSettings := charm.Settings{} // Split settings into those which set and those which unset a value. @@ -196,14 +251,14 @@ setSettings[name] = value } // Validate the settings. - changes, err := ch.Config().ParseSettingsStrings(setSettings) + changes, err := charmConfig.ParseSettingsStrings(setSettings) if err != nil { - return nil, err + return nil, errors.Trace(err) } // Validate the unsettings and merge them into the changes. - unsetSettings, err = ch.Config().ValidateSettings(unsetSettings) + unsetSettings, err = charmConfig.ValidateSettings(unsetSettings) if err != nil { - return nil, err + return nil, errors.Trace(err) } for name := range unsetSettings { changes[name] = nil @@ -215,79 +270,143 @@ // minimum number of units, settings and constraints. // All parameters in params.ApplicationUpdate except the application name are optional. func (api *API) Update(args params.ApplicationUpdate) error { - if !args.ForceCharmUrl { + if err := api.checkCanWrite(); err != nil { + return err + } + if !args.ForceCharmURL { if err := api.check.ChangeAllowed(); err != nil { return errors.Trace(err) } } - svc, err := api.state.Application(args.ApplicationName) + app, err := api.backend.Application(args.ApplicationName) if err != nil { return errors.Trace(err) } // Set the charm for the given application. 
- if args.CharmUrl != "" { + if args.CharmURL != "" { // For now we do not support changing the channel through Update(). // TODO(ericsnow) Support it? - channel := svc.Channel() - if err = api.applicationSetCharm(svc, args.CharmUrl, channel, args.ForceSeries, args.ForceCharmUrl, nil); err != nil { + channel := app.Channel() + if err = api.applicationSetCharm( + args.ApplicationName, + app, + args.CharmURL, + channel, + nil, // charm settings (strings map) + "", // charm settings (YAML) + args.ForceSeries, + args.ForceCharmURL, + nil, // resource IDs + nil, // storage constraints + ); err != nil { return errors.Trace(err) } } // Set the minimum number of units for the given application. if args.MinUnits != nil { - if err = svc.SetMinUnits(*args.MinUnits); err != nil { + if err = app.SetMinUnits(*args.MinUnits); err != nil { return errors.Trace(err) } } // Set up application's settings. if args.SettingsYAML != "" { - if err = applicationSetSettingsYAML(svc, args.SettingsYAML); err != nil { + if err = applicationSetSettingsYAML(args.ApplicationName, app, args.SettingsYAML); err != nil { return errors.Annotate(err, "setting configuration from YAML") } } else if len(args.SettingsStrings) > 0 { - if err = ApplicationSetSettingsStrings(svc, args.SettingsStrings); err != nil { + if err = ApplicationSetSettingsStrings(app, args.SettingsStrings); err != nil { return errors.Trace(err) } } // Update application's constraints. if args.Constraints != nil { - return svc.SetConstraints(*args.Constraints) + return app.SetConstraints(*args.Constraints) } return nil } // SetCharm sets the charm for a given for the application. 
func (api *API) SetCharm(args params.ApplicationSetCharm) error { + if err := api.checkCanWrite(); err != nil { + return err + } // when forced units in error, don't block if !args.ForceUnits { if err := api.check.ChangeAllowed(); err != nil { return errors.Trace(err) } } - application, err := api.state.Application(args.ApplicationName) + application, err := api.backend.Application(args.ApplicationName) if err != nil { return errors.Trace(err) } channel := csparams.Channel(args.Channel) - return api.applicationSetCharm(application, args.CharmUrl, channel, args.ForceSeries, args.ForceUnits, args.ResourceIDs) + return api.applicationSetCharm( + args.ApplicationName, + application, + args.CharmURL, + channel, + args.ConfigSettings, + args.ConfigSettingsYAML, + args.ForceSeries, + args.ForceUnits, + args.ResourceIDs, + args.StorageConstraints, + ) } // applicationSetCharm sets the charm for the given for the application. -func (api *API) applicationSetCharm(application *state.Application, url string, channel csparams.Channel, forceSeries, forceUnits bool, resourceIDs map[string]string) error { +func (api *API) applicationSetCharm( + appName string, + application Application, + url string, + channel csparams.Channel, + configSettingsStrings map[string]string, + configSettingsYAML string, + forceSeries, + forceUnits bool, + resourceIDs map[string]string, + storageConstraints map[string]params.StorageConstraints, +) error { curl, err := charm.ParseURL(url) if err != nil { return errors.Trace(err) } - sch, err := api.state.Charm(curl) + sch, err := api.backend.Charm(curl) if err != nil { return errors.Trace(err) } + var settings charm.Settings + if configSettingsYAML != "" { + settings, err = sch.Config().ParseSettingsYAML([]byte(configSettingsYAML), appName) + } else if len(configSettingsStrings) > 0 { + settings, err = parseSettingsCompatible(sch.Config(), configSettingsStrings) + } + if err != nil { + return errors.Annotate(err, "parsing config settings") + } + var 
stateStorageConstraints map[string]state.StorageConstraints + if len(storageConstraints) > 0 { + stateStorageConstraints = make(map[string]state.StorageConstraints) + for name, cons := range storageConstraints { + stateCons := state.StorageConstraints{Pool: cons.Pool} + if cons.Size != nil { + stateCons.Size = *cons.Size + } + if cons.Count != nil { + stateCons.Count = *cons.Count + } + stateStorageConstraints[name] = stateCons + } + } cfg := state.SetCharmConfig{ - Charm: sch, - Channel: channel, - ForceSeries: forceSeries, - ForceUnits: forceUnits, - ResourceIDs: resourceIDs, + Charm: api.stateCharm(sch), + Channel: channel, + ConfigSettings: settings, + ForceSeries: forceSeries, + ForceUnits: forceUnits, + ResourceIDs: resourceIDs, + StorageConstraints: stateStorageConstraints, } return application.SetCharm(cfg) } @@ -322,14 +441,14 @@ // applicationSetSettingsYAML updates the settings for the given application, // taking the configuration from a YAML string. -func applicationSetSettingsYAML(application *state.Application, settings string) error { +func applicationSetSettingsYAML(appName string, application Application, settings string) error { b := []byte(settings) var all map[string]interface{} if err := goyaml.Unmarshal(b, &all); err != nil { return errors.Annotate(err, "parsing settings data") } // The file is already in the right format. - if _, ok := all[application.Name()]; !ok { + if _, ok := all[appName]; !ok { changes, err := settingsFromGetYaml(all) if err != nil { return errors.Annotate(err, "processing YAML generated by get") @@ -342,7 +461,7 @@ return errors.Annotate(err, "obtaining charm for this application") } - changes, err := ch.Config().ParseSettingsYAML(b, application.Name()) + changes, err := ch.Config().ParseSettingsYAML(b, appName) if err != nil { return errors.Annotate(err, "creating config from YAML") } @@ -352,9 +471,12 @@ // GetCharmURL returns the charm URL the given application is // running at present. 
func (api *API) GetCharmURL(args params.ApplicationGet) (params.StringResult, error) { - application, err := api.state.Application(args.ApplicationName) + if err := api.checkCanWrite(); err != nil { + return params.StringResult{}, errors.Trace(err) + } + application, err := api.backend.Application(args.ApplicationName) if err != nil { - return params.StringResult{}, err + return params.StringResult{}, errors.Trace(err) } charmURL, _ := application.CharmURL() return params.StringResult{Result: charmURL.String()}, nil @@ -364,14 +486,17 @@ // It does not unset values that are set to an empty string. // Unset should be used for that. func (api *API) Set(p params.ApplicationSet) error { + if err := api.checkCanWrite(); err != nil { + return err + } if err := api.check.ChangeAllowed(); err != nil { return errors.Trace(err) } - svc, err := api.state.Application(p.ApplicationName) + app, err := api.backend.Application(p.ApplicationName) if err != nil { return err } - ch, _, err := svc.Charm() + ch, _, err := app.Charm() if err != nil { return err } @@ -381,16 +506,19 @@ return err } - return svc.UpdateConfigSettings(changes) + return app.UpdateConfigSettings(changes) } // Unset implements the server side of Client.Unset. func (api *API) Unset(p params.ApplicationUnset) error { + if err := api.checkCanWrite(); err != nil { + return err + } if err := api.check.ChangeAllowed(); err != nil { return errors.Trace(err) } - svc, err := api.state.Application(p.ApplicationName) + app, err := api.backend.Application(p.ApplicationName) if err != nil { return err } @@ -398,19 +526,23 @@ for _, option := range p.Options { settings[option] = nil } - return svc.UpdateConfigSettings(settings) + return app.UpdateConfigSettings(settings) } // CharmRelations implements the server side of Application.CharmRelations. 
func (api *API) CharmRelations(p params.ApplicationCharmRelations) (params.ApplicationCharmRelationsResults, error) { var results params.ApplicationCharmRelationsResults - application, err := api.state.Application(p.ApplicationName) + if err := api.checkCanRead(); err != nil { + return results, errors.Trace(err) + } + + application, err := api.backend.Application(p.ApplicationName) if err != nil { - return results, err + return results, errors.Trace(err) } endpoints, err := application.Endpoints() if err != nil { - return results, err + return results, errors.Trace(err) } results.CharmRelations = make([]string, len(endpoints)) for i, endpoint := range endpoints { @@ -422,49 +554,58 @@ // Expose changes the juju-managed firewall to expose any ports that // were also explicitly marked by units as open. func (api *API) Expose(args params.ApplicationExpose) error { + if err := api.checkCanWrite(); err != nil { + return err + } if err := api.check.ChangeAllowed(); err != nil { return errors.Trace(err) } - svc, err := api.state.Application(args.ApplicationName) + app, err := api.backend.Application(args.ApplicationName) if err != nil { return err } - return svc.SetExposed() + return app.SetExposed() } // Unexpose changes the juju-managed firewall to unexpose any ports that // were also explicitly marked by units as open. func (api *API) Unexpose(args params.ApplicationUnexpose) error { + if err := api.checkCanWrite(); err != nil { + return err + } if err := api.check.ChangeAllowed(); err != nil { return errors.Trace(err) } - svc, err := api.state.Application(args.ApplicationName) + app, err := api.backend.Application(args.ApplicationName) if err != nil { return err } - return svc.ClearExposed() + return app.ClearExposed() } // addApplicationUnits adds a given number of units to an application. 
-func addApplicationUnits(st *state.State, args params.AddApplicationUnits) ([]*state.Unit, error) { - application, err := st.Application(args.ApplicationName) +func addApplicationUnits(backend Backend, args params.AddApplicationUnits) ([]*state.Unit, error) { + application, err := backend.Application(args.ApplicationName) if err != nil { - return nil, err + return nil, errors.Trace(err) } if args.NumUnits < 1 { return nil, errors.New("must add at least one unit") } - return jjj.AddUnits(st, application, args.NumUnits, args.Placement) + return jjj.AddUnits(backend, application, args.ApplicationName, args.NumUnits, args.Placement) } // AddUnits adds a given number of units to an application. func (api *API) AddUnits(args params.AddApplicationUnits) (params.AddApplicationUnitsResults, error) { + if err := api.checkCanWrite(); err != nil { + return params.AddApplicationUnitsResults{}, errors.Trace(err) + } if err := api.check.ChangeAllowed(); err != nil { return params.AddApplicationUnitsResults{}, errors.Trace(err) } - units, err := addApplicationUnits(api.state, args) + units, err := addApplicationUnits(api.backend, args) if err != nil { - return params.AddApplicationUnitsResults{}, err + return params.AddApplicationUnitsResults{}, errors.Trace(err) } unitNames := make([]string, len(units)) for i, unit := range units { @@ -475,12 +616,15 @@ // DestroyUnits removes a given set of application units. func (api *API) DestroyUnits(args params.DestroyApplicationUnits) error { + if err := api.checkCanWrite(); err != nil { + return err + } if err := api.check.RemoveAllowed(); err != nil { return errors.Trace(err) } var errs []string for _, name := range args.UnitNames { - unit, err := api.state.Unit(name) + unit, err := api.backend.Unit(name) switch { case errors.IsNotFound(err): err = errors.Errorf("unit %q does not exist", name) @@ -501,56 +645,68 @@ // Destroy destroys a given application. 
func (api *API) Destroy(args params.ApplicationDestroy) error { + if err := api.checkCanWrite(); err != nil { + return err + } if err := api.check.RemoveAllowed(); err != nil { return errors.Trace(err) } - svc, err := api.state.Application(args.ApplicationName) + app, err := api.backend.Application(args.ApplicationName) if err != nil { return err } - return svc.Destroy() + return app.Destroy() } // GetConstraints returns the constraints for a given application. func (api *API) GetConstraints(args params.GetApplicationConstraints) (params.GetConstraintsResults, error) { - svc, err := api.state.Application(args.ApplicationName) + if err := api.checkCanRead(); err != nil { + return params.GetConstraintsResults{}, errors.Trace(err) + } + app, err := api.backend.Application(args.ApplicationName) if err != nil { - return params.GetConstraintsResults{}, err + return params.GetConstraintsResults{}, errors.Trace(err) } - cons, err := svc.Constraints() - return params.GetConstraintsResults{cons}, err + cons, err := app.Constraints() + return params.GetConstraintsResults{cons}, errors.Trace(err) } // SetConstraints sets the constraints for a given application. func (api *API) SetConstraints(args params.SetConstraints) error { + if err := api.checkCanWrite(); err != nil { + return err + } if err := api.check.ChangeAllowed(); err != nil { return errors.Trace(err) } - svc, err := api.state.Application(args.ApplicationName) + app, err := api.backend.Application(args.ApplicationName) if err != nil { return err } - return svc.SetConstraints(args.Constraints) + return app.SetConstraints(args.Constraints) } // AddRelation adds a relation between the specified endpoints and returns the relation info. 
func (api *API) AddRelation(args params.AddRelation) (params.AddRelationResults, error) { + if err := api.checkCanWrite(); err != nil { + return params.AddRelationResults{}, errors.Trace(err) + } if err := api.check.ChangeAllowed(); err != nil { return params.AddRelationResults{}, errors.Trace(err) } - inEps, err := api.state.InferEndpoints(args.Endpoints...) + inEps, err := api.backend.InferEndpoints(args.Endpoints...) if err != nil { - return params.AddRelationResults{}, err + return params.AddRelationResults{}, errors.Trace(err) } - rel, err := api.state.AddRelation(inEps...) + rel, err := api.backend.AddRelation(inEps...) if err != nil { - return params.AddRelationResults{}, err + return params.AddRelationResults{}, errors.Trace(err) } outEps := make(map[string]params.CharmRelation) for _, inEp := range inEps { outEp, err := rel.Endpoint(inEp.ApplicationName) if err != nil { - return params.AddRelationResults{}, err + return params.AddRelationResults{}, errors.Trace(err) } outEps[inEp.ApplicationName] = params.CharmRelation{ Name: outEp.Relation.Name, @@ -566,14 +722,17 @@ // DestroyRelation removes the relation between the specified endpoints. func (api *API) DestroyRelation(args params.DestroyRelation) error { + if err := api.checkCanWrite(); err != nil { + return err + } if err := api.check.RemoveAllowed(); err != nil { return errors.Trace(err) } - eps, err := api.state.InferEndpoints(args.Endpoints...) + eps, err := api.backend.InferEndpoints(args.Endpoints...) if err != nil { return err } - rel, err := api.state.EndpointsRelation(eps...) + rel, err := api.backend.EndpointsRelation(eps...) 
if err != nil { return err } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/application/application_test.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/application/application_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/application/application_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/application/application_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -7,7 +7,6 @@ "fmt" "io" "regexp" - "runtime" "sync" "time" @@ -24,6 +23,7 @@ "gopkg.in/mgo.v2" "github.com/juju/juju/apiserver/application" + "github.com/juju/juju/apiserver/common" commontesting "github.com/juju/juju/apiserver/common/testing" "github.com/juju/juju/apiserver/params" apiservertesting "github.com/juju/juju/apiserver/testing" @@ -44,15 +44,13 @@ apiservertesting.CharmStoreSuite commontesting.BlockHelper - applicationApi *application.API + applicationAPI *application.API application *state.Application authorizer apiservertesting.FakeAuthorizer } var _ = gc.Suite(&serviceSuite{}) -var _ application.Application = (*application.API)(nil) - func (s *serviceSuite) SetUpSuite(c *gc.C) { s.CharmStoreSuite.SetUpSuite(c) s.JujuConnSuite.SetUpSuite(c) @@ -76,7 +74,12 @@ Tag: s.AdminUserTag(c), } var err error - s.applicationApi, err = application.NewAPI(s.State, nil, s.authorizer) + backend := application.NewStateBackend(s.State) + blockChecker := common.NewBlockChecker(s.State) + s.applicationAPI, err = application.NewAPI( + backend, s.authorizer, blockChecker, + application.CharmToStateCharm, + ) c.Assert(err, jc.ErrorIsNil) } @@ -140,7 +143,7 @@ } for i, t := range tests { c.Logf("Running test %d %v", i, t.about) - results, err := s.applicationApi.SetMetricCredentials(t.args) + results, err := s.applicationAPI.SetMetricCredentials(t.args) c.Assert(err, jc.ErrorIsNil) c.Assert(results.Results, gc.HasLen, len(t.results.Results)) c.Assert(results, gc.DeepEquals, t.results) @@ -170,7 +173,7 @@ "title": 
"foobar", "username": "", } - settings, err := application.ParseSettingsCompatible(ch, options) + settings, err := application.ParseSettingsCompatible(ch.Config(), options) c.Assert(err, jc.ErrorIsNil) c.Assert(settings, gc.DeepEquals, charm.Settings{ "title": "foobar", @@ -181,7 +184,7 @@ options = map[string]string{ "yummy": "didgeridoo", } - _, err = application.ParseSettingsCompatible(ch, options) + _, err = application.ParseSettingsCompatible(ch.Config(), options) c.Assert(err, gc.ErrorMatches, `unknown option "yummy"`) } @@ -202,12 +205,12 @@ var cons constraints.Value args := params.ApplicationDeploy{ ApplicationName: "application", - CharmUrl: curl.String(), + CharmURL: curl.String(), NumUnits: 1, Constraints: cons, Storage: storageConstraints, } - results, err := s.applicationApi.Deploy(params.ApplicationsDeploy{ + results, err := s.applicationAPI.Deploy(params.ApplicationsDeploy{ Applications: []params.ApplicationDeploy{args}}, ) c.Assert(err, jc.ErrorIsNil) @@ -257,12 +260,12 @@ var cons constraints.Value args := params.ApplicationDeploy{ ApplicationName: "application", - CharmUrl: curl.String(), + CharmURL: curl.String(), NumUnits: 1, Constraints: cons, Storage: storageConstraints, } - results, err := s.applicationApi.Deploy(params.ApplicationsDeploy{ + results, err := s.applicationAPI.Deploy(params.ApplicationsDeploy{ Applications: []params.ApplicationDeploy{args}}, ) c.Assert(err, jc.ErrorIsNil) @@ -279,11 +282,11 @@ var cons constraints.Value args := params.ApplicationDeploy{ ApplicationName: "application", - CharmUrl: curl.String(), + CharmURL: curl.String(), NumUnits: 1, Constraints: cons, } - results, err := s.applicationApi.Deploy(params.ApplicationsDeploy{ + results, err := s.applicationAPI.Deploy(params.ApplicationsDeploy{ Applications: []params.ApplicationDeploy{args}}, ) c.Assert(err, jc.ErrorIsNil) @@ -311,14 +314,14 @@ var cons constraints.Value args := params.ApplicationDeploy{ ApplicationName: "application", - CharmUrl: curl.String(), + 
CharmURL: curl.String(), NumUnits: 1, Constraints: cons, Placement: []*instance.Placement{ {"deadbeef-0bad-400d-8000-4b1d0d06f00d", "valid"}, }, } - results, err := s.applicationApi.Deploy(params.ApplicationsDeploy{ + results, err := s.applicationAPI.Deploy(params.ApplicationsDeploy{ Applications: []params.ApplicationDeploy{args}}, ) c.Assert(err, jc.ErrorIsNil) @@ -340,14 +343,14 @@ var cons constraints.Value args := params.ApplicationDeploy{ ApplicationName: "application", - CharmUrl: curl.String(), + CharmURL: curl.String(), NumUnits: 1, Constraints: cons, Placement: []*instance.Placement{ {"deadbeef-0bad-400d-8000-4b1d0d06f00d", "invalid"}, }, } - results, err := s.applicationApi.Deploy(params.ApplicationsDeploy{ + results, err := s.applicationAPI.Deploy(params.ApplicationsDeploy{ Applications: []params.ApplicationDeploy{args}}, ) c.Assert(err, jc.ErrorIsNil) @@ -366,13 +369,13 @@ var cons constraints.Value args := params.ApplicationDeploy{ ApplicationName: "application", - CharmUrl: curl.String(), + CharmURL: curl.String(), NumUnits: 1, Constraints: cons, EndpointBindings: endpointBindings, } - results, err := s.applicationApi.Deploy(params.ApplicationsDeploy{ + results, err := s.applicationAPI.Deploy(params.ApplicationsDeploy{ Applications: []params.ApplicationDeploy{args}}, ) c.Assert(err, jc.ErrorIsNil) @@ -503,9 +506,8 @@ } func (s *serviceSuite) TestAddCharmConcurrently(c *gc.C) { - if runtime.GOOS == "windows" { - c.Skip("bug 1596960: Skipping this on windows for now") - } + c.Skip("see lp:1596960 -- bad test for bad code") + var putBarrier sync.WaitGroup var blobs blobs s.PatchValue(application.NewStateStorage, func(uuid string, session *mgo.Session) statestorage.Storage { @@ -592,7 +594,7 @@ func (s *serviceSuite) TestServiceGetCharmURL(c *gc.C) { s.AddTestingService(c, "wordpress", s.AddTestingCharm(c, "wordpress")) - result, err := s.applicationApi.GetCharmURL(params.ApplicationGet{"wordpress"}) + result, err := 
s.applicationAPI.GetCharmURL(params.ApplicationGet{"wordpress"}) c.Assert(err, jc.ErrorIsNil) c.Assert(result.Error, gc.IsNil) c.Assert(result.Result, gc.Equals, "local:quantal/wordpress-3") @@ -604,9 +606,9 @@ URL: curl.String(), }) c.Assert(err, jc.ErrorIsNil) - results, err := s.applicationApi.Deploy(params.ApplicationsDeploy{ + results, err := s.applicationAPI.Deploy(params.ApplicationsDeploy{ Applications: []params.ApplicationDeploy{{ - CharmUrl: curl.String(), + CharmURL: curl.String(), ApplicationName: "application", NumUnits: 3, }}}) @@ -618,9 +620,9 @@ URL: curl.String(), }) c.Assert(err, jc.ErrorIsNil) - err = s.applicationApi.SetCharm(params.ApplicationSetCharm{ + err = s.applicationAPI.SetCharm(params.ApplicationSetCharm{ ApplicationName: "application", - CharmUrl: curl.String(), + CharmURL: curl.String(), }) c.Assert(err, jc.ErrorIsNil) @@ -639,9 +641,9 @@ URL: curl.String(), }) c.Assert(err, jc.ErrorIsNil) - results, err := s.applicationApi.Deploy(params.ApplicationsDeploy{ + results, err := s.applicationAPI.Deploy(params.ApplicationsDeploy{ Applications: []params.ApplicationDeploy{{ - CharmUrl: curl.String(), + CharmURL: curl.String(), ApplicationName: "application", NumUnits: 3, }}}) @@ -656,9 +658,9 @@ } func (s *serviceSuite) assertServiceSetCharm(c *gc.C, forceUnits bool) { - err := s.applicationApi.SetCharm(params.ApplicationSetCharm{ + err := s.applicationAPI.SetCharm(params.ApplicationSetCharm{ ApplicationName: "application", - CharmUrl: "cs:~who/precise/wordpress-3", + CharmURL: "cs:~who/precise/wordpress-3", ForceUnits: forceUnits, }) c.Assert(err, jc.ErrorIsNil) @@ -671,9 +673,9 @@ } func (s *serviceSuite) assertServiceSetCharmBlocked(c *gc.C, msg string) { - err := s.applicationApi.SetCharm(params.ApplicationSetCharm{ + err := s.applicationAPI.SetCharm(params.ApplicationSetCharm{ ApplicationName: "application", - CharmUrl: "cs:~who/precise/wordpress-3", + CharmURL: "cs:~who/precise/wordpress-3", }) s.AssertBlocked(c, err, msg) } @@ -702,9 
+704,9 @@ URL: curl.String(), }) c.Assert(err, jc.ErrorIsNil) - results, err := s.applicationApi.Deploy(params.ApplicationsDeploy{ + results, err := s.applicationAPI.Deploy(params.ApplicationsDeploy{ Applications: []params.ApplicationDeploy{{ - CharmUrl: curl.String(), + CharmURL: curl.String(), ApplicationName: "application", NumUnits: 3, }}}) @@ -716,9 +718,9 @@ URL: curl.String(), }) c.Assert(err, jc.ErrorIsNil) - err = s.applicationApi.SetCharm(params.ApplicationSetCharm{ + err = s.applicationAPI.SetCharm(params.ApplicationSetCharm{ ApplicationName: "application", - CharmUrl: curl.String(), + CharmURL: curl.String(), ForceUnits: true, }) c.Assert(err, jc.ErrorIsNil) @@ -744,9 +746,9 @@ } func (s *serviceSuite) TestServiceSetCharmInvalidService(c *gc.C) { - err := s.applicationApi.SetCharm(params.ApplicationSetCharm{ + err := s.applicationAPI.SetCharm(params.ApplicationSetCharm{ ApplicationName: "badservice", - CharmUrl: "cs:precise/wordpress-3", + CharmURL: "cs:precise/wordpress-3", ForceSeries: true, ForceUnits: true, }) @@ -774,9 +776,9 @@ URL: curl.String(), }) c.Assert(err, jc.ErrorIsNil) - results, err := s.applicationApi.Deploy(params.ApplicationsDeploy{ + results, err := s.applicationAPI.Deploy(params.ApplicationsDeploy{ Applications: []params.ApplicationDeploy{{ - CharmUrl: curl.String(), + CharmURL: curl.String(), ApplicationName: "application", }}}) c.Assert(err, jc.ErrorIsNil) @@ -790,12 +792,12 @@ // Even with forceSeries = true, we can't change a charm where // the series is sepcified in the URL. 
- err = s.applicationApi.SetCharm(params.ApplicationSetCharm{ + err = s.applicationAPI.SetCharm(params.ApplicationSetCharm{ ApplicationName: "application", - CharmUrl: curl.String(), + CharmURL: curl.String(), ForceSeries: true, }) - c.Assert(err, gc.ErrorMatches, "cannot change a service's series") + c.Assert(err, gc.ErrorMatches, `cannot upgrade application "application" to charm "cs:~who/trusty/dummy-1": cannot change an application's series`) } func (s *serviceSuite) TestServiceSetCharmUnsupportedSeries(c *gc.C) { @@ -804,9 +806,9 @@ URL: curl.String(), }) c.Assert(err, jc.ErrorIsNil) - results, err := s.applicationApi.Deploy(params.ApplicationsDeploy{ + results, err := s.applicationAPI.Deploy(params.ApplicationsDeploy{ Applications: []params.ApplicationDeploy{{ - CharmUrl: curl.String(), + CharmURL: curl.String(), ApplicationName: "application", Series: "precise", }}}) @@ -819,11 +821,11 @@ }) c.Assert(err, jc.ErrorIsNil) - err = s.applicationApi.SetCharm(params.ApplicationSetCharm{ + err = s.applicationAPI.SetCharm(params.ApplicationSetCharm{ ApplicationName: "application", - CharmUrl: curl.String(), + CharmURL: curl.String(), }) - c.Assert(err, gc.ErrorMatches, "cannot upgrade charm, only these series are supported: trusty, wily") + c.Assert(err, gc.ErrorMatches, `cannot upgrade application "application" to charm "cs:~who/multi-series-1": only these series are supported: trusty, wily`) } func (s *serviceSuite) assertServiceSetCharmSeries(c *gc.C, upgradeCharm, series string) { @@ -832,9 +834,9 @@ URL: curl.String(), }) c.Assert(err, jc.ErrorIsNil) - results, err := s.applicationApi.Deploy(params.ApplicationsDeploy{ + results, err := s.applicationAPI.Deploy(params.ApplicationsDeploy{ Applications: []params.ApplicationDeploy{{ - CharmUrl: curl.String(), + CharmURL: curl.String(), ApplicationName: "application", Series: "precise", }}}) @@ -852,9 +854,9 @@ }) c.Assert(err, jc.ErrorIsNil) - err = s.applicationApi.SetCharm(params.ApplicationSetCharm{ + err = 
s.applicationAPI.SetCharm(params.ApplicationSetCharm{ ApplicationName: "application", - CharmUrl: curl.String(), + CharmURL: curl.String(), ForceSeries: true, }) c.Assert(err, jc.ErrorIsNil) @@ -879,9 +881,9 @@ URL: curl.String(), }) c.Assert(err, jc.ErrorIsNil) - results, err := s.applicationApi.Deploy(params.ApplicationsDeploy{ + results, err := s.applicationAPI.Deploy(params.ApplicationsDeploy{ Applications: []params.ApplicationDeploy{{ - CharmUrl: curl.String(), + CharmURL: curl.String(), ApplicationName: "application", Series: "precise", }}}) @@ -894,12 +896,12 @@ }) c.Assert(err, jc.ErrorIsNil) - err = s.applicationApi.SetCharm(params.ApplicationSetCharm{ + err = s.applicationAPI.SetCharm(params.ApplicationSetCharm{ ApplicationName: "application", - CharmUrl: curl.String(), + CharmURL: curl.String(), ForceSeries: true, }) - c.Assert(err, gc.ErrorMatches, `cannot upgrade charm, OS "Ubuntu" not supported by charm`) + c.Assert(err, gc.ErrorMatches, `cannot upgrade application "application" to charm "cs:~who/multi-series-windows-0": OS "Ubuntu" not supported by charm`) } type testModeCharmRepo struct { @@ -931,9 +933,9 @@ URL: curl.String(), }) c.Assert(err, jc.ErrorIsNil) - results, err := s.applicationApi.Deploy(params.ApplicationsDeploy{ + results, err := s.applicationAPI.Deploy(params.ApplicationsDeploy{ Applications: []params.ApplicationDeploy{{ - CharmUrl: curl.String(), + CharmURL: curl.String(), ApplicationName: "application", NumUnits: 3, }}}) @@ -944,9 +946,9 @@ // Check that the store's test mode is enabled when calling SetCharm. 
curl, _ = s.UploadCharm(c, "trusty/wordpress-2", "wordpress") - err = s.applicationApi.SetCharm(params.ApplicationSetCharm{ + err = s.applicationAPI.SetCharm(params.ApplicationSetCharm{ ApplicationName: "application", - CharmUrl: curl.String(), + CharmURL: curl.String(), }) c.Assert(repo.testMode, jc.IsTrue) @@ -968,9 +970,9 @@ } func (s *serviceSuite) assertServiceDeployPrincipal(c *gc.C, curl *charm.URL, ch charm.Charm, mem4g constraints.Value) { - results, err := s.applicationApi.Deploy(params.ApplicationsDeploy{ + results, err := s.applicationAPI.Deploy(params.ApplicationsDeploy{ Applications: []params.ApplicationDeploy{{ - CharmUrl: curl.String(), + CharmURL: curl.String(), ApplicationName: "application", NumUnits: 3, Constraints: mem4g, @@ -982,9 +984,9 @@ } func (s *serviceSuite) assertServiceDeployPrincipalBlocked(c *gc.C, msg string, curl *charm.URL, mem4g constraints.Value) { - _, err := s.applicationApi.Deploy(params.ApplicationsDeploy{ + _, err := s.applicationAPI.Deploy(params.ApplicationsDeploy{ Applications: []params.ApplicationDeploy{{ - CharmUrl: curl.String(), + CharmURL: curl.String(), ApplicationName: "application", NumUnits: 3, Constraints: mem4g, @@ -1016,9 +1018,9 @@ URL: curl.String(), }) c.Assert(err, jc.ErrorIsNil) - results, err := s.applicationApi.Deploy(params.ApplicationsDeploy{ + results, err := s.applicationAPI.Deploy(params.ApplicationsDeploy{ Applications: []params.ApplicationDeploy{{ - CharmUrl: curl.String(), + CharmURL: curl.String(), ApplicationName: "application-name", }}}) c.Assert(err, jc.ErrorIsNil) @@ -1045,9 +1047,9 @@ URL: curl.String(), }) c.Assert(err, jc.ErrorIsNil) - results, err := s.applicationApi.Deploy(params.ApplicationsDeploy{ + results, err := s.applicationAPI.Deploy(params.ApplicationsDeploy{ Applications: []params.ApplicationDeploy{{ - CharmUrl: curl.String(), + CharmURL: curl.String(), ApplicationName: "application-name", NumUnits: 1, ConfigYAML: "application-name:\n username: fred", @@ -1071,9 +1073,9 @@ 
URL: curl.String(), }) c.Assert(err, jc.ErrorIsNil) - results, err := s.applicationApi.Deploy(params.ApplicationsDeploy{ + results, err := s.applicationAPI.Deploy(params.ApplicationsDeploy{ Applications: []params.ApplicationDeploy{{ - CharmUrl: curl.String(), + CharmURL: curl.String(), ApplicationName: "application-name", NumUnits: 1, ConfigYAML: "application-name:\n skill-level: fred", @@ -1094,9 +1096,9 @@ machine, err := s.State.AddMachine("precise", state.JobHostUnits) c.Assert(err, jc.ErrorIsNil) - results, err := s.applicationApi.Deploy(params.ApplicationsDeploy{ + results, err := s.applicationAPI.Deploy(params.ApplicationsDeploy{ Applications: []params.ApplicationDeploy{{ - CharmUrl: curl.String(), + CharmURL: curl.String(), ApplicationName: "application-name", NumUnits: 1, ConfigYAML: "application-name:\n username: fred", @@ -1128,9 +1130,9 @@ } func (s *serviceSuite) TestServiceDeployToMachineNotFound(c *gc.C) { - results, err := s.applicationApi.Deploy(params.ApplicationsDeploy{ + results, err := s.applicationAPI.Deploy(params.ApplicationsDeploy{ Applications: []params.ApplicationDeploy{{ - CharmUrl: "cs:precise/application-name-1", + CharmURL: "cs:precise/application-name-1", ApplicationName: "application-name", NumUnits: 1, Placement: []*instance.Placement{instance.MustParsePlacement("42")}, @@ -1149,9 +1151,9 @@ URL: curl.String(), }) c.Assert(err, jc.ErrorIsNil) - results, err := s.applicationApi.Deploy(params.ApplicationsDeploy{ + results, err := s.applicationAPI.Deploy(params.ApplicationsDeploy{ Applications: []params.ApplicationDeploy{{ - CharmUrl: curl.String(), + CharmURL: curl.String(), ApplicationName: "application", NumUnits: 1, }}}) @@ -1160,7 +1162,7 @@ c.Assert(results.Results[0].Error, gc.IsNil) } -func (s *serviceSuite) checkClientServiceUpdateSetCharm(c *gc.C, forceCharmUrl bool) { +func (s *serviceSuite) checkClientServiceUpdateSetCharm(c *gc.C, forceCharmURL bool) { s.deployServiceForUpdateTests(c) curl, _ := s.UploadCharm(c, 
"precise/wordpress-3", "wordpress") err := application.AddCharmWithAuthorization(s.State, params.AddCharmWithAuthorization{ @@ -1171,10 +1173,10 @@ // Update the charm for the application. args := params.ApplicationUpdate{ ApplicationName: "application", - CharmUrl: curl.String(), - ForceCharmUrl: forceCharmUrl, + CharmURL: curl.String(), + ForceCharmURL: forceCharmURL, } - err = s.applicationApi.Update(args) + err = s.applicationAPI.Update(args) c.Assert(err, jc.ErrorIsNil) // Ensure the charm has been updated and and the force flag correctly set. @@ -1183,7 +1185,7 @@ ch, force, err := application.Charm() c.Assert(err, jc.ErrorIsNil) c.Assert(ch.URL().String(), gc.Equals, curl.String()) - c.Assert(force, gc.Equals, forceCharmUrl) + c.Assert(force, gc.Equals, forceCharmURL) } func (s *serviceSuite) TestServiceUpdateSetCharm(c *gc.C) { @@ -1216,10 +1218,10 @@ // Update the charm for the application. args := params.ApplicationUpdate{ ApplicationName: "application", - CharmUrl: curl, - ForceCharmUrl: false, + CharmURL: curl, + ForceCharmURL: false, } - err := s.applicationApi.Update(args) + err := s.applicationAPI.Update(args) s.AssertBlocked(c, err, "TestBlockChangeServiceUpdate") } @@ -1238,10 +1240,10 @@ // Update the charm for the application. args := params.ApplicationUpdate{ ApplicationName: "application", - CharmUrl: curl, - ForceCharmUrl: true, + CharmURL: curl, + ForceCharmURL: true, } - err := s.applicationApi.Update(args) + err := s.applicationAPI.Update(args) c.Assert(err, jc.ErrorIsNil) // Ensure the charm has been updated and and the force flag correctly set. 
@@ -1257,9 +1259,9 @@ s.AddTestingService(c, "wordpress", s.AddTestingCharm(c, "wordpress")) args := params.ApplicationUpdate{ ApplicationName: "wordpress", - CharmUrl: "cs:precise/wordpress-999999", + CharmURL: "cs:precise/wordpress-999999", } - err := s.applicationApi.Update(args) + err := s.applicationAPI.Update(args) c.Check(err, gc.ErrorMatches, `charm "cs:precise/wordpress-999999" not found`) } @@ -1272,7 +1274,7 @@ ApplicationName: "dummy", MinUnits: &minUnits, } - err := s.applicationApi.Update(args) + err := s.applicationAPI.Update(args) c.Assert(err, jc.ErrorIsNil) // Ensure the minimum number of units has been set. @@ -1289,7 +1291,7 @@ ApplicationName: "dummy", MinUnits: &minUnits, } - err := s.applicationApi.Update(args) + err := s.applicationAPI.Update(args) c.Assert(err, gc.ErrorMatches, `cannot set minimum units for application "dummy": cannot set a negative minimum number of units`) @@ -1306,7 +1308,7 @@ ApplicationName: "dummy", SettingsStrings: map[string]string{"title": "s-title", "username": "s-user"}, } - err := s.applicationApi.Update(args) + err := s.applicationAPI.Update(args) c.Assert(err, jc.ErrorIsNil) // Ensure the settings have been correctly updated. @@ -1324,7 +1326,7 @@ ApplicationName: "dummy", SettingsYAML: "dummy:\n title: y-title\n username: y-user", } - err := s.applicationApi.Update(args) + err := s.applicationAPI.Update(args) c.Assert(err, jc.ErrorIsNil) // Ensure the settings have been correctly updated. @@ -1342,7 +1344,7 @@ ApplicationName: "dummy", SettingsYAML: "charm: dummy\napplication: dummy\nsettings:\n title:\n value: y-title\n type: string\n username:\n value: y-user\n ignore:\n blah: true", } - err := s.applicationApi.Update(args) + err := s.applicationAPI.Update(args) c.Assert(err, jc.ErrorIsNil) // Ensure the settings have been correctly updated. @@ -1356,13 +1358,13 @@ application := s.AddTestingService(c, "dummy", s.AddTestingCharm(c, "dummy")) // Update constraints for the application. 
- cons, err := constraints.Parse("mem=4096", "cpu-cores=2") + cons, err := constraints.Parse("mem=4096", "cores=2") c.Assert(err, jc.ErrorIsNil) args := params.ApplicationUpdate{ ApplicationName: "dummy", Constraints: &cons, } - err = s.applicationApi.Update(args) + err = s.applicationAPI.Update(args) c.Assert(err, jc.ErrorIsNil) // Ensure the constraints have been correctly updated. @@ -1381,18 +1383,18 @@ // Update all the service attributes. minUnits := 3 - cons, err := constraints.Parse("mem=4096", "cpu-cores=2") + cons, err := constraints.Parse("mem=4096", "cores=2") c.Assert(err, jc.ErrorIsNil) args := params.ApplicationUpdate{ ApplicationName: "application", - CharmUrl: curl.String(), - ForceCharmUrl: true, + CharmURL: curl.String(), + ForceCharmURL: true, MinUnits: &minUnits, SettingsStrings: map[string]string{"blog-title": "string-title"}, SettingsYAML: "application:\n blog-title: yaml-title\n", Constraints: &cons, } - err = s.applicationApi.Update(args) + err = s.applicationAPI.Update(args) c.Assert(err, jc.ErrorIsNil) // Ensure the service has been correctly updated. @@ -1426,18 +1428,18 @@ // Calling Update with no parameters set is a no-op. 
args := params.ApplicationUpdate{ApplicationName: "wordpress"} - err := s.applicationApi.Update(args) + err := s.applicationAPI.Update(args) c.Assert(err, jc.ErrorIsNil) } func (s *serviceSuite) TestServiceUpdateNoService(c *gc.C) { - err := s.applicationApi.Update(params.ApplicationUpdate{}) + err := s.applicationAPI.Update(params.ApplicationUpdate{}) c.Assert(err, gc.ErrorMatches, `"" is not a valid application name`) } func (s *serviceSuite) TestServiceUpdateInvalidService(c *gc.C) { args := params.ApplicationUpdate{ApplicationName: "no-such-service"} - err := s.applicationApi.Update(args) + err := s.applicationAPI.Update(args) c.Assert(err, gc.ErrorMatches, `application "no-such-service" not found`) } @@ -1448,7 +1450,7 @@ func (s *serviceSuite) TestServiceSet(c *gc.C) { dummy := s.AddTestingService(c, "dummy", s.AddTestingCharm(c, "dummy")) - err := s.applicationApi.Set(params.ApplicationSet{ApplicationName: "dummy", Options: map[string]string{ + err := s.applicationAPI.Set(params.ApplicationSet{ApplicationName: "dummy", Options: map[string]string{ "title": "foobar", "username": validSetTestValue, }}) @@ -1460,7 +1462,7 @@ "username": validSetTestValue, }) - err = s.applicationApi.Set(params.ApplicationSet{ApplicationName: "dummy", Options: map[string]string{ + err = s.applicationAPI.Set(params.ApplicationSet{ApplicationName: "dummy", Options: map[string]string{ "title": "barfoo", "username": "", }}) @@ -1474,7 +1476,7 @@ } func (s *serviceSuite) assertServiceSetBlocked(c *gc.C, dummy *state.Application, msg string) { - err := s.applicationApi.Set(params.ApplicationSet{ + err := s.applicationAPI.Set(params.ApplicationSet{ ApplicationName: "dummy", Options: map[string]string{ "title": "foobar", @@ -1483,7 +1485,7 @@ } func (s *serviceSuite) assertServiceSet(c *gc.C, dummy *state.Application) { - err := s.applicationApi.Set(params.ApplicationSet{ + err := s.applicationAPI.Set(params.ApplicationSet{ ApplicationName: "dummy", Options: map[string]string{ "title": 
"foobar", @@ -1518,7 +1520,7 @@ func (s *serviceSuite) TestServerUnset(c *gc.C) { dummy := s.AddTestingService(c, "dummy", s.AddTestingCharm(c, "dummy")) - err := s.applicationApi.Set(params.ApplicationSet{ApplicationName: "dummy", Options: map[string]string{ + err := s.applicationAPI.Set(params.ApplicationSet{ApplicationName: "dummy", Options: map[string]string{ "title": "foobar", "username": "user name", }}) @@ -1530,7 +1532,7 @@ "username": "user name", }) - err = s.applicationApi.Unset(params.ApplicationUnset{ApplicationName: "dummy", Options: []string{"username"}}) + err = s.applicationAPI.Unset(params.ApplicationUnset{ApplicationName: "dummy", Options: []string{"username"}}) c.Assert(err, jc.ErrorIsNil) settings, err = dummy.ConfigSettings() c.Assert(err, jc.ErrorIsNil) @@ -1542,7 +1544,7 @@ func (s *serviceSuite) setupServerUnsetBlocked(c *gc.C) *state.Application { dummy := s.AddTestingService(c, "dummy", s.AddTestingCharm(c, "dummy")) - err := s.applicationApi.Set(params.ApplicationSet{ + err := s.applicationAPI.Set(params.ApplicationSet{ ApplicationName: "dummy", Options: map[string]string{ "title": "foobar", @@ -1559,7 +1561,7 @@ } func (s *serviceSuite) assertServerUnset(c *gc.C, dummy *state.Application) { - err := s.applicationApi.Unset(params.ApplicationUnset{ + err := s.applicationAPI.Unset(params.ApplicationUnset{ ApplicationName: "dummy", Options: []string{"username"}, }) @@ -1572,7 +1574,7 @@ } func (s *serviceSuite) assertServerUnsetBlocked(c *gc.C, dummy *state.Application, msg string) { - err := s.applicationApi.Unset(params.ApplicationUnset{ + err := s.applicationAPI.Unset(params.ApplicationUnset{ ApplicationName: "dummy", Options: []string{"username"}, }) @@ -1641,7 +1643,7 @@ if t.to != "" { args.Placement = []*instance.Placement{instance.MustParsePlacement(t.to)} } - result, err := s.applicationApi.AddUnits(args) + result, err := s.applicationAPI.AddUnits(args) if t.err != "" { c.Assert(err, gc.ErrorMatches, t.err) continue @@ -1662,7 
+1664,7 @@ machine, err := s.State.AddMachine("quantal", state.JobHostUnits) c.Assert(err, jc.ErrorIsNil) - _, err = s.applicationApi.AddUnits(params.AddApplicationUnits{ + _, err = s.applicationAPI.AddUnits(params.AddApplicationUnits{ ApplicationName: "dummy", NumUnits: 1, Placement: []*instance.Placement{instance.MustParsePlacement("lxd:" + machine.Id())}, @@ -1713,7 +1715,7 @@ if serviceName == "" { serviceName = "dummy" } - result, err := s.applicationApi.AddUnits(params.AddApplicationUnits{ + result, err := s.applicationAPI.AddUnits(params.AddApplicationUnits{ ApplicationName: serviceName, NumUnits: len(t.expected), Placement: t.placement, @@ -1735,7 +1737,7 @@ } func (s *serviceSuite) assertAddServiceUnits(c *gc.C) { - result, err := s.applicationApi.AddUnits(params.AddApplicationUnits{ + result, err := s.applicationAPI.AddUnits(params.AddApplicationUnits{ ApplicationName: "dummy", NumUnits: 3, }) @@ -1758,10 +1760,10 @@ _, err = s.State.AddRelation(eps...) c.Assert(err, jc.ErrorIsNil) - _, err = s.applicationApi.CharmRelations(params.ApplicationCharmRelations{"blah"}) + _, err = s.applicationAPI.CharmRelations(params.ApplicationCharmRelations{"blah"}) c.Assert(err, gc.ErrorMatches, `application "blah" not found`) - result, err := s.applicationApi.CharmRelations(params.ApplicationCharmRelations{"wordpress"}) + result, err := s.applicationAPI.CharmRelations(params.ApplicationCharmRelations{"wordpress"}) c.Assert(err, jc.ErrorIsNil) c.Assert(result.CharmRelations, gc.DeepEquals, []string{ "cache", "db", "juju-info", "logging-dir", "monitoring-port", "url", @@ -1769,7 +1771,7 @@ } func (s *serviceSuite) assertAddServiceUnitsBlocked(c *gc.C, msg string) { - _, err := s.applicationApi.AddUnits(params.AddApplicationUnits{ + _, err := s.applicationAPI.AddUnits(params.AddApplicationUnits{ ApplicationName: "dummy", NumUnits: 3, }) @@ -1796,7 +1798,7 @@ func (s *serviceSuite) TestAddUnitToMachineNotFound(c *gc.C) { s.AddTestingService(c, "dummy", s.AddTestingCharm(c, 
"dummy")) - _, err := s.applicationApi.AddUnits(params.AddApplicationUnits{ + _, err := s.applicationAPI.AddUnits(params.AddApplicationUnits{ ApplicationName: "dummy", NumUnits: 3, Placement: []*instance.Placement{instance.MustParsePlacement("42")}, @@ -1818,7 +1820,7 @@ c.Assert(svcs[1].IsExposed(), jc.IsTrue) for i, t := range serviceExposeTests { c.Logf("test %d. %s", i, t.about) - err = s.applicationApi.Expose(params.ApplicationExpose{t.service}) + err = s.applicationAPI.Expose(params.ApplicationExpose{t.service}) if t.err != "" { c.Assert(err, gc.ErrorMatches, t.err) } else { @@ -1870,7 +1872,7 @@ func (s *serviceSuite) assertServiceExpose(c *gc.C) { for i, t := range serviceExposeTests { c.Logf("test %d. %s", i, t.about) - err := s.applicationApi.Expose(params.ApplicationExpose{t.service}) + err := s.applicationAPI.Expose(params.ApplicationExpose{t.service}) if t.err != "" { c.Assert(err, gc.ErrorMatches, t.err) } else { @@ -1885,7 +1887,7 @@ func (s *serviceSuite) assertServiceExposeBlocked(c *gc.C, msg string) { for i, t := range serviceExposeTests { c.Logf("test %d. 
%s", i, t.about) - err := s.applicationApi.Expose(params.ApplicationExpose{t.service}) + err := s.applicationAPI.Expose(params.ApplicationExpose{t.service}) s.AssertBlocked(c, err, msg) } } @@ -1943,7 +1945,7 @@ svc.SetExposed() } c.Assert(svc.IsExposed(), gc.Equals, t.initial) - err := s.applicationApi.Unexpose(params.ApplicationUnexpose{t.service}) + err := s.applicationAPI.Unexpose(params.ApplicationUnexpose{t.service}) if t.err == "" { c.Assert(err, jc.ErrorIsNil) svc.Refresh() @@ -1965,7 +1967,7 @@ } func (s *serviceSuite) assertServiceUnexpose(c *gc.C, svc *state.Application) { - err := s.applicationApi.Unexpose(params.ApplicationUnexpose{"dummy-service"}) + err := s.applicationAPI.Unexpose(params.ApplicationUnexpose{"dummy-service"}) c.Assert(err, jc.ErrorIsNil) svc.Refresh() c.Assert(svc.IsExposed(), gc.Equals, false) @@ -1974,7 +1976,7 @@ } func (s *serviceSuite) assertServiceUnexposeBlocked(c *gc.C, svc *state.Application, msg string) { - err := s.applicationApi.Unexpose(params.ApplicationUnexpose{"dummy-service"}) + err := s.applicationAPI.Unexpose(params.ApplicationUnexpose{"dummy-service"}) s.AssertBlocked(c, err, msg) err = svc.Destroy() c.Assert(err, jc.ErrorIsNil) @@ -2023,7 +2025,7 @@ s.AddTestingService(c, "dummy-service", s.AddTestingCharm(c, "dummy")) for i, t := range serviceDestroyTests { c.Logf("test %d. 
%s", i, t.about) - err := s.applicationApi.Destroy(params.ApplicationDestroy{t.service}) + err := s.applicationAPI.Destroy(params.ApplicationDestroy{t.service}) if t.err != "" { c.Assert(err, gc.ErrorMatches, t.err) } else { @@ -2038,7 +2040,7 @@ serviceName := "wordpress" application, err := s.State.Application(serviceName) c.Assert(err, jc.ErrorIsNil) - err = s.applicationApi.Destroy(params.ApplicationDestroy{serviceName}) + err = s.applicationAPI.Destroy(params.ApplicationDestroy{serviceName}) c.Assert(err, jc.ErrorIsNil) err = application.Refresh() c.Assert(err, jc.Satisfies, errors.IsNotFound) @@ -2055,7 +2057,7 @@ // block remove-objects s.BlockRemoveObject(c, "TestBlockServiceDestroy") - err := s.applicationApi.Destroy(params.ApplicationDestroy{"dummy-service"}) + err := s.applicationAPI.Destroy(params.ApplicationDestroy{"dummy-service"}) s.AssertBlocked(c, err, "TestBlockServiceDestroy") // Tests may have invalid service names. application, err := s.State.Application("dummy-service") @@ -2073,7 +2075,7 @@ c.Assert(err, jc.ErrorIsNil) now := time.Now() sInfo := status.StatusInfo{ - Status: status.StatusIdle, + Status: status.Idle, Message: "", Since: &now, } @@ -2101,7 +2103,7 @@ c.Assert(err, jc.ErrorIsNil) // Try to destroy the subordinate alone; check it fails. - err = s.applicationApi.DestroyUnits(params.DestroyApplicationUnits{ + err = s.applicationAPI.DestroyUnits(params.DestroyApplicationUnits{ UnitNames: []string{"logging/0"}, }) c.Assert(err, gc.ErrorMatches, `no units were destroyed: unit "logging/0" is a subordinate`) @@ -2112,7 +2114,7 @@ func (s *serviceSuite) assertDestroyPrincipalUnits(c *gc.C, units []*state.Unit) { // Destroy 2 of them; check they become Dying. 
- err := s.applicationApi.DestroyUnits(params.DestroyApplicationUnits{ + err := s.applicationAPI.DestroyUnits(params.DestroyApplicationUnits{ UnitNames: []string{"wordpress/0", "wordpress/1"}, }) c.Assert(err, jc.ErrorIsNil) @@ -2121,7 +2123,7 @@ // Try to destroy an Alive one and a Dying one; check // it destroys the Alive one and ignores the Dying one. - err = s.applicationApi.DestroyUnits(params.DestroyApplicationUnits{ + err = s.applicationAPI.DestroyUnits(params.DestroyApplicationUnits{ UnitNames: []string{"wordpress/2", "wordpress/0"}, }) c.Assert(err, jc.ErrorIsNil) @@ -2129,7 +2131,7 @@ // Try to destroy an Alive one along with a nonexistent one; check that // the valid instruction is followed but the invalid one is warned about. - err = s.applicationApi.DestroyUnits(params.DestroyApplicationUnits{ + err = s.applicationAPI.DestroyUnits(params.DestroyApplicationUnits{ UnitNames: []string{"boojum/123", "wordpress/3"}, }) c.Assert(err, gc.ErrorMatches, `some units were not destroyed: unit "boojum/123" does not exist`) @@ -2140,7 +2142,7 @@ c.Assert(err, jc.ErrorIsNil) err = wp0.EnsureDead() c.Assert(err, jc.ErrorIsNil) - err = s.applicationApi.DestroyUnits(params.DestroyApplicationUnits{ + err = s.applicationAPI.DestroyUnits(params.DestroyApplicationUnits{ UnitNames: []string{"wordpress/0", "wordpress/4"}, }) c.Assert(err, jc.ErrorIsNil) @@ -2156,7 +2158,7 @@ c.Assert(err, jc.ErrorIsNil) now := time.Now() sInfo := status.StatusInfo{ - Status: status.StatusIdle, + Status: status.Idle, Message: "", Since: &now, } @@ -2186,7 +2188,7 @@ func (s *serviceSuite) TestBlockChangesDestroyPrincipalUnits(c *gc.C) { units := s.setupDestroyPrincipalUnits(c) s.BlockAllChanges(c, "TestBlockChangesDestroyPrincipalUnits") - err := s.applicationApi.DestroyUnits(params.DestroyApplicationUnits{ + err := s.applicationAPI.DestroyUnits(params.DestroyApplicationUnits{ UnitNames: []string{"wordpress/0", "wordpress/1"}, }) s.assertBlockedErrorAndLiveliness(c, err, 
"TestBlockChangesDestroyPrincipalUnits", units[0], units[1], units[2], units[3]) @@ -2195,7 +2197,7 @@ func (s *serviceSuite) TestBlockRemoveDestroyPrincipalUnits(c *gc.C) { units := s.setupDestroyPrincipalUnits(c) s.BlockRemoveObject(c, "TestBlockRemoveDestroyPrincipalUnits") - err := s.applicationApi.DestroyUnits(params.DestroyApplicationUnits{ + err := s.applicationAPI.DestroyUnits(params.DestroyApplicationUnits{ UnitNames: []string{"wordpress/0", "wordpress/1"}, }) s.assertBlockedErrorAndLiveliness(c, err, "TestBlockRemoveDestroyPrincipalUnits", units[0], units[1], units[2], units[3]) @@ -2204,7 +2206,7 @@ func (s *serviceSuite) TestBlockDestroyDestroyPrincipalUnits(c *gc.C) { units := s.setupDestroyPrincipalUnits(c) s.BlockDestroyModel(c, "TestBlockDestroyDestroyPrincipalUnits") - err := s.applicationApi.DestroyUnits(params.DestroyApplicationUnits{ + err := s.applicationAPI.DestroyUnits(params.DestroyApplicationUnits{ UnitNames: []string{"wordpress/0", "wordpress/1"}, }) c.Assert(err, jc.ErrorIsNil) @@ -2216,7 +2218,7 @@ // Try to destroy the principal and the subordinate together; check it warns // about the subordinate, but destroys the one it can. (The principal unit // agent will be responsible for destroying the subordinate.) - err := s.applicationApi.DestroyUnits(params.DestroyApplicationUnits{ + err := s.applicationAPI.DestroyUnits(params.DestroyApplicationUnits{ UnitNames: []string{"wordpress/0", "logging/0"}, }) c.Assert(err, gc.ErrorMatches, `some units were not destroyed: unit "logging/0" is a subordinate`) @@ -2242,7 +2244,7 @@ s.BlockRemoveObject(c, "TestBlockRemoveDestroySubordinateUnits") // Try to destroy the subordinate alone; check it fails. 
- err = s.applicationApi.DestroyUnits(params.DestroyApplicationUnits{ + err = s.applicationAPI.DestroyUnits(params.DestroyApplicationUnits{ UnitNames: []string{"logging/0"}, }) s.AssertBlocked(c, err, "TestBlockRemoveDestroySubordinateUnits") @@ -2250,7 +2252,7 @@ assertLife(c, wordpress0, state.Alive) assertLife(c, logging0, state.Alive) - err = s.applicationApi.DestroyUnits(params.DestroyApplicationUnits{ + err = s.applicationAPI.DestroyUnits(params.DestroyApplicationUnits{ UnitNames: []string{"wordpress/0", "logging/0"}, }) s.AssertBlocked(c, err, "TestBlockRemoveDestroySubordinateUnits") @@ -2277,7 +2279,7 @@ s.BlockAllChanges(c, "TestBlockChangesDestroySubordinateUnits") // Try to destroy the subordinate alone; check it fails. - err = s.applicationApi.DestroyUnits(params.DestroyApplicationUnits{ + err = s.applicationAPI.DestroyUnits(params.DestroyApplicationUnits{ UnitNames: []string{"logging/0"}, }) s.AssertBlocked(c, err, "TestBlockChangesDestroySubordinateUnits") @@ -2285,7 +2287,7 @@ assertLife(c, wordpress0, state.Alive) assertLife(c, logging0, state.Alive) - err = s.applicationApi.DestroyUnits(params.DestroyApplicationUnits{ + err = s.applicationAPI.DestroyUnits(params.DestroyApplicationUnits{ UnitNames: []string{"wordpress/0", "logging/0"}, }) s.AssertBlocked(c, err, "TestBlockChangesDestroySubordinateUnits") @@ -2312,7 +2314,7 @@ s.BlockDestroyModel(c, "TestBlockDestroyDestroySubordinateUnits") // Try to destroy the subordinate alone; check it fails. - err = s.applicationApi.DestroyUnits(params.DestroyApplicationUnits{ + err = s.applicationAPI.DestroyUnits(params.DestroyApplicationUnits{ UnitNames: []string{"logging/0"}, }) c.Assert(err, gc.ErrorMatches, `no units were destroyed: unit "logging/0" is a subordinate`) @@ -2325,9 +2327,9 @@ application := s.AddTestingService(c, "dummy", s.AddTestingCharm(c, "dummy")) // Update constraints for the application. 
- cons, err := constraints.Parse("mem=4096", "cpu-cores=2") + cons, err := constraints.Parse("mem=4096", "cores=2") c.Assert(err, jc.ErrorIsNil) - err = s.applicationApi.SetConstraints(params.SetConstraints{ApplicationName: "dummy", Constraints: cons}) + err = s.applicationAPI.SetConstraints(params.SetConstraints{ApplicationName: "dummy", Constraints: cons}) c.Assert(err, jc.ErrorIsNil) // Ensure the constraints have been correctly updated. @@ -2339,13 +2341,13 @@ func (s *serviceSuite) setupSetServiceConstraints(c *gc.C) (*state.Application, constraints.Value) { application := s.AddTestingService(c, "dummy", s.AddTestingCharm(c, "dummy")) // Update constraints for the application. - cons, err := constraints.Parse("mem=4096", "cpu-cores=2") + cons, err := constraints.Parse("mem=4096", "cores=2") c.Assert(err, jc.ErrorIsNil) return application, cons } func (s *serviceSuite) assertSetServiceConstraints(c *gc.C, application *state.Application, cons constraints.Value) { - err := s.applicationApi.SetConstraints(params.SetConstraints{ApplicationName: "dummy", Constraints: cons}) + err := s.applicationAPI.SetConstraints(params.SetConstraints{ApplicationName: "dummy", Constraints: cons}) c.Assert(err, jc.ErrorIsNil) // Ensure the constraints have been correctly updated. obtained, err := application.Constraints() @@ -2354,7 +2356,7 @@ } func (s *serviceSuite) assertSetServiceConstraintsBlocked(c *gc.C, msg string, service *state.Application, cons constraints.Value) { - err := s.applicationApi.SetConstraints(params.SetConstraints{ApplicationName: "dummy", Constraints: cons}) + err := s.applicationAPI.SetConstraints(params.SetConstraints{ApplicationName: "dummy", Constraints: cons}) s.AssertBlocked(c, err, msg) } @@ -2380,13 +2382,13 @@ application := s.AddTestingService(c, "dummy", s.AddTestingCharm(c, "dummy")) // Set constraints for the application. 
- cons, err := constraints.Parse("mem=4096", "cpu-cores=2") + cons, err := constraints.Parse("mem=4096", "cores=2") c.Assert(err, jc.ErrorIsNil) err = application.SetConstraints(cons) c.Assert(err, jc.ErrorIsNil) // Check we can get the constraints. - result, err := s.applicationApi.GetConstraints(params.GetApplicationConstraints{"dummy"}) + result, err := s.applicationAPI.GetConstraints(params.GetApplicationConstraints{"dummy"}) c.Assert(err, jc.ErrorIsNil) c.Assert(result.Constraints, gc.DeepEquals, cons) } @@ -2421,7 +2423,7 @@ func (s *serviceSuite) assertAddRelation(c *gc.C, endpoints []string) { s.setupRelationScenario(c) - res, err := s.applicationApi.AddRelation(params.AddRelation{Endpoints: endpoints}) + res, err := s.applicationAPI.AddRelation(params.AddRelation{Endpoints: endpoints}) c.Assert(err, jc.ErrorIsNil) s.checkEndpoints(c, res.Endpoints) // Show that the relation was added. @@ -2455,7 +2457,7 @@ func (s *serviceSuite) TestBlockChangesAddRelation(c *gc.C) { s.AddTestingService(c, "wordpress", s.AddTestingCharm(c, "wordpress")) s.BlockAllChanges(c, "TestBlockChangesAddRelation") - _, err := s.applicationApi.AddRelation(params.AddRelation{Endpoints: []string{"wordpress", "mysql"}}) + _, err := s.applicationAPI.AddRelation(params.AddRelation{Endpoints: []string{"wordpress", "mysql"}}) s.AssertBlocked(c, err, "TestBlockChangesAddRelation") } @@ -2470,7 +2472,7 @@ func (s *serviceSuite) TestCallWithOnlyOneEndpoint(c *gc.C) { s.AddTestingService(c, "wordpress", s.AddTestingCharm(c, "wordpress")) endpoints := []string{"wordpress"} - _, err := s.applicationApi.AddRelation(params.AddRelation{Endpoints: endpoints}) + _, err := s.applicationAPI.AddRelation(params.AddRelation{Endpoints: endpoints}) c.Assert(err, gc.ErrorMatches, "no relations found") } @@ -2478,7 +2480,7 @@ s.AddTestingService(c, "wordpress", s.AddTestingCharm(c, "wordpress")) s.AddTestingService(c, "logging", s.AddTestingCharm(c, "logging")) endpoints := []string{"wordpress", "mysql", 
"logging"} - _, err := s.applicationApi.AddRelation(params.AddRelation{Endpoints: endpoints}) + _, err := s.applicationAPI.AddRelation(params.AddRelation{Endpoints: endpoints}) c.Assert(err, gc.ErrorMatches, "cannot relate 3 endpoints") } @@ -2491,7 +2493,7 @@ _, err = s.State.AddRelation(eps...) c.Assert(err, jc.ErrorIsNil) // And try to add it again. - _, err = s.applicationApi.AddRelation(params.AddRelation{Endpoints: endpoints}) + _, err = s.applicationAPI.AddRelation(params.AddRelation{Endpoints: endpoints}) c.Assert(err, gc.ErrorMatches, `cannot add relation "wordpress:db mysql:server": relation already exists`) } @@ -2513,7 +2515,7 @@ } func (s *serviceSuite) assertDestroyRelationSuccess(c *gc.C, relation *state.Relation, endpoints []string) { - err := s.applicationApi.DestroyRelation(params.DestroyRelation{Endpoints: endpoints}) + err := s.applicationAPI.DestroyRelation(params.DestroyRelation{Endpoints: endpoints}) c.Assert(err, jc.ErrorIsNil) // Show that the relation was removed. 
c.Assert(relation.Refresh(), jc.Satisfies, errors.IsNotFound) @@ -2535,7 +2537,7 @@ func (s *serviceSuite) TestNoRelation(c *gc.C) { s.AddTestingService(c, "wordpress", s.AddTestingCharm(c, "wordpress")) endpoints := []string{"wordpress", "mysql"} - err := s.applicationApi.DestroyRelation(params.DestroyRelation{Endpoints: endpoints}) + err := s.applicationAPI.DestroyRelation(params.DestroyRelation{Endpoints: endpoints}) c.Assert(err, gc.ErrorMatches, `relation "wordpress:db mysql:server" not found`) } @@ -2543,14 +2545,14 @@ s.AddTestingService(c, "wordpress", s.AddTestingCharm(c, "wordpress")) s.AddTestingService(c, "riak", s.AddTestingCharm(c, "riak")) endpoints := []string{"riak", "wordpress"} - err := s.applicationApi.DestroyRelation(params.DestroyRelation{Endpoints: endpoints}) + err := s.applicationAPI.DestroyRelation(params.DestroyRelation{Endpoints: endpoints}) c.Assert(err, gc.ErrorMatches, "no relations found") } func (s *serviceSuite) TestAttemptDestroyingWithOnlyOneEndpoint(c *gc.C) { s.AddTestingService(c, "wordpress", s.AddTestingCharm(c, "wordpress")) endpoints := []string{"wordpress"} - err := s.applicationApi.DestroyRelation(params.DestroyRelation{Endpoints: endpoints}) + err := s.applicationAPI.DestroyRelation(params.DestroyRelation{Endpoints: endpoints}) c.Assert(err, gc.ErrorMatches, "no relations found") } @@ -2559,7 +2561,7 @@ s.AddTestingService(c, "riak", s.AddTestingCharm(c, "riak")) endpoints := []string{"riak:ring"} - err := s.applicationApi.DestroyRelation(params.DestroyRelation{Endpoints: endpoints}) + err := s.applicationAPI.DestroyRelation(params.DestroyRelation{Endpoints: endpoints}) c.Assert(err, gc.ErrorMatches, `cannot destroy relation "riak:ring": is a peer relation`) } @@ -2573,12 +2575,12 @@ c.Assert(err, jc.ErrorIsNil) endpoints := []string{"wordpress", "mysql"} - err = s.applicationApi.DestroyRelation(params.DestroyRelation{Endpoints: endpoints}) + err = s.applicationAPI.DestroyRelation(params.DestroyRelation{Endpoints: 
endpoints}) // Show that the relation was removed. c.Assert(rel.Refresh(), jc.Satisfies, errors.IsNotFound) // And try to destroy it again. - err = s.applicationApi.DestroyRelation(params.DestroyRelation{Endpoints: endpoints}) + err = s.applicationAPI.DestroyRelation(params.DestroyRelation{Endpoints: endpoints}) c.Assert(err, gc.ErrorMatches, `relation "wordpress:db mysql:server" not found`) } @@ -2587,7 +2589,7 @@ relation := s.setupDestroyRelationScenario(c, endpoints) // block remove-objects s.BlockRemoveObject(c, "TestBlockRemoveDestroyRelation") - err := s.applicationApi.DestroyRelation(params.DestroyRelation{Endpoints: endpoints}) + err := s.applicationAPI.DestroyRelation(params.DestroyRelation{Endpoints: endpoints}) s.AssertBlocked(c, err, "TestBlockRemoveDestroyRelation") assertLife(c, relation, state.Alive) } @@ -2596,7 +2598,7 @@ endpoints := []string{"wordpress", "mysql"} relation := s.setupDestroyRelationScenario(c, endpoints) s.BlockAllChanges(c, "TestBlockChangeDestroyRelation") - err := s.applicationApi.DestroyRelation(params.DestroyRelation{Endpoints: endpoints}) + err := s.applicationAPI.DestroyRelation(params.DestroyRelation{Endpoints: endpoints}) s.AssertBlocked(c, err, "TestBlockChangeDestroyRelation") assertLife(c, relation, state.Alive) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/application/application_unit_test.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/application/application_unit_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/application/application_unit_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/application/application_unit_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,197 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+ +package application_test + +import ( + "github.com/juju/errors" + "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + "gopkg.in/juju/charm.v6-unstable" + "gopkg.in/juju/names.v2" + + "github.com/juju/juju/apiserver/application" + "github.com/juju/juju/apiserver/params" + apiservertesting "github.com/juju/juju/apiserver/testing" + "github.com/juju/juju/state" + coretesting "github.com/juju/juju/testing" +) + +type ApplicationSuite struct { + testing.IsolationSuite + backend mockBackend + application mockApplication + charm mockCharm + + blockChecker mockBlockChecker + authorizer apiservertesting.FakeAuthorizer + api *application.API +} + +var _ = gc.Suite(&ApplicationSuite{}) + +func (s *ApplicationSuite) SetUpTest(c *gc.C) { + s.IsolationSuite.SetUpTest(c) + s.authorizer = apiservertesting.FakeAuthorizer{ + Tag: names.NewUserTag("admin"), + } + s.application = mockApplication{} + s.charm = mockCharm{ + config: &charm.Config{ + Options: map[string]charm.Option{ + "stringOption": {Type: "string"}, + "intOption": {Type: "int", Default: int(123)}, + }, + }, + } + s.backend = mockBackend{ + application: &s.application, + charm: &s.charm, + } + s.blockChecker = mockBlockChecker{} + api, err := application.NewAPI( + &s.backend, + s.authorizer, + &s.blockChecker, + func(application.Charm) *state.Charm { + return &state.Charm{} + }, + ) + c.Assert(err, jc.ErrorIsNil) + s.api = api +} + +func (s *ApplicationSuite) TestSetCharmStorageConstraints(c *gc.C) { + toUint64Ptr := func(v uint64) *uint64 { + return &v + } + err := s.api.SetCharm(params.ApplicationSetCharm{ + ApplicationName: "postgresql", + CharmURL: "cs:postgresql", + StorageConstraints: map[string]params.StorageConstraints{ + "a": {}, + "b": {Pool: "radiant"}, + "c": {Size: toUint64Ptr(123)}, + "d": {Count: toUint64Ptr(456)}, + }, + }) + c.Assert(err, jc.ErrorIsNil) + s.backend.CheckCallNames(c, "ModelTag", "Application", "Charm") + s.application.CheckCallNames(c, 
"SetCharm") + s.application.CheckCall(c, 0, "SetCharm", state.SetCharmConfig{ + Charm: &state.Charm{}, + StorageConstraints: map[string]state.StorageConstraints{ + "a": {}, + "b": {Pool: "radiant"}, + "c": {Size: 123}, + "d": {Count: 456}, + }, + }) +} + +func (s *ApplicationSuite) TestSetCharmConfigSettings(c *gc.C) { + err := s.api.SetCharm(params.ApplicationSetCharm{ + ApplicationName: "postgresql", + CharmURL: "cs:postgresql", + ConfigSettings: map[string]string{"stringOption": "value"}, + }) + c.Assert(err, jc.ErrorIsNil) + s.backend.CheckCallNames(c, "ModelTag", "Application", "Charm") + s.charm.CheckCallNames(c, "Config") + s.application.CheckCallNames(c, "SetCharm") + s.application.CheckCall(c, 0, "SetCharm", state.SetCharmConfig{ + Charm: &state.Charm{}, + ConfigSettings: charm.Settings{"stringOption": "value"}, + }) +} + +func (s *ApplicationSuite) TestSetCharmConfigSettingsYAML(c *gc.C) { + err := s.api.SetCharm(params.ApplicationSetCharm{ + ApplicationName: "postgresql", + CharmURL: "cs:postgresql", + ConfigSettingsYAML: ` +postgresql: + stringOption: value +`, + }) + c.Assert(err, jc.ErrorIsNil) + s.backend.CheckCallNames(c, "ModelTag", "Application", "Charm") + s.charm.CheckCallNames(c, "Config") + s.application.CheckCallNames(c, "SetCharm") + s.application.CheckCall(c, 0, "SetCharm", state.SetCharmConfig{ + Charm: &state.Charm{}, + ConfigSettings: charm.Settings{"stringOption": "value"}, + }) +} + +type mockBackend struct { + application.Backend + testing.Stub + application *mockApplication + charm *mockCharm +} + +func (b *mockBackend) ModelTag() names.ModelTag { + b.MethodCall(b, "ModelTag") + b.PopNoErr() + return coretesting.ModelTag +} + +func (b *mockBackend) Application(name string) (application.Application, error) { + b.MethodCall(b, "Application", name) + if err := b.NextErr(); err != nil { + return nil, err + } + if b.application != nil { + return b.application, nil + } + return nil, errors.NotFoundf("application %q", name) +} + +func (b 
*mockBackend) Charm(curl *charm.URL) (application.Charm, error) { + b.MethodCall(b, "Charm", curl) + if err := b.NextErr(); err != nil { + return nil, err + } + if b.charm != nil { + return b.charm, nil + } + return nil, errors.NotFoundf("charm %q", curl) +} + +type mockApplication struct { + application.Application + testing.Stub +} + +func (a *mockApplication) SetCharm(cfg state.SetCharmConfig) error { + a.MethodCall(a, "SetCharm", cfg) + return a.NextErr() +} + +type mockCharm struct { + application.Charm + testing.Stub + config *charm.Config +} + +func (c *mockCharm) Config() *charm.Config { + c.MethodCall(c, "Config") + c.PopNoErr() + return c.config +} + +type mockBlockChecker struct { + testing.Stub +} + +func (c *mockBlockChecker) ChangeAllowed() error { + c.MethodCall(c, "ChangeAllowed") + return c.NextErr() +} + +func (c *mockBlockChecker) RemoveAllowed() error { + c.MethodCall(c, "RemoveAllowed") + return c.NextErr() +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/application/backend.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/application/backend.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/application/backend.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/application/backend.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,190 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package application + +import ( + "gopkg.in/juju/charm.v6-unstable" + csparams "gopkg.in/juju/charmrepo.v2-unstable/csclient/params" + "gopkg.in/juju/names.v2" + + "github.com/juju/juju/constraints" + "github.com/juju/juju/instance" + "github.com/juju/juju/state" +) + +// Backend defines the state functionality required by the application +// facade. For details on the methods, see the methods on state.State +// with the same names. 
+type Backend interface { + Application(string) (Application, error) + AddApplication(state.AddApplicationArgs) (*state.Application, error) + AddRelation(...state.Endpoint) (Relation, error) + AssignUnit(*state.Unit, state.AssignmentPolicy) error + AssignUnitWithPlacement(*state.Unit, *instance.Placement) error + Charm(*charm.URL) (Charm, error) + EndpointsRelation(...state.Endpoint) (Relation, error) + InferEndpoints(...string) ([]state.Endpoint, error) + Machine(string) (Machine, error) + ModelTag() names.ModelTag + Unit(string) (Unit, error) +} + +// BlockChecker defines the block-checking functionality required by +// the application facade. This is implemented by +// apiserver/common.BlockChecker. +type BlockChecker interface { + ChangeAllowed() error + RemoveAllowed() error +} + +// Application defines a subset of the functionality provided by the +// state.Application type, as required by the application facade. For +// details on the methods, see the methods on state.Application with +// the same names. +type Application interface { + AddUnit() (*state.Unit, error) + Charm() (Charm, bool, error) + CharmURL() (*charm.URL, bool) + Channel() csparams.Channel + ClearExposed() error + ConfigSettings() (charm.Settings, error) + Constraints() (constraints.Value, error) + Destroy() error + Endpoints() ([]state.Endpoint, error) + IsPrincipal() bool + Series() string + SetCharm(state.SetCharmConfig) error + SetConstraints(constraints.Value) error + SetExposed() error + SetMetricCredentials([]byte) error + SetMinUnits(int) error + UpdateConfigSettings(charm.Settings) error +} + +// Charm defines a subset of the functionality provided by the +// state.Charm type, as required by the application facade. For +// details on the methods, see the methods on state.Charm with +// the same names. +type Charm interface { + charm.Charm +} + +// Machine defines a subset of the functionality provided by the +// state.Machine type, as required by the application facade. 
For +// details on the methods, see the methods on state.Machine with +// the same names. +type Machine interface { +} + +// Relation defines a subset of the functionality provided by the +// state.Relation type, as required by the application facade. For +// details on the methods, see the methods on state.Relation with +// the same names. +type Relation interface { + Destroy() error + Endpoint(string) (state.Endpoint, error) +} + +// Unit defines a subset of the functionality provided by the +// state.Unit type, as required by the application facade. For +// details on the methods, see the methods on state.Unit with +// the same names. +type Unit interface { + Destroy() error + IsPrincipal() bool + Life() state.Life +} + +type stateShim struct { + *state.State +} + +// NewStateBackend converts a state.State into a Backend. +func NewStateBackend(st *state.State) Backend { + return stateShim{st} +} + +// CharmToStateCharm converts a Charm into a state.Charm. This is +// a hack that is required until the State interface methods we +// deal with stop accepting state.Charms, and start accepting +// charm.Charm and charm.URL. +func CharmToStateCharm(ch Charm) *state.Charm { + return ch.(stateCharmShim).Charm +} + +func (s stateShim) Application(name string) (Application, error) { + a, err := s.State.Application(name) + if err != nil { + return nil, err + } + return stateApplicationShim{a}, nil +} + +func (s stateShim) AddRelation(eps ...state.Endpoint) (Relation, error) { + r, err := s.State.AddRelation(eps...) + if err != nil { + return nil, err + } + return stateRelationShim{r}, nil +} + +func (s stateShim) Charm(curl *charm.URL) (Charm, error) { + ch, err := s.State.Charm(curl) + if err != nil { + return nil, err + } + return stateCharmShim{ch}, nil +} + +func (s stateShim) EndpointsRelation(eps ...state.Endpoint) (Relation, error) { + r, err := s.State.EndpointsRelation(eps...) 
+ if err != nil { + return nil, err + } + return stateRelationShim{r}, nil +} + +func (s stateShim) Machine(name string) (Machine, error) { + m, err := s.State.Machine(name) + if err != nil { + return nil, err + } + return stateMachineShim{m}, nil +} + +func (s stateShim) Unit(name string) (Unit, error) { + u, err := s.State.Unit(name) + if err != nil { + return nil, err + } + return stateUnitShim{u}, nil +} + +type stateApplicationShim struct { + *state.Application +} + +func (a stateApplicationShim) Charm() (Charm, bool, error) { + ch, force, err := a.Application.Charm() + if err != nil { + return nil, false, err + } + return ch, force, nil +} + +type stateCharmShim struct { + *state.Charm +} + +type stateMachineShim struct { + *state.Machine +} + +type stateRelationShim struct { + *state.Relation +} + +type stateUnitShim struct { + *state.Unit +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/application/get.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/application/get.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/application/get.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/application/get.go 2016-10-13 14:31:49.000000000 +0000 @@ -12,7 +12,10 @@ // Get returns the configuration for a service. 
func (api *API) Get(args params.ApplicationGet) (params.ApplicationGetResults, error) { - app, err := api.state.Application(args.ApplicationName) + if err := api.checkCanRead(); err != nil { + return params.ApplicationGetResults{}, err + } + app, err := api.backend.Application(args.ApplicationName) if err != nil { return params.ApplicationGetResults{}, err } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/application/get_test.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/application/get_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/application/get_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/application/get_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -12,6 +12,7 @@ apiapplication "github.com/juju/juju/api/application" "github.com/juju/juju/apiserver/application" + "github.com/juju/juju/apiserver/common" "github.com/juju/juju/apiserver/params" apiservertesting "github.com/juju/juju/apiserver/testing" "github.com/juju/juju/constraints" @@ -21,7 +22,7 @@ type getSuite struct { jujutesting.JujuConnSuite - serviceApi *application.API + serviceAPI *application.API authorizer apiservertesting.FakeAuthorizer } @@ -34,13 +35,18 @@ Tag: s.AdminUserTag(c), } var err error - s.serviceApi, err = application.NewAPI(s.State, nil, s.authorizer) + backend := application.NewStateBackend(s.State) + blockChecker := common.NewBlockChecker(s.State) + s.serviceAPI, err = application.NewAPI( + backend, s.authorizer, blockChecker, + application.CharmToStateCharm, + ) c.Assert(err, jc.ErrorIsNil) } func (s *getSuite) TestClientServiceGetSmoketest(c *gc.C) { s.AddTestingService(c, "wordpress", s.AddTestingCharm(c, "wordpress")) - results, err := s.serviceApi.Get(params.ApplicationGet{"wordpress"}) + results, err := s.serviceAPI.Get(params.ApplicationGet{"wordpress"}) c.Assert(err, jc.ErrorIsNil) c.Assert(results, gc.DeepEquals, params.ApplicationGetResults{ Application: "wordpress", @@ 
-58,7 +64,7 @@ } func (s *getSuite) TestServiceGetUnknownService(c *gc.C) { - _, err := s.serviceApi.Get(params.ApplicationGet{"unknown"}) + _, err := s.serviceAPI.Get(params.ApplicationGet{"unknown"}) c.Assert(err, gc.ErrorMatches, `application "unknown" not found`) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/authcontext.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/authcontext.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/authcontext.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/authcontext.go 2016-10-13 14:31:49.000000000 +0000 @@ -5,6 +5,7 @@ import ( "net/http" + "net/url" "sync" "time" @@ -12,7 +13,9 @@ "github.com/juju/utils/clock" "gopkg.in/juju/names.v2" "gopkg.in/macaroon-bakery.v1/bakery" + "gopkg.in/macaroon-bakery.v1/bakery/checkers" "gopkg.in/macaroon-bakery.v1/httpbakery" + "gopkg.in/macaroon.v1" "github.com/juju/juju/apiserver/authentication" "github.com/juju/juju/apiserver/common" @@ -21,13 +24,31 @@ "github.com/juju/juju/state/bakerystorage" ) +const ( + localUserIdentityLocationPath = "/auth" +) + // authContext holds authentication context shared // between all API endpoints. type authContext struct { st *state.State + clock clock.Clock agentAuth authentication.AgentAuthenticator - userAuth authentication.UserAuthenticator + + // localUserBakeryService is the bakery.Service used by the controller + // for authenticating local users. In time, we may want to use this for + // both local and external users. Note that this service does not + // discharge the third-party caveats. + localUserBakeryService *expirableStorageBakeryService + + // localUserThirdPartyBakeryService is the bakery.Service used by the + // controller for discharging third-party caveats for local users. + localUserThirdPartyBakeryService *bakery.Service + + // localUserInteractions maintains a set of in-progress local user + // authentication interactions. 
+ localUserInteractions *authentication.Interactions // macaroonAuthOnce guards the fields below it. macaroonAuthOnce sync.Once @@ -37,29 +58,97 @@ // newAuthContext creates a new authentication context for st. func newAuthContext(st *state.State) (*authContext, error) { - ctxt := &authContext{st: st} + ctxt := &authContext{ + st: st, + // TODO(fwereade) 2016-07-21 there should be a clock parameter + clock: clock.WallClock, + localUserInteractions: authentication.NewInteractions(), + } + + // Create a bakery service for discharging third-party caveats for + // local user authentication. This service does not persist keys; + // its macaroons should be very short-lived. + localUserThirdPartyBakeryService, _, err := newBakeryService(st, nil, nil) + if err != nil { + return nil, errors.Trace(err) + } + ctxt.localUserThirdPartyBakeryService = localUserThirdPartyBakeryService + + // Create a bakery service for local user authentication. This service + // persists keys into MongoDB in a TTL collection. store, err := st.NewBakeryStorage() if err != nil { return nil, errors.Trace(err) } - // We use a non-nil, but empty key, because we don't use the - // key, and don't want to incur the overhead of generating one - // each time we create a service. 
- bakeryService, key, err := newBakeryService(st, store, nil) + locator := bakeryServicePublicKeyLocator{ctxt.localUserThirdPartyBakeryService} + localUserBakeryService, localUserBakeryServiceKey, err := newBakeryService( + st, store, locator, + ) if err != nil { return nil, errors.Trace(err) } - ctxt.userAuth.Service = &expirableStorageBakeryService{bakeryService, key, store, nil} - // TODO(fwereade) 2016-07-21 there should be a clock parameter - ctxt.userAuth.Clock = clock.WallClock + ctxt.localUserBakeryService = &expirableStorageBakeryService{ + localUserBakeryService, localUserBakeryServiceKey, store, locator, + } return ctxt, nil } +type bakeryServicePublicKeyLocator struct { + service *bakery.Service +} + +// PublicKeyForLocation implements bakery.PublicKeyLocator. +func (b bakeryServicePublicKeyLocator) PublicKeyForLocation(string) (*bakery.PublicKey, error) { + return b.service.PublicKey(), nil +} + +// CreateLocalLoginMacaroon creates a macaroon that may be provided to a user +// as proof that they have logged in with a valid username and password. This +// macaroon may then be used to obtain a discharge macaroon so that the user +// can log in without presenting their password for a set amount of time. +func (ctxt *authContext) CreateLocalLoginMacaroon(tag names.UserTag) (*macaroon.Macaroon, error) { + return authentication.CreateLocalLoginMacaroon(tag, ctxt.localUserThirdPartyBakeryService, ctxt.clock) +} + +// CheckLocalLoginCaveat parses and checks that the given caveat string is +// valid for a local login request, and returns the tag of the local user +// that the caveat asserts is logged in. checkers.ErrCaveatNotRecognized will +// be returned if the caveat is not recognised. 
+func (ctxt *authContext) CheckLocalLoginCaveat(caveat string) (names.UserTag, error) { + return authentication.CheckLocalLoginCaveat(caveat) +} + +// CheckLocalLoginRequest checks that the given HTTP request contains at least +// one valid local login macaroon minted using CreateLocalLoginMacaroon. It +// returns an error with a *bakery.VerificationError cause if the macaroon +// verification failed. If the macaroon is valid, CheckLocalLoginRequest returns +// a list of caveats to add to the discharge macaroon. +func (ctxt *authContext) CheckLocalLoginRequest(req *http.Request, tag names.UserTag) ([]checkers.Caveat, error) { + return authentication.CheckLocalLoginRequest(ctxt.localUserThirdPartyBakeryService, req, tag, ctxt.clock) +} + +// authenticator returns an authenticator.EntityAuthenticator for the API +// connection associated with the specified API server host. +func (ctxt *authContext) authenticator(serverHost string) authenticator { + return authenticator{ctxt: ctxt, serverHost: serverHost} +} + +// authenticator implements authenticator.EntityAuthenticator, delegating +// to the appropriate authenticator based on the tag kind. +type authenticator struct { + ctxt *authContext + serverHost string +} + // Authenticate implements authentication.EntityAuthenticator // by choosing the right kind of authentication for the given // tag. -func (ctxt *authContext) Authenticate(entityFinder authentication.EntityFinder, tag names.Tag, req params.LoginRequest) (state.Entity, error) { - auth, err := ctxt.authenticatorForTag(tag) +func (a authenticator) Authenticate( + entityFinder authentication.EntityFinder, + tag names.Tag, + req params.LoginRequest, +) (state.Entity, error) { + auth, err := a.authenticatorForTag(tag) if err != nil { return nil, errors.Trace(err) } @@ -68,12 +157,11 @@ // authenticatorForTag returns the authenticator appropriate // to use for a login with the given possibly-nil tag. 
-func (ctxt *authContext) authenticatorForTag(tag names.Tag) (authentication.EntityAuthenticator, error) { +func (a authenticator) authenticatorForTag(tag names.Tag) (authentication.EntityAuthenticator, error) { if tag == nil { - auth, err := ctxt.macaroonAuth() + auth, err := a.ctxt.externalMacaroonAuth() if errors.Cause(err) == errMacaroonAuthNotConfigured { - // Make a friendlier error message. - err = errors.New("no credentials provided") + err = errors.Trace(common.ErrNoCreds) } if err != nil { return nil, errors.Trace(err) @@ -82,17 +170,32 @@ } switch tag.Kind() { case names.UnitTagKind, names.MachineTagKind: - return &ctxt.agentAuth, nil + return &a.ctxt.agentAuth, nil case names.UserTagKind: - return &ctxt.userAuth, nil + return a.localUserAuth(), nil default: return nil, errors.Annotatef(common.ErrBadRequest, "unexpected login entity tag") } } -// macaroonAuth returns an authenticator that can authenticate macaroon-based -// logins. If it fails once, it will always fail. -func (ctxt *authContext) macaroonAuth() (authentication.EntityAuthenticator, error) { +// localUserAuth returns an authenticator that can authenticate logins for +// local users with either passwords or macaroons. +func (a authenticator) localUserAuth() *authentication.UserAuthenticator { + localUserIdentityLocation := url.URL{ + Scheme: "https", + Host: a.serverHost, + Path: localUserIdentityLocationPath, + } + return &authentication.UserAuthenticator{ + Service: a.ctxt.localUserBakeryService, + Clock: a.ctxt.clock, + LocalUserIdentityLocation: localUserIdentityLocation.String(), + } +} + +// externalMacaroonAuth returns an authenticator that can authenticate macaroon-based +// logins for external users. If it fails once, it will always fail. 
+func (ctxt *authContext) externalMacaroonAuth() (authentication.EntityAuthenticator, error) { ctxt.macaroonAuthOnce.Do(func() { ctxt._macaroonAuth, ctxt._macaroonAuthError = newExternalMacaroonAuth(ctxt.st) }) @@ -106,7 +209,7 @@ // newExternalMacaroonAuth returns an authenticator that can authenticate // macaroon-based logins for external users. This is just a helper function -// for authCtxt.macaroonAuth. +// for authCtxt.externalMacaroonAuth. func newExternalMacaroonAuth(st *state.State) (*authentication.ExternalMacaroonAuthenticator, error) { controllerCfg, err := st.ControllerConfig() if err != nil { diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/authentication/agent.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/authentication/agent.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/authentication/agent.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/authentication/agent.go 2016-10-13 14:31:49.000000000 +0000 @@ -43,6 +43,12 @@ // If this is a machine agent connecting, we need to check the // nonce matches, otherwise the wrong agent might be trying to // connect. + // + // NOTE(axw) with the current implementation of Login, it is + // important that we check the password before checking the + // nonce, or an unprovisioned machine in a hosted model will + // prevent a controller machine from logging into the hosted + // model. 
if machine, ok := authenticator.(*state.Machine); ok { if !machine.CheckProvisioned(req.Nonce) { return nil, errors.NotProvisionedf("machine %v", machine.Id()) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/authentication/interactions.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/authentication/interactions.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/authentication/interactions.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/authentication/interactions.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,136 @@ +// Copyright 2016 Canonical Ltd. All rights reserved. +// Licensed under the AGPLv3, see LICENCE file for details. + +package authentication + +import ( + "crypto/rand" + "fmt" + "sync" + "time" + + "github.com/juju/errors" + "gopkg.in/juju/names.v2" +) + +// ErrWaitCanceled is returned by Interactions.Wait when the cancel +// channel is signalled. +var ErrWaitCanceled = errors.New("wait canceled") + +// ErrExpired is returned by Interactions.Wait when interactions expire +// before they are done. +var ErrExpired = errors.New("interaction timed out") + +// Interactions maintains a set of Interactions. +type Interactions struct { + mu sync.Mutex + items map[string]*item +} + +type item struct { + c chan Interaction + caveatId string + expiry time.Time + done bool +} + +// Interaction records details of an in-progress interactive +// macaroon-based login. +type Interaction struct { + CaveatId string + LoginUser names.UserTag + LoginError error +} + +// NewInteractions returns a new Interactions. 
+func NewInteractions() *Interactions { + return &Interactions{ + items: make(map[string]*item), + } +} + +func newId() (string, error) { + var id [12]byte + if _, err := rand.Read(id[:]); err != nil { + return "", fmt.Errorf("cannot read random id: %v", err) + } + return fmt.Sprintf("%x", id[:]), nil +} + +// Start records the start of an interactive login, and returns a random ID +// that uniquely identifies it. A call to Wait with the same ID will return +// the Interaction once it is done. +func (m *Interactions) Start(caveatId string, expiry time.Time) (string, error) { + id, err := newId() + if err != nil { + return "", err + } + m.mu.Lock() + defer m.mu.Unlock() + m.items[id] = &item{ + c: make(chan Interaction, 1), + caveatId: caveatId, + expiry: expiry, + } + return id, nil +} + +// Done signals that the user has either logged in, or attempted to and failed. +func (m *Interactions) Done(id string, loginUser names.UserTag, loginError error) error { + m.mu.Lock() + defer m.mu.Unlock() + item := m.items[id] + + if item == nil { + return errors.NotFoundf("interaction %q", id) + } + if item.done { + return errors.Errorf("interaction %q already done", id) + } + item.done = true + item.c <- Interaction{ + CaveatId: item.caveatId, + LoginUser: loginUser, + LoginError: loginError, + } + return nil +} + +// Wait waits until the identified interaction is done, and returns the +// corresponding Interaction. If the cancel channel is signalled before +// the interaction is done, then ErrWaitCanceled is returned. If the +// interaction expires before it is done, ErrExpired is returned. 
+func (m *Interactions) Wait(id string, cancel <-chan struct{}) (*Interaction, error) { + m.mu.Lock() + item := m.items[id] + m.mu.Unlock() + if item == nil { + return nil, errors.NotFoundf("interaction %q", id) + } + select { + case <-cancel: + return nil, ErrWaitCanceled + case interaction, ok := <-item.c: + if !ok { + return nil, ErrExpired + } + m.mu.Lock() + delete(m.items, id) + m.mu.Unlock() + return &interaction, nil + } +} + +// Expire removes any interactions that were due to expire by the +// specified time. +func (m *Interactions) Expire(t time.Time) { + m.mu.Lock() + defer m.mu.Unlock() + for id, item := range m.items { + if item.done || item.expiry.After(t) { + continue + } + delete(m.items, id) + close(item.c) + } +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/authentication/interactions_test.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/authentication/interactions_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/authentication/interactions_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/authentication/interactions_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,164 @@ +// Copyright 2016 Canonical Ltd. All rights reserved. +// Licensed under the AGPLv3, see LICENCE file for details. 
+ +package authentication_test + +import ( + "time" + + "github.com/juju/errors" + "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + "gopkg.in/juju/names.v2" + + "github.com/juju/juju/apiserver/authentication" + coretesting "github.com/juju/juju/testing" +) + +type InteractionsSuite struct { + testing.IsolationSuite + interactions *authentication.Interactions +} + +var _ = gc.Suite(&InteractionsSuite{}) + +func (s *InteractionsSuite) SetUpTest(c *gc.C) { + s.IsolationSuite.SetUpTest(c) + s.interactions = authentication.NewInteractions() +} + +func (s *InteractionsSuite) TestStart(c *gc.C) { + waitId, err := s.interactions.Start("caveat-id", time.Time{}) + c.Assert(err, jc.ErrorIsNil) + c.Assert(waitId, gc.Not(gc.Equals), "") +} + +func (s *InteractionsSuite) TestDone(c *gc.C) { + waitId := s.start(c, "caveat-id") + err := s.interactions.Done(waitId, names.NewUserTag("admin@local"), nil) + c.Assert(err, jc.ErrorIsNil) +} + +func (s *InteractionsSuite) TestDoneNotFound(c *gc.C) { + err := s.interactions.Done("not-found", names.NewUserTag("admin@local"), nil) + c.Assert(err, jc.Satisfies, errors.IsNotFound) + c.Assert(err, gc.ErrorMatches, `interaction "not-found" not found`) +} + +func (s *InteractionsSuite) TestDoneTwice(c *gc.C) { + waitId := s.start(c, "caveat-id") + err := s.interactions.Done(waitId, names.NewUserTag("admin@local"), nil) + c.Assert(err, jc.ErrorIsNil) + err = s.interactions.Done(waitId, names.NewUserTag("admin@local"), nil) + c.Assert(err, gc.ErrorMatches, `interaction ".*" already done`) +} + +func (s *InteractionsSuite) TestWait(c *gc.C) { + waitId := s.start(c, "caveat-id") + loginUser := names.NewUserTag("admin@local") + loginError := errors.New("login failed") + s.done(c, waitId, loginUser, loginError) + interaction, err := s.interactions.Wait(waitId, nil) + c.Assert(err, jc.ErrorIsNil) + c.Assert(interaction, gc.NotNil) + c.Assert(interaction, jc.DeepEquals, &authentication.Interaction{ + CaveatId: 
"caveat-id", + LoginUser: loginUser, + LoginError: loginError, + }) +} + +func (s *InteractionsSuite) TestWaitNotFound(c *gc.C) { + interaction, err := s.interactions.Wait("not-found", nil) + c.Assert(err, gc.ErrorMatches, `interaction "not-found" not found`) + c.Assert(interaction, gc.IsNil) +} + +func (s *InteractionsSuite) TestWaitTwice(c *gc.C) { + waitId := s.start(c, "caveat-id") + s.done(c, waitId, names.NewUserTag("admin@local"), nil) + + _, err := s.interactions.Wait(waitId, nil) + c.Assert(err, jc.ErrorIsNil) + + // The Wait call above should have removed the item. + _, err = s.interactions.Wait(waitId, nil) + c.Assert(err, gc.ErrorMatches, `interaction ".*" not found`) +} + +func (s *InteractionsSuite) TestWaitCancellation(c *gc.C) { + waitId := s.start(c, "caveat-id") + + cancel := make(chan struct{}) + waitResult := make(chan error) + go func() { + _, err := s.interactions.Wait(waitId, cancel) + waitResult <- err + }() + + // Wait should not pass until we've cancelled. + select { + case err := <-waitResult: + c.Fatalf("unexpected result: %v", err) + case <-time.After(coretesting.ShortWait): + } + + cancel <- struct{}{} + select { + case err := <-waitResult: + c.Assert(err, gc.Equals, authentication.ErrWaitCanceled) + case <-time.After(coretesting.LongWait): + c.Fatalf("timed out waiting for Wait to return") + } +} + +func (s *InteractionsSuite) TestWaitExpired(c *gc.C) { + t0 := time.Now() + t1 := t0.Add(time.Second) + t2 := t1.Add(time.Second) + + waitId, err := s.interactions.Start("caveat-id", t2) + c.Assert(err, jc.ErrorIsNil) + + type waitResult struct { + interaction *authentication.Interaction + err error + } + waitResultC := make(chan waitResult) + go func() { + interaction, err := s.interactions.Wait(waitId, nil) + waitResultC <- waitResult{interaction, err} + }() + + // This should do nothing, because there's nothing + // due to expire until t2. + s.interactions.Expire(t1) + + // Wait should not pass until the interaction expires. 
+ select { + case result := <-waitResultC: + c.Fatalf("unexpected result: %v", result) + case <-time.After(coretesting.ShortWait): + } + + s.interactions.Expire(t2) + select { + case result := <-waitResultC: + c.Assert(result.err, gc.Equals, authentication.ErrExpired) + c.Assert(result.interaction, gc.IsNil) + case <-time.After(coretesting.LongWait): + c.Fatalf("timed out waiting for Wait to return") + } +} + +func (s *InteractionsSuite) start(c *gc.C, caveatId string) string { + waitId, err := s.interactions.Start(caveatId, time.Time{}) + c.Assert(err, jc.ErrorIsNil) + return waitId +} + +func (s *InteractionsSuite) done(c *gc.C, waitId string, loginUser names.UserTag, loginError error) { + err := s.interactions.Done(waitId, loginUser, loginError) + c.Assert(err, jc.ErrorIsNil) +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/authentication/user.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/authentication/user.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/authentication/user.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/authentication/user.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,6 +4,7 @@ package authentication import ( + "net/http" "time" "github.com/juju/errors" @@ -12,6 +13,7 @@ "gopkg.in/juju/names.v2" "gopkg.in/macaroon-bakery.v1/bakery" "gopkg.in/macaroon-bakery.v1/bakery/checkers" + "gopkg.in/macaroon-bakery.v1/httpbakery" "gopkg.in/macaroon.v1" "github.com/juju/juju/apiserver/common" @@ -30,11 +32,21 @@ // Clock is used to calculate the expiry time for macaroons. Clock clock.Clock + + // LocalUserIdentityLocation holds the URL of the trusted third party + // that is used to address the is-authenticated-user third party caveat + // to for local users. This always points at the same controller + // agent that is servicing the authorisation request. 
+ LocalUserIdentityLocation string } const ( usernameKey = "username" + // LocalLoginInteractionTimeout is how long a user has to complete + // an interactive login before it is expired. + LocalLoginInteractionTimeout = 2 * time.Minute + // TODO(axw) make this configurable via model config. localLoginExpiryTime = 24 * time.Hour @@ -65,57 +77,119 @@ return u.AgentAuthenticator.Authenticate(entityFinder, tag, req) } -// CreateLocalLoginMacaroon creates a time-limited macaroon for a local user -// to log into the controller with. The macaroon will be valid for use with -// UserAuthenticator.Authenticate until the time limit expires, or the Juju -// controller agent restarts. -// -// NOTE(axw) this method will generate a key for a previously unseen user, -// and store it in the bakery.Service's storage. Callers should first ensure -// the user is valid before calling this, to avoid filling storage with keys -// for invalid users. -func (u *UserAuthenticator) CreateLocalLoginMacaroon(tag names.UserTag) (*macaroon.Macaroon, error) { - - // Ensure that the private key that we generate and store will be - // removed from storage once the expiry time has elapsed. - expiryTime := u.Clock.Now().Add(localLoginExpiryTime) - bakeryService, err := u.Service.ExpireStorageAt(expiryTime) - if err != nil { - return nil, errors.Trace(err) - } - +// CreateLocalLoginMacaroon creates a macaroon that may be provided to a +// user as proof that they have logged in with a valid username and password. +// This macaroon may then be used to obtain a discharge macaroon so that +// the user can log in without presenting their password for a set amount +// of time. +func CreateLocalLoginMacaroon( + tag names.UserTag, + service BakeryService, + clock clock.Clock, +) (*macaroon.Macaroon, error) { // We create the macaroon with a random ID and random root key, which // enables multiple clients to login as the same user and obtain separate // macaroons without having them use the same root key. 
- m, err := bakeryService.NewMacaroon("", nil, []checkers.Caveat{ - // The macaroon may only be used to log in as the user - // specified by the tag passed to CreateLocalUserMacaroon. - checkers.DeclaredCaveat(usernameKey, tag.Canonical()), + return service.NewMacaroon("", nil, []checkers.Caveat{ + {Condition: "is-authenticated-user " + tag.Id()}, + checkers.TimeBeforeCaveat(clock.Now().Add(LocalLoginInteractionTimeout)), }) +} + +// CheckLocalLoginCaveat parses and checks that the given caveat string is +// valid for a local login request, and returns the tag of the local user +// that the caveat asserts is logged in. checkers.ErrCaveatNotRecognized will +// be returned if the caveat is not recognised. +func CheckLocalLoginCaveat(caveat string) (names.UserTag, error) { + var tag names.UserTag + op, rest, err := checkers.ParseCaveat(caveat) if err != nil { - return nil, errors.Annotate(err, "cannot create macaroon") + return tag, errors.Annotatef(err, "cannot parse caveat %q", caveat) + } + if op != "is-authenticated-user" { + return tag, checkers.ErrCaveatNotRecognized + } + if !names.IsValidUser(rest) { + return tag, errors.NotValidf("username %q", rest) + } + tag = names.NewUserTag(rest) + if !tag.IsLocal() { + tag = names.UserTag{} + return tag, errors.NotValidf("non-local username %q", rest) } - if err := addMacaroonTimeBeforeCaveat(bakeryService, m, expiryTime); err != nil { + return tag, nil +} + +// CheckLocalLoginRequest checks that the given HTTP request contains at least +// one valid local login macaroon minted by the given service using +// CreateLocalLoginMacaroon. It returns an error with a +// *bakery.VerificationError cause if the macaroon verification failed. If the +// macaroon is valid, CheckLocalLoginRequest returns a list of caveats to add +// to the discharge macaroon. 
+func CheckLocalLoginRequest( + service *bakery.Service, + req *http.Request, + tag names.UserTag, + clock clock.Clock, +) ([]checkers.Caveat, error) { + _, err := httpbakery.CheckRequest(service, req, nil, checkers.CheckerFunc{ + // Having a macaroon with an is-authenticated-user + // caveat is proof that the user is "logged in". + "is-authenticated-user", + func(cond, arg string) error { return nil }, + }) + if err != nil { return nil, errors.Trace(err) } - return m, nil + firstPartyCaveats := []checkers.Caveat{ + checkers.DeclaredCaveat("username", tag.Id()), + checkers.TimeBeforeCaveat(clock.Now().Add(localLoginExpiryTime)), + } + return firstPartyCaveats, nil } func (u *UserAuthenticator) authenticateMacaroons( entityFinder EntityFinder, tag names.UserTag, req params.LoginRequest, ) (state.Entity, error) { // Check for a valid request macaroon. - assert := map[string]string{usernameKey: tag.Canonical()} + assert := map[string]string{usernameKey: tag.Id()} _, err := u.Service.CheckAny(req.Macaroons, assert, checkers.New(checkers.TimeBefore)) if err != nil { - logger.Debugf("local-login macaroon authentication failed: %v", err) - if allMacaroonsExpired(u.Clock.Now(), req.Macaroons) { - return nil, common.ErrLoginExpired + cause := err + logger.Debugf("local-login macaroon authentication failed: %v", cause) + if _, ok := errors.Cause(err).(*bakery.VerificationError); !ok { + return nil, errors.Trace(err) + } + + // The root keys for these macaroons are stored in MongoDB. + // Expire the documents after after a set amount of time. 
+ expiryTime := u.Clock.Now().Add(localLoginExpiryTime) + service, err := u.Service.ExpireStorageAt(expiryTime) + if err != nil { + return nil, errors.Trace(err) + } + + m, err := service.NewMacaroon("", nil, []checkers.Caveat{ + checkers.NeedDeclaredCaveat( + checkers.Caveat{ + Location: u.LocalUserIdentityLocation, + Condition: "is-authenticated-user " + tag.Id(), + }, + usernameKey, + ), + checkers.TimeBeforeCaveat(expiryTime), + }) + if err != nil { + return nil, errors.Annotate(err, "cannot create macaroon") + } + return nil, &common.DischargeRequiredError{ + Cause: cause, + Macaroon: m, } - return nil, errors.Trace(common.ErrBadCreds) } entity, err := entityFinder.FindEntity(tag) if errors.IsNotFound(err) { + logger.Debugf("entity %s not found", tag.String()) return nil, errors.Trace(common.ErrBadCreds) } else if err != nil { return nil, errors.Trace(err) @@ -123,41 +197,6 @@ return entity, nil } -// allMacaroonsExpired reports whether or not all of the macaroon -// slices' primary macaroons have expired. -func allMacaroonsExpired(now time.Time, ms []macaroon.Slice) bool { - for _, ms := range ms { - if len(ms) == 0 { - continue - } - m := ms[0] - var expired bool - for _, c := range m.Caveats() { - if c.Location != "" { - continue - } - cond, arg, err := checkers.ParseCaveat(c.Id) - if err != nil { - continue - } - if cond != checkers.CondTimeBefore { - continue - } - t, err := time.Parse(time.RFC3339Nano, arg) - if err != nil { - return false - } - if !now.Before(t) { - expired = true - } - } - if !expired { - return false - } - } - return true -} - // ExternalMacaroonAuthenticator performs authentication for external users using // macaroons. 
If the authentication fails because provided macaroons are invalid, // and macaroon authentiction is enabled, it will return a *common.DischargeRequiredError diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/authentication/user_test.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/authentication/user_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/authentication/user_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/authentication/user_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -25,7 +25,6 @@ "github.com/juju/juju/apiserver/params" jujutesting "github.com/juju/juju/juju/testing" "github.com/juju/juju/state" - coretesting "github.com/juju/juju/testing" "github.com/juju/juju/testing/factory" ) @@ -140,7 +139,6 @@ Nonce: "", }) c.Assert(err, gc.ErrorMatches, "invalid request") - } func (s *userAuthenticatorSuite) TestValidMacaroonUserLogin(c *gc.C) { @@ -163,62 +161,56 @@ call := service.Calls()[0] c.Assert(call.Args, gc.HasLen, 3) c.Assert(call.Args[0], jc.DeepEquals, macaroons) - c.Assert(call.Args[1], jc.DeepEquals, map[string]string{"username": "bobbrown@local"}) + c.Assert(call.Args[1], jc.DeepEquals, map[string]string{"username": "bobbrown"}) // no check for checker function, can't compare functions } -func (s *userAuthenticatorSuite) TestMacaroonUserLoginExpired(c *gc.C) { - user := s.Factory.MakeUser(c, &factory.UserParams{ - Name: "bobbrown", - }) - clock := coretesting.NewClock(time.Now()) - - m := &macaroon.Macaroon{} - err := m.AddFirstPartyCaveat( - checkers.TimeBeforeCaveat(clock.Now().Add(-time.Second)).Condition, +func (s *userAuthenticatorSuite) TestCreateLocalLoginMacaroon(c *gc.C) { + service := mockBakeryService{} + clock := testing.NewClock(time.Time{}) + _, err := authentication.CreateLocalLoginMacaroon( + names.NewUserTag("bobbrown"), &service, clock, ) c.Assert(err, jc.ErrorIsNil) - - macaroons := []macaroon.Slice{{m}} - service := mockBakeryService{} 
- service.SetErrors(errors.New("auth failed")) - - // User login - authenticator := &authentication.UserAuthenticator{ - Service: &service, - Clock: clock, - } - _, err = authenticator.Authenticate(s.State, user.Tag(), params.LoginRequest{ - Credentials: "", - Nonce: "", - Macaroons: macaroons, + service.CheckCallNames(c, "NewMacaroon") + service.CheckCall(c, 0, "NewMacaroon", "", []byte(nil), []checkers.Caveat{ + {Condition: "is-authenticated-user bobbrown"}, + {Condition: "time-before 0001-01-01T00:02:00Z"}, }) - c.Assert(err, gc.Equals, common.ErrLoginExpired) } -func (s *userAuthenticatorSuite) TestCreateLocalLoginMacaroon(c *gc.C) { +func (s *userAuthenticatorSuite) TestAuthenticateLocalLoginMacaroon(c *gc.C) { service := mockBakeryService{} - clock := coretesting.NewClock(time.Time{}) + clock := testing.NewClock(time.Time{}) authenticator := &authentication.UserAuthenticator{ Service: &service, Clock: clock, + LocalUserIdentityLocation: "https://testing.invalid:1234/auth", } - _, err := authenticator.CreateLocalLoginMacaroon(names.NewUserTag("bobbrown")) - c.Assert(err, jc.ErrorIsNil) + service.SetErrors(&bakery.VerificationError{}) + _, err := authenticator.Authenticate( + authentication.EntityFinder(nil), + names.NewUserTag("bobbrown"), + params.LoginRequest{}, + ) + c.Assert(err, gc.FitsTypeOf, &common.DischargeRequiredError{}) - service.CheckCallNames(c, "ExpireStorageAt", "NewMacaroon", "AddCaveat") + service.CheckCallNames(c, "CheckAny", "ExpireStorageAt", "NewMacaroon") calls := service.Calls() - c.Assert(calls[0].Args, jc.DeepEquals, []interface{}{clock.Now().Add(24 * time.Hour)}) - c.Assert(calls[1].Args, jc.DeepEquals, []interface{}{ + c.Assert(calls[1].Args, jc.DeepEquals, []interface{}{clock.Now().Add(24 * time.Hour)}) + c.Assert(calls[2].Args, jc.DeepEquals, []interface{}{ "", []byte(nil), []checkers.Caveat{ - checkers.DeclaredCaveat("username", "bobbrown@local"), + checkers.NeedDeclaredCaveat( + checkers.Caveat{ + Location: 
"https://testing.invalid:1234/auth", + Condition: "is-authenticated-user bobbrown", + }, + "username", + ), + {Condition: "time-before 0001-01-02T00:00:00Z"}, }, }) - c.Assert(calls[2].Args, jc.DeepEquals, []interface{}{ - &macaroon.Macaroon{}, - checkers.TimeBeforeCaveat(clock.Now().Add(24 * time.Hour)), - }) } type mockBakeryService struct { @@ -362,7 +354,7 @@ if utag, ok := tag.(names.UserTag); ok { // It's a user tag which we need to be in canonical form // so we can look it up unambiguously. - tag = names.NewUserTag(utag.Canonical()) + tag = names.NewUserTag(utag.Id()) } if f[tag.String()] { return &simpleEntity{tag}, nil diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/authenticator_test.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/authenticator_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/authenticator_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/authenticator_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -32,8 +32,9 @@ func (s *agentAuthenticatorSuite) TestAuthenticatorForTag(c *gc.C) { fact := factory.NewFactory(s.State) user := fact.MakeUser(c, &factory.UserParams{Password: "password"}) - srv := newServer(c, s.State) - defer srv.Stop() + _, srv := newServer(c, s.State) + defer assertStop(c, srv) + authenticator, err := apiserver.ServerAuthenticatorForTag(srv, user.Tag()) c.Assert(err, jc.ErrorIsNil) c.Assert(authenticator, gc.NotNil) @@ -48,7 +49,7 @@ } func (s *agentAuthenticatorSuite) TestMachineGetsAgentAuthenticator(c *gc.C) { - srv := newServer(c, s.State) + _, srv := newServer(c, s.State) defer srv.Stop() authenticator, err := apiserver.ServerAuthenticatorForTag(srv, names.NewMachineTag("0")) c.Assert(err, jc.ErrorIsNil) @@ -57,7 +58,7 @@ } func (s *agentAuthenticatorSuite) TestUnitGetsAgentAuthenticator(c *gc.C) { - srv := newServer(c, s.State) + _, srv := newServer(c, s.State) defer srv.Stop() authenticator, err := 
apiserver.ServerAuthenticatorForTag(srv, names.NewUnitTag("wordpress/0")) c.Assert(err, jc.ErrorIsNil) @@ -66,7 +67,7 @@ } func (s *agentAuthenticatorSuite) TestNotSupportedTag(c *gc.C) { - srv := newServer(c, s.State) + _, srv := newServer(c, s.State) defer srv.Stop() authenticator, err := apiserver.ServerAuthenticatorForTag(srv, names.NewApplicationTag("not-support")) c.Assert(err, gc.ErrorMatches, "unexpected login entity tag: invalid request") diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/authhttp_test.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/authhttp_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/authhttp_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/authhttp_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -24,14 +24,14 @@ apitesting "github.com/juju/juju/api/testing" "github.com/juju/juju/apiserver/params" - "github.com/juju/juju/core/description" + "github.com/juju/juju/permission" "github.com/juju/juju/state" "github.com/juju/juju/testing" "github.com/juju/juju/testing/factory" ) -// authHttpSuite provides helpers for testing HTTP "streaming" style APIs. -type authHttpSuite struct { +// authHTTPSuite provides helpers for testing HTTP "streaming" style APIs. +type authHTTPSuite struct { // macaroonAuthEnabled may be set by a test suite // before SetUpTest is called. 
If it is true, macaroon // authentication will be enabled for the duration @@ -54,7 +54,7 @@ password string } -func (s *authHttpSuite) SetUpTest(c *gc.C) { +func (s *authHTTPSuite) SetUpTest(c *gc.C) { if s.macaroonAuthEnabled { s.MacaroonSuite.SetUpTest(c) } else { @@ -77,7 +77,7 @@ } } -func (s *authHttpSuite) TearDownTest(c *gc.C) { +func (s *authHTTPSuite) TearDownTest(c *gc.C) { if s.macaroonAuthEnabled { s.MacaroonSuite.TearDownTest(c) } else { @@ -85,7 +85,7 @@ } } -func (s *authHttpSuite) baseURL(c *gc.C) *url.URL { +func (s *authHTTPSuite) baseURL(c *gc.C) *url.URL { info := s.APIInfo(c) return &url.URL{ Scheme: "https", @@ -119,7 +119,7 @@ c.Assert(err, gc.Equals, io.EOF) } -func (s *authHttpSuite) makeURL(c *gc.C, scheme, path string, queryParams url.Values) *url.URL { +func (s *authHTTPSuite) makeURL(c *gc.C, scheme, path string, queryParams url.Values) *url.URL { url := s.baseURL(c) query := "" if queryParams != nil { @@ -174,7 +174,7 @@ nonce string } -func (s *authHttpSuite) sendRequest(c *gc.C, p httpRequestParams) *http.Response { +func (s *authHTTPSuite) sendRequest(c *gc.C, p httpRequestParams) *http.Response { c.Logf("sendRequest: %s", p.url) hp := httptesting.DoRequestParams{ Do: p.do, @@ -225,20 +225,21 @@ // authRequest is like sendRequest but fills out p.tag and p.password // from the userTag and password fields in the suite. 
-func (s *authHttpSuite) authRequest(c *gc.C, p httpRequestParams) *http.Response { +func (s *authHTTPSuite) authRequest(c *gc.C, p httpRequestParams) *http.Response { p.tag = s.userTag.String() p.password = s.password return s.sendRequest(c, p) } -func (s *authHttpSuite) setupOtherModel(c *gc.C) *state.State { +func (s *authHTTPSuite) setupOtherModel(c *gc.C) *state.State { envState := s.Factory.MakeModel(c, nil) s.AddCleanup(func(*gc.C) { envState.Close() }) user := s.Factory.MakeUser(c, nil) - _, err := envState.AddModelUser(state.UserAccessSpec{ - User: user.UserTag(), - CreatedBy: s.userTag, - Access: description.ReadAccess}) + _, err := envState.AddModelUser(envState.ModelUUID(), + state.UserAccessSpec{ + User: user.UserTag(), + CreatedBy: s.userTag, + Access: permission.ReadAccess}) c.Assert(err, jc.ErrorIsNil) s.userTag = user.UserTag() s.password = "password" @@ -246,7 +247,7 @@ return envState } -func (s *authHttpSuite) uploadRequest(c *gc.C, uri string, contentType, path string) *http.Response { +func (s *authHTTPSuite) uploadRequest(c *gc.C, uri string, contentType, path string) *http.Response { if path == "" { return s.authRequest(c, httpRequestParams{ method: "POST", diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/backup.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/backup.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/backup.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/backup.go 2016-10-13 14:31:49.000000000 +0000 @@ -101,7 +101,9 @@ return "", err } - sendStatusAndJSON(resp, http.StatusOK, ¶ms.BackupsUploadResult{ID: id}) + if err := sendStatusAndJSON(resp, http.StatusOK, ¶ms.BackupsUploadResult{ID: id}); err != nil { + return "", errors.Trace(err) + } return id, nil } @@ -162,6 +164,7 @@ // rather than in the Error field. 
func (h *backupHandler) sendError(w http.ResponseWriter, err error) { err, status := common.ServerErrorAndStatus(err) - - sendStatusAndJSON(w, status, err) + if err := sendStatusAndJSON(w, status, err); err != nil { + logger.Errorf("%v", err) + } } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/backups/backups.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/backups/backups.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/backups/backups.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/backups/backups.go 2016-10-13 14:31:49.000000000 +0000 @@ -17,6 +17,7 @@ "github.com/juju/juju/controller" "github.com/juju/juju/environs/config" "github.com/juju/juju/mongo" + "github.com/juju/juju/permission" "github.com/juju/juju/state" "github.com/juju/juju/state/backups" ) @@ -30,7 +31,9 @@ MachineSeries(id string) (string, error) MongoConnectionInfo() *mongo.MongoInfo MongoSession() *mgo.Session + MongoVersion() (string, error) ModelTag() names.ModelTag + ControllerTag() names.ControllerTag ModelConfig() (*config.Config, error) ControllerConfig() (controller.Config, error) StateServingInfo() (state.StateServingInfo, error) @@ -48,8 +51,13 @@ // NewAPI creates a new instance of the Backups API facade. func NewAPI(backend Backend, resources facade.Resources, authorizer facade.Authorizer) (*API, error) { - if !authorizer.AuthClient() { - return nil, errors.Trace(common.ErrPerm) + isControllerAdmin, err := authorizer.HasPermission(permission.SuperuserAccess, backend.ControllerTag()) + if err != nil && !errors.IsNotFound(err) { + return nil, errors.Trace(err) + } + + if !authorizer.AuthClient() || !isControllerAdmin { + return nil, common.ErrPerm } // For now, backup operations are only permitted on the controller environment. 
diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/backups/backups_test.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/backups/backups_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/backups/backups_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/backups/backups_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -35,7 +35,7 @@ s.JujuConnSuite.SetUpTest(c) s.resources = common.NewResources() s.resources.RegisterNamed("dataDir", common.StringResource("/var/lib/juju")) - tag := names.NewLocalUserTag("spam") + tag := names.NewLocalUserTag("admin") s.authorizer = &apiservertesting.FakeAuthorizer{Tag: tag} var err error s.api, err = backupsAPI.NewAPI(&stateShim{s.State}, s.resources, s.authorizer) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/backups/create.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/backups/create.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/backups/create.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/backups/create.go 2016-10-13 14:31:49.000000000 +0000 @@ -8,6 +8,7 @@ "github.com/juju/replicaset" "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/mongo" "github.com/juju/juju/state/backups" ) @@ -29,7 +30,15 @@ } mgoInfo := a.backend.MongoConnectionInfo() - dbInfo, err := backups.NewDBInfo(mgoInfo, session) + v, err := a.backend.MongoVersion() + if err != nil { + return p, errors.Annotatef(err, "discovering mongo version") + } + mongoVersion, err := mongo.NewVersion(v) + if err != nil { + return p, errors.Trace(err) + } + dbInfo, err := backups.NewDBInfo(mgoInfo, session, mongoVersion) if err != nil { return p, errors.Trace(err) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/backups/restore.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/backups/restore.go --- 
juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/backups/restore.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/backups/restore.go 2016-10-13 14:31:49.000000000 +0000 @@ -11,6 +11,7 @@ "gopkg.in/juju/names.v2" "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/mongo" "github.com/juju/juju/service" "github.com/juju/juju/service/common" "github.com/juju/juju/state" @@ -81,7 +82,16 @@ mgoInfo := a.backend.MongoConnectionInfo() logger.Debugf("mongo info from state %+v", mgoInfo) - dbInfo, err := backups.NewDBInfo(mgoInfo, session) + v, err := a.backend.MongoVersion() + if err != nil { + return errors.Annotatef(err, "discovering mongo version") + } + mongoVersion, err := mongo.NewVersion(v) + if err != nil { + return errors.Trace(err) + } + + dbInfo, err := backups.NewDBInfo(mgoInfo, session, mongoVersion) if err != nil { return errors.Trace(err) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/backup_test.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/backup_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/backup_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/backup_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -29,12 +29,12 @@ ) type backupsCommonSuite struct { - authHttpSuite + authHTTPSuite fake *backupstesting.FakeBackups } func (s *backupsCommonSuite) SetUpTest(c *gc.C) { - s.authHttpSuite.SetUpTest(c) + s.authHTTPSuite.SetUpTest(c) s.fake = &backupstesting.FakeBackups{} s.PatchValue(apiserver.NewBackups, diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/block/client.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/block/client.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/block/client.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/block/client.go 2016-10-13 14:31:49.000000000 +0000 @@ -9,6 +9,7 @@ 
"github.com/juju/juju/apiserver/common" "github.com/juju/juju/apiserver/facade" "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/permission" "github.com/juju/juju/state" ) @@ -43,6 +44,7 @@ resources facade.Resources, authorizer facade.Authorizer, ) (*API, error) { + if !authorizer.AuthClient() { return nil, common.ErrPerm } @@ -57,8 +59,34 @@ return stateShim{st} } +func (a *API) checkCanRead() error { + canRead, err := a.authorizer.HasPermission(permission.ReadAccess, a.access.ModelTag()) + if err != nil && !errors.IsNotFound(err) { + return errors.Trace(err) + } + if !canRead { + return common.ErrPerm + } + return nil +} + +func (a *API) checkCanWrite() error { + canWrite, err := a.authorizer.HasPermission(permission.WriteAccess, a.access.ModelTag()) + if err != nil && !errors.IsNotFound(err) { + return errors.Trace(err) + } + if !canWrite { + return common.ErrPerm + } + return nil +} + // List implements Block.List(). func (a *API) List() (params.BlockResults, error) { + if err := a.checkCanRead(); err != nil { + return params.BlockResults{}, err + } + all, err := a.access.AllBlocks() if err != nil { return params.BlockResults{}, common.ServerError(err) @@ -88,12 +116,20 @@ // SwitchBlockOn implements Block.SwitchBlockOn(). func (a *API) SwitchBlockOn(args params.BlockSwitchParams) params.ErrorResult { + if err := a.checkCanWrite(); err != nil { + return params.ErrorResult{Error: common.ServerError(err)} + } + err := a.access.SwitchBlockOn(state.ParseBlockType(args.Type), args.Message) return params.ErrorResult{Error: common.ServerError(err)} } // SwitchBlockOff implements Block.SwitchBlockOff(). 
func (a *API) SwitchBlockOff(args params.BlockSwitchParams) params.ErrorResult { + if err := a.checkCanWrite(); err != nil { + return params.ErrorResult{Error: common.ServerError(err)} + } + err := a.access.SwitchBlockOff(state.ParseBlockType(args.Type)) return params.ErrorResult{Error: common.ServerError(err)} } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/block/state.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/block/state.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/block/state.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/block/state.go 2016-10-13 14:31:49.000000000 +0000 @@ -3,12 +3,16 @@ package block -import "github.com/juju/juju/state" +import ( + "github.com/juju/juju/state" + names "gopkg.in/juju/names.v2" +) type blockAccess interface { AllBlocks() ([]state.Block, error) SwitchBlockOn(t state.BlockType, msg string) error SwitchBlockOff(t state.BlockType) error + ModelTag() names.ModelTag } type stateShim struct { diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/bundle/bundle.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/bundle/bundle.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/bundle/bundle.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/bundle/bundle.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,89 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +// Package bundle defines an API endpoint for functions dealing with bundles. 
+package bundle + +import ( + "strings" + + "github.com/juju/bundlechanges" + "github.com/juju/errors" + "gopkg.in/juju/charm.v6-unstable" + + "github.com/juju/juju/apiserver/common" + "github.com/juju/juju/apiserver/facade" + "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/constraints" + "github.com/juju/juju/state" + "github.com/juju/juju/storage" +) + +// init registers the Bundle facade. +func init() { + common.RegisterStandardFacade("Bundle", 1, newFacade) +} + +func newFacade(_ *state.State, _ facade.Resources, auth facade.Authorizer) (Bundle, error) { + return NewFacade(auth) +} + +// NewFacade creates and returns a new Bundle API facade. +func NewFacade(auth facade.Authorizer) (Bundle, error) { + if !auth.AuthClient() { + return nil, common.ErrPerm + } + return &bundleAPI{}, nil +} + +// Bundle defines the API endpoint used to retrieve bundle changes. +type Bundle interface { + // GetChanges returns the list of changes required to deploy the given + // bundle data. + GetChanges(params.BundleChangesParams) (params.BundleChangesResults, error) +} + +// bundleAPI implements the Bundle interface and is the concrete implementation +// of the API end point. +type bundleAPI struct{} + +// GetChanges returns the list of changes required to deploy the given bundle +// data. The changes are sorted by requirements, so that they can be applied in +// order. 
+func (b *bundleAPI) GetChanges(args params.BundleChangesParams) (params.BundleChangesResults, error) { + var results params.BundleChangesResults + data, err := charm.ReadBundleData(strings.NewReader(args.BundleDataYAML)) + if err != nil { + return results, errors.Annotate(err, "cannot read bundle YAML") + } + verifyConstraints := func(s string) error { + _, err := constraints.Parse(s) + return err + } + verifyStorage := func(s string) error { + _, err := storage.ParseConstraints(s) + return err + } + if err := data.Verify(verifyConstraints, verifyStorage); err != nil { + if err, ok := err.(*charm.VerificationError); ok { + results.Errors = make([]string, len(err.Errors)) + for i, e := range err.Errors { + results.Errors[i] = e.Error() + } + return results, nil + } + // This should never happen as Verify only returns verification errors. + return results, errors.Annotate(err, "cannot verify bundle") + } + changes := bundlechanges.FromData(data) + results.Changes = make([]*params.BundleChange, len(changes)) + for i, c := range changes { + results.Changes[i] = ¶ms.BundleChange{ + Id: c.Id(), + Method: c.Method(), + Args: c.GUIArgs(), + Requires: c.Requires(), + } + } + return results, nil +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/bundle/bundle_test.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/bundle/bundle_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/bundle/bundle_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/bundle/bundle_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,200 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+ +package bundle_test + +import ( + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + "gopkg.in/juju/names.v2" + + "github.com/juju/juju/apiserver/bundle" + "github.com/juju/juju/apiserver/params" + apiservertesting "github.com/juju/juju/apiserver/testing" + coretesting "github.com/juju/juju/testing" +) + +type bundleSuite struct { + coretesting.BaseSuite + facade bundle.Bundle +} + +var _ = gc.Suite(&bundleSuite{}) + +func (s *bundleSuite) SetUpTest(c *gc.C) { + s.BaseSuite.SetUpTest(c) + auth := apiservertesting.FakeAuthorizer{ + Tag: names.NewUserTag("who"), + } + facade, err := bundle.NewFacade(auth) + c.Assert(err, jc.ErrorIsNil) + s.facade = facade +} + +func (s *bundleSuite) TestGetChangesBundleContentError(c *gc.C) { + args := params.BundleChangesParams{ + BundleDataYAML: ":", + } + r, err := s.facade.GetChanges(args) + c.Assert(err, gc.ErrorMatches, `cannot read bundle YAML: cannot unmarshal bundle data: yaml: did not find expected key`) + c.Assert(r, gc.DeepEquals, params.BundleChangesResults{}) +} + +func (s *bundleSuite) TestGetChangesBundleVerificationErrors(c *gc.C) { + args := params.BundleChangesParams{ + BundleDataYAML: ` + applications: + django: + charm: django + to: [1] + haproxy: + charm: 42 + num_units: -1 + `, + } + r, err := s.facade.GetChanges(args) + c.Assert(err, jc.ErrorIsNil) + c.Assert(r.Changes, gc.IsNil) + c.Assert(r.Errors, jc.SameContents, []string{ + `placement "1" refers to a machine not defined in this bundle`, + `too many units specified in unit placement for application "django"`, + `invalid charm URL in application "haproxy": cannot parse URL "42": name "42" not valid`, + `negative number of units specified on application "haproxy"`, + }) +} + +func (s *bundleSuite) TestGetChangesBundleConstraintsError(c *gc.C) { + args := params.BundleChangesParams{ + BundleDataYAML: ` + applications: + django: + charm: django + num_units: 1 + constraints: bad=wolf + `, + } + r, err := s.facade.GetChanges(args) + 
c.Assert(err, jc.ErrorIsNil) + c.Assert(r.Changes, gc.IsNil) + c.Assert(r.Errors, jc.SameContents, []string{ + `invalid constraints "bad=wolf" in application "django": unknown constraint "bad"`, + }) +} + +func (s *bundleSuite) TestGetChangesBundleStorageError(c *gc.C) { + args := params.BundleChangesParams{ + BundleDataYAML: ` + applications: + django: + charm: django + num_units: 1 + storage: + bad: 0,100M + `, + } + r, err := s.facade.GetChanges(args) + c.Assert(err, jc.ErrorIsNil) + c.Assert(r.Changes, gc.IsNil) + c.Assert(r.Errors, jc.SameContents, []string{ + `invalid storage "bad" in application "django": cannot parse count: count must be greater than zero, got "0"`, + }) +} + +func (s *bundleSuite) TestGetChangesSuccess(c *gc.C) { + args := params.BundleChangesParams{ + BundleDataYAML: ` + applications: + django: + charm: django + options: + debug: true + storage: + tmpfs: tmpfs,1G + haproxy: + charm: cs:trusty/haproxy-42 + relations: + - - django:web + - haproxy:web + `, + } + r, err := s.facade.GetChanges(args) + c.Assert(err, jc.ErrorIsNil) + c.Assert(r.Changes, jc.DeepEquals, []*params.BundleChange{{ + Id: "addCharm-0", + Method: "addCharm", + Args: []interface{}{"django", ""}, + }, { + Id: "deploy-1", + Method: "deploy", + Args: []interface{}{ + "$addCharm-0", + "", + "django", + map[string]interface{}{"debug": true}, + "", + map[string]string{"tmpfs": "tmpfs,1G"}, + map[string]string{}, + map[string]int{}, + }, + Requires: []string{"addCharm-0"}, + }, { + Id: "addCharm-2", + Method: "addCharm", + Args: []interface{}{"cs:trusty/haproxy-42", "trusty"}, + }, { + Id: "deploy-3", + Method: "deploy", + Args: []interface{}{ + "$addCharm-2", + "trusty", + "haproxy", + map[string]interface{}{}, + "", + map[string]string{}, + map[string]string{}, + map[string]int{}, + }, + Requires: []string{"addCharm-2"}, + }, { + Id: "addRelation-4", + Method: "addRelation", + Args: []interface{}{"$deploy-1:web", "$deploy-3:web"}, + Requires: []string{"deploy-1", "deploy-3"}, 
+ }}) + c.Assert(r.Errors, gc.IsNil) +} + +func (s *bundleSuite) TestGetChangesBundleEndpointBindingsSuccess(c *gc.C) { + args := params.BundleChangesParams{ + BundleDataYAML: ` + applications: + django: + charm: django + num_units: 1 + bindings: + url: public + `, + } + r, err := s.facade.GetChanges(args) + c.Assert(err, jc.ErrorIsNil) + + for _, change := range r.Changes { + if change.Method == "deploy" { + c.Assert(change, jc.DeepEquals, &params.BundleChange{ + Id: "deploy-1", + Method: "deploy", + Args: []interface{}{ + "$addCharm-0", + "", + "django", + map[string]interface{}{}, + "", + map[string]string{}, + map[string]string{"url": "public"}, + map[string]int{}, + }, + Requires: []string{"addCharm-0"}, + }) + } + } +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/bundle/package_test.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/bundle/package_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/bundle/package_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/bundle/package_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,14 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package bundle_test + +import ( + stdtesting "testing" + + "github.com/juju/juju/testing" +) + +func Test(t *stdtesting.T) { + testing.MgoTestPackage(t) +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/cert_test.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/cert_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/cert_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/cert_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,171 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+ +package apiserver_test + +import ( + "crypto/tls" + "runtime" + "time" + + "github.com/juju/loggo" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/api" + "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/cert" + coretesting "github.com/juju/juju/testing" +) + +type certSuite struct { + apiserverBaseSuite +} + +var _ = gc.Suite(&certSuite{}) + +func (s *certSuite) TestUpdateCert(c *gc.C) { + config := s.sampleConfig(c) + certChanged := make(chan params.StateServingInfo) + config.CertChanged = certChanged + + srv := s.newServer(c, config) + + // Sanity check that the server works initially. + conn := s.OpenAPIAsAdmin(c, srv) + c.Assert(pingConn(conn), jc.ErrorIsNil) + + // Create a new certificate that's a year out of date, so we can + // tell that the server is using it because the connection + // will fail. + srvCert, srvKey, err := cert.NewServer(coretesting.CACert, coretesting.CAKey, time.Now().AddDate(-1, 0, 0), nil) + c.Assert(err, jc.ErrorIsNil) + info := params.StateServingInfo{ + Cert: string(srvCert), + PrivateKey: string(srvKey), + // No other fields are used by the cert listener. + } + certChanged <- info + // Send the same info again so that we are sure that + // the previously received information was acted upon + // (an alternative would be to sleep for a while, but this + // approach is quicker and more certain). + certChanged <- info + + // Check that we can't connect to the server because of the bad certificate. + apiInfo := s.APIInfo(srv) + apiInfo.Tag = s.Owner + apiInfo.Password = ownerPassword + _, err = api.Open(apiInfo, api.DialOpts{}) + c.Assert(err, gc.ErrorMatches, `unable to connect to API: .*: certificate has expired or is not yet valid`) + + // Now change it back and check that we can connect again. + info = params.StateServingInfo{ + Cert: coretesting.ServerCert, + PrivateKey: coretesting.ServerKey, + // No other fields are used by the cert listener. 
+ } + certChanged <- info + certChanged <- info + + conn = s.OpenAPIAsAdmin(c, srv) + c.Assert(pingConn(conn), jc.ErrorIsNil) +} + +func (s *certSuite) TestAutocertFailure(c *gc.C) { + // We don't have a fake autocert server, but we can at least + // smoke test that the autocert path is followed when we try + // to connect to a DNS name - the AutocertURL configured + // by the testing suite is invalid so it should fail. + + config := s.sampleConfig(c) + config.AutocertDNSName = "somewhere.example" + + srv := s.newServer(c, config) + apiInfo := s.APIInfo(srv) + entries := gatherLog(func() { + _, err := tls.Dial("tcp", apiInfo.Addrs[0], &tls.Config{ + ServerName: "somewhere.example", + }) + expectedErr := `x509: certificate is valid for \*, not somewhere.example` + if runtime.GOOS == "windows" { + // For some reason, windows doesn't think that the certificate is signed + // by a valid authority. This could be problematic. + expectedErr = "x509: certificate signed by unknown authority" + } + // We can't get an autocert certificate, so we'll fall back to the local certificate + // which isn't valid for connecting to somewhere.example. + c.Assert(err, gc.ErrorMatches, expectedErr) + }) + // We will log the failure to get the certificate, thus assuring us that we actually tried. 
+ c.Assert(entries, jc.LogMatches, jc.SimpleMessages{{ + loggo.ERROR, + `.*cannot get autocert certificate for "somewhere.example": Get https://0\.1\.2\.3/no-autocert-here: .*`, + }}) +} + +func (s *certSuite) TestAutocertNameMismatch(c *gc.C) { + config := s.sampleConfig(c) + config.AutocertDNSName = "somewhere.example" + + srv := s.newServer(c, config) + apiInfo := s.APIInfo(srv) + + entries := gatherLog(func() { + _, err := tls.Dial("tcp", apiInfo.Addrs[0], &tls.Config{ + ServerName: "somewhere.else", + }) + expectedErr := `x509: certificate is valid for \*, not somewhere.else` + if runtime.GOOS == "windows" { + // For some reason, windows doesn't think that the certificate is signed + // by a valid authority. This could be problematic. + expectedErr = "x509: certificate signed by unknown authority" + } + // We can't get an autocert certificate, so we'll fall back to the local certificate + // which isn't valid for connecting to somewhere.example. + c.Assert(err, gc.ErrorMatches, expectedErr) + }) + // Check that we logged the mismatch. + c.Assert(entries, jc.LogMatches, jc.SimpleMessages{{ + loggo.ERROR, + `.*cannot get autocert certificate for "somewhere.else": acme/autocert: host not configured`, + }}) +} + +func (s *certSuite) TestAutocertNoAutocertDNSName(c *gc.C) { + config := s.sampleConfig(c) + c.Assert(config.AutocertDNSName, gc.Equals, "") // sanity check + srv := s.newServer(c, config) + apiInfo := s.APIInfo(srv) + + entries := gatherLog(func() { + _, err := tls.Dial("tcp", apiInfo.Addrs[0], &tls.Config{ + ServerName: "somewhere.example", + }) + expectedErr := `x509: certificate is valid for \*, not somewhere.example` + if runtime.GOOS == "windows" { + // For some reason, windows doesn't think that the certificate is signed + // by a valid authority. This could be problematic. 
+ expectedErr = "x509: certificate signed by unknown authority" + } + // We can't get an autocert certificate, so we'll fall back to the local certificate + // which isn't valid for connecting to somewhere.example. + c.Assert(err, gc.ErrorMatches, expectedErr) + }) + // Check that we never logged a failure to get the certificate. + c.Assert(entries, gc.Not(jc.LogMatches), jc.SimpleMessages{{ + loggo.ERROR, + `.*cannot get autocert certificate.*`, + }}) +} + +func gatherLog(f func()) []loggo.Entry { + var tw loggo.TestWriter + err := loggo.RegisterWriter("test", &tw) + if err != nil { + panic(err) + } + defer loggo.RemoveWriter("test") + f() + return tw.Log() +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/charmrevisionupdater/updater.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/charmrevisionupdater/updater.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/charmrevisionupdater/updater.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/charmrevisionupdater/updater.go 2016-10-13 14:31:49.000000000 +0000 @@ -139,7 +139,18 @@ resultsIndexedServices = append(resultsIndexedServices, service) } - results, err := charmstore.LatestCharmInfo(client, charms, env.UUID()) + metadata := map[string]string{ + "environment_uuid": env.UUID(), + "cloud": env.Cloud(), + "cloud_region": env.CloudRegion(), + } + cloud, err := st.Cloud(env.Cloud()) + if err != nil { + metadata["provider"] = "unknown" + } else { + metadata["provider"] = cloud.Type + } + results, err := charmstore.LatestCharmInfo(client, charms, metadata) if err != nil { return nil, err } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/charmrevisionupdater/updater_test.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/charmrevisionupdater/updater_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/charmrevisionupdater/updater_test.go 2016-08-16 08:56:25.000000000 +0000 +++ 
juju-core-2.0.0/src/github.com/juju/juju/apiserver/charmrevisionupdater/updater_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -21,6 +21,7 @@ "github.com/juju/juju/charmstore" jujutesting "github.com/juju/juju/juju/testing" "github.com/juju/juju/state" + "github.com/juju/juju/version" ) type charmVersionSuite struct { @@ -153,7 +154,7 @@ c.Assert(err, jc.Satisfies, errors.IsNotFound) } -func (s *charmVersionSuite) TestEnvironmentUUIDUsed(c *gc.C) { +func (s *charmVersionSuite) TestJujuMetadataHeaderIsSent(c *gc.C) { s.AddMachine(c, "0", state.JobManageModel) s.SetupScenario(c) @@ -183,5 +184,16 @@ env, err := s.State.Model() c.Assert(err, jc.ErrorIsNil) - c.Assert(header.Get(charmrepo.JujuMetadataHTTPHeader), gc.Equals, "environment_uuid="+env.UUID()) + cloud, err := s.State.Cloud(env.Cloud()) + c.Assert(err, jc.ErrorIsNil) + expected_header := []string{ + "environment_uuid=" + env.UUID(), + "cloud=" + env.Cloud(), + "cloud_region=" + env.CloudRegion(), + "provider=" + cloud.Type, + "controller_version=" + version.Current.String(), + } + for i, expected := range expected_header { + c.Assert(header[charmrepo.JujuMetadataHTTPHeader][i], gc.Equals, expected) + } } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/charms/client.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/charms/client.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/charms/client.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/charms/client.go 2016-10-13 14:31:49.000000000 +0000 @@ -12,6 +12,7 @@ "github.com/juju/juju/apiserver/common" "github.com/juju/juju/apiserver/facade" "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/permission" "github.com/juju/juju/state" ) @@ -37,12 +38,24 @@ authorizer facade.Authorizer } +func (a *API) checkCanRead() error { + canRead, err := a.authorizer.HasPermission(permission.ReadAccess, a.access.ModelTag()) + if err != nil { + return errors.Trace(err) + } + if !canRead { 
+ return common.ErrPerm + } + return nil +} + // NewAPI returns a new charms API facade. func NewAPI( st *state.State, resources facade.Resources, authorizer facade.Authorizer, ) (*API, error) { + if !authorizer.AuthClient() { return nil, common.ErrPerm } @@ -56,13 +69,17 @@ // CharmInfo returns information about the requested charm. // NOTE: thumper 2016-06-29, this is not a bulk call and probably should be. func (a *API) CharmInfo(args params.CharmURL) (params.CharmInfo, error) { + if err := a.checkCanRead(); err != nil { + return params.CharmInfo{}, errors.Trace(err) + } + curl, err := charm.ParseURL(args.URL) if err != nil { - return params.CharmInfo{}, err + return params.CharmInfo{}, errors.Trace(err) } aCharm, err := a.access.Charm(curl) if err != nil { - return params.CharmInfo{}, err + return params.CharmInfo{}, errors.Trace(err) } info := params.CharmInfo{ Revision: aCharm.Revision(), @@ -79,6 +96,10 @@ // If supplied parameter contains any names, the result will be filtered // to return only the charms with supplied names. func (a *API) List(args params.CharmsList) (params.CharmsListResult, error) { + if err := a.checkCanRead(); err != nil { + return params.CharmsListResult{}, errors.Trace(err) + } + charms, err := a.access.AllCharms() if err != nil { return params.CharmsListResult{}, errors.Annotatef(err, " listing charms ") @@ -101,13 +122,17 @@ // IsMetered returns whether or not the charm is metered. 
func (a *API) IsMetered(args params.CharmURL) (params.IsMeteredResult, error) { + if err := a.checkCanRead(); err != nil { + return params.IsMeteredResult{}, errors.Trace(err) + } + curl, err := charm.ParseURL(args.URL) if err != nil { - return params.IsMeteredResult{false}, err + return params.IsMeteredResult{false}, errors.Trace(err) } aCharm, err := a.access.Charm(curl) if err != nil { - return params.IsMeteredResult{false}, err + return params.IsMeteredResult{false}, errors.Trace(err) } if aCharm.Metrics() != nil && len(aCharm.Metrics().Metrics) > 0 { return params.IsMeteredResult{true}, nil @@ -281,7 +306,15 @@ } return &params.CharmMetrics{ Metrics: convertCharmMetricMap(metrics.Metrics), + Plan: convertCharmPlan(metrics.Plan), + } +} + +func convertCharmPlan(plan *charm.Plan) params.CharmPlan { + if plan == nil { + return params.CharmPlan{Required: false} } + return params.CharmPlan{Required: plan.Required} } func convertCharmMetricMap(metrics map[string]charm.Metric) map[string]params.CharmMetric { diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/charms/client_test.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/charms/client_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/charms/client_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/charms/client_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -171,13 +171,13 @@ about: "invalid URL", charm: "wordpress", url: "not-valid!", - err: `URL has invalid charm or bundle name: "not-valid!"`, + err: `cannot parse URL "not-valid!": name "not-valid!" 
not valid`, }, { about: "invalid schema", charm: "wordpress", url: "not-valid:your-arguments", - err: `charm or bundle URL has invalid schema: "not-valid:your-arguments"`, + err: `cannot parse URL "not-valid:your-arguments": schema "not-valid" not valid`, }, { about: "unknown charm", @@ -195,7 +195,9 @@ c.Check(err, gc.ErrorMatches, t.err) continue } - c.Assert(err, jc.ErrorIsNil) + if c.Check(err, jc.ErrorIsNil) == false { + continue + } c.Check(info, jc.DeepEquals, t.expected) } } @@ -208,6 +210,9 @@ }) c.Assert(err, jc.ErrorIsNil) expected := &params.CharmMetrics{ + Plan: params.CharmPlan{ + Required: true, + }, Metrics: map[string]params.CharmMetric{ "pings": params.CharmMetric{ Type: "gauge", diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/charms/state.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/charms/state.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/charms/state.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/charms/state.go 2016-10-13 14:31:49.000000000 +0000 @@ -5,6 +5,7 @@ import ( "gopkg.in/juju/charm.v6-unstable" + names "gopkg.in/juju/names.v2" "github.com/juju/juju/state" ) @@ -12,6 +13,7 @@ type charmsAccess interface { Charm(curl *charm.URL) (*state.Charm, error) AllCharms() ([]*state.Charm, error) + ModelTag() names.ModelTag } type stateShim struct { @@ -25,3 +27,7 @@ func (s stateShim) AllCharms() ([]*state.Charm, error) { return s.state.AllCharms() } + +func (s stateShim) ModelTag() names.ModelTag { + return s.state.ModelTag() +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/charms.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/charms.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/charms.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/charms.go 2016-10-13 14:31:49.000000000 +0000 @@ -8,7 +8,6 @@ "bytes" "crypto/sha256" "encoding/hex" - "fmt" "io" "io/ioutil" "mime" @@ 
-31,32 +30,63 @@ "github.com/juju/juju/state/storage" ) -// charmsHandler handles charm upload through HTTPS in the API server. -type charmsHandler struct { - ctxt httpContext - dataDir string -} +type FailableHandlerFunc func(http.ResponseWriter, *http.Request) error -// bundleContentSenderFunc functions are responsible for sending a -// response related to a charm bundle. -type bundleContentSenderFunc func(w http.ResponseWriter, r *http.Request, bundle *charm.CharmArchive) error +// CharmsHTTPHandler creates is a http.Handler which serves POST +// requests to a PostHandler and GET requests to a GetHandler. +// +// TODO(katco): This is the beginning of inverting the dependencies in +// this callstack by splitting out the serving mechanism from the +// modules that are processing the requests. The next step is to +// publically expose construction of a suitable PostHandler and +// GetHandler whose goals should be clearly called out in their names, +// (e.g. charmPersitAPI for POSTs). +// +// To accomplish this, we'll have to make the httpContext type public +// so that we can pass it into these public functions. +// +// After we do this, we can then test the individual funcs/structs +// without standing up an entire HTTP server. I.e. actual unit +// tests. If you're in this area and can, please chissle away at this +// problem and update this TODO as needed! Many thanks, hacker! 
+type CharmsHTTPHandler struct { + PostHandler FailableHandlerFunc + GetHandler FailableHandlerFunc +} -func (h *charmsHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { +func (h *CharmsHTTPHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { var err error switch r.Method { case "POST": - err = h.servePost(w, r) + err = errors.Annotate(h.PostHandler(w, r), "cannot upload charm") case "GET": - err = h.serveGet(w, r) + err = errors.Annotate(h.GetHandler(w, r), "cannot retrieve charm") default: - err = errors.MethodNotAllowedf("unsupported method: %q", r.Method) + err = emitUnsupportedMethodErr(r.Method) } + if err != nil { - h.sendError(w, r, err) + if err := sendJSONError(w, r, errors.Trace(err)); err != nil { + logger.Errorf("%v", errors.Annotate(err, "cannot return error to user")) + } } } -func (h *charmsHandler) servePost(w http.ResponseWriter, r *http.Request) error { +// charmsHandler handles charm upload through HTTPS in the API server. +type charmsHandler struct { + ctxt httpContext + dataDir string +} + +// bundleContentSenderFunc functions are responsible for sending a +// response related to a charm bundle. 
+type bundleContentSenderFunc func(w http.ResponseWriter, r *http.Request, bundle *charm.CharmArchive) error + +func (h *charmsHandler) ServePost(w http.ResponseWriter, r *http.Request) error { + if r.Method != "POST" { + return errors.Trace(emitUnsupportedMethodErr(r.Method)) + } + st, _, err := h.ctxt.stateForRequestAuthenticatedUser(r) if err != nil { return errors.Trace(err) @@ -66,28 +96,36 @@ if err != nil { return errors.NewBadRequest(err, "") } - sendStatusAndJSON(w, http.StatusOK, &params.CharmsResponse{CharmURL: charmURL.String()}) - return nil + return errors.Trace(sendStatusAndJSON(w, http.StatusOK, &params.CharmsResponse{CharmURL: charmURL.String()})) } -func (h *charmsHandler) serveGet(w http.ResponseWriter, r *http.Request) error { +func (h *charmsHandler) ServeGet(w http.ResponseWriter, r *http.Request) error { + if r.Method != "GET" { + return errors.Trace(emitUnsupportedMethodErr(r.Method)) + } + st, _, err := h.ctxt.stateForRequestAuthenticated(r) if err != nil { return errors.Trace(err) } // Retrieve or list charm files. // Requires "url" (charm URL) and an optional "file" (the path to the - // charm file) to be included in the query. - charmArchivePath, filePath, err := h.processGet(r, st) + // charm file) to be included in the query. Optionally also receives an + // "icon" query for returning the charm icon or a default one in case the + // charm has no icon. + charmArchivePath, fileArg, serveIcon, err := h.processGet(r, st) if err != nil { // An error occurred retrieving the charm bundle. if errors.IsNotFound(err) { return errors.Trace(err) } + return errors.NewBadRequest(err, "") } + defer os.Remove(charmArchivePath) + var sender bundleContentSenderFunc - switch filePath { + switch fileArg { case "": // The client requested the list of charm files. sender = h.manifestSender @@ -96,40 +134,10 @@ sender = h.archiveSender default: // The client requested a specific file. 
- sender = h.archiveEntrySender(filePath) - } - if err := h.sendBundleContent(w, r, charmArchivePath, sender); err != nil { - return errors.Trace(err) + sender = h.archiveEntrySender(fileArg, serveIcon) } - return nil -} -// sendError sends a JSON-encoded error response. -// Note the difference from the error response sent by -// the sendError function - the error is encoded in the -// Error field as a string, not an Error object. -func (h *charmsHandler) sendError(w http.ResponseWriter, req *http.Request, err error) { - logger.Errorf("returning error from %s %s: %s", req.Method, req.URL, errors.Details(err)) - perr, status := common.ServerErrorAndStatus(err) - sendStatusAndJSON(w, status, &params.CharmsResponse{ - Error: perr.Message, - ErrorCode: perr.Code, - ErrorInfo: perr.Info, - }) -} - -// sendBundleContent uses the given bundleContentSenderFunc to send a response -// related to the charm archive located in the given archivePath. -func (h *charmsHandler) sendBundleContent(w http.ResponseWriter, r *http.Request, archivePath string, sender bundleContentSenderFunc) error { - bundle, err := charm.ReadCharmArchive(archivePath) - if err != nil { - return errors.Annotatef(err, "unable to read archive in %q", archivePath) - } - // The bundleContentSenderFunc will set up and send an appropriate response. 
- if err := sender(w, r, bundle); err != nil { - return errors.Trace(err) - } - return nil + return errors.Trace(sendBundleContent(w, r, charmArchivePath, sender)) } // manifestSender sends a JSON-encoded response to the client including the @@ -139,16 +147,17 @@ if err != nil { return errors.Annotatef(err, "unable to read manifest in %q", bundle.Path) } - sendStatusAndJSON(w, http.StatusOK, &params.CharmsResponse{ + return errors.Trace(sendStatusAndJSON(w, http.StatusOK, &params.CharmsResponse{ Files: manifest.SortedValues(), - }) - return nil + })) } -// archiveEntrySender returns a bundleContentSenderFunc which is responsible for -// sending the contents of filePath included in the given charm bundle. If filePath -// does not identify a file or a symlink, a 403 forbidden error is returned. -func (h *charmsHandler) archiveEntrySender(filePath string) bundleContentSenderFunc { +// archiveEntrySender returns a bundleContentSenderFunc which is responsible +// for sending the contents of filePath included in the given charm bundle. If +// filePath does not identify a file or a symlink, a 403 forbidden error is +// returned. If serveIcon is true, then the charm icon.svg file is sent, or a +// default icon if that file is not included in the charm. +func (h *charmsHandler) archiveEntrySender(filePath string, serveIcon bool) bundleContentSenderFunc { return func(w http.ResponseWriter, r *http.Request, bundle *charm.CharmArchive) error { // TODO(fwereade) 2014-01-27 bug #1285685 // This doesn't handle symlinks helpfully, and should be talking in @@ -189,7 +198,15 @@ io.Copy(w, contents) return nil } - return errors.NotFoundf("charm") + if serveIcon { + // An icon was requested but none was found in the archive so + // return the default icon instead. 
+ w.Header().Set("Content-Type", "image/svg+xml") + w.WriteHeader(http.StatusOK) + io.Copy(w, strings.NewReader(defaultIcon)) + return nil + } + return errors.NotFoundf("charm file") } } @@ -213,12 +230,18 @@ if schema == "" { schema = "local" } + series := query.Get("series") + if series != "" { + if err := charm.ValidateSeries(series); err != nil { + return nil, errors.NewBadRequest(err, "") + } + } // Make sure the content type is zip. contentType := r.Header.Get("Content-Type") if contentType != "application/zip" { - return nil, fmt.Errorf("expected Content-Type: application/zip, got: %v", contentType) + return nil, errors.BadRequestf("expected Content-Type: application/zip, got: %v", contentType) } charmFileName, err := writeCharmToTempFile(r.Body) @@ -233,7 +256,12 @@ } archive, err := charm.ReadCharmArchive(charmFileName) if err != nil { - return nil, fmt.Errorf("invalid charm archive: %v", err) + return nil, errors.BadRequestf("invalid charm archive: %v", err) + } + + name := archive.Meta().Name + if err := charm.ValidateName(name); err != nil { + return nil, errors.NewBadRequest(err, "") } // We got it, now let's reserve a charm URL for it in state. @@ -246,7 +274,7 @@ if schema == "local" { curl, err = st.PrepareLocalCharmUpload(curl) if err != nil { - return nil, err + return nil, errors.Trace(err) } } else { // "cs:" charms may only be uploaded into models which are @@ -267,7 +295,7 @@ if revisionStr != "" { curl.Revision, err = strconv.Atoi(revisionStr) if err != nil { - return nil, errors.NotValidf("revision") + return nil, errors.NewBadRequest(errors.NewNotValid(err, "revision"), "") } } if _, err := st.PrepareStoreCharmUpload(curl); err != nil { @@ -279,7 +307,7 @@ // provider storage and update the state. 
err = h.repackageAndUploadCharm(st, archive, curl) if err != nil { - return nil, err + return nil, errors.Trace(err) } return curl, nil } @@ -350,12 +378,12 @@ } switch len(paths) { case 0: - return "", fmt.Errorf("invalid charm archive: missing metadata.yaml") + return "", errors.Errorf("invalid charm archive: missing metadata.yaml") case 1: default: sort.Sort(byDepth(paths)) if depth(paths[0]) == depth(paths[1]) { - return "", fmt.Errorf("invalid charm archive: ambiguous root directory") + return "", errors.Errorf("invalid charm archive: ambiguous root directory") } } return filepath.Dir(paths[0]), nil @@ -414,88 +442,99 @@ } // processGet handles a charm file GET request after authentication. -// It returns the bundle path, the requested file path (if any) and an error. -func (h *charmsHandler) processGet(r *http.Request, st *state.State) (string, string, error) { +// It returns the bundle path, the requested file path (if any), whether the +// default charm icon has been requested and an error. +func (h *charmsHandler) processGet(r *http.Request, st *state.State) ( + archivePath string, + fileArg string, + serveIcon bool, + err error, +) { + errRet := func(err error) (string, string, bool, error) { + return "", "", false, err + } + query := r.URL.Query() // Retrieve and validate query parameters. 
curlString := query.Get("url") if curlString == "" { - return "", "", fmt.Errorf("expected url=CharmURL query argument") + return errRet(errors.Errorf("expected url=CharmURL query argument")) } curl, err := charm.ParseURL(curlString) if err != nil { - return "", "", errors.Annotate(err, "cannot parse charm URL") + return errRet(errors.Trace(err)) } - - var filePath string - file := query.Get("file") - if file == "" { - filePath = "" - } else { - filePath = path.Clean(file) + fileArg = query.Get("file") + if fileArg != "" { + fileArg = path.Clean(fileArg) + } else if query.Get("icon") == "1" { + serveIcon = true + fileArg = "icon.svg" } - // Prepare the bundle directories. - name := charm.Quote(curlString) - charmArchivePath := filepath.Join( - h.dataDir, - "charm-get-cache", - st.ModelUUID(), - name+".zip", - ) - - // Check if the charm archive is already in the cache. - if _, err := os.Stat(charmArchivePath); os.IsNotExist(err) { - // Download the charm archive and save it to the cache. - if err = h.downloadCharm(st, curl, charmArchivePath); err != nil { - return "", "", errors.Annotate(err, "unable to retrieve and save the charm") - } - } else if err != nil { - return "", "", errors.Annotate(err, "cannot access the charms cache") + // Ensure the working directory exists. + tmpDir := filepath.Join(h.dataDir, "charm-get-tmp") + if err = os.MkdirAll(tmpDir, 0755); err != nil { + return errRet(errors.Annotate(err, "cannot create charms tmp directory")) } - return charmArchivePath, filePath, nil -} -// downloadCharm downloads the given charm name from the provider storage and -// saves the corresponding zip archive to the given charmArchivePath. -func (h *charmsHandler) downloadCharm(st *state.State, curl *charm.URL, charmArchivePath string) error { + // Use the storage to retrieve and save the charm archive. 
storage := storage.NewStorage(st.ModelUUID(), st.MongoSession()) ch, err := st.Charm(curl) if err != nil { - return errors.Annotate(err, "cannot get charm from state") + return errRet(errors.Annotate(err, "cannot get charm from state")) } - // In order to avoid races, the archive is saved in a temporary file which - // is then atomically renamed. The temporary file is created in the - // charm cache directory so that we can safely assume the rename source and - // target live in the same file system. - cacheDir := filepath.Dir(charmArchivePath) - if err = os.MkdirAll(cacheDir, 0755); err != nil { - return errors.Annotate(err, "cannot create the charms cache") - } - tempCharmArchive, err := ioutil.TempFile(cacheDir, "charm") + reader, _, err := storage.Get(ch.StoragePath()) if err != nil { - return errors.Annotate(err, "cannot create charm archive temp file") + return errRet(errors.Annotate(err, "cannot get charm from model storage")) } - defer cleanupFile(tempCharmArchive) + defer reader.Close() - // Use the storage to retrieve and save the charm archive. - reader, _, err := storage.Get(ch.StoragePath()) + charmFile, err := ioutil.TempFile(tmpDir, "charm") if err != nil { - return errors.Annotate(err, "cannot get charm from model storage") + return errRet(errors.Annotate(err, "cannot create charm archive file")) } - defer reader.Close() - if _, err = io.Copy(tempCharmArchive, reader); err != nil { - return errors.Annotate(err, "error processing charm archive download") + if _, err = io.Copy(charmFile, reader); err != nil { + cleanupFile(charmFile) + return errRet(errors.Annotate(err, "error processing charm archive download")) } - tempCharmArchive.Close() - // Note that os.Rename won't fail if the target already exists; - // there's no problem if there's concurrent get requests for the - // same charm. 
- if err = os.Rename(tempCharmArchive.Name(), charmArchivePath); err != nil { - return errors.Annotate(err, "error renaming the charm archive") + charmFile.Close() + return charmFile.Name(), fileArg, serveIcon, nil +} + +// sendJSONError sends a JSON-encoded error response. Note the +// difference from the error response sent by the sendError function - +// the error is encoded in the Error field as a string, not an Error +// object. +func sendJSONError(w http.ResponseWriter, req *http.Request, err error) error { + logger.Errorf("returning error from %s %s: %s", req.Method, req.URL, errors.Details(err)) + perr, status := common.ServerErrorAndStatus(err) + return errors.Trace(sendStatusAndJSON(w, status, &params.CharmsResponse{ + Error: perr.Message, + ErrorCode: perr.Code, + ErrorInfo: perr.Info, + })) +} + +// sendBundleContent uses the given bundleContentSenderFunc to send a +// response related to the charm archive located in the given +// archivePath. +func sendBundleContent( + w http.ResponseWriter, + r *http.Request, + archivePath string, + sender bundleContentSenderFunc, +) error { + bundle, err := charm.ReadCharmArchive(archivePath) + if err != nil { + return errors.Annotatef(err, "unable to read archive in %q", archivePath) + } + // The bundleContentSenderFunc will set up and send an appropriate response. 
+ if err := sender(w, r, bundle); err != nil { + return errors.Trace(err) } return nil } @@ -529,3 +568,7 @@ } return model.MigrationMode() == state.MigrationModeImporting, nil } + +func emitUnsupportedMethodErr(method string) error { + return errors.MethodNotAllowedf("unsupported method: %q", method) +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/charms_test.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/charms_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/charms_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/charms_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,10 +4,10 @@ package apiserver_test import ( - "bytes" "encoding/json" "fmt" "io/ioutil" + "mime" "net/http" "net/url" "os" @@ -21,6 +21,7 @@ "gopkg.in/juju/charm.v6-unstable" "gopkg.in/macaroon-bakery.v1/httpbakery" + "github.com/juju/juju/apiserver" "github.com/juju/juju/apiserver/params" "github.com/juju/juju/state" "github.com/juju/juju/state/storage" @@ -28,11 +29,11 @@ "github.com/juju/juju/testing/factory" ) -// charmsCommonSuite wraps authHttpSuite and adds +// charmsCommonSuite wraps authHTTPSuite and adds // some helper methods suitable for working with the // charms endpoint. 
type charmsCommonSuite struct { - authHttpSuite + authHTTPSuite } func (s *charmsCommonSuite) charmsURL(c *gc.C, query string) *url.URL { @@ -116,12 +117,12 @@ func (s *charmsSuite) TestPOSTRequiresAuth(c *gc.C) { resp := s.sendRequest(c, httpRequestParams{method: "POST", url: s.charmsURI(c, "")}) - s.assertErrorResponse(c, resp, http.StatusUnauthorized, "no credentials provided") + s.assertErrorResponse(c, resp, http.StatusUnauthorized, ".*no credentials provided$") } func (s *charmsSuite) TestGETRequiresAuth(c *gc.C) { resp := s.sendRequest(c, httpRequestParams{method: "GET", url: s.charmsURI(c, "")}) - s.assertErrorResponse(c, resp, http.StatusUnauthorized, "no credentials provided") + s.assertErrorResponse(c, resp, http.StatusUnauthorized, ".*no credentials provided$") } func (s *charmsSuite) TestRequiresPOSTorGET(c *gc.C) { @@ -142,11 +143,11 @@ nonce: "noncy", contentType: "foo/bar", }) - s.assertErrorResponse(c, resp, http.StatusInternalServerError, "tag kind machine not valid") + s.assertErrorResponse(c, resp, http.StatusInternalServerError, ".*tag kind machine not valid$") // Now try a user login. resp = s.authRequest(c, httpRequestParams{method: "POST", url: s.charmsURI(c, "")}) - s.assertErrorResponse(c, resp, http.StatusBadRequest, "expected Content-Type: application/zip.+") + s.assertErrorResponse(c, resp, http.StatusBadRequest, ".*expected Content-Type: application/zip.+") } func (s *charmsSuite) TestUploadFailsWithInvalidZip(c *gc.C) { @@ -157,11 +158,11 @@ // Pretend we upload a zip by setting the Content-Type, so we can // check the error at extraction time later. resp := s.uploadRequest(c, s.charmsURI(c, "?series=quantal"), "application/zip", tempFile.Name()) - s.assertErrorResponse(c, resp, http.StatusBadRequest, "cannot open charm archive: zip: not a valid zip file") + s.assertErrorResponse(c, resp, http.StatusBadRequest, ".*cannot open charm archive: zip: not a valid zip file$") // Now try with the default Content-Type. 
resp = s.uploadRequest(c, s.charmsURI(c, "?series=quantal"), "application/octet-stream", tempFile.Name()) - s.assertErrorResponse(c, resp, http.StatusBadRequest, "expected Content-Type: application/zip, got: application/octet-stream") + s.assertErrorResponse(c, resp, http.StatusBadRequest, ".*expected Content-Type: application/zip, got: application/octet-stream$") } func (s *charmsSuite) TestUploadBumpsRevision(c *gc.C) { @@ -279,7 +280,7 @@ url := s.charmsURL(c, "series=quantal") url.Path = "/model/dead-beef-123456/charms" resp := s.authRequest(c, httpRequestParams{method: "POST", url: url.String()}) - s.assertErrorResponse(c, resp, http.StatusNotFound, `unknown model: "dead-beef-123456"`) + s.assertErrorResponse(c, resp, http.StatusNotFound, `.*unknown model: "dead-beef-123456"$`) } func (s *charmsSuite) TestUploadRepackagesNestedArchives(c *gc.C) { @@ -351,7 +352,7 @@ c.Assert(err, jc.ErrorIsNil) resp := s.uploadRequest(c, s.charmsURI(c, "?schema=cs&series=quantal"), "application/zip", ch.Path) - s.assertErrorResponse(c, resp, 400, "cs charms may only be uploaded during model migration import") + s.assertErrorResponse(c, resp, 400, ".*cs charms may only be uploaded during model migration import$") } func (s *charmsSuite) TestNonLocalCharmUpload(c *gc.C) { @@ -375,7 +376,7 @@ s.setModelImporting(c) ch := testcharms.Repo.CharmArchive(c.MkDir(), "dummy") - resp := s.uploadRequest(c, s.charmsURI(c, "?schema=cs&revision=99"), "application/zip", ch.Path) + resp := s.uploadRequest(c, s.charmsURI(c, "?schema=cs&name=dummy&revision=99"), "application/zip", ch.Path) expectedURL := charm.MustParseURL("cs:dummy-99") s.assertUploadResponse(c, resp, expectedURL.String()) @@ -391,7 +392,7 @@ resp := s.authRequest(c, httpRequestParams{method: "GET", url: uri}) s.assertErrorResponse( c, resp, http.StatusBadRequest, - "expected url=CharmURL query argument", + ".*expected url=CharmURL query argument$", ) } @@ -400,7 +401,7 @@ resp := s.authRequest(c, httpRequestParams{method: 
"GET", url: uri}) s.assertErrorResponse( c, resp, http.StatusNotFound, - `unable to retrieve and save the charm: cannot get charm from state: charm "local:precise/no-such" not found`, + `.*cannot get charm from state: charm "local:precise/no-such" not found$`, ) } @@ -462,6 +463,60 @@ } } +func (s *charmsSuite) TestGetCharmIcon(c *gc.C) { + // Upload the local charms. + ch := testcharms.Repo.CharmArchive(c.MkDir(), "mysql") + s.uploadRequest(c, s.charmsURI(c, "?series=quantal"), "application/zip", ch.Path) + ch = testcharms.Repo.CharmArchive(c.MkDir(), "dummy") + s.uploadRequest(c, s.charmsURI(c, "?series=quantal"), "application/zip", ch.Path) + + // Prepare the tests. + svgMimeType := mime.TypeByExtension(".svg") + iconPath := filepath.Join(testcharms.Repo.CharmDirPath("mysql"), "icon.svg") + icon, err := ioutil.ReadFile(iconPath) + c.Assert(err, jc.ErrorIsNil) + tests := []struct { + about string + query string + expectType string + expectBody string + }{{ + about: "icon found", + query: "?url=local:quantal/mysql-1&file=icon.svg", + expectBody: string(icon), + }, { + about: "icon not found", + query: "?url=local:quantal/dummy-1&file=icon.svg", + }, { + about: "default icon requested: icon found", + query: "?url=local:quantal/mysql-1&icon=1", + expectBody: string(icon), + }, { + about: "default icon requested: icon not found", + query: "?url=local:quantal/dummy-1&icon=1", + expectBody: apiserver.DefaultIcon, + }, { + about: "default icon request ignored", + query: "?url=local:quantal/mysql-1&file=revision&icon=1", + expectType: "text/plain; charset=utf-8", + expectBody: "1", + }} + + for i, test := range tests { + c.Logf("\ntest %d: %s", i, test.about) + uri := s.charmsURI(c, test.query) + resp := s.authRequest(c, httpRequestParams{method: "GET", url: uri}) + if test.expectBody == "" { + s.assertErrorResponse(c, resp, http.StatusNotFound, ".*charm file not found$") + continue + } + if test.expectType == "" { + test.expectType = svgMimeType + } + 
s.assertGetFileResponse(c, resp, test.expectBody, test.expectType) + } +} + func (s *charmsSuite) TestGetWorksForControllerMachines(c *gc.C) { // Make a controller machine. const nonce = "noncey" @@ -539,7 +594,7 @@ url := s.charmsURL(c, "url=local:quantal/dummy-1&file=revision") url.Path = "/model/dead-beef-123456/charms" resp := s.authRequest(c, httpRequestParams{method: "GET", url: url.String()}) - s.assertErrorResponse(c, resp, http.StatusNotFound, `unknown model: "dead-beef-123456"`) + s.assertErrorResponse(c, resp, http.StatusNotFound, `.*unknown model: "dead-beef-123456"$`) } func (s *charmsSuite) TestGetReturnsManifest(c *gc.C) { @@ -558,30 +613,27 @@ c.Assert(ctype, gc.Equals, params.ContentTypeJSON) } -func (s *charmsSuite) TestGetUsesCache(c *gc.C) { - // Add a fake charm archive in the cache directory. - cacheDir := filepath.Join(s.DataDir(), "charm-get-cache", s.State.ModelUUID()) - err := os.MkdirAll(cacheDir, 0755) - c.Assert(err, jc.ErrorIsNil) +func (s *charmsSuite) TestNoTempFilesLeftBehind(c *gc.C) { + // Add the dummy charm. + ch := testcharms.Repo.CharmArchive(c.MkDir(), "dummy") + s.uploadRequest(c, s.charmsURI(c, "?series=quantal"), "application/zip", ch.Path) - // Create and save a bundle in it. - charmDir := testcharms.Repo.ClonedDir(c.MkDir(), "dummy") - testPath := filepath.Join(charmDir.Path, "utils.js") - contents := "// blah blah" - err = ioutil.WriteFile(testPath, []byte(contents), 0755) - c.Assert(err, jc.ErrorIsNil) - var buffer bytes.Buffer - err = charmDir.ArchiveTo(&buffer) - c.Assert(err, jc.ErrorIsNil) - charmArchivePath := filepath.Join( - cacheDir, charm.Quote("local:trusty/django-42")+".zip") - err = ioutil.WriteFile(charmArchivePath, buffer.Bytes(), 0644) + // Download it. + uri := s.charmsURI(c, "?url=local:quantal/dummy-1&file=*") + resp := s.authRequest(c, httpRequestParams{method: "GET", url: uri}) + assertResponse(c, resp, http.StatusOK, "application/zip") + + // Ensure the tmp directory exists but nothing is in it. 
+ files, err := ioutil.ReadDir(filepath.Join(s.DataDir(), "charm-get-tmp")) c.Assert(err, jc.ErrorIsNil) + c.Check(files, gc.HasLen, 0) +} - // Ensure the cached contents are properly retrieved. - uri := s.charmsURI(c, "?url=local:trusty/django-42&file=utils.js") - resp := s.authRequest(c, httpRequestParams{method: "GET", url: uri}) - s.assertGetFileResponse(c, resp, contents, params.ContentTypeJS) +func (s *charmsSuite) TestPOST_BadCharmNameErrorMessage(c *gc.C) { + url := s.charmsURL(c, "url=local:quantal/bad-name-1&file=revision") + url.Path = "/model/dead-beef-123456/charms" + resp := s.authRequest(c, httpRequestParams{method: "POST", url: url.String()}) + s.assertErrorResponse(c, resp, http.StatusNotFound, `.*unknown model: "dead-beef-123456"$`) } type charmsWithMacaroonsSuite struct { @@ -592,7 +644,7 @@ func (s *charmsWithMacaroonsSuite) SetUpTest(c *gc.C) { s.macaroonAuthEnabled = true - s.authHttpSuite.SetUpTest(c) + s.authHTTPSuite.SetUpTest(c) } func (s *charmsWithMacaroonsSuite) TestWithNoBasicAuthReturnsDischargeRequiredError(c *gc.C) { @@ -602,7 +654,7 @@ }) charmResponse := s.assertResponse(c, resp, http.StatusUnauthorized) - c.Assert(charmResponse.Error, gc.Equals, "verification failed: no macaroons") + c.Assert(charmResponse.Error, gc.Matches, ".*verification failed: no macaroons$") c.Assert(charmResponse.ErrorCode, gc.Equals, params.CodeDischargeRequired) c.Assert(charmResponse.ErrorInfo, gc.NotNil) c.Assert(charmResponse.ErrorInfo.Macaroon, gc.NotNil) @@ -620,7 +672,7 @@ url: s.charmsURI(c, ""), contentType: "foo/bar", }) - s.assertErrorResponse(c, resp, http.StatusBadRequest, "expected Content-Type: application/zip.+") + s.assertErrorResponse(c, resp, http.StatusBadRequest, ".*expected Content-Type: application/zip.+") c.Assert(checkCount, gc.Equals, 1) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/client/api_test.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/client/api_test.go --- 
juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/client/api_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/client/api_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -22,6 +22,7 @@ "github.com/juju/juju/mongo" "github.com/juju/juju/state" "github.com/juju/juju/state/multiwatcher" + "github.com/juju/juju/state/presence" "github.com/juju/juju/status" coretesting "github.com/juju/juju/testing" "github.com/juju/juju/testing/factory" @@ -112,7 +113,7 @@ func setDefaultStatus(c *gc.C, entity setStatuser) { now := time.Now() s := status.StatusInfo{ - Status: status.StatusStarted, + Status: status.Started, Message: "", Since: &now, } @@ -124,7 +125,7 @@ stateInfo := s.MongoInfo(c) stateInfo.Tag = e.Tag() stateInfo.Password = password - st, err := state.Open(s.State.ModelTag(), stateInfo, mongo.DialOpts{ + st, err := state.Open(s.State.ModelTag(), s.State.ControllerTag(), stateInfo, mongo.DialOpts{ Timeout: 25 * time.Millisecond, }, nil) if err == nil { @@ -159,9 +160,10 @@ // also tested live and it works. 
var scenarioStatus = ¶ms.FullStatus{ Model: params.ModelStatusInfo{ - Name: "controller", - Cloud: "dummy", - Version: "1.2.3", + Name: "controller", + CloudTag: "cloud-dummy", + CloudRegion: "dummy-region", + Version: "1.2.3", }, Machines: map[string]params.MachineStatus{ "0": { @@ -172,7 +174,7 @@ Data: make(map[string]interface{}), }, InstanceStatus: params.DetailedStatus{ - Status: status.StatusPending.String(), + Status: status.Pending.String(), Data: make(map[string]interface{}), }, Series: "quantal", @@ -189,7 +191,7 @@ Data: make(map[string]interface{}), }, InstanceStatus: params.DetailedStatus{ - Status: status.StatusPending.String(), + Status: status.Pending.String(), Data: make(map[string]interface{}), }, Series: "quantal", @@ -206,7 +208,7 @@ Data: make(map[string]interface{}), }, InstanceStatus: params.DetailedStatus{ - Status: status.StatusPending.String(), + Status: status.Pending.String(), Data: make(map[string]interface{}), }, Series: "quantal", @@ -224,7 +226,11 @@ "logging-directory": {"wordpress"}, }, SubordinateTo: []string{"wordpress"}, - // TODO(fwereade): why does the subordinate have no service status? 
+ Status: params.DetailedStatus{ + Status: "waiting", + Info: "waiting for machine", + Data: map[string]interface{}{}, + }, }, "mysql": { Charm: "local:quantal/mysql-1", @@ -233,8 +239,8 @@ SubordinateTo: []string{}, Units: map[string]params.UnitStatus{}, Status: params.DetailedStatus{ - Status: "unknown", - Info: "Waiting for agent initialization to finish", + Status: "waiting", + Info: "waiting for machine", Data: map[string]interface{}{}, }, }, @@ -265,8 +271,8 @@ Subordinates: map[string]params.UnitStatus{ "logging/0": { WorkloadStatus: params.DetailedStatus{ - Status: "unknown", - Info: "Waiting for agent initialization to finish", + Status: "waiting", + Info: "waiting for machine", Data: make(map[string]interface{}), }, AgentStatus: params.DetailedStatus{ @@ -278,8 +284,8 @@ }, "wordpress/1": { WorkloadStatus: params.DetailedStatus{ - Status: "unknown", - Info: "Waiting for agent initialization to finish", + Status: "waiting", + Info: "waiting for machine", Data: make(map[string]interface{}), }, AgentStatus: params.DetailedStatus{ @@ -292,8 +298,8 @@ Subordinates: map[string]params.UnitStatus{ "logging/1": { WorkloadStatus: params.DetailedStatus{ - Status: "unknown", - Info: "Waiting for agent initialization to finish", + Status: "waiting", + Info: "waiting for machine", Data: make(map[string]interface{}), }, AgentStatus: params.DetailedStatus{ @@ -447,7 +453,7 @@ } now := time.Now() sInfo := status.StatusInfo{ - Status: status.StatusError, + Status: status.Error, Message: "blam", Data: sd, Since: &now, @@ -471,11 +477,21 @@ s.setAgentPresence(c, wu) add(lu) } + allMachines, err := s.State.AllMachines() + c.Assert(err, jc.ErrorIsNil) + for _, m := range allMachines { + s.setAgentPresence(c, m) + } return } -func (s *baseSuite) setAgentPresence(c *gc.C, u *state.Unit) { - pinger, err := u.SetAgentPresence() +type presenceEntity interface { + SetAgentPresence() (*presence.Pinger, error) + WaitAgentPresence(timeout time.Duration) (err error) +} + +func (s 
*baseSuite) setAgentPresence(c *gc.C, e presenceEntity) { + pinger, err := e.SetAgentPresence() c.Assert(err, jc.ErrorIsNil) s.AddCleanup(func(c *gc.C) { c.Assert(worker.Stop(pinger), jc.ErrorIsNil) @@ -483,6 +499,6 @@ s.State.StartSync() s.BackingState.StartSync() - err = u.WaitAgentPresence(coretesting.LongWait) + err = e.WaitAgentPresence(coretesting.LongWait) c.Assert(err, jc.ErrorIsNil) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/client/backend.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/client/backend.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/client/backend.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/client/backend.go 2016-10-13 14:31:49.000000000 +0000 @@ -9,10 +9,10 @@ "gopkg.in/juju/names.v2" "github.com/juju/juju/constraints" - "github.com/juju/juju/core/description" "github.com/juju/juju/environs/config" "github.com/juju/juju/instance" "github.com/juju/juju/network" + "github.com/juju/juju/permission" "github.com/juju/juju/state" "github.com/juju/juju/status" ) @@ -32,40 +32,41 @@ // Backend contains the state.State methods used in this package, // allowing stubs to be created for testing. 
type Backend interface { - FindEntity(names.Tag) (state.Entity, error) - Unit(string) (Unit, error) - Application(string) (*state.Application, error) - Machine(string) (*state.Machine, error) - AllMachines() ([]*state.Machine, error) - AllApplications() ([]*state.Application, error) - AllRelations() ([]*state.Relation, error) - AddOneMachine(state.MachineTemplate) (*state.Machine, error) + AbortCurrentUpgrade() error + AddControllerUser(state.UserAccessSpec) (permission.UserAccess, error) AddMachineInsideMachine(state.MachineTemplate, string, instance.ContainerType) (*state.Machine, error) AddMachineInsideNewMachine(template, parentTemplate state.MachineTemplate, containerType instance.ContainerType) (*state.Machine, error) - ModelConstraints() (constraints.Value, error) - ModelConfig() (*config.Config, error) - ModelConfigValues() (config.ConfigValues, error) - UpdateModelConfig(map[string]interface{}, []string, state.ValidateConfigFunc) error - SetModelConstraints(constraints.Value) error - ModelUUID() string - ModelTag() names.ModelTag - Model() (*state.Model, error) - ForModel(tag names.ModelTag) (*state.State, error) - SetModelAgentVersion(version.Number) error - SetAnnotations(state.GlobalEntity, map[string]string) error + AddModelUser(string, state.UserAccessSpec) (permission.UserAccess, error) + AddOneMachine(state.MachineTemplate) (*state.Machine, error) + AddRelation(...state.Endpoint) (*state.Relation, error) + AllApplications() ([]*state.Application, error) + AllMachines() ([]*state.Machine, error) + AllRelations() ([]*state.Relation, error) Annotations(state.GlobalEntity) (map[string]string, error) - InferEndpoints(...string) ([]state.Endpoint, error) - EndpointsRelation(...state.Endpoint) (*state.Relation, error) + APIHostPorts() ([][]network.HostPort, error) + Application(string) (*state.Application, error) + ApplicationLeaders() (map[string]string, error) Charm(*charm.URL) (*state.Charm, error) + EndpointsRelation(...state.Endpoint) 
(*state.Relation, error) + FindEntity(names.Tag) (state.Entity, error) + ForModel(tag names.ModelTag) (*state.State, error) + InferEndpoints(...string) ([]state.Endpoint, error) + LatestMigration() (state.ModelMigration, error) LatestPlaceholderCharm(*charm.URL) (*state.Charm, error) - AddRelation(...state.Endpoint) (*state.Relation, error) - AddModelUser(state.UserAccessSpec) (description.UserAccess, error) - AddControllerUser(state.UserAccessSpec) (description.UserAccess, error) + Machine(string) (*state.Machine, error) + Model() (*state.Model, error) + ModelConfig() (*config.Config, error) + ModelConfigValues() (config.ConfigValues, error) + ModelConstraints() (constraints.Value, error) + ModelTag() names.ModelTag + ModelUUID() string RemoveUserAccess(names.UserTag, names.Tag) error + SetAnnotations(state.GlobalEntity, map[string]string) error + SetModelAgentVersion(version.Number) error + SetModelConstraints(constraints.Value) error + Unit(string) (Unit, error) + UpdateModelConfig(map[string]interface{}, []string, state.ValidateConfigFunc) error Watch() *state.Multiwatcher - AbortCurrentUpgrade() error - APIHostPorts() ([][]network.HostPort, error) - LatestModelMigration() (state.ModelMigration, error) } func NewStateBackend(st *state.State) Backend { diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/client/bundles.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/client/bundles.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/client/bundles.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/client/bundles.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,54 +4,19 @@ package client import ( - "strings" - - "github.com/juju/bundlechanges" - "github.com/juju/errors" - "gopkg.in/juju/charm.v6-unstable" - + "github.com/juju/juju/apiserver/bundle" "github.com/juju/juju/apiserver/params" - "github.com/juju/juju/constraints" - "github.com/juju/juju/storage" ) // GetBundleChanges returns the list 
of changes required to deploy the given // bundle data. The changes are sorted by requirements, so that they can be // applied in order. -func (c *Client) GetBundleChanges(args params.GetBundleChangesParams) (params.GetBundleChangesResults, error) { - var results params.GetBundleChangesResults - data, err := charm.ReadBundleData(strings.NewReader(args.BundleDataYAML)) +// This call is deprecated, clients should use the GetChanges endpoint on the +// Bundle facade. +func (c *Client) GetBundleChanges(args params.BundleChangesParams) (params.BundleChangesResults, error) { + bundleAPI, err := bundle.NewFacade(c.api.auth) if err != nil { - return results, errors.Annotate(err, "cannot read bundle YAML") - } - verifyConstraints := func(s string) error { - _, err := constraints.Parse(s) - return err - } - verifyStorage := func(s string) error { - _, err := storage.ParseConstraints(s) - return err - } - if err := data.Verify(verifyConstraints, verifyStorage); err != nil { - if err, ok := err.(*charm.VerificationError); ok { - results.Errors = make([]string, len(err.Errors)) - for i, e := range err.Errors { - results.Errors[i] = e.Error() - } - return results, nil - } - // This should never happen as Verify only returns verification errors. 
- return results, errors.Annotate(err, "cannot verify bundle") - } - changes := bundlechanges.FromData(data) - results.Changes = make([]*params.BundleChangesChange, len(changes)) - for i, c := range changes { - results.Changes[i] = ¶ms.BundleChangesChange{ - Id: c.Id(), - Method: c.Method(), - Args: c.GUIArgs(), - Requires: c.Requires(), - } + return params.BundleChangesResults{}, err } - return results, nil + return bundleAPI.GetChanges(args) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/client/bundles_test.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/client/bundles_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/client/bundles_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/client/bundles_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -10,77 +10,10 @@ "github.com/juju/juju/apiserver/params" ) -func (s *serverSuite) TestGetBundleChangesBundleContentError(c *gc.C) { - args := params.GetBundleChangesParams{ - BundleDataYAML: ":", - } - r, err := s.client.GetBundleChanges(args) - c.Assert(err, gc.ErrorMatches, `cannot read bundle YAML: cannot unmarshal bundle data: yaml: did not find expected key`) - c.Assert(r, gc.DeepEquals, params.GetBundleChangesResults{}) -} - -func (s *serverSuite) TestGetBundleChangesBundleVerificationErrors(c *gc.C) { - args := params.GetBundleChangesParams{ - BundleDataYAML: ` - applications: - django: - charm: django - to: [1] - haproxy: - charm: 42 - num_units: -1 - `, - } - r, err := s.client.GetBundleChanges(args) - c.Assert(err, jc.ErrorIsNil) - c.Assert(r.Changes, gc.IsNil) - c.Assert(r.Errors, jc.SameContents, []string{ - `placement "1" refers to a machine not defined in this bundle`, - `too many units specified in unit placement for application "django"`, - `invalid charm URL in application "haproxy": URL has invalid charm or bundle name: "42"`, - `negative number of units specified on application "haproxy"`, - }) -} - -func (s 
*serverSuite) TestGetBundleChangesBundleConstraintsError(c *gc.C) { - args := params.GetBundleChangesParams{ - BundleDataYAML: ` - applications: - django: - charm: django - num_units: 1 - constraints: bad=wolf - `, - } - r, err := s.client.GetBundleChanges(args) - c.Assert(err, jc.ErrorIsNil) - c.Assert(r.Changes, gc.IsNil) - c.Assert(r.Errors, jc.SameContents, []string{ - `invalid constraints "bad=wolf" in application "django": unknown constraint "bad"`, - }) -} - -func (s *serverSuite) TestGetBundleChangesBundleStorageError(c *gc.C) { - args := params.GetBundleChangesParams{ - BundleDataYAML: ` - applications: - django: - charm: django - num_units: 1 - storage: - bad: 0,100M - `, - } - r, err := s.client.GetBundleChanges(args) - c.Assert(err, jc.ErrorIsNil) - c.Assert(r.Changes, gc.IsNil) - c.Assert(r.Errors, jc.SameContents, []string{ - `invalid storage "bad" in application "django": cannot parse count: count must be greater than zero, got "0"`, - }) -} - +// This test is here only to make sure that the endpoint is still provided by +// the Client facade. For full coverage, see tests in the Bundle facade. 
func (s *serverSuite) TestGetBundleChangesSuccess(c *gc.C) { - args := params.GetBundleChangesParams{ + args := params.BundleChangesParams{ BundleDataYAML: ` applications: django: @@ -98,7 +31,7 @@ } r, err := s.client.GetBundleChanges(args) c.Assert(err, jc.ErrorIsNil) - c.Assert(r.Changes, jc.DeepEquals, []*params.BundleChangesChange{{ + c.Assert(r.Changes, jc.DeepEquals, []*params.BundleChange{{ Id: "addCharm-0", Method: "addCharm", Args: []interface{}{"django", ""}, @@ -142,38 +75,3 @@ }}) c.Assert(r.Errors, gc.IsNil) } - -func (s *serverSuite) TestGetBundleChangesBundleEndpointBindingsSuccess(c *gc.C) { - args := params.GetBundleChangesParams{ - BundleDataYAML: ` - applications: - django: - charm: django - num_units: 1 - bindings: - url: public - `, - } - r, err := s.client.GetBundleChanges(args) - c.Assert(err, jc.ErrorIsNil) - - for _, change := range r.Changes { - if change.Method == "deploy" { - c.Assert(change, jc.DeepEquals, ¶ms.BundleChangesChange{ - Id: "deploy-1", - Method: "deploy", - Args: []interface{}{ - "$addCharm-0", - "", - "django", - map[string]interface{}{}, - "", - map[string]string{}, - map[string]string{"url": "public"}, - map[string]int{}, - }, - Requires: []string{"addCharm-0"}, - }) - } - } -} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/client/client.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/client/client.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/client/client.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/client/client.go 2016-10-13 14:31:49.000000000 +0000 @@ -15,12 +15,12 @@ "github.com/juju/juju/apiserver/facade" "github.com/juju/juju/apiserver/modelconfig" "github.com/juju/juju/apiserver/params" - "github.com/juju/juju/core/description" "github.com/juju/juju/environs" "github.com/juju/juju/environs/config" "github.com/juju/juju/environs/manual" "github.com/juju/juju/instance" "github.com/juju/juju/network" + 
"github.com/juju/juju/permission" "github.com/juju/juju/state" "github.com/juju/juju/state/stateenvirons" jujuversion "github.com/juju/juju/version" @@ -61,6 +61,28 @@ check *common.BlockChecker } +func (c *Client) checkCanRead() error { + canRead, err := c.api.auth.HasPermission(permission.ReadAccess, c.api.stateAccessor.ModelTag()) + if err != nil { + return errors.Trace(err) + } + if !canRead { + return common.ErrPerm + } + return nil +} + +func (c *Client) checkCanWrite() error { + canWrite, err := c.api.auth.HasPermission(permission.WriteAccess, c.api.stateAccessor.ModelTag()) + if err != nil { + return errors.Trace(err) + } + if !canWrite { + return common.ErrPerm + } + return nil +} + func newClient(st *state.State, resources facade.Resources, authorizer facade.Authorizer) (*Client, error) { urlGetter := common.NewToolsURLGetter(st.ModelUUID(), st) configGetter := stateenvirons.EnvironConfigGetter{st} @@ -116,6 +138,9 @@ } func (c *Client) WatchAll() (params.AllWatcherId, error) { + if err := c.checkCanRead(); err != nil { + return params.AllWatcherId{}, err + } w := c.api.stateAccessor.Watch() return params.AllWatcherId{ AllWatcherId: c.api.resources.Register(w), @@ -124,6 +149,9 @@ // Resolved implements the server side of Client.Resolved. func (c *Client) Resolved(p params.Resolved) error { + if err := c.checkCanWrite(); err != nil { + return err + } if err := c.check.ChangeAllowed(); err != nil { return errors.Trace(err) } @@ -136,6 +164,10 @@ // PublicAddress implements the server side of Client.PublicAddress. func (c *Client) PublicAddress(p params.PublicAddress) (results params.PublicAddressResults, err error) { + if err := c.checkCanRead(); err != nil { + return params.PublicAddressResults{}, err + } + switch { case names.IsValidMachine(p.Target): machine, err := c.api.stateAccessor.Machine(p.Target) @@ -164,6 +196,10 @@ // PrivateAddress implements the server side of Client.PrivateAddress. 
func (c *Client) PrivateAddress(p params.PrivateAddress) (results params.PrivateAddressResults, err error) { + if err := c.checkCanRead(); err != nil { + return params.PrivateAddressResults{}, err + } + switch { case names.IsValidMachine(p.Target): machine, err := c.api.stateAccessor.Machine(p.Target) @@ -193,6 +229,10 @@ // GetModelConstraints returns the constraints for the model. func (c *Client) GetModelConstraints() (params.GetConstraintsResults, error) { + if err := c.checkCanRead(); err != nil { + return params.GetConstraintsResults{}, err + } + cons, err := c.api.stateAccessor.ModelConstraints() if err != nil { return params.GetConstraintsResults{}, err @@ -202,6 +242,10 @@ // SetModelConstraints sets the constraints for the model. func (c *Client) SetModelConstraints(args params.SetConstraints) error { + if err := c.checkCanWrite(); err != nil { + return err + } + if err := c.check.ChangeAllowed(); err != nil { return errors.Trace(err) } @@ -210,6 +254,10 @@ // AddMachines adds new machines with the supplied parameters. func (c *Client) AddMachines(args params.AddMachines) (params.AddMachinesResults, error) { + if err := c.checkCanWrite(); err != nil { + return params.AddMachinesResults{}, err + } + return c.AddMachinesV2(args) } @@ -233,6 +281,10 @@ // InjectMachines injects a machine into state with provisioned status. func (c *Client) InjectMachines(args params.AddMachines) (params.AddMachinesResults, error) { + if err := c.checkCanWrite(); err != nil { + return params.AddMachinesResults{}, err + } + return c.AddMachines(args) } @@ -311,6 +363,10 @@ // ProvisioningScript returns a shell script that, when run, // provisions a machine agent on the machine executing the script. 
func (c *Client) ProvisioningScript(args params.ProvisioningScriptParams) (params.ProvisioningScriptResult, error) { + if err := c.checkCanWrite(); err != nil { + return params.ProvisioningScriptResult{}, err + } + var result params.ProvisioningScriptResult icfg, err := InstanceConfig(c.api.state(), args.MachineId, args.Nonce, args.DataDir) if err != nil { @@ -348,6 +404,10 @@ // DestroyMachines removes a given set of machines. func (c *Client) DestroyMachines(args params.DestroyMachines) error { + if err := c.checkCanWrite(); err != nil { + return err + } + if err := c.check.RemoveAllowed(); !args.Force && err != nil { return errors.Trace(err) } @@ -355,9 +415,11 @@ return common.DestroyMachines(c.api.stateAccessor, args.Force, args.MachineNames...) } -// ModelInfo returns information about the current model (default -// series and type). +// ModelInfo returns information about the current model. func (c *Client) ModelInfo() (params.ModelInfo, error) { + if err := c.checkCanWrite(); err != nil { + return params.ModelInfo{}, err + } state := c.api.stateAccessor conf, err := state.ModelConfig() if err != nil { @@ -367,27 +429,33 @@ if err != nil { return params.ModelInfo{}, err } - info := params.ModelInfo{ - DefaultSeries: config.PreferredSeries(conf), - Cloud: model.Cloud(), - CloudRegion: model.CloudRegion(), - CloudCredential: model.CloudCredential(), - ProviderType: conf.Type(), - Name: conf.Name(), - UUID: model.UUID(), - ControllerUUID: model.ControllerUUID(), + DefaultSeries: config.PreferredSeries(conf), + CloudTag: names.NewCloudTag(model.Cloud()).String(), + CloudRegion: model.CloudRegion(), + ProviderType: conf.Type(), + Name: conf.Name(), + UUID: model.UUID(), + OwnerTag: model.Owner().String(), + Life: params.Life(model.Life().String()), + } + if tag, ok := model.CloudCredential(); ok { + info.CloudCredentialTag = tag.String() } return info, nil } -func modelInfo(st *state.State, user description.UserAccess) (params.ModelUserInfo, error) { +func 
modelInfo(st *state.State, user permission.UserAccess) (params.ModelUserInfo, error) { return common.ModelUserInfo(user, st) } // ModelUserInfo returns information on all users in the model. func (c *Client) ModelUserInfo() (params.ModelUserInfoResults, error) { var results params.ModelUserInfoResults + if err := c.checkCanRead(); err != nil { + return results, err + } + env, err := c.api.stateAccessor.Model() if err != nil { return results, errors.Trace(err) @@ -412,11 +480,19 @@ // AgentVersion returns the current version that the API server is running. func (c *Client) AgentVersion() (params.AgentVersionResult, error) { + if err := c.checkCanRead(); err != nil { + return params.AgentVersionResult{}, err + } + return params.AgentVersionResult{Version: jujuversion.Current}, nil } // SetModelAgentVersion sets the model agent version. func (c *Client) SetModelAgentVersion(args params.SetModelAgentVersion) error { + if err := c.checkCanWrite(); err != nil { + return err + } + if err := c.check.ChangeAllowed(); err != nil { return errors.Trace(err) } @@ -435,6 +511,10 @@ // AbortCurrentUpgrade aborts and archives the current upgrade // synchronisation record, if any. func (c *Client) AbortCurrentUpgrade() error { + if err := c.checkCanWrite(); err != nil { + return err + } + if err := c.check.ChangeAllowed(); err != nil { return errors.Trace(err) } @@ -443,10 +523,18 @@ // FindTools returns a List containing all tools matching the given parameters. 
func (c *Client) FindTools(args params.FindToolsParams) (params.FindToolsResult, error) { + if err := c.checkCanWrite(); err != nil { + return params.FindToolsResult{}, err + } + return c.api.toolsFinder.FindTools(args) } func (c *Client) AddCharm(args params.AddCharm) error { + if err := c.checkCanWrite(); err != nil { + return err + } + return application.AddCharmWithAuthorization(c.api.state(), params.AddCharmWithAuthorization{ URL: args.URL, Channel: args.Channel, @@ -460,17 +548,29 @@ // The authorization macaroon, args.CharmStoreMacaroon, may be // omitted, in which case this call is equivalent to AddCharm. func (c *Client) AddCharmWithAuthorization(args params.AddCharmWithAuthorization) error { + if err := c.checkCanWrite(); err != nil { + return err + } + return application.AddCharmWithAuthorization(c.api.state(), args) } // ResolveCharm resolves the best available charm URLs with series, for charm // locations without a series specified. func (c *Client) ResolveCharms(args params.ResolveCharms) (params.ResolveCharmResults, error) { + if err := c.checkCanWrite(); err != nil { + return params.ResolveCharmResults{}, err + } + return application.ResolveCharms(c.api.state(), args) } // RetryProvisioning marks a provisioning error as transient on the machines. func (c *Client) RetryProvisioning(p params.Entities) (params.ErrorResults, error) { + if err := c.checkCanWrite(); err != nil { + return params.ErrorResults{}, err + } + if err := c.check.ChangeAllowed(); err != nil { return params.ErrorResults{}, errors.Trace(err) } @@ -485,6 +585,10 @@ // APIHostPorts returns the API host/port addresses stored in state. 
func (c *Client) APIHostPorts() (result params.APIHostPortsResult, err error) { + if err := c.checkCanWrite(); err != nil { + return result, err + } + var servers [][]network.HostPort if servers, err = c.api.stateAccessor.APIHostPorts(); err != nil { return params.APIHostPortsResult{}, err diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/client/client_test.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/client/client_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/client/client_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/client/client_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -26,13 +26,13 @@ "github.com/juju/juju/apiserver/params" "github.com/juju/juju/apiserver/testing" "github.com/juju/juju/constraints" - "github.com/juju/juju/core/description" "github.com/juju/juju/environs" "github.com/juju/juju/environs/config" "github.com/juju/juju/environs/manual" toolstesting "github.com/juju/juju/environs/tools/testing" "github.com/juju/juju/instance" "github.com/juju/juju/network" + "github.com/juju/juju/permission" "github.com/juju/juju/rpc" "github.com/juju/juju/state" "github.com/juju/juju/state/multiwatcher" @@ -104,6 +104,24 @@ return pinger } +func (s *serverSuite) TestModelInfo(c *gc.C) { + model, err := s.State.Model() + c.Assert(err, jc.ErrorIsNil) + conf, _ := s.State.ModelConfig() + info, err := s.client.ModelInfo() + c.Assert(err, jc.ErrorIsNil) + c.Assert(info.DefaultSeries, gc.Equals, config.PreferredSeries(conf)) + c.Assert(info.CloudRegion, gc.Equals, model.CloudRegion()) + c.Assert(info.ProviderType, gc.Equals, conf.Type()) + c.Assert(info.Name, gc.Equals, conf.Name()) + c.Assert(info.UUID, gc.Equals, model.UUID()) + c.Assert(info.OwnerTag, gc.Equals, model.Owner().String()) + c.Assert(info.Life, gc.Equals, params.Alive) + // The controller UUID is not returned by the ModelInfo endpoint on the + // Client facade. 
+ c.Assert(info.ControllerUUID, gc.Equals, "") +} + func (s *serverSuite) TestModelUsersInfo(c *gc.C) { testAdmin := s.AdminUserTag(c) owner, err := s.State.UserAccess(testAdmin, s.State.ModelTag()) @@ -111,14 +129,14 @@ localUser1 := s.makeLocalModelUser(c, "ralphdoe", "Ralph Doe") localUser2 := s.makeLocalModelUser(c, "samsmith", "Sam Smith") - remoteUser1 := s.Factory.MakeModelUser(c, &factory.ModelUserParams{User: "bobjohns@ubuntuone", DisplayName: "Bob Johns", Access: description.WriteAccess}) - remoteUser2 := s.Factory.MakeModelUser(c, &factory.ModelUserParams{User: "nicshaw@idprovider", DisplayName: "Nic Shaw", Access: description.WriteAccess}) + remoteUser1 := s.Factory.MakeModelUser(c, &factory.ModelUserParams{User: "bobjohns@ubuntuone", DisplayName: "Bob Johns", Access: permission.WriteAccess}) + remoteUser2 := s.Factory.MakeModelUser(c, &factory.ModelUserParams{User: "nicshaw@idprovider", DisplayName: "Nic Shaw", Access: permission.WriteAccess}) results, err := s.client.ModelUserInfo() c.Assert(err, jc.ErrorIsNil) var expected params.ModelUserInfoResults for _, r := range []struct { - user description.UserAccess + user permission.UserAccess info *params.ModelUserInfo }{ { @@ -131,14 +149,14 @@ }, { localUser1, ¶ms.ModelUserInfo{ - UserName: "ralphdoe@local", + UserName: "ralphdoe", DisplayName: "Ralph Doe", Access: "admin", }, }, { localUser2, ¶ms.ModelUserInfo{ - UserName: "samsmith@local", + UserName: "samsmith", DisplayName: "Sam Smith", Access: "admin", }, @@ -167,7 +185,7 @@ c.Assert(results, jc.DeepEquals, expected) } -func lastConnPointer(c *gc.C, modelUser description.UserAccess, st *state.State) *time.Time { +func lastConnPointer(c *gc.C, modelUser permission.UserAccess, st *state.State) *time.Time { lastConn, err := st.LastModelConnection(modelUser.UserTag) if err != nil { if state.IsNeverConnectedError(err) { @@ -186,7 +204,7 @@ func (a ByUserName) Swap(i, j int) { a[i], a[j] = a[j], a[i] } func (a ByUserName) Less(i, j int) bool { return 
a[i].Result.UserName < a[j].Result.UserName } -func (s *serverSuite) makeLocalModelUser(c *gc.C, username, displayname string) description.UserAccess { +func (s *serverSuite) makeLocalModelUser(c *gc.C, username, displayname string) permission.UserAccess { // factory.MakeUser will create an ModelUser for a local user by defalut user := s.Factory.MakeUser(c, &factory.UserParams{Name: username, DisplayName: displayname}) modelUser, err := s.State.UserAccess(user.UserTag(), s.State.ModelTag()) @@ -397,20 +415,6 @@ c.Assert(status, jc.DeepEquals, scenarioStatus) } -func (s *clientSuite) TestClientModelInfo(c *gc.C) { - model, err := s.State.Model() - c.Assert(err, jc.ErrorIsNil) - conf, _ := s.State.ModelConfig() - info, err := s.APIState.Client().ModelInfo() - c.Assert(err, jc.ErrorIsNil) - c.Assert(info.DefaultSeries, gc.Equals, config.PreferredSeries(conf)) - c.Assert(info.CloudRegion, gc.Equals, model.CloudRegion()) - c.Assert(info.ProviderType, gc.Equals, conf.Type()) - c.Assert(info.Name, gc.Equals, conf.Name()) - c.Assert(info.UUID, gc.Equals, model.UUID()) - c.Assert(info.ControllerUUID, gc.Equals, model.ControllerUUID()) -} - func assertLife(c *gc.C, entity state.Living, life state.Life) { err := entity.Refresh() c.Assert(err, jc.ErrorIsNil) @@ -449,21 +453,21 @@ s.assertForceDestroyMachines(c) } -func (s *clientSuite) testClientUnitResolved(c *gc.C, retry bool, expectedResolvedMode state.ResolvedMode) { +func (s *clientSuite) testClientUnitResolved(c *gc.C, noretry bool, expectedResolvedMode state.ResolvedMode) { // Setup: s.setUpScenario(c) u, err := s.State.Unit("wordpress/0") c.Assert(err, jc.ErrorIsNil) now := time.Now() sInfo := status.StatusInfo{ - Status: status.StatusError, + Status: status.Error, Message: "gaaah", Since: &now, } err = u.SetAgentStatus(sInfo) c.Assert(err, jc.ErrorIsNil) // Code under test: - err = s.APIState.Client().Resolved("wordpress/0", retry) + err = s.APIState.Client().Resolved("wordpress/0", noretry) c.Assert(err, 
jc.ErrorIsNil) // Freshen the unit's state. err = u.Refresh() @@ -475,11 +479,11 @@ } func (s *clientSuite) TestClientUnitResolved(c *gc.C) { - s.testClientUnitResolved(c, false, state.ResolvedNoHooks) + s.testClientUnitResolved(c, true, state.ResolvedNoHooks) } func (s *clientSuite) TestClientUnitResolvedRetry(c *gc.C) { - s.testClientUnitResolved(c, true, state.ResolvedRetryHooks) + s.testClientUnitResolved(c, false, state.ResolvedRetryHooks) } func (s *clientSuite) setupResolved(c *gc.C) *state.Unit { @@ -488,7 +492,7 @@ c.Assert(err, jc.ErrorIsNil) now := time.Now() sInfo := status.StatusInfo{ - Status: status.StatusError, + Status: status.Error, Message: "gaaah", Since: &now, } @@ -498,7 +502,7 @@ } func (s *clientSuite) assertResolved(c *gc.C, u *state.Unit) { - err := s.APIState.Client().Resolved("wordpress/0", true) + err := s.APIState.Client().Resolved("wordpress/0", false) c.Assert(err, jc.ErrorIsNil) // Freshen the unit's state. err = u.Refresh() @@ -510,7 +514,7 @@ } func (s *clientSuite) assertResolvedBlocked(c *gc.C, u *state.Unit, msg string) { - err := s.APIState.Client().Resolved("wordpress/0", true) + err := s.APIState.Client().Resolved("wordpress/0", false) s.AssertBlocked(c, err, msg) } @@ -590,10 +594,10 @@ Id: m.Id(), InstanceId: "i-0", AgentStatus: multiwatcher.StatusInfo{ - Current: status.StatusPending, + Current: status.Pending, }, InstanceStatus: multiwatcher.StatusInfo{ - Current: status.StatusPending, + Current: status.Pending, }, Life: multiwatcher.Life("alive"), Series: "quantal", @@ -613,7 +617,7 @@ func (s *clientSuite) TestClientSetModelConstraints(c *gc.C) { // Set constraints for the model. - cons, err := constraints.Parse("mem=4096", "cpu-cores=2") + cons, err := constraints.Parse("mem=4096", "cores=2") c.Assert(err, jc.ErrorIsNil) err = s.APIState.Client().SetModelConstraints(cons) c.Assert(err, jc.ErrorIsNil) @@ -626,7 +630,7 @@ func (s *clientSuite) assertSetModelConstraints(c *gc.C) { // Set constraints for the model. 
- cons, err := constraints.Parse("mem=4096", "cpu-cores=2") + cons, err := constraints.Parse("mem=4096", "cores=2") c.Assert(err, jc.ErrorIsNil) err = s.APIState.Client().SetModelConstraints(cons) c.Assert(err, jc.ErrorIsNil) @@ -638,7 +642,7 @@ func (s *clientSuite) assertSetModelConstraintsBlocked(c *gc.C, msg string) { // Set constraints for the model. - cons, err := constraints.Parse("mem=4096", "cpu-cores=2") + cons, err := constraints.Parse("mem=4096", "cores=2") c.Assert(err, jc.ErrorIsNil) err = s.APIState.Client().SetModelConstraints(cons) s.AssertBlocked(c, err, msg) @@ -661,7 +665,7 @@ func (s *clientSuite) TestClientGetModelConstraints(c *gc.C) { // Set constraints for the model. - cons, err := constraints.Parse("mem=4096", "cpu-cores=2") + cons, err := constraints.Parse("mem=4096", "cores=2") c.Assert(err, jc.ErrorIsNil) err = s.State.SetModelConstraints(cons) c.Assert(err, jc.ErrorIsNil) @@ -1127,51 +1131,51 @@ c.Check(script, gc.Not(jc.Contains), "apt-get upgrade") } -var resolveCharmTests = []struct { - about string - url string - resolved string - parseErr string - resolveErr string -}{{ - about: "wordpress resolved", - url: "cs:wordpress", - resolved: "cs:trusty/wordpress", -}, { - about: "mysql resolved", - url: "cs:mysql", - resolved: "cs:precise/mysql", -}, { - about: "riak resolved", - url: "cs:riak", - resolved: "cs:trusty/riak", -}, { - about: "fully qualified char reference", - url: "cs:utopic/riak-5", - resolved: "cs:utopic/riak-5", -}, { - about: "charm with series and no revision", - url: "cs:precise/wordpress", - resolved: "cs:precise/wordpress", -}, { - about: "fully qualified reference not found", - url: "cs:utopic/riak-42", - resolveErr: `cannot resolve URL "cs:utopic/riak-42": charm not found`, -}, { - about: "reference not found", - url: "cs:no-such", - resolveErr: `cannot resolve URL "cs:no-such": charm or bundle not found`, -}, { - about: "invalid charm name", - url: "cs:", - parseErr: `URL has invalid charm or bundle name: 
"cs:"`, -}, { - about: "local charm", - url: "local:wordpress", - resolveErr: `only charm store charm references are supported, with cs: schema`, -}} - func (s *clientRepoSuite) TestResolveCharm(c *gc.C) { + resolveCharmTests := []struct { + about string + url string + resolved string + parseErr string + resolveErr string + }{{ + about: "wordpress resolved", + url: "cs:wordpress", + resolved: "cs:trusty/wordpress", + }, { + about: "mysql resolved", + url: "cs:mysql", + resolved: "cs:precise/mysql", + }, { + about: "riak resolved", + url: "cs:riak", + resolved: "cs:trusty/riak", + }, { + about: "fully qualified char reference", + url: "cs:utopic/riak-5", + resolved: "cs:utopic/riak-5", + }, { + about: "charm with series and no revision", + url: "cs:precise/wordpress", + resolved: "cs:precise/wordpress", + }, { + about: "fully qualified reference not found", + url: "cs:utopic/riak-42", + resolveErr: `cannot resolve URL "cs:utopic/riak-42": charm not found`, + }, { + about: "reference not found", + url: "cs:no-such", + resolveErr: `cannot resolve URL "cs:no-such": charm or bundle not found`, + }, { + about: "invalid charm name", + url: "cs:", + parseErr: `cannot parse URL "cs://": name "" not valid`, + }, { + about: "local charm", + url: "local:wordpress", + resolveErr: `only charm store charm references are supported, with cs: schema`, + }} + // Add some charms to be resolved later. 
for _, url := range []string{ "precise/wordpress-1", @@ -1190,18 +1194,22 @@ client := s.APIState.Client() ref, err := charm.ParseURL(test.url) if test.parseErr == "" { - if !c.Check(err, jc.ErrorIsNil) { + if c.Check(err, jc.ErrorIsNil) == false { continue } } else { - c.Assert(err, gc.NotNil) + if c.Check(err, gc.NotNil) == false { + continue + } c.Check(err, gc.ErrorMatches, test.parseErr) continue } curl, err := client.ResolveCharm(ref) if test.resolveErr == "" { - c.Assert(err, jc.ErrorIsNil) + if c.Check(err, jc.ErrorIsNil) == false { + continue + } c.Check(curl.String(), gc.Equals, test.resolved) continue } @@ -1215,7 +1223,7 @@ c.Assert(err, jc.ErrorIsNil) now := time.Now() sInfo := status.StatusInfo{ - Status: status.StatusError, + Status: status.Error, Message: "error", Since: &now, } @@ -1226,7 +1234,7 @@ statusInfo, err := machine.Status() c.Assert(err, jc.ErrorIsNil) - c.Assert(statusInfo.Status, gc.Equals, status.StatusError) + c.Assert(statusInfo.Status, gc.Equals, status.Error) c.Assert(statusInfo.Message, gc.Equals, "error") c.Assert(statusInfo.Data["transient"], jc.IsTrue) } @@ -1236,7 +1244,7 @@ c.Assert(err, jc.ErrorIsNil) now := time.Now() sInfo := status.StatusInfo{ - Status: status.StatusError, + Status: status.Error, Message: "error", Since: &now, } @@ -1250,7 +1258,7 @@ c.Assert(err, jc.ErrorIsNil) statusInfo, err := machine.Status() c.Assert(err, jc.ErrorIsNil) - c.Assert(statusInfo.Status, gc.Equals, status.StatusError) + c.Assert(statusInfo.Status, gc.Equals, status.Error) c.Assert(statusInfo.Message, gc.Equals, "error") c.Assert(statusInfo.Data["transient"], jc.IsTrue) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/client/filtering.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/client/filtering.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/client/filtering.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/client/filtering.go 2016-10-13 
14:31:49.000000000 +0000 @@ -328,7 +328,7 @@ oneValidStatus = true // To preserve current expected behaviour, we only report on workload status // if the agent itself is not in error. - if agentStatus != status.StatusError && workloadStatus.WorkloadMatches(ps) { + if agentStatus != status.Error && workloadStatus.WorkloadMatches(ps) { return true, true, nil } } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/client/instanceconfig.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/client/instanceconfig.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/client/instanceconfig.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/client/instanceconfig.go 2016-10-13 14:31:49.000000000 +0000 @@ -93,14 +93,8 @@ return nil, errors.Annotate(err, "setting up machine authentication") } - // Figure out if secure connections are supported. - info, err := st.StateServingInfo() - if err != nil { - return nil, errors.Annotate(err, "getting state serving info") - } - secureServerConnection := info.CAPrivateKey != "" - icfg, err := instancecfg.NewInstanceConfig(machineId, nonce, modelConfig.ImageStream(), machine.Series(), - secureServerConnection, apiInfo, + icfg, err := instancecfg.NewInstanceConfig(st.ControllerTag(), machineId, nonce, modelConfig.ImageStream(), + machine.Series(), apiInfo, ) if err != nil { return nil, errors.Annotate(err, "initializing instance config") diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/client/perm_test.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/client/perm_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/client/perm_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/client/perm_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -307,8 +307,8 @@ func opClientServiceUpdate(c *gc.C, st api.Connection, mst *state.State) (func(), error) { args := params.ApplicationUpdate{ ApplicationName: 
"no-such-charm", - CharmUrl: "cs:quantal/wordpress-42", - ForceCharmUrl: true, + CharmURL: "cs:quantal/wordpress-42", + ForceCharmURL: true, SettingsStrings: map[string]string{"blog-title": "foo"}, SettingsYAML: `"wordpress": {"blog-title": "foo"}`, } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/client/status.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/client/status.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/client/status.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/client/status.go 2016-10-13 14:31:49.000000000 +0000 @@ -11,7 +11,6 @@ "github.com/juju/errors" "github.com/juju/utils/set" "gopkg.in/juju/charm.v6-unstable" - "gopkg.in/juju/charm.v6-unstable/hooks" "gopkg.in/juju/names.v2" "github.com/juju/juju/apiserver/common" @@ -20,7 +19,6 @@ "github.com/juju/juju/state" "github.com/juju/juju/state/multiwatcher" "github.com/juju/juju/status" - "github.com/juju/juju/worker/uniter/operation" ) func agentStatusFromStatusInfo(s []status.StatusInfo, kind status.HistoryKind) []params.DetailedStatus { @@ -103,6 +101,7 @@ // StatusHistory returns a slice of past statuses for several entities. func (c *Client) StatusHistory(request params.StatusHistoryRequests) params.StatusHistoryResults { + results := params.StatusHistoryResults{} // TODO(perrito666) the contents of the loop could be split into // a oneHistory method for clarity. 
@@ -112,6 +111,15 @@ Date: request.Filter.Date, Delta: request.Filter.Delta, } + if err := c.checkCanRead(); err != nil { + history := params.StatusHistoryResult{ + Error: common.ServerError(err), + } + results.Results = append(results.Results, history) + continue + + } + if err := filter.Validate(); err != nil { history := params.StatusHistoryResult{ Error: common.ServerError(errors.Annotate(err, "cannot validate status history filter")), @@ -125,7 +133,7 @@ hist []params.DetailedStatus ) kind := status.HistoryKind(request.Kind) - err = errors.NotValidf("%q requires a unit, got %t", kind, request.Tag) + err = errors.NotValidf("%q requires a unit, got %T", kind, request.Tag) switch kind { case status.KindUnit, status.KindWorkload, status.KindUnitAgent: var u names.UnitTag @@ -154,17 +162,28 @@ // FullStatus gives the information needed for juju status over the api func (c *Client) FullStatus(args params.StatusParams) (params.FullStatus, error) { + if err := c.checkCanRead(); err != nil { + return params.FullStatus{}, err + } + var noStatus params.FullStatus var context statusContext var err error if context.services, context.units, context.latestCharms, err = fetchAllApplicationsAndUnits(c.api.stateAccessor, len(args.Patterns) <= 0); err != nil { return noStatus, errors.Annotate(err, "could not fetch services and units") - } else if context.machines, err = fetchMachines(c.api.stateAccessor, nil); err != nil { + } + if context.machines, err = fetchMachines(c.api.stateAccessor, nil); err != nil { return noStatus, errors.Annotate(err, "could not fetch machines") - } else if context.relations, err = fetchRelations(c.api.stateAccessor); err != nil { + } + if context.relations, err = fetchRelations(c.api.stateAccessor); err != nil { return noStatus, errors.Annotate(err, "could not fetch relations") } + if len(context.services) > 0 { + if context.leaders, err = c.api.stateAccessor.ApplicationLeaders(); err != nil { + return noStatus, errors.Annotate(err, " could not fetch 
leaders") + } + } logger.Debugf("Applications: %v", context.services) @@ -276,7 +295,7 @@ return info, errors.Annotate(err, "cannot get model") } info.Name = m.Name() - info.Cloud = m.Cloud() + info.CloudTag = names.NewCloudTag(m.Cloud()).String() info.CloudRegion = m.CloudRegion() cfg, err := m.Config() @@ -307,7 +326,7 @@ } func (c *Client) getMigrationStatus() (string, error) { - mig, err := c.api.stateAccessor.LatestModelMigration() + mig, err := c.api.stateAccessor.LatestMigration() if err != nil { if errors.IsNotFound(err) { return "", nil @@ -337,6 +356,7 @@ relations map[string][]*state.Relation units map[string]map[string]*state.Unit latestCharms map[charm.URL]*state.Charm + leaders map[string]string } // fetchMachines returns a map from top level machine id to machines, where machines[0] is the host @@ -502,6 +522,22 @@ logger.Debugf("error fetching public address: %q", err) } status.DNSName = addr.Value + + mAddrs := machine.Addresses() + if len(mAddrs) == 0 { + logger.Debugf("no IP addresses fetched for machine %q", instid) + // At least give it the newly created DNSName address, if it exists. 
+ if addr.Value != "" { + mAddrs = append(mAddrs, addr) + } + } + for _, mAddr := range mAddrs { + switch mAddr.Scope { + case network.ScopeMachineLocal, network.ScopeLinkLocal: + continue + } + status.IPAddresses = append(status.IPAddresses, mAddr.Value) + } } else { if errors.IsNotProvisioned(err) { status.InstanceId = "pending" @@ -597,39 +633,46 @@ } func (context *statusContext) processApplication(service *state.Application) params.ApplicationStatus { - serviceCharmURL, _ := service.CharmURL() + serviceCharm, _, err := service.Charm() + if err != nil { + return params.ApplicationStatus{Err: common.ServerError(err)} + } + var processedStatus = params.ApplicationStatus{ - Charm: serviceCharmURL.String(), + Charm: serviceCharm.URL().String(), Series: service.Series(), Exposed: service.IsExposed(), Life: processLife(service), } - if latestCharm, ok := context.latestCharms[*serviceCharmURL.WithRevision(-1)]; ok && latestCharm != nil { - if latestCharm.Revision() > serviceCharmURL.Revision { + if latestCharm, ok := context.latestCharms[*serviceCharm.URL().WithRevision(-1)]; ok && latestCharm != nil { + if latestCharm.Revision() > serviceCharm.URL().Revision { processedStatus.CanUpgradeTo = latestCharm.String() } } - var err error processedStatus.Relations, processedStatus.SubordinateTo, err = context.processServiceRelations(service) if err != nil { - processedStatus.Err = err + processedStatus.Err = common.ServerError(err) return processedStatus } units := context.units[service.Name()] if service.IsPrincipal() { - processedStatus.Units = context.processUnits(units, serviceCharmURL.String()) - applicationStatus, err := service.Status() - if err != nil { - processedStatus.Err = err - return processedStatus - } - processedStatus.Status.Status = applicationStatus.Status.String() - processedStatus.Status.Info = applicationStatus.Message - processedStatus.Status.Data = applicationStatus.Data - processedStatus.Status.Since = applicationStatus.Since - + 
processedStatus.Units = context.processUnits(units, serviceCharm.URL().String()) + } + applicationStatus, err := service.Status() + if err != nil { + processedStatus.Err = common.ServerError(err) + return processedStatus + } + processedStatus.Status.Status = applicationStatus.Status.String() + processedStatus.Status.Info = applicationStatus.Message + processedStatus.Status.Data = applicationStatus.Data + processedStatus.Status.Since = applicationStatus.Since + + metrics := serviceCharm.Metrics() + planRequired := metrics != nil && metrics.Plan != nil && metrics.Plan.Required + if planRequired || len(service.MetricCredentials()) > 0 { processedStatus.MeterStatuses = context.processUnitMeterStatuses(units) } @@ -639,10 +682,16 @@ status.StatusHistoryFilter{Size: 1}, ) if err != nil { - processedStatus.Err = err + processedStatus.Err = common.ServerError(err) return processedStatus } - versions = append(versions, statuses[0]) + // Even though we fully expect there to be historical values there, + // even the first should be the empty string, the status history + // collection is not added to in a transactional manner, so it may be + // not there even though we'd really like it to be. Such is mongo. + if len(statuses) > 0 { + versions = append(versions, statuses[0]) + } } if len(versions) > 0 { sort.Sort(bySinceDescending(versions)) @@ -721,6 +770,9 @@ } } } + if leader := context.leaders[unit.ApplicationName()]; leader == unit.Name() { + result.Leader = true + } return result } @@ -763,14 +815,7 @@ // processUnitAndAgentStatus retrieves status information for both unit and unitAgents. func processUnitAndAgentStatus(unit *state.Unit, unitStatus *params.UnitStatus) { - unitStatus.AgentStatus, unitStatus.WorkloadStatus = processUnitStatus(unit) - processUnitLost(unit, unitStatus) -} - -// populateStatusFromGetter creates status information for machines, units. 
-func populateStatusFromGetter(agent *params.DetailedStatus, getter status.StatusGetter) { - statusInfo, err := getter.Status() - populateStatusFromStatusInfoAndErr(agent, statusInfo, err) + unitStatus.AgentStatus, unitStatus.WorkloadStatus = processUnit(unit) } // populateStatusFromStatusInfoAndErr creates AgentStatus from the typical output @@ -786,90 +831,31 @@ // processMachine retrieves version and status information for the given machine. // It also returns deprecated legacy status information. func processMachine(machine *state.Machine) (out params.DetailedStatus) { + statusInfo, err := common.MachineStatus(machine) + populateStatusFromStatusInfoAndErr(&out, statusInfo, err) + out.Life = processLife(machine) if t, err := machine.AgentTools(); err == nil { out.Version = t.Version.Number.String() } - - populateStatusFromGetter(&out, machine) - - if out.Err != nil { - return - } - // TODO(perrito666) add status validation. - outSt := status.Status(out.Status) - if outSt == status.StatusPending || outSt == status.StatusAllocating { - // The status is pending - there's no point - // in enquiring about the agent liveness. - return - } - return } // processUnit retrieves version and status information for the given unit. -func processUnitStatus(unit *state.Unit) (agentStatus, workloadStatus params.DetailedStatus) { - // First determine the agent status information. - unitAgent := unit.Agent() - populateStatusFromGetter(&agentStatus, unitAgent) +func processUnit(unit *state.Unit) (agentStatus, workloadStatus params.DetailedStatus) { + agent, workload := common.UnitStatus(unit) + populateStatusFromStatusInfoAndErr(&agentStatus, agent.Status, agent.Err) + populateStatusFromStatusInfoAndErr(&workloadStatus, workload.Status, workload.Err) + agentStatus.Life = processLife(unit) + if t, err := unit.AgentTools(); err == nil { agentStatus.Version = t.Version.Number.String() } - - // Second, determine the workload (unit) status. 
- populateStatusFromGetter(&workloadStatus, unit) return } -func canBeLost(unitStatus *params.UnitStatus) bool { - // TODO(perrito666) add status validation. - switch status.Status(unitStatus.AgentStatus.Status) { - case status.StatusAllocating: - return false - case status.StatusExecuting: - return unitStatus.AgentStatus.Info != operation.RunningHookMessage(string(hooks.Install)) - } - // TODO(fwereade/wallyworld): we should have an explicit place in the model - // to tell us when we've hit this point, instead of piggybacking on top of - // status and/or status history. - // TODO(perrito666) add status validation. - wlStatus := status.Status(unitStatus.WorkloadStatus.Status) - isInstalled := wlStatus != status.StatusMaintenance || unitStatus.WorkloadStatus.Info != status.MessageInstalling - return isInstalled -} - -// processUnitLost determines whether the given unit should be marked as lost. -// TODO(fwereade/wallyworld): this is also model-level code and should sit in -// between state and this package. -func processUnitLost(unit *state.Unit, unitStatus *params.UnitStatus) { - if !canBeLost(unitStatus) { - // The status is allocating or installing - there's no point - // in enquiring about the agent liveness. - return - } - agentAlive, err := unit.AgentPresence() - if err != nil { - return - } - - if unit.Life() != state.Dead && !agentAlive { - // If the unit is in error, it would be bad to throw away - // the error information as when the agent reconnects, that - // error information would then be lost. - // TODO(perrito666) add status validation. - wlStatus := status.Status(unitStatus.WorkloadStatus.Status) - - if wlStatus != status.StatusError { - unitStatus.WorkloadStatus.Status = status.StatusUnknown.String() - unitStatus.WorkloadStatus.Info = fmt.Sprintf("agent is lost, sorry! 
See 'juju status-history %s'", unit.Name()) - } - unitStatus.AgentStatus.Status = status.StatusLost.String() - unitStatus.AgentStatus.Info = "agent is not communicating with the server" - } -} - // filterStatusData limits what agent StatusData data is passed over // the API. This prevents unintended leakage of internal-only data. func filterStatusData(status map[string]interface{}) map[string]interface{} { diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/client/statushistory_test.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/client/statushistory_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/client/statushistory_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/client/statushistory_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -28,7 +28,7 @@ func (s *statusHistoryTestSuite) SetUpTest(c *gc.C) { s.st = &mockState{} - tag := names.NewUserTag("user") + tag := names.NewUserTag("admin") authorizer := &apiservertesting.FakeAuthorizer{Tag: tag} var err error s.api, err = client.NewClient( @@ -125,17 +125,17 @@ func (s *statusHistoryTestSuite) TestStatusHistoryUnitOnly(c *gc.C) { s.st.unitHistory = statusInfoWithDates([]status.StatusInfo{ { - Status: status.StatusMaintenance, + Status: status.Maintenance, Message: "working", }, { - Status: status.StatusActive, + Status: status.Active, Message: "running", }, }) s.st.agentHistory = statusInfoWithDates([]status.StatusInfo{ { - Status: status.StatusIdle, + Status: status.Idle, }, }) h := s.api.StatusHistory(params.StatusHistoryRequests{ @@ -152,20 +152,20 @@ func (s *statusHistoryTestSuite) TestStatusHistoryAgentOnly(c *gc.C) { s.st.unitHistory = statusInfoWithDates([]status.StatusInfo{ { - Status: status.StatusMaintenance, + Status: status.Maintenance, Message: "working", }, { - Status: status.StatusActive, + Status: status.Active, Message: "running", }, }) s.st.agentHistory = statusInfoWithDates([]status.StatusInfo{ { - Status: 
status.StatusExecuting, + Status: status.Executing, }, { - Status: status.StatusIdle, + Status: status.Idle, }, }) h := s.api.StatusHistory(params.StatusHistoryRequests{ @@ -182,24 +182,24 @@ func (s *statusHistoryTestSuite) TestStatusHistoryCombined(c *gc.C) { s.st.unitHistory = statusInfoWithDates([]status.StatusInfo{ { - Status: status.StatusMaintenance, + Status: status.Maintenance, Message: "working", }, { - Status: status.StatusActive, + Status: status.Active, Message: "running", }, { - Status: status.StatusBlocked, + Status: status.Blocked, Message: "waiting", }, }) s.st.agentHistory = statusInfoWithDates([]status.StatusInfo{ { - Status: status.StatusExecuting, + Status: status.Executing, }, { - Status: status.StatusIdle, + Status: status.Idle, }, }) h := s.api.StatusHistory(params.StatusHistoryRequests{ @@ -228,6 +228,10 @@ return "uuid" } +func (m *mockState) ModelTag() names.ModelTag { + return names.NewModelTag("deadbeef-0bad-400d-8000-4b1d0d06f00d") +} + func (m *mockState) Unit(name string) (client.Unit, error) { if name != "unit/0" { return nil, errors.NotFoundf("%v", name) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/client/status_test.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/client/status_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/client/status_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/client/status_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -46,7 +46,7 @@ status, err := client.Status(nil) c.Assert(err, jc.ErrorIsNil) c.Check(status.Model.Name, gc.Equals, "controller") - c.Check(status.Model.Cloud, gc.Equals, "dummy") + c.Check(status.Model.CloudTag, gc.Equals, "cloud-dummy") c.Check(status.Applications, gc.HasLen, 0) c.Check(status.Machines, gc.HasLen, 1) resultMachine, ok := status.Machines[machine.Id()] @@ -57,22 +57,28 @@ c.Check(resultMachine.Series, gc.Equals, machine.Series()) } +func (s *statusSuite) 
TestFullStatusUnitLeadership(c *gc.C) { + u := s.Factory.MakeUnit(c, nil) + s.State.LeadershipClaimer().ClaimLeadership(u.ApplicationName(), u.Name(), time.Minute) + client := s.APIState.Client() + status, err := client.Status(nil) + c.Assert(err, jc.ErrorIsNil) + app, ok := status.Applications[u.ApplicationName()] + c.Assert(ok, jc.IsTrue) + unit, ok := app.Units[u.Name()] + c.Assert(ok, jc.IsTrue) + c.Assert(unit.Leader, jc.IsTrue) +} + var _ = gc.Suite(&statusUnitTestSuite{}) type statusUnitTestSuite struct { baseSuite - *factory.Factory -} - -func (s *statusUnitTestSuite) SetUpTest(c *gc.C) { - s.baseSuite.SetUpTest(c) - // State gets reset per test, so must the factory. - s.Factory = factory.NewFactory(s.State) } func (s *statusUnitTestSuite) TestProcessMachinesWithOneMachineAndOneContainer(c *gc.C) { - host := s.MakeMachine(c, &factory.MachineParams{InstanceId: instance.Id("0")}) - container := s.MakeMachineNested(c, host.Id(), nil) + host := s.Factory.MakeMachine(c, &factory.MachineParams{InstanceId: instance.Id("0")}) + container := s.Factory.MakeMachineNested(c, host.Id(), nil) machines := map[string][]*state.Machine{ host.Id(): {host, container}, } @@ -85,14 +91,14 @@ } func (s *statusUnitTestSuite) TestProcessMachinesWithEmbeddedContainers(c *gc.C) { - host := s.MakeMachine(c, &factory.MachineParams{InstanceId: instance.Id("1")}) - lxdHost := s.MakeMachineNested(c, host.Id(), nil) + host := s.Factory.MakeMachine(c, &factory.MachineParams{InstanceId: instance.Id("1")}) + lxdHost := s.Factory.MakeMachineNested(c, host.Id(), nil) machines := map[string][]*state.Machine{ host.Id(): { host, lxdHost, - s.MakeMachineNested(c, lxdHost.Id(), nil), - s.MakeMachineNested(c, host.Id(), nil), + s.Factory.MakeMachineNested(c, lxdHost.Id(), nil), + s.Factory.MakeMachineNested(c, host.Id(), nil), }, } @@ -124,7 +130,73 @@ } func (s *statusUnitTestSuite) TestMeterStatus(c *gc.C) { - service := s.MakeApplication(c, nil) + meteredCharm := s.Factory.MakeCharm(c, 
&factory.CharmParams{Name: "metered", URL: "cs:quantal/metered"}) + service := s.Factory.MakeApplication(c, &factory.ApplicationParams{Charm: meteredCharm}) + + units, err := service.AllUnits() + c.Assert(err, jc.ErrorIsNil) + c.Assert(units, gc.HasLen, 0) + + for i, unit := range testUnits { + u, err := service.AddUnit() + testUnits[i].unitName = u.Name() + c.Assert(err, jc.ErrorIsNil) + if unit.setStatus != nil { + err := u.SetMeterStatus(unit.setStatus.Code.String(), unit.setStatus.Info) + c.Assert(err, jc.ErrorIsNil) + } + } + + client := s.APIState.Client() + status, err := client.Status(nil) + c.Assert(err, jc.ErrorIsNil) + c.Assert(status, gc.NotNil) + serviceStatus, ok := status.Applications[service.Name()] + c.Assert(ok, gc.Equals, true) + + c.Assert(serviceStatus.MeterStatuses, gc.HasLen, len(testUnits)-1) + for _, unit := range testUnits { + unitStatus, ok := serviceStatus.MeterStatuses[unit.unitName] + + if unit.expectedStatus != nil { + c.Assert(ok, gc.Equals, true) + c.Assert(&unitStatus, gc.DeepEquals, unit.expectedStatus) + } else { + c.Assert(ok, gc.Equals, false) + } + } +} + +func (s *statusUnitTestSuite) TestNoMeterStatusWhenNotRequired(c *gc.C) { + service := s.Factory.MakeApplication(c, nil) + + units, err := service.AllUnits() + c.Assert(err, jc.ErrorIsNil) + c.Assert(units, gc.HasLen, 0) + + for i, unit := range testUnits { + u, err := service.AddUnit() + testUnits[i].unitName = u.Name() + c.Assert(err, jc.ErrorIsNil) + if unit.setStatus != nil { + err := u.SetMeterStatus(unit.setStatus.Code.String(), unit.setStatus.Info) + c.Assert(err, jc.ErrorIsNil) + } + } + + client := s.APIState.Client() + status, err := client.Status(nil) + c.Assert(err, jc.ErrorIsNil) + c.Assert(status, gc.NotNil) + serviceStatus, ok := status.Applications[service.Name()] + c.Assert(ok, gc.Equals, true) + + c.Assert(serviceStatus.MeterStatuses, gc.HasLen, 0) +} + +func (s *statusUnitTestSuite) TestMeterStatusWithCredentials(c *gc.C) { + service := 
s.Factory.MakeApplication(c, nil) + c.Assert(service.SetMetricCredentials([]byte("magic-ticket")), jc.ErrorIsNil) units, err := service.AllUnits() c.Assert(err, jc.ErrorIsNil) @@ -190,7 +262,7 @@ } func (s *statusUnitTestSuite) TestWorkloadVersionLastWins(c *gc.C) { - application := s.MakeApplication(c, nil) + application := s.Factory.MakeApplication(c, nil) unit1 := addUnitWithVersion(c, application, "voltron") unit2 := addUnitWithVersion(c, application, "voltron") unit3 := addUnitWithVersion(c, application, "zarkon") @@ -202,7 +274,7 @@ } func (s *statusUnitTestSuite) TestWorkloadVersionSimple(c *gc.C) { - application := s.MakeApplication(c, nil) + application := s.Factory.MakeApplication(c, nil) unit1 := addUnitWithVersion(c, application, "voltron") appStatus := s.checkAppVersion(c, application, "voltron") @@ -210,7 +282,7 @@ } func (s *statusUnitTestSuite) TestWorkloadVersionBlanksCanWin(c *gc.C) { - application := s.MakeApplication(c, nil) + application := s.Factory.MakeApplication(c, nil) unit1 := addUnitWithVersion(c, application, "voltron") unit2 := addUnitWithVersion(c, application, "") @@ -220,12 +292,12 @@ } func (s *statusUnitTestSuite) TestWorkloadVersionNoUnits(c *gc.C) { - application := s.MakeApplication(c, nil) + application := s.Factory.MakeApplication(c, nil) s.checkAppVersion(c, application, "") } func (s *statusUnitTestSuite) TestWorkloadVersionOkWithUnset(c *gc.C) { - application := s.MakeApplication(c, nil) + application := s.Factory.MakeApplication(c, nil) unit, err := application.AddUnit() c.Assert(err, jc.ErrorIsNil) appStatus := s.checkAppVersion(c, application, "") @@ -255,10 +327,10 @@ checkMigStatus("") // Start it migrating. 
- mig, err := state2.CreateModelMigration(state.ModelMigrationSpec{ + mig, err := state2.CreateMigration(state.MigrationSpec{ InitiatedBy: names.NewUserTag("admin"), TargetInfo: migration.TargetInfo{ - ControllerTag: names.NewModelTag(utils.MustNewUUID().String()), + ControllerTag: names.NewControllerTag(utils.MustNewUUID().String()), Addrs: []string{"1.2.3.4:5555", "4.3.2.1:6666"}, CACert: "cert", AuthTag: names.NewUserTag("user"), diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/client_auth_root.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/client_auth_root.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/client_auth_root.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/client_auth_root.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,87 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package apiserver - -import ( - "github.com/juju/errors" - "github.com/juju/utils/set" - - "github.com/juju/juju/apiserver/common" - "github.com/juju/juju/core/description" - "github.com/juju/juju/rpc" - "github.com/juju/juju/rpc/rpcreflect" -) - -// clientAuthRoot restricts API calls for users of a model. Initially the -// authorisation checks are only for read only access to the model, but in the -// near future, full ACL support is desirable. -type clientAuthRoot struct { - finder rpc.MethodFinder - user description.UserAccess -} - -// newClientAuthRoot returns a new restrictedRoot. -func newClientAuthRoot(finder rpc.MethodFinder, user description.UserAccess) *clientAuthRoot { - return &clientAuthRoot{finder, user} -} - -// FindMethod returns a not supported error if the rootName is not one of the -// facades available at the server root when there is no active model. 
-func (r *clientAuthRoot) FindMethod(rootName string, version int, methodName string) (rpcreflect.MethodCaller, error) { - // The lookup of the name is done first to return a not found error if the - // user is looking for a method that we just don't have. - caller, err := r.finder.FindMethod(rootName, version, methodName) - if err != nil { - return nil, err - } - // ReadOnly User - if r.user.Access == description.ReadAccess { - canCall := isCallAllowableByReadOnlyUser(rootName, methodName) || - isCallReadOnly(rootName, methodName) - if !canCall { - return nil, errors.Trace(common.ErrPerm) - } - } - - // Check if our call requires higher access than the user has. - if doesCallRequireAdmin(rootName, methodName) && r.user.Access != description.AdminAccess { - return nil, errors.Trace(common.ErrPerm) - } - - return caller, nil -} - -// isCallAllowableByReadOnlyUser returns whether or not the method on the facade -// can be called by a read only user. -func isCallAllowableByReadOnlyUser(facade, _ /*method*/ string) bool { - // At this stage, any facade that is part of the restricted root (those - // that are accessable outside of models) are OK because the user would - // have access to those facades if they went through the controller API - // endpoint rather than a model oriented one. - return restrictedRootNames.Contains(facade) -} - -var modelManagerMethods = set.NewStrings( - "ModifyModelAccess", - "CreateModel", -) - -var controllerMethods = set.NewStrings( - "DestroyController", -) - -func doesCallRequireAdmin(facade, method string) bool { - // TODO(perrito666) This should filter adding users to controllers. - // TODO(perrito666) Add an exaustive list of facades/methods that are - // admin only and put them in an authoritative source to be re-used. - // TODO(perrito666) This is a stub, the idea is to maintain the current - // status of permissions until we decide what goes to admin only. 
- switch facade { - case "ModelManager": - return modelManagerMethods.Contains(method) - case "Controller": - return controllerMethods.Contains(method) - } - return false -} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/client_auth_root_test.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/client_auth_root_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/client_auth_root_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/client_auth_root_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,114 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package apiserver - -import ( - "reflect" - - "github.com/juju/errors" - "github.com/juju/juju/apiserver/common" - "github.com/juju/juju/core/description" - "github.com/juju/juju/testing/factory" - - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" - - "github.com/juju/juju/rpc/rpcreflect" - "github.com/juju/juju/state/testing" -) - -type clientAuthRootSuite struct { - testing.StateSuite -} - -var _ = gc.Suite(&clientAuthRootSuite{}) - -func (*clientAuthRootSuite) AssertCallGood(c *gc.C, client *clientAuthRoot, rootName string, version int, methodName string) { - caller, err := client.FindMethod(rootName, version, methodName) - c.Check(err, jc.ErrorIsNil) - c.Assert(caller, gc.NotNil) -} - -func (*clientAuthRootSuite) AssertCallNotImplemented(c *gc.C, client *clientAuthRoot, rootName string, version int, methodName string) { - caller, err := client.FindMethod(rootName, version, methodName) - c.Check(errors.Cause(err), jc.Satisfies, isCallNotImplementedError) - c.Assert(caller, gc.IsNil) -} - -func (s *clientAuthRootSuite) AssertCallErrPerm(c *gc.C, client *clientAuthRoot, rootName string, version int, methodName string) { - caller, err := client.FindMethod(rootName, version, methodName) - c.Check(errors.Cause(err), gc.Equals, common.ErrPerm) - c.Assert(caller, gc.IsNil) 
-} - -func (s *clientAuthRootSuite) TestNormalUser(c *gc.C) { - modelUser := s.Factory.MakeModelUser(c, nil) - client := newClientAuthRoot(&fakeFinder{}, modelUser) - s.AssertCallGood(c, client, "Application", 1, "Deploy") - s.AssertCallGood(c, client, "UserManager", 1, "UserInfo") - s.AssertCallNotImplemented(c, client, "Client", 1, "Unknown") - s.AssertCallNotImplemented(c, client, "Unknown", 1, "Method") -} - -func (s *clientAuthRootSuite) TestAdminUser(c *gc.C) { - modelUser := s.Factory.MakeModelUser(c, &factory.ModelUserParams{Access: description.WriteAccess}) - client := newClientAuthRoot(&fakeFinder{}, modelUser) - s.AssertCallGood(c, client, "Client", 1, "FullStatus") - s.AssertCallErrPerm(c, client, "ModelManager", 2, "ModifyModelAccess") - s.AssertCallErrPerm(c, client, "ModelManager", 2, "CreateModel") - s.AssertCallErrPerm(c, client, "Controller", 3, "DestroyController") - - modelUser = s.Factory.MakeModelUser(c, &factory.ModelUserParams{Access: description.AdminAccess}) - client = newClientAuthRoot(&fakeFinder{}, modelUser) - s.AssertCallGood(c, client, "ModelManager", 2, "ModifyModelAccess") - s.AssertCallGood(c, client, "ModelManager", 2, "CreateModel") - s.AssertCallGood(c, client, "Controller", 3, "DestroyController") -} - -func (s *clientAuthRootSuite) TestReadOnlyUser(c *gc.C) { - modelUser := s.Factory.MakeModelUser(c, &factory.ModelUserParams{Access: description.ReadAccess}) - client := newClientAuthRoot(&fakeFinder{}, modelUser) - // deploys are bad - s.AssertCallErrPerm(c, client, "Application", 1, "Deploy") - // read only commands are fine - s.AssertCallGood(c, client, "Client", 1, "FullStatus") - // calls on the restricted root is also fine - s.AssertCallGood(c, client, "UserManager", 1, "AddUser") - s.AssertCallNotImplemented(c, client, "Client", 1, "Unknown") - s.AssertCallNotImplemented(c, client, "Unknown", 1, "Method") -} - -func isCallNotImplementedError(err error) bool { - _, ok := err.(*rpcreflect.CallNotImplementedError) - return 
ok -} - -type fakeFinder struct{} - -// FindMethod is the only thing we need to implement rpc.MethodFinder. -func (f *fakeFinder) FindMethod(rootName string, version int, methodName string) (rpcreflect.MethodCaller, error) { - _, _, err := lookupMethod(rootName, version, methodName) - if err != nil { - return nil, err - } - // Just return a valid caller. - return &fakeCaller{}, nil -} - -// fakeCaller implements a rpcreflect.MethodCaller. We don't care what the -// actual reflect.Types or values actually are, the caller just has to be -// valid. -type fakeCaller struct{} - -func (*fakeCaller) ParamsType() reflect.Type { - return reflect.TypeOf("") -} - -func (*fakeCaller) ResultType() reflect.Type { - return reflect.TypeOf("") -} - -func (*fakeCaller) Call(_ /*objId*/ string, _ /*arg*/ reflect.Value) (reflect.Value, error) { - return reflect.ValueOf(""), nil -} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/cloud/backend.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/cloud/backend.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/cloud/backend.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/cloud/backend.go 2016-10-13 14:31:49.000000000 +0000 @@ -11,12 +11,16 @@ ) type Backend interface { + Clouds() (map[names.CloudTag]cloud.Cloud, error) Cloud(cloudName string) (cloud.Cloud, error) CloudCredentials(user names.UserTag, cloudName string) (map[string]cloud.Credential, error) ControllerModel() (Model, error) - UpdateCloudCredentials(user names.UserTag, cloudName string, credentials map[string]cloud.Credential) error + ControllerTag() names.ControllerTag + ModelTag() names.ModelTag + UpdateCloudCredential(names.CloudCredentialTag, cloud.Credential) error + RemoveCloudCredential(names.CloudCredentialTag) error - IsControllerAdministrator(names.UserTag) (bool, error) + IsControllerAdmin(names.UserTag) (bool, error) Close() error } @@ -39,6 +43,6 @@ type Model interface { Cloud() string - 
CloudCredential() string + CloudCredential() (names.CloudCredentialTag, bool) CloudRegion() string } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/cloud/cloud.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/cloud/cloud.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/cloud/cloud.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/cloud/cloud.go 2016-10-13 14:31:49.000000000 +0000 @@ -6,18 +6,18 @@ package cloud import ( - "github.com/juju/loggo" + "github.com/juju/errors" "gopkg.in/juju/names.v2" "github.com/juju/juju/apiserver/common" "github.com/juju/juju/apiserver/facade" "github.com/juju/juju/apiserver/params" "github.com/juju/juju/cloud" + "github.com/juju/juju/environs" + "github.com/juju/juju/permission" "github.com/juju/juju/state" ) -var logger = loggo.GetLogger("juju.apiserver.cloud") - func init() { common.RegisterStandardFacade("Cloud", 1, newFacade) } @@ -25,27 +25,28 @@ // CloudAPI implements the model manager interface and is // the concrete implementation of the api end point. type CloudAPI struct { - backend Backend - authorizer facade.Authorizer - apiUser names.UserTag - getCredentialsAuthFunc common.GetAuthFunc - getCloudDefaultsAuthFunc common.GetAuthFunc + backend Backend + authorizer facade.Authorizer + apiUser names.UserTag + getCredentialsAuthFunc common.GetAuthFunc } -func newFacade(st *state.State, resources facade.Resources, auth facade.Authorizer) (*CloudAPI, error) { +func newFacade(st *state.State, _ facade.Resources, auth facade.Authorizer) (*CloudAPI, error) { return NewCloudAPI(NewStateBackend(st), auth) } // NewCloudAPI creates a new API server endpoint for managing the controller's // cloud definition and cloud credentials. 
func NewCloudAPI(backend Backend, authorizer facade.Authorizer) (*CloudAPI, error) { + if !authorizer.AuthClient() { return nil, common.ErrPerm } + getUserAuthFunc := func() (common.AuthFunc, error) { authUser, _ := authorizer.GetAuthTag().(names.UserTag) - isAdmin, err := backend.IsControllerAdministrator(authUser) - if err != nil { + isAdmin, err := authorizer.HasPermission(permission.SuperuserAccess, backend.ControllerTag()) + if err != nil && !errors.IsNotFound(err) { return nil, err } return func(tag names.Tag) bool { @@ -53,19 +54,33 @@ if !ok { return false } - return isAdmin || userTag.Canonical() == authUser.Canonical() + return isAdmin || userTag == authUser }, nil } return &CloudAPI{ - backend: backend, - authorizer: authorizer, - getCredentialsAuthFunc: getUserAuthFunc, - getCloudDefaultsAuthFunc: getUserAuthFunc, + backend: backend, + authorizer: authorizer, + getCredentialsAuthFunc: getUserAuthFunc, }, nil } +// Clouds returns the definitions of all clouds supported by the controller. +func (api *CloudAPI) Clouds() (params.CloudsResult, error) { + var result params.CloudsResult + clouds, err := api.backend.Clouds() + if err != nil { + return result, err + } + result.Clouds = make(map[string]params.Cloud) + for tag, cloud := range clouds { + paramsCloud := cloudToParams(cloud) + result.Clouds[tag.String()] = paramsCloud + } + return result, nil +} + // Cloud returns the cloud definitions for the specified clouds. 
-func (mm *CloudAPI) Cloud(args params.Entities) (params.CloudResults, error) { +func (api *CloudAPI) Cloud(args params.Entities) (params.CloudResults, error) { results := params.CloudResults{ Results: make([]params.CloudResult, len(args.Entities)), } @@ -74,29 +89,12 @@ if err != nil { return nil, err } - cloud, err := mm.backend.Cloud(tag.Id()) + cloud, err := api.backend.Cloud(tag.Id()) if err != nil { return nil, err } - authTypes := make([]string, len(cloud.AuthTypes)) - for i, authType := range cloud.AuthTypes { - authTypes[i] = string(authType) - } - regions := make([]params.CloudRegion, len(cloud.Regions)) - for i, region := range cloud.Regions { - regions[i] = params.CloudRegion{ - Name: region.Name, - Endpoint: region.Endpoint, - StorageEndpoint: region.StorageEndpoint, - } - } - return ¶ms.Cloud{ - Type: cloud.Type, - AuthTypes: authTypes, - Endpoint: cloud.Endpoint, - StorageEndpoint: cloud.StorageEndpoint, - Regions: regions, - }, nil + paramsCloud := cloudToParams(cloud) + return ¶msCloud, nil } for i, arg := range args.Entities { cloud, err := one(arg) @@ -109,21 +107,54 @@ return results, nil } -// CloudDefaults returns the cloud defaults for a set of users. 
-func (mm *CloudAPI) CloudDefaults(args params.Entities) (params.CloudDefaultsResults, error) { - results := params.CloudDefaultsResults{ - Results: make([]params.CloudDefaultsResult, len(args.Entities)), +func cloudToParams(cloud cloud.Cloud) params.Cloud { + authTypes := make([]string, len(cloud.AuthTypes)) + for i, authType := range cloud.AuthTypes { + authTypes[i] = string(authType) + } + regions := make([]params.CloudRegion, len(cloud.Regions)) + for i, region := range cloud.Regions { + regions[i] = params.CloudRegion{ + Name: region.Name, + Endpoint: region.Endpoint, + IdentityEndpoint: region.IdentityEndpoint, + StorageEndpoint: region.StorageEndpoint, + } + } + return params.Cloud{ + Type: cloud.Type, + AuthTypes: authTypes, + Endpoint: cloud.Endpoint, + IdentityEndpoint: cloud.IdentityEndpoint, + StorageEndpoint: cloud.StorageEndpoint, + Regions: regions, } - authFunc, err := mm.getCloudDefaultsAuthFunc() +} + +// DefaultCloud returns the tag of the cloud that models will be +// created in by default. +func (api *CloudAPI) DefaultCloud() (params.StringResult, error) { + controllerModel, err := api.backend.ControllerModel() if err != nil { - return results, err + return params.StringResult{}, err } - controllerModel, err := mm.backend.ControllerModel() + + return params.StringResult{ + Result: names.NewCloudTag(controllerModel.Cloud()).String(), + }, nil +} + +// UserCredentials returns the cloud credentials for a set of users. 
+func (api *CloudAPI) UserCredentials(args params.UserClouds) (params.StringsResults, error) { + results := params.StringsResults{ + Results: make([]params.StringsResult, len(args.UserClouds)), + } + authFunc, err := api.getCredentialsAuthFunc() if err != nil { return results, err } - for i, arg := range args.Entities { - userTag, err := names.ParseUserTag(arg.Tag) + for i, arg := range args.UserClouds { + userTag, err := names.ParseUserTag(arg.UserTag) if err != nil { results.Results[i].Error = common.ServerError(err) continue @@ -132,100 +163,165 @@ results.Results[i].Error = common.ServerError(common.ErrPerm) continue } - isAdmin, err := mm.backend.IsControllerAdministrator(userTag) + cloudTag, err := names.ParseCloudTag(arg.CloudTag) if err != nil { results.Results[i].Error = common.ServerError(err) continue } - cloudDefaults := params.CloudDefaults{ - CloudTag: names.NewCloudTag(controllerModel.Cloud()).String(), - CloudRegion: controllerModel.CloudRegion(), + cloudCredentials, err := api.backend.CloudCredentials(userTag, cloudTag.Id()) + if err != nil { + results.Results[i].Error = common.ServerError(err) + continue } - if isAdmin { - // As a special case, controller admins will default to - // using the same credential that was used to bootstrap. - cloudDefaults.CloudCredential = controllerModel.CloudCredential() + out := make([]string, 0, len(cloudCredentials)) + for tagId := range cloudCredentials { + out = append(out, names.NewCloudCredentialTag(tagId).String()) } - results.Results[i].Result = &cloudDefaults + results.Results[i].Result = out } return results, nil } -// Credentials returns the cloud credentials for a set of users. -func (mm *CloudAPI) Credentials(args params.UserClouds) (params.CloudCredentialsResults, error) { - results := params.CloudCredentialsResults{ - Results: make([]params.CloudCredentialsResult, len(args.UserClouds)), +// UpdateCredentials updates a set of cloud credentials. 
+func (api *CloudAPI) UpdateCredentials(args params.UpdateCloudCredentials) (params.ErrorResults, error) { + results := params.ErrorResults{ + Results: make([]params.ErrorResult, len(args.Credentials)), } - authFunc, err := mm.getCredentialsAuthFunc() + authFunc, err := api.getCredentialsAuthFunc() if err != nil { return results, err } - for i, arg := range args.UserClouds { - userTag, err := names.ParseUserTag(arg.UserTag) + for i, arg := range args.Credentials { + tag, err := names.ParseCloudCredentialTag(arg.Tag) if err != nil { results.Results[i].Error = common.ServerError(err) continue } - if !authFunc(userTag) { + // NOTE(axw) if we add ACLs for cloud credentials, we'll need + // to change this auth check. + if !authFunc(tag.Owner()) { results.Results[i].Error = common.ServerError(common.ErrPerm) continue } - cloudTag, err := names.ParseCloudTag(arg.CloudTag) - if err != nil { + in := cloud.NewCredential( + cloud.AuthType(arg.Credential.AuthType), + arg.Credential.Attributes, + ) + if err := api.backend.UpdateCloudCredential(tag, in); err != nil { + if errors.IsNotFound(err) { + err = errors.Errorf( + "cannot update credential %q: controller does not manage cloud %q", + tag.Name(), tag.Cloud().Id()) + } results.Results[i].Error = common.ServerError(err) continue } - cloudCredentials, err := mm.backend.CloudCredentials(userTag, cloudTag.Id()) + } + return results, nil +} + +// RevokeCredentials revokes a set of cloud credentials. 
+func (api *CloudAPI) RevokeCredentials(args params.Entities) (params.ErrorResults, error) { + results := params.ErrorResults{ + Results: make([]params.ErrorResult, len(args.Entities)), + } + authFunc, err := api.getCredentialsAuthFunc() + if err != nil { + return results, err + } + for i, arg := range args.Entities { + tag, err := names.ParseCloudCredentialTag(arg.Tag) if err != nil { results.Results[i].Error = common.ServerError(err) continue } - out := make(map[string]params.CloudCredential) - for name, credential := range cloudCredentials { - out[name] = params.CloudCredential{ - string(credential.AuthType()), - credential.Attributes(), - } + // NOTE(axw) if we add ACLs for cloud credentials, we'll need + // to change this auth check. + if !authFunc(tag.Owner()) { + results.Results[i].Error = common.ServerError(common.ErrPerm) + continue + } + if err := api.backend.RemoveCloudCredential(tag); err != nil { + results.Results[i].Error = common.ServerError(err) } - results.Results[i].Credentials = out } return results, nil } -// UpdateCredentials updates the cloud credentials for a set of users. -func (mm *CloudAPI) UpdateCredentials(args params.UsersCloudCredentials) (params.ErrorResults, error) { - results := params.ErrorResults{ - Results: make([]params.ErrorResult, len(args.Users)), +// Credential returns the specified cloud credential for each tag, minus secrets. 
+func (api *CloudAPI) Credential(args params.Entities) (params.CloudCredentialResults, error) { + results := params.CloudCredentialResults{ + Results: make([]params.CloudCredentialResult, len(args.Entities)), } - authFunc, err := mm.getCredentialsAuthFunc() + authFunc, err := api.getCredentialsAuthFunc() if err != nil { return results, err } - for i, arg := range args.Users { - userTag, err := names.ParseUserTag(arg.UserTag) + + for i, arg := range args.Entities { + credentialTag, err := names.ParseCloudCredentialTag(arg.Tag) if err != nil { results.Results[i].Error = common.ServerError(err) continue } - if !authFunc(userTag) { + if !authFunc(credentialTag.Owner()) { results.Results[i].Error = common.ServerError(common.ErrPerm) continue } - cloudTag, err := names.ParseCloudTag(arg.CloudTag) + + // Helper to look up and cache credential schemas for clouds. + schemaCache := make(map[string]map[cloud.AuthType]cloud.CredentialSchema) + credentialSchemas := func() (map[cloud.AuthType]cloud.CredentialSchema, error) { + cloudName := credentialTag.Cloud().Id() + if s, ok := schemaCache[cloudName]; ok { + return s, nil + } + cloud, err := api.backend.Cloud(cloudName) + if err != nil { + return nil, err + } + provider, err := environs.Provider(cloud.Type) + if err != nil { + return nil, err + } + schema := provider.CredentialSchemas() + schemaCache[cloudName] = schema + return schema, nil + } + cloudCredentials, err := api.backend.CloudCredentials(credentialTag.Owner(), credentialTag.Cloud().Id()) if err != nil { results.Results[i].Error = common.ServerError(err) continue } - in := make(map[string]cloud.Credential) - for name, credential := range arg.Credentials { - in[name] = cloud.NewCredential( - cloud.AuthType(credential.AuthType), credential.Attributes, - ) + + cred, ok := cloudCredentials[credentialTag.Id()] + if !ok { + results.Results[i].Error = common.ServerError(errors.NotFoundf("credential %q", credentialTag.Name())) + continue } - if err := 
mm.backend.UpdateCloudCredentials(userTag, cloudTag.Id(), in); err != nil { + + schemas, err := credentialSchemas() + if err != nil { results.Results[i].Error = common.ServerError(err) continue } + + attrs := cred.Attributes() + var redacted []string + // Mask out the secrets. + if s, ok := schemas[cred.AuthType()]; ok { + for _, attr := range s { + if attr.Hidden { + delete(attrs, attr.Name) + redacted = append(redacted, attr.Name) + } + } + } + results.Results[i].Result = ¶ms.CloudCredential{ + AuthType: string(cred.AuthType()), + Attributes: attrs, + Redacted: redacted, + } } return results, nil } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/cloud/cloud_test.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/cloud/cloud_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/cloud/cloud_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/cloud/cloud_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -9,10 +9,12 @@ gc "gopkg.in/check.v1" "gopkg.in/juju/names.v2" + "github.com/juju/errors" cloudfacade "github.com/juju/juju/apiserver/cloud" "github.com/juju/juju/apiserver/params" apiservertesting "github.com/juju/juju/apiserver/testing" "github.com/juju/juju/cloud" + _ "github.com/juju/juju/provider/dummy" ) type cloudSuite struct { @@ -27,7 +29,7 @@ func (s *cloudSuite) SetUpTest(c *gc.C) { s.IsolationSuite.SetUpTest(c) s.authorizer = apiservertesting.FakeAuthorizer{ - Tag: names.NewUserTag("bruce@local"), + Tag: names.NewUserTag("admin"), } s.backend = mockBackend{ cloud: cloud.Cloud{ @@ -36,8 +38,8 @@ Regions: []cloud.Region{{Name: "nether", Endpoint: "endpoint"}}, }, creds: map[string]cloud.Credential{ - "one": cloud.NewEmptyCredential(), - "two": cloud.NewCredential(cloud.UserPassAuthType, map[string]string{ + names.NewCloudCredentialTag("meep/bruce/one").Id(): cloud.NewEmptyCredential(), + names.NewCloudCredentialTag("meep/bruce/two").Id(): 
cloud.NewCredential(cloud.UserPassAuthType, map[string]string{ "username": "admin", "password": "adm1n", }), @@ -50,7 +52,7 @@ func (s *cloudSuite) TestCloud(c *gc.C) { results, err := s.api.Cloud(params.Entities{ - []params.Entity{{"cloud-my-cloud"}, {"machine-0"}}, + Entities: []params.Entity{{Tag: "cloud-my-cloud"}, {Tag: "machine-0"}}, }) c.Assert(err, jc.ErrorIsNil) s.backend.CheckCalls(c, []gitjujutesting.StubCall{ @@ -68,55 +70,31 @@ }) } -func (s *cloudSuite) TestCloudDefaults(c *gc.C) { - results, err := s.api.CloudDefaults(params.Entities{[]params.Entity{ - {"machine-0"}, - {"user-admin"}, - {"user-bruce"}, - }}) - c.Assert(err, jc.ErrorIsNil) - s.backend.CheckCallNames(c, - "IsControllerAdministrator", // for auth-checking - "ControllerModel", - "IsControllerAdministrator", // to get default credential - ) - c.Assert(results.Results, gc.HasLen, 3) - c.Assert(results.Results[0].Error, jc.DeepEquals, ¶ms.Error{ - Message: `"machine-0" is not a valid user tag`, - }) - c.Assert(results.Results[1].Error, jc.DeepEquals, ¶ms.Error{ - Message: "permission denied", Code: params.CodeUnauthorized, - }) - c.Assert(results.Results[2].Error, gc.IsNil) - c.Assert(results.Results[2].Result, jc.DeepEquals, ¶ms.CloudDefaults{ - CloudTag: "cloud-some-cloud", - CloudRegion: "some-region", - CloudCredential: "", +func (s *cloudSuite) TestClouds(c *gc.C) { + result, err := s.api.Clouds() + c.Assert(err, jc.ErrorIsNil) + s.backend.CheckCallNames(c, "Clouds") + c.Assert(result.Clouds, jc.DeepEquals, map[string]params.Cloud{ + "cloud-my-cloud": { + Type: "dummy", + AuthTypes: []string{"empty", "userpass"}, + Regions: []params.CloudRegion{{Name: "nether", Endpoint: "endpoint"}}, + }, }) } -func (s *cloudSuite) TestCloudDefaultsAdminAccess(c *gc.C) { - s.authorizer.Tag = names.NewUserTag("admin@local") - results, err := s.api.CloudDefaults(params.Entities{[]params.Entity{ - {"user-admin"}, - }}) - c.Assert(err, jc.ErrorIsNil) - s.backend.CheckCallNames(c, - 
"IsControllerAdministrator", // for auth-checking - "ControllerModel", - "IsControllerAdministrator", // to get default credential - ) - c.Assert(results.Results, gc.HasLen, 1) - c.Assert(results.Results[0].Error, gc.IsNil) - c.Assert(results.Results[0].Result, jc.DeepEquals, ¶ms.CloudDefaults{ - CloudTag: "cloud-some-cloud", - CloudRegion: "some-region", - CloudCredential: "some-credential", +func (s *cloudSuite) TestDefaultCloud(c *gc.C) { + result, err := s.api.DefaultCloud() + c.Assert(err, jc.ErrorIsNil) + s.backend.CheckCallNames(c, "ControllerModel") + c.Assert(result, jc.DeepEquals, params.StringResult{ + Result: "cloud-some-cloud", }) } -func (s *cloudSuite) TestCredentials(c *gc.C) { - results, err := s.api.Credentials(params.UserClouds{[]params.UserCloud{{ +func (s *cloudSuite) TestUserCredentials(c *gc.C) { + s.authorizer.Tag = names.NewUserTag("bruce") + results, err := s.api.UserCredentials(params.UserClouds{UserClouds: []params.UserCloud{{ UserTag: "machine-0", CloudTag: "cloud-meep", }, { @@ -127,7 +105,7 @@ CloudTag: "cloud-meep", }}}) c.Assert(err, jc.ErrorIsNil) - s.backend.CheckCallNames(c, "IsControllerAdministrator", "CloudCredentials") + s.backend.CheckCallNames(c, "ControllerTag", "CloudCredentials") s.backend.CheckCall(c, 1, "CloudCredentials", names.NewUserTag("bruce"), "meep") c.Assert(results.Results, gc.HasLen, 3) @@ -138,118 +116,188 @@ Message: "permission denied", Code: params.CodeUnauthorized, }) c.Assert(results.Results[2].Error, gc.IsNil) - c.Assert(results.Results[2].Credentials, jc.DeepEquals, map[string]params.CloudCredential{ - "one": { - AuthType: "empty", - }, - "two": { - AuthType: "userpass", - Attributes: map[string]string{ - "username": "admin", - "password": "adm1n", - }, - }, + c.Assert(results.Results[2].Result, jc.SameContents, []string{ + "cloudcred-meep_bruce_one", + "cloudcred-meep_bruce_two", }) } -func (s *cloudSuite) TestCredentialsAdminAccess(c *gc.C) { - s.authorizer.Tag = names.NewUserTag("admin@local") - 
results, err := s.api.Credentials(params.UserClouds{[]params.UserCloud{{ +func (s *cloudSuite) TestUserCredentialsAdminAccess(c *gc.C) { + s.authorizer.Tag = names.NewUserTag("admin") + results, err := s.api.UserCredentials(params.UserClouds{UserClouds: []params.UserCloud{{ UserTag: "user-julia", CloudTag: "cloud-meep", }}}) c.Assert(err, jc.ErrorIsNil) - s.backend.CheckCallNames(c, "IsControllerAdministrator", "CloudCredentials") + s.backend.CheckCallNames(c, "ControllerTag", "CloudCredentials") c.Assert(results.Results, gc.HasLen, 1) // admin can access others' credentials c.Assert(results.Results[0].Error, gc.IsNil) } func (s *cloudSuite) TestUpdateCredentials(c *gc.C) { - results, err := s.api.UpdateCredentials(params.UsersCloudCredentials{[]params.UserCloudCredentials{{ - UserTag: "machine-0", - CloudTag: "cloud-meep", + s.backend.SetErrors(nil, errors.NotFoundf("cloud")) + s.authorizer.Tag = names.NewUserTag("bruce") + results, err := s.api.UpdateCredentials(params.UpdateCloudCredentials{Credentials: []params.UpdateCloudCredential{{ + Tag: "machine-0", }, { - UserTag: "user-admin", - CloudTag: "cloud-meep", + Tag: "cloudcred-meep_admin_whatever", }, { - UserTag: "user-bruce", - CloudTag: "cloud-meep", - Credentials: map[string]params.CloudCredential{ - "three": { - AuthType: "oauth1", - Attributes: map[string]string{"token": "foo:bar:baz"}, - }, - "four": { - AuthType: "access-key", - Attributes: map[string]string{ - "access-key": "foo", - "secret-key": "bar", - }, - }, + Tag: "cloudcred-meep_bruce_three", + Credential: params.CloudCredential{ + AuthType: "oauth1", + Attributes: map[string]string{"token": "foo:bar:baz"}, + }, + }, { + Tag: "cloudcred-badcloud_bruce_three", + Credential: params.CloudCredential{ + AuthType: "oauth1", + Attributes: map[string]string{"token": "foo:bar:baz"}, }, }}}) c.Assert(err, jc.ErrorIsNil) - s.backend.CheckCallNames(c, "IsControllerAdministrator", "UpdateCloudCredentials") - c.Assert(results.Results, gc.HasLen, 3) + 
s.backend.CheckCallNames(c, "ControllerTag", "UpdateCloudCredential", "UpdateCloudCredential") + c.Assert(results.Results, gc.HasLen, 4) c.Assert(results.Results[0].Error, jc.DeepEquals, ¶ms.Error{ - Message: `"machine-0" is not a valid user tag`, + Message: `"machine-0" is not a valid cloudcred tag`, }) c.Assert(results.Results[1].Error, jc.DeepEquals, ¶ms.Error{ Message: "permission denied", Code: params.CodeUnauthorized, }) c.Assert(results.Results[2].Error, gc.IsNil) + c.Assert(results.Results[3].Error, jc.DeepEquals, ¶ms.Error{ + Message: `cannot update credential "three": controller does not manage cloud "badcloud"`, + }) s.backend.CheckCall( - c, 1, "UpdateCloudCredentials", - names.NewUserTag("bruce"), - "meep", - map[string]cloud.Credential{ - "three": cloud.NewCredential( - cloud.OAuth1AuthType, - map[string]string{"token": "foo:bar:baz"}, - ), - "four": cloud.NewCredential( - cloud.AccessKeyAuthType, - map[string]string{"access-key": "foo", "secret-key": "bar"}, - ), - }, + c, 1, "UpdateCloudCredential", + names.NewCloudCredentialTag("meep/bruce/three"), + cloud.NewCredential( + cloud.OAuth1AuthType, + map[string]string{"token": "foo:bar:baz"}, + ), ) } func (s *cloudSuite) TestUpdateCredentialsAdminAccess(c *gc.C) { - s.authorizer.Tag = names.NewUserTag("admin@local") - results, err := s.api.UpdateCredentials(params.UsersCloudCredentials{[]params.UserCloudCredentials{{ - UserTag: "user-julia", - CloudTag: "cloud-meep", - Credentials: map[string]params.CloudCredential{ - "three": { - AuthType: "oauth1", - Attributes: map[string]string{"token": "foo:bar:baz"}, - }, + s.authorizer.Tag = names.NewUserTag("admin") + results, err := s.api.UpdateCredentials(params.UpdateCloudCredentials{Credentials: []params.UpdateCloudCredential{{ + Tag: "cloudcred-meep_julia_three", + Credential: params.CloudCredential{ + AuthType: "oauth1", + Attributes: map[string]string{"token": "foo:bar:baz"}, }, }}}) c.Assert(err, jc.ErrorIsNil) - s.backend.CheckCallNames(c, 
"IsControllerAdministrator", "UpdateCloudCredentials") + s.backend.CheckCallNames(c, "ControllerTag", "UpdateCloudCredential") c.Assert(results.Results, gc.HasLen, 1) // admin can update others' credentials c.Assert(results.Results[0].Error, gc.IsNil) } +func (s *cloudSuite) TestRevokeCredentials(c *gc.C) { + s.authorizer.Tag = names.NewUserTag("bruce") + results, err := s.api.RevokeCredentials(params.Entities{Entities: []params.Entity{{ + Tag: "machine-0", + }, { + Tag: "cloudcred-meep_admin_whatever", + }, { + Tag: "cloudcred-meep_bruce_three", + }}}) + c.Assert(err, jc.ErrorIsNil) + s.backend.CheckCallNames(c, "ControllerTag", "RemoveCloudCredential") + c.Assert(results.Results, gc.HasLen, 3) + c.Assert(results.Results[0].Error, jc.DeepEquals, ¶ms.Error{ + Message: `"machine-0" is not a valid cloudcred tag`, + }) + c.Assert(results.Results[1].Error, jc.DeepEquals, ¶ms.Error{ + Message: "permission denied", Code: params.CodeUnauthorized, + }) + c.Assert(results.Results[2].Error, gc.IsNil) + + s.backend.CheckCall( + c, 1, "RemoveCloudCredential", + names.NewCloudCredentialTag("meep/bruce/three"), + ) +} + +func (s *cloudSuite) TestRevokeCredentialsAdminAccess(c *gc.C) { + s.authorizer.Tag = names.NewUserTag("admin") + results, err := s.api.RevokeCredentials(params.Entities{Entities: []params.Entity{{ + Tag: "cloudcred-meep_julia_three", + }}}) + c.Assert(err, jc.ErrorIsNil) + s.backend.CheckCallNames(c, "ControllerTag", "RemoveCloudCredential") + c.Assert(results.Results, gc.HasLen, 1) + // admin can revoke others' credentials + c.Assert(results.Results[0].Error, gc.IsNil) +} + +func (s *cloudSuite) TestCredential(c *gc.C) { + s.authorizer.Tag = names.NewUserTag("bruce") + results, err := s.api.Credential(params.Entities{Entities: []params.Entity{{ + Tag: "machine-0", + }, { + Tag: "cloudcred-meep_admin_foo", + }, { + Tag: "cloudcred-meep_bruce_two", + }}}) + c.Assert(err, jc.ErrorIsNil) + s.backend.CheckCallNames(c, "ControllerTag", "CloudCredentials", "Cloud") + 
s.backend.CheckCall(c, 1, "CloudCredentials", names.NewUserTag("bruce"), "meep") + + c.Assert(results.Results, gc.HasLen, 3) + c.Assert(results.Results[0].Error, jc.DeepEquals, ¶ms.Error{ + Message: `"machine-0" is not a valid cloudcred tag`, + }) + c.Assert(results.Results[1].Error, jc.DeepEquals, ¶ms.Error{ + Message: "permission denied", Code: params.CodeUnauthorized, + }) + c.Assert(results.Results[2].Error, gc.IsNil) + c.Assert(results.Results[2].Result, jc.DeepEquals, ¶ms.CloudCredential{ + AuthType: "userpass", + Attributes: map[string]string{"username": "admin"}, + Redacted: []string{"password"}, + }) +} + +func (s *cloudSuite) TestCredentialAdminAccess(c *gc.C) { + s.authorizer.Tag = names.NewUserTag("admin") + results, err := s.api.Credential(params.Entities{Entities: []params.Entity{{ + Tag: "cloudcred-meep_bruce_two", + }}}) + c.Assert(err, jc.ErrorIsNil) + s.backend.CheckCallNames(c, "ControllerTag", "CloudCredentials", "Cloud") + c.Assert(results.Results, gc.HasLen, 1) + // admin can access others' credentials + c.Assert(results.Results[0].Error, gc.IsNil) +} + type mockBackend struct { gitjujutesting.Stub cloud cloud.Cloud creds map[string]cloud.Credential } -func (st *mockBackend) IsControllerAdministrator(user names.UserTag) (bool, error) { - st.MethodCall(st, "IsControllerAdministrator", user) - return user.Canonical() == "admin@local", st.NextErr() +func (st *mockBackend) IsControllerAdmin(user names.UserTag) (bool, error) { + st.MethodCall(st, "IsControllerAdmin", user) + return user.Id() == "admin", st.NextErr() } func (st *mockBackend) ControllerModel() (cloudfacade.Model, error) { st.MethodCall(st, "ControllerModel") - return &mockModel{"some-cloud", "some-region", "some-credential"}, st.NextErr() + credentialTag := names.NewCloudCredentialTag("some-cloud/admin/some-credential") + return &mockModel{"some-cloud", "some-region", credentialTag}, st.NextErr() +} + +func (st *mockBackend) ControllerTag() names.ControllerTag { + st.MethodCall(st, 
"ControllerTag") + return names.NewControllerTag("deadbeef-1bad-500d-9000-4b1d0d06f00d") +} + +func (st *mockBackend) ModelTag() names.ModelTag { + st.MethodCall(st, "ModelTag") + return names.NewModelTag("deadbeef-0bad-400d-8000-4b1d0d06f00d") } func (st *mockBackend) Cloud(name string) (cloud.Cloud, error) { @@ -257,13 +305,25 @@ return st.cloud, st.NextErr() } +func (st *mockBackend) Clouds() (map[names.CloudTag]cloud.Cloud, error) { + st.MethodCall(st, "Clouds") + return map[names.CloudTag]cloud.Cloud{ + names.NewCloudTag("my-cloud"): st.cloud, + }, st.NextErr() +} + func (st *mockBackend) CloudCredentials(user names.UserTag, cloudName string) (map[string]cloud.Credential, error) { st.MethodCall(st, "CloudCredentials", user, cloudName) return st.creds, st.NextErr() } -func (st *mockBackend) UpdateCloudCredentials(user names.UserTag, cloudName string, creds map[string]cloud.Credential) error { - st.MethodCall(st, "UpdateCloudCredentials", user, cloudName, creds) +func (st *mockBackend) UpdateCloudCredential(tag names.CloudCredentialTag, cred cloud.Credential) error { + st.MethodCall(st, "UpdateCloudCredential", tag, cred) + return st.NextErr() +} + +func (st *mockBackend) RemoveCloudCredential(tag names.CloudCredentialTag) error { + st.MethodCall(st, "RemoveCloudCredential", tag) return st.NextErr() } @@ -273,9 +333,9 @@ } type mockModel struct { - cloud string - cloudRegion string - cloudCredential string + cloud string + cloudRegion string + cloudCredentialTag names.CloudCredentialTag } func (m *mockModel) Cloud() string { @@ -286,6 +346,6 @@ return m.cloudRegion } -func (m *mockModel) CloudCredential() string { - return m.cloudCredential +func (m *mockModel) CloudCredential() (names.CloudCredentialTag, bool) { + return m.cloudCredentialTag, true } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/common/cloudspec/cloudspec.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/common/cloudspec/cloudspec.go --- 
juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/common/cloudspec/cloudspec.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/common/cloudspec/cloudspec.go 2016-10-13 14:31:49.000000000 +0000 @@ -66,26 +66,34 @@ results.Results[i].Error = common.ServerError(common.ErrPerm) continue } - spec, err := s.getCloudSpec(tag) - if err != nil { - results.Results[i].Error = common.ServerError(err) - continue - } - var paramsCloudCredential *params.CloudCredential - if spec.Credential != nil && spec.Credential.AuthType() != "" { - paramsCloudCredential = ¶ms.CloudCredential{ - string(spec.Credential.AuthType()), - spec.Credential.Attributes(), - } - } - results.Results[i].Result = ¶ms.CloudSpec{ - spec.Type, - spec.Name, - spec.Region, - spec.Endpoint, - spec.StorageEndpoint, - paramsCloudCredential, - } + results.Results[i] = s.GetCloudSpec(tag) } return results, nil } + +// GetCloudSpec constucts the CloudSpec for a validated and authorized model. 
+func (s CloudSpecAPI) GetCloudSpec(tag names.ModelTag) params.CloudSpecResult { + var result params.CloudSpecResult + spec, err := s.getCloudSpec(tag) + if err != nil { + result.Error = common.ServerError(err) + return result + } + var paramsCloudCredential *params.CloudCredential + if spec.Credential != nil && spec.Credential.AuthType() != "" { + paramsCloudCredential = ¶ms.CloudCredential{ + AuthType: string(spec.Credential.AuthType()), + Attributes: spec.Credential.Attributes(), + } + } + result.Result = ¶ms.CloudSpec{ + spec.Type, + spec.Name, + spec.Region, + spec.Endpoint, + spec.IdentityEndpoint, + spec.StorageEndpoint, + paramsCloudCredential, + } + return result +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/common/cloudspec/cloudspec_test.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/common/cloudspec/cloudspec_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/common/cloudspec/cloudspec_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/common/cloudspec/cloudspec_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -55,6 +55,7 @@ "name", "region", "endpoint", + "identity-endpoint", "storage-endpoint", &credential, } @@ -75,10 +76,11 @@ "name", "region", "endpoint", + "identity-endpoint", "storage-endpoint", ¶ms.CloudCredential{ - "auth-type", - map[string]string{"k": "v"}, + AuthType: "auth-type", + Attributes: map[string]string{"k": "v"}, }, }, }, { @@ -111,6 +113,7 @@ "name", "region", "endpoint", + "identity-endpoint", "storage-endpoint", nil, }, diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/common/controllerconfig_test.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/common/controllerconfig_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/common/controllerconfig_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/common/controllerconfig_test.go 2016-10-13 14:31:49.000000000 
+0000 @@ -33,9 +33,9 @@ return nil, f.controllerConfigError } return map[string]interface{}{ - controller.ControllerUUIDKey: testing.ModelTag.Id(), + controller.ControllerUUIDKey: testing.ControllerTag.Id(), controller.CACertKey: testing.CACert, - controller.ApiPort: 4321, + controller.APIPort: 4321, controller.StatePort: 1234, }, nil } @@ -53,7 +53,7 @@ c.Assert(err, jc.ErrorIsNil) c.Assert(map[string]interface{}(result.Config), jc.DeepEquals, map[string]interface{}{ "ca-cert": testing.CACert, - "controller-uuid": "deadbeef-0bad-400d-8000-4b1d0d06f00d", + "controller-uuid": "deadbeef-1bad-500d-9000-4b1d0d06f00d", "state-port": 1234, "api-port": 4321, }) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/common/errors.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/common/errors.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/common/errors.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/common/errors.go 2016-10-13 14:31:49.000000000 +0000 @@ -89,6 +89,7 @@ var ( ErrBadId = errors.New("id not found") ErrBadCreds = errors.New("invalid entity name or password") + ErrNoCreds = errors.New("no credentials provided") ErrLoginExpired = errors.New("login expired") ErrPerm = errors.New("permission denied") ErrNotLoggedIn = errors.New("not logged in") @@ -122,6 +123,7 @@ lease.ErrClaimDenied: params.CodeLeaseClaimDenied, ErrBadId: params.CodeNotFound, ErrBadCreds: params.CodeUnauthorized, + ErrNoCreds: params.CodeNoCreds, ErrLoginExpired: params.CodeLoginExpired, ErrPerm: params.CodeUnauthorized, ErrNotLoggedIn: params.CodeUnauthorized, @@ -193,6 +195,7 @@ if err == nil { return nil } + logger.Tracef("server RPC error %v", errors.Details(err)) msg := err.Error() // Skip past annotations when looking for the code. 
err = errors.Cause(err) @@ -200,8 +203,6 @@ var info *params.ErrorInfo switch { case ok: - case isIOTimeout(err): - code = params.CodeRetry case errors.IsUnauthorized(err): code = params.CodeUnauthorized case errors.IsNotFound(err): @@ -251,17 +252,6 @@ } } -// Unfortunately there is no specific type of error for i/o timeout, -// and the error that bubbles up from mgo is annotated and a string type, -// so all we can do is look at the error suffix and see if it matches. -func isIOTimeout(err error) bool { - // Perhaps sometime in the future, we'll have additional ways to tell if - // the error is an i/o timeout type error, but for now this is all we - // have. - msg := err.Error() - return strings.HasSuffix(msg, "i/o timeout") -} - func DestroyErr(desc string, ids, errs []string) error { // TODO(waigani) refactor DestroyErr to take a map of ids to errors. if len(errs) == 0 { diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/common/errors_test.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/common/errors_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/common/errors_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/common/errors_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -191,10 +191,6 @@ status: http.StatusNotFound, helperFunc: params.IsCodeModelNotFound, }, { - err: errors.Annotate(errors.New("i/o timeout"), "annotated"), - code: params.CodeRetry, - status: http.StatusServiceUnavailable, -}, { err: nil, code: "", status: http.StatusOK, diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/common/getstatus_test.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/common/getstatus_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/common/getstatus_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/common/getstatus_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -70,7 +70,7 @@ 
c.Assert(result.Results, gc.HasLen, 1) machineStatus := result.Results[0] c.Assert(machineStatus.Error, gc.IsNil) - c.Assert(machineStatus.Status, gc.Equals, status.StatusPending.String()) + c.Assert(machineStatus.Status, gc.Equals, status.Pending.String()) } func (s *statusGetterSuite) TestGetUnitStatus(c *gc.C) { @@ -78,7 +78,7 @@ // on the unit returns the workload status not the agent status as it // does on a machine. unit := s.Factory.MakeUnit(c, &factory.UnitParams{Status: &status.StatusInfo{ - Status: status.StatusMaintenance, + Status: status.Maintenance, }}) result, err := s.getter.Status(params.Entities{[]params.Entity{{ unit.Tag().String(), @@ -87,12 +87,12 @@ c.Assert(result.Results, gc.HasLen, 1) unitStatus := result.Results[0] c.Assert(unitStatus.Error, gc.IsNil) - c.Assert(unitStatus.Status, gc.Equals, status.StatusMaintenance.String()) + c.Assert(unitStatus.Status, gc.Equals, status.Maintenance.String()) } func (s *statusGetterSuite) TestGetServiceStatus(c *gc.C) { service := s.Factory.MakeApplication(c, &factory.ApplicationParams{Status: &status.StatusInfo{ - Status: status.StatusMaintenance, + Status: status.Maintenance, }}) result, err := s.getter.Status(params.Entities{[]params.Entity{{ service.Tag().String(), @@ -101,7 +101,7 @@ c.Assert(result.Results, gc.HasLen, 1) serviceStatus := result.Results[0] c.Assert(serviceStatus.Error, gc.IsNil) - c.Assert(serviceStatus.Status, gc.Equals, status.StatusMaintenance.String()) + c.Assert(serviceStatus.Status, gc.Equals, status.Maintenance.String()) } func (s *statusGetterSuite) TestBulk(c *gc.C) { @@ -118,7 +118,7 @@ c.Assert(result.Results, gc.HasLen, 3) c.Assert(result.Results[0].Error, jc.Satisfies, params.IsCodeUnauthorized) c.Assert(result.Results[1].Error, gc.IsNil) - c.Assert(result.Results[1].Status, gc.Equals, status.StatusPending.String()) + c.Assert(result.Results[1].Status, gc.Equals, status.Pending.String()) c.Assert(result.Results[2].Error, gc.ErrorMatches, `"bad-tag" is not a valid tag`) 
} @@ -180,7 +180,7 @@ func (s *serviceStatusGetterSuite) TestGetServiceStatus(c *gc.C) { service := s.Factory.MakeApplication(c, &factory.ApplicationParams{Status: &status.StatusInfo{ - Status: status.StatusMaintenance, + Status: status.Maintenance, }}) result, err := s.getter.Status(params.Entities{[]params.Entity{{ service.Tag().String(), @@ -194,7 +194,7 @@ func (s *serviceStatusGetterSuite) TestGetUnitStatusNotLeader(c *gc.C) { // If the unit isn't the leader, it can't get it. unit := s.Factory.MakeUnit(c, &factory.UnitParams{Status: &status.StatusInfo{ - Status: status.StatusMaintenance, + Status: status.Maintenance, }}) result, err := s.getter.Status(params.Entities{[]params.Entity{{ unit.Tag().String(), @@ -208,7 +208,7 @@ func (s *serviceStatusGetterSuite) TestGetUnitStatusIsLeader(c *gc.C) { // If the unit isn't the leader, it can't get it. unit := s.Factory.MakeUnit(c, &factory.UnitParams{Status: &status.StatusInfo{ - Status: status.StatusMaintenance, + Status: status.Maintenance, }}) service, err := unit.Application() c.Assert(err, jc.ErrorIsNil) @@ -224,13 +224,13 @@ r := result.Results[0] c.Assert(r.Error, gc.IsNil) c.Assert(r.Application.Error, gc.IsNil) - c.Assert(r.Application.Status, gc.Equals, status.StatusMaintenance.String()) + c.Assert(r.Application.Status, gc.Equals, status.Maintenance.String()) units := r.Units c.Assert(units, gc.HasLen, 1) unitStatus, ok := units[unit.Name()] c.Assert(ok, jc.IsTrue) c.Assert(unitStatus.Error, gc.IsNil) - c.Assert(unitStatus.Status, gc.Equals, status.StatusMaintenance.String()) + c.Assert(unitStatus.Status, gc.Equals, status.Maintenance.String()) } func (s *serviceStatusGetterSuite) TestBulk(c *gc.C) { diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/common/machine.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/common/machine.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/common/machine.go 2016-08-16 08:56:25.000000000 +0000 +++ 
juju-core-2.0.0/src/github.com/juju/juju/apiserver/common/machine.go 2016-10-13 14:31:49.000000000 +0000 @@ -6,8 +6,11 @@ import ( "github.com/juju/errors" + "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/instance" "github.com/juju/juju/state" "github.com/juju/juju/state/multiwatcher" + "github.com/juju/juju/status" ) // StateJobs translates a slice of multiwatcher jobs to their equivalents in state. @@ -52,9 +55,18 @@ } type Machine interface { + Id() string + InstanceId() (instance.Id, error) + WantsVote() bool + HasVote() bool + Status() (status.StatusInfo, error) + ContainerType() instance.ContainerType + HardwareCharacteristics() (*instance.HardwareCharacteristics, error) Life() state.Life ForceDestroy() error Destroy() error + AgentPresence() (bool, error) + IsManager() bool } func DestroyMachines(st origStateInterface, force bool, ids ...string) error { @@ -82,3 +94,62 @@ } return DestroyErr("machines", ids, errs) } + +// ModelMachineInfo returns information about machine hardware for +// alive top level machines (not containers). +func ModelMachineInfo(st ModelManagerBackend) (machineInfo []params.ModelMachineInfo, _ error) { + machines, err := st.AllMachines() + if err != nil { + return nil, errors.Trace(err) + } + for _, m := range machines { + if m.Life() != state.Alive { + continue + } + var status string + statusInfo, err := MachineStatus(m) + if err == nil { + status = string(statusInfo.Status) + } else { + status = err.Error() + } + mInfo := params.ModelMachineInfo{ + Id: m.Id(), + HasVote: m.HasVote(), + WantsVote: m.WantsVote(), + Status: status, + } + instId, err := m.InstanceId() + switch { + case err == nil: + mInfo.InstanceId = string(instId) + case errors.IsNotProvisioned(err): + // ok, but no instance ID to get. 
+ default: + return nil, errors.Trace(err) + } + if m.ContainerType() != "" && m.ContainerType() != instance.NONE { + machineInfo = append(machineInfo, mInfo) + continue + } + // Only include cores for physical machines. + hw, err := m.HardwareCharacteristics() + if err != nil && !errors.IsNotFound(err) { + return nil, errors.Trace(err) + } + if hw != nil && hw.String() != "" { + hwParams := ¶ms.MachineHardware{ + Cores: hw.CpuCores, + Arch: hw.Arch, + Mem: hw.Mem, + RootDisk: hw.RootDisk, + CpuPower: hw.CpuPower, + Tags: hw.Tags, + AvailabilityZone: hw.AvailabilityZone, + } + mInfo.Hardware = hwParams + } + machineInfo = append(machineInfo, mInfo) + } + return machineInfo, nil +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/common/machinestatus.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/common/machinestatus.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/common/machinestatus.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/common/machinestatus.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,53 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package common + +import ( + "github.com/juju/juju/state" + "github.com/juju/juju/status" +) + +// MachineStatusGetter defines the machine functionality +// required to status. +type MachineStatusGetter interface { + Status() (status.StatusInfo, error) + AgentPresence() (bool, error) + Id() string + Life() state.Life +} + +// MachineStatus returns the machine agent status for a given +// machine, with special handling for agent presence. +func MachineStatus(machine MachineStatusGetter) (status.StatusInfo, error) { + machineStatus, err := machine.Status() + if err != nil { + return status.StatusInfo{}, err + } + + if !canMachineBeDown(machineStatus) { + // The machine still being provisioned - there's no point in + // enquiring about the agent liveness. 
+ return machineStatus, nil + } + + agentAlive, err := machine.AgentPresence() + if err != nil { + // We don't want any presence errors affecting status. + logger.Debugf("error determining presence for machine %s: %v", machine.Id(), err) + return machineStatus, nil + } + if machine.Life() != state.Dead && !agentAlive { + machineStatus.Status = status.Down + machineStatus.Message = "agent is not communicating with the server" + } + return machineStatus, nil +} + +func canMachineBeDown(machineStatus status.StatusInfo) bool { + switch machineStatus.Status { + case status.Pending, status.Stopped: + return false + } + return true +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/common/machinestatus_test.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/common/machinestatus_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/common/machinestatus_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/common/machinestatus_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,76 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+ +package common_test + +import ( + "errors" + + "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/apiserver/common" + "github.com/juju/juju/state" + "github.com/juju/juju/status" +) + +type MachineStatusSuite struct { + testing.IsolationSuite + machine *mockMachine +} + +var _ = gc.Suite(&MachineStatusSuite{}) + +func (s *MachineStatusSuite) SetUpTest(c *gc.C) { + s.machine = &mockMachine{ + status: status.Started, + } +} + +func (s *MachineStatusSuite) checkUntouched(c *gc.C) { + agent, err := common.MachineStatus(s.machine) + c.Check(err, jc.ErrorIsNil) + c.Assert(agent.Status, jc.DeepEquals, s.machine.status) +} + +func (s *MachineStatusSuite) TestNormal(c *gc.C) { + s.checkUntouched(c) +} + +func (s *MachineStatusSuite) TestErrors(c *gc.C) { + s.machine.statusErr = errors.New("status error") + + _, err := common.MachineStatus(s.machine) + c.Assert(err, gc.ErrorMatches, "status error") +} + +func (s *MachineStatusSuite) TestDown(c *gc.C) { + s.machine.agentDead = true + agent, err := common.MachineStatus(s.machine) + c.Assert(err, jc.ErrorIsNil) + c.Assert(agent, jc.DeepEquals, status.StatusInfo{ + Status: status.Down, + Message: "agent is not communicating with the server", + }) +} + +func (s *MachineStatusSuite) TestDownAndDead(c *gc.C) { + s.machine.agentDead = true + s.machine.life = state.Dead + // Status is untouched if unit is Dead. + s.checkUntouched(c) +} + +func (s *MachineStatusSuite) TestPresenceError(c *gc.C) { + s.machine.agentDead = true + s.machine.presenceErr = errors.New("boom") + // Presence error gets ignored, so no output is unchanged. 
+ s.checkUntouched(c) +} + +func (s *MachineStatusSuite) TestNotDownIfPending(c *gc.C) { + s.machine.agentDead = true + s.machine.status = status.Pending + s.checkUntouched(c) +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/common/machine_test.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/common/machine_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/common/machine_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/common/machine_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -6,11 +6,15 @@ import ( "github.com/juju/errors" jc "github.com/juju/testing/checkers" + "github.com/juju/utils" gc "gopkg.in/check.v1" "github.com/juju/juju/apiserver/common" + "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/instance" "github.com/juju/juju/state" "github.com/juju/juju/state/multiwatcher" + "github.com/juju/juju/status" ) type machineSuite struct{} @@ -80,8 +84,61 @@ c.Assert(err, jc.ErrorIsNil) } +func (s *machineSuite) TestMachineHardwareInfo(c *gc.C) { + one := uint64(1) + amd64 := "amd64" + gig := uint64(1024) + st := mockState{ + machines: map[string]*mockMachine{ + "1": {id: "1", life: state.Alive, containerType: instance.NONE, + hw: &instance.HardwareCharacteristics{ + Arch: &amd64, + Mem: &gig, + CpuCores: &one, + CpuPower: &one, + }}, + "2": {id: "2", life: state.Alive, containerType: instance.LXD}, + "3": {life: state.Dying}, + }, + } + info, err := common.ModelMachineInfo(&st) + c.Assert(err, jc.ErrorIsNil) + c.Assert(info, jc.DeepEquals, []params.ModelMachineInfo{ + { + Id: "1", + Hardware: ¶ms.MachineHardware{ + Arch: &amd64, + Mem: &gig, + Cores: &one, + CpuPower: &one, + }, + }, { + Id: "2", + }, + }) +} + +func (s *machineSuite) TestMachineInstanceInfo(c *gc.C) { + st := mockState{ + machines: map[string]*mockMachine{ + "1": {id: "1", instId: instance.Id("123"), status: status.Down, hasVote: true, wantsVote: true}, + }, + } + info, err := 
common.ModelMachineInfo(&st) + c.Assert(err, jc.ErrorIsNil) + c.Assert(info, jc.DeepEquals, []params.ModelMachineInfo{ + { + Id: "1", + InstanceId: "123", + Status: "down", + HasVote: true, + WantsVote: true, + }, + }) +} + type mockState struct { - state.State + common.ModelManagerBackend machines map[string]*mockMachine } @@ -92,19 +149,71 @@ return nil, errors.Errorf("machine %s does not exist", id) } +func (st *mockState) AllMachines() (machines []common.Machine, _ error) { + // Ensure we get machines in id order. + var ids []string + for id := range st.machines { + ids = append(ids, id) + } + utils.SortStringsNaturally(ids) + for _, id := range ids { + machines = append(machines, st.machines[id]) + } + return machines, nil +} + type mockMachine struct { state.Machine + id string life state.Life + containerType instance.ContainerType + hw *instance.HardwareCharacteristics + instId instance.Id + hasVote, wantsVote bool + status status.Status + statusErr error destroyErr error forceDestroyErr error forceDestroyCalled bool destroyCalled bool + agentDead bool + presenceErr error +} + +func (m *mockMachine) Id() string { + return m.id } func (m *mockMachine) Life() state.Life { return m.life } +func (m *mockMachine) InstanceId() (instance.Id, error) { + return m.instId, nil +} + +func (m *mockMachine) WantsVote() bool { + return m.wantsVote +} + +func (m *mockMachine) HasVote() bool { + return m.hasVote +} + +func (m *mockMachine) Status() (status.StatusInfo, error) { + return status.StatusInfo{ + Status: m.status, + }, m.statusErr +} + +func (m *mockMachine) HardwareCharacteristics() (*instance.HardwareCharacteristics, error) { + return m.hw, nil +} + +func (m *mockMachine) AgentPresence() (bool, error) { + return !m.agentDead, m.presenceErr +} + func (m *mockMachine) ForceDestroy() error { m.forceDestroyCalled = true if m.forceDestroyErr != nil { diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/common/modeldestroy.go 
juju-core-2.0.0/src/github.com/juju/juju/apiserver/common/modeldestroy.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/common/modeldestroy.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/common/modeldestroy.go 2016-10-13 14:31:49.000000000 +0000 @@ -5,13 +5,19 @@ import ( "github.com/juju/errors" + "github.com/juju/utils/clock" "gopkg.in/juju/names.v2" "github.com/juju/juju/apiserver/metricsender" ) -var sendMetrics = func(st metricsender.MetricsSenderBackend) error { - err := metricsender.SendMetrics(st, metricsender.DefaultMetricSender(), metricsender.DefaultMaxBatchesPerSend()) +var sendMetrics = func(st metricsender.ModelBackend) error { + cfg, err := st.ModelConfig() + if err != nil { + return errors.Annotatef(err, "failed to get model config for %s", st.ModelTag()) + } + + err = metricsender.SendMetrics(st, metricsender.DefaultMetricSender(), clock.WallClock, metricsender.DefaultMaxBatchesPerSend(), cfg.TransmitVendorMetrics()) return errors.Trace(err) } @@ -44,12 +50,12 @@ if destroyHostedModels { // Check we are operating on the controller state. 
- controllerCfg, err := st.ControllerConfig() + controllerModel, err := st.ControllerModel() if err != nil { return errors.Trace(err) } - if modelTag.Id() != controllerCfg.ControllerUUID() { - return errors.Errorf("expected controller model UUID %v, got %v", modelTag.Id(), controllerCfg.ControllerUUID()) + if modelTag != controllerModel.ModelTag() { + return errors.Errorf("expected controller model UUID %v, got %v", modelTag.Id(), controllerModel.ModelTag().Id()) } models, err := st.AllModels() if err != nil { @@ -65,6 +71,11 @@ if err = check.DestroyAllowed(); err != nil { return errors.Trace(err) } + err = sendMetrics(modelSt) + if err != nil { + logger.Errorf("failed to send leftover metrics: %v", err) + } + } } else { check := NewBlockChecker(st) @@ -87,7 +98,6 @@ return errors.Trace(err) } } - err = sendMetrics(st) if err != nil { logger.Errorf("failed to send leftover metrics: %v", err) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/common/modeldestroy_test.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/common/modeldestroy_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/common/modeldestroy_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/common/modeldestroy_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,8 +4,6 @@ package common_test import ( - "fmt" - "github.com/juju/errors" jtesting "github.com/juju/testing" jc "github.com/juju/testing/checkers" @@ -83,7 +81,7 @@ jtesting.Stub } -func (t *testMetricSender) SendMetrics(st metricsender.MetricsSenderBackend) error { +func (t *testMetricSender) SendMetrics(st metricsender.ModelBackend) error { t.AddCall("SendMetrics") return nil } @@ -95,32 +93,9 @@ err := common.DestroyModel(s.modelManager, s.State.ModelTag()) c.Assert(err, jc.ErrorIsNil) - metricSender.CheckCalls(c, []jtesting.StubCall{{FuncName: "SendMetrics"}}) -} - -func (s *destroyModelSuite) TestDestroyModelManual(c *gc.C) { - _, nonManager := 
s.setUpManual(c) - - // If there are any non-manager manual machines in state, DestroyModel will - // error. It will not set the Dying flag on the environment. - err := common.DestroyModel(s.modelManager, s.State.ModelTag()) - c.Assert(err, gc.ErrorMatches, fmt.Sprintf("failed to destroy model: manually provisioned machines must first be destroyed with `juju destroy-machine %s`", nonManager.Id())) - model, err := s.State.Model() - c.Assert(err, jc.ErrorIsNil) - c.Assert(model.Life(), gc.Equals, state.Alive) - - // If we remove the non-manager machine, it should pass. - // Manager machines will remain. - err = nonManager.EnsureDead() - c.Assert(err, jc.ErrorIsNil) - err = nonManager.Remove() - c.Assert(err, jc.ErrorIsNil) - err = common.DestroyModel(s.modelManager, s.State.ModelTag()) - c.Assert(err, jc.ErrorIsNil) - err = model.Refresh() - c.Assert(err, jc.ErrorIsNil) - c.Assert(model.Life(), gc.Equals, state.Dying) - + metricSender.CheckCalls(c, []jtesting.StubCall{{ + FuncName: "SendMetrics", + }}) } func (s *destroyModelSuite) TestDestroyModel(c *gc.C) { diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/common/modelmanagerinterface.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/common/modelmanagerinterface.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/common/modelmanagerinterface.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/common/modelmanagerinterface.go 2016-10-13 14:31:49.000000000 +0000 @@ -11,7 +11,9 @@ "github.com/juju/juju/apiserver/metricsender" "github.com/juju/juju/controller" "github.com/juju/juju/core/description" + "github.com/juju/juju/environs" "github.com/juju/juju/environs/config" + "github.com/juju/juju/permission" "github.com/juju/juju/state" "github.com/juju/juju/status" ) @@ -29,23 +31,33 @@ ModelUUID() string ModelsForUser(names.UserTag) ([]*state.UserModel, error) - IsControllerAdministrator(user names.UserTag) (bool, error) + IsControllerAdmin(user 
names.UserTag) (bool, error) NewModel(state.ModelArgs) (Model, ModelManagerBackend, error) - ComposeNewModelConfig(modelAttr map[string]interface{}) (map[string]interface{}, error) + ComposeNewModelConfig(modelAttr map[string]interface{}, regionSpec *environs.RegionSpec) (map[string]interface{}, error) ControllerModel() (Model, error) ControllerConfig() (controller.Config, error) ForModel(tag names.ModelTag) (ModelManagerBackend, error) + GetModel(names.ModelTag) (Model, error) Model() (Model, error) + ModelConfigDefaultValues() (config.ModelDefaultAttributes, error) + UpdateModelConfigDefaultValues(update map[string]interface{}, remove []string, regionSpec *environs.RegionSpec) error + Unit(name string) (*state.Unit, error) + ModelTag() names.ModelTag + ModelConfig() (*config.Config, error) AllModels() ([]Model, error) - AddModelUser(state.UserAccessSpec) (description.UserAccess, error) - AddControllerUser(state.UserAccessSpec) (description.UserAccess, error) + AddModelUser(string, state.UserAccessSpec) (permission.UserAccess, error) + AddControllerUser(state.UserAccessSpec) (permission.UserAccess, error) RemoveUserAccess(names.UserTag, names.Tag) error - UserAccess(names.UserTag, names.Tag) (description.UserAccess, error) - ModelTag() names.ModelTag + UserAccess(names.UserTag, names.Tag) (permission.UserAccess, error) + AllMachines() (machines []Machine, err error) + AllApplications() (applications []Application, err error) + ControllerUUID() string + ControllerTag() names.ControllerTag Export() (description.Model, error) - SetUserAccess(subject names.UserTag, target names.Tag, access description.Access) (description.UserAccess, error) + SetUserAccess(subject names.UserTag, target names.Tag, access permission.Access) (permission.UserAccess, error) LastModelConnection(user names.UserTag) (time.Time, error) + DumpAll() (map[string]interface{}, error) Close() error } @@ -59,9 +71,9 @@ Owner() names.UserTag Status() (status.StatusInfo, error) Cloud() string - 
CloudCredential() string + CloudCredential() (names.CloudCredentialTag, bool) CloudRegion() string - Users() ([]description.UserAccess, error) + Users() ([]permission.UserAccess, error) Destroy() error DestroyIncludingHosted() error } @@ -105,6 +117,15 @@ return modelManagerStateShim{otherState}, nil } +// GetModel implements ModelManagerBackend. +func (st modelManagerStateShim) GetModel(tag names.ModelTag) (Model, error) { + m, err := st.State.GetModel(tag) + if err != nil { + return nil, err + } + return modelShim{m}, nil +} + // Model implements ModelManagerBackend. func (st modelManagerStateShim) Model() (Model, error) { m, err := st.State.Model() @@ -132,14 +153,49 @@ } // Users implements ModelManagerBackend. -func (m modelShim) Users() ([]description.UserAccess, error) { +func (m modelShim) Users() ([]permission.UserAccess, error) { stateUsers, err := m.Model.Users() if err != nil { return nil, err } - users := make([]description.UserAccess, len(stateUsers)) + users := make([]permission.UserAccess, len(stateUsers)) for i, user := range stateUsers { users[i] = user } return users, nil } + +type machineShim struct { + *state.Machine +} + +func (st modelManagerStateShim) AllMachines() ([]Machine, error) { + allStateMachines, err := st.State.AllMachines() + if err != nil { + return nil, err + } + all := make([]Machine, len(allStateMachines)) + for i, m := range allStateMachines { + all[i] = machineShim{m} + } + return all, nil +} + +// Application defines methods provided by a state.Application instance. 
+type Application interface{} + +type applicationShim struct { + *state.Application +} + +func (st modelManagerStateShim) AllApplications() ([]Application, error) { + allStateApplications, err := st.State.AllApplications() + if err != nil { + return nil, err + } + all := make([]Application, len(allStateApplications)) + for i, a := range allStateApplications { + all[i] = applicationShim{a} + } + return all, nil +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/common/modelstatus.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/common/modelstatus.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/common/modelstatus.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/common/modelstatus.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,132 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package common + +import ( + "gopkg.in/juju/names.v2" + + "github.com/juju/errors" + + "github.com/juju/juju/apiserver/facade" + "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/permission" +) + +// ModelStatusAPI implements the ModelStatus() API. +type ModelStatusAPI struct { + authorizer facade.Authorizer + apiUser names.UserTag + backend ModelManagerBackend +} + +// NewModelStatusAPI creates an implementation providing the ModelStatus() API. 
+func NewModelStatusAPI(st ModelManagerBackend, authorizer facade.Authorizer, apiUser names.UserTag) *ModelStatusAPI { + return &ModelStatusAPI{ + authorizer: authorizer, + apiUser: apiUser, + backend: st, + } +} + +func (s *ModelStatusAPI) checkHasAdmin() error { + isAdmin, err := s.authorizer.HasPermission(permission.SuperuserAccess, s.backend.ControllerTag()) + if err != nil { + return errors.Trace(err) + } + if !isAdmin { + return ServerError(ErrPerm) + } + return nil +} + +// modelAuthCheck checks if the user is acting on their own behalf, or if they +// are an administrator acting on behalf of another user. +func (s *ModelStatusAPI) modelAuthCheck(modelTag names.ModelTag, owner names.UserTag) error { + if err := s.checkHasAdmin(); err == nil { + logger.Tracef("%q is a controller admin", s.apiUser.Id()) + return nil + } + if s.apiUser == owner { + return nil + } + isAdmin, err := s.authorizer.HasPermission(permission.AdminAccess, modelTag) + if err != nil { + return errors.Trace(err) + } + if isAdmin { + return nil + } + return ErrPerm +} + +// ModelStatus returns a summary of the model. 
+func (c *ModelStatusAPI) ModelStatus(req params.Entities) (params.ModelStatusResults, error) { + models := req.Entities + results := params.ModelStatusResults{} + + status := make([]params.ModelStatus, len(models)) + for i, model := range models { + modelStatus, err := c.modelStatus(model.Tag) + if err != nil { + return results, errors.Trace(err) + } + status[i] = modelStatus + } + results.Results = status + return results, nil +} + +func (c *ModelStatusAPI) modelStatus(tag string) (params.ModelStatus, error) { + var status params.ModelStatus + modelTag, err := names.ParseModelTag(tag) + if err != nil { + return status, errors.Trace(err) + } + st := c.backend + if modelTag != c.backend.ModelTag() { + if st, err = c.backend.ForModel(modelTag); err != nil { + return status, errors.Trace(err) + } + defer st.Close() + } + + model, err := st.Model() + if err != nil { + return status, errors.Trace(err) + } + if err := c.modelAuthCheck(modelTag, model.Owner()); err != nil { + return status, errors.Trace(err) + } + + machines, err := st.AllMachines() + if err != nil { + return status, errors.Trace(err) + } + + var hostedMachines []Machine + for _, m := range machines { + if !m.IsManager() { + hostedMachines = append(hostedMachines, m) + } + } + + applications, err := st.AllApplications() + if err != nil { + return status, errors.Trace(err) + } + + modelMachines, err := ModelMachineInfo(st) + if err != nil { + return status, errors.Trace(err) + } + + return params.ModelStatus{ + ModelTag: tag, + OwnerTag: model.Owner().String(), + Life: params.Life(model.Life().String()), + HostedMachineCount: len(hostedMachines), + ApplicationCount: len(applications), + Machines: modelMachines, + }, nil +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/common/modelstatus_test.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/common/modelstatus_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/common/modelstatus_test.go 1970-01-01 00:00:00.000000000 
+0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/common/modelstatus_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,155 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package common_test + +import ( + "github.com/juju/loggo" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/apiserver/common" + "github.com/juju/juju/apiserver/controller" + "github.com/juju/juju/apiserver/params" + apiservertesting "github.com/juju/juju/apiserver/testing" + "github.com/juju/juju/instance" + "github.com/juju/juju/state" + statetesting "github.com/juju/juju/state/testing" + "github.com/juju/juju/testing" + "github.com/juju/juju/testing/factory" +) + +type modelStatusSuite struct { + statetesting.StateSuite + + controller *controller.ControllerAPI + resources *common.Resources + authorizer apiservertesting.FakeAuthorizer +} + +var _ = gc.Suite(&modelStatusSuite{}) + +func (s *modelStatusSuite) SetUpTest(c *gc.C) { + // Initial config needs to be set before the StateSuite SetUpTest. + s.InitialConfig = testing.CustomModelConfig(c, testing.Attrs{ + "name": "controller", + }) + + s.StateSuite.SetUpTest(c) + s.resources = common.NewResources() + s.AddCleanup(func(_ *gc.C) { s.resources.StopAll() }) + + s.authorizer = apiservertesting.FakeAuthorizer{ + Tag: s.Owner, + AdminTag: s.Owner, + } + + controller, err := controller.NewControllerAPI(s.State, s.resources, s.authorizer) + c.Assert(err, jc.ErrorIsNil) + s.controller = controller + + loggo.GetLogger("juju.apiserver.controller").SetLogLevel(loggo.TRACE) +} + +func (s *modelStatusSuite) TestModelStatusNonAuth(c *gc.C) { + // Set up the user making the call. 
+ user := s.Factory.MakeUser(c, &factory.UserParams{NoModelUser: true}) + anAuthoriser := apiservertesting.FakeAuthorizer{ + Tag: user.Tag(), + } + endpoint, err := controller.NewControllerAPI(s.State, s.resources, anAuthoriser) + c.Assert(err, jc.ErrorIsNil) + controllerModelTag := s.State.ModelTag().String() + + req := params.Entities{ + Entities: []params.Entity{{Tag: controllerModelTag}}, + } + _, err = endpoint.ModelStatus(req) + c.Assert(err, gc.ErrorMatches, "permission denied") +} + +func (s *modelStatusSuite) TestModelStatusOwnerAllowed(c *gc.C) { + // Set up the user making the call. + owner := s.Factory.MakeUser(c, nil) + anAuthoriser := apiservertesting.FakeAuthorizer{ + Tag: owner.Tag(), + } + st := s.Factory.MakeModel(c, &factory.ModelParams{Owner: owner.Tag()}) + defer st.Close() + endpoint, err := controller.NewControllerAPI(s.State, s.resources, anAuthoriser) + c.Assert(err, jc.ErrorIsNil) + + req := params.Entities{ + Entities: []params.Entity{{Tag: st.ModelTag().String()}}, + } + _, err = endpoint.ModelStatus(req) + c.Assert(err, jc.ErrorIsNil) +} + +func (s *modelStatusSuite) TestModelStatus(c *gc.C) { + otherModelOwner := s.Factory.MakeModelUser(c, nil) + otherSt := s.Factory.MakeModel(c, &factory.ModelParams{ + Name: "dummytoo", + Owner: otherModelOwner.UserTag, + ConfigAttrs: testing.Attrs{ + "controller": false, + }, + }) + defer otherSt.Close() + + eight := uint64(8) + s.Factory.MakeMachine(c, &factory.MachineParams{ + Jobs: []state.MachineJob{state.JobManageModel}, + Characteristics: &instance.HardwareCharacteristics{CpuCores: &eight}, + InstanceId: "id-4", + }) + s.Factory.MakeMachine(c, &factory.MachineParams{ + Jobs: []state.MachineJob{state.JobHostUnits}, InstanceId: "id-5"}) + s.Factory.MakeApplication(c, &factory.ApplicationParams{ + Charm: s.Factory.MakeCharm(c, nil), + }) + + otherFactory := factory.NewFactory(otherSt) + otherFactory.MakeMachine(c, &factory.MachineParams{InstanceId: "id-8"}) + otherFactory.MakeMachine(c, 
&factory.MachineParams{InstanceId: "id-9"}) + otherFactory.MakeApplication(c, &factory.ApplicationParams{ + Charm: otherFactory.MakeCharm(c, nil), + }) + + controllerModelTag := s.State.ModelTag().String() + hostedModelTag := otherSt.ModelTag().String() + + req := params.Entities{ + Entities: []params.Entity{{Tag: controllerModelTag}, {Tag: hostedModelTag}}, + } + results, err := s.controller.ModelStatus(req) + c.Assert(err, jc.ErrorIsNil) + + arch := "amd64" + mem := uint64(64 * 1024 * 1024 * 1024) + stdHw := ¶ms.MachineHardware{ + Arch: &arch, + Mem: &mem, + } + c.Assert(results.Results, jc.DeepEquals, []params.ModelStatus{{ + ModelTag: controllerModelTag, + HostedMachineCount: 1, + ApplicationCount: 1, + OwnerTag: s.Owner.String(), + Life: params.Alive, + Machines: []params.ModelMachineInfo{ + {Id: "0", Hardware: ¶ms.MachineHardware{Cores: &eight}, InstanceId: "id-4", Status: "pending", WantsVote: true}, + {Id: "1", Hardware: stdHw, InstanceId: "id-5", Status: "pending"}, + }, + }, { + ModelTag: hostedModelTag, + HostedMachineCount: 2, + ApplicationCount: 1, + OwnerTag: otherModelOwner.UserTag.String(), + Life: params.Alive, + Machines: []params.ModelMachineInfo{ + {Id: "0", Hardware: stdHw, InstanceId: "id-8", Status: "pending"}, + {Id: "1", Hardware: stdHw, InstanceId: "id-9", Status: "pending"}, + }, + }}) +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/common/modeluser.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/common/modeluser.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/common/modeluser.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/common/modeluser.go 2016-10-13 14:31:49.000000000 +0000 @@ -11,7 +11,7 @@ "github.com/juju/errors" "github.com/juju/juju/apiserver/params" - "github.com/juju/juju/core/description" + "github.com/juju/juju/permission" "github.com/juju/juju/state" ) @@ -19,8 +19,8 @@ LastModelConnection(names.UserTag) (time.Time, error) } -// 
ModelUserInfo converts description.UserAccess to params.ModelUserInfo. -func ModelUserInfo(user description.UserAccess, st modelConnectionAbleBackend) (params.ModelUserInfo, error) { +// ModelUserInfo converts permission.UserAccess to params.ModelUserInfo. +func ModelUserInfo(user permission.UserAccess, st modelConnectionAbleBackend) (params.ModelUserInfo, error) { access, err := StateToParamsUserAccessPermission(user.Access) if err != nil { return params.ModelUserInfo{}, errors.Trace(err) @@ -44,14 +44,14 @@ return userInfo, nil } -// StateToParamsUserAccessPermission converts description.Access to params.AccessPermission. -func StateToParamsUserAccessPermission(descriptionAccess description.Access) (params.UserAccessPermission, error) { +// StateToParamsUserAccessPermission converts permission.Access to params.AccessPermission. +func StateToParamsUserAccessPermission(descriptionAccess permission.Access) (params.UserAccessPermission, error) { switch descriptionAccess { - case description.ReadAccess: + case permission.ReadAccess: return params.ModelReadAccess, nil - case description.WriteAccess: + case permission.WriteAccess: return params.ModelWriteAccess, nil - case description.AdminAccess: + case permission.AdminAccess: return params.ModelAdminAccess, nil } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/common/modelwatcher.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/common/modelwatcher.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/common/modelwatcher.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/common/modelwatcher.go 2016-10-13 14:31:49.000000000 +0000 @@ -6,7 +6,6 @@ import ( "github.com/juju/juju/apiserver/facade" "github.com/juju/juju/apiserver/params" - "github.com/juju/juju/environs" "github.com/juju/juju/state" "github.com/juju/juju/state/watcher" ) @@ -56,30 +55,10 @@ // ModelConfig returns the current environment's configuration. 
func (m *ModelWatcher) ModelConfig() (params.ModelConfigResult, error) { result := params.ModelConfigResult{} - config, err := m.st.ModelConfig() if err != nil { return result, err } - allAttrs := config.AllAttrs() - - if !m.authorizer.AuthModelManager() { - // Mask out any secrets in the environment configuration - // with values of the same type, so it'll pass validation. - // - // TODO(dimitern) 201309-26 bug #1231384 - // Delete the code below and mark the bug as fixed, - // once it's live tested on MAAS and 1.16 compatibility - // is dropped. - provider, err := environs.Provider(config.Type()) - if err != nil { - return result, err - } - secretAttrs, err := provider.SecretAttrs(config) - for k := range secretAttrs { - allAttrs[k] = "not available" - } - } - result.Config = allAttrs + result.Config = config.AllAttrs() return result, nil } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/common/modelwatcher_test.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/common/modelwatcher_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/common/modelwatcher_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/common/modelwatcher_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -99,26 +99,6 @@ c.Assert(err, gc.ErrorMatches, "pow") } -func (*environWatcherSuite) TestModelConfigMaskedSecrets(c *gc.C) { - authorizer := apiservertesting.FakeAuthorizer{ - Tag: names.NewMachineTag("0"), - EnvironManager: false, - } - testingEnvConfig := testingEnvConfig(c) - e := common.NewModelWatcher( - &fakeModelAccessor{modelConfig: testingEnvConfig}, - nil, - authorizer, - ) - result, err := e.ModelConfig() - c.Assert(err, jc.ErrorIsNil) - // Make sure the secret attribute is masked. - c.Check(result.Config["secret"], gc.Equals, "not available") - // And only that is masked. 
- result.Config["secret"] = "pork" - c.Check(map[string]interface{}(result.Config), jc.DeepEquals, testingEnvConfig.AllAttrs()) -} - func testingEnvConfig(c *gc.C) *config.Config { env, err := bootstrap.Prepare( modelcmd.BootstrapContext(testing.Context(c)), diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/common/networkingcommon/export_test.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/common/networkingcommon/export_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/common/networkingcommon/export_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/common/networkingcommon/export_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,9 +0,0 @@ -// Copyright 2016 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package networkingcommon - -var ( - NetInterfaces = &netInterfaces - InterfaceAddrs = &interfaceAddrs -) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/common/networkingcommon/types.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/common/networkingcommon/types.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/common/networkingcommon/types.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/common/networkingcommon/types.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,18 +4,13 @@ package networkingcommon import ( - "encoding/json" "net" - "regexp" - "sort" - "strings" "github.com/juju/errors" "github.com/juju/utils/set" "gopkg.in/juju/names.v2" "github.com/juju/juju/apiserver/params" - "github.com/juju/juju/cloudconfig/instancecfg" "github.com/juju/juju/environs" "github.com/juju/juju/network" providercommon "github.com/juju/juju/provider/common" @@ -126,6 +121,9 @@ // AllSubnets returns all backing subnets. AllSubnets() ([]BackingSubnet, error) + + // ModelTag returns the tag of the model this state is associated to. 
+ ModelTag() names.ModelTag } func BackingSubnetToParamsSubnet(subnet BackingSubnet) params.Subnet { @@ -150,85 +148,6 @@ } } -type byMACThenCIDRThenIndexThenName []params.NetworkConfig - -func (c byMACThenCIDRThenIndexThenName) Len() int { - return len(c) -} - -func (c byMACThenCIDRThenIndexThenName) Swap(i, j int) { - orgI, orgJ := c[i], c[j] - c[j], c[i] = orgI, orgJ -} - -func (c byMACThenCIDRThenIndexThenName) Less(i, j int) bool { - if c[i].MACAddress == c[j].MACAddress { - // Same MACAddress means related interfaces. - if c[i].CIDR == "" || c[j].CIDR == "" { - // Empty CIDRs go at the bottom, otherwise order by InterfaceName. - return c[i].CIDR != "" || c[i].InterfaceName < c[j].InterfaceName - } - if c[i].DeviceIndex == c[j].DeviceIndex { - if c[i].InterfaceName == c[j].InterfaceName { - // Sort addresses of the same interface. - return c[i].CIDR < c[j].CIDR || c[i].Address < c[j].Address - } - // Prefer shorter names (e.g. parents) with equal DeviceIndex. - return c[i].InterfaceName < c[j].InterfaceName - } - // When both CIDR and DeviceIndex are non-empty, order by DeviceIndex - return c[i].DeviceIndex < c[j].DeviceIndex - } - // Group by MACAddress. - return c[i].MACAddress < c[j].MACAddress -} - -// SortNetworkConfigsByParents returns the given input sorted, such that any -// child interfaces appear after their parents. -func SortNetworkConfigsByParents(input []params.NetworkConfig) []params.NetworkConfig { - sortedInputCopy := CopyNetworkConfigs(input) - sort.Stable(byMACThenCIDRThenIndexThenName(sortedInputCopy)) - return sortedInputCopy -} - -type byInterfaceName []params.NetworkConfig - -func (c byInterfaceName) Len() int { - return len(c) -} - -func (c byInterfaceName) Swap(i, j int) { - orgI, orgJ := c[i], c[j] - c[j], c[i] = orgI, orgJ -} - -func (c byInterfaceName) Less(i, j int) bool { - return c[i].InterfaceName < c[j].InterfaceName -} - -// SortNetworkConfigsByInterfaceName returns the given input sorted by -// InterfaceName. 
-func SortNetworkConfigsByInterfaceName(input []params.NetworkConfig) []params.NetworkConfig { - sortedInputCopy := CopyNetworkConfigs(input) - sort.Stable(byInterfaceName(sortedInputCopy)) - return sortedInputCopy -} - -// NetworkConfigsToIndentedJSON returns the given input as an indented JSON -// string. -func NetworkConfigsToIndentedJSON(input []params.NetworkConfig) (string, error) { - jsonBytes, err := json.MarshalIndent(input, "", " ") - if err != nil { - return "", err - } - return string(jsonBytes), nil -} - -// CopyNetworkConfigs returns a copy of the given input -func CopyNetworkConfigs(input []params.NetworkConfig) []params.NetworkConfig { - return append([]params.NetworkConfig(nil), input...) -} - // NetworkConfigFromInterfaceInfo converts a slice of network.InterfaceInfo into // the equivalent params.NetworkConfig slice. func NetworkConfigFromInterfaceInfo(interfaceInfos []network.InterfaceInfo) []params.NetworkConfig { @@ -372,210 +291,333 @@ return netEnviron, nil } -var vlanInterfaceNameRegex = regexp.MustCompile(`^.+\.[0-9]{1,4}[^0-9]?$`) +// NetworkConfigSource defines the necessary calls to obtain the network +// configuration of a machine. +type NetworkConfigSource interface { + // SysClassNetPath returns the Linux kernel userspace SYSFS path used by + // this source. DefaultNetworkConfigSource() uses network.SysClassNetPath. + SysClassNetPath() string -var ( - netInterfaces = net.Interfaces - interfaceAddrs = (*net.Interface).Addrs -) + // Interfaces returns information about all network interfaces on the + // machine as []net.Interface. + Interfaces() ([]net.Interface, error) + + // InterfaceAddresses returns information about all addresses assigned to + // the network interface with the given name. + InterfaceAddresses(name string) ([]net.Addr, error) +} + +type netPackageConfigSource struct{} + +// SysClassNetPath implements NetworkConfigSource. 
+func (n *netPackageConfigSource) SysClassNetPath() string { + return network.SysClassNetPath +} + +// Interfaces implements NetworkConfigSource. +func (n *netPackageConfigSource) Interfaces() ([]net.Interface, error) { + return net.Interfaces() +} + +// InterfaceAddresses implements NetworkConfigSource. +func (n *netPackageConfigSource) InterfaceAddresses(name string) ([]net.Addr, error) { + iface, err := net.InterfaceByName(name) + if err != nil { + return nil, errors.Trace(err) + } + return iface.Addrs() +} + +// DefaultNetworkConfigSource returns a NetworkConfigSource backed by the net +// package, to be used with GetObservedNetworkConfig(). +func DefaultNetworkConfigSource() NetworkConfigSource { + return &netPackageConfigSource{} +} -// GetObservedNetworkConfig discovers what network interfaces exist on the -// machine, and returns that as a sorted slice of params.NetworkConfig to later -// update the state network config we have about the machine. -func GetObservedNetworkConfig() ([]params.NetworkConfig, error) { +// GetObservedNetworkConfig uses the given source to find all available network +// interfaces and their assigned addresses, and returns the result as +// []params.NetworkConfig. In addition to what the source returns, a few +// additional transformations are done: +// +// * On any OS, the state (UP/DOWN) of each interface and the DeviceIndex field, +// will be correctly populated. Loopback interfaces are also properly detected +// and will have InterfaceType set LoopbackInterface. +// * On Linux only, the InterfaceType field will be reliably detected for a few +// types: BondInterface, BridgeInterface, VLAN_8021QInterface. +// * Also on Linux, for interfaces that are discovered to be ports on a bridge, +// the ParentInterfaceName will be populated with the name of the bridge. +// * ConfigType fields will be set to ConfigManual when no address is detected, +// or ConfigStatic when it is. 
+// * TODO: any IPv6 addresses found will be ignored and treated as empty ATM. +// +// Result entries will be grouped by InterfaceName, in the same order they are +// returned by the given source. +func GetObservedNetworkConfig(source NetworkConfigSource) ([]params.NetworkConfig, error) { logger.Tracef("discovering observed machine network config...") - interfaces, err := netInterfaces() + interfaces, err := source.Interfaces() if err != nil { return nil, errors.Annotate(err, "cannot get network interfaces") } - var observedConfig []params.NetworkConfig + var namesOrder []string + nameToConfigs := make(map[string][]params.NetworkConfig) + sysClassNetPath := source.SysClassNetPath() for _, nic := range interfaces { - isUp := nic.Flags&net.FlagUp > 0 + nicType := network.ParseInterfaceType(sysClassNetPath, nic.Name) + nicConfig := interfaceToNetworkConfig(nic, nicType) + + if nicType == network.BridgeInterface { + updateParentForBridgePorts(nic.Name, sysClassNetPath, nameToConfigs) + } + + seenSoFar := false + if existing, ok := nameToConfigs[nic.Name]; ok { + nicConfig.ParentInterfaceName = existing[0].ParentInterfaceName + // If only ParentInterfaceName was set in a previous iteration (e.g. + // if the bridge appeared before the port), treat the interface as + // not yet seen. 
+ seenSoFar = existing[0].InterfaceName != "" + } - derivedType := network.EthernetInterface - derivedConfigType := "" - if nic.Flags&net.FlagLoopback > 0 { - derivedType = network.LoopbackInterface - derivedConfigType = string(network.ConfigLoopback) - } else if vlanInterfaceNameRegex.MatchString(nic.Name) { - derivedType = network.VLAN_8021QInterface - } - - nicConfig := params.NetworkConfig{ - DeviceIndex: nic.Index, - MACAddress: nic.HardwareAddr.String(), - ConfigType: derivedConfigType, - MTU: nic.MTU, - InterfaceName: nic.Name, - InterfaceType: string(derivedType), - NoAutoStart: !isUp, - Disabled: !isUp, + if !seenSoFar { + nameToConfigs[nic.Name] = []params.NetworkConfig(nil) + namesOrder = append(namesOrder, nic.Name) } - addrs, err := interfaceAddrs(&nic) + addrs, err := source.InterfaceAddresses(nic.Name) if err != nil { return nil, errors.Annotatef(err, "cannot get interface %q addresses", nic.Name) } if len(addrs) == 0 { - observedConfig = append(observedConfig, nicConfig) logger.Infof("no addresses observed on interface %q", nic.Name) + nameToConfigs[nic.Name] = append(nameToConfigs[nic.Name], nicConfig) continue } for _, addr := range addrs { - cidrAddress := addr.String() - if cidrAddress == "" { - continue - } - ip, ipNet, err := net.ParseCIDR(cidrAddress) + addressConfig, err := interfaceAddressToNetworkConfig(nic.Name, nicConfig.ConfigType, addr) if err != nil { - logger.Warningf("cannot parse interface %q address %q as CIDR: %v", nic.Name, cidrAddress, err) - if ip := net.ParseIP(cidrAddress); ip == nil { - return nil, errors.Errorf("cannot parse interface %q IP address %q", nic.Name, cidrAddress) - } else { - ipNet = &net.IPNet{} - } - ipNet.IP = ip - ipNet.Mask = net.IPv4Mask(255, 255, 255, 0) - logger.Infof("assuming interface %q has observed address %q", nic.Name, ipNet.String()) - } - if ip.To4() == nil { - logger.Debugf("skipping observed IPv6 address %q on %q: not fully supported yet", ip, nic.Name) - continue + return nil, 
errors.Trace(err) } + // Need to copy nicConfig so only the fields relevant for the + // current address are updated. nicConfigCopy := nicConfig - nicConfigCopy.CIDR = ipNet.String() - nicConfigCopy.Address = ip.String() + nicConfigCopy.Address = addressConfig.Address + nicConfigCopy.CIDR = addressConfig.CIDR + nicConfigCopy.ConfigType = addressConfig.ConfigType + nameToConfigs[nic.Name] = append(nameToConfigs[nic.Name], nicConfigCopy) + } + } + + // Return all interfaces configs in input order. + var observedConfig []params.NetworkConfig + for _, name := range namesOrder { + observedConfig = append(observedConfig, nameToConfigs[name]...) + } + logger.Tracef("observed network config: %+v", observedConfig) + return observedConfig, nil +} - // TODO(dimitern): Add DNS servers, search domains, and gateway - // later. +func interfaceToNetworkConfig(nic net.Interface, nicType network.InterfaceType) params.NetworkConfig { + configType := network.ConfigManual // assume manual initially, until we parse the address. 
+ isUp := nic.Flags&net.FlagUp > 0 + isLoopback := nic.Flags&net.FlagLoopback > 0 + isUnknown := nicType == network.UnknownInterface - observedConfig = append(observedConfig, nicConfigCopy) - } + switch { + case isUnknown && isLoopback: + nicType = network.LoopbackInterface + configType = network.ConfigLoopback + case isUnknown: + nicType = network.EthernetInterface + } + + return params.NetworkConfig{ + DeviceIndex: nic.Index, + MACAddress: nic.HardwareAddr.String(), + ConfigType: string(configType), + MTU: nic.MTU, + InterfaceName: nic.Name, + InterfaceType: string(nicType), + NoAutoStart: !isUp, + Disabled: !isUp, } - sortedConfig := SortNetworkConfigsByParents(observedConfig) +} - logger.Tracef("about to update network config with observed: %+v", sortedConfig) - return sortedConfig, nil +func updateParentForBridgePorts(bridgeName, sysClassNetPath string, nameToConfigs map[string][]params.NetworkConfig) { + ports := network.GetBridgePorts(sysClassNetPath, bridgeName) + for _, portName := range ports { + portConfigs, ok := nameToConfigs[portName] + if ok { + portConfigs[0].ParentInterfaceName = bridgeName + } else { + portConfigs = []params.NetworkConfig{{ParentInterfaceName: bridgeName}} + } + nameToConfigs[portName] = portConfigs + } } -// MergeProviderAndObservedNetworkConfigs returns the effective, sorted, network -// configs after merging providerConfig with observedConfig. 
-func MergeProviderAndObservedNetworkConfigs(providerConfigs, observedConfigs []params.NetworkConfig) ([]params.NetworkConfig, error) { - providerConfigsByName := make(map[string][]params.NetworkConfig) - sortedProviderConfigs := SortNetworkConfigsByParents(providerConfigs) - for _, config := range sortedProviderConfigs { - name := config.InterfaceName - providerConfigsByName[name] = append(providerConfigsByName[name], config) +func interfaceAddressToNetworkConfig(interfaceName, configType string, address net.Addr) (params.NetworkConfig, error) { + config := params.NetworkConfig{ + ConfigType: configType, } - jsonProviderConfig, err := NetworkConfigsToIndentedJSON(sortedProviderConfigs) - if err != nil { - return nil, errors.Annotatef(err, "cannot serialize provider config %#v as JSON", sortedProviderConfigs) + cidrAddress := address.String() + if cidrAddress == "" { + return config, nil } - logger.Debugf("provider network config of machine:\n%s", jsonProviderConfig) - sortedObservedConfigs := SortNetworkConfigsByParents(observedConfigs) - jsonObservedConfig, err := NetworkConfigsToIndentedJSON(sortedObservedConfigs) + ip, ipNet, err := net.ParseCIDR(cidrAddress) if err != nil { - return nil, errors.Annotatef(err, "cannot serialize observed config %#v as JSON", sortedObservedConfigs) + logger.Infof("cannot parse %q on interface %q as CIDR, trying as IP address: %v", cidrAddress, interfaceName, err) + if ip = net.ParseIP(cidrAddress); ip == nil { + return config, errors.Errorf("cannot parse IP address %q on interface %q", cidrAddress, interfaceName) + } else { + ipNet = &net.IPNet{IP: ip} + } + } + if ip.To4() == nil { + logger.Debugf("skipping observed IPv6 address %q on %q: not fully supported yet", ip, interfaceName) + // TODO(dimitern): Treat IPv6 addresses as empty until we can handle + // them reliably. 
+ return config, nil } - logger.Debugf("observed network config of machine:\n%s", jsonObservedConfig) - var mergedConfigs []params.NetworkConfig - for _, config := range sortedObservedConfigs { - name := config.InterfaceName - logger.Tracef("merging observed config for device %q: %+v", name, config) - if strings.HasPrefix(name, instancecfg.DefaultBridgePrefix) { - logger.Tracef("found potential juju bridge %q in observed config", name) - unprefixedName := strings.TrimPrefix(name, instancecfg.DefaultBridgePrefix) - underlyingConfigs, underlyingKnownByProvider := providerConfigsByName[unprefixedName] - logger.Tracef("device %q underlying %q has provider config: %+v", name, unprefixedName, underlyingConfigs) - if underlyingKnownByProvider { - // This config is for a bridge created by Juju and not known by - // the provider. The bridge is configured to adopt the address - // allocated to the underlying interface, which is known by the - // provider. However, since the same underlying interface can - // have multiple addresses, we need to match the adopted - // bridgeConfig to the correct address. - - var underlyingConfig params.NetworkConfig - for i, underlying := range underlyingConfigs { - if underlying.Address == config.Address { - logger.Tracef("replacing undelying config %+v", underlying) - // Remove what we found before changing it below. - underlyingConfig = underlying - underlyingConfigs = append(underlyingConfigs[:i], underlyingConfigs[i+1:]...) 
- break - } - } - logger.Tracef("underlying provider config after update: %+v", underlyingConfigs) - - bridgeConfig := config - bridgeConfig.InterfaceType = string(network.BridgeInterface) - bridgeConfig.ConfigType = underlyingConfig.ConfigType - bridgeConfig.VLANTag = underlyingConfig.VLANTag - bridgeConfig.ProviderId = "" // Juju-created bridges never have a ProviderID - bridgeConfig.ProviderSpaceId = underlyingConfig.ProviderSpaceId - bridgeConfig.ProviderVLANId = underlyingConfig.ProviderVLANId - bridgeConfig.ProviderSubnetId = underlyingConfig.ProviderSubnetId - bridgeConfig.ProviderAddressId = underlyingConfig.ProviderAddressId - if underlyingParent := underlyingConfig.ParentInterfaceName; underlyingParent != "" { - bridgeConfig.ParentInterfaceName = instancecfg.DefaultBridgePrefix + underlyingParent - } - - underlyingConfig.ConfigType = string(network.ConfigManual) - underlyingConfig.ParentInterfaceName = name - underlyingConfig.ProviderAddressId = "" - underlyingConfig.CIDR = "" - underlyingConfig.Address = "" - - underlyingConfigs = append(underlyingConfigs, underlyingConfig) - providerConfigsByName[unprefixedName] = underlyingConfigs - logger.Tracef("updated provider network config by name: %+v", providerConfigsByName) + if ipNet.Mask != nil { + config.CIDR = ipNet.String() + } + config.Address = ip.String() + if configType != string(network.ConfigLoopback) { + config.ConfigType = string(network.ConfigStatic) + } - mergedConfigs = append(mergedConfigs, bridgeConfig) - continue - } + // TODO(dimitern): Add DNS servers, search domains, and gateway + // later. + + return config, nil +} + +// MergeProviderAndObservedNetworkConfigs returns the effective network configs, +// using observedConfigs as a base and selectively updating it using the +// matching providerConfigs for each interface. 
+func MergeProviderAndObservedNetworkConfigs(providerConfigs, observedConfigs []params.NetworkConfig) []params.NetworkConfig { + + providerConfigByName := networkConfigsByName(providerConfigs) + logger.Tracef("known provider config by name: %+v", providerConfigByName) + + providerConfigByAddress := networkConfigsByAddress(providerConfigs) + logger.Tracef("known provider config by address: %+v", providerConfigByAddress) + + var results []params.NetworkConfig + for _, observed := range observedConfigs { + + name, ipAddress := observed.InterfaceName, observed.Address + finalConfig := observed + + providerConfig, known := providerConfigByName[name] + if known { + finalConfig = mergeObservedAndProviderInterfaceConfig(finalConfig, providerConfig) + logger.Debugf("updated observed interface config for %q with: %+v", name, providerConfig) } - knownProviderConfigs, knownByProvider := providerConfigsByName[name] - if !knownByProvider { - // Not known by the provider and not a Juju-created bridge, so just - // use the observed config for it. - logger.Tracef("device %q not known to provider - adding only observed config: %+v", name, config) - mergedConfigs = append(mergedConfigs, config) - continue + providerConfig, known = providerConfigByAddress[ipAddress] + if known { + finalConfig = mergeObservedAndProviderAddressConfig(finalConfig, providerConfig) + logger.Debugf("updated observed address config for %q with: %+v", name, providerConfig) } - logger.Tracef("device %q has known provider network config: %+v", name, knownProviderConfigs) - for _, providerConfig := range knownProviderConfigs { - if providerConfig.Address == config.Address { - logger.Tracef( - "device %q has observed address %q, index %d, and MTU %q; overriding index %d and MTU %d from provider config", - name, config.Address, config.DeviceIndex, config.MTU, providerConfig.DeviceIndex, providerConfig.MTU, - ) - // Prefer observed device indices and MTU values as more up-to-date. 
- providerConfig.DeviceIndex = config.DeviceIndex - providerConfig.MTU = config.MTU + results = append(results, finalConfig) + logger.Debugf("merged config for %q: %+v", name, finalConfig) + } - mergedConfigs = append(mergedConfigs, providerConfig) - break - } - } + return results +} + +func networkConfigsByName(input []params.NetworkConfig) map[string]params.NetworkConfig { + configsByName := make(map[string]params.NetworkConfig, len(input)) + for _, config := range input { + configsByName[config.InterfaceName] = config } + return configsByName +} - sortedMergedConfigs := SortNetworkConfigsByParents(mergedConfigs) +func networkConfigsByAddress(input []params.NetworkConfig) map[string]params.NetworkConfig { + configsByAddress := make(map[string]params.NetworkConfig, len(input)) + for _, config := range input { + configsByAddress[config.Address] = config + } + return configsByAddress +} - jsonMergedConfig, err := NetworkConfigsToIndentedJSON(sortedMergedConfigs) - if err != nil { - errors.Annotatef(err, "cannot serialize merged config %#v as JSON", sortedMergedConfigs) +func mergeObservedAndProviderInterfaceConfig(observedConfig, providerConfig params.NetworkConfig) params.NetworkConfig { + finalConfig := observedConfig + + // The following fields cannot be observed and are only known by the + // provider. + finalConfig.ProviderId = providerConfig.ProviderId + finalConfig.ProviderVLANId = providerConfig.ProviderVLANId + finalConfig.ProviderSubnetId = providerConfig.ProviderSubnetId + + // The following few fields are only updated if their observed values are + // empty. 
+ + if observedConfig.InterfaceType == "" { + finalConfig.InterfaceType = providerConfig.InterfaceType + } + + if observedConfig.VLANTag == 0 { + finalConfig.VLANTag = providerConfig.VLANTag + } + + if observedConfig.ParentInterfaceName == "" { + finalConfig.ParentInterfaceName = providerConfig.ParentInterfaceName + } + + return finalConfig +} + +func mergeObservedAndProviderAddressConfig(observedConfig, providerConfig params.NetworkConfig) params.NetworkConfig { + finalConfig := observedConfig + + // The following fields cannot be observed and are only known by the + // provider. + finalConfig.ProviderAddressId = providerConfig.ProviderAddressId + finalConfig.ProviderSubnetId = providerConfig.ProviderSubnetId + finalConfig.ProviderSpaceId = providerConfig.ProviderSpaceId + + // The following few fields are only updated if their observed values are + // empty. + + if observedConfig.ProviderVLANId == "" { + finalConfig.ProviderVLANId = providerConfig.ProviderVLANId + } + + if observedConfig.VLANTag == 0 { + finalConfig.VLANTag = providerConfig.VLANTag + } + + if observedConfig.ConfigType == "" { + finalConfig.ConfigType = providerConfig.ConfigType + } + + if observedConfig.CIDR == "" { + finalConfig.CIDR = providerConfig.CIDR + } + + if observedConfig.GatewayAddress == "" { + finalConfig.GatewayAddress = providerConfig.GatewayAddress + } + + if len(observedConfig.DNSServers) == 0 { + finalConfig.DNSServers = providerConfig.DNSServers + } + + if len(observedConfig.DNSSearchDomains) == 0 { + finalConfig.DNSSearchDomains = providerConfig.DNSSearchDomains } - logger.Debugf("combined machine network config:\n%s", jsonMergedConfig) - return mergedConfigs, nil + return finalConfig } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/common/networkingcommon/types_test.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/common/networkingcommon/types_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/common/networkingcommon/types_test.go 
2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/common/networkingcommon/types_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,10 +4,14 @@ package networkingcommon_test import ( + "errors" "fmt" - "math/rand" + "io/ioutil" "net" + "os" + "path/filepath" + "github.com/juju/testing" jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" @@ -20,23 +24,20 @@ type TypesSuite struct { coretesting.BaseSuite + + stubConfigSource *stubNetworkConfigSource } var _ = gc.Suite(&TypesSuite{}) -func (s *TypesSuite) TestCopyNetworkConfig(c *gc.C) { - inputAndExpectedOutput := []params.NetworkConfig{{ - InterfaceName: "foo", - DNSServers: []string{"bar", "baz"}, - Address: "0.1.2.3", - }, { - DeviceIndex: 124, - ParentInterfaceName: "parent", - ProviderId: "nic-id", - }} - - output := networkingcommon.CopyNetworkConfigs(inputAndExpectedOutput) - c.Assert(output, jc.DeepEquals, inputAndExpectedOutput) +func (s *TypesSuite) SetUpTest(c *gc.C) { + s.BaseSuite.SetUpTest(c) + s.stubConfigSource = &stubNetworkConfigSource{ + Stub: &testing.Stub{}, + fakeSysClassNetPath: c.MkDir(), + interfaces: exampleObservedInterfaces, + interfaceAddrs: exampleObservedInterfaceAddrs, + } } func mustParseMAC(value string) net.HardwareAddr { @@ -59,12 +60,6 @@ HardwareAddr: mustParseMAC("aa:bb:cc:dd:ee:f0"), Flags: net.FlagUp | net.FlagBroadcast | net.FlagMulticast, }, { - Index: 3, - MTU: 1500, - Name: "eth1", - HardwareAddr: mustParseMAC("aa:bb:cc:dd:ee:f1"), - Flags: net.FlagUp | net.FlagBroadcast | net.FlagMulticast, -}, { Index: 10, MTU: 1500, Name: "br-eth0", @@ -77,6 +72,12 @@ HardwareAddr: mustParseMAC("aa:bb:cc:dd:ee:f1"), Flags: net.FlagUp | net.FlagBroadcast | net.FlagMulticast, }, { + Index: 3, + MTU: 1500, + Name: "eth1", + HardwareAddr: mustParseMAC("aa:bb:cc:dd:ee:f1"), + Flags: net.FlagUp | net.FlagBroadcast | net.FlagMulticast, +}, { Index: 12, MTU: 1500, Name: "br-eth0.100", @@ -158,14 +159,14 @@ var _ net.Addr = (*fakeAddr)(nil) var 
exampleObservedInterfaceAddrs = map[string][]net.Addr{ - "eth0": nil, - "eth1": nil, - "eth0.50": nil, - "eth0.100": nil, - "eth0.25": nil, - "eth1.11": nil, - "eth1.12": nil, - "eth1.13": nil, + "eth0": {fakeAddr("fe80::5054:ff:fedd:eef0/64")}, + "eth1": {fakeAddr("fe80::5054:ff:fedd:eef1/64")}, + "eth0.50": {fakeAddr("fe80::5054:ff:fedd:eef0:50/64")}, + "eth0.100": {fakeAddr("fe80::5054:ff:fedd:eef0:100/64")}, + "eth0.25": {fakeAddr("fe80::5054:ff:fedd:eef0:25/64")}, + "eth1.11": {fakeAddr("fe80::5054:ff:fedd:eef1:11/64")}, + "eth1.12": {fakeAddr("fe80::5054:ff:fedd:eef1:12/64")}, + "eth1.13": {fakeAddr("fe80::5054:ff:fedd:eef1:13/64")}, "lo": {fakeAddr("127.0.0.1/8"), fakeAddr("::1/128")}, "br-eth0": {fakeAddr("10.20.19.100/24"), fakeAddr("10.20.19.123/24"), fakeAddr("fe80::5054:ff:fedd:eef0/64")}, "br-eth1": {fakeAddr("10.20.19.105/24"), fakeAddr("fe80::5054:ff:fedd:eef1/64")}, @@ -177,7 +178,7 @@ "br-eth1.13": {fakeAddr("10.13.19.101/24"), fakeAddr("fe80::5054:ff:fedd:eef1/64")}, } -var expectedSortedObservedNetworkConfigs = []params.NetworkConfig{{ +var expectedObservedNetworkConfigs = []params.NetworkConfig{{ DeviceIndex: 1, InterfaceName: "lo", InterfaceType: string(network.LoopbackInterface), @@ -189,23 +190,25 @@ }, { DeviceIndex: 10, InterfaceName: "br-eth0", - InterfaceType: string(network.EthernetInterface), + InterfaceType: string(network.BridgeInterface), MACAddress: "aa:bb:cc:dd:ee:f0", CIDR: "10.20.19.0/24", Address: "10.20.19.100", MTU: 1500, + ConfigType: string(network.ConfigStatic), }, { DeviceIndex: 10, InterfaceName: "br-eth0", - InterfaceType: string(network.EthernetInterface), + InterfaceType: string(network.BridgeInterface), MACAddress: "aa:bb:cc:dd:ee:f0", CIDR: "10.20.19.0/24", Address: "10.20.19.123", MTU: 1500, + ConfigType: string(network.ConfigStatic), }, { DeviceIndex: 12, InterfaceName: "br-eth0.100", - InterfaceType: string(network.VLAN_8021QInterface), + InterfaceType: string(network.BridgeInterface), MACAddress: 
"aa:bb:cc:dd:ee:f0", CIDR: "10.100.19.0/24", Address: "10.100.19.100", @@ -213,102 +216,124 @@ }, { DeviceIndex: 14, InterfaceName: "br-eth0.250", - InterfaceType: string(network.VLAN_8021QInterface), + InterfaceType: string(network.BridgeInterface), MACAddress: "aa:bb:cc:dd:ee:f0", CIDR: "10.250.19.0/24", Address: "10.250.19.100", MTU: 1500, + ConfigType: string(network.ConfigStatic), }, { DeviceIndex: 16, InterfaceName: "br-eth0.50", - InterfaceType: string(network.VLAN_8021QInterface), + InterfaceType: string(network.BridgeInterface), MACAddress: "aa:bb:cc:dd:ee:f0", CIDR: "10.50.19.0/24", Address: "10.50.19.100", MTU: 1500, + ConfigType: string(network.ConfigStatic), }, { - DeviceIndex: 2, - InterfaceName: "eth0", - InterfaceType: string(network.EthernetInterface), - MACAddress: "aa:bb:cc:dd:ee:f0", - MTU: 1500, + DeviceIndex: 2, + InterfaceName: "eth0", + ParentInterfaceName: "br-eth0", + InterfaceType: string(network.EthernetInterface), + MACAddress: "aa:bb:cc:dd:ee:f0", + MTU: 1500, + ConfigType: string(network.ConfigManual), }, { - DeviceIndex: 13, - InterfaceName: "eth0.100", - InterfaceType: string(network.VLAN_8021QInterface), - MACAddress: "aa:bb:cc:dd:ee:f0", - MTU: 1500, + DeviceIndex: 13, + InterfaceName: "eth0.100", + ParentInterfaceName: "br-eth0.100", + InterfaceType: string(network.VLAN_8021QInterface), + MACAddress: "aa:bb:cc:dd:ee:f0", + MTU: 1500, + ConfigType: string(network.ConfigManual), }, { - DeviceIndex: 15, - InterfaceName: "eth0.250", - InterfaceType: string(network.VLAN_8021QInterface), - MACAddress: "aa:bb:cc:dd:ee:f0", - MTU: 1500, + DeviceIndex: 15, + InterfaceName: "eth0.250", + ParentInterfaceName: "br-eth0.250", + InterfaceType: string(network.VLAN_8021QInterface), + MACAddress: "aa:bb:cc:dd:ee:f0", + MTU: 1500, + ConfigType: string(network.ConfigManual), }, { - DeviceIndex: 17, - InterfaceName: "eth0.50", - InterfaceType: string(network.VLAN_8021QInterface), - MACAddress: "aa:bb:cc:dd:ee:f0", - MTU: 1500, + DeviceIndex: 17, + 
InterfaceName: "eth0.50", + ParentInterfaceName: "br-eth0.50", + InterfaceType: string(network.VLAN_8021QInterface), + MACAddress: "aa:bb:cc:dd:ee:f0", + MTU: 1500, + ConfigType: string(network.ConfigManual), }, { DeviceIndex: 11, InterfaceName: "br-eth1", - InterfaceType: string(network.EthernetInterface), + InterfaceType: string(network.BridgeInterface), MACAddress: "aa:bb:cc:dd:ee:f1", CIDR: "10.20.19.0/24", Address: "10.20.19.105", MTU: 1500, + ConfigType: string(network.ConfigStatic), }, { DeviceIndex: 18, InterfaceName: "br-eth1.11", - InterfaceType: string(network.VLAN_8021QInterface), + InterfaceType: string(network.BridgeInterface), MACAddress: "aa:bb:cc:dd:ee:f1", CIDR: "10.11.19.0/24", Address: "10.11.19.101", MTU: 1500, + ConfigType: string(network.ConfigStatic), }, { DeviceIndex: 20, InterfaceName: "br-eth1.12", - InterfaceType: string(network.VLAN_8021QInterface), + InterfaceType: string(network.BridgeInterface), MACAddress: "aa:bb:cc:dd:ee:f1", CIDR: "10.12.19.0/24", Address: "10.12.19.101", MTU: 1500, + ConfigType: string(network.ConfigStatic), }, { DeviceIndex: 22, InterfaceName: "br-eth1.13", - InterfaceType: string(network.VLAN_8021QInterface), + InterfaceType: string(network.BridgeInterface), MACAddress: "aa:bb:cc:dd:ee:f1", CIDR: "10.13.19.0/24", Address: "10.13.19.101", MTU: 1500, + ConfigType: string(network.ConfigStatic), }, { - DeviceIndex: 3, - InterfaceName: "eth1", - InterfaceType: string(network.EthernetInterface), - MACAddress: "aa:bb:cc:dd:ee:f1", - MTU: 1500, + DeviceIndex: 3, + InterfaceName: "eth1", + ParentInterfaceName: "br-eth1", + InterfaceType: string(network.EthernetInterface), + MACAddress: "aa:bb:cc:dd:ee:f1", + MTU: 1500, + ConfigType: string(network.ConfigManual), }, { - DeviceIndex: 19, - InterfaceName: "eth1.11", - InterfaceType: string(network.VLAN_8021QInterface), - MACAddress: "aa:bb:cc:dd:ee:f1", - MTU: 1500, + DeviceIndex: 19, + InterfaceName: "eth1.11", + ParentInterfaceName: "br-eth1.11", + InterfaceType: 
string(network.VLAN_8021QInterface), + MACAddress: "aa:bb:cc:dd:ee:f1", + MTU: 1500, + ConfigType: string(network.ConfigManual), }, { - DeviceIndex: 21, - InterfaceName: "eth1.12", - InterfaceType: string(network.VLAN_8021QInterface), - MACAddress: "aa:bb:cc:dd:ee:f1", - MTU: 1500, + DeviceIndex: 21, + InterfaceName: "eth1.12", + ParentInterfaceName: "br-eth1.12", + InterfaceType: string(network.VLAN_8021QInterface), + MACAddress: "aa:bb:cc:dd:ee:f1", + MTU: 1500, + ConfigType: string(network.ConfigManual), }, { - DeviceIndex: 23, - InterfaceName: "eth1.13", - InterfaceType: string(network.VLAN_8021QInterface), - MACAddress: "aa:bb:cc:dd:ee:f1", - MTU: 1500, + DeviceIndex: 23, + InterfaceName: "eth1.13", + ParentInterfaceName: "br-eth1.13", + InterfaceType: string(network.VLAN_8021QInterface), + MACAddress: "aa:bb:cc:dd:ee:f1", + MTU: 1500, + ConfigType: string(network.ConfigManual), }} -var expectedSortedProviderNetworkConfigs = []params.NetworkConfig{{ +var expectedProviderNetworkConfigs = []params.NetworkConfig{{ InterfaceName: "eth0", InterfaceType: string(network.EthernetInterface), MACAddress: "aa:bb:cc:dd:ee:f0", @@ -436,7 +461,7 @@ ProviderAddressId: "1302", }} -var expectedSortedMergedNetworkConfigs = []params.NetworkConfig{{ +var expectedFinalNetworkConfigs = []params.NetworkConfig{{ DeviceIndex: 1, InterfaceName: "lo", InterfaceType: string(network.LoopbackInterface), @@ -482,7 +507,7 @@ Address: "10.100.19.100", MTU: 1500, ConfigType: string(network.ConfigStatic), - ParentInterfaceName: "br-eth0", + ParentInterfaceName: "", ProviderSubnetId: "6", ProviderVLANId: "5005", VLANTag: 100, @@ -496,7 +521,7 @@ Address: "10.250.19.100", MTU: 1500, ConfigType: string(network.ConfigStatic), - ParentInterfaceName: "br-eth0", + ParentInterfaceName: "", ProviderSubnetId: "8", ProviderVLANId: "5008", VLANTag: 250, @@ -510,7 +535,7 @@ Address: "10.50.19.100", MTU: 1500, ConfigType: string(network.ConfigStatic), - ParentInterfaceName: "br-eth0", + ParentInterfaceName: 
"", ProviderSubnetId: "5", ProviderVLANId: "5004", VLANTag: 50, @@ -586,7 +611,7 @@ Address: "10.11.19.101", MTU: 1500, ConfigType: string(network.ConfigStatic), - ParentInterfaceName: "br-eth1", + ParentInterfaceName: "", ProviderSubnetId: "9", ProviderVLANId: "5013", VLANTag: 11, @@ -600,7 +625,7 @@ Address: "10.12.19.101", MTU: 1500, ConfigType: string(network.ConfigStatic), - ParentInterfaceName: "br-eth1", + ParentInterfaceName: "", ProviderSubnetId: "10", ProviderVLANId: "5014", VLANTag: 12, @@ -613,7 +638,7 @@ CIDR: "10.13.19.0/24", Address: "10.13.19.101", MTU: 1500, - ParentInterfaceName: "br-eth1", + ParentInterfaceName: "", ConfigType: string(network.ConfigStatic), ProviderSubnetId: "11", ProviderVLANId: "5015", @@ -669,24 +694,7 @@ VLANTag: 13, }} -var expectedSortedNetworkConfigsByInterfaceName = []params.NetworkConfig{ - {InterfaceName: "br-eth0"}, - {InterfaceName: "br-eth0.12"}, - {InterfaceName: "br-eth0.34"}, - {InterfaceName: "br-eth1"}, - {InterfaceName: "br-eth1.100"}, - {InterfaceName: "br-eth1.250"}, - {InterfaceName: "br-eth1.50"}, - {InterfaceName: "eth0"}, - {InterfaceName: "eth0.12"}, - {InterfaceName: "eth0.34"}, - {InterfaceName: "eth1"}, - {InterfaceName: "eth1.100"}, - {InterfaceName: "eth1.250"}, - {InterfaceName: "eth1.50"}, -} - -var expectedLinkLayerDeviceArgsWithMergedNetworkConfig = []state.LinkLayerDeviceArgs{{ +var expectedLinkLayerDeviceArgsWithFinalNetworkConfig = []state.LinkLayerDeviceArgs{{ Name: "lo", MTU: 65536, Type: state.LoopbackDevice, @@ -706,7 +714,7 @@ MACAddress: "aa:bb:cc:dd:ee:f0", IsAutoStart: true, IsUp: true, - ParentName: "br-eth0", + ParentName: "", }, { Name: "br-eth0.250", MTU: 1500, @@ -714,7 +722,7 @@ MACAddress: "aa:bb:cc:dd:ee:f0", IsAutoStart: true, IsUp: true, - ParentName: "br-eth0", + ParentName: "", }, { Name: "br-eth0.50", MTU: 1500, @@ -722,7 +730,7 @@ MACAddress: "aa:bb:cc:dd:ee:f0", IsAutoStart: true, IsUp: true, - ParentName: "br-eth0", + ParentName: "", }, { Name: "eth0", MTU: 1500, @@ 
-773,7 +781,7 @@ MACAddress: "aa:bb:cc:dd:ee:f1", IsAutoStart: true, IsUp: true, - ParentName: "br-eth1", + ParentName: "", }, { Name: "br-eth1.12", MTU: 1500, @@ -781,7 +789,7 @@ MACAddress: "aa:bb:cc:dd:ee:f1", IsAutoStart: true, IsUp: true, - ParentName: "br-eth1", + ParentName: "", }, { Name: "br-eth1.13", MTU: 1500, @@ -789,7 +797,7 @@ MACAddress: "aa:bb:cc:dd:ee:f1", IsAutoStart: true, IsUp: true, - ParentName: "br-eth1", + ParentName: "", }, { Name: "eth1", MTU: 1500, @@ -828,7 +836,7 @@ ParentName: "br-eth1.13", }} -var expectedLinkLayerDeviceAdressesWithMergedNetworkConfig = []state.LinkLayerDeviceAddress{{ +var expectedLinkLayerDeviceAdressesWithFinalNetworkConfig = []state.LinkLayerDeviceAddress{{ DeviceName: "lo", ConfigMethod: state.LoopbackAddress, CIDRAddress: "127.0.0.1/8", @@ -879,97 +887,376 @@ ProviderID: "1302", }} -func (s *TypesSuite) TestSortNetworkConfigsByParentsWithObservedConfigs(c *gc.C) { - s.checkSortNetworkConfigsByParentsWithAllInputPremutationsMatches(c, expectedSortedObservedNetworkConfigs) +func (s *TypesSuite) TestNetworkConfigsToStateArgs(c *gc.C) { + devicesArgs, devicesAddrs := networkingcommon.NetworkConfigsToStateArgs(expectedFinalNetworkConfigs) + + c.Check(devicesArgs, jc.DeepEquals, expectedLinkLayerDeviceArgsWithFinalNetworkConfig) + c.Check(devicesAddrs, jc.DeepEquals, expectedLinkLayerDeviceAdressesWithFinalNetworkConfig) } -func (s *TypesSuite) checkSortNetworkConfigsByParentsWithAllInputPremutationsMatches(c *gc.C, expectedOutput []params.NetworkConfig) { - expectedLength := len(expectedOutput) - jsonExpected := s.networkConfigsAsJSON(c, expectedOutput) - for i := 0; i < expectedLength; i++ { - shuffledInput := shuffleNetworkConfigs(expectedOutput) - result := networkingcommon.SortNetworkConfigsByParents(shuffledInput) - c.Assert(result, gc.HasLen, expectedLength) - jsonResult := s.networkConfigsAsJSON(c, result) - c.Check(jsonResult, gc.Equals, jsonExpected) - } +func (s *TypesSuite) 
TestMergeProviderAndObservedNetworkConfigsBothNil(c *gc.C) { + result := networkingcommon.MergeProviderAndObservedNetworkConfigs(nil, nil) + c.Check(result, gc.IsNil) } -func (s *TypesSuite) networkConfigsAsJSON(c *gc.C, input []params.NetworkConfig) string { - asJSON, err := networkingcommon.NetworkConfigsToIndentedJSON(input) - c.Assert(err, jc.ErrorIsNil) - return asJSON +func (s *TypesSuite) TestMergeProviderAndObservedNetworkConfigsNilObservedConfigs(c *gc.C) { + input := expectedProviderNetworkConfigs + result := networkingcommon.MergeProviderAndObservedNetworkConfigs(input, nil) + c.Check(result, gc.IsNil) +} + +func (s *TypesSuite) TestMergeProviderAndObservedNetworkConfigsNilProviderConfigs(c *gc.C) { + input := expectedObservedNetworkConfigs + result := networkingcommon.MergeProviderAndObservedNetworkConfigs(nil, input) + c.Check(result, jc.DeepEquals, input) +} + +func (s *TypesSuite) TestMergeProviderAndObservedNetworkConfigs(c *gc.C) { + observedConfig := expectedObservedNetworkConfigs + providerConfig := expectedProviderNetworkConfigs + result := networkingcommon.MergeProviderAndObservedNetworkConfigs(providerConfig, observedConfig) + c.Check(result, jc.DeepEquals, expectedFinalNetworkConfigs) +} + +func (s *TypesSuite) TestGetObservedNetworkConfigInterfacesError(c *gc.C) { + s.stubConfigSource.SetErrors(errors.New("no interfaces")) + + observedConfig, err := networkingcommon.GetObservedNetworkConfig(s.stubConfigSource) + c.Check(err, gc.ErrorMatches, "cannot get network interfaces: no interfaces") + c.Check(observedConfig, gc.IsNil) + + s.stubConfigSource.CheckCallNames(c, "Interfaces") +} + +func (s *TypesSuite) TestGetObservedNetworkConfigInterfaceAddressesError(c *gc.C) { + s.stubConfigSource.SetErrors( + nil, // Interfaces() succeeds. + errors.New("no addresses"), // InterfaceAddressses fails. 
+ ) + + observedConfig, err := networkingcommon.GetObservedNetworkConfig(s.stubConfigSource) + c.Check(err, gc.ErrorMatches, `cannot get interface "lo" addresses: no addresses`) + c.Check(observedConfig, gc.IsNil) + + s.stubConfigSource.CheckCallNames(c, "Interfaces", "SysClassNetPath", "InterfaceAddresses") + s.stubConfigSource.CheckCall(c, 2, "InterfaceAddresses", "lo") +} + +func (s *TypesSuite) TestGetObservedNetworkConfigNoInterfaceAddresses(c *gc.C) { + s.stubConfigSource.interfaces = exampleObservedInterfaces[3:4] // only br-eth1 + s.stubConfigSource.interfaceAddrs = make(map[string][]net.Addr) + s.stubConfigSource.makeSysClassNetInterfacePath(c, "br-eth1", "bridge") + + observedConfig, err := networkingcommon.GetObservedNetworkConfig(s.stubConfigSource) + c.Check(err, jc.ErrorIsNil) + c.Check(observedConfig, jc.DeepEquals, []params.NetworkConfig{{ + DeviceIndex: 11, + MACAddress: "aa:bb:cc:dd:ee:f1", + MTU: 1500, + InterfaceName: "br-eth1", + InterfaceType: "bridge", + ConfigType: "manual", + }}) + + s.stubConfigSource.CheckCallNames(c, "Interfaces", "SysClassNetPath", "InterfaceAddresses") + s.stubConfigSource.CheckCall(c, 2, "InterfaceAddresses", "br-eth1") +} + +func (s *TypesSuite) TestGetObservedNetworkConfigLoopbackInfrerred(c *gc.C) { + s.stubConfigSource.interfaces = exampleObservedInterfaces[0:1] // only lo + s.stubConfigSource.makeSysClassNetInterfacePath(c, "lo", "") + + observedConfig, err := networkingcommon.GetObservedNetworkConfig(s.stubConfigSource) + c.Check(err, jc.ErrorIsNil) + c.Check(observedConfig, jc.DeepEquals, []params.NetworkConfig{{ + DeviceIndex: 1, + CIDR: "127.0.0.0/8", + Address: "127.0.0.1", + MTU: 65536, + InterfaceName: "lo", + InterfaceType: "loopback", // inferred from the flags. 
+ ConfigType: "loopback", // since it is a loopback + }, { + DeviceIndex: 1, + MTU: 65536, + InterfaceName: "lo", + InterfaceType: "loopback", + ConfigType: "loopback", + }}) + + s.stubConfigSource.CheckCallNames(c, "Interfaces", "SysClassNetPath", "InterfaceAddresses") + s.stubConfigSource.CheckCall(c, 2, "InterfaceAddresses", "lo") } -func shuffleNetworkConfigs(input []params.NetworkConfig) []params.NetworkConfig { - inputLength := len(input) - output := make([]params.NetworkConfig, inputLength) - shuffled := rand.Perm(inputLength) - for i, j := range shuffled { - output[i] = input[j] +func (s *TypesSuite) TestGetObservedNetworkConfigVLANInfrerred(c *gc.C) { + s.stubConfigSource.interfaces = exampleObservedInterfaces[6:7] // only eth0.100 + s.stubConfigSource.interfaceAddrs = map[string][]net.Addr{ + "eth0.100": []net.Addr{ + fakeAddr("fe80::5054:ff:fedd:eef0:100/64"), + fakeAddr("10.100.19.123/24"), + }, } - return output + s.stubConfigSource.makeSysClassNetInterfacePath(c, "eth0.100", "vlan") + + observedConfig, err := networkingcommon.GetObservedNetworkConfig(s.stubConfigSource) + c.Check(err, jc.ErrorIsNil) + c.Check(observedConfig, jc.DeepEquals, []params.NetworkConfig{{ + DeviceIndex: 13, + MACAddress: "aa:bb:cc:dd:ee:f0", + MTU: 1500, + InterfaceName: "eth0.100", + InterfaceType: "802.1q", + ConfigType: "manual", // the IPv6 address treated as empty. 
+ }, { + DeviceIndex: 13, + CIDR: "10.100.19.0/24", + Address: "10.100.19.123", + MACAddress: "aa:bb:cc:dd:ee:f0", + MTU: 1500, + InterfaceName: "eth0.100", + InterfaceType: "802.1q", + ConfigType: "static", + }}) + + s.stubConfigSource.CheckCallNames(c, "Interfaces", "SysClassNetPath", "InterfaceAddresses") + s.stubConfigSource.CheckCall(c, 2, "InterfaceAddresses", "eth0.100") } -func (s *TypesSuite) TestSortNetworkConfigsByParentsWithProviderConfigs(c *gc.C) { - s.checkSortNetworkConfigsByParentsWithAllInputPremutationsMatches(c, expectedSortedProviderNetworkConfigs) +func (s *TypesSuite) TestGetObservedNetworkConfigEthernetInfrerred(c *gc.C) { + s.stubConfigSource.interfaces = exampleObservedInterfaces[1:2] // only eth0 + s.stubConfigSource.makeSysClassNetInterfacePath(c, "eth0", "") + + observedConfig, err := networkingcommon.GetObservedNetworkConfig(s.stubConfigSource) + c.Check(err, jc.ErrorIsNil) + c.Check(observedConfig, jc.DeepEquals, []params.NetworkConfig{{ + DeviceIndex: 2, + MACAddress: "aa:bb:cc:dd:ee:f0", + MTU: 1500, + InterfaceName: "eth0", + InterfaceType: "ethernet", + ConfigType: "manual", // the IPv6 address treated as empty. + }}) + + s.stubConfigSource.CheckCallNames(c, "Interfaces", "SysClassNetPath", "InterfaceAddresses") + s.stubConfigSource.CheckCall(c, 2, "InterfaceAddresses", "eth0") } -func (s *TypesSuite) TestSortNetworkConfigsByParentsWithMergedConfigs(c *gc.C) { - s.checkSortNetworkConfigsByParentsWithAllInputPremutationsMatches(c, expectedSortedMergedNetworkConfigs) +func (s *TypesSuite) TestGetObservedNetworkConfigBridgePortsHaveParentSet(c *gc.C) { + s.stubConfigSource.interfaces = exampleObservedInterfaces[1:5] // eth0, br-eth0, br-eth1, eth1 + br0Path := s.stubConfigSource.makeSysClassNetInterfacePath(c, "br-eth0", "bridge") + // "extra" added below to verify bridge ports which are discovered, but not + // found as interfaces from the source will be ignored. 
+ s.stubConfigSource.makeSysClassNetBridgePorts(c, br0Path, "eth0", "extra") + br1Path := s.stubConfigSource.makeSysClassNetInterfacePath(c, "br-eth1", "bridge") + s.stubConfigSource.makeSysClassNetBridgePorts(c, br1Path, "eth1") + + observedConfig, err := networkingcommon.GetObservedNetworkConfig(s.stubConfigSource) + c.Check(err, jc.ErrorIsNil) + c.Check(observedConfig, jc.DeepEquals, []params.NetworkConfig{{ + DeviceIndex: 2, + MACAddress: "aa:bb:cc:dd:ee:f0", + MTU: 1500, + InterfaceName: "eth0", + InterfaceType: "ethernet", + ParentInterfaceName: "br-eth0", + ConfigType: "manual", + }, { + DeviceIndex: 10, + CIDR: "10.20.19.0/24", + Address: "10.20.19.100", + MACAddress: "aa:bb:cc:dd:ee:f0", + MTU: 1500, + InterfaceName: "br-eth0", + InterfaceType: "bridge", + ConfigType: "static", + }, { + DeviceIndex: 10, + CIDR: "10.20.19.0/24", + Address: "10.20.19.123", + MACAddress: "aa:bb:cc:dd:ee:f0", + MTU: 1500, + InterfaceName: "br-eth0", + InterfaceType: "bridge", + ConfigType: "static", + }, { + DeviceIndex: 10, + MACAddress: "aa:bb:cc:dd:ee:f0", + MTU: 1500, + InterfaceName: "br-eth0", + InterfaceType: "bridge", + ConfigType: "manual", + }, { + DeviceIndex: 11, + CIDR: "10.20.19.0/24", + Address: "10.20.19.105", + MACAddress: "aa:bb:cc:dd:ee:f1", + MTU: 1500, + InterfaceName: "br-eth1", + InterfaceType: "bridge", + ConfigType: "static", + }, { + DeviceIndex: 11, + MACAddress: "aa:bb:cc:dd:ee:f1", + MTU: 1500, + InterfaceName: "br-eth1", + InterfaceType: "bridge", + ConfigType: "manual", + }, { + DeviceIndex: 3, + MACAddress: "aa:bb:cc:dd:ee:f1", + MTU: 1500, + InterfaceName: "eth1", + InterfaceType: "ethernet", + ParentInterfaceName: "br-eth1", + ConfigType: "manual", + }}) + + s.stubConfigSource.CheckCallNames(c, + "Interfaces", "SysClassNetPath", + "InterfaceAddresses", // eth0 + "InterfaceAddresses", // br-eth0 + "InterfaceAddresses", // br-eth1 + "InterfaceAddresses", // eth1 + ) + s.stubConfigSource.CheckCall(c, 2, "InterfaceAddresses", "eth0") + 
s.stubConfigSource.CheckCall(c, 3, "InterfaceAddresses", "br-eth0") + s.stubConfigSource.CheckCall(c, 4, "InterfaceAddresses", "br-eth1") + s.stubConfigSource.CheckCall(c, 5, "InterfaceAddresses", "eth1") } -func (s *TypesSuite) TestSortNetworkConfigsByInterfaceName(c *gc.C) { - expectedLength := len(expectedSortedNetworkConfigsByInterfaceName) - jsonExpected := s.networkConfigsAsJSON(c, expectedSortedNetworkConfigsByInterfaceName) - for i := 0; i < expectedLength; i++ { - shuffledInput := shuffleNetworkConfigs(expectedSortedNetworkConfigsByInterfaceName) - result := networkingcommon.SortNetworkConfigsByInterfaceName(shuffledInput) - c.Assert(result, gc.HasLen, expectedLength) - jsonResult := s.networkConfigsAsJSON(c, result) - c.Check(jsonResult, gc.Equals, jsonExpected) +func (s *TypesSuite) TestGetObservedNetworkConfigAddressNotInCIDRFormat(c *gc.C) { + s.stubConfigSource.interfaces = exampleObservedInterfaces[1:2] // only eth0 + s.stubConfigSource.makeSysClassNetInterfacePath(c, "eth0", "") + // Simulate running on Windows, where net.InterfaceAddrs() returns + // non-CIDR-formatted addresses. + s.stubConfigSource.interfaceAddrs = map[string][]net.Addr{ + "eth0": []net.Addr{fakeAddr("10.20.19.42")}, } + + observedConfig, err := networkingcommon.GetObservedNetworkConfig(s.stubConfigSource) + c.Check(err, jc.ErrorIsNil) + c.Check(observedConfig, jc.DeepEquals, []params.NetworkConfig{{ + DeviceIndex: 2, + Address: "10.20.19.42", // just Address, no CIDR as netmask cannot be inferred.
+ MACAddress: "aa:bb:cc:dd:ee:f0", + MTU: 1500, + InterfaceName: "eth0", + InterfaceType: "ethernet", + ConfigType: "static", + }}) + + s.stubConfigSource.CheckCallNames(c, "Interfaces", "SysClassNetPath", "InterfaceAddresses") + s.stubConfigSource.CheckCall(c, 2, "InterfaceAddresses", "eth0") } -func (s *TypesSuite) TestMergeProviderAndObservedNetworkConfigs(c *gc.C) { - observedConfigsLength := len(expectedSortedObservedNetworkConfigs) - providerConfigsLength := len(expectedSortedProviderNetworkConfigs) - jsonExpected := s.networkConfigsAsJSON(c, expectedSortedMergedNetworkConfigs) - for i := 0; i < observedConfigsLength; i++ { - shuffledObservedConfigs := shuffleNetworkConfigs(expectedSortedObservedNetworkConfigs) - for j := 0; j < providerConfigsLength; j++ { - shuffledProviderConfigs := shuffleNetworkConfigs(expectedSortedProviderNetworkConfigs) - - mergedConfigs, err := networkingcommon.MergeProviderAndObservedNetworkConfigs(shuffledProviderConfigs, shuffledObservedConfigs) - c.Assert(err, jc.ErrorIsNil) - jsonResult := s.networkConfigsAsJSON(c, mergedConfigs) - c.Check(jsonResult, gc.Equals, jsonExpected) - } +func (s *TypesSuite) TestGetObservedNetworkConfigEmptyAddressValue(c *gc.C) { + s.stubConfigSource.interfaces = exampleObservedInterfaces[1:2] // only eth0 + s.stubConfigSource.makeSysClassNetInterfacePath(c, "eth0", "") + s.stubConfigSource.interfaceAddrs = map[string][]net.Addr{ + "eth0": []net.Addr{fakeAddr("")}, + } + + observedConfig, err := networkingcommon.GetObservedNetworkConfig(s.stubConfigSource) + c.Check(err, jc.ErrorIsNil) + c.Check(observedConfig, jc.DeepEquals, []params.NetworkConfig{{ + DeviceIndex: 2, + MACAddress: "aa:bb:cc:dd:ee:f0", + MTU: 1500, + InterfaceName: "eth0", + InterfaceType: "ethernet", + ConfigType: "manual", + }}) + + s.stubConfigSource.CheckCallNames(c, "Interfaces", "SysClassNetPath", "InterfaceAddresses") + s.stubConfigSource.CheckCall(c, 2, "InterfaceAddresses", "eth0") +} + +func (s *TypesSuite) 
TestGetObservedNetworkConfigInvalidAddressValue(c *gc.C) { + s.stubConfigSource.interfaces = exampleObservedInterfaces[1:2] // only eth0 + s.stubConfigSource.makeSysClassNetInterfacePath(c, "eth0", "") + s.stubConfigSource.interfaceAddrs = map[string][]net.Addr{ + "eth0": []net.Addr{fakeAddr("invalid")}, } + + observedConfig, err := networkingcommon.GetObservedNetworkConfig(s.stubConfigSource) + c.Check(err, gc.ErrorMatches, `cannot parse IP address "invalid" on interface "eth0"`) + c.Check(observedConfig, gc.IsNil) + + s.stubConfigSource.CheckCallNames(c, "Interfaces", "SysClassNetPath", "InterfaceAddresses") + s.stubConfigSource.CheckCall(c, 2, "InterfaceAddresses", "eth0") } -func (s *TypesSuite) TestGetObservedNetworkConfig(c *gc.C) { - s.PatchValue(networkingcommon.NetInterfaces, func() ([]net.Interface, error) { - return exampleObservedInterfaces, nil - }) - s.PatchValue(networkingcommon.InterfaceAddrs, func(i *net.Interface) ([]net.Addr, error) { - c.Assert(i, gc.NotNil) - if addrs, found := exampleObservedInterfaceAddrs[i.Name]; found { - return addrs, nil - } - return nil, nil - }) +type stubNetworkConfigSource struct { + *testing.Stub + + fakeSysClassNetPath string + interfaces []net.Interface + interfaceAddrs map[string][]net.Addr +} - observedConfig, err := networkingcommon.GetObservedNetworkConfig() +// makeSysClassNetInterfacePath creates a subdir for the given interfaceName, +// and a uevent file there with the given devtype set. Returns the created path. 
+func (s *stubNetworkConfigSource) makeSysClassNetInterfacePath(c *gc.C, interfaceName, devType string) string { + interfacePath := filepath.Join(s.fakeSysClassNetPath, interfaceName) + err := os.Mkdir(interfacePath, 0755) c.Assert(err, jc.ErrorIsNil) - jsonResult := s.networkConfigsAsJSON(c, observedConfig) - jsonExpected := s.networkConfigsAsJSON(c, expectedSortedObservedNetworkConfigs) - c.Check(jsonResult, gc.Equals, jsonExpected) + + var contents string + if devType == "" { + contents = fmt.Sprintf(` +IFINDEX=42 +INTERFACE=%s +`, interfaceName) + } else { + contents = fmt.Sprintf(` +IFINDEX=42 +INTERFACE=%s +DEVTYPE=%s +`, interfaceName, devType) + } + ueventPath := filepath.Join(interfacePath, "uevent") + err = ioutil.WriteFile(ueventPath, []byte(contents), 0644) + c.Assert(err, jc.ErrorIsNil) + + return interfacePath } -func (s *TypesSuite) TestNetworkConfigsToStateArgs(c *gc.C) { - devicesArgs, devicesAddrs := networkingcommon.NetworkConfigsToStateArgs(expectedSortedMergedNetworkConfigs) +// makeSysClassNetBridgePorts creates a "brif" subdir in the given +// interfacePath, and one file for each entry in the given ports, named the same +// as the port value. Needed to simulate the FS structure network.GetBridgePorts() +// can handle. +func (s *stubNetworkConfigSource) makeSysClassNetBridgePorts(c *gc.C, interfacePath string, ports ...string) { + brifPath := filepath.Join(interfacePath, "brif") + err := os.Mkdir(brifPath, 0755) + c.Assert(err, jc.ErrorIsNil) + + for _, portName := range ports { + portPath := filepath.Join(brifPath, portName) + err = ioutil.WriteFile(portPath, []byte("#empty"), 0644) + c.Assert(err, jc.ErrorIsNil) + } +} + +// SysClassNetPath implements NetworkConfigSource. 
+func (s *stubNetworkConfigSource) SysClassNetPath() string { + s.AddCall("SysClassNetPath") + return s.fakeSysClassNetPath +} - c.Check(devicesArgs, jc.DeepEquals, expectedLinkLayerDeviceArgsWithMergedNetworkConfig) - c.Check(devicesAddrs, jc.DeepEquals, expectedLinkLayerDeviceAdressesWithMergedNetworkConfig) +// Interfaces implements NetworkConfigSource. +func (s *stubNetworkConfigSource) Interfaces() ([]net.Interface, error) { + s.AddCall("Interfaces") + if err := s.NextErr(); err != nil { + return nil, err + } + return s.interfaces, nil +} + +// InterfaceAddresses implements NetworkConfigSource. +func (s *stubNetworkConfigSource) InterfaceAddresses(name string) ([]net.Addr, error) { + s.AddCall("InterfaceAddresses", name) + if err := s.NextErr(); err != nil { + return nil, err + } + return s.interfaceAddrs[name], nil } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/common/permissions.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/common/permissions.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/common/permissions.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/common/permissions.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,155 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package common + +import ( + "strings" + + "github.com/juju/errors" + "gopkg.in/juju/names.v2" + + "github.com/juju/juju/permission" + "github.com/juju/juju/state" +) + +// EveryoneTagName represents a special group that encompasses +// all external users. +const EveryoneTagName = "everyone@external" + +// UserAccess returns the access the user has on the model state +// and the host controller. 
+func UserAccess(st *state.State, utag names.UserTag) (modelUser, controllerUser permission.UserAccess, err error) { + var none permission.UserAccess + modelUser, err = st.UserAccess(utag, st.ModelTag()) + if err != nil && !errors.IsNotFound(err) { + return none, none, errors.Trace(err) + } + + controllerUser, err = state.ControllerAccess(st, utag) + if err != nil && !errors.IsNotFound(err) { + return none, none, errors.Trace(err) + } + + // TODO(perrito666) remove the following section about everyone group + // when groups are implemented, this accounts only for the lack of a local + // ControllerUser when logging in from an external user that has not been granted + // permissions on the controller but there are permissions for the special + // everyone group. + if !utag.IsLocal() { + controllerUser, err = maybeUseGroupPermission(st.UserAccess, controllerUser, st.ControllerTag(), utag) + if err != nil { + return none, none, errors.Annotatef(err, "obtaining ControllerUser for everyone group") + } + } + + if permission.IsEmptyUserAccess(modelUser) && + permission.IsEmptyUserAccess(controllerUser) { + return none, none, errors.NotFoundf("model or controller user") + } + return modelUser, controllerUser, nil +} + +// HasPermission returns true if the specified user has the specified +// permission on target. 
+func HasPermission(userGetter userAccessFunc, utag names.Tag, + requestedPermission permission.Access, target names.Tag) (bool, error) { + + validForKind := false + switch requestedPermission { + case permission.LoginAccess, permission.AddModelAccess, permission.SuperuserAccess: + validForKind = target.Kind() == names.ControllerTagKind + case permission.ReadAccess, permission.WriteAccess, permission.AdminAccess: + validForKind = target.Kind() == names.ModelTagKind + } + + if !validForKind { + return false, nil + } + + userTag, ok := utag.(names.UserTag) + if !ok { + // lets not reveal more than is strictly necessary + return false, nil + } + + user, err := userGetter(userTag, target) + if err != nil && !errors.IsNotFound(err) { + return false, errors.Annotatef(err, "while obtaining %s user", target.Kind()) + } + // there is a special case for external users, a group called everyone@external + if target.Kind() == names.ControllerTagKind && !userTag.IsLocal() { + controllerTag, ok := target.(names.ControllerTag) + if !ok { + return false, errors.NotValidf("controller tag") + } + + // TODO(perrito666) remove the following section about everyone group + // when groups are implemented, this accounts only for the lack of a local + // ControllerUser when logging in from an external user that has not been granted + // permissions on the controller but there are permissions for the special + // everyone group. + user, err = maybeUseGroupPermission(userGetter, user, controllerTag, userTag) + if err != nil { + return false, errors.Trace(err) + } + if permission.IsEmptyUserAccess(user) { + return false, nil + } + } + // returning this kind of information would be too much information to reveal too. 
+ if errors.IsNotFound(err) { + return false, nil + } + modelPermission := user.Access.EqualOrGreaterModelAccessThan(requestedPermission) && target.Kind() == names.ModelTagKind + controllerPermission := user.Access.EqualOrGreaterControllerAccessThan(requestedPermission) && target.Kind() == names.ControllerTagKind + if !controllerPermission && !modelPermission { + return false, nil + } + return true, nil +} + +// maybeUseGroupPermission returns a permission.UserAccess updated +// with the group permissions that apply to it if higher than +// current. +// If the passed UserAccess is empty (controller user lacks permissions) +// but the group is not, a stand-in will be created to hold the group +// permissions. +func maybeUseGroupPermission( + userGetter userAccessFunc, + externalUser permission.UserAccess, + controllerTag names.ControllerTag, + userTag names.UserTag, +) (permission.UserAccess, error) { + + everyoneTag := names.NewUserTag(EveryoneTagName) + everyone, err := userGetter(everyoneTag, controllerTag) + if errors.IsNotFound(err) { + return externalUser, nil + } + if err != nil { + return permission.UserAccess{}, errors.Trace(err) + } + if permission.IsEmptyUserAccess(externalUser) && + !permission.IsEmptyUserAccess(everyone) { + externalUser = newControllerUserFromGroup(everyone, userTag) + } + + if everyone.Access.EqualOrGreaterControllerAccessThan(externalUser.Access) { + externalUser.Access = everyone.Access + } + return externalUser, nil +} + +type userAccessFunc func(names.UserTag, names.Tag) (permission.UserAccess, error) + +// newControllerUserFromGroup returns a permission.UserAccess that serves +// as a stand-in for a user that has group access but no explicit user +// access. 
+func newControllerUserFromGroup(everyoneAccess permission.UserAccess, + userTag names.UserTag) permission.UserAccess { + everyoneAccess.UserTag = userTag + everyoneAccess.UserID = strings.ToLower(userTag.Id()) + everyoneAccess.UserName = userTag.Id() + return everyoneAccess +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/common/permissions_test.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/common/permissions_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/common/permissions_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/common/permissions_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,196 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package common_test + +import ( + "github.com/juju/errors" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + "gopkg.in/juju/names.v2" + + "github.com/juju/juju/apiserver/common" + "github.com/juju/juju/permission" + "github.com/juju/juju/testing" +) + +type PermissionSuite struct { + testing.BaseSuite +} + +var _ = gc.Suite(&PermissionSuite{}) + +type fakeUserAccess struct { + subjects []names.UserTag + objects []names.Tag + user permission.UserAccess + err error +} + +func (f *fakeUserAccess) call(subject names.UserTag, object names.Tag) (permission.UserAccess, error) { + f.subjects = append(f.subjects, subject) + f.objects = append(f.objects, object) + return f.user, f.err +} + +func (r *PermissionSuite) TestNoUserTagLacksPermission(c *gc.C) { + nonUser := names.NewModelTag("beef1beef1-0000-0000-000011112222") + target := names.NewModelTag("beef1beef2-0000-0000-000011112222") + hasPermission, err := common.HasPermission((&fakeUserAccess{}).call, nonUser, permission.ReadAccess, target) + c.Assert(hasPermission, jc.IsFalse) + c.Assert(err, jc.ErrorIsNil) +} + +func (r *PermissionSuite) TestHasPermission(c *gc.C) { + testCases := []struct { + title string + 
userGetterAccess permission.Access + user names.UserTag + target names.Tag + access permission.Access + expected bool + }{ + { + title: "user has lesser permissions than required", + userGetterAccess: permission.ReadAccess, + user: names.NewUserTag("validuser"), + target: names.NewModelTag("beef1beef2-0000-0000-000011112222"), + access: permission.WriteAccess, + expected: false, + }, + { + title: "user has equal permission than required", + userGetterAccess: permission.WriteAccess, + user: names.NewUserTag("validuser"), + target: names.NewModelTag("beef1beef2-0000-0000-000011112222"), + access: permission.WriteAccess, + expected: true, + }, + { + title: "user has greater permission than required", + userGetterAccess: permission.AdminAccess, + user: names.NewUserTag("validuser"), + target: names.NewModelTag("beef1beef2-0000-0000-000011112222"), + access: permission.WriteAccess, + expected: true, + }, + { + title: "user requests model permission on controller", + userGetterAccess: permission.AdminAccess, + user: names.NewUserTag("validuser"), + target: names.NewModelTag("beef1beef2-0000-0000-000011112222"), + access: permission.AddModelAccess, + expected: false, + }, + { + title: "user requests controller permission on model", + userGetterAccess: permission.AdminAccess, + user: names.NewUserTag("validuser"), + target: names.NewControllerTag("beef1beef2-0000-0000-000011112222"), + access: permission.AdminAccess, // notice user has this permission for model. 
+ expected: false, + }, + { + title: "controller permissions also work", + userGetterAccess: permission.AddModelAccess, + user: names.NewUserTag("validuser"), + target: names.NewControllerTag("beef1beef2-0000-0000-000011112222"), + access: permission.AddModelAccess, + expected: true, + }, + } + for i, t := range testCases { + userGetter := &fakeUserAccess{ + user: permission.UserAccess{ + Access: t.userGetterAccess, + }} + c.Logf("HasPermission test n %d: %s", i, t.title) + hasPermission, err := common.HasPermission(userGetter.call, t.user, t.access, t.target) + c.Assert(hasPermission, gc.Equals, t.expected) + c.Assert(err, jc.ErrorIsNil) + } + +} + +func (r *PermissionSuite) TestUserGetterErrorReturns(c *gc.C) { + user := names.NewUserTag("validuser") + target := names.NewModelTag("beef1beef2-0000-0000-000011112222") + userGetter := &fakeUserAccess{ + user: permission.UserAccess{}, + err: errors.NotFoundf("a user"), + } + hasPermission, err := common.HasPermission(userGetter.call, user, permission.ReadAccess, target) + c.Assert(err, jc.ErrorIsNil) + c.Assert(hasPermission, jc.IsFalse) + c.Assert(userGetter.subjects, gc.HasLen, 1) + c.Assert(userGetter.subjects[0], gc.DeepEquals, user) + c.Assert(userGetter.objects, gc.HasLen, 1) + c.Assert(userGetter.objects[0], gc.DeepEquals, target) +} + +type fakeEveryoneUserAccess struct { + user permission.UserAccess + everyone permission.UserAccess +} + +func (f *fakeEveryoneUserAccess) call(subject names.UserTag, object names.Tag) (permission.UserAccess, error) { + if subject.Id() == common.EveryoneTagName { + return f.everyone, nil + } + return f.user, nil +} + +func (r *PermissionSuite) TestEveryoneAtExternal(c *gc.C) { + testCases := []struct { + title string + userGetterAccess permission.Access + everyoneAccess permission.Access + user names.UserTag + target names.Tag + access permission.Access + expected bool + }{ + { + title: "user has lesser permissions than everyone", + userGetterAccess: permission.LoginAccess, + 
everyoneAccess: permission.AddModelAccess, + user: names.NewUserTag("validuser@external"), + target: names.NewControllerTag("beef1beef2-0000-0000-000011112222"), + access: permission.AddModelAccess, + expected: true, + }, + { + title: "user has greater permissions than everyone", + userGetterAccess: permission.AddModelAccess, + everyoneAccess: permission.LoginAccess, + user: names.NewUserTag("validuser@external"), + target: names.NewControllerTag("beef1beef2-0000-0000-000011112222"), + access: permission.AddModelAccess, + expected: true, + }, + { + title: "everibody not considered if user is local", + userGetterAccess: permission.LoginAccess, + everyoneAccess: permission.AddModelAccess, + user: names.NewUserTag("validuser"), + target: names.NewControllerTag("beef1beef2-0000-0000-000011112222"), + access: permission.AddModelAccess, + expected: false, + }, + } + + for i, t := range testCases { + userGetter := &fakeEveryoneUserAccess{ + user: permission.UserAccess{ + Access: t.userGetterAccess, + }, + everyone: permission.UserAccess{ + Access: t.everyoneAccess, + }, + } + c.Logf(`HasPermission "everyone" test n %d: %s`, i, t.title) + hasPermission, err := common.HasPermission(userGetter.call, t.user, t.access, t.target) + c.Assert(err, jc.ErrorIsNil) + c.Assert(hasPermission, gc.Equals, t.expected) + } +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/common/setstatus.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/common/setstatus.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/common/setstatus.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/common/setstatus.go 2016-10-13 14:31:49.000000000 +0000 @@ -209,7 +209,7 @@ if !ok { return NotSupportedError(tag, "updating status") } - if len(newData) > 0 && existingStatusInfo.Status != status.StatusError { + if len(newData) > 0 && existingStatusInfo.Status != status.Error { return fmt.Errorf("%s is not in an error state", 
names.ReadableString(tag)) } // TODO(perrito666) 2016-05-02 lp:1558657 diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/common/setstatus_test.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/common/setstatus_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/common/setstatus_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/common/setstatus_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -38,7 +38,7 @@ s.badTag = tag result, err := s.setter.SetStatus(params.SetStatus{[]params.EntityStatusArgs{{ Tag: tag.String(), - Status: status.StatusExecuting.String(), + Status: status.Executing.String(), }}}) c.Assert(err, jc.ErrorIsNil) c.Assert(result.Results, gc.HasLen, 1) @@ -48,7 +48,7 @@ func (s *statusSetterSuite) TestNotATag(c *gc.C) { result, err := s.setter.SetStatus(params.SetStatus{[]params.EntityStatusArgs{{ Tag: "not a tag", - Status: status.StatusExecuting.String(), + Status: status.Executing.String(), }}}) c.Assert(err, jc.ErrorIsNil) c.Assert(result.Results, gc.HasLen, 1) @@ -58,7 +58,7 @@ func (s *statusSetterSuite) TestNotFound(c *gc.C) { result, err := s.setter.SetStatus(params.SetStatus{[]params.EntityStatusArgs{{ Tag: names.NewMachineTag("42").String(), - Status: status.StatusDown.String(), + Status: status.Down.String(), }}}) c.Assert(err, jc.ErrorIsNil) c.Assert(result.Results, gc.HasLen, 1) @@ -69,7 +69,7 @@ machine := s.Factory.MakeMachine(c, nil) result, err := s.setter.SetStatus(params.SetStatus{[]params.EntityStatusArgs{{ Tag: machine.Tag().String(), - Status: status.StatusStarted.String(), + Status: status.Started.String(), }}}) c.Assert(err, jc.ErrorIsNil) c.Assert(result.Results, gc.HasLen, 1) @@ -79,7 +79,7 @@ c.Assert(err, jc.ErrorIsNil) machineStatus, err := machine.Status() c.Assert(err, jc.ErrorIsNil) - c.Assert(machineStatus.Status, gc.Equals, status.StatusStarted) + c.Assert(machineStatus.Status, gc.Equals, status.Started) } func (s *statusSetterSuite) 
TestSetUnitStatus(c *gc.C) { @@ -87,11 +87,11 @@ // on the unit returns the workload status not the agent status as it // does on a machine. unit := s.Factory.MakeUnit(c, &factory.UnitParams{Status: &status.StatusInfo{ - Status: status.StatusMaintenance, + Status: status.Maintenance, }}) result, err := s.setter.SetStatus(params.SetStatus{[]params.EntityStatusArgs{{ Tag: unit.Tag().String(), - Status: status.StatusActive.String(), + Status: status.Active.String(), }}}) c.Assert(err, jc.ErrorIsNil) c.Assert(result.Results, gc.HasLen, 1) @@ -101,7 +101,7 @@ c.Assert(err, jc.ErrorIsNil) unitStatus, err := unit.Status() c.Assert(err, jc.ErrorIsNil) - c.Assert(unitStatus.Status, gc.Equals, status.StatusActive) + c.Assert(unitStatus.Status, gc.Equals, status.Active) } func (s *statusSetterSuite) TestSetServiceStatus(c *gc.C) { @@ -109,11 +109,11 @@ // ServiceStatusSetter that checks for leadership, so permission denied // here. service := s.Factory.MakeApplication(c, &factory.ApplicationParams{Status: &status.StatusInfo{ - Status: status.StatusMaintenance, + Status: status.Maintenance, }}) result, err := s.setter.SetStatus(params.SetStatus{[]params.EntityStatusArgs{{ Tag: service.Tag().String(), - Status: status.StatusActive.String(), + Status: status.Active.String(), }}}) c.Assert(err, jc.ErrorIsNil) c.Assert(result.Results, gc.HasLen, 1) @@ -123,17 +123,17 @@ c.Assert(err, jc.ErrorIsNil) serviceStatus, err := service.Status() c.Assert(err, jc.ErrorIsNil) - c.Assert(serviceStatus.Status, gc.Equals, status.StatusMaintenance) + c.Assert(serviceStatus.Status, gc.Equals, status.Maintenance) } func (s *statusSetterSuite) TestBulk(c *gc.C) { s.badTag = names.NewMachineTag("42") result, err := s.setter.SetStatus(params.SetStatus{[]params.EntityStatusArgs{{ Tag: s.badTag.String(), - Status: status.StatusActive.String(), + Status: status.Active.String(), }, { Tag: "bad-tag", - Status: status.StatusActive.String(), + Status: status.Active.String(), }}}) c.Assert(err, 
jc.ErrorIsNil) c.Assert(result.Results, gc.HasLen, 2) @@ -162,7 +162,7 @@ s.badTag = tag result, err := s.setter.SetStatus(params.SetStatus{[]params.EntityStatusArgs{{ Tag: tag.String(), - Status: status.StatusActive.String(), + Status: status.Active.String(), }}}) c.Assert(err, jc.ErrorIsNil) c.Assert(result.Results, gc.HasLen, 1) @@ -172,7 +172,7 @@ func (s *serviceStatusSetterSuite) TestNotATag(c *gc.C) { result, err := s.setter.SetStatus(params.SetStatus{[]params.EntityStatusArgs{{ Tag: "not a tag", - Status: status.StatusActive.String(), + Status: status.Active.String(), }}}) c.Assert(err, jc.ErrorIsNil) c.Assert(result.Results, gc.HasLen, 1) @@ -182,7 +182,7 @@ func (s *serviceStatusSetterSuite) TestNotFound(c *gc.C) { result, err := s.setter.SetStatus(params.SetStatus{[]params.EntityStatusArgs{{ Tag: names.NewUnitTag("foo/0").String(), - Status: status.StatusActive.String(), + Status: status.Active.String(), }}}) c.Assert(err, jc.ErrorIsNil) c.Assert(result.Results, gc.HasLen, 1) @@ -193,7 +193,7 @@ machine := s.Factory.MakeMachine(c, nil) result, err := s.setter.SetStatus(params.SetStatus{[]params.EntityStatusArgs{{ Tag: machine.Tag().String(), - Status: status.StatusActive.String(), + Status: status.Active.String(), }}}) c.Assert(err, jc.ErrorIsNil) c.Assert(result.Results, gc.HasLen, 1) @@ -206,11 +206,11 @@ // simple status setter to check to see if the unit (authTag) is a leader // and able to set the service status. However, that is for another day. 
service := s.Factory.MakeApplication(c, &factory.ApplicationParams{Status: &status.StatusInfo{ - Status: status.StatusMaintenance, + Status: status.Maintenance, }}) result, err := s.setter.SetStatus(params.SetStatus{[]params.EntityStatusArgs{{ Tag: service.Tag().String(), - Status: status.StatusActive.String(), + Status: status.Active.String(), }}}) c.Assert(err, jc.ErrorIsNil) c.Assert(result.Results, gc.HasLen, 1) @@ -222,11 +222,11 @@ func (s *serviceStatusSetterSuite) TestSetUnitStatusNotLeader(c *gc.C) { // If the unit isn't the leader, it can't set it. unit := s.Factory.MakeUnit(c, &factory.UnitParams{Status: &status.StatusInfo{ - Status: status.StatusMaintenance, + Status: status.Maintenance, }}) result, err := s.setter.SetStatus(params.SetStatus{[]params.EntityStatusArgs{{ Tag: unit.Tag().String(), - Status: status.StatusActive.String(), + Status: status.Active.String(), }}}) c.Assert(err, jc.ErrorIsNil) c.Assert(result.Results, gc.HasLen, 1) @@ -236,12 +236,12 @@ func (s *serviceStatusSetterSuite) TestSetUnitStatusIsLeader(c *gc.C) { service := s.Factory.MakeApplication(c, &factory.ApplicationParams{Status: &status.StatusInfo{ - Status: status.StatusMaintenance, + Status: status.Maintenance, }}) unit := s.Factory.MakeUnit(c, &factory.UnitParams{ Application: service, Status: &status.StatusInfo{ - Status: status.StatusMaintenance, + Status: status.Maintenance, }}) s.State.LeadershipClaimer().ClaimLeadership( service.Name(), @@ -249,7 +249,7 @@ time.Minute) result, err := s.setter.SetStatus(params.SetStatus{[]params.EntityStatusArgs{{ Tag: unit.Tag().String(), - Status: status.StatusActive.String(), + Status: status.Active.String(), }}}) c.Assert(err, jc.ErrorIsNil) @@ -260,7 +260,7 @@ c.Assert(err, jc.ErrorIsNil) unitStatus, err := service.Status() c.Assert(err, jc.ErrorIsNil) - c.Assert(unitStatus.Status, gc.Equals, status.StatusActive) + c.Assert(unitStatus.Status, gc.Equals, status.Active) } func (s *serviceStatusSetterSuite) TestBulk(c *gc.C) { @@ 
-268,13 +268,13 @@ machine := s.Factory.MakeMachine(c, nil) result, err := s.setter.SetStatus(params.SetStatus{[]params.EntityStatusArgs{{ Tag: s.badTag.String(), - Status: status.StatusActive.String(), + Status: status.Active.String(), }, { Tag: machine.Tag().String(), - Status: status.StatusActive.String(), + Status: status.Active.String(), }, { Tag: "bad-tag", - Status: status.StatusActive.String(), + Status: status.Active.String(), }}}) c.Assert(err, jc.ErrorIsNil) c.Assert(result.Results, gc.HasLen, 3) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/common/storagecommon/storage.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/common/storagecommon/storage.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/common/storagecommon/storage.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/common/storagecommon/storage.go 2016-10-13 14:31:49.000000000 +0000 @@ -258,7 +258,7 @@ ) (map[string]string, error) { storageTags := tags.ResourceTags( names.NewModelTag(modelUUID), - names.NewModelTag(controllerUUID), + names.NewControllerTag(controllerUUID), tagger, ) if storageInstance != nil { diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/common/storagecommon/volumes_test.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/common/storagecommon/volumes_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/common/storagecommon/volumes_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/common/storagecommon/volumes_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -40,7 +40,7 @@ &fakeVolume{tag: tag, params: volumeParams, info: info}, nil, // StorageInstance testing.ModelTag.Id(), - testing.ModelTag.Id(), + testing.ControllerTag.Id(), testing.CustomModelConfig(c, testing.Attrs{ "resource-tags": "a=b c=", }), @@ -53,7 +53,7 @@ Provider: "loop", Size: 1024, Tags: map[string]string{ - tags.JujuController: testing.ModelTag.Id(), 
+ tags.JujuController: testing.ControllerTag.Id(), tags.JujuModel: testing.ModelTag.Id(), "a": "b", "c": "", @@ -71,7 +71,7 @@ }}, &fakeStorageInstance{tag: storageTag, owner: unitTag}, testing.ModelTag.Id(), - testing.ModelTag.Id(), + testing.ControllerTag.Id(), testing.CustomModelConfig(c, nil), &fakePoolManager{}, provider.CommonStorageProviders(), @@ -82,7 +82,7 @@ Provider: "loop", Size: 1024, Tags: map[string]string{ - tags.JujuController: testing.ModelTag.Id(), + tags.JujuController: testing.ControllerTag.Id(), tags.JujuModel: testing.ModelTag.Id(), tags.JujuStorageInstance: "mystore/0", tags.JujuStorageOwner: "mysql/123", diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/common/testing/block.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/common/testing/block.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/common/testing/block.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/common/testing/block.go 2016-10-13 14:31:49.000000000 +0000 @@ -20,7 +20,7 @@ // It provides easy access to switch blocks on // as well as test whether operations are blocked or not. type BlockHelper struct { - ApiState api.Connection + apiState api.Connection client *block.Client } @@ -28,7 +28,7 @@ // to manage desired juju blocks. func NewBlockHelper(st api.Connection) BlockHelper { return BlockHelper{ - ApiState: st, + apiState: st, client: block.NewClient(st), } } @@ -52,7 +52,7 @@ func (s BlockHelper) Close() { s.client.Close() - s.ApiState.Close() + s.apiState.Close() } // BlockDestroyModel blocks destroy-model. 
diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/common/testing/modelwatcher.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/common/testing/modelwatcher.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/common/testing/modelwatcher.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/common/testing/modelwatcher.go 2016-10-13 14:31:49.000000000 +0000 @@ -9,17 +9,10 @@ "github.com/juju/juju/apiserver/common" "github.com/juju/juju/apiserver/params" - "github.com/juju/juju/environs" "github.com/juju/juju/state" - "github.com/juju/juju/state/stateenvirons" statetesting "github.com/juju/juju/state/testing" ) -const ( - HasSecrets = true - NoSecrets = false -) - type ModelWatcher interface { WatchForModelConfigChanges() (params.NotifyWatchResult, error) ModelConfig() (params.ModelConfigResult, error) @@ -29,46 +22,32 @@ modelWatcher ModelWatcher st *state.State resources *common.Resources - hasSecrets bool } func NewModelWatcherTest( modelWatcher ModelWatcher, st *state.State, resources *common.Resources, - hasSecrets bool) *ModelWatcherTest { - return &ModelWatcherTest{modelWatcher, st, resources, hasSecrets} +) *ModelWatcherTest { + return &ModelWatcherTest{modelWatcher, st, resources} } // AssertModelConfig provides a method to test the config from the // envWatcher. This allows other tests that embed this type to have // more than just the default test. 
-func (s *ModelWatcherTest) AssertModelConfig(c *gc.C, envWatcher ModelWatcher, hasSecrets bool) { +func (s *ModelWatcherTest) AssertModelConfig(c *gc.C, envWatcher ModelWatcher) { envConfig, err := s.st.ModelConfig() c.Assert(err, jc.ErrorIsNil) - newEnviron := stateenvirons.GetNewEnvironFunc(environs.New) result, err := envWatcher.ModelConfig() c.Assert(err, jc.ErrorIsNil) configAttributes := envConfig.AllAttrs() - // If the implementor doesn't provide secrets, we need to replace the config - // values in our environment to compare against with the secrets replaced. - if !hasSecrets { - env, err := newEnviron(s.st) - c.Assert(err, jc.ErrorIsNil) - secretAttrs, err := env.Provider().SecretAttrs(envConfig) - c.Assert(err, jc.ErrorIsNil) - for key := range secretAttrs { - configAttributes[key] = "not available" - } - } - c.Assert(result.Config, jc.DeepEquals, params.ModelConfig(configAttributes)) } func (s *ModelWatcherTest) TestModelConfig(c *gc.C) { - s.AssertModelConfig(c, s.modelWatcher, s.hasSecrets) + s.AssertModelConfig(c, s.modelWatcher) } func (s *ModelWatcherTest) TestWatchForModelConfigChanges(c *gc.C) { diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/common/unitstatus.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/common/unitstatus.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/common/unitstatus.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/common/unitstatus.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,91 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package common + +import ( + "fmt" + + "gopkg.in/juju/charm.v6-unstable/hooks" + + "github.com/juju/juju/state" + "github.com/juju/juju/status" + "github.com/juju/juju/worker/uniter/operation" +) + +// StatusAndErr pairs a StatusInfo with an error associated with +// retrieving it. 
+type StatusAndErr struct { + Status status.StatusInfo + Err error +} + +// UnitStatusGetter defines the unit functionality required to +// determine unit agent and workload status. +type UnitStatusGetter interface { + AgentStatus() (status.StatusInfo, error) + Status() (status.StatusInfo, error) + AgentPresence() (bool, error) + Name() string + Life() state.Life +} + +// UnitStatus returns the unit agent and workload status for a given +// unit, with special handling for agent presence. +func UnitStatus(unit UnitStatusGetter) (agent StatusAndErr, workload StatusAndErr) { + agent.Status, agent.Err = unit.AgentStatus() + workload.Status, workload.Err = unit.Status() + + if !canBeLost(agent.Status, workload.Status) { + // The unit is allocating or installing - there's no point in + // enquiring about the agent liveness. + return + } + + agentAlive, err := unit.AgentPresence() + if err != nil { + return + } + if unit.Life() != state.Dead && !agentAlive { + // If the unit is in error, it would be bad to throw away + // the error information as when the agent reconnects, that + // error information would then be lost. + if workload.Status.Status != status.Error { + workload.Status.Status = status.Unknown + workload.Status.Message = fmt.Sprintf("agent lost, see 'juju show-status-log %s'", unit.Name()) + } + agent.Status.Status = status.Lost + agent.Status.Message = "agent is not communicating with the server" + } + return +} + +func canBeLost(agent, workload status.StatusInfo) bool { + switch agent.Status { + case status.Allocating: + return false + case status.Executing: + return agent.Message != operation.RunningHookMessage(string(hooks.Install)) + } + + // TODO(fwereade/wallyworld): we should have an explicit place in the model + // to tell us when we've hit this point, instead of piggybacking on top of + // status and/or status history. 
+ + return isWorkloadInstalled(workload) +} + +func isWorkloadInstalled(workload status.StatusInfo) bool { + switch workload.Status { + case status.Maintenance: + return workload.Message != status.MessageInstallingCharm + case status.Waiting: + switch workload.Message { + case status.MessageWaitForMachine: + case status.MessageInstallingAgent: + case status.MessageInitializingAgent: + return false + } + } + return true +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/common/unitstatus_test.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/common/unitstatus_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/common/unitstatus_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/common/unitstatus_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,137 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package common_test + +import ( + "errors" + + "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/apiserver/common" + "github.com/juju/juju/state" + "github.com/juju/juju/status" +) + +type UnitStatusSuite struct { + testing.IsolationSuite + unit *fakeStatusUnit +} + +var _ = gc.Suite(&UnitStatusSuite{}) + +func (s *UnitStatusSuite) SetUpTest(c *gc.C) { + s.unit = &fakeStatusUnit{ + agentStatus: status.StatusInfo{ + Status: status.Started, + Message: "agent ok", + }, + status: status.StatusInfo{ + Status: status.Idle, + Message: "unit ok", + }, + presence: true, + } +} + +func (s *UnitStatusSuite) checkUntouched(c *gc.C) { + agent, workload := common.UnitStatus(s.unit) + c.Check(agent.Status, jc.DeepEquals, s.unit.agentStatus) + c.Check(agent.Err, jc.ErrorIsNil) + c.Check(workload.Status, jc.DeepEquals, s.unit.status) + c.Check(workload.Err, jc.ErrorIsNil) +} + +func (s *UnitStatusSuite) TestNormal(c *gc.C) { + s.checkUntouched(c) +} + +func (s *UnitStatusSuite) 
TestErrors(c *gc.C) { + s.unit.agentStatusErr = errors.New("agent status error") + s.unit.statusErr = errors.New("status error") + + agent, workload := common.UnitStatus(s.unit) + c.Check(agent.Err, gc.ErrorMatches, "agent status error") + c.Check(workload.Err, gc.ErrorMatches, "status error") +} + +func (s *UnitStatusSuite) TestLost(c *gc.C) { + s.unit.presence = false + agent, workload := common.UnitStatus(s.unit) + c.Check(agent.Status, jc.DeepEquals, status.StatusInfo{ + Status: status.Lost, + Message: "agent is not communicating with the server", + }) + c.Check(agent.Err, jc.ErrorIsNil) + c.Check(workload.Status, jc.DeepEquals, status.StatusInfo{ + Status: status.Unknown, + Message: "agent lost, see 'juju show-status-log foo/2'", + }) + c.Check(workload.Err, jc.ErrorIsNil) +} + +func (s *UnitStatusSuite) TestLostAndDead(c *gc.C) { + s.unit.presence = false + s.unit.life = state.Dead + // Status is untouched if unit is Dead. + s.checkUntouched(c) +} + +func (s *UnitStatusSuite) TestPresenceError(c *gc.C) { + s.unit.presence = false + s.unit.presenceErr = errors.New("boom") + // Presence error gets ignored, so no output is unchanged. 
+ s.checkUntouched(c) +} + +func (s *UnitStatusSuite) TestNotLostIfAllocating(c *gc.C) { + s.unit.presence = false + s.unit.agentStatus.Status = status.Allocating + s.checkUntouched(c) +} + +func (s *UnitStatusSuite) TestCantBeLostDuringInstall(c *gc.C) { + s.unit.presence = false + s.unit.agentStatus.Status = status.Executing + s.unit.agentStatus.Message = "running install hook" + s.checkUntouched(c) +} + +func (s *UnitStatusSuite) TestCantBeLostDuringWorkloadInstall(c *gc.C) { + s.unit.presence = false + s.unit.status.Status = status.Maintenance + s.unit.status.Message = "installing charm software" + s.checkUntouched(c) +} + +type fakeStatusUnit struct { + agentStatus status.StatusInfo + agentStatusErr error + status status.StatusInfo + statusErr error + presence bool + presenceErr error + life state.Life +} + +func (u *fakeStatusUnit) Name() string { + return "foo/2" +} + +func (u *fakeStatusUnit) AgentStatus() (status.StatusInfo, error) { + return u.agentStatus, u.agentStatusErr +} + +func (u *fakeStatusUnit) Status() (status.StatusInfo, error) { + return u.status, u.statusErr +} + +func (u *fakeStatusUnit) AgentPresence() (bool, error) { + return u.presence, u.presenceErr +} + +func (u *fakeStatusUnit) Life() state.Life { + return u.life +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/common/watch.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/common/watch.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/common/watch.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/common/watch.go 2016-10-13 14:31:49.000000000 +0000 @@ -9,7 +9,7 @@ "github.com/juju/errors" "gopkg.in/juju/names.v2" - "launchpad.net/tomb" + "gopkg.in/tomb.v1" "github.com/juju/juju/apiserver/facade" "github.com/juju/juju/apiserver/params" diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/controller/controller.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/controller/controller.go --- 
juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/controller/controller.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/controller/controller.go 2016-10-13 14:31:49.000000000 +0000 @@ -6,18 +6,25 @@ package controller import ( + "encoding/json" "sort" "github.com/juju/errors" "github.com/juju/loggo" + "github.com/juju/txn" "github.com/juju/utils/set" "gopkg.in/juju/names.v2" + "gopkg.in/macaroon.v1" + "github.com/juju/juju/api" + "github.com/juju/juju/api/migrationtarget" "github.com/juju/juju/apiserver/common" "github.com/juju/juju/apiserver/common/cloudspec" "github.com/juju/juju/apiserver/facade" "github.com/juju/juju/apiserver/params" - "github.com/juju/juju/core/migration" + coremigration "github.com/juju/juju/core/migration" + "github.com/juju/juju/migration" + "github.com/juju/juju/permission" "github.com/juju/juju/state" "github.com/juju/juju/state/stateenvirons" ) @@ -33,18 +40,22 @@ AllModels() (params.UserModelList, error) DestroyController(args params.DestroyControllerArgs) error ModelConfig() (params.ModelConfigResults, error) + HostedModelConfigs() (params.HostedModelConfigsResults, error) + GetControllerAccess(params.Entities) (params.UserAccessResults, error) ControllerConfig() (params.ControllerConfigResult, error) ListBlockedModels() (params.ModelBlockInfoList, error) RemoveBlocks(args params.RemoveBlocksArgs) error WatchAllModels() (params.AllWatcherId, error) - ModelStatus(req params.Entities) (params.ModelStatusResults, error) - InitiateModelMigration(params.InitiateModelMigrationArgs) (params.InitiateModelMigrationResults, error) + ModelStatus(params.Entities) (params.ModelStatusResults, error) + InitiateMigration(params.InitiateMigrationArgs) (params.InitiateMigrationResults, error) + ModifyControllerAccess(params.ModifyControllerAccessRequest) (params.ErrorResults, error) } // ControllerAPI implements the environment manager interface and is // the concrete implementation of the api end 
point. type ControllerAPI struct { *common.ControllerConfigAPI + *common.ModelStatusAPI cloudspec.CloudSpecAPI state *state.State @@ -69,18 +80,11 @@ // Since we know this is a user tag (because AuthClient is true), // we just do the type assertion to the UserTag. apiUser, _ := authorizer.GetAuthTag().(names.UserTag) - isAdmin, err := st.IsControllerAdministrator(apiUser) - if err != nil { - return nil, errors.Trace(err) - } - // The entire end point is only accessible to controller administrators. - if !isAdmin { - return nil, errors.Trace(common.ErrPerm) - } environConfigGetter := stateenvirons.EnvironConfigGetter{st} return &ControllerAPI{ ControllerConfigAPI: common.NewControllerConfig(st), + ModelStatusAPI: common.NewModelStatusAPI(common.NewModelManagerBackend(st), authorizer, apiUser), CloudSpecAPI: cloudspec.NewCloudSpec(environConfigGetter.CloudSpec, common.AuthFuncForTag(st.ModelTag())), state: st, authorizer: authorizer, @@ -89,10 +93,24 @@ }, nil } +func (s *ControllerAPI) checkHasAdmin() error { + isAdmin, err := s.authorizer.HasPermission(permission.SuperuserAccess, s.state.ControllerTag()) + if err != nil { + return errors.Trace(err) + } + if !isAdmin { + return common.ServerError(common.ErrPerm) + } + return nil +} + // AllModels allows controller administrators to get the list of all the // environments in the controller. func (s *ControllerAPI) AllModels() (params.UserModelList, error) { result := params.UserModelList{} + if err := s.checkHasAdmin(); err != nil { + return result, errors.Trace(err) + } // Get all the environments that the authenticated user can see, and // supplement that with the other environments that exist that the user @@ -150,7 +168,9 @@ // list. 
func (s *ControllerAPI) ListBlockedModels() (params.ModelBlockInfoList, error) { results := params.ModelBlockInfoList{} - + if err := s.checkHasAdmin(); err != nil { + return results, errors.Trace(err) + } blocks, err := s.state.AllBlocksForController() if err != nil { return results, errors.Trace(err) @@ -193,6 +213,9 @@ // client.ModelGet func (s *ControllerAPI) ModelConfig() (params.ModelConfigResults, error) { result := params.ModelConfigResults{} + if err := s.checkHasAdmin(); err != nil { + return result, errors.Trace(err) + } controllerModel, err := s.state.ControllerModel() if err != nil { @@ -213,8 +236,55 @@ return result, nil } +// HostedModelConfigs returns all the information that the client needs in +// order to connect directly with the host model's provider and destroy it +// directly. +func (s *ControllerAPI) HostedModelConfigs() (params.HostedModelConfigsResults, error) { + result := params.HostedModelConfigsResults{} + if err := s.checkHasAdmin(); err != nil { + return result, errors.Trace(err) + } + + controllerModel, err := s.state.ControllerModel() + if err != nil { + return result, errors.Trace(err) + } + + allModels, err := s.state.AllModels() + if err != nil { + return result, errors.Trace(err) + } + + for _, model := range allModels { + if model.UUID() != controllerModel.UUID() { + config := params.HostedModelConfig{ + Name: model.Name(), + OwnerTag: model.Owner().String(), + } + modelConf, err := model.Config() + if err != nil { + config.Error = common.ServerError(err) + } else { + config.Config = modelConf.AllAttrs() + } + cloudSpec := s.GetCloudSpec(model.ModelTag()) + if config.Error == nil { + config.CloudSpec = cloudSpec.Result + config.Error = cloudSpec.Error + } + result.Models = append(result.Models, config) + } + } + + return result, nil +} + // RemoveBlocks removes all the blocks in the controller. 
func (s *ControllerAPI) RemoveBlocks(args params.RemoveBlocksArgs) error { + if err := s.checkHasAdmin(); err != nil { + return errors.Trace(err) + } + if !args.All { return errors.New("not supported") } @@ -225,6 +295,9 @@ // controller. The returned AllWatcherId should be used with Next on the // AllModelWatcher endpoint to receive deltas. func (c *ControllerAPI) WatchAllModels() (params.AllWatcherId, error) { + if err := c.checkHasAdmin(); err != nil { + return params.AllWatcherId{}, errors.Trace(err) + } w := c.state.WatchAllModels() return params.AllWatcherId{ AllWatcherId: c.resources.Register(w), @@ -258,34 +331,55 @@ return false } -// ModelStatus returns a summary of the environment. -func (c *ControllerAPI) ModelStatus(req params.Entities) (params.ModelStatusResults, error) { - envs := req.Entities - results := params.ModelStatusResults{} - status := make([]params.ModelStatus, len(envs)) - for i, env := range envs { - envStatus, err := c.environStatus(env.Tag) +// GetControllerAccess returns the level of access the specifed users +// have on the controller. 
+func (c *ControllerAPI) GetControllerAccess(req params.Entities) (params.UserAccessResults, error) { + results := params.UserAccessResults{} + isAdmin, err := c.authorizer.HasPermission(permission.SuperuserAccess, c.state.ControllerTag()) + if err != nil { + return results, errors.Trace(err) + } + + users := req.Entities + results.Results = make([]params.UserAccessResult, len(users)) + for i, user := range users { + userTag, err := names.ParseUserTag(user.Tag) if err != nil { - return results, errors.Trace(err) + results.Results[i].Error = common.ServerError(err) + continue } - status[i] = envStatus + if !isAdmin && !c.authorizer.AuthOwner(userTag) { + results.Results[i].Error = common.ServerError(common.ErrPerm) + continue + } + accessInfo, err := c.state.UserAccess(userTag, c.state.ControllerTag()) + if err != nil { + results.Results[i].Error = common.ServerError(err) + continue + } + results.Results[i].Result = ¶ms.UserAccess{ + Access: string(accessInfo.Access), + UserTag: userTag.String()} } - results.Results = status return results, nil } -// InitiateModelMigration attempts to begin the migration of one or +// InitiateMigration attempts to begin the migration of one or // more models to other controllers. 
-func (c *ControllerAPI) InitiateModelMigration(reqArgs params.InitiateModelMigrationArgs) ( - params.InitiateModelMigrationResults, error, +func (c *ControllerAPI) InitiateMigration(reqArgs params.InitiateMigrationArgs) ( + params.InitiateMigrationResults, error, ) { - out := params.InitiateModelMigrationResults{ - Results: make([]params.InitiateModelMigrationResult, len(reqArgs.Specs)), + out := params.InitiateMigrationResults{ + Results: make([]params.InitiateMigrationResult, len(reqArgs.Specs)), + } + if err := c.checkHasAdmin(); err != nil { + return out, errors.Trace(err) } + for i, spec := range reqArgs.Specs { result := &out.Results[i] result.ModelTag = spec.ModelTag - id, err := c.initiateOneModelMigration(spec) + id, err := c.initiateOneMigration(spec) if err != nil { result.Error = common.ServerError(err) } else { @@ -295,7 +389,7 @@ return out, nil } -func (c *ControllerAPI) initiateOneModelMigration(spec params.ModelMigrationSpec) (string, error) { +func (c *ControllerAPI) initiateOneMigration(spec params.MigrationSpec) (string, error) { modelTag, err := names.ParseModelTag(spec.ModelTag) if err != nil { return "", errors.Annotate(err, "model tag") @@ -306,88 +400,220 @@ return "", errors.Annotate(err, "unable to read model") } - // Get State for model. hostedState, err := c.state.ForModel(modelTag) if err != nil { return "", errors.Trace(err) } defer hostedState.Close() - // Start the migration. - targetInfo := spec.TargetInfo - - controllerTag, err := names.ParseModelTag(targetInfo.ControllerTag) + // Construct target info. 
+ specTarget := spec.TargetInfo + controllerTag, err := names.ParseControllerTag(specTarget.ControllerTag) if err != nil { return "", errors.Annotate(err, "controller tag") } - authTag, err := names.ParseUserTag(targetInfo.AuthTag) + authTag, err := names.ParseUserTag(specTarget.AuthTag) if err != nil { return "", errors.Annotate(err, "auth tag") } + var macs []macaroon.Slice + if specTarget.Macaroons != "" { + if err := json.Unmarshal([]byte(specTarget.Macaroons), &macs); err != nil { + return "", errors.Annotate(err, "invalid macaroons") + } + } + targetInfo := coremigration.TargetInfo{ + ControllerTag: controllerTag, + Addrs: specTarget.Addrs, + CACert: specTarget.CACert, + AuthTag: authTag, + Password: specTarget.Password, + Macaroons: macs, + } - args := state.ModelMigrationSpec{ - InitiatedBy: c.apiUser, - TargetInfo: migration.TargetInfo{ - ControllerTag: controllerTag, - Addrs: targetInfo.Addrs, - CACert: targetInfo.CACert, - AuthTag: authTag, - Password: targetInfo.Password, - }, + // Check if the migration is likely to succeed. + if !(spec.ExternalControl && spec.SkipInitialPrechecks) { + if err := runMigrationPrechecks(hostedState, targetInfo); err != nil { + return "", errors.Trace(err) + } } - mig, err := hostedState.CreateModelMigration(args) + + // Trigger the migration. + mig, err := hostedState.CreateMigration(state.MigrationSpec{ + InitiatedBy: c.apiUser, + TargetInfo: targetInfo, + ExternalControl: spec.ExternalControl, + }) if err != nil { return "", errors.Trace(err) } return mig.Id(), nil } -func (c *ControllerAPI) environStatus(tag string) (params.ModelStatus, error) { - var status params.ModelStatus - modelTag, err := names.ParseModelTag(tag) - if err != nil { - return status, errors.Trace(err) +// ModifyControllerAccess changes the model access granted to users. 
+func (c *ControllerAPI) ModifyControllerAccess(args params.ModifyControllerAccessRequest) (params.ErrorResults, error) { + result := params.ErrorResults{ + Results: make([]params.ErrorResult, len(args.Changes)), } - st, err := c.state.ForModel(modelTag) - if err != nil { - return status, errors.Trace(err) + if len(args.Changes) == 0 { + return result, nil } - defer st.Close() - machines, err := st.AllMachines() + hasPermission, err := c.authorizer.HasPermission(permission.SuperuserAccess, c.state.ControllerTag()) if err != nil { - return status, errors.Trace(err) + return result, errors.Trace(err) } - var hostedMachines []*state.Machine - for _, m := range machines { - if !m.IsManager() { - hostedMachines = append(hostedMachines, m) + for i, arg := range args.Changes { + if !hasPermission { + result.Results[i].Error = common.ServerError(common.ErrPerm) + continue + } + + controllerAccess := permission.Access(arg.Access) + if err := permission.ValidateControllerAccess(controllerAccess); err != nil { + result.Results[i].Error = common.ServerError(err) + continue + } + + targetUserTag, err := names.ParseUserTag(arg.UserTag) + if err != nil { + result.Results[i].Error = common.ServerError(errors.Annotate(err, "could not modify controller access")) + continue } + + result.Results[i].Error = common.ServerError( + ChangeControllerAccess(c.state, c.apiUser, targetUserTag, arg.Action, controllerAccess)) } + return result, nil +} - services, err := st.AllApplications() - if err != nil { - return status, errors.Trace(err) +var runMigrationPrechecks = func(st *state.State, targetInfo coremigration.TargetInfo) error { + // Check model and source controller. + if err := migration.SourcePrecheck(migration.PrecheckShim(st)); err != nil { + return errors.Annotate(err, "source prechecks failed") } - env, err := st.Model() + // Check target controller. 
+ conn, err := api.Open(targetToAPIInfo(targetInfo), migration.ControllerDialOpts()) if err != nil { - return status, errors.Trace(err) + return errors.Annotate(err, "connect to target controller") } + defer conn.Close() + modelInfo, err := makeModelInfo(st) if err != nil { - return status, errors.Trace(err) + return errors.Trace(err) } + err = migrationtarget.NewClient(conn).Prechecks(modelInfo) + return errors.Annotate(err, "target prechecks failed") +} + +func makeModelInfo(st *state.State) (coremigration.ModelInfo, error) { + var empty coremigration.ModelInfo - return params.ModelStatus{ - ModelTag: tag, - OwnerTag: env.Owner().String(), - Life: params.Life(env.Life().String()), - HostedMachineCount: len(hostedMachines), - ApplicationCount: len(services), + model, err := st.Model() + if err != nil { + return empty, errors.Trace(err) + } + conf, err := st.ModelConfig() + if err != nil { + return empty, errors.Trace(err) + } + agentVersion, _ := conf.AgentVersion() + return coremigration.ModelInfo{ + UUID: model.UUID(), + Name: model.Name(), + Owner: model.Owner(), + AgentVersion: agentVersion, }, nil } +func targetToAPIInfo(ti coremigration.TargetInfo) *api.Info { + return &api.Info{ + Addrs: ti.Addrs, + CACert: ti.CACert, + Tag: ti.AuthTag, + Password: ti.Password, + Macaroons: ti.Macaroons, + } +} + +func grantControllerAccess(accessor *state.State, targetUserTag, apiUser names.UserTag, access permission.Access) error { + _, err := accessor.AddControllerUser(state.UserAccessSpec{User: targetUserTag, CreatedBy: apiUser, Access: access}) + if errors.IsAlreadyExists(err) { + controllerTag := accessor.ControllerTag() + controllerUser, err := accessor.UserAccess(targetUserTag, controllerTag) + if errors.IsNotFound(err) { + // Conflicts with prior check, must be inconsistent state. 
+ err = txn.ErrExcessiveContention + } + if err != nil { + return errors.Annotate(err, "could not look up controller access for user") + } + + // Only set access if greater access is being granted. + if controllerUser.Access.EqualOrGreaterControllerAccessThan(access) { + return errors.Errorf("user already has %q access or greater", access) + } + if _, err = accessor.SetUserAccess(controllerUser.UserTag, controllerUser.Object, access); err != nil { + return errors.Annotate(err, "could not set controller access for user") + } + return nil + + } + if err != nil { + return errors.Trace(err) + } + return nil +} + +func revokeControllerAccess(accessor *state.State, targetUserTag, apiUser names.UserTag, access permission.Access) error { + controllerTag := accessor.ControllerTag() + switch access { + case permission.LoginAccess: + // Revoking login access removes all access. + err := accessor.RemoveUserAccess(targetUserTag, controllerTag) + return errors.Annotate(err, "could not revoke controller access") + case permission.AddModelAccess: + // Revoking add-model access sets login. + controllerUser, err := accessor.UserAccess(targetUserTag, controllerTag) + if err != nil { + return errors.Annotate(err, "could not look up controller access for user") + } + _, err = accessor.SetUserAccess(controllerUser.UserTag, controllerUser.Object, permission.LoginAccess) + return errors.Annotate(err, "could not set controller access to read-only") + case permission.SuperuserAccess: + // Revoking superuser sets add-model. 
+ controllerUser, err := accessor.UserAccess(targetUserTag, controllerTag) + if err != nil { + return errors.Annotate(err, "could not look up controller access for user") + } + _, err = accessor.SetUserAccess(controllerUser.UserTag, controllerUser.Object, permission.AddModelAccess) + return errors.Annotate(err, "could not set controller access to add-model") + + default: + return errors.Errorf("don't know how to revoke %q access", access) + } + +} + +// ChangeControllerAccess performs the requested access grant or revoke action for the +// specified user on the controller. +func ChangeControllerAccess(accessor *state.State, apiUser, targetUserTag names.UserTag, action params.ControllerAction, access permission.Access) error { + switch action { + case params.GrantControllerAccess: + err := grantControllerAccess(accessor, targetUserTag, apiUser, access) + if err != nil { + return errors.Annotate(err, "could not grant controller access") + } + return nil + case params.RevokeControllerAccess: + return revokeControllerAccess(accessor, targetUserTag, apiUser, access) + default: + return errors.Errorf("unknown action %q", action) + } +} + func (o orderedBlockInfo) Swap(i, j int) { o[i], o[j] = o[j], o[i] } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/controller/controller_test.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/controller/controller_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/controller/controller_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/controller/controller_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,13 +4,17 @@ package controller_test import ( + "encoding/json" + "regexp" "time" + "github.com/juju/errors" "github.com/juju/loggo" jc "github.com/juju/testing/checkers" "github.com/juju/utils" gc "gopkg.in/check.v1" "gopkg.in/juju/names.v2" + "gopkg.in/macaroon.v1" "github.com/juju/juju/apiserver" "github.com/juju/juju/apiserver/common" @@ -18,16 
+22,19 @@ "github.com/juju/juju/apiserver/facade/facadetest" "github.com/juju/juju/apiserver/params" apiservertesting "github.com/juju/juju/apiserver/testing" - "github.com/juju/juju/core/description" - jujutesting "github.com/juju/juju/juju/testing" + "github.com/juju/juju/cloud" + "github.com/juju/juju/environs" + "github.com/juju/juju/environs/config" + "github.com/juju/juju/permission" "github.com/juju/juju/state" "github.com/juju/juju/state/multiwatcher" + statetesting "github.com/juju/juju/state/testing" "github.com/juju/juju/testing" "github.com/juju/juju/testing/factory" ) type controllerSuite struct { - jujutesting.JujuConnSuite + statetesting.StateSuite controller *controller.ControllerAPI resources *common.Resources @@ -37,12 +44,18 @@ var _ = gc.Suite(&controllerSuite{}) func (s *controllerSuite) SetUpTest(c *gc.C) { - s.JujuConnSuite.SetUpTest(c) + // Initial config needs to be set before the StateSuite SetUpTest. + s.InitialConfig = testing.CustomModelConfig(c, testing.Attrs{ + "name": "controller", + }) + + s.StateSuite.SetUpTest(c) s.resources = common.NewResources() s.AddCleanup(func(_ *gc.C) { s.resources.StopAll() }) s.authorizer = apiservertesting.FakeAuthorizer{ - Tag: s.AdminUserTag(c), + Tag: s.Owner, + AdminTag: s.Owner, } controller, err := controller.NewControllerAPI(s.State, s.resources, s.authorizer) @@ -61,16 +74,6 @@ c.Assert(err, gc.ErrorMatches, "permission denied") } -func (s *controllerSuite) TestNewAPIRefusesNonAdmins(c *gc.C) { - user := s.Factory.MakeUser(c, &factory.UserParams{NoModelUser: true}) - anAuthoriser := apiservertesting.FakeAuthorizer{ - Tag: user.Tag(), - } - endPoint, err := controller.NewControllerAPI(s.State, s.resources, anAuthoriser) - c.Assert(endPoint, gc.IsNil) - c.Assert(err, gc.ErrorMatches, "permission denied") -} - func (s *controllerSuite) checkEnvironmentMatches(c *gc.C, env params.Model, expected *state.Model) { c.Check(env.Name, gc.Equals, expected.Name()) c.Check(env.UUID, gc.Equals, 
expected.UUID()) @@ -86,11 +89,12 @@ st := s.Factory.MakeModel(c, &factory.ModelParams{ Name: "user", Owner: remoteUserTag}) defer st.Close() - st.AddModelUser(state.UserAccessSpec{ - User: admin.UserTag(), - CreatedBy: remoteUserTag, - DisplayName: "Foo Bar", - Access: description.ReadAccess}) + st.AddModelUser(st.ModelUUID(), + state.UserAccessSpec{ + User: admin.UserTag(), + CreatedBy: remoteUserTag, + DisplayName: "Foo Bar", + Access: permission.ReadAccess}) s.Factory.MakeModel(c, &factory.ModelParams{ Name: "no-access", Owner: remoteUserTag}).Close() @@ -109,6 +113,76 @@ c.Assert(obtained, jc.DeepEquals, expected) } +func (s *controllerSuite) TestHostedModelConfigs_OnlyHostedModelsReturned(c *gc.C) { + owner := s.Factory.MakeUser(c, nil) + s.Factory.MakeModel(c, &factory.ModelParams{ + Name: "first", Owner: owner.UserTag()}).Close() + remoteUserTag := names.NewUserTag("user@remote") + s.Factory.MakeModel(c, &factory.ModelParams{ + Name: "second", Owner: remoteUserTag}).Close() + + results, err := s.controller.HostedModelConfigs() + c.Assert(err, jc.ErrorIsNil) + c.Assert(len(results.Models), gc.Equals, 2) + + one := results.Models[0] + two := results.Models[1] + + c.Assert(one.Name, gc.Equals, "first") + c.Assert(one.OwnerTag, gc.Equals, owner.UserTag().String()) + c.Assert(two.Name, gc.Equals, "second") + c.Assert(two.OwnerTag, gc.Equals, remoteUserTag.String()) +} + +func (s *controllerSuite) makeCloudSpec(c *gc.C, pSpec *params.CloudSpec) environs.CloudSpec { + c.Assert(pSpec, gc.NotNil) + var credential *cloud.Credential + if pSpec.Credential != nil { + credentialValue := cloud.NewCredential( + cloud.AuthType(pSpec.Credential.AuthType), + pSpec.Credential.Attributes, + ) + credential = &credentialValue + } + spec := environs.CloudSpec{ + Type: pSpec.Type, + Name: pSpec.Name, + Region: pSpec.Region, + Endpoint: pSpec.Endpoint, + IdentityEndpoint: pSpec.IdentityEndpoint, + StorageEndpoint: pSpec.StorageEndpoint, + Credential: credential, + } + 
c.Assert(spec.Validate(), jc.ErrorIsNil) + return spec +} + +func (s *controllerSuite) TestHostedModelConfigs_CanOpenEnviron(c *gc.C) { + owner := s.Factory.MakeUser(c, nil) + s.Factory.MakeModel(c, &factory.ModelParams{ + Name: "first", Owner: owner.UserTag()}).Close() + remoteUserTag := names.NewUserTag("user@remote") + s.Factory.MakeModel(c, &factory.ModelParams{ + Name: "second", Owner: remoteUserTag}).Close() + + results, err := s.controller.HostedModelConfigs() + c.Assert(err, jc.ErrorIsNil) + c.Assert(len(results.Models), gc.Equals, 2) + + for _, model := range results.Models { + c.Assert(model.Error, gc.IsNil) + + cfg, err := config.New(config.NoDefaults, model.Config) + c.Assert(err, jc.ErrorIsNil) + spec := s.makeCloudSpec(c, model.CloudSpec) + _, err = environs.New(environs.OpenParams{ + Cloud: spec, + Config: cfg, + }) + c.Assert(err, jc.ErrorIsNil) + } +} + func (s *controllerSuite) TestListBlockedModels(c *gc.C) { st := s.Factory.MakeModel(c, &factory.ModelParams{ Name: "test"}) @@ -126,7 +200,7 @@ params.ModelBlockInfo{ Name: "controller", UUID: s.State.ModelUUID(), - OwnerTag: s.AdminUserTag(c).String(), + OwnerTag: s.Owner.String(), Blocks: []string{ "BlockDestroy", "BlockChange", @@ -135,7 +209,7 @@ params.ModelBlockInfo{ Name: "test", UUID: st.ModelUUID(), - OwnerTag: s.AdminUserTag(c).String(), + OwnerTag: s.Owner.String(), Blocks: []string{ "BlockDestroy", "BlockChange", @@ -162,7 +236,10 @@ Name: "test"}) defer st.Close() - authorizer := &apiservertesting.FakeAuthorizer{Tag: s.AdminUserTag(c)} + authorizer := &apiservertesting.FakeAuthorizer{ + Tag: s.Owner, + AdminTag: s.Owner, + } controller, err := controller.NewControllerAPI(st, common.NewResources(), authorizer) c.Assert(err, jc.ErrorIsNil) cfg, err := controller.ModelConfig() @@ -185,7 +262,7 @@ Name: "test"}) defer st.Close() - authorizer := &apiservertesting.FakeAuthorizer{Tag: s.AdminUserTag(c)} + authorizer := &apiservertesting.FakeAuthorizer{Tag: s.Owner} controller, err := 
controller.NewControllerAPI(st, common.NewResources(), authorizer) c.Assert(err, jc.ErrorIsNil) cfg, err := controller.ControllerConfig() @@ -256,54 +333,7 @@ } } -func (s *controllerSuite) TestModelStatus(c *gc.C) { - otherEnvOwner := s.Factory.MakeModelUser(c, nil) - otherSt := s.Factory.MakeModel(c, &factory.ModelParams{ - Name: "dummytoo", - Owner: otherEnvOwner.UserTag, - ConfigAttrs: testing.Attrs{ - "controller": false, - }, - }) - defer otherSt.Close() - - s.Factory.MakeMachine(c, &factory.MachineParams{Jobs: []state.MachineJob{state.JobManageModel}}) - s.Factory.MakeMachine(c, &factory.MachineParams{Jobs: []state.MachineJob{state.JobHostUnits}}) - s.Factory.MakeApplication(c, &factory.ApplicationParams{ - Charm: s.Factory.MakeCharm(c, nil), - }) - - otherFactory := factory.NewFactory(otherSt) - otherFactory.MakeMachine(c, nil) - otherFactory.MakeMachine(c, nil) - otherFactory.MakeApplication(c, &factory.ApplicationParams{ - Charm: otherFactory.MakeCharm(c, nil), - }) - - controllerEnvTag := s.State.ModelTag().String() - hostedEnvTag := otherSt.ModelTag().String() - - req := params.Entities{ - Entities: []params.Entity{{Tag: controllerEnvTag}, {Tag: hostedEnvTag}}, - } - results, err := s.controller.ModelStatus(req) - c.Assert(err, jc.ErrorIsNil) - c.Assert(results.Results, gc.DeepEquals, []params.ModelStatus{{ - ModelTag: controllerEnvTag, - HostedMachineCount: 1, - ApplicationCount: 1, - OwnerTag: "user-admin@local", - Life: params.Alive, - }, { - ModelTag: hostedEnvTag, - HostedMachineCount: 2, - ApplicationCount: 1, - OwnerTag: otherEnvOwner.UserTag.String(), - Life: params.Alive, - }}) -} - -func (s *controllerSuite) TestInitiateModelMigration(c *gc.C) { +func (s *controllerSuite) TestInitiateMigration(c *gc.C) { // Create two hosted models to migrate. st1 := s.Factory.MakeModel(c, nil) defer st1.Close() @@ -311,13 +341,20 @@ st2 := s.Factory.MakeModel(c, nil) defer st2.Close() - // Kick off the migration. 
- args := params.InitiateModelMigrationArgs{ - Specs: []params.ModelMigrationSpec{ + mac, err := macaroon.New([]byte("secret"), "id", "location") + c.Assert(err, jc.ErrorIsNil) + macsJSON, err := json.Marshal([]macaroon.Slice{{mac}}) + c.Assert(err, jc.ErrorIsNil) + + controller.SetPrecheckResult(s, nil) + + // Kick off migrations + args := params.InitiateMigrationArgs{ + Specs: []params.MigrationSpec{ { ModelTag: st1.ModelTag().String(), - TargetInfo: params.ModelMigrationTargetInfo{ - ControllerTag: randomModelTag(), + TargetInfo: params.MigrationTargetInfo{ + ControllerTag: randomControllerTag(), Addrs: []string{"1.1.1.1:1111", "2.2.2.2:2222"}, CACert: "cert1", AuthTag: names.NewUserTag("admin1").String(), @@ -325,36 +362,41 @@ }, }, { ModelTag: st2.ModelTag().String(), - TargetInfo: params.ModelMigrationTargetInfo{ - ControllerTag: randomModelTag(), + TargetInfo: params.MigrationTargetInfo{ + ControllerTag: randomControllerTag(), Addrs: []string{"3.3.3.3:3333"}, CACert: "cert2", AuthTag: names.NewUserTag("admin2").String(), + Macaroons: string(macsJSON), Password: "secret2", }, + ExternalControl: true, }, }, } - out, err := s.controller.InitiateModelMigration(args) + out, err := s.controller.InitiateMigration(args) c.Assert(err, jc.ErrorIsNil) c.Assert(out.Results, gc.HasLen, 2) states := []*state.State{st1, st2} for i, spec := range args.Specs { + c.Log(i) st := states[i] result := out.Results[i] - c.Check(result.Error, gc.IsNil) + c.Assert(result.Error, gc.IsNil) c.Check(result.ModelTag, gc.Equals, spec.ModelTag) expectedId := st.ModelUUID() + ":0" c.Check(result.MigrationId, gc.Equals, expectedId) // Ensure the migration made it into the DB correctly. 
- mig, err := st.LatestModelMigration() + mig, err := st.LatestMigration() c.Assert(err, jc.ErrorIsNil) c.Check(mig.Id(), gc.Equals, expectedId) c.Check(mig.ModelUUID(), gc.Equals, st.ModelUUID()) - c.Check(mig.InitiatedBy(), gc.Equals, s.AdminUserTag(c).Id()) + c.Check(mig.InitiatedBy(), gc.Equals, s.Owner.Id()) + c.Check(mig.ExternalControl(), gc.Equals, args.Specs[i].ExternalControl) + targetInfo, err := mig.TargetInfo() c.Assert(err, jc.ErrorIsNil) c.Check(targetInfo.ControllerTag.String(), gc.Equals, spec.TargetInfo.ControllerTag) @@ -362,22 +404,28 @@ c.Check(targetInfo.CACert, gc.Equals, spec.TargetInfo.CACert) c.Check(targetInfo.AuthTag.String(), gc.Equals, spec.TargetInfo.AuthTag) c.Check(targetInfo.Password, gc.Equals, spec.TargetInfo.Password) + + if spec.TargetInfo.Macaroons != "" { + macJSONdb, err := json.Marshal(targetInfo.Macaroons) + c.Assert(err, jc.ErrorIsNil) + c.Check(string(macJSONdb), gc.Equals, spec.TargetInfo.Macaroons) + } } } -func (s *controllerSuite) TestInitiateModelMigrationValidationError(c *gc.C) { +func (s *controllerSuite) TestInitiateMigrationSpecError(c *gc.C) { // Create a hosted model to migrate. st := s.Factory.MakeModel(c, nil) defer st.Close() // Kick off the migration with missing details. 
- args := params.InitiateModelMigrationArgs{ - Specs: []params.ModelMigrationSpec{{ + args := params.InitiateMigrationArgs{ + Specs: []params.MigrationSpec{{ ModelTag: st.ModelTag().String(), // TargetInfo missing }}, } - out, err := s.controller.InitiateModelMigration(args) + out, err := s.controller.InitiateMigration(args) c.Assert(err, jc.ErrorIsNil) c.Assert(out.Results, gc.HasLen, 1) result := out.Results[0] @@ -386,16 +434,17 @@ c.Check(result.Error, gc.ErrorMatches, "controller tag: .+ is not a valid tag") } -func (s *controllerSuite) TestInitiateModelMigrationPartialFailure(c *gc.C) { +func (s *controllerSuite) TestInitiateMigrationPartialFailure(c *gc.C) { st := s.Factory.MakeModel(c, nil) defer st.Close() + controller.SetPrecheckResult(s, nil) - args := params.InitiateModelMigrationArgs{ - Specs: []params.ModelMigrationSpec{ + args := params.InitiateMigrationArgs{ + Specs: []params.MigrationSpec{ { ModelTag: st.ModelTag().String(), - TargetInfo: params.ModelMigrationTargetInfo{ - ControllerTag: randomModelTag(), + TargetInfo: params.MigrationTargetInfo{ + ControllerTag: randomControllerTag(), Addrs: []string{"1.1.1.1:1111", "2.2.2.2:2222"}, CACert: "cert", AuthTag: names.NewUserTag("admin").String(), @@ -406,7 +455,7 @@ }, }, } - out, err := s.controller.InitiateModelMigration(args) + out, err := s.controller.InitiateMigration(args) c.Assert(err, jc.ErrorIsNil) c.Assert(out.Results, gc.HasLen, 2) @@ -417,7 +466,351 @@ c.Check(out.Results[1].Error, gc.ErrorMatches, "unable to read model: .+") } +func (s *controllerSuite) TestInitiateMigrationInvalidMacaroons(c *gc.C) { + st := s.Factory.MakeModel(c, nil) + defer st.Close() + + args := params.InitiateMigrationArgs{ + Specs: []params.MigrationSpec{ + { + ModelTag: st.ModelTag().String(), + TargetInfo: params.MigrationTargetInfo{ + ControllerTag: randomControllerTag(), + Addrs: []string{"1.1.1.1:1111", "2.2.2.2:2222"}, + CACert: "cert", + AuthTag: names.NewUserTag("admin").String(), + Macaroons: "BLAH", + }, 
+ }, + }, + } + out, err := s.controller.InitiateMigration(args) + c.Assert(err, jc.ErrorIsNil) + c.Assert(out.Results, gc.HasLen, 1) + result := out.Results[0] + c.Check(result.ModelTag, gc.Equals, args.Specs[0].ModelTag) + c.Check(result.Error, gc.ErrorMatches, "invalid macaroons: .+") +} + +func (s *controllerSuite) TestInitiateMigrationPrecheckFail(c *gc.C) { + st := s.Factory.MakeModel(c, nil) + defer st.Close() + + controller.SetPrecheckResult(s, errors.New("boom")) + + args := params.InitiateMigrationArgs{ + Specs: []params.MigrationSpec{{ + ModelTag: st.ModelTag().String(), + TargetInfo: params.MigrationTargetInfo{ + ControllerTag: randomControllerTag(), + Addrs: []string{"1.1.1.1:1111"}, + CACert: "cert1", + AuthTag: names.NewUserTag("admin1").String(), + Password: "secret1", + }, + }}, + } + out, err := s.controller.InitiateMigration(args) + c.Assert(out.Results, gc.HasLen, 1) + c.Check(out.Results[0].Error, gc.ErrorMatches, "boom") + + active, err := st.IsMigrationActive() + c.Assert(err, jc.ErrorIsNil) + c.Check(active, jc.IsFalse) +} + +func (s *controllerSuite) TestInitiateMigrationSkipPrechecks(c *gc.C) { + st := s.Factory.MakeModel(c, nil) + defer st.Close() + controller.SetPrecheckResult(s, errors.New("should not happen")) + + args := params.InitiateMigrationArgs{ + Specs: []params.MigrationSpec{ + { + ModelTag: st.ModelTag().String(), + TargetInfo: params.MigrationTargetInfo{ + ControllerTag: randomControllerTag(), + Addrs: []string{"1.1.1.1:1111", "2.2.2.2:2222"}, + CACert: "cert", + AuthTag: names.NewUserTag("admin").String(), + Password: "secret", + }, + ExternalControl: true, + SkipInitialPrechecks: true, + }, + }, + } + out, err := s.controller.InitiateMigration(args) + c.Assert(err, jc.ErrorIsNil) + c.Assert(out.Results, gc.HasLen, 1) + c.Check(out.Results[0].ModelTag, gc.Equals, st.ModelTag().String()) + c.Check(out.Results[0].Error, gc.IsNil) +} + +func randomControllerTag() string { + uuid := utils.MustNewUUID().String() + return 
names.NewControllerTag(uuid).String() +} + func randomModelTag() string { uuid := utils.MustNewUUID().String() return names.NewModelTag(uuid).String() } + +func (s *controllerSuite) modifyControllerAccess(c *gc.C, user names.UserTag, action params.ControllerAction, access string) error { + args := params.ModifyControllerAccessRequest{ + Changes: []params.ModifyControllerAccess{{ + UserTag: user.String(), + Action: action, + Access: access, + }}} + result, err := s.controller.ModifyControllerAccess(args) + c.Assert(err, jc.ErrorIsNil) + return result.OneError() +} + +func (s *controllerSuite) controllerGrant(c *gc.C, user names.UserTag, access string) error { + return s.modifyControllerAccess(c, user, params.GrantControllerAccess, access) +} + +func (s *controllerSuite) controllerRevoke(c *gc.C, user names.UserTag, access string) error { + return s.modifyControllerAccess(c, user, params.RevokeControllerAccess, access) +} + +func (s *controllerSuite) TestGrantMissingUserFails(c *gc.C) { + user := names.NewLocalUserTag("foobar") + err := s.controllerGrant(c, user, string(permission.AddModelAccess)) + expectedErr := `could not grant controller access: user "foobar" does not exist locally: user "foobar" not found` + c.Assert(err, gc.ErrorMatches, expectedErr) +} + +func (s *controllerSuite) TestRevokeSuperuserLeavesAddModelAccess(c *gc.C) { + user := s.Factory.MakeUser(c, &factory.UserParams{NoModelUser: true}) + + err := s.controllerGrant(c, user.UserTag(), string(permission.SuperuserAccess)) + c.Assert(err, gc.IsNil) + ctag := names.NewControllerTag(s.State.ControllerUUID()) + controllerUser, err := s.State.UserAccess(user.UserTag(), ctag) + c.Assert(err, jc.ErrorIsNil) + c.Assert(controllerUser.Access, gc.Equals, permission.SuperuserAccess) + + err = s.controllerRevoke(c, user.UserTag(), string(permission.SuperuserAccess)) + c.Assert(err, gc.IsNil) + + controllerUser, err = s.State.UserAccess(user.UserTag(), controllerUser.Object) + c.Assert(err, jc.ErrorIsNil) + 
c.Assert(controllerUser.Access, gc.Equals, permission.AddModelAccess) +} + +func (s *controllerSuite) TestRevokeAddModelLeavesLoginAccess(c *gc.C) { + user := s.Factory.MakeUser(c, &factory.UserParams{NoModelUser: true}) + + err := s.controllerGrant(c, user.UserTag(), string(permission.AddModelAccess)) + c.Assert(err, gc.IsNil) + ctag := names.NewControllerTag(s.State.ControllerUUID()) + controllerUser, err := s.State.UserAccess(user.UserTag(), ctag) + c.Assert(err, jc.ErrorIsNil) + c.Assert(controllerUser.Access, gc.Equals, permission.AddModelAccess) + + err = s.controllerRevoke(c, user.UserTag(), string(permission.AddModelAccess)) + c.Assert(err, gc.IsNil) + + controllerUser, err = s.State.UserAccess(user.UserTag(), controllerUser.Object) + c.Assert(err, jc.ErrorIsNil) + c.Assert(controllerUser.Access, gc.Equals, permission.LoginAccess) +} + +func (s *controllerSuite) TestRevokeLoginRemovesControllerUser(c *gc.C) { + user := s.Factory.MakeUser(c, &factory.UserParams{NoModelUser: true}) + err := s.controllerRevoke(c, user.UserTag(), string(permission.LoginAccess)) + c.Assert(err, gc.IsNil) + + ctag := names.NewControllerTag(s.State.ControllerUUID()) + _, err = s.State.UserAccess(user.UserTag(), ctag) + + c.Assert(errors.IsNotFound(err), jc.IsTrue) +} + +func (s *controllerSuite) TestRevokeControllerMissingUser(c *gc.C) { + user := names.NewLocalUserTag("foobar") + err := s.controllerRevoke(c, user, string(permission.AddModelAccess)) + expectedErr := `could not look up controller access for user: user "foobar" not found` + c.Assert(err, gc.ErrorMatches, expectedErr) +} + +func (s *controllerSuite) TestGrantOnlyGreaterAccess(c *gc.C) { + user := s.Factory.MakeUser(c, &factory.UserParams{NoModelUser: true}) + + err := s.controllerGrant(c, user.UserTag(), string(permission.AddModelAccess)) + c.Assert(err, gc.IsNil) + ctag := names.NewControllerTag(s.State.ControllerUUID()) + controllerUser, err := s.State.UserAccess(user.UserTag(), ctag) + c.Assert(err, jc.ErrorIsNil) 
+ c.Assert(controllerUser.Access, gc.Equals, permission.AddModelAccess) + + err = s.controllerGrant(c, user.UserTag(), string(permission.AddModelAccess)) + expectedErr := `could not grant controller access: user already has "add-model" access or greater` + c.Assert(err, gc.ErrorMatches, expectedErr) +} + +func (s *controllerSuite) TestGrantControllerAddRemoteUser(c *gc.C) { + userTag := names.NewUserTag("foobar@ubuntuone") + + err := s.controllerGrant(c, userTag, string(permission.AddModelAccess)) + c.Assert(err, jc.ErrorIsNil) + + ctag := names.NewControllerTag(s.State.ControllerUUID()) + controllerUser, err := s.State.UserAccess(userTag, ctag) + c.Assert(err, jc.ErrorIsNil) + + c.Assert(controllerUser.Access, gc.Equals, permission.AddModelAccess) +} + +func (s *controllerSuite) TestGrantControllerInvalidUserTag(c *gc.C) { + for _, testParam := range []struct { + tag string + validTag bool + }{{ + tag: "unit-foo/0", + validTag: true, + }, { + tag: "application-foo", + validTag: true, + }, { + tag: "relation-wordpress:db mysql:db", + validTag: true, + }, { + tag: "machine-0", + validTag: true, + }, { + tag: "user@local", + validTag: false, + }, { + tag: "user-Mua^h^h^h^arh", + validTag: true, + }, { + tag: "user@", + validTag: false, + }, { + tag: "user@ubuntuone", + validTag: false, + }, { + tag: "user@ubuntuone", + validTag: false, + }, { + tag: "@ubuntuone", + validTag: false, + }, { + tag: "in^valid.", + validTag: false, + }, { + tag: "", + validTag: false, + }, + } { + var expectedErr string + errPart := `could not modify controller access: "` + regexp.QuoteMeta(testParam.tag) + `" is not a valid ` + + if testParam.validTag { + // The string is a valid tag, but not a user tag. + expectedErr = errPart + `user tag` + } else { + // The string is not a valid tag of any kind. 
+ expectedErr = errPart + `tag` + } + + args := params.ModifyControllerAccessRequest{ + Changes: []params.ModifyControllerAccess{{ + UserTag: testParam.tag, + Action: params.GrantControllerAccess, + Access: string(permission.SuperuserAccess), + }}} + + result, err := s.controller.ModifyControllerAccess(args) + c.Assert(err, jc.ErrorIsNil) + c.Assert(result.OneError(), gc.ErrorMatches, expectedErr) + } +} + +func (s *controllerSuite) TestModifyControllerAccessEmptyArgs(c *gc.C) { + args := params.ModifyControllerAccessRequest{Changes: []params.ModifyControllerAccess{{}}} + + result, err := s.controller.ModifyControllerAccess(args) + c.Assert(err, jc.ErrorIsNil) + expectedErr := `"" controller access not valid` + c.Assert(result.OneError(), gc.ErrorMatches, expectedErr) +} + +func (s *controllerSuite) TestModifyControllerAccessInvalidAction(c *gc.C) { + var dance params.ControllerAction = "dance" + args := params.ModifyControllerAccessRequest{ + Changes: []params.ModifyControllerAccess{{ + UserTag: "user-user@local", + Action: dance, + Access: string(permission.LoginAccess), + }}} + + result, err := s.controller.ModifyControllerAccess(args) + c.Assert(err, jc.ErrorIsNil) + expectedErr := `unknown action "dance"` + c.Assert(result.OneError(), gc.ErrorMatches, expectedErr) +} + +func (s *controllerSuite) TestGetControllerAccess(c *gc.C) { + user := s.Factory.MakeUser(c, &factory.UserParams{NoModelUser: true}) + user2 := s.Factory.MakeUser(c, &factory.UserParams{NoModelUser: true}) + + err := s.controllerGrant(c, user.UserTag(), string(permission.SuperuserAccess)) + c.Assert(err, gc.IsNil) + err = s.controllerGrant(c, user2.UserTag(), string(permission.AddModelAccess)) + c.Assert(err, gc.IsNil) + req := params.Entities{ + Entities: []params.Entity{{Tag: user.Tag().String()}, {Tag: user2.Tag().String()}}, + } + results, err := s.controller.GetControllerAccess(req) + c.Assert(err, jc.ErrorIsNil) + c.Assert(results.Results, gc.DeepEquals, []params.UserAccessResult{{ + 
Result: ¶ms.UserAccess{ + Access: "superuser", + UserTag: user.Tag().String(), + }}, { + Result: ¶ms.UserAccess{ + Access: "add-model", + UserTag: user2.Tag().String(), + }}}) +} + +func (s *controllerSuite) TestGetControllerAccessPermissions(c *gc.C) { + // Set up the user making the call. + user := s.Factory.MakeUser(c, &factory.UserParams{NoModelUser: true}) + anAuthoriser := apiservertesting.FakeAuthorizer{ + Tag: user.Tag(), + } + endpoint, err := controller.NewControllerAPI(s.State, s.resources, anAuthoriser) + c.Assert(err, jc.ErrorIsNil) + args := params.ModifyControllerAccessRequest{ + Changes: []params.ModifyControllerAccess{{ + UserTag: user.Tag().String(), + Action: params.GrantControllerAccess, + Access: "superuser", + }}} + result, err := s.controller.ModifyControllerAccess(args) + c.Assert(err, jc.ErrorIsNil) + c.Assert(result.OneError(), jc.ErrorIsNil) + + // We ask for permissions for a different user as well as ourselves. + differentUser := s.Factory.MakeUser(c, &factory.UserParams{NoModelUser: true}) + req := params.Entities{ + Entities: []params.Entity{{Tag: user.Tag().String()}, {Tag: differentUser.Tag().String()}}, + } + results, err := endpoint.GetControllerAccess(req) + c.Assert(err, jc.ErrorIsNil) + c.Assert(results.Results, gc.HasLen, 2) + c.Assert(*results.Results[0].Result, jc.DeepEquals, params.UserAccess{ + Access: "superuser", + UserTag: user.Tag().String(), + }) + c.Assert(*results.Results[1].Error, gc.DeepEquals, params.Error{ + Message: "permission denied", Code: "unauthorized access", + }) +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/controller/destroy.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/controller/destroy.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/controller/destroy.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/controller/destroy.go 2016-10-13 14:31:49.000000000 +0000 @@ -8,6 +8,7 @@ 
"github.com/juju/juju/apiserver/common" "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/permission" ) // DestroyController will attempt to destroy the controller. If the args @@ -20,6 +21,14 @@ // that it should wait for hosted models to be completely cleaned up // before proceeding. func (s *ControllerAPI) DestroyController(args params.DestroyControllerArgs) error { + hasPermission, err := s.authorizer.HasPermission(permission.SuperuserAccess, s.state.ControllerTag()) + if err != nil { + return errors.Trace(err) + } + if !hasPermission { + return errors.Trace(common.ErrPerm) + } + st := common.NewModelManagerBackend(s.state) controllerModel, err := st.ControllerModel() if err != nil { diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/controller/export_test.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/controller/export_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/controller/export_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/controller/export_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,19 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+ +package controller + +import ( + "github.com/juju/juju/core/migration" + "github.com/juju/juju/state" +) + +type patcher interface { + PatchValue(destination, source interface{}) +} + +func SetPrecheckResult(p patcher, err error) { + p.PatchValue(&runMigrationPrechecks, func(*state.State, migration.TargetInfo) error { + return err + }) +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/debuglog_db.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/debuglog_db.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/debuglog_db.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/debuglog_db.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,12 +4,11 @@ package apiserver import ( - "fmt" "net/http" - "time" "github.com/juju/errors" + "github.com/juju/juju/apiserver/params" "github.com/juju/juju/state" ) @@ -43,9 +42,7 @@ return errors.Annotate(tailer.Err(), "tailer stopped") } - line := formatLogRecord(rec) - _, err := socket.Write([]byte(line)) - if err != nil { + if err := socket.sendLogRecord(formatLogRecord(rec)); err != nil { return errors.Annotate(err, "sending failed") } @@ -73,19 +70,15 @@ return params } -func formatLogRecord(r *state.LogRecord) string { - return fmt.Sprintf("%s: %s %s %s %s %s\n", - r.Entity, - formatTime(r.Time), - r.Level.String(), - r.Module, - r.Location, - r.Message, - ) -} - -func formatTime(t time.Time) string { - return t.In(time.UTC).Format("2006-01-02 15:04:05") +func formatLogRecord(r *state.LogRecord) *params.LogMessage { + return ¶ms.LogMessage{ + Entity: r.Entity.String(), + Timestamp: r.Time, + Severity: r.Level.String(), + Module: r.Module, + Location: r.Location, + Message: r.Message, + } } var newLogTailer = _newLogTailer // For replacing in tests diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/debuglog_db_internal_test.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/debuglog_db_internal_test.go --- 
juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/debuglog_db_internal_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/debuglog_db_internal_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -12,6 +12,7 @@ gc "gopkg.in/check.v1" "gopkg.in/juju/names.v2" + "github.com/juju/juju/apiserver/params" "github.com/juju/juju/state" coretesting "github.com/juju/juju/testing" ) @@ -244,7 +245,17 @@ s.writes <- fmt.Sprintf("err: %v", err) } -func (s *fakeDebugLogSocket) Write(buf []byte) (int, error) { - s.writes <- string(buf) - return len(buf), nil +func (s *fakeDebugLogSocket) sendLogRecord(r *params.LogMessage) error { + s.writes <- fmt.Sprintf("%s: %s %s %s %s %s\n", + r.Entity, + s.formatTime(r.Timestamp), + r.Severity, + r.Module, + r.Location, + r.Message) + return nil +} + +func (c *fakeDebugLogSocket) formatTime(t time.Time) string { + return t.In(time.UTC).Format("2006-01-02 15:04:05") } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/debuglog.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/debuglog.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/debuglog.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/debuglog.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,7 +4,6 @@ package apiserver import ( - "io" "net" "net/http" "net/url" @@ -71,9 +70,8 @@ server := websocket.Server{ Handler: func(conn *websocket.Conn) { socket := &debugLogSocketImpl{conn} - defer socket.Close() + defer conn.Close() - logger.Infof("debug log handler starting") // Validate before authenticate because the authentication is // dependent on the state connection that is determined during the // validation. @@ -111,20 +109,21 @@ // debugLogSocket describes the functionality required for the // debuglog handlers to send logs to the client. type debugLogSocket interface { - io.Writer - // sendOk sends a nil error response, indicating there were no errors. 
sendOk() // sendError sends a JSON-encoded error response. sendError(err error) + + // sendLogRecord sends record JSON encoded. + sendLogRecord(record *params.LogMessage) error } // debugLogSocketImpl implements the debugLogSocket interface. It // wraps a websocket.Conn and provides a few debug-log specific helper // methods. type debugLogSocketImpl struct { - *websocket.Conn + conn *websocket.Conn } // sendOk implements debugLogSocket. @@ -134,11 +133,15 @@ // sendError implements debugLogSocket. func (s *debugLogSocketImpl) sendError(err error) { - sendJSON(s.Conn, ¶ms.ErrorResult{ + sendJSON(s.conn, ¶ms.ErrorResult{ Error: common.ServerError(err), }) } +func (s *debugLogSocketImpl) sendLogRecord(record *params.LogMessage) error { + return sendJSON(s.conn, record) +} + // debugLogParams contains the parsed debuglog API request parameters. type debugLogParams struct { maxLines uint diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/debuglog_test.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/debuglog_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/debuglog_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/debuglog_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -19,7 +19,7 @@ // debugLogBaseSuite has tests that should be run for both the file // and DB based variants of debuglog, as well as some test helpers. type debugLogBaseSuite struct { - authHttpSuite + authHTTPSuite } func (s *debugLogBaseSuite) TestBadParams(c *gc.C) { diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/defaulticon.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/defaulticon.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/defaulticon.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/defaulticon.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,280 @@ +// Copyright 2016 Canonical Ltd. 
+// Licensed under the AGPLv3, see LICENCE file for details. + +package apiserver + +// defaultIcon holds the default charm icon SVG content. +// Keep this in sync with the default icon returned by the charm store. +const defaultIcon = ` + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + image/svg+xml + + + + + + + + + + + + + + + + + +` diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/deployer/deployer.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/deployer/deployer.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/deployer/deployer.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/deployer/deployer.go 2016-10-13 14:31:49.000000000 +0000 @@ -26,6 +26,7 @@ *common.StateAddresser *common.APIAddresser *common.UnitsWatcher + *common.StatusSetter st *state.State resources facade.Resources @@ -70,6 +71,7 @@ StateAddresser: common.NewStateAddresser(st), APIAddresser: common.NewAPIAddresser(st, resources), UnitsWatcher: common.NewUnitsWatcher(st, resources, getCanWatch), + StatusSetter: common.NewStatusSetter(st, getAuthFunc), st: st, resources: resources, authorizer: authorizer, @@ -97,6 +99,11 @@ return result, err } +// SetStatus sets the status of the specified entities. +func (d *DeployerAPI) SetStatus(args params.SetStatus) (params.ErrorResults, error) { + return d.StatusSetter.SetStatus(args) +} + // getAllUnits returns a list of all principal and subordinate units // assigned to the given machine. 
func getAllUnits(st *state.State, tag names.Tag) ([]string, error) { diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/deployer/deployer_test.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/deployer/deployer_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/deployer/deployer_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/deployer/deployer_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -19,6 +19,7 @@ "github.com/juju/juju/network" "github.com/juju/juju/state" statetesting "github.com/juju/juju/state/testing" + "github.com/juju/juju/status" coretesting "github.com/juju/juju/testing" ) @@ -354,3 +355,30 @@ c.Assert(err, jc.ErrorIsNil) c.Assert(result, gc.DeepEquals, expected) } + +func (s *deployerSuite) TestSetStatus(c *gc.C) { + args := params.SetStatus{ + Entities: []params.EntityStatusArgs{ + {Tag: "unit-mysql-0", Status: "blocked", Info: "waiting", Data: map[string]interface{}{"foo": "bar"}}, + {Tag: "unit-mysql-1", Status: "blocked", Info: "waiting", Data: map[string]interface{}{"foo": "bar"}}, + {Tag: "unit-fake-42", Status: "blocked", Info: "waiting", Data: map[string]interface{}{"foo": "bar"}}, + }, + } + results, err := s.deployer.SetStatus(args) + c.Assert(err, jc.ErrorIsNil) + c.Assert(results, gc.DeepEquals, params.ErrorResults{ + Results: []params.ErrorResult{ + {nil}, + {apiservertesting.ErrUnauthorized}, + {apiservertesting.ErrUnauthorized}, + }, + }) + sInfo, err := s.principal0.Status() + c.Assert(err, jc.ErrorIsNil) + sInfo.Since = nil + c.Assert(sInfo, jc.DeepEquals, status.StatusInfo{ + Status: status.Blocked, + Message: "waiting", + Data: map[string]interface{}{"foo": "bar"}, + }) +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/export_test.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/export_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/export_test.go 2016-08-16 08:56:25.000000000 +0000 +++ 
juju-core-2.0.0/src/github.com/juju/juju/apiserver/export_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -15,27 +15,25 @@ "github.com/juju/juju/apiserver/authentication" "github.com/juju/juju/apiserver/common" - "github.com/juju/juju/apiserver/observer" "github.com/juju/juju/apiserver/params" - "github.com/juju/juju/core/description" + "github.com/juju/juju/permission" "github.com/juju/juju/rpc" "github.com/juju/juju/state" ) var ( - NewPingTimeout = newPingTimeout - MaxClientPingInterval = &maxClientPingInterval - MongoPingInterval = &mongoPingInterval - NewBackups = &newBackups - AllowedMethodsDuringUpgrades = allowedMethodsDuringUpgrades - BZMimeType = bzMimeType - JSMimeType = jsMimeType - SpritePath = spritePath - HasPermission = hasPermission + NewPingTimeout = newPingTimeout + MaxClientPingInterval = maxClientPingInterval + MongoPingInterval = mongoPingInterval + NewBackups = &newBackups + BZMimeType = bzMimeType + JSMimeType = jsMimeType + SpritePath = spritePath + DefaultIcon = defaultIcon ) func ServerMacaroon(srv *Server) (*macaroon.Macaroon, error) { - auth, err := srv.authCtxt.macaroonAuth() + auth, err := srv.authCtxt.externalMacaroonAuth() if err != nil { return nil, err } @@ -43,7 +41,7 @@ } func ServerBakeryService(srv *Server) (authentication.BakeryService, error) { - auth, err := srv.authCtxt.macaroonAuth() + auth, err := srv.authCtxt.externalMacaroonAuth() if err != nil { return nil, err } @@ -53,10 +51,10 @@ // ServerAuthenticatorForTag calls the authenticatorForTag method // of the server's authContext. 
func ServerAuthenticatorForTag(srv *Server, tag names.Tag) (authentication.EntityAuthenticator, error) { - return srv.authCtxt.authenticatorForTag(tag) + return srv.authCtxt.authenticator("testing.invalid:1234").authenticatorForTag(tag) } -func ApiHandlerWithEntity(entity state.Entity) *apiHandler { +func APIHandlerWithEntity(entity state.Entity) *apiHandler { return &apiHandler{entity: entity} } @@ -84,16 +82,16 @@ return &errRoot{err} } -// TestingApiRoot gives you an ApiRoot as a rpc.Methodfinder that is +// TestingAPIRoot gives you an APIRoot as a rpc.Methodfinder that is // *barely* connected to anything. Just enough to let you probe some // of the interfaces, but not enough to actually do any RPC calls. -func TestingApiRoot(st *state.State) rpc.MethodFinder { - return newApiRoot(st, common.NewResources(), nil) +func TestingAPIRoot(st *state.State) rpc.Root { + return newAPIRoot(st, common.NewResources(), nil) } -// TestingApiHandler gives you an ApiHandler that isn't connected to +// TestingAPIHandler gives you an APIHandler that isn't connected to // anything real. It's enough to let test some basic functionality though. -func TestingApiHandler(c *gc.C, srvSt, st *state.State) (*apiHandler, *common.Resources) { +func TestingAPIHandler(c *gc.C, srvSt, st *state.State) (*apiHandler, *common.Resources) { authCtxt, err := newAuthContext(srvSt) c.Assert(err, jc.ErrorIsNil) srv := &Server{ @@ -101,102 +99,65 @@ state: srvSt, tag: names.NewMachineTag("0"), } - h, err := newApiHandler(srv, st, nil, st.ModelUUID()) + h, err := newAPIHandler(srv, st, nil, st.ModelUUID(), "testing.invalid:1234") c.Assert(err, jc.ErrorIsNil) return h, h.getResources() } -// TestingApiHandlerWithEntity gives you the sane kind of ApiHandler as -// TestingApiHandler but sets the passed entity as the apiHandler +// TestingAPIHandlerWithEntity gives you the sane kind of APIHandler as +// TestingAPIHandler but sets the passed entity as the apiHandler // entity. 
-func TestingApiHandlerWithEntity(c *gc.C, srvSt, st *state.State, entity state.Entity) (*apiHandler, *common.Resources) { - h, hr := TestingApiHandler(c, srvSt, st) +func TestingAPIHandlerWithEntity(c *gc.C, srvSt, st *state.State, entity state.Entity) (*apiHandler, *common.Resources) { + h, hr := TestingAPIHandler(c, srvSt, st) h.entity = entity return h, hr } -// TestingUpgradingRoot returns a limited srvRoot -// in an upgrade scenario. -func TestingUpgradingRoot(st *state.State) rpc.MethodFinder { - r := TestingApiRoot(st) - return newUpgradingRoot(r) +// TestingUpgradingRoot returns a resricted srvRoot in an upgrade +// scenario. +func TestingUpgradingRoot(st *state.State) rpc.Root { + r := TestingAPIRoot(st) + return restrictRoot(r, upgradeMethodsOnly) } -// TestingRestrictedApiHandler returns a restricted srvRoot as if accessed -// from the root of the API path. -func TestingRestrictedApiHandler(st *state.State) rpc.MethodFinder { - r := TestingApiRoot(st) - return newRestrictedRoot(r) +// TestingControllerOnlyRoot returns a restricted srvRoot as if +// logged in to the root of the API path. 
+func TestingControllerOnlyRoot() rpc.Root { + r := TestingAPIRoot(nil) + return restrictRoot(r, controllerFacadesOnly) } -type preFacadeAdminApi struct{} - -func newPreFacadeAdminApi(srv *Server, root *apiHandler, observer observer.Observer) interface{} { - return &preFacadeAdminApi{} -} - -func (r *preFacadeAdminApi) Admin(id string) (*preFacadeAdminApi, error) { - return r, nil -} - -var PreFacadeModelTag = names.NewModelTag("383c49f3-526d-4f9e-b50a-1e6fa4e9b3d9") - -func (r *preFacadeAdminApi) Login(c params.Creds) (params.LoginResult, error) { - return params.LoginResult{ - ModelTag: PreFacadeModelTag.String(), - }, nil -} - -type failAdminApi struct{} - -func newFailAdminApi(srv *Server, root *apiHandler, observer observer.Observer) interface{} { - return &failAdminApi{} -} - -func (r *failAdminApi) Admin(id string) (*failAdminApi, error) { - return r, nil -} - -func (r *failAdminApi) Login(c params.Creds) (params.LoginResult, error) { - return params.LoginResult{}, fmt.Errorf("fail") +// TestingModelOnlyRoot returns a restricted srvRoot as if +// logged in to a model. +func TestingModelOnlyRoot() rpc.Root { + r := TestingAPIRoot(nil) + return restrictRoot(r, modelFacadesOnly) } -// SetPreFacadeAdminApi is used to create a test scenario where the API server -// does not know about API facade versioning. In this case, the client should -// login to the v1 facade, which sends backwards-compatible login fields. -// The v0 facade will fail on a pre-defined error. -func SetPreFacadeAdminApi(srv *Server) { - srv.adminApiFactories = map[int]adminApiFactory{ - 0: newFailAdminApi, - 1: newPreFacadeAdminApi, - } +// TestingRestrictedRoot returns a restricted srvRoot. 
+func TestingRestrictedRoot(check func(string, string) error) rpc.Root { + r := TestingAPIRoot(nil) + return restrictRoot(r, check) } -func SetAdminApiVersions(srv *Server, versions ...int) { - factories := make(map[int]adminApiFactory) +func SetAdminAPIVersions(srv *Server, versions ...int) { + factories := make(map[int]adminAPIFactory) for _, n := range versions { switch n { case 3: - factories[n] = newAdminApiV3 + factories[n] = newAdminAPIV3 default: panic(fmt.Errorf("unknown admin API version %d", n)) } } - srv.adminApiFactories = factories -} - -// TestingRestoreInProgressRoot returns a limited restoreInProgressRoot -// containing a srvRoot as returned by TestingSrvRoot. -func TestingRestoreInProgressRoot(st *state.State) *restoreInProgressRoot { - r := TestingApiRoot(st) - return newRestoreInProgressRoot(r) + srv.adminAPIFactories = factories } -// TestingAboutToRestoreRoot returns a limited aboutToRestoreRoot -// containing a srvRoot as returned by TestingSrvRoot. -func TestingAboutToRestoreRoot(st *state.State) *aboutToRestoreRoot { - r := TestingApiRoot(st) - return newAboutToRestoreRoot(r) +// TestingAboutToRestoreRoot returns a limited root which allows +// methods as per when a restore is about to happen. +func TestingAboutToRestoreRoot() rpc.Root { + r := TestingAPIRoot(nil) + return restrictRoot(r, aboutToRestoreMethodsOnly) } // Addr returns the address that the server is listening on. 
@@ -226,7 +187,7 @@ PatchValue(ptr, value interface{}) } -func AssertHasPermission(c *gc.C, handler *apiHandler, access description.Access, tag names.Tag, expect bool) { +func AssertHasPermission(c *gc.C, handler *apiHandler, access permission.Access, tag names.Tag, expect bool) { hasPermission, err := handler.HasPermission(access, tag) c.Assert(err, jc.ErrorIsNil) c.Assert(hasPermission, gc.Equals, expect) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/facade/interface.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/facade/interface.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/facade/interface.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/facade/interface.go 2016-10-13 14:31:49.000000000 +0000 @@ -6,7 +6,7 @@ import ( "gopkg.in/juju/names.v2" - "github.com/juju/juju/core/description" + "github.com/juju/juju/permission" "github.com/juju/juju/state" ) @@ -93,9 +93,17 @@ // func if anything. AuthClient() bool - // HasPermission returns true if the given access is allowed for the given + // HasPermission reports whether the given access is allowed for the given // target by the authenticated entity. - HasPermission(operation description.Access, target names.Tag) (bool, error) + HasPermission(operation permission.Access, target names.Tag) (bool, error) + + // UserHasPermission reports whether the given access is allowed for the given + // target by the given user. + UserHasPermission(user names.UserTag, operation permission.Access, target names.Tag) (bool, error) + + // ConnectedModel returns the UUID of the model to which the API + // connection was made. + ConnectedModel() string } // Resources allows you to store and retrieve Resource implementations. 
diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/firewaller/firewaller_test.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/firewaller/firewaller_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/firewaller/firewaller_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/firewaller/firewaller_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -42,7 +42,7 @@ ) c.Assert(err, jc.ErrorIsNil) s.firewaller = firewallerAPI - s.ModelWatcherTest = commontesting.NewModelWatcherTest(s.firewaller, s.State, s.resources, commontesting.HasSecrets) + s.ModelWatcherTest = commontesting.NewModelWatcherTest(s.firewaller, s.State, s.resources) } func (s *firewallerSuite) TestFirewallerFailsWithNonEnvironManagerUser(c *gc.C) { diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/gui.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/gui.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/gui.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/gui.go 2016-10-13 14:31:49.000000000 +0000 @@ -60,8 +60,8 @@ // - the "jujugui" directory includes a "templates/config.js.go" file which is // used to render the Juju GUI configuration file. The template receives at // least the following variables in its context: "base", "host", "socket", -// "staticURL", "uuid" and "version". It might receive more variables but -// cannot assume them to be always provided. +// "controllerSocket", "staticURL", "uuid" and "version". It might receive +// more variables but cannot assume them to be always provided. type guiRouter struct { dataDir string ctxt httpContext @@ -106,12 +106,16 @@ rootDir, hash, err := gr.ensureFiles(req) if err != nil { // Note that ensureFiles also checks that the model UUID is valid. 
- sendError(w, err) + if err := sendError(w, err); err != nil { + logger.Errorf("%v", err) + } return } qhash := req.URL.Query().Get(":hash") if qhash != "" && qhash != hash { - sendError(w, errors.NotFoundf("resource with %q hash", qhash)) + if err := sendError(w, errors.NotFoundf("resource with %q hash", qhash)); err != nil { + logger.Errorf("%v", err) + } return } uuid := req.URL.Query().Get(":modeluuid") @@ -264,7 +268,9 @@ for _, p := range parts { fpath, err := getGUIComboPath(h.rootDir, p) if err != nil { - sendError(w, errors.Annotate(err, "cannot combine files")) + if err := sendError(w, errors.Annotate(err, "cannot combine files")); err != nil { + logger.Errorf("%v", err) + } return } if fpath == "" { @@ -320,11 +326,13 @@ spriteFile := filepath.Join(h.rootDir, spritePath) spriteContent, err := ioutil.ReadFile(spriteFile) if err != nil { - sendError(w, errors.Annotate(err, "cannot read sprite file")) + if err := sendError(w, errors.Annotate(err, "cannot read sprite file")); err != nil { + logger.Errorf("%v", err) + } return } tmpl := filepath.Join(h.rootDir, "templates", "index.html.go") - renderGUITemplate(w, tmpl, map[string]interface{}{ + if err := renderGUITemplate(w, tmpl, map[string]interface{}{ // staticURL holds the root of the static hierarchy, hence why the // empty string is used here. "staticURL": h.hashedPath(""), @@ -333,23 +341,32 @@ // TODO frankban: make it possible to enable debug. "debug": false, "spriteContent": string(spriteContent), - }) + }); err != nil { + if err := sendError(w, err); err != nil { + logger.Errorf("%v", errors.Annotate(err, "cannot send error to client from rendering GUI template")) + } + } } // serveConfig serves the Juju GUI JavaScript configuration file. 
func (h *guiHandler) serveConfig(w http.ResponseWriter, req *http.Request) { w.Header().Set("Content-Type", jsMimeType) tmpl := filepath.Join(h.rootDir, "templates", "config.js.go") - renderGUITemplate(w, tmpl, map[string]interface{}{ - "base": h.baseURLPath, - "host": req.Host, - "socket": "/model/$uuid/api", + if err := renderGUITemplate(w, tmpl, map[string]interface{}{ + "base": h.baseURLPath, + "host": req.Host, + "controllerSocket": "/api", + "socket": "/model/$uuid/api", // staticURL holds the root of the static hierarchy, hence why the // empty string is used here. "staticURL": h.hashedPath(""), "uuid": h.uuid, "version": jujuversion.Current.String(), - }) + }); err != nil { + if err := sendError(w, err); err != nil { + logger.Errorf("%v", errors.Annotate(err, "cannot send error to client from rendering GUI template")) + } + } } // hashedPath returns the gull path (including the GUI archive hash) to the @@ -358,16 +375,13 @@ return path.Join(h.baseURLPath, h.hash, p) } -func renderGUITemplate(w http.ResponseWriter, tmpl string, ctx map[string]interface{}) { +func renderGUITemplate(w http.ResponseWriter, tmpl string, ctx map[string]interface{}) error { // TODO frankban: cache parsed template. 
t, err := template.ParseFiles(tmpl) if err != nil { - sendError(w, errors.Annotate(err, "cannot parse template")) - return - } - if err := t.Execute(w, ctx); err != nil { - sendError(w, errors.Annotate(err, "cannot render template")) + return errors.Annotate(err, "cannot parse template") } + return errors.Annotate(t.Execute(w, ctx), "cannot render template") } // guiArchiveHandler serves the Juju GUI archive endpoints, used for uploading @@ -385,11 +399,15 @@ case "POST": handler = h.handlePost default: - sendError(w, errors.MethodNotAllowedf("unsupported method: %q", req.Method)) + if err := sendError(w, errors.MethodNotAllowedf("unsupported method: %q", req.Method)); err != nil { + logger.Errorf("%v", err) + } return } if err := handler(w, req); err != nil { - sendError(w, errors.Trace(err)) + if err := sendError(w, errors.Trace(err)); err != nil { + logger.Errorf("%v", err) + } } } @@ -432,10 +450,9 @@ Current: m.Version == currentVersion, } } - sendStatusAndJSON(w, http.StatusOK, params.GUIArchiveResponse{ + return errors.Trace(sendStatusAndJSON(w, http.StatusOK, params.GUIArchiveResponse{ Versions: versions, - }) - return nil + })) } // handlePost is used to upload new Juju GUI archives to the controller. @@ -505,9 +522,9 @@ } } else if !errors.IsNotFound(err) { return errors.Annotate(err, "cannot retrieve current GUI version") + } - sendStatusAndJSON(w, http.StatusOK, resp) - return nil + return errors.Trace(sendStatusAndJSON(w, http.StatusOK, resp)) } // guiVersionHandler is used to select the Juju GUI version served by the @@ -519,11 +536,15 @@ // ServeHTTP implements http.Handler. 
func (h *guiVersionHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { if req.Method != "PUT" { - sendError(w, errors.MethodNotAllowedf("unsupported method: %q", req.Method)) + if err := sendError(w, errors.MethodNotAllowedf("unsupported method: %q", req.Method)); err != nil { + logger.Errorf("%v", err) + } return } if err := h.handlePut(w, req); err != nil { - sendError(w, errors.Trace(err)) + if err := sendError(w, errors.Trace(err)); err != nil { + logger.Errorf("%v", err) + } } } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/gui_test.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/gui_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/gui_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/gui_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -36,7 +36,7 @@ ) type guiSuite struct { - authHttpSuite + authHTTPSuite } var _ = gc.Suite(&guiSuite{}) @@ -385,12 +385,14 @@ } body := assertResponse(c, resp, test.expectedStatus, test.expectedContentType) if test.expectedError == "" { - c.Assert(string(body), gc.Equals, test.expectedBody) + c.Check(string(body), gc.Equals, test.expectedBody) } else { var jsonResp params.ErrorResult err := json.Unmarshal(body, &jsonResp) - c.Assert(err, jc.ErrorIsNil, gc.Commentf("body: %s", body)) - c.Assert(jsonResp.Error.Message, gc.Matches, test.expectedError) + if !c.Check(err, jc.ErrorIsNil, gc.Commentf("body: %s", body)) { + continue + } + c.Check(jsonResp.Error.Message, gc.Matches, test.expectedError) } } } @@ -496,6 +498,7 @@ // This is just an example and does not reflect the real Juju GUI config. base: '{{.base}}', host: '{{.host}}', + controllerSocket: '{{.controllerSocket}}', socket: '{{.socket}}', staticURL: '{{.staticURL}}', uuid: '{{.uuid}}', @@ -513,6 +516,7 @@ // This is just an example and does not reflect the real Juju GUI config. 
base: '/gui/%[1]s/', host: '%[2]s', + controllerSocket: '/api', socket: '/model/$uuid/api', staticURL: '/gui/%[1]s/%[3]s', uuid: '%[1]s', @@ -563,7 +567,7 @@ } type guiArchiveSuite struct { - authHttpSuite + authHTTPSuite } var _ = gc.Suite(&guiArchiveSuite{}) @@ -825,7 +829,7 @@ } type guiVersionSuite struct { - authHttpSuite + authHTTPSuite } var _ = gc.Suite(&guiVersionSuite{}) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/highavailability/highavailability.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/highavailability/highavailability.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/highavailability/highavailability.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/highavailability/highavailability.go 2016-10-13 14:31:49.000000000 +0000 @@ -15,6 +15,8 @@ "github.com/juju/juju/apiserver/facade" "github.com/juju/juju/apiserver/params" "github.com/juju/juju/constraints" + "github.com/juju/juju/mongo" + "github.com/juju/juju/permission" "github.com/juju/juju/state" ) @@ -41,7 +43,7 @@ // NewHighAvailabilityAPI creates a new server-side highavailability API end point. func NewHighAvailabilityAPI(st *state.State, resources facade.Resources, authorizer facade.Authorizer) (*HighAvailabilityAPI, error) { - // Only clients and environment managers can access the high availability service. + // Only clients and model managers can access the high availability facade. if !authorizer.AuthClient() && !authorizer.AuthModelManager() { return nil, common.ErrPerm } @@ -52,13 +54,32 @@ }, nil } +// EnableHA adds controller machines as necessary to ensure the +// controller has the number of machines specified. 
func (api *HighAvailabilityAPI) EnableHA(args params.ControllersSpecs) (params.ControllersChangeResults, error) { - results := params.ControllersChangeResults{Results: make([]params.ControllersChangeResult, len(args.Specs))} - for i, controllersServersSpec := range args.Specs { - result, err := EnableHASingle(api.state, controllersServersSpec) - results.Results[i].Result = result - results.Results[i].Error = common.ServerError(err) + results := params.ControllersChangeResults{} + + if api.authorizer.AuthClient() { + admin, err := api.authorizer.HasPermission(permission.SuperuserAccess, api.state.ControllerTag()) + if err != nil && !errors.IsNotFound(err) { + return results, errors.Trace(err) + } + if !admin { + return results, common.ServerError(common.ErrPerm) + } + } + + if len(args.Specs) == 0 { + return results, nil + } + if len(args.Specs) > 1 { + return results, errors.New("only one controller spec is supported") } + + result, err := enableHASingle(api.state, args.Specs[0]) + results.Results = make([]params.ControllersChangeResult, 1) + results.Results[0].Result = result + results.Results[0].Error = common.ServerError(err) return results, nil } @@ -83,9 +104,7 @@ } } -// EnableHASingle applies a single ControllersServersSpec specification to the current environment. -// Exported so it can be called by the legacy client API in the client package. -func EnableHASingle(st *state.State, spec params.ControllersSpec) (params.ControllersChanges, error) { +func enableHASingle(st *state.State, spec params.ControllersSpec) (params.ControllersChanges, error) { if !st.IsController() { return params.ControllersChanges{}, errors.New("unsupported with hosted models") } @@ -94,16 +113,6 @@ if err := blockChecker.ChangeAllowed(); err != nil { return params.ControllersChanges{}, errors.Trace(err) } - // Validate the environment tag if present. 
- if spec.ModelTag != "" { - tag, err := names.ParseModelTag(spec.ModelTag) - if err != nil { - return params.ControllersChanges{}, errors.Errorf("invalid model tag: %v", err) - } - if _, err := st.FindEntity(tag); err != nil { - return params.ControllersChanges{}, err - } - } series := spec.Series if series == "" { @@ -171,7 +180,12 @@ // StopHAReplicationForUpgrade will prompt the HA cluster to enter upgrade // mongo mode. func (api *HighAvailabilityAPI) StopHAReplicationForUpgrade(args params.UpgradeMongoParams) (params.MongoUpgradeResults, error) { - ha, err := api.state.SetUpgradeMongoMode(args.Target) + ha, err := api.state.SetUpgradeMongoMode(mongo.Version{ + Major: args.Target.Major, + Minor: args.Target.Minor, + Patch: args.Target.Patch, + StorageEngine: mongo.StorageEngine(args.Target.StorageEngine), + }) if err != nil { return params.MongoUpgradeResults{}, errors.Annotate(err, "cannot stop HA for ugprade") } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/highavailability/highavailability_test.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/highavailability/highavailability_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/highavailability/highavailability_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/highavailability/highavailability_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -50,7 +50,7 @@ var ( emptyCons = constraints.Value{} - controllerCons = constraints.MustParse("mem=16G cpu-cores=16") + controllerCons = constraints.MustParse("mem=16G cores=16") defaultSeries = "" ) @@ -379,3 +379,24 @@ c.Assert(err, jc.ErrorIsNil) c.Assert(machines, gc.HasLen, 0) } + +func (s *clientSuite) TestEnableHAMultipleSpecs(c *gc.C) { + arg := params.ControllersSpecs{ + Specs: []params.ControllersSpec{ + {NumControllers: 3}, + {NumControllers: 5}, + }, + } + results, err := s.haServer.EnableHA(arg) + c.Check(err, gc.ErrorMatches, "only one controller spec is supported") + 
c.Check(results.Results, gc.HasLen, 0) +} + +func (s *clientSuite) TestEnableHANoSpecs(c *gc.C) { + arg := params.ControllersSpecs{ + Specs: []params.ControllersSpec{}, + } + results, err := s.haServer.EnableHA(arg) + c.Check(err, jc.ErrorIsNil) + c.Check(results.Results, gc.HasLen, 0) +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/httpcontext.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/httpcontext.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/httpcontext.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/httpcontext.go 2016-10-13 14:31:49.000000000 +0000 @@ -62,7 +62,8 @@ if err != nil { return nil, nil, errors.NewUnauthorized(err, "") } - entity, _, err := checkCreds(st, req, true, ctxt.srv.authCtxt) + authenticator := ctxt.srv.authCtxt.authenticator(r.Host) + entity, _, err := checkCreds(st, req, true, authenticator) if err != nil { if common.IsDischargeRequiredError(err) { return nil, nil, errors.Trace(err) @@ -71,7 +72,7 @@ // Handle the special case of a worker on a controller machine // acting on behalf of a hosted model. if isMachineTag(req.AuthTag) { - entity, err := checkControllerMachineCreds(ctxt.srv.state, req, ctxt.srv.authCtxt) + entity, err := checkControllerMachineCreds(ctxt.srv.state, req, authenticator) if err != nil { return nil, nil, errors.NewUnauthorized(err, "") } @@ -180,23 +181,23 @@ // sendJSON writes a JSON-encoded response value // to the given writer along with a trailing newline. -func sendJSON(w io.Writer, response interface{}) { +func sendJSON(w io.Writer, response interface{}) error { body, err := json.Marshal(response) if err != nil { logger.Errorf("cannot marshal JSON result %#v: %v", response, err) - return + return err } body = append(body, '\n') - w.Write(body) + _, err = w.Write(body) + return err } // sendStatusAndJSON sends an HTTP status code and // a JSON-encoded response to a client. 
-func sendStatusAndJSON(w http.ResponseWriter, statusCode int, response interface{}) { +func sendStatusAndJSON(w http.ResponseWriter, statusCode int, response interface{}) error { body, err := json.Marshal(response) if err != nil { - logger.Errorf("cannot marshal JSON result %#v: %v", response, err) - return + return errors.Errorf("cannot marshal JSON result %#v: %v", response, err) } if statusCode == http.StatusUnauthorized { @@ -205,15 +206,18 @@ w.Header().Set("Content-Type", params.ContentTypeJSON) w.Header().Set("Content-Length", fmt.Sprint(len(body))) w.WriteHeader(statusCode) - w.Write(body) + if _, err := w.Write(body); err != nil { + return errors.Annotate(err, "cannot write response") + } + return nil } // sendError sends a JSON-encoded error response // for errors encountered during processing. -func sendError(w http.ResponseWriter, err error) { - err1, statusCode := common.ServerErrorAndStatus(err) - logger.Debugf("sending error: %d %v", statusCode, err1) - sendStatusAndJSON(w, statusCode, ¶ms.ErrorResult{ - Error: err1, - }) +func sendError(w http.ResponseWriter, errToSend error) error { + paramsErr, statusCode := common.ServerErrorAndStatus(errToSend) + logger.Debugf("sending error: %d %v", statusCode, paramsErr) + return errors.Trace(sendStatusAndJSON(w, statusCode, ¶ms.ErrorResult{ + Error: paramsErr, + })) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/imagemanager/imagemanager.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/imagemanager/imagemanager.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/imagemanager/imagemanager.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/imagemanager/imagemanager.go 2016-10-13 14:31:49.000000000 +0000 @@ -10,6 +10,7 @@ "github.com/juju/juju/apiserver/common" "github.com/juju/juju/apiserver/facade" "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/permission" "github.com/juju/juju/state" 
"github.com/juju/juju/state/imagestorage" ) @@ -58,6 +59,14 @@ // ListImages returns images matching the specified filter. func (api *ImageManagerAPI) ListImages(arg params.ImageFilterParams) (params.ListImageResult, error) { var result params.ListImageResult + admin, err := api.authorizer.HasPermission(permission.SuperuserAccess, api.state.ControllerTag()) + if err != nil { + return result, errors.Trace(err) + } + if !admin { + return result, common.ServerError(common.ErrPerm) + } + if len(arg.Images) > 1 { return result, errors.New("image filter with multiple terms not supported") } @@ -89,10 +98,19 @@ // DeleteImages deletes the images matching the specified filter. func (api *ImageManagerAPI) DeleteImages(arg params.ImageFilterParams) (params.ErrorResults, error) { + var result params.ErrorResults + admin, err := api.authorizer.HasPermission(permission.SuperuserAccess, api.state.ControllerTag()) + if err != nil { + return result, errors.Trace(err) + } + if !admin { + return result, common.ServerError(common.ErrPerm) + } + if err := api.check.ChangeAllowed(); err != nil { return params.ErrorResults{}, errors.Trace(err) } - var result params.ErrorResults + result.Results = make([]params.ErrorResult, len(arg.Images)) stor := api.state.ImageStorage() for i, imageSpec := range arg.Images { diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/imagemanager/state.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/imagemanager/state.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/imagemanager/state.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/imagemanager/state.go 2016-10-13 14:31:49.000000000 +0000 @@ -6,10 +6,12 @@ import ( "github.com/juju/juju/state" "github.com/juju/juju/state/imagestorage" + names "gopkg.in/juju/names.v2" ) type stateInterface interface { ImageStorage() imagestorage.Storage + ControllerTag() names.ControllerTag } type stateShim struct { diff -Nru 
juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/imagemetadata/export_test.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/imagemetadata/export_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/imagemetadata/export_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/imagemetadata/export_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -3,17 +3,7 @@ package imagemetadata -import ( - "github.com/juju/juju/apiserver/params" - "github.com/juju/juju/environs/config" - "github.com/juju/juju/state/cloudimagemetadata" -) - var ( CreateAPI = createAPI ProcessErrors = processErrors ) - -func ParseMetadataFromParams(api *API, p params.CloudImageMetadata, cfg *config.Config, cloudRegion string) (cloudimagemetadata.Metadata, error) { - return api.parseMetadataFromParams(p, cfg, cloudRegion) -} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/imagemetadata/functions_test.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/imagemetadata/functions_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/imagemetadata/functions_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/imagemetadata/functions_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -7,83 +7,13 @@ "fmt" jc "github.com/juju/testing/checkers" - "github.com/juju/utils/series" gc "gopkg.in/check.v1" "github.com/juju/juju/apiserver/imagemetadata" "github.com/juju/juju/apiserver/params" - "github.com/juju/juju/environs/config" - "github.com/juju/juju/state/cloudimagemetadata" "github.com/juju/juju/testing" ) -type funcSuite struct { - baseImageMetadataSuite - - cfg *config.Config - expected cloudimagemetadata.Metadata -} - -var _ = gc.Suite(&funcSuite{}) - -func (s *funcSuite) SetUpTest(c *gc.C) { - s.baseImageMetadataSuite.SetUpTest(c) - - cfg, err := config.New(config.NoDefaults, mockConfig()) - c.Assert(err, jc.ErrorIsNil) - s.cfg = cfg - s.state = 
s.constructState(s.cfg, nil) - - s.expected = cloudimagemetadata.Metadata{ - cloudimagemetadata.MetadataAttributes{ - Stream: "released", - Source: "custom", - Series: series.LatestLts(), - Arch: "amd64", - Region: "dummy_region", - }, - 0, - "", - } -} - -func (s *funcSuite) TestParseMetadataNoSource(c *gc.C) { - m, err := imagemetadata.ParseMetadataFromParams(s.api, params.CloudImageMetadata{}, s.cfg, "dummy_region") - c.Assert(err, jc.ErrorIsNil) - c.Assert(m, gc.DeepEquals, s.expected) -} - -func (s *funcSuite) TestParseMetadataAnySource(c *gc.C) { - s.expected.Source = "any" - m, err := imagemetadata.ParseMetadataFromParams(s.api, params.CloudImageMetadata{Source: "any"}, s.cfg, "dummy_region") - c.Assert(err, jc.ErrorIsNil) - c.Assert(m, gc.DeepEquals, s.expected) -} - -func (s *funcSuite) TestParseMetadataAnyStream(c *gc.C) { - stream := "happy stream" - s.expected.Stream = stream - - m, err := imagemetadata.ParseMetadataFromParams(s.api, params.CloudImageMetadata{Stream: stream}, s.cfg, "dummy_region") - c.Assert(err, jc.ErrorIsNil) - c.Assert(m, gc.DeepEquals, s.expected) -} - -func (s *funcSuite) TestParseMetadataDefaultStream(c *gc.C) { - m, err := imagemetadata.ParseMetadataFromParams(s.api, params.CloudImageMetadata{}, s.cfg, "dummy_region") - c.Assert(err, jc.ErrorIsNil) - c.Assert(m, gc.DeepEquals, s.expected) -} - -func (s *funcSuite) TestParseMetadataAnyRegion(c *gc.C) { - region := "region" - s.expected.Region = region - - m, err := imagemetadata.ParseMetadataFromParams(s.api, params.CloudImageMetadata{Region: region}, s.cfg, "dummy_region") - c.Assert(err, jc.ErrorIsNil) - c.Assert(m, gc.DeepEquals, s.expected) -} - type funcMetadataSuite struct { testing.BaseSuite } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/imagemetadata/metadata.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/imagemetadata/metadata.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/imagemetadata/metadata.go 2016-08-16 08:56:25.000000000 
+0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/imagemetadata/metadata.go 2016-10-13 14:31:49.000000000 +0000 @@ -18,6 +18,7 @@ "github.com/juju/juju/environs/config" envmetadata "github.com/juju/juju/environs/imagemetadata" "github.com/juju/juju/environs/simplestreams" + "github.com/juju/juju/permission" "github.com/juju/juju/state" "github.com/juju/juju/state/cloudimagemetadata" "github.com/juju/juju/state/stateenvirons" @@ -71,6 +72,16 @@ // given filter. // Returned list contains metadata ordered by priority. func (api *API) List(filter params.ImageMetadataFilter) (params.ListCloudImageMetadataResult, error) { + if api.authorizer.AuthClient() { + admin, err := api.authorizer.HasPermission(permission.SuperuserAccess, api.metadata.ControllerTag()) + if err != nil { + return params.ListCloudImageMetadataResult{}, errors.Trace(err) + } + if !admin { + return params.ListCloudImageMetadataResult{}, common.ServerError(common.ErrPerm) + } + } + found, err := api.metadata.FindMetadata(cloudimagemetadata.MetadataFilter{ Region: filter.Region, Series: filter.Series, @@ -102,6 +113,15 @@ // It supports bulk calls. 
func (api *API) Save(metadata params.MetadataSaveParams) (params.ErrorResults, error) { all := make([]params.ErrorResult, len(metadata.Metadata)) + if api.authorizer.AuthClient() { + admin, err := api.authorizer.HasPermission(permission.SuperuserAccess, api.metadata.ControllerTag()) + if err != nil { + return params.ErrorResults{Results: all}, errors.Trace(err) + } + if !admin { + return params.ErrorResults{Results: all}, common.ServerError(common.ErrPerm) + } + } if len(metadata.Metadata) == 0 { return params.ErrorResults{Results: all}, nil } @@ -109,17 +129,9 @@ if err != nil { return params.ErrorResults{}, errors.Annotatef(err, "getting model config") } - model, err := api.metadata.Model() - if err != nil { - return params.ErrorResults{}, errors.Annotatef(err, "getting model") - } for i, one := range metadata.Metadata { - md, err := api.parseMetadataListFromParams(one, modelCfg, model.CloudRegion()) - if err != nil { - all[i] = params.ErrorResult{Error: common.ServerError(err)} - continue - } - err = api.metadata.SaveMetadata(md) + md := api.parseMetadataListFromParams(one, modelCfg) + err := api.metadata.SaveMetadata(md) all[i] = params.ErrorResult{Error: common.ServerError(err)} } return params.ErrorResults{Results: all}, nil @@ -129,6 +141,15 @@ // It supports bulk calls. 
func (api *API) Delete(images params.MetadataImageIds) (params.ErrorResults, error) { all := make([]params.ErrorResult, len(images.Ids)) + if api.authorizer.AuthClient() { + admin, err := api.authorizer.HasPermission(permission.SuperuserAccess, api.metadata.ControllerTag()) + if err != nil { + return params.ErrorResults{Results: all}, errors.Trace(err) + } + if !admin { + return params.ErrorResults{Results: all}, common.ServerError(common.ErrPerm) + } + } for i, imageId := range images.Ids { err := api.metadata.DeleteMetadata(imageId) all[i] = params.ErrorResult{common.ServerError(err)} @@ -153,59 +174,46 @@ return result } -func (api *API) parseMetadataListFromParams( - p params.CloudImageMetadataList, cfg *config.Config, cloudRegion string, -) ([]cloudimagemetadata.Metadata, error) { +func (api *API) parseMetadataListFromParams(p params.CloudImageMetadataList, cfg *config.Config) []cloudimagemetadata.Metadata { results := make([]cloudimagemetadata.Metadata, len(p.Metadata)) for i, metadata := range p.Metadata { - result, err := api.parseMetadataFromParams(metadata, cfg, cloudRegion) - if err != nil { - return nil, errors.Trace(err) + results[i] = cloudimagemetadata.Metadata{ + MetadataAttributes: cloudimagemetadata.MetadataAttributes{ + Stream: metadata.Stream, + Region: metadata.Region, + Version: metadata.Version, + Series: metadata.Series, + Arch: metadata.Arch, + VirtType: metadata.VirtType, + RootStorageType: metadata.RootStorageType, + RootStorageSize: metadata.RootStorageSize, + Source: metadata.Source, + }, + Priority: metadata.Priority, + ImageId: metadata.ImageId, + } + // TODO (anastasiamac 2016-08-24) This is a band-aid solution. + // Once correct value is read from simplestreams, this needs to go. 
+ // Bug# 1616295 + if results[i].Stream == "" { + results[i].Stream = cfg.ImageStream() } - results[i] = result - } - return results, nil -} - -func (api *API) parseMetadataFromParams(p params.CloudImageMetadata, cfg *config.Config, cloudRegion string) (cloudimagemetadata.Metadata, error) { - result := cloudimagemetadata.Metadata{ - cloudimagemetadata.MetadataAttributes{ - Stream: p.Stream, - Region: p.Region, - Version: p.Version, - Series: p.Series, - Arch: p.Arch, - VirtType: p.VirtType, - RootStorageType: p.RootStorageType, - RootStorageSize: p.RootStorageSize, - Source: p.Source, - }, - p.Priority, - p.ImageId, - } - - // Fill in any required default values. - if p.Stream == "" { - result.Stream = cfg.ImageStream() - } - if p.Source == "" { - result.Source = "custom" - } - if result.Arch == "" { - result.Arch = "amd64" - } - if result.Series == "" { - result.Series = config.PreferredSeries(cfg) - } - if result.Region == "" { - result.Region = cloudRegion } - return result, nil + return results } // UpdateFromPublishedImages retrieves currently published image metadata and // updates stored ones accordingly. 
func (api *API) UpdateFromPublishedImages() error { + if api.authorizer.AuthClient() { + admin, err := api.authorizer.HasPermission(permission.SuperuserAccess, api.metadata.ControllerTag()) + if err != nil { + return errors.Trace(err) + } + if !admin { + return common.ServerError(common.ErrPerm) + } + } return api.retrievePublished() } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/imagemetadata/metadata_test.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/imagemetadata/metadata_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/imagemetadata/metadata_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/imagemetadata/metadata_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -22,7 +22,7 @@ found, err := s.api.List(params.ImageMetadataFilter{}) c.Assert(err, jc.ErrorIsNil) c.Assert(found.Result, gc.HasLen, 0) - s.assertCalls(c, findMetadata) + s.assertCalls(c, "ControllerTag", findMetadata) } func (s *metadataSuite) TestFindEmpty(c *gc.C) { @@ -33,7 +33,7 @@ found, err := s.api.List(params.ImageMetadataFilter{}) c.Assert(err, jc.ErrorIsNil) c.Assert(found.Result, gc.HasLen, 0) - s.assertCalls(c, findMetadata) + s.assertCalls(c, "ControllerTag", findMetadata) } func (s *metadataSuite) TestFindEmptyGroups(c *gc.C) { @@ -47,7 +47,7 @@ found, err := s.api.List(params.ImageMetadataFilter{}) c.Assert(err, jc.ErrorIsNil) c.Assert(found.Result, gc.HasLen, 0) - s.assertCalls(c, findMetadata) + s.assertCalls(c, "ControllerTag", findMetadata) } func (s *metadataSuite) TestFindError(c *gc.C) { @@ -59,7 +59,7 @@ found, err := s.api.List(params.ImageMetadataFilter{}) c.Assert(err, gc.ErrorMatches, msg) c.Assert(found.Result, gc.HasLen, 0) - s.assertCalls(c, findMetadata) + s.assertCalls(c, "ControllerTag", findMetadata) } func (s *metadataSuite) TestFindOrder(c *gc.C) { @@ -92,14 +92,14 @@ params.CloudImageMetadata{ImageId: customImageId2, Priority: 20}, params.CloudImageMetadata{ImageId: 
publicImageId, Priority: 15}, }) - s.assertCalls(c, findMetadata) + s.assertCalls(c, "ControllerTag", findMetadata) } func (s *metadataSuite) TestSaveEmpty(c *gc.C) { errs, err := s.api.Save(params.MetadataSaveParams{}) c.Assert(err, jc.ErrorIsNil) c.Assert(errs.Results, gc.HasLen, 0) - s.assertCalls(c) + s.assertCalls(c, "ControllerTag") } func (s *metadataSuite) TestSave(c *gc.C) { @@ -112,6 +112,11 @@ s.state.saveMetadata = func(m []cloudimagemetadata.Metadata) error { saveCalls += 1 c.Assert(m, gc.HasLen, saveCalls) + // TODO (anastasiamac 2016-08-24) This is a check for a band-aid solution. + // Once correct value is read from simplestreams, this needs to go. + // Bug# 1616295 + // Ensure empty stream is changed to release + c.Assert(m[0].Stream, gc.DeepEquals, "released") if saveCalls == 1 { // don't err on first call return nil @@ -130,14 +135,14 @@ c.Assert(errs.Results, gc.HasLen, 2) c.Assert(errs.Results[0].Error, gc.IsNil) c.Assert(errs.Results[1].Error, gc.ErrorMatches, msg) - s.assertCalls(c, environConfig, "Model", saveMetadata, saveMetadata) + s.assertCalls(c, "ControllerTag", environConfig, saveMetadata, saveMetadata) } func (s *metadataSuite) TestDeleteEmpty(c *gc.C) { errs, err := s.api.Delete(params.MetadataImageIds{}) c.Assert(err, jc.ErrorIsNil) c.Assert(errs.Results, gc.HasLen, 0) - s.assertCalls(c) + s.assertCalls(c, "ControllerTag") } func (s *metadataSuite) TestDelete(c *gc.C) { @@ -157,5 +162,5 @@ c.Assert(errs.Results, gc.HasLen, 2) c.Assert(errs.Results[0].Error, gc.IsNil) c.Assert(errs.Results[1].Error, gc.ErrorMatches, msg) - s.assertCalls(c, deleteMetadata, deleteMetadata) + s.assertCalls(c, "ControllerTag", deleteMetadata, deleteMetadata) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/imagemetadata/package_test.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/imagemetadata/package_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/imagemetadata/package_test.go 2016-08-16 08:56:25.000000000 
+0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/imagemetadata/package_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -43,7 +43,7 @@ func (s *baseImageMetadataSuite) SetUpTest(c *gc.C) { s.BaseSuite.SetUpTest(c) s.resources = common.NewResources() - s.authorizer = testing.FakeAuthorizer{names.NewUserTag("testuser"), true} + s.authorizer = testing.FakeAuthorizer{Tag: names.NewUserTag("testuser"), EnvironManager: true, AdminTag: names.NewUserTag("testuser")} s.state = s.constructState(testConfig(c), &mockModel{"meep"}) @@ -83,6 +83,9 @@ model: func() (imagemetadata.Model, error) { return model, nil }, + controllerTag: func() names.ControllerTag { + return names.NewControllerTag("deadbeef-2f18-4fd2-967d-db9663db7bea") + }, } } @@ -94,6 +97,7 @@ deleteMetadata func(imageId string) error environConfig func() (*config.Config, error) model func() (imagemetadata.Model, error) + controllerTag func() names.ControllerTag } func (st *mockState) FindMetadata(f cloudimagemetadata.MetadataFilter) (map[string][]cloudimagemetadata.Metadata, error) { @@ -121,6 +125,11 @@ return st.model() } +func (st *mockState) ControllerTag() names.ControllerTag { + st.Stub.MethodCall(st, "ControllerTag") + return st.controllerTag() +} + type mockModel struct { cloudRegion string } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/imagemetadata/state.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/imagemetadata/state.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/imagemetadata/state.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/imagemetadata/state.go 2016-10-13 14:31:49.000000000 +0000 @@ -7,6 +7,7 @@ "github.com/juju/juju/environs/config" "github.com/juju/juju/state" "github.com/juju/juju/state/cloudimagemetadata" + names "gopkg.in/juju/names.v2" ) type metadataAcess interface { @@ -15,6 +16,7 @@ DeleteMetadata(imageId string) error Model() (Model, error) ModelConfig() (*config.Config, error) + 
ControllerTag() names.ControllerTag } type Model interface { diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/imagemetadata/updatefrompublished_test.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/imagemetadata/updatefrompublished_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/imagemetadata/updatefrompublished_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/imagemetadata/updatefrompublished_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -242,7 +242,7 @@ // This will only save image metadata specific to provider cloud spec. s.expected = []cloudimagemetadata.Metadata{ cloudimagemetadata.Metadata{ - cloudimagemetadata.MetadataAttributes{ + MetadataAttributes: cloudimagemetadata.MetadataAttributes{ RootStorageType: "ebs", VirtType: "pv", Arch: "amd64", @@ -250,11 +250,11 @@ Region: "dummy_region", Source: "default cloud images", Stream: "released"}, - 10, - "ami-36745463", + Priority: 10, + ImageId: "ami-36745463", }, cloudimagemetadata.Metadata{ - cloudimagemetadata.MetadataAttributes{ + MetadataAttributes: cloudimagemetadata.MetadataAttributes{ RootStorageType: "ebs", VirtType: "pv", Arch: "amd64", @@ -262,8 +262,8 @@ Region: "dummy_region", Source: "default cloud images", Stream: "released"}, - 10, - "ami-26745463", + Priority: 10, + ImageId: "ami-26745463", }, } @@ -282,7 +282,7 @@ func (s *regionMetadataSuite) checkStoredPublished(c *gc.C) { err := s.api.UpdateFromPublishedImages() c.Assert(err, jc.ErrorIsNil) - s.assertCalls(c, environConfig, "Model", saveMetadata) + s.assertCalls(c, "ControllerTag", "ControllerTag", environConfig, saveMetadata) c.Assert(s.saved, jc.SameContents, s.expected) } @@ -392,12 +392,13 @@ priority := s.setupMetadata(c, anotherDS, cloudSpec, m1) m1.Source = anotherDS m1.Priority = priority + m1.Stream = "released" s.expected = append(s.expected, m1) err = s.api.UpdateFromPublishedImages() c.Assert(err, jc.ErrorIsNil) - s.assertCalls(c, 
environConfig, "Model", saveMetadata, environConfig, "Model", saveMetadata) + s.assertCalls(c, "ControllerTag", "ControllerTag", environConfig, saveMetadata, "ControllerTag", environConfig, saveMetadata) c.Assert(s.saved, jc.SameContents, s.expected) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/instancepoller/instancepoller.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/instancepoller/instancepoller.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/instancepoller/instancepoller.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/instancepoller/instancepoller.go 2016-10-13 14:31:49.000000000 +0000 @@ -212,6 +212,12 @@ Since: &now, } err = machine.SetInstanceStatus(s) + if status.Status(arg.Status) == status.ProvisioningError { + s.Status = status.Error + if err == nil { + err = machine.SetStatus(s) + } + } } result.Results[i].Error = common.ServerError(err) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/instancepoller/instancepoller_test.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/instancepoller/instancepoller_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/instancepoller/instancepoller_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/instancepoller/instancepoller_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -7,6 +7,7 @@ "time" "github.com/juju/errors" + jujutesting "github.com/juju/testing" jc "github.com/juju/testing/checkers" "github.com/juju/utils/clock" gc "gopkg.in/check.v1" @@ -54,7 +55,7 @@ instancepoller.PatchState(s, s.st) var err error - s.clock = coretesting.NewClock(time.Now()) + s.clock = jujutesting.NewClock(time.Now()) s.api, err = instancepoller.NewInstancePollerAPI(nil, s.resources, s.authoriser, s.clock) c.Assert(err, jc.ErrorIsNil) @@ -342,7 +343,7 @@ func (s *InstancePollerSuite) TestStatusSuccess(c *gc.C) { now := time.Now() s1 := status.StatusInfo{ - Status: 
status.StatusError, + Status: status.Error, Message: "not really", Data: map[string]interface{}{ "price": 4.2, @@ -360,7 +361,7 @@ c.Assert(result, jc.DeepEquals, params.StatusResults{ Results: []params.StatusResult{ { - Status: status.StatusError.String(), + Status: status.Error.String(), Info: s1.Message, Data: s1.Data, Since: s1.Since, diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/instancepoller/state.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/instancepoller/state.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/instancepoller/state.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/instancepoller/state.go 2016-10-13 14:31:49.000000000 +0000 @@ -20,6 +20,7 @@ SetProviderAddresses(...network.Address) error InstanceStatus() (status.StatusInfo, error) SetInstanceStatus(status.StatusInfo) error + SetStatus(status.StatusInfo) error String() string Refresh() error Life() state.Life diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/keymanager/keymanager.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/keymanager/keymanager.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/keymanager/keymanager.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/keymanager/keymanager.go 2016-10-13 14:31:49.000000000 +0000 @@ -27,6 +27,9 @@ common.RegisterStandardFacade("KeyManager", 1, NewKeyManagerAPI) } +// The comment values used by juju internal ssh keys. +var internalComments = set.NewStrings([]string{"juju-client-key", "juju-system-key"}...) + // KeyManager defines the methods on the keymanager API end point. type KeyManager interface { ListKeys(arg params.ListSSHKeys) (params.StringsResults, error) @@ -128,16 +131,20 @@ fingerprint, comment, err := ssh.KeyFingerprint(key) if err != nil { keyInfo = append(keyInfo, fmt.Sprintf("Invalid key: %v", key)) + continue + } + // Only including user added keys not internal ones. 
+ if internalComments.Contains(comment) { + continue + } + if mode == ssh.FullKeys { + keyInfo = append(keyInfo, key) } else { - if mode == ssh.FullKeys { - keyInfo = append(keyInfo, key) - } else { - shortKey := fingerprint - if comment != "" { - shortKey += fmt.Sprintf(" (%s)", comment) - } - keyInfo = append(keyInfo, shortKey) + shortKey := fingerprint + if comment != "" { + shortKey += fmt.Sprintf(" (%s)", comment) } + keyInfo = append(keyInfo, shortKey) } } return keyInfo @@ -318,33 +325,31 @@ // currentKeyDataForDelete gathers data used when deleting ssh keys. func (api *KeyManagerAPI) currentKeyDataForDelete() ( - keys map[string]string, invalidKeys []string, comments map[string]string, err error) { + currentKeys []string, byFingerprint map[string]string, byComment map[string]string, err error) { cfg, err := api.state.ModelConfig() if err != nil { return nil, nil, nil, fmt.Errorf("reading current key data: %v", err) } // For now, authorised keys are global, common to all users. - existingSSHKeys := ssh.SplitAuthorisedKeys(cfg.AuthorizedKeys()) + currentKeys = ssh.SplitAuthorisedKeys(cfg.AuthorizedKeys()) - // Build up a map of keys indexed by fingerprint, and fingerprints indexed by comment - // so we can easily get the key represented by each keyId, which may be either a fingerprint - // or comment. - keys = make(map[string]string) - comments = make(map[string]string) - for _, key := range existingSSHKeys { + // Make two maps that index keys by fingerprint and by comment for fast + // lookup of keys to delete which may be given as either. 
+ byFingerprint = make(map[string]string) + byComment = make(map[string]string) + for _, key := range currentKeys { fingerprint, comment, err := ssh.KeyFingerprint(key) if err != nil { logger.Debugf("keeping unrecognised existing ssh key %q: %v", key, err) - invalidKeys = append(invalidKeys, key) continue } - keys[fingerprint] = key + byFingerprint[fingerprint] = key if comment != "" { - comments[comment] = fingerprint + byComment[comment] = key } } - return keys, invalidKeys, comments, nil + return currentKeys, byFingerprint, byComment, nil } // DeleteKeys deletes the authorised ssh keys for the specified user. @@ -363,32 +368,44 @@ return params.ErrorResults{}, common.ServerError(common.ErrPerm) } - sshKeys, invalidKeys, keyComments, err := api.currentKeyDataForDelete() + allKeys, byFingerprint, byComment, err := api.currentKeyDataForDelete() if err != nil { return params.ErrorResults{}, common.ServerError(fmt.Errorf("reading current key data: %v", err)) } - // We keep all existing invalid keys. - keysToWrite := invalidKeys + // Record the keys to be deleted in the second pass. + keysToDelete := make(set.Strings) // Find the keys corresponding to the specified key fingerprints or comments. for i, keyId := range arg.Keys { - // assume keyId may be a fingerprint - fingerprint := keyId - _, ok := sshKeys[keyId] - if !ok { - // keyId is a comment - fingerprint, ok = keyComments[keyId] + // Is given keyId a fingerprint? + key, ok := byFingerprint[keyId] + if ok { + keysToDelete.Add(key) + continue } - if !ok { - result.Results[i].Error = common.ServerError(fmt.Errorf("invalid ssh key: %s", keyId)) + // Not a fingerprint, is it a comment? + key, ok = byComment[keyId] + if ok { + if internalComments.Contains(keyId) { + result.Results[i].Error = common.ServerError(fmt.Errorf("may not delete internal key: %s", keyId)) + continue + } + keysToDelete.Add(key) + continue } - // We found the key to delete so remove it from those we wish to keep. 
- delete(sshKeys, fingerprint) + result.Results[i].Error = common.ServerError(fmt.Errorf("invalid ssh key: %s", keyId)) } - for _, key := range sshKeys { - keysToWrite = append(keysToWrite, key) + + var keysToWrite []string + + // Add back only the keys that are not deleted, preserving the order. + for _, key := range allKeys { + if !keysToDelete.Contains(key) { + keysToWrite = append(keysToWrite, key) + } } + if len(keysToWrite) == 0 { return params.ErrorResults{}, common.ServerError(fmt.Errorf("cannot delete all keys")) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/keymanager/keymanager_test.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/keymanager/keymanager_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/keymanager/keymanager_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/keymanager/keymanager_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -112,6 +112,26 @@ }) } +func (s *keyManagerSuite) TestListKeysHidesJujuInternal(c *gc.C) { + key1 := sshtesting.ValidKeyOne.Key + " juju-client-key" + key2 := sshtesting.ValidKeyTwo.Key + " juju-system-key" + s.setAuthorisedKeys(c, strings.Join([]string{key1, key2}, "\n")) + + args := params.ListSSHKeys{ + Entities: params.Entities{[]params.Entity{ + {Tag: s.AdminUserTag(c).Name()}, + }}, + Mode: ssh.FullKeys, + } + results, err := s.keymanager.ListKeys(args) + c.Assert(err, jc.ErrorIsNil) + c.Assert(results, gc.DeepEquals, params.StringsResults{ + Results: []params.StringsResult{ + {Result: nil}, + }, + }) +} + func (s *keyManagerSuite) assertEnvironKeys(c *gc.C, expected []string) { envConfig, err := s.State.ModelConfig() c.Assert(err, jc.ErrorIsNil) @@ -228,7 +248,29 @@ {Error: apiservertesting.ServerError("invalid ssh key: invalid-key")}, }, }) - s.assertEnvironKeys(c, []string{"bad key", key1}) + s.assertEnvironKeys(c, []string{key1, "bad key"}) +} + +func (s *keyManagerSuite) TestDeleteKeysNotJujuInternal(c *gc.C) { + 
key1 := sshtesting.ValidKeyOne.Key + " juju-client-key" + key2 := sshtesting.ValidKeyTwo.Key + " juju-system-key" + key3 := sshtesting.ValidKeyThree.Key + " a user key" + initialKeys := []string{key1, key2, key3} + s.setAuthorisedKeys(c, strings.Join(initialKeys, "\n")) + + args := params.ModifyUserSSHKeys{ + User: s.AdminUserTag(c).Name(), + Keys: []string{"juju-client-key", "juju-system-key"}, + } + results, err := s.keymanager.DeleteKeys(args) + c.Check(results, gc.DeepEquals, params.ErrorResults{ + Results: []params.ErrorResult{ + {Error: apiservertesting.ServerError("may not delete internal key: juju-client-key")}, + {Error: apiservertesting.ServerError("may not delete internal key: juju-system-key")}, + }, + }) + c.Assert(err, jc.ErrorIsNil) + s.assertEnvironKeys(c, initialKeys) } func (s *keyManagerSuite) TestBlockDeleteKeys(c *gc.C) { diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/locallogin.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/locallogin.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/locallogin.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/locallogin.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,190 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+ +package apiserver + +import ( + "net/http" + + "github.com/juju/errors" + "github.com/juju/httprequest" + "github.com/julienschmidt/httprouter" + "gopkg.in/juju/names.v2" + "gopkg.in/macaroon-bakery.v1/bakery" + "gopkg.in/macaroon-bakery.v1/bakery/checkers" + "gopkg.in/macaroon-bakery.v1/httpbakery" + macaroon "gopkg.in/macaroon.v1" + + "github.com/juju/juju/apiserver/authentication" + "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/state" +) + +var ( + errorMapper httprequest.ErrorMapper = httpbakery.ErrorToResponse + handleJSON = errorMapper.HandleJSON +) + +func makeHandler(h httprouter.Handle) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + h(w, req, nil) + }) +} + +type localLoginHandlers struct { + authCtxt *authContext + state *state.State +} + +func (h *localLoginHandlers) serveLogin(p httprequest.Params) (interface{}, error) { + switch p.Request.Method { + case "POST": + return h.serveLoginPost(p) + case "GET": + return h.serveLoginGet(p) + default: + return nil, errors.Errorf("unsupported method %q", p.Request.Method) + } +} + +func (h *localLoginHandlers) serveLoginPost(p httprequest.Params) (interface{}, error) { + if err := p.Request.ParseForm(); err != nil { + return nil, err + } + waitId := p.Request.Form.Get("waitid") + if waitId == "" { + return nil, errors.NotValidf("missing waitid") + } + username := p.Request.Form.Get("user") + password := p.Request.Form.Get("password") + if !names.IsValidUser(username) { + return nil, errors.NotValidf("username %q", username) + } + userTag := names.NewUserTag(username) + if !userTag.IsLocal() { + return nil, errors.NotValidf("non-local username %q", username) + } + + authenticator := h.authCtxt.authenticator(p.Request.Host) + if _, err := authenticator.Authenticate(h.state, userTag, params.LoginRequest{ + Credentials: password, + }); err != nil { + // Mark the interaction as done (but failed), + // unblocking a pending "/auth/wait" request. 
+ if err := h.authCtxt.localUserInteractions.Done(waitId, userTag, err); err != nil { + if !errors.IsNotFound(err) { + logger.Warningf( + "failed to record completion of interaction %q for %q", + waitId, userTag.Id(), + ) + } + } + return nil, errors.Trace(err) + } + + // Provide the client with a macaroon that they can use to + // prove that they have logged in, and obtain a discharge + // macaroon. + m, err := h.authCtxt.CreateLocalLoginMacaroon(userTag) + if err != nil { + return nil, err + } + cookie, err := httpbakery.NewCookie(macaroon.Slice{m}) + if err != nil { + return nil, err + } + http.SetCookie(p.Response, cookie) + + // Mark the interaction as done, unblocking a pending + // "/auth/wait" request. + if err := h.authCtxt.localUserInteractions.Done( + waitId, userTag, nil, + ); err != nil { + if errors.IsNotFound(err) { + err = errors.New("login timed out") + } + return nil, err + } + return nil, nil +} + +func (h *localLoginHandlers) serveLoginGet(p httprequest.Params) (interface{}, error) { + if p.Request.Header.Get("Accept") == "application/json" { + // The application/json content-type is used to + // inform the client of the supported auth methods. + return map[string]string{ + "juju_userpass": p.Request.URL.String(), + }, nil + } + // TODO(axw) return an HTML form. If waitid is supplied, + // it should be passed through so we can unblock a request + // on the /auth/wait endpoint. We should also support logging + // in when not specifically directed to the login page. 
+ return nil, errors.NotImplementedf("GET") +} + +func (h *localLoginHandlers) serveWait(p httprequest.Params) (interface{}, error) { + if err := p.Request.ParseForm(); err != nil { + return nil, err + } + if p.Request.Method != "GET" { + return nil, errors.Errorf("unsupported method %q", p.Request.Method) + } + waitId := p.Request.Form.Get("waitid") + if waitId == "" { + return nil, errors.NotValidf("missing waitid") + } + interaction, err := h.authCtxt.localUserInteractions.Wait(waitId, nil) + if err != nil { + return nil, errors.Trace(err) + } + if interaction.LoginError != nil { + return nil, errors.Trace(err) + } + ctx := macaroonAuthContext{ + authContext: h.authCtxt, + req: p.Request, + } + macaroon, err := h.authCtxt.localUserThirdPartyBakeryService.Discharge( + &ctx, interaction.CaveatId, + ) + if err != nil { + return nil, errors.Annotate(err, "discharging macaroon") + } + return httpbakery.WaitResponse{macaroon}, nil +} + +func (h *localLoginHandlers) checkThirdPartyCaveat(req *http.Request, cavId, cav string) ([]checkers.Caveat, error) { + ctx := &macaroonAuthContext{authContext: h.authCtxt, req: req} + return ctx.CheckThirdPartyCaveat(cavId, cav) +} + +type macaroonAuthContext struct { + *authContext + req *http.Request +} + +// CheckThirdPartyCaveat is part of the bakery.ThirdPartyChecker interface. 
+func (ctx *macaroonAuthContext) CheckThirdPartyCaveat(cavId, cav string) ([]checkers.Caveat, error) { + tag, err := ctx.CheckLocalLoginCaveat(cav) + if err != nil { + return nil, errors.Trace(err) + } + firstPartyCaveats, err := ctx.CheckLocalLoginRequest(ctx.req, tag) + if err != nil { + if _, ok := errors.Cause(err).(*bakery.VerificationError); ok { + waitId, err := ctx.localUserInteractions.Start( + cavId, + ctx.clock.Now().Add(authentication.LocalLoginInteractionTimeout), + ) + if err != nil { + return nil, errors.Trace(err) + } + visitURL := localUserIdentityLocationPath + "/login?waitid=" + waitId + waitURL := localUserIdentityLocationPath + "/wait?waitid=" + waitId + return nil, httpbakery.NewInteractionRequiredError(visitURL, waitURL, nil, ctx.req) + } + return nil, errors.Trace(err) + } + return firstPartyCaveats, nil +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/logsink_test.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/logsink_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/logsink_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/logsink_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -29,7 +29,7 @@ // logsinkBaseSuite has functionality that's shared between the the 2 logsink related suites type logsinkBaseSuite struct { - authHttpSuite + authHTTPSuite } func (s *logsinkBaseSuite) logsinkURL(c *gc.C, scheme string) *url.URL { diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/machine/machiner.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/machine/machiner.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/machine/machiner.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/machine/machiner.go 2016-10-13 14:31:49.000000000 +0000 @@ -167,10 +167,7 @@ return nil } - mergedConfig, err := networkingcommon.MergeProviderAndObservedNetworkConfigs(providerConfig, observedConfig) - if err 
!= nil { - return errors.Trace(err) - } + mergedConfig := networkingcommon.MergeProviderAndObservedNetworkConfigs(providerConfig, observedConfig) logger.Tracef("merged observed and provider network config: %+v", mergedConfig) return api.setOneMachineNetworkConfig(m, mergedConfig) @@ -245,10 +242,9 @@ continue } - sortedProviderConfig := networkingcommon.SortNetworkConfigsByParents(providerConfig) - logger.Tracef("sorted provider network config for %q: %+v", m.Id(), sortedProviderConfig) + logger.Tracef("provider network config for %q: %+v", m.Id(), providerConfig) - if err := api.setOneMachineNetworkConfig(m, sortedProviderConfig); err != nil { + if err := api.setOneMachineNetworkConfig(m, providerConfig); err != nil { result.Results[i].Error = common.ServerError(err) continue } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/machine/machiner_test.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/machine/machiner_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/machine/machiner_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/machine/machiner_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -59,14 +59,14 @@ func (s *machinerSuite) TestSetStatus(c *gc.C) { now := time.Now() sInfo := status.StatusInfo{ - Status: status.StatusStarted, + Status: status.Started, Message: "blah", Since: &now, } err := s.machine0.SetStatus(sInfo) c.Assert(err, jc.ErrorIsNil) sInfo = status.StatusInfo{ - Status: status.StatusStopped, + Status: status.Stopped, Message: "foo", Since: &now, } @@ -75,9 +75,9 @@ args := params.SetStatus{ Entities: []params.EntityStatusArgs{ - {Tag: "machine-1", Status: status.StatusError.String(), Info: "not really"}, - {Tag: "machine-0", Status: status.StatusStopped.String(), Info: "foobar"}, - {Tag: "machine-42", Status: status.StatusStarted.String(), Info: "blah"}, + {Tag: "machine-1", Status: status.Error.String(), Info: "not really"}, + {Tag: "machine-0", Status: 
status.Stopped.String(), Info: "foobar"}, + {Tag: "machine-42", Status: status.Started.String(), Info: "blah"}, }} result, err := s.machiner.SetStatus(args) c.Assert(err, jc.ErrorIsNil) @@ -92,12 +92,12 @@ // Verify machine 0 - no change. statusInfo, err := s.machine0.Status() c.Assert(err, jc.ErrorIsNil) - c.Assert(statusInfo.Status, gc.Equals, status.StatusStarted) + c.Assert(statusInfo.Status, gc.Equals, status.Started) c.Assert(statusInfo.Message, gc.Equals, "blah") // ...machine 1 is fine though. statusInfo, err = s.machine1.Status() c.Assert(err, jc.ErrorIsNil) - c.Assert(statusInfo.Status, gc.Equals, status.StatusError) + c.Assert(statusInfo.Status, gc.Equals, status.Error) c.Assert(statusInfo.Message, gc.Equals, "not really") } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/machinemanager/machinemanager.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/machinemanager/machinemanager.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/machinemanager/machinemanager.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/machinemanager/machinemanager.go 2016-10-13 14:31:49.000000000 +0000 @@ -13,6 +13,7 @@ "github.com/juju/juju/apiserver/params" "github.com/juju/juju/environs/config" "github.com/juju/juju/instance" + "github.com/juju/juju/permission" "github.com/juju/juju/state" ) @@ -55,6 +56,15 @@ results := params.AddMachinesResults{ Machines: make([]params.AddMachinesResult, len(args.MachineParams)), } + + canWrite, err := mm.authorizer.HasPermission(permission.WriteAccess, mm.st.ModelTag()) + if err != nil { + return results, errors.Trace(err) + } + if !canWrite { + return results, common.ErrPerm + } + if err := mm.check.ChangeAllowed(); err != nil { return results, errors.Trace(err) } @@ -98,7 +108,7 @@ if p.Series == "" { conf, err := mm.st.ModelConfig() if err != nil { - return nil, err + return nil, errors.Trace(err) } p.Series = config.PreferredSeries(conf) } @@ -107,7 +117,7 @@ 
if p.Placement != nil { env, err := mm.st.Model() if err != nil { - return nil, err + return nil, errors.Trace(err) } // For 1.21 we should support both UUID and name, and with 1.22 // just support UUID @@ -137,7 +147,7 @@ jobs, err := common.StateJobs(p.Jobs) if err != nil { - return nil, err + return nil, errors.Trace(err) } template := state.MachineTemplate{ Series: p.Series, diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/machinemanager/machinemanager_test.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/machinemanager/machinemanager_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/machinemanager/machinemanager_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/machinemanager/machinemanager_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -135,6 +135,10 @@ return &mockBlock{}, false, nil } +func (st *mockState) ModelTag() names.ModelTag { + return names.NewModelTag("deadbeef-2f18-4fd2-967d-db9663db7bea") +} + func (st *mockState) ModelConfig() (*config.Config, error) { panic("not implemented") } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/machinemanager/state.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/machinemanager/state.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/machinemanager/state.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/machinemanager/state.go 2016-10-13 14:31:49.000000000 +0000 @@ -7,11 +7,13 @@ "github.com/juju/juju/environs/config" "github.com/juju/juju/instance" "github.com/juju/juju/state" + names "gopkg.in/juju/names.v2" ) type stateInterface interface { ModelConfig() (*config.Config, error) Model() (*state.Model, error) + ModelTag() names.ModelTag GetBlockForType(t state.BlockType) (state.Block, bool, error) AddOneMachine(template state.MachineTemplate) (*state.Machine, error) AddMachineInsideNewMachine(template, parentTemplate state.MachineTemplate, 
containerType instance.ContainerType) (*state.Machine, error) @@ -29,6 +31,9 @@ func (s stateShim) Model() (*state.Model, error) { return s.State.Model() } +func (s stateShim) ModelTag() names.ModelTag { + return s.State.ModelTag() +} func (s stateShim) GetBlockForType(t state.BlockType) (state.Block, bool, error) { return s.State.GetBlockForType(t) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/machineundertaker/backend.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/machineundertaker/backend.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/machineundertaker/backend.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/machineundertaker/backend.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,45 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package machineundertaker + +import ( + "github.com/juju/juju/network" + "github.com/juju/juju/state" +) + +// Backend defines the methods the machine undertaker needs from +// state.State. +type Backend interface { + // AllRemovedMachines returns all of the machines which have been + // marked for removal. + AllMachineRemovals() ([]string, error) + + // CompleteMachineRemovals removes the machines (and the associated removal + // requests) after the provider-level cleanup is done. + CompleteMachineRemovals(machineIDs ...string) error + + // WatchMachineRemovals returns a NotifyWatcher that triggers + // whenever machine removal requests are added or removed. + WatchMachineRemovals() state.NotifyWatcher + + // Machine gets a specific machine, so we can collect details of + // its network interfaces. + Machine(id string) (Machine, error) +} + +// Machine defines the methods we need from state.Machine. +type Machine interface { + // AllProviderInterfaceInfos returns the details needed to talk to + // the provider about this machine's attached devices. 
+ AllProviderInterfaceInfos() ([]network.ProviderInterfaceInfo, error) +} + +type backendShim struct { + *state.State +} + +// Machine implements Machine. +func (b *backendShim) Machine(id string) (Machine, error) { + return b.State.Machine(id) +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/machineundertaker/package_test.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/machineundertaker/package_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/machineundertaker/package_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/machineundertaker/package_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,14 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package machineundertaker_test + +import ( + stdtesting "testing" + + gc "gopkg.in/check.v1" +) + +func TestAll(t *stdtesting.T) { + gc.TestingT(t) +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/machineundertaker/undertaker.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/machineundertaker/undertaker.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/machineundertaker/undertaker.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/machineundertaker/undertaker.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,182 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+ +package machineundertaker + +import ( + "github.com/juju/errors" + "github.com/juju/loggo" + "gopkg.in/juju/names.v2" + + "github.com/juju/juju/apiserver/common" + "github.com/juju/juju/apiserver/facade" + "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/network" + "github.com/juju/juju/state" + "github.com/juju/juju/state/watcher" +) + +var logger = loggo.GetLogger("juju.apiserver.machineundertaker") + +func init() { + common.RegisterStandardFacade("MachineUndertaker", 1, newAPIFromState) +} + +// API implements the API facade used by the machine undertaker. +type API struct { + backend Backend + resources facade.Resources + canManageModel func(modelUUID string) bool +} + +// NewAPI implements the API used by the machine undertaker worker to +// find out what provider-level resources need to be cleaned up when a +// machine goes away. +func NewAPI(backend Backend, resources facade.Resources, authorizer facade.Authorizer) (*API, error) { + if !authorizer.AuthModelManager() { + return nil, errors.Trace(common.ErrPerm) + } + + api := &API{ + backend: backend, + resources: resources, + canManageModel: func(modelUUID string) bool { + return modelUUID == authorizer.ConnectedModel() + }, + } + return api, nil +} + +func newAPIFromState(st *state.State, res facade.Resources, auth facade.Authorizer) (*API, error) { + return NewAPI(&backendShim{st}, res, auth) +} + +// AllMachineRemovals returns tags for all of the machines that have +// been marked for removal in the requested model. 
+func (m *API) AllMachineRemovals(models params.Entities) params.EntitiesResults { + results := make([]params.EntitiesResult, len(models.Entities)) + for i, entity := range models.Entities { + entities, err := m.allRemovalsForTag(entity.Tag) + results[i].Entities = entities + results[i].Error = common.ServerError(err) + } + return params.EntitiesResults{Results: results} +} + +func (m *API) allRemovalsForTag(tag string) ([]params.Entity, error) { + err := m.checkModelAuthorization(tag) + if err != nil { + return nil, errors.Trace(err) + } + machineIds, err := m.backend.AllMachineRemovals() + if err != nil { + return nil, errors.Trace(err) + } + var entities []params.Entity + for _, id := range machineIds { + entities = append(entities, params.Entity{ + Tag: names.NewMachineTag(id).String(), + }) + } + return entities, nil +} + +// GetMachineProviderInterfaceInfo returns the provider details for +// all network interfaces attached to the machines requested. +func (m *API) GetMachineProviderInterfaceInfo(machines params.Entities) params.ProviderInterfaceInfoResults { + results := make([]params.ProviderInterfaceInfoResult, len(machines.Entities)) + for i, entity := range machines.Entities { + results[i].MachineTag = entity.Tag + + interfaces, err := m.getInterfaceInfoForOneMachine(entity.Tag) + if err != nil { + results[i].Error = common.ServerError(err) + continue + } + + infos := make([]params.ProviderInterfaceInfo, len(interfaces)) + for i, info := range interfaces { + infos[i].InterfaceName = info.InterfaceName + infos[i].MACAddress = info.MACAddress + infos[i].ProviderId = string(info.ProviderId) + } + + results[i].Interfaces = infos + } + return params.ProviderInterfaceInfoResults{results} +} + +func (m *API) getInterfaceInfoForOneMachine(machineTag string) ([]network.ProviderInterfaceInfo, error) { + tag, err := names.ParseMachineTag(machineTag) + if err != nil { + return nil, errors.Trace(err) + } + machine, err := m.backend.Machine(tag.Id()) + if err != nil { 
+ return nil, errors.Trace(err) + } + interfaces, err := machine.AllProviderInterfaceInfos() + if err != nil { + return nil, errors.Trace(err) + } + return interfaces, nil +} + +// CompleteMachineRemovals removes the specified machines from the +// model database. It should only be called once any provider-level +// cleanup has been done for those machines. +func (m *API) CompleteMachineRemovals(machines params.Entities) error { + machineIDs, err := collectMachineIDs(machines) + if err != nil { + return errors.Trace(err) + } + return m.backend.CompleteMachineRemovals(machineIDs...) +} + +// WatchMachineRemovals returns a watcher that will signal each time a +// machine is marked for removal. +func (m *API) WatchMachineRemovals(models params.Entities) params.NotifyWatchResults { + results := make([]params.NotifyWatchResult, len(models.Entities)) + for i, entity := range models.Entities { + id, err := m.watchRemovalsForTag(entity.Tag) + results[i].NotifyWatcherId = id + results[i].Error = common.ServerError(err) + } + return params.NotifyWatchResults{Results: results} +} + +func (m *API) watchRemovalsForTag(tag string) (string, error) { + err := m.checkModelAuthorization(tag) + if err != nil { + return "", errors.Trace(err) + } + watch := m.backend.WatchMachineRemovals() + if _, ok := <-watch.Changes(); ok { + return m.resources.Register(watch), nil + } else { + return "", watcher.EnsureErr(watch) + } +} + +func (m *API) checkModelAuthorization(tag string) error { + modelTag, err := names.ParseModelTag(tag) + if err != nil { + return errors.Trace(err) + } + if !m.canManageModel(modelTag.Id()) { + return errors.Trace(common.ErrPerm) + } + return nil +} + +func collectMachineIDs(args params.Entities) ([]string, error) { + machineIDs := make([]string, len(args.Entities)) + for i := range args.Entities { + tag, err := names.ParseMachineTag(args.Entities[i].Tag) + if err != nil { + return nil, errors.Trace(err) + } + machineIDs[i] = tag.Id() + } + return machineIDs, nil 
+} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/machineundertaker/undertaker_test.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/machineundertaker/undertaker_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/machineundertaker/undertaker_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/machineundertaker/undertaker_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,317 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package machineundertaker_test + +import ( + "github.com/juju/errors" + "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/apiserver/common" + "github.com/juju/juju/apiserver/machineundertaker" + "github.com/juju/juju/apiserver/params" + apiservertesting "github.com/juju/juju/apiserver/testing" + "github.com/juju/juju/network" + "github.com/juju/juju/state" +) + +type undertakerSuite struct { + testing.IsolationSuite +} + +var _ = gc.Suite(&undertakerSuite{}) + +const ( + uuid1 = "12345678-1234-1234-1234-123456789abc" + tag1 = "model-12345678-1234-1234-1234-123456789abc" + uuid2 = "12345678-1234-1234-1234-123456789abd" + tag2 = "model-12345678-1234-1234-1234-123456789abd" +) + +func (*undertakerSuite) TestRequiresModelManager(c *gc.C) { + backend := &mockBackend{} + _, err := machineundertaker.NewAPI( + backend, + nil, + apiservertesting.FakeAuthorizer{EnvironManager: false}, + ) + c.Assert(err, gc.ErrorMatches, "permission denied") + _, err = machineundertaker.NewAPI( + backend, + nil, + apiservertesting.FakeAuthorizer{EnvironManager: true}, + ) + c.Assert(err, jc.ErrorIsNil) +} + +func (*undertakerSuite) TestAllMachineRemovalsNoResults(c *gc.C) { + _, _, api := makeAPI(c, uuid1) + result := api.AllMachineRemovals(makeEntities(tag1)) + c.Assert(result, gc.DeepEquals, params.EntitiesResults{ + Results: []params.EntitiesResult{{}}, // So, 
one empty set of entities. + }) +} + +func (*undertakerSuite) TestAllMachineRemovalsError(c *gc.C) { + backend, _, api := makeAPI(c, uuid1) + backend.SetErrors(errors.New("I don't want to set the world on fire")) + result := api.AllMachineRemovals(makeEntities(tag1)) + c.Assert(result.Results, gc.HasLen, 1) + c.Assert(result.Results[0].Error, gc.ErrorMatches, "I don't want to set the world on fire") + c.Assert(result.Results[0].Entities, jc.DeepEquals, []params.Entity{}) +} + +func (*undertakerSuite) TestAllMachineRemovalsRequiresModelTags(c *gc.C) { + _, _, api := makeAPI(c, uuid1) + results := api.AllMachineRemovals(makeEntities(tag1, "machine-0")) + c.Assert(results.Results, gc.HasLen, 2) + c.Assert(results.Results[0].Error, gc.IsNil) + c.Assert(results.Results[0].Entities, jc.DeepEquals, []params.Entity{}) + c.Assert(results.Results[1].Error, gc.ErrorMatches, `"machine-0" is not a valid model tag`) + c.Assert(results.Results[1].Entities, jc.DeepEquals, []params.Entity{}) +} + +func (*undertakerSuite) TestAllMachineRemovalsChecksModelTag(c *gc.C) { + _, _, api := makeAPI(c, uuid1) + results := api.AllMachineRemovals(makeEntities(tag2)) + c.Assert(results.Results, gc.HasLen, 1) + c.Assert(results.Results[0].Error, gc.ErrorMatches, "permission denied") + c.Assert(results.Results[0].Entities, gc.IsNil) +} + +func (*undertakerSuite) TestAllMachineRemovals(c *gc.C) { + backend, _, api := makeAPI(c, uuid1) + backend.removals = []string{"0", "2"} + + result := api.AllMachineRemovals(makeEntities(tag1)) + c.Assert(result, gc.DeepEquals, makeEntitiesResults("machine-0", "machine-2")) +} + +func (*undertakerSuite) TestGetMachineProviderInterfaceInfo(c *gc.C) { + backend, _, api := makeAPI(c, "") + backend.machines = map[string]*mockMachine{ + "0": &mockMachine{ + Stub: &testing.Stub{}, + interfaceInfos: []network.ProviderInterfaceInfo{{ + InterfaceName: "billy", + MACAddress: "hexadecimal!", + ProviderId: "a number", + }, { + InterfaceName: "lily", + MACAddress: "octal?", 
+ ProviderId: "different number", + }}}, + "2": &mockMachine{ + Stub: &testing.Stub{}, + interfaceInfos: []network.ProviderInterfaceInfo{{ + InterfaceName: "gilly", + MACAddress: "sexagesimal?!", + ProviderId: "some number", + }}, + }, + } + backend.SetErrors(nil, errors.NotFoundf("no machine 100 fool!")) + + args := makeEntities("machine-2", "machine-100", "machine-0", "machine-inv") + result := api.GetMachineProviderInterfaceInfo(args) + + c.Assert(result, gc.DeepEquals, params.ProviderInterfaceInfoResults{ + Results: []params.ProviderInterfaceInfoResult{{ + MachineTag: "machine-2", + Interfaces: []params.ProviderInterfaceInfo{{ + InterfaceName: "gilly", + MACAddress: "sexagesimal?!", + ProviderId: "some number", + }}, + }, { + MachineTag: "machine-100", + Error: common.ServerError( + errors.NotFoundf("no machine 100 fool!"), + ), + }, { + MachineTag: "machine-0", + Interfaces: []params.ProviderInterfaceInfo{{ + InterfaceName: "billy", + MACAddress: "hexadecimal!", + ProviderId: "a number", + }, { + InterfaceName: "lily", + MACAddress: "octal?", + ProviderId: "different number", + }}, + }, { + MachineTag: "machine-inv", + Error: common.ServerError( + errors.New(`"machine-inv" is not a valid machine tag`), + ), + }}, + }) +} + +func (*undertakerSuite) TestGetMachineProviderInterfaceInfoHandlesError(c *gc.C) { + backend, _, api := makeAPI(c, "") + backend.machines = map[string]*mockMachine{ + "0": &mockMachine{Stub: backend.Stub}, + } + backend.SetErrors(nil, errors.New("oops - problem getting interface infos")) + result := api.GetMachineProviderInterfaceInfo(makeEntities("machine-0")) + + c.Assert(result.Results, gc.DeepEquals, []params.ProviderInterfaceInfoResult{{ + MachineTag: "machine-0", + Error: common.ServerError(errors.New("oops - problem getting interface infos")), + }}) +} + +func (*undertakerSuite) TestCompleteMachineRemovalsWithNonMachineTags(c *gc.C) { + _, _, api := makeAPI(c, "") + err := api.CompleteMachineRemovals(makeEntities("machine-2", 
"application-a1")) + c.Assert(err, gc.ErrorMatches, `"application-a1" is not a valid machine tag`) +} + +func (*undertakerSuite) TestCompleteMachineRemovalsWithOtherError(c *gc.C) { + backend, _, api := makeAPI(c, "") + backend.SetErrors(errors.New("boom")) + err := api.CompleteMachineRemovals(makeEntities("machine-2")) + c.Assert(err, gc.ErrorMatches, "boom") +} + +func (*undertakerSuite) TestCompleteMachineRemovals(c *gc.C) { + backend, _, api := makeAPI(c, "") + err := api.CompleteMachineRemovals(makeEntities("machine-2", "machine-52")) + c.Assert(err, jc.ErrorIsNil) + backend.CheckCallNames(c, "CompleteMachineRemovals") + callArgs := backend.Calls()[0].Args + c.Assert(len(callArgs), gc.Equals, 1) + values, ok := callArgs[0].([]string) + c.Assert(ok, jc.IsTrue) + c.Assert(values, gc.DeepEquals, []string{"2", "52"}) +} + +func (*undertakerSuite) TestWatchMachineRemovals(c *gc.C) { + backend, res, api := makeAPI(c, uuid1) + + result := api.WatchMachineRemovals(makeEntities(tag1)) + c.Assert(result.Results, gc.HasLen, 1) + c.Assert(res.Get(result.Results[0].NotifyWatcherId), gc.NotNil) + c.Assert(result.Results[0].Error, gc.IsNil) + backend.CheckCallNames(c, "WatchMachineRemovals") +} + +func (*undertakerSuite) TestWatchMachineRemovalsPermissionError(c *gc.C) { + _, _, api := makeAPI(c, uuid1) + result := api.WatchMachineRemovals(makeEntities(tag2)) + c.Assert(result.Results, gc.HasLen, 1) + c.Assert(result.Results[0].Error, gc.ErrorMatches, "permission denied") +} + +func (*undertakerSuite) TestWatchMachineRemovalsError(c *gc.C) { + backend, _, api := makeAPI(c, uuid1) + backend.watcherBlowsUp = true + backend.SetErrors(errors.New("oh no!")) + + result := api.WatchMachineRemovals(makeEntities(tag1)) + c.Assert(result.Results, gc.HasLen, 1) + c.Assert(result.Results[0].Error, gc.ErrorMatches, "oh no!") + c.Assert(result.Results[0].NotifyWatcherId, gc.Equals, "") + backend.CheckCallNames(c, "WatchMachineRemovals") +} + +func makeAPI(c *gc.C, modelUUID string) 
(*mockBackend, *common.Resources, *machineundertaker.API) { + backend := &mockBackend{Stub: &testing.Stub{}} + res := common.NewResources() + api, err := machineundertaker.NewAPI( + backend, + res, + apiservertesting.FakeAuthorizer{ + EnvironManager: true, + ModelUUID: modelUUID, + }, + ) + c.Assert(err, jc.ErrorIsNil) + return backend, res, api +} + +func makeEntities(tags ...string) params.Entities { + return params.Entities{Entities: makeEntitySlice(tags...)} +} + +func makeEntitySlice(tags ...string) []params.Entity { + entities := make([]params.Entity, len(tags)) + for i := range tags { + entities[i] = params.Entity{Tag: tags[i]} + } + return entities +} + +func makeEntitiesResults(tags ...string) params.EntitiesResults { + return params.EntitiesResults{ + Results: []params.EntitiesResult{{ + Entities: makeEntitySlice(tags...), + }}, + } +} + +type mockBackend struct { + *testing.Stub + + removals []string + machines map[string]*mockMachine + watcherBlowsUp bool +} + +func (b *mockBackend) AllMachineRemovals() ([]string, error) { + b.AddCall("AllMachineRemovals") + return b.removals, b.NextErr() +} + +func (b *mockBackend) CompleteMachineRemovals(machineIDs ...string) error { + b.AddCall("CompleteMachineRemovals", machineIDs) + return b.NextErr() +} + +func (b *mockBackend) WatchMachineRemovals() state.NotifyWatcher { + b.AddCall("WatchMachineRemovals") + watcher := &mockWatcher{backend: b, out: make(chan struct{}, 1)} + if b.watcherBlowsUp { + close(watcher.out) + } else { + watcher.out <- struct{}{} + } + return watcher +} + +func (b *mockBackend) Machine(id string) (machineundertaker.Machine, error) { + b.AddCall("Machine", id) + return b.machines[id], b.NextErr() +} + +type mockMachine struct { + *testing.Stub + interfaceInfos []network.ProviderInterfaceInfo +} + +func (m *mockMachine) AllProviderInterfaceInfos() ([]network.ProviderInterfaceInfo, error) { + m.AddCall("AllProviderInterfaceInfos") + err := m.NextErr() + if err != nil { + return nil, err + } 
+ return m.interfaceInfos, err +} + +type mockWatcher struct { + state.NotifyWatcher + + backend *mockBackend + out chan struct{} +} + +func (w *mockWatcher) Changes() <-chan struct{} { + return w.out +} + +func (w *mockWatcher) Err() error { + return w.backend.NextErr() +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/metricsadder/metricsadder_test.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/metricsadder/metricsadder_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/metricsadder/metricsadder_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/metricsadder/metricsadder_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -174,7 +174,7 @@ }, { about: "user tag", tag: names.NewLocalUserTag("admin").String(), - expect: `"user-admin@local" is not a valid unit tag`, + expect: `"user-admin" is not a valid unit tag`, }, { about: "machine tag", tag: names.NewMachineTag("0").String(), diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/metricsdebug/metricsdebug.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/metricsdebug/metricsdebug.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/metricsdebug/metricsdebug.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/metricsdebug/metricsdebug.go 2016-10-13 14:31:49.000000000 +0000 @@ -6,6 +6,9 @@ package metricsdebug import ( + "fmt" + "sort" + "github.com/juju/errors" "gopkg.in/juju/names.v2" @@ -23,8 +26,11 @@ // MetricBatchesForUnit returns metric batches for the given unit. MetricBatchesForUnit(unit string) ([]state.MetricBatch, error) - // MetricBatchesForService returns metric batches for the given service. - MetricBatchesForService(service string) ([]state.MetricBatch, error) + // MetricBatchesForApplication returns metric batches for the given application. 
+ MetricBatchesForApplication(application string) ([]state.MetricBatch, error) + + //MetricBatchesForModel returns all metrics batches in the model. + MetricBatchesForModel() ([]state.MetricBatch, error) // Unit returns the unit based on its name. Unit(string) (*state.Unit, error) @@ -71,7 +77,15 @@ Results: make([]params.EntityMetrics, len(args.Entities)), } if len(args.Entities) == 0 { - return results, nil + batches, err := api.state.MetricBatchesForModel() + if err != nil { + return results, errors.Annotate(err, "failed to get metrics") + } + return params.MetricResults{ + Results: []params.EntityMetrics{{ + Metrics: api.filterLastValuePerKeyPerUnit(batches), + }}, + }, nil } for i, arg := range args.Entities { tag, err := names.ParseTag(arg.Tag) @@ -89,7 +103,7 @@ continue } case names.ApplicationTagKind: - batches, err = api.state.MetricBatchesForService(tag.Id()) + batches, err = api.state.MetricBatchesForApplication(tag.Id()) if err != nil { err = errors.Annotate(err, "failed to get metrics") results.Results[i].Error = common.ServerError(err) @@ -99,27 +113,46 @@ err := errors.Errorf("invalid tag %v", arg.Tag) results.Results[i].Error = common.ServerError(err) } - metricCount := 0 - for _, b := range batches { - metricCount += len(b.Metrics()) - } - metrics := make([]params.MetricResult, metricCount) - ix := 0 - for _, mb := range batches { - for _, m := range mb.Metrics() { - metrics[ix] = params.MetricResult{ - Key: m.Key, - Value: m.Value, - Time: m.Time, - } - ix++ - } - results.Results[i].Metrics = metrics - } + results.Results[i].Metrics = api.filterLastValuePerKeyPerUnit(batches) } return results, nil } +type byUnit []params.MetricResult + +func (t byUnit) Len() int { return len(t) } +func (t byUnit) Swap(i, j int) { t[i], t[j] = t[j], t[i] } +func (t byUnit) Less(i, j int) bool { + return t[i].Unit < t[j].Unit +} + +func (api *MetricsDebugAPI) filterLastValuePerKeyPerUnit(batches []state.MetricBatch) []params.MetricResult { + metrics := 
[]params.MetricResult{} + for _, mb := range batches { + for _, m := range mb.UniqueMetrics() { + metrics = append(metrics, params.MetricResult{ + Key: m.Key, + Value: m.Value, + Time: m.Time, + Unit: mb.Unit(), + }) + } + } + uniq := map[string]params.MetricResult{} + for _, m := range metrics { + // we want unique keys per unit + uniq[fmt.Sprintf("%s-%s", m.Key, m.Unit)] = m + } + results := make([]params.MetricResult, len(uniq)) + i := 0 + for _, m := range uniq { + results[i] = m + i++ + } + sort.Sort(byUnit(results)) + return results +} + // SetMeterStatus sets meter statuses for entities. func (api *MetricsDebugAPI) SetMeterStatus(args params.MeterStatusParams) (params.ErrorResults, error) { results := params.ErrorResults{ @@ -162,15 +195,15 @@ return errors.Trace(err) } case names.ApplicationTag: - service, err := api.state.Application(entity.Id()) + application, err := api.state.Application(entity.Id()) if err != nil { return errors.Trace(err) } - chURL, _ := service.CharmURL() + chURL, _ := application.CharmURL() if chURL.Schema != "local" { return errors.New("not a local charm") } - units, err := service.AllUnits() + units, err := application.AllUnits() if err != nil { return errors.Trace(err) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/metricsdebug/metricsdebug_test.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/metricsdebug/metricsdebug_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/metricsdebug/metricsdebug_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/metricsdebug/metricsdebug_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -173,9 +173,10 @@ meteredCharm := s.Factory.MakeCharm(c, &factory.CharmParams{Name: "metered", URL: "local:quantal/metered"}) meteredService := s.Factory.MakeApplication(c, &factory.ApplicationParams{Charm: meteredCharm}) unit := s.Factory.MakeUnit(c, &factory.UnitParams{Application: meteredService, SetCharmURL: true}) - newTime 
:= time.Now().Round(time.Second) - metricA := state.Metric{"pings", "5", newTime} - metricB := state.Metric{"pings", "10.5", newTime} + t0 := time.Now().Round(time.Second) + t1 := t0.Add(time.Second) + metricA := state.Metric{"pings", "5", t0} + metricB := state.Metric{"pings", "10.5", t1} s.Factory.MakeMetric(c, &factory.MetricParams{Unit: unit, Metrics: []state.Metric{metricA}}) s.Factory.MakeMetric(c, &factory.MetricParams{Unit: unit, Metrics: []state.Metric{metricA, metricB}}) args := params.Entities{Entities: []params.Entity{ @@ -184,29 +185,134 @@ result, err := s.metricsdebug.GetMetrics(args) c.Assert(err, jc.ErrorIsNil) c.Assert(result.Results, gc.HasLen, 1) - c.Assert(result.Results[0].Metrics, gc.HasLen, 3) - c.Assert(result.Results[0], gc.DeepEquals, params.EntityMetrics{ + c.Assert(result.Results[0].Metrics, gc.HasLen, 1) + c.Assert(result.Results[0], jc.DeepEquals, params.EntityMetrics{ Metrics: []params.MetricResult{ { Key: "pings", - Value: "5", - Time: newTime, - }, - { - Key: "pings", - Value: "5", - Time: newTime, - }, - { - Key: "pings", Value: "10.5", - Time: newTime, + Time: t1, + Unit: "metered/0", }, }, Error: nil, }) } +func (s *metricsDebugSuite) TestGetMetricsFiltersCorrectly(c *gc.C) { + meteredCharm := s.Factory.MakeCharm(c, &factory.CharmParams{Name: "metered", URL: "local:quantal/metered"}) + meteredService := s.Factory.MakeApplication(c, &factory.ApplicationParams{Charm: meteredCharm}) + unit0 := s.Factory.MakeUnit(c, &factory.UnitParams{Application: meteredService, SetCharmURL: true}) + unit1 := s.Factory.MakeUnit(c, &factory.UnitParams{Application: meteredService, SetCharmURL: true}) + t0 := time.Now().Round(time.Second) + t1 := t0.Add(time.Second) + metricA := state.Metric{"pings", "5", t1} + metricB := state.Metric{"pings", "10.5", t0} + metricC := state.Metric{"juju-units", "8", t1} + s.Factory.MakeMetric(c, &factory.MetricParams{Unit: unit0, Metrics: []state.Metric{metricA, metricB, metricC}}) + s.Factory.MakeMetric(c, 
&factory.MetricParams{Unit: unit1, Metrics: []state.Metric{metricA, metricB, metricC}}) + args := params.Entities{} + result, err := s.metricsdebug.GetMetrics(args) + c.Assert(err, jc.ErrorIsNil) + c.Assert(result.Results, gc.HasLen, 1) + c.Assert(result.Results[0].Metrics, gc.HasLen, 4) + c.Assert(result.Results[0].Metrics, jc.SameContents, []params.MetricResult{{ + Key: "pings", + Value: "5", + Time: t1, + Unit: "metered/0", + }, { + Key: "juju-units", + Value: "8", + Time: t1, + Unit: "metered/0", + }, { + Key: "pings", + Value: "5", + Time: t1, + Unit: "metered/1", + }, { + Key: "juju-units", + Value: "8", + Time: t1, + Unit: "metered/1", + }}, + ) +} + +func (s *metricsDebugSuite) TestGetMetricsFiltersCorrectlyWhenNotAllMetricsInEachBatch(c *gc.C) { + meteredCharm := s.Factory.MakeCharm(c, &factory.CharmParams{Name: "metered", URL: "local:quantal/metered"}) + meteredService := s.Factory.MakeApplication(c, &factory.ApplicationParams{Charm: meteredCharm}) + unit0 := s.Factory.MakeUnit(c, &factory.UnitParams{Application: meteredService, SetCharmURL: true}) + unit1 := s.Factory.MakeUnit(c, &factory.UnitParams{Application: meteredService, SetCharmURL: true}) + t0 := time.Now().Round(time.Second) + t1 := t0.Add(time.Second) + metricA := state.Metric{"pings", "5", t1} + metricB := state.Metric{"pings", "10.5", t0} + metricC := state.Metric{"juju-units", "8", t1} + s.Factory.MakeMetric(c, &factory.MetricParams{Unit: unit0, Metrics: []state.Metric{metricA, metricB, metricC}}) + s.Factory.MakeMetric(c, &factory.MetricParams{Unit: unit1, Metrics: []state.Metric{metricA, metricB}}) + args := params.Entities{} + result, err := s.metricsdebug.GetMetrics(args) + c.Assert(err, jc.ErrorIsNil) + c.Assert(result.Results, gc.HasLen, 1) + c.Assert(result.Results[0].Metrics, gc.HasLen, 3) + c.Assert(result.Results[0].Metrics, jc.SameContents, []params.MetricResult{{ + Key: "pings", + Value: "5", + Time: t1, + Unit: "metered/0", + }, { + Key: "juju-units", + Value: "8", + Time: t1, 
+ Unit: "metered/0", + }, { + Key: "pings", + Value: "5", + Time: t1, + Unit: "metered/1", + }}, + ) +} + +func (s *metricsDebugSuite) TestGetMetricsFiltersCorrectlyWithMultipleBatchesPerUnit(c *gc.C) { + meteredCharm := s.Factory.MakeCharm(c, &factory.CharmParams{Name: "metered", URL: "local:quantal/metered"}) + meteredService := s.Factory.MakeApplication(c, &factory.ApplicationParams{Charm: meteredCharm}) + unit0 := s.Factory.MakeUnit(c, &factory.UnitParams{Application: meteredService, SetCharmURL: true}) + unit1 := s.Factory.MakeUnit(c, &factory.UnitParams{Application: meteredService, SetCharmURL: true}) + t0 := time.Now().Round(time.Second) + t1 := t0.Add(time.Second) + metricA := state.Metric{"pings", "5", t1} + metricB := state.Metric{"pings", "10.5", t0} + metricC := state.Metric{"juju-units", "8", t1} + s.Factory.MakeMetric(c, &factory.MetricParams{Unit: unit0, Metrics: []state.Metric{metricA, metricB}}) + s.Factory.MakeMetric(c, &factory.MetricParams{Unit: unit0, Metrics: []state.Metric{metricC}}) + s.Factory.MakeMetric(c, &factory.MetricParams{Unit: unit1, Metrics: []state.Metric{metricA, metricB}}) + args := params.Entities{} + result, err := s.metricsdebug.GetMetrics(args) + c.Assert(err, jc.ErrorIsNil) + c.Assert(result.Results, gc.HasLen, 1) + c.Assert(result.Results[0].Metrics, gc.HasLen, 3) + c.Assert(result.Results[0].Metrics, jc.SameContents, []params.MetricResult{{ + Key: "pings", + Value: "5", + Time: t1, + Unit: "metered/0", + }, { + Key: "juju-units", + Value: "8", + Time: t1, + Unit: "metered/0", + }, { + Key: "pings", + Value: "5", + Time: t1, + Unit: "metered/1", + }}, + ) +} + func (s *metricsDebugSuite) TestGetMultipleMetricsNoMocks(c *gc.C) { meteredCharm := s.Factory.MakeCharm(c, &factory.CharmParams{Name: "metered", URL: "local:quantal/metered"}) meteredService := s.Factory.MakeApplication(c, &factory.ApplicationParams{ @@ -275,3 +381,36 @@ c.Assert(metrics.Results[0].Metrics[1].Value, gc.Equals, metricUnit1.Metrics()[0].Value) 
c.Assert(metrics.Results[0].Metrics[1].Time, jc.TimeBetween(metricUnit1.Metrics()[0].Time, metricUnit1.Metrics()[0].Time)) } + +func (s *metricsDebugSuite) TestGetModelNoMocks(c *gc.C) { + meteredCharm := s.Factory.MakeCharm(c, &factory.CharmParams{Name: "metered", URL: "local:quantal/metered"}) + meteredService := s.Factory.MakeApplication(c, &factory.ApplicationParams{ + Charm: meteredCharm, + }) + unit0 := s.Factory.MakeUnit(c, &factory.UnitParams{Application: meteredService, SetCharmURL: true}) + unit1 := s.Factory.MakeUnit(c, &factory.UnitParams{Application: meteredService, SetCharmURL: true}) + + metricUnit0 := s.Factory.MakeMetric(c, &factory.MetricParams{ + Unit: unit0, + }) + metricUnit1 := s.Factory.MakeMetric(c, &factory.MetricParams{ + Unit: unit1, + }) + + args := params.Entities{Entities: []params.Entity{}} + metrics, err := s.metricsdebug.GetMetrics(args) + c.Assert(err, jc.ErrorIsNil) + c.Assert(metrics.Results, gc.HasLen, 1) + metric0 := metrics.Results[0].Metrics[0] + metric1 := metrics.Results[0].Metrics[1] + expected0 := metricUnit0.Metrics()[0] + expected1 := metricUnit1.Metrics()[0] + c.Assert(metric0.Key, gc.Equals, expected0.Key) + c.Assert(metric0.Value, gc.Equals, expected0.Value) + c.Assert(metric0.Time, jc.TimeBetween(expected0.Time, expected0.Time)) + c.Assert(metric0.Unit, gc.Equals, metricUnit0.Unit()) + c.Assert(metric1.Key, gc.Equals, expected1.Key) + c.Assert(metric1.Value, gc.Equals, expected1.Value) + c.Assert(metric1.Time, jc.TimeBetween(expected1.Time, expected1.Time)) + c.Assert(metric1.Unit, gc.Equals, metricUnit1.Unit()) +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/metricsender/metricsender.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/metricsender/metricsender.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/metricsender/metricsender.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/metricsender/metricsender.go 2016-10-13 
14:31:49.000000000 +0000 @@ -6,11 +6,10 @@ package metricsender import ( - "time" - "github.com/juju/errors" "github.com/juju/loggo" wireformat "github.com/juju/romulus/wireformat/metrics" + "github.com/juju/utils/clock" "github.com/juju/juju/state" ) @@ -24,12 +23,14 @@ } var ( - defaultMaxBatchesPerSend = 10 - defaultSender MetricSender = &HttpSender{} + defaultMaxBatchesPerSend = 1000 + defaultSender MetricSender = &HTTPSender{} ) -func handleResponse(mm *state.MetricsManager, st MetricsSenderBackend, response wireformat.Response) { +func handleResponse(mm *state.MetricsManager, st ModelBackend, response wireformat.Response) int { + var acknowledgedBatches int for _, envResp := range response.EnvResponses { + acknowledgedBatches += len(envResp.AcknowledgedBatches) err := st.SetMetricBatchesSent(envResp.AcknowledgedBatches) if err != nil { logger.Errorf("failed to set sent on metrics %v", err) @@ -52,17 +53,19 @@ logger.Errorf("failed to set new grace period %v", err) } } + return acknowledgedBatches } // SendMetrics will send any unsent metrics // over the MetricSender interface in batches // no larger than batchSize. 
-func SendMetrics(st MetricsSenderBackend, sender MetricSender, batchSize int) error { +func SendMetrics(st ModelBackend, sender MetricSender, clock clock.Clock, batchSize int, transmitVendorMetrics bool) error { metricsManager, err := st.MetricsManager() if err != nil { return errors.Trace(err) } sent := 0 + held := 0 for { metrics, err := st.MetricsToSend(batchSize) if err != nil { @@ -77,9 +80,17 @@ } break } - wireData := make([]*wireformat.MetricBatch, lenM) - for i, m := range metrics { - wireData[i] = ToWire(m) + + var wireData []*wireformat.MetricBatch + var heldBatches []string + heldBatchUnits := map[string]bool{} + for _, m := range metrics { + if !transmitVendorMetrics && len(m.Credentials()) == 0 { + heldBatches = append(heldBatches, m.UUID()) + heldBatchUnits[m.Unit()] = true + } else { + wireData = append(wireData, ToWire(m)) + } } response, err := sender.Send(wireData) if err != nil { @@ -92,15 +103,30 @@ } if response != nil { // TODO (mattyw) We are currently ignoring errors during response handling. - handleResponse(metricsManager, st, *response) - // TODO(fwereade): 2016-03-17 lp:1558657 - if err := metricsManager.SetLastSuccessfulSend(time.Now()); err != nil { + acknowledged := handleResponse(metricsManager, st, *response) + // Stop sending if there are no acknowledged batches. + if acknowledged == 0 { + logger.Debugf("got 0 acks, ending send loop") + break + } + if err := metricsManager.SetLastSuccessfulSend(clock.Now()); err != nil { err = errors.Annotate(err, "failed to set successful send time") logger.Warningf("%v", err) return errors.Trace(err) } } - sent += lenM + // Mark held metric batches as sent so that they can be cleaned up later. 
+ if len(heldBatches) > 0 { + err := st.SetMetricBatchesSent(heldBatches) + if err != nil { + return errors.Annotatef(err, "failed to mark metric batches as sent for %s", st.ModelTag()) + } + } + + setHeldBatchUnitMeterStatus(st, heldBatchUnits) + + sent += len(wireData) + held += len(heldBatches) } unsent, err := st.CountOfUnsentMetrics() @@ -111,11 +137,23 @@ if err != nil { return errors.Trace(err) } - logger.Infof("metrics collection summary: sent:%d unsent:%d (%d sent metrics stored)", sent, unsent, sentStored) + logger.Infof("metrics collection summary for %s: sent:%d unsent:%d held:%d (%d sent metrics stored)", st.ModelTag(), sent, unsent, held, sentStored) return nil } +func setHeldBatchUnitMeterStatus(st ModelBackend, units map[string]bool) { + for unitID, _ := range units { + unit, err := st.Unit(unitID) + if err != nil { + logger.Warningf("failed to get unit for setting held batch meter status: %v", err) + } + if err = unit.SetMeterStatus("RED", "transmit-vendor-metrics turned off"); err != nil { + logger.Warningf("failed to set held batch meter status: %v", err) + } + } +} + // DefaultMaxBatchesPerSend returns the default number of batches per send. 
func DefaultMaxBatchesPerSend() int { return defaultMaxBatchesPerSend diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/metricsender/metricsender_test.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/metricsender/metricsender_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/metricsender/metricsender_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/metricsender/metricsender_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -9,18 +9,22 @@ wireformat "github.com/juju/romulus/wireformat/metrics" jc "github.com/juju/testing/checkers" + "github.com/juju/utils/clock" gc "gopkg.in/check.v1" "github.com/juju/juju/apiserver/metricsender" "github.com/juju/juju/apiserver/metricsender/testing" - jujutesting "github.com/juju/juju/juju/testing" + jujujutesting "github.com/juju/juju/juju/testing" "github.com/juju/juju/state" "github.com/juju/juju/testing/factory" + jujutesting "github.com/juju/testing" ) type MetricSenderSuite struct { - jujutesting.JujuConnSuite - unit *state.Unit + jujujutesting.JujuConnSuite + meteredUnit *state.Unit + credUnit *state.Unit + clock clock.Clock } var _ = gc.Suite(&MetricSenderSuite{}) @@ -32,13 +36,19 @@ func (s *MetricSenderSuite) SetUpTest(c *gc.C) { s.JujuConnSuite.SetUpTest(c) meteredCharm := s.Factory.MakeCharm(c, &factory.CharmParams{Name: "metered", URL: "cs:quantal/metered"}) - meteredService := s.Factory.MakeApplication(c, &factory.ApplicationParams{Charm: meteredCharm}) - s.unit = s.Factory.MakeUnit(c, &factory.UnitParams{Application: meteredService, SetCharmURL: true}) + // Application with metrics credentials set. 
+ credApp := s.Factory.MakeApplication(c, &factory.ApplicationParams{Charm: meteredCharm, Name: "cred"}) + err := credApp.SetMetricCredentials([]byte("something here")) + c.Assert(err, jc.ErrorIsNil) + meteredApp := s.Factory.MakeApplication(c, &factory.ApplicationParams{Charm: meteredCharm}) + s.meteredUnit = s.Factory.MakeUnit(c, &factory.UnitParams{Application: meteredApp, SetCharmURL: true}) + s.credUnit = s.Factory.MakeUnit(c, &factory.UnitParams{Application: credApp, SetCharmURL: true}) + s.clock = jujutesting.NewClock(time.Now()) } func (s *MetricSenderSuite) TestToWire(c *gc.C) { now := time.Now().Round(time.Second) - metric := s.Factory.MakeMetric(c, &factory.MetricParams{Unit: s.unit, Sent: false, Time: &now}) + metric := s.Factory.MakeMetric(c, &factory.MetricParams{Unit: s.credUnit, Sent: false, Time: &now}) result := metricsender.ToWire(metric) m := metric.Metrics()[0] metrics := []wireformat.Metric{ @@ -61,15 +71,15 @@ } // TestSendMetrics creates 2 unsent metrics and a sent metric -// and checks that the 2 unsent metrics get sent and have their -// sent field set to true. +// and checks that the 2 unsent metrics get marked as sent (have their +// sent field set to true). 
func (s *MetricSenderSuite) TestSendMetrics(c *gc.C) { var sender testing.MockSender now := time.Now() - unsent1 := s.Factory.MakeMetric(c, &factory.MetricParams{Unit: s.unit, Time: &now}) - unsent2 := s.Factory.MakeMetric(c, &factory.MetricParams{Unit: s.unit, Time: &now}) - s.Factory.MakeMetric(c, &factory.MetricParams{Unit: s.unit, Sent: true, Time: &now}) - err := metricsender.SendMetrics(s.State, &sender, 10) + unsent1 := s.Factory.MakeMetric(c, &factory.MetricParams{Unit: s.credUnit, Time: &now}) + unsent2 := s.Factory.MakeMetric(c, &factory.MetricParams{Unit: s.meteredUnit, Time: &now}) + s.Factory.MakeMetric(c, &factory.MetricParams{Unit: s.credUnit, Sent: true, Time: &now}) + err := metricsender.SendMetrics(s.State, &sender, s.clock, 10, true) c.Assert(err, jc.ErrorIsNil) c.Assert(sender.Data, gc.HasLen, 1) c.Assert(sender.Data[0], gc.HasLen, 2) @@ -83,6 +93,84 @@ c.Assert(sent2.Sent(), jc.IsTrue) } +// TestSendMetricsAbort creates 7 unsent metrics and +// checks that the sending stops when no more batches are ack'ed. +func (s *MetricSenderSuite) TestSendMetricsAbort(c *gc.C) { + sender := &testing.MockSender{} + now := time.Now() + metrics := make([]*state.MetricBatch, 7) + for i := 0; i < 7; i++ { + metrics[i] = s.Factory.MakeMetric(c, &factory.MetricParams{Unit: s.credUnit, Time: &now}) + } + + sender.IgnoreBatches(metrics[0:2]...) + + // Send 4 batches per POST. + err := metricsender.SendMetrics(s.State, sender, s.clock, 4, true) + c.Assert(err, jc.ErrorIsNil) + c.Assert(sender.Data, gc.HasLen, 4) + + unsent := 0 + sent := 0 + for _, batch := range metrics { + b, err := s.State.MetricBatch(batch.UUID()) + c.Assert(err, jc.ErrorIsNil) + if b.Sent() { + sent++ + } else { + unsent++ + } + } + c.Assert(sent, gc.Equals, 5) + c.Assert(unsent, gc.Equals, 2) +} + +// TestHoldMetrics creates 2 unsent metrics and a sent metric +// and checks that only the metric from the application with credentials is sent. +// But both metrics are marked as sent. 
+func (s *MetricSenderSuite) TestHoldMetrics(c *gc.C) { + var sender testing.MockSender + now := time.Now() + unsent1 := s.Factory.MakeMetric(c, &factory.MetricParams{Unit: s.credUnit, Time: &now}) + unsent2 := s.Factory.MakeMetric(c, &factory.MetricParams{Unit: s.meteredUnit, Time: &now}) + s.Factory.MakeMetric(c, &factory.MetricParams{Unit: s.credUnit, Sent: true, Time: &now}) + err := metricsender.SendMetrics(s.State, &sender, s.clock, 10, false) + c.Assert(err, jc.ErrorIsNil) + c.Assert(sender.Data, gc.HasLen, 1) + c.Assert(sender.Data[0], gc.HasLen, 1) + c.Assert(sender.Data[0][0].UUID, gc.Equals, unsent1.UUID()) + sent1, err := s.State.MetricBatch(unsent1.UUID()) + c.Assert(err, jc.ErrorIsNil) + c.Assert(sent1.Sent(), jc.IsTrue) + + sent2, err := s.State.MetricBatch(unsent2.UUID()) + c.Assert(err, jc.ErrorIsNil) + c.Assert(sent2.Sent(), jc.IsTrue) +} + +func (s *MetricSenderSuite) TestHoldMetricsSetsMeterStatus(c *gc.C) { + var sender testing.MockSender + now := time.Now() + err := s.credUnit.SetMeterStatus("GREEN", "known starting point") + c.Assert(err, jc.ErrorIsNil) + err = s.meteredUnit.SetMeterStatus("GREEN", "known starting point") + c.Assert(err, jc.ErrorIsNil) + unsent1 := s.Factory.MakeMetric(c, &factory.MetricParams{Unit: s.credUnit, Time: &now}) + s.Factory.MakeMetric(c, &factory.MetricParams{Unit: s.meteredUnit, Time: &now}) + s.Factory.MakeMetric(c, &factory.MetricParams{Unit: s.credUnit, Sent: true, Time: &now}) + err = metricsender.SendMetrics(s.State, &sender, s.clock, 10, false) + c.Assert(err, jc.ErrorIsNil) + c.Assert(sender.Data, gc.HasLen, 1) + c.Assert(sender.Data[0], gc.HasLen, 1) + c.Assert(sender.Data[0][0].UUID, gc.Equals, unsent1.UUID()) + msCred, err := s.credUnit.GetMeterStatus() + c.Assert(msCred.Code, gc.Equals, state.MeterGreen) + c.Assert(msCred.Info, gc.Equals, "known starting point") + msMetered, err := s.meteredUnit.GetMeterStatus() + c.Assert(msMetered.Code, gc.Equals, state.MeterRed) + c.Assert(msMetered.Info, gc.Equals, 
"transmit-vendor-metrics turned off") +} + // TestSendBulkMetrics tests the logic of splitting sends // into batches is done correctly. The batch size is changed // to send batches of 10 metrics. If we create 100 metrics 10 calls @@ -91,14 +179,14 @@ var sender testing.MockSender now := time.Now() for i := 0; i < 100; i++ { - s.Factory.MakeMetric(c, &factory.MetricParams{Unit: s.unit, Time: &now}) + s.Factory.MakeMetric(c, &factory.MetricParams{Unit: s.credUnit, Time: &now}) } - err := metricsender.SendMetrics(s.State, &sender, 10) + err := metricsender.SendMetrics(s.State, &sender, s.clock, 10, true) c.Assert(err, jc.ErrorIsNil) c.Assert(sender.Data, gc.HasLen, 10) - for i := 0; i < 10; i++ { - c.Assert(sender.Data, gc.HasLen, 10) + for _, d := range sender.Data { + c.Assert(d, gc.HasLen, 10) } } @@ -107,9 +195,9 @@ func (s *MetricSenderSuite) TestDontSendWithNopSender(c *gc.C) { now := time.Now() for i := 0; i < 3; i++ { - s.Factory.MakeMetric(c, &factory.MetricParams{Unit: s.unit, Sent: false, Time: &now}) + s.Factory.MakeMetric(c, &factory.MetricParams{Unit: s.credUnit, Sent: false, Time: &now}) } - err := metricsender.SendMetrics(s.State, metricsender.NopSender{}, 10) + err := metricsender.SendMetrics(s.State, metricsender.NopSender{}, s.clock, 10, true) c.Assert(err, jc.ErrorIsNil) sent, err := s.State.CountOfSentMetrics() c.Assert(err, jc.ErrorIsNil) @@ -120,9 +208,9 @@ sender := &testing.ErrorSender{Err: errors.New("something went wrong")} now := time.Now() for i := 0; i < 3; i++ { - s.Factory.MakeMetric(c, &factory.MetricParams{Unit: s.unit, Sent: false, Time: &now}) + s.Factory.MakeMetric(c, &factory.MetricParams{Unit: s.credUnit, Sent: false, Time: &now}) } - err := metricsender.SendMetrics(s.State, sender, 1) + err := metricsender.SendMetrics(s.State, sender, s.clock, 1, true) c.Assert(err, gc.ErrorMatches, "something went wrong") mm, err := s.State.MetricsManager() c.Assert(err, jc.ErrorIsNil) @@ -136,9 +224,9 @@ c.Assert(err, jc.ErrorIsNil) now := 
time.Now() for i := 0; i < 3; i++ { - s.Factory.MakeMetric(c, &factory.MetricParams{Unit: s.unit, Sent: false, Time: &now}) + s.Factory.MakeMetric(c, &factory.MetricParams{Unit: s.credUnit, Sent: false, Time: &now}) } - err = metricsender.SendMetrics(s.State, metricsender.NopSender{}, 10) + err = metricsender.SendMetrics(s.State, metricsender.NopSender{}, s.clock, 10, true) c.Assert(err, jc.ErrorIsNil) mm, err = s.State.MetricsManager() c.Assert(err, jc.ErrorIsNil) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/metricsender/sender.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/metricsender/sender.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/metricsender/sender.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/metricsender/sender.go 2016-10-13 14:31:49.000000000 +0000 @@ -16,13 +16,13 @@ metricsHost string = "https://api.jujucharms.com/omnibus/v2/metrics" ) -// HttpSender is the default used for sending +// HTTPSender is the default used for sending // metrics to the collector service. -type HttpSender struct { +type HTTPSender struct { } // Send sends the given metrics to the collector service. 
-func (s *HttpSender) Send(metrics []*wireformat.MetricBatch) (*wireformat.Response, error) { +func (s *HTTPSender) Send(metrics []*wireformat.MetricBatch) (*wireformat.Response, error) { b, err := json.Marshal(metrics) if err != nil { return nil, errors.Trace(err) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/metricsender/sender_test.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/metricsender/sender_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/metricsender/sender_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/metricsender/sender_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -14,19 +14,22 @@ wireformat "github.com/juju/romulus/wireformat/metrics" jc "github.com/juju/testing/checkers" "github.com/juju/utils" + "github.com/juju/utils/clock" gc "gopkg.in/check.v1" "github.com/juju/juju/apiserver/metricsender" "github.com/juju/juju/cert" - jujutesting "github.com/juju/juju/juju/testing" + jujujutesting "github.com/juju/juju/juju/testing" "github.com/juju/juju/state" "github.com/juju/juju/testing/factory" + jujutesting "github.com/juju/testing" ) type SenderSuite struct { - jujutesting.JujuConnSuite + jujujutesting.JujuConnSuite unit *state.Unit meteredService *state.Application + clock clock.Clock } var _ = gc.Suite(&SenderSuite{}) @@ -48,6 +51,7 @@ meteredCharm := s.Factory.MakeCharm(c, &factory.CharmParams{Name: "metered", URL: "cs:quantal/metered"}) s.meteredService = s.Factory.MakeApplication(c, &factory.ApplicationParams{Charm: meteredCharm}) s.unit = s.Factory.MakeUnit(c, &factory.UnitParams{Application: s.meteredService, SetCharmURL: true}) + s.clock = jujutesting.NewClock(time.Now()) } // startServer starts a test HTTP server, returning a function that should be @@ -61,13 +65,13 @@ } } -var _ metricsender.MetricSender = (*metricsender.HttpSender)(nil) +var _ metricsender.MetricSender = (*metricsender.HTTPSender)(nil) -// TestHttpSender checks that if the 
default sender +// TestHTTPSender checks that if the default sender // is in use metrics get sent -func (s *SenderSuite) TestHttpSender(c *gc.C) { +func (s *SenderSuite) TestHTTPSender(c *gc.C) { metricCount := 3 - expectedCharmUrl, _ := s.unit.CharmURL() + expectedCharmURL, _ := s.unit.CharmURL() receiverChan := make(chan wireformat.MetricBatch, metricCount) cleanup := s.startServer(c, testHandler(c, receiverChan, nil, 0)) @@ -78,14 +82,14 @@ for i := range metrics { metrics[i] = s.Factory.MakeMetric(c, &factory.MetricParams{Unit: s.unit, Sent: false, Time: &now}) } - var sender metricsender.HttpSender - err := metricsender.SendMetrics(s.State, &sender, 10) + var sender metricsender.HTTPSender + err := metricsender.SendMetrics(s.State, &sender, s.clock, 10, true) c.Assert(err, jc.ErrorIsNil) c.Assert(receiverChan, gc.HasLen, metricCount) close(receiverChan) for batch := range receiverChan { - c.Assert(batch.CharmUrl, gc.Equals, expectedCharmUrl.String()) + c.Assert(batch.CharmUrl, gc.Equals, expectedCharmURL.String()) } for _, metric := range metrics { @@ -160,8 +164,8 @@ for i := range batches { batches[i] = s.Factory.MakeMetric(c, &factory.MetricParams{Unit: s.unit, Sent: false, Time: &now}) } - var sender metricsender.HttpSender - err := metricsender.SendMetrics(s.State, &sender, 10) + var sender metricsender.HTTPSender + err := metricsender.SendMetrics(s.State, &sender, s.clock, 10, true) c.Assert(err, gc.ErrorMatches, test.expectedErr) for _, batch := range batches { m, err := s.State.MetricBatch(batch.UUID()) @@ -189,8 +193,8 @@ c.Assert(err, jc.ErrorIsNil) c.Assert(status.Code, gc.Equals, state.MeterNotSet) - var sender metricsender.HttpSender - err = metricsender.SendMetrics(s.State, &sender, 10) + var sender metricsender.HTTPSender + err = metricsender.SendMetrics(s.State, &sender, s.clock, 10, true) c.Assert(err, jc.ErrorIsNil) status, err = s.unit.GetMeterStatus() @@ -234,8 +238,8 @@ c.Assert(status.Code, gc.Equals, state.MeterNotSet) } - var sender 
metricsender.HttpSender - err := metricsender.SendMetrics(s.State, &sender, 10) + var sender metricsender.HTTPSender + err := metricsender.SendMetrics(s.State, &sender, s.clock, 10, true) c.Assert(err, jc.ErrorIsNil) status, err := unit1.GetMeterStatus() @@ -256,8 +260,8 @@ _ = s.Factory.MakeMetric(c, &factory.MetricParams{Unit: s.unit, Sent: false}) cleanup := s.startServer(c, testHandler(c, nil, nil, 47*time.Hour)) defer cleanup() - var sender metricsender.HttpSender - err := metricsender.SendMetrics(s.State, &sender, 10) + var sender metricsender.HTTPSender + err := metricsender.SendMetrics(s.State, &sender, s.clock, 10, true) c.Assert(err, jc.ErrorIsNil) mm, err := s.State.MetricsManager() c.Assert(err, jc.ErrorIsNil) @@ -269,8 +273,8 @@ cleanup := s.startServer(c, testHandler(c, nil, nil, -47*time.Hour)) defer cleanup() - var sender metricsender.HttpSender - err := metricsender.SendMetrics(s.State, &sender, 10) + var sender metricsender.HTTPSender + err := metricsender.SendMetrics(s.State, &sender, s.clock, 10, true) c.Assert(err, jc.ErrorIsNil) mm, err := s.State.MetricsManager() c.Assert(err, jc.ErrorIsNil) @@ -282,8 +286,8 @@ cleanup := s.startServer(c, testHandler(c, nil, nil, 0)) defer cleanup() - var sender metricsender.HttpSender - err := metricsender.SendMetrics(s.State, &sender, 10) + var sender metricsender.HTTPSender + err := metricsender.SendMetrics(s.State, &sender, s.clock, 10, true) c.Assert(err, jc.ErrorIsNil) mm, err := s.State.MetricsManager() c.Assert(err, jc.ErrorIsNil) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/metricsender/stateinterface.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/metricsender/stateinterface.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/metricsender/stateinterface.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/metricsender/stateinterface.go 2016-10-13 14:31:49.000000000 +0000 @@ -6,6 +6,9 @@ package metricsender import ( + 
"gopkg.in/juju/names.v2" + + "github.com/juju/juju/environs/config" "github.com/juju/juju/state" ) @@ -19,5 +22,13 @@ SetMetricBatchesSent(batchUUIDs []string) error CountOfUnsentMetrics() (int, error) CountOfSentMetrics() (int, error) +} + +// ModelBackend contains additional methods that are used by the metrics sender. +type ModelBackend interface { + MetricsSenderBackend + Unit(name string) (*state.Unit, error) + ModelTag() names.ModelTag + ModelConfig() (*config.Config, error) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/metricsender/testing/mocksender.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/metricsender/testing/mocksender.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/metricsender/testing/mocksender.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/metricsender/testing/mocksender.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,13 +4,18 @@ package testing import ( + "fmt" + + "github.com/juju/juju/state" + wireformat "github.com/juju/romulus/wireformat/metrics" "github.com/juju/utils" ) // MockSender implements the metric sender interface. type MockSender struct { - Data [][]*wireformat.MetricBatch + UnackedBatches map[string]struct{} + Data [][]*wireformat.MetricBatch } // Send implements the Send interface. 
@@ -23,6 +28,12 @@ var envResponses = make(wireformat.EnvironmentResponses) for _, batch := range d { + if m.UnackedBatches != nil { + _, ok := m.UnackedBatches[fmt.Sprintf("%s/%s", batch.ModelUUID, batch.UUID)] + if ok { + continue + } + } envResponses.Ack(batch.ModelUUID, batch.UUID) } return &wireformat.Response{ @@ -31,6 +42,15 @@ }, nil } +func (m *MockSender) IgnoreBatches(batches ...*state.MetricBatch) { + if m.UnackedBatches == nil { + m.UnackedBatches = make(map[string]struct{}) + } + for _, batch := range batches { + m.UnackedBatches[fmt.Sprintf("%s/%s", batch.ModelUUID(), batch.UUID())] = struct{}{} + } +} + // ErrorSender implements the metric sender interface and is used // to return errors during testing type ErrorSender struct { diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/metricsmanager/metricsmanager.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/metricsmanager/metricsmanager.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/metricsmanager/metricsmanager.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/metricsmanager/metricsmanager.go 2016-10-13 14:31:49.000000000 +0000 @@ -8,6 +8,7 @@ import ( "github.com/juju/errors" "github.com/juju/loggo" + "github.com/juju/utils/clock" "gopkg.in/juju/names.v2" "github.com/juju/juju/apiserver/common" @@ -25,7 +26,7 @@ ) func init() { - common.RegisterStandardFacade("MetricsManager", 1, NewMetricsManagerAPI) + common.RegisterStandardFacade("MetricsManager", 1, newMetricsManagerAPI) } // MetricsManager defines the methods on the metricsmanager API end point. @@ -40,15 +41,26 @@ state *state.State accessEnviron common.GetAuthFunc + clock clock.Clock } var _ MetricsManager = (*MetricsManagerAPI)(nil) +// newMetricsManagerAPI wraps NewMetricsManagerAPI for RegisterStandardFacade. 
+func newMetricsManagerAPI( + st *state.State, + resources facade.Resources, + authorizer facade.Authorizer, +) (*MetricsManagerAPI, error) { + return NewMetricsManagerAPI(st, resources, authorizer, clock.WallClock) +} + // NewMetricsManagerAPI creates a new API endpoint for calling metrics manager functions. func NewMetricsManagerAPI( st *state.State, resources facade.Resources, authorizer facade.Authorizer, + clock clock.Clock, ) (*MetricsManagerAPI, error) { if !(authorizer.AuthMachineAgent() && authorizer.AuthModelManager()) { return nil, common.ErrPerm @@ -67,6 +79,7 @@ return &MetricsManagerAPI{ state: st, accessEnviron: accessEnviron, + clock: clock, }, nil } @@ -95,9 +108,19 @@ result.Results[i].Error = common.ServerError(common.ErrPerm) continue } - err = api.state.CleanupOldMetrics() + modelState := api.state + if tag != api.state.ModelTag() { + modelState, err = api.state.ForModel(tag) + if err != nil { + err = errors.Annotatef(err, "failed to access state for %s", tag) + result.Results[i].Error = common.ServerError(err) + continue + } + } + + err = modelState.CleanupOldMetrics() if err != nil { - err = errors.Annotate(err, "failed to cleanup old metrics") + err = errors.Annotatef(err, "failed to cleanup old metrics for %s", tag) result.Results[i].Error = common.ServerError(err) } } @@ -126,9 +149,23 @@ result.Results[i].Error = common.ServerError(common.ErrPerm) continue } - err = metricsender.SendMetrics(api.state, sender, maxBatchesPerSend) + modelState := api.state + if tag != api.state.ModelTag() { + modelState, err = api.state.ForModel(tag) + if err != nil { + err = errors.Annotatef(err, "failed to access state for %s", tag) + result.Results[i].Error = common.ServerError(err) + continue + } + } + txVendorMetrics, err := transmitVendorMetrics(modelState) + if err != nil { + result.Results[i].Error = common.ServerError(err) + continue + } + err = metricsender.SendMetrics(modelState, sender, api.clock, maxBatchesPerSend, txVendorMetrics) if err != nil 
{ - err = errors.Annotate(err, "failed to send metrics") + err = errors.Annotatef(err, "failed to send metrics for %s", tag) logger.Warningf("%v", err) result.Results[i].Error = common.ServerError(err) continue @@ -136,3 +173,11 @@ } return result, nil } + +func transmitVendorMetrics(st *state.State) (bool, error) { + cfg, err := st.ModelConfig() + if err != nil { + return false, errors.Annotatef(err, "failed to get model config for %s", st.ModelTag()) + } + return cfg.TransmitVendorMetrics(), nil +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/metricsmanager/metricsmanager_test.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/metricsmanager/metricsmanager_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/metricsmanager/metricsmanager_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/metricsmanager/metricsmanager_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,6 +4,7 @@ package metricsmanager_test import ( + "fmt" "time" "github.com/juju/errors" @@ -16,13 +17,14 @@ "github.com/juju/juju/apiserver/metricsmanager" "github.com/juju/juju/apiserver/params" apiservertesting "github.com/juju/juju/apiserver/testing" - jujutesting "github.com/juju/juju/juju/testing" + jujujutesting "github.com/juju/juju/juju/testing" "github.com/juju/juju/state" "github.com/juju/juju/testing/factory" + jujutesting "github.com/juju/testing" ) type metricsManagerSuite struct { - jujutesting.JujuConnSuite + jujujutesting.JujuConnSuite metricsmanager *metricsmanager.MetricsManagerAPI authorizer apiservertesting.FakeAuthorizer @@ -37,7 +39,7 @@ Tag: names.NewMachineTag("0"), EnvironManager: true, } - manager, err := metricsmanager.NewMetricsManagerAPI(s.State, nil, s.authorizer) + manager, err := metricsmanager.NewMetricsManagerAPI(s.State, nil, s.authorizer, jujutesting.NewClock(time.Now())) c.Assert(err, jc.ErrorIsNil) s.metricsmanager = manager meteredCharm := s.Factory.MakeCharm(c, 
&factory.CharmParams{Name: "metered", URL: "cs:quantal/metered"}) @@ -62,7 +64,7 @@ anAuthoriser := s.authorizer anAuthoriser.EnvironManager = test.environManager anAuthoriser.Tag = test.tag - endPoint, err := metricsmanager.NewMetricsManagerAPI(s.State, nil, anAuthoriser) + endPoint, err := metricsmanager.NewMetricsManagerAPI(s.State, nil, anAuthoriser, jujutesting.NewClock(time.Now())) if test.expectedError == "" { c.Assert(err, jc.ErrorIsNil) c.Assert(endPoint, gc.NotNil) @@ -172,7 +174,9 @@ }} result, err := s.metricsmanager.SendMetrics(args) c.Assert(err, jc.ErrorIsNil) - expectedError := params.ErrorResult{Error: apiservertesting.PrefixedError("failed to send metrics: ", "an error")} + expectedError := params.ErrorResult{Error: apiservertesting.PrefixedError( + fmt.Sprintf("failed to send metrics for %s: ", s.State.ModelTag()), + "an error")} c.Assert(result.Results[0], jc.DeepEquals, expectedError) mm, err := s.State.MetricsManager() c.Assert(err, jc.ErrorIsNil) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/migrationflag/shim.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/migrationflag/shim.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/migrationflag/shim.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/migrationflag/shim.go 2016-10-13 14:31:49.000000000 +0000 @@ -42,7 +42,7 @@ // MigrationPhase is part of the Backend interface. 
func (shim *backend) MigrationPhase() (migration.Phase, error) { - mig, err := shim.st.LatestModelMigration() + mig, err := shim.st.LatestMigration() if errors.IsNotFound(err) { return migration.NONE, nil } else if err != nil { diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/migrationmaster/backend.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/migrationmaster/backend.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/migrationmaster/backend.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/migrationmaster/backend.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,16 +4,23 @@ package migrationmaster import ( + "gopkg.in/juju/names.v2" + "github.com/juju/juju/migration" "github.com/juju/juju/state" + "github.com/juju/version" ) // Backend defines the state functionality required by the // migrationmaster facade. type Backend interface { - migration.StateExporter - - WatchForModelMigration() state.NotifyWatcher - LatestModelMigration() (state.ModelMigration, error) + WatchForMigration() state.NotifyWatcher + LatestMigration() (state.ModelMigration, error) + ModelUUID() string + ModelName() (string, error) + ModelOwner() (names.UserTag, error) + AgentVersion() (version.Number, error) RemoveExportingModelDocs() error + + migration.StateExporter } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/migrationmaster/facade.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/migrationmaster/facade.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/migrationmaster/facade.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/migrationmaster/facade.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,6 +4,8 @@ package migrationmaster import ( + "encoding/json" + "github.com/juju/errors" "github.com/juju/utils" "github.com/juju/utils/set" @@ -15,6 +17,7 @@ "github.com/juju/juju/apiserver/params" "github.com/juju/juju/core/description" 
coremigration "github.com/juju/juju/core/migration" + "github.com/juju/juju/migration" "github.com/juju/juju/state/watcher" ) @@ -25,15 +28,17 @@ // API implements the API required for the model migration // master worker. type API struct { - backend Backend - authorizer facade.Authorizer - resources facade.Resources + backend Backend + precheckBackend migration.PrecheckBackend + authorizer facade.Authorizer + resources facade.Resources } // NewAPI creates a new API server endpoint for the model migration // master worker. func NewAPI( backend Backend, + precheckBackend migration.PrecheckBackend, resources facade.Resources, authorizer facade.Authorizer, ) (*API, error) { @@ -41,9 +46,10 @@ return nil, common.ErrPerm } return &API{ - backend: backend, - authorizer: authorizer, - resources: resources, + backend: backend, + precheckBackend: precheckBackend, + authorizer: authorizer, + resources: resources, }, nil } @@ -51,7 +57,7 @@ // associated with the API connection. The returned id should be used // with the NotifyWatcher facade to receive events. func (api *API) Watch() params.NotifyWatchResult { - watch := api.backend.WatchForModelMigration() + watch := api.backend.WatchForMigration() if _, ok := <-watch.Changes(); ok { return params.NotifyWatchResult{ NotifyWatcherId: api.resources.Register(watch), @@ -62,36 +68,39 @@ } } -// GetMigrationStatus returns the details and progress of the latest +// MigrationStatus returns the details and progress of the latest // model migration. 
-func (api *API) GetMigrationStatus() (params.MasterMigrationStatus, error) { +func (api *API) MigrationStatus() (params.MasterMigrationStatus, error) { empty := params.MasterMigrationStatus{} - mig, err := api.backend.LatestModelMigration() + mig, err := api.backend.LatestMigration() if err != nil { return empty, errors.Annotate(err, "retrieving model migration") } - target, err := mig.TargetInfo() if err != nil { return empty, errors.Annotate(err, "retrieving target info") } - phase, err := mig.Phase() if err != nil { return empty, errors.Annotate(err, "retrieving phase") } - + macsJSON, err := json.Marshal(target.Macaroons) + if err != nil { + return empty, errors.Annotate(err, "marshalling macaroons") + } return params.MasterMigrationStatus{ - Spec: params.ModelMigrationSpec{ + Spec: params.MigrationSpec{ ModelTag: names.NewModelTag(mig.ModelUUID()).String(), - TargetInfo: params.ModelMigrationTargetInfo{ + TargetInfo: params.MigrationTargetInfo{ ControllerTag: target.ControllerTag.String(), Addrs: target.Addrs, CACert: target.CACert, AuthTag: target.AuthTag.String(), Password: target.Password, + Macaroons: string(macsJSON), }, + ExternalControl: mig.ExternalControl(), }, MigrationId: mig.Id(), Phase: phase.String(), @@ -99,11 +108,39 @@ }, nil } +// ModelInfo returns essential information about the model to be +// migrated. 
+func (api *API) ModelInfo() (params.MigrationModelInfo, error) { + empty := params.MigrationModelInfo{} + + name, err := api.backend.ModelName() + if err != nil { + return empty, errors.Annotate(err, "retrieving model name") + } + + owner, err := api.backend.ModelOwner() + if err != nil { + return empty, errors.Annotate(err, "retrieving model owner") + } + + vers, err := api.backend.AgentVersion() + if err != nil { + return empty, errors.Annotate(err, "retrieving agent version") + } + + return params.MigrationModelInfo{ + UUID: api.backend.ModelUUID(), + Name: name, + OwnerTag: owner.String(), + AgentVersion: vers, + }, nil +} + // SetPhase sets the phase of the active model migration. The provided // phase must be a valid phase value, for example QUIESCE" or // "ABORT". See the core/migration package for the complete list. func (api *API) SetPhase(args params.SetMigrationPhaseArgs) error { - mig, err := api.backend.LatestModelMigration() + mig, err := api.backend.LatestMigration() if err != nil { return errors.Annotate(err, "could not get migration") } @@ -117,11 +154,17 @@ return errors.Annotate(err, "failed to set phase") } +// Prechecks performs pre-migration checks on the model and +// (source) controller. +func (api *API) Prechecks() error { + return migration.SourcePrecheck(api.precheckBackend) +} + // SetStatusMessage sets a human readable status message containing // information about the migration's progress. This will be shown in // status output shown to the end user. func (api *API) SetStatusMessage(args params.SetMigrationStatusMessageArgs) error { - mig, err := api.backend.LatestModelMigration() + mig, err := api.backend.LatestMigration() if err != nil { return errors.Annotate(err, "could not get migration") } @@ -157,7 +200,7 @@ // WatchMinionReports sets up a watcher which reports when a report // for a migration minion has arrived. 
func (api *API) WatchMinionReports() params.NotifyWatchResult { - mig, err := api.backend.LatestModelMigration() + mig, err := api.backend.LatestMigration() if err != nil { return params.NotifyWatchResult{Error: common.ServerError(err)} } @@ -177,17 +220,17 @@ } } -// GetMinionReports returns details of the reports made by migration +// MinionReports returns details of the reports made by migration // minions to the controller for the current migration phase. -func (api *API) GetMinionReports() (params.MinionReports, error) { +func (api *API) MinionReports() (params.MinionReports, error) { var out params.MinionReports - mig, err := api.backend.LatestModelMigration() + mig, err := api.backend.LatestMigration() if err != nil { return out, errors.Trace(err) } - reports, err := mig.GetMinionReports() + reports, err := mig.MinionReports() if err != nil { return out, errors.Trace(err) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/migrationmaster/facade_test.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/migrationmaster/facade_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/migrationmaster/facade_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/migrationmaster/facade_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -14,6 +14,7 @@ "github.com/juju/version" gc "gopkg.in/check.v1" "gopkg.in/juju/names.v2" + "gopkg.in/macaroon.v1" "github.com/juju/juju/apiserver/common" "github.com/juju/juju/apiserver/migrationmaster" @@ -21,6 +22,7 @@ apiservertesting "github.com/juju/juju/apiserver/testing" "github.com/juju/juju/core/description" coremigration "github.com/juju/juju/core/migration" + "github.com/juju/juju/migration" "github.com/juju/juju/state" coretesting "github.com/juju/juju/testing" jujuversion "github.com/juju/juju/version" @@ -86,28 +88,49 @@ } } -func (s *Suite) TestGetMigrationStatus(c *gc.C) { - api := s.mustMakeAPI(c) +func (s *Suite) TestMigrationStatus(c *gc.C) { + 
var expectedMacaroons = ` +[[{"caveats":[],"location":"location","identifier":"id","signature":"a9802bf274262733d6283a69c62805b5668dbf475bcd7edc25a962833f7c2cba"}]]`[1:] - status, err := api.GetMigrationStatus() + api := s.mustMakeAPI(c) + status, err := api.MigrationStatus() c.Assert(err, jc.ErrorIsNil) - c.Assert(status, gc.DeepEquals, params.MasterMigrationStatus{ - Spec: params.ModelMigrationSpec{ + + c.Check(status, gc.DeepEquals, params.MasterMigrationStatus{ + Spec: params.MigrationSpec{ ModelTag: names.NewModelTag(modelUUID).String(), - TargetInfo: params.ModelMigrationTargetInfo{ - ControllerTag: names.NewModelTag(controllerUUID).String(), + TargetInfo: params.MigrationTargetInfo{ + ControllerTag: names.NewControllerTag(controllerUUID).String(), Addrs: []string{"1.1.1.1:1", "2.2.2.2:2"}, CACert: "trust me", AuthTag: names.NewUserTag("admin").String(), Password: "secret", + Macaroons: expectedMacaroons, }, }, MigrationId: "id", - Phase: "PRECHECK", + Phase: "IMPORT", PhaseChangedTime: s.backend.migration.PhaseChangedTime(), }) } +func (s *Suite) TestMigrationStatusExternalControl(c *gc.C) { + s.backend.migration.externalControl = true + status, err := s.mustMakeAPI(c).MigrationStatus() + c.Assert(err, jc.ErrorIsNil) + c.Check(status.Spec.ExternalControl, jc.IsTrue) +} + +func (s *Suite) TestModelInfo(c *gc.C) { + api := s.mustMakeAPI(c) + model, err := api.ModelInfo() + c.Assert(err, jc.ErrorIsNil) + c.Assert(model.UUID, gc.Equals, "model-uuid") + c.Assert(model.Name, gc.Equals, "model-name") + c.Assert(model.OwnerTag, gc.Equals, names.NewUserTag("owner").String()) + c.Assert(model.AgentVersion, gc.Equals, version.MustParse("1.2.3")) +} + func (s *Suite) TestSetPhase(c *gc.C) { api := s.mustMakeAPI(c) @@ -164,6 +187,12 @@ c.Assert(err, gc.ErrorMatches, "failed to set status message: blam") } +func (s *Suite) TestPrechecks(c *gc.C) { + api := s.mustMakeAPI(c) + err := api.Prechecks() + c.Assert(err, gc.ErrorMatches, "retrieving model: boom") +} + func (s 
*Suite) TestExport(c *gc.C) { s.model.AddApplication(description.ApplicationArgs{ Tag: names.NewApplicationTag("foo"), @@ -214,7 +243,7 @@ c.Assert(result.Error, gc.IsNil) s.stub.CheckCallNames(c, - "LatestModelMigration", + "LatestMigration", "ModelMigration.WatchMinionReports", ) @@ -229,7 +258,7 @@ } } -func (s *Suite) TestGetMinionReports(c *gc.C) { +func (s *Suite) TestMinionReports(c *gc.C) { // Report 16 unknowns. These are in reverse order in order to test // sorting. unknown := make([]names.Tag, 0, 16) @@ -250,7 +279,7 @@ } api := s.mustMakeAPI(c) - reports, err := api.GetMinionReports() + reports, err := api.MinionReports() c.Assert(err, jc.ErrorIsNil) // Expect the sample of unknowns to be in order and be limited to @@ -261,7 +290,7 @@ } c.Assert(reports, gc.DeepEquals, params.MinionReports{ MigrationId: "id", - Phase: "PRECHECK", + Phase: "IMPORT", SuccessCount: 3, UnknownCount: len(unknown), UnknownSample: expectedSample, @@ -276,11 +305,12 @@ } func (s *Suite) makeAPI() (*migrationmaster.API, error) { - return migrationmaster.NewAPI(s.backend, s.resources, s.authorizer) + return migrationmaster.NewAPI(s.backend, new(failingPrecheckBackend), + s.resources, s.authorizer) } func (s *Suite) mustMakeAPI(c *gc.C) *migrationmaster.API { - api, err := migrationmaster.NewAPI(s.backend, s.resources, s.authorizer) + api, err := s.makeAPI() c.Assert(err, jc.ErrorIsNil) return api } @@ -295,19 +325,35 @@ model description.Model } -func (b *stubBackend) WatchForModelMigration() state.NotifyWatcher { - b.stub.AddCall("WatchForModelMigration") +func (b *stubBackend) WatchForMigration() state.NotifyWatcher { + b.stub.AddCall("WatchForMigration") return apiservertesting.NewFakeNotifyWatcher() } -func (b *stubBackend) LatestModelMigration() (state.ModelMigration, error) { - b.stub.AddCall("LatestModelMigration") +func (b *stubBackend) LatestMigration() (state.ModelMigration, error) { + b.stub.AddCall("LatestMigration") if b.getErr != nil { return nil, b.getErr } return 
b.migration, nil } +func (b *stubBackend) ModelUUID() string { + return "model-uuid" +} + +func (b *stubBackend) ModelName() (string, error) { + return "model-name", nil +} + +func (b *stubBackend) ModelOwner() (names.UserTag, error) { + return names.NewUserTag("owner"), nil +} + +func (b *stubBackend) AgentVersion() (version.Number, error) { + return version.MustParse("1.2.3"), nil +} + func (b *stubBackend) RemoveExportingModelDocs() error { b.stub.AddCall("RemoveExportingModelDocs") return b.removeErr @@ -321,12 +367,13 @@ type stubMigration struct { state.ModelMigration - stub *testing.Stub - setPhaseErr error - phaseSet coremigration.Phase - setMessageErr error - messageSet string - minionReports *state.MinionReports + stub *testing.Stub + setPhaseErr error + phaseSet coremigration.Phase + setMessageErr error + messageSet string + minionReports *state.MinionReports + externalControl bool } func (m *stubMigration) Id() string { @@ -334,7 +381,7 @@ } func (m *stubMigration) Phase() (coremigration.Phase, error) { - return coremigration.PRECHECK, nil + return coremigration.IMPORT, nil } func (m *stubMigration) PhaseChangedTime() time.Time { @@ -349,13 +396,22 @@ return modelUUID } +func (m *stubMigration) ExternalControl() bool { + return m.externalControl +} + func (m *stubMigration) TargetInfo() (*coremigration.TargetInfo, error) { + mac, err := macaroon.New([]byte("secret"), "id", "location") + if err != nil { + panic(err) + } return &coremigration.TargetInfo{ - ControllerTag: names.NewModelTag(controllerUUID), + ControllerTag: names.NewControllerTag(controllerUUID), Addrs: []string{"1.1.1.1:1", "2.2.2.2:2"}, CACert: "trust me", AuthTag: names.NewUserTag("admin"), Password: "secret", + Macaroons: []macaroon.Slice{{mac}}, }, nil } @@ -380,7 +436,7 @@ return apiservertesting.NewFakeNotifyWatcher(), nil } -func (m *stubMigration) GetMinionReports() (*state.MinionReports, error) { +func (m *stubMigration) MinionReports() (*state.MinionReports, error) { return 
m.minionReports, nil } @@ -391,3 +447,11 @@ modelUUID = utils.MustNewUUID().String() controllerUUID = utils.MustNewUUID().String() } + +type failingPrecheckBackend struct { + migration.PrecheckBackend +} + +func (b *failingPrecheckBackend) Model() (migration.PrecheckModel, error) { + return nil, errors.New("boom") +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/migrationmaster/shim.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/migrationmaster/shim.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/migrationmaster/shim.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/migrationmaster/shim.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,8 +4,12 @@ package migrationmaster import ( + "github.com/juju/errors" "github.com/juju/juju/apiserver/facade" + "github.com/juju/juju/migration" "github.com/juju/juju/state" + "github.com/juju/version" + "gopkg.in/juju/names.v2" ) // newAPIForRegistration exists to provide the required signature for @@ -15,5 +19,42 @@ resources facade.Resources, authorizer facade.Authorizer, ) (*API, error) { - return NewAPI(st, resources, authorizer) + return NewAPI(&backendShim{st}, migration.PrecheckShim(st), resources, authorizer) +} + +// backendShim wraps a *state.State to implement Backend. It is +// untested, but is simple enough to be verified by inspection. +type backendShim struct { + *state.State +} + +// ModelName implements Backend. +func (s *backendShim) ModelName() (string, error) { + model, err := s.Model() + if err != nil { + return "", errors.Trace(err) + } + return model.Name(), nil +} + +// ModelOwner implements Backend. +func (s *backendShim) ModelOwner() (names.UserTag, error) { + model, err := s.Model() + if err != nil { + return names.UserTag{}, errors.Trace(err) + } + return model.Owner(), nil +} + +// AgentVersion implements Backend. 
+func (s *backendShim) AgentVersion() (version.Number, error) { + cfg, err := s.ModelConfig() + if err != nil { + return version.Zero, errors.Trace(err) + } + vers, ok := cfg.AgentVersion() + if !ok { + return version.Zero, errors.New("no agent version") + } + return vers, nil } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/migrationminion/backend.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/migrationminion/backend.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/migrationminion/backend.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/migrationminion/backend.go 2016-10-13 14:31:49.000000000 +0000 @@ -9,5 +9,5 @@ // MigrationMinion facade. type Backend interface { WatchMigrationStatus() state.NotifyWatcher - ModelMigration(string) (state.ModelMigration, error) + Migration(string) (state.ModelMigration, error) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/migrationminion/migrationminion.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/migrationminion/migrationminion.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/migrationminion/migrationminion.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/migrationminion/migrationminion.go 2016-10-13 14:31:49.000000000 +0000 @@ -59,11 +59,11 @@ return errors.New("unable to parse phase") } - mig, err := api.backend.ModelMigration(info.MigrationId) + mig, err := api.backend.Migration(info.MigrationId) if err != nil { return errors.Trace(err) } - err = mig.MinionReport(api.authorizer.GetAuthTag(), phase, info.Success) + err = mig.SubmitMinionReport(api.authorizer.GetAuthTag(), phase, info.Success) return errors.Trace(err) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/migrationminion/migrationminion_test.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/migrationminion/migrationminion_test.go --- 
juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/migrationminion/migrationminion_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/migrationminion/migrationminion_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -74,13 +74,13 @@ api := s.mustMakeAPI(c) err := api.Report(params.MinionReport{ MigrationId: "id", - Phase: "PRECHECK", + Phase: "IMPORT", Success: true, }) c.Assert(err, jc.ErrorIsNil) s.stub.CheckCalls(c, []testing.StubCall{ - {"ModelMigration", []interface{}{"id"}}, - {"Report", []interface{}{s.authorizer.Tag, migration.PRECHECK, true}}, + {"Migration", []interface{}{"id"}}, + {"Report", []interface{}{s.authorizer.Tag, migration.IMPORT, true}}, }) } @@ -127,8 +127,8 @@ return apiservertesting.NewFakeNotifyWatcher() } -func (b *stubBackend) ModelMigration(id string) (state.ModelMigration, error) { - b.stub.AddCall("ModelMigration", id) +func (b *stubBackend) Migration(id string) (state.ModelMigration, error) { + b.stub.AddCall("Migration", id) if b.modelLookupErr != nil { return nil, b.modelLookupErr } @@ -140,7 +140,7 @@ stub *testing.Stub } -func (m *stubModelMigration) MinionReport(tag names.Tag, phase migration.Phase, success bool) error { +func (m *stubModelMigration) SubmitMinionReport(tag names.Tag, phase migration.Phase, success bool) error { m.stub.AddCall("Report", tag, phase, success) return nil } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/migrationtarget/migrationtarget.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/migrationtarget/migrationtarget.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/migrationtarget/migrationtarget.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/migrationtarget/migrationtarget.go 2016-10-13 14:31:49.000000000 +0000 @@ -10,7 +10,9 @@ "github.com/juju/juju/apiserver/common" "github.com/juju/juju/apiserver/facade" "github.com/juju/juju/apiserver/params" + coremigration 
"github.com/juju/juju/core/migration" "github.com/juju/juju/migration" + "github.com/juju/juju/permission" "github.com/juju/juju/state" ) @@ -47,9 +49,7 @@ return errors.Trace(common.ErrPerm) } - // Type assertion is fine because AuthClient is true. - apiUser := authorizer.GetAuthTag().(names.UserTag) - if isAdmin, err := st.IsControllerAdministrator(apiUser); err != nil { + if isAdmin, err := authorizer.HasPermission(permission.SuperuserAccess, st.ControllerTag()); err != nil { return errors.Trace(err) } else if !isAdmin { // The entire facade is only accessible to controller administrators. @@ -58,6 +58,24 @@ return nil } +// Prechecks ensure that the target controller is ready to accept a +// model migration. +func (api *API) Prechecks(model params.MigrationModelInfo) error { + ownerTag, err := names.ParseUserTag(model.OwnerTag) + if err != nil { + return errors.Trace(err) + } + return migration.TargetPrecheck( + migration.PrecheckShim(api.state), + coremigration.ModelInfo{ + UUID: model.UUID, + Name: model.Name, + Owner: ownerTag, + AgentVersion: model.AgentVersion, + }, + ) +} + // Import takes a serialized Juju model, deserializes it, and // recreates it in the receiving controller. func (api *API) Import(serialized params.SerializedModel) error { @@ -67,6 +85,8 @@ } defer st.Close() // TODO(mjs) - post import checks + // NOTE(fwereade) - checks here would be sensible, but we will + // also need to check after the binaries are imported too. return err } @@ -110,5 +130,6 @@ return errors.Trace(err) } - return model.SetMigrationMode(state.MigrationModeActive) + // TODO(fwereade) - need to validate binaries here. 
+ return model.SetMigrationMode(state.MigrationModeNone) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/migrationtarget/migrationtarget_test.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/migrationtarget/migrationtarget_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/migrationtarget/migrationtarget_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/migrationtarget/migrationtarget_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -7,6 +7,7 @@ "github.com/juju/errors" jc "github.com/juju/testing/checkers" "github.com/juju/utils" + "github.com/juju/version" gc "gopkg.in/check.v1" "gopkg.in/juju/names.v2" @@ -43,7 +44,8 @@ s.AddCleanup(func(*gc.C) { s.resources.StopAll() }) s.authorizer = apiservertesting.FakeAuthorizer{ - Tag: s.Owner, + Tag: s.Owner, + AdminTag: s.Owner, } } @@ -79,6 +81,33 @@ return names.NewModelTag(uuid) } +func (s *Suite) TestPrechecks(c *gc.C) { + api := s.mustNewAPI(c) + args := params.MigrationModelInfo{ + UUID: "uuid", + Name: "some-model", + OwnerTag: names.NewUserTag("someone").String(), + AgentVersion: s.controllerVersion(c), + } + err := api.Prechecks(args) + c.Assert(err, jc.ErrorIsNil) +} + +func (s *Suite) TestPrechecksFail(c *gc.C) { + controllerVersion := s.controllerVersion(c) + + // Set the model version ahead of the controller. + modelVersion := controllerVersion + modelVersion.Minor++ + + api := s.mustNewAPI(c) + args := params.MigrationModelInfo{ + AgentVersion: modelVersion, + } + err := api.Prechecks(args) + c.Assert(err, gc.NotNil) +} + func (s *Suite) TestImport(c *gc.C) { api := s.mustNewAPI(c) tag := s.importModel(c, api) @@ -135,7 +164,7 @@ // The model should no longer exist. 
model, err := s.State.GetModel(tag) c.Assert(err, jc.ErrorIsNil) - c.Assert(model.MigrationMode(), gc.Equals, state.MigrationModeActive) + c.Assert(model.MigrationMode(), gc.Equals, state.MigrationModeNone) } func (s *Suite) TestActivateNotATag(c *gc.C) { @@ -186,3 +215,11 @@ c.Assert(err, jc.ErrorIsNil) return newUUID, bytes } + +func (s *Suite) controllerVersion(c *gc.C) version.Number { + cfg, err := s.State.ModelConfig() + c.Assert(err, jc.ErrorIsNil) + vers, ok := cfg.AgentVersion() + c.Assert(ok, jc.IsTrue) + return vers +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/modelconfig/backend.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/modelconfig/backend.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/modelconfig/backend.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/modelconfig/backend.go 2016-10-13 14:31:49.000000000 +0000 @@ -7,15 +7,16 @@ "github.com/juju/juju/apiserver/common" "github.com/juju/juju/environs/config" "github.com/juju/juju/state" + names "gopkg.in/juju/names.v2" ) // Backend contains the state.State methods used in this package, // allowing stubs to be created for testing. 
type Backend interface { common.BlockGetter + ControllerTag() names.ControllerTag + ModelTag() names.ModelTag ModelConfigValues() (config.ConfigValues, error) - ModelConfigDefaultValues() (config.ConfigValues, error) - UpdateModelConfigDefaultValues(map[string]interface{}, []string) error UpdateModelConfig(map[string]interface{}, []string, state.ValidateConfigFunc) error } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/modelconfig/modelconfig.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/modelconfig/modelconfig.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/modelconfig/modelconfig.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/modelconfig/modelconfig.go 2016-10-13 14:31:49.000000000 +0000 @@ -9,8 +9,8 @@ "github.com/juju/juju/apiserver/common" "github.com/juju/juju/apiserver/facade" "github.com/juju/juju/apiserver/params" - "github.com/juju/juju/environs" "github.com/juju/juju/environs/config" + "github.com/juju/juju/permission" "github.com/juju/juju/state" ) @@ -42,43 +42,43 @@ return client, nil } -// ModelGet implements the server-side part of the -// get-model-config CLI command. -func (c *ModelConfigAPI) ModelGet() (params.ModelConfigResults, error) { - result := params.ModelConfigResults{} - values, err := c.backend.ModelConfigValues() +func (c *ModelConfigAPI) checkCanWrite() error { + canWrite, err := c.auth.HasPermission(permission.WriteAccess, c.backend.ModelTag()) if err != nil { - return result, err + return errors.Trace(err) + } + if !canWrite { + return common.ErrPerm } + return nil +} - // TODO(wallyworld) - this can be removed once credentials are properly - // managed outside of model config. - // Strip out any model config attributes that are credential attributes. 
- provider, err := environs.Provider(values[config.TypeKey].Value.(string)) +func (c *ModelConfigAPI) isAdmin() error { + hasAccess, err := c.auth.HasPermission(permission.SuperuserAccess, c.backend.ControllerTag()) if err != nil { - return result, err + return errors.Trace(err) } - credSchemas := provider.CredentialSchemas() - var allCredentialAttributes []string - for _, schema := range credSchemas { - for _, attr := range schema { - allCredentialAttributes = append(allCredentialAttributes, attr.Name) - } + if !hasAccess { + return common.ErrPerm } - isCredentialAttribute := func(attr string) bool { - for _, a := range allCredentialAttributes { - if a == attr { - return true - } - } - return false + return nil +} + +// ModelGet implements the server-side part of the +// model-config CLI command. +func (c *ModelConfigAPI) ModelGet() (params.ModelConfigResults, error) { + result := params.ModelConfigResults{} + if err := c.checkCanWrite(); err != nil { + return result, errors.Trace(err) + } + + values, err := c.backend.ModelConfigValues() + if err != nil { + return result, errors.Trace(err) } result.Config = make(map[string]params.ConfigValue) for attr, val := range values { - if isCredentialAttribute(attr) { - continue - } // Authorized keys are able to be listed using // juju ssh-keys and including them here just // clutters everything. @@ -96,6 +96,10 @@ // ModelSet implements the server-side part of the // set-model-config CLI command. func (c *ModelConfigAPI) ModelSet(args params.ModelSet) error { + if err := c.checkCanWrite(); err != nil { + return err + } + if err := c.check.ChangeAllowed(); err != nil { return errors.Trace(err) } @@ -117,45 +121,11 @@ // ModelUnset implements the server-side part of the // set-model-config CLI command. 
func (c *ModelConfigAPI) ModelUnset(args params.ModelUnset) error { - if err := c.check.ChangeAllowed(); err != nil { - return errors.Trace(err) - } - return c.backend.UpdateModelConfig(nil, args.Keys, nil) -} - -// ModelDefaults returns the default config values used when creating a new model. -func (c *ModelConfigAPI) ModelDefaults() (params.ModelConfigResults, error) { - result := params.ModelConfigResults{} - values, err := c.backend.ModelConfigDefaultValues() - if err != nil { - return result, err + if err := c.checkCanWrite(); err != nil { + return err } - result.Config = make(map[string]params.ConfigValue) - for attr, val := range values { - result.Config[attr] = params.ConfigValue{ - Value: val.Value, - Source: val.Source, - } - } - return result, nil -} - -// SetModelDefaults writes new values for the specified default model settings. -func (c *ModelConfigAPI) SetModelDefaults(args params.ModelSet) error { - if err := c.check.ChangeAllowed(); err != nil { - return errors.Trace(err) - } - // Make sure we don't allow changing agent-version. - if _, found := args.Config["agent-version"]; found { - return errors.New("agent-version cannot have a default value") - } - return c.backend.UpdateModelConfigDefaultValues(args.Config, nil) -} - -// UnsetModelDefaults removes the specified default model settings. 
-func (c *ModelConfigAPI) UnsetModelDefaults(args params.ModelUnset) error { if err := c.check.ChangeAllowed(); err != nil { return errors.Trace(err) } - return c.backend.UpdateModelConfigDefaultValues(nil, args.Keys) + return c.backend.UpdateModelConfig(nil, args.Keys, nil) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/modelconfig/modelconfig_test.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/modelconfig/modelconfig_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/modelconfig/modelconfig_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/modelconfig/modelconfig_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -32,7 +32,8 @@ func (s *modelconfigSuite) SetUpTest(c *gc.C) { s.IsolationSuite.SetUpTest(c) s.authorizer = apiservertesting.FakeAuthorizer{ - Tag: names.NewUserTag("bruce@local"), + Tag: names.NewUserTag("bruce@local"), + AdminTag: names.NewUserTag("bruce@local"), } s.backend = &mockBackend{ cfg: config.ConfigValues{ @@ -41,10 +42,6 @@ "ftp-proxy": {"http://proxy", "model"}, "authorized-keys": {testing.FakeAuthKeys, "model"}, }, - cfgDefaults: config.ConfigValues{ - "attr": {Value: "val", Source: "controller"}, - "attr2": {Value: "val2", Source: "controller"}, - }, } var err error s.api, err = modelconfig.NewModelConfigAPI(s.backend, &s.authorizer) @@ -156,77 +153,17 @@ c.Assert(err, jc.ErrorIsNil) } -func (s *modelconfigSuite) TestModelDefaults(c *gc.C) { - result, err := s.api.ModelDefaults() - c.Assert(err, jc.ErrorIsNil) - expectedValues := map[string]params.ConfigValue{ - "attr": {Value: "val", Source: "controller"}, - "attr2": {Value: "val2", Source: "controller"}, - } - c.Assert(result.Config, jc.DeepEquals, expectedValues) -} - -func (s *modelconfigSuite) TestSetModelDefaults(c *gc.C) { - params := params.ModelSet{ - Config: map[string]interface{}{ - "attr3": "val3", - "attr4": "val4"}, - } - err := s.api.SetModelDefaults(params) - c.Assert(err, 
jc.ErrorIsNil) - c.Assert(s.backend.cfgDefaults, jc.DeepEquals, config.ConfigValues{ - "attr": {Value: "val", Source: "controller"}, - "attr2": {Value: "val2", Source: "controller"}, - "attr3": {Value: "val3", Source: "controller"}, - "attr4": {Value: "val4", Source: "controller"}, - }) -} - -func (s *modelconfigSuite) TestBlockChangesSetModelDefaults(c *gc.C) { - s.blockAllChanges(c, "TestBlockChangesSetModelDefaults") - err := s.api.SetModelDefaults(params.ModelSet{}) - s.assertBlocked(c, err, "TestBlockChangesSetModelDefaults") -} - -func (s *modelconfigSuite) TestUnsetModelDefaults(c *gc.C) { - args := params.ModelUnset{[]string{"attr"}} - err := s.api.UnsetModelDefaults(args) - c.Assert(err, jc.ErrorIsNil) - c.Assert(s.backend.cfgDefaults, jc.DeepEquals, config.ConfigValues{ - "attr2": {Value: "val2", Source: "controller"}, - }) -} - -func (s *modelconfigSuite) TestBlockUnsetModelDefaults(c *gc.C) { - s.blockAllChanges(c, "TestBlockUnsetModelDefaults") - args := params.ModelUnset{[]string{"abc"}} - err := s.api.UnsetModelDefaults(args) - s.assertBlocked(c, err, "TestBlockUnsetModelDefaults") -} - -func (s *modelconfigSuite) TestUnsetModelDefaultsMissing(c *gc.C) { - // It's okay to unset a non-existent attribute. 
- args := params.ModelUnset{[]string{"not_there"}} - err := s.api.UnsetModelDefaults(args) - c.Assert(err, jc.ErrorIsNil) -} - type mockBackend struct { - cfg config.ConfigValues - cfgDefaults config.ConfigValues - old *config.Config - b state.BlockType - msg string + cfg config.ConfigValues + old *config.Config + b state.BlockType + msg string } func (m *mockBackend) ModelConfigValues() (config.ConfigValues, error) { return m.cfg, nil } -func (m *mockBackend) ModelConfigDefaultValues() (config.ConfigValues, error) { - return m.cfgDefaults, nil -} - func (m *mockBackend) UpdateModelConfig(update map[string]interface{}, remove []string, validate state.ValidateConfigFunc) error { if validate != nil { err := validate(update, remove, m.old) @@ -243,16 +180,6 @@ return nil } -func (m *mockBackend) UpdateModelConfigDefaultValues(update map[string]interface{}, remove []string) error { - for k, v := range update { - m.cfgDefaults[k] = config.ConfigValue{v, "controller"} - } - for _, n := range remove { - delete(m.cfgDefaults, n) - } - return nil -} - func (m *mockBackend) GetBlockForType(t state.BlockType) (state.Block, bool, error) { if m.b == t { return &mockBlock{t: t, m: m.msg}, true, nil @@ -261,6 +188,14 @@ } } +func (m *mockBackend) ModelTag() names.ModelTag { + return names.NewModelTag("deadbeef-2f18-4fd2-967d-db9663db7bea") +} + +func (m *mockBackend) ControllerTag() names.ControllerTag { + return names.NewControllerTag("deadbeef-babe-4fd2-967d-db9663db7bea") +} + type mockBlock struct { state.Block t state.BlockType diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/modelmanager/modelinfo_test.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/modelmanager/modelinfo_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/modelmanager/modelinfo_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/modelmanager/modelinfo_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -24,6 +24,8 @@ 
"github.com/juju/juju/core/description" "github.com/juju/juju/environs" "github.com/juju/juju/environs/config" + "github.com/juju/juju/instance" + "github.com/juju/juju/permission" "github.com/juju/juju/state" "github.com/juju/juju/status" coretesting "github.com/juju/juju/testing" @@ -36,6 +38,10 @@ modelmanager *modelmanager.ModelManagerAPI } +func pUint64(v uint64) *uint64 { + return &v +} + var _ = gc.Suite(&modelInfoSuite{}) func (s *modelInfoSuite) SetUpTest(c *gc.C) { @@ -44,33 +50,88 @@ Tag: names.NewUserTag("admin@local"), } s.st = &mockState{ - uuid: coretesting.ModelTag.Id(), + modelUUID: coretesting.ModelTag.Id(), + controllerUUID: coretesting.ControllerTag.Id(), cloud: cloud.Cloud{ Type: "dummy", AuthTypes: []cloud.AuthType{cloud.EmptyAuthType}, }, + cfgDefaults: config.ModelDefaultAttributes{ + "attr": config.AttributeDefaultValues{ + Default: "", + Controller: "val", + Regions: []config.RegionDefaultValue{{ + Name: "dummy", + Value: "val++"}}}, + "attr2": config.AttributeDefaultValues{ + Controller: "val3", + Default: "val2", + Regions: []config.RegionDefaultValue{{ + Name: "left", + Value: "spam"}}}, + }, + } + s.st.controllerModel = &mockModel{ + owner: names.NewUserTag("admin@local"), + life: state.Alive, + cfg: coretesting.ModelConfig(c), + status: status.StatusInfo{ + Status: status.Available, + Since: &time.Time{}, + }, + users: []*mockModelUser{{ + userName: "admin", + access: permission.AdminAccess, + }, { + userName: "otheruser", + access: permission.AdminAccess, + }}, } + s.st.model = &mockModel{ owner: names.NewUserTag("bob@local"), cfg: coretesting.ModelConfig(c), life: state.Dying, status: status.StatusInfo{ - Status: status.StatusDestroying, + Status: status.Destroying, Since: &time.Time{}, }, + users: []*mockModelUser{{ userName: "admin", - access: description.AdminAccess, + access: permission.AdminAccess, }, { - userName: "bob@local", + userName: "bob", displayName: "Bob", - access: description.ReadAccess, + access: 
permission.ReadAccess, }, { - userName: "charlotte@local", + userName: "charlotte", displayName: "Charlotte", - access: description.ReadAccess, + access: permission.ReadAccess, + }, { + userName: "mary", + displayName: "Mary", + access: permission.WriteAccess, }}, } + s.st.machines = []common.Machine{ + &mockMachine{ + id: "1", + containerType: "none", + life: state.Alive, + hw: &instance.HardwareCharacteristics{CpuCores: pUint64(1)}, + }, + &mockMachine{ + id: "2", + life: state.Alive, + containerType: "lxc", + }, + &mockMachine{ + id: "3", + life: state.Dead, + }, + } + var err error s.modelmanager, err = modelmanager.NewModelManagerAPI(s.st, nil, &s.authorizer) c.Assert(err, jc.ErrorIsNil) @@ -78,26 +139,26 @@ func (s *modelInfoSuite) setAPIUser(c *gc.C, user names.UserTag) { s.authorizer.Tag = user - modelmanager, err := modelmanager.NewModelManagerAPI(s.st, nil, s.authorizer) + var err error + s.modelmanager, err = modelmanager.NewModelManagerAPI(s.st, nil, s.authorizer) c.Assert(err, jc.ErrorIsNil) - s.modelmanager = modelmanager } func (s *modelInfoSuite) TestModelInfo(c *gc.C) { info := s.getModelInfo(c) c.Assert(info, jc.DeepEquals, params.ModelInfo{ - Name: "testenv", - UUID: s.st.model.cfg.UUID(), - ControllerUUID: "deadbeef-0bad-400d-8000-4b1d0d06f00d", - OwnerTag: "user-bob@local", - ProviderType: "someprovider", - Cloud: "some-cloud", - CloudRegion: "some-region", - CloudCredential: "some-credential", - DefaultSeries: series.LatestLts(), - Life: params.Dying, + Name: "testenv", + UUID: s.st.model.cfg.UUID(), + ControllerUUID: "deadbeef-1bad-500d-9000-4b1d0d06f00d", + OwnerTag: "user-bob", + ProviderType: "someprovider", + CloudTag: "cloud-some-cloud", + CloudRegion: "some-region", + CloudCredentialTag: "cloudcred-some-cloud_bob_some-credential", + DefaultSeries: series.LatestLts(), + Life: params.Dying, Status: params.EntityStatus{ - Status: status.StatusDestroying, + Status: status.Destroying, Since: &time.Time{}, }, Users: []params.ModelUserInfo{{ 
@@ -105,19 +166,30 @@ LastConnection: &time.Time{}, Access: params.ModelAdminAccess, }, { - UserName: "bob@local", + UserName: "bob", DisplayName: "Bob", LastConnection: &time.Time{}, Access: params.ModelReadAccess, }, { - UserName: "charlotte@local", + UserName: "charlotte", DisplayName: "Charlotte", LastConnection: &time.Time{}, Access: params.ModelReadAccess, + }, { + UserName: "mary", + DisplayName: "Mary", + LastConnection: &time.Time{}, + Access: params.ModelWriteAccess, + }}, + Machines: []params.ModelMachineInfo{{ + Id: "1", + Hardware: ¶ms.MachineHardware{Cores: pUint64(1)}, + }, { + Id: "2", }}, }) s.st.CheckCalls(c, []gitjujutesting.StubCall{ - {"IsControllerAdministrator", []interface{}{names.NewUserTag("admin@local")}}, + {"ControllerTag", nil}, {"ModelUUID", nil}, {"ForModel", []interface{}{names.NewModelTag(s.st.model.cfg.UUID())}}, {"Model", nil}, @@ -125,6 +197,8 @@ {"LastModelConnection", []interface{}{names.NewUserTag("admin")}}, {"LastModelConnection", []interface{}{names.NewLocalUserTag("bob")}}, {"LastModelConnection", []interface{}{names.NewLocalUserTag("charlotte")}}, + {"LastModelConnection", []interface{}{names.NewLocalUserTag("mary")}}, + {"AllMachines", nil}, {"Close", nil}, }) s.st.model.CheckCalls(c, []gitjujutesting.StubCall{ @@ -133,6 +207,7 @@ {"ModelTag", nil}, {"ModelTag", nil}, {"ModelTag", nil}, + {"ModelTag", nil}, {"Status", nil}, {"Owner", nil}, {"Life", nil}, @@ -145,14 +220,26 @@ func (s *modelInfoSuite) TestModelInfoOwner(c *gc.C) { s.setAPIUser(c, names.NewUserTag("bob@local")) info := s.getModelInfo(c) - c.Assert(info.Users, gc.HasLen, 3) + c.Assert(info.Users, gc.HasLen, 4) + c.Assert(info.Machines, gc.HasLen, 2) +} + +func (s *modelInfoSuite) TestModelInfoWriteAccess(c *gc.C) { + mary := names.NewUserTag("mary@local") + s.authorizer.HasWriteTag = mary + s.setAPIUser(c, mary) + info := s.getModelInfo(c) + c.Assert(info.Users, gc.HasLen, 1) + c.Assert(info.Users[0].UserName, gc.Equals, "mary") + c.Assert(info.Machines, 
gc.HasLen, 2) } func (s *modelInfoSuite) TestModelInfoNonOwner(c *gc.C) { s.setAPIUser(c, names.NewUserTag("charlotte@local")) info := s.getModelInfo(c) c.Assert(info.Users, gc.HasLen, 1) - c.Assert(info.Users[0].UserName, gc.Equals, "charlotte@local") + c.Assert(info.Users[0].UserName, gc.Equals, "charlotte") + c.Assert(info.Machines, gc.HasLen, 0) } func (s *modelInfoSuite) getModelInfo(c *gc.C) params.ModelInfo { @@ -207,6 +294,10 @@ c.Assert(results.Results[0].Error, gc.ErrorMatches, expectedErr) } +type unitRetriever interface { + Unit(name string) (*state.Unit, error) +} + type mockState struct { gitjujutesting.Stub @@ -215,13 +306,20 @@ common.ToolsStorageGetter common.BlockGetter metricsender.MetricsSenderBackend + unitRetriever - uuid string + modelUUID string + controllerUUID string cloud cloud.Cloud + clouds map[names.CloudTag]cloud.Cloud model *mockModel controllerModel *mockModel - users []description.UserAccess - creds map[string]cloud.Credential + users []permission.UserAccess + cred cloud.Credential + machines []common.Machine + cfgDefaults config.ModelDefaultAttributes + blockMsg string + block state.BlockType } type fakeModelDescription struct { @@ -231,12 +329,12 @@ } func (st *mockState) Export() (description.Model, error) { - return &fakeModelDescription{UUID: st.uuid}, nil + return &fakeModelDescription{UUID: st.modelUUID}, nil } func (st *mockState) ModelUUID() string { st.MethodCall(st, "ModelUUID") - return st.uuid + return st.modelUUID } func (st *mockState) ModelsForUser(user names.UserTag) ([]*state.UserModel, error) { @@ -244,17 +342,22 @@ return nil, st.NextErr() } -func (st *mockState) IsControllerAdministrator(user names.UserTag) (bool, error) { - st.MethodCall(st, "IsControllerAdministrator", user) +func (st *mockState) AllApplications() ([]common.Application, error) { + st.MethodCall(st, "AllApplications") + return nil, st.NextErr() +} + +func (st *mockState) IsControllerAdmin(user names.UserTag) (bool, error) { + st.MethodCall(st, 
"IsControllerAdmin", user) if st.controllerModel == nil { - return user.Canonical() == "admin@local", st.NextErr() + return user.Id() == "admin", st.NextErr() } if st.controllerModel.users == nil { - return user.Canonical() == "admin@local", st.NextErr() + return user.Id() == "admin", st.NextErr() } for _, u := range st.controllerModel.users { - if user.Name() == u.userName && u.access == description.AdminAccess { + if user.Name() == u.userName && u.access == permission.AdminAccess { nextErr := st.NextErr() if user.Name() != "admin" { panic(user.Name()) @@ -276,7 +379,12 @@ return st.controllerModel, st.NextErr() } -func (st *mockState) ComposeNewModelConfig(modelAttr map[string]interface{}) (map[string]interface{}, error) { +func (st *mockState) ControllerTag() names.ControllerTag { + st.MethodCall(st, "ControllerTag") + return names.NewControllerTag(st.controllerUUID) +} + +func (st *mockState) ComposeNewModelConfig(modelAttr map[string]interface{}, regionSpec *environs.RegionSpec) (map[string]interface{}, error) { st.MethodCall(st, "ComposeNewModelConfig") attr := make(map[string]interface{}) for attrName, val := range modelAttr { @@ -286,10 +394,15 @@ return attr, st.NextErr() } +func (st *mockState) ControllerUUID() string { + st.MethodCall(st, "ControllerUUID") + return st.controllerUUID +} + func (st *mockState) ControllerConfig() (controller.Config, error) { st.MethodCall(st, "ControllerConfig") return controller.Config{ - controller.ControllerUUIDKey: "deadbeef-0bad-400d-8000-4b1d0d06f00d", + controller.ControllerUUIDKey: "deadbeef-1bad-500d-9000-4b1d0d06f00d", }, st.NextErr() } @@ -298,6 +411,11 @@ return st, st.NextErr() } +func (st *mockState) GetModel(tag names.ModelTag) (common.Model, error) { + st.MethodCall(st, "GetModel", tag) + return st.model, st.NextErr() +} + func (st *mockState) Model() (common.Model, error) { st.MethodCall(st, "Model") return st.model, st.NextErr() @@ -313,14 +431,24 @@ return []common.Model{st.model}, st.NextErr() } +func 
(st *mockState) AllMachines() ([]common.Machine, error) { + st.MethodCall(st, "AllMachines") + return st.machines, st.NextErr() +} + +func (st *mockState) Clouds() (map[names.CloudTag]cloud.Cloud, error) { + st.MethodCall(st, "Clouds") + return st.clouds, st.NextErr() +} + func (st *mockState) Cloud(name string) (cloud.Cloud, error) { st.MethodCall(st, "Cloud", name) return st.cloud, st.NextErr() } -func (st *mockState) CloudCredentials(user names.UserTag, cloudName string) (map[string]cloud.Credential, error) { - st.MethodCall(st, "CloudCredentials", user, cloudName) - return st.creds, st.NextErr() +func (st *mockState) CloudCredential(tag names.CloudCredentialTag) (cloud.Credential, error) { + st.MethodCall(st, "CloudCredential", tag) + return st.cred, st.NextErr() } func (st *mockState) Close() error { @@ -328,14 +456,14 @@ return st.NextErr() } -func (st *mockState) AddModelUser(spec state.UserAccessSpec) (description.UserAccess, error) { - st.MethodCall(st, "AddModelUser", spec) - return description.UserAccess{}, st.NextErr() +func (st *mockState) AddModelUser(modelUUID string, spec state.UserAccessSpec) (permission.UserAccess, error) { + st.MethodCall(st, "AddModelUser", modelUUID, spec) + return permission.UserAccess{}, st.NextErr() } -func (st *mockState) AddControllerUser(spec state.UserAccessSpec) (description.UserAccess, error) { +func (st *mockState) AddControllerUser(spec state.UserAccessSpec) (permission.UserAccess, error) { st.MethodCall(st, "AddControllerUser", spec) - return description.UserAccess{}, st.NextErr() + return permission.UserAccess{}, st.NextErr() } func (st *mockState) RemoveModelUser(tag names.UserTag) error { @@ -343,9 +471,9 @@ return st.NextErr() } -func (st *mockState) UserAccess(tag names.UserTag, target names.Tag) (description.UserAccess, error) { +func (st *mockState) UserAccess(tag names.UserTag, target names.Tag) (permission.UserAccess, error) { st.MethodCall(st, "ModelUser", tag, target) - return description.UserAccess{}, 
st.NextErr() + return permission.UserAccess{}, st.NextErr() } func (st *mockState) LastModelConnection(user names.UserTag) (time.Time, error) { @@ -358,9 +486,125 @@ return st.NextErr() } -func (st *mockState) SetUserAccess(subject names.UserTag, target names.Tag, access description.Access) (description.UserAccess, error) { +func (st *mockState) SetUserAccess(subject names.UserTag, target names.Tag, access permission.Access) (permission.UserAccess, error) { st.MethodCall(st, "SetUserAccess", subject, target, access) - return description.UserAccess{}, st.NextErr() + return permission.UserAccess{}, st.NextErr() +} + +func (st *mockState) ModelConfigDefaultValues() (config.ModelDefaultAttributes, error) { + st.MethodCall(st, "ModelConfigDefaultValues") + return st.cfgDefaults, nil +} + +func (st *mockState) UpdateModelConfigDefaultValues(update map[string]interface{}, remove []string, rspec *environs.RegionSpec) error { + st.MethodCall(st, "UpdateModelConfigDefaultValues", update, remove, rspec) + for k, v := range update { + if rspec != nil { + adv := st.cfgDefaults[k] + adv.Regions = append(adv.Regions, config.RegionDefaultValue{ + Name: rspec.Region, + Value: v}) + + } else { + st.cfgDefaults[k] = config.AttributeDefaultValues{Controller: v} + } + } + for _, n := range remove { + if rspec != nil { + for i, r := range st.cfgDefaults[n].Regions { + if r.Name == rspec.Region { + adv := st.cfgDefaults[n] + adv.Regions = append(adv.Regions[:i], adv.Regions[i+1:]...) 
+ st.cfgDefaults[n] = adv + } + } + } else { + if len(st.cfgDefaults[n].Regions) == 0 { + delete(st.cfgDefaults, n) + } else { + + st.cfgDefaults[n] = config.AttributeDefaultValues{ + Regions: st.cfgDefaults[n].Regions} + } + } + } + return nil +} + +func (st *mockState) GetBlockForType(t state.BlockType) (state.Block, bool, error) { + st.MethodCall(st, "GetBlockForType", t) + if st.block == t { + return &mockBlock{t: t, m: st.blockMsg}, true, nil + } else { + return nil, false, nil + } +} + +func (st *mockState) DumpAll() (map[string]interface{}, error) { + st.MethodCall(st, "DumpAll") + return map[string]interface{}{ + "models": "lots of data", + }, st.NextErr() +} + +type mockBlock struct { + state.Block + t state.BlockType + m string +} + +func (m mockBlock) Id() string { return "" } + +func (m mockBlock) Tag() (names.Tag, error) { return names.NewModelTag("mocktesting"), nil } + +func (m mockBlock) Type() state.BlockType { return m.t } + +func (m mockBlock) Message() string { return m.m } + +func (m mockBlock) ModelUUID() string { return "" } + +type mockMachine struct { + common.Machine + id string + life state.Life + containerType instance.ContainerType + hw *instance.HardwareCharacteristics +} + +func (m *mockMachine) Id() string { + return m.id +} + +func (m *mockMachine) Life() state.Life { + return m.life +} + +func (m *mockMachine) ContainerType() instance.ContainerType { + return m.containerType +} + +func (m *mockMachine) HardwareCharacteristics() (*instance.HardwareCharacteristics, error) { + return m.hw, nil +} + +func (m *mockMachine) AgentPresence() (bool, error) { + return true, nil +} + +func (m *mockMachine) InstanceId() (instance.Id, error) { + return "", nil +} + +func (m *mockMachine) WantsVote() bool { + return false +} + +func (m *mockMachine) HasVote() bool { + return false +} + +func (m *mockMachine) Status() (status.StatusInfo, error) { + return status.StatusInfo{}, nil } type mockModel struct { @@ -413,20 +657,20 @@ return 
"some-region" } -func (m *mockModel) CloudCredential() string { +func (m *mockModel) CloudCredential() (names.CloudCredentialTag, bool) { m.MethodCall(m, "CloudCredential") m.PopNoErr() - return "some-credential" + return names.NewCloudCredentialTag("some-cloud/bob/some-credential"), true } -func (m *mockModel) Users() ([]description.UserAccess, error) { +func (m *mockModel) Users() ([]permission.UserAccess, error) { m.MethodCall(m, "Users") if err := m.NextErr(); err != nil { return nil, err } - users := make([]description.UserAccess, len(m.users)) + users := make([]permission.UserAccess, len(m.users)) for i, user := range m.users { - users[i] = description.UserAccess{ + users[i] = permission.UserAccess{ UserID: strings.ToLower(user.userName), UserTag: names.NewUserTag(user.userName), Object: m.ModelTag(), @@ -453,5 +697,5 @@ userName string displayName string lastConnection time.Time - access description.Access + access permission.Access } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/modelmanager/modelmanager.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/modelmanager/modelmanager.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/modelmanager/modelmanager.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/modelmanager/modelmanager.go 2016-10-13 14:31:49.000000000 +0000 @@ -9,6 +9,7 @@ import ( "fmt" + "sort" "time" "github.com/juju/errors" @@ -24,11 +25,10 @@ "github.com/juju/juju/apiserver/params" jujucloud "github.com/juju/juju/cloud" "github.com/juju/juju/controller/modelmanager" - "github.com/juju/juju/core/description" "github.com/juju/juju/environs" "github.com/juju/juju/environs/config" - "github.com/juju/juju/juju/permission" "github.com/juju/juju/migration" + "github.com/juju/juju/permission" "github.com/juju/juju/state" "github.com/juju/juju/state/stateenvirons" "github.com/juju/juju/tools" @@ -44,14 +44,17 @@ type ModelManager interface { CreateModel(args 
params.ModelCreateArgs) (params.ModelInfo, error) DumpModels(args params.Entities) params.MapResults + DumpModelsDB(args params.Entities) params.MapResults ListModels(user params.Entity) (params.UserModelList, error) - DestroyModel() error + DestroyModels(args params.Entities) (params.ErrorResults, error) } // ModelManagerAPI implements the model manager interface and is // the concrete implementation of the api end point. type ModelManagerAPI struct { + *common.ModelStatusAPI state common.ModelManagerBackend + check *common.BlockChecker authorizer facade.Authorizer toolsFinder *common.ToolsFinder apiUser names.UserTag @@ -80,17 +83,19 @@ apiUser, _ := authorizer.GetAuthTag().(names.UserTag) // Pretty much all of the user manager methods have special casing for admin // users, so look once when we start and remember if the user is an admin. - isAdmin, err := st.IsControllerAdministrator(apiUser) + isAdmin, err := authorizer.HasPermission(permission.SuperuserAccess, st.ControllerTag()) if err != nil { return nil, errors.Trace(err) } urlGetter := common.NewToolsURLGetter(st.ModelUUID(), st) return &ModelManagerAPI{ - state: st, - authorizer: authorizer, - toolsFinder: common.NewToolsFinder(configGetter, st, urlGetter), - apiUser: apiUser, - isAdmin: isAdmin, + ModelStatusAPI: common.NewModelStatusAPI(st, authorizer, apiUser), + state: st, + check: common.NewBlockChecker(st), + authorizer: authorizer, + toolsFinder: common.NewToolsFinder(configGetter, st, urlGetter), + apiUser: apiUser, + isAdmin: isAdmin, }, nil } @@ -98,29 +103,36 @@ // are an administrator acting on behalf of another user. func (m *ModelManagerAPI) authCheck(user names.UserTag) error { if m.isAdmin { - logger.Tracef("%q is a controller admin", m.apiUser.Canonical()) + logger.Tracef("%q is a controller admin", m.apiUser.Id()) return nil } // We can't just compare the UserTags themselves as the provider part // may be unset, and gets replaced with 'local'. 
We must compare against // the Canonical value of the user tag. - if m.apiUser.Canonical() == user.Canonical() { + if m.apiUser == user { return nil } return common.ErrPerm } +func (m *ModelManagerAPI) hasWriteAccess(modelTag names.ModelTag) (bool, error) { + canWrite, err := m.authorizer.HasPermission(permission.WriteAccess, modelTag) + if errors.IsNotFound(err) { + return false, nil + } + return canWrite, err +} + // ConfigSource describes a type that is able to provide config. // Abstracted primarily for testing. type ConfigSource interface { Config() (*config.Config, error) } -func (mm *ModelManagerAPI) newModelConfig( +func (m *ModelManagerAPI) newModelConfig( cloudSpec environs.CloudSpec, args params.ModelCreateArgs, - controllerUUID string, source ConfigSource, ) (*config.Config, error) { // For now, we just smash to the two maps together as we store @@ -141,25 +153,20 @@ } joint[config.NameKey] = args.Name - // Copy credential attributes across to model config. - // TODO(axw) credentials should not be going into model config. 
- if cloudSpec.Credential != nil { - for key, value := range cloudSpec.Credential.Attributes() { - joint[key] = value - } - } - baseConfig, err := source.Config() if err != nil { return nil, errors.Trace(err) } - if joint, err = mm.state.ComposeNewModelConfig(joint); err != nil { + + regionSpec := &environs.RegionSpec{Cloud: cloudSpec.Name, Region: cloudSpec.Region} + if joint, err = m.state.ComposeNewModelConfig(joint, regionSpec); err != nil { return nil, errors.Trace(err) } + creator := modelmanager.ModelConfigCreator{ Provider: environs.Provider, FindTools: func(n version.Number) (tools.List, error) { - result, err := mm.toolsFinder.FindTools(params.FindToolsParams{ + result, err := m.toolsFinder.FindTools(params.FindToolsParams{ Number: n, }) if err != nil { @@ -168,21 +175,24 @@ return result.List, nil }, } - return creator.NewModelConfig(cloudSpec, controllerUUID, baseConfig, joint) + return creator.NewModelConfig(cloudSpec, baseConfig, joint) } // CreateModel creates a new model using the account and // model config specified in the args. -func (mm *ModelManagerAPI) CreateModel(args params.ModelCreateArgs) (params.ModelInfo, error) { +func (m *ModelManagerAPI) CreateModel(args params.ModelCreateArgs) (params.ModelInfo, error) { result := params.ModelInfo{} - // TODO(perrito666) this check should be part of the authCheck, without this check - // any user in the controller may create models. - if !mm.isAdmin { - return result, errors.Trace(common.ErrPerm) + canAddModel, err := m.authorizer.HasPermission(permission.AddModelAccess, m.state.ControllerTag()) + if err != nil { + return result, errors.Trace(err) + } + if !canAddModel { + return result, common.ErrPerm } + // Get the controller model first. We need it both for the state // server owner and the ability to get the config. 
- controllerModel, err := mm.state.ControllerModel() + controllerModel, err := m.state.ControllerModel() if err != nil { return result, errors.Trace(err) } @@ -192,24 +202,53 @@ return result, errors.Trace(err) } - // Any user is able to create themselves an model (until real fine - // grain permissions are available), and admins (the creator of the state - // server model) are able to create models for other people. - err = mm.authCheck(ownerTag) - if err != nil { - return result, errors.Trace(err) + var cloudTag names.CloudTag + cloudRegionName := args.CloudRegion + if args.CloudTag != "" { + var err error + cloudTag, err = names.ParseCloudTag(args.CloudTag) + if err != nil { + return result, errors.Trace(err) + } + } else { + cloudTag = names.NewCloudTag(controllerModel.Cloud()) + } + if cloudRegionName == "" && cloudTag.Id() == controllerModel.Cloud() { + cloudRegionName = controllerModel.CloudRegion() } - cloudName := controllerModel.Cloud() - cloud, err := mm.state.Cloud(cloudName) + cloud, err := m.state.Cloud(cloudTag.Id()) if err != nil { + if errors.IsNotFound(err) && args.CloudTag != "" { + // A cloud was specified, and it was not found. + // Annotate the error with the supported clouds. 
+ clouds, err := m.state.Clouds() + if err != nil { + return result, errors.Trace(err) + } + cloudNames := make([]string, 0, len(clouds)) + for tag := range clouds { + cloudNames = append(cloudNames, tag.Id()) + } + sort.Strings(cloudNames) + return result, errors.NewNotFound(err, fmt.Sprintf( + "cloud %q not found, expected one of %q", + cloudTag.Id(), cloudNames, + )) + } return result, errors.Annotate(err, "getting cloud definition") } - cloudCredentialName := args.CloudCredential - if cloudCredentialName == "" { - if ownerTag.Canonical() == controllerModel.Owner().Canonical() { - cloudCredentialName = controllerModel.CloudCredential() + var cloudCredentialTag names.CloudCredentialTag + if args.CloudCredentialTag != "" { + var err error + cloudCredentialTag, err = names.ParseCloudCredentialTag(args.CloudCredentialTag) + if err != nil { + return result, errors.Trace(err) + } + } else { + if ownerTag == controllerModel.Owner() { + cloudCredentialTag, _ = controllerModel.CloudCredential() } else { // TODO(axw) check if the user has one and only one // cloud credential, and if so, use it? 
For now, we @@ -229,37 +268,26 @@ } } - cloudRegionName := args.CloudRegion - if cloudRegionName == "" { - cloudRegionName = controllerModel.CloudRegion() - } - var credential *jujucloud.Credential - if cloudCredentialName != "" { - ownerCredentials, err := mm.state.CloudCredentials(ownerTag, controllerModel.Cloud()) + if cloudCredentialTag != (names.CloudCredentialTag{}) { + credentialValue, err := m.state.CloudCredential(cloudCredentialTag) if err != nil { - return result, errors.Annotate(err, "getting credentials") + return result, errors.Annotate(err, "getting credential") } - elem, ok := ownerCredentials[cloudCredentialName] - if !ok { - return result, errors.NewNotValid(nil, fmt.Sprintf( - "no such credential %q", cloudCredentialName, - )) - } - credential = &elem + credential = &credentialValue } - cloudSpec, err := environs.MakeCloudSpec(cloud, cloudName, cloudRegionName, credential) + cloudSpec, err := environs.MakeCloudSpec(cloud, cloudTag.Id(), cloudRegionName, credential) if err != nil { return result, errors.Trace(err) } - controllerCfg, err := mm.state.ControllerConfig() + controllerCfg, err := m.state.ControllerConfig() if err != nil { return result, errors.Trace(err) } - newConfig, err := mm.newModelConfig(cloudSpec, args, controllerCfg.ControllerUUID(), controllerModel) + newConfig, err := m.newModelConfig(cloudSpec, args, controllerModel) if err != nil { return result, errors.Annotate(err, "failed to create config") } @@ -282,10 +310,10 @@ // NOTE: check the agent-version of the config, and if it is > the current // version, it is not supported, also check existing tools, and if we don't // have tools for that version, also die. 
- model, st, err := mm.state.NewModel(state.ModelArgs{ - CloudName: cloudName, + model, st, err := m.state.NewModel(state.ModelArgs{ + CloudName: cloudTag.Id(), CloudRegion: cloudRegionName, - CloudCredential: cloudCredentialName, + CloudCredential: cloudCredentialTag, Config: newConfig, Owner: ownerTag, StorageProviderRegistry: storageProviderRegistry, @@ -295,18 +323,26 @@ } defer st.Close() - return mm.getModelInfo(model.ModelTag()) + return m.getModelInfo(model.ModelTag()) } -func (mm *ModelManagerAPI) dumpModel(args params.Entity) (map[string]interface{}, error) { +func (m *ModelManagerAPI) dumpModel(args params.Entity) (map[string]interface{}, error) { modelTag, err := names.ParseModelTag(args.Tag) if err != nil { return nil, errors.Trace(err) } - st := mm.state + isModelAdmin, err := m.authorizer.HasPermission(permission.AdminAccess, modelTag) + if err != nil { + return nil, errors.Trace(err) + } + if !isModelAdmin && !m.isAdmin { + return nil, common.ErrPerm + } + + st := m.state if st.ModelTag() != modelTag { - st, err = mm.state.ForModel(modelTag) + st, err = m.state.ForModel(modelTag) if err != nil { if errors.IsNotFound(err) { return nil, errors.Trace(common.ErrBadId) @@ -316,21 +352,6 @@ defer st.Close() } - // Check model permissions if the user isn't a controller admin. - if !mm.isAdmin { - user, err := st.UserAccess(mm.apiUser, mm.state.ModelTag()) - if err != nil { - if errors.IsNotFound(err) { - return nil, errors.Trace(common.ErrPerm) - } - // Something weird went on. 
- return nil, errors.Trace(err) - } - if user.Access != description.AdminAccess { - return nil, errors.Trace(common.ErrPerm) - } - } - bytes, err := migration.ExportModel(st) if err != nil { return nil, errors.Trace(err) @@ -351,15 +372,62 @@ return out.(map[string]interface{}), nil } +func (m *ModelManagerAPI) dumpModelDB(args params.Entity) (map[string]interface{}, error) { + modelTag, err := names.ParseModelTag(args.Tag) + if err != nil { + return nil, errors.Trace(err) + } + + isModelAdmin, err := m.authorizer.HasPermission(permission.AdminAccess, modelTag) + if err != nil { + return nil, errors.Trace(err) + } + if !isModelAdmin && !m.isAdmin { + return nil, common.ErrPerm + } + + st := m.state + if st.ModelTag() != modelTag { + st, err = m.state.ForModel(modelTag) + if err != nil { + if errors.IsNotFound(err) { + return nil, errors.Trace(common.ErrBadId) + } + return nil, errors.Trace(err) + } + defer st.Close() + } + + return st.DumpAll() +} + // DumpModels will export the models into the database agnostic // representation. The user needs to either be a controller admin, or have // admin privileges on the model itself. -func (mm *ModelManagerAPI) DumpModels(args params.Entities) params.MapResults { +func (m *ModelManagerAPI) DumpModels(args params.Entities) params.MapResults { + results := params.MapResults{ + Results: make([]params.MapResult, len(args.Entities)), + } + for i, entity := range args.Entities { + dumped, err := m.dumpModel(entity) + if err != nil { + results.Results[i].Error = common.ServerError(err) + continue + } + results.Results[i].Result = dumped + } + return results +} + +// DumpModelsDB will gather all documents from all model collections +// for the specified model. The map result contains a map of collection +// names to lists of documents represented as maps. 
+func (m *ModelManagerAPI) DumpModelsDB(args params.Entities) params.MapResults { results := params.MapResults{ Results: make([]params.MapResult, len(args.Entities)), } for i, entity := range args.Entities { - dumped, err := mm.dumpModel(entity) + dumped, err := m.dumpModelDB(entity) if err != nil { results.Results[i].Error = common.ServerError(err) continue @@ -373,7 +441,7 @@ // has access to in the current server. Only that controller owner // can list models for any user (at this stage). Other users // can only ask about their own models. -func (mm *ModelManagerAPI) ListModels(user params.Entity) (params.UserModelList, error) { +func (m *ModelManagerAPI) ListModels(user params.Entity) (params.UserModelList, error) { result := params.UserModelList{} userTag, err := names.ParseUserTag(user.Tag) @@ -381,12 +449,12 @@ return result, errors.Trace(err) } - err = mm.authCheck(userTag) + err = m.authCheck(userTag) if err != nil { return result, errors.Trace(err) } - models, err := mm.state.ModelsForUser(userTag) + models, err := m.state.ModelsForUser(userTag) if err != nil { return result, errors.Trace(err) } @@ -414,21 +482,36 @@ return result, nil } -// DestroyModel will try to destroy the current model. +// DestroyModels will try to destroy the specified models. // If there is a block on destruction, this method will return an error. -func (m *ModelManagerAPI) DestroyModel() error { - // Any user is able to delete their own model (until real fine - // grain permissions are available), and admins (the creator of the state - // server model) are able to delete models for other people. 
- model, err := m.state.Model() - if err != nil { - return errors.Trace(err) +func (m *ModelManagerAPI) DestroyModels(args params.Entities) (params.ErrorResults, error) { + results := params.ErrorResults{ + Results: make([]params.ErrorResult, len(args.Entities)), } - err = m.authCheck(model.Owner()) - if err != nil { - return errors.Trace(err) + + destroyModel := func(tag names.ModelTag) error { + model, err := m.state.GetModel(tag) + if err != nil { + return errors.Trace(err) + } + if err := m.authCheck(model.Owner()); err != nil { + return errors.Trace(err) + } + return errors.Trace(common.DestroyModel(m.state, model.ModelTag())) } - return errors.Trace(common.DestroyModel(m.state, model.ModelTag())) + + for i, arg := range args.Entities { + tag, err := names.ParseModelTag(arg.Tag) + if err != nil { + results.Results[i].Error = common.ServerError(err) + continue + } + if err := destroyModel(tag); err != nil { + results.Results[i].Error = common.ServerError(err) + continue + } + } + return results, nil } // ModelInfo returns information about the specified models. 
@@ -440,7 +523,7 @@ getModelInfo := func(arg params.Entity) (params.ModelInfo, error) { tag, err := names.ParseModelTag(arg.Tag) if err != nil { - return params.ModelInfo{}, err + return params.ModelInfo{}, errors.Trace(err) } return m.getModelInfo(tag) } @@ -461,7 +544,7 @@ if errors.IsNotFound(err) { return params.ModelInfo{}, common.ErrPerm } else if err != nil { - return params.ModelInfo{}, err + return params.ModelInfo{}, errors.Trace(err) } defer st.Close() @@ -469,39 +552,42 @@ if errors.IsNotFound(err) { return params.ModelInfo{}, common.ErrPerm } else if err != nil { - return params.ModelInfo{}, err + return params.ModelInfo{}, errors.Trace(err) } cfg, err := model.Config() if err != nil { - return params.ModelInfo{}, err + return params.ModelInfo{}, errors.Trace(err) } controllerCfg, err := st.ControllerConfig() if err != nil { - return params.ModelInfo{}, err + return params.ModelInfo{}, errors.Trace(err) } users, err := model.Users() if err != nil { - return params.ModelInfo{}, err + return params.ModelInfo{}, errors.Trace(err) } status, err := model.Status() if err != nil { - return params.ModelInfo{}, err + return params.ModelInfo{}, errors.Trace(err) } owner := model.Owner() info := params.ModelInfo{ - Name: cfg.Name(), - UUID: cfg.UUID(), - ControllerUUID: controllerCfg.ControllerUUID(), - OwnerTag: owner.String(), - Life: params.Life(model.Life().String()), - Status: common.EntityStatusFromState(status), - ProviderType: cfg.Type(), - DefaultSeries: config.PreferredSeries(cfg), - Cloud: model.Cloud(), - CloudRegion: model.CloudRegion(), - CloudCredential: model.CloudCredential(), + Name: cfg.Name(), + UUID: cfg.UUID(), + ControllerUUID: controllerCfg.ControllerUUID(), + OwnerTag: owner.String(), + Life: params.Life(model.Life().String()), + Status: common.EntityStatusFromState(status), + ProviderType: cfg.Type(), + DefaultSeries: config.PreferredSeries(cfg), + CloudTag: names.NewCloudTag(model.Cloud()).String(), + CloudRegion: model.CloudRegion(), + 
} + + if cloudCredentialTag, ok := model.CloudCredential(); ok { + info.CloudCredentialTag = cloudCredentialTag.String() } authorizedOwner := m.authCheck(owner) == nil @@ -526,6 +612,17 @@ return params.ModelInfo{}, common.ErrPerm } + canSeeMachines := authorizedOwner + if !canSeeMachines { + if canSeeMachines, err = m.hasWriteAccess(tag); err != nil { + return params.ModelInfo{}, errors.Trace(err) + } + } + if canSeeMachines { + if info.Machines, err = common.ModelMachineInfo(st); err != nil { + return params.ModelInfo{}, err + } + } return info, nil } @@ -534,51 +631,51 @@ result = params.ErrorResults{ Results: make([]params.ErrorResult, len(args.Changes)), } + + canModifyController, err := m.authorizer.HasPermission(permission.SuperuserAccess, m.state.ControllerTag()) + if err != nil { + return result, errors.Trace(err) + } if len(args.Changes) == 0 { return result, nil } for i, arg := range args.Changes { - modelAccess, err := FromModelAccessParam(arg.Access) - if err != nil { + modelAccess := permission.Access(arg.Access) + if err := permission.ValidateModelAccess(modelAccess); err != nil { err = errors.Annotate(err, "could not modify model access") result.Results[i].Error = common.ServerError(err) continue } - targetUserTag, err := names.ParseUserTag(arg.UserTag) + modelTag, err := names.ParseModelTag(arg.ModelTag) if err != nil { result.Results[i].Error = common.ServerError(errors.Annotate(err, "could not modify model access")) continue } - modelTag, err := names.ParseModelTag(arg.ModelTag) + canModifyModel, err := m.authorizer.HasPermission(permission.AdminAccess, modelTag) + if err != nil { + return result, errors.Trace(err) + } + canModify := canModifyController || canModifyModel + + if !canModify { + result.Results[i].Error = common.ServerError(common.ErrPerm) + continue + } + + targetUserTag, err := names.ParseUserTag(arg.UserTag) if err != nil { result.Results[i].Error = common.ServerError(errors.Annotate(err, "could not modify model access")) continue 
} result.Results[i].Error = common.ServerError( - ChangeModelAccess(m.state, modelTag, m.apiUser, targetUserTag, arg.Action, modelAccess, m.isAdmin)) + changeModelAccess(m.state, modelTag, m.apiUser, targetUserTag, arg.Action, modelAccess, m.isAdmin)) } return result, nil } -// resolveDescriptionAccess returns the state representation of the logical model -// access type. -func resolveDescriptionAccess(access permission.ModelAccess) (description.Access, error) { - var fail description.Access - switch access { - case permission.ModelAdminAccess: - return description.AdminAccess, nil - case permission.ModelReadAccess: - return description.ReadAccess, nil - case permission.ModelWriteAccess: - return description.WriteAccess, nil - } - logger.Errorf("invalid access permission: %+v", access) - return fail, errors.Errorf("invalid access permission") -} - func userAuthorizedToChangeAccess(st common.ModelManagerBackend, userIsAdmin bool, userTag names.UserTag) error { if userIsAdmin { // Just confirm that the model that has been given is a valid model. @@ -599,15 +696,15 @@ } return errors.Annotate(err, "could not retrieve user") } - if currentUser.Access != description.AdminAccess { + if currentUser.Access != permission.AdminAccess { return common.ErrPerm } return nil } -// ChangeModelAccess performs the requested access grant or revoke action for the +// changeModelAccess performs the requested access grant or revoke action for the // specified user on the specified model. 
-func ChangeModelAccess(accessor common.ModelManagerBackend, modelTag names.ModelTag, apiUser, targetUserTag names.UserTag, action params.ModelAction, access permission.ModelAccess, userIsAdmin bool) error { +func changeModelAccess(accessor common.ModelManagerBackend, modelTag names.ModelTag, apiUser, targetUserTag names.UserTag, action params.ModelAction, access permission.Access, userIsAdmin bool) error { st, err := accessor.ForModel(modelTag) if err != nil { return errors.Annotate(err, "could not lookup model") @@ -618,20 +715,11 @@ return errors.Trace(err) } - descriptionAccess, err := resolveDescriptionAccess(access) - if err != nil { - return errors.Annotate(err, "could not resolve model access") - } - - if descriptionAccess == description.UndefinedAccess { - return errors.NotValidf("changing model access to %q", description.UndefinedAccess) - } - switch action { case params.GrantModelAccess: - _, err = st.AddModelUser(state.UserAccessSpec{User: targetUserTag, CreatedBy: apiUser, Access: descriptionAccess}) + _, err = st.AddModelUser(modelTag.Id(), state.UserAccessSpec{User: targetUserTag, CreatedBy: apiUser, Access: access}) if errors.IsAlreadyExists(err) { - modelUser, err := st.UserAccess(targetUserTag, st.ModelTag()) + modelUser, err := st.UserAccess(targetUserTag, modelTag) if errors.IsNotFound(err) { // Conflicts with prior check, must be inconsistent state. err = txn.ErrExcessiveContention @@ -641,10 +729,10 @@ } // Only set access if greater access is being granted. 
- if modelUser.Access.EqualOrGreaterModelAccessThan(descriptionAccess) { - return errors.Errorf("user already has %q access or greater", descriptionAccess) + if modelUser.Access.EqualOrGreaterModelAccessThan(access) { + return errors.Errorf("user already has %q access or greater", access) } - if _, err = st.SetUserAccess(modelUser.UserTag, modelUser.Object, descriptionAccess); err != nil { + if _, err = st.SetUserAccess(modelUser.UserTag, modelUser.Object, access); err != nil { return errors.Annotate(err, "could not set model access for user") } return nil @@ -652,30 +740,30 @@ return errors.Annotate(err, "could not grant model access") case params.RevokeModelAccess: - switch descriptionAccess { - case description.ReadAccess: + switch access { + case permission.ReadAccess: // Revoking read access removes all access. - err := st.RemoveUserAccess(targetUserTag, st.ModelTag()) + err := st.RemoveUserAccess(targetUserTag, modelTag) return errors.Annotate(err, "could not revoke model access") - case description.WriteAccess: + case permission.WriteAccess: // Revoking write access sets read-only. - modelUser, err := st.UserAccess(targetUserTag, st.ModelTag()) + modelUser, err := st.UserAccess(targetUserTag, modelTag) if err != nil { return errors.Annotate(err, "could not look up model access for user") } - _, err = st.SetUserAccess(modelUser.UserTag, modelUser.Object, description.ReadAccess) + _, err = st.SetUserAccess(modelUser.UserTag, modelUser.Object, permission.ReadAccess) return errors.Annotate(err, "could not set model access to read-only") - case description.AdminAccess: + case permission.AdminAccess: // Revoking admin access sets read-write. 
- modelUser, err := st.UserAccess(targetUserTag, st.ModelTag()) + modelUser, err := st.UserAccess(targetUserTag, modelTag) if err != nil { return errors.Annotate(err, "could not look up model access for user") } - _, err = st.SetUserAccess(modelUser.UserTag, modelUser.Object, description.WriteAccess) + _, err = st.SetUserAccess(modelUser.UserTag, modelUser.Object, permission.WriteAccess) return errors.Annotate(err, "could not set model access to read-write") default: - return errors.Errorf("don't know how to revoke %q access", descriptionAccess) + return errors.Errorf("don't know how to revoke %q access", access) } default: @@ -683,16 +771,111 @@ } } -// FromModelAccessParam returns the logical model access type from the API wireformat type. -func FromModelAccessParam(paramAccess params.UserAccessPermission) (permission.ModelAccess, error) { - var fail permission.ModelAccess - switch paramAccess { - case params.ModelReadAccess: - return permission.ModelReadAccess, nil - case params.ModelWriteAccess: - return permission.ModelWriteAccess, nil - case params.ModelAdminAccess: - return permission.ModelAdminAccess, nil +// ModelDefaults returns the default config values used when creating a new model. +func (m *ModelManagerAPI) ModelDefaults() (params.ModelDefaultsResult, error) { + result := params.ModelDefaultsResult{} + if !m.isAdmin { + return result, common.ErrPerm + } + + values, err := m.state.ModelConfigDefaultValues() + if err != nil { + return result, errors.Trace(err) + } + result.Config = make(map[string]params.ModelDefaults) + for attr, val := range values { + settings := params.ModelDefaults{ + Controller: val.Controller, + Default: val.Default, + } + for _, v := range val.Regions { + settings.Regions = append( + settings.Regions, params.RegionDefaults{ + RegionName: v.Name, + Value: v.Value}) + } + result.Config[attr] = settings + } + return result, nil +} + +// SetModelDefaults writes new values for the specified default model settings. 
+func (m *ModelManagerAPI) SetModelDefaults(args params.SetModelDefaults) (params.ErrorResults, error) { + results := params.ErrorResults{Results: make([]params.ErrorResult, len(args.Config))} + if err := m.check.ChangeAllowed(); err != nil { + return results, errors.Trace(err) + } + for i, arg := range args.Config { + results.Results[i].Error = common.ServerError( + m.setModelDefaults(arg), + ) + } + return results, nil +} + +func (m *ModelManagerAPI) setModelDefaults(args params.ModelDefaultValues) error { + if !m.isAdmin { + return common.ErrPerm + } + + if err := m.check.ChangeAllowed(); err != nil { + return errors.Trace(err) + } + // Make sure we don't allow changing agent-version. + if _, found := args.Config["agent-version"]; found { + return errors.New("agent-version cannot have a default value") + } + + var rspec *environs.RegionSpec + if args.CloudRegion != "" { + spec, err := m.makeRegionSpec(args.CloudTag, args.CloudRegion) + if err != nil { + return errors.Trace(err) + } + rspec = spec + } + return m.state.UpdateModelConfigDefaultValues(args.Config, nil, rspec) +} + +// UnsetModelDefaults removes the specified default model settings. 
+func (m *ModelManagerAPI) UnsetModelDefaults(args params.UnsetModelDefaults) (params.ErrorResults, error) { + results := params.ErrorResults{Results: make([]params.ErrorResult, len(args.Keys))} + if !m.isAdmin { + return results, common.ErrPerm + } + + if err := m.check.ChangeAllowed(); err != nil { + return results, errors.Trace(err) + } + + for i, arg := range args.Keys { + var rspec *environs.RegionSpec + if arg.CloudRegion != "" { + spec, err := m.makeRegionSpec(arg.CloudTag, arg.CloudRegion) + if err != nil { + results.Results[i].Error = common.ServerError( + errors.Trace(err)) + continue + } + rspec = spec + } + results.Results[i].Error = common.ServerError( + m.state.UpdateModelConfigDefaultValues(nil, arg.Keys, rspec), + ) + } + return results, nil +} + +// makeRegionSpec is a helper method for methods that call +// state.UpdateModelConfigDefaultValues. +func (m *ModelManagerAPI) makeRegionSpec(cloudTag, r string) (*environs.RegionSpec, error) { + cTag, err := names.ParseCloudTag(cloudTag) + if err != nil { + return nil, errors.Trace(err) + } + rspec, err := environs.NewRegionSpec(cTag.Id(), r) + if err != nil { + return nil, errors.Trace(err) } - return fail, errors.Errorf("invalid model access permission %q", paramAccess) + return rspec, nil } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/modelmanager/modelmanager_test.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/modelmanager/modelmanager_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/modelmanager/modelmanager_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/modelmanager/modelmanager_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -19,10 +19,10 @@ "github.com/juju/juju/apiserver/params" apiservertesting "github.com/juju/juju/apiserver/testing" "github.com/juju/juju/cloud" - "github.com/juju/juju/core/description" "github.com/juju/juju/environs" "github.com/juju/juju/environs/config" jujutesting 
"github.com/juju/juju/juju/testing" + "github.com/juju/juju/permission" "github.com/juju/juju/state/stateenvirons" "github.com/juju/juju/status" jujuversion "github.com/juju/juju/version" @@ -44,7 +44,7 @@ type modelManagerSuite struct { gitjujutesting.IsolationSuite - st mockState + st *mockState authoriser apiservertesting.FakeAuthorizer api *modelmanager.ModelManagerAPI } @@ -59,30 +59,35 @@ cfg, err := config.New(config.UseDefaults, attrs) c.Assert(err, jc.ErrorIsNil) - s.st = mockState{ - uuid: coretesting.ModelTag.Id(), - cloud: cloud.Cloud{ - Type: "dummy", - AuthTypes: []cloud.AuthType{cloud.EmptyAuthType}, - Regions: []cloud.Region{ - {Name: "some-region"}, - {Name: "qux"}, - }, + dummyCloud := cloud.Cloud{ + Type: "dummy", + AuthTypes: []cloud.AuthType{cloud.EmptyAuthType}, + Regions: []cloud.Region{ + {Name: "some-region"}, + {Name: "qux"}, + }, + } + + s.st = &mockState{ + modelUUID: coretesting.ModelTag.Id(), + cloud: dummyCloud, + clouds: map[names.CloudTag]cloud.Cloud{ + names.NewCloudTag("some-cloud"): dummyCloud, }, controllerModel: &mockModel{ owner: names.NewUserTag("admin@local"), life: state.Alive, cfg: cfg, status: status.StatusInfo{ - Status: status.StatusAvailable, + Status: status.Available, Since: &time.Time{}, }, users: []*mockModelUser{{ userName: "admin", - access: description.AdminAccess, + access: permission.AdminAccess, }, { userName: "otheruser", - access: description.AdminAccess, + access: permission.WriteAccess, }}, }, model: &mockModel{ @@ -91,47 +96,80 @@ tag: coretesting.ModelTag, cfg: cfg, status: status.StatusInfo{ - Status: status.StatusAvailable, + Status: status.Available, Since: &time.Time{}, }, users: []*mockModelUser{{ userName: "admin", - access: description.AdminAccess, + access: permission.AdminAccess, }, { userName: "otheruser", - access: description.AdminAccess, + access: permission.WriteAccess, }}, }, - creds: map[string]cloud.Credential{ - "some-credential": cloud.NewEmptyCredential(), + cred: 
cloud.NewEmptyCredential(), + cfgDefaults: config.ModelDefaultAttributes{ + "attr": config.AttributeDefaultValues{ + Default: "", + Controller: "val", + Regions: []config.RegionDefaultValue{{ + Name: "dummy", + Value: "val++"}}}, + "attr2": config.AttributeDefaultValues{ + Controller: "val3", + Default: "val2", + Regions: []config.RegionDefaultValue{{ + Name: "left", + Value: "spam"}}}, }, } s.authoriser = apiservertesting.FakeAuthorizer{ Tag: names.NewUserTag("admin@local"), } - api, err := modelmanager.NewModelManagerAPI(&s.st, nil, s.authoriser) + api, err := modelmanager.NewModelManagerAPI(s.st, nil, s.authoriser) c.Assert(err, jc.ErrorIsNil) s.api = api } +func (s *modelManagerSuite) setAPIUser(c *gc.C, user names.UserTag) { + s.authoriser.Tag = user + mm, err := modelmanager.NewModelManagerAPI(s.st, nil, s.authoriser) + c.Assert(err, jc.ErrorIsNil) + s.api = mm +} + +func (s *modelManagerSuite) getModelArgs(c *gc.C) state.ModelArgs { + for _, v := range s.st.Calls() { + if v.Args == nil { + continue + } + if newModelArgs, ok := v.Args[0].(state.ModelArgs); ok { + return newModelArgs + } + } + c.Fatal("failed to find state.ModelArgs") + panic("unreachable") +} + func (s *modelManagerSuite) TestCreateModelArgs(c *gc.C) { args := params.ModelCreateArgs{ Name: "foo", - OwnerTag: "user-admin@local", + OwnerTag: "user-admin", Config: map[string]interface{}{ "bar": "baz", }, - CloudRegion: "qux", - CloudCredential: "some-credential", + CloudRegion: "qux", + CloudCredentialTag: "cloudcred-some-cloud_admin_some-credential", } _, err := s.api.CreateModel(args) c.Assert(err, jc.ErrorIsNil) s.st.CheckCallNames(c, - "IsControllerAdministrator", + "ControllerTag", "ModelUUID", + "ControllerTag", "ControllerModel", "Cloud", - "CloudCredentials", + "CloudCredential", "ControllerConfig", "ComposeNewModelConfig", "NewModel", @@ -140,14 +178,15 @@ "ControllerConfig", "LastModelConnection", "LastModelConnection", - "Close", // close new model's state - "Close", // close 
controller model's state + "AllMachines", + "Close", + "Close", ) // We cannot predict the UUID, because it's generated, // so we just extract it and ensure that it's not the // same as the controller UUID. - newModelArgs := s.st.Calls()[7].Args[0].(state.ModelArgs) + newModelArgs := s.getModelArgs(c) uuid := newModelArgs.Config.UUID() c.Assert(uuid, gc.Not(gc.Equals), s.st.controllerModel.cfg.UUID()) @@ -168,28 +207,59 @@ newModelArgs.StorageProviderRegistry = nil c.Assert(newModelArgs, jc.DeepEquals, state.ModelArgs{ - Owner: names.NewUserTag("admin@local"), - CloudName: "some-cloud", - CloudRegion: "qux", - CloudCredential: "some-credential", - Config: cfg, + Owner: names.NewUserTag("admin@local"), + CloudName: "some-cloud", + CloudRegion: "qux", + CloudCredential: names.NewCloudCredentialTag( + "some-cloud/admin/some-credential", + ), + Config: cfg, }) } +func (s *modelManagerSuite) TestCreateModelArgsWithCloud(c *gc.C) { + args := params.ModelCreateArgs{ + Name: "foo", + OwnerTag: "user-admin", + Config: map[string]interface{}{ + "bar": "baz", + }, + CloudTag: "cloud-some-cloud", + CloudRegion: "qux", + CloudCredentialTag: "cloudcred-some-cloud_admin_some-credential", + } + _, err := s.api.CreateModel(args) + c.Assert(err, jc.ErrorIsNil) + + newModelArgs := s.getModelArgs(c) + c.Assert(newModelArgs.CloudName, gc.Equals, "some-cloud") +} + +func (s *modelManagerSuite) TestCreateModelArgsWithCloudNotFound(c *gc.C) { + s.st.SetErrors(nil, errors.NotFoundf("cloud")) + args := params.ModelCreateArgs{ + Name: "foo", + OwnerTag: "user-admin", + CloudTag: "cloud-some-unknown-cloud", + } + _, err := s.api.CreateModel(args) + c.Assert(err, gc.ErrorMatches, `cloud "some-unknown-cloud" not found, expected one of \["some-cloud"\]`) +} + func (s *modelManagerSuite) TestCreateModelDefaultRegion(c *gc.C) { args := params.ModelCreateArgs{ Name: "foo", - OwnerTag: "user-admin@local", + OwnerTag: "user-admin", } _, err := s.api.CreateModel(args) c.Assert(err, jc.ErrorIsNil) - 
newModelArgs := s.st.Calls()[7].Args[0].(state.ModelArgs) + newModelArgs := s.getModelArgs(c) c.Assert(newModelArgs.CloudRegion, gc.Equals, "some-region") } func (s *modelManagerSuite) TestCreateModelDefaultCredentialAdmin(c *gc.C) { - s.testCreateModelDefaultCredentialAdmin(c, "user-admin@local") + s.testCreateModelDefaultCredentialAdmin(c, "user-admin") } func (s *modelManagerSuite) TestCreateModelDefaultCredentialAdminNoDomain(c *gc.C) { @@ -205,40 +275,203 @@ _, err := s.api.CreateModel(args) c.Assert(err, jc.ErrorIsNil) - newModelArgs := s.st.Calls()[7].Args[0].(state.ModelArgs) - c.Assert(newModelArgs.CloudCredential, gc.Equals, "some-credential") + newModelArgs := s.getModelArgs(c) + c.Assert(newModelArgs.CloudCredential, gc.Equals, names.NewCloudCredentialTag( + "some-cloud/bob/some-credential", + )) } func (s *modelManagerSuite) TestCreateModelEmptyCredentialNonAdmin(c *gc.C) { args := params.ModelCreateArgs{ Name: "foo", - OwnerTag: "user-bob@local", + OwnerTag: "user-bob", } _, err := s.api.CreateModel(args) c.Assert(err, jc.ErrorIsNil) - newModelArgs := s.st.Calls()[6].Args[0].(state.ModelArgs) - c.Assert(newModelArgs.CloudCredential, gc.Equals, "") + newModelArgs := s.getModelArgs(c) + c.Assert(newModelArgs.CloudCredential, gc.Equals, names.CloudCredentialTag{}) } func (s *modelManagerSuite) TestCreateModelNoDefaultCredentialNonAdmin(c *gc.C) { s.st.cloud.AuthTypes = nil args := params.ModelCreateArgs{ Name: "foo", - OwnerTag: "user-bob@local", + OwnerTag: "user-bob", } _, err := s.api.CreateModel(args) c.Assert(err, gc.ErrorMatches, "no credential specified") } func (s *modelManagerSuite) TestCreateModelUnknownCredential(c *gc.C) { + s.st.SetErrors(nil, nil, errors.NotFoundf("credential")) args := params.ModelCreateArgs{ - Name: "foo", - OwnerTag: "user-admin@local", - CloudCredential: "bar", + Name: "foo", + OwnerTag: "user-admin", + CloudCredentialTag: "cloudcred-some-cloud_admin_bar", } _, err := s.api.CreateModel(args) - c.Assert(err, 
gc.ErrorMatches, `no such credential "bar"`) + c.Assert(err, gc.ErrorMatches, `getting credential: credential not found`) +} + +func (s *modelManagerSuite) TestModelDefaults(c *gc.C) { + result, err := s.api.ModelDefaults() + c.Assert(err, jc.ErrorIsNil) + expectedValues := map[string]params.ModelDefaults{ + "attr": { + Controller: "val", + Default: "", + Regions: []params.RegionDefaults{{ + RegionName: "dummy", + Value: "val++"}}}, + "attr2": { + Controller: "val3", + Default: "val2", + Regions: []params.RegionDefaults{{ + RegionName: "left", + Value: "spam"}}}, + } + c.Assert(result.Config, jc.DeepEquals, expectedValues) +} + +func (s *modelManagerSuite) TestSetModelDefaults(c *gc.C) { + params := params.SetModelDefaults{ + Config: []params.ModelDefaultValues{{ + Config: map[string]interface{}{ + "attr3": "val3", + "attr4": "val4"}, + }}} + result, err := s.api.SetModelDefaults(params) + c.Assert(err, jc.ErrorIsNil) + c.Assert(result.OneError(), jc.ErrorIsNil) + c.Assert(s.st.cfgDefaults, jc.DeepEquals, config.ModelDefaultAttributes{ + "attr": { + Controller: "val", + Default: "", + Regions: []config.RegionDefaultValue{{ + Name: "dummy", + Value: "val++"}}}, + "attr2": { + Controller: "val3", + Default: "val2", + Regions: []config.RegionDefaultValue{{ + Name: "left", + Value: "spam"}}}, + "attr3": {Controller: "val3"}, + "attr4": {Controller: "val4"}, + }) +} + +func (s *modelManagerSuite) blockAllChanges(c *gc.C, msg string) { + s.st.blockMsg = msg + s.st.block = state.ChangeBlock +} + +func (s *modelManagerSuite) assertBlocked(c *gc.C, err error, msg string) { + c.Assert(params.IsCodeOperationBlocked(err), jc.IsTrue, gc.Commentf("error: %#v", err)) + c.Assert(errors.Cause(err), jc.DeepEquals, ¶ms.Error{ + Message: msg, + Code: "operation is blocked", + }) +} + +func (s *modelManagerSuite) TestBlockChangesSetModelDefaults(c *gc.C) { + s.blockAllChanges(c, "TestBlockChangesSetModelDefaults") + _, err := s.api.SetModelDefaults(params.SetModelDefaults{}) + 
s.assertBlocked(c, err, "TestBlockChangesSetModelDefaults") +} + +func (s *modelManagerSuite) TestUnsetModelDefaults(c *gc.C) { + args := params.UnsetModelDefaults{ + Keys: []params.ModelUnsetKeys{{ + Keys: []string{"attr"}, + }}} + result, err := s.api.UnsetModelDefaults(args) + c.Assert(err, jc.ErrorIsNil) + c.Assert(result.OneError(), jc.ErrorIsNil) + want := config.ModelDefaultAttributes{ + "attr": config.AttributeDefaultValues{ + Regions: []config.RegionDefaultValue{ + config.RegionDefaultValue{ + Name: "dummy", + Value: "val++"}, + }}, + "attr2": config.AttributeDefaultValues{ + Default: "val2", + Controller: "val3", + Regions: []config.RegionDefaultValue{ + config.RegionDefaultValue{ + Name: "left", + Value: "spam"}}}} + c.Assert(s.st.cfgDefaults, jc.DeepEquals, want) +} + +func (s *modelManagerSuite) TestBlockUnsetModelDefaults(c *gc.C) { + s.blockAllChanges(c, "TestBlockUnsetModelDefaults") + args := params.UnsetModelDefaults{ + Keys: []params.ModelUnsetKeys{{ + Keys: []string{"abc"}, + }}} + _, err := s.api.UnsetModelDefaults(args) + s.assertBlocked(c, err, "TestBlockUnsetModelDefaults") +} + +func (s *modelManagerSuite) TestUnsetModelDefaultsMissing(c *gc.C) { + // It's okay to unset a non-existent attribute. 
+ args := params.UnsetModelDefaults{ + Keys: []params.ModelUnsetKeys{{ + Keys: []string{"not there"}, + }}} + result, err := s.api.UnsetModelDefaults(args) + c.Assert(err, jc.ErrorIsNil) + c.Assert(result.OneError(), jc.ErrorIsNil) +} + +func (s *modelManagerSuite) TestModelDefaultsAsNormalUser(c *gc.C) { + s.setAPIUser(c, names.NewUserTag("charlie@local")) + got, err := s.api.ModelDefaults() + c.Assert(err, gc.ErrorMatches, "permission denied") + c.Assert(got, gc.DeepEquals, params.ModelDefaultsResult{}) +} + +func (s *modelManagerSuite) TestSetModelDefaultsAsNormalUser(c *gc.C) { + s.setAPIUser(c, names.NewUserTag("charlie@local")) + got, err := s.api.SetModelDefaults(params.SetModelDefaults{ + Config: []params.ModelDefaultValues{{ + Config: map[string]interface{}{ + "ftp-proxy": "http://charlie", + }}}}) + c.Assert(err, jc.ErrorIsNil) + c.Assert(got, jc.DeepEquals, params.ErrorResults{ + Results: []params.ErrorResult{ + params.ErrorResult{ + Error: ¶ms.Error{ + Message: "permission denied", + Code: "unauthorized access"}}}}) + + // Make sure it didn't change. + s.setAPIUser(c, names.NewUserTag("admin@local")) + cfg, err := s.api.ModelDefaults() + c.Assert(err, jc.ErrorIsNil) + c.Assert(cfg.Config["ftp-proxy"].Controller, gc.IsNil) +} + +func (s *modelManagerSuite) TestUnsetModelDefaultsAsNormalUser(c *gc.C) { + s.setAPIUser(c, names.NewUserTag("charlie@local")) + got, err := s.api.UnsetModelDefaults(params.UnsetModelDefaults{ + Keys: []params.ModelUnsetKeys{{ + Keys: []string{"attr2"}}}}) + c.Assert(err, gc.ErrorMatches, "permission denied") + c.Assert(got, gc.DeepEquals, params.ErrorResults{ + Results: []params.ErrorResult{ + params.ErrorResult{ + Error: nil}}}) + + // Make sure it didn't change. 
+ s.setAPIUser(c, names.NewUserTag("admin@local")) + cfg, err := s.api.ModelDefaults() + c.Assert(err, jc.ErrorIsNil) + c.Assert(cfg.Config["attr2"].Controller.(string), gc.Equals, "val3") } func (s *modelManagerSuite) TestDumpModel(c *gc.C) { @@ -283,26 +516,78 @@ c.Check(result.Error.Message, gc.Equals, `id not found`) } -func (s *modelManagerSuite) TestDumpModelMissingUser(c *gc.C) { - s.st.SetErrors(nil, errors.New("boom")) - - authoriser := apiservertesting.FakeAuthorizer{ - Tag: names.NewUserTag("other@local"), +func (s *modelManagerSuite) TestDumpModelUsers(c *gc.C) { + models := params.Entities{[]params.Entity{{Tag: s.st.ModelTag().String()}}} + for _, user := range []names.UserTag{ + names.NewUserTag("otheruser"), + names.NewUserTag("unknown"), + } { + s.setAPIUser(c, user) + results := s.api.DumpModels(models) + c.Assert(results.Results, gc.HasLen, 1) + result := results.Results[0] + c.Assert(result.Result, gc.IsNil) + c.Assert(result.Error, gc.NotNil) + c.Check(result.Error.Message, gc.Equals, `permission denied`) } - api, err := modelmanager.NewModelManagerAPI(&s.st, nil, authoriser) - c.Assert(err, jc.ErrorIsNil) +} - models := params.Entities{[]params.Entity{{Tag: s.st.ModelTag().String()}}} - results := api.DumpModels(models) +func (s *modelManagerSuite) TestDumpModelsDB(c *gc.C) { + results := s.api.DumpModelsDB(params.Entities{[]params.Entity{{ + Tag: "bad-tag", + }, { + Tag: "application-foo", + }, { + Tag: s.st.ModelTag().String(), + }}}) + + c.Assert(results.Results, gc.HasLen, 3) + bad, notApp, good := results.Results[0], results.Results[1], results.Results[2] + c.Check(bad.Result, gc.IsNil) + c.Check(bad.Error.Message, gc.Equals, `"bad-tag" is not a valid tag`) + + c.Check(notApp.Result, gc.IsNil) + c.Check(notApp.Error.Message, gc.Equals, `"application-foo" is not a valid model tag`) + + c.Check(good.Error, gc.IsNil) + c.Check(good.Result, jc.DeepEquals, map[string]interface{}{ + "models": "lots of data", + }) +} + +func (s 
*modelManagerSuite) TestDumpModelsDBMissingModel(c *gc.C) { + s.st.SetErrors(errors.NotFoundf("boom")) + tag := names.NewModelTag("deadbeef-0bad-400d-8000-4b1d0d06f000") + models := params.Entities{[]params.Entity{{Tag: tag.String()}}} + results := s.api.DumpModelsDB(models) calls := s.st.Calls() + c.Logf("%#v", calls) lastCall := calls[len(calls)-1] - c.Check(lastCall.FuncName, gc.Equals, "ModelUser") + c.Check(lastCall.FuncName, gc.Equals, "ForModel") + c.Assert(results.Results, gc.HasLen, 1) result := results.Results[0] c.Assert(result.Result, gc.IsNil) c.Assert(result.Error, gc.NotNil) - c.Check(result.Error.Message, gc.Equals, `boom`) + c.Check(result.Error.Code, gc.Equals, `not found`) + c.Check(result.Error.Message, gc.Equals, `id not found`) +} + +func (s *modelManagerSuite) TestDumpModelsDBUsers(c *gc.C) { + models := params.Entities{[]params.Entity{{Tag: s.st.ModelTag().String()}}} + for _, user := range []names.UserTag{ + names.NewUserTag("otheruser"), + names.NewUserTag("unknown"), + } { + s.setAPIUser(c, user) + results := s.api.DumpModelsDB(models) + c.Assert(results.Results, gc.HasLen, 1) + result := results.Results[0] + c.Assert(result.Result, gc.IsNil) + c.Assert(result.Error, gc.NotNil) + c.Check(result.Error.Message, gc.Equals, `permission denied`) + } } // modelManagerStateSuite contains end-to-end tests. 
@@ -525,10 +810,10 @@ c.Assert(result.UserModels, gc.HasLen, 0) } -func (s *modelManagerStateSuite) checkModelMatches(c *gc.C, env params.Model, expected *state.Model) { - c.Check(env.Name, gc.Equals, expected.Name()) - c.Check(env.UUID, gc.Equals, expected.UUID()) - c.Check(env.OwnerTag, gc.Equals, expected.Owner().String()) +func (s *modelManagerStateSuite) checkModelMatches(c *gc.C, model params.Model, expected *state.Model) { + c.Check(model.Name, gc.Equals, expected.Name()) + c.Check(model.UUID, gc.Equals, expected.UUID()) + c.Check(model.OwnerTag, gc.Equals, expected.Owner().String()) } func (s *modelManagerStateSuite) TestListModelsAdminSelf(c *gc.C) { @@ -588,8 +873,12 @@ ) c.Assert(err, jc.ErrorIsNil) - err = s.modelmanager.DestroyModel() + results, err := s.modelmanager.DestroyModels(params.Entities{ + Entities: []params.Entity{{"model-" + m.UUID}}, + }) c.Assert(err, jc.ErrorIsNil) + c.Assert(results.Results, gc.HasLen, 1) + c.Assert(results.Results[0].Error, gc.IsNil) model, err := st.Model() c.Assert(err, jc.ErrorIsNil) @@ -614,8 +903,13 @@ other := s.AdminUserTag(c) s.setAPIUser(c, other) - err = s.modelmanager.DestroyModel() + + results, err := s.modelmanager.DestroyModels(params.Entities{ + Entities: []params.Entity{{"model-" + m.UUID}}, + }) c.Assert(err, jc.ErrorIsNil) + c.Assert(results.Results, gc.HasLen, 1) + c.Assert(results.Results[0].Error, gc.IsNil) s.setAPIUser(c, owner) model, err := st.Model() @@ -623,7 +917,7 @@ c.Assert(model.Life(), gc.Not(gc.Equals), state.Alive) } -func (s *modelManagerStateSuite) TestUserDestroysOtherModelDenied(c *gc.C) { +func (s *modelManagerStateSuite) TestDestroyModelErrors(c *gc.C) { owner := names.NewUserTag("admin@local") s.setAPIUser(c, owner) m, err := s.modelmanager.CreateModel(s.createArgs(c, owner)) @@ -639,8 +933,31 @@ user := names.NewUserTag("other@remote") s.setAPIUser(c, user) - err = s.modelmanager.DestroyModel() - c.Assert(err, gc.ErrorMatches, "permission denied") + + results, err := 
s.modelmanager.DestroyModels(params.Entities{ + Entities: []params.Entity{ + {"model-" + m.UUID}, + {"model-9f484882-2f18-4fd2-967d-db9663db7bea"}, + {"machine-42"}, + }, + }) + c.Assert(err, jc.ErrorIsNil) + c.Assert(results.Results, jc.DeepEquals, []params.ErrorResult{{ + // we don't have admin access to the model + ¶ms.Error{ + Message: "permission denied", + Code: params.CodeUnauthorized, + }, + }, { + ¶ms.Error{ + Message: "model not found", + Code: params.CodeNotFound, + }, + }, { + ¶ms.Error{ + Message: `"machine-42" is not a valid model tag`, + }, + }}) s.setAPIUser(c, owner) model, err := st.Model() @@ -656,8 +973,11 @@ Access: access, ModelTag: model.String(), }}} + result, err := s.modelmanager.ModifyModelAccess(args) - c.Assert(err, jc.ErrorIsNil) + if err != nil { + return err + } return result.OneError() } @@ -691,14 +1011,14 @@ func (s *modelManagerStateSuite) TestRevokeAdminLeavesReadAccess(c *gc.C) { s.setAPIUser(c, s.AdminUserTag(c)) - user := s.Factory.MakeModelUser(c, &factory.ModelUserParams{Access: description.WriteAccess}) + user := s.Factory.MakeModelUser(c, &factory.ModelUserParams{Access: permission.WriteAccess}) err := s.revoke(c, user.UserTag, params.ModelWriteAccess, user.Object.(names.ModelTag)) c.Assert(err, gc.IsNil) modelUser, err := s.State.UserAccess(user.UserTag, user.Object) c.Assert(err, jc.ErrorIsNil) - c.Assert(modelUser.Access, gc.Equals, description.ReadAccess) + c.Assert(modelUser.Access, gc.Equals, permission.ReadAccess) } func (s *modelManagerStateSuite) TestRevokeReadRemovesModelUser(c *gc.C) { @@ -719,7 +1039,7 @@ user := names.NewUserTag("bob") err := s.revoke(c, user, params.ModelReadAccess, st.ModelTag()) - c.Assert(err, gc.ErrorMatches, `could not revoke model access: model user "bob@local" does not exist`) + c.Assert(err, gc.ErrorMatches, `could not revoke model access: model user "bob" does not exist`) _, err = st.UserAccess(user, st.ModelTag()) c.Assert(errors.IsNotFound(err), jc.IsTrue) @@ -738,13 +1058,20 @@ 
c.Assert(err, gc.ErrorMatches, `user already has "read" access or greater`) } -func (s *modelManagerStateSuite) assertNewUser(c *gc.C, modelUser description.UserAccess, userTag, creatorTag names.UserTag) { +func (s *modelManagerStateSuite) assertNewUser(c *gc.C, modelUser permission.UserAccess, userTag, creatorTag names.UserTag) { c.Assert(modelUser.UserTag, gc.Equals, userTag) c.Assert(modelUser.CreatedBy, gc.Equals, creatorTag) _, err := s.State.LastModelConnection(modelUser.UserTag) c.Assert(err, jc.Satisfies, state.IsNeverConnectedError) } +func (s *modelManagerStateSuite) assertModelAccess(c *gc.C, st *state.State) { + result, err := s.modelmanager.ModelInfo(params.Entities{Entities: []params.Entity{{Tag: st.ModelTag().String()}}}) + c.Assert(err, jc.ErrorIsNil) + c.Assert(result.Results, gc.HasLen, 1) + c.Assert(result.Results[0].Error, gc.IsNil) +} + func (s *modelManagerStateSuite) TestGrantModelAddLocalUser(c *gc.C) { user := s.Factory.MakeUser(c, &factory.UserParams{Name: "foobar", NoModelUser: true}) apiUser := s.AdminUserTag(c) @@ -758,7 +1085,9 @@ modelUser, err := st.UserAccess(user.UserTag(), st.ModelTag()) c.Assert(err, jc.ErrorIsNil) s.assertNewUser(c, modelUser, user.UserTag(), apiUser) - c.Assert(modelUser.Access, gc.Equals, description.ReadAccess) + c.Assert(modelUser.Access, gc.Equals, permission.ReadAccess) + s.setAPIUser(c, user.UserTag()) + s.assertModelAccess(c, st) } func (s *modelManagerStateSuite) TestGrantModelAddRemoteUser(c *gc.C) { @@ -775,7 +1104,9 @@ c.Assert(err, jc.ErrorIsNil) s.assertNewUser(c, modelUser, userTag, apiUser) - c.Assert(modelUser.Access, gc.Equals, description.ReadAccess) + c.Assert(modelUser.Access, gc.Equals, permission.ReadAccess) + s.setAPIUser(c, userTag) + s.assertModelAccess(c, st) } func (s *modelManagerStateSuite) TestGrantModelAddAdminUser(c *gc.C) { @@ -790,7 +1121,9 @@ modelUser, err := st.UserAccess(user.UserTag(), st.ModelTag()) c.Assert(err, jc.ErrorIsNil) s.assertNewUser(c, modelUser, 
user.UserTag(), apiUser) - c.Assert(modelUser.Access, gc.Equals, description.WriteAccess) + c.Assert(modelUser.Access, gc.Equals, permission.WriteAccess) + s.setAPIUser(c, user.UserTag()) + s.assertModelAccess(c, st) } func (s *modelManagerStateSuite) TestGrantModelIncreaseAccess(c *gc.C) { @@ -798,37 +1131,40 @@ st := s.Factory.MakeModel(c, nil) defer st.Close() stFactory := factory.NewFactory(st) - user := stFactory.MakeModelUser(c, &factory.ModelUserParams{Access: description.ReadAccess}) + user := stFactory.MakeModelUser(c, &factory.ModelUserParams{Access: permission.ReadAccess}) err := s.grant(c, user.UserTag, params.ModelWriteAccess, st.ModelTag()) c.Assert(err, jc.ErrorIsNil) modelUser, err := st.UserAccess(user.UserTag, st.ModelTag()) c.Assert(err, jc.ErrorIsNil) - c.Assert(modelUser.Access, gc.Equals, description.WriteAccess) + c.Assert(modelUser.Access, gc.Equals, permission.WriteAccess) } func (s *modelManagerStateSuite) TestGrantToModelNoAccess(c *gc.C) { - apiUser := names.NewUserTag("bob@remote") - s.setAPIUser(c, apiUser) - + s.setAPIUser(c, s.AdminUserTag(c)) st := s.Factory.MakeModel(c, nil) defer st.Close() + apiUser := names.NewUserTag("bob@remote") + s.setAPIUser(c, apiUser) + other := names.NewUserTag("other@remote") err := s.grant(c, other, params.ModelReadAccess, st.ModelTag()) c.Assert(err, gc.ErrorMatches, "permission denied") } func (s *modelManagerStateSuite) TestGrantToModelReadAccess(c *gc.C) { + s.setAPIUser(c, s.AdminUserTag(c)) + st := s.Factory.MakeModel(c, nil) + defer st.Close() + apiUser := names.NewUserTag("bob@remote") s.setAPIUser(c, apiUser) - st := s.Factory.MakeModel(c, nil) - defer st.Close() stFactory := factory.NewFactory(st) stFactory.MakeModelUser(c, &factory.ModelUserParams{ - User: apiUser.Canonical(), Access: description.ReadAccess}) + User: apiUser.Id(), Access: permission.ReadAccess}) other := names.NewUserTag("other@remote") err := s.grant(c, other, params.ModelReadAccess, st.ModelTag()) @@ -836,14 +1172,15 @@ } 
func (s *modelManagerStateSuite) TestGrantToModelWriteAccess(c *gc.C) { - apiUser := names.NewUserTag("bob@remote") - s.setAPIUser(c, apiUser) - + s.setAPIUser(c, s.AdminUserTag(c)) st := s.Factory.MakeModel(c, nil) defer st.Close() + + apiUser := names.NewUserTag("admin@remote") + s.setAPIUser(c, apiUser) stFactory := factory.NewFactory(st) stFactory.MakeModelUser(c, &factory.ModelUserParams{ - User: apiUser.Canonical(), Access: description.AdminAccess}) + User: apiUser.Id(), Access: permission.AdminAccess}) other := names.NewUserTag("other@remote") err := s.grant(c, other, params.ModelReadAccess, st.ModelTag()) @@ -852,7 +1189,7 @@ modelUser, err := st.UserAccess(other, st.ModelTag()) c.Assert(err, jc.ErrorIsNil) s.assertNewUser(c, modelUser, other, apiUser) - c.Assert(modelUser.Access, gc.Equals, description.ReadAccess) + c.Assert(modelUser.Access, gc.Equals, permission.ReadAccess) } func (s *modelManagerStateSuite) TestGrantModelInvalidUserTag(c *gc.C) { @@ -911,9 +1248,10 @@ args := params.ModifyModelAccessRequest{ Changes: []params.ModifyModelAccess{{ - UserTag: testParam.tag, - Action: params.GrantModelAccess, - Access: params.ModelReadAccess, + ModelTag: "model-deadbeef-0bad-400d-8000-4b1d0d06f00d", + UserTag: testParam.tag, + Action: params.GrantModelAccess, + Access: params.ModelReadAccess, }}} result, err := s.modelmanager.ModifyModelAccess(args) @@ -928,7 +1266,7 @@ result, err := s.modelmanager.ModifyModelAccess(args) c.Assert(err, jc.ErrorIsNil) - expectedErr := `could not modify model access: invalid model access permission ""` + expectedErr := `could not modify model access: "" model access not valid` c.Assert(result.OneError(), gc.ErrorMatches, expectedErr) } @@ -937,7 +1275,7 @@ var dance params.ModelAction = "dance" args := params.ModifyModelAccessRequest{ Changes: []params.ModifyModelAccess{{ - UserTag: "user-user@local", + UserTag: "user-user", Action: dance, Access: params.ModelReadAccess, ModelTag: s.State.ModelTag().String(), diff -Nru 
juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/observer/request_notifier.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/observer/request_notifier.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/observer/request_notifier.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/observer/request_notifier.go 2016-10-13 14:31:49.000000000 +0000 @@ -63,7 +63,7 @@ n.state.id = connectionID n.state.websocketConnected = n.clock.Now() - n.logger.Infof( + n.logger.Debugf( "[%X] API connection from %s", n.state.id, req.RemoteAddr, @@ -72,7 +72,7 @@ // Leave implements Observer. func (n *RequestObserver) Leave() { - n.logger.Infof( + n.logger.Debugf( "[%X] %s API connection terminated after %v", n.state.id, n.state.tag, diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/params/apierror.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/params/apierror.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/params/apierror.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/params/apierror.go 2016-10-13 14:31:49.000000000 +0000 @@ -62,6 +62,7 @@ CodeModelNotFound = "model not found" CodeUnauthorized = "unauthorized access" CodeLoginExpired = "login expired" + CodeNoCreds = "no credentials provided" CodeCannotEnterScope = "cannot enter scope" CodeCannotEnterScopeYet = "cannot enter scope yet" CodeExcessiveContention = "excessive contention" @@ -126,6 +127,10 @@ return ErrCode(err) == CodeUnauthorized } +func IsCodeNoCreds(err error) bool { + return ErrCode(err) == CodeNoCreds +} + func IsCodeLoginExpired(err error) bool { return ErrCode(err) == CodeLoginExpired } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/params/charms.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/params/charms.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/params/charms.go 2016-08-16 08:56:25.000000000 +0000 +++ 
juju-core-2.0.0/src/github.com/juju/juju/apiserver/params/charms.go 2016-10-13 14:31:49.000000000 +0000 @@ -111,7 +111,13 @@ Description string `json:"description"` } +// CharmPlan mirrors charm.Plan +type CharmPlan struct { + Required bool `json:"required"` +} + // CharmMetrics mirrors charm.Metrics. type CharmMetrics struct { Metrics map[string]CharmMetric `json:"metrics"` + Plan CharmPlan `json:"plan"` } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/params/cloud.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/params/cloud.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/params/cloud.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/params/cloud.go 2016-10-13 14:31:49.000000000 +0000 @@ -5,18 +5,20 @@ // Cloud holds information about a cloud. type Cloud struct { - Type string `json:"type"` - AuthTypes []string `json:"auth-types,omitempty"` - Endpoint string `json:"endpoint,omitempty"` - StorageEndpoint string `json:"storage-endpoint,omitempty"` - Regions []CloudRegion `json:"regions,omitempty"` + Type string `json:"type"` + AuthTypes []string `json:"auth-types,omitempty"` + Endpoint string `json:"endpoint,omitempty"` + IdentityEndpoint string `json:"identity-endpoint,omitempty"` + StorageEndpoint string `json:"storage-endpoint,omitempty"` + Regions []CloudRegion `json:"regions,omitempty"` } // CloudRegion holds information about a cloud region. type CloudRegion struct { - Name string `json:"name"` - Endpoint string `json:"endpoint,omitempty"` - StorageEndpoint string `json:"storage-endpoint,omitempty"` + Name string `json:"name"` + Endpoint string `json:"endpoint,omitempty"` + IdentityEndpoint string `json:"identity-endpoint,omitempty"` + StorageEndpoint string `json:"storage-endpoint,omitempty"` } // CloudResult contains a cloud definition or an error. @@ -30,22 +32,34 @@ Results []CloudResult `json:"results,omitempty"` } -// CloudCredential contains a cloud credential. 
+// CloudsResult contains a set of Clouds. +type CloudsResult struct { + // Clouds is a map of clouds, keyed by cloud tag. + Clouds map[string]Cloud `json:"clouds,omitempty"` +} + +// CloudCredential contains a cloud credential +// possibly with secrets redacted. type CloudCredential struct { - AuthType string `json:"auth-type"` + // AuthType is the authentication type. + AuthType string `json:"auth-type"` + + // Attributes contains non-secret credential values. Attributes map[string]string `json:"attrs,omitempty"` + + // Redacted is a list of redacted attributes + Redacted []string `json:"redacted,omitempty"` } -// CloudCredentialsResult contains a set of credentials for a user and cloud, -// or an error. -type CloudCredentialsResult struct { - Error *Error `json:"error,omitempty"` - Credentials map[string]CloudCredential `json:"credentials,omitempty"` +// CloudCredentialResult contains a CloudCredential or an error. +type CloudCredentialResult struct { + Result *CloudCredential `json:"result,omitempty"` + Error *Error `json:"error,omitempty"` } -// CloudCredentialsResults contains a set of CloudCredentialsResults. -type CloudCredentialsResults struct { - Results []CloudCredentialsResult `json:"results,omitempty"` +// CloudCredentialResults contains a set of CloudCredentialResults. +type CloudCredentialResults struct { + Results []CloudCredentialResult `json:"results,omitempty"` } // UserCloud contains a user/cloud tag pair, typically used for identifying @@ -55,50 +69,32 @@ CloudTag string `json:"cloud-tag"` } -// UserClouds contains a set of USerClouds. +// UserClouds contains a set of UserClouds. type UserClouds struct { UserClouds []UserCloud `json:"user-clouds,omitempty"` } -// UserCloudCredentials contains a user's credentials for a cloud. 
-type UserCloudCredentials struct { - UserTag string `json:"user-tag"` - CloudTag string `json:"cloud-tag"` - Credentials map[string]CloudCredential `json:"credentials"` -} - -// UsersCloudCredentials contains a set of UserCloudCredentials. -type UsersCloudCredentials struct { - Users []UserCloudCredentials `json:"users"` -} - -// CloudDefaults contains defaults for cloud name, region, and -// credential for a user. -type CloudDefaults struct { - CloudTag string `json:"cloud-tag"` - CloudRegion string `json:"region,omitempty"` - CloudCredential string `json:"credential,omitempty"` -} - -// CloudDefaultsResult contains a CloudDefaults or an error. -type CloudDefaultsResult struct { - Result *CloudDefaults `json:"result,omitempty"` - Error *Error `json:"error,omitempty"` +// UpdateCloudCredentials contains a set of tagged cloud credentials. +type UpdateCloudCredentials struct { + Credentials []UpdateCloudCredential `json:"credentials,omitempty"` } -// CloudDefaultsResults contains a set of CloudDefaultsResults. -type CloudDefaultsResults struct { - Results []CloudDefaultsResult `json:"results,omitempty"` +// UpdateCloudCredential contains a cloud credential and its tag, +// for updating in state. +type UpdateCloudCredential struct { + Tag string `json:"tag"` + Credential CloudCredential `json:"credential"` } // CloudSpec holds a cloud specification. 
type CloudSpec struct { - Type string `json:"type"` - Name string `json:"name"` - Region string `json:"region,omitempty"` - Endpoint string `json:"endpoint,omitempty"` - StorageEndpoint string `json:"storage-endpoint,omitempty"` - Credential *CloudCredential `json:"credential,omitempty"` + Type string `json:"type"` + Name string `json:"name"` + Region string `json:"region,omitempty"` + Endpoint string `json:"endpoint,omitempty"` + IdentityEndpoint string `json:"identity-endpoint,omitempty"` + StorageEndpoint string `json:"storage-endpoint,omitempty"` + Credential *CloudCredential `json:"credential,omitempty"` } // CloudSpecResult contains a CloudSpec or an error. diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/params/controller.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/params/controller.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/params/controller.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/params/controller.go 2016-10-13 14:31:49.000000000 +0000 @@ -35,14 +35,55 @@ // ModelStatus holds information about the status of a juju model. type ModelStatus struct { - ModelTag string `json:"model-tag"` - Life Life `json:"life"` - HostedMachineCount int `json:"hosted-machine-count"` - ApplicationCount int `json:"application-count"` - OwnerTag string `json:"owner-tag"` + ModelTag string `json:"model-tag"` + Life Life `json:"life"` + HostedMachineCount int `json:"hosted-machine-count"` + ApplicationCount int `json:"application-count"` + OwnerTag string `json:"owner-tag"` + Machines []ModelMachineInfo `json:"machines,omitempty"` } // ModelStatusResults holds status information about a group of models. type ModelStatusResults struct { Results []ModelStatus `json:"models"` } + +// ModifyControllerAccessRequest holds the parameters for making grant and revoke controller calls. 
+type ModifyControllerAccessRequest struct { + Changes []ModifyControllerAccess `json:"changes"` +} + +type ModifyControllerAccess struct { + UserTag string `json:"user-tag"` + Action ControllerAction `json:"action"` + Access string `json:"access"` +} + +// UserAccess holds the level of access a user +// has on a controller or model. +type UserAccess struct { + UserTag string `json:"user-tag"` + Access string `json:"access"` +} + +// UserAccessResult holds an access level for +// a user, or an error. +type UserAccessResult struct { + Result *UserAccess `json:"result,omitempty"` + Error *Error `json:"error,omitempty"` +} + +// UserAccessResults holds the results of an api +// call to look up access for users. +type UserAccessResults struct { + Results []UserAccessResult `json:"results,omitempty"` +} + +// ControllerAction is an action that can be performed on a model. +type ControllerAction string + +// Actions that can be preformed on a model. +const ( + GrantControllerAccess ControllerAction = "grant" + RevokeControllerAccess ControllerAction = "revoke" +) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/params/internal.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/params/internal.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/params/internal.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/params/internal.go 2016-10-13 14:31:49.000000000 +0000 @@ -111,18 +111,23 @@ // creation of the model. Config map[string]interface{} `json:"config,omitempty"` + // CloudTag is the tag of the cloud to create the model in. + // If this is empty, the model will be created in the same + // cloud as the controller model. + CloudTag string `json:"cloud-tag,omitempty"` + // CloudRegion is the name of the cloud region to create the // model in. If the cloud does not support regions, this must - // be empty. If this is empty, the model will be created in - // the same region as the controller model. 
+ // be empty. If this is empty, and CloudTag is empty, the model + // will be created in the same region as the controller model. CloudRegion string `json:"region,omitempty"` - // CloudCredential is the name of the cloud credential to use + // CloudCredentialTag is the tag of the cloud credential to use // for managing the model's resources. If the cloud does not // require credentials, this may be empty. If this is empty, // and the owner is the controller owner, the same credential // used for the controller model will be used. - CloudCredential string `json:"credential,omitempty"` + CloudCredentialTag string `json:"credential,omitempty"` } // Model holds the result of an API call returning a name and UUID @@ -707,3 +712,13 @@ // Version holds the Juju GUI version number. Version version.Number `json:"version"` } + +// LogMessage is a structured logging entry. +type LogMessage struct { + Entity string `json:"tag"` + Timestamp time.Time `json:"ts"` + Severity string `json:"sev"` + Module string `json:"mod"` + Location string `json:"loc"` + Message string `json:"msg"` +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/params/metrics.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/params/metrics.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/params/metrics.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/params/metrics.go 2016-10-13 14:31:49.000000000 +0000 @@ -30,9 +30,10 @@ Error *Error `json:"error,omitempty"` } -// MetricResults contains a single metric. +// MetricResult contains a single metric. 
type MetricResult struct { Time time.Time `json:"time"` Key string `json:"key"` Value string `json:"value"` + Unit string `json:"unit"` } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/params/migration.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/params/migration.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/params/migration.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/params/migration.go 2016-10-13 14:31:49.000000000 +0000 @@ -3,40 +3,51 @@ package params -import "time" +import ( + "time" -// InitiateModelMigrationArgs holds the details required to start one -// or more model migrations. -type InitiateModelMigrationArgs struct { - Specs []ModelMigrationSpec `json:"specs"` -} - -// ModelMigrationSpec holds the details required to start the -// migration of a single model. -type ModelMigrationSpec struct { - ModelTag string `json:"model-tag"` - TargetInfo ModelMigrationTargetInfo `json:"target-info"` -} + "github.com/juju/version" +) -// ModelMigrationTargetInfo holds the details required to connect to -// and authenticate with a remote controller for model migration. -type ModelMigrationTargetInfo struct { +// InitiateMigrationArgs holds the details required to start one or +// more model migrations. +type InitiateMigrationArgs struct { + Specs []MigrationSpec `json:"specs"` +} + +// MigrationSpec holds the details required to start the migration of +// a single model. +type MigrationSpec struct { + ModelTag string `json:"model-tag"` + TargetInfo MigrationTargetInfo `json:"target-info"` + ExternalControl bool `json:"external-control"` + + // SkipInitialPrechecks allows the migration prechecks run during + // handling of the InitiateMigration API call to be bypassed. It + // is only honoured if ExternalControl is true. 
+ SkipInitialPrechecks bool `json:"skip-initial-prechecks"` +} + +// MigrationTargetInfo holds the details required to connect to and +// authenticate with a remote controller for model migration. +type MigrationTargetInfo struct { ControllerTag string `json:"controller-tag"` Addrs []string `json:"addrs"` CACert string `json:"ca-cert"` AuthTag string `json:"auth-tag"` - Password string `json:"password"` + Password string `json:"password,omitempty"` + Macaroons string `json:"macaroons,omitempty"` } -// InitiateModelMigrationResults is used to return the result of one -// or more attempts to start model migrations. -type InitiateModelMigrationResults struct { - Results []InitiateModelMigrationResult `json:"results"` +// InitiateMigrationResults is used to return the result of one or +// more attempts to start model migrations. +type InitiateMigrationResults struct { + Results []InitiateMigrationResult `json:"results"` } -// InitiateModelMigrationResult is used to return the result of one -// model migration initiation attempt. -type InitiateModelMigrationResult struct { +// InitiateMigrationResult is used to return the result of one model +// migration initiation attempt. +type InitiateMigrationResult struct { ModelTag string `json:"model-tag"` Error *Error `json:"error,omitempty"` MigrationId string `json:"migration-id"` @@ -83,17 +94,27 @@ // model migration for the migrationmaster. It includes authentication // details for the remote controller. type MasterMigrationStatus struct { - Spec ModelMigrationSpec `json:"spec"` - MigrationId string `json:"migration-id"` - Phase string `json:"phase"` - PhaseChangedTime time.Time `json:"phase-changed-time"` + Spec MigrationSpec `json:"spec"` + MigrationId string `json:"migration-id"` + Phase string `json:"phase"` + PhaseChangedTime time.Time `json:"phase-changed-time"` +} + +// MigrationModelInfo is used to report basic model information to the +// migrationmaster worker. 
+type MigrationModelInfo struct { + UUID string `json:"uuid"` + Name string `json:"name"` + OwnerTag string `json:"owner-tag"` + AgentVersion version.Number `json:"agent-version"` } // MigrationStatus reports the current status of a model migration. type MigrationStatus struct { - MigrationId string `json:"migration-id"` - Attempt int `json:"attempt"` - Phase string `json:"phase"` + MigrationId string `json:"migration-id"` + Attempt int `json:"attempt"` + Phase string `json:"phase"` + ExternalControl bool `json:"external-control"` // TODO(mjs): I'm not convinced these Source fields will get used. SourceAPIAddrs []string `json:"source-api-addrs"` diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/params/model.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/params/model.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/params/model.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/params/model.go 2016-10-13 14:31:49.000000000 +0000 @@ -22,6 +22,44 @@ Config map[string]ConfigValue `json:"config"` } +// HostedModelConfig contains the model config and the cloud spec +// for the model, both things that a client needs to talk directly +// with the provider. This is used to take down mis-behaving models +// aggressively. +type HostedModelConfig struct { + Name string `json:"name"` + OwnerTag string `json:"owner"` + Config map[string]interface{} `json:"config,omitempty"` + CloudSpec *CloudSpec `json:"cloud-spec,omitempty"` + Error *Error `json:"error,omitempty"` +} + +// HostedModelConfigsResults contains an entry for each hosted model +// in the controller. +type HostedModelConfigsResults struct { + Models []HostedModelConfig `json:"models"` +} + +// ModelDefaultsResult contains the result of client API calls to get the +// model default values. 
+type ModelDefaultsResult struct { + Config map[string]ModelDefaults `json:"config"` +} + +// ModelDefaults holds the settings for a given ModelDefaultsResult config +// attribute. +type ModelDefaults struct { + Default interface{} `json:"default,omitempty"` + Controller interface{} `json:"controller,omitempty"` + Regions []RegionDefaults `json:"regions,omitempty"` +} + +// RegionDefaults contains the settings for regions in a ModelDefaults. +type RegionDefaults struct { + RegionName string `json:"region-name"` + Value interface{} `json:"value"` +} + // ModelSet contains the arguments for ModelSet client API // call. type ModelSet struct { @@ -34,6 +72,34 @@ Keys []string `json:"keys"` } +// SetModelDefaults contains the arguments for SetModelDefaults +// client API call. +type SetModelDefaults struct { + Config []ModelDefaultValues `json:"config"` +} + +// ModelDefaultValues contains the default model values for +// a cloud/region. +type ModelDefaultValues struct { + CloudTag string `json:"cloud-tag,omitempty"` + CloudRegion string `json:"cloud-region,omitempty"` + Config map[string]interface{} `json:"config"` +} + +// ModelUnsetKeys contains the config keys to unset for +// a cloud/region. +type ModelUnsetKeys struct { + CloudTag string `json:"cloud-tag,omitempty"` + CloudRegion string `json:"cloud-region,omitempty"` + Keys []string `json:"keys"` +} + +// UnsetModelDefaults contains the arguments for UnsetModelDefaults +// client API call. +type UnsetModelDefaults struct { + Keys []ModelUnsetKeys `json:"keys"` +} + // SetModelAgentVersion contains the arguments for // SetModelAgentVersion client API call. type SetModelAgentVersion struct { @@ -42,17 +108,14 @@ // ModelInfo holds information about the Juju model. type ModelInfo struct { - // The json names for the fields below are as per the older - // field names for backward compatibility. New fields are - // camel-cased for consistency within this type only. 
- Name string `json:"name"` - UUID string `json:"uuid"` - ControllerUUID string `json:"controller-uuid"` - ProviderType string `json:"provider-type"` - DefaultSeries string `json:"default-series"` - Cloud string `json:"cloud"` - CloudRegion string `json:"cloud-region,omitempty"` - CloudCredential string `json:"cloud-credential,omitempty"` + Name string `json:"name"` + UUID string `json:"uuid"` + ControllerUUID string `json:"controller-uuid"` + ProviderType string `json:"provider-type"` + DefaultSeries string `json:"default-series"` + CloudTag string `json:"cloud-tag"` + CloudRegion string `json:"cloud-region,omitempty"` + CloudCredentialTag string `json:"cloud-credential-tag,omitempty"` // OwnerTag is the tag of the user that owns the model. OwnerTag string `json:"owner-tag"` @@ -67,6 +130,11 @@ // to the model. Owners and administrators can see all users // that have access; other users can only see their own details. Users []ModelUserInfo `json:"users"` + + // Machines contains information about the machines in the model. + // This information is available to owners and users with write + // access or greater. + Machines []ModelMachineInfo `json:"machines"` } // ModelInfoResult holds the result of a ModelInfo call. @@ -98,6 +166,27 @@ Results []ModelInfoListResult `json:"results"` } +// ModelMachineInfo holds information about a machine in a model. +type ModelMachineInfo struct { + Id string `json:"id"` + Hardware *MachineHardware `json:"hardware,omitempty"` + InstanceId string `json:"instance-id,omitempty"` + Status string `json:"status,omitempty"` + HasVote bool `json:"has-vote,omitempty"` + WantsVote bool `json:"wants-vote,omitempty"` +} + +// MachineHardware holds information about a machine's hardware characteristics. 
+type MachineHardware struct { + Arch *string `json:"arch,omitempty"` + Mem *uint64 `json:"mem,omitempty"` + RootDisk *uint64 `json:"root-disk,omitempty"` + Cores *uint64 `json:"cores,omitempty"` + CpuPower *uint64 `json:"cpu-power,omitempty"` + Tags *[]string `json:"tags,omitempty"` + AvailabilityZone *string `json:"availability-zone,omitempty"` +} + // ModelUserInfo holds information on a user who has access to a // model. Owners of a model can see this information for all users // who have access, so it should not include sensitive information. diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/params/network.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/params/network.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/params/network.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/params/network.go 2016-10-13 14:31:49.000000000 +0000 @@ -138,6 +138,32 @@ Errors []error `json:"errors,omitempty"` } +// ProviderInterfaceInfoResults holds the results of a +// GetProviderInterfaceInfo call. +type ProviderInterfaceInfoResults struct { + Results []ProviderInterfaceInfoResult `json:"results"` +} + +// ProviderInterfaceInfoResult stores the provider interface +// information for one machine, or any error that occurred getting the +// information for that machine. +type ProviderInterfaceInfoResult struct { + MachineTag string `json:"machine-tag"` + Interfaces []ProviderInterfaceInfo `json:"interfaces"` + Error *Error `json:"error,omitempty"` +} + +// ProviderInterfaceInfo stores the details needed to identify an +// interface to a provider. It's the params equivalent of +// network.ProviderInterfaceInfo, defined here separately to ensure +// that API structures aren't inadvertently changed by internal +// changes. 
+type ProviderInterfaceInfo struct { + InterfaceName string `json:"interface-name"` + MACAddress string `json:"mac-address"` + ProviderId string `json:"provider-id"` +} + // Port encapsulates a protocol and port number. It is used in API // requests/responses. See also network.Port, from/to which this is // transformed. diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/params/params.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/params/params.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/params/params.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/params/params.go 2016-10-13 14:31:49.000000000 +0000 @@ -17,7 +17,6 @@ "github.com/juju/juju/constraints" "github.com/juju/juju/instance" - "github.com/juju/juju/mongo" "github.com/juju/juju/network" "github.com/juju/juju/state/multiwatcher" "github.com/juju/juju/storage" @@ -46,6 +45,19 @@ Entities []Entity `json:"entities"` } +// EntitiesResults contains multiple Entities results (where each +// Entities is the result of a query). +type EntitiesResults struct { + Results []EntitiesResult `json:"results"` +} + +// EntitiesResult is the result of one query that either yields some +// set of entities or an error. +type EntitiesResult struct { + Entities []Entity `json:"entities"` + Error *Error `json:"error,omitempty"` +} + // EntityPasswords holds the parameters for making a SetPasswords call. type EntityPasswords struct { Changes []EntityPassword `json:"changes"` @@ -203,7 +215,7 @@ type ApplicationDeploy struct { ApplicationName string `json:"application"` Series string `json:"series"` - CharmUrl string `json:"charm-url"` + CharmURL string `json:"charm-url"` Channel string `json:"channel"` NumUnits int `json:"num-units"` Config map[string]string `json:"config,omitempty"` @@ -218,8 +230,8 @@ // ApplicationUpdate holds the parameters for making the application Update call. 
type ApplicationUpdate struct { ApplicationName string `json:"application"` - CharmUrl string `json:"charm-url"` - ForceCharmUrl bool `json:"force-charm-url"` + CharmURL string `json:"charm-url"` + ForceCharmURL bool `json:"force-charm-url"` ForceSeries bool `json:"force-series"` MinUnits *int `json:"min-units,omitempty"` SettingsStrings map[string]string `json:"settings,omitempty"` @@ -231,18 +243,39 @@ type ApplicationSetCharm struct { // ApplicationName is the name of the application to set the charm on. ApplicationName string `json:"application"` - // CharmUrl is the new url for the charm. - CharmUrl string `json:"charm-url"` + + // CharmURL is the new url for the charm. + CharmURL string `json:"charm-url"` + // Channel is the charm store channel from which the charm came. Channel string `json:"channel"` + + // ConfigSettings is the charm settings to set during the upgrade. + // This field is only understood by Application facade version 2 + // and greater. + ConfigSettings map[string]string `json:"config-settings,omitempty"` + + // ConfigSettingsYAML is the charm settings in YAML format to set + // during the upgrade. If this is non-empty, it will take precedence + // over ConfigSettings. This field is only understood by Application + // facade version 2 + ConfigSettingsYAML string `json:"config-settings-yaml,omitempty"` + // ForceUnits forces the upgrade on units in an error state. ForceUnits bool `json:"force-units"` + // ForceSeries forces the use of the charm even if it doesn't match the // series of the unit. ForceSeries bool `json:"force-series"` + // ResourceIDs is a map of resource names to resource IDs to activate during // the upgrade. ResourceIDs map[string]string `json:"resource-ids,omitempty"` + + // StorageConstraints is a map of storage names to storage constraints to + // update during the upgrade. This field is only understood by Application + // facade version 2 and greater. 
+ StorageConstraints map[string]StorageConstraints `json:"storage-constraints,omitempty"` } // ApplicationExpose holds the parameters for making the application Expose call. @@ -577,14 +610,6 @@ Versions []int `json:"versions"` } -// LoginResult holds the result of a Login call. -type LoginResult struct { - Servers [][]HostPort `json:"servers"` - ModelTag string `json:"model-tag"` - LastConnection *time.Time `json:"last-connection"` - Facades []FacadeVersions `json:"facades"` -} - // RedirectInfoResult holds the result of a RedirectInfo call. type RedirectInfoResult struct { // Servers holds an entry for each server that holds the @@ -614,13 +639,16 @@ // the client, if any. Credentials *string `json:"credentials,omitempty"` - // ReadOnly holds whether the user has read-only access for the - // connected model. - ReadOnly bool `json:"read-only"` + // ControllerAccess holds the access the user has to the connected controller. + // It will be empty if the user has no access to the controller. + ControllerAccess string `json:"controller-access"` + + // ModelAccess holds the access the user has to the connected model. + ModelAccess string `json:"model-access"` } -// LoginResultV1 holds the result of an Admin v1 Login call. -type LoginResultV1 struct { +// LoginResult holds the result of an Admin Login call. +type LoginResult struct { // DischargeRequired implies that the login request has failed, and none of // the other fields are populated. It contains a macaroon which, when // discharged, will grant access on a subsequent call to Login. @@ -641,9 +669,8 @@ // ModelTag is the tag for the model that is being connected to. ModelTag string `json:"model-tag,omitempty"` - // ControllerTag is the tag for the model that holds the API servers. - // This is the initial model created when bootstrapping juju. - ControllerTag string `json:"server-tag,omitempty"` + // ControllerTag is the tag for the controller that runs the API servers. 
+ ControllerTag string `json:"controller-tag,omitempty"` // UserInfo describes the authenticated user, if any. UserInfo *AuthUserInfo `json:"user-info,omitempty"` @@ -660,7 +687,6 @@ // ControllersServersSpec contains arguments for // the EnableHA client API call. type ControllersSpec struct { - ModelTag string `json:"model-tag"` NumControllers int `json:"num-controllers"` Constraints constraints.Value `json:"constraints,omitempty"` // Series is the series to associate with new controller machines. @@ -776,24 +802,24 @@ Message string `json:"x"` } -// GetBundleChangesParams holds parameters for making GetBundleChanges calls. -type GetBundleChangesParams struct { +// BundleChangesParams holds parameters for making Bundle.GetChanges calls. +type BundleChangesParams struct { // BundleDataYAML is the YAML-encoded charm bundle data // (see "github.com/juju/charm.BundleData"). BundleDataYAML string `json:"yaml"` } -// GetBundleChangesResults holds results of the GetBundleChanges call. -type GetBundleChangesResults struct { +// BundleChangesResults holds results of the Bundle.GetChanges call. +type BundleChangesResults struct { // Changes holds the list of changes required to deploy the bundle. // It is omitted if the provided bundle YAML has verification errors. - Changes []*BundleChangesChange `json:"changes,omitempty"` + Changes []*BundleChange `json:"changes,omitempty"` // Errors holds possible bundle verification errors. Errors []string `json:"errors,omitempty"` } -// BundleChangesChange holds a single change required to deploy a bundle. -type BundleChangesChange struct { +// BundleChange holds a single change required to deploy a bundle. +type BundleChange struct { // Id is the unique identifier for this change. Id string `json:"id"` // Method is the action to be performed to apply this change. 
@@ -806,10 +832,17 @@ Requires []string `json:"requires"` } +type MongoVersion struct { + Major int `json:"major"` + Minor int `json:"minor"` + Patch string `json:"patch"` + StorageEngine string `json:"engine"` +} + // UpgradeMongoParams holds the arguments required to // enter upgrade mongo mode. type UpgradeMongoParams struct { - Target mongo.Version `json:"target"` + Target MongoVersion `json:"target"` } // HAMember holds information that identifies one member diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/params/params_test.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/params/params_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/params/params_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/params/params_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -43,11 +43,11 @@ Id: "Benji", InstanceId: "Shazam", AgentStatus: multiwatcher.StatusInfo{ - Current: status.StatusError, + Current: status.Error, Message: "foo", }, InstanceStatus: multiwatcher.StatusInfo{ - Current: status.StatusPending, + Current: status.Pending, }, Life: multiwatcher.Life("alive"), Series: "trusty", @@ -75,7 +75,7 @@ "foo": false, }, Status: multiwatcher.StatusInfo{ - Current: status.StatusActive, + Current: status.Active, Message: "all good", }, }, @@ -103,11 +103,11 @@ PrivateAddress: "10.0.0.1", MachineId: "1", WorkloadStatus: multiwatcher.StatusInfo{ - Current: status.StatusActive, + Current: status.Active, Message: "all good", }, AgentStatus: multiwatcher.StatusInfo{ - Current: status.StatusIdle, + Current: status.Idle, }, }, }, diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/params/registration.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/params/registration.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/params/registration.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/params/registration.go 2016-10-13 
14:31:49.000000000 +0000 @@ -3,10 +3,6 @@ package params -import ( - "gopkg.in/macaroon.v1" -) - // SecretKeyLoginRequest contains the parameters for completing // the registration of a user. The request contains the tag of // the user, and an encrypted and authenticated payload that @@ -57,8 +53,4 @@ // ControllerUUID is the UUID of the Juju controller. ControllerUUID string `json:"controller-uuid"` - - // Macaroon is a time-limited macaroon that can be used for - // authenticating as the registered user. - Macaroon *macaroon.Macaroon `json:"macaroon"` } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/params/status.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/params/status.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/params/status.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/params/status.go 2016-10-13 14:31:49.000000000 +0000 @@ -30,7 +30,7 @@ // ModelStatusInfo holds status information about the model itself. type ModelStatusInfo struct { Name string `json:"name"` - Cloud string `json:"cloud"` + CloudTag string `json:"cloud-tag"` CloudRegion string `json:"region,omitempty"` Version string `json:"version"` AvailableVersion string `json:"available-version"` @@ -42,15 +42,33 @@ AgentStatus DetailedStatus `json:"agent-status"` InstanceStatus DetailedStatus `json:"instance-status"` - DNSName string `json:"dns-name"` - InstanceId instance.Id `json:"instance-id"` - Series string `json:"series"` - Id string `json:"id"` - Containers map[string]MachineStatus `json:"containers"` - Hardware string `json:"hardware"` - Jobs []multiwatcher.MachineJob `json:"jobs"` - HasVote bool `json:"has-vote"` - WantsVote bool `json:"wants-vote"` + DNSName string `json:"dns-name"` + + // IPAddresses holds the IP addresses bound to this machine. + IPAddresses []string `json:"ip-addresses"` + + // InstanceId holds the unique identifier for this machine, based on + // what is supplied by the provider. 
+ InstanceId instance.Id `json:"instance-id"` + + // Series holds the name of the operating system release installed on + // this machine. + Series string `json:"series"` + + // Id is the Juju identifier for this machine in this model. + Id string `json:"id"` + + // Containers holds the MachineStatus of any containers hosted on this + // machine. + Containers map[string]MachineStatus `json:"containers"` + + // Hardware holds a string of space-separated key=value pairs of + // hardware specification datum. + Hardware string `json:"hardware"` + + Jobs []multiwatcher.MachineJob `json:"jobs"` + HasVote bool `json:"has-vote"` + WantsVote bool `json:"wants-vote"` } // ApplicationStatus holds status info about an application. @@ -80,7 +98,7 @@ // AgentStatus holds the status for a unit's agent. AgentStatus DetailedStatus `json:"agent-status"` - // WorkloadStatus holds the status for a unit's workload + // WorkloadStatus holds the status for a unit's workload. WorkloadStatus DetailedStatus `json:"workload-status"` WorkloadVersion string `json:"workload-version"` @@ -89,6 +107,7 @@ PublicAddress string `json:"public-address"` Charm string `json:"charm"` Subordinates map[string]UnitStatus `json:"subordinates"` + Leader bool `json:"leader,omitempty"` } // RelationStatus holds status info about a relation. @@ -100,7 +119,7 @@ Endpoints []EndpointStatus `json:"endpoints"` } -// EndpointStatus holds status info about a single endpoint +// EndpointStatus holds status info about a single endpoint. type EndpointStatus struct { ApplicationName string `json:"application"` Name string `json:"name"` @@ -126,7 +145,7 @@ Err error `json:"err,omitempty"` } -// History holds many DetailedStatus, +// History holds many DetailedStatus. 
type History struct { Statuses []DetailedStatus `json:"statuses"` Error *Error `json:"error,omitempty"` @@ -147,7 +166,7 @@ Tag string `json:"tag"` } -// StatusHistoryRequests holds a slice of StatusHistoryArgs +// StatusHistoryRequests holds a slice of StatusHistoryArgs. type StatusHistoryRequests struct { Requests []StatusHistoryRequest `json:"requests"` } @@ -187,7 +206,7 @@ Results []StatusResult `json:"results"` } -// ApplicationStatusResult holds results for an application Full Status +// ApplicationStatusResult holds results for an application Full Status. type ApplicationStatusResult struct { Application StatusResult `json:"application"` Units map[string]StatusResult `json:"units"` diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/params/usermanager.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/params/usermanager.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/params/usermanager.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/params/usermanager.go 2016-10-13 14:31:49.000000000 +0000 @@ -11,6 +11,7 @@ type UserInfo struct { Username string `json:"username"` DisplayName string `json:"display-name"` + Access string `json:"access"` CreatedBy string `json:"created-by"` DateCreated time.Time `json:"date-created"` LastConnection *time.Time `json:"last-connection,omitempty"` @@ -42,9 +43,8 @@ // AddUser stores the parameters to add one user. type AddUser struct { - Username string `json:"username"` - DisplayName string `json:"display-name"` - SharedModelTags []string `json:"shared-model-tags"` + Username string `json:"username"` + DisplayName string `json:"display-name"` // Password is optional. If it is empty, then // a secret key will be generated for the user @@ -52,9 +52,6 @@ // be possible to login with a password until // registration with the secret key is completed. 
Password string `json:"password,omitempty"` - - // ModelAccess is the permission that the user will have to access the models. - ModelAccess UserAccessPermission `json:"model-access-permission,omitempty"` } // AddUserResults holds the results of the bulk AddUser API call. diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/pinger.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/pinger.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/pinger.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/pinger.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,15 +4,15 @@ package apiserver import ( - "errors" "time" - "launchpad.net/tomb" + "github.com/juju/errors" + "github.com/juju/utils/clock" + "gopkg.in/tomb.v1" "github.com/juju/juju/apiserver/common" "github.com/juju/juju/apiserver/facade" "github.com/juju/juju/state" - "github.com/juju/utils/clock" ) func init() { diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/pinger_test.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/pinger_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/pinger_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/pinger_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -1,11 +1,6 @@ -// Copyright 2012, 2013 Canonical Ltd. +// Copyright 2012-2016 Canonical Ltd. // Licensed under the AGPLv3, see LICENCE file for details. -// TODO(wallyworld) bug http://pad.lv/1408459 -// Re-enable tests for i386 when these tests are fixed to work on that architecture. 
- -// +build !386 - package apiserver_test import ( @@ -13,46 +8,56 @@ "github.com/juju/errors" "github.com/juju/loggo" - gitjujutesting "github.com/juju/testing" + "github.com/juju/testing" jc "github.com/juju/testing/checkers" "github.com/juju/utils" gc "gopkg.in/check.v1" "github.com/juju/juju/api" "github.com/juju/juju/apiserver" - "github.com/juju/juju/juju/testing" "github.com/juju/juju/rpc" coretesting "github.com/juju/juju/testing" ) +// pingerSuite exercises the apiserver's ping timeout functionality +// from the outside. Ping API requests are made (or not) to a running +// API server to ensure that the server shuts down the API connection +// as expected once there's been no pings within the timeout period. type pingerSuite struct { - testing.JujuConnSuite + apiserverBaseSuite } var _ = gc.Suite(&pingerSuite{}) -var testPingPeriod = 100 * time.Millisecond +func (s *pingerSuite) newServerWithTestClock(c *gc.C) (*apiserver.Server, *testing.Clock) { + clock := testing.NewClock(time.Now()) + config := s.sampleConfig(c) + config.PingClock = clock + server := s.newServer(c, config) + return server, clock +} func (s *pingerSuite) TestConnectionBrokenDetection(c *gc.C) { - s.PatchValue(&api.PingPeriod, testPingPeriod) - - st, _ := s.OpenAPIAsNewMachine(c) + server, clock := s.newServerWithTestClock(c) + conn, _ := s.OpenAPIAsNewMachine(c, server) + clock.Advance(api.PingPeriod) // Connection still alive select { - case <-time.After(testPingPeriod): - case <-st.Broken(): + case <-conn.Broken(): c.Fatalf("connection should be alive still") + case <-time.After(coretesting.ShortWait): + // all good, connection still there } - // Close the connection and see if we detect this - go st.Close() + conn.Close() + clock.Advance(api.PingPeriod + time.Second) // Check it's detected select { - case <-time.After(testPingPeriod + time.Second): + case <-time.After(coretesting.ShortWait): c.Fatalf("connection not closed as expected") - case <-st.Broken(): + case <-conn.Broken(): 
return } } @@ -60,15 +65,13 @@ func (s *pingerSuite) TestPing(c *gc.C) { tw := &loggo.TestWriter{} c.Assert(loggo.RegisterWriter("ping-tester", tw), gc.IsNil) - defer loggo.RemoveWriter("ping-tester") - st, _ := s.OpenAPIAsNewMachine(c) - err := st.Ping() - c.Assert(err, jc.ErrorIsNil) - err = st.Close() - c.Assert(err, jc.ErrorIsNil) - err = st.Ping() - c.Assert(errors.Cause(err), gc.Equals, rpc.ErrShutdown) + server, _ := s.newServerWithTestClock(c) + conn, _ := s.OpenAPIAsNewMachine(c, server) + + c.Assert(pingConn(conn), jc.ErrorIsNil) + c.Assert(conn.Close(), jc.ErrorIsNil) + c.Assert(errors.Cause(pingConn(conn)), gc.Equals, rpc.ErrShutdown) // Make sure that ping messages have not been logged. for _, m := range tw.Log() { @@ -78,133 +81,87 @@ } func (s *pingerSuite) TestClientNoNeedToPing(c *gc.C) { - s.PatchValue(apiserver.MaxClientPingInterval, time.Duration(0)) - st, err := api.Open(s.APIInfo(c), api.DefaultDialOpts()) - c.Assert(err, jc.ErrorIsNil) - defer st.Close() + server, clock := s.newServerWithTestClock(c) + conn := s.OpenAPIAsAdmin(c, server) + + // Here we have a conundrum, we can't wait for a clock alarm because + // one isn't set because we don't have pingers for clients. So just + // a short wait then. time.Sleep(coretesting.ShortWait) - err = st.Ping() - c.Assert(err, jc.ErrorIsNil) + + clock.Advance(apiserver.MaxClientPingInterval * 2) + c.Assert(pingConn(conn), jc.ErrorIsNil) } func (s *pingerSuite) TestAgentConnectionShutsDownWithNoPing(c *gc.C) { - s.PatchValue(apiserver.MaxClientPingInterval, time.Duration(0)) - st, _ := s.OpenAPIAsNewMachine(c) - time.Sleep(coretesting.ShortWait) - err := st.Ping() - c.Assert(err, gc.ErrorMatches, "connection is shut down") -} + server, clock := s.newServerWithTestClock(c) + conn, _ := s.OpenAPIAsNewMachine(c, server) -func (s *pingerSuite) calculatePingTimeout(c *gc.C) time.Duration { - // Try opening an API connection a few times and take the max - // delay among the attempts. 
- attempt := utils.AttemptStrategy{ - Delay: coretesting.ShortWait, - Min: 3, - } - var maxTimeout time.Duration - for a := attempt.Start(); a.Next(); { - openStart := time.Now() - st, _ := s.OpenAPIAsNewMachine(c) - err := st.Ping() - if c.Check(err, jc.ErrorIsNil) { - openDelay := time.Since(openStart) - c.Logf("API open and initial ping took %v", openDelay) - if maxTimeout < openDelay { - maxTimeout = openDelay - } - } - if st != nil { - c.Check(st.Close(), jc.ErrorIsNil) - } - } - if !c.Failed() && maxTimeout > 0 { - return maxTimeout - } - c.Fatalf("cannot calculate ping timeout") - return 0 + waitAndAdvance(c, clock, apiserver.MaxClientPingInterval) + checkConnectionDies(c, conn) } func (s *pingerSuite) TestAgentConnectionDelaysShutdownWithPing(c *gc.C) { - // To negate the effects of an underpowered or heavily loaded - // machine running this test, tune the shortTimeout based on the - // maximum duration it takes to open an API connection. - shortTimeout := s.calculatePingTimeout(c) - attemptDelay := shortTimeout / 4 - - s.PatchValue(apiserver.MaxClientPingInterval, time.Duration(shortTimeout)) - - st, _ := s.OpenAPIAsNewMachine(c) - err := st.Ping() - c.Assert(err, jc.ErrorIsNil) - defer st.Close() + server, clock := s.newServerWithTestClock(c) + conn, _ := s.OpenAPIAsNewMachine(c, server) // As long as we don't wait too long, the connection stays open - attempt := utils.AttemptStrategy{ - Min: 10, - Delay: attemptDelay, - } - testStart := time.Now() - c.Logf( - "pinging %d times with %v delay, ping timeout %v, starting at %v", - attempt.Min, attempt.Delay, shortTimeout, testStart, - ) - var lastLoop time.Time - for a := attempt.Start(); a.Next(); { - testNow := time.Now() - loopDelta := testNow.Sub(lastLoop) - if lastLoop.IsZero() { - loopDelta = 0 - } - c.Logf("duration since last ping: %v", loopDelta) - err = st.Ping() - if !c.Check( - err, jc.ErrorIsNil, - gc.Commentf( - "ping timeout exceeded at %v (%v since the test start)", - testNow, 
testNow.Sub(testStart), - ), - ) { - c.Check(err, gc.ErrorMatches, "connection is shut down") - return - } - lastLoop = time.Now() + attemptDelay := apiserver.MaxClientPingInterval / 2 + for i := 0; i < 10; i++ { + waitAndAdvance(c, clock, attemptDelay) + c.Assert(pingConn(conn), jc.ErrorIsNil) } // However, once we stop pinging for too long, the connection dies - time.Sleep(2 * shortTimeout) // Exceed the timeout. - err = st.Ping() - c.Assert(err, gc.ErrorMatches, "connection is shut down") + waitAndAdvance(c, clock, apiserver.MaxClientPingInterval) + checkConnectionDies(c, conn) } -type mongoPingerSuite struct { - testing.JujuConnSuite -} +func (s *pingerSuite) TestAgentConnectionsShutDownWhenAPIServerDies(c *gc.C) { + clock := testing.NewClock(time.Now()) + config := s.sampleConfig(c) + config.Clock = clock + server := s.newServerDirtyKill(c, config) + conn, _ := s.OpenAPIAsNewMachine(c, server) -var _ = gc.Suite(&mongoPingerSuite{}) + err := pingConn(conn) + c.Assert(err, jc.ErrorIsNil) + server.Kill() -func (s *mongoPingerSuite) SetUpSuite(c *gc.C) { - s.JujuConnSuite.SetUpSuite(c) - // We need to set the ping interval before the server is started in test setup. - restore := gitjujutesting.PatchValue(apiserver.MongoPingInterval, coretesting.ShortWait) - s.AddCleanup(func(*gc.C) { restore() }) + // We know this is less than the client ping interval. 
+ clock.Advance(apiserver.MongoPingInterval) + checkConnectionDies(c, conn) } -func (s *mongoPingerSuite) TestAgentConnectionsShutDownWhenStateDies(c *gc.C) { - st, _ := s.OpenAPIAsNewMachine(c) - err := st.Ping() - c.Assert(err, jc.ErrorIsNil) - gitjujutesting.MgoServer.Destroy() +func waitAndAdvance(c *gc.C, clock *testing.Clock, delta time.Duration) { + waitForClock(c, clock) + clock.Advance(delta) +} + +func waitForClock(c *gc.C, clock *testing.Clock) { + select { + case <-clock.Alarms(): + case <-time.After(coretesting.LongWait): + c.Fatal("timed out waiting for clock") + } +} +func checkConnectionDies(c *gc.C, conn api.Connection) { attempt := utils.AttemptStrategy{ Total: coretesting.LongWait, Delay: coretesting.ShortWait, } for a := attempt.Start(); a.Next(); { - if err := st.Ping(); err != nil { + err := pingConn(conn) + if err != nil { c.Assert(err, gc.ErrorMatches, "connection is shut down") return } } - c.Fatalf("timed out waiting for API server to die") + c.Fatal("connection didn't get shut down") +} + +func pingConn(conn api.Connection) error { + version := conn.BestFacadeVersion("Pinger") + return conn.APICall("Pinger", version, "", "Ping", nil, nil) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/presence/util_test.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/presence/util_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/presence/util_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/presence/util_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -15,7 +15,6 @@ "gopkg.in/juju/names.v2" "github.com/juju/juju/apiserver/presence" - coretesting "github.com/juju/juju/testing" "github.com/juju/juju/worker" "github.com/juju/juju/worker/workertest" ) @@ -69,7 +68,7 @@ context := &context{ c: c, stub: stub, - clock: coretesting.NewClock(time.Now()), + clock: testing.NewClock(time.Now()), timeout: time.After(time.Second), starts: make(chan worker.Worker, 1000), } @@ 
-91,7 +90,7 @@ type context struct { c *gc.C stub *testing.Stub - clock *coretesting.Clock + clock *testing.Clock timeout <-chan time.Time starts chan worker.Worker diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/provisioner/imagemetadata_test.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/provisioner/imagemetadata_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/provisioner/imagemetadata_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/provisioner/imagemetadata_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -87,8 +87,14 @@ // Also make sure that these images metadata has been written to state for re-use saved, err := s.State.CloudImageMetadataStorage.FindMetadata(criteria) c.Assert(err, jc.ErrorIsNil) + stateExpected := s.convertCloudImageMetadata(expected[0]) + if len(saved["default cloud images"]) == len(stateExpected) { + for i, image := range saved["default cloud images"] { + stateExpected[i].DateCreated = image.DateCreated + } + } c.Assert(saved, gc.DeepEquals, map[string][]cloudimagemetadata.Metadata{ - "default cloud images": s.convertCloudImageMetadata(expected[0]), + "default cloud images": stateExpected, }) } @@ -139,6 +145,7 @@ }, one.Priority, one.ImageId, + 0, } } return expected diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/provisioner/machineerror.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/provisioner/machineerror.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/provisioner/machineerror.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/provisioner/machineerror.go 2016-10-13 14:31:49.000000000 +0000 @@ -6,7 +6,7 @@ import ( "time" - "launchpad.net/tomb" + "gopkg.in/tomb.v1" "github.com/juju/juju/state" ) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/provisioner/provisioner.go 
juju-core-2.0.0/src/github.com/juju/juju/apiserver/provisioner/provisioner.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/provisioner/provisioner.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/provisioner/provisioner.go 2016-10-13 14:31:49.000000000 +0000 @@ -305,7 +305,7 @@ result.Status = statusInfo.Status.String() result.Info = statusInfo.Message result.Data = statusInfo.Data - if statusInfo.Status != status.StatusError { + if statusInfo.Status != status.Error { continue } // Transient errors are marked as such in the status data. @@ -665,37 +665,40 @@ preparedOK = false break } - if len(parentAddrs) == 0 { - err = errors.Errorf("host machine device %q has no addresses", parentDevice.Name()) - result.Results[i].Error = common.ServerError(err) - preparedOK = false - break - } - firstAddress := parentAddrs[0] - parentDeviceSubnet, err := firstAddress.Subnet() - if err != nil { - err = errors.Annotatef(err, - "cannot get subnet %q used by address %q of host machine device %q", - firstAddress.SubnetCIDR(), firstAddress.Value(), parentDevice.Name(), - ) - result.Results[i].Error = common.ServerError(err) - preparedOK = false - break - } info := network.InterfaceInfo{ InterfaceName: device.Name(), MACAddress: device.MACAddress(), - ConfigType: network.ConfigStatic, + ConfigType: network.ConfigManual, InterfaceType: network.InterfaceType(device.Type()), NoAutoStart: !device.IsAutoStart(), Disabled: !device.IsUp(), MTU: int(device.MTU()), - CIDR: parentDeviceSubnet.CIDR(), - ProviderSubnetId: parentDeviceSubnet.ProviderId(), - VLANTag: parentDeviceSubnet.VLANTag(), ParentInterfaceName: parentDevice.Name(), } + + if len(parentAddrs) > 0 { + logger.Infof("host machine device %q has addresses %v", parentDevice.Name(), parentAddrs) + + firstAddress := parentAddrs[0] + parentDeviceSubnet, err := firstAddress.Subnet() + if err != nil { + err = errors.Annotatef(err, + "cannot get subnet %q used by address %q of 
host machine device %q", + firstAddress.SubnetCIDR(), firstAddress.Value(), parentDevice.Name(), + ) + result.Results[i].Error = common.ServerError(err) + preparedOK = false + break + } + info.ConfigType = network.ConfigStatic + info.CIDR = parentDeviceSubnet.CIDR() + info.ProviderSubnetId = parentDeviceSubnet.ProviderId() + info.VLANTag = parentDeviceSubnet.VLANTag() + } else { + logger.Infof("host machine device %q has no addresses %v", parentDevice.Name(), parentAddrs) + } + logger.Tracef("prepared info for container interface %q: %+v", info.InterfaceName, info) preparedOK = true preparedInfo[j] = info @@ -714,9 +717,8 @@ logger.Debugf("got allocated info from provider: %+v", allocatedInfo) allocatedConfig := networkingcommon.NetworkConfigFromInterfaceInfo(allocatedInfo) - sortedAllocatedConfig := networkingcommon.SortNetworkConfigsByInterfaceName(allocatedConfig) - logger.Tracef("allocated sorted network config: %+v", sortedAllocatedConfig) - result.Results[i].Config = sortedAllocatedConfig + logger.Tracef("allocated network config: %+v", allocatedConfig) + result.Results[i].Config = allocatedConfig } return result, nil } @@ -807,8 +809,42 @@ Since: &now, } err = machine.SetInstanceStatus(s) + if status.Status(arg.Status) == status.ProvisioningError { + s.Status = status.Error + if err == nil { + err = machine.SetStatus(s) + } + } } result.Results[i].Error = common.ServerError(err) } return result, nil } + +// MarkMachinesForRemoval indicates that the specified machines are +// ready to have any provider-level resources cleaned up and then be +// removed. 
+func (p *ProvisionerAPI) MarkMachinesForRemoval(machines params.Entities) (params.ErrorResults, error) { + results := make([]params.ErrorResult, len(machines.Entities)) + canAccess, err := p.getAuthFunc() + if err != nil { + logger.Errorf("failed to get an authorisation function: %v", err) + return params.ErrorResults{}, errors.Trace(err) + } + for i, machine := range machines.Entities { + results[i].Error = common.ServerError(p.markOneMachineForRemoval(machine.Tag, canAccess)) + } + return params.ErrorResults{Results: results}, nil +} + +func (p *ProvisionerAPI) markOneMachineForRemoval(machineTag string, canAccess common.AuthFunc) error { + mTag, err := names.ParseMachineTag(machineTag) + if err != nil { + return errors.Trace(err) + } + machine, err := p.getMachine(canAccess, mTag) + if err != nil { + return errors.Trace(err) + } + return machine.MarkForRemoval() +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/provisioner/provisioner_test.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/provisioner/provisioner_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/provisioner/provisioner_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/provisioner/provisioner_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -101,7 +101,7 @@ func (s *withoutControllerSuite) SetUpTest(c *gc.C) { s.setUpTest(c, false) - s.ModelWatcherTest = commontesting.NewModelWatcherTest(s.provisioner, s.State, s.resources, commontesting.HasSecrets) + s.ModelWatcherTest = commontesting.NewModelWatcherTest(s.provisioner, s.State, s.resources) } func (s *withoutControllerSuite) TestProvisionerFailsWithNonMachineAgentNonManagerUser(c *gc.C) { @@ -321,21 +321,21 @@ func (s *withoutControllerSuite) TestSetStatus(c *gc.C) { now := time.Now() sInfo := status.StatusInfo{ - Status: status.StatusStarted, + Status: status.Started, Message: "blah", Since: &now, } err := s.machines[0].SetStatus(sInfo) c.Assert(err, 
jc.ErrorIsNil) sInfo = status.StatusInfo{ - Status: status.StatusStopped, + Status: status.Stopped, Message: "foo", Since: &now, } err = s.machines[1].SetStatus(sInfo) c.Assert(err, jc.ErrorIsNil) sInfo = status.StatusInfo{ - Status: status.StatusError, + Status: status.Error, Message: "not really", Since: &now, } @@ -344,13 +344,13 @@ args := params.SetStatus{ Entities: []params.EntityStatusArgs{ - {Tag: s.machines[0].Tag().String(), Status: status.StatusError.String(), Info: "not really", + {Tag: s.machines[0].Tag().String(), Status: status.Error.String(), Info: "not really", Data: map[string]interface{}{"foo": "bar"}}, - {Tag: s.machines[1].Tag().String(), Status: status.StatusStopped.String(), Info: "foobar"}, - {Tag: s.machines[2].Tag().String(), Status: status.StatusStarted.String(), Info: "again"}, - {Tag: "machine-42", Status: status.StatusStarted.String(), Info: "blah"}, - {Tag: "unit-foo-0", Status: status.StatusStopped.String(), Info: "foobar"}, - {Tag: "application-bar", Status: status.StatusStopped.String(), Info: "foobar"}, + {Tag: s.machines[1].Tag().String(), Status: status.Stopped.String(), Info: "foobar"}, + {Tag: s.machines[2].Tag().String(), Status: status.Started.String(), Info: "again"}, + {Tag: "machine-42", Status: status.Started.String(), Info: "blah"}, + {Tag: "unit-foo-0", Status: status.Stopped.String(), Info: "foobar"}, + {Tag: "application-bar", Status: status.Stopped.String(), Info: "foobar"}, }} result, err := s.provisioner.SetStatus(args) c.Assert(err, jc.ErrorIsNil) @@ -366,22 +366,22 @@ }) // Verify the changes. 
- s.assertStatus(c, 0, status.StatusError, "not really", map[string]interface{}{"foo": "bar"}) - s.assertStatus(c, 1, status.StatusStopped, "foobar", map[string]interface{}{}) - s.assertStatus(c, 2, status.StatusStarted, "again", map[string]interface{}{}) + s.assertStatus(c, 0, status.Error, "not really", map[string]interface{}{"foo": "bar"}) + s.assertStatus(c, 1, status.Stopped, "foobar", map[string]interface{}{}) + s.assertStatus(c, 2, status.Started, "again", map[string]interface{}{}) } func (s *withoutControllerSuite) TestMachinesWithTransientErrors(c *gc.C) { now := time.Now() sInfo := status.StatusInfo{ - Status: status.StatusStarted, + Status: status.Started, Message: "blah", Since: &now, } err := s.machines[0].SetStatus(sInfo) c.Assert(err, jc.ErrorIsNil) sInfo = status.StatusInfo{ - Status: status.StatusError, + Status: status.Error, Message: "transient error", Data: map[string]interface{}{"transient": true, "foo": "bar"}, Since: &now, @@ -389,7 +389,7 @@ err = s.machines[1].SetStatus(sInfo) c.Assert(err, jc.ErrorIsNil) sInfo = status.StatusInfo{ - Status: status.StatusError, + Status: status.Error, Message: "error", Data: map[string]interface{}{"transient": false}, Since: &now, @@ -397,7 +397,7 @@ err = s.machines[2].SetStatus(sInfo) c.Assert(err, jc.ErrorIsNil) sInfo = status.StatusInfo{ - Status: status.StatusError, + Status: status.Error, Message: "error", Since: &now, } @@ -405,7 +405,7 @@ c.Assert(err, jc.ErrorIsNil) // Machine 4 is provisioned but error not reset yet. 
sInfo = status.StatusInfo{ - Status: status.StatusError, + Status: status.Error, Message: "transient error", Data: map[string]interface{}{"transient": true, "foo": "bar"}, Since: &now, @@ -435,14 +435,14 @@ anAuthorizer) now := time.Now() sInfo := status.StatusInfo{ - Status: status.StatusStarted, + Status: status.Started, Message: "blah", Since: &now, } err = s.machines[0].SetStatus(sInfo) c.Assert(err, jc.ErrorIsNil) sInfo = status.StatusInfo{ - Status: status.StatusError, + Status: status.Error, Message: "transient error", Data: map[string]interface{}{"transient": true, "foo": "bar"}, Since: &now, @@ -450,7 +450,7 @@ err = s.machines[1].SetStatus(sInfo) c.Assert(err, jc.ErrorIsNil) sInfo = status.StatusInfo{ - Status: status.StatusError, + Status: status.Error, Message: "error", Data: map[string]interface{}{"transient": false}, Since: &now, @@ -458,7 +458,7 @@ err = s.machines[2].SetStatus(sInfo) c.Assert(err, jc.ErrorIsNil) sInfo = status.StatusInfo{ - Status: status.StatusError, + Status: status.Error, Message: "error", Since: &now, } @@ -608,27 +608,27 @@ aProvisioner, err := provisioner.NewProvisionerAPI(s.State, s.resources, anAuthorizer) c.Assert(err, jc.ErrorIsNil) - s.AssertModelConfig(c, aProvisioner, commontesting.NoSecrets) + s.AssertModelConfig(c, aProvisioner) } func (s *withoutControllerSuite) TestStatus(c *gc.C) { now := time.Now() sInfo := status.StatusInfo{ - Status: status.StatusStarted, + Status: status.Started, Message: "blah", Since: &now, } err := s.machines[0].SetStatus(sInfo) c.Assert(err, jc.ErrorIsNil) sInfo = status.StatusInfo{ - Status: status.StatusStopped, + Status: status.Stopped, Message: "foo", Since: &now, } err = s.machines[1].SetStatus(sInfo) c.Assert(err, jc.ErrorIsNil) sInfo = status.StatusInfo{ - Status: status.StatusError, + Status: status.Error, Message: "not really", Data: map[string]interface{}{"foo": "bar"}, Since: &now, @@ -657,9 +657,9 @@ } c.Assert(result, gc.DeepEquals, params.StatusResults{ Results: 
[]params.StatusResult{ - {Status: status.StatusStarted.String(), Info: "blah", Data: map[string]interface{}{}}, - {Status: status.StatusStopped.String(), Info: "foo", Data: map[string]interface{}{}}, - {Status: status.StatusError.String(), Info: "not really", Data: map[string]interface{}{"foo": "bar"}}, + {Status: status.Started.String(), Info: "blah", Data: map[string]interface{}{}}, + {Status: status.Stopped.String(), Info: "foo", Data: map[string]interface{}{}}, + {Status: status.Error.String(), Info: "not really", Data: map[string]interface{}{"foo": "bar"}}, {Error: apiservertesting.NotFoundError("machine 42")}, {Error: apiservertesting.ErrUnauthorized}, {Error: apiservertesting.ErrUnauthorized}, @@ -822,7 +822,7 @@ func (s *withoutControllerSuite) TestConstraints(c *gc.C) { // Add a machine with some constraints. - cons := constraints.MustParse("cpu-cores=123", "mem=8G") + cons := constraints.MustParse("cores=123", "mem=8G") template := state.MachineTemplate{ Series: "quantal", Jobs: []state.MachineJob{state.JobHostUnits}, @@ -1235,3 +1235,38 @@ c.Assert(tools.URL, gc.Equals, url) } } + +func (s *withoutControllerSuite) TestMarkMachinesForRemoval(c *gc.C) { + err := s.machines[0].EnsureDead() + c.Assert(err, jc.ErrorIsNil) + err = s.machines[2].EnsureDead() + c.Assert(err, jc.ErrorIsNil) + + res, err := s.provisioner.MarkMachinesForRemoval(params.Entities{ + Entities: []params.Entity{ + {Tag: "machine-2"}, // ok + {Tag: "machine-100"}, // not found + {Tag: "machine-0"}, // ok + {Tag: "machine-1"}, // not dead + {Tag: "machine-0-lxd-5"}, // unauthorised + {Tag: "application-thing"}, // only machines allowed + }, + }) + c.Assert(err, jc.ErrorIsNil) + results := res.Results + c.Assert(results, gc.HasLen, 6) + c.Check(results[0].Error, gc.IsNil) + c.Check(*results[1].Error, gc.Equals, + *common.ServerError(errors.NotFoundf("machine 100"))) + c.Check(*results[1].Error, jc.Satisfies, params.IsCodeNotFound) + c.Check(results[2].Error, gc.IsNil) + 
c.Check(*results[3].Error, gc.Equals, + *common.ServerError(errors.New("cannot remove machine 1: machine is not dead"))) + c.Check(*results[4].Error, gc.Equals, *apiservertesting.ErrUnauthorized) + c.Check(*results[5].Error, gc.Equals, + *common.ServerError(errors.New(`"application-thing" is not a valid machine tag`))) + + removals, err := s.State.AllMachineRemovals() + c.Assert(err, jc.ErrorIsNil) + c.Check(removals, jc.SameContents, []string{"0", "2"}) +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/provisioner/provisioninginfo.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/provisioner/provisioninginfo.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/provisioner/provisioninginfo.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/provisioner/provisioninginfo.go 2016-10-13 14:31:49.000000000 +0000 @@ -34,7 +34,7 @@ } canAccess, err := p.getAuthFunc() if err != nil { - return result, err + return result, errors.Trace(err) } for i, entity := range args.Entities { tag, err := names.ParseMachineTag(entity.Tag) @@ -54,7 +54,7 @@ func (p *ProvisionerAPI) getProvisioningInfo(m *state.Machine) (*params.ProvisioningInfo, error) { cons, err := m.Constraints() if err != nil { - return nil, err + return nil, errors.Trace(err) } volumes, err := p.machineVolumeParams(m) @@ -110,18 +110,18 @@ func (p *ProvisionerAPI) machineVolumeParams(m *state.Machine) ([]params.VolumeParams, error) { volumeAttachments, err := m.VolumeAttachments() if err != nil { - return nil, err + return nil, errors.Trace(err) } if len(volumeAttachments) == 0 { return nil, nil } modelConfig, err := p.st.ModelConfig() if err != nil { - return nil, err + return nil, errors.Trace(err) } controllerCfg, err := p.st.ControllerConfig() if err != nil { - return nil, err + return nil, errors.Trace(err) } allVolumeParams := make([]params.VolumeParams, 0, len(volumeAttachments)) for _, volumeAttachment := range volumeAttachments { @@ 
-290,7 +290,7 @@ } service, err := unit.Application() if err != nil { - return nil, err + return nil, errors.Trace(err) } if processedServicesSet.Contains(service.Name()) { // Already processed, skip it. @@ -298,7 +298,7 @@ } bindings, err := service.EndpointBindings() if err != nil { - return nil, err + return nil, errors.Trace(err) } processedServicesSet.Add(service.Name()) @@ -363,7 +363,7 @@ // Look for image metadata in state. data, err := p.findImageMetadata(imageConstraint, env) if err != nil { - return nil, err + return nil, errors.Trace(err) } sort.Sort(metadataList(data)) logger.Debugf("available image metadata for provisioning: %v", data) @@ -494,34 +494,35 @@ func (p *ProvisionerAPI) imageMetadataFromDataSources(env environs.Environ, constraint *imagemetadata.ImageConstraint) ([]params.CloudImageMetadata, error) { sources, err := environs.ImageMetadataSources(env) if err != nil { - return nil, err - } - - getStream := func(current string) string { - if current == "" { - if constraint.Stream != "" { - return constraint.Stream - } - return env.Config().ImageStream() - } - return current + return nil, errors.Trace(err) } - toModel := func(m *imagemetadata.ImageMetadata, mStream string, mSeries string, source string, priority int) cloudimagemetadata.Metadata { - - return cloudimagemetadata.Metadata{ - cloudimagemetadata.MetadataAttributes{ + cfg := env.Config() + toModel := func(m *imagemetadata.ImageMetadata, mSeries string, source string, priority int) cloudimagemetadata.Metadata { + result := cloudimagemetadata.Metadata{ + MetadataAttributes: cloudimagemetadata.MetadataAttributes{ Region: m.RegionName, Arch: m.Arch, VirtType: m.VirtType, RootStorageType: m.Storage, Source: source, Series: mSeries, - Stream: mStream, + Stream: m.Stream, + Version: m.Version, }, - priority, - m.Id, + Priority: priority, + ImageId: m.Id, + } + // TODO (anastasiamac 2016-08-24) This is a band-aid solution. + // Once correct value is read from simplestreams, this needs to go. 
+ // Bug# 1616295 + if result.Stream == "" { + result.Stream = constraint.Stream + } + if result.Stream == "" { + result.Stream = cfg.ImageStream() } + return result } var metadataState []cloudimagemetadata.Metadata @@ -539,8 +540,7 @@ logger.Warningf("could not determine series for image id %s: %v", m.Id, err) continue } - mStream := getStream(m.Stream) - metadataState = append(metadataState, toModel(m, mStream, mSeries, info.Source, source.Priority())) + metadataState = append(metadataState, toModel(m, mSeries, info.Source, source.Priority())) } } if len(metadataState) > 0 { diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/provisioner/provisioninginfo_test.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/provisioner/provisioninginfo_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/provisioner/provisioninginfo_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/provisioner/provisioninginfo_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -25,7 +25,7 @@ _, err := pm.Create("static-pool", "static", map[string]interface{}{"foo": "bar"}) c.Assert(err, jc.ErrorIsNil) - cons := constraints.MustParse("cpu-cores=123 mem=8G") + cons := constraints.MustParse("cores=123 mem=8G") template := state.MachineTemplate{ Series: "quantal", Jobs: []state.MachineJob{state.JobHostUnits}, @@ -48,7 +48,7 @@ controllerCfg := coretesting.FakeControllerConfig() // Dummy provider uses a random port, which is added to cfg used to create environment. 
- apiPort := dummy.ApiPort(s.Environ.Provider()) + apiPort := dummy.APIPort(s.Environ.Provider()) controllerCfg["api-port"] = apiPort expected := params.ProvisioningInfoResults{ Results: []params.ProvisioningInfoResult{ @@ -57,7 +57,7 @@ Series: "quantal", Jobs: []multiwatcher.MachineJob{multiwatcher.JobHostUnits}, Tags: map[string]string{ - tags.JujuController: coretesting.ModelTag.Id(), + tags.JujuController: coretesting.ControllerTag.Id(), tags.JujuModel: coretesting.ModelTag.Id(), }, }}, @@ -68,7 +68,7 @@ Placement: template.Placement, Jobs: []multiwatcher.MachineJob{multiwatcher.JobHostUnits}, Tags: map[string]string{ - tags.JujuController: coretesting.ModelTag.Id(), + tags.JujuController: coretesting.ControllerTag.Id(), tags.JujuModel: coretesting.ModelTag.Id(), }, Volumes: []params.VolumeParams{{ @@ -77,7 +77,7 @@ Provider: "static", Attributes: map[string]interface{}{"foo": "bar"}, Tags: map[string]string{ - tags.JujuController: coretesting.ModelTag.Id(), + tags.JujuController: coretesting.ControllerTag.Id(), tags.JujuModel: coretesting.ModelTag.Id(), }, Attachment: ¶ms.VolumeAttachmentParams{ @@ -91,7 +91,7 @@ Provider: "static", Attributes: map[string]interface{}{"foo": "bar"}, Tags: map[string]string{ - tags.JujuController: coretesting.ModelTag.Id(), + tags.JujuController: coretesting.ControllerTag.Id(), tags.JujuModel: coretesting.ModelTag.Id(), }, Attachment: ¶ms.VolumeAttachmentParams{ @@ -116,7 +116,7 @@ func (s *withoutControllerSuite) TestProvisioningInfoWithSingleNegativeAndPositiveSpaceInConstraints(c *gc.C) { s.addSpacesAndSubnets(c) - cons := constraints.MustParse("cpu-cores=123 mem=8G spaces=^space1,space2") + cons := constraints.MustParse("cores=123 mem=8G spaces=^space1,space2") template := state.MachineTemplate{ Series: "quantal", Jobs: []state.MachineJob{state.JobHostUnits}, @@ -134,7 +134,7 @@ controllerCfg := coretesting.FakeControllerConfig() // Dummy provider uses a random port, which is added to cfg used to create environment. 
- apiPort := dummy.ApiPort(s.Environ.Provider()) + apiPort := dummy.APIPort(s.Environ.Provider()) controllerCfg["api-port"] = apiPort expected := params.ProvisioningInfoResults{ Results: []params.ProvisioningInfoResult{{ @@ -145,7 +145,7 @@ Placement: template.Placement, Jobs: []multiwatcher.MachineJob{multiwatcher.JobHostUnits}, Tags: map[string]string{ - tags.JujuController: coretesting.ModelTag.Id(), + tags.JujuController: coretesting.ControllerTag.Id(), tags.JujuModel: coretesting.ModelTag.Id(), }, SubnetsToZones: map[string][]string{ @@ -204,7 +204,7 @@ controllerCfg := coretesting.FakeControllerConfig() // Dummy provider uses a random port, which is added to cfg used to create environment. - apiPort := dummy.ApiPort(s.Environ.Provider()) + apiPort := dummy.APIPort(s.Environ.Provider()) controllerCfg["api-port"] = apiPort expected := params.ProvisioningInfoResults{ Results: []params.ProvisioningInfoResult{{ @@ -213,7 +213,7 @@ Series: "quantal", Jobs: []multiwatcher.MachineJob{multiwatcher.JobHostUnits}, Tags: map[string]string{ - tags.JujuController: coretesting.ModelTag.Id(), + tags.JujuController: coretesting.ControllerTag.Id(), tags.JujuModel: coretesting.ModelTag.Id(), tags.JujuUnitsDeployed: wordpressUnit.Name(), }, @@ -234,8 +234,8 @@ _, err := s.State.AddSpace("empty", "", nil, true) c.Assert(err, jc.ErrorIsNil) - consEmptySpace := constraints.MustParse("cpu-cores=123 mem=8G spaces=empty") - consMissingSpace := constraints.MustParse("cpu-cores=123 mem=8G spaces=missing") + consEmptySpace := constraints.MustParse("cores=123 mem=8G spaces=empty") + consMissingSpace := constraints.MustParse("cores=123 mem=8G spaces=missing") templates := []state.MachineTemplate{{ Series: "quantal", Jobs: []state.MachineJob{state.JobHostUnits}, @@ -290,7 +290,7 @@ controllerCfg := coretesting.FakeControllerConfig() // Dummy provider uses a random port, which is added to cfg used to create environment. 
- apiPort := dummy.ApiPort(s.Environ.Provider()) + apiPort := dummy.APIPort(s.Environ.Provider()) controllerCfg["api-port"] = apiPort c.Assert(result, jc.DeepEquals, params.ProvisioningInfoResults{ Results: []params.ProvisioningInfoResult{ @@ -301,7 +301,7 @@ Placement: template.Placement, Jobs: []multiwatcher.MachineJob{multiwatcher.JobHostUnits}, Tags: map[string]string{ - tags.JujuController: coretesting.ModelTag.Id(), + tags.JujuController: coretesting.ControllerTag.Id(), tags.JujuModel: coretesting.ModelTag.Id(), }, Volumes: []params.VolumeParams{{ @@ -310,7 +310,7 @@ Provider: "static", Attributes: nil, Tags: map[string]string{ - tags.JujuController: coretesting.ModelTag.Id(), + tags.JujuController: coretesting.ControllerTag.Id(), tags.JujuModel: coretesting.ModelTag.Id(), }, Attachment: ¶ms.VolumeAttachmentParams{ @@ -345,7 +345,7 @@ results, err := aProvisioner.ProvisioningInfo(args) controllerCfg := coretesting.FakeControllerConfig() // Dummy provider uses a random port, which is added to cfg used to create environment. - apiPort := dummy.ApiPort(s.Environ.Provider()) + apiPort := dummy.APIPort(s.Environ.Provider()) controllerCfg["api-port"] = apiPort c.Assert(results, jc.DeepEquals, params.ProvisioningInfoResults{ Results: []params.ProvisioningInfoResult{ @@ -354,7 +354,7 @@ Series: "quantal", Jobs: []multiwatcher.MachineJob{multiwatcher.JobHostUnits}, Tags: map[string]string{ - tags.JujuController: coretesting.ModelTag.Id(), + tags.JujuController: coretesting.ControllerTag.Id(), tags.JujuModel: coretesting.ModelTag.Id(), }, }}, diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/read_only_calls.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/read_only_calls.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/read_only_calls.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/read_only_calls.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,75 +0,0 @@ -// Copyright 2015 Canonical Ltd. 
-// Licensed under the AGPLv3, see LICENCE file for details. - -package apiserver - -import ( - "github.com/juju/utils/set" -) - -// readOnlyCalls specify a white-list of API calls that do not -// modify the database. The format of the calls is ".". -// At this stage, we are explicitly ignoring the facade version. -var readOnlyCalls = set.NewStrings( - "Action.Actions", - "Action.FindActionTagsByPrefix", - "Action.ListAll", - "Action.ListPending", - "Action.ListRunning", - "Action.ListCompleted", - "Action.ApplicationsCharmsActions", - "AllWatcher.Next", - "Annotations.Get", - "Application.GetConstraints", - "Application.CharmRelations", - "Application.Get", - "Block.List", - "Charms.CharmInfo", - "Charms.IsMetered", - "Charms.List", - "Client.AgentVersion", - "Client.APIHostPorts", - "Client.ModelGet", - "Client.ModelInfo", - "Client.ModelUserInfo", - "Client.FullStatus", - // FindTools, while being technically read only, isn't a useful - // command for a read only user to run. - // While GetBundleChanges is technically read only, it is a precursor - // to deploying the bundle or changes. But... let's leave it here anyway. - "Client.GetBundleChanges", - "Client.GetModelConstraints", - "Client.PrivateAddress", - "Client.PublicAddress", - // ResolveCharms, while being technically read only, isn't a useful - // command for a read only user to run. - // Status is so old it shouldn't be used. - "Client.StatusHistory", - "Client.WatchAll", - "Cloud.Cloud", - "Cloud.Credentials", - // TODO: add controller work. - "KeyManager.ListKeys", - "ModelManager.ModelInfo", - "Pinger.Ping", - "Spaces.ListSpaces", - "Storage.ListStorageDetails", - "Storage.ListFilesystems", - "Storage.ListPools", - "Storage.ListVolumes", - "Subnets.AllSpaces", - "Subnets.AllZones", - "Subnets.ListSubnets", - "UserManager.UserInfo", -) - -// isCallReadOnly returns whether or not the method on the facade -// is known to not alter the database. 
-func isCallReadOnly(facade, method string) bool { - key := facade + "." + method - // NOTE: maybe useful in the future to be able to specify entire facades - // as read only, in which case specifying something like "Facade.*" would - // be useful. Not sure we'll ever need this, but something to think about - // perhaps. - return readOnlyCalls.Contains(key) -} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/read_only_calls_test.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/read_only_calls_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/read_only_calls_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/read_only_calls_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,73 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package apiserver - -import ( - "strings" - - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" - - "github.com/juju/juju/apiserver/common" -) - -type readOnlyCallsSuite struct { -} - -var _ = gc.Suite(&readOnlyCallsSuite{}) - -func (*readOnlyCallsSuite) TestReadOnlyCallsExist(c *gc.C) { - // Iterate through the list of readOnlyCalls and make sure - // that the facades are reachable. 
- facades := common.Facades.List() - - maxVersion := map[string]int{} - for _, facade := range facades { - version := 0 - for _, ver := range facade.Versions { - if ver > version { - version = ver - } - } - maxVersion[facade.Name] = version - } - - for _, name := range readOnlyCalls.Values() { - parts := strings.Split(name, ".") - facade, method := parts[0], parts[1] - version := maxVersion[facade] - - _, _, err := lookupMethod(facade, version, method) - c.Check(err, jc.ErrorIsNil) - } -} - -func (*readOnlyCallsSuite) TestReadOnlyCall(c *gc.C) { - for _, test := range []struct { - facade string - method string - }{ - {"Action", "Actions"}, - {"Client", "FullStatus"}, - {"Application", "Get"}, - {"Storage", "ListStorageDetails"}, - } { - c.Logf("check %s.%s", test.facade, test.method) - c.Check(isCallReadOnly(test.facade, test.method), jc.IsTrue) - } -} - -func (*readOnlyCallsSuite) TestWritableCalls(c *gc.C) { - for _, test := range []struct { - facade string - method string - }{ - {"Client", "UnknownMethod"}, - {"Application", "Deploy"}, - {"UnknownFacade", "List"}, - } { - c.Logf("check %s.%s", test.facade, test.method) - c.Check(isCallReadOnly(test.facade, test.method), jc.IsFalse) - } -} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/registration.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/registration.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/registration.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/registration.go 2016-10-13 14:31:49.000000000 +0000 @@ -9,11 +9,11 @@ "io/ioutil" "net/http" - "gopkg.in/macaroon.v1" - "github.com/juju/errors" "golang.org/x/crypto/nacl/secretbox" "gopkg.in/juju/names.v2" + "gopkg.in/macaroon-bakery.v1/httpbakery" + "gopkg.in/macaroon.v1" "github.com/juju/juju/apiserver/params" "github.com/juju/juju/state" @@ -28,27 +28,54 @@ // used to complete a secure user registration process, and provide controller // login credentials. 
type registerUserHandler struct { - ctxt httpContext - createLocalLoginMacaroon func(names.UserTag) (*macaroon.Macaroon, error) + ctxt httpContext } // ServeHTTP implements the http.Handler interface. func (h *registerUserHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { if req.Method != "POST" { - sendError(w, errors.MethodNotAllowedf("unsupported method: %q", req.Method)) + err := sendError(w, errors.MethodNotAllowedf("unsupported method: %q", req.Method)) + if err != nil { + logger.Errorf("%v", err) + } return } st, err := h.ctxt.stateForRequestUnauthenticated(req) if err != nil { - sendError(w, err) + if err := sendError(w, err); err != nil { + logger.Errorf("%v", err) + } return } - response, err := h.processPost(req, st) + userTag, response, err := h.processPost(req, st) if err != nil { - sendError(w, err) + if err := sendError(w, err); err != nil { + logger.Errorf("%v", err) + } + return + } + + // Set a short-lived macaroon as a cookie on the response, + // which the client can use to obtain a discharge macaroon. + m, err := h.ctxt.srv.authCtxt.CreateLocalLoginMacaroon(userTag) + if err != nil { + if err := sendError(w, err); err != nil { + logger.Errorf("%v", err) + } return } - sendStatusAndJSON(w, http.StatusOK, response) + cookie, err := httpbakery.NewCookie(macaroon.Slice{m}) + if err != nil { + if err := sendError(w, err); err != nil { + logger.Errorf("%v", err) + } + return + } + http.SetCookie(w, cookie) + + if err := sendStatusAndJSON(w, http.StatusOK, response); err != nil { + logger.Errorf("%v", err) + } } // The client will POST to the "/register" endpoint with a JSON-encoded @@ -69,34 +96,40 @@ // NOTE(axw) it is important that the client and server choose their // own nonces, because reusing a nonce means that the key-stream can // be revealed. 
-func (h *registerUserHandler) processPost(req *http.Request, st *state.State) (*params.SecretKeyLoginResponse, error) { +func (h *registerUserHandler) processPost(req *http.Request, st *state.State) ( + names.UserTag, *params.SecretKeyLoginResponse, error, +) { + + failure := func(err error) (names.UserTag, *params.SecretKeyLoginResponse, error) { + return names.UserTag{}, nil, err + } data, err := ioutil.ReadAll(req.Body) if err != nil { - return nil, err + return failure(err) } var loginRequest params.SecretKeyLoginRequest if err := json.Unmarshal(data, &loginRequest); err != nil { - return nil, err + return failure(err) } // Basic validation: ensure that the request contains a valid user tag, // nonce, and ciphertext of the expected length. userTag, err := names.ParseUserTag(loginRequest.User) if err != nil { - return nil, err + return failure(err) } if len(loginRequest.Nonce) != secretboxNonceLength { - return nil, errors.NotValidf("nonce") + return failure(errors.NotValidf("nonce")) } // Decrypt the ciphertext with the user's secret key (if it has one). user, err := st.User(userTag) if err != nil { - return nil, err + return failure(err) } if len(user.SecretKey()) != secretboxKeyLength { - return nil, errors.NotFoundf("secret key for user %q", user.Name()) + return failure(errors.NotFoundf("secret key for user %q", user.Name())) } var key [secretboxKeyLength]byte var nonce [secretboxNonceLength]byte @@ -106,37 +139,37 @@ if !ok { // Cannot decrypt the ciphertext, which implies that the secret // key specified by the client is invalid. - return nil, errors.NotValidf("secret key") + return failure(errors.NotValidf("secret key")) } // Unmarshal the request payload, which contains the new password to // set for the user. 
var requestPayload params.SecretKeyLoginRequestPayload if err := json.Unmarshal(payloadBytes, &requestPayload); err != nil { - return nil, errors.Annotate(err, "cannot unmarshal payload") + return failure(errors.Annotate(err, "cannot unmarshal payload")) } if err := user.SetPassword(requestPayload.Password); err != nil { - return nil, errors.Annotate(err, "setting new password") + return failure(errors.Annotate(err, "setting new password")) } // Respond with the CA-cert and password, encrypted again with the // secret key. responsePayload, err := h.getSecretKeyLoginResponsePayload(st, userTag) if err != nil { - return nil, errors.Trace(err) + return failure(errors.Trace(err)) } payloadBytes, err = json.Marshal(responsePayload) if err != nil { - return nil, errors.Trace(err) + return failure(errors.Trace(err)) } if _, err := rand.Read(nonce[:]); err != nil { - return nil, errors.Trace(err) + return failure(errors.Trace(err)) } response := &params.SecretKeyLoginResponse{ Nonce: nonce[:], PayloadCiphertext: secretbox.Seal(nil, payloadBytes, &nonce, &key), } - return response, nil + return userTag, response, nil } // getSecretKeyLoginResponsePayload returns the information required by the @@ -147,14 +180,9 @@ if !st.IsController() { return nil, errors.New("state is not for a controller") } - mac, err := h.createLocalLoginMacaroon(userTag) - if err != nil { - return nil, errors.Trace(err) - } payload := params.SecretKeyLoginResponsePayload{ CACert: st.CACert(), - ControllerUUID: st.ModelUUID(), - Macaroon: mac, + ControllerUUID: st.ControllerUUID(), } return &payload, nil } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/registration_test.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/registration_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/registration_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/registration_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -22,14 +22,14 @@ 
) type registrationSuite struct { - authHttpSuite + authHTTPSuite bob *state.User } var _ = gc.Suite(&registrationSuite{}) func (s *registrationSuite) SetUpTest(c *gc.C) { - s.authHttpSuite.SetUpTest(c) + s.authHTTPSuite.SetUpTest(c) bob, err := s.BackingState.AddUserWithSecretKey("bob", "", "admin") c.Assert(err, jc.ErrorIsNil) s.bob = bob diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/restoring_root.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/restoring_root.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/restoring_root.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/restoring_root.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,84 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package apiserver - -import ( - "github.com/juju/errors" - "github.com/juju/utils/set" - - "github.com/juju/juju/rpc" - "github.com/juju/juju/rpc/rpcreflect" -) - -var aboutToRestoreError = errors.New("juju restore is in progress - Juju functionality is limited to avoid data loss") -var restoreInProgressError = errors.New("juju restore is in progress - Juju api is off to prevent data loss") - -// aboutToRestoreRoot a root that will only allow a limited -// set of methods to run, defined in allowedMethodsAboutToRestore. -type aboutToRestoreRoot struct { - rpc.MethodFinder -} - -// restoreRoot a root that will not allow calls whatsoever during restore. -type restoreInProgressRoot struct { - rpc.MethodFinder -} - -// newAboutToRestoreRoot creates a root where all API calls -// but restore will fail with aboutToRestoreError. -func newAboutToRestoreRoot(finder rpc.MethodFinder) *aboutToRestoreRoot { - return &aboutToRestoreRoot{ - MethodFinder: finder, - } -} - -// newRestoreInProressRoot creates a root where all API calls -// but restore will fail with restoreInProgressError. 
-func newRestoreInProgressRoot(finder rpc.MethodFinder) *restoreInProgressRoot { - return &restoreInProgressRoot{ - MethodFinder: finder, - } -} - -// FindMethod extended srvRoot.FindMethod. It returns aboutToRestoreError -// for all API calls except Client.Restore -// for use while Juju is preparing to restore a backup. -func (r *aboutToRestoreRoot) FindMethod(rootName string, version int, methodName string) (rpcreflect.MethodCaller, error) { - caller, err := r.MethodFinder.FindMethod(rootName, version, methodName) - if err != nil { - return nil, err - } - if !isMethodAllowedAboutToRestore(rootName, methodName) { - return nil, aboutToRestoreError - } - return caller, nil -} - -var allowedMethodsAboutToRestore = set.NewStrings( - "Client.FullStatus", // for "juju status" - "Client.ModelGet", // for "juju ssh" - "Client.PrivateAddress", // for "juju ssh" - "Client.PublicAddress", // for "juju ssh" - "Client.WatchDebugLog", // for "juju debug-log" - "Backups.Restore", // for "juju backups restore" - "Backups.FinishRestore", // for "juju backups restore" -) - -// isMethodAllowedAboutToRestore return true if this method is allowed when the server is in state.RestorePreparing mode -// at present only Backups.Restore is. -func isMethodAllowedAboutToRestore(rootName, methodName string) bool { - fullName := rootName + "." + methodName - return allowedMethodsAboutToRestore.Contains(fullName) -} - -// FindMethod extended srvRoot.FindMethod. It returns restoreInProgressError -// for all API calls. -// for use while Juju is restoring a backup. 
-func (r *restoreInProgressRoot) FindMethod(rootName string, version int, methodName string) (rpcreflect.MethodCaller, error) { - _, err := r.MethodFinder.FindMethod(rootName, version, methodName) - if err != nil { - return nil, err - } - return nil, restoreInProgressError -} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/restoring_root_test.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/restoring_root_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/restoring_root_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/restoring_root_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,55 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package apiserver_test - -import ( - _ "github.com/juju/testing/checkers" - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" - - "github.com/juju/juju/apiserver" - "github.com/juju/juju/testing" -) - -type restoreRootSuite struct { - testing.BaseSuite -} - -var _ = gc.Suite(&restoreRootSuite{}) - -func (r *restoreRootSuite) TestFindAllowedMethodWhenPreparing(c *gc.C) { - root := apiserver.TestingAboutToRestoreRoot(nil) - - caller, err := root.FindMethod("Backups", 1, "Restore") - - c.Assert(err, jc.ErrorIsNil) - c.Assert(caller, gc.NotNil) -} - -func (r *restoreRootSuite) TestNothingAllowedMethodWhenPreparing(c *gc.C) { - root := apiserver.TestingRestoreInProgressRoot(nil) - - caller, err := root.FindMethod("Application", 1, "Deploy") - - c.Assert(err, gc.ErrorMatches, "juju restore is in progress - Juju api is off to prevent data loss") - c.Assert(caller, gc.IsNil) -} - -func (r *restoreRootSuite) TestFindDisallowedMethodWhenPreparing(c *gc.C) { - root := apiserver.TestingAboutToRestoreRoot(nil) - - caller, err := root.FindMethod("Application", 1, "Deploy") - - c.Assert(err, gc.ErrorMatches, "juju restore is in progress - Juju functionality is limited to avoid data loss") - 
c.Assert(caller, gc.IsNil) -} - -func (r *restoreRootSuite) TestFindDisallowedMethodWhenRestoring(c *gc.C) { - root := apiserver.TestingRestoreInProgressRoot(nil) - - caller, err := root.FindMethod("Application", 1, "Deploy") - - c.Assert(err, gc.ErrorMatches, "juju restore is in progress - Juju api is off to prevent data loss") - c.Assert(caller, gc.IsNil) -} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/restrict_controller.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/restrict_controller.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/restrict_controller.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/restrict_controller.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,48 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package apiserver + +import ( + "fmt" + + "github.com/juju/errors" + "github.com/juju/utils/set" +) + +// The controllerFacadeNames are the root names that can be accessed +// using a controller-only login. Any facade added here needs to work +// independently of individual models. +var controllerFacadeNames = set.NewStrings( + "AllModelWatcher", + "Cloud", + "Controller", + "MigrationTarget", + "ModelManager", + "UserManager", +) + +// commonFacadeNames holds root names that can be accessed using both +// controller and model connections. +var commonFacadeNames = set.NewStrings( + "Pinger", + "Bundle", + + // TODO(mjs) - bug 1632172 - Exposed for model logins for + // backwards compatibility. Remove once we're sure no non-Juju + // clients care about it. 
+ "HighAvailability", +) + +func controllerFacadesOnly(facadeName, _ string) error { + if !isControllerFacade(facadeName) { + return errors.NewNotSupported(nil, fmt.Sprintf("facade %q not supported for controller API connection", facadeName)) + } + return nil +} + +// isControllerFacade reports whether the given facade name can be accessed +// using the controller connection. +func isControllerFacade(facadeName string) bool { + return controllerFacadeNames.Contains(facadeName) || commonFacadeNames.Contains(facadeName) +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/restrict_controller_test.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/restrict_controller_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/restrict_controller_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/restrict_controller_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,49 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+ +package apiserver_test + +import ( + "github.com/juju/errors" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/apiserver" + "github.com/juju/juju/rpc" + "github.com/juju/juju/testing" +) + +type restrictControllerSuite struct { + testing.BaseSuite + root rpc.Root +} + +var _ = gc.Suite(&restrictControllerSuite{}) + +func (s *restrictControllerSuite) SetUpSuite(c *gc.C) { + s.BaseSuite.SetUpSuite(c) + s.root = apiserver.TestingControllerOnlyRoot() +} + +func (s *restrictControllerSuite) TestAllowed(c *gc.C) { + s.assertMethod(c, "AllModelWatcher", 2, "Next") + s.assertMethod(c, "AllModelWatcher", 2, "Stop") + s.assertMethod(c, "ModelManager", 2, "CreateModel") + s.assertMethod(c, "ModelManager", 2, "ListModels") + s.assertMethod(c, "Pinger", 1, "Ping") + s.assertMethod(c, "Bundle", 1, "GetChanges") + s.assertMethod(c, "HighAvailability", 2, "EnableHA") +} + +func (s *restrictControllerSuite) TestNotAllowed(c *gc.C) { + caller, err := s.root.FindMethod("Client", 1, "FullStatus") + c.Assert(err, gc.ErrorMatches, `facade "Client" not supported for controller API connection`) + c.Assert(errors.IsNotSupported(err), jc.IsTrue) + c.Assert(caller, gc.IsNil) +} + +func (s *restrictControllerSuite) assertMethod(c *gc.C, facadeName string, version int, method string) { + caller, err := s.root.FindMethod(facadeName, version, method) + c.Check(err, jc.ErrorIsNil) + c.Check(caller, gc.NotNil) +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/restricted_root.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/restricted_root.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/restricted_root.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/restricted_root.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,51 +4,36 @@ package apiserver import ( - "github.com/juju/errors" - "github.com/juju/utils/set" - "github.com/juju/juju/rpc" "github.com/juju/juju/rpc/rpcreflect" ) 
-// restrictedRoot restricts API calls to the environment manager and -// user manager when accessed through the root path on the API server. -type restrictedRoot struct { - rpc.MethodFinder -} - -// newRestrictedRoot returns a new restrictedRoot. -func newRestrictedRoot(finder rpc.MethodFinder) *restrictedRoot { +// restrictRoot wraps the provided root so that the check function is +// called on all method lookups. If the check returns an error the API +// call is blocked. +func restrictRoot(root rpc.Root, check func(string, string) error) *restrictedRoot { return &restrictedRoot{ - MethodFinder: finder, + Root: root, + check: check, } } -// The restrictedRootNames are the root names that can be accessed at the root -// of the API server. Any facade added here needs to work across environment -// boundaries. -var restrictedRootNames = set.NewStrings( - "AllModelWatcher", - "Controller", - "Cloud", - "MigrationTarget", - "ModelManager", - "UserManager", -) +type restrictedRoot struct { + rpc.Root + check func(facadeName, methodName string) error +} -// FindMethod returns a not supported error if the rootName is not one -// of the facades available at the server root when there is no active -// environment. -func (r *restrictedRoot) FindMethod(rootName string, version int, methodName string) (rpcreflect.MethodCaller, error) { - // We restrict what facades are advertised at login, filtered on the restricted root names. - // Therefore we can't accurately know if a method is not found unless it resides on one - // of the restricted facades. - if !restrictedRootNames.Contains(rootName) { - return nil, errors.NotSupportedf("logged in to server, no model, %q", rootName) - } - caller, err := r.MethodFinder.FindMethod(rootName, version, methodName) - if err != nil { +// FindMethod implements rpc.Root. 
+func (r *restrictedRoot) FindMethod(facadeName string, version int, methodName string) (rpcreflect.MethodCaller, error) { + if err := r.check(facadeName, methodName); err != nil { return nil, err } - return caller, nil + return r.Root.FindMethod(facadeName, version, methodName) +} + +// restrictAll blocks all API requests, returned a fixed error. +func restrictAll(root rpc.Root, err error) *restrictedRoot { + return restrictRoot(root, func(string, string) error { + return err + }) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/restricted_root_test.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/restricted_root_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/restricted_root_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/restricted_root_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -15,65 +15,45 @@ type restrictedRootSuite struct { testing.BaseSuite - - root rpc.MethodFinder + root rpc.Root } -var _ = gc.Suite(&restrictedRootSuite{}) - func (r *restrictedRootSuite) SetUpTest(c *gc.C) { r.BaseSuite.SetUpTest(c) - r.root = apiserver.TestingRestrictedApiHandler(nil) + r.root = apiserver.TestingRestrictedRoot(func(facade, method string) error { + if facade == "Client" && method == "FullStatus" { + return errors.New("blam") + } + return nil + }) } -func (r *restrictedRootSuite) assertMethodAllowed(c *gc.C, rootName string, version int, method string) { - caller, err := r.root.FindMethod(rootName, version, method) +func (r *restrictedRootSuite) TestAllowedMethod(c *gc.C) { + caller, err := r.root.FindMethod("Client", 1, "WatchAll") c.Check(err, jc.ErrorIsNil) c.Check(caller, gc.NotNil) } -func (r *restrictedRootSuite) TestFindAllowedMethod(c *gc.C) { - r.assertMethodAllowed(c, "AllModelWatcher", 2, "Next") - r.assertMethodAllowed(c, "AllModelWatcher", 2, "Stop") - - r.assertMethodAllowed(c, "ModelManager", 2, "CreateModel") - r.assertMethodAllowed(c, "ModelManager", 2, 
"ListModels") - - r.assertMethodAllowed(c, "UserManager", 1, "AddUser") - r.assertMethodAllowed(c, "UserManager", 1, "SetPassword") - r.assertMethodAllowed(c, "UserManager", 1, "UserInfo") - - r.assertMethodAllowed(c, "Controller", 3, "AllModels") - r.assertMethodAllowed(c, "Controller", 3, "DestroyController") - r.assertMethodAllowed(c, "Controller", 3, "ModelConfig") - r.assertMethodAllowed(c, "Controller", 3, "ListBlockedModels") -} - -func (r *restrictedRootSuite) TestFindDisallowedMethod(c *gc.C) { +func (r *restrictedRootSuite) TestDisallowedMethod(c *gc.C) { caller, err := r.root.FindMethod("Client", 1, "FullStatus") - - c.Assert(err, gc.ErrorMatches, `logged in to server, no model, "Client" not supported`) - c.Assert(errors.IsNotSupported(err), jc.IsTrue) + c.Assert(err, gc.ErrorMatches, "blam") c.Assert(caller, gc.IsNil) } -func (r *restrictedRootSuite) TestNonExistentFacade(c *gc.C) { - caller, err := r.root.FindMethod("SomeFacade", 0, "Method") - - c.Assert(err, gc.ErrorMatches, `logged in to server, no model, "SomeFacade" not supported`) +func (r *restrictedRootSuite) TestMethodNonExistentVersion(c *gc.C) { + caller, err := r.root.FindMethod("Client", 99999999, "WatchAll") + c.Assert(err, gc.ErrorMatches, `unknown version .+`) c.Assert(caller, gc.IsNil) } -func (r *restrictedRootSuite) TestFindNonExistentMethod(c *gc.C) { - caller, err := r.root.FindMethod("ModelManager", 2, "Bar") - - c.Assert(err, gc.ErrorMatches, `no such request - method ModelManager\(2\).Bar is not implemented`) +func (r *restrictedRootSuite) TestNonExistentFacade(c *gc.C) { + caller, err := r.root.FindMethod("SomeFacade", 0, "Method") + c.Assert(err, gc.ErrorMatches, `unknown object type "SomeFacade"`) c.Assert(caller, gc.IsNil) } -func (r *restrictedRootSuite) TestFindMethodNonExistentVersion(c *gc.C) { - caller, err := r.root.FindMethod("UserManager", 99999999, "AddUser") - - c.Assert(err, gc.ErrorMatches, `unknown version \(99999999\) of interface "UserManager"`) +func (r 
*restrictedRootSuite) TestNonExistentMethod(c *gc.C) { + caller, err := r.root.FindMethod("Client", 1, "Bar") + c.Assert(err, gc.ErrorMatches, `no such request.+`) c.Assert(caller, gc.IsNil) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/restrict_model.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/restrict_model.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/restrict_model.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/restrict_model.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,21 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package apiserver + +import ( + "fmt" + + "github.com/juju/errors" +) + +func modelFacadesOnly(facadeName, _ string) error { + if !isModelFacade(facadeName) { + return errors.NewNotSupported(nil, fmt.Sprintf("facade %q not supported for model API connection", facadeName)) + } + return nil +} + +func isModelFacade(facadeName string) bool { + return !controllerFacadeNames.Contains(facadeName) +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/restrict_model_test.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/restrict_model_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/restrict_model_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/restrict_model_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,45 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+ +package apiserver_test + +import ( + "github.com/juju/errors" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/apiserver" + "github.com/juju/juju/rpc" + "github.com/juju/juju/testing" +) + +type restrictModelSuite struct { + testing.BaseSuite + root rpc.Root +} + +var _ = gc.Suite(&restrictModelSuite{}) + +func (s *restrictModelSuite) SetUpSuite(c *gc.C) { + s.BaseSuite.SetUpSuite(c) + s.root = apiserver.TestingModelOnlyRoot() +} + +func (s *restrictModelSuite) TestAllowed(c *gc.C) { + s.assertMethod(c, "Client", 1, "FullStatus") + s.assertMethod(c, "Pinger", 1, "Ping") + s.assertMethod(c, "HighAvailability", 2, "EnableHA") +} + +func (s *restrictModelSuite) TestBlocked(c *gc.C) { + caller, err := s.root.FindMethod("ModelManager", 2, "ListModels") + c.Assert(err, gc.ErrorMatches, `facade "ModelManager" not supported for model API connection`) + c.Assert(errors.IsNotSupported(err), jc.IsTrue) + c.Assert(caller, gc.IsNil) +} + +func (s *restrictModelSuite) assertMethod(c *gc.C, facadeName string, version int, method string) { + caller, err := s.root.FindMethod(facadeName, version, method) + c.Check(err, jc.ErrorIsNil) + c.Check(caller, gc.NotNil) +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/restrict_restore.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/restrict_restore.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/restrict_restore.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/restrict_restore.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,35 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+ +package apiserver + +import ( + "github.com/juju/errors" + "github.com/juju/utils/set" +) + +var aboutToRestoreError = errors.New("juju restore is in progress - functionality is limited to avoid data loss") +var restoreInProgressError = errors.New("juju restore is in progress - API is disabled to prevent data loss") + +// aboutToRestoreMethodsOnly can be used with restrictRoot to restrict +// the API to the methods allowed when the server is in +// state.RestorePreparing mode. +func aboutToRestoreMethodsOnly(facadeName string, methodName string) error { + fullName := facadeName + "." + methodName + if !allowedMethodsAboutToRestore.Contains(fullName) { + return aboutToRestoreError + } + return nil +} + +var allowedMethodsAboutToRestore = set.NewStrings( + "Client.FullStatus", // for "juju status" + "Client.ModelGet", // for "juju ssh" + "Client.PrivateAddress", // for "juju ssh" + "Client.PublicAddress", // for "juju ssh" + "Client.WatchDebugLog", // for "juju debug-log" + "Backups.Restore", // for "juju backups restore" + "Backups.FinishRestore", // for "juju backups restore" + "Bundle.GetChanges", // for retrieving bundle changes + "Pinger.Ping", // for connection health checks +) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/restrict_restore_test.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/restrict_restore_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/restrict_restore_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/restrict_restore_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,33 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+ +package apiserver_test + +import ( + _ "github.com/juju/testing/checkers" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/apiserver" + "github.com/juju/juju/testing" +) + +type restrictRestoreSuite struct { + testing.BaseSuite +} + +var _ = gc.Suite(&restrictRestoreSuite{}) + +func (r *restrictRestoreSuite) TestAllowed(c *gc.C) { + root := apiserver.TestingAboutToRestoreRoot() + caller, err := root.FindMethod("Backups", 1, "Restore") + c.Assert(err, jc.ErrorIsNil) + c.Assert(caller, gc.NotNil) +} + +func (r *restrictRestoreSuite) TestNotAllowed(c *gc.C) { + root := apiserver.TestingAboutToRestoreRoot() + caller, err := root.FindMethod("Application", 1, "Deploy") + c.Assert(err, gc.ErrorMatches, "juju restore is in progress - functionality is limited to avoid data loss") + c.Assert(caller, gc.IsNil) +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/restrict_upgrades.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/restrict_upgrades.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/restrict_upgrades.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/restrict_upgrades.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,52 @@ +// Copyright 2014 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+ +package apiserver + +import ( + "github.com/juju/utils/set" + + "github.com/juju/juju/apiserver/params" +) + +func upgradeMethodsOnly(facadeName, methodName string) error { + if !IsMethodAllowedDuringUpgrade(facadeName, methodName) { + return params.UpgradeInProgressError + } + return nil +} + +func IsMethodAllowedDuringUpgrade(facadeName, methodName string) bool { + methods, ok := allowedMethodsDuringUpgrades[facadeName] + if !ok { + return false + } + return methods.Contains(methodName) +} + +// allowedMethodsDuringUpgrades stores api calls +// that are not blocked during the upgrade process +// as well as their respective facade names. +// When needed, at some future point, this solution +// will need to be adjusted to cater for different +// facade versions as well. +var allowedMethodsDuringUpgrades = map[string]set.Strings{ + "Client": set.NewStrings( + "FullStatus", // for "juju status" + "FindTools", // for "juju upgrade-juju", before we can reset upgrade to re-run + "AbortCurrentUpgrade", // for "juju upgrade-juju", so that we can reset upgrade to re-run + + ), + "SSHClient": set.NewStrings( // allow all SSH client related calls + "PublicAddress", + "PrivateAddress", + "PublicKeys", + "Proxy", + ), + "Pinger": set.NewStrings( + "Ping", + ), + "Backups": set.NewStrings( + "FinishRestore", + ), +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/restrict_upgrades_test.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/restrict_upgrades_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/restrict_upgrades_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/restrict_upgrades_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,41 @@ +// Copyright 2014 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+ +package apiserver_test + +import ( + "github.com/juju/errors" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/apiserver" + "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/testing" +) + +type restrictUpgradesSuite struct { + testing.BaseSuite +} + +var _ = gc.Suite(&restrictUpgradesSuite{}) + +func (r *restrictUpgradesSuite) TestAllowedMethods(c *gc.C) { + root := apiserver.TestingUpgradingRoot(nil) + checkAllowed := func(facade, method string) { + caller, err := root.FindMethod(facade, 1, method) + c.Check(err, jc.ErrorIsNil) + c.Check(caller, gc.NotNil) + } + checkAllowed("Client", "FullStatus") + checkAllowed("Client", "AbortCurrentUpgrade") + checkAllowed("SSHClient", "PublicAddress") + checkAllowed("SSHClient", "Proxy") + checkAllowed("Pinger", "Ping") +} + +func (r *restrictUpgradesSuite) TestFindDisallowedMethod(c *gc.C) { + root := apiserver.TestingUpgradingRoot(nil) + caller, err := root.FindMethod("Client", 1, "ModelSet") + c.Assert(errors.Cause(err), gc.Equals, params.UpgradeInProgressError) + c.Assert(caller, gc.IsNil) +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/root.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/root.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/root.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/root.go 2016-10-13 14:31:49.000000000 +0000 @@ -14,7 +14,7 @@ "github.com/juju/juju/apiserver/common" "github.com/juju/juju/apiserver/facade" "github.com/juju/juju/apiserver/params" - "github.com/juju/juju/core/description" + "github.com/juju/juju/permission" "github.com/juju/juju/rpc" "github.com/juju/juju/rpc/rpcreflect" "github.com/juju/juju/state" @@ -41,29 +41,35 @@ } // apiHandler represents a single client's connection to the state -// after it has logged in. It contains an rpc.MethodFinder which it -// uses to dispatch Api calls appropriately. +// after it has logged in. 
It contains an rpc.Root which it +// uses to dispatch API calls appropriately. type apiHandler struct { state *state.State rpcConn *rpc.Conn resources *common.Resources entity state.Entity + // An empty modelUUID means that the user has logged in through the // root of the API server rather than the /model/:model-uuid/api // path, logins processed with v2 or later will only offer the // user manager and model manager api endpoints from here. modelUUID string + + // serverHost is the host:port of the API server that the client + // connected to. + serverHost string } var _ = (*apiHandler)(nil) -// newApiHandler returns a new apiHandler. -func newApiHandler(srv *Server, st *state.State, rpcConn *rpc.Conn, modelUUID string) (*apiHandler, error) { +// newAPIHandler returns a new apiHandler. +func newAPIHandler(srv *Server, st *state.State, rpcConn *rpc.Conn, modelUUID string, serverHost string) (*apiHandler, error) { r := &apiHandler{ - state: st, - resources: common.NewResources(), - rpcConn: rpcConn, - modelUUID: modelUUID, + state: st, + resources: common.NewResources(), + rpcConn: rpcConn, + modelUUID: modelUUID, + serverHost: serverHost, } if err := r.resources.RegisterNamed("machineID", common.StringResource(srv.tag.Id())); err != nil { return nil, errors.Trace(err) @@ -74,11 +80,6 @@ if err := r.resources.RegisterNamed("logDir", common.StringResource(srv.logDir)); err != nil { return nil, errors.Trace(err) } - if err := r.resources.RegisterNamed("createLocalLoginMacaroon", common.ValueResource{ - srv.authCtxt.userAuth.CreateLocalLoginMacaroon, - }); err != nil { - return nil, errors.Trace(err) - } return r, nil } @@ -137,8 +138,8 @@ objectCache map[objectKey]reflect.Value } -// newApiRoot returns a new apiRoot. -func newApiRoot(st *state.State, resources *common.Resources, authorizer facade.Authorizer) *apiRoot { +// newAPIRoot returns a new apiRoot. 
+func newAPIRoot(st *state.State, resources *common.Resources, authorizer facade.Authorizer) *apiRoot { r := &apiRoot{ state: st, resources: resources, @@ -280,14 +281,14 @@ // which has not logged in. type anonRoot struct { *apiHandler - adminApis map[int]interface{} + adminAPIs map[int]interface{} } // NewAnonRoot creates a new AnonRoot which dispatches to the given Admin API implementation. -func newAnonRoot(h *apiHandler, adminApis map[int]interface{}) *anonRoot { +func newAnonRoot(h *apiHandler, adminAPIs map[int]interface{}) *anonRoot { r := &anonRoot{ apiHandler: h, - adminApis: adminApis, + adminAPIs: adminAPIs, } return r } @@ -299,7 +300,7 @@ Version: version, } } - if api, ok := r.adminApis[version]; ok { + if api, ok := r.adminAPIs[version]; ok { return rpcreflect.ValueOf(reflect.ValueOf(api)).FindMethod(rootName, 0, methodName) } return nil, &rpc.RequestError{ @@ -344,47 +345,27 @@ return r.entity.Tag() } +// ConnectedModel returns the UUID of the model authenticated +// against. It's possible for it to be empty if the login was made +// directly to the root of the API instead of a model endpoint, but +// that method is deprecated. +func (r *apiHandler) ConnectedModel() string { + return r.modelUUID +} + // GetAuthEntity returns the authenticated entity. func (r *apiHandler) GetAuthEntity() state.Entity { return r.entity } // HasPermission returns true if the logged in user can perform on . 
-func (r *apiHandler) HasPermission(operation description.Access, target names.Tag) (bool, error) { - return hasPermission(r.state.UserAccess, r.entity.Tag(), operation, target) +func (r *apiHandler) HasPermission(operation permission.Access, target names.Tag) (bool, error) { + return common.HasPermission(r.state.UserAccess, r.entity.Tag(), operation, target) } -type userAccessFunc func(names.UserTag, names.Tag) (description.UserAccess, error) - -func hasPermission(userGetter userAccessFunc, entity names.Tag, - operation description.Access, target names.Tag) (bool, error) { - validForKind := false - - switch operation { - case description.LoginAccess, description.AddModelAccess, description.SuperuserAccess: - validForKind = target.Kind() == names.ControllerTagKind - case description.ReadAccess, description.WriteAccess, description.AdminAccess: - validForKind = target.Kind() == names.ModelTagKind - } - if !validForKind { - return false, nil - } - - userTag, ok := entity.(names.UserTag) - if !ok { - return false, errors.NotValidf("obtaining permission for subject kind %q", entity.Kind()) - } - - user, err := userGetter(userTag, target) - if err != nil { - return false, errors.Annotatef(err, "while obtaining %s user", target.Kind()) - } - modelPermission := user.Access.EqualOrGreaterModelAccessThan(operation) && target.Kind() == names.ModelTagKind - controllerPermission := user.Access.EqualOrGreaterControllerAccessThan(operation) && target.Kind() == names.ControllerTagKind - if !controllerPermission && !modelPermission { - return false, nil - } - return true, nil +// UserHasPermission returns true if the passed in user can perform on . 
+func (r *apiHandler) UserHasPermission(user names.UserTag, operation permission.Access, target names.Tag) (bool, error) { + return common.HasPermission(r.state.UserAccess, user, operation, target) } // DescribeFacades returns the list of available Facades and their Versions diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/root_test.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/root_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/root_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/root_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -9,7 +9,7 @@ "sync" "time" - "github.com/juju/errors" + jujutesting "github.com/juju/testing" jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" "gopkg.in/juju/names.v2" @@ -17,7 +17,6 @@ "github.com/juju/juju/apiserver" "github.com/juju/juju/apiserver/common" "github.com/juju/juju/apiserver/facade" - "github.com/juju/juju/core/description" "github.com/juju/juju/rpc/rpcreflect" "github.com/juju/juju/state" "github.com/juju/juju/testing" @@ -34,7 +33,7 @@ action := func() { close(triggered) } - clock := testing.NewClock(time.Now()) + clock := jujutesting.NewClock(time.Now()) timeout := apiserver.NewPingTimeout(action, clock, 50*time.Millisecond) for i := 0; i < 2; i++ { waitAlarm(c, clock) @@ -63,7 +62,7 @@ action := func() { close(triggered) } - clock := testing.NewClock(time.Now()) + clock := jujutesting.NewClock(time.Now()) timeout := apiserver.NewPingTimeout(action, clock, 20*time.Millisecond) waitAlarm(c, clock) @@ -78,7 +77,7 @@ } } -func waitAlarm(c *gc.C, clock *testing.Clock) { +func waitAlarm(c *gc.C, clock *jujutesting.Clock) { select { case <-time.After(testing.LongWait): c.Fatalf("alarm never set") @@ -119,7 +118,7 @@ var _ = gc.Suite(&rootSuite{}) func (r *rootSuite) TestFindMethodUnknownFacade(c *gc.C) { - root := apiserver.TestingApiRoot(nil) + root := apiserver.TestingAPIRoot(nil) caller, err := 
root.FindMethod("unknown-testing-facade", 0, "Method") c.Check(caller, gc.IsNil) c.Check(err, gc.FitsTypeOf, (*rpcreflect.CallNotImplementedError)(nil)) @@ -127,7 +126,7 @@ } func (r *rootSuite) TestFindMethodUnknownVersion(c *gc.C) { - srvRoot := apiserver.TestingApiRoot(nil) + srvRoot := apiserver.TestingAPIRoot(nil) defer common.Facades.Discard("my-testing-facade", 0) myGoodFacade := func( *state.State, facade.Resources, facade.Authorizer, @@ -144,7 +143,7 @@ } func (r *rootSuite) TestFindMethodEnsuresTypeMatch(c *gc.C) { - srvRoot := apiserver.TestingApiRoot(nil) + srvRoot := apiserver.TestingAPIRoot(nil) defer common.Facades.Discard("my-testing-facade", 0) defer common.Facades.Discard("my-testing-facade", 1) defer common.Facades.Discard("my-testing-facade", 2) @@ -207,7 +206,7 @@ } func (r *rootSuite) TestFindMethodCachesFacades(c *gc.C) { - srvRoot := apiserver.TestingApiRoot(nil) + srvRoot := apiserver.TestingAPIRoot(nil) defer common.Facades.Discard("my-counting-facade", 0) defer common.Facades.Discard("my-counting-facade", 1) var count int64 @@ -243,7 +242,7 @@ } func (r *rootSuite) TestFindMethodCachesFacadesWithId(c *gc.C) { - srvRoot := apiserver.TestingApiRoot(nil) + srvRoot := apiserver.TestingAPIRoot(nil) defer common.Facades.Discard("my-counting-facade", 0) var count int64 // like newCounter, but also tracks the "id" that was requested for @@ -275,7 +274,7 @@ } func (r *rootSuite) TestFindMethodCacheRaceSafe(c *gc.C) { - srvRoot := apiserver.TestingApiRoot(nil) + srvRoot := apiserver.TestingAPIRoot(nil) defer common.Facades.Discard("my-counting-facade", 0) var count int64 newIdCounter := func(context facade.Context) (facade.Facade, error) { @@ -326,7 +325,7 @@ } func (r *rootSuite) TestFindMethodHandlesInterfaceTypes(c *gc.C) { - srvRoot := apiserver.TestingApiRoot(nil) + srvRoot := apiserver.TestingAPIRoot(nil) defer common.Facades.Discard("my-interface-facade", 0) defer common.Facades.Discard("my-interface-facade", 1) 
common.RegisterStandardFacade("my-interface-facade", 0, func( @@ -388,7 +387,7 @@ entity := &stubStateEntity{tag} - apiHandler := apiserver.ApiHandlerWithEntity(entity) + apiHandler := apiserver.APIHandlerWithEntity(entity) authorized := apiHandler.AuthOwner(tag) c.Check(authorized, jc.IsTrue) @@ -402,111 +401,3 @@ c.Check(authorized, jc.IsFalse) } - -type fakeUserAccess struct { - subjects []names.UserTag - objects []names.Tag - user description.UserAccess - err error -} - -func (f *fakeUserAccess) call(subject names.UserTag, object names.Tag) (description.UserAccess, error) { - f.subjects = append(f.subjects, subject) - f.objects = append(f.objects, object) - return f.user, f.err -} - -func (r *rootSuite) TestNoUserTagLacksPermission(c *gc.C) { - nonUser := names.NewModelTag("beef1beef1-0000-0000-000011112222") - target := names.NewModelTag("beef1beef2-0000-0000-000011112222") - hasPermission, err := apiserver.HasPermission((&fakeUserAccess{}).call, nonUser, description.ReadAccess, target) - c.Assert(hasPermission, jc.IsFalse) - c.Assert(err, gc.ErrorMatches, "obtaining permission for subject kind \"model\" not valid") -} - -func (r *rootSuite) TestHasPermission(c *gc.C) { - testCases := []struct { - title string - userGetterAccess description.Access - user names.UserTag - target names.Tag - access description.Access - expected bool - }{ - { - title: "user has lesser permissions than required", - userGetterAccess: description.ReadAccess, - user: names.NewUserTag("validuser"), - target: names.NewModelTag("beef1beef2-0000-0000-000011112222"), - access: description.WriteAccess, - expected: false, - }, - { - title: "user has equal permission than required", - userGetterAccess: description.WriteAccess, - user: names.NewUserTag("validuser"), - target: names.NewModelTag("beef1beef2-0000-0000-000011112222"), - access: description.WriteAccess, - expected: true, - }, - { - title: "user has greater permission than required", - userGetterAccess: description.AdminAccess, - 
user: names.NewUserTag("validuser"), - target: names.NewModelTag("beef1beef2-0000-0000-000011112222"), - access: description.WriteAccess, - expected: true, - }, - { - title: "user requests model permission on controller", - userGetterAccess: description.AdminAccess, - user: names.NewUserTag("validuser"), - target: names.NewModelTag("beef1beef2-0000-0000-000011112222"), - access: description.AddModelAccess, - expected: false, - }, - { - title: "user requests controller permission on model", - userGetterAccess: description.AdminAccess, - user: names.NewUserTag("validuser"), - target: names.NewControllerTag("beef1beef2-0000-0000-000011112222"), - access: description.AdminAccess, // notice user has this permission for model. - expected: false, - }, - { - title: "controller permissions also work", - userGetterAccess: description.AddModelAccess, - user: names.NewUserTag("validuser"), - target: names.NewControllerTag("beef1beef2-0000-0000-000011112222"), - access: description.AddModelAccess, - expected: true, - }, - } - for i, t := range testCases { - userGetter := &fakeUserAccess{ - user: description.UserAccess{ - Access: t.userGetterAccess, - }} - c.Logf("HasPermission test n %d: %s", i, t.title) - hasPermission, err := apiserver.HasPermission(userGetter.call, t.user, t.access, t.target) - c.Assert(hasPermission, gc.Equals, t.expected) - c.Assert(err, jc.ErrorIsNil) - } - -} - -func (r *rootSuite) TestUserGetterErrorReturns(c *gc.C) { - user := names.NewUserTag("validuser") - target := names.NewModelTag("beef1beef2-0000-0000-000011112222") - userGetter := &fakeUserAccess{ - user: description.UserAccess{}, - err: errors.NotFoundf("a user"), - } - hasPermission, err := apiserver.HasPermission(userGetter.call, user, description.ReadAccess, target) - c.Assert(hasPermission, jc.IsFalse) - c.Assert(err, gc.ErrorMatches, "while obtaining model user: a user not found") - c.Assert(userGetter.subjects, gc.HasLen, 1) - c.Assert(userGetter.subjects[0], gc.DeepEquals, user) - 
c.Assert(userGetter.objects, gc.HasLen, 1) - c.Assert(userGetter.objects[0], gc.DeepEquals, target) -} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/server_test.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/server_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/server_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/server_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -15,6 +15,7 @@ "github.com/juju/testing" jc "github.com/juju/testing/checkers" "github.com/juju/utils" + "github.com/juju/utils/clock" "golang.org/x/net/websocket" gc "gopkg.in/check.v1" "gopkg.in/juju/names.v2" @@ -31,14 +32,15 @@ "github.com/juju/juju/apiserver/params" "github.com/juju/juju/cert" "github.com/juju/juju/controller" - "github.com/juju/juju/core/description" jujutesting "github.com/juju/juju/juju/testing" "github.com/juju/juju/mongo" "github.com/juju/juju/network" + "github.com/juju/juju/permission" "github.com/juju/juju/state" "github.com/juju/juju/state/presence" coretesting "github.com/juju/juju/testing" "github.com/juju/juju/testing/factory" + "github.com/juju/juju/worker/workertest" ) var fastDialOpts = api.DialOpts{} @@ -52,8 +54,8 @@ func (s *serverSuite) TestStop(c *gc.C) { // Start our own instance of the server so we have // a handle on it to stop it. - srv := newServer(c, s.State) - defer srv.Stop() + _, srv := newServer(c, s.State) + defer assertStop(c, srv) machine, password := s.Factory.MakeMachineReturningPassword( c, &factory.MachineParams{Nonce: "fake_nonce"}) @@ -97,8 +99,8 @@ // Start our own instance of the server listening on // both IPv4 and IPv6 localhost addresses and an ephemeral port. 
- srv := newServer(c, s.State) - defer srv.Stop() + _, srv := newServer(c, s.State) + defer assertStop(c, srv) port := srv.Addr().Port portString := fmt.Sprintf("%d", port) @@ -196,7 +198,7 @@ Timeout: 5 * time.Second, SocketTimeout: 5 * time.Second, } - st, err := state.Open(s.State.ModelTag(), mongoInfo, dialOpts, nil) + st, err := state.Open(s.State.ModelTag(), s.State.ControllerTag(), mongoInfo, dialOpts, nil) c.Assert(err, gc.IsNil) defer st.Close() @@ -207,7 +209,7 @@ // Creating the server should succeed because it doesn't // access the state (note that newServer does not log in, // which *would* access the state). - srv := newServer(c, st) + _, srv := newServer(c, st) srv.Stop() } @@ -278,8 +280,8 @@ func (s *serverSuite) TestMinTLSVersion(c *gc.C) { loggo.GetLogger("juju.apiserver").SetLogLevel(loggo.TRACE) - srv := newServer(c, s.State) - defer srv.Stop() + _, srv := newServer(c, s.State) + defer assertStop(c, srv) // We have to use 'localhost' because that is what the TLS cert says. addr := fmt.Sprintf("localhost:%d", srv.Addr().Port) @@ -291,18 +293,25 @@ } func (s *serverSuite) TestNonCompatiblePathsAre404(c *gc.C) { - // we expose the API at '/' for compatibility, and at '/ModelUUID/api' - // for the correct location, but other Paths should fail. + // We expose the API at '/api', '/' (controller-only), and at '/ModelUUID/api' + // for the correct location, but other paths should fail. loggo.GetLogger("juju.apiserver").SetLogLevel(loggo.TRACE) - srv := newServer(c, s.State) - defer srv.Stop() + _, srv := newServer(c, s.State) + defer assertStop(c, srv) // We have to use 'localhost' because that is what the TLS cert says. 
addr := fmt.Sprintf("localhost:%d", srv.Addr().Port) - // '/' should be fine - conn, err := dialWebsocket(c, addr, "/", 0) + + // '/api' should be fine + conn, err := dialWebsocket(c, addr, "/api", 0) + c.Assert(err, jc.ErrorIsNil) + conn.Close() + + // '/' should be fine + conn, err = dialWebsocket(c, addr, "/", 0) c.Assert(err, jc.ErrorIsNil) conn.Close() + // '/model/MODELUUID/api' should be fine conn, err = dialWebsocket(c, addr, "/model/dead-beef-123456/api", 0) c.Assert(err, jc.ErrorIsNil) @@ -318,8 +327,8 @@ } func (s *serverSuite) TestNoBakeryWhenNoIdentityURL(c *gc.C) { - srv := newServer(c, s.State) - defer srv.Stop() + _, srv := newServer(c, s.State) + defer assertStop(c, srv) // By default, when there is no identity location, no // bakery service or macaroon is created. _, err := apiserver.ServerMacaroon(srv) @@ -349,8 +358,8 @@ } func (s *macaroonServerSuite) TestServerBakery(c *gc.C) { - srv := newServer(c, s.State) - defer srv.Stop() + _, srv := newServer(c, s.State) + defer assertStop(c, srv) m, err := apiserver.ServerMacaroon(srv) c.Assert(err, gc.IsNil) bsvc, err := apiserver.ServerBakeryService(srv) @@ -400,8 +409,8 @@ } func (s *macaroonServerWrongPublicKeySuite) TestDischargeFailsWithWrongPublicKey(c *gc.C) { - srv := newServer(c, s.State) - defer srv.Stop() + _, srv := newServer(c, s.State) + defer assertStop(c, srv) m, err := apiserver.ServerMacaroon(srv) c.Assert(err, gc.IsNil) m = m.Clone() @@ -440,65 +449,146 @@ c.Assert(err, jc.ErrorIsNil) cu, err := s.State.UserAccess(user, ctag) c.Assert(err, jc.ErrorIsNil) - c.Assert(cu.Access, gc.Equals, description.LoginAccess) + c.Assert(cu.Access, gc.Equals, permission.LoginAccess) return u, ctag } -func (s *serverSuite) TestApiHandlerHasPermissionLogin(c *gc.C) { +func (s *serverSuite) TestAPIHandlerHasPermissionLogin(c *gc.C) { u, ctag := s.bootstrapHasPermissionTest(c) - handler, _ := apiserver.TestingApiHandlerWithEntity(c, s.State, s.State, u) + handler, _ :=
apiserver.TestingAPIHandlerWithEntity(c, s.State, s.State, u) defer handler.Kill() - apiserver.AssertHasPermission(c, handler, description.LoginAccess, ctag, true) - apiserver.AssertHasPermission(c, handler, description.AddModelAccess, ctag, false) - apiserver.AssertHasPermission(c, handler, description.SuperuserAccess, ctag, false) + apiserver.AssertHasPermission(c, handler, permission.LoginAccess, ctag, true) + apiserver.AssertHasPermission(c, handler, permission.AddModelAccess, ctag, false) + apiserver.AssertHasPermission(c, handler, permission.SuperuserAccess, ctag, false) } -func (s *serverSuite) TestApiHandlerHasPermissionAdmodel(c *gc.C) { +func (s *serverSuite) TestAPIHandlerHasPermissionAdmodel(c *gc.C) { u, ctag := s.bootstrapHasPermissionTest(c) user := u.UserTag() - handler, _ := apiserver.TestingApiHandlerWithEntity(c, s.State, s.State, u) + handler, _ := apiserver.TestingAPIHandlerWithEntity(c, s.State, s.State, u) defer handler.Kill() - ua, err := s.State.SetUserAccess(user, ctag, description.AddModelAccess) + ua, err := s.State.SetUserAccess(user, ctag, permission.AddModelAccess) c.Assert(err, jc.ErrorIsNil) - c.Assert(ua.Access, gc.Equals, description.AddModelAccess) + c.Assert(ua.Access, gc.Equals, permission.AddModelAccess) - apiserver.AssertHasPermission(c, handler, description.LoginAccess, ctag, true) - apiserver.AssertHasPermission(c, handler, description.AddModelAccess, ctag, true) - apiserver.AssertHasPermission(c, handler, description.SuperuserAccess, ctag, false) + apiserver.AssertHasPermission(c, handler, permission.LoginAccess, ctag, true) + apiserver.AssertHasPermission(c, handler, permission.AddModelAccess, ctag, true) + apiserver.AssertHasPermission(c, handler, permission.SuperuserAccess, ctag, false) } -func (s *serverSuite) TestApiHandlerHasPermissionSuperUser(c *gc.C) { +func (s *serverSuite) TestAPIHandlerHasPermissionSuperUser(c *gc.C) { u, ctag := s.bootstrapHasPermissionTest(c) user := u.UserTag() - handler, _ := 
apiserver.TestingApiHandlerWithEntity(c, s.State, s.State, u) + handler, _ := apiserver.TestingAPIHandlerWithEntity(c, s.State, s.State, u) defer handler.Kill() - ua, err := s.State.SetUserAccess(user, ctag, description.SuperuserAccess) + ua, err := s.State.SetUserAccess(user, ctag, permission.SuperuserAccess) c.Assert(err, jc.ErrorIsNil) - c.Assert(ua.Access, gc.Equals, description.SuperuserAccess) + c.Assert(ua.Access, gc.Equals, permission.SuperuserAccess) + + apiserver.AssertHasPermission(c, handler, permission.LoginAccess, ctag, true) + apiserver.AssertHasPermission(c, handler, permission.AddModelAccess, ctag, true) + apiserver.AssertHasPermission(c, handler, permission.SuperuserAccess, ctag, true) +} - apiserver.AssertHasPermission(c, handler, description.LoginAccess, ctag, true) - apiserver.AssertHasPermission(c, handler, description.AddModelAccess, ctag, true) - apiserver.AssertHasPermission(c, handler, description.SuperuserAccess, ctag, true) +func (s *serverSuite) TestAPIHandlerTeardownInitialEnviron(c *gc.C) { + s.checkAPIHandlerTeardown(c, s.State, s.State) } -func (s *serverSuite) TestApiHandlerTeardownInitialEnviron(c *gc.C) { - s.checkApiHandlerTeardown(c, s.State, s.State) +func (s *serverSuite) TestAPIHandlerTeardownOtherEnviron(c *gc.C) { + otherState := s.Factory.MakeModel(c, nil) + defer otherState.Close() + s.checkAPIHandlerTeardown(c, s.State, otherState) } -func (s *serverSuite) TestApiHandlerTeardownOtherEnviron(c *gc.C) { +func (s *serverSuite) TestAPIHandlerConnectedModel(c *gc.C) { otherState := s.Factory.MakeModel(c, nil) defer otherState.Close() - s.checkApiHandlerTeardown(c, s.State, otherState) + handler, _ := apiserver.TestingAPIHandler(c, s.State, otherState) + c.Check(handler.ConnectedModel(), gc.Equals, otherState.ModelUUID()) +} + +func (s *serverSuite) TestClosesStateFromPool(c *gc.C) { + pool := state.NewStatePool(s.State) + cfg := defaultServerConfig(c) + cfg.StatePool = pool + _, server := newServerWithConfig(c, s.State, cfg) 
+ defer assertStop(c, server) + + w := s.State.WatchModels() + defer workertest.CleanKill(c, w) + // Initial change. + assertChange(c, w) + + otherState := s.Factory.MakeModel(c, nil) + defer otherState.Close() + + s.State.StartSync() + // This ensures that the model exists for more than one of the + // time slices that the watcher uses for coalescing + // events. Without it the model appears and disappears quickly + // enough that it never generates a change from WatchModels. + // Many Bothans died to bring us this information. + assertChange(c, w) + + model, err := otherState.Model() + c.Assert(err, jc.ErrorIsNil) + + // Ensure the model's in the pool but not referenced. + st, err := pool.Get(otherState.ModelUUID()) + c.Assert(err, jc.ErrorIsNil) + err = pool.Release(otherState.ModelUUID()) + c.Assert(err, jc.ErrorIsNil) + + // Make a request for the model API to check it releases + // state back into the pool once the connection is closed. + addr := fmt.Sprintf("localhost:%d", server.Addr().Port) + conn, err := dialWebsocket(c, addr, fmt.Sprintf("/model/%s/api", st.ModelUUID()), 0) + c.Assert(err, jc.ErrorIsNil) + conn.Close() + + // When the model goes away the API server should ensure st gets closed. + err = model.Destroy() + c.Assert(err, jc.ErrorIsNil) + + s.State.StartSync() + assertStateBecomesClosed(c, st) +} + +func assertChange(c *gc.C, w state.StringsWatcher) { + select { + case <-w.Changes(): + return + case <-time.After(coretesting.LongWait): + c.Fatalf("no changes on watcher") + } +} + +func assertStateBecomesClosed(c *gc.C, st *state.State) { + // This is gross but I can't see any other way to check for + // closedness outside the state package. + checkModel := func() { + attempt := utils.AttemptStrategy{ + Total: coretesting.LongWait, + Delay: coretesting.ShortWait, + } + for a := attempt.Start(); a.Next(); { + // This will panic once the state is closed. + _, _ = st.Model() + } + // If we got here then st is still open. 
+ st.Close() + } + c.Assert(checkModel, gc.PanicMatches, "Session already closed") } -func (s *serverSuite) checkApiHandlerTeardown(c *gc.C, srvSt, st *state.State) { - handler, resources := apiserver.TestingApiHandler(c, srvSt, st) +func (s *serverSuite) checkAPIHandlerTeardown(c *gc.C, srvSt, st *state.State) { + handler, resources := apiserver.TestingAPIHandler(c, srvSt, st) resource := new(fakeResource) resources.Register(resource) @@ -507,17 +597,50 @@ c.Assert(resource.stopped, jc.IsTrue) } -// newServer returns a new running API server. -func newServer(c *gc.C, st *state.State) *apiserver.Server { - listener, err := net.Listen("tcp", ":0") - c.Assert(err, jc.ErrorIsNil) - srv, err := apiserver.NewServer(st, listener, apiserver.ServerConfig{ - Cert: []byte(coretesting.ServerCert), - Key: []byte(coretesting.ServerKey), +// defaultServerConfig returns the default configuration for starting a test server. +func defaultServerConfig(c *gc.C) apiserver.ServerConfig { + return apiserver.ServerConfig{ + Clock: clock.WallClock, + Cert: coretesting.ServerCert, + Key: coretesting.ServerKey, Tag: names.NewMachineTag("0"), LogDir: c.MkDir(), NewObserver: func() observer.Observer { return &fakeobserver.Instance{} }, - }) + AutocertURL: "https://0.1.2.3/no-autocert-here", + } +} + +// newServer returns a new running API server using the given state. +// The pool may be nil, in which case a pool using the given state +// will be used. +// +// It returns information suitable for connecting to the state +// without any authentication information or model tag, and the server +// that's been started. +func newServer(c *gc.C, st *state.State) (*api.Info, *apiserver.Server) { + return newServerWithConfig(c, st, defaultServerConfig(c)) +} + +// newServerWithConfig is like newServer except that the entire +// server configuration may be specified (see defaultServerConfig +// for a suitable starting point). 
+func newServerWithConfig(c *gc.C, st *state.State, cfg apiserver.ServerConfig) (*api.Info, *apiserver.Server) { + // Note that we can't listen on localhost here because TestAPIServerCanListenOnBothIPv4AndIPv6 assumes + // that we listen on IPv6 too, and listening on localhost does not do that. + listener, err := net.Listen("tcp", ":0") c.Assert(err, jc.ErrorIsNil) - return srv + srv, err := apiserver.NewServer(st, listener, cfg) + c.Assert(err, jc.ErrorIsNil) + return &api.Info{ + Addrs: []string{fmt.Sprintf("localhost:%d", srv.Addr().Port)}, + CACert: coretesting.CACert, + }, srv +} + +type stopper interface { + Stop() error +} + +func assertStop(c *gc.C, stopper stopper) { + c.Assert(stopper.Stop(), gc.IsNil) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/spaces/spaces.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/spaces/spaces.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/spaces/spaces.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/spaces/spaces.go 2016-10-13 14:31:49.000000000 +0000 @@ -10,6 +10,7 @@ "github.com/juju/juju/apiserver/common/networkingcommon" "github.com/juju/juju/apiserver/facade" "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/permission" "github.com/juju/juju/state" ) @@ -53,11 +54,27 @@ // CreateSpaces creates a new Juju network space, associating the // specified subnets with it (optional; can be empty). func (api *spacesAPI) CreateSpaces(args params.CreateSpacesParams) (results params.ErrorResults, err error) { + isAdmin, err := api.authorizer.HasPermission(permission.AdminAccess, api.backing.ModelTag()) + if err != nil && !errors.IsNotFound(err) { + return results, errors.Trace(err) + } + if !isAdmin { + return results, common.ServerError(common.ErrPerm) + } + return networkingcommon.CreateSpaces(api.backing, args) } // ListSpaces lists all the available spaces and their associated subnets. 
func (api *spacesAPI) ListSpaces() (results params.ListSpacesResults, err error) { + canRead, err := api.authorizer.HasPermission(permission.ReadAccess, api.backing.ModelTag()) + if err != nil && !errors.IsNotFound(err) { + return results, errors.Trace(err) + } + if !canRead { + return results, common.ServerError(common.ErrPerm) + } + err = networkingcommon.SupportsSpaces(api.backing) if err != nil { return results, common.ServerError(errors.Trace(err)) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/sshclient/facade.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/sshclient/facade.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/sshclient/facade.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/sshclient/facade.go 2016-10-13 14:31:49.000000000 +0000 @@ -6,10 +6,13 @@ package sshclient import ( + "github.com/juju/errors" + "github.com/juju/juju/apiserver/common" "github.com/juju/juju/apiserver/facade" "github.com/juju/juju/apiserver/params" "github.com/juju/juju/network" + "github.com/juju/juju/permission" ) func init() { @@ -18,7 +21,8 @@ // Facade implements the API required by the sshclient worker. type Facade struct { - backend Backend + backend Backend + authorizer facade.Authorizer } // New returns a new API facade for the sshclient worker. @@ -26,12 +30,27 @@ if !authorizer.AuthClient() { return nil, common.ErrPerm } - return &Facade{backend: backend}, nil + return &Facade{backend: backend, authorizer: authorizer}, nil +} + +func (facade *Facade) checkIsModelAdmin() error { + isModelAdmin, err := facade.authorizer.HasPermission(permission.AdminAccess, facade.backend.ModelTag()) + if err != nil { + return errors.Trace(err) + } + if !isModelAdmin { + return common.ErrPerm + } + return nil } // PublicAddress reports the preferred public network address for one // or more entities. Machines and units are suppored. 
func (facade *Facade) PublicAddress(args params.Entities) (params.SSHAddressResults, error) { + if err := facade.checkIsModelAdmin(); err != nil { + return params.SSHAddressResults{}, errors.Trace(err) + } + getter := func(m SSHMachine) (network.Address, error) { return m.PublicAddress() } return facade.getAddresses(args, getter) } @@ -39,6 +58,10 @@ // PrivateAddress reports the preferred private network address for one or // more entities. Machines and units are supported. func (facade *Facade) PrivateAddress(args params.Entities) (params.SSHAddressResults, error) { + if err := facade.checkIsModelAdmin(); err != nil { + return params.SSHAddressResults{}, errors.Trace(err) + } + getter := func(m SSHMachine) (network.Address, error) { return m.PrivateAddress() } return facade.getAddresses(args, getter) } @@ -68,6 +91,10 @@ // PublicKeys returns the public SSH hosts for one or more // entities. Machines and units are supported. func (facade *Facade) PublicKeys(args params.Entities) (params.SSHPublicKeysResults, error) { + if err := facade.checkIsModelAdmin(); err != nil { + return params.SSHPublicKeysResults{}, errors.Trace(err) + } + out := params.SSHPublicKeysResults{ Results: make([]params.SSHPublicKeysResult, len(args.Entities)), } @@ -90,6 +117,9 @@ // Proxy returns whether SSH connections should be proxied through the // controller hosts for the model associated with the API connection. 
func (facade *Facade) Proxy() (params.SSHProxyResult, error) { + if err := facade.checkIsModelAdmin(); err != nil { + return params.SSHProxyResult{}, errors.Trace(err) + } config, err := facade.backend.ModelConfig() if err != nil { return params.SSHProxyResult{}, err diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/sshclient/facade_test.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/sshclient/facade_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/sshclient/facade_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/sshclient/facade_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -43,6 +43,7 @@ s.backend = new(mockBackend) s.authorizer = new(apiservertesting.FakeAuthorizer) s.authorizer.Tag = names.NewUserTag("igor") + s.authorizer.AdminTag = names.NewUserTag("igor") facade, err := sshclient.New(s.backend, nil, s.authorizer) c.Assert(err, jc.ErrorIsNil) s.facade = facade @@ -150,6 +151,10 @@ proxySSH bool } +func (backend *mockBackend) ModelTag() names.ModelTag { + return names.NewModelTag("deadbeef-2f18-4fd2-967d-db9663db7bea") +} + func (backend *mockBackend) ModelConfig() (*config.Config, error) { backend.stub.AddCall("ModelConfig") attrs := testing.FakeConfig() diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/sshclient/shim.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/sshclient/shim.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/sshclient/shim.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/sshclient/shim.go 2016-10-13 14:31:49.000000000 +0000 @@ -18,6 +18,7 @@ ModelConfig() (*config.Config, error) GetMachineForEntity(tag string) (SSHMachine, error) GetSSHHostKeys(names.MachineTag) (state.SSHHostKeys, error) + ModelTag() names.ModelTag } // SSHMachine specifies the methods on State.Machine of interest to diff -Nru 
juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/storage/base_test.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/storage/base_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/storage/base_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/storage/base_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -50,7 +50,7 @@ func (s *baseStorageSuite) SetUpTest(c *gc.C) { s.BaseSuite.SetUpTest(c) s.resources = common.NewResources() - s.authorizer = testing.FakeAuthorizer{names.NewUserTag("testuser"), true} + s.authorizer = testing.FakeAuthorizer{Tag: names.NewUserTag("admin"), EnvironManager: true} s.calls = []string{} s.state = s.constructState() diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/storage/mock_test.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/storage/mock_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/storage/mock_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/storage/mock_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -50,6 +50,7 @@ watchVolumeAttachment func(names.MachineTag, names.VolumeTag) state.NotifyWatcher watchBlockDevices func(names.MachineTag) state.NotifyWatcher modelName string + modelTag names.ModelTag volume func(tag names.VolumeTag) (state.Volume, error) machineVolumeAttachments func(machine names.MachineTag) ([]state.VolumeAttachment, error) volumeAttachments func(volume names.VolumeTag) ([]state.VolumeAttachment, error) @@ -115,6 +116,10 @@ return st.modelName, nil } +func (st *mockState) ModelTag() names.ModelTag { + return st.modelTag +} + func (st *mockState) AllVolumes() ([]state.Volume, error) { return st.allVolumes() } @@ -204,7 +209,7 @@ } func (m *mockVolume) Status() (status.StatusInfo, error) { - return status.StatusInfo{Status: status.StatusAttached}, nil + return status.StatusInfo{Status: status.Attached}, nil } type mockFilesystem struct { @@ -241,7 
+246,7 @@ } func (m *mockFilesystem) Status() (status.StatusInfo, error) { - return status.StatusInfo{Status: status.StatusAttached}, nil + return status.StatusInfo{Status: status.Attached}, nil } type mockFilesystemAttachment struct { diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/storage/poollist_test.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/storage/poollist_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/storage/poollist_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/storage/poollist_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -255,7 +255,9 @@ func (s *poolSuite) registerProviders(c *gc.C) { common := provider.CommonStorageProviders() - for _, providerType := range common.StorageProviderTypes() { + providerTypes, err := common.StorageProviderTypes() + c.Assert(err, jc.ErrorIsNil) + for _, providerType := range providerTypes { p, err := common.StorageProvider(providerType) c.Assert(err, jc.ErrorIsNil) s.registry.Providers[providerType] = p diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/storage/shim.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/storage/shim.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/storage/shim.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/storage/shim.go 2016-10-13 14:31:49.000000000 +0000 @@ -81,6 +81,9 @@ // ModelName is required for pool functionality. ModelName() (string, error) + // ModelTag is required for model permission checking. + ModelTag() names.ModelTag + // AllVolumes is required for volume functionality. 
AllVolumes() ([]state.Volume, error) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/storage/storage.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/storage/storage.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/storage/storage.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/storage/storage.go 2016-10-13 14:31:49.000000000 +0000 @@ -14,6 +14,7 @@ "github.com/juju/juju/apiserver/common/storagecommon" "github.com/juju/juju/apiserver/facade" "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/permission" "github.com/juju/juju/state" "github.com/juju/juju/status" "github.com/juju/juju/storage" @@ -48,11 +49,35 @@ authorizer: authorizer, }, nil } +func (api *API) checkCanRead() error { + canRead, err := api.authorizer.HasPermission(permission.ReadAccess, api.storage.ModelTag()) + if err != nil { + return errors.Trace(err) + } + if !canRead { + return common.ErrPerm + } + return nil +} + +func (api *API) checkCanWrite() error { + canWrite, err := api.authorizer.HasPermission(permission.WriteAccess, api.storage.ModelTag()) + if err != nil { + return errors.Trace(err) + } + if !canWrite { + return common.ErrPerm + } + return nil +} // StorageDetails retrieves and returns detailed information about desired // storage identified by supplied tags. If specified storage cannot be // retrieved, individual error is returned instead of storage information. func (api *API) StorageDetails(entities params.Entities) (params.StorageDetailsResults, error) { + if err := api.checkCanWrite(); err != nil { + return params.StorageDetailsResults{}, errors.Trace(err) + } results := make([]params.StorageDetailsResult, len(entities.Entities)) for i, entity := range entities.Entities { storageTag, err := names.ParseStorageTag(entity.Tag) @@ -77,6 +102,9 @@ // ListStorageDetails returns storage matching a filter. 
func (api *API) ListStorageDetails(filters params.StorageFilters) (params.StorageDetailsListResults, error) { + if err := api.checkCanRead(); err != nil { + return params.StorageDetailsListResults{}, errors.Trace(err) + } results := params.StorageDetailsListResults{ Results: make([]params.StorageDetailsListResult, len(filters.Filters)), } @@ -204,6 +232,10 @@ func (a *API) ListPools( filters params.StoragePoolFilters, ) (params.StoragePoolsResults, error) { + if err := a.checkCanRead(); err != nil { + return params.StoragePoolsResults{}, errors.Trace(err) + } + results := params.StoragePoolsResults{ Results: make([]params.StoragePoolsResult, len(filters.Filters)), } @@ -220,13 +252,16 @@ func (a *API) listPools(filter params.StoragePoolFilter) ([]params.StoragePool, error) { if err := a.validatePoolListFilter(filter); err != nil { - return nil, err + return nil, errors.Trace(err) } pools, err := a.poolManager.List() if err != nil { - return nil, err + return nil, errors.Trace(err) + } + providers, err := a.registry.StorageProviderTypes() + if err != nil { + return nil, errors.Trace(err) } - providers := a.registry.StorageProviderTypes() matches := buildFilter(filter) results := append( filterPools(pools, matches), @@ -333,6 +368,9 @@ // an independent list of volumes, or an error if the filter is invalid // or the volumes could not be listed. 
func (a *API) ListVolumes(filters params.VolumeFilters) (params.VolumeDetailsListResults, error) { + if err := a.checkCanRead(); err != nil { + return params.VolumeDetailsListResults{}, errors.Trace(err) + } results := params.VolumeDetailsListResults{ Results: make([]params.VolumeDetailsListResult, len(filters.Filters)), } @@ -480,6 +518,10 @@ results := params.FilesystemDetailsListResults{ Results: make([]params.FilesystemDetailsListResult, len(filters.Filters)), } + if err := a.checkCanRead(); err != nil { + return results, errors.Trace(err) + } + for i, filter := range filters.Filters { filesystems, filesystemAttachments, err := filterFilesystems(a.storage, filter) if err != nil { @@ -627,6 +669,10 @@ // instances from being processed. // A "CHANGE" block can block this operation. func (a *API) AddToUnit(args params.StoragesAddParams) (params.ErrorResults, error) { + if err := a.checkCanWrite(); err != nil { + return params.ErrorResults{}, errors.Trace(err) + } + // Check if changes are allowed and the operation may proceed. 
blockChecker := common.NewBlockChecker(a.storage) if err := blockChecker.ChangeAllowed(); err != nil { diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/storage/storage_test.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/storage/storage_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/storage/storage_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/storage/storage_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -84,7 +84,7 @@ c.Assert(found.Results[0].Result, gc.HasLen, 1) wantedDetails := s.createTestStorageDetails() wantedDetails.Kind = params.StorageKindBlock - wantedDetails.Status.Status = status.StatusAttached + wantedDetails.Status.Status = status.Attached c.Assert(found.Results[0].Result[0], jc.DeepEquals, wantedDetails) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/storage/volumelist_test.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/storage/volumelist_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/storage/volumelist_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/storage/volumelist_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -151,7 +151,7 @@ } expected := s.expectedVolumeDetails() expected.Storage.Kind = params.StorageKindBlock - expected.Storage.Status.Status = status.StatusAttached + expected.Storage.Status.Status = status.Attached expected.MachineAttachments[s.machineTag.String()] = params.VolumeAttachmentInfo{ ReadOnly: true, } @@ -177,7 +177,7 @@ } expected := s.expectedVolumeDetails() expected.Storage.Kind = params.StorageKindBlock - expected.Storage.Status.Status = status.StatusAttached + expected.Storage.Status.Status = status.Attached storageAttachmentDetails := expected.Storage.Attachments["unit-mysql-0"] storageAttachmentDetails.Location = filepath.FromSlash("/dev/sdd") expected.Storage.Attachments["unit-mysql-0"] = storageAttachmentDetails diff -Nru 
juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/storageprovisioner/storageprovisioner_test.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/storageprovisioner/storageprovisioner_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/storageprovisioner/storageprovisioner_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/storageprovisioner/storageprovisioner_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -357,7 +357,7 @@ Size: 1024, Provider: "machinescoped", Tags: map[string]string{ - tags.JujuController: testing.ModelTag.Id(), + tags.JujuController: testing.ControllerTag.Id(), tags.JujuModel: testing.ModelTag.Id(), }, Attachment: ¶ms.VolumeAttachmentParams{ @@ -372,7 +372,7 @@ Size: 2048, Provider: "environscoped", Tags: map[string]string{ - tags.JujuController: testing.ModelTag.Id(), + tags.JujuController: testing.ControllerTag.Id(), tags.JujuModel: testing.ModelTag.Id(), }, Attachment: ¶ms.VolumeAttachmentParams{ @@ -387,7 +387,7 @@ Size: 4096, Provider: "environscoped", Tags: map[string]string{ - tags.JujuController: testing.ModelTag.Id(), + tags.JujuController: testing.ControllerTag.Id(), tags.JujuModel: testing.ModelTag.Id(), }, Attachment: ¶ms.VolumeAttachmentParams{ @@ -422,7 +422,7 @@ Size: 1024, Provider: "machinescoped", Tags: map[string]string{ - tags.JujuController: testing.ModelTag.Id(), + tags.JujuController: testing.ControllerTag.Id(), tags.JujuModel: testing.ModelTag.Id(), }, }}, @@ -431,7 +431,7 @@ Size: 2048, Provider: "environscoped", Tags: map[string]string{ - tags.JujuController: testing.ModelTag.Id(), + tags.JujuController: testing.ControllerTag.Id(), tags.JujuModel: testing.ModelTag.Id(), }, }}, diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/subnets/subnets.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/subnets/subnets.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/subnets/subnets.go 2016-08-16 08:56:25.000000000 +0000 +++ 
juju-core-2.0.0/src/github.com/juju/juju/apiserver/subnets/subnets.go 2016-10-13 14:31:49.000000000 +0000 @@ -11,6 +11,7 @@ "github.com/juju/juju/apiserver/common/networkingcommon" "github.com/juju/juju/apiserver/facade" "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/permission" "github.com/juju/juju/state" ) @@ -49,6 +50,28 @@ return newAPIWithBacking(networkingcommon.NewStateShim(st), res, auth) } +func (api *subnetsAPI) checkCanRead() error { + canRead, err := api.authorizer.HasPermission(permission.ReadAccess, api.backing.ModelTag()) + if err != nil { + return errors.Trace(err) + } + if !canRead { + return common.ServerError(common.ErrPerm) + } + return nil +} + +func (api *subnetsAPI) checkCanWrite() error { + canWrite, err := api.authorizer.HasPermission(permission.WriteAccess, api.backing.ModelTag()) + if err != nil { + return errors.Trace(err) + } + if !canWrite { + return common.ServerError(common.ErrPerm) + } + return nil +} + // newAPIWithBacking creates a new server-side Subnets API facade with // a common.NetworkBacking func newAPIWithBacking(backing networkingcommon.NetworkBacking, resources facade.Resources, authorizer facade.Authorizer) (SubnetsAPI, error) { @@ -65,11 +88,18 @@ // AllZones is defined on the API interface. func (api *subnetsAPI) AllZones() (params.ZoneResults, error) { + if err := api.checkCanRead(); err != nil { + return params.ZoneResults{}, err + } return networkingcommon.AllZones(api.backing) } // AllSpaces is defined on the API interface. func (api *subnetsAPI) AllSpaces() (params.SpaceResults, error) { + if err := api.checkCanRead(); err != nil { + return params.SpaceResults{}, err + } + var results params.SpaceResults spaces, err := api.backing.AllSpaces() @@ -89,11 +119,18 @@ // AddSubnets is defined on the API interface. 
func (api *subnetsAPI) AddSubnets(args params.AddSubnetsParams) (params.ErrorResults, error) { + if err := api.checkCanWrite(); err != nil { + return params.ErrorResults{}, err + } return networkingcommon.AddSubnets(api.backing, args) } // ListSubnets lists all the available subnets or only those matching // all given optional filters. func (api *subnetsAPI) ListSubnets(args params.SubnetsFilters) (results params.ListSubnetsResults, err error) { + if err := api.checkCanRead(); err != nil { + return params.ListSubnetsResults{}, err + } + return networkingcommon.ListSubnets(api.backing, args) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/testing/fakeapi.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/testing/fakeapi.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/testing/fakeapi.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/testing/fakeapi.go 2016-10-13 14:31:49.000000000 +0000 @@ -82,7 +82,7 @@ root := allVersions{ rpcreflect.ValueOf(reflect.ValueOf(srv.newRoot(modelUUID))), } - conn.ServeFinder(root, nil) + conn.ServeRoot(root, nil) conn.Start() <-conn.Dead() conn.Close() @@ -91,9 +91,9 @@ // allVersions serves the same methods as would be served // by rpc.Conn.Serve except that the facade version is ignored. 
type allVersions struct { - x rpcreflect.Value + rpcreflect.Value } func (av allVersions) FindMethod(rootMethodName string, version int, objMethodName string) (rpcreflect.MethodCaller, error) { - return av.x.FindMethod(rootMethodName, 0, objMethodName) + return av.Value.FindMethod(rootMethodName, 0, objMethodName) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/testing/fakeapi_test.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/testing/fakeapi_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/testing/fakeapi_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/testing/fakeapi_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -18,9 +18,10 @@ testing.IsolationSuite } +const fakeUUID = "f47ac10b-58cc-dead-beef-0e02b2c3d479" + func (*fakeAPISuite) TestFakeAPI(c *gc.C) { var r root - fakeUUID := "dead-beef" srv := apiservertesting.NewAPIServer(func(modelUUID string) interface{} { c.Check(modelUUID, gc.Equals, fakeUUID) return &r @@ -29,7 +30,7 @@ info := &api.Info{ Addrs: srv.Addrs, CACert: jtesting.CACert, - ModelTag: names.NewModelTag("dead-beef"), + ModelTag: names.NewModelTag(fakeUUID), } _, err := api.Open(info, api.DialOpts{}) c.Assert(err, jc.ErrorIsNil) @@ -49,11 +50,11 @@ return facade{r}, nil } -func (f facade) Login(req params.LoginRequest) (params.LoginResultV1, error) { +func (f facade) Login(req params.LoginRequest) (params.LoginResult, error) { f.r.calledMethods = append(f.r.calledMethods, "Login") - return params.LoginResultV1{ - ModelTag: names.NewModelTag("dead-beef").String(), - ControllerTag: names.NewModelTag("dead-beef").String(), + return params.LoginResult{ + ModelTag: names.NewModelTag(fakeUUID).String(), + ControllerTag: names.NewControllerTag(fakeUUID).String(), UserInfo: ¶ms.AuthUserInfo{ DisplayName: "foo", Identity: "user-bar", diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/testing/fakeauthorizer.go 
juju-core-2.0.0/src/github.com/juju/juju/apiserver/testing/fakeauthorizer.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/testing/fakeauthorizer.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/testing/fakeauthorizer.go 2016-10-13 14:31:49.000000000 +0000 @@ -6,13 +6,16 @@ import ( "gopkg.in/juju/names.v2" - "github.com/juju/juju/core/description" + "github.com/juju/juju/permission" ) // FakeAuthorizer implements the facade.Authorizer interface. type FakeAuthorizer struct { Tag names.Tag EnvironManager bool + ModelUUID string + AdminTag names.UserTag + HasWriteTag names.UserTag } func (fa FakeAuthorizer) AuthOwner(tag names.Tag) bool { @@ -46,7 +49,45 @@ return fa.Tag } -func (fa FakeAuthorizer) HasPermission(operation description.Access, target names.Tag) (bool, error) { - // TODO(perrito666) provide a way to pre-set the desired result here. - return fa.Tag == target, nil +// HasPermission returns true if the logged in user is admin or has a name equal to +// the pre-set admin tag. +func (fa FakeAuthorizer) HasPermission(operation permission.Access, target names.Tag) (bool, error) { + if fa.Tag.Kind() == names.UserTagKind { + ut := fa.Tag.(names.UserTag) + if ut.Name() == "admin" { + return true, nil + } + emptyTag := names.UserTag{} + if fa.AdminTag != emptyTag && ut == fa.AdminTag { + return true, nil + } + if operation == permission.WriteAccess && ut == fa.HasWriteTag { + return true, nil + } + return false, nil + } + return true, nil +} + +// ConnectedModel returns the UUID of the model the current client is +// connected to. +func (fa FakeAuthorizer) ConnectedModel() string { + return fa.ModelUUID +} + +// HasPermission returns true if the passed user is admin or has a name equal to +// the pre-set admin tag. 
+func (fa FakeAuthorizer) UserHasPermission(user names.UserTag, operation permission.Access, target names.Tag) (bool, error) { + if user.Name() == "admin" { + return true, nil + } + emptyTag := names.UserTag{} + if fa.AdminTag != emptyTag && user == fa.AdminTag { + return true, nil + } + ut := fa.Tag.(names.UserTag) + if ut == user { + return true, nil + } + return false, nil } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/testing/stub_network.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/testing/stub_network.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/testing/stub_network.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/testing/stub_network.go 2016-10-13 14:31:49.000000000 +0000 @@ -371,10 +371,11 @@ } sb.EnvConfig = coretesting.CustomModelConfig(c, extraAttrs) sb.Cloud = environs.CloudSpec{ - Type: StubProviderType, - Name: "cloud-name", - Endpoint: "endpoint", - StorageEndpoint: "storage-endpoint", + Type: StubProviderType, + Name: "cloud-name", + Endpoint: "endpoint", + IdentityEndpoint: "identity-endpoint", + StorageEndpoint: "storage-endpoint", } sb.Zones = []providercommon.AvailabilityZone{} if withZones { @@ -434,6 +435,10 @@ return sb.EnvConfig, nil } +func (sb *StubBacking) ModelTag() names.ModelTag { + return names.NewModelTag("dbeef-2f18-4fd2-967d-db9663db7bea") +} + func (sb *StubBacking) CloudSpec(names.ModelTag) (environs.CloudSpec, error) { sb.MethodCall(sb, "CloudSpec") if err := sb.NextErr(); err != nil { diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/tools.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/tools.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/tools.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/tools.go 2016-10-13 14:31:49.000000000 +0000 @@ -40,7 +40,9 @@ func (h *toolsDownloadHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { st, err := 
h.ctxt.stateForRequestUnauthenticated(r) if err != nil { - sendError(w, err) + if err := sendError(w, err); err != nil { + logger.Errorf("%v", err) + } return } @@ -49,12 +51,18 @@ tarball, err := h.processGet(r, st) if err != nil { logger.Errorf("GET(%s) failed: %v", r.URL, err) - sendError(w, errors.NewBadRequest(err, "")) + if err := sendError(w, errors.NewBadRequest(err, "")); err != nil { + logger.Errorf("%v", err) + } return } - h.sendTools(w, http.StatusOK, tarball) + if err := h.sendTools(w, http.StatusOK, tarball); err != nil { + logger.Errorf("%v", err) + } default: - sendError(w, errors.MethodNotAllowedf("unsupported method: %q", r.Method)) + if err := sendError(w, errors.MethodNotAllowedf("unsupported method: %q", r.Method)); err != nil { + logger.Errorf("%v", err) + } } } @@ -63,7 +71,9 @@ // on the state connection that is determined during the validation. st, _, err := h.ctxt.stateForRequestAuthenticatedUser(r) if err != nil { - sendError(w, err) + if err := sendError(w, err); err != nil { + logger.Errorf("%v", err) + } return } @@ -72,14 +82,20 @@ // Add tools to storage. agentTools, err := h.processPost(r, st) if err != nil { - sendError(w, err) + if err := sendError(w, err); err != nil { + logger.Errorf("%v", err) + } return } - sendStatusAndJSON(w, http.StatusOK, ¶ms.ToolsResult{ + if err := sendStatusAndJSON(w, http.StatusOK, ¶ms.ToolsResult{ ToolsList: tools.List{agentTools}, - }) + }); err != nil { + logger.Errorf("%v", err) + } default: - sendError(w, errors.MethodNotAllowedf("unsupported method: %q", r.Method)) + if err := sendError(w, errors.MethodNotAllowedf("unsupported method: %q", r.Method)); err != nil { + logger.Errorf("%v", err) + } } } @@ -168,14 +184,17 @@ } // sendTools streams the tools tarball to the client. 
-func (h *toolsDownloadHandler) sendTools(w http.ResponseWriter, statusCode int, tarball []byte) { +func (h *toolsDownloadHandler) sendTools(w http.ResponseWriter, statusCode int, tarball []byte) error { w.Header().Set("Content-Type", "application/x-tar-gz") w.Header().Set("Content-Length", fmt.Sprint(len(tarball))) w.WriteHeader(statusCode) if _, err := w.Write(tarball); err != nil { - sendError(w, errors.NewBadRequest(errors.Annotatef(err, "failed to write tools"), "")) - return + return errors.Trace(sendError( + w, + errors.NewBadRequest(errors.Annotatef(err, "failed to write tools"), ""), + )) } + return nil } // processPost handles a tools upload POST request after authentication. diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/tools_test.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/tools_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/tools_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/tools_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -5,9 +5,9 @@ import ( "crypto/sha256" - "encoding/base64" "encoding/json" "fmt" + "io" "io/ioutil" "net/http" "net/url" @@ -22,9 +22,9 @@ "github.com/juju/version" gc "gopkg.in/check.v1" "gopkg.in/macaroon-bakery.v1/httpbakery" - "gopkg.in/macaroon.v1" - "github.com/juju/juju/api/usermanager" + apiauthentication "github.com/juju/juju/api/authentication" + apitesting "github.com/juju/juju/api/testing" commontesting "github.com/juju/juju/apiserver/common/testing" "github.com/juju/juju/apiserver/params" envtesting "github.com/juju/juju/environs/testing" @@ -38,11 +38,11 @@ jujuversion "github.com/juju/juju/version" ) -// charmsCommonSuite wraps authHttpSuite and adds +// charmsCommonSuite wraps authHTTPSuite and adds // some helper methods suitable for working with the // tools endpoint. 
type toolsCommonSuite struct { - authHttpSuite + authHTTPSuite } func (s *toolsCommonSuite) toolsURL(c *gc.C, query string) *url.URL { @@ -505,37 +505,48 @@ } func (s *toolsWithMacaroonsSuite) TestCanPostWithLocalLogin(c *gc.C) { - // Create a new user, and a local login macaroon for it. - user := s.Factory.MakeUser(c, &factory.UserParams{Password: "hunter2"}) - conn := s.OpenAPIAs(c, user.Tag(), "hunter2") - defer conn.Close() - mac, err := usermanager.NewClient(conn).CreateLocalLoginMacaroon(user.UserTag()) - c.Assert(err, jc.ErrorIsNil) - - checkCount := 0 - s.DischargerLogin = func() string { - checkCount++ - return user.UserTag().Id() - } - do := func(req *http.Request) (*http.Response, error) { - data, err := json.Marshal(macaroon.Slice{mac}) - if err != nil { - return nil, err + // Create a new local user that we can log in as + // using macaroon authentication. + const password = "hunter2" + user := s.Factory.MakeUser(c, &factory.UserParams{Password: password}) + + // Install a "web-page" visitor that deals with the interaction + // method that Juju controllers support for authenticating local + // users. Note: the use of httpbakery.NewMultiVisitor is necessary + // to trigger httpbakery to query the authentication methods and + // bypass browser authentication. 
+ var prompted bool + jar := apitesting.NewClearableCookieJar() + client := utils.GetNonValidatingHTTPClient() + client.Jar = jar + bakeryClient := httpbakery.NewClient() + bakeryClient.Client = client + bakeryClient.WebPageVisitor = httpbakery.NewMultiVisitor(apiauthentication.NewVisitor( + user.UserTag().Id(), + func(username string) (string, error) { + c.Assert(username, gc.Equals, user.UserTag().Id()) + prompted = true + return password, nil + }, + )) + bakeryDo := func(req *http.Request) (*http.Response, error) { + var body io.ReadSeeker + if req.Body != nil { + body = req.Body.(io.ReadSeeker) + req.Body = nil } - req.Header.Add(httpbakery.MacaroonsHeader, base64.StdEncoding.EncodeToString(data)) - return utils.GetNonValidatingHTTPClient().Do(req) + return bakeryClient.DoWithBodyAndCustomError(req, body, bakeryGetError) } - // send without using bakeryDo, so we don't pass any macaroon cookies - // along. + resp := s.sendRequest(c, httpRequestParams{ method: "POST", url: s.toolsURI(c, ""), tag: user.UserTag().String(), password: "", // no password forces macaroon usage - do: do, + do: bakeryDo, }) s.assertErrorResponse(c, resp, http.StatusBadRequest, "expected binaryVersion argument") - c.Assert(checkCount, gc.Equals, 0) + c.Assert(prompted, jc.IsTrue) } // doer returns a Do function that can make a bakery request diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/undertaker/mock_test.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/undertaker/mock_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/undertaker/mock_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/undertaker/mock_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -64,7 +64,7 @@ func (m *mockState) RemoveAllModelDocs() error { if m.env.life != state.Dead { - return errors.New("transaction aborted") + return errors.New("model not dead") } m.removed = true return nil diff -Nru 
juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/undertaker/undertaker.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/undertaker/undertaker.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/undertaker/undertaker.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/undertaker/undertaker.go 2016-10-13 14:31:49.000000000 +0000 @@ -18,7 +18,7 @@ common.RegisterStandardFacade("Undertaker", 1, NewUndertakerAPI) } -// UndertakerAPI implements the API used by the machine undertaker worker. +// UndertakerAPI implements the API used by the model undertaker worker. type UndertakerAPI struct { st State resources facade.Resources @@ -86,14 +86,7 @@ // RemoveModel removes any records of this model from Juju. func (u *UndertakerAPI) RemoveModel() error { - err := u.st.RemoveAllModelDocs() - if err != nil { - // TODO(waigani) Return a human friendly error for now. The proper fix - // is to run a buildTxn within state.RemoveAllModelDocs, so we - // can return better errors than "transaction aborted". 
- return errors.New("an error occurred, unable to remove model") - } - return nil + return u.st.RemoveAllModelDocs() } func (u *UndertakerAPI) environResourceWatcher() params.NotifyWatchResult { diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/undertaker/undertaker_test.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/undertaker/undertaker_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/undertaker/undertaker_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/undertaker/undertaker_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -115,7 +115,7 @@ c.Assert(err, jc.ErrorIsNil) err = hostedAPI.RemoveModel() - c.Assert(err, gc.ErrorMatches, "an error occurred, unable to remove model") + c.Assert(err, gc.ErrorMatches, "model not dead") } func (s *undertakerSuite) TestRemoveDyingEnviron(c *gc.C) { @@ -128,7 +128,7 @@ c.Assert(err, jc.ErrorIsNil) err = hostedAPI.RemoveModel() - c.Assert(err, gc.ErrorMatches, "an error occurred, unable to remove model") + c.Assert(err, gc.ErrorMatches, "model not dead") } func (s *undertakerSuite) TestDeadRemoveEnviron(c *gc.C) { @@ -161,14 +161,14 @@ results, err := hostedAPI.SetStatus(params.SetStatus{ Entities: []params.EntityStatusArgs{{ - mock.env.Tag().String(), status.StatusDestroying.String(), + mock.env.Tag().String(), status.Destroying.String(), "woop", map[string]interface{}{"da": "ta"}, }}, }) c.Assert(err, jc.ErrorIsNil) c.Assert(results.Results, gc.HasLen, 1) c.Assert(results.Results[0].Error, gc.IsNil) - c.Assert(mock.env.status, gc.Equals, status.StatusDestroying) + c.Assert(mock.env.status, gc.Equals, status.Destroying) c.Assert(mock.env.statusInfo, gc.Equals, "woop") c.Assert(mock.env.statusData, jc.DeepEquals, map[string]interface{}{"da": "ta"}) } @@ -177,7 +177,7 @@ _, hostedAPI := s.setupStateAndAPI(c, true, "hostedenv") results, err := hostedAPI.SetStatus(params.SetStatus{ Entities: []params.EntityStatusArgs{{ - 
"model-6ada782f-bcd4-454b-a6da-d1793fbcb35e", status.StatusDestroying.String(), + "model-6ada782f-bcd4-454b-a6da-d1793fbcb35e", status.Destroying.String(), "woop", map[string]interface{}{"da": "ta"}, }}, }) @@ -190,7 +190,7 @@ _, hostedAPI := s.setupStateAndAPI(c, false, "hostedenv") results, err := hostedAPI.SetStatus(params.SetStatus{ Entities: []params.EntityStatusArgs{{ - "model-6ada782f-bcd4-454b-a6da-d1793fbcb35e", status.StatusDestroying.String(), + "model-6ada782f-bcd4-454b-a6da-d1793fbcb35e", status.Destroying.String(), "woop", map[string]interface{}{"da": "ta"}, }}, }) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/uniter/storage_test.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/uniter/storage_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/uniter/storage_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/uniter/storage_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -9,7 +9,7 @@ jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" "gopkg.in/juju/names.v2" - "launchpad.net/tomb" + "gopkg.in/tomb.v1" "github.com/juju/juju/apiserver/common" "github.com/juju/juju/apiserver/params" diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/uniter/uniter.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/uniter/uniter.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/uniter/uniter.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/uniter/uniter.go 2016-10-13 14:31:49.000000000 +0000 @@ -571,7 +571,7 @@ return -1, err } default: - return -1, errors.BadRequestf("type %t does not have a CharmModifiedVersion", entity) + return -1, errors.BadRequestf("type %T does not have a CharmModifiedVersion", entity) } return service.CharmModifiedVersion(), nil } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/uniter/uniter_test.go 
juju-core-2.0.0/src/github.com/juju/juju/apiserver/uniter/uniter_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/uniter/uniter_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/uniter/uniter_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -134,14 +134,14 @@ func (s *uniterSuite) TestSetStatus(c *gc.C) { now := time.Now() sInfo := status.StatusInfo{ - Status: status.StatusExecuting, + Status: status.Executing, Message: "blah", Since: &now, } err := s.wordpressUnit.SetAgentStatus(sInfo) c.Assert(err, jc.ErrorIsNil) sInfo = status.StatusInfo{ - Status: status.StatusExecuting, + Status: status.Executing, Message: "foo", Since: &now, } @@ -150,9 +150,9 @@ args := params.SetStatus{ Entities: []params.EntityStatusArgs{ - {Tag: "unit-mysql-0", Status: status.StatusError.String(), Info: "not really"}, - {Tag: "unit-wordpress-0", Status: status.StatusRebooting.String(), Info: "foobar"}, - {Tag: "unit-foo-42", Status: status.StatusActive.String(), Info: "blah"}, + {Tag: "unit-mysql-0", Status: status.Error.String(), Info: "not really"}, + {Tag: "unit-wordpress-0", Status: status.Rebooting.String(), Info: "foobar"}, + {Tag: "unit-foo-42", Status: status.Active.String(), Info: "blah"}, }} result, err := s.uniter.SetStatus(args) c.Assert(err, jc.ErrorIsNil) @@ -167,26 +167,26 @@ // Verify mysqlUnit - no change. statusInfo, err := s.mysqlUnit.AgentStatus() c.Assert(err, jc.ErrorIsNil) - c.Assert(statusInfo.Status, gc.Equals, status.StatusExecuting) + c.Assert(statusInfo.Status, gc.Equals, status.Executing) c.Assert(statusInfo.Message, gc.Equals, "foo") // ...wordpressUnit is fine though. 
statusInfo, err = s.wordpressUnit.AgentStatus() c.Assert(err, jc.ErrorIsNil) - c.Assert(statusInfo.Status, gc.Equals, status.StatusRebooting) + c.Assert(statusInfo.Status, gc.Equals, status.Rebooting) c.Assert(statusInfo.Message, gc.Equals, "foobar") } func (s *uniterSuite) TestSetAgentStatus(c *gc.C) { now := time.Now() sInfo := status.StatusInfo{ - Status: status.StatusExecuting, + Status: status.Executing, Message: "blah", Since: &now, } err := s.wordpressUnit.SetAgentStatus(sInfo) c.Assert(err, jc.ErrorIsNil) sInfo = status.StatusInfo{ - Status: status.StatusExecuting, + Status: status.Executing, Message: "foo", Since: &now, } @@ -195,9 +195,9 @@ args := params.SetStatus{ Entities: []params.EntityStatusArgs{ - {Tag: "unit-mysql-0", Status: status.StatusError.String(), Info: "not really"}, - {Tag: "unit-wordpress-0", Status: status.StatusExecuting.String(), Info: "foobar"}, - {Tag: "unit-foo-42", Status: status.StatusRebooting.String(), Info: "blah"}, + {Tag: "unit-mysql-0", Status: status.Error.String(), Info: "not really"}, + {Tag: "unit-wordpress-0", Status: status.Executing.String(), Info: "foobar"}, + {Tag: "unit-foo-42", Status: status.Rebooting.String(), Info: "blah"}, }} result, err := s.uniter.SetAgentStatus(args) c.Assert(err, jc.ErrorIsNil) @@ -212,26 +212,26 @@ // Verify mysqlUnit - no change. statusInfo, err := s.mysqlUnit.AgentStatus() c.Assert(err, jc.ErrorIsNil) - c.Assert(statusInfo.Status, gc.Equals, status.StatusExecuting) + c.Assert(statusInfo.Status, gc.Equals, status.Executing) c.Assert(statusInfo.Message, gc.Equals, "foo") // ...wordpressUnit is fine though. 
statusInfo, err = s.wordpressUnit.AgentStatus() c.Assert(err, jc.ErrorIsNil) - c.Assert(statusInfo.Status, gc.Equals, status.StatusExecuting) + c.Assert(statusInfo.Status, gc.Equals, status.Executing) c.Assert(statusInfo.Message, gc.Equals, "foobar") } func (s *uniterSuite) TestSetUnitStatus(c *gc.C) { now := time.Now() sInfo := status.StatusInfo{ - Status: status.StatusActive, + Status: status.Active, Message: "blah", Since: &now, } err := s.wordpressUnit.SetStatus(sInfo) c.Assert(err, jc.ErrorIsNil) sInfo = status.StatusInfo{ - Status: status.StatusTerminated, + Status: status.Terminated, Message: "foo", Since: &now, } @@ -240,9 +240,9 @@ args := params.SetStatus{ Entities: []params.EntityStatusArgs{ - {Tag: "unit-mysql-0", Status: status.StatusError.String(), Info: "not really"}, - {Tag: "unit-wordpress-0", Status: status.StatusTerminated.String(), Info: "foobar"}, - {Tag: "unit-foo-42", Status: status.StatusActive.String(), Info: "blah"}, + {Tag: "unit-mysql-0", Status: status.Error.String(), Info: "not really"}, + {Tag: "unit-wordpress-0", Status: status.Terminated.String(), Info: "foobar"}, + {Tag: "unit-foo-42", Status: status.Active.String(), Info: "blah"}, }} result, err := s.uniter.SetUnitStatus(args) c.Assert(err, jc.ErrorIsNil) @@ -257,12 +257,12 @@ // Verify mysqlUnit - no change. statusInfo, err := s.mysqlUnit.Status() c.Assert(err, jc.ErrorIsNil) - c.Assert(statusInfo.Status, gc.Equals, status.StatusTerminated) + c.Assert(statusInfo.Status, gc.Equals, status.Terminated) c.Assert(statusInfo.Message, gc.Equals, "foo") // ...wordpressUnit is fine though. statusInfo, err = s.wordpressUnit.Status() c.Assert(err, jc.ErrorIsNil) - c.Assert(statusInfo.Status, gc.Equals, status.StatusTerminated) + c.Assert(statusInfo.Status, gc.Equals, status.Terminated) c.Assert(statusInfo.Message, gc.Equals, "foobar") } @@ -772,9 +772,9 @@ // Verify the charm URL was set. 
err = s.wordpressUnit.Refresh() c.Assert(err, jc.ErrorIsNil) - charmUrl, needsUpgrade := s.wordpressUnit.CharmURL() - c.Assert(charmUrl, gc.NotNil) - c.Assert(charmUrl.String(), gc.Equals, s.wpCharm.String()) + charmURL, needsUpgrade := s.wordpressUnit.CharmURL() + c.Assert(charmURL, gc.NotNil) + c.Assert(charmURL.String(), gc.Equals, s.wpCharm.String()) c.Assert(needsUpgrade, jc.IsTrue) } @@ -2139,14 +2139,14 @@ func (s *uniterSuite) TestUnitStatus(c *gc.C) { now := time.Now() sInfo := status.StatusInfo{ - Status: status.StatusMaintenance, + Status: status.Maintenance, Message: "blah", Since: &now, } err := s.wordpressUnit.SetStatus(sInfo) c.Assert(err, jc.ErrorIsNil) sInfo = status.StatusInfo{ - Status: status.StatusTerminated, + Status: status.Terminated, Message: "foo", Since: &now, } @@ -2175,7 +2175,7 @@ c.Assert(result, gc.DeepEquals, params.StatusResults{ Results: []params.StatusResult{ {Error: apiservertesting.ErrUnauthorized}, - {Status: status.StatusMaintenance.String(), Info: "blah", Data: map[string]interface{}{}}, + {Status: status.Maintenance.String(), Info: "blah", Data: map[string]interface{}{}}, {Error: apiservertesting.ErrUnauthorized}, {Error: apiservertesting.ErrUnauthorized}, {Error: apiservertesting.ServerError(`"invalid" is not a valid tag`)}, @@ -2293,7 +2293,6 @@ s.uniter, s.State, s.resources, - commontesting.NoSecrets, ) } @@ -2375,7 +2374,7 @@ }, { about: "user tag", tag: names.NewLocalUserTag("admin").String(), - expect: `"user-admin@local" is not a valid unit tag`, + expect: `"user-admin" is not a valid unit tag`, }, { about: "machine tag", tag: names.NewMachineTag("0").String(), diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/upgrading_root.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/upgrading_root.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/upgrading_root.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/upgrading_root.go 1970-01-01 
00:00:00.000000000 +0000 @@ -1,72 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package apiserver - -import ( - "github.com/juju/errors" - "github.com/juju/utils/set" - - "github.com/juju/juju/apiserver/params" - "github.com/juju/juju/rpc" - "github.com/juju/juju/rpc/rpcreflect" -) - -// upgradingRoot restricts API calls to those supported during an upgrade. -type upgradingRoot struct { - rpc.MethodFinder -} - -// newUpgradingRoot returns a new upgradingRoot. -func newUpgradingRoot(finder rpc.MethodFinder) *upgradingRoot { - return &upgradingRoot{finder} -} - -// allowedMethodsDuringUpgrades stores api calls -// that are not blocked during the upgrade process -// as well as their respective facade names. -// When needed, at some future point, this solution -// will need to be adjusted to cater for different -// facade versions as well. -var allowedMethodsDuringUpgrades = map[string]set.Strings{ - "Client": set.NewStrings( - "FullStatus", // for "juju status" - "FindTools", // for "juju upgrade-juju", before we can reset upgrade to re-run - "AbortCurrentUpgrade", // for "juju upgrade-juju", so that we can reset upgrade to re-run - - ), - "SSHClient": set.NewStrings( // allow all SSH client related calls - "PublicAddress", - "PrivateAddress", - "PublicKeys", - "Proxy", - ), - "Pinger": set.NewStrings( - "Ping", - ), - "Backups": set.NewStrings( - "FinishRestore", - ), -} - -func IsMethodAllowedDuringUpgrade(rootName, methodName string) bool { - methods, ok := allowedMethodsDuringUpgrades[rootName] - if !ok { - return false - } - return methods.Contains(methodName) -} - -// FindMethod returns UpgradeInProgressError for most API calls except those that are -// deemed safe or important for use while Juju is upgrading. 
-func (r *upgradingRoot) FindMethod(rootName string, version int, methodName string) (rpcreflect.MethodCaller, error) { - caller, err := r.MethodFinder.FindMethod(rootName, version, methodName) - if err != nil { - return nil, errors.Trace(err) - } - if !IsMethodAllowedDuringUpgrade(rootName, methodName) { - logger.Debugf("Facade (%v) method (%v) was called during the upgrade but it was blocked.", rootName, methodName) - return nil, params.UpgradeInProgressError - } - return caller, nil -} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/upgrading_root_test.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/upgrading_root_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/upgrading_root_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/upgrading_root_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,55 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. 
- -package apiserver_test - -import ( - "github.com/juju/errors" - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" - - "github.com/juju/juju/apiserver" - "github.com/juju/juju/apiserver/params" - "github.com/juju/juju/testing" -) - -type upgradingRootSuite struct { - testing.BaseSuite -} - -var _ = gc.Suite(&upgradingRootSuite{}) - -func (r *upgradingRootSuite) TestAllowedMethods(c *gc.C) { - root := apiserver.TestingUpgradingRoot(nil) - checkAllowed := func(facade, method string) { - caller, err := root.FindMethod(facade, 1, method) - c.Check(err, jc.ErrorIsNil) - c.Check(caller, gc.NotNil) - } - checkAllowed("Client", "FullStatus") - checkAllowed("Client", "AbortCurrentUpgrade") - checkAllowed("SSHClient", "PublicAddress") - checkAllowed("SSHClient", "Proxy") - checkAllowed("Pinger", "Ping") -} - -func (r *upgradingRootSuite) TestFindDisallowedMethod(c *gc.C) { - root := apiserver.TestingUpgradingRoot(nil) - caller, err := root.FindMethod("Client", 1, "ModelSet") - c.Assert(errors.Cause(err), gc.Equals, params.UpgradeInProgressError) - c.Assert(caller, gc.IsNil) -} - -func (r *upgradingRootSuite) TestFindNonExistentMethod(c *gc.C) { - root := apiserver.TestingUpgradingRoot(nil) - caller, err := root.FindMethod("Foo", 0, "Bar") - c.Assert(err, gc.ErrorMatches, "unknown object type \"Foo\"") - c.Assert(caller, gc.IsNil) -} - -func (r *upgradingRootSuite) TestFindMethodNonExistentVersion(c *gc.C) { - root := apiserver.TestingUpgradingRoot(nil) - caller, err := root.FindMethod("Client", 99999999, "FullStatus") - c.Assert(err, gc.ErrorMatches, "unknown version \\(99999999\\) of interface \"Client\"") - c.Assert(caller, gc.IsNil) -} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/usermanager/usermanager.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/usermanager/usermanager.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/usermanager/usermanager.go 2016-08-16 08:56:25.000000000 +0000 +++ 
juju-core-2.0.0/src/github.com/juju/juju/apiserver/usermanager/usermanager.go 2016-10-13 14:31:49.000000000 +0000 @@ -6,16 +6,14 @@ import ( "time" - "gopkg.in/macaroon.v1" - "github.com/juju/errors" "github.com/juju/loggo" "gopkg.in/juju/names.v2" "github.com/juju/juju/apiserver/common" "github.com/juju/juju/apiserver/facade" - "github.com/juju/juju/apiserver/modelmanager" "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/permission" "github.com/juju/juju/state" ) @@ -28,12 +26,11 @@ // UserManagerAPI implements the user manager interface and is the concrete // implementation of the api end point. type UserManagerAPI struct { - state *state.State - authorizer facade.Authorizer - createLocalLoginMacaroon func(names.UserTag) (*macaroon.Macaroon, error) - check *common.BlockChecker - apiUser names.UserTag - isAdmin bool + state *state.State + authorizer facade.Authorizer + check *common.BlockChecker + apiUser names.UserTag + isAdmin bool } func NewUserManagerAPI( @@ -50,30 +47,37 @@ apiUser, _ := authorizer.GetAuthTag().(names.UserTag) // Pretty much all of the user manager methods have special casing for admin // users, so look once when we start and remember if the user is an admin. 
- isAdmin, err := st.IsControllerAdministrator(apiUser) + isAdmin, err := authorizer.HasPermission(permission.SuperuserAccess, st.ControllerTag()) if err != nil { return nil, errors.Trace(err) } - resource, ok := resources.Get("createLocalLoginMacaroon").(common.ValueResource) - if !ok { - return nil, errors.NotFoundf("userAuth resource") - } - createLocalLoginMacaroon, ok := resource.Value.(func(names.UserTag) (*macaroon.Macaroon, error)) - if !ok { - return nil, errors.NotValidf("userAuth resource") - } - return &UserManagerAPI{ - state: st, - authorizer: authorizer, - createLocalLoginMacaroon: createLocalLoginMacaroon, - check: common.NewBlockChecker(st), - apiUser: apiUser, - isAdmin: isAdmin, + state: st, + authorizer: authorizer, + check: common.NewBlockChecker(st), + apiUser: apiUser, + isAdmin: isAdmin, }, nil } +func (api *UserManagerAPI) hasReadAccess() (bool, error) { + canRead, err := api.authorizer.HasPermission(permission.ReadAccess, api.state.ModelTag()) + if errors.IsNotFound(err) { + return false, nil + } + return canRead, err + +} + +func (api *UserManagerAPI) hasControllerAdminAccess() (bool, error) { + isAdmin, err := api.authorizer.HasPermission(permission.SuperuserAccess, api.state.ControllerTag()) + if errors.IsNotFound(err) { + return false, nil + } + return isAdmin, err +} + // AddUser adds a user with a username, and either a password or // a randomly generated secret key which will be returned. func (api *UserManagerAPI) AddUser(args params.AddUsers) (params.AddUserResults, error) { @@ -90,12 +94,12 @@ // Create the results list to populate. result.Results = make([]params.AddUserResult, len(args.Users)) - // Make sure we have admin. If not fail each of the requests and return w/o a top level error. 
- if !api.isAdmin { - for i, _ := range result.Results { - result.Results[i].Error = common.ServerError(common.ErrPerm) - } - return result, nil + isSuperUser, err := api.hasControllerAdminAccess() + if err != nil { + return result, errors.Trace(err) + } + if !isSuperUser { + return result, common.ErrPerm } for i, arg := range args.Users { @@ -117,31 +121,6 @@ } } - if len(arg.SharedModelTags) > 0 { - modelAccess, err := modelmanager.FromModelAccessParam(arg.ModelAccess) - if err != nil { - err = errors.Annotatef(err, "user %q created but models not shared", arg.Username) - result.Results[i].Error = common.ServerError(err) - continue - } - userTag := user.Tag().(names.UserTag) - for _, modelTagStr := range arg.SharedModelTags { - modelTag, err := names.ParseModelTag(modelTagStr) - if err != nil { - err = errors.Annotatef(err, "user %q created but model %q not shared", arg.Username, modelTagStr) - result.Results[i].Error = common.ServerError(err) - break - } - err = modelmanager.ChangeModelAccess( - common.NewModelManagerBackend(api.state), modelTag, api.apiUser, - userTag, params.GrantModelAccess, modelAccess, api.isAdmin) - if err != nil { - err = errors.Annotatef(err, "user %q created but model %q not shared", arg.Username, modelTagStr) - result.Results[i].Error = common.ServerError(err) - break - } - } - } } return result, nil } @@ -167,12 +146,12 @@ // Create the results list to populate. deletions.Results = make([]params.ErrorResult, len(entities.Entities)) - // Make sure we have admin. If not fail each of the requests and return w/o a top level error. - if !api.isAdmin { - for i, _ := range deletions.Results { - deletions.Results[i].Error = common.ServerError(common.ErrPerm) - } - return deletions, nil + isSuperUser, err := api.hasControllerAdminAccess() + if err != nil { + return deletions, errors.Trace(err) + } + if !api.isAdmin && !isSuperUser { + return deletions, common.ErrPerm } // Remove the entities. 
@@ -218,6 +197,14 @@ // EnableUser enables one or more users. If the user is already enabled, // the action is considered a success. func (api *UserManagerAPI) EnableUser(users params.Entities) (params.ErrorResults, error) { + isSuperUser, err := api.hasControllerAdminAccess() + if err != nil { + return params.ErrorResults{}, errors.Trace(err) + } + if !isSuperUser { + return params.ErrorResults{}, common.ErrPerm + } + if err := api.check.ChangeAllowed(); err != nil { return params.ErrorResults{}, errors.Trace(err) } @@ -227,6 +214,14 @@ // DisableUser disables one or more users. If the user is already disabled, // the action is considered a success. func (api *UserManagerAPI) DisableUser(users params.Entities) (params.ErrorResults, error) { + isSuperUser, err := api.hasControllerAdminAccess() + if err != nil { + return params.ErrorResults{}, errors.Trace(err) + } + if !isSuperUser { + return params.ErrorResults{}, common.ErrPerm + } + if err := api.check.ChangeAllowed(); err != nil { return params.ErrorResults{}, errors.Trace(err) } @@ -240,16 +235,18 @@ return result, nil } - // Create the results list to populate. - result.Results = make([]params.ErrorResult, len(args.Entities)) + isSuperUser, err := api.hasControllerAdminAccess() + if err != nil { + return result, errors.Trace(err) + } - if !api.isAdmin { - for i, _ := range result.Results { - result.Results[i].Error = common.ServerError(common.ErrPerm) - } - return result, nil + if !api.isAdmin && isSuperUser { + return result, common.ErrPerm } + // Create the results list to populate. + result.Results = make([]params.ErrorResult, len(args.Entities)) + for i, arg := range args.Entities { user, err := api.getUser(arg.Tag) if err != nil { @@ -267,6 +264,21 @@ // UserInfo returns information on a user. 
func (api *UserManagerAPI) UserInfo(request params.UserInfoRequest) (params.UserInfoResults, error) { var results params.UserInfoResults + isAdmin, err := api.hasControllerAdminAccess() + if err != nil { + return results, errors.Trace(err) + } + + var accessForUser = func(userTag names.UserTag, result *params.UserInfoResult) { + // Lookup the access the specified user has to the controller. + _, controllerUserAccess, err := common.UserAccess(api.state, userTag) + if err == nil { + result.Result.Access = string(controllerUserAccess.Access) + } else if err != nil && !errors.IsNotFound(err) { + result.Result = nil + result.Error = common.ServerError(err) + } + } var infoForUser = func(user *state.User) params.UserInfoResult { var lastLogin *time.Time @@ -278,7 +290,7 @@ } else { lastLogin = &userLastLogin } - return params.UserInfoResult{ + result := params.UserInfoResult{ Result: ¶ms.UserInfo{ Username: user.Name(), DisplayName: user.DisplayName(), @@ -288,6 +300,8 @@ Disabled: user.IsDisabled(), }, } + accessForUser(user.UserTag(), &result) + return result } argCount := len(request.Entities) @@ -297,20 +311,42 @@ return results, errors.Trace(err) } for _, user := range users { + if !isAdmin && !api.authorizer.AuthOwner(user.Tag()) { + continue + } results.Results = append(results.Results, infoForUser(user)) } return results, nil } // Create the results list to populate. - results.Results = make([]params.UserInfoResult, argCount) - for i, arg := range request.Entities { + for _, arg := range request.Entities { + userTag, err := names.ParseUserTag(arg.Tag) + if err != nil { + results.Results = append(results.Results, params.UserInfoResult{Error: common.ServerError(err)}) + continue + } + if !isAdmin && !api.authorizer.AuthOwner(userTag) { + results.Results = append(results.Results, params.UserInfoResult{Error: common.ServerError(common.ErrPerm)}) + continue + } + if !userTag.IsLocal() { + // TODO(wallyworld) record login information about external users. 
+ result := params.UserInfoResult{ + Result: ¶ms.UserInfo{ + Username: userTag.Id(), + }, + } + accessForUser(userTag, &result) + results.Results = append(results.Results, result) + continue + } user, err := api.getUser(arg.Tag) if err != nil { - results.Results[i].Error = common.ServerError(err) + results.Results = append(results.Results, params.UserInfoResult{Error: common.ServerError(err)}) continue } - results.Results[i] = infoForUser(user) + results.Results = append(results.Results, infoForUser(user)) } return results, nil @@ -343,7 +379,13 @@ if err != nil { return errors.Trace(err) } - if api.apiUser != user.UserTag() && !api.isAdmin { + + isSuperUser, err := api.hasControllerAdminAccess() + if err != nil { + return errors.Trace(err) + } + + if api.apiUser != user.UserTag() && !api.isAdmin && !isSuperUser { return errors.Trace(common.ErrPerm) } if arg.Password == "" { @@ -354,30 +396,3 @@ } return nil } - -// CreateLocalLoginMacaroon creates a macaroon for the specified users to use -// for future logins. 
-func (api *UserManagerAPI) CreateLocalLoginMacaroon(args params.Entities) (params.MacaroonResults, error) { - results := params.MacaroonResults{ - Results: make([]params.MacaroonResult, len(args.Entities)), - } - createLocalLoginMacaroon := func(arg params.Entity) (*macaroon.Macaroon, error) { - user, err := api.getUser(arg.Tag) - if err != nil { - return nil, errors.Trace(err) - } - if api.apiUser != user.UserTag() && !api.isAdmin { - return nil, errors.Trace(common.ErrPerm) - } - return api.createLocalLoginMacaroon(user.UserTag()) - } - for i, arg := range args.Entities { - m, err := createLocalLoginMacaroon(arg) - if err != nil { - results.Results[i].Error = common.ServerError(err) - continue - } - results.Results[i].Result = m - } - return results, nil -} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/usermanager/usermanager_test.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/usermanager/usermanager_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/usermanager/usermanager_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/usermanager/usermanager_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -11,15 +11,15 @@ jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" "gopkg.in/juju/names.v2" - "gopkg.in/macaroon.v1" "github.com/juju/juju/apiserver/common" commontesting "github.com/juju/juju/apiserver/common/testing" + "github.com/juju/juju/apiserver/controller" "github.com/juju/juju/apiserver/params" apiservertesting "github.com/juju/juju/apiserver/testing" "github.com/juju/juju/apiserver/usermanager" - "github.com/juju/juju/core/description" jujutesting "github.com/juju/juju/juju/testing" + "github.com/juju/juju/permission" "github.com/juju/juju/state" "github.com/juju/juju/testing/factory" ) @@ -27,11 +27,10 @@ type userManagerSuite struct { jujutesting.JujuConnSuite - usermanager *usermanager.UserManagerAPI - authorizer apiservertesting.FakeAuthorizer - adminName 
string - resources *common.Resources - createLocalLoginMacaroon func(names.UserTag) (*macaroon.Macaroon, error) + usermanager *usermanager.UserManagerAPI + authorizer apiservertesting.FakeAuthorizer + adminName string + resources *common.Resources commontesting.BlockHelper } @@ -41,16 +40,7 @@ func (s *userManagerSuite) SetUpTest(c *gc.C) { s.JujuConnSuite.SetUpTest(c) - s.createLocalLoginMacaroon = func(tag names.UserTag) (*macaroon.Macaroon, error) { - return nil, errors.NotSupportedf("CreateLocalLoginMacaroon") - } s.resources = common.NewResources() - s.resources.RegisterNamed("createLocalLoginMacaroon", common.ValueResource{ - func(tag names.UserTag) (*macaroon.Macaroon, error) { - return s.createLocalLoginMacaroon(tag) - }, - }) - adminTag := s.AdminUserTag(c) s.adminName = adminTag.Name() s.authorizer = apiservertesting.FakeAuthorizer{ @@ -78,11 +68,9 @@ args := params.AddUsers{ Users: []params.AddUser{{ - Username: "foobar", - DisplayName: "Foo Bar", - Password: "password", - SharedModelTags: sharedModelTags, - ModelAccess: access, + Username: "foobar", + DisplayName: "Foo Bar", + Password: "password", }}} result, err := s.usermanager.AddUser(args) @@ -135,40 +123,6 @@ }) } -func (s *userManagerSuite) TestAddReadAccessUser(c *gc.C) { - s.addUserWithSharedModel(c, params.ModelReadAccess) -} - -func (s *userManagerSuite) TestAddWriteAccessUser(c *gc.C) { - s.addUserWithSharedModel(c, params.ModelWriteAccess) -} - -func (s *userManagerSuite) addUserWithSharedModel(c *gc.C, access params.UserAccessPermission) { - sharedModelState := s.Factory.MakeModel(c, nil) - defer sharedModelState.Close() - - s.assertAddUser(c, access, []string{sharedModelState.ModelTag().String()}) - - // Check that the model has been shared. 
- sharedModel, err := sharedModelState.Model() - c.Assert(err, jc.ErrorIsNil) - users, err := sharedModel.Users() - c.Assert(err, jc.ErrorIsNil) - var modelUserTags = make([]names.UserTag, len(users)) - for i, u := range users { - modelUserTags[i] = u.UserTag - if u.UserName == "foobar" { - c.Assert(u.Access, gc.Equals, description.ReadAccess) - } else if u.UserName == "admin" { - c.Assert(u.Access, gc.Equals, description.AdminAccess) - } - } - c.Assert(modelUserTags, jc.SameContents, []names.UserTag{ - names.NewLocalUserTag("foobar"), - names.NewLocalUserTag("admin"), - }) -} - func (s *userManagerSuite) TestBlockAddUser(c *gc.C) { args := params.AddUsers{ Users: []params.AddUser{{ @@ -203,14 +157,8 @@ Password: "password", }}} - got, err := usermanager.AddUser(args) - - for _, result := range got.Results { - c.Check(errors.Cause(result.Error), jc.DeepEquals, - ¶ms.Error{Message: "permission denied", Code: "unauthorized access"}) - } - c.Assert(got.Results, gc.HasLen, 1) - c.Assert(err, jc.ErrorIsNil) + _, err = usermanager.AddUser(args) + c.Assert(err, gc.ErrorMatches, "permission denied") _, err = s.State.User(names.NewLocalUserTag("foobar")) c.Assert(err, jc.Satisfies, errors.IsNotFound) @@ -359,12 +307,8 @@ args := params.Entities{ []params.Entity{{barb.Tag().String()}}, } - got, err := usermanager.DisableUser(args) - for _, result := range got.Results { - c.Check(errors.Cause(result.Error), jc.DeepEquals, ¶ms.Error{Message: "permission denied", Code: "unauthorized access"}) - } - c.Assert(got.Results, gc.HasLen, 1) - c.Assert(err, jc.ErrorIsNil) + _, err = usermanager.DisableUser(args) + c.Assert(err, gc.ErrorMatches, "permission denied") err = barb.Refresh() c.Assert(err, jc.ErrorIsNil) @@ -382,12 +326,8 @@ args := params.Entities{ []params.Entity{{barb.Tag().String()}}, } - got, err := usermanager.EnableUser(args) - for _, result := range got.Results { - c.Check(errors.Cause(result.Error), jc.DeepEquals, ¶ms.Error{Message: "permission denied", Code: 
"unauthorized access"}) - } - c.Assert(got.Results, gc.HasLen, 1) - c.Assert(err, jc.ErrorIsNil) + _, err = usermanager.EnableUser(args) + c.Assert(err, gc.ErrorMatches, "permission denied") err = barb.Refresh() c.Assert(err, jc.ErrorIsNil) @@ -397,6 +337,10 @@ func (s *userManagerSuite) TestUserInfo(c *gc.C) { userFoo := s.Factory.MakeUser(c, &factory.UserParams{Name: "foobar", DisplayName: "Foo Bar"}) userBar := s.Factory.MakeUser(c, &factory.UserParams{Name: "barfoo", DisplayName: "Bar Foo", Disabled: true}) + err := controller.ChangeControllerAccess( + s.State, s.AdminUserTag(c), names.NewUserTag("fred@external"), + params.GrantControllerAccess, permission.AddModelAccess) + c.Assert(err, jc.ErrorIsNil) args := params.UserInfoRequest{ Entities: []params.Entity{ @@ -407,7 +351,7 @@ }, { Tag: names.NewLocalUserTag("ellie").String(), }, { - Tag: names.NewUserTag("not@remote").String(), + Tag: names.NewUserTag("fred@external").String(), }, { Tag: "not-a-tag", }, @@ -426,12 +370,14 @@ info: ¶ms.UserInfo{ Username: "foobar", DisplayName: "Foo Bar", + Access: "login", }, }, { user: userBar, info: ¶ms.UserInfo{ Username: "barfoo", DisplayName: "Bar Foo", + Access: "login", Disabled: true, }, }, { @@ -440,9 +386,9 @@ Code: params.CodeUnauthorized, }, }, { - err: ¶ms.Error{ - Message: "permission denied", - Code: params.CodeUnauthorized, + info: ¶ms.UserInfo{ + Username: "fred@external", + Access: "add-model", }, }, { err: ¶ms.Error{ @@ -451,9 +397,11 @@ }, } { if r.info != nil { - r.info.DateCreated = r.user.DateCreated() - r.info.LastConnection = lastLoginPointer(c, r.user) - r.info.CreatedBy = s.adminName + if names.NewUserTag(r.info.Username).IsLocal() { + r.info.DateCreated = r.user.DateCreated() + r.info.LastConnection = lastLoginPointer(c, r.user) + r.info.CreatedBy = s.adminName + } } expected.Results = append(expected.Results, params.UserInfoResult{Result: r.info, Error: r.err}) } @@ -479,6 +427,7 @@ info: ¶ms.UserInfo{ Username: "aardvark", DisplayName: "Aard 
Vark", + Access: "login", Disabled: true, }, }, { @@ -486,12 +435,14 @@ info: ¶ms.UserInfo{ Username: s.adminName, DisplayName: admin.DisplayName(), + Access: "superuser", }, }, { user: userFoo, info: ¶ms.UserInfo{ Username: "foobar", DisplayName: "Foo Bar", + Access: "login", }, }} { r.info.CreatedBy = s.adminName @@ -508,6 +459,70 @@ c.Assert(results, jc.DeepEquals, expected) } +func (s *userManagerSuite) TestUserInfoNonControllerAdmin(c *gc.C) { + s.Factory.MakeUser(c, &factory.UserParams{Name: "foobar", DisplayName: "Foo Bar"}) + userAardvark := s.Factory.MakeUser(c, &factory.UserParams{Name: "aardvark", DisplayName: "Aard Vark"}) + + authorizer := apiservertesting.FakeAuthorizer{ + Tag: userAardvark.Tag(), + } + usermanager, err := usermanager.NewUserManagerAPI(s.State, s.resources, authorizer) + c.Assert(err, jc.ErrorIsNil) + + args := params.UserInfoRequest{Entities: []params.Entity{ + {Tag: userAardvark.Tag().String()}, + {Tag: names.NewUserTag("foobar").String()}, + }} + results, err := usermanager.UserInfo(args) + c.Assert(err, jc.ErrorIsNil) + // Non admin users can only see themselves. 
+ c.Assert(results, jc.DeepEquals, params.UserInfoResults{ + Results: []params.UserInfoResult{ + { + Result: ¶ms.UserInfo{ + Username: "aardvark", + DisplayName: "Aard Vark", + Access: "login", + CreatedBy: s.adminName, + DateCreated: userAardvark.DateCreated(), + LastConnection: lastLoginPointer(c, userAardvark), + }, + }, { + Error: ¶ms.Error{ + Message: "permission denied", + Code: params.CodeUnauthorized, + }, + }, + }, + }) +} + +func (s *userManagerSuite) TestUserInfoEveryonePermission(c *gc.C) { + _, err := s.State.AddControllerUser(state.UserAccessSpec{ + User: names.NewUserTag("everyone@external"), + Access: permission.AddModelAccess, + CreatedBy: s.AdminUserTag(c), + }) + c.Assert(err, jc.ErrorIsNil) + _, err = s.State.AddControllerUser(state.UserAccessSpec{ + User: names.NewUserTag("aardvark@external"), + Access: permission.LoginAccess, + CreatedBy: s.AdminUserTag(c), + }) + c.Assert(err, jc.ErrorIsNil) + + args := params.UserInfoRequest{Entities: []params.Entity{{Tag: names.NewUserTag("aardvark@external").String()}}} + results, err := s.usermanager.UserInfo(args) + c.Assert(err, jc.ErrorIsNil) + // Non admin users can only see themselves. + c.Assert(results, jc.DeepEquals, params.UserInfoResults{ + Results: []params.UserInfoResult{{Result: ¶ms.UserInfo{ + Username: "aardvark@external", + Access: "add-model", + }}}, + }) +} + func lastLoginPointer(c *gc.C, user *state.User) *time.Time { lastLogin, err := user.LastLogin() if err != nil { @@ -685,12 +700,9 @@ c.Assert(ui.Results[0].Result.Username, gc.DeepEquals, jjam.Name()) // Remove jjam as chuck and fail. 
- got, err := usermanager.RemoveUser(params.Entities{ + _, err = usermanager.RemoveUser(params.Entities{ Entities: []params.Entity{{Tag: jjam.Tag().String()}}}) - c.Check(got.Results, gc.HasLen, 1) - c.Check(errors.Cause(got.Results[0].Error), jc.DeepEquals, - ¶ms.Error{Message: "permission denied", Code: "unauthorized access"}) - c.Assert(err, jc.ErrorIsNil) + c.Assert(err, gc.ErrorMatches, "permission denied") // Make sure jjam is still around. err = jjam.Refresh() @@ -718,12 +730,9 @@ c.Assert(ui.Results[0].Result.Username, gc.DeepEquals, jjam.Name()) // Remove the user as the user - got, err := usermanager.RemoveUser(params.Entities{ + _, err = usermanager.RemoveUser(params.Entities{ Entities: []params.Entity{{Tag: jjam.Tag().String()}}}) - c.Assert(got.Results, gc.HasLen, 1) - c.Check(errors.Cause(got.Results[0].Error), jc.DeepEquals, - ¶ms.Error{Message: "permission denied", Code: "unauthorized access"}) - c.Assert(err, jc.ErrorIsNil) + c.Assert(err, gc.ErrorMatches, "permission denied") // Check if deleted. 
err = jjam.Refresh() diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/utils.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/utils.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/utils.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/utils.go 2016-10-13 14:31:49.000000000 +0000 @@ -52,11 +52,9 @@ if args.strict { return "", errors.Trace(common.UnknownModelError(args.modelUUID)) } - logger.Debugf("validate model uuid: empty modelUUID") return ssState.ModelUUID(), nil } if args.modelUUID == ssState.ModelUUID() { - logger.Debugf("validate model uuid: controller model - %s", args.modelUUID) return args.modelUUID, nil } if args.controllerModelOnly { @@ -69,6 +67,5 @@ if _, err := ssState.GetModel(modelTag); err != nil { return "", errors.Wrap(err, common.UnknownModelError(args.modelUUID)) } - logger.Debugf("validate model uuid: %s", args.modelUUID) return args.modelUUID, nil } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/watcher.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/watcher.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/watcher.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/watcher.go 2016-10-13 14:31:49.000000000 +0000 @@ -15,6 +15,7 @@ "github.com/juju/juju/controller" "github.com/juju/juju/core/migration" "github.com/juju/juju/network" + "github.com/juju/juju/permission" "github.com/juju/juju/state" ) @@ -68,7 +69,14 @@ auth := context.Auth() resources := context.Resources() - if !auth.AuthClient() { + // HasPermission should not be replaced by auth.AuthClient() even if, at first sight, they seem + // equivalent because this allows us to remove login permission for a user + // (a permission that is given by default). 
+ isAuthorized, err := auth.HasPermission(permission.LoginAccess, context.State().ControllerTag()) + if err != nil { + return nil, errors.Trace(err) + } + if !isAuthorized { return nil, common.ErrPerm } watcher, ok := resources.Get(id).(*state.Multiwatcher) @@ -403,7 +411,7 @@ // migrationBackend defines State functionality required by the // migration watchers. type migrationBackend interface { - LatestModelMigration() (state.ModelMigration, error) + LatestMigration() (state.ModelMigration, error) APIHostPorts() ([][]network.HostPort, error) ControllerConfig() (controller.Config, error) } @@ -450,7 +458,7 @@ return empty, err } - mig, err := w.st.LatestModelMigration() + mig, err := w.st.LatestMigration() if errors.IsNotFound(err) { return params.MigrationStatus{ Phase: migration.NONE.String(), diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/watcher_test.go juju-core-2.0.0/src/github.com/juju/juju/apiserver/watcher_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/apiserver/watcher_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/apiserver/watcher_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -100,7 +100,7 @@ c.Assert(result, jc.DeepEquals, params.MigrationStatus{ MigrationId: "id", Attempt: 2, - Phase: "PRECHECK", + Phase: "IMPORT", SourceAPIAddrs: []string{"1.2.3.4:5", "2.3.4.5:6", "3.4.5.6:7"}, SourceCACert: "no worries", TargetAPIAddrs: []string{"1.2.3.4:5555"}, @@ -158,7 +158,7 @@ noMigration bool } -func (b *fakeMigrationBackend) LatestModelMigration() (state.ModelMigration, error) { +func (b *fakeMigrationBackend) LatestMigration() (state.ModelMigration, error) { if b.noMigration { return nil, errors.NotFoundf("migration") } @@ -201,12 +201,12 @@ } func (m *fakeModelMigration) Phase() (migration.Phase, error) { - return migration.PRECHECK, nil + return migration.IMPORT, nil } func (m *fakeModelMigration) TargetInfo() (*migration.TargetInfo, error) { return &migration.TargetInfo{ - 
ControllerTag: names.NewModelTag("uuid"), + ControllerTag: names.NewControllerTag("uuid"), Addrs: []string{"1.2.3.4:5555"}, CACert: "trust me", AuthTag: names.NewUserTag("admin"), diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cert/cert.go juju-core-2.0.0/src/github.com/juju/juju/cert/cert.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cert/cert.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cert/cert.go 2016-10-13 14:31:49.000000000 +0000 @@ -90,11 +90,9 @@ // TODO(perrito666) 2016-05-02 lp:1558657 now := time.Now() - // A serial number can be up to 20 octets in size. - // https://tools.ietf.org/html/rfc5280#section-4.1.2.2 - serialNumber, err := rand.Int(rand.Reader, new(big.Int).Lsh(big.NewInt(1), 8*20)) + serialNumber, err := newSerialNumber() if err != nil { - return "", "", fmt.Errorf("failed to generate serial number: %s", err) + return "", "", errors.Trace(err) } template := &x509.Certificate{ @@ -127,7 +125,19 @@ return string(certPEMData), string(keyPEMData), nil } -// NewServer generates a certificate/key pair suitable for use by a server, with an +// newSerialNumber returns a new random serial number suitable +// for use in a certificate. +func newSerialNumber() (*big.Int, error) { + // A serial number can be up to 20 octets in size. + // https://tools.ietf.org/html/rfc5280#section-4.1.2.2 + n, err := rand.Int(rand.Reader, new(big.Int).Lsh(big.NewInt(1), 8*20)) + if err != nil { + return nil, errors.Annotatef(err, "failed to generate serial number") + } + return n, nil +} + +// NewDefaultServer generates a certificate/key pair suitable for use by a server, with an // expiry time of 10 years. 
func NewDefaultServer(caCertPEM, caKeyPEM string, hostnames []string) (certPEM, keyPEM string, err error) { // TODO(perrito666) 2016-05-02 lp:1558657 @@ -164,10 +174,15 @@ if err != nil { return "", "", errors.Errorf("cannot generate key: %v", err) } + + serialNumber, err := newSerialNumber() + if err != nil { + return "", "", errors.Trace(err) + } // TODO(perrito666) 2016-05-02 lp:1558657 now := time.Now() template := &x509.Certificate{ - SerialNumber: new(big.Int), + SerialNumber: serialNumber, Subject: pkix.Name{ // This won't match host names with dots. The hostname // is hardcoded when connecting to avoid the issue. diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cert/cert_test.go juju-core-2.0.0/src/github.com/juju/juju/cert/cert_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cert/cert_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cert/cert_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -10,6 +10,7 @@ "crypto/x509" "io" "io/ioutil" + "math/big" "net" "strings" "testing" @@ -46,7 +47,7 @@ func (certSuite) TestParseCertificate(c *gc.C) { xcert, err := cert.ParseCert(caCertPEM) c.Assert(err, jc.ErrorIsNil) - c.Assert(xcert.Subject.CommonName, gc.Equals, "juju testing") + c.Assert(xcert.Subject.CommonName, gc.Equals, `juju-generated CA for model "juju testing"`) xcert, err = cert.ParseCert(caKeyPEM) c.Check(xcert, gc.IsNil) @@ -60,7 +61,7 @@ func (certSuite) TestParseCertAndKey(c *gc.C) { xcert, key, err := cert.ParseCertAndKey(caCertPEM, caKeyPEM) c.Assert(err, jc.ErrorIsNil) - c.Assert(xcert.Subject.CommonName, gc.Equals, "juju testing") + c.Assert(xcert.Subject.CommonName, gc.Equals, `juju-generated CA for model "juju testing"`) c.Assert(key, gc.NotNil) c.Assert(xcert.PublicKey.(*rsa.PublicKey), gc.DeepEquals, &key.PublicKey) @@ -122,6 +123,10 @@ c.Assert(srvCert.BasicConstraintsValid, jc.IsFalse) c.Assert(srvCert.IsCA, jc.IsFalse) c.Assert(srvCert.ExtKeyUsage, gc.DeepEquals, 
[]x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}) + c.Assert(srvCert.SerialNumber, gc.NotNil) + if srvCert.SerialNumber.Cmp(big.NewInt(0)) == 0 { + c.Fatalf("zero serial number") + } checkTLSConnection(c, caCert, srvCert, srvKey) } @@ -293,51 +298,58 @@ var ( caCertPEM = ` -----BEGIN CERTIFICATE----- -MIIBnTCCAUmgAwIBAgIBADALBgkqhkiG9w0BAQUwJjENMAsGA1UEChMEanVqdTEV -MBMGA1UEAxMManVqdSB0ZXN0aW5nMB4XDTEyMTExNDE0Mzg1NFoXDTIyMTExNDE0 -NDM1NFowJjENMAsGA1UEChMEanVqdTEVMBMGA1UEAxMManVqdSB0ZXN0aW5nMFow -CwYJKoZIhvcNAQEBA0sAMEgCQQCCOOpn9aWKcKr2GQGtygwD7PdfNe1I9BYiPAqa -2I33F5+6PqFdfujUKvoyTJI6XG4Qo/CECaaN9smhyq9DxzMhAgMBAAGjZjBkMA4G -A1UdDwEB/wQEAwIABDASBgNVHRMBAf8ECDAGAQH/AgEBMB0GA1UdDgQWBBQQDswP -FQGeGMeTzPbHW62EZbbTJzAfBgNVHSMEGDAWgBQQDswPFQGeGMeTzPbHW62EZbbT -JzALBgkqhkiG9w0BAQUDQQAqZzN0DqUyEfR8zIanozyD2pp10m9le+ODaKZDDNfH -8cB2x26F1iZ8ccq5IC2LtQf1IKJnpTcYlLuDvW6yB96g +MIICHDCCAcagAwIBAgIUfzWn5ktGMxD6OiTgfiZyvKdM+ZYwDQYJKoZIhvcNAQEL +BQAwazENMAsGA1UEChMEanVqdTEzMDEGA1UEAwwqanVqdS1nZW5lcmF0ZWQgQ0Eg +Zm9yIG1vZGVsICJqdWp1IHRlc3RpbmciMSUwIwYDVQQFExwxMjM0LUFCQ0QtSVMt +Tk9ULUEtUkVBTC1VVUlEMB4XDTE2MDkyMTEwNDgyN1oXDTI2MDkyODEwNDgyN1ow +azENMAsGA1UEChMEanVqdTEzMDEGA1UEAwwqanVqdS1nZW5lcmF0ZWQgQ0EgZm9y +IG1vZGVsICJqdWp1IHRlc3RpbmciMSUwIwYDVQQFExwxMjM0LUFCQ0QtSVMtTk9U +LUEtUkVBTC1VVUlEMFwwDQYJKoZIhvcNAQEBBQADSwAwSAJBAL+0X+1zl2vt1wI4 +1Q+RnlltJyaJmtwCbHRhREXVGU7t0kTMMNERxqLnuNUyWRz90Rg8s9XvOtCqNYW7 +mypGrFECAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgKkMA8GA1UdEwEB/wQFMAMBAf8w +HQYDVR0OBBYEFHueMLZ1QJ/2sKiPIJ28TzjIMRENMA0GCSqGSIb3DQEBCwUAA0EA +ovZN0RbUHrO8q9Eazh0qPO4mwW9jbGTDz126uNrLoz1g3TyWxIas1wRJ8IbCgxLy +XUrBZO5UPZab66lJWXyseA== -----END CERTIFICATE----- ` caKeyPEM = ` -----BEGIN RSA PRIVATE KEY----- -MIIBOwIBAAJBAII46mf1pYpwqvYZAa3KDAPs91817Uj0FiI8CprYjfcXn7o+oV1+ -6NQq+jJMkjpcbhCj8IQJpo32yaHKr0PHMyECAwEAAQJAYctedh4raLE+Ir0a3qnK -pjQSfiUggtYTvTf7+tfAnZu946PX88ysr7XHPkXEGP4tWDTbl8BfGndrTKswVOx6 -RQIhAOT5OzafJneDQ5cuGLN/hxIPBLWxKT1/25O6dhtBlRyPAiEAkZfFvCtBZyKB 
-JFwDdp+7gE98mXtaFrjctLWeFx797U8CIAnnqiMTwWM8H2ljyhfBtYMXeTmu3zzU -0hfS4hcNwDiLAiEAkNXXU7YEPkFJD46ps1x7/s0UOutHV8tXZD44ou+l1GkCIQDO -HOzuvYngJpoClGw0ipzJPoNZ2Z/GkdOWGByPeKu/8g== +MIIBOgIBAAJBAL+0X+1zl2vt1wI41Q+RnlltJyaJmtwCbHRhREXVGU7t0kTMMNER +xqLnuNUyWRz90Rg8s9XvOtCqNYW7mypGrFECAwEAAQJAMPa+JaUHgO6foxam/LIB +0u95N3OgFR+dWeBaEsgKDclpREdJ0rXNI+3C3kwqeEZR4omoPlBeSEewSkwHxpmI +0QIhAOjKiHZ5v6R8haleipbDzkGUnZW07hEwL5Ld4MNx/QQ1AiEA0tEzSSNAdM0C +M/vY0x5mekIYai8/tFSEG9PJ3ZkpEy0CIQCo9B3YxwI1Un777vbs903iQQeiWP+U +EAHnOQvhLgDxpQIgGkpml+9igW5zoOH+h02aQBLwEoXz7tw/YW0HFrCcE70CIGkS +ve4WjiEqnQaHNAPy0hY/1DfIgBOSpOfnkFHOk9vX -----END RSA PRIVATE KEY----- ` - nonCACert = `-----BEGIN CERTIFICATE----- -MIIBmjCCAUagAwIBAgIBADALBgkqhkiG9w0BAQUwJjENMAsGA1UEChMEanVqdTEV -MBMGA1UEAxMManVqdSB0ZXN0aW5nMB4XDTEyMTExNDE3MTU1NloXDTIyMTExNDE3 -MjA1NlowJjENMAsGA1UEChMEanVqdTEVMBMGA1UEAxMManVqdSB0ZXN0aW5nMFow -CwYJKoZIhvcNAQEBA0sAMEgCQQC96/CsTTY1Va8et6QYNXwrssAi36asFlV/fksG -hqRucidiz/+xHvhs9EiqEu7NGxeVAkcfIhXu6/BDlobtj2v5AgMBAAGjYzBhMA4G -A1UdDwEB/wQEAwIABDAPBgNVHRMBAf8EBTADAgEBMB0GA1UdDgQWBBRqbxkIW4R0 -vmmkUoYuWg9sDob4jzAfBgNVHSMEGDAWgBRqbxkIW4R0vmmkUoYuWg9sDob4jzAL -BgkqhkiG9w0BAQUDQQC3+KN8RppKdvlbP6fDwRC22PaCxd0PVyIHsn7I4jgpBPf8 -Z3codMYYA5/f0AmUsD7wi7nnJVPPLZK7JWu4VI/w + nonCACert = ` +-----BEGIN CERTIFICATE----- +MIIB8jCCAZygAwIBAgIVANueMZWTFEIx6AcNAWsG4VL4sUn5MA0GCSqGSIb3DQEB +CwUAMGsxDTALBgNVBAoTBGp1anUxMzAxBgNVBAMMKmp1anUtZ2VuZXJhdGVkIENB +IGZvciBtb2RlbCAianVqdSB0ZXN0aW5nIjElMCMGA1UEBRMcMTIzNC1BQkNELUlT +LU5PVC1BLVJFQUwtVVVJRDAeFw0xNjA5MjExMDQ4MjdaFw0yNjA5MjgxMDQ4Mjda +MBsxDTALBgNVBAoTBGp1anUxCjAIBgNVBAMTASowXDANBgkqhkiG9w0BAQEFAANL +ADBIAkEAwZps3qpPu2FCAhbxolf/BvSa+dMal3AhPMe+lwTuSbtS81W+WSrbwUSI +ZKSGHYDpFRN6ytNjt1oPbDNKDIR30wIDAQABo2cwZTAOBgNVHQ8BAf8EBAMCA6gw +EwYDVR0lBAwwCgYIKwYBBQUHAwEwHQYDVR0OBBYEFNNUDrcyP/4RbGBpKeC3gmfL +kjlwMB8GA1UdIwQYMBaAFHueMLZ1QJ/2sKiPIJ28TzjIMRENMA0GCSqGSIb3DQEB +CwUAA0EALiurKx//Qh5TQQ0TmT0P5f7OFLIs5XPSS98Lseb92h12CPNO4kB000Yh +Xa7kZRGngwFbvjzqZ0eOfmo0l8M23A== 
-----END CERTIFICATE----- ` - nonCAKey = `-----BEGIN RSA PRIVATE KEY----- -MIIBOgIBAAJBAL3r8KxNNjVVrx63pBg1fCuywCLfpqwWVX9+SwaGpG5yJ2LP/7Ee -+Gz0SKoS7s0bF5UCRx8iFe7r8EOWhu2Pa/kCAwEAAQJAdzuAxStUNPeuEWLJKkmp -wuVdqocuZCtBUeE/yMEOyibZ9NLKSuDJuDorkoeoiBz2vyUITHkLp4jgNmCI8NGg -AQIhAPZG9+3OghlzcqWR4nTho8KO/CuO9bu5G4jNEdIrSJ6BAiEAxWtoLZNMwI4Q -kj2moFk9GdBXZV9I0t1VTwcDvVyeAXkCIDrfvldQPdO9wJOKK3vLkS1qpyf2lhIZ -b1alx3PZuxOBAiAthPltYMRWtar+fTaZTFo5RH+SQSkibaRI534mQF+ySQIhAIml -yiWVLC2XrtwijDu1fwh/wtFCb/bPvqvgG5wgAO+2 + nonCAKey = ` +-----BEGIN RSA PRIVATE KEY----- +MIIBOwIBAAJBAMGabN6qT7thQgIW8aJX/wb0mvnTGpdwITzHvpcE7km7UvNVvlkq +28FEiGSkhh2A6RUTesrTY7daD2wzSgyEd9MCAwEAAQJBAKfeuOvRjVUSneOl9Vsp +Je7oBcD9dR8+kPNc1zungN7YVhIuxqvzXJSPeMGsHloPI+BcFFXv3t+eVCDT9sPL +L+ECIQDq1nqVIEX3k5nn6eI0L5CQbIfEyvWGJ/mOGSo9TWdN+QIhANMMsopPb9ct +Z61LqPmTtNX4nhHyMEjxbUzqzsZzsRcrAiBeYyhP6fHVSXERopK1kOyU79o+Aalf +a4/FSl4M16CO2QIgOBQZpNKyvxRbhhqijZ6H4IstRUt7NQahqlyCEQ1Qsv0CIQDQ +tUzgFwUpd6NVButkqWGqnmBeKUOs97dqSyOzN9Nk8w== -----END RSA PRIVATE KEY----- ` ) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/charmstore/client_test.go juju-core-2.0.0/src/github.com/juju/juju/charmstore/client_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/charmstore/client_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/charmstore/client_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -64,10 +64,10 @@ Channel: params.StableChannel, }, { URL: bar, - Channel: params.DevelopmentChannel, + Channel: params.EdgeChannel, }, { URL: baz, - Channel: params.DevelopmentChannel, + Channel: params.EdgeChannel, }}, nil) c.Assert(err, jc.ErrorIsNil) expected := []CharmRevision{{ @@ -79,8 +79,8 @@ }} c.Check(ret, jc.SameContents, expected) s.wrapper.stableStub.CheckCall(c, 0, "Latest", params.StableChannel, []*charm.URL{foo}, map[string][]string(nil)) - s.wrapper.devStub.CheckCall(c, 0, "Latest", params.DevelopmentChannel, []*charm.URL{bar}, map[string][]string(nil)) - 
s.wrapper.devStub.CheckCall(c, 1, "Latest", params.DevelopmentChannel, []*charm.URL{baz}, map[string][]string(nil)) + s.wrapper.devStub.CheckCall(c, 0, "Latest", params.EdgeChannel, []*charm.URL{bar}, map[string][]string(nil)) + s.wrapper.devStub.CheckCall(c, 1, "Latest", params.EdgeChannel, []*charm.URL{baz}, map[string][]string(nil)) } func (s *ClientSuite) TestListResources(c *gc.C) { @@ -130,10 +130,10 @@ Channel: params.StableChannel, }, { URL: bar, - Channel: params.DevelopmentChannel, + Channel: params.EdgeChannel, }, { URL: baz, - Channel: params.DevelopmentChannel, + Channel: params.EdgeChannel, }}) c.Assert(err, jc.ErrorIsNil) @@ -152,8 +152,8 @@ {dev2Out}, }) s.wrapper.stableStub.CheckCall(c, 0, "ListResources", params.StableChannel, foo) - s.wrapper.devStub.CheckCall(c, 0, "ListResources", params.DevelopmentChannel, bar) - s.wrapper.devStub.CheckCall(c, 1, "ListResources", params.DevelopmentChannel, baz) + s.wrapper.devStub.CheckCall(c, 0, "ListResources", params.EdgeChannel, bar) + s.wrapper.devStub.CheckCall(c, 1, "ListResources", params.EdgeChannel, baz) } func (s *ClientSuite) TestListResourcesError(c *gc.C) { @@ -194,7 +194,7 @@ req := ResourceRequest{ Charm: charm.MustParseURL("cs:mysql"), - Channel: params.DevelopmentChannel, + Channel: params.EdgeChannel, Name: "name", Revision: 5, } @@ -205,8 +205,8 @@ c.Check(data.Resource, gc.DeepEquals, expected) c.Check(data.ReadCloser, gc.DeepEquals, rc) // call #0 is a call to makeWrapper - s.wrapper.stub.CheckCall(c, 1, "ResourceMeta", params.DevelopmentChannel, req.Charm, req.Name, req.Revision) - s.wrapper.stub.CheckCall(c, 2, "GetResource", params.DevelopmentChannel, req.Charm, req.Name, req.Revision) + s.wrapper.stub.CheckCall(c, 1, "ResourceMeta", params.EdgeChannel, req.Charm, req.Name, req.Revision) + s.wrapper.stub.CheckCall(c, 2, "GetResource", params.EdgeChannel, req.Charm, req.Name, req.Revision) } func (s *ClientSuite) TestResourceInfo(c *gc.C) { diff -Nru 
juju-core-2.0~beta15/src/github.com/juju/juju/charmstore/latest.go juju-core-2.0.0/src/github.com/juju/juju/charmstore/latest.go --- juju-core-2.0~beta15/src/github.com/juju/juju/charmstore/latest.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/charmstore/latest.go 2016-10-13 14:31:49.000000000 +0000 @@ -7,6 +7,8 @@ "time" "github.com/juju/errors" + + "github.com/juju/juju/version" ) const jujuMetadataHTTPHeader = "Juju-Metadata" @@ -17,13 +19,19 @@ // the macaroon has been updated. This updated macaroon should be stored for // use in any further requests. Note that this map may be non-empty even if // this method returns an error (and the macaroons should be stored). -func LatestCharmInfo(client Client, charms []CharmID, modelUUID string) ([]CharmInfoResult, error) { +func LatestCharmInfo(client Client, charms []CharmID, metadata map[string]string) ([]CharmInfoResult, error) { // TODO(perrito666) 2016-05-02 lp:1558657 now := time.Now().UTC() // Do a bulk call to get the revision info for all charms. 
logger.Infof("retrieving revision information for %d charms", len(charms)) revResults, err := client.LatestRevisions(charms, map[string][]string{ - jujuMetadataHTTPHeader: []string{"environment_uuid=" + modelUUID}, + jujuMetadataHTTPHeader: []string{ + "environment_uuid=" + metadata["environment_uuid"], + "cloud=" + metadata["cloud"], + "cloud_region=" + metadata["cloud_region"], + "provider=" + metadata["provider"], + "controller_version=" + version.Current.String(), + }, }) if err != nil { err = errors.Annotate(err, "while getting latest charm revision info") diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/charmstore/latest_test.go juju-core-2.0.0/src/github.com/juju/juju/charmstore/latest_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/charmstore/latest_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/charmstore/latest_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -13,6 +13,8 @@ "gopkg.in/juju/charm.v6-unstable" charmresource "gopkg.in/juju/charm.v6-unstable/resource" "gopkg.in/juju/charmrepo.v2-unstable/csclient/params" + + "github.com/juju/juju/version" ) type LatestCharmInfoSuite struct { @@ -67,12 +69,19 @@ client, err := newCachingClient(s.cache, nil, s.lowLevel.makeWrapper) c.Assert(err, jc.ErrorIsNil) - results, err := LatestCharmInfo(client, charms, "foobar") + metadata := map[string]string{ + "environment_uuid": "foouuid", + "cloud": "foocloud", + "cloud_region": "fooregion", + "provider": "fooprovider", + } + results, err := LatestCharmInfo(client, charms, metadata) c.Assert(err, jc.ErrorIsNil) - s.lowLevel.stableStub.CheckCall(c, 0, "Latest", params.StableChannel, []*charm.URL{spam}, map[string][]string{"Juju-Metadata": []string{"environment_uuid=foobar"}}) - s.lowLevel.stableStub.CheckCall(c, 1, "Latest", params.StableChannel, []*charm.URL{eggs}, map[string][]string{"Juju-Metadata": []string{"environment_uuid=foobar"}}) - s.lowLevel.stableStub.CheckCall(c, 2, "Latest", params.StableChannel, 
[]*charm.URL{ham}, map[string][]string{"Juju-Metadata": []string{"environment_uuid=foobar"}}) + header := []string{"environment_uuid=foouuid", "cloud=foocloud", "cloud_region=fooregion", "provider=fooprovider", "controller_version=" + version.Current.String()} + s.lowLevel.stableStub.CheckCall(c, 0, "Latest", params.StableChannel, []*charm.URL{spam}, map[string][]string{"Juju-Metadata": header}) + s.lowLevel.stableStub.CheckCall(c, 1, "Latest", params.StableChannel, []*charm.URL{eggs}, map[string][]string{"Juju-Metadata": header}) + s.lowLevel.stableStub.CheckCall(c, 2, "Latest", params.StableChannel, []*charm.URL{ham}, map[string][]string{"Juju-Metadata": header}) s.lowLevel.stableStub.CheckCall(c, 3, "ListResources", params.StableChannel, spam) s.lowLevel.stableStub.CheckCall(c, 4, "ListResources", params.StableChannel, eggs) s.lowLevel.stableStub.CheckCall(c, 5, "ListResources", params.StableChannel, ham) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cloud/clouds.go juju-core-2.0.0/src/github.com/juju/juju/cloud/clouds.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cloud/clouds.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cloud/clouds.go 2016-10-13 14:31:49.000000000 +0000 @@ -58,6 +58,17 @@ EmptyAuthType AuthType = "empty" ) +// Attrs serves as a map to hold regions specific configuration attributes. +// This serves to reduce confusion over having a nested map, i.e. +// map[string]map[string]interface{} +type Attrs map[string]interface{} + +// RegionConfig holds a map of regions and the attributes that serve as the +// region specific configuration options. This allows model inheritance to +// function, providing a place to store configuration for a specific region +// which is passed down to other models under the same controller. +type RegionConfig map[string]Attrs + // Cloud is a cloud definition. type Cloud struct { // Type is the type of cloud, eg ec2, openstack etc. 
@@ -65,6 +76,9 @@ // environs.RegisterProvider. Type string + // Description describes the type of cloud. + Description string + // AuthTypes are the authentication modes supported by the cloud. AuthTypes AuthTypes @@ -72,6 +86,10 @@ // overridden by a region. Endpoint string + // IdentityEndpoint is the default identity endpoint for the cloud + // regions, may be overridden by a region. + IdentityEndpoint string + // StorageEndpoint is the default storage endpoint for the cloud // regions, may be overridden by a region. StorageEndpoint string @@ -88,6 +106,11 @@ // will be combined with Juju-generated, and user-supplied values; // user-supplied values taking precedence. Config map[string]interface{} + + // RegionConfig contains optional region specific configuration. + // Like Config above, this will be combined with Juju-generated and user + // supplied values; with user supplied values taking precedence. + RegionConfig RegionConfig } // Region is a cloud region. @@ -98,6 +121,11 @@ // Endpoint is the region's primary endpoint URL. Endpoint string + // IdentityEndpoint is the region's identity endpoint URL. + // If the cloud/region does not have an identity-specific + // endpoint URL, this will be empty. + IdentityEndpoint string + // StorageEndpoint is the region's storage endpoint URL. // If the cloud/region does not have a storage-specific // endpoint URL, this will be empty. @@ -113,12 +141,15 @@ // cloud is equivalent to Cloud, for marshalling and unmarshalling. 
type cloud struct { - Type string `yaml:"type"` - AuthTypes []AuthType `yaml:"auth-types,omitempty,flow"` - Endpoint string `yaml:"endpoint,omitempty"` - StorageEndpoint string `yaml:"storage-endpoint,omitempty"` - Regions regions `yaml:"regions,omitempty"` - Config map[string]interface{} `yaml:"config,omitempty"` + Type string `yaml:"type"` + Description string `yaml:"description,omitempty"` + AuthTypes []AuthType `yaml:"auth-types,omitempty,flow"` + Endpoint string `yaml:"endpoint,omitempty"` + IdentityEndpoint string `yaml:"identity-endpoint,omitempty"` + StorageEndpoint string `yaml:"storage-endpoint,omitempty"` + Regions regions `yaml:"regions,omitempty"` + Config map[string]interface{} `yaml:"config,omitempty"` + RegionConfig RegionConfig `yaml:"region-config,omitempty"` } // regions is a collection of regions, either as a map and/or @@ -138,8 +169,9 @@ // region is equivalent to Region, for marshalling and unmarshalling. type region struct { - Endpoint string `yaml:"endpoint,omitempty"` - StorageEndpoint string `yaml:"storage-endpoint,omitempty"` + Endpoint string `yaml:"endpoint,omitempty"` + IdentityEndpoint string `yaml:"identity-endpoint,omitempty"` + StorageEndpoint string `yaml:"storage-endpoint,omitempty"` } //DefaultLXD is the name of the default lxd cloud. @@ -148,9 +180,10 @@ // BuiltInClouds work out of the box. var BuiltInClouds = map[string]Cloud{ DefaultLXD: { - Type: lxdnames.ProviderType, - AuthTypes: []AuthType{EmptyAuthType}, - Regions: []Region{{Name: lxdnames.DefaultRegion}}, + Type: lxdnames.ProviderType, + AuthTypes: []AuthType{EmptyAuthType}, + Regions: []Region{{Name: lxdnames.DefaultRegion}}, + Description: defaultCloudDescription[lxdnames.ProviderType], }, } @@ -244,11 +277,33 @@ // the first region for the cloud as its default region. 
clouds := make(map[string]Cloud) for name, cloud := range metadata.Clouds { - clouds[name] = cloudFromInternal(cloud) + details := cloudFromInternal(cloud) + if details.Description == "" { + var ok bool + if details.Description, ok = defaultCloudDescription[name]; !ok { + details.Description = defaultCloudDescription[cloud.Type] + } + } + clouds[name] = details } return clouds, nil } +var defaultCloudDescription = map[string]string{ + "aws": "Amazon Web Services", + "aws-china": "Amazon China", + "aws-gov": "Amazon (USA Government)", + "google": "Google Cloud Platform", + "azure": "Microsoft Azure", + "azure-china": "Microsoft Azure China", + "rackspace": "Rackspace Cloud", + "joyent": "Joyent Cloud", + "cloudsigma": "CloudSigma Cloud", + "lxd": "LXD Container Hypervisor", + "maas": "Metal As A Service", + "openstack": "Openstack Cloud", +} + // WritePublicCloudMetadata marshals to YAML and writes the cloud metadata // to the public cloud file. func WritePublicCloudMetadata(cloudsMap map[string]Cloud) error { @@ -305,16 +360,22 @@ var regions regions for _, r := range in.Regions { regions.Slice = append(regions.Slice, yaml.MapItem{ - r.Name, region{r.Endpoint, r.StorageEndpoint}, + r.Name, region{ + r.Endpoint, + r.IdentityEndpoint, + r.StorageEndpoint, + }, }) } return &cloud{ - Type: in.Type, - AuthTypes: in.AuthTypes, - Endpoint: in.Endpoint, - StorageEndpoint: in.StorageEndpoint, - Regions: regions, - Config: in.Config, + Type: in.Type, + AuthTypes: in.AuthTypes, + Endpoint: in.Endpoint, + IdentityEndpoint: in.IdentityEndpoint, + StorageEndpoint: in.StorageEndpoint, + Regions: regions, + Config: in.Config, + RegionConfig: in.RegionConfig, } } @@ -330,18 +391,24 @@ regions = append(regions, Region{Name: name}) } else { regions = append(regions, Region{ - name, r.Endpoint, r.StorageEndpoint, + name, + r.Endpoint, + r.IdentityEndpoint, + r.StorageEndpoint, }) } } } meta := Cloud{ - Type: in.Type, - AuthTypes: in.AuthTypes, - Endpoint: in.Endpoint, - 
StorageEndpoint: in.StorageEndpoint, - Regions: regions, - Config: in.Config, + Type: in.Type, + AuthTypes: in.AuthTypes, + Endpoint: in.Endpoint, + IdentityEndpoint: in.IdentityEndpoint, + StorageEndpoint: in.StorageEndpoint, + Regions: regions, + Config: in.Config, + RegionConfig: in.RegionConfig, + Description: in.Description, } meta.denormaliseMetadata() return meta diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cloud/clouds_test.go juju-core-2.0.0/src/github.com/juju/juju/cloud/clouds_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cloud/clouds_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cloud/clouds_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -11,12 +11,11 @@ gc "gopkg.in/check.v1" "github.com/juju/juju/cloud" - "github.com/juju/juju/juju/osenv" "github.com/juju/juju/testing" ) type cloudSuite struct { - testing.BaseSuite + testing.FakeJujuXDGDataHomeSuite } var _ = gc.Suite(&cloudSuite{}) @@ -61,7 +60,7 @@ func (s *cloudSuite) TestParseCloudsAuthTypes(c *gc.C) { clouds := parsePublicClouds(c) rackspace := clouds["rackspace"] - c.Assert(rackspace.AuthTypes, jc.SameContents, cloud.AuthTypes{"access-key", "userpass"}) + c.Assert(rackspace.AuthTypes, jc.SameContents, cloud.AuthTypes{"userpass"}) } func (s *cloudSuite) TestParseCloudsConfig(c *gc.C) { @@ -84,6 +83,46 @@ }) } +func (s *cloudSuite) TestParseCloudsRegionConfig(c *gc.C) { + clouds, err := cloud.ParseCloudMetadata([]byte(`clouds: + testing: + type: dummy + config: + k1: v1 + k2: 2.0 + region-config: + region1: + mascot: [eggs, ham] + region2: + mascot: glenda + region3: + mascot: gopher +`)) + c.Assert(err, jc.ErrorIsNil) + c.Assert(clouds, gc.HasLen, 1) + testingCloud := clouds["testing"] + c.Assert(testingCloud, jc.DeepEquals, cloud.Cloud{ + Type: "dummy", + Config: map[string]interface{}{ + "k1": "v1", + "k2": float64(2.0), + }, + RegionConfig: cloud.RegionConfig{ + "region1": cloud.Attrs{ + "mascot": []interface{}{"eggs", "ham"}, + 
}, + + "region2": cloud.Attrs{ + "mascot": "glenda", + }, + + "region3": cloud.Attrs{ + "mascot": "gopher", + }, + }, + }) +} + func (s *cloudSuite) TestPublicCloudsMetadataFallback(c *gc.C) { clouds, fallbackUsed, err := cloud.PublicCloudMetadata("badfile.yaml") c.Assert(err, jc.ErrorIsNil) @@ -111,8 +150,9 @@ c.Assert(fallbackUsed, jc.IsFalse) c.Assert(clouds, jc.DeepEquals, map[string]cloud.Cloud{ "aws-me": cloud.Cloud{ - Type: "aws", - AuthTypes: []cloud.AuthType{"userpass"}, + Type: "aws", + Description: "Amazon Web Services", + AuthTypes: []cloud.AuthType{"userpass"}, }, }) } @@ -128,13 +168,11 @@ } func (s *cloudSuite) TestWritePublicCloudsMetadata(c *gc.C) { - origHome := osenv.SetJujuXDGDataHome(c.MkDir()) - s.AddCleanup(func(*gc.C) { osenv.SetJujuXDGDataHome(origHome) }) - clouds := map[string]cloud.Cloud{ "aws-me": cloud.Cloud{ - Type: "aws", - AuthTypes: []cloud.AuthType{"userpass"}, + Type: "aws", + Description: "Amazon Web Services", + AuthTypes: []cloud.AuthType{"userpass"}, }, } err := cloud.WritePublicCloudMetadata(clouds) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cloud/credentials.go juju-core-2.0.0/src/github.com/juju/juju/cloud/credentials.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cloud/credentials.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cloud/credentials.go 2016-10-13 14:31:49.000000000 +0000 @@ -33,6 +33,9 @@ authType AuthType attributes map[string]string + // Revoked is true if the credential has been revoked. + Revoked bool + // Label is optionally set to describe the credentials to a user. 
Label string } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cloud/credentials_test.go juju-core-2.0.0/src/github.com/juju/juju/cloud/credentials_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cloud/credentials_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cloud/credentials_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -620,6 +620,7 @@ "password": "secret", }, ) + c.Assert(cred.Revoked, jc.IsFalse) schema := cloud.CredentialSchema{{ "username", cloud.CredentialAttr{}, }, { diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cloud/defaults.go juju-core-2.0.0/src/github.com/juju/juju/cloud/defaults.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cloud/defaults.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cloud/defaults.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,19 +0,0 @@ -// Copyright 2016 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package cloud - -// Defaults describes a set of defaults for cloud, region, -// and credential to use. -type Defaults struct { - // Cloud is the name of the cloud to use by default. - Cloud string - - // Region is the name of the cloud region to use by default, - // if the cloud supports regions. - Region string - - // Credential is the name of the cloud credential to use - // by default, if the cloud requires credentials. - Credential string -} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cloud/fallback_public_cloud.go juju-core-2.0.0/src/github.com/juju/juju/cloud/fallback_public_cloud.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cloud/fallback_public_cloud.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cloud/fallback_public_cloud.go 2016-10-13 14:31:49.000000000 +0000 @@ -5,47 +5,52 @@ // Generated code - do not edit. -const fallbackPublicCloudInfo = ` -# DO NOT EDIT, will be overwritten, use "juju update-clouds" to refresh. 
+const fallbackPublicCloudInfo = `# DO NOT EDIT, will be overwritten, use "juju update-clouds" to refresh. clouds: aws: type: ec2 + description: Amazon Web Services auth-types: [ access-key ] regions: us-east-1: - endpoint: https://us-east-1.aws.amazon.com/v1.2/ + endpoint: https://ec2.us-east-1.amazonaws.com us-west-1: - endpoint: https://us-west-1.aws.amazon.com/v1.2/ + endpoint: https://ec2.us-west-1.amazonaws.com us-west-2: - endpoint: https://us-west-2.aws.amazon.com/v1.2/ + endpoint: https://ec2.us-west-2.amazonaws.com eu-west-1: - endpoint: https://eu-west-1.aws.amazon.com/v1.2/ + endpoint: https://ec2.eu-west-1.amazonaws.com eu-central-1: - endpoint: https://eu-central-1.aws.amazon.com/v1.2/ + endpoint: https://ec2.eu-central-1.amazonaws.com + ap-south-1: + endpoint: https://ec2.ap-south-1.amazonaws.com ap-southeast-1: - endpoint: https://ap-southeast-1.aws.amazon.com/v1.2/ + endpoint: https://ec2.ap-southeast-1.amazonaws.com ap-southeast-2: - endpoint: https://ap-southeast-2.aws.amazon.com/v1.2/ + endpoint: https://ec2.ap-southeast-2.amazonaws.com ap-northeast-1: - endpoint: https://ap-northeast-1.aws.amazon.com/v1.2/ + endpoint: https://ec2.ap-northeast-1.amazonaws.com ap-northeast-2: - endpoint: https://ap-northeast-2.aws.amazon.com/v1.2/ + endpoint: https://ec2.ap-northeast-2.amazonaws.com sa-east-1: - endpoint: https://sa-east-1.aws.amazon.com/v1.2/ + endpoint: https://ec2.sa-east-1.amazonaws.com aws-china: type: ec2 + description: Amazon China auth-types: [ access-key ] regions: cn-north-1: - endpoint: https://ec2.cn-north-1.amazonaws.com.cn/ + endpoint: https://ec2.cn-north-1.amazonaws.com.cn aws-gov: type: ec2 + description: Amazon (USA Government) auth-types: [ access-key ] regions: us-gov-west-1: - endpoint: https://ec2.us-gov-west-1.amazonaws-govcloud.com + endpoint: https://ec2.us-gov-west-1.amazonaws.com google: type: gce + description: Google Cloud Platform auth-types: [ jsonfile, oauth2 ] regions: us-east1: @@ -58,75 +63,98 @@ endpoint: 
https://www.googleapis.com azure: type: azure - auth-types: [ userpass ] + description: Microsoft Azure + auth-types: [ interactive, service-principal-secret ] regions: centralus: endpoint: https://management.azure.com storage-endpoint: https://core.windows.net + identity-endpoint: https://graph.windows.net eastus: endpoint: https://management.azure.com storage-endpoint: https://core.windows.net + identity-endpoint: https://graph.windows.net eastus2: endpoint: https://management.azure.com storage-endpoint: https://core.windows.net + identity-endpoint: https://graph.windows.net northcentralus: endpoint: https://management.azure.com storage-endpoint: https://core.windows.net + identity-endpoint: https://graph.windows.net southcentralus: endpoint: https://management.azure.com storage-endpoint: https://core.windows.net + identity-endpoint: https://graph.windows.net westus: endpoint: https://management.azure.com storage-endpoint: https://core.windows.net + identity-endpoint: https://graph.windows.net northeurope: endpoint: https://management.azure.com storage-endpoint: https://core.windows.net + identity-endpoint: https://graph.windows.net westeurope: endpoint: https://management.azure.com storage-endpoint: https://core.windows.net + identity-endpoint: https://graph.windows.net eastasia: endpoint: https://management.azure.com storage-endpoint: https://core.windows.net + identity-endpoint: https://graph.windows.net southeastasia: endpoint: https://management.azure.com storage-endpoint: https://core.windows.net + identity-endpoint: https://graph.windows.net japaneast: endpoint: https://management.azure.com storage-endpoint: https://core.windows.net + identity-endpoint: https://graph.windows.net japanwest: endpoint: https://management.azure.com storage-endpoint: https://core.windows.net + identity-endpoint: https://graph.windows.net brazilsouth: endpoint: https://management.azure.com storage-endpoint: https://core.windows.net + identity-endpoint: https://graph.windows.net 
australiaeast: endpoint: https://management.azure.com storage-endpoint: https://core.windows.net + identity-endpoint: https://graph.windows.net australiasoutheast: endpoint: https://management.azure.com storage-endpoint: https://core.windows.net + identity-endpoint: https://graph.windows.net centralindia: endpoint: https://management.azure.com storage-endpoint: https://core.windows.net + identity-endpoint: https://graph.windows.net southindia: endpoint: https://management.azure.com storage-endpoint: https://core.windows.net + identity-endpoint: https://graph.windows.net westindia: endpoint: https://management.azure.com storage-endpoint: https://core.windows.net + identity-endpoint: https://graph.windows.net azure-china: type: azure - auth-types: [ userpass ] + description: Microsoft Azure China + auth-types: [ interactive, service-principal-secret ] regions: chinaeast: endpoint: https://management.chinacloudapi.cn storage-endpoint: https://core.chinacloudapi.cn + identity-endpoint: https://graph.chinacloudapi.cn chinanorth: endpoint: https://management.chinacloudapi.cn storage-endpoint: https://core.chinacloudapi.cn + identity-endpoint: https://graph.chinacloudapi.cn rackspace: type: rackspace - auth-types: [ access-key, userpass ] + description: Rackspace Cloud + auth-types: [ userpass ] endpoint: https://identity.api.rackspacecloud.com/v2.0 regions: dfw: @@ -143,6 +171,7 @@ endpoint: https://identity.api.rackspacecloud.com/v2.0 joyent: type: joyent + description: Joyent Cloud auth-types: [ userpass ] regions: eu-ams-1: @@ -159,6 +188,7 @@ endpoint: https://us-west-1.api.joyentcloud.com cloudsigma: type: cloudsigma + description: CloudSigma Cloud auth-types: [ userpass ] regions: hnl: diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cloud/fallback-public-cloud.yaml juju-core-2.0.0/src/github.com/juju/juju/cloud/fallback-public-cloud.yaml --- juju-core-2.0~beta15/src/github.com/juju/juju/cloud/fallback-public-cloud.yaml 2016-08-16 08:56:25.000000000 +0000 
+++ juju-core-2.0.0/src/github.com/juju/juju/cloud/fallback-public-cloud.yaml 2016-10-13 14:31:49.000000000 +0000 @@ -2,42 +2,48 @@ clouds: aws: type: ec2 + description: Amazon Web Services auth-types: [ access-key ] regions: us-east-1: - endpoint: https://us-east-1.aws.amazon.com/v1.2/ + endpoint: https://ec2.us-east-1.amazonaws.com us-west-1: - endpoint: https://us-west-1.aws.amazon.com/v1.2/ + endpoint: https://ec2.us-west-1.amazonaws.com us-west-2: - endpoint: https://us-west-2.aws.amazon.com/v1.2/ + endpoint: https://ec2.us-west-2.amazonaws.com eu-west-1: - endpoint: https://eu-west-1.aws.amazon.com/v1.2/ + endpoint: https://ec2.eu-west-1.amazonaws.com eu-central-1: - endpoint: https://eu-central-1.aws.amazon.com/v1.2/ + endpoint: https://ec2.eu-central-1.amazonaws.com + ap-south-1: + endpoint: https://ec2.ap-south-1.amazonaws.com ap-southeast-1: - endpoint: https://ap-southeast-1.aws.amazon.com/v1.2/ + endpoint: https://ec2.ap-southeast-1.amazonaws.com ap-southeast-2: - endpoint: https://ap-southeast-2.aws.amazon.com/v1.2/ + endpoint: https://ec2.ap-southeast-2.amazonaws.com ap-northeast-1: - endpoint: https://ap-northeast-1.aws.amazon.com/v1.2/ + endpoint: https://ec2.ap-northeast-1.amazonaws.com ap-northeast-2: - endpoint: https://ap-northeast-2.aws.amazon.com/v1.2/ + endpoint: https://ec2.ap-northeast-2.amazonaws.com sa-east-1: - endpoint: https://sa-east-1.aws.amazon.com/v1.2/ + endpoint: https://ec2.sa-east-1.amazonaws.com aws-china: type: ec2 + description: Amazon China auth-types: [ access-key ] regions: cn-north-1: - endpoint: https://ec2.cn-north-1.amazonaws.com.cn/ + endpoint: https://ec2.cn-north-1.amazonaws.com.cn aws-gov: type: ec2 + description: Amazon (USA Government) auth-types: [ access-key ] regions: us-gov-west-1: - endpoint: https://ec2.us-gov-west-1.amazonaws-govcloud.com + endpoint: https://ec2.us-gov-west-1.amazonaws.com google: type: gce + description: Google Cloud Platform auth-types: [ jsonfile, oauth2 ] regions: us-east1: @@ -50,75 
+56,98 @@ endpoint: https://www.googleapis.com azure: type: azure - auth-types: [ userpass ] + description: Microsoft Azure + auth-types: [ interactive, service-principal-secret ] regions: centralus: endpoint: https://management.azure.com storage-endpoint: https://core.windows.net + identity-endpoint: https://graph.windows.net eastus: endpoint: https://management.azure.com storage-endpoint: https://core.windows.net + identity-endpoint: https://graph.windows.net eastus2: endpoint: https://management.azure.com storage-endpoint: https://core.windows.net + identity-endpoint: https://graph.windows.net northcentralus: endpoint: https://management.azure.com storage-endpoint: https://core.windows.net + identity-endpoint: https://graph.windows.net southcentralus: endpoint: https://management.azure.com storage-endpoint: https://core.windows.net + identity-endpoint: https://graph.windows.net westus: endpoint: https://management.azure.com storage-endpoint: https://core.windows.net + identity-endpoint: https://graph.windows.net northeurope: endpoint: https://management.azure.com storage-endpoint: https://core.windows.net + identity-endpoint: https://graph.windows.net westeurope: endpoint: https://management.azure.com storage-endpoint: https://core.windows.net + identity-endpoint: https://graph.windows.net eastasia: endpoint: https://management.azure.com storage-endpoint: https://core.windows.net + identity-endpoint: https://graph.windows.net southeastasia: endpoint: https://management.azure.com storage-endpoint: https://core.windows.net + identity-endpoint: https://graph.windows.net japaneast: endpoint: https://management.azure.com storage-endpoint: https://core.windows.net + identity-endpoint: https://graph.windows.net japanwest: endpoint: https://management.azure.com storage-endpoint: https://core.windows.net + identity-endpoint: https://graph.windows.net brazilsouth: endpoint: https://management.azure.com storage-endpoint: https://core.windows.net + identity-endpoint: 
https://graph.windows.net australiaeast: endpoint: https://management.azure.com storage-endpoint: https://core.windows.net + identity-endpoint: https://graph.windows.net australiasoutheast: endpoint: https://management.azure.com storage-endpoint: https://core.windows.net + identity-endpoint: https://graph.windows.net centralindia: endpoint: https://management.azure.com storage-endpoint: https://core.windows.net + identity-endpoint: https://graph.windows.net southindia: endpoint: https://management.azure.com storage-endpoint: https://core.windows.net + identity-endpoint: https://graph.windows.net westindia: endpoint: https://management.azure.com storage-endpoint: https://core.windows.net + identity-endpoint: https://graph.windows.net azure-china: type: azure - auth-types: [ userpass ] + description: Microsoft Azure China + auth-types: [ interactive, service-principal-secret ] regions: chinaeast: endpoint: https://management.chinacloudapi.cn storage-endpoint: https://core.chinacloudapi.cn + identity-endpoint: https://graph.chinacloudapi.cn chinanorth: endpoint: https://management.chinacloudapi.cn storage-endpoint: https://core.chinacloudapi.cn + identity-endpoint: https://graph.chinacloudapi.cn rackspace: type: rackspace - auth-types: [ access-key, userpass ] + description: Rackspace Cloud + auth-types: [ userpass ] endpoint: https://identity.api.rackspacecloud.com/v2.0 regions: dfw: @@ -135,6 +164,7 @@ endpoint: https://identity.api.rackspacecloud.com/v2.0 joyent: type: joyent + description: Joyent Cloud auth-types: [ userpass ] regions: eu-ams-1: @@ -151,6 +181,7 @@ endpoint: https://us-west-1.api.joyentcloud.com cloudsigma: type: cloudsigma + description: CloudSigma Cloud auth-types: [ userpass ] regions: hnl: diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cloud/personalclouds_test.go juju-core-2.0.0/src/github.com/juju/juju/cloud/personalclouds_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cloud/personalclouds_test.go 2016-08-16 
08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cloud/personalclouds_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -95,22 +95,26 @@ func (s *personalCloudSuite) assertPersonalClouds(c *gc.C, clouds map[string]cloud.Cloud) { c.Assert(clouds, jc.DeepEquals, map[string]cloud.Cloud{ "homestack": cloud.Cloud{ - Type: "openstack", - AuthTypes: []cloud.AuthType{"userpass", "access-key"}, - Endpoint: "http://homestack", + Type: "openstack", + Description: "Openstack Cloud", + AuthTypes: []cloud.AuthType{"userpass", "access-key"}, + Endpoint: "http://homestack", Regions: []cloud.Region{ cloud.Region{Name: "london", Endpoint: "http://london/1.0"}, }, }, "azurestack": cloud.Cloud{ - Type: "azure", - AuthTypes: []cloud.AuthType{"userpass"}, - StorageEndpoint: "http://storage.azurestack.local", + Type: "azure", + Description: "Microsoft Azure", + AuthTypes: []cloud.AuthType{"userpass"}, + IdentityEndpoint: "http://login.azurestack.local", + StorageEndpoint: "http://storage.azurestack.local", Regions: []cloud.Region{ cloud.Region{ - Name: "local", - Endpoint: "http://azurestack.local", - StorageEndpoint: "http://storage.azurestack.local", + Name: "local", + Endpoint: "http://azurestack.local", + IdentityEndpoint: "http://login.azurestack.local", + StorageEndpoint: "http://storage.azurestack.local", }, }, }, @@ -130,6 +134,7 @@ azurestack: type: azure auth-types: [userpass] + identity-endpoint: http://login.azurestack.local storage-endpoint: http://storage.azurestack.local regions: local: diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cloudconfig/cloudinit/cloudinit_centos.go juju-core-2.0.0/src/github.com/juju/juju/cloudconfig/cloudinit/cloudinit_centos.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cloudconfig/cloudinit/cloudinit_centos.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cloudconfig/cloudinit/cloudinit_centos.go 2016-10-13 14:31:49.000000000 +0000 @@ -8,6 +8,8 @@ "fmt" "strings" + 
"github.com/juju/juju/feature" + "github.com/juju/utils/featureflag" "github.com/juju/utils/packaging" "github.com/juju/utils/packaging/config" "github.com/juju/utils/proxy" @@ -177,8 +179,10 @@ } pkgs := cfg.Packages() + if len(pkgs) > 0 { + cmds = append([]string{LogProgressCmd(fmt.Sprintf("Installing %s", strings.Join(pkgs, ", ")))}, cmds...) + } for _, pkg := range pkgs { - cmds = append(cmds, LogProgressCmd("Installing package: %s", pkg)) cmds = append(cmds, "package_manager_loop "+cfg.paccmder.InstallCmd(pkg)) } return cmds, nil @@ -210,6 +214,9 @@ "nmap-ncat", "tmux", } + if featureflag.Enabled(feature.DeveloperMode) { + packages = append(packages, "socat") + } // The required packages need to come from the correct repo. // For CentOS 7, this requires an rpm cloud archive be up. diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cloudconfig/cloudinit/cloudinit_ubuntu.go juju-core-2.0.0/src/github.com/juju/juju/cloudconfig/cloudinit/cloudinit_ubuntu.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cloudconfig/cloudinit/cloudinit_ubuntu.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cloudconfig/cloudinit/cloudinit_ubuntu.go 2016-10-13 14:31:49.000000000 +0000 @@ -8,11 +8,15 @@ "fmt" "strings" + "github.com/juju/errors" "github.com/juju/utils" + "github.com/juju/utils/featureflag" "github.com/juju/utils/packaging" "github.com/juju/utils/packaging/config" "github.com/juju/utils/proxy" "gopkg.in/yaml.v2" + + "github.com/juju/juju/feature" ) // ubuntuCloudConfig is the cloudconfig type specific to Ubuntu machines @@ -143,7 +147,7 @@ // for adding all packages configured in this CloudConfig. 
func (cfg *ubuntuCloudConfig) getCommandsForAddingPackages() ([]string, error) { if !cfg.SystemUpdate() && len(cfg.PackageSources()) > 0 { - return nil, fmt.Errorf("update sources were specified, but OS updates have been disabled.") + return nil, errors.New("update sources were specified, but OS updates have been disabled.") } var cmds []string @@ -184,7 +188,6 @@ } cmds = append(cmds, config.PackageManagerLoopFunction) - looper := "package_manager_loop " if cfg.SystemUpdate() { @@ -196,6 +199,8 @@ cmds = append(cmds, looper+cfg.paccmder.UpgradeCmd()) } + var pkgCmds []string + var pkgNames []string var pkgsWithTargetRelease []string pkgs := cfg.Packages() for i, _ := range pkgs { @@ -211,7 +216,7 @@ continue } } - packageName := pack + pkgNames = append(pkgNames, pack) installArgs := []string{pack} if len(pkgsWithTargetRelease) == 3 { @@ -219,16 +224,16 @@ // install command args from the accumulated // pkgsWithTargetRelease slice and reset it. installArgs = append([]string{}, pkgsWithTargetRelease...) - packageName = strings.Join(installArgs, " ") pkgsWithTargetRelease = []string{} } - cmds = append(cmds, LogProgressCmd("Installing package: %s", packageName)) cmd := looper + cfg.paccmder.InstallCmd(installArgs...) - cmds = append(cmds, cmd) + pkgCmds = append(pkgCmds, cmd) } - if len(cmds) > 0 { + if len(pkgCmds) > 0 { + pkgCmds = append([]string{LogProgressCmd(fmt.Sprintf("Installing %s", strings.Join(pkgNames, ", ")))}, pkgCmds...) + cmds = append(cmds, pkgCmds...) // setting DEBIAN_FRONTEND=noninteractive prevents debconf // from prompting, always taking default values instead. cmds = append([]string{"export DEBIAN_FRONTEND=noninteractive"}, cmds...) @@ -270,6 +275,9 @@ "cloud-utils", "tmux", } + if featureflag.Enabled(feature.DeveloperMode) { + packages = append(packages, "socat") + } // The required packages need to come from the correct repo. // For precise, that might require an explicit --target-release parameter. 
diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cloudconfig/cloudinit/renderscript_test.go juju-core-2.0.0/src/github.com/juju/juju/cloudconfig/cloudinit/renderscript_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cloudconfig/cloudinit/renderscript_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cloudconfig/cloudinit/renderscript_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -36,10 +36,6 @@ environs.EnvironProvider } -func (p *testProvider) SecretAttrs(cfg *config.Config) (map[string]string, error) { - return map[string]string{}, nil -} - func init() { environs.RegisterProvider("sshinit_test", &testProvider{}) } @@ -96,7 +92,7 @@ APIPort: 456, } } else { - icfg, err = instancecfg.NewInstanceConfig("0", "ya", imagemetadata.ReleasedStream, vers.Series, true, nil) + icfg, err = instancecfg.NewInstanceConfig(coretesting.ControllerTag, "0", "ya", imagemetadata.ReleasedStream, vers.Series, nil) c.Assert(err, jc.ErrorIsNil) icfg.Jobs = []multiwatcher.MachineJob{multiwatcher.JobHostUnits} } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cloudconfig/containerinit/container_userdata.go juju-core-2.0.0/src/github.com/juju/juju/cloudconfig/containerinit/container_userdata.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cloudconfig/containerinit/container_userdata.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cloudconfig/containerinit/container_userdata.go 2016-10-13 14:31:49.000000000 +0000 @@ -202,17 +202,19 @@ // might include per-interface networking config if both networkConfig // is not nil and its Interfaces field is not empty. 
func newCloudInitConfigWithNetworks(series string, networkConfig *container.NetworkConfig) (cloudinit.CloudConfig, error) { - config, err := GenerateNetworkConfig(networkConfig) - if err != nil { - return nil, errors.Trace(err) - } cloudConfig, err := cloudinit.New(series) if err != nil { return nil, errors.Trace(err) } - cloudConfig.AddBootTextFile(networkInterfacesFile, config, 0644) - cloudConfig.AddRunCmd(raiseJujuNetworkInterfacesScript(systemNetworkInterfacesFile, networkInterfacesFile)) + if networkConfig != nil { + config, err := GenerateNetworkConfig(networkConfig) + if err != nil { + return nil, errors.Trace(err) + } + cloudConfig.AddBootTextFile(networkInterfacesFile, config, 0644) + cloudConfig.AddRunCmd(raiseJujuNetworkInterfacesScript(systemNetworkInterfacesFile, networkInterfacesFile)) + } return cloudConfig, nil } @@ -237,9 +239,12 @@ cloudConfig.AddRunCmd("ifconfig") if instanceConfig.MachineContainerHostname != "" { + logger.Debugf("Cloud-init configured to set hostname") cloudConfig.SetAttr("hostname", instanceConfig.MachineContainerHostname) } + cloudConfig.SetAttr("manage_etc_hosts", true) + data, err := cloudConfig.RenderYAML() if err != nil { return nil, errors.Trace(err) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cloudconfig/containerinit/container_userdata_test.go juju-core-2.0.0/src/github.com/juju/juju/cloudconfig/containerinit/container_userdata_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cloudconfig/containerinit/container_userdata_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cloudconfig/containerinit/container_userdata_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -211,14 +211,7 @@ assertUserData(c, cloudConf, expected) } -func (s *UserDataSuite) TestCloudInitUserDataFallbackConfig(c *gc.C) { - instanceConfig, err := containertesting.MockMachineConfig("1/lxd/0") - c.Assert(err, jc.ErrorIsNil) - networkConfig := container.BridgeNetworkConfig("foo", 0, nil) - data, 
err := containerinit.CloudInitUserData(instanceConfig, networkConfig) - c.Assert(err, jc.ErrorIsNil) - c.Assert(data, gc.NotNil) - +func CloudInitDataExcludingOutputSection(data string) []string { // Extract the "#cloud-config" header and all lines between // from the "bootcmd" section up to (but not including) the // "output" sections to match against expected. But we cannot @@ -249,8 +242,62 @@ linesToMatch = append(linesToMatch, line) } } + + return linesToMatch +} + +// TestCloudInitUserDataNoNetworkConfig tests that no network-interfaces, or +// related data, appear in user-data when no networkConfig is passed to +// CloudInitUserData. +func (s *UserDataSuite) TestCloudInitUserDataNoNetworkConfig(c *gc.C) { + instanceConfig, err := containertesting.MockMachineConfig("1/lxd/0") + c.Assert(err, jc.ErrorIsNil) + data, err := containerinit.CloudInitUserData(instanceConfig, nil) + c.Assert(err, jc.ErrorIsNil) + c.Assert(data, gc.NotNil) + + linesToMatch := CloudInitDataExcludingOutputSection(string(data)) + + c.Assert(strings.Join(linesToMatch, "\n"), gc.Equals, "#cloud-config") +} + +func (s *UserDataSuite) TestCloudInitUserDataFallbackConfig(c *gc.C) { + instanceConfig, err := containertesting.MockMachineConfig("1/lxd/0") + c.Assert(err, jc.ErrorIsNil) + networkConfig := container.BridgeNetworkConfig("foo", 0, nil) + data, err := containerinit.CloudInitUserData(instanceConfig, networkConfig) + c.Assert(err, jc.ErrorIsNil) + c.Assert(data, gc.NotNil) + + linesToMatch := CloudInitDataExcludingOutputSection(string(data)) + expected := fmt.Sprintf(s.expectedFallbackUserData, s.networkInterfacesFile, s.systemNetworkInterfacesFile) + var expectedLinesToMatch []string + for _, line := range strings.Split(expected, "\n") { + if strings.HasPrefix(line, "runcmd:") { + break + } + expectedLinesToMatch = append(expectedLinesToMatch, line) + } + + expectedLinesToMatch = append(expectedLinesToMatch, "manage_etc_hosts: true") + + c.Assert(strings.Join(linesToMatch, "\n")+"\n", 
gc.Equals, strings.Join(expectedLinesToMatch, "\n")+"\n") +} + +func (s *UserDataSuite) TestCloudInitUserDataFallbackConfigWithContainerHostname(c *gc.C) { + instanceConfig, err := containertesting.MockMachineConfig("1/lxd/0") + instanceConfig.MachineContainerHostname = "lxdhostname" + c.Assert(err, jc.ErrorIsNil) + networkConfig := container.BridgeNetworkConfig("foo", 0, nil) + data, err := containerinit.CloudInitUserData(instanceConfig, networkConfig) + c.Assert(err, jc.ErrorIsNil) + c.Assert(data, gc.NotNil) + + linesToMatch := CloudInitDataExcludingOutputSection(string(data)) + + expected := fmt.Sprintf(s.expectedFallbackUserData, s.networkInterfacesFile, s.systemNetworkInterfacesFile) var expectedLinesToMatch []string for _, line := range strings.Split(expected, "\n") { @@ -260,6 +307,9 @@ expectedLinesToMatch = append(expectedLinesToMatch, line) } + expectedLinesToMatch = append(expectedLinesToMatch, "hostname: lxdhostname") + expectedLinesToMatch = append(expectedLinesToMatch, "manage_etc_hosts: true") + c.Assert(strings.Join(linesToMatch, "\n")+"\n", gc.Equals, strings.Join(expectedLinesToMatch, "\n")+"\n") } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cloudconfig/instancecfg/instancecfg.go juju-core-2.0.0/src/github.com/juju/juju/cloudconfig/instancecfg/instancecfg.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cloudconfig/instancecfg/instancecfg.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cloudconfig/instancecfg/instancecfg.go 2016-10-13 14:31:49.000000000 +0000 @@ -63,6 +63,9 @@ // or be empty when starting a controller. APIInfo *api.Info + // ControllerTag identifies the controller. + ControllerTag names.ControllerTag + // MachineNonce is set at provisioning/bootstrap time and used to // ensure the agent is running on the correct instance. MachineNonce string @@ -219,6 +222,11 @@ // models managed by this controller. 
ControllerInheritedConfig map[string]interface{} + // RegionInheritedConfig holds region specific configuration attributes to + // be shared across all models in the same controller on a particular + // cloud. + RegionInheritedConfig cloud.RegionConfig + // HostedModelConfig is a set of config attributes to be overlaid // on the controller model config (Config, above) to construct the // initial hosted model config. @@ -249,6 +257,7 @@ ControllerConfig map[string]interface{} `yaml:"controller-config"` ControllerModelConfig map[string]interface{} `yaml:"controller-model-config"` ControllerInheritedConfig map[string]interface{} `yaml:"controller-config-defaults,omitempty"` + RegionInheritedConfig cloud.RegionConfig `yaml:"region-inherited-config,omitempty"` HostedModelConfig map[string]interface{} `yaml:"hosted-model-config,omitempty"` BootstrapMachineInstanceId instance.Id `yaml:"bootstrap-machine-instance-id"` BootstrapMachineConstraints constraints.Value `yaml:"bootstrap-machine-constraints"` @@ -276,6 +285,7 @@ p.ControllerConfig, p.ControllerModelConfig.AllAttrs(), p.ControllerInheritedConfig, + p.RegionInheritedConfig, p.HostedModelConfig, p.BootstrapMachineInstanceId, p.BootstrapMachineConstraints, @@ -314,6 +324,7 @@ ControllerConfig: internal.ControllerConfig, ControllerModelConfig: cfg, ControllerInheritedConfig: internal.ControllerInheritedConfig, + RegionInheritedConfig: internal.RegionInheritedConfig, HostedModelConfig: internal.HostedModelConfig, BootstrapMachineInstanceId: internal.BootstrapMachineInstanceId, BootstrapMachineConstraints: internal.BootstrapMachineConstraints, @@ -380,9 +391,10 @@ Password: password, Nonce: cfg.MachineNonce, StateAddresses: cfg.stateHostAddrs(), - APIAddresses: cfg.ApiHostAddrs(), + APIAddresses: cfg.APIHostAddrs(), CACert: cacert, Values: cfg.AgentEnvironment, + Controller: cfg.ControllerTag, Model: cfg.APIInfo.ModelTag, } if cfg.Bootstrap == nil { @@ -414,7 +426,7 @@ return hosts } -func (cfg *InstanceConfig) 
ApiHostAddrs() []string { +func (cfg *InstanceConfig) APIHostAddrs() []string { var hosts []string if cfg.Bootstrap != nil { hosts = append(hosts, net.JoinHostPort( @@ -635,11 +647,11 @@ // but this takes care of the fixed entries and the ones that are // always needed. func NewInstanceConfig( + controllerTag names.ControllerTag, machineID, machineNonce, imageStream, series string, - secureServerConnections bool, apiInfo *api.Info, ) (*InstanceConfig, error) { dataDir, err := paths.DataDir(series) @@ -667,10 +679,11 @@ Tags: map[string]string{}, // Parameter entries. - MachineId: machineID, - MachineNonce: machineNonce, - APIInfo: apiInfo, - ImageStream: imageStream, + ControllerTag: controllerTag, + MachineId: machineID, + MachineNonce: machineNonce, + APIInfo: apiInfo, + ImageStream: imageStream, } return icfg, nil } @@ -685,7 +698,7 @@ ) (*InstanceConfig, error) { // For a bootstrap instance, the caller must provide the state.Info // and the api.Info. The machine id must *always* be "0". - icfg, err := NewInstanceConfig("0", agent.BootstrapNonce, "", series, true, nil) + icfg, err := NewInstanceConfig(names.NewControllerTag(config.ControllerUUID()), "0", agent.BootstrapNonce, "", series, nil) if err != nil { return nil, err } @@ -766,9 +779,9 @@ if icfg.Controller != nil { // Add NUMACTL preference. 
Needed to work for both bootstrap and high availability // Only makes sense for controller - logger.Debugf("Setting numa ctl preference to %v", icfg.Controller.Config.NumaCtlPreference()) + logger.Debugf("Setting numa ctl preference to %v", icfg.Controller.Config.NUMACtlPreference()) // Unfortunately, AgentEnvironment can only take strings as values - icfg.AgentEnvironment[agent.NumaCtlPreference] = fmt.Sprintf("%v", icfg.Controller.Config.NumaCtlPreference()) + icfg.AgentEnvironment[agent.NUMACtlPreference] = fmt.Sprintf("%v", icfg.Controller.Config.NUMACtlPreference()) } return nil } @@ -778,7 +791,7 @@ func InstanceTags(modelUUID, controllerUUID string, tagger tags.ResourceTagger, jobs []multiwatcher.MachineJob) map[string]string { instanceTags := tags.ResourceTags( names.NewModelTag(modelUUID), - names.NewModelTag(controllerUUID), + names.NewControllerTag(controllerUUID), tagger, ) if multiwatcher.AnyJobNeedsState(jobs...) { diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cloudconfig/instancecfg/instancecfg_test.go juju-core-2.0.0/src/github.com/juju/juju/cloudconfig/instancecfg/instancecfg_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cloudconfig/instancecfg/instancecfg_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cloudconfig/instancecfg/instancecfg_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -27,12 +27,12 @@ nonControllerJobs := []multiwatcher.MachineJob{multiwatcher.JobHostUnits} testInstanceTags(c, cfg, controllerJobs, map[string]string{ "juju-model-uuid": testing.ModelTag.Id(), - "juju-controller-uuid": testing.ModelTag.Id(), + "juju-controller-uuid": testing.ControllerTag.Id(), "juju-is-controller": "true", }) testInstanceTags(c, cfg, nonControllerJobs, map[string]string{ "juju-model-uuid": testing.ModelTag.Id(), - "juju-controller-uuid": testing.ModelTag.Id(), + "juju-controller-uuid": testing.ControllerTag.Id(), }) } @@ -42,14 +42,14 @@ }) testInstanceTags(c, cfg, nil, map[string]string{ 
"juju-model-uuid": testing.ModelTag.Id(), - "juju-controller-uuid": testing.ModelTag.Id(), + "juju-controller-uuid": testing.ControllerTag.Id(), "a": "b", "c": "", }) } func testInstanceTags(c *gc.C, cfg *config.Config, jobs []multiwatcher.MachineJob, expectTags map[string]string) { - tags := instancecfg.InstanceTags(testing.ModelTag.Id(), testing.ModelTag.Id(), cfg, jobs) + tags := instancecfg.InstanceTags(testing.ModelTag.Id(), testing.ControllerTag.Id(), cfg, jobs) c.Assert(tags, jc.DeepEquals, expectTags) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cloudconfig/providerinit/providerinit_test.go juju-core-2.0.0/src/github.com/juju/juju/cloudconfig/providerinit/providerinit_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cloudconfig/providerinit/providerinit_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cloudconfig/providerinit/providerinit_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -24,7 +24,6 @@ "github.com/juju/juju/cloudconfig/instancecfg" "github.com/juju/juju/cloudconfig/providerinit" "github.com/juju/juju/environs/config" - "github.com/juju/juju/juju/osenv" "github.com/juju/juju/juju/paths" "github.com/juju/juju/mongo" "github.com/juju/juju/provider/dummy" @@ -45,7 +44,7 @@ } type CloudInitSuite struct { - testing.BaseSuite + testing.FakeJujuXDGDataHomeSuite } var _ = gc.Suite(&CloudInitSuite{}) @@ -140,8 +139,6 @@ } func (*CloudInitSuite) testUserData(c *gc.C, series string, bootstrap bool) { - testJujuXDGDataHome := c.MkDir() - defer osenv.SetJujuXDGDataHome(osenv.SetJujuXDGDataHome(testJujuXDGDataHome)) // Use actual series paths instead of local defaults logDir := must(paths.LogDir(series)) metricsSpoolDir := must(paths.MetricsSpoolDir(series)) @@ -160,9 +157,10 @@ multiwatcher.JobHostUnits, } cfg := &instancecfg.InstanceConfig{ - MachineId: "10", - MachineNonce: "5432", - Series: series, + ControllerTag: testing.ControllerTag, + MachineId: "10", + MachineNonce: "5432", + Series: 
series, APIInfo: &api.Info{ Addrs: []string{"127.0.0.1:1234"}, Password: "pw2", @@ -299,6 +297,7 @@ c.Assert(err, jc.ErrorIsNil) cfg := instancecfg.InstanceConfig{ + ControllerTag: testing.ControllerTag, MachineId: "10", AgentEnvironment: map[string]string{agent.ProviderType: "dummy"}, Series: series, diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cloudconfig/sshinit/configure.go juju-core-2.0.0/src/github.com/juju/juju/cloudconfig/sshinit/configure.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cloudconfig/sshinit/configure.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cloudconfig/sshinit/configure.go 2016-10-13 14:31:49.000000000 +0000 @@ -38,20 +38,6 @@ Series string } -// Configure connects to the specified host over SSH, -// and executes a script that carries out cloud-config. -// This isn't actually used anywhere because everybody wants to add custom stuff -// in between getting the script and actually running it -// I really suggest deleting it -func Configure(params ConfigureParams) error { - logger.Infof("Provisioning machine agent on %s", params.Host) - script, err := params.Config.RenderScript() - if err != nil { - return err - } - return RunConfigureScript(script, params) -} - // RunConfigureScript connects to the specified host over // SSH, and executes the provided script which is expected // to have been returned by cloudinit ConfigureScript. 
diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cloudconfig/userdatacfg.go juju-core-2.0.0/src/github.com/juju/juju/cloudconfig/userdatacfg.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cloudconfig/userdatacfg.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cloudconfig/userdatacfg.go 2016-10-13 14:31:49.000000000 +0000 @@ -127,7 +127,7 @@ return err } if targetOS != os.Windows { - c.conf.AddRunCmd(cloudinit.LogProgressCmd("Starting Juju machine agent (%s)", svcName)) + c.conf.AddRunCmd(cloudinit.LogProgressCmd("Starting Juju machine agent (service %s)", svcName)) } c.conf.AddScripts(cmds...) return nil diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cloudconfig/userdatacfg_test.go juju-core-2.0.0/src/github.com/juju/juju/cloudconfig/userdatacfg_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cloudconfig/userdatacfg_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cloudconfig/userdatacfg_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -104,6 +104,7 @@ const defaultMachineID = "99" cfg := new(testInstanceConfig) + cfg.ControllerTag = testing.ControllerTag cfg.AuthorizedKeys = "sshkey1" cfg.AgentEnvironment = map[string]string{ agent.ProviderType: "dummy", @@ -330,15 +331,13 @@ printf '%s\\n' 'FAKE_NONCE' > '/var/lib/juju/nonce.txt' test -e /proc/self/fd/9 \|\| exec 9>&2 \(\[ ! 
-e /home/ubuntu/.profile \] \|\| grep -q '.juju-proxy' /home/ubuntu/.profile\) \|\| printf .* >> /home/ubuntu/.profile -install -D -m 644 /dev/null '/etc/profile.d/juju-introspection.sh' -printf '%s\\n' '.*' > '/etc/profile.d/juju-introspection.sh' mkdir -p /var/lib/juju/locks \(id ubuntu &> /dev/null\) && chown ubuntu:ubuntu /var/lib/juju/locks mkdir -p /var/log/juju chown syslog:adm /var/log/juju bin='/var/lib/juju/tools/1\.2\.3-precise-amd64' mkdir -p \$bin -echo 'Fetching tools.* +echo 'Fetching Juju agent version.* curl .* '.*' --retry 10 -o \$bin/tools\.tar\.gz 'http://foo\.com/tools/released/juju1\.2\.3-precise-amd64\.tgz' sha256sum \$bin/tools\.tar\.gz > \$bin/juju1\.2\.3-precise-amd64\.sha256 grep '1234' \$bin/juju1\.2\.3-precise-amd64.sha256 \|\| \(echo "Tools checksum mismatch"; exit 1\) @@ -349,10 +348,10 @@ chmod 0600 '/var/lib/juju/agents/machine-0/agent\.conf' install -D -m 600 /dev/null '/var/lib/juju/bootstrap-params' printf '%s\\n' '.*' > '/var/lib/juju/bootstrap-params' -echo 'Bootstrapping Juju machine agent'.* +echo 'Installing Juju machine agent'.* /var/lib/juju/tools/1\.2\.3-precise-amd64/jujud bootstrap-state --timeout 10m0s --data-dir '/var/lib/juju' --debug '/var/lib/juju/bootstrap-params' ln -s 1\.2\.3-precise-amd64 '/var/lib/juju/tools/machine-0' -echo 'Starting Juju machine agent \(jujud-machine-0\)'.* +echo 'Starting Juju machine agent \(service jujud-machine-0\)'.* cat > /etc/init/jujud-machine-0\.conf << 'EOF'\\ndescription "juju agent for machine-0"\\nauthor "Juju Team "\\nstart on runlevel \[2345\]\\nstop on runlevel \[!2345\]\\nrespawn\\nnormal exit 0\\n\\nlimit nofile 20000 20000\\n\\nscript\\n\\n\\n # Ensure log files are properly protected\\n touch /var/log/juju/machine-0\.log\\n chown syslog:syslog /var/log/juju/machine-0\.log\\n chmod 0600 /var/log/juju/machine-0\.log\\n\\n exec '/var/lib/juju/tools/machine-0/jujud' machine --data-dir '/var/lib/juju' --machine-id 0 --debug >> /var/log/juju/machine-0\.log 2>&1\\nend 
script\\nEOF\\n start jujud-machine-0 rm \$bin/tools\.tar\.gz && rm \$bin/juju1\.2\.3-precise-amd64\.sha256 @@ -389,15 +388,13 @@ printf '%s\\n' 'FAKE_NONCE' > '/var/lib/juju/nonce.txt' test -e /proc/self/fd/9 \|\| exec 9>&2 \(\[ ! -e /home/ubuntu/\.profile \] \|\| grep -q '.juju-proxy' /home/ubuntu/.profile\) \|\| printf .* >> /home/ubuntu/.profile -install -D -m 644 /dev/null '/etc/profile.d/juju-introspection.sh' -printf '%s\\n' '.*' > '/etc/profile.d/juju-introspection.sh' mkdir -p /var/lib/juju/locks \(id ubuntu &> /dev/null\) && chown ubuntu:ubuntu /var/lib/juju/locks mkdir -p /var/log/juju chown syslog:adm /var/log/juju bin='/var/lib/juju/tools/1\.2\.3-quantal-amd64' mkdir -p \$bin -echo 'Fetching tools.* +echo 'Fetching Juju agent version.* curl .* --noproxy "\*" --insecure -o \$bin/tools\.tar\.gz 'https://state-addr\.testing\.invalid:54321/deadbeef-0bad-400d-8000-4b1d0d06f00d/tools/1\.2\.3-quantal-amd64' sha256sum \$bin/tools\.tar\.gz > \$bin/juju1\.2\.3-quantal-amd64\.sha256 grep '1234' \$bin/juju1\.2\.3-quantal-amd64.sha256 \|\| \(echo "Tools checksum mismatch"; exit 1\) @@ -407,7 +404,7 @@ cat > '/var/lib/juju/agents/machine-99/agent\.conf' << 'EOF'\\n.*\\nEOF chmod 0600 '/var/lib/juju/agents/machine-99/agent\.conf' ln -s 1\.2\.3-quantal-amd64 '/var/lib/juju/tools/machine-99' -echo 'Starting Juju machine agent \(jujud-machine-99\)'.* +echo 'Starting Juju machine agent \(service jujud-machine-99\)'.* cat > /etc/init/jujud-machine-99\.conf << 'EOF'\\ndescription "juju agent for machine-99"\\nauthor "Juju Team "\\nstart on runlevel \[2345\]\\nstop on runlevel \[!2345\]\\nrespawn\\nnormal exit 0\\n\\nlimit nofile 20000 20000\\n\\nscript\\n\\n\\n # Ensure log files are properly protected\\n touch /var/log/juju/machine-99\.log\\n chown syslog:syslog /var/log/juju/machine-99\.log\\n chmod 0600 /var/log/juju/machine-99\.log\\n\\n exec '/var/lib/juju/tools/machine-99/jujud' machine --data-dir '/var/lib/juju' --machine-id 99 --debug >> 
/var/log/juju/machine-99\.log 2>&1\\nend script\\nEOF\\n start jujud-machine-99 rm \$bin/tools\.tar\.gz && rm \$bin/juju1\.2\.3-quantal-amd64\.sha256 @@ -1050,6 +1047,7 @@ Password: "password", }, }, + ControllerTag: testing.ControllerTag, MachineId: "99", AuthorizedKeys: "sshkey1", Series: "quantal", @@ -1104,7 +1102,7 @@ machineId := "42" machineNonce := "fake-nonce" apiInfo := jujutesting.FakeAPIInfo(machineId) - instanceConfig, err := instancecfg.NewInstanceConfig(machineId, machineNonce, imagemetadata.ReleasedStream, "quantal", true, apiInfo) + instanceConfig, err := instancecfg.NewInstanceConfig(testing.ControllerTag, machineId, machineNonce, imagemetadata.ReleasedStream, "quantal", apiInfo) c.Assert(err, jc.ErrorIsNil) instanceConfig.SetTools(tools.List{ &tools.Tools{ diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cloudconfig/userdatacfg_unix.go juju-core-2.0.0/src/github.com/juju/juju/cloudconfig/userdatacfg_unix.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cloudconfig/userdatacfg_unix.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cloudconfig/userdatacfg_unix.go 2016-10-13 14:31:49.000000000 +0000 @@ -34,6 +34,8 @@ "github.com/juju/juju/service/upstart" ) +var logger = loggo.GetLogger("juju.cloudconfig") + const ( // curlCommand is the base curl command used to download tools. 
curlCommand = "curl -sSfw 'tools from %{url_effective} downloaded: HTTP %{http_code}; time %{time_total}s; size %{size_download} bytes; speed %{speed_download} bytes/s '" @@ -178,7 +180,7 @@ if stdout, _ := w.conf.Output(cloudinit.OutAll); stdout == "" { w.conf.SetOutput(cloudinit.OutAll, ">> "+w.icfg.CloudInitOutputLog, "") w.conf.AddBootCmd(initProgressCmd) - w.conf.AddBootCmd(cloudinit.LogProgressCmd("Logging to %s on remote host", w.icfg.CloudInitOutputLog)) + w.conf.AddBootCmd(cloudinit.LogProgressCmd("Logging to %s on the bootstrap machine", w.icfg.CloudInitOutputLog)) } w.conf.AddPackageCommands( @@ -210,9 +212,6 @@ w.conf.AddRunTextFile(keyFile, w.icfg.Controller.PublicImageSigningKey, 0644) } - // Write out the introspection helper bash functions in /etc/profile.d. - w.conf.AddRunTextFile("/etc/profile.d/juju-introspection.sh", introspectionWorkerBashFuncs, 0644) - // Make the lock dir and change the ownership of the lock dir itself to // ubuntu:ubuntu from root:root so the juju-run command run as the ubuntu // user is able to get access to the hook execution lock (like the uniter @@ -307,7 +306,7 @@ loggingOption, shquote(bootstrapParamsFile), } - w.conf.AddRunCmd(cloudinit.LogProgressCmd("Bootstrapping Juju machine agent")) + w.conf.AddRunCmd(cloudinit.LogProgressCmd("Installing Juju machine agent")) w.conf.AddScripts(strings.Join(bootstrapAgentArgs, " ")) return nil @@ -343,7 +342,8 @@ curlCommand += " --insecure" } curlCommand += " -o $bin/tools.tar.gz" - w.conf.AddRunCmd(cloudinit.LogProgressCmd("Fetching tools: %s <%s>", curlCommand, urls)) + w.conf.AddRunCmd(cloudinit.LogProgressCmd("Fetching Juju agent version %s for %s", tools.Version.Number, tools.Version.Arch)) + logger.Infof("Fetching agent: %s <%s>", curlCommand, urls) w.conf.AddRunCmd(toolsDownloadCommand(curlCommand, urls)) } @@ -351,7 +351,7 @@ fmt.Sprintf("sha256sum $bin/tools.tar.gz > $bin/juju%s.sha256", tools.Version), fmt.Sprintf(`grep '%s' $bin/juju%s.sha256 || (echo "Tools checksum 
mismatch"; exit 1)`, tools.SHA256, tools.Version), - fmt.Sprintf("tar zxf $bin/tools.tar.gz -C $bin"), + "tar zxf $bin/tools.tar.gz -C $bin", ) toolsJson, err := json.Marshal(tools) @@ -449,37 +449,3 @@ } return base64.StdEncoding.EncodeToString(data) } - -const introspectionWorkerBashFuncs = ` -jujuAgentCall () { - local agent=$1 - shift - local path= - for i in "$@"; do - path="$path/$i" - done - echo -e "GET $path HTTP/1.0\r\n" | socat abstract-connect:jujud-$agent STDIO -} - -jujuMachineAgentName () { - local machine=` + "`ls -d /var/lib/juju/agents/machine*`" + ` - machine=` + "`basename $machine`" + ` - echo $machine -} - -juju-goroutines () { - if [ "$#" -gt 1 ]; then - echo "expected no args (for machine agent) or one (unit agent)" - return 1 - fi - local agent=$(jujuMachineAgentName) - if [ "$#" -eq 1 ]; then - agent=$1 - fi - jujuAgentCall $agent debug/pprof/goroutine?debug=1 -} - -export -f jujuAgentCall -export -f jujuMachineAgentName -export -f juju-goroutines -` diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cloudconfig/windows_userdata_test.go juju-core-2.0.0/src/github.com/juju/juju/cloudconfig/windows_userdata_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cloudconfig/windows_userdata_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cloudconfig/windows_userdata_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -734,6 +734,7 @@ fR+gLQjslxf64w0wCwYJKoZIhvcNAQEFA0EAbn0MaxWVgGYBomeLYfDdb8vCq/5/ G/2iCUQCXsVrBparMLFnor/iKOkJB5n3z3rtu70rFt+DpX6L8uBR3LB3+A== -----END CERTIFICATE----- +controller: controller-deadbeef-1bad-500d-9000-4b1d0d06f00d model: model-deadbeef-0bad-400d-8000-4b1d0d06f00d apiaddresses: - state-addr.testing.invalid:54321 diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/action/export_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/action/export_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/action/export_test.go 2016-08-16 
08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/action/export_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -76,13 +76,13 @@ func NewListCommandForTest(store jujuclient.ClientStore) (cmd.Command, *ListCommand) { c := &listCommand{} c.SetClientStore(store) - return modelcmd.Wrap(c, modelcmd.ModelSkipDefault), &ListCommand{c} + return modelcmd.Wrap(c, modelcmd.WrapSkipDefaultModel), &ListCommand{c} } func NewRunCommandForTest(store jujuclient.ClientStore) (cmd.Command, *RunCommand) { c := &runCommand{} c.SetClientStore(store) - return modelcmd.Wrap(c, modelcmd.ModelSkipDefault), &RunCommand{c} + return modelcmd.Wrap(c, modelcmd.WrapSkipDefaultModel), &RunCommand{c} } func ActionResultsToMap(results []params.ActionResult) map[string]interface{} { diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/action/list.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/action/list.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/action/list.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/action/list.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,19 +4,19 @@ package action import ( - "bytes" "fmt" + "io" "strings" - "text/tabwriter" "github.com/juju/cmd" - errors "github.com/juju/errors" + "github.com/juju/errors" + "github.com/juju/gnuflag" "github.com/juju/utils" "gopkg.in/juju/names.v2" - "launchpad.net/gnuflag" "github.com/juju/juju/apiserver/params" "github.com/juju/juju/cmd/modelcmd" + "github.com/juju/juju/cmd/output" ) func NewListCommand() cmd.Command { @@ -40,7 +40,12 @@ // Set up the output. 
func (c *listCommand) SetFlags(f *gnuflag.FlagSet) { - c.out.AddFlags(f, "smart", cmd.DefaultFormatters) + c.ActionCommandBase.SetFlags(f) + c.out.AddFlags(f, "tabular", map[string]cmd.Formatter{ + "yaml": cmd.FormatYaml, + "json": cmd.FormatJson, + "tabular": c.printTabular, + }) f.BoolVar(&c.fullSchema, "schema", false, "Display the full action schema") } @@ -56,6 +61,9 @@ // Init validates the service name and any other options. func (c *listCommand) Init(args []string) error { + if c.out.Name() == "tabular" && c.fullSchema { + return errors.New("full schema not compatible with tabular output") + } switch len(args) { case 0: return errors.New("no application name specified") @@ -85,10 +93,6 @@ return err } - if len(actions) == 0 { - return c.out.Write(ctx, "No actions defined for "+c.applicationTag.Id()) - } - if c.fullSchema { verboseSpecs := make(map[string]interface{}) for k, v := range actions { @@ -108,25 +112,43 @@ sortedNames = append(sortedNames, name) } utils.SortStringsNaturally(sortedNames) - return c.printTabular(ctx, shortOutput, sortedNames) + + var output interface{} + switch c.out.Name() { + case "yaml", "json": + output = shortOutput + default: + if len(sortedNames) == 0 { + ctx.Infof("No actions defined for %s.", c.applicationTag.Id()) + return nil + } + var list []listOutput + for _, name := range sortedNames { + list = append(list, listOutput{name, shortOutput[name]}) + } + output = list + } + + return c.out.Write(ctx, output) +} + +type listOutput struct { + action string + description string } // printTabular prints the list of actions in tabular format -func (c *listCommand) printTabular(ctx *cmd.Context, actions map[string]string, sortedNames []string) error { - var out bytes.Buffer - const ( - // To format things into columns. 
- minwidth = 0 - tabwidth = 2 - padding = 2 - padchar = ' ' - flags = 0 - ) - tw := tabwriter.NewWriter(&out, minwidth, tabwidth, padding, padchar, flags) - fmt.Fprintf(tw, "%s\t%s\n", "ACTION", "DESCRIPTION") - for _, name := range sortedNames { - fmt.Fprintf(tw, "%s\t%s\n", name, strings.TrimSpace(actions[name])) +func (c *listCommand) printTabular(writer io.Writer, value interface{}) error { + list, ok := value.([]listOutput) + if !ok { + return errors.New("unexpected value") + } + + tw := output.TabWriter(writer) + fmt.Fprintf(tw, "%s\t%s\n", "Action", "Description") + for _, value := range list { + fmt.Fprintf(tw, "%s\t%s\n", value.action, strings.TrimSpace(value.description)) } tw.Flush() - return c.out.Write(ctx, string(out.Bytes())) + return nil } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/action/list_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/action/list_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/action/list_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/action/list_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -6,6 +6,7 @@ import ( "bytes" "errors" + "fmt" "strings" "github.com/juju/cmd" @@ -57,8 +58,12 @@ args: []string{validServiceId}, expectedSvc: names.NewApplicationTag(validServiceId), }, { + should: "schema with tabular output", + args: []string{"--format=tabular", "--schema", validServiceId}, + expectedErr: "full schema not compatible with tabular output", + }, { should: "init properly with valid application name and --schema", - args: []string{"--schema", validServiceId}, + args: []string{"--format=yaml", "--schema", validServiceId}, expectedOutputSchema: true, expectedSvc: names.NewApplicationTag(validServiceId), }} @@ -71,6 +76,7 @@ args := append([]string{modelFlag, "admin"}, t.args...) 
err := testing.InitCommand(s.wrappedCommand, args) if t.expectedErr == "" { + c.Check(err, jc.ErrorIsNil) c.Check(s.command.ApplicationTag(), gc.Equals, t.expectedSvc) c.Check(s.command.FullSchema(), gc.Equals, t.expectedOutputSchema) } else { @@ -82,7 +88,7 @@ func (s *ListSuite) TestRun(c *gc.C) { simpleOutput := ` -ACTION DESCRIPTION +Action Description kill Kill the database. no-description No description no-params An action with no parameters. @@ -110,14 +116,14 @@ withCharmActions: someCharmActions, }, { should: "get full schema results properly", - withArgs: []string{"--schema", validServiceId}, + withArgs: []string{"--format=yaml", "--schema", validServiceId}, expectFullSchema: true, withCharmActions: someCharmActions, }, { should: "work properly when no results found", withArgs: []string{validServiceId}, expectNoResults: true, - expectMessage: "No actions defined for " + validServiceId, + expectMessage: fmt.Sprintf("No actions defined for %s.\n", validServiceId), }} for i, t := range tests { @@ -144,7 +150,7 @@ if t.expectFullSchema { checkFullSchema(c, t.withCharmActions, result) } else if t.expectNoResults { - c.Check(string(result), gc.Matches, t.expectMessage+"(?sm).*") + c.Check(testing.Stderr(ctx), gc.Matches, t.expectMessage) } else { c.Check(testing.Stdout(ctx), gc.Equals, simpleOutput) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/action/package_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/action/package_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/action/package_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/action/package_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -52,7 +52,7 @@ s.store = jujuclienttesting.NewMemStore() s.store.CurrentControllerName = "ctrl" s.store.Accounts["ctrl"] = jujuclient.AccountDetails{ - User: "admin@local", + User: "admin", } } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/action/run.go 
juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/action/run.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/action/run.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/action/run.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,19 +4,19 @@ package action import ( - "fmt" "regexp" "strings" "github.com/juju/cmd" "github.com/juju/errors" + "github.com/juju/gnuflag" "gopkg.in/juju/names.v2" yaml "gopkg.in/yaml.v2" - "launchpad.net/gnuflag" "github.com/juju/juju/apiserver/params" "github.com/juju/juju/cmd/juju/common" "github.com/juju/juju/cmd/modelcmd" + "github.com/juju/juju/cmd/output" ) var keyRule = regexp.MustCompile("^[a-z0-9](?:[a-z0-9-]*[a-z0-9])?$") @@ -111,7 +111,8 @@ // SetFlags offers an option for YAML output. func (c *runCommand) SetFlags(f *gnuflag.FlagSet) { - c.out.AddFlags(f, "smart", cmd.DefaultFormatters) + c.ActionCommandBase.SetFlags(f) + c.out.AddFlags(f, "yaml", output.DefaultFormatters) f.Var(&c.paramsYAML, "params", "Path to yaml-formatted params file") f.BoolVar(&c.parseStrings, "string-args", false, "Use raw string values of CLI args") } @@ -140,7 +141,7 @@ } ActionName := args[1] if valid := ActionNameRule.MatchString(ActionName); !valid { - return fmt.Errorf("invalid action name %q", ActionName) + return errors.Errorf("invalid action name %q", ActionName) } c.unitTag = names.NewUnitTag(unitName) c.actionName = ActionName @@ -152,13 +153,13 @@ for _, arg := range args[2:] { thisArg := strings.SplitN(arg, "=", 2) if len(thisArg) != 2 { - return fmt.Errorf("argument %q must be of the form key...=value", arg) + return errors.Errorf("argument %q must be of the form key...=value", arg) } keySlice := strings.Split(thisArg[0], ".") // check each key for validity for _, key := range keySlice { if valid := keyRule.MatchString(key); !valid { - return fmt.Errorf("key %q must start and end with lowercase alphanumeric, and contain only lowercase alphanumeric and hyphens", key) + return 
errors.Errorf("key %q must start and end with lowercase alphanumeric, and contain only lowercase alphanumeric and hyphens", key) } } // c.args={..., [key, key, key, key, value]} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/action/showoutput.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/action/showoutput.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/action/showoutput.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/action/showoutput.go 2016-10-13 14:31:49.000000000 +0000 @@ -9,10 +9,11 @@ "github.com/juju/cmd" errors "github.com/juju/errors" - "launchpad.net/gnuflag" + "github.com/juju/gnuflag" "github.com/juju/juju/apiserver/params" "github.com/juju/juju/cmd/modelcmd" + "github.com/juju/juju/cmd/output" ) func NewShowOutputCommand() cmd.Command { @@ -41,7 +42,8 @@ // Set up the output. func (c *showOutputCommand) SetFlags(f *gnuflag.FlagSet) { - c.out.AddFlags(f, "smart", cmd.DefaultFormatters) + c.ActionCommandBase.SetFlags(f) + c.out.AddFlags(f, "yaml", output.DefaultFormatters) f.StringVar(&c.wait, "wait", "-1s", "Wait for results") } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/action/status.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/action/status.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/action/status.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/action/status.go 2016-10-13 14:31:49.000000000 +0000 @@ -6,11 +6,12 @@ import ( "github.com/juju/cmd" errors "github.com/juju/errors" + "github.com/juju/gnuflag" "gopkg.in/juju/names.v2" - "launchpad.net/gnuflag" "github.com/juju/juju/apiserver/params" "github.com/juju/juju/cmd/modelcmd" + "github.com/juju/juju/cmd/output" ) func NewStatusCommand() cmd.Command { @@ -32,7 +33,8 @@ // Set up the output. 
func (c *statusCommand) SetFlags(f *gnuflag.FlagSet) { - c.out.AddFlags(f, "smart", cmd.DefaultFormatters) + c.ActionCommandBase.SetFlags(f) + c.out.AddFlags(f, "yaml", output.DefaultFormatters) f.StringVar(&c.name, "name", "", "Action name") } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/action/status_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/action/status_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/action/status_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/action/status_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -113,9 +113,10 @@ c.Assert(err, gc.ErrorMatches, tc.expectError) } if len(tc.results) > 0 { - buf, err := cmd.DefaultFormatters["yaml"](action.ActionResultsToMap(tc.results)) + out := &bytes.Buffer{} + err := cmd.FormatYaml(out, action.ActionResultsToMap(tc.results)) c.Check(err, jc.ErrorIsNil) - c.Check(ctx.Stdout.(*bytes.Buffer).String(), gc.Equals, string(buf)+"\n") + c.Check(ctx.Stdout.(*bytes.Buffer).String(), gc.Equals, out.String()) c.Check(ctx.Stderr.(*bytes.Buffer).String(), gc.Equals, "") } } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/application/addrelation.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/application/addrelation.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/application/addrelation.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/application/addrelation.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,26 +4,35 @@ package application import ( - "fmt" - "github.com/juju/cmd" "github.com/juju/errors" "github.com/juju/juju/api/application" "github.com/juju/juju/apiserver/params" "github.com/juju/juju/cmd/juju/block" + "github.com/juju/juju/cmd/juju/common" "github.com/juju/juju/cmd/modelcmd" ) // NewAddRelationCommand returns a command to add a relation between 2 services. 
func NewAddRelationCommand() cmd.Command { - return modelcmd.Wrap(&addRelationCommand{}) + cmd := &addRelationCommand{} + cmd.newAPIFunc = func() (ApplicationAddRelationAPI, error) { + root, err := cmd.NewAPIRoot() + if err != nil { + return nil, errors.Trace(err) + } + return application.NewClient(root), nil + + } + return modelcmd.Wrap(cmd) } // addRelationCommand adds a relation between two application endpoints. type addRelationCommand struct { modelcmd.ModelCommandBase - Endpoints []string + Endpoints []string + newAPIFunc func() (ApplicationAddRelationAPI, error) } func (c *addRelationCommand) Info() *cmd.Info { @@ -37,31 +46,27 @@ func (c *addRelationCommand) Init(args []string) error { if len(args) != 2 { - return fmt.Errorf("a relation must involve two applications") + return errors.Errorf("a relation must involve two applications") } c.Endpoints = args return nil } -type serviceAddRelationAPI interface { +// ApplicationAddRelationAPI defines the API methods that application add relation command uses. +type ApplicationAddRelationAPI interface { Close() error AddRelation(endpoints ...string) (*params.AddRelationResults, error) } -func (c *addRelationCommand) getAPI() (serviceAddRelationAPI, error) { - root, err := c.NewAPIRoot() - if err != nil { - return nil, errors.Trace(err) - } - return application.NewClient(root), nil -} - -func (c *addRelationCommand) Run(_ *cmd.Context) error { - client, err := c.getAPI() +func (c *addRelationCommand) Run(ctx *cmd.Context) error { + client, err := c.newAPIFunc() if err != nil { return err } defer client.Close() _, err = client.AddRelation(c.Endpoints...) 
+ if params.IsCodeUnauthorized(err) { + common.PermissionsMessage(ctx.Stderr, "add a relation") + } return block.ProcessBlockedError(err, block.BlockChange) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/application/addrelation_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/application/addrelation_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/application/addrelation_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/application/addrelation_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -1,190 +1,102 @@ -// Copyright 2012, 2013 Canonical Ltd. +// Copyright 2012 Canonical Ltd. // Licensed under the AGPLv3, see LICENCE file for details. package application import ( + "strings" + + "github.com/juju/errors" + "github.com/juju/testing" jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" - "github.com/juju/juju/cmd/juju/common" - jujutesting "github.com/juju/juju/juju/testing" - "github.com/juju/juju/testcharms" - "github.com/juju/juju/testing" + "github.com/juju/juju/apiserver/common" + "github.com/juju/juju/apiserver/params" + coretesting "github.com/juju/juju/testing" ) type AddRelationSuite struct { - jujutesting.RepoSuite - common.CmdBlockHelper + testing.IsolationSuite + mockAPI *mockAddAPI } func (s *AddRelationSuite) SetUpTest(c *gc.C) { - s.RepoSuite.SetUpTest(c) - s.CmdBlockHelper = common.NewCmdBlockHelper(s.APIState) - c.Assert(s.CmdBlockHelper, gc.NotNil) - s.AddCleanup(func(*gc.C) { s.CmdBlockHelper.Close() }) + s.IsolationSuite.SetUpTest(c) + s.mockAPI = &mockAddAPI{Stub: &testing.Stub{}} + s.mockAPI.addRelationFunc = func(endpoints ...string) (*params.AddRelationResults, error) { + // At the moment, cmd implementation ignores the return values, + // so nil is an acceptable return for testing purposes. 
+ return nil, s.mockAPI.NextErr() + } } var _ = gc.Suite(&AddRelationSuite{}) -func runAddRelation(c *gc.C, args ...string) error { - _, err := testing.RunCommand(c, NewAddRelationCommand(), args...) +func (s *AddRelationSuite) runAddRelation(c *gc.C, args ...string) error { + _, err := coretesting.RunCommand(c, NewAddRelationCommandForTest(s.mockAPI), args...) return err } -var msWpAlreadyExists = `cannot add relation "wp:db ms:server": relation already exists` -var msLgAlreadyExists = `cannot add relation "lg:info ms:juju-info": relation already exists` -var wpLgAlreadyExists = `cannot add relation "lg:logging-directory wp:logging-dir": relation already exists` -var wpLgAlreadyExistsJuju = `cannot add relation "lg:info wp:juju-info": relation already exists` - -var addRelationTests = []struct { - args []string - err string -}{ - { - args: []string{"rk", "ms"}, - err: "no relations found", - }, { - err: "a relation must involve two applications", - }, { - args: []string{"rk"}, - err: "a relation must involve two applications", - }, { - args: []string{"rk:ring"}, - err: "a relation must involve two applications", - }, { - args: []string{"ping:pong", "tic:tac", "icki:wacki"}, - err: "a relation must involve two applications", - }, - - // Add a real relation, and check various ways of failing to re-add it. - { - args: []string{"ms", "wp"}, - }, { - args: []string{"ms", "wp"}, - err: msWpAlreadyExists, - }, { - args: []string{"wp", "ms"}, - err: msWpAlreadyExists, - }, { - args: []string{"ms", "wp:db"}, - err: msWpAlreadyExists, - }, { - args: []string{"ms:server", "wp"}, - err: msWpAlreadyExists, - }, { - args: []string{"ms:server", "wp:db"}, - err: msWpAlreadyExists, - }, - - // Add a real relation using an implicit endpoint. 
- { - args: []string{"ms", "lg"}, - }, { - args: []string{"ms", "lg"}, - err: msLgAlreadyExists, - }, { - args: []string{"lg", "ms"}, - err: msLgAlreadyExists, - }, { - args: []string{"ms:juju-info", "lg"}, - err: msLgAlreadyExists, - }, { - args: []string{"ms", "lg:info"}, - err: msLgAlreadyExists, - }, { - args: []string{"ms:juju-info", "lg:info"}, - err: msLgAlreadyExists, - }, - - // Add a real relation using an explicit endpoint, avoiding the potential implicit one. - { - args: []string{"wp", "lg"}, - }, { - args: []string{"wp", "lg"}, - err: wpLgAlreadyExists, - }, { - args: []string{"lg", "wp"}, - err: wpLgAlreadyExists, - }, { - args: []string{"wp:logging-dir", "lg"}, - err: wpLgAlreadyExists, - }, { - args: []string{"wp", "lg:logging-directory"}, - err: wpLgAlreadyExists, - }, { - args: []string{"wp:logging-dir", "lg:logging-directory"}, - err: wpLgAlreadyExists, - }, - - // Check we can still use the implicit endpoint if specified explicitly. - { - args: []string{"wp:juju-info", "lg"}, - }, { - args: []string{"wp:juju-info", "lg"}, - err: wpLgAlreadyExistsJuju, - }, { - args: []string{"lg", "wp:juju-info"}, - err: wpLgAlreadyExistsJuju, - }, { - args: []string{"wp:juju-info", "lg"}, - err: wpLgAlreadyExistsJuju, - }, { - args: []string{"wp", "lg:info"}, - err: wpLgAlreadyExistsJuju, - }, { - args: []string{"wp:juju-info", "lg:info"}, - err: wpLgAlreadyExistsJuju, - }, -} - -func (s *AddRelationSuite) TestAddRelation(c *gc.C) { - ch := testcharms.Repo.CharmArchivePath(s.CharmsPath, "wordpress") - err := runDeploy(c, ch, "wp", "--series", "quantal") - c.Assert(err, jc.ErrorIsNil) - ch = testcharms.Repo.CharmArchivePath(s.CharmsPath, "mysql") - err = runDeploy(c, ch, "ms", "--series", "quantal") - c.Assert(err, jc.ErrorIsNil) - ch = testcharms.Repo.CharmArchivePath(s.CharmsPath, "riak") - err = runDeploy(c, ch, "rk", "--series", "quantal") - c.Assert(err, jc.ErrorIsNil) - ch = testcharms.Repo.CharmArchivePath(s.CharmsPath, "logging") - err = runDeploy(c, ch, 
"lg", "--series", "quantal") - c.Assert(err, jc.ErrorIsNil) +func (s *AddRelationSuite) TestAddRelationWrongNumberOfArguments(c *gc.C) { + // No arguments + err := s.runAddRelation(c) + c.Assert(err, gc.ErrorMatches, "a relation must involve two applications") - for i, t := range addRelationTests { - c.Logf("test %d: %v", i, t.args) - err := runAddRelation(c, t.args...) - if t.err != "" { - c.Assert(err, gc.ErrorMatches, t.err) - } - } + // 1 argument + err = s.runAddRelation(c, "application1") + c.Assert(err, gc.ErrorMatches, "a relation must involve two applications") + + // more than 2 arguments + err = s.runAddRelation(c, "application1", "application2", "application3") + c.Assert(err, gc.ErrorMatches, "a relation must involve two applications") } -func (s *AddRelationSuite) TestBlockAddRelation(c *gc.C) { - ch := testcharms.Repo.CharmArchivePath(s.CharmsPath, "wordpress") - err := runDeploy(c, ch, "wp", "--series", "quantal") - c.Assert(err, jc.ErrorIsNil) - ch = testcharms.Repo.CharmArchivePath(s.CharmsPath, "mysql") - err = runDeploy(c, ch, "ms", "--series", "quantal") - c.Assert(err, jc.ErrorIsNil) - ch = testcharms.Repo.CharmArchivePath(s.CharmsPath, "riak") - err = runDeploy(c, ch, "rk", "--series", "quantal") - c.Assert(err, jc.ErrorIsNil) - ch = testcharms.Repo.CharmArchivePath(s.CharmsPath, "logging") - err = runDeploy(c, ch, "lg", "--series", "quantal") +func (s *AddRelationSuite) TestAddRelationSuccess(c *gc.C) { + err := s.runAddRelation(c, "application1", "application2") c.Assert(err, jc.ErrorIsNil) + s.mockAPI.CheckCall(c, 0, "AddRelation", []string{"application1", "application2"}) + s.mockAPI.CheckCall(c, 1, "Close") +} - // Block operation - s.BlockAllChanges(c, "TestBlockAddRelation") +func (s *AddRelationSuite) TestAddRelationFail(c *gc.C) { + msg := "fail add-relation call at API" + s.mockAPI.SetErrors(errors.New(msg)) + err := s.runAddRelation(c, "application1", "application2") + c.Assert(err, gc.ErrorMatches, msg) + s.mockAPI.CheckCall(c, 0, 
"AddRelation", []string{"application1", "application2"}) + s.mockAPI.CheckCall(c, 1, "Close") +} - for i, t := range addRelationTests { - c.Logf("test %d: %v", i, t.args) - err := runAddRelation(c, t.args...) - if len(t.args) == 2 { - // Only worry about Run being blocked. - // For len(t.args) != 2, an Init will fail - s.AssertBlocked(c, err, ".*TestBlockAddRelation.*") - } - } +func (s *AddRelationSuite) TestAddRelationBlocked(c *gc.C) { + s.mockAPI.SetErrors(common.OperationBlockedError("TestBlockAddRelation")) + err := s.runAddRelation(c, "application1", "application2") + coretesting.AssertOperationWasBlocked(c, err, ".*TestBlockAddRelation.*") + s.mockAPI.CheckCall(c, 0, "AddRelation", []string{"application1", "application2"}) + s.mockAPI.CheckCall(c, 1, "Close") +} + +func (s *AddRelationSuite) TestAddRelationUnauthorizedMentionsJujuGrant(c *gc.C) { + s.mockAPI.SetErrors(¶ms.Error{ + Message: "permission denied", + Code: params.CodeUnauthorized, + }) + ctx, _ := coretesting.RunCommand(c, NewAddRelationCommandForTest(s.mockAPI), "application1", "application2") + errString := strings.Replace(coretesting.Stderr(ctx), "\n", " ", -1) + c.Assert(errString, gc.Matches, `.*juju grant.*`) +} + +type mockAddAPI struct { + *testing.Stub + addRelationFunc func(endpoints ...string) (*params.AddRelationResults, error) +} + +func (s mockAddAPI) Close() error { + s.MethodCall(s, "Close") + return s.NextErr() +} + +func (s mockAddAPI) AddRelation(endpoints ...string) (*params.AddRelationResults, error) { + s.MethodCall(s, "AddRelation", endpoints) + return s.addRelationFunc(endpoints...) 
} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/application/addunit.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/application/addunit.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/application/addunit.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/application/addunit.go 2016-10-13 14:31:49.000000000 +0000 @@ -9,11 +9,13 @@ "github.com/juju/cmd" "github.com/juju/errors" + "github.com/juju/gnuflag" "gopkg.in/juju/names.v2" - "launchpad.net/gnuflag" "github.com/juju/juju/api/application" + "github.com/juju/juju/apiserver/params" "github.com/juju/juju/cmd/juju/block" + "github.com/juju/juju/cmd/juju/common" "github.com/juju/juju/cmd/modelcmd" "github.com/juju/juju/instance" ) @@ -123,7 +125,6 @@ Args: "", Purpose: usageAddUnitSummary, Doc: usageAddUnitDetails, - Aliases: []string{"add-units"}, } } @@ -166,7 +167,7 @@ // Run connects to the environment specified on the command line // and calls AddUnits for the given application. 
-func (c *addUnitCommand) Run(_ *cmd.Context) error { +func (c *addUnitCommand) Run(ctx *cmd.Context) error { apiclient, err := c.getAPI() if err != nil { return err @@ -180,6 +181,9 @@ c.Placement[i] = p } _, err = apiclient.AddUnits(c.ApplicationName, c.NumUnits, c.Placement) + if params.IsCodeUnauthorized(err) { + common.PermissionsMessage(ctx.Stderr, "add a unit") + } return block.ProcessBlockedError(err, block.BlockChange) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/application/addunit_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/application/addunit_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/application/addunit_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/application/addunit_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -11,6 +11,7 @@ gc "gopkg.in/check.v1" "github.com/juju/juju/apiserver/common" + "github.com/juju/juju/apiserver/params" "github.com/juju/juju/cmd/juju/application" "github.com/juju/juju/environs/config" "github.com/juju/juju/instance" @@ -135,6 +136,16 @@ c.Check(stripped, gc.Matches, ".*TestBlockAddUnit.*") } +func (s *AddUnitSuite) TestUnauthorizedMentionsJujuGrant(c *gc.C) { + s.fake.err = ¶ms.Error{ + Message: "permission denied", + Code: params.CodeUnauthorized, + } + ctx, _ := testing.RunCommand(c, application.NewAddUnitCommandForTest(s.fake), "some-application-name") + errString := strings.Replace(testing.Stderr(ctx), "\n", " ", -1) + c.Assert(errString, gc.Matches, `.*juju grant.*`) +} + func (s *AddUnitSuite) TestForceMachine(c *gc.C) { err := s.runAddUnit(c, "some-application-name", "--to", "3") c.Assert(err, jc.ErrorIsNil) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/application/bundle.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/application/bundle.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/application/bundle.go 2016-08-16 08:56:25.000000000 +0000 +++ 
juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/application/bundle.go 2016-10-13 14:31:49.000000000 +0000 @@ -21,14 +21,13 @@ "gopkg.in/yaml.v1" "github.com/juju/juju/api" - apiannotations "github.com/juju/juju/api/annotations" "github.com/juju/juju/api/application" "github.com/juju/juju/api/charms" - "github.com/juju/juju/api/modelconfig" "github.com/juju/juju/apiserver/params" "github.com/juju/juju/charmstore" "github.com/juju/juju/constraints" "github.com/juju/juju/instance" + "github.com/juju/juju/resource/resourceadapters" "github.com/juju/juju/state/multiwatcher" "github.com/juju/juju/state/watcher" "github.com/juju/juju/storage" @@ -57,9 +56,7 @@ bundleFilePath string, data *charm.BundleData, channel csparams.Channel, - client *api.Client, - serviceDeployer *applicationDeployer, - resolver *charmURLResolver, + apiRoot DeployAPI, log deploymentLogger, bundleStorage map[string]map[string]storage.Constraints, ) (map[*charm.URL]*macaroon.Macaroon, error) { @@ -93,7 +90,7 @@ numChanges := len(changes) // Initialize the unit status. - status, err := client.Status(nil) + status, err := apiRoot.Status(nil) if err != nil { return nil, errors.Annotate(err, "cannot get model status") } @@ -105,51 +102,26 @@ } // Instantiate a watcher used to follow the deployment progress. 
- watcher, err := watchAll(client) + watcher, err := apiRoot.WatchAll() if err != nil { return nil, errors.Annotate(err, "cannot watch model") } defer watcher.Stop() - applicationClient, err := serviceDeployer.newApplicationAPIClient() - if err != nil { - return nil, errors.Annotate(err, "cannot get application client") - } - - modelConfigClient, err := serviceDeployer.newModelConfigAPIClient() - if err != nil { - return nil, errors.Annotate(err, "cannot get model config client") - } - - annotationsClient, err := serviceDeployer.newAnnotationsAPIClient() - if err != nil { - return nil, errors.Annotate(err, "cannot get annotations client") - } - - charmsClient, err := serviceDeployer.newCharmsAPIClient() - if err != nil { - return nil, errors.Annotate(err, "cannot get charms client") - } // Instantiate the bundle handler. h := &bundleHandler{ - bundleDir: bundleFilePath, - changes: changes, - results: make(map[string]string, numChanges), - channel: channel, - client: client, - modelConfigClient: modelConfigClient, - applicationClient: applicationClient, - annotationsClient: annotationsClient, - charmsClient: charmsClient, - serviceDeployer: serviceDeployer, - bundleStorage: bundleStorage, - resolver: resolver, - log: log, - data: data, - unitStatus: unitStatus, - ignoredMachines: make(map[string]bool, len(data.Applications)), - ignoredUnits: make(map[string]bool, len(data.Applications)), - watcher: watcher, + bundleDir: bundleFilePath, + changes: changes, + results: make(map[string]string, numChanges), + channel: channel, + api: apiRoot, + bundleStorage: bundleStorage, + log: log, + data: data, + unitStatus: unitStatus, + ignoredMachines: make(map[string]bool, len(data.Applications)), + ignoredUnits: make(map[string]bool, len(data.Applications)), + watcher: watcher, } // Deploy the bundle. 
@@ -177,7 +149,7 @@ Channel: channels[cURL], } csMac := csMacs[cURL] - err = h.addService(change.Id(), change.Params, chID, csMac) + err = h.addService(apiRoot, change.Id(), change.Params, chID, csMac) } case *bundlechanges.AddUnitChange: err = h.addUnit(change.Id(), change.Params) @@ -218,23 +190,8 @@ // channel identifies the default channel to use for the bundle. channel csparams.Channel - // client is used to interact with the environment. - client *api.Client - - // modelConfigClient is used to get model config information. - modelConfigClient *modelconfig.Client - - // charmsClient is used to get charm information. - charmsClient *charms.Client - - // applicationClient is used to interact with applications. - applicationClient *application.Client - - // annotationsClient is used to interact with annotations. - annotationsClient *apiannotations.Client - - // serviceDeployer is used to deploy services. - serviceDeployer *applicationDeployer + // api is used to interact with the environment. + api DeployAPI // bundleStorage contains a mapping of application-specific storage // constraints. For each application, the storage constraints in the @@ -242,9 +199,6 @@ // in the bundle itself. bundleStorage map[string]map[string]storage.Constraints - // resolver is used to resolve charm and bundle URLs. - resolver *charmURLResolver - // log is used to output messages to the user, so that the user can keep // track of the bundle deployment progress. 
log deploymentLogger @@ -293,10 +247,10 @@ return nil, noChannel, nil, errors.Annotatef(err, "cannot deploy local charm at %q", charmPath) } if err == nil { - if curl, err = h.client.AddLocalCharm(curl, ch); err != nil { + if curl, err = h.api.AddLocalCharm(curl, ch); err != nil { return nil, noChannel, nil, err } - h.log.Infof("added charm %s", curl) + logger.Debugf("added charm %s", curl) h.results[id] = curl.String() return curl, noChannel, nil, nil } @@ -307,7 +261,12 @@ if err != nil { return nil, "", nil, errors.Trace(err) } - url, channel, _, store, err := h.resolver.resolve(ch) + modelCfg, err := getModelConfig(h.api) + if err != nil { + return nil, "", nil, errors.Trace(err) + } + + url, channel, _, err := h.api.Resolve(modelCfg, ch) if err != nil { return nil, channel, nil, errors.Annotatef(err, "cannot resolve URL %q", p.Charm) } @@ -315,18 +274,24 @@ return nil, channel, nil, errors.Errorf("expected charm URL, got bundle URL %q", p.Charm) } var csMac *macaroon.Macaroon - url, csMac, err = addCharmFromURL(h.client, url, channel, store.Client()) + url, csMac, err = addCharmFromURL(h.api, url, channel) if err != nil { return nil, channel, nil, errors.Annotatef(err, "cannot add charm %q", p.Charm) } - h.log.Infof("added charm %s", url) + logger.Debugf("added charm %s", url) h.results[id] = url.String() return url, channel, csMac, nil } // addService deploys or update an application with no units. Service options are // also set or updated. -func (h *bundleHandler) addService(id string, p bundlechanges.AddApplicationParams, chID charmstore.CharmID, csMac *macaroon.Macaroon) error { +func (h *bundleHandler) addService( + api DeployAPI, + id string, + p bundlechanges.AddApplicationParams, + chID charmstore.CharmID, + csMac *macaroon.Macaroon, +) error { h.results[id] = p.Application ch := chID.URL.String() // Handle application configuration. 
@@ -366,17 +331,24 @@ for resName, revision := range p.Resources { resources[resName] = fmt.Sprint(revision) } - charmInfo, err := h.charmsClient.CharmInfo(ch) + charmInfo, err := h.api.CharmInfo(ch) if err != nil { return err } - resNames2IDs, err := handleResources(h.serviceDeployer.api, resources, p.Application, chID, csMac, charmInfo.Meta.Resources) + resNames2IDs, err := resourceadapters.DeployResources( + p.Application, + chID, + csMac, + resources, + charmInfo.Meta.Resources, + api, + ) if err != nil { return errors.Trace(err) } // Figure out what series we need to deploy with. - conf, err := getModelConfig(h.modelConfigClient) + conf, err := getModelConfig(h.api) if err != nil { return err } @@ -391,23 +363,24 @@ conf: conf, fromBundle: true, } - series, message, err := selector.charmSeries() + series, err := selector.charmSeries() if err != nil { return errors.Trace(err) } // Deploy the application. - if err := h.serviceDeployer.applicationDeploy(applicationDeployParams{ - charmID: chID, - applicationName: p.Application, - series: series, - configYAML: configYAML, - constraints: cons, - storage: storageConstraints, - spaceBindings: p.EndpointBindings, - resources: resNames2IDs, + logger.Debugf("application %s is deploying (charm %s)", p.Application, ch) + h.log.Infof("Deploying charm %q", ch) + if err := api.Deploy(application.DeployArgs{ + CharmID: chID, + Cons: cons, + ApplicationName: p.Application, + Series: series, + ConfigYAML: configYAML, + Storage: storageConstraints, + Resources: resNames2IDs, + EndpointBindings: p.EndpointBindings, }); err == nil { - h.log.Infof("application %s deployed (charm %s %v)", p.Application, ch, fmt.Sprintf(message, series)) for resName := range resNames2IDs { h.log.Infof("added resource %s", resName) } @@ -419,12 +392,12 @@ // charm is compatible with the one declared in the bundle. If it is, // reuse the existing application or upgrade to a specified revision. // Exit with an error otherwise. 
- if err := h.upgradeCharm(p.Application, chID, csMac, resources); err != nil { + if err := h.upgradeCharm(api, p.Application, chID, csMac, resources); err != nil { return errors.Annotatef(err, "cannot upgrade application %q", p.Application) } // Update application configuration. if configYAML != "" { - if err := h.applicationClient.Update(params.ApplicationUpdate{ + if err := h.api.Update(params.ApplicationUpdate{ ApplicationName: p.Application, SettingsYAML: configYAML, }); err != nil { @@ -436,7 +409,7 @@ } // Update application constraints. if p.Constraints != "" { - if err := h.applicationClient.SetConstraints(p.Application, cons); err != nil { + if err := h.api.SetConstraints(p.Application, cons); err != nil { // This should never happen, as the bundle is already verified. return errors.Annotatef(err, "cannot update constraints for application %q", p.Application) } @@ -512,7 +485,7 @@ } } } - r, err := h.client.AddMachines([]params.AddMachineParams{machineParams}) + r, err := h.api.AddMachines([]params.AddMachineParams{machineParams}) if err != nil { return errors.Annotatef(err, "cannot create machine for holding %s", msg) } @@ -521,11 +494,11 @@ } machine = r[0].Machine if p.ContainerType == "" { - h.log.Infof("created new machine %s for holding %s", machine, msg) + logger.Debugf("created new machine %s for holding %s", machine, msg) } else if p.ParentId == "" { - h.log.Infof("created %s container in new machine for holding %s", machine, msg) + logger.Debugf("created %s container in new machine for holding %s", machine, msg) } else { - h.log.Infof("created %s container in machine %s for holding %s", machine, machineParams.ParentId, msg) + logger.Debugf("created %s container in machine %s for holding %s", machine, machineParams.ParentId, msg) } h.results[id] = machine return nil @@ -535,15 +508,15 @@ func (h *bundleHandler) addRelation(id string, p bundlechanges.AddRelationParams) error { ep1 := resolveRelation(p.Endpoint1, h.results) ep2 := 
resolveRelation(p.Endpoint2, h.results) - _, err := h.applicationClient.AddRelation(ep1, ep2) + _, err := h.api.AddRelation(ep1, ep2) if err == nil { // A new relation has been established. - h.log.Infof("related %s and %s", ep1, ep2) + h.log.Infof("Related %q and %q", ep1, ep2) return nil } if isErrRelationExists(err) { // The relation is already present in the environment. - h.log.Infof("%s and %s are already related", ep1, ep2) + logger.Debugf("%s and %s are already related", ep1, ep2) return nil } return errors.Annotatef(err, "cannot add relation between %q and %q", ep1, ep2) @@ -584,19 +557,19 @@ } placementArg = append(placementArg, placement) } - r, err := h.applicationClient.AddUnits(application, 1, placementArg) + r, err := h.api.AddUnits(application, 1, placementArg) if err != nil { return errors.Annotatef(err, "cannot add unit for application %q", application) } unit := r[0] if machineSpec == "" { - h.log.Infof("added %s unit to new machine", unit) + logger.Debugf("added %s unit to new machine", unit) // In this case, the unit name is stored in results instead of the // machine id, which is lazily evaluated later only if required. // This way we avoid waiting for watcher updates. h.results[id] = unit } else { - h.log.Infof("added %s unit to machine %s", unit, machineSpec) + logger.Debugf("added %s unit to new machine", unit) h.results[id] = machineSpec } // Note that the machineSpec can be empty for now, resulting in a partially @@ -609,7 +582,7 @@ // exposeService exposes an application. 
func (h *bundleHandler) exposeService(id string, p bundlechanges.ExposeParams) error { application := resolve(p.Application, h.results) - if err := h.applicationClient.Expose(application); err != nil { + if err := h.api.Expose(application); err != nil { return errors.Annotatef(err, "cannot expose application %s", application) } h.log.Infof("application %s exposed", application) @@ -628,14 +601,14 @@ default: return errors.Errorf("unexpected annotation entity type %q", p.EntityType) } - result, err := h.annotationsClient.Set(map[string]map[string]string{tag: p.Annotations}) + result, err := h.api.SetAnnotation(map[string]map[string]string{tag: p.Annotations}) if err == nil && len(result) > 0 { err = result[0].Error } if err != nil { return errors.Annotatef(err, "cannot set annotations for %s %q", p.EntityType, eid) } - h.log.Infof("annotations set for %s %s", p.EntityType, eid) + logger.Debugf("annotations set for %s %s", p.EntityType, eid) return nil } @@ -828,9 +801,15 @@ // If the application is already deployed using the given charm id, do nothing. // This function returns an error if the existing charm and the target one are // incompatible, meaning an upgrade from one to the other is not allowed. 
-func (h *bundleHandler) upgradeCharm(applicationName string, chID charmstore.CharmID, csMac *macaroon.Macaroon, resources map[string]string) error { +func (h *bundleHandler) upgradeCharm( + api DeployAPI, + applicationName string, + chID charmstore.CharmID, + csMac *macaroon.Macaroon, + resources map[string]string, +) error { id := chID.URL.String() - existing, err := h.applicationClient.GetCharmURL(applicationName) + existing, err := h.api.GetCharmURL(applicationName) if err != nil { return errors.Annotatef(err, "cannot retrieve info for application %q", applicationName) } @@ -846,13 +825,25 @@ if url.WithRevision(-1).Path() != existing.WithRevision(-1).Path() { return errors.Errorf("bundle charm %q is incompatible with existing charm %q", id, existing) } - filtered, err := getUpgradeResources(h.serviceDeployer.api, applicationName, url, h.client, resources) + charmsClient := charms.NewClient(api) + resourceLister, err := resourceadapters.NewAPIClient(api) + if err != nil { + return errors.Trace(err) + } + filtered, err := getUpgradeResources(charmsClient, resourceLister, applicationName, url, resources) if err != nil { return errors.Trace(err) } var resNames2IDs map[string]string if len(filtered) != 0 { - resNames2IDs, err = handleResources(h.serviceDeployer.api, resources, applicationName, chID, csMac, filtered) + resNames2IDs, err = resourceadapters.DeployResources( + applicationName, + chID, + csMac, + resources, + filtered, + api, + ) if err != nil { return errors.Trace(err) } @@ -862,7 +853,7 @@ CharmID: chID, ResourceIDs: resNames2IDs, } - if err := h.applicationClient.SetCharm(cfg); err != nil { + if err := h.api.SetCharm(cfg); err != nil { return errors.Annotatef(err, "cannot upgrade charm to %q", id) } h.log.Infof("upgraded charm for existing application %s (from %s to %s)", applicationName, existing, id) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/application/bundle_resources_test.go 
juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/application/bundle_resources_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/application/bundle_resources_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/application/bundle_resources_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -46,21 +46,16 @@ lines := strings.Split(output, "\n") expectedLines := strings.Split(strings.TrimSpace(` -added charm cs:trusty/starsay-42 -application starsay deployed (charm cs:trusty/starsay-42 with the series "trusty" defined by the bundle) +Deploying charm "cs:trusty/starsay-42" added resource install-resource added resource store-resource added resource upload-resource -added starsay/0 unit to new machine -deployment of bundle "local:bundle/example-0" completed +Deploy of bundle completed. `), "\n") c.Check(lines, gc.HasLen, len(expectedLines)) c.Check(lines[0], gc.Equals, expectedLines[0]) - c.Check(lines[1], gc.Equals, expectedLines[1]) // The "added resource" lines are checked after we sort since // the ordering of those lines is unknown. - c.Check(lines[5], gc.Equals, expectedLines[5]) - c.Check(lines[6], gc.Equals, expectedLines[6]) sort.Strings(lines) sort.Strings(expectedLines) c.Check(lines, jc.DeepEquals, expectedLines) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/application/bundle_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/application/bundle_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/application/bundle_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/application/bundle_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -34,7 +34,7 @@ // charm or bundle. The deployment output and error are returned. func runDeployCommand(c *gc.C, id string, args ...string) (string, error) { args = append([]string{id}, args...) - ctx, err := coretesting.RunCommand(c, NewDeployCommand(), args...) 
+ ctx, err := coretesting.RunCommand(c, NewDefaultDeployCommand(), args...) return strings.Trim(coretesting.Stderr(ctx), "\n"), err } @@ -59,18 +59,8 @@ testcharms.UploadCharm(c, s.client, "xenial/mysql-42", "mysql") testcharms.UploadCharm(c, s.client, "xenial/wordpress-47", "wordpress") testcharms.UploadBundle(c, s.client, "bundle/wordpress-simple-1", "wordpress-simple") - output, err := runDeployCommand(c, "bundle/wordpress-simple") + _, err := runDeployCommand(c, "bundle/wordpress-simple") c.Assert(err, jc.ErrorIsNil) - expectedOutput := ` -added charm cs:xenial/mysql-42 -application mysql deployed (charm cs:xenial/mysql-42 with the series "xenial" defined by the bundle) -added charm cs:xenial/wordpress-47 -application wordpress deployed (charm cs:xenial/wordpress-47 with the series "xenial" defined by the bundle) -related wordpress:db and mysql:server -added mysql/0 unit to new machine -added wordpress/0 unit to new machine -deployment of bundle "cs:bundle/wordpress-simple-1" completed` - c.Assert(output, gc.Equals, strings.TrimSpace(expectedOutput)) s.assertCharmsUploaded(c, "cs:xenial/mysql-42", "cs:xenial/wordpress-47") s.assertApplicationsDeployed(c, map[string]serviceInfo{ "mysql": {charm: "cs:xenial/mysql-42"}, @@ -87,17 +77,8 @@ testcharms.UploadCharm(c, s.client, "xenial/terms1-17", "terms1") testcharms.UploadCharm(c, s.client, "xenial/terms2-42", "terms2") testcharms.UploadBundle(c, s.client, "bundle/terms-simple-1", "terms-simple") - output, err := runDeployCommand(c, "bundle/terms-simple") + _, err := runDeployCommand(c, "bundle/terms-simple") c.Assert(err, jc.ErrorIsNil) - expectedOutput := ` -added charm cs:xenial/terms1-17 -application terms1 deployed (charm cs:xenial/terms1-17 with the series "xenial" defined by the bundle) -added charm cs:xenial/terms2-42 -application terms2 deployed (charm cs:xenial/terms2-42 with the series "xenial" defined by the bundle) -added terms1/0 unit to new machine -added terms2/0 unit to new machine -deployment of 
bundle "cs:bundle/terms-simple-1" completed` - c.Assert(output, gc.Equals, strings.TrimSpace(expectedOutput)) s.assertCharmsUploaded(c, "cs:xenial/terms1-17", "cs:xenial/terms2-42") s.assertApplicationsDeployed(c, map[string]serviceInfo{ "terms1": {charm: "cs:xenial/terms1-17"}, @@ -114,21 +95,11 @@ testcharms.UploadCharm(c, s.client, "xenial/mysql-42", "mysql-storage") testcharms.UploadCharm(c, s.client, "xenial/wordpress-47", "wordpress") testcharms.UploadBundle(c, s.client, "bundle/wordpress-with-mysql-storage-1", "wordpress-with-mysql-storage") - output, err := runDeployCommand( + _, err := runDeployCommand( c, "bundle/wordpress-with-mysql-storage", "--storage", "mysql:logs=tmpfs,10G", // override logs ) c.Assert(err, jc.ErrorIsNil) - expectedOutput := ` -added charm cs:xenial/mysql-42 -application mysql deployed (charm cs:xenial/mysql-42 with the series "xenial" defined by the bundle) -added charm cs:xenial/wordpress-47 -application wordpress deployed (charm cs:xenial/wordpress-47 with the series "xenial" defined by the bundle) -related wordpress:db and mysql:server -added mysql/0 unit to new machine -added wordpress/0 unit to new machine -deployment of bundle "cs:bundle/wordpress-with-mysql-storage-1" completed` - c.Assert(output, gc.Equals, strings.TrimSpace(expectedOutput)) s.assertCharmsUploaded(c, "cs:xenial/mysql-42", "cs:xenial/wordpress-47") s.assertApplicationsDeployed(c, map[string]serviceInfo{ "mysql": { @@ -152,10 +123,13 @@ testcharms.UploadCharm(c, s.client, "xenial/wordpress-extra-bindings-47", "wordpress-extra-bindings") testcharms.UploadBundle(c, s.client, "bundle/wordpress-with-endpoint-bindings-1", "wordpress-with-endpoint-bindings") output, err := runDeployCommand(c, "bundle/wordpress-with-endpoint-bindings") - c.Assert(err, gc.ErrorMatches, + c.Assert(err, gc.ErrorMatches, ""+ "cannot deploy bundle: cannot deploy application \"mysql\": "+ - "cannot add application \"mysql\": unknown space \"db\" not valid") - c.Assert(output, gc.Equals, 
"added charm cs:xenial/mysql-42") + "cannot add application \"mysql\": unknown space \"db\" not valid") + c.Assert(output, gc.Equals, ""+ + `Located bundle "cs:bundle/wordpress-with-endpoint-bindings-1"`+"\n"+ + `Deploying charm "cs:xenial/mysql-42"`, + ) s.assertCharmsUploaded(c, "cs:xenial/mysql-42") s.assertApplicationsDeployed(c, map[string]serviceInfo{}) s.assertUnitsCreated(c, map[string]string{}) @@ -170,18 +144,8 @@ testcharms.UploadCharm(c, s.client, "xenial/mysql-42", "mysql") testcharms.UploadCharm(c, s.client, "xenial/wordpress-extra-bindings-47", "wordpress-extra-bindings") testcharms.UploadBundle(c, s.client, "bundle/wordpress-with-endpoint-bindings-1", "wordpress-with-endpoint-bindings") - output, err := runDeployCommand(c, "bundle/wordpress-with-endpoint-bindings") + _, err = runDeployCommand(c, "bundle/wordpress-with-endpoint-bindings") c.Assert(err, jc.ErrorIsNil) - expectedOutput := ` -added charm cs:xenial/mysql-42 -application mysql deployed (charm cs:xenial/mysql-42 with the series "xenial" defined by the bundle) -added charm cs:xenial/wordpress-extra-bindings-47 -application wordpress-extra-bindings deployed (charm cs:xenial/wordpress-extra-bindings-47 with the series "xenial" defined by the bundle) -related wordpress-extra-bindings:db and mysql:server -added mysql/0 unit to new machine -added wordpress-extra-bindings/0 unit to new machine -deployment of bundle "cs:bundle/wordpress-with-endpoint-bindings-1" completed` - c.Assert(output, gc.Equals, strings.TrimSpace(expectedOutput)) s.assertCharmsUploaded(c, "cs:xenial/mysql-42", "cs:xenial/wordpress-extra-bindings-47") s.assertApplicationsDeployed(c, map[string]serviceInfo{ @@ -219,18 +183,8 @@ testcharms.UploadBundle(c, s.client, "bundle/wordpress-simple-1", "wordpress-simple") _, err := runDeployCommand(c, "bundle/wordpress-simple") c.Assert(err, jc.ErrorIsNil) - output, err := runDeployCommand(c, "bundle/wordpress-simple") + _, err = runDeployCommand(c, "bundle/wordpress-simple") 
c.Assert(err, jc.ErrorIsNil) - expectedOutput := ` -added charm cs:xenial/mysql-42 -reusing application mysql (charm: cs:xenial/mysql-42) -added charm cs:xenial/wordpress-47 -reusing application wordpress (charm: cs:xenial/wordpress-47) -wordpress:db and mysql:server are already related -avoid adding new units to application mysql: 1 unit already present -avoid adding new units to application wordpress: 1 unit already present -deployment of bundle "cs:bundle/wordpress-simple-1" completed` - c.Assert(output, gc.Equals, strings.TrimSpace(expectedOutput)) s.assertCharmsUploaded(c, "cs:xenial/mysql-42", "cs:xenial/wordpress-47") s.assertApplicationsDeployed(c, map[string]serviceInfo{ "mysql": {charm: "cs:xenial/mysql-42"}, @@ -271,14 +225,8 @@ ` err := ioutil.WriteFile(path, []byte(data), 0644) c.Assert(err, jc.ErrorIsNil) - output, err := runDeployCommand(c, path) + _, err = runDeployCommand(c, path) c.Assert(err, jc.ErrorIsNil) - expectedOutput := fmt.Sprintf(` -added charm local:xenial/dummy-1 -application dummy deployed (charm local:xenial/dummy-1 with the series "xenial" defined by the bundle) -added dummy/0 unit to new machine -deployment of bundle %q completed`, path) - c.Assert(output, gc.Equals, strings.TrimSpace(expectedOutput)) s.assertCharmsUploaded(c, "local:xenial/dummy-1") s.assertApplicationsDeployed(c, map[string]serviceInfo{ "dummy": {charm: "local:xenial/dummy-1"}, @@ -298,13 +246,8 @@ ` err := ioutil.WriteFile(path, []byte(data), 0644) c.Assert(err, jc.ErrorIsNil) - output, err := runDeployCommand(c, path) + _, err = runDeployCommand(c, path) c.Assert(err, jc.ErrorIsNil) - expectedOutput := fmt.Sprintf(` -added charm cs:~who/multi-series-0 -application dummy deployed (charm cs:~who/multi-series-0 with the series "trusty" defined by the bundle) -deployment of bundle %q completed`, path) - c.Assert(output, gc.Equals, strings.TrimSpace(expectedOutput)) s.assertCharmsUploaded(c, "cs:~who/multi-series-0") s.assertApplicationsDeployed(c, 
map[string]serviceInfo{ "dummy": {charm: "cs:~who/multi-series-0"}, @@ -505,7 +448,7 @@ charmsPath := c.MkDir() mysqlPath := testcharms.Repo.ClonedDirPath(charmsPath, "mysql") wordpressPath := testcharms.Repo.ClonedDirPath(charmsPath, "wordpress") - output, err := s.DeployBundleYAML(c, fmt.Sprintf(` + _, err := s.DeployBundleYAML(c, fmt.Sprintf(` series: xenial applications: wordpress: @@ -518,17 +461,6 @@ - ["wordpress:db", "mysql:server"] `, wordpressPath, mysqlPath)) c.Assert(err, jc.ErrorIsNil) - expectedOutput := ` -added charm local:xenial/mysql-1 -application mysql deployed (charm local:xenial/mysql-1 with the series "xenial" defined by the bundle) -added charm local:xenial/wordpress-3 -application wordpress deployed (charm local:xenial/wordpress-3 with the series "xenial" defined by the bundle) -related wordpress:db and mysql:server -added mysql/0 unit to new machine -added mysql/1 unit to new machine -added wordpress/0 unit to new machine -deployment of bundle "local:bundle/example-0" completed` - c.Assert(output, gc.Equals, strings.TrimSpace(expectedOutput)) s.assertCharmsUploaded(c, "local:xenial/mysql-1", "local:xenial/wordpress-3") s.assertApplicationsDeployed(c, map[string]serviceInfo{ "mysql": {charm: "local:xenial/mysql-1"}, @@ -546,7 +478,7 @@ charmsPath := c.MkDir() testcharms.UploadCharm(c, s.client, "xenial/wordpress-42", "wordpress") mysqlPath := testcharms.Repo.ClonedDirPath(charmsPath, "mysql") - output, err := s.DeployBundleYAML(c, fmt.Sprintf(` + _, err := s.DeployBundleYAML(c, fmt.Sprintf(` series: xenial applications: wordpress: @@ -560,16 +492,6 @@ - ["wordpress:db", "mysql:server"] `, mysqlPath)) c.Assert(err, jc.ErrorIsNil) - expectedOutput := ` -added charm local:xenial/mysql-1 -application mysql deployed (charm local:xenial/mysql-1 with the series "xenial" defined by the bundle) -added charm cs:xenial/wordpress-42 -application wordpress deployed (charm cs:xenial/wordpress-42 with the series "xenial" defined by the bundle) -related 
wordpress:db and mysql:server -added mysql/0 unit to new machine -added wordpress/0 unit to new machine -deployment of bundle "local:bundle/example-0" completed` - c.Assert(output, gc.Equals, strings.TrimSpace(expectedOutput)) s.assertCharmsUploaded(c, "local:xenial/mysql-1", "cs:xenial/wordpress-42") s.assertApplicationsDeployed(c, map[string]serviceInfo{ "mysql": {charm: "local:xenial/mysql-1"}, @@ -585,7 +507,7 @@ func (s *BundleDeployCharmStoreSuite) TestDeployBundleApplicationOptions(c *gc.C) { testcharms.UploadCharm(c, s.client, "xenial/wordpress-42", "wordpress") testcharms.UploadCharm(c, s.client, "precise/dummy-0", "dummy") - output, err := s.DeployBundleYAML(c, ` + _, err := s.DeployBundleYAML(c, ` applications: wordpress: charm: wordpress @@ -600,15 +522,6 @@ skill-level: 47 `) c.Assert(err, jc.ErrorIsNil) - expectedOutput := ` -added charm cs:precise/dummy-0 -application customized deployed (charm cs:precise/dummy-0 with the series "precise" defined by the bundle) -added charm cs:xenial/wordpress-42 -application wordpress deployed (charm cs:xenial/wordpress-42 with the series "xenial" defined by the bundle) -added customized/0 unit to new machine -added wordpress/0 unit to new machine -deployment of bundle "local:bundle/example-0" completed` - c.Assert(output, gc.Equals, strings.TrimSpace(expectedOutput)) s.assertCharmsUploaded(c, "cs:precise/dummy-0", "cs:xenial/wordpress-42") s.assertApplicationsDeployed(c, map[string]serviceInfo{ "customized": { @@ -629,25 +542,17 @@ func (s *BundleDeployCharmStoreSuite) TestDeployBundleApplicationConstrants(c *gc.C) { testcharms.UploadCharm(c, s.client, "xenial/wordpress-42", "wordpress") testcharms.UploadCharm(c, s.client, "precise/dummy-0", "dummy") - output, err := s.DeployBundleYAML(c, ` + _, err := s.DeployBundleYAML(c, ` applications: wordpress: charm: wordpress - constraints: mem=4G cpu-cores=2 + constraints: mem=4G cores=2 customized: charm: precise/dummy-0 num_units: 1 constraints: arch=i386 `) 
c.Assert(err, jc.ErrorIsNil) - expectedOutput := ` -added charm cs:precise/dummy-0 -application customized deployed (charm cs:precise/dummy-0 with the series "precise" defined by the bundle) -added charm cs:xenial/wordpress-42 -application wordpress deployed (charm cs:xenial/wordpress-42 with the series "xenial" defined by the bundle) -added customized/0 unit to new machine -deployment of bundle "local:bundle/example-0" completed` - c.Assert(output, gc.Equals, strings.TrimSpace(expectedOutput)) s.assertCharmsUploaded(c, "cs:precise/dummy-0", "cs:xenial/wordpress-42") s.assertApplicationsDeployed(c, map[string]serviceInfo{ "customized": { @@ -656,7 +561,7 @@ }, "wordpress": { charm: "cs:xenial/wordpress-42", - constraints: constraints.MustParse("mem=4G cpu-cores=2"), + constraints: constraints.MustParse("mem=4G cores=2"), }, }) s.assertUnitsCreated(c, map[string]string{ @@ -670,7 +575,7 @@ testcharms.UploadCharm(c, s.client, "vivid/upgrade-2", "upgrade2") // First deploy the bundle. - output, err := s.DeployBundleYAML(c, ` + _, err := s.DeployBundleYAML(c, ` applications: wordpress: charm: wordpress @@ -683,49 +588,29 @@ num_units: 1 `) c.Assert(err, jc.ErrorIsNil) - expectedOutput := ` -added charm cs:vivid/upgrade-1 -application up deployed (charm cs:vivid/upgrade-1 with the series "vivid" defined by the bundle) -added charm cs:xenial/wordpress-42 -application wordpress deployed (charm cs:xenial/wordpress-42 with the series "xenial" defined by the bundle) -added up/0 unit to new machine -added wordpress/0 unit to new machine -deployment of bundle "local:bundle/example-0" completed` - c.Assert(output, gc.Equals, strings.TrimSpace(expectedOutput)) s.assertCharmsUploaded(c, "cs:vivid/upgrade-1", "cs:xenial/wordpress-42") // Then deploy a new bundle with modified charm revision and options. 
- output, err = s.DeployBundleYAML(c, ` + _, err = s.DeployBundleYAML(c, ` applications: wordpress: charm: wordpress num_units: 1 options: blog-title: new title - constraints: spaces=new cpu-cores=8 + constraints: spaces=new cores=8 up: charm: vivid/upgrade-2 num_units: 1 `) c.Assert(err, jc.ErrorIsNil) - expectedOutput = ` -added charm cs:vivid/upgrade-2 -upgraded charm for existing application up (from cs:vivid/upgrade-1 to cs:vivid/upgrade-2) -added charm cs:xenial/wordpress-42 -reusing application wordpress (charm: cs:xenial/wordpress-42) -configuration updated for application wordpress -constraints applied for application wordpress -avoid adding new units to application up: 1 unit already present -avoid adding new units to application wordpress: 1 unit already present -deployment of bundle "local:bundle/example-0" completed` - c.Assert(output, gc.Equals, strings.TrimSpace(expectedOutput)) s.assertCharmsUploaded(c, "cs:vivid/upgrade-1", "cs:vivid/upgrade-2", "cs:xenial/wordpress-42") s.assertApplicationsDeployed(c, map[string]serviceInfo{ "up": {charm: "cs:vivid/upgrade-2"}, "wordpress": { charm: "cs:xenial/wordpress-42", config: charm.Settings{"blog-title": "new title"}, - constraints: constraints.MustParse("spaces=new cpu-cores=8"), + constraints: constraints.MustParse("spaces=new cores=8"), }, }) s.assertUnitsCreated(c, map[string]string{ @@ -751,33 +636,19 @@ } // First deploy the bundle. 
- output, err := s.DeployBundleYAML(c, content) + _, err := s.DeployBundleYAML(c, content) c.Assert(err, jc.ErrorIsNil) - expectedOutput := ` -added charm cs:xenial/wordpress-42 -application wordpress deployed (charm cs:xenial/wordpress-42 with the series "xenial" defined by the bundle) -application wordpress exposed -added wordpress/0 unit to new machine -deployment of bundle "local:bundle/example-0" completed` - c.Assert(output, gc.Equals, strings.TrimSpace(expectedOutput)) s.assertApplicationsDeployed(c, expectedApplications) // Then deploy the same bundle again: no error is produced when the application // is exposed again. - output, err = s.DeployBundleYAML(c, content) + _, err = s.DeployBundleYAML(c, content) c.Assert(err, jc.ErrorIsNil) - expectedOutput = ` -added charm cs:xenial/wordpress-42 -reusing application wordpress (charm: cs:xenial/wordpress-42) -application wordpress exposed -avoid adding new units to application wordpress: 1 unit already present -deployment of bundle "local:bundle/example-0" completed` - c.Assert(output, gc.Equals, strings.TrimSpace(expectedOutput)) s.assertApplicationsDeployed(c, expectedApplications) // Then deploy a bundle with the application unexposed, and check that the // application is not unexposed. 
- output, err = s.DeployBundleYAML(c, ` + _, err = s.DeployBundleYAML(c, ` applications: wordpress: charm: wordpress @@ -785,12 +656,6 @@ expose: false `) c.Assert(err, jc.ErrorIsNil) - expectedOutput = ` -added charm cs:xenial/wordpress-42 -reusing application wordpress (charm: cs:xenial/wordpress-42) -avoid adding new units to application wordpress: 1 unit already present -deployment of bundle "local:bundle/example-0" completed` - c.Assert(output, gc.Equals, strings.TrimSpace(expectedOutput)) s.assertApplicationsDeployed(c, expectedApplications) } @@ -837,7 +702,7 @@ testcharms.UploadCharm(c, s.client, "xenial/mysql-1", "mysql") testcharms.UploadCharm(c, s.client, "xenial/postgres-2", "mysql") testcharms.UploadCharm(c, s.client, "xenial/varnish-3", "varnish") - output, err := s.DeployBundleYAML(c, ` + _, err := s.DeployBundleYAML(c, ` applications: wp: charm: wordpress @@ -857,24 +722,6 @@ - ["varnish:webcache", "wp:cache"] `) c.Assert(err, jc.ErrorIsNil) - expectedOutput := ` -added charm cs:xenial/mysql-1 -application mysql deployed (charm cs:xenial/mysql-1 with the series "xenial" defined by the bundle) -added charm cs:xenial/postgres-2 -application pgres deployed (charm cs:xenial/postgres-2 with the series "xenial" defined by the bundle) -added charm cs:xenial/varnish-3 -application varnish deployed (charm cs:xenial/varnish-3 with the series "xenial" defined by the bundle) -added charm cs:xenial/wordpress-0 -application wp deployed (charm cs:xenial/wordpress-0 with the series "xenial" defined by the bundle) -related wp:db and mysql:server -related wp:db and pgres:server -related varnish:webcache and wp:cache -added mysql/0 unit to new machine -added pgres/0 unit to new machine -added varnish/0 unit to new machine -added wp/0 unit to new machine -deployment of bundle "local:bundle/example-0" completed` - c.Assert(output, gc.Equals, strings.TrimSpace(expectedOutput)) s.assertRelationsEstablished(c, "wp:db mysql:server", "wp:db pgres:server", "wp:cache 
varnish:webcache") s.assertUnitsCreated(c, map[string]string{ "mysql/0": "0", @@ -904,7 +751,7 @@ - ["wp:db", "mysql:server"] `) c.Assert(err, jc.ErrorIsNil) - output, err := s.DeployBundleYAML(c, ` + _, err = s.DeployBundleYAML(c, ` applications: wp: charm: wordpress @@ -920,20 +767,6 @@ - ["varnish:webcache", "wp:cache"] `) c.Assert(err, jc.ErrorIsNil) - expectedOutput := ` -added charm cs:xenial/mysql-1 -reusing application mysql (charm: cs:xenial/mysql-1) -added charm cs:xenial/varnish-3 -reusing application varnish (charm: cs:xenial/varnish-3) -added charm cs:xenial/wordpress-0 -reusing application wp (charm: cs:xenial/wordpress-0) -wp:db and mysql:server are already related -related varnish:webcache and wp:cache -avoid adding new units to application mysql: 1 unit already present -avoid adding new units to application varnish: 1 unit already present -avoid adding new units to application wp: 1 unit already present -deployment of bundle "local:bundle/example-0" completed` - c.Assert(output, gc.Equals, strings.TrimSpace(expectedOutput)) s.assertRelationsEstablished(c, "wp:db mysql:server", "wp:cache varnish:webcache") s.assertUnitsCreated(c, map[string]string{ "mysql/0": "0", @@ -967,24 +800,8 @@ series: xenial 2: ` - output, err := s.DeployBundleYAML(c, content) + _, err := s.DeployBundleYAML(c, content) c.Assert(err, jc.ErrorIsNil) - expectedOutput := ` -added charm cs:xenial/mysql-2 -application sql deployed (charm cs:xenial/mysql-2 with the series "xenial" defined by the bundle) -added charm cs:xenial/wordpress-0 -application wp deployed (charm cs:xenial/wordpress-0 with the series "xenial" defined by the bundle) -created new machine 0 for holding wp unit -created new machine 1 for holding wp unit -added wp/0 unit to machine 0 -created 0/lxd/0 container in machine 0 for holding sql unit -created new machine 2 for holding sql unit -created 1/lxd/0 container in machine 1 for holding wp unit -added sql/0 unit to machine 0/lxd/0 -added sql/1 unit to machine 2 
-added wp/1 unit to machine 1/lxd/0 -deployment of bundle "local:bundle/example-0" completed` - c.Assert(output, gc.Equals, strings.TrimSpace(expectedOutput)) s.assertApplicationsDeployed(c, map[string]serviceInfo{ "sql": {charm: "cs:xenial/mysql-2"}, "wp": { @@ -1018,20 +835,8 @@ delete(expectedUnits, "non-existent") // Redeploy the same bundle again. - output, err = s.DeployBundleYAML(c, content) + _, err = s.DeployBundleYAML(c, content) c.Assert(err, jc.ErrorIsNil) - expectedOutput = ` -added charm cs:xenial/mysql-2 -reusing application sql (charm: cs:xenial/mysql-2) -added charm cs:xenial/wordpress-0 -reusing application wp (charm: cs:xenial/wordpress-0) -configuration updated for application wp -avoid creating other machines to host wp units -avoid adding new units to application wp: 2 units already present -avoid creating other machines to host sql units -avoid adding new units to application sql: 2 units already present -deployment of bundle "local:bundle/example-0" completed` - c.Assert(output, gc.Equals, strings.TrimSpace(expectedOutput)) s.assertUnitsCreated(c, map[string]string{ "sql/0": "0/lxd/0", "sql/1": "2", @@ -1081,7 +886,7 @@ func (s *BundleDeployCharmStoreSuite) TestDeployBundleMachineAttributes(c *gc.C) { testcharms.UploadCharm(c, s.client, "xenial/django-42", "dummy") - output, err := s.DeployBundleYAML(c, ` + _, err := s.DeployBundleYAML(c, ` applications: django: charm: cs:xenial/django-42 @@ -1092,21 +897,11 @@ machines: 1: series: xenial - constraints: "cpu-cores=4 mem=4G" + constraints: "cores=4 mem=4G" annotations: foo: bar `) c.Assert(err, jc.ErrorIsNil) - expectedOutput := ` -added charm cs:xenial/django-42 -application django deployed (charm cs:xenial/django-42 with the series "xenial" defined by the bundle) -created new machine 0 for holding django unit -annotations set for machine 0 -added django/0 unit to machine 0 -created new machine 1 for holding django unit -added django/1 unit to machine 1 -deployment of bundle 
"local:bundle/example-0" completed` - c.Assert(output, gc.Equals, strings.TrimSpace(expectedOutput)) s.assertApplicationsDeployed(c, map[string]serviceInfo{ "django": {charm: "cs:xenial/django-42"}, }) @@ -1120,7 +915,7 @@ c.Assert(m.Series(), gc.Equals, "xenial") cons, err := m.Constraints() c.Assert(err, jc.ErrorIsNil) - expectedCons, err := constraints.Parse("cpu-cores=4 mem=4G") + expectedCons, err := constraints.Parse("cores=4 mem=4G") c.Assert(err, jc.ErrorIsNil) c.Assert(cons, jc.DeepEquals, expectedCons) ann, err := s.State.Annotations(m) @@ -1137,22 +932,13 @@ num_units: 2 `) c.Assert(err, jc.ErrorIsNil) - output, err := s.DeployBundleYAML(c, ` + _, err = s.DeployBundleYAML(c, ` applications: django: charm: cs:xenial/django-42 num_units: 5 `) c.Assert(err, jc.ErrorIsNil) - expectedOutput := ` -added charm cs:xenial/django-42 -reusing application django (charm: cs:xenial/django-42) -added django/2 unit to new machine -added django/3 unit to new machine -added django/4 unit to new machine -avoid adding new units to application django: 5 units already present -deployment of bundle "local:bundle/example-0" completed` - c.Assert(output, gc.Equals, strings.TrimSpace(expectedOutput)) s.assertUnitsCreated(c, map[string]string{ "django/0": "0", "django/1": "1", @@ -1165,7 +951,7 @@ func (s *BundleDeployCharmStoreSuite) TestDeployBundleUnitPlacedInApplication(c *gc.C) { testcharms.UploadCharm(c, s.client, "xenial/django-42", "dummy") testcharms.UploadCharm(c, s.client, "xenial/wordpress-0", "wordpress") - output, err := s.DeployBundleYAML(c, ` + _, err := s.DeployBundleYAML(c, ` applications: wordpress: charm: wordpress @@ -1176,18 +962,6 @@ to: [wordpress] `) c.Assert(err, jc.ErrorIsNil) - expectedOutput := ` -added charm cs:xenial/django-42 -application django deployed (charm cs:xenial/django-42 with the series "xenial" defined by the bundle) -added charm cs:xenial/wordpress-0 -application wordpress deployed (charm cs:xenial/wordpress-0 with the series "xenial" 
defined by the bundle) -added wordpress/0 unit to new machine -added wordpress/1 unit to new machine -added wordpress/2 unit to new machine -added django/0 unit to machine 0 -added django/1 unit to machine 1 -deployment of bundle "local:bundle/example-0" completed` - c.Assert(output, gc.Equals, strings.TrimSpace(expectedOutput)) s.assertUnitsCreated(c, map[string]string{ "django/0": "0", "django/1": "1", @@ -1201,7 +975,7 @@ testcharms.UploadCharm(c, s.client, "xenial/django-42", "dummy") testcharms.UploadCharm(c, s.client, "xenial/mem-47", "dummy") testcharms.UploadCharm(c, s.client, "xenial/rails-0", "dummy") - output, err := s.DeployBundleYAML(c, ` + _, err := s.DeployBundleYAML(c, ` applications: memcached: charm: cs:xenial/mem-47 @@ -1226,33 +1000,6 @@ series: xenial `) c.Assert(err, jc.ErrorIsNil) - expectedOutput := ` -added charm cs:xenial/django-42 -application django deployed (charm cs:xenial/django-42 with the series "xenial" defined by the bundle) -added charm cs:xenial/mem-47 -application memcached deployed (charm cs:xenial/mem-47 with the series "xenial" defined by the bundle) -added charm cs:xenial/rails-0 -application ror deployed (charm cs:xenial/rails-0 with the series "xenial" defined by the bundle) -created new machine 0 for holding memcached and ror units -added memcached/0 unit to machine 0 -added ror/0 unit to machine 0 -created 0/kvm/0 container in machine 0 for holding django unit -created new machine 1 for holding memcached unit -created new machine 2 for holding memcached unit -created new machine 3 for holding ror unit -added django/0 unit to machine 0 -added django/1 unit to machine 0/kvm/0 -added memcached/1 unit to machine 1 -added memcached/2 unit to machine 2 -added ror/1 unit to machine 3 -created 1/lxd/0 container in machine 1 for holding django unit -created 2/lxd/0 container in machine 2 for holding django unit -created 3/kvm/0 container in machine 3 for holding django unit -added django/2 unit to machine 1/lxd/0 -added django/3 
unit to machine 2/lxd/0 -added django/4 unit to machine 3/kvm/0 -deployment of bundle "local:bundle/example-0" completed` - c.Assert(output, gc.Equals, strings.TrimSpace(expectedOutput)) s.assertUnitsCreated(c, map[string]string{ "django/0": "0", "django/1": "0/kvm/0", @@ -1269,7 +1016,7 @@ func (s *BundleDeployCharmStoreSuite) TestDeployBundleUnitPlacedToMachines(c *gc.C) { testcharms.UploadCharm(c, s.client, "xenial/django-42", "dummy") - output, err := s.DeployBundleYAML(c, ` + _, err := s.DeployBundleYAML(c, ` applications: django: charm: cs:django @@ -1286,26 +1033,6 @@ 8: `) c.Assert(err, jc.ErrorIsNil) - expectedOutput := ` -added charm cs:xenial/django-42 -application django deployed (charm cs:xenial/django-42 with the series "xenial" defined by the bundle) -created new machine 0 for holding django unit -created new machine 1 for holding django unit -added django/0 unit to machine 0 -created new machine 2 for holding django unit -created 1/kvm/0 container in machine 1 for holding django unit -created 0/lxd/0 container in machine 0 for holding django unit -created 0/lxd/1 container in machine 0 for holding django unit -created 3/lxd/0 container in new machine for holding django unit -created 4/lxd/0 container in new machine for holding django unit -added django/1 unit to machine 2 -added django/2 unit to machine 1/kvm/0 -added django/3 unit to machine 0/lxd/0 -added django/4 unit to machine 0/lxd/1 -added django/5 unit to machine 3/lxd/0 -added django/6 unit to machine 4/lxd/0 -deployment of bundle "local:bundle/example-0" completed` - c.Assert(output, gc.Equals, strings.TrimSpace(expectedOutput)) s.assertUnitsCreated(c, map[string]string{ "django/0": "0", // Machine "4" in the bundle. "django/1": "2", // Machine "new" in the bundle. 
@@ -1321,7 +1048,7 @@ testcharms.UploadCharm(c, s.client, "xenial/django-42", "dummy") testcharms.UploadCharm(c, s.client, "xenial/mem-47", "dummy") testcharms.UploadCharm(c, s.client, "xenial/rails-0", "dummy") - output, err := s.DeployBundleYAML(c, ` + _, err := s.DeployBundleYAML(c, ` applications: memcached: charm: cs:xenial/mem-47 @@ -1345,33 +1072,6 @@ 3: `) c.Assert(err, jc.ErrorIsNil) - expectedOutput := ` -added charm cs:xenial/django-42 -application django deployed (charm cs:xenial/django-42 with the series "xenial" defined by the bundle) -added charm cs:xenial/mem-47 -application memcached deployed (charm cs:xenial/mem-47 with the series "xenial" defined by the bundle) -added charm cs:xenial/rails-0 -application ror deployed (charm cs:xenial/rails-0 with the series "xenial" defined by the bundle) -created new machine 0 for holding django, memcached and ror units -created new machine 1 for holding memcached unit -created new machine 2 for holding memcached and ror units -added django/0 unit to machine 0 -added memcached/0 unit to machine 0 -added memcached/1 unit to machine 1 -added memcached/2 unit to machine 2 -added ror/0 unit to machine 0 -created 0/lxd/0 container in machine 0 for holding django unit -created 1/lxd/0 container in machine 1 for holding django unit -created 2/lxd/0 container in machine 2 for holding django unit -created 2/kvm/0 container in machine 2 for holding ror unit -created 2/kvm/1 container in machine 2 for holding ror unit -added django/1 unit to machine 0/lxd/0 -added django/2 unit to machine 1/lxd/0 -added django/3 unit to machine 2/lxd/0 -added ror/1 unit to machine 2/kvm/0 -added ror/2 unit to machine 2/kvm/1 -deployment of bundle "local:bundle/example-0" completed` - c.Assert(output, gc.Equals, strings.TrimSpace(expectedOutput)) s.assertUnitsCreated(c, map[string]string{ "django/0": "0", "django/1": "0/lxd/0", @@ -1409,38 +1109,12 @@ 2: 3: ` - output, err = s.DeployBundleYAML(c, content) + _, err = s.DeployBundleYAML(c, 
content) c.Assert(err, jc.ErrorIsNil) - expectedOutput = ` -added charm cs:xenial/django-42 -reusing application django (charm: cs:xenial/django-42) -added charm cs:xenial/mem-47 -reusing application memcached (charm: cs:xenial/mem-47) -application node deployed (charm cs:xenial/django-42 with the series "xenial" defined by the bundle) -avoid creating other machines to host django and memcached units -avoid adding new units to application django: 4 units already present -avoid adding new units to application memcached: 3 units already present -created 1/lxd/1 container in machine 1 for holding node unit -added node/0 unit to machine 1/lxd/1 -deployment of bundle "local:bundle/example-0" completed` - c.Assert(output, gc.Equals, strings.TrimSpace(expectedOutput)) // Redeploy the same bundle again and check that nothing happens. - output, err = s.DeployBundleYAML(c, content) + _, err = s.DeployBundleYAML(c, content) c.Assert(err, jc.ErrorIsNil) - expectedOutput = ` -added charm cs:xenial/django-42 -reusing application django (charm: cs:xenial/django-42) -added charm cs:xenial/mem-47 -reusing application memcached (charm: cs:xenial/mem-47) -reusing application node (charm: cs:xenial/django-42) -avoid creating other machines to host django and memcached units -avoid adding new units to application django: 4 units already present -avoid adding new units to application memcached: 3 units already present -avoid creating other machines to host node units -avoid adding new units to application node: 1 unit already present -deployment of bundle "local:bundle/example-0" completed` - c.Assert(output, gc.Equals, strings.TrimSpace(expectedOutput)) s.assertUnitsCreated(c, map[string]string{ "django/0": "0", "django/1": "0/lxd/0", @@ -1456,7 +1130,7 @@ }) } -func (s *BundleDeployCharmStoreSuite) TestDeployBundleAnnotations(c *gc.C) { +func (s *BundleDeployCharmStoreSuite) TestDeployBundleWithAnnotations_OutputIsCorrect(c *gc.C) { testcharms.UploadCharm(c, s.client, 
"xenial/django-42", "dummy") testcharms.UploadCharm(c, s.client, "xenial/mem-47", "dummy") output, err := s.DeployBundleYAML(c, ` @@ -1476,18 +1150,34 @@ annotations: {foo: bar} `) c.Assert(err, jc.ErrorIsNil) - expectedOutput := ` -added charm cs:xenial/django-42 -application django deployed (charm cs:xenial/django-42 with the series "xenial" defined by the bundle) -annotations set for application django -added charm cs:xenial/mem-47 -application memcached deployed (charm cs:xenial/mem-47 with the series "xenial" defined by the bundle) -created new machine 0 for holding django unit -annotations set for machine 0 -added django/0 unit to machine 0 -added memcached/0 unit to new machine -deployment of bundle "local:bundle/example-0" completed` - c.Assert(output, gc.Equals, strings.TrimSpace(expectedOutput)) + + c.Check(output, gc.Equals, ""+ + `Deploying charm "cs:xenial/django-42"`+"\n"+ + `Deploying charm "cs:xenial/mem-47"`+"\n"+ + `Deploy of bundle completed.`, + ) +} + +func (s *BundleDeployCharmStoreSuite) TestDeployBundleAnnotations(c *gc.C) { + testcharms.UploadCharm(c, s.client, "xenial/django-42", "dummy") + testcharms.UploadCharm(c, s.client, "xenial/mem-47", "dummy") + _, err := s.DeployBundleYAML(c, ` + applications: + django: + charm: cs:django + num_units: 1 + annotations: + key1: value1 + key2: value2 + to: [1] + memcached: + charm: xenial/mem-47 + num_units: 1 + machines: + 1: + annotations: {foo: bar} + `) + c.Assert(err, jc.ErrorIsNil) svc, err := s.State.Application("django") c.Assert(err, jc.ErrorIsNil) ann, err := s.State.Annotations(svc) @@ -1503,7 +1193,7 @@ c.Assert(ann, jc.DeepEquals, map[string]string{"foo": "bar"}) // Update the annotations and deploy the bundle again. 
- output, err = s.DeployBundleYAML(c, ` + _, err = s.DeployBundleYAML(c, ` applications: django: charm: cs:django @@ -1517,15 +1207,6 @@ annotations: {answer: 42} `) c.Assert(err, jc.ErrorIsNil) - expectedOutput = ` -added charm cs:xenial/django-42 -reusing application django (charm: cs:xenial/django-42) -annotations set for application django -avoid creating other machines to host django units -annotations set for machine 0 -avoid adding new units to application django: 1 unit already present -deployment of bundle "local:bundle/example-0" completed` - c.Assert(output, gc.Equals, strings.TrimSpace(expectedOutput)) ann, err = s.State.Annotations(svc) c.Assert(err, jc.ErrorIsNil) c.Assert(ann, jc.DeepEquals, map[string]string{ diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/application/config.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/application/config.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/application/config.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/application/config.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,395 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. +package application + +import ( + "bytes" + "fmt" + "io/ioutil" + "os" + "strings" + "unicode/utf8" + + "github.com/juju/cmd" + "github.com/juju/errors" + "github.com/juju/gnuflag" + + "github.com/juju/juju/api/application" + "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/cmd/juju/block" + "github.com/juju/juju/cmd/modelcmd" + "github.com/juju/juju/cmd/output" + "github.com/juju/utils/keyvalues" +) + +const maxValueSize = 5242880 // Max size for a config file. + +const ( + configSummary = `Gets, sets, or resets configuration for a deployed application.` + configDetails = `By default, all configuration (keys, values, metadata) for the application are +displayed if a key is not specified. 
+ +Output includes the name of the charm used to deploy the application and a +listing of the application-specific configuration settings. +See ` + "`juju status`" + ` for application names. + +Examples: + juju config apache2 + juju config --format=json apache2 + juju config mysql dataset-size + juju config mysql --reset dataset-size,backup_dir + juju config apache2 --file path/to/config.yaml + juju config mysql dataset-size=80% backup_dir=/vol1/mysql/backups + juju config apache2 --model mymodel --file /home/ubuntu/mysql.yaml + +See also: + deploy + status +` +) + +// NewConfigCommand returns a command used to get, reset, and set application +// attributes. +func NewConfigCommand() cmd.Command { + return modelcmd.Wrap(&configCommand{}) +} + +type attributes map[string]string + +// configCommand get, sets, and resets configuration values of an application. +type configCommand struct { + api configCommandAPI + modelcmd.ModelCommandBase + out cmd.Output + + action func(configCommandAPI, *cmd.Context) error // get, set, or reset action set in Init + applicationName string + configFile cmd.FileVar + keys []string + reset []string // Holds the keys to be reset until parsed. + resetKeys []string // Holds the keys to be reset once parsed. + useFile bool + values attributes +} + +// configCommandAPI is an interface to allow passing in a fake implementation under test. +type configCommandAPI interface { + Close() error + Update(args params.ApplicationUpdate) error + Get(application string) (*params.ApplicationGetResults, error) + Set(application string, options map[string]string) error + Unset(application string, options []string) error +} + +// Info is part of the cmd.Command interface. +func (c *configCommand) Info() *cmd.Info { + return &cmd.Info{ + Name: "config", + Args: " [--reset ] [][=] ...]", + Purpose: configSummary, + Doc: configDetails, + } +} + +// SetFlags is part of the cmd.Command interface. 
+func (c *configCommand) SetFlags(f *gnuflag.FlagSet) { + c.ModelCommandBase.SetFlags(f) + c.out.AddFlags(f, "yaml", output.DefaultFormatters) + f.Var(&c.configFile, "file", "path to yaml-formatted application config") + f.Var(cmd.NewAppendStringsValue(&c.reset), "reset", "Reset the provided comma delimited keys") +} + +// getAPI either uses the fake API set at test time or that is nil, gets a real +// API and sets that as the API. +func (c *configCommand) getAPI() (configCommandAPI, error) { + if c.api != nil { + return c.api, nil + } + root, err := c.NewAPIRoot() + if err != nil { + return nil, errors.Trace(err) + } + client := application.NewClient(root) + return client, nil +} + +// Init is part of the cmd.Command interface. +func (c *configCommand) Init(args []string) error { + if len(args) == 0 || len(strings.Split(args[0], "=")) > 1 { + return errors.New("no application name specified") + } + + // If there are arguments provided to reset, we turn it into a slice of + // strings and verify them. If there is one or more valid keys to reset and + // no other errors initalizing the command, c.resetDefaults will be called + // in c.Run. + if err := c.parseResetKeys(); err != nil { + return errors.Trace(err) + } + + c.applicationName = args[0] + args = args[1:] + + switch len(args) { + case 0: + return c.handleZeroArgs() + case 1: + return c.handleOneArg(args) + default: + return c.handleArgs(args) + } +} + +// handleZeroArgs handles the case where there are no positional args. +func (c *configCommand) handleZeroArgs() error { + // If there's a path we're setting args from a file + if c.configFile.Path != "" { + return c.parseSet([]string{}) + } + if len(c.reset) == 0 { + // If there's nothing to reset we're getting all the settings. + c.action = c.getConfig + } + // Otherwise just reset. + return nil +} + +// handleOneArg handles the case where there is one positional arg. 
+func (c *configCommand) handleOneArg(args []string) error { + // If there's an '=', this must be setting a value + if strings.Contains(args[0], "=") { + return c.parseSet(args) + } + // If there's no reset, we want to get a single value + if len(c.reset) == 0 { + c.action = c.getConfig + c.keys = args + return nil + } + // Otherwise we have reset and a get arg, which is invalid. + return errors.New("cannot reset and retrieve values simultaneously") +} + +// handleArgs handles the case where there's more than one positional arg. +func (c *configCommand) handleArgs(args []string) error { + // This must be setting values but let's make sure. + var pairs, numArgs int + numArgs = len(args) + for _, a := range args { + if strings.Contains(a, "=") { + pairs++ + } + } + if pairs == numArgs { + return c.parseSet(args) + } + if pairs == 0 { + return errors.New("can only retrieve a single value, or all values") + } + return errors.New("cannot set and retrieve values simultaneously") +} + +// parseResetKeys splits the keys provided to --reset. +func (c *configCommand) parseResetKeys() error { + if len(c.reset) == 0 { + return nil + } + var resetKeys []string + for _, value := range c.reset { + keys := strings.Split(strings.Trim(value, ","), ",") + resetKeys = append(resetKeys, keys...) + } + for _, k := range resetKeys { + if strings.Contains(k, "=") { + return errors.Errorf( + `--reset accepts a comma delimited set of keys "a,b,c", received: %q`, k) + } + } + + c.resetKeys = resetKeys + return nil +} + +// parseSet parses the command line args when --file is set or if the +// positional args are key=value pairs. 
+func (c *configCommand) parseSet(args []string) error { + file := c.configFile.Path != "" + if file && len(args) > 0 { + return errors.New("cannot specify --file and key=value arguments simultaneously") + } + c.action = c.setConfig + if file { + c.useFile = true + return nil + } + + settings, err := keyvalues.Parse(args, true) + if err != nil { + return err + } + c.values = settings + + return nil +} + +// Run implements the cmd.Command interface. +func (c *configCommand) Run(ctx *cmd.Context) error { + client, err := c.getAPI() + if err != nil { + return errors.Trace(err) + } + defer client.Close() + if len(c.resetKeys) > 0 { + if err := c.resetConfig(client, ctx); err != nil { + // We return this error naked as it is almost certainly going to be + // cmd.ErrSilent and the cmd.Command framework expects that back + // from cmd.Run if the process is blocked. + return err + } + } + if c.action == nil { + // If we are reset only we end up here, only we've already done that. + return nil + } + + return c.action(client, ctx) +} + +// resetConfig is the run action when we are resetting attributes. +func (c *configCommand) resetConfig(client configCommandAPI, ctx *cmd.Context) error { + return block.ProcessBlockedError(client.Unset(c.applicationName, c.resetKeys), block.BlockChange) +} + +// setConfig is the run action when we are setting new attribute values as args +// or as a file passed in. 
+func (c *configCommand) setConfig(client configCommandAPI, ctx *cmd.Context) error { + if c.useFile { + return c.setConfigFromFile(client, ctx) + } + + settings, err := c.validateValues(ctx) + if err != nil { + return errors.Trace(err) + } + + result, err := client.Get(c.applicationName) + if err != nil { + return err + } + + for k, v := range settings { + configValue := result.Config[k] + + configValueMap, ok := configValue.(map[string]interface{}) + if ok { + // convert the value to string and compare + if fmt.Sprintf("%v", configValueMap["value"]) == v { + logger.Warningf("the configuration setting %q already has the value %q", k, v) + } + } + } + + return block.ProcessBlockedError(client.Set(c.applicationName, settings), block.BlockChange) +} + +// setConfigFromFile sets the application configuration from settings passed +// in a YAML file. +func (c *configCommand) setConfigFromFile(client configCommandAPI, ctx *cmd.Context) error { + var ( + b []byte + err error + ) + if c.configFile.Path == "-" { + buf := bytes.Buffer{} + buf.ReadFrom(ctx.Stdin) + b = buf.Bytes() + } else { + b, err = c.configFile.Read(ctx) + if err != nil { + return err + } + } + return block.ProcessBlockedError( + client.Update( + params.ApplicationUpdate{ + ApplicationName: c.applicationName, + SettingsYAML: string(b)}), block.BlockChange) +} + +// getConfig is the run action to return one or all configuration values. 
+func (c *configCommand) getConfig(client configCommandAPI, ctx *cmd.Context) error { + results, err := client.Get(c.applicationName) + if err != nil { + return err + } + if len(c.keys) == 1 { + key := c.keys[0] + info, found := results.Config[key].(map[string]interface{}) + if !found { + return errors.Errorf("key %q not found in %q application settings.", key, c.applicationName) + } + out := &bytes.Buffer{} + err := cmd.FormatYaml(out, info["value"]) + if err != nil { + return err + } + fmt.Fprint(ctx.Stdout, out.String()) + return nil + } + + resultsMap := map[string]interface{}{ + "application": results.Application, + "charm": results.Charm, + "settings": results.Config, + } + return c.out.Write(ctx, resultsMap) +} + +// validateValues reads the values provided as args and validates that they are +// valid UTF-8. +func (c *configCommand) validateValues(ctx *cmd.Context) (map[string]string, error) { + settings := map[string]string{} + for k, v := range c.values { + //empty string is also valid as a setting value + if v == "" { + settings[k] = v + continue + } + + if v[0] != '@' { + if !utf8.ValidString(v) { + return nil, errors.Errorf("value for option %q contains non-UTF-8 sequences", k) + } + settings[k] = v + continue + } + nv, err := readValue(ctx, v[1:]) + if err != nil { + return nil, errors.Trace(err) + } + if !utf8.ValidString(nv) { + return nil, errors.Errorf("value for option %q contains non-UTF-8 sequences", k) + } + settings[k] = nv + } + return settings, nil +} + +// readValue reads the value of an option out of the named file. +// An empty content is valid, like in parsing the options. The upper +// size is 5M. 
+func readValue(ctx *cmd.Context, filename string) (string, error) { + absFilename := ctx.AbsPath(filename) + fi, err := os.Stat(absFilename) + if err != nil { + return "", errors.Errorf("cannot read option from file %q: %v", filename, err) + } + if fi.Size() > maxValueSize { + return "", errors.Errorf("size of option file is larger than 5M") + } + content, err := ioutil.ReadFile(ctx.AbsPath(filename)) + if err != nil { + return "", errors.Errorf("cannot read option from file %q: %v", filename, err) + } + return string(content), nil +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/application/config_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/application/config_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/application/config_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/application/config_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,362 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+package application_test + +import ( + "bytes" + "io/ioutil" + "os" + "strings" + "unicode/utf8" + + "github.com/juju/cmd" + jc "github.com/juju/testing/checkers" + "github.com/juju/utils" + gc "gopkg.in/check.v1" + goyaml "gopkg.in/yaml.v2" + + "github.com/juju/juju/apiserver/common" + "github.com/juju/juju/cmd/juju/application" + coretesting "github.com/juju/juju/testing" +) + +type configCommandSuite struct { + coretesting.FakeJujuXDGDataHomeSuite + dir string + fake *fakeApplicationAPI +} + +var ( + _ = gc.Suite(&configCommandSuite{}) + + validSetTestValue = "a value with spaces\nand newline\nand UTF-8 characters: \U0001F604 / \U0001F44D" + invalidSetTestValue = "a value with an invalid UTF-8 sequence: " + string([]byte{0xFF, 0xFF}) + yamlConfigValue = "dummy-application:\n skill-level: 9000\n username: admin001\n\n" +) + +var getTests = []struct { + application string + expected map[string]interface{} +}{ + { + "dummy-application", + map[string]interface{}{ + "application": "dummy-application", + "charm": "dummy", + "settings": map[string]interface{}{ + "title": map[string]interface{}{ + "description": "Specifies title", + "type": "string", + "value": "Nearly There", + }, + "skill-level": map[string]interface{}{ + "description": "Specifies skill-level", + "value": 100, + "type": "int", + }, + "username": map[string]interface{}{ + "description": "Specifies username", + "type": "string", + "value": "admin001", + }, + "outlook": map[string]interface{}{ + "description": "Specifies outlook", + "type": "string", + "value": "true", + }, + }, + }, + }, +} + +func (s *configCommandSuite) SetUpTest(c *gc.C) { + s.FakeJujuXDGDataHomeSuite.SetUpTest(c) + s.fake = &fakeApplicationAPI{name: "dummy-application", charmName: "dummy", + values: map[string]interface{}{ + "title": "Nearly There", + "skill-level": 100, + "username": "admin001", + "outlook": "true", + }} + s.FakeJujuXDGDataHomeSuite.SetUpTest(c) + + s.dir = c.MkDir() + c.Assert(utf8.ValidString(validSetTestValue), 
jc.IsTrue) + c.Assert(utf8.ValidString(invalidSetTestValue), jc.IsFalse) + setupValueFile(c, s.dir, "valid.txt", validSetTestValue) + setupValueFile(c, s.dir, "invalid.txt", invalidSetTestValue) + setupBigFile(c, s.dir) + setupConfigFile(c, s.dir) +} + +func (s *configCommandSuite) TestGetCommandInit(c *gc.C) { + // missing args + err := coretesting.InitCommand(application.NewConfigCommandForTest(s.fake), []string{}) + c.Assert(err, gc.ErrorMatches, "no application name specified") +} + +func (s *configCommandSuite) TestGetCommandInitWithApplication(c *gc.C) { + err := coretesting.InitCommand(application.NewConfigCommandForTest(s.fake), []string{"app"}) + // everything ok + c.Assert(err, jc.ErrorIsNil) +} + +func (s *configCommandSuite) TestGetCommandInitWithKey(c *gc.C) { + err := coretesting.InitCommand(application.NewConfigCommandForTest(s.fake), []string{"app", "key"}) + // everything ok + c.Assert(err, jc.ErrorIsNil) +} + +func (s *configCommandSuite) TestGetConfig(c *gc.C) { + for _, t := range getTests { + ctx := coretesting.Context(c) + code := cmd.Main(application.NewConfigCommandForTest(s.fake), ctx, []string{t.application}) + c.Check(code, gc.Equals, 0) + c.Assert(ctx.Stderr.(*bytes.Buffer).String(), gc.Equals, "") + // round trip via goyaml to avoid being sucked into a quagmire of + // map[interface{}]interface{} vs map[string]interface{}. This is + // also required if we add json support to this command. 
+ buf, err := goyaml.Marshal(t.expected) + c.Assert(err, jc.ErrorIsNil) + expected := make(map[string]interface{}) + err = goyaml.Unmarshal(buf, &expected) + c.Assert(err, jc.ErrorIsNil) + + actual := make(map[string]interface{}) + err = goyaml.Unmarshal(ctx.Stdout.(*bytes.Buffer).Bytes(), &actual) + c.Assert(err, jc.ErrorIsNil) + c.Assert(actual, gc.DeepEquals, expected) + } +} + +func (s *configCommandSuite) TestGetConfigKey(c *gc.C) { + ctx := coretesting.Context(c) + code := cmd.Main(application.NewConfigCommandForTest(s.fake), ctx, []string{"dummy-application", "title"}) + c.Check(code, gc.Equals, 0) + c.Assert(ctx.Stderr.(*bytes.Buffer).String(), gc.Equals, "") + c.Assert(ctx.Stdout.(*bytes.Buffer).String(), gc.Equals, "Nearly There\n") +} + +func (s *configCommandSuite) TestGetConfigKeyNotFound(c *gc.C) { + ctx := coretesting.Context(c) + code := cmd.Main(application.NewConfigCommandForTest(s.fake), ctx, []string{"dummy-application", "invalid"}) + c.Check(code, gc.Equals, 1) + c.Assert(ctx.Stderr.(*bytes.Buffer).String(), gc.Equals, "error: key \"invalid\" not found in \"dummy-application\" application settings.\n") + c.Assert(ctx.Stdout.(*bytes.Buffer).String(), gc.Equals, "") +} + +func (s *configCommandSuite) TestSetCommandInit(c *gc.C) { + // missing args + err := coretesting.InitCommand(application.NewConfigCommandForTest(s.fake), []string{}) + c.Assert(err, gc.ErrorMatches, "no application name specified") + + // missing application name + err = coretesting.InitCommand(application.NewConfigCommandForTest(s.fake), []string{"name=foo"}) + c.Assert(err, gc.ErrorMatches, "no application name specified") + + // --file path, but no application + err = coretesting.InitCommand(application.NewConfigCommandForTest(s.fake), []string{"--file", "testconfig.yaml"}) + c.Assert(err, gc.ErrorMatches, "no application name specified") + + // --file and options specified + err = coretesting.InitCommand(application.NewConfigCommandForTest(s.fake), []string{"application", 
"--file", "testconfig.yaml", "bees="}) + c.Assert(err, gc.ErrorMatches, "cannot specify --file and key=value arguments simultaneously") + + // --reset and no config name provided + err = coretesting.InitCommand(application.NewConfigCommandForTest(s.fake), []string{"application", "--reset"}) + c.Assert(err, gc.ErrorMatches, "flag needs an argument: --reset") + + // cannot set and retrieve simultaneously + err = coretesting.InitCommand(application.NewConfigCommandForTest(s.fake), []string{"application", "get", "set=value"}) + c.Assert(err, gc.ErrorMatches, "cannot set and retrieve values simultaneously") + + // cannot reset and get simultaneously + err = coretesting.InitCommand(application.NewConfigCommandForTest(s.fake), []string{"application", "--reset", "reset", "get"}) + c.Assert(err, gc.ErrorMatches, "cannot reset and retrieve values simultaneously") + + // invalid reset keys + err = coretesting.InitCommand(application.NewConfigCommandForTest(s.fake), []string{"application", "--reset", "reset,bad=key"}) + c.Assert(err, gc.ErrorMatches, `--reset accepts a comma delimited set of keys "a,b,c", received: "bad=key"`) + + // init too many args fails + err = coretesting.InitCommand(application.NewConfigCommandForTest(s.fake), []string{"application", "key", "another"}) + c.Assert(err, gc.ErrorMatches, "can only retrieve a single value, or all values") + +} + +func (s *configCommandSuite) TestSetOptionSuccess(c *gc.C) { + s.assertSetSuccess(c, s.dir, []string{ + "username=hello", + "outlook=hello@world.tld", + }, map[string]interface{}{ + "username": "hello", + "outlook": "hello@world.tld", + }) + s.assertSetSuccess(c, s.dir, []string{ + "username=hello=foo", + }, map[string]interface{}{ + "username": "hello=foo", + "outlook": "hello@world.tld", + }) + s.assertSetSuccess(c, s.dir, []string{ + "username=@valid.txt", + }, map[string]interface{}{ + "username": validSetTestValue, + "outlook": "hello@world.tld", + }) + s.assertSetSuccess(c, s.dir, []string{ + "username=", + 
}, map[string]interface{}{ + "username": "", + "outlook": "hello@world.tld", + }) +} + +func (s *configCommandSuite) TestSetSameValue(c *gc.C) { + s.assertSetSuccess(c, s.dir, []string{ + "username=hello", + "outlook=hello@world.tld", + }, map[string]interface{}{ + "username": "hello", + "outlook": "hello@world.tld", + }) + s.assertSetWarning(c, s.dir, []string{ + "username=hello", + }, "the configuration setting \"username\" already has the value \"hello\"") + s.assertSetWarning(c, s.dir, []string{ + "outlook=hello@world.tld", + }, "the configuration setting \"outlook\" already has the value \"hello@world.tld\"") + +} + +func (s *configCommandSuite) TestSetOptionFail(c *gc.C) { + s.assertSetFail(c, s.dir, []string{"foo", "bar"}, + "error: can only retrieve a single value, or all values\n") + s.assertSetFail(c, s.dir, []string{"=bar"}, "error: expected \"key=value\", got \"=bar\"\n") + s.assertSetFail(c, s.dir, []string{ + "username=@missing.txt", + }, "error: cannot read option from file \"missing.txt\": .* "+utils.NoSuchFileErrRegexp+"\n") + s.assertSetFail(c, s.dir, []string{ + "username=@big.txt", + }, "error: size of option file is larger than 5M\n") + s.assertSetFail(c, s.dir, []string{ + "username=@invalid.txt", + }, "error: value for option \"username\" contains non-UTF-8 sequences\n") +} + +func (s *configCommandSuite) TestSetConfig(c *gc.C) { + s.assertSetFail(c, s.dir, []string{ + "--file", + "missing.yaml", + }, "error.* "+utils.NoSuchFileErrRegexp+"\n") + + ctx := coretesting.ContextForDir(c, s.dir) + code := cmd.Main(application.NewConfigCommandForTest(s.fake), ctx, []string{ + "dummy-application", + "--file", + "testconfig.yaml"}) + + c.Check(code, gc.Equals, 0) + c.Check(s.fake.config, gc.Equals, yamlConfigValue) +} + +func (s *configCommandSuite) TestSetFromStdin(c *gc.C) { + s.fake = &fakeApplicationAPI{name: "dummy-application"} + ctx := coretesting.Context(c) + ctx.Stdin = strings.NewReader("settings:\n username:\n value: world\n") + code := 
cmd.Main(application.NewConfigCommandForTest(s.fake), ctx, []string{ + "dummy-application", + "--file", + "-"}) + + c.Check(code, gc.Equals, 0) + c.Check(s.fake.config, jc.DeepEquals, "settings:\n username:\n value: world\n") +} + +func (s *configCommandSuite) TestResetConfigToDefault(c *gc.C) { + s.fake = &fakeApplicationAPI{name: "dummy-application", values: map[string]interface{}{ + "username": "hello", + }} + s.assertSetSuccess(c, s.dir, []string{ + "--reset", + "username", + }, make(map[string]interface{})) +} + +func (s *configCommandSuite) TestBlockSetConfig(c *gc.C) { + // Block operation + s.fake.err = common.OperationBlockedError("TestBlockSetConfig") + ctx := coretesting.ContextForDir(c, s.dir) + code := cmd.Main(application.NewConfigCommandForTest(s.fake), ctx, []string{ + "dummy-application", + "--file", + "testconfig.yaml"}) + c.Check(code, gc.Equals, 1) + // msg is logged + stripped := strings.Replace(c.GetTestLog(), "\n", "", -1) + c.Check(stripped, gc.Matches, ".*TestBlockSetConfig.*") +} + +// assertSetSuccess sets configuration options and checks the expected settings. +func (s *configCommandSuite) assertSetSuccess(c *gc.C, dir string, args []string, expect map[string]interface{}) { + ctx := coretesting.ContextForDir(c, dir) + code := cmd.Main(application.NewConfigCommandForTest(s.fake), ctx, append([]string{"dummy-application"}, args...)) + c.Assert(code, gc.Equals, 0) +} + +// assertSetFail sets configuration options and checks the expected error. 
+func (s *configCommandSuite) assertSetFail(c *gc.C, dir string, args []string, err string) { + ctx := coretesting.ContextForDir(c, dir) + code := cmd.Main(application.NewConfigCommandForTest(s.fake), ctx, append([]string{"dummy-application"}, args...)) + c.Check(code, gc.Not(gc.Equals), 0) + c.Assert(ctx.Stderr.(*bytes.Buffer).String(), gc.Matches, err) +} + +func (s *configCommandSuite) assertSetWarning(c *gc.C, dir string, args []string, w string) { + ctx := coretesting.ContextForDir(c, dir) + code := cmd.Main(application.NewConfigCommandForTest(s.fake), ctx, append([]string{"dummy-application"}, args...)) + c.Check(code, gc.Equals, 0) + + c.Assert(strings.Replace(c.GetTestLog(), "\n", " ", -1), gc.Matches, ".*WARNING.*"+w+".*") +} + +// setupValueFile creates a file containing one value for testing +// set with name=@filename. +func setupValueFile(c *gc.C, dir, filename, value string) string { + ctx := coretesting.ContextForDir(c, dir) + path := ctx.AbsPath(filename) + content := []byte(value) + err := ioutil.WriteFile(path, content, 0666) + c.Assert(err, jc.ErrorIsNil) + return path +} + +// setupBigFile creates a too big file for testing +// set with name=@filename. +func setupBigFile(c *gc.C, dir string) string { + ctx := coretesting.ContextForDir(c, dir) + path := ctx.AbsPath("big.txt") + file, err := os.Create(path) + c.Assert(err, jc.ErrorIsNil) + defer file.Close() + chunk := make([]byte, 1024) + for i := 0; i < cap(chunk); i++ { + chunk[i] = byte(i % 256) + } + for i := 0; i < 6000; i++ { + _, err = file.Write(chunk) + c.Assert(err, jc.ErrorIsNil) + } + return path +} + +// setupConfigFile creates a configuration file for testing set +// with the --file argument specifying a configuration file. 
+func setupConfigFile(c *gc.C, dir string) string { + ctx := coretesting.ContextForDir(c, dir) + path := ctx.AbsPath("testconfig.yaml") + content := []byte(yamlConfigValue) + err := ioutil.WriteFile(path, content, 0666) + c.Assert(err, jc.ErrorIsNil) + return path +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/application/constraints.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/application/constraints.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/application/constraints.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/application/constraints.go 2016-10-13 14:31:49.000000000 +0000 @@ -5,11 +5,12 @@ import ( "fmt" + "io" "github.com/juju/cmd" "github.com/juju/errors" + "github.com/juju/gnuflag" "gopkg.in/juju/names.v2" - "launchpad.net/gnuflag" "github.com/juju/juju/api/application" "github.com/juju/juju/cmd/juju/block" @@ -58,7 +59,7 @@ when deploying. Examples: - juju set-constraints mysql mem=8G cpu-cores=4 + juju set-constraints mysql mem=8G cores=4 juju set-constraints -m mymodel apache2 mem=8G arch=amd64 See also: @@ -108,11 +109,13 @@ } } -func formatConstraints(value interface{}) ([]byte, error) { - return []byte(value.(constraints.Value).String()), nil +func formatConstraints(writer io.Writer, value interface{}) error { + fmt.Fprint(writer, value.(constraints.Value).String()) + return nil } func (c *serviceGetConstraintsCommand) SetFlags(f *gnuflag.FlagSet) { + c.ModelCommandBase.SetFlags(f) c.out.AddFlags(f, "constraints", map[string]cmd.Formatter{ "constraints": formatConstraints, "yaml": cmd.FormatYaml, @@ -122,10 +125,10 @@ func (c *serviceGetConstraintsCommand) Init(args []string) error { if len(args) == 0 { - return fmt.Errorf("no application name specified") + return errors.Errorf("no application name specified") } if !names.IsValidApplication(args[0]) { - return fmt.Errorf("invalid application name %q", args[0]) + return errors.Errorf("invalid application name 
%q", args[0]) } c.ApplicationName, args = args[0], args[1:] @@ -167,10 +170,10 @@ func (c *serviceSetConstraintsCommand) Init(args []string) (err error) { if len(args) == 0 { - return fmt.Errorf("no application name specified") + return errors.Errorf("no application name specified") } if !names.IsValidApplication(args[0]) { - return fmt.Errorf("invalid application name %q", args[0]) + return errors.Errorf("invalid application name %q", args[0]) } c.ApplicationName, args = args[0], args[1:] diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/application/deploy.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/application/deploy.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/application/deploy.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/application/deploy.go 2016-10-13 14:31:49.000000000 +0000 @@ -5,29 +5,30 @@ import ( "archive/zip" - "fmt" "os" "path/filepath" "strings" "github.com/juju/cmd" "github.com/juju/errors" + "github.com/juju/gnuflag" "gopkg.in/juju/charm.v6-unstable" - charmresource "gopkg.in/juju/charm.v6-unstable/resource" "gopkg.in/juju/charmrepo.v2-unstable" - csclientparams "gopkg.in/juju/charmrepo.v2-unstable/csclient/params" + "gopkg.in/juju/charmrepo.v2-unstable/csclient" + "gopkg.in/juju/charmrepo.v2-unstable/csclient/params" "gopkg.in/juju/names.v2" "gopkg.in/macaroon-bakery.v1/httpbakery" "gopkg.in/macaroon.v1" - "launchpad.net/gnuflag" "github.com/juju/juju/api" - apiannotations "github.com/juju/juju/api/annotations" + "github.com/juju/juju/api/annotations" "github.com/juju/juju/api/application" apicharms "github.com/juju/juju/api/charms" "github.com/juju/juju/api/modelconfig" + apiparams "github.com/juju/juju/apiserver/params" "github.com/juju/juju/charmstore" "github.com/juju/juju/cmd/juju/block" + "github.com/juju/juju/cmd/juju/common" "github.com/juju/juju/cmd/modelcmd" "github.com/juju/juju/constraints" "github.com/juju/juju/environs/config" @@ -38,28 +39,211 
@@ var planURL = "https://api.jujucharms.com/omnibus/v2" +type CharmAdder interface { + AddLocalCharm(*charm.URL, charm.Charm) (*charm.URL, error) + AddCharm(*charm.URL, params.Channel) error + AddCharmWithAuthorization(*charm.URL, params.Channel, *macaroon.Macaroon) error + AuthorizeCharmstoreEntity(*charm.URL) (*macaroon.Macaroon, error) +} + +type ApplicationAPI interface { + AddMachines(machineParams []apiparams.AddMachineParams) ([]apiparams.AddMachinesResult, error) + AddRelation(endpoints ...string) (*apiparams.AddRelationResults, error) + AddUnits(application string, numUnits int, placement []*instance.Placement) ([]string, error) + Expose(application string) error + GetCharmURL(serviceName string) (*charm.URL, error) + SetAnnotation(annotations map[string]map[string]string) ([]apiparams.ErrorResult, error) + SetCharm(application.SetCharmConfig) error + SetConstraints(application string, constraints constraints.Value) error + Update(apiparams.ApplicationUpdate) error +} + +type ModelAPI interface { + ModelUUID() (string, bool) + ModelGet() (map[string]interface{}, error) +} + +// MeteredDeployAPI represents the methods of the API the deploy +// command needs for metered charms. +type MeteredDeployAPI interface { + IsMetered(charmURL string) (bool, error) + SetMetricCredentials(service string, credentials []byte) error +} + +// DeployAPI represents the methods of the API the deploy +// command needs. +type DeployAPI interface { + // TODO(katco): Pare DeployAPI down to only the methods required + // by the deploy command. 
+ api.Connection + CharmAdder + MeteredDeployAPI + ApplicationAPI + ModelAPI + + // ApplicationClient + CharmInfo(string) (*apicharms.CharmInfo, error) + Deploy(application.DeployArgs) error + Status(patterns []string) (*apiparams.FullStatus, error) + + Resolve(*config.Config, *charm.URL) (*charm.URL, params.Channel, []string, error) + + GetBundle(*charm.URL) (charm.Bundle, error) + + WatchAll() (*api.AllWatcher, error) + + // AddPendingResources(client.AddPendingResourcesArgs) (ids []string, _ error) + // DeployResources(cmd.DeployResourcesArgs) (ids []string, _ error) +} + +// The following structs exist purely because Go cannot create a +// struct with a field named the same as a method name. The DeployAPI +// needs to both embed a *.Client and provide the +// api.Connection Client method. +// +// Once we pare down DeployAPI, this will no longer be a problem. + +type apiClient struct { + *api.Client +} + +type charmsClient struct { + *apicharms.Client +} + +type applicationClient struct { + *application.Client +} + +type modelConfigClient struct { + *modelconfig.Client +} + +type charmRepoClient struct { + *charmrepo.CharmStore +} + +type charmstoreClient struct { + *csclient.Client +} + +type annotationsClient struct { + *annotations.Client +} + +func (a *charmstoreClient) AuthorizeCharmstoreEntity(url *charm.URL) (*macaroon.Macaroon, error) { + return authorizeCharmStoreEntity(a.Client, url) +} + +type deployAPIAdapter struct { + api.Connection + *apiClient + *charmsClient + *applicationClient + *modelConfigClient + *charmRepoClient + *charmstoreClient + *annotationsClient +} + +func (a *deployAPIAdapter) Client() *api.Client { + return a.apiClient.Client +} + +func (a *deployAPIAdapter) ModelUUID() (string, bool) { + return a.apiClient.ModelUUID() +} + +func (a *deployAPIAdapter) Deploy(args application.DeployArgs) error { + for i, p := range args.Placement { + if p.Scope == "model-uuid" { + p.Scope = a.applicationClient.ModelUUID() + } + args.Placement[i] = 
p + } + + return errors.Trace(a.applicationClient.Deploy(args)) +} + +func (a *deployAPIAdapter) Resolve(cfg *config.Config, url *charm.URL) ( + *charm.URL, + params.Channel, + []string, + error, +) { + return resolveCharm(a.charmRepoClient.ResolveWithChannel, cfg, url) +} + +func (a *deployAPIAdapter) Get(url *charm.URL) (charm.Charm, error) { + return a.charmRepoClient.Get(url) +} + +func (a *deployAPIAdapter) SetAnnotation(annotations map[string]map[string]string) ([]apiparams.ErrorResult, error) { + return a.annotationsClient.Set(annotations) +} + +type NewAPIRootFn func() (DeployAPI, error) + +func NewDefaultDeployCommand() cmd.Command { + return NewDeployCommandWithDefaultAPI([]DeployStep{ + &RegisterMeteredCharm{ + RegisterURL: planURL + "/plan/authorize", + QueryURL: planURL + "/charm", + }, + }) +} + +func NewDeployCommandWithDefaultAPI(steps []DeployStep) cmd.Command { + deployCmd := &DeployCommand{Steps: steps} + cmd := modelcmd.Wrap(deployCmd) + deployCmd.NewAPIRoot = func() (DeployAPI, error) { + apiRoot, err := deployCmd.ModelCommandBase.NewAPIRoot() + if err != nil { + return nil, errors.Trace(err) + } + bakeryClient, err := deployCmd.BakeryClient() + if err != nil { + return nil, errors.Trace(err) + } + cstoreClient := newCharmStoreClient(bakeryClient).WithChannel(deployCmd.Channel) + + adapter := &deployAPIAdapter{ + Connection: apiRoot, + apiClient: &apiClient{Client: apiRoot.Client()}, + charmsClient: &charmsClient{Client: apicharms.NewClient(apiRoot)}, + applicationClient: &applicationClient{Client: application.NewClient(apiRoot)}, + modelConfigClient: &modelConfigClient{Client: modelconfig.NewClient(apiRoot)}, + charmstoreClient: &charmstoreClient{Client: cstoreClient}, + annotationsClient: &annotationsClient{Client: annotations.NewClient(apiRoot)}, + charmRepoClient: &charmRepoClient{CharmStore: charmrepo.NewCharmStoreFromClient(cstoreClient)}, + } + + return adapter, nil + } + return cmd +} + // NewDeployCommand returns a command to deploy 
services. -func NewDeployCommand() cmd.Command { +func NewDeployCommand(newAPIRoot NewAPIRootFn, steps []DeployStep) cmd.Command { return modelcmd.Wrap(&DeployCommand{ - Steps: []DeployStep{ - &RegisterMeteredCharm{ - RegisterURL: planURL + "/plan/authorize", - QueryURL: planURL + "/charm", - }, - }}) + Steps: steps, + NewAPIRoot: newAPIRoot, + }) } type DeployCommand struct { modelcmd.ModelCommandBase UnitCommandBase + // CharmOrBundle is either a charm URL, a path where a charm can be found, // or a bundle name. CharmOrBundle string // Channel holds the charmstore channel to use when obtaining // the charm to be deployed. - Channel csclientparams.Channel + Channel params.Channel + // Series is the series of the charm to deploy. Series string // Force is used to allow a charm to be deployed onto a machine @@ -68,6 +252,7 @@ ApplicationName string Config cmd.FileVar + ConstraintsStr string Constraints constraints.Value BindToSpaces string @@ -88,6 +273,9 @@ Bindings map[string]string Steps []DeployStep + // NewAPIRoot stores a function which returns a new API root. + NewAPIRoot NewAPIRootFn + flagSet *gnuflag.FlagSet } @@ -195,13 +383,16 @@ // DeployStep is an action that needs to be taken during charm deployment. type DeployStep interface { + // Set flags necessary for the deploy step. SetFlags(*gnuflag.FlagSet) + // RunPre runs before the call is made to add the charm to the environment. - RunPre(api.Connection, *httpbakery.Client, *cmd.Context, DeploymentInfo) error + RunPre(MeteredDeployAPI, *httpbakery.Client, *cmd.Context, DeploymentInfo) error + // RunPost runs after the call is made to add the charm to the environment. // The error parameter is used to notify the step of a previously occurred error. 
- RunPost(api.Connection, *httpbakery.Client, *cmd.Context, DeploymentInfo, error) error + RunPost(MeteredDeployAPI, *httpbakery.Client, *cmd.Context, DeploymentInfo, error) error } // DeploymentInfo is used to maintain all deployment information for @@ -210,6 +401,7 @@ CharmID charmstore.CharmID ApplicationName string ModelUUID string + CharmInfo *apicharms.CharmInfo } func (c *DeployCommand) Info() *cmd.Info { @@ -224,18 +416,20 @@ var ( // charmOnlyFlags and bundleOnlyFlags are used to validate flags based on // whether we are deploying a charm or a bundle. - charmOnlyFlags = []string{"bind", "config", "constraints", "force", "n", "num-units", "series", "to", "resource"} - bundleOnlyFlags = []string{} + charmOnlyFlags = []string{"bind", "config", "constraints", "force", "n", "num-units", "series", "to", "resource"} + bundleOnlyFlags = []string{} + modelCommandBaseFlags = []string{"B", "no-browser-login"} ) func (c *DeployCommand) SetFlags(f *gnuflag.FlagSet) { // Keep above charmOnlyFlags and bundleOnlyFlags lists updated when adding // new flags. 
c.UnitCommandBase.SetFlags(f) + c.ModelCommandBase.SetFlags(f) f.IntVar(&c.NumUnits, "n", 1, "Number of application units to deploy for principal charms") f.StringVar((*string)(&c.Channel), "channel", "", "Channel to use when getting the charm or bundle from the charm store") f.Var(&c.Config, "config", "Path to yaml-formatted application config") - f.Var(constraints.ConstraintsValue{Target: &c.Constraints}, "constraints", "Set application constraints") + f.StringVar(&c.ConstraintsStr, "constraints", "", "Set application constraints") f.StringVar(&c.Series, "series", "", "The series on which to deploy") f.BoolVar(&c.Force, "force", false, "Allow a charm to be deployed to a machine running an unsupported series") f.Var(storageFlag{&c.Storage, &c.BundleStorage}, "storage", "Charm storage constraints") @@ -255,7 +449,7 @@ switch len(args) { case 2: if !names.IsValidApplication(args[1]) { - return fmt.Errorf("invalid application name %q", args[1]) + return errors.Errorf("invalid application name %q", args[1]) } c.ApplicationName = args[1] fallthrough @@ -266,8 +460,8 @@ default: return cmd.CheckEmpty(args[2:]) } - err := c.parseBind() - if err != nil { + + if err := c.parseBind(); err != nil { return err } return c.UnitCommandBase.Init(args) @@ -277,221 +471,47 @@ ModelGet() (map[string]interface{}, error) } -var getModelConfig = func(client ModelConfigGetter) (*config.Config, error) { +var getModelConfig = func(api ModelConfigGetter) (*config.Config, error) { // Separated into a variable for easy overrides - attrs, err := client.ModelGet() + attrs, err := api.ModelGet() if err != nil { - return nil, err + return nil, errors.Wrap(err, errors.New("cannot fetch model settings")) } return config.New(config.NoDefaults, attrs) } -func (c *DeployCommand) maybeReadLocalBundleData(ctx *cmd.Context) ( - _ *charm.BundleData, bundleFile string, bundleFilePath string, _ error, -) { - bundleFile = c.CharmOrBundle - bundleData, err := charmrepo.ReadBundleFile(bundleFile) - if err == 
nil { - // For local bundles, we extract the local path of - // the bundle directory. - bundleFilePath = filepath.Dir(ctx.AbsPath(bundleFile)) - } else { - // We may have been given a local bundle archive or exploded directory. - if bundle, burl, pathErr := charmrepo.NewBundleAtPath(bundleFile); pathErr == nil { - bundleData = bundle.Data() - bundleFile = burl.String() - if info, err := os.Stat(bundleFile); err == nil && info.IsDir() { - bundleFilePath = bundleFile - } - err = nil - } else { - err = pathErr - } - } - return bundleData, bundleFile, bundleFilePath, err -} - -func (c *DeployCommand) deployCharmOrBundle(ctx *cmd.Context, client *api.Client, modelConfigClient *modelconfig.Client) error { - deployer := applicationDeployer{ctx, c} - - // We may have been given a local bundle file. - bundleData, bundleIdent, bundleFilePath, err := c.maybeReadLocalBundleData(ctx) - // If the bundle files existed but we couldn't read them, then - // return that error rather than trying to interpret as a charm. - if err != nil { - if info, statErr := os.Stat(c.CharmOrBundle); statErr == nil { - if info.IsDir() { - if _, ok := err.(*charmrepo.NotFoundError); !ok { - return err - } - } - } - } - - // If not a bundle then maybe a local charm. - if err != nil { - // Charm may have been supplied via a path reference. - ch, curl, charmErr := charmrepo.NewCharmAtPathForceSeries(c.CharmOrBundle, c.Series, c.Force) - if charmErr == nil { - if curl, charmErr = client.AddLocalCharm(curl, ch); charmErr != nil { - return charmErr - } - id := charmstore.CharmID{ - URL: curl, - // Local charms don't need a channel. - } - var csMac *macaroon.Macaroon // local charms don't need one. 
- return c.deployCharm(deployCharmArgs{ - id: id, - csMac: csMac, - series: curl.Series, - ctx: ctx, - client: client, - deployer: &deployer, - }) - } - // We check for several types of known error which indicate - // that the supplied reference was indeed a path but there was - // an issue reading the charm located there. - if charm.IsMissingSeriesError(charmErr) { - return charmErr - } - if charm.IsUnsupportedSeriesError(charmErr) { - return errors.Errorf("%v. Use --force to deploy the charm anyway.", charmErr) - } - if errors.Cause(charmErr) == zip.ErrFormat { - return errors.Errorf("invalid charm or bundle provided at %q", c.CharmOrBundle) - } - err = charmErr - } - if _, ok := err.(*charmrepo.NotFoundError); ok { - return errors.Errorf("no charm or bundle found at %q", c.CharmOrBundle) - } - // If we get a "not exists" error then we attempt to interpret the supplied - // charm or bundle reference as a URL below, otherwise we return the error. - if err != nil && err != os.ErrNotExist { - return err - } - - conf, err := getModelConfig(modelConfigClient) - if err != nil { - return err - } - - bakeryClient, err := c.BakeryClient() - if err != nil { +func (c *DeployCommand) deployBundle( + ctx *cmd.Context, + filePath string, + data *charm.BundleData, + channel params.Channel, + apiRoot DeployAPI, + bundleStorage map[string]map[string]storage.Constraints, +) error { + // TODO(ericsnow) Do something with the CS macaroons that were returned? + if _, err := deployBundle( + filePath, + data, + channel, + apiRoot, + ctx, + bundleStorage, + ); err != nil { return errors.Trace(err) } - csClient := newCharmStoreClient(bakeryClient).WithChannel(c.Channel) - - resolver := newCharmURLResolver(conf, csClient) - - var storeCharmOrBundleURL *charm.URL - var store *charmrepo.CharmStore - var supportedSeries []string - - var origURL *charm.URL - - // If we don't already have a bundle loaded, we try the charm store for a charm or bundle. 
- if bundleData == nil { - origURL, err = charm.ParseURL(c.CharmOrBundle) - if err != nil { - return errors.Trace(err) - } - // Charm or bundle has been supplied as a URL so we resolve and deploy using the store. - storeCharmOrBundleURL, c.Channel, supportedSeries, store, err = resolver.resolve(origURL) - if charm.IsUnsupportedSeriesError(err) { - return errors.Errorf("%v. Use --force to deploy the charm anyway.", err) - } - if err != nil { - return errors.Trace(err) - } - if storeCharmOrBundleURL.Series == "bundle" { - // Load the bundle entity. - bundle, err := store.GetBundle(storeCharmOrBundleURL) - if err != nil { - return errors.Trace(err) - } - bundleData = bundle.Data() - bundleIdent = storeCharmOrBundleURL.String() - } - } - // Handle a bundle. - if bundleData != nil { - if flags := getFlags(c.flagSet, charmOnlyFlags); len(flags) > 0 { - return errors.Errorf("Flags provided but not supported when deploying a bundle: %s.", strings.Join(flags, ", ")) - } - // TODO(ericsnow) Do something with the CS macaroons that were returned? - if _, err := deployBundle( - bundleFilePath, bundleData, c.Channel, client, &deployer, resolver, ctx, c.BundleStorage, - ); err != nil { - return errors.Trace(err) - } - ctx.Infof("deployment of bundle %q completed", bundleIdent) - return nil - } - // Handle a charm. - if flags := getFlags(c.flagSet, bundleOnlyFlags); len(flags) > 0 { - return errors.Errorf("Flags provided but not supported when deploying a charm: %s.", strings.Join(flags, ", ")) - } - - selector := seriesSelector{ - charmURLSeries: origURL.Series, - seriesFlag: c.Series, - supportedSeries: supportedSeries, - force: c.Force, - conf: conf, - fromBundle: false, - } - - // Get the series to use. - series, message, err := selector.charmSeries() - if charm.IsUnsupportedSeriesError(err) { - return errors.Errorf("%v. Use --force to deploy the charm anyway.", err) - } - - // Store the charm in state. 
- curl, csMac, err := addCharmFromURL(client, storeCharmOrBundleURL, c.Channel, csClient) - if err != nil { - if err1, ok := errors.Cause(err).(*termsRequiredError); ok { - terms := strings.Join(err1.Terms, " ") - return errors.Errorf(`Declined: please agree to the following terms %s. Try: "juju agree %s"`, terms, terms) - } - return errors.Annotatef(err, "storing charm for URL %q", storeCharmOrBundleURL) - } - ctx.Infof("Added charm %q to the model.", curl) - ctx.Infof("Deploying charm %q %v.", curl, fmt.Sprintf(message, series)) - id := charmstore.CharmID{ - URL: curl, - Channel: c.Channel, - } - return c.deployCharm(deployCharmArgs{ - id: id, - csMac: csMac, - series: series, - ctx: ctx, - client: client, - deployer: &deployer, - }) -} - -type deployCharmArgs struct { - id charmstore.CharmID - csMac *macaroon.Macaroon - series string - ctx *cmd.Context - client *api.Client - deployer *applicationDeployer + ctx.Infof("Deploy of bundle completed.") + return nil } -func (c *DeployCommand) deployCharm(args deployCharmArgs) (rErr error) { - conn, err := c.NewAPIRoot() - if err != nil { - return errors.Trace(err) - } - charmsClient := apicharms.NewClient(conn) - charmInfo, err := charmsClient.CharmInfo(args.id.URL.String()) +func (c *DeployCommand) deployCharm( + id charmstore.CharmID, + csMac *macaroon.Macaroon, + series string, + ctx *cmd.Context, + apiRoot DeployAPI, +) (rErr error) { + charmInfo, err := apiRoot.CharmInfo(id.URL.String()) if err != nil { return err } @@ -514,93 +534,74 @@ var configYAML []byte if c.Config.Path != "" { - configYAML, err = c.Config.Read(args.ctx) + configYAML, err = c.Config.Read(ctx) if err != nil { - return err + return errors.Trace(err) } } - state, err := c.NewAPIRoot() - if err != nil { - return errors.Trace(err) - } bakeryClient, err := c.BakeryClient() if err != nil { return errors.Trace(err) } - uuid, err := args.client.ModelUUID() - if err != nil { - return errors.Trace(err) + uuid, ok := apiRoot.ModelUUID() + if !ok { + 
return errors.New("API connection is controller-only (should never happen)") } deployInfo := DeploymentInfo{ - CharmID: args.id, + CharmID: id, ApplicationName: serviceName, ModelUUID: uuid, + CharmInfo: charmInfo, } for _, step := range c.Steps { - err = step.RunPre(state, bakeryClient, args.ctx, deployInfo) + err = step.RunPre(apiRoot, bakeryClient, ctx, deployInfo) if err != nil { - return err + return errors.Trace(err) } } defer func() { for _, step := range c.Steps { - err = step.RunPost(state, bakeryClient, args.ctx, deployInfo, rErr) + err = errors.Trace(step.RunPost(apiRoot, bakeryClient, ctx, deployInfo, rErr)) if err != nil { rErr = err } } }() - if args.id.URL != nil && args.id.URL.Schema != "local" && len(charmInfo.Meta.Terms) > 0 { - args.ctx.Infof("Deployment under prior agreement to terms: %s", + if id.URL != nil && id.URL.Schema != "local" && len(charmInfo.Meta.Terms) > 0 { + ctx.Infof("Deployment under prior agreement to terms: %s", strings.Join(charmInfo.Meta.Terms, " ")) } - ids, err := handleResources(c, c.Resources, serviceName, args.id, args.csMac, charmInfo.Meta.Resources) + ids, err := resourceadapters.DeployResources( + serviceName, + id, + csMac, + c.Resources, + charmInfo.Meta.Resources, + apiRoot, + ) if err != nil { return errors.Trace(err) } - params := applicationDeployParams{ - charmID: args.id, - applicationName: serviceName, - series: args.series, - numUnits: numUnits, - configYAML: string(configYAML), - constraints: c.Constraints, - placement: c.Placement, - storage: c.Storage, - spaceBindings: c.Bindings, - resources: ids, - } - return args.deployer.applicationDeploy(params) -} - -type APICmd interface { - NewAPIRoot() (api.Connection, error) -} - -func handleResources(c APICmd, resources map[string]string, serviceName string, chID charmstore.CharmID, csMac *macaroon.Macaroon, metaResources map[string]charmresource.Meta) (map[string]string, error) { - if len(resources) == 0 && len(metaResources) == 0 { - return nil, nil - } - - 
api, err := c.NewAPIRoot() - if err != nil { - return nil, errors.Trace(err) - } - - ids, err := resourceadapters.DeployResources(serviceName, chID, csMac, resources, metaResources, api) - if err != nil { - return nil, errors.Trace(err) - } - - return ids, nil + return errors.Trace(apiRoot.Deploy(application.DeployArgs{ + CharmID: id, + Cons: c.Constraints, + ApplicationName: serviceName, + Series: series, + NumUnits: numUnits, + ConfigYAML: string(configYAML), + Placement: c.Placement, + Storage: c.Storage, + Resources: ids, + EndpointBindings: c.Bindings, + })) } const parseBindErrorPrefix = "--bind must be in the form '[] [= ...]'. " @@ -648,129 +649,315 @@ return nil } -type applicationDeployParams struct { - charmID charmstore.CharmID - applicationName string - series string - numUnits int - configYAML string - constraints constraints.Value - placement []*instance.Placement - storage map[string]storage.Constraints - spaceBindings map[string]string - resources map[string]string -} - -type applicationDeployer struct { - ctx *cmd.Context - api APICmd -} +func (c *DeployCommand) Run(ctx *cmd.Context) error { + var err error + c.Constraints, err = common.ParseConstraints(ctx, c.ConstraintsStr) + if err != nil { + return err + } + apiRoot, err := c.NewAPIRoot() + if err != nil { + return errors.Trace(err) + } + defer apiRoot.Close() -func (d *applicationDeployer) newApplicationAPIClient() (*application.Client, error) { - root, err := d.api.NewAPIRoot() + deploy, err := findDeployerFIFO( + c.maybeReadLocalBundle, + c.maybeReadLocalCharm, + c.maybePredeployedLocalCharm, + c.maybeReadCharmstoreBundleFn(apiRoot), + c.charmStoreCharm, // This always returns a deployer + ) if err != nil { - return nil, errors.Trace(err) + return errors.Trace(err) } - return application.NewClient(root), nil + + return block.ProcessBlockedError(deploy(ctx, apiRoot), block.BlockChange) } -func (d *applicationDeployer) newModelConfigAPIClient() (*modelconfig.Client, error) { - root, err := 
d.api.NewAPIRoot() - if err != nil { - return nil, errors.Trace(err) +func findDeployerFIFO(maybeDeployers ...func() (deployFn, error)) (deployFn, error) { + for _, d := range maybeDeployers { + if deploy, err := d(); err != nil { + return nil, errors.Trace(err) + } else if deploy != nil { + return deploy, nil + } } - return modelconfig.NewClient(root), nil + return nil, errors.NotFoundf("suitable deployer") } -func (d *applicationDeployer) newAnnotationsAPIClient() (*apiannotations.Client, error) { - root, err := d.api.NewAPIRoot() - if err != nil { - return nil, errors.Trace(err) +type deployFn func(*cmd.Context, DeployAPI) error + +func (c *DeployCommand) validateBundleFlags() error { + if flags := getFlags(c.flagSet, charmOnlyFlags); len(flags) > 0 { + return errors.Errorf("Flags provided but not supported when deploying a bundle: %s.", strings.Join(flags, ", ")) } - return apiannotations.NewClient(root), nil + return nil } -func (d *applicationDeployer) newCharmsAPIClient() (*apicharms.Client, error) { - root, err := d.api.NewAPIRoot() - if err != nil { - return nil, errors.Trace(err) +func (c *DeployCommand) validateCharmFlags() error { + if flags := getFlags(c.flagSet, bundleOnlyFlags); len(flags) > 0 { + return errors.Errorf("Flags provided but not supported when deploying a charm: %s.", strings.Join(flags, ", ")) } - return apicharms.NewClient(root), nil + return nil } -func (c *applicationDeployer) applicationDeploy(args applicationDeployParams) error { - serviceClient, err := c.newApplicationAPIClient() +func (c *DeployCommand) maybePredeployedLocalCharm() (deployFn, error) { + // If the charm's schema is local, we should definitively attempt + // to deploy a charm that's already deployed in the + // environment. 
+ userCharmURL, err := charm.ParseURL(c.CharmOrBundle) if err != nil { - return err + return nil, errors.Trace(err) + } else if userCharmURL.Schema != "local" { + logger.Debugf("cannot interpret as a redeployment of a local charm from the controller") + return nil, nil } - defer serviceClient.Close() - for i, p := range args.placement { - if p.Scope == "model-uuid" { - p.Scope = serviceClient.ModelUUID() + + return func(ctx *cmd.Context, api DeployAPI) error { + formattedCharmURL := userCharmURL.String() + ctx.Infof("Located charm %q.", formattedCharmURL) + ctx.Infof("Deploying charm %q.", formattedCharmURL) + return errors.Trace(c.deployCharm( + charmstore.CharmID{URL: userCharmURL}, + (*macaroon.Macaroon)(nil), + userCharmURL.Series, + ctx, + api, + )) + }, nil +} + +func (c *DeployCommand) maybeReadLocalBundle() (deployFn, error) { + bundleFile := c.CharmOrBundle + var ( + bundleFilePath string + resolveRelativeBundleFilePath bool + ) + + bundleData, err := charmrepo.ReadBundleFile(bundleFile) + if err != nil { + // We may have been given a local bundle archive or exploded directory. + bundle, url, pathErr := charmrepo.NewBundleAtPath(bundleFile) + if charmrepo.IsInvalidPathError(pathErr) { + return nil, errors.Errorf(""+ + "The charm or bundle %q is ambiguous.\n"+ + "To deploy a local charm or bundle, run `juju deploy ./%[1]s`.\n"+ + "To deploy a charm or bundle from the store, run `juju deploy cs:%[1]s`.", + c.CharmOrBundle, + ) + } + if pathErr != nil { + // If the bundle files existed but we couldn't read them, + // then return that error rather than trying to interpret + // as a charm. 
+ if info, statErr := os.Stat(c.CharmOrBundle); statErr == nil { + if info.IsDir() { + if _, ok := pathErr.(*charmrepo.NotFoundError); !ok { + return nil, pathErr + } + } + } + + logger.Debugf("cannot interpret as local bundle: %v", err) + return nil, nil } - args.placement[i] = p - } - clientArgs := application.DeployArgs{ - CharmID: args.charmID, - ApplicationName: args.applicationName, - Series: args.series, - NumUnits: args.numUnits, - ConfigYAML: args.configYAML, - Cons: args.constraints, - Placement: args.placement, - Storage: args.storage, - EndpointBindings: args.spaceBindings, - Resources: args.resources, + bundleData = bundle.Data() + bundleFile = url.String() + if info, err := os.Stat(bundleFile); err == nil && info.IsDir() { + bundleFilePath = bundleFile + } + } else { + resolveRelativeBundleFilePath = true } - return serviceClient.Deploy(clientArgs) -} - -func (c *DeployCommand) Run(ctx *cmd.Context) error { - client, err := c.NewAPIClient() - if err != nil { - return errors.Trace(err) + if err := c.validateBundleFlags(); err != nil { + return nil, errors.Trace(err) } - defer client.Close() - api, err := c.NewAPIRoot() - if err != nil { - return errors.Trace(err) + return func(ctx *cmd.Context, apiRoot DeployAPI) error { + // For local bundles, we extract the local path of the bundle + // directory. + if resolveRelativeBundleFilePath { + bundleFilePath = filepath.Dir(ctx.AbsPath(bundleFile)) + } + + return errors.Trace(c.deployBundle( + ctx, + bundleFilePath, + bundleData, + c.Channel, + apiRoot, + c.BundleStorage, + )) + }, nil +} + +func (c *DeployCommand) maybeReadLocalCharm() (deployFn, error) { + // Charm may have been supplied via a path reference. + ch, curl, err := charmrepo.NewCharmAtPathForceSeries(c.CharmOrBundle, c.Series, c.Force) + // We check for several types of known error which indicate + // that the supplied reference was indeed a path but there was + // an issue reading the charm located there. 
+ if charm.IsMissingSeriesError(err) { + return nil, err + } else if charm.IsUnsupportedSeriesError(err) { + return nil, errors.Errorf("%v. Use --force to deploy the charm anyway.", err) + } else if errors.Cause(err) == zip.ErrFormat { + return nil, errors.Errorf("invalid charm or bundle provided at %q", c.CharmOrBundle) + } else if _, ok := err.(*charmrepo.NotFoundError); ok { + return nil, errors.Wrap(err, errors.NotFoundf("charm or bundle at %q", c.CharmOrBundle)) + } else if err != nil && err != os.ErrNotExist { + // If we get a "not exists" error then we attempt to interpret + // the supplied charm reference as a URL elsewhere, otherwise + // we return the error. + return nil, errors.Trace(err) + } else if err != nil { + logger.Debugf("cannot interpret as local charm: %v", err) + return nil, nil } - modelConfigClient := modelconfig.NewClient(api) - defer modelConfigClient.Close() - err = c.deployCharmOrBundle(ctx, client, modelConfigClient) - return block.ProcessBlockedError(err, block.BlockChange) -} + return func(ctx *cmd.Context, apiRoot DeployAPI) error { + if curl, err = apiRoot.AddLocalCharm(curl, ch); err != nil { + return errors.Trace(err) + } -type metricCredentialsAPI interface { - SetMetricCredentials(string, []byte) error - Close() error -} + id := charmstore.CharmID{ + URL: curl, + // Local charms don't need a channel. + } + + ctx.Infof("Deploying charm %q.", curl.String()) + return errors.Trace(c.deployCharm( + id, + (*macaroon.Macaroon)(nil), // local charms don't need one. 
+ curl.Series, + ctx, + apiRoot, + )) + }, nil +} + +func (c *DeployCommand) maybeReadCharmstoreBundleFn(apiRoot DeployAPI) func() (deployFn, error) { + return func() (deployFn, error) { + userRequestedURL, err := charm.ParseURL(c.CharmOrBundle) + if err != nil { + return nil, errors.Trace(err) + } -type metricsCredentialsAPIImpl struct { - api *application.Client - state api.Connection -} + modelCfg, err := getModelConfig(apiRoot) + if err != nil { + return nil, errors.Trace(err) + } + + // Charm or bundle has been supplied as a URL so we resolve and + // deploy using the store. + storeCharmOrBundleURL, channel, _, err := apiRoot.Resolve(modelCfg, userRequestedURL) + if charm.IsUnsupportedSeriesError(err) { + return nil, errors.Errorf("%v. Use --force to deploy the charm anyway.", err) + } else if err != nil { + return nil, errors.Trace(err) + } else if storeCharmOrBundleURL.Series != "bundle" { + logger.Debugf( + `cannot interpret as charmstore bundle: %v (series) != "bundle"`, + storeCharmOrBundleURL.Series, + ) + return nil, nil + } + + if err := c.validateBundleFlags(); err != nil { + return nil, errors.Trace(err) + } + + return func(ctx *cmd.Context, apiRoot DeployAPI) error { + bundle, err := apiRoot.GetBundle(storeCharmOrBundleURL) + if err != nil { + return errors.Trace(err) + } + ctx.Infof("Located bundle %q", storeCharmOrBundleURL) + data := bundle.Data() -// SetMetricCredentials sets the credentials on the application. 
-func (s *metricsCredentialsAPIImpl) SetMetricCredentials(serviceName string, data []byte) error { - return s.api.SetMetricCredentials(serviceName, data) + return errors.Trace(c.deployBundle( + ctx, + "", // filepath + data, + channel, + apiRoot, + c.BundleStorage, + )) + }, nil + } } -// Close closes the api connection -func (s *metricsCredentialsAPIImpl) Close() error { - err := s.state.Close() +func (c *DeployCommand) charmStoreCharm() (deployFn, error) { + userRequestedURL, err := charm.ParseURL(c.CharmOrBundle) if err != nil { - return errors.Trace(err) + return nil, errors.Trace(err) } - return nil -} -var getMetricCredentialsAPI = func(state api.Connection) (metricCredentialsAPI, error) { - return &metricsCredentialsAPIImpl{api: application.NewClient(state), state: state}, nil + return func(ctx *cmd.Context, apiRoot DeployAPI) error { + // resolver.resolve potentially updates the series of anything + // passed in. Store this for use in seriesSelector. + userRequestedSeries := userRequestedURL.Series + + modelCfg, err := getModelConfig(apiRoot) + if err != nil { + return errors.Trace(err) + } + + // Charm or bundle has been supplied as a URL so we resolve and deploy using the store. + storeCharmOrBundleURL, channel, supportedSeries, err := apiRoot.Resolve(modelCfg, userRequestedURL) + if charm.IsUnsupportedSeriesError(err) { + return errors.Errorf("%v. Use --force to deploy the charm anyway.", err) + } else if err != nil { + return errors.Trace(err) + } + + if err := c.validateCharmFlags(); err != nil { + return errors.Trace(err) + } + + selector := seriesSelector{ + charmURLSeries: userRequestedSeries, + seriesFlag: c.Series, + supportedSeries: supportedSeries, + force: c.Force, + conf: modelCfg, + fromBundle: false, + } + + // Get the series to use. + series, err := selector.charmSeries() + if charm.IsUnsupportedSeriesError(err) { + return errors.Errorf("%v. 
Use --force to deploy the charm anyway.", err) + } + + // Store the charm in the controller + curl, csMac, err := addCharmFromURL(apiRoot, storeCharmOrBundleURL, channel) + if err != nil { + if err1, ok := errors.Cause(err).(*termsRequiredError); ok { + terms := strings.Join(err1.Terms, " ") + return errors.Errorf(`Declined: please agree to the following terms %s. Try: "juju agree %s"`, terms, terms) + } + return errors.Annotatef(err, "storing charm for URL %q", storeCharmOrBundleURL) + } + + formattedCharmURL := curl.String() + ctx.Infof("Located charm %q.", formattedCharmURL) + ctx.Infof("Deploying charm %q.", formattedCharmURL) + id := charmstore.CharmID{ + URL: curl, + Channel: channel, + } + return errors.Trace(c.deployCharm( + id, + csMac, + series, + ctx, + apiRoot, + )) + }, nil } // getFlags returns the flags with the given names. Only flags that are set and diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/application/deploy_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/application/deploy_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/application/deploy_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/application/deploy_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,7 +4,6 @@ package application import ( - "encoding/json" "fmt" "io/ioutil" "net/http" @@ -13,10 +12,14 @@ "os" "path" "path/filepath" + "reflect" "sort" "strings" + "sync" "github.com/juju/errors" + "github.com/juju/gnuflag" + "github.com/juju/loggo" jujutesting "github.com/juju/testing" jc "github.com/juju/testing/checkers" "github.com/juju/utils" @@ -32,36 +35,40 @@ "gopkg.in/macaroon-bakery.v1/bakery/checkers" "gopkg.in/macaroon-bakery.v1/bakerytest" "gopkg.in/macaroon-bakery.v1/httpbakery" - "launchpad.net/gnuflag" + "gopkg.in/macaroon.v1" "github.com/juju/juju/api" + "github.com/juju/juju/api/application" + "github.com/juju/juju/api/charms" "github.com/juju/juju/apiserver/params" - 
"github.com/juju/juju/cmd/juju/common" + jjcharmstore "github.com/juju/juju/charmstore" "github.com/juju/juju/cmd/modelcmd" "github.com/juju/juju/constraints" + "github.com/juju/juju/environs/config" "github.com/juju/juju/instance" "github.com/juju/juju/juju/testing" "github.com/juju/juju/state" "github.com/juju/juju/testcharms" coretesting "github.com/juju/juju/testing" + jtesting "github.com/juju/juju/testing" ) type DeploySuite struct { testing.RepoSuite - common.CmdBlockHelper + coretesting.CmdBlockHelper } var _ = gc.Suite(&DeploySuite{}) func (s *DeploySuite) SetUpTest(c *gc.C) { s.RepoSuite.SetUpTest(c) - s.CmdBlockHelper = common.NewCmdBlockHelper(s.APIState) + s.CmdBlockHelper = coretesting.NewCmdBlockHelper(s.APIState) c.Assert(s.CmdBlockHelper, gc.NotNil) s.AddCleanup(func(*gc.C) { s.CmdBlockHelper.Close() }) } func runDeploy(c *gc.C, args ...string) error { - _, err := coretesting.RunCommand(c, NewDeployCommand(), args...) + _, err := coretesting.RunCommand(c, NewDefaultDeployCommand(), args...) 
return err } @@ -85,9 +92,6 @@ args: []string{"craziness", "burble1", "--to", "#:foo"}, err: `invalid --to parameter "#:foo"`, }, { - args: []string{"craziness", "burble1", "--constraints", "gibber=plop"}, - err: `invalid value "gibber=plop" for flag --constraints: unknown constraint "gibber"`, - }, { args: []string{"charm", "application", "--force"}, err: `--force is only used with --series`, }, @@ -96,14 +100,15 @@ func (s *DeploySuite) TestInitErrors(c *gc.C) { for i, t := range initErrorTests { c.Logf("test %d", i) - err := coretesting.InitCommand(NewDeployCommand(), t.args) + err := coretesting.InitCommand(NewDefaultDeployCommand(), t.args) c.Assert(err, gc.ErrorMatches, t.err) } } func (s *DeploySuite) TestNoCharmOrBundle(c *gc.C) { err := runDeploy(c, c.MkDir()) - c.Assert(err, gc.ErrorMatches, `no charm or bundle found at .*`) + c.Check(err, jc.Satisfies, errors.IsNotFound) + c.Assert(err, gc.ErrorMatches, `charm or bundle at .*`) } func (s *DeploySuite) TestBlockDeploy(c *gc.C) { @@ -129,12 +134,8 @@ func (s *DeploySuite) TestPathWithNoCharmOrBundle(c *gc.C) { err := runDeploy(c, c.MkDir()) - c.Assert(err, gc.ErrorMatches, `no charm or bundle found at .*`) -} - -func (s *DeploySuite) TestInvalidURL(c *gc.C) { - err := runDeploy(c, "cs:craz~ness") - c.Assert(err, gc.ErrorMatches, `URL has invalid charm or bundle name: "cs:craz~ness"`) + c.Check(err, jc.Satisfies, errors.IsNotFound) + c.Assert(err, gc.ErrorMatches, `charm or bundle at .*`) } func (s *DeploySuite) TestCharmDir(c *gc.C) { @@ -153,7 +154,10 @@ err = os.Chdir(s.CharmsPath) c.Assert(err, jc.ErrorIsNil) err = runDeploy(c, "multi-series") - c.Assert(err, gc.ErrorMatches, `.*path "multi-series" can not be a relative path`) + c.Assert(err, gc.ErrorMatches, ""+ + "The charm or bundle \"multi-series\" is ambiguous.\n"+ + "To deploy a local charm or bundle, run `juju deploy ./multi-series`.\n"+ + "To deploy a charm or bundle from the store, run `juju deploy cs:multi-series`.") } func (s *DeploySuite) 
TestDeployFromPathOldCharm(c *gc.C) { @@ -267,13 +271,13 @@ func (s *DeploySuite) TestConstraints(c *gc.C) { ch := testcharms.Repo.CharmArchivePath(s.CharmsPath, "dummy") - err := runDeploy(c, ch, "--constraints", "mem=2G cpu-cores=2", "--series", "trusty") + err := runDeploy(c, ch, "--constraints", "mem=2G cores=2", "--series", "trusty") c.Assert(err, jc.ErrorIsNil) curl := charm.MustParseURL("local:trusty/dummy-1") application, _ := s.AssertService(c, "dummy", curl, 1, 0) cons, err := application.Constraints() c.Assert(err, jc.ErrorIsNil) - c.Assert(cons, jc.DeepEquals, constraints.MustParse("mem=2G cpu-cores=2")) + c.Assert(cons, jc.DeepEquals, constraints.MustParse("mem=2G cores=2")) } func (s *DeploySuite) TestResources(c *gc.C) { @@ -463,6 +467,35 @@ c.Assert(err, gc.Not(gc.ErrorMatches), "machine 0 is the controller for a local model and cannot host units") } +func (s *DeploySuite) TestDeployLocalWithTerms(c *gc.C) { + ch := testcharms.Repo.ClonedDirPath(s.CharmsPath, "terms1") + output, err := runDeployCommand(c, ch, "--series", "trusty") + c.Assert(err, jc.ErrorIsNil) + c.Check(output, gc.Equals, `Deploying charm "local:trusty/terms1-1".`) + + curl := charm.MustParseURL("local:trusty/terms1-1") + s.AssertService(c, "terms1", curl, 1, 0) +} + +func (s *DeploySuite) TestDeployFlags(c *gc.C) { + command := DeployCommand{} + flagSet := gnuflag.NewFlagSet(command.Info().Name, gnuflag.ContinueOnError) + command.SetFlags(flagSet) + c.Assert(command.flagSet, jc.DeepEquals, flagSet) + // Add to the slice below if a new flag is introduced which is valid for + // both charms and bundles. + charmAndBundleFlags := []string{"channel", "storage"} + var allFlags []string + flagSet.VisitAll(func(flag *gnuflag.Flag) { + allFlags = append(allFlags, flag.Name) + }) + declaredFlags := append(charmAndBundleFlags, charmOnlyFlags...) + declaredFlags = append(declaredFlags, bundleOnlyFlags...) + declaredFlags = append(declaredFlags, modelCommandBaseFlags...) 
+ sort.Strings(declaredFlags) + c.Assert(declaredFlags, jc.DeepEquals, allFlags) +} + type DeployLocalSuite struct { testing.RepoSuite } @@ -501,32 +534,20 @@ about: "public charm, success", uploadURL: "cs:~bob/trusty/wordpress1-10", deployURL: "cs:~bob/trusty/wordpress1", - expectOutput: ` -Added charm "cs:~bob/trusty/wordpress1-10" to the model. -Deploying charm "cs:~bob/trusty/wordpress1-10" with the user specified series "trusty".`, }, { about: "public charm, fully resolved, success", uploadURL: "cs:~bob/trusty/wordpress2-10", deployURL: "cs:~bob/trusty/wordpress2-10", - expectOutput: ` -Added charm "cs:~bob/trusty/wordpress2-10" to the model. -Deploying charm "cs:~bob/trusty/wordpress2-10" with the user specified series "trusty".`, }, { about: "non-public charm, success", uploadURL: "cs:~bob/trusty/wordpress3-10", deployURL: "cs:~bob/trusty/wordpress3", readPermUser: clientUserName, - expectOutput: ` -Added charm "cs:~bob/trusty/wordpress3-10" to the model. -Deploying charm "cs:~bob/trusty/wordpress3-10" with the user specified series "trusty".`, }, { about: "non-public charm, fully resolved, success", uploadURL: "cs:~bob/trusty/wordpress4-10", deployURL: "cs:~bob/trusty/wordpress4-10", readPermUser: clientUserName, - expectOutput: ` -Added charm "cs:~bob/trusty/wordpress4-10" to the model. 
-Deploying charm "cs:~bob/trusty/wordpress4-10" with the user specified series "trusty".`, }, { about: "non-public charm, access denied", uploadURL: "cs:~bob/trusty/wordpress5-10", @@ -543,29 +564,11 @@ about: "public bundle, success", uploadURL: "cs:~bob/bundle/wordpress-simple1-42", deployURL: "cs:~bob/bundle/wordpress-simple1", - expectOutput: ` -added charm cs:trusty/mysql-0 -application mysql deployed (charm cs:trusty/mysql-0 with the series "trusty" defined by the bundle) -added charm cs:trusty/wordpress-1 -application wordpress deployed (charm cs:trusty/wordpress-1 with the series "trusty" defined by the bundle) -related wordpress:db and mysql:server -added mysql/0 unit to new machine -added wordpress/0 unit to new machine -deployment of bundle "cs:~bob/bundle/wordpress-simple1-42" completed`, }, { about: "non-public bundle, success", uploadURL: "cs:~bob/bundle/wordpress-simple2-0", deployURL: "cs:~bob/bundle/wordpress-simple2-0", readPermUser: clientUserName, - expectOutput: ` -added charm cs:trusty/mysql-0 -reusing application mysql (charm: cs:trusty/mysql-0) -added charm cs:trusty/wordpress-1 -reusing application wordpress (charm: cs:trusty/wordpress-1) -wordpress:db and mysql:server are already related -avoid adding new units to application mysql: 1 unit already present -avoid adding new units to application wordpress: 1 unit already present -deployment of bundle "cs:~bob/bundle/wordpress-simple2-0" completed`, }, { about: "non-public bundle, access denied", uploadURL: "cs:~bob/bundle/wordpress-simple3-47", @@ -595,14 +598,12 @@ if test.readPermUser != "" { s.changeReadPerm(c, url, test.readPermUser) } - ctx, err := coretesting.RunCommand(c, NewDeployCommand(), test.deployURL, fmt.Sprintf("wordpress%d", i)) + _, err := coretesting.RunCommand(c, NewDefaultDeployCommand(), test.deployURL, fmt.Sprintf("wordpress%d", i)) if test.expectError != "" { c.Check(err, gc.ErrorMatches, test.expectError) continue } c.Assert(err, jc.ErrorIsNil) - output := 
strings.Trim(coretesting.Stderr(ctx), "\n") - c.Check(output, gc.Equals, strings.TrimSpace(test.expectOutput)) } } @@ -611,8 +612,8 @@ output, err := runDeployCommand(c, "trusty/terms1") c.Assert(err, jc.ErrorIsNil) expectedOutput := ` -Added charm "cs:trusty/terms1-1" to the model. -Deploying charm "cs:trusty/terms1-1" with the user specified series "trusty". +Located charm "cs:trusty/terms1-1". +Deploying charm "cs:trusty/terms1-1". Deployment under prior agreement to terms: term1/1 term3/1 ` c.Assert(output, gc.Equals, strings.TrimSpace(expectedOutput)) @@ -624,17 +625,6 @@ c.Assert(err, jc.ErrorIsNil) } -func (s *DeploySuite) TestDeployLocalWithTerms(c *gc.C) { - ch := testcharms.Repo.ClonedDirPath(s.CharmsPath, "terms1") - output, err := runDeployCommand(c, ch, "--series", "trusty") - c.Assert(err, jc.ErrorIsNil) - // should not produce any output - c.Assert(output, gc.Equals, "") - - curl := charm.MustParseURL("local:trusty/terms1-1") - s.AssertService(c, "terms1", curl, 1, 0) -} - func (s *DeployCharmStoreSuite) TestDeployWithTermsNotSigned(c *gc.C) { s.termsDischargerError = &httpbakery.Error{ Message: "term agreement required: term/1 term/2", @@ -652,10 +642,10 @@ err := s.client.UploadCharmWithRevision(id, ch, -1) c.Assert(err, gc.IsNil) - err = s.client.Publish(id, []csclientparams.Channel{csclientparams.DevelopmentChannel}, nil) + err = s.client.Publish(id, []csclientparams.Channel{csclientparams.EdgeChannel}, nil) c.Assert(err, gc.IsNil) - _, err = runDeployCommand(c, "--channel", "development", "~client-username/wordpress") + _, err = runDeployCommand(c, "--channel", "edge", "~client-username/wordpress") c.Assert(err, gc.IsNil) s.assertCharmsUploaded(c, "cs:~client-username/precise/wordpress-0") s.assertApplicationsDeployed(c, map[string]serviceInfo{ @@ -886,39 +876,44 @@ } func (s *DeployCharmStoreSuite) TestAddMetricCredentials(c *gc.C) { - var called bool - setter := &testMetricCredentialsSetter{ - assert: func(serviceName string, data []byte) { - 
called = true - c.Assert(serviceName, gc.DeepEquals, "metered") - var b []byte - err := json.Unmarshal(data, &b) - c.Assert(err, gc.IsNil) - c.Assert(string(b), gc.Equals, "hello registration") - }, - } - - cleanup := jujutesting.PatchValue(&getMetricCredentialsAPI, func(_ api.Connection) (metricCredentialsAPI, error) { - return setter, nil - }) - defer cleanup() - stub := &jujutesting.Stub{} handler := &testMetricsRegistrationHandler{Stub: stub} server := httptest.NewServer(handler) defer server.Close() testcharms.UploadCharm(c, s.client, "cs:quantal/metered-1", "metered") - deploy := &DeployCommand{Steps: []DeployStep{&RegisterMeteredCharm{RegisterURL: server.URL, QueryURL: server.URL}}} - _, err := coretesting.RunCommand(c, modelcmd.Wrap(deploy), "cs:quantal/metered-1", "--plan", "someplan") - c.Assert(err, jc.ErrorIsNil) - curl := charm.MustParseURL("cs:quantal/metered-1") - svc, err := s.State.Application("metered") - c.Assert(err, jc.ErrorIsNil) - ch, _, err := svc.Charm() + charmDir := testcharms.Repo.CharmDir("metered") + + cfgAttrs := map[string]interface{}{ + "name": "name", + "uuid": "deadbeef-0bad-400d-8000-4b1d0d06f00d", + "type": "foo", + } + meteredURL := charm.MustParseURL("cs:quantal/metered-1") + fakeAPI := vanillaFakeModelAPI(cfgAttrs) + withCharmDeployable(fakeAPI, meteredURL, "quantal", charmDir.Meta(), charmDir.Metrics(), true, 1) + + cfg, err := config.New(config.NoDefaults, cfgAttrs) + c.Assert(err, jc.ErrorIsNil) + withCharmRepoResolvable(fakeAPI, meteredURL, cfg) + + // `"hello registration"\n` (quotes and newline from json + // encoding) is returned by the fake http server. This is binary64 + // encoded before the call into SetMetricCredentials. 
+ creds := append([]byte(`"aGVsbG8gcmVnaXN0cmF0aW9u"`), 0xA) + setMetricCredentialsCall := fakeAPI.Call("SetMetricCredentials", meteredURL.Name, creds).Returns(error(nil)) + + deploy := &DeployCommand{ + Steps: []DeployStep{&RegisterMeteredCharm{RegisterURL: server.URL, QueryURL: server.URL}}, + NewAPIRoot: func() (DeployAPI, error) { + return fakeAPI, nil + }, + } + _, err = coretesting.RunCommand(c, modelcmd.Wrap(deploy), "cs:quantal/metered-1", "--plan", "someplan") c.Assert(err, jc.ErrorIsNil) - c.Assert(ch.URL(), gc.DeepEquals, curl) - c.Assert(called, jc.IsTrue) + + c.Check(setMetricCredentialsCall(), gc.Equals, 1) + stub.CheckCalls(c, []jujutesting.StubCall{{ "Authorize", []interface{}{metricRegistrationPost{ ModelUUID: "deadbeef-0bad-400d-8000-4b1d0d06f00d", @@ -932,39 +927,40 @@ } func (s *DeployCharmStoreSuite) TestAddMetricCredentialsDefaultPlan(c *gc.C) { - var called bool - setter := &testMetricCredentialsSetter{ - assert: func(serviceName string, data []byte) { - called = true - c.Assert(serviceName, gc.DeepEquals, "metered") - var b []byte - err := json.Unmarshal(data, &b) - c.Assert(err, gc.IsNil) - c.Assert(string(b), gc.Equals, "hello registration") - }, - } - - cleanup := jujutesting.PatchValue(&getMetricCredentialsAPI, func(_ api.Connection) (metricCredentialsAPI, error) { - return setter, nil - }) - defer cleanup() - stub := &jujutesting.Stub{} handler := &testMetricsRegistrationHandler{Stub: stub} server := httptest.NewServer(handler) defer server.Close() testcharms.UploadCharm(c, s.client, "cs:quantal/metered-1", "metered") - deploy := &DeployCommand{Steps: []DeployStep{&RegisterMeteredCharm{RegisterURL: server.URL, QueryURL: server.URL}}} - _, err := coretesting.RunCommand(c, modelcmd.Wrap(deploy), "cs:quantal/metered-1") - c.Assert(err, jc.ErrorIsNil) - curl := charm.MustParseURL("cs:quantal/metered-1") - svc, err := s.State.Application("metered") + charmDir := testcharms.Repo.CharmDir("metered") + + cfgAttrs := map[string]interface{}{ + 
"name": "name", + "uuid": "deadbeef-0bad-400d-8000-4b1d0d06f00d", + "type": "foo", + } + meteredURL := charm.MustParseURL("cs:quantal/metered-1") + fakeAPI := vanillaFakeModelAPI(cfgAttrs) + withCharmDeployable(fakeAPI, meteredURL, "quantal", charmDir.Meta(), charmDir.Metrics(), true, 1) + + cfg, err := config.New(config.NoDefaults, cfgAttrs) c.Assert(err, jc.ErrorIsNil) - ch, _, err := svc.Charm() + withCharmRepoResolvable(fakeAPI, meteredURL, cfg) + + creds := append([]byte(`"aGVsbG8gcmVnaXN0cmF0aW9u"`), 0xA) + setMetricCredentialsCall := fakeAPI.Call("SetMetricCredentials", meteredURL.Name, creds).Returns(error(nil)) + + deploy := &DeployCommand{ + Steps: []DeployStep{&RegisterMeteredCharm{RegisterURL: server.URL, QueryURL: server.URL}}, + NewAPIRoot: func() (DeployAPI, error) { + return fakeAPI, nil + }, + } + _, err = coretesting.RunCommand(c, modelcmd.Wrap(deploy), "cs:quantal/metered-1") c.Assert(err, jc.ErrorIsNil) - c.Assert(ch.URL(), gc.DeepEquals, curl) - c.Assert(called, jc.IsTrue) + + c.Check(setMetricCredentialsCall(), gc.Equals, 1) stub.CheckCalls(c, []jujutesting.StubCall{{ "DefaultPlan", []interface{}{"cs:quantal/metered-1"}, }, { @@ -979,47 +975,135 @@ }}) } -func (s *DeploySuite) TestAddMetricCredentialsDefaultForUnmeteredCharm(c *gc.C) { - var called bool - setter := &testMetricCredentialsSetter{ - assert: func(serviceName string, data []byte) { - called = true - c.Assert(serviceName, gc.DeepEquals, "dummy") - c.Assert(data, gc.DeepEquals, []byte{}) +func (s *DeployCharmStoreSuite) TestSetMetricCredentialsNotCalledForUnmeteredCharm(c *gc.C) { + charmDir := testcharms.Repo.CharmDir("dummy") + testcharms.UploadCharm(c, s.client, "cs:quantal/dummy-1", "dummy") + + cfgAttrs := map[string]interface{}{ + "name": "name", + "uuid": "deadbeef-0bad-400d-8000-4b1d0d06f00d", + "type": "foo", + } + fakeAPI := vanillaFakeModelAPI(cfgAttrs) + + charmURL := charm.MustParseURL("cs:quantal/dummy-1") + cfg, err := config.New(config.NoDefaults, cfgAttrs) + 
c.Assert(err, jc.ErrorIsNil) + withCharmRepoResolvable(fakeAPI, charmURL, cfg) + withCharmDeployable(fakeAPI, charmURL, "quantal", charmDir.Meta(), charmDir.Metrics(), false, 1) + + deploy := &DeployCommand{ + Steps: []DeployStep{&RegisterMeteredCharm{}}, + NewAPIRoot: func() (DeployAPI, error) { + return fakeAPI, nil }, } - cleanup := jujutesting.PatchValue(&getMetricCredentialsAPI, func(_ api.Connection) (metricCredentialsAPI, error) { - return setter, nil - }) - defer cleanup() + _, err = coretesting.RunCommand(c, modelcmd.Wrap(deploy), "cs:quantal/dummy-1") + c.Assert(err, jc.ErrorIsNil) - ch := testcharms.Repo.ClonedDirPath(s.CharmsPath, "dummy") + for _, call := range fakeAPI.Calls() { + if call.FuncName == "SetMetricCredentials" { + c.Fatal("call to SetMetricCredentials was not supposed to happen") + } + } +} + +func (s *DeployCharmStoreSuite) TestAddMetricCredentialsNotNeededForOptionalPlan(c *gc.C) { + metricsYAML := ` +plan: + required: false +metrics: + pings: + type: gauge + description: ping pongs +` + meteredMetaYAML := ` +name: metered +description: metered charm +summary: summary +` + url, ch := testcharms.UploadCharmWithMeta(c, s.client, "cs:~user/quantal/metered", meteredMetaYAML, metricsYAML, 1) + + cfgAttrs := map[string]interface{}{ + "name": "name", + "uuid": "deadbeef-0bad-400d-8000-4b1d0d06f00d", + "type": "foo", + } + fakeAPI := vanillaFakeModelAPI(cfgAttrs) - deploy := &DeployCommand{Steps: []DeployStep{&RegisterMeteredCharm{}}} - _, err := coretesting.RunCommand(c, modelcmd.Wrap(deploy), ch, "--series", "trusty") + cfg, err := config.New(config.NoDefaults, cfgAttrs) c.Assert(err, jc.ErrorIsNil) - curl := charm.MustParseURL("local:trusty/dummy-1") - s.AssertService(c, "dummy", curl, 1, 0) - c.Assert(called, jc.IsFalse) + withCharmRepoResolvable(fakeAPI, url, cfg) + withCharmDeployable(fakeAPI, url, "quantal", ch.Meta(), ch.Metrics(), true, 1) + + stub := &jujutesting.Stub{} + handler := &testMetricsRegistrationHandler{Stub: stub} + server 
:= httptest.NewServer(handler) + defer server.Close() + deploy := &DeployCommand{ + Steps: []DeployStep{&RegisterMeteredCharm{RegisterURL: server.URL, QueryURL: server.URL}}, + NewAPIRoot: func() (DeployAPI, error) { + return fakeAPI, nil + }, + } + + _, err = coretesting.RunCommand(c, modelcmd.Wrap(deploy), url.String()) + c.Assert(err, jc.ErrorIsNil) + stub.CheckNoCalls(c) } -func (s *DeploySuite) TestDeployFlags(c *gc.C) { - command := DeployCommand{} - flagSet := gnuflag.NewFlagSet(command.Info().Name, gnuflag.ContinueOnError) - command.SetFlags(flagSet) - c.Assert(command.flagSet, jc.DeepEquals, flagSet) - // Add to the slice below if a new flag is introduced which is valid for - // both charms and bundles. - charmAndBundleFlags := []string{"channel", "storage"} - var allFlags []string - flagSet.VisitAll(func(flag *gnuflag.Flag) { - allFlags = append(allFlags, flag.Name) - }) - declaredFlags := append(charmAndBundleFlags, charmOnlyFlags...) - declaredFlags = append(declaredFlags, bundleOnlyFlags...) 
- sort.Strings(declaredFlags) - c.Assert(declaredFlags, jc.DeepEquals, allFlags) +func (s *DeployCharmStoreSuite) TestSetMetricCredentialsCalledWhenPlanSpecifiedWhenOptional(c *gc.C) { + metricsYAML := ` +plan: + required: false +metrics: + pings: + type: gauge + description: ping pongs +` + meteredMetaYAML := ` +name: metered +description: metered charm +summary: summary +` + url, ch := testcharms.UploadCharmWithMeta(c, s.client, "cs:~user/quantal/metered", meteredMetaYAML, metricsYAML, 1) + + cfgAttrs := map[string]interface{}{ + "name": "name", + "uuid": "deadbeef-0bad-400d-8000-4b1d0d06f00d", + "type": "foo", + } + fakeAPI := vanillaFakeModelAPI(cfgAttrs) + + cfg, err := config.New(config.NoDefaults, cfgAttrs) + c.Assert(err, jc.ErrorIsNil) + withCharmRepoResolvable(fakeAPI, url, cfg) + withCharmDeployable(fakeAPI, url, "quantal", ch.Meta(), ch.Metrics(), true, 1) + + stub := &jujutesting.Stub{} + handler := &testMetricsRegistrationHandler{Stub: stub} + server := httptest.NewServer(handler) + defer server.Close() + deploy := &DeployCommand{ + Steps: []DeployStep{&RegisterMeteredCharm{RegisterURL: server.URL, QueryURL: server.URL}}, + NewAPIRoot: func() (DeployAPI, error) { + return fakeAPI, nil + }, + } + + _, err = coretesting.RunCommand(c, modelcmd.Wrap(deploy), url.String(), "--plan", "someplan") + c.Assert(err, jc.ErrorIsNil) + stub.CheckCalls(c, []jujutesting.StubCall{{ + "Authorize", []interface{}{metricRegistrationPost{ + ModelUUID: "deadbeef-0bad-400d-8000-4b1d0d06f00d", + CharmURL: "cs:~user/quantal/metered-0", + ApplicationName: "metered", + PlanURL: "someplan", + Budget: "personal", + Limit: "0", + }}, + }}) } func (s *DeployCharmStoreSuite) TestDeployCharmWithSomeEndpointBindingsSpecifiedSuccess(c *gc.C) { @@ -1052,28 +1136,42 @@ } func (s *DeployCharmStoreSuite) TestDeployCharmsEndpointNotImplemented(c *gc.C) { - setter := &testMetricCredentialsSetter{ - assert: func(serviceName string, data []byte) {}, - err: ¶ms.Error{ - Message: "IsMetered", - 
Code: params.CodeNotImplemented, - }, - } - cleanup := jujutesting.PatchValue(&getMetricCredentialsAPI, func(_ api.Connection) (metricCredentialsAPI, error) { - return setter, nil - }) - defer cleanup() - stub := &jujutesting.Stub{} handler := &testMetricsRegistrationHandler{Stub: stub} server := httptest.NewServer(handler) defer server.Close() - testcharms.UploadCharm(c, s.client, "cs:quantal/metered-1", "metered") - deploy := &DeployCommand{Steps: []DeployStep{&RegisterMeteredCharm{RegisterURL: server.URL, QueryURL: server.URL}}} - _, err := coretesting.RunCommand(c, modelcmd.Wrap(deploy), "cs:quantal/metered-1", "--plan", "someplan") + meteredCharmURL := charm.MustParseURL("cs:quantal/metered-1") + testcharms.UploadCharm(c, s.client, meteredCharmURL.String(), "metered") + charmDir := testcharms.Repo.CharmDir("metered") + + cfgAttrs := map[string]interface{}{ + "name": "name", + "uuid": "deadbeef-0bad-400d-8000-4b1d0d06f00d", + "type": "foo", + } + fakeAPI := vanillaFakeModelAPI(cfgAttrs) + + cfg, err := config.New(config.NoDefaults, cfgAttrs) + c.Assert(err, jc.ErrorIsNil) + withCharmRepoResolvable(fakeAPI, meteredCharmURL, cfg) + withCharmDeployable(fakeAPI, meteredCharmURL, "quantal", charmDir.Meta(), charmDir.Metrics(), true, 1) - c.Assert(err, gc.ErrorMatches, "IsMetered") + // `"hello registration"\n` (quotes and newline from json + // encoding) is returned by the fake http server. This is binary64 + // encoded before the call into SetMetricCredentials. 
+ creds := append([]byte(`"aGVsbG8gcmVnaXN0cmF0aW9u"`), 0xA) + fakeAPI.Call("SetMetricCredentials", meteredCharmURL.Name, creds).Returns(errors.New("IsMetered")) + + deploy := &DeployCommand{ + Steps: []DeployStep{&RegisterMeteredCharm{RegisterURL: server.URL, QueryURL: server.URL}}, + NewAPIRoot: func() (DeployAPI, error) { + return fakeAPI, nil + }, + } + _, err = coretesting.RunCommand(c, modelcmd.Wrap(deploy), "cs:quantal/metered-1", "--plan", "someplan") + + c.Check(err, gc.ErrorMatches, "IsMetered") } type ParseBindSuite struct { @@ -1135,3 +1233,500 @@ c.Check(err.Error(), gc.Equals, parseBindErrorPrefix+expectedErrorSuffix) c.Check(parsedBindings, gc.IsNil) } + +type DeployUnitTestSuite struct { + jujutesting.IsolationSuite + jujutesting.FakeHomeSuite + DeployAPI +} + +var _ = gc.Suite(&DeployUnitTestSuite{}) + +func (s *DeployUnitTestSuite) SetUpSuite(c *gc.C) { + s.IsolationSuite.SetUpSuite(c) + s.FakeHomeSuite.SetUpSuite(c) +} + +func (s *DeployUnitTestSuite) TearDownSuite(c *gc.C) { + s.FakeHomeSuite.TearDownSuite(c) + s.IsolationSuite.TearDownSuite(c) +} + +func (s *DeployUnitTestSuite) SetUpTest(c *gc.C) { + s.IsolationSuite.SetUpTest(c) + s.FakeHomeSuite.SetUpTest(c) +} + +func (s *DeployUnitTestSuite) TearDownTest(c *gc.C) { + s.FakeHomeSuite.TearDownTest(c) + s.IsolationSuite.TearDownTest(c) +} + +func (s *DeployUnitTestSuite) TestDeployLocalCharm_GivesCorrectUserMessage(c *gc.C) { + // Copy dummy charm to path where we can deploy it from + charmsPath := c.MkDir() + charmDir := testcharms.Repo.ClonedDir(charmsPath, "dummy") + + cfgAttrs := map[string]interface{}{ + "name": "name", + "uuid": "deadbeef-0bad-400d-8000-4b1d0d06f00d", + "type": "foo", + } + fakeAPI := vanillaFakeModelAPI(cfgAttrs) + + dummyURL := charm.MustParseURL("local:trusty/dummy-1") + withLocalCharmDeployable(fakeAPI, dummyURL, charmDir) + withCharmDeployable(fakeAPI, dummyURL, "trusty", charmDir.Meta(), charmDir.Metrics(), false, 1) + + cmd := NewDeployCommand(func() (DeployAPI, 
error) { + return fakeAPI, nil + }, nil) + context, err := jtesting.RunCommand(c, cmd, charmDir.Path, "--series", "trusty") + c.Assert(err, jc.ErrorIsNil) + + c.Check(jtesting.Stderr(context), gc.Equals, `Deploying charm "local:trusty/dummy-1".`+"\n") +} + +func (s *DeployUnitTestSuite) TestAddMetricCredentialsDefaultForUnmeteredCharm(c *gc.C) { + charmsPath := c.MkDir() + charmDir := testcharms.Repo.ClonedDir(charmsPath, "dummy") + + cfgAttrs := map[string]interface{}{ + "name": "name", + "uuid": "deadbeef-0bad-400d-8000-4b1d0d06f00d", + "type": "foo", + } + dummyURL := charm.MustParseURL("local:trusty/dummy-1") + fakeAPI := vanillaFakeModelAPI(cfgAttrs) + withLocalCharmDeployable(fakeAPI, dummyURL, charmDir) + withCharmDeployable(fakeAPI, dummyURL, "trusty", charmDir.Meta(), charmDir.Metrics(), true, 1) + + deployCmd := NewDeployCommand(func() (DeployAPI, error) { + return fakeAPI, nil + }, nil) + _, err := coretesting.RunCommand(c, deployCmd, charmDir.Path, "--series", "trusty") + c.Assert(err, jc.ErrorIsNil) + + // We never attempt to set metric credentials + for _, call := range fakeAPI.Calls() { + if call.FuncName == "FacadeCall" { + c.Assert(call.Args[0], gc.Not(gc.Matches), "SetMetricCredentials") + } + } +} + +func (s *DeployUnitTestSuite) TestRedeployLocalCharm_SucceedsWhenDeployed(c *gc.C) { + charmsPath := c.MkDir() + charmDir := testcharms.Repo.ClonedDir(charmsPath, "dummy") + + fakeAPI := vanillaFakeModelAPI(map[string]interface{}{ + "name": "name", + "uuid": "deadbeef-0bad-400d-8000-4b1d0d06f00d", + "type": "foo", + }) + dummyURL := charm.MustParseURL("local:trusty/dummy-0") + withLocalCharmDeployable(fakeAPI, dummyURL, charmDir) + withCharmDeployable(fakeAPI, dummyURL, "trusty", charmDir.Meta(), charmDir.Metrics(), false, 1) + + deployCmd := NewDeployCommand(func() (DeployAPI, error) { + return fakeAPI, nil + }, nil) + context, err := jtesting.RunCommand(c, deployCmd, dummyURL.String()) + c.Assert(err, jc.ErrorIsNil) + + 
c.Check(jtesting.Stderr(context), gc.Equals, ""+ + `Located charm "local:trusty/dummy-0".`+"\n"+ + `Deploying charm "local:trusty/dummy-0".`+"\n", + ) +} + +func (s *DeployUnitTestSuite) TestDeployBundle_OutputsCorrectMessage(c *gc.C) { + bundleDir := testcharms.Repo.BundleArchive(c.MkDir(), "wordpress-simple") + + cfgAttrs := map[string]interface{}{ + "name": "name", + "uuid": "deadbeef-0bad-400d-8000-4b1d0d06f00d", + "type": "foo", + } + fakeAPI := vanillaFakeModelAPI(cfgAttrs) + withAllWatcher(fakeAPI) + + fakeBundleURL := charm.MustParseURL("cs:bundle/wordpress-simple") + cfg, err := config.New(config.NoDefaults, cfgAttrs) + c.Assert(err, jc.ErrorIsNil) + withCharmRepoResolvable(fakeAPI, fakeBundleURL, cfg) + fakeAPI.Call("GetBundle", fakeBundleURL).Returns(bundleDir, error(nil)) + + mysqlURL := charm.MustParseURL("cs:mysql") + withCharmRepoResolvable(fakeAPI, mysqlURL, cfg) + withCharmDeployable( + fakeAPI, + mysqlURL, + "quantal", + &charm.Meta{Series: []string{"quantal"}}, + &charm.Metrics{}, + false, + 0, + ) + fakeAPI.Call("AddUnits", "mysql", 1, []*instance.Placement(nil)).Returns([]string{"mysql/0"}, error(nil)) + + wordpressURL := charm.MustParseURL("cs:wordpress") + withCharmRepoResolvable(fakeAPI, wordpressURL, cfg) + withCharmDeployable( + fakeAPI, + wordpressURL, + "quantal", + &charm.Meta{Series: []string{"quantal"}}, + &charm.Metrics{}, + false, + 0, + ) + fakeAPI.Call("AddUnits", "wordpress", 1, []*instance.Placement(nil)).Returns([]string{"wordpress/0"}, error(nil)) + + fakeAPI.Call("AddRelation", "wordpress:db", "mysql:server").Returns( + ¶ms.AddRelationResults{}, + error(nil), + ) + + deployCmd := NewDeployCommand(func() (DeployAPI, error) { + return fakeAPI, nil + }, nil) + context, err := jtesting.RunCommand(c, deployCmd, "cs:bundle/wordpress-simple") + c.Assert(err, jc.ErrorIsNil) + + c.Check(jtesting.Stderr(context), gc.Equals, ""+ + `Located bundle "cs:bundle/wordpress-simple"`+"\n"+ + `Deploying charm "cs:mysql"`+"\n"+ + `Deploying charm 
"cs:wordpress"`+"\n"+ + `Related "wordpress:db" and "mysql:server"`+"\n"+ + `Deploy of bundle completed.`+ + "\n", + ) +} + +// fakeDeployAPI is a mock of the API used by the deploy command. It's +// a little muddled at the moment, but as the DeployAPI interface is +// sharpened, this will become so as well. +type fakeDeployAPI struct { + DeployAPI + *callMocker +} + +func (f *fakeDeployAPI) IsMetered(charmURL string) (bool, error) { + results := f.MethodCall(f, "IsMetered", charmURL) + return results[0].(bool), typeAssertError(results[1]) +} + +func (f *fakeDeployAPI) SetMetricCredentials(service string, credentials []byte) error { + results := f.MethodCall(f, "SetMetricCredentials", service, credentials) + return typeAssertError(results[0]) +} + +func (f *fakeDeployAPI) Close() error { + results := f.MethodCall(f, "Close") + return typeAssertError(results[0]) +} + +func (f *fakeDeployAPI) ModelGet() (map[string]interface{}, error) { + results := f.MethodCall(f, "ModelGet") + return results[0].(map[string]interface{}), typeAssertError(results[1]) +} + +func (f *fakeDeployAPI) Resolve(cfg *config.Config, url *charm.URL) ( + *charm.URL, + csclientparams.Channel, + []string, + error, +) { + results := f.MethodCall(f, "Resolve", cfg, url) + + return results[0].(*charm.URL), + results[1].(csclientparams.Channel), + results[2].([]string), + typeAssertError(results[3]) +} + +func (f *fakeDeployAPI) BestFacadeVersion(facade string) int { + results := f.MethodCall(f, "BestFacadeVersion", facade) + return results[0].(int) +} + +func (f *fakeDeployAPI) APICall(objType string, version int, id, request string, params, response interface{}) error { + results := f.MethodCall(f, "APICall", objType, version, id, request, params, response) + return typeAssertError(results[0]) +} + +func (f *fakeDeployAPI) Client() *api.Client { + results := f.MethodCall(f, "Client") + return results[0].(*api.Client) +} + +func (f *fakeDeployAPI) ModelUUID() (string, bool) { + results := 
f.MethodCall(f, "ModelUUID") + return results[0].(string), results[1].(bool) +} + +func (f *fakeDeployAPI) AddLocalCharm(url *charm.URL, ch charm.Charm) (*charm.URL, error) { + results := f.MethodCall(f, "AddLocalCharm", url, ch) + return results[0].(*charm.URL), typeAssertError(results[1]) +} + +func (f *fakeDeployAPI) AddCharm(url *charm.URL, channel csclientparams.Channel) error { + results := f.MethodCall(f, "AddCharm", url, channel) + return typeAssertError(results[0]) +} + +func (f *fakeDeployAPI) AddCharmWithAuthorization( + url *charm.URL, + channel csclientparams.Channel, + macaroon *macaroon.Macaroon, +) error { + results := f.MethodCall(f, "AddCharmWithAuthorization", url, channel, macaroon) + return typeAssertError(results[0]) +} + +func (f *fakeDeployAPI) CharmInfo(url string) (*charms.CharmInfo, error) { + results := f.MethodCall(f, "CharmInfo", url) + return results[0].(*charms.CharmInfo), typeAssertError(results[1]) +} + +func (f *fakeDeployAPI) Deploy(args application.DeployArgs) error { + results := f.MethodCall(f, "Deploy", args) + return typeAssertError(results[0]) +} + +func (f *fakeDeployAPI) GetBundle(url *charm.URL) (charm.Bundle, error) { + results := f.MethodCall(f, "GetBundle", url) + return results[0].(charm.Bundle), typeAssertError(results[1]) +} + +func (f *fakeDeployAPI) Status(patterns []string) (*params.FullStatus, error) { + results := f.MethodCall(f, "Status", patterns) + return results[0].(*params.FullStatus), typeAssertError(results[1]) +} + +func (f *fakeDeployAPI) WatchAll() (*api.AllWatcher, error) { + results := f.MethodCall(f, "WatchAll") + return results[0].(*api.AllWatcher), typeAssertError(results[1]) +} + +func (f *fakeDeployAPI) AddRelation(endpoints ...string) (*params.AddRelationResults, error) { + results := f.MethodCall(f, "AddRelation", variadicStringToInterface(endpoints...)...) 
+ return results[0].(*params.AddRelationResults), typeAssertError(results[1]) +} + +func (f *fakeDeployAPI) AddUnits(application string, numUnits int, placement []*instance.Placement) ([]string, error) { + results := f.MethodCall(f, "AddUnits", application, numUnits, placement) + return results[0].([]string), typeAssertError(results[1]) +} + +func (f *fakeDeployAPI) Expose(application string) error { + results := f.MethodCall(f, "Expose", application) + return typeAssertError(results[0]) +} + +func (f *fakeDeployAPI) SetAnnotation(annotations map[string]map[string]string) ([]params.ErrorResult, error) { + results := f.MethodCall(f, "SetAnnotation", annotations) + return results[0].([]params.ErrorResult), typeAssertError(results[1]) +} + +func (f *fakeDeployAPI) GetCharmURL(serviceName string) (*charm.URL, error) { + results := f.MethodCall(f, "GetCharmURL", serviceName) + return results[0].(*charm.URL), typeAssertError(results[1]) +} + +func (f *fakeDeployAPI) SetCharm(cfg application.SetCharmConfig) error { + results := f.MethodCall(f, "SetCharm", cfg) + return typeAssertError(results[0]) +} + +func (f *fakeDeployAPI) Update(args params.ApplicationUpdate) error { + results := f.MethodCall(f, "Update", args) + return typeAssertError(results[0]) +} + +func (f *fakeDeployAPI) SetConstraints(application string, constraints constraints.Value) error { + results := f.MethodCall(f, "SetConstraints", application, constraints) + return typeAssertError(results[0]) +} + +func (f *fakeDeployAPI) AddMachines(machineParams []params.AddMachineParams) ([]params.AddMachinesResult, error) { + results := f.MethodCall(f, "AddMachines", machineParams) + return results[0].([]params.AddMachinesResult), typeAssertError(results[0]) +} + +type fakeBundle struct { + charm.Bundle + *callMocker +} + +func (f *fakeBundle) Data() *charm.BundleData { + results := f.MethodCall(f, "Data") + return results[0].(*charm.BundleData) +} + +func NewCallMocker() *callMocker { + return &callMocker{ + 
logger: logger, + results: make(map[string][]*callMockReturner), + } +} + +type callMocker struct { + jujutesting.Stub + + logger loggo.Logger + results map[string][]*callMockReturner +} + +func (m *callMocker) MethodCall(receiver interface{}, fnName string, args ...interface{}) []interface{} { + m.Stub.MethodCall(receiver, fnName, args...) + m.logger.Debugf("Call: %s(%v)", fnName, args) + results := m.Results(fnName, args...) + m.logger.Debugf("Results: %v", results) + return results +} + +func (m *callMocker) Results(fnName string, args ...interface{}) []interface{} { + for _, r := range m.results[fnName] { + if reflect.DeepEqual(r.args, args) == false { + continue + } + r.LogCall() + return r.retVals + } + return nil +} + +func (m *callMocker) Call(fnName string, args ...interface{}) *callMockReturner { + returner := &callMockReturner{args: args} + // Push on the front to hide old results. + m.results[fnName] = append([]*callMockReturner{returner}, m.results[fnName]...) + return returner +} + +type callMockReturner struct { + // args holds a reference to the arguments for which the retVals + // are valid. + args []interface{} + + // retVals holds a reference to the values that should be returned + // when the values held by args are seen. + retVals []interface{} + + // timesInvoked records the number of times this return has been + // reached. 
+ timesInvoked struct { + sync.Mutex + + value int + } +} + +func (m *callMockReturner) Returns(retVals ...interface{}) func() int { + m.retVals = retVals + return m.numTimesInvoked +} + +func (m *callMockReturner) LogCall() { + m.timesInvoked.Lock() + defer m.timesInvoked.Unlock() + m.timesInvoked.value++ +} + +func (m *callMockReturner) numTimesInvoked() int { + m.timesInvoked.Lock() + defer m.timesInvoked.Unlock() + return m.timesInvoked.value +} + +func typeAssertError(err interface{}) error { + if err == nil { + return nil + } + return err.(error) +} + +func variadicStringToInterface(args ...string) []interface{} { + interfaceArgs := make([]interface{}, len(args)) + for i, a := range args { + interfaceArgs[i] = a + } + return interfaceArgs +} + +func vanillaFakeModelAPI(cfgAttrs map[string]interface{}) *fakeDeployAPI { + fakeAPI := &fakeDeployAPI{callMocker: NewCallMocker()} + + fakeAPI.Call("Close").Returns(error(nil)) + fakeAPI.Call("ModelGet").Returns(cfgAttrs, error(nil)) + fakeAPI.Call("ModelUUID").Returns("deadbeef-0bad-400d-8000-4b1d0d06f00d", true) + + return fakeAPI +} + +func withLocalCharmDeployable( + fakeAPI *fakeDeployAPI, + url *charm.URL, + c charm.Charm, +) { + fakeAPI.Call("AddLocalCharm", url, c).Returns(url, error(nil)) +} + +func withCharmDeployable( + fakeAPI *fakeDeployAPI, + url *charm.URL, + series string, + meta *charm.Meta, + metrics *charm.Metrics, + metered bool, + numUnits int, +) { + fakeAPI.Call("AddCharm", url, csclientparams.Channel("")).Returns(error(nil)) + fakeAPI.Call("CharmInfo", url.String()).Returns( + &charms.CharmInfo{ + URL: url.String(), + Meta: meta, + Metrics: metrics, + }, + error(nil), + ) + fakeAPI.Call("Deploy", application.DeployArgs{ + CharmID: jjcharmstore.CharmID{URL: url}, + ApplicationName: url.Name, + Series: series, + NumUnits: numUnits, + }).Returns(error(nil)) + fakeAPI.Call("IsMetered", url.String()).Returns(metered, error(nil)) + + // `"hello registration"\n` (quotes and newline from json + // 
encoding) is returned by the fake http server. This is binary64 + // encoded before the call into SetMetricCredentials. + creds := append([]byte(`"aGVsbG8gcmVnaXN0cmF0aW9u"`), 0xA) + fakeAPI.Call("SetMetricCredentials", url.Name, creds).Returns(error(nil)) +} + +func withCharmRepoResolvable( + fakeAPI *fakeDeployAPI, + url *charm.URL, + cfg *config.Config, +) { + fakeAPI.Call("Resolve", cfg, url).Returns( + url, + csclientparams.Channel(""), + []string{"quantal"}, // Supported series + error(nil), + ) +} + +func withAllWatcher(fakeAPI *fakeDeployAPI) { + id := "0" + fakeAPI.Call("WatchAll").Returns(api.NewAllWatcher(fakeAPI, &id), error(nil)) + + fakeAPI.Call("BestFacadeVersion", "Application").Returns(0) + fakeAPI.Call("BestFacadeVersion", "Annotations").Returns(0) + fakeAPI.Call("BestFacadeVersion", "AllWatcher").Returns(0) + fakeAPI.Call("BestFacadeVersion", "Charms").Returns(0) + fakeAPI.Call("APICall", "AllWatcher", 0, "0", "Stop", nil, nil).Returns(error(nil)) + fakeAPI.Call("Status", []string(nil)).Returns(¶ms.FullStatus{}, error(nil)) +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/application/export_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/application/export_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/application/export_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/application/export_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -8,19 +8,40 @@ "gopkg.in/juju/charmrepo.v2-unstable/csclient" "gopkg.in/macaroon-bakery.v1/httpbakery" + "github.com/juju/juju/api" "github.com/juju/juju/cmd/modelcmd" + "github.com/juju/juju/jujuclient" + "github.com/juju/juju/resource/resourceadapters" ) -// NewSetCommandForTest returns a SetCommand with the api provided as specified. 
-func NewSetCommandForTest(serviceAPI serviceAPI) cmd.Command { - return modelcmd.Wrap(&setCommand{ - serviceApi: serviceAPI, - }) +func NewUpgradeCharmCommandForTest( + store jujuclient.ClientStore, + apiOpener modelcmd.APIOpener, + deployResources resourceadapters.DeployResourcesFunc, + resolveCharm ResolveCharmFunc, + newCharmAdder NewCharmAdderFunc, + newCharmClient func(api.Connection) CharmClient, + newCharmUpgradeClient func(api.Connection) CharmUpgradeClient, + newModelConfigGetter func(api.Connection) ModelConfigGetter, + newResourceLister func(api.Connection) (ResourceLister, error), +) cmd.Command { + cmd := &upgradeCharmCommand{ + DeployResources: deployResources, + ResolveCharm: resolveCharm, + NewCharmAdder: newCharmAdder, + NewCharmClient: newCharmClient, + NewCharmUpgradeClient: newCharmUpgradeClient, + NewModelConfigGetter: newModelConfigGetter, + NewResourceLister: newResourceLister, + } + cmd.SetClientStore(store) + cmd.SetAPIOpener(apiOpener) + return modelcmd.Wrap(cmd) } -// NewGetCommand returns a GetCommand with the api provided as specified. -func NewGetCommandForTest(api getServiceAPI) cmd.Command { - return modelcmd.Wrap(&getCommand{ +// NewConfigCommandForTest returns a SetCommand with the api provided as specified. +func NewConfigCommandForTest(api configCommandAPI) cmd.Command { + return modelcmd.Wrap(&configCommand{ api: api, }) } @@ -32,6 +53,22 @@ }) } +// NewAddRelationCommandForTest returns an AddRelationCommand with the api provided as specified. +func NewAddRelationCommandForTest(api ApplicationAddRelationAPI) cmd.Command { + cmd := &addRelationCommand{newAPIFunc: func() (ApplicationAddRelationAPI, error) { + return api, nil + }} + return modelcmd.Wrap(cmd) +} + +// NewRemoveRelationCommandForTest returns an RemoveRelationCommand with the api provided as specified. 
+func NewRemoveRelationCommandForTest(api ApplicationDestroyRelationAPI) cmd.Command { + cmd := &removeRelationCommand{newAPIFunc: func() (ApplicationDestroyRelationAPI, error) { + return api, nil + }} + return modelcmd.Wrap(cmd) +} + type Patcher interface { PatchValue(dest, value interface{}) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/application/expose_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/application/expose_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/application/expose_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/application/expose_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -9,7 +9,6 @@ gc "gopkg.in/check.v1" "gopkg.in/juju/charm.v6-unstable" - "github.com/juju/juju/cmd/juju/common" jujutesting "github.com/juju/juju/juju/testing" "github.com/juju/juju/rpc" "github.com/juju/juju/testcharms" @@ -18,12 +17,12 @@ type ExposeSuite struct { jujutesting.RepoSuite - common.CmdBlockHelper + testing.CmdBlockHelper } func (s *ExposeSuite) SetUpTest(c *gc.C) { s.RepoSuite.SetUpTest(c) - s.CmdBlockHelper = common.NewCmdBlockHelper(s.APIState) + s.CmdBlockHelper = testing.NewCmdBlockHelper(s.APIState) c.Assert(s.CmdBlockHelper, gc.NotNil) s.AddCleanup(func(*gc.C) { s.CmdBlockHelper.Close() }) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/application/fakeapplication_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/application/fakeapplication_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/application/fakeapplication_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/application/fakeapplication_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -13,20 +13,20 @@ // fakeServiceAPI is the fake application API for testing the application // update command. 
-type fakeServiceAPI struct { - serviceName string - charmName string - values map[string]interface{} - config string - err error +type fakeApplicationAPI struct { + name string + charmName string + values map[string]interface{} + config string + err error } -func (f *fakeServiceAPI) Update(args params.ApplicationUpdate) error { +func (f *fakeApplicationAPI) Update(args params.ApplicationUpdate) error { if f.err != nil { return f.err } - if args.ApplicationName != f.serviceName { + if args.ApplicationName != f.name { return errors.NotFoundf("application %q", args.ApplicationName) } @@ -34,12 +34,12 @@ return nil } -func (f *fakeServiceAPI) Close() error { +func (f *fakeApplicationAPI) Close() error { return nil } -func (f *fakeServiceAPI) Get(application string) (*params.ApplicationGetResults, error) { - if application != f.serviceName { +func (f *fakeApplicationAPI) Get(application string) (*params.ApplicationGetResults, error) { + if application != f.name { return nil, errors.NotFoundf("application %q", application) } @@ -53,18 +53,18 @@ } return ¶ms.ApplicationGetResults{ - Application: f.serviceName, + Application: f.name, Charm: f.charmName, Config: configInfo, }, nil } -func (f *fakeServiceAPI) Set(application string, options map[string]string) error { +func (f *fakeApplicationAPI) Set(application string, options map[string]string) error { if f.err != nil { return f.err } - if application != f.serviceName { + if application != f.name { return errors.NotFoundf("application %q", application) } @@ -78,19 +78,19 @@ return nil } -func (f *fakeServiceAPI) Unset(application string, options []string) error { +func (f *fakeApplicationAPI) Unset(application string, options []string) error { if f.err != nil { return f.err } - if application != f.serviceName { + if application != f.name { return errors.NotFoundf("application %q", application) } // Verify all options before unsetting any of them. 
for _, name := range options { if _, ok := f.values[name]; !ok { - return fmt.Errorf("unknown option %q", name) + return errors.Errorf("unknown option %q", name) } } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/application/flags.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/application/flags.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/application/flags.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/application/flags.go 2016-10-13 14:31:49.000000000 +0000 @@ -21,10 +21,17 @@ func (f storageFlag) Set(s string) error { fields := strings.SplitN(s, "=", 2) if len(fields) < 2 { - return errors.New("expected [:]=") + if f.bundleStores != nil { + return errors.New("expected [:]=") + } else { + return errors.New("expected =") + } } var serviceName, storageName string if colon := strings.IndexRune(fields[0], ':'); colon >= 0 { + if f.bundleStores == nil { + return errors.New("expected =") + } serviceName = fields[0][:colon] storageName = fields[0][colon+1:] } else { @@ -56,13 +63,15 @@ // String implements gnuflag.Value.String. 
func (f storageFlag) String() string { - strs := make([]string, 0, len(*f.stores)+len(*f.bundleStores)) + strs := make([]string, 0, len(*f.stores)) for store, cons := range *f.stores { strs = append(strs, fmt.Sprintf("%s=%v", store, cons)) } - for application, stores := range *f.bundleStores { - for store, cons := range stores { - strs = append(strs, fmt.Sprintf("%s:%s=%v", application, store, cons)) + if f.bundleStores != nil { + for application, stores := range *f.bundleStores { + for store, cons := range stores { + strs = append(strs, fmt.Sprintf("%s:%s=%v", application, store, cons)) + } } } return strings.Join(strs, " ") diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/application/flags_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/application/flags_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/application/flags_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/application/flags_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -8,6 +8,8 @@ "github.com/juju/testing" jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" + + "github.com/juju/juju/storage" ) var _ = gc.Suite(&FlagSuite{}) @@ -47,3 +49,47 @@ err = sm.Set("bar=someothervalue") c.Assert(err, gc.ErrorMatches, ".*duplicate.*bar.*") } + +func (FlagSuite) TestStorageFlag(c *gc.C) { + var stores map[string]storage.Constraints + flag := storageFlag{&stores, nil} + err := flag.Set("foo=bar") + c.Assert(err, jc.ErrorIsNil) + c.Assert(stores, jc.DeepEquals, map[string]storage.Constraints{ + "foo": {Pool: "bar", Count: 1}, + }) +} + +func (FlagSuite) TestStorageFlagErrors(c *gc.C) { + flag := storageFlag{new(map[string]storage.Constraints), nil} + err := flag.Set("foo") + c.Assert(err, gc.ErrorMatches, `expected =`) + err = flag.Set("foo:bar=baz") + c.Assert(err, gc.ErrorMatches, `expected =`) + err = flag.Set("foo=") + c.Assert(err, gc.ErrorMatches, `cannot parse disk constraints: storage constraints require 
at least one field to be specified`) +} + +func (FlagSuite) TestStorageFlagBundleStorage(c *gc.C) { + var stores map[string]storage.Constraints + var bundleStores map[string]map[string]storage.Constraints + flag := storageFlag{&stores, &bundleStores} + err := flag.Set("foo=bar") + c.Assert(err, jc.ErrorIsNil) + err = flag.Set("app:baz=qux") + c.Assert(err, jc.ErrorIsNil) + c.Assert(stores, jc.DeepEquals, map[string]storage.Constraints{ + "foo": {Pool: "bar", Count: 1}, + }) + c.Assert(bundleStores, jc.DeepEquals, map[string]map[string]storage.Constraints{ + "app": map[string]storage.Constraints{ + "baz": {Pool: "qux", Count: 1}, + }, + }) +} + +func (FlagSuite) TestStorageFlagBundleStorageErrors(c *gc.C) { + flag := storageFlag{new(map[string]storage.Constraints), new(map[string]map[string]storage.Constraints)} + err := flag.Set("foo") + c.Assert(err, gc.ErrorMatches, `expected \[\:]=`) +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/application/get.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/application/get.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/application/get.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/application/get.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,132 +0,0 @@ -// Copyright 2012-2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package application - -import ( - "fmt" - - "github.com/juju/cmd" - "github.com/juju/errors" - "launchpad.net/gnuflag" - - "github.com/juju/juju/api/application" - "github.com/juju/juju/apiserver/params" - "github.com/juju/juju/cmd/modelcmd" -) - -var usageGetConfigSummary = ` -Displays configuration settings for a deployed application.`[1:] - -var usageGetConfigDetails = ` -By default, all configuration (keys, values, metadata) for the application are -displayed if a key is not specified. 
-Output includes the name of the charm used to deploy the application and a -listing of the application-specific configuration settings. -See `[1:] + "`juju status`" + ` for application names. - -Examples: - juju get-config mysql - juju get-config mysql-testing - juju get-config mysql wait-timeout - -See also: - set-config - deploy - status` - -// NewGetCommand returns a command used to get application attributes. -func NewGetCommand() cmd.Command { - return modelcmd.Wrap(&getCommand{}) -} - -// getCommand retrieves the configuration of an application. -type getCommand struct { - modelcmd.ModelCommandBase - applicationName string - key string - out cmd.Output - api getServiceAPI -} - -func (c *getCommand) Info() *cmd.Info { - return &cmd.Info{ - Name: "get-config", - Args: " [attribute-key]", - Purpose: usageGetConfigSummary, - Doc: usageGetConfigDetails, - Aliases: []string{"get-configs"}, - } -} - -func (c *getCommand) SetFlags(f *gnuflag.FlagSet) { - // TODO(dfc) add json formatting ? - c.out.AddFlags(f, "yaml", map[string]cmd.Formatter{ - "yaml": cmd.FormatYaml, - }) -} - -func (c *getCommand) Init(args []string) error { - // TODO(dfc) add --schema-only - if len(args) == 0 { - return errors.New("no application name specified") - } - c.applicationName = args[0] - if len(args) == 1 { - return nil - } - c.key = args[1] - return cmd.CheckEmpty(args[2:]) -} - -// getServiceAPI defines the methods on the client API -// that the application get command calls. -type getServiceAPI interface { - Close() error - Get(application string) (*params.ApplicationGetResults, error) -} - -func (c *getCommand) getAPI() (getServiceAPI, error) { - if c.api != nil { - return c.api, nil - } - root, err := c.NewAPIRoot() - if err != nil { - return nil, errors.Trace(err) - } - return application.NewClient(root), nil -} - -// Run fetches the configuration of the application and formats -// the result as a YAML string. 
-func (c *getCommand) Run(ctx *cmd.Context) error { - apiclient, err := c.getAPI() - if err != nil { - return err - } - defer apiclient.Close() - - results, err := apiclient.Get(c.applicationName) - if err != nil { - return err - } - if c.key != "" { - info, found := results.Config[c.key].(map[string]interface{}) - if !found { - return fmt.Errorf("key %q not found in %q application settings.", c.key, c.applicationName) - } - out, err := cmd.FormatSmart(info["value"]) - if err != nil { - return err - } - fmt.Fprintf(ctx.Stdout, "%v\n", string(out)) - return nil - } - - resultsMap := map[string]interface{}{ - "application": results.Application, - "charm": results.Charm, - "settings": results.Config, - } - return c.out.Write(ctx, resultsMap) -} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/application/get_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/application/get_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/application/get_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/application/get_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,133 +0,0 @@ -// Copyright 2012-2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. 
- -package application_test - -import ( - "bytes" - - "github.com/juju/cmd" - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" - goyaml "gopkg.in/yaml.v2" - - "github.com/juju/juju/cmd/juju/application" - coretesting "github.com/juju/juju/testing" -) - -type GetSuite struct { - coretesting.FakeJujuXDGDataHomeSuite - fake *fakeServiceAPI -} - -var _ = gc.Suite(&GetSuite{}) - -var getTests = []struct { - application string - expected map[string]interface{} -}{ - { - "dummy-application", - map[string]interface{}{ - "application": "dummy-application", - "charm": "dummy", - "settings": map[string]interface{}{ - "title": map[string]interface{}{ - "description": "Specifies title", - "type": "string", - "value": "Nearly There", - }, - "skill-level": map[string]interface{}{ - "description": "Specifies skill-level", - "value": 100, - "type": "int", - }, - "username": map[string]interface{}{ - "description": "Specifies username", - "type": "string", - "value": "admin001", - }, - "outlook": map[string]interface{}{ - "description": "Specifies outlook", - "type": "string", - "value": "true", - }, - }, - }, - }, - - // TODO(dfc) add additional services (need more charms) - // TODO(dfc) add set tests -} - -func (s *GetSuite) SetUpTest(c *gc.C) { - s.FakeJujuXDGDataHomeSuite.SetUpTest(c) - s.fake = &fakeServiceAPI{serviceName: "dummy-application", charmName: "dummy", - values: map[string]interface{}{ - "title": "Nearly There", - "skill-level": 100, - "username": "admin001", - "outlook": "true", - }} -} - -func (s *GetSuite) TestGetCommandInit(c *gc.C) { - // missing args - err := coretesting.InitCommand(application.NewGetCommandForTest(s.fake), []string{}) - c.Assert(err, gc.ErrorMatches, "no application name specified") -} - -func (s *GetSuite) TestGetCommandInitWithApplication(c *gc.C) { - err := coretesting.InitCommand(application.NewGetCommandForTest(s.fake), []string{"app"}) - // everything ok - c.Assert(err, jc.ErrorIsNil) -} - -func (s *GetSuite) 
TestGetCommandInitWithKey(c *gc.C) { - err := coretesting.InitCommand(application.NewGetCommandForTest(s.fake), []string{"app", "key"}) - // everything ok - c.Assert(err, jc.ErrorIsNil) -} - -func (s *GetSuite) TestGetCommandInitTooManyArgs(c *gc.C) { - err := coretesting.InitCommand(application.NewGetCommandForTest(s.fake), []string{"app", "key", "another"}) - c.Assert(err, gc.ErrorMatches, `unrecognized args: \["another"\]`) -} - -func (s *GetSuite) TestGetConfig(c *gc.C) { - for _, t := range getTests { - ctx := coretesting.Context(c) - code := cmd.Main(application.NewGetCommandForTest(s.fake), ctx, []string{t.application}) - c.Check(code, gc.Equals, 0) - c.Assert(ctx.Stderr.(*bytes.Buffer).String(), gc.Equals, "") - // round trip via goyaml to avoid being sucked into a quagmire of - // map[interface{}]interface{} vs map[string]interface{}. This is - // also required if we add json support to this command. - buf, err := goyaml.Marshal(t.expected) - c.Assert(err, jc.ErrorIsNil) - expected := make(map[string]interface{}) - err = goyaml.Unmarshal(buf, &expected) - c.Assert(err, jc.ErrorIsNil) - - actual := make(map[string]interface{}) - err = goyaml.Unmarshal(ctx.Stdout.(*bytes.Buffer).Bytes(), &actual) - c.Assert(err, jc.ErrorIsNil) - c.Assert(actual, gc.DeepEquals, expected) - } -} - -func (s *GetSuite) TestGetConfigKey(c *gc.C) { - ctx := coretesting.Context(c) - code := cmd.Main(application.NewGetCommandForTest(s.fake), ctx, []string{"dummy-application", "title"}) - c.Check(code, gc.Equals, 0) - c.Assert(ctx.Stderr.(*bytes.Buffer).String(), gc.Equals, "") - c.Assert(ctx.Stdout.(*bytes.Buffer).String(), gc.Equals, "Nearly There\n") -} - -func (s *GetSuite) TestGetConfigKeyNotFound(c *gc.C) { - ctx := coretesting.Context(c) - code := cmd.Main(application.NewGetCommandForTest(s.fake), ctx, []string{"dummy-application", "invalid"}) - c.Check(code, gc.Equals, 1) - c.Assert(ctx.Stderr.(*bytes.Buffer).String(), gc.Equals, "error: key \"invalid\" not found in 
\"dummy-application\" application settings.\n") - c.Assert(ctx.Stdout.(*bytes.Buffer).String(), gc.Equals, "") -} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/application/register.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/application/register.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/application/register.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/application/register.go 2016-10-13 14:31:49.000000000 +0000 @@ -14,11 +14,8 @@ "github.com/juju/cmd" "github.com/juju/errors" + "github.com/juju/gnuflag" "gopkg.in/macaroon-bakery.v1/httpbakery" - "launchpad.net/gnuflag" - - "github.com/juju/juju/api" - "github.com/juju/juju/api/charms" ) var budgetWithLimitRe = regexp.MustCompile(`^[a-zA-Z0-9\-]+:[0-9]+$`) @@ -50,7 +47,7 @@ // RunPre obtains authorization to deploy this charm. The authorization, if received is not // sent to the controller, rather it is kept as an attribute on RegisterMeteredCharm. -func (r *RegisterMeteredCharm) RunPre(state api.Connection, bakeryClient *httpbakery.Client, ctx *cmd.Context, deployInfo DeploymentInfo) error { +func (r *RegisterMeteredCharm) RunPre(api MeteredDeployAPI, bakeryClient *httpbakery.Client, ctx *cmd.Context, deployInfo DeploymentInfo) error { if allocBudget, allocLimit, err := parseBudgetWithLimit(r.AllocationSpec); err == nil { // Make these available to registration if valid. 
r.budget, r.limit = allocBudget, allocLimit @@ -58,14 +55,17 @@ return errors.Trace(err) } - charmsClient := charms.NewClient(state) - metered, err := charmsClient.IsMetered(deployInfo.CharmID.URL.String()) + metered, err := api.IsMetered(deployInfo.CharmID.URL.String()) if err != nil { - return err + return errors.Trace(err) } if !metered { return nil } + info := deployInfo.CharmInfo + if r.Plan == "" && info.Metrics != nil && !info.Metrics.PlanRequired() { + return nil + } if r.Plan == "" && deployInfo.CharmID.URL.Schema == "cs" { r.Plan, err = r.getDefaultPlan(bakeryClient, deployInfo.CharmID.URL.String()) @@ -75,8 +75,8 @@ if err1 != nil { return err1 } - charmUrl := deployInfo.CharmID.URL.String() - return errors.Errorf(`%v has no default plan. Try "juju deploy --plan with one of %v"`, charmUrl, strings.Join(options, ", ")) + charmURL := deployInfo.CharmID.URL.String() + return errors.Errorf(`%v has no default plan. Try "juju deploy --plan with one of %v"`, charmURL, strings.Join(options, ", ")) } return err } @@ -88,7 +88,8 @@ deployInfo.ApplicationName, r.budget, r.limit, - bakeryClient) + bakeryClient, + ) if err != nil { if deployInfo.CharmID.URL.Schema == "cs" { logger.Infof("failed to obtain plan authorization: %v", err) @@ -100,20 +101,13 @@ } // RunPost sends credentials obtained during the call to RunPre to the controller. 
-func (r *RegisterMeteredCharm) RunPost(state api.Connection, bakeryClient *httpbakery.Client, ctx *cmd.Context, deployInfo DeploymentInfo, prevErr error) error { +func (r *RegisterMeteredCharm) RunPost(api MeteredDeployAPI, bakeryClient *httpbakery.Client, ctx *cmd.Context, deployInfo DeploymentInfo, prevErr error) error { if prevErr != nil { return nil } if r.credentials == nil { return nil } - api, cerr := getMetricCredentialsAPI(state) - if cerr != nil { - logger.Infof("failed to get the metrics credentials setter: %v", cerr) - return cerr - } - defer api.Close() - err := api.SetMetricCredentials(deployInfo.ApplicationName, r.credentials) if err != nil { logger.Warningf("failed to set metric credentials: %v", err) @@ -124,11 +118,11 @@ } type noDefaultPlanError struct { - cUrl string + cURL string } func (e *noDefaultPlanError) Error() string { - return fmt.Sprintf("%v has no default plan", e.cUrl) + return fmt.Sprintf("%v has no default plan", e.cURL) } func isNoDefaultPlanError(e error) bool { diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/application/register_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/application/register_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/application/register_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/application/register_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,7 +4,6 @@ import ( "encoding/json" - "fmt" "net/http" "net/http/httptest" @@ -16,8 +15,7 @@ "gopkg.in/juju/charm.v6-unstable" "gopkg.in/macaroon-bakery.v1/httpbakery" - "github.com/juju/juju/api" - "github.com/juju/juju/apiserver/params" + apicharms "github.com/juju/juju/api/charms" "github.com/juju/juju/charmstore" coretesting "github.com/juju/juju/testing" ) @@ -59,16 +57,59 @@ }, ApplicationName: "application name", ModelUUID: "model uuid", + CharmInfo: &apicharms.CharmInfo{ + Metrics: &charm.Metrics{ + Plan: &charm.Plan{Required: true}, + }, + }, } - 
err := s.register.RunPre(&mockAPIConnection{Stub: s.stub}, client, s.ctx, d) + err := s.register.RunPre(&mockMeteredDeployAPI{Stub: s.stub}, client, s.ctx, d) c.Assert(err, jc.ErrorIsNil) - err = s.register.RunPost(&mockAPIConnection{Stub: s.stub}, client, s.ctx, d, nil) + err = s.register.RunPost(&mockMeteredDeployAPI{Stub: s.stub}, client, s.ctx, d, nil) c.Assert(err, jc.ErrorIsNil) authorization, err := json.Marshal([]byte("hello registration")) authorization = append(authorization, byte(0xa)) + s.stub.CheckCalls(c, []testing.StubCall{{ + "IsMetered", []interface{}{"cs:quantal/metered-1"}, + }, { + "Authorize", []interface{}{metricRegistrationPost{ + ModelUUID: "model uuid", + CharmURL: "cs:quantal/metered-1", + ApplicationName: "application name", + PlanURL: "someplan", + Budget: "personal", + Limit: "100", + }}, + }, { + "SetMetricCredentials", []interface{}{ + "application name", + authorization, + }}, + }) +} + +func (s *registrationSuite) TestOptionalPlanMeteredCharm(c *gc.C) { + client := httpbakery.NewClient() + d := DeploymentInfo{ + CharmID: charmstore.CharmID{ + URL: charm.MustParseURL("cs:quantal/metered-1"), + }, + ApplicationName: "application name", + ModelUUID: "model uuid", + CharmInfo: &apicharms.CharmInfo{ + Metrics: &charm.Metrics{ + Plan: &charm.Plan{Required: false}, + }, + }, + } + err := s.register.RunPre(&mockMeteredDeployAPI{Stub: s.stub}, client, s.ctx, d) c.Assert(err, jc.ErrorIsNil) + err = s.register.RunPost(&mockMeteredDeployAPI{Stub: s.stub}, client, s.ctx, d, nil) + c.Assert(err, jc.ErrorIsNil) + authorization, err := json.Marshal([]byte("hello registration")) + authorization = append(authorization, byte(0xa)) s.stub.CheckCalls(c, []testing.StubCall{{ - "APICall", []interface{}{"Charms", "IsMetered", params.CharmURL{URL: "cs:quantal/metered-1"}}, + "IsMetered", []interface{}{"cs:quantal/metered-1"}, }, { "Authorize", []interface{}{metricRegistrationPost{ ModelUUID: "model uuid", @@ -79,13 +120,50 @@ Limit: "100", }}, }, { - 
"APICall", []interface{}{"Application", "SetMetricCredentials", params.ApplicationMetricCredentials{ - Creds: []params.ApplicationMetricCredential{params.ApplicationMetricCredential{ - ApplicationName: "application name", - MetricCredentials: authorization, - }}, + "SetMetricCredentials", []interface{}{ + "application name", + authorization, }}, - }}) + }) +} + +func (s *registrationSuite) TestPlanNotSpecifiedCharm(c *gc.C) { + client := httpbakery.NewClient() + d := DeploymentInfo{ + CharmID: charmstore.CharmID{ + URL: charm.MustParseURL("cs:quantal/metered-1"), + }, + ApplicationName: "application name", + ModelUUID: "model uuid", + CharmInfo: &apicharms.CharmInfo{ + Metrics: &charm.Metrics{ + Plan: nil, + }, + }, + } + err := s.register.RunPre(&mockMeteredDeployAPI{Stub: s.stub}, client, s.ctx, d) + c.Assert(err, jc.ErrorIsNil) + err = s.register.RunPost(&mockMeteredDeployAPI{Stub: s.stub}, client, s.ctx, d, nil) + c.Assert(err, jc.ErrorIsNil) + authorization, err := json.Marshal([]byte("hello registration")) + authorization = append(authorization, byte(0xa)) + s.stub.CheckCalls(c, []testing.StubCall{{ + "IsMetered", []interface{}{"cs:quantal/metered-1"}, + }, { + "Authorize", []interface{}{metricRegistrationPost{ + ModelUUID: "model uuid", + CharmURL: "cs:quantal/metered-1", + ApplicationName: "application name", + PlanURL: "someplan", + Budget: "personal", + Limit: "100", + }}, + }, { + "SetMetricCredentials", []interface{}{ + "application name", + authorization, + }}, + }) } func (s *registrationSuite) TestMeteredCharmAPIError(c *gc.C) { @@ -97,11 +175,16 @@ }, ApplicationName: "application name", ModelUUID: "model uuid", + CharmInfo: &apicharms.CharmInfo{ + Metrics: &charm.Metrics{ + Plan: &charm.Plan{Required: true}, + }, + }, } - err := s.register.RunPre(&mockAPIConnection{Stub: s.stub}, client, s.ctx, d) + err := s.register.RunPre(&mockMeteredDeployAPI{Stub: s.stub}, client, s.ctx, d) c.Assert(err, gc.ErrorMatches, `authorization failed: something 
failed`) s.stub.CheckCalls(c, []testing.StubCall{{ - "APICall", []interface{}{"Charms", "IsMetered", params.CharmURL{URL: "cs:quantal/metered-1"}}, + "IsMetered", []interface{}{"cs:quantal/metered-1"}, }, { "Authorize", []interface{}{metricRegistrationPost{ ModelUUID: "model uuid", @@ -122,6 +205,11 @@ }, ApplicationName: "application name", ModelUUID: "model uuid", + CharmInfo: &apicharms.CharmInfo{ + Metrics: &charm.Metrics{ + Plan: &charm.Plan{Required: true}, + }, + }, } s.register = &RegisterMeteredCharm{ Plan: "someplan", @@ -129,7 +217,7 @@ AllocationSpec: "invalid allocation", } - err := s.register.RunPre(&mockAPIConnection{Stub: s.stub}, client, s.ctx, d) + err := s.register.RunPre(&mockMeteredDeployAPI{Stub: s.stub}, client, s.ctx, d) c.Assert(err, gc.ErrorMatches, `invalid allocation, expecting :`) s.stub.CheckNoCalls(c) } @@ -142,17 +230,22 @@ }, ApplicationName: "application name", ModelUUID: "model uuid", + CharmInfo: &apicharms.CharmInfo{ + Metrics: &charm.Metrics{ + Plan: &charm.Plan{Required: true}, + }, + }, } - err := s.register.RunPre(&mockAPIConnection{Stub: s.stub}, client, s.ctx, d) + err := s.register.RunPre(&mockMeteredDeployAPI{Stub: s.stub}, client, s.ctx, d) c.Assert(err, jc.ErrorIsNil) deployError := errors.New("deployment failed") - err = s.register.RunPost(&mockAPIConnection{Stub: s.stub}, client, s.ctx, d, deployError) + err = s.register.RunPost(&mockMeteredDeployAPI{Stub: s.stub}, client, s.ctx, d, deployError) c.Assert(err, jc.ErrorIsNil) authorization, err := json.Marshal([]byte("hello registration")) authorization = append(authorization, byte(0xa)) c.Assert(err, jc.ErrorIsNil) s.stub.CheckCalls(c, []testing.StubCall{{ - "APICall", []interface{}{"Charms", "IsMetered", params.CharmURL{URL: "cs:quantal/metered-1"}}, + "IsMetered", []interface{}{"cs:quantal/metered-1"}, }, { "Authorize", []interface{}{metricRegistrationPost{ ModelUUID: "model uuid", @@ -173,15 +266,20 @@ }, ApplicationName: "application name", ModelUUID: "model 
uuid", + CharmInfo: &apicharms.CharmInfo{ + Metrics: &charm.Metrics{ + Plan: &charm.Plan{Required: true}, + }, + }, } - err := s.register.RunPre(&mockAPIConnection{Stub: s.stub}, client, s.ctx, d) + err := s.register.RunPre(&mockMeteredDeployAPI{Stub: s.stub}, client, s.ctx, d) c.Assert(err, jc.ErrorIsNil) - err = s.register.RunPost(&mockAPIConnection{Stub: s.stub}, client, s.ctx, d, nil) + err = s.register.RunPost(&mockMeteredDeployAPI{Stub: s.stub}, client, s.ctx, d, nil) c.Assert(err, jc.ErrorIsNil) authorization, err := json.Marshal([]byte("hello registration")) authorization = append(authorization, byte(0xa)) s.stub.CheckCalls(c, []testing.StubCall{{ - "APICall", []interface{}{"Charms", "IsMetered", params.CharmURL{URL: "local:quantal/metered-1"}}, + "IsMetered", []interface{}{"local:quantal/metered-1"}, }, { "Authorize", []interface{}{metricRegistrationPost{ ModelUUID: "model uuid", @@ -192,12 +290,10 @@ Limit: "100", }}, }, { - "APICall", []interface{}{"Application", "SetMetricCredentials", params.ApplicationMetricCredentials{ - Creds: []params.ApplicationMetricCredential{params.ApplicationMetricCredential{ - ApplicationName: "application name", - MetricCredentials: authorization, - }}, - }}, + "SetMetricCredentials", []interface{}{ + "application name", + authorization, + }, }}) } @@ -214,15 +310,20 @@ }, ApplicationName: "application name", ModelUUID: "model uuid", + CharmInfo: &apicharms.CharmInfo{ + Metrics: &charm.Metrics{ + Plan: &charm.Plan{Required: true}, + }, + }, } - err := s.register.RunPre(&mockAPIConnection{Stub: s.stub}, client, s.ctx, d) + err := s.register.RunPre(&mockMeteredDeployAPI{Stub: s.stub}, client, s.ctx, d) c.Assert(err, jc.ErrorIsNil) - err = s.register.RunPost(&mockAPIConnection{Stub: s.stub}, client, s.ctx, d, nil) + err = s.register.RunPost(&mockMeteredDeployAPI{Stub: s.stub}, client, s.ctx, d, nil) c.Assert(err, jc.ErrorIsNil) authorization, err := json.Marshal([]byte("hello registration")) authorization = 
append(authorization, byte(0xa)) s.stub.CheckCalls(c, []testing.StubCall{{ - "APICall", []interface{}{"Charms", "IsMetered", params.CharmURL{URL: "local:quantal/metered-1"}}, + "IsMetered", []interface{}{"local:quantal/metered-1"}, }, { "Authorize", []interface{}{metricRegistrationPost{ ModelUUID: "model uuid", @@ -233,13 +334,11 @@ Limit: "100", }}, }, { - "APICall", []interface{}{"Application", "SetMetricCredentials", params.ApplicationMetricCredentials{ - Creds: []params.ApplicationMetricCredential{params.ApplicationMetricCredential{ - ApplicationName: "application name", - MetricCredentials: authorization, - }}, + "SetMetricCredentials", []interface{}{ + "application name", + authorization, }}, - }}) + }) } func (s *registrationSuite) TestMeteredCharmNoPlanSet(c *gc.C) { @@ -254,16 +353,21 @@ }, ApplicationName: "application name", ModelUUID: "model uuid", + CharmInfo: &apicharms.CharmInfo{ + Metrics: &charm.Metrics{ + Plan: &charm.Plan{Required: true}, + }, + }, } - err := s.register.RunPre(&mockAPIConnection{Stub: s.stub}, client, s.ctx, d) + err := s.register.RunPre(&mockMeteredDeployAPI{Stub: s.stub}, client, s.ctx, d) c.Assert(err, jc.ErrorIsNil) - err = s.register.RunPost(&mockAPIConnection{Stub: s.stub}, client, s.ctx, d, nil) + err = s.register.RunPost(&mockMeteredDeployAPI{Stub: s.stub}, client, s.ctx, d, nil) c.Assert(err, jc.ErrorIsNil) authorization, err := json.Marshal([]byte("hello registration")) authorization = append(authorization, byte(0xa)) c.Assert(err, jc.ErrorIsNil) s.stub.CheckCalls(c, []testing.StubCall{{ - "APICall", []interface{}{"Charms", "IsMetered", params.CharmURL{URL: "cs:quantal/metered-1"}}, + "IsMetered", []interface{}{"cs:quantal/metered-1"}, }, { "DefaultPlan", []interface{}{"cs:quantal/metered-1"}, }, { @@ -276,12 +380,10 @@ Limit: "100", }}, }, { - "APICall", []interface{}{"Application", "SetMetricCredentials", params.ApplicationMetricCredentials{ - Creds: 
[]params.ApplicationMetricCredential{params.ApplicationMetricCredential{ - ApplicationName: "application name", - MetricCredentials: authorization, - }}, - }}, + "SetMetricCredentials", []interface{}{ + "application name", + authorization, + }, }}) } @@ -298,11 +400,16 @@ }, ApplicationName: "application name", ModelUUID: "model uuid", + CharmInfo: &apicharms.CharmInfo{ + Metrics: &charm.Metrics{ + Plan: &charm.Plan{Required: true}, + }, + }, } - err := s.register.RunPre(&mockAPIConnection{Stub: s.stub}, client, s.ctx, d) + err := s.register.RunPre(&mockMeteredDeployAPI{Stub: s.stub}, client, s.ctx, d) c.Assert(err, gc.ErrorMatches, `cs:quantal/metered-1 has no default plan. Try "juju deploy --plan with one of thisplan, thisotherplan"`) s.stub.CheckCalls(c, []testing.StubCall{{ - "APICall", []interface{}{"Charms", "IsMetered", params.CharmURL{URL: "cs:quantal/metered-1"}}, + "IsMetered", []interface{}{"cs:quantal/metered-1"}, }, { "DefaultPlan", []interface{}{"cs:quantal/metered-1"}, }, { @@ -323,11 +430,16 @@ }, ApplicationName: "application name", ModelUUID: "model uuid", + CharmInfo: &apicharms.CharmInfo{ + Metrics: &charm.Metrics{ + Plan: &charm.Plan{Required: true}, + }, + }, } - err := s.register.RunPre(&mockAPIConnection{Stub: s.stub}, client, s.ctx, d) + err := s.register.RunPre(&mockMeteredDeployAPI{Stub: s.stub}, client, s.ctx, d) c.Assert(err, gc.ErrorMatches, `failed to query default plan:.*`) s.stub.CheckCalls(c, []testing.StubCall{{ - "APICall", []interface{}{"Charms", "IsMetered", params.CharmURL{URL: "cs:quantal/metered-1"}}, + "IsMetered", []interface{}{"cs:quantal/metered-1"}, }, { "DefaultPlan", []interface{}{"cs:quantal/metered-1"}, }}) @@ -341,20 +453,25 @@ }, ApplicationName: "application name", ModelUUID: "model uuid", + CharmInfo: &apicharms.CharmInfo{ + Metrics: &charm.Metrics{ + Plan: &charm.Plan{Required: true}, + }, + }, } - err := s.register.RunPre(&mockAPIConnection{Stub: s.stub}, client, s.ctx, d) + err := 
s.register.RunPre(&mockMeteredDeployAPI{Stub: s.stub}, client, s.ctx, d) c.Assert(err, jc.ErrorIsNil) s.stub.CheckCalls(c, []testing.StubCall{{ - "APICall", []interface{}{"Charms", "IsMetered", params.CharmURL{URL: "cs:quantal/unmetered-1"}}, + "IsMetered", []interface{}{"cs:quantal/unmetered-1"}, }}) s.stub.ResetCalls() - err = s.register.RunPost(&mockAPIConnection{Stub: s.stub}, client, s.ctx, d, nil) + err = s.register.RunPost(&mockMeteredDeployAPI{Stub: s.stub}, client, s.ctx, d, nil) c.Assert(err, jc.ErrorIsNil) s.stub.CheckCalls(c, []testing.StubCall{}) } func (s *registrationSuite) TestFailedAuth(c *gc.C) { - s.stub.SetErrors(nil, fmt.Errorf("could not authorize")) + s.stub.SetErrors(nil, errors.Errorf("could not authorize")) client := httpbakery.NewClient() d := DeploymentInfo{ CharmID: charmstore.CharmID{ @@ -362,14 +479,19 @@ }, ApplicationName: "application name", ModelUUID: "model uuid", + CharmInfo: &apicharms.CharmInfo{ + Metrics: &charm.Metrics{ + Plan: &charm.Plan{Required: true}, + }, + }, } - err := s.register.RunPre(&mockAPIConnection{Stub: s.stub}, client, s.ctx, d) + err := s.register.RunPre(&mockMeteredDeployAPI{Stub: s.stub}, client, s.ctx, d) c.Assert(err, gc.ErrorMatches, `authorization failed:.*`) authorization, err := json.Marshal([]byte("hello registration")) authorization = append(authorization, byte(0xa)) c.Assert(err, jc.ErrorIsNil) s.stub.CheckCalls(c, []testing.StubCall{{ - "APICall", []interface{}{"Charms", "IsMetered", params.CharmURL{URL: "cs:quantal/metered-1"}}, + "IsMetered", []interface{}{"cs:quantal/metered-1"}, }, { "Authorize", []interface{}{metricRegistrationPost{ ModelUUID: "model uuid", @@ -382,6 +504,89 @@ }}) } +func (s *registrationSuite) TestPlanArgumentPlanRequiredInteraction(c *gc.C) { + tests := []struct { + about string + planArgument string + planRequired bool + noDefaultPlan bool + apiCalls []string + err string + }{{ + about: "deploy with --plan, required false", + planArgument: "plan", + planRequired: false, 
+ apiCalls: []string{"IsMetered", "Authorize"}, + err: "", + }, { + about: "deploy with --plan, required true", + planArgument: "plan", + planRequired: true, + apiCalls: []string{"IsMetered", "Authorize"}, + err: "", + }, { + about: "deploy without --plan, required false with default plan", + planRequired: false, + apiCalls: []string{"IsMetered"}, + err: "", + }, { + about: "deploy without --plan, required true with default plan", + planRequired: true, + apiCalls: []string{"IsMetered", "DefaultPlan", "Authorize"}, + err: "", + }, { + about: "deploy without --plan, required false with no default plan", + planRequired: false, + noDefaultPlan: true, + apiCalls: []string{"IsMetered"}, + err: "", + }, { + about: "deploy without --plan, required true with no default plan", + planRequired: true, + noDefaultPlan: true, + apiCalls: []string{"IsMetered", "DefaultPlan", "ListPlans"}, + err: `cs:quantal/metered-1 has no default plan. Try "juju deploy --plan with one of thisplan, thisotherplan"`, + }, + } + for i, test := range tests { + s.stub.ResetCalls() + c.Logf("running test %d: %s", i, test.about) + if test.noDefaultPlan { + s.stub.SetErrors(nil, errors.NotFoundf("default plan")) + } else { + s.stub.SetErrors(nil) + } + s.register = &RegisterMeteredCharm{ + Plan: test.planArgument, + AllocationSpec: "personal:100", + RegisterURL: s.server.URL, + QueryURL: s.server.URL, + } + client := httpbakery.NewClient() + d := DeploymentInfo{ + CharmID: charmstore.CharmID{ + URL: charm.MustParseURL("cs:quantal/metered-1"), + }, + ApplicationName: "application name", + ModelUUID: "model uuid", + CharmInfo: &apicharms.CharmInfo{ + Metrics: &charm.Metrics{ + Plan: &charm.Plan{Required: test.planRequired}, + }, + }, + } + + err := s.register.RunPre(&mockMeteredDeployAPI{Stub: s.stub}, client, s.ctx, d) + if test.err != "" { + c.Assert(err, gc.ErrorMatches, test.err) + } else { + c.Assert(err, jc.ErrorIsNil) + } + + s.stub.CheckCallNames(c, test.apiCalls...) 
+ } +} + type testMetricsRegistrationHandler struct { *testing.Stub } @@ -459,32 +664,94 @@ } } -type mockAPIConnection struct { - api.Connection - *testing.Stub +var _ = gc.Suite(&noPlanRegistrationSuite{}) + +type noPlanRegistrationSuite struct { + testing.CleanupSuite + stub *testing.Stub + handler *testMetricsRegistrationHandler + server *httptest.Server + register DeployStep + ctx *cmd.Context } -func (*mockAPIConnection) BestFacadeVersion(facade string) int { - return 42 +func (s *noPlanRegistrationSuite) SetUpTest(c *gc.C) { + s.CleanupSuite.SetUpTest(c) + s.stub = &testing.Stub{} + s.handler = &testMetricsRegistrationHandler{Stub: s.stub} + s.server = httptest.NewServer(s.handler) + s.register = &RegisterMeteredCharm{ + Plan: "", + RegisterURL: s.server.URL, + AllocationSpec: "personal:100", + } + s.ctx = coretesting.Context(c) } -func (*mockAPIConnection) Close() error { - return nil +func (s *noPlanRegistrationSuite) TearDownTest(c *gc.C) { + s.CleanupSuite.TearDownTest(c) + s.server.Close() +} +func (s *noPlanRegistrationSuite) TestOptionalPlanMeteredCharm(c *gc.C) { + client := httpbakery.NewClient() + d := DeploymentInfo{ + CharmID: charmstore.CharmID{ + URL: charm.MustParseURL("cs:quantal/metered-1"), + }, + ApplicationName: "application name", + ModelUUID: "model uuid", + CharmInfo: &apicharms.CharmInfo{ + Metrics: &charm.Metrics{ + Plan: &charm.Plan{Required: false}, + }, + }, + } + err := s.register.RunPre(&mockMeteredDeployAPI{Stub: s.stub}, client, s.ctx, d) + c.Assert(err, jc.ErrorIsNil) + err = s.register.RunPost(&mockMeteredDeployAPI{Stub: s.stub}, client, s.ctx, d, nil) + c.Assert(err, jc.ErrorIsNil) + s.stub.CheckCalls(c, []testing.StubCall{{ + "IsMetered", []interface{}{"cs:quantal/metered-1"}, + }}) } -func (m *mockAPIConnection) APICall(objType string, version int, id, request string, parameters, response interface{}) error { - m.MethodCall(m, "APICall", objType, request, parameters) +func (s *noPlanRegistrationSuite) 
TestPlanNotSpecifiedCharm(c *gc.C) { + client := httpbakery.NewClient() + d := DeploymentInfo{ + CharmID: charmstore.CharmID{ + URL: charm.MustParseURL("cs:quantal/metered-1"), + }, + ApplicationName: "application name", + ModelUUID: "model uuid", + CharmInfo: &apicharms.CharmInfo{ + Metrics: &charm.Metrics{ + Plan: nil, + }, + }, + } + err := s.register.RunPre(&mockMeteredDeployAPI{Stub: s.stub}, client, s.ctx, d) + c.Assert(err, jc.ErrorIsNil) + err = s.register.RunPost(&mockMeteredDeployAPI{Stub: s.stub}, client, s.ctx, d, nil) + c.Assert(err, jc.ErrorIsNil) + s.stub.CheckCalls(c, []testing.StubCall{{ + "IsMetered", []interface{}{"cs:quantal/metered-1"}, + }}) +} - switch request { - case "IsMetered": - parameters := parameters.(params.CharmURL) - response := response.(*params.IsMeteredResult) - if parameters.URL == "cs:quantal/metered-1" || parameters.URL == "local:quantal/metered-1" { - response.Metered = true - } - case "SetMetricCredentials": - response := response.(*params.ErrorResults) - response.Results = append(response.Results, params.ErrorResult{Error: nil}) +type mockMeteredDeployAPI struct { + MeteredDeployAPI + *testing.Stub +} + +func (m *mockMeteredDeployAPI) IsMetered(charmURL string) (bool, error) { + m.AddCall("IsMetered", charmURL) + if charmURL == "cs:quantal/metered-1" || charmURL == "local:quantal/metered-1" { + return true, m.NextErr() } + return false, m.NextErr() + +} +func (m *mockMeteredDeployAPI) SetMetricCredentials(service string, credentials []byte) error { + m.AddCall("SetMetricCredentials", service, credentials) return m.NextErr() } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/application/removeapplication.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/application/removeapplication.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/application/removeapplication.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/application/removeapplication.go 
2016-10-13 14:31:49.000000000 +0000 @@ -4,8 +4,6 @@ package application import ( - "fmt" - "github.com/juju/cmd" "github.com/juju/errors" "github.com/juju/romulus/api/budget" @@ -53,16 +51,15 @@ Args: "", Purpose: helpSummaryRmSvc, Doc: helpDetailsRmSvc, - Aliases: []string{"destroy-application"}, } } func (c *removeServiceCommand) Init(args []string) error { if len(args) == 0 { - return fmt.Errorf("no application specified") + return errors.Errorf("no application specified") } if !names.IsValidApplication(args[0]) { - return fmt.Errorf("invalid application name %q", args[0]) + return errors.Errorf("invalid application name %q", args[0]) } c.ApplicationName, args = args[0], args[1:] return cmd.CheckEmpty(args) @@ -132,12 +129,12 @@ resp, err := budgetClient.DeleteAllocation(modelUUID, c.ApplicationName) if wireformat.IsNotAvail(err) { - fmt.Fprintf(ctx.Stdout, "WARNING: Allocation not removed - %s.\n", err.Error()) + logger.Warningf("allocation not removed: %v", err) } else if err != nil { return err } if resp != "" { - fmt.Fprintf(ctx.Stdout, "%s\n", resp) + logger.Infof(resp) } return nil } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/application/removeapplication_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/application/removeapplication_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/application/removeapplication_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/application/removeapplication_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -8,9 +8,13 @@ "github.com/juju/errors" jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" + "gopkg.in/juju/charmrepo.v2-unstable" "gopkg.in/macaroon-bakery.v1/httpbakery" - "github.com/juju/juju/cmd/juju/common" + "github.com/juju/juju/api/annotations" + "github.com/juju/juju/api/application" + "github.com/juju/juju/api/charms" + "github.com/juju/juju/api/modelconfig" "github.com/juju/juju/cmd/modelcmd" jujutesting 
"github.com/juju/juju/juju/testing" "github.com/juju/juju/rpc" @@ -22,7 +26,7 @@ type RemoveServiceSuite struct { jujutesting.RepoSuite - common.CmdBlockHelper + testing.CmdBlockHelper stub *jutesting.Stub budgetAPIClient budgetAPIClient } @@ -31,7 +35,7 @@ func (s *RemoveServiceSuite) SetUpTest(c *gc.C) { s.RepoSuite.SetUpTest(c) - s.CmdBlockHelper = common.NewCmdBlockHelper(s.APIState) + s.CmdBlockHelper = testing.NewCmdBlockHelper(s.APIState) c.Assert(s.CmdBlockHelper, gc.NotNil) s.AddCleanup(func(*gc.C) { s.CmdBlockHelper.Close() }) s.stub = &jutesting.Stub{} @@ -63,8 +67,8 @@ func (s *RemoveServiceSuite) TestRemoveLocalMetered(c *gc.C) { ch := testcharms.Repo.CharmArchivePath(s.CharmsPath, "metered") - deploy := &DeployCommand{} - _, err := testing.RunCommand(c, modelcmd.Wrap(deploy), ch, "--series", "quantal") + deploy := NewDefaultDeployCommand() + _, err := testing.RunCommand(c, deploy, ch, "--series", "quantal") c.Assert(err, jc.ErrorIsNil) err = runRemoveService(c, "metered") c.Assert(err, jc.ErrorIsNil) @@ -125,8 +129,31 @@ s.PatchValue(&getBudgetAPIClient, func(*httpbakery.Client) budgetAPIClient { return s.budgetAPIClient }) testcharms.UploadCharm(c, s.client, "cs:quantal/metered-1", "metered") - deploy := &DeployCommand{} - _, err := testing.RunCommand(c, modelcmd.Wrap(deploy), "cs:quantal/metered-1") + deployCmd := &DeployCommand{} + cmd := modelcmd.Wrap(deployCmd) + deployCmd.NewAPIRoot = func() (DeployAPI, error) { + apiRoot, err := deployCmd.ModelCommandBase.NewAPIRoot() + if err != nil { + return nil, errors.Trace(err) + } + bakeryClient, err := deployCmd.BakeryClient() + if err != nil { + return nil, errors.Trace(err) + } + cstoreClient := newCharmStoreClient(bakeryClient).WithChannel(deployCmd.Channel) + return &deployAPIAdapter{ + Connection: apiRoot, + apiClient: &apiClient{Client: apiRoot.Client()}, + charmsClient: &charmsClient{Client: charms.NewClient(apiRoot)}, + applicationClient: &applicationClient{Client: 
application.NewClient(apiRoot)}, + modelConfigClient: &modelConfigClient{Client: modelconfig.NewClient(apiRoot)}, + charmstoreClient: &charmstoreClient{Client: cstoreClient}, + annotationsClient: &annotationsClient{Client: annotations.NewClient(apiRoot)}, + charmRepoClient: &charmRepoClient{CharmStore: charmrepo.NewCharmStoreFromClient(cstoreClient)}, + }, nil + } + + _, err := testing.RunCommand(c, cmd, "cs:quantal/metered-1") c.Assert(err, jc.ErrorIsNil) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/application/removerelation.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/application/removerelation.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/application/removerelation.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/application/removerelation.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,8 +4,6 @@ package application import ( - "fmt" - "github.com/juju/cmd" "github.com/juju/errors" @@ -41,13 +39,23 @@ // NewRemoveRelationCommand returns a command to remove a relation between 2 services. func NewRemoveRelationCommand() cmd.Command { - return modelcmd.Wrap(&removeRelationCommand{}) + cmd := &removeRelationCommand{} + cmd.newAPIFunc = func() (ApplicationDestroyRelationAPI, error) { + root, err := cmd.NewAPIRoot() + if err != nil { + return nil, errors.Trace(err) + } + return application.NewClient(root), nil + + } + return modelcmd.Wrap(cmd) } // removeRelationCommand causes an existing application relation to be shut down. 
type removeRelationCommand struct { modelcmd.ModelCommandBase - Endpoints []string + Endpoints []string + newAPIFunc func() (ApplicationDestroyRelationAPI, error) } func (c *removeRelationCommand) Info() *cmd.Info { @@ -56,33 +64,25 @@ Args: "[:] [:]", Purpose: helpSummary, Doc: helpDetails, - Aliases: []string{"destroy-relation"}, } } func (c *removeRelationCommand) Init(args []string) error { if len(args) != 2 { - return fmt.Errorf("a relation must involve two applications") + return errors.Errorf("a relation must involve two applications") } c.Endpoints = args return nil } -type serviceDestroyRelationAPI interface { +// ApplicationDestroyRelationAPI defines the API methods that application remove relation command uses. +type ApplicationDestroyRelationAPI interface { Close() error DestroyRelation(endpoints ...string) error } -func (c *removeRelationCommand) getAPI() (serviceDestroyRelationAPI, error) { - root, err := c.NewAPIRoot() - if err != nil { - return nil, errors.Trace(err) - } - return application.NewClient(root), nil -} - func (c *removeRelationCommand) Run(_ *cmd.Context) error { - client, err := c.getAPI() + client, err := c.newAPIFunc() if err != nil { return err } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/application/removerelation_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/application/removerelation_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/application/removerelation_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/application/removerelation_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -1,79 +1,87 @@ -// Copyright 2012, 2013 Canonical Ltd. +// Copyright 2012 Canonical Ltd. // Licensed under the AGPLv3, see LICENCE file for details. 
package application import ( "github.com/juju/errors" + "github.com/juju/testing" jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" - "github.com/juju/juju/cmd/juju/common" - jujutesting "github.com/juju/juju/juju/testing" - "github.com/juju/juju/rpc" - "github.com/juju/juju/testcharms" - "github.com/juju/juju/testing" + "github.com/juju/juju/apiserver/common" + coretesting "github.com/juju/juju/testing" ) type RemoveRelationSuite struct { - jujutesting.RepoSuite - common.CmdBlockHelper + testing.IsolationSuite + mockAPI *mockRemoveAPI } func (s *RemoveRelationSuite) SetUpTest(c *gc.C) { - s.RepoSuite.SetUpTest(c) - s.CmdBlockHelper = common.NewCmdBlockHelper(s.APIState) - c.Assert(s.CmdBlockHelper, gc.NotNil) - s.AddCleanup(func(*gc.C) { s.CmdBlockHelper.Close() }) + s.IsolationSuite.SetUpTest(c) + s.mockAPI = &mockRemoveAPI{Stub: &testing.Stub{}} + s.mockAPI.removeRelationFunc = func(endpoints ...string) error { + return s.mockAPI.NextErr() + } } var _ = gc.Suite(&RemoveRelationSuite{}) -func runRemoveRelation(c *gc.C, args ...string) error { - _, err := testing.RunCommand(c, NewRemoveRelationCommand(), args...) +func (s *RemoveRelationSuite) runRemoveRelation(c *gc.C, args ...string) error { + _, err := coretesting.RunCommand(c, NewRemoveRelationCommandForTest(s.mockAPI), args...) 
return err } -func (s *RemoveRelationSuite) setupRelationForRemove(c *gc.C) { - ch := testcharms.Repo.CharmArchivePath(s.CharmsPath, "riak") - err := runDeploy(c, ch, "riak", "--series", "quantal") - c.Assert(err, jc.ErrorIsNil) - ch = testcharms.Repo.CharmArchivePath(s.CharmsPath, "logging") - err = runDeploy(c, ch, "logging", "--series", "quantal") +func (s *RemoveRelationSuite) TestRemoveRelationWrongNumberOfArguments(c *gc.C) { + // No arguments + err := s.runRemoveRelation(c) + c.Assert(err, gc.ErrorMatches, "a relation must involve two applications") + + // 1 argument + err = s.runRemoveRelation(c, "application1") + c.Assert(err, gc.ErrorMatches, "a relation must involve two applications") + + // More than 2 arguments + err = s.runRemoveRelation(c, "application1", "application2", "application3") + c.Assert(err, gc.ErrorMatches, "a relation must involve two applications") +} + +func (s *RemoveRelationSuite) TestRemoveRelationSuccess(c *gc.C) { + err := s.runRemoveRelation(c, "application1", "application2") c.Assert(err, jc.ErrorIsNil) - runAddRelation(c, "riak", "logging") + s.mockAPI.CheckCall(c, 0, "DestroyRelation", []string{"application1", "application2"}) + s.mockAPI.CheckCall(c, 1, "Close") } -func (s *RemoveRelationSuite) TestRemoveRelation(c *gc.C) { - s.setupRelationForRemove(c) +func (s *RemoveRelationSuite) TestRemoveRelationFail(c *gc.C) { + msg := "fail remove-relation at API" + s.mockAPI.SetErrors(errors.New(msg)) + err := s.runRemoveRelation(c, "application1", "application2") + c.Assert(err, gc.ErrorMatches, msg) + s.mockAPI.CheckCall(c, 0, "DestroyRelation", []string{"application1", "application2"}) + s.mockAPI.CheckCall(c, 1, "Close") +} - // Destroy a relation that exists. 
- err := runRemoveRelation(c, "logging", "riak") - c.Assert(err, jc.ErrorIsNil) +func (s *RemoveRelationSuite) TestRemoveRelationBlocked(c *gc.C) { + s.mockAPI.SetErrors(common.OperationBlockedError("TestRemoveRelationBlocked")) + err := s.runRemoveRelation(c, "application1", "application2") + coretesting.AssertOperationWasBlocked(c, err, ".*TestRemoveRelationBlocked.*") + s.mockAPI.CheckCall(c, 0, "DestroyRelation", []string{"application1", "application2"}) + s.mockAPI.CheckCall(c, 1, "Close") +} + +type mockRemoveAPI struct { + *testing.Stub + removeRelationFunc func(endpoints ...string) error +} + +func (s mockRemoveAPI) Close() error { + s.MethodCall(s, "Close") + return s.NextErr() +} - // Destroy a relation that used to exist. - err = runRemoveRelation(c, "riak", "logging") - c.Assert(errors.Cause(err), gc.DeepEquals, &rpc.RequestError{ - Message: `relation "logging:info riak:juju-info" not found`, - Code: "not found", - }) - - // Invalid removes. - err = runRemoveRelation(c, "ping", "pong") - c.Assert(errors.Cause(err), gc.DeepEquals, &rpc.RequestError{ - Message: `application "ping" not found`, - Code: "not found", - }) - err = runRemoveRelation(c, "riak") - c.Assert(err, gc.ErrorMatches, `a relation must involve two applications`) -} - -func (s *RemoveRelationSuite) TestBlockRemoveRelation(c *gc.C) { - s.setupRelationForRemove(c) - - // block operation - s.BlockRemoveObject(c, "TestBlockRemoveRelation") - // Destroy a relation that exists. - err := runRemoveRelation(c, "logging", "riak") - s.AssertBlocked(c, err, ".*TestBlockRemoveRelation.*") +func (s mockRemoveAPI) DestroyRelation(endpoints ...string) error { + s.MethodCall(s, "DestroyRelation", endpoints) + return s.removeRelationFunc(endpoints...) 
} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/application/removeunit.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/application/removeunit.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/application/removeunit.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/application/removeunit.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,8 +4,6 @@ package application import ( - "fmt" - "github.com/juju/cmd" "github.com/juju/errors" "gopkg.in/juju/names.v2" @@ -44,7 +42,8 @@ juju remove-unit wordpress/2 wordpress/3 wordpress/4 -See also: remove-service +See also: + remove-service ` func (c *removeUnitCommand) Info() *cmd.Info { @@ -53,18 +52,17 @@ Args: " [...]", Purpose: "Remove application units from the model.", Doc: removeUnitDoc, - Aliases: []string{"destroy-unit"}, } } func (c *removeUnitCommand) Init(args []string) error { c.UnitNames = args if len(c.UnitNames) == 0 { - return fmt.Errorf("no units specified") + return errors.Errorf("no units specified") } for _, name := range c.UnitNames { if !names.IsValidUnit(name) { - return fmt.Errorf("invalid unit name %q", name) + return errors.Errorf("invalid unit name %q", name) } } return nil diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/application/removeunit_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/application/removeunit_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/application/removeunit_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/application/removeunit_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -11,7 +11,6 @@ gc "gopkg.in/check.v1" "gopkg.in/juju/charm.v6-unstable" - "github.com/juju/juju/cmd/juju/common" jujutesting "github.com/juju/juju/juju/testing" "github.com/juju/juju/state" "github.com/juju/juju/testcharms" @@ -20,12 +19,12 @@ type RemoveUnitSuite struct { jujutesting.RepoSuite - common.CmdBlockHelper + 
testing.CmdBlockHelper } func (s *RemoveUnitSuite) SetUpTest(c *gc.C) { s.RepoSuite.SetUpTest(c) - s.CmdBlockHelper = common.NewCmdBlockHelper(s.APIState) + s.CmdBlockHelper = testing.NewCmdBlockHelper(s.APIState) c.Assert(s.CmdBlockHelper, gc.NotNil) s.AddCleanup(func(*gc.C) { s.CmdBlockHelper.Close() }) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/application/series_selector.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/application/series_selector.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/application/series_selector.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/application/series_selector.go 2016-10-13 14:31:49.000000000 +0000 @@ -46,7 +46,7 @@ // - model default (if it matches supported series) // - default from charm metadata supported series / series in url // - default LTS -func (s seriesSelector) charmSeries() (selectedSeries string, msg string, err error) { +func (s seriesSelector) charmSeries() (selectedSeries string, err error) { // User has requested a series with --series. if s.seriesFlag != "" { return s.userRequested(s.seriesFlag) @@ -62,14 +62,16 @@ // Use model default series, if explicitly set and supported by the charm. if defaultSeries, explicit := s.conf.DefaultSeries(); explicit { if isSeriesSupported(defaultSeries, s.supportedSeries) { - return defaultSeries, msgDefaultModelSeries, nil + logger.Infof(msgDefaultModelSeries, defaultSeries) + return defaultSeries, nil } } // Use the charm's perferred series, if it has one. In a multi-series // charm, the first series in the list is the preferred one. 
if len(s.supportedSeries) > 0 { - return s.supportedSeries[0], msgDefaultCharmSeries, nil + logger.Infof(msgDefaultCharmSeries, s.supportedSeries[0]) + return s.supportedSeries[0], nil } // Charm hasn't specified a default (likely due to being a local charm @@ -78,24 +80,29 @@ // At this point, because we have no idea what series the charm supports, // *everything* requires --force. if !s.force { - return "", "", s.unsupportedSeries(series.LatestLts()) + return "", s.unsupportedSeries(series.LatestLts()) } - return series.LatestLts(), msgLatestLTSSeries, nil + + latestLTS := series.LatestLts() + logger.Infof(msgLatestLTSSeries, latestLTS) + return latestLTS, nil } // userRequested checks the series the user has requested, and returns it if it // is supported, or if they used --force. -func (s seriesSelector) userRequested(series string) (selectedSeries string, msg string, err error) { +func (s seriesSelector) userRequested(series string) (selectedSeries string, err error) { if !s.force && !isSeriesSupported(series, s.supportedSeries) { - return "", "", s.unsupportedSeries(series) + return "", s.unsupportedSeries(series) } // either it's a supported series or the user used --force, so just // give them what they asked for. 
if s.fromBundle { - return series, msgBundleSeries, nil + logger.Infof(msgBundleSeries, series) + return series, nil } - return series, msgUserRequestedSeries, nil + logger.Infof(msgUserRequestedSeries, series) + return series, nil } func (s seriesSelector) unsupportedSeries(series string) error { diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/application/series_selector_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/application/series_selector_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/application/series_selector_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/application/series_selector_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -150,14 +150,13 @@ previous := series.SetLatestLtsForTesting(test.ltsSeries) defer series.SetLatestLtsForTesting(previous) } - series, msg, err := test.seriesSelector.charmSeries() + series, err := test.seriesSelector.charmSeries() if test.err != "" { c.Check(err, gc.ErrorMatches, test.err) return } c.Check(err, jc.ErrorIsNil) c.Check(series, gc.Equals, test.expectedSeries) - c.Check(msg, gc.Matches, test.message) }() } } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/application/set.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/application/set.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/application/set.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/application/set.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,213 +0,0 @@ -// Copyright 2012-2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. 
- -package application - -import ( - "fmt" - "io/ioutil" - "os" - "strings" - "unicode/utf8" - - "github.com/juju/cmd" - "github.com/juju/errors" - "github.com/juju/utils/keyvalues" - "launchpad.net/gnuflag" - - "github.com/juju/juju/api/application" - "github.com/juju/juju/apiserver/params" - "github.com/juju/juju/cmd/juju/block" - "github.com/juju/juju/cmd/modelcmd" -) - -// NewSetCommand returns a command used to set application attributes. -func NewSetCommand() cmd.Command { - return modelcmd.Wrap(&setCommand{}) -} - -// setCommand updates the configuration of an application. -type setCommand struct { - modelcmd.ModelCommandBase - ApplicationName string - SettingsStrings map[string]string - Options []string - SettingsYAML cmd.FileVar - SetDefault bool - serviceApi serviceAPI -} - -var usageSetConfigSummary = ` -Sets configuration options for an application.`[1:] - -var usageSetConfigDetails = ` -Charms may, and frequently do, expose a number of configuration settings -for an application to the user. These can be set at deploy time, but may be set -at any time by using the `[1:] + "`juju set-config`" + ` command. The actual options -vary per charm (you can check the charm documentation, or use ` + "`juju get-\nconfig`" + - ` to check which options may be set). -If ‘value’ begins with the ‘@’ character, it is interpreted as a filename -and the actual value is read from it. The maximum size of the filename is -5M. -Values may be any UTF-8 encoded string. UTF-8 is accepted on the command -line and in referenced files. -See ` + "`juju status`" + ` for application names. - -Examples: - juju set-config mysql dataset-size=80% backup_dir=/vol1/mysql/backups - juju set-config apache2 --model mymodel --config /home/ubuntu/mysql.yaml - -See also: - get-config - deploy - status` - -const maxValueSize = 5242880 - -// Info implements Command.Info. 
-func (c *setCommand) Info() *cmd.Info { - return &cmd.Info{ - Name: "set-config", - Args: " = ...", - Purpose: usageSetConfigSummary, - Doc: usageSetConfigDetails, - Aliases: []string{"set-configs"}, - } -} - -// SetFlags implements Command.SetFlags. -func (c *setCommand) SetFlags(f *gnuflag.FlagSet) { - f.Var(&c.SettingsYAML, "config", "path to yaml-formatted application config") - f.BoolVar(&c.SetDefault, "to-default", false, "set application option values to default") -} - -// Init implements Command.Init. -func (c *setCommand) Init(args []string) error { - if len(args) == 0 || len(strings.Split(args[0], "=")) > 1 { - return errors.New("no application name specified") - } - if c.SettingsYAML.Path != "" && len(args) > 1 { - return errors.New("cannot specify --config when using key=value arguments") - } - c.ApplicationName = args[0] - if c.SetDefault { - c.Options = args[1:] - if len(c.Options) == 0 { - return errors.New("no configuration options specified") - } - return nil - } - settings, err := keyvalues.Parse(args[1:], true) - if err != nil { - return err - } - c.SettingsStrings = settings - return nil -} - -// serviceAPI defines the methods on the client API -// that the application set command calls. -type serviceAPI interface { - Close() error - Update(args params.ApplicationUpdate) error - Get(application string) (*params.ApplicationGetResults, error) - Set(application string, options map[string]string) error - Unset(application string, options []string) error -} - -func (c *setCommand) getServiceAPI() (serviceAPI, error) { - if c.serviceApi != nil { - return c.serviceApi, nil - } - root, err := c.NewAPIRoot() - if err != nil { - return nil, errors.Trace(err) - } - return application.NewClient(root), nil -} - -// Run updates the configuration of an application. 
-func (c *setCommand) Run(ctx *cmd.Context) error { - apiclient, err := c.getServiceAPI() - if err != nil { - return err - } - defer apiclient.Close() - - if c.SettingsYAML.Path != "" { - b, err := c.SettingsYAML.Read(ctx) - if err != nil { - return err - } - return block.ProcessBlockedError(apiclient.Update(params.ApplicationUpdate{ - ApplicationName: c.ApplicationName, - SettingsYAML: string(b), - }), block.BlockChange) - } else if c.SetDefault { - return block.ProcessBlockedError(apiclient.Unset(c.ApplicationName, c.Options), block.BlockChange) - } else if len(c.SettingsStrings) == 0 { - return nil - } - settings := map[string]string{} - for k, v := range c.SettingsStrings { - //empty string is also valid as a setting value - if v == "" { - settings[k] = v - continue - } - - if v[0] != '@' { - if !utf8.ValidString(v) { - return fmt.Errorf("value for option %q contains non-UTF-8 sequences", k) - } - settings[k] = v - continue - } - nv, err := readValue(ctx, v[1:]) - if err != nil { - return err - } - if !utf8.ValidString(nv) { - return fmt.Errorf("value for option %q contains non-UTF-8 sequences", k) - } - settings[k] = nv - } - - result, err := apiclient.Get(c.ApplicationName) - if err != nil { - return err - } - - for k, v := range settings { - configValue := result.Config[k] - - configValueMap, ok := configValue.(map[string]interface{}) - if ok { - // convert the value to string and compare - if fmt.Sprintf("%v", configValueMap["value"]) == v { - logger.Warningf("the configuration setting %q already has the value %q", k, v) - } - } - } - - return block.ProcessBlockedError(apiclient.Set(c.ApplicationName, settings), block.BlockChange) -} - -// readValue reads the value of an option out of the named file. -// An empty content is valid, like in parsing the options. The upper -// size is 5M. 
-func readValue(ctx *cmd.Context, filename string) (string, error) { - absFilename := ctx.AbsPath(filename) - fi, err := os.Stat(absFilename) - if err != nil { - return "", fmt.Errorf("cannot read option from file %q: %v", filename, err) - } - if fi.Size() > maxValueSize { - return "", fmt.Errorf("size of option file is larger than 5M") - } - content, err := ioutil.ReadFile(ctx.AbsPath(filename)) - if err != nil { - return "", fmt.Errorf("cannot read option from file %q: %v", filename, err) - } - return string(content), nil -} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/application/set_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/application/set_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/application/set_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/application/set_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,234 +0,0 @@ -// Copyright 2012-2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. 
- -package application_test - -import ( - "bytes" - "io/ioutil" - "os" - "strings" - "unicode/utf8" - - "github.com/juju/cmd" - jc "github.com/juju/testing/checkers" - "github.com/juju/utils" - gc "gopkg.in/check.v1" - - "github.com/juju/juju/apiserver/common" - "github.com/juju/juju/cmd/juju/application" - coretesting "github.com/juju/juju/testing" -) - -type SetSuite struct { - coretesting.FakeJujuXDGDataHomeSuite - dir string - fakeServiceAPI *fakeServiceAPI -} - -var _ = gc.Suite(&SetSuite{}) - -var ( - validSetTestValue = "a value with spaces\nand newline\nand UTF-8 characters: \U0001F604 / \U0001F44D" - invalidSetTestValue = "a value with an invalid UTF-8 sequence: " + string([]byte{0xFF, 0xFF}) - yamlConfigValue = "dummy-application:\n skill-level: 9000\n username: admin001\n\n" -) - -func (s *SetSuite) SetUpTest(c *gc.C) { - s.FakeJujuXDGDataHomeSuite.SetUpTest(c) - s.fakeServiceAPI = &fakeServiceAPI{serviceName: "dummy-application"} - - s.dir = c.MkDir() - c.Assert(utf8.ValidString(validSetTestValue), jc.IsTrue) - c.Assert(utf8.ValidString(invalidSetTestValue), jc.IsFalse) - setupValueFile(c, s.dir, "valid.txt", validSetTestValue) - setupValueFile(c, s.dir, "invalid.txt", invalidSetTestValue) - setupBigFile(c, s.dir) - setupConfigFile(c, s.dir) -} - -func (s *SetSuite) TestSetCommandInit(c *gc.C) { - // missing args - err := coretesting.InitCommand(application.NewSetCommandForTest(s.fakeServiceAPI), []string{}) - c.Assert(err, gc.ErrorMatches, "no application name specified") - - // missing application name - err = coretesting.InitCommand(application.NewSetCommandForTest(s.fakeServiceAPI), []string{"name=foo"}) - c.Assert(err, gc.ErrorMatches, "no application name specified") - - // --config path, but no application - err = coretesting.InitCommand(application.NewSetCommandForTest(s.fakeServiceAPI), []string{"--config", "testconfig.yaml"}) - c.Assert(err, gc.ErrorMatches, "no application name specified") - - // --config and options specified - err = 
coretesting.InitCommand(application.NewSetCommandForTest(s.fakeServiceAPI), []string{"application", "--config", "testconfig.yaml", "bees="}) - c.Assert(err, gc.ErrorMatches, "cannot specify --config when using key=value arguments") - - // --to-default and no config name provided - err = coretesting.InitCommand(application.NewSetCommandForTest(s.fakeServiceAPI), []string{"application", "--to-default"}) - c.Assert(err, gc.ErrorMatches, "no configuration options specified") - -} - -func (s *SetSuite) TestSetOptionSuccess(c *gc.C) { - s.assertSetSuccess(c, s.dir, []string{ - "username=hello", - "outlook=hello@world.tld", - }, map[string]interface{}{ - "username": "hello", - "outlook": "hello@world.tld", - }) - s.assertSetSuccess(c, s.dir, []string{ - "username=hello=foo", - }, map[string]interface{}{ - "username": "hello=foo", - "outlook": "hello@world.tld", - }) - s.assertSetSuccess(c, s.dir, []string{ - "username=@valid.txt", - }, map[string]interface{}{ - "username": validSetTestValue, - "outlook": "hello@world.tld", - }) - s.assertSetSuccess(c, s.dir, []string{ - "username=", - }, map[string]interface{}{ - "username": "", - "outlook": "hello@world.tld", - }) -} - -func (s *SetSuite) TestSetSameValue(c *gc.C) { - s.assertSetSuccess(c, s.dir, []string{ - "username=hello", - "outlook=hello@world.tld", - }, map[string]interface{}{ - "username": "hello", - "outlook": "hello@world.tld", - }) - s.assertSetWarning(c, s.dir, []string{ - "username=hello", - }, "the configuration setting \"username\" already has the value \"hello\"") - s.assertSetWarning(c, s.dir, []string{ - "outlook=hello@world.tld", - }, "the configuration setting \"outlook\" already has the value \"hello@world.tld\"") - -} - -func (s *SetSuite) TestSetOptionFail(c *gc.C) { - s.assertSetFail(c, s.dir, []string{"foo", "bar"}, "error: expected \"key=value\", got \"foo\"\n") - s.assertSetFail(c, s.dir, []string{"=bar"}, "error: expected \"key=value\", got \"=bar\"\n") - s.assertSetFail(c, s.dir, []string{ - 
"username=@missing.txt", - }, "error: cannot read option from file \"missing.txt\": .* "+utils.NoSuchFileErrRegexp+"\n") - s.assertSetFail(c, s.dir, []string{ - "username=@big.txt", - }, "error: size of option file is larger than 5M\n") - s.assertSetFail(c, s.dir, []string{ - "username=@invalid.txt", - }, "error: value for option \"username\" contains non-UTF-8 sequences\n") -} - -func (s *SetSuite) TestSetConfig(c *gc.C) { - s.assertSetFail(c, s.dir, []string{ - "--config", - "missing.yaml", - }, "error.* "+utils.NoSuchFileErrRegexp+"\n") - - ctx := coretesting.ContextForDir(c, s.dir) - code := cmd.Main(application.NewSetCommandForTest(s.fakeServiceAPI), ctx, []string{ - "dummy-application", - "--config", - "testconfig.yaml"}) - - c.Check(code, gc.Equals, 0) - c.Check(s.fakeServiceAPI.config, gc.Equals, yamlConfigValue) -} - -func (s *SetSuite) TestSetConfigToDefault(c *gc.C) { - s.fakeServiceAPI = &fakeServiceAPI{serviceName: "dummy-application", values: map[string]interface{}{ - "username": "hello", - }} - s.assertSetSuccess(c, s.dir, []string{ - "--to-default", - "username", - }, make(map[string]interface{})) -} - -func (s *SetSuite) TestBlockSetConfig(c *gc.C) { - // Block operation - s.fakeServiceAPI.err = common.OperationBlockedError("TestBlockSetConfig") - ctx := coretesting.ContextForDir(c, s.dir) - code := cmd.Main(application.NewSetCommandForTest(s.fakeServiceAPI), ctx, []string{ - "dummy-application", - "--config", - "testconfig.yaml"}) - c.Check(code, gc.Equals, 1) - // msg is logged - stripped := strings.Replace(c.GetTestLog(), "\n", "", -1) - c.Check(stripped, gc.Matches, ".*TestBlockSetConfig.*") -} - -// assertSetSuccess sets configuration options and checks the expected settings. 
-func (s *SetSuite) assertSetSuccess(c *gc.C, dir string, args []string, expect map[string]interface{}) { - ctx := coretesting.ContextForDir(c, dir) - code := cmd.Main(application.NewSetCommandForTest(s.fakeServiceAPI), ctx, append([]string{"dummy-application"}, args...)) - c.Assert(code, gc.Equals, 0) -} - -// assertSetFail sets configuration options and checks the expected error. -func (s *SetSuite) assertSetFail(c *gc.C, dir string, args []string, err string) { - ctx := coretesting.ContextForDir(c, dir) - code := cmd.Main(application.NewSetCommandForTest(s.fakeServiceAPI), ctx, append([]string{"dummy-application"}, args...)) - c.Check(code, gc.Not(gc.Equals), 0) - c.Assert(ctx.Stderr.(*bytes.Buffer).String(), gc.Matches, err) -} - -func (s *SetSuite) assertSetWarning(c *gc.C, dir string, args []string, w string) { - ctx := coretesting.ContextForDir(c, dir) - code := cmd.Main(application.NewSetCommandForTest(s.fakeServiceAPI), ctx, append([]string{"dummy-application"}, args...)) - c.Check(code, gc.Equals, 0) - - c.Assert(strings.Replace(c.GetTestLog(), "\n", " ", -1), gc.Matches, ".*WARNING.*"+w+".*") -} - -// setupValueFile creates a file containing one value for testing -// set with name=@filename. -func setupValueFile(c *gc.C, dir, filename, value string) string { - ctx := coretesting.ContextForDir(c, dir) - path := ctx.AbsPath(filename) - content := []byte(value) - err := ioutil.WriteFile(path, content, 0666) - c.Assert(err, jc.ErrorIsNil) - return path -} - -// setupBigFile creates a too big file for testing -// set with name=@filename. 
-func setupBigFile(c *gc.C, dir string) string { - ctx := coretesting.ContextForDir(c, dir) - path := ctx.AbsPath("big.txt") - file, err := os.Create(path) - c.Assert(err, jc.ErrorIsNil) - defer file.Close() - chunk := make([]byte, 1024) - for i := 0; i < cap(chunk); i++ { - chunk[i] = byte(i % 256) - } - for i := 0; i < 6000; i++ { - _, err = file.Write(chunk) - c.Assert(err, jc.ErrorIsNil) - } - return path -} - -// setupConfigFile creates a configuration file for testing set -// with the --config argument specifying a configuration file. -func setupConfigFile(c *gc.C, dir string) string { - ctx := coretesting.ContextForDir(c, dir) - path := ctx.AbsPath("testconfig.yaml") - content := []byte(yamlConfigValue) - err := ioutil.WriteFile(path, content, 0666) - c.Assert(err, jc.ErrorIsNil) - return path -} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/application/store.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/application/store.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/application/store.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/application/store.go 2016-10-13 14:31:49.000000000 +0000 @@ -1,6 +1,9 @@ // Copyright 2012, 2013 Canonical Ltd. // Licensed under the AGPLv3, see LICENCE file for details. +// TODO(natefinch): change the code in this file to use the +// github.com/juju/juju/charmstore package to interact with the charmstore. 
+ package application import ( @@ -10,13 +13,11 @@ "github.com/juju/errors" "gopkg.in/juju/charm.v6-unstable" - "gopkg.in/juju/charmrepo.v2-unstable" "gopkg.in/juju/charmrepo.v2-unstable/csclient" csparams "gopkg.in/juju/charmrepo.v2-unstable/csclient/params" "gopkg.in/macaroon-bakery.v1/httpbakery" "gopkg.in/macaroon.v1" - "github.com/juju/juju/api" "github.com/juju/juju/apiserver/params" "github.com/juju/juju/environs/config" ) @@ -55,63 +56,52 @@ return false } -// charmURLResolver holds the information necessary to -// resolve charm and bundle URLs. -type charmURLResolver struct { - // store holds the repository to use for charmstore charms. - store *charmrepo.CharmStore - - // conf holds the current model configuration. - conf *config.Config -} - -func newCharmURLResolver(conf *config.Config, csClient *csclient.Client) *charmURLResolver { - r := &charmURLResolver{ - store: charmrepo.NewCharmStoreFromClient(csClient), - conf: conf, - } - return r -} - // TODO(ericsnow) Return charmstore.CharmID from resolve()? -// resolve resolves the given given charm or bundle URL string by looking it up -// in the charm store. The given csParams will be used to access the charm -// store. -// -// It returns the fully resolved URL, the channel, any series supported by the -// entity, and the store that holds it. -func (r *charmURLResolver) resolve(url *charm.URL) (*charm.URL, csparams.Channel, []string, *charmrepo.CharmStore, error) { +// ResolveCharmFunc is the type of a function that resolves a charm URL. 
+type ResolveCharmFunc func( + resolveWithChannel func(*charm.URL) (*charm.URL, csparams.Channel, []string, error), + conf *config.Config, + url *charm.URL, +) (*charm.URL, csparams.Channel, []string, error) + +func resolveCharm( + resolveWithChannel func(*charm.URL) (*charm.URL, csparams.Channel, []string, error), + conf *config.Config, + url *charm.URL, +) (*charm.URL, csparams.Channel, []string, error) { if url.Schema != "cs" { - return nil, csparams.NoChannel, nil, nil, errors.Errorf("unknown schema for charm URL %q", url) + return nil, csparams.NoChannel, nil, errors.Errorf("unknown schema for charm URL %q", url) } // If the user hasn't explicitly asked for a particular series, // query for the charm that matches the model's default series. // If this fails, we'll fall back to asking for whatever charm is available. defaultedSeries := false if url.Series == "" { - if s, ok := r.conf.DefaultSeries(); ok { + if s, ok := conf.DefaultSeries(); ok { defaultedSeries = true + // TODO(katco): Don't update the value passed in. Not only + // is there no indication that this method will do so, we + // return a charm.URL which signals to the developer that + // we don't modify the original. url.Series = s } } - charmStore := config.SpecializeCharmRepo(r.store, r.conf).(*charmrepo.CharmStore) - - resultUrl, channel, supportedSeries, err := charmStore.ResolveWithChannel(url) + resultURL, channel, supportedSeries, err := resolveWithChannel(url) if defaultedSeries && errors.Cause(err) == csparams.ErrNotFound { // we tried to use the model's default the series, but the store said it doesn't exist. // retry without the defaulted series, to take what we can get. 
url.Series = "" - resultUrl, channel, supportedSeries, err = charmStore.ResolveWithChannel(url) + resultURL, channel, supportedSeries, err = resolveWithChannel(url) } if err != nil { - return nil, csparams.NoChannel, nil, nil, errors.Trace(err) + return nil, csparams.NoChannel, nil, errors.Trace(err) } - if resultUrl.Series != "" && len(supportedSeries) == 0 { - supportedSeries = []string{resultUrl.Series} + if resultURL.Series != "" && len(supportedSeries) == 0 { + supportedSeries = []string{resultURL.Series} } - return resultUrl, channel, supportedSeries, charmStore, nil + return resultURL, channel, supportedSeries, nil } // TODO(ericsnow) Return charmstore.CharmID from addCharmFromURL()? @@ -120,13 +110,13 @@ // given charm URL to state. For non-public charm URLs, this function also // handles the macaroon authorization process using the given csClient. // The resulting charm URL of the added charm is displayed on stdout. -func addCharmFromURL(client *api.Client, curl *charm.URL, channel csparams.Channel, csClient *csclient.Client) (*charm.URL, *macaroon.Macaroon, error) { +func addCharmFromURL(client CharmAdder, curl *charm.URL, channel csparams.Channel) (*charm.URL, *macaroon.Macaroon, error) { var csMac *macaroon.Macaroon if err := client.AddCharm(curl, channel); err != nil { if !params.IsCodeUnauthorized(err) { return nil, nil, errors.Trace(err) } - m, err := authorizeCharmStoreEntity(csClient, curl) + m, err := client.AuthorizeCharmstoreEntity(curl) if err != nil { return nil, nil, maybeTermsAgreementError(err) } @@ -146,9 +136,6 @@ }) } -// TODO(natefinch): change the code in this file to use the -// github.com/juju/juju/charmstore package to interact with the charmstore. - // authorizeCharmStoreEntity acquires and return the charm store delegatable macaroon to be // used to add the charm corresponding to the given URL. 
// The macaroon is properly attenuated so that it can only be used to deploy diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/application/unexpose_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/application/unexpose_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/application/unexpose_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/application/unexpose_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -9,7 +9,6 @@ gc "gopkg.in/check.v1" "gopkg.in/juju/charm.v6-unstable" - "github.com/juju/juju/cmd/juju/common" jujutesting "github.com/juju/juju/juju/testing" "github.com/juju/juju/rpc" "github.com/juju/juju/testcharms" @@ -18,12 +17,12 @@ type UnexposeSuite struct { jujutesting.RepoSuite - common.CmdBlockHelper + testing.CmdBlockHelper } func (s *UnexposeSuite) SetUpTest(c *gc.C) { s.RepoSuite.SetUpTest(c) - s.CmdBlockHelper = common.NewCmdBlockHelper(s.APIState) + s.CmdBlockHelper = testing.NewCmdBlockHelper(s.APIState) c.Assert(s.CmdBlockHelper, gc.NotNil) s.AddCleanup(func(*gc.C) { s.CmdBlockHelper.Close() }) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/application/upgradecharm.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/application/upgradecharm.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/application/upgradecharm.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/application/upgradecharm.go 2016-10-13 14:31:49.000000000 +0000 @@ -6,50 +6,119 @@ import ( "fmt" "os" - "path/filepath" "strings" "github.com/juju/cmd" "github.com/juju/errors" + "github.com/juju/gnuflag" "gopkg.in/juju/charm.v6-unstable" charmresource "gopkg.in/juju/charm.v6-unstable/resource" "gopkg.in/juju/charmrepo.v2-unstable" csclientparams "gopkg.in/juju/charmrepo.v2-unstable/csclient/params" "gopkg.in/juju/names.v2" + "gopkg.in/macaroon-bakery.v1/httpbakery" "gopkg.in/macaroon.v1" - "launchpad.net/gnuflag" 
"github.com/juju/juju/api" "github.com/juju/juju/api/application" + "github.com/juju/juju/api/base" "github.com/juju/juju/api/charms" "github.com/juju/juju/api/modelconfig" "github.com/juju/juju/charmstore" "github.com/juju/juju/cmd/juju/block" "github.com/juju/juju/cmd/modelcmd" + "github.com/juju/juju/environs/config" "github.com/juju/juju/resource" "github.com/juju/juju/resource/resourceadapters" + "github.com/juju/juju/storage" ) // NewUpgradeCharmCommand returns a command which upgrades application's charm. func NewUpgradeCharmCommand() cmd.Command { - return modelcmd.Wrap(&upgradeCharmCommand{}) -} + cmd := &upgradeCharmCommand{ + DeployResources: resourceadapters.DeployResources, + ResolveCharm: resolveCharm, + NewCharmAdder: newCharmAdder, + NewCharmClient: func(conn api.Connection) CharmClient { + return charms.NewClient(conn) + }, + NewCharmUpgradeClient: func(conn api.Connection) CharmUpgradeClient { + return application.NewClient(conn) + }, + NewModelConfigGetter: func(conn api.Connection) ModelConfigGetter { + return modelconfig.NewClient(conn) + }, + NewResourceLister: func(conn api.Connection) (ResourceLister, error) { + resclient, err := resourceadapters.NewAPIClient(conn) + if err != nil { + return nil, err + } + return resclient, nil + }, + } + return modelcmd.Wrap(cmd) +} + +// CharmUpgradeClient defines a subset of the application facade, as required +// by the upgrade-charm command. +type CharmUpgradeClient interface { + GetCharmURL(string) (*charm.URL, error) + SetCharm(application.SetCharmConfig) error +} + +// CharmClient defines a subset of the charms facade, as required +// by the upgrade-charm command. +type CharmClient interface { + CharmInfo(string) (*charms.CharmInfo, error) +} + +// ResourceLister defines a subset of the resources facade, as required +// by the upgrade-charm command. 
+type ResourceLister interface { + ListResources([]string) ([]resource.ServiceResources, error) +} + +// NewCharmAdderFunc is the type of a function used to construct +// a new CharmAdder. +type NewCharmAdderFunc func( + api.Connection, + *httpbakery.Client, + csclientparams.Channel, +) CharmAdder // UpgradeCharm is responsible for upgrading an application's charm. type upgradeCharmCommand struct { modelcmd.ModelCommandBase + + DeployResources resourceadapters.DeployResourcesFunc + ResolveCharm ResolveCharmFunc + NewCharmAdder NewCharmAdderFunc + NewCharmClient func(api.Connection) CharmClient + NewCharmUpgradeClient func(api.Connection) CharmUpgradeClient + NewModelConfigGetter func(api.Connection) ModelConfigGetter + NewResourceLister func(api.Connection) (ResourceLister, error) + ApplicationName string ForceUnits bool ForceSeries bool SwitchURL string CharmPath string Revision int // defaults to -1 (latest) + // Resources is a map of resource name to filename to be uploaded on upgrade. Resources map[string]string // Channel holds the charmstore channel to use when obtaining // the charm to be upgraded to. Channel csclientparams.Channel + + // Config is a config file variable, pointing at a YAML file containing + // the application config to update. + Config cmd.FileVar + + // Storage is a map of storage constraints, keyed on the storage name + // defined in charm storage metadata, to add or update during upgrade. + Storage map[string]storage.Constraints } const upgradeCharmDoc = ` @@ -79,6 +148,18 @@ Where bar and baz are resources named in the metadata for the foo charm. +Storage constraints may be added or updated at upgrade time by specifying +the --storage flag, with the same format as specified in "juju deploy". +If new required storage is added by the new charm revision, then you must +specify constraints or the defaults will be applied. 
+ + juju upgrade-charm foo --storage cache=ssd,10G + +Charm settings may be added or updated at upgrade time by specifying the +--config flag, pointing to a YAML-encoded application config file. + + juju upgrade-charm foo --config config.yaml + If the new version of a charm does not explicitly support the application's series, the upgrade is disallowed unless the --force-series flag is used. This option should be used with caution since using a charm on a machine running an unsupported series may @@ -123,6 +204,7 @@ } func (c *upgradeCharmCommand) SetFlags(f *gnuflag.FlagSet) { + c.ModelCommandBase.SetFlags(f) f.BoolVar(&c.ForceUnits, "force-units", false, "Upgrade all units immediately, even if in error state") f.StringVar((*string)(&c.Channel), "channel", "", "Channel to use when getting the charm or bundle from the charm store") f.BoolVar(&c.ForceSeries, "force-series", false, "Upgrade even if series of deployed applications are not supported by the new charm") @@ -130,66 +212,63 @@ f.StringVar(&c.CharmPath, "path", "", "Upgrade to a charm located at path") f.IntVar(&c.Revision, "revision", -1, "Explicit revision of current charm") f.Var(stringMap{&c.Resources}, "resource", "Resource to be uploaded to the controller") + f.Var(storageFlag{&c.Storage, nil}, "storage", "Charm storage constraints") + f.Var(&c.Config, "config", "Path to yaml-formatted application config") } func (c *upgradeCharmCommand) Init(args []string) error { switch len(args) { case 1: if !names.IsValidApplication(args[0]) { - return fmt.Errorf("invalid application name %q", args[0]) + return errors.Errorf("invalid application name %q", args[0]) } c.ApplicationName = args[0] case 0: - return fmt.Errorf("no application specified") + return errors.Errorf("no application specified") default: return cmd.CheckEmpty(args[1:]) } if c.SwitchURL != "" && c.Revision != -1 { - return fmt.Errorf("--switch and --revision are mutually exclusive") + return errors.Errorf("--switch and --revision are mutually 
exclusive") } if c.CharmPath != "" && c.Revision != -1 { - return fmt.Errorf("--path and --revision are mutually exclusive") + return errors.Errorf("--path and --revision are mutually exclusive") } if c.SwitchURL != "" && c.CharmPath != "" { - return fmt.Errorf("--switch and --path are mutually exclusive") + return errors.Errorf("--switch and --path are mutually exclusive") } return nil } -func (c *upgradeCharmCommand) newServiceAPIClient() (*application.Client, error) { - root, err := c.NewAPIRoot() - if err != nil { - return nil, errors.Trace(err) - } - return application.NewClient(root), nil -} - -func (c *upgradeCharmCommand) newModelConfigAPIClient() (*modelconfig.Client, error) { - root, err := c.NewAPIRoot() - if err != nil { - return nil, errors.Trace(err) - } - return modelconfig.NewClient(root), nil -} - // Run connects to the specified environment and starts the charm // upgrade process. func (c *upgradeCharmCommand) Run(ctx *cmd.Context) error { - client, err := c.NewAPIClient() + apiRoot, err := c.NewAPIRoot() if err != nil { - return err + return errors.Trace(err) } - defer client.Close() + defer apiRoot.Close() - serviceClient, err := c.newServiceAPIClient() - if err != nil { - return err + // If the user has specified config or storage constraints, + // make sure the server has facade version 2 at a minimum. 
+ if c.Config.Path != "" || len(c.Storage) > 0 { + action := "updating config" + if c.Config.Path == "" { + action = "updating storage constraints" + } + if apiRoot.BestFacadeVersion("Application") < 2 { + suffix := "this server" + if version, ok := apiRoot.ServerVersion(); ok { + suffix = fmt.Sprintf("server version %s", version) + } + return errors.New(action + " at upgrade-charm time is not supported by " + suffix) + } } - defer serviceClient.Close() - oldURL, err := serviceClient.GetCharmURL(c.ApplicationName) + charmUpgradeClient := c.NewCharmUpgradeClient(apiRoot) + oldURL, err := charmUpgradeClient.GetCharmURL(c.ApplicationName) if err != nil { - return err + return errors.Trace(err) } newRef := c.SwitchURL @@ -206,53 +285,85 @@ newRef = oldURL.WithRevision(c.Revision).String() } - bakeryClient, err := c.BakeryClient() + // First, ensure the charm is added to the model. + modelConfigGetter := c.NewModelConfigGetter(apiRoot) + modelConfig, err := getModelConfig(modelConfigGetter) if err != nil { return errors.Trace(err) } - csClient := newCharmStoreClient(bakeryClient).WithChannel(c.Channel) - - modelConfigClient, err := c.newModelConfigAPIClient() - if err != nil { - return err - } - defer modelConfigClient.Close() - conf, err := getModelConfig(modelConfigClient) + bakeryClient, err := c.BakeryClient() if err != nil { return errors.Trace(err) } - resolver := newCharmURLResolver(conf, csClient) - chID, csMac, err := c.addCharm(oldURL, newRef, client, resolver) - if err != nil { - if err1, ok := errors.Cause(err).(*termsRequiredError); ok { - terms := strings.Join(err1.Terms, " ") - return errors.Errorf(`Declined: please agree to the following terms %s. 
Try: "juju agree %s"`, terms, terms) + charmAdder := c.NewCharmAdder(apiRoot, bakeryClient, c.Channel) + charmRepo := c.getCharmStore(bakeryClient, modelConfig) + chID, csMac, err := c.addCharm(charmAdder, charmRepo, modelConfig, oldURL, newRef) + if err != nil { + if termsErr, ok := errors.Cause(err).(*termsRequiredError); ok { + terms := strings.Join(termsErr.Terms, " ") + return errors.Wrap( + termsErr, + errors.Errorf( + `Declined: please agree to the following terms %s. Try: "juju agree %[1]s"`, + terms, + ), + ) } return block.ProcessBlockedError(err, block.BlockChange) } ctx.Infof("Added charm %q to the model.", chID.URL) - ids, err := c.upgradeResources(client, chID, csMac) + // Next, upgrade resources. + charmsClient := c.NewCharmClient(apiRoot) + resourceLister, err := c.NewResourceLister(apiRoot) + if err != nil { + return errors.Trace(err) + } + ids, err := c.upgradeResources(apiRoot, charmsClient, resourceLister, chID, csMac) if err != nil { return errors.Trace(err) } + // Finally, upgrade the application. + var configYAML []byte + if c.Config.Path != "" { + configYAML, err = c.Config.Read(ctx) + if err != nil { + return errors.Trace(err) + } + } cfg := application.SetCharmConfig{ - ApplicationName: c.ApplicationName, - CharmID: chID, - ForceSeries: c.ForceSeries, - ForceUnits: c.ForceUnits, - ResourceIDs: ids, + ApplicationName: c.ApplicationName, + CharmID: chID, + ConfigSettingsYAML: string(configYAML), + ForceSeries: c.ForceSeries, + ForceUnits: c.ForceUnits, + ResourceIDs: ids, + StorageConstraints: c.Storage, } - - return block.ProcessBlockedError(serviceClient.SetCharm(cfg), block.BlockChange) + return block.ProcessBlockedError(charmUpgradeClient.SetCharm(cfg), block.BlockChange) } // upgradeResources pushes metadata up to the server for each resource defined // in the new charm's metadata and returns a map of resource names to pending // IDs to include in the upgrage-charm call. 
-func (c *upgradeCharmCommand) upgradeResources(client *api.Client, chID charmstore.CharmID, csMac *macaroon.Macaroon) (map[string]string, error) { - filtered, err := getUpgradeResources(c, c.ApplicationName, chID.URL, client, c.Resources) +// +// TODO(axw) apiRoot is passed in here because DeloyResources requires it, +// DeployResources should accept a resource-specific client instead. +func (c *upgradeCharmCommand) upgradeResources( + apiRoot base.APICallCloser, + charmsClient CharmClient, + resourceLister ResourceLister, + chID charmstore.CharmID, + csMac *macaroon.Macaroon, +) (map[string]string, error) { + filtered, err := getUpgradeResources( + charmsClient, + resourceLister, + c.ApplicationName, + chID.URL, + c.Resources, + ) if err != nil { return nil, errors.Trace(err) } @@ -262,18 +373,25 @@ // Note: the validity of user-supplied resources to be uploaded will be // checked further down the stack. - return handleResources(c, c.Resources, c.ApplicationName, chID, csMac, filtered) -} - -// TODO(ericsnow) Move these helpers into handleResources()? 
- -func getUpgradeResources(c APICmd, serviceID string, cURL *charm.URL, client *api.Client, cliResources map[string]string) (map[string]charmresource.Meta, error) { - root, err := c.NewAPIRoot() - if err != nil { - return nil, errors.Trace(err) - } - charmsClient := charms.NewClient(root) - meta, err := getMetaResources(cURL, charmsClient) + ids, err := c.DeployResources( + c.ApplicationName, + chID, + csMac, + c.Resources, + filtered, + apiRoot, + ) + return ids, errors.Trace(err) +} + +func getUpgradeResources( + charmsClient CharmClient, + resourceLister ResourceLister, + serviceID string, + charmURL *charm.URL, + cliResources map[string]string, +) (map[string]charmresource.Meta, error) { + meta, err := getMetaResources(charmURL, charmsClient) if err != nil { return nil, errors.Trace(err) } @@ -281,7 +399,7 @@ return nil, nil } - current, err := getResources(serviceID, c.NewAPIRoot) + current, err := getResources(serviceID, resourceLister) if err != nil { return nil, errors.Trace(err) } @@ -289,33 +407,27 @@ return filtered, nil } -func getMetaResources(cURL *charm.URL, client *charms.Client) (map[string]charmresource.Meta, error) { - // this gets the charm info that was added to the controller using addcharm. 
- charmInfo, err := client.CharmInfo(cURL.String()) +func getMetaResources(charmURL *charm.URL, client CharmClient) (map[string]charmresource.Meta, error) { + charmInfo, err := client.CharmInfo(charmURL.String()) if err != nil { return nil, errors.Trace(err) } return charmInfo.Meta.Resources, nil } -func getResources(serviceID string, newAPIRoot func() (api.Connection, error)) (map[string]resource.Resource, error) { - resclient, err := resourceadapters.NewAPIClient(newAPIRoot) - if err != nil { - return nil, errors.Trace(err) - } - svcs, err := resclient.ListResources([]string{serviceID}) +func getResources(serviceID string, resourceLister ResourceLister) (map[string]resource.Resource, error) { + svcs, err := resourceLister.ListResources([]string{serviceID}) if err != nil { return nil, errors.Trace(err) } - // ListResources guarantees a number of values returned == number of - // services passed in. return resource.AsMap(svcs[0].Resources), nil } -// TODO(ericsnow) Move filterResources() and shouldUploadMeta() -// somewhere more general under the "resource" package? - -func filterResources(meta map[string]charmresource.Meta, current map[string]resource.Resource, uploads map[string]string) map[string]charmresource.Meta { +func filterResources( + meta map[string]charmresource.Meta, + current map[string]resource.Resource, + uploads map[string]string, +) map[string]charmresource.Meta { filtered := make(map[string]charmresource.Meta) for name, res := range meta { if shouldUpgradeResource(res, uploads, current) { @@ -348,23 +460,55 @@ return true } -// addCharm interprets the new charmRef and adds the specified charm if the new charm is different -// to what's already deployed as specified by oldURL. 
+func newCharmAdder( + api api.Connection, + bakeryClient *httpbakery.Client, + channel csclientparams.Channel, +) CharmAdder { + csClient := newCharmStoreClient(bakeryClient).WithChannel(channel) + + // TODO(katco): This anonymous adapter should go away in favor of + // a comprehensive API passed into the upgrade-charm command. + charmstoreAdapter := &struct { + *charmstoreClient + *apiClient + }{ + charmstoreClient: &charmstoreClient{Client: csClient}, + apiClient: &apiClient{Client: api.Client()}, + } + return charmstoreAdapter +} + +func (c *upgradeCharmCommand) getCharmStore( + bakeryClient *httpbakery.Client, + modelConfig *config.Config, +) *charmrepo.CharmStore { + csClient := newCharmStoreClient(bakeryClient).WithChannel(c.Channel) + return config.SpecializeCharmRepo( + charmrepo.NewCharmStoreFromClient(csClient), + modelConfig, + ).(*charmrepo.CharmStore) +} + +// addCharm interprets the new charmRef and adds the specified charm if +// the new charm is different to what's already deployed as specified by +// oldURL. func (c *upgradeCharmCommand) addCharm( + charmAdder CharmAdder, + charmRepo *charmrepo.CharmStore, + config *config.Config, oldURL *charm.URL, charmRef string, - client *api.Client, - resolver *charmURLResolver, ) (charmstore.CharmID, *macaroon.Macaroon, error) { var id charmstore.CharmID // Charm may have been supplied via a path reference. 
ch, newURL, err := charmrepo.NewCharmAtPathForceSeries(charmRef, oldURL.Series, c.ForceSeries) if err == nil { - _, newName := filepath.Split(charmRef) + newName := ch.Meta().Name if newName != oldURL.Name { - return id, nil, fmt.Errorf("cannot upgrade %q to %q", oldURL.Name, newName) + return id, nil, errors.Errorf("cannot upgrade %q to %q", oldURL.Name, newName) } - addedURL, err := client.AddLocalCharm(newURL, ch) + addedURL, err := charmAdder.AddLocalCharm(newURL, ch) id.URL = addedURL return id, nil, err } @@ -383,7 +527,7 @@ } // Charm has been supplied as a URL so we resolve and deploy using the store. - newURL, channel, supportedSeries, store, err := resolver.resolve(refURL) + newURL, channel, supportedSeries, err := c.ResolveCharm(charmRepo.ResolveWithChannel, config, refURL) if err != nil { return id, nil, errors.Trace(err) } @@ -402,15 +546,15 @@ // or Revision flags, discover the latest. if *newURL == *oldURL { if refURL.Revision != -1 { - return id, nil, fmt.Errorf("already running specified charm %q", newURL) + return id, nil, errors.Errorf("already running specified charm %q", newURL) } // No point in trying to upgrade a charm store charm when // we just determined that's the latest revision // available. 
- return id, nil, fmt.Errorf("already running latest charm %q", newURL) + return id, nil, errors.Errorf("already running latest charm %q", newURL) } - curl, csMac, err := addCharmFromURL(client, newURL, channel, store.Client()) + curl, csMac, err := addCharmFromURL(charmAdder, newURL, channel) if err != nil { return id, nil, errors.Trace(err) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/application/upgradecharm_resources_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/application/upgradecharm_resources_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/application/upgradecharm_resources_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/application/upgradecharm_resources_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -45,7 +45,7 @@ s.RepoSuite.SetUpTest(c) chPath := testcharms.Repo.ClonedDirPath(s.CharmsPath, "riak") - _, err := testing.RunCommand(c, application.NewDeployCommand(), chPath, "riak", "--series", "quantal") + _, err := testing.RunCommand(c, application.NewDefaultDeployCommand(), chPath, "riak", "--series", "quantal") c.Assert(err, jc.ErrorIsNil) riak, err := s.State.Application("riak") c.Assert(err, jc.ErrorIsNil) @@ -55,8 +55,9 @@ c.Assert(forced, jc.IsFalse) } -var riakResourceMeta = []byte(` -name: riakresource +func (s *UpgradeCharmResourceSuite) TestUpgradeWithResources(c *gc.C) { + const riakResourceMeta = ` +name: riak summary: "K/V storage engine" description: "Scalable K/V Store in Erlang with Clocks :-)" provides: @@ -72,11 +73,10 @@ type: file filename: foo.lib description: some comment -`) +` -func (s *UpgradeCharmResourceSuite) TestUpgradeWithResources(c *gc.C) { myriakPath := testcharms.Repo.ClonedDir(c.MkDir(), "riak") - err := ioutil.WriteFile(path.Join(myriakPath.Path, "metadata.yaml"), riakResourceMeta, 0644) + err := ioutil.WriteFile(path.Join(myriakPath.Path, "metadata.yaml"), []byte(riakResourceMeta), 0644) c.Assert(err, jc.ErrorIsNil) 
data := []byte("some-data") @@ -193,12 +193,12 @@ err := ioutil.WriteFile(resourceFile, []byte(resourceContent), 0644) c.Assert(err, jc.ErrorIsNil) - ctx, err := testing.RunCommand(c, application.NewDeployCommand(), "trusty/starsay", "--resource", "upload-resource="+resourceFile) + ctx, err := testing.RunCommand(c, application.NewDefaultDeployCommand(), "trusty/starsay", "--resource", "upload-resource="+resourceFile) c.Assert(err, jc.ErrorIsNil) output := testing.Stderr(ctx) - expectedOutput := `Added charm "cs:trusty/starsay-1" to the model. -Deploying charm "cs:trusty/starsay-1" with the user specified series "trusty". + expectedOutput := `Located charm "cs:trusty/starsay-1". +Deploying charm "cs:trusty/starsay-1". ` c.Assert(output, gc.Equals, expectedOutput) s.assertCharmsUploaded(c, "cs:trusty/starsay-1") @@ -266,7 +266,7 @@ }, ID: "starsay/upload-resource", ApplicationID: "starsay", - Username: "admin@local", + Username: "admin", // Timestamp is checked above }} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/application/upgradecharm_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/application/upgradecharm_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/application/upgradecharm_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/application/upgradecharm_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -7,36 +7,233 @@ "fmt" "io/ioutil" "net/http/httptest" + "os" "path" "path/filepath" + "github.com/juju/cmd" "github.com/juju/errors" + "github.com/juju/testing" jc "github.com/juju/testing/checkers" - "github.com/juju/utils" + "github.com/juju/version" gc "gopkg.in/check.v1" "gopkg.in/juju/charm.v6-unstable" + charmresource "gopkg.in/juju/charm.v6-unstable/resource" "gopkg.in/juju/charmrepo.v2-unstable" "gopkg.in/juju/charmrepo.v2-unstable/csclient" csclientparams "gopkg.in/juju/charmrepo.v2-unstable/csclient/params" - "gopkg.in/juju/charmstore.v5-unstable" + charmstore 
"gopkg.in/juju/charmstore.v5-unstable" "gopkg.in/macaroon-bakery.v1/httpbakery" + macaroon "gopkg.in/macaroon.v1" - "github.com/juju/juju/cmd/juju/common" + "strings" + + "github.com/juju/juju/api" + "github.com/juju/juju/api/application" + "github.com/juju/juju/api/base" + "github.com/juju/juju/api/charms" + jujucharmstore "github.com/juju/juju/charmstore" "github.com/juju/juju/cmd/modelcmd" + "github.com/juju/juju/environs/config" jujutesting "github.com/juju/juju/juju/testing" + "github.com/juju/juju/jujuclient" + "github.com/juju/juju/jujuclient/jujuclienttesting" + "github.com/juju/juju/resource/resourceadapters" "github.com/juju/juju/rpc" "github.com/juju/juju/state" + "github.com/juju/juju/storage" "github.com/juju/juju/testcharms" - "github.com/juju/juju/testing" + coretesting "github.com/juju/juju/testing" ) -type UpgradeCharmErrorsSuite struct { +type UpgradeCharmSuite struct { + testing.IsolationSuite + testing.Stub + + deployResources resourceadapters.DeployResourcesFunc + resolveCharm ResolveCharmFunc + resolvedCharmURL *charm.URL + apiConnection mockAPIConnection + charmAdder mockCharmAdder + charmClient mockCharmClient + charmUpgradeClient mockCharmUpgradeClient + modelConfigGetter mockModelConfigGetter + resourceLister mockResourceLister + cmd cmd.Command +} + +var _ = gc.Suite(&UpgradeCharmSuite{}) + +func (s *UpgradeCharmSuite) SetUpTest(c *gc.C) { + s.IsolationSuite.SetUpTest(c) + s.Stub.ResetCalls() + + // Create persistent cookies in a temporary location. 
+ cookieFile := filepath.Join(c.MkDir(), "cookies") + s.PatchEnvironment("JUJU_COOKIEFILE", cookieFile) + + s.deployResources = func( + applicationID string, + chID jujucharmstore.CharmID, + csMac *macaroon.Macaroon, + filesAndRevisions map[string]string, + resources map[string]charmresource.Meta, + conn base.APICallCloser, + ) (ids map[string]string, err error) { + s.AddCall("DeployResources", applicationID, chID, csMac, filesAndRevisions, resources, conn) + return nil, s.NextErr() + } + + s.resolveCharm = func( + resolveWithChannel func(*charm.URL) (*charm.URL, csclientparams.Channel, []string, error), + conf *config.Config, + url *charm.URL, + ) (*charm.URL, csclientparams.Channel, []string, error) { + s.AddCall("ResolveCharm", resolveWithChannel, conf, url) + if err := s.NextErr(); err != nil { + return nil, csclientparams.NoChannel, nil, err + } + return s.resolvedCharmURL, csclientparams.StableChannel, []string{"quantal"}, nil + } + + currentCharmURL := charm.MustParseURL("cs:quantal/foo-1") + latestCharmURL := charm.MustParseURL("cs:quantal/foo-2") + s.resolvedCharmURL = latestCharmURL + + s.apiConnection = mockAPIConnection{ + bestFacadeVersion: 2, + serverVersion: &version.Number{ + Major: 1, + Minor: 2, + Patch: 3, + }, + } + s.charmAdder = mockCharmAdder{} + s.charmClient = mockCharmClient{ + charmInfo: &charms.CharmInfo{ + Meta: &charm.Meta{}, + }, + } + s.charmUpgradeClient = mockCharmUpgradeClient{charmURL: currentCharmURL} + s.modelConfigGetter = mockModelConfigGetter{} + s.resourceLister = mockResourceLister{} + + store := jujuclienttesting.NewMemStore() + store.CurrentControllerName = "foo" + store.Controllers["foo"] = jujuclient.ControllerDetails{} + store.Models["foo"] = &jujuclient.ControllerModels{ + CurrentModel: "admin/bar", + Models: map[string]jujuclient.ModelDetails{"admin/bar": {}}, + } + apiOpener := modelcmd.OpenFunc(func(store jujuclient.ClientStore, controller, model string) (api.Connection, error) { + s.AddCall("OpenAPI", store, 
controller, model) + return &s.apiConnection, nil + }) + + s.cmd = NewUpgradeCharmCommandForTest( + store, + apiOpener, + s.deployResources, + s.resolveCharm, + func(conn api.Connection, bakeryClient *httpbakery.Client, channel csclientparams.Channel) CharmAdder { + s.AddCall("NewCharmAdder", conn, bakeryClient, channel) + s.PopNoErr() + return &s.charmAdder + }, + func(conn api.Connection) CharmClient { + s.AddCall("NewCharmClient", conn) + s.PopNoErr() + return &s.charmClient + }, + func(conn api.Connection) CharmUpgradeClient { + s.AddCall("NewCharmUpgradeClient", conn) + s.PopNoErr() + return &s.charmUpgradeClient + }, + func(conn api.Connection) ModelConfigGetter { + s.AddCall("NewModelConfigGetter", conn) + return &s.modelConfigGetter + }, + func(conn api.Connection) (ResourceLister, error) { + s.AddCall("NewResourceLister", conn) + return &s.resourceLister, s.NextErr() + }, + ) +} + +func (s *UpgradeCharmSuite) runUpgradeCharm(c *gc.C, args ...string) (*cmd.Context, error) { + return coretesting.RunCommand(c, s.cmd, args...) 
+} + +func (s *UpgradeCharmSuite) TestStorageConstraints(c *gc.C) { + _, err := s.runUpgradeCharm(c, "foo", "--storage", "bar=baz") + c.Assert(err, jc.ErrorIsNil) + s.charmUpgradeClient.CheckCallNames(c, "GetCharmURL", "SetCharm") + s.charmUpgradeClient.CheckCall(c, 1, "SetCharm", application.SetCharmConfig{ + ApplicationName: "foo", + CharmID: jujucharmstore.CharmID{ + URL: s.resolvedCharmURL, + Channel: csclientparams.StableChannel, + }, + StorageConstraints: map[string]storage.Constraints{ + "bar": {Pool: "baz", Count: 1}, + }, + }) +} + +func (s *UpgradeCharmSuite) TestStorageConstraintsMinFacadeVersion(c *gc.C) { + s.apiConnection.bestFacadeVersion = 1 + _, err := s.runUpgradeCharm(c, "foo", "--storage", "bar=baz") + c.Assert(err, gc.ErrorMatches, + "updating storage constraints at upgrade-charm time is not supported by server version 1.2.3") +} + +func (s *UpgradeCharmSuite) TestStorageConstraintsMinFacadeVersionNoServerVersion(c *gc.C) { + s.apiConnection.bestFacadeVersion = 1 + s.apiConnection.serverVersion = nil + _, err := s.runUpgradeCharm(c, "foo", "--storage", "bar=baz") + c.Assert(err, gc.ErrorMatches, + "updating storage constraints at upgrade-charm time is not supported by this server") +} + +func (s *UpgradeCharmSuite) TestConfigSettings(c *gc.C) { + tempdir := c.MkDir() + configFile := filepath.Join(tempdir, "config.yaml") + err := ioutil.WriteFile(configFile, []byte("foo:{}"), 0644) + c.Assert(err, jc.ErrorIsNil) + + _, err = s.runUpgradeCharm(c, "foo", "--config", configFile) + c.Assert(err, jc.ErrorIsNil) + s.charmUpgradeClient.CheckCallNames(c, "GetCharmURL", "SetCharm") + s.charmUpgradeClient.CheckCall(c, 1, "SetCharm", application.SetCharmConfig{ + ApplicationName: "foo", + CharmID: jujucharmstore.CharmID{ + URL: s.resolvedCharmURL, + Channel: csclientparams.StableChannel, + }, + ConfigSettingsYAML: "foo:{}", + }) +} + +func (s *UpgradeCharmSuite) TestConfigSettingsMinFacadeVersion(c *gc.C) { + tempdir := c.MkDir() + configFile := 
filepath.Join(tempdir, "config.yaml") + err := ioutil.WriteFile(configFile, []byte("foo:{}"), 0644) + c.Assert(err, jc.ErrorIsNil) + + s.apiConnection.bestFacadeVersion = 1 + _, err = s.runUpgradeCharm(c, "foo", "--config", configFile) + c.Assert(err, gc.ErrorMatches, + "updating config at upgrade-charm time is not supported by server version 1.2.3") +} + +type UpgradeCharmErrorsStateSuite struct { jujutesting.RepoSuite handler charmstore.HTTPCloseHandler srv *httptest.Server } -func (s *UpgradeCharmErrorsSuite) SetUpTest(c *gc.C) { +func (s *UpgradeCharmErrorsStateSuite) SetUpTest(c *gc.C) { s.RepoSuite.SetUpTest(c) // Set up the charm store testing server. handler, err := charmstore.NewServer(s.Session.DB("juju-testing"), nil, "", charmstore.ServerParams{ @@ -46,6 +243,10 @@ c.Assert(err, jc.ErrorIsNil) s.handler = handler s.srv = httptest.NewServer(handler) + s.AddCleanup(func(*gc.C) { + s.handler.Close() + s.srv.Close() + }) s.PatchValue(&charmrepo.CacheDir, c.MkDir()) s.PatchValue(&newCharmStoreClient, func(bakeryClient *httpbakery.Client) *csclient.Client { @@ -56,20 +257,14 @@ }) } -func (s *UpgradeCharmErrorsSuite) TearDownTest(c *gc.C) { - s.handler.Close() - s.srv.Close() - s.RepoSuite.TearDownTest(c) -} - -var _ = gc.Suite(&UpgradeCharmErrorsSuite{}) +var _ = gc.Suite(&UpgradeCharmErrorsStateSuite{}) func runUpgradeCharm(c *gc.C, args ...string) error { - _, err := testing.RunCommand(c, NewUpgradeCharmCommand(), args...) + _, err := coretesting.RunCommand(c, NewUpgradeCharmCommand(), args...) 
return err } -func (s *UpgradeCharmErrorsSuite) TestInvalidArgs(c *gc.C) { +func (s *UpgradeCharmErrorsStateSuite) TestInvalidArgs(c *gc.C) { err := runUpgradeCharm(c) c.Assert(err, gc.ErrorMatches, "no application specified") err = runUpgradeCharm(c, "invalid:name") @@ -78,7 +273,7 @@ c.Assert(err, gc.ErrorMatches, `unrecognized args: \["bar"\]`) } -func (s *UpgradeCharmErrorsSuite) TestInvalidService(c *gc.C) { +func (s *UpgradeCharmErrorsStateSuite) TestInvalidService(c *gc.C) { err := runUpgradeCharm(c, "phony") c.Assert(errors.Cause(err), gc.DeepEquals, &rpc.RequestError{ Message: `application "phony" not found`, @@ -86,13 +281,13 @@ }) } -func (s *UpgradeCharmErrorsSuite) deployService(c *gc.C) { +func (s *UpgradeCharmErrorsStateSuite) deployService(c *gc.C) { ch := testcharms.Repo.ClonedDirPath(s.CharmsPath, "riak") err := runDeploy(c, ch, "riak", "--series", "quantal") c.Assert(err, jc.ErrorIsNil) } -func (s *UpgradeCharmErrorsSuite) TestInvalidSwitchURL(c *gc.C) { +func (s *UpgradeCharmErrorsStateSuite) TestInvalidSwitchURL(c *gc.C) { s.deployService(c) err := runUpgradeCharm(c, "riak", "--switch=blah") c.Assert(err, gc.ErrorMatches, `cannot resolve URL "cs:blah": charm or bundle not found`) @@ -101,47 +296,47 @@ // TODO(dimitern): add tests with incompatible charms } -func (s *UpgradeCharmErrorsSuite) TestNoPathFails(c *gc.C) { +func (s *UpgradeCharmErrorsStateSuite) TestNoPathFails(c *gc.C) { s.deployService(c) err := runUpgradeCharm(c, "riak") c.Assert(err, gc.ErrorMatches, "upgrading a local charm requires either --path or --switch") } -func (s *UpgradeCharmErrorsSuite) TestSwitchAndRevisionFails(c *gc.C) { +func (s *UpgradeCharmErrorsStateSuite) TestSwitchAndRevisionFails(c *gc.C) { s.deployService(c) err := runUpgradeCharm(c, "riak", "--switch=riak", "--revision=2") c.Assert(err, gc.ErrorMatches, "--switch and --revision are mutually exclusive") } -func (s *UpgradeCharmErrorsSuite) TestPathAndRevisionFails(c *gc.C) { +func (s 
*UpgradeCharmErrorsStateSuite) TestPathAndRevisionFails(c *gc.C) { s.deployService(c) err := runUpgradeCharm(c, "riak", "--path=foo", "--revision=2") c.Assert(err, gc.ErrorMatches, "--path and --revision are mutually exclusive") } -func (s *UpgradeCharmErrorsSuite) TestSwitchAndPathFails(c *gc.C) { +func (s *UpgradeCharmErrorsStateSuite) TestSwitchAndPathFails(c *gc.C) { s.deployService(c) err := runUpgradeCharm(c, "riak", "--switch=riak", "--path=foo") c.Assert(err, gc.ErrorMatches, "--switch and --path are mutually exclusive") } -func (s *UpgradeCharmErrorsSuite) TestInvalidRevision(c *gc.C) { +func (s *UpgradeCharmErrorsStateSuite) TestInvalidRevision(c *gc.C) { s.deployService(c) err := runUpgradeCharm(c, "riak", "--revision=blah") - c.Assert(err, gc.ErrorMatches, `invalid value "blah" for flag --revision: strconv.ParseInt: parsing "blah": invalid syntax`) + c.Assert(err, gc.ErrorMatches, `invalid value "blah" for flag --revision: strconv.(ParseInt|Atoi): parsing "blah": invalid syntax`) } -type BaseUpgradeCharmSuite struct{} +type BaseUpgradeCharmStateSuite struct{} -type UpgradeCharmSuccessSuite struct { - BaseUpgradeCharmSuite +type UpgradeCharmSuccessStateSuite struct { + BaseUpgradeCharmStateSuite jujutesting.RepoSuite - common.CmdBlockHelper + coretesting.CmdBlockHelper path string riak *state.Application } -func (s *BaseUpgradeCharmSuite) assertUpgraded(c *gc.C, riak *state.Application, revision int, forced bool) *charm.URL { +func (s *BaseUpgradeCharmStateSuite) assertUpgraded(c *gc.C, riak *state.Application, revision int, forced bool) *charm.URL { err := riak.Refresh() c.Assert(err, jc.ErrorIsNil) ch, force, err := riak.Charm() @@ -151,9 +346,9 @@ return ch.URL() } -var _ = gc.Suite(&UpgradeCharmSuccessSuite{}) +var _ = gc.Suite(&UpgradeCharmSuccessStateSuite{}) -func (s *UpgradeCharmSuccessSuite) SetUpTest(c *gc.C) { +func (s *UpgradeCharmSuccessStateSuite) SetUpTest(c *gc.C) { s.RepoSuite.SetUpTest(c) s.path = 
testcharms.Repo.ClonedDirPath(s.CharmsPath, "riak") err := runDeploy(c, s.path, "--series", "quantal") @@ -165,18 +360,18 @@ c.Assert(ch.Revision(), gc.Equals, 7) c.Assert(forced, jc.IsFalse) - s.CmdBlockHelper = common.NewCmdBlockHelper(s.APIState) + s.CmdBlockHelper = coretesting.NewCmdBlockHelper(s.APIState) c.Assert(s.CmdBlockHelper, gc.NotNil) s.AddCleanup(func(*gc.C) { s.CmdBlockHelper.Close() }) } -func (s *UpgradeCharmSuccessSuite) assertLocalRevision(c *gc.C, revision int, path string) { +func (s *UpgradeCharmSuccessStateSuite) assertLocalRevision(c *gc.C, revision int, path string) { dir, err := charm.ReadCharmDir(path) c.Assert(err, jc.ErrorIsNil) c.Assert(dir.Revision(), gc.Equals, revision) } -func (s *UpgradeCharmSuccessSuite) TestLocalRevisionUnchanged(c *gc.C) { +func (s *UpgradeCharmSuccessStateSuite) TestLocalRevisionUnchanged(c *gc.C) { err := runUpgradeCharm(c, "riak", "--path", s.path) c.Assert(err, jc.ErrorIsNil) curl := s.assertUpgraded(c, s.riak, 8, false) @@ -186,14 +381,14 @@ s.assertLocalRevision(c, 7, s.path) } -func (s *UpgradeCharmSuccessSuite) TestBlockUpgradeCharm(c *gc.C) { +func (s *UpgradeCharmSuccessStateSuite) TestBlockUpgradeCharm(c *gc.C) { // Block operation s.BlockAllChanges(c, "TestBlockUpgradeCharm") err := runUpgradeCharm(c, "riak", "--path", s.path) s.AssertBlocked(c, err, ".*TestBlockUpgradeCharm.*") } -func (s *UpgradeCharmSuccessSuite) TestRespectsLocalRevisionWhenPossible(c *gc.C) { +func (s *UpgradeCharmSuccessStateSuite) TestRespectsLocalRevisionWhenPossible(c *gc.C) { dir, err := charm.ReadCharmDir(s.path) c.Assert(err, jc.ErrorIsNil) err = dir.SetDiskRevision(42) @@ -206,7 +401,7 @@ s.assertLocalRevision(c, 42, s.path) } -func (s *UpgradeCharmSuccessSuite) TestForcedSeriesUpgrade(c *gc.C) { +func (s *UpgradeCharmSuccessStateSuite) TestForcedSeriesUpgrade(c *gc.C) { path := testcharms.Repo.ClonedDirPath(c.MkDir(), "multi-series") err := runDeploy(c, path, "multi-series", "--series", "precise") c.Assert(err, 
jc.ErrorIsNil) @@ -216,28 +411,44 @@ c.Assert(err, jc.ErrorIsNil) c.Assert(ch.Revision(), gc.Equals, 1) - // Copy files from a charm supporting a different set of series - // so we can try an upgrade requiring --force-series. - for _, f := range []string{"metadata.yaml", "revision"} { - err = utils.CopyFile( - filepath.Join(path, f), - filepath.Join(testcharms.Repo.CharmDirPath("multi-series2"), f)) - c.Assert(err, jc.ErrorIsNil) + // Overwrite the metadata.yaml to change the supported series. + metadataPath := filepath.Join(path, "metadata.yaml") + file, err := os.OpenFile(metadataPath, os.O_TRUNC|os.O_RDWR, 0666) + if err != nil { + c.Fatal(errors.Annotate(err, "cannot open metadata.yaml for overwriting")) + } + defer file.Close() + + metadata := strings.Join( + []string{ + `name: multi-series`, + `summary: "That's a dummy charm with multi-series."`, + `description: |`, + ` This is a longer description which`, + ` potentially contains multiple lines.`, + `series:`, + ` - trusty`, + ` - wily`, + }, + "\n", + ) + if _, err := file.WriteString(metadata); err != nil { + c.Fatal(errors.Annotate(err, "cannot write to metadata.yaml")) } + err = runUpgradeCharm(c, "multi-series", "--path", path, "--force-series") c.Assert(err, jc.ErrorIsNil) err = application.Refresh() c.Assert(err, jc.ErrorIsNil) + ch, force, err := application.Charm() c.Assert(err, jc.ErrorIsNil) - c.Assert(ch.Revision(), gc.Equals, 8) - c.Assert(force, gc.Equals, false) - s.AssertCharmUploaded(c, ch.URL()) - c.Assert(ch.URL().String(), gc.Equals, "local:precise/multi-series2-8") + c.Check(ch.Revision(), gc.Equals, 2) + c.Check(force, gc.Equals, false) } -func (s *UpgradeCharmSuccessSuite) TestInitWithResources(c *gc.C) { +func (s *UpgradeCharmSuccessStateSuite) TestInitWithResources(c *gc.C) { testcharms.Repo.CharmArchivePath(s.CharmsPath, "dummy") dir := c.MkDir() @@ -254,7 +465,7 @@ d := upgradeCharmCommand{} args := []string{"dummy", "--resource", res1, "--resource", res2} - err = 
testing.InitCommand(modelcmd.Wrap(&d), args) + err = coretesting.InitCommand(modelcmd.Wrap(&d), args) c.Assert(err, jc.ErrorIsNil) c.Assert(d.Resources, gc.DeepEquals, map[string]string{ "foo": foopath, @@ -262,7 +473,7 @@ }) } -func (s *UpgradeCharmSuccessSuite) TestForcedUnitsUpgrade(c *gc.C) { +func (s *UpgradeCharmSuccessStateSuite) TestForcedUnitsUpgrade(c *gc.C) { err := runUpgradeCharm(c, "riak", "--force-units", "--path", s.path) c.Assert(err, jc.ErrorIsNil) curl := s.assertUpgraded(c, s.riak, 8, true) @@ -271,7 +482,7 @@ s.assertLocalRevision(c, 7, s.path) } -func (s *UpgradeCharmSuccessSuite) TestBlockForcedUnitsUpgrade(c *gc.C) { +func (s *UpgradeCharmSuccessStateSuite) TestBlockForcedUnitsUpgrade(c *gc.C) { // Block operation s.BlockAllChanges(c, "TestBlockForcedUpgrade") err := runUpgradeCharm(c, "riak", "--force-units", "--path", s.path) @@ -282,7 +493,7 @@ s.assertLocalRevision(c, 7, s.path) } -func (s *UpgradeCharmSuccessSuite) TestCharmPath(c *gc.C) { +func (s *UpgradeCharmSuccessStateSuite) TestCharmPath(c *gc.C) { myriakPath := testcharms.Repo.ClonedDirPath(c.MkDir(), "riak") // Change the revision to 42 and upgrade to it with explicit revision. @@ -295,7 +506,7 @@ s.assertLocalRevision(c, 42, myriakPath) } -func (s *UpgradeCharmSuccessSuite) TestCharmPathNoRevUpgrade(c *gc.C) { +func (s *UpgradeCharmSuccessStateSuite) TestCharmPathNoRevUpgrade(c *gc.C) { // Revision 7 is running to start with. 
myriakPath := testcharms.Repo.ClonedDirPath(c.MkDir(), "riak") s.assertLocalRevision(c, 7, myriakPath) @@ -305,18 +516,30 @@ c.Assert(curl.String(), gc.Equals, "local:quantal/riak-8") } -func (s *UpgradeCharmSuccessSuite) TestCharmPathDifferentNameFails(c *gc.C) { +func (s *UpgradeCharmSuccessStateSuite) TestCharmPathDifferentNameFails(c *gc.C) { myriakPath := testcharms.Repo.RenamedClonedDirPath(s.CharmsPath, "riak", "myriak") - err := runUpgradeCharm(c, "riak", "--path", myriakPath) + metadataPath := filepath.Join(myriakPath, "metadata.yaml") + file, err := os.OpenFile(metadataPath, os.O_TRUNC|os.O_RDWR, 0666) + if err != nil { + c.Fatal(errors.Annotate(err, "cannot open metadata.yaml")) + } + defer file.Close() + + // Overwrite the metadata.yaml to contain a new name. + newMetadata := strings.Join([]string{`name: myriak`, `summary: ""`, `description: ""`}, "\n") + if _, err := file.WriteString(newMetadata); err != nil { + c.Fatal("cannot write to metadata.yaml") + } + err = runUpgradeCharm(c, "riak", "--path", myriakPath) c.Assert(err, gc.ErrorMatches, `cannot upgrade "riak" to "myriak"`) } -type UpgradeCharmCharmStoreSuite struct { - BaseUpgradeCharmSuite +type UpgradeCharmCharmStoreStateSuite struct { + BaseUpgradeCharmStateSuite charmStoreSuite } -var _ = gc.Suite(&UpgradeCharmCharmStoreSuite{}) +var _ = gc.Suite(&UpgradeCharmCharmStoreStateSuite{}) var upgradeCharmAuthorizationTests = []struct { about string @@ -356,7 +579,7 @@ expectError: `cannot resolve charm URL "cs:~bob/trusty/wordpress6-47": cannot get "/~bob/trusty/wordpress6-47/meta/any\?include=id&include=supported-series&include=published": unauthorized: access denied for user "client-username"`, }} -func (s *UpgradeCharmCharmStoreSuite) TestUpgradeCharmAuthorization(c *gc.C) { +func (s *UpgradeCharmCharmStoreStateSuite) TestUpgradeCharmAuthorization(c *gc.C) { testcharms.UploadCharm(c, s.client, "cs:~other/trusty/wordpress-0", "wordpress") err := runDeploy(c, "cs:~other/trusty/wordpress-0") 
c.Assert(err, jc.ErrorIsNil) @@ -375,7 +598,7 @@ } } -func (s *UpgradeCharmCharmStoreSuite) TestSwitch(c *gc.C) { +func (s *UpgradeCharmCharmStoreStateSuite) TestSwitch(c *gc.C) { testcharms.UploadCharm(c, s.client, "cs:~other/trusty/riak-0", "riak") testcharms.UploadCharm(c, s.client, "cs:~other/trusty/anotherriak-7", "riak") err := runDeploy(c, "cs:~other/trusty/riak-0") @@ -405,22 +628,22 @@ c.Assert(curl.String(), gc.Equals, "cs:~other/trusty/anotherriak-42") } -func (s *UpgradeCharmCharmStoreSuite) TestUpgradeCharmWithChannel(c *gc.C) { +func (s *UpgradeCharmCharmStoreStateSuite) TestUpgradeCharmWithChannel(c *gc.C) { id, ch := testcharms.UploadCharm(c, s.client, "cs:~client-username/trusty/wordpress-0", "wordpress") err := runDeploy(c, "cs:~client-username/trusty/wordpress-0") c.Assert(err, jc.ErrorIsNil) // Upload a new revision of the charm, but publish it - // only to the development channel. + // only to the beta channel. id.Revision = 1 err = s.client.UploadCharmWithRevision(id, ch, -1) c.Assert(err, gc.IsNil) - err = s.client.Publish(id, []csclientparams.Channel{csclientparams.DevelopmentChannel}, nil) + err = s.client.Publish(id, []csclientparams.Channel{csclientparams.BetaChannel}, nil) c.Assert(err, gc.IsNil) - err = runUpgradeCharm(c, "wordpress", "--channel", "development") + err = runUpgradeCharm(c, "wordpress", "--channel", "beta") c.Assert(err, gc.IsNil) s.assertCharmsUploaded(c, "cs:~client-username/trusty/wordpress-0", "cs:~client-username/trusty/wordpress-1") @@ -429,7 +652,7 @@ }) } -func (s *UpgradeCharmCharmStoreSuite) TestUpgradeWithTermsNotSigned(c *gc.C) { +func (s *UpgradeCharmCharmStoreStateSuite) TestUpgradeWithTermsNotSigned(c *gc.C) { id, ch := testcharms.UploadCharm(c, s.client, "quantal/terms1-1", "terms1") err := runDeploy(c, "quantal/terms1") c.Assert(err, jc.ErrorIsNil) @@ -446,3 +669,79 @@ err = runUpgradeCharm(c, "terms1") c.Assert(err, gc.ErrorMatches, expectedError) } + +type mockAPIConnection struct { + api.Connection + 
bestFacadeVersion int + serverVersion *version.Number +} + +func (m *mockAPIConnection) BestFacadeVersion(name string) int { + return m.bestFacadeVersion +} + +func (m *mockAPIConnection) ServerVersion() (version.Number, bool) { + if m.serverVersion != nil { + return *m.serverVersion, true + } + return version.Number{}, false +} + +func (*mockAPIConnection) Close() error { + return nil +} + +type mockCharmAdder struct { + CharmAdder + testing.Stub +} + +func (m *mockCharmAdder) AddCharm(curl *charm.URL, channel csclientparams.Channel) error { + m.MethodCall(m, "AddCharm", curl, channel) + return m.NextErr() +} + +type mockCharmClient struct { + CharmClient + testing.Stub + charmInfo *charms.CharmInfo +} + +func (m *mockCharmClient) CharmInfo(curl string) (*charms.CharmInfo, error) { + m.MethodCall(m, "CharmInfo", curl) + if err := m.NextErr(); err != nil { + return nil, err + } + return m.charmInfo, nil +} + +type mockCharmUpgradeClient struct { + CharmUpgradeClient + testing.Stub + charmURL *charm.URL +} + +func (m *mockCharmUpgradeClient) GetCharmURL(applicationName string) (*charm.URL, error) { + m.MethodCall(m, "GetCharmURL", applicationName) + return m.charmURL, m.NextErr() +} + +func (m *mockCharmUpgradeClient) SetCharm(cfg application.SetCharmConfig) error { + m.MethodCall(m, "SetCharm", cfg) + return m.NextErr() +} + +type mockModelConfigGetter struct { + ModelConfigGetter + testing.Stub +} + +func (m *mockModelConfigGetter) ModelGet() (map[string]interface{}, error) { + m.MethodCall(m, "ModelGet") + return coretesting.FakeConfig(), m.NextErr() +} + +type mockResourceLister struct { + ResourceLister + testing.Stub +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/backups/backups.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/backups/backups.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/backups/backups.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/backups/backups.go 
2016-10-13 14:31:49.000000000 +0000 @@ -10,7 +10,7 @@ "github.com/juju/cmd" "github.com/juju/errors" - "launchpad.net/gnuflag" + "github.com/juju/gnuflag" "github.com/juju/juju/api/backups" apiserverbackups "github.com/juju/juju/apiserver/backups" @@ -55,6 +55,7 @@ // SetFlags implements Command.SetFlags. func (c *CommandBase) SetFlags(f *gnuflag.FlagSet) { + c.ModelCommandBase.SetFlags(f) if c.Log != nil { c.Log.AddFlags(f) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/backups/create.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/backups/create.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/backups/create.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/backups/create.go 2016-10-13 14:31:49.000000000 +0000 @@ -11,7 +11,7 @@ "github.com/juju/cmd" "github.com/juju/errors" - "launchpad.net/gnuflag" + "github.com/juju/gnuflag" "github.com/juju/juju/cmd/modelcmd" "github.com/juju/juju/state/backups" diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/backups/download.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/backups/download.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/backups/download.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/backups/download.go 2016-10-13 14:31:49.000000000 +0000 @@ -10,7 +10,7 @@ "github.com/juju/cmd" "github.com/juju/errors" - "launchpad.net/gnuflag" + "github.com/juju/gnuflag" "github.com/juju/juju/cmd/modelcmd" "github.com/juju/juju/state/backups" diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/backups/export_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/backups/export_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/backups/export_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/backups/export_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -72,7 +72,7 @@ api RestoreAPI, 
getArchive func(string) (ArchiveReader, *params.BackupsMetadataResult, error), newEnviron func(environs.OpenParams) (environs.Environ, error), - getRebootstrapParams func(string, *params.BackupsMetadataResult) (*restoreBootstrapParams, error), + getRebootstrapParams func(*cmd.Context, string, *params.BackupsMetadataResult) (*restoreBootstrapParams, error), ) cmd.Command { c := &restoreCommand{ getArchiveFunc: getArchive, @@ -81,7 +81,7 @@ newAPIClientFunc: func() (RestoreAPI, error) { return api, nil }, - waitForAgentFunc: func(ctx *cmd.Context, c *modelcmd.ModelCommandBase, controllerName string) error { + waitForAgentFunc: func(ctx *cmd.Context, c *modelcmd.ModelCommandBase, controllerName, hostedModelName string) error { return nil }, } @@ -102,8 +102,8 @@ } } -func GetRebootstrapParamsFunc(cloud string) func(string, *params.BackupsMetadataResult) (*restoreBootstrapParams, error) { - return func(string, *params.BackupsMetadataResult) (*restoreBootstrapParams, error) { +func GetRebootstrapParamsFunc(cloud string) func(*cmd.Context, string, *params.BackupsMetadataResult) (*restoreBootstrapParams, error) { + return func(*cmd.Context, string, *params.BackupsMetadataResult) (*restoreBootstrapParams, error) { return &restoreBootstrapParams{ ControllerConfig: testing.FakeControllerConfig(), Cloud: environs.CloudSpec{ @@ -115,8 +115,8 @@ } } -func GetRebootstrapParamsFuncWithError() func(string, *params.BackupsMetadataResult) (*restoreBootstrapParams, error) { - return func(string, *params.BackupsMetadataResult) (*restoreBootstrapParams, error) { +func GetRebootstrapParamsFuncWithError() func(*cmd.Context, string, *params.BackupsMetadataResult) (*restoreBootstrapParams, error) { + return func(*cmd.Context, string, *params.BackupsMetadataResult) (*restoreBootstrapParams, error) { return nil, errors.New("failed") } } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/backups/list.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/backups/list.go --- 
juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/backups/list.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/backups/list.go 2016-10-13 14:31:49.000000000 +0000 @@ -64,7 +64,7 @@ } if len(result.List) == 0 { - fmt.Fprintln(ctx.Stdout, "(no backups found)") + ctx.Infof("No backups to display.") return nil } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/backups/restore.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/backups/restore.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/backups/restore.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/backups/restore.go 2016-10-13 14:31:49.000000000 +0000 @@ -13,8 +13,8 @@ "github.com/juju/cmd" "github.com/juju/errors" + "github.com/juju/gnuflag" "github.com/juju/utils" - "launchpad.net/gnuflag" "github.com/juju/juju/api/backups" "github.com/juju/juju/apiserver/params" @@ -48,17 +48,18 @@ // it is invoked with "juju restore-backup". type restoreCommand struct { CommandBase - constraints constraints.Value - filename string - backupId string - bootstrap bool - uploadTools bool + constraints constraints.Value + constraintsStr string + filename string + backupId string + bootstrap bool + buildAgent bool newAPIClientFunc func() (RestoreAPI, error) newEnvironFunc func(environs.OpenParams) (environs.Environ, error) - getRebootstrapParamsFunc func(string, *params.BackupsMetadataResult) (*restoreBootstrapParams, error) + getRebootstrapParamsFunc func(*cmd.Context, string, *params.BackupsMetadataResult) (*restoreBootstrapParams, error) getArchiveFunc func(string) (ArchiveReader, *params.BackupsMetadataResult, error) - waitForAgentFunc func(ctx *cmd.Context, c *modelcmd.ModelCommandBase, controllerName string) error + waitForAgentFunc func(ctx *cmd.Context, c *modelcmd.ModelCommandBase, controllerName, hostedModelName string) error } // RestoreAPI is used to invoke various API calls. 
@@ -105,13 +106,11 @@ // SetFlags handles known option flags. func (c *restoreCommand) SetFlags(f *gnuflag.FlagSet) { c.CommandBase.SetFlags(f) - f.Var(constraints.ConstraintsValue{Target: &c.constraints}, - "constraints", "set model constraints") - + f.StringVar(&c.constraintsStr, "constraints", "", "set model constraints") f.BoolVar(&c.bootstrap, "b", false, "Bootstrap a new state machine") f.StringVar(&c.filename, "file", "", "Provide a file to be used as the backup.") f.StringVar(&c.backupId, "id", "", "Provide the name of the backup to be restored") - f.BoolVar(&c.uploadTools, "upload-tools", false, "Upload tools if bootstraping a new machine") + f.BoolVar(&c.buildAgent, "build-agent", false, "Build binary agent if bootstraping a new machine") } // Init is where the preconditions for this commands can be checked. @@ -125,6 +124,7 @@ if c.backupId != "" && c.bootstrap { return errors.Errorf("it is not possible to rebootstrap and restore from an id.") } + var err error if c.filename != "" { c.filename, err = filepath.Abs(c.filename) @@ -146,7 +146,7 @@ // getRebootstrapParams returns the params for rebootstrapping the // specified controller. func (c *restoreCommand) getRebootstrapParams( - controllerName string, meta *params.BackupsMetadataResult, + ctx *cmd.Context, controllerName string, meta *params.BackupsMetadataResult, ) (*restoreBootstrapParams, error) { // TODO(axw) delete this and -b. We will update bootstrap with a flag // to specify a restore file. When we do that, we'll need to extract @@ -155,7 +155,11 @@ // things like the admin-secret, controller certificate etc with the // backup. 
store := c.ClientStore() - config, params, err := modelcmd.NewGetBootstrapConfigParamsFunc(store)(controllerName) + controllerDetails, err := store.ControllerByName(controllerName) + if err != nil { + return nil, errors.Trace(err) + } + config, params, err := modelcmd.NewGetBootstrapConfigParamsFunc(ctx, store)(controllerName) if err != nil { return nil, errors.Trace(err) } @@ -199,7 +203,7 @@ for k, v := range config.ControllerConfig { controllerCfg[k] = v } - controllerCfg[controller.ControllerUUIDKey] = params.ControllerUUID + controllerCfg[controller.ControllerUUIDKey] = controllerDetails.ControllerUUID controllerCfg[controller.CACertKey] = meta.CACert return &restoreBootstrapParams{ @@ -214,7 +218,7 @@ // rebootstrap will bootstrap a new server in safe-mode (not killing any other agent) // if there is no current server available to restore to. func (c *restoreCommand) rebootstrap(ctx *cmd.Context, meta *params.BackupsMetadataResult) error { - params, err := c.getRebootstrapParamsFunc(c.ControllerName(), meta) + params, err := c.getRebootstrapParamsFunc(ctx, c.ControllerName(), meta) if err != nil { return errors.Trace(err) } @@ -316,8 +320,8 @@ CloudCredentialName: params.CredentialName, CloudCredential: params.Cloud.Credential, ModelConstraints: c.constraints, - UploadTools: c.uploadTools, - BuildToolsTarball: sync.BuildToolsTarball, + BuildAgent: c.buildAgent, + BuildAgentTarball: sync.BuildAgentTarball, ControllerConfig: params.ControllerConfig, HostedModelConfig: hostedModelConfig, BootstrapSeries: meta.Series, @@ -337,7 +341,7 @@ // New controller is bootstrapped, so now record the API address so // we can connect. 
apiPort := params.ControllerConfig.APIPort() - err = common.SetBootstrapEndpointAddress(store, c.ControllerName(), apiPort, env) + err = common.SetBootstrapEndpointAddress(store, c.ControllerName(), bootVers, apiPort, env) if err != nil { return errors.Trace(err) } @@ -345,7 +349,7 @@ // To avoid race conditions when running scripted bootstraps, wait // for the controller's machine agent to be ready to accept commands // before exiting this bootstrap command. - return c.waitForAgentFunc(ctx, &c.ModelCommandBase, c.ControllerName()) + return c.waitForAgentFunc(ctx, &c.ModelCommandBase, c.ControllerName(), "default") } func (c *restoreCommand) newClient() (*backups.Client, error) { @@ -362,6 +366,12 @@ // Run is the entry point for this command. func (c *restoreCommand) Run(ctx *cmd.Context) error { + var err error + c.constraints, err = common.ParseConstraints(ctx, c.constraintsStr) + if err != nil { + return err + } + if c.Log != nil { if err := c.Log.Start(ctx); err != nil { return err diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/backups/restore_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/backups/restore_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/backups/restore_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/backups/restore_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -25,6 +25,7 @@ _ "github.com/juju/juju/provider/dummy" _ "github.com/juju/juju/provider/lxd" "github.com/juju/juju/testing" + "github.com/juju/juju/version" ) type restoreSuite struct { @@ -66,7 +67,7 @@ CurrentModel: "admin", } s.store.Accounts["testing"] = jujuclient.AccountDetails{ - User: "current-user@local", + User: "current-user", Password: "old-password", } s.store.BootstrapConfig["testing"] = jujuclient.BootstrapConfig{ @@ -77,6 +78,7 @@ "type": "dummy", "name": "admin", }, + ControllerModelUUID: testing.ModelTag.Id(), ControllerConfig: controller.Config{ "api-port": 17070, 
"state-port": 37017, @@ -229,7 +231,7 @@ boostrapped := false s.PatchValue(&backups.BootstrapFunc, func(ctx environs.BootstrapContext, environ environs.Environ, args bootstrap.BootstrapParams) error { c.Assert(args.ControllerConfig, jc.DeepEquals, controller.Config{ - "controller-uuid": "deadbeef-0bad-400d-8000-4b1d0d06f00d", + "controller-uuid": "deadbeef-1bad-500d-9000-4b1d0d06f00d", "ca-cert": testing.CACert, "state-port": 1234, "api-port": 17777, @@ -239,6 +241,10 @@ return nil }) + intPtr := func(i int) *int { + return &i + } + _, err := testing.RunCommand(c, s.command, "restore", "-m", "testing:test1", "--file", "afile", "-b") c.Assert(err, jc.ErrorIsNil) c.Assert(boostrapped, jc.IsTrue) @@ -246,9 +252,13 @@ Cloud: "mycloud", CloudRegion: "a-region", CACert: testing.CACert, - ControllerUUID: "deadbeef-0bad-400d-8000-4b1d0d06f00d", + ControllerUUID: "deadbeef-1bad-500d-9000-4b1d0d06f00d", APIEndpoints: []string{"10.0.0.1:17777"}, UnresolvedAPIEndpoints: []string{"10.0.0.1:17777"}, + AgentVersion: version.Current.String(), + ModelCount: intPtr(2), + MachineCount: intPtr(1), + ControllerMachineCount: 1, }) } @@ -272,7 +282,7 @@ sort.Sort(args.Cloud.AuthTypes) c.Assert(args.Cloud, jc.DeepEquals, cloud.Cloud{ Type: "lxd", - AuthTypes: []cloud.AuthType{"certificate", "empty"}, + AuthTypes: []cloud.AuthType{"empty"}, Regions: []cloud.Region{{Name: "localhost"}}, }) return nil diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/block/block.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/block/block.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/block/block.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/block/block.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,209 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. 
- -package block - -import ( - "github.com/juju/cmd" - "github.com/juju/errors" - "launchpad.net/gnuflag" - - "github.com/juju/juju/cmd/modelcmd" -) - -// BaseBlockCommand is base command for all -// commands that enable blocks. -type BaseBlockCommand struct { - modelcmd.ModelCommandBase - desc string -} - -// Init initializes the command. -// Satisfying Command interface. -func (c *BaseBlockCommand) Init(args []string) error { - if len(args) > 1 { - return errors.Trace(errors.New("can only specify block message")) - } - - if len(args) == 1 { - c.desc = args[0] - } - return nil -} - -// internalRun blocks commands from running successfully. -func (c *BaseBlockCommand) internalRun(operation string) error { - client, err := getBlockClientAPI(c) - if err != nil { - return errors.Trace(err) - } - defer client.Close() - - return client.SwitchBlockOn(TypeFromOperation(operation), c.desc) -} - -// SetFlags implements Command.SetFlags. -func (c *BaseBlockCommand) SetFlags(f *gnuflag.FlagSet) { - c.ModelCommandBase.SetFlags(f) -} - -// BlockClientAPI defines the client API methods that block command uses. -type BlockClientAPI interface { - Close() error - SwitchBlockOn(blockType, msg string) error -} - -var getBlockClientAPI = func(p *BaseBlockCommand) (BlockClientAPI, error) { - return getBlockAPI(&p.ModelCommandBase) -} - -func newDestroyCommand() cmd.Command { - return modelcmd.Wrap(&destroyCommand{}) -} - -// destroyCommand blocks destroy environment. -type destroyCommand struct { - BaseBlockCommand -} - -var destroyBlockDoc = ` -This command allows to block model destruction. - -To disable the block, run unblock command - see "juju help unblock". -To by-pass the block, run destroy-model with --force option. - -"juju block destroy-model" only blocks destroy-model command. - -Examples: - # To prevent the model from being destroyed: - juju block destroy-model - -` - -// Info provides information about command. -// Satisfying Command interface. 
-func (c *destroyCommand) Info() *cmd.Info { - return &cmd.Info{ - Name: "destroy-model", - Purpose: "Block an operation that would destroy Juju model.", - Doc: destroyBlockDoc, - } -} - -// Satisfying Command interface. -func (c *destroyCommand) Run(_ *cmd.Context) error { - return c.internalRun(c.Info().Name) -} - -func newRemoveCommand() cmd.Command { - return modelcmd.Wrap(&removeCommand{}) -} - -// removeCommand blocks commands that remove juju objects. -type removeCommand struct { - BaseBlockCommand -} - -var removeBlockDoc = ` -This command allows to block all operations that would remove an object -from Juju model. - -To disable the block, run unblock command - see "juju help unblock". -To by-pass the block, where available, run desired remove command with --force option. - -"juju block remove-object" blocks these commands: - destroy-model - remove-machine - remove-relation - remove-application - remove-unit - -Examples: - # To prevent the machines, applications, units and relations from being removed: - juju block remove-object - -` - -// Info provides information about command. -// Satisfying Command interface. -func (c *removeCommand) Info() *cmd.Info { - return &cmd.Info{ - Name: "remove-object", - Purpose: "Block an operation that would remove an object.", - Doc: removeBlockDoc, - } -} - -// Satisfying Command interface. -func (c *removeCommand) Run(_ *cmd.Context) error { - return c.internalRun(c.Info().Name) -} - -func newChangeCommand() cmd.Command { - return modelcmd.Wrap(&changeCommand{}) -} - -// changeCommand blocks commands that may change environment. -type changeCommand struct { - BaseBlockCommand -} - -var changeBlockDoc = ` -This command allows to block all operations that would alter -Juju model. - -To disable the block, run unblock command - see "juju help unblock". -To by-pass the block, where available, run desired remove command with --force option. 
- -"juju block all-changes" blocks these commands: - add-machine - add-relation - add-unit - authorised-keys add - authorised-keys delete - authorised-keys import - deploy - destroy-model - enable-ha - expose - remove-machine - remove-relation - remove-application - remove-unit - resolved - retry-provisioning - run - set - set-constraints - set-model-config - sync-tools - unexpose - unset - unset-model-config - upgrade-charm - upgrade-juju - add-user - change-user-password - disable-user - enable-user - -Examples: - # To prevent changes to the model: - juju block all-changes - -` - -// Info provides information about command. -// Satisfying Command interface. -func (c *changeCommand) Info() *cmd.Info { - return &cmd.Info{ - Name: "all-changes", - Purpose: "Block operations that could change Juju model.", - Doc: changeBlockDoc, - } -} - -// Satisfying Command interface. -func (c *changeCommand) Run(_ *cmd.Context) error { - return c.internalRun(c.Info().Name) -} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/block/blocksuper.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/block/blocksuper.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/block/blocksuper.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/block/blocksuper.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,45 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package block - -import ( - "github.com/juju/cmd" -) - -const superBlockCmdDoc = ` - -Juju allows to safeguard deployed models from unintentional damage by preventing -execution of operations that could alter model. - -This is done by blocking certain commands from successful execution. Blocked commands -must be manually unblocked to proceed. - -"juju block" is used to list or to enable model blocks in - the Juju model. -` - -const superBlockCmdPurpose = "List and enable model blocks." 
- -// Command is the top-level command wrapping all storage functionality. -type Command struct { - cmd.SuperCommand -} - -// NewSuperBlockCommand creates the block supercommand and -// registers the subcommands that it supports. -func NewSuperBlockCommand() cmd.Command { - blockcmd := Command{ - SuperCommand: *cmd.NewSuperCommand( - cmd.SuperCommandParams{ - Name: "block", - Doc: superBlockCmdDoc, - UsagePrefix: "juju", - Purpose: superBlockCmdPurpose, - })} - blockcmd.Register(newDestroyCommand()) - blockcmd.Register(newRemoveCommand()) - blockcmd.Register(newChangeCommand()) - blockcmd.Register(newListCommand()) - return &blockcmd -} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/block/block_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/block/block_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/block/block_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/block/block_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,99 +0,0 @@ -// Copyright 2013 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. 
- -package block_test - -import ( - "strings" - - "github.com/juju/cmd" - "github.com/juju/errors" - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" - - "github.com/juju/juju/apiserver/common" - "github.com/juju/juju/cmd/juju/block" - "github.com/juju/juju/testing" -) - -type BlockCommandSuite struct { - ProtectionCommandSuite - mockClient *block.MockBlockClient -} - -func (s *BlockCommandSuite) SetUpTest(c *gc.C) { - s.FakeJujuXDGDataHomeSuite.SetUpTest(c) - s.mockClient = &block.MockBlockClient{} - s.PatchValue(block.BlockClient, func(p *block.BaseBlockCommand) (block.BlockClientAPI, error) { - return s.mockClient, nil - }) -} - -var _ = gc.Suite(&BlockCommandSuite{}) - -func (s *BlockCommandSuite) assertBlock(c *gc.C, operation, message string) { - expectedOp := block.TypeFromOperation(operation) - c.Assert(s.mockClient.BlockType, gc.DeepEquals, expectedOp) - c.Assert(s.mockClient.Msg, gc.DeepEquals, message) -} - -func (s *BlockCommandSuite) TestBlockCmdMoreArgs(c *gc.C) { - _, err := testing.RunCommand(c, block.NewDestroyCommand(), "change", "too much") - c.Assert( - err, - gc.ErrorMatches, - `.*can only specify block message.*`) -} - -func (s *BlockCommandSuite) TestBlockCmdNoMessage(c *gc.C) { - command := block.NewDestroyCommand() - _, err := testing.RunCommand(c, command) - c.Assert(err, jc.ErrorIsNil) - s.assertBlock(c, command.Info().Name, "") -} - -func (s *BlockCommandSuite) TestBlockDestroyOperations(c *gc.C) { - command := block.NewDestroyCommand() - _, err := testing.RunCommand(c, command, "TestBlockDestroyOperations") - c.Assert(err, jc.ErrorIsNil) - s.assertBlock(c, command.Info().Name, "TestBlockDestroyOperations") -} - -func (s *BlockCommandSuite) TestBlockRemoveOperations(c *gc.C) { - command := block.NewRemoveCommand() - _, err := testing.RunCommand(c, command, "TestBlockRemoveOperations") - c.Assert(err, jc.ErrorIsNil) - s.assertBlock(c, command.Info().Name, "TestBlockRemoveOperations") -} - -func (s *BlockCommandSuite) 
TestBlockChangeOperations(c *gc.C) { - command := block.NewChangeCommand() - _, err := testing.RunCommand(c, command, "TestBlockChangeOperations") - c.Assert(err, jc.ErrorIsNil) - s.assertBlock(c, command.Info().Name, "TestBlockChangeOperations") -} - -func (s *BlockCommandSuite) processErrorTest(c *gc.C, tstError error, blockType block.Block, expectedError error, expectedWarning string) { - if tstError != nil { - c.Assert(errors.Cause(block.ProcessBlockedError(tstError, blockType)), gc.Equals, expectedError) - } else { - c.Assert(block.ProcessBlockedError(tstError, blockType), jc.ErrorIsNil) - } - // warning displayed - logOutputText := strings.Replace(c.GetTestLog(), "\n", "", -1) - c.Assert(logOutputText, gc.Matches, expectedWarning) -} - -func (s *BlockCommandSuite) TestProcessErrOperationBlocked(c *gc.C) { - s.processErrorTest(c, common.OperationBlockedError("operations that remove"), block.BlockRemove, cmd.ErrSilent, ".*operations that remove.*") - s.processErrorTest(c, common.OperationBlockedError("destroy-model operation has been blocked"), block.BlockDestroy, cmd.ErrSilent, ".*destroy-model operation has been blocked.*") -} - -func (s *BlockCommandSuite) TestProcessErrNil(c *gc.C) { - s.processErrorTest(c, nil, block.BlockDestroy, nil, "") -} - -func (s *BlockCommandSuite) TestProcessErrAny(c *gc.C) { - err := errors.New("Test error Processing") - s.processErrorTest(c, err, block.BlockDestroy, err, "") -} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/block/disablecommand.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/block/disablecommand.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/block/disablecommand.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/block/disablecommand.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,95 @@ +// Copyright 2014 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+ +package block + +import ( + "strings" + + "github.com/juju/cmd" + "github.com/juju/errors" + + "github.com/juju/juju/cmd/modelcmd" +) + +// NewDisableCommand returns a disable-command command instance +// that will use the default API. +func NewDisableCommand() cmd.Command { + return modelcmd.Wrap(&disableCommand{ + apiFunc: func(c newAPIRoot) (blockClientAPI, error) { + return getBlockAPI(c) + }, + }) +} + +type disableCommand struct { + modelcmd.ModelCommandBase + apiFunc func(newAPIRoot) (blockClientAPI, error) + target string + message string +} + +// Init implements Command. +func (c *disableCommand) Init(args []string) error { + if len(args) < 1 { + return errors.Errorf("missing command set (%s)", validTargets) + } + c.target, args = args[0], args[1:] + target, ok := toAPIValue[c.target] + if !ok { + return errors.Errorf("bad command set, valid options: %s", validTargets) + } + c.target = target + c.message = strings.Join(args, " ") + return nil +} + +// Info implements Command. +func (c *disableCommand) Info() *cmd.Info { + return &cmd.Info{ + Name: "disable-command", + Args: " [message...]", + Purpose: "Disable commands for the model.", + Doc: disableCommandDoc, + } +} + +type blockClientAPI interface { + Close() error + SwitchBlockOn(blockType, msg string) error +} + +// Run implements Command.Run +func (c *disableCommand) Run(ctx *cmd.Context) error { + api, err := c.apiFunc(c) + if err != nil { + return errors.Annotate(err, "cannot connect to the API") + } + defer api.Close() + + return api.SwitchBlockOn(c.target, c.message) +} + +var disableCommandDoc = ` +Juju allows to safeguard deployed models from unintentional damage by preventing +execution of operations that could alter model. + +This is done by disabling certain sets of commands from successful execution. +Disabled commands must be manually enabled to proceed. + +Some commands offer a --force option that can be used to bypass the disabling. 
+` + commandSets + ` +Examples: + # To prevent the model from being destroyed: + juju disable-command destroy-model "Check with SA before destruction." + + # To prevent the machines, applications, units and relations from being removed: + juju disable-command remove-object + + # To prevent changes to the model: + juju disable-command all "Model locked down" + +See also: + disabled-commands + enable-command +` diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/block/disablecommand_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/block/disablecommand_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/block/disablecommand_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/block/disablecommand_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,105 @@ +// Copyright 2013 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package block_test + +import ( + "github.com/juju/errors" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/cmd/juju/block" + "github.com/juju/juju/testing" +) + +var _ = gc.Suite(&disableCommandSuite{}) + +type disableCommandSuite struct { + testing.FakeJujuXDGDataHomeSuite +} + +func (s *disableCommandSuite) TestInit(c *gc.C) { + for _, test := range []struct { + args []string + err string + }{ + { + err: "missing command set (all, destroy-model, remove-object)", + }, { + args: []string{"other"}, + err: "bad command set, valid options: all, destroy-model, remove-object", + }, { + args: []string{"all"}, + }, { + args: []string{"destroy-model"}, + }, { + args: []string{"remove-object"}, + }, { + args: []string{"all", "lots", "of", "args"}, + }, + } { + cmd := block.NewDisableCommand() + err := testing.InitCommand(cmd, test.args) + if test.err == "" { + c.Check(err, jc.ErrorIsNil) + } else { + c.Check(err.Error(), gc.Equals, test.err) + } + } +} + +func (s *disableCommandSuite) TestRunGetAPIError(c 
*gc.C) { + cmd := block.NewDisableCommandForTest(nil, errors.New("boom")) + _, err := testing.RunCommand(c, cmd, "all") + c.Assert(err.Error(), gc.Equals, "cannot connect to the API: boom") +} + +func (s *disableCommandSuite) TestRun(c *gc.C) { + for _, test := range []struct { + args []string + type_ string + message string + }{{ + args: []string{"all", "this is a single arg message"}, + type_: "BlockChange", + message: "this is a single arg message", + }, { + args: []string{"destroy-model", "this", "is", "many", "args"}, + type_: "BlockDestroy", + message: "this is many args", + }, { + args: []string{"remove-object", "this is a", "mix"}, + type_: "BlockRemove", + message: "this is a mix", + }} { + mockClient := &mockBlockClient{} + cmd := block.NewDisableCommandForTest(mockClient, nil) + _, err := testing.RunCommand(c, cmd, test.args...) + c.Check(err, jc.ErrorIsNil) + c.Check(mockClient.blockType, gc.Equals, test.type_) + c.Check(mockClient.message, gc.Equals, test.message) + } +} + +func (s *disableCommandSuite) TestRunError(c *gc.C) { + mockClient := &mockBlockClient{err: errors.New("boom")} + cmd := block.NewDisableCommandForTest(mockClient, nil) + _, err := testing.RunCommand(c, cmd, "all") + c.Check(err, gc.ErrorMatches, "boom") +} + +type mockBlockClient struct { + blockType string + message string + err error +} + +func (c *mockBlockClient) Close() error { + return nil +} + +func (c *mockBlockClient) SwitchBlockOn(blockType, message string) error { + c.blockType = blockType + c.message = message + return c.err +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/block/doc.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/block/doc.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/block/doc.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/block/doc.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,53 @@ +// Copyright 2016 Canonical Ltd. 
+// Licensed under the AGPLv3, see LICENCE file for details. + +package block + +const commandSets = ` +Commands that can be disabled are grouped based on logical operations as follows: + +"destroy-model" prevents: + destroy-controller + destroy-model + +"remove-object" prevents: + destroy-controller + destroy-model + remove-machine + remove-relation + remove-application + remove-unit + +"all" prevents: + add-machine + add-relation + add-unit + add-ssh-key + add-user + change-user-password + deploy + disable-user + destroy-controller + destroy-model + enable-ha + enable-user + expose + import-ssh-key + remove-application + remove-machine + remove-relation + remove-ssh-key + remove-unit + resolved + retry-provisioning + run + set-config + set-constraints + set-model-config + sync-tools + unexpose + unset-config + unset-model-config + upgrade-charm + upgrade-juju + ` diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/block/enablecommand.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/block/enablecommand.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/block/enablecommand.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/block/enablecommand.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,93 @@ +// Copyright 2014 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package block + +import ( + "github.com/juju/cmd" + "github.com/juju/errors" + + "github.com/juju/juju/cmd/modelcmd" +) + +// NewEnableCommand returns a new command that eanbles previously disabled +// command sets. +func NewEnableCommand() cmd.Command { + return modelcmd.Wrap(&enableCommand{ + apiFunc: func(c newAPIRoot) (unblockClientAPI, error) { + return getBlockAPI(c) + }, + }) +} + +// enableCommand removes the block from desired operation. +type enableCommand struct { + modelcmd.ModelCommandBase + apiFunc func(newAPIRoot) (unblockClientAPI, error) + target string +} + +// Init implements Command. 
+func (c *enableCommand) Init(args []string) error { + if len(args) < 1 { + return errors.Errorf("missing command set (%s)", validTargets) + } + c.target, args = args[0], args[1:] + target, ok := toAPIValue[c.target] + if !ok { + return errors.Errorf("bad command set, valid options: %s", validTargets) + } + c.target = target + return cmd.CheckEmpty(args) +} + +// Info implementsCommand. +func (c *enableCommand) Info() *cmd.Info { + return &cmd.Info{ + Name: "enable-command", + Args: "", + Purpose: "Enable commands that had been previously disabled.", + Doc: enableDoc, + } +} + +// unblockClientAPI defines the client API methods that unblock command uses. +type unblockClientAPI interface { + Close() error + SwitchBlockOff(blockType string) error +} + +// Run implements Command. +func (c *enableCommand) Run(_ *cmd.Context) error { + api, err := c.apiFunc(c) + if err != nil { + return errors.Annotate(err, "cannot connect to the API") + } + defer api.Close() + + return api.SwitchBlockOff(c.target) +} + +const enableDoc = ` +Juju allows to safeguard deployed models from unintentional damage by preventing +execution of operations that could alter model. + +This is done by disabling certain sets of commands from successful execution. +Disabled commands must be manually enabled to proceed. + +Some commands offer a --force option that can be used to bypass a block. 
+` + commandSets + ` +Examples: + # To allow the model to be destroyed: + juju enable-command destroy-model + + # To allow the machines, applications, units and relations to be removed: + juju enable-command remove-object + + # To allow changes to the model: + juju enable-command all + +See also: + disable-command + disabled-commands +` diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/block/enablecommand_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/block/enablecommand_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/block/enablecommand_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/block/enablecommand_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,100 @@ +// Copyright 2014 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package block_test + +import ( + "errors" + + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/cmd/juju/block" + "github.com/juju/juju/testing" +) + +var _ = gc.Suite(&enableCommandSuite{}) + +type enableCommandSuite struct { + testing.FakeJujuXDGDataHomeSuite +} + +func (s *enableCommandSuite) TestInit(c *gc.C) { + for _, test := range []struct { + args []string + err string + }{ + { + err: "missing command set (all, destroy-model, remove-object)", + }, { + args: []string{"other"}, + err: "bad command set, valid options: all, destroy-model, remove-object", + }, { + args: []string{"all"}, + }, { + args: []string{"destroy-model"}, + }, { + args: []string{"remove-object"}, + }, { + args: []string{"all", "extra"}, + err: `unrecognized args: ["extra"]`, + }, + } { + cmd := block.NewEnableCommand() + err := testing.InitCommand(cmd, test.args) + if test.err == "" { + c.Check(err, jc.ErrorIsNil) + } else { + c.Check(err.Error(), gc.Equals, test.err) + } + } +} + +func (s *enableCommandSuite) TestRunGetAPIError(c *gc.C) { + cmd := block.NewEnableCommandForTest(nil, 
errors.New("boom")) + _, err := testing.RunCommand(c, cmd, "all") + c.Assert(err.Error(), gc.Equals, "cannot connect to the API: boom") +} + +func (s *enableCommandSuite) TestRun(c *gc.C) { + for _, test := range []struct { + args []string + type_ string + }{{ + args: []string{"all"}, + type_: "BlockChange", + }, { + args: []string{"destroy-model"}, + type_: "BlockDestroy", + }, { + args: []string{"remove-object"}, + type_: "BlockRemove", + }} { + mockClient := &mockUnblockClient{} + cmd := block.NewEnableCommandForTest(mockClient, nil) + _, err := testing.RunCommand(c, cmd, test.args...) + c.Check(err, jc.ErrorIsNil) + c.Check(mockClient.blockType, gc.Equals, test.type_) + } +} + +func (s *enableCommandSuite) TestRunError(c *gc.C) { + mockClient := &mockUnblockClient{err: errors.New("boom")} + cmd := block.NewEnableCommandForTest(mockClient, nil) + _, err := testing.RunCommand(c, cmd, "all") + c.Check(err, gc.ErrorMatches, "boom") +} + +type mockUnblockClient struct { + blockType string + err error +} + +func (c *mockUnblockClient) Close() error { + return nil +} + +func (c *mockUnblockClient) SwitchBlockOff(blockType string) error { + c.blockType = blockType + return c.err +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/block/export_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/block/export_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/block/export_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/block/export_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -10,53 +10,41 @@ "github.com/juju/juju/cmd/modelcmd" ) -var ( - BlockClient = &getBlockClientAPI - UnblockClient = &getUnblockClientAPI - ListClient = &getBlockListAPI - - NewDestroyCommand = newDestroyCommand - NewRemoveCommand = newRemoveCommand - NewChangeCommand = newChangeCommand - NewListCommand = newListCommand -) - -type MockBlockClient struct { - BlockType string - Msg string -} - -func (c 
*MockBlockClient) Close() error { - return nil +// NewDisableCommandForTest returns a new disable command with the +// apiFunc specified to return the args. +func NewDisableCommandForTest(api blockClientAPI, err error) cmd.Command { + return modelcmd.Wrap(&disableCommand{ + apiFunc: func(_ newAPIRoot) (blockClientAPI, error) { + return api, err + }, + }) } -func (c *MockBlockClient) SwitchBlockOn(blockType, msg string) error { - c.BlockType = blockType - c.Msg = msg - return nil +// NewEnableCommandForTest returns a new enable command with the +// apiFunc specified to return the args. +func NewEnableCommandForTest(api unblockClientAPI, err error) cmd.Command { + return modelcmd.Wrap(&enableCommand{ + apiFunc: func(_ newAPIRoot) (unblockClientAPI, error) { + return api, err + }, + }) } -func (c *MockBlockClient) SwitchBlockOff(blockType string) error { - c.BlockType = blockType - c.Msg = "" - return nil +type listMockAPI interface { + blockListAPI + // Can't include two interfaces that specify the same method + ListBlockedModels() ([]params.ModelBlockInfo, error) } -func (c *MockBlockClient) List() ([]params.Block, error) { - if c.BlockType == "" { - return []params.Block{}, nil - } - - return []params.Block{ - params.Block{ - Type: c.BlockType, - Message: c.Msg, +// NewListCommandForTest returns a new list command with the +// apiFunc specified to return the args. 
+func NewListCommandForTest(api listMockAPI, err error) cmd.Command { + return modelcmd.Wrap(&listCommand{ + apiFunc: func(_ newAPIRoot) (blockListAPI, error) { + return api, err }, - }, nil -} - -func NewUnblockCommandWithClient(client UnblockClientAPI) cmd.Command { - return modelcmd.Wrap(&unblockCommand{getClient: func() (UnblockClientAPI, error) { - return client, nil - }}) + controllerAPIFunc: func(_ newControllerAPIRoot) (controllerListAPI, error) { + return api, err + }, + }) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/block/list.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/block/list.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/block/list.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/block/list.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,144 +4,255 @@ package block import ( - "bytes" "fmt" - "text/tabwriter" + "io" + "sort" + "strings" "github.com/juju/cmd" "github.com/juju/errors" - "launchpad.net/gnuflag" + "github.com/juju/gnuflag" + "gopkg.in/juju/names.v2" + "github.com/juju/juju/api" + "github.com/juju/juju/api/controller" "github.com/juju/juju/apiserver/params" "github.com/juju/juju/cmd/modelcmd" + "github.com/juju/juju/cmd/output" ) -func newListCommand() cmd.Command { - return modelcmd.Wrap(&listCommand{}) +// NewListCommand returns the command that lists the disabled +// commands for the model. +func NewListCommand() cmd.Command { + return modelcmd.Wrap(&listCommand{ + apiFunc: func(c newAPIRoot) (blockListAPI, error) { + return getBlockAPI(c) + }, + controllerAPIFunc: func(c newControllerAPIRoot) (controllerListAPI, error) { + return getControllerAPI(c) + }, + }) } const listCommandDoc = ` -List blocks for Juju model. -This command shows if each block type is enabled. -For enabled blocks, block message is shown if it was specified. +List disabled commands for the model. 
+` + commandSets + ` +See also: + disable-command + enable-command ` // listCommand list blocks. type listCommand struct { modelcmd.ModelCommandBase - out cmd.Output + apiFunc func(newAPIRoot) (blockListAPI, error) + controllerAPIFunc func(newControllerAPIRoot) (controllerListAPI, error) + all bool + out cmd.Output } // Init implements Command.Init. func (c *listCommand) Init(args []string) (err error) { - return nil + return cmd.CheckEmpty(args) } // Info implements Command.Info. func (c *listCommand) Info() *cmd.Info { return &cmd.Info{ - Name: "list", - Purpose: "List Juju blocks.", + Name: "disabled-commands", + Purpose: "List disabled commands.", Doc: listCommandDoc, + Aliases: []string{"list-disabled-commands"}, } } // SetFlags implements Command.SetFlags. func (c *listCommand) SetFlags(f *gnuflag.FlagSet) { c.ModelCommandBase.SetFlags(f) - c.out.AddFlags(f, "blocks", map[string]cmd.Formatter{ - "yaml": cmd.FormatYaml, - "json": cmd.FormatJson, - "blocks": formatBlocks, + f.BoolVar(&c.all, "all", false, "Lists for all models (administrative users only)") + c.out.AddFlags(f, "tabular", map[string]cmd.Formatter{ + "yaml": cmd.FormatYaml, + "json": cmd.FormatJson, + "tabular": c.formatter, }) } // Run implements Command.Run. func (c *listCommand) Run(ctx *cmd.Context) (err error) { - api, err := getBlockListAPI(&c.ModelCommandBase) + if c.all { + return c.listForController(ctx) + } + return c.listForModel(ctx) +} + +const noBlocks = "No commands are currently disabled." + +func (c *listCommand) listForModel(ctx *cmd.Context) (err error) { + api, err := c.apiFunc(c) if err != nil { - return err + return errors.Trace(err) } defer api.Close() result, err := api.List() if err != nil { - return err + return errors.Trace(err) + } + if len(result) == 0 && c.out.Name() == "tabular" { + ctx.Infof(noBlocks) + return nil } return c.out.Write(ctx, formatBlockInfo(result)) } -// BlockListAPI defines the client API methods that block list command uses. 
-type BlockListAPI interface { +func (c *listCommand) listForController(ctx *cmd.Context) (err error) { + api, err := c.controllerAPIFunc(c) + if err != nil { + return errors.Trace(err) + } + defer api.Close() + + result, err := api.ListBlockedModels() + if err != nil { + return errors.Trace(err) + } + if len(result) == 0 && c.out.Name() == "tabular" { + ctx.Infof(noBlocks) + return nil + } + info, err := FormatModelBlockInfo(result) + if err != nil { + return errors.Trace(err) + } + return c.out.Write(ctx, info) +} + +func (c *listCommand) formatter(writer io.Writer, value interface{}) error { + if c.all { + return FormatTabularBlockedModels(writer, value) + } + return formatBlocks(writer, value) +} + +// blockListAPI defines the client API methods that block list command uses. +type blockListAPI interface { Close() error List() ([]params.Block, error) } -var getBlockListAPI = func(cmd *modelcmd.ModelCommandBase) (BlockListAPI, error) { - return getBlockAPI(cmd) +// controllerListAPI defines the methods on the controller API endpoint +// that the blocks command calls. +type controllerListAPI interface { + Close() error + ListBlockedModels() ([]params.ModelBlockInfo, error) } // BlockInfo defines the serialization behaviour of the block information. type BlockInfo struct { - Operation string `yaml:"block" json:"block"` - Enabled bool `yaml:"enabled" json:"enabled"` - Message *string `yaml:"message,omitempty" json:"message,omitempty"` + Commands string `yaml:"command-set" json:"command-set"` + Message string `yaml:"message,omitempty" json:"message,omitempty"` } // formatBlockInfo takes a set of Block and creates a // mapping to information structures. 
func formatBlockInfo(all []params.Block) []BlockInfo { - output := make([]BlockInfo, len(blockArgs)) - - info := make(map[string]BlockInfo, len(all)) - // not all block types may be returned from client - for _, one := range all { - op := OperationFromType(one.Type) - bi := BlockInfo{ - Operation: op, - // If client returned it, it means that it is enabled - Enabled: true, - Message: &one.Message, + output := make([]BlockInfo, len(all)) + for i, one := range all { + set, ok := toCmdValue[one.Type] + if !ok { + set = "" } - info[op] = bi - } - - for i, aType := range blockArgs { - if val, ok := info[aType]; ok { - output[i] = val - continue + output[i] = BlockInfo{ + Commands: set, + Message: one.Message, } - output[i] = BlockInfo{Operation: aType} } - return output } -// formatBlocks returns block list representation. -func formatBlocks(value interface{}) ([]byte, error) { +// formatBlocks writes block list representation. +func formatBlocks(writer io.Writer, value interface{}) error { blocks, ok := value.([]BlockInfo) if !ok { - return nil, errors.Errorf("expected value of type %T, got %T", blocks, value) + return errors.Errorf("expected value of type %T, got %T", blocks, value) } - var out bytes.Buffer - // To format things as desired. - tw := tabwriter.NewWriter(&out, 0, 1, 1, ' ', 0) - - for _, ablock := range blocks { - fmt.Fprintln(tw) - switched := "off" - if ablock.Enabled { - switched = "on" + + if len(blocks) == 0 { + fmt.Fprintf(writer, "No commands are currently disabled.") + return nil + } + + tw := output.TabWriter(writer) + w := output.Wrapper{tw} + w.Println("Disabled commands", "Message") + for _, info := range blocks { + w.Println(info.Commands, info.Message) + } + tw.Flush() + + return nil +} + +type newControllerAPIRoot interface { + NewControllerAPIRoot() (api.Connection, error) +} + +// getControllerAPI returns a block api for block manipulation. 
+func getControllerAPI(c newControllerAPIRoot) (*controller.Client, error) { + root, err := c.NewControllerAPIRoot() + if err != nil { + return nil, errors.Trace(err) + } + return controller.NewClient(root), nil +} + +type modelBlockInfo struct { + Name string `yaml:"name" json:"name"` + UUID string `yaml:"model-uuid" json:"model-uuid"` + Owner string `yaml:"owner" json:"owner"` + CommandSets []string `yaml:"disabled-commands,omitempty" json:"disabled-commands,omitempty"` +} + +func FormatModelBlockInfo(all []params.ModelBlockInfo) ([]modelBlockInfo, error) { + output := make([]modelBlockInfo, len(all)) + for i, one := range all { + tag, err := names.ParseUserTag(one.OwnerTag) + if err != nil { + return nil, errors.Trace(err) } - fmt.Fprintf(tw, "%v\t", ablock.Operation) - if ablock.Message != nil { - fmt.Fprintf(tw, "\t=%v, %v", switched, *ablock.Message) - continue + output[i] = modelBlockInfo{ + Name: one.Name, + UUID: one.UUID, + Owner: tag.Id(), + CommandSets: blocksToStr(one.Blocks), } - fmt.Fprintf(tw, "\t=%v", switched) + } + return output, nil +} + +// FormatTabularBlockedModels writes out tabular format for blocked models. +// This method is exported as it is also used by destroy-model. 
+func FormatTabularBlockedModels(writer io.Writer, value interface{}) error { + models, ok := value.([]modelBlockInfo) + if !ok { + return errors.Errorf("expected value of type %T, got %T", models, value) } + tw := output.TabWriter(writer) + w := output.Wrapper{tw} + w.Println("Name", "Model UUID", "Owner", "Disabled commands") + for _, model := range models { + w.Println(model.Name, model.UUID, model.Owner, strings.Join(model.CommandSets, ", ")) + } tw.Flush() + return nil +} - return out.Bytes(), nil +func blocksToStr(blocks []string) []string { + result := make([]string, len(blocks)) + for i, val := range blocks { + result[i] = operationFromType(val) + } + sort.Strings(result) + return result } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/block/list_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/block/list_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/block/list_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/block/list_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,70 +4,177 @@ package block_test import ( + "errors" + jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" + "github.com/juju/juju/apiserver/params" "github.com/juju/juju/cmd/juju/block" - "github.com/juju/juju/cmd/modelcmd" - "github.com/juju/juju/state/multiwatcher" "github.com/juju/juju/testing" ) +var _ = gc.Suite(&listCommandSuite{}) + type listCommandSuite struct { - ProtectionCommandSuite - mockClient *block.MockBlockClient + testing.FakeJujuXDGDataHomeSuite } -func (s *listCommandSuite) SetUpTest(c *gc.C) { - s.FakeJujuXDGDataHomeSuite.SetUpTest(c) - s.mockClient = &block.MockBlockClient{} - s.PatchValue(block.ListClient, func(_ *modelcmd.ModelCommandBase) (block.BlockListAPI, error) { - return s.mockClient, nil - }) -} +func (s *listCommandSuite) TestInit(c *gc.C) { + cmd := block.NewListCommand() + err := testing.InitCommand(cmd, nil) + c.Check(err, jc.ErrorIsNil) -var _ = 
gc.Suite(&listCommandSuite{}) + err = testing.InitCommand(cmd, []string{"anything"}) + c.Check(err.Error(), gc.Equals, `unrecognized args: ["anything"]`) +} func (s *listCommandSuite) TestListEmpty(c *gc.C) { - ctx, err := testing.RunCommand(c, block.NewListCommand()) + ctx, err := testing.RunCommand(c, block.NewListCommandForTest(&mockListClient{}, nil)) c.Assert(err, jc.ErrorIsNil) - c.Assert(testing.Stdout(ctx), gc.Equals, ` -destroy-model =off -remove-object =off -all-changes =off -`) + c.Assert(testing.Stderr(ctx), gc.Equals, "No commands are currently disabled.\n") +} + +func (s *listCommandSuite) TestListError(c *gc.C) { + _, err := testing.RunCommand(c, block.NewListCommandForTest( + &mockListClient{err: errors.New("boom")}, nil)) + c.Assert(err, gc.ErrorMatches, "boom") +} + +func (s *listCommandSuite) mock() *mockListClient { + return &mockListClient{ + blocks: []params.Block{ + { + Type: "BlockDestroy", + Message: "Sysadmins in control.", + }, { + Type: "BlockChange", + Message: "just temporary", + }, + }, + modelBlocks: []params.ModelBlockInfo{ + { + Name: "controller", + UUID: "fake-uuid-1", + OwnerTag: "user-admin", + Blocks: []string{"BlockDestroy", "BlockRemove"}, + }, { + Name: "model-a", + UUID: "fake-uuid-2", + OwnerTag: "user-bob@external", + Blocks: []string{"BlockChange"}, + }, { + Name: "model-b", + UUID: "fake-uuid-3", + OwnerTag: "user-charlie@external", + Blocks: []string{"BlockDestroy", "BlockChange"}, + }, + }, + } } func (s *listCommandSuite) TestList(c *gc.C) { - s.mockClient.SwitchBlockOn(string(multiwatcher.BlockRemove), "Test this one") - ctx, err := testing.RunCommand(c, block.NewListCommand()) + cmd := block.NewListCommandForTest(s.mock(), nil) + ctx, err := testing.RunCommand(c, cmd) c.Assert(err, jc.ErrorIsNil) - c.Assert(testing.Stdout(ctx), gc.Equals, ` -destroy-model =off -remove-object =on, Test this one -all-changes =off -`) -} - -func (s *listCommandSuite) TestListYaml(c *gc.C) { - 
s.mockClient.SwitchBlockOn(string(multiwatcher.BlockRemove), "Test this one") - ctx, err := testing.RunCommand(c, block.NewListCommand(), "--format", "yaml") - c.Assert(err, jc.ErrorIsNil) - c.Assert(testing.Stdout(ctx), gc.Equals, ` -- block: destroy-model - enabled: false -- block: remove-object - enabled: true - message: Test this one -- block: all-changes - enabled: false -`[1:]) -} - -func (s *listCommandSuite) TestListJson(c *gc.C) { - s.mockClient.SwitchBlockOn(string(multiwatcher.BlockRemove), "Test this one") - ctx, err := testing.RunCommand(c, block.NewListCommand(), "--format", "json") + c.Assert(testing.Stderr(ctx), gc.Equals, "") + c.Assert(testing.Stdout(ctx), gc.Equals, ""+ + "Disabled commands Message\n"+ + "destroy-model Sysadmins in control.\n"+ + "all just temporary\n"+ + "\n", + ) +} + +func (s *listCommandSuite) TestListYAML(c *gc.C) { + cmd := block.NewListCommandForTest(s.mock(), nil) + ctx, err := testing.RunCommand(c, cmd, "--format", "yaml") + c.Assert(err, jc.ErrorIsNil) + c.Assert(testing.Stdout(ctx), gc.Equals, ""+ + "- command-set: destroy-model\n"+ + " message: Sysadmins in control.\n"+ + "- command-set: all\n"+ + " message: just temporary\n", + ) +} + +func (s *listCommandSuite) TestListJSONEmpty(c *gc.C) { + ctx, err := testing.RunCommand(c, block.NewListCommandForTest(&mockListClient{}, nil), "--format", "json") + c.Assert(err, jc.ErrorIsNil) + c.Assert(testing.Stdout(ctx), gc.Equals, "[]\n") +} + +func (s *listCommandSuite) TestListJSON(c *gc.C) { + cmd := block.NewListCommandForTest(s.mock(), nil) + ctx, err := testing.RunCommand(c, cmd, "--format", "json") c.Assert(err, jc.ErrorIsNil) - c.Assert(testing.Stdout(ctx), gc.Equals, `[{"block":"destroy-model","enabled":false},{"block":"remove-object","enabled":true,"message":"Test this one"},{"block":"all-changes","enabled":false}] -`) + c.Assert(testing.Stdout(ctx), gc.Equals, ""+ + `[{"command-set":"destroy-model","message":"Sysadmins in control."},`+ + 
`{"command-set":"all","message":"just temporary"}]`+"\n") +} + +func (s *listCommandSuite) TestListAll(c *gc.C) { + cmd := block.NewListCommandForTest(s.mock(), nil) + ctx, err := testing.RunCommand(c, cmd, "--all") + c.Assert(err, jc.ErrorIsNil) + c.Assert(testing.Stderr(ctx), gc.Equals, "") + c.Assert(testing.Stdout(ctx), gc.Equals, ""+ + "Name Model UUID Owner Disabled commands\n"+ + "controller fake-uuid-1 admin destroy-model, remove-object\n"+ + "model-a fake-uuid-2 bob@external all\n"+ + "model-b fake-uuid-3 charlie@external all, destroy-model\n"+ + "\n") +} + +func (s *listCommandSuite) TestListAllYAML(c *gc.C) { + cmd := block.NewListCommandForTest(s.mock(), nil) + ctx, err := testing.RunCommand(c, cmd, "--format", "yaml", "--all") + c.Assert(err, jc.ErrorIsNil) + c.Assert(testing.Stdout(ctx), gc.Equals, ""+ + "- name: controller\n"+ + " model-uuid: fake-uuid-1\n"+ + " owner: admin\n"+ + " disabled-commands:\n"+ + " - destroy-model\n"+ + " - remove-object\n"+ + "- name: model-a\n"+ + " model-uuid: fake-uuid-2\n"+ + " owner: bob@external\n"+ + " disabled-commands:\n"+ + " - all\n"+ + "- name: model-b\n"+ + " model-uuid: fake-uuid-3\n"+ + " owner: charlie@external\n"+ + " disabled-commands:\n"+ + " - all\n"+ + " - destroy-model\n") +} + +func (s *listCommandSuite) TestListAllJSON(c *gc.C) { + cmd := block.NewListCommandForTest(s.mock(), nil) + ctx, err := testing.RunCommand(c, cmd, "--format", "json", "--all") + c.Assert(err, jc.ErrorIsNil) + c.Assert(testing.Stdout(ctx), gc.Equals, "["+ + `{"name":"controller","model-uuid":"fake-uuid-1","owner":"admin","disabled-commands":["destroy-model","remove-object"]},`+ + `{"name":"model-a","model-uuid":"fake-uuid-2","owner":"bob@external","disabled-commands":["all"]},`+ + `{"name":"model-b","model-uuid":"fake-uuid-3","owner":"charlie@external","disabled-commands":["all","destroy-model"]}`+ + "]\n") +} + +type mockListClient struct { + blocks []params.Block + modelBlocks []params.ModelBlockInfo + err error +} + +func 
(c *mockListClient) Close() error { + return nil +} + +func (c *mockListClient) List() ([]params.Block, error) { + return c.blocks, c.err +} + +func (c *mockListClient) ListBlockedModels() ([]params.ModelBlockInfo, error) { + return c.modelBlocks, c.err } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/block/package_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/block/package_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/block/package_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/block/package_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,24 +4,11 @@ package block_test import ( - stdtesting "testing" + "testing" gc "gopkg.in/check.v1" - - "github.com/juju/juju/testing" ) -func TestAll(t *stdtesting.T) { - testing.MgoTestPackage(t) -} - -type ProtectionCommandSuite struct { - testing.FakeJujuXDGDataHomeSuite -} - -func (s *ProtectionCommandSuite) assertErrorMatches(c *gc.C, err error, expected string) { - c.Assert( - err, - gc.ErrorMatches, - expected) +func TestAll(t *testing.T) { + gc.TestingT(t) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/block/protection.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/block/protection.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/block/protection.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/block/protection.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,52 +4,56 @@ package block import ( - "fmt" - - "github.com/juju/cmd" + "github.com/juju/errors" "github.com/juju/loggo" + "github.com/juju/juju/api" apiblock "github.com/juju/juju/api/block" "github.com/juju/juju/apiserver/params" - "github.com/juju/juju/cmd/modelcmd" - "github.com/juju/juju/state/multiwatcher" ) var logger = loggo.GetLogger("juju.cmd.juju.block") -// blockArgs has all valid operations that can be -// supplied to the command. 
-// These operations do not necessarily correspond to juju commands -// but are rather juju command groupings. -var blockArgs = []string{"destroy-model", "remove-object", "all-changes"} - -// TypeFromOperation translates given operation string -// such as destroy-model, remove-object, etc to -// block type string as defined in multiwatcher. -var TypeFromOperation = func(operation string) string { - for key, value := range blockTypes { - if value == operation { - return key - } +const ( + cmdAll = "all" + cmdDestroyModel = "destroy-model" + cmdRemoveObject = "remove-object" + + apiAll = "BlockChange" + apiDestroyModel = "BlockDestroy" + apiRemoveObject = "BlockRemove" +) + +var ( + toAPIValue = map[string]string{ + cmdAll: apiAll, + cmdDestroyModel: apiDestroyModel, + cmdRemoveObject: apiRemoveObject, } - panic(fmt.Sprintf("unknown operation %v", operation)) -} -var blockTypes = map[string]string{ - string(multiwatcher.BlockDestroy): "destroy-model", - string(multiwatcher.BlockRemove): "remove-object", - string(multiwatcher.BlockChange): "all-changes", + toCmdValue = map[string]string{ + apiAll: cmdAll, + apiDestroyModel: cmdDestroyModel, + apiRemoveObject: cmdRemoveObject, + } + + validTargets = cmdAll + ", " + cmdDestroyModel + ", " + cmdRemoveObject +) + +func operationFromType(blockType string) string { + value, ok := toCmdValue[blockType] + if !ok { + value = "" + } + return value } -// OperationFromType translates given block type as -// defined in multiwatcher into the operation -// such as destroy-model. -var OperationFromType = func(blockType string) string { - return blockTypes[blockType] +type newAPIRoot interface { + NewAPIRoot() (api.Connection, error) } // getBlockAPI returns a block api for block manipulation. 
-func getBlockAPI(c *modelcmd.ModelCommandBase) (*apiblock.Client, error) { +func getBlockAPI(c newAPIRoot) (*apiblock.Client, error) { root, err := c.NewAPIRoot() if err != nil { return nil, err @@ -87,31 +91,32 @@ return nil } if params.IsCodeOperationBlocked(err) { - logger.Errorf("\n%v%v", err, blockedMessages[block]) - return cmd.ErrSilent + msg := blockedMessages[block] + logger.Errorf("%v\n%v", err, msg) + return errors.New(msg) } return err } var removeMsg = ` -All operations that remove (or delete or terminate) machines, applications, units or -relations have been blocked for the current model. -To unblock removal, run +All operations that remove machines, applications, units or +relations have been disabled for the current model. +To enable removal, run - juju unblock remove-object + juju enable-command remove-object ` var destroyMsg = ` -destroy-model operation has been blocked for the current model. -To remove the block run +destroy-model operation has been disabled for the current model. +To enable the command run - juju unblock destroy-model + juju enable-command destroy-model ` var changeMsg = ` -All operations that change model have been blocked for the current model. -To unblock changes, run +All operations that change model have been disabled for the current model. +To enable changes, run - juju unblock all-changes + juju enable-command all ` diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/block/unblock.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/block/unblock.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/block/unblock.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/block/unblock.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,174 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. 
- -package block - -import ( - "fmt" - "strings" - - "github.com/juju/cmd" - "github.com/juju/errors" - "launchpad.net/gnuflag" - - "github.com/juju/juju/cmd/modelcmd" -) - -// NewUnblockCommand returns a new command that removes the block from -// the specified operation. -func NewUnblockCommand() cmd.Command { - c := &unblockCommand{} - c.getClient = func() (UnblockClientAPI, error) { - return getBlockAPI(&c.ModelCommandBase) - } - return modelcmd.Wrap(c) -} - -// unblockCommand removes the block from desired operation. -type unblockCommand struct { - modelcmd.ModelCommandBase - operation string - getClient func() (UnblockClientAPI, error) -} - -var ( - unblockDoc = ` -Juju allows to safeguard deployed models from unintentional damage by preventing -execution of operations that could alter model. - -This is done by blocking certain commands from successful execution. Blocked commands -must be manually unblocked to proceed. - -Some commands offer a --force option that can be used to bypass a block. 
- -Commands that can be unblocked are grouped based on logical operations as follows: - -destroy-model includes command: - destroy-model - -remove-object includes termination commands: - destroy-model - remove-machine - remove-relation - remove-application - remove-unit - -all-changes includes all alteration commands - add-machine - add-relation - add-unit - authorised-keys add - authorised-keys delete - authorised-keys import - deploy - destroy-model - enable-ha - expose - remove-machine - remove-relation - remove-application - remove-unit - resolved - retry-provisioning - run - set - set-constraints - set-model-config - sync-tools - unexpose - unset - unset-model-config - upgrade-charm - upgrade-juju - add-user - change-user-password - disable-user - enable-user - -Examples: - # To allow the model to be destroyed: - juju unblock destroy-model - - # To allow the machines, applications, units and relations to be removed: - juju unblock remove-object - - # To allow changes to the model: - juju unblock all-changes - -See Also: - juju block -` - - // blockArgsFmt has formatted representation of block command valid arguments. - blockArgsFmt = fmt.Sprintf(strings.Join(blockArgs, " | ")) -) - -// assignValidOperation verifies that supplied operation is supported. -func (p *unblockCommand) assignValidOperation(cmd string, args []string) error { - if len(args) < 1 { - return errors.Trace(errors.Errorf("must specify one of [%v] to %v", blockArgsFmt, cmd)) - } - var err error - p.operation, err = p.obtainValidArgument(args[0]) - return err -} - -// obtainValidArgument returns polished argument: -// it checks that the argument is a supported operation and -// forces it into lower case for consistency. 
-func (p *unblockCommand) obtainValidArgument(arg string) (string, error) { - for _, valid := range blockArgs { - if strings.EqualFold(valid, arg) { - return strings.ToLower(arg), nil - } - } - return "", errors.Trace(errors.Errorf("%q is not a valid argument: use one of [%v]", arg, blockArgsFmt)) -} - -// Info provides information about command. -// Satisfying Command interface. -func (c *unblockCommand) Info() *cmd.Info { - return &cmd.Info{ - Name: "unblock", - Args: blockArgsFmt, - Purpose: "Unblock an operation that would alter a running model.", - Doc: unblockDoc, - } -} - -// Init initializes the command. -// Satisfying Command interface. -func (c *unblockCommand) Init(args []string) error { - if len(args) > 1 { - return errors.Trace(errors.New("can only specify block type")) - } - - return c.assignValidOperation("unblock", args) -} - -// SetFlags implements Command.SetFlags. -func (c *unblockCommand) SetFlags(f *gnuflag.FlagSet) { - c.ModelCommandBase.SetFlags(f) -} - -// Run unblocks previously blocked commands. -// Satisfying Command interface. -func (c *unblockCommand) Run(_ *cmd.Context) error { - client, err := c.getClient() - if err != nil { - return errors.Trace(err) - } - defer client.Close() - - return client.SwitchBlockOff(TypeFromOperation(c.operation)) -} - -// UnblockClientAPI defines the client API methods that unblock command uses. 
-type UnblockClientAPI interface { - Close() error - SwitchBlockOff(blockType string) error -} - -var getUnblockClientAPI = func(p *unblockCommand) (UnblockClientAPI, error) { - return getBlockAPI(&p.ModelCommandBase) -} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/block/unblock_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/block/unblock_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/block/unblock_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/block/unblock_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,67 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package block_test - -import ( - "strings" - - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" - - "github.com/juju/juju/cmd/juju/block" - "github.com/juju/juju/testing" -) - -type UnblockCommandSuite struct { - ProtectionCommandSuite - mockClient *block.MockBlockClient -} - -func (s *UnblockCommandSuite) SetUpTest(c *gc.C) { - s.FakeJujuXDGDataHomeSuite.SetUpTest(c) - s.mockClient = &block.MockBlockClient{} -} - -var _ = gc.Suite(&UnblockCommandSuite{}) - -func (s *UnblockCommandSuite) runUnblockCommand(c *gc.C, args ...string) error { - _, err := testing.RunCommand(c, block.NewUnblockCommandWithClient(s.mockClient), args...) 
- return err -} - -func (s *UnblockCommandSuite) assertRunUnblock(c *gc.C, operation string) { - err := s.runUnblockCommand(c, operation) - c.Assert(err, jc.ErrorIsNil) - - expectedOp := block.TypeFromOperation(strings.ToLower(operation)) - c.Assert(s.mockClient.BlockType, gc.DeepEquals, expectedOp) -} - -func (s *UnblockCommandSuite) TestUnblockCmdNoOperation(c *gc.C) { - s.assertErrorMatches(c, s.runUnblockCommand(c), `.*must specify one of.*`) -} - -func (s *UnblockCommandSuite) TestUnblockCmdMoreThanOneOperation(c *gc.C) { - s.assertErrorMatches(c, s.runUnblockCommand(c, "destroy-model", "change"), `.*can only specify block type.*`) -} - -func (s *UnblockCommandSuite) TestUnblockCmdOperationWithSeparator(c *gc.C) { - s.assertErrorMatches(c, s.runUnblockCommand(c, "destroy-model|"), `.*valid argument.*`) -} - -func (s *UnblockCommandSuite) TestUnblockCmdUnknownJujuOperation(c *gc.C) { - s.assertErrorMatches(c, s.runUnblockCommand(c, "add-machine"), `.*valid argument.*`) -} - -func (s *UnblockCommandSuite) TestUnblockCmdUnknownOperation(c *gc.C) { - s.assertErrorMatches(c, s.runUnblockCommand(c, "blah"), `.*valid argument.*`) -} - -func (s *UnblockCommandSuite) TestUnblockCmdValidDestroyEnvOperationUpperCase(c *gc.C) { - s.assertRunUnblock(c, "DESTROY-MODEL") -} - -func (s *UnblockCommandSuite) TestUnblockCmdValidDestroyEnvOperation(c *gc.C) { - s.assertRunUnblock(c, "destroy-model") -} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/cachedimages/list.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/cachedimages/list.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/cachedimages/list.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/cachedimages/list.go 2016-10-13 14:31:49.000000000 +0000 @@ -8,10 +8,11 @@ "time" "github.com/juju/cmd" - "launchpad.net/gnuflag" + "github.com/juju/gnuflag" "github.com/juju/juju/apiserver/params" "github.com/juju/juju/cmd/modelcmd" + 
"github.com/juju/juju/cmd/output" ) const listCommandDoc = ` @@ -62,10 +63,7 @@ f.StringVar(&c.Kind, "kind", "", "The image kind to list eg lxd") f.StringVar(&c.Series, "series", "", "The series of the image to list eg xenial") f.StringVar(&c.Arch, "arch", "", "The architecture of the image to list eg amd64") - c.out.AddFlags(f, "yaml", map[string]cmd.Formatter{ - "yaml": cmd.FormatYaml, - "json": cmd.FormatJson, - }) + c.out.AddFlags(f, "yaml", output.DefaultFormatters) } // Init implements Command.Init. @@ -121,7 +119,7 @@ } imageInfo := c.imageMetadataToImageInfo(results) if len(imageInfo) == 0 { - fmt.Fprintf(ctx.Stdout, "no matching images found\n") + ctx.Infof("No images to display.") return nil } fmt.Fprintf(ctx.Stdout, "Cached images:\n") diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/cachedimages/list_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/cachedimages/list_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/cachedimages/list_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/cachedimages/list_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -59,7 +59,7 @@ func (*listImagesCommandSuite) TestListImagesNone(c *gc.C) { context, err := runListCommand(c, "--kind", "kvm") c.Assert(err, jc.ErrorIsNil) - c.Assert(testing.Stdout(context), gc.Equals, "no matching images found\n") + c.Assert(testing.Stderr(context), gc.Equals, "No images to display.\n") } func (*listImagesCommandSuite) TestListImagesFormatJson(c *gc.C) { diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/cachedimages/remove.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/cachedimages/remove.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/cachedimages/remove.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/cachedimages/remove.go 2016-10-13 14:31:49.000000000 +0000 @@ -6,7 +6,7 @@ import ( "github.com/juju/cmd" 
"github.com/juju/errors" - "launchpad.net/gnuflag" + "github.com/juju/gnuflag" "github.com/juju/juju/cmd/modelcmd" ) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/charmcmd/charm.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/charmcmd/charm.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/charmcmd/charm.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/charmcmd/charm.go 2016-10-13 14:31:49.000000000 +0000 @@ -7,6 +7,13 @@ "github.com/juju/cmd" ) +var registeredSubCommands []cmd.Command + +// RegisterSubCommand registers the given command as a "juju charm" subcommand. +func RegisterSubCommand(c cmd.Command) { + registeredSubCommands = append(registeredSubCommands, c) +} + var charmDoc = ` "juju charm" is the the juju CLI equivalent of the "charm" command used by charm authors, though only applicable functionality is mirrored. @@ -31,14 +38,12 @@ }, ), } - spec := newCharmstoreSpec() // Sub-commands may be registered directly here, like so: - //charmCmd.Register(newXXXCommand(spec)) + //charmCmd.Register(newXXXCommand()) // ...or externally via RegisterSubCommand(). - for _, newSubCommand := range registeredSubCommands { - command := newSubCommand(spec) + for _, command := range registeredSubCommands { charmCmd.Register(command) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/charmcmd/store.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/charmcmd/store.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/charmcmd/store.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/charmcmd/store.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,55 +0,0 @@ -// Copyright 2016 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. 
- -package charmcmd - -import ( - "io" - - "github.com/juju/cmd" - "github.com/juju/errors" - - "github.com/juju/juju/charmstore" - "github.com/juju/juju/cmd/modelcmd" -) - -// TODO(ericsnow) Factor out code from cmd/juju/commands/common.go and │ -// cmd/envcmd/base.go into cmd/charmstore.go and cmd/apicontext.go. Then │ -// use those here instead of copy-and-pasting here. - -/////////////////// -// The charmstoreSpec code is based loosely on code in cmd/juju/commands/deploy.go. - -// CharmstoreSpec provides the functionality needed to open a charm -// store client. -type CharmstoreSpec interface { - // Connect connects to the specified charm store. - Connect(ctx *cmd.Context) (charmstore.Client, io.Closer, error) -} - -type charmstoreSpec struct{} - -// newCharmstoreSpec creates a new charm store spec with default -// settings. -func newCharmstoreSpec() CharmstoreSpec { - return charmstoreSpec{} -} - -// Connect implements CharmstoreSpec. -func (cs charmstoreSpec) Connect(ctx *cmd.Context) (charmstore.Client, io.Closer, error) { - // Note that creating the API context in Connect is technically - // wrong, as it means we'll be creating the bakery context - // (and reading/writing the cookies) each time it's called. - // TODO(ericsnow) Move apiContext to a field on charmstoreSpec. - apiContext, err := modelcmd.NewAPIContext(ctx) - if err != nil { - return charmstore.Client{}, nil, errors.Trace(err) - } - // We use the default for URL. 
- client, err := charmstore.NewCustomClient(apiContext.BakeryClient, nil) - if err != nil { - apiContext.Close() - return charmstore.Client{}, nil, errors.Trace(err) - } - return client, apiContext, nil -} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/charmcmd/sub.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/charmcmd/sub.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/charmcmd/sub.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/charmcmd/sub.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,49 +0,0 @@ -// Copyright 2016 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package charmcmd - -import ( - "io" - - "github.com/juju/cmd" - "github.com/juju/errors" - - "github.com/juju/juju/charmstore" -) - -var registeredSubCommands []func(CharmstoreSpec) cmd.Command - -// RegisterSubCommand adds the provided func to the set of those that -// will be called when the juju command runs. Each returned command will -// be registered with the identified "juju" sub-supercommand. -func RegisterSubCommand(newCommand func(CharmstoreSpec) cmd.Command) { - registeredSubCommands = append(registeredSubCommands, newCommand) -} - -// NewCommandBase returns a new CommandBase. -func NewCommandBase(spec CharmstoreSpec) *CommandBase { - return &CommandBase{ - spec: newCharmstoreSpec(), - } -} - -// CommandBase is the type that should be embedded in "juju charm" -// sub-commands. -type CommandBase struct { - // TODO(ericsnow) This should be a modelcmd.ModelCommandBase. - cmd.CommandBase - spec CharmstoreSpec -} - -// Connect implements CommandBase. 
-func (c *CommandBase) Connect(ctx *cmd.Context) (charmstore.Client, io.Closer, error) { - if c.spec == nil { - return charmstore.Client{}, nil, errors.Errorf("missing charm store spec") - } - client, closer, err := c.spec.Connect(ctx) - if err != nil { - return charmstore.Client{}, nil, errors.Trace(err) - } - return client, closer, nil -} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/cloud/addcredential.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/cloud/addcredential.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/cloud/addcredential.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/cloud/addcredential.go 2016-10-13 14:31:49.000000000 +0000 @@ -12,8 +12,8 @@ "github.com/juju/cmd" "github.com/juju/errors" + "github.com/juju/gnuflag" "golang.org/x/crypto/ssh/terminal" - "launchpad.net/gnuflag" jujucloud "github.com/juju/juju/cloud" "github.com/juju/juju/cmd/juju/common" @@ -99,6 +99,7 @@ } func (c *addCredentialCommand) SetFlags(f *gnuflag.FlagSet) { + c.CommandBase.SetFlags(f) f.BoolVar(&c.Replace, "replace", false, "Overwrite existing credential information") f.StringVar(&c.CredentialsFile, "f", "", "The YAML file containing credentials to add") } @@ -214,8 +215,39 @@ if err != nil { return errors.Trace(err) } - newCredential := jujucloud.NewCredential(authType, attrs) - existingCredentials.AuthCredentials[credentialName] = newCredential + + cloudEndpoint := c.cloud.Endpoint + cloudIdentityEndpoint := c.cloud.IdentityEndpoint + if len(c.cloud.Regions) > 0 { + // NOTE(axw) we use the first region in the cloud, + // because this is all we need for Azure right now. + // Each region has the same endpoints, so it does + // not matter which one we use. If we expand + // credential generation to other providers, and + // they do have region-specific endpoints, then we + // should prompt the user for the region to use. + // That would be better left to the provider, though. 
+ region := c.cloud.Regions[0] + cloudEndpoint = region.Endpoint + cloudIdentityEndpoint = region.IdentityEndpoint + } + + credentialsProvider, err := environs.Provider(c.cloud.Type) + if err != nil { + return errors.Trace(err) + } + newCredential, err := credentialsProvider.FinalizeCredential( + ctxt, environs.FinalizeCredentialParams{ + Credential: jujucloud.NewCredential(authType, attrs), + CloudEndpoint: cloudEndpoint, + CloudIdentityEndpoint: cloudIdentityEndpoint, + }, + ) + if err != nil { + return errors.Annotate(err, "finalizing credential") + } + + existingCredentials.AuthCredentials[credentialName] = *newCredential err = c.store.UpdateCredential(c.CloudName, *existingCredentials) if err != nil { return errors.Trace(err) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/cloud/addcredential_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/cloud/addcredential_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/cloud/addcredential_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/cloud/addcredential_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -44,8 +44,10 @@ return nil, errors.NotFoundf("cloud %v", cloud) } return &jujucloud.Cloud{ - Type: "mock-addcredential-provider", - AuthTypes: s.authTypes, + Type: "mock-addcredential-provider", + AuthTypes: s.authTypes, + Endpoint: "cloud-endpoint", + IdentityEndpoint: "cloud-identity-endpoint", }, nil } } @@ -207,6 +209,36 @@ s.assertAddUserpassCredential(c, "fred\nuserpass\nuser\npassword\n", nil) } +func (s *addCredentialSuite) TestAddCredentialInteractive(c *gc.C) { + s.authTypes = []jujucloud.AuthType{"interactive"} + s.schema = map[jujucloud.AuthType]jujucloud.CredentialSchema{ + "interactive": {{"username", jujucloud.CredentialAttr{}}}, + } + + stdin := strings.NewReader("bobscreds\nbob\n") + ctx, err := s.run(c, stdin, "somecloud") + c.Assert(err, jc.ErrorIsNil) + + c.Assert(testing.Stderr(ctx), gc.Equals, ` +Enter 
credential name: Using auth-type "interactive". +Enter username: generating userpass credential +`[1:]) + + // FinalizeCredential should have generated a userpass credential + // based on the input from the interactive credential. + c.Assert(s.store.Credentials, jc.DeepEquals, map[string]jujucloud.CloudCredential{ + "somecloud": { + AuthCredentials: map[string]jujucloud.Credential{ + "bobscreds": jujucloud.NewCredential(jujucloud.UserPassAuthType, map[string]string{ + "username": "bob", + "password": "cloud-endpoint", + "application-password": "cloud-identity-endpoint", + }), + }, + }, + }) +} + func (s *addCredentialSuite) TestAddCredentialReplace(c *gc.C) { s.store.Credentials = map[string]jujucloud.CloudCredential{ "somecloud": { diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/cloud/add.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/cloud/add.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/cloud/add.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/cloud/add.go 2016-10-13 14:31:49.000000000 +0000 @@ -6,9 +6,10 @@ import ( "github.com/juju/cmd" "github.com/juju/errors" - "launchpad.net/gnuflag" + "github.com/juju/gnuflag" "github.com/juju/juju/cloud" + "github.com/juju/juju/cmd/juju/common" ) var usageAddCloudSummary = ` @@ -64,6 +65,7 @@ } func (c *addCloudCommand) SetFlags(f *gnuflag.FlagSet) { + c.CommandBase.SetFlags(f) f.BoolVar(&c.Replace, "replace", false, "Overwrite any existing cloud information") } @@ -88,12 +90,23 @@ if !ok { return errors.Errorf("cloud %q not found in file %q", c.Cloud, c.CloudFile) } + publicClouds, _, err := cloud.PublicCloudMetadata() + if err != nil { + return err + } + if _, ok = publicClouds[c.Cloud]; ok && !c.Replace { + return errors.Errorf("%q is the name of a public cloud; use --replace to use your cloud definition instead", c.Cloud) + } + builtinClouds := common.BuiltInClouds() + if _, ok = builtinClouds[c.Cloud]; ok && !c.Replace { + return 
errors.Errorf("%q is the name of a built-in cloud; use --replace to use your cloud definition instead", c.Cloud) + } personalClouds, err := cloud.PersonalCloudMetadata() if err != nil { return err } if _, ok = personalClouds[c.Cloud]; ok && !c.Replace { - return errors.Errorf("cloud called %q already exists; use --replace to replace this existing cloud", c.Cloud) + return errors.Errorf("%q already exists; use --replace to replace this existing cloud", c.Cloud) } if personalClouds == nil { personalClouds = make(map[string]cloud.Cloud) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/cloud/add_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/cloud/add_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/cloud/add_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/cloud/add_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -16,16 +16,11 @@ ) type addSuite struct { - testing.BaseSuite + testing.FakeJujuXDGDataHomeSuite } var _ = gc.Suite(&addSuite{}) -func (s *addSuite) SetUpTest(c *gc.C) { - origHome := osenv.SetJujuXDGDataHome(c.MkDir()) - s.AddCleanup(func(*gc.C) { osenv.SetJujuXDGDataHome(origHome) }) -} - func (s *addSuite) TestAddBadArgs(c *gc.C) { addCmd := cloud.NewAddCloudCommand() _, err := testing.RunCommand(c, addCmd) @@ -52,6 +47,14 @@ sourceFile := filepath.Join(sourceDir, "someclouds.yaml") source := ` clouds: + aws: + type: ec2 + auth-types: [ access-key ] + regions: + us-east-1: + endpoint: https://us-east-1.aws.amazon.com/v1.2/ + localhost: + type: lxd homestack: type: openstack auth-types: [userpass, access-key] @@ -87,7 +90,7 @@ func (s *addSuite) TestAddExisting(c *gc.C) { sourceFile := s.createTestCloudData(c) _, err := testing.RunCommand(c, cloud.NewAddCloudCommand(), "homestack", sourceFile) - c.Assert(err, gc.ErrorMatches, `cloud called \"homestack\" already exists; use --replace to replace this existing cloud`) + c.Assert(err, gc.ErrorMatches, `\"homestack\" 
already exists; use --replace to replace this existing cloud`) } func (s *addSuite) TestAddExistingReplace(c *gc.C) { @@ -109,6 +112,41 @@ `[1:]) } +func (s *addSuite) TestAddExistingPublic(c *gc.C) { + sourceFile := s.createTestCloudData(c) + _, err := testing.RunCommand(c, cloud.NewAddCloudCommand(), "aws", sourceFile) + c.Assert(err, gc.ErrorMatches, `\"aws\" is the name of a public cloud; use --replace to use your cloud definition instead`) +} + +func (s *addSuite) TestAddExistingBuiltin(c *gc.C) { + sourceFile := s.createTestCloudData(c) + _, err := testing.RunCommand(c, cloud.NewAddCloudCommand(), "localhost", sourceFile) + c.Assert(err, gc.ErrorMatches, `\"localhost\" is the name of a built-in cloud; use --replace to use your cloud definition instead`) +} + +func (s *addSuite) TestAddExistingPublicReplace(c *gc.C) { + sourceFile := s.createTestCloudData(c) + _, err := testing.RunCommand(c, cloud.NewAddCloudCommand(), "aws", sourceFile, "--replace") + c.Assert(err, jc.ErrorIsNil) + data, err := ioutil.ReadFile(osenv.JujuXDGDataHomePath("clouds.yaml")) + c.Assert(string(data), gc.Equals, ` +clouds: + aws: + type: ec2 + auth-types: [access-key] + regions: + us-east-1: + endpoint: https://us-east-1.aws.amazon.com/v1.2/ + homestack: + type: openstack + auth-types: [userpass, access-key] + endpoint: http://homestack + regions: + london: + endpoint: http://london/1.0 +`[1:]) +} + func (s *addSuite) TestAddNew(c *gc.C) { sourceFile := s.createTestCloudData(c) _, err := testing.RunCommand(c, cloud.NewAddCloudCommand(), "garage-maas", sourceFile) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/cloud/defaultcredential_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/cloud/defaultcredential_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/cloud/defaultcredential_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/cloud/defaultcredential_test.go 2016-10-13 14:31:49.000000000 +0000 
@@ -12,22 +12,16 @@ jujucloud "github.com/juju/juju/cloud" "github.com/juju/juju/cmd/juju/cloud" - "github.com/juju/juju/juju/osenv" "github.com/juju/juju/jujuclient/jujuclienttesting" "github.com/juju/juju/testing" ) type defaultCredentialSuite struct { - testing.BaseSuite + testing.FakeJujuXDGDataHomeSuite } var _ = gc.Suite(&defaultCredentialSuite{}) -func (s *defaultCredentialSuite) SetUpTest(c *gc.C) { - origHome := osenv.SetJujuXDGDataHome(c.MkDir()) - s.AddCleanup(func(*gc.C) { osenv.SetJujuXDGDataHome(origHome) }) -} - func (s *defaultCredentialSuite) TestBadArgs(c *gc.C) { cmd := cloud.NewSetDefaultCredentialCommand() _, err := testing.RunCommand(c, cmd) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/cloud/defaultregion_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/cloud/defaultregion_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/cloud/defaultregion_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/cloud/defaultregion_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -13,22 +13,16 @@ "github.com/juju/cmd" jujucloud "github.com/juju/juju/cloud" "github.com/juju/juju/cmd/juju/cloud" - "github.com/juju/juju/juju/osenv" "github.com/juju/juju/jujuclient/jujuclienttesting" "github.com/juju/juju/testing" ) type defaultRegionSuite struct { - testing.BaseSuite + testing.FakeJujuXDGDataHomeSuite } var _ = gc.Suite(&defaultRegionSuite{}) -func (s *defaultRegionSuite) SetUpTest(c *gc.C) { - origHome := osenv.SetJujuXDGDataHome(c.MkDir()) - s.AddCleanup(func(*gc.C) { osenv.SetJujuXDGDataHome(origHome) }) -} - func (s *defaultRegionSuite) TestBadArgs(c *gc.C) { cmd := cloud.NewSetDefaultRegionCommand() _, err := testing.RunCommand(c, cmd) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/cloud/detectcredentials.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/cloud/detectcredentials.go --- 
juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/cloud/detectcredentials.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/cloud/detectcredentials.go 2016-10-13 14:31:49.000000000 +0000 @@ -69,7 +69,7 @@ Example: juju autoload-credentials -See Also: +See also: list-credentials remove-credential set-default-credential diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/cloud/detectcredentials_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/cloud/detectcredentials_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/cloud/detectcredentials_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/cloud/detectcredentials_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -76,6 +76,22 @@ return *p.credSchemas } +func (p *mockProvider) FinalizeCredential( + ctx environs.FinalizeCredentialContext, + args environs.FinalizeCredentialParams, +) (*jujucloud.Credential, error) { + if args.Credential.AuthType() == "interactive" { + fmt.Fprintln(ctx.GetStderr(), "generating userpass credential") + out := jujucloud.NewCredential(jujucloud.UserPassAuthType, map[string]string{ + "username": args.Credential.Attributes()["username"], + "password": args.CloudEndpoint, + "application-password": args.CloudIdentityEndpoint, + }) + return &out, nil + } + return &args.Credential, nil +} + func (s *detectCredentialsSuite) SetUpSuite(c *gc.C) { environs.RegisterProvider("mock-provider", &mockProvider{detectedCreds: &s.aCredential}) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/cloud/export_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/cloud/export_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/cloud/export_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/cloud/export_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,7 +4,10 @@ package cloud import ( + 
"github.com/juju/cmd" + jujucloud "github.com/juju/juju/cloud" + "github.com/juju/juju/cmd/modelcmd" sstesting "github.com/juju/juju/environs/simplestreams/testing" "github.com/juju/juju/jujuclient" ) @@ -70,3 +73,11 @@ store: testStore, } } + +func NewUpdateCredentialCommandForTest(testStore jujuclient.ClientStore, api credentialAPI) cmd.Command { + c := &updateCredentialCommand{ + api: api, + } + c.SetClientStore(testStore) + return modelcmd.WrapController(c) +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/cloud/listcredentials.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/cloud/listcredentials.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/cloud/listcredentials.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/cloud/listcredentials.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,18 +4,18 @@ package cloud import ( - "bytes" "fmt" + "io" "sort" "strings" - "text/tabwriter" "github.com/juju/cmd" "github.com/juju/errors" - "launchpad.net/gnuflag" + "github.com/juju/gnuflag" jujucloud "github.com/juju/juju/cloud" "github.com/juju/juju/cmd/juju/common" + "github.com/juju/juju/cmd/output" "github.com/juju/juju/environs" "github.com/juju/juju/jujuclient" ) @@ -61,8 +61,36 @@ cloudByNameFunc func(string) (*jujucloud.Cloud, error) } +// CloudCredential contains attributes used to define credentials for a cloud. +type CloudCredential struct { + // DefaultCredential is the named credential to use by default. + DefaultCredential string `json:"default-credential,omitempty" yaml:"default-credential,omitempty"` + + // DefaultRegion is the cloud region to use by default. + DefaultRegion string `json:"default-region,omitempty" yaml:"default-region,omitempty"` + + // Credentials is the collection of all credentials registered by the user for a cloud, keyed on a cloud name. 
+ Credentials map[string]Credential `json:"cloud-credentials,omitempty" yaml:",omitempty,inline"` +} + +// Credential instances represent cloud credentials. +type Credential struct { + // AuthType determines authentication type for the credential. + AuthType string `json:"auth-type" yaml:"auth-type"` + + // Attributes define details for individual credential. + // This collection is provider-specific: each provider is interested in different credential details. + Attributes map[string]string `json:"details,omitempty" yaml:",omitempty,inline"` + + // Revoked is true if the credential has been revoked. + Revoked bool `json:"revoked,omitempty" yaml:"revoked,omitempty"` + + // Label is optionally set to describe the credentials to a user. + Label string `json:"label,omitempty" yaml:"label,omitempty"` +} + type credentialsMap struct { - Credentials map[string]jujucloud.CloudCredential `yaml:"credentials" json:"credentials"` + Credentials map[string]CloudCredential `yaml:"credentials" json:"credentials"` } // NewListCredentialsCommand returns a command to list cloud credentials. @@ -84,6 +112,7 @@ } func (c *listCredentialsCommand) SetFlags(f *gnuflag.FlagSet) { + c.CommandBase.SetFlags(f) f.BoolVar(&c.showSecrets, "show-secrets", false, "Show secrets") c.out.AddFlags(f, "tabular", map[string]cmd.Formatter{ "yaml": cmd.FormatYaml, @@ -108,17 +137,6 @@ return c.personalCloudsFunc() } -// displayCloudName returns the provided cloud name prefixed -// with "local:" if it is a local cloud. 
-func displayCloudName(cloudName string, personalCloudNames []string) string { - for _, name := range personalCloudNames { - if cloudName == name { - return localPrefix + cloudName - } - } - return cloudName -} - func (c *listCredentialsCommand) Run(ctxt *cmd.Context) error { var credentials map[string]jujucloud.CloudCredential credentials, err := c.store.AllCredentials() @@ -143,14 +161,38 @@ personalCloudNames = append(personalCloudNames, name) } - displayCredentials := make(map[string]jujucloud.CloudCredential) + displayCredentials := make(map[string]CloudCredential) + var missingClouds []string for cloudName, cred := range credentials { if !c.showSecrets { if err := c.removeSecrets(cloudName, &cred); err != nil { + if errors.IsNotValid(err) { + missingClouds = append(missingClouds, cloudName) + continue + } return errors.Annotatef(err, "removing secrets from credentials for cloud %v", cloudName) } } - displayCredentials[displayCloudName(cloudName, personalCloudNames)] = cred + displayCredential := CloudCredential{ + DefaultCredential: cred.DefaultCredential, + DefaultRegion: cred.DefaultRegion, + } + if len(cred.AuthCredentials) != 0 { + displayCredential.Credentials = make(map[string]Credential, len(cred.AuthCredentials)) + for credName, credDetails := range cred.AuthCredentials { + displayCredential.Credentials[credName] = Credential{ + string(credDetails.AuthType()), + credDetails.Attributes(), + credDetails.Revoked, + credDetails.Label, + } + } + } + displayCredentials[cloudName] = displayCredential + } + if c.out.Name() == "tabular" && len(missingClouds) > 0 { + fmt.Fprintf(ctxt.GetStdout(), "The following clouds have been removed and are omitted from the results to avoid leaking secrets.\n"+ + "Run with --show-secrets to display these clouds' credentials: %v\n\n", strings.Join(missingClouds, ", ")) } return c.out.Write(ctxt, credentialsMap{displayCredentials}) } @@ -175,11 +217,16 @@ return nil } -// formatCredentialsTabular returns a tabular summary of 
cloud information. -func formatCredentialsTabular(value interface{}) ([]byte, error) { +// formatCredentialsTabular writes a tabular summary of cloud information. +func formatCredentialsTabular(writer io.Writer, value interface{}) error { credentials, ok := value.(credentialsMap) if !ok { - return nil, errors.Errorf("expected value of type %T, got %T", credentials, value) + return errors.Errorf("expected value of type %T, got %T", credentials, value) + } + + if len(credentials.Credentials) == 0 { + fmt.Fprintln(writer, "No credentials to display.") + return nil } // For tabular we'll sort alphabetically by cloud, and then by credential name. @@ -189,26 +236,14 @@ } sort.Strings(cloudNames) - var out bytes.Buffer - const ( - // To format things into columns. - minwidth = 0 - tabwidth = 1 - padding = 2 - padchar = ' ' - flags = 0 - ) - tw := tabwriter.NewWriter(&out, minwidth, tabwidth, padding, padchar, flags) - p := func(values ...string) { - text := strings.Join(values, "\t") - fmt.Fprintln(tw, text) - } - p("CLOUD\tCREDENTIALS") + tw := output.TabWriter(writer) + w := output.Wrapper{tw} + w.Println("Cloud", "Credentials") for _, cloudName := range cloudNames { var haveDefault bool var credentialNames []string credentials := credentials.Credentials[cloudName] - for credentialName := range credentials.AuthCredentials { + for credentialName := range credentials.Credentials { if credentialName == credentials.DefaultCredential { credentialNames = append([]string{credentialName + "*"}, credentialNames...) 
haveDefault = true @@ -221,9 +256,9 @@ } else { sort.Strings(credentialNames) } - p(cloudName, strings.Join(credentialNames, ", ")) + w.Println(cloudName, strings.Join(credentialNames, ", ")) } tw.Flush() - return out.Bytes(), nil + return nil } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/cloud/listcredentials_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/cloud/listcredentials_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/cloud/listcredentials_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/cloud/listcredentials_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -9,17 +9,17 @@ jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" + "github.com/juju/errors" jujucloud "github.com/juju/juju/cloud" "github.com/juju/juju/cmd/juju/cloud" "github.com/juju/juju/environs" - "github.com/juju/juju/jujuclient" "github.com/juju/juju/jujuclient/jujuclienttesting" "github.com/juju/juju/testing" ) type listCredentialsSuite struct { testing.BaseSuite - store jujuclient.CredentialGetter + store *jujuclienttesting.MemStore personalCloudsFunc func() (map[string]jujucloud.Cloud, error) cloudByNameFunc func(string) (*jujucloud.Cloud, error) } @@ -30,7 +30,10 @@ "mycloud": {}, }, nil }, - cloudByNameFunc: func(string) (*jujucloud.Cloud, error) { + cloudByNameFunc: func(name string) (*jujucloud.Cloud, error) { + if name == "missingcloud" { + return nil, errors.NotValidf(name) + } return &jujucloud.Cloud{Type: "test-provider"}, nil }, }) @@ -107,11 +110,27 @@ func (s *listCredentialsSuite) TestListCredentialsTabular(c *gc.C) { out := s.listCredentials(c) c.Assert(out, gc.Equals, ` -CLOUD CREDENTIALS -aws down*, bob -azure azhja -google default -local:mycloud me +Cloud Credentials +aws down*, bob +azure azhja +google default +mycloud me + +`[1:]) +} + +func (s *listCredentialsSuite) TestListCredentialsTabularMissingCloud(c *gc.C) { + s.store.Credentials["missingcloud"] = 
jujucloud.CloudCredential{} + out := s.listCredentials(c) + c.Assert(out, gc.Equals, ` +The following clouds have been removed and are omitted from the results to avoid leaking secrets. +Run with --show-secrets to display these clouds' credentials: missingcloud + +Cloud Credentials +aws down*, bob +azure azhja +google default +mycloud me `[1:]) } @@ -119,13 +138,24 @@ func (s *listCredentialsSuite) TestListCredentialsTabularFiltered(c *gc.C) { out := s.listCredentials(c, "aws") c.Assert(out, gc.Equals, ` -CLOUD CREDENTIALS +Cloud Credentials aws down*, bob `[1:]) } func (s *listCredentialsSuite) TestListCredentialsYAMLWithSecrets(c *gc.C) { + s.store.Credentials["missingcloud"] = jujucloud.CloudCredential{ + AuthCredentials: map[string]jujucloud.Credential{ + "default": jujucloud.NewCredential( + jujucloud.AccessKeyAuthType, + map[string]string{ + "access-key": "key", + "secret-key": "secret", + }, + ), + }, + } out := s.listCredentials(c, "--format", "yaml", "--show-secrets") c.Assert(out, gc.Equals, ` credentials: @@ -153,7 +183,12 @@ client-email: email client-id: id private-key: key - local:mycloud: + missingcloud: + default: + auth-type: access-key + access-key: key + secret-key: secret + mycloud: me: auth-type: access-key access-key: key @@ -162,6 +197,17 @@ } func (s *listCredentialsSuite) TestListCredentialsYAMLNoSecrets(c *gc.C) { + s.store.Credentials["missingcloud"] = jujucloud.CloudCredential{ + AuthCredentials: map[string]jujucloud.Credential{ + "default": jujucloud.NewCredential( + jujucloud.AccessKeyAuthType, + map[string]string{ + "access-key": "key", + "secret-key": "secret", + }, + ), + }, + } out := s.listCredentials(c, "--format", "yaml") c.Assert(out, gc.Equals, ` credentials: @@ -185,7 +231,7 @@ auth-type: oauth2 client-email: email client-id: id - local:mycloud: + mycloud: me: auth-type: access-key access-key: key @@ -205,9 +251,48 @@ `[1:]) } -func (s *listCredentialsSuite) TestListCredentialsJSON(c *gc.C) { - // TODO(axw) test once json 
marshalling works properly - c.Skip("not implemented: credentials don't marshal to JSON yet") +func (s *listCredentialsSuite) TestListCredentialsJSONWithSecrets(c *gc.C) { + out := s.listCredentials(c, "--format", "json", "--show-secrets") + c.Assert(out, gc.Equals, ` +{"credentials":{"aws":{"default-credential":"down","default-region":"ap-southeast-2","cloud-credentials":{"bob":{"auth-type":"access-key","details":{"access-key":"key","secret-key":"secret"}},"down":{"auth-type":"userpass","details":{"password":"password","username":"user"}}}},"azure":{"cloud-credentials":{"azhja":{"auth-type":"userpass","details":{"application-id":"app-id","application-password":"app-secret","subscription-id":"subscription-id","tenant-id":"tenant-id"}}}},"google":{"cloud-credentials":{"default":{"auth-type":"oauth2","details":{"client-email":"email","client-id":"id","private-key":"key"}}}},"mycloud":{"cloud-credentials":{"me":{"auth-type":"access-key","details":{"access-key":"key","secret-key":"secret"}}}}}} +`[1:]) +} + +func (s *listCredentialsSuite) TestListCredentialsJSONNoSecrets(c *gc.C) { + out := s.listCredentials(c, "--format", "json") + c.Assert(out, gc.Equals, ` +{"credentials":{"aws":{"default-credential":"down","default-region":"ap-southeast-2","cloud-credentials":{"bob":{"auth-type":"access-key","details":{"access-key":"key"}},"down":{"auth-type":"userpass","details":{"username":"user"}}}},"azure":{"cloud-credentials":{"azhja":{"auth-type":"userpass","details":{"application-id":"app-id","subscription-id":"subscription-id","tenant-id":"tenant-id"}}}},"google":{"cloud-credentials":{"default":{"auth-type":"oauth2","details":{"client-email":"email","client-id":"id"}}}},"mycloud":{"cloud-credentials":{"me":{"auth-type":"access-key","details":{"access-key":"key"}}}}}} +`[1:]) +} + +func (s *listCredentialsSuite) TestListCredentialsJSONFiltered(c *gc.C) { + out := s.listCredentials(c, "--format", "json", "azure") + c.Assert(out, gc.Equals, ` 
+{"credentials":{"azure":{"cloud-credentials":{"azhja":{"auth-type":"userpass","details":{"application-id":"app-id","subscription-id":"subscription-id","tenant-id":"tenant-id"}}}}}} +`[1:]) +} + +func (s *listCredentialsSuite) TestListCredentialsEmpty(c *gc.C) { + s.store = &jujuclienttesting.MemStore{ + Credentials: map[string]jujucloud.CloudCredential{ + "aws": { + AuthCredentials: map[string]jujucloud.Credential{ + "bob": jujucloud.NewCredential( + jujucloud.OAuth2AuthType, + map[string]string{}, + ), + }, + }, + }, + } + out := strings.Replace(s.listCredentials(c), "\n", "", -1) + c.Assert(out, gc.Equals, "Cloud Credentialsaws bob") + + out = strings.Replace(s.listCredentials(c, "--format", "yaml"), "\n", "", -1) + c.Assert(out, gc.Equals, "credentials: aws: bob: auth-type: oauth2") + + out = strings.Replace(s.listCredentials(c, "--format", "json"), "\n", "", -1) + c.Assert(out, gc.Equals, `{"credentials":{"aws":{"cloud-credentials":{"bob":{"auth-type":"oauth2"}}}}}`) } func (s *listCredentialsSuite) TestListCredentialsNone(c *gc.C) { @@ -216,7 +301,7 @@ c.Assert(err, jc.ErrorIsNil) c.Assert(testing.Stderr(ctx), gc.Equals, "") out := strings.Replace(testing.Stdout(ctx), "\n", "", -1) - c.Assert(out, gc.Equals, "CLOUD CREDENTIALS") + c.Assert(out, gc.Equals, "No credentials to display.") ctx, err = testing.RunCommand(c, listCmd, "--format", "yaml") c.Assert(err, jc.ErrorIsNil) @@ -224,7 +309,11 @@ out = strings.Replace(testing.Stdout(ctx), "\n", "", -1) c.Assert(out, gc.Equals, "credentials: {}") - // TODO(axw) test json once json marshaling works properly + ctx, err = testing.RunCommand(c, listCmd, "--format", "json") + c.Assert(err, jc.ErrorIsNil) + c.Assert(testing.Stderr(ctx), gc.Equals, "") + out = strings.Replace(testing.Stdout(ctx), "\n", "", -1) + c.Assert(out, gc.Equals, `{"credentials":{}}`) } func (s *listCredentialsSuite) listCredentials(c *gc.C, args ...string) string { diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/cloud/list.go 
juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/cloud/list.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/cloud/list.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/cloud/list.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,19 +4,18 @@ package cloud import ( - "bytes" - "fmt" + "io" "sort" - "strings" - "text/tabwriter" + "github.com/juju/ansiterm" "github.com/juju/cmd" "github.com/juju/errors" + "github.com/juju/gnuflag" "github.com/juju/loggo" - "launchpad.net/gnuflag" jujucloud "github.com/juju/juju/cloud" "github.com/juju/juju/cmd/juju/common" + "github.com/juju/juju/cmd/output" ) var logger = loggo.GetLogger("juju.cmd.juju.cloud") @@ -41,9 +40,11 @@ juju clouds -See also: show-cloud - update-clouds - add-cloud +See also: + add-cloud + regions + show-cloud + update-clouds ` // NewListCloudsCommand returns a command to list cloud information. @@ -61,6 +62,7 @@ } func (c *listCloudsCommand) SetFlags(f *gnuflag.FlagSet) { + c.CommandBase.SetFlags(f) c.out.AddFlags(f, "tabular", map[string]cmd.Formatter{ "yaml": cmd.FormatYaml, "json": cmd.FormatJson, @@ -68,8 +70,6 @@ }) } -const localPrefix = "local:" - func (c *listCloudsCommand) Run(ctxt *cmd.Context) error { details, err := listCloudDetails() if err != nil { @@ -145,37 +145,28 @@ return nil, err } for name, cloud := range personalClouds { - // Add to result with "local:" prefix. cloudDetails := makeCloudDetails(cloud) cloudDetails.Source = "local" - details.personal[localPrefix+name] = cloudDetails + details.personal[name] = cloudDetails + // Delete any built-in or public clouds with same name. + delete(details.builtin, name) + delete(details.public, name) } return details, nil } -// formatCloudsTabular returns a tabular summary of cloud information. -func formatCloudsTabular(value interface{}) ([]byte, error) { +// formatCloudsTabular writes a tabular summary of cloud information. 
+func formatCloudsTabular(writer io.Writer, value interface{}) error { clouds, ok := value.(*cloudList) if !ok { - return nil, errors.Errorf("expected value of type %T, got %T", clouds, value) + return errors.Errorf("expected value of type %T, got %T", clouds, value) } - var out bytes.Buffer - const ( - // To format things into columns. - minwidth = 0 - tabwidth = 1 - padding = 2 - padchar = ' ' - flags = 0 - ) - tw := tabwriter.NewWriter(&out, minwidth, tabwidth, padding, padchar, flags) - p := func(values ...string) { - text := strings.Join(values, "\t") - fmt.Fprintln(tw, text) - } - p("CLOUD\tTYPE\tREGIONS") + tw := output.TabWriter(writer) + w := output.Wrapper{tw} + w.Println("Cloud", "Regions", "Default", "Type", "Description") + w.SetColumnAlignRight(1) cloudNamesSorted := func(someClouds map[string]*cloudDetails) []string { // For tabular we'll sort alphabetically, user clouds last. @@ -187,34 +178,31 @@ return names } - printClouds := func(someClouds map[string]*cloudDetails) { + printClouds := func(someClouds map[string]*cloudDetails, color *ansiterm.Context) { cloudNames := cloudNamesSorted(someClouds) for _, name := range cloudNames { info := someClouds[name] - var regions []string - for _, region := range info.Regions { - regions = append(regions, fmt.Sprint(region.Key)) - } - // TODO(wallyworld) - we should be smarter about handling - // long region text, for now we'll display the first 7 as - // that covers all clouds except AWS and Azure and will - // prevent wrapping on a reasonable terminal width. - regionCount := len(regions) - if regionCount > 7 { - regionCount = 7 + defaultRegion := "" + if len(info.Regions) > 0 { + defaultRegion = info.RegionsMap[info.Regions[0].Key.(string)].Name } - regionText := strings.Join(regions[:regionCount], ", ") - if len(regions) > 7 { - regionText = regionText + " ..." 
+ description := info.CloudDescription + if len(description) > 40 { + description = description[:39] } - p(name, info.CloudType, regionText) + w.PrintColor(color, name) + w.Println(len(info.Regions), defaultRegion, info.CloudType, description) } } - printClouds(clouds.public) - printClouds(clouds.builtin) - printClouds(clouds.personal) - + printClouds(clouds.public, nil) + printClouds(clouds.builtin, nil) + printClouds(clouds.personal, ansiterm.Foreground(ansiterm.BrightBlue)) + + w.Println("\nTry 'list-regions ' to see available regions.") + w.Println("'show-cloud ' or 'regions --format yaml ' can be used to see region endpoints.") + w.Println("'add-cloud' can add private clouds or private infrastructure.") + w.Println("Update the known public clouds with 'update-clouds'.") tw.Flush() - return out.Bytes(), nil + return nil } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/cloud/list_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/cloud/list_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/cloud/list_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/cloud/list_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -29,7 +29,7 @@ out := testing.Stdout(ctx) out = strings.Replace(out, "\n", "", -1) // Just check couple of snippets of the output to make sure it looks ok. - c.Assert(out, gc.Matches, `.*aws-china[ ]*ec2[ ]*cn-north-1.*`) + c.Assert(out, gc.Matches, `.*aws-china[ ]*1[ ]*cn-north-1[ ]*ec2.*`) // TODO(wallyworld) - uncomment when we build with go 1.3 or greater // LXD should be there too. // c.Assert(out, gc.Matches, `.*localhost[ ]*lxd[ ]*localhost.*`) @@ -54,8 +54,30 @@ out := testing.Stdout(ctx) out = strings.Replace(out, "\n", "", -1) // Just check a snippet of the output to make sure it looks ok. - // local: clouds are last. - c.Assert(out, jc.Contains, `local:homestack openstack london`) + // local clouds are last. 
+ // homestack should abut localhost and hence come last in the output. + c.Assert(out, jc.Contains, `Hypervisorhomestack 1 london openstack Openstack Cloud`) +} + +func (s *listSuite) TestListPublicAndPersonalSameName(c *gc.C) { + data := ` +clouds: + aws: + type: ec2 + auth-types: [access-key] + endpoint: http://custom +`[1:] + err := ioutil.WriteFile(osenv.JujuXDGDataHomePath("clouds.yaml"), []byte(data), 0600) + c.Assert(err, jc.ErrorIsNil) + + ctx, err := testing.RunCommand(c, cloud.NewListCloudsCommand(), "--format", "yaml") + c.Assert(err, jc.ErrorIsNil) + out := testing.Stdout(ctx) + out = strings.Replace(out, "\n", "", -1) + // Just check a snippet of the output to make sure it looks ok. + // local clouds are last. + c.Assert(out, gc.Not(gc.Matches), `.*aws:[ ]*defined: public[ ]*type: ec2[ ]*auth-types: \[access-key\].*`) + c.Assert(out, gc.Matches, `.*aws:[ ]*defined: local[ ]*type: ec2[ ]*description: Amazon Web Services[ ]*auth-types: \[access-key\].*`) } func (s *listSuite) TestListYAML(c *gc.C) { @@ -64,7 +86,7 @@ out := testing.Stdout(ctx) out = strings.Replace(out, "\n", "", -1) // Just check a snippet of the output to make sure it looks ok. - c.Assert(out, gc.Matches, `.*aws:[ ]*defined: public[ ]*type: ec2[ ]*auth-types: \[access-key\].*`) + c.Assert(out, gc.Matches, `.*aws:[ ]*defined: public[ ]*type: ec2[ ]*description: Amazon Web Services[ ]*auth-types: \[access-key\].*`) } func (s *listSuite) TestListJSON(c *gc.C) { @@ -73,7 +95,7 @@ out := testing.Stdout(ctx) out = strings.Replace(out, "\n", "", -1) // Just check a snippet of the output to make sure it looks ok. 
- c.Assert(out, gc.Matches, `.*{"aws":{"defined":"public","type":"ec2","auth-types":\["access-key"\].*`) + c.Assert(out, gc.Matches, `.*{"aws":{"defined":"public","type":"ec2","description":"Amazon Web Services","auth-types":\["access-key"\].*`) } func (s *listSuite) TestListPreservesRegionOrder(c *gc.C) { diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/cloud/regions.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/cloud/regions.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/cloud/regions.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/cloud/regions.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,131 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package cloud + +import ( + "fmt" + "io" + + "github.com/juju/cmd" + "github.com/juju/errors" + "github.com/juju/gnuflag" + "gopkg.in/yaml.v2" + + "github.com/juju/juju/cloud" + "github.com/juju/juju/cmd/output" +) + +type listRegionsCommand struct { + cmd.CommandBase + out cmd.Output + cloudName string +} + +var listRegionsDoc = ` +Examples: + + juju regions aws + +See also: + add-cloud + clouds + show-cloud + update-clouds +` + +// NewListRegionsCommand returns a command to list cloud region information. +func NewListRegionsCommand() cmd.Command { + return &listRegionsCommand{} +} + +// Info implements Command.Info. +func (c *listRegionsCommand) Info() *cmd.Info { + return &cmd.Info{ + Name: "regions", + Args: "", + Purpose: "Lists regions for a given cloud.", + Doc: listRegionsDoc, + Aliases: []string{"list-regions"}, + } +} + +// SetFlags implements Command.SetFlags. +func (c *listRegionsCommand) SetFlags(f *gnuflag.FlagSet) { + c.CommandBase.SetFlags(f) + c.out.AddFlags(f, "tabular", map[string]cmd.Formatter{ + "yaml": cmd.FormatYaml, + "json": cmd.FormatJson, + "tabular": c.formatRegionsListTabular, + }) +} + +// Init implements Command.Init. 
+func (c *listRegionsCommand) Init(args []string) error { + switch len(args) { + case 0: + return errors.New("no cloud specified") + case 1: + c.cloudName = args[0] + } + return cmd.CheckEmpty(args[1:]) +} + +// Run implements Command.Run. +func (c *listRegionsCommand) Run(ctxt *cmd.Context) error { + cloud, err := cloud.CloudByName(c.cloudName) + if err != nil { + return err + } + + if len(cloud.Regions) == 0 { + fmt.Fprintf(ctxt.GetStdout(), "Cloud %q has no regions defined.\n", c.cloudName) + return nil + } + var regions interface{} + if c.out.Name() == "json" { + details := make(map[string]regionDetails) + for _, r := range cloud.Regions { + details[r.Name] = regionDetails{ + Endpoint: r.Endpoint, + IdentityEndpoint: r.IdentityEndpoint, + StorageEndpoint: r.StorageEndpoint, + } + } + regions = details + } else { + details := make(yaml.MapSlice, len(cloud.Regions)) + for i, r := range cloud.Regions { + details[i] = yaml.MapItem{r.Name, regionDetails{ + Name: r.Name, + Endpoint: r.Endpoint, + IdentityEndpoint: r.IdentityEndpoint, + StorageEndpoint: r.StorageEndpoint, + }} + } + regions = details + } + err = c.out.Write(ctxt, regions) + if err != nil { + return err + } + return nil +} + +func (c *listRegionsCommand) formatRegionsListTabular(writer io.Writer, value interface{}) error { + regions, ok := value.(yaml.MapSlice) + if !ok { + return errors.Errorf("expected value of type %T, got %T", regions, value) + } + return formatRegionsTabular(writer, regions) +} + +func formatRegionsTabular(writer io.Writer, regions yaml.MapSlice) error { + tw := output.TabWriter(writer) + w := output.Wrapper{tw} + for _, r := range regions { + w.Println(r.Key) + } + tw.Flush() + return nil +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/cloud/regions_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/cloud/regions_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/cloud/regions_test.go 1970-01-01 00:00:00.000000000 +0000 +++ 
juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/cloud/regions_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,116 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package cloud_test + +import ( + "encoding/json" + + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/cmd/juju/cloud" + _ "github.com/juju/juju/provider/all" + "github.com/juju/juju/testing" +) + +type regionsSuite struct { + testing.FakeJujuXDGDataHomeSuite +} + +var _ = gc.Suite(®ionsSuite{}) + +func (s *regionsSuite) TestListRegionsInvalidCloud(c *gc.C) { + _, err := testing.RunCommand(c, cloud.NewListRegionsCommand(), "invalid") + c.Assert(err, gc.ErrorMatches, "cloud invalid not found") +} + +func (s *regionsSuite) TestListRegionsInvalidArgs(c *gc.C) { + _, err := testing.RunCommand(c, cloud.NewListRegionsCommand(), "aws", "another") + c.Assert(err, gc.ErrorMatches, `unrecognized args: \["another"\]`) +} + +func (s *regionsSuite) TestListRegions(c *gc.C) { + ctx, err := testing.RunCommand(c, cloud.NewListRegionsCommand(), "aws") + c.Assert(err, jc.ErrorIsNil) + out := testing.Stdout(ctx) + c.Assert(out, jc.DeepEquals, ` +us-east-1 +us-west-1 +us-west-2 +eu-west-1 +eu-central-1 +ap-south-1 +ap-southeast-1 +ap-southeast-2 +ap-northeast-1 +ap-northeast-2 +sa-east-1 + +`[1:]) +} + +func (s *regionsSuite) TestListRegionsYaml(c *gc.C) { + ctx, err := testing.RunCommand(c, cloud.NewListRegionsCommand(), "aws", "--format", "yaml") + c.Assert(err, jc.ErrorIsNil) + out := testing.Stdout(ctx) + c.Assert(out, jc.DeepEquals, ` +us-east-1: + endpoint: https://ec2.us-east-1.amazonaws.com +us-west-1: + endpoint: https://ec2.us-west-1.amazonaws.com +us-west-2: + endpoint: https://ec2.us-west-2.amazonaws.com +eu-west-1: + endpoint: https://ec2.eu-west-1.amazonaws.com +eu-central-1: + endpoint: https://ec2.eu-central-1.amazonaws.com +ap-south-1: + endpoint: https://ec2.ap-south-1.amazonaws.com +ap-southeast-1: + 
endpoint: https://ec2.ap-southeast-1.amazonaws.com +ap-southeast-2: + endpoint: https://ec2.ap-southeast-2.amazonaws.com +ap-northeast-1: + endpoint: https://ec2.ap-northeast-1.amazonaws.com +ap-northeast-2: + endpoint: https://ec2.ap-northeast-2.amazonaws.com +sa-east-1: + endpoint: https://ec2.sa-east-1.amazonaws.com +`[1:]) +} + +type regionDetails struct { + Endpoint string `json:"endpoint"` + IdentityEndpoint string `json:"identity-endpoint"` + StorageEndpoint string `json:"storage-endpoint"` +} + +func (s *regionsSuite) TestListRegionsJson(c *gc.C) { + ctx, err := testing.RunCommand(c, cloud.NewListRegionsCommand(), "azure", "--format", "json") + c.Assert(err, jc.ErrorIsNil) + out := testing.Stdout(ctx) + var data map[string]regionDetails + err = json.Unmarshal([]byte(out), &data) + c.Assert(err, jc.ErrorIsNil) + c.Assert(data, jc.DeepEquals, map[string]regionDetails{ + "northeurope": {Endpoint: "https://management.azure.com", IdentityEndpoint: "https://graph.windows.net", StorageEndpoint: "https://core.windows.net"}, + "eastasia": {Endpoint: "https://management.azure.com", IdentityEndpoint: "https://graph.windows.net", StorageEndpoint: "https://core.windows.net"}, + "japanwest": {Endpoint: "https://management.azure.com", IdentityEndpoint: "https://graph.windows.net", StorageEndpoint: "https://core.windows.net"}, + "centralus": {Endpoint: "https://management.azure.com", IdentityEndpoint: "https://graph.windows.net", StorageEndpoint: "https://core.windows.net"}, + "eastus2": {Endpoint: "https://management.azure.com", IdentityEndpoint: "https://graph.windows.net", StorageEndpoint: "https://core.windows.net"}, + "japaneast": {Endpoint: "https://management.azure.com", IdentityEndpoint: "https://graph.windows.net", StorageEndpoint: "https://core.windows.net"}, + "northcentralus": {Endpoint: "https://management.azure.com", IdentityEndpoint: "https://graph.windows.net", StorageEndpoint: "https://core.windows.net"}, + "southcentralus": {Endpoint: 
"https://management.azure.com", IdentityEndpoint: "https://graph.windows.net", StorageEndpoint: "https://core.windows.net"}, + "australiaeast": {Endpoint: "https://management.azure.com", IdentityEndpoint: "https://graph.windows.net", StorageEndpoint: "https://core.windows.net"}, + "brazilsouth": {Endpoint: "https://management.azure.com", IdentityEndpoint: "https://graph.windows.net", StorageEndpoint: "https://core.windows.net"}, + "centralindia": {Endpoint: "https://management.azure.com", IdentityEndpoint: "https://graph.windows.net", StorageEndpoint: "https://core.windows.net"}, + "southindia": {Endpoint: "https://management.azure.com", IdentityEndpoint: "https://graph.windows.net", StorageEndpoint: "https://core.windows.net"}, + "westeurope": {Endpoint: "https://management.azure.com", IdentityEndpoint: "https://graph.windows.net", StorageEndpoint: "https://core.windows.net"}, + "westindia": {Endpoint: "https://management.azure.com", IdentityEndpoint: "https://graph.windows.net", StorageEndpoint: "https://core.windows.net"}, + "westus": {Endpoint: "https://management.azure.com", IdentityEndpoint: "https://graph.windows.net", StorageEndpoint: "https://core.windows.net"}, + "australiasoutheast": {Endpoint: "https://management.azure.com", IdentityEndpoint: "https://graph.windows.net", StorageEndpoint: "https://core.windows.net"}, + "eastus": {Endpoint: "https://management.azure.com", IdentityEndpoint: "https://graph.windows.net", StorageEndpoint: "https://core.windows.net"}, + "southeastasia": {Endpoint: "https://management.azure.com", IdentityEndpoint: "https://graph.windows.net", StorageEndpoint: "https://core.windows.net"}, + }) +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/cloud/remove_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/cloud/remove_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/cloud/remove_test.go 2016-08-16 08:56:25.000000000 +0000 +++ 
juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/cloud/remove_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -16,16 +16,11 @@ ) type removeSuite struct { - testing.BaseSuite + testing.FakeJujuXDGDataHomeSuite } var _ = gc.Suite(&removeSuite{}) -func (s *removeSuite) SetUpTest(c *gc.C) { - origHome := osenv.SetJujuXDGDataHome(c.MkDir()) - s.AddCleanup(func(*gc.C) { osenv.SetJujuXDGDataHome(origHome) }) -} - func (s *removeSuite) TestRemoveBadArgs(c *gc.C) { cmd := cloud.NewRemoveCloudCommand() _, err := testing.RunCommand(c, cmd) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/cloud/show.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/cloud/show.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/cloud/show.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/cloud/show.go 2016-10-13 14:31:49.000000000 +0000 @@ -6,8 +6,8 @@ import ( "github.com/juju/cmd" "github.com/juju/errors" + "github.com/juju/gnuflag" "gopkg.in/yaml.v2" - "launchpad.net/gnuflag" jujucloud "github.com/juju/juju/cloud" ) @@ -28,8 +28,9 @@ juju show-cloud google juju show-cloud azure-china --output ~/azure_cloud_details.txt -See also: clouds - update-clouds +See also: + clouds + update-clouds ` // NewShowCloudCommand returns a command to list cloud information. @@ -38,6 +39,7 @@ } func (c *showCloudCommand) SetFlags(f *gnuflag.FlagSet) { + c.CommandBase.SetFlags(f) // We only support yaml for display purposes. 
c.out.AddFlags(f, "yaml", map[string]cmd.Formatter{ "yaml": cmd.FormatYaml, @@ -76,31 +78,38 @@ } type regionDetails struct { - Name string `yaml:"-" json:"-"` - Endpoint string `yaml:"endpoint,omitempty" json:"endpoint,omitempty"` - StorageEndpoint string `yaml:"storage-endpoint,omitempty" json:"storage-endpoint,omitempty"` + Name string `yaml:"-" json:"-"` + Endpoint string `yaml:"endpoint,omitempty" json:"endpoint,omitempty"` + IdentityEndpoint string `yaml:"identity-endpoint,omitempty" json:"identity-endpoint,omitempty"` + StorageEndpoint string `yaml:"storage-endpoint,omitempty" json:"storage-endpoint,omitempty"` } type cloudDetails struct { - Source string `yaml:"defined,omitempty" json:"defined,omitempty"` - CloudType string `yaml:"type" json:"type"` - AuthTypes []string `yaml:"auth-types,omitempty,flow" json:"auth-types,omitempty"` - Endpoint string `yaml:"endpoint,omitempty" json:"endpoint,omitempty"` - StorageEndpoint string `yaml:"storage-endpoint,omitempty" json:"storage-endpoint,omitempty"` - // Regions is for when we want to print regions in order for yaml or tabular output. + Source string `yaml:"defined,omitempty" json:"defined,omitempty"` + CloudType string `yaml:"type" json:"type"` + CloudDescription string `yaml:"description" json:"description"` + AuthTypes []string `yaml:"auth-types,omitempty,flow" json:"auth-types,omitempty"` + Endpoint string `yaml:"endpoint,omitempty" json:"endpoint,omitempty"` + IdentityEndpoint string `yaml:"identity-endpoint,omitempty" json:"identity-endpoint,omitempty"` + StorageEndpoint string `yaml:"storage-endpoint,omitempty" json:"storage-endpoint,omitempty"` + // Regions is for when we want to print regions in order for yaml output. Regions yaml.MapSlice `yaml:"regions,omitempty" json:"-"` // Regions map is for json marshalling where format is important but not order. 
- RegionsMap map[string]regionDetails `yaml:"-" json:"regions,omitempty"` - Config map[string]interface{} `yaml:"config,omitempty" json:"config,omitempty"` + RegionsMap map[string]regionDetails `yaml:"-" json:"regions,omitempty"` + Config map[string]interface{} `yaml:"config,omitempty" json:"config,omitempty"` + RegionConfig jujucloud.RegionConfig `yaml:"region-config,omitempty" json:"region-config,omitempty"` } func makeCloudDetails(cloud jujucloud.Cloud) *cloudDetails { result := &cloudDetails{ - Source: "public", - CloudType: cloud.Type, - Endpoint: cloud.Endpoint, - StorageEndpoint: cloud.StorageEndpoint, - Config: cloud.Config, + Source: "public", + CloudType: cloud.Type, + Endpoint: cloud.Endpoint, + IdentityEndpoint: cloud.IdentityEndpoint, + StorageEndpoint: cloud.StorageEndpoint, + Config: cloud.Config, + RegionConfig: cloud.RegionConfig, + CloudDescription: cloud.Description, } result.AuthTypes = make([]string, len(cloud.AuthTypes)) for i, at := range cloud.AuthTypes { @@ -112,6 +121,9 @@ if region.Endpoint != result.Endpoint { r.Endpoint = region.Endpoint } + if region.IdentityEndpoint != result.IdentityEndpoint { + r.IdentityEndpoint = region.IdentityEndpoint + } if region.StorageEndpoint != result.StorageEndpoint { r.StorageEndpoint = region.StorageEndpoint } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/cloud/show_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/cloud/show_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/cloud/show_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/cloud/show_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -33,10 +33,11 @@ c.Assert(out, gc.Equals, ` defined: public type: ec2 +description: Amazon China auth-types: [access-key] regions: cn-north-1: - endpoint: https://ec2.cn-north-1.amazonaws.com.cn/ + endpoint: https://ec2.cn-north-1.amazonaws.com.cn `[1:]) } @@ -45,6 +46,7 @@ clouds: homestack: type: openstack + description: 
Openstack Cloud auth-types: [userpass, access-key] endpoint: http://homestack regions: @@ -55,12 +57,13 @@ `[1:] err := ioutil.WriteFile(osenv.JujuXDGDataHomePath("clouds.yaml"), []byte(data), 0600) - ctx, err := testing.RunCommand(c, cloud.NewShowCloudCommand(), "local:homestack") + ctx, err := testing.RunCommand(c, cloud.NewShowCloudCommand(), "homestack") c.Assert(err, jc.ErrorIsNil) out := testing.Stdout(ctx) c.Assert(out, gc.Equals, ` defined: local type: openstack +description: Openstack Cloud auth-types: [userpass, access-key] endpoint: http://homestack regions: @@ -70,3 +73,38 @@ bootstrap-timeout: 1800 `[1:]) } + +func (s *showSuite) TestShowWithRegionConfig(c *gc.C) { + data := ` +clouds: + homestack: + type: openstack + description: Openstack Cloud + auth-types: [userpass, access-key] + endpoint: http://homestack + regions: + london: + endpoint: http://london/1.0 + region-config: + london: + bootstrap-timeout: 1800 +`[1:] + err := ioutil.WriteFile(osenv.JujuXDGDataHomePath("clouds.yaml"), []byte(data), 0600) + + ctx, err := testing.RunCommand(c, cloud.NewShowCloudCommand(), "homestack") + c.Assert(err, jc.ErrorIsNil) + out := testing.Stdout(ctx) + c.Assert(out, gc.Equals, ` +defined: local +type: openstack +description: Openstack Cloud +auth-types: [userpass, access-key] +endpoint: http://homestack +regions: + london: + endpoint: http://london/1.0 +region-config: + london: + bootstrap-timeout: 1800 +`[1:]) +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/cloud/updateclouds.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/cloud/updateclouds.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/cloud/updateclouds.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/cloud/updateclouds.go 2016-10-13 14:31:49.000000000 +0000 @@ -39,11 +39,16 @@ juju update-clouds -See also: clouds +See also: + clouds ` // NewUpdateCloudsCommand returns a command to update cloud information. 
-func NewUpdateCloudsCommand() cmd.Command { +var NewUpdateCloudsCommand = func() cmd.Command { + return newUpdateCloudsCommand() +} + +func newUpdateCloudsCommand() cmd.Command { return &updateCloudsCommand{ publicSigningKey: keys.JujuPublicKey, publicCloudURL: "https://streams.canonical.com/juju/public-clouds.syaml", @@ -59,7 +64,7 @@ } func (c *updateCloudsCommand) Run(ctxt *cmd.Context) error { - fmt.Fprint(ctxt.Stdout, "Fetching latest public cloud list...\n") + fmt.Fprint(ctxt.Stderr, "Fetching latest public cloud list...\n") client := utils.GetHTTPClient(utils.VerifySSLHostnames) resp, err := client.Get(c.publicCloudURL) if err != nil { @@ -70,12 +75,12 @@ if resp.StatusCode != http.StatusOK { switch resp.StatusCode { case http.StatusNotFound: - fmt.Fprintln(ctxt.Stdout, "Public cloud list is unavailable right now.") + fmt.Fprintln(ctxt.Stderr, "Public cloud list is unavailable right now.") return nil case http.StatusUnauthorized: return errors.Unauthorizedf("unauthorised access to URL %q", c.publicCloudURL) } - return fmt.Errorf("cannot read public cloud information at URL %q, %q", c.publicCloudURL, resp.Status) + return errors.Errorf("cannot read public cloud information at URL %q, %q", c.publicCloudURL, resp.Status) } cloudData, err := decodeCheckSignature(resp.Body, c.publicSigningKey) @@ -96,14 +101,14 @@ return err } if sameCloudInfo { - fmt.Fprintln(ctxt.Stdout, "Your list of public clouds is up to date, see `juju clouds`.") + fmt.Fprintln(ctxt.Stderr, "Your list of public clouds is up to date, see `juju clouds`.") return nil } if err := jujucloud.WritePublicCloudMetadata(newPublicClouds); err != nil { return errors.Annotate(err, "error writing new local public cloud data") } updateDetails := diffClouds(newPublicClouds, currentPublicClouds) - fmt.Fprintln(ctxt.Stdout, fmt.Sprintf("Updated your list of public clouds with %s", updateDetails)) + fmt.Fprintln(ctxt.Stderr, fmt.Sprintf("Updated your list of public clouds with %s", updateDetails)) return nil 
} @@ -118,7 +123,7 @@ } keyring, err := openpgp.ReadArmoredKeyRing(bytes.NewBufferString(publicKey)) if err != nil { - return nil, fmt.Errorf("failed to parse public key: %v", err) + return nil, errors.Errorf("failed to parse public key: %v", err) } _, err = openpgp.CheckDetachedSignature(keyring, bytes.NewBuffer(b.Bytes), b.ArmoredSignature.Body) @@ -180,9 +185,10 @@ } endpointChanged := new.Endpoint != old.Endpoint + identityEndpointChanged := new.IdentityEndpoint != old.IdentityEndpoint storageEndpointChanged := new.StorageEndpoint != old.StorageEndpoint - if endpointChanged || storageEndpointChanged || new.Type != old.Type || !sameAuthTypes() { + if endpointChanged || identityEndpointChanged || storageEndpointChanged || new.Type != old.Type || !sameAuthTypes() { diff.addChange(updateChange, attributeScope, cloudName) } @@ -200,7 +206,7 @@ continue } - if (oldRegion.Endpoint != newRegion.Endpoint) || (oldRegion.StorageEndpoint != newRegion.StorageEndpoint) { + if (oldRegion.Endpoint != newRegion.Endpoint) || (oldRegion.IdentityEndpoint != newRegion.IdentityEndpoint) || (oldRegion.StorageEndpoint != newRegion.StorageEndpoint) { diff.addChange(updateChange, regionScope, formatCloudRegion(newName)) } } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/cloud/updateclouds_internal_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/cloud/updateclouds_internal_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/cloud/updateclouds_internal_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/cloud/updateclouds_internal_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -133,6 +133,15 @@ changed cloud attribute: - one`[1:], }, { + description: "cloud attributes change: identity endpoint", + old: map[string]jujucloud.Cloud{"one": jujucloud.Cloud{}}, + new: map[string]jujucloud.Cloud{"one": jujucloud.Cloud{IdentityEndpoint: "old_endpoint"}}, + expected: ` +1 cloud attribute changed: + + changed 
cloud attribute: + - one`[1:], + }, { description: "cloud attributes change: storage endpoint", old: map[string]jujucloud.Cloud{"one": jujucloud.Cloud{}}, new: map[string]jujucloud.Cloud{"one": jujucloud.Cloud{StorageEndpoint: "old_endpoint"}}, diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/cloud/updateclouds_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/cloud/updateclouds_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/cloud/updateclouds_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/cloud/updateclouds_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -18,21 +18,15 @@ jujucloud "github.com/juju/juju/cloud" "github.com/juju/juju/cmd/juju/cloud" sstesting "github.com/juju/juju/environs/simplestreams/testing" - "github.com/juju/juju/juju/osenv" "github.com/juju/juju/testing" ) type updateCloudsSuite struct { - testing.BaseSuite + testing.FakeJujuXDGDataHomeSuite } var _ = gc.Suite(&updateCloudsSuite{}) -func (s *updateCloudsSuite) SetUpTest(c *gc.C) { - origHome := osenv.SetJujuXDGDataHome(c.MkDir()) - s.AddCleanup(func(*gc.C) { osenv.SetJujuXDGDataHome(origHome) }) -} - func encodeCloudYAML(c *gc.C, yaml string) string { // TODO(wallyworld) - move test signing key elsewhere keyring, err := openpgp.ReadArmoredKeyRing(bytes.NewBufferString(sstesting.SignedMetadataPrivateKey)) @@ -83,7 +77,7 @@ errString := strings.Replace(err.Error(), "\n", "", -1) c.Assert(errString, gc.Matches, errMsg) } - return testing.Stdout(out) + return testing.Stderr(out) } func (s *updateCloudsSuite) Test404(c *gc.C) { diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/cloud/updatecredential.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/cloud/updatecredential.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/cloud/updatecredential.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/cloud/updatecredential.go 2016-10-13 
14:31:49.000000000 +0000 @@ -0,0 +1,122 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package cloud + +import ( + "github.com/juju/cmd" + "github.com/juju/errors" + "github.com/juju/gnuflag" + "gopkg.in/juju/names.v2" + + apicloud "github.com/juju/juju/api/cloud" + jujucloud "github.com/juju/juju/cloud" + "github.com/juju/juju/cmd/juju/common" + "github.com/juju/juju/cmd/modelcmd" +) + +var usageUpdateCredentialSummary = ` +Updates a credential for a cloud.`[1:] + +var usageUpdateCredentialDetails = ` +Updates a named credential for a cloud. + +Examples: + juju update-credential aws mysecrets + +See also: + add-credential + credentials`[1:] + +type updateCredentialCommand struct { + modelcmd.ControllerCommandBase + + api credentialAPI + + cloud string + credential string +} + +// NewUpdateCredentialCommand returns a command to update credential details. +func NewUpdateCredentialCommand() cmd.Command { + return modelcmd.WrapController(&updateCredentialCommand{}) +} + +// Init implements Command.Init. +func (c *updateCredentialCommand) Init(args []string) error { + if len(args) < 2 { + return errors.New("Usage: juju update-credential ") + } + c.cloud = args[0] + c.credential = args[1] + return cmd.CheckEmpty(args[2:]) +} + +// Info implements Command.Info +func (c *updateCredentialCommand) Info() *cmd.Info { + return &cmd.Info{ + Name: "update-credential", + Args: " ", + Purpose: usageUpdateCredentialSummary, + Doc: usageUpdateCredentialDetails, + } +} + +// SetFlags implements Command.SetFlags. 
+func (c *updateCredentialCommand) SetFlags(f *gnuflag.FlagSet) { + c.ControllerCommandBase.SetFlags(f) + f.StringVar(&c.credential, "credential", "", "Name of credential to update") + f.StringVar(&c.cloud, "cloud", "", "Cloud for which to update the credential") +} + +type credentialAPI interface { + UpdateCredential(tag names.CloudCredentialTag, credential jujucloud.Credential) error + Close() error +} + +func (c *updateCredentialCommand) getAPI() (credentialAPI, error) { + if c.api != nil { + return c.api, nil + } + api, err := c.NewAPIRoot() + if err != nil { + return nil, errors.Annotate(err, "opening API connection") + } + return apicloud.NewClient(api), nil +} + +// Run implements Command.Run +func (c *updateCredentialCommand) Run(ctx *cmd.Context) error { + cred, err := c.ClientStore().CredentialForCloud(c.cloud) + if errors.IsNotFound(err) { + ctx.Infof("No credentials exist for cloud %q", c.cloud) + return nil + } else if err != nil { + return err + } + credToUpdate, ok := cred.AuthCredentials[c.credential] + if !ok { + ctx.Infof("No credential called %q exists for cloud %q", c.credential, c.cloud) + return nil + } + + accountDetails, err := c.ClientStore().AccountDetails(c.ControllerName()) + if err != nil { + return errors.Trace(err) + } + credentialTag, err := common.ResolveCloudCredentialTag( + names.NewUserTag(accountDetails.User), names.NewCloudTag(c.cloud), c.credential, + ) + + client, err := c.getAPI() + if err != nil { + return err + } + defer client.Close() + + if err := client.UpdateCredential(credentialTag, credToUpdate); err != nil { + return err + } + ctx.Infof("Updated credential %q for user %q on cloud %q.", c.credential, accountDetails.User, c.cloud) + return nil +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/cloud/updatecredential_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/cloud/updatecredential_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/cloud/updatecredential_test.go 
1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/cloud/updatecredential_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,123 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package cloud_test + +import ( + "strings" + + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + "gopkg.in/juju/names.v2" + + jujucloud "github.com/juju/juju/cloud" + "github.com/juju/juju/cmd/juju/cloud" + "github.com/juju/juju/jujuclient" + "github.com/juju/juju/jujuclient/jujuclienttesting" + "github.com/juju/juju/testing" +) + +type updateCredentialSuite struct { + testing.BaseSuite +} + +var _ = gc.Suite(&updateCredentialSuite{}) + +func (s *updateCredentialSuite) TestBadArgs(c *gc.C) { + store := &jujuclienttesting.MemStore{ + Controllers: map[string]jujuclient.ControllerDetails{ + "controller": {}, + }, + CurrentControllerName: "controller", + } + cmd := cloud.NewUpdateCredentialCommandForTest(store, nil) + _, err := testing.RunCommand(c, cmd) + c.Assert(err, gc.ErrorMatches, "Usage: juju update-credential ") + _, err = testing.RunCommand(c, cmd, "cloud", "credential", "extra") + c.Assert(err, gc.ErrorMatches, `unrecognized args: \["extra"\]`) +} + +func (s *updateCredentialSuite) TestMissingCredential(c *gc.C) { + store := &jujuclienttesting.MemStore{ + Controllers: map[string]jujuclient.ControllerDetails{ + "controller": {}, + }, + CurrentControllerName: "controller", + Credentials: map[string]jujucloud.CloudCredential{ + "aws": { + AuthCredentials: map[string]jujucloud.Credential{ + "my-credential": jujucloud.NewCredential(jujucloud.AccessKeyAuthType, nil), + }, + }, + }, + } + cmd := cloud.NewUpdateCredentialCommandForTest(store, nil) + ctx, err := testing.RunCommand(c, cmd, "aws", "foo") + c.Assert(err, jc.ErrorIsNil) + output := testing.Stderr(ctx) + output = strings.Replace(output, "\n", "", -1) + c.Assert(output, gc.Equals, `No credential called "foo" exists for cloud 
"aws"`) +} + +func (s *updateCredentialSuite) TestBadCloudName(c *gc.C) { + store := &jujuclienttesting.MemStore{ + Controllers: map[string]jujuclient.ControllerDetails{ + "controller": {}, + }, + CurrentControllerName: "controller", + } + cmd := cloud.NewUpdateCredentialCommandForTest(store, nil) + ctx, err := testing.RunCommand(c, cmd, "somecloud", "foo") + c.Assert(err, jc.ErrorIsNil) + output := testing.Stderr(ctx) + output = strings.Replace(output, "\n", "", -1) + c.Assert(output, gc.Equals, `No credentials exist for cloud "somecloud"`) +} + +func (s *updateCredentialSuite) TestUpdate(c *gc.C) { + store := &jujuclienttesting.MemStore{ + Controllers: map[string]jujuclient.ControllerDetails{ + "controller": {}, + }, + CurrentControllerName: "controller", + Accounts: map[string]jujuclient.AccountDetails{ + "controller": { + User: "admin@local", + }, + }, + Credentials: map[string]jujucloud.CloudCredential{ + "aws": { + AuthCredentials: map[string]jujucloud.Credential{ + "my-credential": jujucloud.NewCredential(jujucloud.AccessKeyAuthType, nil), + "another-credential": jujucloud.NewCredential(jujucloud.UserPassAuthType, nil), + }, + }, + }, + } + fake := &fakeUpdateCredentialAPI{} + cmd := cloud.NewUpdateCredentialCommandForTest(store, fake) + ctx, err := testing.RunCommand(c, cmd, "aws", "my-credential") + c.Assert(err, jc.ErrorIsNil) + output := testing.Stderr(ctx) + output = strings.Replace(output, "\n", "", -1) + c.Assert(output, gc.Equals, `Updated credential "my-credential" for user "admin@local" on cloud "aws".`) + c.Assert(fake.creds, jc.DeepEquals, map[names.CloudCredentialTag]jujucloud.Credential{ + names.NewCloudCredentialTag("aws/admin@local/my-credential"): jujucloud.NewCredential(jujucloud.AccessKeyAuthType, nil), + }) +} + +type fakeUpdateCredentialAPI struct { + creds map[names.CloudCredentialTag]jujucloud.Credential +} + +func (f *fakeUpdateCredentialAPI) UpdateCredential(tag names.CloudCredentialTag, credential jujucloud.Credential) error { + if 
f.creds == nil { + f.creds = make(map[names.CloudCredentialTag]jujucloud.Credential) + } + f.creds[tag] = credential + return nil +} + +func (*fakeUpdateCredentialAPI) Close() error { + return nil +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/commands/add_sshkeys.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/commands/add_sshkeys.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/commands/add_sshkeys.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/commands/add_sshkeys.go 2016-10-13 14:31:49.000000000 +0000 @@ -63,7 +63,6 @@ Args: " ...", Purpose: usageAddSSHKeySummary, Doc: usageAddSSHKeyDetails, - Aliases: []string{"add-ssh-keys"}, } } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/commands/bootstrap.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/commands/bootstrap.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/commands/bootstrap.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/commands/bootstrap.go 2016-10-13 14:31:49.000000000 +0000 @@ -7,16 +7,17 @@ "bufio" "fmt" "os" - "os/user" + "sort" "strings" "github.com/juju/cmd" "github.com/juju/errors" + "github.com/juju/gnuflag" + "github.com/juju/schema" "github.com/juju/utils" "github.com/juju/utils/featureflag" "github.com/juju/version" "gopkg.in/juju/charm.v6-unstable" - "launchpad.net/gnuflag" jujucloud "github.com/juju/juju/cloud" "github.com/juju/juju/cmd/juju/common" @@ -48,7 +49,7 @@ initializing a Juju cloud environment. Initialization consists of creating a 'controller' model and provisioning a machine to act as controller. -We recommend you call your controller ‘username-region’ e.g. ‘fred-us-west-1’ +We recommend you call your controller ‘username-region’ e.g. ‘fred-us-east-1’ See --clouds for a list of clouds and credentials. See --regions for a list of available regions for a given cloud. 
@@ -95,10 +96,12 @@ juju bootstrap juju bootstrap --clouds juju bootstrap --regions aws - juju bootstrap joe-us-east1 google - juju bootstrap --config=~/config-rs.yaml joe-syd rackspace - juju bootstrap --config agent-version=1.25.3 joe-us-east-1 aws - juju bootstrap --config bootstrap-timeout=1200 joe-eastus azure + juju bootstrap aws + juju bootstrap aws/us-east-1 + juju bootstrap google joe-us-east1 + juju bootstrap --config=~/config-rs.yaml rackspace joe-syd + juju bootstrap --config agent-version=1.25.3 aws joe-us-east-1 + juju bootstrap --config bootstrap-timeout=1200 azure joe-eastus See also: add-credentials @@ -112,7 +115,7 @@ func newBootstrapCommand() cmd.Command { return modelcmd.Wrap( &bootstrapCommand{}, - modelcmd.ModelSkipFlags, modelcmd.ModelSkipDefault, + modelcmd.WrapSkipModelFlags, modelcmd.WrapSkipDefaultModel, ) } @@ -121,18 +124,21 @@ type bootstrapCommand struct { modelcmd.ModelCommandBase - Constraints constraints.Value - BootstrapConstraints constraints.Value - BootstrapSeries string - BootstrapImage string - UploadTools bool - MetadataSource string - Placement string - KeepBrokenEnvironment bool - AutoUpgrade bool - AgentVersionParam string - AgentVersion *version.Number - config common.ConfigFlag + Constraints constraints.Value + ConstraintsStr string + BootstrapConstraints constraints.Value + BootstrapConstraintsStr string + BootstrapSeries string + BootstrapImage string + BuildAgent bool + MetadataSource string + Placement string + KeepBrokenEnvironment bool + AutoUpgrade bool + AgentVersionParam string + AgentVersion *version.Number + config common.ConfigFlag + modelDefaults common.ConfigFlag showClouds bool showRegionsForCloud string @@ -143,30 +149,26 @@ Region string noGUI bool interactive bool - - flagset *gnuflag.FlagSet } func (c *bootstrapCommand) Info() *cmd.Info { return &cmd.Info{ Name: "bootstrap", - Args: " [/region]", + Args: "[[/region] []]", Purpose: usageBootstrapSummary, Doc: usageBootstrapDetails, } } func (c 
*bootstrapCommand) SetFlags(f *gnuflag.FlagSet) { - // we need to store this so that later we can easily check how many flags - // have been set (for interactive mode). - c.flagset = f - f.Var(constraints.ConstraintsValue{Target: &c.Constraints}, "constraints", "Set model constraints") - f.Var(constraints.ConstraintsValue{Target: &c.BootstrapConstraints}, "bootstrap-constraints", "Specify bootstrap machine constraints") + c.ModelCommandBase.SetFlags(f) + f.StringVar(&c.ConstraintsStr, "constraints", "", "Set model constraints") + f.StringVar(&c.BootstrapConstraintsStr, "bootstrap-constraints", "", "Specify bootstrap machine constraints") f.StringVar(&c.BootstrapSeries, "bootstrap-series", "", "Specify the series of the bootstrap machine") if featureflag.Enabled(feature.ImageMetadata) { f.StringVar(&c.BootstrapImage, "bootstrap-image", "", "Specify the image of the bootstrap machine") } - f.BoolVar(&c.UploadTools, "upload-tools", false, "Upload local version of tools before bootstrapping") + f.BoolVar(&c.BuildAgent, "build-agent", false, "Build local version of agent binary before bootstrapping") f.StringVar(&c.MetadataSource, "metadata-source", "", "Local path to use as tools and/or metadata source") f.StringVar(&c.Placement, "to", "", "Placement directive indicating an instance to bootstrap") f.BoolVar(&c.KeepBrokenEnvironment, "keep-broken", false, "Do not destroy the model if bootstrap fails") @@ -174,6 +176,7 @@ f.StringVar(&c.AgentVersionParam, "agent-version", "", "Version of tools to use for Juju agents") f.StringVar(&c.CredentialName, "credential", "", "Credentials to use when bootstrapping") f.Var(&c.config, "config", "Specify a controller configuration file, or one or more configuration\n options\n (--config config.yaml [--config key=value ...])") + f.Var(&c.modelDefaults, "model-default", "Specify a configuration file, or one or more configuration\n options to be set for all models, unless otherwise specified\n (--config config.yaml [--config key=value 
...])") f.StringVar(&c.hostedModelName, "d", defaultHostedModelName, "Name of the default hosted model for the controller") f.StringVar(&c.hostedModelName, "default-model", defaultHostedModelName, "Name of the default hosted model for the controller") f.BoolVar(&c.noGUI, "no-gui", false, "Do not install the Juju GUI in the controller when bootstrapping") @@ -182,23 +185,8 @@ } func (c *bootstrapCommand) Init(args []string) (err error) { - if len(args) == 0 { - switch c.flagset.NFlag() { - case 0: - // no args or flags, go interactive. - c.interactive = true - return nil - case 1: - if c.UploadTools { - // juju bootstrap --upload-tools is ok for interactive, too. - c.interactive = true - return nil - } - // some other flag was set, which means non-interactive. - } - } if c.showClouds && c.showRegionsForCloud != "" { - return fmt.Errorf("--clouds and --regions can't be used together") + return errors.New("--clouds and --regions can't be used together") } if c.showClouds { return cmd.CheckEmpty(args) @@ -206,24 +194,12 @@ if c.showRegionsForCloud != "" { return cmd.CheckEmpty(args) } - if c.AgentVersionParam != "" && c.UploadTools { - return fmt.Errorf("--agent-version and --upload-tools can't be used together") + if c.AgentVersionParam != "" && c.BuildAgent { + return errors.New("--agent-version and --build-agent can't be used together") } if c.BootstrapSeries != "" && !charm.IsValidSeries(c.BootstrapSeries) { return errors.NotValidf("series %q", c.BootstrapSeries) } - if c.BootstrapImage != "" { - if c.BootstrapSeries == "" { - return errors.Errorf("--bootstrap-image must be used with --bootstrap-series") - } - cons, err := constraints.Merge(c.Constraints, c.BootstrapConstraints) - if err != nil { - return errors.Trace(err) - } - if !cons.HasArch() { - return errors.Errorf("--bootstrap-image must be used with --bootstrap-constraints, specifying architecture") - } - } // Parse the placement directive. 
Bootstrap currently only // supports provider-specific placement directives. @@ -231,7 +207,7 @@ _, err = instance.ParsePlacement(c.Placement) if err != instance.ErrPlacementScopeMissing { // We only support unscoped placement directives for bootstrap. - return fmt.Errorf("unsupported bootstrap placement directive %q", c.Placement) + return errors.Errorf("unsupported bootstrap placement directive %q", c.Placement) } } if !c.AutoUpgrade { @@ -249,20 +225,24 @@ } } if c.AgentVersion != nil && (c.AgentVersion.Major != jujuversion.Current.Major || c.AgentVersion.Minor != jujuversion.Current.Minor) { - return fmt.Errorf("requested agent version major.minor mismatch") + return errors.New("requested agent version major.minor mismatch") } - // The user must specify two positional arguments: the controller name, - // and the cloud name (optionally with region specified). - if len(args) < 2 { - return errors.New("controller name and cloud name are required") + switch len(args) { + case 0: + // no args or flags, go interactive. + c.interactive = true + return nil } - c.controllerName = args[0] - c.Cloud = args[1] + c.Cloud = args[0] if i := strings.IndexRune(c.Cloud, '/'); i > 0 { c.Cloud, c.Region = c.Cloud[:i], c.Cloud[i+1:] } - return cmd.CheckEmpty(args[2:]) + if len(args) > 1 { + c.controllerName = args[1] + return cmd.CheckEmpty(args[2:]) + } + return nil } // BootstrapInterface provides bootstrap functionality that Run calls to support cleaner testing. 
@@ -292,15 +272,61 @@ waitForAgentInitialisation = common.WaitForAgentInitialisation ) -var ambiguousCredentialError = errors.New(` +var ambiguousDetectedCredentialError = errors.New(` more than one credential detected run juju autoload-credentials and specify a credential using the --credential argument`[1:], ) +var ambiguousCredentialError = errors.New(` +more than one credential is available +specify a credential using the --credential argument`[1:], +) + +func (c *bootstrapCommand) parseConstraints(ctx *cmd.Context) (err error) { + allAliases := map[string]string{} + defer common.WarnConstraintAliases(ctx, allAliases) + if c.ConstraintsStr != "" { + cons, aliases, err := constraints.ParseWithAliases(c.ConstraintsStr) + for k, v := range aliases { + allAliases[k] = v + } + if err != nil { + return err + } + c.Constraints = cons + } + if c.BootstrapConstraintsStr != "" { + cons, aliases, err := constraints.ParseWithAliases(c.BootstrapConstraintsStr) + for k, v := range aliases { + allAliases[k] = v + } + if err != nil { + return err + } + c.BootstrapConstraints = cons + } + return nil +} + // Run connects to the environment specified on the command line and bootstraps // a juju in that environment if none already exists. If there is as yet no environments.yaml file, // the user is informed how to create one. 
func (c *bootstrapCommand) Run(ctx *cmd.Context) (resultErr error) { + if err := c.parseConstraints(ctx); err != nil { + return err + } + if c.BootstrapImage != "" { + if c.BootstrapSeries == "" { + return errors.Errorf("--bootstrap-image must be used with --bootstrap-series") + } + cons, err := constraints.Merge(c.Constraints, c.BootstrapConstraints) + if err != nil { + return errors.Trace(err) + } + if !cons.HasArch() { + return errors.Errorf("--bootstrap-image must be used with --bootstrap-constraints, specifying architecture") + } + } if c.interactive { if err := c.runInteractive(ctx); err != nil { return errors.Trace(err) @@ -362,6 +388,9 @@ for authType := range schemas { authTypes = append(authTypes, authType) } + // Since we are iterating over a map, lets sort the authTypes so + // they are always in a consistent order. + sort.Sort(jujucloud.AuthTypes(authTypes)) cloud = &jujucloud.Cloud{ Type: c.Cloud, AuthTypes: authTypes, @@ -377,14 +406,14 @@ return errors.Trace(err) } + provider, err := environs.Provider(cloud.Type) + if err != nil { + return errors.Trace(err) + } // Custom clouds may not have explicitly declared support for any auth- // types, in which case we'll assume that they support everything that // the provider supports. 
if len(cloud.AuthTypes) == 0 { - provider, err := environs.Provider(cloud.Type) - if err != nil { - return errors.Trace(err) - } for authType := range provider.CredentialSchemas() { cloud.AuthTypes = append(cloud.AuthTypes, authType) } @@ -394,8 +423,16 @@ store := c.ClientStore() var detectedCredentialName string credential, credentialName, regionName, err := modelcmd.GetCredentials( - store, c.Region, c.CredentialName, c.Cloud, cloud.Type, + ctx, store, modelcmd.GetCredentialsParams{ + Cloud: *cloud, + CloudName: c.Cloud, + CloudRegion: c.Region, + CredentialName: c.CredentialName, + }, ) + if errors.Cause(err) == modelcmd.ErrMultipleCredentials { + return ambiguousCredentialError + } if errors.IsNotFound(err) && c.CredentialName == "" { // No credential was explicitly specified, and no credential // was found in credentials.yaml; have the provider detect @@ -403,7 +440,7 @@ ctx.Verbosef("no credentials found, checking environment") detected, err := modelcmd.DetectCredential(c.Cloud, cloud.Type) if errors.Cause(err) == modelcmd.ErrMultipleCredentials { - return ambiguousCredentialError + return ambiguousDetectedCredentialError } else if err != nil { return errors.Trace(err) } @@ -428,12 +465,19 @@ region, err := getRegion(cloud, c.Cloud, regionName) if err != nil { fmt.Fprintf(ctx.GetStderr(), - "%s\n\nSpecify an alternative region, or try %q.", + "%s\n\nSpecify an alternative region, or try %q.\n", err, "juju update-clouds", ) return cmd.ErrSilent } + if c.controllerName == "" { + c.controllerName = defaultControllerName(c.Cloud, region.Name) + } + controllerModelUUID, err := utils.NewUUID() + if err != nil { + return errors.Trace(err) + } hostedModelUUID, err := utils.NewUUID() if err != nil { return errors.Trace(err) @@ -448,15 +492,53 @@ modelConfigAttrs := map[string]interface{}{ "type": cloud.Type, "name": bootstrap.ControllerModelName, - config.UUIDKey: controllerUUID.String(), + config.UUIDKey: controllerModelUUID.String(), } + userConfigAttrs, err := 
c.config.ReadAttrs(ctx) if err != nil { return errors.Trace(err) } + modelDefaultConfigAttrs, err := c.modelDefaults.ReadAttrs(ctx) + if err != nil { + return errors.Trace(err) + } + + // The provider may define some custom attributes specific + // to the provider. These will be added to the model config. + providerAttrs := make(map[string]interface{}) + if ps, ok := provider.(config.ConfigSchemaSource); ok { + for attr := range ps.ConfigSchema() { + // Start with the model defaults, and if also specified + // in the user config attrs, they override the model default. + if v, ok := modelDefaultConfigAttrs[attr]; ok { + providerAttrs[attr] = v + } + if v, ok := userConfigAttrs[attr]; ok { + providerAttrs[attr] = v + } + } + fields := schema.FieldMap(ps.ConfigSchema(), ps.ConfigDefaults()) + if coercedAttrs, err := fields.Coerce(providerAttrs, nil); err != nil { + return errors.Annotatef(err, "invalid attribute value(s) for %v cloud", cloud.Type) + } else { + providerAttrs = coercedAttrs.(map[string]interface{}) + } + } + // Start with the model defaults, then add in user config attributes. + for k, v := range modelDefaultConfigAttrs { + modelConfigAttrs[k] = v + } for k, v := range userConfigAttrs { modelConfigAttrs[k] = v } + // Provider specific attributes are either already specified in model + // config (but may have been coerced), or were not present. Either way, + // copy them in. + logger.Debugf("provider attrs: %v", providerAttrs) + for k, v := range providerAttrs { + modelConfigAttrs[k] = v + } bootstrapConfigAttrs := make(map[string]interface{}) controllerConfigAttrs := make(map[string]interface{}) // Based on the attribute names in clouds.yaml, create @@ -473,6 +555,17 @@ } inheritedControllerAttrs[k] = v } + // Model defaults are added to the inherited controller attributes. + // Any command line set model defaults override what is in the cloud config. 
+ for k, v := range modelDefaultConfigAttrs { + switch { + case bootstrap.IsBootstrapAttribute(k): + return errors.Errorf("%q is a bootstrap only attribute, and cannot be set as a model-default", k) + case controller.ControllerOnlyAttribute(k): + return errors.Errorf("%q is a controller attribute, and cannot be set as a model-default", k) + } + inheritedControllerAttrs[k] = v + } for k, v := range modelConfigAttrs { switch { case bootstrap.IsBootstrapAttribute(k): @@ -483,7 +576,7 @@ delete(modelConfigAttrs, k) } } - bootstrapConfig, err := bootstrap.NewConfig(controllerUUID.String(), bootstrapConfigAttrs) + bootstrapConfig, err := bootstrap.NewConfig(bootstrapConfigAttrs) if err != nil { return errors.Annotate(err, "constructing bootstrap config") } @@ -493,6 +586,15 @@ if err != nil { return errors.Annotate(err, "constructing controller config") } + if controllerConfig.AutocertDNSName() != "" { + if _, ok := controllerConfigAttrs[controller.APIPort]; !ok { + // The configuration did not explicitly mention the API port, + // so default to 443 because it is not usually possible to + // obtain autocert certificates without listening on port 443. 
+ controllerConfig[controller.APIPort] = 443 + } + } + if err := common.FinalizeAuthorizedKeys(ctx, modelConfigAttrs); err != nil { return errors.Annotate(err, "finalizing authorized-keys") } @@ -550,12 +652,13 @@ ControllerConfig: controllerConfig, ControllerName: c.controllerName, Cloud: environs.CloudSpec{ - Type: cloud.Type, - Name: c.Cloud, - Region: region.Name, - Endpoint: region.Endpoint, - StorageEndpoint: region.StorageEndpoint, - Credential: credential, + Type: cloud.Type, + Name: c.Cloud, + Region: region.Name, + Endpoint: region.Endpoint, + IdentityEndpoint: region.IdentityEndpoint, + StorageEndpoint: region.StorageEndpoint, + Credential: credential, }, CredentialName: credentialName, AdminSecret: bootstrapConfig.AdminSecret, @@ -594,10 +697,10 @@ defer func() { if resultErr != nil { if c.KeepBrokenEnvironment { - logger.Infof(` -bootstrap failed but --keep-broken was specified so model is not being destroyed. -When you are finished diagnosing the problem, remember to run juju destroy-model --force -to clean up the model.`[1:]) + ctx.Infof(` +bootstrap failed but --keep-broken was specified so resources are not being destroyed. +When you have finished diagnosing the problem, remember to clean up the failed controller. 
+See `[1:] + "`juju kill-controller`" + `.`) } else { handleBootstrapError(ctx, resultErr, func() error { return environsDestroy( @@ -684,8 +787,8 @@ BootstrapSeries: c.BootstrapSeries, BootstrapImage: c.BootstrapImage, Placement: c.Placement, - UploadTools: c.UploadTools, - BuildToolsTarball: sync.BuildToolsTarball, + BuildAgent: c.BuildAgent, + BuildAgentTarball: sync.BuildAgentTarball, AgentVersion: c.AgentVersion, MetadataDir: metadataDir, Cloud: *cloud, @@ -695,6 +798,7 @@ CloudCredentialName: credentialName, ControllerConfig: controllerConfig, ControllerInheritedConfig: inheritedControllerAttrs, + RegionInheritedConfig: cloud.RegionConfig, HostedModelConfig: hostedModelConfig, GUIDataSourceBaseURL: guiDataSourceBaseURL, AdminSecret: bootstrapConfig.AdminSecret, @@ -713,7 +817,11 @@ return errors.Trace(err) } - err = common.SetBootstrapEndpointAddress(c.ClientStore(), c.controllerName, controllerConfig.APIPort(), environ) + agentVersion := jujuversion.Current + if c.AgentVersion != nil { + agentVersion = *c.AgentVersion + } + err = common.SetBootstrapEndpointAddress(c.ClientStore(), c.controllerName, agentVersion, controllerConfig.APIPort(), environ) if err != nil { return errors.Annotate(err, "saving bootstrap endpoint address") } @@ -721,7 +829,7 @@ // To avoid race conditions when running scripted bootstraps, wait // for the controller's machine agent to be ready to accept commands // before exiting this bootstrap command. 
- return waitForAgentInitialisation(ctx, &c.ModelCommandBase, c.controllerName) + return waitForAgentInitialisation(ctx, &c.ModelCommandBase, c.controllerName, c.hostedModelName) } // runInteractive queries the user about bootstrap config interactively at the @@ -754,11 +862,7 @@ } } - var username string - if u, err := user.Current(); err == nil { - username = u.Username - } - defName := defaultControllerName(username, c.Cloud, c.Region, cloud) + defName := defaultControllerName(c.Cloud, c.Region) c.controllerName, err = queryName(defName, scanner, ctx.Stdout) if err != nil { @@ -785,6 +889,7 @@ return jujucloud.Region{ "", // no region name cloud.Endpoint, + cloud.IdentityEndpoint, cloud.StorageEndpoint, }, nil } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/commands/bootstrap_interactive.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/commands/bootstrap_interactive.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/commands/bootstrap_interactive.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/commands/bootstrap_interactive.go 2016-10-13 14:31:49.000000000 +0000 @@ -91,15 +91,11 @@ return regionName, nil } -func defaultControllerName(username, cloudname, region string, cloud *jujucloud.Cloud) string { - name := cloudname - if len(cloud.Regions) > 1 { - name = region +func defaultControllerName(cloudname, region string) string { + if region == "" { + return cloudname } - if username == "" { - return name - } - return username + "-" + name + return cloudname + "-" + region } func queryName(defName string, scanner *bufio.Scanner, w io.Writer) (string, error) { diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/commands/bootstrap_interactive_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/commands/bootstrap_interactive_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/commands/bootstrap_interactive_test.go 2016-08-16 08:56:25.000000000 +0000 +++ 
juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/commands/bootstrap_interactive_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -15,6 +15,7 @@ jujucloud "github.com/juju/juju/cloud" jujutesting "github.com/juju/juju/testing" + "github.com/juju/juju/version" ) type BSInteractSuite struct { @@ -30,18 +31,18 @@ c.Assert(cmd.interactive, jc.IsTrue) } -func (BSInteractSuite) TestInitUploadTools(c *gc.C) { +func (BSInteractSuite) TestInitBuildAgent(c *gc.C) { cmd := &bootstrapCommand{} - err := jujutesting.InitCommand(cmd, []string{"--upload-tools"}) + err := jujutesting.InitCommand(cmd, []string{"--build-agent"}) c.Assert(err, jc.ErrorIsNil) c.Assert(cmd.interactive, jc.IsTrue) - c.Assert(cmd.UploadTools, jc.IsTrue) + c.Assert(cmd.BuildAgent, jc.IsTrue) } func (BSInteractSuite) TestInitArg(c *gc.C) { cmd := &bootstrapCommand{} err := jujutesting.InitCommand(cmd, []string{"foo"}) - c.Assert(err, gc.ErrorMatches, "controller name and cloud name are required") + c.Assert(err, jc.ErrorIsNil) c.Assert(cmd.interactive, jc.IsFalse) } @@ -52,13 +53,20 @@ c.Assert(cmd.interactive, jc.IsFalse) } -func (BSInteractSuite) TestInitOtherFlag(c *gc.C) { +func (BSInteractSuite) TestInitInfoOnlyFlag(c *gc.C) { cmd := &bootstrapCommand{} err := jujutesting.InitCommand(cmd, []string{"--clouds"}) c.Assert(err, jc.ErrorIsNil) c.Assert(cmd.interactive, jc.IsFalse) } +func (BSInteractSuite) TestInitVariousFlags(c *gc.C) { + cmd := &bootstrapCommand{} + err := jujutesting.InitCommand(cmd, []string{"--keep-broken", "--agent-version", version.Current.String()}) + c.Assert(err, jc.ErrorIsNil) + c.Assert(cmd.interactive, jc.IsTrue) +} + func (BSInteractSuite) TestQueryCloud(c *gc.C) { input := "search\n" diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/commands/bootstrap_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/commands/bootstrap_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/commands/bootstrap_test.go 2016-08-16 08:56:25.000000000 +0000 +++ 
juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/commands/bootstrap_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -10,6 +10,7 @@ "os" "path/filepath" "runtime" + "sort" "strings" "time" @@ -95,9 +96,11 @@ sourceDir := createToolsSource(c, vAll) s.PatchValue(&envtools.DefaultBaseURL, sourceDir) - s.PatchValue(&envtools.BundleTools, toolstesting.GetMockBundleTools(c)) + expectedNumber := jujuversion.Current + expectedNumber.Build = 1235 + s.PatchValue(&envtools.BundleTools, toolstesting.GetMockBundleTools(c, &expectedNumber)) - s.PatchValue(&waitForAgentInitialisation, func(*cmd.Context, *modelcmd.ModelCommandBase, string) error { + s.PatchValue(&waitForAgentInitialisation, func(*cmd.Context, *modelcmd.ModelCommandBase, string, string) error { return nil }) @@ -117,8 +120,30 @@ dummy.Reset(c) } +// bootstrapCommandWrapper wraps the bootstrap command. The wrapped command has +// the ability to disable fetching GUI information from simplestreams, so that +// it is possible to test the bootstrap process without connecting to the +// network. This ability can be turned on by setting disableGUI to true. 
+type bootstrapCommandWrapper struct { + bootstrapCommand + disableGUI bool +} + +func (c *bootstrapCommandWrapper) Run(ctx *cmd.Context) error { + if c.disableGUI { + c.bootstrapCommand.noGUI = true + } + return c.bootstrapCommand.Run(ctx) +} + func (s *BootstrapSuite) newBootstrapCommand() cmd.Command { - c := &bootstrapCommand{} + return s.newBootstrapCommandWrapper(true) +} + +func (s *BootstrapSuite) newBootstrapCommandWrapper(disableGUI bool) cmd.Command { + c := &bootstrapCommandWrapper{ + disableGUI: disableGUI, + } c.SetClientStore(s.store) return modelcmd.Wrap(c) } @@ -172,12 +197,19 @@ var restore testing.Restorer = func() { s.store = jujuclienttesting.NewMemStore() } + bootstrapVersion := v100p64 if test.version != "" { useVersion := strings.Replace(test.version, "%LTS%", series.LatestLts(), 1) - v := version.MustParseBinary(useVersion) - restore = restore.Add(testing.PatchValue(&jujuversion.Current, v.Number)) - restore = restore.Add(testing.PatchValue(&arch.HostArch, func() string { return v.Arch })) - restore = restore.Add(testing.PatchValue(&series.HostSeries, func() string { return v.Series })) + bootstrapVersion = version.MustParseBinary(useVersion) + restore = restore.Add(testing.PatchValue(&jujuversion.Current, bootstrapVersion.Number)) + restore = restore.Add(testing.PatchValue(&arch.HostArch, func() string { return bootstrapVersion.Arch })) + restore = restore.Add(testing.PatchValue(&series.HostSeries, func() string { return bootstrapVersion.Series })) + bootstrapVersion.Build = 1 + if test.upload != "" { + uploadVers := version.MustParseBinary(test.upload) + bootstrapVersion.Number = uploadVers.Number + } + restore = restore.Add(testing.PatchValue(&envtools.BundleTools, toolstesting.GetMockBundleTools(c, &bootstrapVersion.Number))) } if test.hostArch != "" { @@ -189,19 +221,24 @@ // Run command and check for uploads. 
args := append([]string{ - controllerName, cloudName, + cloudName, controllerName, "--config", "default-series=raring", }, test.args...) opc, errc := cmdtesting.RunCommand(cmdtesting.NullContext(c), s.newBootstrapCommand(), args...) + var err error + select { + case err = <-errc: + case <-time.After(coretesting.LongWait): + c.Fatal("timed out") + } // Check for remaining operations/errors. if test.err != "" { - err := <-errc c.Assert(err, gc.NotNil) stripped := strings.Replace(err.Error(), "\n", "", -1) c.Check(stripped, gc.Matches, test.err) return restore } - if !c.Check(<-errc, gc.IsNil) { + if !c.Check(err, gc.IsNil) { return restore } @@ -232,10 +269,16 @@ c.Assert(controller.UnresolvedAPIEndpoints, gc.DeepEquals, addrConnectedTo) c.Assert(controller.APIEndpoints, gc.DeepEquals, addrConnectedTo) c.Assert(utils.IsValidUUIDString(controller.ControllerUUID), jc.IsTrue) + // We don't care about build numbers here. + bootstrapVers := bootstrapVersion.Number + bootstrapVers.Build = 0 + controllerVers := version.MustParse(controller.AgentVersion) + controllerVers.Build = 0 + c.Assert(controllerVers.String(), gc.Equals, bootstrapVers.String()) - controllerModel, err := s.store.ModelByName(controllerName, "admin@local/controller") + controllerModel, err := s.store.ModelByName(controllerName, "admin/controller") c.Assert(err, jc.ErrorIsNil) - c.Assert(controllerModel.ModelUUID, gc.Equals, controller.ControllerUUID) + c.Assert(utils.IsValidUUIDString(controllerModel.ModelUUID), jc.IsTrue) // Bootstrap config should have been saved, and should only contain // the type, name, and any user-supplied configuration. 
@@ -248,6 +291,10 @@ "type": "dummy", "default-series": "raring", "authorized-keys": "public auth key\n", + // Dummy provider defaults + "broken": "", + "secret": "pork", + "controller": false, } for k, v := range config.ConfigDefaults() { if _, ok := expected[k]; !ok { @@ -264,7 +311,7 @@ }, { info: "bad --constraints", args: []string{"--constraints", "bad=wrong"}, - err: `invalid value "bad=wrong" for flag --constraints: unknown constraint "bad"`, + err: `unknown constraint "bad"`, }, { info: "conflicting --constraints", args: []string{"--constraints", "instance-type=foo mem=4G"}, @@ -276,41 +323,41 @@ err: `failed to bootstrap model: dummy.Bootstrap is broken`, }, { info: "constraints", - args: []string{"--constraints", "mem=4G cpu-cores=4"}, - constraints: constraints.MustParse("mem=4G cpu-cores=4"), + args: []string{"--constraints", "mem=4G cores=4"}, + constraints: constraints.MustParse("mem=4G cores=4"), }, { info: "bootstrap and environ constraints", - args: []string{"--constraints", "mem=4G cpu-cores=4", "--bootstrap-constraints", "mem=8G"}, - constraints: constraints.MustParse("mem=4G cpu-cores=4"), - bootstrapConstraints: constraints.MustParse("mem=8G cpu-cores=4"), + args: []string{"--constraints", "mem=4G cores=4", "--bootstrap-constraints", "mem=8G"}, + constraints: constraints.MustParse("mem=4G cores=4"), + bootstrapConstraints: constraints.MustParse("mem=8G cores=4"), }, { info: "unsupported constraint passed through but no error", - args: []string{"--constraints", "mem=4G cpu-cores=4 cpu-power=10"}, - constraints: constraints.MustParse("mem=4G cpu-cores=4 cpu-power=10"), + args: []string{"--constraints", "mem=4G cores=4 cpu-power=10"}, + constraints: constraints.MustParse("mem=4G cores=4 cpu-power=10"), }, { - info: "--upload-tools uses arch from constraint if it matches current version", + info: "--build-agent uses arch from constraint if it matches current version", version: "1.3.3-saucy-ppc64el", hostArch: "ppc64el", - args: 
[]string{"--upload-tools", "--constraints", "arch=ppc64el"}, + args: []string{"--build-agent", "--constraints", "arch=ppc64el"}, upload: "1.3.3.1-raring-ppc64el", // from jujuversion.Current constraints: constraints.MustParse("arch=ppc64el"), }, { - info: "--upload-tools rejects mismatched arch", + info: "--build-agent rejects mismatched arch", version: "1.3.3-saucy-amd64", hostArch: "amd64", - args: []string{"--upload-tools", "--constraints", "arch=ppc64el"}, - err: `failed to bootstrap model: cannot build tools for "ppc64el" using a machine running on "amd64"`, + args: []string{"--build-agent", "--constraints", "arch=ppc64el"}, + err: `failed to bootstrap model: cannot use agent built for "ppc64el" using a machine running on "amd64"`, }, { - info: "--upload-tools rejects non-supported arch", + info: "--build-agent rejects non-supported arch", version: "1.3.3-saucy-mips64", hostArch: "mips64", - args: []string{"--upload-tools"}, + args: []string{"--build-agent"}, err: fmt.Sprintf(`failed to bootstrap model: model %q of type dummy does not support instances running on "mips64"`, bootstrap.ControllerModelName), }, { - info: "--upload-tools always bumps build number", + info: "--build-agent always bumps build number", version: "1.2.3.4-raring-amd64", hostArch: "amd64", - args: []string{"--upload-tools"}, + args: []string{"--build-agent"}, upload: "1.2.3.5-raring-amd64", }, { info: "placement", @@ -325,9 +372,9 @@ args: []string{"anything", "else"}, err: `unrecognized args: \["anything" "else"\]`, }, { - info: "--agent-version with --upload-tools", - args: []string{"--agent-version", "1.1.0", "--upload-tools"}, - err: `--agent-version and --upload-tools can't be used together`, + info: "--agent-version with --build-agent", + args: []string{"--agent-version", "1.1.0", "--build-agent"}, + err: `--agent-version and --build-agent can't be used together`, }, { info: "invalid --agent-version value", args: []string{"--agent-version", "foo"}, @@ -346,15 +393,18 @@ info: 
"--clouds with --regions", args: []string{"--clouds", "--regions", "aws"}, err: `--clouds and --regions can't be used together`, +}, { + info: "specifying bootstrap attribute as model-default", + args: []string{"--model-default", "bootstrap-timeout=10"}, + err: `"bootstrap-timeout" is a bootstrap only attribute, and cannot be set as a model-default`, +}, { + info: "specifying controller attribute as model-default", + args: []string{"--model-default", "api-port=12345"}, + err: `"api-port" is a controller attribute, and cannot be set as a model-default`, }} -func (s *BootstrapSuite) TestRunCloudNameMissing(c *gc.C) { - _, err := coretesting.RunCommand(c, s.newBootstrapCommand(), "my-controller") - c.Check(err, gc.ErrorMatches, "controller name and cloud name are required") -} - func (s *BootstrapSuite) TestRunCloudNameUnknown(c *gc.C) { - _, err := coretesting.RunCommand(c, s.newBootstrapCommand(), "my-controller", "unknown") + _, err := coretesting.RunCommand(c, s.newBootstrapCommand(), "unknown", "my-controller") c.Check(err, gc.ErrorMatches, `unknown cloud "unknown", please try "juju update-clouds"`) } @@ -382,23 +432,60 @@ const controllerName = "dev" s.patchVersionAndSeries(c, "raring") - _, err := coretesting.RunCommand(c, s.newBootstrapCommand(), controllerName, "dummy", "--auto-upgrade") + _, err := coretesting.RunCommand(c, s.newBootstrapCommand(), "dummy", controllerName, "--auto-upgrade") c.Assert(err, jc.ErrorIsNil) - _, err = coretesting.RunCommand(c, s.newBootstrapCommand(), controllerName, "dummy", "--auto-upgrade") + _, err = coretesting.RunCommand(c, s.newBootstrapCommand(), "dummy", controllerName, "--auto-upgrade") c.Assert(err, gc.ErrorMatches, `controller "dev" already exists`) } +func (s *BootstrapSuite) TestBootstrapDefaultControllerName(c *gc.C) { + s.patchVersionAndSeries(c, "raring") + + _, err := coretesting.RunCommand(c, s.newBootstrapCommand(), "dummy-cloud/region-1", "--auto-upgrade") + c.Assert(err, jc.ErrorIsNil) + currentController := 
s.store.CurrentControllerName + c.Assert(currentController, gc.Equals, "dummy-cloud-region-1") + details, err := s.store.ControllerByName(currentController) + c.Assert(err, jc.ErrorIsNil) + c.Assert(*details.ModelCount, gc.Equals, 2) + c.Assert(*details.MachineCount, gc.Equals, 1) + c.Assert(details.AgentVersion, gc.Equals, jujuversion.Current.String()) +} + +func (s *BootstrapSuite) TestBootstrapDefaultControllerNameNoRegions(c *gc.C) { + s.patchVersionAndSeries(c, "raring") + + _, err := coretesting.RunCommand(c, s.newBootstrapCommand(), "no-cloud-regions", "--auto-upgrade") + c.Assert(err, jc.ErrorIsNil) + currentController := s.store.CurrentControllerName + c.Assert(currentController, gc.Equals, "no-cloud-regions") +} + func (s *BootstrapSuite) TestBootstrapSetsCurrentModel(c *gc.C) { s.patchVersionAndSeries(c, "raring") - _, err := coretesting.RunCommand(c, s.newBootstrapCommand(), "devcontroller", "dummy", "--auto-upgrade") + _, err := coretesting.RunCommand(c, s.newBootstrapCommand(), "dummy", "devcontroller", "--auto-upgrade") c.Assert(err, jc.ErrorIsNil) currentController := s.store.CurrentControllerName c.Assert(currentController, gc.Equals, "devcontroller") modelName, err := s.store.CurrentModel(currentController) c.Assert(err, jc.ErrorIsNil) - c.Assert(modelName, gc.Equals, "admin@local/default") + c.Assert(modelName, gc.Equals, "admin/default") +} + +func (s *BootstrapSuite) TestBootstrapSetsControllerDetails(c *gc.C) { + s.patchVersionAndSeries(c, "raring") + + _, err := coretesting.RunCommand(c, s.newBootstrapCommand(), "dummy", "devcontroller", "--auto-upgrade") + c.Assert(err, jc.ErrorIsNil) + currentController := s.store.CurrentControllerName + c.Assert(currentController, gc.Equals, "devcontroller") + details, err := s.store.ControllerByName(currentController) + c.Assert(err, jc.ErrorIsNil) + c.Assert(*details.ModelCount, gc.Equals, 2) + c.Assert(*details.MachineCount, gc.Equals, 1) + c.Assert(details.AgentVersion, gc.Equals, 
jujuversion.Current.String()) } func (s *BootstrapSuite) TestBootstrapDefaultModel(c *gc.C) { @@ -411,7 +498,7 @@ coretesting.RunCommand( c, s.newBootstrapCommand(), - "devcontroller", "dummy", + "dummy", "devcontroller", "--auto-upgrade", "--default-model", "mymodel", "--config", "foo=bar", @@ -429,7 +516,7 @@ return &bootstrap }) coretesting.RunCommand( - c, s.newBootstrapCommand(), "devcontroller", "dummy", "--auto-upgrade", + c, s.newBootstrapCommand(), "dummy", "devcontroller", "--auto-upgrade", "--config", "bootstrap-timeout=99", ) c.Assert(bootstrap.args.DialOpts.Timeout, gc.Equals, 99*time.Second) @@ -448,7 +535,7 @@ c.Assert(err, jc.ErrorIsNil) coretesting.RunCommand( c, s.newBootstrapCommand(), - "devcontroller", "dummy", + "dummy", "devcontroller", "--auto-upgrade", "--config", "authorized-keys-path="+fakeSSHFile, ) @@ -456,6 +543,29 @@ c.Assert(ok, jc.IsFalse) } +func (s *BootstrapSuite) TestBootstrapModelDefaultConfig(c *gc.C) { + s.patchVersionAndSeries(c, "raring") + + var bootstrap fakeBootstrapFuncs + s.PatchValue(&getBootstrapFuncs, func() BootstrapInterface { + return &bootstrap + }) + + coretesting.RunCommand( + c, s.newBootstrapCommand(), + "dummy", "devcontroller", + "--model-default", "network=foo", + "--model-default", "ftp-proxy=model-proxy", + "--config", "ftp-proxy=controller-proxy", + ) + + c.Check(bootstrap.args.HostedModelConfig["network"], gc.Equals, "foo") + c.Check(bootstrap.args.ControllerInheritedConfig["network"], gc.Equals, "foo") + + c.Check(bootstrap.args.HostedModelConfig["ftp-proxy"], gc.Equals, "controller-proxy") + c.Check(bootstrap.args.ControllerInheritedConfig["ftp-proxy"], gc.Equals, "model-proxy") +} + func (s *BootstrapSuite) TestBootstrapDefaultConfigStripsInheritedAttributes(c *gc.C) { s.patchVersionAndSeries(c, "raring") @@ -469,7 +579,7 @@ c.Assert(err, jc.ErrorIsNil) coretesting.RunCommand( c, s.newBootstrapCommand(), - "devcontroller", "dummy", + "dummy", "devcontroller", "--auto-upgrade", "--config", 
"authorized-keys=ssh-key", "--config", "agent-version=1.19.0", @@ -487,7 +597,7 @@ s.PatchValue(&getBootstrapFuncs, func() BootstrapInterface { return &bootstrap }) - coretesting.RunCommand(c, s.newBootstrapCommand(), "devcontroller", "dummy") + coretesting.RunCommand(c, s.newBootstrapCommandWrapper(false), "dummy", "devcontroller") c.Assert(bootstrap.args.GUIDataSourceBaseURL, gc.Equals, gui.DefaultBaseURL) } @@ -500,7 +610,7 @@ return &bootstrap }) - coretesting.RunCommand(c, s.newBootstrapCommand(), "devcontroller", "dummy") + coretesting.RunCommand(c, s.newBootstrapCommandWrapper(false), "dummy", "devcontroller") c.Assert(bootstrap.args.GUIDataSourceBaseURL, gc.Equals, "https://1.2.3.4/gui/streams") } @@ -511,7 +621,7 @@ s.PatchValue(&getBootstrapFuncs, func() BootstrapInterface { return &bootstrap }) - coretesting.RunCommand(c, s.newBootstrapCommand(), "devcontroller", "dummy", "--no-gui") + coretesting.RunCommand(c, s.newBootstrapCommandWrapper(false), "dummy", "devcontroller", "--no-gui") c.Assert(bootstrap.args.GUIDataSourceBaseURL, gc.Equals, "") } @@ -533,8 +643,8 @@ store.SetErrors(errors.New("oh noes")) cmd := &bootstrapCommand{} cmd.SetClientStore(store) - wrapped := modelcmd.Wrap(cmd, modelcmd.ModelSkipFlags, modelcmd.ModelSkipDefault) - _, err := coretesting.RunCommand(c, wrapped, controllerName, "dummy", "--auto-upgrade") + wrapped := modelcmd.Wrap(cmd, modelcmd.WrapSkipModelFlags, modelcmd.WrapSkipDefaultModel) + _, err := coretesting.RunCommand(c, wrapped, "dummy", controllerName, "--auto-upgrade") store.CheckCallNames(c, "CredentialForCloud") c.Assert(err, gc.ErrorMatches, `loading credentials: oh noes`) } @@ -554,22 +664,36 @@ jujuclient.ClientStore, bootstrap.PrepareParams, ) (environs.Environ, error) { - return nil, fmt.Errorf("mock-prepare") + return nil, errors.New("mock-prepare") }) ctx := coretesting.Context(c) _, errc := cmdtesting.RunCommand( ctx, s.newBootstrapCommand(), - "devcontroller", "dummy", + "dummy", "devcontroller", ) 
c.Check(<-errc, gc.ErrorMatches, ".*mock-prepare$") c.Check(destroyed, jc.IsFalse) } -func (s *BootstrapSuite) writeControllerModelAccountInfo(c *gc.C, controller, model, user string) { - err := s.store.UpdateController(controller, jujuclient.ControllerDetails{ - CACert: "x", - ControllerUUID: "y", +type controllerModelAccountParams struct { + controller string + controllerUUID string + model string + user string +} + +func (s *BootstrapSuite) writeControllerModelAccountInfo(c *gc.C, context *controllerModelAccountParams) { + controller := context.controller + model := context.model + user := context.user + controllerUUID := "a-uuid" + if context.controllerUUID != "" { + controllerUUID = context.controllerUUID + } + err := s.store.AddController(controller, jujuclient.ControllerDetails{ + CACert: "a-cert", + ControllerUUID: controllerUUID, }) c.Assert(err, jc.ErrorIsNil) err = s.store.SetCurrentController(controller) @@ -594,32 +718,48 @@ jujuclient.ClientStore, bootstrap.PrepareParams, ) (environs.Environ, error) { - s.writeControllerModelAccountInfo(c, "foo", "foobar@local/bar", "foobar@local") - return nil, fmt.Errorf("mock-prepare") + ctx := controllerModelAccountParams{ + controller: "foo", + model: "foobar/bar", + user: "foobar", + } + s.writeControllerModelAccountInfo(c, &ctx) + return nil, errors.New("mock-prepare") }) - s.writeControllerModelAccountInfo(c, "olddevcontroller", "fred@local/fredmodel", "fred@local") - _, err := coretesting.RunCommand(c, s.newBootstrapCommand(), "devcontroller", "dummy", "--auto-upgrade") + ctx := controllerModelAccountParams{ + controller: "olddevcontroller", + controllerUUID: "another-uuid", + model: "fred/fredmodel", + user: "fred", + } + s.writeControllerModelAccountInfo(c, &ctx) + _, err := coretesting.RunCommand(c, s.newBootstrapCommand(), "dummy", "devcontroller", "--auto-upgrade") c.Assert(err, gc.ErrorMatches, "mock-prepare") currentController := s.store.CurrentControllerName c.Assert(currentController, gc.Equals, 
"olddevcontroller") accountDetails, err := s.store.AccountDetails(currentController) c.Assert(err, jc.ErrorIsNil) - c.Assert(accountDetails.User, gc.Equals, "fred@local") + c.Assert(accountDetails.User, gc.Equals, "fred") currentModel, err := s.store.CurrentModel(currentController) c.Assert(err, jc.ErrorIsNil) - c.Assert(currentModel, gc.Equals, "fred@local/fredmodel") + c.Assert(currentModel, gc.Equals, "fred/fredmodel") } func (s *BootstrapSuite) TestBootstrapAlreadyExists(c *gc.C) { const controllerName = "devcontroller" s.patchVersionAndSeries(c, "raring") - s.writeControllerModelAccountInfo(c, "devcontroller", "fred@local/fredmodel", "fred@local") + cmaCtx := controllerModelAccountParams{ + controller: "devcontroller", + model: "fred/fredmodel", + user: "fred", + } + s.writeControllerModelAccountInfo(c, &cmaCtx) ctx := coretesting.Context(c) - _, errc := cmdtesting.RunCommand(ctx, s.newBootstrapCommand(), controllerName, "dummy", "--auto-upgrade") + _, errc := cmdtesting.RunCommand(ctx, s.newBootstrapCommand(), "dummy", controllerName, "--auto-upgrade") err := <-errc c.Assert(err, jc.Satisfies, errors.IsAlreadyExists) c.Assert(err, gc.ErrorMatches, fmt.Sprintf(`controller %q already exists`, controllerName)) @@ -627,10 +767,10 @@ c.Assert(currentController, gc.Equals, "devcontroller") accountDetails, err := s.store.AccountDetails(currentController) c.Assert(err, jc.ErrorIsNil) - c.Assert(accountDetails.User, gc.Equals, "fred@local") + c.Assert(accountDetails.User, gc.Equals, "fred") currentModel, err := s.store.CurrentModel(currentController) c.Assert(err, jc.ErrorIsNil) - c.Assert(currentModel, gc.Equals, "fred@local/fredmodel") + c.Assert(currentModel, gc.Equals, "fred/fredmodel") } func (s *BootstrapSuite) TestInvalidLocalSource(c *gc.C) { @@ -641,9 +781,9 @@ // The command returns with an error. 
_, err := coretesting.RunCommand( c, s.newBootstrapCommand(), "--metadata-source", c.MkDir(), - "devcontroller", "dummy", + "dummy", "devcontroller", ) - c.Check(err, gc.ErrorMatches, `failed to bootstrap model: Juju cannot bootstrap because no tools are available for your model(.|\n)*`) + c.Check(err, gc.ErrorMatches, `failed to bootstrap model: Juju cannot bootstrap because no agent binaries are available for your model(.|\n)*`) } // createImageMetadata creates some image metadata in a local directory. @@ -682,7 +822,7 @@ coretesting.RunCommand( c, s.newBootstrapCommand(), "--metadata-source", sourceDir, "--constraints", "mem=4G", - "devcontroller", "dummy-cloud/region-1", + "dummy-cloud/region-1", "devcontroller", "--config", "default-series=raring", ) c.Assert(bootstrap.args.MetadataDir, gc.Equals, sourceDir) @@ -703,7 +843,7 @@ coretesting.RunCommand( c, s.newBootstrapCommand(), "--agent-version", vers, - "devcontroller", "dummy-cloud/region-1", + "dummy-cloud/region-1", "devcontroller", "--config", "default-series=raring", ) c.Assert(bootstrap.args.AgentVersion, gc.NotNil) @@ -728,7 +868,7 @@ coretesting.RunCommand( c, s.newBootstrapCommand(), "--auto-upgrade", - "devcontroller", "dummy-cloud/region-1", + "dummy-cloud/region-1", "devcontroller", ) c.Assert(bootstrap.args.AgentVersion, gc.IsNil) } @@ -744,11 +884,13 @@ // are automatically synchronized. 
_, err := coretesting.RunCommand( c, s.newBootstrapCommand(), "--metadata-source", sourceDir, - "devcontroller", "dummy-cloud/region-1", "--config", "default-series=trusty", + "dummy-cloud/region-1", "devcontroller", "--config", "default-series=trusty", ) c.Assert(err, jc.ErrorIsNil) - bootstrapConfig, params, err := modelcmd.NewGetBootstrapConfigParamsFunc(s.store)("devcontroller") + bootstrapConfig, params, err := modelcmd.NewGetBootstrapConfigParamsFunc( + coretesting.Context(c), s.store, + )("devcontroller") c.Assert(err, jc.ErrorIsNil) provider, err := environs.Provider(bootstrapConfig.CloudType) c.Assert(err, jc.ErrorIsNil) @@ -795,7 +937,9 @@ } func (s *BootstrapSuite) setupAutoUploadTest(c *gc.C, vers, ser string) { - s.PatchValue(&envtools.BundleTools, toolstesting.GetMockBundleTools(c)) + patchedVersion := version.MustParse(vers) + patchedVersion.Build = 1 + s.PatchValue(&envtools.BundleTools, toolstesting.GetMockBundleTools(c, &patchedVersion)) sourceDir := createToolsSource(c, vAll) s.PatchValue(&envtools.DefaultBaseURL, sourceDir) @@ -818,63 +962,56 @@ // the current juju version. 
opc, errc := cmdtesting.RunCommand( cmdtesting.NullContext(c), s.newBootstrapCommand(), - "devcontroller", "dummy-cloud/region-1", + "dummy-cloud/region-1", "devcontroller", "--config", "default-series=raring", "--auto-upgrade", ) - c.Assert(<-errc, gc.IsNil) + select { + case err := <-errc: + c.Assert(err, jc.ErrorIsNil) + case <-time.After(coretesting.LongWait): + c.Fatal("timed out") + } c.Check((<-opc).(dummy.OpBootstrap).Env, gc.Equals, bootstrap.ControllerModelName) icfg := (<-opc).(dummy.OpFinalizeBootstrap).InstanceConfig c.Assert(icfg, gc.NotNil) c.Assert(icfg.AgentVersion().String(), gc.Equals, "1.7.3.1-raring-"+arch.HostArch()) } -func (s *BootstrapSuite) TestAutoUploadOnlyForDev(c *gc.C) { - s.setupAutoUploadTest(c, "1.8.3", "precise") - _, errc := cmdtesting.RunCommand( - cmdtesting.NullContext(c), s.newBootstrapCommand(), - "devcontroller", "dummy-cloud/region-1", - ) - err := <-errc - c.Assert(err, gc.ErrorMatches, - "failed to bootstrap model: Juju cannot bootstrap because no tools are available for your model(.|\n)*") -} - func (s *BootstrapSuite) TestMissingToolsError(c *gc.C) { s.setupAutoUploadTest(c, "1.8.3", "precise") _, err := coretesting.RunCommand(c, s.newBootstrapCommand(), - "devcontroller", "dummy-cloud/region-1", - "--config", "default-series=raring", + "dummy-cloud/region-1", "devcontroller", + "--config", "default-series=raring", "--agent-version=1.8.4", ) c.Assert(err, gc.ErrorMatches, - "failed to bootstrap model: Juju cannot bootstrap because no tools are available for your model(.|\n)*") + "failed to bootstrap model: Juju cannot bootstrap because no agent binaries are available for your model(.|\n)*") } func (s *BootstrapSuite) TestMissingToolsUploadFailedError(c *gc.C) { - buildToolsTarballAlwaysFails := func(forceVersion *version.Number, stream string) (*sync.BuiltTools, error) { - return nil, fmt.Errorf("an error") + BuildAgentTarballAlwaysFails := func(build bool, forceVersion *version.Number, stream string) 
(*sync.BuiltAgent, error) { + return nil, errors.New("an error") } s.setupAutoUploadTest(c, "1.7.3", "precise") - s.PatchValue(&sync.BuildToolsTarball, buildToolsTarballAlwaysFails) + s.PatchValue(&sync.BuildAgentTarball, BuildAgentTarballAlwaysFails) ctx, err := coretesting.RunCommand( c, s.newBootstrapCommand(), - "devcontroller", "dummy-cloud/region-1", + "dummy-cloud/region-1", "devcontroller", "--config", "default-series=raring", "--config", "agent-stream=proposed", - "--auto-upgrade", + "--auto-upgrade", "--agent-version=1.7.3", ) - c.Check(coretesting.Stderr(ctx), gc.Equals, fmt.Sprintf(` + c.Check(coretesting.Stderr(ctx), gc.Equals, ` Creating Juju controller "devcontroller" on dummy-cloud/region-1 -Bootstrapping model %q -Starting new instance for initial controller -Building tools to upload (1.7.3.1-raring-%s) -`[1:], bootstrap.ControllerModelName, arch.HostArch())) - c.Check(err, gc.ErrorMatches, "failed to bootstrap model: cannot upload bootstrap tools: an error") +Looking for packaged Juju agent version 1.7.3 for amd64 +No packaged binary found, preparing local Juju agent binary +`[1:]) + c.Check(err, gc.ErrorMatches, "failed to bootstrap model: cannot package bootstrap agent binary: an error") } func (s *BootstrapSuite) TestBootstrapDestroy(c *gc.C) { @@ -883,12 +1020,17 @@ opc, errc := cmdtesting.RunCommand( cmdtesting.NullContext(c), s.newBootstrapCommand(), - "devcontroller", "dummy-cloud/region-1", + "dummy-cloud/region-1", "devcontroller", "--config", "broken=Bootstrap Destroy", "--auto-upgrade", ) - err := <-errc - c.Assert(err, gc.ErrorMatches, "failed to bootstrap model: dummy.Bootstrap is broken") + select { + case err := <-errc: + c.Assert(err, gc.ErrorMatches, "failed to bootstrap model: dummy.Bootstrap is broken") + case <-time.After(coretesting.LongWait): + c.Fatal("timed out") + } + var opDestroy *dummy.OpDestroy for opDestroy == nil { select { @@ -909,14 +1051,19 @@ resetJujuXDGDataHome(c) s.patchVersion(c) - opc, errc := 
cmdtesting.RunCommand(cmdtesting.NullContext(c), s.newBootstrapCommand(), + ctx := coretesting.Context(c) + opc, errc := cmdtesting.RunCommand(ctx, s.newBootstrapCommand(), "--keep-broken", - "devcontroller", "dummy-cloud/region-1", + "dummy-cloud/region-1", "devcontroller", "--config", "broken=Bootstrap Destroy", "--auto-upgrade", ) - err := <-errc - c.Assert(err, gc.ErrorMatches, "failed to bootstrap model: dummy.Bootstrap is broken") + select { + case err := <-errc: + c.Assert(err, gc.ErrorMatches, "failed to bootstrap model: dummy.Bootstrap is broken") + case <-time.After(coretesting.LongWait): + c.Fatal("timed out") + } done := false for !done { select { @@ -934,23 +1081,25 @@ break } } + stderr := strings.Replace(coretesting.Stderr(ctx), "\n", " ", -1) + c.Assert(stderr, gc.Matches, `.*See .*juju kill\-controller.*`) } func (s *BootstrapSuite) TestBootstrapUnknownCloudOrProvider(c *gc.C) { s.patchVersionAndSeries(c, "raring") - _, err := coretesting.RunCommand(c, s.newBootstrapCommand(), "ctrl", "no-such-provider") + _, err := coretesting.RunCommand(c, s.newBootstrapCommand(), "no-such-provider", "ctrl") c.Assert(err, gc.ErrorMatches, `unknown cloud "no-such-provider", please try "juju update-clouds"`) } func (s *BootstrapSuite) TestBootstrapProviderNoRegionDetection(c *gc.C) { s.patchVersionAndSeries(c, "raring") - _, err := coretesting.RunCommand(c, s.newBootstrapCommand(), "ctrl", "no-cloud-region-detection") + _, err := coretesting.RunCommand(c, s.newBootstrapCommand(), "no-cloud-region-detection", "ctrl") c.Assert(err, gc.ErrorMatches, `unknown cloud "no-cloud-region-detection", please try "juju update-clouds"`) } func (s *BootstrapSuite) TestBootstrapProviderNoRegions(c *gc.C) { ctx, err := coretesting.RunCommand( - c, s.newBootstrapCommand(), "ctrl", "no-cloud-regions", + c, s.newBootstrapCommand(), "no-cloud-regions", "ctrl", "--config", "default-series=precise", ) c.Check(coretesting.Stderr(ctx), gc.Matches, "Creating Juju controller \"ctrl\" on 
no-cloud-regions(.|\n)*") @@ -960,7 +1109,7 @@ func (s *BootstrapSuite) TestBootstrapCloudNoRegions(c *gc.C) { resetJujuXDGDataHome(c) ctx, err := coretesting.RunCommand( - c, s.newBootstrapCommand(), "ctrl", "dummy-cloud-without-regions", + c, s.newBootstrapCommand(), "dummy-cloud-without-regions", "ctrl", "--config", "default-series=precise", ) c.Check(coretesting.Stderr(ctx), gc.Matches, "Creating Juju controller \"ctrl\" on dummy-cloud-without-regions(.|\n)*") @@ -970,29 +1119,29 @@ func (s *BootstrapSuite) TestBootstrapCloudNoRegionsOneSpecified(c *gc.C) { resetJujuXDGDataHome(c) ctx, err := coretesting.RunCommand( - c, s.newBootstrapCommand(), "ctrl", "dummy-cloud-without-regions/my-region", + c, s.newBootstrapCommand(), "dummy-cloud-without-regions/my-region", "ctrl", "--config", "default-series=precise", ) c.Check(coretesting.Stderr(ctx), gc.Matches, - "region \"my-region\" not found \\(expected one of \\[\\]\\)\n\n.*") + "region \"my-region\" not found \\(expected one of \\[\\]\\)\n\n.*\n") c.Assert(err, gc.Equals, cmd.ErrSilent) } func (s *BootstrapSuite) TestBootstrapProviderNoCredentials(c *gc.C) { s.patchVersionAndSeries(c, "raring") - _, err := coretesting.RunCommand(c, s.newBootstrapCommand(), "ctrl", "no-credentials") + _, err := coretesting.RunCommand(c, s.newBootstrapCommand(), "no-credentials", "ctrl") c.Assert(err, gc.ErrorMatches, `detecting credentials for "no-credentials" cloud provider: credentials not found`) } -func (s *BootstrapSuite) TestBootstrapProviderManyCredentials(c *gc.C) { +func (s *BootstrapSuite) TestBootstrapProviderManyDetectedCredentials(c *gc.C) { s.patchVersionAndSeries(c, "raring") - _, err := coretesting.RunCommand(c, s.newBootstrapCommand(), "ctrl", "many-credentials") - c.Assert(err, gc.ErrorMatches, ambiguousCredentialError.Error()) + _, err := coretesting.RunCommand(c, s.newBootstrapCommand(), "many-credentials", "ctrl") + c.Assert(err, gc.ErrorMatches, ambiguousDetectedCredentialError.Error()) } func (s 
*BootstrapSuite) TestBootstrapProviderDetectRegionsInvalid(c *gc.C) { s.patchVersionAndSeries(c, "raring") - ctx, err := coretesting.RunCommand(c, s.newBootstrapCommand(), "ctrl", "dummy/not-dummy") + ctx, err := coretesting.RunCommand(c, s.newBootstrapCommand(), "dummy/not-dummy", "ctrl") c.Assert(err, gc.Equals, cmd.ErrSilent) stderr := strings.Replace(coretesting.Stderr(ctx), "\n", "", -1) c.Assert(stderr, gc.Matches, `region "not-dummy" not found \(expected one of \["dummy"\]\)Specify an alternative region, or try "juju update-clouds".`) @@ -1010,13 +1159,33 @@ AuthCredentials: map[string]cloud.Credential{"one": cloud.NewCredential("one", nil)}, }, } - coretesting.RunCommand(c, s.newBootstrapCommand(), "ctrl", - "many-credentials-no-auth-types", + coretesting.RunCommand(c, s.newBootstrapCommand(), + "many-credentials-no-auth-types", "ctrl", "--credential", "one", ) c.Assert(bootstrap.args.Cloud.AuthTypes, jc.SameContents, cloud.AuthTypes{"one", "two"}) } +func (s *BootstrapSuite) TestManyAvailableCredentialsNoneSpecified(c *gc.C) { + var bootstrap fakeBootstrapFuncs + s.PatchValue(&getBootstrapFuncs, func() BootstrapInterface { + return &bootstrap + }) + + s.patchVersionAndSeries(c, "raring") + s.store.Credentials = map[string]cloud.CloudCredential{ + "dummy": { + AuthCredentials: map[string]cloud.Credential{ + "one": cloud.NewCredential("one", nil), + "two": cloud.NewCredential("two", nil), + }, + }, + } + _, err := coretesting.RunCommand(c, s.newBootstrapCommand(), "dummy", "ctrl") + msg := strings.Replace(err.Error(), "\n", "", -1) + c.Assert(msg, gc.Matches, "more than one credential is available.*") +} + func (s *BootstrapSuite) TestBootstrapProviderDetectRegions(c *gc.C) { resetJujuXDGDataHome(c) @@ -1029,12 +1198,13 @@ }) s.patchVersionAndSeries(c, "raring") - coretesting.RunCommand(c, s.newBootstrapCommand(), "ctrl", "dummy") + coretesting.RunCommand(c, s.newBootstrapCommand(), "dummy", "ctrl") c.Assert(bootstrap.args.CloudRegion, gc.Equals, "bruce") 
c.Assert(bootstrap.args.CloudCredentialName, gc.Equals, "default") + sort.Sort(bootstrap.args.Cloud.AuthTypes) c.Assert(bootstrap.args.Cloud, jc.DeepEquals, cloud.Cloud{ Type: "dummy", - AuthTypes: []cloud.AuthType{cloud.EmptyAuthType}, + AuthTypes: []cloud.AuthType{cloud.EmptyAuthType, cloud.UserPassAuthType}, Regions: []cloud.Region{{Name: "bruce", Endpoint: "endpoint"}}, }) } @@ -1051,11 +1221,12 @@ }) s.patchVersionAndSeries(c, "raring") - coretesting.RunCommand(c, s.newBootstrapCommand(), "ctrl", "dummy") + coretesting.RunCommand(c, s.newBootstrapCommand(), "dummy", "ctrl") c.Assert(bootstrap.args.CloudRegion, gc.Equals, "") + sort.Sort(bootstrap.args.Cloud.AuthTypes) c.Assert(bootstrap.args.Cloud, jc.DeepEquals, cloud.Cloud{ Type: "dummy", - AuthTypes: []cloud.AuthType{cloud.EmptyAuthType}, + AuthTypes: []cloud.AuthType{cloud.EmptyAuthType, cloud.UserPassAuthType}, }) } @@ -1069,10 +1240,10 @@ params bootstrap.PrepareParams, ) (environs.Environ, error) { prepareParams = params - return nil, fmt.Errorf("mock-prepare") + return nil, errors.New("mock-prepare") }) - _, err := coretesting.RunCommand(c, s.newBootstrapCommand(), "ctrl", "dummy/DUMMY") + _, err := coretesting.RunCommand(c, s.newBootstrapCommand(), "dummy/DUMMY", "ctrl") c.Assert(err, gc.ErrorMatches, "mock-prepare") c.Assert(prepareParams.Cloud.Region, gc.Equals, "dummy") } @@ -1085,10 +1256,10 @@ s.patchVersionAndSeries(c, "raring") _, err = coretesting.RunCommand( - c, s.newBootstrapCommand(), "ctrl", "dummy", + c, s.newBootstrapCommand(), "dummy", "ctrl", "--config", configFile, ) - c.Assert(err, gc.ErrorMatches, `controller: expected bool, got string.*`) + c.Assert(err, gc.ErrorMatches, `invalid attribute value\(s\) for dummy cloud: controller: expected bool, got string.*`) } func (s *BootstrapSuite) TestBootstrapMultipleConfigFiles(c *gc.C) { @@ -1105,7 +1276,7 @@ s.patchVersionAndSeries(c, "raring") _, err = coretesting.RunCommand( - c, s.newBootstrapCommand(), "ctrl", "dummy", + c, 
s.newBootstrapCommand(), "dummy", "ctrl", "--auto-upgrade", // the second config file should replace attributes // with the same name from the first, but leave the @@ -1124,7 +1295,7 @@ s.patchVersionAndSeries(c, "raring") _, err = coretesting.RunCommand( - c, s.newBootstrapCommand(), "ctrl", "dummy", + c, s.newBootstrapCommand(), "dummy", "ctrl", "--auto-upgrade", // Configuration specified on the command line overrides // anything specified in files, no matter what the order. @@ -1134,16 +1305,43 @@ c.Assert(err, jc.ErrorIsNil) } +func (s *BootstrapSuite) TestBootstrapAutocertDNSNameDefaultPort(c *gc.C) { + s.patchVersionAndSeries(c, "raring") + var bootstrap fakeBootstrapFuncs + s.PatchValue(&getBootstrapFuncs, func() BootstrapInterface { + return &bootstrap + }) + coretesting.RunCommand( + c, s.newBootstrapCommand(), "dummy", "ctrl", + "--config", "autocert-dns-name=foo.example", + ) + c.Assert(bootstrap.args.ControllerConfig.APIPort(), gc.Equals, 443) +} + +func (s *BootstrapSuite) TestBootstrapAutocertDNSNameExplicitAPIPort(c *gc.C) { + s.patchVersionAndSeries(c, "raring") + var bootstrap fakeBootstrapFuncs + s.PatchValue(&getBootstrapFuncs, func() BootstrapInterface { + return &bootstrap + }) + coretesting.RunCommand( + c, s.newBootstrapCommand(), "dummy", "ctrl", + "--config", "autocert-dns-name=foo.example", + "--config", "api-port=12345", + ) + c.Assert(bootstrap.args.ControllerConfig.APIPort(), gc.Equals, 12345) +} + func (s *BootstrapSuite) TestBootstrapCloudConfigAndAdHoc(c *gc.C) { s.patchVersionAndSeries(c, "raring") _, err := coretesting.RunCommand( - c, s.newBootstrapCommand(), "ctrl", "dummy-cloud-with-config", + c, s.newBootstrapCommand(), "dummy-cloud-with-config", "ctrl", "--auto-upgrade", // Configuration specified on the command line overrides // anything specified in files, no matter what the order. 
- "--config", "controller=false", + "--config", "controller=not-a-bool", ) - c.Assert(err, gc.ErrorMatches, "failed to bootstrap model: dummy.Bootstrap is broken") + c.Assert(err, gc.ErrorMatches, `invalid attribute value\(s\) for dummy cloud: controller: expected bool, got .*`) } func (s *BootstrapSuite) TestBootstrapPrintClouds(c *gc.C) { @@ -1205,6 +1403,7 @@ us-west-2 eu-west-1 eu-central-1 +ap-south-1 ap-southeast-1 ap-southeast-2 ap-northeast-1 @@ -1232,7 +1431,7 @@ // Record the controller name seen by ModelCommandBase at the end of bootstrap. var seenControllerName string - s.PatchValue(&waitForAgentInitialisation, func(_ *cmd.Context, base *modelcmd.ModelCommandBase, _ string) error { + s.PatchValue(&waitForAgentInitialisation, func(_ *cmd.Context, base *modelcmd.ModelCommandBase, _, _ string) error { seenControllerName = base.ControllerName() return nil }) @@ -1248,7 +1447,7 @@ close(opc) }() com := s.newBootstrapCommand() - args := []string{controllerName, "dummy", "--auto-upgrade"} + args := []string{"dummy", controllerName, "--auto-upgrade"} if err := coretesting.InitCommand(com, args); err != nil { errc <- err return @@ -1268,7 +1467,7 @@ // Simulate another controller being bootstrapped during the // bootstrap. Changing the current controller shouldn't affect the // bootstrap process. - c.Assert(s.store.UpdateController("another", jujuclient.ControllerDetails{ + c.Assert(s.store.AddController("another", jujuclient.ControllerDetails{ ControllerUUID: "uuid", CACert: "cert", }), jc.ErrorIsNil) @@ -1318,12 +1517,8 @@ // resetJujuXDGDataHome restores an new, clean Juju home environment without tools. 
func resetJujuXDGDataHome(c *gc.C) { - jenvDir := testing.JujuXDGDataHomePath("models") - err := os.RemoveAll(jenvDir) - c.Assert(err, jc.ErrorIsNil) - cloudsPath := cloud.JujuPersonalCloudsPath() - err = ioutil.WriteFile(cloudsPath, []byte(` + err := ioutil.WriteFile(cloudsPath, []byte(` clouds: dummy-cloud: type: dummy diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/commands/debughooks.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/commands/debughooks.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/commands/debughooks.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/commands/debughooks.go 2016-10-13 14:31:49.000000000 +0000 @@ -46,11 +46,11 @@ func (c *debugHooksCommand) Init(args []string) error { if len(args) < 1 { - return fmt.Errorf("no unit name specified") + return errors.Errorf("no unit name specified") } c.Target = args[0] if !names.IsValidUnit(c.Target) { - return fmt.Errorf("%q is not a valid unit name", c.Target) + return errors.Errorf("%q is not a valid unit name", c.Target) } // If any of the hooks is "*", then debug all hooks. 
@@ -64,11 +64,11 @@ return nil } -type charmRelationsApi interface { +type charmRelationsAPI interface { CharmRelations(serviceName string) ([]string, error) } -func (c *debugHooksCommand) getServiceAPI() (charmRelationsApi, error) { +func (c *debugHooksCommand) getServiceAPI() (charmRelationsAPI, error) { root, err := c.NewAPIRoot() if err != nil { return nil, errors.Trace(err) @@ -84,11 +84,11 @@ if err != nil { return err } - serviceApi, err := c.getServiceAPI() + serviceAPI, err := c.getServiceAPI() if err != nil { return err } - relations, err := serviceApi.CharmRelations(service) + relations, err := serviceAPI.CharmRelations(service) if err != nil { return err } @@ -111,7 +111,7 @@ } sort.Strings(names) logger.Infof("unknown hook %s, valid hook names: %v", hook, names) - return fmt.Errorf("unit %q does not contain hook %q", c.Target, hook) + return errors.Errorf("unit %q does not contain hook %q", c.Target, hook) } } return nil diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/commands/debuglog.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/commands/debuglog.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/commands/debuglog.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/commands/debuglog.go 2016-10-13 14:31:49.000000000 +0000 @@ -6,10 +6,15 @@ import ( "fmt" "io" + "os" + "time" + "github.com/juju/ansiterm" "github.com/juju/cmd" + "github.com/juju/errors" + "github.com/juju/gnuflag" "github.com/juju/loggo" - "launchpad.net/gnuflag" + "github.com/mattn/go-isatty" "github.com/juju/juju/api" "github.com/juju/juju/cmd/modelcmd" @@ -101,7 +106,11 @@ } func newDebugLogCommand() cmd.Command { - return modelcmd.Wrap(&debugLogCommand{}) + return newDebugLogCommandTZ(time.Local) +} + +func newDebugLogCommandTZ(tz *time.Location) cmd.Command { + return modelcmd.Wrap(&debugLogCommand{tz: tz}) } type debugLogCommand struct { @@ -109,9 +118,22 @@ level string params api.DebugLogParams + + utc 
bool + location bool + date bool + ms bool + + tail bool + notail bool + color bool + + format string + tz *time.Location } func (c *debugLogCommand) SetFlags(f *gnuflag.FlagSet) { + c.ModelCommandBase.SetFlags(f) f.Var(cmd.NewAppendStringsValue(&c.params.IncludeEntity), "i", "Only show log messages for these entities") f.Var(cmd.NewAppendStringsValue(&c.params.IncludeEntity), "include", "Only show log messages for these entities") f.Var(cmd.NewAppendStringsValue(&c.params.ExcludeEntity), "x", "Do not show log messages for these entities") @@ -126,24 +148,45 @@ f.UintVar(&c.params.Backlog, "lines", defaultLineCount, "") f.UintVar(&c.params.Limit, "limit", 0, "Exit once this many of the most recent (possibly filtered) lines are shown") f.BoolVar(&c.params.Replay, "replay", false, "Show the entire (possibly filtered) log and continue to append") - f.BoolVar(&c.params.NoTail, "T", false, "Stop after returning existing log messages") - f.BoolVar(&c.params.NoTail, "no-tail", false, "") + + f.BoolVar(&c.notail, "no-tail", false, "Stop after returning existing log messages") + f.BoolVar(&c.tail, "tail", false, "Wait for new logs") + f.BoolVar(&c.color, "color", false, "Force use of ANSI color codes") + + f.BoolVar(&c.utc, "utc", false, "Show times in UTC") + f.BoolVar(&c.location, "location", false, "Show filename and line numbers") + f.BoolVar(&c.date, "date", false, "Show dates as well as times") + f.BoolVar(&c.ms, "ms", false, "Show times to millisecond precision") } func (c *debugLogCommand) Init(args []string) error { if c.level != "" { level, ok := loggo.ParseLevel(c.level) if !ok || level < loggo.TRACE || level > loggo.ERROR { - return fmt.Errorf("level value %q is not one of %q, %q, %q, %q, %q", + return errors.Errorf("level value %q is not one of %q, %q, %q, %q, %q", c.level, loggo.TRACE, loggo.DEBUG, loggo.INFO, loggo.WARNING, loggo.ERROR) } c.params.Level = level } + if c.tail && c.notail { + return errors.NotValidf("setting --tail and --no-tail") + } + if 
c.utc { + c.tz = time.UTC + } + if c.date { + c.format = "2006-01-02 15:04:05" + } else { + c.format = "15:04:05" + } + if c.ms { + c.format = c.format + ".000" + } return cmd.CheckEmpty(args) } type DebugLogAPI interface { - WatchDebugLog(params api.DebugLogParams) (io.ReadCloser, error) + WatchDebugLog(params api.DebugLogParams) (<-chan api.LogMessage, error) Close() error } @@ -151,18 +194,69 @@ return c.NewAPIClient() } +func isTerminal(out io.Writer) bool { + f, ok := out.(*os.File) + if !ok { + return false + } + return isatty.IsTerminal(f.Fd()) +} + // Run retrieves the debug log via the API. func (c *debugLogCommand) Run(ctx *cmd.Context) (err error) { + if c.tail { + c.params.NoTail = false + } else if c.notail { + c.params.NoTail = true + } else { + // Set the default tail option to true if the caller is + // using a terminal. + c.params.NoTail = !isTerminal(ctx.Stdout) + } + client, err := getDebugLogAPI(c) if err != nil { return err } defer client.Close() - debugLog, err := client.WatchDebugLog(c.params) + messages, err := client.WatchDebugLog(c.params) if err != nil { return err } - defer debugLog.Close() - _, err = io.Copy(ctx.Stdout, debugLog) - return err + writer := ansiterm.NewWriter(ctx.Stdout) + if c.color { + writer.SetColorCapable(true) + } + for { + msg, ok := <-messages + if !ok { + break + } + c.writeLogRecord(writer, msg) + } + + return nil +} + +var SeverityColor = map[string]*ansiterm.Context{ + "TRACE": ansiterm.Foreground(ansiterm.Default), + "DEBUG": ansiterm.Foreground(ansiterm.Green), + "INFO": ansiterm.Foreground(ansiterm.BrightBlue), + "WARNING": ansiterm.Foreground(ansiterm.Yellow), + "ERROR": ansiterm.Foreground(ansiterm.BrightRed), + "CRITICAL": &ansiterm.Context{ + Foreground: ansiterm.White, + Background: ansiterm.Red, + }, +} + +func (c *debugLogCommand) writeLogRecord(w *ansiterm.Writer, r api.LogMessage) { + ts := r.Timestamp.In(c.tz).Format(c.format) + fmt.Fprintf(w, "%s: %s ", r.Entity, ts) + 
SeverityColor[r.Severity].Fprintf(w, r.Severity) + fmt.Fprintf(w, " %s ", r.Module) + if c.location { + loggo.LocationColor.Fprintf(w, "%s ", r.Location) + } + fmt.Fprintln(w, r.Message) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/commands/debuglog_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/commands/debuglog_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/commands/debuglog_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/commands/debuglog_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,9 +4,7 @@ package commands import ( - "io" - "io/ioutil" - "strings" + "time" "github.com/juju/loggo" jc "github.com/juju/testing/checkers" @@ -80,11 +78,8 @@ Replay: true, }, }, { - args: []string{"--no-tail"}, - expected: api.DebugLogParams{ - Backlog: 10, - NoTail: true, - }, + args: []string{"--no-tail", "--tail"}, + errMatch: `setting --tail and --no-tail not valid`, }, { args: []string{"--limit", "100"}, expected: api.DebugLogParams{ @@ -129,30 +124,66 @@ } func (s *DebugLogSuite) TestLogOutput(c *gc.C) { + // test timezone is 6 hours east of UTC + tz := time.FixedZone("test", 6*60*60) s.PatchValue(&getDebugLogAPI, func(_ *debugLogCommand) (DebugLogAPI, error) { - return &fakeDebugLogAPI{log: "this is the log output"}, nil + return &fakeDebugLogAPI{log: []api.LogMessage{ + { + Entity: "machine-0", + Timestamp: time.Date(2016, 10, 9, 8, 15, 23, 345000000, time.UTC), + Severity: "INFO", + Module: "test.module", + Location: "somefile.go:123", + Message: "this is the log output", + }, + }}, nil }) - ctx, err := testing.RunCommand(c, newDebugLogCommand()) - c.Assert(err, jc.ErrorIsNil) - c.Assert(testing.Stdout(ctx), gc.Equals, "this is the log output") -} + checkOutput := func(args ...string) { + count := len(args) + args, expected := args[:count-1], args[count-1] + ctx, err := testing.RunCommand(c, newDebugLogCommandTZ(tz), args...) 
+ c.Check(err, jc.ErrorIsNil) + c.Check(testing.Stdout(ctx), gc.Equals, expected) -func newFakeDebugLogAPI(log string) DebugLogAPI { - return &fakeDebugLogAPI{log: log} + } + checkOutput( + "machine-0: 14:15:23 INFO test.module this is the log output\n") + checkOutput( + "--ms", + "machine-0: 14:15:23.345 INFO test.module this is the log output\n") + checkOutput( + "--utc", + "machine-0: 08:15:23 INFO test.module this is the log output\n") + checkOutput( + "--date", + "machine-0: 2016-10-09 14:15:23 INFO test.module this is the log output\n") + checkOutput( + "--utc", "--date", + "machine-0: 2016-10-09 08:15:23 INFO test.module this is the log output\n") + checkOutput( + "--location", + "machine-0: 14:15:23 INFO test.module somefile.go:123 this is the log output\n") } type fakeDebugLogAPI struct { - log string + log []api.LogMessage params api.DebugLogParams err error } -func (fake *fakeDebugLogAPI) WatchDebugLog(params api.DebugLogParams) (io.ReadCloser, error) { +func (fake *fakeDebugLogAPI) WatchDebugLog(params api.DebugLogParams) (<-chan api.LogMessage, error) { if fake.err != nil { return nil, fake.err } fake.params = params - return ioutil.NopCloser(strings.NewReader(fake.log)), nil + response := make(chan api.LogMessage) + go func() { + defer close(response) + for _, msg := range fake.log { + response <- msg + } + }() + return response, nil } func (fake *fakeDebugLogAPI) Close() error { diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/commands/enableha.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/commands/enableha.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/commands/enableha.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/commands/enableha.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,18 +4,19 @@ package commands import ( - "bytes" "fmt" + "io" "strings" "github.com/juju/cmd" "github.com/juju/errors" + "github.com/juju/gnuflag" "gopkg.in/juju/names.v2" - 
"launchpad.net/gnuflag" "github.com/juju/juju/api/highavailability" "github.com/juju/juju/apiserver/params" "github.com/juju/juju/cmd/juju/block" + "github.com/juju/juju/cmd/juju/common" "github.com/juju/juju/cmd/modelcmd" "github.com/juju/juju/constraints" "github.com/juju/juju/instance" @@ -32,12 +33,12 @@ // NewClient does not return an error, so we'll return nil return highavailability.NewClient(root), nil } - return modelcmd.Wrap(haCommand) + return modelcmd.WrapController(haCommand) } // enableHACommand makes the controller highly available. type enableHACommand struct { - modelcmd.ModelCommandBase + modelcmd.ControllerCommandBase out cmd.Output // newHAClientFunc returns HA Client to be used by the command. @@ -50,6 +51,9 @@ // in the environment when creating new machines. Constraints constraints.Value + // ConstraintsStr contains the stringified version of the constraints. + ConstraintsStr string + // Placement specifies specific machine(s) which will be used to host // new controllers. If there are more controllers required than // machines specified, new machines will be created. @@ -62,8 +66,9 @@ const enableHADoc = ` To ensure availability of deployed applications, the Juju infrastructure -must itself be highly available. enable-ha must be called -to ensure that the specified number of controllers are made available. +must itself be highly available. The enable-ha command will ensure +that the specified number of controller machines are used to make up the +controller. An odd number of controllers is required. @@ -88,14 +93,12 @@ ` // formatSimple marshals value to a yaml-formatted []byte, unless value is nil. 
-func formatSimple(value interface{}) ([]byte, error) { +func formatSimple(writer io.Writer, value interface{}) error { enableHAResult, ok := value.(availabilityInfo) if !ok { - return nil, fmt.Errorf("unexpected result type for enable-ha call: %T", value) + return errors.Errorf("unexpected result type for enable-ha call: %T", value) } - var buf bytes.Buffer - for _, machineList := range []struct { message string list []string @@ -128,13 +131,13 @@ if len(machineList.list) == 0 { continue } - _, err := fmt.Fprintf(&buf, machineList.message, strings.Join(machineList.list, ", ")) + _, err := fmt.Fprintf(writer, machineList.message, strings.Join(machineList.list, ", ")) if err != nil { - return nil, err + return err } } - return buf.Bytes(), nil + return nil } func (c *enableHACommand) Info() *cmd.Info { @@ -146,9 +149,10 @@ } func (c *enableHACommand) SetFlags(f *gnuflag.FlagSet) { + c.ControllerCommandBase.SetFlags(f) f.IntVar(&c.NumControllers, "n", 0, "Number of controllers to make available") f.StringVar(&c.PlacementSpec, "to", "", "The machine(s) to become controllers, bypasses constraints") - f.Var(constraints.ConstraintsValue{&c.Constraints}, "constraints", "Additional machine constraints") + f.StringVar(&c.ConstraintsStr, "constraints", "", "Additional machine constraints") c.out.AddFlags(f, "simple", map[string]cmd.Formatter{ "yaml": cmd.FormatYaml, "json": cmd.FormatJson, @@ -159,7 +163,7 @@ func (c *enableHACommand) Init(args []string) error { if c.NumControllers < 0 || (c.NumControllers%2 != 1 && c.NumControllers != 0) { - return fmt.Errorf("must specify a number of controllers odd and non-negative") + return errors.Errorf("must specify a number of controllers odd and non-negative") } if c.PlacementSpec != "" { placementSpecs := strings.Split(c.PlacementSpec, ",") @@ -175,7 +179,7 @@ continue } if err != instance.ErrPlacementScopeMissing { - return fmt.Errorf("unsupported enable-ha placement directive %q", spec) + return errors.Errorf("unsupported 
enable-ha placement directive %q", spec) } c.Placement[i] = spec } @@ -205,6 +209,11 @@ // Run connects to the environment specified on the command line // and calls EnableHA. func (c *enableHACommand) Run(ctx *cmd.Context) error { + var err error + c.Constraints, err = common.ParseConstraints(ctx, c.ConstraintsStr) + if err != nil { + return err + } haClient, err := c.newHAClientFunc() if err != nil { return err diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/commands/enableha_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/commands/enableha_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/commands/enableha_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/commands/enableha_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -7,7 +7,6 @@ "bytes" "encoding/json" "fmt" - "strings" "github.com/juju/cmd" jc "github.com/juju/testing/checkers" @@ -99,13 +98,13 @@ func (s *EnableHASuite) runEnableHA(c *gc.C, args ...string) (*cmd.Context, error) { command := &enableHACommand{newHAClientFunc: func() (MakeHAClient, error) { return s.fake, nil }} - return coretesting.RunCommand(c, modelcmd.Wrap(command), args...) + return coretesting.RunCommand(c, modelcmd.WrapController(command), args...) 
} func (s *EnableHASuite) TestEnableHA(c *gc.C) { ctx, err := s.runEnableHA(c, "-n", "1") c.Assert(err, jc.ErrorIsNil) - c.Assert(coretesting.Stdout(ctx), gc.Equals, "") + c.Assert(coretesting.Stdout(ctx), gc.Equals, "\n") c.Assert(s.fake.numControllers, gc.Equals, 1) c.Assert(&s.fake.cons, jc.Satisfies, constraints.IsEmpty) @@ -115,11 +114,7 @@ func (s *EnableHASuite) TestBlockEnableHA(c *gc.C) { s.fake.err = common.OperationBlockedError("TestBlockEnableHA") _, err := s.runEnableHA(c, "-n", "1") - c.Assert(err, gc.ErrorMatches, cmd.ErrSilent.Error()) - - // msg is logged - stripped := strings.Replace(c.GetTestLog(), "\n", "", -1) - c.Check(stripped, gc.Matches, ".*TestBlockEnableHA.*") + coretesting.AssertOperationWasBlocked(c, err, ".*TestBlockEnableHA.*") } func (s *EnableHASuite) TestEnableHAFormatYaml(c *gc.C) { diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/commands/helptool.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/commands/helptool.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/commands/helptool.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/commands/helptool.go 2016-10-13 14:31:49.000000000 +0000 @@ -9,8 +9,8 @@ "github.com/juju/cmd" "github.com/juju/errors" + "github.com/juju/gnuflag" "gopkg.in/juju/charm.v6-unstable" - "launchpad.net/gnuflag" "github.com/juju/juju/network" "github.com/juju/juju/storage" @@ -85,6 +85,10 @@ return nil } +func (dummyHookContext) Component(name string) (jujuc.ContextComponent, error) { + return nil, nil +} + func newHelpToolCommand() cmd.Command { return &helpToolCommand{} } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/commands/import_sshkeys.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/commands/import_sshkeys.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/commands/import_sshkeys.go 2016-08-16 08:56:25.000000000 +0000 +++ 
juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/commands/import_sshkeys.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,10 +4,10 @@ package commands import ( - "errors" "fmt" "github.com/juju/cmd" + "github.com/juju/errors" "github.com/juju/juju/cmd/juju/block" "github.com/juju/juju/cmd/modelcmd" @@ -39,7 +39,7 @@ Multiple identities may be specified in a space delimited list: - juju import-ssh-key rheinlein lp:iasmiov gh:hharrison +juju import-ssh-key gh:rheinlein lp:iasmiov gh:hharrison See also: add-ssh-key @@ -64,17 +64,25 @@ Args: ": ...", Purpose: usageImportSSHKeySummary, Doc: usageImportSSHKeyDetails, - Aliases: []string{"import-ssh-keys"}, } } // Init implements Command.Init. func (c *importKeysCommand) Init(args []string) error { - switch len(args) { - case 0: + if len(args) == 0 { return errors.New("no ssh key id specified") - default: - c.sshKeyIds = args + } + c.sshKeyIds = args + for _, k := range c.sshKeyIds { + if len(k) < 3 { + return errors.NotValidf("%q key ID", k) + } + switch k[:3] { + case "lp:", "gh:": + default: + return errors.NewNotSupported(nil, + fmt.Sprintf("prefix in Key ID %q not supported, only lp: and gh: are allowed", k)) + } } return nil } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/commands/list_sshkeys.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/commands/list_sshkeys.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/commands/list_sshkeys.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/commands/list_sshkeys.go 2016-10-13 14:31:49.000000000 +0000 @@ -8,8 +8,8 @@ "strings" "github.com/juju/cmd" + "github.com/juju/gnuflag" "github.com/juju/utils/ssh" - "launchpad.net/gnuflag" "github.com/juju/juju/cmd/modelcmd" ) @@ -51,12 +51,13 @@ Name: "ssh-keys", Purpose: usageListSSHKeysSummary, Doc: usageListSSHKeysDetails, - Aliases: []string{"list-ssh-keys", "ssh-key", "list-ssh-key"}, + Aliases: []string{"list-ssh-keys"}, } } // SetFlags implements 
Command.SetFlags. func (c *listKeysCommand) SetFlags(f *gnuflag.FlagSet) { + c.SSHKeysBase.SetFlags(f) f.BoolVar(&c.showFullKey, "full", false, "Show full key instead of just the fingerprint") } @@ -83,6 +84,10 @@ if result.Error != nil { return result.Error } + if len(result.Result) == 0 { + context.Infof("No keys to display.") + return nil + } fmt.Fprintf(context.Stdout, "Keys used in model: %s\n", c.ConnectionName()) fmt.Fprintln(context.Stdout, strings.Join(result.Result, "\n")) return nil diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/commands/main.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/commands/main.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/commands/main.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/commands/main.go 2016-10-13 14:31:49.000000000 +0000 @@ -11,7 +11,6 @@ "github.com/juju/cmd" "github.com/juju/loggo" - rcmd "github.com/juju/romulus/cmd/commands" "github.com/juju/utils/featureflag" utilsos "github.com/juju/utils/os" "github.com/juju/utils/series" @@ -30,6 +29,7 @@ "github.com/juju/juju/cmd/juju/machine" "github.com/juju/juju/cmd/juju/metricsdebug" "github.com/juju/juju/cmd/juju/model" + rcmd "github.com/juju/juju/cmd/juju/romulus/commands" "github.com/juju/juju/cmd/juju/setmeterstatus" "github.com/juju/juju/cmd/juju/space" "github.com/juju/juju/cmd/juju/status" @@ -58,10 +58,10 @@ var jujuDoc = ` juju provides easy, intelligent application orchestration on top of cloud -infrastructure providers such as Amazon EC2, HP Cloud, MaaS, OpenStack, Windows -Azure, or your local machine. +infrastructure providers such as Amazon EC2, MaaS, OpenStack, Windows, Azure, +or your local machine. -https://juju.ubuntu.com/ +https://jujucharms.com/ ` const juju1xCmdName = "juju-1" @@ -130,13 +130,21 @@ // note that this has to come before we init the juju home directory, // since it relies on detecting the lack of said directory. 
- m.maybeWarnJuju1x() + newInstall := m.maybeWarnJuju1x() if err = juju.InitJujuXDGDataHome(); err != nil { fmt.Fprintf(os.Stderr, "error: %s\n", err) return 2 } + if newInstall { + fmt.Fprintf(ctx.Stderr, "Since Juju %v is being run for the first time, downloading latest cloud information.\n", jujuversion.Current.Major) + updateCmd := cloud.NewUpdateCloudsCommand() + if err := updateCmd.Run(ctx); err != nil { + fmt.Fprintf(ctx.Stderr, "error: %v\n", err) + } + } + for i := range x { x[i] ^= 255 } @@ -149,19 +157,22 @@ return cmd.Main(jcmd, ctx, args[1:]) } -func (m main) maybeWarnJuju1x() { +func (m main) maybeWarnJuju1x() (newInstall bool) { + newInstall = !juju2xConfigDataExists() if !shouldWarnJuju1x() { - return + return newInstall } ver, exists := m.juju1xVersion() if !exists { - return + return newInstall } fmt.Fprintf(os.Stderr, ` Welcome to Juju %s. If you meant to use Juju %s you can continue using it with the command %s e.g. '%s switch'. See https://jujucharms.com/docs/stable/introducing-2 for more details. 
+ `[1:], jujuversion.Current, ver, juju1xCmdName, juju1xCmdName) + return newInstall } func (m main) juju1xVersion() (ver string, exists bool) { @@ -277,6 +288,7 @@ r.Register(user.NewLoginCommand()) r.Register(user.NewLogoutCommand()) r.Register(user.NewRemoveCommand()) + r.Register(user.NewWhoAmICommand()) // Manage cached images r.Register(cachedimages.NewRemoveCommand()) @@ -289,15 +301,10 @@ r.Register(machine.NewShowMachineCommand()) // Manage model - r.Register(model.NewGetCommand()) - r.Register(model.NewModelDefaultsCommand()) - r.Register(model.NewSetModelDefaultsCommand()) - r.Register(model.NewUnsetModelDefaultsCommand()) - r.Register(model.NewSetCommand()) - r.Register(model.NewUnsetCommand()) + r.Register(model.NewConfigCommand()) + r.Register(model.NewDefaultsCommand()) r.Register(model.NewRetryProvisioningCommand()) r.Register(model.NewDestroyCommand()) - r.Register(model.NewUsersCommand()) r.Register(model.NewGrantCommand()) r.Register(model.NewRevokeCommand()) r.Register(model.NewShowCommand()) @@ -307,6 +314,7 @@ } if featureflag.Enabled(feature.DeveloperMode) { r.Register(model.NewDumpCommand()) + r.Register(model.NewDumpDBCommand()) } // Manage and control actions @@ -320,17 +328,17 @@ // Manage and control services r.Register(application.NewAddUnitCommand()) - r.Register(application.NewGetCommand()) - r.Register(application.NewSetCommand()) - r.Register(application.NewDeployCommand()) + r.Register(application.NewConfigCommand()) + r.Register(application.NewDefaultDeployCommand()) r.Register(application.NewExposeCommand()) r.Register(application.NewUnexposeCommand()) r.Register(application.NewServiceGetConstraintsCommand()) r.Register(application.NewServiceSetConstraintsCommand()) // Operation protection commands - r.Register(block.NewSuperBlockCommand()) - r.Register(block.NewUnblockCommand()) + r.Register(block.NewDisableCommand()) + r.Register(block.NewListCommand()) + r.Register(block.NewEnableCommand()) // Manage storage 
r.Register(storage.NewAddCommand()) @@ -362,10 +370,9 @@ r.Register(controller.NewListModelsCommand()) r.Register(controller.NewKillCommand()) r.Register(controller.NewListControllersCommand()) - r.Register(controller.NewListBlocksCommand()) r.Register(controller.NewRegisterCommand()) r.Register(controller.NewUnregisterCommand(jujuclient.NewFileClientStore())) - r.Register(controller.NewRemoveBlocksCommand()) + r.Register(controller.NewEnableDestroyControllerCommand()) r.Register(controller.NewShowControllerCommand()) r.Register(controller.NewGetConfigCommand()) @@ -377,6 +384,7 @@ // Manage clouds and credentials r.Register(cloud.NewUpdateCloudsCommand()) r.Register(cloud.NewListCloudsCommand()) + r.Register(cloud.NewListRegionsCommand()) r.Register(cloud.NewShowCloudCommand()) r.Register(cloud.NewAddCloudCommand()) r.Register(cloud.NewRemoveCloudCommand()) @@ -386,6 +394,7 @@ r.Register(cloud.NewSetDefaultCredentialCommand()) r.Register(cloud.NewAddCredentialCommand()) r.Register(cloud.NewRemoveCredentialCommand()) + r.Register(cloud.NewUpdateCredentialCommand()) // Juju GUI commands. 
r.Register(gui.NewGUICommand()) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/commands/main_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/commands/main_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/commands/main_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/commands/main_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -23,7 +23,7 @@ gc "gopkg.in/check.v1" "github.com/juju/juju/cmd/juju/application" - "github.com/juju/juju/cmd/juju/block" + "github.com/juju/juju/cmd/juju/cloud" "github.com/juju/juju/cmd/modelcmd" cmdtesting "github.com/juju/juju/cmd/testing" "github.com/juju/juju/feature" @@ -41,20 +41,16 @@ var _ = gc.Suite(&MainSuite{}) func deployHelpText() string { - return cmdtesting.HelpText(application.NewDeployCommand(), "juju deploy") + return cmdtesting.HelpText(application.NewDefaultDeployCommand(), "juju deploy") } -func setconfigHelpText() string { - return cmdtesting.HelpText(application.NewSetCommand(), "juju set-config") +func configHelpText() string { + return cmdtesting.HelpText(application.NewConfigCommand(), "juju config") } func syncToolsHelpText() string { return cmdtesting.HelpText(newSyncToolsCommand(), "juju sync-tools") } -func blockHelpText() string { - return cmdtesting.HelpText(block.NewSuperBlockCommand(), "juju block") -} - func (s *MainSuite) TestRunMain(c *gc.C) { // The test array structure needs to be inline here as some of the // expected values below use deployHelpText(). 
This constructs the deploy @@ -88,15 +84,15 @@ code: 0, out: deployHelpText(), }, { - summary: "juju --help set-config shows the same help as 'help set-config'", - args: []string{"--help", "set-config"}, + summary: "juju --help config shows the same help as 'help config'", + args: []string{"--help", "config"}, code: 0, - out: setconfigHelpText(), + out: configHelpText(), }, { - summary: "juju set-config --help shows the same help as 'help set-config'", - args: []string{"set-config", "--help"}, + summary: "juju config --help shows the same help as 'help config'", + args: []string{"config", "--help"}, code: 0, - out: setconfigHelpText(), + out: configHelpText(), }, { summary: "unknown command", args: []string{"discombobulate"}, @@ -131,18 +127,7 @@ Arch: arch.HostArch(), Series: series.HostSeries(), }.String() + "\n", - }, { - summary: "check block command registered properly", - args: []string{"block", "-h"}, - code: 0, - out: blockHelpText(), - }, { - summary: "check unblock command registered properly", - args: []string{"unblock"}, - code: 0, - out: "error: must specify one of [destroy-model | remove-object | all-changes] to unblock\n", - }, - } { + }} { c.Logf("test %d: %s", i, t.summary) out := badrun(c, t.code, t.args...) c.Assert(out, gc.Equals, t.out) @@ -190,9 +175,13 @@ Stdout: "1.25.0-trusty-amd64", Args: argChan, }) + stub := &gitjujutesting.Stub{} + s.PatchValue(&cloud.NewUpdateCloudsCommand, func() cmd.Command { + return &stubCommand{stub: stub} + }) // remove the new juju-home and create a fake old juju home. - err := os.Remove(osenv.JujuXDGDataHome()) + err := os.RemoveAll(osenv.JujuXDGDataHomeDir()) c.Assert(err, jc.ErrorIsNil) makeValidOldHome(c) @@ -217,7 +206,8 @@ Welcome to Juju %s. If you meant to use Juju 1.25.0 you can continue using it with the command juju-1 e.g. 'juju-1 switch'. See https://jujucharms.com/docs/stable/introducing-2 for more details. 
-`[1:], jujuversion.Current)) + +Since Juju 2 is being run for the first time, downloading latest cloud information.`[1:]+"\n", jujuversion.Current)) checkVersionOutput(c, string(stdout)) } @@ -233,9 +223,13 @@ Stdout: "1.25.0-trusty-amd64", Args: argChan, }) + stub := &gitjujutesting.Stub{} + s.PatchValue(&cloud.NewUpdateCloudsCommand, func() cmd.Command { + return &stubCommand{stub: stub} + }) // remove the new juju-home and create a fake old juju home. - err := os.Remove(osenv.JujuXDGDataHome()) + err := os.RemoveAll(osenv.JujuXDGDataHomeDir()) c.Assert(err, jc.ErrorIsNil) makeValidOldHome(c) @@ -251,7 +245,8 @@ assertNoArgs(c, argChan) - c.Check(string(stderr), gc.Equals, "") + c.Check(string(stderr), gc.Equals, ` +Since Juju 2 is being run for the first time, downloading latest cloud information.`[1:]+"\n") checkVersionOutput(c, string(stdout)) } @@ -300,9 +295,13 @@ Stdout: "1.25.0-trusty-amd64", Args: argChan, }) + stub := &gitjujutesting.Stub{} + s.PatchValue(&cloud.NewUpdateCloudsCommand, func() cmd.Command { + return &stubCommand{stub: stub} + }) // remove the new juju-home. - err := os.Remove(osenv.JujuXDGDataHome()) + err := os.RemoveAll(osenv.JujuXDGDataHomeDir()) c.Assert(err, jc.ErrorIsNil) // create fake (empty) old juju home. 
@@ -319,10 +318,44 @@ c.Assert(code, gc.Equals, 0) assertNoArgs(c, argChan) - c.Assert(string(stderr), gc.Equals, "") + c.Check(string(stderr), gc.Equals, ` +Since Juju 2 is being run for the first time, downloading latest cloud information.`[1:]+"\n") checkVersionOutput(c, string(stdout)) } +func (s *MainSuite) assertRunCommandUpdateCloud(c *gc.C, expectedCall string) { + argChan := make(chan []string, 1) + execCommand := s.GetExecCommand(gitjujutesting.PatchExecConfig{ + Stdout: "1.25.0-trusty-amd64", + Args: argChan, + }) + + stub := &gitjujutesting.Stub{} + s.PatchValue(&cloud.NewUpdateCloudsCommand, func() cmd.Command { + return &stubCommand{stub: stub} + + }) + var code int + gitjujutesting.CaptureOutput(c, func() { + code = main{ + execCommand: execCommand, + }.Run([]string{"juju", "version"}) + }) + c.Assert(code, gc.Equals, 0) + c.Assert(stub.Calls()[0].FuncName, gc.Equals, expectedCall) +} + +func (s *MainSuite) TestFirstRunUpdateCloud(c *gc.C) { + // remove the juju-home. + err := os.RemoveAll(osenv.JujuXDGDataHomeDir()) + c.Assert(err, jc.ErrorIsNil) + s.assertRunCommandUpdateCloud(c, "Run") +} + +func (s *MainSuite) TestRunNoUpdateCloud(c *gc.C) { + s.assertRunCommandUpdateCloud(c, "Info") +} + func makeValidOldHome(c *gc.C) { oldhome := osenv.OldJujuHomeDir() err := os.MkdirAll(oldhome, 0700) @@ -356,80 +389,69 @@ "add-cloud", "add-credential", "add-machine", - "add-machines", "add-model", "add-relation", "add-space", "add-ssh-key", - "add-ssh-keys", "add-storage", "add-subnet", "add-unit", - "add-units", "add-user", "agree", "agreements", "allocate", "autoload-credentials", "backups", - "block", - "blocks", "bootstrap", "budgets", "cached-images", "change-user-password", "charm", "clouds", + "config", "collect-metrics", "controllers", "create-backup", "create-budget", "create-storage-pool", "credentials", + "controller-config", "debug-hooks", "debug-log", - "debug-metrics", "remove-user", "deploy", "destroy-controller", "destroy-model", - 
"destroy-relation", - "destroy-application", - "destroy-unit", + "disable-command", "disable-user", + "disabled-commands", "download-backup", "enable-ha", + "enable-command", + "enable-destroy-controller", "enable-user", "expose", - "get-config", - "get-configs", "get-constraints", - "get-controller-config", - "get-model-config", "get-model-constraints", "grant", "gui", "help", "help-tool", "import-ssh-key", - "import-ssh-keys", "kill-controller", "list-actions", "list-agreements", - "list-all-blocks", "list-backups", - "list-blocks", "list-budgets", "list-cached-images", "list-clouds", "list-controllers", "list-credentials", - "list-machine", + "list-disabled-commands", "list-machines", "list-models", "list-plans", - "list-shares", - "list-ssh-key", + "list-regions", "list-ssh-keys", "list-spaces", "list-storage", @@ -438,26 +460,24 @@ "list-users", "login", "logout", - "machine", "machines", + "metrics", "model-config", "model-defaults", "models", "plans", + "regions", "register", "relate", //alias for add-relation - "remove-all-blocks", - "remove-application", // alias for destroy-application + "remove-application", "remove-backup", "remove-cached-images", "remove-cloud", "remove-credential", "remove-machine", - "remove-machines", - "remove-relation", // alias for destroy-relation + "remove-relation", "remove-ssh-key", - "remove-ssh-keys", - "remove-unit", // alias for destroy-unit + "remove-unit", "resolved", "restore-backup", "retry-provisioning", @@ -466,54 +486,45 @@ "run-action", "scp", "set-budget", - "set-config", - "set-configs", "set-constraints", "set-default-credential", "set-default-region", "set-meter-status", - "set-model-config", "set-model-constraints", - "set-model-default", "set-plan", - "ssh-key", - "ssh-keys", - "shares", "show-action-output", "show-action-status", "show-backup", "show-budget", "show-cloud", "show-controller", - "show-controllers", "show-machine", - "show-machines", "show-model", "show-status", + "show-status-log", 
"show-storage", "show-user", "spaces", "ssh", + "ssh-keys", "status", - "status-history", "storage", "storage-pools", "subnets", "switch", "sync-tools", - "unblock", "unexpose", "update-allocation", "upload-backup", "unregister", - "unset-model-config", - "unset-model-default", "update-clouds", + "update-credential", "upgrade-charm", "upgrade-gui", "upgrade-juju", "users", "version", + "whoami", } // devFeatures are feature flags that impact registration of commands. @@ -525,8 +536,6 @@ ) func (s *MainSuite) TestHelpCommands(c *gc.C) { - defer osenv.SetJujuXDGDataHome(osenv.SetJujuXDGDataHome(c.MkDir())) - // Check that we have correctly registered all the commands // by checking the help output. // First check default commands, and then check commands that are @@ -590,7 +599,6 @@ func (s *MainSuite) TestHelpGlobalOptions(c *gc.C) { // Check that we have correctly registered all the topics // by checking the help output. - defer osenv.SetJujuXDGDataHome(osenv.SetJujuXDGDataHome(c.MkDir())) out := badrun(c, 0, "help", "global-options") c.Assert(out, gc.Matches, `Global Options diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/commands/migrate.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/commands/migrate.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/commands/migrate.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/commands/migrate.go 2016-10-13 14:31:49.000000000 +0000 @@ -6,26 +6,32 @@ import ( "github.com/juju/cmd" "github.com/juju/errors" + "gopkg.in/macaroon-bakery.v1/httpbakery" + "gopkg.in/macaroon.v1" + "github.com/juju/juju/api" "github.com/juju/juju/api/controller" "github.com/juju/juju/cmd/modelcmd" + "github.com/juju/juju/jujuclient" ) func newMigrateCommand() cmd.Command { - return modelcmd.WrapController(&migrateCommand{}) + var cmd migrateCommand + cmd.newAPIRoot = cmd.JujuCommandBase.NewAPIRoot + return modelcmd.WrapController(&cmd) } // migrateCommand initiates a model 
migration. type migrateCommand struct { modelcmd.ControllerCommandBase - api migrateAPI - + newAPIRoot func(jujuclient.ClientStore, string, string) (api.Connection, error) + api migrateAPI model string targetController string } type migrateAPI interface { - InitiateModelMigration(spec controller.ModelMigrationSpec) (string, error) + InitiateMigration(spec controller.MigrationSpec) (string, error) } const migrateDoc = ` @@ -84,7 +90,7 @@ return nil } -func (c *migrateCommand) getMigrationSpec() (*controller.ModelMigrationSpec, error) { +func (c *migrateCommand) getMigrationSpec() (*controller.MigrationSpec, error) { store := c.ClientStore() modelUUIDs, err := c.ModelUUIDs([]string{c.model}) @@ -103,13 +109,23 @@ return nil, err } - return &controller.ModelMigrationSpec{ + var macs []macaroon.Slice + if accountInfo.Password == "" { + var err error + macs, err = c.getTargetControllerMacaroons() + if err != nil { + return nil, errors.Trace(err) + } + } + + return &controller.MigrationSpec{ ModelUUID: modelUUID, TargetControllerUUID: controllerInfo.ControllerUUID, TargetAddrs: controllerInfo.APIEndpoints, TargetCACert: controllerInfo.CACert, TargetUser: accountInfo.User, TargetPassword: accountInfo.Password, + TargetMacaroons: macs, }, nil } @@ -123,7 +139,7 @@ if err != nil { return err } - id, err := api.InitiateModelMigration(*spec) + id, err := api.InitiateMigration(*spec) if err != nil { return err } @@ -137,3 +153,22 @@ } return c.NewControllerAPIClient() } + +func (c *migrateCommand) getTargetControllerMacaroons() ([]macaroon.Slice, error) { + apiContext, err := c.APIContext() + if err != nil { + return nil, errors.Trace(err) + } + + // Connect to the target controller, ensuring up-to-date macaroons, + // and return the macaroons in the cookie jar for the controller. + // + // TODO(axw,mjs) add a controller API that returns a macaroon that + // may be used for the sole purpose of migration. 
+ api, err := c.newAPIRoot(c.ClientStore(), c.targetController, "") + if err != nil { + return nil, errors.Annotate(err, "connecting to target controller") + } + defer api.Close() + return httpbakery.MacaroonsForURL(apiContext.Jar, api.CookieURL()), nil +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/commands/migrate_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/commands/migrate_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/commands/migrate_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/commands/migrate_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,10 +4,18 @@ package commands import ( + "net/http" + "net/url" + "time" + "github.com/juju/cmd" + cookiejar "github.com/juju/persistent-cookiejar" jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" + "gopkg.in/macaroon-bakery.v1/httpbakery" + "gopkg.in/macaroon.v1" + "github.com/juju/juju/api" "github.com/juju/juju/api/base" "github.com/juju/juju/api/controller" "github.com/juju/juju/cmd/modelcmd" @@ -19,8 +27,10 @@ type MigrateSuite struct { testing.FakeJujuXDGDataHomeSuite - api *fakeMigrateAPI - store *jujuclienttesting.MemStore + api *fakeMigrateAPI + targetControllerAPI *fakeTargetControllerAPI + store *jujuclienttesting.MemStore + password string } var _ = gc.Suite(&MigrateSuite{}) @@ -35,7 +45,7 @@ s.store = jujuclienttesting.NewMemStore() // Define the source controller in the config and set it as the default. - err := s.store.UpdateController("source", jujuclient.ControllerDetails{ + err := s.store.AddController("source", jujuclient.ControllerDetails{ ControllerUUID: "eeeeeeee-0bad-400d-8000-4b1d0d06f00d", CACert: "somecert", }) @@ -45,25 +55,25 @@ // Define an account for the model in the source controller in the config. 
err = s.store.UpdateAccount("source", jujuclient.AccountDetails{ - User: "source@local", + User: "source", }) c.Assert(err, jc.ErrorIsNil) // Define the model to migrate in the config. - err = s.store.UpdateModel("source", "source@local/model", jujuclient.ModelDetails{ + err = s.store.UpdateModel("source", "source/model", jujuclient.ModelDetails{ ModelUUID: modelUUID, }) c.Assert(err, jc.ErrorIsNil) // Define the account for the target controller. err = s.store.UpdateAccount("target", jujuclient.AccountDetails{ - User: "target@local", + User: "target", Password: "secret", }) c.Assert(err, jc.ErrorIsNil) // Define the target controller in the config. - err = s.store.UpdateController("target", jujuclient.ControllerDetails{ + err = s.store.AddController("target", jujuclient.ControllerDetails{ ControllerUUID: targetControllerUUID, APIEndpoints: []string{"1.2.3.4:5"}, CACert: "cert", @@ -71,6 +81,41 @@ c.Assert(err, jc.ErrorIsNil) s.api = &fakeMigrateAPI{} + + mac0, err := macaroon.New([]byte("secret0"), "id0", "location0") + c.Assert(err, jc.ErrorIsNil) + mac1, err := macaroon.New([]byte("secret1"), "id1", "location1") + c.Assert(err, jc.ErrorIsNil) + + jar, err := cookiejar.New(&cookiejar.Options{ + Filename: cookiejar.DefaultCookieFile(), + }) + c.Assert(err, jc.ErrorIsNil) + + s.targetControllerAPI = &fakeTargetControllerAPI{ + cookieURL: &url.URL{ + Scheme: "https", + Host: "testing.invalid", + Path: "/", + }, + macaroons: []macaroon.Slice{{mac0}}, + } + addCookie(c, jar, mac0, s.targetControllerAPI.cookieURL) + addCookie(c, jar, mac1, &url.URL{ + Scheme: "https", + Host: "tasting.invalid", + Path: "/", + }) + + err = jar.Save() + c.Assert(err, jc.ErrorIsNil) +} + +func addCookie(c *gc.C, jar *cookiejar.Jar, mac *macaroon.Macaroon, url *url.URL) { + cookie, err := httpbakery.NewCookie(macaroon.Slice{mac}) + c.Assert(err, jc.ErrorIsNil) + cookie.Expires = time.Now().Add(time.Hour) // only persistent cookies are stored + jar.SetCookies(url, []*http.Cookie{cookie}) } 
func (s *MigrateSuite) TestMissingModel(c *gc.C) { @@ -93,19 +138,40 @@ c.Assert(err, jc.ErrorIsNil) c.Check(testing.Stderr(ctx), gc.Matches, "Migration started with ID \"uuid:0\"\n") - c.Check(s.api.specSeen, jc.DeepEquals, &controller.ModelMigrationSpec{ + c.Check(s.api.specSeen, jc.DeepEquals, &controller.MigrationSpec{ ModelUUID: modelUUID, TargetControllerUUID: targetControllerUUID, TargetAddrs: []string{"1.2.3.4:5"}, TargetCACert: "cert", - TargetUser: "target@local", + TargetUser: "target", TargetPassword: "secret", }) } +func (s *MigrateSuite) TestSuccessMacaroons(c *gc.C) { + err := s.store.UpdateAccount("target", jujuclient.AccountDetails{ + User: "target", + Password: "", + }) + c.Assert(err, jc.ErrorIsNil) + + ctx, err := s.makeAndRun(c, "model", "target") + c.Assert(err, jc.ErrorIsNil) + + c.Check(testing.Stderr(ctx), gc.Matches, "Migration started with ID \"uuid:0\"\n") + c.Check(s.api.specSeen, jc.DeepEquals, &controller.MigrationSpec{ + ModelUUID: modelUUID, + TargetControllerUUID: targetControllerUUID, + TargetAddrs: []string{"1.2.3.4:5"}, + TargetCACert: "cert", + TargetUser: "target", + TargetMacaroons: s.targetControllerAPI.macaroons, + }) +} + func (s *MigrateSuite) TestModelDoesntExist(c *gc.C) { cmd := s.makeCommand() - cmd.SetModelApi(&fakeModelAPI{}) + cmd.SetModelAPI(&fakeModelAPI{}) _, err := s.run(c, cmd, "wat", "target") c.Check(err, gc.ErrorMatches, "model .+ not found") c.Check(s.api.specSeen, gc.IsNil) // API shouldn't have been called @@ -113,7 +179,7 @@ func (s *MigrateSuite) TestModelDoesntExistBeforeRefresh(c *gc.C) { cmd := s.makeCommand() - cmd.SetModelApi(&fakeModelAPI{model: "wat"}) // Model is available after refresh + cmd.SetModelAPI(&fakeModelAPI{model: "wat"}) // Model is available after refresh _, err := s.run(c, cmd, "wat", "target") c.Check(err, jc.ErrorIsNil) c.Check(s.api.specSeen, gc.NotNil) @@ -132,6 +198,9 @@ func (s *MigrateSuite) makeCommand() *migrateCommand { cmd := &migrateCommand{ api: s.api, + newAPIRoot: 
func(jujuclient.ClientStore, string, string) (api.Connection, error) { + return s.targetControllerAPI, nil + }, } cmd.SetClientStore(s.store) return cmd @@ -142,10 +211,10 @@ } type fakeMigrateAPI struct { - specSeen *controller.ModelMigrationSpec + specSeen *controller.MigrationSpec } -func (a *fakeMigrateAPI) InitiateModelMigration(spec controller.ModelMigrationSpec) (string, error) { +func (a *fakeMigrateAPI) InitiateMigration(spec controller.MigrationSpec) (string, error) { a.specSeen = &spec return "uuid:0", nil } @@ -161,10 +230,24 @@ return []base.UserModel{{ Name: m.model, UUID: modelUUID, - Owner: "source@local", + Owner: "source", }}, nil } func (m *fakeModelAPI) Close() error { return nil } + +type fakeTargetControllerAPI struct { + api.Connection + cookieURL *url.URL + macaroons []macaroon.Slice +} + +func (a *fakeTargetControllerAPI) CookieURL() *url.URL { + return a.cookieURL +} + +func (a *fakeTargetControllerAPI) Close() error { + return nil +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/commands/plugin.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/commands/plugin.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/commands/plugin.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/commands/plugin.go 2016-10-13 14:31:49.000000000 +0000 @@ -16,7 +16,7 @@ "syscall" "github.com/juju/cmd" - "launchpad.net/gnuflag" + "github.com/juju/gnuflag" "github.com/juju/juju/cmd/modelcmd" "github.com/juju/juju/juju/osenv" @@ -94,7 +94,6 @@ func (c *PluginCommand) Run(ctx *cmd.Context) error { command := exec.Command(c.name, c.args...) 
command.Env = append(os.Environ(), []string{ - osenv.JujuXDGDataHomeEnvKey + "=" + osenv.JujuXDGDataHome(), osenv.JujuModelEnvKey + "=" + c.ConnectionName()}..., ) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/commands/plugin_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/commands/plugin_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/commands/plugin_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/commands/plugin_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -165,22 +165,22 @@ // Plugins are run as model commands, and so require a current // account and model. store := jujuclient.NewFileClientStore() - err := store.UpdateController("myctrl", jujuclient.ControllerDetails{ - ControllerUUID: testing.ModelTag.Id(), + err := store.AddController("myctrl", jujuclient.ControllerDetails{ + ControllerUUID: testing.ControllerTag.Id(), CACert: "fake", }) c.Assert(err, jc.ErrorIsNil) err = store.SetCurrentController("myctrl") c.Assert(err, jc.ErrorIsNil) err = store.UpdateAccount("myctrl", jujuclient.AccountDetails{ - User: "admin@local", + User: "admin", Password: "hunter2", }) c.Assert(err, jc.ErrorIsNil) suite.makeFullPlugin(PluginParams{Name: "foo"}) output := badrun(c, 0, "foo", "-m", "mymodel", "-p", "pluginarg") - expectedDebug := "foo -m mymodel -p pluginarg\nmodel is: mymodel\n.*home is: .*\\.local/share/juju\n" + expectedDebug := "foo -m mymodel -p pluginarg\nmodel is: mymodel\n" c.Assert(output, gc.Matches, expectedDebug) } @@ -235,7 +235,6 @@ echo {{.Name}} $* echo "model is: " $JUJU_MODEL -echo "home is: " $JUJU_DATA exit {{.ExitStatus}} ` diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/commands/remove_sshkeys.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/commands/remove_sshkeys.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/commands/remove_sshkeys.go 2016-08-16 08:56:25.000000000 +0000 +++ 
juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/commands/remove_sshkeys.go 2016-10-13 14:31:49.000000000 +0000 @@ -52,7 +52,6 @@ Args: " ...", Purpose: usageRemoveSSHKeySummary, Doc: usageRemoveSSHKeyDetails, - Aliases: []string{"remove-ssh-keys"}, } } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/commands/resolved.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/commands/resolved.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/commands/resolved.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/commands/resolved.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,11 +4,10 @@ package commands import ( - "fmt" - "github.com/juju/cmd" + "github.com/juju/errors" + "github.com/juju/gnuflag" "gopkg.in/juju/names.v2" - "launchpad.net/gnuflag" "github.com/juju/juju/cmd/juju/block" "github.com/juju/juju/cmd/modelcmd" @@ -22,31 +21,31 @@ type resolvedCommand struct { modelcmd.ModelCommandBase UnitName string - Retry bool + NoRetry bool } func (c *resolvedCommand) Info() *cmd.Info { return &cmd.Info{ Name: "resolved", Args: "", - Purpose: "Marks unit errors resolved.", + Purpose: "Marks unit errors resolved and re-executes failed hooks", } } func (c *resolvedCommand) SetFlags(f *gnuflag.FlagSet) { - f.BoolVar(&c.Retry, "r", false, "Re-execute failed hooks") - f.BoolVar(&c.Retry, "retry", false, "") + c.ModelCommandBase.SetFlags(f) + f.BoolVar(&c.NoRetry, "no-retry", false, "Do not re-execute failed hooks on the unit") } func (c *resolvedCommand) Init(args []string) error { if len(args) > 0 { c.UnitName = args[0] if !names.IsValidUnit(c.UnitName) { - return fmt.Errorf("invalid unit name %q", c.UnitName) + return errors.Errorf("invalid unit name %q", c.UnitName) } args = args[1:] } else { - return fmt.Errorf("no unit specified") + return errors.Errorf("no unit specified") } return cmd.CheckEmpty(args) } @@ -57,5 +56,5 @@ return err } defer client.Close() - return 
block.ProcessBlockedError(client.Resolved(c.UnitName, c.Retry), block.BlockChange) + return block.ProcessBlockedError(client.Resolved(c.UnitName, c.NoRetry), block.BlockChange) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/commands/resolved_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/commands/resolved_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/commands/resolved_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/commands/resolved_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -10,7 +10,6 @@ gc "gopkg.in/check.v1" "github.com/juju/juju/cmd/juju/application" - "github.com/juju/juju/cmd/juju/common" jujutesting "github.com/juju/juju/juju/testing" "github.com/juju/juju/state" "github.com/juju/juju/status" @@ -20,12 +19,12 @@ type ResolvedSuite struct { jujutesting.RepoSuite - common.CmdBlockHelper + testing.CmdBlockHelper } func (s *ResolvedSuite) SetUpTest(c *gc.C) { s.RepoSuite.SetUpTest(c) - s.CmdBlockHelper = common.NewCmdBlockHelper(s.APIState) + s.CmdBlockHelper = testing.NewCmdBlockHelper(s.APIState) c.Assert(s.CmdBlockHelper, gc.NotNil) s.AddCleanup(func(*gc.C) { s.CmdBlockHelper.Close() }) } @@ -38,7 +37,7 @@ } func runDeploy(c *gc.C, args ...string) error { - _, err := testing.RunCommand(c, application.NewDeployCommand(), args...) + _, err := testing.RunCommand(c, application.NewDefaultDeployCommand(), args...) 
return err } @@ -62,21 +61,21 @@ unit: "dummy/0", mode: state.ResolvedNone, }, { - args: []string{"dummy/1", "--retry"}, + args: []string{"dummy/1", "--no-retry"}, err: `unit "dummy/1" is not in an error state`, unit: "dummy/1", mode: state.ResolvedNone, }, { - args: []string{"dummy/2"}, + args: []string{"dummy/2", "--no-retry"}, unit: "dummy/2", mode: state.ResolvedNoHooks, }, { - args: []string{"dummy/2", "--retry"}, + args: []string{"dummy/2", "--no-retry"}, err: `cannot set resolved mode for unit "dummy/2": already resolved`, unit: "dummy/2", mode: state.ResolvedNoHooks, }, { - args: []string{"dummy/3", "--retry"}, + args: []string{"dummy/3"}, unit: "dummy/3", mode: state.ResolvedRetryHooks, }, { @@ -101,7 +100,7 @@ u, err := s.State.Unit(name) c.Assert(err, jc.ErrorIsNil) sInfo := status.StatusInfo{ - Status: status.StatusError, + Status: status.Error, Message: "lol borken", Since: &now, } @@ -136,7 +135,7 @@ u, err := s.State.Unit(name) c.Assert(err, jc.ErrorIsNil) sInfo := status.StatusInfo{ - Status: status.StatusError, + Status: status.Error, Message: "lol borken", Since: &now, } @@ -147,5 +146,5 @@ // Block operation s.BlockAllChanges(c, "TestBlockResolved") err = runResolved(c, []string{"dummy/2"}) - s.AssertBlocked(c, err, ".*TestBlockResolved.*") + testing.AssertOperationWasBlocked(c, err, ".*TestBlockResolved.*") } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/commands/run.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/commands/run.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/commands/run.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/commands/run.go 2016-10-13 14:31:49.000000000 +0000 @@ -12,8 +12,8 @@ "github.com/juju/cmd" "github.com/juju/errors" + "github.com/juju/gnuflag" "gopkg.in/juju/names.v2" - "launchpad.net/gnuflag" actionapi "github.com/juju/juju/api/action" "github.com/juju/juju/apiserver/params" @@ -39,7 +39,8 @@ } const runDoc = ` -Run the commands 
on the specified targets. +Run the commands on the specified targets. Only admin users of a model +are able to use this command. Targets are specified using either machine ids, application names or unit names. At least one target specifier is needed. @@ -78,7 +79,13 @@ } func (c *runCommand) SetFlags(f *gnuflag.FlagSet) { - c.out.AddFlags(f, "smart", cmd.DefaultFormatters) + c.ModelCommandBase.SetFlags(f) + c.out.AddFlags(f, "default", map[string]cmd.Formatter{ + "yaml": cmd.FormatYaml, + "json": cmd.FormatJson, + // default is used to format a single result specially. + "default": cmd.FormatYaml, + }) f.BoolVar(&c.all, "all", false, "Run the commands on all the machines") f.DurationVar(&c.timeout, "timeout", 5*time.Minute, "How long to wait before the remote command is considered to have failed") f.Var(cmd.NewStringsValue(nil, &c.machines), "machine", "One or more machine ids") @@ -88,23 +95,23 @@ func (c *runCommand) Init(args []string) error { if len(args) == 0 { - return fmt.Errorf("no commands specified") + return errors.Errorf("no commands specified") } c.commands, args = args[0], args[1:] if c.all { if len(c.machines) != 0 { - return fmt.Errorf("You cannot specify --all and individual machines") + return errors.Errorf("You cannot specify --all and individual machines") } if len(c.services) != 0 { - return fmt.Errorf("You cannot specify --all and individual applications") + return errors.Errorf("You cannot specify --all and individual applications") } if len(c.units) != 0 { - return fmt.Errorf("You cannot specify --all and individual units") + return errors.Errorf("You cannot specify --all and individual units") } } else { if len(c.machines) == 0 && len(c.services) == 0 && len(c.units) == 0 { - return fmt.Errorf("You must specify a target, either through --all, --machine, --application or --unit") + return errors.Errorf("You must specify a target, either through --all, --machine, --application or --unit") } } @@ -125,7 +132,7 @@ } } if len(nameErrors) > 0 { - 
return fmt.Errorf("The following run targets are not valid:\n%s", + return errors.Errorf("The following run targets are not valid:\n%s", strings.Join(nameErrors, "\n")) } @@ -270,9 +277,9 @@ <-afterFunc(1 * time.Second) } - // If we are just dealing with one result, AND we are using the smart + // If we are just dealing with one result, AND we are using the default // format, then pretend we were running it locally. - if len(values) == 1 && c.out.Name() == "smart" { + if len(values) == 1 && c.out.Name() == "default" { result, ok := values[0].(map[string]interface{}) if !ok { return errors.New("couldn't read action output") diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/commands/run_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/commands/run_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/commands/run_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/commands/run_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,9 +4,9 @@ package commands import ( + "bytes" "fmt" "sort" - "strings" "time" "github.com/juju/cmd" @@ -254,7 +254,8 @@ ConvertActionResults(unitResult, unitQuery), } - jsonFormatted, err := cmd.FormatJson(unformatted) + buff := &bytes.Buffer{} + err := cmd.FormatJson(buff, unformatted) c.Assert(err, jc.ErrorIsNil) context, err := testing.RunCommand(c, newRunCommand(), @@ -262,7 +263,7 @@ ) c.Assert(err, jc.ErrorIsNil) - c.Check(testing.Stdout(context), gc.Equals, string(jsonFormatted)+"\n") + c.Check(testing.Stdout(context), gc.Equals, buff.String()) } func (s *RunSuite) TestBlockRunForMachineAndUnit(c *gc.C) { @@ -272,10 +273,7 @@ _, err := testing.RunCommand(c, newRunCommand(), "--format=json", "--machine=0", "--unit=unit/0", "hostname", ) - c.Assert(err, gc.ErrorMatches, cmd.ErrSilent.Error()) - // msg is logged - stripped := strings.Replace(c.GetTestLog(), "\n", "", -1) - c.Check(stripped, gc.Matches, ".*To unblock changes.*") + 
testing.AssertOperationWasBlocked(c, err, ".*To enable changes.*") } func (s *RunSuite) TestAllMachines(c *gc.C) { @@ -316,13 +314,14 @@ }, } - jsonFormatted, err := cmd.FormatJson(unformatted) + buff := &bytes.Buffer{} + err := cmd.FormatJson(buff, unformatted) c.Assert(err, jc.ErrorIsNil) context, err := testing.RunCommand(c, newRunCommand(), "--format=json", "--all", "hostname") c.Assert(err, jc.ErrorIsNil) - c.Check(testing.Stdout(context), gc.Equals, string(jsonFormatted)+"\n") + c.Check(testing.Stdout(context), gc.Equals, buff.String()) c.Check(testing.Stderr(context), gc.Equals, "") } @@ -331,10 +330,7 @@ // Block operation mock.block = true _, err := testing.RunCommand(c, newRunCommand(), "--format=json", "--all", "hostname") - c.Assert(err, gc.ErrorMatches, cmd.ErrSilent.Error()) - // msg is logged - stripped := strings.Replace(c.GetTestLog(), "\n", "", -1) - c.Check(stripped, gc.Matches, ".*To unblock changes.*") + testing.AssertOperationWasBlocked(c, err, ".*To enable changes.*") } func (s *RunSuite) TestSingleResponse(c *gc.C) { @@ -358,10 +354,12 @@ ConvertActionResults(machineResult, query), } - jsonFormatted, err := cmd.FormatJson(unformatted) + jsonFormatted := &bytes.Buffer{} + err := cmd.FormatJson(jsonFormatted, unformatted) c.Assert(err, jc.ErrorIsNil) - yamlFormatted, err := cmd.FormatYaml(unformatted) + yamlFormatted := &bytes.Buffer{} + err = cmd.FormatYaml(yamlFormatted, unformatted) c.Assert(err, jc.ErrorIsNil) for i, test := range []struct { @@ -378,11 +376,11 @@ }, { message: "yaml output", format: "yaml", - stdout: string(yamlFormatted) + "\n", + stdout: yamlFormatted.String(), }, { message: "json output", format: "json", - stdout: string(jsonFormatted) + "\n", + stdout: jsonFormatted.String(), }} { c.Log(fmt.Sprintf("%v: %s", i, test.message)) args := []string{} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/commands/scp.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/commands/scp.go --- 
juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/commands/scp.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/commands/scp.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,7 +4,6 @@ package commands import ( - "fmt" "net" "strings" @@ -93,7 +92,7 @@ func (c *scpCommand) Init(args []string) error { if len(args) < 2 { - return fmt.Errorf("at least two arguments required") + return errors.Errorf("at least two arguments required") } c.Args = args return nil diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/commands/ssh_common.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/commands/ssh_common.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/commands/ssh_common.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/commands/ssh_common.go 2016-10-13 14:31:49.000000000 +0000 @@ -5,7 +5,6 @@ import ( "bufio" - "fmt" "io" "io/ioutil" "net" @@ -15,11 +14,11 @@ "time" "github.com/juju/errors" + "github.com/juju/gnuflag" "github.com/juju/utils" "github.com/juju/utils/set" "github.com/juju/utils/ssh" "gopkg.in/juju/names.v2" - "launchpad.net/gnuflag" "github.com/juju/juju/api/sshclient" "github.com/juju/juju/cmd/modelcmd" @@ -89,6 +88,7 @@ } func (c *SSHCommon) SetFlags(f *gnuflag.FlagSet) { + c.ModelCommandBase.SetFlags(f) f.BoolVar(&c.proxy, "proxy", false, "Proxy through the API server") f.BoolVar(&c.pty, "pty", true, "Enable pseudo-tty allocation") f.BoolVar(&c.noHostKeyChecks, "no-host-key-checks", false, "Skip host key checking (INSECURE)") @@ -231,11 +231,11 @@ func (c *SSHCommon) setProxyCommand(options *ssh.Options) error { apiServerHost, _, err := net.SplitHostPort(c.apiAddr) if err != nil { - return fmt.Errorf("failed to get proxy address: %v", err) + return errors.Errorf("failed to get proxy address: %v", err) } juju, err := getJujuExecutable() if err != nil { - return fmt.Errorf("failed to get juju executable path: %v", err) + return 
errors.Errorf("failed to get juju executable path: %v", err) } // TODO(mjs) 2016-05-09 LP #1579592 - It would be good to check the diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/commands/ssh.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/commands/ssh.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/commands/ssh.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/commands/ssh.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,8 +4,6 @@ package commands import ( - "fmt" - "github.com/juju/cmd" "github.com/juju/errors" "github.com/juju/utils/ssh" @@ -69,7 +67,7 @@ func (c *sshCommand) Init(args []string) error { if len(args) == 0 { - return fmt.Errorf("no target name specified") + return errors.Errorf("no target name specified") } c.Target, c.Args = args[0], args[1:] return nil diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/commands/sshkeys_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/commands/sshkeys_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/commands/sshkeys_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/commands/sshkeys_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -13,7 +13,6 @@ keymanagerserver "github.com/juju/juju/apiserver/keymanager" keymanagertesting "github.com/juju/juju/apiserver/keymanager/testing" - "github.com/juju/juju/cmd/juju/common" "github.com/juju/juju/juju/osenv" jujutesting "github.com/juju/juju/juju/testing" coretesting "github.com/juju/juju/testing" @@ -53,7 +52,7 @@ type keySuiteBase struct { jujutesting.JujuConnSuite - common.CmdBlockHelper + coretesting.CmdBlockHelper } func (s *keySuiteBase) SetUpSuite(c *gc.C) { @@ -63,7 +62,7 @@ func (s *keySuiteBase) SetUpTest(c *gc.C) { s.JujuConnSuite.SetUpTest(c) - s.CmdBlockHelper = common.NewCmdBlockHelper(s.APIState) + s.CmdBlockHelper = coretesting.NewCmdBlockHelper(s.APIState) c.Assert(s.CmdBlockHelper, gc.NotNil) 
s.AddCleanup(func(*gc.C) { s.CmdBlockHelper.Close() }) } @@ -144,7 +143,7 @@ // Block operation s.BlockAllChanges(c, "TestBlockAddKey") _, err := coretesting.RunCommand(c, NewAddKeysCommand(), key2, "invalid-key") - s.AssertBlocked(c, err, ".*TestBlockAddKey.*") + coretesting.AssertOperationWasBlocked(c, err, ".*TestBlockAddKey.*") } type RemoveKeySuite struct { @@ -174,7 +173,7 @@ s.BlockAllChanges(c, "TestBlockRemoveKeys") _, err := coretesting.RunCommand(c, NewRemoveKeysCommand(), sshtesting.ValidKeyTwo.Fingerprint, "invalid-key") - s.AssertBlocked(c, err, ".*TestBlockRemoveKeys.*") + coretesting.AssertOperationWasBlocked(c, err, ".*TestBlockRemoveKeys.*") } type ImportKeySuite struct { @@ -192,9 +191,9 @@ key1 := sshtesting.ValidKeyOne.Key + " user@host" s.setAuthorizedKeys(c, key1) - context, err := coretesting.RunCommand(c, NewImportKeysCommand(), "lp:validuser", "invalid-key") + context, err := coretesting.RunCommand(c, NewImportKeysCommand(), "lp:validuser", "lp:invalid-key") c.Assert(err, jc.ErrorIsNil) - c.Assert(coretesting.Stderr(context), gc.Matches, `cannot import key id "invalid-key".*\n`) + c.Assert(coretesting.Stderr(context), gc.Matches, `cannot import key id "lp:invalid-key".*\n`) s.assertEnvironKeys(c, key1, sshtesting.ValidKeyThree.Key) } @@ -204,6 +203,6 @@ // Block operation s.BlockAllChanges(c, "TestBlockImportKeys") - _, err := coretesting.RunCommand(c, NewImportKeysCommand(), "lp:validuser", "invalid-key") - s.AssertBlocked(c, err, ".*TestBlockImportKeys.*") + _, err := coretesting.RunCommand(c, NewImportKeysCommand(), "lp:validuser", "lp:invalid-key") + coretesting.AssertOperationWasBlocked(c, err, ".*TestBlockImportKeys.*") } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/commands/switch_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/commands/switch_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/commands/switch_test.go 2016-08-16 08:56:25.000000000 +0000 +++ 
juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/commands/switch_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -77,12 +77,12 @@ s.addController(c, "a-controller") s.store.CurrentControllerName = "a-controller" s.store.Models["a-controller"] = &jujuclient.ControllerModels{ - Models: map[string]jujuclient.ModelDetails{"admin@local/mymodel": {}}, - CurrentModel: "admin@local/mymodel", + Models: map[string]jujuclient.ModelDetails{"admin/mymodel": {}}, + CurrentModel: "admin/mymodel", } ctx, err := s.run(c) c.Assert(err, jc.ErrorIsNil) - c.Assert(coretesting.Stdout(ctx), gc.Equals, "a-controller:admin@local/mymodel\n") + c.Assert(coretesting.Stdout(ctx), gc.Equals, "a-controller:admin/mymodel\n") } func (s *SwitchSimpleSuite) TestSwitchWritesCurrentController(c *gc.C) { @@ -131,80 +131,80 @@ s.store.CurrentControllerName = "ctrl" s.addController(c, "ctrl") s.store.Models["ctrl"] = &jujuclient.ControllerModels{ - Models: map[string]jujuclient.ModelDetails{"admin@local/mymodel": {}}, + Models: map[string]jujuclient.ModelDetails{"admin/mymodel": {}}, } context, err := s.run(c, "mymodel") c.Assert(err, jc.ErrorIsNil) - c.Assert(coretesting.Stderr(context), gc.Equals, "ctrl (controller) -> ctrl:admin@local/mymodel\n") + c.Assert(coretesting.Stderr(context), gc.Equals, "ctrl (controller) -> ctrl:admin/mymodel\n") s.stubStore.CheckCalls(c, []testing.StubCall{ {"CurrentController", nil}, {"CurrentModel", []interface{}{"ctrl"}}, {"ControllerByName", []interface{}{"mymodel"}}, {"AccountDetails", []interface{}{"ctrl"}}, - {"SetCurrentModel", []interface{}{"ctrl", "admin@local/mymodel"}}, + {"SetCurrentModel", []interface{}{"ctrl", "admin/mymodel"}}, }) - c.Assert(s.store.Models["ctrl"].CurrentModel, gc.Equals, "admin@local/mymodel") + c.Assert(s.store.Models["ctrl"].CurrentModel, gc.Equals, "admin/mymodel") } func (s *SwitchSimpleSuite) TestSwitchControllerToModelDifferentController(c *gc.C) { s.store.CurrentControllerName = "old" s.addController(c, "new") s.store.Models["new"] 
= &jujuclient.ControllerModels{ - Models: map[string]jujuclient.ModelDetails{"admin@local/mymodel": {}}, + Models: map[string]jujuclient.ModelDetails{"admin/mymodel": {}}, } context, err := s.run(c, "new:mymodel") c.Assert(err, jc.ErrorIsNil) - c.Assert(coretesting.Stderr(context), gc.Equals, "old (controller) -> new:admin@local/mymodel\n") + c.Assert(coretesting.Stderr(context), gc.Equals, "old (controller) -> new:admin/mymodel\n") s.stubStore.CheckCalls(c, []testing.StubCall{ {"CurrentController", nil}, {"CurrentModel", []interface{}{"old"}}, {"ControllerByName", []interface{}{"new:mymodel"}}, {"ControllerByName", []interface{}{"new"}}, {"AccountDetails", []interface{}{"new"}}, - {"SetCurrentModel", []interface{}{"new", "admin@local/mymodel"}}, + {"SetCurrentModel", []interface{}{"new", "admin/mymodel"}}, {"SetCurrentController", []interface{}{"new"}}, }) - c.Assert(s.store.Models["new"].CurrentModel, gc.Equals, "admin@local/mymodel") + c.Assert(s.store.Models["new"].CurrentModel, gc.Equals, "admin/mymodel") } func (s *SwitchSimpleSuite) TestSwitchLocalControllerToModelDifferentController(c *gc.C) { s.store.CurrentControllerName = "old" s.addController(c, "new") s.store.Models["new"] = &jujuclient.ControllerModels{ - Models: map[string]jujuclient.ModelDetails{"admin@local/mymodel": {}}, + Models: map[string]jujuclient.ModelDetails{"admin/mymodel": {}}, } context, err := s.run(c, "new:mymodel") c.Assert(err, jc.ErrorIsNil) - c.Assert(coretesting.Stderr(context), gc.Equals, "old (controller) -> new:admin@local/mymodel\n") + c.Assert(coretesting.Stderr(context), gc.Equals, "old (controller) -> new:admin/mymodel\n") s.stubStore.CheckCalls(c, []testing.StubCall{ {"CurrentController", nil}, {"CurrentModel", []interface{}{"old"}}, {"ControllerByName", []interface{}{"new:mymodel"}}, {"ControllerByName", []interface{}{"new"}}, {"AccountDetails", []interface{}{"new"}}, - {"SetCurrentModel", []interface{}{"new", "admin@local/mymodel"}}, + {"SetCurrentModel", 
[]interface{}{"new", "admin/mymodel"}}, {"SetCurrentController", []interface{}{"new"}}, }) - c.Assert(s.store.Models["new"].CurrentModel, gc.Equals, "admin@local/mymodel") + c.Assert(s.store.Models["new"].CurrentModel, gc.Equals, "admin/mymodel") } func (s *SwitchSimpleSuite) TestSwitchControllerToDifferentControllerCurrentModel(c *gc.C) { s.store.CurrentControllerName = "old" s.addController(c, "new") s.store.Models["new"] = &jujuclient.ControllerModels{ - Models: map[string]jujuclient.ModelDetails{"admin@local/mymodel": {}}, - CurrentModel: "admin@local/mymodel", + Models: map[string]jujuclient.ModelDetails{"admin/mymodel": {}}, + CurrentModel: "admin/mymodel", } context, err := s.run(c, "new:mymodel") c.Assert(err, jc.ErrorIsNil) - c.Assert(coretesting.Stderr(context), gc.Equals, "old (controller) -> new:admin@local/mymodel\n") + c.Assert(coretesting.Stderr(context), gc.Equals, "old (controller) -> new:admin/mymodel\n") s.stubStore.CheckCalls(c, []testing.StubCall{ {"CurrentController", nil}, {"CurrentModel", []interface{}{"old"}}, {"ControllerByName", []interface{}{"new:mymodel"}}, {"ControllerByName", []interface{}{"new"}}, {"AccountDetails", []interface{}{"new"}}, - {"SetCurrentModel", []interface{}{"new", "admin@local/mymodel"}}, + {"SetCurrentModel", []interface{}{"new", "admin/mymodel"}}, {"SetCurrentController", []interface{}{"new"}}, }) } @@ -214,15 +214,15 @@ s.addController(c, "same") s.store.Models["same"] = &jujuclient.ControllerModels{ Models: map[string]jujuclient.ModelDetails{ - "admin@local/mymodel": {}, - "bianca@local/mymodel": {}, + "admin/mymodel": {}, + "bianca/mymodel": {}, }, - CurrentModel: "admin@local/mymodel", + CurrentModel: "admin/mymodel", } context, err := s.run(c, "bianca/mymodel") c.Assert(err, jc.ErrorIsNil) - c.Assert(coretesting.Stderr(context), gc.Equals, "same:admin@local/mymodel -> same:bianca@local/mymodel\n") - c.Assert(s.store.Models["same"].CurrentModel, gc.Equals, "bianca@local/mymodel") + 
c.Assert(coretesting.Stderr(context), gc.Equals, "same:admin/mymodel -> same:bianca/mymodel\n") + c.Assert(s.store.Models["same"].CurrentModel, gc.Equals, "bianca/mymodel") } func (s *SwitchSimpleSuite) TestSwitchUnknownNoCurrentController(c *gc.C) { @@ -239,12 +239,12 @@ s.addController(c, "ctrl") s.onRefresh = func() { s.store.Models["ctrl"] = &jujuclient.ControllerModels{ - Models: map[string]jujuclient.ModelDetails{"admin@local/unknown": {}}, + Models: map[string]jujuclient.ModelDetails{"admin/unknown": {}}, } } ctx, err := s.run(c, "unknown") c.Assert(err, jc.ErrorIsNil) - c.Assert(coretesting.Stderr(ctx), gc.Equals, "ctrl (controller) -> ctrl:admin@local/unknown\n") + c.Assert(coretesting.Stderr(ctx), gc.Equals, "ctrl (controller) -> ctrl:admin/unknown\n") s.CheckCallNames(c, "RefreshModels") } @@ -279,6 +279,6 @@ func (s *SwitchSimpleSuite) addController(c *gc.C, name string) { s.store.Controllers[name] = jujuclient.ControllerDetails{} s.store.Accounts[name] = jujuclient.AccountDetails{ - User: "admin@local", + User: "admin", } } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/commands/synctools.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/commands/synctools.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/commands/synctools.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/commands/synctools.go 2016-10-13 14:31:49.000000000 +0000 @@ -8,9 +8,9 @@ "io" "github.com/juju/cmd" + "github.com/juju/gnuflag" "github.com/juju/loggo" "github.com/juju/version" - "launchpad.net/gnuflag" "github.com/juju/juju/apiserver/params" "github.com/juju/juju/cmd/juju/block" @@ -65,8 +65,8 @@ # Get locally available software to the model: juju sync-tools --debug --source=/home/ubuntu/sync-tools -See Also: - juju upgrade-juju +See also: + upgrade-juju ` @@ -79,6 +79,7 @@ } func (c *syncToolsCommand) SetFlags(f *gnuflag.FlagSet) { + c.ModelCommandBase.SetFlags(f) f.BoolVar(&c.allVersions, "all", false, 
"Copy all versions, not just the latest") f.StringVar(&c.versionStr, "version", "", "Copy a specific major[.minor] version") f.BoolVar(&c.dryRun, "dry-run", false, "Don't copy, just print what would be copied") diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/commands/synctools_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/commands/synctools_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/commands/synctools_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/commands/synctools_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -6,7 +6,6 @@ import ( "io" "io/ioutil" - "strings" "github.com/juju/cmd" "github.com/juju/errors" @@ -47,7 +46,7 @@ s.store = jujuclienttesting.NewMemStore() s.store.CurrentControllerName = "ctrl" s.store.Accounts["ctrl"] = jujuclient.AccountDetails{ - User: "admin@local", + User: "admin", } } @@ -286,10 +285,7 @@ return common.OperationBlockedError("TestAPIAdapterBlockUploadTools") } _, err := s.runSyncToolsCommand(c, "-m", "test-target", "--destination", c.MkDir(), "--stream", "released") - c.Assert(err, gc.ErrorMatches, cmd.ErrSilent.Error()) - // msg is logged - stripped := strings.Replace(c.GetTestLog(), "\n", "", -1) - c.Check(stripped, gc.Matches, ".*TestAPIAdapterBlockUploadTools.*") + coretesting.AssertOperationWasBlocked(c, err, ".*TestAPIAdapterBlockUploadTools.*") } type fakeSyncToolsAPI struct { diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/commands/upgradejuju.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/commands/upgradejuju.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/commands/upgradejuju.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/commands/upgradejuju.go 2016-10-13 14:31:49.000000000 +0000 @@ -14,10 +14,11 @@ "github.com/juju/cmd" "github.com/juju/errors" + "github.com/juju/gnuflag" "github.com/juju/utils/series" "github.com/juju/version" - 
"launchpad.net/gnuflag" + "github.com/juju/juju/api/controller" "github.com/juju/juju/api/modelconfig" "github.com/juju/juju/apiserver/params" "github.com/juju/juju/cmd/juju/block" @@ -35,9 +36,9 @@ Juju provides agent software to every machine it creates. This command upgrades that software across an entire model, which is, by default, the current model. -A model's agent version can be shown with `[1:] + "`juju get-model-config agent-\nversion`" + `. +A model's agent version can be shown with `[1:] + "`juju model-config agent-\nversion`" + `. A version is denoted by: major.minor.patch -The upgrade candidate will be auto-selected if '--version' is not +The upgrade candidate will be auto-selected if '--agent-version' is not specified: - If the server major version matches the client major version, the version selected is minor+1. If such a minor version is not available then @@ -55,12 +56,12 @@ Examples: juju upgrade-juju --dry-run - juju upgrade-juju --version 2.0.1 + juju upgrade-juju --agent-version 2.0.1 See also: sync-tools` -func newUpgradeJujuCommand(minUpgradeVers map[int]version.Number, options ...modelcmd.WrapEnvOption) cmd.Command { +func newUpgradeJujuCommand(minUpgradeVers map[int]version.Number, options ...modelcmd.WrapOption) cmd.Command { if minUpgradeVers == nil { minUpgradeVers = minMajorUpgradeVersion } @@ -72,7 +73,7 @@ modelcmd.ModelCommandBase vers string Version version.Number - UploadTools bool + BuildAgent bool DryRun bool ResetPrevious bool AssumeYes bool @@ -93,8 +94,9 @@ } func (c *upgradeJujuCommand) SetFlags(f *gnuflag.FlagSet) { - f.StringVar(&c.vers, "version", "", "Upgrade to specific version") - f.BoolVar(&c.UploadTools, "upload-tools", false, "Upload local version of tools; for development use only") + c.ModelCommandBase.SetFlags(f) + f.StringVar(&c.vers, "agent-version", "", "Upgrade to specific version") + f.BoolVar(&c.BuildAgent, "build-agent", false, "Build a local version of the agent binary; for development use only") 
f.BoolVar(&c.DryRun, "dry-run", false, "Don't change anything, just report what would be changed") f.BoolVar(&c.ResetPrevious, "reset-previous-upgrade", false, "Clear the previous (incomplete) upgrade status (use with care)") f.BoolVar(&c.AssumeYes, "y", false, "Answer 'yes' to confirmation prompts") @@ -107,14 +109,14 @@ if err != nil { return err } - if c.UploadTools && vers.Build != 0 { + if c.BuildAgent && vers.Build != 0 { // TODO(fwereade): when we start taking versions from actual built - // code, we should disable --version when used with --upload-tools. + // code, we should disable --agent-version when used with --build-agent. // For now, it's the only way to experiment with version upgrade // behaviour live, so the only restriction is that Build cannot // be used (because its value needs to be chosen internally so as // not to collide with existing tools). - return errors.New("cannot specify build number when uploading tools") + return errors.New("cannot specify build number when building an agent") } c.Version = vers } @@ -177,6 +179,11 @@ Close() error } +type controllerAPI interface { + ModelConfig() (map[string]interface{}, error) + Close() error +} + var getUpgradeJujuAPI = func(c *upgradeJujuCommand) (upgradeJujuAPI, error) { return c.NewAPIClient() } @@ -189,6 +196,14 @@ return modelconfig.NewClient(api), nil } +var getControllerAPI = func(c *upgradeJujuCommand) (controllerAPI, error) { + api, err := c.NewControllerAPIRoot() + if err != nil { + return nil, errors.Trace(err) + } + return controller.NewClient(api), nil +} + // Run changes the version proposed for the juju envtools. 
func (c *upgradeJujuCommand) Run(ctx *cmd.Context) (err error) { @@ -202,6 +217,11 @@ return err } defer modelConfigClient.Close() + controllerClient, err := getControllerAPI(c) + if err != nil { + return err + } + defer controllerClient.Close() defer func() { if err == errUpToDate { ctx.Infof(err.Error()) @@ -219,14 +239,15 @@ return err } - controller, err := c.ClientStore().ControllerByName(c.ControllerName()) + controllerModelConfig, err := controllerClient.ModelConfig() if err != nil { return err } - if c.UploadTools && (cfg.UUID() != controller.ControllerUUID) { + isControllerModel := cfg.UUID() == controllerModelConfig[config.UUIDKey] + if c.BuildAgent && !isControllerModel { // For UploadTools, model must be the "controller" model, // that is, modelUUID == controllerUUID - return errors.Errorf("--upload-tools can only be used with the controller model") + return errors.Errorf("--build-agent can only be used with the controller model") } agentVersion, ok := cfg.AgentVersion() @@ -235,10 +256,10 @@ return errors.New("incomplete model configuration") } - if c.UploadTools && c.Version == version.Zero { + if c.BuildAgent && c.Version == version.Zero { // Currently, uploading tools assumes the version to be // the same as jujuversion.Current if not specified with - // --version. + // --agent-version. c.Version = jujuversion.Current } warnCompat := false @@ -271,7 +292,7 @@ case c.Version.Major > agentVersion.Major: // User is requesting an upgrade to a new major number // Only upgrade to a different major number if: - // 1 - Explicitly requested with --version or using --upload-tools, and + // 1 - Explicitly requested with --agent-version or using --build-agent, and // 2 - The environment is running a valid version to upgrade from, and // 3 - The upgrade is to a minor version of 0. 
minVer, ok := c.minMajorUpgradeVersion[c.Version.Major] @@ -298,22 +319,37 @@ if err != nil { return err } - if c.UploadTools && !c.DryRun { - if err := context.uploadTools(); err != nil { - return block.ProcessBlockedError(err, block.BlockChange) + // If we're running a custom build or the user has asked for a new agent + // to be built, upload a local jujud binary if possible. + uploadLocalBinary := isControllerModel && c.Version == version.Zero && tryImplicitUpload(agentVersion) + if !warnCompat && (uploadLocalBinary || c.BuildAgent) && !c.DryRun { + if err := context.uploadTools(c.BuildAgent); err != nil { + // If we've explicitly asked to build an agent binary, or the upload failed + // because changes were blocked, we'll return an error. + if err2 := block.ProcessBlockedError(err, block.BlockChange); c.BuildAgent || err2 == cmd.ErrSilent { + return err2 + } } + builtMsg := "" + if c.BuildAgent { + builtMsg = " (built from source)" + } + fmt.Fprintf(ctx.Stdout, "no prepackaged tools available, using local agent binary %v%s\n", context.chosen, builtMsg) } + + // If there was an error implicitly uploading a binary, we'll still look for any packaged binaries + // since there may still be a valid upgrade and the user didn't ask for any local binary. if err := context.validate(); err != nil { return err } // TODO(fwereade): this list may be incomplete, pending envtools.Upload change. 
- ctx.Infof("available tools:\n%s", formatTools(context.tools)) - ctx.Infof("best version:\n %s", context.chosen) + ctx.Verbosef("available tools:\n%s", formatTools(context.tools)) + ctx.Verbosef("best version:\n %s", context.chosen) if warnCompat { - logger.Infof("version %s incompatible with this client (%s)", context.chosen, jujuversion.Current) + fmt.Fprintf(ctx.Stderr, "version %s incompatible with this client (%s)\n", context.chosen, jujuversion.Current) } if c.DryRun { - ctx.Infof("upgrade to this version by running\n juju upgrade-juju --version=\"%s\"\n", context.chosen) + fmt.Fprintf(ctx.Stderr, "upgrade to this version by running\n juju upgrade-juju --agent-version=\"%s\"\n", context.chosen) } else { if c.ResetPrevious { if ok, err := c.confirmResetPreviousUpgrade(ctx); !ok || err != nil { @@ -338,11 +374,16 @@ return block.ProcessBlockedError(err, block.BlockChange) } } - logger.Infof("started upgrade to %s", context.chosen) + fmt.Fprintf(ctx.Stdout, "started upgrade to %s\n", context.chosen) } return nil } +func tryImplicitUpload(agentVersion version.Number) bool { + newerAgent := jujuversion.Current.Compare(agentVersion) > 0 + return newerAgent || agentVersion.Build > 0 || jujuversion.Current.Build > 0 +} + const resetPreviousUpgradeMessage = ` WARNING! using --reset-previous-upgrade when an upgrade is in progress will cause the upgrade to fail. Only use this option to clear an @@ -354,7 +395,7 @@ if c.AssumeYes { return true, nil } - fmt.Fprintf(ctx.Stdout, resetPreviousUpgradeMessage) + fmt.Fprint(ctx.Stdout, resetPreviousUpgradeMessage) scanner := bufio.NewScanner(ctx.Stdin) scanner.Scan() err := scanner.Err() @@ -392,7 +433,7 @@ if !params.IsCodeNotFound(err) { return nil, err } - if !c.UploadTools { + if !tryImplicitUpload(agentVersion) && !c.BuildAgent { // No tools found and we shouldn't upload any, so if we are not asking for a // major upgrade, pretend there is no more recent version available. 
if c.Version == version.Zero && agentVersion.Major == filterVersion.Major { @@ -428,7 +469,7 @@ // than that of any otherwise-matching available envtools. // uploadTools resets the chosen version and replaces the available tools // with the ones just uploaded. -func (context *upgradeContext) uploadTools() (err error) { +func (context *upgradeContext) uploadTools(buildAgent bool) (err error) { // TODO(fwereade): this is kinda crack: we should not assume that // jujuversion.Current matches whatever source happens to be built. The // ideal would be: @@ -450,14 +491,16 @@ } context.chosen = uploadVersion(context.chosen, context.tools) - builtTools, err := sync.BuildToolsTarball(&context.chosen, "upgrade") + builtTools, err := sync.BuildAgentTarball(buildAgent, &context.chosen, "upgrade") if err != nil { return errors.Trace(err) } defer os.RemoveAll(builtTools.Dir) + uploadToolsVersion := builtTools.Version + uploadToolsVersion.Number = context.chosen toolsPath := path.Join(builtTools.Dir, builtTools.StorageName) - logger.Infof("uploading tools %v (%dkB) to Juju controller", builtTools.Version, (builtTools.Size+512)/1024) + logger.Infof("uploading agent binary %v (%dkB) to Juju controller", uploadToolsVersion, (builtTools.Size+512)/1024) f, err := os.Open(toolsPath) if err != nil { return errors.Trace(err) @@ -468,7 +511,7 @@ return errors.Trace(err) } additionalSeries := series.OSSupportedSeries(os) - uploaded, err := context.apiClient.UploadTools(f, builtTools.Version, additionalSeries...) + uploaded, err := context.apiClient.UploadTools(f, uploadToolsVersion, additionalSeries...) if err != nil { return errors.Trace(err) } @@ -488,7 +531,7 @@ // agent version and doing major.minor+1.patch=0. // Upgrading across a major release boundary requires that the version - // be specified with --version. + // be specified with --agent-version. 
nextVersion := context.agent nextVersion.Minor += 1 nextVersion.Patch = 0 diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/commands/upgradejuju_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/commands/upgradejuju_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/commands/upgradejuju_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/commands/upgradejuju_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -11,6 +11,7 @@ "io/ioutil" "strings" + "github.com/juju/errors" jc "github.com/juju/testing/checkers" "github.com/juju/utils/arch" "github.com/juju/utils/series" @@ -20,7 +21,6 @@ "github.com/juju/juju/apiserver/common" "github.com/juju/juju/apiserver/params" apiservertesting "github.com/juju/juju/apiserver/testing" - cmdcommon "github.com/juju/juju/cmd/juju/common" "github.com/juju/juju/cmd/modelcmd" "github.com/juju/juju/environs/filestorage" "github.com/juju/juju/environs/sync" @@ -43,7 +43,7 @@ authoriser apiservertesting.FakeAuthorizer toolsDir string - cmdcommon.CmdBlockHelper + coretesting.CmdBlockHelper } func (s *UpgradeJujuSuite) SetUpTest(c *gc.C) { @@ -53,7 +53,7 @@ Tag: s.AdminUserTag(c), } - s.CmdBlockHelper = cmdcommon.NewCmdBlockHelper(s.APIState) + s.CmdBlockHelper = coretesting.NewCmdBlockHelper(s.APIState) c.Assert(s.CmdBlockHelper, gc.NotNil) s.AddCleanup(func(*gc.C) { s.CmdBlockHelper.Close() }) } @@ -83,38 +83,38 @@ args: []string{"--dev"}, expectInitErr: "flag provided but not defined: --dev", }, { - about: "invalid --version value", + about: "invalid --agent-version value", currentVersion: "1.0.0-quantal-amd64", - args: []string{"--version", "invalid-version"}, + args: []string{"--agent-version", "invalid-version"}, expectInitErr: "invalid version .*", }, { about: "just major version, no minor specified", currentVersion: "4.2.0-quantal-amd64", - args: []string{"--version", "4"}, + args: []string{"--agent-version", "4"}, expectInitErr: `invalid version "4"`, 
}, { about: "major version upgrade to incompatible version", currentVersion: "2.0.0-quantal-amd64", agentVersion: "2.0.0", - args: []string{"--version", "5.2.0"}, + args: []string{"--agent-version", "5.2.0"}, expectErr: `unknown version "5.2.0"`, }, { about: "major version downgrade to incompatible version", currentVersion: "4.2.0-quantal-amd64", agentVersion: "4.2.0", - args: []string{"--version", "3.2.0"}, + args: []string{"--agent-version", "3.2.0"}, expectErr: "cannot change version from 4.2.0 to 3.2.0", }, { - about: "--upload-tools with inappropriate version 1", + about: "--build-agent with inappropriate version 1", currentVersion: "4.2.0-quantal-amd64", agentVersion: "4.2.0", - args: []string{"--upload-tools", "--version", "3.1.0"}, + args: []string{"--build-agent", "--agent-version", "3.1.0"}, expectErr: "cannot change version from 4.2.0 to 3.1.0", }, { - about: "--upload-tools with inappropriate version 2", + about: "--build-agent with inappropriate version 2", currentVersion: "3.2.7-quantal-amd64", - args: []string{"--upload-tools", "--version", "3.2.8.4"}, - expectInitErr: "cannot specify build number when uploading tools", + args: []string{"--build-agent", "--agent-version", "3.2.8.4"}, + expectInitErr: "cannot specify build number when building an agent", }, { about: "latest supported stable release", tools: []string{"2.1.0-quantal-amd64", "2.1.2-quantal-i386", "2.1.3-quantal-amd64", "2.1-dev1-quantal-amd64"}, @@ -138,7 +138,7 @@ tools: []string{"3.3.0-quantal-amd64"}, currentVersion: "3.0.2-quantal-amd64", agentVersion: "2.8.2", - expectVersion: "2.8.2", + expectErr: "no compatible tools available", }, { about: "no next supported available", tools: []string{"2.2.0-quantal-amd64", "2.2.5-quantal-i386", "2.3.3-quantal-amd64", "2.1-dev1-quantal-amd64"}, @@ -150,7 +150,7 @@ tools: []string{"2.1-dev1-quantal-amd64", "2.1.0-quantal-amd64", "2.3-dev0-quantal-amd64", "3.0.1-quantal-amd64"}, currentVersion: "2.1-dev0-quantal-amd64", agentVersion: "2.0.0", - 
expectVersion: "2.1.0", + expectVersion: "2.1-dev0.1", }, { about: "latest current, when agent is dev", tools: []string{"2.1-dev1-quantal-amd64", "2.2.0-quantal-amd64", "2.3-dev0-quantal-amd64", "3.0.1-quantal-amd64"}, @@ -162,83 +162,83 @@ tools: []string{"2.3-dev0-quantal-amd64"}, currentVersion: "2.0.0-quantal-amd64", agentVersion: "2.0.0", - args: []string{"--version", "2.3-dev0"}, + args: []string{"--agent-version", "2.3-dev0"}, expectVersion: "2.3-dev0", }, { about: "specified major version", tools: []string{"3.0.2-quantal-amd64"}, currentVersion: "3.0.2-quantal-amd64", agentVersion: "2.8.2", - args: []string{"--version", "3.0.2"}, + args: []string{"--agent-version", "3.0.2"}, expectVersion: "3.0.2", upgradeMap: map[int]version.Number{3: version.MustParse("2.8.2")}, }, { about: "specified version missing, but already set", currentVersion: "3.0.0-quantal-amd64", agentVersion: "3.0.0", - args: []string{"--version", "3.0.0"}, + args: []string{"--agent-version", "3.0.0"}, expectVersion: "3.0.0", }, { about: "specified version, no tools", currentVersion: "3.0.0-quantal-amd64", agentVersion: "3.0.0", - args: []string{"--version", "3.2.0"}, + args: []string{"--agent-version", "3.2.0"}, expectErr: "no tools available", }, { about: "specified version, no matching major version", tools: []string{"4.2.0-quantal-amd64"}, currentVersion: "3.0.0-quantal-amd64", agentVersion: "3.0.0", - args: []string{"--version", "3.2.0"}, + args: []string{"--agent-version", "3.2.0"}, expectErr: "no matching tools available", }, { about: "specified version, no matching minor version", tools: []string{"3.4.0-quantal-amd64"}, currentVersion: "3.0.0-quantal-amd64", agentVersion: "3.0.0", - args: []string{"--version", "3.2.0"}, + args: []string{"--agent-version", "3.2.0"}, expectErr: "no matching tools available", }, { about: "specified version, no matching patch version", tools: []string{"3.2.5-quantal-amd64"}, currentVersion: "3.0.0-quantal-amd64", agentVersion: "3.0.0", - args: 
[]string{"--version", "3.2.0"}, + args: []string{"--agent-version", "3.2.0"}, expectErr: "no matching tools available", }, { about: "specified version, no matching build version", tools: []string{"3.2.0.2-quantal-amd64"}, currentVersion: "3.0.0-quantal-amd64", agentVersion: "3.0.0", - args: []string{"--version", "3.2.0"}, + args: []string{"--agent-version", "3.2.0"}, expectErr: "no matching tools available", }, { about: "incompatible version (minor != 0)", tools: []string{"3.2.0-quantal-amd64"}, currentVersion: "4.2.0-quantal-amd64", agentVersion: "3.2.0", - args: []string{"--version", "3.2.0"}, + args: []string{"--agent-version", "3.2.0"}, expectErr: "cannot upgrade a 3.2.0 model with a 4.2.0 client", }, { about: "incompatible version (model major > client major)", tools: []string{"3.2.0-quantal-amd64"}, currentVersion: "3.2.0-quantal-amd64", agentVersion: "4.2.0", - args: []string{"--version", "3.2.0"}, + args: []string{"--agent-version", "3.2.0"}, expectErr: "cannot upgrade a 4.2.0 model with a 3.2.0 client", }, { about: "incompatible version (model major < client major - 1)", tools: []string{"3.2.0-quantal-amd64"}, currentVersion: "4.0.2-quantal-amd64", agentVersion: "2.0.0", - args: []string{"--version", "3.2.0"}, + args: []string{"--agent-version", "3.2.0"}, expectErr: "cannot upgrade a 2.0.0 model with a 4.0.2 client", }, { about: "minor version downgrade to incompatible version", tools: []string{"3.2.0-quantal-amd64"}, currentVersion: "3.2.0-quantal-amd64", agentVersion: "3.3-dev0", - args: []string{"--version", "3.2.0"}, + args: []string{"--agent-version", "3.2.0"}, expectErr: "cannot change version from 3.3-dev0 to 3.2.0", }, { about: "nothing available", @@ -255,21 +255,21 @@ about: "upload with default series", currentVersion: "2.2.0-quantal-amd64", agentVersion: "2.0.0", - args: []string{"--upload-tools"}, + args: []string{"--build-agent"}, expectVersion: "2.2.0.1", expectUploaded: []string{"2.2.0.1-quantal-amd64", "2.2.0.1-%LTS%-amd64", 
"2.2.0.1-raring-amd64"}, }, { about: "upload with explicit version", currentVersion: "2.2.0-quantal-amd64", agentVersion: "2.0.0", - args: []string{"--upload-tools", "--version", "2.7.3"}, + args: []string{"--build-agent", "--agent-version", "2.7.3"}, expectVersion: "2.7.3.1", expectUploaded: []string{"2.7.3.1-quantal-amd64", "2.7.3.1-%LTS%-amd64", "2.7.3.1-raring-amd64"}, }, { about: "upload dev version, currently on release version", currentVersion: "2.1.0-quantal-amd64", agentVersion: "2.0.0", - args: []string{"--upload-tools"}, + args: []string{"--build-agent"}, expectVersion: "2.1.0.1", expectUploaded: []string{"2.1.0.1-quantal-amd64", "2.1.0.1-%LTS%-amd64", "2.1.0.1-raring-amd64"}, }, { @@ -277,7 +277,7 @@ tools: []string{"2.4.6-quantal-amd64", "2.4.8-quantal-amd64"}, currentVersion: "2.4.6-quantal-amd64", agentVersion: "2.4.0", - args: []string{"--upload-tools"}, + args: []string{"--build-agent"}, expectVersion: "2.4.6.1", expectUploaded: []string{"2.4.6.1-quantal-amd64", "2.4.6.1-%LTS%-amd64", "2.4.6.1-raring-amd64"}, }, { @@ -285,7 +285,7 @@ tools: []string{"2.4.6-quantal-amd64", "2.4.6.2-saucy-i386", "2.4.8-quantal-amd64"}, currentVersion: "2.4.6-quantal-amd64", agentVersion: "2.4.6.2", - args: []string{"--upload-tools"}, + args: []string{"--build-agent"}, expectVersion: "2.4.6.3", expectUploaded: []string{"2.4.6.3-quantal-amd64", "2.4.6.3-%LTS%-amd64", "2.4.6.3-raring-amd64"}, }, { @@ -293,7 +293,7 @@ currentVersion: "2.2.0-quantal-amd64", tools: []string{"2.7.3.1-quantal-amd64"}, agentVersion: "2.0.0", - args: []string{"--upload-tools", "--version", "2.7.3"}, + args: []string{"--build-agent", "--agent-version", "2.7.3"}, expectVersion: "2.7.3.2", expectUploaded: []string{"2.7.3.2-quantal-amd64", "2.7.3.2-%LTS%-amd64", "2.7.3.2-raring-amd64"}, }, { @@ -301,7 +301,7 @@ tools: []string{"1.21.3-quantal-amd64", "1.22.1-quantal-amd64"}, currentVersion: "1.22.1-quantal-amd64", agentVersion: "1.20.14", - expectVersion: "1.21.3", + expectVersion: "1.22.1.1", }} 
func (s *UpgradeJujuSuite) TestUpgradeJuju(c *gc.C) { @@ -425,7 +425,7 @@ } err := s.State.UpdateModelConfig(updateAttrs, nil, nil) c.Assert(err, jc.ErrorIsNil) - s.PatchValue(&sync.BuildToolsTarball, toolstesting.GetMockBuildTools(c)) + s.PatchValue(&sync.BuildAgentTarball, toolstesting.GetMockBuildTools(c)) // Set API host ports so FindTools works. hostPorts := [][]network.HostPort{ @@ -434,7 +434,7 @@ err = s.State.SetAPIHostPorts(hostPorts) c.Assert(err, jc.ErrorIsNil) - s.CmdBlockHelper = cmdcommon.NewCmdBlockHelper(s.APIState) + s.CmdBlockHelper = coretesting.NewCmdBlockHelper(s.APIState) c.Assert(s.CmdBlockHelper, gc.NotNil) s.AddCleanup(func(*gc.C) { s.CmdBlockHelper.Close() }) } @@ -443,7 +443,7 @@ s.Reset(c) s.PatchValue(&jujuversion.Current, version.MustParse("1.99.99")) cmd := newUpgradeJujuCommand(map[int]version.Number{2: version.MustParse("1.99.99")}) - _, err := coretesting.RunCommand(c, cmd, "--upload-tools") + _, err := coretesting.RunCommand(c, cmd, "--build-agent") c.Assert(err, jc.ErrorIsNil) vers := version.Binary{ Number: jujuversion.Current, @@ -454,21 +454,87 @@ s.checkToolsUploaded(c, vers, vers.Number) } +func (s *UpgradeJujuSuite) TestUpgradeJujuWithImplicitUploadDevAgent(c *gc.C) { + s.Reset(c) + fakeAPI := &fakeUpgradeJujuAPINoState{ + name: "dummy-model", + uuid: "deadbeef-0bad-400d-8000-4b1d0d06f00d", + controllerUUID: "deadbeef-1bad-500d-9000-4b1d0d06f00d", + agentVersion: "1.99.99.1", + } + s.PatchValue(&getUpgradeJujuAPI, func(*upgradeJujuCommand) (upgradeJujuAPI, error) { + return fakeAPI, nil + }) + s.PatchValue(&getModelConfigAPI, func(*upgradeJujuCommand) (modelConfigAPI, error) { + return fakeAPI, nil + }) + s.PatchValue(&jujuversion.Current, version.MustParse("1.99.99")) + cmd := newUpgradeJujuCommand(nil) + _, err := coretesting.RunCommand(c, cmd) + c.Assert(err, jc.ErrorIsNil) + c.Assert(fakeAPI.tools, gc.Not(gc.HasLen), 0) + c.Assert(fakeAPI.tools[0].Version.Number, gc.Equals, version.MustParse("1.99.99.1")) +} + +func (s 
*UpgradeJujuSuite) TestUpgradeJujuWithImplicitUploadNewerClient(c *gc.C) { + s.Reset(c) + fakeAPI := &fakeUpgradeJujuAPINoState{ + name: "dummy-model", + uuid: "deadbeef-0bad-400d-8000-4b1d0d06f00d", + controllerUUID: "deadbeef-1bad-500d-9000-4b1d0d06f00d", + agentVersion: "1.99.99", + } + s.PatchValue(&getUpgradeJujuAPI, func(*upgradeJujuCommand) (upgradeJujuAPI, error) { + return fakeAPI, nil + }) + s.PatchValue(&getModelConfigAPI, func(*upgradeJujuCommand) (modelConfigAPI, error) { + return fakeAPI, nil + }) + s.PatchValue(&jujuversion.Current, version.MustParse("1.100.0")) + cmd := newUpgradeJujuCommand(nil) + _, err := coretesting.RunCommand(c, cmd) + c.Assert(err, jc.ErrorIsNil) + c.Assert(fakeAPI.tools, gc.Not(gc.HasLen), 0) + c.Assert(fakeAPI.tools[0].Version.Number, gc.Equals, version.MustParse("1.100.0.1")) + c.Assert(fakeAPI.modelAgentVersion, gc.Equals, fakeAPI.tools[0].Version.Number) +} + +func (s *UpgradeJujuSuite) TestUpgradeJujuWithImplicitUploadNonController(c *gc.C) { + s.Reset(c) + fakeAPI := &fakeUpgradeJujuAPINoState{ + name: "dummy-model", + uuid: "deadbeef-0000-400d-8000-4b1d0d06f00d", + controllerUUID: "deadbeef-1bad-500d-9000-4b1d0d06f00d", + agentVersion: "1.99.99.1", + } + s.PatchValue(&getUpgradeJujuAPI, func(*upgradeJujuCommand) (upgradeJujuAPI, error) { + return fakeAPI, nil + }) + s.PatchValue(&getModelConfigAPI, func(*upgradeJujuCommand) (modelConfigAPI, error) { + return fakeAPI, nil + }) + s.PatchValue(&jujuversion.Current, version.MustParse("1.99.99")) + cmd := newUpgradeJujuCommand(nil) + _, err := coretesting.RunCommand(c, cmd) + c.Assert(err, gc.ErrorMatches, "no more recent supported versions available") +} + func (s *UpgradeJujuSuite) TestBlockUpgradeJujuWithRealUpload(c *gc.C) { s.Reset(c) s.PatchValue(&jujuversion.Current, version.MustParse("1.99.99")) cmd := newUpgradeJujuCommand(map[int]version.Number{2: version.MustParse("1.99.99")}) // Block operation s.BlockAllChanges(c, "TestBlockUpgradeJujuWithRealUpload") - _, err 
:= coretesting.RunCommand(c, cmd, "--upload-tools") - s.AssertBlocked(c, err, ".*TestBlockUpgradeJujuWithRealUpload.*") + _, err := coretesting.RunCommand(c, cmd, "--build-agent") + coretesting.AssertOperationWasBlocked(c, err, ".*TestBlockUpgradeJujuWithRealUpload.*") } func (s *UpgradeJujuSuite) TestFailUploadOnNonController(c *gc.C) { fakeAPI := &fakeUpgradeJujuAPINoState{ name: "dummy-model", uuid: "deadbeef-0000-400d-8000-4b1d0d06f00d", - controllerUUID: "deadbeef-0bad-400d-8000-4b1d0d06f00d", + controllerUUID: "deadbeef-1bad-500d-9000-4b1d0d06f00d", + agentVersion: "1.99.99", } s.PatchValue(&getUpgradeJujuAPI, func(*upgradeJujuCommand) (upgradeJujuAPI, error) { return fakeAPI, nil @@ -477,8 +543,8 @@ return fakeAPI, nil }) cmd := newUpgradeJujuCommand(nil) - _, err := coretesting.RunCommand(c, cmd, "--upload-tools", "-m", "dummy-model") - c.Assert(err, gc.ErrorMatches, "--upload-tools can only be used with the controller model") + _, err := coretesting.RunCommand(c, cmd, "--build-agent", "-m", "dummy-model") + c.Assert(err, gc.ErrorMatches, "--build-agent can only be used with the controller model") } type DryRunTest struct { @@ -494,16 +560,12 @@ tests := []DryRunTest{ { about: "dry run outputs and doesn't change anything when uploading tools", - cmdArgs: []string{"--upload-tools", "--dry-run"}, + cmdArgs: []string{"--build-agent", "--dry-run"}, tools: []string{"2.1.0-quantal-amd64", "2.1.2-quantal-i386", "2.1.3-quantal-amd64", "2.1-dev1-quantal-amd64", "2.2.3-quantal-amd64"}, currentVersion: "2.1.3-quantal-amd64", agentVersion: "2.0.0", - expectedCmdOutput: `available tools: - 2.1.3-quantal-amd64 -best version: - 2.1.3 -upgrade to this version by running - juju upgrade-juju --version="2.1.3" + expectedCmdOutput: `upgrade to this version by running + juju upgrade-juju --agent-version="2.1.3" `, }, { @@ -512,16 +574,8 @@ tools: []string{"2.1.0-quantal-amd64", "2.1.2-quantal-i386", "2.1.3-quantal-amd64", "2.1-dev1-quantal-amd64", "2.2.3-quantal-amd64"}, 
currentVersion: "2.0.0-quantal-amd64", agentVersion: "2.0.0", - expectedCmdOutput: `available tools: - 2.1-dev1-quantal-amd64 - 2.1.0-quantal-amd64 - 2.1.2-quantal-i386 - 2.1.3-quantal-amd64 - 2.2.3-quantal-amd64 -best version: - 2.1.3 -upgrade to this version by running - juju upgrade-juju --version="2.1.3" + expectedCmdOutput: `upgrade to this version by running + juju upgrade-juju --agent-version="2.1.3" `, }, { @@ -530,14 +584,8 @@ tools: []string{"2.1.0-quantal-amd64", "2.1.2-quantal-i386", "2.1.3-quantal-amd64", "1.2.3-myawesomeseries-amd64"}, currentVersion: "2.0.0-quantal-amd64", agentVersion: "2.0.0", - expectedCmdOutput: `available tools: - 2.1.0-quantal-amd64 - 2.1.2-quantal-i386 - 2.1.3-quantal-amd64 -best version: - 2.1.3 -upgrade to this version by running - juju upgrade-juju --version="2.1.3" + expectedCmdOutput: `upgrade to this version by running + juju upgrade-juju --agent-version="2.1.3" `, }, } @@ -597,11 +645,6 @@ } func (s *UpgradeJujuSuite) TestUpgradesDifferentMajor(c *gc.C) { - toolsList49Only := `available tools: - 4.9.0-trusty-amd64 -best version: - 4.9.0 -` tests := []struct { about string cmdArgs []string @@ -615,33 +658,27 @@ expectedErr string upgradeMap map[int]version.Number }{{ - about: "upgrade previous major to latest previous major", - tools: []string{"5.0.1-trusty-amd64", "4.9.0-trusty-amd64"}, - currentVersion: "5.0.0-trusty-amd64", - agentVersion: "4.8.5", - expectedVersion: "4.9.0", - expectedCmdOutput: toolsList49Only, - expectedLogOutput: `.*version 4.9.0 incompatible with this client \(5.0.0\).*started upgrade to 4.9.0.*`, + about: "upgrade previous major to latest previous major", + tools: []string{"5.0.1-trusty-amd64", "4.9.0-trusty-amd64"}, + currentVersion: "5.0.0-trusty-amd64", + agentVersion: "4.8.5", + expectedVersion: "4.9.0", }, { - about: "upgrade previous major to latest previous major --dry-run still warns", - tools: []string{"5.0.1-trusty-amd64", "4.9.0-trusty-amd64"}, - currentVersion: "5.0.1-trusty-amd64", 
- agentVersion: "4.8.5", - expectedVersion: "4.9.0", - expectedCmdOutput: toolsList49Only, - expectedLogOutput: `.*version 4.9.0 incompatible with this client \(5.0.1\).*started upgrade to 4.9.0.*`, + about: "upgrade previous major to latest previous major --dry-run still warns", + tools: []string{"5.0.1-trusty-amd64", "4.9.0-trusty-amd64"}, + currentVersion: "5.0.1-trusty-amd64", + agentVersion: "4.8.5", + expectedVersion: "4.9.0", }, { - about: "upgrade previous major to latest previous major with --version", - cmdArgs: []string{"--version=4.9.0"}, - tools: []string{"5.0.2-trusty-amd64", "4.9.0-trusty-amd64", "4.8.0-trusty-amd64"}, - currentVersion: "5.0.2-trusty-amd64", - agentVersion: "4.7.5", - expectedVersion: "4.9.0", - expectedCmdOutput: toolsList49Only, - expectedLogOutput: `.*version 4.9.0 incompatible with this client \(5.0.2\).*started upgrade to 4.9.0.*`, + about: "upgrade previous major to latest previous major with --agent-version", + cmdArgs: []string{"--agent-version=4.9.0"}, + tools: []string{"5.0.2-trusty-amd64", "4.9.0-trusty-amd64", "4.8.0-trusty-amd64"}, + currentVersion: "5.0.2-trusty-amd64", + agentVersion: "4.7.5", + expectedVersion: "4.9.0", }, { about: "can upgrade lower major version to current major version at minimum level", - cmdArgs: []string{"--version=6.0.5"}, + cmdArgs: []string{"--agent-version=6.0.5"}, tools: []string{"6.0.5-trusty-amd64", "5.9.9-trusty-amd64"}, currentVersion: "6.0.0-trusty-amd64", agentVersion: "5.9.8", @@ -650,7 +687,7 @@ upgradeMap: map[int]version.Number{6: version.MustParse("5.9.8")}, }, { about: "can upgrade lower major version to current major version above minimum level", - cmdArgs: []string{"--version=6.0.5"}, + cmdArgs: []string{"--agent-version=6.0.5"}, tools: []string{"6.0.5-trusty-amd64", "5.11.0-trusty-amd64"}, currentVersion: "6.0.1-trusty-amd64", agentVersion: "5.10.8", @@ -659,7 +696,7 @@ upgradeMap: map[int]version.Number{6: version.MustParse("5.9.8")}, }, { about: "can upgrade current to next 
major version", - cmdArgs: []string{"--version=6.0.5"}, + cmdArgs: []string{"--agent-version=6.0.5"}, tools: []string{"6.0.5-trusty-amd64", "5.11.0-trusty-amd64"}, currentVersion: "5.10.8-trusty-amd64", agentVersion: "5.10.8", @@ -667,7 +704,7 @@ upgradeMap: map[int]version.Number{6: version.MustParse("5.9.8")}, }, { about: "upgrade fails if not at minimum version", - cmdArgs: []string{"--version=7.0.1"}, + cmdArgs: []string{"--agent-version=7.0.1"}, tools: []string{"7.0.1-trusty-amd64"}, currentVersion: "7.0.1-trusty-amd64", agentVersion: "6.0.0", @@ -677,7 +714,7 @@ upgradeMap: map[int]version.Number{7: version.MustParse("6.7.8")}, }, { about: "upgrade fails if not a minor of 0", - cmdArgs: []string{"--version=7.1.1"}, + cmdArgs: []string{"--agent-version=7.1.1"}, tools: []string{"7.0.1-trusty-amd64", "7.1.1-trusty-amd64"}, currentVersion: "7.0.1-trusty-amd64", agentVersion: "6.7.8", @@ -687,7 +724,7 @@ upgradeMap: map[int]version.Number{7: version.MustParse("6.7.8")}, }, { about: "upgrade fails if not at minimum version and not a minor of 0", - cmdArgs: []string{"--version=7.1.1"}, + cmdArgs: []string{"--agent-version=7.1.1"}, tools: []string{"7.0.1-trusty-amd64", "7.1.1-trusty-amd64"}, currentVersion: "7.0.1-trusty-amd64", agentVersion: "6.0.0", @@ -783,7 +820,7 @@ // Block operation s.BlockAllChanges(c, "TestBlockUpgradeInProgress") err = modelcmd.Wrap(cmd).Run(coretesting.Context(c)) - s.AssertBlocked(c, err, ".*To unblock changes.*") + s.AssertBlocked(c, err, ".*To enable changes.*") } func (s *UpgradeJujuSuite) TestResetPreviousUpgrade(c *gc.C) { @@ -884,6 +921,19 @@ s.PatchValue(&getModelConfigAPI, func(*upgradeJujuCommand) (modelConfigAPI, error) { return a, nil }) + s.PatchValue(&getControllerAPI, func(*upgradeJujuCommand) (controllerAPI, error) { + return a, nil + }) +} + +func (a *fakeUpgradeJujuAPI) ModelConfig() (map[string]interface{}, error) { + controllerModel, err := a.st.ControllerModel() + if err != nil { + return make(map[string]interface{}), 
err + } + return map[string]interface{}{ + "uuid": controllerModel.UUID(), + }, nil } func (a *fakeUpgradeJujuAPI) addTools(tools ...string) { @@ -933,19 +983,48 @@ // Mock an API with no state type fakeUpgradeJujuAPINoState struct { upgradeJujuAPI - name string - uuid string - controllerUUID string + name string + uuid string + controllerUUID string + agentVersion string + tools coretools.List + modelAgentVersion version.Number } func (a *fakeUpgradeJujuAPINoState) Close() error { return nil } +func (a *fakeUpgradeJujuAPINoState) FindTools(majorVersion, minorVersion int, series, arch string) (params.FindToolsResult, error) { + var result params.FindToolsResult + if len(a.tools) == 0 { + result.Error = common.ServerError(errors.NotFoundf("tools")) + } else { + result.List = a.tools + } + return result, nil +} + +func (a *fakeUpgradeJujuAPINoState) UploadTools(r io.ReadSeeker, vers version.Binary, additionalSeries ...string) (coretools.List, error) { + a.tools = coretools.List{&coretools.Tools{Version: vers}} + for _, s := range additionalSeries { + v := vers + v.Series = s + a.tools = append(a.tools, &coretools.Tools{Version: v}) + } + return a.tools, nil +} + +func (a *fakeUpgradeJujuAPINoState) SetModelAgentVersion(version version.Number) error { + a.modelAgentVersion = version + return nil +} + func (a *fakeUpgradeJujuAPINoState) ModelGet() (map[string]interface{}, error) { return dummy.SampleConfig().Merge(map[string]interface{}{ "name": a.name, "uuid": a.uuid, "controller-uuid": a.controllerUUID, + "agent-version": a.agentVersion, }), nil } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/common/cloudcredential.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/common/cloudcredential.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/common/cloudcredential.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/common/cloudcredential.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,27 @@ +// 
Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package common + +import ( + "fmt" + "strings" + + "github.com/juju/errors" + + "gopkg.in/juju/names.v2" +) + +// ResolveCloudCredentialTag takes a string which is of either the format +// "" or "/". If the string does not include +// a user, then the supplied user tag is implied. +func ResolveCloudCredentialTag(user names.UserTag, cloud names.CloudTag, credentialName string) (names.CloudCredentialTag, error) { + if i := strings.IndexRune(credentialName, '/'); i == -1 { + credentialName = fmt.Sprintf("%s/%s", user.Id(), credentialName) + } + s := fmt.Sprintf("%s/%s", cloud.Id(), credentialName) + if !names.IsValidCloudCredential(s) { + return names.CloudCredentialTag{}, errors.NotValidf("cloud credential name %q", s) + } + return names.NewCloudCredentialTag(s), nil +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/common/cloudcredential_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/common/cloudcredential_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/common/cloudcredential_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/common/cloudcredential_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,49 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+ +package common_test + +import ( + "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + "gopkg.in/juju/names.v2" + + "github.com/juju/juju/cmd/juju/common" +) + +var _ = gc.Suite(&cloudCredentialSuite{}) + +type cloudCredentialSuite struct { + testing.IsolationSuite +} + +func (*cloudCredentialSuite) TestResolveCloudCredentialTag(c *gc.C) { + testResolveCloudCredentialTag(c, + names.NewUserTag("admin@local"), + names.NewCloudTag("aws"), + "foo", + "aws/admin/foo", + ) +} + +func (*cloudCredentialSuite) TestResolveCloudCredentialTagOtherUser(c *gc.C) { + testResolveCloudCredentialTag(c, + names.NewUserTag("admin@local"), + names.NewCloudTag("aws"), + "brenda/foo", + "aws/brenda/foo", + ) +} + +func testResolveCloudCredentialTag( + c *gc.C, + user names.UserTag, + cloud names.CloudTag, + credentialName string, + expect string, +) { + tag, err := common.ResolveCloudCredentialTag(user, cloud, credentialName) + c.Assert(err, jc.ErrorIsNil) + c.Assert(tag.Id(), gc.Equals, expect) +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/common/cmdblockhelper.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/common/cmdblockhelper.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/common/cmdblockhelper.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/common/cmdblockhelper.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,63 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package common - -import ( - "strings" - - "github.com/juju/cmd" - gc "gopkg.in/check.v1" - - "github.com/juju/juju/api" - "github.com/juju/juju/api/block" - cmdblock "github.com/juju/juju/cmd/juju/block" -) - -// CmdBlockHelper is a helper struct used to block commands. -type CmdBlockHelper struct { - blockClient *block.Client -} - -// NewCmdBlockHelper creates a block switch used in testing -// to manage desired juju blocks. 
-func NewCmdBlockHelper(st api.Connection) CmdBlockHelper { - return CmdBlockHelper{ - blockClient: block.NewClient(st), - } -} - -// on switches on desired block and -// asserts that no errors were encountered. -func (s *CmdBlockHelper) on(c *gc.C, blockType, msg string) { - c.Assert(s.blockClient.SwitchBlockOn(cmdblock.TypeFromOperation(blockType), msg), gc.IsNil) -} - -// BlockAllChanges switches changes block on. -// This prevents all changes to juju environment. -func (s *CmdBlockHelper) BlockAllChanges(c *gc.C, msg string) { - s.on(c, "all-changes", msg) -} - -// BlockRemoveObject switches remove block on. -// This prevents any object/entity removal on juju environment -func (s *CmdBlockHelper) BlockRemoveObject(c *gc.C, msg string) { - s.on(c, "remove-object", msg) -} - -// BlockDestroyModel switches destroy block on. -// This prevents juju environment destruction. -func (s *CmdBlockHelper) BlockDestroyModel(c *gc.C, msg string) { - s.on(c, "destroy-model", msg) -} - -func (s *CmdBlockHelper) Close() { - s.blockClient.Close() -} - -func (s *CmdBlockHelper) AssertBlocked(c *gc.C, err error, msg string) { - c.Assert(err, gc.ErrorMatches, cmd.ErrSilent.Error()) - // msg is logged - stripped := strings.Replace(c.GetTestLog(), "\n", "", -1) - c.Check(stripped, gc.Matches, msg) -} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/common/controller.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/common/controller.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/common/controller.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/common/controller.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,6 +4,7 @@ package common import ( + "fmt" "io" "strings" "time" @@ -12,15 +13,16 @@ "github.com/juju/errors" "github.com/juju/utils" - apiblock "github.com/juju/juju/api/block" + "github.com/juju/juju/api/block" "github.com/juju/juju/apiserver/params" - "github.com/juju/juju/cmd/juju/block" 
"github.com/juju/juju/cmd/modelcmd" "github.com/juju/juju/environs" + "github.com/juju/juju/environs/bootstrap" "github.com/juju/juju/instance" "github.com/juju/juju/juju" "github.com/juju/juju/jujuclient" "github.com/juju/juju/network" + "github.com/juju/version" ) var allInstances = func(environ environs.Environ) ([]instance.Instance, error) { @@ -28,10 +30,14 @@ } // SetBootstrapEndpointAddress writes the API endpoint address of the -// bootstrap server into the connection information. This should only be run -// once directly after Bootstrap. It assumes that there is just one instance -// in the environment - the bootstrap instance. -func SetBootstrapEndpointAddress(store jujuclient.ControllerStore, controllerName string, apiPort int, environ environs.Environ) error { +// bootstrap server, plus the agent version, into the connection information. +// This should only be run once directly after Bootstrap. It assumes that +// there is just one instance in the environment - the bootstrap instance. +func SetBootstrapEndpointAddress( + store jujuclient.ControllerStore, + controllerName string, agentVersion version.Number, + apiPort int, environ environs.Environ, +) error { instances, err := allInstances(environ) if err != nil { return errors.Trace(err) @@ -52,7 +58,16 @@ return errors.Annotate(err, "failed to get bootstrap instance addresses") } apiHostPorts := network.AddressesWithPort(netAddrs, apiPort) - return juju.UpdateControllerAddresses(store, controllerName, nil, apiHostPorts...) + // At bootstrap we have 2 models, the controller model and the default. 
+ two := 2 + params := juju.UpdateControllerParams{ + AgentVersion: agentVersion.String(), + AddrConnectedTo: apiHostPorts, + MachineCount: &length, + ControllerMachineCount: &length, + ModelCount: &two, + } + return juju.UpdateControllerDetailsFromLogin(store, controllerName, params) } var ( @@ -61,13 +76,18 @@ blockAPI = getBlockAPI ) +type listBlocksAPI interface { + List() ([]params.Block, error) + Close() error +} + // getBlockAPI returns a block api for listing blocks. -func getBlockAPI(c *modelcmd.ModelCommandBase) (block.BlockListAPI, error) { +func getBlockAPI(c *modelcmd.ModelCommandBase) (listBlocksAPI, error) { root, err := c.NewAPIRoot() if err != nil { return nil, errors.Trace(err) } - return apiblock.NewClient(root), nil + return block.NewClient(root), nil } // tryAPI attempts to open the API and makes a trivial call @@ -87,7 +107,7 @@ // WaitForAgentInitialisation polls the bootstrapped controller with a read-only // command which will fail until the controller is fully initialised. // TODO(wallyworld) - add a bespoke command to maybe the admin facade for this purpose. -func WaitForAgentInitialisation(ctx *cmd.Context, c *modelcmd.ModelCommandBase, controllerName string) error { +func WaitForAgentInitialisation(ctx *cmd.Context, c *modelcmd.ModelCommandBase, controllerName, hostedModelName string) error { // TODO(katco): 2016-08-09: lp:1611427 attempts := utils.AttemptStrategy{ Min: bootstrapReadyPollCount, @@ -98,11 +118,24 @@ err error ) + // Make a best effort to find the new controller address so we can print it. 
+ addressInfo := "" + controller, err := c.ClientStore().ControllerByName(controllerName) + if err == nil && len(controller.APIEndpoints) > 0 { + addr, err := network.ParseHostPort(controller.APIEndpoints[0]) + if err == nil { + addressInfo = fmt.Sprintf(" at %s", addr.Address.Value) + } + } + + ctx.Infof("Contacting Juju controller%s to verify accessibility...", addressInfo) apiAttempts = 1 for attempt := attempts.Start(); attempt.Next(); apiAttempts++ { err = tryAPI(c) if err == nil { - ctx.Infof("Bootstrap complete, %s now available.", controllerName) + ctx.Infof("Bootstrap complete, %q controller now available.", controllerName) + ctx.Infof("Controller machines are in the %q model.", bootstrap.ControllerModelName) + ctx.Infof("Initial model %q added.", hostedModelName) break } // As the API server is coming up, it goes through a number of steps. @@ -120,10 +153,10 @@ strings.HasSuffix(errorMessage, "connection is shut down"), strings.HasSuffix(errorMessage, "no api connection available"), strings.Contains(errorMessage, "spaces are still being discovered"): - ctx.Infof("Waiting for API to become available") + ctx.Verbosef("Still waiting for API to become available") continue case params.ErrCode(err) == params.CodeUpgradeInProgress: - ctx.Infof("Waiting for API to become available: %v", err) + ctx.Verbosef("Still waiting for API to become available: %v", err) continue } break diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/common/controller_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/common/controller_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/common/controller_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/common/controller_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -12,9 +12,9 @@ gc "gopkg.in/check.v1" "github.com/juju/juju/apiserver/params" - "github.com/juju/juju/cmd/juju/block" "github.com/juju/juju/cmd/modelcmd" cmdtesting 
"github.com/juju/juju/cmd/testing" + "github.com/juju/juju/jujuclient/jujuclienttesting" "github.com/juju/juju/rpc" "github.com/juju/juju/testing" "github.com/juju/juju/version" @@ -29,7 +29,7 @@ func (s *controllerSuite) SetUpTest(c *gc.C) { s.mockBlockClient = &mockBlockClient{} - s.PatchValue(&blockAPI, func(*modelcmd.ModelCommandBase) (block.BlockListAPI, error) { + s.PatchValue(&blockAPI, func(*modelcmd.ModelCommandBase) (listBlocksAPI, error) { err := s.mockBlockClient.loginError if err != nil { s.mockBlockClient.loginError = nil @@ -93,7 +93,9 @@ } { s.mockBlockClient.numRetries = t.numRetries s.mockBlockClient.retryCount = 0 - err := WaitForAgentInitialisation(cmdtesting.NullContext(c), nil, "controller") + cmd := &modelcmd.ModelCommandBase{} + cmd.SetClientStore(jujuclienttesting.NewMemStore()) + err := WaitForAgentInitialisation(cmdtesting.NullContext(c), cmd, "controller", "default") c.Check(errors.Cause(err), gc.DeepEquals, t.err) expectedRetries := t.numRetries if t.numRetries <= 0 { @@ -109,7 +111,9 @@ func (s *controllerSuite) TestWaitForAgentAPIReadyWaitsForSpaceDiscovery(c *gc.C) { s.mockBlockClient.discoveringSpacesError = 2 - err := WaitForAgentInitialisation(cmdtesting.NullContext(c), nil, "controller") + cmd := &modelcmd.ModelCommandBase{} + cmd.SetClientStore(jujuclienttesting.NewMemStore()) + err := WaitForAgentInitialisation(cmdtesting.NullContext(c), cmd, "controller", "default") c.Assert(err, jc.ErrorIsNil) c.Assert(s.mockBlockClient.discoveringSpacesError, gc.Equals, 0) } @@ -118,7 +122,9 @@ s.mockBlockClient.numRetries = 0 s.mockBlockClient.retryCount = 0 s.mockBlockClient.loginError = io.EOF - err := WaitForAgentInitialisation(cmdtesting.NullContext(c), nil, "controller") + cmd := &modelcmd.ModelCommandBase{} + cmd.SetClientStore(jujuclienttesting.NewMemStore()) + err := WaitForAgentInitialisation(cmdtesting.NullContext(c), cmd, "controller", "default") c.Check(err, jc.ErrorIsNil) c.Check(s.mockBlockClient.retryCount, gc.Equals, 1) @@ 
-128,7 +134,9 @@ s.mockBlockClient.numRetries = 0 s.mockBlockClient.retryCount = 0 s.mockBlockClient.loginError = errors.NewUnauthorized(nil, "") - err := WaitForAgentInitialisation(cmdtesting.NullContext(c), nil, "controller") + cmd := &modelcmd.ModelCommandBase{} + cmd.SetClientStore(jujuclienttesting.NewMemStore()) + err := WaitForAgentInitialisation(cmdtesting.NullContext(c), cmd, "controller", "default") c.Check(err, jc.Satisfies, errors.IsUnauthorized) c.Check(s.mockBlockClient.retryCount, gc.Equals, 0) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/common/errors.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/common/errors.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/common/errors.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/common/errors.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,21 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package common + +import ( + "fmt" + "io" +) + +func PermissionsMessage(writer io.Writer, command string) { + const ( + perm = "You do not have permission to %s." 
+ grant = `You may ask an administrator to grant you access with "juju grant".` + ) + + if command == "" { + command = "complete this operation" + } + fmt.Fprintf(writer, "\n%s\n%s\n\n", fmt.Sprintf(perm, command), grant) +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/common/flags.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/common/flags.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/common/flags.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/common/flags.go 2016-10-13 14:31:49.000000000 +0000 @@ -10,6 +10,7 @@ "github.com/juju/cmd" "github.com/juju/errors" + "github.com/juju/juju/constraints" "github.com/juju/utils" "gopkg.in/yaml.v2" ) @@ -76,3 +77,29 @@ } return strings.Join(strs, " ") } + +// WarnConstraintAliases shows a warning to the user that they have used an +// alias for a constraint that might go away sometime. +func WarnConstraintAliases(ctx *cmd.Context, aliases map[string]string) { + for alias, canonical := range aliases { + ctx.Infof("Warning: constraint %q is deprecated in favor of %q.\n", alias, canonical) + } +} + +// ParseConstraints parses the given constraints and uses WarnConstraintAliases +// if any aliases were used. +func ParseConstraints(ctx *cmd.Context, cons string) (constraints.Value, error) { + if cons == "" { + return constraints.Value{}, nil + } + constraint, aliases, err := constraints.ParseWithAliases(cons) + // we always do these, even on errors, so that the error messages have + // context. 
+ for alias, canonical := range aliases { + ctx.Infof("Warning: constraint %q is deprecated in favor of %q.\n", alias, canonical) + } + if err != nil { + return constraints.Value{}, err + } + return constraint, nil +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/common/model.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/common/model.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/common/model.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/common/model.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,6 +4,7 @@ package common import ( + "fmt" "time" "github.com/juju/errors" @@ -14,16 +15,25 @@ // ModelInfo contains information about a model. type ModelInfo struct { - Name string `json:"name" yaml:"name"` - UUID string `json:"model-uuid" yaml:"model-uuid"` - ControllerUUID string `json:"controller-uuid" yaml:"controller-uuid"` - Owner string `json:"owner" yaml:"owner"` - Cloud string `json:"cloud" yaml:"cloud"` - CloudRegion string `json:"region,omitempty" yaml:"region,omitempty"` - ProviderType string `json:"type" yaml:"type"` - Life string `json:"life" yaml:"life"` - Status ModelStatus `json:"status" yaml:"status"` - Users map[string]ModelUserInfo `json:"users" yaml:"users"` + Name string `json:"name" yaml:"name"` + UUID string `json:"model-uuid" yaml:"model-uuid"` + ControllerUUID string `json:"controller-uuid" yaml:"controller-uuid"` + ControllerName string `json:"controller-name" yaml:"controller-name"` + Owner string `json:"owner" yaml:"owner"` + Cloud string `json:"cloud" yaml:"cloud"` + CloudRegion string `json:"region,omitempty" yaml:"region,omitempty"` + ProviderType string `json:"type" yaml:"type"` + Life string `json:"life" yaml:"life"` + Status ModelStatus `json:"status" yaml:"status"` + Users map[string]ModelUserInfo `json:"users" yaml:"users"` + Machines map[string]ModelMachineInfo `json:"machines,omitempty" yaml:"machines,omitempty"` +} + +// ModelMachineInfo contains 
information about a machine in a model. +// We currently only care about showing core count, but might +// in the future care about memory, disks, containers etc. +type ModelMachineInfo struct { + Cores uint64 `json:"cores" yaml:"cores"` } // ModelStatus contains the current status of a model. @@ -54,6 +64,10 @@ if info.Status.Since != nil { status.Since = UserFriendlyDuration(*info.Status.Since, now) } + cloudTag, err := names.ParseCloudTag(info.CloudTag) + if err != nil { + return ModelInfo{}, errors.Trace(err) + } return ModelInfo{ Name: info.Name, UUID: info.UUID, @@ -61,14 +75,29 @@ Owner: tag.Id(), Life: string(info.Life), Status: status, - Cloud: info.Cloud, + Cloud: cloudTag.Id(), CloudRegion: info.CloudRegion, ProviderType: info.ProviderType, Users: ModelUserInfoFromParams(info.Users, now), + Machines: ModelMachineInfoFromParams(info.Machines), }, nil } -// ModelUserInfoFromParams translates []params.ModelInfo to a map of +// ModelMachineInfoFromParams translates []params.ModelMachineInfo to a map of +// machine ids to ModelMachineInfo. +func ModelMachineInfoFromParams(machines []params.ModelMachineInfo) map[string]ModelMachineInfo { + output := make(map[string]ModelMachineInfo, len(machines)) + for _, info := range machines { + mInfo := ModelMachineInfo{} + if info.Hardware != nil && info.Hardware.Cores != nil { + mInfo.Cores = *info.Hardware.Cores + } + output[info.Id] = mInfo + } + return output +} + +// ModelUserInfoFromParams translates []params.ModelUserInfo to a map of // user names to ModelUserInfo. 
func ModelUserInfoFromParams(users []params.ModelUserInfo, now time.Time) map[string]ModelUserInfo { output := make(map[string]ModelUserInfo) @@ -82,7 +111,18 @@ } else { outInfo.LastConnection = "never connected" } - output[names.NewUserTag(info.UserName).Canonical()] = outInfo + output[names.NewUserTag(info.UserName).Id()] = outInfo } return output } + +// OwnerQualifiedModelName returns the model name qualified with the +// model owner if the owner is not the same as the given canonical +// user name. If the owner is a local user, we omit the domain. +func OwnerQualifiedModelName(modelName string, owner, user names.UserTag) string { + if owner.Id() == user.Id() { + return modelName + } + ownerName := owner.Id() + return fmt.Sprintf("%s/%s", ownerName, modelName) +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/controller/addmodel.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/controller/addmodel.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/controller/addmodel.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/controller/addmodel.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,22 +4,25 @@ package controller import ( + "bytes" "fmt" + "sort" "strings" "github.com/juju/cmd" "github.com/juju/errors" + "github.com/juju/gnuflag" "gopkg.in/juju/names.v2" - "launchpad.net/gnuflag" "github.com/juju/juju/api" "github.com/juju/juju/api/base" cloudapi "github.com/juju/juju/api/cloud" "github.com/juju/juju/api/modelmanager" "github.com/juju/juju/apiserver/params" - "github.com/juju/juju/cloud" + jujucloud "github.com/juju/juju/cloud" "github.com/juju/juju/cmd/juju/common" "github.com/juju/juju/cmd/modelcmd" + "github.com/juju/juju/cmd/output" "github.com/juju/juju/environs/config" "github.com/juju/juju/jujuclient" ) @@ -51,37 +54,52 @@ } const addModelHelpDoc = ` -Adding a model is typically done in order to run a specific workload. 
The -model is of the same cloud type as the controller and is managed by that -controller. By default, the controller is the current controller. The -credentials used to add the model are the ones used to create any future -resources within the model (` + "`juju deploy`, `juju add-unit`" + `). +Adding a model is typically done in order to run a specific workload. +To add a model, you must at a minimum specify a model name. You may +also supply model-specific configuration, a credential, and which +cloud/region to deploy the model to. The cloud/region and credentials +are the ones used to create any future resources within the model. Model names can be duplicated across controllers but must be unique for any given controller. Model names may only contain lowercase letters, digits and hyphens, and may not start with a hyphen. +Credential names are specified either in the form "credential-name", or +"credential-owner/credential-name". There is currently no way to acquire +access to another user's credentials, so the only valid value for +credential-owner is your own user name. This may change in a future +release. + +If no cloud/region is specified, then the model will be deployed to +the same cloud/region as the controller model. If a region is specified +without a cloud qualifier, then it is assumed to be in the same cloud +as the controller model. It is not currently possible for a controller +to manage multiple clouds, so the only valid cloud is the same cloud +as the controller model is deployed to. This may change in a future +release. + Examples: juju add-model mymodel + juju add-model mymodel us-east-1 + juju add-model mymodel aws/us-east-1 juju add-model mymodel --config my-config.yaml --config image-stream=daily juju add-model mymodel --credential credential_name --config authorized-keys="ssh-rsa ..." 
- juju add-model mymodel --region us-east-1 ` func (c *addModelCommand) Info() *cmd.Info { return &cmd.Info{ Name: "add-model", - Args: "", + Args: " [cloud|region|(cloud/region)]", Purpose: "Adds a hosted model.", Doc: strings.TrimSpace(addModelHelpDoc), } } func (c *addModelCommand) SetFlags(f *gnuflag.FlagSet) { + c.ControllerCommandBase.SetFlags(f) f.StringVar(&c.Owner, "owner", "", "The owner of the new model if not the current user") f.StringVar(&c.CredentialName, "credential", "", "Credential used to add the model") - f.StringVar(&c.CloudRegion, "region", "", "Cloud region to add the model to") f.Var(&c.Config, "config", "Path to YAML model configuration file or individual options (--config config.yaml [--config key=value ...])") } @@ -91,6 +109,10 @@ } c.Name, args = args[0], args[1:] + if len(args) > 0 { + c.CloudRegion, args = args[0], args[1:] + } + if !names.IsValidModelName(c.Name) { return errors.Errorf("%q is not a valid name: model names may only contain lowercase letters, digits and hyphens", c.Name) } @@ -103,17 +125,22 @@ } type AddModelAPI interface { - CreateModel(name, owner, cloudRegion, cloudCredential string, config map[string]interface{}) (params.ModelInfo, error) + CreateModel( + name, owner, cloudName, cloudRegion string, + cloudCredential names.CloudCredentialTag, + config map[string]interface{}, + ) (base.ModelInfo, error) } type CloudAPI interface { - Cloud(names.CloudTag) (cloud.Cloud, error) - CloudDefaults(names.UserTag) (cloud.Defaults, error) - Credentials(names.UserTag, names.CloudTag) (map[string]cloud.Credential, error) - UpdateCredentials(names.UserTag, names.CloudTag, map[string]cloud.Credential) error + DefaultCloud() (names.CloudTag, error) + Clouds() (map[names.CloudTag]jujucloud.Cloud, error) + Cloud(names.CloudTag) (jujucloud.Cloud, error) + UserCredentials(names.UserTag, names.CloudTag) ([]names.CloudCredentialTag, error) + UpdateCredential(names.CloudCredentialTag, jujucloud.Credential) error } -func (c 
*addModelCommand) newApiRoot() (api.Connection, error) { +func (c *addModelCommand) newAPIRoot() (api.Connection, error) { if c.apiRoot != nil { return c.apiRoot, nil } @@ -121,7 +148,7 @@ } func (c *addModelCommand) Run(ctx *cmd.Context) error { - api, err := c.newApiRoot() + api, err := c.newAPIRoot() if err != nil { return errors.Annotate(err, "opening API connection") } @@ -129,10 +156,6 @@ store := c.ClientStore() controllerName := c.ControllerName() - controllerDetails, err := store.ControllerByName(controllerName) - if err != nil { - return errors.Trace(err) - } accountDetails, err := store.AccountDetails(controllerName) if err != nil { return errors.Trace(err) @@ -143,7 +166,7 @@ if !names.IsValidUser(c.Owner) { return errors.Errorf("%q is not a valid user name", c.Owner) } - modelOwner = names.NewUserTag(c.Owner).Canonical() + modelOwner = names.NewUserTag(c.Owner).Id() } forUserSuffix := fmt.Sprintf(" for user '%s'", names.NewUserTag(modelOwner).Name()) @@ -152,47 +175,39 @@ return errors.Trace(err) } - // If the user has specified a credential, then we will upload it if - // it doesn't already exist in the controller, and it exists locally. 
- if c.CredentialName != "" { - cloudClient := c.newCloudAPI(api) - modelOwnerTag := names.NewUserTag(modelOwner) - - defaults, err := cloudClient.CloudDefaults(modelOwnerTag) - if err != nil { - return errors.Trace(err) - } - cloudTag := names.NewCloudTag(defaults.Cloud) - credentials, err := cloudClient.Credentials(modelOwnerTag, cloudTag) + cloudClient := c.newCloudAPI(api) + var cloudTag names.CloudTag + var cloud jujucloud.Cloud + var cloudRegion string + if c.CloudRegion != "" { + cloudTag, cloud, cloudRegion, err = c.getCloudRegion(cloudClient) if err != nil { return errors.Trace(err) } + } - if _, ok := credentials[c.CredentialName]; !ok { - cloudDetails, err := cloudClient.Cloud(cloudTag) - if err != nil { - return errors.Trace(err) - } - credential, _, _, err := modelcmd.GetCredentials( - store, c.CloudRegion, c.CredentialName, - cloudTag.Id(), cloudDetails.Type, - ) - if err != nil { - return errors.Trace(err) - } - ctx.Infof("uploading credential '%s' to controller%s", c.CredentialName, forUserSuffix) - credentials = map[string]cloud.Credential{c.CredentialName: *credential} - if err := cloudClient.UpdateCredentials(modelOwnerTag, cloudTag, credentials); err != nil { + // If the user has specified a credential, then we will upload it if + // it doesn't already exist in the controller, and it exists locally. 
+ var credentialTag names.CloudCredentialTag + if c.CredentialName != "" { + var err error + if c.CloudRegion == "" { + if cloudTag, cloud, err = defaultCloud(cloudClient); err != nil { return errors.Trace(err) } - } else { - ctx.Infof("using credential '%s' cached in controller", c.CredentialName) + } + credentialTag, err = c.maybeUploadCredential(ctx, cloudClient, cloudTag, cloudRegion, cloud, modelOwner) + if err != nil { + return errors.Trace(err) } } addModelClient := c.newAddModelAPI(api) - model, err := addModelClient.CreateModel(c.Name, modelOwner, c.CloudRegion, c.CredentialName, attrs) + model, err := addModelClient.CreateModel(c.Name, modelOwner, cloudTag.Id(), cloudRegion, credentialTag, attrs) if err != nil { + if params.IsCodeUnauthorized(err) { + common.PermissionsMessage(ctx.Stderr, "add a model") + } return errors.Trace(err) } @@ -211,18 +226,30 @@ } } - if model.CloudRegion != "" { - messageFormat += " on %s/%s" - messageArgs = append(messageArgs, controllerDetails.Cloud, model.CloudRegion) + if c.CloudRegion != "" || model.CloudRegion != "" { + // The user explicitly requested a cloud/region, + // or the cloud supports multiple regions. Whichever + // the case, tell the user which cloud/region the + // model was deployed to. + cloudRegion := model.Cloud + if model.CloudRegion != "" { + cloudRegion += "/" + model.CloudRegion + } + messageFormat += " on %s" + messageArgs = append(messageArgs, cloudRegion) } if model.CloudCredential != "" { + tag := names.NewCloudCredentialTag(model.CloudCredential) + credentialName := tag.Name() + if tag.Owner().Id() != modelOwner { + credentialName = fmt.Sprintf("%s/%s", tag.Owner().Id(), credentialName) + } messageFormat += " with credential '%s'" - messageArgs = append(messageArgs, model.CloudCredential) + messageArgs = append(messageArgs, credentialName) } messageFormat += forUserSuffix - // lp#1594335 // "Added '' model [on /] [with credential ''] for user ''" ctx.Infof(messageFormat, messageArgs...) 
@@ -238,6 +265,188 @@ return nil } +func (c *addModelCommand) getCloudRegion(cloudClient CloudAPI) (cloudTag names.CloudTag, cloud jujucloud.Cloud, cloudRegion string, err error) { + var cloudName string + sep := strings.IndexRune(c.CloudRegion, '/') + if sep >= 0 { + // User specified "cloud/region". + cloudName, cloudRegion = c.CloudRegion[:sep], c.CloudRegion[sep+1:] + if !names.IsValidCloud(cloudName) { + return names.CloudTag{}, jujucloud.Cloud{}, "", errors.NotValidf("cloud name %q", cloudName) + } + cloudTag = names.NewCloudTag(cloudName) + if cloud, err = cloudClient.Cloud(cloudTag); err != nil { + return names.CloudTag{}, jujucloud.Cloud{}, "", errors.Trace(err) + } + } else { + // User specified "cloud" or "region". We'll try first + // for cloud (check if it's a valid cloud name, and + // whether there is a cloud by that name), and then + // as a region within the default cloud. + if names.IsValidCloud(c.CloudRegion) { + cloudName = c.CloudRegion + } else { + cloudRegion = c.CloudRegion + } + if cloudName != "" { + cloudTag = names.NewCloudTag(cloudName) + cloud, err = cloudClient.Cloud(cloudTag) + if params.IsCodeNotFound(err) { + // No such cloud with the specified name, + // so we'll try the name as a region in + // the default cloud. + cloudRegion, cloudName = cloudName, "" + } else if err != nil { + return names.CloudTag{}, jujucloud.Cloud{}, "", errors.Trace(err) + } + } + if cloudName == "" { + cloudTag, cloud, err = defaultCloud(cloudClient) + if err != nil && !errors.IsNotFound(err) { + return names.CloudTag{}, jujucloud.Cloud{}, "", errors.Trace(err) + } + } + } + if cloudRegion != "" { + // A region has been specified, make sure it exists. 
+ if _, err := jujucloud.RegionByName(cloud.Regions, cloudRegion); err != nil { + if cloudRegion == c.CloudRegion { + // The string is not in the format cloud/region, + // so we should tell that the user that it is + // neither a cloud nor a region in the default + // cloud (or that there is no default cloud). + err := c.unsupportedCloudOrRegionError(cloudClient, cloudTag) + return names.CloudTag{}, jujucloud.Cloud{}, "", errors.Trace(err) + } + return names.CloudTag{}, jujucloud.Cloud{}, "", errors.Trace(err) + } + } else if len(cloud.Regions) > 0 { + // The first region in the list is the default. + cloudRegion = cloud.Regions[0].Name + } + return cloudTag, cloud, cloudRegion, nil +} + +func (c *addModelCommand) unsupportedCloudOrRegionError(cloudClient CloudAPI, defaultCloudTag names.CloudTag) (err error) { + clouds, err := cloudClient.Clouds() + if err != nil { + return errors.Annotate(err, "querying supported clouds") + } + cloudNames := make([]string, 0, len(clouds)) + for tag := range clouds { + cloudNames = append(cloudNames, tag.Id()) + } + sort.Strings(cloudNames) + + var buf bytes.Buffer + tw := output.TabWriter(&buf) + fmt.Fprintln(tw, "Cloud\tRegions") + for _, cloudName := range cloudNames { + cloud := clouds[names.NewCloudTag(cloudName)] + regionNames := make([]string, len(cloud.Regions)) + for i, region := range cloud.Regions { + regionNames[i] = region.Name + } + fmt.Fprintf(tw, "%s\t%s\n", cloudName, strings.Join(regionNames, ", ")) + } + tw.Flush() + + var prefix string + if defaultCloudTag != (names.CloudTag{}) { + prefix = fmt.Sprintf(` +%q is neither a cloud supported by this controller, +nor a region in the controller's default cloud %q. +The clouds/regions supported by this controller are:`[1:], + c.CloudRegion, defaultCloudTag.Id()) + } else { + prefix = fmt.Sprintf(` +%q is not a cloud supported by this controller, +and there is no default cloud. 
The clouds/regions supported +by this controller are:`[1:], c.CloudRegion) + } + return errors.Errorf("%s\n\n%s", prefix, buf.String()) +} + +func defaultCloud(cloudClient CloudAPI) (names.CloudTag, jujucloud.Cloud, error) { + cloudTag, err := cloudClient.DefaultCloud() + if err != nil { + if params.IsCodeNotFound(err) { + return names.CloudTag{}, jujucloud.Cloud{}, errors.NewNotFound(nil, ` +there is no default cloud defined, please specify one using: + + juju add-model [flags] cloud[/region]`[1:]) + } + return names.CloudTag{}, jujucloud.Cloud{}, errors.Trace(err) + } + cloud, err := cloudClient.Cloud(cloudTag) + if err != nil { + return names.CloudTag{}, jujucloud.Cloud{}, errors.Trace(err) + } + return cloudTag, cloud, nil +} + +func (c *addModelCommand) maybeUploadCredential( + ctx *cmd.Context, + cloudClient CloudAPI, + cloudTag names.CloudTag, + cloudRegion string, + cloud jujucloud.Cloud, + modelOwner string, +) (names.CloudCredentialTag, error) { + + modelOwnerTag := names.NewUserTag(modelOwner) + credentialTag, err := common.ResolveCloudCredentialTag( + modelOwnerTag, cloudTag, c.CredentialName, + ) + if err != nil { + return names.CloudCredentialTag{}, errors.Trace(err) + } + + // Check if the credential is already in the controller. + // + // TODO(axw) consider implementing a call that can check + // that the credential exists without fetching all of the + // names. + credentialTags, err := cloudClient.UserCredentials(modelOwnerTag, cloudTag) + if err != nil { + return names.CloudCredentialTag{}, errors.Trace(err) + } + credentialId := credentialTag.Id() + for _, tag := range credentialTags { + if tag.Id() != credentialId { + continue + } + ctx.Infof("Using credential '%s' cached in controller", c.CredentialName) + return credentialTag, nil + } + + if credentialTag.Owner().Id() != modelOwner { + // Another user's credential was specified, so + // we cannot automatically upload. 
+ return names.CloudCredentialTag{}, errors.NotFoundf( + "credential '%s'", c.CredentialName, + ) + } + + // Upload the credential from the client, if it exists locally. + credential, _, _, err := modelcmd.GetCredentials( + ctx, c.ClientStore(), modelcmd.GetCredentialsParams{ + Cloud: cloud, + CloudName: cloudTag.Id(), + CloudRegion: cloudRegion, + CredentialName: credentialTag.Name(), + }, + ) + if err != nil { + return names.CloudCredentialTag{}, errors.Trace(err) + } + ctx.Infof("Uploading credential '%s' to controller", credentialTag.Id()) + if err := cloudClient.UpdateCredential(credentialTag, *credential); err != nil { + return names.CloudCredentialTag{}, errors.Trace(err) + } + return credentialTag, nil +} + func (c *addModelCommand) getConfigValues(ctx *cmd.Context) (map[string]interface{}, error) { configValues, err := c.Config.ReadAttrs(ctx) if err != nil { diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/controller/addmodel_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/controller/addmodel_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/controller/addmodel_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/controller/addmodel_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -6,15 +6,19 @@ import ( "fmt" "io/ioutil" + "strings" "github.com/juju/cmd" "github.com/juju/errors" + gitjujutesting "github.com/juju/testing" jc "github.com/juju/testing/checkers" "github.com/juju/utils" gc "gopkg.in/check.v1" + "gopkg.in/juju/names.v2" "gopkg.in/yaml.v2" "github.com/juju/juju/api" + "github.com/juju/juju/api/base" "github.com/juju/juju/apiserver/params" "github.com/juju/juju/cloud" "github.com/juju/juju/cmd/juju/controller" @@ -22,28 +26,27 @@ "github.com/juju/juju/jujuclient/jujuclienttesting" _ "github.com/juju/juju/provider/ec2" "github.com/juju/juju/testing" - "gopkg.in/juju/names.v2" ) -type addSuite struct { +type AddModelSuite struct { testing.FakeJujuXDGDataHomeSuite 
fakeAddModelAPI *fakeAddClient - fakeCloundAPI *fakeCloudAPI + fakeCloudAPI *fakeCloudAPI store *jujuclienttesting.MemStore } -var _ = gc.Suite(&addSuite{}) +var _ = gc.Suite(&AddModelSuite{}) -func (s *addSuite) SetUpTest(c *gc.C) { +func (s *AddModelSuite) SetUpTest(c *gc.C) { s.FakeJujuXDGDataHomeSuite.SetUpTest(c) s.fakeAddModelAPI = &fakeAddClient{ - model: params.ModelInfo{ - Name: "test", - UUID: "fake-model-uuid", - OwnerTag: "ignored-for-now", + model: base.ModelInfo{ + Name: "test", + UUID: "fake-model-uuid", + Owner: "ignored-for-now", }, } - s.fakeCloundAPI = &fakeCloudAPI{} + s.fakeCloudAPI = &fakeCloudAPI{} // Set up the current controller, and write just enough info // so we don't try to refresh @@ -52,7 +55,7 @@ s.store.CurrentControllerName = controllerName s.store.Controllers[controllerName] = jujuclient.ControllerDetails{} s.store.Accounts[controllerName] = jujuclient.AccountDetails{ - User: "bob@local", + User: "bob", } s.store.Credentials["aws"] = cloud.CloudCredential{ AuthCredentials: map[string]cloud.Credential{ @@ -72,19 +75,20 @@ return nil } -func (s *addSuite) run(c *gc.C, args ...string) (*cmd.Context, error) { - command, _ := controller.NewAddModelCommandForTest(&fakeAPIConnection{}, s.fakeAddModelAPI, s.fakeCloundAPI, s.store) +func (s *AddModelSuite) run(c *gc.C, args ...string) (*cmd.Context, error) { + command, _ := controller.NewAddModelCommandForTest(&fakeAPIConnection{}, s.fakeAddModelAPI, s.fakeCloudAPI, s.store) return testing.RunCommand(c, command, args...) 
} -func (s *addSuite) TestInit(c *gc.C) { +func (s *AddModelSuite) TestInit(c *gc.C) { modelNameErr := "%q is not a valid name: model names may only contain lowercase letters, digits and hyphens" for i, test := range []struct { - args []string - err string - name string - owner string - values map[string]interface{} + args []string + err string + name string + owner string + cloudRegion string + values map[string]interface{} }{ { err: "model name is required", @@ -118,7 +122,11 @@ name: "new-model", values: map[string]interface{}{"key": "value", "key2": "value2"}, }, { - args: []string{"new-model", "extra", "args"}, + args: []string{"new-model", "cloud/region"}, + name: "new-model", + cloudRegion: "cloud/region", + }, { + args: []string{"new-model", "cloud/region", "extra", "args"}, err: `unrecognized args: \["extra" "args"\]`, }, } { @@ -133,6 +141,7 @@ c.Assert(err, jc.ErrorIsNil) c.Assert(command.Name, gc.Equals, test.name) c.Assert(command.Owner, gc.Equals, test.owner) + c.Assert(command.CloudRegion, gc.Equals, test.cloudRegion) attrs, err := command.Config.ReadAttrs(nil) c.Assert(err, jc.ErrorIsNil) if len(test.values) == 0 { @@ -143,12 +152,12 @@ } } -func (s *addSuite) TestAddExistingName(c *gc.C) { +func (s *AddModelSuite) TestAddExistingName(c *gc.C) { // If there's any model details existing, we just overwrite them. The // controller will error out if the model already exists. Overwriting // means we'll replace any stale details from an previously existing // model with the same name. 
- err := s.store.UpdateModel("test-master", "bob@local/test", jujuclient.ModelDetails{ + err := s.store.UpdateModel("test-master", "bob/test", jujuclient.ModelDetails{ "stale-uuid", }) c.Assert(err, jc.ErrorIsNil) @@ -156,21 +165,123 @@ _, err = s.run(c, "test") c.Assert(err, jc.ErrorIsNil) - details, err := s.store.ModelByName("test-master", "bob@local/test") + details, err := s.store.ModelByName("test-master", "bob/test") c.Assert(err, jc.ErrorIsNil) c.Assert(details, jc.DeepEquals, &jujuclient.ModelDetails{"fake-model-uuid"}) } -func (s *addSuite) TestCredentialsPassedThrough(c *gc.C) { - c.Skip("TODO(wallyworld) - port to using new credential management") +func (s *AddModelSuite) TestAddModelUnauthorizedMentionsJujuGrant(c *gc.C) { + s.fakeAddModelAPI.err = ¶ms.Error{ + Message: "permission denied", + Code: params.CodeUnauthorized, + } + ctx, _ := s.run(c, "test") + errString := strings.Replace(testing.Stderr(ctx), "\n", " ", -1) + c.Assert(errString, gc.Matches, `.*juju grant.*`) +} + +func (s *AddModelSuite) TestCredentialsPassedThrough(c *gc.C) { _, err := s.run(c, "test", "--credential", "secrets") c.Assert(err, jc.ErrorIsNil) - c.Assert(s.fakeAddModelAPI.cloudCredential, gc.Equals, "secrets") - c.Assert(s.fakeAddModelAPI.config["type"], gc.Equals, "ec2") + c.Assert(s.fakeAddModelAPI.cloudCredential, gc.Equals, names.NewCloudCredentialTag("aws/bob/secrets")) } -func (s *addSuite) TestComandLineConfigPassedThrough(c *gc.C) { +func (s *AddModelSuite) TestCredentialsOtherUserPassedThrough(c *gc.C) { + _, err := s.run(c, "test", "--credential", "other/secrets") + c.Assert(err, jc.ErrorIsNil) + + c.Assert(s.fakeAddModelAPI.cloudCredential, gc.Equals, names.NewCloudCredentialTag("aws/other/secrets")) +} + +func (s *AddModelSuite) TestCredentialsOtherUserPassedThroughWhenCloud(c *gc.C) { + _, err := s.run(c, "test", "--credential", "other/secrets", "aws/us-west-1") + c.Assert(err, jc.ErrorIsNil) + + c.Assert(s.fakeAddModelAPI.cloudCredential, gc.Equals, 
names.NewCloudCredentialTag("aws/other/secrets")) +} + +func (s *AddModelSuite) TestCredentialsNoDefaultCloud(c *gc.C) { + s.fakeCloudAPI.SetErrors(¶ms.Error{Code: params.CodeNotFound}) + _, err := s.run(c, "test", "--credential", "secrets") + c.Assert(err, gc.ErrorMatches, `there is no default cloud defined, please specify one using: + + juju add-model \[flags\] \ cloud\[/region\]`) +} + +func (s *AddModelSuite) TestCloudRegionPassedThrough(c *gc.C) { + _, err := s.run(c, "test", "aws/us-west-1") + c.Assert(err, jc.ErrorIsNil) + + c.Assert(s.fakeAddModelAPI.cloudName, gc.Equals, "aws") + c.Assert(s.fakeAddModelAPI.cloudRegion, gc.Equals, "us-west-1") +} + +func (s *AddModelSuite) TestDefaultCloudPassedThrough(c *gc.C) { + _, err := s.run(c, "test") + c.Assert(err, jc.ErrorIsNil) + + s.fakeCloudAPI.CheckCallNames(c /*none*/) + c.Assert(s.fakeAddModelAPI.cloudName, gc.Equals, "") + c.Assert(s.fakeAddModelAPI.cloudRegion, gc.Equals, "") +} + +func (s *AddModelSuite) TestDefaultCloudRegionPassedThrough(c *gc.C) { + _, err := s.run(c, "test", "us-west-1") + c.Assert(err, jc.ErrorIsNil) + + s.fakeCloudAPI.CheckCalls(c, []gitjujutesting.StubCall{ + {"Cloud", []interface{}{names.NewCloudTag("us-west-1")}}, + {"DefaultCloud", nil}, + {"Cloud", []interface{}{names.NewCloudTag("aws")}}, + }) + c.Assert(s.fakeAddModelAPI.cloudName, gc.Equals, "aws") + c.Assert(s.fakeAddModelAPI.cloudRegion, gc.Equals, "us-west-1") +} + +func (s *AddModelSuite) TestNoDefaultCloudRegion(c *gc.C) { + s.fakeCloudAPI.SetErrors( + ¶ms.Error{Code: params.CodeNotFound}, // no default region + ) + _, err := s.run(c, "test", "us-west-1") + c.Assert(err, gc.ErrorMatches, ` +"us-west-1" is not a cloud supported by this controller, +and there is no default cloud. 
The clouds/regions supported +by this controller are: + +Cloud Regions +aws us-east-1, us-west-1 +lxd +`[1:]) + s.fakeCloudAPI.CheckCalls(c, []gitjujutesting.StubCall{ + {"Cloud", []interface{}{names.NewCloudTag("us-west-1")}}, + {"DefaultCloud", nil}, + {"Clouds", nil}, + }) +} + +func (s *AddModelSuite) TestCloudDefaultRegionPassedThrough(c *gc.C) { + _, err := s.run(c, "test", "aws") + c.Assert(err, jc.ErrorIsNil) + + c.Assert(s.fakeAddModelAPI.cloudName, gc.Equals, "aws") + c.Assert(s.fakeAddModelAPI.cloudRegion, gc.Equals, "us-east-1") +} + +func (s *AddModelSuite) TestInvalidCloudOrRegionName(c *gc.C) { + _, err := s.run(c, "test", "oro") + c.Assert(err, gc.ErrorMatches, ` +"oro" is neither a cloud supported by this controller, +nor a region in the controller's default cloud "aws". +The clouds/regions supported by this controller are: + +Cloud Regions +aws us-east-1, us-west-1 +lxd +`[1:]) +} + +func (s *AddModelSuite) TestComandLineConfigPassedThrough(c *gc.C) { _, err := s.run(c, "test", "--config", "account=magic", "--config", "cloud=special") c.Assert(err, jc.ErrorIsNil) @@ -178,7 +289,7 @@ c.Assert(s.fakeAddModelAPI.config["cloud"], gc.Equals, "special") } -func (s *addSuite) TestConfigFileValuesPassedThrough(c *gc.C) { +func (s *AddModelSuite) TestConfigFileValuesPassedThrough(c *gc.C) { config := map[string]string{ "account": "magic", "cloud": "9", @@ -196,7 +307,7 @@ c.Assert(s.fakeAddModelAPI.config["cloud"], gc.Equals, "9") } -func (s *addSuite) TestConfigFileWithNestedMaps(c *gc.C) { +func (s *AddModelSuite) TestConfigFileWithNestedMaps(c *gc.C) { nestedConfig := map[string]interface{}{ "account": "magic", "cloud": "9", @@ -219,7 +330,7 @@ c.Assert(s.fakeAddModelAPI.config["nested"], jc.DeepEquals, nestedConfig) } -func (s *addSuite) TestConfigFileFailsToConform(c *gc.C) { +func (s *AddModelSuite) TestConfigFileFailsToConform(c *gc.C) { nestedConfig := map[int]interface{}{ 9: "9", } @@ -238,7 +349,7 @@ c.Assert(err, gc.ErrorMatches, `unable to 
parse config: map keyed with non-string value`) } -func (s *addSuite) TestConfigFileFormatError(c *gc.C) { +func (s *AddModelSuite) TestConfigFileFormatError(c *gc.C) { file, err := ioutil.TempFile(c.MkDir(), "") c.Assert(err, jc.ErrorIsNil) file.Write(([]byte)("not: valid: yaml")) @@ -248,13 +359,13 @@ c.Assert(err, gc.ErrorMatches, `unable to parse config: yaml: .*`) } -func (s *addSuite) TestConfigFileDoesntExist(c *gc.C) { +func (s *AddModelSuite) TestConfigFileDoesntExist(c *gc.C) { _, err := s.run(c, "test", "--config", "missing-file") errMsg := ".*" + utils.NoSuchFileErrRegexp c.Assert(err, gc.ErrorMatches, errMsg) } -func (s *addSuite) TestConfigValuePrecedence(c *gc.C) { +func (s *AddModelSuite) TestConfigValuePrecedence(c *gc.C) { config := map[string]string{ "account": "magic", "cloud": "9", @@ -272,31 +383,31 @@ c.Assert(s.fakeAddModelAPI.config["cloud"], gc.Equals, "special") } -func (s *addSuite) TestAddErrorRemoveConfigstoreInfo(c *gc.C) { +func (s *AddModelSuite) TestAddErrorRemoveConfigstoreInfo(c *gc.C) { s.fakeAddModelAPI.err = errors.New("bah humbug") _, err := s.run(c, "test") c.Assert(err, gc.ErrorMatches, "bah humbug") - _, err = s.store.ModelByName("test-master", "bob@local/test") + _, err = s.store.ModelByName("test-master", "bob/test") c.Assert(err, jc.Satisfies, errors.IsNotFound) } -func (s *addSuite) TestAddStoresValues(c *gc.C) { +func (s *AddModelSuite) TestAddStoresValues(c *gc.C) { _, err := s.run(c, "test") c.Assert(err, jc.ErrorIsNil) - model, err := s.store.ModelByName("test-master", "bob@local/test") + model, err := s.store.ModelByName("test-master", "bob/test") c.Assert(err, jc.ErrorIsNil) c.Assert(model, jc.DeepEquals, &jujuclient.ModelDetails{"fake-model-uuid"}) } -func (s *addSuite) TestNoEnvCacheOtherUser(c *gc.C) { +func (s *AddModelSuite) TestNoEnvCacheOtherUser(c *gc.C) { _, err := s.run(c, "test", "--owner", "zeus") c.Assert(err, jc.ErrorIsNil) // Creating a model for another user does not update the model cache. 
- _, err = s.store.ModelByName("test-master", "bob@local/test") + _, err = s.store.ModelByName("test-master", "bob/test") c.Assert(err, jc.Satisfies, errors.IsNotFound) } @@ -304,11 +415,12 @@ // AddModel command. type fakeAddClient struct { owner string + cloudName string cloudRegion string - cloudCredential string + cloudCredential names.CloudCredentialTag config map[string]interface{} err error - model params.ModelInfo + model base.ModelInfo } var _ controller.AddModelAPI = (*fakeAddClient)(nil) @@ -317,12 +429,13 @@ return nil } -func (f *fakeAddClient) CreateModel(name, owner, cloudRegion, cloudCredential string, config map[string]interface{}) (params.ModelInfo, error) { +func (f *fakeAddClient) CreateModel(name, owner, cloudName, cloudRegion string, cloudCredential names.CloudCredentialTag, config map[string]interface{}) (base.ModelInfo, error) { if f.err != nil { - return params.ModelInfo{}, f.err + return base.ModelInfo{}, f.err } f.owner = owner f.cloudCredential = cloudCredential + f.cloudName = cloudName f.cloudRegion = cloudRegion f.config = config return f.model, nil @@ -331,14 +444,51 @@ // TODO(wallyworld) - improve this stub and add test asserts type fakeCloudAPI struct { controller.CloudAPI + gitjujutesting.Stub +} + +func (c *fakeCloudAPI) DefaultCloud() (names.CloudTag, error) { + c.MethodCall(c, "DefaultCloud") + return names.NewCloudTag("aws"), c.NextErr() +} + +func (c *fakeCloudAPI) Clouds() (map[names.CloudTag]cloud.Cloud, error) { + c.MethodCall(c, "Clouds") + return map[names.CloudTag]cloud.Cloud{ + names.NewCloudTag("aws"): { + Regions: []cloud.Region{ + {Name: "us-east-1"}, + {Name: "us-west-1"}, + }, + }, + names.NewCloudTag("lxd"): {}, + }, c.NextErr() +} + +func (c *fakeCloudAPI) Cloud(tag names.CloudTag) (cloud.Cloud, error) { + c.MethodCall(c, "Cloud", tag) + if tag.Id() != "aws" { + return cloud.Cloud{}, ¶ms.Error{Code: params.CodeNotFound} + } + return cloud.Cloud{ + Type: "ec2", + AuthTypes: 
[]cloud.AuthType{cloud.AccessKeyAuthType}, + Regions: []cloud.Region{ + {Name: "us-east-1"}, + {Name: "us-west-1"}, + }, + }, c.NextErr() } -func (c *fakeCloudAPI) Credentials(names.UserTag, names.CloudTag) (map[string]cloud.Credential, error) { - return map[string]cloud.Credential{ - "default": cloud.NewEmptyCredential(), - }, nil +func (c *fakeCloudAPI) UserCredentials(user names.UserTag, cloud names.CloudTag) ([]names.CloudCredentialTag, error) { + c.MethodCall(c, "UserCredentials", user, cloud) + return []names.CloudCredentialTag{ + names.NewCloudCredentialTag("cloud/admin/default"), + names.NewCloudCredentialTag("aws/other/secrets"), + }, c.NextErr() } -func (c *fakeCloudAPI) UpdateCredentials(names.UserTag, names.CloudTag, map[string]cloud.Credential) error { - return nil +func (c *fakeCloudAPI) UpdateCredential(credentialTag names.CloudCredentialTag, credential cloud.Credential) error { + c.MethodCall(c, "UpdateCredential", credentialTag, credential) + return c.NextErr() } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/controller/destroy.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/controller/destroy.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/controller/destroy.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/controller/destroy.go 2016-10-13 14:31:49.000000000 +0000 @@ -9,13 +9,13 @@ "fmt" "io" "strings" - "text/tabwriter" "time" "github.com/juju/cmd" "github.com/juju/errors" + "github.com/juju/gnuflag" + "github.com/juju/utils/clock" "gopkg.in/juju/names.v2" - "launchpad.net/gnuflag" "github.com/juju/juju/api/base" "github.com/juju/juju/api/controller" @@ -36,8 +36,8 @@ // controller environment anyway. 
return modelcmd.WrapController( &destroyCommand{}, - modelcmd.ControllerSkipFlags, - modelcmd.ControllerSkipDefault, + modelcmd.WrapControllerSkipControllerFlags, + modelcmd.WrapControllerSkipDefaultController, ) } @@ -59,7 +59,8 @@ juju destroy-controller --destroy-all-models mycontroller See also: - kill-controller` + kill-controller + unregister` var usageSummary = ` Destroys a controller.`[1:] @@ -75,6 +76,7 @@ type destroyControllerAPI interface { Close() error ModelConfig() (map[string]interface{}, error) + HostedModelConfigs() ([]controller.HostedConfig, error) CloudSpec(names.ModelTag) (environs.CloudSpec, error) DestroyController(destroyModels bool) error ListBlockedModels() ([]params.ModelBlockInfo, error) @@ -102,21 +104,16 @@ // SetFlags implements Command.SetFlags. func (c *destroyCommand) SetFlags(f *gnuflag.FlagSet) { - f.BoolVar(&c.destroyModels, "destroy-all-models", false, "Destroy all hosted models in the controller") c.destroyCommandBase.SetFlags(f) + f.BoolVar(&c.destroyModels, "destroy-all-models", false, "Destroy all hosted models in the controller") } // Run implements Command.Run func (c *destroyCommand) Run(ctx *cmd.Context) error { controllerName := c.ControllerName() store := c.ClientStore() - controllerDetails, err := store.ControllerByName(controllerName) - if err != nil { - return errors.Annotate(err, "cannot read controller info") - } - if !c.assumeYes { - if err = confirmDestruction(ctx, c.ControllerName()); err != nil { + if err := confirmDestruction(ctx, c.ControllerName()); err != nil { return err } } @@ -130,7 +127,7 @@ defer api.Close() // Obtain controller environ so we can clean up afterwards. 
- controllerEnviron, err := c.getControllerEnviron(store, controllerName, api) + controllerEnviron, err := c.getControllerEnviron(ctx, store, controllerName, api) if err != nil { return errors.Annotate(err, "getting controller environ") } @@ -151,7 +148,7 @@ } } - updateStatus := newTimedStatusUpdater(ctx, api, controllerDetails.ControllerUUID) + updateStatus := newTimedStatusUpdater(ctx, api, controllerEnviron.Config().UUID(), clock.WallClock) ctrStatus, modelsStatus := updateStatus(0) if !c.destroyModels { if err := c.checkNoAliveHostedModels(ctx, modelsStatus); err != nil { @@ -191,7 +188,7 @@ // and there are models still alive. var buf bytes.Buffer for _, model := range models { - if model.Life != params.Alive { + if model.Life != string(params.Alive) { continue } buf.WriteString(fmtModelStatus(model)) @@ -218,15 +215,20 @@ logger.Errorf(destroyControllerBlockedMsg) if api != nil { models, err := api.ListBlockedModels() - var bytes []byte + out := &bytes.Buffer{} if err == nil { - bytes, err = formatTabularBlockedModels(models) + var info interface{} + info, err = block.FormatModelBlockInfo(models) + if err != nil { + return errors.Trace(err) + } + err = block.FormatTabularBlockedModels(out, info) } if err != nil { - logger.Errorf("Unable to list blocked models: %s", err) + logger.Errorf("Unable to list models: %s", err) return cmd.ErrSilent } - ctx.Infof(string(bytes)) + ctx.Infof(out.String()) } return cmd.ErrSilent } @@ -237,10 +239,11 @@ return destroyErr } -const destroyControllerBlockedMsg = `there are blocks preventing controller destruction -To remove all blocks in the controller, please run: +const destroyControllerBlockedMsg = `there are models with disabled commands preventing controller destruction - juju controller remove-blocks +To enable controller destruction, please run: + + juju enable-destroy-controller ` @@ -258,41 +261,6 @@ ` -func formatTabularBlockedModels(value interface{}) ([]byte, error) { - models, ok := 
value.([]params.ModelBlockInfo) - if !ok { - return nil, errors.Errorf("expected value of type %T, got %T", models, value) - } - - var out bytes.Buffer - const ( - // To format things into columns. - minwidth = 0 - tabwidth = 1 - padding = 2 - padchar = ' ' - flags = 0 - ) - tw := tabwriter.NewWriter(&out, minwidth, tabwidth, padding, padchar, flags) - fmt.Fprintf(tw, "NAME\tMODEL UUID\tOWNER\tBLOCKS\n") - for _, model := range models { - fmt.Fprintf(tw, "%s\t%s\t%s\t%s\n", model.Name, model.UUID, model.OwnerTag, blocksToStr(model.Blocks)) - } - tw.Flush() - return out.Bytes(), nil -} - -func blocksToStr(blocks []string) string { - result := "" - sep := "" - for _, blk := range blocks { - result = result + sep + block.OperationFromType(blk) - sep = "," - } - - return result -} - // destroyCommandBase provides common attributes and methods that both the controller // destroy and controller kill commands require. type destroyCommandBase struct { @@ -319,6 +287,7 @@ // SetFlags implements Command.SetFlags. func (c *destroyCommandBase) SetFlags(f *gnuflag.FlagSet) { + c.ControllerCommandBase.SetFlags(f) f.BoolVar(&c.assumeYes, "y", false, "Do not ask for confirmation") f.BoolVar(&c.assumeYes, "yes", false, "") } @@ -341,11 +310,12 @@ // Environ by first checking the config store, then querying the // API if the information is not in the store. 
func (c *destroyCommandBase) getControllerEnviron( + ctx *cmd.Context, store jujuclient.ClientStore, controllerName string, sysAPI destroyControllerAPI, ) (environs.Environ, error) { - env, err := c.getControllerEnvironFromStore(store, controllerName) + env, err := c.getControllerEnvironFromStore(ctx, store, controllerName) if errors.IsNotFound(err) { return c.getControllerEnvironFromAPI(sysAPI, controllerName) } else if err != nil { @@ -355,10 +325,11 @@ } func (c *destroyCommandBase) getControllerEnvironFromStore( + ctx *cmd.Context, store jujuclient.ClientStore, controllerName string, ) (environs.Environ, error) { - bootstrapConfig, params, err := modelcmd.NewGetBootstrapConfigParamsFunc(store)(controllerName) + bootstrapConfig, params, err := modelcmd.NewGetBootstrapConfigParamsFunc(ctx, store)(controllerName) if err != nil { return nil, errors.Trace(err) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/controller/destroy_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/controller/destroy_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/controller/destroy_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/controller/destroy_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -15,6 +15,7 @@ "gopkg.in/juju/names.v2" "github.com/juju/juju/api/base" + apicontroller "github.com/juju/juju/api/controller" "github.com/juju/juju/apiserver/params" "github.com/juju/juju/cmd/juju/controller" "github.com/juju/juju/cmd/modelcmd" @@ -31,6 +32,10 @@ test1UUID = "1871299e-1370-4f3e-83ab-1849ed7b1076" test2UUID = "c59d0e3b-2bd7-4867-b1b9-f1ef8a0bb004" test3UUID = "82bf9738-764b-49c1-9c19-18f6ee155854" + + test1ControllerUUID = "2371299e-1370-4f3e-83ab-1849ed7b1076" + test2ControllerUUID = "f89d0e3b-5bd7-9867-b1b9-f1ef8a0bb004" + test3ControllerUUID = "cfbf9738-764b-49c1-9c19-18f6ee155854" ) type DestroySuite struct { @@ -50,12 +55,13 @@ // fakeDestroyAPI mocks out the controller API type 
fakeDestroyAPI struct { gitjujutesting.Stub - cloud environs.CloudSpec - env map[string]interface{} - destroyAll bool - blocks []params.ModelBlockInfo - envStatus map[string]base.ModelStatus - allEnvs []base.UserModel + cloud environs.CloudSpec + env map[string]interface{} + destroyAll bool + blocks []params.ModelBlockInfo + envStatus map[string]base.ModelStatus + allModels []base.UserModel + hostedConfig []apicontroller.HostedConfig } func (f *fakeDestroyAPI) Close() error { @@ -79,6 +85,14 @@ return f.env, nil } +func (f *fakeDestroyAPI) HostedModelConfigs() ([]apicontroller.HostedConfig, error) { + f.MethodCall(f, "HostedModelConfigs") + if err := f.NextErr(); err != nil { + return nil, err + } + return f.hostedConfig, nil +} + func (f *fakeDestroyAPI) DestroyController(destroyAll bool) error { f.MethodCall(f, "DestroyController", destroyAll) f.destroyAll = destroyAll @@ -101,7 +115,7 @@ func (f *fakeDestroyAPI) AllModels() ([]base.UserModel, error) { f.MethodCall(f, "AllModels") - return f.allEnvs, f.NextErr() + return f.allModels, f.NextErr() } // fakeDestroyAPIClient mocks out the client API @@ -151,42 +165,42 @@ s.store.Controllers["test1"] = jujuclient.ControllerDetails{ APIEndpoints: []string{"localhost"}, CACert: testing.CACert, - ControllerUUID: test1UUID, + ControllerUUID: test1ControllerUUID, } s.store.Controllers["test3"] = jujuclient.ControllerDetails{ APIEndpoints: []string{"localhost"}, CACert: testing.CACert, - ControllerUUID: test3UUID, + ControllerUUID: test3ControllerUUID, } s.store.Accounts["test1"] = jujuclient.AccountDetails{ - User: "admin@local", + User: "admin", } var modelList = []struct { - name string - serverUUID string - modelUUID string - bootstrapCfg map[string]interface{} + name string + controllerUUID string + modelUUID string + bootstrapCfg map[string]interface{} }{ { - name: "test1:admin", - serverUUID: test1UUID, - modelUUID: test1UUID, - bootstrapCfg: createBootstrapInfo(c, "admin"), + name: "test1:admin", + controllerUUID: 
test1ControllerUUID, + modelUUID: test1UUID, + bootstrapCfg: createBootstrapInfo(c, "admin"), }, { - name: "test2:test2", - serverUUID: test1UUID, - modelUUID: test2UUID, + name: "test2:test2", + controllerUUID: test2ControllerUUID, + modelUUID: test2UUID, }, { - name: "test3:admin", - serverUUID: test3UUID, - modelUUID: test3UUID, + name: "test3:admin", + controllerUUID: test3ControllerUUID, + modelUUID: test3UUID, }, } for _, model := range modelList { controllerName, modelName := modelcmd.SplitModelName(model.name) s.store.UpdateController(controllerName, jujuclient.ControllerDetails{ - ControllerUUID: model.serverUUID, + ControllerUUID: model.controllerUUID, APIEndpoints: []string{"localhost"}, CACert: testing.CACert, }) @@ -195,23 +209,24 @@ }) if model.bootstrapCfg != nil { s.store.BootstrapConfig[controllerName] = jujuclient.BootstrapConfig{ - Config: createBootstrapInfo(c, "admin"), - CloudType: "dummy", + ControllerModelUUID: model.modelUUID, + Config: createBootstrapInfo(c, "admin"), + CloudType: "dummy", } } uuid := model.modelUUID - s.api.allEnvs = append(s.api.allEnvs, base.UserModel{ + s.api.allModels = append(s.api.allModels, base.UserModel{ Name: model.name, UUID: uuid, - Owner: owner.Canonical(), + Owner: owner.Id(), }) s.api.envStatus[model.modelUUID] = base.ModelStatus{ UUID: uuid, - Life: params.Dead, + Life: string(params.Dead), HostedMachineCount: 0, ServiceCount: 0, - Owner: owner.Canonical(), + Owner: owner.Id(), } } } @@ -286,7 +301,7 @@ checkControllerRemovedFromStore(c, "test1", s.store) } -func (s *DestroySuite) TestDestroyWithDestroyAllEnvsFlag(c *gc.C) { +func (s *DestroySuite) TestDestroyWithDestroyAllModelsFlag(c *gc.C) { _, err := s.runDestroyCommand(c, "test1", "-y", "--destroy-all-models") c.Assert(err, jc.ErrorIsNil) c.Assert(s.api.destroyAll, jc.IsTrue) @@ -312,7 +327,7 @@ func (s *DestroySuite) TestDestroyControllerAliveModels(c *gc.C) { for uuid, status := range s.api.envStatus { - status.Life = params.Alive + status.Life = 
string(params.Alive) s.api.envStatus[uuid] = status } s.api.SetErrors(¶ms.Error{Code: params.CodeHasHostedModels}) @@ -325,8 +340,8 @@ flag. Models: - owner@local/test2:test2 (alive) - owner@local/test3:admin (alive) + owner/test2:test2 (alive) + owner/test3:admin (alive) `) } @@ -360,11 +375,12 @@ ControllerUUID: test1UUID, } s.store.Accounts["test1"] = jujuclient.AccountDetails{ - User: "admin@local", + User: "admin", } s.store.BootstrapConfig["test1"] = jujuclient.BootstrapConfig{ - Config: createBootstrapInfo(c, "admin"), - CloudType: "dummy", + ControllerModelUUID: test1UUID, + Config: createBootstrapInfo(c, "admin"), + CloudType: "dummy", } } @@ -421,8 +437,8 @@ s.api.SetErrors(¶ms.Error{Code: params.CodeOperationBlocked}) s.runDestroyCommand(c, "test1", "-y") testLog := c.GetTestLog() - c.Check(testLog, jc.Contains, "To remove all blocks in the controller, please run:") - c.Check(testLog, jc.Contains, "juju controller remove-blocks") + c.Check(testLog, jc.Contains, "To enable controller destruction, please run:") + c.Check(testLog, jc.Contains, "juju enable-destroy-controller") } func (s *DestroySuite) TestDestroyListBlocksError(c *gc.C) { @@ -432,9 +448,9 @@ ) s.runDestroyCommand(c, "test1", "-y") testLog := c.GetTestLog() - c.Check(testLog, jc.Contains, "To remove all blocks in the controller, please run:") - c.Check(testLog, jc.Contains, "juju controller remove-blocks") - c.Check(testLog, jc.Contains, "Unable to list blocked models: unexpected api error") + c.Check(testLog, jc.Contains, "To enable controller destruction, please run:") + c.Check(testLog, jc.Contains, "juju enable-destroy-controller") + c.Check(testLog, jc.Contains, "Unable to list models: unexpected api error") } func (s *DestroySuite) TestDestroyReturnsBlocks(c *gc.C) { @@ -443,7 +459,7 @@ params.ModelBlockInfo{ Name: "test1", UUID: test1UUID, - OwnerTag: "cheryl@local", + OwnerTag: "user-cheryl", Blocks: []string{ "BlockDestroy", }, @@ -451,7 +467,7 @@ params.ModelBlockInfo{ Name: 
"test2", UUID: test2UUID, - OwnerTag: "bob@local", + OwnerTag: "user-bob", Blocks: []string{ "BlockDestroy", "BlockChange", @@ -460,7 +476,8 @@ } ctx, _ := s.runDestroyCommand(c, "test1", "-y", "--destroy-all-models") c.Assert(testing.Stderr(ctx), gc.Equals, "Destroying controller\n"+ - "NAME MODEL UUID OWNER BLOCKS\n"+ - "test1 1871299e-1370-4f3e-83ab-1849ed7b1076 cheryl@local destroy-model\n"+ - "test2 c59d0e3b-2bd7-4867-b1b9-f1ef8a0bb004 bob@local destroy-model,all-changes\n") + "Name Model UUID Owner Disabled commands\n"+ + "test1 1871299e-1370-4f3e-83ab-1849ed7b1076 cheryl destroy-model\n"+ + "test2 c59d0e3b-2bd7-4867-b1b9-f1ef8a0bb004 bob all, destroy-model\n") + c.Assert(testing.Stdout(ctx), gc.Equals, "") } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/controller/enabledestroy.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/controller/enabledestroy.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/controller/enabledestroy.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/controller/enabledestroy.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,66 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package controller + +import ( + "github.com/juju/cmd" + "github.com/juju/errors" + + "github.com/juju/juju/cmd/modelcmd" +) + +// NewEnableDestroyControllerCommand returns a command that allows a controller admin +// to remove blocks from the controller. +func NewEnableDestroyControllerCommand() cmd.Command { + return modelcmd.WrapController(&enableDestroyController{}) +} + +type enableDestroyController struct { + modelcmd.ControllerCommandBase + api removeBlocksAPI +} + +type removeBlocksAPI interface { + Close() error + RemoveBlocks() error +} + +var enableDestroyDoc = ` +Any model in the controller that has disabled commands will block a controller +from being destroyed. 
+ +A controller administrator is able to enable all the commands across all the models +in a Juju controller so that the controller can be destoyed if desired. + +See also: + disable-command + disabled-commands + enable-command +` + +// Info implements Command.Info +func (c *enableDestroyController) Info() *cmd.Info { + return &cmd.Info{ + Name: "enable-destroy-controller", + Purpose: "Enable destroy-controller by removing disabled commands in the controller.", + Doc: enableDestroyDoc, + } +} + +func (c *enableDestroyController) getAPI() (removeBlocksAPI, error) { + if c.api != nil { + return c.api, nil + } + return c.NewControllerAPIClient() +} + +// Run implements Command.Run +func (c *enableDestroyController) Run(ctx *cmd.Context) error { + client, err := c.getAPI() + if err != nil { + return errors.Trace(err) + } + defer client.Close() + return errors.Trace(client.RemoveBlocks()) +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/controller/enabledestroy_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/controller/enabledestroy_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/controller/enabledestroy_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/controller/enabledestroy_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,69 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+ +package controller_test + +import ( + "github.com/juju/cmd" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/apiserver/common" + "github.com/juju/juju/cmd/juju/controller" + "github.com/juju/juju/jujuclient" + "github.com/juju/juju/jujuclient/jujuclienttesting" + "github.com/juju/juju/testing" +) + +type enableDestroyControllerSuite struct { + baseControllerSuite + api *fakeRemoveBlocksAPI + store *jujuclienttesting.MemStore +} + +var _ = gc.Suite(&enableDestroyControllerSuite{}) + +func (s *enableDestroyControllerSuite) SetUpTest(c *gc.C) { + s.baseControllerSuite.SetUpTest(c) + + s.api = &fakeRemoveBlocksAPI{} + s.store = jujuclienttesting.NewMemStore() + s.store.CurrentControllerName = "fake" + s.store.Controllers["fake"] = jujuclient.ControllerDetails{} +} + +func (s *enableDestroyControllerSuite) newCommand() cmd.Command { + return controller.NewEnableDestroyControllerCommandForTest(s.api, s.store) +} + +func (s *enableDestroyControllerSuite) TestRemove(c *gc.C) { + _, err := testing.RunCommand(c, s.newCommand()) + c.Assert(err, jc.ErrorIsNil) + c.Assert(s.api.called, jc.IsTrue) +} + +func (s *enableDestroyControllerSuite) TestUnrecognizedArg(c *gc.C) { + _, err := testing.RunCommand(c, s.newCommand(), "whoops") + c.Assert(err, gc.ErrorMatches, `unrecognized args: \["whoops"\]`) + c.Assert(s.api.called, jc.IsFalse) +} + +func (s *enableDestroyControllerSuite) TestEnvironmentsError(c *gc.C) { + s.api.err = common.ErrPerm + _, err := testing.RunCommand(c, s.newCommand()) + c.Assert(err, gc.ErrorMatches, "permission denied") +} + +type fakeRemoveBlocksAPI struct { + err error + called bool +} + +func (f *fakeRemoveBlocksAPI) Close() error { + return nil +} + +func (f *fakeRemoveBlocksAPI) RemoveBlocks() error { + f.called = true + return f.err +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/controller/export_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/controller/export_test.go --- 
juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/controller/export_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/controller/export_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,8 +4,12 @@ package controller import ( + "time" + "github.com/juju/cmd" + jc "github.com/juju/testing/checkers" "github.com/juju/utils/clock" + gc "gopkg.in/check.v1" "github.com/juju/juju/api" "github.com/juju/juju/api/base" @@ -15,17 +19,19 @@ // NewListControllersCommandForTest returns a listControllersCommand with the clientstore provided // as specified. -func NewListControllersCommandForTest(testStore jujuclient.ClientStore) *listControllersCommand { +func NewListControllersCommandForTest(testStore jujuclient.ClientStore, api func(string) ControllerAccessAPI) *listControllersCommand { return &listControllersCommand{ store: testStore, + api: api, } } // NewShowControllerCommandForTest returns a showControllerCommand with the clientstore provided // as specified. -func NewShowControllerCommandForTest(testStore jujuclient.ClientStore) *showControllerCommand { +func NewShowControllerCommandForTest(testStore jujuclient.ClientStore, api func(string) ControllerAccessAPI) *showControllerCommand { return &showControllerCommand{ store: testStore, + api: api, } } @@ -71,10 +77,10 @@ return ®isterCommand{apiOpen: apiOpen, listModelsFunc: listModels, store: store} } -// NewRemoveBlocksCommandForTest returns a RemoveBlocksCommand with the +// NewEnableDestroyControllerCommandForTest returns a enableDestroyController with the // function used to open the API connection mocked out. 
-func NewRemoveBlocksCommandForTest(api removeBlocksAPI, store jujuclient.ClientStore) cmd.Command { - c := &removeBlocksCommand{ +func NewEnableDestroyControllerCommandForTest(api removeBlocksAPI, store jujuclient.ClientStore) cmd.Command { + c := &enableDestroyController{ api: api, } c.SetClientStore(store) @@ -99,8 +105,8 @@ cmd.SetClientStore(store) return modelcmd.WrapController( cmd, - modelcmd.ControllerSkipFlags, - modelcmd.ControllerSkipDefault, + modelcmd.WrapControllerSkipControllerFlags, + modelcmd.WrapControllerSkipDefaultController, ) } @@ -113,27 +119,30 @@ apierr error, clock clock.Clock, apiOpen modelcmd.APIOpener, -) cmd.Command { +) (cmd.Command, *killCommand) { kill := &killCommand{ destroyCommandBase: destroyCommandBase{ api: api, clientapi: clientapi, apierr: apierr, }, + clock: clock, } kill.SetClientStore(store) - return wrapKillCommand(kill, apiOpen, clock) + return wrapKillCommand(kill, apiOpen, clock), kill } -// NewListBlocksCommandForTest returns a ListBlocksCommand with the controller -// endpoint mocked out. -func NewListBlocksCommandForTest(api listBlocksAPI, apierr error, store jujuclient.ClientStore) cmd.Command { - c := &listBlocksCommand{ - api: api, - apierr: apierr, - } - c.SetClientStore(store) - return modelcmd.WrapController(c) +// KillTimeout returns the internal timeout duration of the kill command. +func KillTimeout(c *gc.C, command cmd.Command) time.Duration { + kill, ok := command.(*killCommand) + c.Assert(ok, jc.IsTrue) + return kill.timeout +} + +// KillWaitForModels calls the WaitForModels method of the kill command. 
+func KillWaitForModels(command cmd.Command, ctx *cmd.Context, api destroyControllerAPI, uuid string) error { + kill := command.(*killCommand) + return kill.WaitForModels(ctx, api, uuid) } // NewGetConfigCommandCommandForTest returns a GetConfigCommandCommand with diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/controller/getconfig.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/controller/getconfig.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/controller/getconfig.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/controller/getconfig.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,15 +4,15 @@ package controller import ( - "fmt" "strings" "github.com/juju/cmd" - "launchpad.net/gnuflag" - "github.com/juju/errors" + "github.com/juju/gnuflag" + apicontroller "github.com/juju/juju/api/controller" "github.com/juju/juju/cmd/modelcmd" + "github.com/juju/juju/cmd/output" "github.com/juju/juju/controller" ) @@ -35,16 +35,17 @@ Examples: - juju get-controller-config - juju get-controller-config api-port - juju get-controller-config -c mycontroller + juju controller-config + juju controller-config api-port + juju controller-config -c mycontroller -See also: controllers +See also: + controllers ` func (c *getConfigCommand) Info() *cmd.Info { return &cmd.Info{ - Name: "get-controller-config", + Name: "controller-config", Args: "[]", Purpose: "Displays configuration settings for a controller.", Doc: strings.TrimSpace(getControllerHelpDoc), @@ -52,7 +53,8 @@ } func (c *getConfigCommand) SetFlags(f *gnuflag.FlagSet) { - c.out.AddFlags(f, "smart", cmd.DefaultFormatters) + c.ControllerCommandBase.SetFlags(f) + c.out.AddFlags(f, "yaml", output.DefaultFormatters) } func (c *getConfigCommand) Init(args []string) (err error) { @@ -92,7 +94,7 @@ if value, found := attrs[c.key]; found { return c.out.Write(ctx, value) } - return fmt.Errorf("key %q not found in %q controller.", c.key, c.ControllerName()) + return 
errors.Errorf("key %q not found in %q controller.", c.key, c.ControllerName()) } // If key is empty, write out the whole lot. return c.out.Write(ctx, attrs) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/controller/kill.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/controller/kill.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/controller/kill.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/controller/kill.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,28 +4,39 @@ package controller import ( + "fmt" "time" "github.com/juju/cmd" "github.com/juju/errors" + "github.com/juju/gnuflag" "github.com/juju/utils/clock" - "launchpad.net/gnuflag" "github.com/juju/juju/apiserver/common" "github.com/juju/juju/cmd/modelcmd" "github.com/juju/juju/environs" + "github.com/juju/juju/environs/config" ) const killDoc = ` Forcibly destroy the specified controller. If the API server is accessible, -this command will attempt to destroy the controller model and all -hosted models and their resources. +this command will attempt to destroy the controller model and all hosted models +and their resources. -If the API server is unreachable, the machines of the controller model -will be destroyed through the cloud provisioner. If there are additional -machines, including machines within hosted models, these machines will -not be destroyed and will never be reconnected to the Juju controller being -destroyed. +If the API server is unreachable, the machines of the controller model will be +destroyed through the cloud provisioner. If there are additional machines, +including machines within hosted models, these machines will not be destroyed +and will never be reconnected to the Juju controller being destroyed. + +The normal process of killing the controller will involve watching the hosted +models as they are brought down in a controlled manner. 
If for some reason the +models do not stop cleanly, there is a default five minute timeout. If no change +in the model state occurs for the duration of this timeout, the command will +stop watching and destroy the models directly through the cloud provider. + +See also: + destroy-controller + unregister ` // NewKillCommand returns a command to kill a controller. Killing is a forceful @@ -36,7 +47,9 @@ // environment method. This shouldn't really matter in practice as the // user trying to take down the controller will need to have access to the // controller environment anyway. - return wrapKillCommand(&killCommand{}, nil, clock.WallClock) + return wrapKillCommand(&killCommand{ + clock: clock.WallClock, + }, nil, clock.WallClock) } // wrapKillCommand provides the common wrapping used by tests and @@ -48,15 +61,25 @@ openStrategy := modelcmd.NewTimeoutOpener(apiOpen, clock, 10*time.Second) return modelcmd.WrapController( kill, - modelcmd.ControllerSkipFlags, - modelcmd.ControllerSkipDefault, - modelcmd.ControllerAPIOpener(openStrategy), + modelcmd.WrapControllerSkipControllerFlags, + modelcmd.WrapControllerSkipDefaultController, + modelcmd.WrapControllerAPIOpener(openStrategy), ) } // killCommand kills the specified controller. type killCommand struct { destroyCommandBase + + clock clock.Clock + timeout time.Duration +} + +// SetFlags implements Command.SetFlags. +func (c *killCommand) SetFlags(f *gnuflag.FlagSet) { + c.destroyCommandBase.SetFlags(f) + f.Var(newDurationValue(time.Minute*5, &c.timeout), "t", "Timeout before direct destruction") + f.Var(newDurationValue(time.Minute*5, &c.timeout), "timeout", "") } // Info implements Command.Info. @@ -69,12 +92,6 @@ } } -// SetFlags implements Command.SetFlags. -func (c *killCommand) SetFlags(f *gnuflag.FlagSet) { - f.BoolVar(&c.assumeYes, "y", false, "Do not ask for confirmation") - f.BoolVar(&c.assumeYes, "yes", false, "") -} - // Init implements Command.Init. 
func (c *killCommand) Init(args []string) error { return c.destroyCommandBase.Init(args) @@ -84,13 +101,8 @@ func (c *killCommand) Run(ctx *cmd.Context) error { controllerName := c.ControllerName() store := c.ClientStore() - controllerDetails, err := store.ControllerByName(controllerName) - if err != nil { - return errors.Annotate(err, "cannot read controller info") - } - if !c.assumeYes { - if err = confirmDestruction(ctx, controllerName); err != nil { + if err := confirmDestruction(ctx, controllerName); err != nil { return err } } @@ -111,35 +123,125 @@ } // Obtain controller environ so we can clean up afterwards. - controllerEnviron, err := c.getControllerEnviron(store, controllerName, api) + controllerEnviron, err := c.getControllerEnviron(ctx, store, controllerName, api) if err != nil { return errors.Annotate(err, "getting controller environ") } // If we were unable to connect to the API, just destroy the controller through // the environs interface. if api == nil { - ctx.Infof("Unable to connect to the API server. Destroying through provider.") + ctx.Infof("Unable to connect to the API server, destroying through provider") return environs.Destroy(controllerName, controllerEnviron, store) } // Attempt to destroy the controller and all environments. err = api.DestroyController(true) if err != nil { - ctx.Infof("Unable to destroy controller through the API: %s. 
Destroying through provider.", err) + ctx.Infof("Unable to destroy controller through the API: %s\nDestroying through provider", err) return environs.Destroy(controllerName, controllerEnviron, store) } ctx.Infof("Destroying controller %q\nWaiting for resources to be reclaimed", controllerName) - updateStatus := newTimedStatusUpdater(ctx, api, controllerDetails.ControllerUUID) - for ctrStatus, envsStatus := updateStatus(0); hasUnDeadModels(envsStatus); ctrStatus, envsStatus = updateStatus(2 * time.Second) { - ctx.Infof(fmtCtrStatus(ctrStatus)) - for _, envStatus := range envsStatus { - ctx.Verbosef(fmtModelStatus(envStatus)) + uuid := controllerEnviron.Config().UUID() + if err := c.WaitForModels(ctx, api, uuid); err != nil { + c.DirectDestroyRemaining(ctx, api) + } + return environs.Destroy(controllerName, controllerEnviron, store) +} + +// DirectDestroyRemaining will attempt to directly destroy any remaining +// models that have machines left. +func (c *killCommand) DirectDestroyRemaining(ctx *cmd.Context, api destroyControllerAPI) { + hasErrors := false + hostedConfig, err := api.HostedModelConfigs() + if err != nil { + hasErrors = true + logger.Errorf("unable to retrieve hosted model config: %v", err) + } + for _, model := range hostedConfig { + ctx.Infof("Killing %s/%s directly", model.Owner.Id(), model.Name) + cfg, err := config.New(config.NoDefaults, model.Config) + if err != nil { + logger.Errorf(err.Error()) + hasErrors = true + continue } + env, err := environs.New(environs.OpenParams{ + Cloud: model.CloudSpec, + Config: cfg, + }) + if err != nil { + logger.Errorf(err.Error()) + hasErrors = true + continue + } + if err := env.Destroy(); err != nil { + logger.Errorf(err.Error()) + hasErrors = true + } else { + ctx.Infof(" done") + } + } + if hasErrors { + logger.Errorf("there were problems destroying some models, manual intervention may be necessary to ensure resources are released") + } else { + ctx.Infof("All hosted models destroyed, cleaning up controller 
machines") } +} - ctx.Infof("All hosted models reclaimed, cleaning up controller machines") +// WaitForModels will wait for the models to bring themselves down nicely. +// It will return the UUIDs of any models that need to be removed forceably. +func (c *killCommand) WaitForModels(ctx *cmd.Context, api destroyControllerAPI, uuid string) error { + thirtySeconds := (time.Second * 30) + updateStatus := newTimedStatusUpdater(ctx, api, uuid, c.clock) + + ctrStatus, modelsStatus := updateStatus(0) + lastStatus := ctrStatus + lastChange := c.clock.Now().Truncate(time.Second) + deadline := lastChange.Add(c.timeout) + for ; hasUnDeadModels(modelsStatus) && (deadline.After(c.clock.Now())); ctrStatus, modelsStatus = updateStatus(5 * time.Second) { + now := c.clock.Now().Truncate(time.Second) + if ctrStatus != lastStatus { + lastStatus = ctrStatus + lastChange = now + deadline = lastChange.Add(c.timeout) + } + timeSinceLastChange := now.Sub(lastChange) + timeUntilDestruction := deadline.Sub(now) + warning := "" + // We want to show the warning if it has been more than 30 seconds since + // the last change, or we are within 30 seconds of our timeout. 
+ if timeSinceLastChange > thirtySeconds || timeUntilDestruction < thirtySeconds { + warning = fmt.Sprintf(", will kill machines directly in %s", timeUntilDestruction) + } + ctx.Infof("%s%s", fmtCtrStatus(ctrStatus), warning) + for _, modelStatus := range modelsStatus { + ctx.Verbosef(fmtModelStatus(modelStatus)) + } + } + if hasUnDeadModels(modelsStatus) { + return errors.New("timed out") + } else { + ctx.Infof("All hosted models reclaimed, cleaning up controller machines") + } + return nil +} - return environs.Destroy(controllerName, controllerEnviron, store) +type durationValue time.Duration + +func newDurationValue(value time.Duration, p *time.Duration) *durationValue { + *p = value + return (*durationValue)(p) +} + +func (d *durationValue) Set(s string) error { + v, err := time.ParseDuration(s) + if err != nil { + return err + } + *d = durationValue(v) + return err } + +func (d *durationValue) String() string { return (*time.Duration)(d).String() } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/controller/killstatus.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/controller/killstatus.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/controller/killstatus.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/controller/killstatus.go 2016-10-13 14:31:49.000000000 +0000 @@ -10,12 +10,13 @@ "github.com/juju/cmd" "github.com/juju/errors" "github.com/juju/juju/apiserver/params" + "github.com/juju/utils/clock" "gopkg.in/juju/names.v2" ) type ctrData struct { UUID string - Life params.Life + Life string HostedModelCount int HostedMachineCount int ServiceCount int @@ -25,7 +26,7 @@ UUID string Owner string Name string - Life params.Life + Life string HostedMachineCount int ServiceCount int @@ -33,13 +34,15 @@ // newTimedStatusUpdater returns a function which waits a given period of time // before querying the apiserver for updated data. 
-func newTimedStatusUpdater(ctx *cmd.Context, api destroyControllerAPI, uuid string) func(time.Duration) (ctrData, []modelData) { +func newTimedStatusUpdater(ctx *cmd.Context, api destroyControllerAPI, controllerModelUUID string, clock clock.Clock) func(time.Duration) (ctrData, []modelData) { return func(wait time.Duration) (ctrData, []modelData) { - time.Sleep(wait) + if wait > 0 { + <-clock.After(wait) + } // If we hit an error, status.HostedModelCount will be 0, the polling // loop will stop and we'll go directly to destroying the model. - ctrStatus, modelsStatus, err := newData(api, uuid) + ctrStatus, modelsStatus, err := newData(api, controllerModelUUID) if err != nil { ctx.Infof("Unable to get the controller summary from the API: %s.", err) } @@ -48,7 +51,7 @@ } } -func newData(api destroyControllerAPI, ctrUUID string) (ctrData, []modelData, error) { +func newData(api destroyControllerAPI, controllerModelUUID string) (ctrData, []modelData, error) { models, err := api.AllModels() if err != nil { return ctrData{}, nil, errors.Trace(err) @@ -57,7 +60,7 @@ return ctrData{}, nil, errors.New("no models found") } - status, err := api.ModelStatus(names.NewModelTag(ctrUUID)) + status, err := api.ModelStatus(names.NewModelTag(controllerModelUUID)) if err != nil { return ctrData{}, nil, errors.Trace(err) } @@ -71,7 +74,7 @@ modelName := map[string]string{} var i int for _, model := range models { - if model.UUID != ctrUUID { + if model.UUID != controllerModelUUID { modelName[model.UUID] = model.Name hostedTags[i] = names.NewModelTag(model.UUID) i++ @@ -88,7 +91,7 @@ var modelsData []modelData var aliveModelCount int for _, model := range hostedStatus { - if model.Life == params.Dead { + if model.Life == string(params.Dead) { continue } modelsData = append(modelsData, modelData{ @@ -106,7 +109,7 @@ } ctrFinalStatus := ctrData{ - ctrUUID, + controllerModelUUID, ctrStatus.Life, aliveModelCount, hostedMachinesCount, @@ -118,7 +121,7 @@ func hasUnDeadModels(models 
[]modelData) bool { for _, model := range models { - if model.Life != params.Dead { + if model.Life != string(params.Dead) { return true } } @@ -127,7 +130,7 @@ func hasAliveModels(models []modelData) bool { for _, model := range models { - if model.Life == params.Alive { + if model.Life == string(params.Alive) { return true } } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/controller/kill_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/controller/kill_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/controller/kill_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/controller/kill_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -9,6 +9,7 @@ "github.com/juju/cmd" "github.com/juju/errors" + "github.com/juju/testing" jc "github.com/juju/testing/checkers" "github.com/juju/utils/clock" gc "gopkg.in/check.v1" @@ -23,22 +24,39 @@ cmdtesting "github.com/juju/juju/cmd/testing" "github.com/juju/juju/jujuclient" _ "github.com/juju/juju/provider/dummy" - "github.com/juju/juju/testing" + coretesting "github.com/juju/juju/testing" ) type KillSuite struct { baseDestroySuite + + clock *testing.Clock } var _ = gc.Suite(&KillSuite{}) +func (s *KillSuite) SetUpTest(c *gc.C) { + s.baseDestroySuite.SetUpTest(c) + s.clock = testing.NewClock(time.Now()) +} + func (s *KillSuite) runKillCommand(c *gc.C, args ...string) (*cmd.Context, error) { - return testing.RunCommand(c, s.newKillCommand(), args...) + return coretesting.RunCommand(c, s.newKillCommand(), args...) 
} func (s *KillSuite) newKillCommand() cmd.Command { + wrapped, _ := controller.NewKillCommandForTest( + s.api, s.clientapi, s.store, s.apierror, s.clock, nil) + return wrapped +} + +func (s *KillSuite) newKillCommandBoth() (cmd.Command, cmd.Command) { + clock := s.clock + if clock == nil { + clock = testing.NewClock(time.Now()) + } return controller.NewKillCommandForTest( - s.api, s.clientapi, s.store, s.apierror, &mockClock{}, nil) + s.api, s.clientapi, s.store, s.apierror, clock, nil) } func (s *KillSuite) TestKillNoControllerNameError(c *gc.C) { @@ -51,6 +69,238 @@ c.Assert(err, gc.ErrorMatches, "flag provided but not defined: -n") } +func (s *KillSuite) TestKillDurationFlags(c *gc.C) { + for i, test := range []struct { + args []string + expected time.Duration + err string + }{ + { + expected: 5 * time.Minute, + }, { + args: []string{"-t", "2m"}, + expected: 2 * time.Minute, + }, { + args: []string{"--timeout", "2m"}, + expected: 2 * time.Minute, + }, { + args: []string{"-t", "0"}, + expected: 0, + }, + } { + c.Logf("duration test %d", i) + wrapped, inner := s.newKillCommandBoth() + args := append([]string{"test1"}, test.args...) 
+ err := coretesting.InitCommand(wrapped, args) + if test.err == "" { + c.Check(err, jc.ErrorIsNil) + c.Check(controller.KillTimeout(c, inner), gc.Equals, test.expected) + } else { + c.Check(err, gc.ErrorMatches, test.err) + } + } +} + +func (s *KillSuite) TestKillWaitForModels_AllGood(c *gc.C) { + s.resetAPIModels(c) + wrapped, inner := s.newKillCommandBoth() + err := coretesting.InitCommand(wrapped, []string{"test1", "--timeout=1m"}) + c.Assert(err, jc.ErrorIsNil) + + ctx := coretesting.Context(c) + err = controller.KillWaitForModels(inner, ctx, s.api, test1UUID) + c.Assert(err, jc.ErrorIsNil) + c.Assert(coretesting.Stderr(ctx), gc.Equals, "All hosted models reclaimed, cleaning up controller machines\n") +} + +func (s *KillSuite) TestKillWaitForModels_ActuallyWaits(c *gc.C) { + s.resetAPIModels(c) + s.addModel("model-1", base.ModelStatus{ + UUID: test2UUID, + Life: string(params.Dying), + Owner: "admin", + HostedMachineCount: 2, + ServiceCount: 2, + }) + wrapped, inner := s.newKillCommandBoth() + err := coretesting.InitCommand(wrapped, []string{"test1", "--timeout=1m"}) + c.Assert(err, jc.ErrorIsNil) + + ctx := coretesting.Context(c) + result := make(chan error) + go func() { + err := controller.KillWaitForModels(inner, ctx, s.api, test1UUID) + result <- err + }() + + s.syncClockAlarm(c) + s.setModelStatus(base.ModelStatus{ + UUID: test2UUID, + Life: string(params.Dying), + Owner: "admin", + HostedMachineCount: 1, + }) + s.clock.Advance(5 * time.Second) + + s.syncClockAlarm(c) + s.removeModel(test2UUID) + s.clock.Advance(5 * time.Second) + + select { + case err := <-result: + c.Assert(err, jc.ErrorIsNil) + case <-time.After(coretesting.LongWait): + c.Fatal("timed out waiting for result") + } + expect := "" + + "Waiting on 1 model, 2 machines, 2 applications\n" + + "Waiting on 1 model, 1 machine\n" + + "All hosted models reclaimed, cleaning up controller machines\n" + + c.Assert(coretesting.Stderr(ctx), gc.Equals, expect) +} + +func (s *KillSuite) 
TestKillWaitForModels_TimeoutResetsWithChange(c *gc.C) { + s.resetAPIModels(c) + s.addModel("model-1", base.ModelStatus{ + UUID: test2UUID, + Life: string(params.Dying), + Owner: "admin", + HostedMachineCount: 2, + ServiceCount: 2, + }) + wrapped, inner := s.newKillCommandBoth() + err := coretesting.InitCommand(wrapped, []string{"test1", "--timeout=20s"}) + c.Assert(err, jc.ErrorIsNil) + + ctx := coretesting.Context(c) + result := make(chan error) + go func() { + err := controller.KillWaitForModels(inner, ctx, s.api, test1UUID) + result <- err + }() + + s.syncClockAlarm(c) + s.clock.Advance(5 * time.Second) + + s.syncClockAlarm(c) + s.setModelStatus(base.ModelStatus{ + UUID: test2UUID, + Life: string(params.Dying), + Owner: "admin", + HostedMachineCount: 1, + }) + s.clock.Advance(5 * time.Second) + + s.syncClockAlarm(c) + s.removeModel(test2UUID) + s.clock.Advance(5 * time.Second) + + select { + case err := <-result: + c.Assert(err, jc.ErrorIsNil) + case <-time.After(coretesting.LongWait): + c.Fatal("timed out waiting for result") + } + expect := "" + + "Waiting on 1 model, 2 machines, 2 applications, will kill machines directly in 20s\n" + + "Waiting on 1 model, 2 machines, 2 applications, will kill machines directly in 15s\n" + + "Waiting on 1 model, 1 machine, will kill machines directly in 20s\n" + + "All hosted models reclaimed, cleaning up controller machines\n" + + c.Assert(coretesting.Stderr(ctx), gc.Equals, expect) +} + +func (s *KillSuite) TestKillWaitForModels_TimeoutWithNoChange(c *gc.C) { + s.resetAPIModels(c) + s.addModel("model-1", base.ModelStatus{ + UUID: test2UUID, + Life: string(params.Dying), + Owner: "admin", + HostedMachineCount: 2, + ServiceCount: 2, + }) + wrapped, inner := s.newKillCommandBoth() + err := coretesting.InitCommand(wrapped, []string{"test1", "--timeout=1m"}) + c.Assert(err, jc.ErrorIsNil) + + ctx := coretesting.Context(c) + result := make(chan error) + go func() { + err := controller.KillWaitForModels(inner, ctx, s.api, 
test1UUID) + result <- err + }() + + for i := 0; i < 12; i++ { + s.syncClockAlarm(c) + s.clock.Advance(5 * time.Second) + } + + select { + case err := <-result: + c.Assert(err, gc.ErrorMatches, "timed out") + case <-time.After(coretesting.LongWait): + c.Fatal("timed out waiting for result") + } + expect := "" + + "Waiting on 1 model, 2 machines, 2 applications\n" + + "Waiting on 1 model, 2 machines, 2 applications\n" + + "Waiting on 1 model, 2 machines, 2 applications\n" + + "Waiting on 1 model, 2 machines, 2 applications\n" + + "Waiting on 1 model, 2 machines, 2 applications\n" + + "Waiting on 1 model, 2 machines, 2 applications\n" + + "Waiting on 1 model, 2 machines, 2 applications\n" + + "Waiting on 1 model, 2 machines, 2 applications, will kill machines directly in 25s\n" + + "Waiting on 1 model, 2 machines, 2 applications, will kill machines directly in 20s\n" + + "Waiting on 1 model, 2 machines, 2 applications, will kill machines directly in 15s\n" + + "Waiting on 1 model, 2 machines, 2 applications, will kill machines directly in 10s\n" + + "Waiting on 1 model, 2 machines, 2 applications, will kill machines directly in 5s\n" + + c.Assert(coretesting.Stderr(ctx), gc.Equals, expect) +} + +func (s *KillSuite) resetAPIModels(c *gc.C) { + s.api.allModels = nil + s.api.envStatus = map[string]base.ModelStatus{} + s.addModel("controller", base.ModelStatus{ + UUID: test1UUID, + Life: string(params.Alive), + Owner: "admin", + TotalMachineCount: 1, + }) +} + +func (s *KillSuite) addModel(name string, status base.ModelStatus) { + s.api.allModels = append(s.api.allModels, base.UserModel{ + Name: name, + UUID: status.UUID, + Owner: status.Owner, + }) + s.api.envStatus[status.UUID] = status +} + +func (s *KillSuite) setModelStatus(status base.ModelStatus) { + s.api.envStatus[status.UUID] = status +} + +func (s *KillSuite) removeModel(uuid string) { + for i, v := range s.api.allModels { + if v.UUID == uuid { + s.api.allModels = append(s.api.allModels[:i], 
s.api.allModels[i+1:]...) + break + } + } + delete(s.api.envStatus, uuid) +} + +func (s *KillSuite) syncClockAlarm(c *gc.C) { + select { + case <-s.clock.Alarms(): + case <-time.After(coretesting.LongWait): + c.Fatal("timed out waiting for test clock After call") + } +} + func (s *KillSuite) TestKillUnknownArgument(c *gc.C) { _, err := s.runKillCommand(c, "model", "whoops") c.Assert(err, gc.ErrorMatches, `unrecognized args: \["whoops"\]`) @@ -65,7 +315,7 @@ s.apierror = errors.New("connection refused") ctx, err := s.runKillCommand(c, "test1", "-y") c.Assert(err, jc.ErrorIsNil) - c.Check(testing.Stderr(ctx), jc.Contains, "Unable to open API: connection refused") + c.Check(coretesting.Stderr(ctx), jc.Contains, "Unable to open API: connection refused") checkControllerRemovedFromStore(c, "test1", s.store) } @@ -100,7 +350,7 @@ s.api.SetErrors(errors.New("some destroy error")) ctx, err := s.runKillCommand(c, "test1", "-y") c.Assert(err, jc.ErrorIsNil) - c.Check(testing.Stderr(ctx), jc.Contains, "Unable to destroy controller through the API: some destroy error. 
Destroying through provider.") + c.Check(coretesting.Stderr(ctx), jc.Contains, "Unable to destroy controller through the API: some destroy error\nDestroying through provider") c.Assert(s.api.destroyAll, jc.IsTrue) checkControllerRemovedFromStore(c, "test1", s.store) } @@ -118,15 +368,15 @@ select { case err := <-errc: c.Check(err, gc.ErrorMatches, "controller destruction aborted") - case <-time.After(testing.LongWait): + case <-time.After(coretesting.LongWait): c.Fatalf("command took too long") } - c.Check(testing.Stdout(ctx), gc.Matches, "WARNING!.*test1(.|\n)*") + c.Check(coretesting.Stdout(ctx), gc.Matches, "WARNING!.*test1(.|\n)*") checkControllerExistsInStore(c, "test1", s.store) } func (s *KillSuite) TestKillCommandControllerAlias(c *gc.C) { - _, err := testing.RunCommand(c, s.newKillCommand(), "test1", "-y") + _, err := coretesting.RunCommand(c, s.newKillCommand(), "test1", "-y") c.Assert(err, jc.ErrorIsNil) checkControllerRemovedFromStore(c, "test1:test1", s.store) } @@ -135,8 +385,8 @@ testDialer := func(_ jujuclient.ClientStore, controllerName, modelName string) (api.Connection, error) { return nil, common.ErrPerm } - cmd := controller.NewKillCommandForTest(nil, nil, s.store, nil, clock.WallClock, modelcmd.OpenFunc(testDialer)) - _, err := testing.RunCommand(c, cmd, "test1", "-y") + cmd, _ := controller.NewKillCommandForTest(nil, nil, s.store, nil, clock.WallClock, modelcmd.OpenFunc(testDialer)) + _, err := coretesting.RunCommand(c, cmd, "test1", "-y") c.Assert(err, gc.ErrorMatches, "cannot destroy controller: permission denied") checkControllerExistsInStore(c, "test1", s.store) } @@ -151,10 +401,10 @@ return nil, errors.New("kill command waited too long") } - cmd := controller.NewKillCommandForTest(nil, nil, s.store, nil, clock, modelcmd.OpenFunc(testDialer)) - ctx, err := testing.RunCommand(c, cmd, "test1", "-y") + cmd, _ := controller.NewKillCommandForTest(nil, nil, s.store, nil, clock, modelcmd.OpenFunc(testDialer)) + ctx, err := 
coretesting.RunCommand(c, cmd, "test1", "-y") c.Check(err, jc.ErrorIsNil) - c.Check(testing.Stderr(ctx), jc.Contains, "Unable to open API: open connection timed out") + c.Check(coretesting.Stderr(ctx), jc.Contains, "Unable to open API: open connection timed out") checkControllerRemovedFromStore(c, "test1", s.store) // Check that we were actually told to wait for 10s. c.Assert(clock.wait, gc.Equals, 10*time.Second) @@ -172,7 +422,7 @@ } func (s *KillSuite) TestControllerStatus(c *gc.C) { - s.api.allEnvs = []base.UserModel{ + s.api.allModels = []base.UserModel{ {Name: "admin", UUID: "123", Owner: names.NewUserTag("admin").String(), @@ -186,15 +436,15 @@ } s.api.envStatus = make(map[string]base.ModelStatus) - for _, env := range s.api.allEnvs { + for _, env := range s.api.allModels { owner, err := names.ParseUserTag(env.Owner) c.Assert(err, jc.ErrorIsNil) s.api.envStatus[env.UUID] = base.ModelStatus{ UUID: env.UUID, - Life: params.Dying, + Life: string(params.Dying), HostedMachineCount: 2, ServiceCount: 1, - Owner: owner.Canonical(), + Owner: owner.Id(), } } @@ -213,13 +463,13 @@ ServiceCount int }{ { - Owner: "bob@local", + Owner: "bob", Name: "env1", Life: params.Dying, HostedMachineCount: 2, ServiceCount: 1, }, { - Owner: "jo@local", + Owner: "jo", Name: "env2", Life: params.Dying, HostedMachineCount: 2, @@ -228,7 +478,7 @@ } { c.Assert(envsStatus[i].Owner, gc.Equals, expected.Owner) c.Assert(envsStatus[i].Name, gc.Equals, expected.Name) - c.Assert(envsStatus[i].Life, gc.Equals, expected.Life) + c.Assert(envsStatus[i].Life, gc.Equals, string(expected.Life)) c.Assert(envsStatus[i].HostedMachineCount, gc.Equals, expected.HostedMachineCount) c.Assert(envsStatus[i].ServiceCount, gc.Equals, expected.ServiceCount) } @@ -238,7 +488,7 @@ func (s *KillSuite) TestFmtControllerStatus(c *gc.C) { data := controller.CtrData{ "uuid", - params.Alive, + string(params.Alive), 3, 20, 8, @@ -250,13 +500,13 @@ func (s *KillSuite) TestFmtEnvironStatus(c *gc.C) { data := 
controller.ModelData{ "uuid", - "owner@local", + "owner", "envname", - params.Dying, + string(params.Dying), 8, 1, } out := controller.FmtModelStatus(data) - c.Assert(out, gc.Equals, "\towner@local/envname (dying), 8 machines, 1 application") + c.Assert(out, gc.Equals, "\towner/envname (dying), 8 machines, 1 application") } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/controller/listblocks.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/controller/listblocks.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/controller/listblocks.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/controller/listblocks.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,77 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package controller - -import ( - "github.com/juju/cmd" - "github.com/juju/errors" - "launchpad.net/gnuflag" - - "github.com/juju/juju/apiserver/params" - "github.com/juju/juju/cmd/modelcmd" -) - -// NewListBlocksCommand returns a command to list the blocks in a controller. -func NewListBlocksCommand() cmd.Command { - return modelcmd.WrapController(&listBlocksCommand{}) -} - -// listBlocksCommand lists all blocks for environments within the controller. -type listBlocksCommand struct { - modelcmd.ControllerCommandBase - out cmd.Output - api listBlocksAPI - apierr error -} - -var listBlocksDoc = `List all blocks for models within the specified controller` - -// listBlocksAPI defines the methods on the controller API endpoint -// that the blocks command calls. -type listBlocksAPI interface { - Close() error - ListBlockedModels() ([]params.ModelBlockInfo, error) -} - -// Info implements Command.Info. 
-func (c *listBlocksCommand) Info() *cmd.Info { - return &cmd.Info{ - Name: "blocks", - Purpose: "List all blocks within the controller.", - Doc: listBlocksDoc, - Aliases: []string{"list-all-blocks", "list-blocks"}, - } -} - -// SetFlags implements Command.SetFlags. -func (c *listBlocksCommand) SetFlags(f *gnuflag.FlagSet) { - c.out.AddFlags(f, "tabular", map[string]cmd.Formatter{ - "yaml": cmd.FormatYaml, - "json": cmd.FormatJson, - "tabular": formatTabularBlockedModels, - }) -} - -func (c *listBlocksCommand) getAPI() (listBlocksAPI, error) { - if c.api != nil { - return c.api, c.apierr - } - return c.NewControllerAPIClient() -} - -// Run implements Command.Run -func (c *listBlocksCommand) Run(ctx *cmd.Context) error { - api, err := c.getAPI() - if err != nil { - return errors.Annotate(err, "cannot connect to the API") - } - defer api.Close() - - envs, err := api.ListBlockedModels() - if err != nil { - logger.Errorf("Unable to list blocked models: %s", err) - return err - } - return c.out.Write(ctx, envs) -} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/controller/listblocks_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/controller/listblocks_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/controller/listblocks_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/controller/listblocks_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,124 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. 
- -package controller_test - -import ( - "github.com/juju/cmd" - "github.com/juju/errors" - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" - - "github.com/juju/juju/apiserver/params" - "github.com/juju/juju/cmd/juju/controller" - "github.com/juju/juju/jujuclient" - "github.com/juju/juju/jujuclient/jujuclienttesting" - _ "github.com/juju/juju/provider/dummy" - "github.com/juju/juju/testing" -) - -type ListBlocksSuite struct { - testing.FakeJujuXDGDataHomeSuite - api *fakeListBlocksAPI - apierror error - store *jujuclienttesting.MemStore -} - -var _ = gc.Suite(&ListBlocksSuite{}) - -// fakeListBlocksAPI mocks out the controller API -type fakeListBlocksAPI struct { - err error - blocks []params.ModelBlockInfo -} - -func (f *fakeListBlocksAPI) Close() error { return nil } - -func (f *fakeListBlocksAPI) ListBlockedModels() ([]params.ModelBlockInfo, error) { - return f.blocks, f.err -} - -func (s *ListBlocksSuite) SetUpTest(c *gc.C) { - s.FakeJujuXDGDataHomeSuite.SetUpTest(c) - s.apierror = nil - s.api = &fakeListBlocksAPI{ - blocks: []params.ModelBlockInfo{ - params.ModelBlockInfo{ - Name: "test1", - UUID: "test1-uuid", - OwnerTag: "cheryl@local", - Blocks: []string{ - "BlockDestroy", - }, - }, - params.ModelBlockInfo{ - Name: "test2", - UUID: "test2-uuid", - OwnerTag: "bob@local", - Blocks: []string{ - "BlockDestroy", - "BlockChange", - }, - }, - }, - } - s.store = jujuclienttesting.NewMemStore() - s.store.Controllers["dummysys"] = jujuclient.ControllerDetails{} -} - -func (s *ListBlocksSuite) runListBlocksCommand(c *gc.C, args ...string) (*cmd.Context, error) { - cmd := controller.NewListBlocksCommandForTest(s.api, s.apierror, s.store) - args = append(args, []string{"-c", "dummysys"}...) - return testing.RunCommand(c, cmd, args...) 
-} - -func (s *ListBlocksSuite) TestListBlocksCannotConnectToAPI(c *gc.C) { - s.apierror = errors.New("connection refused") - _, err := s.runListBlocksCommand(c) - c.Assert(err, gc.ErrorMatches, "cannot connect to the API: connection refused") -} - -func (s *ListBlocksSuite) TestListBlocksError(c *gc.C) { - s.api.err = errors.New("unexpected api error") - s.runListBlocksCommand(c) - testLog := c.GetTestLog() - c.Check(testLog, jc.Contains, "Unable to list blocked models: unexpected api error") -} - -func (s *ListBlocksSuite) TestListBlocksTabular(c *gc.C) { - ctx, err := s.runListBlocksCommand(c) - c.Check(err, jc.ErrorIsNil) - c.Check(testing.Stdout(ctx), gc.Equals, ""+ - "NAME MODEL UUID OWNER BLOCKS\n"+ - "test1 test1-uuid cheryl@local destroy-model\n"+ - "test2 test2-uuid bob@local destroy-model,all-changes\n"+ - "\n") -} - -func (s *ListBlocksSuite) TestListBlocksJSON(c *gc.C) { - ctx, err := s.runListBlocksCommand(c, "--format", "json") - c.Check(err, jc.ErrorIsNil) - c.Check(testing.Stdout(ctx), gc.Equals, "["+ - `{"name":"test1","model-uuid":"test1-uuid","owner-tag":"cheryl@local",`+ - `"blocks":["BlockDestroy"]},`+ - `{"name":"test2","model-uuid":"test2-uuid","owner-tag":"bob@local",`+ - `"blocks":["BlockDestroy","BlockChange"]}`+ - "]\n") -} - -func (s *ListBlocksSuite) TestListBlocksYAML(c *gc.C) { - ctx, err := s.runListBlocksCommand(c, "--format", "yaml") - c.Check(err, jc.ErrorIsNil) - c.Check(testing.Stdout(ctx), gc.Equals, ""+ - "- name: test1\n"+ - " uuid: test1-uuid\n"+ - " ownertag: cheryl@local\n"+ - " blocks:\n"+ - " - BlockDestroy\n"+ - "- name: test2\n"+ - " uuid: test2-uuid\n"+ - " ownertag: bob@local\n"+ - " blocks:\n"+ - " - BlockDestroy\n"+ - " - BlockChange\n") -} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/controller/listcontrollersconverters.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/controller/listcontrollersconverters.go --- 
juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/controller/listcontrollersconverters.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/controller/listcontrollersconverters.go 2016-10-13 14:31:49.000000000 +0000 @@ -9,6 +9,7 @@ "github.com/juju/errors" "gopkg.in/juju/names.v2" + "github.com/juju/juju/cmd/juju/common" "github.com/juju/juju/jujuclient" ) @@ -19,16 +20,28 @@ CurrentController string `yaml:"current-controller" json:"current-controller"` } +// ControllerMachines holds the total number of controller +// machines and the number of active ones. +type ControllerMachines struct { + Active int `yaml:"active"` + Total int `yaml:"total"` +} + // ControllerItem defines the serialization behaviour of controller information. type ControllerItem struct { - ModelName string `yaml:"current-model,omitempty" json:"current-model,omitempty"` - User string `yaml:"user,omitempty" json:"user,omitempty"` - Server string `yaml:"recent-server,omitempty" json:"recent-server,omitempty"` - ControllerUUID string `yaml:"uuid" json:"uuid"` - APIEndpoints []string `yaml:"api-endpoints,flow" json:"api-endpoints"` - CACert string `yaml:"ca-cert" json:"ca-cert"` - Cloud string `yaml:"cloud" json:"cloud"` - CloudRegion string `yaml:"region,omitempty" json:"region,omitempty"` + ModelName string `yaml:"current-model,omitempty" json:"current-model,omitempty"` + User string `yaml:"user,omitempty" json:"user,omitempty"` + Access string `yaml:"access,omitempty" json:"access,omitempty"` + Server string `yaml:"recent-server,omitempty" json:"recent-server,omitempty"` + ControllerUUID string `yaml:"uuid" json:"uuid"` + APIEndpoints []string `yaml:"api-endpoints,flow" json:"api-endpoints"` + CACert string `yaml:"ca-cert" json:"ca-cert"` + Cloud string `yaml:"cloud" json:"cloud"` + CloudRegion string `yaml:"region,omitempty" json:"region,omitempty"` + AgentVersion string `yaml:"agent-version,omitempty" json:"agent-version,omitempty"` + ModelCount *int 
`yaml:"model-count,omitempty" json:"model-count,omitempty"` + MachineCount *int `yaml:"machine-count,omitempty" json:"machine-count,omitempty"` + ControllerMachines *ControllerMachines `yaml:"controller-machines,omitempty" json:"controller-machines,omitempty"` } // convertControllerDetails takes a map of Controllers and @@ -54,7 +67,7 @@ serverName = details.APIEndpoints[0] } - var userName string + var userName, access string accountDetails, err := c.store.AccountDetails(controllerName) if err != nil { if !errors.IsNotFound(err) { @@ -63,6 +76,7 @@ } } else { userName = accountDetails.User + access = accountDetails.LastKnownAccess } var modelName string @@ -79,21 +93,36 @@ // model name relative to that user. if unqualifiedModelName, owner, err := jujuclient.SplitModelName(modelName); err == nil { user := names.NewUserTag(userName) - modelName = ownerQualifiedModelName(unqualifiedModelName, owner, user) + modelName = common.OwnerQualifiedModelName(unqualifiedModelName, owner, user) } } } - controllers[controllerName] = ControllerItem{ + item := ControllerItem{ ModelName: modelName, User: userName, + Access: access, Server: serverName, APIEndpoints: details.APIEndpoints, ControllerUUID: details.ControllerUUID, CACert: details.CACert, Cloud: details.Cloud, CloudRegion: details.CloudRegion, + AgentVersion: details.AgentVersion, + } + if details.MachineCount != nil && *details.MachineCount > 0 { + item.MachineCount = details.MachineCount + } + if details.ModelCount != nil && *details.ModelCount > 0 { + item.ModelCount = details.ModelCount + } + if details.ControllerMachineCount > 0 { + item.ControllerMachines = &ControllerMachines{ + Total: details.ControllerMachineCount, + Active: details.ActiveControllerMachineCount, + } } + controllers[controllerName] = item } return controllers, errs } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/controller/listcontrollersformatters.go
juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/controller/listcontrollersformatters.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/controller/listcontrollersformatters.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/controller/listcontrollersformatters.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,47 +4,47 @@ package controller import ( - "bytes" "fmt" + "io" "sort" - "strings" - "text/tabwriter" "github.com/juju/errors" + "github.com/juju/version" + + "github.com/juju/juju/cmd/output" + jujuversion "github.com/juju/juju/version" ) -const noValueDisplay = "-" +const ( + noValueDisplay = "-" + notKnownDisplay = "(unknown)" +) -func formatControllersListTabular(value interface{}) ([]byte, error) { +func (c *listControllersCommand) formatControllersListTabular(writer io.Writer, value interface{}) error { controllers, ok := value.(ControllerSet) if !ok { - return nil, errors.Errorf("expected value of type %T, got %T", controllers, value) + return errors.Errorf("expected value of type %T, got %T", controllers, value) } - return formatControllersTabular(controllers) + return formatControllersTabular(writer, controllers, !c.refresh) } // formatControllersTabular returns a tabular summary of controller/model items // sorted by controller name alphabetically. -func formatControllersTabular(set ControllerSet) ([]byte, error) { - var out bytes.Buffer - - const ( - // To format things into columns. 
- minwidth = 0 - tabwidth = 1 - padding = 2 - padchar = ' ' - flags = 0 - ) - tw := tabwriter.NewWriter(&out, minwidth, tabwidth, padding, padchar, flags) - print := func(values ...string) { - fmt.Fprintln(tw, strings.Join(values, "\t")) +func formatControllersTabular(writer io.Writer, set ControllerSet, promptRefresh bool) error { + tw := output.TabWriter(writer) + w := output.Wrapper{tw} + + if promptRefresh && len(set.Controllers) > 0 { + fmt.Fprintln(writer, "Use --refresh to see the latest information.") + fmt.Fprintln(writer) } - - print("CONTROLLER", "MODEL", "USER", "CLOUD/REGION") + w.Println("Controller", "Model", "User", "Access", "Cloud/Region", "Models", "Machines", "HA", "Version") + tw.SetColumnAlignRight(5) + tw.SetColumnAlignRight(6) + tw.SetColumnAlignRight(7) names := []string{} - for name, _ := range set.Controllers { + for name := range set.Controllers { names = append(names, name) } sort.Strings(names) @@ -56,19 +56,70 @@ modelName = c.ModelName } userName := noValueDisplay + access := noValueDisplay if c.User != "" { userName = c.User + access = notKnownDisplay + if c.Access != "" { + access = c.Access + } } if name == set.CurrentController { name += "*" + w.PrintColor(output.CurrentHighlight, name) + } else { + w.Print(name) } cloudRegion := c.Cloud if c.CloudRegion != "" { cloudRegion += "/" + c.CloudRegion } - print(name, modelName, userName, cloudRegion) + agentVersion := c.AgentVersion + staleVersion := false + if agentVersion == "" { + agentVersion = notKnownDisplay + } else { + agentVersionNum, err := version.Parse(agentVersion) + staleVersion = err == nil && jujuversion.Current.Compare(agentVersionNum) > 0 + } + machineCount := noValueDisplay + if c.MachineCount != nil && *c.MachineCount > 0 { + machineCount = fmt.Sprintf("%d", *c.MachineCount) + } + modelCount := noValueDisplay + if c.ModelCount != nil && *c.ModelCount > 0 { + modelCount = fmt.Sprintf("%d", *c.ModelCount) + } + w.Print(modelName, userName, access, cloudRegion, 
modelCount, machineCount) + controllerMachineInfo, warn := controllerMachineStatus(c.ControllerMachines) + if warn { + w.PrintColor(output.WarningHighlight, controllerMachineInfo) + } else { + w.Print(controllerMachineInfo) + } + if staleVersion { + w.PrintColor(output.WarningHighlight, agentVersion) + } else { + w.Print(agentVersion) + } + w.Println() } tw.Flush() + return nil +} - return out.Bytes(), nil +func controllerMachineStatus(machines *ControllerMachines) (string, bool) { + if machines == nil || machines.Total == 0 { + return "-", false + } + if machines.Total == 1 { + return "none", false + } + controllerMachineStatus := "" + warn := machines.Active < machines.Total + controllerMachineStatus = fmt.Sprintf("%d", machines.Total) + if machines.Active < machines.Total { + controllerMachineStatus = fmt.Sprintf("%d/%d", machines.Active, machines.Total) + } + return controllerMachineStatus, warn } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/controller/listcontrollers.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/controller/listcontrollers.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/controller/listcontrollers.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/controller/listcontrollers.go 2016-10-13 14:31:49.000000000 +0000 @@ -6,13 +6,19 @@ import ( "fmt" "strings" + "sync" "github.com/juju/cmd" "github.com/juju/errors" - "launchpad.net/gnuflag" + "github.com/juju/gnuflag" + "gopkg.in/juju/names.v2" + "github.com/juju/juju/api/base" + "github.com/juju/juju/api/controller" "github.com/juju/juju/cmd/modelcmd" + "github.com/juju/juju/environs/bootstrap" "github.com/juju/juju/jujuclient" + "github.com/juju/juju/status" ) var helpControllersSummary = ` @@ -51,19 +57,60 @@ // SetFlags implements Command.SetFlags. 
func (c *listControllersCommand) SetFlags(f *gnuflag.FlagSet) { c.JujuCommandBase.SetFlags(f) + f.BoolVar(&c.refresh, "refresh", false, "Connect to each controller to download the latest details") c.out.AddFlags(f, "tabular", map[string]cmd.Formatter{ "yaml": cmd.FormatYaml, "json": cmd.FormatJson, - "tabular": formatControllersListTabular, + "tabular": c.formatControllersListTabular, }) } +func (c *listControllersCommand) getAPI(controllerName string) (ControllerAccessAPI, error) { + if c.api != nil { + return c.api(controllerName), nil + } + api, err := c.NewAPIRoot(c.store, controllerName, "") + if err != nil { + return nil, errors.Annotate(err, "opening API connection") + } + return controller.NewClient(api), nil +} + // Run implements Command.Run func (c *listControllersCommand) Run(ctx *cmd.Context) error { controllers, err := c.store.AllControllers() if err != nil { return errors.Annotate(err, "failed to list controllers") } + if len(controllers) == 0 && c.out.Name() == "tabular" { + ctx.Infof("%s", modelcmd.ErrNoControllersDefined) + return nil + } + if c.refresh && len(controllers) > 0 { + var wg sync.WaitGroup + wg.Add(len(controllers)) + for controllerName := range controllers { + name := controllerName + go func() { + defer wg.Done() + client, err := c.getAPI(name) + if err != nil { + fmt.Fprintf(ctx.GetStderr(), "error connecting to api for %q: %v\n", name, err) + return + } + defer client.Close() + if err := c.refreshControllerDetails(client, name); err != nil { + fmt.Fprintf(ctx.GetStderr(), "error updating cached details for %q: %v\n", name, err) + } + }() + } + wg.Wait() + // Reload controller details + controllers, err = c.store.AllControllers() + if err != nil { + return errors.Annotate(err, "failed to list controllers") + } + } details, errs := c.convertControllerDetails(controllers) if len(errs) > 0 { fmt.Fprintln(ctx.Stderr, strings.Join(errs, "\n")) @@ -81,9 +128,69 @@ return c.out.Write(ctx, controllerSet) } +func (c *listControllersCommand) 
refreshControllerDetails(client ControllerAccessAPI, controllerName string) error { + // First, get all the models the user can see, and their details. + var modelStatus []base.ModelStatus + allModels, err := client.AllModels() + if err != nil { + return err + } + var controllerModelUUID string + modelTags := make([]names.ModelTag, len(allModels)) + for i, m := range allModels { + modelTags[i] = names.NewModelTag(m.UUID) + if m.Name == bootstrap.ControllerModelName { + controllerModelUUID = m.UUID + } + } + modelStatus, err = client.ModelStatus(modelTags...) + if err != nil { + return err + } + + c.mu.Lock() + defer c.mu.Unlock() + // Use the model information to update the cached controller details. + details, err := c.store.ControllerByName(controllerName) + if err != nil { + return err + } + + modelCount := len(allModels) + details.ModelCount = &modelCount + machineCount := 0 + for _, s := range modelStatus { + machineCount += s.TotalMachineCount + } + details.MachineCount = &machineCount + details.ActiveControllerMachineCount, details.ControllerMachineCount = controllerMachineCounts(controllerModelUUID, modelStatus) + return c.store.UpdateController(controllerName, *details) +} + +func controllerMachineCounts(controllerModelUUID string, modelStatus []base.ModelStatus) (activeCount, totalCount int) { + for _, s := range modelStatus { + if s.UUID != controllerModelUUID { + continue + } + for _, m := range s.Machines { + if !m.WantsVote { + continue + } + totalCount++ + if m.Status != string(status.Down) && m.HasVote { + activeCount++ + } + } + } + return activeCount, totalCount +} + type listControllersCommand struct { modelcmd.JujuCommandBase - out cmd.Output - store jujuclient.ClientStore + out cmd.Output + store jujuclient.ClientStore + api func(controllerName string) ControllerAccessAPI + refresh bool + mu sync.Mutex } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/controller/listcontrollers_test.go 
juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/controller/listcontrollers_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/controller/listcontrollers_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/controller/listcontrollers_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -12,33 +12,36 @@ jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" + "github.com/juju/juju/api/base" "github.com/juju/juju/cmd/juju/controller" + "github.com/juju/juju/cmd/modelcmd" "github.com/juju/juju/jujuclient/jujuclienttesting" "github.com/juju/juju/testing" ) type ListControllersSuite struct { baseControllerSuite + api func(string) controller.ControllerAccessAPI } var _ = gc.Suite(&ListControllersSuite{}) func (s *ListControllersSuite) TestListControllersEmptyStore(c *gc.C) { - s.expectedOutput = ` -CONTROLLER MODEL USER CLOUD/REGION - -`[1:] - s.store = jujuclienttesting.NewMemStore() - s.assertListControllers(c) + context, err := s.runListControllers(c) + c.Assert(err, jc.ErrorIsNil) + c.Check(testing.Stdout(context), gc.Equals, "") + c.Check(testing.Stderr(context), gc.Equals, modelcmd.ErrNoControllersDefined.Error()) } func (s *ListControllersSuite) TestListControllers(c *gc.C) { s.expectedOutput = ` -CONTROLLER MODEL USER CLOUD/REGION -aws-test admin - aws/us-east-1 -mallards* my-model admin@local mallards/mallards1 -mark-test-prodstack - admin@local prodstack +Use --refresh to see the latest information. 
+ +Controller Model User Access Cloud/Region Models Machines HA Version +aws-test controller - - aws/us-east-1 2 5 - 2.0.1 +mallards* my-model admin superuser mallards/mallards1 - - - (unknown) +mark-test-prodstack - admin (unknown) prodstack - - - (unknown) `[1:] @@ -47,29 +50,115 @@ s.assertListControllers(c) } +func (s *ListControllersSuite) TestListControllersRefresh(c *gc.C) { + s.createTestClientStore(c) + s.api = func(controllerNamee string) controller.ControllerAccessAPI { + fakeController := &fakeController{ + controllerName: controllerNamee, + modelNames: map[string]string{ + "abc": "controller", + "def": "my-model", + "ghi": "controller", + }, + store: s.store, + } + return fakeController + } + s.expectedOutput = ` +Controller Model User Access Cloud/Region Models Machines HA Version +aws-test controller admin (unknown) aws/us-east-1 1 2 - 2.0.1 +mallards* my-model admin superuser mallards/mallards1 2 4 - (unknown) +mark-test-prodstack - admin (unknown) prodstack - - - (unknown) + +`[1:] + s.assertListControllers(c, "--refresh") +} + +func (s *ListControllersSuite) setupAPIForControllerMachines() { + s.api = func(controllerName string) controller.ControllerAccessAPI { + fakeController := &fakeController{ + controllerName: controllerName, + modelNames: map[string]string{ + "abc": "controller", + "def": "my-model", + "ghi": "controller", + }, + store: s.store, + } + switch controllerName { + case "aws-test": + fakeController.machines = map[string][]base.Machine{ + "ghi": { + {Id: "1", HasVote: true, WantsVote: true, Status: "active"}, + {Id: "2", HasVote: true, WantsVote: true, Status: "down"}, + {Id: "3", HasVote: false, WantsVote: true, Status: "active"}, + }, + "abc": { + {Id: "1", HasVote: true, WantsVote: true, Status: "active"}, + }, + "def": { + {Id: "1", HasVote: true, WantsVote: true, Status: "active"}, + }, + } + case "mallards": + fakeController.machines = map[string][]base.Machine{ + "abc": { + {Id: "1", HasVote: true, WantsVote: true, Status: 
"active"}, + }, + } + } + return fakeController + } +} + +func (s *ListControllersSuite) TestListControllersKnownHAStatus(c *gc.C) { + s.createTestClientStore(c) + s.setupAPIForControllerMachines() + s.expectedOutput = ` +Controller Model User Access Cloud/Region Models Machines HA Version +aws-test controller admin (unknown) aws/us-east-1 1 2 1/3 2.0.1 +mallards* my-model admin superuser mallards/mallards1 2 4 none (unknown) +mark-test-prodstack - admin (unknown) prodstack - - - (unknown) + +`[1:] + s.assertListControllers(c, "--refresh") +} + func (s *ListControllersSuite) TestListControllersYaml(c *gc.C) { s.expectedOutput = ` controllers: aws-test: - current-model: admin - user: admin@local + current-model: controller + user: admin recent-server: this-is-aws-test-of-many-api-endpoints uuid: this-is-the-aws-test-uuid api-endpoints: [this-is-aws-test-of-many-api-endpoints] ca-cert: this-is-aws-test-ca-cert cloud: aws region: us-east-1 + agent-version: 2.0.1 + model-count: 1 + machine-count: 2 + controller-machines: + active: 1 + total: 3 mallards: current-model: my-model - user: admin@local + user: admin + access: superuser recent-server: this-is-another-of-many-api-endpoints uuid: this-is-another-uuid api-endpoints: [this-is-another-of-many-api-endpoints, this-is-one-more-of-many-api-endpoints] ca-cert: this-is-another-ca-cert cloud: mallards region: mallards1 + model-count: 2 + machine-count: 4 + controller-machines: + active: 1 + total: 1 mark-test-prodstack: - user: admin@local + user: admin recent-server: this-is-one-of-many-api-endpoints uuid: this-is-a-uuid api-endpoints: [this-is-one-of-many-api-endpoints] @@ -79,7 +168,12 @@ `[1:] s.createTestClientStore(c) - s.assertListControllers(c, "--format", "yaml") + s.setupAPIForControllerMachines() + s.assertListControllers(c, "--format", "yaml", "--refresh") +} + +func intPtr(i int) *int { + return &i } func (s *ListControllersSuite) TestListControllersJson(c *gc.C) { @@ -93,18 +187,22 @@ Controllers: 
map[string]controller.ControllerItem{ "aws-test": { ControllerUUID: "this-is-the-aws-test-uuid", - ModelName: "admin", - User: "admin@local", + ModelName: "controller", + User: "admin", Server: "this-is-aws-test-of-many-api-endpoints", APIEndpoints: []string{"this-is-aws-test-of-many-api-endpoints"}, CACert: "this-is-aws-test-ca-cert", Cloud: "aws", CloudRegion: "us-east-1", + AgentVersion: "2.0.1", + ModelCount: intPtr(2), + MachineCount: intPtr(5), }, "mallards": { ControllerUUID: "this-is-another-uuid", ModelName: "my-model", - User: "admin@local", + User: "admin", + Access: "superuser", Server: "this-is-another-of-many-api-endpoints", APIEndpoints: []string{"this-is-another-of-many-api-endpoints", "this-is-one-more-of-many-api-endpoints"}, CACert: "this-is-another-ca-cert", @@ -113,7 +211,7 @@ }, "mark-test-prodstack": { ControllerUUID: "this-is-a-uuid", - User: "admin@local", + User: "admin", Server: "this-is-one-of-many-api-endpoints", APIEndpoints: []string{"this-is-one-of-many-api-endpoints"}, CACert: "this-is-a-ca-cert", @@ -153,7 +251,7 @@ } func (s *ListControllersSuite) runListControllers(c *gc.C, args ...string) (*cmd.Context, error) { - return testing.RunCommand(c, controller.NewListControllersCommandForTest(s.store), args...) + return testing.RunCommand(c, controller.NewListControllersCommandForTest(s.store, s.api), args...) 
} func (s *ListControllersSuite) assertListControllersFailed(c *gc.C, args ...string) { diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/controller/listmodels.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/controller/listmodels.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/controller/listmodels.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/controller/listmodels.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,20 +4,20 @@ package controller import ( - "bytes" "fmt" - "text/tabwriter" + "io" "time" "github.com/juju/cmd" "github.com/juju/errors" + "github.com/juju/gnuflag" "gopkg.in/juju/names.v2" - "launchpad.net/gnuflag" "github.com/juju/juju/api/base" "github.com/juju/juju/apiserver/params" "github.com/juju/juju/cmd/juju/common" "github.com/juju/juju/cmd/modelcmd" + "github.com/juju/juju/cmd/output" "github.com/juju/juju/jujuclient" ) @@ -51,9 +51,10 @@ juju models juju models --user bob -See also: add-model - share-model - unshare-model +See also: + add-model + share-model + unshare-model ` // ModelManagerAPI defines the methods on the model manager API that @@ -97,6 +98,7 @@ // SetFlags implements Command.SetFlags. func (c *modelsCommand) SetFlags(f *gnuflag.FlagSet) { + c.ControllerCommandBase.SetFlags(f) f.StringVar(&c.user, "user", "", "The user to list models for (administrative users only)") f.BoolVar(&c.all, "all", false, "Lists all models, regardless of user accessibility (administrative users only)") f.BoolVar(&c.listUUID, "uuid", false, "Display UUID for models") @@ -114,9 +116,9 @@ Models []common.ModelInfo `yaml:"models" json:"models"` // CurrentModel is the name of the current model, qualified for the - // user for which we're listing models. i.e. for the user admin@local, - // and the model admin@local/foo, this field will contain "foo"; for - // bob@local and the same model, the field will contain "admin/foo". + // user for which we're listing models. i.e. 
for the user admin, + // and the model admin/foo, this field will contain "foo"; for + // bob and the same model, the field will contain "admin/foo". CurrentModel string `yaml:"current-model,omitempty" json:"current-model,omitempty"` // CurrentModelQualified is the fully qualified name for the current @@ -160,6 +162,7 @@ if err != nil { return errors.Trace(err) } + model.ControllerName = c.ControllerName() modelInfo = append(modelInfo, model) } @@ -174,7 +177,7 @@ userForListing := names.NewUserTag(c.user) unqualifiedModelName, owner, err := jujuclient.SplitModelName(current) if err == nil { - modelSet.CurrentModel = ownerQualifiedModelName( + modelSet.CurrentModel = common.OwnerQualifiedModelName( unqualifiedModelName, owner, userForListing, ) } @@ -187,7 +190,7 @@ // When the output is tabular, we inform the user when there // are no models available, and tell them how to go about // creating or granting access to them. - fmt.Fprintf(ctx.Stderr, "\n%s\n\n", errNoModels.Error()) + fmt.Fprintln(ctx.Stderr, noModelsMessage) } return nil } @@ -246,10 +249,10 @@ } // formatTabular takes an interface{} to adhere to the cmd.Formatter interface -func (c *modelsCommand) formatTabular(value interface{}) ([]byte, error) { +func (c *modelsCommand) formatTabular(writer io.Writer, value interface{}) error { modelSet, ok := value.(ModelSet) if !ok { - return nil, errors.Errorf("expected value of type %T, got %T", modelSet, value) + return errors.Errorf("expected value of type %T, got %T", modelSet, value) } // We need the tag of the user for which we're listing models, @@ -263,53 +266,68 @@ userForLastConn = userForListing } - var out bytes.Buffer - const ( - // To format things into columns. 
- minwidth = 0 - tabwidth = 1 - padding = 2 - padchar = ' ' - flags = 0 - ) - tw := tabwriter.NewWriter(&out, minwidth, tabwidth, padding, padchar, flags) - fmt.Fprintf(tw, "MODEL") + tw := output.TabWriter(writer) + w := output.Wrapper{tw} + w.Println("Controller: " + c.ControllerName()) + w.Println() + w.Print("Model") if c.listUUID { - fmt.Fprintf(tw, "\tMODEL UUID") + w.Print("UUID") + } + // Only owners, or users with write access or above get to see machines and cores. + haveMachineInfo := false + for _, m := range modelSet.Models { + if haveMachineInfo = len(m.Machines) > 0; haveMachineInfo { + break + } + } + if haveMachineInfo { + w.Println("Owner", "Status", "Machines", "Cores", "Access", "Last connection") + offset := 0 + if c.listUUID { + offset++ + } + tw.SetColumnAlignRight(3 + offset) + tw.SetColumnAlignRight(4 + offset) + } else { + w.Println("Owner", "Status", "Access", "Last connection") } - fmt.Fprintf(tw, "\tOWNER\tSTATUS\tLAST CONNECTION\n") for _, model := range modelSet.Models { owner := names.NewUserTag(model.Owner) - name := ownerQualifiedModelName(model.Name, owner, userForListing) + name := common.OwnerQualifiedModelName(model.Name, owner, userForListing) if jujuclient.JoinOwnerModelName(owner, model.Name) == modelSet.CurrentModelQualified { name += "*" + w.PrintColor(output.CurrentHighlight, name) + } else { + w.Print(name) } - fmt.Fprintf(tw, "%s", name) if c.listUUID { - fmt.Fprintf(tw, "\t%s", model.UUID) + w.Print(model.UUID) } - lastConnection := model.Users[userForLastConn.Canonical()].LastConnection + lastConnection := model.Users[userForLastConn.Id()].LastConnection if lastConnection == "" { lastConnection = "never connected" } - fmt.Fprintf(tw, "\t%s\t%s\t%s\n", model.Owner, model.Status.Current, lastConnection) + userForAccess := loggedInUser + if c.user != "" { + userForAccess = names.NewUserTag(c.user) + } + access := model.Users[userForAccess.Id()].Access + w.Print(model.Owner, model.Status.Current) + if haveMachineInfo { + 
machineInfo := fmt.Sprintf("%d", len(model.Machines)) + cores := uint64(0) + for _, m := range model.Machines { + cores += m.Cores + } + coresInfo := "-" + if cores > 0 { + coresInfo = fmt.Sprintf("%d", cores) + } + w.Print(machineInfo, coresInfo) + } + w.Println(access, lastConnection) } tw.Flush() - return out.Bytes(), nil -} - -// ownerQualifiedModelName returns the model name qualified with the -// model owner if the owner is not the same as the given canonical -// user name. If the owner is a local user, we omit the domain. -func ownerQualifiedModelName(modelName string, owner, user names.UserTag) string { - if owner.Canonical() == user.Canonical() { - return modelName - } - var ownerName string - if owner.IsLocal() { - ownerName = owner.Name() - } else { - ownerName = owner.Canonical() - } - return fmt.Sprintf("%s/%s", ownerName, modelName) + return nil } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/controller/listmodels_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/controller/listmodels_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/controller/listmodels_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/controller/listmodels_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -30,10 +30,11 @@ var _ = gc.Suite(&ModelsSuite{}) type fakeModelMgrAPIClient struct { - err error - user string - models []base.UserModel - all bool + err error + user string + models []base.UserModel + all bool + inclMachines bool } func (f *fakeModelMgrAPIClient) Close() error { @@ -68,28 +69,37 @@ Name: model.Name, UUID: model.UUID, OwnerTag: names.NewUserTag(model.Owner).String(), + CloudTag: "cloud-dummy", } switch model.Name { case "test-model1": last1 := time.Date(2015, 3, 20, 0, 0, 0, 0, time.UTC) - result.Status.Status = status.StatusActive + result.Status.Status = status.Active if f.user != "" { result.Users = []params.ModelUserInfo{{ UserName: f.user, LastConnection: &last1, + 
Access: params.ModelReadAccess, }} } + if f.inclMachines { + one := uint64(1) + result.Machines = []params.ModelMachineInfo{ + {Id: "0", Hardware: ¶ms.MachineHardware{Cores: &one}}, {Id: "1"}, + } + } case "test-model2": last2 := time.Date(2015, 3, 1, 0, 0, 0, 0, time.UTC) - result.Status.Status = status.StatusActive + result.Status.Status = status.Active if f.user != "" { result.Users = []params.ModelUserInfo{{ UserName: f.user, LastConnection: &last2, + Access: params.ModelWriteAccess, }} } case "test-model3": - result.Status.Status = status.StatusDestroying + result.Status.Status = status.Destroying } results[i].Result = result } @@ -103,11 +113,11 @@ models := []base.UserModel{ { Name: "test-model1", - Owner: "admin@local", + Owner: "admin", UUID: "test-model1-UUID", }, { Name: "test-model2", - Owner: "carlotta@local", + Owner: "carlotta", UUID: "test-model2-UUID", }, { Name: "test-model3", @@ -117,16 +127,16 @@ } s.api = &fakeModelMgrAPIClient{ models: models, - user: "admin@local", + user: "admin", } s.store = jujuclienttesting.NewMemStore() s.store.CurrentControllerName = "fake" s.store.Controllers["fake"] = jujuclient.ControllerDetails{} s.store.Models["fake"] = &jujuclient.ControllerModels{ - CurrentModel: "admin@local/test-model1", + CurrentModel: "admin/test-model1", } s.store.Accounts["fake"] = jujuclient.AccountDetails{ - User: "admin@local", + User: "admin", Password: "password", } } @@ -138,12 +148,14 @@ func (s *ModelsSuite) TestModelsOwner(c *gc.C) { context, err := testing.RunCommand(c, s.newCommand()) c.Assert(err, jc.ErrorIsNil) - c.Assert(s.api.user, gc.Equals, "admin@local") + c.Assert(s.api.user, gc.Equals, "admin") c.Assert(testing.Stdout(context), gc.Equals, ""+ - "MODEL OWNER STATUS LAST CONNECTION\n"+ - "test-model1* admin@local active 2015-03-20\n"+ - "carlotta/test-model2 carlotta@local active 2015-03-01\n"+ - "daiwik@external/test-model3 daiwik@external destroying never connected\n"+ + "Controller: fake\n"+ + "\n"+ + "Model Owner 
Status Access Last connection\n"+ + "test-model1* admin active read 2015-03-20\n"+ + "carlotta/test-model2 carlotta active write 2015-03-01\n"+ + "daiwik@external/test-model3 daiwik@external destroying never connected\n"+ "\n") } @@ -152,10 +164,12 @@ c.Assert(err, jc.ErrorIsNil) c.Assert(s.api.user, gc.Equals, "bob") c.Assert(testing.Stdout(context), gc.Equals, ""+ - "MODEL OWNER STATUS LAST CONNECTION\n"+ - "admin/test-model1* admin@local active 2015-03-20\n"+ - "carlotta/test-model2 carlotta@local active 2015-03-01\n"+ - "daiwik@external/test-model3 daiwik@external destroying never connected\n"+ + "Controller: fake\n"+ + "\n"+ + "Model Owner Status Access Last connection\n"+ + "admin/test-model1* admin active read 2015-03-20\n"+ + "carlotta/test-model2 carlotta active write 2015-03-01\n"+ + "daiwik@external/test-model3 daiwik@external destroying never connected\n"+ "\n") } @@ -164,10 +178,12 @@ c.Assert(err, jc.ErrorIsNil) c.Assert(s.api.all, jc.IsTrue) c.Assert(testing.Stdout(context), gc.Equals, ""+ - "MODEL OWNER STATUS LAST CONNECTION\n"+ - "admin/test-model1* admin@local active 2015-03-20\n"+ - "carlotta/test-model2 carlotta@local active 2015-03-01\n"+ - "daiwik@external/test-model3 daiwik@external destroying never connected\n"+ + "Controller: fake\n"+ + "\n"+ + "Model Owner Status Access Last connection\n"+ + "admin/test-model1* admin active read 2015-03-20\n"+ + "carlotta/test-model2 carlotta active write 2015-03-01\n"+ + "daiwik@external/test-model3 daiwik@external destroying never connected\n"+ "\n") } @@ -176,22 +192,42 @@ context, err := testing.RunCommand(c, s.newCommand()) c.Assert(err, jc.ErrorIsNil) c.Assert(testing.Stdout(context), gc.Equals, ""+ - "MODEL OWNER STATUS LAST CONNECTION\n"+ - "test-model1 admin@local active 2015-03-20\n"+ - "carlotta/test-model2 carlotta@local active 2015-03-01\n"+ - "daiwik@external/test-model3 daiwik@external destroying never connected\n"+ + "Controller: fake\n"+ + "\n"+ + "Model Owner Status Access Last 
connection\n"+ + "test-model1 admin active read 2015-03-20\n"+ + "carlotta/test-model2 carlotta active write 2015-03-01\n"+ + "daiwik@external/test-model3 daiwik@external destroying never connected\n"+ "\n") } func (s *ModelsSuite) TestModelsUUID(c *gc.C) { + s.api.inclMachines = true context, err := testing.RunCommand(c, s.newCommand(), "--uuid") c.Assert(err, jc.ErrorIsNil) - c.Assert(s.api.user, gc.Equals, "admin@local") + c.Assert(s.api.user, gc.Equals, "admin") + c.Assert(testing.Stdout(context), gc.Equals, ""+ + "Controller: fake\n"+ + "\n"+ + "Model UUID Owner Status Machines Cores Access Last connection\n"+ + "test-model1* test-model1-UUID admin active 2 1 read 2015-03-20\n"+ + "carlotta/test-model2 test-model2-UUID carlotta active 0 - write 2015-03-01\n"+ + "daiwik@external/test-model3 test-model3-UUID daiwik@external destroying 0 - never connected\n"+ + "\n") +} + +func (s *ModelsSuite) TestModelsMachineInfo(c *gc.C) { + s.api.inclMachines = true + context, err := testing.RunCommand(c, s.newCommand()) + c.Assert(err, jc.ErrorIsNil) + c.Assert(s.api.user, gc.Equals, "admin") c.Assert(testing.Stdout(context), gc.Equals, ""+ - "MODEL MODEL UUID OWNER STATUS LAST CONNECTION\n"+ - "test-model1* test-model1-UUID admin@local active 2015-03-20\n"+ - "carlotta/test-model2 test-model2-UUID carlotta@local active 2015-03-01\n"+ - "daiwik@external/test-model3 test-model3-UUID daiwik@external destroying never connected\n"+ + "Controller: fake\n"+ + "\n"+ + "Model Owner Status Machines Cores Access Last connection\n"+ + "test-model1* admin active 2 1 read 2015-03-20\n"+ + "carlotta/test-model2 carlotta active 0 - write 2015-03-01\n"+ + "daiwik@external/test-model3 daiwik@external destroying 0 - never connected\n"+ "\n") } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/controller/mock_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/controller/mock_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/controller/mock_test.go 
2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/controller/mock_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,22 +4,26 @@ package controller_test import ( - "errors" - "github.com/juju/juju/api" - "github.com/juju/juju/network" "gopkg.in/juju/names.v2" ) +// mockAPIConnection implements just enough of the api.Connection interface +// to satisfy the methods used by the register command. type mockAPIConnection struct { api.Connection - info *api.Info - opts api.DialOpts - addr string - apiHostPorts [][]network.HostPort - controllerTag names.ModelTag - username string - password string + + // addr is returned by Addr. + addr string + + // controllerTag is returned by ControllerTag. + controllerTag names.ControllerTag + + // authTag is returned by AuthTag. + authTag names.Tag + + // controllerAccess is returned by ControllerAccess. + controllerAccess string } func (*mockAPIConnection) Close() error { @@ -30,19 +34,14 @@ return m.addr } -func (m *mockAPIConnection) APIHostPorts() [][]network.HostPort { - return m.apiHostPorts +func (m *mockAPIConnection) ControllerTag() names.ControllerTag { + return m.controllerTag } -func (m *mockAPIConnection) ControllerTag() (names.ModelTag, error) { - if m.controllerTag.Id() == "" { - return m.controllerTag, errors.New("no server tag") - } - return m.controllerTag, nil +func (m *mockAPIConnection) AuthTag() names.Tag { + return m.authTag } -func (m *mockAPIConnection) SetPassword(username, password string) error { - m.username = username - m.password = password - return nil +func (m *mockAPIConnection) ControllerAccess() string { + return m.controllerAccess } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/controller/package_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/controller/package_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/controller/package_test.go 2016-08-16 08:56:25.000000000 +0000 +++ 
juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/controller/package_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -60,6 +60,9 @@ ca-cert: this-is-aws-test-ca-cert cloud: aws region: us-east-1 + model-count: 2 + machine-count: 5 + agent-version: 2.0.1 mallards: uuid: this-is-another-uuid api-endpoints: [this-is-another-of-many-api-endpoints, this-is-one-more-of-many-api-endpoints] @@ -78,12 +81,12 @@ controllers: aws-test: models: - admin: + controller: uuid: ghi - current-model: admin + current-model: controller mallards: models: - admin: + controller: uuid: abc my-model: uuid: def @@ -93,12 +96,13 @@ const testAccountsYaml = ` controllers: aws-test: - user: admin@local + user: admin password: hun+er2 mark-test-prodstack: - user: admin@local + user: admin password: hunter2 mallards: - user: admin@local + user: admin password: hunter2 + last-known-access: superuser ` diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/controller/register.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/controller/register.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/controller/register.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/controller/register.go 2016-10-13 14:31:49.000000000 +0000 @@ -28,22 +28,25 @@ "github.com/juju/juju/api/base" "github.com/juju/juju/api/modelmanager" "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/cmd/juju/common" "github.com/juju/juju/cmd/modelcmd" "github.com/juju/juju/jujuclient" + "github.com/juju/juju/permission" ) -var errNoModels = errors.New(` +var noModelsMessage = ` There are no models available. You can add models with "juju add-model", or you can ask an administrator or owner -of a model to grant access to that model with "juju grant".`[1:]) +of a model to grant access to that model with "juju grant". +` // NewRegisterCommand returns a command to allow the user to register a controller. 
func NewRegisterCommand() cmd.Command { - cmd := ®isterCommand{} - cmd.apiOpen = cmd.APIOpen - cmd.listModelsFunc = cmd.listModels - cmd.store = jujuclient.NewFileClientStore() - return modelcmd.WrapBase(cmd) + c := ®isterCommand{} + c.apiOpen = c.APIOpen + c.listModelsFunc = c.listModels + c.store = jujuclient.NewFileClientStore() + return modelcmd.WrapBase(c) } // registerCommand logs in to a Juju controller and caches the connection @@ -53,30 +56,45 @@ apiOpen api.OpenFunc listModelsFunc func(_ jujuclient.ClientStore, controller, user string) ([]base.UserModel, error) store jujuclient.ClientStore - EncodedData string + Arg string + + // onRunError is executed if non-nil if there is an error at the end + // of the Run method. + onRunError func() } var usageRegisterSummary = ` -Registers a Juju user to a controller.`[1:] +Registers a controller.`[1:] var usageRegisterDetails = ` -Connects to a controller and completes the user registration process that -began with the `[1:] + "`juju add-user`" + ` command. The latter prints out the 'string' -that is referred to in Usage. -The user will be prompted for a password, which, once set, causes the -registration string to be voided. In order to start using Juju the user +The register command adds details of a controller to the local system. +This is done either by completing the user registration process that +began with the 'juju add-user' command, or by providing the DNS host +name of a public controller. + +To complete the user registration process, you should have been provided +with a base64-encoded blob of data (the output of 'juju add-user') +which can be copied and pasted as the argument to 'register'. +You will be prompted for a password, which, once set, causes the +registration string to be voided. In order to start using Juju the user can now either add a model or wait for a model to be shared with them. 
-Some machine providers will require the user to be in possession of +Some machine providers will require the user to be in possession of certain credentials in order to add a model. +When adding a controller at a public address, authentication via some +external third party (for example Ubuntu SSO) will be required, usually +by using a web browser. + Examples: - juju register MFATA3JvZDAnExMxMDQuMTU0LjQyLjQ0OjE3MDcwExAxMC4xMjguMC4yOjE3MDcw - BCBEFCaXerhNImkKKabuX5ULWf2Bp4AzPNJEbXVWgraLrAA= + juju register MFATA3JvZDAnExMxMDQuMTU0LjQyLjQ0OjE3MDcwExAxMC4xMjguMC4yOjE3MDcwBCBEFCaXerhNImkKKabuX5ULWf2Bp4AzPNJEbXVWgraLrAA= + + juju register public-controller.example.com See also: add-user - change-user-password` + change-user-password + unregister` // Info implements Command.Info // `register` may seem generic, but is seen as simple and without potential @@ -84,7 +102,7 @@ func (c *registerCommand) Info() *cmd.Info { return &cmd.Info{ Name: "register", - Args: "", + Args: "|", Purpose: usageRegisterSummary, Doc: usageRegisterDetails, } @@ -95,37 +113,155 @@ if len(args) < 1 { return errors.New("registration data missing") } - c.EncodedData, args = args[0], args[1:] + c.Arg, args = args[0], args[1:] if err := cmd.CheckEmpty(args); err != nil { - return err + return errors.Trace(err) } return nil } +// Run implements Command.Run. 
func (c *registerCommand) Run(ctx *cmd.Context) error { + err := c.run(ctx) + if err != nil && c.onRunError != nil { + c.onRunError() + } + return err +} +func (c *registerCommand) run(ctx *cmd.Context) error { store := modelcmd.QualifyingClientStore{c.store} registrationParams, err := c.getParameters(ctx, store) if err != nil { return errors.Trace(err) } - _, err = store.ControllerByName(registrationParams.controllerName) - if err == nil { - return errors.AlreadyExistsf("controller %q", registrationParams.controllerName) - } else if !errors.IsNotFound(err) { + controllerDetails, accountDetails, err := c.controllerDetails(ctx, registrationParams) + if err != nil { + return errors.Trace(err) + } + controllerName, err := c.updateController( + ctx, + store, + registrationParams.defaultControllerName, + controllerDetails, + accountDetails, + ) + if err != nil { + return errors.Trace(err) + } + // Log into the controller to verify the credentials, and + // list the models available. + models, err := c.listModelsFunc(store, controllerName, accountDetails.User) + if err != nil { + return errors.Trace(err) + } + for _, model := range models { + owner := names.NewUserTag(model.Owner) + if err := store.UpdateModel( + controllerName, + jujuclient.JoinOwnerModelName(owner, model.Name), + jujuclient.ModelDetails{model.UUID}, + ); err != nil { + return errors.Annotate(err, "storing model details") + } + } + if err := store.SetCurrentController(controllerName); err != nil { return errors.Trace(err) } + fmt.Fprintf( + ctx.Stderr, "\nWelcome, %s. 
You are now logged into %q.\n", + friendlyUserName(accountDetails.User), controllerName, + ) + return c.maybeSetCurrentModel(ctx, store, controllerName, accountDetails.User, models) +} + +func friendlyUserName(user string) string { + u := names.NewUserTag(user) + if u.IsLocal() { + return u.Name() + } + return u.Id() +} + +// controllerDetails returns controller and account details to be registered for the +// given registration parameters. +func (c *registerCommand) controllerDetails(ctx *cmd.Context, p *registrationParams) (jujuclient.ControllerDetails, jujuclient.AccountDetails, error) { + if p.publicHost != "" { + return c.publicControllerDetails(p.publicHost) + } + return c.nonPublicControllerDetails(ctx, p) +} + +// publicControllerDetails returns controller and account details to be registered +// for the given public controller host name. +func (c *registerCommand) publicControllerDetails(host string) (jujuclient.ControllerDetails, jujuclient.AccountDetails, error) { + errRet := func(err error) (jujuclient.ControllerDetails, jujuclient.AccountDetails, error) { + return jujuclient.ControllerDetails{}, jujuclient.AccountDetails{}, err + } + apiAddr := host + if !strings.Contains(apiAddr, ":") { + apiAddr += ":443" + } + // Make a direct API connection because we don't yet know the + // controller UUID so can't store the thus-incomplete controller + // details to make a conventional connection. + // + // Unfortunately this means we'll connect twice to the controller + // but it's probably best to go through the conventional path the + // second time. 
+ bclient, err := c.BakeryClient() + if err != nil { + return errRet(errors.Trace(err)) + } + dialOpts := api.DefaultDialOpts() + dialOpts.BakeryClient = bclient + conn, err := c.apiOpen(&api.Info{ + Addrs: []string{apiAddr}, + }, dialOpts) + if err != nil { + return errRet(errors.Trace(err)) + } + defer conn.Close() + user, ok := conn.AuthTag().(names.UserTag) + if !ok { + return errRet(errors.Errorf("logged in as %v, not a user", conn.AuthTag())) + } + // If we get to here, then we have a cached macaroon for the registered + // user. If we encounter an error after here, we need to clear it. + c.onRunError = func() { + if err := c.ClearControllerMacaroons([]string{apiAddr}); err != nil { + logger.Errorf("failed to clear macaroon: %v", err) + } + } + return jujuclient.ControllerDetails{ + APIEndpoints: []string{apiAddr}, + ControllerUUID: conn.ControllerTag().Id(), + }, jujuclient.AccountDetails{ + User: user.Id(), + LastKnownAccess: conn.ControllerAccess(), + }, nil +} + +// nonPublicControllerDetails returns controller and account details to be registered with +// respect to the given registration parameters. +func (c *registerCommand) nonPublicControllerDetails(ctx *cmd.Context, registrationParams *registrationParams) (jujuclient.ControllerDetails, jujuclient.AccountDetails, error) { + errRet := func(err error) (jujuclient.ControllerDetails, jujuclient.AccountDetails, error) { + return jujuclient.ControllerDetails{}, jujuclient.AccountDetails{}, err + } // During registration we must set a new password. This has to be done // atomically with the clearing of the secret key. payloadBytes, err := json.Marshal(params.SecretKeyLoginRequestPayload{ registrationParams.newPassword, }) if err != nil { - return errors.Trace(err) + return errRet(errors.Trace(err)) } - // Make the registration call. + // Make the registration call. 
If this is successful, the client's + // cookie jar will be populated with a macaroon that may be used + // to log in below without the user having to type in the password + // again. req := params.SecretKeyLoginRequest{ Nonce: registrationParams.nonce[:], User: registrationParams.userTag.String(), @@ -137,71 +273,77 @@ } resp, err := c.secretKeyLogin(registrationParams.controllerAddrs, req) if err != nil { - return errors.Trace(err) + return errRet(errors.Trace(err)) } // Decrypt the response to authenticate the controller and // obtain its CA certificate. if len(resp.Nonce) != len(registrationParams.nonce) { - return errors.NotValidf("response nonce") + return errRet(errors.NotValidf("response nonce")) } var respNonce [24]byte copy(respNonce[:], resp.Nonce) payloadBytes, ok := secretbox.Open(nil, resp.PayloadCiphertext, &respNonce, ®istrationParams.key) if !ok { - return errors.NotValidf("response payload") + return errRet(errors.NotValidf("response payload")) } var responsePayload params.SecretKeyLoginResponsePayload if err := json.Unmarshal(payloadBytes, &responsePayload); err != nil { - return errors.Annotate(err, "unmarshalling response payload") - } - - // Store the controller and account details. - controllerDetails := jujuclient.ControllerDetails{ - APIEndpoints: registrationParams.controllerAddrs, - ControllerUUID: responsePayload.ControllerUUID, - CACert: responsePayload.CACert, + return errRet(errors.Annotate(err, "unmarshalling response payload")) } - if err := store.UpdateController(registrationParams.controllerName, controllerDetails); err != nil { - return errors.Trace(err) + user := registrationParams.userTag.Id() + ctx.Infof("Initial password successfully set for %s.", friendlyUserName(user)) + // If we get to here, then we have a cached macaroon for the registered + // user. If we encounter an error after here, we need to clear it. 
+ c.onRunError = func() { + if err := c.ClearControllerMacaroons(registrationParams.controllerAddrs); err != nil { + logger.Errorf("failed to clear macaroon: %v", err) + } } - macaroonJSON, err := responsePayload.Macaroon.MarshalJSON() + return jujuclient.ControllerDetails{ + APIEndpoints: registrationParams.controllerAddrs, + ControllerUUID: responsePayload.ControllerUUID, + CACert: responsePayload.CACert, + }, jujuclient.AccountDetails{ + User: user, + LastKnownAccess: string(permission.LoginAccess), + }, nil +} + +// updateController prompts for a controller name and updates the +// controller and account details in the given client store. +// It returns the name of the updated controller. +func (c *registerCommand) updateController( + ctx *cmd.Context, + store jujuclient.ClientStore, + defaultControllerName string, + controllerDetails jujuclient.ControllerDetails, + accountDetails jujuclient.AccountDetails, +) (string, error) { + // Check that the same controller isn't already stored, so that we + // can avoid needlessly asking for a controller name in that case. + all, err := store.AllControllers() if err != nil { - return errors.Annotate(err, "marshalling temporary credential to JSON") - } - accountDetails := jujuclient.AccountDetails{ - User: registrationParams.userTag.Canonical(), - Macaroon: string(macaroonJSON), + return "", errors.Trace(err) } - if err := store.UpdateAccount(registrationParams.controllerName, accountDetails); err != nil { - return errors.Trace(err) + for name, ctl := range all { + if ctl.ControllerUUID == controllerDetails.ControllerUUID { + // TODO(rogpeppe) lp#1614010 Succeed but override the account details in this case? + return "", errors.Errorf("controller is already registered as %q", name) + } } - - // Log into the controller to verify the credentials, and - // list the models available. 
- models, err := c.listModelsFunc(store, registrationParams.controllerName, accountDetails.User) + controllerName, err := c.promptControllerName(store, defaultControllerName, ctx.Stderr, ctx.Stdin) if err != nil { - return errors.Trace(err) + return "", errors.Trace(err) } - for _, model := range models { - owner := names.NewUserTag(model.Owner) - if err := store.UpdateModel( - registrationParams.controllerName, - jujuclient.JoinOwnerModelName(owner, model.Name), - jujuclient.ModelDetails{model.UUID}, - ); err != nil { - return errors.Annotate(err, "storing model details") - } + + if err := store.AddController(controllerName, controllerDetails); err != nil { + return "", errors.Trace(err) } - if err := store.SetCurrentController(registrationParams.controllerName); err != nil { - return errors.Trace(err) + if err := store.UpdateAccount(controllerName, accountDetails); err != nil { + return "", errors.Annotatef(err, "cannot update account information: %v", err) } - - fmt.Fprintf( - ctx.Stderr, "\nWelcome, %s. 
You are now logged into %q.\n", - registrationParams.userTag.Id(), registrationParams.controllerName, - ) - return c.maybeSetCurrentModel(ctx, store, registrationParams.controllerName, accountDetails.User, models) + return controllerName, nil } func (c *registerCommand) listModels(store jujuclient.ClientStore, controllerName, userName string) ([]base.UserModel, error) { @@ -216,7 +358,7 @@ func (c *registerCommand) maybeSetCurrentModel(ctx *cmd.Context, store jujuclient.ClientStore, controllerName, userName string, models []base.UserModel) error { if len(models) == 0 { - fmt.Fprintf(ctx.Stderr, "\n%s\n\n", errNoModels.Error()) + fmt.Fprint(ctx.Stderr, noModelsMessage) return nil } @@ -231,51 +373,62 @@ if err != nil { return errors.Trace(err) } - fmt.Fprintf(ctx.Stderr, "\nCurrent model set to %q.\n\n", modelName) - } else { - fmt.Fprintf(ctx.Stderr, ` + fmt.Fprintf(ctx.Stderr, "\nCurrent model set to %q.\n", modelName) + return nil + } + fmt.Fprintf(ctx.Stderr, ` There are %d models available. 
Use "juju switch" to select one of them: `, len(models)) - user := names.NewUserTag(userName) - ownerModelNames := make(set.Strings) - otherModelNames := make(set.Strings) - for _, model := range models { - if model.Owner == userName { - ownerModelNames.Add(model.Name) - continue - } - owner := names.NewUserTag(model.Owner) - modelName := ownerQualifiedModelName(model.Name, owner, user) - otherModelNames.Add(modelName) - } - for _, modelName := range ownerModelNames.SortedValues() { - fmt.Fprintf(ctx.Stderr, " - juju switch %s\n", modelName) - } - for _, modelName := range otherModelNames.SortedValues() { - fmt.Fprintf(ctx.Stderr, " - juju switch %s\n", modelName) + user := names.NewUserTag(userName) + ownerModelNames := make(set.Strings) + otherModelNames := make(set.Strings) + for _, model := range models { + if model.Owner == userName { + ownerModelNames.Add(model.Name) + continue } - fmt.Fprintln(ctx.Stderr) + owner := names.NewUserTag(model.Owner) + modelName := common.OwnerQualifiedModelName(model.Name, owner, user) + otherModelNames.Add(modelName) + } + for _, modelName := range ownerModelNames.SortedValues() { + fmt.Fprintf(ctx.Stderr, " - juju switch %s\n", modelName) + } + for _, modelName := range otherModelNames.SortedValues() { + fmt.Fprintf(ctx.Stderr, " - juju switch %s\n", modelName) } return nil } type registrationParams struct { - userTag names.UserTag - controllerName string - controllerAddrs []string - key [32]byte - nonce [24]byte - newPassword string + // publicHost holds the host name of a public controller. + // If this is set, all other fields will be empty. + publicHost string + + defaultControllerName string + userTag names.UserTag + controllerAddrs []string + key [32]byte + nonce [24]byte + newPassword string } // getParameters gets all of the parameters required for registering, prompting // the user as necessary. 
func (c *registerCommand) getParameters(ctx *cmd.Context, store jujuclient.ClientStore) (*registrationParams, error) { - + var params registrationParams + if strings.Contains(c.Arg, ".") || c.Arg == "localhost" { + // Looks like a host name - no URL-encoded base64 string should + // contain a dot and every public controller name should. + // Allow localhost for development purposes. + params.publicHost = c.Arg + // No need for password shenanigans if we're using a public controller. + return ¶ms, nil + } // Decode key, username, controller addresses from the string supplied // on the command line. - decodedData, err := base64.URLEncoding.DecodeString(c.EncodedData) + decodedData, err := base64.URLEncoding.DecodeString(c.Arg) if err != nil { return nil, errors.Trace(err) } @@ -284,21 +437,13 @@ return nil, errors.Trace(err) } - params := registrationParams{ - controllerAddrs: info.Addrs, - userTag: names.NewUserTag(info.User), - } + params.controllerAddrs = info.Addrs + params.userTag = names.NewUserTag(info.User) if len(info.SecretKey) != len(params.key) { return nil, errors.NotValidf("secret key") } copy(params.key[:], info.SecretKey) - - // Prompt the user for the controller name. - controllerName, err := c.promptControllerName(store, info.ControllerName, ctx.Stderr, ctx.Stdin) - if err != nil { - return nil, errors.Trace(err) - } - params.controllerName = controllerName + params.defaultControllerName = info.ControllerName // Prompt the user for the new password to set. 
newPassword, err := c.promptNewPassword(ctx.Stderr, ctx.Stdin) @@ -316,6 +461,11 @@ } func (c *registerCommand) secretKeyLogin(addrs []string, request params.SecretKeyLoginRequest) (*params.SecretKeyLoginResponse, error) { + apiContext, err := c.APIContext() + if err != nil { + return nil, errors.Annotate(err, "getting API context") + } + buf, err := json.Marshal(&request) if err != nil { return nil, errors.Annotate(err, "marshalling request") @@ -347,6 +497,8 @@ } // Using the address we connected to above, perform the request. + // A success response will include a macaroon cookie that we can + // use to log in with. urlString := fmt.Sprintf("https://%s/register", apiAddr) httpReq, err := http.NewRequest("POST", urlString, r) if err != nil { @@ -354,6 +506,7 @@ } httpReq.Header.Set("Content-Type", "application/json") httpClient := utils.GetNonValidatingHTTPClient() + httpClient.Jar = apiContext.Jar httpResp, err := httpClient.Do(httpReq) if err != nil { return nil, errors.Trace(err) @@ -370,7 +523,7 @@ var resp params.SecretKeyLoginResponse if err := json.NewDecoder(httpResp.Body).Decode(&resp); err != nil { - return nil, errors.Trace(err) + return nil, errors.Annotatef(err, "cannot decode login response") } return &resp, nil } @@ -378,7 +531,7 @@ func (c *registerCommand) promptNewPassword(stderr io.Writer, stdin io.Reader) (string, error) { password, err := c.readPassword("Enter a new password: ", stderr, stdin) if err != nil { - return "", errors.Trace(err) + return "", errors.Annotatef(err, "cannot read password") } if password == "" { return "", errors.NewNotValid(nil, "you must specify a non-empty password") @@ -393,37 +546,38 @@ return password, nil } -const errControllerConflicts = `WARNING: The controller proposed %q which clashes with an existing` + - ` controller. The two controllers are entirely different. 
- -` - func (c *registerCommand) promptControllerName(store jujuclient.ClientStore, suggestedName string, stderr io.Writer, stdin io.Reader) (string, error) { - _, err := store.ControllerByName(suggestedName) - if err == nil { - fmt.Fprintf(stderr, errControllerConflicts, suggestedName) - suggestedName = "" - } - var setMsg string - setMsg = "Enter a name for this controller: " if suggestedName != "" { - setMsg = fmt.Sprintf("Enter a name for this controller [%s]: ", - suggestedName) - } - fmt.Fprintf(stderr, setMsg) - defer stderr.Write([]byte{'\n'}) - name, err := c.readLine(stdin) - if err != nil { - return "", errors.Trace(err) - } - name = strings.TrimSpace(name) - if name == "" && suggestedName == "" { - return "", errors.NewNotValid(nil, "you must specify a non-empty controller name") + if _, err := store.ControllerByName(suggestedName); err == nil { + suggestedName = "" + } } - if name == "" && suggestedName != "" { - return suggestedName, nil + for { + var setMsg string + setMsg = "Enter a name for this controller: " + if suggestedName != "" { + setMsg = fmt.Sprintf("Enter a name for this controller [%s]: ", suggestedName) + } + fmt.Fprintf(stderr, setMsg) + name, err := c.readLine(stdin) + if err != nil { + return "", errors.Trace(err) + } + name = strings.TrimSpace(name) + if name == "" { + if suggestedName == "" { + fmt.Fprintln(stderr, "You must specify a non-empty controller name.") + continue + } + name = suggestedName + } + _, err = store.ControllerByName(name) + if err == nil { + fmt.Fprintf(stderr, "Controller %q already exists.\n", name) + continue + } + return name, nil } - return name, nil } func (c *registerCommand) readPassword(prompt string, stderr io.Writer, stdin io.Reader) (string, error) { diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/controller/register_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/controller/register_test.go --- 
juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/controller/register_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/controller/register_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,7 +4,6 @@ package controller_test import ( - "bytes" "encoding/asn1" "encoding/base64" "encoding/json" @@ -20,12 +19,13 @@ jc "github.com/juju/testing/checkers" "golang.org/x/crypto/nacl/secretbox" gc "gopkg.in/check.v1" - "gopkg.in/macaroon.v1" + "gopkg.in/juju/names.v2" "github.com/juju/juju/api" "github.com/juju/juju/api/base" "github.com/juju/juju/apiserver/params" "github.com/juju/juju/cmd/juju/controller" + cmdtesting "github.com/juju/juju/cmd/testing" "github.com/juju/juju/jujuclient" "github.com/juju/juju/jujuclient/jujuclienttesting" "github.com/juju/juju/testing" @@ -43,6 +43,12 @@ httpHandler http.Handler } +const noModelsText = ` +There are no models available. You can add models with +"juju add-model", or you can ask an administrator or owner +of a model to grant access to that model with "juju grant". 
+` + var _ = gc.Suite(&RegisterSuite{}) func (s *RegisterSuite) SetUpTest(c *gc.C) { @@ -57,7 +63,7 @@ serverURL, err := url.Parse(s.server.URL) c.Assert(err, jc.ErrorIsNil) s.apiConnection = &mockAPIConnection{ - controllerTag: testing.ModelTag, + controllerTag: names.NewControllerTag(mockControllerUUID), addr: serverURL.Host, } s.listModelsControllerName = "" @@ -76,61 +82,6 @@ s.FakeJujuXDGDataHomeSuite.TearDownTest(c) } -func (s *RegisterSuite) apiOpen(info *api.Info, opts api.DialOpts) (api.Connection, error) { - if s.apiOpenError != nil { - return nil, s.apiOpenError - } - s.apiConnection.info = info - s.apiConnection.opts = opts - return s.apiConnection, nil -} - -func (s *RegisterSuite) run(c *gc.C, stdin io.Reader, args ...string) (*cmd.Context, error) { - command := controller.NewRegisterCommandForTest(s.apiOpen, s.listModels, s.store) - err := testing.InitCommand(command, args) - c.Assert(err, jc.ErrorIsNil) - ctx := testing.Context(c) - ctx.Stdin = stdin - return ctx, command.Run(ctx) -} - -func (s *RegisterSuite) encodeRegistrationData(c *gc.C, user string, secretKey []byte) string { - data, err := asn1.Marshal(jujuclient.RegistrationInfo{ - User: user, - Addrs: []string{s.apiConnection.addr}, - SecretKey: secretKey, - }) - c.Assert(err, jc.ErrorIsNil) - // Append some junk to the end of the encoded data to - // ensure that, if we have to pad the data in add-user, - // register can still decode it. 
- data = append(data, 0, 0, 0) - return base64.URLEncoding.EncodeToString(data) -} - -func (s *RegisterSuite) encodeRegistrationDataWithControllerName(c *gc.C, user string, secretKey []byte, controller string) string { - data, err := asn1.Marshal(jujuclient.RegistrationInfo{ - User: user, - Addrs: []string{s.apiConnection.addr}, - SecretKey: secretKey, - ControllerName: controller, - }) - c.Assert(err, jc.ErrorIsNil) - // Append some junk to the end of the encoded data to - // ensure that, if we have to pad the data in add-user, - // register can still decode it. - data = append(data, 0, 0, 0) - return base64.URLEncoding.EncodeToString(data) -} - -func (s *RegisterSuite) seal(c *gc.C, message, key, nonce []byte) []byte { - var keyArray [32]byte - var nonceArray [24]byte - c.Assert(copy(keyArray[:], key), gc.Equals, len(keyArray)) - c.Assert(copy(nonceArray[:], nonce), gc.Equals, len(nonceArray)) - return secretbox.Seal(nil, message, &nonceArray, &keyArray) -} - func (s *RegisterSuite) TestInit(c *gc.C) { registerCommand := controller.NewRegisterCommandForTest(nil, nil, nil) @@ -139,71 +90,65 @@ err = testing.InitCommand(registerCommand, []string{"foo"}) c.Assert(err, jc.ErrorIsNil) - c.Assert(registerCommand.EncodedData, gc.Equals, "foo") + c.Assert(registerCommand.Arg, gc.Equals, "foo") err = testing.InitCommand(registerCommand, []string{"foo", "bar"}) c.Assert(err, gc.ErrorMatches, `unrecognized args: \["bar"\]`) } func (s *RegisterSuite) TestRegister(c *gc.C) { - ctx := s.testRegister(c, "") + s.testRegisterSuccess(c, nil, "") c.Assert(s.listModelsControllerName, gc.Equals, "controller-name") - c.Assert(s.listModelsUserName, gc.Equals, "bob@local") - stderr := testing.Stderr(ctx) - c.Assert(stderr, gc.Equals, ` -Enter a name for this controller [controller-name]: -Enter a new password: -Confirm password: - -Welcome, bob. You are now logged into "controller-name". - -There are no models available. 
You can add models with -"juju add-model", or you can ask an administrator or owner -of a model to grant access to that model with "juju grant". - -`[1:]) + c.Assert(s.listModelsUserName, gc.Equals, "bob") } func (s *RegisterSuite) TestRegisterOneModel(c *gc.C) { s.listModels = func(_ jujuclient.ClientStore, controllerName, userName string) ([]base.UserModel, error) { return []base.UserModel{{ Name: "theoneandonly", - Owner: "carol@local", - UUID: "df136476-12e9-11e4-8a70-b2227cce2b54", + Owner: "carol", + UUID: mockControllerUUID, }}, nil } - s.testRegister(c, "") + prompter := cmdtesting.NewSeqPrompter(c, "»", ` +Enter a new password: »hunter2 + +Confirm password: »hunter2 + +Initial password successfully set for bob. +Enter a name for this controller \[controller-name\]: » + +Welcome, bob. You are now logged into "controller-name". + +Current model set to "carol/theoneandonly". +`[1:]) + s.testRegisterSuccess(c, prompter, "") c.Assert( s.store.Models["controller-name"].CurrentModel, - gc.Equals, "carol@local/theoneandonly", + gc.Equals, "carol/theoneandonly", ) + prompter.CheckDone() } func (s *RegisterSuite) TestRegisterMultipleModels(c *gc.C) { s.listModels = func(_ jujuclient.ClientStore, controllerName, userName string) ([]base.UserModel, error) { return []base.UserModel{{ Name: "model1", - Owner: "bob@local", - UUID: "df136476-12e9-11e4-8a70-b2227cce2b54", + Owner: "bob", + UUID: mockControllerUUID, }, { Name: "model2", - Owner: "bob@local", - UUID: "df136476-12e9-11e4-8a70-b2227cce2b55", + Owner: "bob", + UUID: "eeeeeeee-12e9-11e4-8a70-b2227cce2b55", }}, nil } - ctx := s.testRegister(c, "") + prompter := cmdtesting.NewSeqPrompter(c, "»", ` +Enter a new password: »hunter2 - // When there are multiple models, no current model will be set. - // Instead, the command will output the list of models and inform - // the user how to set the current model. 
- _, err := s.store.CurrentModel("controller-name") - c.Assert(err, jc.Satisfies, errors.IsNotFound) +Confirm password: »hunter2 - stderr := testing.Stderr(ctx) - c.Assert(stderr, gc.Equals, ` -Enter a name for this controller [controller-name]: -Enter a new password: -Confirm password: +Initial password successfully set for bob. +Enter a name for this controller \[controller-name\]: » Welcome, bob. You are now logged into "controller-name". @@ -211,57 +156,59 @@ one of them: - juju switch model1 - juju switch model2 - `[1:]) + defer prompter.CheckDone() + s.testRegisterSuccess(c, prompter, "") + + // When there are multiple models, no current model will be set. + // Instead, the command will output the list of models and inform + // the user how to set the current model. + _, err := s.store.CurrentModel("controller-name") + c.Assert(err, jc.Satisfies, errors.IsNotFound) } -func (s *RegisterSuite) testRegister(c *gc.C, expectedError string) *cmd.Context { - secretKey := []byte(strings.Repeat("X", 32)) - respNonce := []byte(strings.Repeat("X", 24)) +// testRegisterSuccess tests that the register command when the given +// stdio instance is used for input and output. If stdio is nil, a +// default prompter will be used. +// If controllerName is non-empty, that name will be expected +// to be the name of the registered controller. 
+func (s *RegisterSuite) testRegisterSuccess(c *gc.C, stdio io.ReadWriter, controllerName string) { + srv := s.mockServer(c) + s.httpHandler = srv - macaroon, err := macaroon.New(nil, "mymacaroon", "tone") - c.Assert(err, jc.ErrorIsNil) - macaroonJSON, err := macaroon.MarshalJSON() - c.Assert(err, jc.ErrorIsNil) + if controllerName == "" { + controllerName = "controller-name" + } - var requests []*http.Request - var requestBodies [][]byte - const controllerUUID = "df136476-12e9-11e4-8a70-b2227cce2b54" - responsePayloadPlaintext, err := json.Marshal(params.SecretKeyLoginResponsePayload{ - CACert: testing.CACert, - ControllerUUID: controllerUUID, - Macaroon: macaroon, - }) - c.Assert(err, jc.ErrorIsNil) - response, err := json.Marshal(params.SecretKeyLoginResponse{ - Nonce: respNonce, - PayloadCiphertext: s.seal(c, responsePayloadPlaintext, secretKey, respNonce), - }) - c.Assert(err, jc.ErrorIsNil) - s.httpHandler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - requests = append(requests, r) - requestBody, err := ioutil.ReadAll(requests[0].Body) - c.Check(err, jc.ErrorIsNil) - requestBodies = append(requestBodies, requestBody) - _, err = w.Write(response) - c.Check(err, jc.ErrorIsNil) - }) + registrationData := s.encodeRegistrationData(c, jujuclient.RegistrationInfo{ + User: "bob", + SecretKey: mockSecretKey, + ControllerName: "controller-name", + }) + c.Logf("registration data: %q", registrationData) + if stdio == nil { + prompter := cmdtesting.NewSeqPrompter(c, "»", ` +Enter a new password: »hunter2 + +Confirm password: »hunter2 + +Initial password successfully set for bob. +Enter a name for this controller \[controller-name\]: » - registrationData := s.encodeRegistrationDataWithControllerName(c, "bob", secretKey, "controller-name") - stdin := strings.NewReader("\nhunter2\nhunter2\n") - ctx, err := s.run(c, stdin, registrationData) - if expectedError != "" { - c.Assert(err, gc.ErrorMatches, expectedError) - return ctx +Welcome, bob. 
You are now logged into "controller-name". +`[1:]+noModelsText) + defer prompter.CheckDone() + stdio = prompter } + err := s.run(c, stdio, registrationData) c.Assert(err, jc.ErrorIsNil) // There should have been one POST command to "/register". - c.Assert(requests, gc.HasLen, 1) - c.Assert(requests[0].Method, gc.Equals, "POST") - c.Assert(requests[0].URL.Path, gc.Equals, "/register") + c.Assert(srv.requests, gc.HasLen, 1) + c.Assert(srv.requests[0].Method, gc.Equals, "POST") + c.Assert(srv.requests[0].URL.Path, gc.Equals, "/register") var request params.SecretKeyLoginRequest - err = json.Unmarshal(requestBodies[0], &request) + err = json.Unmarshal(srv.requestBodies[0], &request) c.Assert(err, jc.ErrorIsNil) c.Assert(request.User, jc.DeepEquals, "user-bob") c.Assert(request.Nonce, gc.HasLen, 24) @@ -269,115 +216,205 @@ "hunter2", }) c.Assert(err, jc.ErrorIsNil) - expectedCiphertext := s.seal(c, requestPayloadPlaintext, secretKey, request.Nonce) + expectedCiphertext := s.seal(c, requestPayloadPlaintext, mockSecretKey, request.Nonce) c.Assert(request.PayloadCiphertext, jc.DeepEquals, expectedCiphertext) // The controller and account details should be recorded with - // the specified controller name ("controller-name") and user + // the specified controller name and user // name from the registration string. 
- controller, err := s.store.ControllerByName("controller-name") + controller, err := s.store.ControllerByName(controllerName) c.Assert(err, jc.ErrorIsNil) c.Assert(controller, jc.DeepEquals, &jujuclient.ControllerDetails{ - ControllerUUID: controllerUUID, + ControllerUUID: mockControllerUUID, APIEndpoints: []string{s.apiConnection.addr}, CACert: testing.CACert, }) - account, err := s.store.AccountDetails("controller-name") + account, err := s.store.AccountDetails(controllerName) c.Assert(err, jc.ErrorIsNil) c.Assert(account, jc.DeepEquals, &jujuclient.AccountDetails{ - User: "bob@local", - Macaroon: string(macaroonJSON), + User: "bob", + LastKnownAccess: "login", }) - return ctx } func (s *RegisterSuite) TestRegisterInvalidRegistrationData(c *gc.C) { - _, err := s.run(c, bytes.NewReader(nil), "not base64") + err := s.run(c, nil, "not base64") c.Assert(err, gc.ErrorMatches, "illegal base64 data at input byte 3") - _, err = s.run(c, bytes.NewReader(nil), "YXNuLjEK") + err = s.run(c, nil, "YXNuLjEK") c.Assert(err, gc.ErrorMatches, "asn1: structure error: .*") } func (s *RegisterSuite) TestRegisterEmptyControllerName(c *gc.C) { - secretKey := []byte(strings.Repeat("X", 32)) - registrationData := s.encodeRegistrationData(c, "bob", secretKey) - stdin := strings.NewReader("\n") - _, err := s.run(c, stdin, registrationData) - c.Assert(err, gc.ErrorMatches, "you must specify a non-empty controller name") + srv := s.mockServer(c) + s.httpHandler = srv + registrationData := s.encodeRegistrationData(c, jujuclient.RegistrationInfo{ + User: "bob", + SecretKey: mockSecretKey, + }) + // We check that it loops when an empty controller name + // is entered and that the loop terminates when the user + // types ^D. + prompter := cmdtesting.NewSeqPrompter(c, "»", ` +Enter a new password: »hunter2 + +Confirm password: »hunter2 + +Initial password successfully set for bob. +Enter a name for this controller: » +You must specify a non-empty controller name. 
+Enter a name for this controller: » +You must specify a non-empty controller name. +Enter a name for this controller: »» +`[1:]) + err := s.run(c, prompter, registrationData) + c.Assert(err, gc.ErrorMatches, "EOF") + prompter.AssertDone() } func (s *RegisterSuite) TestRegisterControllerNameExists(c *gc.C) { - err := s.store.UpdateController("controller-name", jujuclient.ControllerDetails{ - ControllerUUID: "df136476-12e9-11e4-8a70-b2227cce2b54", + err := s.store.AddController("controller-name", jujuclient.ControllerDetails{ + ControllerUUID: "0d75314a-5266-4f4f-8523-415be76f92dc", CACert: testing.CACert, }) + c.Assert(err, jc.ErrorIsNil) + prompter := cmdtesting.NewSeqPrompter(c, "»", ` +Enter a new password: »hunter2 + +Confirm password: »hunter2 + +Initial password successfully set for bob. +Enter a name for this controller: »controller-name +Controller "controller-name" already exists. +Enter a name for this controller: »other-name + +Welcome, bob. You are now logged into "other-name". +`[1:]+noModelsText) + s.testRegisterSuccess(c, prompter, "other-name") + prompter.AssertDone() +} + +func (s *RegisterSuite) TestControllerUUIDExists(c *gc.C) { + // Controller has the UUID from s.testRegister to mimic a user with + // this controller already registered (regardless of its name). 
+ err := s.store.AddController("controller-name", jujuclient.ControllerDetails{ + ControllerUUID: mockControllerUUID, + CACert: testing.CACert, + }) + + s.listModels = func(_ jujuclient.ClientStore, controllerName, userName string) ([]base.UserModel, error) { + return []base.UserModel{{ + Name: "model-name", + Owner: "bob", + UUID: mockControllerUUID, + }}, nil + } + + registrationData := s.encodeRegistrationData(c, jujuclient.RegistrationInfo{ + User: "bob", + SecretKey: mockSecretKey, + ControllerName: "controller-name", + }) - secretKey := []byte(strings.Repeat("X", 32)) - registrationData := s.encodeRegistrationData(c, "bob", secretKey) - stdin := strings.NewReader("controller-name\nhunter2\nhunter2\n") - _, err = s.run(c, stdin, registrationData) - c.Assert(err, gc.ErrorMatches, `controller "controller-name" already exists`) + srv := s.mockServer(c) + s.httpHandler = srv + + prompter := cmdtesting.NewSeqPrompter(c, "»", ` +Enter a new password: »hunter2 + +Confirm password: »hunter2 + +Initial password successfully set for bob. +`[1:]) + err = s.run(c, prompter, registrationData) + c.Assert(err, gc.ErrorMatches, `controller is already registered as "controller-name"`, gc.Commentf("details: %v", errors.Details(err))) + prompter.CheckDone() } func (s *RegisterSuite) TestProposedControllerNameExists(c *gc.C) { - err := s.store.UpdateController("controller-name", jujuclient.ControllerDetails{ - ControllerUUID: "df136476-12e9-11e4-8a70-b2227cce2b54", + // Controller does not have the UUID from s.testRegister, thereby + // mimicing a user with an already registered 'foreign' controller. 
+ err := s.store.AddController("controller-name", jujuclient.ControllerDetails{ + ControllerUUID: "0d75314a-5266-4f4f-8523-415be76f92dc", CACert: testing.CACert, }) + c.Assert(err, jc.ErrorIsNil) s.listModels = func(_ jujuclient.ClientStore, controllerName, userName string) ([]base.UserModel, error) { return []base.UserModel{{ Name: "model-name", - Owner: "bob@local", - UUID: "df136476-12e9-11e4-8a70-b2227cce2b54", + Owner: "bob", + UUID: mockControllerUUID, }}, nil } - ctx := s.testRegister(c, "you must specify a non-empty controller name") + prompter := cmdtesting.NewSeqPrompter(c, "»", ` +Enter a new password: »hunter2 - secretKey := []byte(strings.Repeat("X", 32)) - registrationData := s.encodeRegistrationDataWithControllerName(c, "bob", secretKey, "controller-name") - stdin := strings.NewReader("controller-name1\nhunter2\nhunter2\n") - _, err = s.run(c, stdin, registrationData) - c.Assert(err, jc.ErrorIsNil) - stderr := testing.Stderr(ctx) - c.Assert(stderr, gc.Equals, ` -WARNING: The controller proposed "controller-name" which clashes with an existing controller. The two controllers are entirely different. +Confirm password: »hunter2 -Enter a name for this controller: -`[1:]) +Initial password successfully set for bob. +Enter a name for this controller: »controller-name +Controller "controller-name" already exists. +Enter a name for this controller: »other-name + +Welcome, bob. You are now logged into "other-name". +Current model set to "bob/model-name". 
+`[1:]) + defer prompter.CheckDone() + s.testRegisterSuccess(c, prompter, "other-name") } func (s *RegisterSuite) TestRegisterEmptyPassword(c *gc.C) { - secretKey := []byte(strings.Repeat("X", 32)) - registrationData := s.encodeRegistrationData(c, "bob", secretKey) - stdin := strings.NewReader("controller-name\n\n") - _, err := s.run(c, stdin, registrationData) + registrationData := s.encodeRegistrationData(c, jujuclient.RegistrationInfo{ + User: "bob", + SecretKey: mockSecretKey, + }) + prompter := cmdtesting.NewSeqPrompter(c, "»", ` +Enter a new password: » + +`[1:]) + defer prompter.CheckDone() + err := s.run(c, prompter, registrationData) c.Assert(err, gc.ErrorMatches, "you must specify a non-empty password") } func (s *RegisterSuite) TestRegisterPasswordMismatch(c *gc.C) { - secretKey := []byte(strings.Repeat("X", 32)) - registrationData := s.encodeRegistrationData(c, "bob", secretKey) - stdin := strings.NewReader("controller-name\nhunter2\nhunter3\n") - _, err := s.run(c, stdin, registrationData) + registrationData := s.encodeRegistrationData(c, jujuclient.RegistrationInfo{ + User: "bob", + SecretKey: mockSecretKey, + }) + prompter := cmdtesting.NewSeqPrompter(c, "»", ` +Enter a new password: »hunter2 + +Confirm password: »hunter3 + +`[1:]) + defer prompter.CheckDone() + err := s.run(c, prompter, registrationData) c.Assert(err, gc.ErrorMatches, "passwords do not match") } func (s *RegisterSuite) TestAPIOpenError(c *gc.C) { - secretKey := []byte(strings.Repeat("X", 32)) - registrationData := s.encodeRegistrationData(c, "bob", secretKey) - stdin := strings.NewReader("controller-name\nhunter2\nhunter2\n") + registrationData := s.encodeRegistrationData(c, jujuclient.RegistrationInfo{ + User: "bob", + SecretKey: mockSecretKey, + }) + prompter := cmdtesting.NewSeqPrompter(c, "»", ` +Enter a new password: »hunter2 + +Confirm password: »hunter2 + +`[1:]) + defer prompter.CheckDone() s.apiOpenError = errors.New("open failed") - _, err := s.run(c, stdin, 
registrationData) + err := s.run(c, prompter, registrationData) c.Assert(err, gc.ErrorMatches, `open failed`) } func (s *RegisterSuite) TestRegisterServerError(c *gc.C) { - secretKey := []byte(strings.Repeat("X", 32)) response, err := json.Marshal(params.ErrorResult{ Error: ¶ms.Error{Message: "xyz", Code: "123"}, }) @@ -387,12 +424,176 @@ _, err = w.Write(response) c.Check(err, jc.ErrorIsNil) }) + prompter := cmdtesting.NewSeqPrompter(c, "»", ` +Enter a new password: »hunter2 + +Confirm password: »hunter2 - registrationData := s.encodeRegistrationData(c, "bob", secretKey) - stdin := strings.NewReader("controller-name\nhunter2\nhunter2\n") - _, err = s.run(c, stdin, registrationData) +`[1:]) + + registrationData := s.encodeRegistrationData(c, jujuclient.RegistrationInfo{ + User: "bob", + SecretKey: mockSecretKey, + }) + err = s.run(c, prompter, registrationData) c.Assert(err, gc.ErrorMatches, "xyz") + // Check that the controller hasn't been added. _, err = s.store.ControllerByName("controller-name") c.Assert(err, jc.Satisfies, errors.IsNotFound) } + +func (s *RegisterSuite) TestRegisterPublic(c *gc.C) { + s.apiConnection.authTag = names.NewUserTag("bob@external") + s.apiConnection.controllerAccess = "login" + prompter := cmdtesting.NewSeqPrompter(c, "»", ` +Enter a name for this controller: »public-controller-name + +Welcome, bob@external. You are now logged into "public-controller-name". +`[1:]+noModelsText) + defer prompter.CheckDone() + err := s.run(c, prompter, "0.1.2.3") + c.Assert(err, jc.ErrorIsNil) + + // The controller and account details should be recorded with + // the specified controller name and user + // name from the auth tag. 
+ + controller, err := s.store.ControllerByName("public-controller-name") + c.Assert(err, jc.ErrorIsNil) + c.Assert(controller, jc.DeepEquals, &jujuclient.ControllerDetails{ + ControllerUUID: mockControllerUUID, + APIEndpoints: []string{"0.1.2.3:443"}, + }) + account, err := s.store.AccountDetails("public-controller-name") + c.Assert(err, jc.ErrorIsNil) + c.Assert(account, jc.DeepEquals, &jujuclient.AccountDetails{ + User: "bob@external", + LastKnownAccess: "login", + }) +} + +func (s *RegisterSuite) TestRegisterPublicAPIOpenError(c *gc.C) { + s.apiOpenError = errors.New("open failed") + err := s.run(c, noPrompts(c), "0.1.2.3") + c.Assert(err, gc.ErrorMatches, `open failed`) +} + +func (s *RegisterSuite) TestRegisterPublicWithPort(c *gc.C) { + s.apiConnection.authTag = names.NewUserTag("bob@external") + s.apiConnection.controllerAccess = "login" + prompter := cmdtesting.NewSeqPrompter(c, "»", ` +Enter a name for this controller: »public-controller-name + +Welcome, bob@external. You are now logged into "public-controller-name". +`[1:]+noModelsText) + defer prompter.CheckDone() + err := s.run(c, prompter, "0.1.2.3:5678") + c.Assert(err, jc.ErrorIsNil) + + // The controller and account details should be recorded with + // the specified controller name and user + // name from the auth tag. + + controller, err := s.store.ControllerByName("public-controller-name") + c.Assert(err, jc.ErrorIsNil) + c.Assert(controller, jc.DeepEquals, &jujuclient.ControllerDetails{ + ControllerUUID: mockControllerUUID, + APIEndpoints: []string{"0.1.2.3:5678"}, + }) +} + +type mockServer struct { + requests []*http.Request + requestBodies [][]byte + response []byte +} + +const mockControllerUUID = "df136476-12e9-11e4-8a70-b2227cce2b54" + +var mockSecretKey = []byte(strings.Repeat("X", 32)) + +// mockServer returns a mock HTTP server that will always respond with a +// response encoded with mockSecretKey and a constant nonce, containing +// testing.CACert and mockControllerUUID. 
+// +// Each time a call is made, the requests and requestBodies fields in +// the returned mockServer instance are appended with the request details. +func (s *RegisterSuite) mockServer(c *gc.C) *mockServer { + respNonce := []byte(strings.Repeat("X", 24)) + + responsePayloadPlaintext, err := json.Marshal(params.SecretKeyLoginResponsePayload{ + CACert: testing.CACert, + ControllerUUID: mockControllerUUID, + }) + c.Assert(err, jc.ErrorIsNil) + response, err := json.Marshal(params.SecretKeyLoginResponse{ + Nonce: respNonce, + PayloadCiphertext: s.seal(c, responsePayloadPlaintext, mockSecretKey, respNonce), + }) + c.Assert(err, jc.ErrorIsNil) + return &mockServer{ + response: response, + } +} + +func (s *RegisterSuite) encodeRegistrationData(c *gc.C, info jujuclient.RegistrationInfo) string { + info.Addrs = []string{s.apiConnection.addr} + data, err := asn1.Marshal(info) + c.Assert(err, jc.ErrorIsNil) + // Append some junk to the end of the encoded data to + // ensure that, if we have to pad the data in add-user, + // register can still decode it. 
+ data = append(data, 0, 0, 0) + return base64.URLEncoding.EncodeToString(data) +} + +func (srv *mockServer) ServeHTTP(w http.ResponseWriter, r *http.Request) { + srv.requests = append(srv.requests, r) + requestBody, err := ioutil.ReadAll(r.Body) + if err != nil { + panic(err) + } + srv.requestBodies = append(srv.requestBodies, requestBody) + _, err = w.Write(srv.response) + if err != nil { + panic(err) + } +} + +func (s *RegisterSuite) apiOpen(info *api.Info, opts api.DialOpts) (api.Connection, error) { + if s.apiOpenError != nil { + return nil, s.apiOpenError + } + return s.apiConnection, nil +} + +func (s *RegisterSuite) run(c *gc.C, stdio io.ReadWriter, args ...string) error { + if stdio == nil { + p := noPrompts(c) + stdio = p + defer p.CheckDone() + } + + command := controller.NewRegisterCommandForTest(s.apiOpen, s.listModels, s.store) + err := testing.InitCommand(command, args) + c.Assert(err, jc.ErrorIsNil) + return command.Run(&cmd.Context{ + Dir: c.MkDir(), + Stdin: stdio, + Stdout: stdio, + Stderr: stdio, + }) +} + +func noPrompts(c *gc.C) *cmdtesting.SeqPrompter { + return cmdtesting.NewSeqPrompter(c, "»", "") +} + +func (s *RegisterSuite) seal(c *gc.C, message, key, nonce []byte) []byte { + var keyArray [32]byte + var nonceArray [24]byte + c.Assert(copy(keyArray[:], key), gc.Equals, len(keyArray)) + c.Assert(copy(nonceArray[:], nonce), gc.Equals, len(nonceArray)) + return secretbox.Seal(nil, message, &nonceArray, &keyArray) +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/controller/removeblocks.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/controller/removeblocks.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/controller/removeblocks.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/controller/removeblocks.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,64 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. 
- -package controller - -import ( - "github.com/juju/cmd" - "github.com/juju/errors" - - "github.com/juju/juju/cmd/modelcmd" -) - -// NewRemoveBlocksCommand returns a command that allows a controller admin -// to remove blocks from the controller. -func NewRemoveBlocksCommand() cmd.Command { - return modelcmd.WrapController(&removeBlocksCommand{}) -} - -type removeBlocksCommand struct { - modelcmd.ControllerCommandBase - api removeBlocksAPI -} - -type removeBlocksAPI interface { - Close() error - RemoveBlocks() error -} - -var removeBlocksDoc = ` -Remove all blocks in the Juju controller. - -A controller administrator is able to remove all the blocks that have been added -in a Juju controller. - -See Also: - juju block - juju unblock -` - -// Info implements Command.Info -func (c *removeBlocksCommand) Info() *cmd.Info { - return &cmd.Info{ - Name: "remove-all-blocks", - Purpose: "Remove all blocks in the Juju controller.", - Doc: removeBlocksDoc, - } -} - -func (c *removeBlocksCommand) getAPI() (removeBlocksAPI, error) { - if c.api != nil { - return c.api, nil - } - return c.NewControllerAPIClient() -} - -// Run implements Command.Run -func (c *removeBlocksCommand) Run(ctx *cmd.Context) error { - client, err := c.getAPI() - if err != nil { - return errors.Trace(err) - } - defer client.Close() - return errors.Annotatef(client.RemoveBlocks(), "cannot remove blocks") -} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/controller/removeblocks_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/controller/removeblocks_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/controller/removeblocks_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/controller/removeblocks_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,69 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. 
- -package controller_test - -import ( - "github.com/juju/cmd" - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" - - "github.com/juju/juju/apiserver/common" - "github.com/juju/juju/cmd/juju/controller" - "github.com/juju/juju/jujuclient" - "github.com/juju/juju/jujuclient/jujuclienttesting" - "github.com/juju/juju/testing" -) - -type removeBlocksSuite struct { - baseControllerSuite - api *fakeRemoveBlocksAPI - store *jujuclienttesting.MemStore -} - -var _ = gc.Suite(&removeBlocksSuite{}) - -func (s *removeBlocksSuite) SetUpTest(c *gc.C) { - s.baseControllerSuite.SetUpTest(c) - - s.api = &fakeRemoveBlocksAPI{} - s.store = jujuclienttesting.NewMemStore() - s.store.CurrentControllerName = "fake" - s.store.Controllers["fake"] = jujuclient.ControllerDetails{} -} - -func (s *removeBlocksSuite) newCommand() cmd.Command { - return controller.NewRemoveBlocksCommandForTest(s.api, s.store) -} - -func (s *removeBlocksSuite) TestRemove(c *gc.C) { - _, err := testing.RunCommand(c, s.newCommand()) - c.Assert(err, jc.ErrorIsNil) - c.Assert(s.api.called, jc.IsTrue) -} - -func (s *removeBlocksSuite) TestUnrecognizedArg(c *gc.C) { - _, err := testing.RunCommand(c, s.newCommand(), "whoops") - c.Assert(err, gc.ErrorMatches, `unrecognized args: \["whoops"\]`) - c.Assert(s.api.called, jc.IsFalse) -} - -func (s *removeBlocksSuite) TestEnvironmentsError(c *gc.C) { - s.api.err = common.ErrPerm - _, err := testing.RunCommand(c, s.newCommand()) - c.Assert(err, gc.ErrorMatches, "cannot remove blocks: permission denied") -} - -type fakeRemoveBlocksAPI struct { - err error - called bool -} - -func (f *fakeRemoveBlocksAPI) Close() error { - return nil -} - -func (f *fakeRemoveBlocksAPI) RemoveBlocks() error { - f.called = true - return f.err -} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/controller/showcontroller.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/controller/showcontroller.go --- 
juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/controller/showcontroller.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/controller/showcontroller.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,12 +4,21 @@ package controller import ( + "fmt" + "github.com/juju/cmd" "github.com/juju/errors" - "launchpad.net/gnuflag" + "github.com/juju/gnuflag" + "gopkg.in/juju/names.v2" + "github.com/juju/juju/api/base" + "github.com/juju/juju/api/controller" + "github.com/juju/juju/apiserver/params" "github.com/juju/juju/cmd/modelcmd" + "github.com/juju/juju/environs/bootstrap" "github.com/juju/juju/jujuclient" + "github.com/juju/juju/permission" + "github.com/juju/juju/status" ) var usageShowControllerSummary = ` @@ -26,6 +35,17 @@ See also: controllers`[1:] +type showControllerCommand struct { + modelcmd.JujuCommandBase + + out cmd.Output + store jujuclient.ClientStore + api func(controllerName string) ControllerAccessAPI + + controllerNames []string + showPasswords bool +} + // NewShowControllerCommand returns a command to show details of the desired controllers. func NewShowControllerCommand() cmd.Command { cmd := &showControllerCommand{ @@ -47,7 +67,6 @@ Args: "[ ...]", Purpose: usageShowControllerSummary, Doc: usageShowControllerDetails, - Aliases: []string{"show-controllers"}, } } @@ -61,6 +80,26 @@ }) } +// ControllerAccessAPI defines a subset of the api/controller/Client API. 
+type ControllerAccessAPI interface { + GetControllerAccess(user string) (permission.Access, error) + ModelConfig() (map[string]interface{}, error) + ModelStatus(models ...names.ModelTag) ([]base.ModelStatus, error) + AllModels() ([]base.UserModel, error) + Close() error +} + +func (c *showControllerCommand) getAPI(controllerName string) (ControllerAccessAPI, error) { + if c.api != nil { + return c.api(controllerName), nil + } + api, err := c.NewAPIRoot(c.store, controllerName, "") + if err != nil { + return nil, errors.Annotate(err, "opening API connection") + } + return controller.NewClient(api), nil +} + // Run implements Command.Run func (c *showControllerCommand) Run(ctx *cmd.Context) error { controllerNames := c.controllerNames @@ -74,20 +113,88 @@ controllerNames = []string{currentController} } controllers := make(map[string]ShowControllerDetails) - for _, name := range controllerNames { - one, err := c.store.ControllerByName(name) + for _, controllerName := range controllerNames { + one, err := c.store.ControllerByName(controllerName) if err != nil { return err } - controllers[name] = c.convertControllerForShow(name, one) + var access string + client, err := c.getAPI(controllerName) + if err != nil { + return err + } + defer client.Close() + accountDetails, err := c.store.AccountDetails(controllerName) + if err != nil { + fmt.Fprintln(ctx.Stderr, err) + access = "(error)" + } else { + access = c.userAccess(client, ctx, accountDetails.User) + one.AgentVersion = c.agentVersion(client, ctx) + } + + var details ShowControllerDetails + var modelStatus []base.ModelStatus + allModels, err := client.AllModels() + if err != nil { + details.Errors = append(details.Errors, err.Error()) + continue + } + modelTags := make([]names.ModelTag, len(allModels)) + for i, m := range allModels { + modelTags[i] = names.NewModelTag(m.UUID) + } + modelStatus, err = client.ModelStatus(modelTags...) 
+ if err != nil { + details.Errors = append(details.Errors, err.Error()) + continue + } + c.convertControllerForShow(&details, controllerName, one, access, allModels, modelStatus) + controllers[controllerName] = details } return c.out.Write(ctx, controllers) } +func (c *showControllerCommand) userAccess(client ControllerAccessAPI, ctx *cmd.Context, user string) string { + var access string + userAccess, err := client.GetControllerAccess(user) + if err == nil { + access = string(userAccess) + } else { + code := params.ErrCode(err) + if code != "" { + access = fmt.Sprintf("(%s)", code) + } else { + fmt.Fprintln(ctx.Stderr, err) + access = "(error)" + } + } + return access +} + +func (c *showControllerCommand) agentVersion(client ControllerAccessAPI, ctx *cmd.Context) string { + var ver string + mc, err := client.ModelConfig() + if err != nil { + code := params.ErrCode(err) + if code != "" { + ver = fmt.Sprintf("(%s)", code) + } else { + fmt.Fprintln(ctx.Stderr, err) + ver = "(error)" + } + return ver + } + return mc["agent-version"].(string) +} + type ShowControllerDetails struct { // Details contains the same details that client store caches for this controller. Details ControllerDetails `yaml:"details,omitempty" json:"details,omitempty"` + // Machines is a collection of all machines forming the controller cluster. + Machines map[string]MachineDetails `yaml:"controller-machines,omitempty" json:"controller-machines,omitempty"` + // Models is a collection of all models for this controller. Models map[string]ModelDetails `yaml:"models,omitempty" json:"models,omitempty"` @@ -117,12 +224,36 @@ // CloudRegion is the name of the cloud region that this controller runs in. CloudRegion string `yaml:"region,omitempty" json:"region,omitempty"` + + // AgentVersion is the version of the agent running on this controller. + // AgentVersion need not always exist so we omitempty here. This struct is + // used in both list-controller and show-controller. 
show-controller + // displays the agent version where list-controller does not. + AgentVersion string `yaml:"agent-version,omitempty" json:"agent-version,omitempty"` +} + +// ModelDetails holds details of a model to show. +type MachineDetails struct { + // ID holds the id of the machine. + ID string `yaml:"id,omitempty" json:"id,omitempty"` + + // InstanceID holds the cloud instance id of the machine. + InstanceID string `yaml:"instance-id,omitempty" json:"instance-id,omitempty"` + + // HAStatus holds information informing of the HA status of the machine. + HAStatus string `yaml:"ha-status,omitempty" json:"ha-status,omitempty"` } // ModelDetails holds details of a model to show. type ModelDetails struct { // ModelUUID holds the details of a model. ModelUUID string `yaml:"uuid" json:"uuid"` + + // MachineCount holds the number of machines in the model. + MachineCount *int `yaml:"machine-count,omitempty" json:"machine-count,omitempty"` + + // CoreCount holds the number of cores across the machines in the model. + CoreCount *int `yaml:"core-count,omitempty" json:"core-count,omitempty"` } // AccountDetails holds details of an account to show. @@ -130,26 +261,56 @@ // User is the username for the account. User string `yaml:"user" json:"user"` + // Access is the level of access the user has on the controller. + Access string `yaml:"access,omitempty" json:"access,omitempty"` + // Password is the password for the account. 
Password string `yaml:"password,omitempty" json:"password,omitempty"` } -func (c *showControllerCommand) convertControllerForShow(controllerName string, details *jujuclient.ControllerDetails) ShowControllerDetails { - controller := ShowControllerDetails{ - Details: ControllerDetails{ - ControllerUUID: details.ControllerUUID, - APIEndpoints: details.APIEndpoints, - CACert: details.CACert, - Cloud: details.Cloud, - CloudRegion: details.CloudRegion, - }, - } - c.convertModelsForShow(controllerName, &controller) - c.convertAccountsForShow(controllerName, &controller) - return controller +func (c *showControllerCommand) convertControllerForShow( + controller *ShowControllerDetails, + controllerName string, + details *jujuclient.ControllerDetails, + access string, + allModels []base.UserModel, + modelStatus []base.ModelStatus, +) { + + controller.Details = ControllerDetails{ + ControllerUUID: details.ControllerUUID, + APIEndpoints: details.APIEndpoints, + CACert: details.CACert, + Cloud: details.Cloud, + CloudRegion: details.CloudRegion, + AgentVersion: details.AgentVersion, + } + c.convertModelsForShow(controllerName, controller, allModels, modelStatus) + c.convertAccountsForShow(controllerName, controller, access) + var controllerModelUUID string + for _, m := range allModels { + if m.Name == bootstrap.ControllerModelName { + controllerModelUUID = m.UUID + break + } + } + if controllerModelUUID != "" { + var controllerModel base.ModelStatus + found := false + for _, m := range modelStatus { + if m.UUID == controllerModelUUID { + controllerModel = m + found = true + break + } + } + if found { + c.convertMachinesForShow(controllerName, controller, controllerModel) + } + } } -func (c *showControllerCommand) convertAccountsForShow(controllerName string, controller *ShowControllerDetails) { +func (c *showControllerCommand) convertAccountsForShow(controllerName string, controller *ShowControllerDetails, access string) { storeDetails, err := 
c.store.AccountDetails(controllerName) if err != nil && !errors.IsNotFound(err) { controller.Errors = append(controller.Errors, err.Error()) @@ -158,7 +319,8 @@ return } details := &AccountDetails{ - User: storeDetails.User, + User: storeDetails.User, + Access: access, } if c.showPasswords { details.Password = storeDetails.Password @@ -166,33 +328,71 @@ controller.Account = details } -func (c *showControllerCommand) convertModelsForShow(controllerName string, controller *ShowControllerDetails) { - models, err := c.store.AllModels(controllerName) - if errors.IsNotFound(err) { - return - } else if err != nil { - controller.Errors = append(controller.Errors, err.Error()) - return - } - if len(models) > 0 { - controller.Models = make(map[string]ModelDetails) - for modelName, model := range models { - controller.Models[modelName] = ModelDetails{model.ModelUUID} +func (c *showControllerCommand) convertModelsForShow( + controllerName string, + controller *ShowControllerDetails, + models []base.UserModel, + modelStatus []base.ModelStatus, +) { + controller.Models = make(map[string]ModelDetails) + for i, model := range models { + modelDetails := ModelDetails{ModelUUID: model.UUID} + if modelStatus[i].TotalMachineCount > 0 { + modelDetails.MachineCount = new(int) + *modelDetails.MachineCount = modelStatus[i].TotalMachineCount } + if modelStatus[i].CoreCount > 0 { + modelDetails.CoreCount = new(int) + *modelDetails.CoreCount = modelStatus[i].CoreCount + } + controller.Models[model.Name] = modelDetails } + var err error controller.CurrentModel, err = c.store.CurrentModel(controllerName) if err != nil && !errors.IsNotFound(err) { controller.Errors = append(controller.Errors, err.Error()) - return } } -type showControllerCommand struct { - modelcmd.JujuCommandBase - - out cmd.Output - store jujuclient.ClientStore +func (c *showControllerCommand) convertMachinesForShow( + controllerName string, + controller *ShowControllerDetails, + controllerModel base.ModelStatus, +) { + 
controller.Machines = make(map[string]MachineDetails) + numControllers := 0 + for _, m := range controllerModel.Machines { + if !m.WantsVote { + continue + } + numControllers++ + } + for _, m := range controllerModel.Machines { + if !m.WantsVote { + // Skip non controller machines. + continue + } + instId := m.InstanceId + if instId == "" { + instId = "(unprovisioned)" + } + details := MachineDetails{InstanceID: instId} + if numControllers > 1 { + details.HAStatus = haStatus(m.HasVote, m.WantsVote, m.Status) + } + controller.Machines[m.Id] = details + } +} - controllerNames []string - showPasswords bool +func haStatus(hasVote bool, wantsVote bool, statusStr string) string { + if statusStr == string(status.Down) { + return "down, lost connection" + } + if !wantsVote { + return "" + } + if hasVote { + return "ha-enabled" + } + return "ha-pending" } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/controller/showcontroller_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/controller/showcontroller_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/controller/showcontroller_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/controller/showcontroller_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -10,19 +10,47 @@ "github.com/juju/errors" jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" + "gopkg.in/juju/names.v2" + "github.com/juju/juju/api/base" "github.com/juju/juju/cmd/juju/controller" "github.com/juju/juju/jujuclient" "github.com/juju/juju/jujuclient/jujuclienttesting" + "github.com/juju/juju/permission" "github.com/juju/juju/testing" ) type ShowControllerSuite struct { baseControllerSuite + fakeController *fakeController + api func(string) controller.ControllerAccessAPI } var _ = gc.Suite(&ShowControllerSuite{}) +func (s *ShowControllerSuite) SetUpTest(c *gc.C) { + s.baseControllerSuite.SetUpTest(c) + s.fakeController = &fakeController{ + modelNames: map[string]string{ 
+ "abc": "controller", + "def": "my-model", + "ghi": "controller", + }, + machines: map[string][]base.Machine{ + "ghi": { + {Id: "0", InstanceId: "id-0", HasVote: false, WantsVote: true, Status: "active"}, + {Id: "1", InstanceId: "id-1", HasVote: false, WantsVote: true, Status: "down"}, + {Id: "2", InstanceId: "id-2", HasVote: true, WantsVote: true, Status: "active"}, + {Id: "3", InstanceId: "id-3", HasVote: false, WantsVote: false, Status: "active"}, + }, + }, + } + s.api = func(controllerNamee string) controller.ControllerAccessAPI { + s.fakeController.controllerName = controllerNamee + return s.fakeController + } +} + func (s *ShowControllerSuite) TestShowOneControllerOneInStore(c *gc.C) { s.controllersYaml = `controllers: mallards: @@ -30,8 +58,9 @@ api-endpoints: [this-is-another-of-many-api-endpoints, this-is-one-more-of-many-api-endpoints] ca-cert: this-is-another-ca-cert cloud: mallards + agent-version: 999.99.99 ` - s.createTestClientStore(c) + s.fakeController.store = s.createTestClientStore(c) s.expectedOutput = ` mallards: @@ -40,14 +69,20 @@ api-endpoints: [this-is-another-of-many-api-endpoints, this-is-one-more-of-many-api-endpoints] ca-cert: this-is-another-ca-cert cloud: mallards + agent-version: 999.99.99 models: - admin: + controller: uuid: abc + machine-count: 2 + core-count: 4 my-model: uuid: def + machine-count: 2 + core-count: 4 current-model: my-model account: - user: admin@local + user: admin + access: superuser `[1:] s.assertShowController(c, "mallards") @@ -60,8 +95,9 @@ api-endpoints: [this-is-another-of-many-api-endpoints, this-is-one-more-of-many-api-endpoints] ca-cert: this-is-another-ca-cert cloud: mallards + agent-version: 999.99.99 ` - s.createTestClientStore(c) + s.fakeController.store = s.createTestClientStore(c) s.expectedOutput = ` mallards: @@ -70,14 +106,20 @@ api-endpoints: [this-is-another-of-many-api-endpoints, this-is-one-more-of-many-api-endpoints] ca-cert: this-is-another-ca-cert cloud: mallards + agent-version: 
999.99.99 models: - admin: + controller: uuid: abc + machine-count: 2 + core-count: 4 my-model: uuid: def + machine-count: 2 + core-count: 4 current-model: my-model account: - user: admin@local + user: admin + access: superuser password: hunter2 `[1:] @@ -92,11 +134,12 @@ ca-cert: this-is-another-ca-cert cloud: mallards region: mallards1 + agent-version: 999.99.99 ` store := s.createTestClientStore(c) store.BootstrapConfig["mallards"] = jujuclient.BootstrapConfig{ Config: map[string]interface{}{ - "name": "admin", + "name": "controller", "type": "maas", "extra": "value", }, @@ -106,6 +149,7 @@ CloudRegion: "mallards1", CloudEndpoint: "http://mallards.local/MAAS", } + s.fakeController.store = store s.expectedOutput = ` mallards: @@ -115,21 +159,27 @@ ca-cert: this-is-another-ca-cert cloud: mallards region: mallards1 + agent-version: 999.99.99 models: - admin: + controller: uuid: abc + machine-count: 2 + core-count: 4 my-model: uuid: def + machine-count: 2 + core-count: 4 current-model: my-model account: - user: admin@local + user: admin + access: superuser `[1:] s.assertShowController(c, "mallards") } func (s *ShowControllerSuite) TestShowOneControllerManyInStore(c *gc.C) { - s.createTestClientStore(c) + s.fakeController.store = s.createTestClientStore(c) s.expectedOutput = ` aws-test: @@ -139,18 +189,32 @@ ca-cert: this-is-aws-test-ca-cert cloud: aws region: us-east-1 + agent-version: 999.99.99 + controller-machines: + "0": + instance-id: id-0 + ha-status: ha-pending + "1": + instance-id: id-1 + ha-status: down, lost connection + "2": + instance-id: id-2 + ha-status: ha-enabled models: - admin: + controller: uuid: ghi - current-model: admin + machine-count: 2 + core-count: 4 + current-model: controller account: - user: admin@local + user: admin + access: superuser `[1:] s.assertShowController(c, "aws-test") } func (s *ShowControllerSuite) TestShowSomeControllerMoreInStore(c *gc.C) { - s.createTestClientStore(c) + s.fakeController.store = s.createTestClientStore(c) 
s.expectedOutput = ` aws-test: details: @@ -159,45 +223,61 @@ ca-cert: this-is-aws-test-ca-cert cloud: aws region: us-east-1 + agent-version: 999.99.99 + controller-machines: + "0": + instance-id: id-0 + ha-status: ha-pending + "1": + instance-id: id-1 + ha-status: down, lost connection + "2": + instance-id: id-2 + ha-status: ha-enabled models: - admin: + controller: uuid: ghi - current-model: admin + machine-count: 2 + core-count: 4 + current-model: controller account: - user: admin@local + user: admin + access: superuser mark-test-prodstack: details: uuid: this-is-a-uuid api-endpoints: [this-is-one-of-many-api-endpoints] ca-cert: this-is-a-ca-cert cloud: prodstack + agent-version: 999.99.99 account: - user: admin@local + user: admin + access: superuser `[1:] s.assertShowController(c, "aws-test", "mark-test-prodstack") } func (s *ShowControllerSuite) TestShowControllerJsonOne(c *gc.C) { - s.createTestClientStore(c) + s.fakeController.store = s.createTestClientStore(c) s.expectedOutput = ` -{"aws-test":{"details":{"uuid":"this-is-the-aws-test-uuid","api-endpoints":["this-is-aws-test-of-many-api-endpoints"],"ca-cert":"this-is-aws-test-ca-cert","cloud":"aws","region":"us-east-1"},"models":{"admin":{"uuid":"ghi"}},"current-model":"admin","account":{"user":"admin@local"}}} +{"aws-test":{"details":{"uuid":"this-is-the-aws-test-uuid","api-endpoints":["this-is-aws-test-of-many-api-endpoints"],"ca-cert":"this-is-aws-test-ca-cert","cloud":"aws","region":"us-east-1","agent-version":"999.99.99"},"controller-machines":{"0":{"instance-id":"id-0","ha-status":"ha-pending"},"1":{"instance-id":"id-1","ha-status":"down, lost connection"},"2":{"instance-id":"id-2","ha-status":"ha-enabled"}},"models":{"controller":{"uuid":"ghi","machine-count":2,"core-count":4}},"current-model":"controller","account":{"user":"admin","access":"superuser"}}} `[1:] s.assertShowController(c, "--format", "json", "aws-test") } func (s *ShowControllerSuite) TestShowControllerJsonMany(c *gc.C) { - 
s.createTestClientStore(c) + s.fakeController.store = s.createTestClientStore(c) s.expectedOutput = ` -{"aws-test":{"details":{"uuid":"this-is-the-aws-test-uuid","api-endpoints":["this-is-aws-test-of-many-api-endpoints"],"ca-cert":"this-is-aws-test-ca-cert","cloud":"aws","region":"us-east-1"},"models":{"admin":{"uuid":"ghi"}},"current-model":"admin","account":{"user":"admin@local"}},"mark-test-prodstack":{"details":{"uuid":"this-is-a-uuid","api-endpoints":["this-is-one-of-many-api-endpoints"],"ca-cert":"this-is-a-ca-cert","cloud":"prodstack"},"account":{"user":"admin@local"}}} +{"aws-test":{"details":{"uuid":"this-is-the-aws-test-uuid","api-endpoints":["this-is-aws-test-of-many-api-endpoints"],"ca-cert":"this-is-aws-test-ca-cert","cloud":"aws","region":"us-east-1","agent-version":"999.99.99"},"controller-machines":{"0":{"instance-id":"id-0","ha-status":"ha-pending"},"1":{"instance-id":"id-1","ha-status":"down, lost connection"},"2":{"instance-id":"id-2","ha-status":"ha-enabled"}},"models":{"controller":{"uuid":"ghi","machine-count":2,"core-count":4}},"current-model":"controller","account":{"user":"admin","access":"superuser"}},"mark-test-prodstack":{"details":{"uuid":"this-is-a-uuid","api-endpoints":["this-is-one-of-many-api-endpoints"],"ca-cert":"this-is-a-ca-cert","cloud":"prodstack","agent-version":"999.99.99"},"account":{"user":"admin","access":"superuser"}}} `[1:] s.assertShowController(c, "--format", "json", "aws-test", "mark-test-prodstack") } func (s *ShowControllerSuite) TestShowControllerReadFromStoreErr(c *gc.C) { - s.createTestClientStore(c) + s.fakeController.store = s.createTestClientStore(c) msg := "fail getting controller" errStore := jujuclienttesting.NewStubStore() @@ -211,9 +291,10 @@ func (s *ShowControllerSuite) TestShowControllerNoArgs(c *gc.C) { store := s.createTestClientStore(c) + s.fakeController.store = store s.expectedOutput = ` 
-{"aws-test":{"details":{"uuid":"this-is-the-aws-test-uuid","api-endpoints":["this-is-aws-test-of-many-api-endpoints"],"ca-cert":"this-is-aws-test-ca-cert","cloud":"aws","region":"us-east-1"},"models":{"admin":{"uuid":"ghi"}},"current-model":"admin","account":{"user":"admin@local"}}} +{"aws-test":{"details":{"uuid":"this-is-the-aws-test-uuid","api-endpoints":["this-is-aws-test-of-many-api-endpoints"],"ca-cert":"this-is-aws-test-ca-cert","cloud":"aws","region":"us-east-1","agent-version":"999.99.99"},"controller-machines":{"0":{"instance-id":"id-0","ha-status":"ha-pending"},"1":{"instance-id":"id-1","ha-status":"down, lost connection"},"2":{"instance-id":"id-2","ha-status":"ha-enabled"}},"models":{"controller":{"uuid":"ghi","machine-count":2,"core-count":4}},"current-model":"controller","account":{"user":"admin","access":"superuser"}}} `[1:] store.CurrentControllerName = "aws-test" s.assertShowController(c, "--format", "json") @@ -244,7 +325,7 @@ } func (s *ShowControllerSuite) runShowController(c *gc.C, args ...string) (*cmd.Context, error) { - return testing.RunCommand(c, controller.NewShowControllerCommandForTest(s.store), args...) + return testing.RunCommand(c, controller.NewShowControllerCommandForTest(s.store, s.api), args...) 
} func (s *ShowControllerSuite) assertShowControllerFailed(c *gc.C, args ...string) { @@ -257,3 +338,51 @@ c.Assert(err, jc.ErrorIsNil) c.Assert(testing.Stdout(context), gc.Equals, s.expectedOutput) } + +type fakeController struct { + controllerName string + store jujuclient.ClientStore + modelNames map[string]string + machines map[string][]base.Machine +} + +func (*fakeController) GetControllerAccess(user string) (permission.Access, error) { + return "superuser", nil +} + +func (*fakeController) ModelConfig() (map[string]interface{}, error) { + return map[string]interface{}{"agent-version": "999.99.99"}, nil +} + +func (c *fakeController) ModelStatus(models ...names.ModelTag) (result []base.ModelStatus, _ error) { + for _, mtag := range models { + result = append(result, base.ModelStatus{ + UUID: mtag.Id(), + TotalMachineCount: 2, + CoreCount: 4, + Machines: c.machines[mtag.Id()], + }) + } + return result, nil +} + +func (c *fakeController) AllModels() (result []base.UserModel, _ error) { + all, err := c.store.AllModels(c.controllerName) + if errors.IsNotFound(err) { + return result, nil + } + if err != nil { + return nil, err + } + for _, m := range all { + result = append(result, base.UserModel{ + UUID: m.ModelUUID, + Name: c.modelNames[m.ModelUUID], + }) + } + return result, nil +} + +func (*fakeController) Close() error { + return nil +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/controller/unregister.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/controller/unregister.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/controller/unregister.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/controller/unregister.go 2016-10-13 14:31:49.000000000 +0000 @@ -8,7 +8,7 @@ "github.com/juju/cmd" "github.com/juju/errors" - "launchpad.net/gnuflag" + "github.com/juju/gnuflag" jujucmd "github.com/juju/juju/cmd" "github.com/juju/juju/cmd/modelcmd" @@ -33,17 +33,19 @@ } var 
usageUnregisterDetails = ` -Removes local connection information for the specified controller. -This command does not destroy the controller. In order to regain -access to an unregistered controller, it will need to be added -again using the juju register command. +Removes local connection information for the specified controller. This +command does not destroy the controller. In order to regain access to an +unregistered controller, it will need to be added again using the juju register +command. Examples: juju unregister my-controller -See Also: - juju register` +See also: + destroy-controller + kill-controller + register` // Info implements Command.Info // `unregister` may seem generic as a command, but aligns with `register`. diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/gui/gui.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/gui/gui.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/gui/gui.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/gui/gui.go 2016-10-13 14:31:49.000000000 +0000 @@ -9,9 +9,9 @@ "github.com/juju/cmd" "github.com/juju/errors" + "github.com/juju/gnuflag" "github.com/juju/httprequest" "github.com/juju/webbrowser" - "launchpad.net/gnuflag" "github.com/juju/juju/api" "github.com/juju/juju/cmd/modelcmd" @@ -57,6 +57,7 @@ // SetFlags implements the cmd.Command interface. 
func (c *guiCommand) SetFlags(f *gnuflag.FlagSet) { + c.ModelCommandBase.SetFlags(f) f.BoolVar(&c.showCreds, "show-credentials", false, "Show admin credentials to use for logging into the Juju GUI") f.BoolVar(&c.noBrowser, "no-browser", false, "Do not try to open the web browser, just print the Juju GUI URL") } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/gui/gui_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/gui/gui_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/gui/gui_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/gui/gui_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -78,7 +78,7 @@ s.patchBrowser(nil) out, err := s.run(c, "--show-credentials") c.Assert(err, jc.ErrorIsNil) - c.Assert(out, jc.Contains, "Username: admin@local\nPassword: dummy-secret") + c.Assert(out, jc.Contains, "Username: admin\nPassword: dummy-secret") } func (s *guiSuite) TestGUISuccessNoBrowser(c *gc.C) { diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/gui/upgradegui.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/gui/upgradegui.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/gui/upgradegui.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/gui/upgradegui.go 2016-10-13 14:31:49.000000000 +0000 @@ -16,10 +16,10 @@ "github.com/juju/cmd" "github.com/juju/errors" + "github.com/juju/gnuflag" "github.com/juju/version" - "launchpad.net/gnuflag" - "github.com/juju/juju/api" + "github.com/juju/juju/api/controller" "github.com/juju/juju/apiserver/params" "github.com/juju/juju/cmd/juju/common" "github.com/juju/juju/cmd/modelcmd" @@ -28,12 +28,12 @@ // NewUpgradeGUICommand creates and returns a new upgrade-gui command. 
func NewUpgradeGUICommand() cmd.Command { - return modelcmd.Wrap(&upgradeGUICommand{}) + return modelcmd.WrapController(&upgradeGUICommand{}) } // upgradeGUICommand upgrades to a new Juju GUI version in the controller. type upgradeGUICommand struct { - modelcmd.ModelCommandBase + modelcmd.ControllerCommandBase versOrPath string list bool @@ -68,6 +68,7 @@ // SetFlags implements the cmd.Command interface. func (c *upgradeGUICommand) SetFlags(f *gnuflag.FlagSet) { + c.ControllerCommandBase.SetFlags(f) f.BoolVar(&c.list, "list", false, "List available Juju GUI release versions without upgrading") } @@ -104,7 +105,7 @@ defer archive.r.Close() // Open the Juju API client. - client, err := c.NewAPIClient() + client, err := c.NewControllerAPIClient() if err != nil { return errors.Annotate(err, "cannot establish API connection") } @@ -301,7 +302,7 @@ // given version and reports whether that's the current version served by the // controller. If the given version is not present in the server, an empty // hash is returned. -func existingVersionInfo(client *api.Client, vers version.Number) (hash string, current bool, err error) { +func existingVersionInfo(client *controller.Client, vers version.Number) (hash string, current bool, err error) { versions, err := clientGUIArchives(client) if err != nil { return "", false, errors.Annotate(err, "cannot retrieve GUI versions from the controller") @@ -349,17 +350,17 @@ } // clientGUIArchives is defined for testing purposes. -var clientGUIArchives = func(client *api.Client) ([]params.GUIArchiveVersion, error) { +var clientGUIArchives = func(client *controller.Client) ([]params.GUIArchiveVersion, error) { return client.GUIArchives() } // clientSelectGUIVersion is defined for testing purposes. 
-var clientSelectGUIVersion = func(client *api.Client, vers version.Number) error { +var clientSelectGUIVersion = func(client *controller.Client, vers version.Number) error { return client.SelectGUIVersion(vers) } // clientUploadGUIArchive is defined for testing purposes. -var clientUploadGUIArchive = func(client *api.Client, r io.ReadSeeker, hash string, size int64, vers version.Number) (bool, error) { +var clientUploadGUIArchive = func(client *controller.Client, r io.ReadSeeker, hash string, size int64, vers version.Number) (bool, error) { return client.UploadGUIArchive(r, hash, size, vers) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/gui/upgradegui_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/gui/upgradegui_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/gui/upgradegui_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/gui/upgradegui_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -21,7 +21,7 @@ "github.com/juju/version" gc "gopkg.in/check.v1" - "github.com/juju/juju/api" + "github.com/juju/juju/api/controller" "github.com/juju/juju/apiserver/params" "github.com/juju/juju/cmd/juju/gui" envgui "github.com/juju/juju/environs/gui" @@ -48,7 +48,7 @@ func (s *upgradeGUISuite) patchClientGUIArchives(c *gc.C, returnedVersions []params.GUIArchiveVersion, returnedErr error) calledFunc { var called bool - f := func(client *api.Client) ([]params.GUIArchiveVersion, error) { + f := func(client *controller.Client) ([]params.GUIArchiveVersion, error) { called = true return returnedVersions, returnedErr } @@ -60,7 +60,7 @@ func (s *upgradeGUISuite) patchClientSelectGUIVersion(c *gc.C, expectedVers string, returnedErr error) calledFunc { var called bool - f := func(client *api.Client, vers version.Number) error { + f := func(client *controller.Client, vers version.Number) error { called = true c.Assert(vers.String(), gc.Equals, expectedVers) return returnedErr @@ -73,7 +73,7 @@ 
func (s *upgradeGUISuite) patchClientUploadGUIArchive(c *gc.C, expectedHash string, expectedSize int64, expectedVers string, returnedIsCurrent bool, returnedErr error) calledFunc { var called bool - f := func(client *api.Client, r io.ReadSeeker, hash string, size int64, vers version.Number) (bool, error) { + f := func(client *controller.Client, r io.ReadSeeker, hash string, size int64, vers version.Number) (bool, error) { called = true c.Assert(hash, gc.Equals, expectedHash) c.Assert(size, gc.Equals, expectedSize) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/machine/add.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/machine/add.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/machine/add.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/machine/add.go 2016-10-13 14:31:49.000000000 +0000 @@ -9,8 +9,8 @@ "github.com/juju/cmd" "github.com/juju/errors" + "github.com/juju/gnuflag" "gopkg.in/juju/names.v2" - "launchpad.net/gnuflag" "github.com/juju/juju/api/machinemanager" "github.com/juju/juju/api/modelconfig" @@ -68,8 +68,8 @@ juju add-machine zone=us-east-1a (start a machine in zone us-east-1a on AWS) juju add-machine maas2.name (acquire machine maas2.name on MAAS) -See Also: - juju remove-machine +See also: + remove-machine ` func init() { @@ -100,6 +100,8 @@ Series string // If specified, these constraints are merged with those already in the model. Constraints constraints.Value + // If specified, these constraints are merged with those already in the model. + ConstraintsStr string // Placement is passed verbatim to the API, to be parsed and evaluated server-side. Placement *instance.Placement // NumMachines is the number of machines to add. 
@@ -114,20 +116,20 @@ Args: "[:machine | | ssh:[user@]host | placement]", Purpose: "Start a new, empty machine and optionally a container, or add a container to a machine.", Doc: addMachineDoc, - Aliases: []string{"add-machines"}, } } func (c *addCommand) SetFlags(f *gnuflag.FlagSet) { + c.ModelCommandBase.SetFlags(f) f.StringVar(&c.Series, "series", "", "The charm series") f.IntVar(&c.NumMachines, "n", 1, "The number of machines to add") - f.Var(constraints.ConstraintsValue{Target: &c.Constraints}, "constraints", "Additional machine constraints") + f.StringVar(&c.ConstraintsStr, "constraints", "", "Additional machine constraints") f.Var(disksFlag{&c.Disks}, "disks", "Constraints for disks to attach to the machine") } func (c *addCommand) Init(args []string) error { if c.Constraints.Container != nil { - return fmt.Errorf("container constraint %q not allowed when adding a machine", *c.Constraints.Container) + return errors.Errorf("container constraint %q not allowed when adding a machine", *c.Constraints.Container) } placement, err := cmd.ZeroOrOneArgs(args) if err != nil { @@ -151,7 +153,7 @@ AddMachines([]params.AddMachineParams) ([]params.AddMachinesResult, error) Close() error ForceDestroyMachines(machines ...string) error - ModelUUID() (string, error) + ModelUUID() (string, bool) ProvisioningScript(params.ProvisioningScriptParams) (script string, err error) } @@ -203,6 +205,11 @@ } func (c *addCommand) Run(ctx *cmd.Context) error { + var err error + c.Constraints, err = common.ParseConstraints(ctx, c.ConstraintsStr) + if err != nil { + return err + } client, err := c.getClientAPI() if err != nil { return errors.Trace(err) @@ -229,6 +236,9 @@ defer modelConfigClient.Close() configAttrs, err := modelConfigClient.ModelGet() if err != nil { + if params.IsCodeUnauthorized(err) { + common.PermissionsMessage(ctx.Stderr, "add a machine to this model") + } return errors.Trace(err) } config, err := config.New(config.NoDefaults, configAttrs) @@ -263,9 +273,9 @@ 
logger.Infof("model provisioning") if c.Placement != nil && c.Placement.Scope == "model-uuid" { - uuid, err := client.ModelUUID() - if err != nil { - return errors.Trace(err) + uuid, ok := client.ModelUUID() + if !ok { + return errors.New("API connection is controller-only (should never happen)") } c.Placement.Scope = uuid } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/machine/add_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/machine/add_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/machine/add_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/machine/add_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -67,13 +67,6 @@ count: 1, placement: "lxd:4", }, { - args: []string{"--constraints", "mem=8G"}, - count: 1, - constraints: "mem=8192M", - }, { - args: []string{"--constraints", "container=lxd"}, - errorString: `container constraint "lxd" not allowed when adding a machine`, - }, { args: []string{"ssh:user@10.10.0.3"}, count: 1, placement: "ssh:user@10.10.0.3", @@ -130,6 +123,16 @@ }) } +func (s *AddMachineSuite) TestAddMachineUnauthorizedMentionsJujuGrant(c *gc.C) { + s.fakeAddMachine.addModelGetError = ¶ms.Error{ + Message: "permission denied", + Code: params.CodeUnauthorized, + } + ctx, _ := s.run(c) + errString := strings.Replace(testing.Stderr(ctx), "\n", " ", -1) + c.Assert(errString, gc.Matches, `.*juju grant.*`) +} + func (s *AddMachineSuite) TestSSHPlacement(c *gc.C) { s.PatchValue(machine.ManualProvisioner, func(args manual.ProvisionMachineArgs) (string, error) { return "42", nil @@ -182,10 +185,7 @@ func (s *AddMachineSuite) TestBlockedError(c *gc.C) { s.fakeAddMachine.addError = common.OperationBlockedError("TestBlockedError") _, err := s.run(c) - c.Assert(err, gc.Equals, cmd.ErrSilent) - // msg is logged - stripped := strings.Replace(c.GetTestLog(), "\n", "", -1) - c.Check(stripped, gc.Matches, ".*TestBlockedError.*") + testing.AssertOperationWasBlocked(c, 
err, ".*TestBlockedError.*") } func (s *AddMachineSuite) TestAddMachineWithDisks(c *gc.C) { @@ -207,19 +207,20 @@ } type fakeAddMachineAPI struct { - successOrder []bool - currentOp int - args []params.AddMachineParams - addError error - providerType string + successOrder []bool + currentOp int + args []params.AddMachineParams + addError error + addModelGetError error + providerType string } func (f *fakeAddMachineAPI) Close() error { return nil } -func (f *fakeAddMachineAPI) ModelUUID() (string, error) { - return "fake-uuid", nil +func (f *fakeAddMachineAPI) ModelUUID() (string, bool) { + return "fake-uuid", true } func (f *fakeAddMachineAPI) AddMachines(args []params.AddMachineParams) ([]params.AddMachinesResult, error) { @@ -254,6 +255,9 @@ } func (f *fakeAddMachineAPI) ModelGet() (map[string]interface{}, error) { + if f.addModelGetError != nil { + return nil, f.addModelGetError + } providerType := "dummy" if f.providerType != "" { providerType = f.providerType diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/machine/base.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/machine/base.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/machine/base.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/machine/base.go 2016-10-13 14:31:49.000000000 +0000 @@ -5,10 +5,11 @@ import ( "fmt" + "io" "github.com/juju/cmd" "github.com/juju/errors" - "launchpad.net/gnuflag" + "github.com/juju/gnuflag" "github.com/juju/juju/apiserver/params" "github.com/juju/juju/cmd/juju/status" @@ -29,15 +30,18 @@ api statusAPI machineIds []string defaultFormat string + color bool } // SetFlags sets utc and format flags based on user specified options. 
func (c *baselistMachinesCommand) SetFlags(f *gnuflag.FlagSet) { + c.ModelCommandBase.SetFlags(f) f.BoolVar(&c.isoTime, "utc", false, "Display time as UTC in RFC3339 format") + f.BoolVar(&c.color, "color", false, "Force use of ANSI color codes") c.out.AddFlags(f, c.defaultFormat, map[string]cmd.Formatter{ "yaml": cmd.FormatYaml, "json": cmd.FormatJson, - "tabular": status.FormatMachineTabular, + "tabular": c.tabular, }) } @@ -72,3 +76,7 @@ formatted := formatter.MachineFormat(c.machineIds) return c.out.Write(ctx, formatted) } + +func (c *baselistMachinesCommand) tabular(writer io.Writer, value interface{}) error { + return status.FormatMachineTabular(writer, c.color, value) +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/machine/export_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/machine/export_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/machine/export_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/machine/export_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -19,11 +19,11 @@ } // NewAddCommand returns an AddCommand with the api provided as specified. 
-func NewAddCommandForTest(api AddMachineAPI, mcApi ModelConfigAPI, mmApi MachineManagerAPI) (cmd.Command, *AddCommand) { +func NewAddCommandForTest(api AddMachineAPI, mcAPI ModelConfigAPI, mmAPI MachineManagerAPI) (cmd.Command, *AddCommand) { cmd := &addCommand{ api: api, - machineManagerAPI: mmApi, - modelConfigAPI: mcApi, + machineManagerAPI: mmAPI, + modelConfigAPI: mcAPI, } return modelcmd.Wrap(cmd), &AddCommand{cmd} } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/machine/list.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/machine/list.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/machine/list.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/machine/list.go 2016-10-13 14:31:49.000000000 +0000 @@ -46,7 +46,7 @@ Name: "machines", Purpose: usageListMachinesSummary, Doc: usageListMachinesDetails, - Aliases: []string{"list-machines", "machine", "list-machine"}, + Aliases: []string{"list-machines"}, } } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/machine/list_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/machine/list_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/machine/list_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/machine/list_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -37,7 +37,11 @@ AgentStatus: params.DetailedStatus{ Status: "started", }, - DNSName: "10.0.0.1", + DNSName: "10.0.0.1", + IPAddresses: []string{ + "10.0.0.1", + "10.0.1.1", + }, InstanceId: "juju-badd06-0", Series: "trusty", Hardware: "availability-zone=us-east-1", @@ -47,7 +51,11 @@ AgentStatus: params.DetailedStatus{ Status: "started", }, - DNSName: "10.0.0.2", + DNSName: "10.0.0.2", + IPAddresses: []string{ + "10.0.0.2", + "10.0.1.2", + }, InstanceId: "juju-badd06-1", Series: "trusty", Containers: map[string]params.MachineStatus{ @@ -56,7 +64,11 @@ AgentStatus: params.DetailedStatus{ Status: 
"pending", }, - DNSName: "10.0.0.3", + DNSName: "10.0.0.3", + IPAddresses: []string{ + "10.0.0.3", + "10.0.1.3", + }, InstanceId: "juju-badd06-1-lxd-0", Series: "trusty", }, @@ -79,10 +91,10 @@ context, err := testing.RunCommand(c, newMachineListCommand()) c.Assert(err, jc.ErrorIsNil) c.Assert(testing.Stdout(context), gc.Equals, ""+ - "MACHINE STATE DNS INS-ID SERIES AZ\n"+ - "0 started 10.0.0.1 juju-badd06-0 trusty us-east-1\n"+ - "1 started 10.0.0.2 juju-badd06-1 trusty \n"+ - " 1/lxd/0 pending 10.0.0.3 juju-badd06-1-lxd-0 trusty \n"+ + "Machine State DNS Inst id Series AZ\n"+ + "0 started 10.0.0.1 juju-badd06-0 trusty us-east-1\n"+ + "1 started 10.0.0.2 juju-badd06-1 trusty \n"+ + "1/lxd/0 pending 10.0.0.3 juju-badd06-1-lxd-0 trusty \n"+ "\n") } @@ -96,6 +108,9 @@ " juju-status:\n"+ " current: started\n"+ " dns-name: 10.0.0.1\n"+ + " ip-addresses:\n"+ + " - 10.0.0.1\n"+ + " - 10.0.1.1\n"+ " instance-id: juju-badd06-0\n"+ " series: trusty\n"+ " hardware: availability-zone=us-east-1\n"+ @@ -103,6 +118,9 @@ " juju-status:\n"+ " current: started\n"+ " dns-name: 10.0.0.2\n"+ + " ip-addresses:\n"+ + " - 10.0.0.2\n"+ + " - 10.0.1.2\n"+ " instance-id: juju-badd06-1\n"+ " series: trusty\n"+ " containers:\n"+ @@ -110,6 +128,9 @@ " juju-status:\n"+ " current: pending\n"+ " dns-name: 10.0.0.3\n"+ + " ip-addresses:\n"+ + " - 10.0.0.3\n"+ + " - 10.0.1.3\n"+ " instance-id: juju-badd06-1-lxd-0\n"+ " series: trusty\n") } @@ -118,7 +139,7 @@ context, err := testing.RunCommand(c, newMachineListCommand(), "--format", "json") c.Assert(err, jc.ErrorIsNil) c.Assert(testing.Stdout(context), gc.Equals, ""+ - 
"{\"model\":\"dummyenv\",\"machines\":{\"0\":{\"juju-status\":{\"current\":\"started\"},\"dns-name\":\"10.0.0.1\",\"instance-id\":\"juju-badd06-0\",\"machine-status\":{},\"series\":\"trusty\",\"hardware\":\"availability-zone=us-east-1\"},\"1\":{\"juju-status\":{\"current\":\"started\"},\"dns-name\":\"10.0.0.2\",\"instance-id\":\"juju-badd06-1\",\"machine-status\":{},\"series\":\"trusty\",\"containers\":{\"1/lxd/0\":{\"juju-status\":{\"current\":\"pending\"},\"dns-name\":\"10.0.0.3\",\"instance-id\":\"juju-badd06-1-lxd-0\",\"machine-status\":{},\"series\":\"trusty\"}}}}}\n") + "{\"model\":\"dummyenv\",\"machines\":{\"0\":{\"juju-status\":{\"current\":\"started\"},\"dns-name\":\"10.0.0.1\",\"ip-addresses\":[\"10.0.0.1\",\"10.0.1.1\"],\"instance-id\":\"juju-badd06-0\",\"machine-status\":{},\"series\":\"trusty\",\"hardware\":\"availability-zone=us-east-1\"},\"1\":{\"juju-status\":{\"current\":\"started\"},\"dns-name\":\"10.0.0.2\",\"ip-addresses\":[\"10.0.0.2\",\"10.0.1.2\"],\"instance-id\":\"juju-badd06-1\",\"machine-status\":{},\"series\":\"trusty\",\"containers\":{\"1/lxd/0\":{\"juju-status\":{\"current\":\"pending\"},\"dns-name\":\"10.0.0.3\",\"ip-addresses\":[\"10.0.0.3\",\"10.0.1.3\"],\"instance-id\":\"juju-badd06-1-lxd-0\",\"machine-status\":{},\"series\":\"trusty\"}}}}}\n") } func (s *MachineListCommandSuite) TestListMachineArgsError(c *gc.C) { diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/machine/remove.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/machine/remove.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/machine/remove.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/machine/remove.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,11 +4,10 @@ package machine import ( - "fmt" - "github.com/juju/cmd" + "github.com/juju/errors" + "github.com/juju/gnuflag" "gopkg.in/juju/names.v2" - "launchpad.net/gnuflag" "github.com/juju/juju/cmd/juju/block" "github.com/juju/juju/cmd/modelcmd" 
@@ -56,22 +55,22 @@ Args: " ...", Purpose: "Removes one or more machines from a model.", Doc: destroyMachineDoc, - Aliases: []string{"remove-machines"}, } } // SetFlags implements Command.SetFlags. func (c *removeCommand) SetFlags(f *gnuflag.FlagSet) { + c.ModelCommandBase.SetFlags(f) f.BoolVar(&c.Force, "force", false, "Completely remove a machine and all its dependencies") } func (c *removeCommand) Init(args []string) error { if len(args) == 0 { - return fmt.Errorf("no machines specified") + return errors.Errorf("no machines specified") } for _, id := range args { if !names.IsValidMachine(id) { - return fmt.Errorf("invalid machine id %q", id) + return errors.Errorf("invalid machine id %q", id) } } c.MachineIds = args diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/machine/remove_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/machine/remove_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/machine/remove_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/machine/remove_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,8 +4,6 @@ package machine_test import ( - "strings" - "github.com/juju/cmd" jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" @@ -93,21 +91,15 @@ func (s *RemoveMachineSuite) TestBlockedError(c *gc.C) { s.fake.removeError = common.OperationBlockedError("TestBlockedError") _, err := s.run(c, "1") - c.Assert(err, gc.Equals, cmd.ErrSilent) c.Assert(s.fake.forced, jc.IsFalse) - // msg is logged - stripped := strings.Replace(c.GetTestLog(), "\n", "", -1) - c.Assert(stripped, gc.Matches, ".*TestBlockedError.*") + testing.AssertOperationWasBlocked(c, err, ".*TestBlockedError.*") } func (s *RemoveMachineSuite) TestForceBlockedError(c *gc.C) { s.fake.removeError = common.OperationBlockedError("TestForceBlockedError") _, err := s.run(c, "--force", "1") - c.Assert(err, gc.Equals, cmd.ErrSilent) c.Assert(s.fake.forced, jc.IsTrue) - // msg is logged - stripped 
:= strings.Replace(c.GetTestLog(), "\n", "", -1) - c.Assert(stripped, gc.Matches, ".*TestForceBlockedError.*") + testing.AssertOperationWasBlocked(c, err, ".*TestForceBlockedError.*") } type fakeRemoveMachineAPI struct { diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/machine/show.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/machine/show.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/machine/show.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/machine/show.go 2016-10-13 14:31:49.000000000 +0000 @@ -47,7 +47,6 @@ Args: " ...", Purpose: "Show a machine's status.", Doc: showMachineCommandDoc, - Aliases: []string{"show-machines"}, } } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/machine/show_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/machine/show_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/machine/show_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/machine/show_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -36,6 +36,9 @@ " juju-status:\n"+ " current: started\n"+ " dns-name: 10.0.0.1\n"+ + " ip-addresses:\n"+ + " - 10.0.0.1\n"+ + " - 10.0.1.1\n"+ " instance-id: juju-badd06-0\n"+ " series: trusty\n"+ " hardware: availability-zone=us-east-1\n"+ @@ -43,6 +46,9 @@ " juju-status:\n"+ " current: started\n"+ " dns-name: 10.0.0.2\n"+ + " ip-addresses:\n"+ + " - 10.0.0.2\n"+ + " - 10.0.1.2\n"+ " instance-id: juju-badd06-1\n"+ " series: trusty\n"+ " containers:\n"+ @@ -50,6 +56,9 @@ " juju-status:\n"+ " current: pending\n"+ " dns-name: 10.0.0.3\n"+ + " ip-addresses:\n"+ + " - 10.0.0.3\n"+ + " - 10.0.1.3\n"+ " instance-id: juju-badd06-1-lxd-0\n"+ " series: trusty\n") } @@ -63,6 +72,9 @@ " juju-status:\n"+ " current: started\n"+ " dns-name: 10.0.0.1\n"+ + " ip-addresses:\n"+ + " - 10.0.0.1\n"+ + " - 10.0.1.1\n"+ " instance-id: juju-badd06-0\n"+ " series: trusty\n"+ " hardware: 
availability-zone=us-east-1\n") @@ -72,10 +84,10 @@ context, err := testing.RunCommand(c, newMachineShowCommand(), "--format", "tabular", "0", "1") c.Assert(err, jc.ErrorIsNil) c.Assert(testing.Stdout(context), gc.Equals, ""+ - "MACHINE STATE DNS INS-ID SERIES AZ\n"+ - "0 started 10.0.0.1 juju-badd06-0 trusty us-east-1\n"+ - "1 started 10.0.0.2 juju-badd06-1 trusty \n"+ - " 1/lxd/0 pending 10.0.0.3 juju-badd06-1-lxd-0 trusty \n"+ + "Machine State DNS Inst id Series AZ\n"+ + "0 started 10.0.0.1 juju-badd06-0 trusty us-east-1\n"+ + "1 started 10.0.0.2 juju-badd06-1 trusty \n"+ + "1/lxd/0 pending 10.0.0.3 juju-badd06-1-lxd-0 trusty \n"+ "\n") } @@ -83,5 +95,5 @@ context, err := testing.RunCommand(c, newMachineShowCommand(), "--format", "json", "0", "1") c.Assert(err, jc.ErrorIsNil) c.Assert(testing.Stdout(context), gc.Equals, ""+ - "{\"model\":\"dummyenv\",\"machines\":{\"0\":{\"juju-status\":{\"current\":\"started\"},\"dns-name\":\"10.0.0.1\",\"instance-id\":\"juju-badd06-0\",\"machine-status\":{},\"series\":\"trusty\",\"hardware\":\"availability-zone=us-east-1\"},\"1\":{\"juju-status\":{\"current\":\"started\"},\"dns-name\":\"10.0.0.2\",\"instance-id\":\"juju-badd06-1\",\"machine-status\":{},\"series\":\"trusty\",\"containers\":{\"1/lxd/0\":{\"juju-status\":{\"current\":\"pending\"},\"dns-name\":\"10.0.0.3\",\"instance-id\":\"juju-badd06-1-lxd-0\",\"machine-status\":{},\"series\":\"trusty\"}}}}}\n") + 
"{\"model\":\"dummyenv\",\"machines\":{\"0\":{\"juju-status\":{\"current\":\"started\"},\"dns-name\":\"10.0.0.1\",\"ip-addresses\":[\"10.0.0.1\",\"10.0.1.1\"],\"instance-id\":\"juju-badd06-0\",\"machine-status\":{},\"series\":\"trusty\",\"hardware\":\"availability-zone=us-east-1\"},\"1\":{\"juju-status\":{\"current\":\"started\"},\"dns-name\":\"10.0.0.2\",\"ip-addresses\":[\"10.0.0.2\",\"10.0.1.2\"],\"instance-id\":\"juju-badd06-1\",\"machine-status\":{},\"series\":\"trusty\",\"containers\":{\"1/lxd/0\":{\"juju-status\":{\"current\":\"pending\"},\"dns-name\":\"10.0.0.3\",\"ip-addresses\":[\"10.0.0.3\",\"10.0.1.3\"],\"instance-id\":\"juju-badd06-1-lxd-0\",\"machine-status\":{},\"series\":\"trusty\"}}}}}\n") } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/metricsdebug/collectmetrics.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/metricsdebug/collectmetrics.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/metricsdebug/collectmetrics.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/metricsdebug/collectmetrics.go 2016-10-13 14:31:49.000000000 +0000 @@ -13,7 +13,6 @@ "github.com/juju/loggo" "gopkg.in/juju/charm.v6-unstable" "gopkg.in/juju/names.v2" - "launchpad.net/gnuflag" "github.com/juju/juju/api" actionapi "github.com/juju/juju/api/action" @@ -81,11 +80,6 @@ return nil } -// SetFlags implements Command.SetFlags. 
-func (c *collectMetricsCommand) SetFlags(f *gnuflag.FlagSet) { - c.ModelCommandBase.SetFlags(f) -} - type runClient interface { action.APIClient Run(run params.RunParams) ([]params.ActionResult, error) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/metricsdebug/metricsdebug.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/metricsdebug/metricsdebug.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/metricsdebug/metricsdebug.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/metricsdebug/metricsdebug.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,120 +0,0 @@ -// Copyright 2016 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package metricsdebug - -import ( - "encoding/json" - "fmt" - "strings" - "text/tabwriter" - "time" - - "github.com/juju/cmd" - "github.com/juju/errors" - "gopkg.in/juju/names.v2" - "launchpad.net/gnuflag" - - "github.com/juju/juju/api/metricsdebug" - "github.com/juju/juju/apiserver/params" - "github.com/juju/juju/cmd/modelcmd" -) - -const debugMetricsDoc = ` -Display recently collected metrics and exit -` - -// DebugMetricsCommand retrieves metrics stored in the juju controller. -type DebugMetricsCommand struct { - modelcmd.ModelCommandBase - Json bool - Tag names.Tag - Count int -} - -// New creates a new DebugMetricsCommand. -func New() cmd.Command { - return modelcmd.Wrap(&DebugMetricsCommand{}) -} - -// Info implements Command.Info. 
-func (c *DebugMetricsCommand) Info() *cmd.Info { - return &cmd.Info{ - Name: "debug-metrics", - Args: "[application or unit]", - Purpose: "Retrieve metrics collected by the given unit/application.", - Doc: debugMetricsDoc, - } -} - -// Init reads and verifies the cli arguments for the DebugMetricsCommand -func (c *DebugMetricsCommand) Init(args []string) error { - if len(args) == 0 { - return errors.New("you need to specify a unit or application.") - } - if names.IsValidUnit(args[0]) { - c.Tag = names.NewUnitTag(args[0]) - } else if names.IsValidApplication(args[0]) { - c.Tag = names.NewApplicationTag(args[0]) - } else { - return errors.Errorf("%q is not a valid unit or application", args[0]) - } - if err := cmd.CheckEmpty(args[1:]); err != nil { - return errors.Errorf("unknown command line arguments: " + strings.Join(args, ",")) - } - return nil -} - -// SetFlags implements Command.SetFlags. -func (c *DebugMetricsCommand) SetFlags(f *gnuflag.FlagSet) { - c.ModelCommandBase.SetFlags(f) - f.IntVar(&c.Count, "n", 0, "Number of metrics to retrieve") - f.BoolVar(&c.Json, "json", false, "Output metrics as json") -} - -type GetMetricsClient interface { - GetMetrics(tag string) ([]params.MetricResult, error) - Close() error -} - -var newClient = func(env modelcmd.ModelCommandBase) (GetMetricsClient, error) { - state, err := env.NewAPIRoot() - if err != nil { - return nil, errors.Trace(err) - } - return metricsdebug.NewClient(state), nil -} - -// Run implements Command.Run. 
-func (c *DebugMetricsCommand) Run(ctx *cmd.Context) error { - client, err := newClient(c.ModelCommandBase) - if err != nil { - return errors.Trace(err) - } - metrics, err := client.GetMetrics(c.Tag.String()) - if err != nil { - return errors.Trace(err) - } - defer client.Close() - if len(metrics) == 0 { - return nil - } - if c.Count > 0 && len(metrics) > c.Count { - metrics = metrics[:c.Count] - } - if c.Json { - b, err := json.MarshalIndent(metrics, "", " ") - if err != nil { - return errors.Trace(err) - } - fmt.Fprintf(ctx.Stdout, string(b)) - return nil - } - tw := tabwriter.NewWriter(ctx.Stdout, 0, 1, 1, ' ', 0) - fmt.Fprintf(tw, "TIME\tMETRIC\tVALUE\n") - for _, m := range metrics { - fmt.Fprintf(tw, "%v\t%v\t%v\n", m.Time.Format(time.RFC3339), m.Key, m.Value) - } - tw.Flush() - return nil -} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/metricsdebug/metricsdebug_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/metricsdebug/metricsdebug_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/metricsdebug/metricsdebug_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/metricsdebug/metricsdebug_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,196 +0,0 @@ -// Copyright 2016 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. 
- -package metricsdebug_test - -import ( - "bytes" - "fmt" - stdtesting "testing" - "text/tabwriter" - "time" - - "github.com/juju/cmd/cmdtesting" - "github.com/juju/testing" - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" - - "github.com/juju/juju/apiserver/params" - "github.com/juju/juju/cmd/juju/metricsdebug" - "github.com/juju/juju/cmd/modelcmd" - jujutesting "github.com/juju/juju/juju/testing" - "github.com/juju/juju/state" - coretesting "github.com/juju/juju/testing" - "github.com/juju/juju/testing/factory" -) - -func TestPackage(t *stdtesting.T) { - coretesting.MgoTestPackage(t) -} - -type MockGetMetricsClient struct { - testing.Stub -} - -func (m *MockGetMetricsClient) GetMetrics(tag string) ([]params.MetricResult, error) { - m.Stub.MethodCall(m, "GetMetrics", tag) - return nil, nil -} -func (m *MockGetMetricsClient) Close() error { - m.Stub.MethodCall(m, "Close") - return nil -} - -type DebugMetricsMockSuite struct { - coretesting.FakeJujuXDGDataHomeSuite -} - -var _ = gc.Suite(&DebugMetricsMockSuite{}) - -func (s *DebugMetricsMockSuite) TestUnit(c *gc.C) { - client := MockGetMetricsClient{testing.Stub{}} - s.PatchValue(metricsdebug.NewClient, func(_ modelcmd.ModelCommandBase) (metricsdebug.GetMetricsClient, error) { - return &client, nil - }) - _, err := coretesting.RunCommand(c, metricsdebug.New(), "metered/0") - c.Assert(err, jc.ErrorIsNil) - client.CheckCall(c, 0, "GetMetrics", "unit-metered-0") -} - -func (s *DebugMetricsMockSuite) TestService(c *gc.C) { - client := MockGetMetricsClient{testing.Stub{}} - s.PatchValue(metricsdebug.NewClient, func(_ modelcmd.ModelCommandBase) (metricsdebug.GetMetricsClient, error) { - return &client, nil - }) - _, err := coretesting.RunCommand(c, metricsdebug.New(), "metered") - c.Assert(err, jc.ErrorIsNil) - client.CheckCall(c, 0, "GetMetrics", "application-metered") -} - -func (s *DebugMetricsMockSuite) TestNotValidServiceOrUnit(c *gc.C) { - client := MockGetMetricsClient{testing.Stub{}} - 
s.PatchValue(metricsdebug.NewClient, func(_ modelcmd.ModelCommandBase) (metricsdebug.GetMetricsClient, error) { - return &client, nil - }) - _, err := coretesting.RunCommand(c, metricsdebug.New(), "!!!!!!") - c.Assert(err, gc.ErrorMatches, `"!!!!!!" is not a valid unit or application`) -} - -type DebugMetricsCommandSuite struct { - jujutesting.JujuConnSuite -} - -var _ = gc.Suite(&DebugMetricsCommandSuite{}) - -func (s *DebugMetricsCommandSuite) TestDebugNoArgs(c *gc.C) { - _, err := coretesting.RunCommand(c, metricsdebug.New()) - c.Assert(err, gc.ErrorMatches, "you need to specify a unit or application.") -} - -func (s *DebugMetricsCommandSuite) TestUnits(c *gc.C) { - meteredCharm := s.Factory.MakeCharm(c, &factory.CharmParams{Name: "metered", URL: "local:quantal/metered"}) - meteredService := s.Factory.MakeApplication(c, &factory.ApplicationParams{Charm: meteredCharm}) - unit := s.Factory.MakeUnit(c, &factory.UnitParams{Application: meteredService, SetCharmURL: true}) - unit2 := s.Factory.MakeUnit(c, &factory.UnitParams{Application: meteredService, SetCharmURL: true}) - newTime := time.Now().Round(time.Second) - metricA := state.Metric{"pings", "5", newTime} - metricB := state.Metric{"pings", "10.5", newTime} - s.Factory.MakeMetric(c, &factory.MetricParams{Unit: unit, Metrics: []state.Metric{metricA}}) - s.Factory.MakeMetric(c, &factory.MetricParams{Unit: unit2, Metrics: []state.Metric{metricA, metricB}}) - outputTime := newTime.Format(time.RFC3339) - expectedOutput := bytes.Buffer{} - tw := tabwriter.NewWriter(&expectedOutput, 0, 1, 1, ' ', 0) - fmt.Fprintf(tw, "TIME\tMETRIC\tVALUE\n") - fmt.Fprintf(tw, "%v\tpings\t5\n", outputTime) - fmt.Fprintf(tw, "%v\tpings\t10.5\n", outputTime) - tw.Flush() - ctx, err := coretesting.RunCommand(c, metricsdebug.New(), "metered/1") - c.Assert(err, jc.ErrorIsNil) - c.Assert(cmdtesting.Stdout(ctx), gc.Equals, expectedOutput.String()) -} - -func (s *DebugMetricsCommandSuite) TestServiceWithNoption(c *gc.C) { - meteredCharm := 
s.Factory.MakeCharm(c, &factory.CharmParams{Name: "metered", URL: "local:quantal/metered"}) - meteredService := s.Factory.MakeApplication(c, &factory.ApplicationParams{Charm: meteredCharm}) - unit := s.Factory.MakeUnit(c, &factory.UnitParams{Application: meteredService, SetCharmURL: true}) - unit2 := s.Factory.MakeUnit(c, &factory.UnitParams{Application: meteredService, SetCharmURL: true}) - newTime := time.Now().Round(time.Second) - metricA := state.Metric{"pings", "5", newTime} - metricB := state.Metric{"pings", "10.5", newTime} - s.Factory.MakeMetric(c, &factory.MetricParams{Unit: unit, Metrics: []state.Metric{metricA}}) - s.Factory.MakeMetric(c, &factory.MetricParams{Unit: unit2, Metrics: []state.Metric{metricA, metricB}}) - outputTime := newTime.Format(time.RFC3339) - expectedOutput := bytes.Buffer{} - tw := tabwriter.NewWriter(&expectedOutput, 0, 1, 1, ' ', 0) - fmt.Fprintf(tw, "TIME\tMETRIC\tVALUE\n") - fmt.Fprintf(tw, "%v\tpings\t5\n", outputTime) - fmt.Fprintf(tw, "%v\tpings\t5\n", outputTime) - tw.Flush() - ctx, err := coretesting.RunCommand(c, metricsdebug.New(), "metered", "-n", "2") - c.Assert(err, jc.ErrorIsNil) - c.Assert(cmdtesting.Stdout(ctx), gc.Equals, expectedOutput.String()) -} - -func (s *DebugMetricsCommandSuite) TestServiceWithNoptionGreaterThanMetricCount(c *gc.C) { - meteredCharm := s.Factory.MakeCharm(c, &factory.CharmParams{Name: "metered", URL: "local:quantal/metered"}) - meteredService := s.Factory.MakeApplication(c, &factory.ApplicationParams{Charm: meteredCharm}) - unit := s.Factory.MakeUnit(c, &factory.UnitParams{Application: meteredService, SetCharmURL: true}) - unit2 := s.Factory.MakeUnit(c, &factory.UnitParams{Application: meteredService, SetCharmURL: true}) - newTime := time.Now().Round(time.Second) - metricA := state.Metric{"pings", "5", newTime} - metricB := state.Metric{"pings", "10.5", newTime} - s.Factory.MakeMetric(c, &factory.MetricParams{Unit: unit, Metrics: []state.Metric{metricA}}) - s.Factory.MakeMetric(c, 
&factory.MetricParams{Unit: unit2, Metrics: []state.Metric{metricA, metricB}}) - outputTime := newTime.Format(time.RFC3339) - expectedOutput := bytes.Buffer{} - tw := tabwriter.NewWriter(&expectedOutput, 0, 1, 1, ' ', 0) - fmt.Fprintf(tw, "TIME\tMETRIC\tVALUE\n") - fmt.Fprintf(tw, "%v\tpings\t5\n", outputTime) - fmt.Fprintf(tw, "%v\tpings\t5\n", outputTime) - fmt.Fprintf(tw, "%v\tpings\t10.5\n", outputTime) - tw.Flush() - ctx, err := coretesting.RunCommand(c, metricsdebug.New(), "metered", "-n", "42") - c.Assert(err, jc.ErrorIsNil) - c.Assert(cmdtesting.Stdout(ctx), gc.Equals, expectedOutput.String()) -} - -func (s *DebugMetricsCommandSuite) TestNoMetrics(c *gc.C) { - meteredCharm := s.Factory.MakeCharm(c, &factory.CharmParams{Name: "metered", URL: "local:quantal/metered"}) - meteredService := s.Factory.MakeApplication(c, &factory.ApplicationParams{Charm: meteredCharm}) - s.Factory.MakeUnit(c, &factory.UnitParams{Application: meteredService, SetCharmURL: true}) - ctx, err := coretesting.RunCommand(c, metricsdebug.New(), "metered", "-n", "2") - c.Assert(err, jc.ErrorIsNil) - c.Assert(cmdtesting.Stdout(ctx), gc.Equals, "") -} - -func (s *DebugMetricsCommandSuite) TestUnitJsonOutput(c *gc.C) { - meteredCharm := s.Factory.MakeCharm(c, &factory.CharmParams{Name: "metered", URL: "local:quantal/metered"}) - meteredService := s.Factory.MakeApplication(c, &factory.ApplicationParams{Charm: meteredCharm}) - unit := s.Factory.MakeUnit(c, &factory.UnitParams{Application: meteredService, SetCharmURL: true}) - newTime := time.Now().Round(time.Second) - metricA := state.Metric{"pings", "5", newTime} - metricB := state.Metric{"pings", "10.5", newTime} - s.Factory.MakeMetric(c, &factory.MetricParams{Unit: unit, Metrics: []state.Metric{metricA}}) - s.Factory.MakeMetric(c, &factory.MetricParams{Unit: unit, Metrics: []state.Metric{metricA, metricB}}) - outputTime := newTime.Format(time.RFC3339) - expectedOutput := fmt.Sprintf(`[ - { - "time": "%v", - "key": "pings", - "value": "5" - }, 
- { - "time": "%v", - "key": "pings", - "value": "5" - }, - { - "time": "%v", - "key": "pings", - "value": "10.5" - } -]`, outputTime, outputTime, outputTime) - ctx, err := coretesting.RunCommand(c, metricsdebug.New(), "metered/0", "--json") - c.Assert(err, jc.ErrorIsNil) - c.Assert(cmdtesting.Stdout(ctx), gc.Equals, expectedOutput) -} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/metricsdebug/metrics.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/metricsdebug/metrics.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/metricsdebug/metrics.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/metricsdebug/metrics.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,150 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package metricsdebug + +import ( + "fmt" + "io" + "time" + + "github.com/gosuri/uitable" + "github.com/juju/cmd" + "github.com/juju/errors" + "github.com/juju/gnuflag" + "gopkg.in/juju/names.v2" + + "github.com/juju/juju/api/metricsdebug" + "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/cmd/modelcmd" +) + +const metricsDoc = ` +Display recently collected metrics. +` + +// MetricsCommand retrieves metrics stored in the juju controller. +type MetricsCommand struct { + modelcmd.ModelCommandBase + out cmd.Output + + Tags []string + All bool +} + +// New creates a new MetricsCommand. +func New() cmd.Command { + return modelcmd.Wrap(&MetricsCommand{}) +} + +// Info implements Command.Info. 
+func (c *MetricsCommand) Info() *cmd.Info { + return &cmd.Info{ + Name: "metrics", + Args: "[tag1[...tagN]]", + Purpose: "Retrieve metrics collected by specified entities.", + Doc: metricsDoc, + } +} + +// Init reads and verifies the cli arguments for the MetricsCommand +func (c *MetricsCommand) Init(args []string) error { + if !c.All && len(args) == 0 { + return errors.New("you need to specify at least one unit or application") + } else if c.All && len(args) > 0 { + return errors.New("cannot use --all with additional entities") + } + c.Tags = make([]string, len(args)) + for i, arg := range args { + if names.IsValidUnit(arg) { + c.Tags[i] = names.NewUnitTag(arg).String() + } else if names.IsValidApplication(arg) { + c.Tags[i] = names.NewApplicationTag(arg).String() + } else { + return errors.Errorf("%q is not a valid unit or application", args[0]) + } + } + return nil +} + +// SetFlags implements cmd.Command.SetFlags. +func (c *MetricsCommand) SetFlags(f *gnuflag.FlagSet) { + c.ModelCommandBase.SetFlags(f) + c.out.AddFlags(f, "tabular", map[string]cmd.Formatter{ + "tabular": formatTabular, + "json": cmd.FormatJson, + "yaml": cmd.FormatYaml, + }) + f.BoolVar(&c.All, "all", false, "retrieve metrics collected by all units in the model") +} + +type GetMetricsClient interface { + GetMetrics(tags ...string) ([]params.MetricResult, error) + Close() error +} + +var newClient = func(env modelcmd.ModelCommandBase) (GetMetricsClient, error) { + state, err := env.NewAPIRoot() + if err != nil { + return nil, errors.Trace(err) + } + return metricsdebug.NewClient(state), nil +} + +type metric struct { + Unit string `json:"unit" yaml:"unit"` + Timestamp time.Time `json:"timestamp" yaml:"timestamp"` + Metric string `json:"metric" yaml:"metric"` + Value string `json:"value" yaml:"value"` +} + +// Run implements Command.Run. 
+func (c *MetricsCommand) Run(ctx *cmd.Context) error { + client, err := newClient(c.ModelCommandBase) + if err != nil { + return errors.Trace(err) + } + var metrics []params.MetricResult + if c.All { + metrics, err = client.GetMetrics() + } else { + metrics, err = client.GetMetrics(c.Tags...) + } + if err != nil { + return errors.Trace(err) + } + defer client.Close() + if len(metrics) == 0 { + return nil + } + results := make([]metric, len(metrics)) + for i, m := range metrics { + results[i] = metric{ + Unit: m.Unit, + Timestamp: m.Time, + Metric: m.Key, + Value: m.Value, + } + } + return errors.Trace(c.out.Write(ctx, results)) +} + +// formatTabular returns a tabular view of collected metrics. +func formatTabular(writer io.Writer, value interface{}) error { + metrics, ok := value.([]metric) + if !ok { + return errors.Errorf("expected value of type %T, got %T", metrics, value) + } + table := uitable.New() + table.MaxColWidth = 50 + table.Wrap = true + for _, col := range []int{1, 2, 3, 4} { + table.RightAlign(col) + } + table.AddRow("UNIT", "TIMESTAMP", "METRIC", "VALUE") + for _, m := range metrics { + table.AddRow(m.Unit, m.Timestamp.Format(time.RFC3339), m.Metric, m.Value) + } + _, err := fmt.Fprint(writer, table.String()) + return errors.Trace(err) +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/metricsdebug/metrics_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/metricsdebug/metrics_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/metricsdebug/metrics_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/metricsdebug/metrics_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,148 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+ +package metricsdebug_test + +import ( + stdtesting "testing" + "time" + + "github.com/juju/cmd/cmdtesting" + "github.com/juju/errors" + "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/cmd/juju/metricsdebug" + "github.com/juju/juju/cmd/modelcmd" + coretesting "github.com/juju/juju/testing" +) + +func TestPackage(t *stdtesting.T) { + gc.TestingT(t) +} + +type mockGetMetricsClient struct { + testing.Stub + metrics []params.MetricResult +} + +func (m *mockGetMetricsClient) GetMetrics(tags ...string) ([]params.MetricResult, error) { + m.AddCall("GetMetrics", tags) + return m.metrics, m.NextErr() +} + +func (m *mockGetMetricsClient) Close() error { + m.AddCall("Close") + return m.NextErr() +} + +type metricsSuite struct { + coretesting.FakeJujuXDGDataHomeSuite + + client *mockGetMetricsClient +} + +var _ = gc.Suite(&metricsSuite{}) + +func (s *metricsSuite) SetUpTest(c *gc.C) { + s.FakeJujuXDGDataHomeSuite.SetUpTest(c) + s.client = &mockGetMetricsClient{Stub: testing.Stub{}} + s.PatchValue(metricsdebug.NewClient, func(_ modelcmd.ModelCommandBase) (metricsdebug.GetMetricsClient, error) { + return s.client, nil + }) +} + +func (s *metricsSuite) TestDefaultTabulatFormat(c *gc.C) { + s.client.metrics = []params.MetricResult{{ + Unit: "unit-metered-0", + Key: "pings", + Value: "5.0", + Time: time.Date(2016, 8, 22, 12, 02, 03, 0, time.UTC), + }, { + Unit: "unit-metered-0", + Key: "pongs", + Value: "15.0", + Time: time.Date(2016, 8, 22, 12, 02, 04, 0, time.UTC), + }} + ctx, err := coretesting.RunCommand(c, metricsdebug.New(), "metered/0") + c.Assert(err, jc.ErrorIsNil) + s.client.CheckCall(c, 0, "GetMetrics", []string{"unit-metered-0"}) + c.Assert(cmdtesting.Stdout(ctx), gc.Equals, `UNIT TIMESTAMP METRIC VALUE +unit-metered-0 2016-08-22T12:02:03Z pings 5.0 +unit-metered-0 2016-08-22T12:02:04Z pongs 15.0 +`) +} + +func (s *metricsSuite) TestJSONFormat(c *gc.C) { + 
s.client.metrics = []params.MetricResult{{ + Unit: "unit-metered-0", + Key: "pings", + Value: "5.0", + Time: time.Date(2016, 8, 22, 12, 02, 03, 0, time.UTC), + }, { + Unit: "unit-metered-0", + Key: "pongs", + Value: "15.0", + Time: time.Date(2016, 8, 22, 12, 02, 04, 0, time.UTC), + }} + ctx, err := coretesting.RunCommand(c, metricsdebug.New(), "metered", "--format", "json") + c.Assert(err, jc.ErrorIsNil) + s.client.CheckCall(c, 0, "GetMetrics", []string{"application-metered"}) + c.Assert(cmdtesting.Stdout(ctx), gc.Equals, `[{"unit":"unit-metered-0","timestamp":"2016-08-22T12:02:03Z","metric":"pings","value":"5.0"},{"unit":"unit-metered-0","timestamp":"2016-08-22T12:02:04Z","metric":"pongs","value":"15.0"}] +`) +} + +func (s *metricsSuite) TestYAMLFormat(c *gc.C) { + s.client.metrics = []params.MetricResult{{ + Unit: "unit-metered-0", + Key: "pings", + Value: "5.0", + Time: time.Date(2016, 8, 22, 12, 02, 03, 0, time.UTC), + }, { + Unit: "unit-metered-0", + Key: "pongs", + Value: "15.0", + Time: time.Date(2016, 8, 22, 12, 02, 04, 0, time.UTC), + }} + ctx, err := coretesting.RunCommand(c, metricsdebug.New(), "metered", "--format", "yaml") + c.Assert(err, jc.ErrorIsNil) + s.client.CheckCall(c, 0, "GetMetrics", []string{"application-metered"}) + c.Assert(cmdtesting.Stdout(ctx), gc.Equals, `- unit: unit-metered-0 + timestamp: 2016-08-22T12:02:03Z + metric: pings + value: "5.0" +- unit: unit-metered-0 + timestamp: 2016-08-22T12:02:04Z + metric: pongs + value: "15.0" +`) +} + +func (s *metricsSuite) TestAll(c *gc.C) { + _, err := coretesting.RunCommand(c, metricsdebug.New(), "--all") + c.Assert(err, jc.ErrorIsNil) + s.client.CheckCall(c, 0, "GetMetrics", []string(nil)) +} + +func (s *metricsSuite) TestAllWithExtraArgs(c *gc.C) { + _, err := coretesting.RunCommand(c, metricsdebug.New(), "--all", "metered") + c.Assert(err, gc.ErrorMatches, "cannot use --all with additional entities") +} + +func (s *metricsSuite) TestInvalidUnitName(c *gc.C) { + _, err := 
coretesting.RunCommand(c, metricsdebug.New(), "metered-/0") + c.Assert(err, gc.ErrorMatches, `"metered-/0" is not a valid unit or application`) +} + +func (s *metricsSuite) TestAPIClientError(c *gc.C) { + s.client.SetErrors(errors.New("a silly error")) + _, err := coretesting.RunCommand(c, metricsdebug.New(), "metered/0") + c.Assert(err, gc.ErrorMatches, `a silly error`) +} + +func (s *metricsSuite) TestNoArgs(c *gc.C) { + _, err := coretesting.RunCommand(c, metricsdebug.New()) + c.Assert(err, gc.ErrorMatches, "you need to specify at least one unit or application") +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/model/configcommand.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/model/configcommand.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/model/configcommand.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/model/configcommand.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,373 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. +package model + +import ( + "bytes" + "io" + "sort" + "strings" + + "github.com/juju/cmd" + "github.com/juju/errors" + "github.com/juju/gnuflag" + "github.com/juju/utils/keyvalues" + + "github.com/juju/juju/api/modelconfig" + "github.com/juju/juju/cmd/juju/block" + "github.com/juju/juju/cmd/modelcmd" + "github.com/juju/juju/cmd/output" + "github.com/juju/juju/environs/config" +) + +const ( + modelConfigSummary = "Displays or sets configuration values on a model." + modelConfigHelpDoc = ` +By default, all configuration (keys, source, and values) for the current model +are displayed. + +Supplying one key name returns only the value for the key. Supplying key=value +will set the supplied key to the supplied value, this can be repeated for +multiple keys. 
+ +Examples + juju model-config default-series + juju model-config -m mycontroller:mymodel + juju model-config ftp-proxy=10.0.0.1:8000 + juju model-config -m othercontroller:mymodel default-series=yakkety test-mode=false + juju model-config --reset default-series test-mode + +See also: + models + model-defaults +` +) + +// NewConfigCommand wraps configCommand with sane model settings. +func NewConfigCommand() cmd.Command { + return modelcmd.Wrap(&configCommand{}) +} + +type attributes map[string]interface{} + +// configCommand is the simplified command for accessing and setting +// attributes related to model configuration. +type configCommand struct { + api configCommandAPI + modelcmd.ModelCommandBase + out cmd.Output + + action func(configCommandAPI, *cmd.Context) error // The action which we want to handle, set in cmd.Init. + keys []string + reset []string // Holds the keys to be reset until parsed. + resetKeys []string // Holds the keys to be reset once parsed. + values attributes +} + +// configCommandAPI defines an API interface to be used during testing. +type configCommandAPI interface { + Close() error + ModelGet() (map[string]interface{}, error) + ModelGetWithMetadata() (config.ConfigValues, error) + ModelSet(config map[string]interface{}) error + ModelUnset(keys ...string) error +} + +// Info implements part of the cmd.Command interface. +func (c *configCommand) Info() *cmd.Info { + return &cmd.Info{ + Args: "[[<=value>] ...]", + Doc: modelConfigHelpDoc, + Name: "model-config", + Purpose: modelConfigSummary, + } +} + +// SetFlags implements part of the cmd.Command interface. 
+func (c *configCommand) SetFlags(f *gnuflag.FlagSet) { + c.ModelCommandBase.SetFlags(f) + + c.out.AddFlags(f, "tabular", map[string]cmd.Formatter{ + "json": cmd.FormatJson, + "tabular": formatConfigTabular, + "yaml": cmd.FormatYaml, + }) + f.Var(cmd.NewAppendStringsValue(&c.reset), "reset", "Reset the provided comma delimited keys") +} + +// Init implements part of the cmd.Command interface. +func (c *configCommand) Init(args []string) error { + // If there are arguments provided to reset, we turn it into a slice of + // strings and verify them. If there is one or more valid keys to reset and + // no other errors initalizing the command, c.resetDefaults will be called + // in c.Run. + if err := c.parseResetKeys(); err != nil { + return errors.Trace(err) + } + + switch len(args) { + case 0: + return c.handleZeroArgs() + case 1: + return c.handleOneArg(args[0]) + default: + return c.handleArgs(args) + } +} + +// handleZeroArgs handles the case where there are no positional args. +func (c *configCommand) handleZeroArgs() error { + // If reset is empty we're getting configuration + if len(c.reset) == 0 { + c.action = c.getConfig + } + // Otherwise we're going to reset args. + return nil +} + +// handleOneArg handles the case where there is one positional arg. +func (c *configCommand) handleOneArg(arg string) error { + if strings.Contains(arg, "=") { + return c.parseSetKeys([]string{arg}) + } + // If we are not setting a value, then we are retrieving one so we need to + // make sure that we are not resetting because it is not valid to get and + // reset simultaneously. + if len(c.reset) > 0 { + return errors.New("cannot set and retrieve model values simultaneously") + } + c.keys = []string{arg} + c.action = c.getConfig + return nil + +} + +// handleArgs handles the case where there's more than one positional arg. 
func (c *configCommand) handleArgs(args []string) error {
	err := c.parseSetKeys(args)
	if err != nil {
		// Distinguish several bare keys (an attempted multi-get, which is not
		// supported) from a genuinely malformed key=value list.
		if !strings.Contains(strings.Join(args, " "), "=") {
			return errors.New("can only retrieve a single value, or all values")
		}
		return errors.Trace(err)
	}
	return nil
}

// parseSetKeys iterates over the args and makes sure that the key=value pairs
// are valid. It also checks that the same key isn't being reset.
func (c *configCommand) parseSetKeys(args []string) error {
	options, err := keyvalues.Parse(args, true)
	if err != nil {
		return errors.Trace(err)
	}
	c.values = make(attributes)
	for k, v := range options {
		// agent-version has a dedicated command; refuse to set it here.
		if k == config.AgentVersionKey {
			return errors.Errorf(`agent-version must be set via "upgrade-juju"`)
		}
		c.values[k] = v
	}

	// A key may not appear both as a set target and in --reset.
	for _, k := range c.resetKeys {
		if _, ok := c.values[k]; ok {
			return errors.Errorf(
				"key %q cannot be both set and reset in the same command", k)
		}
	}

	c.action = c.setConfig
	return nil
}

// parseResetKeys splits the keys provided to --reset after trimming any
// leading or trailing comma. It then verifies that we haven't incorrectly
// received any key=value pairs and finally sets the value(s) on c.resetKeys.
func (c *configCommand) parseResetKeys() error {
	if len(c.reset) == 0 {
		return nil
	}
	var resetKeys []string
	for _, value := range c.reset {
		keys := strings.Split(strings.Trim(value, ","), ",")
		resetKeys = append(resetKeys, keys...)
	}

	for _, k := range resetKeys {
		if k == config.AgentVersionKey {
			return errors.Errorf("%q cannot be reset", config.AgentVersionKey)
		}
		if strings.Contains(k, "=") {
			return errors.Errorf(
				`--reset accepts a comma delimited set of keys "a,b,c", received: %q`, k)
		}
	}
	c.resetKeys = resetKeys
	return nil
}

// getAPI returns the API. This allows passing in a test configCommandAPI
// implementation.
func (c *configCommand) getAPI() (configCommandAPI, error) {
	// A non-nil c.api is a test double injected by the test suite.
	if c.api != nil {
		return c.api, nil
	}
	api, err := c.NewAPIRoot()
	if err != nil {
		return nil, errors.Annotate(err, "opening API connection")
	}
	client := modelconfig.NewClient(api)
	return client, nil
}

// Run implements the meaty part of the cmd.Command interface.
func (c *configCommand) Run(ctx *cmd.Context) error {
	client, err := c.getAPI()
	if err != nil {
		return err
	}
	defer client.Close()

	// Resets are applied first, independently of any get/set action.
	if len(c.resetKeys) > 0 {
		err := c.resetConfig(client, ctx)
		if err != nil {
			// We return this error naked as it is almost certainly going to be
			// cmd.ErrSilent and the cmd.Command framework expects that back
			// from cmd.Run if the process is blocked.
			return err
		}
	}
	if c.action == nil {
		// If we are reset only we end up here, only we've already done that.
		return nil
	}
	return c.action(client, ctx)
}

// resetConfig unsets the keys provided to the command.
func (c *configCommand) resetConfig(client configCommandAPI, ctx *cmd.Context) error {
	// ctx unused in this method
	if err := c.verifyKnownKeys(client); err != nil {
		return errors.Trace(err)
	}

	return block.ProcessBlockedError(client.ModelUnset(c.resetKeys...), block.BlockChange)
}

// setConfig sets the provided key/value pairs on the model.
func (c *configCommand) setConfig(client configCommandAPI, ctx *cmd.Context) error {
	// ctx unused in this method.
	envAttrs, err := client.ModelGet()
	if err != nil {
		return err
	}
	// Unknown keys are only warned about, not rejected: the set still
	// proceeds so new keys can be introduced.
	for key := range c.values {
		if _, exists := envAttrs[key]; !exists {
			logger.Warningf("key %q is not defined in the current model configuration: possible misspelling", key)
		}

	}
	return block.ProcessBlockedError(client.ModelSet(c.values), block.BlockChange)
}

// get writes the value of a single key or the full output for the model to the cmd.Context.
+func (c *configCommand) getConfig(client configCommandAPI, ctx *cmd.Context) error { + attrs, err := client.ModelGetWithMetadata() + if err != nil { + return err + } + + for attrName := range attrs { + // We don't want model attributes included, these are available + // via show-model. + if c.isModelAttrbute(attrName) { + delete(attrs, attrName) + } + } + + if len(c.keys) == 1 { + key := c.keys[0] + if value, found := attrs[key]; found { + if c.out.Name() == "tabular" { + return cmd.FormatYaml(ctx.Stdout, value.Value) + } + attrs = config.ConfigValues{ + key: config.ConfigValue{ + Source: value.Source, + Value: value.Value, + }, + } + } else { + return errors.Errorf("key %q not found in %q model.", key, attrs["name"]) + } + } + return c.out.Write(ctx, attrs) +} + +// verifyKnownKeys is a helper to validate the keys we are operating with +// against the set of known attributes from the model. +func (c *configCommand) verifyKnownKeys(client configCommandAPI) error { + known, err := client.ModelGet() + if err != nil { + return errors.Trace(err) + } + + allKeys := c.resetKeys[:] + for k := range c.values { + allKeys = append(allKeys, k) + } + + for _, key := range allKeys { + // check if the key exists in the known config + // and warn the user if the key is not defined + if _, exists := known[key]; !exists { + logger.Warningf( + "key %q is not defined in the current model configuration: possible misspelling", key) + } + } + return nil +} + +// isModelAttribute returns if the supplied attribute is a valid model +// attribute. +func (c *configCommand) isModelAttrbute(attr string) bool { + switch attr { + case config.NameKey, config.TypeKey, config.UUIDKey: + return true + } + return false +} + +// formatConfigTabular writes a tabular summary of config information. 
+func formatConfigTabular(writer io.Writer, value interface{}) error { + configValues, ok := value.(config.ConfigValues) + if !ok { + return errors.Errorf("expected value of type %T, got %T", configValues, value) + } + + tw := output.TabWriter(writer) + w := output.Wrapper{tw} + + var valueNames []string + for name := range configValues { + valueNames = append(valueNames, name) + } + sort.Strings(valueNames) + w.Println("Attribute", "From", "Value") + + for _, name := range valueNames { + info := configValues[name] + out := &bytes.Buffer{} + err := cmd.FormatYaml(out, info.Value) + if err != nil { + return errors.Annotatef(err, "formatting value for %q", name) + } + // Some attribute values have a newline appended + // which makes the output messy. + valString := strings.TrimSuffix(out.String(), "\n") + w.Println(name, info.Source, valString) + } + + tw.Flush() + return nil +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/model/configcommand_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/model/configcommand_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/model/configcommand_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/model/configcommand_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,211 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. +package model_test + +import ( + "github.com/juju/cmd" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/apiserver/common" + "github.com/juju/juju/cmd/juju/model" + "github.com/juju/juju/testing" +) + +type ConfigCommandSuite struct { + fakeEnvSuite +} + +var _ = gc.Suite(&ConfigCommandSuite{}) + +func (s *ConfigCommandSuite) run(c *gc.C, args ...string) (*cmd.Context, error) { + command := model.NewConfigCommandForTest(s.fake) + return testing.RunCommand(c, command, args...) 
+} + +func (s *ConfigCommandSuite) TestInit(c *gc.C) { + for i, test := range []struct { + desc string + args []string + errorMatch string + nilErr bool + }{ + { // Test set + desc: "keys cannot be duplicates", + args: []string{"special=extra", "special=other"}, + errorMatch: `key "special" specified more than once`, + }, { + desc: "agent-version cannot be set", + args: []string{"agent-version=2.0.0"}, + errorMatch: `agent-version must be set via "upgrade-juju"`, + }, { + // Test reset + desc: "reset requires arg", + args: []string{"--reset"}, + errorMatch: "flag needs an argument: --reset", + }, { + desc: "cannot set and retrieve at the same time", + args: []string{"--reset", "something", "weird"}, + errorMatch: "cannot set and retrieve model values simultaneously", + }, { + desc: "agent-version cannot be reset", + args: []string{"--reset", "agent-version"}, + errorMatch: `"agent-version" cannot be reset`, + }, { + desc: "set and reset cannot have duplicate keys", + args: []string{"--reset", "special", "special=extra"}, + errorMatch: `key "special" cannot be both set and reset in the same command`, + }, { + desc: "reset cannot have k=v pairs", + args: []string{"--reset", "a,b,c=d,e"}, + errorMatch: `--reset accepts a comma delimited set of keys "a,b,c", received: "c=d"`, + }, { + // Test get + desc: "get all succeds", + args: nil, + nilErr: true, + }, { + desc: "get one succeeds", + args: []string{"one"}, + nilErr: true, + }, { + desc: "get multiple fails", + args: []string{"one", "two"}, + errorMatch: "can only retrieve a single value, or all values", + }, { + // test variations + desc: "test reset interspersed", + args: []string{"--reset", "one", "special=foo", "--reset", "two"}, + nilErr: true, + }, + } { + c.Logf("test %d: %s", i, test.desc) + cmd := model.NewConfigCommandForTest(s.fake) + err := testing.InitCommand(cmd, test.args) + if test.nilErr { + c.Check(err, jc.ErrorIsNil) + continue + } + c.Check(err, gc.ErrorMatches, test.errorMatch) + } +} + +func (s 
*ConfigCommandSuite) TestSingleValue(c *gc.C) { + context, err := s.run(c, "special") + c.Assert(err, jc.ErrorIsNil) + + output := testing.Stdout(context) + c.Assert(output, gc.Equals, "special value\n") +} + +func (s *ConfigCommandSuite) TestGetUnknownValue(c *gc.C) { + context, err := s.run(c, "unknown") + c.Assert(err, gc.ErrorMatches, `key "unknown" not found in { ""} model.`) + + output := testing.Stdout(context) + c.Assert(output, gc.Equals, "") +} + +func (s *ConfigCommandSuite) TestSingleValueJSON(c *gc.C) { + context, err := s.run(c, "--format=json", "special") + c.Assert(err, jc.ErrorIsNil) + + want := "{\"special\":{\"Value\":\"special value\",\"Source\":\"model\"}}\n" + output := testing.Stdout(context) + c.Assert(output, gc.Equals, want) +} + +func (s *ConfigCommandSuite) TestSingleValueYAML(c *gc.C) { + context, err := s.run(c, "--format=yaml", "special") + c.Assert(err, jc.ErrorIsNil) + + want := "" + + "special:\n" + + " value: special value\n" + + " source: model\n" + + output := testing.Stdout(context) + c.Assert(output, gc.Equals, want) +} + +func (s *ConfigCommandSuite) TestAllValuesYAML(c *gc.C) { + context, err := s.run(c, "--format=yaml") + c.Assert(err, jc.ErrorIsNil) + + output := testing.Stdout(context) + expected := "" + + "running:\n" + + " value: true\n" + + " source: model\n" + + "special:\n" + + " value: special value\n" + + " source: model\n" + c.Assert(output, gc.Equals, expected) +} + +func (s *ConfigCommandSuite) TestAllValuesJSON(c *gc.C) { + context, err := s.run(c, "--format=json") + c.Assert(err, jc.ErrorIsNil) + + output := testing.Stdout(context) + expected := `{"running":{"Value":true,"Source":"model"},"special":{"Value":"special value","Source":"model"}}` + "\n" + c.Assert(output, gc.Equals, expected) +} + +func (s *ConfigCommandSuite) TestAllValuesTabular(c *gc.C) { + context, err := s.run(c) + c.Assert(err, jc.ErrorIsNil) + + output := testing.Stdout(context) + expected := "" + + "Attribute From Value\n" + + "running 
model true\n" + + "special model special value\n" + + "\n" + c.Assert(output, gc.Equals, expected) +} + +func (s *ConfigCommandSuite) TestPassesValues(c *gc.C) { + _, err := s.run(c, "special=extra", "unknown=foo") + c.Assert(err, jc.ErrorIsNil) + expected := map[string]interface{}{ + "special": "extra", + "unknown": "foo", + } + c.Assert(s.fake.values, jc.DeepEquals, expected) +} + +func (s *ConfigCommandSuite) TestSettingKnownValue(c *gc.C) { + _, err := s.run(c, "special=extra", "unknown=foo") + c.Assert(err, jc.ErrorIsNil) + // Command succeeds, but warning logged. + expected := `key "unknown" is not defined in the current model configuration: possible misspelling` + c.Check(c.GetTestLog(), jc.Contains, expected) +} + +func (s *ConfigCommandSuite) TestBlockedError(c *gc.C) { + s.fake.err = common.OperationBlockedError("TestBlockedError") + _, err := s.run(c, "special=extra") + testing.AssertOperationWasBlocked(c, err, ".*TestBlockedError.*") +} + +func (s *ConfigCommandSuite) TestResetPassesValues(c *gc.C) { + _, err := s.run(c, "--reset", "special,running") + c.Assert(err, jc.ErrorIsNil) + c.Assert(s.fake.resetKeys, jc.DeepEquals, []string{"special", "running"}) +} + +func (s *ConfigCommandSuite) TestResettingUnKnownValue(c *gc.C) { + _, err := s.run(c, "--reset", "unknown") + c.Assert(err, jc.ErrorIsNil) + c.Assert(s.fake.resetKeys, jc.DeepEquals, []string{"unknown"}) + // Command succeeds, but warning logged. 
+ expected := `key "unknown" is not defined in the current model configuration: possible misspelling` + c.Check(c.GetTestLog(), jc.Contains, expected) +} + +func (s *ConfigCommandSuite) TestResetBlockedError(c *gc.C) { + s.fake.err = common.OperationBlockedError("TestBlockedError") + _, err := s.run(c, "--reset", "special") + testing.AssertOperationWasBlocked(c, err, ".*TestBlockedError.*") +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/model/constraints.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/model/constraints.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/model/constraints.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/model/constraints.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,8 +4,11 @@ package model import ( + "fmt" + "io" + "github.com/juju/cmd" - "launchpad.net/gnuflag" + "github.com/juju/gnuflag" "github.com/juju/juju/cmd/juju/block" "github.com/juju/juju/cmd/modelcmd" @@ -51,7 +54,7 @@ const setConstraintsDocExamples = ` Examples: - juju set-model-constraints cpu-cores=8 mem=16G + juju set-model-constraints cores=8 mem=16G juju set-model-constraints -m mymodel root-disk=64G See also: @@ -100,11 +103,13 @@ return c.NewAPIClient() } -func formatConstraints(value interface{}) ([]byte, error) { - return []byte(value.(constraints.Value).String()), nil +func formatConstraints(writer io.Writer, value interface{}) error { + fmt.Fprint(writer, value.(constraints.Value).String()) + return nil } func (c *modelGetConstraintsCommand) SetFlags(f *gnuflag.FlagSet) { + c.ModelCommandBase.SetFlags(f) c.out.AddFlags(f, "constraints", map[string]cmd.Formatter{ "constraints": formatConstraints, "yaml": cmd.FormatYaml, diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/model/defaultscommand.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/model/defaultscommand.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/model/defaultscommand.go 1970-01-01 
00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/model/defaultscommand.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,626 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. +package model + +import ( + "bytes" + "fmt" + "io" + "sort" + "strings" + + "gopkg.in/juju/names.v2" + + "github.com/juju/cmd" + "github.com/juju/errors" + "github.com/juju/gnuflag" + "github.com/juju/utils/keyvalues" + + "github.com/juju/juju/api" + "github.com/juju/juju/api/base" + cloudapi "github.com/juju/juju/api/cloud" + "github.com/juju/juju/api/modelmanager" + jujucloud "github.com/juju/juju/cloud" + "github.com/juju/juju/cmd/juju/block" + "github.com/juju/juju/cmd/modelcmd" + "github.com/juju/juju/cmd/output" + "github.com/juju/juju/environs/config" +) + +const ( + modelDefaultsSummary = `Displays or sets default configuration settings for a model.` + modelDefaultsHelpDoc = ` +By default, all default configuration (keys and values) are +displayed if a key is not specified. Supplying key=value will set the +supplied key to the supplied value. This can be repeated for multiple keys. +By default, the model is the current model. + + +Examples: + juju model-defaults + juju model-defaults http-proxy + juju model-defaults aws/us-east-1 http-proxy + juju model-defaults us-east-1 http-proxy + juju model-defaults -m mymodel type + juju model-defaults ftp-proxy=10.0.0.1:8000 + juju model-defaults aws/us-east-1 ftp-proxy=10.0.0.1:8000 + juju model-defaults us-east-1 ftp-proxy=10.0.0.1:8000 + juju model-defaults -m othercontroller:mymodel default-series=yakkety test-mode=false + juju model-defaults --reset default-series test-mode + juju model-defaults aws/us-east-1 --reset http-proxy + juju model-defaults us-east-1 --reset http-proxy + +See also: + models + model-config +` +) + +// NewDefaultsCommand wraps defaultsCommand with sane model settings. 
func NewDefaultsCommand() cmd.Command {
	defaultsCmd := &defaultsCommand{
		// API constructors are fields so tests can substitute fakes.
		newCloudAPI: func(caller base.APICallCloser) cloudAPI {
			return cloudapi.NewClient(caller)
		},
		newDefaultsAPI: func(caller base.APICallCloser) defaultsCommandAPI {
			return modelmanager.NewClient(caller)
		},
	}
	defaultsCmd.newAPIRoot = defaultsCmd.NewAPIRoot
	return modelcmd.WrapController(defaultsCmd)
}

// defaultsCommand is a compound command for accessing and setting attributes
// related to default model configuration.
type defaultsCommand struct {
	out cmd.Output
	modelcmd.ControllerCommandBase

	newAPIRoot     func() (api.Connection, error)
	newDefaultsAPI func(base.APICallCloser) defaultsCommandAPI
	newCloudAPI    func(base.APICallCloser) cloudAPI

	action                func(defaultsCommandAPI, *cmd.Context) error // The function handling the input, set in Init.
	key                   string
	resetKeys             []string // Holds the keys to be reset once parsed.
	cloudName, regionName string
	reset                 []string // Holds the keys to be reset until parsed.
	values                attributes
}

// cloudAPI defines an API to be passed in for testing.
type cloudAPI interface {
	Close() error
	DefaultCloud() (names.CloudTag, error)
	Cloud(names.CloudTag) (jujucloud.Cloud, error)
}

// defaultsCommandAPI defines an API to be used during testing.
type defaultsCommandAPI interface {
	// Close closes the api connection.
	Close() error

	// ModelDefaults returns the default config values used when creating a new model.
	ModelDefaults() (config.ModelDefaultAttributes, error)

	// SetModelDefaults sets the default config values to use
	// when creating new models.
	SetModelDefaults(cloud, region string, config map[string]interface{}) error

	// UnsetModelDefaults clears the default model
	// configuration values.
	UnsetModelDefaults(cloud, region string, keys ...string) error
}

// Info implements part of the cmd.Command interface.
func (c *defaultsCommand) Info() *cmd.Info {
	return &cmd.Info{
		Args:    "[[] ][<=value>] ...]",
		Doc:     modelDefaultsHelpDoc,
		Name:    "model-defaults",
		Purpose: modelDefaultsSummary,
	}
}

// SetFlags implements part of the cmd.Command interface.
func (c *defaultsCommand) SetFlags(f *gnuflag.FlagSet) {
	c.ControllerCommandBase.SetFlags(f)

	c.out.AddFlags(f, "tabular", map[string]cmd.Formatter{
		"yaml":    cmd.FormatYaml,
		"json":    cmd.FormatJson,
		"tabular": formatDefaultConfigTabular,
	})
	f.Var(cmd.NewAppendStringsValue(&c.reset), "reset", "Reset the provided comma delimited keys")
}

// Init implements part of the cmd.Command interface.
// This needs to parse a command line invocation to reset and set, or get
// model-default values. The arguments may be interspersed as demonstrated in
// the examples.
//
// This sets foo=baz and unsets bar in aws/us-east-1
// juju model-defaults aws/us-east-1 foo=baz --reset bar
//
// If aws is the cloud of the current or specified controller -- specified by
// -c somecontroller -- then the following would also be equivalent.
// juju model-defaults --reset bar us-east-1 foo=baz
//
// If one doesn't specify a cloud or region the command is still valid but for
// setting the default on the controller:
// juju model-defaults foo=baz --reset bar
//
// Of course one can specify multiple keys to reset --reset a,b,c and one can
// also specify multiple values to set a=b c=d e=f. I.e. comma separated for
// resetting and space separated for setting. One may also only set or reset as
// a singular action.
// juju model-defaults --reset foo
// juju model-defaults a=b c=d e=f
// juju model-defaults a=b c=d --reset e,f
//
// cloud/region may also be specified so above examples with that option might
// be like the following invocation.
// juju model-defaults us-east-1 a=b c=d --reset e,f
//
// Finally one can also ask for all the defaults or the defaults for one
// specific setting. In this case specifying a region is not valid as
// model-defaults shows the settings for a value at all locations that it has a
// default set -- or at a minimum the default and "-" for a controller with no
// value set.
// juju model-defaults
// juju model-defaults no-proxy
//
// It is not valid to reset and get or to set and get values. It is also
// neither valid to reset and set the same key, nor to set the same key to
// different values in the same command.
//
// For those playing along that all means the first positional arg can be a
// cloud/region, a region, a key=value to set, a key to get the settings for,
// or empty. Other caveats are that one cannot set and reset a value for the
// same key, that is to say keys to be mutated must be unique.
//
// Here we go...
func (c *defaultsCommand) Init(args []string) error {
	var err error
	// If there's nothing to reset and no args we're returning everything. So
	// we short circuit immediately.
	if len(args) == 0 && len(c.reset) == 0 {
		c.action = c.getDefaults
		return nil
	}

	// If there is an argument provided to reset, we turn it into a slice of
	// strings and verify them. If there is one or more valid keys to reset and
	// no other errors initializing the command, c.resetDefaults will be called
	// in c.Run.
	if err = c.parseResetKeys(); err != nil {
		return errors.Trace(err)
	}

	// Look at the first positional arg and test to see if it is a valid
	// optional specification of cloud/region or region. If it is then
	// cloudName and regionName are set on the object and the positional args
	// are returned without the first element. If it cannot be validated,
	// cloudName and regionName are left empty and we get back the same args we
	// passed in.
	args, err = c.parseArgsForRegion(args)
	if err != nil {
		return errors.Trace(err)
	}

	// Remember we *might* have one less arg at this point if we chopped the
	// first off because it was a valid cloud/region option.
	switch {
	case len(args) > 0 && strings.Contains(args[len(args)-1], "="):
		// In the event that we are setting values, the final positional arg
		// will always have an "=" in it. So if we see that we know we want to
		// set args.
		return c.handleSetArgs(args)
	case len(args) == 0:
		c.action = c.getDefaults
		return nil
	case len(args) == 1:
		// We want to get settings for the provided key.
		return c.handleOneArg(args[0])
	default: // case args > 1
		// Specifying any non key=value positional args after a key=value pair
		// is invalid input. So if we have more than one the input is almost
		// certainly invalid, but in different possible ways.
		return c.handleExtraArgs(args)
	}

}

// parseResetKeys splits the keys provided to --reset after trimming any
// leading or trailing comma. It then verifies that we haven't incorrectly
// received any key=value pairs and finally sets the value(s) on c.resetKeys.
func (c *defaultsCommand) parseResetKeys() error {
	if len(c.reset) == 0 {
		return nil
	}
	var resetKeys []string
	for _, value := range c.reset {
		keys := strings.Split(strings.Trim(value, ","), ",")
		resetKeys = append(resetKeys, keys...)
	}

	for _, k := range resetKeys {
		if k == config.AgentVersionKey {
			return errors.Errorf("%q cannot be reset", config.AgentVersionKey)
		}
		if strings.Contains(k, "=") {
			return errors.Errorf(
				`--reset accepts a comma delimited set of keys "a,b,c", received: %q`, k)
		}
	}
	c.resetKeys = resetKeys
	return nil
}

// parseArgsForRegion parses args to check if the first arg is a region and
// returns the appropriate remaining args.
func (c *defaultsCommand) parseArgsForRegion(args []string) ([]string, error) {
	var err error
	if len(args) > 0 {
		// determine if the first arg is cloud/region or region and return
		// appropriate positional args.
		args, err = c.parseCloudRegion(args)
		if err != nil {
			return nil, errors.Trace(err)
		}
	}
	return args, nil
}

// parseCloudRegion examines args to see if the first arg is a cloud/region or
// region. If not it returns the full args slice. If it is then it sets cloud
// and/or region on the object and sends the remaining args back to the caller.
func (c *defaultsCommand) parseCloudRegion(args []string) ([]string, error) {
	var cloud, region string
	cr := args[0]
	// Must have no more than one slash and it must not be at the beginning or end.
	if strings.Count(cr, "/") == 1 && !strings.HasPrefix(cr, "/") && !strings.HasSuffix(cr, "/") {
		elems := strings.Split(cr, "/")
		cloud, region = elems[0], elems[1]
	} else {
		region = cr
	}

	// TODO(redir) 2016-10-05 #1627162
	// We don't disallow "=" in region names, but probably should.
	// A key=value arg is definitely not a region; hand it back untouched.
	if strings.Contains(region, "=") {
		return args, nil
	}

	valid, err := c.validCloudRegion(cloud, region)
	if err != nil {
		return nil, errors.Trace(err)
	}
	if !valid {
		return args, nil
	}
	// The first arg was a region spec; consume it.
	return args[1:], nil
}

// validCloudRegion checks that region is a valid region in cloud, or default cloud
// if cloud is not specified.
func (c *defaultsCommand) validCloudRegion(cloudName, region string) (bool, error) {
	var (
		isCloudRegion bool
		cloud         jujucloud.Cloud
		cTag          names.CloudTag
		err           error
	)

	root, err := c.newAPIRoot()
	if err != nil {
		return false, errors.Trace(err)
	}
	cc := c.newCloudAPI(root)
	defer cc.Close()

	// No cloud name means "the controller's default cloud".
	if cloudName == "" {
		cTag, err = cc.DefaultCloud()
		if err != nil {
			return false, errors.Trace(err)
		}
	} else {
		if !names.IsValidCloud(cloudName) {
			return false, errors.Errorf("invalid cloud %q", cloudName)
		}
		cTag = names.NewCloudTag(cloudName)
	}
	cloud, err = cc.Cloud(cTag)
	if err != nil {
		return false, errors.Trace(err)
	}

	// Only record cloudName/regionName on the command once the region is
	// confirmed to exist in the cloud.
	for _, r := range cloud.Regions {
		if r.Name == region {
			c.cloudName = cTag.Id()
			c.regionName = region
			isCloudRegion = true
			break
		}
	}
	return isCloudRegion, nil
}

// handleSetArgs parses args for setting defaults.
func (c *defaultsCommand) handleSetArgs(args []string) error {
	argZeroKeyOnly := !strings.Contains(args[0], "=")
	// If an invalid region was specified, the first positional arg won't have
	// an "=". If we see one here we know it is invalid.
	switch {
	case argZeroKeyOnly && c.regionName == "":
		return errors.Errorf("invalid region specified: %q", args[0])
	case argZeroKeyOnly && c.regionName != "":
		return errors.New("cannot set and retrieve default values simultaneously")
	default:
		if err := c.parseSetKeys(args); err != nil {
			return errors.Trace(err)
		}
		c.action = c.setDefaults
		return nil
	}
}

// parseSetKeys iterates over the args and make sure that the key=value pairs
// are valid. It also checks that the same key isn't also being reset.
+func (c *defaultsCommand) parseSetKeys(args []string) error { + options, err := keyvalues.Parse(args, true) + if err != nil { + return errors.Trace(err) + } + + c.values = make(attributes) + for k, v := range options { + if k == config.AgentVersionKey { + return errors.Errorf(`%q must be set via "upgrade-juju"`, config.AgentVersionKey) + } + c.values[k] = v + } + for _, k := range c.resetKeys { + if _, ok := c.values[k]; ok { + return errors.Errorf( + "key %q cannot be both set and unset in the same command", k) + } + } + return nil +} + +// handleOneArg handles the case where we have one positional arg after +// processing for a region and the reset flag. +func (c *defaultsCommand) handleOneArg(arg string) error { + resetSpecified := c.resetKeys != nil + regionSpecified := c.regionName != "" + + if regionSpecified { + if resetSpecified { + // If a region was specified and reset was specified, we shouldn't have + // an extra arg. If it had an "=" in it, we should have handled it + // already. + return errors.New("cannot retrieve defaults for a region and reset attributes at the same time") + } + } + if resetSpecified { + // It makes no sense to supply a positional arg that isn't a region if + // we are resetting keys in a region, so we must have gotten an invalid + // region. + return errors.Errorf("invalid region specified: %q", arg) + } + // We can retrieve a value. + c.key = arg + c.action = c.getDefaults + return nil +} + +// handleExtraArgs handles the case where too many args were supplied. +func (c *defaultsCommand) handleExtraArgs(args []string) error { + resetSpecified := c.resetKeys != nil + regionSpecified := c.regionName != "" + numArgs := len(args) + + // if we have a key=value pair here then something is wrong because the + // last positional arg is not one. We assume the user intended to get a + // value after setting them. 
+ for _, arg := range args { + if strings.Contains(arg, "=") { + return errors.New("cannot set and retrieve default values simultaneously") + } + } + + if !regionSpecified { + if resetSpecified { + if numArgs == 2 { + // It makes no sense to supply a positional arg that isn't a + // region if we are resetting a region, so we must have gotten + // an invalid region. + return errors.Errorf("invalid region specified: %q", args[0]) + } + } + if !resetSpecified { + // If we land here it is because there are extraneous positional + // args. + return errors.New("can only retrieve defaults for one key or all") + } + } + return errors.New("invalid input") +} + +// Run implements part of the cmd.Command interface. +func (c *defaultsCommand) Run(ctx *cmd.Context) error { + root, err := c.newAPIRoot() + if err != nil { + return errors.Trace(err) + } + client := c.newDefaultsAPI(root) + if err != nil { + return errors.Trace(err) + } + defer client.Close() + + if len(c.resetKeys) > 0 { + err := c.resetDefaults(client, ctx) + if err != nil { + // We return this error naked as it is almost certainly going to be + // cmd.ErrSilent and the cmd.Command framework expects that back + // from cmd.Run if the process is blocked. + return err + } + } + if c.action == nil { + // If we are reset only we end up here, only we've already done that. + return nil + } + return c.action(client, ctx) +} + +// getDefaults writes out the value for a single key or the full tree of +// defaults. +func (c *defaultsCommand) getDefaults(client defaultsCommandAPI, ctx *cmd.Context) error { + attrs, err := client.ModelDefaults() + if err != nil { + return err + } + + valueForRegion := func(region string, regions []config.RegionDefaultValue) (config.RegionDefaultValue, bool) { + for _, r := range regions { + if r.Name == region { + return r, true + } + } + return config.RegionDefaultValue{}, false + } + + // Filter by region if necessary. 
+ if c.regionName != "" { + for attrName, attr := range attrs { + if regionDefault, ok := valueForRegion(c.regionName, attr.Regions); !ok { + delete(attrs, attrName) + } else { + attrForRegion := attr + attrForRegion.Regions = []config.RegionDefaultValue{regionDefault} + attrs[attrName] = attrForRegion + } + } + } + + if c.key != "" { + if value, ok := attrs[c.key]; ok { + attrs = config.ModelDefaultAttributes{ + c.key: value, + } + } else { + msg := fmt.Sprintf("there are no default model values for %q", c.key) + if c.regionName != "" { + msg += fmt.Sprintf(" in region %q", c.regionName) + } + return errors.New(msg) + } + } + // If c.keys is empty, write out the whole lot. + return c.out.Write(ctx, attrs) +} + +// setDefaults sets defaults as provided in c.values. +func (c *defaultsCommand) setDefaults(client defaultsCommandAPI, ctx *cmd.Context) error { + // ctx unused in this method. + if err := c.verifyKnownKeys(client); err != nil { + return errors.Trace(err) + } + return block.ProcessBlockedError( + client.SetModelDefaults( + c.cloudName, c.regionName, c.values), block.BlockChange) +} + +// resetDefaults resets the keys in resetKeys. +func (c *defaultsCommand) resetDefaults(client defaultsCommandAPI, ctx *cmd.Context) error { + // ctx unused in this method. + if err := c.verifyKnownKeys(client); err != nil { + return errors.Trace(err) + } + return block.ProcessBlockedError( + client.UnsetModelDefaults( + c.cloudName, c.regionName, c.resetKeys...), block.BlockChange) + +} + +// verifyKnownKeys is a helper to validate the keys we are operating with +// against the set of known attributes from the model. 
+func (c *defaultsCommand) verifyKnownKeys(client defaultsCommandAPI) error { + known, err := client.ModelDefaults() + if err != nil { + return errors.Trace(err) + } + + allKeys := c.resetKeys[:] + for k := range c.values { + allKeys = append(allKeys, k) + } + + for _, key := range allKeys { + // check if the key exists in the known config + // and warn the user if the key is not defined + if _, exists := known[key]; !exists { + logger.Warningf( + "key %q is not defined in the known model configuration: possible misspelling", key) + } + } + return nil +} + +// formatConfigTabular writes a tabular summary of default config information. +func formatDefaultConfigTabular(writer io.Writer, value interface{}) error { + defaultValues, ok := value.(config.ModelDefaultAttributes) + if !ok { + return errors.Errorf("expected value of type %T, got %T", defaultValues, value) + } + + tw := output.TabWriter(writer) + w := output.Wrapper{tw} + + p := func(name string, value config.AttributeDefaultValues) { + var c, d interface{} + switch value.Default { + case nil: + d = "-" + case "": + d = `""` + default: + d = value.Default + } + switch value.Controller { + case nil: + c = "-" + case "": + c = `""` + default: + c = value.Controller + } + w.Println(name, d, c) + for _, region := range value.Regions { + w.Println(" "+region.Name, region.Value, "-") + } + } + var valueNames []string + for name := range defaultValues { + valueNames = append(valueNames, name) + } + sort.Strings(valueNames) + + w.Println("Attribute", "Default", "Controller") + + for _, name := range valueNames { + info := defaultValues[name] + out := &bytes.Buffer{} + err := cmd.FormatYaml(out, info) + if err != nil { + return errors.Annotatef(err, "formatting value for %q", name) + } + p(name, info) + } + + tw.Flush() + return nil +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/model/defaultscommand_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/model/defaultscommand_test.go --- 
juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/model/defaultscommand_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/model/defaultscommand_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,383 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. +package model_test + +import ( + "strings" + + "github.com/juju/cmd" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/apiserver/common" + "github.com/juju/juju/cmd/juju/model" + "github.com/juju/juju/environs/config" + "github.com/juju/juju/jujuclient" + "github.com/juju/juju/jujuclient/jujuclienttesting" + "github.com/juju/juju/testing" +) + +type DefaultsCommandSuite struct { + fakeModelDefaultEnvSuite + store *jujuclienttesting.MemStore +} + +var _ = gc.Suite(&DefaultsCommandSuite{}) + +func (s *DefaultsCommandSuite) SetUpTest(c *gc.C) { + s.fakeModelDefaultEnvSuite.SetUpTest(c) + s.store = jujuclienttesting.NewMemStore() + s.store.CurrentControllerName = "controller" + s.store.Controllers["controller"] = jujuclient.ControllerDetails{} +} + +func (s *DefaultsCommandSuite) run(c *gc.C, args ...string) (*cmd.Context, error) { + command := model.NewDefaultsCommandForTest(s.fakeAPIRoot, s.fakeDefaultsAPI, s.fakeCloudAPI, s.store) + return testing.RunCommand(c, command, args...) 
+} + +func (s *DefaultsCommandSuite) TestDefaultsInit(c *gc.C) { + for i, test := range []struct { + description string + args []string + errorMatch string + nilErr bool + }{ + { + // Test set + description: "test set key specified more than once", + args: []string{"special=extra", "special=other"}, + errorMatch: `key "special" specified more than once`, + }, { + description: "test cannot set agent-version", + args: []string{"agent-version=2.0.0"}, + errorMatch: `"agent-version" must be set via "upgrade-juju"`, + }, { + description: "test set multiple keys", + args: []string{"foo=bar", "baz=eggs"}, + nilErr: true, + }, { + // Test reset + description: "test empty args with reset fails", + args: []string{"--reset"}, + errorMatch: "flag needs an argument: --reset", + }, { + description: "test reset with positional arg interpereted as invalid region", + args: []string{"--reset", "something", "weird"}, + errorMatch: `invalid region specified: "weird"`, + }, + { + description: "test reset with valid region and duplicate key set", + args: []string{"--reset", "something", "dummy-region", "something=weird"}, + errorMatch: `key "something" cannot be both set and unset in the same command`, + }, + { + description: "test reset with valid region and extra positional arg", + args: []string{"--reset", "something", "dummy-region", "weird"}, + errorMatch: "cannot retrieve defaults for a region and reset attributes at the same time", + }, { + description: "test reset with valid region only", + args: []string{"--reset", "foo", "dummy-region"}, + nilErr: true, + }, { + description: "test cannot reset agent version", + args: []string{"--reset", "agent-version"}, + errorMatch: `"agent-version" cannot be reset`, + }, { + description: "test reset inits", + args: []string{"--reset", "foo"}, + nilErr: true, + }, { + description: "test trailing reset fails", + args: []string{"foo=bar", "--reset"}, + errorMatch: "flag needs an argument: --reset", + }, { + description: "test reset and get 
init", + args: []string{"--reset", "agent-version,b", "foo=bar"}, + errorMatch: `"agent-version" cannot be reset`, + }, { + description: "test reset with key=val fails", + args: []string{"--reset", "foo=bar"}, + errorMatch: `--reset accepts a comma delimited set of keys "a,b,c", received: "foo=bar"`, + }, { + description: "test reset multiple with key=val fails", + args: []string{"--reset", "a,foo=bar,b"}, + errorMatch: `--reset accepts a comma delimited set of keys "a,b,c", received: "foo=bar"`, + }, { + description: "test reset with two positional args fails expecting a region", + args: []string{"--reset", "a", "b", "c"}, + errorMatch: `invalid region specified: "b"`, + }, { + description: "test reset with two positional args fails expecting a region reordered", + args: []string{"a", "--reset", "b", "c"}, + errorMatch: `invalid region specified: "a"`, + }, { + description: "test multiple reset inits", + args: []string{"--reset", "a", "--reset", "b"}, + nilErr: true, + }, { + description: "test multiple reset and set inits", + args: []string{"--reset", "a", "b=c", "--reset", "d"}, + nilErr: true, + }, { + description: "test multiple reset with valid region inits", + args: []string{"dummy-region", "--reset", "a", "--reset", "b"}, + nilErr: true, + }, { + description: "test multiple reset with two positional args fails expecting a region reordered", + args: []string{"a", "--reset", "b", "--reset", "c", "d"}, + errorMatch: `invalid region specified: "a"`, + }, { + description: "test reset multiple with key=val fails", + args: []string{"--reset", "a", "--reset", "b,foo=bar,c"}, + errorMatch: `--reset accepts a comma delimited set of keys "a,b,c", received: "foo=bar"`, + }, { + // test get + description: "test no args inits", + args: nil, + nilErr: true, + }, { + description: "one key arg inits", + args: []string{"one"}, + nilErr: true, + }, { + description: "test two key args fails", + args: []string{"one", "two"}, + errorMatch: "can only retrieve defaults for one key 
or all", + }, { + description: "test multiple key args fails", + args: []string{"one", "two", "three"}, + errorMatch: "can only retrieve defaults for one key or all", + }, { + description: "test valid region and one arg", + args: []string{"dummy-region", "one"}, + nilErr: true, + }, { + description: "test valid region and no args", + args: []string{"dummy-region"}, + nilErr: true, + }, { + // test cloud/region + description: "test invalid cloud fails", + args: []string{"invalidCloud/invalidRegion", "one=two"}, + errorMatch: "Unknown cloud", + }, { + description: "test valid cloud with invalid region fails", + args: []string{"dummy/invalidRegion", "one=two"}, + errorMatch: `invalid region specified: "dummy/invalidRegion"`, + }, { + description: "test no cloud with invalid region fails", + args: []string{"invalidRegion", "one=two"}, + errorMatch: `invalid region specified: "invalidRegion"`, + }, { + description: "test valid region with set arg succeeds", + args: []string{"dummy-region", "one=two"}, + nilErr: true, + }, { + description: "test valid region with set and reset succeeds", + args: []string{"dummy-region", "one=two", "--reset", "three"}, + nilErr: true, + }, { + description: "test reset and set with extra key is interpereted as invalid region", + args: []string{"--reset", "something,else", "invalidRegion", "is=weird"}, + errorMatch: `invalid region specified: "invalidRegion"`, + }, { + description: "test reset and set with valid region and extra key fails", + args: []string{"--reset", "something,else", "dummy-region", "invalidkey", "is=weird"}, + errorMatch: "cannot set and retrieve default values simultaneously", + }, { + // test various invalid + description: "test too many positional args with reset", + args: []string{"--reset", "a", "b", "c", "d"}, + errorMatch: "invalid input", + }, { + description: "test too many positional args with invalid region set", + args: []string{"a", "a=b", "b", "c=d"}, + errorMatch: `invalid region specified: "a"`, + }, { + 
description: "test invalid positional args with set", + args: []string{"a=b", "b", "c=d"}, + errorMatch: `expected "key=value", got "b"`, + }, { + description: "test invalid positional args with set and trailing key", + args: []string{"a=b", "c=d", "e"}, + errorMatch: "cannot set and retrieve default values simultaneously", + }, { + description: "test invalid positional args with valid region, set, reset", + args: []string{"dummy-region", "a=b", "--reset", "c,d,", "e=f", "g"}, + errorMatch: "cannot set and retrieve default values simultaneously", + }, { + // Test some random orderings + description: "test invalid positional args with set, reset with trailing comman and split key=values", + args: []string{"dummy-region", "a=b", "--reset", "c,d,", "e=f"}, + nilErr: true, + }, { + description: "test leading comma with reset", + args: []string{"--reset", ",a,b"}, + nilErr: true, + }, + } { + c.Logf("test %d: %s", i, test.description) + cmd := model.NewDefaultsCommandForTest(s.fakeAPIRoot, s.fakeDefaultsAPI, s.fakeCloudAPI, s.store) + err := testing.InitCommand(cmd, test.args) + if test.nilErr { + c.Check(err, jc.ErrorIsNil) + continue + } + c.Check(err, gc.ErrorMatches, test.errorMatch) + } +} + +func (s *DefaultsCommandSuite) TestResetUnknownValueLogs(c *gc.C) { + _, err := s.run(c, "--reset", "attr,weird") + c.Assert(err, jc.ErrorIsNil) + expected := `key "weird" is not defined in the known model configuration: possible misspelling` + c.Check(c.GetTestLog(), jc.Contains, expected) +} + +func (s *DefaultsCommandSuite) TestResetAttr(c *gc.C) { + _, err := s.run(c, "--reset", "attr,unknown") + c.Assert(err, jc.ErrorIsNil) + c.Assert(s.fakeDefaultsAPI.defaults, jc.DeepEquals, config.ModelDefaultAttributes{ + "attr2": {Controller: "bar", Default: nil, Regions: []config.RegionDefaultValue{{ + Name: "dummy-region", + Value: "dummy-value", + }, { + Name: "another-region", + Value: "another-value", + }}}, + }) +} + +func (s *DefaultsCommandSuite) TestResetBlockedError(c 
*gc.C) { + s.fakeDefaultsAPI.err = common.OperationBlockedError("TestBlockedError") + _, err := s.run(c, "--reset", "attr") + testing.AssertOperationWasBlocked(c, err, ".*TestBlockedError.*") +} + +func (s *DefaultsCommandSuite) TestSetUnknownValueLogs(c *gc.C) { + _, err := s.run(c, "weird=foo") + c.Assert(err, jc.ErrorIsNil) + expected := `key "weird" is not defined in the known model configuration: possible misspelling` + c.Check(c.GetTestLog(), jc.Contains, expected) +} + +func (s *DefaultsCommandSuite) TestSet(c *gc.C) { + _, err := s.run(c, "special=extra", "attr=baz") + c.Assert(err, jc.ErrorIsNil) + c.Assert(s.fakeDefaultsAPI.defaults, jc.DeepEquals, config.ModelDefaultAttributes{ + "attr": {Controller: "baz", Default: nil, Regions: nil}, + "attr2": {Controller: "bar", Default: nil, Regions: []config.RegionDefaultValue{{ + Name: "dummy-region", + Value: "dummy-value", + }, { + Name: "another-region", + Value: "another-value", + }}}, + "special": {Controller: "extra", Default: nil, Regions: nil}, + }) +} + +func (s *DefaultsCommandSuite) TestSetConveysCloudRegion(c *gc.C) { + table := []struct { + input, cloud, region string + }{ + {"", "", ""}, + {"dummy-region", "dummy", "dummy-region"}, + {"dummy/dummy-region", "dummy", "dummy-region"}, + {"another-region", "dummy", "another-region"}, + } + for i, test := range table { + c.Logf("test %d", i) + var err error + if test.input == "" { + _, err = s.run(c, "special=extra", "--reset", "attr") + } else { + _, err = s.run(c, test.input, "special=extra", "--reset", "attr") + } + c.Assert(err, jc.ErrorIsNil) + c.Assert(s.fakeDefaultsAPI.region, jc.DeepEquals, test.region) + c.Assert(s.fakeDefaultsAPI.cloud, jc.DeepEquals, test.cloud) + } +} + +func (s *DefaultsCommandSuite) TestBlockedErrorOnSet(c *gc.C) { + s.fakeDefaultsAPI.err = common.OperationBlockedError("TestBlockedError") + _, err := s.run(c, "special=extra") + testing.AssertOperationWasBlocked(c, err, ".*TestBlockedError.*") +} + +func (s 
*DefaultsCommandSuite) TestGetSingleValue(c *gc.C) { + context, err := s.run(c, "attr2") + c.Assert(err, jc.ErrorIsNil) + + output := strings.TrimSpace(testing.Stdout(context)) + expected := "" + + "Attribute Default Controller\n" + + "attr2 - bar\n" + + " dummy-region dummy-value -\n" + + " another-region another-value -" + c.Assert(output, gc.Equals, expected) +} + +func (s *DefaultsCommandSuite) TestGetSingleValueJSON(c *gc.C) { + context, err := s.run(c, "--format=json", "attr2") + c.Assert(err, jc.ErrorIsNil) + + output := strings.TrimSpace(testing.Stdout(context)) + c.Assert(output, gc.Equals, + `{"attr2":{"controller":"bar","regions":[{"name":"dummy-region","value":"dummy-value"},{"name":"another-region","value":"another-value"}]}}`) +} + +func (s *DefaultsCommandSuite) TestGetAllValuesYAML(c *gc.C) { + context, err := s.run(c, "--format=yaml") + c.Assert(err, jc.ErrorIsNil) + + output := strings.TrimSpace(testing.Stdout(context)) + expected := "" + + "attr:\n" + + " default: foo\n" + + "attr2:\n" + + " controller: bar\n" + + " regions:\n" + + " - name: dummy-region\n" + + " value: dummy-value\n" + + " - name: another-region\n" + + " value: another-value" + c.Assert(output, gc.Equals, expected) +} + +func (s *DefaultsCommandSuite) TestGetAllValuesJSON(c *gc.C) { + context, err := s.run(c, "--format=json") + c.Assert(err, jc.ErrorIsNil) + + output := strings.TrimSpace(testing.Stdout(context)) + expected := `{"attr":{"default":"foo"},"attr2":{"controller":"bar","regions":[{"name":"dummy-region","value":"dummy-value"},{"name":"another-region","value":"another-value"}]}}` + c.Assert(output, gc.Equals, expected) +} + +func (s *DefaultsCommandSuite) TestGetAllValuesTabular(c *gc.C) { + context, err := s.run(c) + c.Assert(err, jc.ErrorIsNil) + + output := strings.TrimSpace(testing.Stdout(context)) + expected := "" + + "Attribute Default Controller\n" + + "attr foo -\n" + + "attr2 - bar\n" + + " dummy-region dummy-value -\n" + + " another-region another-value -" + 
c.Assert(output, gc.Equals, expected) +} + +func (s *DefaultsCommandSuite) TestGetRegionValuesTabular(c *gc.C) { + context, err := s.run(c, "dummy-region") + c.Assert(err, jc.ErrorIsNil) + + output := strings.TrimSpace(testing.Stdout(context)) + expected := "" + + "Attribute Default Controller\n" + + "attr2 - bar\n" + + " dummy-region dummy-value -" + c.Assert(output, gc.Equals, expected) +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/model/destroy.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/model/destroy.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/model/destroy.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/model/destroy.go 2016-10-13 14:31:49.000000000 +0000 @@ -5,33 +5,49 @@ import ( "fmt" + "time" "github.com/juju/cmd" "github.com/juju/errors" + "github.com/juju/gnuflag" "github.com/juju/loggo" - "launchpad.net/gnuflag" + "gopkg.in/juju/names.v2" + "github.com/juju/juju/api/base" "github.com/juju/juju/api/modelmanager" "github.com/juju/juju/apiserver/params" jujucmd "github.com/juju/juju/cmd" "github.com/juju/juju/cmd/juju/block" "github.com/juju/juju/cmd/modelcmd" + "github.com/juju/juju/jujuclient" ) var logger = loggo.GetLogger("juju.cmd.juju.model") // NewDestroyCommand returns a command used to destroy a model. func NewDestroyCommand() cmd.Command { + destroyCmd := &destroyCommand{} + destroyCmd.RefreshModels = destroyCmd.ModelCommandBase.RefreshModels + destroyCmd.sleepFunc = time.Sleep return modelcmd.Wrap( - &destroyCommand{}, - modelcmd.ModelSkipDefault, - modelcmd.ModelSkipFlags, + destroyCmd, + modelcmd.WrapSkipDefaultModel, + modelcmd.WrapSkipModelFlags, ) } // destroyCommand destroys the specified model. type destroyCommand struct { modelcmd.ModelCommandBase + // RefreshModels hides the RefreshModels function defined + // in ModelCommandBase. This allows overriding for testing. 
+ // NOTE: ideal solution would be to have the base implement a method + // like store.ModelByName which auto-refreshes. + RefreshModels func(jujuclient.ClientStore, string) error + + // sleepFunc is used when calling the timed function to get model status updates. + sleepFunc func(time.Duration) + envName string assumeYes bool api DestroyModelAPI @@ -46,10 +62,11 @@ Examples: - juju destroy-model test - juju destroy-model -y mymodel + juju destroy-model test + juju destroy-model -y mymodel -See also: destroy-controller +See also: + destroy-controller ` var destroyEnvMsg = ` WARNING! This command will destroy the %q model. @@ -61,7 +78,8 @@ // API that the destroy command calls. It is exported for mocking in tests. type DestroyModelAPI interface { Close() error - DestroyModel() error + DestroyModel(names.ModelTag) error + ModelStatus(models ...names.ModelTag) ([]base.ModelStatus, error) } // Info implements Command.Info. @@ -76,6 +94,7 @@ // SetFlags implements Command.SetFlags. func (c *destroyCommand) SetFlags(f *gnuflag.FlagSet) { + c.ModelCommandBase.SetFlags(f) f.BoolVar(&c.assumeYes, "y", false, "Do not prompt for confirmation") f.BoolVar(&c.assumeYes, "yes", false, "") } @@ -96,7 +115,7 @@ if c.api != nil { return c.api, nil } - root, err := c.NewAPIRoot() + root, err := c.NewControllerAPIRoot() if err != nil { return nil, errors.Trace(err) } @@ -114,9 +133,17 @@ return errors.Annotate(err, "cannot read controller details") } modelDetails, err := store.ModelByName(controllerName, modelName) + if errors.IsNotFound(err) { + if err := c.RefreshModels(store, controllerName); err != nil { + return errors.Annotate(err, "refreshing models cache") + } + // Now try again. 
+ modelDetails, err = store.ModelByName(controllerName, modelName) + } if err != nil { return errors.Annotate(err, "cannot read model info") } + if modelDetails.ModelUUID == controllerDetails.ControllerUUID { return errors.Errorf("%q is a controller; use 'juju destroy-controller' to destroy it", modelName) } @@ -137,11 +164,21 @@ defer api.Close() // Attempt to destroy the model. - err = api.DestroyModel() + ctx.Infof("Destroying model") + err = api.DestroyModel(names.NewModelTag(modelDetails.ModelUUID)) if err != nil { return c.handleError(errors.Annotate(err, "cannot destroy model"), modelName) } + // Wait for model to be destroyed. + const modelStatusPollWait = 2 * time.Second + modelStatus := newTimedModelStatus(ctx, api, names.NewModelTag(modelDetails.ModelUUID), c.sleepFunc) + modelData := modelStatus(0) + for modelData != nil { + ctx.Infof(formatDestroyModelInfo(modelData) + "...") + modelData = modelStatus(modelStatusPollWait) + } + err = store.RemoveModel(controllerName, modelName) if err != nil && !errors.IsNotFound(err) { return errors.Trace(err) @@ -149,6 +186,48 @@ return nil } +type modelData struct { + machineCount int + applicationCount int +} + +// newTimedModelStatus returns a function which waits a given period of time +// before querying the API server for the status of a model. 
+func newTimedModelStatus(ctx *cmd.Context, api DestroyModelAPI, tag names.ModelTag, sleepFunc func(time.Duration)) func(time.Duration) *modelData { + return func(wait time.Duration) *modelData { + sleepFunc(wait) + status, err := api.ModelStatus(tag) + if err != nil { + if params.ErrCode(err) != params.CodeNotFound { + ctx.Infof("Unable to get the model status from the API: %v.", err) + } + return nil + } + if l := len(status); l != 1 { + ctx.Infof("error finding model status: expected one result, got %d", l) + return nil + } + return &modelData{ + machineCount: status[0].HostedMachineCount, + applicationCount: status[0].ServiceCount, + } + } +} + +func formatDestroyModelInfo(data *modelData) string { + out := "Waiting on model to be removed" + if data.machineCount == 0 && data.applicationCount == 0 { + return out + } + if data.machineCount > 0 { + out += fmt.Sprintf(", %d machine(s)", data.machineCount) + } + if data.applicationCount > 0 { + out += fmt.Sprintf(", %d application(s)", data.applicationCount) + } + return out +} + func (c *destroyCommand) handleError(err error, modelName string) error { if err == nil { return nil diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/model/destroy_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/model/destroy_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/model/destroy_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/model/destroy_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -11,7 +11,10 @@ "github.com/juju/errors" jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" + "gopkg.in/juju/names.v2" + "github.com/juju/juju/api/base" + "github.com/juju/juju/apiserver/common" "github.com/juju/juju/apiserver/params" "github.com/juju/juju/cmd/juju/model" "github.com/juju/juju/cmd/modelcmd" @@ -24,27 +27,39 @@ type DestroySuite struct { testing.FakeJujuXDGDataHomeSuite - api *fakeDestroyAPI + api *fakeAPI store 
*jujuclienttesting.MemStore + sleep func(time.Duration) } var _ = gc.Suite(&DestroySuite{}) // fakeDestroyAPI mocks out the cient API -type fakeDestroyAPI struct { - err error - env map[string]interface{} +type fakeAPI struct { + err error + env map[string]interface{} + statusCallCount int + modelInfoErr []*params.Error } -func (f *fakeDestroyAPI) Close() error { return nil } +func (f *fakeAPI) Close() error { return nil } -func (f *fakeDestroyAPI) DestroyModel() error { +func (f *fakeAPI) DestroyModel(names.ModelTag) error { return f.err } +func (f *fakeAPI) ModelStatus(models ...names.ModelTag) ([]base.ModelStatus, error) { + var err *params.Error = ¶ms.Error{Code: params.CodeNotFound} + if f.statusCallCount < len(f.modelInfoErr) { + err = f.modelInfoErr[f.statusCallCount] + } + f.statusCallCount++ + return []base.ModelStatus{{}}, err +} + func (s *DestroySuite) SetUpTest(c *gc.C) { s.FakeJujuXDGDataHomeSuite.SetUpTest(c) - s.api = &fakeDestroyAPI{} + s.api = &fakeAPI{} s.api.err = nil s.store = jujuclienttesting.NewMemStore() @@ -52,22 +67,23 @@ s.store.Controllers["test1"] = jujuclient.ControllerDetails{ControllerUUID: "test1-uuid"} s.store.Models["test1"] = &jujuclient.ControllerModels{ Models: map[string]jujuclient.ModelDetails{ - "admin@local/test1": {"test1-uuid"}, - "admin@local/test2": {"test2-uuid"}, + "admin/test1": {"test1-uuid"}, + "admin/test2": {"test2-uuid"}, }, } s.store.Accounts["test1"] = jujuclient.AccountDetails{ - User: "admin@local", + User: "admin", } + s.sleep = func(time.Duration) {} } func (s *DestroySuite) runDestroyCommand(c *gc.C, args ...string) (*cmd.Context, error) { - cmd := model.NewDestroyCommandForTest(s.api, s.store) + cmd := model.NewDestroyCommandForTest(s.api, noOpRefresh, s.store, s.sleep) return testing.RunCommand(c, cmd, args...) 
} func (s *DestroySuite) NewDestroyCommand() cmd.Command { - return model.NewDestroyCommandForTest(s.api, s.store) + return model.NewDestroyCommandForTest(s.api, noOpRefresh, s.store, s.sleep) } func checkModelExistsInStore(c *gc.C, name string, store jujuclient.ClientStore) { @@ -97,9 +113,17 @@ c.Assert(err, gc.ErrorMatches, `unrecognized args: \["whoops"\]`) } -func (s *DestroySuite) TestDestroyUnknownModel(c *gc.C) { - _, err := s.runDestroyCommand(c, "foo") - c.Assert(err, gc.ErrorMatches, `cannot read model info: model test1:admin@local/foo not found`) +func (s *DestroySuite) TestDestroyUnknownModelCallsRefresh(c *gc.C) { + called := false + refresh := func(jujuclient.ClientStore, string) error { + called = true + return nil + } + + cmd := model.NewDestroyCommandForTest(s.api, refresh, s.store, s.sleep) + _, err := testing.RunCommand(c, cmd, "foo") + c.Check(called, jc.IsTrue) + c.Check(err, gc.ErrorMatches, `cannot read model info: model test1:admin/foo not found`) } func (s *DestroySuite) TestDestroyCannotConnectToAPI(c *gc.C) { @@ -107,34 +131,43 @@ _, err := s.runDestroyCommand(c, "test2", "-y") c.Assert(err, gc.ErrorMatches, "cannot destroy model: connection refused") c.Check(c.GetTestLog(), jc.Contains, "failed to destroy model \"test2\"") - checkModelExistsInStore(c, "test1:admin@local/test2", s.store) + checkModelExistsInStore(c, "test1:admin/test2", s.store) } func (s *DestroySuite) TestSystemDestroyFails(c *gc.C) { _, err := s.runDestroyCommand(c, "test1", "-y") c.Assert(err, gc.ErrorMatches, `"test1" is a controller; use 'juju destroy-controller' to destroy it`) - checkModelExistsInStore(c, "test1:admin@local/test1", s.store) + checkModelExistsInStore(c, "test1:admin/test1", s.store) } func (s *DestroySuite) TestDestroy(c *gc.C) { - checkModelExistsInStore(c, "test1:admin@local/test2", s.store) + checkModelExistsInStore(c, "test1:admin/test2", s.store) _, err := s.runDestroyCommand(c, "test2", "-y") c.Assert(err, jc.ErrorIsNil) - 
checkModelRemovedFromStore(c, "test1:admin@local/test2", s.store) + checkModelRemovedFromStore(c, "test1:admin/test2", s.store) +} + +func (s *DestroySuite) TestDestroyBlocks(c *gc.C) { + checkModelExistsInStore(c, "test1:admin/test2", s.store) + s.api.modelInfoErr = []*params.Error{{}, {Code: params.CodeNotFound}} + _, err := s.runDestroyCommand(c, "test2", "-y") + c.Assert(err, jc.ErrorIsNil) + checkModelRemovedFromStore(c, "test1:admin/test2", s.store) + c.Assert(s.api.statusCallCount, gc.Equals, 1) } func (s *DestroySuite) TestFailedDestroyModel(c *gc.C) { s.api.err = errors.New("permission denied") _, err := s.runDestroyCommand(c, "test1:test2", "-y") c.Assert(err, gc.ErrorMatches, "cannot destroy model: permission denied") - checkModelExistsInStore(c, "test1:admin@local/test2", s.store) + checkModelExistsInStore(c, "test1:admin/test2", s.store) } func (s *DestroySuite) resetModel(c *gc.C) { s.store.Models["test1"] = &jujuclient.ControllerModels{ Models: map[string]jujuclient.ModelDetails{ - "admin@local/test1": {"test1-uuid"}, - "admin@local/test2": {"test2-uuid"}, + "admin/test1": {"test1-uuid"}, + "admin/test2": {"test2-uuid"}, }, } } @@ -156,7 +189,7 @@ c.Fatalf("command took too long") } c.Check(testing.Stdout(ctx), gc.Matches, "WARNING!.*test2(.|\n)*") - checkModelExistsInStore(c, "test1:admin@local/test1", s.store) + checkModelExistsInStore(c, "test1:admin/test1", s.store) // EOF on stdin: equivalent to answering no. 
stdin.Reset() @@ -169,7 +202,7 @@ c.Fatalf("command took too long") } c.Check(testing.Stdout(ctx), gc.Matches, "WARNING!.*test2(.|\n)*") - checkModelExistsInStore(c, "test1:admin@local/test2", s.store) + checkModelExistsInStore(c, "test1:admin/test2", s.store) for _, answer := range []string{"y", "Y", "yes", "YES"} { stdin.Reset() @@ -182,7 +215,7 @@ case <-time.After(testing.LongWait): c.Fatalf("command took too long") } - checkModelRemovedFromStore(c, "test1:admin@local/test2", s.store) + checkModelRemovedFromStore(c, "test1:admin/test2", s.store) // Add the test2 model back into the store for the next test s.resetModel(c) @@ -190,7 +223,7 @@ } func (s *DestroySuite) TestBlockedDestroy(c *gc.C) { - s.api.err = ¶ms.Error{Code: params.CodeOperationBlocked} - s.runDestroyCommand(c, "test2", "-y") - c.Check(c.GetTestLog(), jc.Contains, "To remove the block") + s.api.err = common.OperationBlockedError("TestBlockedDestroy") + _, err := s.runDestroyCommand(c, "test2", "-y") + testing.AssertOperationWasBlocked(c, err, ".*TestBlockedDestroy.*") } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/model/dumpdb.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/model/dumpdb.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/model/dumpdb.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/model/dumpdb.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,110 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package model + +import ( + "github.com/juju/cmd" + "github.com/juju/errors" + "github.com/juju/gnuflag" + "gopkg.in/juju/names.v2" + + "github.com/juju/juju/cmd/modelcmd" + "github.com/juju/juju/cmd/output" +) + +// NewDumpDBCommand returns a fully constructed dump-db command. 
+func NewDumpDBCommand() cmd.Command { + return modelcmd.WrapController(&dumpDBCommand{}) +} + +type dumpDBCommand struct { + modelcmd.ControllerCommandBase + out cmd.Output + api DumpDBAPI + + model string +} + +const dumpDBHelpDoc = ` +dump-db returns all that is stored in the database for the specified model. + +Examples: + + juju dump-db + juju dump-db mymodel + +See also: + models +` + +// Info implements Command. +func (c *dumpDBCommand) Info() *cmd.Info { + return &cmd.Info{ + Name: "dump-db", + Args: "[model-name]", + Purpose: "Displays the mongo documents for of the model.", + Doc: dumpDBHelpDoc, + } +} + +// SetFlags implements Command. +func (c *dumpDBCommand) SetFlags(f *gnuflag.FlagSet) { + c.ControllerCommandBase.SetFlags(f) + c.out.AddFlags(f, "yaml", output.DefaultFormatters) +} + +// Init implements Command. +func (c *dumpDBCommand) Init(args []string) error { + if len(args) == 1 { + c.model = args[0] + return nil + } + return cmd.CheckEmpty(args) +} + +// DumpDBAPI specifies the used function calls of the ModelManager. +type DumpDBAPI interface { + Close() error + DumpModelDB(names.ModelTag) (map[string]interface{}, error) +} + +func (c *dumpDBCommand) getAPI() (DumpDBAPI, error) { + if c.api != nil { + return c.api, nil + } + return c.NewModelManagerAPIClient() +} + +// Run implements Command. 
+func (c *dumpDBCommand) Run(ctx *cmd.Context) error { + client, err := c.getAPI() + if err != nil { + return err + } + defer client.Close() + + store := c.ClientStore() + if c.model == "" { + c.model, err = store.CurrentModel(c.ControllerName()) + if err != nil { + return err + } + } + + modelDetails, err := store.ModelByName( + c.ControllerName(), + c.model, + ) + if err != nil { + return errors.Annotate(err, "getting model details") + } + + modelTag := names.NewModelTag(modelDetails.ModelUUID) + results, err := client.DumpModelDB(modelTag) + if err != nil { + return err + } + + return c.out.Write(ctx, results) +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/model/dumpdb_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/model/dumpdb_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/model/dumpdb_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/model/dumpdb_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,80 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for info. 
+ +package model_test + +import ( + gitjujutesting "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + "gopkg.in/juju/names.v2" + + "github.com/juju/juju/cmd/juju/model" + "github.com/juju/juju/jujuclient" + "github.com/juju/juju/jujuclient/jujuclienttesting" + "github.com/juju/juju/testing" +) + +type DumpDBCommandSuite struct { + testing.FakeJujuXDGDataHomeSuite + fake fakeDumpDBClient + store *jujuclienttesting.MemStore +} + +var _ = gc.Suite(&DumpDBCommandSuite{}) + +func (s *DumpDBCommandSuite) SetUpTest(c *gc.C) { + s.FakeJujuXDGDataHomeSuite.SetUpTest(c) + s.fake.ResetCalls() + s.store = jujuclienttesting.NewMemStore() + s.store.CurrentControllerName = "testing" + s.store.Controllers["testing"] = jujuclient.ControllerDetails{} + s.store.Accounts["testing"] = jujuclient.AccountDetails{ + User: "admin", + } + err := s.store.UpdateModel("testing", "admin/mymodel", jujuclient.ModelDetails{ + testing.ModelTag.Id(), + }) + c.Assert(err, jc.ErrorIsNil) + s.store.Models["testing"].CurrentModel = "admin/mymodel" +} + +func (s *DumpDBCommandSuite) TestDumpDB(c *gc.C) { + ctx, err := testing.RunCommand(c, model.NewDumpDBCommandForTest(&s.fake, s.store)) + c.Assert(err, jc.ErrorIsNil) + s.fake.CheckCalls(c, []gitjujutesting.StubCall{ + {"DumpModelDB", []interface{}{testing.ModelTag}}, + {"Close", nil}, + }) + + out := testing.Stdout(ctx) + c.Assert(out, gc.Equals, `all-others: heaps of data +models: + name: testing + uuid: fake-uuid +`) +} + +type fakeDumpDBClient struct { + gitjujutesting.Stub +} + +func (f *fakeDumpDBClient) Close() error { + f.MethodCall(f, "Close") + return f.NextErr() +} + +func (f *fakeDumpDBClient) DumpModelDB(model names.ModelTag) (map[string]interface{}, error) { + f.MethodCall(f, "DumpModelDB", model) + err := f.NextErr() + if err != nil { + return nil, err + } + return map[string]interface{}{ + "models": map[string]interface{}{ + "name": "testing", + "uuid": "fake-uuid", + }, + "all-others": "heaps of 
data", + }, nil +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/model/dump.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/model/dump.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/model/dump.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/model/dump.go 2016-10-13 14:31:49.000000000 +0000 @@ -6,22 +6,24 @@ import ( "github.com/juju/cmd" "github.com/juju/errors" + "github.com/juju/gnuflag" "gopkg.in/juju/names.v2" - "launchpad.net/gnuflag" - "github.com/juju/juju/api/modelmanager" "github.com/juju/juju/cmd/modelcmd" + "github.com/juju/juju/cmd/output" ) // NewDumpCommand returns a fully constructed dump-model command. func NewDumpCommand() cmd.Command { - return modelcmd.Wrap(&dumpCommand{}) + return modelcmd.WrapController(&dumpCommand{}) } type dumpCommand struct { - modelcmd.ModelCommandBase + modelcmd.ControllerCommandBase out cmd.Output api DumpModelAPI + + model string } const dumpModelHelpDoc = ` @@ -31,7 +33,7 @@ Examples: juju dump-model - juju dump-model -m mymodel + juju dump-model mymodel See also: models @@ -41,6 +43,7 @@ func (c *dumpCommand) Info() *cmd.Info { return &cmd.Info{ Name: "dump-model", + Args: "[model-name]", Purpose: "Displays the database agnostic representation of the model.", Doc: dumpModelHelpDoc, } @@ -48,14 +51,16 @@ // SetFlags implements Command. func (c *dumpCommand) SetFlags(f *gnuflag.FlagSet) { - c.out.AddFlags(f, "yaml", map[string]cmd.Formatter{ - "yaml": cmd.FormatYaml, - "json": cmd.FormatJson, - }) + c.ControllerCommandBase.SetFlags(f) + c.out.AddFlags(f, "yaml", output.DefaultFormatters) } // Init implements Command. 
-func (c *dumpCommand) Init(args []string) (err error) { +func (c *dumpCommand) Init(args []string) error { + if len(args) == 1 { + c.model = args[0] + return nil + } return cmd.CheckEmpty(args) } @@ -69,11 +74,7 @@ if c.api != nil { return c.api, nil } - root, err := c.NewAPIRoot() - if err != nil { - return nil, errors.Trace(err) - } - return modelmanager.NewClient(root), nil + return c.NewModelManagerAPIClient() } // Run implements Command. @@ -85,9 +86,16 @@ defer client.Close() store := c.ClientStore() + if c.model == "" { + c.model, err = store.CurrentModel(c.ControllerName()) + if err != nil { + return err + } + } + modelDetails, err := store.ModelByName( c.ControllerName(), - c.ModelName(), + c.model, ) if err != nil { return errors.Annotate(err, "getting model details") diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/model/dump_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/model/dump_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/model/dump_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/model/dump_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -50,13 +50,13 @@ s.store.CurrentControllerName = "testing" s.store.Controllers["testing"] = jujuclient.ControllerDetails{} s.store.Accounts["testing"] = jujuclient.AccountDetails{ - User: "admin@local", + User: "admin", } - err := s.store.UpdateModel("testing", "admin@local/mymodel", jujuclient.ModelDetails{ + err := s.store.UpdateModel("testing", "admin/mymodel", jujuclient.ModelDetails{ testing.ModelTag.Id(), }) c.Assert(err, jc.ErrorIsNil) - s.store.Models["testing"].CurrentModel = "admin@local/mymodel" + s.store.Models["testing"].CurrentModel = "admin/mymodel" } func (s *DumpCommandSuite) TestDump(c *gc.C) { diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/model/export_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/model/export_test.go --- 
juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/model/export_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/model/export_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,58 +4,34 @@ package model import ( + "time" + "github.com/juju/cmd" + "github.com/juju/juju/api" + "github.com/juju/juju/api/base" "github.com/juju/juju/cmd/modelcmd" "github.com/juju/juju/jujuclient" ) -// NewGetCommandForTest returns a GetCommand with the api provided as specified. -func NewGetCommandForTest(api GetModelAPI) cmd.Command { - cmd := &getCommand{ - api: api, - } - return modelcmd.Wrap(cmd) -} - -// NewSetCommandForTest returns a SetCommand with the api provided as specified. -func NewSetCommandForTest(api SetModelAPI) cmd.Command { - cmd := &setCommand{ +// NewConfigCommandForTest returns a configCommand with the api +// provided as specified. +func NewConfigCommandForTest(api configCommandAPI) cmd.Command { + cmd := &configCommand{ api: api, } return modelcmd.Wrap(cmd) } -// NewUnsetCommandForTest returns an UnsetCommand with the api provided as specified. -func NewUnsetCommandForTest(api UnsetModelAPI) cmd.Command { - cmd := &unsetCommand{ - api: api, - } - return modelcmd.Wrap(cmd) -} - -// NewGetDefaultsCommandForTest returns a GetDefaultsCommand with the api provided as specified. -func NewGetDefaultsCommandForTest(api modelDefaultsAPI) cmd.Command { - cmd := &getDefaultsCommand{ - newAPIFunc: func() (modelDefaultsAPI, error) { return api, nil }, - } - return modelcmd.Wrap(cmd) -} - -// NewSetDefaultsCommandForTest returns a SetDefaultsCommand with the api provided as specified. -func NewSetDefaultsCommandForTest(api setModelDefaultsAPI) cmd.Command { - cmd := &setDefaultsCommand{ - newAPIFunc: func() (setModelDefaultsAPI, error) { return api, nil }, +// NewDefaultsCommandForTest returns a defaultsCommand with the api provided as specified. 
+func NewDefaultsCommandForTest(apiRoot api.Connection, dAPI defaultsCommandAPI, cAPI cloudAPI, store jujuclient.ClientStore) cmd.Command { + cmd := &defaultsCommand{ + newAPIRoot: func() (api.Connection, error) { return apiRoot, nil }, + newDefaultsAPI: func(caller base.APICallCloser) defaultsCommandAPI { return dAPI }, + newCloudAPI: func(caller base.APICallCloser) cloudAPI { return cAPI }, } - return modelcmd.Wrap(cmd) -} - -// NewUnsetDefaultsCommandForTest returns a UnsetDefaultsCommand with the api provided as specified. -func NewUnsetDefaultsCommandForTest(api unsetModelDefaultsAPI) cmd.Command { - cmd := &unsetDefaultsCommand{ - newAPIFunc: func() (unsetModelDefaultsAPI, error) { return api, nil }, - } - return modelcmd.Wrap(cmd) + cmd.SetClientStore(store) + return modelcmd.WrapController(cmd) } // NewRetryProvisioningCommandForTest returns a RetryProvisioningCommand with the api provided as specified. @@ -66,16 +42,9 @@ return modelcmd.Wrap(cmd) } -// NewUsersCommandForTest returns a UsersCommand with the api provided as specified. -func NewUsersCommandForTest(api UsersAPI, store jujuclient.ClientStore) cmd.Command { - cmd := &usersCommand{api: api} - cmd.SetClientStore(store) - return modelcmd.Wrap(cmd) -} - // NewShowCommandForTest returns a ShowCommand with the api provided as specified. 
-func NewShowCommandForTest(api ShowModelAPI, store jujuclient.ClientStore) cmd.Command { - cmd := &showModelCommand{api: api} +func NewShowCommandForTest(api ShowModelAPI, refreshFunc func(jujuclient.ClientStore, string) error, store jujuclient.ClientStore) cmd.Command { + cmd := &showModelCommand{api: api, RefreshModels: refreshFunc} cmd.SetClientStore(store) return modelcmd.Wrap(cmd) } @@ -84,19 +53,32 @@ func NewDumpCommandForTest(api DumpModelAPI, store jujuclient.ClientStore) cmd.Command { cmd := &dumpCommand{api: api} cmd.SetClientStore(store) - return modelcmd.Wrap(cmd) + return modelcmd.WrapController(cmd) +} + +// NewDumpDBCommandForTest returns a DumpDBCommand with the api provided as specified. +func NewDumpDBCommandForTest(api DumpDBAPI, store jujuclient.ClientStore) cmd.Command { + cmd := &dumpDBCommand{api: api} + cmd.SetClientStore(store) + return modelcmd.WrapController(cmd) } // NewDestroyCommandForTest returns a DestroyCommand with the api provided as specified. -func NewDestroyCommandForTest(api DestroyModelAPI, store jujuclient.ClientStore) cmd.Command { +func NewDestroyCommandForTest( + api DestroyModelAPI, + refreshFunc func(jujuclient.ClientStore, string) error, store jujuclient.ClientStore, + sleepFunc func(time.Duration), +) cmd.Command { cmd := &destroyCommand{ - api: api, + api: api, + RefreshModels: refreshFunc, + sleepFunc: sleepFunc, } cmd.SetClientStore(store) return modelcmd.Wrap( cmd, - modelcmd.ModelSkipDefault, - modelcmd.ModelSkipFlags, + modelcmd.WrapSkipDefaultModel, + modelcmd.WrapSkipModelFlags, ) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/model/fakeenv_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/model/fakeenv_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/model/fakeenv_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/model/fakeenv_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,12 +4,19 @@ package model_test import ( 
+ "errors" + gc "gopkg.in/check.v1" + "gopkg.in/juju/names.v2" + "github.com/juju/juju/api" + jujucloud "github.com/juju/juju/cloud" "github.com/juju/juju/environs/config" "github.com/juju/juju/testing" ) +// ModelConfig related fake environment for testing. + type fakeEnvSuite struct { testing.FakeJujuXDGDataHomeSuite fake *fakeEnvAPI @@ -26,15 +33,18 @@ defaults: config.ConfigValues{ "attr": {Value: "foo", Source: "default"}, "attr2": {Value: "bar", Source: "controller"}, + "attr3": {Value: "baz", Source: "region"}, }, } } type fakeEnvAPI struct { - values map[string]interface{} - defaults config.ConfigValues - err error - keys []string + values map[string]interface{} + cloud, region string + defaults config.ConfigValues + err error + keys []string + resetKeys []string } func (f *fakeEnvAPI) Close() error { @@ -53,36 +63,137 @@ return result, nil } -func (f *fakeEnvAPI) ModelDefaults() (config.ConfigValues, error) { +func (f *fakeEnvAPI) ModelSet(config map[string]interface{}) error { + f.values = config + return f.err +} + +func (f *fakeEnvAPI) ModelUnset(keys ...string) error { + f.resetKeys = keys + return f.err +} + +// ModelDefaults related fake environment for testing. 
+ +type fakeModelDefaultEnvSuite struct { + testing.FakeJujuXDGDataHomeSuite + fakeAPIRoot *fakeAPIConnection + fakeDefaultsAPI *fakeModelDefaultsAPI + fakeCloudAPI *fakeCloudAPI +} + +func (s *fakeModelDefaultEnvSuite) SetUpTest(c *gc.C) { + s.FakeJujuXDGDataHomeSuite.SetUpTest(c) + s.fakeAPIRoot = &fakeAPIConnection{} + s.fakeDefaultsAPI = &fakeModelDefaultsAPI{ + values: map[string]interface{}{ + "name": "test-model", + "special": "special value", + "running": true, + }, + defaults: config.ModelDefaultAttributes{ + "attr": {Default: "foo"}, + "attr2": { + Controller: "bar", + Regions: []config.RegionDefaultValue{{ + "dummy-region", + "dummy-value", + }, { + "another-region", + "another-value", + }}}, + }, + } + s.fakeCloudAPI = &fakeCloudAPI{ + clouds: map[string]jujucloud.Cloud{ + "cloud-dummy": { + Type: "dummy-cloud", + Regions: []jujucloud.Region{ + {Name: "dummy-region"}, + {Name: "another-region"}, + }, + }, + }, + } +} + +type fakeAPIConnection struct { + api.Connection +} + +func (*fakeAPIConnection) Close() error { + return nil +} + +type fakeModelDefaultsAPI struct { + values map[string]interface{} + cloud, region string + defaults config.ModelDefaultAttributes + err error + keys []string +} + +func (f *fakeModelDefaultsAPI) Close() error { + return nil +} + +func (f *fakeModelDefaultsAPI) ModelGet() (map[string]interface{}, error) { + return f.values, nil +} + +func (f *fakeModelDefaultsAPI) ModelDefaults() (config.ModelDefaultAttributes, error) { return f.defaults, nil } -func (f *fakeEnvAPI) SetModelDefaults(cfg map[string]interface{}) error { +func (f *fakeModelDefaultsAPI) SetModelDefaults(cloud, region string, cfg map[string]interface{}) error { if f.err != nil { return f.err } + f.cloud = cloud + f.region = region for name, val := range cfg { - f.defaults[name] = config.ConfigValue{Value: val, Source: "controller"} + f.defaults[name] = config.AttributeDefaultValues{Controller: val} } return nil } -func (f *fakeEnvAPI) UnsetModelDefaults(keys 
...string) error { +func (f *fakeModelDefaultsAPI) UnsetModelDefaults(cloud, region string, keys ...string) error { if f.err != nil { return f.err } + f.cloud = cloud + f.region = region for _, key := range keys { delete(f.defaults, key) } return nil } -func (f *fakeEnvAPI) ModelSet(config map[string]interface{}) error { +func (f *fakeModelDefaultsAPI) ModelSet(config map[string]interface{}) error { f.values = config return f.err } -func (f *fakeEnvAPI) ModelUnset(keys ...string) error { +func (f *fakeModelDefaultsAPI) ModelUnset(keys ...string) error { f.keys = keys return f.err } + +type fakeCloudAPI struct { + clouds map[string]jujucloud.Cloud +} + +func (f *fakeCloudAPI) Close() error { return nil } +func (f *fakeCloudAPI) DefaultCloud() (names.CloudTag, error) { + return names.NewCloudTag("dummy"), nil +} +func (f *fakeCloudAPI) Cloud(name names.CloudTag) (jujucloud.Cloud, error) { + var ( + c jujucloud.Cloud + ok bool + ) + if c, ok = f.clouds[name.String()]; !ok { + return jujucloud.Cloud{}, errors.New("Unknown cloud") + } + return c, nil +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/model/getdefaults.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/model/getdefaults.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/model/getdefaults.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/model/getdefaults.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,165 +0,0 @@ -// Copyright 2016 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package model - -import ( - "bytes" - "fmt" - "sort" - "strings" - "text/tabwriter" - - "github.com/juju/cmd" - "github.com/juju/errors" - "launchpad.net/gnuflag" - - "github.com/juju/juju/api/modelconfig" - "github.com/juju/juju/cmd/modelcmd" - "github.com/juju/juju/environs/config" -) - -// NewModelDefaultsCommand returns a command used to print the -// default model config attributes. 
-func NewModelDefaultsCommand() cmd.Command { - c := &getDefaultsCommand{} - c.newAPIFunc = func() (modelDefaultsAPI, error) { - api, err := c.NewAPIRoot() - if err != nil { - return nil, errors.Annotate(err, "opening API connection") - } - return modelconfig.NewClient(api), nil - } - return modelcmd.Wrap(c) -} - -type getDefaultsCommand struct { - modelcmd.ModelCommandBase - newAPIFunc func() (modelDefaultsAPI, error) - key string - out cmd.Output -} - -const modelDefaultsHelpDoc = ` -By default, all default configuration (keys and values) are -displayed if a key is not specified. -By default, the model is the current model. - -Examples: - - juju model-defaults - juju model-defaults http-proxy - juju model-defaults -m mymodel type - -See also: - models - set-model-defaults - unset-model-defaults - set-model-config - get-model-config - unset-model-config -` - -func (c *getDefaultsCommand) Info() *cmd.Info { - return &cmd.Info{ - Name: "model-defaults", - Args: "[]", - Purpose: "Displays default configuration settings for a model.", - Doc: modelDefaultsHelpDoc, - } -} - -func (c *getDefaultsCommand) SetFlags(f *gnuflag.FlagSet) { - c.out.AddFlags(f, "tabular", map[string]cmd.Formatter{ - "yaml": cmd.FormatYaml, - "json": cmd.FormatJson, - "tabular": formatDefaultConfigTabular, - }) -} - -func (c *getDefaultsCommand) Init(args []string) (err error) { - c.key, err = cmd.ZeroOrOneArgs(args) - return -} - -// modelDefaultsAPI defines the api methods used by this command. -type modelDefaultsAPI interface { - // Close closes the api connection. - Close() error - - // ModelDefaults returns the default config values used when creating a new model. 
- ModelDefaults() (config.ConfigValues, error) -} - -func (c *getDefaultsCommand) Run(ctx *cmd.Context) error { - client, err := c.newAPIFunc() - if err != nil { - return err - } - defer client.Close() - - attrs, err := client.ModelDefaults() - if err != nil { - return err - } - - if c.key != "" { - if value, ok := attrs[c.key]; ok { - attrs = config.ConfigValues{ - c.key: value, - } - } else { - return errors.Errorf("key %q not found in %q model defaults.", c.key, attrs["name"]) - } - } - // If key is empty, write out the whole lot. - return c.out.Write(ctx, attrs) -} - -// formatConfigTabular returns a tabular summary of default config information. -func formatDefaultConfigTabular(value interface{}) ([]byte, error) { - configValues, ok := value.(config.ConfigValues) - if !ok { - return nil, errors.Errorf("expected value of type %T, got %T", configValues, value) - } - - var out bytes.Buffer - const ( - // To format things into columns. - minwidth = 0 - tabwidth = 1 - padding = 2 - padchar = ' ' - flags = 0 - ) - tw := tabwriter.NewWriter(&out, minwidth, tabwidth, padding, padchar, flags) - p := func(values ...string) { - text := strings.Join(values, "\t") - fmt.Fprintln(tw, text) - } - var valueNames []string - for name := range configValues { - valueNames = append(valueNames, name) - } - sort.Strings(valueNames) - p("ATTRIBUTE\tDEFAULT\tCONTROLLER") - - for _, name := range valueNames { - info := configValues[name] - val, err := cmd.FormatSmart(info.Value) - if err != nil { - return nil, errors.Annotatef(err, "formatting value for %q", name) - } - d := "-" - c := "-" - if info.Source == "default" { - d = string(val) - } else { - c = string(val) - } - p(name, d, c) - } - - tw.Flush() - return out.Bytes(), nil -} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/model/getdefaults_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/model/getdefaults_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/model/getdefaults_test.go 
2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/model/getdefaults_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,92 +0,0 @@ -// Copyright 2016 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package model_test - -import ( - "strings" - - "github.com/juju/cmd" - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" - - "github.com/juju/juju/cmd/juju/model" - "github.com/juju/juju/testing" -) - -type getDefaultsSuite struct { - fakeEnvSuite -} - -var _ = gc.Suite(&getDefaultsSuite{}) - -func (s *getDefaultsSuite) run(c *gc.C, args ...string) (*cmd.Context, error) { - command := model.NewGetDefaultsCommandForTest(s.fake) - return testing.RunCommand(c, command, args...) -} - -func (s *getDefaultsSuite) TestInitArgCount(c *gc.C) { - // zero or one args is fine. - err := testing.InitCommand(model.NewGetDefaultsCommandForTest(s.fake), nil) - c.Check(err, jc.ErrorIsNil) - err = testing.InitCommand(model.NewGetDefaultsCommandForTest(s.fake), []string{"one"}) - c.Check(err, jc.ErrorIsNil) - // More than one is not allowed. 
- err = testing.InitCommand(model.NewGetDefaultsCommandForTest(s.fake), []string{"one", "two"}) - c.Check(err, gc.ErrorMatches, `unrecognized args: \["two"\]`) -} - -func (s *getDefaultsSuite) TestSingleValue(c *gc.C) { - context, err := s.run(c, "attr") - c.Assert(err, jc.ErrorIsNil) - - output := strings.TrimSpace(testing.Stdout(context)) - expected := "" + - "ATTRIBUTE DEFAULT CONTROLLER\n" + - "attr foo -" - c.Assert(output, gc.Equals, expected) -} - -func (s *getDefaultsSuite) TestSingleValueJSON(c *gc.C) { - context, err := s.run(c, "--format=json", "attr") - c.Assert(err, jc.ErrorIsNil) - - output := strings.TrimSpace(testing.Stdout(context)) - c.Assert(output, gc.Equals, `{"attr":{"Value":"foo","Source":"default"}}`) -} - -func (s *getDefaultsSuite) TestAllValuesYAML(c *gc.C) { - context, err := s.run(c, "--format=yaml") - c.Assert(err, jc.ErrorIsNil) - - output := strings.TrimSpace(testing.Stdout(context)) - expected := "" + - "attr:\n" + - " value: foo\n" + - " source: default\n" + - "attr2:\n" + - " value: bar\n" + - " source: controller" - c.Assert(output, gc.Equals, expected) -} - -func (s *getDefaultsSuite) TestAllValuesJSON(c *gc.C) { - context, err := s.run(c, "--format=json") - c.Assert(err, jc.ErrorIsNil) - - output := strings.TrimSpace(testing.Stdout(context)) - expected := `{"attr":{"Value":"foo","Source":"default"},"attr2":{"Value":"bar","Source":"controller"}}` - c.Assert(output, gc.Equals, expected) -} - -func (s *getDefaultsSuite) TestAllValuesTabular(c *gc.C) { - context, err := s.run(c) - c.Assert(err, jc.ErrorIsNil) - - output := strings.TrimSpace(testing.Stdout(context)) - expected := "" + - "ATTRIBUTE DEFAULT CONTROLLER\n" + - "attr foo -\n" + - "attr2 - bar" - c.Assert(output, gc.Equals, expected) -} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/model/get.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/model/get.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/model/get.go 2016-08-16 
08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/model/get.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,176 +0,0 @@ -// Copyright 2013 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package model - -import ( - "bytes" - "fmt" - "sort" - "strings" - "text/tabwriter" - - "github.com/juju/cmd" - "github.com/juju/errors" - "launchpad.net/gnuflag" - - "github.com/juju/juju/api/modelconfig" - "github.com/juju/juju/cmd/modelcmd" - "github.com/juju/juju/environs/config" -) - -func NewGetCommand() cmd.Command { - return modelcmd.Wrap(&getCommand{}) -} - -// getCommand is able to output either the entire environment or -// the requested value in a format of the user's choosing. -type getCommand struct { - modelcmd.ModelCommandBase - api GetModelAPI - key string - out cmd.Output -} - -const getModelHelpDoc = ` -By default, all configuration (keys and values) for the model are -displayed if a key is not specified. -By default, the model is the current model. 
- -Examples: - - juju get-model-config default-series - juju get-model-config -m mymodel type - -See also: - models - set-model-config - unset-model-config -` - -func (c *getCommand) Info() *cmd.Info { - return &cmd.Info{ - Name: "get-model-config", - Aliases: []string{"model-config"}, - Args: "[]", - Purpose: "Displays configuration settings for a model.", - Doc: getModelHelpDoc, - } -} - -func (c *getCommand) SetFlags(f *gnuflag.FlagSet) { - c.out.AddFlags(f, "tabular", map[string]cmd.Formatter{ - "yaml": cmd.FormatYaml, - "json": cmd.FormatJson, - "tabular": formatConfigTabular, - }) -} - -func (c *getCommand) Init(args []string) (err error) { - c.key, err = cmd.ZeroOrOneArgs(args) - return -} - -type GetModelAPI interface { - Close() error - ModelGet() (map[string]interface{}, error) - ModelGetWithMetadata() (config.ConfigValues, error) -} - -func (c *getCommand) getAPI() (GetModelAPI, error) { - if c.api != nil { - return c.api, nil - } - api, err := c.NewAPIRoot() - if err != nil { - return nil, errors.Annotate(err, "opening API connection") - } - return modelconfig.NewClient(api), nil -} - -func (c *getCommand) isModelAttrbute(attr string) bool { - switch attr { - case config.NameKey, config.TypeKey, config.UUIDKey: - return true - } - return false -} - -func (c *getCommand) Run(ctx *cmd.Context) error { - client, err := c.getAPI() - if err != nil { - return err - } - defer client.Close() - - attrs, err := client.ModelGetWithMetadata() - if err != nil { - return err - } - - for attrName := range attrs { - // We don't want model attributes included, these are available - // via show-model. 
- if c.isModelAttrbute(attrName) { - delete(attrs, attrName) - } - } - - if c.key != "" { - if value, found := attrs[c.key]; found { - out, err := cmd.FormatSmart(value.Value) - if err != nil { - return err - } - fmt.Fprintf(ctx.Stdout, "%v\n", string(out)) - return nil - } - return fmt.Errorf("key %q not found in %q model.", c.key, attrs["name"]) - } - // If key is empty, write out the whole lot. - return c.out.Write(ctx, attrs) -} - -// formatConfigTabular returns a tabular summary of config information. -func formatConfigTabular(value interface{}) ([]byte, error) { - configValues, ok := value.(config.ConfigValues) - if !ok { - return nil, errors.Errorf("expected value of type %T, got %T", configValues, value) - } - - var out bytes.Buffer - const ( - // To format things into columns. - minwidth = 0 - tabwidth = 1 - padding = 2 - padchar = ' ' - flags = 0 - ) - tw := tabwriter.NewWriter(&out, minwidth, tabwidth, padding, padchar, flags) - p := func(values ...string) { - text := strings.Join(values, "\t") - fmt.Fprintln(tw, text) - } - var valueNames []string - for name := range configValues { - valueNames = append(valueNames, name) - } - sort.Strings(valueNames) - p("ATTRIBUTE\tFROM\tVALUE") - - for _, name := range valueNames { - info := configValues[name] - val, err := cmd.FormatSmart(info.Value) - if err != nil { - return nil, errors.Annotatef(err, "formatting value for %q", name) - } - // Some attribute values have a newline appended - // which makes the output messy. 
- valString := strings.TrimSuffix(string(val), "\n") - p(name, info.Source, valString) - } - - tw.Flush() - return out.Bytes(), nil -} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/model/get_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/model/get_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/model/get_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/model/get_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,89 +0,0 @@ -// Copyright 2013 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package model_test - -import ( - "strings" - - "github.com/juju/cmd" - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" - - "github.com/juju/juju/cmd/juju/model" - "github.com/juju/juju/testing" -) - -type GetSuite struct { - fakeEnvSuite -} - -var _ = gc.Suite(&GetSuite{}) - -func (s *GetSuite) run(c *gc.C, args ...string) (*cmd.Context, error) { - command := model.NewGetCommandForTest(s.fake) - return testing.RunCommand(c, command, args...) -} - -func (s *GetSuite) TestInit(c *gc.C) { - // zero or one args is fine. - err := testing.InitCommand(model.NewGetCommandForTest(s.fake), nil) - c.Check(err, jc.ErrorIsNil) - err = testing.InitCommand(model.NewGetCommandForTest(s.fake), []string{"one"}) - c.Check(err, jc.ErrorIsNil) - // More than one is not allowed. 
- err = testing.InitCommand(model.NewGetCommandForTest(s.fake), []string{"one", "two"}) - c.Check(err, gc.ErrorMatches, `unrecognized args: \["two"\]`) -} - -func (s *GetSuite) TestSingleValue(c *gc.C) { - context, err := s.run(c, "special") - c.Assert(err, jc.ErrorIsNil) - - output := strings.TrimSpace(testing.Stdout(context)) - c.Assert(output, gc.Equals, "special value") -} - -func (s *GetSuite) TestSingleValueJSON(c *gc.C) { - context, err := s.run(c, "--format=json", "special") - c.Assert(err, jc.ErrorIsNil) - - output := strings.TrimSpace(testing.Stdout(context)) - c.Assert(output, gc.Equals, "special value") -} - -func (s *GetSuite) TestAllValuesYAML(c *gc.C) { - context, err := s.run(c, "--format=yaml") - c.Assert(err, jc.ErrorIsNil) - - output := strings.TrimSpace(testing.Stdout(context)) - expected := "" + - "running:\n" + - " value: true\n" + - " source: model\n" + - "special:\n" + - " value: special value\n" + - " source: model" - c.Assert(output, gc.Equals, expected) -} - -func (s *GetSuite) TestAllValuesJSON(c *gc.C) { - context, err := s.run(c, "--format=json") - c.Assert(err, jc.ErrorIsNil) - - output := strings.TrimSpace(testing.Stdout(context)) - expected := `{"running":{"Value":true,"Source":"model"},"special":{"Value":"special value","Source":"model"}}` - c.Assert(output, gc.Equals, expected) -} - -func (s *GetSuite) TestAllValuesTabular(c *gc.C) { - context, err := s.run(c) - c.Assert(err, jc.ErrorIsNil) - - output := strings.TrimSpace(testing.Stdout(context)) - expected := "" + - "ATTRIBUTE FROM VALUE\n" + - "running model True\n" + - "special model special value" - c.Assert(output, gc.Equals, expected) -} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/model/grantrevoke.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/model/grantrevoke.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/model/grantrevoke.go 2016-08-16 08:56:25.000000000 +0000 +++ 
juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/model/grantrevoke.go 2016-10-13 14:31:49.000000000 +0000 @@ -6,44 +6,54 @@ import ( "github.com/juju/cmd" "github.com/juju/errors" - "launchpad.net/gnuflag" "github.com/juju/juju/cmd/juju/block" "github.com/juju/juju/cmd/modelcmd" - "github.com/juju/juju/juju/permission" + "github.com/juju/juju/permission" ) var usageGrantSummary = ` -Grants access to a Juju user for a model.`[1:] +Grants access level to a Juju user for a model or controller.`[1:] var usageGrantDetails = ` By default, the controller is the current controller. -Model access can also be granted at user-addition time with the -`[1:] + "`juju add-user`" + ` command. - Users with read access are limited in what they can do with models: ` + "`juju models`, `juju machines`, and `juju status`" + `. Examples: -Grant user 'joe' default (read) access to model 'mymodel': +Grant user 'joe' 'read' access to model 'mymodel': + + juju grant joe read mymodel + +Grant user 'jim' 'write' access to model 'mymodel': - juju grant joe mymodel + juju grant jim write mymodel -Grant user 'jim' write access to model 'mymodel': +Grant user 'sam' 'read' access to models 'model1' and 'model2': - juju grant --acl=write jim mymodel + juju grant sam read model1 model2 -Grant user 'sam' default (read) access to models 'model1' and 'model2': +Grant user 'maria' 'add-model' access to the controller: - juju grant sam model1 model2 + juju grant maria add-model + +Valid access levels for models are: + read + write + admin + +Valid access levels for controllers are: + login + add-model + superuser See also: revoke add-user` var usageRevokeSummary = ` -Revokes access from a Juju user for a model.`[1:] +Revokes access from a Juju user for a model or controller`[1:] var usageRevokeDetails = ` By default, the controller is the current controller. @@ -53,13 +63,17 @@ write access. 
Examples: -Revoke read (and write) access from user 'joe' for model 'mymodel': +Revoke 'read' (and 'write') access from user 'joe' for model 'mymodel': + + juju revoke joe read mymodel - juju revoke joe mymodel +Revoke 'write' access from user 'sam' for models 'model1' and 'model2': -Revoke write access from user 'sam' for models 'model1' and 'model2': + juju revoke sam write model1 model2 - juju revoke --acl=write sam model1 model2 +Revoke 'add-model' access from user 'maria' to the controller: + + juju revoke maria add-model See also: grant`[1:] @@ -67,14 +81,9 @@ type accessCommand struct { modelcmd.ControllerCommandBase - User string - ModelNames []string - ModelAccess string -} - -// SetFlags implements cmd.Command. -func (c *accessCommand) SetFlags(f *gnuflag.FlagSet) { - f.StringVar(&c.ModelAccess, "acl", "read", "Access control ('read' or 'write')") + User string + ModelNames []string + Access string } // Init implements cmd.Command. @@ -84,16 +93,29 @@ } if len(args) < 2 { - return errors.New("no model specified") - } - - _, err := permission.ParseModelAccess(c.ModelAccess) - if err != nil { - return err + return errors.New("no permission level specified") } c.User = args[0] - c.ModelNames = args[1:] + c.ModelNames = args[2:] + c.Access = args[1] + // Special case for backwards compatibility. 
+ if c.Access == "addmodel" { + c.Access = "add-model" + } + if len(c.ModelNames) > 0 { + if err := permission.ValidateControllerAccess(permission.Access(c.Access)); err == nil { + return errors.Errorf("You have specified a controller access permission %q.\n"+ + "If you intended to change controller access, do not specify any model names.\n"+ + "See 'juju help grant'.", c.Access) + } + return permission.ValidateModelAccess(permission.Access(c.Access)) + } + if err := permission.ValidateModelAccess(permission.Access(c.Access)); err == nil { + return errors.Errorf("You have specified a model access permission %q.\n"+ + "If you intended to change model access, you need to specify one or more model names.\n"+ + "See 'juju help grant'.", c.Access) + } return nil } @@ -112,28 +134,55 @@ func (c *grantCommand) Info() *cmd.Info { return &cmd.Info{ Name: "grant", - Args: " ...", + Args: " [ ...]", Purpose: usageGrantSummary, Doc: usageGrantDetails, } } -func (c *grantCommand) getAPI() (GrantModelAPI, error) { +func (c *grantCommand) getModelAPI() (GrantModelAPI, error) { if c.api != nil { return c.api, nil } return c.NewModelManagerAPIClient() } +func (c *grantCommand) getControllerAPI() (GrantControllerAPI, error) { + return c.NewControllerAPIClient() +} + // GrantModelAPI defines the API functions used by the grant command. type GrantModelAPI interface { Close() error GrantModel(user, access string, modelUUIDs ...string) error } +// GrantControllerAPI defines the API functions used by the grant command. +type GrantControllerAPI interface { + Close() error + GrantController(user, access string) error +} + // Run implements cmd.Command. 
func (c *grantCommand) Run(ctx *cmd.Context) error { - client, err := c.getAPI() + if len(c.ModelNames) > 0 { + return c.runForModel() + } + return c.runForController() +} + +func (c *grantCommand) runForController() error { + client, err := c.getControllerAPI() + if err != nil { + return err + } + defer client.Close() + + return block.ProcessBlockedError(client.GrantController(c.User, c.Access), block.BlockChange) +} + +func (c *grantCommand) runForModel() error { + client, err := c.getModelAPI() if err != nil { return err } @@ -143,7 +192,7 @@ if err != nil { return err } - return block.ProcessBlockedError(client.GrantModel(c.User, c.ModelAccess, models...), block.BlockChange) + return block.ProcessBlockedError(client.GrantModel(c.User, c.Access, models...), block.BlockChange) } // NewRevokeCommand returns a new revoke command. @@ -161,36 +210,63 @@ func (c *revokeCommand) Info() *cmd.Info { return &cmd.Info{ Name: "revoke", - Args: " ...", + Args: " [ ...]", Purpose: usageRevokeSummary, Doc: usageRevokeDetails, } } -func (c *revokeCommand) getAPI() (RevokeModelAPI, error) { +func (c *revokeCommand) getModelAPI() (RevokeModelAPI, error) { if c.api != nil { return c.api, nil } return c.NewModelManagerAPIClient() } +func (c *revokeCommand) getControllerAPI() (RevokeControllerAPI, error) { + return c.NewControllerAPIClient() +} + // RevokeModelAPI defines the API functions used by the revoke command. type RevokeModelAPI interface { Close() error RevokeModel(user, access string, modelUUIDs ...string) error } +// RevokeControllerAPI defines the API functions used by the revoke command. +type RevokeControllerAPI interface { + Close() error + RevokeController(user, access string) error +} + // Run implements cmd.Command. 
func (c *revokeCommand) Run(ctx *cmd.Context) error { - client, err := c.getAPI() + if len(c.ModelNames) > 0 { + return c.runForModel() + } + return c.runForController() +} + +func (c *revokeCommand) runForController() error { + client, err := c.getControllerAPI() if err != nil { return err } defer client.Close() - modelUUIDs, err := c.ModelUUIDs(c.ModelNames) + return block.ProcessBlockedError(client.RevokeController(c.User, c.Access), block.BlockChange) +} + +func (c *revokeCommand) runForModel() error { + client, err := c.getModelAPI() + if err != nil { + return err + } + defer client.Close() + + models, err := c.ModelUUIDs(c.ModelNames) if err != nil { return err } - return block.ProcessBlockedError(client.RevokeModel(c.User, c.ModelAccess, modelUUIDs...), block.BlockChange) + return block.ProcessBlockedError(client.RevokeModel(c.User, c.Access, models...), block.BlockChange) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/model/grantrevoke_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/model/grantrevoke_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/model/grantrevoke_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/model/grantrevoke_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,11 +4,13 @@ package model_test import ( + "strings" + "github.com/juju/cmd" jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" - "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/apiserver/common" "github.com/juju/juju/cmd/juju/model" "github.com/juju/juju/jujuclient" "github.com/juju/juju/jujuclient/jujuclienttesting" @@ -42,16 +44,16 @@ s.store.CurrentControllerName = controllerName s.store.Controllers[controllerName] = jujuclient.ControllerDetails{} s.store.Accounts[controllerName] = jujuclient.AccountDetails{ - User: "bob@local", + User: "bob", } s.store.Models = map[string]*jujuclient.ControllerModels{ controllerName: { Models: 
map[string]jujuclient.ModelDetails{ - "bob@local/foo": jujuclient.ModelDetails{fooModelUUID}, - "bob@local/bar": jujuclient.ModelDetails{barModelUUID}, - "bob@local/baz": jujuclient.ModelDetails{bazModelUUID}, - "bob@local/model1": jujuclient.ModelDetails{model1ModelUUID}, - "bob@local/model2": jujuclient.ModelDetails{model2ModelUUID}, + "bob/foo": jujuclient.ModelDetails{fooModelUUID}, + "bob/bar": jujuclient.ModelDetails{barModelUUID}, + "bob/baz": jujuclient.ModelDetails{bazModelUUID}, + "bob/model1": jujuclient.ModelDetails{model1ModelUUID}, + "bob/model2": jujuclient.ModelDetails{model2ModelUUID}, }, }, } @@ -65,7 +67,7 @@ func (s *grantRevokeSuite) TestPassesValues(c *gc.C) { user := "sam" models := []string{fooModelUUID, barModelUUID, bazModelUUID} - _, err := s.run(c, "sam", "foo", "bar", "baz") + _, err := s.run(c, "sam", "read", "foo", "bar", "baz") c.Assert(err, jc.ErrorIsNil) c.Assert(s.fake.user, jc.DeepEquals, user) c.Assert(s.fake.modelUUIDs, jc.DeepEquals, models) @@ -74,7 +76,7 @@ func (s *grantRevokeSuite) TestAccess(c *gc.C) { sam := "sam" - _, err := s.run(c, "--acl", "write", "sam", "model1", "model2") + _, err := s.run(c, "sam", "write", "model1", "model2") c.Assert(err, jc.ErrorIsNil) c.Assert(s.fake.user, jc.DeepEquals, sam) c.Assert(s.fake.modelUUIDs, jc.DeepEquals, []string{model1ModelUUID, model2ModelUUID}) @@ -82,10 +84,9 @@ } func (s *grantRevokeSuite) TestBlockGrant(c *gc.C) { - s.fake.err = ¶ms.Error{Code: params.CodeOperationBlocked} - _, err := s.run(c, "sam", "foo") - c.Assert(err, gc.Equals, cmd.ErrSilent) - c.Check(c.GetTestLog(), jc.Contains, "To unblock changes") + s.fake.err = common.OperationBlockedError("TestBlockGrant") + _, err := s.run(c, "sam", "read", "foo") + testing.AssertOperationWasBlocked(c, err, ".*TestBlockGrant.*") } type grantSuite struct { @@ -107,7 +108,7 @@ err := testing.InitCommand(wrappedCmd, []string{}) c.Assert(err, gc.ErrorMatches, "no user specified") - err = testing.InitCommand(wrappedCmd, 
[]string{"bob", "model1", "model2"}) + err = testing.InitCommand(wrappedCmd, []string{"bob", "read", "model1", "model2"}) c.Assert(err, jc.ErrorIsNil) c.Assert(grantCmd.User, gc.Equals, "bob") @@ -115,9 +116,20 @@ err = testing.InitCommand(wrappedCmd, []string{}) c.Assert(err, gc.ErrorMatches, `no user specified`) +} - err = testing.InitCommand(wrappedCmd, []string{"nomodel"}) - c.Assert(err, gc.ErrorMatches, `no model specified`) +// TestInitGrantAddModel checks that both the documented 'add-model' access and +// the backwards-compatible 'addmodel' work to grant the AddModel permission. +func (s *grantSuite) TestInitGrantAddModel(c *gc.C) { + wrappedCmd, grantCmd := model.NewGrantCommandForTest(s.fake, s.store) + // The documented case, add-model. + err := testing.InitCommand(wrappedCmd, []string{"bob", "add-model"}) + c.Check(err, jc.ErrorIsNil) + + // The backwards-compatible case, addmodel. + err = testing.InitCommand(wrappedCmd, []string{"bob", "addmodel"}) + c.Check(err, jc.ErrorIsNil) + c.Assert(grantCmd.Access, gc.Equals, "add-model") } type revokeSuite struct { @@ -139,7 +151,7 @@ err := testing.InitCommand(wrappedCmd, []string{}) c.Assert(err, gc.ErrorMatches, "no user specified") - err = testing.InitCommand(wrappedCmd, []string{"bob", "model1", "model2"}) + err = testing.InitCommand(wrappedCmd, []string{"bob", "read", "model1", "model2"}) c.Assert(err, jc.ErrorIsNil) c.Assert(revokeCmd.User, gc.Equals, "bob") @@ -148,8 +160,34 @@ err = testing.InitCommand(wrappedCmd, []string{}) c.Assert(err, gc.ErrorMatches, `no user specified`) - err = testing.InitCommand(wrappedCmd, []string{"nomodel"}) - c.Assert(err, gc.ErrorMatches, `no model specified`) +} + +// TestInitRevokeAddModel checks that both the documented 'add-model' access and +// the backwards-compatible 'addmodel' work to revoke the AddModel permission. 
+func (s *grantSuite) TestInitRevokeAddModel(c *gc.C) { + wrappedCmd, revokeCmd := model.NewRevokeCommandForTest(s.fake, s.store) + // The documented case, add-model. + err := testing.InitCommand(wrappedCmd, []string{"bob", "add-model"}) + c.Check(err, jc.ErrorIsNil) + + // The backwards-compatible case, addmodel. + err = testing.InitCommand(wrappedCmd, []string{"bob", "addmodel"}) + c.Check(err, jc.ErrorIsNil) + c.Assert(revokeCmd.Access, gc.Equals, "add-model") +} + +func (s *grantSuite) TestModelAccessForController(c *gc.C) { + wrappedCmd, _ := model.NewRevokeCommandForTest(s.fake, s.store) + err := testing.InitCommand(wrappedCmd, []string{"bob", "write"}) + msg := strings.Replace(err.Error(), "\n", "", -1) + c.Check(msg, gc.Matches, `You have specified a model access permission "write".*`) +} + +func (s *grantSuite) TestControllerAccessForModel(c *gc.C) { + wrappedCmd, _ := model.NewRevokeCommandForTest(s.fake, s.store) + err := testing.InitCommand(wrappedCmd, []string{"bob", "superuser", "default"}) + msg := strings.Replace(err.Error(), "\n", "", -1) + c.Check(msg, gc.Matches, `You have specified a controller access permission "superuser".*`) } type fakeGrantRevokeAPI struct { diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/model/retryprovisioning_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/model/retryprovisioning_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/model/retryprovisioning_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/model/retryprovisioning_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,10 +4,8 @@ package model_test import ( - "fmt" "strings" - "github.com/juju/cmd" "github.com/juju/errors" jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" @@ -63,7 +61,7 @@ m.data["transient"] = true } else { results[i].Error = common.ServerError( - fmt.Errorf("%s is not in an error state", + errors.Errorf("%s is not in an error state", 
names.ReadableString(machine))) } } else { @@ -153,9 +151,6 @@ c.Check(err, gc.ErrorMatches, t.err) continue } - c.Assert(err, gc.ErrorMatches, cmd.ErrSilent.Error()) - // msg is logged - stripped := strings.Replace(c.GetTestLog(), "\n", "", -1) - c.Check(stripped, gc.Matches, ".*TestBlockRetryProvisioning.*") + testing.AssertOperationWasBlocked(c, err, ".*TestBlockRetryProvisioning.*") } } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/model/setdefaults.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/model/setdefaults.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/model/setdefaults.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/model/setdefaults.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,107 +0,0 @@ -// Copyright 2016 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package model - -import ( - "github.com/juju/cmd" - "github.com/juju/errors" - "github.com/juju/utils/keyvalues" - - "github.com/juju/juju/api/modelconfig" - "github.com/juju/juju/cmd/juju/block" - "github.com/juju/juju/cmd/modelcmd" - "github.com/juju/juju/environs/config" -) - -// NewSetModelDefaultsCommand returns a command used to set default -// model attributes used when creating a new model. -func NewSetModelDefaultsCommand() cmd.Command { - c := &setDefaultsCommand{} - c.newAPIFunc = func() (setModelDefaultsAPI, error) { - api, err := c.NewAPIRoot() - if err != nil { - return nil, errors.Annotate(err, "opening API connection") - } - return modelconfig.NewClient(api), nil - } - return modelcmd.Wrap(c) -} - -type setDefaultsCommand struct { - modelcmd.ModelCommandBase - newAPIFunc func() (setModelDefaultsAPI, error) - values attributes -} - -const setModelDefaultsHelpDoc = ` -A shared model configuration attribute is set so that all newly created -models use this value unless overridden. -Consult the online documentation for a list of keys and possible values. 
- -Examples: - - juju set-model-default logging-config='=WARNING;unit=INFO' - juju set-model-default -m mymodel ftp-proxy=http://proxy default-series=xenial - -See also: - models - model-defaults - unset-model-default -` - -func (c *setDefaultsCommand) Info() *cmd.Info { - return &cmd.Info{ - Name: "set-model-default", - Args: "= ...", - Purpose: "Sets default configuration keys on a model.", - Doc: setModelDefaultsHelpDoc, - } -} - -func (c *setDefaultsCommand) Init(args []string) (err error) { - if len(args) == 0 { - return errors.New("no key, value pairs specified") - } - - options, err := keyvalues.Parse(args, true) - if err != nil { - return err - } - - c.values = make(attributes) - for key, value := range options { - if key == "agent-version" { - return errors.New("agent-version must be set via upgrade-juju") - } - c.values[key] = value - } - for key := range c.values { - // check if the key exists in the known config - // and warn the user if the key is not defined - if _, exists := config.ConfigDefaults()[key]; !exists { - logger.Warningf("key %q is not defined in the known model configuration: possible misspelling", key) - } - } - - return nil -} - -type setModelDefaultsAPI interface { - // Close closes the api connection. - Close() error - - // SetModelDefaults sets the default config values to use - // when creating new models. 
- SetModelDefaults(config map[string]interface{}) error -} - -func (c *setDefaultsCommand) Run(ctx *cmd.Context) error { - client, err := c.newAPIFunc() - if err != nil { - return err - } - defer client.Close() - - return block.ProcessBlockedError(client.SetModelDefaults(c.values), block.BlockChange) -} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/model/setdefaults_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/model/setdefaults_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/model/setdefaults_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/model/setdefaults_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,77 +0,0 @@ -// Copyright 2016 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package model_test - -import ( - "github.com/juju/cmd" - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" - - "github.com/juju/juju/apiserver/common" - "github.com/juju/juju/cmd/juju/model" - "github.com/juju/juju/environs/config" - "github.com/juju/juju/testing" -) - -type SetDefaultsSuite struct { - fakeEnvSuite -} - -var _ = gc.Suite(&SetDefaultsSuite{}) - -func (s *SetDefaultsSuite) run(c *gc.C, args ...string) (*cmd.Context, error) { - command := model.NewSetDefaultsCommandForTest(s.fake) - return testing.RunCommand(c, command, args...) 
-} - -func (s *SetDefaultsSuite) TestInitKeyArgs(c *gc.C) { - for i, test := range []struct { - args []string - errorMatch string - }{ - { - errorMatch: "no key, value pairs specified", - }, { - args: []string{"special"}, - errorMatch: `expected "key=value", got "special"`, - }, { - args: []string{"special=extra", "special=other"}, - errorMatch: `key "special" specified more than once`, - }, { - args: []string{"agent-version=2.0.0"}, - errorMatch: "agent-version must be set via upgrade-juju", - }, - } { - c.Logf("test %d", i) - setCmd := model.NewSetCommandForTest(s.fake) - err := testing.InitCommand(setCmd, test.args) - c.Check(err, gc.ErrorMatches, test.errorMatch) - } -} - -func (s *SetDefaultsSuite) TestInitUnknownValue(c *gc.C) { - unsetCmd := model.NewUnsetDefaultsCommandForTest(s.fake) - err := testing.InitCommand(unsetCmd, []string{"attr", "weird"}) - c.Assert(err, jc.ErrorIsNil) - expected := `key "weird" is not defined in the known model configuration: possible misspelling` - c.Check(c.GetTestLog(), jc.Contains, expected) -} - -func (s *SetDefaultsSuite) TestSet(c *gc.C) { - _, err := s.run(c, "special=extra", "attr=baz") - c.Assert(err, jc.ErrorIsNil) - c.Assert(s.fake.defaults, jc.DeepEquals, config.ConfigValues{ - "attr": {Value: "baz", Source: "controller"}, - "attr2": {Value: "bar", Source: "controller"}, - "special": {Value: "extra", Source: "controller"}, - }) -} - -func (s *SetDefaultsSuite) TestBlockedError(c *gc.C) { - s.fake.err = common.OperationBlockedError("TestBlockedError") - _, err := s.run(c, "special=extra") - c.Assert(err, gc.Equals, cmd.ErrSilent) - // msg is logged - c.Check(c.GetTestLog(), jc.Contains, "TestBlockedError") -} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/model/set.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/model/set.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/model/set.go 2016-08-16 08:56:25.000000000 +0000 +++ 
juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/model/set.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,113 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package model - -import ( - "github.com/juju/cmd" - "github.com/juju/errors" - "github.com/juju/utils/keyvalues" - - "github.com/juju/juju/api/modelconfig" - "github.com/juju/juju/cmd/juju/block" - "github.com/juju/juju/cmd/modelcmd" -) - -func NewSetCommand() cmd.Command { - return modelcmd.Wrap(&setCommand{}) -} - -type attributes map[string]interface{} - -type setCommand struct { - modelcmd.ModelCommandBase - api SetModelAPI - values attributes -} - -const setModelHelpDoc = ` -Model configuration consists of a collection of keys and their respective values. -By default, the model is the current model. -Consult the online documentation for a list of keys and possible values. - -Examples: - - juju set-model-config logging-config='=WARNING;unit=INFO' - juju set-model-config -m mymodel ftp-proxy=http://proxy default-series=xenial - -See also: - models - get-model-config - unset-model-config -` - -func (c *setCommand) Info() *cmd.Info { - return &cmd.Info{ - Name: "set-model-config", - Args: "= ...", - Purpose: "Sets configuration keys on a model.", - Doc: setModelHelpDoc, - } -} - -func (c *setCommand) Init(args []string) (err error) { - if len(args) == 0 { - return errors.New("no key, value pairs specified") - } - - options, err := keyvalues.Parse(args, true) - if err != nil { - return err - } - - c.values = make(attributes) - for key, value := range options { - if key == "agent-version" { - return errors.New("agent-version must be set via upgrade-juju") - } - c.values[key] = value - } - - return nil -} - -type SetModelAPI interface { - Close() error - ModelGet() (map[string]interface{}, error) - ModelSet(config map[string]interface{}) error -} - -func (c *setCommand) getAPI() (SetModelAPI, error) { - if c.api != nil { - return c.api, nil - } - api, err 
:= c.NewAPIRoot() - if err != nil { - return nil, errors.Annotate(err, "opening API connection") - } - return modelconfig.NewClient(api), nil -} - -func (c *setCommand) Run(ctx *cmd.Context) error { - client, err := c.getAPI() - if err != nil { - return err - } - defer client.Close() - - // extra call to the API to retrieve env config - envAttrs, err := client.ModelGet() - if err != nil { - return err - } - for key := range c.values { - // check if the key exists in the existing env config - // and warn the user if the key is not defined in - // the existing config - if _, exists := envAttrs[key]; !exists { - logger.Warningf("key %q is not defined in the current model configuration: possible misspelling", key) - } - - } - return block.ProcessBlockedError(client.ModelSet(c.values), block.BlockChange) -} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/model/set_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/model/set_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/model/set_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/model/set_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,76 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package model_test - -import ( - "github.com/juju/cmd" - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" - - "github.com/juju/juju/apiserver/common" - "github.com/juju/juju/cmd/juju/model" - "github.com/juju/juju/testing" -) - -type SetSuite struct { - fakeEnvSuite -} - -var _ = gc.Suite(&SetSuite{}) - -func (s *SetSuite) run(c *gc.C, args ...string) (*cmd.Context, error) { - command := model.NewSetCommandForTest(s.fake) - return testing.RunCommand(c, command, args...) 
-} - -func (s *SetSuite) TestInit(c *gc.C) { - for i, test := range []struct { - args []string - errorMatch string - }{ - { - errorMatch: "no key, value pairs specified", - }, { - args: []string{"special"}, - errorMatch: `expected "key=value", got "special"`, - }, { - args: []string{"special=extra", "special=other"}, - errorMatch: `key "special" specified more than once`, - }, { - args: []string{"agent-version=2.0.0"}, - errorMatch: "agent-version must be set via upgrade-juju", - }, - } { - c.Logf("test %d", i) - setCmd := model.NewSetCommandForTest(s.fake) - err := testing.InitCommand(setCmd, test.args) - c.Check(err, gc.ErrorMatches, test.errorMatch) - } -} - -func (s *SetSuite) TestPassesValues(c *gc.C) { - _, err := s.run(c, "special=extra", "unknown=foo") - c.Assert(err, jc.ErrorIsNil) - expected := map[string]interface{}{ - "special": "extra", - "unknown": "foo", - } - c.Assert(s.fake.values, jc.DeepEquals, expected) -} - -func (s *SetSuite) TestSettingKnownValue(c *gc.C) { - _, err := s.run(c, "special=extra", "unknown=foo") - c.Assert(err, jc.ErrorIsNil) - // Command succeeds, but warning logged. 
- expected := `key "unknown" is not defined in the current model configuration: possible misspelling` - c.Check(c.GetTestLog(), jc.Contains, expected) -} - -func (s *SetSuite) TestBlockedError(c *gc.C) { - s.fake.err = common.OperationBlockedError("TestBlockedError") - _, err := s.run(c, "special=extra") - c.Assert(err, gc.Equals, cmd.ErrSilent) - // msg is logged - c.Check(c.GetTestLog(), jc.Contains, "TestBlockedError") -} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/model/show.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/model/show.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/model/show.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/model/show.go 2016-10-13 14:31:49.000000000 +0000 @@ -8,24 +8,34 @@ "github.com/juju/cmd" "github.com/juju/errors" + "github.com/juju/gnuflag" "gopkg.in/juju/names.v2" - "launchpad.net/gnuflag" "github.com/juju/juju/api/modelmanager" "github.com/juju/juju/apiserver/params" "github.com/juju/juju/cmd/juju/common" "github.com/juju/juju/cmd/modelcmd" + "github.com/juju/juju/cmd/output" + "github.com/juju/juju/jujuclient" ) const showModelCommandDoc = `Show information about the current or specified model` func NewShowCommand() cmd.Command { - return modelcmd.Wrap(&showModelCommand{}) + showCmd := &showModelCommand{} + showCmd.RefreshModels = showCmd.ModelCommandBase.RefreshModels + return modelcmd.Wrap(showCmd, modelcmd.WrapSkipModelFlags) } // showModelCommand shows all the users with access to the current model. type showModelCommand struct { modelcmd.ModelCommandBase + // RefreshModels hides the RefreshModels function defined + // in ModelCommandBase. This allows overriding for testing. + // NOTE: ideal solution would be to have the base implement a method + // like store.ModelByName which auto-refreshes. 
+ RefreshModels func(jujuclient.ClientStore, string) error + out cmd.Output api ShowModelAPI } @@ -41,7 +51,7 @@ if c.api != nil { return c.api, nil } - api, err := c.NewAPIRoot() + api, err := c.NewControllerAPIRoot() if err != nil { return nil, errors.Trace(err) } @@ -52,6 +62,7 @@ func (c *showModelCommand) Info() *cmd.Info { return &cmd.Info{ Name: "show-model", + Args: "", Purpose: "Shows information about the current or specified model.", Doc: showModelCommandDoc, } @@ -59,10 +70,27 @@ // SetFlags implements Command.SetFlags. func (c *showModelCommand) SetFlags(f *gnuflag.FlagSet) { - c.out.AddFlags(f, "yaml", map[string]cmd.Formatter{ - "yaml": cmd.FormatYaml, - "json": cmd.FormatJson, - }) + c.ModelCommandBase.SetFlags(f) + c.out.AddFlags(f, "yaml", output.DefaultFormatters) +} + +// Init implements Command.Init. +func (c *showModelCommand) Init(args []string) error { + if len(args) > 0 { + c.SetModelName(args[0]) + args = args[1:] + } + if err := c.ModelCommandBase.Init(args); err != nil { + return err + } + if c.ModelName() == "" { + defaultModel, err := modelcmd.GetCurrentModel(c.ClientStore()) + if err != nil { + return err + } + c.SetModelName(defaultModel) + } + return nil } // Run implements Command.Run. @@ -78,6 +106,16 @@ c.ControllerName(), c.ModelName(), ) + if errors.IsNotFound(err) { + if err := c.RefreshModels(store, c.ControllerName()); err != nil { + return errors.Annotate(err, "refreshing models cache") + } + // Now try again. 
+ modelDetails, err = store.ModelByName( + c.ControllerName(), + c.ModelName(), + ) + } if err != nil { return errors.Annotate(err, "getting model details") } @@ -106,6 +144,7 @@ if err != nil { return nil, errors.Trace(err) } + out.ControllerName = c.ControllerName() output[out.Name] = out } return output, nil diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/model/show_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/model/show_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/model/show_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/model/show_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -6,6 +6,7 @@ import ( "time" + "github.com/juju/cmd" "github.com/juju/errors" gitjujutesting "github.com/juju/testing" jc "github.com/juju/testing/checkers" @@ -59,11 +60,11 @@ statusSince := time.Date(2016, 4, 5, 0, 0, 0, 0, time.UTC) users := []params.ModelUserInfo{{ - UserName: "admin@local", + UserName: "admin", LastConnection: &lastConnection, Access: "write", }, { - UserName: "bob@local", + UserName: "bob", DisplayName: "Bob", Access: "read", }} @@ -74,13 +75,13 @@ Name: "mymodel", UUID: testing.ModelTag.Id(), ControllerUUID: "1ca2293b-fdb9-4299-97d6-55583bb39364", - OwnerTag: "user-admin@local", - Cloud: "some-cloud", + OwnerTag: "user-admin", + CloudTag: "cloud-some-cloud", CloudRegion: "some-region", ProviderType: "openstack", Life: params.Alive, Status: params.EntityStatus{ - Status: status.StatusActive, + Status: status.Active, Since: &statusSince, }, Users: users, @@ -91,7 +92,8 @@ "name": "mymodel", "model-uuid": "deadbeef-0bad-400d-8000-4b1d0d06f00d", "controller-uuid": "1ca2293b-fdb9-4299-97d6-55583bb39364", - "owner": "admin@local", + "controller-name": "testing", + "owner": "admin", "cloud": "some-cloud", "region": "some-region", "type": "openstack", @@ -101,11 +103,11 @@ "since": "2016-04-05", }, "users": attrs{ - "admin@local": attrs{ + "admin": attrs{ "access": 
"write", "last-connection": "2015-03-20", }, - "bob@local": attrs{ + "bob": attrs{ "display-name": "Bob", "access": "read", "last-connection": "never connected", @@ -118,17 +120,21 @@ s.store.CurrentControllerName = "testing" s.store.Controllers["testing"] = jujuclient.ControllerDetails{} s.store.Accounts["testing"] = jujuclient.AccountDetails{ - User: "admin@local", + User: "admin", } - err := s.store.UpdateModel("testing", "admin@local/mymodel", jujuclient.ModelDetails{ + err := s.store.UpdateModel("testing", "admin/mymodel", jujuclient.ModelDetails{ testing.ModelTag.Id(), }) c.Assert(err, jc.ErrorIsNil) - s.store.Models["testing"].CurrentModel = "admin@local/mymodel" + s.store.Models["testing"].CurrentModel = "admin/mymodel" +} + +func (s *ShowCommandSuite) newShowCommand() cmd.Command { + return model.NewShowCommandForTest(&s.fake, noOpRefresh, s.store) } func (s *ShowCommandSuite) TestShow(c *gc.C) { - _, err := testing.RunCommand(c, model.NewShowCommandForTest(&s.fake, s.store)) + _, err := testing.RunCommand(c, s.newShowCommand()) c.Assert(err, jc.ErrorIsNil) s.fake.CheckCalls(c, []gitjujutesting.StubCall{ {"ModelInfo", []interface{}{[]names.ModelTag{testing.ModelTag}}}, @@ -136,19 +142,34 @@ }) } +func (s *ShowCommandSuite) TestShowUnknownCallsRefresh(c *gc.C) { + called := false + refresh := func(jujuclient.ClientStore, string) error { + called = true + return nil + } + _, err := testing.RunCommand(c, model.NewShowCommandForTest(&s.fake, refresh, s.store), "unknown") + c.Check(called, jc.IsTrue) + c.Check(err, jc.Satisfies, errors.IsNotFound) +} + func (s *ShowCommandSuite) TestShowFormatYaml(c *gc.C) { - ctx, err := testing.RunCommand(c, model.NewShowCommandForTest(&s.fake, s.store), "--format", "yaml") + ctx, err := testing.RunCommand(c, s.newShowCommand(), "--format", "yaml") c.Assert(err, jc.ErrorIsNil) c.Assert(testing.Stdout(ctx), jc.YAMLEquals, s.expectedOutput) } func (s *ShowCommandSuite) TestShowFormatJson(c *gc.C) { - ctx, err := 
testing.RunCommand(c, model.NewShowCommandForTest(&s.fake, s.store), "--format", "json") + ctx, err := testing.RunCommand(c, s.newShowCommand(), "--format", "json") c.Assert(err, jc.ErrorIsNil) c.Assert(testing.Stdout(ctx), jc.JSONEquals, s.expectedOutput) } func (s *ShowCommandSuite) TestUnrecognizedArg(c *gc.C) { - _, err := testing.RunCommand(c, model.NewShowCommandForTest(&s.fake, s.store), "-m", "admin", "whoops") + _, err := testing.RunCommand(c, s.newShowCommand(), "admin", "whoops") c.Assert(err, gc.ErrorMatches, `unrecognized args: \["whoops"\]`) } + +func noOpRefresh(jujuclient.ClientStore, string) error { + return nil +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/model/unsetdefaults.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/model/unsetdefaults.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/model/unsetdefaults.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/model/unsetdefaults.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,96 +0,0 @@ -// Copyright 2016 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package model - -import ( - "github.com/juju/cmd" - "github.com/juju/errors" - - "github.com/juju/juju/api/modelconfig" - "github.com/juju/juju/cmd/juju/block" - "github.com/juju/juju/cmd/modelcmd" - "github.com/juju/juju/environs/config" -) - -// NewUnsetModelDefaultsCommand returns a command used to reset default -// model attributes used when creating a new model. 
-func NewUnsetModelDefaultsCommand() cmd.Command { - c := &unsetDefaultsCommand{} - c.newAPIFunc = func() (unsetModelDefaultsAPI, error) { - api, err := c.NewAPIRoot() - if err != nil { - return nil, errors.Annotate(err, "opening API connection") - } - return modelconfig.NewClient(api), nil - } - return modelcmd.Wrap(c) - -} - -type unsetDefaultsCommand struct { - modelcmd.ModelCommandBase - newAPIFunc func() (unsetModelDefaultsAPI, error) - keys []string -} - -// unsetModelDefaultsHelpDoc is multi-line since we need to use ` to denote -// commands for ease in markdown. -const unsetModelDefaultsHelpDoc = ` -A shared model configuration attribute is unset so that all newly created -models will use any Juju defined default. -Consult the online documentation for a list of keys and possible values. - -Examples: - - juju unset-model-default ftp-proxy test-mode - -See also: - set-model-config - get-model-config -` - -func (c *unsetDefaultsCommand) Info() *cmd.Info { - return &cmd.Info{ - Name: "unset-model-default", - Args: " ...", - Purpose: "Unsets default model configuration.", - Doc: unsetModelDefaultsHelpDoc, - } -} - -func (c *unsetDefaultsCommand) Init(args []string) error { - if len(args) == 0 { - return errors.New("no keys specified") - } - c.keys = args - - for _, key := range c.keys { - // check if the key exists in the known config - // and warn the user if the key is not defined - if _, exists := config.ConfigDefaults()[key]; !exists { - logger.Warningf("key %q is not defined in the known model configuration: possible misspelling", key) - } - } - - return nil -} - -type unsetModelDefaultsAPI interface { - // Close closes the api connection. - Close() error - - // UnsetModelDefaults clears the default model - // configuration values. 
- UnsetModelDefaults(keys ...string) error -} - -func (c *unsetDefaultsCommand) Run(ctx *cmd.Context) error { - client, err := c.newAPIFunc() - if err != nil { - return err - } - defer client.Close() - - return block.ProcessBlockedError(client.UnsetModelDefaults(c.keys...), block.BlockChange) -} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/model/unsetdefaults_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/model/unsetdefaults_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/model/unsetdefaults_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/model/unsetdefaults_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,60 +0,0 @@ -// Copyright 2016 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package model_test - -import ( - "github.com/juju/cmd" - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" - - "github.com/juju/juju/apiserver/common" - "github.com/juju/juju/cmd/juju/model" - "github.com/juju/juju/environs/config" - "github.com/juju/juju/testing" -) - -type UnsetDefaultsSuite struct { - fakeEnvSuite -} - -var _ = gc.Suite(&UnsetDefaultsSuite{}) - -func (s *UnsetDefaultsSuite) run(c *gc.C, args ...string) (*cmd.Context, error) { - command := model.NewUnsetDefaultsCommandForTest(s.fake) - return testing.RunCommand(c, command, args...) -} - -func (s *UnsetDefaultsSuite) TestInitArgCount(c *gc.C) { - unsetCmd := model.NewUnsetDefaultsCommandForTest(s.fake) - // Only empty is a problem. - err := testing.InitCommand(unsetCmd, []string{}) - c.Assert(err, gc.ErrorMatches, "no keys specified") - // Everything else is fine. 
- err = testing.InitCommand(unsetCmd, []string{"something", "weird"}) - c.Assert(err, jc.ErrorIsNil) -} - -func (s *UnsetDefaultsSuite) TestInitUnknownValue(c *gc.C) { - unsetCmd := model.NewUnsetDefaultsCommandForTest(s.fake) - err := testing.InitCommand(unsetCmd, []string{"attr", "weird"}) - c.Assert(err, jc.ErrorIsNil) - expected := `key "weird" is not defined in the known model configuration: possible misspelling` - c.Check(c.GetTestLog(), jc.Contains, expected) -} - -func (s *UnsetDefaultsSuite) TestUnset(c *gc.C) { - _, err := s.run(c, "attr", "unknown") - c.Assert(err, jc.ErrorIsNil) - c.Assert(s.fake.defaults, jc.DeepEquals, config.ConfigValues{ - "attr2": {Value: "bar", Source: "controller"}, - }) -} - -func (s *UnsetDefaultsSuite) TestBlockedError(c *gc.C) { - s.fake.err = common.OperationBlockedError("TestBlockedError") - _, err := s.run(c, "attr") - c.Assert(err, gc.Equals, cmd.ErrSilent) - // msg is logged - c.Check(c.GetTestLog(), jc.Contains, "TestBlockedError") -} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/model/unset.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/model/unset.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/model/unset.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/model/unset.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,101 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package model - -import ( - "github.com/juju/cmd" - "github.com/juju/errors" - - "github.com/juju/juju/api/modelconfig" - "github.com/juju/juju/cmd/juju/block" - "github.com/juju/juju/cmd/modelcmd" -) - -func NewUnsetCommand() cmd.Command { - return modelcmd.Wrap(&unsetCommand{}) -} - -type unsetCommand struct { - modelcmd.ModelCommandBase - api UnsetModelAPI - keys []string -} - -// unsetModelHelpDoc is multi-line since we need to use ` to denote -// commands for ease in markdown. 
-const unsetModelHelpDoc = "" + - "A model key is reset to its default value. If it does not have such a\n" + - "value defined then it is removed.\n" + - "Attempting to remove a required key with no default value will result\n" + - "in an error.\n" + - "By default, the model is the current model.\n" + - "Model configuration key values can be viewed with `juju get-model-config`.\n" + unsetModelHelpDocExamples - -const unsetModelHelpDocExamples = ` -Examples: - - juju unset-model-config ftp-proxy test-mode - -See also: - set-model-config - get-model-config -` - -func (c *unsetCommand) Info() *cmd.Info { - return &cmd.Info{ - Name: "unset-model-config", - Args: " ...", - Purpose: "Unsets model configuration.", - Doc: unsetModelHelpDoc, - } -} - -func (c *unsetCommand) Init(args []string) error { - if len(args) == 0 { - return errors.New("no keys specified") - } - c.keys = args - return nil -} - -type UnsetModelAPI interface { - Close() error - ModelGet() (map[string]interface{}, error) - ModelUnset(keys ...string) error -} - -func (c *unsetCommand) getAPI() (UnsetModelAPI, error) { - if c.api != nil { - return c.api, nil - } - api, err := c.NewAPIRoot() - if err != nil { - return nil, errors.Annotate(err, "opening API connection") - } - return modelconfig.NewClient(api), nil -} - -func (c *unsetCommand) Run(ctx *cmd.Context) error { - client, err := c.getAPI() - if err != nil { - return err - } - defer client.Close() - - // extra call to the API to retrieve env config - envAttrs, err := client.ModelGet() - if err != nil { - return err - } - for _, key := range c.keys { - // check if the key exists in the existing env config - // and warn the user if the key is not defined in - // the existing config - if _, exists := envAttrs[key]; !exists { - logger.Warningf("key %q is not defined in the current model configuration: possible misspelling", key) - } - - } - return block.ProcessBlockedError(client.ModelUnset(c.keys...), block.BlockChange) -} diff -Nru 
juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/model/unset_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/model/unset_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/model/unset_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/model/unset_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,58 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package model_test - -import ( - "github.com/juju/cmd" - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" - - "github.com/juju/juju/apiserver/common" - "github.com/juju/juju/cmd/juju/model" - "github.com/juju/juju/testing" -) - -type UnsetSuite struct { - fakeEnvSuite -} - -var _ = gc.Suite(&UnsetSuite{}) - -func (s *UnsetSuite) run(c *gc.C, args ...string) (*cmd.Context, error) { - command := model.NewUnsetCommandForTest(s.fake) - return testing.RunCommand(c, command, args...) -} - -func (s *UnsetSuite) TestInit(c *gc.C) { - unsetCmd := model.NewUnsetCommandForTest(s.fake) - // Only empty is a problem. - err := testing.InitCommand(unsetCmd, []string{}) - c.Assert(err, gc.ErrorMatches, "no keys specified") - // Everything else is fine. - err = testing.InitCommand(unsetCmd, []string{"something", "weird"}) - c.Assert(err, jc.ErrorIsNil) -} - -func (s *UnsetSuite) TestPassesValues(c *gc.C) { - _, err := s.run(c, "special", "running") - c.Assert(err, jc.ErrorIsNil) - c.Assert(s.fake.keys, jc.DeepEquals, []string{"special", "running"}) -} - -func (s *UnsetSuite) TestUnsettingKnownValue(c *gc.C) { - _, err := s.run(c, "unknown") - c.Assert(err, jc.ErrorIsNil) - c.Assert(s.fake.keys, jc.DeepEquals, []string{"unknown"}) - // Command succeeds, but warning logged. 
- expected := `key "unknown" is not defined in the current model configuration: possible misspelling` - c.Check(c.GetTestLog(), jc.Contains, expected) -} - -func (s *UnsetSuite) TestBlockedError(c *gc.C) { - s.fake.err = common.OperationBlockedError("TestBlockedError") - _, err := s.run(c, "special") - c.Assert(err, gc.Equals, cmd.ErrSilent) - // msg is logged - c.Check(c.GetTestLog(), jc.Contains, "TestBlockedError") -} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/model/users.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/model/users.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/model/users.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/model/users.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,133 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for infos. - -package model - -import ( - "bytes" - "fmt" - "text/tabwriter" - "time" - - "github.com/juju/cmd" - "github.com/juju/errors" - "github.com/juju/utils/set" - "launchpad.net/gnuflag" - - "github.com/juju/juju/apiserver/params" - "github.com/juju/juju/cmd/juju/common" - "github.com/juju/juju/cmd/modelcmd" -) - -var usageListSharesSummary = ` -Shows all users with access to a model for the current controller.`[1:] - -var usageListSharesDetails = ` -By default, the model is the current model. - -Examples: - juju shares - juju shares -m mymodel - -See also: - grant`[1:] - -func NewUsersCommand() cmd.Command { - return modelcmd.Wrap(&usersCommand{}) -} - -// usersCommand shows all the users with access to the current model. -type usersCommand struct { - modelcmd.ModelCommandBase - out cmd.Output - api UsersAPI -} - -// UsersAPI defines the methods on the client API that the -// users command calls. 
-type UsersAPI interface { - Close() error - ModelUserInfo() ([]params.ModelUserInfo, error) -} - -func (c *usersCommand) getAPI() (UsersAPI, error) { - if c.api != nil { - return c.api, nil - } - return c.NewAPIClient() -} - -// Info implements Command.Info. -func (c *usersCommand) Info() *cmd.Info { - return &cmd.Info{ - Name: "shares", - Purpose: usageListSharesSummary, - Doc: usageListSharesDetails, - Aliases: []string{"list-shares"}, - } -} - -// SetFlags implements Command.SetFlags. -func (c *usersCommand) SetFlags(f *gnuflag.FlagSet) { - c.out.AddFlags(f, "tabular", map[string]cmd.Formatter{ - "yaml": cmd.FormatYaml, - "json": cmd.FormatJson, - "tabular": c.formatTabular, - }) -} - -// Run implements Command.Run. -func (c *usersCommand) Run(ctx *cmd.Context) (err error) { - client, err := c.getAPI() - if err != nil { - return err - } - defer client.Close() - - result, err := client.ModelUserInfo() - if err != nil { - return err - } - // TODO(perrito666) 2016-05-02 lp:1558657 - return c.out.Write(ctx, common.ModelUserInfoFromParams(result, time.Now())) -} - -// formatTabular takes an interface{} to adhere to the cmd.Formatter interface -func (c *usersCommand) formatTabular(value interface{}) ([]byte, error) { - users, ok := value.(map[string]common.ModelUserInfo) - if !ok { - return nil, errors.Errorf("expected value of type %T, got %T", users, value) - } - var out bytes.Buffer - if err := formatTabularUserInfo(users, &out); err != nil { - return nil, errors.Trace(err) - } - return out.Bytes(), nil -} - -func formatTabularUserInfo(users map[string]common.ModelUserInfo, out *bytes.Buffer) error { - const ( - // To format things into columns. 
- minwidth = 0 - tabwidth = 1 - padding = 2 - padchar = ' ' - flags = 0 - ) - names := set.NewStrings() - for name := range users { - names.Add(name) - } - tw := tabwriter.NewWriter(out, minwidth, tabwidth, padding, padchar, flags) - fmt.Fprintf(tw, "NAME\tACCESS\tLAST CONNECTION\n") - for _, name := range names.SortedValues() { - user := users[name] - displayName := name - if user.DisplayName != "" { - displayName = fmt.Sprintf("%s (%s)", name, user.DisplayName) - } - fmt.Fprintf(tw, "%s\t%s\t%s\n", displayName, user.Access, user.LastConnection) - } - tw.Flush() - return nil -} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/model/users_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/model/users_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/model/users_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/model/users_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,112 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for info. 
- -package model_test - -import ( - "time" - - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" - - "github.com/juju/juju/apiserver/params" - "github.com/juju/juju/cmd/juju/model" - "github.com/juju/juju/jujuclient" - "github.com/juju/juju/jujuclient/jujuclienttesting" - "github.com/juju/juju/testing" -) - -type UsersCommandSuite struct { - testing.FakeJujuXDGDataHomeSuite - fake *fakeModelUsersClient - store *jujuclienttesting.MemStore -} - -var _ = gc.Suite(&UsersCommandSuite{}) - -type fakeModelUsersClient struct { - users []params.ModelUserInfo -} - -func (f *fakeModelUsersClient) Close() error { - return nil -} - -func (f *fakeModelUsersClient) ModelUserInfo() ([]params.ModelUserInfo, error) { - return f.users, nil -} - -func (s *UsersCommandSuite) SetUpTest(c *gc.C) { - s.FakeJujuXDGDataHomeSuite.SetUpTest(c) - last1 := time.Date(2015, 3, 20, 0, 0, 0, 0, time.UTC) - last2 := time.Date(2015, 3, 1, 0, 0, 0, 0, time.UTC) - - userlist := []params.ModelUserInfo{ - { - UserName: "admin@local", - LastConnection: &last1, - Access: "write", - }, { - UserName: "bob@local", - DisplayName: "Bob", - LastConnection: &last2, - Access: "read", - }, { - UserName: "charlie@ubuntu.com", - DisplayName: "Charlie", - Access: "read", - }, - } - - s.fake = &fakeModelUsersClient{users: userlist} - - s.store = jujuclienttesting.NewMemStore() - s.store.CurrentControllerName = "testing" - s.store.Controllers["testing"] = jujuclient.ControllerDetails{} - s.store.Accounts["testing"] = jujuclient.AccountDetails{ - User: "admin@local", - } -} - -func (s *UsersCommandSuite) TestModelUsers(c *gc.C) { - context, err := testing.RunCommand(c, model.NewUsersCommandForTest(s.fake, s.store), "-m", "admin") - c.Assert(err, jc.ErrorIsNil) - c.Assert(testing.Stdout(context), gc.Equals, ""+ - "NAME ACCESS LAST CONNECTION\n"+ - "admin@local write 2015-03-20\n"+ - "bob@local (Bob) read 2015-03-01\n"+ - "charlie@ubuntu.com (Charlie) read never connected\n"+ - "\n") -} - -func (s 
*UsersCommandSuite) TestModelUsersFormatJson(c *gc.C) { - context, err := testing.RunCommand(c, model.NewUsersCommandForTest(s.fake, s.store), "-m", "admin", "--format", "json") - c.Assert(err, jc.ErrorIsNil) - c.Assert(testing.Stdout(context), gc.Equals, "{"+ - `"admin@local":{"access":"write","last-connection":"2015-03-20"},`+ - `"bob@local":{"display-name":"Bob","access":"read","last-connection":"2015-03-01"},`+ - `"charlie@ubuntu.com":{"display-name":"Charlie","access":"read","last-connection":"never connected"}`+ - "}\n") -} - -func (s *UsersCommandSuite) TestUserInfoFormatYaml(c *gc.C) { - context, err := testing.RunCommand(c, model.NewUsersCommandForTest(s.fake, s.store), "-m", "admin", "--format", "yaml") - c.Assert(err, jc.ErrorIsNil) - c.Assert(testing.Stdout(context), gc.Equals, ""+ - "admin@local:\n"+ - " access: write\n"+ - " last-connection: 2015-03-20\n"+ - "bob@local:\n"+ - " display-name: Bob\n"+ - " access: read\n"+ - " last-connection: 2015-03-01\n"+ - "charlie@ubuntu.com:\n"+ - " display-name: Charlie\n"+ - " access: read\n"+ - " last-connection: never connected\n") -} - -func (s *UsersCommandSuite) TestUnrecognizedArg(c *gc.C) { - _, err := testing.RunCommand(c, model.NewUsersCommandForTest(s.fake, s.store), "-m", "admin", "whoops") - c.Assert(err, gc.ErrorMatches, `unrecognized args: \["whoops"\]`) -} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/romulus/agree/agree.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/romulus/agree/agree.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/romulus/agree/agree.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/romulus/agree/agree.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,256 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+ +package agree + +import ( + "bufio" + "bytes" + "fmt" + "os" + "os/exec" + "strings" + + "github.com/juju/cmd" + "github.com/juju/errors" + "github.com/juju/gnuflag" + "github.com/juju/juju/cmd/modelcmd" + "github.com/juju/juju/cmd/output" + "github.com/juju/terms-client/api" + "github.com/juju/terms-client/api/wireformat" + "gopkg.in/juju/charm.v6-unstable" +) + +var ( + clientNew = api.NewClient +) + +const agreeDoc = ` +Agree to the terms required by a charm. + +When deploying a charm that requires agreement to terms, use 'juju agree' to +view the terms and agree to them. Then the charm may be deployed. + +Once you have agreed to terms, you will not be prompted to view them again. + +Examples: + # Displays terms for somePlan revision 1 and prompts for agreement. + juju agree somePlan/1 + + # Displays the terms for revision 1 of somePlan, revision 2 of otherPlan, + # and prompts for agreement. + juju agree somePlan/1 otherPlan/2 + + # Agrees to the terms without prompting. + juju agree somePlan/1 otherPlan/2 --yes +` + +// NewAgreeCommand returns a new command that can be +// used to create user agreements. +func NewAgreeCommand() cmd.Command { + return &agreeCommand{} +} + +type term struct { + owner string + name string + revision int +} + +// agreeCommand creates a user agreement to the specified terms. +type agreeCommand struct { + modelcmd.JujuCommandBase + out cmd.Output + + terms []term + termIds []string + SkipTermContent bool +} + +// SetFlags implements Command.SetFlags. +func (c *agreeCommand) SetFlags(f *gnuflag.FlagSet) { + c.JujuCommandBase.SetFlags(f) + f.BoolVar(&c.SkipTermContent, "yes", false, "Agree to terms non interactively") + c.out.AddFlags(f, "json", output.DefaultFormatters) +} + +// Info implements Command.Info. +func (c *agreeCommand) Info() *cmd.Info { + return &cmd.Info{ + Name: "agree", + Args: "", + Purpose: "Agree to terms.", + Doc: agreeDoc, + } +} + +// Init read and verifies the arguments. 
+func (c *agreeCommand) Init(args []string) error { + if len(args) < 1 { + return errors.New("missing arguments") + } + + for _, t := range args { + termId, err := charm.ParseTerm(t) + if err != nil { + return errors.Annotate(err, "invalid term format") + } + if termId.Revision == 0 { + return errors.Errorf("must specify a valid term revision %q", t) + } + c.terms = append(c.terms, term{owner: termId.Owner, name: termId.Name, revision: termId.Revision}) + c.termIds = append(c.termIds, t) + } + if len(c.terms) == 0 { + return errors.New("must specify a valid term revision") + } + return nil +} + +// Run implements Command.Run. +func (c *agreeCommand) Run(ctx *cmd.Context) error { + client, err := c.BakeryClient() + if err != nil { + return errors.Trace(err) + } + + termsClient, err := clientNew(api.HTTPClient(client)) + if err != nil { + return err + } + + if c.SkipTermContent { + err := saveAgreements(ctx, termsClient, c.terms) + if err != nil { + return errors.Trace(err) + } + return nil + } + + needAgreement := []wireformat.GetTermsResponse{} + terms, err := termsClient.GetUnsignedTerms(&wireformat.CheckAgreementsRequest{ + Terms: c.termIds, + }) + if err != nil { + return errors.Annotate(err, "failed to retrieve terms") + } + needAgreement = append(needAgreement, terms...) + + if len(needAgreement) == 0 { + fmt.Fprintf(ctx.Stdout, "Already agreed\n") + return nil + } + + err = printTerms(ctx, needAgreement) + if err != nil { + return errors.Trace(err) + } + fmt.Fprintf(ctx.Stdout, "Do you agree to the displayed terms? 
(Y/n): ") + answer, err := userAnswer() + if err != nil { + return errors.Trace(err) + } + + agreedTerms := make([]term, len(needAgreement)) + for i, t := range needAgreement { + agreedTerms[i] = term{owner: t.Owner, name: t.Name, revision: t.Revision} + } + + answer = strings.TrimSpace(answer) + if userAgrees(answer) { + err = saveAgreements(ctx, termsClient, agreedTerms) + if err != nil { + return errors.Trace(err) + } + } else { + fmt.Fprintf(ctx.Stdout, "You didn't agree to the presented terms.\n") + return nil + } + + return nil +} + +func saveAgreements(ctx *cmd.Context, termsClient api.Client, ts []term) error { + agreements := make([]wireformat.SaveAgreement, len(ts)) + for i, t := range ts { + agreements[i] = wireformat.SaveAgreement{ + TermOwner: t.owner, + TermName: t.name, + TermRevision: t.revision, + } + } + response, err := termsClient.SaveAgreement(&wireformat.SaveAgreements{Agreements: agreements}) + if err != nil { + return errors.Annotate(err, "failed to save user agreement") + } + for _, agreement := range response.Agreements { + termName := agreement.Term + if agreement.Owner != "" { + termName = fmt.Sprintf("%v/%v", agreement.Owner, agreement.Term) + } + _, err = fmt.Fprintf(ctx.Stdout, "Agreed to revision %v of %v for Juju users\n", agreement.Revision, termName) + if err != nil { + return errors.Trace(err) + } + } + return nil +} + +var userAnswer = func() (string, error) { + return bufio.NewReader(os.Stdin).ReadString('\n') +} + +func printTerms(ctx *cmd.Context, terms []wireformat.GetTermsResponse) (returnErr error) { + output := "" + for _, t := range terms { + if t.Owner != "" { + output += fmt.Sprintf(` +=== %v/%v/%v: %v === +%v +======== +`, t.Owner, t.Name, t.Revision, t.CreatedOn, t.Content) + } else { + output += fmt.Sprintf(` +=== %v/%v: %v === +%v +======== +`, t.Name, t.Revision, t.CreatedOn, t.Content) + } + } + defer func() { + if returnErr != nil { + _, err := fmt.Fprint(ctx.Stdout, output) + returnErr = errors.Annotate(err, 
"failed to print plan") + } + }() + + buffer := bytes.NewReader([]byte(output)) + pager, err := pagerCmd() + if err != nil { + return err + } + pager.Stdout = ctx.Stdout + pager.Stdin = buffer + err = pager.Run() + return errors.Annotate(err, "failed to print plan") +} + +func pagerCmd() (*exec.Cmd, error) { + os.Unsetenv("LESS") + if pager := os.Getenv("PAGER"); pager != "" { + if pagerPath, err := exec.LookPath(pager); err == nil { + return exec.Command(pagerPath), nil + } + } + if lessPath, err := exec.LookPath("less"); err == nil { + return exec.Command(lessPath, "-P", "Press 'q' to quit after you've read the terms."), nil + } + return nil, errors.NotFoundf("pager") +} + +func userAgrees(input string) bool { + if input == "y" || input == "Y" || input == "" { + return true + } + return false +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/romulus/agree/agree_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/romulus/agree/agree_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/romulus/agree/agree_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/romulus/agree/agree_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,267 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+ +package agree_test + +import ( + "runtime" + "sync" + "testing" + + "github.com/juju/cmd/cmdtesting" + "github.com/juju/terms-client/api" + "github.com/juju/terms-client/api/wireformat" + jujutesting "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/cmd/juju/romulus/agree" + coretesting "github.com/juju/juju/testing" +) + +func TestPackage(t *testing.T) { + gc.TestingT(t) +} + +var _ = gc.Suite(&agreeSuite{}) + +var testTerms = "Test Terms" + +type agreeSuite struct { + client *mockClient + coretesting.FakeJujuXDGDataHomeSuite +} + +func (s *agreeSuite) SetUpTest(c *gc.C) { + s.FakeJujuXDGDataHomeSuite.SetUpTest(c) + s.client = &mockClient{} + + jujutesting.PatchValue(agree.ClientNew, func(...api.ClientOption) (api.Client, error) { + return s.client, nil + }) +} + +func (s *agreeSuite) TestAgreementNothingToSign(c *gc.C) { + jujutesting.PatchValue(agree.UserAnswer, func() (string, error) { + return "y", nil + }) + + s.client.user = "test-user" + s.client.setUnsignedTerms([]wireformat.GetTermsResponse{}) + + ctx, err := cmdtesting.RunCommand(c, agree.NewAgreeCommand(), "test-term/1") + c.Assert(err, jc.ErrorIsNil) + c.Assert(cmdtesting.Stdout(ctx), gc.Equals, `Already agreed +`) +} + +func (s *agreeSuite) TestAgreement(c *gc.C) { + if runtime.GOOS == "windows" { + c.Skip("less not on windows, bug 1614330") + } + var answer string + jujutesting.PatchValue(agree.UserAnswer, func() (string, error) { + return answer, nil + }) + + s.client.user = "test-user" + s.client.setUnsignedTerms([]wireformat.GetTermsResponse{{ + Name: "test-term", + Revision: 1, + Content: testTerms, + }}) + tests := []struct { + about string + args []string + err string + stdout string + answer string + apiCalls []jujutesting.StubCall + clientTerms []wireformat.GetTermsResponse + }{{ + about: "everything works", + args: []string{"test-term/1", "--yes"}, + stdout: "Agreed to revision 1 of test-term for Juju users\n", + apiCalls: 
[]jujutesting.StubCall{{FuncName: "SaveAgreement", Args: []interface{}{&wireformat.SaveAgreements{Agreements: []wireformat.SaveAgreement{{TermName: "test-term", TermRevision: 1}}}}}}, + }, { + about: "everything works with owner term", + args: []string{"owner/test-term/1", "--yes"}, + stdout: "Agreed to revision 1 of owner/test-term for Juju users\n", + apiCalls: []jujutesting.StubCall{{FuncName: "SaveAgreement", Args: []interface{}{&wireformat.SaveAgreements{Agreements: []wireformat.SaveAgreement{{TermOwner: "owner", TermName: "test-term", TermRevision: 1}}}}}}, + }, { + about: "cannot parse revision number", + args: []string{"test-term/abc"}, + err: `must specify a valid term revision "test-term/abc"`, + }, { + about: "missing arguments", + args: []string{}, + err: "missing arguments", + }, { + about: "everything works - user accepts", + args: []string{"test-term/1"}, + answer: "y", + stdout: ` +=== test-term/1: 0001-01-01 00:00:00 +0000 UTC === +Test Terms +======== +Do you agree to the displayed terms? (Y/n): Agreed to revision 1 of test-term for Juju users +`, + apiCalls: []jujutesting.StubCall{{ + FuncName: "GetUnunsignedTerms", Args: []interface{}{ + &wireformat.CheckAgreementsRequest{Terms: []string{"test-term/1"}}, + }, + }, { + FuncName: "SaveAgreement", Args: []interface{}{ + &wireformat.SaveAgreements{Agreements: []wireformat.SaveAgreement{{TermName: "test-term", TermRevision: 1}}}, + }, + }}, + }, { + about: "everything works - user refuses", + args: []string{"test-term/1"}, + answer: "n", + stdout: ` +=== test-term/1: 0001-01-01 00:00:00 +0000 UTC === +Test Terms +======== +Do you agree to the displayed terms? (Y/n): You didn't agree to the presented terms. 
+`, + apiCalls: []jujutesting.StubCall{{ + FuncName: "GetUnunsignedTerms", Args: []interface{}{ + &wireformat.CheckAgreementsRequest{Terms: []string{"test-term/1"}}, + }, + }}, + }, { + about: "must not accept 0 revision", + args: []string{"test-term/0", "--yes"}, + err: `must specify a valid term revision "test-term/0"`, + }, { + about: "user accepts, multiple terms", + args: []string{"test-term/1", "test-term/2"}, + answer: "y", + stdout: ` +=== test-term/1: 0001-01-01 00:00:00 +0000 UTC === +Test Terms +======== +Do you agree to the displayed terms? (Y/n): Agreed to revision 1 of test-term for Juju users +`, + apiCalls: []jujutesting.StubCall{ + { + FuncName: "GetUnunsignedTerms", Args: []interface{}{ + &wireformat.CheckAgreementsRequest{Terms: []string{"test-term/1", "test-term/2"}}, + }, + }, { + FuncName: "SaveAgreement", Args: []interface{}{ + &wireformat.SaveAgreements{Agreements: []wireformat.SaveAgreement{ + {TermName: "test-term", TermRevision: 1}, + }}, + }, + }}, + }, { + about: "valid then unknown arguments", + args: []string{"test-term/1", "unknown", "arguments"}, + err: `must specify a valid term revision "unknown"`, + }, { + about: "user accepts all the terms", + args: []string{"test-term/1", "test-term/2", "--yes"}, + stdout: `Agreed to revision 1 of test-term for Juju users +Agreed to revision 2 of test-term for Juju users +`, + apiCalls: []jujutesting.StubCall{ + {FuncName: "SaveAgreement", Args: []interface{}{&wireformat.SaveAgreements{ + Agreements: []wireformat.SaveAgreement{ + {TermName: "test-term", TermRevision: 1}, + {TermName: "test-term", TermRevision: 2}, + }}}}}, + }, { + about: "everything works with term owner - user accepts", + clientTerms: []wireformat.GetTermsResponse{{ + Name: "test-term", + Owner: "test-owner", + Revision: 1, + Content: testTerms, + }}, + args: []string{"test-owner/test-term/1"}, + answer: "y", + stdout: ` +=== test-owner/test-term/1: 0001-01-01 00:00:00 +0000 UTC === +Test Terms +======== +Do you agree to the 
displayed terms? (Y/n): Agreed to revision 1 of test-owner/test-term for Juju users +`, + apiCalls: []jujutesting.StubCall{{ + FuncName: "GetUnunsignedTerms", Args: []interface{}{ + &wireformat.CheckAgreementsRequest{Terms: []string{"test-owner/test-term/1"}}, + }, + }, { + FuncName: "SaveAgreement", Args: []interface{}{ + &wireformat.SaveAgreements{Agreements: []wireformat.SaveAgreement{{TermOwner: "test-owner", TermName: "test-term", TermRevision: 1}}}, + }, + }}, + }} + for i, test := range tests { + s.client.ResetCalls() + if len(test.clientTerms) > 0 { + s.client.setUnsignedTerms(test.clientTerms) + } + c.Logf("running test %d: %s", i, test.about) + if test.answer != "" { + answer = test.answer + } + ctx, err := cmdtesting.RunCommand(c, agree.NewAgreeCommand(), test.args...) + if test.err != "" { + c.Assert(err, gc.ErrorMatches, test.err) + } else { + c.Assert(err, jc.ErrorIsNil) + } + if ctx != nil { + c.Assert(cmdtesting.Stdout(ctx), gc.Equals, test.stdout) + } + if len(test.apiCalls) > 0 { + s.client.CheckCalls(c, test.apiCalls) + } + } +} + +type mockClient struct { + api.Client + jujutesting.Stub + + lock sync.Mutex + user string + terms []wireformat.GetTermsResponse + unsignedTerms []wireformat.GetTermsResponse +} + +func (c *mockClient) setUnsignedTerms(t []wireformat.GetTermsResponse) { + c.lock.Lock() + defer c.lock.Unlock() + c.unsignedTerms = t +} + +// SaveAgreement saves user's agreement to the specified +// revision of the terms documents +func (c *mockClient) SaveAgreement(p *wireformat.SaveAgreements) (*wireformat.SaveAgreementResponses, error) { + c.AddCall("SaveAgreement", p) + responses := make([]wireformat.AgreementResponse, len(p.Agreements)) + for i, agreement := range p.Agreements { + responses[i] = wireformat.AgreementResponse{ + User: c.user, + Owner: agreement.TermOwner, + Term: agreement.TermName, + Revision: agreement.TermRevision, + } + } + return &wireformat.SaveAgreementResponses{responses}, nil +} + +func (c *mockClient) 
GetUnsignedTerms(p *wireformat.CheckAgreementsRequest) ([]wireformat.GetTermsResponse, error) { + c.MethodCall(c, "GetUnunsignedTerms", p) + r := make([]wireformat.GetTermsResponse, len(c.unsignedTerms)) + copy(r, c.unsignedTerms) + return r, nil +} + +func (c *mockClient) GetUsersAgreements() ([]wireformat.AgreementResponse, error) { + c.MethodCall(c, "GetUsersAgreements") + return []wireformat.AgreementResponse{}, nil +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/romulus/agree/export.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/romulus/agree/export.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/romulus/agree/export.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/romulus/agree/export.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,12 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package agree + +// These two var are exported becuase they are useful in tests outside of this +// package. Unless you are writing a test you shouldn't be using either of these +// values. +var ( + ClientNew = &clientNew + UserAnswer = &userAnswer +) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/romulus/allocate/allocate.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/romulus/allocate/allocate.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/romulus/allocate/allocate.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/romulus/allocate/allocate.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,140 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+ +package allocate + +import ( + "fmt" + "regexp" + "strings" + + "github.com/juju/cmd" + "github.com/juju/errors" + "github.com/juju/gnuflag" + "github.com/juju/juju/cmd/modelcmd" + "github.com/juju/utils" + "gopkg.in/macaroon-bakery.v1/httpbakery" + + api "github.com/juju/romulus/api/budget" +) + +var budgetWithLimitRe = regexp.MustCompile(`^[a-zA-Z0-9\-]+:[0-9]+$`) + +type allocateCommand struct { + modelcmd.ModelCommandBase + api apiClient + Budget string + ModelUUID string + Services []string + Limit string +} + +// NewAllocateCommand returns a new allocateCommand +func NewAllocateCommand() modelcmd.ModelCommand { + return &allocateCommand{} +} + +const doc = ` +Allocate budget for the specified applications, replacing any prior allocations +made for the specified applications. + +Examples: + # Assigns application "db" to an allocation on budget "somebudget" with + # the limit "42". + juju allocate somebudget:42 db + + # Application names assume the current selected model, unless otherwise + # specified with: + juju allocate -m [ ... + + # Models may also be referenced by UUID when necessary: + juju allocate --model-uuid ... +` + +// SetFlags implements cmd.Command.SetFlags. +func (c *allocateCommand) SetFlags(f *gnuflag.FlagSet) { + c.ModelCommandBase.SetFlags(f) + f.StringVar(&c.ModelUUID, "model-uuid", "", "Model UUID of allocation") +} + +// Info implements cmd.Command.Info. +func (c *allocateCommand) Info() *cmd.Info { + return &cmd.Info{ + Name: "allocate", + Args: ": [ ...]", + Purpose: "Allocate budget to applications.", + Doc: doc, + } +} + +// Init implements cmd.Command.Init. 
+func (c *allocateCommand) Init(args []string) error { + if len(args) < 2 { + return errors.New("budget and application name required") + } + budgetWithLimit := args[0] + var err error + c.Budget, c.Limit, err = parseBudgetWithLimit(budgetWithLimit) + if err != nil { + return errors.Annotate(err, `expected args in the form "budget:limit [application ...]"`) + } + if c.ModelUUID == "" { + c.ModelUUID, err = c.modelUUID() + if err != nil { + return err + } + } else { + if !utils.IsValidUUIDString(c.ModelUUID) { + return errors.NotValidf("model UUID %q", c.ModelUUID) + } + } + + c.Services = args[1:] + return nil +} + +// Run implements cmd.Command.Run and has most of the logic for the run command. +func (c *allocateCommand) Run(ctx *cmd.Context) error { + client, err := c.BakeryClient() + if err != nil { + return errors.Annotate(err, "failed to create an http client") + } + api, err := c.newAPIClient(client) + if err != nil { + return errors.Annotate(err, "failed to create an api client") + } + resp, err := api.CreateAllocation(c.Budget, c.Limit, c.ModelUUID, c.Services) + if err != nil { + return errors.Annotate(err, "failed to create allocation") + } + fmt.Fprintln(ctx.Stdout, resp) + return nil +} + +func (c *allocateCommand) modelUUID() (string, error) { + model, err := c.ClientStore().ModelByName(c.ControllerName(), c.ModelName()) + if err != nil { + return "", errors.Trace(err) + } + return model.ModelUUID, nil +} + +func parseBudgetWithLimit(bl string) (string, string, error) { + if !budgetWithLimitRe.MatchString(bl) { + return "", "", errors.New("invalid budget specification, expecting :") + } + parts := strings.Split(bl, ":") + return parts[0], parts[1], nil +} + +func (c *allocateCommand) newAPIClient(bakery *httpbakery.Client) (apiClient, error) { + if c.api != nil { + return c.api, nil + } + c.api = api.NewClient(bakery) + return c.api, nil +} + +type apiClient interface { + CreateAllocation(string, string, string, []string) (string, error) +} diff -Nru 
// Copyright 2016 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.

package allocate_test

import (
    "github.com/juju/cmd"
    "github.com/juju/cmd/cmdtesting"
    "github.com/juju/errors"
    "github.com/juju/testing"
    jc "github.com/juju/testing/checkers"
    gc "gopkg.in/check.v1"

    "github.com/juju/juju/cmd/juju/romulus/allocate"
    "github.com/juju/juju/jujuclient"
    "github.com/juju/juju/jujuclient/jujuclienttesting"
    coretesting "github.com/juju/juju/testing"
)

var _ = gc.Suite(&allocateSuite{})

// allocateSuite exercises "juju allocate" against a stubbed budget API
// and an in-memory client store.
type allocateSuite struct {
    coretesting.FakeJujuXDGDataHomeSuite
    stub    *testing.Stub
    mockAPI *mockapi
    store   jujuclient.ClientStore
}

func (s *allocateSuite) SetUpTest(c *gc.C) {
    s.FakeJujuXDGDataHomeSuite.SetUpTest(c)
    // One controller with a single model whose UUID is "model-uuid";
    // tests that omit --model-uuid resolve against this store.
    s.store = &jujuclienttesting.MemStore{
        CurrentControllerName: "controller",
        Controllers: map[string]jujuclient.ControllerDetails{
            "controller": {},
        },
        Models: map[string]*jujuclient.ControllerModels{
            "controller": {
                Models: map[string]jujuclient.ModelDetails{
                    "admin/model": {"model-uuid"},
                },
                CurrentModel: "admin/model",
            },
        },
        Accounts: map[string]jujuclient.AccountDetails{
            "controller": {
                User: "admin",
            },
        },
    }
    s.stub = &testing.Stub{}
    s.mockAPI = newMockAPI(s.stub)
}

// run executes the allocate command against the mock API, always
// selecting "controller:model" before appending the test's args.
func (s *allocateSuite) run(c *gc.C, args ...string) (*cmd.Context, error) {
    alloc := allocate.NewAllocateCommandForTest(s.mockAPI, s.store)
    a := []string{"-m", "controller:model"}
    a = append(a, args...)
    return cmdtesting.RunCommand(c, alloc, a...)
}

func (s *allocateSuite) TestAllocate(c *gc.C) {
    s.mockAPI.resp = "allocation updated"
    ctx, err := s.run(c, "name:100", "db")
    c.Assert(err, jc.ErrorIsNil)
    c.Assert(cmdtesting.Stdout(ctx), jc.DeepEquals, "allocation updated\n")
    s.mockAPI.CheckCall(c, 0, "CreateAllocation", "name", "100", "model-uuid", []string{"db"})
}

func (s *allocateSuite) TestAllocateAPIError(c *gc.C) {
    s.stub.SetErrors(errors.New("something failed"))
    _, err := s.run(c, "name:100", "db")
    c.Assert(err, gc.ErrorMatches, "failed to create allocation: something failed")
    s.mockAPI.CheckCall(c, 0, "CreateAllocation", "name", "100", "model-uuid", []string{"db"})
}

func (s *allocateSuite) TestAllocateZero(c *gc.C) {
    s.mockAPI.resp = "allocation updated"
    _, err := s.run(c, "name:0", "db")
    c.Assert(err, jc.ErrorIsNil)
    s.mockAPI.CheckCall(c, 0, "CreateAllocation", "name", "0", "model-uuid", []string{"db"})
}

func (s *allocateSuite) TestAllocateModelUUID(c *gc.C) {
    s.mockAPI.resp = "allocation updated"
    _, err := s.run(c, "name:0", "--model-uuid", "30f7a9f2-220d-4268-b336-35e7daacae79", "db")
    c.Assert(err, jc.ErrorIsNil)
    s.mockAPI.CheckCall(c, 0, "CreateAllocation", "name", "0", "30f7a9f2-220d-4268-b336-35e7daacae79", []string{"db"})
}

func (s *allocateSuite) TestAllocateErrors(c *gc.C) {
    // Table of argument-validation failures; none should reach the API
    // (asserted by CheckNoCalls below).
    tests := []struct {
        about         string
        args          []string
        expectedError string
    }{{
        about:         "no args",
        args:          []string{},
        expectedError: "budget and application name required",
    }, {
        about:         "budget without allocation limit",
        args:          []string{"name", "db"},
        expectedError: `expected args in the form "budget:limit \[application ...\]": invalid budget specification, expecting :`,
    }, {
        about:         "application not specified",
        args:          []string{"name:100"},
        expectedError: "budget and application name required",
    }, {
        about:         "negative allocation limit",
        args:          []string{"name:-100", "db"},
        expectedError: `expected args in the form "budget:limit \[application ...\]": invalid budget specification, expecting :`,
    }, {
        about:         "non-numeric allocation limit",
        args:          []string{"name:abcd", "db"},
        expectedError: `expected args in the form "budget:limit \[application ...\]": invalid budget specification, expecting :`,
    }, {
        about:         "empty allocation limit",
        args:          []string{"name:", "db"},
        expectedError: `expected args in the form "budget:limit \[application ...\]": invalid budget specification, expecting :`,
    }, {
        about:         "invalid model UUID",
        args:          []string{"--model-uuid", "nope", "name:100", "db"},
        expectedError: `model UUID "nope" not valid`,
    }, {
        about:         "arguments in wrong order",
        args:          []string{"name:", "db:50"},
        expectedError: `expected args in the form "budget:limit \[application ...\]": invalid budget specification, expecting :`,
    }}
    for i, test := range tests {
        c.Logf("test %d: %s", i, test.about)
        _, err := s.run(c, test.args...)
        c.Check(err, gc.ErrorMatches, test.expectedError)
        s.mockAPI.CheckNoCalls(c)
    }
}

func newMockAPI(s *testing.Stub) *mockapi {
    return &mockapi{Stub: s}
}

// mockapi records CreateAllocation calls on the stub and returns the
// canned resp value.
type mockapi struct {
    *testing.Stub
    resp string
}

func (api *mockapi) CreateAllocation(name, limit, modelUUID string, services []string) (string, error) {
    api.MethodCall(api, "CreateAllocation", name, limit, modelUUID, services)
    return api.resp, api.NextErr()
}

// Copyright 2016 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
+ +package allocate + +import ( + "github.com/juju/cmd" + "github.com/juju/juju/cmd/modelcmd" + "github.com/juju/juju/jujuclient" +) + +func NewAllocateCommandForTest(api apiClient, store jujuclient.ClientStore) cmd.Command { + c := &allocateCommand{api: api} + c.SetClientStore(store) + return modelcmd.Wrap(c) +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/romulus/allocate/package_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/romulus/allocate/package_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/romulus/allocate/package_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/romulus/allocate/package_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,14 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package allocate_test + +import ( + stdtesting "testing" + + gc "gopkg.in/check.v1" +) + +func TestAll(t *stdtesting.T) { + gc.TestingT(t) +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/romulus/commands/commands.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/romulus/commands/commands.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/romulus/commands/commands.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/romulus/commands/commands.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,51 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +// Package commands provides functionality for registering all the romulus commands. 
+package commands + +import ( + "github.com/juju/cmd" + + "github.com/juju/juju/cmd/juju/romulus/agree" + "github.com/juju/juju/cmd/juju/romulus/allocate" + "github.com/juju/juju/cmd/juju/romulus/createbudget" + "github.com/juju/juju/cmd/juju/romulus/listagreements" + "github.com/juju/juju/cmd/juju/romulus/listbudgets" + "github.com/juju/juju/cmd/juju/romulus/listplans" + "github.com/juju/juju/cmd/juju/romulus/setbudget" + "github.com/juju/juju/cmd/juju/romulus/setplan" + "github.com/juju/juju/cmd/juju/romulus/showbudget" + "github.com/juju/juju/cmd/juju/romulus/updateallocation" + "github.com/juju/juju/cmd/modelcmd" +) + +type commandRegister interface { + Register(cmd.Command) +} + +// RegisterAll registers all romulus commands with the +// provided command registry. +func RegisterAll(r commandRegister) { + register := func(c cmd.Command) { + switch c := c.(type) { + case modelcmd.ModelCommand: + r.Register(modelcmd.Wrap(c)) + case modelcmd.CommandBase: + r.Register(modelcmd.WrapBase(c)) + default: + r.Register(c) + } + + } + register(agree.NewAgreeCommand()) + register(listagreements.NewListAgreementsCommand()) + register(allocate.NewAllocateCommand()) + register(listbudgets.NewListBudgetsCommand()) + register(createbudget.NewCreateBudgetCommand()) + register(listplans.NewListPlansCommand()) + register(setbudget.NewSetBudgetCommand()) + register(setplan.NewSetPlanCommand()) + register(showbudget.NewShowBudgetCommand()) + register(updateallocation.NewUpdateAllocationCommand()) +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/romulus/commands/commands_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/romulus/commands/commands_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/romulus/commands/commands_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/romulus/commands/commands_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,46 @@ +// Copyright 2016 Canonical Ltd. 
+// Licensed under the AGPLv3, see LICENCE file for details. + +package commands_test + +import ( + stdtesting "testing" + + "github.com/juju/cmd" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/cmd/juju/romulus/commands" +) + +type commandSuite struct{} + +var _ = gc.Suite(&commandSuite{}) + +type mockRegister struct { + commands []string +} + +func (m *mockRegister) Register(command cmd.Command) { + m.commands = append(m.commands, command.Info().Name) +} + +func TestAll(t *stdtesting.T) { + gc.TestingT(t) +} + +func (s *commandSuite) TestRegister(c *gc.C) { + m := &mockRegister{} + commands.RegisterAll(m) + c.Assert(m.commands, gc.DeepEquals, []string{ + "agree", + "agreements", + "allocate", + "budgets", + "create-budget", + "plans", + "set-budget", + "set-plan", + "show-budget", + "update-allocation", + }) +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/romulus/createbudget/createbudget.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/romulus/createbudget/createbudget.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/romulus/createbudget/createbudget.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/romulus/createbudget/createbudget.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,85 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package createbudget + +import ( + "fmt" + "strconv" + + "github.com/juju/cmd" + "github.com/juju/errors" + "github.com/juju/juju/cmd/modelcmd" + "gopkg.in/macaroon-bakery.v1/httpbakery" + + api "github.com/juju/romulus/api/budget" +) + +type createBudgetCommand struct { + modelcmd.JujuCommandBase + Name string + Value string +} + +// NewCreateBudgetCommand returns a new createBudgetCommand +func NewCreateBudgetCommand() cmd.Command { + return &createBudgetCommand{} +} + +const doc = ` +Create a new budget with monthly limit. + +Examples: + # Creates a budget named 'qa' with a limit of 42. 
+ juju create-budget qa 42 +` + +// Info implements cmd.Command.Info. +func (c *createBudgetCommand) Info() *cmd.Info { + return &cmd.Info{ + Name: "create-budget", + Purpose: "Create a new budget.", + Doc: doc, + } +} + +// Init implements cmd.Command.Init. +func (c *createBudgetCommand) Init(args []string) error { + if len(args) < 2 { + return errors.New("name and value required") + } + c.Name, c.Value = args[0], args[1] + if _, err := strconv.ParseInt(c.Value, 10, 32); err != nil { + return errors.New("budget value needs to be a whole number") + } + return cmd.CheckEmpty(args[2:]) +} + +// Run implements cmd.Command.Run and has most of the logic for the run command. +func (c *createBudgetCommand) Run(ctx *cmd.Context) error { + client, err := c.BakeryClient() + if err != nil { + return errors.Annotate(err, "failed to create an http client") + } + api, err := newAPIClient(client) + if err != nil { + return errors.Annotate(err, "failed to create an api client") + } + resp, err := api.CreateBudget(c.Name, c.Value) + if err != nil { + return errors.Annotate(err, "failed to create the budget") + } + fmt.Fprintln(ctx.Stdout, resp) + return nil +} + +var newAPIClient = newAPIClientImpl + +func newAPIClientImpl(c *httpbakery.Client) (apiClient, error) { + client := api.NewClient(c) + return client, nil +} + +type apiClient interface { + CreateBudget(name string, limit string) (string, error) +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/romulus/createbudget/createbudget_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/romulus/createbudget/createbudget_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/romulus/createbudget/createbudget_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/romulus/createbudget/createbudget_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,95 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
package createbudget_test

import (
    "github.com/juju/cmd/cmdtesting"
    "github.com/juju/errors"
    "github.com/juju/testing"
    jc "github.com/juju/testing/checkers"
    gc "gopkg.in/check.v1"

    "github.com/juju/juju/cmd/juju/romulus/createbudget"
    coretesting "github.com/juju/juju/testing"
)

var _ = gc.Suite(&createBudgetSuite{})

// createBudgetSuite exercises "juju create-budget" against a stub API.
type createBudgetSuite struct {
    coretesting.FakeJujuXDGDataHomeSuite
    stub    *testing.Stub
    mockAPI *mockapi
}

func (s *createBudgetSuite) SetUpTest(c *gc.C) {
    s.FakeJujuXDGDataHomeSuite.SetUpTest(c)
    s.stub = &testing.Stub{}
    s.mockAPI = newMockAPI(s.stub)
    // Patch the package-level constructor so the command talks to the
    // mock instead of the real budget service.
    s.PatchValue(createbudget.NewAPIClient, createbudget.APIClientFnc(s.mockAPI))
}

func (s *createBudgetSuite) TestCreateBudget(c *gc.C) {
    s.mockAPI.resp = "name budget set to 5"
    createCmd := createbudget.NewCreateBudgetCommand()
    ctx, err := cmdtesting.RunCommand(c, createCmd, "name", "5")
    c.Assert(err, jc.ErrorIsNil)
    c.Assert(cmdtesting.Stdout(ctx), jc.DeepEquals, "name budget set to 5\n")
    s.mockAPI.CheckCall(c, 0, "CreateBudget", "name", "5")
}

func (s *createBudgetSuite) TestCreateBudgetAPIError(c *gc.C) {
    s.mockAPI.SetErrors(errors.New("something failed"))
    createCmd := createbudget.NewCreateBudgetCommand()
    _, err := cmdtesting.RunCommand(c, createCmd, "name", "5")
    c.Assert(err, gc.ErrorMatches, "failed to create the budget: something failed")
    s.mockAPI.CheckCall(c, 0, "CreateBudget", "name", "5")
}

func (s *createBudgetSuite) TestCreateBudgetErrors(c *gc.C) {
    // Argument-validation failures: the command must error out before
    // any API call is made (CheckNoCalls below), so the stub errors
    // primed in the loop are never actually consumed.
    tests := []struct {
        about         string
        args          []string
        expectedError string
    }{
        {
            about:         "test value needs to be a number",
            args:          []string{"name", "badvalue"},
            expectedError: "budget value needs to be a whole number",
        },
        {
            about:         "value is missing",
            args:          []string{"name"},
            expectedError: "name and value required",
        },
        {
            about:         "no args",
            args:          []string{},
            expectedError: "name and value required",
        },
    }
    for i, test := range tests {
        c.Logf("test %d: %s", i, test.about)
        if test.expectedError != "" {
            s.mockAPI.SetErrors(errors.New(test.expectedError))
        }
        createCmd := createbudget.NewCreateBudgetCommand()
        _, err := cmdtesting.RunCommand(c, createCmd, test.args...)
        c.Assert(err, gc.ErrorMatches, test.expectedError)
        s.mockAPI.CheckNoCalls(c)
    }
}

func newMockAPI(s *testing.Stub) *mockapi {
    return &mockapi{Stub: s}
}

// mockapi records CreateBudget calls on the stub and returns the canned
// resp value.
type mockapi struct {
    *testing.Stub
    resp string
}

func (api *mockapi) CreateBudget(name, value string) (string, error) {
    api.MethodCall(api, "CreateBudget", name, value)
    return api.resp, api.NextErr()
}

// Copyright 2016 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.

package createbudget

import (
    "gopkg.in/macaroon-bakery.v1/httpbakery"
)

var (
    // NewAPIClient exposes the API-constructor variable for patching in tests.
    NewAPIClient = &newAPIClient
)

// APIClientFnc returns a constructor that always yields the given apiClient.
func APIClientFnc(api apiClient) func(*httpbakery.Client) (apiClient, error) {
    return func(*httpbakery.Client) (apiClient, error) {
        return api, nil
    }
}

// Copyright 2016 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.

package createbudget_test

import (
    stdtesting "testing"

    gc "gopkg.in/check.v1"
)

// TestAll wires the gocheck suites into the standard "go test" runner.
func TestAll(t *stdtesting.T) {
    gc.TestingT(t)
}

// Copyright 2016 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.

package listagreements

var (
    // NewClient exposes the client-constructor variable for patching in tests.
    NewClient = &newClient
)

// Copyright 2016 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.

package listagreements

import (
    "encoding/json"
    "io"

    "github.com/juju/cmd"
    "github.com/juju/errors"
    "github.com/juju/gnuflag"
    "github.com/juju/juju/cmd/modelcmd"
    "github.com/juju/terms-client/api"
    "github.com/juju/terms-client/api/wireformat"
    "gopkg.in/macaroon-bakery.v1/httpbakery"
)

var (
    // newClient constructs the real terms-service client; tests replace
    // it through the NewClient pointer in export_test.go.
    newClient = func(client *httpbakery.Client) (TermsServiceClient, error) {
        return api.NewClient(api.HTTPClient(client))
    }
)

// TermsServiceClient defines methods needed for the Terms Service CLI
// commands.
+type TermsServiceClient interface { + GetUsersAgreements() ([]wireformat.AgreementResponse, error) +} + +const listAgreementsDoc = ` +List terms the user has agreed to. +` + +// NewListAgreementsCommand returns a new command that can be +// used to list agreements a user has made. +func NewListAgreementsCommand() *listAgreementsCommand { + return &listAgreementsCommand{} +} + +type term struct { + name string + revision int +} + +var _ cmd.Command = (*listAgreementsCommand)(nil) + +// listAgreementsCommand creates a user agreement to the specified +// Terms and Conditions document. +type listAgreementsCommand struct { + modelcmd.JujuCommandBase + out cmd.Output +} + +// SetFlags implements Command.SetFlags. +func (c *listAgreementsCommand) SetFlags(f *gnuflag.FlagSet) { + c.JujuCommandBase.SetFlags(f) + c.out.AddFlags(f, "json", map[string]cmd.Formatter{ + "json": formatJSON, + "yaml": cmd.FormatYaml, + }) +} + +// Info implements Command.Info. +func (c *listAgreementsCommand) Info() *cmd.Info { + return &cmd.Info{ + Name: "agreements", + Purpose: "List user's agreements.", + Doc: listAgreementsDoc, + Aliases: []string{"list-agreements"}, + } +} + +// Run implements Command.Run. 
+func (c *listAgreementsCommand) Run(ctx *cmd.Context) error { + client, err := c.BakeryClient() + if err != nil { + return errors.Annotate(err, "failed to create an http client") + } + + apiClient, err := newClient(client) + if err != nil { + return errors.Annotate(err, "failed to create a terms API client") + } + + agreements, err := apiClient.GetUsersAgreements() + if err != nil { + return errors.Annotate(err, "failed to list user agreements") + } + if len(agreements) == 0 { + ctx.Infof("No agreements to display.") + return nil + } + err = c.out.Write(ctx, agreements) + if err != nil { + return errors.Mask(err) + } + return nil +} + +func formatJSON(writer io.Writer, value interface{}) error { + bytes, err := json.MarshalIndent(value, "", " ") + if err != nil { + return err + } + bytes = append(bytes, '\n') + _, err = writer.Write(bytes) + return err +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/romulus/listagreements/listagreements_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/romulus/listagreements/listagreements_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/romulus/listagreements/listagreements_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/romulus/listagreements/listagreements_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,147 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
package listagreements_test

import (
    "errors"
    "time"

    "github.com/juju/cmd/cmdtesting"
    "github.com/juju/terms-client/api/wireformat"
    jujutesting "github.com/juju/testing"
    jc "github.com/juju/testing/checkers"
    gc "gopkg.in/check.v1"
    "gopkg.in/macaroon-bakery.v1/httpbakery"

    "github.com/juju/juju/cmd/juju/romulus/listagreements"
    coretesting "github.com/juju/juju/testing"
)

var _ = gc.Suite(&listAgreementsSuite{})

var testTermsAndConditions = "Test Terms and Conditions"

// listAgreementsSuite exercises "juju agreements" against a mock
// terms-service client.
type listAgreementsSuite struct {
    coretesting.FakeJujuXDGDataHomeSuite
    client *mockClient
}

func (s *listAgreementsSuite) SetUpTest(c *gc.C) {
    s.FakeJujuXDGDataHomeSuite.SetUpTest(c)
    s.client = &mockClient{}

    // Route the command's client construction to the mock.
    jujutesting.PatchValue(listagreements.NewClient, func(_ *httpbakery.Client) (listagreements.TermsServiceClient, error) {
        return s.client, nil
    })
}

const (
    // NOTE(review): indentation inside these expected-output literals was
    // reconstructed (whitespace collapsed in transit); confirm it matches
    // formatJSON's actual output via version control.
    expectedListAgreementsJSONOutput = `[
    {
        "user": "test-user",
        "term": "test-term",
        "revision": 1,
        "created-on": "2015-12-25T00:00:00Z"
    }
]
`
    expectedListAgreementsJSONOutputWithOwner = `[
    {
        "user": "test-user",
        "owner": "owner",
        "term": "test-term",
        "revision": 1,
        "created-on": "2015-12-25T00:00:00Z"
    }
]
`
)

func (s *listAgreementsSuite) TestGetUsersAgreements(c *gc.C) {
    // No agreements: a notice goes to stderr, stdout stays empty.
    ctx, err := cmdtesting.RunCommand(c, listagreements.NewListAgreementsCommand())
    c.Assert(err, jc.ErrorIsNil)
    c.Assert(cmdtesting.Stdout(ctx), gc.Equals, "")
    c.Assert(cmdtesting.Stderr(ctx), gc.Equals, "No agreements to display.\n")
    c.Assert(s.client.called, jc.IsTrue)

    s.client.setError("well, this is embarassing")
    ctx, err = cmdtesting.RunCommand(c, listagreements.NewListAgreementsCommand())
    c.Assert(err, gc.ErrorMatches, "failed to list user agreements: well, this is embarassing")
    c.Assert(s.client.called, jc.IsTrue)

    agreements := []wireformat.AgreementResponse{{
        User:      "test-user",
        Term:      "test-term",
        Revision:  1,
        CreatedOn: time.Date(2015, 12, 25, 0, 0, 0, 0, time.UTC),
    }}
    s.client.setAgreements(agreements)

    ctx, err = cmdtesting.RunCommand(c, listagreements.NewListAgreementsCommand())
    c.Assert(err, jc.ErrorIsNil)
    c.Assert(ctx, gc.NotNil)
    c.Assert(cmdtesting.Stdout(ctx), gc.Equals, expectedListAgreementsJSONOutput)
    c.Assert(s.client.called, jc.IsTrue)

    ctx, err = cmdtesting.RunCommand(c, listagreements.NewListAgreementsCommand(), "--format", "yaml")
    c.Assert(err, jc.ErrorIsNil)
    c.Assert(ctx, gc.NotNil)
    c.Assert(cmdtesting.Stdout(ctx), gc.Equals, "- user: test-user\n  term: test-term\n  revision: 1\n  createdon: 2015-12-25T00:00:00Z\n")
    c.Assert(s.client.called, jc.IsTrue)
}

func (s *listAgreementsSuite) TestGetUsersAgreementsWithTermOwner(c *gc.C) {
    s.client.setError("well, this is embarassing")
    ctx, err := cmdtesting.RunCommand(c, listagreements.NewListAgreementsCommand())
    c.Assert(err, gc.ErrorMatches, "failed to list user agreements: well, this is embarassing")
    c.Assert(s.client.called, jc.IsTrue)

    agreements := []wireformat.AgreementResponse{{
        User:      "test-user",
        Owner:     "owner",
        Term:      "test-term",
        Revision:  1,
        CreatedOn: time.Date(2015, 12, 25, 0, 0, 0, 0, time.UTC),
    }}
    s.client.setAgreements(agreements)

    ctx, err = cmdtesting.RunCommand(c, listagreements.NewListAgreementsCommand())
    c.Assert(err, jc.ErrorIsNil)
    c.Assert(ctx, gc.NotNil)
    c.Assert(cmdtesting.Stdout(ctx), gc.Equals, expectedListAgreementsJSONOutputWithOwner)
    c.Assert(s.client.called, jc.IsTrue)

    ctx, err = cmdtesting.RunCommand(c, listagreements.NewListAgreementsCommand(), "--format", "yaml")
    c.Assert(err, jc.ErrorIsNil)
    c.Assert(ctx, gc.NotNil)
    c.Assert(cmdtesting.Stdout(ctx), gc.Equals, "- user: test-user\n  owner: owner\n  term: test-term\n  revision: 1\n  createdon: 2015-12-25T00:00:00Z\n")
    c.Assert(s.client.called, jc.IsTrue)
}

// mockClient returns either the configured agreements or the configured
// error, and records whether it was called.
type mockClient struct {
    called bool

    agreements []wireformat.AgreementResponse
    err        string
}

func (c *mockClient) setAgreements(agreements []wireformat.AgreementResponse) {
    c.agreements = agreements
    c.called = false
    c.err = ""
}

func (c *mockClient) setError(err string) {
    c.err = err
    c.called = false
    c.agreements = nil
}

func (c *mockClient) GetUsersAgreements() ([]wireformat.AgreementResponse, error) {
    c.called = true
    if c.err != "" {
        return nil, errors.New(c.err)
    }
    return c.agreements, nil
}

// Copyright 2016 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.

package listagreements_test

import (
    stdtesting "testing"

    gc "gopkg.in/check.v1"
)

// TestAll wires the gocheck suites into the standard "go test" runner.
func TestAll(t *stdtesting.T) {
    gc.TestingT(t)
}

// Copyright 2016 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.

package listbudgets

import (
    "gopkg.in/macaroon-bakery.v1/httpbakery"
)

var (
    // NewAPIClient exposes the API-constructor variable for patching in tests.
    NewAPIClient = &newAPIClient
)

// APIClientFnc returns a function that returns the provided apiClient
// and can be used to patch the NewAPIClient variable for tests.
+func APIClientFnc(api apiClient) func(*httpbakery.Client) (apiClient, error) { + return func(*httpbakery.Client) (apiClient, error) { + return api, nil + } +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/romulus/listbudgets/list-budgets.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/romulus/listbudgets/list-budgets.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/romulus/listbudgets/list-budgets.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/romulus/listbudgets/list-budgets.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,119 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package listbudgets + +import ( + "fmt" + "io" + "sort" + + "github.com/gosuri/uitable" + "github.com/juju/cmd" + "github.com/juju/errors" + "github.com/juju/gnuflag" + "github.com/juju/juju/cmd/modelcmd" + "gopkg.in/macaroon-bakery.v1/httpbakery" + + api "github.com/juju/romulus/api/budget" + wireformat "github.com/juju/romulus/wireformat/budget" +) + +// NewListBudgetsCommand returns a new command that is used +// to list budgets a user has access to. +func NewListBudgetsCommand() modelcmd.CommandBase { + return &listBudgetsCommand{} +} + +type listBudgetsCommand struct { + modelcmd.JujuCommandBase + + out cmd.Output +} + +const listBudgetsDoc = ` +List the available budgets. + +Examples: + juju budgets +` + +// Info implements cmd.Command.Info. +func (c *listBudgetsCommand) Info() *cmd.Info { + return &cmd.Info{ + Name: "budgets", + Purpose: "List budgets.", + Doc: listBudgetsDoc, + Aliases: []string{"list-budgets"}, + } +} + +// SetFlags implements cmd.Command.SetFlags. 
func (c *listBudgetsCommand) SetFlags(f *gnuflag.FlagSet) {
    c.JujuCommandBase.SetFlags(f)
    c.out.AddFlags(f, "tabular", map[string]cmd.Formatter{
        "tabular": formatTabular,
        "json":    cmd.FormatJson,
    })
}

// Run fetches the user's budgets from the budget API and writes them in
// the selected output format.
func (c *listBudgetsCommand) Run(ctx *cmd.Context) error {
    client, err := c.BakeryClient()
    if err != nil {
        return errors.Annotate(err, "failed to create an http client")
    }
    api, err := newAPIClient(client)
    if err != nil {
        return errors.Annotate(err, "failed to create an api client")
    }
    budgets, err := api.ListBudgets()
    if err != nil {
        return errors.Annotate(err, "failed to retrieve budgets")
    }
    if budgets == nil {
        return errors.New("no budget information available")
    }
    err = c.out.Write(ctx, budgets)
    if err != nil {
        return errors.Trace(err)
    }
    return nil
}

// formatTabular returns a tabular view of available budgets.
func formatTabular(writer io.Writer, value interface{}) error {
    b, ok := value.(*wireformat.ListBudgetsResponse)
    if !ok {
        return errors.Errorf("expected value of type %T, got %T", b, value)
    }
    // Budgets are sorted in place (alphabetically, per BudgetSummaries'
    // sort.Interface) before rendering.
    sort.Sort(b.Budgets)

    table := uitable.New()
    table.MaxColWidth = 50
    table.Wrap = true
    // Right-align every numeric column (all but the budget name).
    for _, col := range []int{1, 2, 3, 4} {
        table.RightAlign(col)
    }

    table.AddRow("BUDGET", "MONTHLY", "ALLOCATED", "AVAILABLE", "SPENT")
    for _, budgetEntry := range b.Budgets {
        table.AddRow(budgetEntry.Budget, budgetEntry.Limit, budgetEntry.Allocated, budgetEntry.Available, budgetEntry.Consumed)
    }
    table.AddRow("TOTAL", b.Total.Limit, b.Total.Allocated, b.Total.Available, b.Total.Consumed)
    table.AddRow("", "", "", "", "")
    table.AddRow("Credit limit:", b.Credit, "", "", "")
    fmt.Fprint(writer, table)
    return nil
}

// newAPIClient is a seam for tests; see export_test.go.
var newAPIClient = newAPIClientImpl

func newAPIClientImpl(c *httpbakery.Client) (apiClient, error) {
    client := api.NewClient(c)
    return client, nil
}

// apiClient defines the budget-API surface this command uses.
type apiClient interface {
    // ListBudgets returns a list of budgets a user has access to.
    ListBudgets() (*wireformat.ListBudgetsResponse, error)
}

// Copyright 2016 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.

package listbudgets_test

import (
    "github.com/juju/cmd/cmdtesting"
    "github.com/juju/errors"
    "github.com/juju/romulus/wireformat/budget"
    "github.com/juju/testing"
    jc "github.com/juju/testing/checkers"
    gc "gopkg.in/check.v1"

    "github.com/juju/juju/cmd/juju/romulus/listbudgets"
    coretesting "github.com/juju/juju/testing"
)

var _ = gc.Suite(&listBudgetsSuite{})

// listBudgetsSuite exercises "juju budgets" against a stub API.
type listBudgetsSuite struct {
    coretesting.FakeJujuXDGDataHomeSuite
    stub    *testing.Stub
    mockAPI *mockapi
}

func (s *listBudgetsSuite) SetUpTest(c *gc.C) {
    s.FakeJujuXDGDataHomeSuite.SetUpTest(c)
    s.stub = &testing.Stub{}
    s.mockAPI = &mockapi{Stub: s.stub}
    // Route the command's API construction to the mock.
    s.PatchValue(listbudgets.NewAPIClient, listbudgets.APIClientFnc(s.mockAPI))
}

func (s *listBudgetsSuite) TestUnexpectedParameters(c *gc.C) {
    listBudgets := listbudgets.NewListBudgetsCommand()
    _, err := cmdtesting.RunCommand(c, listBudgets, "unexpected")
    c.Assert(err, gc.ErrorMatches, `unrecognized args: \["unexpected"\]`)
}

func (s *listBudgetsSuite) TestAPIError(c *gc.C) {
    s.mockAPI.SetErrors(errors.New("well, this is embarrassing"))
    listBudgets := listbudgets.NewListBudgetsCommand()
    _, err := cmdtesting.RunCommand(c, listBudgets)
    c.Assert(err, gc.ErrorMatches, "failed to retrieve budgets: well, this is embarrassing")
}

func (s *listBudgetsSuite) TestListBudgetsOutput(c *gc.C) {
    s.mockAPI.result = &budget.ListBudgetsResponse{
        Budgets: budget.BudgetSummaries{
            budget.BudgetSummary{
                Owner:       "bob",
                Budget:      "personal",
                Limit:       "50",
                Allocated:   "30",
                Unallocated: "20",
                Available:   "45",
                Consumed:    "5",
            },
            budget.BudgetSummary{
                Owner:       "bob",
                Budget:      "work",
                Limit:       "200",
                Allocated:   "100",
                Unallocated: "100",
                Available:   "150",
                Consumed:    "50",
            },
            budget.BudgetSummary{
                Owner:       "bob",
                Budget:      "team",
                Limit:       "50",
                Allocated:   "10",
                Unallocated: "40",
                Available:   "40",
                Consumed:    "10",
            },
        },
        Total: budget.BudgetTotals{
            Limit:       "300",
            Allocated:   "140",
            Available:   "235",
            Unallocated: "160",
            Consumed:    "65",
        },
        Credit: "400",
    }
    // Expected command output. Make sure budgets are sorted alphabetically.
    // NOTE(review): the run-length padding inside these literals (uitable
    // column alignment) appears collapsed in transit — restore the exact
    // spacing from version control before relying on these assertions.
    expected := "" +
        "BUDGET \tMONTHLY\tALLOCATED\tAVAILABLE\tSPENT\n" +
        "personal \t 50\t 30\t 45\t 5\n" +
        "team \t 50\t 10\t 40\t 10\n" +
        "work \t 200\t 100\t 150\t 50\n" +
        "TOTAL \t 300\t 140\t 235\t 65\n" +
        " \t \t \t \t \n" +
        "Credit limit:\t 400\t \t \t \n"

    listBudgets := listbudgets.NewListBudgetsCommand()

    ctx, err := cmdtesting.RunCommand(c, listBudgets)
    c.Assert(err, jc.ErrorIsNil)
    c.Assert(cmdtesting.Stdout(ctx), jc.DeepEquals, expected)
    s.mockAPI.CheckCallNames(c, "ListBudgets")
}

func (s *listBudgetsSuite) TestListBudgetsOutputNoBudgets(c *gc.C) {
    s.mockAPI.result = &budget.ListBudgetsResponse{
        Budgets: budget.BudgetSummaries{},
        Total: budget.BudgetTotals{
            Limit:       "0",
            Allocated:   "0",
            Available:   "0",
            Unallocated: "0",
            Consumed:    "0",
        },
        Credit: "0",
    }
    // NOTE(review): padding reconstructed, as above.
    expected := "" +
        "BUDGET \tMONTHLY\tALLOCATED\tAVAILABLE\tSPENT\n" +
        "TOTAL \t 0\t 0\t 0\t 0\n" +
        " \t \t \t \t \n" +
        "Credit limit:\t 0\t \t \t \n"

    listBudgets := listbudgets.NewListBudgetsCommand()

    ctx, err := cmdtesting.RunCommand(c, listBudgets)
    c.Assert(err, jc.ErrorIsNil)
    c.Assert(cmdtesting.Stdout(ctx), jc.DeepEquals, expected)
    s.mockAPI.CheckCallNames(c, "ListBudgets")
}

func (s *listBudgetsSuite) TestListBudgetsNoOutput(c *gc.C) {
    // A nil API result is reported as an error, with nothing printed.
    listBudgets := listbudgets.NewListBudgetsCommand()

    ctx, err := cmdtesting.RunCommand(c, listBudgets)
    c.Assert(err, gc.ErrorMatches, `no budget information available`)
    c.Assert(cmdtesting.Stdout(ctx), jc.DeepEquals, ``)
    s.mockAPI.CheckCallNames(c, "ListBudgets")
}

// mockapi returns the canned result unless an error has been primed.
type mockapi struct {
    *testing.Stub
    result *budget.ListBudgetsResponse
}

func (api *mockapi) ListBudgets() (*budget.ListBudgetsResponse, error) {
    api.AddCall("ListBudgets")
    if err := api.NextErr(); err != nil {
        return nil, err
    }
    return api.result, nil
}

// Copyright 2016 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.

package listbudgets_test

import (
    stdtesting "testing"

    gc "gopkg.in/check.v1"
)

// TestAll wires the gocheck suites into the standard "go test" runner.
func TestAll(t *stdtesting.T) {
    gc.TestingT(t)
}
+ +package listplans + +import ( + "gopkg.in/macaroon-bakery.v1/httpbakery" +) + +var ( + NewClient = &newClient +) + +// APIClientFnc returns a function that returns the provided apiClient +// and can be used to patch the NewAPIClient variable for tests. +func APIClientFnc(api apiClient) func(client *httpbakery.Client) (apiClient, error) { + return func(*httpbakery.Client) (apiClient, error) { + return api, nil + } +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/romulus/listplans/list_plans.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/romulus/listplans/list_plans.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/romulus/listplans/list_plans.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/romulus/listplans/list_plans.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,222 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +// The listplans package contains implementation of the command that +// can be used to list plans that are available for a charm. +package listplans + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "strings" + + "github.com/gosuri/uitable" + "github.com/juju/cmd" + "github.com/juju/errors" + "github.com/juju/gnuflag" + api "github.com/juju/romulus/api/plan" + wireformat "github.com/juju/romulus/wireformat/plan" + "gopkg.in/macaroon-bakery.v1/httpbakery" + "gopkg.in/yaml.v2" + + rcmd "github.com/juju/juju/cmd/juju/romulus" + "github.com/juju/juju/cmd/modelcmd" + "github.com/juju/juju/cmd/output" +) + +// apiClient defines the interface of the plan api client need by this command. +type apiClient interface { + // GetAssociatedPlans returns the plans associated with the charm. 
+ GetAssociatedPlans(charmURL string) ([]wireformat.Plan, error) +} + +var newClient = func(client *httpbakery.Client) (apiClient, error) { + return api.NewClient(api.HTTPClient(client)) +} + +const listPlansDoc = ` +List plans available for the specified charm. + +Examples: + juju plans cs:webapp +` + +// ListPlansCommand retrieves plans that are available for the specified charm +type ListPlansCommand struct { + modelcmd.JujuCommandBase + + out cmd.Output + CharmURL string + + CharmResolver rcmd.CharmResolver +} + +// NewListPlansCommand creates a new ListPlansCommand. +func NewListPlansCommand() modelcmd.CommandBase { + return &ListPlansCommand{ + CharmResolver: rcmd.NewCharmStoreResolver(), + } +} + +// Info implements Command.Info. +func (c *ListPlansCommand) Info() *cmd.Info { + return &cmd.Info{ + Name: "plans", + Args: "", + Purpose: "List plans.", + Doc: listPlansDoc, + Aliases: []string{"list-plans"}, + } +} + +// Init reads and verifies the cli arguments for the ListPlansCommand +func (c *ListPlansCommand) Init(args []string) error { + if len(args) == 0 { + return errors.New("missing arguments") + } + charmURL, args := args[0], args[1:] + if err := cmd.CheckEmpty(args); err != nil { + return errors.Errorf("unknown command line arguments: " + strings.Join(args, ",")) + } + c.CharmURL = charmURL + return nil +} + +// SetFlags implements Command.SetFlags. +func (c *ListPlansCommand) SetFlags(f *gnuflag.FlagSet) { + c.JujuCommandBase.SetFlags(f) + defaultFormat := "tabular" + c.out.AddFlags(f, defaultFormat, map[string]cmd.Formatter{ + "yaml": cmd.FormatYaml, + "json": cmd.FormatJson, + "smart": cmd.FormatSmart, + "summary": formatSummary, + "tabular": formatTabular, + }) +} + +// Run implements Command.Run. +// Retrieves the plan from the plans service. The set of plans to be +// retrieved can be limited using the plan and isv flags. 
+func (c *ListPlansCommand) Run(ctx *cmd.Context) (rErr error) { + client, err := c.BakeryClient() + if err != nil { + return errors.Annotate(err, "failed to create an http client") + } + + resolvedURL, err := c.CharmResolver.Resolve(client.VisitWebPage, client.Client, c.CharmURL) + if err != nil { + return errors.Annotatef(err, "failed to resolve charmURL %v", c.CharmURL) + } + c.CharmURL = resolvedURL + + apiClient, err := newClient(client) + if err != nil { + return errors.Annotate(err, "failed to create a plan API client") + } + + plans, err := apiClient.GetAssociatedPlans(c.CharmURL) + if err != nil { + return errors.Annotate(err, "failed to retrieve plans") + } + + output := make([]plan, len(plans)) + for i, p := range plans { + outputPlan := plan{ + URL: p.URL, + } + def, err := readPlan(bytes.NewBufferString(p.Definition)) + if err != nil { + return errors.Annotate(err, "failed to parse plan definition") + } + if def.Description != nil { + outputPlan.Price = def.Description.Price + outputPlan.Description = def.Description.Text + } + output[i] = outputPlan + } + + if len(output) == 0 && c.out.Name() == "tabular" { + ctx.Infof("No plans to display.") + } + + err = c.out.Write(ctx, output) + if err != nil { + return errors.Trace(err) + } + + return nil +} + +type plan struct { + URL string `json:"plan" yaml:"plan"` + Price string `json:"price" yaml:"price"` + Description string `json:"description" yaml:"description"` +} + +// formatSummary returns a summary of available plans. 
+func formatSummary(writer io.Writer, value interface{}) error { + plans, ok := value.([]plan) + if !ok { + return errors.Errorf("expected value of type %T, got %T", plans, value) + } + tw := output.TabWriter(writer) + p := func(values ...interface{}) { + for _, v := range values { + fmt.Fprintf(tw, "%s\t", v) + } + fmt.Fprintln(tw) + } + p("Plan", "Price") + for _, plan := range plans { + p(plan.URL, plan.Price) + } + tw.Flush() + return nil +} + +// formatTabular returns a tabular summary of available plans. +func formatTabular(writer io.Writer, value interface{}) error { + plans, ok := value.([]plan) + if !ok { + return errors.Errorf("expected value of type %T, got %T", plans, value) + } + + table := uitable.New() + table.MaxColWidth = 50 + table.Wrap = true + + table.AddRow("Plan", "Price", "Description") + for _, plan := range plans { + table.AddRow(plan.URL, plan.Price, plan.Description) + } + fmt.Fprint(writer, table) + return nil +} + +type planModel struct { + Description *descriptionModel `json:"description,omitempty"` +} + +// descriptionModel provides a human readable description of the plan. +type descriptionModel struct { + Price string `json:"price,omitempty"` + Text string `json:"text,omitempty"` +} + +// readPlan reads, parses and returns a planModel struct representation. 
+func readPlan(r io.Reader) (plan *planModel, err error) { + data, err := ioutil.ReadAll(r) + if err != nil { + return + } + + var doc planModel + err = yaml.Unmarshal(data, &doc) + if err != nil { + return + } + return &doc, nil +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/romulus/listplans/list_plans_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/romulus/listplans/list_plans_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/romulus/listplans/list_plans_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/romulus/listplans/list_plans_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,237 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package listplans_test + +import ( + "net/http" + "net/url" + "time" + + "github.com/juju/cmd/cmdtesting" + api "github.com/juju/romulus/api/plan" + wireformat "github.com/juju/romulus/wireformat/plan" + "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/cmd/juju/romulus/listplans" + coretesting "github.com/juju/juju/testing" +) + +var ( + testPlan1 = ` + description: + text: | + Lorem ipsum dolor sit amet, + consectetur adipiscing elit. + Nunc pretium purus nec magna faucibus, sed eleifend dui fermentum. Nulla nec ornare lorem, sed imperdiet turpis. Nam auctor quis massa et commodo. Maecenas in magna erat. Duis non iaculis risus, a malesuada quam. Sed quis commodo sapien. Suspendisse laoreet diam eu interdum tristique. Class aptent taciti sociosqu ad litora torquent per conubia nostra, per inceptos himenaeos. + Donec eu nunc quis eros fermentum porta non ut justo. Donec ut tempus sapien. Suspendisse bibendum fermentum eros, id feugiat justo elementum quis. Quisque vel volutpat risus. Aenean pellentesque ultrices consequat. 
Maecenas luctus, augue vitae ullamcorper vulputate, purus ligula accumsan diam, ut efficitur diam tellus ac nibh. Cras eros ligula, mattis in ex quis, porta efficitur quam. Donec porta, est ut interdum blandit, enim est elementum sapien, quis congue orci dui et nulla. Maecenas vehicula malesuada vehicula. Phasellus sapien ante, semper eu ornare sed, vulputate id nunc. Maecenas in orci mollis, sagittis lorem quis, ultrices metus. Integer molestie tempor augue, pulvinar blandit sapien ultricies eget. + Fusce sed tellus sit amet tortor mollis pellentesque. Nulla tempus sem tellus, vitae tempor ipsum scelerisque eu. Cras tempor, tellus nec pretium egestas, felis massa luctus velit, vitae feugiat nunc velit ac tellus. Maecenas quis nisi diam. Sed pulvinar suscipit nibh sit amet cursus. Ut sem orci, consequat id pretium id, lacinia id nisl. Maecenas id quam at nisi eleifend porta. Vestibulum at ligula arcu. Quisque tincidunt pulvinar egestas. Ut suscipit ornare ligula a fermentum. Morbi ante justo, condimentum ut risus vitae, molestie elementum elit. Curabitur malesuada commodo diam sed ultrices. Vestibulum tincidunt turpis at ultricies fermentum. Morbi ipsum felis, laoreet quis risus id, ornare elementum urna. Morbi ultrices porttitor pulvinar. Maecenas facilisis velit sit amet tellus feugiat iaculis. 
+ metrics: + pings: + unit: + transform: max + period: hour + gaps: zero +` + testPlan2 = ` + metrics: + pongs: + unit: + transform: max + period: hour + gaps: zero +` +) + +type ListPlansCommandSuite struct { + coretesting.FakeJujuXDGDataHomeSuite + mockAPI *mockapi + stub *testing.Stub +} + +var _ = gc.Suite(&ListPlansCommandSuite{}) + +func (s *ListPlansCommandSuite) SetUpTest(c *gc.C) { + s.FakeJujuXDGDataHomeSuite.SetUpTest(c) + s.stub = &testing.Stub{} + s.mockAPI = newMockAPI(s.stub) + s.PatchValue(listplans.NewClient, listplans.APIClientFnc(s.mockAPI)) +} + +func (s *ListPlansCommandSuite) TestTabularOutput(c *gc.C) { + listPlans := &listplans.ListPlansCommand{ + CharmResolver: &mockCharmResolver{ + ResolvedURL: "series/some-charm-url", + Stub: s.stub, + }, + } + ctx, err := cmdtesting.RunCommand(c, listPlans, "some-charm-url") + c.Assert(err, jc.ErrorIsNil) + c.Assert(cmdtesting.Stdout(ctx), gc.Equals, + `Plan Price Description +bob/test-plan-1 Lorem ipsum dolor sit amet, + consectetur adipiscing elit. + Nunc pretium purus nec magna faucibus, sed + eleifend dui fermentum. Nulla nec ornare lorem, + sed imperdiet turpis. Nam auctor quis massa et + commodo. Maecenas in magna erat. Duis non iaculis + risus, a malesuada quam. Sed quis commodo sapien. + Suspendisse laoreet diam eu interdum tristique. + Class aptent taciti sociosqu ad litora torquent + per conubia nostra, per inceptos himenaeos. + Donec eu nunc quis eros fermentum porta non ut + justo. Donec ut tempus sapien. Suspendisse + bibendum fermentum eros, id feugiat justo + elementum quis. Quisque vel volutpat risus. Aenean + pellentesque ultrices consequat. Maecenas luctus, + augue vitae ullamcorper vulputate, purus ligula + accumsan diam, ut efficitur diam tellus ac nibh. + Cras eros ligula, mattis in ex quis, porta + efficitur quam. Donec porta, est ut interdum + blandit, enim est elementum sapien, quis congue + orci dui et nulla. Maecenas vehicula malesuada + vehicula. 
Phasellus sapien ante, semper eu ornare + sed, vulputate id nunc. Maecenas in orci mollis, + sagittis lorem quis, ultrices metus. Integer + molestie tempor augue, pulvinar blandit sapien + ultricies eget. + Fusce sed tellus sit amet tortor mollis + pellentesque. Nulla tempus sem tellus, vitae + tempor ipsum scelerisque eu. Cras tempor, tellus + nec pretium egestas, felis massa luctus velit, + vitae feugiat nunc velit ac tellus. Maecenas quis + nisi diam. Sed pulvinar suscipit nibh sit amet + cursus. Ut sem orci, consequat id pretium id, + lacinia id nisl. Maecenas id quam at nisi eleifend + porta. Vestibulum at ligula arcu. Quisque + tincidunt pulvinar egestas. Ut suscipit ornare + ligula a fermentum. Morbi ante justo, condimentum + ut risus vitae, molestie elementum elit. Curabitur + malesuada commodo diam sed ultrices. Vestibulum + tincidunt turpis at ultricies fermentum. Morbi + ipsum felis, laoreet quis risus id, ornare + elementum urna. Morbi ultrices porttitor pulvinar. + Maecenas facilisis velit sit amet tellus feugiat + iaculis. 
+ +carol/test-plan-2 +`) +} + +func (s *ListPlansCommandSuite) TestGetCommands(c *gc.C) { + tests := []struct { + about string + args []string + err string + resolvedCharmURL string + apiCall []interface{} + }{{ + about: "charm url is resolved", + args: []string{"some-charm-url"}, + resolvedCharmURL: "series/some-charm-url-1", + apiCall: []interface{}{"series/some-charm-url-1"}, + }, { + about: "everything works - default format", + args: []string{"some-charm-url"}, + apiCall: []interface{}{"some-charm-url"}, + }, { + about: "everything works - yaml", + args: []string{"some-charm-url", "--format", "yaml"}, + apiCall: []interface{}{"some-charm-url"}, + }, { + about: "everything works - smart", + args: []string{"some-charm-url", "--format", "smart"}, + apiCall: []interface{}{"some-charm-url"}, + }, { + about: "everything works - json", + args: []string{"some-charm-url", "--format", "json"}, + apiCall: []interface{}{"some-charm-url"}, + }, { + about: "everything works - summary", + args: []string{"some-charm-url", "--format", "summary"}, + apiCall: []interface{}{"some-charm-url"}, + }, { + about: "everything works - tabular", + args: []string{"some-charm-url", "--format", "tabular"}, + apiCall: []interface{}{"some-charm-url"}, + }, { + about: "missing argument", + args: []string{}, + err: `missing arguments`, + apiCall: []interface{}{}, + }, { + about: "unknown arguments", + args: []string{"some-charm-url", "extra", "arguments"}, + err: `unknown command line arguments: extra,arguments`, + apiCall: []interface{}{}, + }, + } + + for i, t := range tests { + c.Logf("Running test %d %s", i, t.about) + s.mockAPI.reset() + + listPlans := &listplans.ListPlansCommand{ + CharmResolver: &mockCharmResolver{ + ResolvedURL: t.resolvedCharmURL, + Stub: s.stub, + }, + } + _, err := cmdtesting.RunCommand(c, listPlans, t.args...) 
+ if t.err != "" { + c.Assert(err, gc.ErrorMatches, t.err) + } else { + c.Assert(err, jc.ErrorIsNil) + s.mockAPI.CheckCall(c, 0, "Resolve", t.args[0]) + s.mockAPI.CheckCall(c, 1, "GetAssociatedPlans", t.apiCall...) + } + } +} + +// mockapi mocks the plan service api +type mockapi struct { + *testing.Stub + api.Client +} + +func newMockAPI(s *testing.Stub) *mockapi { + return &mockapi{Stub: s} +} + +// Get implements the Get function of the api.PlanClient interface. +// TODO (domas) : fix once querying by charm url is in place +func (m *mockapi) GetAssociatedPlans(charmURL string) ([]wireformat.Plan, error) { + m.AddCall("GetAssociatedPlans", charmURL) + p1 := wireformat.Plan{ + URL: "bob/test-plan-1", + Definition: testPlan1, + CreatedOn: time.Date(2015, 0, 0, 0, 0, 0, 0, time.UTC).Format(time.RFC3339), + } + p2 := wireformat.Plan{ + URL: "carol/test-plan-2", + Definition: testPlan2, + CreatedOn: time.Date(2015, 0, 0, 0, 0, 0, 0, time.UTC).Format(time.RFC3339), + } + return []wireformat.Plan{p1, p2}, m.NextErr() +} + +func (m *mockapi) reset() { + m.ResetCalls() +} + +// mockCharmResolver is a mock implementation of cmd.CharmResolver. +type mockCharmResolver struct { + *testing.Stub + ResolvedURL string +} + +// Resolve implements cmd.CharmResolver. 
+func (r *mockCharmResolver) Resolve(_ func(*url.URL) error, _ *http.Client, charmURL string) (string, error) { + r.AddCall("Resolve", charmURL) + if r.ResolvedURL != "" { + return r.ResolvedURL, r.NextErr() + } + return charmURL, r.NextErr() +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/romulus/listplans/package_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/romulus/listplans/package_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/romulus/listplans/package_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/romulus/listplans/package_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,14 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package listplans_test + +import ( + stdtesting "testing" + + gc "gopkg.in/check.v1" +) + +func TestAll(t *stdtesting.T) { + gc.TestingT(t) +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/romulus/resolve.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/romulus/resolve.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/romulus/resolve.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/romulus/resolve.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,55 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package cmd + +import ( + "net/http" + "net/url" + + "github.com/juju/errors" + "gopkg.in/juju/charm.v6-unstable" + "gopkg.in/juju/charmrepo.v2-unstable" + "gopkg.in/juju/charmrepo.v2-unstable/csclient" +) + +// CharmResolver interface defines the functionality to resolve a charm URL. +type CharmResolver interface { + // Resolve resolves the charm URL. + Resolve(visitWebPage func(*url.URL) error, client *http.Client, charmURL string) (string, error) +} + +// CharmStoreResolver implements the CharmResolver interface. 
+type CharmStoreResolver struct { + csURL string +} + +// NewCharmStoreResolver creates a new charm store resolver. +func NewCharmStoreResolver() *CharmStoreResolver { + return &CharmStoreResolver{ + csURL: csclient.ServerURL, + } +} + +// Resolve implements the CharmResolver interface. +func (r *CharmStoreResolver) Resolve(visitWebPage func(*url.URL) error, client *http.Client, charmURL string) (string, error) { + repo := charmrepo.NewCharmStore(charmrepo.NewCharmStoreParams{ + URL: r.csURL, + HTTPClient: client, + VisitWebPage: visitWebPage, + }) + + curl, err := charm.ParseURL(charmURL) + if err != nil { + return "", errors.Annotate(err, "could not parse charm url") + } + // ignore local charm urls + if curl.Schema == "local" { + return charmURL, nil + } + resolvedURL, _, err := repo.Resolve(curl) + if err != nil { + return "", errors.Trace(err) + } + return resolvedURL.String(), nil +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/romulus/setbudget/export_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/romulus/setbudget/export_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/romulus/setbudget/export_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/romulus/setbudget/export_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,18 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+ +package setbudget + +import ( + "gopkg.in/macaroon-bakery.v1/httpbakery" +) + +var ( + NewAPIClient = &newAPIClient +) + +func APIClientFnc(api apiClient) func(*httpbakery.Client) (apiClient, error) { + return func(*httpbakery.Client) (apiClient, error) { + return api, nil + } +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/romulus/setbudget/package_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/romulus/setbudget/package_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/romulus/setbudget/package_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/romulus/setbudget/package_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,14 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package setbudget_test + +import ( + stdtesting "testing" + + gc "gopkg.in/check.v1" +) + +func TestAll(t *stdtesting.T) { + gc.TestingT(t) +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/romulus/setbudget/setbudget.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/romulus/setbudget/setbudget.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/romulus/setbudget/setbudget.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/romulus/setbudget/setbudget.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,86 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package setbudget + +import ( + "fmt" + "strconv" + + "github.com/juju/cmd" + "github.com/juju/errors" + "github.com/juju/juju/cmd/modelcmd" + "gopkg.in/macaroon-bakery.v1/httpbakery" + + api "github.com/juju/romulus/api/budget" +) + +type setBudgetCommand struct { + modelcmd.JujuCommandBase + Name string + Value string +} + +// NewSetBudgetCommand returns a new setBudgetCommand. 
+func NewSetBudgetCommand() cmd.Command { + return &setBudgetCommand{} +} + +const doc = ` +Set the monthly budget limit. + +Examples: + # Sets the monthly limit for budget named 'personal' to 96. + juju set-budget personal 96 +` + +// Info implements cmd.Command.Info. +func (c *setBudgetCommand) Info() *cmd.Info { + return &cmd.Info{ + Name: "set-budget", + Args: " ", + Purpose: "Set the budget limit.", + Doc: doc, + } +} + +// Init implements cmd.Command.Init. +func (c *setBudgetCommand) Init(args []string) error { + if len(args) < 2 { + return errors.New("name and value required") + } + c.Name, c.Value = args[0], args[1] + if _, err := strconv.ParseInt(c.Value, 10, 32); err != nil { + return errors.New("budget value needs to be a whole number") + } + return cmd.CheckEmpty(args[2:]) +} + +// Run implements cmd.Command.Run and contains most of the setbudget logic. +func (c *setBudgetCommand) Run(ctx *cmd.Context) error { + client, err := c.BakeryClient() + if err != nil { + return errors.Annotate(err, "failed to create an http client") + } + api, err := newAPIClient(client) + if err != nil { + return errors.Annotate(err, "failed to create an api client") + } + resp, err := api.SetBudget(c.Name, c.Value) + if err != nil { + return errors.Annotate(err, "failed to set the budget") + } + fmt.Fprintln(ctx.Stdout, resp) + return nil +} + +var newAPIClient = newAPIClientImpl + +func newAPIClientImpl(c *httpbakery.Client) (apiClient, error) { + client := api.NewClient(c) + return client, nil +} + +type apiClient interface { + SetBudget(string, string) (string, error) +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/romulus/setbudget/setbudget_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/romulus/setbudget/setbudget_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/romulus/setbudget/setbudget_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/romulus/setbudget/setbudget_test.go 
2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,94 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package setbudget_test + +import ( + "github.com/juju/cmd/cmdtesting" + "github.com/juju/errors" + "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/cmd/juju/romulus/setbudget" + coretesting "github.com/juju/juju/testing" +) + +var _ = gc.Suite(&setBudgetSuite{}) + +type setBudgetSuite struct { + coretesting.FakeJujuXDGDataHomeSuite + stub *testing.Stub + mockAPI *mockapi +} + +func (s *setBudgetSuite) SetUpTest(c *gc.C) { + s.FakeJujuXDGDataHomeSuite.SetUpTest(c) + s.stub = &testing.Stub{} + s.mockAPI = newMockAPI(s.stub) + s.PatchValue(setbudget.NewAPIClient, setbudget.APIClientFnc(s.mockAPI)) +} + +func (s *setBudgetSuite) TestSetBudget(c *gc.C) { + s.mockAPI.resp = "name budget set to 5" + set := setbudget.NewSetBudgetCommand() + ctx, err := cmdtesting.RunCommand(c, set, "name", "5") + c.Assert(err, jc.ErrorIsNil) + c.Assert(cmdtesting.Stdout(ctx), jc.DeepEquals, "name budget set to 5\n") + s.mockAPI.CheckCall(c, 0, "SetBudget", "name", "5") +} + +func (s *setBudgetSuite) TestSetBudgetAPIError(c *gc.C) { + s.stub.SetErrors(errors.New("something failed")) + set := setbudget.NewSetBudgetCommand() + _, err := cmdtesting.RunCommand(c, set, "name", "5") + c.Assert(err, gc.ErrorMatches, "failed to set the budget: something failed") + s.mockAPI.CheckCall(c, 0, "SetBudget", "name", "5") +} + +func (s *setBudgetSuite) TestSetBudgetErrors(c *gc.C) { + tests := []struct { + about string + args []string + expectedError string + }{ + { + about: "value needs to be a number", + args: []string{"name", "badvalue"}, + expectedError: "budget value needs to be a whole number", + }, + { + about: "value is missing", + args: []string{"name"}, + expectedError: "name and value required", + }, + { + about: "no args", + args: []string{}, + expectedError: "name and value 
required", + }, + } + for i, test := range tests { + c.Logf("test %d: %s", i, test.about) + s.stub.SetErrors(errors.New(test.expectedError)) + defer s.mockAPI.ResetCalls() + set := setbudget.NewSetBudgetCommand() + _, err := cmdtesting.RunCommand(c, set, test.args...) + c.Assert(err, gc.ErrorMatches, test.expectedError) + s.mockAPI.CheckNoCalls(c) + } +} + +func newMockAPI(s *testing.Stub) *mockapi { + return &mockapi{Stub: s} +} + +type mockapi struct { + *testing.Stub + resp string +} + +func (api *mockapi) SetBudget(name, value string) (string, error) { + api.MethodCall(api, "SetBudget", name, value) + return api.resp, api.NextErr() +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/romulus/setplan/export_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/romulus/setplan/export_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/romulus/setplan/export_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/romulus/setplan/export_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,20 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package setplan + +import ( + api "github.com/juju/romulus/api/plan" +) + +var ( + NewAuthorizationClient = &newAuthorizationClient +) + +// APIClientFnc returns a function that returns the provided apiClient +// and can be used to patch the NewAPIClient variable for tests. 
+func APIClientFnc(client authorizationClient) func(...api.ClientOption) (authorizationClient, error) { + return func(...api.ClientOption) (authorizationClient, error) { + return client, nil + } +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/romulus/setplan/set_plan.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/romulus/setplan/set_plan.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/romulus/setplan/set_plan.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/romulus/setplan/set_plan.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,123 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +// The setplan package contains the implementation of the juju set-plan +// command. +package setplan + +import ( + "encoding/json" + "net/url" + + "github.com/juju/cmd" + "github.com/juju/errors" + "github.com/juju/juju/api/application" + "github.com/juju/juju/cmd/modelcmd" + "gopkg.in/juju/names.v2" + "gopkg.in/macaroon.v1" + + api "github.com/juju/romulus/api/plan" +) + +// authorizationClient defines the interface of an api client that +// the comand uses to create an authorization macaroon. +type authorizationClient interface { + // Authorize returns the authorization macaroon for the specified environment, + // charm url, application name and plan. + Authorize(environmentUUID, charmURL, applicationName, plan string, visitWebPage func(*url.URL) error) (*macaroon.Macaroon, error) +} + +var newAuthorizationClient = func(options ...api.ClientOption) (authorizationClient, error) { + return api.NewAuthorizationClient(options...) +} + +// NewSetPlanCommand returns a new command that is used to set metric credentials for a +// deployed application. 
+func NewSetPlanCommand() cmd.Command { + return modelcmd.Wrap(&setPlanCommand{}) +} + +// setPlanCommand is a command-line tool for setting +// Application.MetricCredential for development & demonstration purposes. +type setPlanCommand struct { + modelcmd.ModelCommandBase + + Application string + Plan string +} + +// Info implements cmd.Command. +func (c *setPlanCommand) Info() *cmd.Info { + return &cmd.Info{ + Name: "set-plan", + Args: " ", + Purpose: "Set the plan for an application.", + Doc: ` +Set the plan for the deployed application, effective immediately. + +The specified plan name must be a valid plan that is offered for this +particular charm. Use "juju list-plans " for more information. + +Examples: + juju set-plan myapp example/uptime +`, + } +} + +// Init implements cmd.Command. +func (c *setPlanCommand) Init(args []string) error { + if len(args) < 2 { + return errors.New("need to specify application name and plan url") + } + + applicationName := args[0] + if !names.IsValidApplication(applicationName) { + return errors.Errorf("invalid application name %q", applicationName) + } + + c.Plan = args[1] + c.Application = applicationName + + return c.ModelCommandBase.Init(args[2:]) +} + +func (c *setPlanCommand) requestMetricCredentials(client *application.Client, ctx *cmd.Context) ([]byte, error) { + envUUID := client.ModelUUID() + charmURL, err := client.GetCharmURL(c.Application) + if err != nil { + return nil, errors.Trace(err) + } + + hc, err := c.BakeryClient() + if err != nil { + return nil, errors.Trace(err) + } + authClient, err := newAuthorizationClient(api.HTTPClient(hc)) + if err != nil { + return nil, errors.Trace(err) + } + m, err := authClient.Authorize(envUUID, charmURL.String(), c.Application, c.Plan, hc.VisitWebPage) + if err != nil { + return nil, errors.Trace(err) + } + ms := macaroon.Slice{m} + return json.Marshal(ms) +} + +// Run implements cmd.Command. 
+func (c *setPlanCommand) Run(ctx *cmd.Context) error { + root, err := c.NewAPIRoot() + if err != nil { + return errors.Trace(err) + } + client := application.NewClient(root) + credentials, err := c.requestMetricCredentials(client, ctx) + if err != nil { + return errors.Trace(err) + } + err = client.SetMetricCredentials(c.Application, credentials) + if err != nil { + return errors.Trace(err) + } + return nil +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/romulus/setplan/set_plan_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/romulus/setplan/set_plan_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/romulus/setplan/set_plan_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/romulus/setplan/set_plan_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,180 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package setplan_test + +import ( + "encoding/json" + "fmt" + "net/url" + stdtesting "testing" + + "github.com/juju/cmd/cmdtesting" + "github.com/juju/errors" + "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + "gopkg.in/juju/charm.v6-unstable" + "gopkg.in/macaroon-bakery.v1/bakery" + "gopkg.in/macaroon-bakery.v1/bakery/checkers" + "gopkg.in/macaroon.v1" + + "github.com/juju/juju/cmd/juju/romulus/setplan" + jjjtesting "github.com/juju/juju/juju/testing" + "github.com/juju/juju/state" + "github.com/juju/juju/testcharms" + jjtesting "github.com/juju/juju/testing" +) + +func TestPackage(t *stdtesting.T) { + jjtesting.MgoTestPackage(t) +} + +var _ = gc.Suite(&setPlanCommandSuite{}) + +type setPlanCommandSuite struct { + jjjtesting.JujuConnSuite + + mockAPI *mockapi + charmURL string +} + +func (s *setPlanCommandSuite) SetUpTest(c *gc.C) { + s.JujuConnSuite.SetUpTest(c) + + ch := testcharms.Repo.CharmDir("dummy") + curl := charm.MustParseURL( + fmt.Sprintf("local:quantal/%s-%d", 
ch.Meta().Name, ch.Revision()), + ) + s.charmURL = curl.String() + charmInfo := state.CharmInfo{ + Charm: ch, + ID: curl, + StoragePath: "dummy-path", + SHA256: "dummy-1", + } + dummyCharm, err := s.State.AddCharm(charmInfo) + c.Assert(err, jc.ErrorIsNil) + s.AddTestingService(c, "mysql", dummyCharm) + + mockAPI, err := newMockAPI() + c.Assert(err, jc.ErrorIsNil) + s.mockAPI = mockAPI + + s.PatchValue(setplan.NewAuthorizationClient, setplan.APIClientFnc(s.mockAPI)) +} + +func (s setPlanCommandSuite) TestSetPlanCommand(c *gc.C) { + tests := []struct { + about string + plan string + application string + err string + apiErr error + apiCalls []testing.StubCall + }{{ + about: "all is well", + plan: "bob/default", + application: "mysql", + apiCalls: []testing.StubCall{{ + FuncName: "Authorize", + Args: []interface{}{ + s.State.ModelUUID(), + s.charmURL, + "mysql", + }, + }}, + }, { + about: "invalid application name", + plan: "bob/default", + application: "mysql-0", + err: "invalid application name \"mysql-0\"", + }, { + about: "unknown application", + plan: "bob/default", + application: "wordpress", + err: "application \"wordpress\" not found.*", + }, { + about: "unknown application", + plan: "bob/default", + application: "mysql", + apiErr: errors.New("some strange error"), + err: "some strange error", + }, + } + for i, test := range tests { + c.Logf("running test %d: %v", i, test.about) + s.mockAPI.ResetCalls() + if test.apiErr != nil { + s.mockAPI.SetErrors(test.apiErr) + } + _, err := cmdtesting.RunCommand(c, setplan.NewSetPlanCommand(), test.application, test.plan) + if test.err == "" { + c.Assert(err, jc.ErrorIsNil) + c.Assert(s.mockAPI.Calls(), gc.HasLen, 1) + s.mockAPI.CheckCalls(c, test.apiCalls) + + app, err := s.State.Application("mysql") + c.Assert(err, jc.ErrorIsNil) + svcMacaroon := app.MetricCredentials() + data, err := json.Marshal(macaroon.Slice{s.mockAPI.macaroon}) + c.Assert(err, jc.ErrorIsNil) + c.Assert(svcMacaroon, gc.DeepEquals, data) + } else { + 
c.Assert(err, gc.ErrorMatches, test.err) + c.Assert(s.mockAPI.Calls(), gc.HasLen, 0) + } + } +} + +func (s *setPlanCommandSuite) TestNoArgs(c *gc.C) { + _, err := cmdtesting.RunCommand(c, setplan.NewSetPlanCommand()) + c.Assert(err, gc.ErrorMatches, "need to specify application name and plan url") +} + +func newMockAPI() (*mockapi, error) { + kp, err := bakery.GenerateKey() + if err != nil { + return nil, errors.Trace(err) + } + svc, err := bakery.NewService(bakery.NewServiceParams{ + Location: "omnibus", + Key: kp, + }) + if err != nil { + return nil, errors.Trace(err) + } + return &mockapi{ + service: svc, + }, nil +} + +type mockapi struct { + testing.Stub + + service *bakery.Service + macaroon *macaroon.Macaroon +} + +func (m *mockapi) Authorize(modelUUID, charmURL, applicationName, plan string, visitWebPage func(*url.URL) error) (*macaroon.Macaroon, error) { + err := m.NextErr() + if err != nil { + return nil, errors.Trace(err) + } + m.AddCall("Authorize", modelUUID, charmURL, applicationName) + macaroon, err := m.service.NewMacaroon( + "", + nil, + []checkers.Caveat{ + checkers.DeclaredCaveat("environment", modelUUID), + checkers.DeclaredCaveat("charm", charmURL), + checkers.DeclaredCaveat("service", applicationName), + checkers.DeclaredCaveat("plan", plan), + }, + ) + if err != nil { + return nil, errors.Trace(err) + } + m.macaroon = macaroon + return m.macaroon, nil +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/romulus/showbudget/export_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/romulus/showbudget/export_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/romulus/showbudget/export_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/romulus/showbudget/export_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,29 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+ +package showbudget + +import ( + "gopkg.in/macaroon-bakery.v1/httpbakery" +) + +var ( + NewBudgetAPIClient = &newBudgetAPIClient + NewAPIClient = &newAPIClient +) + +// APIClientFnc returns a function that returns the provided APIClient +// and can be used to patch the NewAPIClient variable in tests +func NewAPIClientFnc(api APIClient) func(*showBudgetCommand) (APIClient, error) { + return func(*showBudgetCommand) (APIClient, error) { + return api, nil + } +} + +// BudgetAPIClientFnc returns a function that returns the provided budgetAPIClient +// and can be used to patch the NewBudgetAPIClient variable for tests. +func BudgetAPIClientFnc(api budgetAPIClient) func(*httpbakery.Client) (budgetAPIClient, error) { + return func(*httpbakery.Client) (budgetAPIClient, error) { + return api, nil + } +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/romulus/showbudget/package_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/romulus/showbudget/package_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/romulus/showbudget/package_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/romulus/showbudget/package_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,14 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package showbudget_test + +import ( + stdtesting "testing" + + gc "gopkg.in/check.v1" +) + +func TestAll(t *stdtesting.T) { + gc.TestingT(t) +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/romulus/showbudget/show_budget.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/romulus/showbudget/show_budget.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/romulus/showbudget/show_budget.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/romulus/showbudget/show_budget.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,191 @@ +// Copyright 2016 Canonical Ltd. 
+// Licensed under the AGPLv3, see LICENCE file for details. + +package showbudget + +import ( + "fmt" + "io" + "sort" + + "github.com/gosuri/uitable" + "github.com/juju/cmd" + "github.com/juju/errors" + "github.com/juju/gnuflag" + "github.com/juju/juju/api/modelmanager" + "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/cmd/modelcmd" + "github.com/juju/loggo" + "gopkg.in/juju/names.v2" + "gopkg.in/macaroon-bakery.v1/httpbakery" + + api "github.com/juju/romulus/api/budget" + wireformat "github.com/juju/romulus/wireformat/budget" +) + +var logger = loggo.GetLogger("romulus.cmd.showbudget") + +// NewShowBudgetCommand returns a new command that is used +// to show details of the specified wireformat. +func NewShowBudgetCommand() modelcmd.CommandBase { + return &showBudgetCommand{} +} + +type showBudgetCommand struct { + modelcmd.ModelCommandBase + + out cmd.Output + budget string +} + +const showBudgetDoc = ` +Display budget usage information. + +Examples: + juju show-budget personal +` + +// Info implements cmd.Command.Info. +func (c *showBudgetCommand) Info() *cmd.Info { + return &cmd.Info{ + Name: "show-budget", + Args: "", + Purpose: "Show details about a budget.", + Doc: showBudgetDoc, + } +} + +// Init implements cmd.Command.Init. +func (c *showBudgetCommand) Init(args []string) error { + if len(args) < 1 { + return errors.New("missing arguments") + } + c.budget, args = args[0], args[1:] + + return cmd.CheckEmpty(args) +} + +// SetFlags implements cmd.Command.SetFlags. 
+func (c *showBudgetCommand) SetFlags(f *gnuflag.FlagSet) { + c.ModelCommandBase.SetFlags(f) + c.out.AddFlags(f, "tabular", map[string]cmd.Formatter{ + "tabular": formatTabular, + "json": cmd.FormatJson, + }) +} + +func (c *showBudgetCommand) Run(ctx *cmd.Context) error { + client, err := c.BakeryClient() + if err != nil { + return errors.Annotate(err, "failed to create an http client") + } + api, err := newBudgetAPIClient(client) + if err != nil { + return errors.Annotate(err, "failed to create an api client") + } + budget, err := api.GetBudget(c.budget) + if err != nil { + return errors.Annotate(err, "failed to retrieve the budget") + } + c.resolveModelNames(budget) + err = c.out.Write(ctx, budget) + return errors.Trace(err) +} + +// resolveModelNames is a best-effort method to resolve model names - if we +// encounter any error, we do not issue an error. +func (c *showBudgetCommand) resolveModelNames(budget *wireformat.BudgetWithAllocations) { + models := make([]names.ModelTag, len(budget.Allocations)) + for i, allocation := range budget.Allocations { + models[i] = names.NewModelTag(allocation.Model) + } + client, err := newAPIClient(c) + if err != nil { + logger.Errorf("failed to open the API client: %v", err) + return + } + modelInfoSlice, err := client.ModelInfo(models) + if err != nil { + logger.Errorf("failed to retrieve model info: %v", err) + return + } + for j, info := range modelInfoSlice { + if info.Error != nil { + logger.Errorf("failed to get model info for model %q: %v", models[j], info.Error) + continue + } + for i, allocation := range budget.Allocations { + if info.Result.UUID == allocation.Model { + budget.Allocations[i].Model = info.Result.Name + } + } + } +} + +// formatTabular returns a tabular view of available budgets. 
+func formatTabular(writer io.Writer, value interface{}) error { + b, ok := value.(*wireformat.BudgetWithAllocations) + if !ok { + return errors.Errorf("expected value of type %T, got %T", b, value) + } + + table := uitable.New() + table.MaxColWidth = 50 + table.Wrap = true + for _, col := range []int{2, 3, 5} { + table.RightAlign(col) + } + + table.AddRow("MODEL", "SERVICES", "SPENT", "ALLOCATED", "BY", "USAGE") + for _, allocation := range b.Allocations { + firstLine := true + // We'll sort the service names to avoid nondeterministic + // command output. + services := make([]string, 0, len(allocation.Services)) + for serviceName, _ := range allocation.Services { + services = append(services, serviceName) + } + sort.Strings(services) + for _, serviceName := range services { + service, _ := allocation.Services[serviceName] + if firstLine { + table.AddRow(allocation.Model, serviceName, service.Consumed, allocation.Limit, allocation.Owner, allocation.Usage) + firstLine = false + continue + } + table.AddRow("", serviceName, service.Consumed, "", "") + } + + } + table.AddRow("", "", "", "", "") + table.AddRow("TOTAL", "", b.Total.Consumed, b.Total.Allocated, "", b.Total.Usage) + table.AddRow("BUDGET", "", "", b.Limit, "") + table.AddRow("UNALLOCATED", "", "", b.Total.Unallocated, "") + fmt.Fprint(writer, table) + return nil +} + +var newAPIClient = newAPIClientImpl + +func newAPIClientImpl(c *showBudgetCommand) (APIClient, error) { + root, err := c.NewControllerAPIRoot() + if err != nil { + return nil, errors.Trace(err) + } + return modelmanager.NewClient(root), nil +} + +type APIClient interface { + ModelInfo(tags []names.ModelTag) ([]params.ModelInfoResult, error) +} + +var newBudgetAPIClient = newBudgetAPIClientImpl + +func newBudgetAPIClientImpl(c *httpbakery.Client) (budgetAPIClient, error) { + client := api.NewClient(c) + return client, nil +} + +type budgetAPIClient interface { + GetBudget(string) (*wireformat.BudgetWithAllocations, error) +} diff -Nru 
juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/romulus/showbudget/show_budget_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/romulus/showbudget/show_budget_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/romulus/showbudget/show_budget_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/romulus/showbudget/show_budget_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,202 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details.s + +package showbudget_test + +import ( + "github.com/juju/cmd/cmdtesting" + "github.com/juju/errors" + "github.com/juju/romulus/wireformat/budget" + "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + "gopkg.in/juju/names.v2" + + "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/cmd/juju/romulus/showbudget" + coretesting "github.com/juju/juju/testing" +) + +var _ = gc.Suite(&showBudgetSuite{}) + +type showBudgetSuite struct { + coretesting.FakeJujuXDGDataHomeSuite + stub *testing.Stub + mockBudgetAPI *mockBudgetAPI + mockAPI *mockAPI +} + +func (s *showBudgetSuite) SetUpTest(c *gc.C) { + s.CleanupSuite.SetUpTest(c) + s.FakeJujuXDGDataHomeSuite.SetUpTest(c) + s.stub = &testing.Stub{} + s.mockBudgetAPI = &mockBudgetAPI{s.stub} + s.mockAPI = &mockAPI{s.stub} + s.PatchValue(showbudget.NewBudgetAPIClient, showbudget.BudgetAPIClientFnc(s.mockBudgetAPI)) + s.PatchValue(showbudget.NewAPIClient, showbudget.NewAPIClientFnc(s.mockAPI)) +} + +func (s *showBudgetSuite) TestShowBudgetCommand(c *gc.C) { + tests := []struct { + about string + args []string + err string + budget string + apierr string + resolveerr string + output string + }{{ + about: "missing argument", + err: `missing arguments`, + }, { + about: "unknown arguments", + args: []string{"my-special-budget", "extra", "arguments"}, + err: `unrecognized args: \["extra" "arguments"\]`, + }, { + about: "api error", + 
args: []string{"personal"}, + apierr: "well, this is embarrassing", + err: "failed to retrieve the budget: well, this is embarrassing", + }, { + about: "all ok", + args: []string{"personal"}, + budget: "personal", + output: "" + + "MODEL \tSERVICES \tSPENT\tALLOCATED\tBY \tUSAGE\n" + + "model.joe \tmysql \t 200\t 1200\tuser.joe \t 42%\n" + + " \twordpress\t 300\t \t \n" + + "model.jess \tlandscape\t 600\t 1000\tuser.jess\t 60%\n" + + "uuid3 \tmysql \t 10\t 100\tuser.bob \t 10%\n" + + " \t \t \t \t \n" + + "TOTAL \t \t 1110\t 2300\t \t 48%\n" + + "BUDGET \t \t \t 4000\t \n" + + "UNALLOCATED\t \t \t 1700\t \n", + }, { + about: "all ok", + args: []string{"personal"}, + budget: "personal", + resolveerr: "test error", + output: "" + + "MODEL \tSERVICES \tSPENT\tALLOCATED\tBY \tUSAGE\n" + + "uuid1 \tmysql \t 200\t 1200\tuser.joe \t 42%\n" + + " \twordpress\t 300\t \t \n" + + "uuid2 \tlandscape\t 600\t 1000\tuser.jess\t 60%\n" + + "uuid3 \tmysql \t 10\t 100\tuser.bob \t 10%\n" + + " \t \t \t \t \n" + + "TOTAL \t \t 1110\t 2300\t \t 48%\n" + + "BUDGET \t \t \t 4000\t \n" + + "UNALLOCATED\t \t \t 1700\t \n", + }, + } + + for i, test := range tests { + c.Logf("running test %d: %v", i, test.about) + s.mockAPI.ResetCalls() + + errs := []error{} + if test.apierr != "" { + errs = append(errs, errors.New(test.apierr)) + } else { + errs = append(errs, nil) + } + if test.resolveerr != "" { + errs = append(errs, errors.New(test.resolveerr)) + } else { + errs = append(errs, nil) + } + s.mockAPI.SetErrors(errs...) + + showBudget := showbudget.NewShowBudgetCommand() + + ctx, err := cmdtesting.RunCommand(c, showBudget, test.args...) 
+ if test.err == "" { + c.Assert(err, jc.ErrorIsNil) + s.stub.CheckCalls(c, []testing.StubCall{ + {"GetBudget", []interface{}{test.budget}}, + {"ModelInfo", []interface{}{[]names.ModelTag{names.NewModelTag("uuid1"), names.NewModelTag("uuid2"), names.NewModelTag("uuid3")}}}, + }) + output := cmdtesting.Stdout(ctx) + c.Assert(output, gc.Equals, test.output) + } else { + c.Assert(err, gc.ErrorMatches, test.err) + } + } +} + +type mockAPI struct { + *testing.Stub +} + +func (api *mockAPI) ModelInfo(tags []names.ModelTag) ([]params.ModelInfoResult, error) { + api.AddCall("ModelInfo", tags) + return []params.ModelInfoResult{{ + Result: ¶ms.ModelInfo{ + Name: "model.jess", + UUID: "uuid2", + }, + }, { + Result: ¶ms.ModelInfo{ + Name: "model.joe", + UUID: "uuid1", + }, + }, { + Error: ¶ms.Error{ + Message: "not found", + }, + }, + }, api.NextErr() +} + +type mockBudgetAPI struct { + *testing.Stub +} + +func (api *mockBudgetAPI) GetBudget(name string) (*budget.BudgetWithAllocations, error) { + api.AddCall("GetBudget", name) + return &budget.BudgetWithAllocations{ + Limit: "4000", + Total: budget.BudgetTotals{ + Allocated: "2300", + Unallocated: "1700", + Available: "1190", + Consumed: "1110", + Usage: "48%", + }, + Allocations: []budget.Allocation{{ + Owner: "user.joe", + Limit: "1200", + Consumed: "500", + Usage: "42%", + Model: "uuid1", + Services: map[string]budget.ServiceAllocation{ + "wordpress": budget.ServiceAllocation{ + Consumed: "300", + }, + "mysql": budget.ServiceAllocation{ + Consumed: "200", + }, + }, + }, { + Owner: "user.jess", + Limit: "1000", + Consumed: "600", + Usage: "60%", + Model: "uuid2", + Services: map[string]budget.ServiceAllocation{ + "landscape": budget.ServiceAllocation{ + Consumed: "600", + }, + }, + }, { + Owner: "user.bob", + Limit: "100", + Consumed: "10", + Usage: "10%", + Model: "uuid3", + Services: map[string]budget.ServiceAllocation{ + "mysql": budget.ServiceAllocation{ + Consumed: "10", + }, + }, + }}}, api.NextErr() +} diff -Nru 
juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/romulus/updateallocation/export_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/romulus/updateallocation/export_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/romulus/updateallocation/export_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/romulus/updateallocation/export_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,16 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package updateallocation + +import ( + "github.com/juju/cmd" + "github.com/juju/juju/cmd/modelcmd" + "github.com/juju/juju/jujuclient" +) + +func NewUpdateAllocateCommandForTest(api apiClient, store jujuclient.ClientStore) cmd.Command { + c := &updateAllocationCommand{api: api} + c.SetClientStore(store) + return modelcmd.Wrap(c) +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/romulus/updateallocation/package_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/romulus/updateallocation/package_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/romulus/updateallocation/package_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/romulus/updateallocation/package_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,14 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+ +package updateallocation_test + +import ( + stdtesting "testing" + + gc "gopkg.in/check.v1" +) + +func TestAll(t *stdtesting.T) { + gc.TestingT(t) +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/romulus/updateallocation/updateallocation.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/romulus/updateallocation/updateallocation.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/romulus/updateallocation/updateallocation.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/romulus/updateallocation/updateallocation.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,101 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +// Package updateallocation defines the command used to update allocations. +package updateallocation + +import ( + "fmt" + "strconv" + + "github.com/juju/cmd" + "github.com/juju/errors" + "github.com/juju/juju/cmd/modelcmd" + "gopkg.in/macaroon-bakery.v1/httpbakery" + + api "github.com/juju/romulus/api/budget" +) + +type updateAllocationCommand struct { + modelcmd.ModelCommandBase + api apiClient + Name string + Value string +} + +// NewUpdateAllocationCommand returns a new updateAllocationCommand. +func NewUpdateAllocationCommand() modelcmd.ModelCommand { + return &updateAllocationCommand{} +} + +func (c *updateAllocationCommand) newAPIClient(bakery *httpbakery.Client) (apiClient, error) { + if c.api != nil { + return c.api, nil + } + c.api = api.NewClient(bakery) + return c.api, nil +} + +type apiClient interface { + UpdateAllocation(string, string, string) (string, error) +} + +const doc = ` +Updates an existing allocation on an application. + +Examples: + # Sets the allocation for the wordpress application to 10. + juju update-allocation wordpress 10 +` + +// Info implements cmd.Command.Info. 
+func (c *updateAllocationCommand) Info() *cmd.Info { + return &cmd.Info{ + Name: "update-allocation", + Args: " ", + Purpose: "Update an allocation.", + Doc: doc, + } +} + +// Init implements cmd.Command.Init. +func (c *updateAllocationCommand) Init(args []string) error { + if len(args) < 2 { + return errors.New("application and value required") + } + c.Name, c.Value = args[0], args[1] + if _, err := strconv.ParseInt(c.Value, 10, 32); err != nil { + return errors.New("value needs to be a whole number") + } + return cmd.CheckEmpty(args[2:]) +} + +func (c *updateAllocationCommand) modelUUID() (string, error) { + model, err := c.ClientStore().ModelByName(c.ControllerName(), c.ModelName()) + if err != nil { + return "", errors.Trace(err) + } + return model.ModelUUID, nil +} + +// Run implements cmd.Command.Run and contains most of the setbudget logic. +func (c *updateAllocationCommand) Run(ctx *cmd.Context) error { + modelUUID, err := c.modelUUID() + if err != nil { + return errors.Annotate(err, "failed to get model uuid") + } + client, err := c.BakeryClient() + if err != nil { + return errors.Annotate(err, "failed to create an http client") + } + api, err := c.newAPIClient(client) + if err != nil { + return errors.Annotate(err, "failed to create an api client") + } + resp, err := api.UpdateAllocation(modelUUID, c.Name, c.Value) + if err != nil { + return errors.Annotate(err, "failed to update the allocation") + } + fmt.Fprintln(ctx.Stdout, resp) + return nil +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/romulus/updateallocation/updateallocation_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/romulus/updateallocation/updateallocation_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/romulus/updateallocation/updateallocation_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/romulus/updateallocation/updateallocation_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,118 
@@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package updateallocation_test + +import ( + "github.com/juju/cmd" + "github.com/juju/cmd/cmdtesting" + "github.com/juju/errors" + "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/cmd/juju/romulus/updateallocation" + "github.com/juju/juju/jujuclient" + "github.com/juju/juju/jujuclient/jujuclienttesting" + coretesting "github.com/juju/juju/testing" +) + +var _ = gc.Suite(&updateAllocationSuite{}) + +type updateAllocationSuite struct { + coretesting.FakeJujuXDGDataHomeSuite + stub *testing.Stub + mockAPI *mockapi + store jujuclient.ClientStore +} + +func (s *updateAllocationSuite) SetUpTest(c *gc.C) { + s.FakeJujuXDGDataHomeSuite.SetUpTest(c) + s.store = &jujuclienttesting.MemStore{ + Controllers: map[string]jujuclient.ControllerDetails{ + "controller": {}, + }, + Models: map[string]*jujuclient.ControllerModels{ + "controller": { + Models: map[string]jujuclient.ModelDetails{ + "admin/model": {"model-uuid"}, + }, + CurrentModel: "admin/model", + }, + }, + Accounts: map[string]jujuclient.AccountDetails{ + "controller": { + User: "admin", + }, + }, + } + s.stub = &testing.Stub{} + s.mockAPI = newMockAPI(s.stub) +} + +func (s *updateAllocationSuite) run(c *gc.C, args ...string) (*cmd.Context, error) { + updateAlloc := updateallocation.NewUpdateAllocateCommandForTest(s.mockAPI, s.store) + a := []string{"-m", "controller:model"} + a = append(a, args...) + return cmdtesting.RunCommand(c, updateAlloc, a...) 
+} + +func (s *updateAllocationSuite) TestUpdateAllocation(c *gc.C) { + s.mockAPI.resp = "name budget set to 5" + ctx, err := s.run(c, "name", "5") + c.Assert(err, jc.ErrorIsNil) + c.Assert(cmdtesting.Stdout(ctx), jc.DeepEquals, "name budget set to 5\n") + s.mockAPI.CheckCall(c, 0, "UpdateAllocation", "model-uuid", "name", "5") +} + +func (s *updateAllocationSuite) TestUpdateAllocationAPIError(c *gc.C) { + s.stub.SetErrors(errors.New("something failed")) + _, err := s.run(c, "name", "5") + c.Assert(err, gc.ErrorMatches, "failed to update the allocation: something failed") + s.mockAPI.CheckCall(c, 0, "UpdateAllocation", "model-uuid", "name", "5") +} + +func (s *updateAllocationSuite) TestUpdateAllocationErrors(c *gc.C) { + tests := []struct { + about string + args []string + expectedError string + }{ + { + about: "value needs to be a number", + args: []string{"name", "badvalue"}, + expectedError: "value needs to be a whole number", + }, + { + about: "value is missing", + args: []string{"name"}, + expectedError: "application and value required", + }, + { + about: "no args", + args: []string{}, + expectedError: "application and value required", + }, + } + for i, test := range tests { + s.mockAPI.ResetCalls() + c.Logf("test %d: %s", i, test.about) + _, err := s.run(c, test.args...) 
+ c.Check(err, gc.ErrorMatches, test.expectedError) + s.mockAPI.CheckNoCalls(c) + } +} + +func newMockAPI(s *testing.Stub) *mockapi { + return &mockapi{Stub: s} +} + +type mockapi struct { + *testing.Stub + resp string +} + +func (api *mockapi) UpdateAllocation(modelUUID, name, value string) (string, error) { + api.MethodCall(api, "UpdateAllocation", modelUUID, name, value) + return api.resp, api.NextErr() +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/setmeterstatus/setmeterstatus.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/setmeterstatus/setmeterstatus.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/setmeterstatus/setmeterstatus.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/setmeterstatus/setmeterstatus.go 2016-10-13 14:31:49.000000000 +0000 @@ -8,8 +8,8 @@ "github.com/juju/cmd" "github.com/juju/errors" + "github.com/juju/gnuflag" "gopkg.in/juju/names.v2" - "launchpad.net/gnuflag" "github.com/juju/juju/api/metricsdebug" "github.com/juju/juju/cmd/modelcmd" diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/space/add.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/space/add.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/space/add.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/space/add.go 2016-10-13 14:31:49.000000000 +0000 @@ -11,6 +11,8 @@ "github.com/juju/errors" "github.com/juju/utils/set" + "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/cmd/juju/common" "github.com/juju/juju/cmd/modelcmd" ) @@ -67,6 +69,9 @@ if errors.IsNotSupported(err) { ctx.Infof("cannot add space %q: %v", c.Name, err) } + if params.IsCodeUnauthorized(err) { + common.PermissionsMessage(ctx.Stderr, "add a space") + } return errors.Annotatef(err, "cannot add space %q", c.Name) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/space/add_test.go 
juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/space/add_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/space/add_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/space/add_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -8,6 +8,7 @@ jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" + "github.com/juju/juju/apiserver/params" "github.com/juju/juju/cmd/juju/space" ) @@ -72,3 +73,15 @@ s.api.CheckCallNames(c, "AddSpace", "Close") s.api.CheckCall(c, 0, "AddSpace", "foo", s.Strings("10.1.2.0/24"), true) } + +func (s *AddSuite) TestRunUnauthorizedMentionsJujuGrant(c *gc.C) { + s.api.SetErrors(¶ms.Error{ + Message: "permission denied", + Code: params.CodeUnauthorized, + }) + + s.AssertRunFailsUnauthorized(c, + `*.juju grant.*`, + "foo", "10.1.2.0/24", + ) +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/space/list.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/space/list.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/space/list.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/space/list.go 2016-10-13 14:31:49.000000000 +0000 @@ -5,15 +5,18 @@ import ( "fmt" + "io" "net" + "sort" "strings" "github.com/juju/cmd" "github.com/juju/errors" - "launchpad.net/gnuflag" + "github.com/juju/gnuflag" "github.com/juju/juju/apiserver/params" "github.com/juju/juju/cmd/modelcmd" + "github.com/juju/juju/cmd/output" ) // NewListCommand returns a command used to list spaces. @@ -49,11 +52,11 @@ // SetFlags is defined on the cmd.Command interface. 
func (c *listCommand) SetFlags(f *gnuflag.FlagSet) { c.SpaceCommandBase.SetFlags(f) - c.out.AddFlags(f, "yaml", map[string]cmd.Formatter{ - "yaml": cmd.FormatYaml, - "json": cmd.FormatJson, + c.out.AddFlags(f, "tabular", map[string]cmd.Formatter{ + "yaml": cmd.FormatYaml, + "json": cmd.FormatJson, + "tabular": c.printTabular, }) - f.BoolVar(&c.Short, "short", false, "only display spaces.") } @@ -80,7 +83,7 @@ } if len(spaces) == 0 { ctx.Infof("no spaces to display") - return c.out.Write(ctx, nil) + return nil } if c.Short { @@ -135,6 +138,53 @@ }) } +// printTabular prints the list of spaces in tabular format +func (c *listCommand) printTabular(writer io.Writer, value interface{}) error { + tw := output.TabWriter(writer) + if c.Short { + list, ok := value.(formattedShortList) + if !ok { + return errors.New("unexpected value") + } + fmt.Fprintln(tw, "Space") + spaces := list.Spaces + sort.Strings(spaces) + for _, space := range spaces { + fmt.Fprintf(tw, "%v\n", space) + } + } else { + list, ok := value.(formattedList) + if !ok { + return errors.New("unexpected value") + } + + fmt.Fprintf(tw, "%s\t%s\n", "Space", "Subnets") + spaces := []string{} + for name, _ := range list.Spaces { + spaces = append(spaces, name) + } + sort.Strings(spaces) + for _, name := range spaces { + subnets := list.Spaces[name] + fmt.Fprintf(tw, "%s", name) + if len(subnets) == 0 { + fmt.Fprintf(tw, "\n") + continue + } + cidrs := []string{} + for subnet, _ := range subnets { + cidrs = append(cidrs, subnet) + } + sort.Strings(cidrs) + for _, cidr := range cidrs { + fmt.Fprintf(tw, "\t%v\n", cidr) + } + } + } + tw.Flush() + return nil +} + const ( typeUnknown = "unknown" typeIPv4 = "ipv4" diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/space/list_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/space/list_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/space/list_test.go 2016-08-16 08:56:25.000000000 +0000 +++ 
juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/space/list_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -42,17 +42,17 @@ about: "unrecognized arguments", args: s.Strings("foo"), expectErr: `unrecognized args: \["foo"\]`, - expectFormat: "yaml", + expectFormat: "tabular", }, { about: "invalid format", args: s.Strings("--format", "foo"), expectErr: `invalid value "foo" for flag --format: unknown format "foo"`, - expectFormat: "yaml", + expectFormat: "tabular", }, { about: "invalid format (value is case-sensitive)", args: s.Strings("--format", "JSON"), expectErr: `invalid value "JSON" for flag --format: unknown format "JSON"`, - expectFormat: "yaml", + expectFormat: "tabular", }, { about: "json format", args: s.Strings("--format", "json"), @@ -62,10 +62,14 @@ args: s.Strings("--format", "yaml"), expectFormat: "yaml", }, { + about: "tabular format", + args: s.Strings("--format", "tabular"), + expectFormat: "tabular", + }, { // --output and -o are tested separately in TestOutputFormats. about: "both --output and -o specified (latter overrides former)", args: s.Strings("--output", "foo", "-o", "bar"), - expectFormat: "yaml", + expectFormat: "tabular", }} { c.Logf("test #%d: %s", i, test.about) // Create a new instance of the subcommand for each test, but @@ -175,6 +179,19 @@ } `, "") + "\n" + expectedTabular := `Space Subnets +space1 2001:db8::/32 + invalid +space2 10.1.2.0/24 + 4.3.2.0/28 + +` + expectedShortTabular := `Space +space1 +space2 + +` + assertAPICalls := func() { // Verify the API calls and reset the recorded calls. 
s.api.CheckCallNames(c, "ListSpaces", "Close") @@ -239,10 +256,12 @@ expected string short bool }{ - {"", expectedYAML, false}, // default format is YAML + {"", expectedTabular, false}, // default format is tabular + {"tabular", expectedTabular, false}, {"yaml", expectedYAML, false}, {"json", expectedJSON, false}, - {"", expectedShortYAML, true}, // default format is YAML + {"", expectedShortTabular, true}, // default format is tabular + {"tabular", expectedShortTabular, true}, {"yaml", expectedShortYAML, true}, {"json", expectedShortJSON, true}, } { diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/space/package_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/space/package_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/space/package_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/space/package_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,6 +4,7 @@ package space_test import ( + "strings" stdtesting "testing" "github.com/juju/cmd" @@ -91,6 +92,15 @@ return err } +// AssertRunFailsUnauthoirzed is a shortcut for calling RunCommand with the +// passed args then asserting the error is as expected, finally returning the +// error. +func (s *BaseSpaceSuite) AssertRunFailsUnauthorized(c *gc.C, expectErr string, args ...string) error { + _, stderr, err := s.RunCommand(c, args...) + c.Assert(strings.Replace(stderr, "\n", " ", -1), gc.Matches, `.*juju grant.*`) + return err +} + // AssertRunFails is a shortcut for calling RunCommand with the // passed args then asserting the output is empty and the error is as // expected, finally returning the error. 
diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/space/rename.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/space/rename.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/space/rename.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/space/rename.go 2016-10-13 14:31:49.000000000 +0000 @@ -8,8 +8,8 @@ "github.com/juju/cmd" "github.com/juju/errors" + "github.com/juju/gnuflag" "gopkg.in/juju/names.v2" - "launchpad.net/gnuflag" "github.com/juju/juju/cmd/modelcmd" ) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/space/update.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/space/update.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/space/update.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/space/update.go 2016-10-13 14:31:49.000000000 +0000 @@ -9,7 +9,6 @@ "github.com/juju/cmd" "github.com/juju/errors" "github.com/juju/utils/set" - "launchpad.net/gnuflag" "github.com/juju/juju/cmd/modelcmd" ) @@ -32,10 +31,6 @@ CIDRs) "leave" their current space and "enter" the one we're updating. ` -func (c *updateCommand) SetFlags(f *gnuflag.FlagSet) { - c.SpaceCommandBase.SetFlags(f) -} - // Info is defined on the cmd.Command interface. 
func (c *updateCommand) Info() *cmd.Info { return &cmd.Info{ diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/status/formatted.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/status/formatted.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/status/formatted.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/status/formatted.go 2016-10-13 14:31:49.000000000 +0000 @@ -5,6 +5,9 @@ import ( "encoding/json" + "fmt" + + "gopkg.in/juju/names.v2" "github.com/juju/juju/instance" "github.com/juju/juju/status" @@ -39,6 +42,7 @@ Err error `json:"-" yaml:",omitempty"` JujuStatus statusInfoContents `json:"juju-status,omitempty" yaml:"juju-status,omitempty"` DNSName string `json:"dns-name,omitempty" yaml:"dns-name,omitempty"` + IPAddresses []string `json:"ip-addresses,omitempty" yaml:"ip-addresses,omitempty"` InstanceId instance.Id `json:"instance-id,omitempty" yaml:"instance-id,omitempty"` MachineStatus statusInfoContents `json:"machine-status,omitempty" yaml:"machine-status,omitempty"` Series string `json:"series,omitempty" yaml:"series,omitempty"` @@ -111,6 +115,7 @@ JujuStatusInfo statusInfoContents `json:"juju-status,omitempty" yaml:"juju-status"` MeterStatus *meterStatus `json:"meter-status,omitempty" yaml:"meter-status,omitempty"` + Leader bool `json:"leader,omitempty" yaml:"leader,omitempty"` Charm string `json:"upgrading-from,omitempty" yaml:"upgrading-from,omitempty"` Machine string `json:"machine,omitempty" yaml:"machine,omitempty"` OpenedPorts []string `json:"open-ports,omitempty" yaml:"open-ports,omitempty"` @@ -118,6 +123,42 @@ Subordinates map[string]unitStatus `json:"subordinates,omitempty" yaml:"subordinates,omitempty"` } +func (s *formattedStatus) applicationScale(name string) (string, bool) { + // The current unit count are units that are either in Idle or Executing status. + // In other words, units that are active and available. 
+ currentUnitCount := 0 + desiredUnitCount := 0 + + app := s.Applications[name] + match := func(u unitStatus) { + desiredUnitCount += 1 + switch u.JujuStatusInfo.Current { + case status.Executing, status.Idle: + currentUnitCount += 1 + } + } + // If the app is subordinate to other units, then this is a subordinate charm. + if len(app.SubordinateTo) > 0 { + for _, a := range s.Applications { + for _, u := range a.Units { + for sub, subStatus := range u.Subordinates { + if subAppName, _ := names.UnitApplication(sub); subAppName == name { + match(subStatus) + } + } + } + } + } else { + for _, u := range app.Units { + match(u) + } + } + if currentUnitCount == desiredUnitCount { + return fmt.Sprint(currentUnitCount), false + } + return fmt.Sprintf("%d/%d", currentUnitCount, desiredUnitCount), true +} + type statusInfoContents struct { Err error `json:"-" yaml:",omitempty"` Current status.Status `json:"current,omitempty" yaml:"current,omitempty"` diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/status/formatter.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/status/formatter.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/status/formatter.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/status/formatter.go 2016-10-13 14:31:49.000000000 +0000 @@ -8,6 +8,7 @@ "github.com/juju/utils/series" "gopkg.in/juju/charm.v6-unstable" + "gopkg.in/juju/names.v2" "github.com/juju/juju/apiserver/params" "github.com/juju/juju/cmd/juju/common" @@ -41,15 +42,19 @@ return &sf } -func (sf *statusFormatter) format() formattedStatus { +func (sf *statusFormatter) format() (formattedStatus, error) { if sf.status == nil { - return formattedStatus{} + return formattedStatus{}, nil + } + cloudTag, err := names.ParseCloudTag(sf.status.Model.CloudTag) + if err != nil { + return formattedStatus{}, err } out := formattedStatus{ Model: modelStatus{ Name: sf.status.Model.Name, Controller: sf.controllerName, - Cloud: 
sf.status.Model.Cloud, + Cloud: cloudTag.Id(), CloudRegion: sf.status.Model.CloudRegion, Version: sf.status.Model.Version, AvailableVersion: sf.status.Model.AvailableVersion, @@ -64,7 +69,7 @@ for sn, s := range sf.status.Applications { out.Applications[sn] = sf.formatApplication(sn, s) } - return out + return out, nil } // MachineFormat takes stored model information (params.FullStatus) and formats machine status info. @@ -96,6 +101,7 @@ out = machineStatus{ JujuStatus: sf.getStatusInfoContents(machine.AgentStatus), DNSName: machine.DNSName, + IPAddresses: machine.IPAddresses, InstanceId: machine.InstanceId, MachineStatus: sf.getStatusInfoContents(machine.InstanceStatus), Series: machine.Series, @@ -202,6 +208,7 @@ PublicAddress: info.unit.PublicAddress, Charm: info.unit.Charm, Subordinates: make(map[string]unitStatus), + Leader: info.unit.Leader, } if ms, ok := info.meterStatuses[info.unitName]; ok { @@ -267,7 +274,7 @@ func (sf *statusFormatter) updateUnitStatusInfo(unit *params.UnitStatus, applicationName string) { // TODO(perrito66) add status validation. - if status.Status(unit.WorkloadStatus.Status) == status.StatusError { + if status.Status(unit.WorkloadStatus.Status) == status.Error { if relation, ok := sf.relations[getRelationIdFromData(unit)]; ok { // Append the details of the other endpoint on to the status info string. 
if ep, ok := findOtherEndpoint(relation.Endpoints, applicationName); ok { diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/status/history.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/status/history.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/status/history.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/status/history.go 2016-10-13 14:31:49.000000000 +0000 @@ -11,8 +11,8 @@ "github.com/juju/cmd" "github.com/juju/errors" + "github.com/juju/gnuflag" "gopkg.in/juju/names.v2" - "launchpad.net/gnuflag" "github.com/juju/juju/cmd/juju/common" "github.com/juju/juju/cmd/modelcmd" @@ -20,6 +20,8 @@ "github.com/juju/juju/status" ) +// TODO(peritto666) - add tests + // NewStatusHistoryCommand returns a command that reports the history // of status changes for the specified unit. func NewStatusHistoryCommand() cmd.Command { @@ -56,7 +58,7 @@ func (c *statusHistoryCommand) Info() *cmd.Info { return &cmd.Info{ - Name: "status-history", + Name: "show-status-log", Args: "", Purpose: "Output past statuses for the specified entity.", Doc: statusHistoryDoc, @@ -64,6 +66,7 @@ } func (c *statusHistoryCommand) SetFlags(f *gnuflag.FlagSet) { + c.ModelCommandBase.SetFlags(f) f.StringVar(&c.outputContent, "type", "unit", "Type of statuses to be displayed [agent|workload|combined|machine|machineInstance|container|containerinstance]") f.IntVar(&c.backlogSize, "n", 0, "Returns the last N logs (cannot be combined with --days or --date)") f.IntVar(&c.backlogSizeDays, "days", 0, "Returns the logs for the past days (cannot be combined with -n or --date)") @@ -143,7 +146,7 @@ } tag = names.NewUnitTag(c.entityName) default: - if names.IsValidMachine(c.entityName) { + if !names.IsValidMachine(c.entityName) { return errors.Errorf("%q is not a valid name for a %s", c.entityName, kind) } tag = names.NewMachineTag(c.entityName) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/status/output_oneline.go 
juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/status/output_oneline.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/status/output_oneline.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/status/output_oneline.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,19 +4,19 @@ package status import ( - "bytes" "fmt" + "io" "strings" "github.com/juju/errors" "github.com/juju/utils" ) -// FormatOneline returns a brief list of units and their subordinates. +// FormatOneline writes a brief list of units and their subordinates. // Subordinates will be indented 2 spaces and listed under their // superiors. This format works with version 2 of the CLI. -func FormatOneline(value interface{}) ([]byte, error) { - return formatOneline(value, func(out *bytes.Buffer, format, uName string, u unitStatus, level int) { +func FormatOneline(writer io.Writer, value interface{}) error { + return formatOneline(writer, value, func(out io.Writer, format, uName string, u unitStatus, level int) { status := fmt.Sprintf( "agent:%s, workload:%s", u.JujuStatusInfo.Current, @@ -30,14 +30,13 @@ }) } -type onelinePrintf func(out *bytes.Buffer, format, uName string, u unitStatus, level int) +type onelinePrintf func(out io.Writer, format, uName string, u unitStatus, level int) -func formatOneline(value interface{}, printf onelinePrintf) ([]byte, error) { +func formatOneline(writer io.Writer, value interface{}, printf onelinePrintf) error { fs, valueConverted := value.(formattedStatus) if !valueConverted { - return nil, errors.Errorf("expected value of type %T, got %T", fs, value) + return errors.Errorf("expected value of type %T, got %T", fs, value) } - var out bytes.Buffer pprint := func(uName string, u unitStatus, level int) { var fmtPorts string @@ -45,7 +44,7 @@ fmtPorts = fmt.Sprintf(" %s", strings.Join(u.OpenedPorts, ", ")) } format := indent("\n", level*2, "- %s: %s (%v)"+fmtPorts) - printf(&out, format, uName, u, level) + printf(writer, 
format, uName, u, level) } for _, svcName := range utils.SortStringsNaturally(stringKeysFromMap(fs.Applications)) { @@ -57,5 +56,5 @@ } } - return out.Bytes(), nil + return nil } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/status/output_summary.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/status/output_summary.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/status/output_summary.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/status/output_summary.go 2016-10-13 14:31:49.000000000 +0000 @@ -6,18 +6,21 @@ import ( "bytes" "fmt" + "io" "net" "strings" - "text/tabwriter" + "github.com/juju/ansiterm" + "github.com/juju/ansiterm/tabwriter" "github.com/juju/errors" "github.com/juju/utils" "github.com/juju/utils/set" + "github.com/juju/juju/cmd/output" "github.com/juju/juju/status" ) -// FormatSummary returns a summary of the current environment +// FormatSummary writes a summary of the current environment // including the following information: // - Headers: // - All subnets the environment occupies. @@ -27,50 +30,50 @@ // - Units: Displays total #, and then # in each state. // - Applications: Displays total #, their names, and how many of each // are exposed. 
-func FormatSummary(value interface{}) ([]byte, error) { +func FormatSummary(writer io.Writer, value interface{}) error { fs, valueConverted := value.(formattedStatus) if !valueConverted { - return nil, errors.Errorf("expected value of type %T, got %T", fs, value) + return errors.Errorf("expected value of type %T, got %T", fs, value) } - f := newSummaryFormatter() + f := newSummaryFormatter(writer) stateToMachine := f.aggregateMachineStates(fs.Machines) svcExposure := f.aggregateServiceAndUnitStates(fs.Applications) p := f.delimitValuesWithTabs // Print everything out p("Running on subnets:", strings.Join(f.netStrings, ", ")) - p("Utilizing ports:", f.portsInColumnsOf(3)) + p(" Utilizing ports:", f.portsInColumnsOf(3)) f.tw.Flush() // Right align summary information - f.tw.Init(&f.out, 0, 2, 1, ' ', tabwriter.AlignRight) - p("# MACHINES:", fmt.Sprintf("(%d)", len(fs.Machines))) + f.tw.Init(writer, 0, 1, 2, ' ', tabwriter.AlignRight) + p("# Machines:", fmt.Sprintf("(%d)", len(fs.Machines))) f.printStateToCount(stateToMachine) p(" ") - p("# UNITS:", fmt.Sprintf("(%d)", f.numUnits)) + p("# Units:", fmt.Sprintf("(%d)", f.numUnits)) f.printStateToCount(f.stateToUnit) p(" ") - p("# APPLICATIONS:", fmt.Sprintf(" (%d)", len(fs.Applications))) + p("# Applications:", fmt.Sprintf("(%d)", len(fs.Applications))) for _, svcName := range utils.SortStringsNaturally(stringKeysFromMap(svcExposure)) { s := svcExposure[svcName] p(svcName, fmt.Sprintf("%d/%d\texposed", s[true], s[true]+s[false])) } f.tw.Flush() - return f.out.Bytes(), nil + return nil } -func newSummaryFormatter() *summaryFormatter { +func newSummaryFormatter(writer io.Writer) *summaryFormatter { f := &summaryFormatter{ ipAddrs: make([]net.IPNet, 0), netStrings: make([]string, 0), openPorts: set.NewStrings(), stateToUnit: make(map[status.Status]int), } - f.tw = tabwriter.NewWriter(&f.out, 0, 1, 1, ' ', 0) + f.tw = output.TabWriter(writer) return f } @@ -81,8 +84,7 @@ openPorts set.Strings // status -> count stateToUnit 
map[status.Status]int - tw *tabwriter.Writer - out bytes.Buffer + tw *ansiterm.TabWriter } func (f *summaryFormatter) delimitValuesWithTabs(values ...string) { @@ -161,7 +163,7 @@ f.resolveAndTrackIp(m.DNSName) if agentState := m.JujuStatus.Current; agentState == "" { - agentState = status.StatusPending + agentState = status.Pending } else { stateToMachine[agentState]++ } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/status/output_tabular.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/status/output_tabular.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/status/output_tabular.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/status/output_tabular.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,19 +4,19 @@ package status import ( - "bytes" "fmt" "io" "regexp" "sort" "strings" - "text/tabwriter" + "github.com/juju/ansiterm" "github.com/juju/errors" "github.com/juju/utils" "github.com/juju/utils/set" "gopkg.in/juju/charm.v6-unstable/hooks" + "github.com/juju/juju/cmd/output" "github.com/juju/juju/instance" "github.com/juju/juju/status" ) @@ -76,40 +76,25 @@ return r.relations[k] } -func printHelper(tw *tabwriter.Writer) func(...interface{}) { - return func(values ...interface{}) { - for i, v := range values { - if i != len(values)-1 { - fmt.Fprintf(tw, "%v\t", v) - } else { - fmt.Fprintf(tw, "%v", v) - } - } - fmt.Fprintln(tw) - } -} - -func getTabWriter(out io.Writer) *tabwriter.Writer { - padding := 2 - return tabwriter.NewWriter(out, 0, 1, padding, ' ', 0) -} - -// FormatTabular returns a tabular summary of machines, applications, and +// FormatTabular writes a tabular summary of machines, applications, and // units. Any subordinate items are indented by two spaces beneath // their superior. 
-func FormatTabular(value interface{}) ([]byte, error) { - const maxVersionWidth = 7 +func FormatTabular(writer io.Writer, forceColor bool, value interface{}) error { + const maxVersionWidth = 15 const ellipsis = "..." const truncatedWidth = maxVersionWidth - len(ellipsis) fs, valueConverted := value.(formattedStatus) if !valueConverted { - return nil, errors.Errorf("expected value of type %T, got %T", fs, value) + return errors.Errorf("expected value of type %T, got %T", fs, value) } - var out bytes.Buffer // To format things into columns. - tw := getTabWriter(&out) - p := printHelper(tw) + tw := output.TabWriter(writer) + if forceColor { + tw.SetColorCapable(forceColor) + } + w := output.Wrapper{tw} + p := w.Println outputHeaders := func(values ...interface{}) { p() p(values...) @@ -120,12 +105,14 @@ cloudRegion += "/" + fs.Model.CloudRegion } - header := []interface{}{"MODEL", "CONTROLLER", "CLOUD/REGION", "VERSION"} + header := []interface{}{"Model", "Controller", "Cloud/Region", "Version"} values := []interface{}{fs.Model.Name, fs.Model.Controller, cloudRegion, fs.Model.Version} - if fs.Model.AvailableVersion != "" { - header = append(header, "UPGRADE-AVAILABLE") - values = append(values, fs.Model.AvailableVersion) + message := getModelMessage(fs.Model) + if message != "" { + header = append(header, "Notes") + values = append(values, message) } + // The first set of headers don't use outputHeaders because it adds the blank line. p(header...) p(values...) 
@@ -133,7 +120,9 @@ units := make(map[string]unitStatus) metering := false relations := newRelationFormatter() - outputHeaders("APP", "VERSION", "STATUS", "EXPOSED", "ORIGIN", "CHARM", "REV", "OS") + outputHeaders("App", "Version", "Status", "Scale", "Charm", "Store", "Rev", "OS", "Notes") + tw.SetColumnAlignRight(3) + tw.SetColumnAlignRight(6) for _, appName := range utils.SortStringsNaturally(stringKeysFromMap(fs.Applications)) { app := fs.Applications[appName] version := app.Version @@ -141,14 +130,24 @@ if len(version) > maxVersionWidth { version = version[:truncatedWidth] + ellipsis } - p(appName, - version, - app.StatusInfo.Current, - fmt.Sprintf("%t", app.Exposed), + // Notes may well contain other things later. + notes := "" + if app.Exposed { + notes = "exposed" + } + w.Print(appName, version) + w.PrintStatus(app.StatusInfo.Current) + scale, warn := fs.applicationScale(appName) + if warn { + w.PrintColor(output.WarningHighlight, scale) + } else { + w.Print(scale) + } + p(app.CharmName, app.CharmOrigin, - app.CharmName, app.CharmRev, - app.OS) + app.OS, + notes) for un, u := range app.Units { units[un] = u @@ -156,7 +155,6 @@ metering = true } } - // Ensure that we pick a consistent name for peer relations. 
sortedRelTypes := make([]string, 0, len(app.Relations)) for relType := range app.Relations { @@ -172,15 +170,6 @@ } } - if relations.len() > 0 { - outputHeaders("RELATION", "PROVIDES", "CONSUMES", "TYPE") - for _, k := range relations.sorted() { - r := relations.get(k) - if r != nil { - p(r.relation, r.application1, r.application2, r.relationType()) - } - } - } pUnit := func(name string, u unitStatus, level int) { message := u.WorkloadStatusInfo.Message @@ -188,10 +177,13 @@ if agentDoing != "" { message = fmt.Sprintf("(%s) %s", agentDoing, message) } + if u.Leader { + name += "*" + } + w.Print(indent("", level*2, name)) + w.PrintStatus(u.WorkloadStatusInfo.Current) + w.PrintStatus(u.JujuStatusInfo.Current) p( - indent("", level*2, name), - u.WorkloadStatusInfo.Current, - u.JujuStatusInfo.Current, u.Machine, u.PublicAddress, strings.Join(u.OpenedPorts, ","), @@ -199,7 +191,7 @@ ) } - outputHeaders("UNIT", "WORKLOAD", "AGENT", "MACHINE", "PUBLIC-ADDRESS", "PORTS", "MESSAGE") + outputHeaders("Unit", "Workload", "Agent", "Machine", "Public address", "Ports", "Message") for _, name := range utils.SortStringsNaturally(stringKeysFromMap(units)) { u := units[name] pUnit(name, u, 0) @@ -208,7 +200,7 @@ } if metering { - outputHeaders("METER", "STATUS", "MESSAGE") + outputHeaders("Meter", "Status", "Message") for _, name := range utils.SortStringsNaturally(stringKeysFromMap(units)) { u := units[name] if u.MeterStatus != nil { @@ -217,38 +209,44 @@ } } - var pMachine func(machineStatus) - pMachine = func(m machineStatus) { - // We want to display availability zone so extract from hardware info". 
- hw, err := instance.ParseHardware(m.Hardware) - if err != nil { - logger.Warningf("invalid hardware info %s for machine %v", m.Hardware, m) - } - az := "" - if hw.AvailabilityZone != nil { - az = *hw.AvailabilityZone - } - p(m.Id, m.JujuStatus.Current, m.DNSName, m.InstanceId, m.Series, az) - for _, name := range utils.SortStringsNaturally(stringKeysFromMap(m.Containers)) { - pMachine(m.Containers[name]) + p() + printMachines(tw, fs.Machines) + + if relations.len() > 0 { + outputHeaders("Relation", "Provides", "Consumes", "Type") + for _, k := range relations.sorted() { + r := relations.get(k) + if r != nil { + p(r.relation, r.application1, r.application2, r.relationType()) + } } } - p() - printMachines(tw, fs.Machines) tw.Flush() - return out.Bytes(), nil + return nil +} + +func getModelMessage(model modelStatus) string { + // Select the most important message about the model (if any). + switch { + case model.Migration != "": + return "migrating: " + model.Migration + case model.AvailableVersion != "": + return "upgrade available: " + model.AvailableVersion + default: + return "" + } } -func printMachines(tw *tabwriter.Writer, machines map[string]machineStatus) { - p := printHelper(tw) - p("MACHINE", "STATE", "DNS", "INS-ID", "SERIES", "AZ") +func printMachines(tw *ansiterm.TabWriter, machines map[string]machineStatus) { + w := output.Wrapper{tw} + w.Println("Machine", "State", "DNS", "Inst id", "Series", "AZ") for _, name := range utils.SortStringsNaturally(stringKeysFromMap(machines)) { - printMachine(p, machines[name], "") + printMachine(w, machines[name]) } } -func printMachine(p func(...interface{}), m machineStatus, prefix string) { +func printMachine(w output.Wrapper, m machineStatus) { // We want to display availability zone so extract from hardware info". 
hw, err := instance.ParseHardware(m.Hardware) if err != nil { @@ -258,33 +256,35 @@ if hw.AvailabilityZone != nil { az = *hw.AvailabilityZone } - p(prefix+m.Id, m.JujuStatus.Current, m.DNSName, m.InstanceId, m.Series, az) + w.Print(m.Id) + w.PrintStatus(m.JujuStatus.Current) + w.Println(m.DNSName, m.InstanceId, m.Series, az) for _, name := range utils.SortStringsNaturally(stringKeysFromMap(m.Containers)) { - printMachine(p, m.Containers[name], prefix+" ") + printMachine(w, m.Containers[name]) } } -// FormatMachineTabular returns a tabular summary of machine -func FormatMachineTabular(value interface{}) ([]byte, error) { +// FormatMachineTabular writes a tabular summary of machine +func FormatMachineTabular(writer io.Writer, forceColor bool, value interface{}) error { fs, valueConverted := value.(formattedMachineStatus) if !valueConverted { - return nil, errors.Errorf("expected value of type %T, got %T", fs, value) + return errors.Errorf("expected value of type %T, got %T", fs, value) + } + tw := output.TabWriter(writer) + if forceColor { + tw.SetColorCapable(forceColor) } - var out bytes.Buffer - // To format things into columns. - tw := getTabWriter(&out) - printMachines(tw, fs.Machines) tw.Flush() - return out.Bytes(), nil + return nil } // agentDoing returns what hook or action, if any, // the agent is currently executing. // The hook name or action is extracted from the agent message. func agentDoing(agentStatus statusInfoContents) string { - if agentStatus.Current != status.StatusExecuting { + if agentStatus.Current != status.Executing { return "" } // First see if we can determine a hook name. 
diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/status/status.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/status/status.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/status/status.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/status/status.go 2016-10-13 14:31:49.000000000 +0000 @@ -5,13 +5,14 @@ import ( "fmt" + "io" "os" "strconv" "github.com/juju/cmd" "github.com/juju/errors" + "github.com/juju/gnuflag" "github.com/juju/loggo" - "launchpad.net/gnuflag" "github.com/juju/juju/apiserver/params" "github.com/juju/juju/cmd/modelcmd" @@ -37,54 +38,67 @@ patterns []string isoTime bool api statusAPI + + color bool } var usageSummary = ` -Displays the current status of Juju, applications, and units.`[1:] +Reports the current status of the model, machines, applications and units.`[1:] var usageDetails = ` -By default (without argument), the status of Juju and all applications and all -units will be displayed. -Application or unit names may be used as output filters (the '*' can be used -as a wildcard character). -In addition to matched applications and units, related machines, applications, and -units will also be displayed. If a subordinate unit is matched, then its -principal unit will be displayed. If a principal unit is matched, then all -of its subordinates will be displayed. -Explanation of the different formats: -- {short|line|oneline}: List units and their subordinates. For each - unit, the IP address and agent status are listed. -- summary: Displays the subnet(s) and port(s) the model utilises. - Also displays aggregate information about: - - MACHINES: total #, and # in each state. - - UNITS: total #, and # in each state. - - APPLICATIONS: total #, and # exposed of each application. 
-- tabular (default): Displays information in a tabular format in these sections: - - Machines: ID, STATE, DNS, INS-ID, SERIES, AZ - - Applications: NAME, EXPOSED, CHARM - - Units: ID, STATE, VERSION, MACHINE, PORTS, PUBLIC-ADDRESS - - Also displays subordinate units. -- yaml: Displays information on machines, applications, and units in yaml format. -Note: AZ above is the cloud region's availability zone. +By default (without argument), the status of the model, including all +applications and units will be output. + +Application or unit names may be used as output filters (the '*' can be used as +a wildcard character). In addition to matched applications and units, related +machines, applications, and units will also be displayed. If a subordinate unit +is matched, then its principal unit will be displayed. If a principal unit is +matched, then all of its subordinates will be displayed. + +The available output formats are: + +- tabular (default): Displays status in a tabular format with a separate table + for the model, machines, applications, relations (if any) and units. + Note: in this format, the AZ column refers to the cloud region's + availability zone. +- {short|line|oneline}: List units and their subordinates. For each unit, the IP + address and agent status are listed. +- summary: Displays the subnet(s) and port(s) the model utilises. Also displays + aggregate information about: + - Machines: total #, and # in each state. + - Units: total #, and # in each state. + - Applications: total #, and # exposed of each application. +- yaml: Displays information about the model, machines, applications, and units + in structured YAML format. +- json: Displays information about the model, machines, applications, and units + in structured JSON format. 
Examples: - juju status - juju status mysql - juju status nova-* + juju show-status + juju show-status mysql + juju show-status nova-* + +See also: + machines + show-model + show-status-log + storage ` func (c *statusCommand) Info() *cmd.Info { return &cmd.Info{ - Name: "status", + Name: "show-status", Args: "[filter pattern ...]", Purpose: usageSummary, Doc: usageDetails, - Aliases: []string{"show-status"}, + Aliases: []string{"status"}, } } func (c *statusCommand) SetFlags(f *gnuflag.FlagSet) { + c.ModelCommandBase.SetFlags(f) f.BoolVar(&c.isoTime, "utc", false, "Display time as UTC in RFC3339 format") + f.BoolVar(&c.color, "color", false, "Force use of ANSI color codes") defaultFormat := "tabular" @@ -94,7 +108,7 @@ "short": FormatOneline, "oneline": FormatOneline, "line": FormatOneline, - "tabular": FormatTabular, + "tabular": c.FormatTabular, "summary": FormatSummary, }) } @@ -115,12 +129,12 @@ return nil } -var newApiClientForStatus = func(c *statusCommand) (statusAPI, error) { +var newAPIClientForStatus = func(c *statusCommand) (statusAPI, error) { return c.NewAPIClient() } func (c *statusCommand) Run(ctx *cmd.Context) error { - apiclient, err := newApiClientForStatus(c) + apiclient, err := newAPIClientForStatus(c) if err != nil { return errors.Trace(err) } @@ -139,6 +153,13 @@ } formatter := newStatusFormatter(status, c.ControllerName(), c.isoTime) - formatted := formatter.format() + formatted, err := formatter.format() + if err != nil { + return err + } return c.out.Write(ctx, formatted) } + +func (c *statusCommand) FormatTabular(writer io.Writer, value interface{}) error { + return FormatTabular(writer, c.color, value) +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/status/status_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/status/status_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/status/status_test.go 2016-08-16 08:56:25.000000000 +0000 +++ 
juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/status/status_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -162,6 +162,7 @@ "name": "controller", "controller": "kontroll", "cloud": "dummy", + "region": "dummy-region", "version": "1.2.3", } @@ -170,14 +171,15 @@ "current": "started", "since": "01 Apr 15 01:23+10:00", }, - "dns-name": "controller-0.dns", - "instance-id": "controller-0", + "dns-name": "10.0.0.1", + "ip-addresses": []string{"10.0.0.1"}, + "instance-id": "controller-0", "machine-status": M{ "current": "pending", "since": "01 Apr 15 01:23+10:00", }, "series": "quantal", - "hardware": "arch=amd64 cpu-cores=1 mem=1024M root-disk=8192M", + "hardware": "arch=amd64 cores=1 mem=1024M root-disk=8192M", "controller-member-status": "adding-vote", } machine1 = M{ @@ -185,56 +187,60 @@ "current": "started", "since": "01 Apr 15 01:23+10:00", }, - "dns-name": "controller-1.dns", - "instance-id": "controller-1", + "dns-name": "10.0.1.1", + "ip-addresses": []string{"10.0.1.1"}, + "instance-id": "controller-1", "machine-status": M{ "current": "pending", "since": "01 Apr 15 01:23+10:00", }, "series": "quantal", - "hardware": "arch=amd64 cpu-cores=1 mem=1024M root-disk=8192M", + "hardware": "arch=amd64 cores=1 mem=1024M root-disk=8192M", } machine2 = M{ "juju-status": M{ "current": "started", "since": "01 Apr 15 01:23+10:00", }, - "dns-name": "controller-2.dns", - "instance-id": "controller-2", + "dns-name": "10.0.2.1", + "ip-addresses": []string{"10.0.2.1"}, + "instance-id": "controller-2", "machine-status": M{ "current": "pending", "since": "01 Apr 15 01:23+10:00", }, "series": "quantal", - "hardware": "arch=amd64 cpu-cores=1 mem=1024M root-disk=8192M", + "hardware": "arch=amd64 cores=1 mem=1024M root-disk=8192M", } machine3 = M{ "juju-status": M{ "current": "started", "since": "01 Apr 15 01:23+10:00", }, - "dns-name": "controller-3.dns", - "instance-id": "controller-3", + "dns-name": "10.0.3.1", + "ip-addresses": []string{"10.0.3.1"}, + "instance-id": 
"controller-3", "machine-status": M{ "current": "pending", "since": "01 Apr 15 01:23+10:00", }, "series": "quantal", - "hardware": "arch=amd64 cpu-cores=1 mem=1024M root-disk=8192M", + "hardware": "arch=amd64 cores=1 mem=1024M root-disk=8192M", } machine4 = M{ "juju-status": M{ "current": "started", "since": "01 Apr 15 01:23+10:00", }, - "dns-name": "controller-4.dns", - "instance-id": "controller-4", + "dns-name": "10.0.4.1", + "ip-addresses": []string{"10.0.4.1"}, + "instance-id": "controller-4", "machine-status": M{ "current": "pending", "since": "01 Apr 15 01:23+10:00", }, "series": "quantal", - "hardware": "arch=amd64 cpu-cores=1 mem=1024M root-disk=8192M", + "hardware": "arch=amd64 cores=1 mem=1024M root-disk=8192M", } machine1WithContainers = M{ "juju-status": M{ @@ -253,8 +259,9 @@ "current": "started", "since": "01 Apr 15 01:23+10:00", }, - "dns-name": "controller-3.dns", - "instance-id": "controller-3", + "dns-name": "10.0.3.1", + "ip-addresses": []string{"10.0.3.1"}, + "instance-id": "controller-3", "machine-status": M{ "current": "pending", "since": "01 Apr 15 01:23+10:00", @@ -262,8 +269,9 @@ "series": "quantal", }, }, - "dns-name": "controller-2.dns", - "instance-id": "controller-2", + "dns-name": "10.0.2.1", + "ip-addresses": []string{"10.0.2.1"}, + "instance-id": "controller-2", "machine-status": M{ "current": "pending", "since": "01 Apr 15 01:23+10:00", @@ -283,40 +291,45 @@ "series": "quantal", }, }, - "dns-name": "controller-1.dns", - "instance-id": "controller-1", + "dns-name": "10.0.1.1", + "ip-addresses": []string{"10.0.1.1"}, + "instance-id": "controller-1", "machine-status": M{ "current": "pending", "since": "01 Apr 15 01:23+10:00", }, "series": "quantal", - "hardware": "arch=amd64 cpu-cores=1 mem=1024M root-disk=8192M", + "hardware": "arch=amd64 cores=1 mem=1024M root-disk=8192M", } unexposedService = dummyCharm(M{ "application-status": M{ - "current": "unknown", - "message": "Waiting for agent initialization to finish", + "current": 
"waiting", + "message": "waiting for machine", "since": "01 Apr 15 01:23+10:00", }, }) exposedService = dummyCharm(M{ "application-status": M{ - "current": "unknown", - "message": "Waiting for agent initialization to finish", + "current": "waiting", + "message": "waiting for machine", "since": "01 Apr 15 01:23+10:00", }, "exposed": true, }) loggingCharm = M{ - "charm": "cs:quantal/logging-1", - "charm-origin": "jujucharms", - "charm-name": "logging", - "charm-rev": 1, - "series": "quantal", - "os": "ubuntu", - "exposed": true, - "application-status": M{}, + "charm": "cs:quantal/logging-1", + "charm-origin": "jujucharms", + "charm-name": "logging", + "charm-rev": 1, + "series": "quantal", + "os": "ubuntu", + "exposed": true, + "application-status": M{ + "current": "error", + "message": "somehow lost in all those logs", + "since": "01 Apr 15 01:23+10:00", + }, "relations": M{ "logging-directory": L{"wordpress"}, "info": L{"mysql"}, @@ -338,7 +351,7 @@ {"json", json.Marshal, json.Unmarshal}, } -var machineCons = constraints.MustParse("cpu-cores=2 mem=8G root-disk=8G") +var machineCons = constraints.MustParse("cores=2 mem=8G root-disk=8G") var statusTests = []testCase{ // Status tests @@ -371,8 +384,8 @@ startAliveMachine{"0"}, setAddresses{"0", []network.Address{ - network.NewAddress("10.0.0.1"), - network.NewScopedAddress("controller-0.dns", network.ScopePublic), + network.NewAddress("10.0.0.2"), + network.NewScopedAddress("10.0.0.1", network.ScopePublic), }}, expect{ "simulate the PA starting an instance in response to the state change", @@ -384,14 +397,15 @@ "current": "pending", "since": "01 Apr 15 01:23+10:00", }, - "dns-name": "controller-0.dns", - "instance-id": "controller-0", + "dns-name": "10.0.0.1", + "ip-addresses": []string{"10.0.0.1", "10.0.0.2"}, + "instance-id": "controller-0", "machine-status": M{ "current": "pending", "since": "01 Apr 15 01:23+10:00", }, "series": "quantal", - "hardware": "arch=amd64 cpu-cores=1 mem=1024M root-disk=8192M", + 
"hardware": "arch=amd64 cores=1 mem=1024M root-disk=8192M", "controller-member-status": "adding-vote", }, }, @@ -399,13 +413,28 @@ }, }, - setMachineStatus{"0", status.StatusStarted, ""}, + setMachineStatus{"0", status.Started, ""}, expect{ "simulate the MA started and set the machine status", M{ "model": model, "machines": M{ - "0": machine0, + "0": M{ + "juju-status": M{ + "current": "started", + "since": "01 Apr 15 01:23+10:00", + }, + "dns-name": "10.0.0.1", + "ip-addresses": []string{"10.0.0.1", "10.0.0.2"}, + "instance-id": "controller-0", + "machine-status": M{ + "current": "pending", + "since": "01 Apr 15 01:23+10:00", + }, + "series": "quantal", + "hardware": "arch=amd64 cores=1 mem=1024M root-disk=8192M", + "controller-member-status": "adding-vote", + }, }, "applications": M{}, }, @@ -418,8 +447,9 @@ "model": model, "machines": M{ "0": M{ - "dns-name": "controller-0.dns", - "instance-id": "controller-0", + "dns-name": "10.0.0.1", + "ip-addresses": []string{"10.0.0.1", "10.0.0.2"}, + "instance-id": "controller-0", "machine-status": M{ "current": "pending", "since": "01 Apr 15 01:23+10:00", @@ -430,7 +460,7 @@ "version": "1.2.3", }, "series": "quantal", - "hardware": "arch=amd64 cpu-cores=1 mem=1024M root-disk=8192M", + "hardware": "arch=amd64 cores=1 mem=1024M root-disk=8192M", "controller-member-status": "adding-vote", }, }, @@ -442,11 +472,11 @@ "instance with different hardware characteristics", addMachine{machineId: "0", cons: machineCons, job: state.JobManageModel}, setAddresses{"0", []network.Address{ - network.NewAddress("10.0.0.1"), - network.NewScopedAddress("controller-0.dns", network.ScopePublic), + network.NewAddress("10.0.0.2"), + network.NewScopedAddress("10.0.0.1", network.ScopePublic), }}, startAliveMachine{"0"}, - setMachineStatus{"0", status.StatusStarted, ""}, + setMachineStatus{"0", status.Started, ""}, expect{ "machine 0 has specific hardware characteristics", M{ @@ -457,14 +487,15 @@ "current": "started", "since": "01 Apr 15 
01:23+10:00", }, - "dns-name": "controller-0.dns", - "instance-id": "controller-0", + "dns-name": "10.0.0.1", + "ip-addresses": []string{"10.0.0.1", "10.0.0.2"}, + "instance-id": "controller-0", "machine-status": M{ "current": "pending", "since": "01 Apr 15 01:23+10:00", }, "series": "quantal", - "hardware": "arch=amd64 cpu-cores=2 mem=8192M root-disk=8192M", + "hardware": "arch=amd64 cores=2 mem=8192M root-disk=8192M", "controller-member-status": "adding-vote", }, }, @@ -476,7 +507,7 @@ "instance without addresses", addMachine{machineId: "0", cons: machineCons, job: state.JobManageModel}, startAliveMachine{"0"}, - setMachineStatus{"0", status.StatusStarted, ""}, + setMachineStatus{"0", status.Started, ""}, expect{ "machine 0 has no dns-name", M{ @@ -493,7 +524,7 @@ "since": "01 Apr 15 01:23+10:00", }, "series": "quantal", - "hardware": "arch=amd64 cpu-cores=2 mem=8192M root-disk=8192M", + "hardware": "arch=amd64 cores=2 mem=8192M root-disk=8192M", "controller-member-status": "adding-vote", }, }, @@ -545,7 +576,7 @@ "since": "01 Apr 15 01:23+10:00", }, "series": "quantal", - "hardware": "arch=amd64 cpu-cores=1 mem=1024M root-disk=8192M", + "hardware": "arch=amd64 cores=1 mem=1024M root-disk=8192M", "controller-member-status": "adding-vote", }, }, @@ -557,9 +588,9 @@ "add two services and expose one, then add 2 more machines and some units", // step 0 addMachine{machineId: "0", job: state.JobManageModel}, - setAddresses{"0", network.NewAddresses("controller-0.dns")}, + setAddresses{"0", network.NewAddresses("10.0.0.1")}, startAliveMachine{"0"}, - setMachineStatus{"0", status.StatusStarted, ""}, + setMachineStatus{"0", status.Started, ""}, addCharm{"dummy"}, addService{name: "dummy-application", charm: "dummy"}, addService{name: "exposed-application", charm: "dummy"}, @@ -595,13 +626,13 @@ // step 10 addMachine{machineId: "1", job: state.JobHostUnits}, - setAddresses{"1", network.NewAddresses("controller-1.dns")}, + setAddresses{"1", 
network.NewAddresses("10.0.1.1")}, startAliveMachine{"1"}, - setMachineStatus{"1", status.StatusStarted, ""}, + setMachineStatus{"1", status.Started, ""}, addMachine{machineId: "2", job: state.JobHostUnits}, - setAddresses{"2", network.NewAddresses("controller-2.dns")}, + setAddresses{"2", network.NewAddresses("10.0.2.1")}, startAliveMachine{"2"}, - setMachineStatus{"2", status.StatusStarted, ""}, + setMachineStatus{"2", status.Started, ""}, expect{ "two more machines added", M{ @@ -621,7 +652,7 @@ // step 19 addAliveUnit{"dummy-application", "1"}, addAliveUnit{"exposed-application", "2"}, - setAgentStatus{"exposed-application/0", status.StatusError, "You Require More Vespene Gas", nil}, + setAgentStatus{"exposed-application/0", status.Error, "You Require More Vespene Gas", nil}, // Open multiple ports with different protocols, // ensure they're sorted on protocol, then number. openUnitPort{"exposed-application/0", "udp", 10}, @@ -631,8 +662,8 @@ // Simulate some status with no info, while the agent is down. // Status used to be down, we no longer support said state. // now is one of: pending, started, error. - setUnitStatus{"dummy-application/0", status.StatusTerminated, "", nil}, - setAgentStatus{"dummy-application/0", status.StatusIdle, "", nil}, + setUnitStatus{"dummy-application/0", status.Terminated, "", nil}, + setAgentStatus{"dummy-application/0", status.Idle, "", nil}, expect{ "add two units, one alive (in error state), one started", @@ -666,7 +697,7 @@ "open-ports": L{ "2/tcp", "3/tcp", "2/udp", "10/udp", }, - "public-address": "controller-2.dns", + "public-address": "10.0.2.1", }, }, }), @@ -686,7 +717,7 @@ "current": "idle", "since": "01 Apr 15 01:23+10:00", }, - "public-address": "controller-1.dns", + "public-address": "10.0.1.1", }, }, }), @@ -698,12 +729,12 @@ addMachine{machineId: "3", job: state.JobHostUnits}, startMachine{"3"}, // Simulate some status with info, while the agent is down. 
- setAddresses{"3", network.NewAddresses("controller-3.dns")}, - setMachineStatus{"3", status.StatusStopped, "Really?"}, + setAddresses{"3", network.NewAddresses("10.0.3.1")}, + setMachineStatus{"3", status.Stopped, "Really?"}, addMachine{machineId: "4", job: state.JobHostUnits}, - setAddresses{"4", network.NewAddresses("controller-4.dns")}, + setAddresses{"4", network.NewAddresses("10.0.4.1")}, startAliveMachine{"4"}, - setMachineStatus{"4", status.StatusError, "Beware the red toys"}, + setMachineStatus{"4", status.Error, "Beware the red toys"}, ensureDyingUnit{"dummy-application/0"}, addMachine{machineId: "5", job: state.JobHostUnits}, ensureDeadMachine{"5"}, @@ -716,8 +747,9 @@ "1": machine1, "2": machine2, "3": M{ - "dns-name": "controller-3.dns", - "instance-id": "controller-3", + "dns-name": "10.0.3.1", + "ip-addresses": []string{"10.0.3.1"}, + "instance-id": "controller-3", "machine-status": M{ "current": "pending", "since": "01 Apr 15 01:23+10:00", @@ -728,11 +760,12 @@ "since": "01 Apr 15 01:23+10:00", }, "series": "quantal", - "hardware": "arch=amd64 cpu-cores=1 mem=1024M root-disk=8192M", + "hardware": "arch=amd64 cores=1 mem=1024M root-disk=8192M", }, "4": M{ - "dns-name": "controller-4.dns", - "instance-id": "controller-4", + "dns-name": "10.0.4.1", + "ip-addresses": []string{"10.0.4.1"}, + "instance-id": "controller-4", "machine-status": M{ "current": "pending", "since": "01 Apr 15 01:23+10:00", @@ -743,7 +776,7 @@ "since": "01 Apr 15 01:23+10:00", }, "series": "quantal", - "hardware": "arch=amd64 cpu-cores=1 mem=1024M root-disk=8192M", + "hardware": "arch=amd64 cores=1 mem=1024M root-disk=8192M", }, "5": M{ "juju-status": M{ @@ -782,7 +815,7 @@ "open-ports": L{ "2/tcp", "3/tcp", "2/udp", "10/udp", }, - "public-address": "controller-2.dns", + "public-address": "10.0.2.1", }, }, }), @@ -802,7 +835,7 @@ "current": "idle", "since": "01 Apr 15 01:23+10:00", }, - "public-address": "controller-1.dns", + "public-address": "10.0.1.1", }, }, }), @@ -836,7 
+869,7 @@ "current": "idle", "since": "01 Apr 15 01:23+10:00", }, - "public-address": "controller-1.dns", + "public-address": "10.0.1.1", }, }, }), @@ -874,7 +907,7 @@ "open-ports": L{ "2/tcp", "3/tcp", "2/udp", "10/udp", }, - "public-address": "controller-2.dns", + "public-address": "10.0.2.1", }, }, }), @@ -906,7 +939,7 @@ "current": "idle", "since": "01 Apr 15 01:23+10:00", }, - "public-address": "controller-1.dns", + "public-address": "10.0.1.1", }, }, }), @@ -944,7 +977,7 @@ "open-ports": L{ "2/tcp", "3/tcp", "2/udp", "10/udp", }, - "public-address": "controller-2.dns", + "public-address": "10.0.2.1", }, }, }), @@ -977,7 +1010,7 @@ "current": "idle", "since": "01 Apr 15 01:23+10:00", }, - "public-address": "controller-1.dns", + "public-address": "10.0.1.1", }, }, }), @@ -1003,7 +1036,7 @@ "open-ports": L{ "2/tcp", "3/tcp", "2/udp", "10/udp", }, - "public-address": "controller-2.dns", + "public-address": "10.0.2.1", }, }, }), @@ -1014,14 +1047,14 @@ test( // 5 "a unit with a hook relation error", addMachine{machineId: "0", job: state.JobManageModel}, - setAddresses{"0", network.NewAddresses("controller-0.dns")}, + setAddresses{"0", network.NewAddresses("10.0.0.1")}, startAliveMachine{"0"}, - setMachineStatus{"0", status.StatusStarted, ""}, + setMachineStatus{"0", status.Started, ""}, addMachine{machineId: "1", job: state.JobHostUnits}, - setAddresses{"1", network.NewAddresses("controller-1.dns")}, + setAddresses{"1", network.NewAddresses("10.0.1.1")}, startAliveMachine{"1"}, - setMachineStatus{"1", status.StatusStarted, ""}, + setMachineStatus{"1", status.Started, ""}, addCharm{"wordpress"}, addService{name: "wordpress", charm: "wordpress"}, @@ -1033,7 +1066,7 @@ relateServices{"wordpress", "mysql"}, - setAgentStatus{"wordpress/0", status.StatusError, + setAgentStatus{"wordpress/0", status.Error, "hook failed: some-relation-changed", map[string]interface{}{"relation-id": 0}}, @@ -1067,7 +1100,7 @@ "current": "idle", "since": "01 Apr 15 01:23+10:00", }, - 
"public-address": "controller-1.dns", + "public-address": "10.0.1.1", }, }, }), @@ -1076,23 +1109,23 @@ "server": L{"wordpress"}, }, "application-status": M{ - "current": "unknown", - "message": "Waiting for agent initialization to finish", + "current": "waiting", + "message": "waiting for machine", "since": "01 Apr 15 01:23+10:00", }, "units": M{ "mysql/0": M{ "machine": "1", "workload-status": M{ - "current": "unknown", - "message": "Waiting for agent initialization to finish", + "current": "waiting", + "message": "waiting for machine", "since": "01 Apr 15 01:23+10:00", }, "juju-status": M{ "current": "allocating", "since": "01 Apr 15 01:23+10:00", }, - "public-address": "controller-1.dns", + "public-address": "10.0.1.1", }, }, }), @@ -1103,14 +1136,14 @@ test( // 6 "a unit with a hook relation error when the agent is down", addMachine{machineId: "0", job: state.JobManageModel}, - setAddresses{"0", network.NewAddresses("controller-0.dns")}, + setAddresses{"0", network.NewAddresses("10.0.0.1")}, startAliveMachine{"0"}, - setMachineStatus{"0", status.StatusStarted, ""}, + setMachineStatus{"0", status.Started, ""}, addMachine{machineId: "1", job: state.JobHostUnits}, - setAddresses{"1", network.NewAddresses("controller-1.dns")}, + setAddresses{"1", network.NewAddresses("10.0.1.1")}, startAliveMachine{"1"}, - setMachineStatus{"1", status.StatusStarted, ""}, + setMachineStatus{"1", status.Started, ""}, addCharm{"wordpress"}, addService{name: "wordpress", charm: "wordpress"}, @@ -1122,7 +1155,7 @@ relateServices{"wordpress", "mysql"}, - setAgentStatus{"wordpress/0", status.StatusError, + setAgentStatus{"wordpress/0", status.Error, "hook failed: some-relation-changed", map[string]interface{}{"relation-id": 0}}, @@ -1156,7 +1189,7 @@ "current": "idle", "since": "01 Apr 15 01:23+10:00", }, - "public-address": "controller-1.dns", + "public-address": "10.0.1.1", }, }, }), @@ -1165,23 +1198,23 @@ "server": L{"wordpress"}, }, "application-status": M{ - "current": "unknown", - 
"message": "Waiting for agent initialization to finish", + "current": "waiting", + "message": "waiting for machine", "since": "01 Apr 15 01:23+10:00", }, "units": M{ "mysql/0": M{ "machine": "1", "workload-status": M{ - "current": "unknown", - "message": "Waiting for agent initialization to finish", + "current": "waiting", + "message": "waiting for machine", "since": "01 Apr 15 01:23+10:00", }, "juju-status": M{ "current": "allocating", "since": "01 Apr 15 01:23+10:00", }, - "public-address": "controller-1.dns", + "public-address": "10.0.1.1", }, }, }), @@ -1219,16 +1252,16 @@ "dummy-application": dummyCharm(M{ "life": "dying", "application-status": M{ - "current": "unknown", - "message": "Waiting for agent initialization to finish", + "current": "waiting", + "message": "waiting for machine", "since": "01 Apr 15 01:23+10:00", }, "units": M{ "dummy-application/0": M{ "machine": "0", "workload-status": M{ - "current": "unknown", - "message": "Waiting for agent initialization to finish", + "current": "waiting", + "message": "waiting for machine", "since": "01 Apr 15 01:23+10:00", }, "juju-status": M{ @@ -1248,10 +1281,10 @@ addService{name: "dummy-application", charm: "dummy"}, addMachine{machineId: "0", job: state.JobHostUnits}, startAliveMachine{"0"}, - setMachineStatus{"0", status.StatusStarted, ""}, + setMachineStatus{"0", status.Started, ""}, addUnit{"dummy-application", "0"}, - setAgentStatus{"dummy-application/0", status.StatusIdle, "", nil}, - setUnitStatus{"dummy-application/0", status.StatusActive, "", nil}, + setAgentStatus{"dummy-application/0", status.Idle, "", nil}, + setUnitStatus{"dummy-application/0", status.Active, "", nil}, expect{ "unit shows that agent is lost", M{ @@ -1269,7 +1302,7 @@ }, "series": "quantal", - "hardware": "arch=amd64 cpu-cores=1 mem=1024M root-disk=8192M", + "hardware": "arch=amd64 cores=1 mem=1024M root-disk=8192M", }, }, "applications": M{ @@ -1283,7 +1316,7 @@ "machine": "0", "workload-status": M{ "current": "unknown", - 
"message": "agent is lost, sorry! See 'juju status-history dummy-application/0'", + "message": "agent lost, see 'juju show-status-log dummy-application/0'", "since": "01 Apr 15 01:23+10:00", }, "juju-status": M{ @@ -1303,9 +1336,9 @@ test( // 9 "complex scenario with multiple related services", addMachine{machineId: "0", job: state.JobManageModel}, - setAddresses{"0", network.NewAddresses("controller-0.dns")}, + setAddresses{"0", network.NewAddresses("10.0.0.1")}, startAliveMachine{"0"}, - setMachineStatus{"0", status.StatusStarted, ""}, + setMachineStatus{"0", status.Started, ""}, addCharm{"wordpress"}, addCharm{"mysql"}, addCharm{"varnish"}, @@ -1313,37 +1346,37 @@ addService{name: "project", charm: "wordpress"}, setServiceExposed{"project", true}, addMachine{machineId: "1", job: state.JobHostUnits}, - setAddresses{"1", network.NewAddresses("controller-1.dns")}, + setAddresses{"1", network.NewAddresses("10.0.1.1")}, startAliveMachine{"1"}, - setMachineStatus{"1", status.StatusStarted, ""}, + setMachineStatus{"1", status.Started, ""}, addAliveUnit{"project", "1"}, - setAgentStatus{"project/0", status.StatusIdle, "", nil}, - setUnitStatus{"project/0", status.StatusActive, "", nil}, + setAgentStatus{"project/0", status.Idle, "", nil}, + setUnitStatus{"project/0", status.Active, "", nil}, addService{name: "mysql", charm: "mysql"}, setServiceExposed{"mysql", true}, addMachine{machineId: "2", job: state.JobHostUnits}, - setAddresses{"2", network.NewAddresses("controller-2.dns")}, + setAddresses{"2", network.NewAddresses("10.0.2.1")}, startAliveMachine{"2"}, - setMachineStatus{"2", status.StatusStarted, ""}, + setMachineStatus{"2", status.Started, ""}, addAliveUnit{"mysql", "2"}, - setAgentStatus{"mysql/0", status.StatusIdle, "", nil}, - setUnitStatus{"mysql/0", status.StatusActive, "", nil}, + setAgentStatus{"mysql/0", status.Idle, "", nil}, + setUnitStatus{"mysql/0", status.Active, "", nil}, addService{name: "varnish", charm: "varnish"}, setServiceExposed{"varnish", 
true}, addMachine{machineId: "3", job: state.JobHostUnits}, - setAddresses{"3", network.NewAddresses("controller-3.dns")}, + setAddresses{"3", network.NewAddresses("10.0.3.1")}, startAliveMachine{"3"}, - setMachineStatus{"3", status.StatusStarted, ""}, + setMachineStatus{"3", status.Started, ""}, addAliveUnit{"varnish", "3"}, addService{name: "private", charm: "wordpress"}, setServiceExposed{"private", true}, addMachine{machineId: "4", job: state.JobHostUnits}, - setAddresses{"4", network.NewAddresses("controller-4.dns")}, + setAddresses{"4", network.NewAddresses("10.0.4.1")}, startAliveMachine{"4"}, - setMachineStatus{"4", status.StatusStarted, ""}, + setMachineStatus{"4", status.Started, ""}, addAliveUnit{"private", "4"}, relateServices{"project", "mysql"}, @@ -1379,7 +1412,7 @@ "current": "idle", "since": "01 Apr 15 01:23+10:00", }, - "public-address": "controller-1.dns", + "public-address": "10.0.1.1", }, }, "relations": M{ @@ -1404,7 +1437,7 @@ "current": "idle", "since": "01 Apr 15 01:23+10:00", }, - "public-address": "controller-2.dns", + "public-address": "10.0.2.1", }, }, "relations": M{ @@ -1420,23 +1453,23 @@ "os": "ubuntu", "exposed": true, "application-status": M{ - "current": "unknown", - "message": "Waiting for agent initialization to finish", + "current": "waiting", + "message": "waiting for machine", "since": "01 Apr 15 01:23+10:00", }, "units": M{ "varnish/0": M{ "machine": "3", "workload-status": M{ - "current": "unknown", - "message": "Waiting for agent initialization to finish", + "current": "waiting", + "message": "waiting for machine", "since": "01 Apr 15 01:23+10:00", }, "juju-status": M{ "current": "allocating", "since": "01 Apr 15 01:23+10:00", }, - "public-address": "controller-3.dns", + "public-address": "10.0.3.1", }, }, "relations": M{ @@ -1446,23 +1479,23 @@ "private": wordpressCharm(M{ "exposed": true, "application-status": M{ - "current": "unknown", - "message": "Waiting for agent initialization to finish", + "current": "waiting", + 
"message": "waiting for machine", "since": "01 Apr 15 01:23+10:00", }, "units": M{ "private/0": M{ "machine": "4", "workload-status": M{ - "current": "unknown", - "message": "Waiting for agent initialization to finish", + "current": "waiting", + "message": "waiting for machine", "since": "01 Apr 15 01:23+10:00", }, "juju-status": M{ "current": "allocating", "since": "01 Apr 15 01:23+10:00", }, - "public-address": "controller-4.dns", + "public-address": "10.0.4.1", }, }, "relations": M{ @@ -1474,37 +1507,38 @@ }, ), test( // 10 - "simple peer scenario", + "simple peer scenario with leader", addMachine{machineId: "0", job: state.JobManageModel}, - setAddresses{"0", network.NewAddresses("controller-0.dns")}, + setAddresses{"0", network.NewAddresses("10.0.0.1")}, startAliveMachine{"0"}, - setMachineStatus{"0", status.StatusStarted, ""}, + setMachineStatus{"0", status.Started, ""}, addCharm{"riak"}, addCharm{"wordpress"}, addService{name: "riak", charm: "riak"}, setServiceExposed{"riak", true}, addMachine{machineId: "1", job: state.JobHostUnits}, - setAddresses{"1", network.NewAddresses("controller-1.dns")}, + setAddresses{"1", network.NewAddresses("10.0.1.1")}, startAliveMachine{"1"}, - setMachineStatus{"1", status.StatusStarted, ""}, + setMachineStatus{"1", status.Started, ""}, addAliveUnit{"riak", "1"}, - setAgentStatus{"riak/0", status.StatusIdle, "", nil}, - setUnitStatus{"riak/0", status.StatusActive, "", nil}, + setAgentStatus{"riak/0", status.Idle, "", nil}, + setUnitStatus{"riak/0", status.Active, "", nil}, addMachine{machineId: "2", job: state.JobHostUnits}, - setAddresses{"2", network.NewAddresses("controller-2.dns")}, + setAddresses{"2", network.NewAddresses("10.0.2.1")}, startAliveMachine{"2"}, - setMachineStatus{"2", status.StatusStarted, ""}, + setMachineStatus{"2", status.Started, ""}, addAliveUnit{"riak", "2"}, - setAgentStatus{"riak/1", status.StatusIdle, "", nil}, - setUnitStatus{"riak/1", status.StatusActive, "", nil}, + setAgentStatus{"riak/1", 
status.Idle, "", nil}, + setUnitStatus{"riak/1", status.Active, "", nil}, addMachine{machineId: "3", job: state.JobHostUnits}, - setAddresses{"3", network.NewAddresses("controller-3.dns")}, + setAddresses{"3", network.NewAddresses("10.0.3.1")}, startAliveMachine{"3"}, - setMachineStatus{"3", status.StatusStarted, ""}, + setMachineStatus{"3", status.Started, ""}, addAliveUnit{"riak", "3"}, - setAgentStatus{"riak/2", status.StatusIdle, "", nil}, - setUnitStatus{"riak/2", status.StatusActive, "", nil}, + setAgentStatus{"riak/2", status.Idle, "", nil}, + setUnitStatus{"riak/2", status.Active, "", nil}, + setUnitAsLeader{"riak/1"}, expect{ "multiples related peer units", @@ -1540,7 +1574,7 @@ "current": "idle", "since": "01 Apr 15 01:23+10:00", }, - "public-address": "controller-1.dns", + "public-address": "10.0.1.1", }, "riak/1": M{ "machine": "2", @@ -1552,7 +1586,8 @@ "current": "idle", "since": "01 Apr 15 01:23+10:00", }, - "public-address": "controller-2.dns", + "public-address": "10.0.2.1", + "leader": true, }, "riak/2": M{ "machine": "3", @@ -1564,7 +1599,7 @@ "current": "idle", "since": "01 Apr 15 01:23+10:00", }, - "public-address": "controller-3.dns", + "public-address": "10.0.3.1", }, }, "relations": M{ @@ -1578,11 +1613,11 @@ // Subordinate tests test( // 11 - "one application with one subordinate application", + "one application with one subordinate application and leader", addMachine{machineId: "0", job: state.JobManageModel}, - setAddresses{"0", network.NewAddresses("controller-0.dns")}, + setAddresses{"0", network.NewAddresses("10.0.0.1")}, startAliveMachine{"0"}, - setMachineStatus{"0", status.StatusStarted, ""}, + setMachineStatus{"0", status.Started, ""}, addCharm{"wordpress"}, addCharm{"mysql"}, addCharm{"logging"}, @@ -1590,22 +1625,22 @@ addService{name: "wordpress", charm: "wordpress"}, setServiceExposed{"wordpress", true}, addMachine{machineId: "1", job: state.JobHostUnits}, - setAddresses{"1", network.NewAddresses("controller-1.dns")}, + 
setAddresses{"1", network.NewAddresses("10.0.1.1")}, startAliveMachine{"1"}, - setMachineStatus{"1", status.StatusStarted, ""}, + setMachineStatus{"1", status.Started, ""}, addAliveUnit{"wordpress", "1"}, - setAgentStatus{"wordpress/0", status.StatusIdle, "", nil}, - setUnitStatus{"wordpress/0", status.StatusActive, "", nil}, + setAgentStatus{"wordpress/0", status.Idle, "", nil}, + setUnitStatus{"wordpress/0", status.Active, "", nil}, addService{name: "mysql", charm: "mysql"}, setServiceExposed{"mysql", true}, addMachine{machineId: "2", job: state.JobHostUnits}, - setAddresses{"2", network.NewAddresses("controller-2.dns")}, + setAddresses{"2", network.NewAddresses("10.0.2.1")}, startAliveMachine{"2"}, - setMachineStatus{"2", status.StatusStarted, ""}, + setMachineStatus{"2", status.Started, ""}, addAliveUnit{"mysql", "2"}, - setAgentStatus{"mysql/0", status.StatusIdle, "", nil}, - setUnitStatus{"mysql/0", status.StatusActive, "", nil}, + setAgentStatus{"mysql/0", status.Idle, "", nil}, + setUnitStatus{"mysql/0", status.Active, "", nil}, addService{name: "logging", charm: "logging"}, setServiceExposed{"logging", true}, @@ -1618,9 +1653,13 @@ addSubordinate{"mysql/0", "logging"}, setUnitsAlive{"logging"}, - setAgentStatus{"logging/0", status.StatusIdle, "", nil}, - setUnitStatus{"logging/0", status.StatusActive, "", nil}, - setAgentStatus{"logging/1", status.StatusError, "somehow lost in all those logs", nil}, + setAgentStatus{"logging/0", status.Idle, "", nil}, + setUnitStatus{"logging/0", status.Active, "", nil}, + setAgentStatus{"logging/1", status.Error, "somehow lost in all those logs", nil}, + + setUnitAsLeader{"mysql/0"}, + setUnitAsLeader{"logging/1"}, + setUnitAsLeader{"wordpress/0"}, expect{ "multiples related peer units", @@ -1659,10 +1698,11 @@ "current": "idle", "since": "01 Apr 15 01:23+10:00", }, - "public-address": "controller-1.dns", + "public-address": "10.0.1.1", }, }, - "public-address": "controller-1.dns", + "public-address": "10.0.1.1", + 
"leader": true, }, }, "relations": M{ @@ -1698,10 +1738,12 @@ "current": "idle", "since": "01 Apr 15 01:23+10:00", }, - "public-address": "controller-2.dns", + "public-address": "10.0.2.1", + "leader": true, }, }, - "public-address": "controller-2.dns", + "public-address": "10.0.2.1", + "leader": true, }, }, "relations": M{ @@ -1752,10 +1794,11 @@ "current": "idle", "since": "01 Apr 15 01:23+10:00", }, - "public-address": "controller-1.dns", + "public-address": "10.0.1.1", }, }, - "public-address": "controller-1.dns", + "public-address": "10.0.1.1", + "leader": true, }, }, "relations": M{ @@ -1791,10 +1834,12 @@ "current": "idle", "since": "01 Apr 15 01:23+10:00", }, - "public-address": "controller-2.dns", + "public-address": "10.0.2.1", + "leader": true, }, }, - "public-address": "controller-2.dns", + "public-address": "10.0.2.1", + "leader": true, }, }, "relations": M{ @@ -1809,7 +1854,7 @@ // scoped on wordpress/0 scopedExpect{ - "subordinates scoped on logging", + "subordinates scoped on wordpress", []string{"wordpress/0"}, M{ "model": model, @@ -1844,10 +1889,11 @@ "current": "idle", "since": "01 Apr 15 01:23+10:00", }, - "public-address": "controller-1.dns", + "public-address": "10.0.1.1", }, }, - "public-address": "controller-1.dns", + "public-address": "10.0.1.1", + "leader": true, }, }, "relations": M{ @@ -1864,37 +1910,37 @@ "machines with containers", // step 0 addMachine{machineId: "0", job: state.JobManageModel}, - setAddresses{"0", network.NewAddresses("controller-0.dns")}, + setAddresses{"0", network.NewAddresses("10.0.0.1")}, startAliveMachine{"0"}, - setMachineStatus{"0", status.StatusStarted, ""}, + setMachineStatus{"0", status.Started, ""}, addCharm{"mysql"}, addService{name: "mysql", charm: "mysql"}, setServiceExposed{"mysql", true}, // step 7 addMachine{machineId: "1", job: state.JobHostUnits}, - setAddresses{"1", network.NewAddresses("controller-1.dns")}, + setAddresses{"1", network.NewAddresses("10.0.1.1")}, startAliveMachine{"1"}, - 
setMachineStatus{"1", status.StatusStarted, ""}, + setMachineStatus{"1", status.Started, ""}, addAliveUnit{"mysql", "1"}, - setAgentStatus{"mysql/0", status.StatusIdle, "", nil}, - setUnitStatus{"mysql/0", status.StatusActive, "", nil}, + setAgentStatus{"mysql/0", status.Idle, "", nil}, + setUnitStatus{"mysql/0", status.Active, "", nil}, // step 14: A container on machine 1. addContainer{"1", "1/lxd/0", state.JobHostUnits}, - setAddresses{"1/lxd/0", network.NewAddresses("controller-2.dns")}, + setAddresses{"1/lxd/0", network.NewAddresses("10.0.2.1")}, startAliveMachine{"1/lxd/0"}, - setMachineStatus{"1/lxd/0", status.StatusStarted, ""}, + setMachineStatus{"1/lxd/0", status.Started, ""}, addAliveUnit{"mysql", "1/lxd/0"}, - setAgentStatus{"mysql/1", status.StatusIdle, "", nil}, - setUnitStatus{"mysql/1", status.StatusActive, "", nil}, + setAgentStatus{"mysql/1", status.Idle, "", nil}, + setUnitStatus{"mysql/1", status.Active, "", nil}, addContainer{"1", "1/lxd/1", state.JobHostUnits}, // step 22: A nested container. 
addContainer{"1/lxd/0", "1/lxd/0/lxd/0", state.JobHostUnits}, - setAddresses{"1/lxd/0/lxd/0", network.NewAddresses("controller-3.dns")}, + setAddresses{"1/lxd/0/lxd/0", network.NewAddresses("10.0.3.1")}, startAliveMachine{"1/lxd/0/lxd/0"}, - setMachineStatus{"1/lxd/0/lxd/0", status.StatusStarted, ""}, + setMachineStatus{"1/lxd/0/lxd/0", status.Started, ""}, expect{ "machines with nested containers", @@ -1922,7 +1968,7 @@ "current": "idle", "since": "01 Apr 15 01:23+10:00", }, - "public-address": "controller-1.dns", + "public-address": "10.0.1.1", }, "mysql/1": M{ "machine": "1/lxd/0", @@ -1934,7 +1980,7 @@ "current": "idle", "since": "01 Apr 15 01:23+10:00", }, - "public-address": "controller-2.dns", + "public-address": "10.0.2.1", }, }, }), @@ -1960,8 +2006,9 @@ "current": "started", "since": "01 Apr 15 01:23+10:00", }, - "dns-name": "controller-2.dns", - "instance-id": "controller-2", + "dns-name": "10.0.2.1", + "ip-addresses": []string{"10.0.2.1"}, + "instance-id": "controller-2", "machine-status": M{ "current": "pending", "since": "01 Apr 15 01:23+10:00", @@ -1970,15 +2017,16 @@ "series": "quantal", }, }, - "dns-name": "controller-1.dns", - "instance-id": "controller-1", + "dns-name": "10.0.1.1", + "ip-addresses": []string{"10.0.1.1"}, + "instance-id": "controller-1", "machine-status": M{ "current": "pending", "since": "01 Apr 15 01:23+10:00", }, "series": "quantal", - "hardware": "arch=amd64 cpu-cores=1 mem=1024M root-disk=8192M", + "hardware": "arch=amd64 cores=1 mem=1024M root-disk=8192M", }, }, "applications": M{ @@ -1999,7 +2047,7 @@ "current": "idle", "since": "01 Apr 15 01:23+10:00", }, - "public-address": "controller-2.dns", + "public-address": "10.0.2.1", }, }, }), @@ -2010,13 +2058,13 @@ test( // 13 "application with out of date charm", addMachine{machineId: "0", job: state.JobManageModel}, - setAddresses{"0", network.NewAddresses("controller-0.dns")}, + setAddresses{"0", network.NewAddresses("10.0.0.1")}, startAliveMachine{"0"}, - 
setMachineStatus{"0", status.StatusStarted, ""}, + setMachineStatus{"0", status.Started, ""}, addMachine{machineId: "1", job: state.JobHostUnits}, - setAddresses{"1", network.NewAddresses("controller-1.dns")}, + setAddresses{"1", network.NewAddresses("10.0.1.1")}, startAliveMachine{"1"}, - setMachineStatus{"1", status.StatusStarted, ""}, + setMachineStatus{"1", status.Started, ""}, addCharm{"mysql"}, addService{name: "mysql", charm: "mysql"}, setServiceExposed{"mysql", true}, @@ -2036,23 +2084,23 @@ "can-upgrade-to": "cs:quantal/mysql-23", "exposed": true, "application-status": M{ - "current": "unknown", - "message": "Waiting for agent initialization to finish", + "current": "waiting", + "message": "waiting for machine", "since": "01 Apr 15 01:23+10:00", }, "units": M{ "mysql/0": M{ "machine": "1", "workload-status": M{ - "current": "unknown", - "message": "Waiting for agent initialization to finish", + "current": "waiting", + "message": "waiting for machine", "since": "01 Apr 15 01:23+10:00", }, "juju-status": M{ "current": "allocating", "since": "01 Apr 15 01:23+10:00", }, - "public-address": "controller-1.dns", + "public-address": "10.0.1.1", }, }, }), @@ -2063,13 +2111,13 @@ test( // 14 "unit with out of date charm", addMachine{machineId: "0", job: state.JobManageModel}, - setAddresses{"0", network.NewAddresses("controller-0.dns")}, + setAddresses{"0", network.NewAddresses("10.0.0.1")}, startAliveMachine{"0"}, - setMachineStatus{"0", status.StatusStarted, ""}, + setMachineStatus{"0", status.Started, ""}, addMachine{machineId: "1", job: state.JobHostUnits}, - setAddresses{"1", network.NewAddresses("controller-1.dns")}, + setAddresses{"1", network.NewAddresses("10.0.1.1")}, startAliveMachine{"1"}, - setMachineStatus{"1", status.StatusStarted, ""}, + setMachineStatus{"1", status.Started, ""}, addCharm{"mysql"}, addService{name: "mysql", charm: "mysql"}, setServiceExposed{"mysql", true}, @@ -2107,7 +2155,7 @@ "since": "01 Apr 15 01:23+10:00", }, "upgrading-from": 
"cs:quantal/mysql-1", - "public-address": "controller-1.dns", + "public-address": "10.0.1.1", }, }, }), @@ -2118,13 +2166,13 @@ test( // 15 "application and unit with out of date charms", addMachine{machineId: "0", job: state.JobManageModel}, - setAddresses{"0", network.NewAddresses("controller-0.dns")}, + setAddresses{"0", network.NewAddresses("10.0.0.1")}, startAliveMachine{"0"}, - setMachineStatus{"0", status.StatusStarted, ""}, + setMachineStatus{"0", status.Started, ""}, addMachine{machineId: "1", job: state.JobHostUnits}, - setAddresses{"1", network.NewAddresses("controller-1.dns")}, + setAddresses{"1", network.NewAddresses("10.0.1.1")}, startAliveMachine{"1"}, - setMachineStatus{"1", status.StatusStarted, ""}, + setMachineStatus{"1", status.Started, ""}, addCharm{"mysql"}, addService{name: "mysql", charm: "mysql"}, setServiceExposed{"mysql", true}, @@ -2164,7 +2212,7 @@ "since": "01 Apr 15 01:23+10:00", }, "upgrading-from": "cs:quantal/mysql-1", - "public-address": "controller-1.dns", + "public-address": "10.0.1.1", }, }, }), @@ -2175,13 +2223,13 @@ test( // 16 "application with local charm not shown as out of date", addMachine{machineId: "0", job: state.JobManageModel}, - setAddresses{"0", network.NewAddresses("controller-0.dns")}, + setAddresses{"0", network.NewAddresses("10.0.0.1")}, startAliveMachine{"0"}, - setMachineStatus{"0", status.StatusStarted, ""}, + setMachineStatus{"0", status.Started, ""}, addMachine{machineId: "1", job: state.JobHostUnits}, - setAddresses{"1", network.NewAddresses("controller-1.dns")}, + setAddresses{"1", network.NewAddresses("10.0.1.1")}, startAliveMachine{"1"}, - setMachineStatus{"1", status.StatusStarted, ""}, + setMachineStatus{"1", status.Started, ""}, addCharm{"mysql"}, addService{name: "mysql", charm: "mysql"}, setServiceExposed{"mysql", true}, @@ -2220,7 +2268,7 @@ "since": "01 Apr 15 01:23+10:00", }, "upgrading-from": "cs:quantal/mysql-1", - "public-address": "controller-1.dns", + "public-address": "10.0.1.1", }, }, 
}), @@ -2231,35 +2279,36 @@ test( // 17 "deploy two services; set meter statuses on one", addMachine{machineId: "0", job: state.JobManageModel}, - setAddresses{"0", network.NewAddresses("controller-0.dns")}, + setAddresses{"0", network.NewAddresses("10.0.0.1")}, startAliveMachine{"0"}, - setMachineStatus{"0", status.StatusStarted, ""}, + setMachineStatus{"0", status.Started, ""}, addMachine{machineId: "1", job: state.JobHostUnits}, - setAddresses{"1", network.NewAddresses("controller-1.dns")}, + setAddresses{"1", network.NewAddresses("10.0.1.1")}, startAliveMachine{"1"}, - setMachineStatus{"1", status.StatusStarted, ""}, + setMachineStatus{"1", status.Started, ""}, addMachine{machineId: "2", job: state.JobHostUnits}, - setAddresses{"2", network.NewAddresses("controller-2.dns")}, + setAddresses{"2", network.NewAddresses("10.0.2.1")}, startAliveMachine{"2"}, - setMachineStatus{"2", status.StatusStarted, ""}, + setMachineStatus{"2", status.Started, ""}, addMachine{machineId: "3", job: state.JobHostUnits}, - setAddresses{"3", network.NewAddresses("controller-3.dns")}, + setAddresses{"3", network.NewAddresses("10.0.3.1")}, startAliveMachine{"3"}, - setMachineStatus{"3", status.StatusStarted, ""}, + setMachineStatus{"3", status.Started, ""}, addMachine{machineId: "4", job: state.JobHostUnits}, - setAddresses{"4", network.NewAddresses("controller-4.dns")}, + setAddresses{"4", network.NewAddresses("10.0.4.1")}, startAliveMachine{"4"}, - setMachineStatus{"4", status.StatusStarted, ""}, + setMachineStatus{"4", status.Started, ""}, addCharm{"mysql"}, addService{name: "mysql", charm: "mysql"}, setServiceExposed{"mysql", true}, - addService{name: "servicewithmeterstatus", charm: "mysql"}, + addCharm{"metered"}, + addService{name: "servicewithmeterstatus", charm: "metered"}, addAliveUnit{"mysql", "1"}, addAliveUnit{"servicewithmeterstatus", "2"}, @@ -2268,14 +2317,14 @@ setServiceExposed{"mysql", true}, - setAgentStatus{"mysql/0", status.StatusIdle, "", nil}, - 
setUnitStatus{"mysql/0", status.StatusActive, "", nil}, - setAgentStatus{"servicewithmeterstatus/0", status.StatusIdle, "", nil}, - setUnitStatus{"servicewithmeterstatus/0", status.StatusActive, "", nil}, - setAgentStatus{"servicewithmeterstatus/1", status.StatusIdle, "", nil}, - setUnitStatus{"servicewithmeterstatus/1", status.StatusActive, "", nil}, - setAgentStatus{"servicewithmeterstatus/2", status.StatusIdle, "", nil}, - setUnitStatus{"servicewithmeterstatus/2", status.StatusActive, "", nil}, + setAgentStatus{"mysql/0", status.Idle, "", nil}, + setUnitStatus{"mysql/0", status.Active, "", nil}, + setAgentStatus{"servicewithmeterstatus/0", status.Idle, "", nil}, + setUnitStatus{"servicewithmeterstatus/0", status.Active, "", nil}, + setAgentStatus{"servicewithmeterstatus/1", status.Idle, "", nil}, + setUnitStatus{"servicewithmeterstatus/1", status.Active, "", nil}, + setAgentStatus{"servicewithmeterstatus/2", status.Idle, "", nil}, + setUnitStatus{"servicewithmeterstatus/2", status.Active, "", nil}, setUnitMeterStatus{"servicewithmeterstatus/1", "GREEN", "test green status"}, setUnitMeterStatus{"servicewithmeterstatus/2", "RED", "test red status"}, @@ -2309,12 +2358,12 @@ "current": "idle", "since": "01 Apr 15 01:23+10:00", }, - "public-address": "controller-1.dns", + "public-address": "10.0.1.1", }, }, }), - "servicewithmeterstatus": mysqlCharm(M{ + "servicewithmeterstatus": meteredCharm(M{ "application-status": M{ "current": "active", "since": "01 Apr 15 01:23+10:00", @@ -2330,7 +2379,7 @@ "current": "idle", "since": "01 Apr 15 01:23+10:00", }, - "public-address": "controller-2.dns", + "public-address": "10.0.2.1", }, "servicewithmeterstatus/1": M{ "machine": "3", @@ -2346,7 +2395,7 @@ "color": "green", "message": "test green status", }, - "public-address": "controller-3.dns", + "public-address": "10.0.3.1", }, "servicewithmeterstatus/2": M{ "machine": "4", @@ -2362,7 +2411,7 @@ "color": "red", "message": "test red status", }, - "public-address": 
"controller-4.dns", + "public-address": "10.0.4.1", }, }, }), @@ -2380,6 +2429,7 @@ "name": "controller", "controller": "kontroll", "cloud": "dummy", + "region": "dummy-region", "version": "1.2.3", "upgrade-available": "1.2.4", }, @@ -2391,17 +2441,17 @@ test( // 19 "consistent workload version", addMachine{machineId: "0", job: state.JobManageModel}, - setAddresses{"0", network.NewAddresses("controller-0.dns")}, + setAddresses{"0", network.NewAddresses("10.0.0.1")}, startAliveMachine{"0"}, - setMachineStatus{"0", status.StatusStarted, ""}, + setMachineStatus{"0", status.Started, ""}, addCharm{"mysql"}, addService{name: "mysql", charm: "mysql"}, addMachine{machineId: "1", job: state.JobHostUnits}, - setAddresses{"1", network.NewAddresses("controller-1.dns")}, + setAddresses{"1", network.NewAddresses("10.0.1.1")}, startAliveMachine{"1"}, - setMachineStatus{"1", status.StatusStarted, ""}, + setMachineStatus{"1", status.Started, ""}, addAliveUnit{"mysql", "1"}, setUnitWorkloadVersion{"mysql/0", "the best!"}, @@ -2417,23 +2467,23 @@ "mysql": mysqlCharm(M{ "version": "the best!", "application-status": M{ - "current": "unknown", - "message": "Waiting for agent initialization to finish", + "current": "waiting", + "message": "waiting for machine", "since": "01 Apr 15 01:23+10:00", }, "units": M{ "mysql/0": M{ "machine": "1", "workload-status": M{ - "current": "unknown", - "message": "Waiting for agent initialization to finish", + "current": "waiting", + "message": "waiting for machine", "since": "01 Apr 15 01:23+10:00", }, "juju-status": M{ "current": "allocating", "since": "01 Apr 15 01:23+10:00", }, - "public-address": "controller-1.dns", + "public-address": "10.0.1.1", }, }, }), @@ -2444,24 +2494,24 @@ test( // 20 "mixed workload version", addMachine{machineId: "0", job: state.JobManageModel}, - setAddresses{"0", network.NewAddresses("controller-0.dns")}, + setAddresses{"0", network.NewAddresses("10.0.0.1")}, startAliveMachine{"0"}, - setMachineStatus{"0", 
status.StatusStarted, ""}, + setMachineStatus{"0", status.Started, ""}, addCharm{"mysql"}, addService{name: "mysql", charm: "mysql"}, addMachine{machineId: "1", job: state.JobHostUnits}, - setAddresses{"1", network.NewAddresses("controller-1.dns")}, + setAddresses{"1", network.NewAddresses("10.0.1.1")}, startAliveMachine{"1"}, - setMachineStatus{"1", status.StatusStarted, ""}, + setMachineStatus{"1", status.Started, ""}, addAliveUnit{"mysql", "1"}, setUnitWorkloadVersion{"mysql/0", "the best!"}, addMachine{machineId: "2", job: state.JobHostUnits}, - setAddresses{"2", network.NewAddresses("controller-2.dns")}, + setAddresses{"2", network.NewAddresses("10.0.2.1")}, startAliveMachine{"2"}, - setMachineStatus{"2", status.StatusStarted, ""}, + setMachineStatus{"2", status.Started, ""}, addAliveUnit{"mysql", "2"}, setUnitWorkloadVersion{"mysql/1", "not as good"}, @@ -2478,36 +2528,36 @@ "mysql": mysqlCharm(M{ "version": "not as good", "application-status": M{ - "current": "unknown", - "message": "Waiting for agent initialization to finish", + "current": "waiting", + "message": "waiting for machine", "since": "01 Apr 15 01:23+10:00", }, "units": M{ "mysql/0": M{ "machine": "1", "workload-status": M{ - "current": "unknown", - "message": "Waiting for agent initialization to finish", + "current": "waiting", + "message": "waiting for machine", "since": "01 Apr 15 01:23+10:00", }, "juju-status": M{ "current": "allocating", "since": "01 Apr 15 01:23+10:00", }, - "public-address": "controller-1.dns", + "public-address": "10.0.1.1", }, "mysql/1": M{ "machine": "2", "workload-status": M{ - "current": "unknown", - "message": "Waiting for agent initialization to finish", + "current": "waiting", + "message": "waiting for machine", "since": "01 Apr 15 01:23+10:00", }, "juju-status": M{ "current": "allocating", "since": "01 Apr 15 01:23+10:00", }, - "public-address": "controller-2.dns", + "public-address": "10.0.2.1", }, }, }), @@ -2515,6 +2565,77 @@ }, }, ), + test( // 21 + "instance 
with localhost addresses", + addMachine{machineId: "0", cons: machineCons, job: state.JobManageModel}, + setAddresses{"0", []network.Address{ + network.NewScopedAddress("127.0.0.1", network.ScopeMachineLocal), + network.NewScopedAddress("::1", network.ScopeMachineLocal), + network.NewScopedAddress("10.0.0.2", network.ScopeCloudLocal), + }}, + startAliveMachine{"0"}, + setMachineStatus{"0", status.Started, ""}, + expect{ + "machine 0 has localhost addresses that should not display", + M{ + "model": model, + "machines": M{ + "0": M{ + "juju-status": M{ + "current": "started", + "since": "01 Apr 15 01:23+10:00", + }, + "dns-name": "10.0.0.2", + "ip-addresses": []string{"10.0.0.2"}, + "instance-id": "controller-0", + "machine-status": M{ + "current": "pending", + "since": "01 Apr 15 01:23+10:00", + }, + "series": "quantal", + "hardware": "arch=amd64 cores=2 mem=8192M root-disk=8192M", + "controller-member-status": "adding-vote", + }, + }, + "applications": M{}, + }, + }, + ), + test( // 22 + "instance with IPv6 addresses", + addMachine{machineId: "0", cons: machineCons, job: state.JobManageModel}, + setAddresses{"0", []network.Address{ + network.NewScopedAddress("::1", network.ScopeMachineLocal), + network.NewScopedAddress("2001:db8::0:1", network.ScopeCloudLocal), + }}, + startAliveMachine{"0"}, + setMachineStatus{"0", status.Started, ""}, + expect{ + "machine 0 has an IPv6 address", + M{ + "model": model, + "machines": M{ + "0": M{ + "juju-status": M{ + "current": "started", + "since": "01 Apr 15 01:23+10:00", + }, + "dns-name": "2001:db8::0:1", + "ip-addresses": []string{"2001:db8::0:1"}, + "instance-id": "controller-0", + "machine-status": M{ + "current": "pending", + "since": "01 Apr 15 01:23+10:00", + }, + "series": "quantal", + "hardware": "arch=amd64 cores=2 mem=8192M root-disk=8192M", + "controller-member-status": "adding-vote", + }, + }, + "applications": M{}, + }, + }, + ), } func mysqlCharm(extras M) M { @@ -2533,6 +2654,22 @@ return charm } +func 
meteredCharm(extras M) M { + charm := M{ + "charm": "cs:quantal/metered-1", + "charm-origin": "jujucharms", + "charm-name": "metered", + "charm-rev": 1, + "series": "quantal", + "os": "ubuntu", + "exposed": false, + } + for key, value := range extras { + charm[key] = value + } + return charm +} + func dummyCharm(extras M) M { charm := M{ "charm": "cs:quantal/dummy-1", @@ -2632,7 +2769,7 @@ // lp:1558657 now := time.Now() s := status.StatusInfo{ - Status: status.StatusUnknown, + Status: status.Unknown, Message: "missing", Since: &now, } @@ -2869,6 +3006,17 @@ c.Assert(err, jc.ErrorIsNil) } +type setUnitAsLeader struct { + unitName string +} + +func (s setUnitAsLeader) step(c *gc.C, ctx *context) { + u, err := ctx.st.Unit(s.unitName) + c.Assert(err, jc.ErrorIsNil) + err = ctx.st.LeadershipClaimer().ClaimLeadership(u.ApplicationName(), u.Name(), time.Minute) + c.Assert(err, jc.ErrorIsNil) +} + type setUnitStatus struct { unitName string status status.Status @@ -2927,14 +3075,14 @@ // lp:1558657 now := time.Now() s := status.StatusInfo{ - Status: status.StatusActive, + Status: status.Active, Message: "", Since: &now, } err = u.SetStatus(s) c.Assert(err, jc.ErrorIsNil) sInfo := status.StatusInfo{ - Status: status.StatusIdle, + Status: status.Idle, Message: "", Since: &now, } @@ -3135,9 +3283,9 @@ type setToolsUpgradeAvailable struct{} func (ua setToolsUpgradeAvailable) step(c *gc.C, ctx *context) { - env, err := ctx.st.Model() + model, err := ctx.st.Model() c.Assert(err, jc.ErrorIsNil) - err = env.UpdateLatestToolsVersion(nextVersion) + err = model.UpdateLatestToolsVersion(nextVersion) c.Assert(err, jc.ErrorIsNil) } @@ -3156,44 +3304,24 @@ func (s *StatusSuite) TestMigrationInProgress(c *gc.C) { // This test isn't part of statusTests because migrations can't be // run on controller models. 
- - const hostedModelName = "hosted" - const statusText = "foo bar" - - f := factory.NewFactory(s.BackingState) - hostedSt := f.MakeModel(c, &factory.ModelParams{ - Name: hostedModelName, - }) - defer hostedSt.Close() - - mig, err := hostedSt.CreateModelMigration(state.ModelMigrationSpec{ - InitiatedBy: names.NewUserTag("admin"), - TargetInfo: migration.TargetInfo{ - ControllerTag: names.NewModelTag(utils.MustNewUUID().String()), - Addrs: []string{"1.2.3.4:5555", "4.3.2.1:6666"}, - CACert: "cert", - AuthTag: names.NewUserTag("user"), - Password: "password", - }, - }) - c.Assert(err, jc.ErrorIsNil) - err = mig.SetStatusMessage(statusText) - c.Assert(err, jc.ErrorIsNil) + st := s.setupMigrationTest(c) + defer st.Close() expected := M{ "model": M{ - "name": hostedModelName, + "name": "hosted", "controller": "kontroll", "cloud": "dummy", + "region": "dummy-region", "version": "1.2.3", - "migration": statusText, + "migration": "foo bar", }, "machines": M{}, "applications": M{}, } for _, format := range statusFormats { - code, stdout, stderr := runStatus(c, "-m", hostedModelName, "--format", format.name) + code, stdout, stderr := runStatus(c, "-m", "hosted", "--format", format.name) c.Check(code, gc.Equals, 0) c.Assert(stderr, gc.HasLen, 0, gc.Commentf("status failed: %s", stderr)) @@ -3210,18 +3338,92 @@ } } -type fakeApiClient struct { +func (s *StatusSuite) TestMigrationInProgressTabular(c *gc.C) { + expected := ` +Model Controller Cloud/Region Version Notes +hosted kontroll dummy/dummy-region 1.2.3 migrating: foo bar + +App Version Status Scale Charm Store Rev OS Notes + +Unit Workload Agent Machine Public address Ports Message + +Machine State DNS Inst id Series AZ + +`[1:] + + st := s.setupMigrationTest(c) + defer st.Close() + code, stdout, stderr := runStatus(c, "-m", "hosted", "--format", "tabular") + c.Check(code, gc.Equals, 0) + c.Assert(stderr, gc.HasLen, 0, gc.Commentf("status failed: %s", stderr)) + c.Assert(string(stdout), gc.Equals, expected) +} + +func (s 
*StatusSuite) TestMigrationInProgressAndUpgradeAvailable(c *gc.C) { + expected := ` +Model Controller Cloud/Region Version Notes +hosted kontroll dummy/dummy-region 1.2.3 migrating: foo bar + +App Version Status Scale Charm Store Rev OS Notes + +Unit Workload Agent Machine Public address Ports Message + +Machine State DNS Inst id Series AZ + +`[1:] + + st := s.setupMigrationTest(c) + defer st.Close() + + model, err := st.Model() + c.Assert(err, jc.ErrorIsNil) + err = model.UpdateLatestToolsVersion(nextVersion) + c.Assert(err, jc.ErrorIsNil) + + code, stdout, stderr := runStatus(c, "-m", "hosted", "--format", "tabular") + c.Check(code, gc.Equals, 0) + c.Assert(stderr, gc.HasLen, 0, gc.Commentf("status failed: %s", stderr)) + c.Assert(string(stdout), gc.Equals, expected) +} + +func (s *StatusSuite) setupMigrationTest(c *gc.C) *state.State { + const hostedModelName = "hosted" + const statusText = "foo bar" + + f := factory.NewFactory(s.BackingState) + hostedSt := f.MakeModel(c, &factory.ModelParams{ + Name: hostedModelName, + }) + + mig, err := hostedSt.CreateMigration(state.MigrationSpec{ + InitiatedBy: names.NewUserTag("admin"), + TargetInfo: migration.TargetInfo{ + ControllerTag: names.NewControllerTag(utils.MustNewUUID().String()), + Addrs: []string{"1.2.3.4:5555", "4.3.2.1:6666"}, + CACert: "cert", + AuthTag: names.NewUserTag("user"), + Password: "password", + }, + }) + c.Assert(err, jc.ErrorIsNil) + err = mig.SetStatusMessage(statusText) + c.Assert(err, jc.ErrorIsNil) + + return hostedSt +} + +type fakeAPIClient struct { statusReturn *params.FullStatus patternsUsed []string closeCalled bool } -func (a *fakeApiClient) Status(patterns []string) (*params.FullStatus, error) { +func (a *fakeAPIClient) Status(patterns []string) (*params.FullStatus, error) { a.patternsUsed = patterns return a.statusReturn, nil } -func (a *fakeApiClient) Close() error { +func (a *fakeAPIClient) Close() error { a.closeCalled = true return nil } @@ -3233,7 +3435,7 @@ addMachine{machineId: 
"0", job: state.JobManageModel}, setAddresses{"0", network.NewAddresses("localhost")}, startAliveMachine{"0"}, - setMachineStatus{"0", status.StatusStarted, ""}, + setMachineStatus{"0", status.Started, ""}, addCharm{"wordpress"}, addCharm{"mysql"}, addCharm{"logging"}, @@ -3242,19 +3444,19 @@ addMachine{machineId: "1", job: state.JobHostUnits}, setAddresses{"1", network.NewAddresses("localhost")}, startAliveMachine{"1"}, - setMachineStatus{"1", status.StatusStarted, ""}, + setMachineStatus{"1", status.Started, ""}, addAliveUnit{"wordpress", "1"}, - setAgentStatus{"wordpress/0", status.StatusIdle, "", nil}, - setUnitStatus{"wordpress/0", status.StatusActive, "", nil}, + setAgentStatus{"wordpress/0", status.Idle, "", nil}, + setUnitStatus{"wordpress/0", status.Active, "", nil}, addService{name: "mysql", charm: "mysql"}, setServiceExposed{"mysql", true}, addMachine{machineId: "2", job: state.JobHostUnits}, - setAddresses{"2", network.NewAddresses("10.0.0.1")}, + setAddresses{"2", network.NewAddresses("10.0.0.2")}, startAliveMachine{"2"}, - setMachineStatus{"2", status.StatusStarted, ""}, + setMachineStatus{"2", status.Started, ""}, addAliveUnit{"mysql", "2"}, - setAgentStatus{"mysql/0", status.StatusIdle, "", nil}, - setUnitStatus{"mysql/0", status.StatusActive, "", nil}, + setAgentStatus{"mysql/0", status.Idle, "", nil}, + setUnitStatus{"mysql/0", status.Active, "", nil}, addService{name: "logging", charm: "logging"}, setServiceExposed{"logging", true}, relateServices{"wordpress", "mysql"}, @@ -3263,9 +3465,9 @@ addSubordinate{"wordpress/0", "logging"}, addSubordinate{"mysql/0", "logging"}, setUnitsAlive{"logging"}, - setAgentStatus{"logging/0", status.StatusIdle, "", nil}, - setUnitStatus{"logging/0", status.StatusActive, "", nil}, - setAgentStatus{"logging/1", status.StatusError, "somehow lost in all those logs", nil}, + setAgentStatus{"logging/0", status.Idle, "", nil}, + setUnitStatus{"logging/0", status.Active, "", nil}, + setAgentStatus{"logging/1", 
status.Error, "somehow lost in all those logs", nil}, } for _, s := range steps { s.step(c, ctx) @@ -3274,19 +3476,19 @@ c.Check(code, gc.Equals, 0) c.Check(string(stderr), gc.Equals, "") c.Assert(string(stdout), gc.Equals, ` -Running on subnets: 127.0.0.1/8, 10.0.0.1/8 -Utilizing ports: - # MACHINES: (3) - started: 3 - - # UNITS: (4) - active: 3 - error: 1 - - # APPLICATIONS: (3) - logging 1/1 exposed - mysql 1/1 exposed - wordpress 1/1 exposed +Running on subnets: 127.0.0.1/8, 10.0.0.2/8 + Utilizing ports: + # Machines: (3) + started: 3 + + # Units: (4) + active: 3 + error: 1 + + # Applications: (3) + logging 1/1 exposed + mysql 1/1 exposed + wordpress 1/1 exposed `[1:]) } @@ -3295,9 +3497,9 @@ defer s.resetContext(c, ctx) steps := []stepper{ addMachine{machineId: "0", job: state.JobManageModel}, - setAddresses{"0", network.NewAddresses("controller-0.dns")}, + setAddresses{"0", network.NewAddresses("10.0.0.1")}, startAliveMachine{"0"}, - setMachineStatus{"0", status.StatusStarted, ""}, + setMachineStatus{"0", status.Started, ""}, addCharm{"wordpress"}, addCharm{"mysql"}, addCharm{"logging"}, @@ -3305,22 +3507,22 @@ addService{name: "wordpress", charm: "wordpress"}, setServiceExposed{"wordpress", true}, addMachine{machineId: "1", job: state.JobHostUnits}, - setAddresses{"1", network.NewAddresses("controller-1.dns")}, + setAddresses{"1", network.NewAddresses("10.0.1.1")}, startAliveMachine{"1"}, - setMachineStatus{"1", status.StatusStarted, ""}, + setMachineStatus{"1", status.Started, ""}, addAliveUnit{"wordpress", "1"}, - setAgentStatus{"wordpress/0", status.StatusIdle, "", nil}, - setUnitStatus{"wordpress/0", status.StatusActive, "", nil}, + setAgentStatus{"wordpress/0", status.Idle, "", nil}, + setUnitStatus{"wordpress/0", status.Active, "", nil}, addService{name: "mysql", charm: "mysql"}, setServiceExposed{"mysql", true}, addMachine{machineId: "2", job: state.JobHostUnits}, - setAddresses{"2", network.NewAddresses("controller-2.dns")}, + setAddresses{"2", 
network.NewAddresses("10.0.2.1")}, startAliveMachine{"2"}, - setMachineStatus{"2", status.StatusStarted, ""}, + setMachineStatus{"2", status.Started, ""}, addAliveUnit{"mysql", "2"}, - setAgentStatus{"mysql/0", status.StatusIdle, "", nil}, - setUnitStatus{"mysql/0", status.StatusActive, "", nil}, + setAgentStatus{"mysql/0", status.Idle, "", nil}, + setUnitStatus{"mysql/0", status.Active, "", nil}, addService{name: "logging", charm: "logging"}, setServiceExposed{"logging", true}, @@ -3333,18 +3535,18 @@ addSubordinate{"mysql/0", "logging"}, setUnitsAlive{"logging"}, - setAgentStatus{"logging/0", status.StatusIdle, "", nil}, - setUnitStatus{"logging/0", status.StatusActive, "", nil}, - setAgentStatus{"logging/1", status.StatusError, "somehow lost in all those logs", nil}, + setAgentStatus{"logging/0", status.Idle, "", nil}, + setUnitStatus{"logging/0", status.Active, "", nil}, + setAgentStatus{"logging/1", status.Error, "somehow lost in all those logs", nil}, } ctx.run(c, steps) const expected = ` -- mysql/0: controller-2.dns (agent:idle, workload:active) - - logging/1: controller-2.dns (agent:idle, workload:error) -- wordpress/0: controller-1.dns (agent:idle, workload:active) - - logging/0: controller-1.dns (agent:idle, workload:active) +- mysql/0: 10.0.2.1 (agent:idle, workload:active) + - logging/1: 10.0.2.1 (agent:idle, workload:error) +- wordpress/0: 10.0.1.1 (agent:idle, workload:active) + - logging/0: 10.0.1.1 (agent:idle, workload:active) ` assertOneLineStatus(c, expected) } @@ -3373,33 +3575,33 @@ steps := []stepper{ setToolsUpgradeAvailable{}, addMachine{machineId: "0", job: state.JobManageModel}, - setAddresses{"0", network.NewAddresses("controller-0.dns")}, + setAddresses{"0", network.NewAddresses("10.0.0.1")}, startMachineWithHardware{"0", instance.MustParseHardware("availability-zone=us-east-1a")}, - setMachineStatus{"0", status.StatusStarted, ""}, + setMachineStatus{"0", status.Started, ""}, addCharm{"wordpress"}, addCharm{"mysql"}, 
addCharm{"logging"}, addService{name: "wordpress", charm: "wordpress"}, setServiceExposed{"wordpress", true}, addMachine{machineId: "1", job: state.JobHostUnits}, - setAddresses{"1", network.NewAddresses("controller-1.dns")}, + setAddresses{"1", network.NewAddresses("10.0.1.1")}, startAliveMachine{"1"}, - setMachineStatus{"1", status.StatusStarted, ""}, + setMachineStatus{"1", status.Started, ""}, addAliveUnit{"wordpress", "1"}, - setAgentStatus{"wordpress/0", status.StatusIdle, "", nil}, - setUnitStatus{"wordpress/0", status.StatusActive, "", nil}, + setAgentStatus{"wordpress/0", status.Idle, "", nil}, + setUnitStatus{"wordpress/0", status.Active, "", nil}, setUnitTools{"wordpress/0", version.MustParseBinary("1.2.3-trusty-ppc")}, addService{name: "mysql", charm: "mysql"}, setServiceExposed{"mysql", true}, addMachine{machineId: "2", job: state.JobHostUnits}, - setAddresses{"2", network.NewAddresses("controller-2.dns")}, + setAddresses{"2", network.NewAddresses("10.0.2.1")}, startAliveMachine{"2"}, - setMachineStatus{"2", status.StatusStarted, ""}, + setMachineStatus{"2", status.Started, ""}, addAliveUnit{"mysql", "2"}, - setAgentStatus{"mysql/0", status.StatusIdle, "", nil}, + setAgentStatus{"mysql/0", status.Idle, "", nil}, setUnitStatus{ "mysql/0", - status.StatusMaintenance, + status.Maintenance, "installing all the things", nil}, setUnitTools{"mysql/0", version.MustParseBinary("1.2.3-trusty-ppc")}, addService{name: "logging", charm: "logging"}, @@ -3410,12 +3612,15 @@ addSubordinate{"wordpress/0", "logging"}, addSubordinate{"mysql/0", "logging"}, setUnitsAlive{"logging"}, - setAgentStatus{"logging/0", status.StatusIdle, "", nil}, - setUnitStatus{"logging/0", status.StatusActive, "", nil}, - setAgentStatus{"logging/1", status.StatusError, "somehow lost in all those logs", nil}, + setAgentStatus{"logging/0", status.Idle, "", nil}, + setUnitStatus{"logging/0", status.Active, "", nil}, + setAgentStatus{"logging/1", status.Error, "somehow lost in all those logs", 
nil}, setUnitWorkloadVersion{"logging/1", "a bit too long, really"}, setUnitWorkloadVersion{"wordpress/0", "4.5.3"}, setUnitWorkloadVersion{"mysql/0", "5.7.13"}, + setUnitAsLeader{"mysql/0"}, + setUnitAsLeader{"logging/1"}, + setUnitAsLeader{"wordpress/0"}, } for _, s := range steps { s.step(c, ctx) @@ -3434,32 +3639,32 @@ c.Check(code, gc.Equals, 0) c.Check(string(stderr), gc.Equals, "") expected := ` -MODEL CONTROLLER CLOUD/REGION VERSION UPGRADE-AVAILABLE -controller kontroll dummy 1.2.3 1.2.4 +Model Controller Cloud/Region Version Notes +controller kontroll dummy/dummy-region 1.2.3 upgrade available: 1.2.4 -APP VERSION STATUS EXPOSED ORIGIN CHARM REV OS -logging a bi... true jujucharms logging 1 ubuntu -mysql 5.7.13 maintenance true jujucharms mysql 1 ubuntu -wordpress 4.5.3 active true jujucharms wordpress 3 ubuntu +App Version Status Scale Charm Store Rev OS Notes +logging a bit too lo... error 2 logging jujucharms 1 ubuntu exposed +mysql 5.7.13 maintenance 1 mysql jujucharms 1 ubuntu exposed +wordpress 4.5.3 active 1 wordpress jujucharms 3 ubuntu exposed + +Unit Workload Agent Machine Public address Ports Message +mysql/0* maintenance idle 2 10.0.2.1 installing all the things + logging/1* error idle 10.0.2.1 somehow lost in all those logs +wordpress/0* active idle 1 10.0.1.1 + logging/0 active idle 10.0.1.1 + +Machine State DNS Inst id Series AZ +0 started 10.0.0.1 controller-0 quantal us-east-1a +1 started 10.0.1.1 controller-1 quantal +2 started 10.0.2.1 controller-2 quantal -RELATION PROVIDES CONSUMES TYPE +Relation Provides Consumes Type juju-info logging mysql regular logging-dir logging wordpress regular info mysql logging subordinate db mysql wordpress regular logging-directory wordpress logging subordinate -UNIT WORKLOAD AGENT MACHINE PUBLIC-ADDRESS PORTS MESSAGE -mysql/0 maintenance idle 2 controller-2.dns installing all the things - logging/1 error idle controller-2.dns somehow lost in all those logs -wordpress/0 active idle 1 controller-1.dns - 
logging/0 active idle controller-1.dns - -MACHINE STATE DNS INS-ID SERIES AZ -0 started controller-0.dns controller-0 quantal us-east-1a -1 started controller-1.dns controller-1 quantal -2 started controller-2.dns controller-2 quantal - `[1:] c.Assert(string(stdout), gc.Equals, expected) } @@ -3475,21 +3680,21 @@ Units: map[string]unitStatus{ "foo/0": { JujuStatusInfo: statusInfoContents{ - Current: status.StatusExecuting, + Current: status.Executing, Message: "running config-changed hook", }, WorkloadStatusInfo: statusInfoContents{ - Current: status.StatusMaintenance, + Current: status.Maintenance, Message: "doing some work", }, }, "foo/1": { JujuStatusInfo: statusInfoContents{ - Current: status.StatusExecuting, + Current: status.Executing, Message: "running action backup database", }, WorkloadStatusInfo: statusInfoContents{ - Current: status.StatusMaintenance, + Current: status.Maintenance, Message: "doing some work", }, }, @@ -3497,20 +3702,21 @@ }, }, } - out, err := FormatTabular(status) + out := &bytes.Buffer{} + err := FormatTabular(out, false, status) c.Assert(err, jc.ErrorIsNil) - c.Assert(string(out), gc.Equals, ` -MODEL CONTROLLER CLOUD/REGION VERSION + c.Assert(out.String(), gc.Equals, ` +Model Controller Cloud/Region Version -APP VERSION STATUS EXPOSED ORIGIN CHARM REV OS -foo false 0 +App Version Status Scale Charm Store Rev OS Notes +foo 2 0 -UNIT WORKLOAD AGENT MACHINE PUBLIC-ADDRESS PORTS MESSAGE +Unit Workload Agent Machine Public address Ports Message foo/0 maintenance executing (config-changed) doing some work foo/1 maintenance executing (backup database) doing some work -MACHINE STATE DNS INS-ID SERIES AZ +Machine State DNS Inst id Series AZ `[1:]) } @@ -3530,36 +3736,37 @@ }, }, } - out, err := FormatTabular(status) + out := &bytes.Buffer{} + err := FormatTabular(out, false, status) c.Assert(err, jc.ErrorIsNil) - sections, err := splitTableSections(out) + sections, err := splitTableSections(out.Bytes()) c.Assert(err, jc.ErrorIsNil) - 
c.Assert(sections["RELATION"], gc.DeepEquals, []string{ - "RELATION PROVIDES CONSUMES TYPE", + c.Assert(sections["Relation"], gc.DeepEquals, []string{ + "Relation Provides Consumes Type", "replicator foo foo peer", }) } -func (s *StatusSuite) TestStatusWithNilStatusApi(c *gc.C) { +func (s *StatusSuite) TestStatusWithNilStatusAPI(c *gc.C) { ctx := s.newContext(c) defer s.resetContext(c, ctx) steps := []stepper{ addMachine{machineId: "0", job: state.JobManageModel}, - setAddresses{"0", network.NewAddresses("controller-0.dns")}, + setAddresses{"0", network.NewAddresses("10.0.0.1")}, startAliveMachine{"0"}, - setMachineStatus{"0", status.StatusStarted, ""}, + setMachineStatus{"0", status.Started, ""}, } for _, s := range steps { s.step(c, ctx) } - client := fakeApiClient{} + client := fakeAPIClient{} var status = client.Status s.PatchValue(&status, func(_ []string) (*params.FullStatus, error) { return nil, nil }) - s.PatchValue(&newApiClientForStatus, func(_ *statusCommand) (statusAPI, error) { + s.PatchValue(&newAPIClientForStatus, func(_ *statusCommand) (statusAPI, error) { return &client, nil }) @@ -3589,24 +3796,25 @@ }, }, } - out, err := FormatTabular(status) + out := &bytes.Buffer{} + err := FormatTabular(out, false, status) c.Assert(err, jc.ErrorIsNil) - c.Assert(string(out), gc.Equals, ` -MODEL CONTROLLER CLOUD/REGION VERSION + c.Assert(out.String(), gc.Equals, ` +Model Controller Cloud/Region Version -APP VERSION STATUS EXPOSED ORIGIN CHARM REV OS -foo false 0 +App Version Status Scale Charm Store Rev OS Notes +foo 0/2 0 -UNIT WORKLOAD AGENT MACHINE PUBLIC-ADDRESS PORTS MESSAGE +Unit Workload Agent Machine Public address Ports Message foo/0 foo/1 -METER STATUS MESSAGE +Meter Status Message foo/0 strange warning: stable strangelets foo/1 up things are looking up -MACHINE STATE DNS INS-ID SERIES AZ +Machine State DNS Inst id Series AZ `[1:]) } @@ -3623,9 +3831,9 @@ // And the machine's job is to manage the environment addMachine{machineId: "0", job: 
state.JobManageModel}, startAliveMachine{"0"}, - setMachineStatus{"0", status.StatusStarted, ""}, - // And the machine's address is "controller-0.dns" - setAddresses{"0", network.NewAddresses("controller-0.dns")}, + setMachineStatus{"0", status.Started, ""}, + // And the machine's address is "10.0.0.1" + setAddresses{"0", network.NewAddresses("10.0.0.1")}, // And a container is started // And the container's ID is "0/lxd/0" addContainer{"0", "0/lxd/0", state.JobHostUnits}, @@ -3644,28 +3852,28 @@ // And the machine's job is to host units addMachine{machineId: "1", job: state.JobHostUnits}, startAliveMachine{"1"}, - setMachineStatus{"1", status.StatusStarted, ""}, - // And the machine's address is "controller-1.dns" - setAddresses{"1", network.NewAddresses("controller-1.dns")}, + setMachineStatus{"1", status.Started, ""}, + // And the machine's address is "10.0.1.1" + setAddresses{"1", network.NewAddresses("10.0.1.1")}, // And a unit of "wordpress" is deployed to machine "1" addAliveUnit{"wordpress", "1"}, // And the unit is started - setAgentStatus{"wordpress/0", status.StatusIdle, "", nil}, - setUnitStatus{"wordpress/0", status.StatusActive, "", nil}, + setAgentStatus{"wordpress/0", status.Idle, "", nil}, + setUnitStatus{"wordpress/0", status.Active, "", nil}, // And a machine is started // And the machine's ID is "2" // And the machine's job is to host units addMachine{machineId: "2", job: state.JobHostUnits}, startAliveMachine{"2"}, - setMachineStatus{"2", status.StatusStarted, ""}, - // And the machine's address is "controller-2.dns" - setAddresses{"2", network.NewAddresses("controller-2.dns")}, + setMachineStatus{"2", status.Started, ""}, + // And the machine's address is "10.0.2.1" + setAddresses{"2", network.NewAddresses("10.0.2.1")}, // And a unit of "mysql" is deployed to machine "2" addAliveUnit{"mysql", "2"}, // And the unit is started - setAgentStatus{"mysql/0", status.StatusIdle, "", nil}, - setUnitStatus{"mysql/0", status.StatusActive, "", nil}, + 
setAgentStatus{"mysql/0", status.Idle, "", nil}, + setUnitStatus{"mysql/0", status.Active, "", nil}, // And the "logging" service is added addService{name: "logging", charm: "logging"}, // And the service is exposed @@ -3678,12 +3886,12 @@ relateServices{"mysql", "logging"}, // And the "logging" service is a subordinate to unit 0 of the "wordpress" service addSubordinate{"wordpress/0", "logging"}, - setAgentStatus{"logging/0", status.StatusIdle, "", nil}, - setUnitStatus{"logging/0", status.StatusActive, "", nil}, + setAgentStatus{"logging/0", status.Idle, "", nil}, + setUnitStatus{"logging/0", status.Active, "", nil}, // And the "logging" service is a subordinate to unit 0 of the "mysql" service addSubordinate{"mysql/0", "logging"}, - setAgentStatus{"logging/1", status.StatusIdle, "", nil}, - setUnitStatus{"logging/1", status.StatusActive, "", nil}, + setAgentStatus{"logging/1", status.Idle, "", nil}, + setUnitStatus{"logging/1", status.Active, "", nil}, setUnitsAlive{"logging"}, } @@ -3697,17 +3905,17 @@ defer s.resetContext(c, ctx) // Given unit 1 of the "logging" service has an error - setAgentStatus{"logging/1", status.StatusError, "mock error", nil}.step(c, ctx) + setAgentStatus{"logging/1", status.Error, "mock error", nil}.step(c, ctx) // And unit 0 of the "mysql" service has an error - setAgentStatus{"mysql/0", status.StatusError, "mock error", nil}.step(c, ctx) + setAgentStatus{"mysql/0", status.Error, "mock error", nil}.step(c, ctx) // When I run juju status --format oneline started _, stdout, stderr := runStatus(c, "--format", "oneline", "active") c.Assert(string(stderr), gc.Equals, "") // Then I should receive output prefixed with: const expected = ` -- wordpress/0: controller-1.dns (agent:idle, workload:active) - - logging/0: controller-1.dns (agent:idle, workload:active) +- wordpress/0: 10.0.1.1 (agent:idle, workload:active) + - logging/0: 10.0.1.1 (agent:idle, workload:active) ` c.Assert(string(stdout), gc.Equals, expected[1:]) } @@ -3723,8 +3931,8 
@@ // Then I should receive output prefixed with: const expected = ` -- wordpress/0: controller-1.dns (agent:idle, workload:active) - - logging/0: controller-1.dns (agent:idle, workload:active) +- wordpress/0: 10.0.1.1 (agent:idle, workload:active) + - logging/0: 10.0.1.1 (agent:idle, workload:active) ` c.Assert(string(stdout), gc.Equals, expected[1:]) } @@ -3756,13 +3964,16 @@ " name: controller\n" + " controller: kontroll\n" + " cloud: dummy\n" + + " region: dummy-region\n" + " version: 1.2.3\n" + "machines:\n" + " \"0\":\n" + " juju-status:\n" + " current: started\n" + " since: 01 Apr 15 01:23+10:00\n" + - " dns-name: controller-0.dns\n" + + " dns-name: 10.0.0.1\n" + + " ip-addresses:\n" + + " - 10.0.0.1\n" + " instance-id: controller-0\n" + " machine-status:\n" + " current: pending\n" + @@ -3778,7 +3989,7 @@ " current: pending\n" + " since: 01 Apr 15 01:23+10:00\n" + " series: quantal\n" + - " hardware: arch=amd64 cpu-cores=1 mem=1024M root-disk=8192M\n" + + " hardware: arch=amd64 cores=1 mem=1024M root-disk=8192M\n" + " controller-member-status: adding-vote\n" + "applications: {}\n" @@ -3791,15 +4002,15 @@ defer s.resetContext(c, ctx) // Given unit 1 of the "logging" service has an error - setAgentStatus{"logging/1", status.StatusError, "mock error", nil}.step(c, ctx) + setAgentStatus{"logging/1", status.Error, "mock error", nil}.step(c, ctx) // When I run juju status --format oneline error _, stdout, stderr := runStatus(c, "--format", "oneline", "error") c.Assert(stderr, gc.IsNil) // Then I should receive output prefixed with: const expected = ` -- mysql/0: controller-2.dns (agent:idle, workload:active) - - logging/1: controller-2.dns (agent:idle, workload:error) +- mysql/0: 10.0.2.1 (agent:idle, workload:active) + - logging/1: 10.0.2.1 (agent:idle, workload:error) ` c.Assert(string(stdout), gc.Equals, expected[1:]) } @@ -3815,8 +4026,8 @@ // Then I should receive output prefixed with: const expected = ` -- mysql/0: controller-2.dns (agent:idle, 
workload:active) - - logging/1: controller-2.dns (agent:idle, workload:active) +- mysql/0: 10.0.2.1 (agent:idle, workload:active) + - logging/1: 10.0.2.1 (agent:idle, workload:active) ` c.Assert(string(stdout), gc.Equals, expected[1:]) @@ -3839,8 +4050,8 @@ // Then I should receive output prefixed with: const expected = ` -- mysql/0: controller-2.dns (agent:idle, workload:active) - - logging/1: controller-2.dns (agent:idle, workload:active) +- mysql/0: 10.0.2.1 (agent:idle, workload:active) + - logging/1: 10.0.2.1 (agent:idle, workload:active) ` c.Assert(string(stdout), gc.Equals, expected[1:]) } @@ -3857,8 +4068,8 @@ // Then I should receive output prefixed with: const expected = ` -- wordpress/0: controller-1.dns (agent:idle, workload:active) - - logging/0: controller-1.dns (agent:idle, workload:active) +- wordpress/0: 10.0.1.1 (agent:idle, workload:active) + - logging/0: 10.0.1.1 (agent:idle, workload:active) ` c.Assert(string(stdout), gc.Equals, expected[1:]) } @@ -3870,8 +4081,8 @@ // Given the address for machine "1" is "localhost" setAddresses{"1", network.NewAddresses("localhost", "127.0.0.1")}.step(c, ctx) - // And the address for machine "2" is "10.0.0.1" - setAddresses{"2", network.NewAddresses("10.0.0.1")}.step(c, ctx) + // And the address for machine "2" is "10.0.0.2" + setAddresses{"2", network.NewAddresses("10.0.0.2")}.step(c, ctx) // When I run juju status --format oneline 127.0.0.1 _, stdout, stderr := runStatus(c, "--format", "oneline", "127.0.0.1") c.Assert(stderr, gc.IsNil) @@ -3891,8 +4102,8 @@ // Given the address for machine "1" is "localhost" setAddresses{"1", network.NewAddresses("localhost")}.step(c, ctx) - // And the address for machine "2" is "10.0.0.1" - setAddresses{"2", network.NewAddresses("10.0.0.1")}.step(c, ctx) + // And the address for machine "2" is "10.0.0.2" + setAddresses{"2", network.NewAddresses("10.0.0.2")}.step(c, ctx) openUnitPort{"wordpress/0", "tcp", 80}.step(c, ctx) // When I run juju status --format oneline 80/tcp _, 
stdout, stderr := runStatus(c, "--format", "oneline", "80/tcp") @@ -3917,10 +4128,10 @@ // Then I should receive output prefixed with: const expected = ` -- mysql/0: controller-2.dns (agent:idle, workload:active) - - logging/1: controller-2.dns (agent:idle, workload:active) -- wordpress/0: controller-1.dns (agent:idle, workload:active) - - logging/0: controller-1.dns (agent:idle, workload:active) +- mysql/0: 10.0.2.1 (agent:idle, workload:active) + - logging/1: 10.0.2.1 (agent:idle, workload:active) +- wordpress/0: 10.0.1.1 (agent:idle, workload:active) + - logging/0: 10.0.1.1 (agent:idle, workload:active) ` c.Assert(string(stdout), gc.Equals, expected[1:]) } @@ -3938,8 +4149,8 @@ // Then I should receive output prefixed with: const expected = ` -- mysql/0: controller-2.dns (agent:idle, workload:active) - - logging/1: controller-2.dns (agent:idle, workload:active) +- mysql/0: 10.0.2.1 (agent:idle, workload:active) + - logging/1: 10.0.2.1 (agent:idle, workload:active) ` c.Assert(string(stdout), gc.Equals, expected[1:]) } @@ -3953,10 +4164,10 @@ // Then I should receive output prefixed with: const expected = ` -- mysql/0: controller-2.dns (agent:idle, workload:active) - - logging/1: controller-2.dns (agent:idle, workload:active) -- wordpress/0: controller-1.dns (agent:idle, workload:active) - - logging/0: controller-1.dns (agent:idle, workload:active) +- mysql/0: 10.0.2.1 (agent:idle, workload:active) + - logging/1: 10.0.2.1 (agent:idle, workload:active) +- wordpress/0: 10.0.1.1 (agent:idle, workload:active) + - logging/0: 10.0.1.1 (agent:idle, workload:active) ` c.Assert(string(stdout), gc.Equals, expected[1:]) } @@ -3970,10 +4181,10 @@ // Then I should receive output prefixed with: const expected = ` -- mysql/0: controller-2.dns (agent:idle, workload:active) - - logging/1: controller-2.dns (agent:idle, workload:active) -- wordpress/0: controller-1.dns (agent:idle, workload:active) - - logging/0: controller-1.dns (agent:idle, workload:active) +- mysql/0: 10.0.2.1 
(agent:idle, workload:active) + - logging/1: 10.0.2.1 (agent:idle, workload:active) +- wordpress/0: 10.0.1.1 (agent:idle, workload:active) + - logging/0: 10.0.1.1 (agent:idle, workload:active) ` c.Assert(string(stdout), gc.Equals, expected[1:]) } @@ -4029,16 +4240,16 @@ var statusTimeTest = test( "status generates timestamps as UTC in ISO format", addMachine{machineId: "0", job: state.JobManageModel}, - setAddresses{"0", network.NewAddresses("controller-0.dns")}, + setAddresses{"0", network.NewAddresses("10.0.0.1")}, startAliveMachine{"0"}, - setMachineStatus{"0", status.StatusStarted, ""}, + setMachineStatus{"0", status.Started, ""}, addCharm{"dummy"}, addService{name: "dummy-application", charm: "dummy"}, addMachine{machineId: "1", job: state.JobHostUnits}, startAliveMachine{"1"}, - setAddresses{"1", network.NewAddresses("controller-1.dns")}, - setMachineStatus{"1", status.StatusStarted, ""}, + setAddresses{"1", network.NewAddresses("10.0.1.1")}, + setMachineStatus{"1", status.Started, ""}, addAliveUnit{"dummy-application", "1"}, expect{ @@ -4048,6 +4259,7 @@ "name": "controller", "controller": "kontroll", "cloud": "dummy", + "region": "dummy-region", "version": "1.2.3", }, "machines": M{ @@ -4057,23 +4269,23 @@ "applications": M{ "dummy-application": dummyCharm(M{ "application-status": M{ - "current": "unknown", - "message": "Waiting for agent initialization to finish", + "current": "waiting", + "message": "waiting for machine", "since": "01 Apr 15 01:23+10:00", }, "units": M{ "dummy-application/0": M{ "machine": "1", "workload-status": M{ - "current": "unknown", - "message": "Waiting for agent initialization to finish", + "current": "waiting", + "message": "waiting for machine", "since": "01 Apr 15 01:23+10:00", }, "juju-status": M{ "current": "allocating", "since": "01 Apr 15 01:23+10:00", }, - "public-address": "controller-1.dns", + "public-address": "10.0.1.1", }, }, }), @@ -4094,6 +4306,9 @@ func (s *StatusSuite) TestFormatProvisioningError(c *gc.C) { 
status := ¶ms.FullStatus{ + Model: params.ModelStatusInfo{ + CloudTag: "cloud-dummy", + }, Machines: map[string]params.MachineStatus{ "1": { AgentStatus: params.DetailedStatus{ @@ -4109,9 +4324,13 @@ }, } formatter := NewStatusFormatter(status, true) - formatted := formatter.format() + formatted, err := formatter.format() + c.Assert(err, jc.ErrorIsNil) c.Check(formatted, jc.DeepEquals, formattedStatus{ + Model: modelStatus{ + Cloud: "dummy", + }, Machines: map[string]machineStatus{ "1": { JujuStatus: statusInfoContents{Current: "error", Message: ""}, diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/storage/add.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/storage/add.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/storage/add.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/storage/add.go 2016-10-13 14:31:49.000000000 +0000 @@ -14,6 +14,7 @@ "gopkg.in/juju/names.v2" "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/cmd/juju/common" "github.com/juju/juju/cmd/modelcmd" "github.com/juju/juju/storage" ) @@ -130,6 +131,9 @@ storages := c.createStorageAddParams() results, err := api.AddToUnit(storages) if err != nil { + if params.IsCodeUnauthorized(err) { + common.PermissionsMessage(ctx.Stderr, "add storage") + } return err } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/storage/add_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/storage/add_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/storage/add_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/storage/add_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -35,7 +35,7 @@ result := make([]params.ErrorResult, len(storages)) for i, one := range storages { if strings.HasPrefix(one.StorageName, "err") { - result[i].Error = common.ServerError(fmt.Errorf("test failure")) + result[i].Error = common.ServerError(errors.Errorf("test failure")) 
} } return result, nil @@ -149,10 +149,10 @@ result := make([]params.ErrorResult, len(storages)) for i, one := range storages { if one.StorageName == "storage2" { - result[i].Error = common.ServerError(fmt.Errorf(`storage pool "barf" not found`)) + result[i].Error = common.ServerError(errors.Errorf(`storage pool "barf" not found`)) } if one.StorageName == "storage42" { - result[i].Error = common.ServerError(fmt.Errorf(`storage "storage42" not found`)) + result[i].Error = common.ServerError(errors.Errorf(`storage "storage42" not found`)) } } return result, nil @@ -175,7 +175,7 @@ result := make([]params.ErrorResult, len(storages)) for i, one := range storages { if one.StorageName == "storage42" || one.StorageName == "storage2" { - result[i].Error = common.ServerError(fmt.Errorf(`storage "%v" not found`, one.StorageName)) + result[i].Error = common.ServerError(errors.Errorf(`storage "%v" not found`, one.StorageName)) } } return result, nil @@ -199,7 +199,7 @@ if one.StorageName == "storage42" || one.StorageName == "storage2" { result[i].Error = common.ServerError(errors.New(unitErr)) } else { - result[i].Error = common.ServerError(fmt.Errorf(`storage "%v" not found`, one.StorageName)) + result[i].Error = common.ServerError(errors.Errorf(`storage "%v" not found`, one.StorageName)) } } return result, nil @@ -223,6 +223,20 @@ s.assertAddErrorOutput(c, "cmd: error out silently", "", fmt.Sprintf("%v\n", expectedErr)) } +func (s *addSuite) TestUnauthorizedMentionsJujuGrant(c *gc.C) { + s.args = []string{"tst/123", "data"} + s.mockAPI.addToUnitFunc = func(storages []params.StorageAddParams) ([]params.ErrorResult, error) { + return nil, ¶ms.Error{ + Message: "permission denied", + Code: params.CodeUnauthorized, + } + } + + ctx, _ := s.runAdd(c, s.args...) 
+ errString := strings.Replace(testing.Stderr(ctx), "\n", " ", -1) + c.Assert(errString, gc.Matches, `.*juju grant.*`) +} + func (s *addSuite) assertAddOutput(c *gc.C, expectedOut, expectedErr string) { context, err := s.runAdd(c, s.args...) c.Assert(err, jc.ErrorIsNil) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/storage/filesystemlistformatters.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/storage/filesystemlistformatters.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/storage/filesystemlistformatters.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/storage/filesystemlistformatters.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,41 +4,23 @@ package storage import ( - "bytes" "fmt" + "io" "sort" "strings" - "text/tabwriter" "github.com/dustin/go-humanize" - "github.com/juju/errors" + "github.com/juju/juju/cmd/output" ) -// formatFilesystemListTabular returns a tabular summary of filesystem instances. -func formatFilesystemListTabular(value interface{}) ([]byte, error) { - infos, ok := value.(map[string]FilesystemInfo) - if !ok { - return nil, errors.Errorf("expected value of type %T, got %T", infos, value) - } - return formatFilesystemListTabularTyped(infos), nil -} - -func formatFilesystemListTabularTyped(infos map[string]FilesystemInfo) []byte { - var out bytes.Buffer - const ( - // To format things into columns. - minwidth = 0 - tabwidth = 1 - padding = 2 - padchar = ' ' - flags = 0 - ) - tw := tabwriter.NewWriter(&out, minwidth, tabwidth, padding, padchar, flags) +// formatFilesystemListTabular writes a tabular summary of filesystem instances. 
+func formatFilesystemListTabular(writer io.Writer, infos map[string]FilesystemInfo) error { + tw := output.TabWriter(writer) print := func(values ...string) { fmt.Fprintln(tw, strings.Join(values, "\t")) } - print("MACHINE", "UNIT", "STORAGE", "ID", "VOLUME", "PROVIDER-ID", "MOUNTPOINT", "SIZE", "STATE", "MESSAGE") + print("Machine", "Unit", "Storage", "Id", "Volume", "Provider id", "Mountpoint", "Size", "State", "Message") filesystemAttachmentInfos := make(filesystemAttachmentInfos, 0, len(infos)) for filesystemId, info := range infos { @@ -84,8 +66,7 @@ ) } - tw.Flush() - return out.Bytes() + return tw.Flush() } type filesystemAttachmentInfo struct { diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/storage/filesystemlist_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/storage/filesystemlist_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/storage/filesystemlist_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/storage/filesystemlist_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -88,7 +88,7 @@ } var expectedFilesystemListTabular = ` -MACHINE UNIT STORAGE ID VOLUME PROVIDER-ID MOUNTPOINT SIZE STATE MESSAGE +Machine Unit Storage Id Volume Provider id Mountpoint Size State Message 0 abc/0 db-dir/1001 0/0 0/1 provider-supplied-filesystem-0-0 /mnt/fuji 512MiB attached 0 transcode/0 shared-fs/0 4 provider-supplied-filesystem-4 /mnt/doom 1.0GiB attached 0 1 provider-supplied-filesystem-1 2.0GiB attaching failed to attach, will retry @@ -182,7 +182,7 @@ FilesystemId: "provider-supplied-filesystem-0-0", Size: 512, }, - Status: createTestStatus(status.StatusAttached, ""), + Status: createTestStatus(status.Attached, ""), MachineAttachments: map[string]params.FilesystemAttachmentInfo{ "machine-0": params.FilesystemAttachmentInfo{ MountPoint: "/mnt/fuji", @@ -192,7 +192,7 @@ StorageTag: "storage-db-dir-1001", OwnerTag: "unit-abc-0", Kind: params.StorageKindBlock, - Status: 
createTestStatus(status.StatusAttached, ""), + Status: createTestStatus(status.Attached, ""), Attachments: map[string]params.StorageAttachmentDetails{ "unit-abc-0": params.StorageAttachmentDetails{ StorageTag: "storage-db-dir-1001", @@ -211,7 +211,7 @@ FilesystemId: "provider-supplied-filesystem-1", Size: 2048, }, - Status: createTestStatus(status.StatusAttaching, "failed to attach, will retry"), + Status: createTestStatus(status.Attaching, "failed to attach, will retry"), MachineAttachments: map[string]params.FilesystemAttachmentInfo{ "machine-0": params.FilesystemAttachmentInfo{}, }, @@ -223,7 +223,7 @@ Info: params.FilesystemInfo{ Size: 42, }, - Status: createTestStatus(status.StatusPending, ""), + Status: createTestStatus(status.Pending, ""), MachineAttachments: map[string]params.FilesystemAttachmentInfo{ "machine-1": params.FilesystemAttachmentInfo{}, }, @@ -236,7 +236,7 @@ FilesystemId: "provider-supplied-filesystem-2", Size: 3, }, - Status: createTestStatus(status.StatusAttached, ""), + Status: createTestStatus(status.Attached, ""), MachineAttachments: map[string]params.FilesystemAttachmentInfo{ "machine-1": params.FilesystemAttachmentInfo{ MountPoint: "/mnt/zion", @@ -251,7 +251,7 @@ FilesystemId: "provider-supplied-filesystem-4", Size: 1024, }, - Status: createTestStatus(status.StatusAttached, ""), + Status: createTestStatus(status.Attached, ""), MachineAttachments: map[string]params.FilesystemAttachmentInfo{ "machine-0": params.FilesystemAttachmentInfo{ MountPoint: "/mnt/doom", @@ -266,7 +266,7 @@ StorageTag: "storage-shared-fs-0", OwnerTag: "application-transcode", Kind: params.StorageKindBlock, - Status: createTestStatus(status.StatusAttached, ""), + Status: createTestStatus(status.Attached, ""), Attachments: map[string]params.StorageAttachmentDetails{ "unit-transcode-0": params.StorageAttachmentDetails{ StorageTag: "storage-shared-fs-0", diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/storage/listformatters.go 
juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/storage/listformatters.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/storage/listformatters.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/storage/listformatters.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,25 +4,18 @@ package storage import ( - "bytes" "fmt" + "io" "sort" "strconv" "strings" - "text/tabwriter" - "github.com/juju/errors" + "github.com/juju/juju/cmd/output" ) -// formatListTabular returns a tabular summary of storage instances. -func formatStorageListTabular(value interface{}) ([]byte, error) { - storageInfo, ok := value.(map[string]StorageInfo) - if !ok { - return nil, errors.Errorf("expected value of type %T, got %T", storageInfo, value) - } - var out bytes.Buffer - // To format things into columns. - tw := tabwriter.NewWriter(&out, 0, 1, 1, ' ', 0) +// formatListTabular writes a tabular summary of storage instances. +func formatStorageListTabular(writer io.Writer, storageInfo map[string]StorageInfo) error { + tw := output.TabWriter(writer) p := func(values ...interface{}) { for _, v := range values { fmt.Fprintf(tw, "%v\t", v) @@ -30,7 +23,7 @@ fmt.Fprintln(tw) } p("[Storage]") - p("UNIT\tID\tLOCATION\tSTATUS\tMESSAGE") + p("Unit\tId\tLocation\tStatus\tMessage") byUnit := make(map[string]map[string]storageAttachmentInfo) for storageId, storageInfo := range storageInfo { @@ -88,7 +81,7 @@ } tw.Flush() - return out.Bytes(), nil + return nil } type storageAttachmentInfo struct { diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/storage/list.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/storage/list.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/storage/list.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/storage/list.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,9 +4,11 @@ package storage import ( + "io" + "github.com/juju/cmd" "github.com/juju/errors" - 
"launchpad.net/gnuflag" + "github.com/juju/gnuflag" "github.com/juju/juju/apiserver/params" "github.com/juju/juju/cmd/modelcmd" @@ -83,7 +85,8 @@ if err != nil { return err } - if output == nil { + if output == nil && c.out.Name() == "tabular" { + ctx.Infof("No storage to display.") return nil } return c.out.Write(ctx, output) @@ -120,22 +123,18 @@ return output, nil } -func formatListTabular(value interface{}) ([]byte, error) { - - switch value.(type) { +func formatListTabular(writer io.Writer, value interface{}) error { + switch value := value.(type) { case map[string]StorageInfo: - output, err := formatStorageListTabular(value) - return output, err + return formatStorageListTabular(writer, value) case map[string]FilesystemInfo: - output, err := formatFilesystemListTabular(value) - return output, err + return formatFilesystemListTabular(writer, value) case map[string]VolumeInfo: - output, err := formatVolumeListTabular(value) - return output, err + return formatVolumeListTabular(writer, value) default: - return nil, errors.Errorf("unexpected value of type %T", value) + return errors.Errorf("unexpected value of type %T", value) } } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/storage/list_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/storage/list_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/storage/list_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/storage/list_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -40,12 +40,12 @@ nil, // Default format is tabular ` -\[Storage\] -UNIT ID LOCATION STATUS MESSAGE -postgresql/0 db-dir/1100 hither attached -transcode/0 db-dir/1000 thither pending -transcode/0 shared-fs/0 there attached -transcode/1 shared-fs/0 here attached +\[Storage\] +Unit Id Location Status Message +postgresql/0 db-dir/1100 hither attached +transcode/0 db-dir/1000 thither pending +transcode/0 shared-fs/0 there attached +transcode/1 shared-fs/0 here 
attached `[1:]) } @@ -97,12 +97,12 @@ nil, // Default format is tabular ` -\[Storage\] -UNIT ID LOCATION STATUS MESSAGE -postgresql/0 db-dir/1100 hither attached -transcode/0 db-dir/1000 thither pending -transcode/0 shared-fs/0 there attached -transcode/1 shared-fs/0 here attached +\[Storage\] +Unit Id Location Status Message +postgresql/0 db-dir/1100 hither attached +transcode/0 db-dir/1000 thither pending +transcode/0 shared-fs/0 there attached +transcode/1 shared-fs/0 here attached `[1:]) } @@ -154,7 +154,7 @@ OwnerTag: "unit-transcode-0", Kind: params.StorageKindBlock, Status: params.EntityStatus{ - Status: status.StatusPending, + Status: status.Pending, Since: &epoch, }, Attachments: map[string]params.StorageAttachmentDetails{ @@ -167,7 +167,7 @@ OwnerTag: "unit-postgresql-0", Kind: params.StorageKindBlock, Status: params.EntityStatus{ - Status: status.StatusAttached, + Status: status.Attached, Since: &epoch, }, Persistent: true, @@ -181,7 +181,7 @@ OwnerTag: "application-transcode", Kind: params.StorageKindFilesystem, Status: params.EntityStatus{ - Status: status.StatusAttached, + Status: status.Attached, Since: &epoch, }, Persistent: true, diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/storage/package_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/storage/package_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/storage/package_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/storage/package_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -44,6 +44,6 @@ s.store.CurrentControllerName = "testing" s.store.Controllers["testing"] = jujuclient.ControllerDetails{} s.store.Accounts["testing"] = jujuclient.AccountDetails{ - User: "admin@local", + User: "admin", } } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/storage/poolcreate.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/storage/poolcreate.go --- 
juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/storage/poolcreate.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/storage/poolcreate.go 2016-10-13 14:31:49.000000000 +0000 @@ -7,7 +7,6 @@ "github.com/juju/cmd" "github.com/juju/errors" "github.com/juju/utils/keyvalues" - "launchpad.net/gnuflag" "github.com/juju/juju/cmd/modelcmd" ) @@ -93,11 +92,6 @@ } } -// SetFlags implements Command.SetFlags. -func (c *poolCreateCommand) SetFlags(f *gnuflag.FlagSet) { - c.StorageCommandBase.SetFlags(f) -} - // Run implements Command.Run. func (c *poolCreateCommand) Run(ctx *cmd.Context) (err error) { api, err := c.newAPIFunc() diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/storage/poollistformatters.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/storage/poollistformatters.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/storage/poollistformatters.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/storage/poollistformatters.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,42 +4,34 @@ package storage import ( - "bytes" "fmt" + "io" "sort" "strings" - "text/tabwriter" "github.com/juju/errors" + "github.com/juju/juju/cmd/output" ) // formatPoolListTabular returns a tabular summary of pool instances or // errors out if parameter is not a map of PoolInfo. -func formatPoolListTabular(value interface{}) ([]byte, error) { +func formatPoolListTabular(writer io.Writer, value interface{}) error { pools, ok := value.(map[string]PoolInfo) if !ok { - return nil, errors.Errorf("expected value of type %T, got %T", pools, value) + return errors.Errorf("expected value of type %T, got %T", pools, value) } - return formatPoolsTabular(pools) + formatPoolsTabular(writer, pools) + return nil } // formatPoolsTabular returns a tabular summary of pool instances. 
-func formatPoolsTabular(pools map[string]PoolInfo) ([]byte, error) { - var out bytes.Buffer - const ( - // To format things into columns. - minwidth = 0 - tabwidth = 1 - padding = 2 - padchar = ' ' - flags = 0 - ) - tw := tabwriter.NewWriter(&out, minwidth, tabwidth, padding, padchar, flags) +func formatPoolsTabular(writer io.Writer, pools map[string]PoolInfo) { + tw := output.TabWriter(writer) print := func(values ...string) { fmt.Fprintln(tw, strings.Join(values, "\t")) } - print("NAME", "PROVIDER", "ATTRS") + print("Name", "Provider", "Attrs") poolNames := make([]string, 0, len(pools)) for name := range pools { @@ -61,6 +53,4 @@ print(name, pool.Provider, strings.Join(attrs, " ")) } tw.Flush() - - return out.Bytes(), nil } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/storage/poollist.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/storage/poollist.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/storage/poollist.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/storage/poollist.go 2016-10-13 14:31:49.000000000 +0000 @@ -5,7 +5,7 @@ import ( "github.com/juju/cmd" - "launchpad.net/gnuflag" + "github.com/juju/gnuflag" "github.com/juju/juju/apiserver/params" "github.com/juju/juju/cmd/modelcmd" @@ -104,6 +104,7 @@ return err } if len(result) == 0 { + ctx.Infof("No storage pools to display.") return nil } output := formatPoolInfo(result) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/storage/poollist_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/storage/poollist_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/storage/poollist_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/storage/poollist_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -84,7 +84,7 @@ "--name", "xyz", "--name", "abc", "--format", "tabular"}, ` -NAME PROVIDER ATTRS +Name Provider Attrs abc testType key=value one=1 two=2 
testName0 a key=value one=1 two=2 testName1 b key=value one=1 two=2 @@ -102,7 +102,7 @@ []string{"--name", "myaw", "--name", "xyz", "--name", "abc", "--format", "tabular"}, ` -NAME PROVIDER ATTRS +Name Provider Attrs abc testType a=true b=maybe c=well myaw testType a=true b=maybe c=well xyz testType a=true b=maybe c=well diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/storage/show.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/storage/show.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/storage/show.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/storage/show.go 2016-10-13 14:31:49.000000000 +0000 @@ -6,11 +6,12 @@ import ( "github.com/juju/cmd" "github.com/juju/errors" + "github.com/juju/gnuflag" "gopkg.in/juju/names.v2" - "launchpad.net/gnuflag" "github.com/juju/juju/apiserver/params" "github.com/juju/juju/cmd/modelcmd" + "github.com/juju/juju/cmd/output" ) // NewShowCommand returns a command that shows storage details @@ -60,7 +61,7 @@ // SetFlags implements Command.SetFlags. func (c *showCommand) SetFlags(f *gnuflag.FlagSet) { c.StorageCommandBase.SetFlags(f) - c.out.AddFlags(f, "yaml", cmd.DefaultFormatters) + c.out.AddFlags(f, "yaml", output.DefaultFormatters) } // Run implements Command.Run. 
diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/storage/volumelistformatters.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/storage/volumelistformatters.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/storage/volumelistformatters.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/storage/volumelistformatters.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,41 +4,23 @@ package storage import ( - "bytes" "fmt" + "io" "sort" "strings" - "text/tabwriter" "github.com/dustin/go-humanize" - "github.com/juju/errors" + "github.com/juju/juju/cmd/output" ) // formatVolumeListTabular returns a tabular summary of volume instances. -func formatVolumeListTabular(value interface{}) ([]byte, error) { - infos, ok := value.(map[string]VolumeInfo) - if !ok { - return nil, errors.Errorf("expected value of type %T, got %T", infos, value) - } - return formatVolumeListTabularTyped(infos), nil -} - -func formatVolumeListTabularTyped(infos map[string]VolumeInfo) []byte { - var out bytes.Buffer - const ( - // To format things into columns. 
- minwidth = 0 - tabwidth = 1 - padding = 2 - padchar = ' ' - flags = 0 - ) - tw := tabwriter.NewWriter(&out, minwidth, tabwidth, padding, padchar, flags) +func formatVolumeListTabular(writer io.Writer, infos map[string]VolumeInfo) error { + tw := output.TabWriter(writer) print := func(values ...string) { fmt.Fprintln(tw, strings.Join(values, "\t")) } - print("MACHINE", "UNIT", "STORAGE", "ID", "PROVIDER-ID", "DEVICE", "SIZE", "STATE", "MESSAGE") + print("Machine", "Unit", "Storage", "Id", "Provider Id", "Device", "Size", "State", "Message") volumeAttachmentInfos := make(volumeAttachmentInfos, 0, len(infos)) for volumeId, info := range infos { @@ -84,8 +66,7 @@ ) } - tw.Flush() - return out.Bytes() + return tw.Flush() } type volumeAttachmentInfo struct { diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/storage/volumelist_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/storage/volumelist_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/storage/volumelist_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/storage/volumelist_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -89,7 +89,7 @@ } var expectedVolumeListTabular = ` -MACHINE UNIT STORAGE ID PROVIDER-ID DEVICE SIZE STATE MESSAGE +Machine Unit Storage Id Provider Id Device Size State Message 0 abc/0 db-dir/1001 0/0 provider-supplied-volume-0-0 loop0 512MiB attached 0 transcode/0 shared-fs/0 4 provider-supplied-volume-4 xvdf2 1.0GiB attached 0 1 provider-supplied-volume-1 2.0GiB attaching failed to attach, will retry @@ -182,7 +182,7 @@ VolumeId: "provider-supplied-volume-0-0", Size: 512, }, - Status: createTestStatus(status.StatusAttached, ""), + Status: createTestStatus(status.Attached, ""), MachineAttachments: map[string]params.VolumeAttachmentInfo{ "machine-0": params.VolumeAttachmentInfo{ DeviceName: "loop0", @@ -192,7 +192,7 @@ StorageTag: "storage-db-dir-1001", OwnerTag: "unit-abc-0", Kind: params.StorageKindBlock, - 
Status: createTestStatus(status.StatusAttached, ""), + Status: createTestStatus(status.Attached, ""), Attachments: map[string]params.StorageAttachmentDetails{ "unit-abc-0": params.StorageAttachmentDetails{ StorageTag: "storage-db-dir-1001", @@ -213,7 +213,7 @@ Persistent: true, Size: 2048, }, - Status: createTestStatus(status.StatusAttaching, "failed to attach, will retry"), + Status: createTestStatus(status.Attaching, "failed to attach, will retry"), MachineAttachments: map[string]params.VolumeAttachmentInfo{ "machine-0": params.VolumeAttachmentInfo{}, }, @@ -225,7 +225,7 @@ Info: params.VolumeInfo{ Size: 42, }, - Status: createTestStatus(status.StatusPending, ""), + Status: createTestStatus(status.Pending, ""), MachineAttachments: map[string]params.VolumeAttachmentInfo{ "machine-1": params.VolumeAttachmentInfo{}, }, @@ -238,7 +238,7 @@ VolumeId: "provider-supplied-volume-2", Size: 3, }, - Status: createTestStatus(status.StatusAttached, ""), + Status: createTestStatus(status.Attached, ""), MachineAttachments: map[string]params.VolumeAttachmentInfo{ "machine-1": params.VolumeAttachmentInfo{ DeviceName: "xvdf1", @@ -254,7 +254,7 @@ Persistent: true, Size: 1024, }, - Status: createTestStatus(status.StatusAttached, ""), + Status: createTestStatus(status.Attached, ""), MachineAttachments: map[string]params.VolumeAttachmentInfo{ "machine-0": params.VolumeAttachmentInfo{ DeviceName: "xvdf2", @@ -269,7 +269,7 @@ StorageTag: "storage-shared-fs-0", OwnerTag: "application-transcode", Kind: params.StorageKindBlock, - Status: createTestStatus(status.StatusAttached, ""), + Status: createTestStatus(status.Attached, ""), Attachments: map[string]params.StorageAttachmentDetails{ "unit-transcode-0": params.StorageAttachmentDetails{ StorageTag: "storage-shared-fs-0", diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/subnet/add.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/subnet/add.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/subnet/add.go 
2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/subnet/add.go 2016-10-13 14:31:49.000000000 +0000 @@ -11,6 +11,8 @@ "github.com/juju/juju/network" "gopkg.in/juju/names.v2" + "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/cmd/juju/common" "github.com/juju/juju/cmd/modelcmd" ) @@ -112,6 +114,9 @@ ctx.Infof("ERROR: %v.", err) return nil } else if err != nil { + if params.IsCodeUnauthorized(err) { + common.PermissionsMessage(ctx.Stderr, "add a subnet") + } return errors.Annotatef(err, "cannot add subnet") } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/subnet/add_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/subnet/add_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/subnet/add_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/subnet/add_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -12,6 +12,7 @@ gc "gopkg.in/check.v1" "gopkg.in/juju/names.v2" + "github.com/juju/juju/apiserver/params" "github.com/juju/juju/cmd/juju/subnet" "github.com/juju/juju/network" coretesting "github.com/juju/juju/testing" @@ -219,6 +220,15 @@ ) } +func (s *AddSuite) TestRunUnauthorizedMentionsJujuGrant(c *gc.C) { + s.api.SetErrors(¶ms.Error{ + Message: "permission denied", + Code: params.CodeUnauthorized, + }) + _, stderr, _ := s.RunCommand(c, "10.10.0.0/24", "myspace") + c.Assert(strings.Replace(stderr, "\n", " ", -1), gc.Matches, `.*juju grant.*`) +} + func (s *AddSuite) TestRunWithAmbiguousCIDRDisplaysError(c *gc.C) { apiError := errors.New(`multiple subnets with CIDR "10.10.0.0/24" `) s.api.SetErrors(apiError) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/subnet/create.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/subnet/create.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/subnet/create.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/subnet/create.go 
2016-10-13 14:31:49.000000000 +0000 @@ -6,7 +6,7 @@ import ( "strings" - "launchpad.net/gnuflag" + "github.com/juju/gnuflag" "github.com/juju/cmd" "github.com/juju/errors" diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/subnet/create_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/subnet/create_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/subnet/create_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/subnet/create_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -21,7 +21,7 @@ var _ = gc.Suite(&CreateSuite{}) func (s *CreateSuite) SetUpTest(c *gc.C) { - s.BaseSuite.SetFeatureFlags(feature.PostNetCLIMVP) + s.BaseSubnetSuite.SetFeatureFlags(feature.PostNetCLIMVP) s.BaseSubnetSuite.SetUpTest(c) s.command, _ = subnet.NewCreateCommandForTest(s.api) c.Assert(s.command, gc.NotNil) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/subnet/list.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/subnet/list.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/subnet/list.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/subnet/list.go 2016-10-13 14:31:49.000000000 +0000 @@ -7,7 +7,7 @@ "net" "strings" - "launchpad.net/gnuflag" + "github.com/juju/gnuflag" "github.com/juju/cmd" "github.com/juju/errors" @@ -15,6 +15,7 @@ "gopkg.in/juju/names.v2" "github.com/juju/juju/cmd/modelcmd" + "github.com/juju/juju/cmd/output" ) // NewListCommand returns a cammin used to list all subnets @@ -60,10 +61,7 @@ // SetFlags is defined on the cmd.Command interface. 
func (c *listCommand) SetFlags(f *gnuflag.FlagSet) { c.SubnetCommandBase.SetFlags(f) - c.Out.AddFlags(f, "yaml", map[string]cmd.Formatter{ - "yaml": cmd.FormatYaml, - "json": cmd.FormatJson, - }) + c.Out.AddFlags(f, "yaml", output.DefaultFormatters) f.StringVar(&c.SpaceName, "space", "", "Filter results by space name") f.StringVar(&c.ZoneName, "zone", "", "Filter results by zone name") @@ -105,9 +103,9 @@ // Display a nicer message in case no subnets were found. if len(subnets) == 0 { if c.SpaceName != "" || c.ZoneName != "" { - ctx.Infof("no subnets found matching requested criteria") + ctx.Infof("No subnets found matching requested criteria.") } else { - ctx.Infof("no subnets to display") + ctx.Infof("No subnets to display.") } return nil } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/subnet/list_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/subnet/list_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/subnet/list_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/subnet/list_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -223,7 +223,7 @@ s.api.Subnets = s.api.Subnets[0:0] s.AssertRunSucceeds(c, - `no subnets found matching requested criteria\n`, + `No subnets found matching requested criteria.\n`, "", // empty stdout. "--space", "default", ) @@ -237,7 +237,7 @@ s.api.Subnets = s.api.Subnets[0:0] s.AssertRunSucceeds(c, - `no subnets to display\n`, + `No subnets to display.\n`, "", // empty stdout. ) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/subnet/package_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/subnet/package_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/subnet/package_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/subnet/package_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -27,7 +27,6 @@ // BaseSubnetSuite is used for embedding in other suites. 
type BaseSubnetSuite struct { coretesting.FakeJujuXDGDataHomeSuite - coretesting.BaseSuite command cmd.Command api *StubAPI @@ -37,11 +36,9 @@ func (s *BaseSubnetSuite) SetUpSuite(c *gc.C) { s.FakeJujuXDGDataHomeSuite.SetUpSuite(c) - s.BaseSuite.SetUpSuite(c) } func (s *BaseSubnetSuite) TearDownSuite(c *gc.C) { - s.BaseSuite.TearDownSuite(c) s.FakeJujuXDGDataHomeSuite.TearDownSuite(c) } @@ -49,11 +46,10 @@ // If any post-MVP command suite enabled the flag, keep it. hasFeatureFlag := featureflag.Enabled(feature.PostNetCLIMVP) - s.BaseSuite.SetUpTest(c) s.FakeJujuXDGDataHomeSuite.SetUpTest(c) if hasFeatureFlag { - s.BaseSuite.SetFeatureFlags(feature.PostNetCLIMVP) + s.FakeJujuXDGDataHomeSuite.SetFeatureFlags(feature.PostNetCLIMVP) } s.api = NewStubAPI() @@ -64,7 +60,6 @@ } func (s *BaseSubnetSuite) TearDownTest(c *gc.C) { - s.BaseSuite.TearDownTest(c) s.FakeJujuXDGDataHomeSuite.TearDownTest(c) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/subnet/remove_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/subnet/remove_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/subnet/remove_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/subnet/remove_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -21,7 +21,7 @@ var _ = gc.Suite(&RemoveSuite{}) func (s *RemoveSuite) SetUpTest(c *gc.C) { - s.BaseSuite.SetFeatureFlags(feature.PostNetCLIMVP) + s.BaseSubnetSuite.SetFeatureFlags(feature.PostNetCLIMVP) s.BaseSubnetSuite.SetUpTest(c) s.command, _ = subnet.NewRemoveCommandForTest(s.api) c.Assert(s.command, gc.NotNil) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/user/add.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/user/add.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/user/add.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/user/add.go 2016-10-13 14:31:49.000000000 +0000 @@ -7,24 +7,22 @@ "encoding/asn1" 
"encoding/base64" "fmt" - "strings" "github.com/juju/cmd" "github.com/juju/errors" "gopkg.in/juju/names.v2" - "launchpad.net/gnuflag" + "github.com/juju/juju/apiserver/params" "github.com/juju/juju/cmd/juju/block" + "github.com/juju/juju/cmd/juju/common" "github.com/juju/juju/cmd/modelcmd" - "github.com/juju/juju/juju/permission" "github.com/juju/juju/jujuclient" ) var usageSummary = ` Adds a Juju user to a controller.`[1:] -var usageDetails = ` -A ` + "`juju register`" + ` command will be printed, which must be executed by the +const usageDetails = "A `juju register` command will be printed, which must be executed by the" + ` user to complete the registration process. The user's details are stored within the shared model, and will be removed when the model is destroyed. @@ -34,9 +32,8 @@ Examples: juju add-user bob juju add-user --controller mycontroller bob - juju add-user --models=mymodel --acl=read bob -See also: +See also: register grant users @@ -44,11 +41,11 @@ disable-user enable-user change-user-password - remove-user`[1:] + remove-user` // AddUserAPI defines the usermanager API methods that the add command uses. type AddUserAPI interface { - AddUser(username, displayName, password, access string, modelUUIDs ...string) (names.UserTag, []byte, error) + AddUser(username, displayName, password string) (names.UserTag, []byte, error) Close() error } @@ -62,13 +59,6 @@ api AddUserAPI User string DisplayName string - ModelNames string - ModelAccess string -} - -func (c *addCommand) SetFlags(f *gnuflag.FlagSet) { - f.StringVar(&c.ModelNames, "models", "", "Models the new user is granted access to") - f.StringVar(&c.ModelAccess, "acl", "read", "Access controls") } // Info implements Command.Info. @@ -84,12 +74,7 @@ // Init implements Command.Init. 
func (c *addCommand) Init(args []string) error { if len(args) == 0 { - return fmt.Errorf("no username supplied") - } - - _, err := permission.ParseModelAccess(c.ModelAccess) - if err != nil { - return err + return errors.Errorf("no username supplied") } c.User, args = args[0], args[1:] @@ -111,25 +96,14 @@ defer api.Close() } - var modelNames []string - for _, modelArg := range strings.Split(c.ModelNames, ",") { - modelArg = strings.TrimSpace(modelArg) - if len(modelArg) > 0 { - modelNames = append(modelNames, modelArg) - } - } - - // If we need to share a model, look up the model UUIDs from the supplied names. - modelUUIDs, err := c.ModelUUIDs(modelNames) - if err != nil { - return errors.Trace(err) - } - // Add a user without a password. This will generate a temporary // secret key, which we'll print out for the user to supply to // "juju register". - _, secretKey, err := api.AddUser(c.User, c.DisplayName, "", c.ModelAccess, modelUUIDs...) + _, secretKey, err := api.AddUser(c.User, c.DisplayName, "") if err != nil { + if params.IsCodeUnauthorized(err) { + common.PermissionsMessage(ctx.Stderr, "add a user") + } return block.ProcessBlockedError(err, block.BlockChange) } @@ -172,18 +146,13 @@ ) fmt.Fprintf(ctx.Stdout, "User %q added\n", displayName) - for _, modelName := range modelNames { - fmt.Fprintf(ctx.Stdout, "User %q granted %s access to model %q\n", displayName, c.ModelAccess, modelName) - } fmt.Fprintf(ctx.Stdout, "Please send this command to %v:\n", c.User) fmt.Fprintf(ctx.Stdout, " juju register %s\n", base64RegistrationData, ) - if len(modelNames) == 0 { - fmt.Fprintf(ctx.Stdout, ` + fmt.Fprintf(ctx.Stdout, ` %q has not been granted access to any models. You can use "juju grant" to grant access. 
`, displayName) - } return nil } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/user/add_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/user/add_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/user/add_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/user/add_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -14,6 +14,7 @@ "github.com/juju/juju/api/base" "github.com/juju/juju/apiserver/common" + "github.com/juju/juju/apiserver/params" "github.com/juju/juju/cmd/juju/user" "github.com/juju/juju/testing" ) @@ -34,7 +35,7 @@ } func (s *UserAddCommandSuite) run(c *gc.C, args ...string) (*cmd.Context, error) { - addCommand, _ := user.NewAddCommandForTest(s.mockAPI, s.store, &mockModelApi{}) + addCommand, _ := user.NewAddCommandForTest(s.mockAPI, s.store, &mockModelAPI{}) return testing.RunCommand(c, addCommand, args...) } @@ -60,29 +61,19 @@ args: []string{"foobar", "Foo Bar", "extra"}, errorString: `unrecognized args: \["extra"\]`, }, { - args: []string{"foobar", "--models", "foo,bar", "--acl=read"}, - user: "foobar", - models: "foo,bar", - acl: "read", + args: []string{"foobar"}, + user: "foobar", }, { - args: []string{"foobar", "--models", "baz", "--acl=write"}, - user: "foobar", - models: "baz", - acl: "write", + args: []string{"foobar"}, + user: "foobar", }} { c.Logf("test %d (%q)", i, test.args) - wrappedCommand, command := user.NewAddCommandForTest(s.mockAPI, s.store, &mockModelApi{}) + wrappedCommand, command := user.NewAddCommandForTest(s.mockAPI, s.store, &mockModelAPI{}) err := testing.InitCommand(wrappedCommand, test.args) if test.errorString == "" { c.Check(err, jc.ErrorIsNil) c.Check(command.User, gc.Equals, test.user) c.Check(command.DisplayName, gc.Equals, test.displayname) - if len(test.models) > 0 { - c.Check(command.ModelNames, gc.Equals, test.models) - } - if test.acl != "" { - c.Check(command.ModelAccess, gc.Equals, test.acl) - } } else { c.Check(err, 
gc.ErrorMatches, test.errorString) } @@ -94,26 +85,6 @@ c.Assert(err, jc.ErrorIsNil) c.Assert(s.mockAPI.username, gc.Equals, "foobar") c.Assert(s.mockAPI.displayname, gc.Equals, "") - c.Assert(s.mockAPI.access, gc.Equals, "read") - c.Assert(s.mockAPI.models, gc.HasLen, 0) - expected := ` -User "foobar" added -Please send this command to foobar: - juju register MEYTBmZvb2JhcjAREw8xMjcuMC4wLjE6MTIzNDUEIFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYEwd0ZXN0aW5n - -"foobar" has not been granted access to any models. You can use "juju grant" to grant access. -`[1:] - c.Assert(testing.Stdout(context), gc.Equals, expected) - c.Assert(testing.Stderr(context), gc.Equals, "") -} - -func (s *UserAddCommandSuite) TestAddUserWithUsernameAndACL(c *gc.C) { - context, err := s.run(c, "--acl", "write", "foobar") - c.Assert(err, jc.ErrorIsNil) - c.Assert(s.mockAPI.username, gc.Equals, "foobar") - c.Assert(s.mockAPI.displayname, gc.Equals, "") - c.Assert(s.mockAPI.access, gc.Equals, "write") - c.Assert(s.mockAPI.models, gc.HasLen, 0) expected := ` User "foobar" added Please send this command to foobar: @@ -130,8 +101,6 @@ c.Assert(err, jc.ErrorIsNil) c.Assert(s.mockAPI.username, gc.Equals, "foobar") c.Assert(s.mockAPI.displayname, gc.Equals, "Foo Bar") - c.Assert(s.mockAPI.access, gc.Equals, "read") - c.Assert(s.mockAPI.models, gc.HasLen, 0) expected := ` User "Foo Bar (foobar)" added Please send this command to foobar: @@ -143,41 +112,21 @@ c.Assert(testing.Stderr(context), gc.Equals, "") } -type mockModelApi struct{} +type mockModelAPI struct{} -func (m *mockModelApi) ListModels(user string) ([]base.UserModel, error) { - return []base.UserModel{{Name: "model", UUID: "modeluuid", Owner: "current-user@local"}}, nil +func (m *mockModelAPI) ListModels(user string) ([]base.UserModel, error) { + return []base.UserModel{{Name: "model", UUID: "modeluuid", Owner: "current-user"}}, nil } -func (m *mockModelApi) Close() error { +func (m *mockModelAPI) Close() error { return nil } -func (s 
*UserAddCommandSuite) TestAddUserWithModelAccess(c *gc.C) { - context, err := s.run(c, "foobar", "--models", "model") - c.Assert(err, jc.ErrorIsNil) - c.Assert(s.mockAPI.username, gc.Equals, "foobar") - c.Assert(s.mockAPI.displayname, gc.Equals, "") - c.Assert(s.mockAPI.access, gc.Equals, "read") - c.Assert(s.mockAPI.models, gc.DeepEquals, []string{"modeluuid"}) - expected := ` -User "foobar" added -User "foobar" granted read access to model "model" -Please send this command to foobar: - juju register MEYTBmZvb2JhcjAREw8xMjcuMC4wLjE6MTIzNDUEIFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYEwd0ZXN0aW5n -`[1:] - c.Assert(testing.Stdout(context), gc.Equals, expected) - c.Assert(testing.Stderr(context), gc.Equals, "") -} - func (s *UserAddCommandSuite) TestBlockAddUser(c *gc.C) { // Block operation s.mockAPI.blocked = true _, err := s.run(c, "foobar", "Foo Bar") - c.Assert(err, gc.ErrorMatches, cmd.ErrSilent.Error()) - // msg is logged - stripped := strings.Replace(c.GetTestLog(), "\n", "", -1) - c.Check(stripped, gc.Matches, ".*To unblock changes.*") + testing.AssertOperationWasBlocked(c, err, ".*To enable changes.*") } func (s *UserAddCommandSuite) TestAddUserErrorResponse(c *gc.C) { @@ -186,7 +135,18 @@ c.Assert(err, gc.ErrorMatches, s.mockAPI.failMessage) } +func (s *UserAddCommandSuite) TestAddUserUnauthorizedMentionsJujuGrant(c *gc.C) { + s.mockAPI.addError = ¶ms.Error{ + Message: "permission denied", + Code: params.CodeUnauthorized, + } + ctx, _ := s.run(c, "foobar") + errString := strings.Replace(testing.Stderr(ctx), "\n", " ", -1) + c.Assert(errString, gc.Matches, `.*juju grant.*`) +} + type mockAddUserAPI struct { + addError error failMessage string blocked bool secretKey []byte @@ -194,19 +154,18 @@ username string displayname string password string - access string - models []string } -func (m *mockAddUserAPI) AddUser(username, displayname, password, access string, models ...string) (names.UserTag, []byte, error) { +func (m *mockAddUserAPI) AddUser(username, 
displayname, password string) (names.UserTag, []byte, error) { if m.blocked { return names.UserTag{}, nil, common.OperationBlockedError("the operation has been blocked") } + if m.addError != nil { + return names.UserTag{}, nil, m.addError + } m.username = username m.displayname = displayname m.password = password - m.access = access - m.models = models if m.failMessage != "" { return names.UserTag{}, nil, errors.New(m.failMessage) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/user/change_password.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/user/change_password.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/user/change_password.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/user/change_password.go 2016-10-13 14:31:49.000000000 +0000 @@ -13,10 +13,14 @@ "github.com/juju/errors" "golang.org/x/crypto/ssh/terminal" "gopkg.in/juju/names.v2" - "gopkg.in/macaroon.v1" + "gopkg.in/macaroon-bakery.v1/httpbakery" + "github.com/juju/juju/api" + "github.com/juju/juju/api/authentication" "github.com/juju/juju/cmd/juju/block" "github.com/juju/juju/cmd/modelcmd" + "github.com/juju/juju/juju" + "github.com/juju/juju/jujuclient" ) const userChangePasswordDoc = ` @@ -31,19 +35,23 @@ juju change-user-password juju change-user-password bob -See also: add-user +See also: + add-user ` func NewChangePasswordCommand() cmd.Command { - return modelcmd.WrapController(&changePasswordCommand{}) + var cmd changePasswordCommand + cmd.newAPIConnection = juju.NewAPIConnection + return modelcmd.WrapController(&cmd) } // changePasswordCommand changes the password for a user. type changePasswordCommand struct { modelcmd.ControllerCommandBase - api ChangePasswordAPI - User string + newAPIConnection func(juju.NewAPIConnectionParams) (api.Connection, error) + api ChangePasswordAPI + User string } // Info implements Command.Info. 
@@ -69,7 +77,6 @@ // ChangePasswordAPI defines the usermanager API methods that the change // password command uses. type ChangePasswordAPI interface { - CreateLocalLoginMacaroon(names.UserTag) (*macaroon.Macaroon, error) SetPassword(username, password string) error Close() error } @@ -103,7 +110,7 @@ return errors.NotValidf("user name %q", c.User) } userTag = names.NewUserTag(c.User) - if userTag.Canonical() != accountDetails.User { + if userTag.Id() != accountDetails.User { // The account details don't correspond to the username // being changed, so we don't need to update the account // locally. @@ -118,48 +125,62 @@ return errors.Errorf("cannot change password for external user %q", userTag) } } - - if accountDetails != nil && accountDetails.Macaroon == "" { - // Generate a macaroon first to guard against I/O failures - // occurring after the password has been changed, preventing - // future logins. - macaroon, err := c.api.CreateLocalLoginMacaroon(userTag) - if err != nil { - return errors.Trace(err) - } - accountDetails.Password = "" - - // TODO(axw) update jujuclient with code for marshalling - // and unmarshalling macaroons as YAML. - macaroonJSON, err := macaroon.MarshalJSON() - if err != nil { - return errors.Trace(err) - } - accountDetails.Macaroon = string(macaroonJSON) - - if err := store.UpdateAccount(controllerName, *accountDetails); err != nil { - return errors.Annotate(err, "failed to update client credentials") - } - } - - if err := c.api.SetPassword(userTag.Canonical(), newPassword); err != nil { + if err := c.api.SetPassword(userTag.Id(), newPassword); err != nil { return block.ProcessBlockedError(err, block.BlockChange) } + if accountDetails == nil { ctx.Infof("Password for %q has been updated.", c.User) } else { + if accountDetails.Password != "" { + // Log back in with macaroon authentication, so we can + // discard the password without having to log back in + // immediately. 
+ if err := c.recordMacaroon(accountDetails.User, newPassword); err != nil { + return errors.Annotate(err, "recording macaroon") + } + // Wipe the password from disk. In the event of an + // error occurring after SetPassword and before the + // account details being updated, the user will be + // able to recover by running "juju login". + accountDetails.Password = "" + if err := store.UpdateAccount(controllerName, *accountDetails); err != nil { + return errors.Annotate(err, "failed to update client credentials") + } + } ctx.Infof("Your password has been updated.") } return nil } +func (c *changePasswordCommand) recordMacaroon(user, password string) error { + accountDetails := &jujuclient.AccountDetails{User: user} + args, err := c.NewAPIConnectionParams( + c.ClientStore(), c.ControllerName(), "", accountDetails, + ) + if err != nil { + return errors.Trace(err) + } + args.DialOpts.BakeryClient.WebPageVisitor = httpbakery.NewMultiVisitor( + authentication.NewVisitor(accountDetails.User, func(string) (string, error) { + return password, nil + }), + args.DialOpts.BakeryClient.WebPageVisitor, + ) + api, err := c.newAPIConnection(args) + if err != nil { + return errors.Annotate(err, "connecting to API") + } + return api.Close() +} + func readAndConfirmPassword(ctx *cmd.Context) (string, error) { // Don't add the carriage returns before readPassword, but add // them directly after the readPassword so any errors are output // on their own lines. 
// // TODO(axw) retry/loop on failure - fmt.Fprint(ctx.Stderr, "password: ") + fmt.Fprint(ctx.Stderr, "new password: ") password, err := readPassword(ctx.Stdin) fmt.Fprint(ctx.Stderr, "\n") if err != nil { @@ -169,7 +190,7 @@ return "", errors.Errorf("you must enter a password") } - fmt.Fprint(ctx.Stderr, "type password again: ") + fmt.Fprint(ctx.Stderr, "type new password again: ") verify, err := readPassword(ctx.Stdin) fmt.Fprint(ctx.Stderr, "\n") if err != nil { diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/user/change_password_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/user/change_password_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/user/change_password_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/user/change_password_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -11,12 +11,11 @@ "github.com/juju/testing" jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" - "gopkg.in/juju/names.v2" - "gopkg.in/macaroon.v1" + "github.com/juju/juju/api" "github.com/juju/juju/cmd/juju/user" + "github.com/juju/juju/juju" "github.com/juju/juju/jujuclient" - "github.com/juju/juju/jujuclient/jujuclienttesting" coretesting "github.com/juju/juju/testing" ) @@ -34,15 +33,22 @@ s.store = s.BaseSuite.store } -func (s *ChangePasswordCommandSuite) run(c *gc.C, args ...string) (*cmd.Context, error) { - changePasswordCommand, _ := user.NewChangePasswordCommandForTest(s.mockAPI, s.store) +func (s *ChangePasswordCommandSuite) run(c *gc.C, args ...string) (*cmd.Context, *juju.NewAPIConnectionParams, error) { + var argsOut juju.NewAPIConnectionParams + newAPIConnection := func(args juju.NewAPIConnectionParams) (api.Connection, error) { + argsOut = args + return mockAPIConnection{}, nil + } + changePasswordCommand, _ := user.NewChangePasswordCommandForTest( + newAPIConnection, s.mockAPI, s.store, + ) ctx := coretesting.Context(c) ctx.Stdin = strings.NewReader("sekrit\nsekrit\n") 
err := coretesting.InitCommand(changePasswordCommand, args) if err != nil { - return ctx, err + return ctx, nil, err } - return ctx, changePasswordCommand.Run(ctx) + return ctx, &argsOut, changePasswordCommand.Run(ctx) } func (s *ChangePasswordCommandSuite) TestInit(c *gc.C) { @@ -65,7 +71,7 @@ }, } { c.Logf("test %d", i) - wrappedCommand, command := user.NewChangePasswordCommandForTest(nil, s.store) + wrappedCommand, command := user.NewChangePasswordCommandForTest(nil, nil, s.store) err := coretesting.InitCommand(wrappedCommand, test.args) if test.errorString == "" { c.Check(command.User, gc.Equals, test.user) @@ -76,71 +82,44 @@ } func (s *ChangePasswordCommandSuite) assertAPICalls(c *gc.C, user, pass string) { - var offset int - if user == "current-user@local" { - s.mockAPI.CheckCall(c, 0, "CreateLocalLoginMacaroon", names.NewUserTag(user)) - offset += 1 - } - s.mockAPI.CheckCall(c, offset, "SetPassword", user, pass) + s.mockAPI.CheckCall(c, 0, "SetPassword", user, pass) } func (s *ChangePasswordCommandSuite) TestChangePassword(c *gc.C) { - context, err := s.run(c) + context, args, err := s.run(c) c.Assert(err, jc.ErrorIsNil) - s.assertAPICalls(c, "current-user@local", "sekrit") + s.assertAPICalls(c, "current-user", "sekrit") c.Assert(coretesting.Stdout(context), gc.Equals, "") c.Assert(coretesting.Stderr(context), gc.Equals, ` -password: -type password again: +new password: +type new password again: Your password has been updated. `[1:]) + // The command should have logged in without a password to get a macaroon. 
+ c.Assert(args.AccountDetails, jc.DeepEquals, &jujuclient.AccountDetails{ + User: "current-user", + }) } func (s *ChangePasswordCommandSuite) TestChangePasswordFail(c *gc.C) { - s.mockAPI.SetErrors(nil, errors.New("failed to do something")) - _, err := s.run(c) + s.mockAPI.SetErrors(errors.New("failed to do something")) + _, _, err := s.run(c) c.Assert(err, gc.ErrorMatches, "failed to do something") - s.assertAPICalls(c, "current-user@local", "sekrit") -} - -// We create a macaroon, but fail to write it to accounts.yaml. -// We should not call SetPassword subsequently. -func (s *ChangePasswordCommandSuite) TestNoSetPasswordAfterFailedWrite(c *gc.C) { - store := jujuclienttesting.NewStubStore() - store.AccountDetailsFunc = func(string) (*jujuclient.AccountDetails, error) { - return &jujuclient.AccountDetails{"user", "old-password", ""}, nil - } - store.ControllerByNameFunc = func(string) (*jujuclient.ControllerDetails, error) { - return &jujuclient.ControllerDetails{}, nil - } - s.store = store - store.SetErrors(nil, errors.New("failed to write")) - - _, err := s.run(c) - c.Assert(err, gc.ErrorMatches, "failed to update client credentials: failed to write") - s.mockAPI.CheckCallNames(c, "CreateLocalLoginMacaroon") // no SetPassword + s.assertAPICalls(c, "current-user", "sekrit") } func (s *ChangePasswordCommandSuite) TestChangeOthersPassword(c *gc.C) { // The checks for user existence and admin rights are tested // at the apiserver level. 
- _, err := s.run(c, "other") + _, _, err := s.run(c, "other") c.Assert(err, jc.ErrorIsNil) - s.assertAPICalls(c, "other@local", "sekrit") + s.assertAPICalls(c, "other", "sekrit") } type mockChangePasswordAPI struct { testing.Stub } -func (m *mockChangePasswordAPI) CreateLocalLoginMacaroon(tag names.UserTag) (*macaroon.Macaroon, error) { - m.MethodCall(m, "CreateLocalLoginMacaroon", tag) - if err := m.NextErr(); err != nil { - return nil, err - } - return fakeLocalLoginMacaroon(tag), nil -} - func (m *mockChangePasswordAPI) SetPassword(username, password string) error { m.MethodCall(m, "SetPassword", username, password) return m.NextErr() @@ -150,10 +129,10 @@ return nil } -func fakeLocalLoginMacaroon(tag names.UserTag) *macaroon.Macaroon { - mac, err := macaroon.New([]byte("abcdefghijklmnopqrstuvwx"), tag.Canonical(), "juju") - if err != nil { - panic(err) - } - return mac +type mockAPIConnection struct { + api.Connection +} + +func (mockAPIConnection) Close() error { + return nil } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/user/export_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/user/export_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/user/export_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/user/export_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -5,7 +5,9 @@ import ( "github.com/juju/cmd" + "github.com/juju/utils/clock" + "github.com/juju/juju/api" "github.com/juju/juju/cmd/modelcmd" "github.com/juju/juju/juju" "github.com/juju/juju/jujuclient" @@ -35,10 +37,10 @@ *disenableUserBase } -func NewAddCommandForTest(api AddUserAPI, store jujuclient.ClientStore, modelApi modelcmd.ModelAPI) (cmd.Command, *AddCommand) { +func NewAddCommandForTest(api AddUserAPI, store jujuclient.ClientStore, modelAPI modelcmd.ModelAPI) (cmd.Command, *AddCommand) { c := &addCommand{api: api} c.SetClientStore(store) - c.SetModelApi(modelApi) + c.SetModelAPI(modelAPI) return 
modelcmd.WrapController(c), &AddCommand{c} } @@ -49,15 +51,24 @@ } func NewShowUserCommandForTest(api UserInfoAPI, store jujuclient.ClientStore) cmd.Command { - cmd := &infoCommand{infoCommandBase: infoCommandBase{api: api}} + cmd := &infoCommand{infoCommandBase: infoCommandBase{ + clock: clock.WallClock, + api: api}} cmd.SetClientStore(store) return modelcmd.WrapController(cmd) } // NewChangePasswordCommand returns a ChangePasswordCommand with the api // and writer provided as specified. -func NewChangePasswordCommandForTest(api ChangePasswordAPI, store jujuclient.ClientStore) (cmd.Command, *ChangePasswordCommand) { - c := &changePasswordCommand{api: api} +func NewChangePasswordCommandForTest( + newAPIConnection func(juju.NewAPIConnectionParams) (api.Connection, error), + api ChangePasswordAPI, + store jujuclient.ClientStore, +) (cmd.Command, *ChangePasswordCommand) { + c := &changePasswordCommand{ + newAPIConnection: newAPIConnection, + api: api, + } c.SetClientStore(store) return modelcmd.WrapController(c), &ChangePasswordCommand{c} } @@ -65,7 +76,7 @@ // NewLoginCommand returns a LoginCommand with the api // and writer provided as specified. func NewLoginCommandForTest( - newLoginAPI func(juju.NewAPIConnectionParams) (LoginAPI, error), + newLoginAPI func(juju.NewAPIConnectionParams) (LoginAPI, ConnectionAPI, error), store jujuclient.ClientStore, ) (cmd.Command, *LoginCommand) { c := &loginCommand{newLoginAPI: newLoginAPI} @@ -98,8 +109,20 @@ } // NewListCommand returns a ListCommand with the api provided as specified. 
-func NewListCommandForTest(api UserInfoAPI, store jujuclient.ClientStore) cmd.Command { - c := &listCommand{infoCommandBase: infoCommandBase{api: api}} +func NewListCommandForTest(api UserInfoAPI, modelAPI modelUsersAPI, store jujuclient.ClientStore, clock clock.Clock) cmd.Command { + c := &listCommand{ + infoCommandBase: infoCommandBase{ + clock: clock, + api: api, + }, + modelUserAPI: modelAPI, + } c.SetClientStore(store) return modelcmd.WrapController(c) } + +// NewWhoAmICommandForTest returns a whoAMI command with a mock store. +func NewWhoAmICommandForTest(store jujuclient.ClientStore) cmd.Command { + c := &whoAmICommand{store: store} + return c +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/user/info.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/user/info.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/user/info.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/user/info.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,16 +4,17 @@ package user import ( - "time" - "github.com/juju/cmd" "github.com/juju/errors" - "launchpad.net/gnuflag" + "github.com/juju/gnuflag" + "github.com/juju/utils/clock" + "gopkg.in/juju/names.v2" "github.com/juju/juju/api/usermanager" "github.com/juju/juju/apiserver/params" "github.com/juju/juju/cmd/juju/common" "github.com/juju/juju/cmd/modelcmd" + "github.com/juju/juju/cmd/output" ) var helpSummary = ` @@ -45,6 +46,7 @@ type infoCommandBase struct { modelcmd.ControllerCommandBase api UserInfoAPI + clock clock.Clock exactTime bool out cmd.Output } @@ -54,7 +56,11 @@ } func NewShowUserCommand() cmd.Command { - return modelcmd.WrapController(&infoCommand{}) + return modelcmd.WrapController(&infoCommand{ + infoCommandBase: infoCommandBase{ + clock: clock.WallClock, + }, + }) } // infoCommand retrieves information about a single user. @@ -66,9 +72,10 @@ // UserInfo defines the serialization behaviour of the user information. 
type UserInfo struct { Username string `yaml:"user-name" json:"user-name"` - DisplayName string `yaml:"display-name" json:"display-name"` - DateCreated string `yaml:"date-created" json:"date-created"` - LastConnection string `yaml:"last-connection" json:"last-connection"` + DisplayName string `yaml:"display-name,omitempty" json:"display-name,omitempty"` + Access string `yaml:"access" json:"access"` + DateCreated string `yaml:"date-created,omitempty" json:"date-created,omitempty"` + LastConnection string `yaml:"last-connection,omitempty" json:"last-connection,omitempty"` Disabled bool `yaml:"disabled,omitempty" json:"disabled,omitempty"` } @@ -85,7 +92,7 @@ // SetFlags implements Command.SetFlags. func (c *infoCommand) SetFlags(f *gnuflag.FlagSet) { c.infoCommandBase.SetFlags(f) - c.out.AddFlags(f, "yaml", cmd.DefaultFormatters) + c.out.AddFlags(f, "yaml", output.DefaultFormatters) } // Init implements Command.Init. @@ -132,21 +139,23 @@ func (c *infoCommandBase) apiUsersToUserInfoSlice(users []params.UserInfo) []UserInfo { var output []UserInfo - // TODO(perrito666) 2016-05-02 lp:1558657 - var now = time.Now() + var now = c.clock.Now() for _, info := range users { outInfo := UserInfo{ - Username: info.Username, - DisplayName: info.DisplayName, - Disabled: info.Disabled, - LastConnection: common.LastConnection(info.LastConnection, now, c.exactTime), + Username: info.Username, + DisplayName: info.DisplayName, + Access: info.Access, + Disabled: info.Disabled, } - if c.exactTime { - outInfo.DateCreated = info.DateCreated.String() - } else { - outInfo.DateCreated = common.UserFriendlyDuration(info.DateCreated, now) + // TODO(wallyworld) record login information about external users. 
+ if names.NewUserTag(info.Username).IsLocal() { + outInfo.LastConnection = common.LastConnection(info.LastConnection, now, c.exactTime) + if c.exactTime { + outInfo.DateCreated = info.DateCreated.String() + } else { + outInfo.DateCreated = common.UserFriendlyDuration(info.DateCreated, now) + } } - output = append(output, outInfo) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/user/info_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/user/info_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/user/info_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/user/info_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -51,11 +51,17 @@ LastConnection: &lastConnection, } switch usernames[0] { - case "current-user@local": + case "current-user": info.Username = "current-user" + info.Access = "add-model" case "foobar": info.Username = "foobar" info.DisplayName = "Foo Bar" + info.Access = "login" + case "fred@external": + info.Username = "fred@external" + info.DisplayName = "Fred External" + info.Access = "add-model" default: return nil, common.ErrPerm } @@ -66,7 +72,7 @@ context, err := testing.RunCommand(c, s.NewShowUserCommand()) c.Assert(err, jc.ErrorIsNil) c.Assert(testing.Stdout(context), gc.Equals, `user-name: current-user -display-name: "" +access: add-model date-created: 1981-02-27 last-connection: 2014-01-01 `) @@ -76,7 +82,7 @@ context, err := testing.RunCommand(c, s.NewShowUserCommand(), "--exact-time") c.Assert(err, jc.ErrorIsNil) c.Assert(testing.Stdout(context), gc.Equals, `user-name: current-user -display-name: "" +access: add-model date-created: 1981-02-27 16:10:05 +0000 UTC last-connection: 2014-01-01 00:00:00 +0000 UTC `) @@ -87,11 +93,21 @@ c.Assert(err, jc.ErrorIsNil) c.Assert(testing.Stdout(context), gc.Equals, `user-name: foobar display-name: Foo Bar +access: login date-created: 1981-02-27 last-connection: 2014-01-01 `) } +func (s *UserInfoCommandSuite) 
TestUserInfoExternalUser(c *gc.C) { + context, err := testing.RunCommand(c, s.NewShowUserCommand(), "fred@external") + c.Assert(err, jc.ErrorIsNil) + c.Assert(testing.Stdout(context), gc.Equals, `user-name: fred@external +display-name: Fred External +access: add-model +`) +} + func (s *UserInfoCommandSuite) TestUserInfoUserDoesNotExist(c *gc.C) { _, err := testing.RunCommand(c, s.NewShowUserCommand(), "barfoo") c.Assert(err, gc.ErrorMatches, "permission denied") @@ -101,7 +117,7 @@ context, err := testing.RunCommand(c, s.NewShowUserCommand(), "--format", "json") c.Assert(err, jc.ErrorIsNil) c.Assert(testing.Stdout(context), gc.Equals, ` -{"user-name":"current-user","display-name":"","date-created":"1981-02-27","last-connection":"2014-01-01"} +{"user-name":"current-user","access":"add-model","date-created":"1981-02-27","last-connection":"2014-01-01"} `[1:]) } @@ -109,7 +125,7 @@ context, err := testing.RunCommand(c, s.NewShowUserCommand(), "foobar", "--format", "json") c.Assert(err, jc.ErrorIsNil) c.Assert(testing.Stdout(context), gc.Equals, ` -{"user-name":"foobar","display-name":"Foo Bar","date-created":"1981-02-27","last-connection":"2014-01-01"} +{"user-name":"foobar","display-name":"Foo Bar","access":"login","date-created":"1981-02-27","last-connection":"2014-01-01"} `[1:]) } @@ -117,7 +133,7 @@ context, err := testing.RunCommand(c, s.NewShowUserCommand(), "--format", "yaml") c.Assert(err, jc.ErrorIsNil) c.Assert(testing.Stdout(context), gc.Equals, `user-name: current-user -display-name: "" +access: add-model date-created: 1981-02-27 last-connection: 2014-01-01 `) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/user/list.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/user/list.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/user/list.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/user/list.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,26 +4,39 @@ package user import ( - "bytes" - 
"fmt" - "text/tabwriter" + "io" + "github.com/juju/ansiterm" "github.com/juju/cmd" "github.com/juju/errors" - "launchpad.net/gnuflag" + "github.com/juju/gnuflag" + "github.com/juju/utils/clock" + "github.com/juju/utils/set" + "gopkg.in/juju/names.v2" "github.com/juju/juju/api/usermanager" + "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/cmd/juju/common" "github.com/juju/juju/cmd/modelcmd" + "github.com/juju/juju/cmd/output" ) var usageListUsersSummary = ` -Lists Juju users allowed to connect to a controller.`[1:] +Lists Juju users allowed to connect to a controller or model.`[1:] var usageListUsersDetails = ` -By default, the tabular format is used. +When used without a model name argument, users relevant to a controller are printed. +When used with a model name, users relevant to the specified model are printed. Examples: + Print the users relevant to the current controller: juju users + + Print the users relevant to the controller "another": + juju users -c another + + Print the users relevant to the model "mymodel": + juju users mymodel See also: add-user @@ -33,13 +46,39 @@ enable-user`[1:] func NewListCommand() cmd.Command { - return modelcmd.WrapController(&listCommand{}) + return modelcmd.WrapController(&listCommand{ + infoCommandBase: infoCommandBase{ + clock: clock.WallClock, + }, + }) } // listCommand shows all the users in the Juju server. type listCommand struct { infoCommandBase - All bool + modelUserAPI modelUsersAPI + + All bool + modelName string + currentUser string +} + +// ModelUsersAPI defines the methods on the client API that the +// users command calls. 
+type modelUsersAPI interface { + Close() error + ModelUserInfo() ([]params.ModelUserInfo, error) +} + +func (c *listCommand) getModelAPI() (modelUsersAPI, error) { + if c.modelUserAPI != nil { + return c.modelUserAPI, nil + } + conn, err := c.NewModelAPIRoot(c.modelName) + if err != nil { + return nil, errors.Trace(err) + } + return conn.Client(), nil } // Info implements Command.Info. @@ -63,8 +102,51 @@ }) } +// Init implements Command.Init. +func (c *listCommand) Init(args []string) (err error) { + c.modelName, err = cmd.ZeroOrOneArgs(args) + if err != nil { + return err + } + return err +} + // Run implements Command.Run. func (c *listCommand) Run(ctx *cmd.Context) (err error) { + if c.out.Name() == "tabular" { + // Only the tabular outputters need to know the current user, + // but both of them do, so do it in one place. + accountDetails, err := c.ClientStore().AccountDetails(c.ControllerName()) + if err != nil { + return err + } + c.currentUser = names.NewUserTag(accountDetails.User).Id() + } + if c.modelName == "" { + return c.controllerUsers(ctx) + } + return c.modelUsers(ctx) +} + +func (c *listCommand) modelUsers(ctx *cmd.Context) error { + client, err := c.getModelAPI() + if err != nil { + return err + } + defer client.Close() + + result, err := client.ModelUserInfo() + if err != nil { + return err + } + if len(result) == 0 { + ctx.Infof("No users to display.") + return nil + } + return c.out.Write(ctx, common.ModelUserInfoFromParams(result, c.clock.Now())) +} + +func (c *listCommand) controllerUsers(ctx *cmd.Context) error { // Note: the InfoCommandBase and the UserInfo struct are defined // in info.go. 
client, err := c.getUserInfoAPI() @@ -78,32 +160,79 @@ return err } + if len(result) == 0 { + ctx.Infof("No users to display.") + return nil + } + return c.out.Write(ctx, c.apiUsersToUserInfoSlice(result)) } -func (c *listCommand) formatTabular(value interface{}) ([]byte, error) { +func (c *listCommand) formatTabular(writer io.Writer, value interface{}) error { + if c.modelName == "" { + return c.formatControllerUsers(writer, value) + } + return c.formatModelUsers(writer, value) +} + +func (c *listCommand) isLoggedInUser(username string) bool { + tag := names.NewUserTag(username) + return tag.Id() == c.currentUser +} + +func (c *listCommand) formatModelUsers(writer io.Writer, value interface{}) error { + users, ok := value.(map[string]common.ModelUserInfo) + if !ok { + return errors.Errorf("expected value of type %T, got %T", users, value) + } + modelUsers := set.NewStrings() + for name := range users { + modelUsers.Add(name) + } + tw := output.TabWriter(writer) + w := output.Wrapper{tw} + w.Println("Name", "Display name", "Access", "Last connection") + for _, name := range modelUsers.SortedValues() { + user := users[name] + + var highlight *ansiterm.Context + userName := name + if c.isLoggedInUser(name) { + userName += "*" + highlight = output.CurrentHighlight + } + w.PrintColor(highlight, userName) + w.Println(user.DisplayName, user.Access, user.LastConnection) + } + tw.Flush() + return nil +} + +func (c *listCommand) formatControllerUsers(writer io.Writer, value interface{}) error { users, valueConverted := value.([]UserInfo) if !valueConverted { - return nil, errors.Errorf("expected value of type %T, got %T", users, value) + return errors.Errorf("expected value of type %T, got %T", users, value) } - var out bytes.Buffer - const ( - // To format things into columns. 
- minwidth = 0 - tabwidth = 1 - padding = 2 - padchar = ' ' - flags = 0 - ) - tw := tabwriter.NewWriter(&out, minwidth, tabwidth, padding, padchar, flags) - fmt.Fprintf(tw, "NAME\tDISPLAY NAME\tDATE CREATED\tLAST CONNECTION\n") + + tw := output.TabWriter(writer) + w := output.Wrapper{tw} + w.Println("Controller: " + c.ControllerName()) + w.Println() + w.Println("Name", "Display name", "Access", "Date created", "Last connection") for _, user := range users { conn := user.LastConnection if user.Disabled { conn += " (disabled)" } - fmt.Fprintf(tw, "%s\t%s\t%s\t%s\n", user.Username, user.DisplayName, user.DateCreated, conn) + var highlight *ansiterm.Context + userName := user.Username + if c.isLoggedInUser(user.Username) { + userName += "*" + highlight = output.CurrentHighlight + } + w.PrintColor(highlight, userName) + w.Println(user.DisplayName, user.Access, user.DateCreated, conn) } tw.Flush() - return out.Bytes(), nil + return nil } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/user/list_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/user/list_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/user/list_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/user/list_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -9,11 +9,13 @@ "github.com/juju/cmd" "github.com/juju/errors" jc "github.com/juju/testing/checkers" + "github.com/juju/utils/clock" gc "gopkg.in/check.v1" "github.com/juju/juju/api/usermanager" "github.com/juju/juju/apiserver/params" "github.com/juju/juju/cmd/juju/user" + "github.com/juju/juju/jujuclient" "github.com/juju/juju/testing" ) @@ -21,47 +23,83 @@ // This suite provides basic tests for the "show-user" command type UserListCommandSuite struct { BaseSuite + + clock fakeClock } var _ = gc.Suite(&UserListCommandSuite{}) func (s *UserListCommandSuite) newUserListCommand() cmd.Command { - return user.NewListCommandForTest(&fakeUserListAPI{}, s.store) + clock := 
&fakeClock{now: time.Date(2016, 9, 15, 12, 0, 0, 0, time.UTC)} + api := &fakeUserListAPI{clock} + return user.NewListCommandForTest(api, api, s.store, clock) } -type fakeUserListAPI struct{} +type fakeUserListAPI struct { + clock *fakeClock +} func (*fakeUserListAPI) Close() error { return nil } +type fakeClock struct { + clock.Clock + now time.Time +} + +func (f *fakeClock) Now() time.Time { + return f.now +} + +func (f *fakeUserListAPI) ModelUserInfo() ([]params.ModelUserInfo, error) { + last1 := time.Date(2015, 3, 20, 0, 0, 0, 0, time.UTC) + last2 := time.Date(2015, 3, 1, 0, 0, 0, 0, time.UTC) + + userlist := []params.ModelUserInfo{ + { + UserName: "admin", + LastConnection: &last1, + Access: "write", + }, { + UserName: "adam", + DisplayName: "Adam", + LastConnection: &last2, + Access: "read", + }, { + UserName: "charlie@ubuntu.com", + DisplayName: "Charlie", + Access: "read", + }, + } + return userlist, nil +} + func (f *fakeUserListAPI) UserInfo(usernames []string, all usermanager.IncludeDisabled) ([]params.UserInfo, error) { if len(usernames) > 0 { return nil, errors.Errorf("expected no usernames, got %d", len(usernames)) } - // lp:1558657 - now := time.Now().UTC().Round(time.Second) + now := f.clock.Now() last1 := time.Date(2014, 1, 1, 0, 0, 0, 0, time.UTC) - // The extra two seconds here are needed to make sure - // we don't get intermittent failures in formatting. - last2 := now.Add(-35*time.Minute + -2*time.Second) + last2 := now.Add(-35 * time.Minute) result := []params.UserInfo{ { Username: "adam", DisplayName: "Adam Zulu", + Access: "login", DateCreated: time.Date(2012, 10, 8, 0, 0, 0, 0, time.UTC), LastConnection: &last1, }, { Username: "barbara", DisplayName: "Barbara Yellow", + Access: "add-model", DateCreated: time.Date(2013, 5, 2, 0, 0, 0, 0, time.UTC), LastConnection: &now, }, { Username: "charlie", DisplayName: "Charlie Xavier", - // The extra two minutes here are needed to make sure - // we don't get intermittent failures in formatting. 
- DateCreated: now.Add(-6*time.Hour + -2*time.Minute), + Access: "superuser", + DateCreated: now.Add(-6 * time.Hour), }, } if all { @@ -76,14 +114,23 @@ return result, nil } +func (s *UserListCommandSuite) SetUpTest(c *gc.C) { + s.BaseSuite.SetUpTest(c) + s.store.Accounts["testing"] = jujuclient.AccountDetails{ + User: "adam", + Password: "password", + } +} + func (s *UserListCommandSuite) TestUserInfo(c *gc.C) { context, err := testing.RunCommand(c, s.newUserListCommand()) c.Assert(err, jc.ErrorIsNil) c.Assert(testing.Stdout(context), gc.Equals, ""+ - "NAME DISPLAY NAME DATE CREATED LAST CONNECTION\n"+ - "adam Adam Zulu 2012-10-08 2014-01-01\n"+ - "barbara Barbara Yellow 2013-05-02 just now\n"+ - "charlie Charlie Xavier 6 hours ago never connected\n"+ + "Controller: testing\n\n"+ + "Name Display name Access Date created Last connection\n"+ + "adam* Adam Zulu login 2012-10-08 2014-01-01\n"+ + "barbara Barbara Yellow add-model 2013-05-02 just now\n"+ + "charlie Charlie Xavier superuser 6 hours ago never connected\n"+ "\n") } @@ -91,23 +138,24 @@ context, err := testing.RunCommand(c, s.newUserListCommand(), "--all") c.Assert(err, jc.ErrorIsNil) c.Assert(testing.Stdout(context), gc.Equals, ""+ - "NAME DISPLAY NAME DATE CREATED LAST CONNECTION\n"+ - "adam Adam Zulu 2012-10-08 2014-01-01\n"+ - "barbara Barbara Yellow 2013-05-02 just now\n"+ - "charlie Charlie Xavier 6 hours ago never connected\n"+ - "davey Davey Willow 2014-10-09 35 minutes ago (disabled)\n"+ + "Controller: testing\n\n"+ + "Name Display name Access Date created Last connection\n"+ + "adam* Adam Zulu login 2012-10-08 2014-01-01\n"+ + "barbara Barbara Yellow add-model 2013-05-02 just now\n"+ + "charlie Charlie Xavier superuser 6 hours ago never connected\n"+ + "davey Davey Willow 2014-10-09 35 minutes ago (disabled)\n"+ "\n") } func (s *UserListCommandSuite) TestUserInfoExactTime(c *gc.C) { context, err := testing.RunCommand(c, s.newUserListCommand(), "--exact-time") c.Assert(err, jc.ErrorIsNil) - 
dateRegex := `\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2} \+0000 UTC` - c.Assert(testing.Stdout(context), gc.Matches, ""+ - "NAME DISPLAY NAME DATE CREATED LAST CONNECTION\n"+ - "adam Adam Zulu 2012-10-08 00:00:00 \\+0000 UTC 2014-01-01 00:00:00 \\+0000 UTC\n"+ - "barbara Barbara Yellow 2013-05-02 00:00:00 \\+0000 UTC "+dateRegex+"\n"+ - "charlie Charlie Xavier "+dateRegex+" never connected\n"+ + c.Assert(testing.Stdout(context), gc.Equals, ""+ + "Controller: testing\n\n"+ + "Name Display name Access Date created Last connection\n"+ + "adam* Adam Zulu login 2012-10-08 00:00:00 +0000 UTC 2014-01-01 00:00:00 +0000 UTC\n"+ + "barbara Barbara Yellow add-model 2013-05-02 00:00:00 +0000 UTC 2016-09-15 12:00:00 +0000 UTC\n"+ + "charlie Charlie Xavier superuser 2016-09-15 06:00:00 +0000 UTC never connected\n"+ "\n") } @@ -115,9 +163,9 @@ context, err := testing.RunCommand(c, s.newUserListCommand(), "--format", "json") c.Assert(err, jc.ErrorIsNil) c.Assert(testing.Stdout(context), gc.Equals, "["+ - `{"user-name":"adam","display-name":"Adam Zulu","date-created":"2012-10-08","last-connection":"2014-01-01"},`+ - `{"user-name":"barbara","display-name":"Barbara Yellow","date-created":"2013-05-02","last-connection":"just now"},`+ - `{"user-name":"charlie","display-name":"Charlie Xavier","date-created":"6 hours ago","last-connection":"never connected"}`+ + `{"user-name":"adam","display-name":"Adam Zulu","access":"login","date-created":"2012-10-08","last-connection":"2014-01-01"},`+ + `{"user-name":"barbara","display-name":"Barbara Yellow","access":"add-model","date-created":"2013-05-02","last-connection":"just now"},`+ + `{"user-name":"charlie","display-name":"Charlie Xavier","access":"superuser","date-created":"6 hours ago","last-connection":"never connected"}`+ "]\n") } @@ -127,19 +175,60 @@ c.Assert(testing.Stdout(context), gc.Equals, ""+ "- user-name: adam\n"+ " display-name: Adam Zulu\n"+ + " access: login\n"+ " date-created: 2012-10-08\n"+ " last-connection: 2014-01-01\n"+ "- 
user-name: barbara\n"+ " display-name: Barbara Yellow\n"+ + " access: add-model\n"+ " date-created: 2013-05-02\n"+ " last-connection: just now\n"+ "- user-name: charlie\n"+ " display-name: Charlie Xavier\n"+ + " access: superuser\n"+ " date-created: 6 hours ago\n"+ " last-connection: never connected\n") } +func (s *UserListCommandSuite) TestModelUsers(c *gc.C) { + context, err := testing.RunCommand(c, s.newUserListCommand(), "admin") + c.Assert(err, jc.ErrorIsNil) + c.Assert(testing.Stdout(context), gc.Equals, ""+ + "Name Display name Access Last connection\n"+ + "adam* Adam read 2015-03-01\n"+ + "admin write 2015-03-20\n"+ + "charlie@ubuntu.com Charlie read never connected\n"+ + "\n") +} + +func (s *UserListCommandSuite) TestModelUsersFormatJson(c *gc.C) { + context, err := testing.RunCommand(c, s.newUserListCommand(), "admin", "--format", "json") + c.Assert(err, jc.ErrorIsNil) + c.Assert(testing.Stdout(context), gc.Equals, "{"+ + `"adam":{"display-name":"Adam","access":"read","last-connection":"2015-03-01"},`+ + `"admin":{"access":"write","last-connection":"2015-03-20"},`+ + `"charlie@ubuntu.com":{"display-name":"Charlie","access":"read","last-connection":"never connected"}`+ + "}\n") +} + +func (s *UserListCommandSuite) TestModelUsersInfoFormatYaml(c *gc.C) { + context, err := testing.RunCommand(c, s.newUserListCommand(), "admin", "--format", "yaml") + c.Assert(err, jc.ErrorIsNil) + c.Assert(testing.Stdout(context), gc.Equals, ""+ + "adam:\n"+ + " display-name: Adam\n"+ + " access: read\n"+ + " last-connection: 2015-03-01\n"+ + "admin:\n"+ + " access: write\n"+ + " last-connection: 2015-03-20\n"+ + "charlie@ubuntu.com:\n"+ + " display-name: Charlie\n"+ + " access: read\n"+ + " last-connection: never connected\n") +} + func (s *UserListCommandSuite) TestTooManyArgs(c *gc.C) { - _, err := testing.RunCommand(c, s.newUserListCommand(), "whoops") + _, err := testing.RunCommand(c, s.newUserListCommand(), "model", "whoops") c.Assert(err, gc.ErrorMatches, `unrecognized 
args: \["whoops"\]`) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/user/login.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/user/login.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/user/login.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/user/login.go 2016-10-13 14:31:49.000000000 +0000 @@ -9,9 +9,9 @@ "github.com/juju/cmd" "github.com/juju/errors" "gopkg.in/juju/names.v2" - "gopkg.in/macaroon.v1" "github.com/juju/juju/api/usermanager" + "github.com/juju/juju/apiserver/params" "github.com/juju/juju/cmd/modelcmd" "github.com/juju/juju/juju" "github.com/juju/juju/jujuclient" @@ -26,21 +26,22 @@ juju login bob -See also: enable-user - disable-user - logout +See also: + disable-user + enable-user + logout ` // NewLoginCommand returns a new cmd.Command to handle "juju login". func NewLoginCommand() cmd.Command { return modelcmd.WrapController(&loginCommand{ - newLoginAPI: func(args juju.NewAPIConnectionParams) (LoginAPI, error) { + newLoginAPI: func(args juju.NewAPIConnectionParams) (LoginAPI, ConnectionAPI, error) { api, err := juju.NewAPIConnection(args) if err != nil { - return nil, errors.Trace(err) + return nil, nil, errors.Trace(err) } - return usermanager.NewClient(api), nil + return usermanager.NewClient(api), api, nil }, }) } @@ -48,7 +49,7 @@ // loginCommand changes the password for a user. type loginCommand struct { modelcmd.ControllerCommandBase - newLoginAPI func(juju.NewAPIConnectionParams) (LoginAPI, error) + newLoginAPI func(juju.NewAPIConnectionParams) (LoginAPI, ConnectionAPI, error) User string } @@ -74,19 +75,52 @@ // LoginAPI provides the API methods that the login command uses. type LoginAPI interface { - CreateLocalLoginMacaroon(names.UserTag) (*macaroon.Macaroon, error) Close() error } +// ConnectionAPI provides relevant API methods off the underlying connection. 
+type ConnectionAPI interface { + AuthTag() names.Tag + ControllerAccess() string +} + // Run implements Command.Run. func (c *loginCommand) Run(ctx *cmd.Context) error { controllerName := c.ControllerName() store := c.ClientStore() + accountDetails, err := store.AccountDetails(controllerName) + if err != nil && !errors.IsNotFound(err) { + return errors.Trace(err) + } user := c.User + if user == "" && accountDetails == nil { + // The username has not been specified, and there + // is no current account. See if the user can log + // in with macaroons. + args, err := c.NewAPIConnectionParams( + store, controllerName, "", + &jujuclient.AccountDetails{}, + ) + if err != nil { + return errors.Trace(err) + } + api, conn, err := c.newLoginAPI(args) + if err == nil { + authTag := conn.AuthTag() + api.Close() + ctx.Infof("You are now logged in to %q as %q.", controllerName, authTag.Id()) + return nil + } + if !params.IsCodeNoCreds(err) { + return errors.Annotate(err, "creating API connection") + } + // CodeNoCreds was returned, which means that external + // users are not supported. Fall back to prompting the + // user for their username and password. + } + if user == "" { - // TODO(rog) Try macaroon login first before - // falling back to prompting for username. // The username has not been specified, so prompt for it. fmt.Fprint(ctx.Stderr, "username: ") var err error @@ -106,53 +140,33 @@ // Make sure that the client is not already logged in, // or if it is, that it is logged in as the specified // user. - accountDetails, err := store.AccountDetails(controllerName) - if err != nil && !errors.IsNotFound(err) { - return errors.Trace(err) - } - if accountDetails != nil && accountDetails.User != userTag.Canonical() { + if accountDetails != nil && accountDetails.User != userTag.Id() { return errors.New(`already logged in Run "juju logout" first before attempting to log in as a different user. `) } - // Read password from the terminal, and attempt to log in using that. 
- fmt.Fprint(ctx.Stderr, "password: ") - password, err := readPassword(ctx.Stdin) - fmt.Fprintln(ctx.Stderr) - if err != nil { - return errors.Trace(err) - } + + // Log in without specifying a password in the account details. This + // will trigger macaroon-based authentication, which will prompt the + // user for their password. accountDetails = &jujuclient.AccountDetails{ - User: userTag.Canonical(), - Password: password, + User: userTag.Id(), } params, err := c.NewAPIConnectionParams(store, controllerName, "", accountDetails) if err != nil { return errors.Trace(err) } - api, err := c.newLoginAPI(params) + api, conn, err := c.newLoginAPI(params) if err != nil { return errors.Annotate(err, "creating API connection") } defer api.Close() - // Create a new local login macaroon, and update the account details - // in the client store, removing the recorded password (if any) and - // storing the macaroon. - macaroon, err := api.CreateLocalLoginMacaroon(userTag) - if err != nil { - return errors.Annotate(err, "failed to create a temporary credential") - } - macaroonJSON, err := macaroon.MarshalJSON() - if err != nil { - return errors.Annotate(err, "marshalling temporary credential to JSON") - } - accountDetails.Password = "" - accountDetails.Macaroon = string(macaroonJSON) + accountDetails.LastKnownAccess = conn.ControllerAccess() if err := store.UpdateAccount(controllerName, *accountDetails); err != nil { return errors.Annotate(err, "failed to record temporary credential") } - ctx.Infof("You are now logged in to %q as %q.", controllerName, userTag.Canonical()) + ctx.Infof("You are now logged in to %q as %q.", controllerName, userTag.Id()) return nil } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/user/login_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/user/login_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/user/login_test.go 2016-08-16 08:56:25.000000000 +0000 +++ 
juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/user/login_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,7 +4,6 @@ package user_test import ( - "errors" "strings" "github.com/juju/cmd" @@ -12,6 +11,7 @@ gc "gopkg.in/check.v1" "gopkg.in/juju/names.v2" + "github.com/juju/juju/apiserver/params" "github.com/juju/juju/cmd/juju/user" "github.com/juju/juju/juju" "github.com/juju/juju/jujuclient" @@ -20,7 +20,8 @@ type LoginCommandSuite struct { BaseSuite - mockAPI *mockLoginAPI + mockAPI *mockLoginAPI + loginErr error } var _ = gc.Suite(&LoginCommandSuite{}) @@ -28,16 +29,22 @@ func (s *LoginCommandSuite) SetUpTest(c *gc.C) { s.BaseSuite.SetUpTest(c) s.mockAPI = &mockLoginAPI{} + s.loginErr = nil } func (s *LoginCommandSuite) run(c *gc.C, stdin string, args ...string) (*cmd.Context, juju.NewAPIConnectionParams, error) { var argsOut juju.NewAPIConnectionParams - cmd, _ := user.NewLoginCommandForTest(func(args juju.NewAPIConnectionParams) (user.LoginAPI, error) { + cmd, _ := user.NewLoginCommandForTest(func(args juju.NewAPIConnectionParams) (user.LoginAPI, user.ConnectionAPI, error) { argsOut = args // The account details are modified in place, so take a copy. accountDetails := *argsOut.AccountDetails argsOut.AccountDetails = &accountDetails - return s.mockAPI, nil + if s.loginErr != nil { + err := s.loginErr + s.loginErr = nil + return nil, nil, err + } + return s.mockAPI, s.mockAPI, nil }, s.store) ctx := coretesting.Context(c) if stdin == "" { @@ -87,15 +94,12 @@ c.Assert(err, jc.ErrorIsNil) c.Assert(coretesting.Stdout(context), gc.Equals, "") c.Assert(coretesting.Stderr(context), gc.Equals, ` -username: password: -You are now logged in to "testing" as "current-user@local". +username: You are now logged in to "testing" as "current-user". 
`[1:], ) - s.assertStorePassword(c, "current-user@local", "") - s.assertStoreMacaroon(c, "current-user@local", fakeLocalLoginMacaroon(names.NewUserTag("current-user@local"))) + s.assertStorePassword(c, "current-user", "", "superuser") c.Assert(args.AccountDetails, jc.DeepEquals, &jujuclient.AccountDetails{ - User: "current-user@local", - Password: "sekrit", + User: "current-user", }) } @@ -106,15 +110,12 @@ c.Assert(err, jc.ErrorIsNil) c.Assert(coretesting.Stdout(context), gc.Equals, "") c.Assert(coretesting.Stderr(context), gc.Equals, ` -password: -You are now logged in to "testing" as "new-user@local". +You are now logged in to "testing" as "new-user". `[1:], ) - s.assertStorePassword(c, "new-user@local", "") - s.assertStoreMacaroon(c, "new-user@local", fakeLocalLoginMacaroon(names.NewUserTag("new-user@local"))) + s.assertStorePassword(c, "new-user", "", "superuser") c.Assert(args.AccountDetails, jc.DeepEquals, &jujuclient.AccountDetails{ - User: "new-user@local", - Password: "sekrit", + User: "new-user", }) } @@ -131,14 +132,42 @@ `) } -func (s *LoginCommandSuite) TestLoginFail(c *gc.C) { - s.mockAPI.SetErrors(errors.New("failed to do something")) - _, _, err := s.run(c, "", "current-user") - c.Assert(err, gc.ErrorMatches, "failed to create a temporary credential: failed to do something") - s.assertStorePassword(c, "current-user@local", "old-password") - s.assertStoreMacaroon(c, "current-user@local", nil) +func (s *LoginCommandSuite) TestLoginWithMacaroons(c *gc.C) { + err := s.store.RemoveAccount("testing") + c.Assert(err, jc.ErrorIsNil) + context, args, err := s.run(c, "") + c.Assert(err, jc.ErrorIsNil) + c.Assert(coretesting.Stdout(context), gc.Equals, "") + c.Assert(coretesting.Stderr(context), gc.Equals, ` +You are now logged in to "testing" as "user@external". 
+`[1:], + ) + c.Assert(args.AccountDetails, jc.DeepEquals, &jujuclient.AccountDetails{}) +} + +func (s *LoginCommandSuite) TestLoginWithMacaroonsNotSupported(c *gc.C) { + err := s.store.RemoveAccount("testing") + c.Assert(err, jc.ErrorIsNil) + s.loginErr = ¶ms.Error{Code: params.CodeNoCreds, Message: "barf"} + context, _, err := s.run(c, "new-user\nsekrit\n") + c.Assert(err, jc.ErrorIsNil) + c.Assert(coretesting.Stdout(context), gc.Equals, "") + c.Assert(coretesting.Stderr(context), gc.Equals, ` +username: You are now logged in to "testing" as "new-user". +`[1:], + ) +} + +type mockLoginAPI struct{} + +func (*mockLoginAPI) Close() error { + return nil +} + +func (*mockLoginAPI) AuthTag() names.Tag { + return names.NewUserTag("user@external") } -type mockLoginAPI struct { - mockChangePasswordAPI +func (*mockLoginAPI) ControllerAccess() string { + return "superuser" } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/user/logout.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/user/logout.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/user/logout.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/user/logout.go 2016-10-13 14:31:49.000000000 +0000 @@ -6,7 +6,7 @@ import ( "github.com/juju/cmd" "github.com/juju/errors" - "launchpad.net/gnuflag" + "github.com/juju/gnuflag" "github.com/juju/juju/cmd/modelcmd" "github.com/juju/juju/jujuclient" @@ -29,9 +29,9 @@ Examples: juju logout -See Also: - juju change-user-password - juju login +See also: + change-user-password + login ` @@ -116,7 +116,7 @@ // they know their password. If they have just bootstrapped, // they will have a randomly generated password which they will // be unaware of. 
- if accountDetails.Macaroon == "" && accountDetails.Password != "" && !c.Force { + if accountDetails.Password != "" && !c.Force { return errors.New(`preventing account loss It appears that you have not changed the password for @@ -131,6 +131,14 @@ `) } + details, err := store.ControllerByName(controllerName) + if err != nil { + return errors.Trace(err) + } + if err := c.ClearControllerMacaroons(details.APIEndpoints); err != nil { + return errors.Trace(err) + } + // Remove the account credentials. if err := store.RemoveAccount(controllerName); err != nil { return errors.Annotate(err, "failed to clear credentials") diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/user/logout_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/user/logout_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/user/logout_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/user/logout_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,9 +4,16 @@ package user_test import ( + "net/http" + "net/url" + "path/filepath" + "time" + "github.com/juju/cmd" "github.com/juju/errors" + "github.com/juju/persistent-cookiejar" jc "github.com/juju/testing/checkers" + "github.com/juju/utils" gc "gopkg.in/check.v1" "github.com/juju/juju/cmd/juju/user" @@ -55,9 +62,31 @@ } func (s *LogoutCommandSuite) TestLogout(c *gc.C) { - details := s.store.Accounts["testing"] - details.Macaroon = "a-macaroon" - s.store.Accounts["testing"] = details + cookiefile := filepath.Join(utils.Home(), ".go-cookies") + jar, err := cookiejar.New(&cookiejar.Options{Filename: cookiefile}) + c.Assert(err, jc.ErrorIsNil) + cont, err := s.store.CurrentController() + c.Assert(err, jc.ErrorIsNil) + host := s.store.Controllers[cont].APIEndpoints[0] + u, err := url.Parse("https://" + host) + c.Assert(err, jc.ErrorIsNil) + other, err := url.Parse("https://www.example.com") + c.Assert(err, jc.ErrorIsNil) + + // we hav to set the expiration or it's not considered 
a "persistent" + // cookie, and the jar won't save it. + jar.SetCookies(u, []*http.Cookie{{ + Name: "foo", + Value: "bar", + Expires: time.Now().Add(time.Hour * 24)}}) + jar.SetCookies(other, []*http.Cookie{{ + Name: "baz", + Value: "bat", + Expires: time.Now().Add(time.Hour * 24)}}) + err = jar.Save() + c.Assert(err, jc.ErrorIsNil) + + s.setPassword(c, "testing", "") ctx, err := s.run(c) c.Assert(err, jc.ErrorIsNil) c.Assert(coretesting.Stdout(ctx), gc.Equals, "") @@ -67,14 +96,19 @@ ) _, err = s.store.AccountDetails("testing") c.Assert(err, jc.Satisfies, errors.IsNotFound) + + jar, err = cookiejar.New(&cookiejar.Options{Filename: cookiefile}) + c.Assert(err, jc.ErrorIsNil) + cookies := jar.Cookies(other) + c.Assert(cookies, gc.HasLen, 1) } func (s *LogoutCommandSuite) TestLogoutCount(c *gc.C) { // Create multiple controllers. We'll log out of each one // to observe the messages printed out by "logout". + s.setPassword(c, "testing", "") controllers := []string{"testing", "testing2", "testing3"} details := s.store.Accounts["testing"] - details.Macaroon = "a-macaroon" for _, controller := range controllers { s.store.Controllers[controller] = s.store.Controllers["testing"] err := s.store.UpdateAccount(controller, details) @@ -95,9 +129,8 @@ } } -func (s *LogoutCommandSuite) TestLogoutWithoutMacaroon(c *gc.C) { - s.assertStorePassword(c, "current-user@local", "old-password") - s.assertStoreMacaroon(c, "current-user@local", nil) +func (s *LogoutCommandSuite) TestLogoutWithPassword(c *gc.C) { + s.assertStorePassword(c, "current-user", "old-password", "") _, err := s.run(c) c.Assert(err, gc.NotNil) c.Assert(err.Error(), gc.Equals, `preventing account loss @@ -114,9 +147,8 @@ `) } -func (s *LogoutCommandSuite) TestLogoutWithoutMacaroonForced(c *gc.C) { - s.assertStorePassword(c, "current-user@local", "old-password") - s.assertStoreMacaroon(c, "current-user@local", nil) +func (s *LogoutCommandSuite) TestLogoutWithPasswordForced(c *gc.C) { + s.assertStorePassword(c, 
"current-user", "old-password", "") _, err := s.run(c, "--force") c.Assert(err, jc.ErrorIsNil) _, err = s.store.AccountDetails("testing") diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/user/remove.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/user/remove.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/user/remove.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/user/remove.go 2016-10-13 14:31:49.000000000 +0000 @@ -10,7 +10,7 @@ "github.com/juju/cmd" "github.com/juju/errors" - "launchpad.net/gnuflag" + "github.com/juju/gnuflag" "github.com/juju/juju/cmd/juju/block" "github.com/juju/juju/cmd/modelcmd" @@ -84,7 +84,7 @@ // Init implements Command.Init. func (c *removeCommand) Init(args []string) error { if len(args) == 0 { - return fmt.Errorf("no username supplied") + return errors.Errorf("no username supplied") } c.UserName = args[0] return cmd.CheckEmpty(args[1:]) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/user/user_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/user/user_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/user/user_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/user/user_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -6,7 +6,6 @@ import ( jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" - "gopkg.in/macaroon.v1" "github.com/juju/juju/jujuclient" "github.com/juju/juju/jujuclient/jujuclienttesting" @@ -26,29 +25,25 @@ s.store.Controllers["testing"] = jujuclient.ControllerDetails{ APIEndpoints: []string{"127.0.0.1:12345"}, CACert: testing.CACert, - ControllerUUID: testing.ModelTag.Id(), + ControllerUUID: testing.ControllerTag.Id(), } s.store.Accounts["testing"] = jujuclient.AccountDetails{ - User: "current-user@local", + User: "current-user", Password: "old-password", } } -func (s *BaseSuite) assertStorePassword(c *gc.C, user, pass string) { - details, err := 
s.store.AccountDetails("testing") - c.Assert(err, jc.ErrorIsNil) - c.Assert(details.User, gc.Equals, user) - c.Assert(details.Password, gc.Equals, pass) +func (s *BaseSuite) setPassword(c *gc.C, controller, pass string) { + details, ok := s.store.Accounts[controller] + c.Assert(ok, jc.IsTrue) + details.Password = pass + s.store.Accounts[controller] = details } -func (s *BaseSuite) assertStoreMacaroon(c *gc.C, user string, mac *macaroon.Macaroon) { +func (s *BaseSuite) assertStorePassword(c *gc.C, user, pass, access string) { details, err := s.store.AccountDetails("testing") c.Assert(err, jc.ErrorIsNil) - if mac == nil { - c.Assert(details.Macaroon, gc.Equals, "") - return - } - macaroonJSON, err := mac.MarshalJSON() - c.Assert(err, jc.ErrorIsNil) - c.Assert(details.Macaroon, gc.Equals, string(macaroonJSON)) + c.Assert(details.User, gc.Equals, user) + c.Assert(details.Password, gc.Equals, pass) + c.Assert(details.LastKnownAccess, gc.Equals, access) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/user/whoami.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/user/whoami.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/user/whoami.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/user/whoami.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,127 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package user + +import ( + "fmt" + "io" + + "github.com/juju/cmd" + "github.com/juju/errors" + "github.com/juju/gnuflag" + "gopkg.in/juju/names.v2" + + "github.com/juju/juju/cmd/juju/common" + "github.com/juju/juju/cmd/modelcmd" + "github.com/juju/juju/cmd/output" + "github.com/juju/juju/jujuclient" +) + +var whoAmIDetails = ` +Display the current controller, model and logged in user name. + +Examples: + juju whoami + +See also: + controllers + login + logout + models + users +`[1:] + +// NewWhoAmICommand returns a command to print login details. 
+func NewWhoAmICommand() cmd.Command { + cmd := &whoAmICommand{ + store: jujuclient.NewFileClientStore(), + } + return modelcmd.WrapBase(cmd) +} + +// Info implements Command.Info +func (c *whoAmICommand) Info() *cmd.Info { + return &cmd.Info{ + Name: "whoami", + Purpose: "Print current login details", + Doc: whoAmIDetails, + } +} + +// SetFlags implements Command.SetFlags. +func (c *whoAmICommand) SetFlags(f *gnuflag.FlagSet) { + c.JujuCommandBase.SetFlags(f) + c.out.AddFlags(f, "tabular", map[string]cmd.Formatter{ + "yaml": cmd.FormatYaml, + "json": cmd.FormatJson, + "tabular": formatWhoAmITabular, + }) +} + +type whoAmI struct { + ControllerName string `yaml:"controller" json:"controller"` + ModelName string `yaml:"model,omitempty" json:"model,omitempty"` + UserName string `yaml:"user" json:"user"` +} + +func formatWhoAmITabular(writer io.Writer, value interface{}) error { + details, ok := value.(whoAmI) + if !ok { + return errors.Errorf("expected value of type %T, got %T", details, value) + } + tw := output.TabWriter(writer) + fmt.Fprintf(tw, "Controller:\t%s\n", details.ControllerName) + modelName := details.ModelName + if modelName == "" { + modelName = "" + } + fmt.Fprintf(tw, "Model:\t%s\n", modelName) + fmt.Fprintf(tw, "User:\t%s", details.UserName) + return tw.Flush() +} + +// Run implements Command.Run +func (c *whoAmICommand) Run(ctx *cmd.Context) error { + controllerName, err := c.store.CurrentController() + if err != nil && !errors.IsNotFound(err) { + return err + } + if err != nil { + fmt.Fprintln(ctx.Stderr, "There is no current controller.\nRun juju list-controllers to see available controllers.") + return nil + } + modelName, err := c.store.CurrentModel(controllerName) + if err != nil && !errors.IsNotFound(err) { + return err + } + userDetails, err := c.store.AccountDetails(controllerName) + if err != nil && !errors.IsNotFound(err) { + return err + } + if err != nil { + fmt.Fprintf(ctx.Stderr, "You are not logged in to controller %q and model 
%q.\nRun juju login if you want to login.\n", controllerName, modelName) + return nil + } + // Only qualify model name if there is a current model. + if modelName != "" { + if unqualifiedModelName, owner, err := jujuclient.SplitModelName(modelName); err == nil { + user := names.NewUserTag(userDetails.User) + modelName = common.OwnerQualifiedModelName(unqualifiedModelName, owner, user) + } + } + + result := whoAmI{ + ControllerName: controllerName, + ModelName: modelName, + UserName: userDetails.User, + } + return c.out.Write(ctx, result) +} + +type whoAmICommand struct { + modelcmd.JujuCommandBase + + out cmd.Output + store jujuclient.ClientStore +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/user/whoami_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/user/whoami_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/user/whoami_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/user/whoami_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,189 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package user_test + +import ( + "github.com/juju/cmd" + "github.com/juju/errors" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/cmd/juju/user" + "github.com/juju/juju/jujuclient" + "github.com/juju/juju/jujuclient/jujuclienttesting" + "github.com/juju/juju/testing" +) + +type WhoAmITestSuite struct { + testing.BaseSuite + store jujuclient.ClientStore + expectedOutput string + expectedErr string +} + +var _ = gc.Suite(&WhoAmITestSuite{}) + +func (s *WhoAmITestSuite) TestEmptyStore(c *gc.C) { + s.expectedOutput = ` +There is no current controller. +Run juju list-controllers to see available controllers. 
+`[1:] + + s.store = jujuclienttesting.NewMemStore() + s.assertWhoAmI(c) +} + +func (s *WhoAmITestSuite) TestNoCurrentController(c *gc.C) { + s.expectedOutput = ` +There is no current controller. +Run juju list-controllers to see available controllers. +`[1:] + + s.store = &jujuclienttesting.MemStore{ + Controllers: map[string]jujuclient.ControllerDetails{ + "controller": {}, + }, + } + s.assertWhoAmI(c) +} + +func (s *WhoAmITestSuite) TestNoCurrentModel(c *gc.C) { + s.expectedOutput = ` +Controller: controller +Model: +User: admin +`[1:] + + s.store = &jujuclienttesting.MemStore{ + CurrentControllerName: "controller", + Controllers: map[string]jujuclient.ControllerDetails{ + "controller": {}, + }, + Models: map[string]*jujuclient.ControllerModels{ + "controller": { + Models: map[string]jujuclient.ModelDetails{ + "admin/model": {"model-uuid"}, + }, + }, + }, + Accounts: map[string]jujuclient.AccountDetails{ + "controller": { + User: "admin", + }, + }, + } + s.assertWhoAmI(c) +} + +func (s *WhoAmITestSuite) TestNoCurrentUser(c *gc.C) { + s.expectedOutput = ` +You are not logged in to controller "controller" and model "admin/model". +Run juju login if you want to login. 
+`[1:] + + s.store = &jujuclienttesting.MemStore{ + CurrentControllerName: "controller", + Controllers: map[string]jujuclient.ControllerDetails{ + "controller": {}, + }, + Models: map[string]*jujuclient.ControllerModels{ + "controller": { + Models: map[string]jujuclient.ModelDetails{ + "admin/model": {"model-uuid"}, + }, + CurrentModel: "admin/model", + }, + }, + } + s.assertWhoAmI(c) +} + +func (s *WhoAmITestSuite) assertWhoAmIForUser(c *gc.C, user, format string) { + s.store = &jujuclienttesting.MemStore{ + CurrentControllerName: "controller", + Controllers: map[string]jujuclient.ControllerDetails{ + "controller": {}, + }, + Models: map[string]*jujuclient.ControllerModels{ + "controller": { + Models: map[string]jujuclient.ModelDetails{ + "admin/model": {"model-uuid"}, + }, + CurrentModel: "admin/model", + }, + }, + Accounts: map[string]jujuclient.AccountDetails{ + "controller": { + User: user, + }, + }, + } + s.assertWhoAmI(c, "--format", format) +} + +func (s *WhoAmITestSuite) TestWhoAmISameUser(c *gc.C) { + s.expectedOutput = ` +Controller: controller +Model: model +User: admin +`[1:] + s.assertWhoAmIForUser(c, "admin", "tabular") +} + +func (s *WhoAmITestSuite) TestWhoAmIYaml(c *gc.C) { + s.expectedOutput = ` +controller: controller +model: model +user: admin +`[1:] + s.assertWhoAmIForUser(c, "admin", "yaml") +} + +func (s *WhoAmITestSuite) TestWhoAmIJson(c *gc.C) { + s.expectedOutput = ` +{"controller":"controller","model":"model","user":"admin"} +`[1:] + s.assertWhoAmIForUser(c, "admin", "json") +} + +func (s *WhoAmITestSuite) TestWhoAmIDifferentUsersModel(c *gc.C) { + s.expectedOutput = ` +Controller: controller +Model: admin/model +User: bob +`[1:] + s.assertWhoAmIForUser(c, "bob", "tabular") +} + +func (s *WhoAmITestSuite) TestFromStoreErr(c *gc.C) { + msg := "fail getting current controller" + errStore := jujuclienttesting.NewStubStore() + errStore.SetErrors(errors.New(msg)) + s.store = errStore + s.expectedErr = msg + s.assertWhoAmIFailed(c) + 
errStore.CheckCallNames(c, "CurrentController") +} + +func (s *WhoAmITestSuite) runWhoAmI(c *gc.C, args ...string) (*cmd.Context, error) { + return testing.RunCommand(c, user.NewWhoAmICommandForTest(s.store), args...) +} + +func (s *WhoAmITestSuite) assertWhoAmIFailed(c *gc.C, args ...string) { + _, err := s.runWhoAmI(c, args...) + c.Assert(err, gc.ErrorMatches, s.expectedErr) +} + +func (s *WhoAmITestSuite) assertWhoAmI(c *gc.C, args ...string) string { + context, err := s.runWhoAmI(c, args...) + c.Assert(err, jc.ErrorIsNil) + output := testing.Stdout(context) + if output == "" { + output = testing.Stderr(context) + } + if s.expectedOutput != "" { + c.Assert(output, gc.Equals, s.expectedOutput) + } + return output +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/version_canary.go juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/version_canary.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/juju/version_canary.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/juju/version_canary.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,10 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +// +build !go1.6 + +package main + +// This line intentionally does not compile. This file will only be compiled if +// you are compiling with a version of Go that is lower than the one we support. 
+var requiredGoVersion = This_project_requires_Go_1_6 diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/jujud/agent/agent.go juju-core-2.0.0/src/github.com/juju/juju/cmd/jujud/agent/agent.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/jujud/agent/agent.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/jujud/agent/agent.go 2016-10-13 14:31:49.000000000 +0000 @@ -11,8 +11,8 @@ "github.com/juju/cmd" "github.com/juju/errors" + "github.com/juju/gnuflag" "gopkg.in/juju/names.v2" - "launchpad.net/gnuflag" "github.com/juju/juju/agent" "github.com/juju/juju/cmd/jujud/util" diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/jujud/agent/agenttest/agent.go juju-core-2.0.0/src/github.com/juju/juju/cmd/jujud/agent/agenttest/agent.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/jujud/agent/agenttest/agent.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/jujud/agent/agenttest/agent.go 2016-10-13 14:31:49.000000000 +0000 @@ -146,6 +146,7 @@ StateAddresses: stateInfo.Addrs, APIAddresses: apiInfo.Addrs, CACert: stateInfo.CACert, + Controller: coretesting.ControllerTag, Model: apiInfo.ModelTag, }) c.Assert(err, jc.ErrorIsNil) @@ -206,6 +207,7 @@ StateAddresses: stateInfo.Addrs, APIAddresses: apiAddr, CACert: stateInfo.CACert, + Controller: s.State.ControllerTag(), Model: modelTag, }, params.StateServingInfo{ @@ -247,7 +249,7 @@ c.Assert(err, jc.ErrorIsNil) info, ok := config.MongoInfo() c.Assert(ok, jc.IsTrue) - st, err := state.Open(config.Model(), info, mongotest.DialOpts(), stateenvirons.GetNewPolicyFunc( + st, err := state.Open(config.Model(), config.Controller(), info, mongotest.DialOpts(), stateenvirons.GetNewPolicyFunc( stateenvirons.GetNewEnvironFunc(environs.New), )) c.Assert(err, jc.ErrorIsNil) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/jujud/agent/engine/apiagent.go juju-core-2.0.0/src/github.com/juju/juju/cmd/jujud/agent/engine/apiagent.go --- 
juju-core-2.0~beta15/src/github.com/juju/juju/cmd/jujud/agent/engine/apiagent.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/jujud/agent/engine/apiagent.go 2016-10-13 14:31:49.000000000 +0000 @@ -12,18 +12,18 @@ // Many manifolds completely depend on an agent and an API connection; this // type configures them. -type AgentApiManifoldConfig struct { +type AgentAPIManifoldConfig struct { AgentName string APICallerName string } -// AgentApiStartFunc encapsulates the behaviour that varies among AgentApiManifolds. -type AgentApiStartFunc func(agent.Agent, base.APICaller) (worker.Worker, error) +// AgentAPIStartFunc encapsulates the behaviour that varies among AgentAPIManifolds. +type AgentAPIStartFunc func(agent.Agent, base.APICaller) (worker.Worker, error) -// AgentApiManifold returns a dependency.Manifold that calls the supplied start +// AgentAPIManifold returns a dependency.Manifold that calls the supplied start // func with the API and agent resources defined in the config (once those // resources are present). 
-func AgentApiManifold(config AgentApiManifoldConfig, start AgentApiStartFunc) dependency.Manifold { +func AgentAPIManifold(config AgentAPIManifoldConfig, start AgentAPIStartFunc) dependency.Manifold { return dependency.Manifold{ Inputs: []string{ config.AgentName, diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/jujud/agent/engine/apiagent_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/jujud/agent/engine/apiagent_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/jujud/agent/engine/apiagent_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/jujud/agent/engine/apiagent_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -17,26 +17,26 @@ dt "github.com/juju/juju/worker/dependency/testing" ) -type AgentApiManifoldSuite struct { +type AgentAPIManifoldSuite struct { testing.IsolationSuite testing.Stub manifold dependency.Manifold worker worker.Worker } -var _ = gc.Suite(&AgentApiManifoldSuite{}) +var _ = gc.Suite(&AgentAPIManifoldSuite{}) -func (s *AgentApiManifoldSuite) SetUpTest(c *gc.C) { +func (s *AgentAPIManifoldSuite) SetUpTest(c *gc.C) { s.IsolationSuite.SetUpTest(c) s.Stub = testing.Stub{} s.worker = &dummyWorker{} - s.manifold = engine.AgentApiManifold(engine.AgentApiManifoldConfig{ + s.manifold = engine.AgentAPIManifold(engine.AgentAPIManifoldConfig{ AgentName: "agent-name", APICallerName: "api-caller-name", }, s.newWorker) } -func (s *AgentApiManifoldSuite) newWorker(a agent.Agent, apiCaller base.APICaller) (worker.Worker, error) { +func (s *AgentAPIManifoldSuite) newWorker(a agent.Agent, apiCaller base.APICaller) (worker.Worker, error) { s.AddCall("newWorker", a, apiCaller) if err := s.NextErr(); err != nil { return nil, err @@ -44,15 +44,15 @@ return s.worker, nil } -func (s *AgentApiManifoldSuite) TestInputs(c *gc.C) { +func (s *AgentAPIManifoldSuite) TestInputs(c *gc.C) { c.Check(s.manifold.Inputs, jc.DeepEquals, []string{"agent-name", "api-caller-name"}) } -func (s *AgentApiManifoldSuite) 
TestOutput(c *gc.C) { +func (s *AgentAPIManifoldSuite) TestOutput(c *gc.C) { c.Check(s.manifold.Output, gc.IsNil) } -func (s *AgentApiManifoldSuite) TestStartAgentMissing(c *gc.C) { +func (s *AgentAPIManifoldSuite) TestStartAgentMissing(c *gc.C) { context := dt.StubContext(nil, map[string]interface{}{ "agent-name": dependency.ErrMissing, }) @@ -62,7 +62,7 @@ c.Check(err, gc.Equals, dependency.ErrMissing) } -func (s *AgentApiManifoldSuite) TestStartApiConnMissing(c *gc.C) { +func (s *AgentAPIManifoldSuite) TestStartAPIConnMissing(c *gc.C) { context := dt.StubContext(nil, map[string]interface{}{ "agent-name": &dummyAgent{}, "api-caller-name": dependency.ErrMissing, @@ -73,12 +73,12 @@ c.Check(err, gc.Equals, dependency.ErrMissing) } -func (s *AgentApiManifoldSuite) TestStartFailure(c *gc.C) { +func (s *AgentAPIManifoldSuite) TestStartFailure(c *gc.C) { expectAgent := &dummyAgent{} - expectApiCaller := &dummyApiCaller{} + expectAPICaller := &dummyAPICaller{} context := dt.StubContext(nil, map[string]interface{}{ "agent-name": expectAgent, - "api-caller-name": expectApiCaller, + "api-caller-name": expectAPICaller, }) s.SetErrors(errors.New("some error")) @@ -87,16 +87,16 @@ c.Check(err, gc.ErrorMatches, "some error") s.CheckCalls(c, []testing.StubCall{{ FuncName: "newWorker", - Args: []interface{}{expectAgent, expectApiCaller}, + Args: []interface{}{expectAgent, expectAPICaller}, }}) } -func (s *AgentApiManifoldSuite) TestStartSuccess(c *gc.C) { +func (s *AgentAPIManifoldSuite) TestStartSuccess(c *gc.C) { expectAgent := &dummyAgent{} - expectApiCaller := &dummyApiCaller{} + expectAPICaller := &dummyAPICaller{} context := dt.StubContext(nil, map[string]interface{}{ "agent-name": expectAgent, - "api-caller-name": expectApiCaller, + "api-caller-name": expectAPICaller, }) worker, err := s.manifold.Start(context) @@ -104,11 +104,11 @@ c.Check(worker, gc.Equals, s.worker) s.CheckCalls(c, []testing.StubCall{{ FuncName: "newWorker", - Args: []interface{}{expectAgent, 
expectApiCaller}, + Args: []interface{}{expectAgent, expectAPICaller}, }}) } -type dummyApiCaller struct { +type dummyAPICaller struct { base.APICaller } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/jujud/agent/engine/api.go juju-core-2.0.0/src/github.com/juju/juju/cmd/jujud/agent/engine/api.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/jujud/agent/engine/api.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/jujud/agent/engine/api.go 2016-10-13 14:31:49.000000000 +0000 @@ -11,16 +11,16 @@ // Some (hopefully growing number of) manifolds completely depend on an API // connection; this type configures them. -type ApiManifoldConfig struct { +type APIManifoldConfig struct { APICallerName string } -// ApiStartFunc encapsulates the behaviour that varies among ApiManifolds. -type ApiStartFunc func(base.APICaller) (worker.Worker, error) +// APIStartFunc encapsulates the behaviour that varies among APIManifolds. +type APIStartFunc func(base.APICaller) (worker.Worker, error) -// ApiManifold returns a dependency.Manifold that calls the supplied start +// APIManifold returns a dependency.Manifold that calls the supplied start // func with the API resource defined in the config (once it's present). 
-func ApiManifold(config ApiManifoldConfig, start ApiStartFunc) dependency.Manifold { +func APIManifold(config APIManifoldConfig, start APIStartFunc) dependency.Manifold { return dependency.Manifold{ Inputs: []string{ config.APICallerName, diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/jujud/agent/engine/api_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/jujud/agent/engine/api_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/jujud/agent/engine/api_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/jujud/agent/engine/api_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -16,25 +16,25 @@ dt "github.com/juju/juju/worker/dependency/testing" ) -type ApiManifoldSuite struct { +type APIManifoldSuite struct { testing.IsolationSuite testing.Stub manifold dependency.Manifold worker worker.Worker } -var _ = gc.Suite(&ApiManifoldSuite{}) +var _ = gc.Suite(&APIManifoldSuite{}) -func (s *ApiManifoldSuite) SetUpTest(c *gc.C) { +func (s *APIManifoldSuite) SetUpTest(c *gc.C) { s.IsolationSuite.SetUpTest(c) s.Stub = testing.Stub{} s.worker = &dummyWorker{} - s.manifold = engine.ApiManifold(engine.ApiManifoldConfig{ + s.manifold = engine.APIManifold(engine.APIManifoldConfig{ APICallerName: "api-caller-name", }, s.newWorker) } -func (s *ApiManifoldSuite) newWorker(apiCaller base.APICaller) (worker.Worker, error) { +func (s *APIManifoldSuite) newWorker(apiCaller base.APICaller) (worker.Worker, error) { s.AddCall("newWorker", apiCaller) if err := s.NextErr(); err != nil { return nil, err @@ -42,15 +42,15 @@ return s.worker, nil } -func (s *ApiManifoldSuite) TestInputs(c *gc.C) { +func (s *APIManifoldSuite) TestInputs(c *gc.C) { c.Check(s.manifold.Inputs, jc.DeepEquals, []string{"api-caller-name"}) } -func (s *ApiManifoldSuite) TestOutput(c *gc.C) { +func (s *APIManifoldSuite) TestOutput(c *gc.C) { c.Check(s.manifold.Output, gc.IsNil) } -func (s *ApiManifoldSuite) TestStartApiMissing(c *gc.C) { +func (s 
*APIManifoldSuite) TestStartAPIMissing(c *gc.C) { context := dt.StubContext(nil, map[string]interface{}{ "api-caller-name": dependency.ErrMissing, }) @@ -60,10 +60,10 @@ c.Check(err, gc.Equals, dependency.ErrMissing) } -func (s *ApiManifoldSuite) TestStartFailure(c *gc.C) { - expectApiCaller := &dummyApiCaller{} +func (s *APIManifoldSuite) TestStartFailure(c *gc.C) { + expectAPICaller := &dummyAPICaller{} context := dt.StubContext(nil, map[string]interface{}{ - "api-caller-name": expectApiCaller, + "api-caller-name": expectAPICaller, }) s.SetErrors(errors.New("some error")) @@ -72,14 +72,14 @@ c.Check(err, gc.ErrorMatches, "some error") s.CheckCalls(c, []testing.StubCall{{ FuncName: "newWorker", - Args: []interface{}{expectApiCaller}, + Args: []interface{}{expectAPICaller}, }}) } -func (s *ApiManifoldSuite) TestStartSuccess(c *gc.C) { - expectApiCaller := &dummyApiCaller{} +func (s *APIManifoldSuite) TestStartSuccess(c *gc.C) { + expectAPICaller := &dummyAPICaller{} context := dt.StubContext(nil, map[string]interface{}{ - "api-caller-name": expectApiCaller, + "api-caller-name": expectAPICaller, }) worker, err := s.manifold.Start(context) @@ -87,6 +87,6 @@ c.Check(worker, gc.Equals, s.worker) s.CheckCalls(c, []testing.StubCall{{ FuncName: "newWorker", - Args: []interface{}{expectApiCaller}, + Args: []interface{}{expectAPICaller}, }}) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/jujud/agent/engine/enginetest/agentapimanifold.go juju-core-2.0.0/src/github.com/juju/juju/cmd/jujud/agent/engine/enginetest/agentapimanifold.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/jujud/agent/engine/enginetest/agentapimanifold.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/jujud/agent/engine/enginetest/agentapimanifold.go 2016-10-13 14:31:49.000000000 +0000 @@ -13,24 +13,24 @@ dt "github.com/juju/juju/worker/dependency/testing" ) -// AgentApiManifoldTestConfig returns a AgentApiManifoldConfig -// suitable for use 
with RunAgentApiManifold. -func AgentApiManifoldTestConfig() engine.AgentApiManifoldConfig { - return engine.AgentApiManifoldConfig{ +// AgentAPIManifoldTestConfig returns a AgentAPIManifoldConfig +// suitable for use with RunAgentAPIManifold. +func AgentAPIManifoldTestConfig() engine.AgentAPIManifoldConfig { + return engine.AgentAPIManifoldConfig{ AgentName: "agent-name", APICallerName: "api-caller-name", } } -// RunAgentApiManifold is useful for testing manifolds based on -// AgentApiManifold. It takes the manifold, sets up the resources -// required to successfully pass AgentApiManifold's checks and then +// RunAgentAPIManifold is useful for testing manifolds based on +// AgentAPIManifold. It takes the manifold, sets up the resources +// required to successfully pass AgentAPIManifold's checks and then // runs the manifold start func. // // An agent and apiCaller may be optionally provided. If they are nil, // dummy barely-good-enough defaults will be used (these dummies are // fine not actually used for much). 
-func RunAgentApiManifold( +func RunAgentAPIManifold( manifold dependency.Manifold, agent agent.Agent, apiCaller base.APICaller, ) (worker.Worker, error) { if agent == nil { diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/jujud/agent/engine/valueworker.go juju-core-2.0.0/src/github.com/juju/juju/cmd/jujud/agent/engine/valueworker.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/jujud/agent/engine/valueworker.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/jujud/agent/engine/valueworker.go 2016-10-13 14:31:49.000000000 +0000 @@ -7,7 +7,7 @@ "reflect" "github.com/juju/errors" - "launchpad.net/tomb" + "gopkg.in/tomb.v1" "github.com/juju/juju/worker" ) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/jujud/agent/engine_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/jujud/agent/engine_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/jujud/agent/engine_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/jujud/agent/engine_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -5,7 +5,6 @@ import ( "fmt" - "runtime" "sync" "time" @@ -43,6 +42,7 @@ "environ-tracker", "firewaller", "instance-poller", + "machine-undertaker", "metric-worker", "migration-fortress", "migration-inactive-flag", @@ -124,14 +124,6 @@ } ) -// Some workers only exist on certain operating systems. 
-func init() { - if runtime.GOOS == "linux" { - alwaysMachineWorkers = append(alwaysMachineWorkers, "introspection") - alwaysUnitWorkers = append(alwaysUnitWorkers, "introspection") - } -} - type ModelManifoldsFunc func(config model.ManifoldsConfig) dependency.Manifolds func TrackModels(c *gc.C, tracker *engineTracker, inner ModelManifoldsFunc) ModelManifoldsFunc { diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/jujud/agent/introspection.go juju-core-2.0.0/src/github.com/juju/juju/cmd/jujud/agent/introspection.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/jujud/agent/introspection.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/jujud/agent/introspection.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,54 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package agent + +import ( + "runtime" + + "github.com/juju/errors" + + "github.com/juju/juju/agent" + "github.com/juju/juju/worker" + "github.com/juju/juju/worker/dependency" + "github.com/juju/juju/worker/introspection" +) + +// introspectionConfig defines the various components that the introspection +// worker reports on or needs to start up. +type introspectionConfig struct { + Agent agent.Agent + Engine *dependency.Engine + WorkerFunc func(config introspection.Config) (worker.Worker, error) +} + +// startIntrospection creates the introspection worker. It cannot and should +// not be in the engine itself as it reports on the engine, and other aspects +// of the runtime. If we put it in the engine, then it is most likely shut +// down in the times we need it most, which is when the agent is having +// problems shutting down. Here we effectively start the worker and tie its +// life to that of the engine that is returned. 
+func startIntrospection(cfg introspectionConfig) error { + if runtime.GOOS != "linux" { + logger.Debugf("introspection worker not supported on %q", runtime.GOOS) + return nil + } + + socketName := "jujud-" + cfg.Agent.CurrentConfig().Tag().String() + w, err := cfg.WorkerFunc(introspection.Config{ + SocketName: socketName, + Reporter: cfg.Engine, + }) + if err != nil { + return errors.Trace(err) + } + go func() { + cfg.Engine.Wait() + logger.Debugf("engine stopped, stopping introspection") + w.Kill() + w.Wait() + logger.Debugf("introspection stopped") + }() + + return nil +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/jujud/agent/introspection_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/jujud/agent/introspection_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/jujud/agent/introspection_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/jujud/agent/introspection_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,132 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+ +package agent + +import ( + "runtime" + "time" + + "github.com/juju/errors" + "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + "gopkg.in/juju/names.v2" + + "github.com/juju/juju/agent" + cmdutil "github.com/juju/juju/cmd/jujud/util" + coretesting "github.com/juju/juju/testing" + "github.com/juju/juju/worker" + "github.com/juju/juju/worker/dependency" + "github.com/juju/juju/worker/introspection" +) + +type introspectionSuite struct { + testing.IsolationSuite +} + +var _ = gc.Suite(&introspectionSuite{}) + +func (s *introspectionSuite) TestStartNonLinux(c *gc.C) { + if runtime.GOOS == "linux" { + c.Skip("testing for non-linux") + } + var started bool + + cfg := introspectionConfig{ + WorkerFunc: func(_ introspection.Config) (worker.Worker, error) { + started = true + return nil, errors.New("shouldn't call start") + }, + } + + err := startIntrospection(cfg) + c.Assert(err, jc.ErrorIsNil) + c.Assert(started, jc.IsFalse) +} + +func (s *introspectionSuite) TestStartError(c *gc.C) { + if runtime.GOOS != "linux" { + c.Skip("introspection worker not supported on non-linux") + } + + cfg := introspectionConfig{ + Agent: &dummyAgent{}, + WorkerFunc: func(_ introspection.Config) (worker.Worker, error) { + return nil, errors.New("boom") + }, + } + + err := startIntrospection(cfg) + c.Check(err, gc.ErrorMatches, "boom") +} + +func (s *introspectionSuite) TestStartSuccess(c *gc.C) { + if runtime.GOOS != "linux" { + c.Skip("introspection worker not supported on non-linux") + } + fake := &dummyWorker{ + done: make(chan struct{}), + } + + config := dependency.EngineConfig{ + IsFatal: cmdutil.IsFatal, + WorstError: cmdutil.MoreImportantError, + } + engine, err := dependency.NewEngine(config) + c.Assert(err, jc.ErrorIsNil) + + cfg := introspectionConfig{ + Agent: &dummyAgent{}, + Engine: engine, + WorkerFunc: func(cfg introspection.Config) (worker.Worker, error) { + fake.config = cfg + return fake, nil + }, + } + + err = 
startIntrospection(cfg) + c.Assert(err, jc.ErrorIsNil) + + c.Check(fake.config.Reporter, gc.Equals, engine) + c.Check(fake.config.SocketName, gc.Equals, "jujud-machine-42") + + // Stopping the engine causes the introspection worker to stop. + engine.Kill() + + select { + case <-fake.done: + case <-time.After(coretesting.LongWait): + c.Fatalf("worker did not get stopped") + } +} + +type dummyAgent struct { + agent.Agent +} + +func (*dummyAgent) CurrentConfig() agent.Config { + return &dummyConfig{} +} + +type dummyConfig struct { + agent.Config +} + +func (*dummyConfig) Tag() names.Tag { + return names.NewMachineTag("42") +} + +type dummyWorker struct { + config introspection.Config + done chan struct{} +} + +func (d *dummyWorker) Kill() { + close(d.done) +} + +func (d *dummyWorker) Wait() error { + <-d.done + return nil +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/jujud/agent/machine/apiworkers_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/jujud/agent/machine/apiworkers_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/jujud/agent/machine/apiworkers_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/jujud/agent/machine/apiworkers_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -6,7 +6,7 @@ import ( jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" - "launchpad.net/tomb" + "gopkg.in/tomb.v1" "github.com/juju/juju/api" "github.com/juju/juju/cmd/jujud/agent/machine" diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/jujud/agent/machine/manifolds.go juju-core-2.0.0/src/github.com/juju/juju/cmd/jujud/agent/machine/manifolds.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/jujud/agent/machine/manifolds.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/jujud/agent/machine/manifolds.go 2016-10-13 14:31:49.000000000 +0000 @@ -13,6 +13,7 @@ coreagent "github.com/juju/juju/agent" "github.com/juju/juju/api" + 
"github.com/juju/juju/api/base" apideployer "github.com/juju/juju/api/deployer" "github.com/juju/juju/cmd/jujud/agent/engine" "github.com/juju/juju/container/lxd" @@ -30,7 +31,6 @@ "github.com/juju/juju/worker/gate" "github.com/juju/juju/worker/hostkeyreporter" "github.com/juju/juju/worker/identityfilewriter" - "github.com/juju/juju/worker/introspection" "github.com/juju/juju/worker/logforwarder" "github.com/juju/juju/worker/logforwarder/sinks" "github.com/juju/juju/worker/logger" @@ -55,6 +55,7 @@ // ManifoldsConfig allows specialisation of the result of Manifolds. type ManifoldsConfig struct { + // Agent contains the agent that will be wrapped and made available to // its dependencies via a dependency.Engine. Agent coreagent.Agent @@ -120,6 +121,11 @@ // Clock supplies timekeeping services to various workers. Clock clock.Clock + + // ValidateMigration is called by the migrationminion during the + // migration process to check that the agent will be ok when + // connected to the new target controller. + ValidateMigration func(base.APICaller) error } // Manifolds returns a set of co-configured manifolds covering the @@ -157,13 +163,6 @@ // foundation stone on which most other manifolds ultimately depend. agentName: agent.Manifold(config.Agent), - // The introspection worker provides debugging information over - // an abstract domain socket - linux only (for now). - introspectionName: introspection.Manifold(introspection.ManifoldConfig{ - AgentName: agentName, - WorkerFunc: introspection.NewWorker, - }), - // The termination worker returns ErrTerminateAgent if a // termination signal is received by the process it's running // in. 
It has no inputs and its only output is the error it @@ -220,7 +219,7 @@ apiCallerName: apicaller.Manifold(apicaller.ManifoldConfig{ AgentName: agentName, APIConfigWatcherName: apiConfigWatcherName, - APIOpen: apicaller.APIOpen, + APIOpen: api.Open, NewConnection: apicaller.ScaryConnect, Filter: connectFilter, }), @@ -296,11 +295,13 @@ NewWorker: migrationflag.NewWorker, }), migrationMinionName: migrationminion.Manifold(migrationminion.ManifoldConfig{ - AgentName: agentName, - APICallerName: apiCallerName, - FortressName: migrationFortressName, - NewFacade: migrationminion.NewFacade, - NewWorker: migrationminion.NewWorker, + AgentName: agentName, + APICallerName: apiCallerName, + FortressName: migrationFortressName, + APIOpen: api.Open, + ValidateMigration: config.ValidateMigration, + NewFacade: migrationminion.NewFacade, + NewWorker: migrationminion.NewWorker, }), // The serving-info-setter manifold sets grabs the state @@ -487,7 +488,6 @@ migrationInactiveFlagName = "migration-inactive-flag" migrationMinionName = "migration-minion" - introspectionName = "introspection" servingInfoSetterName = "serving-info-setter" apiWorkersName = "unconverted-api-workers" rebootName = "reboot-executor" diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/jujud/agent/machine/manifolds_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/jujud/agent/machine/manifolds_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/jujud/agent/machine/manifolds_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/jujud/agent/machine/manifolds_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -48,7 +48,6 @@ "api-config-watcher", "disk-manager", "host-key-reporter", - "introspection", "log-forwarder", "log-sender", "logging-config-updater", @@ -95,7 +94,6 @@ "agent", "api-caller", "api-config-watcher", - "introspection", "log-forwarder", "state", "state-config-watcher", diff -Nru 
juju-core-2.0~beta15/src/github.com/juju/juju/cmd/jujud/agent/machine/servinginfo_setter.go juju-core-2.0.0/src/github.com/juju/juju/cmd/jujud/agent/machine/servinginfo_setter.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/jujud/agent/machine/servinginfo_setter.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/jujud/agent/machine/servinginfo_setter.go 2016-10-13 14:31:49.000000000 +0000 @@ -53,24 +53,35 @@ // If the machine needs State, grab the state serving info // over the API and write it to the agent configuration. - machine, err := apiState.Entity(tag) + if controller, err := isController(apiState, tag); err != nil { + return nil, errors.Annotate(err, "checking controller status") + } else if !controller { + // Not a controller, nothing to do. + return nil, dependency.ErrUninstall + } + + info, err := apiState.StateServingInfo() if err != nil { - return nil, err + return nil, errors.Annotate(err, "getting state serving info") } - for _, job := range machine.Jobs() { - if job.NeedsState() { - info, err := apiState.StateServingInfo() - if err != nil { - return nil, errors.Errorf("cannot get state serving info: %v", err) - } - err = agent.ChangeConfig(func(config coreagent.ConfigSetter) error { - config.SetStateServingInfo(info) - return nil - }) - if err != nil { - return nil, err - } + err = agent.ChangeConfig(func(config coreagent.ConfigSetter) error { + existing, hasInfo := config.StateServingInfo() + if hasInfo { + // Use the existing cert and key as they appear to + // have been already updated by the cert updater + // worker to have this machine's IP address as + // part of the cert. This changed cert is never + // put back into the database, so it isn't + // reflected in the copy we have got from + // apiState. 
+ info.Cert = existing.Cert + info.PrivateKey = existing.PrivateKey } + config.SetStateServingInfo(info) + return nil + }) + if err != nil { + return nil, errors.Trace(err) } // All is well - we're done (no actual worker is actually returned). @@ -78,3 +89,16 @@ }, } } + +func isController(apiState *apiagent.State, tag names.MachineTag) (bool, error) { + machine, err := apiState.Entity(tag) + if err != nil { + return false, errors.Trace(err) + } + for _, job := range machine.Jobs() { + if job.NeedsState() { + return true, nil + } + } + return false, nil +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/jujud/agent/machine/servinginfo_setter_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/jujud/agent/machine/servinginfo_setter_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/jujud/agent/machine/servinginfo_setter_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/jujud/agent/machine/servinginfo_setter_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -48,7 +48,7 @@ c.Check(err, gc.Equals, dependency.ErrMissing) } -func (s *ServingInfoSetterSuite) TestStartApiCallerMissing(c *gc.C) { +func (s *ServingInfoSetterSuite) TestStartAPICallerMissing(c *gc.C) { context := dt.StubContext(nil, map[string]interface{}{ "agent": &mockAgent{}, "api-caller": dependency.ErrMissing, @@ -97,14 +97,10 @@ }) w, err := s.manifold.Start(context) c.Assert(w, gc.IsNil) - c.Assert(err, gc.ErrorMatches, "boom") + c.Assert(err, gc.ErrorMatches, "checking controller status: boom") } -func (s *ServingInfoSetterSuite) TestJobManageEnviron(c *gc.C) { - // State serving info should be set for machines with JobManageEnviron. 
- const mockAPIPort = 1234 - - a := &mockAgent{} +func (s *ServingInfoSetterSuite) startManifold(c *gc.C, a coreagent.Agent, mockAPIPort int) { apiCaller := basetesting.APICallerFunc( func(objType string, version int, id, request string, args, response interface{}) error { c.Assert(objType, gc.Equals, "Agent") @@ -118,7 +114,9 @@ case "StateServingInfo": result := response.(*params.StateServingInfo) *result = params.StateServingInfo{ - APIPort: mockAPIPort, + Cert: "cert", + PrivateKey: "key", + APIPort: mockAPIPort, } default: c.Fatalf("not sure how to handle: %q", request) @@ -133,10 +131,41 @@ w, err := s.manifold.Start(context) c.Assert(w, gc.IsNil) c.Assert(err, gc.Equals, dependency.ErrUninstall) +} + +func (s *ServingInfoSetterSuite) TestJobManageEnviron(c *gc.C) { + // State serving info should be set for machines with JobManageEnviron. + const mockAPIPort = 1234 + + a := &mockAgent{} + s.startManifold(c, a, mockAPIPort) + + // Verify that the state serving info was actually set. + c.Assert(a.conf.ssiSet, jc.IsTrue) + c.Assert(a.conf.ssi.APIPort, gc.Equals, mockAPIPort) + c.Assert(a.conf.ssi.Cert, gc.Equals, "cert") + c.Assert(a.conf.ssi.PrivateKey, gc.Equals, "key") +} + +func (s *ServingInfoSetterSuite) TestJobManageEnvironNotOverwriteCert(c *gc.C) { + // State serving info should be set for machines with JobManageEnviron. + const mockAPIPort = 1234 + + a := &mockAgent{} + existingCert := "some cert set by certupdater" + existingKey := "some key set by certupdater" + a.conf.SetStateServingInfo(params.StateServingInfo{ + Cert: existingCert, + PrivateKey: existingKey, + }) + + s.startManifold(c, a, mockAPIPort) // Verify that the state serving info was actually set. 
c.Assert(a.conf.ssiSet, jc.IsTrue) c.Assert(a.conf.ssi.APIPort, gc.Equals, mockAPIPort) + c.Assert(a.conf.ssi.Cert, gc.Equals, existingCert) + c.Assert(a.conf.ssi.PrivateKey, gc.Equals, existingKey) } func (s *ServingInfoSetterSuite) TestJobHostUnits(c *gc.C) { @@ -200,6 +229,10 @@ return mc.tag } +func (mc *mockConfig) StateServingInfo() (params.StateServingInfo, bool) { + return mc.ssi, mc.ssiSet +} + func (mc *mockConfig) SetStateServingInfo(info params.StateServingInfo) { mc.ssiSet = true mc.ssi = info diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/jujud/agent/machine.go juju-core-2.0.0/src/github.com/juju/juju/cmd/jujud/agent/machine.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/jujud/agent/machine.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/jujud/agent/machine.go 2016-10-13 14:31:49.000000000 +0000 @@ -16,7 +16,10 @@ "github.com/juju/cmd" "github.com/juju/errors" + "github.com/juju/gnuflag" + "github.com/juju/juju/api" apiagent "github.com/juju/juju/api/agent" + "github.com/juju/juju/api/base" apimachiner "github.com/juju/juju/api/machiner" "github.com/juju/juju/controller" "github.com/juju/loggo" @@ -33,12 +36,10 @@ "gopkg.in/juju/names.v2" "gopkg.in/mgo.v2" "gopkg.in/natefinch/lumberjack.v2" - "launchpad.net/gnuflag" - "launchpad.net/tomb" + "gopkg.in/tomb.v1" "github.com/juju/juju/agent" "github.com/juju/juju/agent/tools" - "github.com/juju/juju/api" apideployer "github.com/juju/juju/api/deployer" "github.com/juju/juju/api/metricsmanager" apiprovisioner "github.com/juju/juju/api/provisioner" @@ -77,7 +78,9 @@ "github.com/juju/juju/worker/deployer" "github.com/juju/juju/worker/gate" "github.com/juju/juju/worker/imagemetadataworker" + "github.com/juju/juju/worker/introspection" "github.com/juju/juju/worker/logsender" + "github.com/juju/juju/worker/migrationmaster" "github.com/juju/juju/worker/modelworkermanager" "github.com/juju/juju/worker/mongoupgrader" 
"github.com/juju/juju/worker/peergrouper" @@ -187,7 +190,7 @@ func (a *machineAgentCmd) Init(args []string) error { if !names.IsValidMachine(a.machineId) { - return fmt.Errorf("--machine-id option must be set, and expects a non-negative integer") + return errors.Errorf("--machine-id option must be set, and expects a non-negative integer") } if err := a.agentInitializer.CheckArgs(args); err != nil { return err @@ -343,21 +346,29 @@ // upgradeCertificateDNSNames ensure that the controller certificate // recorded in the agent config and also mongo server.pem contains the -// DNSNames entires required by Juju/ -func (a *MachineAgent) upgradeCertificateDNSNames() error { - agentConfig := a.CurrentConfig() - si, ok := agentConfig.StateServingInfo() +// DNSNames entries required by Juju. +func upgradeCertificateDNSNames(config agent.ConfigSetter) error { + si, ok := config.StateServingInfo() if !ok || si.CAPrivateKey == "" { // No certificate information exists yet, nothing to do. return nil } - // Parse the current certificate to get the current dns names. - serverCert, err := cert.ParseCert(si.Cert) - if err != nil { - return err + + // Validate the current certificate and private key pair, and then + // extract the current DNS names from the certificate. If the + // certificate validation fails, or it does not contain the DNS + // names we require, we will generate a new one. + var dnsNames set.Strings + serverCert, _, err := cert.ParseCertAndKey(si.Cert, si.PrivateKey) + if err != nil { + // The certificate is invalid, so create a new one. + logger.Infof("parsing certificate/key failed, will generate a new one: %v", err) + dnsNames = set.NewStrings() + } else { + dnsNames = set.NewStrings(serverCert.DNSNames...) } + update := false - dnsNames := set.NewStrings(serverCert.DNSNames...) 
requiredDNSNames := []string{"local", "juju-apiserver", "juju-mongodb"} for _, dnsName := range requiredDNSNames { if dnsNames.Contains(dnsName) { @@ -369,18 +380,17 @@ if !update { return nil } + // Write a new certificate to the mongo pem and agent config files. - si.Cert, si.PrivateKey, err = cert.NewDefaultServer(agentConfig.CACert(), si.CAPrivateKey, dnsNames.Values()) + si.Cert, si.PrivateKey, err = cert.NewDefaultServer(config.CACert(), si.CAPrivateKey, dnsNames.Values()) if err != nil { return err } - if err := mongo.UpdateSSLKey(agentConfig.DataDir(), si.Cert, si.PrivateKey); err != nil { + if err := mongo.UpdateSSLKey(config.DataDir(), si.Cert, si.PrivateKey); err != nil { return err } - return a.AgentConfigWriter.ChangeConfig(func(config agent.ConfigSetter) error { - config.SetStateServingInfo(si) - return nil - }) + config.SetStateServingInfo(si) + return nil } // Run runs a machine agent. @@ -388,19 +398,23 @@ defer a.tomb.Done() if err := a.ReadConfig(a.Tag().String()); err != nil { - return fmt.Errorf("cannot read agent configuration: %v", err) + return errors.Errorf("cannot read agent configuration: %v", err) } logger.Infof("machine agent %v start (%s [%s])", a.Tag(), jujuversion.Current, runtime.Compiler) if flags := featureflag.String(); flags != "" { logger.Warningf("developer feature flags enabled: %s", flags) } + if err := introspection.WriteProfileFunctions(); err != nil { + // This isn't fatal, just annoying. + logger.Errorf("failed to write profile funcs: %v", err) + } // Before doing anything else, we need to make sure the certificate generated for // use by mongo to validate controller connections is correct. This needs to be done // before any possible restart of the mongo service. 
// See bug http://pad.lv/1434680 - if err := a.upgradeCertificateDNSNames(); err != nil { + if err := a.AgentConfigWriter.ChangeConfig(upgradeCertificateDNSNames); err != nil { return errors.Annotate(err, "error upgrading server certificate") } @@ -463,6 +477,7 @@ LogSource: a.bufferedLogs, NewDeployContext: newDeployContext, Clock: clock.WallClock, + ValidateMigration: a.validateMigration, }) if err := dependency.Install(engine, manifolds); err != nil { if err := worker.Stop(engine); err != nil { @@ -470,6 +485,17 @@ } return nil, err } + if err := startIntrospection(introspectionConfig{ + Agent: a, + Engine: engine, + WorkerFunc: introspection.NewWorker, + }); err != nil { + // If the introspection worker failed to start, we just log error + // but continue. It is very unlikely to happen in the real world + // as the only issue is connecting to the abstract domain socket + // and the agent is controlled by the OS to only have one. + logger.Errorf("failed to start introspection worker: %v", err) + } return engine, nil } } @@ -479,7 +505,7 @@ // We need to reopen the API to clear the reboot flag after // scheduling the reboot. It may be cleaner to do this in the reboot // worker, before returning the ErrRebootMachine. - conn, err := apicaller.OnlyConnect(a, apicaller.APIOpen) + conn, err := apicaller.OnlyConnect(a, api.Open) if err != nil { logger.Infof("Reboot: Error connecting to state") return errors.Trace(err) } @@ -646,7 +672,11 @@ } } - runner := newConnRunner(apiConn) + runner := worker.NewRunner( + cmdutil.ConnectionIsFatal(logger, apiConn), + cmdutil.MoreImportant, + worker.RestartDelay, + ) defer func() { // If startAPIWorkers exits early with an error, stop the // runner so that any already started runners aren't leaked. 
@@ -661,7 +691,7 @@ if params.IsCodeDead(cause) || cause == worker.ErrTerminateAgent { return nil, worker.ErrTerminateAgent } - return nil, fmt.Errorf("setting up container support: %v", err) + return nil, errors.Errorf("setting up container support: %v", err) } if isModelManager { @@ -744,8 +774,7 @@ if !ok { return nil, errors.New("no state info available") } - st, err := state.Open( - agentConfig.Model(), info, mongo.DefaultDialOpts(), + st, err := state.Open(agentConfig.Model(), agentConfig.Controller(), info, mongo.DefaultDialOpts(), stateenvirons.GetNewPolicyFunc( stateenvirons.GetNewEnvironFunc(environs.New), ), @@ -756,6 +785,15 @@ return st, nil } +// validateMigration is called by the migrationminion to help check +// that the agent will be ok when connected to a new controller. +func (a *MachineAgent) validateMigration(apiCaller base.APICaller) error { + // TODO(mjs) - more extensive checks to come. + facade := apimachiner.NewState(apiCaller) + _, err := facade.Machine(names.NewMachineTag(a.machineId)) + return errors.Trace(err) +} + // setupContainerSupport determines what containers can be run on this machine and // initialises suitable infrastructure to support such containers. 
func (a *MachineAgent) setupContainerSupport(runner worker.Runner, st api.Connection, agentConfig agent.Config) error { @@ -854,7 +892,11 @@ return nil, errors.Annotate(err, "machine lookup") } - runner := newConnRunner(st) + runner := worker.NewRunner( + cmdutil.PingerIsFatal(logger, st), + cmdutil.MoreImportant, + worker.RestartDelay, + ) singularRunner, err := newSingularStateRunner(runner, st, m) if err != nil { return nil, errors.Trace(err) @@ -868,9 +910,10 @@ useMultipleCPUs() a.startWorkerAfterUpgrade(runner, "model worker manager", func() (worker.Worker, error) { w, err := modelworkermanager.New(modelworkermanager.Config{ - Backend: st, - NewWorker: a.startModelWorkers, - ErrorDelay: worker.RestartDelay, + ControllerUUID: st.ControllerUUID(), + Backend: st, + NewWorker: a.startModelWorkers, + ErrorDelay: worker.RestartDelay, }) if err != nil { return nil, errors.Annotate(err, "cannot start model worker manager") @@ -944,7 +987,7 @@ }) a.startWorkerAfterUpgrade(singularRunner, "txnpruner", func() (worker.Worker, error) { - return txnpruner.New(st, time.Hour*2), nil + return txnpruner.New(st, time.Hour*2, clock.WallClock), nil }) default: return nil, errors.Errorf("unknown job type %q", job) @@ -955,8 +998,8 @@ // startModelWorkers starts the set of workers that run for every model // in each controller. 
-func (a *MachineAgent) startModelWorkers(uuid string) (worker.Worker, error) { - modelAgent, err := model.WrapAgent(a, uuid) +func (a *MachineAgent) startModelWorkers(controllerUUID, modelUUID string) (worker.Worker, error) { + modelAgent, err := model.WrapAgent(a, controllerUUID, modelUUID) if err != nil { return nil, errors.Trace(err) } @@ -987,6 +1030,7 @@ StatusHistoryPrunerInterval: 5 * time.Minute, SpacesImportedGate: a.discoverSpacesComplete, NewEnvironFunc: newEnvirons, + NewMigrationMaster: migrationmaster.NewWorker, }) if err := dependency.Install(engine, manifolds); err != nil { if err := worker.Stop(engine); err != nil { @@ -1012,11 +1056,11 @@ if err != nil { return nil, errors.Trace(err) } - return a.newApiserverWorker(st, certChanged) + return a.newAPIserverWorker(st, certChanged) } } -func (a *MachineAgent) newApiserverWorker(st *state.State, certChanged chan params.StateServingInfo) (worker.Worker, error) { +func (a *MachineAgent) newAPIserverWorker(st *state.State, certChanged chan params.StateServingInfo) (worker.Worker, error) { agentConfig := a.CurrentConfig() // If the configuration does not have the required information, // it is currently not a recoverable error, so we kill the whole @@ -1026,8 +1070,8 @@ if !ok { return nil, &cmdutil.FatalError{"StateServingInfo not available and we need it"} } - cert := []byte(info.Cert) - key := []byte(info.PrivateKey) + cert := info.Cert + key := info.PrivateKey if len(cert) == 0 || len(key) == 0 { return nil, &cmdutil.FatalError{"configuration does not have controller cert/key"} @@ -1055,13 +1099,17 @@ } server, err := apiserver.NewServer(st, listener, apiserver.ServerConfig{ - Cert: cert, - Key: key, - Tag: tag, - DataDir: dataDir, - LogDir: logDir, - Validator: a.limitLogins, - CertChanged: certChanged, + Clock: clock.WallClock, + Cert: cert, + Key: key, + Tag: tag, + DataDir: dataDir, + LogDir: logDir, + Validator: a.limitLogins, + CertChanged: certChanged, + AutocertURL: 
controllerConfig.AutocertURL(), + AutocertDNSName: controllerConfig.AutocertDNSName(), + AllowModelAccess: controllerConfig.AllowModelAccess(), NewObserver: newObserverFn( controllerConfig, clock.WallClock, @@ -1268,9 +1316,9 @@ func openState(agentConfig agent.Config, dialOpts mongo.DialOpts) (_ *state.State, _ *state.Machine, err error) { info, ok := agentConfig.MongoInfo() if !ok { - return nil, nil, fmt.Errorf("no state info available") + return nil, nil, errors.Errorf("no state info available") } - st, err := state.Open(agentConfig.Model(), info, dialOpts, + st, err := state.Open(agentConfig.Model(), agentConfig.Controller(), info, dialOpts, stateenvirons.GetNewPolicyFunc( stateenvirons.GetNewEnvironFunc(environs.New), ), @@ -1430,9 +1478,9 @@ if agentServiceName != "" { svc, err := service.DiscoverService(agentServiceName, common.Conf{}) if err != nil { - errs = append(errs, fmt.Errorf("cannot remove service %q: %v", agentServiceName, err)) + errs = append(errs, errors.Errorf("cannot remove service %q: %v", agentServiceName, err)) } else if err := svc.Remove(); err != nil { - errs = append(errs, fmt.Errorf("cannot remove service %q: %v", agentServiceName, err)) + errs = append(errs, errors.Errorf("cannot remove service %q: %v", agentServiceName, err)) } } @@ -1460,11 +1508,7 @@ if len(errs) == 0 { return nil } - return fmt.Errorf("uninstall failed: %v", errs) -} - -func newConnRunner(conns ...cmdutil.Pinger) worker.Runner { - return worker.NewRunner(cmdutil.ConnectionIsFatal(logger, conns...), cmdutil.MoreImportant, worker.RestartDelay) + return errors.Errorf("uninstall failed: %v", errs) } type MongoSessioner interface { diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/jujud/agent/machine_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/jujud/agent/machine_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/jujud/agent/machine_test.go 2016-08-16 08:56:25.000000000 +0000 +++ 
juju-core-2.0.0/src/github.com/juju/juju/cmd/jujud/agent/machine_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -28,6 +28,7 @@ "gopkg.in/juju/charmrepo.v2-unstable" "gopkg.in/juju/names.v2" "gopkg.in/natefinch/lumberjack.v2" + "gopkg.in/tomb.v1" "github.com/juju/juju/agent" "github.com/juju/juju/api" @@ -35,6 +36,7 @@ apimachiner "github.com/juju/juju/api/machiner" "github.com/juju/juju/apiserver/params" "github.com/juju/juju/cert" + "github.com/juju/juju/cmd/jujud/agent/model" "github.com/juju/juju/core/migration" "github.com/juju/juju/environs" envtesting "github.com/juju/juju/environs/testing" @@ -52,9 +54,11 @@ "github.com/juju/juju/worker" "github.com/juju/juju/worker/authenticationworker" "github.com/juju/juju/worker/certupdater" + "github.com/juju/juju/worker/dependency" "github.com/juju/juju/worker/diskmanager" "github.com/juju/juju/worker/instancepoller" "github.com/juju/juju/worker/machiner" + "github.com/juju/juju/worker/migrationmaster" "github.com/juju/juju/worker/mongoupgrader" "github.com/juju/juju/worker/storageprovisioner" "github.com/juju/juju/worker/upgrader" @@ -257,7 +261,7 @@ // lp:1558657 now := time.Now() sInfo := status.StatusInfo{ - Status: status.StatusIdle, + Status: status.Idle, Message: "", Since: &now, } @@ -333,7 +337,7 @@ svc := s.AddTestingService(c, "test-service", charm) err := svc.SetExposed() c.Assert(err, jc.ErrorIsNil) - units, err := juju.AddUnits(s.State, svc, 1, nil) + units, err := juju.AddUnits(s.State, svc, svc.Name(), 1, nil) c.Assert(err, jc.ErrorIsNil) // It should be allocated to a machine, which should then be provisioned. 
@@ -383,7 +387,7 @@ // Add one unit to a service; charm := s.AddTestingCharm(c, "dummy") svc := s.AddTestingService(c, "test-service", charm) - units, err := juju.AddUnits(s.State, svc, 1, nil) + units, err := juju.AddUnits(s.State, svc, svc.Name(), 1, nil) c.Assert(err, jc.ErrorIsNil) m, instId := s.waitProvisioned(c, units[0]) @@ -1039,7 +1043,9 @@ for { stateInfo, _ := a.CurrentConfig().StateServingInfo() srvCert, err := cert.ParseCert(stateInfo.Cert) - c.Assert(err, jc.ErrorIsNil) + if !c.Check(err, jc.ErrorIsNil) { + break + } sanIPs := make([]string, len(srvCert.IPAddresses)) for i, ip := range srvCert.IPAddresses { sanIPs[i] = ip.String() @@ -1048,7 +1054,7 @@ close(updated) break } - time.Sleep(10 * time.Millisecond) + time.Sleep(100 * time.Millisecond) } }() @@ -1058,7 +1064,30 @@ } func (s *MachineSuite) TestCertificateDNSUpdated(c *gc.C) { - // Disable the certificate work so it doesn't update the certificate. + m, _, _ := s.primeAgent(c, state.JobManageModel) + a := s.newAgent(c, m) + s.testCertificateDNSUpdated(c, a) +} + +func (s *MachineSuite) TestCertificateDNSUpdatedInvalidPrivateKey(c *gc.C) { + m, agentConfig, _ := s.primeAgent(c, state.JobManageModel) + + // Write out config with an invalid private key. This should + // cause the agent to rewrite the cert and key. + si, ok := agentConfig.StateServingInfo() + c.Assert(ok, jc.IsTrue) + si.PrivateKey = "foo" + agentConfig.SetStateServingInfo(si) + err := agentConfig.Write() + c.Assert(err, jc.ErrorIsNil) + + a := s.newAgent(c, m) + s.testCertificateDNSUpdated(c, a) +} + +func (s *MachineSuite) testCertificateDNSUpdated(c *gc.C, a *MachineAgent) { + // Disable the certificate worker so that the certificate could + // only have been updated during agent startup. 
newUpdater := func(certupdater.AddressWatcher, certupdater.StateServingInfoGetter, certupdater.ControllerConfigGetter, certupdater.APIHostPortsGetter, certupdater.StateServingInfoSetter, ) worker.Worker { @@ -1066,35 +1095,32 @@ } s.PatchValue(&newCertificateUpdater, newUpdater) - // Set up the machine agent. - m, _, _ := s.primeAgent(c, state.JobManageModel) - a := s.newAgent(c, m) - - // Set up check that certificate has been updated when the agent starts. - updated := make(chan struct{}) - expectedDnsNames := set.NewStrings("local", "juju-apiserver", "juju-mongodb") - go func() { - for { - stateInfo, _ := a.CurrentConfig().StateServingInfo() - srvCert, err := cert.ParseCert(stateInfo.Cert) - c.Assert(err, jc.ErrorIsNil) - certDnsNames := set.NewStrings(srvCert.DNSNames...) - if !expectedDnsNames.Difference(certDnsNames).IsEmpty() { - continue - } - pemContent, err := ioutil.ReadFile(filepath.Join(s.DataDir(), "server.pem")) - c.Assert(err, jc.ErrorIsNil) - if string(pemContent) == stateInfo.Cert+"\n"+stateInfo.PrivateKey { - close(updated) - break - } - time.Sleep(10 * time.Millisecond) - } - }() + // Set up a channel which fires when State is opened. + started := make(chan struct{}, 16) + s.PatchValue(&reportOpenedState, func(*state.State) { + started <- struct{}{} + }) + // Start the agent. go func() { c.Check(a.Run(nil), jc.ErrorIsNil) }() defer func() { c.Check(a.Stop(), jc.ErrorIsNil) }() - s.assertChannelActive(c, updated, "certificate to be updated") + + // Wait for State to be opened. Once this occurs we know that the + // agent's initial startup has happened. + s.assertChannelActive(c, started, "agent to start up") + + // Check that certificate was updated when the agent started. 
+ stateInfo, _ := a.CurrentConfig().StateServingInfo() + srvCert, _, err := cert.ParseCertAndKey(stateInfo.Cert, stateInfo.PrivateKey) + c.Assert(err, jc.ErrorIsNil) + expectedDnsNames := set.NewStrings("local", "juju-apiserver", "juju-mongodb") + certDnsNames := set.NewStrings(srvCert.DNSNames...) + c.Check(expectedDnsNames.Difference(certDnsNames).IsEmpty(), jc.IsTrue) + + // Check the mongo certificate file too. + pemContent, err := ioutil.ReadFile(filepath.Join(s.DataDir(), "server.pem")) + c.Assert(err, jc.ErrorIsNil) + c.Check(string(pemContent), gc.Equals, stateInfo.Cert+"\n"+stateInfo.PrivateKey) } func (s *MachineSuite) setupIgnoreAddresses(c *gc.C, expectedIgnoreValue bool) chan bool { @@ -1285,15 +1311,31 @@ uuid := st.ModelUUID() tracker := NewEngineTracker() - instrumented := TrackModels(c, tracker, modelManifolds) + + // Replace the real migrationmaster worker with a fake one which + // does nothing. This is required to make this test be reliable as + // the environment required for the migrationmaster to operate + // correctly is too involved to set up from here. + // + // TODO(mjs) - an alternative might be to provide a fake Facade + // and api.Open to the real migrationmaster but this test is + // awfully far away from the low level details of the worker. 
+ origModelManifolds := modelManifolds + modelManifoldsDisablingMigrationMaster := func(config model.ManifoldsConfig) dependency.Manifolds { + config.NewMigrationMaster = func(config migrationmaster.Config) (worker.Worker, error) { + return &nullWorker{}, nil + } + return origModelManifolds(config) + } + instrumented := TrackModels(c, tracker, modelManifoldsDisablingMigrationMaster) s.PatchValue(&modelManifolds, instrumented) - targetControllerTag := names.NewModelTag(utils.MustNewUUID().String()) - _, err := st.CreateModelMigration(state.ModelMigrationSpec{ + targetControllerTag := names.NewControllerTag(utils.MustNewUUID().String()) + _, err := st.CreateMigration(state.MigrationSpec{ InitiatedBy: names.NewUserTag("admin"), TargetInfo: migration.TargetInfo{ ControllerTag: targetControllerTag, - Addrs: []string{"1.2.3.4:5555", "4.3.2.1:6666"}, + Addrs: []string{"1.2.3.4:5555"}, CACert: "cert", AuthTag: names.NewUserTag("user"), Password: "password", @@ -1387,3 +1429,16 @@ c.Assert(s.fakeEnsureMongo.EnsureCount, gc.Equals, 1) c.Assert(s.fakeEnsureMongo.InitiateCount, gc.Equals, 0) } + +type nullWorker struct { + tomb tomb.Tomb +} + +func (w *nullWorker) Kill() { + w.tomb.Kill(nil) + w.tomb.Done() +} + +func (w *nullWorker) Wait() error { + return w.tomb.Wait() +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/jujud/agent/model/agent.go juju-core-2.0.0/src/github.com/juju/juju/cmd/jujud/agent/model/agent.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/jujud/agent/model/agent.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/jujud/agent/model/agent.go 2016-10-13 14:31:49.000000000 +0000 @@ -16,19 +16,24 @@ // model; its config is immutable; and it doesn't use OldPassword. // // It's a strong sign that the agent package needs some work... 
-func WrapAgent(a agent.Agent, uuid string) (agent.Agent, error) { - if !names.IsValidModel(uuid) { - return nil, errors.NotValidf("model uuid %q", uuid) +func WrapAgent(a agent.Agent, controllerUUID, modelUUID string) (agent.Agent, error) { + if !names.IsValidModel(modelUUID) { + return nil, errors.NotValidf("model uuid %q", modelUUID) + } + if !names.IsValidController(controllerUUID) { + return nil, errors.NotValidf("controller uuid %q", controllerUUID) } return &modelAgent{ - Agent: a, - uuid: uuid, + Agent: a, + modelUUID: modelUUID, + controllerUUID: controllerUUID, }, nil } type modelAgent struct { agent.Agent - uuid string + modelUUID string + controllerUUID string } // ChangeConfig is part of the agent.Agent interface. This implementation @@ -41,20 +46,28 @@ // returns an agent.Config that reports tweaked API connection information. func (a *modelAgent) CurrentConfig() agent.Config { return &modelAgentConfig{ - Config: a.Agent.CurrentConfig(), - uuid: a.uuid, + Config: a.Agent.CurrentConfig(), + modelUUID: a.modelUUID, + controllerUUID: a.controllerUUID, } } type modelAgentConfig struct { agent.Config - uuid string + modelUUID string + controllerUUID string } // Model is part of the agent.Config interface. This implementation always // returns the configured model tag. func (c *modelAgentConfig) Model() names.ModelTag { - return names.NewModelTag(c.uuid) + return names.NewModelTag(c.modelUUID) +} + +// Controller is part of the agent.Config interface. This implementation always +// returns the configured controller tag. +func (c *modelAgentConfig) Controller() names.ControllerTag { + return names.NewControllerTag(c.controllerUUID) } // APIInfo is part of the agent.Config interface. 
This implementation always @@ -64,7 +77,7 @@ if !ok { return nil, false } - info.ModelTag = names.NewModelTag(c.uuid) + info.ModelTag = names.NewModelTag(c.modelUUID) return info, true } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/jujud/agent/model/agent_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/jujud/agent/model/agent_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/jujud/agent/model/agent_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/jujud/agent/model/agent_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -23,19 +23,27 @@ var _ = gc.Suite(&WrapAgentSuite{}) -func (s *WrapAgentSuite) TestRequiresUUID(c *gc.C) { - agent, err := model.WrapAgent(&mockAgent{}, "lol-nope-no-hope") +func (s *WrapAgentSuite) TestRequiresControllerUUID(c *gc.C) { + agent, err := model.WrapAgent(&mockAgent{}, "lol-nope-no-hope", coretesting.ModelTag.Id()) + c.Check(err, gc.ErrorMatches, `controller uuid "lol-nope-no-hope" not valid`) + c.Check(err, jc.Satisfies, errors.IsNotValid) + c.Check(agent, gc.IsNil) +} + +func (s *WrapAgentSuite) TestRequiresModelUUID(c *gc.C) { + agent, err := model.WrapAgent(&mockAgent{}, coretesting.ControllerTag.Id(), "lol-nope-no-hope") c.Check(err, gc.ErrorMatches, `model uuid "lol-nope-no-hope" not valid`) c.Check(err, jc.Satisfies, errors.IsNotValid) c.Check(agent, gc.IsNil) } func (s *WrapAgentSuite) TestWraps(c *gc.C) { - agent, err := model.WrapAgent(&mockAgent{}, coretesting.ModelTag.Id()) + agent, err := model.WrapAgent(&mockAgent{}, coretesting.ControllerTag.Id(), coretesting.ModelTag.Id()) c.Assert(err, jc.ErrorIsNil) config := agent.CurrentConfig() c.Check(config.Model(), gc.Equals, coretesting.ModelTag) + c.Check(config.Controller(), gc.Equals, coretesting.ControllerTag) c.Check(config.OldPassword(), gc.Equals, "") apiInfo, ok := config.APIInfo() @@ -62,6 +70,10 @@ return names.NewModelTag("mock-model-uuid") } +func (mock *mockConfig) Controller() 
names.ControllerTag { + return names.NewControllerTag("mock-controller-uuid") +} + func (mock *mockConfig) APIInfo() (*api.Info, bool) { return &api.Info{ Addrs: []string{"here", "there"}, diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/jujud/agent/model/manifolds.go juju-core-2.0.0/src/github.com/juju/juju/cmd/jujud/agent/model/manifolds.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/jujud/agent/model/manifolds.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/jujud/agent/model/manifolds.go 2016-10-13 14:31:49.000000000 +0000 @@ -6,10 +6,12 @@ import ( "time" + "github.com/juju/juju/api" "github.com/juju/utils/clock" "github.com/juju/utils/voyeur" coreagent "github.com/juju/juju/agent" + "github.com/juju/juju/apiserver/params" "github.com/juju/juju/cmd/jujud/agent/engine" "github.com/juju/juju/core/life" "github.com/juju/juju/environs" @@ -29,6 +31,7 @@ "github.com/juju/juju/worker/gate" "github.com/juju/juju/worker/instancepoller" "github.com/juju/juju/worker/lifeflag" + "github.com/juju/juju/worker/machineundertaker" "github.com/juju/juju/worker/metricworker" "github.com/juju/juju/worker/migrationflag" "github.com/juju/juju/worker/migrationmaster" @@ -88,6 +91,10 @@ // NewEnvironFunc is a function opens a provider "environment" // (typically environs.New). NewEnvironFunc environs.NewEnvironFunc + + // NewMigrationMaster is called to create a new migrationmaster + // worker. 
+ NewMigrationMaster func(migrationmaster.Config) (worker.Worker, error) } // Manifolds returns a set of interdependent dependency manifolds that will @@ -108,8 +115,9 @@ }), apiCallerName: apicaller.Manifold(apicaller.ManifoldConfig{ AgentName: agentName, - APIOpen: apicaller.APIOpen, + APIOpen: api.Open, NewConnection: apicaller.OnlyConnect, + Filter: apiConnectFilter, }), // The spaces-imported gate will be unlocked when space @@ -175,7 +183,7 @@ FortressName: migrationFortressName, Clock: config.Clock, NewFacade: migrationmaster.NewFacade, - NewWorker: migrationmaster.NewWorker, + NewWorker: config.NewMigrationMaster, })), // Everything else should be wrapped in ifResponsible, @@ -277,6 +285,11 @@ // TODO(fwereade): 2016-03-17 lp:1558657 NewTimer: worker.NewTimer, })), + machineUndertakerName: ifNotMigrating(machineundertaker.Manifold(machineundertaker.ManifoldConfig{ + APICallerName: apiCallerName, + EnvironName: environTrackerName, + NewWorker: machineundertaker.NewWorker, + })), } } @@ -290,6 +303,16 @@ } } +func apiConnectFilter(err error) error { + // If the model is no longer there, then convert to ErrRemoved so + // that the dependency engine for the model is stopped. + // See http://pad.lv/1614809 + if params.IsCodeModelNotFound(err) { + return ErrRemoved + } + return err +} + var ( // ifResponsible wraps a manifold such that it only runs if the // responsibility flag is set. 
@@ -359,4 +382,5 @@ metricWorkerName = "metric-worker" stateCleanerName = "state-cleaner" statusHistoryPrunerName = "status-history-pruner" + machineUndertakerName = "machine-undertaker" ) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/jujud/agent/model/manifolds_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/jujud/agent/model/manifolds_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/jujud/agent/model/manifolds_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/jujud/agent/model/manifolds_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -42,6 +42,7 @@ "firewaller", "instance-poller", "is-responsible-flag", + "machine-undertaker", "metric-worker", "migration-fortress", "migration-inactive-flag", diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/jujud/agent/unit/manifolds.go juju-core-2.0.0/src/github.com/juju/juju/cmd/jujud/agent/unit/manifolds.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/jujud/agent/unit/manifolds.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/jujud/agent/unit/manifolds.go 2016-10-13 14:31:49.000000000 +0000 @@ -11,6 +11,8 @@ "github.com/juju/utils/voyeur" coreagent "github.com/juju/juju/agent" + "github.com/juju/juju/api" + "github.com/juju/juju/api/base" msapi "github.com/juju/juju/api/meterstatus" "github.com/juju/juju/cmd/jujud/agent/engine" "github.com/juju/juju/worker" @@ -20,7 +22,6 @@ "github.com/juju/juju/worker/apiconfigwatcher" "github.com/juju/juju/worker/dependency" "github.com/juju/juju/worker/fortress" - "github.com/juju/juju/worker/introspection" "github.com/juju/juju/worker/leadership" "github.com/juju/juju/worker/logger" "github.com/juju/juju/worker/logsender" @@ -52,6 +53,11 @@ // AgentConfigChanged is set whenever the unit agent's config // is updated. 
AgentConfigChanged *voyeur.Value + + // ValidateMigration is called by the migrationminion during the + // migration process to check that the agent will be ok when + // connected to the new target controller. + ValidateMigration func(base.APICaller) error } // Manifolds returns a set of co-configured manifolds covering the various @@ -82,13 +88,6 @@ // (Currently, that is "all manifolds", but consider a shared clock.) agentName: agent.Manifold(config.Agent), - // The introspection worker provides debugging information over - // an abstract domain socket - linux only (for now). - introspectionName: introspection.Manifold(introspection.ManifoldConfig{ - AgentName: agentName, - WorkerFunc: introspection.NewWorker, - }), - // The api-config-watcher manifold monitors the API server // addresses in the agent config and bounces when they // change. It's required as part of model migrations. @@ -105,7 +104,7 @@ apiCallerName: apicaller.Manifold(apicaller.ManifoldConfig{ AgentName: agentName, APIConfigWatcherName: apiConfigWatcherName, - APIOpen: apicaller.APIOpen, + APIOpen: api.Open, NewConnection: apicaller.ScaryConnect, Filter: connectFilter, }), @@ -146,11 +145,13 @@ NewWorker: migrationflag.NewWorker, }), migrationMinionName: migrationminion.Manifold(migrationminion.ManifoldConfig{ - AgentName: agentName, - APICallerName: apiCallerName, - FortressName: migrationFortressName, - NewFacade: migrationminion.NewFacade, - NewWorker: migrationminion.NewWorker, + AgentName: agentName, + APICallerName: apiCallerName, + FortressName: migrationFortressName, + APIOpen: api.Open, + ValidateMigration: config.ValidateMigration, + NewFacade: migrationminion.NewFacade, + NewWorker: migrationminion.NewWorker, }), // The logging config updater is a leaf worker that indirectly @@ -194,6 +195,7 @@ leadershipTrackerName: ifNotMigrating(leadership.Manifold(leadership.ManifoldConfig{ AgentName: agentName, APICallerName: apiCallerName, + Clock: clock.WallClock, LeadershipGuarantee: 
config.LeadershipGuarantee, })), @@ -274,7 +276,6 @@ migrationInactiveFlagName = "migration-inactive-flag" migrationMinionName = "migration-minion" - introspectionName = "introspection" loggingConfigUpdaterName = "logging-config-updater" proxyConfigUpdaterName = "proxy-config-updater" apiAddressUpdaterName = "api-address-updater" diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/jujud/agent/unit/manifolds_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/jujud/agent/unit/manifolds_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/jujud/agent/unit/manifolds_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/jujud/agent/unit/manifolds_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -37,7 +37,6 @@ "agent", "api-config-watcher", "api-caller", - "introspection", "log-sender", "upgrader", "migration-fortress", @@ -68,7 +67,6 @@ "machine-lock", "api-config-watcher", "api-caller", - "introspection", "log-sender", "upgrader", "migration-fortress", diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/jujud/agent/unit.go juju-core-2.0.0/src/github.com/juju/juju/cmd/jujud/agent/unit.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/jujud/agent/unit.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/jujud/agent/unit.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,26 +4,28 @@ package agent import ( - "fmt" "runtime" "time" "github.com/juju/cmd" "github.com/juju/errors" + "github.com/juju/gnuflag" "github.com/juju/loggo" "github.com/juju/utils/featureflag" "github.com/juju/utils/voyeur" "gopkg.in/juju/names.v2" "gopkg.in/natefinch/lumberjack.v2" - "launchpad.net/gnuflag" - "launchpad.net/tomb" + "gopkg.in/tomb.v1" "github.com/juju/juju/agent" + "github.com/juju/juju/api/base" + "github.com/juju/juju/api/uniter" "github.com/juju/juju/cmd/jujud/agent/unit" cmdutil "github.com/juju/juju/cmd/jujud/util" jujuversion "github.com/juju/juju/version" 
"github.com/juju/juju/worker" "github.com/juju/juju/worker/dependency" + "github.com/juju/juju/worker/introspection" "github.com/juju/juju/worker/logsender" ) @@ -85,7 +87,7 @@ return cmdutil.RequiredError("unit-name") } if !names.IsValidUnit(a.UnitName) { - return fmt.Errorf(`--unit-name option expects "/" argument`) + return errors.Errorf(`--unit-name option expects "/" argument`) } if err := a.AgentConf.CheckArgs(args); err != nil { return err @@ -140,6 +142,7 @@ LogSource: a.bufferedLogs, LeadershipGuarantee: 30 * time.Second, AgentConfigChanged: a.configChangedVal, + ValidateMigration: a.validateMigration, }) config := dependency.EngineConfig{ @@ -158,6 +161,17 @@ } return nil, err } + if err := startIntrospection(introspectionConfig{ + Agent: a, + Engine: engine, + WorkerFunc: introspection.NewWorker, + }); err != nil { + // If the introspection worker failed to start, we just log error + // but continue. It is very unlikely to happen in the real world + // as the only issue is connecting to the abstract domain socket + // and the agent is controlled by by the OS to only have one. + logger.Errorf("failed to start introspection worker: %v", err) + } return engine, nil } @@ -170,3 +184,26 @@ a.configChangedVal.Set(true) return errors.Trace(err) } + +// validateMigration is called by the migrationminion to help check +// that the agent will be ok when connected to a new controller. +func (a *UnitAgent) validateMigration(apiCaller base.APICaller) error { + // TODO(mjs) - more extensive checks to come. 
+ unitTag := names.NewUnitTag(a.UnitName) + facade := uniter.NewState(apiCaller, unitTag) + _, err := facade.Unit(unitTag) + if err != nil { + return errors.Trace(err) + } + model, err := facade.Model() + if err != nil { + return errors.Trace(err) + } + curModelUUID := a.CurrentConfig().Model().Id() + newModelUUID := model.UUID() + if newModelUUID != curModelUUID { + return errors.Errorf("model mismatch when validating: got %q, expected %q", + newModelUUID, curModelUUID) + } + return nil +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/jujud/agent/unit_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/jujud/agent/unit_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/jujud/agent/unit_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/jujud/agent/unit_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -140,13 +140,13 @@ statusInfo, err := unit.Status() c.Assert(err, jc.ErrorIsNil) switch statusInfo.Status { - case status.StatusMaintenance, status.StatusWaiting, status.StatusBlocked: + case status.Maintenance, status.Waiting, status.Blocked: c.Logf("waiting...") continue - case status.StatusActive: + case status.Active: c.Logf("active!") return - case status.StatusUnknown: + case status.Unknown: // Active units may have a status of unknown if they have // started but not run status-set. 
c.Logf("unknown but active!") @@ -157,10 +157,10 @@ statusInfo, err = unit.AgentStatus() c.Assert(err, jc.ErrorIsNil) switch statusInfo.Status { - case status.StatusAllocating, status.StatusExecuting, status.StatusRebooting, status.StatusIdle: + case status.Allocating, status.Executing, status.Rebooting, status.Idle: c.Logf("waiting...") continue - case status.StatusError: + case status.Error: stateConn.StartSync() c.Logf("unit is still down") default: diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/jujud/bootstrap.go juju-core-2.0.0/src/github.com/juju/juju/cmd/jujud/bootstrap.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/jujud/bootstrap.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/jujud/bootstrap.go 2016-10-13 14:31:49.000000000 +0000 @@ -15,13 +15,13 @@ "github.com/juju/cmd" "github.com/juju/errors" + "github.com/juju/gnuflag" "github.com/juju/loggo" "github.com/juju/utils/arch" "github.com/juju/utils/series" "github.com/juju/utils/ssh" "github.com/juju/version" "gopkg.in/juju/names.v2" - "launchpad.net/gnuflag" "github.com/juju/juju/agent" "github.com/juju/juju/agent/agentbootstrap" @@ -111,7 +111,7 @@ err = c.ReadConfig("machine-0") if err != nil { - return err + return errors.Annotate(err, "cannot read config") } agentConfig := c.CurrentConfig() @@ -141,7 +141,7 @@ Config: args.ControllerModelConfig, }) if err != nil { - return err + return errors.Annotate(err, "new environ") } newConfigAttrs := make(map[string]interface{}) @@ -175,11 +175,11 @@ instances, err := env.Instances([]instance.Id{args.BootstrapMachineInstanceId}) if err != nil { - return err + return errors.Annotate(err, "getting bootstrap instance") } addrs, err := instances[0].Addresses() if err != nil { - return err + return errors.Annotate(err, "bootstrap instance addresses") } // When machine addresses are reported from state, they have @@ -226,7 +226,7 @@ } if err := c.startMongo(addrs, agentConfig); err != nil { - return err + 
return errors.Annotate(err, "failed to start mongo") } controllerModelCfg, err := env.Config().Apply(newConfigAttrs) @@ -292,7 +292,7 @@ // Add custom image metadata to environment storage. if len(args.CustomImageMetadata) > 0 { - if err := c.saveCustomImageMetadata(st, args.CustomImageMetadata); err != nil { + if err := c.saveCustomImageMetadata(st, env, args.CustomImageMetadata); err != nil { return err } } @@ -451,35 +451,43 @@ var seriesFromVersion = series.VersionSeries // saveCustomImageMetadata stores the custom image metadata to the database, -func (c *BootstrapCommand) saveCustomImageMetadata(st *state.State, imageMetadata []*imagemetadata.ImageMetadata) error { +func (c *BootstrapCommand) saveCustomImageMetadata(st *state.State, env environs.Environ, imageMetadata []*imagemetadata.ImageMetadata) error { logger.Debugf("saving custom image metadata") - return storeImageMetadataInState(st, "custom", simplestreams.CUSTOM_CLOUD_DATA, imageMetadata) + return storeImageMetadataInState(st, env, "custom", simplestreams.CUSTOM_CLOUD_DATA, imageMetadata) } // storeImageMetadataInState writes image metadata into state store. 
-func storeImageMetadataInState(st *state.State, source string, priority int, existingMetadata []*imagemetadata.ImageMetadata) error { +func storeImageMetadataInState(st *state.State, env environs.Environ, source string, priority int, existingMetadata []*imagemetadata.ImageMetadata) error { if len(existingMetadata) == 0 { return nil } + cfg := env.Config() metadataState := make([]cloudimagemetadata.Metadata, len(existingMetadata)) for i, one := range existingMetadata { m := cloudimagemetadata.Metadata{ - cloudimagemetadata.MetadataAttributes{ + MetadataAttributes: cloudimagemetadata.MetadataAttributes{ Stream: one.Stream, Region: one.RegionName, Arch: one.Arch, VirtType: one.VirtType, RootStorageType: one.Storage, Source: source, + Version: one.Version, }, - priority, - one.Id, + Priority: priority, + ImageId: one.Id, } s, err := seriesFromVersion(one.Version) if err != nil { return errors.Annotatef(err, "cannot determine series for version %v", one.Version) } m.Series = s + if m.Stream == "" { + m.Stream = cfg.ImageStream() + } + if m.Source == "" { + m.Source = "custom" + } metadataState[i] = m } if err := st.CloudImageMetadataStorage.SaveMetadata(metadataState); err != nil { diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/jujud/bootstrap_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/jujud/bootstrap_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/jujud/bootstrap_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/jujud/bootstrap_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -253,7 +253,7 @@ }}) // Retrieve the state so that it is possible to access the GUI storage. 
- st, err := state.Open(testing.ModelTag, &mongo.MongoInfo{ + st, err := state.Open(testing.ModelTag, testing.ControllerTag, &mongo.MongoInfo{ Info: mongo.Info{ Addrs: []string{gitjujutesting.MgoServer.Addr()}, CACert: testing.CACert, @@ -300,6 +300,7 @@ UpgradedToVersion: jujuversion.Current, Password: testPassword, Nonce: agent.BootstrapNonce, + Controller: testing.ControllerTag, Model: testing.ModelTag, StateAddresses: []string{gitjujutesting.MgoServer.Addr()}, APIAddresses: []string{"0.1.2.3:1234"}, @@ -362,7 +363,7 @@ c.Assert(s.fakeEnsureMongo.InitiateParams.User, gc.Equals, "") c.Assert(s.fakeEnsureMongo.InitiateParams.Password, gc.Equals, "") - st, err := state.Open(testing.ModelTag, &mongo.MongoInfo{ + st, err := state.Open(testing.ModelTag, testing.ControllerTag, &mongo.MongoInfo{ Info: mongo.Info{ Addrs: []string{gitjujutesting.MgoServer.Addr()}, CACert: testing.CACert, @@ -398,7 +399,7 @@ _, cmd, err := s.initBootstrapCommand(c, nil) c.Assert(err, jc.ErrorIsNil) err = cmd.Run(nil) - c.Assert(err, gc.ErrorMatches, `invalid oplog size: "NaN"`) + c.Assert(err, gc.ErrorMatches, `failed to start mongo: invalid oplog size: "NaN"`) } func (s *BootstrapSuite) TestInitializeEnvironmentToolsNotFound(c *gc.C) { @@ -415,7 +416,7 @@ err = cmd.Run(nil) c.Assert(err, jc.ErrorIsNil) - st, err := state.Open(testing.ModelTag, &mongo.MongoInfo{ + st, err := state.Open(testing.ModelTag, testing.ControllerTag, &mongo.MongoInfo{ Info: mongo.Info{ Addrs: []string{gitjujutesting.MgoServer.Addr()}, CACert: testing.CACert, @@ -442,7 +443,7 @@ err = cmd.Run(nil) c.Assert(err, jc.ErrorIsNil) - st, err := state.Open(testing.ModelTag, &mongo.MongoInfo{ + st, err := state.Open(testing.ModelTag, testing.ControllerTag, &mongo.MongoInfo{ Info: mongo.Info{ Addrs: []string{gitjujutesting.MgoServer.Addr()}, CACert: testing.CACert, @@ -478,7 +479,7 @@ err = cmd.Run(nil) c.Assert(err, jc.ErrorIsNil) - st, err := state.Open(testing.ModelTag, &mongo.MongoInfo{ + st, err := 
state.Open(testing.ModelTag, testing.ControllerTag, &mongo.MongoInfo{ Info: mongo.Info{ Addrs: []string{gitjujutesting.MgoServer.Addr()}, CACert: testing.CACert, @@ -499,7 +500,7 @@ err = cmd.Run(nil) c.Assert(err, jc.ErrorIsNil) - st, err := state.Open(testing.ModelTag, &mongo.MongoInfo{ + st, err := state.Open(testing.ModelTag, testing.ControllerTag, &mongo.MongoInfo{ Info: mongo.Info{ Addrs: []string{gitjujutesting.MgoServer.Addr()}, CACert: testing.CACert, @@ -529,7 +530,7 @@ // Check we can log in to mongo as admin. info.Tag, info.Password = nil, testPassword - st, err := state.Open(testing.ModelTag, info, mongotest.DialOpts(), nil) + st, err := state.Open(testing.ModelTag, testing.ControllerTag, info, mongotest.DialOpts(), nil) c.Assert(err, jc.ErrorIsNil) defer st.Close() @@ -556,7 +557,7 @@ stateinfo, ok := machineConf1.MongoInfo() c.Assert(ok, jc.IsTrue) - st, err = state.Open(testing.ModelTag, stateinfo, mongotest.DialOpts(), nil) + st, err = state.Open(testing.ModelTag, testing.ControllerTag, stateinfo, mongotest.DialOpts(), nil) c.Assert(err, jc.ErrorIsNil) defer st.Close() @@ -682,7 +683,7 @@ // The tools should have been added to tools storage, and // exploded into each of the supported series of // the same operating system if the tools were uploaded. 
- st, err := state.Open(testing.ModelTag, &mongo.MongoInfo{ + st, err := state.Open(testing.ModelTag, testing.ControllerTag, &mongo.MongoInfo{ Info: mongo.Info{ Addrs: []string{gitjujutesting.MgoServer.Addr()}, CACert: testing.CACert, @@ -731,7 +732,7 @@ } func assertWrittenToState(c *gc.C, metadata cloudimagemetadata.Metadata) { - st, err := state.Open(testing.ModelTag, &mongo.MongoInfo{ + st, err := state.Open(testing.ModelTag, testing.ControllerTag, &mongo.MongoInfo{ Info: mongo.Info{ Addrs: []string{gitjujutesting.MgoServer.Addr()}, CACert: testing.CACert, @@ -744,6 +745,13 @@ // find all image metadata in state all, err := st.CloudImageMetadataStorage.FindMetadata(cloudimagemetadata.MetadataFilter{}) c.Assert(err, jc.ErrorIsNil) + // if there was no stream, it should have defaulted to "released" + if metadata.Stream == "" { + metadata.Stream = "released" + } + if metadata.DateCreated == 0 && len(all[metadata.Source]) > 0 { + metadata.DateCreated = all[metadata.Source][0].DateCreated + } c.Assert(all, gc.DeepEquals, map[string][]cloudimagemetadata.Metadata{ metadata.Source: []cloudimagemetadata.Metadata{metadata}, }) @@ -759,7 +767,7 @@ // This metadata should have also been written to state... 
expect := cloudimagemetadata.Metadata{ - cloudimagemetadata.MetadataAttributes{ + MetadataAttributes: cloudimagemetadata.MetadataAttributes{ Region: "region", Arch: "amd64", Version: "14.04", @@ -768,8 +776,8 @@ VirtType: "virtType", Source: "custom", }, - simplestreams.CUSTOM_CLOUD_DATA, - "imageId", + Priority: simplestreams.CUSTOM_CLOUD_DATA, + ImageId: "imageId", } assertWrittenToState(c, expect) } @@ -796,10 +804,8 @@ provider, err := environs.Provider(cfg.Type()) c.Assert(err, jc.ErrorIsNil) controllerCfg := testing.FakeControllerConfig() - controllerCfg["controller-uuid"] = cfg.UUID() cfg, err = provider.PrepareConfig(environs.PrepareConfigParams{ - ControllerUUID: controllerCfg.ControllerUUID(), - Config: cfg, + Config: cfg, }) c.Assert(err, jc.ErrorIsNil) env, err := provider.Open(environs.OpenParams{ @@ -809,6 +815,10 @@ c.Assert(err, jc.ErrorIsNil) err = env.PrepareForBootstrap(nullContext()) c.Assert(err, jc.ErrorIsNil) + s.AddCleanup(func(c *gc.C) { + err := env.DestroyController(controllerCfg.ControllerUUID()) + c.Assert(err, jc.ErrorIsNil) + }) s.PatchValue(&keys.JujuPublicKey, sstesting.SignedMetadataPublicKey) envtesting.MustUploadFakeTools(s.toolsStorage, cfg.AgentStream(), cfg.AgentStream()) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/jujud/dumplogs/dumplogs.go juju-core-2.0.0/src/github.com/juju/juju/cmd/jujud/dumplogs/dumplogs.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/jujud/dumplogs/dumplogs.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/jujud/dumplogs/dumplogs.go 2016-10-13 14:31:49.000000000 +0000 @@ -17,9 +17,9 @@ "github.com/juju/cmd" "github.com/juju/errors" + "github.com/juju/gnuflag" "github.com/juju/loggo" "gopkg.in/juju/names.v2" - "launchpad.net/gnuflag" "github.com/juju/juju/agent" jujudagent "github.com/juju/juju/cmd/jujud/agent" @@ -108,7 +108,7 @@ return errors.New("no database connection info available (is this a controller host?)") } - st0, err := 
state.Open(config.Model(), info, mongo.DefaultDialOpts(), nil) + st0, err := state.Open(config.Model(), config.Controller(), info, mongo.DefaultDialOpts(), nil) if err != nil { return errors.Annotate(err, "failed to connect to database") } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/jujud/main.go juju-core-2.0.0/src/github.com/juju/juju/cmd/jujud/main.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/jujud/main.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/jujud/main.go 2016-10-13 14:31:49.000000000 +0000 @@ -44,7 +44,7 @@ juju provides easy, intelligent service orchestration on top of models such as OpenStack, Amazon AWS, or bare metal. jujud is a component of juju. -https://juju.ubuntu.com/ +https://jujucharms.com/ The jujud command can also forward invocations over RPC for execution by the juju unit agent. When used in this way, it expects to be called via a symlink @@ -131,6 +131,7 @@ func jujuDMain(args []string, ctx *cmd.Context) (code int, err error) { // Assuming an average of 200 bytes per log message, use up to // 200MB for the log buffer. 
+ defer logger.Debugf("jujud complete, code %d, err %v", code, err) logCh, err := logsender.InstallBufferedLogWriter(1048576) if err != nil { return 1, errors.Trace(err) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/jujud/main_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/jujud/main_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/jujud/main_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/jujud/main_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -18,9 +18,9 @@ stdtesting "testing" "github.com/juju/cmd" + "github.com/juju/gnuflag" jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" - "launchpad.net/gnuflag" "github.com/juju/juju/environs" "github.com/juju/juju/juju/names" diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/jujud/reboot/reboot_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/jujud/reboot/reboot_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/jujud/reboot/reboot_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/jujud/reboot/reboot_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -73,6 +73,7 @@ StateAddresses: []string{s.mgoInst.Addr()}, CACert: coretesting.CACert, Password: "fake", + Controller: s.State.ControllerTag(), Model: s.State.ModelTag(), MongoVersion: mongo.Mongo24, } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/jujud/run.go juju-core-2.0.0/src/github.com/juju/juju/cmd/jujud/run.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/jujud/run.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/jujud/run.go 2016-10-13 14:31:49.000000000 +0000 @@ -12,11 +12,11 @@ "github.com/juju/cmd" "github.com/juju/errors" + "github.com/juju/gnuflag" "github.com/juju/mutex" "github.com/juju/utils/clock" "github.com/juju/utils/exec" "gopkg.in/juju/names.v2" - "launchpad.net/gnuflag" "github.com/juju/juju/agent" cmdutil 
"github.com/juju/juju/cmd/jujud/util" diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/jujud/upgrade_mongo.go juju-core-2.0.0/src/github.com/juju/juju/cmd/jujud/upgrade_mongo.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/jujud/upgrade_mongo.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/jujud/upgrade_mongo.go 2016-10-13 14:31:49.000000000 +0000 @@ -15,6 +15,7 @@ "github.com/juju/cmd" "github.com/juju/errors" + "github.com/juju/gnuflag" "github.com/juju/replicaset" "github.com/juju/retry" "github.com/juju/utils" @@ -24,7 +25,6 @@ "gopkg.in/juju/names.v2" "gopkg.in/mgo.v2" "gopkg.in/mgo.v2/bson" - "launchpad.net/gnuflag" "github.com/juju/juju/agent" "github.com/juju/juju/juju/paths" @@ -345,7 +345,7 @@ } var numaCtlPolicy bool - if numaCtlString := u.agentConfig.Value(agent.NumaCtlPreference); numaCtlString != "" { + if numaCtlString := u.agentConfig.Value(agent.NUMACtlPreference); numaCtlString != "" { var err error if numaCtlPolicy, err = strconv.ParseBool(numaCtlString); err != nil { return errors.Annotatef(err, "invalid numactl preference: %q", numaCtlString) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/jujud/upgrade_mongo_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/jujud/upgrade_mongo_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/jujud/upgrade_mongo_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/jujud/upgrade_mongo_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -49,16 +49,17 @@ return mock.now } +func (mock *mockClock) NewTimer(time.Duration) clock.Timer { + panic("unexpected call to NewTimer") +} + func (mock *mockClock) After(wait time.Duration) <-chan time.Time { mock.now = mock.now.Add(wait) return time.After(time.Microsecond) } func (mock *mockClock) AfterFunc(d time.Duration, f func()) clock.Timer { - if d > 0 { - mock.now = mock.now.Add(d) - } - return time.AfterFunc(0, f) + panic("unexpected call to 
AfterFunc") } func retryCallArgs() retry.CallArgs { @@ -261,12 +262,12 @@ f.ranCommands = append(f.ranCommands, []string{"mongo.ReStartServiceFail"}) return errors.New("failing restart") } -func (f *fakeRunCommand) ensureServiceInstalled(dataDir string, statePort, oplogSizeMB int, setNumaControlPolicy bool, version mongo.Version, auth bool) error { +func (f *fakeRunCommand) ensureServiceInstalled(dataDir string, statePort, oplogSizeMB int, setNUMAControlPolicy bool, version mongo.Version, auth bool) error { ran := []string{"mongo.EnsureServiceInstalled", dataDir, strconv.Itoa(statePort), strconv.Itoa(oplogSizeMB), - strconv.FormatBool(setNumaControlPolicy), + strconv.FormatBool(setNUMAControlPolicy), version.String(), strconv.FormatBool(auth)} @@ -379,6 +380,7 @@ StateAddresses: []string{"localhost:1234"}, APIAddresses: []string{"localhost:1235"}, Nonce: "a nonce", + Controller: testing.ControllerTag, Model: testing.ModelTag, } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/jujud/util/util.go juju-core-2.0.0/src/github.com/juju/juju/cmd/jujud/util/util.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/jujud/util/util.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/jujud/util/util.go 2016-10-13 14:31:49.000000000 +0000 @@ -115,18 +115,16 @@ return err } -// Pinger provides a type that knows how to ping. -type Pinger interface { - - // Ping pings something. - Ping() error +// Breakable provides a type that exposes an IsBroken check. +type Breakable interface { + IsBroken() bool } // ConnectionIsFatal returns a function suitable for passing as the // isFatal argument to worker.NewRunner, that diagnoses an error as // fatal if the connection has failed or if the error is otherwise // fatal. 
-func ConnectionIsFatal(logger loggo.Logger, conns ...Pinger) func(err error) bool { +func ConnectionIsFatal(logger loggo.Logger, conns ...Breakable) func(err error) bool { return func(err error) bool { if IsFatal(err) { return true @@ -140,8 +138,42 @@ } } -// ConnectionIsDead returns true if the given pinger fails to ping. -var ConnectionIsDead = func(logger loggo.Logger, conn Pinger) bool { +// ConnectionIsDead returns true if the given Breakable is broken. +var ConnectionIsDead = func(logger loggo.Logger, conn Breakable) bool { + return conn.IsBroken() +} + +// Pinger provides a type that knows how to ping. +type Pinger interface { + Ping() error +} + +// PingerIsFatal returns a function suitable for passing as the +// isFatal argument to worker.NewRunner, that diagnoses an error as +// fatal if the Pinger has failed or if the error is otherwise fatal. +// +// TODO(mjs) - this only exists for checking State instances in the +// machine agent and won't be necessary once either: +// 1. State grows a Broken() channel like api.Connection (which is +// actually quite a nice idea). +// 2. The dependency engine conversion is completed for the machine +// agent. +func PingerIsFatal(logger loggo.Logger, conns ...Pinger) func(err error) bool { + return func(err error) bool { + if IsFatal(err) { + return true + } + for _, conn := range conns { + if PingerIsDead(logger, conn) { + return true + } + } + return false + } +} + +// PingerIsDead returns true if the given pinger fails to ping. +var PingerIsDead = func(logger loggo.Logger, conn Pinger) bool { if err := conn.Ping(); err != nil { logger.Infof("error pinging %T: %v", conn, err) return true @@ -167,7 +199,7 @@ // Otherwise leave the default false value to indicate to EnsureServer // that numactl should not be used. 
var numaCtlPolicy bool - if numaCtlString := agentConfig.Value(agent.NumaCtlPreference); numaCtlString != "" { + if numaCtlString := agentConfig.Value(agent.NUMACtlPreference); numaCtlString != "" { var err error if numaCtlPolicy, err = strconv.ParseBool(numaCtlString); err != nil { return mongo.EnsureServerParams{}, fmt.Errorf("invalid numactl preference: %q", numaCtlString) @@ -190,7 +222,7 @@ DataDir: agentConfig.DataDir(), OplogSize: oplogSize, - SetNumaControlPolicy: numaCtlPolicy, + SetNUMAControlPolicy: numaCtlPolicy, } return params, nil } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/jujud/util/util_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/jujud/util/util_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/jujud/util/util_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/jujud/util/util_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -78,18 +78,13 @@ } func (s *toolSuite) TestConnectionIsFatal(c *gc.C) { - var ( - errPinger testPinger = func() error { - return stderrors.New("ping error") - } - okPinger testPinger = func() error { - return nil - } - ) - for i, pinger := range []testPinger{errPinger, okPinger} { + okConn := &testConn{broken: false} + errConn := &testConn{broken: true} + + for i, conn := range []*testConn{errConn, okConn} { for j, test := range isFatalTests { c.Logf("test %d.%d: %s", i, j, test.err) - fatal := ConnectionIsFatal(logger, pinger)(test.err) + fatal := ConnectionIsFatal(logger, conn)(test.err) if test.isFatal { c.Check(fatal, jc.IsTrue) } else { @@ -100,26 +95,62 @@ } func (s *toolSuite) TestConnectionIsFatalWithMultipleConns(c *gc.C) { - var ( - errPinger testPinger = func() error { - return stderrors.New("ping error") - } - okPinger testPinger = func() error { - return nil + okConn := &testConn{broken: false} + errConn := &testConn{broken: true} + + someErr := stderrors.New("foo") + + c.Assert(ConnectionIsFatal(logger, okConn, okConn)(someErr), + 
jc.IsFalse) + c.Assert(ConnectionIsFatal(logger, okConn, okConn, okConn)(someErr), + jc.IsFalse) + c.Assert(ConnectionIsFatal(logger, okConn, errConn)(someErr), + jc.IsTrue) + c.Assert(ConnectionIsFatal(logger, okConn, okConn, errConn)(someErr), + jc.IsTrue) + c.Assert(ConnectionIsFatal(logger, errConn, okConn, okConn)(someErr), + jc.IsTrue) +} + +func (s *toolSuite) TestPingerIsFatal(c *gc.C) { + var errPinger testPinger = func() error { + return stderrors.New("ping error") + } + var okPinger testPinger = func() error { + return nil + } + for i, pinger := range []testPinger{errPinger, okPinger} { + for j, test := range isFatalTests { + c.Logf("test %d.%d: %s", i, j, test.err) + fatal := PingerIsFatal(logger, pinger)(test.err) + if test.isFatal { + c.Check(fatal, jc.IsTrue) + } else { + c.Check(fatal, gc.Equals, i == 0) + } } - ) + } +} + +func (s *toolSuite) TestPingerIsFatalWithMultipleConns(c *gc.C) { + var errPinger testPinger = func() error { + return stderrors.New("ping error") + } + var okPinger testPinger = func() error { + return nil + } someErr := stderrors.New("foo") - c.Assert(ConnectionIsFatal(logger, okPinger, okPinger)(someErr), + c.Assert(PingerIsFatal(logger, okPinger, okPinger)(someErr), jc.IsFalse) - c.Assert(ConnectionIsFatal(logger, okPinger, okPinger, okPinger)(someErr), + c.Assert(PingerIsFatal(logger, okPinger, okPinger, okPinger)(someErr), jc.IsFalse) - c.Assert(ConnectionIsFatal(logger, okPinger, errPinger)(someErr), + c.Assert(PingerIsFatal(logger, okPinger, errPinger)(someErr), jc.IsTrue) - c.Assert(ConnectionIsFatal(logger, okPinger, okPinger, errPinger)(someErr), + c.Assert(PingerIsFatal(logger, okPinger, okPinger, errPinger)(someErr), jc.IsTrue) - c.Assert(ConnectionIsFatal(logger, errPinger, okPinger, okPinger)(someErr), + c.Assert(PingerIsFatal(logger, errPinger, okPinger, okPinger)(someErr), jc.IsTrue) } @@ -131,6 +162,14 @@ } } +type testConn struct { + broken bool +} + +func (c *testConn) IsBroken() bool { + return c.broken +} + 
type testPinger func() error func (f testPinger) Ping() error { diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/jujud/version_canary.go juju-core-2.0.0/src/github.com/juju/juju/cmd/jujud/version_canary.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/jujud/version_canary.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/jujud/version_canary.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,10 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +// +build !go1.6 + +package main + +// This line intentionally does not compile. This file will only be compiled if +// you are compiling with a version of Go that is lower than the one we support. +var requiredGoVersion = This_project_requires_Go_1_6 diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/modelcmd/apicontext.go juju-core-2.0.0/src/github.com/juju/juju/cmd/modelcmd/apicontext.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/modelcmd/apicontext.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/modelcmd/apicontext.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,10 +4,10 @@ package modelcmd import ( - "net/http" "os" "github.com/juju/errors" + "github.com/juju/gnuflag" "github.com/juju/idmclient/ussologin" "github.com/juju/persistent-cookiejar" "gopkg.in/juju/environschema.v1/form" @@ -20,8 +20,20 @@ // APIContext holds the context required for making connections to // APIs used by juju. type APIContext struct { - Jar *cookiejar.Jar - BakeryClient *httpbakery.Client + Jar *cookiejar.Jar + WebPageVisitor httpbakery.Visitor +} + +// AuthOpts holds flags relating to authentication. +type AuthOpts struct { + // NoBrowser specifies that web-browser-based auth should + // not be used when authenticating. 
+ NoBrowser bool +} + +func (o *AuthOpts) SetFlags(f *gnuflag.FlagSet) { + f.BoolVar(&o.NoBrowser, "B", false, "Do not use web browser for authentication") + f.BoolVar(&o.NoBrowser, "no-browser-login", false, "") } // NewAPIContext returns an API context that will use the given @@ -33,35 +45,39 @@ // // This function is provided for use by commands that cannot use // JujuCommandBase. Most clients should use that instead. -func NewAPIContext(ctxt *cmd.Context) (*APIContext, error) { +func NewAPIContext(ctxt *cmd.Context, opts *AuthOpts) (*APIContext, error) { jar, err := cookiejar.New(&cookiejar.Options{ Filename: cookieFile(), }) if err != nil { return nil, errors.Trace(err) } - client := httpbakery.NewClient() - client.Jar = jar - if ctxt != nil { + var visitors []httpbakery.Visitor + if ctxt != nil && opts != nil && opts.NoBrowser { filler := &form.IOFiller{ In: ctxt.Stdin, Out: ctxt.Stdout, } - client.VisitWebPage = ussologin.VisitWebPage( - "juju", - &http.Client{}, - filler, - jujuclient.NewTokenStore(), - ) + visitors = append(visitors, ussologin.NewVisitor("juju", filler, jujuclient.NewTokenStore())) } else { - client.VisitWebPage = httpbakery.OpenWebBrowser + visitors = append(visitors, httpbakery.WebBrowserVisitor) } + webPageVisitor := httpbakery.NewMultiVisitor(visitors...) return &APIContext{ - Jar: jar, - BakeryClient: client, + Jar: jar, + WebPageVisitor: webPageVisitor, }, nil } +// NewBakeryClient returns a new httpbakery.Client, using the API context's +// persistent cookie jar and web page visitor. +func (ctx *APIContext) NewBakeryClient() *httpbakery.Client { + client := httpbakery.NewClient() + client.Jar = ctx.Jar + client.WebPageVisitor = ctx.WebPageVisitor + return client +} + // cookieFile returns the path to the cookie used to store authorization // macaroons. The returned value can be overridden by setting the // JUJU_COOKIEFILE or GO_COOKIEFILE environment variables. 
diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/modelcmd/base.go juju-core-2.0.0/src/github.com/juju/juju/cmd/modelcmd/base.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/modelcmd/base.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/modelcmd/base.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,16 +4,21 @@ package modelcmd import ( + "bufio" "fmt" + "io" "net/http" + "os" "github.com/juju/cmd" "github.com/juju/errors" + "github.com/juju/gnuflag" + "golang.org/x/crypto/ssh/terminal" "gopkg.in/juju/names.v2" "gopkg.in/macaroon-bakery.v1/httpbakery" - "launchpad.net/gnuflag" "github.com/juju/juju/api" + "github.com/juju/juju/api/authentication" "github.com/juju/juju/api/base" "github.com/juju/juju/api/modelmanager" "github.com/juju/juju/apiserver/params" @@ -48,8 +53,9 @@ cmd.CommandBase cmdContext *cmd.Context apiContext *APIContext - modelApi ModelAPI + modelAPI_ ModelAPI apiOpenFunc api.OpenFunc + authOpts AuthOpts } // closeContext closes the command's API context @@ -62,9 +68,14 @@ } } -// SetModelApi sets the api used to access model information. -func (c *JujuCommandBase) SetModelApi(api ModelAPI) { - c.modelApi = api +// SetFlags implements cmd.Command.SetFlags. +func (c *JujuCommandBase) SetFlags(f *gnuflag.FlagSet) { + c.authOpts.SetFlags(f) +} + +// SetModelAPI sets the api used to access model information. +func (c *JujuCommandBase) SetModelAPI(api ModelAPI) { + c.modelAPI_ = api } // SetAPIOpen sets the function used for opening an API connection. 
@@ -73,15 +84,15 @@ } func (c *JujuCommandBase) modelAPI(store jujuclient.ClientStore, controllerName string) (ModelAPI, error) { - if c.modelApi != nil { - return c.modelApi, nil + if c.modelAPI_ != nil { + return c.modelAPI_, nil } conn, err := c.NewAPIRoot(store, controllerName, "") if err != nil { return nil, errors.Trace(err) } - c.modelApi = modelmanager.NewClient(conn) - return c.modelApi, nil + c.modelAPI_ = modelmanager.NewClient(conn) + return c.modelAPI_, nil } // NewAPIRoot returns a new connection to the API server for the given @@ -152,13 +163,29 @@ controllerName, modelName string, accountDetails *jujuclient.AccountDetails, ) (juju.NewAPIConnectionParams, error) { - if err := c.initAPIContext(); err != nil { + bakeryClient, err := c.BakeryClient() + if err != nil { return juju.NewAPIConnectionParams{}, errors.Trace(err) } + var getPassword func(username string) (string, error) + if c.cmdContext != nil { + getPassword = func(username string) (string, error) { + fmt.Fprintf(c.cmdContext.Stderr, "please enter password for %s on %s: ", username, controllerName) + defer fmt.Fprintln(c.cmdContext.Stderr) + return readPassword(c.cmdContext.Stdin) + } + } else { + getPassword = func(username string) (string, error) { + return "", errors.New("no context to prompt for password") + } + } + return newAPIConnectionParams( store, controllerName, modelName, - accountDetails, c.apiContext.BakeryClient, + accountDetails, + bakeryClient, c.apiOpen, + getPassword, ) } @@ -168,10 +195,11 @@ // have the correct TLS setup - use api.Connection.HTTPClient // for that. 
func (c *JujuCommandBase) HTTPClient() (*http.Client, error) { - if err := c.initAPIContext(); err != nil { + bakeryClient, err := c.BakeryClient() + if err != nil { return nil, errors.Trace(err) } - return c.apiContext.BakeryClient.Client, nil + return bakeryClient.Client, nil } // BakeryClient returns a macaroon bakery client that @@ -180,7 +208,7 @@ if err := c.initAPIContext(); err != nil { return nil, errors.Trace(err) } - return c.apiContext.BakeryClient, nil + return c.apiContext.NewBakeryClient(), nil } // APIOpen establishes a connection to the API server using the @@ -228,7 +256,7 @@ if c.apiContext != nil { return nil } - apiContext, err := NewAPIContext(c.cmdContext) + apiContext, err := NewAPIContext(c.cmdContext, &c.authOpts) if err != nil { return errors.Trace(err) } @@ -236,6 +264,35 @@ return nil } +// APIContext returns the API context used by the command. +// It should only be called while the Run method is being called. +// +// The returned APIContext should not be closed (it will be +// closed when the Run method completes). +func (c *JujuCommandBase) APIContext() (*APIContext, error) { + if err := c.initAPIContext(); err != nil { + return nil, errors.Trace(err) + } + return c.apiContext, nil +} + +// ClearControllerMacaroons will remove all macaroons stored +// for the controller from the persistent cookie jar. +// This is called both from 'juju logout' and a failed 'juju register'. 
+func (c *JujuCommandBase) ClearControllerMacaroons(endpoints []string) error { + apictx, err := c.APIContext() + if err != nil { + return errors.Trace(err) + } + for _, s := range endpoints { + apictx.Jar.RemoveAllHost(s) + } + if err := apictx.Jar.Save(); err != nil { + return errors.Annotate(err, "can't remove cached authentication cookie") + } + return nil +} + func (c *JujuCommandBase) setCmdContext(ctx *cmd.Context) { c.cmdContext = ctx } @@ -285,6 +342,7 @@ accountDetails *jujuclient.AccountDetails, bakery *httpbakery.Client, apiOpen api.OpenFunc, + getPassword func(string) (string, error), ) (juju.NewAPIConnectionParams, error) { if controllerName == "" { return juju.NewAPIConnectionParams{}, errors.Trace(errNoNameSpecified) @@ -300,30 +358,11 @@ dialOpts := api.DefaultDialOpts() dialOpts.BakeryClient = bakery - openAPI := func(info *api.Info, opts api.DialOpts) (api.Connection, error) { - conn, err := apiOpen(info, opts) - if err != nil { - userTag, ok := info.Tag.(names.UserTag) - if ok && userTag.IsLocal() && params.IsCodeLoginExpired(err) { - // This is a bit gross, but we don't seem to have - // a way of having an error with a cause that does - // not influence the error message. We want to keep - // the type/code so we don't lose the fact that the - // error was caused by an API login expiry. - return nil, ¶ms.Error{ - Code: params.CodeLoginExpired, - Message: fmt.Sprintf(`login expired - -Your login for the %q controller has expired. 
-To log back in, run the following command: - - juju login %v -`, controllerName, userTag.Name()), - } - } - return nil, err - } - return conn, nil + if accountDetails != nil { + bakery.WebPageVisitor = httpbakery.NewMultiVisitor( + authentication.NewVisitor(accountDetails.User, getPassword), + bakery.WebPageVisitor, + ) } return juju.NewAPIConnectionParams{ @@ -332,24 +371,19 @@ AccountDetails: accountDetails, ModelUUID: modelUUID, DialOpts: dialOpts, - OpenAPI: openAPI, + OpenAPI: apiOpen, }, nil } -// NewGetBootstrapConfigFunc returns a function that, given a controller name, -// returns the bootstrap config for that controller in the given client store. -func NewGetBootstrapConfigFunc(store jujuclient.ClientStore) func(string) (*config.Config, error) { - return bootstrapConfigGetter{store}.getBootstrapConfig -} - // NewGetBootstrapConfigParamsFunc returns a function that, given a controller name, // returns the params needed to bootstrap a fresh copy of that controller in the given client store. 
-func NewGetBootstrapConfigParamsFunc(store jujuclient.ClientStore) func(string) (*jujuclient.BootstrapConfig, *environs.PrepareConfigParams, error) { - return bootstrapConfigGetter{store}.getBootstrapConfigParams +func NewGetBootstrapConfigParamsFunc(ctx *cmd.Context, store jujuclient.ClientStore) func(string) (*jujuclient.BootstrapConfig, *environs.PrepareConfigParams, error) { + return bootstrapConfigGetter{ctx, store}.getBootstrapConfigParams } type bootstrapConfigGetter struct { - jujuclient.ClientStore + ctx *cmd.Context + store jujuclient.ClientStore } func (g bootstrapConfigGetter) getBootstrapConfig(controllerName string) (*config.Config, error) { @@ -365,22 +399,36 @@ } func (g bootstrapConfigGetter) getBootstrapConfigParams(controllerName string) (*jujuclient.BootstrapConfig, *environs.PrepareConfigParams, error) { - if _, err := g.ClientStore.ControllerByName(controllerName); err != nil { + if _, err := g.store.ControllerByName(controllerName); err != nil { return nil, nil, errors.Annotate(err, "resolving controller name") } - bootstrapConfig, err := g.BootstrapConfigForController(controllerName) + bootstrapConfig, err := g.store.BootstrapConfigForController(controllerName) if err != nil { return nil, nil, errors.Annotate(err, "getting bootstrap config") } var credential *cloud.Credential if bootstrapConfig.Credential != "" { + bootstrapCloud := cloud.Cloud{ + Type: bootstrapConfig.CloudType, + Endpoint: bootstrapConfig.CloudEndpoint, + IdentityEndpoint: bootstrapConfig.CloudIdentityEndpoint, + } + if bootstrapConfig.CloudRegion != "" { + bootstrapCloud.Regions = []cloud.Region{{ + Name: bootstrapConfig.CloudRegion, + Endpoint: bootstrapConfig.CloudEndpoint, + IdentityEndpoint: bootstrapConfig.CloudIdentityEndpoint, + }} + } credential, _, _, err = GetCredentials( - g.ClientStore, - bootstrapConfig.CloudRegion, - bootstrapConfig.Credential, - bootstrapConfig.Cloud, - bootstrapConfig.CloudType, + g.ctx, g.store, + GetCredentialsParams{ + Cloud: 
bootstrapCloud, + CloudName: bootstrapConfig.Cloud, + CloudRegion: bootstrapConfig.CloudRegion, + CredentialName: bootstrapConfig.Credential, + }, ) if err != nil { return nil, nil, errors.Trace(err) @@ -402,26 +450,60 @@ } // Add attributes from the controller details. - controllerDetails, err := g.ControllerByName(controllerName) + controllerDetails, err := g.store.ControllerByName(controllerName) if err != nil { return nil, nil, errors.Trace(err) } - bootstrapConfig.Config[config.UUIDKey] = controllerDetails.ControllerUUID + // TODO(wallyworld) - remove after beta18 + controllerModelUUID := bootstrapConfig.ControllerModelUUID + if controllerModelUUID == "" { + controllerModelUUID = controllerDetails.ControllerUUID + } + + bootstrapConfig.Config[config.UUIDKey] = controllerModelUUID cfg, err := config.New(config.NoDefaults, bootstrapConfig.Config) if err != nil { return nil, nil, errors.Trace(err) } return bootstrapConfig, &environs.PrepareConfigParams{ - controllerDetails.ControllerUUID, environs.CloudSpec{ bootstrapConfig.CloudType, bootstrapConfig.Cloud, bootstrapConfig.CloudRegion, bootstrapConfig.CloudEndpoint, + bootstrapConfig.CloudIdentityEndpoint, bootstrapConfig.CloudStorageEndpoint, credential, }, cfg, }, nil } + +// TODO(axw) this is now in three places: change-password, +// register, and here. Refactor and move to a common location. +func readPassword(stdin io.Reader) (string, error) { + if f, ok := stdin.(*os.File); ok && terminal.IsTerminal(int(f.Fd())) { + password, err := terminal.ReadPassword(int(f.Fd())) + return string(password), err + } + return readLine(stdin) +} + +func readLine(stdin io.Reader) (string, error) { + // Read one byte at a time to avoid reading beyond the delimiter. + line, err := bufio.NewReader(byteAtATimeReader{stdin}).ReadString('\n') + if err != nil { + return "", errors.Trace(err) + } + return line[:len(line)-1], nil +} + +type byteAtATimeReader struct { + io.Reader +} + +// Read is part of the io.Reader interface. 
+func (r byteAtATimeReader) Read(out []byte) (int, error) { + return r.Reader.Read(out[:1]) +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/modelcmd/base_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/modelcmd/base_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/modelcmd/base_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/modelcmd/base_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -34,53 +34,36 @@ } s.store.Models["foo"] = &jujuclient.ControllerModels{ Models: map[string]jujuclient.ModelDetails{ - "admin@local/badmodel": {"deadbeef"}, - "admin@local/goodmodel": {"deadbeef2"}, + "admin/badmodel": {"deadbeef"}, + "admin/goodmodel": {"deadbeef2"}, }, - CurrentModel: "admin@local/badmodel", + CurrentModel: "admin/badmodel", } s.store.Accounts["foo"] = jujuclient.AccountDetails{ - User: "bar@local", Password: "hunter2", + User: "bar", Password: "hunter2", } } -func (s *BaseCommandSuite) TestLoginExpiry(c *gc.C) { - apiOpen := func(*api.Info, api.DialOpts) (api.Connection, error) { - return nil, ¶ms.Error{Code: params.CodeLoginExpired, Message: "meep"} - } - var cmd modelcmd.JujuCommandBase - cmd.SetAPIOpen(apiOpen) - conn, err := cmd.NewAPIRoot(s.store, "foo", "") - c.Assert(conn, gc.IsNil) - c.Assert(err, gc.ErrorMatches, `login expired - -Your login for the "foo" controller has expired. 
-To log back in, run the following command: - - juju login bar -`) -} - func (s *BaseCommandSuite) assertUnknownModel(c *gc.C, current, expectedCurrent string) { s.store.Models["foo"].CurrentModel = current apiOpen := func(*api.Info, api.DialOpts) (api.Connection, error) { return nil, errors.Trace(¶ms.Error{Code: params.CodeModelNotFound, Message: "model deaddeaf not found"}) } - cmd := modelcmd.NewModelCommandBase(s.store, "foo", "admin@local/badmodel") + cmd := modelcmd.NewModelCommandBase(s.store, "foo", "admin/badmodel") cmd.SetAPIOpen(apiOpen) conn, err := cmd.NewAPIRoot() c.Assert(conn, gc.IsNil) msg := strings.Replace(err.Error(), "\n", "", -1) - c.Assert(msg, gc.Equals, `model "admin@local/badmodel" has been removed from the controller, run 'juju models' and switch to one of them.There are 1 accessible models on controller "foo".`) + c.Assert(msg, gc.Equals, `model "admin/badmodel" has been removed from the controller, run 'juju models' and switch to one of them.There are 1 accessible models on controller "foo".`) c.Assert(s.store.Models["foo"].Models, gc.HasLen, 1) - c.Assert(s.store.Models["foo"].Models["admin@local/goodmodel"], gc.DeepEquals, jujuclient.ModelDetails{"deadbeef2"}) + c.Assert(s.store.Models["foo"].Models["admin/goodmodel"], gc.DeepEquals, jujuclient.ModelDetails{"deadbeef2"}) c.Assert(s.store.Models["foo"].CurrentModel, gc.Equals, expectedCurrent) } func (s *BaseCommandSuite) TestUnknownModel(c *gc.C) { - s.assertUnknownModel(c, "admin@local/badmodel", "") + s.assertUnknownModel(c, "admin/badmodel", "") } func (s *BaseCommandSuite) TestUnknownModelNotCurrent(c *gc.C) { - s.assertUnknownModel(c, "admin@local/goodmodel", "admin@local/goodmodel") + s.assertUnknownModel(c, "admin/goodmodel", "admin/goodmodel") } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/modelcmd/clientstore.go juju-core-2.0.0/src/github.com/juju/juju/cmd/modelcmd/clientstore.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/modelcmd/clientstore.go 
2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/modelcmd/clientstore.go 2016-10-13 14:31:49.000000000 +0000 @@ -35,8 +35,7 @@ if err != nil { return "", errors.Trace(err) } - // Make sure that the user name is canonical. - owner = names.NewUserTag(owner.Canonical()) + owner = names.NewUserTag(owner.Id()) modelName = jujuclient.JoinOwnerModelName(owner, unqualifiedModelName) } return modelName, nil diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/modelcmd/controller.go juju-core-2.0.0/src/github.com/juju/juju/cmd/modelcmd/controller.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/modelcmd/controller.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/modelcmd/controller.go 2016-10-13 14:31:49.000000000 +0000 @@ -6,7 +6,7 @@ import ( "github.com/juju/cmd" "github.com/juju/errors" - "launchpad.net/gnuflag" + "github.com/juju/gnuflag" "github.com/juju/juju/api" "github.com/juju/juju/api/controller" @@ -19,19 +19,18 @@ // ErrNoControllersDefined is returned by commands that operate on // a controller if there is no current controller, no controller has been // explicitly specified, and there is no default controller. - ErrNoControllersDefined = errors.New(`no controller + ErrNoControllersDefined = errors.New(`No controllers registered. -Please either create your own new controller using "juju bootstrap" or -connect to another controller that you have been given access to using "juju register". +Please either create a new controller using "juju bootstrap" or connect to +another controller that you have been given access to using "juju register". `) - // ErrNotLoggedInToController is returned by commands that operate on + // ErrNoCurrentController is returned by commands that operate on // a controller if there is no current controller, no controller has been // explicitly specified, and there is no default controller but there are - // controllers that client knows about, i.e. 
the user needs to log in to one of them. - ErrNotLoggedInToController = errors.New(`not logged in + // controllers that client knows about. + ErrNoCurrentController = errors.New(`No selected controller. -Please use "juju controllers" to view all controllers available to you. -You can login into an existing controller using "juju login -c ". +Please use "juju switch" to select a controller. `) ) @@ -140,6 +139,27 @@ // credentials. Only the UserManager and ModelManager may be accessed // through this API connection. func (c *ControllerCommandBase) NewAPIRoot() (api.Connection, error) { + return c.newAPIRoot("") +} + +// NewAPIRoot returns a new connection to the API server for the named model +// in the specified controller. +func (c *ControllerCommandBase) NewModelAPIRoot(modelName string) (api.Connection, error) { + _, err := c.store.ModelByName(c.controllerName, modelName) + if err != nil { + if !errors.IsNotFound(err) { + return nil, errors.Trace(err) + } + // The model isn't known locally, so query the models + // available in the controller, and cache them locally. + if err := c.RefreshModels(c.store, c.controllerName); err != nil { + return nil, errors.Annotate(err, "refreshing models") + } + } + return c.newAPIRoot(modelName) +} + +func (c *ControllerCommandBase) newAPIRoot(modelName string) (api.Connection, error) { if c.controllerName == "" { controllers, err := c.store.AllControllers() if err != nil { @@ -148,13 +168,13 @@ if len(controllers) == 0 { return nil, errors.Trace(ErrNoControllersDefined) } - return nil, errors.Trace(ErrNotLoggedInToController) + return nil, errors.Trace(ErrNoCurrentController) } opener := c.opener if opener == nil { opener = OpenFunc(c.JujuCommandBase.NewAPIRoot) } - return opener.Open(c.store, c.controllerName, "") + return opener.Open(c.store, c.controllerName, modelName) } // ModelUUIDs returns the model UUIDs for the given model names. 
@@ -180,25 +200,32 @@ return result, nil } -// WrapControllerOption sets various parameters of the -// ControllerCommand wrapper. +// WrapControllerOption specifies an option to the WrapController function. type WrapControllerOption func(*sysCommandWrapper) -// ControllerSkipFlags instructs the wrapper to skip -c -// and --controller flag definition. -func ControllerSkipFlags(w *sysCommandWrapper) { - w.setFlags = false +// Options for the WrapController call. +var ( + // WrapControllerSkipControllerFlags specifies that the -c + // and --controller flag flags should not be defined. + WrapControllerSkipControllerFlags WrapControllerOption = wrapControllerSkipControllerFlags + + // WrapSkipDefaultModel specifies that no default controller should + // be used. + WrapControllerSkipDefaultController WrapControllerOption = wrapControllerSkipDefaultController +) + +func wrapControllerSkipControllerFlags(w *sysCommandWrapper) { + w.setControllerFlags = false } -// ControllerSkipDefault instructs the wrapper not to -// use the default controller name. -func ControllerSkipDefault(w *sysCommandWrapper) { - w.useDefaultControllerName = false +func wrapControllerSkipDefaultController(w *sysCommandWrapper) { + w.useDefaultController = false } -// ControllerAPIOpener instructs the underlying controller command to use a -// different APIOpener strategy. -func ControllerAPIOpener(opener APIOpener) WrapControllerOption { +// WrapControllerAPIOpener specifies that the given APIOpener +// should should be used to open the API connection when +// NewAPIRoot or NewControllerAPIRoot are called. +func WrapControllerAPIOpener(opener APIOpener) WrapControllerOption { return func(w *sysCommandWrapper) { w.ControllerCommand.SetAPIOpener(opener) } @@ -208,9 +235,9 @@ // that proxies to each of the ControllerCommand methods. 
func WrapController(c ControllerCommand, options ...WrapControllerOption) cmd.Command { wrapper := &sysCommandWrapper{ - ControllerCommand: c, - setFlags: true, - useDefaultControllerName: true, + ControllerCommand: c, + setControllerFlags: true, + useDefaultController: true, } for _, option := range options { option(wrapper) @@ -220,14 +247,14 @@ type sysCommandWrapper struct { ControllerCommand - setFlags bool - useDefaultControllerName bool - controllerName string + setControllerFlags bool + useDefaultController bool + controllerName string } // SetFlags implements Command.SetFlags, then calls the wrapped command's SetFlags. func (w *sysCommandWrapper) SetFlags(f *gnuflag.FlagSet) { - if w.setFlags { + if w.setControllerFlags { f.StringVar(&w.controllerName, "c", "", "Controller to operate in") f.StringVar(&w.controllerName, "controller", "", "") } @@ -242,25 +269,38 @@ } store = QualifyingClientStore{store} w.SetClientStore(store) - if w.setFlags { - if w.controllerName == "" && w.useDefaultControllerName { + + if w.setControllerFlags { + if w.controllerName == "" && w.useDefaultController { + store := w.ClientStore() currentController, err := store.CurrentController() - if errors.IsNotFound(err) { - return ErrNoControllersDefined - } if err != nil { - return errors.Trace(err) + return translateControllerError(store, err) } w.controllerName = currentController } - if w.controllerName == "" && !w.useDefaultControllerName { + if w.controllerName == "" && !w.useDefaultController { return ErrNoControllersDefined } } if w.controllerName != "" { if err := w.SetControllerName(w.controllerName); err != nil { - return errors.Trace(err) + return translateControllerError(w.ClientStore(), err) } } return w.ControllerCommand.Init(args) } + +func translateControllerError(store jujuclient.ClientStore, err error) error { + if !errors.IsNotFound(err) { + return err + } + controllers, err2 := store.AllControllers() + if err2 != nil { + return err2 + } + if len(controllers) == 0 
{ + return errors.Wrap(err, ErrNoControllersDefined) + } + return errors.Wrap(err, ErrNoCurrentController) +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/modelcmd/controller_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/modelcmd/controller_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/modelcmd/controller_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/modelcmd/controller_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -6,6 +6,7 @@ import ( "github.com/juju/cmd" "github.com/juju/cmd/cmdtesting" + "github.com/juju/errors" jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" @@ -22,15 +23,15 @@ var _ = gc.Suite(&ControllerCommandSuite{}) func (s *ControllerCommandSuite) TestControllerCommandNoneSpecified(c *gc.C) { - _, err := initTestControllerCommand(c, nil) - c.Assert(err, gc.ErrorMatches, "no controller(.|\n)*") + _, _, err := initTestControllerCommand(c, nil) + c.Assert(errors.Cause(err), gc.Equals, modelcmd.ErrNoControllersDefined) } func (s *ControllerCommandSuite) TestControllerCommandInitCurrentController(c *gc.C) { store := jujuclienttesting.NewMemStore() store.CurrentControllerName = "foo" store.Accounts["foo"] = jujuclient.AccountDetails{ - User: "bar@local", + User: "bar", } store.Controllers["foo"] = jujuclient.ControllerDetails{} testEnsureControllerName(c, store, "foo") @@ -42,7 +43,7 @@ store := jujuclienttesting.NewMemStore() store.CurrentControllerName = "foo" store.Accounts["explicit"] = jujuclient.AccountDetails{ - User: "bar@local", + User: "bar", } store.Controllers["explicit"] = jujuclient.ControllerDetails{} testEnsureControllerName(c, store, "explicit", "-c", "explicit") @@ -51,7 +52,7 @@ func (s *ControllerCommandSuite) TestWrapWithoutFlags(c *gc.C) { cmd := new(testControllerCommand) - wrapped := modelcmd.WrapController(cmd, modelcmd.ControllerSkipFlags) + wrapped := modelcmd.WrapController(cmd, modelcmd.WrapControllerSkipControllerFlags) err := 
cmdtesting.InitCommand(wrapped, []string{"-s", "testsys"}) c.Assert(err, gc.ErrorMatches, "flag provided but not defined: -s") } @@ -65,18 +66,23 @@ } func (c *testControllerCommand) Run(ctx *cmd.Context) error { - panic("should not be called") + return nil } -func initTestControllerCommand(c *gc.C, store jujuclient.ClientStore, args ...string) (*testControllerCommand, error) { +func initTestControllerCommand(c *gc.C, store jujuclient.ClientStore, args ...string) (cmd.Command, *testControllerCommand, error) { cmd := new(testControllerCommand) cmd.SetClientStore(store) wrapped := modelcmd.WrapController(cmd) - return cmd, cmdtesting.InitCommand(wrapped, args) + if err := cmdtesting.InitCommand(wrapped, args); err != nil { + return nil, nil, err + } + return wrapped, cmd, nil } func testEnsureControllerName(c *gc.C, store jujuclient.ClientStore, expect string, args ...string) { - cmd, err := initTestControllerCommand(c, store, args...) + cmd, controllerCmd, err := initTestControllerCommand(c, store, args...) + c.Assert(err, jc.ErrorIsNil) + err = cmd.Run(nil) c.Assert(err, jc.ErrorIsNil) - c.Assert(cmd.ControllerName(), gc.Equals, expect) + c.Assert(controllerCmd.ControllerName(), gc.Equals, expect) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/modelcmd/credentials.go juju-core-2.0.0/src/github.com/juju/juju/cmd/modelcmd/credentials.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/modelcmd/credentials.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/modelcmd/credentials.go 2016-10-13 14:31:49.000000000 +0000 @@ -6,6 +6,7 @@ import ( "io/ioutil" + "github.com/juju/cmd" "github.com/juju/errors" "github.com/juju/utils" @@ -20,24 +21,62 @@ ErrMultipleCredentials = errors.New("more than one credential detected") ) +// GetCredentialsParams contains parameters for the GetCredentials function. +type GetCredentialsParams struct { + // Cloud is the cloud definition. 
+ Cloud cloud.Cloud + + // CloudName is the name of the cloud for which credentials are being + // obtained. + CloudName string + + // CloudRegion is the name of the region that the user has specified. + // If this is empty, then GetCredentials will determine the default + // region, and return that. The default region is the one set by the + // user in credentials.yaml, or if there is none set, the first region + // in the cloud's list. + CloudRegion string + + // CredentialName is the name of the credential to get. + CredentialName string +} + // GetCredentials returns a curated set of credential values for a given cloud. // The credential key values are read from the credentials store and the provider // finalises the values to resolve things like json files. // If region is not specified, the default credential region is used. func GetCredentials( - store jujuclient.CredentialGetter, region, credentialName, cloudName, cloudType string, + ctx *cmd.Context, + store jujuclient.CredentialGetter, + args GetCredentialsParams, ) (_ *cloud.Credential, chosenCredentialName, regionName string, _ error) { credential, credentialName, defaultRegion, err := credentialByName( - store, cloudName, credentialName, + store, args.CloudName, args.CredentialName, ) if err != nil { return nil, "", "", errors.Trace(err) } - regionName = region + regionName = args.CloudRegion if regionName == "" { regionName = defaultRegion + if regionName == "" && len(args.Cloud.Regions) > 0 { + // No region was specified, use the first region + // in the list. 
+ regionName = args.Cloud.Regions[0].Name + } + } + + cloudEndpoint := args.Cloud.Endpoint + cloudIdentityEndpoint := args.Cloud.IdentityEndpoint + if regionName != "" { + region, err := cloud.RegionByName(args.Cloud.Regions, regionName) + if err != nil { + return nil, "", "", errors.Trace(err) + } + cloudEndpoint = region.Endpoint + cloudIdentityEndpoint = region.IdentityEndpoint } readFile := func(f string) ([]byte, error) { @@ -49,7 +88,7 @@ } // Finalize credential against schemas supported by the provider. - provider, err := environs.Provider(cloudType) + provider, err := environs.Provider(args.Cloud.Type) if err != nil { return nil, "", "", errors.Trace(err) } @@ -59,10 +98,25 @@ ) if err != nil { return nil, "", "", errors.Annotatef( - err, "validating %q credential for cloud %q", - credentialName, cloudName, + err, "finalizing %q credential for cloud %q", + credentialName, args.CloudName, ) } + + credential, err = provider.FinalizeCredential( + ctx, environs.FinalizeCredentialParams{ + Credential: *credential, + CloudEndpoint: cloudEndpoint, + CloudIdentityEndpoint: cloudIdentityEndpoint, + }, + ) + if err != nil { + return nil, "", "", errors.Annotatef( + err, "finalizing %q credential for cloud %q", + credentialName, args.CloudName, + ) + } + return credential, credentialName, regionName, nil } @@ -84,9 +138,13 @@ return nil, "", "", errors.Annotate(err, "loading credentials") } if credentialName == "" { - // No credential specified, so use the default for the cloud. credentialName = cloudCredentials.DefaultCredential - if credentialName == "" && len(cloudCredentials.AuthCredentials) == 1 { + if credentialName == "" { + // No credential specified, but there's more than one. + if len(cloudCredentials.AuthCredentials) > 1 { + return nil, "", "", ErrMultipleCredentials + } + // No credential specified, so use the default for the cloud. 
for credentialName = range cloudCredentials.AuthCredentials { } } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/modelcmd/credentials_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/modelcmd/credentials_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/modelcmd/credentials_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/modelcmd/credentials_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,6 +4,8 @@ package modelcmd_test import ( + "fmt" + jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" @@ -42,25 +44,59 @@ } return map[cloud.AuthType]cloud.CredentialSchema{ cloud.UserPassAuthType: schema, + "interactive": cloud.CredentialSchema{ + {"username", cloud.CredentialAttr{}}, + }, + } +} + +func (mockProvider) FinalizeCredential( + ctx environs.FinalizeCredentialContext, + args environs.FinalizeCredentialParams, +) (*cloud.Credential, error) { + if args.Credential.AuthType() == "interactive" { + username := args.Credential.Attributes()["username"] + fmt.Fprintf(ctx.GetStderr(), "generating credential for %q\n", username) + out := cloud.NewCredential(cloud.UserPassAuthType, map[string]string{ + "username": username, + "password": "sekret", + "key": "value", + }) + return &out, nil } + return &args.Credential, nil } type credentialsSuite struct { testing.FakeJujuXDGDataHomeSuite + cloud cloud.Cloud + store *jujuclienttesting.MemStore } var _ = gc.Suite(&credentialsSuite{}) -func (s *credentialsSuite) assertGetCredentials(c *gc.C, region string) { +func (s *credentialsSuite) SetUpTest(c *gc.C) { + s.FakeJujuXDGDataHomeSuite.SetUpTest(c) + s.cloud = cloud.Cloud{ + Type: "fake", + Regions: []cloud.Region{ + {Name: "first-region"}, + {Name: "second-region"}, + }, + } + dir := c.MkDir() keyFile := filepath.Join(dir, "keyfile") err := ioutil.WriteFile(keyFile, []byte("value"), 0600) c.Assert(err, jc.ErrorIsNil) - store := jujuclienttesting.NewMemStore() - store.Credentials["cloud"] = 
cloud.CloudCredential{ - DefaultRegion: "default-region", + s.store = jujuclienttesting.NewMemStore() + s.store.Credentials["cloud"] = cloud.CloudCredential{ + DefaultRegion: "second-region", AuthCredentials: map[string]cloud.Credential{ + "interactive": cloud.NewCredential("interactive", map[string]string{ + "username": "user", + }), "secrets": cloud.NewCredential(cloud.UserPassAuthType, map[string]string{ "username": "user", "password": "sekret", @@ -68,17 +104,27 @@ }), }, } +} +func (s *credentialsSuite) assertGetCredentials(c *gc.C, cred, region string) { credential, credentialName, regionName, err := modelcmd.GetCredentials( - store, region, "secrets", "cloud", "fake", + testing.Context(c), s.store, modelcmd.GetCredentialsParams{ + Cloud: s.cloud, + CloudName: "cloud", + CloudRegion: region, + CredentialName: cred, + }, ) c.Assert(err, jc.ErrorIsNil) expectedRegion := region if expectedRegion == "" { - expectedRegion = "default-region" + expectedRegion = s.store.Credentials["cloud"].DefaultRegion + if expectedRegion == "" && len(s.cloud.Regions) > 0 { + expectedRegion = "first-region" + } } c.Assert(regionName, gc.Equals, expectedRegion) - c.Assert(credentialName, gc.Equals, "secrets") + c.Assert(credentialName, gc.Equals, cred) c.Assert(credential.Attributes(), jc.DeepEquals, map[string]string{ "key": "value", "username": "user", @@ -86,10 +132,30 @@ }) } -func (s *credentialsSuite) TestGetCredentialsDefaultRegion(c *gc.C) { - s.assertGetCredentials(c, "") +func (s *credentialsSuite) TestGetCredentialsUserDefaultRegion(c *gc.C) { + s.assertGetCredentials(c, "secrets", "") +} + +func (s *credentialsSuite) TestGetCredentialsCloudDefaultRegion(c *gc.C) { + creds := s.store.Credentials["cloud"] + creds.DefaultRegion = "" + s.store.Credentials["cloud"] = creds + s.assertGetCredentials(c, "secrets", "") +} + +func (s *credentialsSuite) TestGetCredentialsNoRegion(c *gc.C) { + creds := s.store.Credentials["cloud"] + creds.DefaultRegion = "" + 
s.store.Credentials["cloud"] = creds + s.cloud.Regions = nil + s.assertGetCredentials(c, "secrets", "") } func (s *credentialsSuite) TestGetCredentials(c *gc.C) { - s.assertGetCredentials(c, "region") + s.cloud.Regions = append(s.cloud.Regions, cloud.Region{Name: "third-region"}) + s.assertGetCredentials(c, "secrets", "third-region") +} + +func (s *credentialsSuite) TestGetCredentialsProviderFinalizeCredential(c *gc.C) { + s.assertGetCredentials(c, "interactive", "") } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/modelcmd/modelcommand.go juju-core-2.0.0/src/github.com/juju/juju/cmd/modelcmd/modelcommand.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/modelcmd/modelcommand.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/modelcmd/modelcommand.go 2016-10-13 14:31:49.000000000 +0000 @@ -10,8 +10,8 @@ "github.com/juju/cmd" "github.com/juju/errors" + "github.com/juju/gnuflag" "github.com/juju/loggo" - "launchpad.net/gnuflag" "github.com/juju/juju/api" "github.com/juju/juju/environs" @@ -24,7 +24,7 @@ // ErrNoModelSpecified is returned by commands that operate on // an environment if there is no current model, no model // has been explicitly specified, and there is no default model. -var ErrNoModelSpecified = errors.New(`no model in focus +var ErrNoModelSpecified = errors.New(`No model in focus. Please use "juju models" to see models available to you. You can set current model by running "juju switch" @@ -173,28 +173,15 @@ return root.Client(), nil } -// NewAPIRoot returns a new connection to the API server for the environment. +// NewAPIRoot returns a new connection to the API server for the environment +// directed to the model specified on the command line. func (c *ModelCommandBase) NewAPIRoot() (api.Connection, error) { // This is work in progress as we remove the ModelName from downstream code. 
// We want to be able to specify the environment in a number of ways, one of // which is the connection name on the client machine. - if c.controllerName == "" { - controllers, err := c.store.AllControllers() - if err != nil { - return nil, errors.Trace(err) - } - if len(controllers) == 0 { - return nil, errors.Trace(ErrNoControllersDefined) - } - return nil, errors.Trace(ErrNotLoggedInToController) - } if c.modelName == "" { return nil, errors.Trace(ErrNoModelSpecified) } - opener := c.opener - if opener == nil { - opener = OpenFunc(c.JujuCommandBase.NewAPIRoot) - } _, err := c.store.ModelByName(c.controllerName, c.modelName) if err != nil { if !errors.IsNotFound(err) { @@ -206,7 +193,35 @@ return nil, errors.Annotate(err, "refreshing models") } } - return opener.Open(c.store, c.controllerName, c.modelName) + return c.newAPIRoot(c.modelName) +} + +// NewControllerAPIRoot returns a new connection to the API server for the environment +// directed to the controller specified on the command line. +// This is for the use of model-centered commands that still want +// to talk to controller-only APIs. +func (c *ModelCommandBase) NewControllerAPIRoot() (api.Connection, error) { + return c.newAPIRoot("") +} + +// newAPIRoot is the internal implementation of NewAPIRoot and NewControllerAPIRoot; +// if modelName is empty, it makes a controller-only connection. +func (c *ModelCommandBase) newAPIRoot(modelName string) (api.Connection, error) { + if c.controllerName == "" { + controllers, err := c.store.AllControllers() + if err != nil { + return nil, errors.Trace(err) + } + if len(controllers) == 0 { + return nil, errors.Trace(ErrNoControllersDefined) + } + return nil, errors.Trace(ErrNoCurrentController) + } + opener := c.opener + if opener == nil { + opener = OpenFunc(c.JujuCommandBase.NewAPIRoot) + } + return opener.Open(c.store, c.controllerName, modelName) } // ConnectionName returns the name of the connection if there is one. 
@@ -217,19 +232,25 @@ return c.modelName } -// WrapControllerOption sets various parameters of the -// ModelCommand wrapper. -type WrapEnvOption func(*modelCommandWrapper) - -// ModelSkipFlags instructs the wrapper to skip --m and -// --model flag definition. -func ModelSkipFlags(w *modelCommandWrapper) { - w.skipFlags = true +// WrapOption specifies an option to the Wrap function. +type WrapOption func(*modelCommandWrapper) + +// Options for the Wrap function. +var ( + // WrapSkipModelFlags specifies that the -m and --model flags + // should not be defined. + WrapSkipModelFlags WrapOption = wrapSkipModelFlags + + // WrapSkipDefaultModel specifies that no default model should + // be used. + WrapSkipDefaultModel WrapOption = wrapSkipDefaultModel +) + +func wrapSkipModelFlags(w *modelCommandWrapper) { + w.skipModelFlags = true } -// ModelSkipDefault instructs the wrapper not to -// use the default model. -func ModelSkipDefault(w *modelCommandWrapper) { +func wrapSkipDefaultModel(w *modelCommandWrapper) { w.useDefaultModel = false } @@ -237,12 +258,11 @@ // that proxies to each of the ModelCommand methods. // Any provided options are applied to the wrapped command // before it is returned. -func Wrap(c ModelCommand, options ...WrapEnvOption) cmd.Command { +func Wrap(c ModelCommand, options ...WrapOption) cmd.Command { wrapper := &modelCommandWrapper{ ModelCommand: c, - skipFlags: false, + skipModelFlags: false, useDefaultModel: true, - allowEmptyEnv: false, } for _, option := range options { option(wrapper) @@ -253,9 +273,8 @@ type modelCommandWrapper struct { ModelCommand - skipFlags bool + skipModelFlags bool useDefaultModel bool - allowEmptyEnv bool modelName string } @@ -264,7 +283,7 @@ } func (w *modelCommandWrapper) SetFlags(f *gnuflag.FlagSet) { - if !w.skipFlags { + if !w.skipModelFlags { f.StringVar(&w.modelName, "m", "", "Model to operate in. 
Accepts [:]") f.StringVar(&w.modelName, "model", "", "") } @@ -278,7 +297,7 @@ } store = QualifyingClientStore{store} w.SetClientStore(store) - if !w.skipFlags { + if !w.skipModelFlags { if w.modelName == "" && w.useDefaultModel { // Look for the default. defaultModel, err := GetCurrentModel(store) @@ -288,16 +307,12 @@ w.modelName = defaultModel } if w.modelName == "" && !w.useDefaultModel { - if w.allowEmptyEnv { - return w.ModelCommand.Init(args) - } else { - return errors.Trace(ErrNoModelSpecified) - } + return errors.Trace(ErrNoModelSpecified) } } if w.modelName != "" { if err := w.SetModelName(w.modelName); err != nil { - return errors.Annotate(err, "setting model name") + return translateControllerError(store, err) } } return w.ModelCommand.Init(args) @@ -330,11 +345,6 @@ } } -type ModelGetter interface { - ModelGet() (map[string]interface{}, error) - Close() error -} - // SplitModelName splits a model name into its controller // and model parts. If the model is unqualified, then the // returned controller string will be empty, and the returned diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/modelcmd/modelcommand_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/modelcmd/modelcommand_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/modelcmd/modelcommand_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/modelcmd/modelcommand_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -18,6 +18,7 @@ "github.com/juju/juju/juju/osenv" "github.com/juju/juju/jujuclient" "github.com/juju/juju/jujuclient/jujuclienttesting" + "github.com/juju/juju/permission" "github.com/juju/juju/testing" ) @@ -34,7 +35,7 @@ s.store.CurrentControllerName = "foo" s.store.Controllers["foo"] = jujuclient.ControllerDetails{} s.store.Accounts["foo"] = jujuclient.AccountDetails{ - User: "bar@local", Password: "hunter2", + User: "bar", Password: "hunter2", } } @@ -54,34 +55,34 @@ } func (s *ModelCommandSuite) 
TestGetCurrentModelCurrentControllerModel(c *gc.C) { - err := s.store.UpdateModel("foo", "admin@local/mymodel", jujuclient.ModelDetails{"uuid"}) + err := s.store.UpdateModel("foo", "admin/mymodel", jujuclient.ModelDetails{"uuid"}) c.Assert(err, jc.ErrorIsNil) - err = s.store.SetCurrentModel("foo", "admin@local/mymodel") + err = s.store.SetCurrentModel("foo", "admin/mymodel") c.Assert(err, jc.ErrorIsNil) env, err := modelcmd.GetCurrentModel(s.store) c.Assert(err, jc.ErrorIsNil) - c.Assert(env, gc.Equals, "foo:admin@local/mymodel") + c.Assert(env, gc.Equals, "foo:admin/mymodel") } func (s *ModelCommandSuite) TestGetCurrentModelJujuEnvSet(c *gc.C) { - os.Setenv(osenv.JujuModelEnvKey, "admin@local/magic") + os.Setenv(osenv.JujuModelEnvKey, "admin/magic") env, err := modelcmd.GetCurrentModel(s.store) - c.Assert(env, gc.Equals, "admin@local/magic") + c.Assert(env, gc.Equals, "admin/magic") c.Assert(err, jc.ErrorIsNil) } func (s *ModelCommandSuite) TestGetCurrentModelBothSet(c *gc.C) { - os.Setenv(osenv.JujuModelEnvKey, "admin@local/magic") + os.Setenv(osenv.JujuModelEnvKey, "admin/magic") - err := s.store.UpdateModel("foo", "admin@local/mymodel", jujuclient.ModelDetails{"uuid"}) + err := s.store.UpdateModel("foo", "admin/mymodel", jujuclient.ModelDetails{"uuid"}) c.Assert(err, jc.ErrorIsNil) - err = s.store.SetCurrentModel("foo", "admin@local/mymodel") + err = s.store.SetCurrentModel("foo", "admin/mymodel") c.Assert(err, jc.ErrorIsNil) env, err := modelcmd.GetCurrentModel(s.store) c.Assert(err, jc.ErrorIsNil) - c.Assert(env, gc.Equals, "admin@local/magic") + c.Assert(env, gc.Equals, "admin/magic") } func (s *ModelCommandSuite) TestModelCommandInitExplicit(c *gc.C) { @@ -95,11 +96,11 @@ } func (s *ModelCommandSuite) TestModelCommandInitEnvFile(c *gc.C) { - err := s.store.UpdateModel("foo", "admin@local/mymodel", jujuclient.ModelDetails{"uuid"}) + err := s.store.UpdateModel("foo", "admin/mymodel", jujuclient.ModelDetails{"uuid"}) c.Assert(err, jc.ErrorIsNil) - err = 
s.store.SetCurrentModel("foo", "admin@local/mymodel") + err = s.store.SetCurrentModel("foo", "admin/mymodel") c.Assert(err, jc.ErrorIsNil) - s.testEnsureModelName(c, "admin@local/mymodel") + s.testEnsureModelName(c, "admin/mymodel") } func (s *ModelCommandSuite) TestBootstrapContext(c *gc.C) { @@ -114,7 +115,7 @@ func (s *ModelCommandSuite) TestWrapWithoutFlags(c *gc.C) { cmd := new(testCommand) - wrapped := modelcmd.Wrap(cmd, modelcmd.ModelSkipFlags) + wrapped := modelcmd.Wrap(cmd, modelcmd.WrapSkipModelFlags) args := []string{"-m", "testenv"} err := cmdtesting.InitCommand(wrapped, args) // 1st position is always the flag @@ -189,6 +190,7 @@ func (s *macaroonLoginSuite) SetUpTest(c *gc.C) { s.MacaroonSuite.SetUpTest(c) s.MacaroonSuite.AddModelUser(c, testUser) + s.MacaroonSuite.AddControllerUser(c, testUser, permission.LoginAccess) s.controllerName = "my-controller" s.modelName = testUser + "/my-model" @@ -198,7 +200,7 @@ s.store = jujuclienttesting.NewMemStore() s.store.Controllers[s.controllerName] = jujuclient.ControllerDetails{ APIEndpoints: apiInfo.Addrs, - ControllerUUID: apiInfo.ModelTag.Id(), + ControllerUUID: s.State.ControllerUUID(), CACert: apiInfo.CACert, } s.store.Accounts[s.controllerName] = jujuclient.AccountDetails{ diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/output/output.go juju-core-2.0.0/src/github.com/juju/juju/cmd/output/output.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/output/output.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/output/output.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,112 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package output + +import ( + "fmt" + "io" + + "github.com/juju/ansiterm" + "github.com/juju/cmd" + "github.com/juju/juju/status" +) + +// DefaultFormatters holds the formatters that can be +// specified with the --format flag. 
+var DefaultFormatters = map[string]cmd.Formatter{ + "yaml": cmd.FormatYaml, + "json": cmd.FormatJson, +} + +// TabWriter returns a new tab writer with common layout definition. +func TabWriter(writer io.Writer) *ansiterm.TabWriter { + const ( + // To format things into columns. + minwidth = 0 + tabwidth = 1 + padding = 2 + padchar = ' ' + flags = 0 + ) + return ansiterm.NewTabWriter(writer, minwidth, tabwidth, padding, padchar, flags) +} + +// Wrapper provides some helper functions for writing values out tab separated. +type Wrapper struct { + *ansiterm.TabWriter +} + +// Print writes each value followed by a tab. +func (w *Wrapper) Print(values ...interface{}) { + for _, v := range values { + fmt.Fprintf(w, "%v\t", v) + } +} + +// Printf writes the formatted text followed by a tab. +func (w *Wrapper) Printf(format string, values ...interface{}) { + fmt.Fprintf(w, format+"\t", values...) +} + +// Println writes many tab separated values finished with a new line. +func (w *Wrapper) Println(values ...interface{}) { + for i, v := range values { + if i != len(values)-1 { + fmt.Fprintf(w, "%v\t", v) + } else { + fmt.Fprintf(w, "%v", v) + } + } + fmt.Fprintln(w) +} + +// PrintColor writes the value out in the color context specified. +func (w *Wrapper) PrintColor(ctx *ansiterm.Context, value interface{}) { + if ctx != nil { + ctx.Fprintf(w.TabWriter, "%v\t", value) + } else { + fmt.Fprintf(w, "%v\t", value) + } +} + +// PrintStatus writes out the status value in the standard color. +func (w *Wrapper) PrintStatus(status status.Status) { + w.PrintColor(statusColors[status], status) +} + +// CurrentHighlight is the color used to show the current +// controller, user or model in tabular +var CurrentHighlight = ansiterm.Foreground(ansiterm.Green) + +// ErrorHighlight is the color used to show error conditions. +var ErrorHighlight = ansiterm.Foreground(ansiterm.Red) + +// WarningHighlight is the color used to show warning conditions. 
+// Generally things that the user should be aware of, but not necessarily +// requiring any user action. +var WarningHighlight = ansiterm.Foreground(ansiterm.Yellow) + +// GoodHighlight is used to indicate good or success conditions. +var GoodHighlight = ansiterm.Foreground(ansiterm.Green) + +var statusColors = map[status.Status]*ansiterm.Context{ + // good + status.Active: GoodHighlight, + status.Idle: GoodHighlight, + status.Started: GoodHighlight, + // busy + status.Allocating: WarningHighlight, + status.Executing: WarningHighlight, + status.Lost: WarningHighlight, + status.Maintenance: WarningHighlight, + status.Pending: WarningHighlight, + status.Rebooting: WarningHighlight, + status.Stopped: WarningHighlight, + status.Unknown: WarningHighlight, + // bad + status.Blocked: ErrorHighlight, + status.Down: ErrorHighlight, + status.Error: ErrorHighlight, + status.Failed: ErrorHighlight, +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/plugins/juju-metadata/addimage.go juju-core-2.0.0/src/github.com/juju/juju/cmd/plugins/juju-metadata/addimage.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/plugins/juju-metadata/addimage.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/plugins/juju-metadata/addimage.go 2016-10-13 14:31:49.000000000 +0000 @@ -6,8 +6,8 @@ import ( "github.com/juju/cmd" "github.com/juju/errors" + "github.com/juju/gnuflag" "github.com/juju/utils/series" - "launchpad.net/gnuflag" "github.com/juju/juju/apiserver/params" "github.com/juju/juju/cmd/modelcmd" diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/plugins/juju-metadata/cloudimagemetadata.go juju-core-2.0.0/src/github.com/juju/juju/cmd/plugins/juju-metadata/cloudimagemetadata.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/plugins/juju-metadata/cloudimagemetadata.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/plugins/juju-metadata/cloudimagemetadata.go 2016-10-13 
14:31:49.000000000 +0000 @@ -4,7 +4,7 @@ package main import ( - "launchpad.net/gnuflag" + "github.com/juju/gnuflag" "github.com/juju/juju/api/imagemetadata" ) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/plugins/juju-metadata/deleteimage.go juju-core-2.0.0/src/github.com/juju/juju/cmd/plugins/juju-metadata/deleteimage.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/plugins/juju-metadata/deleteimage.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/plugins/juju-metadata/deleteimage.go 2016-10-13 14:31:49.000000000 +0000 @@ -6,7 +6,7 @@ import ( "github.com/juju/cmd" "github.com/juju/errors" - "launchpad.net/gnuflag" + "github.com/juju/gnuflag" "github.com/juju/juju/cmd/modelcmd" ) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/plugins/juju-metadata/imagemetadata.go juju-core-2.0.0/src/github.com/juju/juju/cmd/plugins/juju-metadata/imagemetadata.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/plugins/juju-metadata/imagemetadata.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/plugins/juju-metadata/imagemetadata.go 2016-10-13 14:31:49.000000000 +0000 @@ -10,9 +10,9 @@ "github.com/juju/cmd" "github.com/juju/errors" + "github.com/juju/gnuflag" "github.com/juju/utils/arch" "github.com/juju/utils/series" - "launchpad.net/gnuflag" "github.com/juju/juju/cmd/modelcmd" "github.com/juju/juju/environs" @@ -31,7 +31,7 @@ // NOTE(axw) this is a work-around for the TODO below. This // means that the command will only work if you've bootstrapped // the specified environment. 
- bootstrapConfig, params, err := modelcmd.NewGetBootstrapConfigParamsFunc(c.ClientStore())(c.ControllerName()) + bootstrapConfig, params, err := modelcmd.NewGetBootstrapConfigParamsFunc(context, c.ClientStore())(c.ControllerName()) if err != nil { return nil, errors.Trace(err) } @@ -203,7 +203,7 @@ } err = imagemetadata.MergeAndWriteMetadata(c.Series, []*imagemetadata.ImageMetadata{im}, &cloudSpec, targetStorage) if err != nil { - return fmt.Errorf("image metadata files could not be created: %v", err) + return errors.Errorf("image metadata files could not be created: %v", err) } dir := context.AbsPath(c.Dir) dest := filepath.Join(dir, storage.BaseImagesPath, "streams", "v1") diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/plugins/juju-metadata/imagemetadata_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/plugins/juju-metadata/imagemetadata_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/plugins/juju-metadata/imagemetadata_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/plugins/juju-metadata/imagemetadata_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -151,14 +151,15 @@ "name": "ec2-latest-lts", "type": "ec2", "uuid": testing.ModelTag.Id(), - "controller-uuid": testing.ModelTag.Id(), + "controller-uuid": testing.ControllerTag.Id(), "region": "us-east-1", }) c.Assert(err, jc.ErrorIsNil) s.store.BootstrapConfig["ec2-controller"] = jujuclient.BootstrapConfig{ - Cloud: "ec2", - CloudRegion: "us-east-1", - Config: ec2Config.AllAttrs(), + ControllerConfig: testing.FakeControllerConfig(), + Cloud: "ec2", + CloudRegion: "us-east-1", + Config: ec2Config.AllAttrs(), } ctx, err := runImageMetadata(c, s.store, diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/plugins/juju-metadata/listformatter.go juju-core-2.0.0/src/github.com/juju/juju/cmd/plugins/juju-metadata/listformatter.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/plugins/juju-metadata/listformatter.go 2016-08-16 
08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/plugins/juju-metadata/listformatter.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,44 +4,33 @@ package main import ( - "bytes" "fmt" + "io" "strings" - "text/tabwriter" "github.com/juju/errors" + "github.com/juju/juju/cmd/output" ) -func formatMetadataListTabular(value interface{}) ([]byte, error) { +func formatMetadataListTabular(writer io.Writer, value interface{}) error { metadata, ok := value.([]MetadataInfo) if !ok { - return nil, errors.Errorf("expected value of type %T, got %T", metadata, value) + return errors.Errorf("expected value of type %T, got %T", metadata, value) } - return formatMetadataTabular(metadata) + formatMetadataTabular(writer, metadata) + return nil } -// formatMetadataTabular returns a tabular summary of cloud image metadata. -func formatMetadataTabular(metadata []MetadataInfo) ([]byte, error) { - var out bytes.Buffer - - const ( - // To format things into columns. - minwidth = 0 - tabwidth = 1 - padding = 2 - padchar = ' ' - flags = 0 - ) - tw := tabwriter.NewWriter(&out, minwidth, tabwidth, padding, padchar, flags) +// formatMetadataTabular writes a tabular summary of cloud image metadata. 
+func formatMetadataTabular(writer io.Writer, metadata []MetadataInfo) { + tw := output.TabWriter(writer) print := func(values ...string) { fmt.Fprintln(tw, strings.Join(values, "\t")) } - print("SOURCE", "SERIES", "ARCH", "REGION", "IMAGE-ID", "STREAM", "VIRT-TYPE", "STORAGE-TYPE") + print("Source", "Series", "Arch", "Region", "Image id", "Stream", "Virt Type", "Storage Type") for _, m := range metadata { print(m.Source, m.Series, m.Arch, m.Region, m.ImageId, m.Stream, m.VirtType, m.RootStorageType) } tw.Flush() - - return out.Bytes(), nil } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/plugins/juju-metadata/listimages.go juju-core-2.0.0/src/github.com/juju/juju/cmd/plugins/juju-metadata/listimages.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/plugins/juju-metadata/listimages.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/plugins/juju-metadata/listimages.go 2016-10-13 14:31:49.000000000 +0000 @@ -9,7 +9,7 @@ "strings" "github.com/juju/cmd" - "launchpad.net/gnuflag" + "github.com/juju/gnuflag" "github.com/juju/juju/apiserver/params" "github.com/juju/juju/cmd/modelcmd" diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/plugins/juju-metadata/listimages_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/plugins/juju-metadata/listimages_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/plugins/juju-metadata/listimages_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/plugins/juju-metadata/listimages_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -34,7 +34,7 @@ s.store.CurrentControllerName = "testing" s.store.Controllers["testing"] = jujuclient.ControllerDetails{} s.store.Accounts["testing"] = jujuclient.AccountDetails{ - User: "admin@local", + User: "admin", } } @@ -64,7 +64,7 @@ func (s *ListSuite) TestListDefault(c *gc.C) { // Default format is tabular s.assertValidList(c, ` -SOURCE SERIES ARCH REGION IMAGE-ID STREAM VIRT-TYPE 
STORAGE-TYPE +Source Series Arch Region Image id Stream Virt Type Storage Type custom vivid amd64 asia im-21 released kvm ebs custom vivid amd64 europe im-21 released kvm ebs custom vivid amd64 us im-21 released kvm ebs diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/plugins/juju-metadata/metadataplugin_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/plugins/juju-metadata/metadataplugin_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/plugins/juju-metadata/metadataplugin_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/plugins/juju-metadata/metadataplugin_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -16,7 +16,6 @@ gc "gopkg.in/check.v1" "github.com/juju/juju/feature" - "github.com/juju/juju/juju/osenv" "github.com/juju/juju/testing" ) @@ -58,8 +57,6 @@ localArgs := append([]string{"-test.run", "TestRunMain", "-run-main", "--", "juju-metadata"}, args...) ps := exec.Command(os.Args[0], localArgs...) - - ps.Env = append(os.Environ(), osenv.JujuXDGDataHomeEnvKey+"="+osenv.JujuXDGDataHome()) output, err := ps.CombinedOutput() if exit != 0 { c.Assert(err, gc.ErrorMatches, fmt.Sprintf("exit status %d", exit)) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/plugins/juju-metadata/signmetadata.go juju-core-2.0.0/src/github.com/juju/juju/cmd/plugins/juju-metadata/signmetadata.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/plugins/juju-metadata/signmetadata.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/plugins/juju-metadata/signmetadata.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,15 +4,15 @@ package main import ( - "fmt" "io/ioutil" "os" "path/filepath" "strings" "github.com/juju/cmd" + "github.com/juju/errors" + "github.com/juju/gnuflag" "github.com/juju/loggo" - "launchpad.net/gnuflag" "github.com/juju/juju/environs/simplestreams" ) @@ -55,10 +55,10 @@ func (c *signMetadataCommand) Init(args []string) error { if c.dir == "" { - return 
fmt.Errorf("directory must be specified") + return errors.Errorf("directory must be specified") } if c.keyFile == "" { - return fmt.Errorf("keyfile must be specified") + return errors.Errorf("keyfile must be specified") } return cmd.CheckEmpty(args) } @@ -88,15 +88,15 @@ logger.Infof("signing file %q", filename) f, err := os.Open(filename) if err != nil { - return fmt.Errorf("opening file %q: %v", filename, err) + return errors.Errorf("opening file %q: %v", filename, err) } encoded, err := simplestreams.Encode(f, key, passphrase) if err != nil { - return fmt.Errorf("encoding file %q: %v", filename, err) + return errors.Errorf("encoding file %q: %v", filename, err) } signedFilename := strings.Replace(filename, simplestreams.UnsignedSuffix, simplestreams.SignedSuffix, -1) if err = ioutil.WriteFile(signedFilename, encoded, 0644); err != nil { - return fmt.Errorf("writing signed file %q: %v", signedFilename, err) + return errors.Errorf("writing signed file %q: %v", signedFilename, err) } } // Now process any directories in dir. diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/plugins/juju-metadata/toolsmetadata.go juju-core-2.0.0/src/github.com/juju/juju/cmd/plugins/juju-metadata/toolsmetadata.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/plugins/juju-metadata/toolsmetadata.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/plugins/juju-metadata/toolsmetadata.go 2016-10-13 14:31:49.000000000 +0000 @@ -7,9 +7,10 @@ "fmt" "github.com/juju/cmd" + "github.com/juju/errors" + "github.com/juju/gnuflag" "github.com/juju/loggo" "github.com/juju/utils" - "launchpad.net/gnuflag" "github.com/juju/juju/cmd/modelcmd" "github.com/juju/juju/environs/filestorage" @@ -35,7 +36,7 @@ public bool } -var toolsMetadataDoc = ` +const toolsMetadataDoc = ` generate-tools creates simplestreams tools metadata. 
This command works by scanning a directory for tools tarballs from which to generate @@ -60,22 +61,17 @@ Examples: - - generate metadata for "released" tools, looking in the "releases" directory: +# generate metadata for "released": +juju metadata generate-tools -d - juju metadata generate-tools -d +# generate metadata for "released": +juju metadata generate-tools -d --stream released - - generate metadata for "released" tools, looking in the "released" directory: - - juju metadata generate-tools -d --stream released - - - generate metadata for "proposed" tools, looking in the "proposed" directory: - - juju metadata generate-tools -d --stream proposed - - - generate metadata for "proposed" tools, first removing existing "proposed" metadata: - - juju metadata generate-tools -d --stream proposed --clean +# generate metadata for "proposed": +juju metadata generate-tools -d --stream proposed +# generate metadata for "proposed", first removing existing "proposed" metadata: +juju metadata generate-tools -d --stream proposed --clean ` func (c *toolsMetadataCommand) Info() *cmd.Info { @@ -88,10 +84,12 @@ func (c *toolsMetadataCommand) SetFlags(f *gnuflag.FlagSet) { f.StringVar(&c.metadataDir, "d", "", "local directory in which to store metadata") - // If no stream is specified, we'll generate metadata for the legacy tools location. 
- f.StringVar(&c.stream, "stream", "", "simplestreams stream for which to generate the metadata") - f.BoolVar(&c.clean, "clean", false, "remove any existing metadata for the specified stream before generating new metadata") - f.BoolVar(&c.public, "public", false, "tools are for a public cloud, so generate mirrors information") + f.StringVar(&c.stream, "stream", envtools.ReleasedStream, + "simplestreams stream for which to generate the metadata") + f.BoolVar(&c.clean, "clean", false, + "remove any existing metadata for the specified stream before generating new metadata") + f.BoolVar(&c.public, "public", false, + "tools are for a public cloud, so generate mirrors information") } func (c *toolsMetadataCommand) Run(context *cmd.Context) error { @@ -101,47 +99,39 @@ loggo.RegisterWriter("toolsmetadata", writer) defer loggo.RemoveWriter("toolsmetadata") if c.metadataDir == "" { - c.metadataDir = osenv.JujuXDGDataHome() + c.metadataDir = osenv.JujuXDGDataHomeDir() } else { c.metadataDir = context.AbsPath(c.metadataDir) } sourceStorage, err := filestorage.NewFileStorageReader(c.metadataDir) if err != nil { - return err + return errors.Trace(err) } - // We now store the tools in a directory named after their stream, but the - // legacy behaviour is to store all tools in a single "releases" directory. 
- toolsDir := c.stream - if c.stream == "" { - fmt.Fprintf(context.Stdout, "No stream specified, defaulting to released tools in the releases directory.\n") - c.stream = envtools.ReleasedStream - toolsDir = envtools.LegacyReleaseDirectory - } fmt.Fprintf(context.Stdout, "Finding tools in %s for stream %s.\n", c.metadataDir, c.stream) - toolsList, err := envtools.ReadList(sourceStorage, toolsDir, -1, -1) + toolsList, err := envtools.ReadList(sourceStorage, c.stream, -1, -1) if err == envtools.ErrNoTools { var source string source, err = envtools.ToolsURL(envtools.DefaultBaseURL) if err != nil { - return err + return errors.Trace(err) } toolsList, err = envtools.FindToolsForCloud(toolsDataSources(source), simplestreams.CloudSpec{}, c.stream, -1, -1, coretools.Filter{}) } if err != nil { - return err + return errors.Trace(err) } targetStorage, err := filestorage.NewFileStorageWriter(c.metadataDir) if err != nil { - return err + return errors.Trace(err) } writeMirrors := envtools.DoNotWriteMirrors if c.public { writeMirrors = envtools.WriteMirrors } - return mergeAndWriteMetadata(targetStorage, toolsDir, c.stream, c.clean, toolsList, writeMirrors) + return errors.Trace(mergeAndWriteMetadata(targetStorage, c.stream, c.stream, c.clean, toolsList, writeMirrors)) } func toolsDataSources(urls ...string) []simplestreams.DataSource { diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/plugins/juju-metadata/toolsmetadata_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/plugins/juju-metadata/toolsmetadata_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/plugins/juju-metadata/toolsmetadata_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/plugins/juju-metadata/toolsmetadata_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -9,6 +9,7 @@ "os" "path/filepath" "regexp" + "runtime" "sort" "strings" "text/template" @@ -47,7 +48,7 @@ "name": "erewhemos", "type": "dummy", "uuid": coretesting.ModelTag.Id(), - 
"controller-uuid": coretesting.ModelTag.Id(), + "controller-uuid": coretesting.ControllerTag.Id(), "conroller": true, }) c.Assert(err, jc.ErrorIsNil) @@ -111,63 +112,34 @@ return buf.String() } -var expectedOutputDirectoryReleasedTemplate = expectedOutputCommon + ` -.*Writing tools/streams/v1/index2\.json -.*Writing tools/streams/v1/index\.json -.*Writing tools/streams/v1/com\.ubuntu\.juju-{{.Stream}}-tools\.json -` - var expectedOutputDirectoryTemplate = expectedOutputCommon + ` .*Writing tools/streams/v1/index2\.json .*Writing tools/streams/v1/com\.ubuntu\.juju-{{.Stream}}-tools\.json ` -var expectedOutputMirrorsTemplate = expectedOutputCommon + ` -.*Writing tools/streams/v1/index2\.json -.*Writing tools/streams/v1/index\.json -.*Writing tools/streams/v1/com\.ubuntu\.juju-{{.Stream}}-tools\.json -.*Writing tools/streams/v1/mirrors\.json -` - -var expectedOutputDirectoryLegacyReleased = "No stream specified, defaulting to released tools in the releases directory.\n" + - makeExpectedOutput(expectedOutputDirectoryReleasedTemplate, "released", "releases") - -var expectedOutputMirrorsReleased = makeExpectedOutput(expectedOutputMirrorsTemplate, "released", "released") - -func (s *ToolsMetadataSuite) TestGenerateLegacyRelease(c *gc.C) { - metadataDir := osenv.JujuXDGDataHome() // default metadata dir - toolstesting.MakeTools(c, metadataDir, "releases", versionStrings) - ctx := coretesting.Context(c) - code := cmd.Main(newToolsMetadataCommand(), ctx, nil) - c.Assert(code, gc.Equals, 0) - output := ctx.Stdout.(*bytes.Buffer).String() - c.Assert(output, gc.Matches, expectedOutputDirectoryLegacyReleased) - metadata := toolstesting.ParseMetadataFromDir(c, metadataDir, "released", false) - c.Assert(metadata, gc.HasLen, len(versionStrings)) - obtainedVersionStrings := make([]string, len(versionStrings)) - for i, metadata := range metadata { - s := fmt.Sprintf("%s-%s-%s", metadata.Version, metadata.Release, metadata.Arch) - obtainedVersionStrings[i] = s - } - 
c.Assert(obtainedVersionStrings, gc.DeepEquals, versionStrings) -} - func (s *ToolsMetadataSuite) TestGenerateToDirectory(c *gc.C) { metadataDir := c.MkDir() - toolstesting.MakeTools(c, metadataDir, "releases", versionStrings) + toolstesting.MakeTools(c, metadataDir, "released", versionStrings) ctx := coretesting.Context(c) code := cmd.Main(newToolsMetadataCommand(), ctx, []string{"-d", metadataDir}) - c.Assert(code, gc.Equals, 0) + c.Check(code, gc.Equals, 0) output := ctx.Stdout.(*bytes.Buffer).String() - c.Assert(output, gc.Matches, expectedOutputDirectoryLegacyReleased) + + outputDirReleasedTmpl := expectedOutputCommon + ` +.*Writing tools/streams/v1/index2\.json +.*Writing tools/streams/v1/index\.json +.*Writing tools/streams/v1/com\.ubuntu\.juju-{{.Stream}}-tools\.json +` + expectedOutput := makeExpectedOutput(outputDirReleasedTmpl, "released", "released") + c.Check(output, gc.Matches, expectedOutput) metadata := toolstesting.ParseMetadataFromDir(c, metadataDir, "released", false) - c.Assert(metadata, gc.HasLen, len(versionStrings)) + c.Check(metadata, gc.HasLen, len(versionStrings)) obtainedVersionStrings := make([]string, len(versionStrings)) for i, metadata := range metadata { s := fmt.Sprintf("%s-%s-%s", metadata.Version, metadata.Release, metadata.Arch) obtainedVersionStrings[i] = s } - c.Assert(obtainedVersionStrings, gc.DeepEquals, versionStrings) + c.Check(obtainedVersionStrings, gc.DeepEquals, versionStrings) } func (s *ToolsMetadataSuite) TestGenerateStream(c *gc.C) { @@ -289,15 +261,24 @@ } func (s *ToolsMetadataSuite) TestGenerateWithMirrors(c *gc.C) { + metadataDir := c.MkDir() toolstesting.MakeTools(c, metadataDir, "released", versionStrings) ctx := coretesting.Context(c) code := cmd.Main(newToolsMetadataCommand(), ctx, []string{"--public", "-d", metadataDir, "--stream", "released"}) c.Assert(code, gc.Equals, 0) output := ctx.Stdout.(*bytes.Buffer).String() - c.Assert(output, gc.Matches, expectedOutputMirrorsReleased) + + mirrosTmpl := 
expectedOutputCommon + ` +.*Writing tools/streams/v1/index2\.json +.*Writing tools/streams/v1/index\.json +.*Writing tools/streams/v1/com\.ubuntu\.juju-{{.Stream}}-tools\.json +.*Writing tools/streams/v1/mirrors\.json +` + expectedOutput := makeExpectedOutput(mirrosTmpl, "released", "released") + c.Check(output, gc.Matches, expectedOutput) metadata := toolstesting.ParseMetadataFromDir(c, metadataDir, "released", true) - c.Assert(metadata, gc.HasLen, len(versionStrings)) + c.Check(metadata, gc.HasLen, len(versionStrings)) obtainedVersionStrings := make([]string, len(versionStrings)) for i, metadata := range metadata { s := fmt.Sprintf("%s-%s-%s", metadata.Version, metadata.Release, metadata.Arch) @@ -307,23 +288,29 @@ } func (s *ToolsMetadataSuite) TestNoTools(c *gc.C) { + if runtime.GOOS == "windows" { + c.Skip("Skipping on windows, test only set up for Linux tools") + } ctx := coretesting.Context(c) code := cmd.Main(newToolsMetadataCommand(), ctx, nil) c.Assert(code, gc.Equals, 1) stdout := ctx.Stdout.(*bytes.Buffer).String() - c.Assert(stdout, gc.Matches, ".*\nFinding tools in .*\n") + c.Assert(stdout, gc.Matches, ".*Finding tools in .*\n") stderr := ctx.Stderr.(*bytes.Buffer).String() c.Assert(stderr, gc.Matches, "error: no tools available\n") } func (s *ToolsMetadataSuite) TestPatchLevels(c *gc.C) { + if runtime.GOOS == "windows" { + c.Skip("Skipping on windows, test only set up for Linux tools") + } currentVersion := jujuversion.Current currentVersion.Build = 0 versionStrings := []string{ currentVersion.String() + "-precise-amd64", currentVersion.String() + ".1-precise-amd64", } - metadataDir := osenv.JujuXDGDataHome() // default metadata dir + metadataDir := osenv.JujuXDGDataHomeDir() // default metadata dir toolstesting.MakeTools(c, metadataDir, "released", versionStrings) ctx := coretesting.Context(c) code := cmd.Main(newToolsMetadataCommand(), ctx, []string{"--stream", "released"}) diff -Nru 
juju-core-2.0~beta15/src/github.com/juju/juju/cmd/plugins/juju-metadata/validateimagemetadata.go juju-core-2.0.0/src/github.com/juju/juju/cmd/plugins/juju-metadata/validateimagemetadata.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/plugins/juju-metadata/validateimagemetadata.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/plugins/juju-metadata/validateimagemetadata.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,16 +4,19 @@ package main import ( + "bytes" "fmt" "os" "path/filepath" "strings" "github.com/juju/cmd" + "github.com/juju/errors" + "github.com/juju/gnuflag" "github.com/juju/utils" - "launchpad.net/gnuflag" "github.com/juju/juju/cmd/modelcmd" + "github.com/juju/juju/cmd/output" "github.com/juju/juju/environs" "github.com/juju/juju/environs/config" "github.com/juju/juju/environs/imagemetadata" @@ -84,7 +87,7 @@ } func (c *validateImageMetadataCommand) SetFlags(f *gnuflag.FlagSet) { - c.out.AddFlags(f, "smart", cmd.DefaultFormatters) + c.out.AddFlags(f, "yaml", output.DefaultFormatters) f.StringVar(&c.providerType, "p", "", "the provider type eg ec2, openstack") f.StringVar(&c.metadataDir, "d", "", "directory where metadata files are found") f.StringVar(&c.series, "s", "", "the series for which to validate (overrides env config series)") @@ -96,13 +99,13 @@ func (c *validateImageMetadataCommand) Init(args []string) error { if c.providerType != "" { if c.series == "" { - return fmt.Errorf("series required if provider type is specified") + return errors.Errorf("series required if provider type is specified") } if c.region == "" { - return fmt.Errorf("region required if provider type is specified") + return errors.Errorf("region required if provider type is specified") } if c.metadataDir == "" { - return fmt.Errorf("metadata directory required if provider type is specified") + return errors.Errorf("metadata directory required if provider type is specified") } } return cmd.CheckEmpty(args) @@ -127,7 +130,7 @@ 
newCfg, err := cfg.Apply(map[string]interface{}{"image-stream": oes.stream}) if err != nil { // This should never happen. - panic(fmt.Errorf("unexpected error making override config: %v", err)) + panic(errors.Errorf("unexpected error making override config: %v", err)) } return newCfg } @@ -144,8 +147,9 @@ metadata := map[string]interface{}{ "Resolve Metadata": *resolveInfo, } - if metadataYaml, yamlErr := cmd.FormatYaml(metadata); yamlErr == nil { - err = fmt.Errorf("%v\n%v", err, string(metadataYaml)) + buff := &bytes.Buffer{} + if yamlErr := cmd.FormatYaml(buff, metadata); yamlErr == nil { + err = errors.Errorf("%v\n%v", err, buff.String()) } } return err @@ -165,7 +169,7 @@ sources = append(sources, fmt.Sprintf("- %s (%s)", s.Description(), url)) } } - return fmt.Errorf( + return errors.Errorf( "no matching image ids for region %s using sources:\n%s", params.Region, strings.Join(sources, "\n")) } @@ -182,7 +186,7 @@ } mdLookup, ok := environ.(simplestreams.MetadataValidator) if !ok { - return nil, fmt.Errorf("%s provider does not support image metadata validation", environ.Config().Type()) + return nil, errors.Errorf("%s provider does not support image metadata validation", environ.Config().Type()) } params, err = mdLookup.MetadataLookupParams(c.region) if err != nil { @@ -200,7 +204,7 @@ } mdLookup, ok := prov.(simplestreams.MetadataValidator) if !ok { - return nil, fmt.Errorf("%s provider does not support image metadata validation", c.providerType) + return nil, errors.Errorf("%s provider does not support image metadata validation", c.providerType) } params, err = mdLookup.MetadataLookupParams(c.region) if err != nil { diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/plugins/juju-metadata/validateimagemetadata_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/plugins/juju-metadata/validateimagemetadata_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/plugins/juju-metadata/validateimagemetadata_test.go 2016-08-16 08:56:25.000000000 
+0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/plugins/juju-metadata/validateimagemetadata_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -98,47 +98,46 @@ "name": "ec2", "type": "ec2", "default-series": "precise", - "region": "us-east-1", - "controller-uuid": ec2UUID, + "controller-uuid": coretesting.ControllerTag.Id(), "uuid": ec2UUID, }) c.Assert(err, jc.ErrorIsNil) store.Controllers["ec2-controller"] = jujuclient.ControllerDetails{ - ControllerUUID: coretesting.ModelTag.Id(), + ControllerUUID: coretesting.ControllerTag.Id(), CACert: coretesting.CACert, } store.Accounts["ec2-controller"] = jujuclient.AccountDetails{ - User: "admin@local", + User: "admin", } store.BootstrapConfig["ec2-controller"] = jujuclient.BootstrapConfig{ - Config: ec2Config.AllAttrs(), - Cloud: "ec2", - CloudType: "ec2", - CloudRegion: "us-east-1", + ControllerConfig: coretesting.FakeControllerConfig(), + ControllerModelUUID: ec2UUID, + Config: ec2Config.AllAttrs(), + Cloud: "ec2", + CloudType: "ec2", + CloudRegion: "us-east-1", + CloudEndpoint: "https://ec2.us-east-1.amazonaws.com", } azureUUID := utils.MustNewUUID().String() azureConfig, err := config.New(config.UseDefaults, map[string]interface{}{ - "name": "azure", - "type": "azure", - "controller-uuid": azureUUID, - "uuid": azureUUID, - "default-series": "raring", - "location": "West US", - "subscription-id": "foo", - "application-id": "bar", - "application-password": "baz", - "tenant-id": "qux", + "name": "azure", + "type": "azure", + "controller-uuid": coretesting.ControllerTag.Id(), + "uuid": azureUUID, + "default-series": "raring", }) c.Assert(err, jc.ErrorIsNil) store.Controllers["azure-controller"] = jujuclient.ControllerDetails{ - ControllerUUID: coretesting.ModelTag.Id(), + ControllerUUID: coretesting.ControllerTag.Id(), CACert: coretesting.CACert, } store.Accounts["azure-controller"] = jujuclient.AccountDetails{ - User: "admin@local", + User: "admin", } store.BootstrapConfig["azure-controller"] = 
jujuclient.BootstrapConfig{ + ControllerConfig: coretesting.FakeControllerConfig(), + ControllerModelUUID: azureUUID, Config: azureConfig.AllAttrs(), Cloud: "azure", CloudType: "azure", @@ -150,11 +149,10 @@ store.Credentials["azure"] = cloud.CloudCredential{ AuthCredentials: map[string]cloud.Credential{ "default": cloud.NewCredential( - cloud.UserPassAuthType, + "service-principal-secret", map[string]string{ "application-id": "application-id", "subscription-id": "subscription-id", - "tenant-id": "tenant-id", "application-password": "application-password", }, ), diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/plugins/juju-metadata/validatetoolsmetadata.go juju-core-2.0.0/src/github.com/juju/juju/cmd/plugins/juju-metadata/validatetoolsmetadata.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/plugins/juju-metadata/validatetoolsmetadata.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/plugins/juju-metadata/validatetoolsmetadata.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,16 +4,19 @@ package main import ( + "bytes" "fmt" "os" "strings" "github.com/juju/cmd" + "github.com/juju/errors" + "github.com/juju/gnuflag" "github.com/juju/utils/arch" "github.com/juju/version" - "launchpad.net/gnuflag" "github.com/juju/juju/cmd/modelcmd" + "github.com/juju/juju/cmd/output" "github.com/juju/juju/environs" "github.com/juju/juju/environs/simplestreams" "github.com/juju/juju/environs/tools" @@ -111,7 +114,7 @@ } func (c *validateToolsMetadataCommand) SetFlags(f *gnuflag.FlagSet) { - c.out.AddFlags(f, "smart", cmd.DefaultFormatters) + c.out.AddFlags(f, "yaml", output.DefaultFormatters) f.StringVar(&c.providerType, "p", "", "the provider type eg ec2, openstack") f.StringVar(&c.metadataDir, "d", "", "directory where metadata files are found") f.StringVar(&c.series, "s", "", "the series for which to validate (overrides env config series)") @@ -127,10 +130,10 @@ func (c *validateToolsMetadataCommand) Init(args []string) error { if 
c.providerType != "" { if c.region == "" { - return fmt.Errorf("region required if provider type is specified") + return errors.Errorf("region required if provider type is specified") } if c.metadataDir == "" { - return fmt.Errorf("metadata directory required if provider type is specified") + return errors.Errorf("metadata directory required if provider type is specified") } } if c.exactVersion == "current" { @@ -153,7 +156,7 @@ if err == nil { mdLookup, ok := environ.(simplestreams.MetadataValidator) if !ok { - return fmt.Errorf("%s provider does not support tools metadata validation", environ.Config().Type()) + return errors.Errorf("%s provider does not support tools metadata validation", environ.Config().Type()) } params, err = mdLookup.MetadataLookupParams(c.region) if err != nil { @@ -167,9 +170,7 @@ if c.metadataDir == "" { return err } - params = &simplestreams.MetadataLookupParams{ - Architectures: arch.AllSupportedArches, - } + params = &simplestreams.MetadataLookupParams{} } } else { prov, err := environs.Provider(c.providerType) @@ -178,7 +179,7 @@ } mdLookup, ok := prov.(simplestreams.MetadataValidator) if !ok { - return fmt.Errorf("%s provider does not support tools metadata validation", c.providerType) + return errors.Errorf("%s provider does not support tools metadata validation", c.providerType) } params, err = mdLookup.MetadataLookupParams(c.region) if err != nil { @@ -186,6 +187,10 @@ } } + if len(params.Architectures) == 0 { + params.Architectures = arch.AllSupportedArches + } + if c.series != "" { params.Series = c.series } @@ -218,8 +223,9 @@ metadata := map[string]interface{}{ "Resolve Metadata": *resolveInfo, } - if metadataYaml, yamlErr := cmd.FormatYaml(metadata); yamlErr == nil { - err = fmt.Errorf("%v\n%v", err, string(metadataYaml)) + buff := &bytes.Buffer{} + if yamlErr := cmd.FormatYaml(buff, metadata); yamlErr == nil { + err = errors.Errorf("%v\n%v", err, buff.String()) } } return err @@ -239,7 +245,7 @@ sources = append(sources, 
fmt.Sprintf("- %s (%s)", s.Description(), url)) } } - return fmt.Errorf("no matching tools using sources:\n%s", strings.Join(sources, "\n")) + return errors.Errorf("no matching tools using sources:\n%s", strings.Join(sources, "\n")) } return nil } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/plugins/juju-metadata/validatetoolsmetadata_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/plugins/juju-metadata/validatetoolsmetadata_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/plugins/juju-metadata/validatetoolsmetadata_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/plugins/juju-metadata/validatetoolsmetadata_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -8,6 +8,7 @@ "github.com/juju/cmd" jc "github.com/juju/testing/checkers" + "github.com/juju/utils/arch" "gopkg.in/amz.v3/aws" gc "gopkg.in/check.v1" @@ -79,7 +80,7 @@ func (s *ValidateToolsMetadataSuite) makeLocalMetadata(c *gc.C, stream, version, region, series, endpoint string) error { tm := []*tools.ToolsMetadata{{ Version: version, - Arch: "amd64", + Arch: arch.HostArch(), Release: series, }} targetStorage, err := filestorage.NewFileStorageWriter(s.metadataDir) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/plugins/juju-upgrade-mongo/upgrade.go juju-core-2.0.0/src/github.com/juju/juju/cmd/plugins/juju-upgrade-mongo/upgrade.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/plugins/juju-upgrade-mongo/upgrade.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/plugins/juju-upgrade-mongo/upgrade.go 2016-10-13 14:31:49.000000000 +0000 @@ -15,10 +15,10 @@ "github.com/juju/cmd" "github.com/juju/errors" + "github.com/juju/gnuflag" "github.com/juju/replicaset" "github.com/juju/utils" "gopkg.in/juju/names.v2" - "launchpad.net/gnuflag" "github.com/juju/juju/api/highavailability" "github.com/juju/juju/apiserver/params" diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/supercommand.go 
juju-core-2.0.0/src/github.com/juju/juju/cmd/supercommand.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/supercommand.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/supercommand.go 2016-10-13 14:31:49.000000000 +0000 @@ -61,4 +61,5 @@ func runNotifier(name string) { logger.Infof("running %s [%s %s %s]", name, jujuversion.Current, runtime.Compiler, runtime.Version()) + logger.Debugf(" args: %#v", os.Args) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/testing/package_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/testing/package_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/testing/package_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/testing/package_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,14 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package testing_test + +import ( + "testing" + + gc "gopkg.in/check.v1" +) + +func TestPackage(t *testing.T) { + gc.TestingT(t) +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/testing/prompt.go juju-core-2.0.0/src/github.com/juju/juju/cmd/testing/prompt.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/testing/prompt.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/testing/prompt.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,220 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package testing + +import ( + "io" + "strings" + + "github.com/juju/errors" + "github.com/juju/loggo" + gc "gopkg.in/check.v1" +) + +var logger = loggo.GetLogger("juju.cmd.testing") + +// NewSeqPrompter returns a prompter that can be used to check a sequence of +// IO interactions. 
Expected input from the user is marked with the +// given user input marker (for example a distinctive unicode character +// that will not occur in the rest of the text) and runs to the end of a +// line. +// +// All output text in between user input is treated as regular expressions. +// +// As a special case, if an input marker is followed only by a single input +// marker on that line, the checker will cause io.EOF to be returned for +// that prompt. +// +// The returned SeqPrompter wraps a Prompter and checks that each +// read and write corresponds to the expected action in the sequence. +// +// After all interaction is done, CheckDone or AssertDone should be called to +// check that no more interactions are expected. +// +// Any failures will result in the test failing. +// +// For example given the prompter created with: +// +// checker := NewSeqPrompter(c, "»", `What is your name: »Bob +// And your age: »148 +// You're .* old, Bob! +// `) +// +// The following code will pass the checker: +// +// fmt.Fprintf(checker, "What is your name: ") +// buf := make([]byte, 100) +// n, _ := checker.Read(buf) +// name := strings.TrimSpace(string(buf[0:n])) +// fmt.Fprintf(checker, "And your age: ") +// n, _ = checker.Read(buf) +// age, err := strconv.Atoi(strings.TrimSpace(string(buf[0:n]))) +// c.Assert(err, gc.IsNil) +// if age > 90 { +// fmt.Fprintf(checker, "You're very old, %s!\n", name) +// } +// checker.CheckDone() +func NewSeqPrompter(c *gc.C, userInputMarker, text string) *SeqPrompter { + p := &SeqPrompter{ + c: c, + } + for { + i := strings.Index(text, userInputMarker) + if i == -1 { + p.finalText = text + break + } + prompt := text[0:i] + text = text[i+len(userInputMarker):] + endLine := strings.Index(text, "\n") + if endLine == -1 { + c.Errorf("no newline found after expected input %q", text) + } + reply := text[0 : endLine+1] + if reply[0:len(reply)-1] == userInputMarker { + // EOF line. 
+ reply = "" + } + text = text[endLine+1:] + if prompt == "" && len(p.ios) > 0 { + // Combine multiple contiguous inputs together. + p.ios[len(p.ios)-1].reply += reply + } else { + p.ios = append(p.ios, ioInteraction{ + prompt: prompt, + reply: reply, + }) + } + } + p.Prompter = NewPrompter(p.prompt) + return p +} + +type SeqPrompter struct { + *Prompter + c *gc.C + ios []ioInteraction + finalText string + failed bool +} + +type ioInteraction struct { + prompt string + reply string +} + +func (p *SeqPrompter) prompt(text string) (string, error) { + if p.failed { + return "", errors.New("prompter failed") + } + if len(p.ios) == 0 { + p.c.Errorf("unexpected prompt %q; expected none", text) + return "", errors.New("unexpected prompt") + } + if !p.c.Check(text, gc.Matches, p.ios[0].prompt) { + p.failed = true + return "", errors.Errorf("unexpected prompt %q; expected %q", text, p.ios[0].prompt) + } + reply := p.ios[0].reply + logger.Infof("prompt %q -> %q", text, reply) + p.ios = p.ios[1:] + return reply, nil +} + +// CheckDone asserts that all the expected prompts +// have been printed and all the replies read, and +// reports whether the check succeeded. +func (p *SeqPrompter) CheckDone() bool { + if p.failed { + // No point in doing the details checks if + // a prompt failed earlier - it just makes + // the resulting test failure noisy. + p.c.Errorf("prompter has failed") + return false + } + r := p.c.Check(p.ios, gc.HasLen, 0, gc.Commentf("unused prompts")) + r = p.c.Check(p.HasUnread(), gc.Equals, false, gc.Commentf("some input was not read")) && r + r = p.c.Check(p.Tail(), gc.Matches, p.finalText, gc.Commentf("final text mismatch")) && r + return r +} + +// AssertDone is like CheckDone but aborts the test if +// the check fails. 
+func (p *SeqPrompter) AssertDone() { + if !p.CheckDone() { + p.c.FailNow() + } +} + +// NewPrompter returns an io.ReadWriter implementation that calls the +// given function every time Read is called after some text has been +// written or if all the previously returned text has been read. The +// function's argument contains all the text printed since the last +// input. The function should return the text that the user is expected +// to type, or an error to return from Read. If it returns an empty string, +// and no error, it will return io.EOF instead. +func NewPrompter(prompt func(string) (string, error)) *Prompter { + return &Prompter{ + prompt: prompt, + } +} + +// Prompter is designed to be used in a cmd.Context to +// check interactive request-response sequences +// using stdin and stdout. +type Prompter struct { + prompt func(string) (string, error) + + written []byte + allWritten []byte + pending []byte + pendingError error +} + +// Tail returns all the text written since the last prompt. +func (p *Prompter) Tail() string { + return string(p.written) +} + +// HasUnread reports whether any input +// from the last prompt remains unread. +func (p *Prompter) HasUnread() bool { + return len(p.pending) != 0 +} + +// Read implements io.Reader. +func (p *Prompter) Read(buf []byte) (int, error) { + if len(p.pending) == 0 && p.pendingError == nil { + s, err := p.prompt(string(p.written)) + if s == "" && err == nil { + err = io.EOF + } + p.written = nil + p.pending = []byte(s) + p.pendingError = err + } + if len(p.pending) > 0 { + n := copy(buf, p.pending) + p.pending = p.pending[n:] + return n, nil + } + if err := p.pendingError; err != nil { + p.pendingError = nil + return 0, err + } + panic("unreachable") +} + +// String returns all the text that has been written to +// the prompter since it was created. +func (p *Prompter) String() string { + return string(p.allWritten) +} + +// Write implements io.Writer. 
+func (p *Prompter) Write(buf []byte) (int, error) { + p.written = append(p.written, buf...) + p.allWritten = append(p.allWritten, buf...) + return len(buf), nil +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/testing/prompt_test.go juju-core-2.0.0/src/github.com/juju/juju/cmd/testing/prompt_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/testing/prompt_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/testing/prompt_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,136 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package testing_test + +import ( + "fmt" + "io" + "strconv" + "strings" + + "github.com/juju/errors" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + cmdtesting "github.com/juju/juju/cmd/testing" + "github.com/juju/testing" +) + +type prompterSuite struct { + testing.IsolationSuite +} + +var _ = gc.Suite(&prompterSuite{}) + +func (*prompterSuite) TestPrompter(c *gc.C) { + noPrompt := func(p string) (string, error) { + c.Fatalf("unpexected prompt (text %q)", p) + panic("unreachable") + } + promptFn := noPrompt + p := cmdtesting.NewPrompter(func(p string) (string, error) { + return promptFn(p) + }) + + promptText := "hello: " + promptReply := "reply\n" + + fmt.Fprint(p, promptText) + promptFn = func(p string) (string, error) { + c.Assert(p, gc.Equals, promptText) + return promptReply, nil + } + c.Assert(readStr(c, p, 20), gc.Equals, promptReply) + + promptText = "some text\ngoodbye: " + promptReply = "again\n" + fmt.Fprint(p, promptText[0:10]) + fmt.Fprint(p, promptText[10:]) + + c.Assert(readStr(c, p, 3), gc.Equals, promptReply[0:3]) + c.Assert(readStr(c, p, 20), gc.Equals, promptReply[3:]) + + fmt.Fprint(p, "final text\n") + + c.Assert(p.Tail(), gc.Equals, "final text\n") + c.Assert(p.HasUnread(), gc.Equals, false) +} + +func (*prompterSuite) TestUnreadInput(c *gc.C) { + p := cmdtesting.NewPrompter(func(s 
string) (string, error) { + return "hello world", nil + }) + c.Assert(readStr(c, p, 3), gc.Equals, "hel") + + c.Assert(p.HasUnread(), gc.Equals, true) +} + +func (*prompterSuite) TestError(c *gc.C) { + expectErr := errors.New("something") + p := cmdtesting.NewPrompter(func(s string) (string, error) { + return "", expectErr + }) + buf := make([]byte, 3) + n, err := p.Read(buf) + c.Assert(n, gc.Equals, 0) + c.Assert(err, gc.Equals, expectErr) +} + +func (*prompterSuite) TestSeqPrompter(c *gc.C) { + p := cmdtesting.NewSeqPrompter(c, "»", ` +hello: »reply +some text +goodbye: »again +final +`[1:]) + fmt.Fprint(p, "hello: ") + c.Assert(readStr(c, p, 1), gc.Equals, "r") + c.Assert(readStr(c, p, 20), gc.Equals, "eply\n") + fmt.Fprint(p, "some text\n") + fmt.Fprint(p, "goodbye: ") + c.Assert(readStr(c, p, 20), gc.Equals, "again\n") + fmt.Fprint(p, "final\n") + p.AssertDone() +} + +func (*prompterSuite) TestSeqPrompterEOF(c *gc.C) { + p := cmdtesting.NewSeqPrompter(c, "»", ` +hello: »» +final +`[1:]) + fmt.Fprint(p, "hello: ") + n, err := p.Read(make([]byte, 10)) + c.Assert(n, gc.Equals, 0) + c.Assert(err, gc.Equals, io.EOF) + fmt.Fprint(p, "final\n") + p.AssertDone() +} + +func (*prompterSuite) TestNewIOChecker(c *gc.C) { + checker := cmdtesting.NewSeqPrompter(c, "»", `What is your name: »Bob +»more +And your age: »148 +You're .* old, Bob +more! 
+`) + fmt.Fprintf(checker, "What is your name: ") + buf := make([]byte, 100) + n, _ := checker.Read(buf) + name := strings.TrimSpace(string(buf[0:n])) + fmt.Fprintf(checker, "And your age: ") + n, _ = checker.Read(buf) + age, err := strconv.Atoi(strings.TrimSpace(string(buf[0:n]))) + c.Assert(err, gc.IsNil) + if age > 90 { + fmt.Fprintf(checker, "You're very old, %s!\n", name) + } + checker.CheckDone() +} + +func readStr(c *gc.C, r io.Reader, nb int) string { + buf := make([]byte, nb) + n, err := r.Read(buf) + c.Assert(err, jc.ErrorIsNil) + return string(buf[0:n]) +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/cmd/testing/testing.go juju-core-2.0.0/src/github.com/juju/juju/cmd/testing/testing.go --- juju-core-2.0~beta15/src/github.com/juju/juju/cmd/testing/testing.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/cmd/testing/testing.go 2016-10-13 14:31:49.000000000 +0000 @@ -13,9 +13,9 @@ "os/exec" "github.com/juju/cmd" + "github.com/juju/gnuflag" jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" - "launchpad.net/gnuflag" "github.com/juju/juju/juju/osenv" "github.com/juju/juju/provider/dummy" diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/component/all/resource.go juju-core-2.0.0/src/github.com/juju/juju/component/all/resource.go --- juju-core-2.0~beta15/src/github.com/juju/juju/component/all/resource.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/component/all/resource.go 2016-10-13 14:31:49.000000000 +0000 @@ -53,6 +53,9 @@ // for the component in a "juju" command context. func (r resources) registerForClient() error { r.registerPublicCommands() + + // needed for help-tool + r.registerHookContextCommands() return nil } @@ -80,7 +83,7 @@ }) } -// resourcesApiClient adds a Close() method to the resources public API client. +// resourcesAPIClient adds a Close() method to the resources public API client. 
type resourcesAPIClient struct { *client.Client closeConnFunc func() error @@ -118,16 +121,16 @@ return } - charmcmd.RegisterSubCommand(func(spec charmcmd.CharmstoreSpec) jujucmd.Command { - base := charmcmd.NewCommandBase(spec) - resBase := &resourceadapters.CharmCmdBase{base} - return cmd.NewListCharmResourcesCommand(resBase) - }) + charmcmd.RegisterSubCommand(cmd.NewListCharmResourcesCommand()) commands.RegisterEnvCommand(func() modelcmd.ModelCommand { return cmd.NewUploadCommand(cmd.UploadDeps{ NewClient: func(c *cmd.UploadCommand) (cmd.UploadClient, error) { - return resourceadapters.NewAPIClient(c.NewAPIRoot) + apiRoot, err := c.NewAPIRoot() + if err != nil { + return nil, errors.Trace(err) + } + return resourceadapters.NewAPIClient(apiRoot) }, OpenResource: func(s string) (cmd.ReadSeekCloser, error) { return os.Open(s) @@ -139,7 +142,11 @@ commands.RegisterEnvCommand(func() modelcmd.ModelCommand { return cmd.NewShowServiceCommand(cmd.ShowServiceDeps{ NewClient: func(c *cmd.ShowServiceCommand) (cmd.ShowServiceClient, error) { - return resourceadapters.NewAPIClient(c.NewAPIRoot) + apiRoot, err := c.NewAPIRoot() + if err != nil { + return nil, errors.Trace(err) + } + return resourceadapters.NewAPIClient(apiRoot) }, }) }) @@ -169,7 +176,7 @@ r.registerHookContextFacade() } -func (c resources) registerHookContextCommands() { +func (r resources) registerHookContextCommands() { if markRegistered(resource.ComponentName, "hook-context-commands") == false { return } @@ -181,11 +188,7 @@ if err != nil { return nil, errors.Trace(err) } - typedCtx, ok := compCtx.(*context.Context) - if !ok { - return nil, errors.Trace(err) - } - cmd, err := contextcmd.NewGetCmd(typedCtx) + cmd, err := contextcmd.NewGetCmd(compCtx) if err != nil { return nil, errors.Trace(err) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/constraints/constraints.go juju-core-2.0.0/src/github.com/juju/juju/constraints/constraints.go --- 
juju-core-2.0~beta15/src/github.com/juju/juju/constraints/constraints.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/constraints/constraints.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,9 +4,9 @@ package constraints import ( + "encoding/json" "fmt" "math" - "reflect" "strconv" "strings" @@ -20,9 +20,11 @@ // The following constants list the supported constraint attribute names, as defined // by the fields in the Value struct. const ( - Arch = "arch" - Container = "container" - CpuCores = "cpu-cores" + Arch = "arch" + Container = "container" + // cpuCores is an alias for Cores. + cpuCores = "cpu-cores" + Cores = "cores" CpuPower = "cpu-power" Mem = "mem" RootDisk = "root-disk" @@ -47,7 +49,7 @@ // CpuCores, if not nil, indicates that a machine must have at least that // number of effective cores available. - CpuCores *uint64 `json:"cpu-cores,omitempty" yaml:"cpu-cores,omitempty"` + CpuCores *uint64 `json:"cores,omitempty" yaml:"cores,omitempty"` // CpuPower, if not nil, indicates that a machine must have at least that // amount of CPU power available, where 100 CpuPower is considered to be @@ -85,29 +87,17 @@ VirtType *string `json:"virt-type,omitempty" yaml:"virt-type,omitempty"` } -// fieldNames records a mapping from the constraint tag to struct field name. -// eg "root-disk" maps to RootDisk. -var fieldNames map[string]string - -func init() { - // Create the fieldNames map by inspecting the json tags for each of - // the Value struct fields. 
- fieldNames = make(map[string]string) - typ := reflect.TypeOf(Value{}) - for i := 0; i < typ.NumField(); i++ { - field := typ.Field(i) - if tag := field.Tag.Get("json"); tag != "" { - if i := strings.Index(tag, ","); i >= 0 { - tag = tag[0:i] - } - if tag == "-" { - continue - } - if tag != "" { - fieldNames[tag] = field.Name - } - } +var rawAliases = map[string]string{ + cpuCores: Cores, +} + +// resolveAlias returns the canonical representation of the given key, if it'a +// an alias listed in aliases, otherwise it returns the original key. +func resolveAlias(key string) string { + if canonical, ok := rawAliases[key]; ok { + return canonical } + return key } // IsEmpty returns if the given constraints value has no constraints set @@ -120,6 +110,24 @@ return v.Arch != nil && *v.Arch != "" } +// HasMem returns true if the constraints.Value specifies a minimum amount +// of memory. +func (v *Value) HasMem() bool { + return v.Mem != nil && *v.Mem > 0 +} + +// HasCpuPower returns true if the constraints.Value specifies a minimum amount +// of CPU power. +func (v *Value) HasCpuPower() bool { + return v.CpuPower != nil && *v.CpuPower > 0 +} + +// HasCpuCores returns true if the constraints.Value specifies a minimum number +// of CPU cores. +func (v *Value) HasCpuCores() bool { + return v.CpuCores != nil && *v.CpuCores > 0 +} + // HasInstanceType returns true if the constraints.Value specifies an instance type. 
func (v *Value) HasInstanceType() bool { return v.InstanceType != nil && *v.InstanceType != "" @@ -182,7 +190,7 @@ strs = append(strs, "container="+string(*v.Container)) } if v.CpuCores != nil { - strs = append(strs, "cpu-cores="+uintStr(*v.CpuCores)) + strs = append(strs, "cores="+uintStr(*v.CpuCores)) } if v.CpuPower != nil { strs = append(strs, "cpu-power="+uintStr(*v.CpuPower)) @@ -226,7 +234,7 @@ values = append(values, fmt.Sprintf("Arch: %q", *v.Arch)) } if v.CpuCores != nil { - values = append(values, fmt.Sprintf("CpuCores: %v", *v.CpuCores)) + values = append(values, fmt.Sprintf("Cores: %v", *v.CpuCores)) } if v.CpuPower != nil { values = append(values, fmt.Sprintf("CpuPower: %v", *v.CpuPower)) @@ -270,19 +278,36 @@ // each of which must contain only spaces and name=value pairs. If any // name is specified more than once, an error is returned. func Parse(args ...string) (Value, error) { - cons := Value{} + v, _, err := ParseWithAliases(args...) + return v, err +} + +// ParseWithAliases constructs a constraints.Value from the supplied arguments, each +// of which must contain only spaces and name=value pairs. If any name is +// specified more than once, an error is returned. The aliases map returned +// contains a map of aliases used, and their canonical values. 
+func ParseWithAliases(args ...string) (cons Value, aliases map[string]string, err error) { + aliases = make(map[string]string) for _, arg := range args { raws := strings.Split(strings.TrimSpace(arg), " ") for _, raw := range raws { if raw == "" { continue } - if err := cons.setRaw(raw); err != nil { - return Value{}, err + name, val, err := splitRaw(raw) + if err != nil { + return Value{}, nil, errors.Trace(err) + } + if canonical, ok := rawAliases[name]; ok { + aliases[name] = canonical + name = canonical + } + if err := cons.setRaw(name, val); err != nil { + return Value{}, aliases, errors.Trace(err) } } } - return cons, nil + return cons, aliases, nil } // Merge returns the effective constraints after merging any given @@ -323,30 +348,29 @@ return v.Target.String() } -func (v *Value) fieldFromTag(tagName string) (reflect.Value, bool) { - fieldName := fieldNames[tagName] - val := reflect.ValueOf(v).Elem().FieldByName(fieldName) - return val, val.IsValid() +// attributesWithValues returns the non-zero attribute tags and their values from the constraint. +func (v *Value) attributesWithValues() map[string]interface{} { + // These can never fail, so we ignore the error for the sake of keeping our + // API clean. I'm sorry (but not that sorry). + b, _ := json.Marshal(v) + result := map[string]interface{}{} + _ = json.Unmarshal(b, &result) + return result } -// attributesWithValues returns the non-zero attribute tags and their values from the constraint. -func (v *Value) attributesWithValues() (result map[string]interface{}) { - result = make(map[string]interface{}) - for fieldTag, fieldName := range fieldNames { - val := reflect.ValueOf(v).Elem().FieldByName(fieldName) - if !val.IsNil() { - result[fieldTag] = val.Elem().Interface() - } - } +func fromAttributes(attr map[string]interface{}) Value { + b, _ := json.Marshal(attr) + var result Value + _ = json.Unmarshal(b, &result) return result } // hasAny returns any attrTags for which the constraint has a non-nil value. 
func (v *Value) hasAny(attrTags ...string) []string { - attrValues := v.attributesWithValues() - var result []string = []string{} + attributes := v.attributesWithValues() + var result []string for _, tag := range attrTags { - _, ok := attrValues[tag] + _, ok := attributes[resolveAlias(tag)] if ok { result = append(result, tag) } @@ -356,32 +380,31 @@ // without returns a copy of the constraint without values for // the specified attributes. -func (v *Value) without(attrTags ...string) (Value, error) { - result := *v +func (v *Value) without(attrTags ...string) Value { + attributes := v.attributesWithValues() for _, tag := range attrTags { - val, ok := result.fieldFromTag(tag) - if !ok { - return Value{}, errors.Errorf("unknown constraint %q", tag) - } - val.Set(reflect.Zero(val.Type())) + delete(attributes, resolveAlias(tag)) } - return result, nil + return fromAttributes(attributes) } -// setRaw interprets a name=value string and sets the supplied value. -func (v *Value) setRaw(raw string) error { - eq := strings.Index(raw, "=") +func splitRaw(s string) (name, val string, err error) { + eq := strings.Index(s, "=") if eq <= 0 { - return errors.Errorf("malformed constraint %q", raw) + return "", "", errors.Errorf("malformed constraint %q", s) } - name, str := raw[:eq], raw[eq+1:] + return s[:eq], s[eq+1:], nil +} + +// setRaw interprets a name=value string and sets the supplied value. 
+func (v *Value) setRaw(name, str string) error { var err error - switch name { + switch resolveAlias(name) { case Arch: err = v.setArch(str) case Container: err = v.setContainer(str) - case CpuCores: + case Cores: err = v.setCpuCores(str) case CpuPower: err = v.setCpuPower(str) @@ -417,9 +440,20 @@ if err != nil { return errors.Trace(err) } + canonicals := map[string]string{} for k, val := range values { vstr := fmt.Sprintf("%v", val) - switch k { + key, ok := k.(string) + if !ok { + return errors.Errorf("unexpected non-string key: %#v", k) + } + canonical := resolveAlias(key) + if v, ok := canonicals[canonical]; ok { + // duplicate entry + return errors.Errorf("constraint %q duplicates constraint %q", key, v) + } + canonicals[canonical] = key + switch canonical { case Arch: v.Arch = &vstr case Container: @@ -427,7 +461,7 @@ v.Container = &ctype case InstanceType: v.InstanceType = &vstr - case CpuCores: + case Cores: v.CpuCores, err = parseUint64(vstr) case CpuPower: v.CpuPower, err = parseUint64(vstr) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/constraints/constraints_test.go juju-core-2.0.0/src/github.com/juju/juju/constraints/constraints_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/constraints/constraints_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/constraints/constraints_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -7,7 +7,6 @@ "encoding/json" "fmt" "strings" - "testing" jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" @@ -17,10 +16,6 @@ "github.com/juju/juju/instance" ) -func TestPackage(t *testing.T) { - gc.TestingT(t) -} - type ConstraintsSuite struct{} var _ = gc.Suite(&ConstraintsSuite{}) @@ -105,36 +100,42 @@ err: `bad "arch" constraint: already set`, }, - // "cpu-cores" in detail. + // "cores" in detail. 
{ - summary: "set cpu-cores empty", - args: []string{"cpu-cores="}, + summary: "set cores empty", + args: []string{"cores="}, + }, { + summary: "set cores zero", + args: []string{"cores=0"}, + }, { + summary: "set cores", + args: []string{"cores=4"}, + }, { + summary: "set nonsense cores 1", + args: []string{"cores=cheese"}, + err: `bad "cores" constraint: must be a non-negative integer`, }, { - summary: "set cpu-cores zero", - args: []string{"cpu-cores=0"}, + summary: "set nonsense cores 2", + args: []string{"cores=-1"}, + err: `bad "cores" constraint: must be a non-negative integer`, }, { + summary: "set nonsense cores 3", + args: []string{"cores=123.45"}, + err: `bad "cores" constraint: must be a non-negative integer`, + }, { + summary: "double set cores together", + args: []string{"cores=128 cores=1"}, + err: `bad "cores" constraint: already set`, + }, { + summary: "double set cores separately", + args: []string{"cores=128", "cores=1"}, + err: `bad "cores" constraint: already set`, + }, + + // "cpu-cores" + { summary: "set cpu-cores", args: []string{"cpu-cores=4"}, - }, { - summary: "set nonsense cpu-cores 1", - args: []string{"cpu-cores=cheese"}, - err: `bad "cpu-cores" constraint: must be a non-negative integer`, - }, { - summary: "set nonsense cpu-cores 2", - args: []string{"cpu-cores=-1"}, - err: `bad "cpu-cores" constraint: must be a non-negative integer`, - }, { - summary: "set nonsense cpu-cores 3", - args: []string{"cpu-cores=123.45"}, - err: `bad "cpu-cores" constraint: must be a non-negative integer`, - }, { - summary: "double set cpu-cores together", - args: []string{"cpu-cores=128 cpu-cores=1"}, - err: `bad "cpu-cores" constraint: already set`, - }, { - summary: "double set cpu-cores separately", - args: []string{"cpu-cores=128", "cpu-cores=1"}, - err: `bad "cpu-cores" constraint: already set`, }, // "cpu-power" in detail. 
@@ -316,13 +317,13 @@ { summary: "kitchen sink together", args: []string{ - "root-disk=8G mem=2T arch=i386 cpu-cores=4096 cpu-power=9001 container=lxd " + + "root-disk=8G mem=2T arch=i386 cores=4096 cpu-power=9001 container=lxd " + "tags=foo,bar spaces=space1,^space2 instance-type=foo", "virt-type=kvm"}, }, { summary: "kitchen sink separately", args: []string{ - "root-disk=8G", "mem=2T", "cpu-cores=4096", "cpu-power=9001", "arch=armhf", + "root-disk=8G", "mem=2T", "cores=4096", "cpu-power=9001", "arch=armhf", "container=lxd", "tags=foo,bar", "spaces=space1,^space2", "instance-type=foo", "virt-type=kvm"}, }, @@ -347,22 +348,34 @@ } } +func (s *ConstraintsSuite) TestParseAliases(c *gc.C) { + v, aliases, err := constraints.ParseWithAliases("cpu-cores=5 arch=amd64") + c.Assert(err, jc.ErrorIsNil) + c.Assert(v, gc.DeepEquals, constraints.Value{ + CpuCores: uint64p(5), + Arch: strp("amd64"), + }) + c.Assert(aliases, gc.DeepEquals, map[string]string{ + "cpu-cores": "cores", + }) +} + func (s *ConstraintsSuite) TestMerge(c *gc.C) { con1 := constraints.MustParse("arch=amd64 mem=4G") - con2 := constraints.MustParse("cpu-cores=42") + con2 := constraints.MustParse("cores=42") con3 := constraints.MustParse( "root-disk=8G container=lxd spaces=space1,^space2", ) merged, err := constraints.Merge(con1, con2) c.Assert(err, jc.ErrorIsNil) - c.Assert(merged, jc.DeepEquals, constraints.MustParse("arch=amd64 mem=4G cpu-cores=42")) + c.Assert(merged, jc.DeepEquals, constraints.MustParse("arch=amd64 mem=4G cores=42")) merged, err = constraints.Merge(con1) c.Assert(err, jc.ErrorIsNil) c.Assert(merged, jc.DeepEquals, con1) merged, err = constraints.Merge(con1, con2, con3) c.Assert(err, jc.ErrorIsNil) c.Assert(merged, jc.DeepEquals, constraints. 
- MustParse("arch=amd64 mem=4G cpu-cores=42 root-disk=8G container=lxd spaces=space1,^space2"), + MustParse("arch=amd64 mem=4G cores=42 root-disk=8G container=lxd spaces=space1,^space2"), ) merged, err = constraints.Merge() c.Assert(err, jc.ErrorIsNil) @@ -379,14 +392,14 @@ } func (s *ConstraintsSuite) TestParseMissingTagsAndSpaces(c *gc.C) { - con := constraints.MustParse("arch=amd64 mem=4G cpu-cores=1 root-disk=8G") + con := constraints.MustParse("arch=amd64 mem=4G cores=1 root-disk=8G") c.Check(con.Tags, gc.IsNil) c.Check(con.Spaces, gc.IsNil) } func (s *ConstraintsSuite) TestParseNoTagsNoSpaces(c *gc.C) { con := constraints.MustParse( - "arch=amd64 mem=4G cpu-cores=1 root-disk=8G tags= spaces=", + "arch=amd64 mem=4G cores=1 root-disk=8G tags= spaces=", ) c.Assert(con.Tags, gc.Not(gc.IsNil)) c.Assert(con.Spaces, gc.Not(gc.IsNil)) @@ -444,7 +457,7 @@ c.Check(&con, gc.Not(jc.Satisfies), constraints.IsEmpty) con = constraints.MustParse("cpu-power=") c.Check(&con, gc.Not(jc.Satisfies), constraints.IsEmpty) - con = constraints.MustParse("cpu-cores=") + con = constraints.MustParse("cores=") c.Check(&con, gc.Not(jc.Satisfies), constraints.IsEmpty) con = constraints.MustParse("container=") c.Check(&con, gc.Not(jc.Satisfies), constraints.IsEmpty) @@ -584,7 +597,7 @@ c.Check(cons.HasInstanceType(), jc.IsTrue) } -const initialWithoutCons = "root-disk=8G mem=4G arch=amd64 cpu-power=1000 cpu-cores=4 spaces=space1,^space2 tags=foo container=lxd instance-type=bar" +const initialWithoutCons = "root-disk=8G mem=4G arch=amd64 cpu-power=1000 cores=4 spaces=space1,^space2 tags=foo container=lxd instance-type=bar" var withoutTests = []struct { initial string @@ -593,133 +606,71 @@ }{{ initial: initialWithoutCons, without: []string{"root-disk"}, - final: "mem=4G arch=amd64 cpu-power=1000 cpu-cores=4 tags=foo spaces=space1,^space2 container=lxd instance-type=bar", + final: "mem=4G arch=amd64 cpu-power=1000 cores=4 tags=foo spaces=space1,^space2 container=lxd instance-type=bar", }, { 
initial: initialWithoutCons, without: []string{"mem"}, - final: "root-disk=8G arch=amd64 cpu-power=1000 cpu-cores=4 tags=foo spaces=space1,^space2 container=lxd instance-type=bar", + final: "root-disk=8G arch=amd64 cpu-power=1000 cores=4 tags=foo spaces=space1,^space2 container=lxd instance-type=bar", }, { initial: initialWithoutCons, without: []string{"arch"}, - final: "root-disk=8G mem=4G cpu-power=1000 cpu-cores=4 tags=foo spaces=space1,^space2 container=lxd instance-type=bar", + final: "root-disk=8G mem=4G cpu-power=1000 cores=4 tags=foo spaces=space1,^space2 container=lxd instance-type=bar", }, { initial: initialWithoutCons, without: []string{"cpu-power"}, - final: "root-disk=8G mem=4G arch=amd64 cpu-cores=4 tags=foo spaces=space1,^space2 container=lxd instance-type=bar", + final: "root-disk=8G mem=4G arch=amd64 cores=4 tags=foo spaces=space1,^space2 container=lxd instance-type=bar", }, { initial: initialWithoutCons, - without: []string{"cpu-cores"}, + without: []string{"cores"}, final: "root-disk=8G mem=4G arch=amd64 cpu-power=1000 tags=foo spaces=space1,^space2 container=lxd instance-type=bar", }, { initial: initialWithoutCons, without: []string{"tags"}, - final: "root-disk=8G mem=4G arch=amd64 cpu-power=1000 cpu-cores=4 spaces=space1,^space2 container=lxd instance-type=bar", + final: "root-disk=8G mem=4G arch=amd64 cpu-power=1000 cores=4 spaces=space1,^space2 container=lxd instance-type=bar", }, { initial: initialWithoutCons, without: []string{"spaces"}, - final: "root-disk=8G mem=4G arch=amd64 cpu-power=1000 cpu-cores=4 tags=foo container=lxd instance-type=bar", + final: "root-disk=8G mem=4G arch=amd64 cpu-power=1000 cores=4 tags=foo container=lxd instance-type=bar", }, { initial: initialWithoutCons, without: []string{"container"}, - final: "root-disk=8G mem=4G arch=amd64 cpu-power=1000 cpu-cores=4 tags=foo spaces=space1,^space2 instance-type=bar", + final: "root-disk=8G mem=4G arch=amd64 cpu-power=1000 cores=4 tags=foo spaces=space1,^space2 
instance-type=bar", }, { initial: initialWithoutCons, without: []string{"instance-type"}, - final: "root-disk=8G mem=4G arch=amd64 cpu-power=1000 cpu-cores=4 tags=foo spaces=space1,^space2 container=lxd", + final: "root-disk=8G mem=4G arch=amd64 cpu-power=1000 cores=4 tags=foo spaces=space1,^space2 container=lxd", }, { initial: initialWithoutCons, without: []string{"root-disk", "mem", "arch"}, - final: "cpu-power=1000 cpu-cores=4 tags=foo spaces=space1,^space2 container=lxd instance-type=bar", + final: "cpu-power=1000 cores=4 tags=foo spaces=space1,^space2 container=lxd instance-type=bar", }} func (s *ConstraintsSuite) TestWithout(c *gc.C) { for i, t := range withoutTests { c.Logf("test %d", i) initial := constraints.MustParse(t.initial) - final, err := constraints.Without(initial, t.without...) - c.Assert(err, jc.ErrorIsNil) + final := constraints.Without(initial, t.without...) c.Check(final, jc.DeepEquals, constraints.MustParse(t.final)) } } -func (s *ConstraintsSuite) TestWithoutError(c *gc.C) { - cons := constraints.MustParse("mem=4G") - _, err := constraints.Without(cons, "foo") - c.Assert(err, gc.ErrorMatches, `unknown constraint "foo"`) -} - -func (s *ConstraintsSuite) TestAttributesWithValues(c *gc.C) { - for i, consStr := range []string{ - "", - "root-disk=8G mem=4G arch=amd64 cpu-power=1000 cpu-cores=4 instance-type=foo tags=foo,bar spaces=space1,^space2", - } { - c.Logf("test %d", i) - cons := constraints.MustParse(consStr) - obtained := constraints.AttributesWithValues(cons) - assertMissing := func(attrName string) { - _, ok := obtained[attrName] - c.Check(ok, jc.IsFalse) - } - if cons.Arch != nil { - c.Check(obtained["arch"], gc.Equals, *cons.Arch) - } else { - assertMissing("arch") - } - if cons.Mem != nil { - c.Check(obtained["mem"], gc.Equals, *cons.Mem) - } else { - assertMissing("mem") - } - if cons.CpuCores != nil { - c.Check(obtained["cpu-cores"], gc.Equals, *cons.CpuCores) - } else { - assertMissing("cpu-cores") - } - if cons.CpuPower != nil { 
- c.Check(obtained["cpu-power"], gc.Equals, *cons.CpuPower) - } else { - assertMissing("cpu-power") - } - if cons.RootDisk != nil { - c.Check(obtained["root-disk"], gc.Equals, *cons.RootDisk) - } else { - assertMissing("root-disk") - } - if cons.Tags != nil { - c.Check(obtained["tags"], gc.DeepEquals, *cons.Tags) - } else { - assertMissing("tags") - } - if cons.Spaces != nil { - c.Check(obtained["spaces"], gc.DeepEquals, *cons.Spaces) - } else { - assertMissing("spaces") - } - if cons.InstanceType != nil { - c.Check(obtained["instance-type"], gc.Equals, *cons.InstanceType) - } else { - assertMissing("instance-type") - } - } -} - var hasAnyTests = []struct { cons string attrs []string expected []string }{ { - cons: "root-disk=8G mem=4G arch=amd64 cpu-power=1000 spaces=space1,^space2 cpu-cores=4", + cons: "root-disk=8G mem=4G arch=amd64 cpu-power=1000 spaces=space1,^space2 cores=4", attrs: []string{"root-disk", "tags", "mem", "spaces"}, expected: []string{"root-disk", "mem", "spaces"}, }, { - cons: "root-disk=8G mem=4G arch=amd64 cpu-power=1000 cpu-cores=4", + cons: "root-disk=8G mem=4G arch=amd64 cpu-power=1000 cores=4", attrs: []string{"root-disk", "tags", "mem"}, expected: []string{"root-disk", "mem"}, }, { - cons: "root-disk=8G mem=4G arch=amd64 cpu-power=1000 cpu-cores=4", + cons: "root-disk=8G mem=4G arch=amd64 cpu-power=1000 cores=4", attrs: []string{"tags", "spaces"}, expected: []string{}, }, diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/constraints/export_test.go juju-core-2.0.0/src/github.com/juju/juju/constraints/export_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/constraints/export_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/constraints/export_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -5,7 +5,7 @@ var WithFallbacks = withFallbacks -func Without(cons Value, attrTags ...string) (Value, error) { +func Without(cons Value, attrTags ...string) Value { return cons.without(attrTags...) 
} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/constraints/package_test.go juju-core-2.0.0/src/github.com/juju/juju/constraints/package_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/constraints/package_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/constraints/package_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,14 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package constraints_test + +import ( + "testing" + + gc "gopkg.in/check.v1" +) + +func TestPackage(t *testing.T) { + gc.TestingT(t) +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/constraints/validation.go juju-core-2.0.0/src/github.com/juju/juju/constraints/validation.go --- juju-core-2.0~beta15/src/github.com/juju/juju/constraints/validation.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/constraints/validation.go 2016-10-13 14:31:49.000000000 +0000 @@ -5,7 +5,6 @@ import ( "fmt" - "math" "reflect" "github.com/juju/utils/set" @@ -80,7 +79,7 @@ // RegisterVocabulary is defined on Validator. func (v *validator) RegisterVocabulary(attributeName string, allowedValues interface{}) { - v.vocab[attributeName] = convertToSlice(allowedValues) + v.vocab[resolveAlias(attributeName)] = convertToSlice(allowedValues) } var checkIsCollection = func(coll interface{}) { @@ -102,6 +101,7 @@ // UpdateVocabulary is defined on Validator. 
func (v *validator) UpdateVocabulary(attributeName string, allowedValues interface{}) { + attributeName = resolveAlias(attributeName) // If this attribute is not registered, delegate to RegisterVocabulary() currentValues, ok := v.vocab[attributeName] if !ok { @@ -120,11 +120,19 @@ newValues := convertToSlice(allowedValues) writeUnique(newValues) + v.updateVocabularyFromMap(attributeName, unique) +} + +func (v *validator) updateVocabularyFromMap(attributeName string, valuesMap map[interface{}]bool) { + attributeName = resolveAlias(attributeName) var merged []interface{} - for one, _ := range unique { + for one, _ := range valuesMap { + // TODO (anastasiamac) Because it's coming from the map, the order may be affected + // and can be unreliable. Not sure how to fix it yet... + // How can we guarantee the order here? merged = append(merged, one) } - v.vocab[attributeName] = merged + v.RegisterVocabulary(attributeName, merged) } // checkConflicts returns an error if the constraints Value contains conflicting attributes. @@ -179,7 +187,7 @@ // checkInVocab returns an error if the attribute value is not allowed by the // vocab which may have been registered for it. func (v *validator) checkInVocab(attributeName string, attributeValue interface{}) error { - validValues, ok := v.vocab[attributeName] + validValues, ok := v.vocab[resolveAlias(attributeName)] if !ok { return nil } @@ -192,40 +200,54 @@ "invalid constraint value: %v=%v\nvalid values are: %v", attributeName, attributeValue, validValues) } -// coerce returns v in a format that allows constraint values to be easily compared. -// Its main purpose is to cast all numeric values to int64 or float64. +// coerce returns v in a format that allows constraint values to be easily +// compared. Its main purpose is to cast all numeric values to float64 (since +// the numbers we compare are generated from json serialization). 
func coerce(v interface{}) interface{} { - if v != nil { - switch vv := reflect.TypeOf(v); vv.Kind() { - case reflect.String: - return v - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return int64(reflect.ValueOf(v).Int()) - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - uval := reflect.ValueOf(v).Uint() - // Just double check the value is in range. - if uval > math.MaxInt64 { - panic(fmt.Errorf("constraint value %v is too large", uval)) - } - return int64(uval) - case reflect.Float32, reflect.Float64: - return float64(reflect.ValueOf(v).Float()) - } + switch val := v.(type) { + case string: + return v + // Yes, these are all the same, however we can't put them into a single + // case, or the value becomes interface{}, which can't be converted to a + // float64. + case int: + return float64(val) + case int8: + return float64(val) + case int16: + return float64(val) + case int32: + return float64(val) + case int64: + return float64(val) + case uint: + return float64(val) + case uint8: + return float64(val) + case uint16: + return float64(val) + case uint32: + return float64(val) + case uint64: + return float64(val) + case float32: + return float64(val) + case float64: + return val } return v } // withFallbacks returns a copy of v with nil values taken from vFallback. func withFallbacks(v Value, vFallback Value) Value { - result := vFallback - for _, fieldName := range fieldNames { - resultVal := reflect.ValueOf(&result).Elem().FieldByName(fieldName) - val := reflect.ValueOf(&v).Elem().FieldByName(fieldName) - if !val.IsNil() { - resultVal.Set(val) + vAttr := v.attributesWithValues() + fbAttr := vFallback.attributesWithValues() + for k, v := range fbAttr { + if _, ok := vAttr[k]; !ok { + vAttr[k] = v } } - return result + return fromAttributes(vAttr) } // Validate is defined on Validator. @@ -259,7 +281,7 @@ // Null out the conflicting consFallback attribute values because // cons takes priority. 
We can't error here because we // know that aConflicts contains valid attr names. - consFallbackMinusConflicts, _ := consFallback.without(fallbackConflicts...) + consFallbackMinusConflicts := consFallback.without(fallbackConflicts...) // The result is cons with fallbacks coming from any // non conflicting consFallback attributes. return withFallbacks(cons, consFallbackMinusConflicts), nil diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/constraints/validation_test.go juju-core-2.0.0/src/github.com/juju/juju/constraints/validation_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/constraints/validation_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/constraints/validation_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -17,6 +17,7 @@ var _ = gc.Suite(&validationSuite{}) var validationTests = []struct { + desc string cons string unsupported []string vocab map[string][]interface{} @@ -25,95 +26,111 @@ err string }{ { - cons: "root-disk=8G mem=4G arch=amd64 cpu-power=1000 cpu-cores=4", + desc: "base good", + cons: "root-disk=8G mem=4G arch=amd64 cpu-power=1000 cores=4", }, { - cons: "root-disk=8G mem=4G arch=amd64 cpu-power=1000 cpu-cores=4 tags=foo", + desc: "unsupported", + cons: "root-disk=8G mem=4G arch=amd64 cpu-power=1000 cores=4 tags=foo", unsupported: []string{"tags"}, }, { - cons: "root-disk=8G mem=4G arch=amd64 cpu-power=1000 cpu-cores=4 instance-type=foo", + desc: "multiple unsupported", + cons: "root-disk=8G mem=4G arch=amd64 cpu-power=1000 cores=4 instance-type=foo", unsupported: []string{"cpu-power", "instance-type"}, }, { - // Ambiguous constraint errors take precedence over unsupported errors. 
- cons: "root-disk=8G mem=4G cpu-cores=4 instance-type=foo", + desc: "Ambiguous constraint errors take precedence over unsupported errors.", + cons: "root-disk=8G mem=4G cores=4 instance-type=foo", reds: []string{"mem", "arch"}, blues: []string{"instance-type"}, - unsupported: []string{"cpu-cores"}, + unsupported: []string{"cores"}, err: `ambiguous constraints: "instance-type" overlaps with "mem"`, }, { - cons: "root-disk=8G mem=4G arch=amd64 cpu-cores=4 instance-type=foo", + desc: "red conflicts", + cons: "root-disk=8G mem=4G arch=amd64 cores=4 instance-type=foo", reds: []string{"mem", "arch"}, - err: "", }, { - cons: "root-disk=8G mem=4G arch=amd64 cpu-cores=4 instance-type=foo", + desc: "blue conflicts", + cons: "root-disk=8G mem=4G arch=amd64 cores=4 instance-type=foo", blues: []string{"mem", "arch"}, - err: "", }, { - cons: "root-disk=8G mem=4G arch=amd64 cpu-cores=4 instance-type=foo", + desc: "red and blue conflicts", + cons: "root-disk=8G mem=4G arch=amd64 cores=4 instance-type=foo", reds: []string{"mem", "arch"}, blues: []string{"instance-type"}, err: `ambiguous constraints: "arch" overlaps with "instance-type"`, }, { - cons: "root-disk=8G mem=4G arch=amd64 cpu-cores=4 instance-type=foo", + desc: "ambiguous constraints red to blue", + cons: "root-disk=8G mem=4G arch=amd64 cores=4 instance-type=foo", reds: []string{"instance-type"}, blues: []string{"mem", "arch"}, err: `ambiguous constraints: "arch" overlaps with "instance-type"`, }, { - cons: "root-disk=8G mem=4G cpu-cores=4 instance-type=foo", + desc: "ambiguous constraints blue to red", + cons: "root-disk=8G mem=4G cores=4 instance-type=foo", reds: []string{"mem", "arch"}, blues: []string{"instance-type"}, err: `ambiguous constraints: "instance-type" overlaps with "mem"`, }, { - cons: "arch=amd64 mem=4G cpu-cores=4", + desc: "arch vocab", + cons: "arch=amd64 mem=4G cores=4", vocab: map[string][]interface{}{"arch": {"amd64", "i386"}}, }, { - cons: "mem=4G cpu-cores=4", - vocab: 
map[string][]interface{}{"cpu-cores": {2, 4, 8}}, + desc: "cores vocab", + cons: "mem=4G cores=4", + vocab: map[string][]interface{}{"cores": {2, 4, 8}}, }, { + desc: "instance-type vocab", cons: "mem=4G instance-type=foo", vocab: map[string][]interface{}{"instance-type": {"foo", "bar"}}, }, { + desc: "tags vocab", cons: "mem=4G tags=foo,bar", vocab: map[string][]interface{}{"tags": {"foo", "bar", "another"}}, }, { - cons: "arch=i386 mem=4G cpu-cores=4", + desc: "invalid arch vocab", + cons: "arch=i386 mem=4G cores=4", vocab: map[string][]interface{}{"arch": {"amd64"}}, err: "invalid constraint value: arch=i386\nvalid values are:.*", }, { - cons: "mem=4G cpu-cores=5", - vocab: map[string][]interface{}{"cpu-cores": {2, 4, 8}}, - err: "invalid constraint value: cpu-cores=5\nvalid values are:.*", + desc: "invalid cores vocab", + cons: "mem=4G cores=5", + vocab: map[string][]interface{}{"cores": {2, 4, 8}}, + err: "invalid constraint value: cores=5\nvalid values are:.*", }, { + desc: "invalid instance-type vocab", cons: "mem=4G instance-type=foo", vocab: map[string][]interface{}{"instance-type": {"bar"}}, err: "invalid constraint value: instance-type=foo\nvalid values are:.*", }, { + desc: "invalid tags vocab", cons: "mem=4G tags=foo,other", vocab: map[string][]interface{}{"tags": {"foo", "bar", "another"}}, err: "invalid constraint value: tags=other\nvalid values are:.*", }, { + desc: "instance-type and arch", cons: "arch=i386 mem=4G instance-type=foo", vocab: map[string][]interface{}{ "instance-type": {"foo", "bar"}, "arch": {"amd64", "i386"}}, }, { + desc: "virt-type", cons: "virt-type=bar", vocab: map[string][]interface{}{"virt-type": {"bar"}}, }, @@ -121,7 +138,7 @@ func (s *validationSuite) TestValidation(c *gc.C) { for i, t := range validationTests { - c.Logf("test %d", i) + c.Logf("test %d: %s", i, t.desc) validator := constraints.NewValidator() validator.RegisterUnsupported(t.unsupported) validator.RegisterConflicts(t.reds, t.blues) @@ -185,18 +202,18 @@ 
consFallback: "instance-type=foo", expected: "instance-type=foo", }, { - desc: "cpu-cores with empty fallback", - cons: "cpu-cores=2", - expected: "cpu-cores=2", - }, { - desc: "cpu-cores with ignored fallback", - cons: "cpu-cores=4", - consFallback: "cpu-cores=8", - expected: "cpu-cores=4", - }, { - desc: "cpu-cores from fallback", - consFallback: "cpu-cores=8", - expected: "cpu-cores=8", + desc: "cores with empty fallback", + cons: "cores=2", + expected: "cores=2", + }, { + desc: "cores with ignored fallback", + cons: "cores=4", + consFallback: "cores=8", + expected: "cores=4", + }, { + desc: "cores from fallback", + consFallback: "cores=8", + expected: "cores=8", }, { desc: "cpu-power with empty fallback", cons: "cpu-power=100", @@ -257,68 +274,68 @@ }, { desc: "non-overlapping mix", cons: "root-disk=8G mem=4G arch=amd64", - consFallback: "cpu-power=1000 cpu-cores=4", - expected: "root-disk=8G mem=4G arch=amd64 cpu-power=1000 cpu-cores=4", + consFallback: "cpu-power=1000 cores=4", + expected: "root-disk=8G mem=4G arch=amd64 cpu-power=1000 cores=4", }, { desc: "overlapping mix", cons: "root-disk=8G mem=4G arch=amd64", - consFallback: "cpu-power=1000 cpu-cores=4 mem=8G", - expected: "root-disk=8G mem=4G arch=amd64 cpu-power=1000 cpu-cores=4", + consFallback: "cpu-power=1000 cores=4 mem=8G", + expected: "root-disk=8G mem=4G arch=amd64 cpu-power=1000 cores=4", }, { desc: "fallback only, no conflicts", - consFallback: "root-disk=8G cpu-cores=4 instance-type=foo", + consFallback: "root-disk=8G cores=4 instance-type=foo", reds: []string{"mem", "arch"}, blues: []string{"instance-type"}, - expected: "root-disk=8G cpu-cores=4 instance-type=foo", + expected: "root-disk=8G cores=4 instance-type=foo", }, { desc: "no fallback, no conflicts", - cons: "root-disk=8G cpu-cores=4 instance-type=foo", + cons: "root-disk=8G cores=4 instance-type=foo", reds: []string{"mem", "arch"}, blues: []string{"instance-type"}, - expected: "root-disk=8G cpu-cores=4 instance-type=foo", + expected: 
"root-disk=8G cores=4 instance-type=foo", }, { desc: "conflict value from override", consFallback: "root-disk=8G instance-type=foo", - cons: "root-disk=8G cpu-cores=4 instance-type=bar", + cons: "root-disk=8G cores=4 instance-type=bar", reds: []string{"mem", "arch"}, blues: []string{"instance-type"}, - expected: "root-disk=8G cpu-cores=4 instance-type=bar", + expected: "root-disk=8G cores=4 instance-type=bar", }, { desc: "unsupported attributes ignored", consFallback: "root-disk=8G instance-type=foo", - cons: "root-disk=8G cpu-cores=4 instance-type=bar", + cons: "root-disk=8G cores=4 instance-type=bar", reds: []string{"mem", "arch"}, blues: []string{"instance-type"}, unsupported: []string{"instance-type"}, - expected: "root-disk=8G cpu-cores=4 instance-type=bar", + expected: "root-disk=8G cores=4 instance-type=bar", }, { desc: "red conflict masked from fallback", consFallback: "root-disk=8G mem=4G", - cons: "root-disk=8G cpu-cores=4 instance-type=bar", + cons: "root-disk=8G cores=4 instance-type=bar", reds: []string{"mem", "arch"}, blues: []string{"instance-type"}, - expected: "root-disk=8G cpu-cores=4 instance-type=bar", + expected: "root-disk=8G cores=4 instance-type=bar", }, { desc: "second red conflict masked from fallback", consFallback: "root-disk=8G arch=amd64", - cons: "root-disk=8G cpu-cores=4 instance-type=bar", + cons: "root-disk=8G cores=4 instance-type=bar", reds: []string{"mem", "arch"}, blues: []string{"instance-type"}, - expected: "root-disk=8G cpu-cores=4 instance-type=bar", + expected: "root-disk=8G cores=4 instance-type=bar", }, { desc: "blue conflict masked from fallback", - consFallback: "root-disk=8G cpu-cores=4 instance-type=bar", + consFallback: "root-disk=8G cores=4 instance-type=bar", cons: "root-disk=8G mem=4G", reds: []string{"mem", "arch"}, blues: []string{"instance-type"}, - expected: "root-disk=8G cpu-cores=4 mem=4G", + expected: "root-disk=8G cores=4 mem=4G", }, { desc: "both red conflicts used, blue mased from fallback", - 
consFallback: "root-disk=8G cpu-cores=4 instance-type=bar", + consFallback: "root-disk=8G cores=4 instance-type=bar", cons: "root-disk=8G arch=amd64 mem=4G", reds: []string{"mem", "arch"}, blues: []string{"instance-type"}, - expected: "root-disk=8G cpu-cores=4 arch=amd64 mem=4G", + expected: "root-disk=8G cores=4 arch=amd64 mem=4G", }, } @@ -340,7 +357,7 @@ validator := constraints.NewValidator() validator.RegisterConflicts([]string{"instance-type"}, []string{"mem"}) consFallback := constraints.MustParse("instance-type=foo mem=4G") - cons := constraints.MustParse("cpu-cores=2") + cons := constraints.MustParse("cores=2") _, err := validator.Merge(consFallback, cons) c.Assert(err, gc.ErrorMatches, `ambiguous constraints: "instance-type" overlaps with "mem"`) _, err = validator.Merge(cons, consFallback) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/container/kvm/container.go juju-core-2.0.0/src/github.com/juju/juju/container/kvm/container.go --- juju-core-2.0~beta15/src/github.com/juju/juju/container/kvm/container.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/container/kvm/container.go 2016-10-13 14:31:49.000000000 +0000 @@ -29,8 +29,8 @@ func (c *kvmContainer) Start(params StartParams) error { - logger.Debugf("Synchronise images for %s %s %v", params.Series, params.Arch, params.ImageDownloadUrl) - if err := SyncImages(params.Series, params.Arch, params.ImageDownloadUrl); err != nil { + logger.Debugf("Synchronise images for %s %s %v", params.Series, params.Arch, params.ImageDownloadURL) + if err := SyncImages(params.Series, params.Arch, params.ImageDownloadURL); err != nil { return err } var bridge string diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/container/kvm/instance.go juju-core-2.0.0/src/github.com/juju/juju/container/kvm/instance.go --- juju-core-2.0~beta15/src/github.com/juju/juju/container/kvm/instance.go 2016-08-16 08:56:25.000000000 +0000 +++ 
juju-core-2.0.0/src/github.com/juju/juju/container/kvm/instance.go 2016-10-13 14:31:49.000000000 +0000 @@ -27,12 +27,12 @@ func (kvm *kvmInstance) Status() instance.InstanceStatus { if kvm.container.IsRunning() { return instance.InstanceStatus{ - Status: status.StatusRunning, + Status: status.Running, Message: "running", } } return instance.InstanceStatus{ - Status: status.StatusStopped, + Status: status.Stopped, Message: "stopped", } } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/container/kvm/interface.go juju-core-2.0.0/src/github.com/juju/juju/container/kvm/interface.go --- juju-core-2.0~beta15/src/github.com/juju/juju/container/kvm/interface.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/container/kvm/interface.go 2016-10-13 14:31:49.000000000 +0000 @@ -16,7 +16,7 @@ Memory uint64 // MB CpuCores uint64 RootDisk uint64 // GB - ImageDownloadUrl string + ImageDownloadURL string } // Container represents a virtualized container instance and provides diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/container/kvm/kvm.go juju-core-2.0.0/src/github.com/juju/juju/container/kvm/kvm.go --- juju-core-2.0~beta15/src/github.com/juju/juju/container/kvm/kvm.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/container/kvm/kvm.go 2016-10-13 14:31:49.000000000 +0000 @@ -30,7 +30,7 @@ // In order for Juju to be able to create the hardware characteristics of // the kvm machines it creates, we need to be explicit in our definition - // of memory, cpu-cores and root-disk. The defaults here have been + // of memory, cores and root-disk. The defaults here have been // extracted from the uvt-kvm executable. 
DefaultMemory uint64 = 512 // MB DefaultCpu uint64 = 1 @@ -124,7 +124,7 @@ defer func() { if err != nil { - callback(status.StatusProvisioningError, fmt.Sprintf("Creating container: %v", err), nil) + callback(status.ProvisioningError, fmt.Sprintf("Creating container: %v", err), nil) } }() @@ -159,18 +159,18 @@ // If the Simplestream requested is anything but released, update // our StartParams to request it. if instanceConfig.ImageStream != imagemetadata.ReleasedStream { - startParams.ImageDownloadUrl = imagemetadata.UbuntuCloudImagesURL + "/" + instanceConfig.ImageStream + startParams.ImageDownloadURL = imagemetadata.UbuntuCloudImagesURL + "/" + instanceConfig.ImageStream } var hardware instance.HardwareCharacteristics hardware, err = instance.ParseHardware( - fmt.Sprintf("arch=%s mem=%vM root-disk=%vG cpu-cores=%v", + fmt.Sprintf("arch=%s mem=%vM root-disk=%vG cores=%v", startParams.Arch, startParams.Memory, startParams.RootDisk, startParams.CpuCores)) if err != nil { return nil, nil, errors.Annotate(err, "failed to parse hardware") } - callback(status.StatusAllocating, "Creating container; it might take some time", nil) + callback(status.Allocating, "Creating container; it might take some time", nil) logger.Tracef("create the container, constraints: %v", cons) if err := kvmContainer.Start(startParams); err != nil { err = errors.Annotate(err, "kvm container creation failed") diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/container/kvm/kvm_test.go juju-core-2.0.0/src/github.com/juju/juju/container/kvm/kvm_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/container/kvm/kvm_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/container/kvm/kvm_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -189,7 +189,7 @@ // purely for the side-effect. 
containertesting.CreateContainerWithMachineConfig(c, s.manager, instanceConfig) - c.Assert(kvm.TestStartParams.ImageDownloadUrl, gc.Equals, "") + c.Assert(kvm.TestStartParams.ImageDownloadURL, gc.Equals, "") } // Test that CreateContainer creates proper startParams. @@ -204,7 +204,7 @@ // purely for the side-effect. containertesting.CreateContainerWithMachineConfig(c, s.manager, instanceConfig) - c.Assert(kvm.TestStartParams.ImageDownloadUrl, gc.Equals, "http://cloud-images.ubuntu.com/daily") + c.Assert(kvm.TestStartParams.ImageDownloadURL, gc.Equals, "http://cloud-images.ubuntu.com/daily") } func (s *KVMSuite) TestStartContainerUtilizesSimpleStream(c *gc.C) { @@ -215,7 +215,7 @@ startParams := kvm.StartParams{ Series: "mocked-series", Arch: "mocked-arch", - ImageDownloadUrl: "mocked-url", + ImageDownloadURL: "mocked-url", } mockedContainer := kvm.NewEmptyKvmContainer() mockedContainer.Start(startParams) @@ -225,7 +225,7 @@ "sync arch=%s release=%s --source=%s", startParams.Arch, startParams.Series, - startParams.ImageDownloadUrl, + startParams.ImageDownloadURL, ), " ", ) @@ -266,14 +266,14 @@ RootDisk: kvm.DefaultDisk, }, }, { - cons: "cpu-cores=4", + cons: "cores=4", expected: kvm.StartParams{ Memory: kvm.DefaultMemory, CpuCores: 4, RootDisk: kvm.DefaultDisk, }, }, { - cons: "cpu-cores=0", + cons: "cores=0", expected: kvm.StartParams{ Memory: kvm.DefaultMemory, CpuCores: kvm.MinCpu, @@ -334,7 +334,7 @@ `tags constraint of "foo,bar" being ignored as not supported`, }, }, { - cons: "mem=4G cpu-cores=4 root-disk=20G arch=armhf cpu-power=100 container=lxd tags=foo,bar", + cons: "mem=4G cores=4 root-disk=20G arch=armhf cpu-power=100 container=lxd tags=foo,bar", expected: kvm.StartParams{ Memory: 4 * 1024, CpuCores: 4, diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/container/kvm/live_test.go juju-core-2.0.0/src/github.com/juju/juju/container/kvm/live_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/container/kvm/live_test.go 2016-08-16 
08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/container/kvm/live_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -88,7 +88,7 @@ func createContainer(c *gc.C, manager container.Manager, machineId string) instance.Instance { machineNonce := "fake-nonce" apiInfo := jujutesting.FakeAPIInfo(machineId) - instanceConfig, err := instancecfg.NewInstanceConfig(machineId, machineNonce, imagemetadata.ReleasedStream, "quantal", true, apiInfo) + instanceConfig, err := instancecfg.NewInstanceConfig(coretesting.ControllerTag, machineId, machineNonce, imagemetadata.ReleasedStream, "quantal", apiInfo) c.Assert(err, jc.ErrorIsNil) network := container.BridgeNetworkConfig("virbr0", 0, nil) @@ -106,7 +106,7 @@ inst, hardware, err := manager.CreateContainer(instanceConfig, constraints.Value{}, "precise", network, nil, callback) c.Assert(err, jc.ErrorIsNil) c.Assert(hardware, gc.NotNil) - expected := fmt.Sprintf("arch=%s cpu-cores=1 mem=512M root-disk=8192M", arch.HostArch()) + expected := fmt.Sprintf("arch=%s cores=1 mem=512M root-disk=8192M", arch.HostArch()) c.Assert(hardware.String(), gc.Equals, expected) return inst } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/container/lxd/initialisation.go juju-core-2.0.0/src/github.com/juju/juju/container/lxd/initialisation.go --- juju-core-2.0~beta15/src/github.com/juju/juju/container/lxd/initialisation.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/container/lxd/initialisation.go 2016-10-13 14:31:49.000000000 +0000 @@ -1,37 +1,16 @@ // Copyright 2016 Canonical Ltd. // Licensed under the AGPLv3, see LICENCE file for details. 
-// +build go1.3 +// +build go1.3, !linux package lxd import ( - "bytes" - "fmt" - "io/ioutil" - "net" - "os" - "os/exec" - "strings" - - "github.com/juju/errors" - "github.com/juju/utils/packaging/config" - "github.com/juju/utils/packaging/manager" "github.com/juju/utils/proxy" "github.com/juju/juju/container" ) -const lxdBridgeFile = "/etc/default/lxd-bridge" - -var requiredPackages = []string{ - "lxd", -} - -var xenialPackages = []string{ - "zfsutils-linux", -} - type containerInitialiser struct { series string } @@ -39,339 +18,17 @@ // containerInitialiser implements container.Initialiser. var _ container.Initialiser = (*containerInitialiser)(nil) -// NewContainerInitialiser returns an instance used to perform the steps -// required to allow a host machine to run a LXC container. +// NewContainerInitialiser - on anything but Linux this is a NOP func NewContainerInitialiser(series string) container.Initialiser { - return &containerInitialiser{series} + return &containerInitialiser{} } -// Initialise is specified on the container.Initialiser interface. +// Initialise - on anything but Linux this is a NOP func (ci *containerInitialiser) Initialise() error { - err := ensureDependencies(ci.series) - if err != nil { - return err - } - - err = configureLXDBridge() - if err != nil { - return err - } - proxies := proxy.DetectProxies() - err = ConfigureLXDProxies(proxies) - if err != nil { - return err - } - - // Well... this will need to change soon once we are passed 17.04 as who - // knows what the series name will be. - if ci.series >= "xenial" { - configureZFS() - } - return nil } -// getPackageManager is a helper function which returns the -// package manager implementation for the current system. -func getPackageManager(series string) (manager.PackageManager, error) { - return manager.NewPackageManager(series) -} - -// getPackagingConfigurer is a helper function which returns the -// packaging configuration manager for the current system. 
-func getPackagingConfigurer(series string) (config.PackagingConfigurer, error) { - return config.NewPackagingConfigurer(series) -} - -// ConfigureLXDProxies will try to set the lxc config core.proxy_http and core.proxy_https -// configuration values based on the current environment. +// ConfigureLXDProxies - on anything but Linux this is a NOP func ConfigureLXDProxies(proxies proxy.Settings) error { - setter, err := getLXDConfigSetter() - if err != nil { - return errors.Trace(err) - } - return errors.Trace(configureLXDProxies(setter, proxies)) -} - -var getLXDConfigSetter = getConfigSetterConnect - -func getConfigSetterConnect() (configSetter, error) { - return ConnectLocal() -} - -type configSetter interface { - SetConfig(key, value string) error -} - -func configureLXDProxies(setter configSetter, proxies proxy.Settings) error { - err := setter.SetConfig("core.proxy_http", proxies.Http) - if err != nil { - return errors.Trace(err) - } - err = setter.SetConfig("core.proxy_https", proxies.Https) - if err != nil { - return errors.Trace(err) - } - err = setter.SetConfig("core.proxy_ignore_hosts", proxies.NoProxy) - if err != nil { - return errors.Trace(err) - } return nil } - -var execCommand = exec.Command - -var configureZFS = func() { - /* create a 100 GB pool by default (sparse, so it won't actually fill - * that immediately) - */ - output, err := execCommand( - "lxd", - "init", - "--auto", - "--storage-backend", "zfs", - "--storage-pool", "lxd", - "--storage-create-loop", "100", - ).CombinedOutput() - - if err != nil { - logger.Errorf("configuring zfs failed with %s: %s", err, string(output)) - } -} - -var configureLXDBridge = func() error { - f, err := os.OpenFile(lxdBridgeFile, os.O_RDWR, 0777) - if err != nil { - /* We're using an old version of LXD which doesn't have - * lxd-bridge; let's not fail here. 
- */ - if os.IsNotExist(err) { - logger.Debugf("couldn't find %s, not configuring it", lxdBridgeFile) - return nil - } - return errors.Trace(err) - } - defer f.Close() - - existing, err := ioutil.ReadAll(f) - if err != nil { - return errors.Trace(err) - } - - newBridgeCfg, err := bridgeConfiguration(string(existing)) - if err != nil { - return errors.Trace(err) - } - - if newBridgeCfg == string(existing) { - return nil - } - - _, err = f.Seek(0, 0) - if err != nil { - return errors.Trace(err) - } - _, err = f.WriteString(newBridgeCfg) - if err != nil { - return errors.Trace(err) - } - - /* non-systemd systems don't have the lxd-bridge service, so this always fails */ - _ = exec.Command("service", "lxd-bridge", "restart").Run() - return exec.Command("service", "lxd", "restart").Run() -} - -var interfaceAddrs = func() ([]net.Addr, error) { - return net.InterfaceAddrs() -} - -func editLXDBridgeFile(input string, subnet string) string { - buffer := bytes.Buffer{} - - newValues := map[string]string{ - "USE_LXD_BRIDGE": "true", - "EXISTING_BRIDGE": "", - "LXD_BRIDGE": "lxdbr0", - "LXD_IPV4_ADDR": fmt.Sprintf("10.0.%s.1", subnet), - "LXD_IPV4_NETMASK": "255.255.255.0", - "LXD_IPV4_NETWORK": fmt.Sprintf("10.0.%s.1/24", subnet), - "LXD_IPV4_DHCP_RANGE": fmt.Sprintf("10.0.%s.2,10.0.%s.254", subnet, subnet), - "LXD_IPV4_DHCP_MAX": "253", - "LXD_IPV4_NAT": "true", - "LXD_IPV6_PROXY": "false", - } - found := map[string]bool{} - - for _, line := range strings.Split(input, "\n") { - out := line - - for prefix, value := range newValues { - if strings.HasPrefix(line, prefix+"=") { - out = fmt.Sprintf(`%s="%s"`, prefix, value) - found[prefix] = true - break - } - } - - buffer.WriteString(out) - buffer.WriteString("\n") - } - - for prefix, value := range newValues { - if !found[prefix] { - buffer.WriteString(prefix) - buffer.WriteString("=") - buffer.WriteString(value) - buffer.WriteString("\n") - found[prefix] = true // not necessary but keeps "found" logically consistent - } - } - 
- return buffer.String() -} - -// ensureDependencies creates a set of install packages using -// apt.GetPreparePackages and runs each set of packages through -// apt.GetInstall. -func ensureDependencies(series string) error { - if series == "precise" { - return fmt.Errorf("LXD is not supported in precise.") - } - - pacman, err := getPackageManager(series) - if err != nil { - return err - } - pacconfer, err := getPackagingConfigurer(series) - if err != nil { - return err - } - - for _, pack := range requiredPackages { - pkg := pack - if config.SeriesRequiresCloudArchiveTools(series) && - pacconfer.IsCloudArchivePackage(pack) { - pkg = strings.Join(pacconfer.ApplyCloudArchiveTarget(pack), " ") - } - - if config.RequiresBackports(series, pack) { - pkg = fmt.Sprintf("--target-release %s-backports %s", series, pkg) - } - - if err := pacman.Install(pkg); err != nil { - return err - } - } - - if series >= "xenial" { - for _, pack := range xenialPackages { - pacman.Install(fmt.Sprintf("--no-install-recommends %s", pack)) - } - } - - return err -} - -// findNextAvailableIPv4Subnet scans the list of interfaces on the machine -// looking for 10.0.0.0/16 networks and returns the next subnet not in -// use, having first detected the highest subnet. The next subnet can -// actually be lower if we overflowed 255 whilst seeking out the next -// unused subnet. If all subnets are in use an error is returned. -// -// TODO(frobware): this is not an ideal solution as it doesn't take -// into account any static routes that may be set up on the machine. -// -// TODO(frobware): this only caters for IPv4 setups. 
-func findNextAvailableIPv4Subnet() (string, error) { - _, ip10network, err := net.ParseCIDR("10.0.0.0/16") - if err != nil { - return "", errors.Trace(err) - } - - addrs, err := interfaceAddrs() - if err != nil { - return "", errors.Annotatef(err, "cannot get network interface addresses") - } - - max := 0 - usedSubnets := make(map[int]bool) - - for _, address := range addrs { - addr, network, err := net.ParseCIDR(address.String()) - if err != nil { - logger.Debugf("cannot parse address %q: %v (ignoring)", address.String(), err) - continue - } - if !ip10network.Contains(addr) { - logger.Debugf("find available subnet, skipping %q", network.String()) - continue - } - subnet := int(network.IP[2]) - usedSubnets[subnet] = true - if subnet > max { - max = subnet - } - } - - if len(usedSubnets) == 0 { - return "0", nil - } - - for i := 0; i < 256; i++ { - max = (max + 1) % 256 - if _, inUse := usedSubnets[max]; !inUse { - return fmt.Sprintf("%d", max), nil - } - } - - return "", errors.New("could not find unused subnet") -} - -func parseLXDBridgeConfigValues(input string) map[string]string { - values := make(map[string]string) - - for _, line := range strings.Split(input, "\n") { - line = strings.TrimSpace(line) - - if line == "" || strings.HasPrefix(line, "#") || !strings.Contains(line, "=") { - continue - } - - tokens := strings.Split(line, "=") - - if tokens[0] == "" { - continue // no key - } - - value := "" - - if len(tokens) > 1 { - value = tokens[1] - if strings.HasPrefix(value, `"`) && strings.HasSuffix(value, `"`) { - value = strings.Trim(value, `"`) - } - } - - values[tokens[0]] = value - } - return values -} - -// bridgeConfiguration ensures that input has a valid setting for -// LXD_IPV4_ADDR, returning the existing input if is already set, and -// allocating the next available subnet if it is not. 
-func bridgeConfiguration(input string) (string, error) { - values := parseLXDBridgeConfigValues(input) - ipAddr := net.ParseIP(values["LXD_IPV4_ADDR"]) - - if ipAddr == nil || ipAddr.To4() == nil { - logger.Infof("LXD_IPV4_ADDR is not set; searching for unused subnet") - subnet, err := findNextAvailableIPv4Subnet() - if err != nil { - return "", errors.Trace(err) - } - logger.Infof("setting LXD_IPV4_ADDR=10.0.%s.1", subnet) - return editLXDBridgeFile(input, subnet), nil - } - return input, nil -} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/container/lxd/initialisation_linux.go juju-core-2.0.0/src/github.com/juju/juju/container/lxd/initialisation_linux.go --- juju-core-2.0~beta15/src/github.com/juju/juju/container/lxd/initialisation_linux.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/container/lxd/initialisation_linux.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,413 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +// +build go1.3 + +package lxd + +import ( + "bytes" + "fmt" + "io/ioutil" + "net" + "os" + "os/exec" + "strings" + "syscall" + + "github.com/juju/errors" + "github.com/juju/utils/packaging/config" + "github.com/juju/utils/packaging/manager" + "github.com/juju/utils/proxy" + "github.com/lxc/lxd/shared" + + "github.com/juju/juju/container" + "github.com/juju/juju/tools/lxdclient" +) + +const lxdBridgeFile = "/etc/default/lxd-bridge" + +var requiredPackages = []string{ + "lxd", +} + +var xenialPackages = []string{ + "zfsutils-linux", +} + +type containerInitialiser struct { + series string +} + +// containerInitialiser implements container.Initialiser. +var _ container.Initialiser = (*containerInitialiser)(nil) + +// NewContainerInitialiser returns an instance used to perform the steps +// required to allow a host machine to run a LXC container. 
+func NewContainerInitialiser(series string) container.Initialiser { + return &containerInitialiser{series} +} + +// Initialise is specified on the container.Initialiser interface. +func (ci *containerInitialiser) Initialise() error { + err := ensureDependencies(ci.series) + if err != nil { + return err + } + + err = configureLXDBridge() + if err != nil { + return err + } + proxies := proxy.DetectProxies() + err = ConfigureLXDProxies(proxies) + if err != nil { + return err + } + + // Well... this will need to change soon once we are passed 17.04 as who + // knows what the series name will be. + if ci.series >= "xenial" { + configureZFS() + } + + return nil +} + +// getPackageManager is a helper function which returns the +// package manager implementation for the current system. +func getPackageManager(series string) (manager.PackageManager, error) { + return manager.NewPackageManager(series) +} + +// getPackagingConfigurer is a helper function which returns the +// packaging configuration manager for the current system. +func getPackagingConfigurer(series string) (config.PackagingConfigurer, error) { + return config.NewPackagingConfigurer(series) +} + +// ConfigureLXDProxies will try to set the lxc config core.proxy_http and core.proxy_https +// configuration values based on the current environment. 
+func ConfigureLXDProxies(proxies proxy.Settings) error { + setter, err := getLXDConfigSetter() + if err != nil { + return errors.Trace(err) + } + return errors.Trace(configureLXDProxies(setter, proxies)) +} + +var getLXDConfigSetter = getConfigSetterConnect + +func getConfigSetterConnect() (configSetter, error) { + return ConnectLocal() +} + +type configSetter interface { + SetConfig(key, value string) error +} + +func configureLXDProxies(setter configSetter, proxies proxy.Settings) error { + err := setter.SetConfig("core.proxy_http", proxies.Http) + if err != nil { + return errors.Trace(err) + } + err = setter.SetConfig("core.proxy_https", proxies.Https) + if err != nil { + return errors.Trace(err) + } + err = setter.SetConfig("core.proxy_ignore_hosts", proxies.NoProxy) + if err != nil { + return errors.Trace(err) + } + return nil +} + +// df returns the number of free bytes on the file system at the given path +var df = func(path string) (uint64, error) { + // Note: do not use golang.org/x/sys/unix for this, it is + // the best solution but will break the build in s390x + // and introduce cgo dependency lp:1632541 + statfs := syscall.Statfs_t{} + err := syscall.Statfs(path, &statfs) + if err != nil { + return 0, err + } + return uint64(statfs.Bsize) * statfs.Bfree, nil +} + +var configureZFS = func() { + /* create a pool that will occupy 90% of the free disk space + (sparse, so it won't actually fill that immediately) + */ + + // Find 90% of the free disk space + freeBytes, err := df("/") + if err != nil { + logger.Errorf("configuring zfs failed - unable to find file system size: %s", err) + } + GigaBytesToUse := freeBytes * 9 / (10 * 1024 * 1024 * 1024) + + output, err := exec.Command( + "lxd", + "init", + "--auto", + "--storage-backend", "zfs", + "--storage-pool", "lxd", + "--storage-create-loop", fmt.Sprintf("%d", GigaBytesToUse), + ).CombinedOutput() + + if err != nil { + logger.Errorf("configuring zfs failed with %s: %s", err, string(output)) + } +} + +var 
configureLXDBridge = func() error { + client, err := ConnectLocal() + if err != nil { + return errors.Trace(err) + } + + status, err := client.ServerStatus() + if err != nil { + return errors.Trace(err) + } + + if shared.StringInSlice("network", status.APIExtensions) { + return lxdclient.CreateDefaultBridgeInDefaultProfile(client) + } + + f, err := os.OpenFile(lxdBridgeFile, os.O_RDWR, 0777) + if err != nil { + /* We're using an old version of LXD which doesn't have + * lxd-bridge; let's not fail here. + */ + if os.IsNotExist(err) { + logger.Debugf("couldn't find %s, not configuring it", lxdBridgeFile) + return nil + } + return errors.Trace(err) + } + defer f.Close() + + existing, err := ioutil.ReadAll(f) + if err != nil { + return errors.Trace(err) + } + + newBridgeCfg, err := bridgeConfiguration(string(existing)) + if err != nil { + return errors.Trace(err) + } + + if newBridgeCfg == string(existing) { + return nil + } + + _, err = f.Seek(0, 0) + if err != nil { + return errors.Trace(err) + } + _, err = f.WriteString(newBridgeCfg) + if err != nil { + return errors.Trace(err) + } + + /* non-systemd systems don't have the lxd-bridge service, so this always fails */ + _ = exec.Command("service", "lxd-bridge", "restart").Run() + return exec.Command("service", "lxd", "restart").Run() +} + +var interfaceAddrs = func() ([]net.Addr, error) { + return net.InterfaceAddrs() +} + +func editLXDBridgeFile(input string, subnet string) string { + buffer := bytes.Buffer{} + + newValues := map[string]string{ + "USE_LXD_BRIDGE": "true", + "EXISTING_BRIDGE": "", + "LXD_BRIDGE": "lxdbr0", + "LXD_IPV4_ADDR": fmt.Sprintf("10.0.%s.1", subnet), + "LXD_IPV4_NETMASK": "255.255.255.0", + "LXD_IPV4_NETWORK": fmt.Sprintf("10.0.%s.1/24", subnet), + "LXD_IPV4_DHCP_RANGE": fmt.Sprintf("10.0.%s.2,10.0.%s.254", subnet, subnet), + "LXD_IPV4_DHCP_MAX": "253", + "LXD_IPV4_NAT": "true", + "LXD_IPV6_PROXY": "false", + } + found := map[string]bool{} + + for _, line := range strings.Split(input, "\n") { 
+ out := line + + for prefix, value := range newValues { + if strings.HasPrefix(line, prefix+"=") { + out = fmt.Sprintf(`%s="%s"`, prefix, value) + found[prefix] = true + break + } + } + + buffer.WriteString(out) + buffer.WriteString("\n") + } + + for prefix, value := range newValues { + if !found[prefix] { + buffer.WriteString(prefix) + buffer.WriteString("=") + buffer.WriteString(value) + buffer.WriteString("\n") + found[prefix] = true // not necessary but keeps "found" logically consistent + } + } + + return buffer.String() +} + +// ensureDependencies creates a set of install packages using +// apt.GetPreparePackages and runs each set of packages through +// apt.GetInstall. +func ensureDependencies(series string) error { + if series == "precise" { + return fmt.Errorf("LXD is not supported in precise.") + } + + pacman, err := getPackageManager(series) + if err != nil { + return err + } + pacconfer, err := getPackagingConfigurer(series) + if err != nil { + return err + } + + for _, pack := range requiredPackages { + pkg := pack + if config.SeriesRequiresCloudArchiveTools(series) && + pacconfer.IsCloudArchivePackage(pack) { + pkg = strings.Join(pacconfer.ApplyCloudArchiveTarget(pack), " ") + } + + if config.RequiresBackports(series, pack) { + pkg = fmt.Sprintf("--target-release %s-backports %s", series, pkg) + } + + if err := pacman.Install(pkg); err != nil { + return err + } + } + + if series >= "xenial" { + for _, pack := range xenialPackages { + pacman.Install(fmt.Sprintf("--no-install-recommends %s", pack)) + } + } + + return err +} + +// findNextAvailableIPv4Subnet scans the list of interfaces on the machine +// looking for 10.0.0.0/16 networks and returns the next subnet not in +// use, having first detected the highest subnet. The next subnet can +// actually be lower if we overflowed 255 whilst seeking out the next +// unused subnet. If all subnets are in use an error is returned. 
+// +// TODO(frobware): this is not an ideal solution as it doesn't take +// into account any static routes that may be set up on the machine. +// +// TODO(frobware): this only caters for IPv4 setups. +func findNextAvailableIPv4Subnet() (string, error) { + _, ip10network, err := net.ParseCIDR("10.0.0.0/16") + if err != nil { + return "", errors.Trace(err) + } + + addrs, err := interfaceAddrs() + if err != nil { + return "", errors.Annotatef(err, "cannot get network interface addresses") + } + + max := 0 + usedSubnets := make(map[int]bool) + + for _, address := range addrs { + addr, network, err := net.ParseCIDR(address.String()) + if err != nil { + logger.Debugf("cannot parse address %q: %v (ignoring)", address.String(), err) + continue + } + if !ip10network.Contains(addr) { + logger.Debugf("find available subnet, skipping %q", network.String()) + continue + } + subnet := int(network.IP[2]) + usedSubnets[subnet] = true + if subnet > max { + max = subnet + } + } + + if len(usedSubnets) == 0 { + return "0", nil + } + + for i := 0; i < 256; i++ { + max = (max + 1) % 256 + if _, inUse := usedSubnets[max]; !inUse { + return fmt.Sprintf("%d", max), nil + } + } + + return "", errors.New("could not find unused subnet") +} + +func parseLXDBridgeConfigValues(input string) map[string]string { + values := make(map[string]string) + + for _, line := range strings.Split(input, "\n") { + line = strings.TrimSpace(line) + + if line == "" || strings.HasPrefix(line, "#") || !strings.Contains(line, "=") { + continue + } + + tokens := strings.Split(line, "=") + + if tokens[0] == "" { + continue // no key + } + + value := "" + + if len(tokens) > 1 { + value = tokens[1] + if strings.HasPrefix(value, `"`) && strings.HasSuffix(value, `"`) { + value = strings.Trim(value, `"`) + } + } + + values[tokens[0]] = value + } + return values +} + +// bridgeConfiguration ensures that input has a valid setting for +// LXD_IPV4_ADDR, returning the existing input if is already set, and +// allocating the 
next available subnet if it is not. +func bridgeConfiguration(input string) (string, error) { + values := parseLXDBridgeConfigValues(input) + ipAddr := net.ParseIP(values["LXD_IPV4_ADDR"]) + + if ipAddr == nil || ipAddr.To4() == nil { + logger.Infof("LXD_IPV4_ADDR is not set; searching for unused subnet") + subnet, err := findNextAvailableIPv4Subnet() + if err != nil { + return "", errors.Trace(err) + } + logger.Infof("setting LXD_IPV4_ADDR=10.0.%s.1", subnet) + return editLXDBridgeFile(input, subnet), nil + } + return input, nil +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/container/lxd/initialisation_test.go juju-core-2.0.0/src/github.com/juju/juju/container/lxd/initialisation_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/container/lxd/initialisation_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/container/lxd/initialisation_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -1,7 +1,7 @@ // Copyright 2016 Canonical Ltd. // Licensed under the AGPLv3, see LICENCE file for details. -// +build go1.3 +// +build go1.3, linux package lxd @@ -79,13 +79,13 @@ s.BaseSuite.SetUpTest(c) s.calledCmds = []string{} s.PatchValue(&manager.RunCommandWithRetry, getMockRunCommandWithRetry(&s.calledCmds)) - s.PatchValue(&configureZFS, func() {}) s.PatchValue(&configureLXDBridge, func() error { return nil }) s.PatchValue(&getLXDConfigSetter, func() (configSetter, error) { return &mockConfigSetter{}, nil }) // Fake the lxc executable for all the tests. 
testing.PatchExecutableAsEchoArgs(c, s, "lxc") + testing.PatchExecutableAsEchoArgs(c, s, "lxd") } func (s *InitialiserSuite) TestLTSSeriesPackages(c *gc.C) { @@ -122,6 +122,21 @@ }) } +func (s *InitialiserSuite) TestLXDInitZFS(c *gc.C) { + // Patch df so it always returns 100GB + df100 := func(path string) (uint64, error) { + return 100 * 1024 * 1024 * 1024, nil + } + s.PatchValue(&df, df100) + + container := NewContainerInitialiser("xenial") + err := container.Initialise() + c.Assert(err, jc.ErrorIsNil) + + testing.AssertEchoArgs(c, "lxd", "init", "--auto", "--storage-backend", + "zfs", "--storage-pool", "lxd", "--storage-create-loop", "90") +} + type mockConfigSetter struct { keys []string values []string diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/container/lxd/instance.go juju-core-2.0.0/src/github.com/juju/juju/container/lxd/instance.go --- juju-core-2.0~beta15/src/github.com/juju/juju/container/lxd/instance.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/container/lxd/instance.go 2016-10-13 14:31:49.000000000 +0000 @@ -38,23 +38,23 @@ // Status implements instance.Instance.Status. 
func (lxd *lxdInstance) Status() instance.InstanceStatus { - jujuStatus := status.StatusPending + jujuStatus := status.Pending instStatus, err := lxd.client.Status(lxd.id) if err != nil { return instance.InstanceStatus{ - Status: status.StatusEmpty, + Status: status.Empty, Message: fmt.Sprintf("could not get status: %v", err), } } switch instStatus { case lxdclient.StatusStarting, lxdclient.StatusStarted: - jujuStatus = status.StatusAllocating + jujuStatus = status.Allocating case lxdclient.StatusRunning: - jujuStatus = status.StatusRunning + jujuStatus = status.Running case lxdclient.StatusFreezing, lxdclient.StatusFrozen, lxdclient.StatusThawed, lxdclient.StatusStopping, lxdclient.StatusStopped: - jujuStatus = status.StatusEmpty + jujuStatus = status.Empty default: - jujuStatus = status.StatusEmpty + jujuStatus = status.Empty } return instance.InstanceStatus{ Status: jujuStatus, diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/container/lxd/lxd.go juju-core-2.0.0/src/github.com/juju/juju/container/lxd/lxd.go --- juju-core-2.0~beta15/src/github.com/juju/juju/container/lxd/lxd.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/container/lxd/lxd.go 2016-10-13 14:31:49.000000000 +0000 @@ -51,7 +51,7 @@ return nil, errors.Trace(err) } - client, err := lxdclient.Connect(cfg) + client, err := lxdclient.Connect(cfg, false) if err != nil { return nil, errors.Trace(err) } @@ -96,7 +96,7 @@ defer func() { if err != nil { - callback(status.StatusProvisioningError, fmt.Sprintf("Creating container: %v", err), nil) + callback(status.ProvisioningError, fmt.Sprintf("Creating container: %v", err), nil) } }() @@ -111,7 +111,7 @@ err = manager.client.EnsureImageExists(series, lxdclient.DefaultImageSources, func(progress string) { - callback(status.StatusProvisioning, progress, nil) + callback(status.Provisioning, progress, nil) }) if err != nil { err = errors.Annotatef(err, "failed to ensure LXD image") @@ -123,7 +123,9 @@ return nil, nil, 
errors.Trace(err) } - userData, err := containerinit.CloudInitUserData(instanceConfig, networkConfig) + // Do not pass networkConfig, as we want to directly inject our own ENI + // rather than using cloud-init. + userData, err := containerinit.CloudInitUserData(instanceConfig, nil) if err != nil { return } @@ -143,6 +145,8 @@ return } + // TODO(macgreagoir) This might be dead code. Do we always get + // len(nics) > 0? profiles := []string{} if len(nics) == 0 { @@ -152,22 +156,44 @@ logger.Infof("instance %q configured with %v network devices", name, nics) } + // Push the required /etc/network/interfaces file to the container. + // By pushing this file (which happens after LXD init, and before LXD + // start) we ensure that we get Juju's version of ENI, as opposed to + // the default LXD version, which may assume it can do DHCP over eth0. + // Especially on a multi-nic host, it is possible for MAAS to provide + // DHCP on a different space to that which the container eth0 interface + // will be bridged, or not provide DHCP at all. 
+ eni, err := containerinit.GenerateNetworkConfig(networkConfig) + if err != nil { + err = errors.Annotatef(err, "failed to generate /etc/network/interfaces content") + return + } + spec := lxdclient.InstanceSpec{ Name: name, Image: manager.client.ImageNameForSeries(series), Metadata: metadata, Devices: nics, Profiles: profiles, + Files: lxdclient.Files{ + lxdclient.File{ + Content: []byte(eni), + Path: "/etc/network/interfaces", + GID: 0, + UID: 0, + Mode: 0644, + }, + }, } logger.Infof("starting instance %q (image %q)...", spec.Name, spec.Image) - callback(status.StatusProvisioning, "Starting container", nil) + callback(status.Provisioning, "Starting container", nil) _, err = manager.client.AddInstance(spec) if err != nil { return } - callback(status.StatusRunning, "Container started", nil) + callback(status.Running, "Container started", nil) inst = &lxdInstance{name, manager.client} return } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/container/testing/common.go juju-core-2.0.0/src/github.com/juju/juju/container/testing/common.go --- juju-core-2.0~beta15/src/github.com/juju/juju/container/testing/common.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/container/testing/common.go 2016-10-13 14:31:49.000000000 +0000 @@ -17,13 +17,14 @@ "github.com/juju/juju/instance" jujutesting "github.com/juju/juju/juju/testing" "github.com/juju/juju/status" + "github.com/juju/juju/testing" "github.com/juju/juju/tools" ) func MockMachineConfig(machineId string) (*instancecfg.InstanceConfig, error) { apiInfo := jujutesting.FakeAPIInfo(machineId) - instanceConfig, err := instancecfg.NewInstanceConfig(machineId, "fake-nonce", imagemetadata.ReleasedStream, "quantal", true, apiInfo) + instanceConfig, err := instancecfg.NewInstanceConfig(testing.ControllerTag, machineId, "fake-nonce", imagemetadata.ReleasedStream, "quantal", apiInfo) if err != nil { return nil, err } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/CONTRIBUTING.md 
juju-core-2.0.0/src/github.com/juju/juju/CONTRIBUTING.md --- juju-core-2.0~beta15/src/github.com/juju/juju/CONTRIBUTING.md 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/CONTRIBUTING.md 2016-10-13 14:31:49.000000000 +0000 @@ -16,12 +16,12 @@ * code review: http://reviews.vapour.ws/ Documentation: -* https://juju.ubuntu.com/docs/ +* https://jujucharms.com/docs/ * overview: http://blog.labix.org/2013/06/25/the-heart-of-juju * [other docs](doc/) Community: -* https://juju.ubuntu.com/community/ +* https://jujucharms.com/community/ * juju: https://lists.ubuntu.com/mailman/listinfo/juju * juju-dev: https://lists.ubuntu.com/mailman/listinfo/juju-dev * [#juju on freenode](http://webchat.freenode.net/?channels=juju) @@ -408,7 +408,7 @@ Take a look at the community page: - https://juju.ubuntu.com/community/ + https://jujucharms.com/community/ juju has two channels on IRC (freenode.net): diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/controller/config.go juju-core-2.0.0/src/github.com/juju/juju/controller/config.go --- juju-core-2.0~beta15/src/github.com/juju/juju/controller/config.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/controller/config.go 2016-10-13 14:31:49.000000000 +0000 @@ -18,8 +18,8 @@ var logger = loggo.GetLogger("juju.controller") const ( - // ApiPort is the port used for api connections. - ApiPort = "api-port" + // APIPort is the port used for api connections. + APIPort = "api-port" // AuditingEnabled determines whether the controller will record // auditing information. @@ -40,8 +40,26 @@ // IdentityPublicKey sets the public key of the identity manager. 
IdentityPublicKey = "identity-public-key" - // NumaControlPolicyKey stores the value for this setting - SetNumaControlPolicyKey = "set-numa-control-policy" + // NUMAControlPolicyKey stores the value for this setting + SetNUMAControlPolicyKey = "set-numa-control-policy" + + // AutocertDNSNameKey sets the DNS name of the controller. If a + // client connects to this name, an official certificate will be + // automatically requested. Connecting to any other host name + // will use the usual self-generated certificate. + AutocertDNSNameKey = "autocert-dns-name" + + // AutocertURLKey sets the URL used to obtain official TLS + // certificates when a client connects to the API. By default, + // certficates are obtains from LetsEncrypt. A good value for + // testing is + // "https://acme-staging.api.letsencrypt.org/directory". + AutocertURLKey = "autocert-url" + + // AllowModelAccessKey sets whether the controller will allow users to + // connect to models they have been authorized for even when + // they don't have any access rights to the controller itself. + AllowModelAccessKey = "allow-model-access" // Attribute Defaults @@ -49,27 +67,30 @@ // AuditingEnabled config value. DefaultAuditingEnabled = false - // DefaultNumaControlPolicy should not be used by default. + // DefaultNUMAControlPolicy should not be used by default. // Only use numactl if user specifically requests it - DefaultNumaControlPolicy = false + DefaultNUMAControlPolicy = false // DefaultStatePort is the default port the controller is listening on. DefaultStatePort int = 37017 - // DefaultApiPort is the default port the API server is listening on. + // DefaultAPIPort is the default port the API server is listening on. DefaultAPIPort int = 17070 ) // ControllerOnlyConfigAttributes are attributes which are only relevant // for a controller, never a model. 
var ControllerOnlyConfigAttributes = []string{ - ApiPort, - StatePort, + AllowModelAccessKey, + APIPort, + AutocertDNSNameKey, + AutocertURLKey, CACertKey, ControllerUUIDKey, - IdentityURL, IdentityPublicKey, - SetNumaControlPolicyKey, + IdentityURL, + SetNUMAControlPolicyKey, + StatePort, } // ControllerOnlyAttribute returns true if the specified attribute name @@ -148,7 +169,7 @@ // APIPort returns the API server port for the environment. func (c Config) APIPort() int { - return c.mustInt(ApiPort) + return c.mustInt(APIPort) } // AuditingEnabled returns whether or not auditing has been enabled @@ -179,6 +200,19 @@ return c.asString(IdentityURL) } +// AutocertURL returns the URL used to obtain official TLS certificates +// when a client connects to the API. See AutocertURLKey +// for more details. +func (c Config) AutocertURL() string { + return c.asString(AutocertURLKey) +} + +// AutocertDNSName returns the DNS name of the controller. +// See AutocertDNSNameKey for more details. +func (c Config) AutocertDNSName() string { + return c.asString(AutocertDNSNameKey) +} + // IdentityPublicKey returns the public key of the identity manager. func (c Config) IdentityPublicKey() *bakery.PublicKey { key := c.asString(IdentityPublicKey) @@ -195,31 +229,42 @@ return &pubKey } -// NumaCtlPreference returns if numactl is preferred. -func (c Config) NumaCtlPreference() bool { - if numa, ok := c[SetNumaControlPolicyKey]; ok { +// NUMACtlPreference returns if numactl is preferred. +func (c Config) NUMACtlPreference() bool { + if numa, ok := c[SetNUMAControlPolicyKey]; ok { return numa.(bool) } - return DefaultNumaControlPolicy + return DefaultNUMAControlPolicy +} + +// AllowModelAccess reports whether users are allowed to access models +// they have been granted permission for even when they can't access +// the controller. 
+func (c Config) AllowModelAccess() bool { + value, _ := c[AllowModelAccessKey].(bool) + return value } // Validate ensures that config is a valid configuration. func Validate(c Config) error { + if v, ok := c[IdentityPublicKey].(string); ok { + var key bakery.PublicKey + if err := key.UnmarshalText([]byte(v)); err != nil { + return errors.Annotate(err, "invalid identity public key") + } + } + if v, ok := c[IdentityURL].(string); ok { u, err := url.Parse(v) if err != nil { return errors.Annotate(err, "invalid identity URL") } - if u.Scheme != "https" { - return errors.Errorf("URL needs to be https") - } - - } - - if v, ok := c[IdentityPublicKey].(string); ok { - var key bakery.PublicKey - if err := key.UnmarshalText([]byte(v)); err != nil { - return errors.Annotate(err, "invalid identity public key") + // If we've got an identity public key, we allow an HTTP + // scheme for the identity server because we won't need + // to rely on insecure transport to obtain the public + // key. + if _, ok := c[IdentityPublicKey]; !ok && u.Scheme != "https" { + return errors.Errorf("URL needs to be https when %s not provided", IdentityPublicKey) } } @@ -246,16 +291,22 @@ var configChecker = schema.FieldMap(schema.Fields{ AuditingEnabled: schema.Bool(), - ApiPort: schema.ForceInt(), + APIPort: schema.ForceInt(), StatePort: schema.ForceInt(), IdentityURL: schema.String(), IdentityPublicKey: schema.String(), - SetNumaControlPolicyKey: schema.Bool(), + SetNUMAControlPolicyKey: schema.Bool(), + AutocertURLKey: schema.String(), + AutocertDNSNameKey: schema.String(), + AllowModelAccessKey: schema.Bool(), }, schema.Defaults{ - ApiPort: DefaultAPIPort, + APIPort: DefaultAPIPort, AuditingEnabled: DefaultAuditingEnabled, StatePort: DefaultStatePort, IdentityURL: schema.Omit, IdentityPublicKey: schema.Omit, - SetNumaControlPolicyKey: DefaultNumaControlPolicy, + SetNUMAControlPolicyKey: DefaultNUMAControlPolicy, + AutocertURLKey: schema.Omit, + AutocertDNSNameKey: schema.Omit, + 
AllowModelAccessKey: schema.Omit, }) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/controller/config_test.go juju-core-2.0.0/src/github.com/juju/juju/controller/config_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/controller/config_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/controller/config_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -72,3 +72,64 @@ c.Assert(sanIPs, jc.SameContents, test.sanValues) } } + +var validateTests = []struct { + about string + config controller.Config + expectError string +}{{ + about: "missing CA cert", + expectError: `missing CA certificate`, +}, { + about: "bad CA cert", + config: controller.Config{ + controller.CACertKey: "xxx", + }, + expectError: `bad CA certificate in configuration: no certificates found`, +}, { + about: "bad controller UUID", + config: controller.Config{ + controller.ControllerUUIDKey: "xxx", + controller.CACertKey: testing.CACert, + }, + expectError: `controller-uuid: expected UUID, got string\("xxx"\)`, +}, { + about: "HTTPS identity URL OK", + config: controller.Config{ + controller.IdentityURL: "https://0.1.2.3/foo", + controller.CACertKey: testing.CACert, + }, +}, { + about: "HTTP identity URL requires public key", + config: controller.Config{ + controller.IdentityURL: "http://0.1.2.3/foo", + controller.CACertKey: testing.CACert, + }, + expectError: `URL needs to be https when identity-public-key not provided`, +}, { + about: "HTTP identity URL OK if public key is provided", + config: controller.Config{ + controller.IdentityPublicKey: `o/yOqSNWncMo1GURWuez/dGR30TscmmuIxgjztpoHEY=`, + controller.IdentityURL: "http://0.1.2.3/foo", + controller.CACertKey: testing.CACert, + }, +}, { + about: "invalid identity public key", + config: controller.Config{ + controller.IdentityPublicKey: `xxxx`, + controller.CACertKey: testing.CACert, + }, + expectError: `invalid identity public key: wrong length for base64 key, got 3 want 32`, +}} + +func (s 
*ConfigSuite) TestValidate(c *gc.C) { + for i, test := range validateTests { + c.Logf("test %d: %v", i, test.about) + err := test.config.Validate() + if test.expectError != "" { + c.Assert(err, gc.ErrorMatches, test.expectError) + } else { + c.Assert(err, jc.ErrorIsNil) + } + } +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/controller/modelmanager/createmodel.go juju-core-2.0.0/src/github.com/juju/juju/controller/modelmanager/createmodel.go --- juju-core-2.0~beta15/src/github.com/juju/juju/controller/modelmanager/createmodel.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/controller/modelmanager/createmodel.go 2016-10-13 14:31:49.000000000 +0000 @@ -48,7 +48,6 @@ // The config will be validated with the provider before being returned. func (c ModelConfigCreator) NewModelConfig( cloud environs.CloudSpec, - controllerUUID string, base *config.Config, attrs map[string]interface{}, ) (*config.Config, error) { @@ -89,7 +88,7 @@ } attrs[config.UUIDKey] = uuid.String() } - cfg, err := finalizeConfig(provider, cloud, controllerUUID, attrs) + cfg, err := finalizeConfig(provider, cloud, attrs) if err != nil { return nil, errors.Trace(err) } @@ -176,7 +175,6 @@ var fields []string // For now, all models in a controller must be of the same type. fields = append(fields, config.TypeKey) - fields = append(fields, provider.RestrictedConfigAttributes()...) 
return fields, nil } @@ -185,7 +183,6 @@ func finalizeConfig( provider environs.EnvironProvider, cloud environs.CloudSpec, - controllerUUID string, attrs map[string]interface{}, ) (*config.Config, error) { cfg, err := config.New(config.UseDefaults, attrs) @@ -193,9 +190,8 @@ return nil, errors.Annotate(err, "creating config from values failed") } cfg, err = provider.PrepareConfig(environs.PrepareConfigParams{ - ControllerUUID: controllerUUID, - Cloud: cloud, - Config: cfg, + Cloud: cloud, + Config: cfg, }) if err != nil { return nil, errors.Annotate(err, "provider config preparation failed") diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/controller/modelmanager/createmodel_test.go juju-core-2.0.0/src/github.com/juju/juju/controller/modelmanager/createmodel_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/controller/modelmanager/createmodel_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/controller/modelmanager/createmodel_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -12,7 +12,6 @@ gc "gopkg.in/check.v1" "github.com/juju/juju/cloud" - "github.com/juju/juju/controller" "github.com/juju/juju/controller/modelmanager" "github.com/juju/juju/environs" "github.com/juju/juju/environs/config" @@ -58,17 +57,12 @@ }), ) c.Assert(err, jc.ErrorIsNil) - - // TODO(wallyworld) - we need to separate controller and model schemas - // Remove any remaining controller attributes from the env config. 
- baseConfig, err = baseConfig.Remove(controller.ControllerOnlyConfigAttributes) - c.Assert(err, jc.ErrorIsNil) s.baseConfig = baseConfig } func (s *ModelConfigCreatorSuite) newModelConfig(attrs map[string]interface{}) (*config.Config, error) { cloudSpec := environs.CloudSpec{Type: "fake"} - return s.creator.NewModelConfig(cloudSpec, coretesting.ModelTag.Id(), s.baseConfig, attrs) + return s.creator.NewModelConfig(cloudSpec, s.baseConfig, attrs) } func (s *ModelConfigCreatorSuite) TestCreateModelValidatesConfig(c *gc.C) { @@ -88,11 +82,10 @@ c.Assert(cfg.AllAttrs(), jc.DeepEquals, expected) s.fake.Stub.CheckCallNames(c, - "RestrictedConfigAttributes", "PrepareConfig", "Validate", ) - validateCall := s.fake.Stub.Calls()[2] + validateCall := s.fake.Stub.Calls()[1] c.Assert(validateCall.Args, gc.HasLen, 2) c.Assert(validateCall.Args[0], gc.Equals, cfg) c.Assert(validateCall.Args[1], gc.IsNil) @@ -107,10 +100,6 @@ key: "type", value: "dummy", errMatch: `specified type "dummy" does not match controller "fake"`, - }, { - key: "restricted", - value: 51, - errMatch: `specified restricted "51" does not match controller "area51"`, }} { c.Logf("%d: %s", i, test.key) _, err := s.newModelConfig(coretesting.Attrs( @@ -222,10 +211,7 @@ expected: []string{"type"}, }, { provider: "ec2", - expected: []string{ - "type", - "vpc-id-force", - }, + expected: []string{"type"}, }} { c.Logf("%d: %s provider", i, test.provider) provider, err := environs.Provider(test.provider) @@ -242,11 +228,6 @@ restrictedConfigAttributes []string } -func (p *fakeProvider) RestrictedConfigAttributes() []string { - p.MethodCall(p, "RestrictedConfigAttributes") - return p.restrictedConfigAttributes -} - func (p *fakeProvider) Validate(cfg, old *config.Config) (*config.Config, error) { p.MethodCall(p, "Validate", cfg, old) return cfg, p.NextErr() diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/core/description/access.go juju-core-2.0.0/src/github.com/juju/juju/core/description/access.go --- 
juju-core-2.0~beta15/src/github.com/juju/juju/core/description/access.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/core/description/access.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,133 +0,0 @@ -// Copyright 2016 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package description - -import ( - "github.com/juju/errors" - "github.com/juju/schema" -) - -// Access represents a level of access. -type Access string - -const ( - // UndefinedAccess is not a valid access type. It is the value - // used when access is not defined at all. - UndefinedAccess Access = "" - - // Model Permissions - - // ReadAccess allows a user to read information about a permission subject, - // without being able to make any changes. - ReadAccess Access = "read" - - // WriteAccess allows a user to make changes to a permission subject. - WriteAccess Access = "write" - - // AdminAccess allows a user full control over the subject. - AdminAccess Access = "admin" - - // Controller permissions - - // LoginAccess allows a user to log-ing into the subject. - LoginAccess Access = "login" - - // AddModelAccess allows user to add new models in subjects supporting it. - AddModelAccess Access = "addmodel" - - // SuperuserAccess allows user unrestricted permissions in the subject. - SuperuserAccess Access = "superuser" -) - -// Validate returns error if the current is not a valid access level. -func (a Access) Validate() error { - switch a { - case UndefinedAccess, AdminAccess, ReadAccess, WriteAccess, - LoginAccess, AddModelAccess, SuperuserAccess: - return nil - } - return errors.NotValidf("access level %s", a) -} - -// ValidateModelAccess returns error if the passed access is not a valid -// model access level. 
-func ValidateModelAccess(access Access) error { - switch access { - case ReadAccess, WriteAccess, AdminAccess: - return nil - } - return errors.NotValidf("%q model access", access) -} - -//ValidateControllerAccess returns error if the passed access is not a valid -// controller access level. -func ValidateControllerAccess(access Access) error { - switch access { - case LoginAccess, AddModelAccess, SuperuserAccess: - return nil - } - return errors.NotValidf("%q controller access", access) -} - -// EqualOrGreaterAccessThan returns true if the provided access is equal or -// less than the current. -func (a Access) EqualOrGreaterModelAccessThan(access Access) bool { - if a == access { - return true - } - switch a { - case UndefinedAccess: - return false - case ReadAccess: - return access == UndefinedAccess - case WriteAccess: - return access == ReadAccess || - access == UndefinedAccess - case AdminAccess: - return access == ReadAccess || - access == WriteAccess - } - return false -} - -func (a Access) EqualOrGreaterControllerAccessThan(access Access) bool { - if a == access { - return true - } - switch a { - case UndefinedAccess: - return false - case LoginAccess: - return access == UndefinedAccess - case AddModelAccess: - return access == UndefinedAccess || - access == LoginAccess - case SuperuserAccess: - return access == UndefinedAccess || - access == LoginAccess || - access == AddModelAccess - } - return false -} - -// accessField returns a Checker that accepts a string value only -// and returns a valid Access or an error. 
-func accessField() schema.Checker { - return accessC{} -} - -type accessC struct{} - -func (c accessC) Coerce(v interface{}, path []string) (interface{}, error) { - s := schema.String() - in, err := s.Coerce(v, path) - if err != nil { - return nil, err - } - access := Access(in.(string)) - if err := access.Validate(); err != nil { - return nil, errors.Trace(err) - } - return access, nil -} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/core/description/action.go juju-core-2.0.0/src/github.com/juju/juju/core/description/action.go --- juju-core-2.0~beta15/src/github.com/juju/juju/core/description/action.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/core/description/action.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,214 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package description + +import ( + "time" + + "github.com/juju/errors" + "github.com/juju/schema" +) + +type actions struct { + Version int `yaml:"version"` + Actions_ []*action `yaml:"actions"` +} + +type action struct { + Id_ string `yaml:"id"` + Receiver_ string `yaml:"receiver"` + Name_ string `yaml:"name"` + Parameters_ map[string]interface{} `yaml:"parameters"` + Enqueued_ time.Time `yaml:"enqueued"` + // Can't use omitempty with time.Time, it just doesn't work + // (nothing is serialised), so use a pointer in the struct. + Started_ *time.Time `yaml:"started,omitempty"` + Completed_ *time.Time `yaml:"completed,omitempty"` + Status_ string `yaml:"status"` + Message_ string `yaml:"message"` + Results_ map[string]interface{} `yaml:"results"` +} + +// Id implements Action. +func (i *action) Id() string { + return i.Id_ +} + +// Receiver implements Action. +func (i *action) Receiver() string { + return i.Receiver_ +} + +// Name implements Action. +func (i *action) Name() string { + return i.Name_ +} + +// Parameters implements Action. 
+func (i *action) Parameters() map[string]interface{} { + return i.Parameters_ +} + +// Enqueued implements Action. +func (i *action) Enqueued() time.Time { + return i.Enqueued_ +} + +// Started implements Action. +func (i *action) Started() time.Time { + var zero time.Time + if i.Started_ == nil { + return zero + } + return *i.Started_ +} + +// Completed implements Action. +func (i *action) Completed() time.Time { + var zero time.Time + if i.Completed_ == nil { + return zero + } + return *i.Completed_ +} + +// Status implements Action. +func (i *action) Status() string { + return i.Status_ +} + +// Message implements Action. +func (i *action) Message() string { + return i.Message_ +} + +// Results implements Action. +func (i *action) Results() map[string]interface{} { + return i.Results_ +} + +// ActionArgs is an argument struct used to create a +// new internal action type that supports the Action interface. +type ActionArgs struct { + Id string + Receiver string + Name string + Parameters map[string]interface{} + Enqueued time.Time + Started time.Time + Completed time.Time + Status string + Message string + Results map[string]interface{} +} + +func newAction(args ActionArgs) *action { + action := &action{ + Receiver_: args.Receiver, + Name_: args.Name, + Parameters_: args.Parameters, + Enqueued_: args.Enqueued, + Status_: args.Status, + Message_: args.Message, + Id_: args.Id, + Results_: args.Results, + } + if !args.Started.IsZero() { + value := args.Started + action.Started_ = &value + } + if !args.Completed.IsZero() { + value := args.Completed + action.Completed_ = &value + } + return action +} + +func importActions(source map[string]interface{}) ([]*action, error) { + checker := versionedChecker("actions") + coerced, err := checker.Coerce(source, nil) + if err != nil { + return nil, errors.Annotatef(err, "actions version schema check failed") + } + valid := coerced.(map[string]interface{}) + + version := int(valid["version"].(int64)) + importFunc, ok := 
actionDeserializationFuncs[version] + if !ok { + return nil, errors.NotValidf("version %d", version) + } + sourceList := valid["actions"].([]interface{}) + return importActionList(sourceList, importFunc) +} + +func importActionList(sourceList []interface{}, importFunc actionDeserializationFunc) ([]*action, error) { + result := make([]*action, 0, len(sourceList)) + for i, value := range sourceList { + source, ok := value.(map[string]interface{}) + if !ok { + return nil, errors.Errorf("unexpected value for action %d, %T", i, value) + } + action, err := importFunc(source) + if err != nil { + return nil, errors.Annotatef(err, "action %d", i) + } + result = append(result, action) + } + return result, nil +} + +type actionDeserializationFunc func(map[string]interface{}) (*action, error) + +var actionDeserializationFuncs = map[int]actionDeserializationFunc{ + 1: importActionV1, +} + +func importActionV1(source map[string]interface{}) (*action, error) { + fields := schema.Fields{ + "receiver": schema.String(), + "name": schema.String(), + "parameters": schema.StringMap(schema.Any()), + "enqueued": schema.Time(), + "started": schema.Time(), + "completed": schema.Time(), + "status": schema.String(), + "message": schema.String(), + "results": schema.StringMap(schema.Any()), + "id": schema.String(), + } + // Some values don't have to be there. 
+ defaults := schema.Defaults{ + "started": time.Time{}, + "completed": time.Time{}, + } + checker := schema.FieldMap(fields, defaults) + + coerced, err := checker.Coerce(source, nil) + if err != nil { + return nil, errors.Annotatef(err, "action v1 schema check failed") + } + valid := coerced.(map[string]interface{}) + action := &action{ + Id_: valid["id"].(string), + Receiver_: valid["receiver"].(string), + Name_: valid["name"].(string), + Status_: valid["status"].(string), + Message_: valid["message"].(string), + Parameters_: valid["parameters"].(map[string]interface{}), + Enqueued_: valid["enqueued"].(time.Time).UTC(), + Results_: valid["results"].(map[string]interface{}), + } + + started := valid["started"].(time.Time) + if !started.IsZero() { + started = started.UTC() + action.Started_ = &started + } + completed := valid["completed"].(time.Time) + if !started.IsZero() { + completed = completed.UTC() + action.Completed_ = &completed + } + return action, nil +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/core/description/action_test.go juju-core-2.0.0/src/github.com/juju/juju/core/description/action_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/core/description/action_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/core/description/action_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,94 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+ +package description + +import ( + "time" + + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + "gopkg.in/yaml.v2" +) + +type ActionSerializationSuite struct { + SliceSerializationSuite +} + +var _ = gc.Suite(&ActionSerializationSuite{}) + +func (s *ActionSerializationSuite) SetUpTest(c *gc.C) { + s.SliceSerializationSuite.SetUpTest(c) + s.importName = "actions" + s.sliceName = "actions" + s.importFunc = func(m map[string]interface{}) (interface{}, error) { + return importActions(m) + } + s.testFields = func(m map[string]interface{}) { + m["actions"] = []interface{}{} + } +} + +func (s *ActionSerializationSuite) TestNewAction(c *gc.C) { + args := ActionArgs{ + Id: "foo", + Receiver: "bar", + Name: "bam", + Parameters: map[string]interface{}{"foo": 3, "bar": "bam"}, + Enqueued: time.Now(), + Started: time.Now(), + Completed: time.Now(), + Status: "happy", + Message: "a message", + Results: map[string]interface{}{"the": 3, "thing": "bam"}, + } + action := newAction(args) + c.Check(action.Id(), gc.Equals, args.Id) + c.Check(action.Receiver(), gc.Equals, args.Receiver) + c.Check(action.Name(), gc.Equals, args.Name) + c.Check(action.Parameters(), jc.DeepEquals, args.Parameters) + c.Check(action.Enqueued(), gc.Equals, args.Enqueued) + c.Check(action.Started(), gc.Equals, args.Started) + c.Check(action.Completed(), gc.Equals, args.Completed) + c.Check(action.Status(), gc.Equals, args.Status) + c.Check(action.Message(), gc.Equals, args.Message) + c.Check(action.Results(), jc.DeepEquals, args.Results) +} + +func (s *ActionSerializationSuite) TestParsingSerializedData(c *gc.C) { + initial := actions{ + Version: 1, + Actions_: []*action{ + newAction(ActionArgs{ + Id: "foo", + Receiver: "bar", + Name: "bam", + Parameters: map[string]interface{}{"foo": 3, "bar": "bam"}, + Enqueued: time.Now().UTC(), + Started: time.Now().UTC(), + Completed: time.Now().UTC(), + Status: "happy", + Message: "a message", + Results: map[string]interface{}{"the": 3, "thing": "bam"}, 
+ }), + newAction(ActionArgs{ + Name: "bing", + Enqueued: time.Now().UTC(), + Parameters: map[string]interface{}{"bop": 4, "beep": "fish"}, + Results: map[string]interface{}{"eggs": 5, "spam": "wow"}, + }), + }, + } + + bytes, err := yaml.Marshal(initial) + c.Assert(err, jc.ErrorIsNil) + + var source map[string]interface{} + err = yaml.Unmarshal(bytes, &source) + c.Assert(err, jc.ErrorIsNil) + + actions, err := importActions(source) + c.Assert(err, jc.ErrorIsNil) + + c.Assert(actions, jc.DeepEquals, initial.Actions_) +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/core/description/application.go juju-core-2.0.0/src/github.com/juju/juju/core/description/application.go --- juju-core-2.0~beta15/src/github.com/juju/juju/core/description/application.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/core/description/application.go 2016-10-13 14:31:49.000000000 +0000 @@ -35,8 +35,7 @@ Status_ *status `yaml:"status"` StatusHistory_ `yaml:"status-history"` - Settings_ map[string]interface{} `yaml:"settings"` - SettingsRefCount_ int `yaml:"settings-refcount"` + Settings_ map[string]interface{} `yaml:"settings"` Leader_ string `yaml:"leader,omitempty"` LeadershipSettings_ map[string]interface{} `yaml:"leadership-settings"` @@ -48,9 +47,8 @@ Annotations_ `yaml:"annotations,omitempty"` - Constraints_ *constraints `yaml:"constraints,omitempty"` - - // Storage Constraints + Constraints_ *constraints `yaml:"constraints,omitempty"` + StorageConstraints_ map[string]*storageconstraint `yaml:"storage-constraints,omitempty"` } // ApplicationArgs is an argument struct used to add an application to the Model. 
@@ -65,15 +63,15 @@ Exposed bool MinUnits int Settings map[string]interface{} - SettingsRefCount int Leader string LeadershipSettings map[string]interface{} + StorageConstraints map[string]StorageConstraintArgs MetricsCredentials []byte } func newApplication(args ApplicationArgs) *application { creds := base64.StdEncoding.EncodeToString(args.MetricsCredentials) - svc := &application{ + app := &application{ Name_: args.Tag.Id(), Series_: args.Series, Subordinate_: args.Subordinate, @@ -84,14 +82,19 @@ Exposed_: args.Exposed, MinUnits_: args.MinUnits, Settings_: args.Settings, - SettingsRefCount_: args.SettingsRefCount, Leader_: args.Leader, LeadershipSettings_: args.LeadershipSettings, MetricsCredentials_: creds, StatusHistory_: newStatusHistory(), } - svc.setUnits(nil) - return svc + app.setUnits(nil) + if len(args.StorageConstraints) > 0 { + app.StorageConstraints_ = make(map[string]*storageconstraint) + for key, value := range args.StorageConstraints { + app.StorageConstraints_[key] = newStorageConstraint(value) + } + } + return app } // Tag implements Application. @@ -149,11 +152,6 @@ return s.Settings_ } -// SettingsRefCount implements Application. -func (s *application) SettingsRefCount() int { - return s.SettingsRefCount_ -} - // Leader implements Application. func (s *application) Leader() string { return s.Leader_ @@ -164,6 +162,15 @@ return s.LeadershipSettings_ } +// StorageConstraints implements Application. +func (a *application) StorageConstraints() map[string]StorageConstraint { + result := make(map[string]StorageConstraint) + for key, value := range a.StorageConstraints_ { + result[key] = value + } + return result +} + // MetricsCredentials implements Application. func (s *application) MetricsCredentials() []byte { // Here we are explicitly throwing away any decode error. 
We check that @@ -310,20 +317,21 @@ "min-units": schema.Int(), "status": schema.StringMap(schema.Any()), "settings": schema.StringMap(schema.Any()), - "settings-refcount": schema.Int(), "leader": schema.String(), "leadership-settings": schema.StringMap(schema.Any()), + "storage-constraints": schema.StringMap(schema.StringMap(schema.Any())), "metrics-creds": schema.String(), "units": schema.StringMap(schema.Any()), } defaults := schema.Defaults{ - "subordinate": false, - "force-charm": false, - "exposed": false, - "min-units": int64(0), - "leader": "", - "metrics-creds": "", + "subordinate": false, + "force-charm": false, + "exposed": false, + "min-units": int64(0), + "leader": "", + "metrics-creds": "", + "storage-constraints": schema.Omit, } addAnnotationSchema(fields, defaults) addConstraintsSchema(fields, defaults) @@ -348,7 +356,6 @@ Exposed_: valid["exposed"].(bool), MinUnits_: int(valid["min-units"].(int64)), Settings_: valid["settings"].(map[string]interface{}), - SettingsRefCount_: int(valid["settings-refcount"].(int64)), Leader_: valid["leader"].(string), LeadershipSettings_: valid["leadership-settings"].(map[string]interface{}), StatusHistory_: newStatusHistory(), @@ -366,6 +373,14 @@ result.Constraints_ = constraints } + if constraintsMap, ok := valid["storage-constraints"]; ok { + constraints, err := importStorageConstraints(constraintsMap.(map[string]interface{})) + if err != nil { + return nil, errors.Trace(err) + } + result.StorageConstraints_ = constraints + } + encodedCreds := valid["metrics-creds"].(string) // The model stores the creds encoded, but we want to make sure that // we are storing something that can be decoded. 
diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/core/description/application_test.go juju-core-2.0.0/src/github.com/juju/juju/core/description/application_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/core/description/application_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/core/description/application_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -47,8 +47,7 @@ "settings": map[interface{}]interface{}{ "key": "value", }, - "settings-refcount": 1, - "leader": "ubuntu/0", + "leader": "ubuntu/0", "leadership-settings": map[interface{}]interface{}{ "leader": true, }, @@ -62,8 +61,11 @@ } } -func minimalApplication() *application { - s := newApplication(minimalApplicationArgs()) +func minimalApplication(args ...ApplicationArgs) *application { + if len(args) == 0 { + args = []ApplicationArgs{minimalApplicationArgs()} + } + s := newApplication(args[0]) s.SetStatus(minimalStatusArgs()) u := s.AddUnit(minimalUnitArgs()) u.SetAgentStatus(minimalStatusArgs()) @@ -91,8 +93,7 @@ Settings: map[string]interface{}{ "key": "value", }, - SettingsRefCount: 1, - Leader: "ubuntu/0", + Leader: "ubuntu/0", LeadershipSettings: map[string]interface{}{ "leader": true, }, @@ -114,8 +115,7 @@ Settings: map[string]interface{}{ "key": "value", }, - SettingsRefCount: 1, - Leader: "magic/1", + Leader: "magic/1", LeadershipSettings: map[string]interface{}{ "leader": true, }, @@ -134,7 +134,6 @@ c.Assert(application.Exposed(), jc.IsTrue) c.Assert(application.MinUnits(), gc.Equals, 42) c.Assert(application.Settings(), jc.DeepEquals, args.Settings) - c.Assert(application.SettingsRefCount(), gc.Equals, 1) c.Assert(application.Leader(), gc.Equals, "magic/1") c.Assert(application.LeadershipSettings(), jc.DeepEquals, args.LeadershipSettings) c.Assert(application.MetricsCredentials(), jc.DeepEquals, []byte("sekrit")) @@ -205,6 +204,31 @@ c.Assert(application.Constraints(), jc.DeepEquals, newConstraints(args)) } +func (s 
*ApplicationSerializationSuite) TestStorageConstraints(c *gc.C) { + args := minimalApplicationArgs() + args.StorageConstraints = map[string]StorageConstraintArgs{ + "first": {Pool: "first", Size: 1234, Count: 1}, + "second": {Pool: "second", Size: 4321, Count: 7}, + } + initial := minimalApplication(args) + + application := s.exportImport(c, initial) + + constraints := application.StorageConstraints() + c.Assert(constraints, gc.HasLen, 2) + first, found := constraints["first"] + c.Assert(found, jc.IsTrue) + c.Check(first.Pool(), gc.Equals, "first") + c.Check(first.Size(), gc.Equals, uint64(1234)) + c.Check(first.Count(), gc.Equals, uint64(1)) + + second, found := constraints["second"] + c.Assert(found, jc.IsTrue) + c.Check(second.Pool(), gc.Equals, "second") + c.Check(second.Size(), gc.Equals, uint64(4321)) + c.Check(second.Count(), gc.Equals, uint64(7)) +} + func (s *ApplicationSerializationSuite) TestLeaderValid(c *gc.C) { args := minimalApplicationArgs() args.Leader = "ubuntu/1" diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/core/description/blockdevice.go juju-core-2.0.0/src/github.com/juju/juju/core/description/blockdevice.go --- juju-core-2.0~beta15/src/github.com/juju/juju/core/description/blockdevice.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/core/description/blockdevice.go 2016-10-13 14:31:49.000000000 +0000 @@ -174,7 +174,7 @@ "uuid": schema.String(), "hardware-id": schema.String(), "bus-address": schema.String(), - "size": schema.Uint(), + "size": schema.ForceUint(), "fs-type": schema.String(), "in-use": schema.Bool(), "mount-point": schema.String(), diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/core/description/cloudimagemetadata.go juju-core-2.0.0/src/github.com/juju/juju/core/description/cloudimagemetadata.go --- juju-core-2.0~beta15/src/github.com/juju/juju/core/description/cloudimagemetadata.go 1970-01-01 00:00:00.000000000 +0000 +++ 
juju-core-2.0.0/src/github.com/juju/juju/core/description/cloudimagemetadata.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,217 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package description + +import ( + "github.com/juju/errors" + "github.com/juju/schema" +) + +type cloudimagemetadataset struct { + Version int `yaml:"version"` + CloudImageMetadata_ []*cloudimagemetadata `yaml:"cloudimagemetadata"` +} + +type cloudimagemetadata struct { + Stream_ string `yaml:"stream"` + Region_ string `yaml:"region"` + Version_ string `yaml:"version"` + Series_ string `yaml:"series"` + Arch_ string `yaml:"arch"` + VirtType_ string `yaml:"virt-type"` + RootStorageType_ string `yaml:"root-storage-type"` + RootStorageSize_ *uint64 `yaml:"root-storage-size,omitempty"` + DateCreated_ int64 `yaml:"date-created"` + Source_ string `yaml:"source"` + Priority_ int `yaml:"priority"` + ImageId_ string `yaml:"image-id"` +} + +// Stream implements CloudImageMetadata. +func (i *cloudimagemetadata) Stream() string { + return i.Stream_ +} + +// Region implements CloudImageMetadata. +func (i *cloudimagemetadata) Region() string { + return i.Region_ +} + +// Version implements CloudImageMetadata. +func (i *cloudimagemetadata) Version() string { + return i.Version_ +} + +// Series implements CloudImageMetadata. +func (i *cloudimagemetadata) Series() string { + return i.Series_ +} + +// Arch implements CloudImageMetadata. +func (i *cloudimagemetadata) Arch() string { + return i.Arch_ +} + +// VirtType implements CloudImageMetadata. +func (i *cloudimagemetadata) VirtType() string { + return i.VirtType_ +} + +// RootStorageType implements CloudImageMetadata. +func (i *cloudimagemetadata) RootStorageType() string { + return i.RootStorageType_ +} + +// RootStorageSize implements CloudImageMetadata. 
+func (i *cloudimagemetadata) RootStorageSize() (uint64, bool) { + if i.RootStorageSize_ == nil { + return 0, false + } + return *i.RootStorageSize_, true +} + +// DateCreated implements CloudImageMetadata. +func (i *cloudimagemetadata) DateCreated() int64 { + return i.DateCreated_ +} + +// Source implements CloudImageMetadata. +func (i *cloudimagemetadata) Source() string { + return i.Source_ +} + +// Priority implements CloudImageMetadata. +func (i *cloudimagemetadata) Priority() int { + return i.Priority_ +} + +//ImageId implements CloudImageMetadata. +func (i *cloudimagemetadata) ImageId() string { + return i.ImageId_ +} + +// CloudImageMetadataArgs is an argument struct used to create a +// new internal cloudimagemetadata type that supports the CloudImageMetadata interface. +type CloudImageMetadataArgs struct { + Stream string + Region string + Version string + Series string + Arch string + VirtType string + RootStorageType string + RootStorageSize *uint64 + DateCreated int64 + Source string + Priority int + ImageId string +} + +func newCloudImageMetadata(args CloudImageMetadataArgs) *cloudimagemetadata { + cloudimagemetadata := &cloudimagemetadata{ + Stream_: args.Stream, + Region_: args.Region, + Version_: args.Version, + Series_: args.Series, + Arch_: args.Arch, + VirtType_: args.VirtType, + RootStorageType_: args.RootStorageType, + RootStorageSize_: args.RootStorageSize, + DateCreated_: args.DateCreated, + Source_: args.Source, + Priority_: args.Priority, + ImageId_: args.ImageId, + } + return cloudimagemetadata +} + +func importCloudImageMetadata(source map[string]interface{}) ([]*cloudimagemetadata, error) { + checker := versionedChecker("cloudimagemetadata") + coerced, err := checker.Coerce(source, nil) + if err != nil { + return nil, errors.Annotatef(err, "cloudimagemetadata version schema check failed") + } + valid := coerced.(map[string]interface{}) + + version := int(valid["version"].(int64)) + importFunc, ok := 
cloudimagemetadataDeserializationFuncs[version] + if !ok { + return nil, errors.NotValidf("version %d", version) + } + sourceList := valid["cloudimagemetadata"].([]interface{}) + return importCloudImageMetadataList(sourceList, importFunc) +} + +func importCloudImageMetadataList(sourceList []interface{}, importFunc cloudimagemetadataDeserializationFunc) ([]*cloudimagemetadata, error) { + result := make([]*cloudimagemetadata, 0, len(sourceList)) + for i, value := range sourceList { + source, ok := value.(map[string]interface{}) + if !ok { + return nil, errors.Errorf("unexpected type for cloudimagemetadata %d, %#v", i, value) + } + cloudimagemetadata, err := importFunc(source) + if err != nil { + return nil, errors.Annotatef(err, "cloudimagemetadata %d", i) + } + result = append(result, cloudimagemetadata) + } + return result, nil +} + +type cloudimagemetadataDeserializationFunc func(map[string]interface{}) (*cloudimagemetadata, error) + +var cloudimagemetadataDeserializationFuncs = map[int]cloudimagemetadataDeserializationFunc{ + 1: importCloudImageMetadataV1, +} + +func importCloudImageMetadataV1(source map[string]interface{}) (*cloudimagemetadata, error) { + fields := schema.Fields{ + "stream": schema.String(), + "region": schema.String(), + "version": schema.String(), + "series": schema.String(), + "arch": schema.String(), + "virt-type": schema.String(), + "root-storage-type": schema.String(), + "root-storage-size": schema.Uint(), + "date-created": schema.Int(), + "source": schema.String(), + "priority": schema.Int(), + "image-id": schema.String(), + } + // Some values don't have to be there. 
+ defaults := schema.Defaults{ + "root-storage-size": schema.Omit, + } + checker := schema.FieldMap(fields, defaults) + + coerced, err := checker.Coerce(source, nil) + if err != nil { + return nil, errors.Annotatef(err, "cloudimagemetadata v1 schema check failed") + } + valid := coerced.(map[string]interface{}) + _, ok := valid["root-storage-size"] + var pointerSize *uint64 + if ok { + rootStorageSize := valid["root-storage-size"].(uint64) + pointerSize = &rootStorageSize + } + + cloudimagemetadata := &cloudimagemetadata{ + Stream_: valid["stream"].(string), + Region_: valid["region"].(string), + Version_: valid["version"].(string), + Series_: valid["series"].(string), + Arch_: valid["arch"].(string), + VirtType_: valid["virt-type"].(string), + RootStorageType_: valid["root-storage-type"].(string), + RootStorageSize_: pointerSize, + DateCreated_: valid["date-created"].(int64), + Source_: valid["source"].(string), + Priority_: int(valid["priority"].(int64)), + ImageId_: valid["image-id"].(string), + } + + return cloudimagemetadata, nil +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/core/description/cloudimagemetadata_test.go juju-core-2.0.0/src/github.com/juju/juju/core/description/cloudimagemetadata_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/core/description/cloudimagemetadata_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/core/description/cloudimagemetadata_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,101 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+ +package description + +import ( + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + "gopkg.in/yaml.v2" +) + +type CloudImageMetadataSerializationSuite struct { + SliceSerializationSuite +} + +var _ = gc.Suite(&CloudImageMetadataSerializationSuite{}) + +func (s *CloudImageMetadataSerializationSuite) SetUpTest(c *gc.C) { + s.SliceSerializationSuite.SetUpTest(c) + s.importName = "cloudimagemetadata" + s.sliceName = "cloudimagemetadata" + s.importFunc = func(m map[string]interface{}) (interface{}, error) { + return importCloudImageMetadata(m) + } + s.testFields = func(m map[string]interface{}) { + m["cloudimagemetadata"] = []interface{}{} + } +} + +func (s *CloudImageMetadataSerializationSuite) TestNewCloudImageMetadata(c *gc.C) { + storageSize := uint64(3) + args := CloudImageMetadataArgs{ + Stream: "stream", + Region: "region-test", + Version: "14.04", + Series: "trusty", + Arch: "arch", + VirtType: "virtType-test", + RootStorageType: "rootStorageType-test", + RootStorageSize: &storageSize, + Source: "test", + Priority: 0, + ImageId: "foo", + DateCreated: 0, + } + metadata := newCloudImageMetadata(args) + c.Check(metadata.Stream(), gc.Equals, args.Stream) + c.Check(metadata.Region(), gc.Equals, args.Region) + c.Check(metadata.Version(), gc.Equals, args.Version) + c.Check(metadata.Series(), gc.Equals, args.Series) + c.Check(metadata.Arch(), gc.Equals, args.Arch) + c.Check(metadata.VirtType(), gc.Equals, args.VirtType) + c.Check(metadata.RootStorageType(), gc.Equals, args.RootStorageType) + value, ok := metadata.RootStorageSize() + c.Check(ok, jc.IsTrue) + c.Check(value, gc.Equals, *args.RootStorageSize) + c.Check(metadata.Source(), gc.Equals, args.Source) + c.Check(metadata.Priority(), gc.Equals, args.Priority) + c.Check(metadata.ImageId(), gc.Equals, args.ImageId) + c.Check(metadata.DateCreated(), gc.Equals, args.DateCreated) +} + +func (s *CloudImageMetadataSerializationSuite) TestParsingSerializedData(c *gc.C) { + storageSize := uint64(3) + 
initial := cloudimagemetadataset{ + Version: 1, + CloudImageMetadata_: []*cloudimagemetadata{ + newCloudImageMetadata(CloudImageMetadataArgs{ + Stream: "stream", + Region: "region-test", + Version: "14.04", + Series: "trusty", + Arch: "arch", + VirtType: "virtType-test", + RootStorageType: "rootStorageType-test", + RootStorageSize: &storageSize, + Source: "test", + Priority: 0, + ImageId: "foo", + DateCreated: 0, + }), + newCloudImageMetadata(CloudImageMetadataArgs{ + Stream: "stream", + Region: "region-test", + Version: "14.04", + }), + }, + } + + bytes, err := yaml.Marshal(initial) + c.Assert(err, jc.ErrorIsNil) + + var source map[string]interface{} + err = yaml.Unmarshal(bytes, &source) + c.Assert(err, jc.ErrorIsNil) + + metadata, err := importCloudImageMetadata(source) + c.Assert(err, jc.ErrorIsNil) + + c.Assert(metadata, jc.DeepEquals, initial.CloudImageMetadata_) +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/core/description/constraints.go juju-core-2.0.0/src/github.com/juju/juju/core/description/constraints.go --- juju-core-2.0~beta15/src/github.com/juju/juju/core/description/constraints.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/core/description/constraints.go 2016-10-13 14:31:49.000000000 +0000 @@ -55,7 +55,7 @@ Architecture_ string `yaml:"architecture,omitempty"` Container_ string `yaml:"container,omitempty"` - CpuCores_ uint64 `yaml:"cpu-cores,omitempty"` + CpuCores_ uint64 `yaml:"cores,omitempty"` CpuPower_ uint64 `yaml:"cpu-power,omitempty"` InstanceType_ string `yaml:"instance-type,omitempty"` Memory_ uint64 `yaml:"memory,omitempty"` @@ -151,11 +151,12 @@ fields := schema.Fields{ "architecture": schema.String(), "container": schema.String(), - "cpu-cores": schema.Uint(), - "cpu-power": schema.Uint(), + "cpu-cores": schema.ForceUint(), + "cores": schema.ForceUint(), + "cpu-power": schema.ForceUint(), "instance-type": schema.String(), - "memory": schema.Uint(), - "root-disk": schema.Uint(), + 
"memory": schema.ForceUint(), + "root-disk": schema.ForceUint(), "spaces": schema.List(schema.String()), "tags": schema.List(schema.String()), @@ -166,7 +167,8 @@ defaults := schema.Defaults{ "architecture": "", "container": "", - "cpu-cores": uint64(0), + "cpu-cores": schema.Omit, + "cores": schema.Omit, "cpu-power": uint64(0), "instance-type": "", "memory": uint64(0), @@ -183,7 +185,22 @@ if err != nil { return nil, errors.Annotatef(err, "constraints v1 schema check failed") } + valid := coerced.(map[string]interface{}) + _, hasCPU := valid["cpu-cores"] + _, hasCores := valid["cores"] + if hasCPU && hasCores { + return nil, errors.Errorf("can not specify both cores and cores constraints") + } + + var cores uint64 + if hasCPU { + cores = valid["cpu-cores"].(uint64) + } + if hasCores { + cores = valid["cores"].(uint64) + } + // From here we know that the map returned from the schema coercion // contains fields of the right type. @@ -191,7 +208,7 @@ Version: 1, Architecture_: valid["architecture"].(string), Container_: valid["container"].(string), - CpuCores_: valid["cpu-cores"].(uint64), + CpuCores_: cores, CpuPower_: valid["cpu-power"].(uint64), InstanceType_: valid["instance-type"].(string), Memory_: valid["memory"].(uint64), diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/core/description/filesystem.go juju-core-2.0.0/src/github.com/juju/juju/core/description/filesystem.go --- juju-core-2.0~beta15/src/github.com/juju/juju/core/description/filesystem.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/core/description/filesystem.go 2016-10-13 14:31:49.000000000 +0000 @@ -174,6 +174,9 @@ if f.Status_ == nil { return errors.NotValidf("filesystem %q missing status", f.ID_) } + if _, err := f.Binding(); err != nil { + return errors.Wrap(err, errors.NotValidf("filesystem %q binding", f.ID_)) + } return nil } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/core/description/interfaces.go 
juju-core-2.0.0/src/github.com/juju/juju/core/description/interfaces.go --- juju-core-2.0~beta15/src/github.com/juju/juju/core/description/interfaces.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/core/description/interfaces.go 2016-10-13 14:31:49.000000000 +0000 @@ -85,6 +85,12 @@ SSHHostKeys() []SSHHostKey AddSSHHostKey(SSHHostKeyArgs) SSHHostKey + CloudImageMetadata() []CloudImageMetadata + AddCloudImageMetadata(CloudImageMetadataArgs) CloudImageMetadata + + Actions() []Action + AddAction(ActionArgs) Action + Sequences() map[string]int SetSequence(name string, value int) @@ -94,6 +100,12 @@ Filesystems() []Filesystem AddFilesystem(FilesystemArgs) Filesystem + Storages() []Storage + AddStorage(StorageArgs) Storage + + StoragePools() []StoragePool + AddStoragePool(StoragePoolArgs) StoragePool + Validate() error } @@ -105,7 +117,7 @@ CreatedBy() names.UserTag DateCreated() time.Time LastConnection() time.Time - Access() Access + Access() string } // Address represents an IP Address of some form. @@ -161,9 +173,6 @@ Containers() []Machine AddContainer(MachineArgs) Machine - // TODO: - // Storage - BlockDevices() []BlockDevice AddBlockDevice(BlockDeviceArgs) BlockDevice @@ -174,10 +183,7 @@ // enough stuff set, like tools, and addresses etc. 
Validate() error - // reboot doc - // block devices // port docs - // machine filesystems } // OpenedPorts represents a collection of port ranges that are open on a @@ -254,12 +260,12 @@ MinUnits() int Settings() map[string]interface{} - SettingsRefCount() int Leader() string LeadershipSettings() map[string]interface{} MetricsCredentials() []byte + StorageConstraints() map[string]StorageConstraint Units() []Unit AddUnit(UnitArgs) Unit @@ -284,8 +290,6 @@ MeterStatusCode() string MeterStatusInfo() string - // TODO: storage - Tools() AgentTools SetTools(AgentToolsArgs) @@ -306,6 +310,9 @@ AgentStatusHistory() []Status SetAgentStatusHistory([]StatusArgs) + AddPayload(PayloadArgs) Payload + Payloads() []Payload + Validate() error } @@ -392,6 +399,36 @@ Keys() []string } +// CloudImageMetadata represents an IP cloudimagemetadata. +type CloudImageMetadata interface { + Stream() string + Region() string + Version() string + Series() string + Arch() string + VirtType() string + RootStorageType() string + RootStorageSize() (uint64, bool) + DateCreated() int64 + Source() string + Priority() int + ImageId() string +} + +// Action represents an IP action. +type Action interface { + Id() string + Receiver() string + Name() string + Parameters() map[string]interface{} + Enqueued() time.Time + Started() time.Time + Completed() time.Time + Results() map[string]interface{} + Status() string + Message() string +} + // Volume represents a volume (disk, logical volume, etc.) in the model. type Volume interface { HasStatus @@ -453,3 +490,37 @@ MountPoint() string ReadOnly() bool } + +// Storage represents the state of a unit or application-wide storage instance +// in the model. +type Storage interface { + Tag() names.StorageTag + Kind() string + // Owner returns the tag of the application or unit that owns this storage + // instance. 
+ Owner() (names.Tag, error) + Name() string + + Attachments() []names.UnitTag + + Validate() error +} + +// StoragePool represents a named storage pool and its settings. +type StoragePool interface { + Name() string + Provider() string + Attributes() map[string]interface{} +} + +// StorageConstraint repressents the user-specified constraints for +// provisioning storage instances for an application unit. +type StorageConstraint interface { + // Pool is the name of the storage pool from which to provision the + // storage instances. + Pool() string + // Size is the required size of the storage instances, in MiB. + Size() uint64 + // Count is the required number of storage instances. + Count() uint64 +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/core/description/ipaddress.go juju-core-2.0.0/src/github.com/juju/juju/core/description/ipaddress.go --- juju-core-2.0~beta15/src/github.com/juju/juju/core/description/ipaddress.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/core/description/ipaddress.go 2016-10-13 14:31:49.000000000 +0000 @@ -10,19 +10,19 @@ type ipaddresses struct { Version int `yaml:"version"` - IPAddresses_ []*ipaddress `yaml:"ipaddresses"` + IPAddresses_ []*ipaddress `yaml:"ip-addresses"` } type ipaddress struct { ProviderID_ string `yaml:"provider-id,omitempty"` - DeviceName_ string `yaml:"devicename"` - MachineID_ string `yaml:"machineid"` - SubnetCIDR_ string `yaml:"subnetcidr"` - ConfigMethod_ string `yaml:"configmethod"` + DeviceName_ string `yaml:"device-name"` + MachineID_ string `yaml:"machine-id"` + SubnetCIDR_ string `yaml:"subnet-cidr"` + ConfigMethod_ string `yaml:"config-method"` Value_ string `yaml:"value"` - DNSServers_ []string `yaml:"dnsservers"` - DNSSearchDomains_ []string `yaml:"dnssearchdomains"` - GatewayAddress_ string `yaml:"gatewayaddress"` + DNSServers_ []string `yaml:"dns-servers"` + DNSSearchDomains_ []string `yaml:"dns-search-domains"` + GatewayAddress_ string 
`yaml:"gateway-address"` } // ProviderID implements IPAddress. @@ -99,10 +99,10 @@ } func importIPAddresses(source map[string]interface{}) ([]*ipaddress, error) { - checker := versionedChecker("ipaddresses") + checker := versionedChecker("ip-addresses") coerced, err := checker.Coerce(source, nil) if err != nil { - return nil, errors.Annotatef(err, "ipaddresses version schema check failed") + return nil, errors.Annotatef(err, "ip-addresses version schema check failed") } valid := coerced.(map[string]interface{}) @@ -111,7 +111,7 @@ if !ok { return nil, errors.NotValidf("version %d", version) } - sourceList := valid["ipaddresses"].([]interface{}) + sourceList := valid["ip-addresses"].([]interface{}) return importIPAddressList(sourceList, importFunc) } @@ -120,11 +120,11 @@ for i, value := range sourceList { source, ok := value.(map[string]interface{}) if !ok { - return nil, errors.Errorf("unexpected value for ipaddress %d, %T", i, value) + return nil, errors.Errorf("unexpected value for ip-address %d, %T", i, value) } ipaddress, err := importFunc(source) if err != nil { - return nil, errors.Annotatef(err, "ipaddress %d", i) + return nil, errors.Annotatef(err, "ip-address %d", i) } result = append(result, ipaddress) } @@ -139,15 +139,15 @@ func importIPAddressV1(source map[string]interface{}) (*ipaddress, error) { fields := schema.Fields{ - "provider-id": schema.String(), - "devicename": schema.String(), - "machineid": schema.String(), - "subnetcidr": schema.String(), - "configmethod": schema.String(), - "value": schema.String(), - "dnsservers": schema.List(schema.String()), - "dnssearchdomains": schema.List(schema.String()), - "gatewayaddress": schema.String(), + "provider-id": schema.String(), + "device-name": schema.String(), + "machine-id": schema.String(), + "subnet-cidr": schema.String(), + "config-method": schema.String(), + "value": schema.String(), + "dns-servers": schema.List(schema.String()), + "dns-search-domains": schema.List(schema.String()), + 
"gateway-address": schema.String(), } // Some values don't have to be there. defaults := schema.Defaults{ @@ -157,28 +157,28 @@ coerced, err := checker.Coerce(source, nil) if err != nil { - return nil, errors.Annotatef(err, "ipaddress v1 schema check failed") + return nil, errors.Annotatef(err, "ip address v1 schema check failed") } valid := coerced.(map[string]interface{}) - dnsserversInterface := valid["dnsservers"].([]interface{}) + dnsserversInterface := valid["dns-servers"].([]interface{}) dnsservers := make([]string, len(dnsserversInterface)) for i, d := range dnsserversInterface { dnsservers[i] = d.(string) } - dnssearchInterface := valid["dnssearchdomains"].([]interface{}) + dnssearchInterface := valid["dns-search-domains"].([]interface{}) dnssearch := make([]string, len(dnssearchInterface)) for i, d := range dnssearchInterface { dnssearch[i] = d.(string) } return &ipaddress{ ProviderID_: valid["provider-id"].(string), - DeviceName_: valid["devicename"].(string), - MachineID_: valid["machineid"].(string), - SubnetCIDR_: valid["subnetcidr"].(string), - ConfigMethod_: valid["configmethod"].(string), + DeviceName_: valid["device-name"].(string), + MachineID_: valid["machine-id"].(string), + SubnetCIDR_: valid["subnet-cidr"].(string), + ConfigMethod_: valid["config-method"].(string), Value_: valid["value"].(string), DNSServers_: dnsservers, DNSSearchDomains_: dnssearch, - GatewayAddress_: valid["gatewayaddress"].(string), + GatewayAddress_: valid["gateway-address"].(string), }, nil } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/core/description/ipaddress_test.go juju-core-2.0.0/src/github.com/juju/juju/core/description/ipaddress_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/core/description/ipaddress_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/core/description/ipaddress_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -17,13 +17,13 @@ func (s *IPAddressSerializationSuite) SetUpTest(c *gc.C) { 
s.SliceSerializationSuite.SetUpTest(c) - s.importName = "ipaddresses" - s.sliceName = "ipaddresses" + s.importName = "ip-addresses" + s.sliceName = "ip-addresses" s.importFunc = func(m map[string]interface{}) (interface{}, error) { return importIPAddresses(m) } s.testFields = func(m map[string]interface{}) { - m["ipaddresses"] = []interface{}{} + m["ip-addresses"] = []interface{}{} } } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/core/description/linklayerdevice.go juju-core-2.0.0/src/github.com/juju/juju/core/description/linklayerdevice.go --- juju-core-2.0~beta15/src/github.com/juju/juju/core/description/linklayerdevice.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/core/description/linklayerdevice.go 2016-10-13 14:31:49.000000000 +0000 @@ -10,19 +10,19 @@ type linklayerdevices struct { Version int `yaml:"version"` - LinkLayerDevices_ []*linklayerdevice `yaml:"linklayerdevices"` + LinkLayerDevices_ []*linklayerdevice `yaml:"link-layer-devices"` } type linklayerdevice struct { Name_ string `yaml:"name"` MTU_ uint `yaml:"mtu"` ProviderID_ string `yaml:"provider-id,omitempty"` - MachineID_ string `yaml:"machineid"` + MachineID_ string `yaml:"machine-id"` Type_ string `yaml:"type"` - MACAddress_ string `yaml:"macaddress"` - IsAutoStart_ bool `yaml:"isautostart"` - IsUp_ bool `yaml:"isup"` - ParentName_ string `yaml:"parentname"` + MACAddress_ string `yaml:"mac-address"` + IsAutoStart_ bool `yaml:"is-autostart"` + IsUp_ bool `yaml:"is-up"` + ParentName_ string `yaml:"parent-name"` } // ProviderID implements LinkLayerDevice. 
@@ -99,10 +99,10 @@ } func importLinkLayerDevices(source map[string]interface{}) ([]*linklayerdevice, error) { - checker := versionedChecker("linklayerdevices") + checker := versionedChecker("link-layer-devices") coerced, err := checker.Coerce(source, nil) if err != nil { - return nil, errors.Annotatef(err, "linklayerdevices version schema check failed") + return nil, errors.Annotatef(err, "link-layer-devices version schema check failed") } valid := coerced.(map[string]interface{}) @@ -111,7 +111,7 @@ if !ok { return nil, errors.NotValidf("version %d", version) } - sourceList := valid["linklayerdevices"].([]interface{}) + sourceList := valid["link-layer-devices"].([]interface{}) return importLinkLayerDeviceList(sourceList, importFunc) } @@ -120,11 +120,11 @@ for i, value := range sourceList { source, ok := value.(map[string]interface{}) if !ok { - return nil, errors.Errorf("unexpected value for linklayerdevice %d, %T", i, value) + return nil, errors.Errorf("unexpected value for link-layer-device %d, %T", i, value) } linklayerdevice, err := importFunc(source) if err != nil { - return nil, errors.Annotatef(err, "linklayerdevice %d", i) + return nil, errors.Annotatef(err, "link-layer-device %d", i) } result = append(result, linklayerdevice) } @@ -139,15 +139,15 @@ func importLinkLayerDeviceV1(source map[string]interface{}) (*linklayerdevice, error) { fields := schema.Fields{ - "provider-id": schema.String(), - "machineid": schema.String(), - "name": schema.String(), - "mtu": schema.Int(), - "type": schema.String(), - "macaddress": schema.String(), - "isautostart": schema.Bool(), - "isup": schema.Bool(), - "parentname": schema.String(), + "provider-id": schema.String(), + "machine-id": schema.String(), + "name": schema.String(), + "mtu": schema.Int(), + "type": schema.String(), + "mac-address": schema.String(), + "is-autostart": schema.Bool(), + "is-up": schema.Bool(), + "parent-name": schema.String(), } // Some values don't have to be there. 
defaults := schema.Defaults{ @@ -162,13 +162,13 @@ valid := coerced.(map[string]interface{}) return &linklayerdevice{ ProviderID_: valid["provider-id"].(string), - MachineID_: valid["machineid"].(string), + MachineID_: valid["machine-id"].(string), Name_: valid["name"].(string), MTU_: uint(valid["mtu"].(int64)), Type_: valid["type"].(string), - MACAddress_: valid["macaddress"].(string), - IsAutoStart_: valid["isautostart"].(bool), - IsUp_: valid["isup"].(bool), - ParentName_: valid["parentname"].(string), + MACAddress_: valid["mac-address"].(string), + IsAutoStart_: valid["is-autostart"].(bool), + IsUp_: valid["is-up"].(bool), + ParentName_: valid["parent-name"].(string), }, nil } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/core/description/linklayerdevice_test.go juju-core-2.0.0/src/github.com/juju/juju/core/description/linklayerdevice_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/core/description/linklayerdevice_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/core/description/linklayerdevice_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -17,13 +17,13 @@ func (s *LinkLayerDeviceSerializationSuite) SetUpTest(c *gc.C) { s.SliceSerializationSuite.SetUpTest(c) - s.importName = "linklayerdevices" - s.sliceName = "linklayerdevices" + s.importName = "link-layer-devices" + s.sliceName = "link-layer-devices" s.importFunc = func(m map[string]interface{}) (interface{}, error) { return importLinkLayerDevices(m) } s.testFields = func(m map[string]interface{}) { - m["linklayerdevices"] = []interface{}{} + m["link-layer-devices"] = []interface{}{} } } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/core/description/machine.go juju-core-2.0.0/src/github.com/juju/juju/core/description/machine.go --- juju-core-2.0~beta15/src/github.com/juju/juju/core/description/machine.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/core/description/machine.go 2016-10-13 14:31:49.000000000 
+0000 @@ -589,7 +589,7 @@ Architecture_ string `yaml:"architecture,omitempty"` Memory_ uint64 `yaml:"memory,omitempty"` RootDisk_ uint64 `yaml:"root-disk,omitempty"` - CpuCores_ uint64 `yaml:"cpu-cores,omitempty"` + CpuCores_ uint64 `yaml:"cores,omitempty"` CpuPower_ uint64 `yaml:"cpu-power,omitempty"` Tags_ []string `yaml:"tags,omitempty"` AvailabilityZone_ string `yaml:"availability-zone,omitempty"` @@ -667,10 +667,10 @@ "instance-id": schema.String(), "status": schema.String(), "architecture": schema.String(), - "memory": schema.Uint(), - "root-disk": schema.Uint(), - "cpu-cores": schema.Uint(), - "cpu-power": schema.Uint(), + "memory": schema.ForceUint(), + "root-disk": schema.ForceUint(), + "cores": schema.ForceUint(), + "cpu-power": schema.ForceUint(), "tags": schema.List(schema.String()), "availability-zone": schema.String(), } @@ -679,7 +679,7 @@ "architecture": "", "memory": uint64(0), "root-disk": uint64(0), - "cpu-cores": uint64(0), + "cores": uint64(0), "cpu-power": uint64(0), "tags": schema.Omit, "availability-zone": "", @@ -701,7 +701,7 @@ Architecture_: valid["architecture"].(string), Memory_: valid["memory"].(uint64), RootDisk_: valid["root-disk"].(uint64), - CpuCores_: valid["cpu-cores"].(uint64), + CpuCores_: valid["cores"].(uint64), CpuPower_: valid["cpu-power"].(uint64), Tags_: convertToStringSlice(valid["tags"]), AvailabilityZone_: valid["availability-zone"].(string), diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/core/description/model.go juju-core-2.0.0/src/github.com/juju/juju/core/description/model.go --- juju-core-2.0~beta15/src/github.com/juju/juju/core/description/model.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/core/description/model.go 2016-10-13 14:31:49.000000000 +0000 @@ -50,8 +50,12 @@ m.setSubnets(nil) m.setIPAddresses(nil) m.setSSHHostKeys(nil) + m.setCloudImageMetadatas(nil) + m.setActions(nil) m.setVolumes(nil) m.setFilesystems(nil) + m.setStorages(nil) + m.setStoragePools(nil) 
return m } @@ -119,11 +123,15 @@ Applications_ applications `yaml:"applications"` Relations_ relations `yaml:"relations"` Spaces_ spaces `yaml:"spaces"` - LinkLayerDevices_ linklayerdevices `yaml:"linklayerdevices"` - IPAddresses_ ipaddresses `yaml:"ipaddresses"` + LinkLayerDevices_ linklayerdevices `yaml:"link-layer-devices"` + IPAddresses_ ipaddresses `yaml:"ip-addresses"` Subnets_ subnets `yaml:"subnets"` - SSHHostKeys_ sshHostKeys `yaml:"sshhostkeys"` + CloudImageMetadata_ cloudimagemetadataset `yaml:"cloud-image-metadata"` + + Actions_ actions `yaml:"actions"` + + SSHHostKeys_ sshHostKeys `yaml:"ssh-host-keys"` Sequences_ map[string]int `yaml:"sequences"` @@ -135,10 +143,10 @@ CloudRegion_ string `yaml:"cloud-region,omitempty"` CloudCredential_ string `yaml:"cloud-credential,omitempty"` - // TODO: - // Storage... - Volumes_ volumes `yaml:"volumes"` - Filesystems_ filesystems `yaml:"filesystems"` + Volumes_ volumes `yaml:"volumes"` + Filesystems_ filesystems `yaml:"filesystems"` + Storages_ storages `yaml:"storages"` + StoragePools_ storagepools `yaml:"storage-pools"` } func (m *model) Tag() names.ModelTag { @@ -184,7 +192,7 @@ type ByName []User func (a ByName) Len() int { return len(a) } -func (a ByName) Less(i, j int) bool { return a[i].Name().Canonical() < a[j].Name().Canonical() } +func (a ByName) Less(i, j int) bool { return a[i].Name().Id() < a[j].Name().Id() } func (a ByName) Swap(i, j int) { a[i], a[j] = a[j], a[i] } // Users implements Model. @@ -402,6 +410,52 @@ } } +// CloudImageMetadatas implements Model. +func (m *model) CloudImageMetadata() []CloudImageMetadata { + var result []CloudImageMetadata + for _, addr := range m.CloudImageMetadata_.CloudImageMetadata_ { + result = append(result, addr) + } + return result +} + +// Actions implements Model. 
+func (m *model) Actions() []Action { + var result []Action + for _, addr := range m.Actions_.Actions_ { + result = append(result, addr) + } + return result +} + +// AddCloudImageMetadata implements Model. +func (m *model) AddCloudImageMetadata(args CloudImageMetadataArgs) CloudImageMetadata { + addr := newCloudImageMetadata(args) + m.CloudImageMetadata_.CloudImageMetadata_ = append(m.CloudImageMetadata_.CloudImageMetadata_, addr) + return addr +} + +func (m *model) setCloudImageMetadatas(cloudimagemetadataList []*cloudimagemetadata) { + m.CloudImageMetadata_ = cloudimagemetadataset{ + Version: 1, + CloudImageMetadata_: cloudimagemetadataList, + } +} + +// AddAction implements Model. +func (m *model) AddAction(args ActionArgs) Action { + addr := newAction(args) + m.Actions_.Actions_ = append(m.Actions_.Actions_, addr) + return addr +} + +func (m *model) setActions(actionsList []*action) { + m.Actions_ = actions{ + Version: 1, + Actions_: actionsList, + } +} + // Sequences implements Model. func (m *model) Sequences() map[string]int { return m.Sequences_ @@ -486,29 +540,72 @@ } } +// Storages implements Model. +func (m *model) Storages() []Storage { + var result []Storage + for _, storage := range m.Storages_.Storages_ { + result = append(result, storage) + } + return result +} + +// AddStorage implemets Model. +func (m *model) AddStorage(args StorageArgs) Storage { + storage := newStorage(args) + m.Storages_.Storages_ = append(m.Storages_.Storages_, storage) + return storage +} + +func (m *model) setStorages(storageList []*storage) { + m.Storages_ = storages{ + Version: 1, + Storages_: storageList, + } +} + +// StoragePools implements Model. +func (m *model) StoragePools() []StoragePool { + var result []StoragePool + for _, pool := range m.StoragePools_.Pools_ { + result = append(result, pool) + } + return result +} + +// AddStoragePool implemets Model. 
+func (m *model) AddStoragePool(args StoragePoolArgs) StoragePool { + pool := newStoragePool(args) + m.StoragePools_.Pools_ = append(m.StoragePools_.Pools_, pool) + return pool +} + +func (m *model) setStoragePools(poolList []*storagepool) { + m.StoragePools_ = storagepools{ + Version: 1, + Pools_: poolList, + } +} + // Validate implements Model. func (m *model) Validate() error { // A model needs an owner. if m.Owner_ == "" { return errors.NotValidf("missing model owner") } - + allMachines := set.NewStrings() unitsWithOpenPorts := set.NewStrings() for _, machine := range m.Machines_.Machines_ { - if err := machine.Validate(); err != nil { + if err := m.validateMachine(machine, allMachines, unitsWithOpenPorts); err != nil { return errors.Trace(err) } - for _, op := range machine.OpenedPorts() { - for _, pr := range op.OpenPorts() { - unitsWithOpenPorts.Add(pr.UnitName()) - } - } } + allApplications := set.NewStrings() allUnits := set.NewStrings() for _, application := range m.Applications_.Applications_ { if err := application.Validate(); err != nil { return errors.Trace(err) } + allApplications.Add(application.Name()) allUnits = allUnits.Union(application.unitNames()) } // Make sure that all the unit names specified in machine opened ports @@ -536,33 +633,88 @@ if err != nil { return errors.Trace(err) } - err = m.validateVolumes() + + err = m.validateStorage(allMachines, allApplications, allUnits) if err != nil { return errors.Trace(err) } - err = m.validateFilesystems() - if err != nil { + + return nil +} + +func (m *model) validateMachine(machine Machine, allMachineIDs, unitsWithOpenPorts set.Strings) error { + if err := machine.Validate(); err != nil { return errors.Trace(err) } - + allMachineIDs.Add(machine.Id()) + for _, op := range machine.OpenedPorts() { + for _, pr := range op.OpenPorts() { + unitsWithOpenPorts.Add(pr.UnitName()) + } + } + for _, container := range machine.Containers() { + err := m.validateMachine(container, allMachineIDs, 
unitsWithOpenPorts) + if err != nil { + return errors.Trace(err) + } + } return nil } -func (m *model) validateVolumes() error { +func (m *model) validateStorage(allMachineIDs, allApplications, allUnits set.Strings) error { + appsAndUnits := allApplications.Union(allUnits) + allStorage := set.NewStrings() + for i, storage := range m.Storages_.Storages_ { + if err := storage.Validate(); err != nil { + return errors.Annotatef(err, "storage[%d]", i) + } + allStorage.Add(storage.Tag().Id()) + owner, err := storage.Owner() + if err != nil { + return errors.Wrap(err, errors.NotValidf("storage[%d] owner (%s)", i, owner)) + } + ownerID := owner.Id() + if !appsAndUnits.Contains(ownerID) { + return errors.NotValidf("storage[%d] owner (%s)", i, ownerID) + } + for _, unit := range storage.Attachments() { + if !allUnits.Contains(unit.Id()) { + return errors.NotValidf("storage[%d] attachment referencing unknown unit %q", i, unit) + } + } + } + allVolumes := set.NewStrings() for i, volume := range m.Volumes_.Volumes_ { if err := volume.Validate(); err != nil { return errors.Annotatef(err, "volume[%d]", i) } + allVolumes.Add(volume.Tag().Id()) + if storeID := volume.Storage().Id(); storeID != "" && !allStorage.Contains(storeID) { + return errors.NotValidf("volume[%d] referencing unknown storage %q", i, storeID) + } + for j, attachment := range volume.Attachments() { + if machineID := attachment.Machine().Id(); !allMachineIDs.Contains(machineID) { + return errors.NotValidf("volume[%d].attachment[%d] referencing unknown machine %q", i, j, machineID) + } + } } - return nil -} - -func (m *model) validateFilesystems() error { for i, filesystem := range m.Filesystems_.Filesystems_ { if err := filesystem.Validate(); err != nil { return errors.Annotatef(err, "filesystem[%d]", i) } + if storeID := filesystem.Storage().Id(); storeID != "" && !allStorage.Contains(storeID) { + return errors.NotValidf("filesystem[%d] referencing unknown storage %q", i, storeID) + } + if volID := 
filesystem.Volume().Id(); volID != "" && !allVolumes.Contains(volID) { + return errors.NotValidf("filesystem[%d] referencing unknown volume %q", i, volID) + } + for j, attachment := range filesystem.Attachments() { + if machineID := attachment.Machine().Id(); !allMachineIDs.Contains(machineID) { + return errors.NotValidf("filesystem[%d].attachment[%d] referencing unknown machine %q", i, j, machineID) + } + } } + return nil } @@ -741,24 +893,28 @@ func importModelV1(source map[string]interface{}) (*model, error) { fields := schema.Fields{ - "owner": schema.String(), - "cloud": schema.String(), - "cloud-region": schema.String(), - "config": schema.StringMap(schema.Any()), - "latest-tools": schema.String(), - "blocks": schema.StringMap(schema.String()), - "users": schema.StringMap(schema.Any()), - "machines": schema.StringMap(schema.Any()), - "applications": schema.StringMap(schema.Any()), - "relations": schema.StringMap(schema.Any()), - "sshhostkeys": schema.StringMap(schema.Any()), - "ipaddresses": schema.StringMap(schema.Any()), - "spaces": schema.StringMap(schema.Any()), - "subnets": schema.StringMap(schema.Any()), - "linklayerdevices": schema.StringMap(schema.Any()), - "volumes": schema.StringMap(schema.Any()), - "filesystems": schema.StringMap(schema.Any()), - "sequences": schema.StringMap(schema.Int()), + "owner": schema.String(), + "cloud": schema.String(), + "cloud-region": schema.String(), + "config": schema.StringMap(schema.Any()), + "latest-tools": schema.String(), + "blocks": schema.StringMap(schema.String()), + "users": schema.StringMap(schema.Any()), + "machines": schema.StringMap(schema.Any()), + "applications": schema.StringMap(schema.Any()), + "relations": schema.StringMap(schema.Any()), + "ssh-host-keys": schema.StringMap(schema.Any()), + "cloud-image-metadata": schema.StringMap(schema.Any()), + "actions": schema.StringMap(schema.Any()), + "ip-addresses": schema.StringMap(schema.Any()), + "spaces": schema.StringMap(schema.Any()), + "subnets": 
schema.StringMap(schema.Any()), + "link-layer-devices": schema.StringMap(schema.Any()), + "volumes": schema.StringMap(schema.Any()), + "filesystems": schema.StringMap(schema.Any()), + "storages": schema.StringMap(schema.Any()), + "storage-pools": schema.StringMap(schema.Any()), + "sequences": schema.StringMap(schema.Int()), } // Some values don't have to be there. defaults := schema.Defaults{ @@ -851,10 +1007,10 @@ } result.setSpaces(spaces) - deviceMap := valid["linklayerdevices"].(map[string]interface{}) + deviceMap := valid["link-layer-devices"].(map[string]interface{}) devices, err := importLinkLayerDevices(deviceMap) if err != nil { - return nil, errors.Annotate(err, "linklayerdevices") + return nil, errors.Annotate(err, "link-layer-devices") } result.setLinkLayerDevices(devices) @@ -865,20 +1021,34 @@ } result.setSubnets(subnets) - addressMap := valid["ipaddresses"].(map[string]interface{}) + addressMap := valid["ip-addresses"].(map[string]interface{}) addresses, err := importIPAddresses(addressMap) if err != nil { - return nil, errors.Annotate(err, "ipaddresses") + return nil, errors.Annotate(err, "ip-addresses") } result.setIPAddresses(addresses) - sshHostKeyMap := valid["sshhostkeys"].(map[string]interface{}) + sshHostKeyMap := valid["ssh-host-keys"].(map[string]interface{}) hostKeys, err := importSSHHostKeys(sshHostKeyMap) if err != nil { - return nil, errors.Annotate(err, "sshhostkeys") + return nil, errors.Annotate(err, "ssh-host-keys") } result.setSSHHostKeys(hostKeys) + cloudimagemetadataMap := valid["cloud-image-metadata"].(map[string]interface{}) + cloudimagemetadata, err := importCloudImageMetadata(cloudimagemetadataMap) + if err != nil { + return nil, errors.Annotate(err, "cloud-image-metadata") + } + result.setCloudImageMetadatas(cloudimagemetadata) + + actionsMap := valid["actions"].(map[string]interface{}) + actions, err := importActions(actionsMap) + if err != nil { + return nil, errors.Annotate(err, "actions") + } + result.setActions(actions) 
+ volumes, err := importVolumes(valid["volumes"].(map[string]interface{})) if err != nil { return nil, errors.Annotate(err, "volumes") @@ -891,5 +1061,17 @@ } result.setFilesystems(filesystems) + storages, err := importStorages(valid["storages"].(map[string]interface{})) + if err != nil { + return nil, errors.Annotate(err, "storages") + } + result.setStorages(storages) + + pools, err := importStoragePools(valid["storage-pools"].(map[string]interface{})) + if err != nil { + return nil, errors.Annotate(err, "storage-pools") + } + result.setStoragePools(pools) + return result, nil } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/core/description/model_test.go juju-core-2.0.0/src/github.com/juju/juju/core/description/model_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/core/description/model_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/core/description/model_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -635,6 +635,72 @@ c.Assert(model.SSHHostKeys(), jc.DeepEquals, keys) } +func (s *ModelSerializationSuite) TestCloudImageMetadata(c *gc.C) { + storageSize := uint64(3) + initial := NewModel(ModelArgs{Owner: names.NewUserTag("owner")}) + image := initial.AddCloudImageMetadata(CloudImageMetadataArgs{ + Stream: "stream", + Region: "region-test", + Version: "14.04", + Series: "trusty", + Arch: "arch", + VirtType: "virtType-test", + RootStorageType: "rootStorageType-test", + RootStorageSize: &storageSize, + Source: "test", + Priority: 2, + ImageId: "1", + DateCreated: 2, + }) + c.Assert(image.Stream(), gc.Equals, "stream") + c.Assert(image.Region(), gc.Equals, "region-test") + c.Assert(image.Version(), gc.Equals, "14.04") + c.Assert(image.Arch(), gc.Equals, "arch") + c.Assert(image.VirtType(), gc.Equals, "virtType-test") + c.Assert(image.RootStorageType(), gc.Equals, "rootStorageType-test") + value, ok := image.RootStorageSize() + c.Assert(ok, jc.IsTrue) + c.Assert(value, gc.Equals, uint64(3)) + 
c.Assert(image.Source(), gc.Equals, "test") + c.Assert(image.Priority(), gc.Equals, 2) + c.Assert(image.ImageId(), gc.Equals, "1") + c.Assert(image.DateCreated(), gc.Equals, int64(2)) + + metadata := initial.CloudImageMetadata() + c.Assert(metadata, gc.HasLen, 1) + c.Assert(metadata[0], jc.DeepEquals, image) + + bytes, err := yaml.Marshal(initial) + c.Assert(err, jc.ErrorIsNil) + + model, err := Deserialize(bytes) + c.Assert(err, jc.ErrorIsNil) + c.Assert(model.CloudImageMetadata(), jc.DeepEquals, metadata) +} + +func (s *ModelSerializationSuite) TestAction(c *gc.C) { + initial := NewModel(ModelArgs{Owner: names.NewUserTag("owner")}) + enqueued := time.Now().UTC() + action := initial.AddAction(ActionArgs{ + Name: "foo", + Enqueued: enqueued, + Parameters: map[string]interface{}{}, + Results: map[string]interface{}{}, + }) + c.Assert(action.Name(), gc.Equals, "foo") + c.Assert(action.Enqueued(), gc.Equals, enqueued) + actions := initial.Actions() + c.Assert(actions, gc.HasLen, 1) + c.Assert(actions[0], jc.DeepEquals, action) + + bytes, err := yaml.Marshal(initial) + c.Assert(err, jc.ErrorIsNil) + + model, err := Deserialize(bytes) + c.Assert(err, jc.ErrorIsNil) + c.Assert(model.Actions(), jc.DeepEquals, actions) +} + func (s *ModelSerializationSuite) TestVolumeValidation(c *gc.C) { model := NewModel(ModelArgs{Owner: names.NewUserTag("owner")}) model.AddVolume(testVolumeArgs()) @@ -681,3 +747,59 @@ c.Assert(err, jc.ErrorIsNil) c.Assert(model.Filesystems(), jc.DeepEquals, filesystems) } + +func (s *ModelSerializationSuite) TestStorage(c *gc.C) { + initial := NewModel(ModelArgs{Owner: names.NewUserTag("owner")}) + storage := initial.AddStorage(testStorageArgs()) + storages := initial.Storages() + c.Assert(storages, gc.HasLen, 1) + c.Assert(storages[0], jc.DeepEquals, storage) + + bytes, err := yaml.Marshal(initial) + c.Assert(err, jc.ErrorIsNil) + + model, err := Deserialize(bytes) + c.Assert(err, jc.ErrorIsNil) + c.Assert(model.Storages(), jc.DeepEquals, storages) +} 
+ +func (s *ModelSerializationSuite) TestStoragePools(c *gc.C) { + initial := NewModel(ModelArgs{Owner: names.NewUserTag("owner")}) + poolOne := map[string]interface{}{ + "foo": 42, + "value": true, + } + poolTwo := map[string]interface{}{ + "value": "spanner", + } + initial.AddStoragePool(StoragePoolArgs{ + Name: "one", Provider: "sparkles", Attributes: poolOne}) + initial.AddStoragePool(StoragePoolArgs{ + Name: "two", Provider: "spanner", Attributes: poolTwo}) + + pools := initial.StoragePools() + c.Assert(pools, gc.HasLen, 2) + one, two := pools[0], pools[1] + c.Check(one.Name(), gc.Equals, "one") + c.Check(one.Provider(), gc.Equals, "sparkles") + c.Check(one.Attributes(), jc.DeepEquals, poolOne) + c.Check(two.Name(), gc.Equals, "two") + c.Check(two.Provider(), gc.Equals, "spanner") + c.Check(two.Attributes(), jc.DeepEquals, poolTwo) + + bytes, err := yaml.Marshal(initial) + c.Assert(err, jc.ErrorIsNil) + + model, err := Deserialize(bytes) + c.Assert(err, jc.ErrorIsNil) + + pools = model.StoragePools() + c.Assert(pools, gc.HasLen, 2) + one, two = pools[0], pools[1] + c.Check(one.Name(), gc.Equals, "one") + c.Check(one.Provider(), gc.Equals, "sparkles") + c.Check(one.Attributes(), jc.DeepEquals, poolOne) + c.Check(two.Name(), gc.Equals, "two") + c.Check(two.Provider(), gc.Equals, "spanner") + c.Check(two.Attributes(), jc.DeepEquals, poolTwo) +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/core/description/payload.go juju-core-2.0.0/src/github.com/juju/juju/core/description/payload.go --- juju-core-2.0~beta15/src/github.com/juju/juju/core/description/payload.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/core/description/payload.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,144 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+ +package description + +import ( + "github.com/juju/errors" + "github.com/juju/schema" +) + +// Payload represents a charm payload for a unit. +type Payload interface { + Name() string + Type() string + RawID() string + State() string + Labels() []string +} + +type payloads struct { + Version int `yaml:"version"` + Payloads_ []*payload `yaml:"payloads"` +} + +type payload struct { + Name_ string `yaml:"name"` + Type_ string `yaml:"type"` + RawID_ string `yaml:"raw-id"` + State_ string `yaml:"state"` + Labels_ []string `yaml:"labels,omitempty"` +} + +// Name implements Payload. +func (p *payload) Name() string { + return p.Name_ +} + +// Type implements Payload. +func (p *payload) Type() string { + return p.Type_ +} + +// RawID implements Payload. +func (p *payload) RawID() string { + return p.RawID_ +} + +// State implements Payload. +func (p *payload) State() string { + return p.State_ +} + +// Labels implements Payload. +func (p *payload) Labels() []string { + return p.Labels_ +} + +// PayloadArgs is an argument struct used to create a +// new internal payload type that supports the Payload interface. 
+type PayloadArgs struct { + Name string + Type string + RawID string + State string + Labels []string +} + +func newPayload(args PayloadArgs) *payload { + return &payload{ + Name_: args.Name, + Type_: args.Type, + RawID_: args.RawID, + State_: args.State, + Labels_: args.Labels, + } +} + +func importPayloads(source map[string]interface{}) ([]*payload, error) { + checker := versionedChecker("payloads") + coerced, err := checker.Coerce(source, nil) + if err != nil { + return nil, errors.Annotatef(err, "payloads version schema check failed") + } + valid := coerced.(map[string]interface{}) + + version := int(valid["version"].(int64)) + importFunc, ok := payloadDeserializationFuncs[version] + if !ok { + return nil, errors.NotValidf("version %d", version) + } + sourceList := valid["payloads"].([]interface{}) + return importPayloadList(sourceList, importFunc) +} + +func importPayloadList(sourceList []interface{}, importFunc payloadDeserializationFunc) ([]*payload, error) { + result := make([]*payload, 0, len(sourceList)) + for i, value := range sourceList { + source, ok := value.(map[string]interface{}) + if !ok { + return nil, errors.Errorf("unexpected value for payload %d, %T", i, value) + } + payload, err := importFunc(source) + if err != nil { + return nil, errors.Annotatef(err, "payload %d", i) + } + result = append(result, payload) + } + return result, nil +} + +type payloadDeserializationFunc func(map[string]interface{}) (*payload, error) + +var payloadDeserializationFuncs = map[int]payloadDeserializationFunc{ + 1: importPayloadV1, +} + +func importPayloadV1(source map[string]interface{}) (*payload, error) { + fields := schema.Fields{ + "name": schema.String(), + "type": schema.String(), + "raw-id": schema.String(), + "state": schema.String(), + "labels": schema.List(schema.String()), + } + // Some values don't have to be there. 
+ defaults := schema.Defaults{ + "labels": schema.Omit, + } + checker := schema.FieldMap(fields, defaults) + + coerced, err := checker.Coerce(source, nil) + if err != nil { + return nil, errors.Annotatef(err, "payload v1 schema check failed") + } + valid := coerced.(map[string]interface{}) + + return &payload{ + Name_: valid["name"].(string), + Type_: valid["type"].(string), + RawID_: valid["raw-id"].(string), + State_: valid["state"].(string), + Labels_: convertToStringSlice(valid["labels"]), + }, nil +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/core/description/payload_test.go juju-core-2.0.0/src/github.com/juju/juju/core/description/payload_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/core/description/payload_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/core/description/payload_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,85 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+ +package description + +import ( + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + "gopkg.in/yaml.v2" +) + +type PayloadSerializationSuite struct { + SliceSerializationSuite +} + +var _ = gc.Suite(&PayloadSerializationSuite{}) + +func (s *PayloadSerializationSuite) SetUpTest(c *gc.C) { + s.SliceSerializationSuite.SetUpTest(c) + s.importName = "payloads" + s.sliceName = "payloads" + s.importFunc = func(m map[string]interface{}) (interface{}, error) { + return importPayloads(m) + } + s.testFields = func(m map[string]interface{}) { + m["payloads"] = []interface{}{} + } +} + +func allPayloadArgs() PayloadArgs { + return PayloadArgs{ + Name: "bob", + Type: "docker", + RawID: "d06f00d", + State: "running", + Labels: []string{"auto", "foo"}, + } +} + +func (s *PayloadSerializationSuite) TestNewPayload(c *gc.C) { + p := newPayload(allPayloadArgs()) + c.Check(p.Name(), gc.Equals, "bob") + c.Check(p.Type(), gc.Equals, "docker") + c.Check(p.RawID(), gc.Equals, "d06f00d") + c.Check(p.State(), gc.Equals, "running") + c.Check(p.Labels(), jc.DeepEquals, []string{"auto", "foo"}) +} + +func (s *PayloadSerializationSuite) exportImport(c *gc.C, p *payload) *payload { + initial := payloads{ + Version: 1, + Payloads_: []*payload{p}, + } + + bytes, err := yaml.Marshal(initial) + c.Assert(err, jc.ErrorIsNil) + + var source map[string]interface{} + err = yaml.Unmarshal(bytes, &source) + c.Assert(err, jc.ErrorIsNil) + + payloads, err := importPayloads(source) + c.Assert(err, jc.ErrorIsNil) + c.Assert(payloads, gc.HasLen, 1) + return payloads[0] +} + +func (s *PayloadSerializationSuite) TestParsingSerializedData(c *gc.C) { + initial := newPayload(allPayloadArgs()) + imported := s.exportImport(c, initial) + c.Assert(imported, jc.DeepEquals, initial) +} + +func (s *PayloadSerializationSuite) TestImportEmpty(c *gc.C) { + payloads, err := importPayloads(emptyPayloadMap()) + c.Assert(err, jc.ErrorIsNil) + c.Assert(payloads, gc.HasLen, 0) +} + +func emptyPayloadMap() 
map[string]interface{} { + return map[string]interface{}{ + "version": 1, + "payloads": []interface{}{}, + } +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/core/description/serialization.go juju-core-2.0.0/src/github.com/juju/juju/core/description/serialization.go --- juju-core-2.0~beta15/src/github.com/juju/juju/core/description/serialization.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/core/description/serialization.go 2016-10-13 14:31:49.000000000 +0000 @@ -64,3 +64,17 @@ } return result } + +// convertToMapOfMaps is expected to be used on a field with the schema +// checker `schema.StringMap(schema.StringMap(schema.Any())`. +func convertToMapOfMaps(field interface{}) map[string]map[string]interface{} { + if field == nil { + return nil + } + fieldMap := field.(map[string]interface{}) + result := make(map[string]map[string]interface{}) + for key, value := range fieldMap { + result[key] = value.(map[string]interface{}) + } + return result +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/core/description/sshhostkey.go juju-core-2.0.0/src/github.com/juju/juju/core/description/sshhostkey.go --- juju-core-2.0~beta15/src/github.com/juju/juju/core/description/sshhostkey.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/core/description/sshhostkey.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,107 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package description + +import ( + "github.com/juju/errors" + "github.com/juju/schema" +) + +type sshHostKeys struct { + Version int `yaml:"version"` + SSHHostKeys_ []*sshHostKey `yaml:"ssh-host-keys"` +} + +type sshHostKey struct { + MachineID_ string `yaml:"machine-id"` + Keys_ []string `yaml:"keys"` +} + +// MachineID implements SSHHostKey. +func (i *sshHostKey) MachineID() string { + return i.MachineID_ +} + +// Keys implements SSHHostKey. 
+func (i *sshHostKey) Keys() []string { + return i.Keys_ +} + +// SSHHostKeyArgs is an argument struct used to create a +// new internal sshHostKey type that supports the SSHHostKey interface. +type SSHHostKeyArgs struct { + MachineID string + Keys []string +} + +func newSSHHostKey(args SSHHostKeyArgs) *sshHostKey { + return &sshHostKey{ + MachineID_: args.MachineID, + Keys_: args.Keys, + } +} + +func importSSHHostKeys(source map[string]interface{}) ([]*sshHostKey, error) { + checker := versionedChecker("ssh-host-keys") + coerced, err := checker.Coerce(source, nil) + if err != nil { + return nil, errors.Annotatef(err, "ssh-host-keys version schema check failed") + } + valid := coerced.(map[string]interface{}) + + version := int(valid["version"].(int64)) + importFunc, ok := sshHostKeyDeserializationFuncs[version] + if !ok { + return nil, errors.NotValidf("version %d", version) + } + sourceList := valid["ssh-host-keys"].([]interface{}) + return importSSHHostKeyList(sourceList, importFunc) +} + +func importSSHHostKeyList(sourceList []interface{}, importFunc sshHostKeyDeserializationFunc) ([]*sshHostKey, error) { + result := make([]*sshHostKey, 0, len(sourceList)) + for i, value := range sourceList { + source, ok := value.(map[string]interface{}) + if !ok { + return nil, errors.Errorf("unexpected value for ssh-host-key %d, %T", i, value) + } + sshHostKey, err := importFunc(source) + if err != nil { + return nil, errors.Annotatef(err, "ssh-host-key %d", i) + } + result = append(result, sshHostKey) + } + return result, nil +} + +type sshHostKeyDeserializationFunc func(map[string]interface{}) (*sshHostKey, error) + +var sshHostKeyDeserializationFuncs = map[int]sshHostKeyDeserializationFunc{ + 1: importSSHHostKeyV1, +} + +func importSSHHostKeyV1(source map[string]interface{}) (*sshHostKey, error) { + fields := schema.Fields{ + "machine-id": schema.String(), + "keys": schema.List(schema.String()), + } + // Some values don't have to be there. 
+ defaults := schema.Defaults{} + checker := schema.FieldMap(fields, defaults) + + coerced, err := checker.Coerce(source, nil) + if err != nil { + return nil, errors.Annotatef(err, "sshhostkey v1 schema check failed") + } + valid := coerced.(map[string]interface{}) + keysInterface := valid["keys"].([]interface{}) + keys := make([]string, len(keysInterface)) + for i, d := range keysInterface { + keys[i] = d.(string) + } + return &sshHostKey{ + MachineID_: valid["machine-id"].(string), + Keys_: keys, + }, nil +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/core/description/sshhostkeys.go juju-core-2.0.0/src/github.com/juju/juju/core/description/sshhostkeys.go --- juju-core-2.0~beta15/src/github.com/juju/juju/core/description/sshhostkeys.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/core/description/sshhostkeys.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,107 +0,0 @@ -// Copyright 2016 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package description - -import ( - "github.com/juju/errors" - "github.com/juju/schema" -) - -type sshHostKeys struct { - Version int `yaml:"version"` - SSHHostKeys_ []*sshHostKey `yaml:"sshhostkeys"` -} - -type sshHostKey struct { - MachineID_ string `yaml:"machineid"` - Keys_ []string `yaml:"keys"` -} - -// MachineID implements SSHHostKey. -func (i *sshHostKey) MachineID() string { - return i.MachineID_ -} - -// Keys implements SSHHostKey. -func (i *sshHostKey) Keys() []string { - return i.Keys_ -} - -// SSHHostKeyArgs is an argument struct used to create a -// new internal sshHostKey type that supports the SSHHostKey interface. 
-type SSHHostKeyArgs struct { - MachineID string - Keys []string -} - -func newSSHHostKey(args SSHHostKeyArgs) *sshHostKey { - return &sshHostKey{ - MachineID_: args.MachineID, - Keys_: args.Keys, - } -} - -func importSSHHostKeys(source map[string]interface{}) ([]*sshHostKey, error) { - checker := versionedChecker("sshhostkeys") - coerced, err := checker.Coerce(source, nil) - if err != nil { - return nil, errors.Annotatef(err, "sshhostkeys version schema check failed") - } - valid := coerced.(map[string]interface{}) - - version := int(valid["version"].(int64)) - importFunc, ok := sshHostKeyDeserializationFuncs[version] - if !ok { - return nil, errors.NotValidf("version %d", version) - } - sourceList := valid["sshhostkeys"].([]interface{}) - return importSSHHostKeyList(sourceList, importFunc) -} - -func importSSHHostKeyList(sourceList []interface{}, importFunc sshHostKeyDeserializationFunc) ([]*sshHostKey, error) { - result := make([]*sshHostKey, 0, len(sourceList)) - for i, value := range sourceList { - source, ok := value.(map[string]interface{}) - if !ok { - return nil, errors.Errorf("unexpected value for sshhostkey %d, %T", i, value) - } - sshHostKey, err := importFunc(source) - if err != nil { - return nil, errors.Annotatef(err, "sshhostkey %d", i) - } - result = append(result, sshHostKey) - } - return result, nil -} - -type sshHostKeyDeserializationFunc func(map[string]interface{}) (*sshHostKey, error) - -var sshHostKeyDeserializationFuncs = map[int]sshHostKeyDeserializationFunc{ - 1: importSSHHostKeyV1, -} - -func importSSHHostKeyV1(source map[string]interface{}) (*sshHostKey, error) { - fields := schema.Fields{ - "machineid": schema.String(), - "keys": schema.List(schema.String()), - } - // Some values don't have to be there. 
- defaults := schema.Defaults{} - checker := schema.FieldMap(fields, defaults) - - coerced, err := checker.Coerce(source, nil) - if err != nil { - return nil, errors.Annotatef(err, "sshhostkey v1 schema check failed") - } - valid := coerced.(map[string]interface{}) - keysInterface := valid["keys"].([]interface{}) - keys := make([]string, len(keysInterface)) - for i, d := range keysInterface { - keys[i] = d.(string) - } - return &sshHostKey{ - MachineID_: valid["machineid"].(string), - Keys_: keys, - }, nil -} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/core/description/sshhostkey_test.go juju-core-2.0.0/src/github.com/juju/juju/core/description/sshhostkey_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/core/description/sshhostkey_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/core/description/sshhostkey_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -17,13 +17,13 @@ func (s *SSHHostKeySerializationSuite) SetUpTest(c *gc.C) { s.SliceSerializationSuite.SetUpTest(c) - s.importName = "sshhostkeys" - s.sliceName = "sshhostkeys" + s.importName = "ssh-host-keys" + s.sliceName = "ssh-host-keys" s.importFunc = func(m map[string]interface{}) (interface{}, error) { return importSSHHostKeys(m) } s.testFields = func(m map[string]interface{}) { - m["sshhostkeys"] = []interface{}{} + m["ssh-host-keys"] = []interface{}{} } } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/core/description/storageconstraint.go juju-core-2.0.0/src/github.com/juju/juju/core/description/storageconstraint.go --- juju-core-2.0~beta15/src/github.com/juju/juju/core/description/storageconstraint.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/core/description/storageconstraint.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,111 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+ +package description + +import ( + "github.com/juju/errors" + "github.com/juju/schema" +) + +// StorageConstraintArgs is an argument struct used to create a new internal +// storageconstraint type that supports the StorageConstraint interface. +type StorageConstraintArgs struct { + Pool string + Size uint64 + Count uint64 +} + +func newStorageConstraint(args StorageConstraintArgs) *storageconstraint { + return &storageconstraint{ + Version: 1, + Pool_: args.Pool, + Size_: args.Size, + Count_: args.Count, + } +} + +type storageconstraint struct { + Version int `yaml:"version"` + + Pool_ string `yaml:"pool"` + Size_ uint64 `yaml:"size"` + Count_ uint64 `yaml:"count"` +} + +// Pool implements StorageConstraint. +func (s *storageconstraint) Pool() string { + return s.Pool_ +} + +// Size implements StorageConstraint. +func (s *storageconstraint) Size() uint64 { + return s.Size_ +} + +// Count implements StorageConstraint. +func (s *storageconstraint) Count() uint64 { + return s.Count_ +} + +func importStorageConstraints(sourceMap map[string]interface{}) (map[string]*storageconstraint, error) { + result := make(map[string]*storageconstraint) + for key, value := range sourceMap { + source, ok := value.(map[string]interface{}) + if !ok { + return nil, errors.Errorf("unexpected value for storageconstraint %q, %T", key, value) + } + constraint, err := importStorageConstraint(source) + if err != nil { + return nil, errors.Trace(err) + } + result[key] = constraint + } + return result, nil +} + +// importStorageConstraint constructs a new StorageConstraint from a map representing a serialised +// StorageConstraint instance. 
+func importStorageConstraint(source map[string]interface{}) (*storageconstraint, error) { + version, err := getVersion(source) + if err != nil { + return nil, errors.Annotate(err, "storageconstraint version schema check failed") + } + + importFunc, ok := storageconstraintDeserializationFuncs[version] + if !ok { + return nil, errors.NotValidf("version %d", version) + } + + return importFunc(source) +} + +type storageconstraintDeserializationFunc func(map[string]interface{}) (*storageconstraint, error) + +var storageconstraintDeserializationFuncs = map[int]storageconstraintDeserializationFunc{ + 1: importStorageConstraintV1, +} + +func importStorageConstraintV1(source map[string]interface{}) (*storageconstraint, error) { + fields := schema.Fields{ + "pool": schema.String(), + "size": schema.Uint(), + "count": schema.Uint(), + } + checker := schema.FieldMap(fields, nil) + + coerced, err := checker.Coerce(source, nil) + if err != nil { + return nil, errors.Annotatef(err, "storageconstraint v1 schema check failed") + } + valid := coerced.(map[string]interface{}) + // From here we know that the map returned from the schema coercion + // contains fields of the right type. + + return &storageconstraint{ + Version: 1, + Pool_: valid["pool"].(string), + Size_: valid["size"].(uint64), + Count_: valid["count"].(uint64), + }, nil +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/core/description/storageconstraint_test.go juju-core-2.0.0/src/github.com/juju/juju/core/description/storageconstraint_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/core/description/storageconstraint_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/core/description/storageconstraint_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,74 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+ +package description + +import ( + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + "gopkg.in/yaml.v2" +) + +type StorageConstraintSerializationSuite struct { + SerializationSuite +} + +var _ = gc.Suite(&StorageConstraintSerializationSuite{}) + +func (s *StorageConstraintSerializationSuite) SetUpTest(c *gc.C) { + s.SerializationSuite.SetUpTest(c) + s.importName = "storageconstraint" + s.importFunc = func(m map[string]interface{}) (interface{}, error) { + return importStorageConstraint(m) + } + s.testFields = func(m map[string]interface{}) { + m["pool"] = "" + m["size"] = 0 + m["count"] = 0 + } +} + +func (s *StorageConstraintSerializationSuite) TestMissingValue(c *gc.C) { + testMap := s.makeMap(1) + delete(testMap, "pool") + _, err := importStorageConstraint(testMap) + c.Check(err.Error(), gc.Equals, "storageconstraint v1 schema check failed: pool: expected string, got nothing") +} + +func (*StorageConstraintSerializationSuite) TestParsing(c *gc.C) { + addr, err := importStorageConstraint(map[string]interface{}{ + "version": 1, + "pool": "olympic", + "size": 50, + "count": 2, + }) + c.Assert(err, jc.ErrorIsNil) + expected := &storageconstraint{ + Version: 1, + Pool_: "olympic", + Size_: 50, + Count_: 2, + } + c.Assert(addr, jc.DeepEquals, expected) +} + +func (*StorageConstraintSerializationSuite) TestParsingSerializedData(c *gc.C) { + initial := &storageconstraint{ + Version: 1, + Pool_: "olympic", + Size_: 50, + Count_: 2, + } + + bytes, err := yaml.Marshal(initial) + c.Assert(err, jc.ErrorIsNil) + + var source map[string]interface{} + err = yaml.Unmarshal(bytes, &source) + c.Assert(err, jc.ErrorIsNil) + + storageconstraints, err := importStorageConstraint(source) + c.Assert(err, jc.ErrorIsNil) + + c.Assert(storageconstraints, jc.DeepEquals, initial) +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/core/description/storage.go juju-core-2.0.0/src/github.com/juju/juju/core/description/storage.go --- 
juju-core-2.0~beta15/src/github.com/juju/juju/core/description/storage.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/core/description/storage.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,169 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package description + +import ( + "github.com/juju/errors" + "github.com/juju/schema" + "gopkg.in/juju/names.v2" +) + +type storages struct { + Version int `yaml:"version"` + Storages_ []*storage `yaml:"storages"` +} + +type storage struct { + ID_ string `yaml:"id"` + Kind_ string `yaml:"kind"` + Owner_ string `yaml:"owner"` + Name_ string `yaml:"name"` + + Attachments_ []string `yaml:"attachments"` +} + +// StorageArgs is an argument struct used to add a storage to the Model. +type StorageArgs struct { + Tag names.StorageTag + Kind string + Owner names.Tag + Name string + Attachments []names.UnitTag +} + +func newStorage(args StorageArgs) *storage { + s := &storage{ + ID_: args.Tag.Id(), + Kind_: args.Kind, + Name_: args.Name, + } + if args.Owner != nil { + s.Owner_ = args.Owner.String() + } + for _, unit := range args.Attachments { + s.Attachments_ = append(s.Attachments_, unit.Id()) + } + return s +} + +// Tag implements Storage. +func (s *storage) Tag() names.StorageTag { + return names.NewStorageTag(s.ID_) +} + +// Kind implements Storage. +func (s *storage) Kind() string { + return s.Kind_ +} + +// Owner implements Storage. +func (s *storage) Owner() (names.Tag, error) { + if s.Owner_ == "" { + return nil, nil + } + tag, err := names.ParseTag(s.Owner_) + if err != nil { + return nil, errors.Trace(err) + } + return tag, nil +} + +// Name implements Storage. +func (s *storage) Name() string { + return s.Name_ +} + +// Attachments implements Storage. 
+func (s *storage) Attachments() []names.UnitTag { + var result []names.UnitTag + for _, unit := range s.Attachments_ { + result = append(result, names.NewUnitTag(unit)) + } + return result +} + +// Validate implements Storage. +func (s *storage) Validate() error { + if s.ID_ == "" { + return errors.NotValidf("storage missing id") + } + if s.Owner_ == "" { + return errors.NotValidf("storage %q missing owner", s.ID_) + } + // Also check that the owner and attachments are valid. + if _, err := s.Owner(); err != nil { + return errors.Wrap(err, errors.NotValidf("storage %q invalid owner", s.ID_)) + } + return nil +} + +func importStorages(source map[string]interface{}) ([]*storage, error) { + checker := versionedChecker("storages") + coerced, err := checker.Coerce(source, nil) + if err != nil { + return nil, errors.Annotatef(err, "storages version schema check failed") + } + valid := coerced.(map[string]interface{}) + + version := int(valid["version"].(int64)) + importFunc, ok := storageDeserializationFuncs[version] + if !ok { + return nil, errors.NotValidf("version %d", version) + } + sourceList := valid["storages"].([]interface{}) + return importStorageList(sourceList, importFunc) +} + +func importStorageList(sourceList []interface{}, importFunc storageDeserializationFunc) ([]*storage, error) { + result := make([]*storage, 0, len(sourceList)) + for i, value := range sourceList { + source, ok := value.(map[string]interface{}) + if !ok { + return nil, errors.Errorf("unexpected value for storage %d, %T", i, value) + } + storage, err := importFunc(source) + if err != nil { + return nil, errors.Annotatef(err, "storage %d", i) + } + result = append(result, storage) + } + return result, nil +} + +type storageDeserializationFunc func(map[string]interface{}) (*storage, error) + +var storageDeserializationFuncs = map[int]storageDeserializationFunc{ + 1: importStorageV1, +} + +func importStorageV1(source map[string]interface{}) (*storage, error) { + fields := schema.Fields{ + 
"id": schema.String(), + "kind": schema.String(), + "owner": schema.String(), + "name": schema.String(), + "attachments": schema.List(schema.String()), + } + + // Normally a list would have defaults, but in this case storage + // should always have at least one attachment. + checker := schema.FieldMap(fields, nil) // no defaults + + coerced, err := checker.Coerce(source, nil) + if err != nil { + return nil, errors.Annotatef(err, "storage v1 schema check failed") + } + valid := coerced.(map[string]interface{}) + // From here we know that the map returned from the schema coercion + // contains fields of the right type. + result := &storage{ + ID_: valid["id"].(string), + Kind_: valid["kind"].(string), + Owner_: valid["owner"].(string), + Name_: valid["name"].(string), + Attachments_: convertToStringSlice(valid["attachments"]), + } + + return result, nil +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/core/description/storagepool.go juju-core-2.0.0/src/github.com/juju/juju/core/description/storagepool.go --- juju-core-2.0~beta15/src/github.com/juju/juju/core/description/storagepool.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/core/description/storagepool.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,115 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package description + +import ( + "github.com/juju/errors" + "github.com/juju/schema" +) + +type storagepools struct { + Version int `yaml:"version"` + Pools_ []*storagepool `yaml:"pools"` +} + +type storagepool struct { + Name_ string `yaml:"name"` + Provider_ string `yaml:"provider"` + Attributes_ map[string]interface{} `yaml:"attributes"` +} + +// StoragePoolArgs is an argument struct used to add a storage pool to the +// Model. 
+type StoragePoolArgs struct { + Name string + Provider string + Attributes map[string]interface{} +} + +func newStoragePool(args StoragePoolArgs) *storagepool { + return &storagepool{ + Name_: args.Name, + Provider_: args.Provider, + Attributes_: args.Attributes, + } +} + +// Name implements StoragePool. +func (s *storagepool) Name() string { + return s.Name_ +} + +// Provider implements StoragePool. +func (s *storagepool) Provider() string { + return s.Provider_ +} + +// Name implements StoragePool. +func (s *storagepool) Attributes() map[string]interface{} { + return s.Attributes_ +} + +func importStoragePools(source map[string]interface{}) ([]*storagepool, error) { + checker := versionedChecker("pools") + coerced, err := checker.Coerce(source, nil) + if err != nil { + return nil, errors.Annotatef(err, "storagepools version schema check failed") + } + valid := coerced.(map[string]interface{}) + + version := int(valid["version"].(int64)) + importFunc, ok := storagePoolDeserializationFuncs[version] + if !ok { + return nil, errors.NotValidf("version %d", version) + } + sourceList := valid["pools"].([]interface{}) + return importStoragePoolList(sourceList, importFunc) +} + +func importStoragePoolList(sourceList []interface{}, importFunc storagePoolDeserializationFunc) ([]*storagepool, error) { + result := make([]*storagepool, 0, len(sourceList)) + for i, value := range sourceList { + source, ok := value.(map[string]interface{}) + if !ok { + return nil, errors.Errorf("unexpected value for storagepool %d, %T", i, value) + } + pool, err := importFunc(source) + if err != nil { + return nil, errors.Annotatef(err, "storagepool %d", i) + } + result = append(result, pool) + } + return result, nil +} + +type storagePoolDeserializationFunc func(map[string]interface{}) (*storagepool, error) + +var storagePoolDeserializationFuncs = map[int]storagePoolDeserializationFunc{ + 1: importStoragePoolV1, +} + +func importStoragePoolV1(source map[string]interface{}) (*storagepool, 
error) { + fields := schema.Fields{ + "name": schema.String(), + "provider": schema.String(), + "attributes": schema.StringMap(schema.Any()), + } + + checker := schema.FieldMap(fields, nil) // no defaults + + coerced, err := checker.Coerce(source, nil) + if err != nil { + return nil, errors.Annotatef(err, "storagepool v1 schema check failed") + } + valid := coerced.(map[string]interface{}) + // From here we know that the map returned from the schema coercion + // contains fields of the right type. + result := &storagepool{ + Name_: valid["name"].(string), + Provider_: valid["provider"].(string), + Attributes_: valid["attributes"].(map[string]interface{}), + } + + return result, nil +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/core/description/storagepool_test.go juju-core-2.0.0/src/github.com/juju/juju/core/description/storagepool_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/core/description/storagepool_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/core/description/storagepool_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,78 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+ +package description + +import ( + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + "gopkg.in/yaml.v2" +) + +type StoragePoolSerializationSuite struct { + SliceSerializationSuite +} + +var _ = gc.Suite(&StoragePoolSerializationSuite{}) + +func (s *StoragePoolSerializationSuite) SetUpTest(c *gc.C) { + s.SliceSerializationSuite.SetUpTest(c) + s.importName = "storagepools" + s.sliceName = "pools" + s.importFunc = func(m map[string]interface{}) (interface{}, error) { + return importStoragePools(m) + } + s.testFields = func(m map[string]interface{}) { + m["pools"] = []interface{}{} + } +} + +func testStoragePool() *storagepool { + v := newStoragePool(testStoragePoolArgs()) + return v +} + +func testStoragePoolArgs() StoragePoolArgs { + return StoragePoolArgs{ + Name: "test", + Provider: "magic", + Attributes: map[string]interface{}{ + "method": "madness", + }, + } +} + +func (s *StoragePoolSerializationSuite) TestNewStoragePool(c *gc.C) { + storagepool := testStoragePool() + + c.Check(storagepool.Name(), gc.Equals, "test") + c.Check(storagepool.Provider(), gc.Equals, "magic") + c.Check(storagepool.Attributes(), jc.DeepEquals, map[string]interface{}{ + "method": "madness", + }) +} + +func (s *StoragePoolSerializationSuite) exportImport(c *gc.C, storagepool_ *storagepool) *storagepool { + initial := storagepools{ + Version: 1, + Pools_: []*storagepool{storagepool_}, + } + + bytes, err := yaml.Marshal(initial) + c.Assert(err, jc.ErrorIsNil) + + var source map[string]interface{} + err = yaml.Unmarshal(bytes, &source) + c.Assert(err, jc.ErrorIsNil) + + storagepools, err := importStoragePools(source) + c.Assert(err, jc.ErrorIsNil) + c.Assert(storagepools, gc.HasLen, 1) + return storagepools[0] +} + +func (s *StoragePoolSerializationSuite) TestParsingSerializedData(c *gc.C) { + original := testStoragePool() + storagepool := s.exportImport(c, original) + c.Assert(storagepool, jc.DeepEquals, original) +} diff -Nru 
juju-core-2.0~beta15/src/github.com/juju/juju/core/description/storage_test.go juju-core-2.0.0/src/github.com/juju/juju/core/description/storage_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/core/description/storage_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/core/description/storage_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,123 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package description + +import ( + "github.com/juju/errors" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + "gopkg.in/juju/names.v2" + "gopkg.in/yaml.v2" +) + +type StorageSerializationSuite struct { + SliceSerializationSuite +} + +var _ = gc.Suite(&StorageSerializationSuite{}) + +func (s *StorageSerializationSuite) SetUpTest(c *gc.C) { + s.SliceSerializationSuite.SetUpTest(c) + s.importName = "storages" + s.sliceName = "storages" + s.importFunc = func(m map[string]interface{}) (interface{}, error) { + return importStorages(m) + } + s.testFields = func(m map[string]interface{}) { + m["storages"] = []interface{}{} + } +} + +func testStorageMap() map[interface{}]interface{} { + return map[interface{}]interface{}{ + "id": "db/0", + "kind": "magic", + "owner": "application-postgresql", + "name": "db", + "attachments": []interface{}{ + "postgresql/0", + "postgresql/1", + }, + } +} + +func testStorage() *storage { + v := newStorage(testStorageArgs()) + return v +} + +func testStorageArgs() StorageArgs { + return StorageArgs{ + Tag: names.NewStorageTag("db/0"), + Kind: "magic", + Owner: names.NewApplicationTag("postgresql"), + Name: "db", + Attachments: []names.UnitTag{ + names.NewUnitTag("postgresql/0"), + names.NewUnitTag("postgresql/1"), + }, + } +} + +func (s *StorageSerializationSuite) TestNewStorage(c *gc.C) { + storage := testStorage() + + c.Check(storage.Tag(), gc.Equals, names.NewStorageTag("db/0")) + c.Check(storage.Kind(), gc.Equals, "magic") + owner, 
err := storage.Owner() + c.Check(err, jc.ErrorIsNil) + c.Check(owner, gc.Equals, names.NewApplicationTag("postgresql")) + c.Check(storage.Name(), gc.Equals, "db") + c.Check(storage.Attachments(), jc.DeepEquals, []names.UnitTag{ + names.NewUnitTag("postgresql/0"), + names.NewUnitTag("postgresql/1"), + }) +} + +func (s *StorageSerializationSuite) TestStorageValid(c *gc.C) { + storage := testStorage() + c.Assert(storage.Validate(), jc.ErrorIsNil) +} + +func (s *StorageSerializationSuite) TestStorageValidMissingID(c *gc.C) { + v := newStorage(StorageArgs{}) + err := v.Validate() + c.Check(err, gc.ErrorMatches, `storage missing id not valid`) + c.Check(err, jc.Satisfies, errors.IsNotValid) +} + +func (s *StorageSerializationSuite) TestStorageMatches(c *gc.C) { + bytes, err := yaml.Marshal(testStorage()) + c.Assert(err, jc.ErrorIsNil) + + var source map[interface{}]interface{} + err = yaml.Unmarshal(bytes, &source) + c.Assert(err, jc.ErrorIsNil) + c.Assert(source, jc.DeepEquals, testStorageMap()) +} + +func (s *StorageSerializationSuite) exportImport(c *gc.C, storage_ *storage) *storage { + initial := storages{ + Version: 1, + Storages_: []*storage{storage_}, + } + + bytes, err := yaml.Marshal(initial) + c.Assert(err, jc.ErrorIsNil) + + var source map[string]interface{} + err = yaml.Unmarshal(bytes, &source) + c.Assert(err, jc.ErrorIsNil) + + storages, err := importStorages(source) + c.Assert(err, jc.ErrorIsNil) + c.Assert(storages, gc.HasLen, 1) + return storages[0] +} + +func (s *StorageSerializationSuite) TestParsingSerializedData(c *gc.C) { + original := testStorage() + storage := s.exportImport(c, original) + c.Assert(storage, jc.DeepEquals, original) +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/core/description/subnet.go juju-core-2.0.0/src/github.com/juju/juju/core/description/subnet.go --- juju-core-2.0~beta15/src/github.com/juju/juju/core/description/subnet.go 2016-08-16 08:56:25.000000000 +0000 +++ 
juju-core-2.0.0/src/github.com/juju/juju/core/description/subnet.go 2016-10-13 14:31:49.000000000 +0000 @@ -16,15 +16,15 @@ type subnet struct { ProviderId_ string `yaml:"provider-id,omitempty"` CIDR_ string `yaml:"cidr"` - VLANTag_ int `yaml:"vlantag"` + VLANTag_ int `yaml:"vlan-tag"` - AvailabilityZone_ string `yaml:"availabilityzone"` - SpaceName_ string `yaml:"spacename"` + AvailabilityZone_ string `yaml:"availability-zone"` + SpaceName_ string `yaml:"space-name"` // These will be deprecated once the address allocation strategy for // EC2 is changed. They are unused already on MAAS. - AllocatableIPHigh_ string `yaml:"allocatableiphigh,omitempty"` - AllocatableIPLow_ string `yaml:"allocatableiplow,omitempty"` + AllocatableIPHigh_ string `yaml:"allocatable-ip-high,omitempty"` + AllocatableIPLow_ string `yaml:"allocatable-ip-low,omitempty"` } // SubnetArgs is an argument struct used to create a @@ -130,19 +130,19 @@ func importSubnetV1(source map[string]interface{}) (*subnet, error) { fields := schema.Fields{ - "cidr": schema.String(), - "provider-id": schema.String(), - "vlantag": schema.Int(), - "spacename": schema.String(), - "availabilityzone": schema.String(), - "allocatableiphigh": schema.String(), - "allocatableiplow": schema.String(), + "cidr": schema.String(), + "provider-id": schema.String(), + "vlan-tag": schema.Int(), + "space-name": schema.String(), + "availability-zone": schema.String(), + "allocatable-ip-high": schema.String(), + "allocatable-ip-low": schema.String(), } defaults := schema.Defaults{ - "provider-id": "", - "allocatableiphigh": "", - "allocatableiplow": "", + "provider-id": "", + "allocatable-ip-high": "", + "allocatable-ip-low": "", } checker := schema.FieldMap(fields, defaults) @@ -157,10 +157,10 @@ return &subnet{ CIDR_: valid["cidr"].(string), ProviderId_: valid["provider-id"].(string), - VLANTag_: int(valid["vlantag"].(int64)), - SpaceName_: valid["spacename"].(string), - AvailabilityZone_: valid["availabilityzone"].(string), - 
AllocatableIPHigh_: valid["allocatableiphigh"].(string), - AllocatableIPLow_: valid["allocatableiplow"].(string), + VLANTag_: int(valid["vlan-tag"].(int64)), + SpaceName_: valid["space-name"].(string), + AvailabilityZone_: valid["availability-zone"].(string), + AllocatableIPHigh_: valid["allocatable-ip-high"].(string), + AllocatableIPLow_: valid["allocatable-ip-low"].(string), }, nil } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/core/description/unit.go juju-core-2.0.0/src/github.com/juju/juju/core/description/unit.go --- juju-core-2.0~beta15/src/github.com/juju/juju/core/description/unit.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/core/description/unit.go 2016-10-13 14:31:49.000000000 +0000 @@ -31,10 +31,6 @@ Principal_ string `yaml:"principal,omitempty"` Subordinates_ []string `yaml:"subordinates,omitempty"` - // TODO: - // storage constraints - // storage attachment count - PasswordHash_ string `yaml:"password-hash"` Tools_ *agentTools `yaml:"tools"` @@ -44,6 +40,8 @@ Annotations_ `yaml:"annotations,omitempty"` Constraints_ *constraints `yaml:"constraints,omitempty"` + + Payloads_ payloads `yaml:"payloads"` } // UnitArgs is an argument struct used to add a Unit to a Application in the Model. @@ -66,7 +64,7 @@ for _, s := range args.Subordinates { subordinates = append(subordinates, s.Id()) } - return &unit{ + u := &unit{ Name_: args.Tag.Id(), Machine_: args.Machine.Id(), PasswordHash_: args.PasswordHash, @@ -79,6 +77,8 @@ WorkloadVersionHistory_: newStatusHistory(), AgentStatusHistory_: newStatusHistory(), } + u.setPayloads(nil) + return u } // Tag implements Unit. @@ -218,6 +218,29 @@ u.Constraints_ = newConstraints(args) } +// AddPayload implements Unit. +func (u *unit) AddPayload(args PayloadArgs) Payload { + payload := newPayload(args) + u.Payloads_.Payloads_ = append(u.Payloads_.Payloads_, payload) + return payload +} + +// Payloads implements Unit. 
+func (u *unit) Payloads() []Payload { + var result []Payload + for _, payload := range u.Payloads_.Payloads_ { + result = append(result, payload) + } + return result +} + +func (u *unit) setPayloads(payloadList []*payload) { + u.Payloads_ = payloads{ + Version: 1, + Payloads_: payloadList, + } +} + // Validate impelements Unit. func (u *unit) Validate() error { if u.Name_ == "" { @@ -294,6 +317,8 @@ "meter-status-code": schema.String(), "meter-status-info": schema.String(), + + "payloads": schema.StringMap(schema.Any()), } defaults := schema.Defaults{ "principal": "", @@ -370,5 +395,12 @@ } result.WorkloadStatus_ = workloadStatus + payloadMap := valid["payloads"].(map[string]interface{}) + payloads, err := importPayloads(payloadMap) + if err != nil { + return nil, errors.Annotate(err, "payloads") + } + result.setPayloads(payloads) + return result, nil } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/core/description/unit_test.go juju-core-2.0.0/src/github.com/juju/juju/core/description/unit_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/core/description/unit_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/core/description/unit_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -39,6 +39,10 @@ "workload-version-history": emptyStatusHistoryMap(), "password-hash": "secure-hash", "tools": minimalAgentToolsMap(), + "payloads": map[interface{}]interface{}{ + "version": 1, + "payloads": []interface{}{}, + }, } } @@ -194,3 +198,19 @@ c.Check(point.Updated(), gc.Equals, args[i].Updated) } } + +func (s *UnitSerializationSuite) TestPayloads(c *gc.C) { + initial := minimalUnit() + expected := initial.AddPayload(allPayloadArgs()) + c.Check(expected.Name(), gc.Equals, "bob") + c.Check(expected.Type(), gc.Equals, "docker") + c.Check(expected.RawID(), gc.Equals, "d06f00d") + c.Check(expected.State(), gc.Equals, "running") + c.Check(expected.Labels(), jc.DeepEquals, []string{"auto", "foo"}) + + unit := s.exportImport(c, 
initial) + + payloads := unit.Payloads() + c.Assert(payloads, gc.HasLen, 1) + c.Assert(payloads[0], jc.DeepEquals, expected) +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/core/description/useraccess.go juju-core-2.0.0/src/github.com/juju/juju/core/description/useraccess.go --- juju-core-2.0~beta15/src/github.com/juju/juju/core/description/useraccess.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/core/description/useraccess.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,35 +0,0 @@ -// Copyright 2016 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package description - -import ( - "time" - - "gopkg.in/juju/names.v2" -) - -// UserAccess represents a user access to a target whereas the user -// could represent a remote user or a user across multiple models the -// user access always represents a single user for a single target. -// There should be no more than one UserAccess per target/user pair. -// Many of these fields are storage artifacts but generate them from -// other fields implies out of band knowledge of other packages. -type UserAccess struct { - // UserID is the stored ID of the user. - UserID string - // UserTag is the tag for the user. - UserTag names.UserTag - // Object is the tag for the object of this access grant. - Object names.Tag - // Access represents the level of access subjec has over object. - Access Access - // CreatedBy is the tag of the user that granted the access. - CreatedBy names.UserTag - // DateCreated is the date the user was created in UTC. - DateCreated time.Time - // DisplayName is the name we are showing for this user. - DisplayName string - // UserName is the actual username for this access. 
- UserName string -} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/core/description/user.go juju-core-2.0.0/src/github.com/juju/juju/core/description/user.go --- juju-core-2.0~beta15/src/github.com/juju/juju/core/description/user.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/core/description/user.go 2016-10-13 14:31:49.000000000 +0000 @@ -22,14 +22,14 @@ CreatedBy names.UserTag DateCreated time.Time LastConnection time.Time - Access Access + Access string } func newUser(args UserArgs) *user { u := &user{ - Name_: args.Name.Canonical(), + Name_: args.Name.Id(), DisplayName_: args.DisplayName, - CreatedBy_: args.CreatedBy.Canonical(), + CreatedBy_: args.CreatedBy.Id(), DateCreated_: args.DateCreated, Access_: args.Access, } @@ -45,7 +45,7 @@ DisplayName_ string `yaml:"display-name,omitempty"` CreatedBy_ string `yaml:"created-by"` DateCreated_ time.Time `yaml:"date-created"` - Access_ Access `yaml:"access"` + Access_ string `yaml:"access"` // Can't use omitempty with time.Time, it just doesn't work, // so use a pointer in the struct. LastConnection_ *time.Time `yaml:"last-connection,omitempty"` @@ -81,7 +81,7 @@ } // Access implements User. -func (u *user) Access() Access { +func (u *user) Access() string { return u.Access_ } @@ -132,7 +132,7 @@ "read-only": schema.Bool(), "date-created": schema.Time(), "last-connection": schema.Time(), - "access": accessField(), + "access": schema.String(), } // Some values don't have to be there. 
@@ -155,7 +155,7 @@ DisplayName_: valid["display-name"].(string), CreatedBy_: valid["created-by"].(string), DateCreated_: valid["date-created"].(time.Time), - Access_: valid["access"].(Access), + Access_: valid["access"].(string), } lastConn := valid["last-connection"].(time.Time) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/core/description/user_test.go juju-core-2.0.0/src/github.com/juju/juju/core/description/user_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/core/description/user_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/core/description/user_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -60,18 +60,17 @@ Version: 1, Users_: []*user{ &user{ - Name_: "admin@local", - CreatedBy_: "admin@local", + Name_: "admin", + CreatedBy_: "admin", DateCreated_: time.Date(2015, 10, 9, 12, 34, 56, 0, time.UTC), LastConnection_: &lastConn, }, &user{ - Name_: "read-only@local", + Name_: "read-only", DisplayName_: "A read only user", - CreatedBy_: "admin@local", + CreatedBy_: "admin", DateCreated_: time.Date(2015, 10, 9, 12, 34, 56, 0, time.UTC), - // We want to fail if someone breaks ReadAccess definition. 
- Access_: Access("read"), + Access_: "read", }, }, } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/core/description/volume.go juju-core-2.0.0/src/github.com/juju/juju/core/description/volume.go --- juju-core-2.0~beta15/src/github.com/juju/juju/core/description/volume.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/core/description/volume.go 2016-10-13 14:31:49.000000000 +0000 @@ -180,6 +180,9 @@ if v.Status_ == nil { return errors.NotValidf("volume %q missing status", v.ID_) } + if _, err := v.Binding(); err != nil { + return errors.Wrap(err, errors.NotValidf("volume %q binding", v.ID_)) + } return nil } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/core/migration/migration.go juju-core-2.0.0/src/github.com/juju/juju/core/migration/migration.go --- juju-core-2.0~beta15/src/github.com/juju/juju/core/migration/migration.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/core/migration/migration.go 2016-10-13 14:31:49.000000000 +0000 @@ -6,7 +6,9 @@ import ( "time" + "github.com/juju/errors" "github.com/juju/version" + "gopkg.in/juju/names.v2" ) // MigrationStatus returns the details for a migration as needed by @@ -25,6 +27,10 @@ // its current value. PhaseChangedTime time.Time + // ExternalControl indicates the current migration should be + // controlled by an external process. + ExternalControl bool + // TargetInfo contains the details of how to connect to the target // controller. TargetInfo TargetInfo @@ -45,3 +51,27 @@ // source controller. Tools map[version.Binary]string // version -> tools URI } + +// ModelInfo is used to report basic details about a model. 
+type ModelInfo struct { + UUID string + Owner names.UserTag + Name string + AgentVersion version.Number +} + +func (i *ModelInfo) Validate() error { + if i.UUID == "" { + return errors.NotValidf("empty UUID") + } + if i.Owner.Name() == "" { + return errors.NotValidf("empty Owner") + } + if i.Name == "" { + return errors.NotValidf("empty Name") + } + if i.AgentVersion.Compare(version.Number{}) == 0 { + return errors.NotValidf("empty Version") + } + return nil +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/core/migration/phase.go juju-core-2.0.0/src/github.com/juju/juju/core/migration/phase.go --- juju-core-2.0~beta15/src/github.com/juju/juju/core/migration/phase.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/core/migration/phase.go 2016-10-13 14:31:49.000000000 +0000 @@ -11,7 +11,6 @@ UNKNOWN Phase = iota NONE QUIESCE - PRECHECK IMPORT VALIDATION SUCCESS @@ -27,7 +26,6 @@ "UNKNOWN", // To catch uninitialised fields. "NONE", // For watchers to indicate there's never been a migration attempt. "QUIESCE", - "PRECHECK", "IMPORT", "VALIDATION", "SUCCESS", @@ -83,7 +81,7 @@ return false } switch p { - case QUIESCE, PRECHECK, IMPORT, VALIDATION, SUCCESS: + case QUIESCE, IMPORT, VALIDATION, SUCCESS: return true default: return false @@ -95,8 +93,7 @@ // The keys are the "from" states and the values enumerate the // possible "to" states. 
var validTransitions = map[Phase][]Phase{ - QUIESCE: {PRECHECK, ABORT}, - PRECHECK: {IMPORT, ABORT}, + QUIESCE: {IMPORT, ABORT}, IMPORT: {VALIDATION, ABORT}, VALIDATION: {SUCCESS, ABORT}, SUCCESS: {LOGTRANSFER}, diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/core/migration/phase_test.go juju-core-2.0.0/src/github.com/juju/juju/core/migration/phase_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/core/migration/phase_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/core/migration/phase_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -24,7 +24,7 @@ } func (s *PhaseSuite) TestStringValid(c *gc.C) { - c.Check(migration.PRECHECK.String(), gc.Equals, "PRECHECK") + c.Check(migration.IMPORT.String(), gc.Equals, "IMPORT") c.Check(migration.UNKNOWN.String(), gc.Equals, "UNKNOWN") c.Check(migration.ABORT.String(), gc.Equals, "ABORT") } @@ -74,8 +74,7 @@ func (s *PhaseSuite) TestCanTransitionTo(c *gc.C) { c.Check(migration.QUIESCE.CanTransitionTo(migration.SUCCESS), jc.IsFalse) c.Check(migration.QUIESCE.CanTransitionTo(migration.ABORT), jc.IsTrue) - c.Check(migration.QUIESCE.CanTransitionTo(migration.PRECHECK), jc.IsTrue) + c.Check(migration.QUIESCE.CanTransitionTo(migration.IMPORT), jc.IsTrue) c.Check(migration.QUIESCE.CanTransitionTo(migration.Phase(-1)), jc.IsFalse) - c.Check(migration.ABORT.CanTransitionTo(migration.QUIESCE), jc.IsFalse) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/core/migration/targetinfo.go juju-core-2.0.0/src/github.com/juju/juju/core/migration/targetinfo.go --- juju-core-2.0~beta15/src/github.com/juju/juju/core/migration/targetinfo.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/core/migration/targetinfo.go 2016-10-13 14:31:49.000000000 +0000 @@ -5,20 +5,22 @@ import ( "github.com/juju/errors" - "github.com/juju/juju/network" "gopkg.in/juju/names.v2" + "gopkg.in/macaroon.v1" + + "github.com/juju/juju/network" ) // TargetInfo holds the details 
required to connect to a // migration's target controller. // -// TODO(mjs) - Note the similarity to api.Info. It would be nice -// to be able to use api.Info here but state can't import api and -// moving api.Info to live under the core package is too big a project -// to be done right now. +// TODO(mjs) - Note the similarity to api.Info. It would be nice to be +// able to use api.Info here but state can't import api and moving +// api.Info to live under the core package is too big a project to be +// done right now. type TargetInfo struct { // ControllerTag holds tag for the target controller. - ControllerTag names.ModelTag + ControllerTag names.ControllerTag // Addrs holds the addresses and ports of the target controller's // API servers. @@ -34,6 +36,10 @@ // Password holds the password to use with AuthTag. Password string + + // Macaroons holds macaroons to use with AuthTag. At least one of + // Password or Macaroons must be set. + Macaroons []macaroon.Slice } // Validate returns an error if the TargetInfo contains bad data. 
Nil @@ -61,8 +67,8 @@ return errors.NotValidf("empty AuthTag") } - if info.Password == "" { - return errors.NotValidf("empty Password") + if info.Password == "" && len(info.Macaroons) == 0 { + return errors.NotValidf("missing Password & Macaroons") } return nil diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/core/migration/targetinfo_test.go juju-core-2.0.0/src/github.com/juju/juju/core/migration/targetinfo_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/core/migration/targetinfo_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/core/migration/targetinfo_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -9,6 +9,7 @@ "github.com/juju/utils" gc "gopkg.in/check.v1" "gopkg.in/juju/names.v2" + "gopkg.in/macaroon.v1" "github.com/juju/juju/core/migration" coretesting "github.com/juju/juju/testing" @@ -28,13 +29,13 @@ }{{ "empty ControllerTag", func(info *migration.TargetInfo) { - info.ControllerTag = names.NewModelTag("fooo") + info.ControllerTag = names.NewControllerTag("fooo") }, "ControllerTag not valid", }, { "invalid ControllerTag", func(info *migration.TargetInfo) { - info.ControllerTag = names.NewModelTag("") + info.ControllerTag = names.NewControllerTag("") }, "ControllerTag not valid", }, { @@ -62,30 +63,34 @@ }, "empty AuthTag not valid", }, { - "Password", + "Password & Macaroons", func(info *migration.TargetInfo) { info.Password = "" + info.Macaroons = nil }, - "empty Password not valid", + "missing Password & Macaroons not valid", }, { - "Success", + "Success - empty Password", + func(info *migration.TargetInfo) { + info.Password = "" + }, + "", + }, { + "Success - empty Macaroons", + func(info *migration.TargetInfo) { + info.Macaroons = nil + }, + "", + }, { + "Success - all set", func(*migration.TargetInfo) {}, "", }} - modelTag := names.NewModelTag(utils.MustNewUUID().String()) for _, test := range tests { c.Logf("---- %s -----------", test.label) - - info := migration.TargetInfo{ - ControllerTag: 
modelTag, - Addrs: []string{"1.2.3.4:5555", "4.3.2.1:6666"}, - CACert: "cert", - AuthTag: names.NewUserTag("user"), - Password: "password", - } + info := makeValidTargetInfo(c) test.tweakInfo(&info) - err := info.Validate() if test.errorPattern == "" { c.Check(err, jc.ErrorIsNil) @@ -95,3 +100,16 @@ } } } + +func makeValidTargetInfo(c *gc.C) migration.TargetInfo { + mac, err := macaroon.New([]byte("secret"), "id", "location") + c.Assert(err, jc.ErrorIsNil) + return migration.TargetInfo{ + ControllerTag: names.NewControllerTag(utils.MustNewUUID().String()), + Addrs: []string{"1.2.3.4:5555"}, + CACert: "cert", + AuthTag: names.NewUserTag("user"), + Password: "password", + Macaroons: []macaroon.Slice{{mac}}, + } +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/dependencies.tsv juju-core-2.0.0/src/github.com/juju/juju/dependencies.tsv --- juju-core-2.0~beta15/src/github.com/juju/juju/dependencies.tsv 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/dependencies.tsv 2016-10-13 14:31:49.000000000 +0000 @@ -1,11 +1,15 @@ -github.com/Azure/azure-sdk-for-go git 3b480eaaf6b4236d43a3c06cba969da6f53c8b66 2015-11-23T16:56:25Z +github.com/Azure/azure-sdk-for-go git 902d95d9f311ae585ee98cfd18f418b467d60d5a 2016-07-20T05:16:58Z +github.com/Azure/go-autorest git 6f40a8acfe03270d792cb8155e2942c09d7cff95 2016-07-19T23:14:56Z github.com/ajstarks/svgo git 89e3ac64b5b3e403a5e7c35ea4f98d45db7b4518 2014-10-04T21:11:59Z github.com/altoros/gosigma git 31228935eec685587914528585da4eb9b073c76d 2015-04-08T14:52:32Z +github.com/beorn7/perks git 3ac7bf7a47d159a033b107610db8a1b6575507a4 2016-02-29T21:34:45Z github.com/bmizerany/pat git c068ca2f0aacee5ac3681d68e4d0a003b7d1fd2c 2016-02-17T10:32:42Z github.com/coreos/go-systemd git 7b2428fec40033549c68f54e26e89e7ca9a9ce31 2016-02-02T21:14:25Z +github.com/dgrijalva/jwt-go git 01aeca54ebda6e0fbfafd0a524d234159c05ec20 2016-07-05T20:30:06Z github.com/dustin/go-humanize git 
145fabdb1ab757076a70a886d092a3af27f66f4c 2014-12-28T07:11:48Z github.com/gabriel-samfira/sys git 9ddc60d56b511544223adecea68da1e4f2153beb 2015-06-08T13:21:19Z github.com/godbus/dbus git 32c6cc29c14570de4cf6d7e7737d68fb2d01ad15 2016-05-06T22:25:50Z +github.com/golang/protobuf git 34a5f244f1c01cdfee8e60324258cfbb97a42aec 2015-05-26T01:21:09Z github.com/google/go-querystring git 9235644dd9e52eeae6fa48efd539fdc351a0af53 2016-04-01T23:30:42Z github.com/gorilla/schema git 08023a0215e7fc27a9aecd8b8c50913c40019478 2016-04-26T23:15:12Z github.com/gorilla/websocket git 13e4d0621caa4d77fd9aa470ef6d7ab63d1a5e41 2015-09-23T22:29:30Z @@ -13,64 +17,73 @@ github.com/joyent/gocommon git ade826b8b54e81a779ccb29d358a45ba24b7809c 2016-03-20T19:31:33Z github.com/joyent/gosdc git 2f11feadd2d9891e92296a1077c3e2e56939547d 2014-05-24T00:08:15Z github.com/joyent/gosign git 0da0d5f1342065321c97812b1f4ac0c2b0bab56c 2014-05-24T00:07:34Z +github.com/juju/ansiterm git b99631de12cf04a906c1d4e4ec54fb86eae5863d 2016-09-07T23:45:32Z github.com/juju/blobstore git 06056004b3d7b54bbb7984d830c537bad00fec21 2015-07-29T11:18:58Z github.com/juju/bundlechanges git 6791af0ab78efe88ff99c2a0095208b3b7a32055 2016-07-20T09:32:50Z -github.com/juju/cmd git 035efd5daac768531ef240ab9e5ee32e3498fbef 2016-08-02T03:51:17Z +github.com/juju/cmd git 1c6973d59b804e4d3c293fbf240f067e73436bc9 2016-08-23T10:31:14Z github.com/juju/errors git 1b5e39b83d1835fa480e0c2ddefb040ee82d58b3 2015-09-16T12:56:42Z +github.com/juju/gnuflag git 4e76c56581859c14d9d87e1ddbe29e1c0f10195f 2016-08-09T16:52:14Z github.com/juju/go4 git 40d72ab9641a2a8c36a9c46a51e28367115c8e59 2016-02-22T16:32:58Z github.com/juju/gojsonpointer git afe8b77aa08f272b49e01b82de78510c11f61500 2015-02-04T19:46:29Z github.com/juju/gojsonreference git f0d24ac5ee330baa21721cdff56d45e4ee42628e 2015-02-04T19:46:33Z github.com/juju/gojsonschema git e1ad140384f254c82f89450d9a7c8dd38a632838 2015-03-12T17:00:16Z -github.com/juju/gomaasapi git 
c4008a71e7212cb6a99a9c17bb218034927d82b7 2016-07-28T00:29:23Z +github.com/juju/gomaasapi git 8c484173e0870fc49c9214c56c6ae8dc9c26463d 2016-09-19T18:34:33Z github.com/juju/govmomi git 4354a88d4b34abe467215f77c2fc1cb9f78b66f7 2015-04-24T01:54:48Z github.com/juju/httpprof git 14bf14c307672fd2456bdbf35d19cf0ccd3cf565 2014-12-17T16:00:36Z -github.com/juju/httprequest git 796aaafaf712f666df58d31a482c51233038bf9f 2016-05-03T15:03:27Z +github.com/juju/httprequest git 266fd1e9debf09c037a63f074d099a2da4559ece 2016-10-06T15:09:09Z github.com/juju/idmclient git 3dda079a75cccb85083d4c3877e638f5d6ab79c2 2016-05-26T05:00:34Z -github.com/juju/loggo git 15901ae4de786d05edae84a27c93d3fbef66c91e 2016-08-04T22:15:26Z +github.com/juju/loggo git 3b7ece48644d35850f4ced4c2cbc2cf8413f58e0 2016-08-18T02:57:24Z github.com/juju/mempool git 24974d6c264fe5a29716e7d56ea24c4bd904b7cc 2016-02-05T10:49:27Z github.com/juju/mutex git 59c26ee163447c5c57f63ff71610d433862013de 2016-06-17T01:09:07Z -github.com/juju/persistent-cookiejar git e710b897c13ca52828ca2fc9769465186fd6d15c 2016-03-31T17:12:27Z +github.com/juju/persistent-cookiejar git b48f5b9290d63455d10de0c0e4c26e06e6e74842 2016-10-07T16:41:21Z github.com/juju/replicaset git fb7294cf57a1e2f08a57691f1246d129a87ab7e8 2015-05-08T02:21:43Z github.com/juju/retry git 62c62032529169c7ec02fa48f93349604c345e1f 2015-10-29T02:48:21Z github.com/juju/rfc git ebdbbdb950cd039a531d15cdc2ac2cbd94f068ee 2016-07-11T02:42:13Z -github.com/juju/romulus git f790f93d956741903ce5b1f027df4c9404227d55 2016-08-08T09:53:44Z +github.com/juju/romulus git bf7827fa2f360ab762c134766ff1d4fff959ea03 2016-08-17T23:29:48Z github.com/juju/schema git 075de04f9b7d7580d60a1e12a0b3f50bb18e6998 2016-04-20T04:42:03Z github.com/juju/terms-client git 9b925afd677234e4146dde3cb1a11e187cbed64e 2016-08-09T13:19:00Z -github.com/juju/testing git d325c22badd4ba3a5fde01d479b188c7a06df755 2016-08-02T03:47:59Z -github.com/juju/txn git 99ec629d0066a4d73c54d8e021a7fc1dc07df614 2015-06-09T16:58:27Z 
+github.com/juju/testing git 692d58e72934a2e2b56f663259696e035e6351ff 2016-09-30T14:09:10Z +github.com/juju/txn git 18d812a45ffc407a4d5f849036b7d8d12febaf08 2016-09-13T21:23:40Z github.com/juju/usso git 68a59c96c178fbbad65926e7f93db50a2cd14f33 2016-04-01T10:44:24Z -github.com/juju/utils git 10adcbfe55417518543ed3c3341de2c7db0a3450 2016-07-29T19:45:31Z +github.com/juju/utils git 406e7197d0690a3f28c5a147138774eec4c1355e 2016-09-26T13:08:26Z github.com/juju/version git 4ae6172c00626779a5a462c3e3d22fc0e889431a 2016-06-03T19:49:58Z github.com/juju/webbrowser git 54b8c57083b4afb7dc75da7f13e2967b2606a507 2016-03-09T14:36:29Z github.com/juju/xml git eb759a627588d35166bc505fceb51b88500e291e 2015-04-13T13:11:21Z github.com/juju/zip git f6b1e93fa2e29a1d7d49b566b2b51efb060c982a 2016-02-05T10:52:21Z github.com/julienschmidt/httprouter git 77a895ad01ebc98a4dc95d8355bc825ce80a56f6 2015-10-13T22:55:20Z -github.com/lxc/lxd git 62f62e9d6e0da14947023f99764eac29c26cef8d 2016-03-28T00:14:48Z +github.com/lunixbochs/vtclean git 4fbf7632a2c6d3fbdb9931439bdbbeded02cbe36 2016-01-25T03:51:06Z +github.com/lxc/lxd git 95a324a23696e937c466996d57554e3677b3c84a 2016-10-11T20:54:09Z +github.com/mattn/go-colorable git ed8eb9e318d7a84ce5915b495b7d35e0cfe7b5a8 2016-07-31T23:54:17Z +github.com/mattn/go-isatty git 66b8e73f3f5cda9f96b69efd03dd3d7fc4a5cdb8 2016-08-06T12:27:52Z github.com/mattn/go-runewidth git d96d1bd051f2bd9e7e43d602782b37b93b1b5666 2015-11-18T07:21:59Z +github.com/matttproud/golang_protobuf_extensions git c12348ce28de40eed0136aa2b644d0ee0650e56c 2016-04-24T11:30:07Z +github.com/prometheus/client_golang git b90ee0840e8e7dfb84c08d13b9c4f3a794586a21 2016-05-13T04:20:11Z +github.com/prometheus/client_model git fa8ad6fec33561be4280a8f0514318c79d7f6cb6 2015-02-12T10:17:44Z +github.com/prometheus/common git dd586c1c5abb0be59e60f942c22af711a2008cb4 2016-05-03T22:05:32Z +github.com/prometheus/procfs git abf152e5f3e97f2fafac028d2cc06c1feb87ffa5 2016-04-11T19:08:41Z github.com/rogpeppe/fastuuid 
git 6724a57986aff9bff1a1770e9347036def7c89f6 2015-01-06T09:32:20Z -golang.org/x/crypto git aedad9a179ec1ea11b7064c57cbc6dc30d7724ec 2015-08-30T18:06:42Z +golang.org/x/crypto git 8e06e8ddd9629eb88639aba897641bff8031f1d3 2016-09-22T17:06:29Z golang.org/x/net git ea47fc708ee3e20177f3ca3716217c4ab75942cb 2015-08-29T23:03:18Z golang.org/x/oauth2 git 11c60b6f71a6ad48ed6f93c65fa4c6f9b1b5b46a 2015-03-25T02:00:22Z google.golang.org/api git 0d3983fb069cb6651353fc44c5cb604e263f2a93 2014-12-10T23:51:26Z google.golang.org/cloud git f20d6dcccb44ed49de45ae3703312cb46e627db1 2015-03-19T22:36:35Z -gopkg.in/amz.v3 git a651c43e72df7778b14ac6b54e5ac119d32b1263 2016-04-20T02:16:08Z +gopkg.in/amz.v3 git 18899065239e006cc73b0e66800c98c2ce4eee50 2016-10-06T07:29:34Z gopkg.in/check.v1 git 4f90aeace3a26ad7021961c297b22c42160c7b25 2016-01-05T16:49:36Z gopkg.in/errgo.v1 git 66cb46252b94c1f3d65646f54ee8043ab38d766c 2015-10-07T15:31:57Z gopkg.in/goose.v1 git 495e6fa2ab89bc5ed2c8e1bbcbc4c9e4a3c97d37 2016-03-17T17:25:46Z gopkg.in/ini.v1 git 776aa739ce9373377cd16f526cdf06cb4c89b40f 2016-02-22T23:24:41Z gopkg.in/juju/blobstore.v2 git 51fa6e26128d74e445c72d3a91af555151cc3654 2016-01-25T02:37:03Z -gopkg.in/juju/charm.v6-unstable git a3bb92d047b0892452b6a39ece59b4d3a2ac35b9 2016-07-22T08:34:31Z -gopkg.in/juju/charmrepo.v2-unstable git 6e6733987fb03100f30e494cc1134351fe4a593b 2016-05-30T23:07:41Z -gopkg.in/juju/charmstore.v5-unstable git 2cb9f80553dddaae8c5e2161ea45f4be5d9afc00 2016-05-27T11:46:22Z +gopkg.in/juju/charm.v6-unstable git 83771c4919d6810bce5b7e63f46bea5fbfed0b93 2016-10-03T20:31:18Z +gopkg.in/juju/charmrepo.v2-unstable git 73c1113f7ddee0306f4b3c19773d35a3f153c04a 2016-08-11T14:04:21Z +gopkg.in/juju/charmstore.v5-unstable git fd1eef3002fc6b6daff5e97efab6f5056d22dcc7 2016-09-16T10:09:07Z gopkg.in/juju/environschema.v1 git 7359fc7857abe2b11b5b3e23811a9c64cb6b01e0 2015-11-04T11:58:10Z -gopkg.in/juju/jujusvg.v1 git cc128825adce31ea13020d24e7b3302bac86a8c3 2016-05-30T22:53:36Z 
-gopkg.in/juju/names.v2 git 3e0d33a444fec55aea7269b849eb22da41e73072 2016-07-18T22:31:20Z -gopkg.in/macaroon-bakery.v1 git b097c9d99b2537efaf54492e08f7e148f956ba51 2016-05-24T09:38:11Z +gopkg.in/juju/jujusvg.v2 git d82160011935ef79fc7aca84aba2c6f74700fe75 2016-06-09T10:52:15Z +gopkg.in/juju/names.v2 git 3317ff7471a685109e262892b5f81b940ad5782f 2016-10-05T07:27:06Z +gopkg.in/macaroon-bakery.v1 git 469b44e6f1f9479e115c8ae879ef80695be624d5 2016-06-22T12:14:21Z gopkg.in/macaroon.v1 git ab3940c6c16510a850e1c2dd628b919f0f3f1464 2015-01-21T11:42:31Z -gopkg.in/mgo.v2 git 29cc868a5ca65f401ff318143f9408d02f4799cc 2016-06-09T18:00:28Z +gopkg.in/mgo.v2 git f2b6f6c918c452ad107eec89615f074e3bd80e33 2016-08-18T01:52:18Z gopkg.in/natefinch/lumberjack.v2 git 514cbda263a734ae8caac038dadf05f8f3f9f738 2016-01-25T11:17:49Z gopkg.in/natefinch/npipe.v2 git c1b8fa8bdccecb0b8db834ee0b92fdbcfa606dd6 2016-06-21T03:49:01Z +gopkg.in/tomb.v1 git dd632973f1e7218eb1089048e0798ec9ae7dceb8 2014-10-24T13:56:13Z gopkg.in/yaml.v1 git 9f9df34309c04878acc86042b16630b0f696e1de 2014-09-24T16:16:07Z gopkg.in/yaml.v2 git a83829b6f1293c91addabc89d0571c246397bbf4 2016-03-01T20:40:22Z -launchpad.net/gnuflag bzr roger.peppe@canonical.com-20140716064605-pk32dnmfust02yab 13 -launchpad.net/tomb bzr gustavo@niemeyer.net-20140529072043-hzcrlnl3ygvg914q 18 diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/doc/juju-core-release-process.txt juju-core-2.0.0/src/github.com/juju/juju/doc/juju-core-release-process.txt --- juju-core-2.0~beta15/src/github.com/juju/juju/doc/juju-core-release-process.txt 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/doc/juju-core-release-process.txt 2016-10-13 14:31:49.000000000 +0000 @@ -70,7 +70,7 @@ apt-get update && apt-get install juju-core should install the latest release on your system. -Test this release by bootstrapping a model in ec2 (and hp cloud if you have access), and do a basic wordpress + mysql deployment. 
If you can relate wordpress and mysql, expose wordpress and get to the public address on the wordpress setup screen, this release is a success. +Test this release by bootstrapping a model in ec2, and do a basic wordpress + mysql deployment. If you can relate wordpress and mysql, expose wordpress and get to the public address on the wordpress setup screen, this release is a success. If this step fails then this release is a failure and the release number is unused. Do not reuse release numbers. It is ok to have gaps in the sequence, we've done it before, water is still wet. diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/doc/simplestreams-metadata.txt juju-core-2.0.0/src/github.com/juju/juju/doc/simplestreams-metadata.txt --- juju-core-2.0~beta15/src/github.com/juju/juju/doc/simplestreams-metadata.txt 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/doc/simplestreams-metadata.txt 2016-10-13 14:31:49.000000000 +0000 @@ -9,7 +9,7 @@ The simplestreams format is used to describe related items in a structural fashion. See the Launchpad project lp:simplestreams for more details. -For supported public clouds like Amazon, HP Cloud etc, no action is required by the +For supported public clouds like Amazon, etc., no action is required by the end user so the following information is more for those interested in what happens under the covers. Those setting up a private cloud, or who want to change how things work (eg use a different Ubuntu image), need to pay closer attention. diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/downloader/downloader.go juju-core-2.0.0/src/github.com/juju/juju/downloader/downloader.go --- juju-core-2.0~beta15/src/github.com/juju/juju/downloader/downloader.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/downloader/downloader.go 2016-10-13 14:31:49.000000000 +0000 @@ -39,48 +39,17 @@ // Start starts a new download and returns it. 
func (dlr Downloader) Start(req Request) *Download { - dl := StartDownload(req, dlr.OpenBlob) - return dl + return StartDownload(req, dlr.OpenBlob) } // Download starts a new download, waits for it to complete, and -// returns the local name of the file. -func (dlr Downloader) Download(req Request, abort <-chan struct{}) (filename string, err error) { +// returns the local name of the file. The download can be aborted by +// closing the Abort channel in the Request provided. +func (dlr Downloader) Download(req Request) (string, error) { if err := os.MkdirAll(req.TargetDir, 0755); err != nil { return "", errors.Trace(err) } dl := dlr.Start(req) - file, err := dl.Wait(abort) - if file != nil { - defer file.Close() - } - if err != nil { - return "", errors.Trace(err) - } - return file.Name(), nil -} - -// DownloadWithAlternates tries each of the provided requests until -// one succeeds. If none succeed then the error from the most recent -// attempt is returned. At least one request must be provided. -func (dlr Downloader) DownloadWithAlternates(requests []Request, abort <-chan struct{}) (filename string, err error) { - if len(requests) == 0 { - return "", errors.New("no requests to try") - } - - for _, req := range requests { - filename, err = dlr.Download(req, abort) - if errors.IsNotValid(err) { - break - } - if err == nil { - break - } - logger.Errorf("download request to %s failed: %v", req.URL, err) - // Try the next one. 
- } - if err != nil { - return "", errors.Trace(err) - } - return filename, nil + filename, err := dl.Wait() + return filename, errors.Trace(err) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/downloader/downloader_test.go juju-core-2.0.0/src/github.com/juju/juju/downloader/downloader_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/downloader/downloader_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/downloader/downloader_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,11 +4,8 @@ package downloader_test import ( - "io/ioutil" "net/url" - "os" "path/filepath" - "time" gitjujutesting "github.com/juju/testing" jc "github.com/juju/testing/checkers" @@ -53,7 +50,7 @@ return URL } -func (s *DownloaderSuite) testDownload(c *gc.C, hostnameVerification utils.SSLHostnameVerification) { +func (s *DownloaderSuite) testStart(c *gc.C, hostnameVerification utils.SSLHostnameVerification) { tmp := c.MkDir() gitjujutesting.Server.Response(200, nil, []byte("archive")) dlr := downloader.New(downloader.NewArgs{ @@ -64,54 +61,30 @@ TargetDir: tmp, }) status := <-dl.Done() - defer os.Remove(status.File.Name()) - defer status.File.Close() c.Assert(status.Err, gc.IsNil) - c.Assert(status.File, gc.NotNil) - - dir, _ := filepath.Split(status.File.Name()) + dir, _ := filepath.Split(status.Filename) c.Assert(filepath.Clean(dir), gc.Equals, tmp) - assertFileContents(c, status.File, "archive") + assertFileContents(c, status.Filename, "archive") } func (s *DownloaderSuite) TestDownloadWithoutDisablingSSLHostnameVerification(c *gc.C) { - s.testDownload(c, utils.VerifySSLHostnames) + s.testStart(c, utils.VerifySSLHostnames) } func (s *DownloaderSuite) TestDownloadWithDisablingSSLHostnameVerification(c *gc.C) { - s.testDownload(c, utils.NoVerifySSLHostnames) + s.testStart(c, utils.NoVerifySSLHostnames) } -func (s *DownloaderSuite) TestDownloadError(c *gc.C) { - gitjujutesting.Server.Response(404, nil, nil) - dlr := 
downloader.New(downloader.NewArgs{ - HostnameVerification: utils.VerifySSLHostnames, - }) - dl := dlr.Start(downloader.Request{ - URL: s.URL(c, "/archive.tgz"), - TargetDir: c.MkDir(), - }) - status := <-dl.Done() - c.Assert(status.File, gc.IsNil) - c.Assert(status.Err, gc.ErrorMatches, `cannot download ".*": bad http response: 404 Not Found`) -} - -func (s *DownloaderSuite) TestStopDownload(c *gc.C) { +func (s *DownloaderSuite) TestDownload(c *gc.C) { tmp := c.MkDir() - dlr := downloader.New(downloader.NewArgs{ - HostnameVerification: utils.VerifySSLHostnames, - }) - dl := dlr.Start(downloader.Request{ - URL: s.URL(c, "/x.tgz"), + gitjujutesting.Server.Response(200, nil, []byte("archive")) + dlr := downloader.New(downloader.NewArgs{}) + filename, err := dlr.Download(downloader.Request{ + URL: s.URL(c, "/archive.tgz"), TargetDir: tmp, }) - dl.Stop() - select { - case status := <-dl.Done(): - c.Fatalf("received status %#v after stop", status) - case <-time.After(testing.ShortWait): - } - infos, err := ioutil.ReadDir(tmp) c.Assert(err, jc.ErrorIsNil) - c.Assert(infos, gc.HasLen, 0) + dir, _ := filepath.Split(filename) + c.Assert(filepath.Clean(dir), gc.Equals, tmp) + assertFileContents(c, filename, "archive") } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/downloader/download.go juju-core-2.0.0/src/github.com/juju/juju/downloader/download.go --- juju-core-2.0~beta15/src/github.com/juju/juju/downloader/download.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/downloader/download.go 2016-10-13 14:31:49.000000000 +0000 @@ -11,7 +11,6 @@ "github.com/juju/errors" "github.com/juju/utils" - "launchpad.net/tomb" ) // Request holds a single download request. @@ -27,125 +26,88 @@ // the download is invalid then the func must return errors.NotValid. // If no func is provided then no verification happens. Verify func(*os.File) error + + // Abort is a channel that will cancel the download when it is closed. 
+ Abort <-chan struct{} } // Status represents the status of a completed download. type Status struct { - // File holds the downloaded data on success. - File *os.File + // Filename is the name of the file which holds the downloaded + // data on success. + Filename string // Err describes any error encountered while downloading. Err error } -// Download can download a file from the network. -type Download struct { - tomb tomb.Tomb - done chan Status - openBlob func(*url.URL) (io.ReadCloser, error) -} - -// StartDownload returns a new Download instance based on the provided -// request. openBlob is used to gain access to the blob, whether through -// an HTTP request or some other means. +// StartDownload starts a new download as specified by `req` using +// `openBlob` to actually pull the remote data. func StartDownload(req Request, openBlob func(*url.URL) (io.ReadCloser, error)) *Download { - dl := newDownload(openBlob) - go dl.run(req) - return dl -} - -func newDownload(openBlob func(*url.URL) (io.ReadCloser, error)) *Download { if openBlob == nil { openBlob = NewHTTPBlobOpener(utils.NoVerifySSLHostnames) } - return &Download{ - done: make(chan Status), + dl := &Download{ + done: make(chan Status, 1), openBlob: openBlob, } + go dl.run(req) + return dl } -// Stop stops any download that's in progress. -func (dl *Download) Stop() { - dl.tomb.Kill(nil) - dl.tomb.Wait() +// Download can download a file from the network. +type Download struct { + done chan Status + openBlob func(*url.URL) (io.ReadCloser, error) } // Done returns a channel that receives a status when the download has -// completed. It is the receiver's responsibility to close and remove -// the received file. +// completed or is aborted. Exactly one Status value will be sent for +// each download once it finishes (successfully or otherwise) or is +// aborted. +// +// It is the receiver's responsibility to handle and remove the +// downloaded file. 
func (dl *Download) Done() <-chan Status { return dl.done } -// Wait blocks until the download completes or the abort channel receives. -func (dl *Download) Wait(abort <-chan struct{}) (*os.File, error) { - defer dl.Stop() - - select { - case <-abort: - logger.Infof("download aborted") - return nil, errors.New("aborted") - case status := <-dl.Done(): - if status.Err != nil { - if status.File != nil { - if err := status.File.Close(); err != nil { - logger.Errorf("failed to close file: %v", err) - } - } - return nil, errors.Trace(status.Err) - } - return status.File, nil - } +// Wait blocks until the download finishes (successfully or +// otherwise), or the download is aborted. There will only be a +// filename if err is nil. +func (dl *Download) Wait() (string, error) { + // No select required here because each download will always + // return a value once it completes. Downloads can be aborted via + // the Abort channel provided a creation time. + status := <-dl.Done() + return status.Filename, errors.Trace(status.Err) } func (dl *Download) run(req Request) { - defer dl.tomb.Done() - // TODO(dimitern) 2013-10-03 bug #1234715 // Add a testing HTTPS storage to verify the // disableSSLHostnameVerification behavior here. 
- file, err := download(req, dl.openBlob) + filename, err := dl.download(req) if err != nil { - err = errors.Annotatef(err, "cannot download %q", req.URL) - } - - if err == nil { + err = errors.Trace(err) + } else { logger.Infof("download complete (%q)", req.URL) - if req.Verify != nil { - err = verifyDownload(file, req) - } - } - - status := Status{ - File: file, - Err: err, - } - select { - case dl.done <- status: - // no-op - case <-dl.tomb.Dying(): - cleanTempFile(file) - } -} - -func verifyDownload(file *os.File, req Request) error { - err := req.Verify(file) - if err != nil { - if errors.IsNotValid(err) { - logger.Errorf("download of %s invalid: %v", req.URL, err) + err = verifyDownload(filename, req) + if err != nil { + os.Remove(filename) + filename = "" } - return errors.Trace(err) } - logger.Infof("download verified (%q)", req.URL) - if _, err := file.Seek(0, os.SEEK_SET); err != nil { - logger.Errorf("failed to seek to beginning of file: %v", err) - return errors.Trace(err) + // No select needed here because the channel has a size of 1 and + // will only be written to once. 
+ dl.done <- Status{ + Filename: filename, + Err: err, } - return nil } -func download(req Request, openBlob func(*url.URL) (io.ReadCloser, error)) (file *os.File, err error) { +func (dl *Download) download(req Request) (filename string, err error) { logger.Infof("downloading from %s", req.URL) dir := req.TargetDir @@ -154,37 +116,61 @@ } tempFile, err := ioutil.TempFile(dir, "inprogress-") if err != nil { - return nil, errors.Trace(err) + return "", errors.Trace(err) } defer func() { + tempFile.Close() if err != nil { - cleanTempFile(tempFile) + os.Remove(tempFile.Name()) } }() - reader, err := openBlob(req.URL) + blobReader, err := dl.openBlob(req.URL) if err != nil { - return nil, errors.Trace(err) + return "", errors.Trace(err) } - defer reader.Close() + defer blobReader.Close() + reader := &abortableReader{blobReader, req.Abort} _, err = io.Copy(tempFile, reader) if err != nil { - return nil, errors.Trace(err) + return "", errors.Trace(err) } - if _, err := tempFile.Seek(0, 0); err != nil { - return nil, errors.Trace(err) + + return tempFile.Name(), nil +} + +// abortableReader wraps a Reader, returning an error from Read calls +// if the abort channel provided is closed. +type abortableReader struct { + r io.Reader + abort <-chan struct{} +} + +// Read implements io.Reader. 
+func (ar *abortableReader) Read(p []byte) (int, error) { + select { + case <-ar.abort: + return 0, errors.New("download aborted") + default: } - return tempFile, nil + return ar.r.Read(p) } -func cleanTempFile(f *os.File) { - if f == nil { - return +func verifyDownload(filename string, req Request) error { + if req.Verify == nil { + return nil } - f.Close() - if err := os.Remove(f.Name()); err != nil { - logger.Errorf("cannot remove temp file %q: %v", f.Name(), err) + file, err := os.Open(filename) + if err != nil { + return errors.Annotate(err, "opening for verify") + } + defer file.Close() + + if err := req.Verify(file); err != nil { + return errors.Trace(err) } + logger.Infof("download verified (%q)", req.URL) + return nil } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/downloader/download_test.go juju-core-2.0.0/src/github.com/juju/juju/downloader/download_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/downloader/download_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/downloader/download_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -8,7 +8,6 @@ "net/url" "os" "path/filepath" - "time" "github.com/juju/errors" gitjujutesting "github.com/juju/testing" @@ -65,13 +64,11 @@ downloader.NewHTTPBlobOpener(hostnameVerification), ) status := <-d.Done() - defer status.File.Close() c.Assert(status.Err, gc.IsNil) - c.Assert(status.File, gc.NotNil) - dir, _ := filepath.Split(status.File.Name()) + dir, _ := filepath.Split(status.Filename) c.Assert(filepath.Clean(dir), gc.Equals, tmp) - assertFileContents(c, status.File, "archive") + assertFileContents(c, status.Filename, "archive") } func (s *DownloadSuite) TestDownloadWithoutDisablingSSLHostnameVerification(c *gc.C) { @@ -84,36 +81,18 @@ func (s *DownloadSuite) TestDownloadError(c *gc.C) { gitjujutesting.Server.Response(404, nil, nil) - d := downloader.StartDownload( - downloader.Request{ - URL: s.URL(c, "/archive.tgz"), - TargetDir: c.MkDir(), - }, - 
downloader.NewHTTPBlobOpener(utils.VerifySSLHostnames), - ) - status := <-d.Done() - c.Assert(status.File, gc.IsNil) - c.Assert(status.Err, gc.ErrorMatches, `cannot download ".*": bad http response: 404 Not Found`) -} - -func (s *DownloadSuite) TestStop(c *gc.C) { tmp := c.MkDir() d := downloader.StartDownload( downloader.Request{ - URL: s.URL(c, "/x.tgz"), + URL: s.URL(c, "/archive.tgz"), TargetDir: tmp, }, downloader.NewHTTPBlobOpener(utils.VerifySSLHostnames), ) - d.Stop() - select { - case status := <-d.Done(): - c.Fatalf("received status %#v after stop", status) - case <-time.After(testing.ShortWait): - } - infos, err := ioutil.ReadDir(tmp) - c.Assert(err, jc.ErrorIsNil) - c.Assert(infos, gc.HasLen, 0) + filename, err := d.Wait() + c.Assert(filename, gc.Equals, "") + c.Assert(err, gc.ErrorMatches, `bad http response: 404 Not Found`) + checkDirEmpty(c, tmp) } func (s *DownloadSuite) TestVerifyValid(c *gc.C) { @@ -131,11 +110,10 @@ }, downloader.NewHTTPBlobOpener(utils.VerifySSLHostnames), ) - status := <-dl.Done() - c.Assert(status.Err, jc.ErrorIsNil) - + filename, err := dl.Wait() + c.Assert(err, jc.ErrorIsNil) + c.Check(filename, gc.Not(gc.Equals), "") stub.CheckCallNames(c, "Verify") - stub.CheckCall(c, 0, "Verify", status.File) } func (s *DownloadSuite) TestVerifyInvalid(c *gc.C) { @@ -154,19 +132,40 @@ }, downloader.NewHTTPBlobOpener(utils.VerifySSLHostnames), ) - status := <-dl.Done() - - c.Check(errors.Cause(status.Err), gc.Equals, invalid) + filename, err := dl.Wait() + c.Check(filename, gc.Equals, "") + c.Check(errors.Cause(err), gc.Equals, invalid) stub.CheckCallNames(c, "Verify") - stub.CheckCall(c, 0, "Verify", status.File) + checkDirEmpty(c, tmp) +} + +func (s *DownloadSuite) TestAbort(c *gc.C) { + tmp := c.MkDir() + gitjujutesting.Server.Response(200, nil, []byte("archive")) + abort := make(chan struct{}) + close(abort) + dl := downloader.StartDownload( + downloader.Request{ + URL: s.URL(c, "/archive.tgz"), + TargetDir: tmp, + Abort: abort, + }, + 
downloader.NewHTTPBlobOpener(utils.VerifySSLHostnames), + ) + filename, err := dl.Wait() + c.Check(filename, gc.Equals, "") + c.Check(err, gc.ErrorMatches, "download aborted") + checkDirEmpty(c, tmp) +} + +func assertFileContents(c *gc.C, filename, expect string) { + got, err := ioutil.ReadFile(filename) + c.Assert(err, jc.ErrorIsNil) + c.Check(string(got), gc.Equals, expect) } -func assertFileContents(c *gc.C, f *os.File, expect string) { - got, err := ioutil.ReadAll(f) +func checkDirEmpty(c *gc.C, dir string) { + files, err := ioutil.ReadDir(dir) c.Assert(err, jc.ErrorIsNil) - if !c.Check(string(got), gc.Equals, expect) { - info, err := f.Stat() - c.Assert(err, jc.ErrorIsNil) - c.Logf("info %#v", info) - } + c.Check(files, gc.HasLen, 0) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/environs/bootstrap/bootstrap.go juju-core-2.0.0/src/github.com/juju/juju/environs/bootstrap/bootstrap.go --- juju-core-2.0~beta15/src/github.com/juju/juju/environs/bootstrap/bootstrap.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/environs/bootstrap/bootstrap.go 2016-10-13 14:31:49.000000000 +0000 @@ -17,7 +17,9 @@ "github.com/juju/errors" "github.com/juju/loggo" "github.com/juju/utils" + "github.com/juju/utils/arch" "github.com/juju/utils/series" + "github.com/juju/utils/set" "github.com/juju/utils/ssh" "github.com/juju/version" "gopkg.in/juju/names.v2" @@ -41,8 +43,8 @@ jujuversion "github.com/juju/juju/version" ) -const noToolsMessage = `Juju cannot bootstrap because no tools are available for your model. -You may want to use the 'agent-metadata-url' configuration setting to specify the tools location. +const noToolsMessage = `Juju cannot bootstrap because no agent binaries are available for your model. +You may want to use the 'agent-metadata-url' configuration setting to specify the binaries' location. ` var ( @@ -96,6 +98,11 @@ // across all models in the same controller. 
ControllerInheritedConfig map[string]interface{} + // RegionInheritedConfig holds region specific configuration attributes to + // be shared across all models in the same controller on a particular + // cloud. + RegionInheritedConfig cloud.RegionConfig + // HostedModelConfig is the set of config attributes to be overlaid // on the controller config to construct the initial hosted model // config. @@ -105,15 +112,15 @@ // directive used to choose the initial instance. Placement string - // UploadTools reports whether we should upload the local tools and - // override the environment's specified agent-version. It is an error - // to specify UploadTools with a nil BuildToolsTarball. - UploadTools bool + // BuildAgent reports whether we should build and upload the local agent + // binary and override the environment's specified agent-version. + // It is an error to specify BuildAgent with a nil BuildAgentTarball. + BuildAgent bool - // BuildToolsTarball, if non-nil, is a function that may be used to + // BuildAgentTarball, if non-nil, is a function that may be used to // build tools to upload. If this is nil, tools uploading will never // take place. - BuildToolsTarball sync.BuildToolsTarballFunc + BuildAgentTarball sync.BuildAgentTarballFunc // MetadataDir is an optional path to a local directory containing // tools and/or image metadata. @@ -174,63 +181,144 @@ return errors.Errorf("model configuration has no authorized-keys") } + _, supportsNetworking := environs.SupportsNetworking(environ) + logger.Debugf("model %q supports service/machine networks: %v", cfg.Name(), supportsNetworking) + disableNetworkManagement, _ := cfg.DisableNetworkManagement() + logger.Debugf("network management by juju enabled: %v", !disableNetworkManagement) + // Set default tools metadata source, add image metadata source, // then verify constraints. Providers may rely on image metadata // for constraint validation. 
var customImageMetadata []*imagemetadata.ImageMetadata if args.MetadataDir != "" { var err error - customImageMetadata, err = setPrivateMetadataSources(environ, args.MetadataDir) + customImageMetadata, err = setPrivateMetadataSources(args.MetadataDir) if err != nil { return err } } - if err := validateConstraints(environ, args.ModelConstraints); err != nil { - return err + + var bootstrapSeries *string + if args.BootstrapSeries != "" { + bootstrapSeries = &args.BootstrapSeries } - if err := validateConstraints(environ, args.BootstrapConstraints); err != nil { - return err + + var bootstrapArchForImageSearch string + if args.BootstrapConstraints.Arch != nil { + bootstrapArchForImageSearch = *args.BootstrapConstraints.Arch + } else if args.ModelConstraints.Arch != nil { + bootstrapArchForImageSearch = *args.ModelConstraints.Arch + } else { + bootstrapArchForImageSearch = arch.HostArch() + // We no longer support i386. + if bootstrapArchForImageSearch == arch.I386 { + bootstrapArchForImageSearch = arch.AMD64 + } + } + + ctx.Verbosef("Loading image metadata") + imageMetadata, err := bootstrapImageMetadata(environ, + bootstrapSeries, + bootstrapArchForImageSearch, + args.BootstrapImage, + &customImageMetadata, + ) + if err != nil { + return errors.Trace(err) + } + + // We want to determine a list of valid architectures for which to pick tools and images. + // This includes architectures from custom and other available image metadata. 
+ architectures := set.NewStrings() + if len(customImageMetadata) > 0 { + for _, customMetadata := range customImageMetadata { + architectures.Add(customMetadata.Arch) + } + } + if len(imageMetadata) > 0 { + for _, iMetadata := range imageMetadata { + architectures.Add(iMetadata.Arch) + } } constraintsValidator, err := environ.ConstraintsValidator() if err != nil { return err } + constraintsValidator.UpdateVocabulary(constraints.Arch, architectures.SortedValues()) + bootstrapConstraints, err := constraintsValidator.Merge( args.ModelConstraints, args.BootstrapConstraints, ) if err != nil { - return err + return errors.Trace(err) } - _, supportsNetworking := environs.SupportsNetworking(environ) - - var bootstrapSeries *string - if args.BootstrapSeries != "" { - bootstrapSeries = &args.BootstrapSeries + // The arch we use to find tools isn't the boostrapConstraints arch. + // We copy the constraints arch to a separate variable and + // update it from the host arch if not specified. + // (axw) This is still not quite right: + // For e.g. if there is a MAAS with only ARM64 machines, + // on an AMD64 client, we're going to look for only AMD64 tools, + // limiting what the provider can bootstrap anyway. + var bootstrapArch string + if bootstrapConstraints.Arch != nil { + bootstrapArch = *bootstrapConstraints.Arch + } else { + // If no arch is specified as a constraint, we'll bootstrap + // on the same arch as the client used to bootstrap. + bootstrapArch = arch.HostArch() + // We no longer support controllers on i386. + // If we are bootstrapping from an i386 client, + // we'll look for amd64 tools. 
+ if bootstrapArch == arch.I386 { + bootstrapArch = arch.AMD64 + } } - ctx.Infof("Bootstrapping model %q", cfg.Name()) - logger.Debugf("model %q supports service/machine networks: %v", cfg.Name(), supportsNetworking) - disableNetworkManagement, _ := cfg.DisableNetworkManagement() - logger.Debugf("network management by juju enabled: %v", !disableNetworkManagement) - availableTools, err := findAvailableTools( - environ, args.AgentVersion, bootstrapConstraints.Arch, - bootstrapSeries, args.UploadTools, args.BuildToolsTarball != nil, - ) - if errors.IsNotFound(err) { - return errors.New(noToolsMessage) - } else if err != nil { - return err + var availableTools coretools.List + if !args.BuildAgent { + ctx.Infof("Looking for packaged Juju agent version %s for %s", args.AgentVersion, bootstrapArch) + availableTools, err = findPackagedTools(environ, args.AgentVersion, &bootstrapArch, bootstrapSeries) + if err != nil && !errors.IsNotFound(err) { + return err + } } - - imageMetadata, err := bootstrapImageMetadata( - environ, availableTools, - args.BootstrapImage, - &customImageMetadata, - ) - if err != nil { - return errors.Trace(err) + // If there are no prepackaged tools and a specific version has not been + // requested, look for or build a local binary. 
+ var builtTools *sync.BuiltAgent + if len(availableTools) == 0 && (args.AgentVersion == nil || isCompatibleVersion(*args.AgentVersion, jujuversion.Current)) { + if args.BuildAgentTarball == nil { + return errors.New("cannot build agent binary to upload") + } + if err := validateUploadAllowed(environ, &bootstrapArch, bootstrapSeries, constraintsValidator); err != nil { + return err + } + if args.BuildAgent { + ctx.Infof("Building local Juju agent binary version %s for %s", args.AgentVersion, bootstrapArch) + } else { + ctx.Infof("No packaged binary found, preparing local Juju agent binary") + } + var forceVersion version.Number + availableTools, forceVersion = locallyBuildableTools(bootstrapSeries) + builtTools, err = args.BuildAgentTarball(args.BuildAgent, &forceVersion, cfg.AgentStream()) + if err != nil { + return errors.Annotate(err, "cannot package bootstrap agent binary") + } + defer os.RemoveAll(builtTools.Dir) + for i, tool := range availableTools { + if tool.URL != "" { + continue + } + filename := filepath.Join(builtTools.Dir, builtTools.StorageName) + tool.URL = fmt.Sprintf("file://%s", filename) + tool.Size = builtTools.Size + tool.SHA256 = builtTools.Sha256Hash + availableTools[i] = tool + } + } + if len(availableTools) == 0 { + return errors.New(noToolsMessage) } // If we're uploading, we must override agent-version; @@ -251,12 +339,14 @@ return err } - ctx.Infof("Starting new instance for initial controller") + ctx.Verbosef("Starting new instance for initial controller") result, err := environ.Bootstrap(ctx, environs.BootstrapParams{ + CloudName: args.CloudName, + CloudRegion: args.CloudRegion, ControllerConfig: args.ControllerConfig, ModelConstraints: args.ModelConstraints, - BootstrapConstraints: args.BootstrapConstraints, + BootstrapConstraints: bootstrapConstraints, BootstrapSeries: args.BootstrapSeries, Placement: args.Placement, AvailableTools: availableTools, @@ -273,44 +363,26 @@ if err != nil { return err } - selectedToolsList, err := 
setBootstrapTools(environ, matchingTools) + selectedToolsList, err := getBootstrapToolsVersion(matchingTools) if err != nil { return err } - havePrepackaged := false - for i, selectedTools := range selectedToolsList { - if selectedTools.URL != "" { - havePrepackaged = true - continue - } - ctx.Infof("Building tools to upload (%s)", selectedTools.Version) - builtTools, err := args.BuildToolsTarball(&selectedTools.Version.Number, cfg.AgentStream()) - if err != nil { - return errors.Annotate(err, "cannot upload bootstrap tools") - } - defer os.RemoveAll(builtTools.Dir) - filename := filepath.Join(builtTools.Dir, builtTools.StorageName) - selectedTools.URL = fmt.Sprintf("file://%s", filename) - selectedTools.Size = builtTools.Size - selectedTools.SHA256 = builtTools.Sha256Hash - selectedToolsList[i] = selectedTools - } - if !havePrepackaged && !args.UploadTools { - // There are no prepackaged agents, so we must upload - // even though the user didn't ask for it. We only do - // this when the image-stream is not "released" and - // the agent version hasn't been specified. - logger.Infof("no prepackaged tools available") + // We set agent-version to the newest version, so the agent will immediately upgrade itself. + // Note that this only is relevant if a specific agent version has not been requested, since + // in that case the specific version will be the only version available. 
+ newestVersion, _ := matchingTools.Newest() + if err := setBootstrapToolsVersion(environ, newestVersion); err != nil { + return err } - ctx.Infof("Installing Juju agent on bootstrap instance") + logger.Infof("Installing Juju agent on bootstrap instance") publicKey, err := userPublicSigningKey() if err != nil { return err } instanceConfig, err := instancecfg.NewBootstrapInstanceConfig( args.ControllerConfig, - args.BootstrapConstraints, + bootstrapConstraints, args.ModelConstraints, result.Series, publicKey, @@ -330,7 +402,7 @@ if err := result.Finalize(ctx, instanceConfig, args.DialOpts); err != nil { return err } - ctx.Infof("Bootstrap agent installed") + ctx.Infof("Bootstrap agent now started") return nil } @@ -387,6 +459,7 @@ icfg.Bootstrap.ControllerCloudCredentialName = args.CloudCredentialName icfg.Bootstrap.ControllerConfig = args.ControllerConfig icfg.Bootstrap.ControllerInheritedConfig = args.ControllerInheritedConfig + icfg.Bootstrap.RegionInheritedConfig = args.Cloud.RegionConfig icfg.Bootstrap.HostedModelConfig = args.HostedModelConfig icfg.Bootstrap.Timeout = args.DialOpts.Timeout icfg.Bootstrap.GUI = guiArchive(args.GUIDataSourceBaseURL, func(msg string) { @@ -422,7 +495,8 @@ // state database will have the synthesised image metadata added to it. 
func bootstrapImageMetadata( environ environs.Environ, - availableTools coretools.List, + bootstrapSeries *string, + bootstrapArch string, bootstrapImageId string, customImageMetadata *[]*imagemetadata.ImageMetadata, ) ([]*imagemetadata.ImageMetadata, error) { @@ -446,15 +520,10 @@ } if bootstrapImageId != "" { - arches := availableTools.Arches() - if len(arches) != 1 { - return nil, errors.NotValidf("multiple architectures with bootstrap image") - } - allSeries := availableTools.AllSeries() - if len(allSeries) != 1 { - return nil, errors.NotValidf("multiple series with bootstrap image") + if bootstrapSeries == nil { + return nil, errors.NotValidf("no series specified with bootstrap image") } - seriesVersion, err := series.SeriesVersion(allSeries[0]) + seriesVersion, err := series.SeriesVersion(*bootstrapSeries) if err != nil { return nil, errors.Trace(err) } @@ -463,7 +532,7 @@ // filter on those properties should allow for empty values. meta := &imagemetadata.ImageMetadata{ Id: bootstrapImageId, - Arch: arches[0], + Arch: bootstrapArch, Version: seriesVersion, RegionName: region.Region, Endpoint: region.Endpoint, @@ -480,10 +549,9 @@ if err != nil { return nil, errors.Trace(err) } + // This constraint will search image metadata for all supported architectures and series. imageConstraint := imagemetadata.NewImageConstraint(simplestreams.LookupParams{ CloudSpec: region, - Series: availableTools.AllSeries(), - Arches: availableTools.Arches(), Stream: environ.Config().ImageStream(), }) logger.Debugf("constraints for image metadata lookup %v", imageConstraint) @@ -509,31 +577,17 @@ return publicImageMetadata, nil } -// setBootstrapTools returns the newest tools from the given tools list, -// and updates the agent-version configuration attribute. -func setBootstrapTools(environ environs.Environ, possibleTools coretools.List) (coretools.List, error) { +// getBootstrapToolsVersion returns the newest tools from the given tools list. 
+func getBootstrapToolsVersion(possibleTools coretools.List) (coretools.List, error) { if len(possibleTools) == 0 { - return nil, fmt.Errorf("no bootstrap tools available") + return nil, errors.New("no bootstrap tools available") } var newVersion version.Number newVersion, toolsList := possibleTools.Newest() logger.Infof("newest version: %s", newVersion) - cfg := environ.Config() - if agentVersion, _ := cfg.AgentVersion(); agentVersion != newVersion { - cfg, err := cfg.Apply(map[string]interface{}{ - "agent-version": newVersion.String(), - }) - if err == nil { - err = environ.SetConfig(cfg) - } - if err != nil { - return nil, fmt.Errorf("failed to update model configuration: %v", err) - } - } bootstrapVersion := newVersion // We should only ever bootstrap the exact same version as the client, - // or we risk bootstrap incompatibility. We still set agent-version to - // the newest version, so the agent will immediately upgrade itself. + // or we risk bootstrap incompatibility. if !isCompatibleVersion(newVersion, jujuversion.Current) { compatibleVersion, compatibleTools := findCompatibleTools(possibleTools, jujuversion.Current) if len(compatibleTools) == 0 { @@ -549,6 +603,23 @@ return toolsList, nil } +// setBootstrapToolsVersion updates the agent-version configuration attribute. +func setBootstrapToolsVersion(environ environs.Environ, toolsVersion version.Number) error { + cfg := environ.Config() + if agentVersion, _ := cfg.AgentVersion(); agentVersion != toolsVersion { + cfg, err := cfg.Apply(map[string]interface{}{ + "agent-version": toolsVersion.String(), + }) + if err == nil { + err = environ.SetConfig(cfg) + } + if err != nil { + return errors.Errorf("failed to update model configuration: %v", err) + } + } + return nil +} + // findCompatibleTools finds tools in the list that have the same major, minor // and patch level as jujuversion.Current. 
// @@ -573,7 +644,7 @@ // setPrivateMetadataSources sets the default tools metadata source // for tools syncing, and adds an image metadata source after verifying // the contents. -func setPrivateMetadataSources(env environs.Environ, metadataDir string) ([]*imagemetadata.ImageMetadata, error) { +func setPrivateMetadataSources(metadataDir string) ([]*imagemetadata.ImageMetadata, error) { logger.Infof("Setting default tools and image metadata sources: %s", metadataDir) tools.DefaultBaseURL = metadataDir @@ -604,15 +675,6 @@ return existingMetadata, nil } -func validateConstraints(env environs.Environ, cons constraints.Value) error { - validator, err := env.ConstraintsValidator() - if err != nil { - return errors.Trace(err) - } - unsupported, err := validator.Validate(cons) - return errors.Annotatef(err, "unsupported constraints: %v", unsupported) -} - // guiArchive returns information on the GUI archive that will be uploaded // to the controller. Possible errors in retrieving the GUI archive information // do not prevent the model to be bootstrapped. If dataSourceBaseURL is @@ -633,7 +695,7 @@ logProgress(fmt.Sprintf("Cannot use Juju GUI at %q: %s", path, err)) return nil } - logProgress(fmt.Sprintf("Preparing for Juju GUI %s installation from local archive", vers)) + logProgress(fmt.Sprintf("Fetching Juju GUI %s from local archive", vers)) return &coretools.GUIArchive{ Version: vers, URL: "file://" + filepath.ToSlash(path), @@ -658,7 +720,7 @@ return nil } // Metadata info are returned in descending version order. 
- logProgress(fmt.Sprintf("Preparing for Juju GUI %s release installation", allMeta[0].Version)) + logProgress(fmt.Sprintf("Fetching Juju GUI %s", allMeta[0].Version)) return &coretools.GUIArchive{ Version: allMeta[0].Version, URL: allMeta[0].FullPath, diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/environs/bootstrap/bootstrap_test.go juju-core-2.0.0/src/github.com/juju/juju/environs/bootstrap/bootstrap_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/environs/bootstrap/bootstrap_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/environs/bootstrap/bootstrap_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -18,6 +18,7 @@ "github.com/juju/errors" jc "github.com/juju/testing/checkers" "github.com/juju/utils/arch" + jujuos "github.com/juju/utils/os" "github.com/juju/utils/series" "github.com/juju/version" gc "gopkg.in/check.v1" @@ -136,8 +137,8 @@ func (s *bootstrapSuite) TestBootstrapSpecifiedConstraints(c *gc.C) { env := newEnviron("foo", useDefaultKeys, nil) s.setDummyStorage(c, env) - bootstrapCons := constraints.MustParse("cpu-cores=3 mem=7G") - modelCons := constraints.MustParse("cpu-cores=2 mem=4G") + bootstrapCons := constraints.MustParse("cores=3 mem=7G") + modelCons := constraints.MustParse("cores=2 mem=4G") err := bootstrap.Bootstrap(envtesting.BootstrapContext(c), env, bootstrap.BootstrapParams{ ControllerConfig: coretesting.FakeControllerConfig(), AdminSecret: "admin-secret", @@ -231,6 +232,129 @@ c.Assert(env.instanceConfig.Bootstrap.BootstrapMachineConstraints, jc.DeepEquals, bootstrapCons) } +func (s *bootstrapSuite) TestBootstrapAddsArchFromImageToExistingProviderSupportedArches(c *gc.C) { + data := s.setupImageMetadata(c) + env := s.setupProviderWithSomeSupportedArches(c) + // Even though test provider does not explicitly support architecture used by this test, + // the fact that we have an image for it, adds this architecture to those supported by provider. 
+ // Bootstrap should succeed with no failures as constraints validator used internally + // would have both provider supported architectures and architectures retrieved from images metadata. + bootstrapCons := constraints.MustParse(fmt.Sprintf("arch=%v", data.architecture)) + err := bootstrap.Bootstrap(envtesting.BootstrapContext(c), env, bootstrap.BootstrapParams{ + ControllerConfig: coretesting.FakeControllerConfig(), + AdminSecret: "admin-secret", + CAPrivateKey: coretesting.CAKey, + BootstrapImage: "img-id", + BootstrapSeries: "precise", + BootstrapConstraints: bootstrapCons, + MetadataDir: data.metadataDir, + }) + c.Assert(err, jc.ErrorIsNil) + s.assertBootstrapImageMetadata(c, env.bootstrapEnviron, data, bootstrapCons) +} + +type testImageMetadata struct { + architecture string + metadataDir string + metadata []*imagemetadata.ImageMetadata +} + +// setupImageMetadata returns architecture for which metadata was setup +func (s *bootstrapSuite) setupImageMetadata(c *gc.C) testImageMetadata { + testArch := arch.S390X + s.PatchValue(&series.HostSeries, func() string { return "precise" }) + s.PatchValue(&arch.HostArch, func() string { return testArch }) + + metadataDir, metadata := createImageMetadataForArch(c, testArch) + stor, err := filestorage.NewFileStorageWriter(metadataDir) + c.Assert(err, jc.ErrorIsNil) + envtesting.UploadFakeTools(c, stor, "released", "released") + + return testImageMetadata{testArch, metadataDir, metadata} +} + +func (s *bootstrapSuite) assertBootstrapImageMetadata(c *gc.C, env *bootstrapEnviron, testData testImageMetadata, bootstrapCons constraints.Value) { + c.Assert(env.bootstrapCount, gc.Equals, 1) + c.Assert(env.args.ImageMetadata, gc.HasLen, 1) + c.Assert(env.args.ImageMetadata[0], jc.DeepEquals, &imagemetadata.ImageMetadata{ + Id: "img-id", + Arch: testData.architecture, + Version: "12.04", + RegionName: "nether", + Endpoint: "hearnoretheir", + Stream: "released", + }) + c.Assert(env.instanceConfig.Bootstrap.CustomImageMetadata, 
gc.HasLen, 2) + c.Assert(env.instanceConfig.Bootstrap.CustomImageMetadata[0], jc.DeepEquals, testData.metadata[0]) + c.Assert(env.instanceConfig.Bootstrap.CustomImageMetadata[1], jc.DeepEquals, env.args.ImageMetadata[0]) + c.Assert(env.instanceConfig.Bootstrap.BootstrapMachineConstraints, jc.DeepEquals, bootstrapCons) + +} +func (s *bootstrapSuite) setupProviderWithSomeSupportedArches(c *gc.C) bootstrapEnvironWithRegion { + env := bootstrapEnvironWithRegion{ + newEnviron("foo", useDefaultKeys, nil), + simplestreams.CloudSpec{ + Region: "nether", + Endpoint: "hearnoretheir", + }, + } + s.setDummyStorage(c, env.bootstrapEnviron) + + // test provider constraints only has amd64 and arm64 as supported architectures + consBefore, err := env.ConstraintsValidator() + c.Assert(err, jc.ErrorIsNil) + desiredArch := constraints.MustParse("arch=i386") + unsupported, err := consBefore.Validate(desiredArch) + c.Assert(err.Error(), jc.Contains, `invalid constraint value: arch=i386`) + c.Assert(unsupported, gc.HasLen, 0) + + return env +} + +func (s *bootstrapSuite) TestBootstrapAddsArchFromImageToProviderWithNoSupportedArches(c *gc.C) { + data := s.setupImageMetadata(c) + env := s.setupProviderWithNoSupportedArches(c) + // Even though test provider does not explicitly support architecture used by this test, + // the fact that we have an image for it, adds this architecture to those supported by provider. + // Bootstrap should succeed with no failures as constraints validator used internally + // would have both provider supported architectures and architectures retrieved from images metadata. 
+ bootstrapCons := constraints.MustParse(fmt.Sprintf("arch=%v", data.architecture)) + err := bootstrap.Bootstrap(envtesting.BootstrapContext(c), env, bootstrap.BootstrapParams{ + ControllerConfig: coretesting.FakeControllerConfig(), + AdminSecret: "admin-secret", + CAPrivateKey: coretesting.CAKey, + BootstrapImage: "img-id", + BootstrapSeries: "precise", + BootstrapConstraints: bootstrapCons, + MetadataDir: data.metadataDir, + }) + c.Assert(err, jc.ErrorIsNil) + s.assertBootstrapImageMetadata(c, env.bootstrapEnviron, data, bootstrapCons) +} + +func (s *bootstrapSuite) setupProviderWithNoSupportedArches(c *gc.C) bootstrapEnvironNoExplicitArchitectures { + env := bootstrapEnvironNoExplicitArchitectures{ + &bootstrapEnvironWithRegion{ + newEnviron("foo", useDefaultKeys, nil), + simplestreams.CloudSpec{ + Region: "nether", + Endpoint: "hearnoretheir", + }, + }, + } + s.setDummyStorage(c, env.bootstrapEnviron) + + consBefore, err := env.ConstraintsValidator() + c.Assert(err, jc.ErrorIsNil) + // test provider constraints only has amd64 and arm64 as supported architectures + desiredArch := constraints.MustParse("arch=i386") + unsupported, err := consBefore.Validate(desiredArch) + c.Assert(err, jc.ErrorIsNil) + c.Assert(unsupported, gc.HasLen, 0) + + return env +} + // TestBootstrapImageMetadataFromAllSources tests that we are looking for // image metadata in all data sources available to environment. // Abandoning look up too soon led to misleading bootstrap failures: @@ -276,7 +400,94 @@ } } -func (s *bootstrapSuite) TestBootstrapUploadTools(c *gc.C) { +func (s *bootstrapSuite) TestBootstrapLocalTools(c *gc.C) { + if runtime.GOOS == "windows" { + c.Skip("issue 1403084: Currently does not work because of jujud problems") + } + + // Client host is CentOS, wanting to bootstrap a CentOS + // controller. This is fine. 
+ + s.PatchValue(&jujuos.HostOS, func() jujuos.OSType { return jujuos.CentOS }) + s.PatchValue(&arch.HostArch, func() string { return arch.AMD64 }) + s.PatchValue(bootstrap.FindTools, func(environs.Environ, int, int, string, tools.Filter) (tools.List, error) { + return nil, errors.NotFoundf("tools") + }) + env := newEnviron("foo", useDefaultKeys, nil) + err := bootstrap.Bootstrap(envtesting.BootstrapContext(c), env, bootstrap.BootstrapParams{ + AdminSecret: "admin-secret", + CAPrivateKey: coretesting.CAKey, + ControllerConfig: coretesting.FakeControllerConfig(), + BuildAgentTarball: func(bool, *version.Number, string) (*sync.BuiltAgent, error) { + return &sync.BuiltAgent{Dir: c.MkDir()}, nil + }, + BootstrapSeries: "centos7", + }) + c.Assert(err, jc.ErrorIsNil) + + c.Check(env.bootstrapCount, gc.Equals, 1) + c.Check(env.args.BootstrapSeries, gc.Equals, "centos7") + c.Check(env.args.AvailableTools.AllSeries(), jc.SameContents, []string{"centos7"}) +} + +func (s *bootstrapSuite) TestBootstrapLocalToolsMismatchingOS(c *gc.C) { + if runtime.GOOS == "windows" { + c.Skip("issue 1403084: Currently does not work because of jujud problems") + } + + // Client host is a Windows system, wanting to bootstrap a trusty + // controller with local tools. This can't work. 
+ + s.PatchValue(&jujuos.HostOS, func() jujuos.OSType { return jujuos.Windows }) + s.PatchValue(&arch.HostArch, func() string { return arch.AMD64 }) + s.PatchValue(bootstrap.FindTools, func(environs.Environ, int, int, string, tools.Filter) (tools.List, error) { + return nil, errors.NotFoundf("tools") + }) + env := newEnviron("foo", useDefaultKeys, nil) + err := bootstrap.Bootstrap(envtesting.BootstrapContext(c), env, bootstrap.BootstrapParams{ + AdminSecret: "admin-secret", + CAPrivateKey: coretesting.CAKey, + ControllerConfig: coretesting.FakeControllerConfig(), + BuildAgentTarball: func(bool, *version.Number, string) (*sync.BuiltAgent, error) { + return &sync.BuiltAgent{Dir: c.MkDir()}, nil + }, + BootstrapSeries: "trusty", + }) + c.Assert(err, gc.ErrorMatches, `cannot use agent built for "trusty" using a machine running "Windows"`) +} + +func (s *bootstrapSuite) TestBootstrapLocalToolsDifferentLinuxes(c *gc.C) { + if runtime.GOOS == "windows" { + c.Skip("issue 1403084: Currently does not work because of jujud problems") + } + + // Client host is some unspecified Linux system, wanting to + // bootstrap a trusty controller with local tools. This should be + // OK. 
+ + s.PatchValue(&jujuos.HostOS, func() jujuos.OSType { return jujuos.GenericLinux }) + s.PatchValue(&arch.HostArch, func() string { return arch.AMD64 }) + s.PatchValue(bootstrap.FindTools, func(environs.Environ, int, int, string, tools.Filter) (tools.List, error) { + return nil, errors.NotFoundf("tools") + }) + env := newEnviron("foo", useDefaultKeys, nil) + err := bootstrap.Bootstrap(envtesting.BootstrapContext(c), env, bootstrap.BootstrapParams{ + AdminSecret: "admin-secret", + CAPrivateKey: coretesting.CAKey, + ControllerConfig: coretesting.FakeControllerConfig(), + BuildAgentTarball: func(bool, *version.Number, string) (*sync.BuiltAgent, error) { + return &sync.BuiltAgent{Dir: c.MkDir()}, nil + }, + BootstrapSeries: "trusty", + }) + c.Assert(err, jc.ErrorIsNil) + + c.Check(env.bootstrapCount, gc.Equals, 1) + c.Check(env.args.BootstrapSeries, gc.Equals, "trusty") + c.Check(env.args.AvailableTools.AllSeries(), jc.SameContents, []string{"trusty"}) +} + +func (s *bootstrapSuite) TestBootstrapBuildAgent(c *gc.C) { if runtime.GOOS == "windows" { c.Skip("issue 1403084: Currently does not work because of jujud problems") } @@ -285,18 +496,20 @@ // such as s390. 
s.PatchValue(&arch.HostArch, func() string { return arch.ARM64 }) s.PatchValue(bootstrap.FindTools, func(environs.Environ, int, int, string, tools.Filter) (tools.List, error) { + c.Fatal("should not call FindTools if BuildAgent is specified") return nil, errors.NotFoundf("tools") }) env := newEnviron("foo", useDefaultKeys, nil) err := bootstrap.Bootstrap(envtesting.BootstrapContext(c), env, bootstrap.BootstrapParams{ - UploadTools: true, + BuildAgent: true, AdminSecret: "admin-secret", CAPrivateKey: coretesting.CAKey, ControllerConfig: coretesting.FakeControllerConfig(), - BuildToolsTarball: func(ver *version.Number, _ string) (*sync.BuiltTools, error) { - c.Logf("BuildToolsTarball version %s", ver) - return &sync.BuiltTools{Dir: c.MkDir()}, nil + BuildAgentTarball: func(build bool, ver *version.Number, _ string) (*sync.BuiltAgent, error) { + c.Logf("BuildAgentTarball version %s", ver) + c.Assert(build, jc.IsTrue) + return &sync.BuiltAgent{Dir: c.MkDir()}, nil }, }) c.Assert(err, jc.ErrorIsNil) @@ -307,6 +520,53 @@ c.Check(agentVersion.String(), gc.Equals, "1.99.0.1") } +func (s *bootstrapSuite) assertBootstrapPackagedToolsAvailable(c *gc.C, clientArch string) { + // Patch out HostArch and FindTools to allow the test to pass on other architectures, + // such as s390. 
+ s.PatchValue(&arch.HostArch, func() string { return clientArch }) + toolsArch := clientArch + if toolsArch == "i386" { + toolsArch = "amd64" + } + findToolsOk := false + s.PatchValue(bootstrap.FindTools, func(_ environs.Environ, _ int, _ int, _ string, filter tools.Filter) (tools.List, error) { + c.Assert(filter.Arch, gc.Equals, toolsArch) + c.Assert(filter.Series, gc.Equals, "quantal") + findToolsOk = true + vers := version.Binary{ + Number: jujuversion.Current, + Series: "quantal", + Arch: toolsArch, + } + return tools.List{{ + Version: vers, + }}, nil + }) + + env := newEnviron("foo", useDefaultKeys, nil) + err := bootstrap.Bootstrap(envtesting.BootstrapContext(c), env, bootstrap.BootstrapParams{ + AdminSecret: "admin-secret", + CAPrivateKey: coretesting.CAKey, + ControllerConfig: coretesting.FakeControllerConfig(), + BootstrapSeries: "quantal", + BuildAgentTarball: func(bool, *version.Number, string) (*sync.BuiltAgent, error) { + c.Fatal("should not call BuildAgentTarball if there are packaged tools") + return nil, nil + }, + }) + c.Assert(err, jc.ErrorIsNil) + c.Assert(findToolsOk, jc.IsTrue) +} + +func (s *bootstrapSuite) TestBootstrapPackagedTools(c *gc.C) { + if runtime.GOOS == "windows" { + c.Skip("issue 1403084: Currently does not work because of jujud problems") + } + for _, a := range arch.AllSupportedArches { + s.assertBootstrapPackagedToolsAvailable(c, a) + } +} + func (s *bootstrapSuite) TestBootstrapNoToolsNonReleaseStream(c *gc.C) { if runtime.GOOS == "windows" { c.Skip("issue 1403084: Currently does not work because of jujud problems") @@ -324,8 +584,8 @@ AdminSecret: "admin-secret", CAPrivateKey: coretesting.CAKey, ControllerConfig: coretesting.FakeControllerConfig(), - BuildToolsTarball: func(*version.Number, string) (*sync.BuiltTools, error) { - return &sync.BuiltTools{Dir: c.MkDir()}, nil + BuildAgentTarball: func(bool, *version.Number, string) (*sync.BuiltAgent, error) { + return &sync.BuiltAgent{Dir: c.MkDir()}, nil }, }) // 
bootstrap.Bootstrap leaves it to the provider to @@ -348,8 +608,8 @@ ControllerConfig: coretesting.FakeControllerConfig(), AdminSecret: "admin-secret", CAPrivateKey: coretesting.CAKey, - BuildToolsTarball: func(*version.Number, string) (*sync.BuiltTools, error) { - return &sync.BuiltTools{Dir: c.MkDir()}, nil + BuildAgentTarball: func(bool, *version.Number, string) (*sync.BuiltAgent, error) { + return &sync.BuiltAgent{Dir: c.MkDir()}, nil }, }) // bootstrap.Bootstrap leaves it to the provider to @@ -357,7 +617,7 @@ c.Assert(err, jc.ErrorIsNil) } -func (s *bootstrapSuite) TestSetBootstrapTools(c *gc.C) { +func (s *bootstrapSuite) TestBootstrapToolsVersion(c *gc.C) { availableVersions := []version.Binary{ version.MustParseBinary("1.18.0-trusty-arm64"), version.MustParseBinary("1.18.1-trusty-arm64"), @@ -371,31 +631,26 @@ } type test struct { - currentVersion version.Number - expectedTools version.Number - expectedAgentVersion version.Number + currentVersion version.Number + expectedTools version.Number } tests := []test{{ - currentVersion: version.MustParse("1.18.0"), - expectedTools: version.MustParse("1.18.0"), - expectedAgentVersion: version.MustParse("1.18.1.3"), + currentVersion: version.MustParse("1.18.0"), + expectedTools: version.MustParse("1.18.0"), }, { - currentVersion: version.MustParse("1.18.1.4"), - expectedTools: version.MustParse("1.18.1.3"), - expectedAgentVersion: version.MustParse("1.18.1.3"), + currentVersion: version.MustParse("1.18.1.4"), + expectedTools: version.MustParse("1.18.1.3"), }, { // build number is ignored unless major/minor don't // match the latest. - currentVersion: version.MustParse("1.18.1.2"), - expectedTools: version.MustParse("1.18.1.3"), - expectedAgentVersion: version.MustParse("1.18.1.3"), + currentVersion: version.MustParse("1.18.1.2"), + expectedTools: version.MustParse("1.18.1.3"), }, { // If the current patch level exceeds whatever's in // the tools source (e.g. 
when bootstrapping from trunk) // then the latest available tools will be chosen. - currentVersion: version.MustParse("1.18.2"), - expectedTools: version.MustParse("1.18.1.3"), - expectedAgentVersion: version.MustParse("1.18.1.3"), + currentVersion: version.MustParse("1.18.2"), + expectedTools: version.MustParse("1.18.1.3"), }} env := newEnviron("foo", useDefaultKeys, nil) @@ -406,13 +661,11 @@ err = env.SetConfig(cfg) c.Assert(err, jc.ErrorIsNil) s.PatchValue(&jujuversion.Current, t.currentVersion) - bootstrapTools, err := bootstrap.SetBootstrapTools(env, availableTools) + tools, err := bootstrap.GetBootstrapToolsVersion(availableTools) c.Assert(err, jc.ErrorIsNil) - for _, tools := range bootstrapTools { - c.Assert(tools.Version.Number, gc.Equals, t.expectedTools) - } - agentVersion, _ := env.Config().AgentVersion() - c.Assert(agentVersion, gc.Equals, t.expectedAgentVersion) + c.Assert(tools, gc.Not(gc.HasLen), 0) + toolsVersion, _ := tools.Newest() + c.Assert(toolsVersion, gc.Equals, t.expectedTools) } } @@ -442,7 +695,7 @@ GUIDataSourceBaseURL: "https://1.2.3.4/gui/sources", }) c.Assert(err, jc.ErrorIsNil) - c.Assert(coretesting.Stderr(ctx), jc.Contains, "Preparing for Juju GUI 2.0.42 release installation\n") + c.Assert(coretesting.Stderr(ctx), jc.Contains, "Fetching Juju GUI 2.0.42\n") // The most recent GUI release info has been stored. c.Assert(env.instanceConfig.Bootstrap.GUI.URL, gc.Equals, "https://1.2.3.4/juju-gui-2.0.42.tar.bz2") @@ -462,7 +715,7 @@ CAPrivateKey: coretesting.CAKey, }) c.Assert(err, jc.ErrorIsNil) - c.Assert(coretesting.Stderr(ctx), jc.Contains, "Preparing for Juju GUI 2.2.0 installation from local archive\n") + c.Assert(coretesting.Stderr(ctx), jc.Contains, "Fetching Juju GUI 2.2.0 from local archive\n") // Check GUI URL and version. c.Assert(env.instanceConfig.Bootstrap.GUI.URL, gc.Equals, "file://"+path) @@ -602,10 +855,16 @@ // createImageMetadata creates some image metadata in a local directory. 
func createImageMetadata(c *gc.C) (dir string, _ []*imagemetadata.ImageMetadata) { + return createImageMetadataForArch(c, "amd64") +} + +// createImageMetadataForArch creates some image metadata in a local directory for +// specified arch. +func createImageMetadataForArch(c *gc.C, arch string) (dir string, _ []*imagemetadata.ImageMetadata) { // Generate some image metadata. im := []*imagemetadata.ImageMetadata{{ Id: "1234", - Arch: "amd64", + Arch: arch, Version: "13.04", RegionName: "region", Endpoint: "endpoint", @@ -710,10 +969,24 @@ password := "lisboan-pork" + cloudName := "dummy" + dummyCloud := cloud.Cloud{ + RegionConfig: cloud.RegionConfig{ + "a-region": cloud.Attrs{ + "a-key": "a-value", + }, + "b-region": cloud.Attrs{ + "b-key": "b-value", + }, + }, + } + env := newEnviron("foo", useDefaultKeys, nil) err := bootstrap.Bootstrap(envtesting.BootstrapContext(c), env, bootstrap.BootstrapParams{ ControllerConfig: coretesting.FakeControllerConfig(), ControllerInheritedConfig: map[string]interface{}{"ftp-proxy": "http://proxy"}, + CloudName: cloudName, + Cloud: dummyCloud, AdminSecret: password, CAPrivateKey: coretesting.CAKey, }) @@ -729,6 +1002,14 @@ Password: password, Info: mongo.Info{CACert: coretesting.CACert}, }) c.Check(icfg.Bootstrap.ControllerInheritedConfig, gc.DeepEquals, map[string]interface{}{"ftp-proxy": "http://proxy"}) + c.Check(icfg.Bootstrap.RegionInheritedConfig, jc.DeepEquals, cloud.RegionConfig{ + "a-region": cloud.Attrs{ + "a-key": "a-value", + }, + "b-region": cloud.Attrs{ + "b-key": "b-value", + }, + }) controllerCfg := icfg.Controller.Config c.Check(controllerCfg["ca-private-key"], gc.IsNil) c.Check(icfg.Bootstrap.StateServingInfo.StatePort, gc.Equals, controllerCfg.StatePort()) @@ -809,6 +1090,11 @@ AdminSecret: "admin-secret", CAPrivateKey: coretesting.CAKey, AgentVersion: toolsVersion, + BuildAgentTarball: func(build bool, ver *version.Number, _ string) (*sync.BuiltAgent, error) { + c.Logf("BuildAgentTarball version %s", ver) + 
c.Assert(build, jc.IsFalse) + return &sync.BuiltAgent{Dir: c.MkDir()}, nil + }, }) vers, _ := env.cfg.AgentVersion() return err, env.bootstrapCount, vers @@ -856,7 +1142,7 @@ // The bootstrap client major and minor versions need to match the tools asked for. toolsVersion := version.MustParse("10.11.12") err, bootstrapCount, _ := s.setupBootstrapSpecificVersion(c, 10, 1, &toolsVersion) - c.Assert(strings.Replace(err.Error(), "\n", "", -1), gc.Matches, ".* no tools are available .*") + c.Assert(strings.Replace(err.Error(), "\n", "", -1), gc.Matches, ".* no agent binaries are available .*") c.Assert(bootstrapCount, gc.Equals, 0) } @@ -865,10 +1151,34 @@ // The bootstrap client major and minor versions need to match the tools asked for. toolsVersion := version.MustParse("10.11.12") err, bootstrapCount, _ := s.setupBootstrapSpecificVersion(c, 1, 11, &toolsVersion) - c.Assert(strings.Replace(err.Error(), "\n", "", -1), gc.Matches, ".* no tools are available .*") + c.Assert(strings.Replace(err.Error(), "\n", "", -1), gc.Matches, ".* no agent binaries are available .*") c.Assert(bootstrapCount, gc.Equals, 0) } +func (s *bootstrapSuite) TestAvailableToolsInvalidArch(c *gc.C) { + s.PatchValue(&arch.HostArch, func() string { + return arch.S390X + }) + s.PatchValue(bootstrap.FindTools, func(environs.Environ, int, int, string, tools.Filter) (tools.List, error) { + c.Fatal("find packaged tools should not be called") + return nil, errors.New("unexpected") + }) + + env := newEnviron("foo", useDefaultKeys, nil) + err := bootstrap.Bootstrap(envtesting.BootstrapContext(c), env, bootstrap.BootstrapParams{ + BuildAgent: true, + AdminSecret: "admin-secret", + CAPrivateKey: coretesting.CAKey, + ControllerConfig: coretesting.FakeControllerConfig(), + BuildAgentTarball: func(build bool, ver *version.Number, _ string) (*sync.BuiltAgent, error) { + c.Logf("BuildAgentTarball version %s", ver) + c.Assert(build, jc.IsTrue) + return &sync.BuiltAgent{Dir: c.MkDir()}, nil + }, + }) + c.Assert(err, 
gc.ErrorMatches, `model "foo" of type dummy does not support instances running on "s390x"`) +} + type bootstrapEnviron struct { cfg *config.Config environs.Environ // stub out all methods we don't care about. @@ -922,7 +1232,7 @@ if args.BootstrapSeries != "" { series = args.BootstrapSeries } - return &environs.BootstrapResult{Arch: arch.HostArch(), Series: series, Finalize: finalizer}, nil + return &environs.BootstrapResult{Arch: args.AvailableTools.Arches()[0], Series: series, Finalize: finalizer}, nil } func (e *bootstrapEnviron) Config() *config.Config { @@ -953,3 +1263,13 @@ func (e bootstrapEnvironWithRegion) Region() (simplestreams.CloudSpec, error) { return e.region, nil } + +type bootstrapEnvironNoExplicitArchitectures struct { + *bootstrapEnvironWithRegion +} + +func (e bootstrapEnvironNoExplicitArchitectures) ConstraintsValidator() (constraints.Validator, error) { + e.constraintsValidatorCount++ + v := constraints.NewValidator() + return v, nil +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/environs/bootstrap/config.go juju-core-2.0.0/src/github.com/juju/juju/environs/bootstrap/config.go --- juju-core-2.0~beta15/src/github.com/juju/juju/environs/bootstrap/config.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/environs/bootstrap/config.go 2016-10-13 14:31:49.000000000 +0000 @@ -123,7 +123,7 @@ // in well-defined locations: $JUJU_DATA/ca-cert.pem, and // $JUJU_DATA/ca-private-key.pem. If none of these are set, an // error is returned. -func NewConfig(controllerUUID string, attrs map[string]interface{}) (Config, error) { +func NewConfig(attrs map[string]interface{}) (Config, error) { coerced, err := configChecker.Coerce(attrs, nil) if err != nil { return Config{}, errors.Trace(err) @@ -191,9 +191,6 @@ // corresponding "-path" attribute is set, or otherwise from a default // path. 
func readFileAttr(attrs map[string]interface{}, key, defaultPath string) (content string, userSpecified bool, _ error) { - if !osenv.IsJujuXDGDataHomeSet() { - return "", false, errors.Errorf("$JUJU_DATA is not set, cannot read %q", key) - } path, ok := attrs[key+"-path"].(string) if ok { userSpecified = true diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/environs/bootstrap/config_test.go juju-core-2.0.0/src/github.com/juju/juju/environs/bootstrap/config_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/environs/bootstrap/config_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/environs/bootstrap/config_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,6 +4,7 @@ package bootstrap_test import ( + "io/ioutil" "time" gitjujutesting "github.com/juju/testing" @@ -11,6 +12,7 @@ gc "gopkg.in/check.v1" "github.com/juju/juju/environs/bootstrap" + "github.com/juju/juju/juju/osenv" "github.com/juju/juju/testing" ) @@ -21,7 +23,7 @@ var _ = gc.Suite(&ConfigSuite{}) func (*ConfigSuite) TestDefaultConfig(c *gc.C) { - cfg, err := bootstrap.NewConfig(testing.ModelTag.Id(), nil) + cfg, err := bootstrap.NewConfig(nil) c.Assert(err, jc.ErrorIsNil) // These three things are generated. 
@@ -35,7 +37,7 @@ } func (*ConfigSuite) TestConfigValuesSpecified(c *gc.C) { - cfg, err := bootstrap.NewConfig(testing.ModelTag.Id(), map[string]interface{}{ + cfg, err := bootstrap.NewConfig(map[string]interface{}{ "admin-secret": "sekrit", "ca-cert": testing.CACert, "ca-private-key": testing.CAKey, @@ -55,13 +57,20 @@ }) } +func (s *ConfigSuite) addFiles(c *gc.C, files ...gitjujutesting.TestFile) { + for _, f := range files { + err := ioutil.WriteFile(osenv.JujuXDGDataHomePath(f.Name), []byte(f.Data), 0666) + c.Assert(err, gc.IsNil) + } +} + func (s *ConfigSuite) TestDefaultConfigReadsDefaultCACertKeyFiles(c *gc.C) { - s.Home.AddFiles(c, []gitjujutesting.TestFile{ - {".local/share/juju/ca-cert.pem", testing.CACert}, - {".local/share/juju/ca-private-key.pem", testing.CAKey}, + s.addFiles(c, []gitjujutesting.TestFile{ + {"ca-cert.pem", testing.CACert}, + {"ca-private-key.pem", testing.CAKey}, }...) - cfg, err := bootstrap.NewConfig(testing.ModelTag.Id(), nil) + cfg, err := bootstrap.NewConfig(nil) c.Assert(err, jc.ErrorIsNil) c.Assert(cfg.CACert, gc.Equals, testing.CACert) @@ -69,12 +78,12 @@ } func (s *ConfigSuite) TestConfigReadsCACertKeyFilesFromPaths(c *gc.C) { - s.Home.AddFiles(c, []gitjujutesting.TestFile{ - {".local/share/juju/ca-cert-2.pem", testing.OtherCACert}, - {".local/share/juju/ca-private-key-2.pem", testing.OtherCAKey}, + s.addFiles(c, []gitjujutesting.TestFile{ + {"ca-cert-2.pem", testing.OtherCACert}, + {"ca-private-key-2.pem", testing.OtherCAKey}, }...) 
- cfg, err := bootstrap.NewConfig(testing.ModelTag.Id(), map[string]interface{}{ + cfg, err := bootstrap.NewConfig(map[string]interface{}{ "ca-cert-path": "ca-cert-2.pem", "ca-private-key-path": "ca-private-key-2.pem", }) @@ -124,7 +133,7 @@ } func (*ConfigSuite) testConfigError(c *gc.C, attrs map[string]interface{}, expect string) { - _, err := bootstrap.NewConfig(testing.ModelTag.Id(), attrs) + _, err := bootstrap.NewConfig(attrs) c.Assert(err, gc.ErrorMatches, expect) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/environs/bootstrap/export_test.go juju-core-2.0.0/src/github.com/juju/juju/environs/bootstrap/export_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/environs/bootstrap/export_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/environs/bootstrap/export_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,10 +4,10 @@ package bootstrap var ( - ValidateUploadAllowed = validateUploadAllowed - SetBootstrapTools = setBootstrapTools - FindTools = &findTools - FindBootstrapTools = findBootstrapTools - FindAvailableTools = findAvailableTools - GUIFetchMetadata = &guiFetchMetadata + ValidateUploadAllowed = validateUploadAllowed + GetBootstrapToolsVersion = getBootstrapToolsVersion + FindTools = &findTools + FindBootstrapTools = findBootstrapTools + FindPackagedTools = findPackagedTools + GUIFetchMetadata = &guiFetchMetadata ) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/environs/bootstrap/prepare.go juju-core-2.0.0/src/github.com/juju/juju/environs/bootstrap/prepare.go --- juju-core-2.0~beta15/src/github.com/juju/juju/environs/bootstrap/prepare.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/environs/bootstrap/prepare.go 2016-10-13 14:31:49.000000000 +0000 @@ -11,6 +11,7 @@ "github.com/juju/juju/environs" "github.com/juju/juju/environs/config" "github.com/juju/juju/jujuclient" + "github.com/juju/juju/permission" ) // ControllerModelName is the name of the admin 
model in each controller. @@ -117,7 +118,7 @@ names.NewUserTag(details.AccountDetails.User), modelName, ) - if err := store.UpdateController(controllerName, details.ControllerDetails); err != nil { + if err := store.AddController(controllerName, details.ControllerDetails); err != nil { return errors.Trace(err) } if err := store.UpdateBootstrapConfig(controllerName, details.BootstrapConfig); err != nil { @@ -147,9 +148,7 @@ return nil, details, errors.Trace(err) } - cfg, err = p.PrepareConfig(environs.PrepareConfigParams{ - args.ControllerConfig.ControllerUUID(), args.Cloud, cfg, - }) + cfg, err = p.PrepareConfig(environs.PrepareConfigParams{args.Cloud, cfg}) if err != nil { return nil, details, errors.Trace(err) } @@ -200,8 +199,10 @@ } details.CACert = caCert details.ControllerUUID = args.ControllerConfig.ControllerUUID() + details.ControllerModelUUID = args.ModelConfig[config.UUIDKey].(string) details.User = environs.AdminUser details.Password = args.AdminSecret + details.LastKnownAccess = string(permission.SuperuserAccess) details.ModelUUID = cfg.UUID() details.ControllerDetails.Cloud = args.Cloud.Name details.ControllerDetails.CloudRegion = args.Cloud.Region @@ -209,6 +210,7 @@ details.BootstrapConfig.Cloud = args.Cloud.Name details.BootstrapConfig.CloudRegion = args.Cloud.Region details.CloudEndpoint = args.Cloud.Endpoint + details.CloudIdentityEndpoint = args.Cloud.IdentityEndpoint details.CloudStorageEndpoint = args.Cloud.StorageEndpoint details.Credential = args.CredentialName diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/environs/bootstrap/prepare_test.go juju-core-2.0.0/src/github.com/juju/juju/environs/bootstrap/prepare_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/environs/bootstrap/prepare_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/environs/bootstrap/prepare_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -52,11 +52,11 @@ controllerStore := jujuclienttesting.NewMemStore() ctx := 
envtesting.BootstrapContext(c) controllerCfg := controller.Config{ - controller.ControllerUUIDKey: testing.ModelTag.Id(), + controller.ControllerUUIDKey: testing.ControllerTag.Id(), controller.CACertKey: testing.CACert, - controller.ApiPort: 17777, + controller.APIPort: 17777, controller.StatePort: 1234, - controller.SetNumaControlPolicyKey: true, + controller.SetNUMAControlPolicyKey: true, } _, err = bootstrap.Prepare(ctx, controllerStore, bootstrap.PrepareParams{ ControllerConfig: controllerCfg, @@ -70,7 +70,7 @@ // Check that controller was cached foundController, err := controllerStore.ControllerByName(cfg.Name()) c.Assert(err, jc.ErrorIsNil) - c.Assert(foundController.ControllerUUID, gc.DeepEquals, cfg.UUID()) + c.Assert(foundController.ControllerUUID, gc.DeepEquals, controllerCfg.ControllerUUID()) c.Assert(foundController.Cloud, gc.Equals, "dummy") // Check that bootstrap config was written @@ -78,9 +78,9 @@ c.Assert(err, jc.ErrorIsNil) c.Assert(bootstrapCfg, jc.DeepEquals, &jujuclient.BootstrapConfig{ ControllerConfig: controller.Config{ - controller.ApiPort: 17777, + controller.APIPort: 17777, controller.StatePort: 1234, - controller.SetNumaControlPolicyKey: true, + controller.SetNUMAControlPolicyKey: true, }, Config: map[string]interface{}{ "default-series": "xenial", @@ -95,10 +95,13 @@ "development": false, "test-mode": true, }, - Cloud: "dummy", - CloudType: "dummy", - CloudEndpoint: "dummy-endpoint", - CloudStorageEndpoint: "dummy-storage-endpoint", + ControllerModelUUID: cfg.UUID(), + Cloud: "dummy", + CloudRegion: "dummy-region", + CloudType: "dummy", + CloudEndpoint: "dummy-endpoint", + CloudIdentityEndpoint: "dummy-identity-endpoint", + CloudStorageEndpoint: "dummy-storage-endpoint", }) // Check we cannot call Prepare again. 
diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/environs/bootstrap/tools.go juju-core-2.0.0/src/github.com/juju/juju/environs/bootstrap/tools.go --- juju-core-2.0~beta15/src/github.com/juju/juju/environs/bootstrap/tools.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/environs/bootstrap/tools.go 2016-10-13 14:31:49.000000000 +0000 @@ -10,7 +10,6 @@ "github.com/juju/utils/arch" jujuos "github.com/juju/utils/os" "github.com/juju/utils/series" - "github.com/juju/utils/set" "github.com/juju/version" "github.com/juju/juju/constraints" @@ -26,13 +25,13 @@ // validateUploadAllowed returns an error if an attempt to upload tools should // not be allowed. -func validateUploadAllowed(env environs.Environ, toolsArch, toolsSeries *string) error { +func validateUploadAllowed(env environs.Environ, toolsArch, toolsSeries *string, validator constraints.Validator) error { // Now check that the architecture and series for which we are setting up an // environment matches that from which we are bootstrapping. hostArch := arch.HostArch() // We can't build tools for a different architecture if one is specified. if toolsArch != nil && *toolsArch != hostArch { - return fmt.Errorf("cannot build tools for %q using a machine running on %q", *toolsArch, hostArch) + return fmt.Errorf("cannot use agent built for %q using a machine running on %q", *toolsArch, hostArch) } hostOS := jujuos.HostOS() if toolsSeries != nil { @@ -40,17 +39,11 @@ if err != nil { return errors.Trace(err) } - if toolsSeriesOS != hostOS { - return errors.Errorf("cannot build tools for %q using a machine running %q", *toolsSeries, hostOS) + if !toolsSeriesOS.EquivalentTo(hostOS) { + return errors.Errorf("cannot use agent built for %q using a machine running %q", *toolsSeries, hostOS) } } // If no architecture is specified, ensure the target provider supports instances matching our architecture. 
- validator, err := env.ConstraintsValidator() - if err != nil { - return errors.Annotate(err, - "no packaged tools available and cannot determine model's supported architectures", - ) - } if _, err := validator.Validate(constraints.Value{Arch: &hostArch}); err != nil { return errors.Errorf( "model %q of type %s does not support instances running on %q", @@ -60,29 +53,13 @@ return nil } -// findAvailableTools returns a list of available tools, -// including tools that may be locally built and then -// uploaded. Tools that need to be built will have an -// empty URL. -func findAvailableTools( +// findPackagedTools returns a list of tools for in simplestreams. +func findPackagedTools( env environs.Environ, vers *version.Number, arch, series *string, - upload, canBuild bool, ) (coretools.List, error) { - if upload { - // We're forcing an upload: ensure we can do so. - if !canBuild { - return nil, errors.New("cannot build tools to upload") - } - if err := validateUploadAllowed(env, arch, series); err != nil { - return nil, err - } - return locallyBuildableTools(series), nil - } - - // We're not forcing an upload, so look for tools - // in the environment's simplestreams search paths + // Look for tools in the environment's simplestreams search paths // for existing tools. 
// If the user hasn't asked for a specified tools version, see if @@ -92,64 +69,36 @@ vers = &agentVersion } } - logger.Infof("looking for bootstrap tools: version=%v", vers) + logger.Infof("looking for bootstrap agent binaries: version=%v", vers) toolsList, findToolsErr := findBootstrapTools(env, vers, arch, series) - if findToolsErr != nil && !errors.IsNotFound(findToolsErr) { + logger.Infof("found %d packaged agent binaries", len(toolsList)) + if findToolsErr != nil { return nil, findToolsErr } - - preferredStream := envtools.PreferredStream(vers, env.Config().Development(), env.Config().AgentStream()) - if preferredStream == envtools.ReleasedStream || vers != nil || !canBuild { - // We are not running a development build, or agent-version - // was specified, or we cannot build any tools; the only tools - // available are the ones we've just found. - return toolsList, findToolsErr - } - // The tools located may not include the ones that the - // provider requires. We are running a development build, - // so augment the list of tools with those that we can build - // locally. - - // Collate the set of arch+series that are externally available - // so we can see if we need to build any locally. If we need - // to, only then do we validate that we can upload (which - // involves a potentially expensive ConstraintsValidator call). 
- archSeries := make(set.Strings) - for _, tools := range toolsList { - archSeries.Add(tools.Version.Arch + tools.Version.Series) - } - var localToolsList coretools.List - for _, tools := range locallyBuildableTools(series) { - if !archSeries.Contains(tools.Version.Arch + tools.Version.Series) { - localToolsList = append(localToolsList, tools) - } - } - if len(localToolsList) == 0 || validateUploadAllowed(env, arch, series) != nil { - return toolsList, findToolsErr - } - return append(toolsList, localToolsList...), nil + return toolsList, nil } // locallyBuildableTools returns the list of tools that // can be built locally, for series of the same OS. -func locallyBuildableTools(toolsSeries *string) (buildable coretools.List) { +func locallyBuildableTools(toolsSeries *string) (buildable coretools.List, _ version.Number) { + buildNumber := jujuversion.Current + // Increment the build number so we know it's a custom build. + buildNumber.Build++ for _, ser := range series.SupportedSeries() { - if os, err := series.GetOSFromSeries(ser); err != nil || os != jujuos.HostOS() { + if os, err := series.GetOSFromSeries(ser); err != nil || !os.EquivalentTo(jujuos.HostOS()) { continue } if toolsSeries != nil && ser != *toolsSeries { continue } binary := version.Binary{ - Number: jujuversion.Current, + Number: buildNumber, Series: ser, Arch: arch.HostArch(), } - // Increment the build number so we know it's a development build. 
- binary.Build++ buildable = append(buildable, &coretools.Tools{Version: binary}) } - return buildable + return buildable, buildNumber } // findBootstrapTools returns a tools.List containing only those tools with diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/environs/bootstrap/tools_test.go juju-core-2.0.0/src/github.com/juju/juju/environs/bootstrap/tools_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/environs/bootstrap/tools_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/environs/bootstrap/tools_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -36,8 +36,10 @@ s.PatchValue(&jujuversion.Current, devVersion) env := newEnviron("foo", useDefaultKeys, nil) arch := arch.PPC64EL - err := bootstrap.ValidateUploadAllowed(env, &arch, nil) - c.Assert(err, gc.ErrorMatches, `cannot build tools for "ppc64el" using a machine running on "amd64"`) + validator, err := env.ConstraintsValidator() + c.Assert(err, jc.ErrorIsNil) + err = bootstrap.ValidateUploadAllowed(env, &arch, nil, validator) + c.Assert(err, gc.ErrorMatches, `cannot use agent built for "ppc64el" using a machine running on "amd64"`) } func (s *toolsSuite) TestValidateUploadAllowedIncompatibleHostOS(c *gc.C) { @@ -45,8 +47,10 @@ s.PatchValue(&os.HostOS, func() os.OSType { return os.Ubuntu }) env := newEnviron("foo", useDefaultKeys, nil) series := "win2012" - err := bootstrap.ValidateUploadAllowed(env, nil, &series) - c.Assert(err, gc.ErrorMatches, `cannot build tools for "win2012" using a machine running "Ubuntu"`) + validator, err := env.ConstraintsValidator() + c.Assert(err, jc.ErrorIsNil) + err = bootstrap.ValidateUploadAllowed(env, nil, &series, validator) + c.Assert(err, gc.ErrorMatches, `cannot use agent built for "win2012" using a machine running "Ubuntu"`) } func (s *toolsSuite) TestValidateUploadAllowedIncompatibleTargetArch(c *gc.C) { @@ -59,7 +63,9 @@ devVersion.Build = 1234 s.PatchValue(&jujuversion.Current, devVersion) env := newEnviron("foo", 
useDefaultKeys, nil) - err := bootstrap.ValidateUploadAllowed(env, nil, nil) + validator, err := env.ConstraintsValidator() + c.Assert(err, jc.ErrorIsNil) + err = bootstrap.ValidateUploadAllowed(env, nil, nil, validator) c.Assert(err, gc.ErrorMatches, `model "foo" of type dummy does not support instances running on "ppc64el"`) } @@ -70,7 +76,9 @@ centos7 := "centos7" s.PatchValue(&arch.HostArch, func() string { return arm64 }) s.PatchValue(&os.HostOS, func() os.OSType { return os.CentOS }) - err := bootstrap.ValidateUploadAllowed(env, &arm64, ¢os7) + validator, err := env.ConstraintsValidator() + c.Assert(err, jc.ErrorIsNil) + err = bootstrap.ValidateUploadAllowed(env, &arm64, ¢os7, validator) c.Assert(err, jc.ErrorIsNil) } @@ -162,7 +170,7 @@ return nil, errors.New("splat") }) env := newEnviron("foo", useDefaultKeys, nil) - _, err := bootstrap.FindAvailableTools(env, nil, nil, nil, false, false) + _, err := bootstrap.FindPackagedTools(env, nil, nil, nil) c.Assert(err, gc.ErrorMatches, "splat") } @@ -173,45 +181,10 @@ env := newEnviron("foo", useDefaultKeys, map[string]interface{}{ "agent-version": "1.17.1", }) - _, err := bootstrap.FindAvailableTools(env, nil, nil, nil, false, false) + _, err := bootstrap.FindPackagedTools(env, nil, nil, nil) c.Assert(err, jc.Satisfies, errors.IsNotFound) } -func (s *toolsSuite) TestFindAvailableToolsForceUpload(c *gc.C) { - s.PatchValue(&arch.HostArch, func() string { return arch.AMD64 }) - var findToolsCalled int - s.PatchValue(bootstrap.FindTools, func(_ environs.Environ, major, minor int, stream string, f tools.Filter) (tools.List, error) { - findToolsCalled++ - return nil, errors.NotFoundf("tools") - }) - env := newEnviron("foo", useDefaultKeys, nil) - uploadedTools, err := bootstrap.FindAvailableTools(env, nil, nil, nil, true, true) - c.Assert(err, jc.ErrorIsNil) - c.Assert(uploadedTools, gc.Not(gc.HasLen), 0) - c.Assert(findToolsCalled, gc.Equals, 0) - expectedVersion := jujuversion.Current - expectedVersion.Build++ - for 
_, tools := range uploadedTools { - c.Assert(tools.Version.Number, gc.Equals, expectedVersion) - c.Assert(tools.URL, gc.Equals, "") - } -} - -func (s *toolsSuite) TestFindAvailableToolsForceUploadInvalidArch(c *gc.C) { - s.PatchValue(&arch.HostArch, func() string { - return arch.I386 - }) - var findToolsCalled int - s.PatchValue(bootstrap.FindTools, func(_ environs.Environ, major, minor int, stream string, f tools.Filter) (tools.List, error) { - findToolsCalled++ - return nil, errors.NotFoundf("tools") - }) - env := newEnviron("foo", useDefaultKeys, nil) - _, err := bootstrap.FindAvailableTools(env, nil, nil, nil, true, true) - c.Assert(err, gc.ErrorMatches, `model "foo" of type dummy does not support instances running on "i386"`) - c.Assert(findToolsCalled, gc.Equals, 0) -} - func (s *toolsSuite) TestFindAvailableToolsSpecificVersion(c *gc.C) { currentVersion := version.Binary{ Number: jujuversion.Current, @@ -237,7 +210,7 @@ }) env := newEnviron("foo", useDefaultKeys, nil) toolsVersion := version.MustParse("10.11.12") - result, err := bootstrap.FindAvailableTools(env, &toolsVersion, nil, nil, false, false) + result, err := bootstrap.FindPackagedTools(env, &toolsVersion, nil, nil) c.Assert(err, jc.ErrorIsNil) c.Assert(findToolsCalled, gc.Equals, 1) c.Assert(result, jc.DeepEquals, tools.List{ @@ -248,36 +221,6 @@ }) } -func (s *toolsSuite) TestFindAvailableToolsAutoUpload(c *gc.C) { - s.PatchValue(&arch.HostArch, func() string { return arch.AMD64 }) - trustyTools := &tools.Tools{ - Version: version.MustParseBinary("1.2.3-trusty-amd64"), - URL: "http://testing.invalid/tools.tar.gz", - } - s.PatchValue(bootstrap.FindTools, func(_ environs.Environ, major, minor int, stream string, f tools.Filter) (tools.List, error) { - return tools.List{trustyTools}, nil - }) - env := newEnviron("foo", useDefaultKeys, map[string]interface{}{ - "agent-stream": "proposed"}) - availableTools, err := bootstrap.FindAvailableTools(env, nil, nil, nil, false, true) - c.Assert(err, 
jc.ErrorIsNil) - c.Assert(len(availableTools), jc.GreaterThan, 1) - c.Assert(env.constraintsValidatorCount, gc.Equals, 1) - var trustyToolsFound int - expectedVersion := jujuversion.Current - expectedVersion.Build++ - for _, tools := range availableTools { - if tools == trustyTools { - trustyToolsFound++ - } else { - c.Assert(tools.Version.Number, gc.Equals, expectedVersion) - c.Assert(tools.Version.Series, gc.Not(gc.Equals), "trusty") - c.Assert(tools.URL, gc.Equals, "") - } - } - c.Assert(trustyToolsFound, gc.Equals, 1) -} - func (s *toolsSuite) TestFindAvailableToolsCompleteNoValidate(c *gc.C) { s.PatchValue(&arch.HostArch, func() string { return arch.AMD64 }) @@ -298,7 +241,7 @@ return allTools, nil }) env := newEnviron("foo", useDefaultKeys, nil) - availableTools, err := bootstrap.FindAvailableTools(env, nil, nil, nil, false, false) + availableTools, err := bootstrap.FindPackagedTools(env, nil, nil, nil) c.Assert(err, jc.ErrorIsNil) c.Assert(availableTools, gc.HasLen, len(allTools)) c.Assert(env.constraintsValidatorCount, gc.Equals, 0) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/environs/bootstrap.go juju-core-2.0.0/src/github.com/juju/juju/environs/bootstrap.go --- juju-core-2.0~beta15/src/github.com/juju/juju/environs/bootstrap.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/environs/bootstrap.go 2016-10-13 14:31:49.000000000 +0000 @@ -17,6 +17,14 @@ // BootstrapParams holds the parameters for bootstrapping an environment. type BootstrapParams struct { + // Cloud contains the name of the cloud that Juju will be + // bootstrapped in. Used for printing feedback during bootstrap. + CloudName string + + // CloudRegion is the name of the cloud region that Juju will be + // bootstrapped in. Used for printing feedback during bootstrap. + CloudRegion string + // ControllerConfig contains the configuration attributes for the // bootstrapped controller. 
ControllerConfig controller.Config diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/environs/broker.go juju-core-2.0.0/src/github.com/juju/juju/environs/broker.go --- juju-core-2.0~beta15/src/github.com/juju/juju/environs/broker.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/environs/broker.go 2016-10-13 14:31:49.000000000 +0000 @@ -70,8 +70,14 @@ // that may be used to start this instance. ImageMetadata []*imagemetadata.ImageMetadata - // StatusCallback is a callback to be used by the instance to report changes in status. + // StatusCallback is a callback to be used by the instance to report + // changes in status. Its signature is consistent with other + // status-related functions to allow them to be used as callbacks. StatusCallback func(settableStatus status.Status, info string, data map[string]interface{}) error + + // CleanupCallback is a callback to be used to clean up any residual + // status-reporting output from StatusCallback. + CleanupCallback func(info string) error } // StartInstanceResult holds the result of an diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/environs/cloudspec.go juju-core-2.0.0/src/github.com/juju/juju/environs/cloudspec.go --- juju-core-2.0~beta15/src/github.com/juju/juju/environs/cloudspec.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/environs/cloudspec.go 2016-10-13 14:31:49.000000000 +0000 @@ -26,6 +26,9 @@ // Endpoint is the endpoint for the cloud (region). Endpoint string + // IdentityEndpoint is the identity endpoint for the cloud (region). + IdentityEndpoint string + // StorageEndpoint is the storage endpoint for the cloud (region). StorageEndpoint string @@ -51,12 +54,13 @@ // Cloud, cloud and region names, and credential. 
func MakeCloudSpec(cloud jujucloud.Cloud, cloudName, cloudRegionName string, credential *jujucloud.Credential) (CloudSpec, error) { cloudSpec := CloudSpec{ - Type: cloud.Type, - Name: cloudName, - Region: cloudRegionName, - Endpoint: cloud.Endpoint, - StorageEndpoint: cloud.StorageEndpoint, - Credential: credential, + Type: cloud.Type, + Name: cloudName, + Region: cloudRegionName, + Endpoint: cloud.Endpoint, + IdentityEndpoint: cloud.IdentityEndpoint, + StorageEndpoint: cloud.StorageEndpoint, + Credential: credential, } if cloudRegionName != "" { cloudRegion, err := jujucloud.RegionByName(cloud.Regions, cloudRegionName) @@ -64,7 +68,28 @@ return CloudSpec{}, errors.Annotate(err, "getting cloud region definition") } cloudSpec.Endpoint = cloudRegion.Endpoint + cloudSpec.IdentityEndpoint = cloudRegion.IdentityEndpoint cloudSpec.StorageEndpoint = cloudRegion.StorageEndpoint } return cloudSpec, nil } + +// RegionSpec contains the information needed to lookup specific region +// configuration. This is for use in calling +// state/modelconfig.(ComposeNewModelConfig) so there is no need to serialize +// it. +type RegionSpec struct { + // Cloud is the name of the cloud. + Cloud string + + // Region is the name of the cloud region. + Region string +} + +// NewRegionSpec returns a RegionSpec ensuring neither arg is empty. 
+func NewRegionSpec(cloud, region string) (*RegionSpec, error) { + if cloud == "" || region == "" { + return nil, errors.New("cloud and region are required to be non empty strings") + } + return &RegionSpec{Cloud: cloud, Region: region}, nil +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/environs/cloudspec_test.go juju-core-2.0.0/src/github.com/juju/juju/environs/cloudspec_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/environs/cloudspec_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/environs/cloudspec_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,53 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. +package environs_test + +import ( + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/environs" +) + +type cloudSpecSuite struct { +} + +var _ = gc.Suite(&cloudSpecSuite{}) + +func (s *cloudSpecSuite) TestNewRegionSpec(c *gc.C) { + tests := []struct { + description, cloud, region, errMatch string + nilErr bool + want *environs.RegionSpec + }{ + { + description: "test empty cloud", + cloud: "", + region: "aregion", + errMatch: "cloud and region are required to be non empty strings", + want: nil, + }, { + description: "test empty region", + cloud: "acloud", + region: "", + errMatch: "cloud and region are required to be non empty strings", + want: nil, + }, { + description: "test valid", + cloud: "acloud", + region: "aregion", + nilErr: true, + want: &environs.RegionSpec{Cloud: "acloud", Region: "aregion"}, + }, + } + for i, test := range tests { + c.Logf("Test %d: %s", i, test.description) + rspec, err := environs.NewRegionSpec(test.cloud, test.region) + if !test.nilErr { + c.Check(err, gc.ErrorMatches, test.errMatch) + } else { + c.Check(err, jc.ErrorIsNil) + } + c.Check(rspec, jc.DeepEquals, test.want) + } +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/environs/config/config.go 
juju-core-2.0.0/src/github.com/juju/juju/environs/config/config.go --- juju-core-2.0~beta15/src/github.com/juju/juju/environs/config/config.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/environs/config/config.go 2016-10-13 14:31:49.000000000 +0000 @@ -75,23 +75,23 @@ // AgentMetadataURLKey stores the key for this setting. AgentMetadataURLKey = "agent-metadata-url" - // HttpProxyKey stores the key for this setting. - HttpProxyKey = "http-proxy" + // HTTPProxyKey stores the key for this setting. + HTTPProxyKey = "http-proxy" - // HttpsProxyKey stores the key for this setting. - HttpsProxyKey = "https-proxy" + // HTTPSProxyKey stores the key for this setting. + HTTPSProxyKey = "https-proxy" - // FtpProxyKey stores the key for this setting. - FtpProxyKey = "ftp-proxy" + // FTPProxyKey stores the key for this setting. + FTPProxyKey = "ftp-proxy" - // AptHttpProxyKey stores the key for this setting. - AptHttpProxyKey = "apt-http-proxy" + // AptHTTPProxyKey stores the key for this setting. + AptHTTPProxyKey = "apt-http-proxy" - // AptHttpsProxyKey stores the key for this setting. - AptHttpsProxyKey = "apt-https-proxy" + // AptHTTPSProxyKey stores the key for this setting. + AptHTTPSProxyKey = "apt-https-proxy" - // AptFtpProxyKey stores the key for this setting. - AptFtpProxyKey = "apt-ftp-proxy" + // AptFTPProxyKey stores the key for this setting. + AptFTPProxyKey = "apt-ftp-proxy" // NoProxyKey stores the key for this setting. NoProxyKey = "no-proxy" @@ -125,6 +125,10 @@ // automatically retry a hook that has failed AutomaticallyRetryHooks = "automatically-retry-hooks" + // TransmitVendorMetricsKey is the key for whether the controller sends + // metrics collected in this model for anonymized aggregate analytics. 
+ TransmitVendorMetricsKey = "transmit-vendor-metrics" + // // Deprecated Settings Attributes // @@ -290,6 +294,7 @@ "enable-os-upgrade": true, "development": false, "test-mode": false, + TransmitVendorMetricsKey: true, // Image and agent streams and URLs. "image-stream": "released", @@ -301,13 +306,13 @@ LogForwardEnabled: false, // Proxy settings. - HttpProxyKey: "", - HttpsProxyKey: "", - FtpProxyKey: "", + HTTPProxyKey: "", + HTTPSProxyKey: "", + FTPProxyKey: "", NoProxyKey: "", - AptHttpProxyKey: "", - AptHttpsProxyKey: "", - AptFtpProxyKey: "", + AptHTTPProxyKey: "", + AptHTTPSProxyKey: "", + AptFTPProxyKey: "", "apt-mirror": "", } @@ -566,26 +571,26 @@ // proxy. func (c *Config) ProxySettings() proxy.Settings { return proxy.Settings{ - Http: c.HttpProxy(), - Https: c.HttpsProxy(), - Ftp: c.FtpProxy(), + Http: c.HTTPProxy(), + Https: c.HTTPSProxy(), + Ftp: c.FTPProxy(), NoProxy: c.NoProxy(), } } -// HttpProxy returns the http proxy for the environment. -func (c *Config) HttpProxy() string { - return c.asString(HttpProxyKey) +// HTTPProxy returns the http proxy for the environment. +func (c *Config) HTTPProxy() string { + return c.asString(HTTPProxyKey) } -// HttpsProxy returns the https proxy for the environment. -func (c *Config) HttpsProxy() string { - return c.asString(HttpsProxyKey) +// HTTPSProxy returns the https proxy for the environment. +func (c *Config) HTTPSProxy() string { + return c.asString(HTTPSProxyKey) } -// FtpProxy returns the ftp proxy for the environment. -func (c *Config) FtpProxy() string { - return c.asString(FtpProxyKey) +// FTPProxy returns the ftp proxy for the environment. +func (c *Config) FTPProxy() string { + return c.asString(FTPProxyKey) } // NoProxy returns the 'no proxy' for the environment. @@ -612,28 +617,28 @@ // AptProxySettings returns all three proxy settings; http, https and ftp. 
func (c *Config) AptProxySettings() proxy.Settings { return proxy.Settings{ - Http: c.AptHttpProxy(), - Https: c.AptHttpsProxy(), - Ftp: c.AptFtpProxy(), + Http: c.AptHTTPProxy(), + Https: c.AptHTTPSProxy(), + Ftp: c.AptFTPProxy(), } } -// AptHttpProxy returns the apt http proxy for the environment. +// AptHTTPProxy returns the apt http proxy for the environment. // Falls back to the default http-proxy if not specified. -func (c *Config) AptHttpProxy() string { - return addSchemeIfMissing("http", c.getWithFallback(AptHttpProxyKey, HttpProxyKey)) +func (c *Config) AptHTTPProxy() string { + return addSchemeIfMissing("http", c.getWithFallback(AptHTTPProxyKey, HTTPProxyKey)) } -// AptHttpsProxy returns the apt https proxy for the environment. +// AptHTTPSProxy returns the apt https proxy for the environment. // Falls back to the default https-proxy if not specified. -func (c *Config) AptHttpsProxy() string { - return addSchemeIfMissing("https", c.getWithFallback(AptHttpsProxyKey, HttpsProxyKey)) +func (c *Config) AptHTTPSProxy() string { + return addSchemeIfMissing("https", c.getWithFallback(AptHTTPSProxyKey, HTTPSProxyKey)) } -// AptFtpProxy returns the apt ftp proxy for the environment. +// AptFTPProxy returns the apt ftp proxy for the environment. // Falls back to the default ftp-proxy if not specified. -func (c *Config) AptFtpProxy() string { - return addSchemeIfMissing("ftp", c.getWithFallback(AptFtpProxyKey, FtpProxyKey)) +func (c *Config) AptFTPProxy() string { + return addSchemeIfMissing("ftp", c.getWithFallback(AptFTPProxyKey, FTPProxyKey)) } // AptMirror sets the apt mirror for the environment. @@ -763,6 +768,16 @@ } } +// TransmitVendorMetrics returns whether the controller sends charm-collected metrics +// in this model for anonymized aggregate analytics. By default this should be true. 
+func (c *Config) TransmitVendorMetrics() bool { + if val, ok := c.defined[TransmitVendorMetricsKey].(bool); !ok { + return true + } else { + return val + } +} + // ProvisionerHarvestMode reports the harvesting methodology the // provisioner should take. func (c *Config) ProvisionerHarvestMode() HarvestMode { @@ -932,13 +947,13 @@ "firewall-mode": schema.Omit, "logging-config": schema.Omit, ProvisionerHarvestModeKey: schema.Omit, - HttpProxyKey: schema.Omit, - HttpsProxyKey: schema.Omit, - FtpProxyKey: schema.Omit, + HTTPProxyKey: schema.Omit, + HTTPSProxyKey: schema.Omit, + FTPProxyKey: schema.Omit, NoProxyKey: schema.Omit, - AptHttpProxyKey: schema.Omit, - AptHttpsProxyKey: schema.Omit, - AptFtpProxyKey: schema.Omit, + AptHTTPProxyKey: schema.Omit, + AptHTTPSProxyKey: schema.Omit, + AptFTPProxyKey: schema.Omit, "apt-mirror": schema.Omit, AgentStreamKey: schema.Omit, ResourceTagsKey: schema.Omit, @@ -956,6 +971,7 @@ IgnoreMachineAddresses: schema.Omit, AutomaticallyRetryHooks: schema.Omit, "test-mode": schema.Omit, + TransmitVendorMetricsKey: schema.Omit, } func allowEmpty(attr string) bool { @@ -1017,7 +1033,7 @@ if fields[name] == nil { if val, isString := value.(string); isString && val != "" { // only warn about attributes with non-empty string values - logger.Errorf("unknown config field %q", name) + logger.Warningf("unknown config field %q", name) } result[name] = value } @@ -1049,9 +1065,9 @@ // proxy settings. 
func ProxyConfigMap(proxySettings proxy.Settings) map[string]interface{} { settings := make(map[string]interface{}) - addIfNotEmpty(settings, HttpProxyKey, proxySettings.Http) - addIfNotEmpty(settings, HttpsProxyKey, proxySettings.Https) - addIfNotEmpty(settings, FtpProxyKey, proxySettings.Ftp) + addIfNotEmpty(settings, HTTPProxyKey, proxySettings.Http) + addIfNotEmpty(settings, HTTPSProxyKey, proxySettings.Https) + addIfNotEmpty(settings, FTPProxyKey, proxySettings.Ftp) addIfNotEmpty(settings, NoProxyKey, proxySettings.NoProxy) return settings } @@ -1060,9 +1076,9 @@ // proxy settings. func AptProxyConfigMap(proxySettings proxy.Settings) map[string]interface{} { settings := make(map[string]interface{}) - addIfNotEmpty(settings, AptHttpProxyKey, proxySettings.Http) - addIfNotEmpty(settings, AptHttpsProxyKey, proxySettings.Https) - addIfNotEmpty(settings, AptFtpProxyKey, proxySettings.Ftp) + addIfNotEmpty(settings, AptHTTPProxyKey, proxySettings.Http) + addIfNotEmpty(settings, AptHTTPSProxyKey, proxySettings.Https) + addIfNotEmpty(settings, AptFTPProxyKey, proxySettings.Ftp) return settings } @@ -1110,19 +1126,19 @@ Group: environschema.JujuGroup, Immutable: true, }, - AptFtpProxyKey: { + AptFTPProxyKey: { // TODO document acceptable format Description: "The APT FTP proxy for the model", Type: environschema.Tstring, Group: environschema.EnvironGroup, }, - AptHttpProxyKey: { + AptHTTPProxyKey: { // TODO document acceptable format Description: "The APT HTTP proxy for the model", Type: environschema.Tstring, Group: environschema.EnvironGroup, }, - AptHttpsProxyKey: { + AptHTTPSProxyKey: { // TODO document acceptable format Description: "The APT HTTPS proxy for the model", Type: environschema.Tstring, @@ -1186,17 +1202,17 @@ Immutable: true, Group: environschema.EnvironGroup, }, - FtpProxyKey: { + FTPProxyKey: { Description: "The FTP proxy value to configure on instances, in the FTP_PROXY environment variable", Type: environschema.Tstring, Group: 
environschema.EnvironGroup, }, - HttpProxyKey: { + HTTPProxyKey: { Description: "The HTTP proxy value to configure on instances, in the HTTP_PROXY environment variable", Type: environschema.Tstring, Group: environschema.EnvironGroup, }, - HttpsProxyKey: { + HTTPSProxyKey: { Description: "The HTTPS proxy value to configure on instances, in the HTTPS_PROXY environment variable", Type: environschema.Tstring, Group: environschema.EnvironGroup, @@ -1306,4 +1322,9 @@ Type: environschema.Tbool, Group: environschema.EnvironGroup, }, + TransmitVendorMetricsKey: { + Description: "Determines whether metrics declared by charms deployed into this model are sent for anonymized aggregate analytics", + Type: environschema.Tbool, + Group: environschema.EnvironGroup, + }, } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/environs/config/config_test.go juju-core-2.0.0/src/github.com/juju/juju/environs/config/config_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/environs/config/config_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/environs/config/config_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -20,6 +20,7 @@ "gopkg.in/juju/charmrepo.v2-unstable" "gopkg.in/juju/environschema.v1" + "github.com/juju/juju/cert" "github.com/juju/juju/environs/config" "github.com/juju/juju/juju/osenv" "github.com/juju/juju/testing" @@ -469,8 +470,8 @@ "logforward-enabled": true, "syslog-host": "localhost:1234", "syslog-ca-cert": "abc", - "syslog-client-cert": caCert, - "syslog-client-key": caKey, + "syslog-client-cert": testing.CACert, + "syslog-client-key": testing.CAKey, }), err: `invalid syslog forwarding config: validating TLS config: parsing CA certificate: no certificates found`, }, { @@ -482,8 +483,8 @@ "logforward-enabled": true, "syslog-host": "localhost:1234", "syslog-ca-cert": invalidCACert, - "syslog-client-cert": caCert, - "syslog-client-key": caKey, + "syslog-client-cert": testing.CACert, + "syslog-client-key": testing.CAKey, 
}), err: `invalid syslog forwarding config: validating TLS config: parsing CA certificate: asn1: syntax error: data truncated`, }, { @@ -492,9 +493,9 @@ attrs: minimalConfigAttrs.Merge(testing.Attrs{ "logforward-enabled": true, "syslog-host": "10.0.0.1:12345", - "syslog-ca-cert": caCert, + "syslog-ca-cert": testing.CACert, "syslog-client-cert": invalidCACert, - "syslog-client-key": caKey, + "syslog-client-key": testing.CAKey, }), err: `invalid syslog forwarding config: validating TLS config: parsing client key pair: asn1: syntax error: data truncated`, }, { @@ -503,8 +504,8 @@ attrs: minimalConfigAttrs.Merge(testing.Attrs{ "logforward-enabled": true, "syslog-host": "10.0.0.1:12345", - "syslog-ca-cert": caCert, - "syslog-client-cert": caCert, + "syslog-ca-cert": testing.CACert, + "syslog-client-cert": testing.CACert, "syslog-client-key": invalidCAKey, }), err: `invalid syslog forwarding config: validating TLS config: parsing client key pair: (crypto/)?tls: failed to parse private key`, @@ -514,12 +515,24 @@ attrs: minimalConfigAttrs.Merge(testing.Attrs{ "logforward-enabled": true, "syslog-host": "10.0.0.1:12345", - "syslog-ca-cert": caCert, - "syslog-client-cert": caCert, - "syslog-client-key": caKey2, + "syslog-ca-cert": testing.CACert, + "syslog-client-cert": testing.ServerCert, + "syslog-client-key": serverKey2, }), err: `invalid syslog forwarding config: validating TLS config: parsing client key pair: (crypto/)?tls: private key does not match public key`, }, { + about: "transmit-vendor-metrics asserted with default value", + useDefaults: config.UseDefaults, + attrs: minimalConfigAttrs.Merge(testing.Attrs{ + "transmit-vendor-metrics": true, + }), + }, { + about: "transmit-vendor-metrics asserted false", + useDefaults: config.UseDefaults, + attrs: minimalConfigAttrs.Merge(testing.Attrs{ + "transmit-vendor-metrics": false, + }), + }, { about: "Valid syslog config values", useDefaults: config.UseDefaults, attrs: minimalConfigAttrs.Merge(testing.Attrs{ @@ -699,6 
+712,14 @@ } else { c.Assert(resourceTags, gc.HasLen, 0) } + + xmit := cfg.TransmitVendorMetrics() + expectedXmit, xmitAsserted := test.attrs["transmit-vendor-metrics"] + if xmitAsserted { + c.Check(xmit, gc.Equals, expectedXmit) + } else { + c.Check(xmit, jc.IsTrue) + } } func (test configTest) assertDuration(c *gc.C, name string, actual time.Duration, defaultInSeconds int) { @@ -976,12 +997,12 @@ "ftp-proxy": "ftp://user@10.0.0.1", "no-proxy": "localhost,10.0.3.1", }) - c.Assert(config.HttpProxy(), gc.Equals, "http://user@10.0.0.1") - c.Assert(config.AptHttpProxy(), gc.Equals, "http://user@10.0.0.1") - c.Assert(config.HttpsProxy(), gc.Equals, "https://user@10.0.0.1") - c.Assert(config.AptHttpsProxy(), gc.Equals, "https://user@10.0.0.1") - c.Assert(config.FtpProxy(), gc.Equals, "ftp://user@10.0.0.1") - c.Assert(config.AptFtpProxy(), gc.Equals, "ftp://user@10.0.0.1") + c.Assert(config.HTTPProxy(), gc.Equals, "http://user@10.0.0.1") + c.Assert(config.AptHTTPProxy(), gc.Equals, "http://user@10.0.0.1") + c.Assert(config.HTTPSProxy(), gc.Equals, "https://user@10.0.0.1") + c.Assert(config.AptHTTPSProxy(), gc.Equals, "https://user@10.0.0.1") + c.Assert(config.FTPProxy(), gc.Equals, "ftp://user@10.0.0.1") + c.Assert(config.AptFTPProxy(), gc.Equals, "ftp://user@10.0.0.1") c.Assert(config.NoProxy(), gc.Equals, "localhost,10.0.3.1") } @@ -994,12 +1015,12 @@ "ftp-proxy": "user@10.0.0.1", "no-proxy": "localhost,10.0.3.1", }) - c.Assert(config.HttpProxy(), gc.Equals, "user@10.0.0.1") - c.Assert(config.AptHttpProxy(), gc.Equals, "http://user@10.0.0.1") - c.Assert(config.HttpsProxy(), gc.Equals, "user@10.0.0.1") - c.Assert(config.AptHttpsProxy(), gc.Equals, "https://user@10.0.0.1") - c.Assert(config.FtpProxy(), gc.Equals, "user@10.0.0.1") - c.Assert(config.AptFtpProxy(), gc.Equals, "ftp://user@10.0.0.1") + c.Assert(config.HTTPProxy(), gc.Equals, "user@10.0.0.1") + c.Assert(config.AptHTTPProxy(), gc.Equals, "http://user@10.0.0.1") + c.Assert(config.HTTPSProxy(), gc.Equals, 
"user@10.0.0.1") + c.Assert(config.AptHTTPSProxy(), gc.Equals, "https://user@10.0.0.1") + c.Assert(config.FTPProxy(), gc.Equals, "user@10.0.0.1") + c.Assert(config.AptFTPProxy(), gc.Equals, "ftp://user@10.0.0.1") c.Assert(config.NoProxy(), gc.Equals, "localhost,10.0.3.1") } @@ -1013,23 +1034,23 @@ "apt-https-proxy": "https://user@10.0.0.2", "apt-ftp-proxy": "ftp://user@10.0.0.2", }) - c.Assert(config.HttpProxy(), gc.Equals, "http://user@10.0.0.1") - c.Assert(config.AptHttpProxy(), gc.Equals, "http://user@10.0.0.2") - c.Assert(config.HttpsProxy(), gc.Equals, "https://user@10.0.0.1") - c.Assert(config.AptHttpsProxy(), gc.Equals, "https://user@10.0.0.2") - c.Assert(config.FtpProxy(), gc.Equals, "ftp://user@10.0.0.1") - c.Assert(config.AptFtpProxy(), gc.Equals, "ftp://user@10.0.0.2") + c.Assert(config.HTTPProxy(), gc.Equals, "http://user@10.0.0.1") + c.Assert(config.AptHTTPProxy(), gc.Equals, "http://user@10.0.0.2") + c.Assert(config.HTTPSProxy(), gc.Equals, "https://user@10.0.0.1") + c.Assert(config.AptHTTPSProxy(), gc.Equals, "https://user@10.0.0.2") + c.Assert(config.FTPProxy(), gc.Equals, "ftp://user@10.0.0.1") + c.Assert(config.AptFTPProxy(), gc.Equals, "ftp://user@10.0.0.2") } func (s *ConfigSuite) TestProxyValuesNotSet(c *gc.C) { s.addJujuFiles(c) config := newTestConfig(c, testing.Attrs{}) - c.Assert(config.HttpProxy(), gc.Equals, "") - c.Assert(config.AptHttpProxy(), gc.Equals, "") - c.Assert(config.HttpsProxy(), gc.Equals, "") - c.Assert(config.AptHttpsProxy(), gc.Equals, "") - c.Assert(config.FtpProxy(), gc.Equals, "") - c.Assert(config.AptFtpProxy(), gc.Equals, "") + c.Assert(config.HTTPProxy(), gc.Equals, "") + c.Assert(config.AptHTTPProxy(), gc.Equals, "") + c.Assert(config.HTTPSProxy(), gc.Equals, "") + c.Assert(config.AptHTTPSProxy(), gc.Equals, "") + c.Assert(config.FTPProxy(), gc.Equals, "") + c.Assert(config.AptFTPProxy(), gc.Equals, "") c.Assert(config.NoProxy(), gc.Equals, "") } @@ -1163,43 +1184,13 @@ return s } -var caCert = ` ------BEGIN 
CERTIFICATE----- -MIIBjDCCATigAwIBAgIBADALBgkqhkiG9w0BAQUwHjENMAsGA1UEChMEanVqdTEN -MAsGA1UEAxMEcm9vdDAeFw0xMjExMDkxNjQwMjhaFw0yMjExMDkxNjQ1MjhaMB4x -DTALBgNVBAoTBGp1anUxDTALBgNVBAMTBHJvb3QwWTALBgkqhkiG9w0BAQEDSgAw -RwJAduA1Gnb2VJLxNGfG4St0Qy48Y3q5Z5HheGtTGmti/FjlvQvScCFGCnJG7fKA -Knd7ia3vWg7lxYkIvMPVP88LAQIDAQABo2YwZDAOBgNVHQ8BAf8EBAMCAKQwEgYD -VR0TAQH/BAgwBgEB/wIBATAdBgNVHQ4EFgQUlvKX8vwp0o+VdhdhoA9O6KlOm00w -HwYDVR0jBBgwFoAUlvKX8vwp0o+VdhdhoA9O6KlOm00wCwYJKoZIhvcNAQEFA0EA -LlNpevtFr8gngjAFFAO/FXc7KiZcCrA5rBfb/rEy297lIqmKt5++aVbLEPyxCIFC -r71Sj63TUTFWtRZAxvn9qQ== ------END CERTIFICATE----- -`[1:] - -var caKey = ` ------BEGIN RSA PRIVATE KEY----- -MIIBOQIBAAJAduA1Gnb2VJLxNGfG4St0Qy48Y3q5Z5HheGtTGmti/FjlvQvScCFG -CnJG7fKAKnd7ia3vWg7lxYkIvMPVP88LAQIDAQABAkEAsFOdMSYn+AcF1M/iBfjo -uQWJ+Zz+CgwuvumjGNsUtmwxjA+hh0fCn0Ah2nAt4Ma81vKOKOdQ8W6bapvsVDH0 -6QIhAJOkLmEKm4H5POQV7qunRbRsLbft/n/SHlOBz165WFvPAiEAzh9fMf70std1 -sVCHJRQWKK+vw3oaEvPKvkPiV5ui0C8CIGNsvybuo8ald5IKCw5huRlFeIxSo36k -m3OVCXc6zfwVAiBnTUe7WcivPNZqOC6TAZ8dYvdWo4Ifz3jjpEfymjid1wIgBIJv -ERPyv2NQqIFQZIyzUP7LVRIWfpFFOo9/Ww/7s5Y= ------END RSA PRIVATE KEY----- -`[1:] - -var caKey2 = ` ------BEGIN RSA PRIVATE KEY----- -MIIBOQIBAAJBAJkSWRrr81y8pY4dbNgt+8miSKg4z6glp2KO2NnxxAhyyNtQHKvC -+fJALJj+C2NhuvOv9xImxOl3Hg8fFPCXCtcCAwEAAQJATQNzO11NQvJS5U6eraFt -FgSFQ8XZjILtVWQDbJv8AjdbEgKMHEy33icsAKIUAx8jL9kjq6K9kTdAKXZi9grF -UQIhAPD7jccIDUVm785E5eR9eisq0+xpgUIa24Jkn8cAlst5AiEAopxVFl1auer3 -GP2In3pjdL4ydzU/gcRcYisoJqwHpM8CIHtqmaXBPeq5WT9ukb5/dL3+5SJCtmxA -jQMuvZWRe6khAiBvMztYtPSDKXRbCZ4xeQ+kWSDHtok8Y5zNoTeu4nvDrwIgb3Al -fikzPveC5g6S6OvEQmyDz59tYBubm2XHgvxqww0= ------END RSA PRIVATE KEY----- -`[1:] +var serverKey2 = func() string { + _, key, err := cert.NewDefaultServer(testing.CACert, testing.CAKey, nil) + if err != nil { + panic(err) + } + return string(key) +}() var invalidCAKey = ` -----BEGIN RSA PRIVATE KEY----- diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/environs/config/source.go 
juju-core-2.0.0/src/github.com/juju/juju/environs/config/source.go --- juju-core-2.0~beta15/src/github.com/juju/juju/environs/config/source.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/environs/config/source.go 2016-10-13 14:31:49.000000000 +0000 @@ -3,6 +3,10 @@ package config +import ( + "github.com/juju/schema" +) + // These constants define named sources of model config attributes. // After a call to UpdateModelConfig, any attributes added/removed // will have a source of JujuModelConfigSource. @@ -15,6 +19,11 @@ // come from those associated with the controller. JujuControllerSource = "controller" + // JujuRegionSource is used to label model config attributes that come from + // those associated with the region where the model is + // running. + JujuRegionSource = "region" + // JujuModelConfigSource is used to label model config attributes that // have been explicitly set by the user. JujuModelConfigSource = "model" @@ -42,3 +51,38 @@ } return result } + +// ConfigSchemaSource instances provide information on config attributes +// and the default attribute values. +type ConfigSchemaSource interface { + // ConfigSchema returns extra config attributes specific + // to this provider only. + ConfigSchema() schema.Fields + + // ConfigDefaults returns the default values for the + // provider specific config attributes. + ConfigDefaults() schema.Defaults +} + +// ModelDefaultAttributes is a map of configuration values to a list of possible +// values. +type ModelDefaultAttributes map[string]AttributeDefaultValues + +// AttributeDefaultValues represents all the default values at each level for a given +// setting. +type AttributeDefaultValues struct { + // Default and Controller represent the values as set at those levels. 
+ Default interface{} `json:"default,omitempty" yaml:"default,omitempty"` + Controller interface{} `json:"controller,omitempty" yaml:"controller,omitempty"` + // Regions is a slice of Region representing the values as set in each + // region. + Regions []RegionDefaultValue `json:"regions,omitempty" yaml:"regions,omitempty"` +} + +// RegionDefaultValue holds the region information for each region in DefaultSetting. +type RegionDefaultValue struct { + // Name represents the region name for this specific setting. + Name string `json:"name" yaml:"name"` + // Value is the value of the setting this represents in the named region. + Value interface{} `json:"value" yaml:"value"` +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/environs/imagemetadata/simplestreams_test.go juju-core-2.0.0/src/github.com/juju/juju/environs/imagemetadata/simplestreams_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/environs/imagemetadata/simplestreams_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/environs/imagemetadata/simplestreams_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -31,7 +31,7 @@ validCloudSpec simplestreams.CloudSpec } -var liveUrls = map[string]liveTestData{ +var liveURLs = map[string]liveTestData{ "ec2": { baseURL: imagemetadata.DefaultUbuntuBaseURL, requireSigned: true, @@ -51,8 +51,8 @@ } var ok bool var testData liveTestData - if testData, ok = liveUrls[*vendor]; !ok { - keys := reflect.ValueOf(liveUrls).MapKeys() + if testData, ok = liveURLs[*vendor]; !ok { + keys := reflect.ValueOf(liveURLs).MapKeys() t.Fatalf("Unknown vendor %s. 
Must be one of %s", *vendor, keys) } registerLiveSimpleStreamsTests(testData.baseURL, imagemetadata.NewImageConstraint(simplestreams.LookupParams{ diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/environs/imagemetadata/validation.go juju-core-2.0.0/src/github.com/juju/juju/environs/imagemetadata/validation.go --- juju-core-2.0~beta15/src/github.com/juju/juju/environs/imagemetadata/validation.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/environs/imagemetadata/validation.go 2016-10-13 14:31:49.000000000 +0000 @@ -21,9 +21,6 @@ if params.Endpoint == "" { return nil, nil, fmt.Errorf("required parameter endpoint not specified") } - if len(params.Architectures) == 0 { - return nil, nil, fmt.Errorf("required parameter arches not specified") - } if len(params.Sources) == 0 { return nil, nil, fmt.Errorf("required parameter sources not specified") } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/environs/instances/instancetype.go juju-core-2.0.0/src/github.com/juju/juju/environs/instances/instancetype.go --- juju-core-2.0~beta15/src/github.com/juju/juju/environs/instances/instancetype.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/environs/instances/instancetype.go 2016-10-13 14:31:49.000000000 +0000 @@ -103,7 +103,7 @@ var itypes []InstanceType // Rules used to select instance types: - // - non memory constraints like cpu-cores etc are always honoured + // - non memory constraints like cores etc are always honoured // - if no mem constraint specified and instance-type not specified, // try opinionated default with enough mem to run a server. 
// - if no matches and no mem constraint specified, try again and diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/environs/instances/instancetype_test.go juju-core-2.0.0/src/github.com/juju/juju/environs/instances/instancetype_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/environs/instances/instancetype_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/environs/instances/instancetype_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -124,8 +124,8 @@ arches []string }{ { - about: "cpu-cores", - cons: "cpu-cores=2", + about: "cores", + cons: "cores=2", expectedItypes: []string{ "c1.medium", "m1.large", "m1.xlarge", "c1.xlarge", "cc1.4xlarge", "cc2.8xlarge", @@ -154,7 +154,7 @@ }, { about: "enough memory for mongodb if mem not specified", - cons: "cpu-cores=4", + cons: "cores=4", itypesToUse: []InstanceType{ {Id: "5", Name: "it-5", Arches: []string{"amd64"}, Mem: 1024, CpuCores: 2}, {Id: "4", Name: "it-4", Arches: []string{"amd64"}, Mem: 2048, CpuCores: 4}, @@ -198,7 +198,7 @@ }, { about: "largest mem available matching other constraints if mem not specified", - cons: "cpu-cores=4", + cons: "cores=4", itypesToUse: []InstanceType{ {Id: "3", Name: "it-3", Arches: []string{"amd64"}, Mem: 1024, CpuCores: 2}, {Id: "2", Name: "it-2", Arches: []string{"amd64"}, Mem: 256, CpuCores: 4}, @@ -208,7 +208,7 @@ }, { about: "largest mem available matching other constraints if mem not specified, cost is tie breaker", - cons: "cpu-cores=4", + cons: "cores=4", itypesToUse: []InstanceType{ {Id: "4", Name: "it-4", Arches: []string{"amd64"}, Mem: 1024, CpuCores: 2}, {Id: "3", Name: "it-3", Arches: []string{"amd64"}, Mem: 256, CpuCores: 4}, @@ -261,8 +261,8 @@ _, err = MatchingInstanceTypes(instanceTypes, "test", constraints.MustParse("arch=i386 mem=8G")) c.Check(err, gc.ErrorMatches, `no instance types in test matching constraints "arch=i386 mem=8192M"`) - _, err = MatchingInstanceTypes(instanceTypes, "test", 
constraints.MustParse("cpu-cores=9000")) - c.Check(err, gc.ErrorMatches, `no instance types in test matching constraints "cpu-cores=9000"`) + _, err = MatchingInstanceTypes(instanceTypes, "test", constraints.MustParse("cores=9000")) + c.Check(err, gc.ErrorMatches, `no instance types in test matching constraints "cores=9000"`) _, err = MatchingInstanceTypes(instanceTypes, "test", constraints.MustParse("mem=90000M")) c.Check(err, gc.ErrorMatches, `no instance types in test matching constraints "mem=90000M"`) @@ -280,7 +280,7 @@ {"", "m1.large", []string{"amd64"}}, {"cpu-power=100", "m1.small", []string{"amd64", "armhf"}}, {"arch=amd64", "m1.small", []string{"amd64"}}, - {"cpu-cores=3", "m1.xlarge", []string{"amd64"}}, + {"cores=3", "m1.xlarge", []string{"amd64"}}, {"cpu-power=", "t1.micro", []string{"amd64", "armhf"}}, {"cpu-power=500", "c1.medium", []string{"amd64", "armhf"}}, {"cpu-power=2000", "c1.xlarge", []string{"amd64"}}, diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/environs/interface.go juju-core-2.0.0/src/github.com/juju/juju/environs/interface.go --- juju-core-2.0~beta15/src/github.com/juju/juju/environs/interface.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/environs/interface.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,6 +4,8 @@ package environs import ( + "io" + "gopkg.in/juju/environschema.v1" "github.com/juju/juju/cloud" @@ -19,10 +21,7 @@ config.Validator ProviderCredentials - // RestrictedConfigAttributes are provider specific attributes stored in - // the config that really cannot or should not be changed across - // environments running inside a single juju server. - RestrictedConfigAttributes() []string + // TODO(wallyworld) - embed config.ConfigSchemaSource and make all providers implement it // PrepareConfig prepares the configuration for a new model, based on // the provided arguments. 
PrepareConfig is expected to produce a @@ -37,11 +36,6 @@ // Open should not perform any expensive operations, such as querying // the cloud API, as it will be called frequently. Open(OpenParams) (Environ, error) - - // SecretAttrs filters the supplied configuration returning only values - // which are considered sensitive. All of the values of these secret - // attributes need to be strings. - SecretAttrs(cfg *config.Config) (map[string]string, error) } // OpenParams contains the parameters for EnvironProvider.Open. @@ -66,9 +60,6 @@ // PrepareConfigParams contains the parameters for EnvironProvider.PrepareConfig. type PrepareConfigParams struct { - // ControllerUUID is the UUID of the controller to be bootstrapped. - ControllerUUID string - // Cloud is the cloud specification to use to connect to the cloud. Cloud CloudSpec @@ -99,6 +90,40 @@ // If no credentials can be detected, DetectCredentials should // return an error satisfying errors.IsNotFound. DetectCredentials() (*cloud.CloudCredential, error) + + // FinalizeCredential finalizes a credential, updating any attributes + // as necessary. This is always done client-side, when adding the + // credential to credentials.yaml and before uploading credentials to + // the controller. The provider may completely alter a credential, even + // going as far as changing the auth-type, but the output must be a + // fully formed credential. + FinalizeCredential( + FinalizeCredentialContext, + FinalizeCredentialParams, + ) (*cloud.Credential, error) +} + +// FinalizeCredentialContext is an interface passed into FinalizeCredential +// to provide a means of interacting with the user when finalizing credentials. +type FinalizeCredentialContext interface { + GetStderr() io.Writer +} + +// FinalizeCredentialParams contains the parameters for +// ProviderCredentials.FinalizeCredential. 
+type FinalizeCredentialParams struct { + // Credential is the credential that the provider should finalize.` + Credential cloud.Credential + + // CloudEndpoint is the endpoint for the cloud that the credentials are + // for. This may be used by the provider to communicate with the cloud + // to finalize the credentials. + CloudEndpoint string + + // CloudIdentityEndpoint is the identity endpoint for the cloud that the + // credentials are for. This may be used by the provider to communicate + // with the cloud to finalize the credentials. + CloudIdentityEndpoint string } // CloudRegionDetector is an interface that an EnvironProvider implements @@ -180,6 +205,10 @@ // architecture. Bootstrap(ctx BootstrapContext, params BootstrapParams) (*BootstrapResult, error) + // BootstrapMessage optionally provides a message to be displayed to + // the user at bootstrap time. + BootstrapMessage() string + // Create creates the environment for a new hosted model. // // This will be called before any workers begin operating on the diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/environs/jujutest/livetests.go juju-core-2.0.0/src/github.com/juju/juju/environs/jujutest/livetests.go --- juju-core-2.0~beta15/src/github.com/juju/juju/environs/jujutest/livetests.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/environs/jujutest/livetests.go 2016-10-13 14:31:49.000000000 +0000 @@ -116,14 +116,14 @@ t.CleanupSuite.SetUpTest(c) t.PatchValue(&jujuversion.Current, coretesting.FakeVersionNumber) storageDir := c.MkDir() - baseUrlPath := filepath.Join(storageDir, "tools") - t.DefaultBaseURL = utils.MakeFileURL(baseUrlPath) + baseURLPath := filepath.Join(storageDir, "tools") + t.DefaultBaseURL = utils.MakeFileURL(baseURLPath) t.ToolsFixture.SetUpTest(c) stor, err := filestorage.NewFileStorageWriter(storageDir) c.Assert(err, jc.ErrorIsNil) t.UploadFakeTools(c, stor, "released", "released") t.toolsStorage = stor - 
t.CleanupSuite.PatchValue(&envtools.BundleTools, envtoolstesting.GetMockBundleTools(c)) + t.CleanupSuite.PatchValue(&envtools.BundleTools, envtoolstesting.GetMockBundleTools(c, nil)) } func (t *LiveTests) TearDownSuite(c *gc.C) { @@ -542,7 +542,7 @@ c.Assert(err, jc.ErrorIsNil) svc, err := st.AddApplication(state.AddApplicationArgs{Name: "dummy", Charm: sch}) c.Assert(err, jc.ErrorIsNil) - units, err := juju.AddUnits(st, svc, 1, nil) + units, err := juju.AddUnits(st, svc, "dummy", 1, nil) c.Assert(err, jc.ErrorIsNil) unit := units[0] @@ -762,7 +762,7 @@ func (t *LiveTests) TestStartInstanceWithEmptyNonceFails(c *gc.C) { machineId := "4" apiInfo := jujutesting.FakeAPIInfo(machineId) - instanceConfig, err := instancecfg.NewInstanceConfig(machineId, "", "released", "quantal", true, apiInfo) + instanceConfig, err := instancecfg.NewInstanceConfig(coretesting.ControllerTag, machineId, "", "released", "quantal", apiInfo) c.Assert(err, jc.ErrorIsNil) t.PrepareOnce(c) @@ -770,7 +770,7 @@ c, t.toolsStorage, "released", "released", version.MustParseBinary("5.4.5-trusty-amd64"), )) params := environs.StartInstanceParams{ - ControllerUUID: coretesting.ModelTag.Id(), + ControllerUUID: coretesting.ControllerTag.Id(), Tools: possibleTools, InstanceConfig: instanceConfig, } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/environs/jujutest/tests.go juju-core-2.0.0/src/github.com/juju/juju/environs/jujutest/tests.go --- juju-core-2.0~beta15/src/github.com/juju/juju/environs/jujutest/tests.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/environs/jujutest/tests.go 2016-10-13 14:31:49.000000000 +0000 @@ -111,8 +111,8 @@ func (t *Tests) SetUpTest(c *gc.C) { storageDir := c.MkDir() - baseUrlPath := filepath.Join(storageDir, "tools") - t.DefaultBaseURL = utils.MakeFileURL(baseUrlPath) + baseURLPath := filepath.Join(storageDir, "tools") + t.DefaultBaseURL = utils.MakeFileURL(baseURLPath) t.ToolsFixture.SetUpTest(c) stor, err := 
filestorage.NewFileStorageWriter(storageDir) c.Assert(err, jc.ErrorIsNil) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/environs/manual/init_test.go juju-core-2.0.0/src/github.com/juju/juju/environs/manual/init_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/environs/manual/init_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/environs/manual/init_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -49,7 +49,7 @@ defer installFakeSSH(c, manual.DetectionScript, []string{scriptResponse, "non-empty-stderr"}, 0)() hc, _, err = manual.DetectSeriesAndHardwareCharacteristics("hostname") c.Assert(err, jc.ErrorIsNil) - c.Assert(hc.String(), gc.Equals, "arch=armhf cpu-cores=1 mem=4M") + c.Assert(hc.String(), gc.Equals, "arch=armhf cores=1 mem=4M") } func (s *initialisationSuite) TestDetectHardwareCharacteristics(c *gc.C) { @@ -60,7 +60,7 @@ }{{ "Single CPU socket, single core, no hyper-threading", []string{"edgy", "armv4", "MemTotal: 4096 kB", "processor: 0"}, - "arch=armhf cpu-cores=1 mem=4M", + "arch=armhf cores=1 mem=4M", }, { "Single CPU socket, single core, hyper-threading", []string{ @@ -72,7 +72,7 @@ "physical id: 0", "cpu cores: 1", }, - "arch=armhf cpu-cores=1 mem=4M", + "arch=armhf cores=1 mem=4M", }, { "Single CPU socket, dual-core, no hyper-threading", []string{ @@ -84,7 +84,7 @@ "physical id: 0", "cpu cores: 2", }, - "arch=armhf cpu-cores=2 mem=4M", + "arch=armhf cores=2 mem=4M", }, { "Dual CPU socket, each single-core, hyper-threading", []string{ @@ -102,7 +102,7 @@ "physical id: 1", "cpu cores: 1", }, - "arch=armhf cpu-cores=2 mem=4M", + "arch=armhf cores=2 mem=4M", }} for i, test := range tests { c.Logf("test %d: %s", i, test.summary) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/environs/networking.go juju-core-2.0.0/src/github.com/juju/juju/environs/networking.go --- juju-core-2.0~beta15/src/github.com/juju/juju/environs/networking.go 2016-08-16 08:56:25.000000000 +0000 +++ 
juju-core-2.0.0/src/github.com/juju/juju/environs/networking.go 2016-10-13 14:31:49.000000000 +0000 @@ -48,8 +48,8 @@ AllocateContainerAddresses(hostInstanceID instance.Id, containerTag names.MachineTag, preparedInfo []network.InterfaceInfo) ([]network.InterfaceInfo, error) // ReleaseContainerAddresses releases the previously allocated - // addresses matching the interface infos passed in. - ReleaseContainerAddresses(interfaces []network.InterfaceInfo) error + // addresses matching the interface details passed in. + ReleaseContainerAddresses(interfaces []network.ProviderInterfaceInfo) error } // NetworkingEnviron combines the standard Environ interface with the diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/environs/open.go juju-core-2.0.0/src/github.com/juju/juju/environs/open.go --- juju-core-2.0~beta15/src/github.com/juju/juju/environs/open.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/environs/open.go 2016-10-13 14:31:49.000000000 +0000 @@ -10,7 +10,7 @@ ) // AdminUser is the initial admin user created for all controllers. -const AdminUser = "admin@local" +const AdminUser = "admin" // New returns a new environment based on the provided configuration. func New(args OpenParams) (Environ, error) { diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/environs/open_test.go juju-core-2.0.0/src/github.com/juju/juju/environs/open_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/environs/open_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/environs/open_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -75,7 +75,7 @@ // New controller should have been added to collection. 
foundController, err := cache.ControllerByName(cfg.Name()) c.Assert(err, jc.ErrorIsNil) - c.Assert(foundController.ControllerUUID, gc.DeepEquals, cfg.UUID()) + c.Assert(foundController.ControllerUUID, gc.DeepEquals, controllerCfg.ControllerUUID()) } func (s *OpenSuite) TestUpdateEnvInfo(c *gc.C) { @@ -89,7 +89,6 @@ }) c.Assert(err, jc.ErrorIsNil) controllerCfg := testing.FakeControllerConfig() - controllerCfg["controller-uuid"] = uuid _, err = bootstrap.Prepare(ctx, store, bootstrap.PrepareParams{ ControllerConfig: controllerCfg, ControllerName: "controller-name", @@ -103,7 +102,7 @@ c.Assert(err, jc.ErrorIsNil) c.Assert(foundController.ControllerUUID, gc.Not(gc.Equals), "") c.Assert(foundController.CACert, gc.Not(gc.Equals), "") - foundModel, err := store.ModelByName("controller-name", "admin@local/admin-model") + foundModel, err := store.ModelByName("controller-name", "admin/admin-model") c.Assert(err, jc.ErrorIsNil) c.Assert(foundModel, jc.DeepEquals, &jujuclient.ModelDetails{ ModelUUID: cfg.UUID(), diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/environs/simplestreams/simplestreams.go juju-core-2.0.0/src/github.com/juju/juju/environs/simplestreams/simplestreams.go --- juju-core-2.0~beta15/src/github.com/juju/juju/environs/simplestreams/simplestreams.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/environs/simplestreams/simplestreams.go 2016-10-13 14:31:49.000000000 +0000 @@ -454,7 +454,7 @@ logger.Debugf("skipping index %q because of missing information: %v", indexURL, err) return nil, resolveInfo, err } - if _, ok := err.(*noMatchingProductsError); ok { + if _, ok := err.(*noMatchingProductsError); !ok { logger.Debugf("%v", err) } } @@ -951,6 +951,7 @@ logger.Tracef("finding products at path %q", productFilesPath) data, url, err := fetchData(indexRef.Source, productFilesPath, requireSigned) if err != nil { + logger.Tracef("can't read product data: %v", err) return nil, fmt.Errorf("cannot read product data, %v", err) 
} return ParseCloudMetadata(data, format, url, indexRef.valueParams.ValueTemplate) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/environs/sync/sync.go juju-core-2.0.0/src/github.com/juju/juju/environs/sync/sync.go --- juju-core-2.0~beta15/src/github.com/juju/juju/environs/sync/sync.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/environs/sync/sync.go 2016-10-13 14:31:49.000000000 +0000 @@ -5,7 +5,6 @@ import ( "bytes" - "fmt" "io" "io/ioutil" "os" @@ -77,7 +76,7 @@ func SyncTools(syncContext *SyncContext) error { sourceDataSource, err := selectSourceDatasource(syncContext) if err != nil { - return err + return errors.Trace(err) } logger.Infof("listing available tools") @@ -94,8 +93,8 @@ if syncContext.Stream == "" { // We now store the tools in a directory named after their stream, but the // legacy behaviour is to store all tools in a single "releases" directory. - toolsDir = envtools.LegacyReleaseDirectory - syncContext.Stream = envtools.PreferredStream(&jujuversion.Current, false, syncContext.Stream) + toolsDir = envtools.ReleasedStream + syncContext.Stream = envtools.PreferredStream(&jujuversion.Current, false, "") } sourceTools, err := envtools.FindToolsForCloud( []simplestreams.DataSource{sourceDataSource}, simplestreams.CloudSpec{}, @@ -109,7 +108,7 @@ envtools.ReleasedStream, syncContext.MajorVersion, syncContext.MinorVersion, coretools.Filter{}) } if err != nil { - return err + return errors.Trace(err) } logger.Infof("found %d tools", len(sourceTools)) @@ -127,7 +126,7 @@ switch err { case nil, coretools.ErrNoMatches, envtools.ErrNoTools: default: - return err + return errors.Trace(err) } for _, tool := range targetTools { logger.Debugf("found target tool: %v", tool) @@ -216,18 +215,18 @@ // Juju tools built for one series do not necessarily run on another, but this // func exists only for development use cases. 
func upload(stor storage.Storage, stream string, forceVersion *version.Number, fakeSeries ...string) (*coretools.Tools, error) { - builtTools, err := BuildToolsTarball(forceVersion, stream) + builtTools, err := BuildAgentTarball(true, forceVersion, stream) if err != nil { return nil, err } defer os.RemoveAll(builtTools.Dir) - logger.Debugf("Uploading tools for %v", fakeSeries) + logger.Debugf("Uploading agent binaries for %v", fakeSeries) return syncBuiltTools(stor, stream, builtTools, fakeSeries...) } // cloneToolsForSeries copies the built tools tarball into a tarball for the specified // stream and series and generates corresponding metadata. -func cloneToolsForSeries(toolsInfo *BuiltTools, stream string, series ...string) error { +func cloneToolsForSeries(toolsInfo *BuiltAgent, stream string, series ...string) error { // Copy the tools to the target storage, recording a Tools struct for each one. var targetTools coretools.List targetTools = append(targetTools, &coretools.Tools{ @@ -278,9 +277,9 @@ return envtools.MergeAndWriteMetadata(metadataStore, stream, stream, targetTools, false) } -// BuiltTools contains metadata for a tools tarball resulting from +// BuiltAgent contains metadata for a tools tarball resulting from // a call to BundleTools. -type BuiltTools struct { +type BuiltAgent struct { Version version.Binary Dir string StorageName string @@ -288,19 +287,19 @@ Size int64 } -// BuildToolsTarballFunc is a function which can build a tools tarball. -type BuildToolsTarballFunc func(forceVersion *version.Number, stream string) (*BuiltTools, error) +// BuildAgentTarballFunc is a function which can build an agent tarball. +type BuildAgentTarballFunc func(build bool, forceVersion *version.Number, stream string) (*BuiltAgent, error) // Override for testing. 
-var BuildToolsTarball BuildToolsTarballFunc = buildToolsTarball +var BuildAgentTarball BuildAgentTarballFunc = buildAgentTarball -// buildToolsTarball bundles a tools tarball and places it in a temp directory in -// the expected tools path. -func buildToolsTarball(forceVersion *version.Number, stream string) (builtTools *BuiltTools, err error) { +// BuildAgentTarball bundles an agent tarball and places it in a temp directory in +// the expected agent path. +func buildAgentTarball(build bool, forceVersion *version.Number, stream string) (_ *BuiltAgent, err error) { // TODO(rog) find binaries from $PATH when not using a development // version of juju within a $GOPATH. - logger.Debugf("Building tools") + logger.Debugf("Making agent binary tarball") // We create the entire archive before asking the environment to // start uploading so that we can be sure we have archived // correctly. @@ -310,16 +309,28 @@ } defer f.Close() defer os.Remove(f.Name()) - toolsVersion, sha256Hash, err := envtools.BundleTools(f, forceVersion) + toolsVersion, sha256Hash, err := envtools.BundleTools(build, f, forceVersion) if err != nil { return nil, err } + // Built agent version needs to match the client used to bootstrap. 
+	builtVersion := toolsVersion
+	builtVersion.Build = 0
+	clientVersion := jujuversion.Current
+	clientVersion.Build = 0
+	if builtVersion.Number.Compare(clientVersion) != 0 {
+		return nil, errors.Errorf("agent binary %v not compatible with bootstrap client %v", toolsVersion.Number, jujuversion.Current)
+	}
 	fileInfo, err := f.Stat()
 	if err != nil {
-		return nil, fmt.Errorf("cannot stat newly made tools archive: %v", err)
+		return nil, errors.Errorf("cannot stat newly made tools archive: %v", err)
 	}
 	size := fileInfo.Size()
-	logger.Infof("built tools %v (%dkB)", toolsVersion, (size+512)/1024)
+	reportedVersion := toolsVersion
+	if forceVersion != nil {
+		reportedVersion.Number = *forceVersion
+	}
+	logger.Infof("using agent binary %v aliased to %v (%dkB)", toolsVersion, reportedVersion, (size+512)/1024)
 	baseToolsDir, err := ioutil.TempDir("", "juju-tools")
 	if err != nil {
 		return nil, err
@@ -341,7 +352,7 @@
 	if err != nil {
 		return nil, err
 	}
-	return &BuiltTools{
+	return &BuiltAgent{
 		Version:     toolsVersion,
 		Dir:         baseToolsDir,
 		StorageName: storageName,
@@ -351,7 +362,7 @@
 }
 
 // syncBuiltTools copies to storage a tools tarball and cloned copies for each series.
-func syncBuiltTools(stor storage.Storage, stream string, builtTools *BuiltTools, fakeSeries ...string) (*coretools.Tools, error) { +func syncBuiltTools(stor storage.Storage, stream string, builtTools *BuiltAgent, fakeSeries ...string) (*coretools.Tools, error) { if err := cloneToolsForSeries(builtTools, stream, fakeSeries...); err != nil { return nil, err } @@ -364,7 +375,7 @@ MajorVersion: builtTools.Version.Major, MinorVersion: -1, } - logger.Debugf("uploading tools to cloud storage") + logger.Debugf("uploading agent binaries to cloud storage") err := SyncTools(syncContext) if err != nil { return nil, err diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/environs/sync/sync_test.go juju-core-2.0.0/src/github.com/juju/juju/environs/sync/sync_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/environs/sync/sync_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/environs/sync/sync_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -5,6 +5,8 @@ import ( "bytes" + "compress/gzip" + "fmt" "io" "io/ioutil" "net/http" @@ -22,6 +24,7 @@ "github.com/juju/utils" "github.com/juju/utils/arch" "github.com/juju/utils/series" + "github.com/juju/utils/tar" "github.com/juju/version" gc "gopkg.in/check.v1" @@ -33,6 +36,7 @@ envtesting "github.com/juju/juju/environs/testing" envtools "github.com/juju/juju/environs/tools" toolstesting "github.com/juju/juju/environs/tools/testing" + "github.com/juju/juju/juju/names" coretesting "github.com/juju/juju/testing" coretools "github.com/juju/juju/tools" jujuversion "github.com/juju/juju/version" @@ -233,10 +237,12 @@ stor, err := filestorage.NewFileStorageWriter(c.MkDir()) c.Assert(err, jc.ErrorIsNil) s.targetStorage = stor +} +func (s *uploadSuite) patchBundleTools(c *gc.C, v *version.Number) { // Mock out building of tools. Sync should not care about the contents // of tools archives, other than that they hash correctly. 
- s.PatchValue(&envtools.BundleTools, toolstesting.GetMockBundleTools(c)) + s.PatchValue(&envtools.BundleTools, toolstesting.GetMockBundleTools(c, v)) } func (s *uploadSuite) assertEqualsCurrentVersion(c *gc.C, v version.Binary) { @@ -249,6 +255,7 @@ } func (s *uploadSuite) TestUpload(c *gc.C) { + s.patchBundleTools(c, nil) t, err := sync.Upload(s.targetStorage, "released", nil) c.Assert(err, jc.ErrorIsNil) s.assertEqualsCurrentVersion(c, t.Version) @@ -257,6 +264,7 @@ } func (s *uploadSuite) TestUploadFakeSeries(c *gc.C) { + s.patchBundleTools(c, nil) seriesToUpload := "precise" if seriesToUpload == series.HostSeries() { seriesToUpload = "raring" @@ -267,19 +275,17 @@ } func (s *uploadSuite) TestUploadAndForceVersion(c *gc.C) { - // This test actually tests three things: - // the writing of the FORCE-VERSION file; - // the reading of the FORCE-VERSION file by the version package; - // and the reading of the version from jujud. vers := jujuversion.Current vers.Patch++ + s.patchBundleTools(c, &vers) t, err := sync.Upload(s.targetStorage, "released", &vers) c.Assert(err, jc.ErrorIsNil) - c.Assert(t.Version, gc.Equals, version.Binary{Number: vers, Arch: arch.HostArch(), Series: series.HostSeries()}) + c.Assert(t.Version, gc.Equals, version.Binary{Number: jujuversion.Current, Arch: arch.HostArch(), Series: series.HostSeries()}) } func (s *uploadSuite) TestSyncTools(c *gc.C) { - builtTools, err := sync.BuildToolsTarball(nil, "released") + s.patchBundleTools(c, nil) + builtTools, err := sync.BuildAgentTarball(true, nil, "released") c.Assert(err, jc.ErrorIsNil) t, err := sync.SyncBuiltTools(s.targetStorage, "released", builtTools) c.Assert(err, jc.ErrorIsNil) @@ -288,11 +294,12 @@ } func (s *uploadSuite) TestSyncToolsFakeSeries(c *gc.C) { + s.patchBundleTools(c, nil) seriesToUpload := "precise" if seriesToUpload == series.HostSeries() { seriesToUpload = "raring" } - builtTools, err := sync.BuildToolsTarball(nil, "testing") + builtTools, err := sync.BuildAgentTarball(true, 
nil, "testing") c.Assert(err, jc.ErrorIsNil) t, err := sync.SyncBuiltTools(s.targetStorage, "testing", builtTools, "quantal", seriesToUpload) @@ -301,17 +308,15 @@ } func (s *uploadSuite) TestSyncAndForceVersion(c *gc.C) { - // This test actually tests three things: - // the writing of the FORCE-VERSION file; - // the reading of the FORCE-VERSION file by the version package; - // and the reading of the version from jujud. vers := jujuversion.Current vers.Patch++ - builtTools, err := sync.BuildToolsTarball(&vers, "released") + s.patchBundleTools(c, &vers) + builtTools, err := sync.BuildAgentTarball(true, &vers, "released") c.Assert(err, jc.ErrorIsNil) t, err := sync.SyncBuiltTools(s.targetStorage, "released", builtTools) c.Assert(err, jc.ErrorIsNil) - c.Assert(t.Version, gc.Equals, version.Binary{Number: vers, Arch: arch.HostArch(), Series: series.HostSeries()}) + // Reported version from build call matches the real jujud version. + c.Assert(t.Version, gc.Equals, version.Binary{Number: jujuversion.Current, Arch: arch.HostArch(), Series: series.HostSeries()}) } func (s *uploadSuite) assertUploadedTools(c *gc.C, t *coretools.Tools, expectSeries []string, stream string) { @@ -352,7 +357,7 @@ defer f.Close() defer os.Remove(f.Name()) - return envtools.BundleTools(f, &jujuversion.Current) + return envtools.BundleTools(true, f, &jujuversion.Current) } type badBuildSuite struct { @@ -418,9 +423,9 @@ vers, sha256Hash, err := bundleTools(c) c.Assert(vers, gc.DeepEquals, version.Binary{}) c.Assert(sha256Hash, gc.Equals, "") - c.Assert(err, gc.ErrorMatches, `build command "go" failed: exit status 1; `) + c.Assert(err, gc.ErrorMatches, `cannot build jujud agent binary from source: build command "go" failed: exit status 1; `) - s.PatchValue(&envtools.BundleTools, toolstesting.GetMockBundleTools(c)) + s.PatchValue(&envtools.BundleTools, toolstesting.GetMockBundleTools(c, &jujuversion.Current)) // Test that BundleTools func passes after it is // mocked out @@ -437,10 +442,10 @@ // 
Test that original Upload Func fails as expected t, err := sync.Upload(stor, "released", nil) c.Assert(t, gc.IsNil) - c.Assert(err, gc.ErrorMatches, `build command "go" failed: exit status 1; `) + c.Assert(err, gc.ErrorMatches, `cannot build jujud agent binary from source: build command \"go\" failed: exit status 1; `) // Test that Upload func passes after BundleTools func is mocked out - s.PatchValue(&envtools.BundleTools, toolstesting.GetMockBundleTools(c)) + s.PatchValue(&envtools.BundleTools, toolstesting.GetMockBundleTools(c, nil)) t, err = sync.Upload(stor, "released", nil) c.Assert(err, jc.ErrorIsNil) s.assertEqualsCurrentVersion(c, t.Version) @@ -448,19 +453,25 @@ } func (s *badBuildSuite) TestBuildToolsBadBuild(c *gc.C) { - // Test that original BuildToolsTarball fails - builtTools, err := sync.BuildToolsTarball(nil, "released") - c.Assert(err, gc.ErrorMatches, `build command "go" failed: exit status 1; `) + // Test that original BuildAgentTarball fails + builtTools, err := sync.BuildAgentTarball(true, nil, "released") + c.Assert(err, gc.ErrorMatches, `cannot build jujud agent binary from source: build command \"go\" failed: exit status 1; `) c.Assert(builtTools, gc.IsNil) - // Test that BuildToolsTarball func passes after BundleTools func is + // Test that BuildAgentTarball func passes after BundleTools func is // mocked out - s.PatchValue(&envtools.BundleTools, toolstesting.GetMockBundleTools(c)) - builtTools, err = sync.BuildToolsTarball(nil, "released") + s.PatchValue(&envtools.BundleTools, toolstesting.GetMockBundleTools(c, nil)) + builtTools, err = sync.BuildAgentTarball(true, nil, "released") s.assertEqualsCurrentVersion(c, builtTools.Version) c.Assert(err, jc.ErrorIsNil) } +func (s *badBuildSuite) TestBuildToolsNoBinaryAvailable(c *gc.C) { + builtTools, err := sync.BuildAgentTarball(false, nil, "released") + c.Assert(err, gc.ErrorMatches, `no prepackaged agent available and no jujud binary can be found`) + c.Assert(builtTools, gc.IsNil) +} + func 
(s *uploadSuite) TestMockBundleTools(c *gc.C) { var ( writer io.Writer @@ -470,15 +481,17 @@ ) p.WriteString("Hello World") - s.PatchValue(&envtools.BundleTools, func(writerArg io.Writer, forceVersionArg *version.Number) (vers version.Binary, sha256Hash string, err error) { + s.PatchValue(&envtools.BundleTools, func(build bool, writerArg io.Writer, forceVersionArg *version.Number) (vers version.Binary, sha256Hash string, err error) { + c.Assert(build, jc.IsTrue) writer = writerArg n, err = writer.Write(p.Bytes()) c.Assert(err, jc.ErrorIsNil) forceVersion = forceVersionArg + vers.Number = jujuversion.Current return }) - _, err := sync.BuildToolsTarball(&jujuversion.Current, "released") + _, err := sync.BuildAgentTarball(true, &jujuversion.Current, "released") c.Assert(err, jc.ErrorIsNil) c.Assert(*forceVersion, gc.Equals, jujuversion.Current) c.Assert(writer, gc.NotNil) @@ -486,35 +499,38 @@ } func (s *uploadSuite) TestMockBuildTools(c *gc.C) { + checkTools := func(tools *sync.BuiltAgent, vers version.Binary) { + c.Check(tools.StorageName, gc.Equals, "name") + c.Check(tools.Version, jc.DeepEquals, vers) + + f, err := os.Open(filepath.Join(tools.Dir, "name")) + c.Assert(err, jc.ErrorIsNil) + defer f.Close() + + gzr, err := gzip.NewReader(f) + c.Assert(err, jc.ErrorIsNil) + + _, tr, err := tar.FindFile(gzr, names.Jujud) + c.Assert(err, jc.ErrorIsNil) + + content, err := ioutil.ReadAll(tr) + c.Assert(err, jc.ErrorIsNil) + c.Check(string(content), gc.Equals, fmt.Sprintf("jujud contents %s", vers)) + } + current := version.MustParseBinary("1.9.1-trusty-amd64") s.PatchValue(&jujuversion.Current, current.Number) s.PatchValue(&arch.HostArch, func() string { return current.Arch }) s.PatchValue(&series.HostSeries, func() string { return current.Series }) buildToolsFunc := toolstesting.GetMockBuildTools(c) - builtTools, err := buildToolsFunc(nil, "released") + builtTools, err := buildToolsFunc(true, nil, "released") c.Assert(err, jc.ErrorIsNil) - - builtTools.Dir = "" - - 
expectedBuiltTools := &sync.BuiltTools{ - StorageName: "name", - Version: current, - Size: 127, - Sha256Hash: "6a19d08ca4913382ca86508aa38eb8ee5b9ae2d74333fe8d862c0f9e29b82c39", - } - c.Assert(builtTools, gc.DeepEquals, expectedBuiltTools) + checkTools(builtTools, current) vers := version.MustParseBinary("1.5.3-trusty-amd64") - builtTools, err = buildToolsFunc(&vers.Number, "released") + builtTools, err = buildToolsFunc(true, &vers.Number, "released") c.Assert(err, jc.ErrorIsNil) - builtTools.Dir = "" - expectedBuiltTools = &sync.BuiltTools{ - StorageName: "name", - Version: vers, - Size: 127, - Sha256Hash: "cad8ccedab8f26807ff379ddc2f2f78d9a7cac1276e001154cee5e39b9ddcc38", - } - c.Assert(builtTools, gc.DeepEquals, expectedBuiltTools) + checkTools(builtTools, vers) } func (s *uploadSuite) TestStorageToolsUploaderWriteMirrors(c *gc.C) { diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/environs/tags/tags.go juju-core-2.0.0/src/github.com/juju/juju/environs/tags/tags.go --- juju-core-2.0~beta15/src/github.com/juju/juju/environs/tags/tags.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/environs/tags/tags.go 2016-10-13 14:31:49.000000000 +0000 @@ -47,7 +47,7 @@ // ResourceTags returns tags to set on an infrastructure resource // for the specified Juju environment. 
-func ResourceTags(modelTag, controllerTag names.ModelTag, taggers ...ResourceTagger) map[string]string { +func ResourceTags(modelTag names.ModelTag, controllerTag names.ControllerTag, taggers ...ResourceTagger) map[string]string { allTags := make(map[string]string) for _, tagger := range taggers { tags, ok := tagger.ResourceTags() diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/environs/tags/tags_test.go juju-core-2.0.0/src/github.com/juju/juju/environs/tags/tags_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/environs/tags/tags_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/environs/tags/tags_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -19,18 +19,18 @@ var _ = gc.Suite(&tagsSuite{}) func (*tagsSuite) TestResourceTagsUUID(c *gc.C) { - testResourceTags(c, testing.ModelTag, names.NewModelTag(""), nil, map[string]string{ + testResourceTags(c, testing.ControllerTag, names.NewModelTag(""), nil, map[string]string{ + "juju-model-uuid": "", + "juju-controller-uuid": testing.ControllerTag.Id(), + }) + testResourceTags(c, names.NewControllerTag(""), testing.ModelTag, nil, map[string]string{ "juju-model-uuid": testing.ModelTag.Id(), "juju-controller-uuid": "", }) - testResourceTags(c, names.NewModelTag(""), testing.ModelTag, nil, map[string]string{ - "juju-model-uuid": "", - "juju-controller-uuid": testing.ModelTag.Id(), - }) } func (*tagsSuite) TestResourceTagsResourceTaggers(c *gc.C) { - testResourceTags(c, testing.ModelTag, testing.ModelTag, []tags.ResourceTagger{ + testResourceTags(c, testing.ControllerTag, testing.ModelTag, []tags.ResourceTagger{ resourceTagger(func() (map[string]string, bool) { return map[string]string{ "over": "ridden", @@ -54,14 +54,14 @@ }), }, map[string]string{ "juju-model-uuid": testing.ModelTag.Id(), - "juju-controller-uuid": testing.ModelTag.Id(), + "juju-controller-uuid": testing.ControllerTag.Id(), "froman": "egg", "over": "easy", "extra": "play", }) } -func testResourceTags(c 
*gc.C, model, controller names.ModelTag, taggers []tags.ResourceTagger, expectTags map[string]string) { +func testResourceTags(c *gc.C, controller names.ControllerTag, model names.ModelTag, taggers []tags.ResourceTagger, expectTags map[string]string) { tags := tags.ResourceTags(model, controller, taggers...) c.Assert(tags, jc.DeepEquals, expectTags) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/environs/testing/tools.go juju-core-2.0.0/src/github.com/juju/juju/environs/testing/tools.go --- juju-core-2.0~beta15/src/github.com/juju/juju/environs/testing/tools.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/environs/testing/tools.go 2016-10-13 14:31:49.000000000 +0000 @@ -402,7 +402,7 @@ Err string } -var noToolsMessage = "Juju cannot bootstrap because no tools are available for your model.*" +var noToolsMessage = "Juju cannot bootstrap because no agent binaries are available for your model.*" var BootstrapToolsTests = []BootstrapToolsTest{ { diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/environs/tools/build.go juju-core-2.0.0/src/github.com/juju/juju/environs/tools/build.go --- juju-core-2.0~beta15/src/github.com/juju/juju/environs/tools/build.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/environs/tools/build.go 2016-10-13 14:31:49.000000000 +0000 @@ -20,6 +20,7 @@ "github.com/juju/version" "github.com/juju/juju/juju/names" + jujuversion "github.com/juju/juju/version" ) // Archive writes the executable files found in the given directory in @@ -167,7 +168,7 @@ logger.Infof("couldn't find existing jujud") return err } - logger.Infof("found existing jujud") + logger.Infof("Found agent binary to upload (%s)", jujudLocation) // TODO(thumper): break this out into a util function. // copy the file into the dir. 
source, err := os.Open(jujudLocation) @@ -206,9 +207,23 @@ return nil } +func packageLocalTools(toolsDir string, buildAgent bool) error { + if !buildAgent { + if err := copyExistingJujud(toolsDir); err != nil { + return errors.New("no prepackaged agent available and no jujud binary can be found") + } + return nil + } + logger.Infof("Building agent binary to upload (%s)", jujuversion.Current.String()) + if err := buildJujud(toolsDir); err != nil { + return errors.Annotate(err, "cannot build jujud agent binary from source") + } + return nil +} + // BundleToolsFunc is a function which can bundle all the current juju tools // in gzipped tar format to the given writer. -type BundleToolsFunc func(w io.Writer, forceVersion *version.Number) (version.Binary, string, error) +type BundleToolsFunc func(build bool, w io.Writer, forceVersion *version.Number) (version.Binary, string, error) // Override for testing. var BundleTools BundleToolsFunc = bundleTools @@ -217,18 +232,22 @@ // format to the given writer. // If forceVersion is not nil, a FORCE-VERSION file is included in // the tools bundle so it will lie about its current version number. -func bundleTools(w io.Writer, forceVersion *version.Number) (tvers version.Binary, sha256Hash string, err error) { +func bundleTools(build bool, w io.Writer, forceVersion *version.Number) (tvers version.Binary, sha256Hash string, err error) { dir, err := ioutil.TempDir("", "juju-tools") if err != nil { return version.Binary{}, "", err } defer os.RemoveAll(dir) + if err := packageLocalTools(dir, build); err != nil { + return version.Binary{}, "", err + } - if err := copyExistingJujud(dir); err != nil { - logger.Debugf("copy existing failed: %v", err) - if err := buildJujud(dir); err != nil { - return version.Binary{}, "", err - } + // Extract the version number that the jujud binary was built with. + // This is used to check compatibility with the version of the client + // being used to bootstrap. 
+ tvers, err = getVersionFromJujud(dir) + if err != nil { + return version.Binary{}, "", errors.Trace(err) } if forceVersion != nil { @@ -238,11 +257,6 @@ } } - tvers, err = getVersionFromJujud(dir) - if err != nil { - return version.Binary{}, "", errors.Trace(err) - } - sha256hash, err := archiveAndSHA256(w, dir) if err != nil { return version.Binary{}, "", err diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/environs/tools/simplestreams.go juju-core-2.0.0/src/github.com/juju/juju/environs/tools/simplestreams.go --- juju-core-2.0~beta15/src/github.com/juju/juju/environs/tools/simplestreams.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/environs/tools/simplestreams.go 2016-10-13 14:31:49.000000000 +0000 @@ -51,9 +51,6 @@ var DefaultBaseURL = "https://streams.canonical.com/juju/tools" const ( - // Legacy release directory for Juju < 1.21. - LegacyReleaseDirectory = "releases" - // Used to specify the released tools metadata. ReleasedStream = "released" diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/environs/tools/simplestreams_test.go juju-core-2.0.0/src/github.com/juju/juju/environs/tools/simplestreams_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/environs/tools/simplestreams_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/environs/tools/simplestreams_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -45,7 +45,7 @@ validCloudSpec simplestreams.CloudSpec } -var liveUrls = map[string]liveTestData{ +var liveURLs = map[string]liveTestData{ "ec2": { baseURL: tools.DefaultBaseURL, requireSigned: true, @@ -65,8 +65,8 @@ } var ok bool var testData liveTestData - if testData, ok = liveUrls[*vendor]; !ok { - keys := reflect.ValueOf(liveUrls).MapKeys() + if testData, ok = liveURLs[*vendor]; !ok { + keys := reflect.ValueOf(liveURLs).MapKeys() t.Fatalf("Unknown vendor %s. 
Must be one of %s", *vendor, keys) } registerLiveSimpleStreamsTests(testData.baseURL, diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/environs/tools/testing/testing.go juju-core-2.0.0/src/github.com/juju/juju/environs/tools/testing/testing.go --- juju-core-2.0~beta15/src/github.com/juju/juju/environs/tools/testing/testing.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/environs/tools/testing/testing.go 2016-10-13 14:31:49.000000000 +0000 @@ -37,25 +37,27 @@ jujuversion "github.com/juju/juju/version" ) -func GetMockBundleTools(c *gc.C) tools.BundleToolsFunc { - return func(w io.Writer, forceVersion *version.Number) (version.Binary, string, error) { +func GetMockBundleTools(c *gc.C, expectedForceVersion *version.Number) tools.BundleToolsFunc { + return func(build bool, w io.Writer, forceVersion *version.Number) (version.Binary, string, error) { + if expectedForceVersion != nil { + c.Assert(forceVersion, jc.DeepEquals, expectedForceVersion) + } else { + c.Assert(forceVersion, gc.IsNil) + } vers := version.Binary{ Number: jujuversion.Current, Arch: arch.HostArch(), Series: series.HostSeries(), } - if forceVersion != nil { - vers.Number = *forceVersion - } sha256Hash := fmt.Sprintf("%x", sha256.New().Sum(nil)) return vers, sha256Hash, nil } } -// GetMockBuildTools returns a sync.BuildToolsTarballFunc implementation which generates +// GetMockBuildTools returns a sync.BuildAgentTarballFunc implementation which generates // a fake tools tarball. 
-func GetMockBuildTools(c *gc.C) sync.BuildToolsTarballFunc { - return func(forceVersion *version.Number, stream string) (*sync.BuiltTools, error) { +func GetMockBuildTools(c *gc.C) sync.BuildAgentTarballFunc { + return func(build bool, forceVersion *version.Number, stream string) (*sync.BuiltAgent, error) { vers := version.Binary{ Number: jujuversion.Current, Arch: arch.HostArch(), @@ -73,7 +75,7 @@ name := "name" ioutil.WriteFile(filepath.Join(toolsDir, name), tgz, 0777) - return &sync.BuiltTools{ + return &sync.BuiltAgent{ Dir: toolsDir, StorageName: name, Version: vers, diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/environs/tools/tools.go juju-core-2.0.0/src/github.com/juju/juju/environs/tools/tools.go --- juju-core-2.0~beta15/src/github.com/juju/juju/environs/tools/tools.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/environs/tools/tools.go 2016-10-13 14:31:49.000000000 +0000 @@ -44,7 +44,7 @@ if filter.Arch != "" { toolsConstraint.Arches = []string{filter.Arch} } else { - logger.Tracef("no architecture specified when finding tools, looking for any") + logger.Tracef("no architecture specified when finding agent binaries, looking for any") toolsConstraint.Arches = arch.AllSupportedArches } // The old tools search allowed finding tools without needing to specify a series. @@ -56,15 +56,12 @@ seriesToSearch = []string{filter.Series} } else { seriesToSearch = series.SupportedSeries() - logger.Tracef("no series specified when finding tools, looking for %v", seriesToSearch) + logger.Tracef("no series specified when finding agent binaries, looking for %v", seriesToSearch) } toolsConstraint.Series = seriesToSearch return toolsConstraint, nil } -// Define some boolean parameter values. -const DoNotAllowRetry = false - // HasAgentMirror is an optional interface that an Environ may // implement to support agent/tools mirror lookup. // @@ -84,9 +81,7 @@ // If minorVersion = -1, then only majorVersion is considered. 
// If no *available* tools have the supplied major.minor version number, or match the // supplied filter, the function returns a *NotFoundError. -func FindTools(env environs.Environ, majorVersion, minorVersion int, stream string, - filter coretools.Filter) (list coretools.List, err error) { - +func FindTools(env environs.Environ, majorVersion, minorVersion int, stream string, filter coretools.Filter) (_ coretools.List, err error) { var cloudSpec simplestreams.CloudSpec switch env := env.(type) { case simplestreams.HasRegion: @@ -100,26 +95,26 @@ } // If only one of region or endpoint is provided, that is a problem. if cloudSpec.Region != cloudSpec.Endpoint && (cloudSpec.Region == "" || cloudSpec.Endpoint == "") { - return nil, fmt.Errorf("cannot find tools without a complete cloud configuration") + return nil, errors.New("cannot find agent binaries without a complete cloud configuration") } - logger.Infof("finding tools in stream %q", stream) + logger.Infof("finding agent binaries in stream %q", stream) if minorVersion >= 0 { - logger.Infof("reading tools with major.minor version %d.%d", majorVersion, minorVersion) + logger.Infof("reading agent binaries with major.minor version %d.%d", majorVersion, minorVersion) } else { - logger.Infof("reading tools with major version %d", majorVersion) + logger.Infof("reading agent binaries with major version %d", majorVersion) } defer convertToolsError(&err) // Construct a tools filter. // Discard all that are known to be irrelevant. 
if filter.Number != version.Zero { - logger.Infof("filtering tools by version: %s", filter.Number) + logger.Infof("filtering agent binaries by version: %s", filter.Number) } if filter.Series != "" { - logger.Infof("filtering tools by series: %s", filter.Series) + logger.Infof("filtering agent binaries by series: %s", filter.Series) } if filter.Arch != "" { - logger.Infof("filtering tools by architecture: %s", filter.Arch) + logger.Infof("filtering agent binaries by architecture: %s", filter.Arch) } sources, err := GetMetadataSources(env) if err != nil { @@ -172,7 +167,7 @@ } // FindExactTools returns only the tools that match the supplied version. -func FindExactTools(env environs.Environ, vers version.Number, series string, arch string) (t *coretools.Tools, err error) { +func FindExactTools(env environs.Environ, vers version.Number, series string, arch string) (_ *coretools.Tools, err error) { logger.Infof("finding exact version %s", vers) // Construct a tools filter. // Discard all that are known to be irrelevant. diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/environs/tools/tools_test.go juju-core-2.0.0/src/github.com/juju/juju/environs/tools/tools_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/environs/tools/tools_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/environs/tools/tools_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -208,10 +208,10 @@ // messages. This still helps to ensure that all log messages are // properly formed. 
messages := []jc.SimpleMessage{ - {loggo.INFO, "reading tools with major version 1"}, - {loggo.INFO, "filtering tools by version: \\d+\\.\\d+\\.\\d+"}, - {loggo.TRACE, "no architecture specified when finding tools, looking for "}, - {loggo.TRACE, "no series specified when finding tools, looking for \\[.*\\]"}, + {loggo.INFO, "reading agent binaries with major version 1"}, + {loggo.INFO, "filtering agent binaries by version: \\d+\\.\\d+\\.\\d+"}, + {loggo.TRACE, "no architecture specified when finding agent binaries, looking for "}, + {loggo.TRACE, "no series specified when finding agent binaries, looking for \\[.*\\]"}, } sources, err := envtools.GetMetadataSources(s.env) c.Assert(err, jc.ErrorIsNil) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/environs/tools/validation.go juju-core-2.0.0/src/github.com/juju/juju/environs/tools/validation.go --- juju-core-2.0~beta15/src/github.com/juju/juju/environs/tools/validation.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/environs/tools/validation.go 2016-10-13 14:31:49.000000000 +0000 @@ -23,9 +23,6 @@ // ValidateToolsMetadata attempts to load tools metadata for the specified cloud attributes and returns // any tools versions found, or an error if the metadata could not be loaded. 
func ValidateToolsMetadata(params *ToolsMetadataLookupParams) ([]string, *simplestreams.ResolveInfo, error) { - if len(params.Architectures) == 0 { - return nil, nil, fmt.Errorf("required parameter arches not specified") - } if len(params.Sources) == 0 { return nil, nil, fmt.Errorf("required parameter sources not specified") } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/featuretests/api_cloud_test.go juju-core-2.0.0/src/github.com/juju/juju/featuretests/api_cloud_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/featuretests/api_cloud_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/featuretests/api_cloud_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -9,6 +9,7 @@ "gopkg.in/juju/names.v2" apicloud "github.com/juju/juju/api/cloud" + "github.com/juju/juju/apiserver/params" "github.com/juju/juju/cloud" "github.com/juju/juju/juju/testing" ) @@ -26,16 +27,48 @@ func (s *CloudAPISuite) SetUpTest(c *gc.C) { s.JujuConnSuite.SetUpTest(c) - s.client = apicloud.NewClient(s.APIState) + s.client = apicloud.NewClient(s.OpenControllerAPI(c)) +} + +func (s *CloudAPISuite) TearDownTest(c *gc.C) { + s.client.Close() + s.JujuConnSuite.TearDownTest(c) } func (s *CloudAPISuite) TestCloudAPI(c *gc.C) { result, err := s.client.Cloud(names.NewCloudTag("dummy")) c.Assert(err, jc.ErrorIsNil) c.Assert(result, jc.DeepEquals, cloud.Cloud{ - Type: "dummy", - AuthTypes: []cloud.AuthType{cloud.EmptyAuthType}, - Endpoint: "dummy-endpoint", - StorageEndpoint: "dummy-storage-endpoint", + Type: "dummy", + AuthTypes: []cloud.AuthType{cloud.EmptyAuthType, cloud.UserPassAuthType}, + Regions: []cloud.Region{ + { + Name: "dummy-region", + Endpoint: "dummy-endpoint", + IdentityEndpoint: "dummy-identity-endpoint", + StorageEndpoint: "dummy-storage-endpoint", + }, + }, + Endpoint: "dummy-endpoint", + IdentityEndpoint: "dummy-identity-endpoint", + StorageEndpoint: "dummy-storage-endpoint", + }) +} + +func (s *CloudAPISuite) TestCredentialsAPI(c 
*gc.C) { + tag := names.NewCloudCredentialTag("dummy/admin/default") + err := s.client.UpdateCredential(tag, cloud.NewCredential( + cloud.UserPassAuthType, + map[string]string{"username": "fred", "password": "secret"}, + )) + c.Assert(err, jc.ErrorIsNil) + result, err := s.client.Credentials(tag) + c.Assert(err, jc.ErrorIsNil) + c.Assert(result, jc.DeepEquals, []params.CloudCredentialResult{ + {Result: ¶ms.CloudCredential{ + AuthType: "userpass", + Attributes: map[string]string{"username": "fred"}, + Redacted: []string{"password"}, + }}, }) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/featuretests/api_model_test.go juju-core-2.0.0/src/github.com/juju/juju/featuretests/api_model_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/featuretests/api_model_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/featuretests/api_model_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -16,9 +16,9 @@ "github.com/juju/juju/api" "github.com/juju/juju/api/modelmanager" "github.com/juju/juju/apiserver/params" - "github.com/juju/juju/core/description" jujunames "github.com/juju/juju/juju/names" "github.com/juju/juju/juju/testing" + "github.com/juju/juju/permission" "github.com/juju/juju/state" coretesting "github.com/juju/juju/testing" "github.com/juju/juju/testing/factory" @@ -42,14 +42,15 @@ username := "foo@ubuntuone" model, err := s.State.Model() c.Assert(err, jc.ErrorIsNil) - mm := modelmanager.NewClient(s.APIState) + mm := modelmanager.NewClient(s.OpenControllerAPI(c)) + defer mm.Close() err = mm.GrantModel(username, "read", model.UUID()) c.Assert(err, jc.ErrorIsNil) user := names.NewUserTag(username) modelUser, err := s.State.UserAccess(user, model.ModelTag()) c.Assert(err, jc.ErrorIsNil) - c.Assert(modelUser.UserName, gc.Equals, user.Canonical()) + c.Assert(modelUser.UserName, gc.Equals, user.Id()) lastConn, err := s.State.LastModelConnection(modelUser.UserTag) c.Assert(err, jc.Satisfies, state.IsNeverConnectedError) 
c.Assert(lastConn.IsZero(), jc.IsTrue) @@ -60,11 +61,12 @@ user := s.Factory.MakeModelUser(c, &factory.ModelUserParams{User: "foo@ubuntuone"}) model, err := s.State.Model() c.Assert(err, jc.ErrorIsNil) - mm := modelmanager.NewClient(s.APIState) + mm := modelmanager.NewClient(s.OpenControllerAPI(c)) + defer mm.Close() modelUser, err := s.State.UserAccess(user.UserTag, s.State.ModelTag()) c.Assert(err, jc.ErrorIsNil) - c.Assert(modelUser, gc.Not(gc.DeepEquals), description.UserAccess{}) + c.Assert(modelUser, gc.Not(gc.DeepEquals), permission.UserAccess{}) // Then test unsharing the environment. err = mm.RevokeModel(user.UserName, "read", model.UUID()) @@ -72,7 +74,7 @@ modelUser, err = s.State.UserAccess(user.UserTag, s.State.ModelTag()) c.Assert(errors.IsNotFound(err), jc.IsTrue) - c.Assert(modelUser, gc.DeepEquals, description.UserAccess{}) + c.Assert(modelUser, gc.DeepEquals, permission.UserAccess{}) } func (s *apiEnvironmentSuite) TestEnvironmentUserInfo(c *gc.C) { @@ -99,7 +101,7 @@ }) } -func lastConnPointer(c *gc.C, st *state.State, modelUser description.UserAccess) *time.Time { +func lastConnPointer(c *gc.C, st *state.State, modelUser permission.UserAccess) *time.Time { lastConn, err := st.LastModelConnection(modelUser.UserTag) if err != nil { if state.IsNeverConnectedError(err) { diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/featuretests/api_undertaker_test.go juju-core-2.0.0/src/github.com/juju/juju/featuretests/api_undertaker_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/featuretests/api_undertaker_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/featuretests/api_undertaker_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -59,7 +59,7 @@ info := result.Result c.Assert(info.UUID, gc.Equals, coretesting.ModelTag.Id()) c.Assert(info.Name, gc.Equals, "controller") - c.Assert(info.GlobalName, gc.Equals, "user-admin@local/controller") + c.Assert(info.GlobalName, gc.Equals, "user-admin/controller") 
c.Assert(info.IsSystem, jc.IsTrue) c.Assert(info.Life, gc.Equals, params.Alive) } @@ -88,7 +88,7 @@ undertakerClient, err := undertaker.NewClient(st, apiwatcher.NewNotifyWatcher) c.Assert(err, jc.ErrorIsNil) c.Assert(undertakerClient, gc.NotNil) - c.Assert(undertakerClient.RemoveModel(), gc.ErrorMatches, "an error occurred, unable to remove model") + c.Assert(undertakerClient.RemoveModel(), gc.ErrorMatches, "can't remove model: model not dead") } func (s *undertakerSuite) TestHostedEnvironInfo(c *gc.C) { @@ -102,7 +102,7 @@ envInfo := result.Result c.Assert(envInfo.UUID, gc.Equals, otherSt.ModelUUID()) c.Assert(envInfo.Name, gc.Equals, "hosted-env") - c.Assert(envInfo.GlobalName, gc.Equals, "user-admin@local/hosted-env") + c.Assert(envInfo.GlobalName, gc.Equals, "user-admin/hosted-env") c.Assert(envInfo.IsSystem, jc.IsFalse) c.Assert(envInfo.Life, gc.Equals, params.Alive) } @@ -147,7 +147,7 @@ // Aborts on alive environ. err := undertakerClient.RemoveModel() - c.Assert(err, gc.ErrorMatches, "an error occurred, unable to remove model") + c.Assert(err, gc.ErrorMatches, "can't remove model: model not dead") factory.NewFactory(otherSt).MakeApplication(c, nil) env, err := otherSt.Model() @@ -156,7 +156,7 @@ // Aborts on dying environ. 
err = undertakerClient.RemoveModel() - c.Assert(err, gc.ErrorMatches, "an error occurred, unable to remove model") + c.Assert(err, gc.ErrorMatches, "can't remove model: model not dead") err = otherSt.Cleanup() c.Assert(err, jc.ErrorIsNil) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/featuretests/block_test.go juju-core-2.0.0/src/github.com/juju/juju/featuretests/block_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/featuretests/block_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/featuretests/block_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,13 +4,11 @@ package featuretests import ( - "github.com/juju/errors" jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" "github.com/juju/juju/api/block" jujutesting "github.com/juju/juju/juju/testing" - "github.com/juju/juju/state" ) type blockSuite struct { @@ -32,8 +30,3 @@ c.Assert(err, jc.ErrorIsNil) c.Assert(found, gc.HasLen, 0) } - -func (s *blockSuite) TestBlockFacadeCallGettingErrors(c *gc.C) { - err := s.blockClient.SwitchBlockOff(state.DestroyBlock.String()) - c.Assert(errors.Cause(err), gc.ErrorMatches, `.*is already OFF.*`) -} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/featuretests/cmd_juju_controller_test.go juju-core-2.0.0/src/github.com/juju/juju/featuretests/cmd_juju_controller_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/featuretests/cmd_juju_controller_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/featuretests/cmd_juju_controller_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,10 +4,8 @@ package featuretests import ( - "fmt" "os" "reflect" - "strings" "time" "github.com/juju/cmd" @@ -19,9 +17,10 @@ "gopkg.in/yaml.v2" "github.com/juju/juju/api" + "github.com/juju/juju/api/base" "github.com/juju/juju/api/modelmanager" - "github.com/juju/juju/apiserver/params" "github.com/juju/juju/cmd/juju/commands" + "github.com/juju/juju/instance" "github.com/juju/juju/juju" 
jujutesting "github.com/juju/juju/juju/testing" "github.com/juju/juju/jujuclient" @@ -45,30 +44,38 @@ return context } -func (s *cmdControllerSuite) createModelAdminUser(c *gc.C, modelname string, isServer bool) params.ModelInfo { - modelManager := modelmanager.NewClient(s.APIState) - model, err := modelManager.CreateModel(modelname, s.AdminUserTag(c).Id(), "", "", map[string]interface{}{ - "controller": isServer, - }) +func (s *cmdControllerSuite) createModelAdminUser(c *gc.C, modelname string, isServer bool) base.ModelInfo { + modelManager := modelmanager.NewClient(s.OpenControllerAPI(c)) + defer modelManager.Close() + model, err := modelManager.CreateModel( + modelname, s.AdminUserTag(c).Id(), "", "", names.CloudCredentialTag{}, map[string]interface{}{ + "controller": isServer, + }, + ) c.Assert(err, jc.ErrorIsNil) return model } func (s *cmdControllerSuite) createModelNormalUser(c *gc.C, modelname string, isServer bool) { s.run(c, "add-user", "test") - modelManager := modelmanager.NewClient(s.APIState) - _, err := modelManager.CreateModel(modelname, names.NewLocalUserTag("test").Id(), "", "", map[string]interface{}{ - "authorized-keys": "ssh-key", - "controller": isServer, - }) + modelManager := modelmanager.NewClient(s.OpenControllerAPI(c)) + defer modelManager.Close() + _, err := modelManager.CreateModel( + modelname, names.NewLocalUserTag("test").Id(), "", "", names.CloudCredentialTag{}, map[string]interface{}{ + "authorized-keys": "ssh-key", + "controller": isServer, + }, + ) c.Assert(err, jc.ErrorIsNil) } func (s *cmdControllerSuite) TestControllerListCommand(c *gc.C) { context := s.run(c, "list-controllers") expectedOutput := ` -CONTROLLER MODEL USER CLOUD/REGION -kontroll* controller admin@local dummy +Use --refresh to see the latest information. 
+ +Controller Model User Access Cloud/Region Models Machines HA Version +kontroll* controller admin superuser dummy/dummy-region - - - (unknown) `[1:] c.Assert(testing.Stdout(context), gc.Equals, expectedOutput) @@ -78,9 +85,11 @@ s.createModelAdminUser(c, "new-model", false) context := s.run(c, "list-models") c.Assert(testing.Stdout(context), gc.Equals, ""+ - "MODEL OWNER STATUS LAST CONNECTION\n"+ - "controller* admin@local available just now\n"+ - "new-model admin@local available never connected\n"+ + "Controller: kontroll\n"+ + "\n"+ + "Model Owner Status Access Last connection\n"+ + "controller* admin available admin just now\n"+ + "new-model admin available admin never connected\n"+ "\n") } @@ -88,31 +97,43 @@ s.createModelNormalUser(c, "new-model", false) context := s.run(c, "list-models", "--all") c.Assert(testing.Stdout(context), gc.Equals, ""+ - "MODEL OWNER STATUS LAST CONNECTION\n"+ - "admin/controller* admin@local available just now\n"+ - "test/new-model test@local available never connected\n"+ + "Controller: kontroll\n"+ + "\n"+ + "Model Owner Status Access Last connection\n"+ + "admin/controller* admin available admin just now\n"+ + "test/new-model test available never connected\n"+ "\n") } func (s *cmdControllerSuite) TestListModelsYAML(c *gc.C) { + s.Factory.MakeMachine(c, nil) + two := uint64(2) + s.Factory.MakeMachine(c, &factory.MachineParams{Characteristics: &instance.HardwareCharacteristics{CpuCores: &two}}) context := s.run(c, "list-models", "--format=yaml") c.Assert(testing.Stdout(context), gc.Matches, ` models: - name: controller model-uuid: deadbeef-0bad-400d-8000-4b1d0d06f00d - controller-uuid: deadbeef-0bad-400d-8000-4b1d0d06f00d - owner: admin@local + controller-uuid: deadbeef-1bad-500d-9000-4b1d0d06f00d + controller-name: kontroll + owner: admin cloud: dummy + region: dummy-region type: dummy life: alive status: current: available since: .* users: - admin@local: + admin: display-name: admin access: admin last-connection: just now + 
machines: + "0": + cores: 0 + "1": + cores: 2 current-model: controller `[1:]) } @@ -128,7 +149,7 @@ c.Assert(err, jc.ErrorIsNil) now := time.Now() sInfo := status.StatusInfo{ - Status: status.StatusDestroying, + Status: status.Destroying, Message: "", Since: &now, } @@ -139,31 +160,42 @@ // don't exist, and they will go away quickly. context := s.run(c, "list-models") c.Assert(testing.Stdout(context), gc.Equals, ""+ - "MODEL OWNER STATUS LAST CONNECTION\n"+ - "controller* admin@local available just now\n"+ - "new-model admin@local destroying never connected\n"+ + "Controller: kontroll\n"+ + "\n"+ + "Model Owner Status Access Last connection\n"+ + "controller* admin available admin just now\n"+ + "new-model admin destroying admin never connected\n"+ "\n") } func (s *cmdControllerSuite) TestAddModel(c *gc.C) { + s.testAddModel(c) +} + +func (s *cmdControllerSuite) TestAddModelWithCloudAndRegion(c *gc.C) { + s.testAddModel(c, "dummy/dummy-region") +} + +func (s *cmdControllerSuite) testAddModel(c *gc.C, args ...string) { // The JujuConnSuite doesn't set up an ssh key in the fake home dir, // so fake one on the command line. The dummy provider also expects // a config value for 'controller'. - context := s.run( - c, "add-model", "new-model", + args = append([]string{"add-model", "new-model"}, args...) + args = append(args, "--config", "authorized-keys=fake-key", "--config", "controller=false", ) + context := s.run(c, args...) c.Check(testing.Stdout(context), gc.Equals, "") c.Check(testing.Stderr(context), gc.Equals, ` -Added 'new-model' model with credential 'cred' for user 'admin' +Added 'new-model' model on dummy/dummy-region with credential 'cred' for user 'admin' `[1:]) // Make sure that the saved server details are sufficient to connect // to the api server. 
accountDetails, err := s.ControllerStore.AccountDetails("kontroll") c.Assert(err, jc.ErrorIsNil) - modelDetails, err := s.ControllerStore.ModelByName("kontroll", "admin@local/new-model") + modelDetails, err := s.ControllerStore.ModelByName("kontroll", "admin/new-model") c.Assert(err, jc.ErrorIsNil) api, err := juju.NewAPIConnection(juju.NewAPIConnectionParams{ Store: s.ControllerStore, @@ -189,6 +221,7 @@ st := s.Factory.MakeModel(c, &factory.ModelParams{ Name: "just-a-controller", ConfigAttrs: testing.Attrs{"controller": true}, + CloudRegion: "dummy-region", }) defer st.Close() factory.NewFactory(st).MakeApplication(c, nil) @@ -240,18 +273,19 @@ destroyOp := (<-ops).(dummy.OpDestroy) c.Assert(destroyOp.Env, gc.Equals, "controller") - c.Assert(destroyOp.Cloud, jc.DeepEquals, dummy.SampleCloudSpec()) + c.Assert(destroyOp.Cloud, gc.Equals, "dummy") + c.Assert(destroyOp.CloudRegion, gc.Equals, "dummy-region") store := jujuclient.NewFileClientStore() _, err := store.ControllerByName("kontroll") c.Assert(err, jc.Satisfies, errors.IsNotFound) } -func (s *cmdControllerSuite) TestRemoveBlocks(c *gc.C) { +func (s *cmdControllerSuite) TestEnableDestroyController(c *gc.C) { s.State.SwitchBlockOn(state.DestroyBlock, "TestBlockDestroyModel") s.State.SwitchBlockOn(state.ChangeBlock, "TestChangeBlock") - s.run(c, "remove-all-blocks") + s.run(c, "enable-destroy-controller") blocks, err := s.State.AllBlocksForController() c.Assert(err, jc.ErrorIsNil) @@ -260,7 +294,8 @@ func (s *cmdControllerSuite) TestControllerKill(c *gc.C) { st := s.Factory.MakeModel(c, &factory.ModelParams{ - Name: "foo", + Name: "foo", + CloudRegion: "dummy-region", }) st.SwitchBlockOn(state.DestroyBlock, "TestBlockDestroyModel") @@ -273,18 +308,6 @@ c.Assert(err, jc.Satisfies, errors.IsNotFound) } -func (s *cmdControllerSuite) TestListBlocks(c *gc.C) { - s.State.SwitchBlockOn(state.DestroyBlock, "TestBlockDestroyModel") - s.State.SwitchBlockOn(state.ChangeBlock, "TestChangeBlock") - - ctx := s.run(c, 
"list-all-blocks", "--format", "json") - expected := fmt.Sprintf(`[{"name":"controller","model-uuid":"%s","owner-tag":"%s","blocks":["BlockDestroy","BlockChange"]}]`, - s.State.ModelUUID(), s.AdminUserTag(c).String()) - - strippedOut := strings.Replace(testing.Stdout(ctx), "\n", "", -1) - c.Check(strippedOut, gc.Equals, expected) -} - func (s *cmdControllerSuite) TestSystemKillCallsEnvironDestroyOnHostedEnviron(c *gc.C) { st := s.Factory.MakeModel(c, &factory.ModelParams{ Name: "foo", @@ -335,7 +358,7 @@ } func (s *cmdControllerSuite) TestGetControllerConfigYAML(c *gc.C) { - context := s.run(c, "get-controller-config", "--format=yaml") + context := s.run(c, "controller-config", "--format=yaml") controllerCfg, err := s.State.ControllerConfig() c.Assert(err, jc.ErrorIsNil) cfgYaml, err := yaml.Marshal(controllerCfg) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/featuretests/cmd_juju_credential_test.go juju-core-2.0.0/src/github.com/juju/juju/featuretests/cmd_juju_credential_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/featuretests/cmd_juju_credential_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/featuretests/cmd_juju_credential_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,57 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+ +package featuretests + +import ( + "github.com/juju/cmd" + "github.com/juju/loggo" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + "gopkg.in/juju/names.v2" + + apicloud "github.com/juju/juju/api/cloud" + "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/cloud" + "github.com/juju/juju/cmd/juju/commands" + jujutesting "github.com/juju/juju/juju/testing" + "github.com/juju/juju/jujuclient" + "github.com/juju/juju/testing" +) + +type cmdCredentialSuite struct { + jujutesting.JujuConnSuite +} + +func (s *cmdCredentialSuite) run(c *gc.C, args ...string) *cmd.Context { + context := testing.Context(c) + command := commands.NewJujuCommand(context) + c.Assert(testing.InitCommand(command, args), jc.ErrorIsNil) + c.Assert(command.Run(context), jc.ErrorIsNil) + loggo.RemoveWriter("warning") + return context +} + +func (s *cmdCredentialSuite) TestUpdateCredentialCommand(c *gc.C) { + store := jujuclient.NewFileClientStore() + store.UpdateCredential("dummy", cloud.CloudCredential{ + AuthCredentials: map[string]cloud.Credential{ + "mine": cloud.NewCredential(cloud.UserPassAuthType, map[string]string{"username": "fred", "password": "secret"}), + }, + }) + s.run(c, "update-credential", "dummy", "mine") + + client := apicloud.NewClient(s.OpenControllerAPI(c)) + defer client.Close() + + tag := names.NewCloudCredentialTag("dummy/admin@local/mine") + result, err := client.Credentials(tag) + c.Assert(err, jc.ErrorIsNil) + c.Assert(result, jc.DeepEquals, []params.CloudCredentialResult{ + {Result: ¶ms.CloudCredential{ + AuthType: "userpass", + Attributes: map[string]string{"username": "fred"}, + Redacted: []string{"password"}, + }}, + }) +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/featuretests/cmd_juju_login_test.go juju-core-2.0.0/src/github.com/juju/juju/featuretests/cmd_juju_login_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/featuretests/cmd_juju_login_test.go 2016-08-16 08:56:25.000000000 +0000 +++ 
juju-core-2.0.0/src/github.com/juju/juju/featuretests/cmd_juju_login_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -36,13 +36,15 @@ } command := commands.NewJujuCommand(context) c.Assert(testing.InitCommand(command, args), jc.ErrorIsNil) - c.Assert(command.Run(context), jc.ErrorIsNil) + err := command.Run(context) + c.Assert(err, jc.ErrorIsNil, gc.Commentf("stdout: %q; stderr: %q", context.Stdout, context.Stderr)) loggo.RemoveWriter("warning") // remove logger added by main command return context } func (s *cmdLoginSuite) createTestUser(c *gc.C) { - s.run(c, nil, "add-user", "test", "--models", "controller") + s.run(c, nil, "add-user", "test") + s.run(c, nil, "grant", "test", "read", "controller") s.changeUserPassword(c, "test", "hunter2") } @@ -61,8 +63,8 @@ context := s.run(c, strings.NewReader("hunter2\nhunter2\n"), "login", "test") c.Assert(testing.Stdout(context), gc.Equals, "") c.Assert(testing.Stderr(context), gc.Equals, ` -password: -You are now logged in to "kontroll" as "test@local". +please enter password for test on kontroll: +You are now logged in to "kontroll" as "test". `[1:]) // We should have a macaroon, but no password, in the client store. @@ -70,7 +72,6 @@ accountDetails, err := store.AccountDetails("kontroll") c.Assert(err, jc.ErrorIsNil) c.Assert(accountDetails.Password, gc.Equals, "") - c.Assert(accountDetails.Macaroon, gc.Not(gc.Equals), "") // We should be able to login with the macaroon. s.run(c, nil, "status") diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/featuretests/cmd_juju_metrics.go juju-core-2.0.0/src/github.com/juju/juju/featuretests/cmd_juju_metrics.go --- juju-core-2.0~beta15/src/github.com/juju/juju/featuretests/cmd_juju_metrics.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/featuretests/cmd_juju_metrics.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,166 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+ +package featuretests + +import ( + "time" + + "github.com/gosuri/uitable" + "github.com/juju/cmd/cmdtesting" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/cmd/juju/metricsdebug" + jujutesting "github.com/juju/juju/juju/testing" + "github.com/juju/juju/state" + coretesting "github.com/juju/juju/testing" + "github.com/juju/juju/testing/factory" +) + +type cmdMetricsCommandSuite struct { + jujutesting.JujuConnSuite +} + +var _ = gc.Suite(&cmdMetricsCommandSuite{}) + +func (s *cmdMetricsCommandSuite) TestDebugNoArgs(c *gc.C) { + _, err := coretesting.RunCommand(c, metricsdebug.New()) + c.Assert(err, gc.ErrorMatches, "you need to specify at least one unit or application") +} + +type metric struct { + Unit string `json:"unit" yaml:"unit"` + Timestamp time.Time `json:"timestamp" yaml:"timestamp"` + Metric string `json:"metric" yaml:"metric"` + Value string `json:"value" yaml:"value"` +} + +func formatTabular(metrics ...metric) string { + table := uitable.New() + table.MaxColWidth = 50 + table.Wrap = true + for _, col := range []int{1, 2, 3, 4} { + table.RightAlign(col) + } + table.AddRow("UNIT", "TIMESTAMP", "METRIC", "VALUE") + for _, m := range metrics { + table.AddRow(m.Unit, m.Timestamp.Format(time.RFC3339), m.Metric, m.Value) + } + return table.String() + "\n" +} + +func (s *cmdMetricsCommandSuite) TestUnits(c *gc.C) { + meteredCharm := s.Factory.MakeCharm(c, &factory.CharmParams{Name: "metered", URL: "local:quantal/metered"}) + meteredService := s.Factory.MakeApplication(c, &factory.ApplicationParams{Charm: meteredCharm}) + unit := s.Factory.MakeUnit(c, &factory.UnitParams{Application: meteredService, SetCharmURL: true}) + unit2 := s.Factory.MakeUnit(c, &factory.UnitParams{Application: meteredService, SetCharmURL: true}) + newTime1 := time.Now().Round(time.Second) + newTime2 := newTime1.Add(time.Second) + metricA := state.Metric{"pings", "5", newTime1} + metricB := state.Metric{"pings", "10.5", newTime2} + 
s.Factory.MakeMetric(c, &factory.MetricParams{Unit: unit, Metrics: []state.Metric{metricA}}) + s.Factory.MakeMetric(c, &factory.MetricParams{Unit: unit2, Metrics: []state.Metric{metricA, metricB}}) + ctx, err := coretesting.RunCommand(c, metricsdebug.New(), "metered/1") + c.Assert(err, jc.ErrorIsNil) + + c.Assert(cmdtesting.Stdout(ctx), gc.Equals, + formatTabular(metric{ + Unit: unit2.Name(), + Timestamp: newTime2, + Metric: "pings", + Value: "10.5", + }), + ) + ctx, err = coretesting.RunCommand(c, metricsdebug.New(), "metered/0") + c.Assert(err, jc.ErrorIsNil) + c.Assert(cmdtesting.Stdout(ctx), gc.Equals, + formatTabular(metric{ + Unit: unit.Name(), + Timestamp: newTime1, + Metric: "pings", + Value: "5", + }), + ) +} + +func (s *cmdMetricsCommandSuite) TestAll(c *gc.C) { + meteredCharm := s.Factory.MakeCharm(c, &factory.CharmParams{Name: "metered", URL: "local:quantal/metered"}) + meteredService := s.Factory.MakeApplication(c, &factory.ApplicationParams{Charm: meteredCharm}) + unit := s.Factory.MakeUnit(c, &factory.UnitParams{Application: meteredService, SetCharmURL: true}) + unit2 := s.Factory.MakeUnit(c, &factory.UnitParams{Application: meteredService, SetCharmURL: true}) + newTime1 := time.Now().Round(time.Second) + newTime2 := newTime1.Add(time.Second) + metricA := state.Metric{"pings", "5", newTime1} + metricB := state.Metric{"pings", "10.5", newTime2} + s.Factory.MakeMetric(c, &factory.MetricParams{Unit: unit, Metrics: []state.Metric{metricA}}) + s.Factory.MakeMetric(c, &factory.MetricParams{Unit: unit2, Metrics: []state.Metric{metricA, metricB}}) + ctx, err := coretesting.RunCommand(c, metricsdebug.New(), "--all") + c.Assert(err, jc.ErrorIsNil) + c.Assert(cmdtesting.Stdout(ctx), gc.Equals, + formatTabular([]metric{{ + Unit: unit.Name(), + Timestamp: newTime1, + Metric: "pings", + Value: "5", + }, { + Unit: unit2.Name(), + Timestamp: newTime2, + Metric: "pings", + Value: "10.5", + }}...), + ) +} + +func (s *cmdMetricsCommandSuite) TestFormatJSON(c *gc.C) { + 
meteredCharm := s.Factory.MakeCharm(c, &factory.CharmParams{Name: "metered", URL: "local:quantal/metered"}) + meteredService := s.Factory.MakeApplication(c, &factory.ApplicationParams{Charm: meteredCharm}) + unit := s.Factory.MakeUnit(c, &factory.UnitParams{Application: meteredService, SetCharmURL: true}) + unit2 := s.Factory.MakeUnit(c, &factory.UnitParams{Application: meteredService, SetCharmURL: true}) + newTime1 := time.Now().Round(time.Second) + newTime2 := newTime1.Add(time.Second) + metricA := state.Metric{"pings", "5", newTime1} + metricB := state.Metric{"pings", "10.5", newTime2} + s.Factory.MakeMetric(c, &factory.MetricParams{Unit: unit, Metrics: []state.Metric{metricA}}) + s.Factory.MakeMetric(c, &factory.MetricParams{Unit: unit2, Metrics: []state.Metric{metricA, metricB}}) + ctx, err := coretesting.RunCommand(c, metricsdebug.New(), "metered/1", "--format", "json") + c.Assert(err, jc.ErrorIsNil) + expectedOutput := []metric{{ + Unit: unit2.Name(), + Timestamp: newTime2, + Metric: "pings", + Value: "10.5", + }} + c.Assert(cmdtesting.Stdout(ctx), jc.JSONEquals, expectedOutput) +} + +func (s *cmdMetricsCommandSuite) TestFormatYAML(c *gc.C) { + meteredCharm := s.Factory.MakeCharm(c, &factory.CharmParams{Name: "metered", URL: "local:quantal/metered"}) + meteredService := s.Factory.MakeApplication(c, &factory.ApplicationParams{Charm: meteredCharm}) + unit := s.Factory.MakeUnit(c, &factory.UnitParams{Application: meteredService, SetCharmURL: true}) + unit2 := s.Factory.MakeUnit(c, &factory.UnitParams{Application: meteredService, SetCharmURL: true}) + newTime1 := time.Now().Round(time.Second) + newTime2 := newTime1.Add(time.Second) + metricA := state.Metric{"pings", "5", newTime1} + metricB := state.Metric{"pings", "10.5", newTime2} + s.Factory.MakeMetric(c, &factory.MetricParams{Unit: unit, Metrics: []state.Metric{metricA}}) + s.Factory.MakeMetric(c, &factory.MetricParams{Unit: unit2, Metrics: []state.Metric{metricA, metricB}}) + ctx, err := 
coretesting.RunCommand(c, metricsdebug.New(), "metered/1", "--format", "yaml") + c.Assert(err, jc.ErrorIsNil) + expectedOutput := []metric{{ + Unit: unit2.Name(), + Timestamp: newTime2, + Metric: "pings", + Value: "10.5", + }} + c.Assert(cmdtesting.Stdout(ctx), jc.YAMLEquals, expectedOutput) +} + +func (s *cmdMetricsCommandSuite) TestNoMetrics(c *gc.C) { + meteredCharm := s.Factory.MakeCharm(c, &factory.CharmParams{Name: "metered", URL: "local:quantal/metered"}) + meteredService := s.Factory.MakeApplication(c, &factory.ApplicationParams{Charm: meteredCharm}) + s.Factory.MakeUnit(c, &factory.UnitParams{Application: meteredService, SetCharmURL: true}) + ctx, err := coretesting.RunCommand(c, metricsdebug.New(), "metered") + c.Assert(err, jc.ErrorIsNil) + c.Assert(cmdtesting.Stdout(ctx), gc.Equals, "") +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/featuretests/cmd_juju_model_test.go juju-core-2.0.0/src/github.com/juju/juju/featuretests/cmd_juju_model_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/featuretests/cmd_juju_model_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/featuretests/cmd_juju_model_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -12,10 +12,15 @@ jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" "gopkg.in/juju/names.v2" + "gopkg.in/yaml.v1" "github.com/juju/juju/cmd/juju/commands" "github.com/juju/juju/core/description" + "github.com/juju/juju/environs" + "github.com/juju/juju/environs/config" + "github.com/juju/juju/feature" jujutesting "github.com/juju/juju/juju/testing" + "github.com/juju/juju/permission" "github.com/juju/juju/state" "github.com/juju/juju/testing" "github.com/juju/juju/testing/factory" @@ -41,7 +46,7 @@ func (s *cmdModelSuite) TestGrantModelCmdStack(c *gc.C) { username := "bar@ubuntuone" - context := s.run(c, "grant", username, "controller") + context := s.run(c, "grant", username, "read", "controller") obtained := strings.Replace(testing.Stdout(context), 
"\n", "", -1) expected := "" c.Assert(obtained, gc.Equals, expected) @@ -49,8 +54,8 @@ user := names.NewUserTag(username) modelUser, err := s.State.UserAccess(user, s.State.ModelTag()) c.Assert(err, jc.ErrorIsNil) - c.Assert(modelUser.UserName, gc.Equals, user.Canonical()) - c.Assert(modelUser.CreatedBy.Canonical(), gc.Equals, s.AdminUserTag(c).Canonical()) + c.Assert(modelUser.UserName, gc.Equals, user.Id()) + c.Assert(modelUser.CreatedBy.Id(), gc.Equals, s.AdminUserTag(c).Id()) lastConn, err := s.State.LastModelConnection(modelUser.UserTag) c.Assert(err, jc.Satisfies, state.IsNeverConnectedError) c.Assert(lastConn.IsZero(), jc.IsTrue) @@ -60,7 +65,7 @@ // Firstly share a model with a user username := "bar@ubuntuone" s.Factory.MakeModelUser(c, &factory.ModelUserParams{ - User: username, Access: description.ReadAccess}) + User: username, Access: permission.ReadAccess}) // Because we are calling into juju through the main command, // and the main command adds a warning logging writer, we need @@ -68,7 +73,7 @@ loggo.RemoveWriter("warning") // Then test that the unshare command stack is hooked up - context := s.run(c, "revoke", username, "controller") + context := s.run(c, "revoke", username, "read", "controller") obtained := strings.Replace(testing.Stdout(context), "\n", "", -1) expected := "" c.Assert(obtained, gc.Equals, expected) @@ -76,13 +81,13 @@ user := names.NewUserTag(username) modelUser, err := s.State.UserAccess(user, s.State.ModelTag()) c.Assert(errors.IsNotFound(err), jc.IsTrue) - c.Assert(modelUser, gc.DeepEquals, description.UserAccess{}) + c.Assert(modelUser, gc.DeepEquals, permission.UserAccess{}) } func (s *cmdModelSuite) TestModelUsersCmd(c *gc.C) { // Firstly share an model with a user username := "bar@ubuntuone" - context := s.run(c, "grant", username, "controller") + context := s.run(c, "grant", username, "read", "controller") user := names.NewUserTag(username) modelUser, err := s.State.UserAccess(user, s.State.ModelTag()) c.Assert(err, 
jc.ErrorIsNil) @@ -93,35 +98,101 @@ // to clear the logging writers here. loggo.RemoveWriter("warning") - context = s.run(c, "list-shares") + context = s.run(c, "list-users", "controller") c.Assert(err, jc.ErrorIsNil) c.Assert(testing.Stdout(context), gc.Equals, ""+ - "NAME ACCESS LAST CONNECTION\n"+ - "admin@local (admin) admin just now\n"+ - "bar@ubuntuone read never connected\n"+ + "Name Display name Access Last connection\n"+ + "admin* admin admin just now\n"+ + "bar@ubuntuone read never connected\n"+ "\n") } -func (s *cmdModelSuite) TestGet(c *gc.C) { +func (s *cmdModelSuite) TestModelConfigGet(c *gc.C) { err := s.State.UpdateModelConfig(map[string]interface{}{"special": "known"}, nil, nil) c.Assert(err, jc.ErrorIsNil) - context := s.run(c, "get-model-config", "special") + context := s.run(c, "model-config", "special") c.Assert(testing.Stdout(context), gc.Equals, "known\n") } -func (s *cmdModelSuite) TestSet(c *gc.C) { - s.run(c, "set-model-config", "special=known") - s.assertEnvValue(c, "special", "known") +func (s *cmdModelSuite) TestModelConfigSet(c *gc.C) { + s.run(c, "model-config", "special=known") + s.assertModelValue(c, "special", "known") } -func (s *cmdModelSuite) TestUnset(c *gc.C) { +func (s *cmdModelSuite) TestModelConfigReset(c *gc.C) { err := s.State.UpdateModelConfig(map[string]interface{}{"special": "known"}, nil, nil) c.Assert(err, jc.ErrorIsNil) - s.run(c, "unset-model-config", "special") - s.assertEnvValueMissing(c, "special") + s.run(c, "model-config", "--reset", "special") + s.assertModelValueMissing(c, "special") +} + +func (s *cmdModelSuite) TestModelDefaultsGet(c *gc.C) { + err := s.State.UpdateModelConfigDefaultValues(map[string]interface{}{"special": "known"}, nil, nil) + c.Assert(err, jc.ErrorIsNil) + + context := s.run(c, "model-defaults", "special") + c.Assert(testing.Stdout(context), gc.Equals, ` +Attribute Default Controller +special - known + +`[1:]) +} + +func (s *cmdModelSuite) TestModelDefaultsGetRegion(c *gc.C) { + err := 
s.State.UpdateModelConfigDefaultValues(map[string]interface{}{"special": "known"}, nil, &environs.RegionSpec{"dummy", "dummy-region"}) + c.Assert(err, jc.ErrorIsNil) + + context := s.run(c, "model-defaults", "dummy-region", "special") + c.Assert(testing.Stdout(context), gc.Equals, ` +Attribute Default Controller +special - - + dummy-region known - + +`[1:]) +} + +func (s *cmdModelSuite) TestModelDefaultsSet(c *gc.C) { + s.run(c, "model-defaults", "special=known") + defaults, err := s.State.ModelConfigDefaultValues() + c.Assert(err, jc.ErrorIsNil) + value, found := defaults["special"] + c.Assert(found, jc.IsTrue) + c.Assert(value.Controller, gc.Equals, "known") +} + +func (s *cmdModelSuite) TestModelDefaultsSetRegion(c *gc.C) { + s.run(c, "model-defaults", "dummy/dummy-region", "special=known") + defaults, err := s.State.ModelConfigDefaultValues() + c.Assert(err, jc.ErrorIsNil) + value, found := defaults["special"] + c.Assert(found, jc.IsTrue) + c.Assert(value.Controller, gc.IsNil) + c.Assert(value.Regions, jc.SameContents, []config.RegionDefaultValue{{"dummy-region", "known"}}) +} + +func (s *cmdModelSuite) TestModelDefaultsReset(c *gc.C) { + err := s.State.UpdateModelConfigDefaultValues(map[string]interface{}{"special": "known"}, nil, nil) + c.Assert(err, jc.ErrorIsNil) + + s.run(c, "model-defaults", "--reset", "special") + defaults, err := s.State.ModelConfigDefaultValues() + c.Assert(err, jc.ErrorIsNil) + _, found := defaults["special"] + c.Assert(found, jc.IsFalse) +} + +func (s *cmdModelSuite) TestModelDefaultsResetRegion(c *gc.C) { + err := s.State.UpdateModelConfigDefaultValues(map[string]interface{}{"special": "known"}, nil, &environs.RegionSpec{"dummy", "dummy-region"}) + c.Assert(err, jc.ErrorIsNil) + + s.run(c, "model-defaults", "dummy-region", "--reset", "special") + defaults, err := s.State.ModelConfigDefaultValues() + c.Assert(err, jc.ErrorIsNil) + _, found := defaults["special"] + c.Assert(found, jc.IsFalse) } func (s *cmdModelSuite) 
TestRetryProvisioning(c *gc.C) { @@ -134,17 +205,50 @@ c.Check(stripped, gc.Equals, `machine 0 is not in an error state`) } -func (s *cmdModelSuite) assertEnvValue(c *gc.C, key string, expected interface{}) { - envConfig, err := s.State.ModelConfig() +func (s *cmdModelSuite) TestDumpModel(c *gc.C) { + s.SetFeatureFlags(feature.DeveloperMode) + s.Factory.MakeMachine(c, &factory.MachineParams{ + Jobs: []state.MachineJob{state.JobManageModel}, + }) + ctx := s.run(c, "dump-model") + output := testing.Stdout(ctx) + // The output is yaml formatted output that is a model description. + model, err := description.Deserialize([]byte(output)) + c.Assert(err, jc.ErrorIsNil) + c.Assert(model.Config()["name"], gc.Equals, "controller") +} + +func (s *cmdModelSuite) TestDumpModelDB(c *gc.C) { + s.SetFeatureFlags(feature.DeveloperMode) + s.Factory.MakeMachine(c, &factory.MachineParams{ + Jobs: []state.MachineJob{state.JobManageModel}, + }) + ctx := s.run(c, "dump-db") + output := testing.Stdout(ctx) + // The output is map of collection names to documents. + // Defaults to yaml output. + var valueMap map[string]interface{} + err := yaml.Unmarshal([]byte(output), &valueMap) + c.Assert(err, jc.ErrorIsNil) + c.Logf("%#v", valueMap) + model := valueMap["models"] + // yaml unmarshals maps with interface keys. 
+ modelMap, ok := model.(map[interface{}]interface{}) + c.Assert(ok, jc.IsTrue) + c.Assert(modelMap["name"], gc.Equals, "controller") +} + +func (s *cmdModelSuite) assertModelValue(c *gc.C, key string, expected interface{}) { + modelConfig, err := s.State.ModelConfig() c.Assert(err, jc.ErrorIsNil) - value, found := envConfig.AllAttrs()[key] + value, found := modelConfig.AllAttrs()[key] c.Assert(found, jc.IsTrue) c.Assert(value, gc.Equals, expected) } -func (s *cmdModelSuite) assertEnvValueMissing(c *gc.C, key string) { - envConfig, err := s.State.ModelConfig() +func (s *cmdModelSuite) assertModelValueMissing(c *gc.C, key string) { + modelConfig, err := s.State.ModelConfig() c.Assert(err, jc.ErrorIsNil) - _, found := envConfig.AllAttrs()[key] + _, found := modelConfig.AllAttrs()[key] c.Assert(found, jc.IsFalse) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/featuretests/cmd_juju_register_test.go juju-core-2.0.0/src/github.com/juju/juju/featuretests/cmd_juju_register_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/featuretests/cmd_juju_register_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/featuretests/cmd_juju_register_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -5,15 +5,18 @@ import ( "io" - "strings" + "regexp" "github.com/juju/cmd" "github.com/juju/loggo" + cookiejar "github.com/juju/persistent-cookiejar" jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" + "gopkg.in/macaroon-bakery.v1/httpbakery" "github.com/juju/juju/api" "github.com/juju/juju/cmd/juju/commands" + cmdtesting "github.com/juju/juju/cmd/testing" "github.com/juju/juju/juju" jujutesting "github.com/juju/juju/juju/testing" "github.com/juju/juju/testing" @@ -23,68 +26,94 @@ jujutesting.JujuConnSuite } -func (s *cmdRegistrationSuite) run(c *gc.C, stdin io.Reader, args ...string) *cmd.Context { - context := testing.Context(c) - if stdin != nil { - context.Stdin = stdin - } - command := commands.NewJujuCommand(context) - 
c.Assert(testing.InitCommand(command, args), jc.ErrorIsNil) - c.Assert(command.Run(context), jc.ErrorIsNil) - loggo.RemoveWriter("warning") // remove logger added by main command - return context -} - func (s *cmdRegistrationSuite) TestAddUserAndRegister(c *gc.C) { // First, add user "bob", and record the "juju register" command // that is printed out. - context := s.run(c, nil, "add-user", "bob", "Bob Dobbs") + + context := run(c, nil, "add-user", "bob", "Bob Dobbs") c.Check(testing.Stderr(context), gc.Equals, "") stdout := testing.Stdout(context) - c.Check(stdout, gc.Matches, ` + expectPat := ` User "Bob Dobbs \(bob\)" added Please send this command to bob: - juju register .* + juju register (.+) "Bob Dobbs \(bob\)" has not been granted access to any models(.|\n)* -`[1:]) - jujuRegisterCommand := strings.Fields(strings.TrimSpace( - strings.SplitN(stdout[strings.Index(stdout, "juju register"):], "\n", 2)[0], - )) - c.Logf("%q", jujuRegisterCommand) +`[1:] + c.Assert(stdout, gc.Matches, expectPat) + + arg := regexp.MustCompile("^" + expectPat + "$").FindStringSubmatch(stdout)[1] + c.Logf("juju register %q", arg) // Now run the "juju register" command. We need to pass the - // controller name and password to set. - stdin := strings.NewReader("bob-controller\nhunter2\nhunter2\n") - args := jujuRegisterCommand[1:] // drop the "juju" - context = s.run(c, stdin, args...) - c.Check(testing.Stdout(context), gc.Equals, "") - c.Check(testing.Stderr(context), gc.Equals, ` -WARNING: The controller proposed "kontroll" which clashes with an existing controller. The two controllers are entirely different. - -Enter a name for this controller: -Enter a new password: -Confirm password: + // controller name and password to set, and we need a different + // file store to mimic a different local OS user. + s.CreateUserHome(c, &jujutesting.UserHomeParams{ + Username: "bob", + }) -Welcome, bob. You are now logged into "bob-controller". 
+ // The expected prompt does not include a warning about the controller + // name, as this new local user does not have a controller named + // "kontroll" registered. + prompter := cmdtesting.NewSeqPrompter(c, "»", ` +Enter a new password: »hunter2 + +Confirm password: »hunter2 -There are no models available. You can add models with -"juju add-model", or you can ask an administrator or owner -of a model to grant access to that model with "juju grant". +Initial password successfully set for bob. +Enter a name for this controller \[kontroll\]: »bob-controller +Welcome, bob. You are now logged into "bob-controller". + +There are no models available. (.|\n)* `[1:]) + context = run(c, prompter, "register", arg) + prompter.AssertDone() + // Make sure that the saved server details are sufficient to connect // to the api server. + jar, err := cookiejar.New(&cookiejar.Options{ + Filename: cookiejar.DefaultCookieFile(), + }) + c.Assert(err, jc.ErrorIsNil) + dialOpts := api.DefaultDialOpts() + dialOpts.BakeryClient = httpbakery.NewClient() + dialOpts.BakeryClient.Jar = jar accountDetails, err := s.ControllerStore.AccountDetails("bob-controller") c.Assert(err, jc.ErrorIsNil) api, err := juju.NewAPIConnection(juju.NewAPIConnectionParams{ Store: s.ControllerStore, ControllerName: "bob-controller", AccountDetails: accountDetails, - DialOpts: api.DefaultDialOpts(), + DialOpts: dialOpts, OpenAPI: api.Open, }) c.Assert(err, jc.ErrorIsNil) c.Assert(api.Close(), jc.ErrorIsNil) } + +// run runs a juju command with the given arguments. +// If stdio is given, it will be used for all input and output +// to the command; otherwise testing.Context will be used. +// +// It returns the context used to run the command. 
+func run(c *gc.C, stdio io.ReadWriter, args ...string) *cmd.Context { + var context *cmd.Context + if stdio != nil { + context = &cmd.Context{ + Dir: c.MkDir(), + Stdin: stdio, + Stdout: stdio, + Stderr: stdio, + } + } else { + context = testing.Context(c) + } + command := commands.NewJujuCommand(context) + c.Assert(testing.InitCommand(command, args), jc.ErrorIsNil) + err := command.Run(context) + c.Assert(err, jc.ErrorIsNil, gc.Commentf("stderr: %q", context.Stderr)) + loggo.RemoveWriter("warning") // remove logger added by main command + return context +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/featuretests/cmd_juju_relation_test.go juju-core-2.0.0/src/github.com/juju/juju/featuretests/cmd_juju_relation_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/featuretests/cmd_juju_relation_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/featuretests/cmd_juju_relation_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,47 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package featuretests + +import ( + "os" + + gc "gopkg.in/check.v1" + + "github.com/juju/juju/juju/osenv" + jujutesting "github.com/juju/juju/juju/testing" +) + +type CmdRelationSuite struct { + jujutesting.JujuConnSuite + apps []string +} + +func (s *CmdRelationSuite) SetUpTest(c *gc.C) { + s.JujuConnSuite.SetUpTest(c) + os.Setenv(osenv.JujuModelEnvKey, "") + + s.apps = []string{"wordpress", "mysql"} + for _, app := range s.apps { + ch := s.AddTestingCharm(c, app) + s.AddTestingService(c, app, ch) + } +} + +func (s *CmdRelationSuite) TestAddRelationSuccess(c *gc.C) { + runCommandExpectSuccess(c, "add-relation", s.apps...) +} + +func (s *CmdRelationSuite) TestAddRelationFail(c *gc.C) { + runCommandExpectSuccess(c, "add-relation", s.apps...) + runCommandExpectFailure(c, "add-relation", `cannot add relation "wordpress:db mysql:server": relation already exists`, s.apps...) 
+} + +func (s *CmdRelationSuite) TestRemoveRelationSuccess(c *gc.C) { + runCommandExpectSuccess(c, "add-relation", s.apps...) + runCommandExpectSuccess(c, "remove-relation", s.apps...) +} + +func (s *CmdRelationSuite) TestRemoveRelationFail(c *gc.C) { + runCommandExpectFailure(c, "remove-relation", `relation "wordpress:db mysql:server" not found`, s.apps...) +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/featuretests/cmd_juju_subnet_test.go juju-core-2.0.0/src/github.com/juju/juju/featuretests/cmd_juju_subnet_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/featuretests/cmd_juju_subnet_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/featuretests/cmd_juju_subnet_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -150,7 +150,7 @@ context := s.Run(c, expectedSuccess, "list-subnets") s.AssertOutput(c, context, "", // no stdout output - "no subnets to display\n", + "No subnets to display.\n", ) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/featuretests/cmdjuju_test.go juju-core-2.0.0/src/github.com/juju/juju/featuretests/cmdjuju_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/featuretests/cmdjuju_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/featuretests/cmdjuju_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -47,7 +47,7 @@ c.Assert(err, jc.ErrorIsNil) context, err := testing.RunCommand(c, application.NewServiceGetConstraintsCommand(), "svc") - c.Assert(testing.Stdout(context), gc.Equals, "cpu-cores=64\n") + c.Assert(testing.Stdout(context), gc.Equals, "cores=64\n") c.Assert(testing.Stderr(context), gc.Equals, "") } @@ -55,7 +55,7 @@ ch := s.AddTestingCharm(c, "dummy") svc := s.AddTestingService(c, "dummy-service", ch) - _, err := testing.RunCommand(c, application.NewSetCommand(), "dummy-service", + _, err := testing.RunCommand(c, application.NewConfigCommand(), "dummy-service", "username=hello", "outlook=hello@world.tld") c.Assert(err, 
jc.ErrorIsNil) @@ -81,7 +81,7 @@ err := svc.UpdateConfigSettings(settings) c.Assert(err, jc.ErrorIsNil) - _, err = testing.RunCommand(c, application.NewSetCommand(), "--to-default", "dummy-service", "username") + _, err = testing.RunCommand(c, application.NewConfigCommand(), "dummy-service", "--reset", "username") c.Assert(err, jc.ErrorIsNil) expect := charm.Settings{ @@ -118,7 +118,7 @@ ch := s.AddTestingCharm(c, "dummy") s.AddTestingService(c, "dummy-service", ch) - context, err := testing.RunCommand(c, application.NewGetCommand(), "dummy-service") + context, err := testing.RunCommand(c, application.NewConfigCommand(), "dummy-service") c.Assert(err, jc.ErrorIsNil) c.Assert(testing.Stdout(context), jc.DeepEquals, expected) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/featuretests/cmd_juju_user_test.go juju-core-2.0.0/src/github.com/juju/juju/featuretests/cmd_juju_user_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/featuretests/cmd_juju_user_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/featuretests/cmd_juju_user_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -49,34 +49,6 @@ c.Assert(user.IsDisabled(), jc.IsFalse) } -func (s *UserSuite) TestUserAddGrantModel(c *gc.C) { - sharedModelState := s.Factory.MakeModel(c, &factory.ModelParams{ - Name: "amodel", - }) - defer sharedModelState.Close() - - ctx, err := s.RunUserCommand(c, "", "add-user", "test", "--models", "amodel") - c.Assert(err, jc.ErrorIsNil) - c.Assert(testing.Stdout(ctx), jc.HasPrefix, `User "test" added`) - user, err := s.State.User(names.NewLocalUserTag("test")) - c.Assert(err, jc.ErrorIsNil) - c.Assert(user.IsDisabled(), jc.IsFalse) - - // Check model is shared with expected users. 
- sharedModel, err := sharedModelState.Model() - c.Assert(err, jc.ErrorIsNil) - users, err := sharedModel.Users() - c.Assert(err, jc.ErrorIsNil) - var modelUserTags = make([]names.UserTag, len(users)) - for i, u := range users { - modelUserTags[i] = u.UserTag - } - c.Assert(modelUserTags, jc.SameContents, []names.UserTag{ - user.Tag().(names.UserTag), - names.NewLocalUserTag("admin"), - }) -} - func (s *UserSuite) TestUserChangePassword(c *gc.C) { user, err := s.State.User(s.AdminUserTag(c)) c.Assert(err, jc.ErrorIsNil) @@ -149,8 +121,10 @@ c.Assert(err, jc.ErrorIsNil) periodPattern := `(just now|\d+ \S+ ago)` expected := fmt.Sprintf(` -NAME\s+DISPLAY NAME\s+DATE CREATED\s+LAST CONNECTION -admin\s+admin\s+%s\s+%s +Controller: kontroll + +Name\s+Display name\s+Access\s+Date created\s+Last connection +admin.*\s+admin\s+superuser\s+%s\s+%s `[1:], periodPattern, periodPattern) c.Assert(testing.Stdout(ctx), gc.Matches, expected) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/featuretests/dblog_test.go juju-core-2.0.0/src/github.com/juju/juju/featuretests/dblog_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/featuretests/dblog_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/featuretests/dblog_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,7 +4,6 @@ package featuretests import ( - "bufio" "time" "github.com/juju/loggo" @@ -155,35 +154,52 @@ dbLogger.Log(t, "juju.foo", "code.go:42", loggo.INFO, "all is well") dbLogger.Log(t.Add(time.Second), "juju.bar", "go.go:99", loggo.ERROR, "no it isn't") - lines := make(chan string) - go func(numLines int) { + messages := make(chan api.LogMessage) + go func(numMessages int) { client := s.APIState.Client() - reader, err := client.WatchDebugLog(api.DebugLogParams{}) + logMessages, err := client.WatchDebugLog(api.DebugLogParams{}) c.Assert(err, jc.ErrorIsNil) - defer reader.Close() - bufReader := bufio.NewReader(reader) - for n := 0; n < numLines; n++ { - line, err := 
bufReader.ReadString('\n') - c.Assert(err, jc.ErrorIsNil) - lines <- line + for n := 0; n < numMessages; n++ { + messages <- <-logMessages } }(3) - assertLine := func(expected string) { + assertMessage := func(expected api.LogMessage) { select { - case actual := <-lines: - c.Assert(actual, gc.Equals, expected) + case actual := <-messages: + c.Assert(actual, jc.DeepEquals, expected) case <-time.After(coretesting.LongWait): c.Fatal("timed out waiting for log line") } } // Read the 2 lines that are in the logs collection. - assertLine("machine-99: 2015-06-23 13:08:49 INFO juju.foo code.go:42 all is well\n") - assertLine("machine-99: 2015-06-23 13:08:50 ERROR juju.bar go.go:99 no it isn't\n") + assertMessage(api.LogMessage{ + Entity: "machine-99", + Timestamp: t, + Severity: "INFO", + Module: "juju.foo", + Location: "code.go:42", + Message: "all is well", + }) + assertMessage(api.LogMessage{ + Entity: "machine-99", + Timestamp: t.Add(time.Second), + Severity: "ERROR", + Module: "juju.bar", + Location: "go.go:99", + Message: "no it isn't", + }) // Now write and observe another log. This should be read from the oplog. 
dbLogger.Log(t.Add(2*time.Second), "ju.jitsu", "no.go:3", loggo.WARNING, "beep beep") - assertLine("machine-99: 2015-06-23 13:08:51 WARNING ju.jitsu no.go:3 beep beep\n") + assertMessage(api.LogMessage{ + Entity: "machine-99", + Timestamp: t.Add(2 * time.Second), + Severity: "WARNING", + Module: "ju.jitsu", + Location: "no.go:3", + Message: "beep beep", + }) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/featuretests/package_test.go juju-core-2.0.0/src/github.com/juju/juju/featuretests/package_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/featuretests/package_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/featuretests/package_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -6,17 +6,20 @@ import ( "flag" "runtime" - stdtesting "testing" + "testing" + "github.com/juju/cmd" + "github.com/juju/loggo" + jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" + jujucmd "github.com/juju/juju/cmd/juju/commands" coretesting "github.com/juju/juju/testing" ) var runFeatureTests = flag.Bool("featuretests", true, "Run long-running feature tests.") func init() { - flag.Parse() if *runFeatureTests == false { @@ -29,6 +32,7 @@ gc.Suite(&BakeryStorageSuite{}) gc.Suite(&blockSuite{}) gc.Suite(&cmdControllerSuite{}) + gc.Suite(&cmdCredentialSuite{}) gc.Suite(&cmdJujuSuite{}) gc.Suite(&cmdLoginSuite{}) gc.Suite(&cmdModelSuite{}) @@ -39,6 +43,7 @@ gc.Suite(&dumpLogsCommandSuite{}) gc.Suite(&undertakerSuite{}) gc.Suite(&upgradeSuite{}) + gc.Suite(&CmdRelationSuite{}) // TODO (anastasiamac 2016-07-19) Bug#1603585 // These tests cannot run on windows - they require a bootstrapped controller. 
@@ -48,6 +53,28 @@ } } -func TestPackage(t *stdtesting.T) { +func TestPackage(t *testing.T) { coretesting.MgoTestPackage(t) } + +func runCommand(c *gc.C, args ...string) (*cmd.Context, error) { + // Writers need to be reset, because + // they are set globally in the juju/cmd package and will + // return an error if we attempt to run two commands in the + // same test. + loggo.ResetWriters() + ctx := coretesting.Context(c) + command := jujucmd.NewJujuCommand(ctx) + return coretesting.RunCommand(c, command, args...) +} + +func runCommandExpectSuccess(c *gc.C, command string, args ...string) { + _, err := runCommand(c, append([]string{command}, args...)...) + c.Assert(err, jc.ErrorIsNil) +} + +func runCommandExpectFailure(c *gc.C, command, expectedError string, args ...string) { + context, err := runCommand(c, append([]string{command}, args...)...) + c.Assert(err, gc.ErrorMatches, "cmd: error out silently") + c.Assert(coretesting.Stderr(context), jc.Contains, expectedError) +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/featuretests/storage_test.go juju-core-2.0.0/src/github.com/juju/juju/featuretests/storage_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/featuretests/storage_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/featuretests/storage_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -125,9 +125,9 @@ createUnitWithStorage(c, &s.JujuConnSuite, testPool) expected := ` -[Storage] -UNIT ID LOCATION STATUS MESSAGE -storage-block/0 data/0 pending +[Storage] +Unit Id Location Status Message +storage-block/0 data/0 pending `[1:] runList(c, expected) @@ -139,9 +139,9 @@ // There are currently no guarantees about whether storage // will be persistent until it has been provisioned. 
expected := ` -[Storage] -UNIT ID LOCATION STATUS MESSAGE -storage-block/0 data/0 pending +[Storage] +Unit Id Location Status Message +storage-block/0 data/0 pending `[1:] runList(c, expected) @@ -250,7 +250,7 @@ stdout, _, err := runPoolList(c) c.Assert(err, jc.ErrorIsNil) expected := ` -NAME PROVIDER ATTRS +Name Provider Attrs block loop it=works environscoped environscoped environscoped-block environscoped-block @@ -279,7 +279,7 @@ func (s *cmdStorageSuite) TestListPoolsNameNoMatch(c *gc.C) { stdout, stderr, err := runPoolList(c, "--name", "cranky") c.Assert(err, jc.ErrorIsNil) - c.Assert(stderr, gc.Equals, "") + c.Assert(stderr, gc.Equals, "No storage pools to display.\n") c.Assert(stdout, gc.Equals, "") } @@ -443,7 +443,7 @@ stdout, _, err := runVolumeList(c, "0") c.Assert(err, jc.ErrorIsNil) expected := ` -MACHINE UNIT STORAGE ID PROVIDER-ID DEVICE SIZE STATE MESSAGE +Machine Unit Storage Id Provider Id Device Size State Message 0 storage-block/0 data/0 0/0 pending `[1:] @@ -521,7 +521,9 @@ context, err := runAddToUnit(c, u, "nonstorage=1") c.Assert(errors.Cause(err), gc.ErrorMatches, "cmd: error out silently") c.Assert(testing.Stdout(context), gc.Equals, "") - c.Assert(testing.Stderr(context), gc.Equals, "failed to add \"nonstorage\": charm storage \"nonstorage\" not found\n") + c.Assert(testing.Stderr(context), gc.Equals, + `failed to add "nonstorage": adding storage to unit storage-block/0: charm storage "nonstorage" not found`+"\n", + ) instancesAfter, err := s.State.AllStorageInstances() c.Assert(err, jc.ErrorIsNil) @@ -545,9 +547,9 @@ context, err := runJujuCommand(c, "storage", "list") c.Assert(err, jc.ErrorIsNil) c.Assert(testing.Stdout(context), gc.Equals, ` -[Storage] -UNIT ID LOCATION STATUS MESSAGE -storage-filesystem/0 data/0 pending +[Storage] +Unit Id Location Status Message +storage-filesystem/0 data/0 pending `[1:]) c.Assert(testing.Stderr(context), gc.Equals, "") @@ -568,10 +570,10 @@ context, err = runJujuCommand(c, "list-storage") 
c.Assert(err, jc.ErrorIsNil) c.Assert(testing.Stdout(context), gc.Equals, ` -[Storage] -UNIT ID LOCATION STATUS MESSAGE -storage-filesystem/0 data/0 pending -storage-filesystem/0 data/1 pending +[Storage] +Unit Id Location Status Message +storage-filesystem/0 data/0 pending +storage-filesystem/0 data/1 pending `[1:]) c.Assert(testing.Stderr(context), gc.Equals, "") diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/featuretests/syslog_test.go juju-core-2.0.0/src/github.com/juju/juju/featuretests/syslog_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/featuretests/syslog_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/featuretests/syslog_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -186,7 +186,7 @@ msg := s.popMessagesUntil(c, `something happened!`, received) expected := `<11>1 2099-06-01T23:02:01.000000023Z machine-0.%s jujud-machine-agent-%s - - [origin enterpriseID="28978" sofware="jujud-machine-agent" swVersion="%s"][model@28978 controller-uuid="%s" model-uuid="%s"][log@28978 module="juju.featuretests.syslog" source="syslog_test.go:99999"] something happened!` modelID := coretesting.ModelTag.Id() - ctlrID := modelID + ctlrID := coretesting.ControllerTag.Id() c.Check(msg.Message, gc.Equals, fmt.Sprintf(expected, modelID, modelID[:28], version.Current, ctlrID, modelID)) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/featuretests/upgrade_test.go juju-core-2.0.0/src/github.com/juju/juju/featuretests/upgrade_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/featuretests/upgrade_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/featuretests/upgrade_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -299,7 +299,7 @@ return apiState != nil && err == nil } -func (s *upgradeSuite) checkLoginToAPIAsUser(c *gc.C, conf agent.Config, expectFullApi bool) { +func (s *upgradeSuite) checkLoginToAPIAsUser(c *gc.C, conf agent.Config, expectFullAPI bool) { var err error // 
Multiple attempts may be necessary because there is a small gap // between the post-upgrade version being written to the agent's @@ -309,7 +309,7 @@ // can occasionally fail. for a := coretesting.LongAttempt.Start(); a.Next(); { err = s.attemptRestrictedAPIAsUser(c, conf) - switch expectFullApi { + switch expectFullAPI { case FullAPIExposed: if err == nil { return diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/.gitignore juju-core-2.0.0/src/github.com/juju/juju/.gitignore --- juju-core-2.0~beta15/src/github.com/juju/juju/.gitignore 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/.gitignore 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,14 @@ +cmd/juju/juju +cmd/jujud/jujud +cmd/builddb/builddb +cmd/charmd/charmd +cmd/charmload/charmload +provider/ec2/internal/ec2instancetypes/index.json +tags +!tags/ +TAGS +!TAGS/ +.emacs.desktop +.emacs.desktop.lock +*.test +*.sw[nop] diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/instance/instance.go juju-core-2.0.0/src/github.com/juju/juju/instance/instance.go --- juju-core-2.0~beta15/src/github.com/juju/juju/instance/instance.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/instance/instance.go 2016-10-13 14:31:49.000000000 +0000 @@ -57,13 +57,25 @@ // HardwareCharacteristics represents the characteristics of the instance (if known). // Attributes that are nil are unknown or not supported. type HardwareCharacteristics struct { - Arch *string `json:"arch,omitempty" yaml:"arch,omitempty"` - Mem *uint64 `json:"mem,omitempty" yaml:"mem,omitempty"` - RootDisk *uint64 `json:"root-disk,omitempty" yaml:"rootdisk,omitempty"` - CpuCores *uint64 `json:"cpu-cores,omitempty" yaml:"cpucores,omitempty"` - CpuPower *uint64 `json:"cpu-power,omitempty" yaml:"cpupower,omitempty"` - Tags *[]string `json:"tags,omitempty" yaml:"tags,omitempty"` + // Arch is the architecture of the processor. 
+ Arch *string `json:"arch,omitempty" yaml:"arch,omitempty"` + // Mem is the size of RAM in megabytes. + Mem *uint64 `json:"mem,omitempty" yaml:"mem,omitempty"` + + // RootDisk is the size of the disk in megabytes. + RootDisk *uint64 `json:"root-disk,omitempty" yaml:"rootdisk,omitempty"` + + // CpuCores is the number of logical cores the processor has. + CpuCores *uint64 `json:"cpu-cores,omitempty" yaml:"cpucores,omitempty"` + + // CpuPower is a relative representation of the speed of the processor. + CpuPower *uint64 `json:"cpu-power,omitempty" yaml:"cpupower,omitempty"` + + // Tags is a list of strings that identify the machine. + Tags *[]string `json:"tags,omitempty" yaml:"tags,omitempty"` + + // AvailabilityZone defines the zone in which the machine resides. AvailabilityZone *string `json:"availability-zone,omitempty" yaml:"availabilityzone,omitempty"` } @@ -73,7 +85,7 @@ strs = append(strs, fmt.Sprintf("arch=%s", *hc.Arch)) } if hc.CpuCores != nil { - strs = append(strs, fmt.Sprintf("cpu-cores=%d", *hc.CpuCores)) + strs = append(strs, fmt.Sprintf("cores=%d", *hc.CpuCores)) } if hc.CpuPower != nil { strs = append(strs, fmt.Sprintf("cpu-power=%d", *hc.CpuPower)) @@ -93,16 +105,6 @@ return strings.Join(strs, " ") } -// Implement gnuflag.Value -func (hc *HardwareCharacteristics) Set(s string) error { - parsed, err := ParseHardware(s) - if err != nil { - return err - } - *hc = parsed - return nil -} - // MustParseHardware constructs a HardwareCharacteristics from the supplied arguments, // as Parse, but panics on failure. 
func MustParseHardware(args ...string) HardwareCharacteristics { @@ -143,7 +145,7 @@ switch name { case "arch": err = hc.setArch(str) - case "cpu-cores": + case "cores": err = hc.setCpuCores(str) case "cpu-power": err = hc.setCpuPower(str) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/instance/instance_test.go juju-core-2.0.0/src/github.com/juju/juju/instance/instance_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/instance/instance_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/instance/instance_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -96,36 +96,36 @@ err: `bad "arch" characteristic: already set`, }, - // "cpu-cores" in detail. + // "cores" in detail. { - summary: "set cpu-cores empty", - args: []string{"cpu-cores="}, + summary: "set cores empty", + args: []string{"cores="}, }, { - summary: "set cpu-cores zero", - args: []string{"cpu-cores=0"}, + summary: "set cores zero", + args: []string{"cores=0"}, }, { - summary: "set cpu-cores", - args: []string{"cpu-cores=4"}, + summary: "set cores", + args: []string{"cores=4"}, }, { - summary: "set nonsense cpu-cores 1", - args: []string{"cpu-cores=cheese"}, - err: `bad "cpu-cores" characteristic: must be a non-negative integer`, + summary: "set nonsense cores 1", + args: []string{"cores=cheese"}, + err: `bad "cores" characteristic: must be a non-negative integer`, }, { - summary: "set nonsense cpu-cores 2", - args: []string{"cpu-cores=-1"}, - err: `bad "cpu-cores" characteristic: must be a non-negative integer`, + summary: "set nonsense cores 2", + args: []string{"cores=-1"}, + err: `bad "cores" characteristic: must be a non-negative integer`, }, { - summary: "set nonsense cpu-cores 3", - args: []string{"cpu-cores=123.45"}, - err: `bad "cpu-cores" characteristic: must be a non-negative integer`, + summary: "set nonsense cores 3", + args: []string{"cores=123.45"}, + err: `bad "cores" characteristic: must be a non-negative integer`, }, { - summary: "double 
set cpu-cores together", - args: []string{"cpu-cores=128 cpu-cores=1"}, - err: `bad "cpu-cores" characteristic: already set`, + summary: "double set cores together", + args: []string{"cores=128 cores=1"}, + err: `bad "cores" characteristic: already set`, }, { - summary: "double set cpu-cores separately", - args: []string{"cpu-cores=128", "cpu-cores=1"}, - err: `bad "cpu-cores" characteristic: already set`, + summary: "double set cores separately", + args: []string{"cores=128", "cores=1"}, + err: `bad "cores" characteristic: already set`, }, // "cpu-power" in detail. @@ -264,10 +264,10 @@ // Everything at once. { summary: "kitchen sink together", - args: []string{" root-disk=4G mem=2T arch=i386 cpu-cores=4096 cpu-power=9001 availability-zone=a_zone"}, + args: []string{" root-disk=4G mem=2T arch=i386 cores=4096 cpu-power=9001 availability-zone=a_zone"}, }, { summary: "kitchen sink separately", - args: []string{"root-disk=4G", "mem=2T", "cpu-cores=4096", "cpu-power=9001", "arch=armhf", "availability-zone=a_zone"}, + args: []string{"root-disk=4G", "mem=2T", "cores=4096", "cpu-power=9001", "arch=armhf", "availability-zone=a_zone"}, }, } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/instance/namespace.go juju-core-2.0.0/src/github.com/juju/juju/instance/namespace.go --- juju-core-2.0~beta15/src/github.com/juju/juju/instance/namespace.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/instance/namespace.go 2016-10-13 14:31:49.000000000 +0000 @@ -19,6 +19,7 @@ type Namespace interface { // Prefix returns the common part of the hostnames. i.e. 'juju-xxxxxx-' Prefix() string + // Hostname returns a name suitable to be used for a machine hostname. // This function returns an error if the machine tags is invalid. Hostname(machineID string) (string, error) @@ -26,6 +27,9 @@ // MachineTag does the reverse of the Hostname method, and extracts the // Tag from the hostname. 
MachineTag(hostname string) (names.MachineTag, error) + + // Value returns the input prefixed with the namespace prefix. + Value(string) string } type namespace struct { @@ -50,7 +54,12 @@ return "", errors.Errorf("machine ID %q is not a valid machine", machineID) } machineID = strings.Replace(machineID, "/", "-", -1) - return n.Prefix() + machineID, nil + return n.Value(machineID), nil +} + +// Value returns the input prefixed with the namespace prefix. +func (n *namespace) Value(s string) string { + return n.Prefix() + s } // Hostname implements Namespace. diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/juju/api.go juju-core-2.0.0/src/github.com/juju/juju/juju/api.go --- juju-core-2.0~beta15/src/github.com/juju/juju/juju/api.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/juju/api.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,13 +4,11 @@ package juju import ( - "encoding/json" "time" "github.com/juju/errors" "github.com/juju/loggo" "gopkg.in/juju/names.v2" - "gopkg.in/macaroon.v1" "github.com/juju/juju/api" "github.com/juju/juju/jujuclient" @@ -109,24 +107,49 @@ // controllers that redirect involves them having well known // public addresses that won't change over time. hostPorts := st.APIHostPorts() - err = updateControllerAddresses(args.Store, args.ControllerName, controller, hostPorts, addrConnectedTo) + agentVersion := "" + if v, ok := st.ServerVersion(); ok { + agentVersion = v.String() + } + params := UpdateControllerParams{ + AgentVersion: agentVersion, + AddrConnectedTo: []network.HostPort{addrConnectedTo}, + CurrentHostPorts: hostPorts, + } + err = updateControllerDetailsFromLogin(args.Store, args.ControllerName, controller, params) if err != nil { logger.Errorf("cannot cache API addresses: %v", err) } - if apiInfo.Tag == nil && !apiInfo.SkipLogin { - // We used macaroon auth to login; save the username - // that we've logged in as. 
- user, ok := st.AuthTag().(names.UserTag) - if ok && !user.IsLocal() { - if err := args.Store.UpdateAccount(args.ControllerName, jujuclient.AccountDetails{ - User: user.Canonical(), - }); err != nil { - logger.Errorf("cannot update account information: %v", err) + + // Process the account details obtained from login. + var accountDetails *jujuclient.AccountDetails + user, ok := st.AuthTag().(names.UserTag) + if !apiInfo.SkipLogin { + if ok { + if accountDetails, err = args.Store.AccountDetails(args.ControllerName); err != nil { + if !errors.IsNotFound(err) { + logger.Errorf("cannot load local account information: %v", err) + } + } else { + accountDetails.LastKnownAccess = st.ControllerAccess() } - } else { + } + if ok && !user.IsLocal() && apiInfo.Tag == nil { + // We used macaroon auth to login; save the username + // that we've logged in as. + accountDetails = &jujuclient.AccountDetails{ + User: user.Id(), + LastKnownAccess: st.ControllerAccess(), + } + } else if apiInfo.Tag == nil { logger.Errorf("unexpected logged-in username %v", st.AuthTag()) } } + if accountDetails != nil { + if err := args.Store.UpdateAccount(args.ControllerName, *accountDetails); err != nil { + logger.Errorf("cannot update account information: %v", err) + } + } return st, nil } @@ -152,25 +175,17 @@ return apiInfo, controller, nil } account := args.AccountDetails + if account.User != "" { + userTag := names.NewUserTag(account.User) + if userTag.IsLocal() { + apiInfo.Tag = userTag + } + } if args.AccountDetails.Password != "" { - // If a password is available, we always use - // that. - // - // TODO(axw) make it invalid to store both - // password and macaroon in accounts.yaml? - apiInfo.Tag = names.NewUserTag(account.User) + // If a password is available, we always use that. + // If no password is recorded, we'll attempt to + // authenticate using macaroons. 
apiInfo.Password = account.Password - } else if args.AccountDetails.Macaroon != "" { - var m macaroon.Macaroon - if err := json.Unmarshal([]byte(account.Macaroon), &m); err != nil { - return nil, nil, errors.Trace(err) - } - apiInfo.Tag = names.NewUserTag(account.User) - apiInfo.Macaroons = []macaroon.Slice{{&m}} - } else { - // Neither a password nor a local user macaroon was - // found, so we'll use external macaroon authentication, - // which requires that no tag be specified. } return apiInfo, controller, nil } @@ -293,36 +308,71 @@ return false } -// UpdateControllerAddresses writes any new api addresses to the client controller file. +// UpdateControllerParams holds values used to update a controller details +// after bootstrap or a login operation. +type UpdateControllerParams struct { + // AgentVersion is the version of the controller agent. + AgentVersion string + + // CurrentHostPorts are the available api addresses. + CurrentHostPorts [][]network.HostPort + + // AddrConnectedTo are the previously known api addresses. + AddrConnectedTo []network.HostPort + + // ModelCount (when set) is the number of models visible to the user. + ModelCount *int + + // ControllerMachineCount (when set) is the total number of controller machines in the environment. + ControllerMachineCount *int + + // MachineCount (when set) is the total number of machines in the models. + MachineCount *int +} + +// UpdateControllerDetailsFromLogin writes any new api addresses and other relevant details +// to the client controller file. // Controller may be specified by a UUID or name, and must already exist. 
-func UpdateControllerAddresses( +func UpdateControllerDetailsFromLogin( store jujuclient.ControllerStore, controllerName string, - currentHostPorts [][]network.HostPort, addrConnectedTo ...network.HostPort, + params UpdateControllerParams, ) error { controllerDetails, err := store.ControllerByName(controllerName) if err != nil { return errors.Trace(err) } - return updateControllerAddresses( - store, controllerName, controllerDetails, - currentHostPorts, addrConnectedTo..., - ) + return updateControllerDetailsFromLogin(store, controllerName, controllerDetails, params) } -func updateControllerAddresses( +func updateControllerDetailsFromLogin( store jujuclient.ControllerStore, controllerName string, controllerDetails *jujuclient.ControllerDetails, - currentHostPorts [][]network.HostPort, addrConnectedTo ...network.HostPort, + params UpdateControllerParams, ) error { // Get the new endpoint addresses. - addrs, unresolvedAddrs, addrsChanged := PrepareEndpointsForCaching(*controllerDetails, currentHostPorts, addrConnectedTo...) - if !addrsChanged { + addrs, unresolvedAddrs, addrsChanged := PrepareEndpointsForCaching(*controllerDetails, params.CurrentHostPorts, params.AddrConnectedTo...) + agentChanged := params.AgentVersion != controllerDetails.AgentVersion + if !addrsChanged && !agentChanged && params.ModelCount == nil && params.MachineCount == nil && params.ControllerMachineCount == nil { return nil } // Write the new controller data. 
- controllerDetails.APIEndpoints = addrs - controllerDetails.UnresolvedAPIEndpoints = unresolvedAddrs + if addrsChanged { + controllerDetails.APIEndpoints = addrs + controllerDetails.UnresolvedAPIEndpoints = unresolvedAddrs + } + if agentChanged { + controllerDetails.AgentVersion = params.AgentVersion + } + if params.ModelCount != nil { + controllerDetails.ModelCount = params.ModelCount + } + if params.MachineCount != nil { + controllerDetails.MachineCount = params.MachineCount + } + if params.ControllerMachineCount != nil { + controllerDetails.ControllerMachineCount = *params.ControllerMachineCount + } err := store.UpdateController(controllerName, *controllerDetails) return errors.Trace(err) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/juju/api_test.go juju-core-2.0.0/src/github.com/juju/juju/juju/api_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/juju/api_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/juju/api_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -129,7 +129,7 @@ return expectState, nil } - st, err := newAPIConnectionFromNames(c, "noconfig", "admin@local/admin", store, apiOpen) + st, err := newAPIConnectionFromNames(c, "noconfig", "admin/admin", store, apiOpen) c.Assert(err, jc.ErrorIsNil) c.Assert(st, gc.Equals, expectState) c.Assert(called, gc.Equals, 1) @@ -139,23 +139,54 @@ jc.DeepEquals, []string{"0.1.2.3:1234", "[2001:db8::1]:1234"}, ) + c.Assert( + store.Controllers["noconfig"].AgentVersion, + gc.Equals, + "1.2.3", + ) controllerBefore, err := store.ControllerByName("noconfig") c.Assert(err, jc.ErrorIsNil) - // If APIHostPorts haven't changed, then the store won't be updated. + // If APIHostPorts or agent version haven't changed, then the store won't be updated. 
stubStore := jujuclienttesting.WrapClientStore(store) - st, err = newAPIConnectionFromNames(c, "noconfig", "admin@local/admin", stubStore, apiOpen) + st, err = newAPIConnectionFromNames(c, "noconfig", "admin/admin", stubStore, apiOpen) c.Assert(err, jc.ErrorIsNil) c.Assert(st, gc.Equals, expectState) c.Assert(called, gc.Equals, 2) - stubStore.CheckCallNames(c, "AccountDetails", "ModelByName", "ControllerByName") + stubStore.CheckCallNames(c, "AccountDetails", "ModelByName", "ControllerByName", "AccountDetails", "UpdateAccount") controllerAfter, err := store.ControllerByName("noconfig") c.Assert(err, jc.ErrorIsNil) c.Assert(controllerBefore, gc.DeepEquals, controllerAfter) } +func (s *NewAPIClientSuite) TestUpdatesLastKnownAccess(c *gc.C) { + store := newClientStore(c, "noconfig") + + called := 0 + expectState := mockedAPIState(mockedHostPort | mockedModelTag) + apiOpen := func(apiInfo *api.Info, opts api.DialOpts) (api.Connection, error) { + checkCommonAPIInfoAttrs(c, apiInfo, opts) + c.Check(apiInfo.ModelTag, gc.Equals, names.NewModelTag(fakeUUID)) + called++ + return expectState, nil + } + + stubStore := jujuclienttesting.WrapClientStore(store) + st, err := newAPIConnectionFromNames(c, "noconfig", "admin/admin", stubStore, apiOpen) + c.Assert(err, jc.ErrorIsNil) + c.Assert(st, gc.Equals, expectState) + c.Assert(called, gc.Equals, 1) + stubStore.CheckCallNames(c, "AccountDetails", "ModelByName", "ControllerByName", "UpdateController", "AccountDetails", "UpdateAccount") + + c.Assert( + store.Accounts["noconfig"], + jc.DeepEquals, + jujuclient.AccountDetails{User: "admin", Password: "hunter2", LastKnownAccess: "superuser"}, + ) +} + func (s *NewAPIClientSuite) TestWithInfoNoAddresses(c *gc.C) { store := newClientStore(c, "noconfig") err := store.UpdateController("noconfig", jujuclient.ControllerDetails{ @@ -206,7 +237,7 @@ return nil, fmt.Errorf("OpenAPI called too many times") } - st0, err := newAPIConnectionFromNames(c, "ctl", "admin@local/admin", store, 
redirOpen) + st0, err := newAPIConnectionFromNames(c, "ctl", "admin/admin", store, redirOpen) c.Assert(err, jc.ErrorIsNil) c.Assert(openCount, gc.Equals, 2) st := st0.(*mockAPIState) @@ -220,7 +251,7 @@ } func checkCommonAPIInfoAttrs(c *gc.C, apiInfo *api.Info, opts api.DialOpts) { - c.Check(apiInfo.Tag, gc.Equals, names.NewUserTag("admin@local")) + c.Check(apiInfo.Tag, gc.Equals, names.NewUserTag("admin")) c.Check(string(apiInfo.CACert), gc.Equals, "certificate") c.Check(apiInfo.Password, gc.Equals, "hunter2") c.Check(opts, gc.DeepEquals, api.DefaultDialOpts()) @@ -239,28 +270,18 @@ c.Assert(st, gc.IsNil) } -func setEndpointAddressAndHostname(c *gc.C, store jujuclient.ControllerStore, addr, host string) { - // Populate the controller details with known address and hostname. - details, err := store.ControllerByName("my-controller") - c.Assert(err, jc.ErrorIsNil) - details.APIEndpoints = []string{addr} - details.UnresolvedAPIEndpoints = []string{host} - err = store.UpdateController("my-controller", *details) - c.Assert(err, jc.ErrorIsNil) -} - // newClientStore returns a client store that contains information -// based on the given controller namd and info. +// based on the given controller name and info. func newClientStore(c *gc.C, controllerName string) *jujuclienttesting.MemStore { store := jujuclienttesting.NewMemStore() - err := store.UpdateController(controllerName, jujuclient.ControllerDetails{ + err := store.AddController(controllerName, jujuclient.ControllerDetails{ ControllerUUID: fakeUUID, CACert: "certificate", APIEndpoints: []string{"0.1.2.3:5678"}, }) c.Assert(err, jc.ErrorIsNil) - err = store.UpdateModel(controllerName, "admin@local/admin", jujuclient.ModelDetails{ + err = store.UpdateModel(controllerName, "admin/admin", jujuclient.ModelDetails{ fakeUUID, }) c.Assert(err, jc.ErrorIsNil) @@ -269,7 +290,7 @@ // if "creds" is not initialised. If it is, it may overwrite // this one. 
err = store.UpdateAccount(controllerName, jujuclient.AccountDetails{ - User: "admin@local", + User: "admin", Password: "hunter2", }) c.Assert(err, jc.ErrorIsNil) @@ -339,7 +360,7 @@ ControllerUUID: fakeUUID, CACert: "certificate", } - err := s.ControllerStore.UpdateController(name, controllerDetails) + err := s.ControllerStore.AddController(name, controllerDetails) c.Assert(err, jc.ErrorIsNil) return controllerDetails } @@ -349,6 +370,10 @@ c.Assert(err, jc.ErrorIsNil) c.Assert(found.UnresolvedAPIEndpoints, check, 0) c.Assert(found.APIEndpoints, check, 0) + c.Assert(found.AgentVersion, gc.Equals, "1.2.3") + c.Assert(found.ModelCount, gc.IsNil) + c.Assert(found.MachineCount, gc.IsNil) + c.Assert(found.ControllerMachineCount, gc.Equals, 0) } func (s *CacheAPIEndpointsSuite) assertControllerUpdated(c *gc.C, name string) { @@ -361,7 +386,12 @@ func (s *CacheAPIEndpointsSuite) TestPrepareEndpointsForCaching(c *gc.C) { s.assertCreateController(c, "controller-name1") - err := juju.UpdateControllerAddresses(s.ControllerStore, "controller-name1", s.hostPorts, s.apiHostPort) + params := juju.UpdateControllerParams{ + AgentVersion: "1.2.3", + AddrConnectedTo: []network.HostPort{s.apiHostPort}, + CurrentHostPorts: s.hostPorts, + } + err := juju.UpdateControllerDetailsFromLogin(s.ControllerStore, "controller-name1", params) c.Assert(err, jc.ErrorIsNil) controllerDetails, err := s.ControllerStore.ControllerByName("controller-name1") c.Assert(err, jc.ErrorIsNil) @@ -369,6 +399,30 @@ s.assertControllerUpdated(c, "controller-name1") } +func intptr(i int) *int { + return &i +} + +func (s *CacheAPIEndpointsSuite) TestUpdateModelMachineCount(c *gc.C) { + s.assertCreateController(c, "controller-name1") + params := juju.UpdateControllerParams{ + AgentVersion: "1.2.3", + ControllerMachineCount: intptr(1), + ModelCount: intptr(2), + MachineCount: intptr(3), + } + err := juju.UpdateControllerDetailsFromLogin(s.ControllerStore, "controller-name1", params) + c.Assert(err, jc.ErrorIsNil) + 
controllerDetails, err := s.ControllerStore.ControllerByName("controller-name1") + c.Assert(err, jc.ErrorIsNil) + c.Assert(controllerDetails.UnresolvedAPIEndpoints, gc.HasLen, 0) + c.Assert(controllerDetails.APIEndpoints, gc.HasLen, 0) + c.Assert(controllerDetails.AgentVersion, gc.Equals, "1.2.3") + c.Assert(controllerDetails.ControllerMachineCount, gc.Equals, 1) + c.Assert(*controllerDetails.ModelCount, gc.Equals, 2) + c.Assert(*controllerDetails.MachineCount, gc.Equals, 3) +} + func (s *CacheAPIEndpointsSuite) TestResolveSkippedWhenHostnamesUnchanged(c *gc.C) { // Test that if new endpoints hostnames are the same as the // cached, no DNS resolution happens (i.e. we don't resolve on @@ -383,7 +437,7 @@ CACert: "certificate", UnresolvedAPIEndpoints: network.HostPortsToStrings(hps), } - err := s.ControllerStore.UpdateController("controller-name", controllerDetails) + err := s.ControllerStore.AddController("controller-name", controllerDetails) c.Assert(err, jc.ErrorIsNil) addrs, hosts, changed := juju.PrepareEndpointsForCaching( @@ -432,7 +486,7 @@ CACert: "certificate", UnresolvedAPIEndpoints: strUnsorted, } - err := s.ControllerStore.UpdateController("controller-name", controllerDetails) + err := s.ControllerStore.AddController("controller-name", controllerDetails) c.Assert(err, jc.ErrorIsNil) addrs, hosts, changed := juju.PrepareEndpointsForCaching( @@ -482,7 +536,7 @@ UnresolvedAPIEndpoints: strUnsorted, APIEndpoints: strResolved, } - err := s.ControllerStore.UpdateController("controller-name", controllerDetails) + err := s.ControllerStore.AddController("controller-name", controllerDetails) c.Assert(err, jc.ErrorIsNil) addrs, hosts, changed := juju.PrepareEndpointsForCaching( @@ -530,7 +584,7 @@ ControllerUUID: fakeUUID, CACert: "certificate", } - err := s.ControllerStore.UpdateController("controller-name", controllerDetails) + err := s.ControllerStore.AddController("controller-name", controllerDetails) c.Assert(err, jc.ErrorIsNil) addrs, hosts, changed := 
juju.PrepareEndpointsForCaching( diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/juju/deploy.go juju-core-2.0.0/src/github.com/juju/juju/juju/deploy.go --- juju-core-2.0~beta15/src/github.com/juju/juju/juju/deploy.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/juju/deploy.go 2016-10-13 14:31:49.000000000 +0000 @@ -36,10 +36,18 @@ } type ApplicationDeployer interface { - Model() (*state.Model, error) AddApplication(state.AddApplicationArgs) (*state.Application, error) } +type UnitAssigner interface { + AssignUnit(*state.Unit, state.AssignmentPolicy) error + AssignUnitWithPlacement(*state.Unit, *instance.Placement) error +} + +type UnitAdder interface { + AddUnit() (*state.Unit, error) +} + // DeployApplication takes a charm and various parameters and deploys it. func DeployApplication(st ApplicationDeployer, args DeployApplicationParams) (*state.Application, error) { settings, err := args.Charm.Config().ValidateSettings(args.ConfigSettings) @@ -104,25 +112,31 @@ // AddUnits starts n units of the given application using the specified placement // directives to allocate the machines. -func AddUnits(st *state.State, svc *state.Application, n int, placement []*instance.Placement) ([]*state.Unit, error) { +func AddUnits( + unitAssigner UnitAssigner, + unitAdder UnitAdder, + appName string, + n int, + placement []*instance.Placement, +) ([]*state.Unit, error) { units := make([]*state.Unit, n) // Hard code for now till we implement a different approach. policy := state.AssignCleanEmpty // TODO what do we do if we fail half-way through this process? for i := 0; i < n; i++ { - unit, err := svc.AddUnit() + unit, err := unitAdder.AddUnit() if err != nil { - return nil, errors.Annotatef(err, "cannot add unit %d/%d to application %q", i+1, n, svc.Name()) + return nil, errors.Annotatef(err, "cannot add unit %d/%d to application %q", i+1, n, appName) } // Are there still placement directives to use? 
if i > len(placement)-1 { - if err := st.AssignUnit(unit, policy); err != nil { + if err := unitAssigner.AssignUnit(unit, policy); err != nil { return nil, errors.Trace(err) } units[i] = unit continue } - if err := st.AssignUnitWithPlacement(unit, placement[i]); err != nil { + if err := unitAssigner.AssignUnitWithPlacement(unit, placement[i]); err != nil { return nil, errors.Annotatef(err, "adding new machine to host unit %q", unit.Name()) } units[i] = unit diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/juju/deploy_test.go juju-core-2.0.0/src/github.com/juju/juju/juju/deploy_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/juju/deploy_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/juju/deploy_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -251,7 +251,7 @@ func (s *DeployLocalSuite) TestDeployConstraints(c *gc.C) { err := s.State.SetModelConstraints(constraints.MustParse("mem=2G")) c.Assert(err, jc.ErrorIsNil) - serviceCons := constraints.MustParse("cpu-cores=2") + serviceCons := constraints.MustParse("cores=2") service, err := juju.DeployApplication(s.State, juju.DeployApplicationParams{ ApplicationName: "bob", @@ -265,7 +265,7 @@ func (s *DeployLocalSuite) TestDeployNumUnits(c *gc.C) { f := &fakeDeployer{State: s.State} - serviceCons := constraints.MustParse("cpu-cores=2") + serviceCons := constraints.MustParse("cores=2") _, err := juju.DeployApplication(f, juju.DeployApplicationParams{ ApplicationName: "bob", @@ -284,7 +284,7 @@ func (s *DeployLocalSuite) TestDeployForceMachineId(c *gc.C) { f := &fakeDeployer{State: s.State} - serviceCons := constraints.MustParse("cpu-cores=2") + serviceCons := constraints.MustParse("cores=2") _, err := juju.DeployApplication(f, juju.DeployApplicationParams{ ApplicationName: "bob", @@ -306,7 +306,7 @@ func (s *DeployLocalSuite) TestDeployForceMachineIdWithContainer(c *gc.C) { f := &fakeDeployer{State: s.State} - serviceCons := constraints.MustParse("cpu-cores=2") + 
serviceCons := constraints.MustParse("cores=2") _, err := juju.DeployApplication(f, juju.DeployApplicationParams{ ApplicationName: "bob", @@ -327,7 +327,7 @@ func (s *DeployLocalSuite) TestDeploy(c *gc.C) { f := &fakeDeployer{State: s.State} - serviceCons := constraints.MustParse("cpu-cores=2") + serviceCons := constraints.MustParse("cores=2") placement := []*instance.Placement{ {Scope: s.State.ModelUUID(), Directive: "valid"}, {Scope: "#", Directive: "0"}, @@ -353,7 +353,7 @@ func (s *DeployLocalSuite) TestDeployWithFewerPlacement(c *gc.C) { f := &fakeDeployer{State: s.State} - serviceCons := constraints.MustParse("cpu-cores=2") + serviceCons := constraints.MustParse("cores=2") placement := []*instance.Placement{{Scope: s.State.ModelUUID(), Directive: "valid"}} _, err := juju.DeployApplication(f, juju.DeployApplicationParams{ diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/juju/home.go juju-core-2.0.0/src/github.com/juju/juju/juju/home.go --- juju-core-2.0~beta15/src/github.com/juju/juju/juju/home.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/juju/home.go 2016-10-13 14:31:49.000000000 +0000 @@ -19,7 +19,6 @@ if jujuXDGDataHome == "" { return errors.New("cannot determine juju data home, required environment variables are not set") } - osenv.SetJujuXDGDataHome(jujuXDGDataHome) charmrepo.CacheDir = osenv.JujuXDGDataHomePath("charmcache") if err := ssh.LoadClientKeys(osenv.JujuXDGDataHomePath("ssh")); err != nil { return errors.Annotate(err, "cannot load ssh client keys") diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/juju/mock_test.go juju-core-2.0.0/src/github.com/juju/juju/juju/mock_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/juju/mock_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/juju/mock_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -10,6 +10,8 @@ "github.com/juju/juju/api" "github.com/juju/juju/network" + "github.com/juju/juju/testing" + 
"github.com/juju/version" ) type mockAPIState struct { @@ -59,7 +61,7 @@ return &mockAPIState{ apiHostPorts: apiHostPorts, modelTag: modelTag, - controllerTag: modelTag, + controllerTag: testing.ControllerTag.Id(), addr: addr, } } @@ -71,6 +73,10 @@ return nil } +func (s *mockAPIState) ServerVersion() (version.Number, bool) { + return version.MustParse("1.2.3"), true +} + func (s *mockAPIState) Addr() string { return s.addr } @@ -79,12 +85,31 @@ return s.apiHostPorts } -func (s *mockAPIState) ModelTag() (names.ModelTag, error) { - return names.ParseModelTag(s.modelTag) +func (s *mockAPIState) ModelTag() (names.ModelTag, bool) { + if s.modelTag == "" { + return names.ModelTag{}, false + } + t, err := names.ParseModelTag(s.modelTag) + if err != nil { + panic("bad model tag") + } + return t, true +} + +func (s *mockAPIState) ControllerTag() names.ControllerTag { + t, err := names.ParseControllerTag(s.controllerTag) + if err != nil { + panic("bad controller tag") + } + return t +} + +func (s *mockAPIState) AuthTag() names.Tag { + return names.NewUserTag("admin") } -func (s *mockAPIState) ControllerTag() (names.ModelTag, error) { - return names.ParseModelTag(s.controllerTag) +func (s *mockAPIState) ControllerAccess() string { + return "superuser" } func panicAPIOpen(apiInfo *api.Info, opts api.DialOpts) (api.Connection, error) { diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/juju/osenv/home.go juju-core-2.0.0/src/github.com/juju/juju/juju/osenv/home.go --- juju-core-2.0~beta15/src/github.com/juju/juju/juju/osenv/home.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/juju/osenv/home.go 2016-10-13 14:31:49.000000000 +0000 @@ -37,29 +37,23 @@ func JujuXDGDataHome() string { jujuXDGDataHomeMu.Lock() defer jujuXDGDataHomeMu.Unlock() - if jujuXDGDataHome == "" { - panic("juju home hasn't been initialized") - } return jujuXDGDataHome } -// IsJujuXDGDataHomeSet is a way to check if SetJuuHome has been called. 
-func IsJujuXDGDataHomeSet() bool { - jujuXDGDataHomeMu.Lock() - defer jujuXDGDataHomeMu.Unlock() - return jujuXDGDataHome != "" -} - // JujuXDGDataHomePath returns the path to a file in the // current juju home. func JujuXDGDataHomePath(names ...string) string { - all := append([]string{JujuXDGDataHome()}, names...) + all := append([]string{JujuXDGDataHomeDir()}, names...) return filepath.Join(all...) } // JujuXDGDataHomeDir returns the directory where juju should store application-specific files func JujuXDGDataHomeDir() string { - JujuXDGDataHomeDir := os.Getenv(JujuXDGDataHomeEnvKey) + JujuXDGDataHomeDir := JujuXDGDataHome() + if JujuXDGDataHomeDir != "" { + return JujuXDGDataHomeDir + } + JujuXDGDataHomeDir = os.Getenv(JujuXDGDataHomeEnvKey) if JujuXDGDataHomeDir == "" { if runtime.GOOS == "windows" { JujuXDGDataHomeDir = jujuXDGDataHomeWin() @@ -79,7 +73,7 @@ // If xdg config home is not defined, the standard indicates that its default value // is $HOME/.local/share home := utils.Home() - return filepath.Join(home, ".local/share", "juju") + return filepath.Join(home, ".local", "share", "juju") } // jujuXDGDataHomeWin returns the directory where juju should store application-specific files on Windows. 
diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/juju/osenv/home_test.go juju-core-2.0.0/src/github.com/juju/juju/juju/osenv/home_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/juju/osenv/home_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/juju/osenv/home_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,9 +4,6 @@ package osenv_test import ( - "path/filepath" - - jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" "github.com/juju/juju/juju/osenv" @@ -19,29 +16,19 @@ var _ = gc.Suite(&JujuXDGDataHomeSuite{}) +func (s *JujuXDGDataHomeSuite) TearDownTest(c *gc.C) { + osenv.SetJujuXDGDataHome("") +} + func (s *JujuXDGDataHomeSuite) TestStandardHome(c *gc.C) { testJujuXDGDataHome := c.MkDir() osenv.SetJujuXDGDataHome(testJujuXDGDataHome) c.Assert(osenv.JujuXDGDataHome(), gc.Equals, testJujuXDGDataHome) } -func (s *JujuXDGDataHomeSuite) TestErrorHome(c *gc.C) { - // Invalid juju home leads to panic when retrieving. - f := func() { _ = osenv.JujuXDGDataHome() } - c.Assert(f, gc.PanicMatches, "juju home hasn't been initialized") - f = func() { _ = osenv.JujuXDGDataHomePath("current-environment") } - c.Assert(f, gc.PanicMatches, "juju home hasn't been initialized") -} - func (s *JujuXDGDataHomeSuite) TestHomePath(c *gc.C) { testJujuHome := c.MkDir() osenv.SetJujuXDGDataHome(testJujuHome) - envPath := osenv.JujuXDGDataHomePath("current-environment") - c.Assert(envPath, gc.Equals, filepath.Join(testJujuHome, "current-environment")) -} - -func (s *JujuXDGDataHomeSuite) TestIsHomeSet(c *gc.C) { - c.Assert(osenv.IsJujuXDGDataHomeSet(), jc.IsFalse) - osenv.SetJujuXDGDataHome(c.MkDir()) - c.Assert(osenv.IsJujuXDGDataHomeSet(), jc.IsTrue) + envPath := osenv.JujuXDGDataHomeDir() + c.Assert(envPath, gc.Equals, testJujuHome) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/juju/permission/model.go juju-core-2.0.0/src/github.com/juju/juju/juju/permission/model.go --- 
juju-core-2.0~beta15/src/github.com/juju/juju/juju/permission/model.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/juju/permission/model.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,40 +0,0 @@ -// Copyright 2016 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package permission - -import ( - "github.com/juju/errors" -) - -// ModelAccess defines the permission that a user has on a model. -type ModelAccess int - -const ( - _ = iota - - // ModelReadAccess allows a user to read a model but not to change it. - ModelReadAccess ModelAccess = iota - - // ModelWriteAccess allows a user write access to the model. - ModelWriteAccess ModelAccess = iota - - // ModelAdminAccess allows a user to perform administrative tasks on a model. - ModelAdminAccess ModelAccess = iota -) - -// ParseModelAccess parses a user-facing string representation of a model -// access permission into a logical representation. -func ParseModelAccess(access string) (ModelAccess, error) { - var fail = ModelAccess(0) - switch access { - case "read": - return ModelReadAccess, nil - case "write": - return ModelWriteAccess, nil - case "admin": - return ModelAdminAccess, nil - default: - return fail, errors.Errorf("invalid model access permission %q", access) - } -} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/juju/permission/model_test.go juju-core-2.0.0/src/github.com/juju/juju/juju/permission/model_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/juju/permission/model_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/juju/permission/model_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,41 +0,0 @@ -// Copyright 2016 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. 
- -package permission_test - -import ( - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" - - "github.com/juju/juju/juju/permission" -) - -type permissionSuite struct{} - -var _ = gc.Suite(&permissionSuite{}) - -func (s *permissionSuite) TestParseModelAccessValid(c *gc.C) { - var ( - access permission.ModelAccess - err error - ) - - _, err = permission.ParseModelAccess("") - c.Check(err, gc.ErrorMatches, "invalid model access permission.*") - - access, err = permission.ParseModelAccess("read") - c.Check(err, jc.ErrorIsNil) - c.Check(access, gc.Equals, permission.ModelReadAccess) - - access, err = permission.ParseModelAccess("write") - c.Check(err, jc.ErrorIsNil) - c.Check(access, gc.Equals, permission.ModelWriteAccess) - - access, err = permission.ParseModelAccess("orange") - c.Check(err, gc.ErrorMatches, "invalid model access permission.*") -} - -func (s *permissionSuite) TestParseModelAccessInvalid(c *gc.C) { - _, err := permission.ParseModelAccess("preposterous") - c.Check(err, gc.ErrorMatches, "invalid model access permission.*") -} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/juju/permission/package_test.go juju-core-2.0.0/src/github.com/juju/juju/juju/permission/package_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/juju/permission/package_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/juju/permission/package_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,14 +0,0 @@ -// Copyright 2016 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. 
- -package permission_test - -import ( - stdtesting "testing" - - gc "gopkg.in/check.v1" -) - -func Test(t *stdtesting.T) { - gc.TestingT(t) -} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/juju/testing/conn.go juju-core-2.0.0/src/github.com/juju/juju/juju/testing/conn.go --- juju-core-2.0~beta15/src/github.com/juju/juju/juju/testing/conn.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/juju/testing/conn.go 2016-10-13 14:31:49.000000000 +0000 @@ -141,9 +141,9 @@ } func (s *JujuConnSuite) AdminUserTag(c *gc.C) names.UserTag { - env, err := s.State.ControllerModel() + model, err := s.State.ControllerModel() c.Assert(err, jc.ErrorIsNil) - return env.Owner() + return model.Owner() } func (s *JujuConnSuite) MongoInfo(c *gc.C) *mongo.MongoInfo { @@ -163,11 +163,14 @@ // openAPIAs opens the API and ensures that the api.Connection returned will be // closed during the test teardown by using a cleanup function. -func (s *JujuConnSuite) openAPIAs(c *gc.C, tag names.Tag, password, nonce string) api.Connection { +func (s *JujuConnSuite) openAPIAs(c *gc.C, tag names.Tag, password, nonce string, controllerOnly bool) api.Connection { apiInfo := s.APIInfo(c) apiInfo.Tag = tag apiInfo.Password = password apiInfo.Nonce = nonce + if controllerOnly { + apiInfo.ModelTag = names.ModelTag{} + } apiState, err := api.Open(apiInfo, api.DialOpts{}) c.Assert(err, jc.ErrorIsNil) c.Assert(apiState, gc.NotNil) @@ -179,14 +182,22 @@ // authentication. The returned api.Connection should not be closed by the caller // as a cleanup function has been registered to do that. 
func (s *JujuConnSuite) OpenAPIAs(c *gc.C, tag names.Tag, password string) api.Connection { - return s.openAPIAs(c, tag, password, "") + return s.openAPIAs(c, tag, password, "", false) +} + +func (s *JujuConnSuite) OpenControllerAPIAs(c *gc.C, tag names.Tag, password string) api.Connection { + return s.openAPIAs(c, tag, password, "", true) } // OpenAPIAsMachine opens the API using the given machine tag, password and // nonce for authentication. The returned api.Connection should not be closed by // the caller as a cleanup function has been registered to do that. func (s *JujuConnSuite) OpenAPIAsMachine(c *gc.C, tag names.Tag, password, nonce string) api.Connection { - return s.openAPIAs(c, tag, password, nonce) + return s.openAPIAs(c, tag, password, nonce, false) +} + +func (s *JujuConnSuite) OpenControllerAPI(c *gc.C) api.Connection { + return s.OpenControllerAPIAs(c, s.AdminUserTag(c), AdminSecret) } // OpenAPIAsNewMachine creates a new machine entry that lives in system state, @@ -205,7 +216,7 @@ c.Assert(err, jc.ErrorIsNil) err = machine.SetProvisioned("foo", "fake_nonce", nil) c.Assert(err, jc.ErrorIsNil) - return s.openAPIAs(c, machine.Tag(), password, "fake_nonce"), machine + return s.openAPIAs(c, machine.Tag(), password, "fake_nonce", false), machine } // DefaultVersions returns a slice of unique 'versions' for the current @@ -240,33 +251,71 @@ return versions } -func (s *JujuConnSuite) setUpConn(c *gc.C) { - if s.RootDir != "" { - c.Fatal("JujuConnSuite.setUpConn without teardown") +// UserHomeParams stores parameters with which to create an os user home dir. +type UserHomeParams struct { + // The username of the operating system user whose fake home + // directory is to be created. + Username string + + // Override the default osenv.JujuModelEnvKey. + ModelEnvKey string + + // Should the oldJujuXDGDataHome field be set? + // This is likely only true during setUpConn, as we want teardown to + // reset to the most original value. 
+ SetOldHome bool +} + +// Create a home directory and Juju data home for user username. +// This is used by setUpConn to create the 'ubuntu' user home, after RootDir, +// and may be used again later for other users. +func (s *JujuConnSuite) CreateUserHome(c *gc.C, params *UserHomeParams) { + if s.RootDir == "" { + c.Fatal("JujuConnSuite.setUpConn required first for RootDir") } - s.RootDir = c.MkDir() - s.oldHome = utils.Home() - home := filepath.Join(s.RootDir, "/home/ubuntu") + c.Assert(params.Username, gc.Not(gc.Equals), "") + home := filepath.Join(s.RootDir, "home", params.Username) err := os.MkdirAll(home, 0777) c.Assert(err, jc.ErrorIsNil) err = utils.SetHome(home) c.Assert(err, jc.ErrorIsNil) + jujuHome := filepath.Join(home, ".local", "share") err = os.MkdirAll(filepath.Join(home, ".local", "share"), 0777) c.Assert(err, jc.ErrorIsNil) - s.oldJujuXDGDataHome = osenv.SetJujuXDGDataHome(filepath.Join(home, ".local", "share", "juju")) - err = os.MkdirAll(osenv.JujuXDGDataHome(), 0777) - c.Assert(err, jc.ErrorIsNil) + previousJujuXDGDataHome := osenv.SetJujuXDGDataHome(jujuHome) + if params.SetOldHome { + s.oldJujuXDGDataHome = previousJujuXDGDataHome + } err = os.MkdirAll(s.DataDir(), 0777) c.Assert(err, jc.ErrorIsNil) - s.PatchEnvironment(osenv.JujuModelEnvKey, "controller") - cfg, err := config.New(config.UseDefaults, (map[string]interface{})(s.sampleConfig())) - c.Assert(err, jc.ErrorIsNil) + jujuModelEnvKey := "JUJU_MODEL" + if params.ModelEnvKey != "" { + jujuModelEnvKey = params.ModelEnvKey + } + s.PatchEnvironment(osenv.JujuModelEnvKey, jujuModelEnvKey) s.ControllerStore = jujuclient.NewFileClientStore() +} + +func (s *JujuConnSuite) setUpConn(c *gc.C) { + if s.RootDir != "" { + c.Fatal("JujuConnSuite.setUpConn without teardown") + } + s.RootDir = c.MkDir() + s.oldHome = utils.Home() + userHomeParams := UserHomeParams{ + Username: "ubuntu", + ModelEnvKey: "controller", + SetOldHome: true, + } + s.CreateUserHome(c, &userHomeParams) + + cfg, err := 
config.New(config.UseDefaults, (map[string]interface{})(s.sampleConfig())) + c.Assert(err, jc.ErrorIsNil) ctx := testing.Context(c) s.ControllerConfig = testing.FakeControllerConfig() @@ -307,16 +356,26 @@ s.PatchValue(&keys.JujuPublicKey, sstesting.SignedMetadataPublicKey) // Dummy provider uses a random port, which is added to cfg used to create environment. - apiPort := dummy.ApiPort(environ.Provider()) + apiPort := dummy.APIPort(environ.Provider()) s.ControllerConfig["api-port"] = apiPort err = bootstrap.Bootstrap(modelcmd.BootstrapContext(ctx), environ, bootstrap.BootstrapParams{ ControllerConfig: s.ControllerConfig, CloudName: cloudSpec.Name, + CloudRegion: "dummy-region", Cloud: cloud.Cloud{ - Type: cloudSpec.Type, - AuthTypes: []cloud.AuthType{cloud.EmptyAuthType}, - Endpoint: cloudSpec.Endpoint, - StorageEndpoint: cloudSpec.StorageEndpoint, + Type: cloudSpec.Type, + AuthTypes: []cloud.AuthType{cloud.EmptyAuthType, cloud.UserPassAuthType}, + Endpoint: cloudSpec.Endpoint, + IdentityEndpoint: cloudSpec.IdentityEndpoint, + StorageEndpoint: cloudSpec.StorageEndpoint, + Regions: []cloud.Region{ + { + Name: "dummy-region", + Endpoint: "dummy-endpoint", + IdentityEndpoint: "dummy-identity-endpoint", + StorageEndpoint: "dummy-storage-endpoint", + }, + }, }, CloudCredential: cloudSpec.Credential, CloudCredentialName: "cred", @@ -329,7 +388,7 @@ s.BackingState = getStater.GetStateInAPIServer() s.BackingStatePool = getStater.GetStatePoolInAPIServer() - s.State, err = newState(environ, s.BackingState.MongoConnectionInfo()) + s.State, err = newState(s.ControllerConfig.ControllerUUID(), environ, s.BackingState.MongoConnectionInfo()) c.Assert(err, jc.ErrorIsNil) apiInfo, err := environs.APIInfo(s.ControllerConfig.ControllerUUID(), testing.ModelTag.Id(), testing.CACert, s.ControllerConfig.APIPort(), environ) @@ -397,11 +456,14 @@ // newState returns a new State that uses the given environment. // The environment must have already been bootstrapped. 
-func newState(environ environs.Environ, mongoInfo *mongo.MongoInfo) (*state.State, error) { +func newState(controllerUUID string, environ environs.Environ, mongoInfo *mongo.MongoInfo) (*state.State, error) { + if controllerUUID == "" { + return nil, errors.New("missing controller UUID") + } config := environ.Config() password := AdminSecret if password == "" { - return nil, fmt.Errorf("cannot connect without admin-secret") + return nil, errors.Errorf("cannot connect without admin-secret") } modelTag := names.NewModelTag(config.UUID()) @@ -410,13 +472,14 @@ newPolicyFunc := stateenvirons.GetNewPolicyFunc( stateenvirons.GetNewEnvironFunc(environs.New), ) - st, err := state.Open(modelTag, mongoInfo, opts, newPolicyFunc) + controllerTag := names.NewControllerTag(controllerUUID) + st, err := state.Open(modelTag, controllerTag, mongoInfo, opts, newPolicyFunc) if errors.IsUnauthorized(errors.Cause(err)) { // We try for a while because we might succeed in // connecting to mongo before the state has been // initialized and the initial password set. for a := redialStrategy.Start(); a.Next(); { - st, err = state.Open(modelTag, mongoInfo, opts, newPolicyFunc) + st, err = state.Open(modelTag, controllerTag, mongoInfo, opts, newPolicyFunc) if !errors.IsUnauthorized(errors.Cause(err)) { break } @@ -427,35 +490,9 @@ } else if err != nil { return nil, err } - if err := updateSecrets(environ, st); err != nil { - st.Close() - return nil, fmt.Errorf("unable to push secrets: %v", err) - } return st, nil } -func updateSecrets(env environs.Environ, st *state.State) error { - secrets, err := env.Provider().SecretAttrs(env.Config()) - if err != nil { - return err - } - cfg, err := st.ModelConfig() - if err != nil { - return err - } - secretAttrs := make(map[string]interface{}) - attrs := cfg.AllAttrs() - for k, v := range secrets { - if _, exists := attrs[k]; exists { - // Environment already has secrets. Won't send again. 
- return nil - } else { - secretAttrs[k] = v - } - } - return st.UpdateModelConfig(secretAttrs, nil, nil) -} - // PutCharm uploads the given charm to provider storage, and adds a // state.Charm to the state. The charm is not uploaded if a charm with // the same URL already exists in the state. @@ -667,6 +704,7 @@ StateAddresses: s.MongoInfo(c).Addrs, APIAddresses: s.APIInfo(c).Addrs, CACert: testing.CACert, + Controller: s.State.ControllerTag(), Model: s.State.ModelTag(), }) c.Assert(err, jc.ErrorIsNil) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/juju/testing/instance.go juju-core-2.0.0/src/github.com/juju/juju/juju/testing/instance.go --- juju-core-2.0~beta15/src/github.com/juju/juju/juju/testing/instance.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/juju/testing/instance.go 2016-10-13 14:31:49.000000000 +0000 @@ -175,11 +175,11 @@ machineNonce := "fake_nonce" apiInfo := FakeAPIInfo(machineId) instanceConfig, err := instancecfg.NewInstanceConfig( + testing.ControllerTag, machineId, machineNonce, imagemetadata.ReleasedStream, preferredSeries, - true, apiInfo, ) if err != nil { @@ -200,7 +200,7 @@ instanceConfig.Jobs = []multiwatcher.MachineJob{multiwatcher.JobHostUnits, multiwatcher.JobManageModel} } cfg := env.Config() - instanceConfig.Tags = instancecfg.InstanceTags(params.ControllerUUID, params.ControllerUUID, cfg, nil) + instanceConfig.Tags = instancecfg.InstanceTags(env.Config().UUID(), params.ControllerUUID, cfg, nil) params.Tools = possibleTools params.InstanceConfig = instanceConfig return nil diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/jujuclient/accountsfile_test.go juju-core-2.0.0/src/github.com/juju/juju/jujuclient/accountsfile_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/jujuclient/accountsfile_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/jujuclient/accountsfile_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -20,11 +20,22 @@ var _ = 
gc.Suite(&AccountsFileSuite{}) -const testAccountsYAML = ` +const testLegacyAccountsYAML = ` controllers: ctrl: user: admin@local password: hunter2 + last-known-access: superuser + kontroll: + user: bob@remote +` + +const testAccountsYAML = ` +controllers: + ctrl: + user: admin + password: hunter2 + last-known-access: superuser kontroll: user: bob@remote ` @@ -36,8 +47,9 @@ var ( ctrlAdminAccountDetails = jujuclient.AccountDetails{ - User: "admin@local", - Password: "hunter2", + User: "admin", + Password: "hunter2", + LastKnownAccess: "superuser", } kontrollBobRemoteAccountDetails = jujuclient.AccountDetails{ User: "bob@remote", @@ -65,6 +77,21 @@ c.Assert(accounts, gc.HasLen, 0) } +func (s *AccountsFileSuite) TestMigrateLegacyLocal(c *gc.C) { + err := ioutil.WriteFile(jujuclient.JujuAccountsPath(), []byte(testLegacyAccountsYAML), 0644) + c.Assert(err, jc.ErrorIsNil) + + accounts, err := jujuclient.ReadAccountsFile(jujuclient.JujuAccountsPath()) + c.Assert(err, jc.ErrorIsNil) + + migratedData, err := ioutil.ReadFile(jujuclient.JujuAccountsPath()) + c.Assert(err, jc.ErrorIsNil) + migratedAccounts, err := jujuclient.ParseAccounts(migratedData) + c.Assert(err, jc.ErrorIsNil) + c.Assert(string(migratedData), jc.DeepEquals, testAccountsYAML[1:]) + c.Assert(migratedAccounts, jc.DeepEquals, accounts) +} + func writeTestAccountsFile(c *gc.C) { err := jujuclient.WriteAccountsFile(testControllerAccounts) c.Assert(err, jc.ErrorIsNil) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/jujuclient/accounts.go juju-core-2.0.0/src/github.com/juju/juju/jujuclient/accounts.go --- juju-core-2.0~beta15/src/github.com/juju/juju/jujuclient/accounts.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/jujuclient/accounts.go 2016-10-13 14:31:49.000000000 +0000 @@ -6,9 +6,11 @@ import ( "io/ioutil" "os" + "strings" "github.com/juju/errors" "github.com/juju/utils" + "gopkg.in/juju/names.v2" "gopkg.in/yaml.v2" "github.com/juju/juju/juju/osenv" @@ -30,16 
+32,34 @@ } return nil, err } - if err := migrateLegacyAccounts(data); err != nil { - return nil, err - } accounts, err := ParseAccounts(data) if err != nil { return nil, err } + if err := migrateLocalAccountUsers(accounts); err != nil { + return nil, err + } return accounts, nil } +func migrateLocalAccountUsers(accounts map[string]AccountDetails) error { + changes := false + for user, account := range accounts { + if !strings.HasSuffix(account.User, "@local") { + continue + } + tag := names.NewUserTag(account.User) + updated := account + updated.User = tag.Id() + accounts[user] = updated + changes = true + } + if changes { + return WriteAccountsFile(accounts) + } + return nil +} + // WriteAccountsFile marshals to YAML details of the given accounts // and writes it to the accounts file. func WriteAccountsFile(controllerAccounts map[string]AccountDetails) error { @@ -62,37 +82,3 @@ type accountsCollection struct { ControllerAccounts map[string]AccountDetails `yaml:"controllers"` } - -// TODO(axw) 2016-07-14 #1603841 -// Drop this code once we get to 2.0. -func migrateLegacyAccounts(data []byte) error { - type legacyControllerAccounts struct { - Accounts map[string]AccountDetails `yaml:"accounts"` - CurrentAccount string `yaml:"current-account,omitempty"` - } - type legacyAccountsCollection struct { - ControllerAccounts map[string]legacyControllerAccounts `yaml:"controllers"` - } - var legacy legacyAccountsCollection - if err := yaml.Unmarshal(data, &legacy); err != nil { - return errors.Annotate(err, "cannot unmarshal accounts") - } - result := make(map[string]AccountDetails) - for controller, controllerAccounts := range legacy.ControllerAccounts { - if controllerAccounts.CurrentAccount == "" { - continue - } - details, ok := controllerAccounts.Accounts[controllerAccounts.CurrentAccount] - if !ok { - continue - } - result[controller] = details - } - if len(result) > 0 { - // Only write if we found at least one, - // which means the file was in legacy - // format. 
Otherwise leave it alone. - return WriteAccountsFile(result) - } - return nil -} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/jujuclient/accounts_test.go juju-core-2.0.0/src/github.com/juju/juju/jujuclient/accounts_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/jujuclient/accounts_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/jujuclient/accounts_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -6,6 +6,7 @@ import ( "os" + "github.com/juju/errors" jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" @@ -47,105 +48,41 @@ c.Assert(*details, jc.DeepEquals, kontrollBobRemoteAccountDetails) } -/* -func (s *AccountsSuite) TestAllAccountsNoFile(c *gc.C) { - err := os.Remove(jujuclient.JujuAccountsPath()) - c.Assert(err, jc.ErrorIsNil) - accounts, err := s.store.AllAccounts("not-found") - c.Assert(err, gc.ErrorMatches, "accounts for controller not-found not found") - c.Assert(accounts, gc.HasLen, 0) -} - -func (s *AccountsSuite) TestAllAccounts(c *gc.C) { - accounts, err := s.store.AllAccounts("kontroll") - c.Assert(err, jc.ErrorIsNil) - c.Assert(accounts, jc.DeepEquals, testControllerAccounts["kontroll"].Accounts) -} - -func (s *AccountsSuite) TestCurrentAccount(c *gc.C) { - current, err := s.store.CurrentAccount("kontroll") - c.Assert(err, jc.ErrorIsNil) - c.Assert(current, gc.Equals, "admin@local") -} - -func (s *AccountsSuite) TestCurrentAccountNotSet(c *gc.C) { - _, err := s.store.CurrentAccount("ctrl") - c.Assert(err, jc.Satisfies, errors.IsNotFound) -} - -func (s *AccountsSuite) TestCurrentAccountControllerNotFound(c *gc.C) { - _, err := s.store.CurrentAccount("not-found") - c.Assert(err, jc.Satisfies, errors.IsNotFound) -} - -func (s *AccountsSuite) TestSetCurrentAccountControllerNotFound(c *gc.C) { - err := s.store.SetCurrentAccount("not-found", "admin@local") - c.Assert(err, jc.Satisfies, errors.IsNotFound) -} - -func (s *AccountsSuite) TestSetCurrentAccountAccountNotFound(c *gc.C) { - err := 
s.store.SetCurrentAccount("kontroll", "admin@nowhere") - c.Assert(err, jc.Satisfies, errors.IsNotFound) -} - -func (s *AccountsSuite) TestSetCurrentAccount(c *gc.C) { - err := s.store.SetCurrentAccount("kontroll", "admin@local") +func (s *AccountsSuite) TestUpdateAccountIgnoresEmptyAccess(c *gc.C) { + testAccountDetails := jujuclient.AccountDetails{ + User: "admin", + Password: "fnord", + } + err := s.store.UpdateAccount("ctrl", testAccountDetails) c.Assert(err, jc.ErrorIsNil) - accounts, err := jujuclient.ReadAccountsFile(jujuclient.JujuAccountsPath()) + details, err := s.store.AccountDetails("ctrl") c.Assert(err, jc.ErrorIsNil) - c.Assert(accounts["kontroll"].CurrentAccount, gc.Equals, "admin@local") + testAccountDetails.LastKnownAccess = ctrlAdminAccountDetails.LastKnownAccess + c.Assert(testAccountDetails.LastKnownAccess, gc.Equals, "superuser") + c.Assert(*details, jc.DeepEquals, testAccountDetails) } func (s *AccountsSuite) TestUpdateAccountNewController(c *gc.C) { - testAccountDetails := jujuclient.AccountDetails{User: "admin@local"} - err := s.store.UpdateAccount("new-controller", "admin@local", testAccountDetails) - c.Assert(err, jc.ErrorIsNil) - accounts, err := s.store.AllAccounts("new-controller") - c.Assert(err, jc.ErrorIsNil) - c.Assert(accounts, jc.DeepEquals, map[string]jujuclient.AccountDetails{ - "admin@local": testAccountDetails, - }) -} - -func (s *AccountsSuite) TestUpdateAccountExistingControllerMultipleAccounts(c *gc.C) { - testAccountDetails := jujuclient.AccountDetails{User: "bob@environs"} - err := s.store.UpdateAccount("kontroll", "bob@environs", testAccountDetails) - c.Assert(err, jc.Satisfies, errors.IsAlreadyExists) - c.Assert(err, gc.ErrorMatches, "alternative account for controller kontroll already exists") - accounts, err := s.store.AllAccounts("kontroll") - c.Assert(err, jc.ErrorIsNil) - _, ok := accounts["bob@environs"] - c.Assert(ok, jc.IsFalse) -} - -func (s *AccountsSuite) TestUpdateAccountExistingControllerNewAccount(c *gc.C) 
{ - accounts, err := s.store.AllAccounts("kontroll") - c.Assert(err, jc.ErrorIsNil) - for account := range accounts { - err := s.store.RemoveAccount("kontroll", account) - c.Assert(err, jc.ErrorIsNil) - } - testAccountDetails := jujuclient.AccountDetails{User: "bob@environs"} - err = s.store.UpdateAccount("kontroll", "bob@environs", testAccountDetails) + testAccountDetails := jujuclient.AccountDetails{User: "admin"} + err := s.store.UpdateAccount("new-controller", testAccountDetails) c.Assert(err, jc.ErrorIsNil) - accounts, err = s.store.AllAccounts("kontroll") + details, err := s.store.AccountDetails("new-controller") c.Assert(err, jc.ErrorIsNil) - c.Assert(accounts, jc.DeepEquals, map[string]jujuclient.AccountDetails{ - "bob@environs": testAccountDetails, - }) + c.Assert(*details, jc.DeepEquals, testAccountDetails) } func (s *AccountsSuite) TestUpdateAccountOverwrites(c *gc.C) { testAccountDetails := jujuclient.AccountDetails{ - User: "admin@local", - Password: "fnord", + User: "admin", + Password: "fnord", + LastKnownAccess: "add-model", } for i := 0; i < 2; i++ { // Twice so we exercise the code path of updating with // identical details. 
- err := s.store.UpdateAccount("kontroll", "admin@local", testAccountDetails) + err := s.store.UpdateAccount("kontroll", testAccountDetails) c.Assert(err, jc.ErrorIsNil) - details, err := s.store.AccountDetails("kontroll", "admin@local") + details, err := s.store.AccountDetails("kontroll") c.Assert(err, jc.ErrorIsNil) c.Assert(*details, jc.DeepEquals, testAccountDetails) } @@ -154,30 +91,25 @@ func (s *AccountsSuite) TestRemoveAccountNoFile(c *gc.C) { err := os.Remove(jujuclient.JujuAccountsPath()) c.Assert(err, jc.ErrorIsNil) - err = s.store.RemoveAccount("not-found", "admin@nowhere") + err = s.store.RemoveAccount("not-found") c.Assert(err, jc.Satisfies, errors.IsNotFound) } func (s *AccountsSuite) TestRemoveAccountControllerNotFound(c *gc.C) { - err := s.store.RemoveAccount("not-found", "admin@nowhere") - c.Assert(err, jc.Satisfies, errors.IsNotFound) -} - -func (s *AccountsSuite) TestRemoveAccountNotFound(c *gc.C) { - err := s.store.RemoveAccount("kontroll", "admin@nowhere") + err := s.store.RemoveAccount("not-found") c.Assert(err, jc.Satisfies, errors.IsNotFound) } func (s *AccountsSuite) TestRemoveAccount(c *gc.C) { - err := s.store.RemoveAccount("kontroll", "admin@local") + err := s.store.RemoveAccount("kontroll") c.Assert(err, jc.ErrorIsNil) - _, err = s.store.AccountDetails("kontroll", "admin@local") + _, err = s.store.AccountDetails("kontroll") c.Assert(err, jc.Satisfies, errors.IsNotFound) } func (s *AccountsSuite) TestRemoveControllerRemovesaccounts(c *gc.C) { store := jujuclient.NewFileClientStore() - err := store.UpdateController("kontroll", jujuclient.ControllerDetails{ + err := store.AddController("kontroll", jujuclient.ControllerDetails{ ControllerUUID: "abc", CACert: "woop", }) @@ -190,4 +122,3 @@ _, ok := accounts["kontroll"] c.Assert(ok, jc.IsFalse) // kontroll accounts are removed } -*/ diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/jujuclient/bootstrapconfigfile_test.go 
juju-core-2.0.0/src/github.com/juju/juju/jujuclient/bootstrapconfigfile_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/jujuclient/bootstrapconfigfile_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/jujuclient/bootstrapconfigfile_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -30,6 +30,7 @@ model-config: name: admin type: ec2 + controller-model-uuid: deadbeef-1bad-500d-9000-4b1d0d06f00d credential: default cloud: aws type: ec2 @@ -42,6 +43,7 @@ model-config: name: admin type: maas + controller-model-uuid: deadbeef-1bad-500d-9000-4b1d0d06f00d cloud: maas type: maas region: 127.0.0.1 @@ -57,11 +59,12 @@ "type": "ec2", "name": "admin", }, - Credential: "default", - Cloud: "aws", - CloudType: "ec2", - CloudRegion: "us-east-1", - CloudEndpoint: "https://us-east-1.amazonaws.com", + ControllerModelUUID: "deadbeef-1bad-500d-9000-4b1d0d06f00d", + Credential: "default", + Cloud: "aws", + CloudType: "ec2", + CloudRegion: "us-east-1", + CloudEndpoint: "https://us-east-1.amazonaws.com", }, "mallards": { ControllerConfig: controller.Config{ @@ -72,9 +75,10 @@ "type": "maas", "name": "admin", }, - Cloud: "maas", - CloudType: "maas", - CloudRegion: "127.0.0.1", + ControllerModelUUID: "deadbeef-1bad-500d-9000-4b1d0d06f00d", + Cloud: "maas", + CloudType: "maas", + CloudRegion: "127.0.0.1", }, } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/jujuclient/bootstrapconfig.go juju-core-2.0.0/src/github.com/juju/juju/jujuclient/bootstrapconfig.go --- juju-core-2.0~beta15/src/github.com/juju/juju/jujuclient/bootstrapconfig.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/jujuclient/bootstrapconfig.go 2016-10-13 14:31:49.000000000 +0000 @@ -55,14 +55,6 @@ if err != nil { return nil, errors.Annotate(err, "cannot unmarshal bootstrap config") } - // TODO(wallyworld) - drop when we get to beta 15. - // This is for backwards compatibility with beta 13. 
- for controllerName, cfg := range result.ControllerBootstrapConfig { - if cfg.Config == nil { - cfg.Config = cfg.OldConfig - result.ControllerBootstrapConfig[controllerName] = cfg - } - } return result.ControllerBootstrapConfig, nil } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/jujuclient/controllersfile_test.go juju-core-2.0.0/src/github.com/juju/juju/jujuclient/controllersfile_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/jujuclient/controllersfile_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/jujuclient/controllersfile_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -29,18 +29,24 @@ ca-cert: this-is-aws-test-ca-cert cloud: aws region: us-east-1 + controller-machine-count: 0 + active-controller-machine-count: 0 mallards: unresolved-api-endpoints: [maas-1-05.cluster.mallards] uuid: this-is-another-uuid api-endpoints: [this-is-another-of-many-api-endpoints, this-is-one-more-of-many-api-endpoints] ca-cert: this-is-another-ca-cert cloud: mallards + controller-machine-count: 0 + active-controller-machine-count: 0 mark-test-prodstack: unresolved-api-endpoints: [vm-23532.prodstack.canonical.com, great.test.server.hostname.co.nz] uuid: this-is-a-uuid api-endpoints: [this-is-one-of-many-api-endpoints] ca-cert: this-is-a-ca-cert cloud: prodstack + controller-machine-count: 0 + active-controller-machine-count: 0 current-controller: mallards ` diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/jujuclient/controllers_test.go juju-core-2.0.0/src/github.com/juju/juju/jujuclient/controllers_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/jujuclient/controllers_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/jujuclient/controllers_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -6,6 +6,7 @@ import ( "fmt" + "github.com/juju/errors" jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" @@ -27,12 +28,12 @@ s.store = jujuclient.NewFileClientStore() 
s.controllerName = "test.controller" s.controller = jujuclient.ControllerDetails{ - []string{"test.server.hostname"}, - "test.uuid", - []string{"test.api.endpoint"}, - "test.ca.cert", - "aws", - "southeastasia", + UnresolvedAPIEndpoints: []string{"test.server.hostname"}, + ControllerUUID: "test.uuid", + APIEndpoints: []string{"test.api.endpoint"}, + CACert: "test.ca.cert", + Cloud: "aws", + CloudRegion: "southeastasia", } } @@ -61,27 +62,70 @@ c.Assert(found, gc.DeepEquals, &expected) } -func (s *ControllersSuite) TestUpdateControllerAddFirst(c *gc.C) { - err := s.store.UpdateController(s.controllerName, s.controller) +func (s *ControllersSuite) TestAddController(c *gc.C) { + err := s.store.AddController(s.controllerName, s.controller) + c.Assert(err, jc.ErrorIsNil) + s.assertUpdateSucceeded(c) +} + +func (s *ControllersSuite) TestAddControllerDupUUIDFails(c *gc.C) { + err := s.store.AddController(s.controllerName, s.controller) + c.Assert(err, jc.ErrorIsNil) + s.assertUpdateSucceeded(c) + // Try to add it again + err = s.store.AddController(s.controllerName+"-copy", s.controller) + c.Assert(err, gc.ErrorMatches, `controller with UUID .* already exists`) +} + +func (s *ControllersSuite) TestAddControllerDupNameFails(c *gc.C) { + err := s.store.AddController(s.controllerName, s.controller) c.Assert(err, jc.ErrorIsNil) s.assertUpdateSucceeded(c) + // Try to add it again + err = s.store.AddController(s.controllerName, s.controller) + c.Assert(err, gc.ErrorMatches, `controller with name .* already exists`) +} + +func (s *ControllersSuite) TestUpdateControllerAddFirst(c *gc.C) { + // UpdateController should fail if no controller has first been added + // with AddController. + err := s.store.UpdateController(s.controllerName, s.controller) + c.Assert(err, gc.ErrorMatches, `controllers not found`) } func (s *ControllersSuite) TestUpdateControllerAddNew(c *gc.C) { + // UpdateController should fail if no controller has first been added + // with AddController. 
s.assertControllerNotExists(c) err := s.store.UpdateController(s.controllerName, s.controller) - c.Assert(err, jc.ErrorIsNil) - s.assertUpdateSucceeded(c) + c.Assert(err, gc.ErrorMatches, `controller .*not found`) } func (s *ControllersSuite) TestUpdateController(c *gc.C) { s.controllerName = firstTestControllerName(c) - + all := writeTestControllersFile(c) + // This is not a restore (backup), so update with the existing UUID. + s.controller.ControllerUUID = all.Controllers[s.controllerName].ControllerUUID err := s.store.UpdateController(s.controllerName, s.controller) c.Assert(err, jc.ErrorIsNil) s.assertUpdateSucceeded(c) } +// Try and fail to use an existing controller's UUID to update another existing +// controller's config. +func (s *ControllersSuite) TestUpdateControllerDupUUID(c *gc.C) { + firstControllerName := firstTestControllerName(c) + all := writeTestControllersFile(c) + firstControllerUUID := all.Controllers[firstControllerName].ControllerUUID + for name, details := range all.Controllers { + if details.ControllerUUID != firstControllerUUID { + details.ControllerUUID = firstControllerUUID + err := s.store.UpdateController(name, details) + c.Assert(err, gc.ErrorMatches, `controller .* with UUID .* already exists`) + } + } +} + func (s *ControllersSuite) TestRemoveControllerNoFile(c *gc.C) { err := s.store.RemoveController(s.controllerName) c.Assert(err, jc.ErrorIsNil) @@ -104,21 +148,35 @@ c.Assert(found, gc.IsNil) } -func (s *ControllersSuite) TestRemoveControllerRemovesIdenticalControllers(c *gc.C) { - name := firstTestControllerName(c) - details, err := s.store.ControllerByName(name) +func (s *ControllersSuite) TestCurrentControllerNoneExists(c *gc.C) { + _, err := s.store.CurrentController() + c.Assert(err, jc.Satisfies, errors.IsNotFound) + c.Assert(err, gc.ErrorMatches, "current controller not found") +} + +func (s *ControllersSuite) TestCurrentController(c *gc.C) { + writeTestControllersFile(c) + + current, err := s.store.CurrentController() + 
c.Assert(err, jc.ErrorIsNil) + c.Assert(current, gc.Equals, "mallards") +} + +func (s *ControllersSuite) TestSetCurrentController(c *gc.C) { + err := s.store.AddController(s.controllerName, s.controller) c.Assert(err, jc.ErrorIsNil) - err = s.store.UpdateController(name+"-copy", *details) + err = s.store.SetCurrentController(s.controllerName) c.Assert(err, jc.ErrorIsNil) - err = s.store.RemoveController(name) + controllers, err := jujuclient.ReadControllersFile(jujuclient.JujuControllersPath()) c.Assert(err, jc.ErrorIsNil) + c.Assert(controllers.CurrentController, gc.Equals, s.controllerName) +} - for _, name := range []string{name, name + "-copy"} { - found, err := s.store.ControllerByName(name) - c.Assert(err, gc.ErrorMatches, fmt.Sprintf("controller %v not found", name)) - c.Assert(found, gc.IsNil) - } +func (s *ControllersSuite) TestSetCurrentControllerNoneExists(c *gc.C) { + err := s.store.SetCurrentController(s.controllerName) + c.Assert(err, jc.Satisfies, errors.IsNotFound) + c.Assert(err, gc.ErrorMatches, "controller test.controller not found") } func (s *ControllersSuite) assertWriteFails(c *gc.C, failureMessage string) { diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/jujuclient/controllervalidation_test.go juju-core-2.0.0/src/github.com/juju/juju/jujuclient/controllervalidation_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/jujuclient/controllervalidation_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/jujuclient/controllervalidation_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -18,12 +18,12 @@ func (s *ControllerValidationSuite) SetUpTest(c *gc.C) { s.BaseSuite.SetUpTest(c) s.controller = jujuclient.ControllerDetails{ - []string{"test.server.hostname"}, - "test.uuid", - []string{"test.api.endpoint"}, - "test.ca.cert", - "aws", - "southeastasia", + UnresolvedAPIEndpoints: []string{"test.server.hostname"}, + ControllerUUID: "test.uuid", + APIEndpoints: []string{"test.api.endpoint"}, + CACert: 
"test.ca.cert", + Cloud: "aws", + CloudRegion: "southeastasia", } } @@ -38,11 +38,6 @@ s.assertValidateControllerDetailsFails(c, "missing uuid, controller details not valid") } -func (s *ControllerValidationSuite) TestValidateControllerDetailsNoCACert(c *gc.C) { - s.controller.CACert = "" - s.assertValidateControllerDetailsFails(c, "missing ca-cert, controller details not valid") -} - func (s *ControllerValidationSuite) assertValidateControllerDetailsFails(c *gc.C, failureMessage string) { err := jujuclient.ValidateControllerDetails(s.controller) c.Assert(err, gc.ErrorMatches, failureMessage) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/jujuclient/credentials_test.go juju-core-2.0.0/src/github.com/juju/juju/jujuclient/credentials_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/jujuclient/credentials_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/jujuclient/credentials_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -8,7 +8,6 @@ gc "gopkg.in/check.v1" "github.com/juju/juju/cloud" - "github.com/juju/juju/juju/osenv" "github.com/juju/juju/jujuclient" "github.com/juju/juju/testing" ) @@ -79,9 +78,6 @@ } func (s *CredentialsSuite) TestUpdateCredentialRemovesDefaultIfNecessary(c *gc.C) { - origHome := osenv.SetJujuXDGDataHome(c.MkDir()) - s.AddCleanup(func(*gc.C) { osenv.SetJujuXDGDataHome(origHome) }) - s.cloudName = firstTestCloudName(c) store := jujuclient.NewFileCredentialStore() diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/jujuclient/file.go juju-core-2.0.0/src/github.com/juju/juju/jujuclient/file.go --- juju-core-2.0~beta15/src/github.com/juju/juju/jujuclient/file.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/jujuclient/file.go 2016-10-13 14:31:49.000000000 +0000 @@ -108,7 +108,46 @@ return nil, errors.NotFoundf("controller %s", name) } -// UpdateController implements ControllersUpdater. +// AddController implements ControllerUpdater. 
+func (s *store) AddController(name string, details ControllerDetails) error { + if err := ValidateControllerName(name); err != nil { + return errors.Trace(err) + } + if err := ValidateControllerDetails(details); err != nil { + return errors.Trace(err) + } + + releaser, err := s.acquireLock() + if err != nil { + return errors.Annotatef(err, "cannot add controller %v", name) + } + defer releaser.Release() + + all, err := ReadControllersFile(JujuControllersPath()) + if err != nil { + return errors.Annotate(err, "cannot get controllers") + } + + if len(all.Controllers) == 0 { + all.Controllers = make(map[string]ControllerDetails) + } + + if _, ok := all.Controllers[name]; ok { + return errors.AlreadyExistsf("controller with name %s", name) + } + + for k, v := range all.Controllers { + if v.ControllerUUID == details.ControllerUUID { + return errors.AlreadyExistsf("controller with UUID %s (%s)", + details.ControllerUUID, k) + } + } + + all.Controllers[name] = details + return WriteControllersFile(all) +} + +// UpdateController implements ControllerUpdater. func (s *store) UpdateController(name string, details ControllerDetails) error { if err := ValidateControllerName(name); err != nil { return errors.Trace(err) @@ -129,14 +168,25 @@ } if len(all.Controllers) == 0 { - all.Controllers = make(map[string]ControllerDetails) + return errors.NotFoundf("controllers") + } + + for k, v := range all.Controllers { + if v.ControllerUUID == details.ControllerUUID && k != name { + return errors.AlreadyExistsf("controller %s with UUID %s", + k, v.ControllerUUID) + } + } + + if _, ok := all.Controllers[name]; !ok { + return errors.NotFoundf("controller %s", name) } all.Controllers[name] = details return WriteControllersFile(all) } -// SetCurrentController implements ControllersUpdater. +// SetCurrentController implements ControllerUpdater. 
func (s *store) SetCurrentController(name string) error { if err := ValidateControllerName(name); err != nil { return errors.Trace(err) @@ -489,6 +539,11 @@ } if oldDetails, ok := accounts[controllerName]; ok && details == oldDetails { return nil + } else { + // Only update last known access if it has a value. + if details.LastKnownAccess == "" { + details.LastKnownAccess = oldDetails.LastKnownAccess + } } accounts[controllerName] = details diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/jujuclient/interface.go juju-core-2.0.0/src/github.com/juju/juju/jujuclient/interface.go --- juju-core-2.0~beta15/src/github.com/juju/juju/jujuclient/interface.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/jujuclient/interface.go 2016-10-13 14:31:49.000000000 +0000 @@ -34,6 +34,32 @@ // CloudRegion is the name of the cloud region that this controller // runs in. This will be empty for clouds without regions. CloudRegion string `yaml:"region,omitempty"` + + // AgentVersion is the version of the agent running on this controller. + // While this isn't strictly needed to connect to a controller, it is used + // in formatting show-controller output where this struct is also used. + AgentVersion string `yaml:"agent-version,omitempty"` + + // ControllerMachineCount represents the number of controller machines + // It is cached here so under normal usage list-controllers + // does not need to hit the server. + ControllerMachineCount int `yaml:"controller-machine-count"` + + // ActiveControllerMachineCount represents the number of controller machines + // and which are active in the HA cluster. + // It is cached here so under normal usage list-controllers + // does not need to hit the server. + ActiveControllerMachineCount int `yaml:"active-controller-machine-count"` + + // ModelCount is the number of models to which a user has access. + // It is cached here so under normal usage list-controllers + // does not need to hit the server. 
+ ModelCount *int `yaml:"model-count,omitempty"` + + // MachineCount is the number of machines in all models to + // which a user has access. It is cached here so under normal + // usage list-controllers does not need to hit the server. + MachineCount *int `yaml:"machine-count,omitempty"` } // ModelDetails holds details of a model. @@ -50,10 +76,8 @@ // Password is the password for the account. Password string `yaml:"password,omitempty"` - // Macaroon is a time-limited macaroon that may be - // used to log in. This string is the JSON-encoding - // of a gopkg.in/macaroon.v1.Macaroon. - Macaroon string `yaml:"macaroon,omitempty"` + // LastKnownAccess is the last known access level for the account. + LastKnownAccess string `yaml:"last-known-access,omitempty"` } // BootstrapConfig holds the configuration used to bootstrap a controller. @@ -66,12 +90,10 @@ ControllerConfig controller.Config `yaml:"controller-config"` // Config is the complete configuration for the provider. - // This should be updated with the region, endpoint and credentials. Config map[string]interface{} `yaml:"model-config"` - // TODO(wallyworld) - drop when we get to beta 15. - // This is for backwards compatibility with beta 13. - OldConfig map[string]interface{} `yaml:"base-model-config,omitempty"` + // ControllerModelUUID is the controller model UUID. + ControllerModelUUID string `yaml:"controller-model-uuid"` // Credential is the name of the credential used to bootstrap. // @@ -93,19 +115,32 @@ // use when communicating with the cloud. CloudEndpoint string `yaml:"endpoint,omitempty"` + // CloudIdentityEndpoint is the location of the API endpoint to use + // when communicating with the cloud's identity service. This will + // be empty for clouds that have no identity-specific API endpoint. + CloudIdentityEndpoint string `yaml:"identity-endpoint,omitempty"` + // CloudStorageEndpoint is the location of the API endpoint to use // when communicating with the cloud's storage service. 
This will - // be empty for clouds that have no cloud-specific API endpoint. + // be empty for clouds that have no storage-specific API endpoint. CloudStorageEndpoint string `yaml:"storage-endpoint,omitempty"` } // ControllerUpdater stores controller details. type ControllerUpdater interface { - // UpdateController adds the given controller to the controller + // AddController adds the given controller to the controller + // collection. + // + // Where UpdateController is concerned with the controller name, + // AddController uses the controller UUID and will not add a + // duplicate even if the name is different. + AddController(controllerName string, details ControllerDetails) error + + // UpdateController updates the given controller in the controller // collection. // - // If the controller does not already exist, it will be added. - // Otherwise, it will be overwritten with the new details. + // If a controller of controllerName exists it will be overwritten + // with the new details. UpdateController(controllerName string, details ControllerDetails) error // SetCurrentController sets the name of the current controller. 
diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/jujuclient/jujuclienttesting/mem.go juju-core-2.0.0/src/github.com/juju/juju/jujuclient/jujuclienttesting/mem.go --- juju-core-2.0~beta15/src/github.com/juju/juju/jujuclient/jujuclienttesting/mem.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/jujuclient/jujuclienttesting/mem.go 2016-10-13 14:31:49.000000000 +0000 @@ -34,7 +34,11 @@ // AllController implements ControllerGetter.AllController func (c *MemStore) AllControllers() (map[string]jujuclient.ControllerDetails, error) { - return c.Controllers, nil + result := make(map[string]jujuclient.ControllerDetails) + for name, details := range c.Controllers { + result[name] = details + } + return result, nil } // ControllerByName implements ControllerGetter.ControllerByName @@ -68,6 +72,29 @@ return nil } +// AddController implements ControllerUpdater.AddController +func (c *MemStore) AddController(name string, one jujuclient.ControllerDetails) error { + if err := jujuclient.ValidateControllerName(name); err != nil { + return err + } + if err := jujuclient.ValidateControllerDetails(one); err != nil { + return err + } + + if _, ok := c.Controllers[name]; ok { + return errors.AlreadyExistsf("controller with name %s", name) + } + + for k, v := range c.Controllers { + if v.ControllerUUID == one.ControllerUUID { + return errors.AlreadyExistsf("controller with UUID %s (%s)", + one.ControllerUUID, k) + } + } + c.Controllers[name] = one + return nil +} + // UpdateController implements ControllerUpdater.UpdateController func (c *MemStore) UpdateController(name string, one jujuclient.ControllerDetails) error { if err := jujuclient.ValidateControllerName(name); err != nil { @@ -76,6 +103,22 @@ if err := jujuclient.ValidateControllerDetails(one); err != nil { return err } + + if len(c.Controllers) == 0 { + return errors.NotFoundf("controllers") + } + + for k, v := range c.Controllers { + if v.ControllerUUID == one.ControllerUUID && k != name 
{ + return errors.AlreadyExistsf("controller %s with UUID %s", + k, v.ControllerUUID) + } + } + + if _, ok := c.Controllers[name]; !ok { + return errors.NotFoundf("controller %s", name) + } + c.Controllers[name] = one return nil } @@ -222,6 +265,11 @@ if err := jujuclient.ValidateAccountDetails(details); err != nil { return err } + oldDetails := c.Accounts[controllerName] + // Only update last known access if it has a value. + if details.LastKnownAccess == "" { + details.LastKnownAccess = oldDetails.LastKnownAccess + } c.Accounts[controllerName] = details return nil } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/jujuclient/jujuclienttesting/stub.go juju-core-2.0.0/src/github.com/juju/juju/jujuclient/jujuclienttesting/stub.go --- juju-core-2.0~beta15/src/github.com/juju/juju/jujuclient/jujuclienttesting/stub.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/jujuclient/jujuclienttesting/stub.go 2016-10-13 14:31:49.000000000 +0000 @@ -15,6 +15,7 @@ AllControllersFunc func() (map[string]jujuclient.ControllerDetails, error) ControllerByNameFunc func(name string) (*jujuclient.ControllerDetails, error) + AddControllerFunc func(name string, one jujuclient.ControllerDetails) error UpdateControllerFunc func(name string, one jujuclient.ControllerDetails) error RemoveControllerFunc func(name string) error SetCurrentControllerFunc func(name string) error @@ -49,6 +50,9 @@ result.ControllerByNameFunc = func(name string) (*jujuclient.ControllerDetails, error) { return nil, result.Stub.NextErr() } + result.AddControllerFunc = func(name string, one jujuclient.ControllerDetails) error { + return result.Stub.NextErr() + } result.UpdateControllerFunc = func(name string, one jujuclient.ControllerDetails) error { return result.Stub.NextErr() } @@ -117,6 +121,7 @@ stub := NewStubStore() stub.AllControllersFunc = underlying.AllControllers stub.ControllerByNameFunc = underlying.ControllerByName + stub.AddControllerFunc = underlying.AddController 
stub.UpdateControllerFunc = underlying.UpdateController stub.RemoveControllerFunc = underlying.RemoveController stub.SetCurrentControllerFunc = underlying.SetCurrentController @@ -147,7 +152,13 @@ return c.ControllerByNameFunc(name) } -// UpdateController implements ControllersUpdater.UpdateController +// AddController implements ControllerUpdater.AddController +func (c *StubStore) AddController(name string, one jujuclient.ControllerDetails) error { + c.MethodCall(c, "AddController", name, one) + return c.AddControllerFunc(name, one) +} + +// UpdateController implements ControllerUpdater.UpdateController func (c *StubStore) UpdateController(name string, one jujuclient.ControllerDetails) error { c.MethodCall(c, "UpdateController", name, one) return c.UpdateControllerFunc(name, one) @@ -159,7 +170,7 @@ return c.RemoveControllerFunc(name) } -// SetCurrentController implements ControllersUpdater.SetCurrentController. +// SetCurrentController implements ControllerUpdater.SetCurrentController. 
func (c *StubStore) SetCurrentController(name string) error { c.MethodCall(c, "SetCurrentController", name) return c.SetCurrentControllerFunc(name) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/jujuclient/modelsfile_test.go juju-core-2.0.0/src/github.com/juju/juju/jujuclient/modelsfile_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/jujuclient/modelsfile_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/jujuclient/modelsfile_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -24,6 +24,21 @@ controllers: ctrl: models: + admin/admin: + uuid: ghi + kontroll: + models: + admin/admin: + uuid: abc + admin/my-model: + uuid: def + current-model: admin/my-model +` + +const testLegacyModelsYAML = ` +controllers: + ctrl: + models: admin@local/admin: uuid: ghi kontroll: @@ -38,14 +53,14 @@ var testControllerModels = map[string]*jujuclient.ControllerModels{ "kontroll": { Models: map[string]jujuclient.ModelDetails{ - "admin@local/admin": kontrollAdminModelDetails, - "admin@local/my-model": kontrollMyModelModelDetails, + "admin/admin": kontrollAdminModelDetails, + "admin/my-model": kontrollMyModelModelDetails, }, - CurrentModel: "admin@local/my-model", + CurrentModel: "admin/my-model", }, "ctrl": { Models: map[string]jujuclient.ModelDetails{ - "admin@local/admin": ctrlAdminModelDetails, + "admin/admin": ctrlAdminModelDetails, }, }, } @@ -75,6 +90,21 @@ c.Assert(models, gc.HasLen, 0) } +func (s *ModelsFileSuite) TestMigrateLegacyLocal(c *gc.C) { + err := ioutil.WriteFile(jujuclient.JujuModelsPath(), []byte(testLegacyModelsYAML), 0644) + c.Assert(err, jc.ErrorIsNil) + + models, err := jujuclient.ReadModelsFile(jujuclient.JujuModelsPath()) + c.Assert(err, jc.ErrorIsNil) + + migratedData, err := ioutil.ReadFile(jujuclient.JujuModelsPath()) + c.Assert(err, jc.ErrorIsNil) + migratedModels, err := jujuclient.ParseModels(migratedData) + c.Assert(err, jc.ErrorIsNil) + c.Assert(string(migratedData), jc.DeepEquals, testModelsYAML[1:]) + 
c.Assert(migratedModels, jc.DeepEquals, models) +} + func writeTestModelsFile(c *gc.C) { err := jujuclient.WriteModelsFile(testControllerModels) c.Assert(err, jc.ErrorIsNil) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/jujuclient/models.go juju-core-2.0.0/src/github.com/juju/juju/jujuclient/models.go --- juju-core-2.0~beta15/src/github.com/juju/juju/jujuclient/models.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/jujuclient/models.go 2016-10-13 14:31:49.000000000 +0000 @@ -34,16 +34,63 @@ } return nil, err } - if err := migrateLegacyModels(data); err != nil { - return nil, err - } models, err := ParseModels(data) if err != nil { return nil, err } + if err := migrateLocalModelUsers(models); err != nil { + return nil, err + } return models, nil } +// migrateLocalModelUsers strips any @local domains from any qualified model names. +func migrateLocalModelUsers(usermodels map[string]*ControllerModels) error { + changes := false + for _, modelDetails := range usermodels { + for name, model := range modelDetails.Models { + migratedName, changed, err := migrateModelName(name) + if err != nil { + return errors.Trace(err) + } + if !changed { + continue + } + delete(modelDetails.Models, name) + modelDetails.Models[migratedName] = model + changes = true + } + migratedName, changed, err := migrateModelName(modelDetails.CurrentModel) + if err != nil { + return errors.Trace(err) + } + if !changed { + continue + } + modelDetails.CurrentModel = migratedName + } + if changes { + return WriteModelsFile(usermodels) + } + return nil +} + +func migrateModelName(legacyName string) (string, bool, error) { + i := strings.IndexRune(legacyName, '/') + if i < 0 { + return legacyName, false, nil + } + owner := legacyName[:i] + if !names.IsValidUser(owner) { + return "", false, errors.NotValidf("user name %q", owner) + } + if !strings.HasSuffix(owner, "@local") { + return legacyName, false, nil + } + rawModelName := legacyName[i+1:] + return 
JoinOwnerModelName(names.NewUserTag(owner), rawModelName), true, nil +} + // WriteModelsFile marshals to YAML details of the given models // and writes it to the models file. func WriteModelsFile(models map[string]*ControllerModels) error { @@ -79,56 +126,9 @@ CurrentModel string `yaml:"current-model,omitempty"` } -// TODO(axw) 2016-07-14 #1603841 -// Drop this code once we get to 2.0. -func migrateLegacyModels(data []byte) error { - accounts, err := ReadAccountsFile(JujuAccountsPath()) - if err != nil { - return err - } - - type legacyAccountModels struct { - Models map[string]ModelDetails `yaml:"models"` - CurrentModel string `yaml:"current-model,omitempty"` - } - type legacyControllerAccountModels struct { - AccountModels map[string]*legacyAccountModels `yaml:"accounts"` - } - type legacyModelsCollection struct { - ControllerAccountModels map[string]legacyControllerAccountModels `yaml:"controllers"` - } - - var legacy legacyModelsCollection - if err := yaml.Unmarshal(data, &legacy); err != nil { - return errors.Annotate(err, "cannot unmarshal models") - } - result := make(map[string]*ControllerModels) - for controller, controllerAccountModels := range legacy.ControllerAccountModels { - accountDetails, ok := accounts[controller] - if !ok { - continue - } - accountModels, ok := controllerAccountModels.AccountModels[accountDetails.User] - if !ok { - continue - } - result[controller] = &ControllerModels{ - accountModels.Models, - accountModels.CurrentModel, - } - } - if len(result) > 0 { - // Only write if we found at least one, - // which means the file was in legacy - // format. Otherwise leave it alone. - return WriteModelsFile(result) - } - return nil -} - // JoinOwnerModelName returns a model name qualified with the model owner. 
func JoinOwnerModelName(owner names.UserTag, modelName string) string { - return fmt.Sprintf("%s/%s", owner.Canonical(), modelName) + return fmt.Sprintf("%s/%s", owner.Id(), modelName) } // IsQualifiedModelName returns true if the provided model name is qualified diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/jujuclient/models_test.go juju-core-2.0.0/src/github.com/juju/juju/jujuclient/models_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/jujuclient/models_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/jujuclient/models_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -31,28 +31,28 @@ func (s *ModelsSuite) TestModelByNameNoFile(c *gc.C) { err := os.Remove(jujuclient.JujuModelsPath()) c.Assert(err, jc.ErrorIsNil) - details, err := s.store.ModelByName("not-found", "admin@local/admin") + details, err := s.store.ModelByName("not-found", "admin/admin") c.Assert(err, gc.ErrorMatches, "models for controller not-found not found") c.Assert(details, gc.IsNil) } func (s *ModelsSuite) TestModelByNameControllerNotFound(c *gc.C) { - details, err := s.store.ModelByName("not-found", "admin@local/admin") + details, err := s.store.ModelByName("not-found", "admin/admin") c.Assert(err, gc.ErrorMatches, "models for controller not-found not found") c.Assert(details, gc.IsNil) } func (s *ModelsSuite) TestModelByNameModelNotFound(c *gc.C) { - details, err := s.store.ModelByName("kontroll", "admin@local/not-found") - c.Assert(err, gc.ErrorMatches, "model kontroll:admin@local/not-found not found") + details, err := s.store.ModelByName("kontroll", "admin/not-found") + c.Assert(err, gc.ErrorMatches, "model kontroll:admin/not-found not found") c.Assert(details, gc.IsNil) } func (s *ModelsSuite) TestModelByName(c *gc.C) { - details, err := s.store.ModelByName("kontroll", "admin@local/admin") + details, err := s.store.ModelByName("kontroll", "admin/admin") c.Assert(err, jc.ErrorIsNil) c.Assert(details, gc.NotNil) - c.Assert(*details, 
jc.DeepEquals, testControllerModels["kontroll"].Models["admin@local/admin"]) + c.Assert(*details, jc.DeepEquals, testControllerModels["kontroll"].Models["admin/admin"]) } func (s *ModelsSuite) TestAllModelsNoFile(c *gc.C) { @@ -72,7 +72,7 @@ func (s *ModelsSuite) TestCurrentModel(c *gc.C) { current, err := s.store.CurrentModel("kontroll") c.Assert(err, jc.ErrorIsNil) - c.Assert(current, gc.Equals, "admin@local/my-model") + c.Assert(current, gc.Equals, "admin/my-model") } func (s *ModelsSuite) TestCurrentModelNotSet(c *gc.C) { @@ -86,44 +86,44 @@ } func (s *ModelsSuite) TestSetCurrentModelControllerNotFound(c *gc.C) { - err := s.store.SetCurrentModel("not-found", "admin@local/admin") + err := s.store.SetCurrentModel("not-found", "admin/admin") c.Assert(err, jc.Satisfies, errors.IsNotFound) } func (s *ModelsSuite) TestSetCurrentModelModelNotFound(c *gc.C) { - err := s.store.SetCurrentModel("kontroll", "admin@local/not-found") + err := s.store.SetCurrentModel("kontroll", "admin/not-found") c.Assert(err, jc.Satisfies, errors.IsNotFound) } func (s *ModelsSuite) TestSetCurrentModel(c *gc.C) { - err := s.store.SetCurrentModel("kontroll", "admin@local/admin") + err := s.store.SetCurrentModel("kontroll", "admin/admin") c.Assert(err, jc.ErrorIsNil) all, err := jujuclient.ReadModelsFile(jujuclient.JujuModelsPath()) c.Assert(err, jc.ErrorIsNil) - c.Assert(all["kontroll"].CurrentModel, gc.Equals, "admin@local/admin") + c.Assert(all["kontroll"].CurrentModel, gc.Equals, "admin/admin") } func (s *ModelsSuite) TestUpdateModelNewController(c *gc.C) { testModelDetails := jujuclient.ModelDetails{"test.uuid"} - err := s.store.UpdateModel("new-controller", "admin@local/new-model", testModelDetails) + err := s.store.UpdateModel("new-controller", "admin/new-model", testModelDetails) c.Assert(err, jc.ErrorIsNil) models, err := s.store.AllModels("new-controller") c.Assert(err, jc.ErrorIsNil) c.Assert(models, jc.DeepEquals, map[string]jujuclient.ModelDetails{ - "admin@local/new-model": 
testModelDetails, + "admin/new-model": testModelDetails, }) } func (s *ModelsSuite) TestUpdateModelExistingControllerAndModelNewModel(c *gc.C) { testModelDetails := jujuclient.ModelDetails{"test.uuid"} - err := s.store.UpdateModel("kontroll", "admin@local/new-model", testModelDetails) + err := s.store.UpdateModel("kontroll", "admin/new-model", testModelDetails) c.Assert(err, jc.ErrorIsNil) models, err := s.store.AllModels("kontroll") c.Assert(err, jc.ErrorIsNil) c.Assert(models, jc.DeepEquals, map[string]jujuclient.ModelDetails{ - "admin@local/admin": kontrollAdminModelDetails, - "admin@local/my-model": kontrollMyModelModelDetails, - "admin@local/new-model": testModelDetails, + "admin/admin": kontrollAdminModelDetails, + "admin/my-model": kontrollMyModelModelDetails, + "admin/new-model": testModelDetails, }) } @@ -132,9 +132,9 @@ for i := 0; i < 2; i++ { // Twice so we exercise the code path of updating with // identical details. - err := s.store.UpdateModel("kontroll", "admin@local/admin", testModelDetails) + err := s.store.UpdateModel("kontroll", "admin/admin", testModelDetails) c.Assert(err, jc.ErrorIsNil) - details, err := s.store.ModelByName("kontroll", "admin@local/admin") + details, err := s.store.ModelByName("kontroll", "admin/admin") c.Assert(err, jc.ErrorIsNil) c.Assert(*details, jc.DeepEquals, testModelDetails) } @@ -152,42 +152,42 @@ c.Assert(err, jc.ErrorIsNil) testModelDetails := jujuclient.ModelDetails{"test.uuid"} - err = s.store.UpdateModel("ctrl", "admin@local/admin", testModelDetails) + err = s.store.UpdateModel("ctrl", "admin/admin", testModelDetails) c.Assert(err, jc.ErrorIsNil) models, err := s.store.AllModels("ctrl") c.Assert(err, jc.ErrorIsNil) c.Assert(models, jc.DeepEquals, map[string]jujuclient.ModelDetails{ - "admin@local/admin": testModelDetails, + "admin/admin": testModelDetails, }) } func (s *ModelsSuite) TestRemoveModelNoFile(c *gc.C) { err := os.Remove(jujuclient.JujuModelsPath()) c.Assert(err, jc.ErrorIsNil) - err = 
s.store.RemoveModel("not-found", "admin@local/admin") + err = s.store.RemoveModel("not-found", "admin/admin") c.Assert(err, jc.Satisfies, errors.IsNotFound) } func (s *ModelsSuite) TestRemoveModelControllerNotFound(c *gc.C) { - err := s.store.RemoveModel("not-found", "admin@local/admin") + err := s.store.RemoveModel("not-found", "admin/admin") c.Assert(err, jc.Satisfies, errors.IsNotFound) } func (s *ModelsSuite) TestRemoveModelNotFound(c *gc.C) { - err := s.store.RemoveModel("kontroll", "admin@local/not-found") + err := s.store.RemoveModel("kontroll", "admin/not-found") c.Assert(err, jc.Satisfies, errors.IsNotFound) } func (s *ModelsSuite) TestRemoveModel(c *gc.C) { - err := s.store.RemoveModel("kontroll", "admin@local/admin") + err := s.store.RemoveModel("kontroll", "admin/admin") c.Assert(err, jc.ErrorIsNil) - _, err = s.store.ModelByName("kontroll", "admin@local/admin") + _, err = s.store.ModelByName("kontroll", "admin/admin") c.Assert(err, jc.Satisfies, errors.IsNotFound) } func (s *ModelsSuite) TestRemoveControllerRemovesModels(c *gc.C) { store := jujuclient.NewFileClientStore() - err := store.UpdateController("kontroll", jujuclient.ControllerDetails{ + err := store.AddController("kontroll", jujuclient.ControllerDetails{ ControllerUUID: "abc", CACert: "woop", }) @@ -197,6 +197,6 @@ models, err := jujuclient.ReadModelsFile(jujuclient.JujuModelsPath()) c.Assert(err, jc.ErrorIsNil) - _, ok := models["admin@local/kontroll"] + _, ok := models["admin/kontroll"] c.Assert(ok, jc.IsFalse) // kontroll models are removed } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/jujuclient/modelvalidation_test.go juju-core-2.0.0/src/github.com/juju/juju/jujuclient/modelvalidation_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/jujuclient/modelvalidation_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/jujuclient/modelvalidation_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -27,7 +27,6 @@ func (s *ModelValidationSuite) 
TestValidateModelName(c *gc.C) { c.Assert(jujuclient.ValidateModelName("foo@bar/baz"), jc.ErrorIsNil) - c.Assert(jujuclient.ValidateModelName("foo/bar"), gc.ErrorMatches, `validating model name \"foo/bar\": validating model owner name: unqualified user name "foo" not valid`) c.Assert(jujuclient.ValidateModelName("foo"), gc.ErrorMatches, `validating model name "foo": unqualified model name "foo" not valid`) c.Assert(jujuclient.ValidateModelName(""), gc.ErrorMatches, `validating model name "": unqualified model name "" not valid`) c.Assert(jujuclient.ValidateModelName("!"), gc.ErrorMatches, `validating model name "!": unqualified model name "!" not valid`) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/jujuclient/validation.go juju-core-2.0.0/src/github.com/juju/juju/jujuclient/validation.go --- juju-core-2.0~beta15/src/github.com/juju/juju/jujuclient/validation.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/jujuclient/validation.go 2016-10-13 14:31:49.000000000 +0000 @@ -13,9 +13,6 @@ if details.ControllerUUID == "" { return errors.NotValidf("missing uuid, controller details") } - if details.CACert == "" { - return errors.NotValidf("missing ca-cert, controller details") - } return nil } @@ -34,10 +31,6 @@ } // It is valid for a password to be blank, because the client // may use macaroons instead. - // - // TODO(axw) expand validation rules to check that at least - // one of Password or Macaroon is non-empty, for local users. - // External users may have neither. return nil } @@ -52,26 +45,16 @@ // ValidateModelName validates the given model name. 
func ValidateModelName(name string) error { - modelName, owner, err := SplitModelName(name) + modelName, _, err := SplitModelName(name) if err != nil { return errors.Annotatef(err, "validating model name %q", name) } - if err := validateUserTag(owner); err != nil { - err = errors.Annotate(err, "validating model owner name") - return errors.Annotatef(err, "validating model name %q", name) - } if !names.IsValidModelName(modelName) { return errors.NotValidf("model name %q", name) } return nil } -// ValidateAccountName validates the given account name. -func ValidateAccountName(name string) error { - // An account name is a domain-qualified user, e.g. bob@local. - return validateUser(name) -} - // ValidateBootstrapConfig validates the given boostrap config. func ValidateBootstrapConfig(cfg BootstrapConfig) error { if cfg.Cloud == "" { @@ -90,13 +73,5 @@ if !names.IsValidUser(name) { return errors.NotValidf("user name %q", name) } - tag := names.NewUserTag(name) - return validateUserTag(tag) -} - -func validateUserTag(tag names.UserTag) error { - if tag.Id() != tag.Canonical() { - return errors.NotValidf("unqualified user name %q", tag.Id()) - } return nil } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/logfwd/record_test.go juju-core-2.0.0/src/github.com/juju/juju/logfwd/record_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/logfwd/record_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/logfwd/record_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -131,7 +131,7 @@ func (s *LocationSuite) TestParseLocationBogusLine(c *gc.C) { _, err := logfwd.ParseLocation(validLocation.Module, "spam.go:xxx") - c.Check(err, gc.ErrorMatches, `failed to parse sourceLine: line number must be non-negative integer: strconv.ParseInt: parsing "xxx": invalid syntax`) + c.Check(err, gc.ErrorMatches, `failed to parse sourceLine: line number must be non-negative integer: strconv.(ParseInt|Atoi): parsing "xxx": invalid syntax`) } func (s 
*LocationSuite) TestValidateValid(c *gc.C) { diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/logfwd/syslog/config_test.go juju-core-2.0.0/src/github.com/juju/juju/logfwd/syslog/config_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/logfwd/syslog/config_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/logfwd/syslog/config_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -8,6 +8,7 @@ jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" + "github.com/juju/juju/cert" "github.com/juju/juju/logfwd/syslog" coretesting "github.com/juju/juju/testing" ) @@ -54,9 +55,9 @@ cfg := syslog.RawConfig{ Enabled: true, Host: "", - CACert: validCACert, - ClientCert: validCert, - ClientKey: validKey, + CACert: coretesting.CACert, + ClientCert: coretesting.ServerCert, + ClientKey: coretesting.ServerKey, } err := cfg.Validate() @@ -67,9 +68,9 @@ func (s *ConfigSuite) TestRawValidateMissingHostNotEnabled(c *gc.C) { cfg := syslog.RawConfig{ Host: "", - CACert: validCACert, - ClientCert: validCert, - ClientKey: validKey, + CACert: coretesting.CACert, + ClientCert: coretesting.ServerCert, + ClientKey: coretesting.ServerKey, } err := cfg.Validate() @@ -80,9 +81,9 @@ cfg := syslog.RawConfig{ Enabled: true, Host: ":9876", - CACert: validCACert, - ClientCert: validCert, - ClientKey: validKey, + CACert: coretesting.CACert, + ClientCert: coretesting.ServerCert, + ClientKey: coretesting.ServerKey, } err := cfg.Validate() @@ -94,8 +95,8 @@ cfg := syslog.RawConfig{ Host: "a.b.c:9876", CACert: "", - ClientCert: validCert, - ClientKey: validKey, + ClientCert: coretesting.ServerCert, + ClientKey: coretesting.ServerKey, } err := cfg.Validate() @@ -106,9 +107,9 @@ func (s *ConfigSuite) TestRawValidateBadCACert(c *gc.C) { cfg := syslog.RawConfig{ Host: "a.b.c:9876", - CACert: invalidCACert, - ClientCert: validCert, - ClientKey: validKey, + CACert: invalidCert, + ClientCert: coretesting.ServerCert, + ClientKey: coretesting.ServerKey, } err := 
cfg.Validate() @@ -120,8 +121,8 @@ cfg := syslog.RawConfig{ Host: "a.b.c:9876", CACert: "abc", - ClientCert: validCert, - ClientKey: validKey, + ClientCert: coretesting.ServerCert, + ClientKey: coretesting.ServerKey, } err := cfg.Validate() @@ -132,9 +133,9 @@ func (s *ConfigSuite) TestRawValidateMissingCert(c *gc.C) { cfg := syslog.RawConfig{ Host: "a.b.c:9876", - CACert: validCACert, + CACert: coretesting.CACert, ClientCert: "", - ClientKey: validKey, + ClientKey: coretesting.ServerKey, } err := cfg.Validate() @@ -145,9 +146,9 @@ func (s *ConfigSuite) TestRawValidateBadCert(c *gc.C) { cfg := syslog.RawConfig{ Host: "a.b.c:9876", - CACert: validCACert, + CACert: coretesting.CACert, ClientCert: invalidCert, - ClientKey: validKey, + ClientKey: coretesting.ServerKey, } err := cfg.Validate() @@ -158,9 +159,9 @@ func (s *ConfigSuite) TestRawValidateBadCertFormat(c *gc.C) { cfg := syslog.RawConfig{ Host: "a.b.c:9876", - CACert: validCACert, + CACert: coretesting.CACert, ClientCert: "abc", - ClientKey: validKey, + ClientKey: coretesting.ServerKey, } err := cfg.Validate() @@ -171,8 +172,8 @@ func (s *ConfigSuite) TestRawValidateMissingKey(c *gc.C) { cfg := syslog.RawConfig{ Host: "a.b.c:9876", - CACert: validCACert, - ClientCert: validCert, + CACert: coretesting.CACert, + ClientCert: coretesting.ServerCert, ClientKey: "", } @@ -184,8 +185,8 @@ func (s *ConfigSuite) TestRawValidateBadKey(c *gc.C) { cfg := syslog.RawConfig{ Host: "a.b.c:9876", - CACert: validCACert, - ClientCert: validCert, + CACert: coretesting.CACert, + ClientCert: coretesting.ServerCert, ClientKey: invalidKey, } @@ -197,8 +198,8 @@ func (s *ConfigSuite) TestRawValidateBadKeyFormat(c *gc.C) { cfg := syslog.RawConfig{ Host: "a.b.c:9876", - CACert: validCACert, - ClientCert: validCert, + CACert: coretesting.CACert, + ClientCert: coretesting.ServerCert, ClientKey: "abc", } @@ -208,76 +209,19 @@ } func (s *ConfigSuite) TestRawValidateCertKeyMismatch(c *gc.C) { + _, key, err := 
cert.NewDefaultServer(coretesting.CACert, coretesting.CAKey, nil) + c.Assert(err, jc.ErrorIsNil) cfg := syslog.RawConfig{ Host: "a.b.c:9876", - CACert: validCACert, - ClientCert: validCert, - ClientKey: validKey2, + CACert: coretesting.CACert, + ClientCert: coretesting.ServerCert, + ClientKey: key, } - err := cfg.Validate() - + err = cfg.Validate() c.Check(err, gc.ErrorMatches, `validating TLS config: parsing client key pair: (crypto/)?tls: private key does not match public key`) } -var validCACert = ` ------BEGIN CERTIFICATE----- -MIIBjTCCATmgAwIBAgIBADALBgkqhkiG9w0BAQUwHjENMAsGA1UEChMEanVqdTEN -MAsGA1UEAxMEcm9vdDAeFw0xMjExMDkxNjQxMjlaFw0yMjExMDkxNjQ2MjlaMB4x -DTALBgNVBAoTBGp1anUxDTALBgNVBAMTBHJvb3QwWjALBgkqhkiG9w0BAQEDSwAw -SAJBAIW7CbHFJivvV9V6mO8AGzJS9lqjUf6MdEPsdF6wx2Cpzr/lSFIggCwRA138 -9MuFxflxb/3U8Nq+rd8rVtTgFMECAwEAAaNmMGQwDgYDVR0PAQH/BAQDAgCkMBIG -A1UdEwEB/wQIMAYBAf8CAQEwHQYDVR0OBBYEFJafrxqByMN9BwGfcmuF0Lw/1QII -MB8GA1UdIwQYMBaAFJafrxqByMN9BwGfcmuF0Lw/1QIIMAsGCSqGSIb3DQEBBQNB -AHq3vqNhxya3s33DlQfSj9whsnqM0Nm+u8mBX/T76TF5rV7+B33XmYzSyfA3yBi/ -zHaUR/dbHuiNTO+KXs3/+Y4= ------END CERTIFICATE----- -`[1:] - -var validCert = ` ------BEGIN CERTIFICATE----- -MIIBjDCCATigAwIBAgIBADALBgkqhkiG9w0BAQUwHjENMAsGA1UEChMEanVqdTEN -MAsGA1UEAxMEcm9vdDAeFw0xMjExMDkxNjQwMjhaFw0yMjExMDkxNjQ1MjhaMB4x -DTALBgNVBAoTBGp1anUxDTALBgNVBAMTBHJvb3QwWTALBgkqhkiG9w0BAQEDSgAw -RwJAduA1Gnb2VJLxNGfG4St0Qy48Y3q5Z5HheGtTGmti/FjlvQvScCFGCnJG7fKA -Knd7ia3vWg7lxYkIvMPVP88LAQIDAQABo2YwZDAOBgNVHQ8BAf8EBAMCAKQwEgYD -VR0TAQH/BAgwBgEB/wIBATAdBgNVHQ4EFgQUlvKX8vwp0o+VdhdhoA9O6KlOm00w -HwYDVR0jBBgwFoAUlvKX8vwp0o+VdhdhoA9O6KlOm00wCwYJKoZIhvcNAQEFA0EA -LlNpevtFr8gngjAFFAO/FXc7KiZcCrA5rBfb/rEy297lIqmKt5++aVbLEPyxCIFC -r71Sj63TUTFWtRZAxvn9qQ== ------END CERTIFICATE----- -`[1:] - -var validKey = ` ------BEGIN RSA PRIVATE KEY----- -MIIBOQIBAAJAduA1Gnb2VJLxNGfG4St0Qy48Y3q5Z5HheGtTGmti/FjlvQvScCFG -CnJG7fKAKnd7ia3vWg7lxYkIvMPVP88LAQIDAQABAkEAsFOdMSYn+AcF1M/iBfjo 
-uQWJ+Zz+CgwuvumjGNsUtmwxjA+hh0fCn0Ah2nAt4Ma81vKOKOdQ8W6bapvsVDH0 -6QIhAJOkLmEKm4H5POQV7qunRbRsLbft/n/SHlOBz165WFvPAiEAzh9fMf70std1 -sVCHJRQWKK+vw3oaEvPKvkPiV5ui0C8CIGNsvybuo8ald5IKCw5huRlFeIxSo36k -m3OVCXc6zfwVAiBnTUe7WcivPNZqOC6TAZ8dYvdWo4Ifz3jjpEfymjid1wIgBIJv -ERPyv2NQqIFQZIyzUP7LVRIWfpFFOo9/Ww/7s5Y= ------END RSA PRIVATE KEY----- -`[1:] - -var validKey2 = ` ------BEGIN RSA PRIVATE KEY----- -MIIBOQIBAAJBAJkSWRrr81y8pY4dbNgt+8miSKg4z6glp2KO2NnxxAhyyNtQHKvC -+fJALJj+C2NhuvOv9xImxOl3Hg8fFPCXCtcCAwEAAQJATQNzO11NQvJS5U6eraFt -FgSFQ8XZjILtVWQDbJv8AjdbEgKMHEy33icsAKIUAx8jL9kjq6K9kTdAKXZi9grF -UQIhAPD7jccIDUVm785E5eR9eisq0+xpgUIa24Jkn8cAlst5AiEAopxVFl1auer3 -GP2In3pjdL4ydzU/gcRcYisoJqwHpM8CIHtqmaXBPeq5WT9ukb5/dL3+5SJCtmxA -jQMuvZWRe6khAiBvMztYtPSDKXRbCZ4xeQ+kWSDHtok8Y5zNoTeu4nvDrwIgb3Al -fikzPveC5g6S6OvEQmyDz59tYBubm2XHgvxqww0= ------END RSA PRIVATE KEY----- -`[1:] - -var invalidCACert = ` ------BEGIN CERTIFICATE----- -MIIBOgIBAAJAZabKgKInuOxj5vDWLwHHQtK3/45KB+32D15w94Nt83BmuGxo90lw ------END CERTIFICATE----- -`[1:] - var invalidCert = ` -----BEGIN CERTIFICATE----- MIIBOgIBAAJAZabKgKInuOxj5vDWLwHHQtK3/45KB+32D15w94Nt83BmuGxo90lw diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/Makefile juju-core-2.0.0/src/github.com/juju/juju/Makefile --- juju-core-2.0~beta15/src/github.com/juju/juju/Makefile 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/Makefile 2016-10-13 14:31:49.000000000 +0000 @@ -36,7 +36,7 @@ ifeq ($(JUJU_MAKE_GODEPS),true) $(GOPATH)/bin/godeps: - go get launchpad.net/godeps + go get github.com/rogpeppe/godeps godeps: $(GOPATH)/bin/godeps $(GOPATH)/bin/godeps -u dependencies.tsv @@ -94,7 +94,7 @@ @echo Installing dependencies @sudo apt-get --yes install --no-install-recommends \ $(strip $(DEPENDENCIES)) \ - $(shell apt-cache madison juju-mongodb mongodb-server | head -1 | cut -d '|' -f1) + $(shell apt-cache madison juju-mongodb3.2 juju-mongodb mongodb-server | head -1 | cut -d '|' -f1) # Install bash_completion 
install-etc: diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/migration/dialopts.go juju-core-2.0.0/src/github.com/juju/juju/migration/dialopts.go --- juju-core-2.0~beta15/src/github.com/juju/juju/migration/dialopts.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/migration/dialopts.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,24 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package migration + +import ( + "time" + + "github.com/juju/juju/api" +) + +// ControllerDialOpts returns dial parameters suitable for connecting +// from the source controller to the target controller during model +// migrations. The total attempt time can't be too long because the +// areas of the code which make these connections need to be +// interruptable but a number of retries is useful to deal with short +// lived issues. +func ControllerDialOpts() api.DialOpts { + return api.DialOpts{ + DialAddressInterval: 50 * time.Millisecond, + Timeout: 1 * time.Second, + RetryDelay: 100 * time.Millisecond, + } +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/migration/migration.go juju-core-2.0.0/src/github.com/juju/juju/migration/migration.go --- juju-core-2.0~beta15/src/github.com/juju/juju/migration/migration.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/migration/migration.go 2016-10-13 14:31:49.000000000 +0000 @@ -166,10 +166,10 @@ } func uploadCharms(config UploadBinariesConfig) error { - for _, charmUrl := range config.Charms { - logger.Debugf("sending charm %s to target", charmUrl) + for _, charmURL := range config.Charms { + logger.Debugf("sending charm %s to target", charmURL) - curl, err := charm.ParseURL(charmUrl) + curl, err := charm.ParseURL(charmURL) if err != nil { return errors.Annotate(err, "bad charm URL") } @@ -215,22 +215,3 @@ } return nil } - -// PrecheckBackend is implemented by *state.State but defined as an interface -// for 
easier testing. -type PrecheckBackend interface { - NeedsCleanup() (bool, error) -} - -// Precheck checks the database state to make sure that the preconditions -// for model migration are met. -func Precheck(backend PrecheckBackend) error { - cleanupNeeded, err := backend.NeedsCleanup() - if err != nil { - return errors.Annotate(err, "precheck cleanups") - } - if cleanupNeeded { - return errors.New("precheck failed: cleanup needed") - } - return nil -} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/migration/migration_test.go juju-core-2.0.0/src/github.com/juju/juju/migration/migration_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/migration/migration_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/migration/migration_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -17,13 +17,10 @@ gc "gopkg.in/check.v1" "gopkg.in/juju/charm.v6-unstable" - "github.com/juju/juju/controller" "github.com/juju/juju/core/description" - "github.com/juju/juju/environs/config" "github.com/juju/juju/migration" "github.com/juju/juju/provider/dummy" _ "github.com/juju/juju/provider/dummy" - "github.com/juju/juju/state" statetesting "github.com/juju/juju/state/testing" "github.com/juju/juju/testing" "github.com/juju/juju/tools" @@ -196,69 +193,3 @@ _, err = description.Deserialize(bytes) c.Assert(err, jc.ErrorIsNil) } - -type PrecheckSuite struct { - testing.BaseSuite -} - -var _ = gc.Suite(&PrecheckSuite{}) - -// Assert that *state.State implements the PrecheckBackend -var _ migration.PrecheckBackend = (*state.State)(nil) - -func (*PrecheckSuite) TestPrecheckCleanups(c *gc.C) { - backend := &fakePrecheckBackend{} - err := migration.Precheck(backend) - c.Assert(err, jc.ErrorIsNil) -} - -func (*PrecheckSuite) TestPrecheckCleanupsError(c *gc.C) { - backend := &fakePrecheckBackend{ - cleanupError: errors.New("boom"), - } - err := migration.Precheck(backend) - c.Assert(err, gc.ErrorMatches, "precheck cleanups: boom") -} - -func 
(*PrecheckSuite) TestPrecheckCleanupsNeeded(c *gc.C) { - backend := &fakePrecheckBackend{ - cleanupNeeded: true, - } - err := migration.Precheck(backend) - c.Assert(err, gc.ErrorMatches, "precheck failed: cleanup needed") -} - -type fakePrecheckBackend struct { - cleanupNeeded bool - cleanupError error -} - -func (f *fakePrecheckBackend) NeedsCleanup() (bool, error) { - return f.cleanupNeeded, f.cleanupError -} - -type InternalSuite struct { - testing.BaseSuite -} - -var _ = gc.Suite(&InternalSuite{}) - -type stateGetter struct { - cfg *config.Config -} - -func (e *stateGetter) Model() (*state.Model, error) { - return &state.Model{}, nil -} - -func (s *stateGetter) ModelConfig() (*config.Config, error) { - return s.cfg, nil -} - -func (s *stateGetter) ControllerConfig() (controller.Config, error) { - return map[string]interface{}{ - controller.ControllerUUIDKey: testing.ModelTag.Id(), - controller.CACertKey: testing.CACert, - controller.ApiPort: 4321, - }, nil -} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/migration/precheck.go juju-core-2.0.0/src/github.com/juju/juju/migration/precheck.go --- juju-core-2.0~beta15/src/github.com/juju/juju/migration/precheck.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/migration/precheck.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,329 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package migration + +import ( + "fmt" + + "github.com/juju/errors" + "github.com/juju/version" + "gopkg.in/juju/charm.v6-unstable" + "gopkg.in/juju/names.v2" + + "github.com/juju/juju/apiserver/common" + coremigration "github.com/juju/juju/core/migration" + "github.com/juju/juju/state" + "github.com/juju/juju/status" + "github.com/juju/juju/tools" +) + +// PrecheckBackend defines the interface to query Juju's state +// for migration prechecks. 
+type PrecheckBackend interface { + AgentVersion() (version.Number, error) + NeedsCleanup() (bool, error) + Model() (PrecheckModel, error) + AllModels() ([]PrecheckModel, error) + IsUpgrading() (bool, error) + IsMigrationActive(string) (bool, error) + AllMachines() ([]PrecheckMachine, error) + AllApplications() ([]PrecheckApplication, error) + ControllerBackend() (PrecheckBackend, error) +} + +// PrecheckModel describes the state interface a model as needed by +// the migration prechecks. +type PrecheckModel interface { + UUID() string + Name() string + Owner() names.UserTag + Life() state.Life + MigrationMode() state.MigrationMode +} + +// PrecheckMachine describes the state interface for a machine needed +// by migration prechecks. +type PrecheckMachine interface { + Id() string + AgentTools() (*tools.Tools, error) + Life() state.Life + Status() (status.StatusInfo, error) + AgentPresence() (bool, error) + InstanceStatus() (status.StatusInfo, error) + ShouldRebootOrShutdown() (state.RebootAction, error) +} + +// PrecheckApplication describes the state interface for an +// application needed by migration prechecks. +type PrecheckApplication interface { + Name() string + Life() state.Life + CharmURL() (*charm.URL, bool) + AllUnits() ([]PrecheckUnit, error) + MinUnits() int +} + +// PrecheckUnit describes state interface for a unit needed by +// migration prechecks. +type PrecheckUnit interface { + Name() string + AgentTools() (*tools.Tools, error) + Life() state.Life + CharmURL() (*charm.URL, bool) + AgentStatus() (status.StatusInfo, error) + Status() (status.StatusInfo, error) + AgentPresence() (bool, error) +} + +// SourcePrecheck checks the state of the source controller to make +// sure that the preconditions for model migration are met. The +// backend provided must be for the model to be migrated. 
+func SourcePrecheck(backend PrecheckBackend) error { + if err := checkModel(backend); err != nil { + return errors.Trace(err) + } + + if err := checkMachines(backend); err != nil { + return errors.Trace(err) + } + + if err := checkApplications(backend); err != nil { + return errors.Trace(err) + } + + if cleanupNeeded, err := backend.NeedsCleanup(); err != nil { + return errors.Annotate(err, "checking cleanups") + } else if cleanupNeeded { + return errors.New("cleanup needed") + } + + // Check the source controller. + controllerBackend, err := backend.ControllerBackend() + if err != nil { + return errors.Trace(err) + } + if err := checkController(controllerBackend); err != nil { + return errors.Annotate(err, "controller") + } + return nil +} + +func checkModel(backend PrecheckBackend) error { + model, err := backend.Model() + if err != nil { + return errors.Annotate(err, "retrieving model") + } + if model.Life() != state.Alive { + return errors.Errorf("model is %s", model.Life()) + } + if model.MigrationMode() == state.MigrationModeImporting { + return errors.New("model is being imported as part of another migration") + } + return nil +} + +// TargetPrecheck checks the state of the target controller to make +// sure that the preconditions for model migration are met. The +// backend provided must be for the target controller. +func TargetPrecheck(backend PrecheckBackend, modelInfo coremigration.ModelInfo) error { + if err := modelInfo.Validate(); err != nil { + return errors.Trace(err) + } + + // This check is necessary because there is a window between the + // REAP phase and then end of the DONE phase where a model's + // documents have been deleted but the migration isn't quite done + // yet. Migrating a model back into the controller during this + // window can upset the migrationmaster worker. 
+ // + // See also https://lpad.tv/1611391 + if migrating, err := backend.IsMigrationActive(modelInfo.UUID); err != nil { + return errors.Annotate(err, "checking for active migration") + } else if migrating { + return errors.New("model is being migrated out of target controller") + } + + controllerVersion, err := backend.AgentVersion() + if err != nil { + return errors.Annotate(err, "retrieving model version") + } + + if controllerVersion.Compare(modelInfo.AgentVersion) < 0 { + return errors.Errorf("model has higher version than target controller (%s > %s)", + modelInfo.AgentVersion, controllerVersion) + } + + if err := checkController(backend); err != nil { + return errors.Trace(err) + } + + // Check for conflicts with existing models + models, err := backend.AllModels() + if err != nil { + return errors.Annotate(err, "retrieving models") + } + for _, model := range models { + // If the model is importing then it's probably left behind + // from a previous migration attempt. It will be removed + // before the next import. 
+ if model.UUID() == modelInfo.UUID && model.MigrationMode() != state.MigrationModeImporting { + return errors.Errorf("model with same UUID already exists (%s)", modelInfo.UUID) + } + if model.Name() == modelInfo.Name && model.Owner() == modelInfo.Owner { + return errors.Errorf("model named %q already exists", model.Name()) + } + } + + return nil +} + +func checkController(backend PrecheckBackend) error { + model, err := backend.Model() + if err != nil { + return errors.Annotate(err, "retrieving model") + } + if model.Life() != state.Alive { + return errors.Errorf("model is %s", model.Life()) + } + + if upgrading, err := backend.IsUpgrading(); err != nil { + return errors.Annotate(err, "checking for upgrades") + } else if upgrading { + return errors.New("upgrade in progress") + } + + err = checkMachines(backend) + return errors.Trace(err) +} + +func checkMachines(backend PrecheckBackend) error { + modelVersion, err := backend.AgentVersion() + if err != nil { + return errors.Annotate(err, "retrieving model version") + } + + machines, err := backend.AllMachines() + if err != nil { + return errors.Annotate(err, "retrieving machines") + } + for _, machine := range machines { + if machine.Life() != state.Alive { + return errors.Errorf("machine %s is %s", machine.Id(), machine.Life()) + } + + if statusInfo, err := machine.InstanceStatus(); err != nil { + return errors.Annotatef(err, "retrieving machine %s instance status", machine.Id()) + } else if statusInfo.Status != status.Running { + return newStatusError("machine %s not running", machine.Id(), statusInfo.Status) + } + + if statusInfo, err := common.MachineStatus(machine); err != nil { + return errors.Annotatef(err, "retrieving machine %s status", machine.Id()) + } else if statusInfo.Status != status.Started { + return newStatusError("machine %s agent not functioning at this time", + machine.Id(), statusInfo.Status) + } + + if rebootAction, err := machine.ShouldRebootOrShutdown(); err != nil { + return 
errors.Annotatef(err, "retrieving machine %s reboot status", machine.Id()) + } else if rebootAction != state.ShouldDoNothing { + return errors.Errorf("machine %s is scheduled to %s", machine.Id(), rebootAction) + } + + if err := checkAgentTools(modelVersion, machine, "machine "+machine.Id()); err != nil { + return errors.Trace(err) + } + } + return nil +} + +func checkApplications(backend PrecheckBackend) error { + modelVersion, err := backend.AgentVersion() + if err != nil { + return errors.Annotate(err, "retrieving model version") + } + apps, err := backend.AllApplications() + if err != nil { + return errors.Annotate(err, "retrieving applications") + } + for _, app := range apps { + if app.Life() != state.Alive { + return errors.Errorf("application %s is %s", app.Name(), app.Life()) + } + err := checkUnits(app, modelVersion) + if err != nil { + return errors.Trace(err) + } + } + return nil +} + +func checkUnits(app PrecheckApplication, modelVersion version.Number) error { + units, err := app.AllUnits() + if err != nil { + return errors.Annotatef(err, "retrieving units for %s", app.Name()) + } + if len(units) < app.MinUnits() { + return errors.Errorf("application %s is below its minimum units threshold", app.Name()) + } + + appCharmURL, _ := app.CharmURL() + + for _, unit := range units { + if unit.Life() != state.Alive { + return errors.Errorf("unit %s is %s", unit.Name(), unit.Life()) + } + + if err := checkUnitAgentStatus(unit); err != nil { + return errors.Trace(err) + } + + if err := checkAgentTools(modelVersion, unit, "unit "+unit.Name()); err != nil { + return errors.Trace(err) + } + + unitCharmURL, _ := unit.CharmURL() + if appCharmURL.String() != unitCharmURL.String() { + return errors.Errorf("unit %s is upgrading", unit.Name()) + } + } + return nil +} + +func checkUnitAgentStatus(unit PrecheckUnit) error { + statusData, _ := common.UnitStatus(unit) + if statusData.Err != nil { + return errors.Annotatef(statusData.Err, "retrieving unit %s status", 
unit.Name()) + } + agentStatus := statusData.Status.Status + if agentStatus != status.Idle { + return newStatusError("unit %s not idle", unit.Name(), agentStatus) + } + return nil +} + +func checkAgentTools(modelVersion version.Number, agent agentToolsGetter, agentLabel string) error { + tools, err := agent.AgentTools() + if err != nil { + return errors.Annotatef(err, "retrieving tools for %s", agentLabel) + } + agentVersion := tools.Version.Number + if agentVersion != modelVersion { + return errors.Errorf("%s tools don't match model (%s != %s)", + agentLabel, agentVersion, modelVersion) + } + return nil +} + +type agentToolsGetter interface { + AgentTools() (*tools.Tools, error) +} + +func newStatusError(format, id string, s status.Status) error { + msg := fmt.Sprintf(format, id) + if s != status.Empty { + msg += fmt.Sprintf(" (%s)", s) + } + return errors.New(msg) +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/migration/precheck_shim.go juju-core-2.0.0/src/github.com/juju/juju/migration/precheck_shim.go --- juju-core-2.0~beta15/src/github.com/juju/juju/migration/precheck_shim.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/migration/precheck_shim.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,119 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package migration + +import ( + "github.com/juju/errors" + "github.com/juju/version" + + "github.com/juju/juju/state" +) + +// PrecheckShim wraps a *state.State to implement PrecheckBackend. +func PrecheckShim(st *state.State) PrecheckBackend { + return &precheckShim{st} +} + +// precheckShim is untested, but is simple enough to be verified by +// inspection. +type precheckShim struct { + *state.State +} + +// Model implements PrecheckBackend. 
+func (s *precheckShim) Model() (PrecheckModel, error) { + model, err := s.State.Model() + if err != nil { + return nil, errors.Trace(err) + } + return model, nil +} + +// AllModels implements PrecheckBackend. +func (s *precheckShim) AllModels() ([]PrecheckModel, error) { + models, err := s.State.AllModels() + if err != nil { + return nil, errors.Trace(err) + } + out := make([]PrecheckModel, 0, len(models)) + for _, model := range models { + out = append(out, model) + } + return out, nil +} + +// IsMigrationActive implements PrecheckBackend. +func (s *precheckShim) IsMigrationActive(modelUUID string) (bool, error) { + return state.IsMigrationActive(s.State, modelUUID) +} + +// AgentVersion implements PrecheckBackend. +func (s *precheckShim) AgentVersion() (version.Number, error) { + cfg, err := s.State.ModelConfig() + if err != nil { + return version.Zero, errors.Trace(err) + } + vers, ok := cfg.AgentVersion() + if !ok { + return version.Zero, errors.New("no model agent version") + } + return vers, nil +} + +// AllMachines implements PrecheckBackend. +func (s *precheckShim) AllMachines() ([]PrecheckMachine, error) { + machines, err := s.State.AllMachines() + if err != nil { + return nil, errors.Trace(err) + } + out := make([]PrecheckMachine, 0, len(machines)) + for _, machine := range machines { + out = append(out, machine) + } + return out, nil +} + +// AllApplications implements PrecheckBackend. +func (s *precheckShim) AllApplications() ([]PrecheckApplication, error) { + apps, err := s.State.AllApplications() + if err != nil { + return nil, errors.Trace(err) + } + out := make([]PrecheckApplication, 0, len(apps)) + for _, app := range apps { + out = append(out, &precheckAppShim{app}) + } + return out, nil +} + +// ControllerBackend implements PrecheckBackend. 
+func (s *precheckShim) ControllerBackend() (PrecheckBackend, error) { + model, err := s.State.ControllerModel() + if err != nil { + return nil, errors.Trace(err) + } + st, err := s.State.ForModel(model.ModelTag()) + if err != nil { + return nil, errors.Trace(err) + } + return PrecheckShim(st), nil +} + +// precheckAppShim implements PrecheckApplication. +type precheckAppShim struct { + *state.Application +} + +// AllUnits implements PrecheckApplication. +func (s *precheckAppShim) AllUnits() ([]PrecheckUnit, error) { + units, err := s.Application.AllUnits() + if err != nil { + return nil, errors.Trace(err) + } + out := make([]PrecheckUnit, 0, len(units)) + for _, unit := range units { + out = append(out, unit) + } + return out, nil +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/migration/precheck_test.go juju-core-2.0.0/src/github.com/juju/juju/migration/precheck_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/migration/precheck_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/migration/precheck_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,771 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+ +package migration_test + +import ( + "github.com/juju/errors" + jc "github.com/juju/testing/checkers" + "github.com/juju/version" + gc "gopkg.in/check.v1" + "gopkg.in/juju/charm.v6-unstable" + "gopkg.in/juju/names.v2" + + coremigration "github.com/juju/juju/core/migration" + "github.com/juju/juju/migration" + "github.com/juju/juju/state" + "github.com/juju/juju/status" + "github.com/juju/juju/testing" + "github.com/juju/juju/tools" +) + +var ( + modelName = "model-name" + modelUUID = "model-uuid" + modelOwner = names.NewUserTag("owner") + backendVersionBinary = version.MustParseBinary("1.2.3-trusty-amd64") + backendVersion = backendVersionBinary.Number +) + +type SourcePrecheckSuite struct { + precheckBaseSuite +} + +var _ = gc.Suite(&SourcePrecheckSuite{}) + +func sourcePrecheck(backend migration.PrecheckBackend) error { + return migration.SourcePrecheck(backend) +} + +func (*SourcePrecheckSuite) TestSuccess(c *gc.C) { + backend := newHappyBackend() + backend.controllerBackend = newHappyBackend() + err := migration.SourcePrecheck(backend) + c.Assert(err, jc.ErrorIsNil) +} + +func (*SourcePrecheckSuite) TestDyingModel(c *gc.C) { + backend := newFakeBackend() + backend.model.life = state.Dying + err := migration.SourcePrecheck(backend) + c.Assert(err, gc.ErrorMatches, "model is dying") +} + +func (*SourcePrecheckSuite) TestCharmUpgrades(c *gc.C) { + backend := &fakeBackend{ + apps: []migration.PrecheckApplication{ + &fakeApp{ + name: "spanner", + charmURL: "cs:spanner-3", + units: []migration.PrecheckUnit{ + &fakeUnit{name: "spanner/0", charmURL: "cs:spanner-3"}, + &fakeUnit{name: "spanner/1", charmURL: "cs:spanner-2"}, + }, + }, + }, + } + err := migration.SourcePrecheck(backend) + c.Assert(err, gc.ErrorMatches, "unit spanner/1 is upgrading") +} + +func (*SourcePrecheckSuite) TestImportingModel(c *gc.C) { + backend := newFakeBackend() + backend.model.migrationMode = state.MigrationModeImporting + err := migration.SourcePrecheck(backend) + c.Assert(err, 
gc.ErrorMatches, "model is being imported as part of another migration") +} + +func (*SourcePrecheckSuite) TestCleanupsError(c *gc.C) { + backend := newFakeBackend() + backend.cleanupErr = errors.New("boom") + err := migration.SourcePrecheck(backend) + c.Assert(err, gc.ErrorMatches, "checking cleanups: boom") +} + +func (*SourcePrecheckSuite) TestCleanupsNeeded(c *gc.C) { + backend := newFakeBackend() + backend.cleanupNeeded = true + err := migration.SourcePrecheck(backend) + c.Assert(err, gc.ErrorMatches, "cleanup needed") +} + +func (s *SourcePrecheckSuite) TestIsUpgradingError(c *gc.C) { + backend := newFakeBackend() + backend.controllerBackend.isUpgradingErr = errors.New("boom") + err := migration.SourcePrecheck(backend) + c.Assert(err, gc.ErrorMatches, "controller: checking for upgrades: boom") +} + +func (s *SourcePrecheckSuite) TestIsUpgrading(c *gc.C) { + backend := newFakeBackend() + backend.controllerBackend.isUpgrading = true + err := migration.SourcePrecheck(backend) + c.Assert(err, gc.ErrorMatches, "controller: upgrade in progress") +} + +func (s *SourcePrecheckSuite) TestAgentVersionError(c *gc.C) { + s.checkAgentVersionError(c, sourcePrecheck) +} + +func (s *SourcePrecheckSuite) TestMachineRequiresReboot(c *gc.C) { + s.checkRebootRequired(c, sourcePrecheck) +} + +func (s *SourcePrecheckSuite) TestMachineVersionsDontMatch(c *gc.C) { + s.checkMachineVersionsDontMatch(c, sourcePrecheck) +} + +func (s *SourcePrecheckSuite) TestDyingMachine(c *gc.C) { + backend := newBackendWithDyingMachine() + err := migration.SourcePrecheck(backend) + c.Assert(err, gc.ErrorMatches, "machine 0 is dying") +} + +func (s *SourcePrecheckSuite) TestNonStartedMachine(c *gc.C) { + backend := newBackendWithDownMachine() + err := migration.SourcePrecheck(backend) + c.Assert(err.Error(), gc.Equals, "machine 0 agent not functioning at this time (down)") +} + +func (s *SourcePrecheckSuite) TestProvisioningMachine(c *gc.C) { + err := 
migration.SourcePrecheck(newBackendWithProvisioningMachine()) + c.Assert(err.Error(), gc.Equals, "machine 0 not running (allocating)") +} + +func (s *SourcePrecheckSuite) TestDownMachineAgent(c *gc.C) { + err := migration.SourcePrecheck(newBackendWithDownMachineAgent()) + c.Assert(err.Error(), gc.Equals, "machine 1 agent not functioning at this time (down)") +} + +func (s *SourcePrecheckSuite) TestDyingApplication(c *gc.C) { + backend := &fakeBackend{ + apps: []migration.PrecheckApplication{ + &fakeApp{ + name: "foo", + life: state.Dying, + }, + }, + } + err := migration.SourcePrecheck(backend) + c.Assert(err.Error(), gc.Equals, "application foo is dying") +} + +func (s *SourcePrecheckSuite) TestWithPendingMinUnits(c *gc.C) { + backend := &fakeBackend{ + apps: []migration.PrecheckApplication{ + &fakeApp{ + name: "foo", + minunits: 2, + units: []migration.PrecheckUnit{&fakeUnit{name: "foo/0"}}, + }, + }, + } + err := migration.SourcePrecheck(backend) + c.Assert(err.Error(), gc.Equals, "application foo is below its minimum units threshold") +} + +func (s *SourcePrecheckSuite) TestUnitVersionsDontMatch(c *gc.C) { + backend := &fakeBackend{ + apps: []migration.PrecheckApplication{ + &fakeApp{ + name: "foo", + units: []migration.PrecheckUnit{&fakeUnit{name: "foo/0"}}, + }, + &fakeApp{ + name: "bar", + units: []migration.PrecheckUnit{ + &fakeUnit{name: "bar/0"}, + &fakeUnit{name: "bar/1", version: version.MustParseBinary("1.2.4-trusty-ppc64")}, + }, + }, + }, + } + err := migration.SourcePrecheck(backend) + c.Assert(err.Error(), gc.Equals, "unit bar/1 tools don't match model (1.2.4 != 1.2.3)") +} + +func (s *SourcePrecheckSuite) TestDeadUnit(c *gc.C) { + backend := &fakeBackend{ + apps: []migration.PrecheckApplication{ + &fakeApp{ + name: "foo", + units: []migration.PrecheckUnit{ + &fakeUnit{name: "foo/0", life: state.Dead}, + }, + }, + }, + } + err := migration.SourcePrecheck(backend) + c.Assert(err.Error(), gc.Equals, "unit foo/0 is dead") +} + +func (s 
*SourcePrecheckSuite) TestUnitNotIdle(c *gc.C) { + backend := &fakeBackend{ + apps: []migration.PrecheckApplication{ + &fakeApp{ + name: "foo", + units: []migration.PrecheckUnit{ + &fakeUnit{name: "foo/0", agentStatus: status.Failed}, + }, + }, + }, + } + err := migration.SourcePrecheck(backend) + c.Assert(err.Error(), gc.Equals, "unit foo/0 not idle (failed)") +} + +func (s *SourcePrecheckSuite) TestUnitLost(c *gc.C) { + backend := &fakeBackend{ + apps: []migration.PrecheckApplication{ + &fakeApp{ + name: "foo", + units: []migration.PrecheckUnit{ + &fakeUnit{name: "foo/0", lost: true}, + }, + }, + }, + } + err := migration.SourcePrecheck(backend) + c.Assert(err.Error(), gc.Equals, "unit foo/0 not idle (lost)") +} + +func (*SourcePrecheckSuite) TestDyingControllerModel(c *gc.C) { + backend := newFakeBackend() + backend.controllerBackend.model.life = state.Dying + err := migration.SourcePrecheck(backend) + c.Assert(err, gc.ErrorMatches, "controller: model is dying") +} + +func (s *SourcePrecheckSuite) TestControllerAgentVersionError(c *gc.C) { + backend := newFakeBackend() + backend.controllerBackend.agentVersionErr = errors.New("boom") + err := migration.SourcePrecheck(backend) + c.Assert(err, gc.ErrorMatches, "controller: retrieving model version: boom") + +} + +func (s *SourcePrecheckSuite) TestControllerMachineVersionsDontMatch(c *gc.C) { + backend := newFakeBackend() + backend.controllerBackend = newBackendWithMismatchingTools() + err := migration.SourcePrecheck(backend) + c.Assert(err, gc.ErrorMatches, "controller: machine . 
tools don't match model.+") +} + +func (s *SourcePrecheckSuite) TestControllerMachineRequiresReboot(c *gc.C) { + backend := newFakeBackend() + backend.controllerBackend = newBackendWithRebootingMachine() + err := migration.SourcePrecheck(backend) + c.Assert(err, gc.ErrorMatches, "controller: machine 0 is scheduled to reboot") +} + +func (s *SourcePrecheckSuite) TestDyingControllerMachine(c *gc.C) { + backend := &fakeBackend{ + controllerBackend: newBackendWithDyingMachine(), + } + err := migration.SourcePrecheck(backend) + c.Assert(err, gc.ErrorMatches, "controller: machine 0 is dying") +} + +func (s *SourcePrecheckSuite) TestNonStartedControllerMachine(c *gc.C) { + backend := &fakeBackend{ + controllerBackend: newBackendWithDownMachine(), + } + err := migration.SourcePrecheck(backend) + c.Assert(err.Error(), gc.Equals, "controller: machine 0 agent not functioning at this time (down)") +} + +func (s *SourcePrecheckSuite) TestProvisioningControllerMachine(c *gc.C) { + backend := &fakeBackend{ + controllerBackend: newBackendWithProvisioningMachine(), + } + err := migration.SourcePrecheck(backend) + c.Assert(err.Error(), gc.Equals, "controller: machine 0 not running (allocating)") +} + +type TargetPrecheckSuite struct { + precheckBaseSuite + modelInfo coremigration.ModelInfo +} + +var _ = gc.Suite(&TargetPrecheckSuite{}) + +func (s *TargetPrecheckSuite) SetUpTest(c *gc.C) { + s.modelInfo = coremigration.ModelInfo{ + UUID: modelUUID, + Owner: modelOwner, + Name: modelName, + AgentVersion: backendVersion, + } +} + +func (s *TargetPrecheckSuite) runPrecheck(backend migration.PrecheckBackend) error { + return migration.TargetPrecheck(backend, s.modelInfo) +} + +func (s *TargetPrecheckSuite) TestSuccess(c *gc.C) { + err := migration.TargetPrecheck(newHappyBackend(), s.modelInfo) + c.Assert(err, jc.ErrorIsNil) +} + +func (s *TargetPrecheckSuite) TestVersionLessThanSource(c *gc.C) { + backend := newFakeBackend() + s.modelInfo.AgentVersion = version.MustParse("1.2.4") + err 
:= migration.TargetPrecheck(backend, s.modelInfo) + c.Assert(err.Error(), gc.Equals, + `model has higher version than target controller (1.2.4 > 1.2.3)`) +} + +func (s *TargetPrecheckSuite) TestDying(c *gc.C) { + backend := newFakeBackend() + backend.model.life = state.Dying + err := migration.TargetPrecheck(backend, s.modelInfo) + c.Assert(err, gc.ErrorMatches, "model is dying") +} + +func (s *TargetPrecheckSuite) TestMachineRequiresReboot(c *gc.C) { + s.checkRebootRequired(c, s.runPrecheck) +} + +func (s *TargetPrecheckSuite) TestAgentVersionError(c *gc.C) { + s.checkAgentVersionError(c, s.runPrecheck) +} + +func (s *TargetPrecheckSuite) TestIsUpgradingError(c *gc.C) { + backend := &fakeBackend{ + isUpgradingErr: errors.New("boom"), + } + err := migration.TargetPrecheck(backend, s.modelInfo) + c.Assert(err, gc.ErrorMatches, "checking for upgrades: boom") +} + +func (s *TargetPrecheckSuite) TestIsUpgrading(c *gc.C) { + backend := &fakeBackend{ + isUpgrading: true, + } + err := migration.TargetPrecheck(backend, s.modelInfo) + c.Assert(err, gc.ErrorMatches, "upgrade in progress") +} + +func (s *TargetPrecheckSuite) TestIsMigrationActiveError(c *gc.C) { + backend := &fakeBackend{migrationActiveErr: errors.New("boom")} + err := migration.TargetPrecheck(backend, s.modelInfo) + c.Assert(err, gc.ErrorMatches, "checking for active migration: boom") +} + +func (s *TargetPrecheckSuite) TestIsMigrationActive(c *gc.C) { + backend := &fakeBackend{migrationActive: true} + err := migration.TargetPrecheck(backend, s.modelInfo) + c.Assert(err, gc.ErrorMatches, "model is being migrated out of target controller") +} + +func (s *TargetPrecheckSuite) TestMachineVersionsDontMatch(c *gc.C) { + s.checkMachineVersionsDontMatch(c, s.runPrecheck) +} + +func (s *TargetPrecheckSuite) TestDyingMachine(c *gc.C) { + backend := newBackendWithDyingMachine() + err := migration.TargetPrecheck(backend, s.modelInfo) + c.Assert(err, gc.ErrorMatches, "machine 0 is dying") +} + +func (s 
*TargetPrecheckSuite) TestNonStartedMachine(c *gc.C) { + backend := newBackendWithDownMachine() + err := migration.TargetPrecheck(backend, s.modelInfo) + c.Assert(err.Error(), gc.Equals, "machine 0 agent not functioning at this time (down)") +} + +func (s *TargetPrecheckSuite) TestProvisioningMachine(c *gc.C) { + backend := newBackendWithProvisioningMachine() + err := migration.TargetPrecheck(backend, s.modelInfo) + c.Assert(err.Error(), gc.Equals, "machine 0 not running (allocating)") +} + +func (s *TargetPrecheckSuite) TestDownMachineAgent(c *gc.C) { + backend := newBackendWithDownMachineAgent() + err := migration.TargetPrecheck(backend, s.modelInfo) + c.Assert(err.Error(), gc.Equals, "machine 1 agent not functioning at this time (down)") +} + +func (s *TargetPrecheckSuite) TestModelNameAlreadyInUse(c *gc.C) { + backend := newFakeBackend() + backend.models = []migration.PrecheckModel{ + &fakeModel{ + name: modelName, + owner: modelOwner, + }, + } + err := migration.TargetPrecheck(backend, s.modelInfo) + c.Assert(err, gc.ErrorMatches, "model named \"model-name\" already exists") +} + +func (s *TargetPrecheckSuite) TestModelNameOverlapOkForDifferentOwner(c *gc.C) { + backend := newFakeBackend() + backend.models = []migration.PrecheckModel{ + &fakeModel{name: modelName, owner: names.NewUserTag("someone.else")}, + } + err := migration.TargetPrecheck(backend, s.modelInfo) + c.Assert(err, jc.ErrorIsNil) +} + +func (s *TargetPrecheckSuite) TestUUIDAlreadyExists(c *gc.C) { + backend := newFakeBackend() + backend.models = []migration.PrecheckModel{ + &fakeModel{uuid: modelUUID}, + } + err := migration.TargetPrecheck(backend, s.modelInfo) + c.Assert(err.Error(), gc.Equals, "model with same UUID already exists (model-uuid)") +} + +func (s *TargetPrecheckSuite) TestUUIDAlreadyExistsButImporting(c *gc.C) { + backend := newFakeBackend() + backend.models = []migration.PrecheckModel{ + &fakeModel{ + uuid: modelUUID, + migrationMode: state.MigrationModeImporting, + }, + } + err 
:= migration.TargetPrecheck(backend, s.modelInfo) + c.Assert(err, jc.ErrorIsNil) +} + +type precheckRunner func(migration.PrecheckBackend) error + +type precheckBaseSuite struct { + testing.BaseSuite +} + +func (*precheckBaseSuite) checkRebootRequired(c *gc.C, runPrecheck precheckRunner) { + err := runPrecheck(newBackendWithRebootingMachine()) + c.Assert(err, gc.ErrorMatches, "machine 0 is scheduled to reboot") +} + +func (*precheckBaseSuite) checkAgentVersionError(c *gc.C, runPrecheck precheckRunner) { + backend := &fakeBackend{ + agentVersionErr: errors.New("boom"), + } + err := runPrecheck(backend) + c.Assert(err, gc.ErrorMatches, "retrieving model version: boom") +} + +func (*precheckBaseSuite) checkMachineVersionsDontMatch(c *gc.C, runPrecheck precheckRunner) { + err := runPrecheck(newBackendWithMismatchingTools()) + c.Assert(err.Error(), gc.Equals, "machine 1 tools don't match model (1.3.1 != 1.2.3)") +} + +func newHappyBackend() *fakeBackend { + return &fakeBackend{ + machines: []migration.PrecheckMachine{ + &fakeMachine{id: "0"}, + &fakeMachine{id: "1"}, + }, + apps: []migration.PrecheckApplication{ + &fakeApp{ + name: "foo", + units: []migration.PrecheckUnit{&fakeUnit{name: "foo/0"}}, + }, + &fakeApp{ + name: "bar", + units: []migration.PrecheckUnit{ + &fakeUnit{name: "bar/0"}, + &fakeUnit{name: "bar/1"}, + }, + }, + }, + } +} + +func newBackendWithMismatchingTools() *fakeBackend { + return &fakeBackend{ + machines: []migration.PrecheckMachine{ + &fakeMachine{id: "0"}, + &fakeMachine{id: "1", version: version.MustParseBinary("1.3.1-xenial-amd64")}, + }, + } +} + +func newBackendWithRebootingMachine() *fakeBackend { + return &fakeBackend{ + machines: []migration.PrecheckMachine{ + &fakeMachine{id: "0", rebootAction: state.ShouldReboot}, + }, + } +} + +func newBackendWithDyingMachine() *fakeBackend { + return &fakeBackend{ + machines: []migration.PrecheckMachine{ + &fakeMachine{id: "0", life: state.Dying}, + &fakeMachine{id: "1"}, + }, + } +} + +func 
newBackendWithDownMachine() *fakeBackend { + return &fakeBackend{ + machines: []migration.PrecheckMachine{ + &fakeMachine{id: "0", status: status.Down}, + &fakeMachine{id: "1"}, + }, + } +} + +func newBackendWithProvisioningMachine() *fakeBackend { + return &fakeBackend{ + machines: []migration.PrecheckMachine{ + &fakeMachine{id: "0", instanceStatus: status.Provisioning}, + &fakeMachine{id: "1"}, + }, + } +} + +func newBackendWithDownMachineAgent() *fakeBackend { + return &fakeBackend{ + machines: []migration.PrecheckMachine{ + &fakeMachine{id: "0"}, + &fakeMachine{id: "1", lost: true}, + }, + } +} + +func newFakeBackend() *fakeBackend { + return &fakeBackend{ + controllerBackend: &fakeBackend{}, + } +} + +type fakeBackend struct { + agentVersionErr error + + model fakeModel + models []migration.PrecheckModel + + cleanupNeeded bool + cleanupErr error + + isUpgrading bool + isUpgradingErr error + + migrationActive bool + migrationActiveErr error + + machines []migration.PrecheckMachine + allMachinesErr error + + apps []migration.PrecheckApplication + allAppsErr error + + controllerBackend *fakeBackend +} + +func (b *fakeBackend) Model() (migration.PrecheckModel, error) { + return &b.model, nil +} + +func (b *fakeBackend) AllModels() ([]migration.PrecheckModel, error) { + return b.models, nil +} + +func (b *fakeBackend) NeedsCleanup() (bool, error) { + return b.cleanupNeeded, b.cleanupErr +} + +func (b *fakeBackend) AgentVersion() (version.Number, error) { + return backendVersion, b.agentVersionErr +} + +func (b *fakeBackend) IsUpgrading() (bool, error) { + return b.isUpgrading, b.isUpgradingErr +} + +func (b *fakeBackend) IsMigrationActive(string) (bool, error) { + return b.migrationActive, b.migrationActiveErr +} + +func (b *fakeBackend) AllMachines() ([]migration.PrecheckMachine, error) { + return b.machines, b.allMachinesErr +} + +func (b *fakeBackend) AllApplications() ([]migration.PrecheckApplication, error) { + return b.apps, b.allAppsErr + +} + +func (b 
*fakeBackend) ControllerBackend() (migration.PrecheckBackend, error) { + if b.controllerBackend == nil { + return b, nil + } + return b.controllerBackend, nil +} + +type fakeModel struct { + uuid string + name string + owner names.UserTag + life state.Life + migrationMode state.MigrationMode +} + +func (m *fakeModel) UUID() string { + return m.uuid +} + +func (m *fakeModel) Name() string { + return m.name +} + +func (m *fakeModel) Owner() names.UserTag { + return m.owner +} + +func (m *fakeModel) Life() state.Life { + return m.life +} + +func (m *fakeModel) MigrationMode() state.MigrationMode { + return m.migrationMode +} + +type fakeMachine struct { + id string + version version.Binary + life state.Life + status status.Status + instanceStatus status.Status + lost bool + rebootAction state.RebootAction +} + +func (m *fakeMachine) Id() string { + return m.id +} + +func (m *fakeMachine) Life() state.Life { + return m.life +} + +func (m *fakeMachine) Status() (status.StatusInfo, error) { + s := m.status + if s == "" { + // Avoid the need to specify this everywhere. + s = status.Started + } + return status.StatusInfo{Status: s}, nil +} + +func (m *fakeMachine) InstanceStatus() (status.StatusInfo, error) { + s := m.instanceStatus + if s == "" { + // Avoid the need to specify this everywhere. + s = status.Running + } + return status.StatusInfo{Status: s}, nil +} + +func (m *fakeMachine) AgentPresence() (bool, error) { + return !m.lost, nil +} + +func (m *fakeMachine) AgentTools() (*tools.Tools, error) { + // Avoid having to specify the version when it's supposed to match + // the model config. 
+ v := m.version + if v.Compare(version.Zero) == 0 { + v = backendVersionBinary + } + return &tools.Tools{ + Version: v, + }, nil +} + +func (m *fakeMachine) ShouldRebootOrShutdown() (state.RebootAction, error) { + if m.rebootAction == "" { + return state.ShouldDoNothing, nil + } + return m.rebootAction, nil +} + +type fakeApp struct { + name string + life state.Life + charmURL string + units []migration.PrecheckUnit + minunits int +} + +func (a *fakeApp) Name() string { + return a.name +} + +func (a *fakeApp) Life() state.Life { + return a.life +} + +func (a *fakeApp) CharmURL() (*charm.URL, bool) { + url := a.charmURL + if url == "" { + url = "cs:foo-1" + } + return charm.MustParseURL(url), false +} + +func (a *fakeApp) AllUnits() ([]migration.PrecheckUnit, error) { + return a.units, nil +} + +func (a *fakeApp) MinUnits() int { + return a.minunits +} + +type fakeUnit struct { + name string + version version.Binary + life state.Life + charmURL string + agentStatus status.Status + lost bool +} + +func (u *fakeUnit) Name() string { + return u.name +} + +func (u *fakeUnit) AgentTools() (*tools.Tools, error) { + // Avoid having to specify the version when it's supposed to match + // the model config. + v := u.version + if v.Compare(version.Zero) == 0 { + v = backendVersionBinary + } + return &tools.Tools{ + Version: v, + }, nil +} + +func (u *fakeUnit) Life() state.Life { + return u.life +} + +func (u *fakeUnit) CharmURL() (*charm.URL, bool) { + url := u.charmURL + if url == "" { + url = "cs:foo-1" + } + return charm.MustParseURL(url), false +} + +func (u *fakeUnit) AgentStatus() (status.StatusInfo, error) { + s := u.agentStatus + if s == "" { + // Avoid the need to specify this everywhere. 
+ s = status.Idle + } + return status.StatusInfo{Status: s}, nil +} + +func (u *fakeUnit) Status() (status.StatusInfo, error) { + return status.StatusInfo{Status: status.Idle}, nil +} + +func (u *fakeUnit) AgentPresence() (bool, error) { + return !u.lost, nil +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/mongo/mongo.go juju-core-2.0.0/src/github.com/juju/juju/mongo/mongo.go --- juju-core-2.0~beta15/src/github.com/juju/juju/mongo/mongo.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/mongo/mongo.go 2016-10-13 14:31:49.000000000 +0000 @@ -381,9 +381,9 @@ // algorithm defined in Mongo. OplogSize int - // SetNumaControlPolicy preference - whether the user + // SetNUMAControlPolicy preference - whether the user // wants to set the numa control policy when starting mongo. - SetNumaControlPolicy bool + SetNUMAControlPolicy bool } // EnsureServer ensures that the MongoDB server is installed, @@ -411,7 +411,7 @@ } operatingsystem := series.HostSeries() - if err := installMongod(operatingsystem, args.SetNumaControlPolicy); err != nil { + if err := installMongod(operatingsystem, args.SetNUMAControlPolicy); err != nil { // This isn't treated as fatal because the Juju MongoDB // package is likely to be already installed anyway. 
There // could just be a temporary issue with apt-get/yum/whatever @@ -449,7 +449,17 @@ } } - svcConf := newConf(args.DataDir, dbDir, mongoPath, args.StatePort, oplogSizeMB, args.SetNumaControlPolicy, mgoVersion, true) + svcConf := newConf(ConfigArgs{ + DataDir: args.DataDir, + DBDir: dbDir, + MongoPath: mongoPath, + Port: args.StatePort, + OplogSizeMB: oplogSizeMB, + WantNUMACtl: args.SetNUMAControlPolicy, + Version: mgoVersion, + Auth: true, + IPv6: network.SupportsIPv6(), + }) svc, err := newService(ServiceName, svcConf) if err != nil { return err diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/mongo/mongo_test.go juju-core-2.0.0/src/github.com/juju/juju/mongo/mongo_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/mongo/mongo_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/mongo/mongo_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -108,6 +108,20 @@ } } +func (s *MongoSuite) makeConfigArgs(dataDir string) mongo.ConfigArgs { + return mongo.ConfigArgs{ + DataDir: dataDir, + DBDir: dataDir, + MongoPath: mongo.JujuMongod24Path, + Port: 1234, + OplogSizeMB: 1024, + WantNUMACtl: false, + Version: s.mongodVersion, + Auth: true, + IPv6: true, + } +} + func (s *MongoSuite) SetUpTest(c *gc.C) { s.BaseSuite.SetUpTest(c) @@ -207,7 +221,7 @@ } func (s *MongoSuite) TestEnsureServer(c *gc.C) { - dataDir := s.testEnsureServerNumaCtl(c, false) + dataDir := s.testEnsureServerNUMACtl(c, false) s.assertSSLKeyFile(c, dataDir) s.assertSharedSecretFile(c, dataDir) @@ -284,11 +298,11 @@ s.data.CheckCallNames(c, "Installed", "Exists", "Running", "Start") } -func (s *MongoSuite) TestEnsureServerNumaCtl(c *gc.C) { - s.testEnsureServerNumaCtl(c, true) +func (s *MongoSuite) TestEnsureServerNUMACtl(c *gc.C) { + s.testEnsureServerNUMACtl(c, true) } -func (s *MongoSuite) testEnsureServerNumaCtl(c *gc.C, setNumaPolicy bool) string { +func (s *MongoSuite) testEnsureServerNUMACtl(c *gc.C, setNUMAPolicy bool) string { dataDir := c.MkDir() 
dbDir := filepath.Join(dataDir, "db") @@ -297,7 +311,7 @@ testing.PatchExecutableAsEchoArgs(c, s, pm.PackageManager) testParams := makeEnsureServerParams(dataDir) - testParams.SetNumaControlPolicy = setNumaPolicy + testParams.SetNUMAControlPolicy = setNUMAPolicy err = mongo.EnsureServer(testParams) c.Assert(err, jc.ErrorIsNil) @@ -309,7 +323,7 @@ service := installed[0] c.Assert(service.Name(), gc.Equals, "juju-db") c.Assert(service.Conf().Desc, gc.Equals, "juju state database") - if setNumaPolicy { + if setNUMAPolicy { stripped := strings.Replace(service.Conf().ExtraScript, "\n", "", -1) c.Assert(stripped, gc.Matches, `.* sysctl .*`) } else { @@ -579,30 +593,34 @@ } func (s *MongoSuite) TestNewServiceWithReplSet(c *gc.C) { - dataDir := c.MkDir() - - conf := mongo.NewConf(dataDir, dataDir, mongo.JujuMongod24Path, 1234, 1024, false, s.mongodVersion, true) + conf := mongo.NewConf(s.makeConfigArgs(c.MkDir())) c.Assert(strings.Contains(conf.ExecStart, "--replSet"), jc.IsTrue) } func (s *MongoSuite) TestNewServiceWithNumCtl(c *gc.C) { - dataDir := c.MkDir() - - conf := mongo.NewConf(dataDir, dataDir, mongo.JujuMongod24Path, 1234, 1024, true, s.mongodVersion, true) + args := s.makeConfigArgs(c.MkDir()) + args.WantNUMACtl = true + conf := mongo.NewConf(args) c.Assert(conf.ExtraScript, gc.Not(gc.Matches), "") } -func (s *MongoSuite) TestNewServiceIPv6(c *gc.C) { - dataDir := c.MkDir() - - conf := mongo.NewConf(dataDir, dataDir, mongo.JujuMongod24Path, 1234, 1024, false, s.mongodVersion, true) +func (s *MongoSuite) TestNewServiceWithIPv6(c *gc.C) { + args := s.makeConfigArgs(c.MkDir()) + args.IPv6 = true + conf := mongo.NewConf(args) c.Assert(strings.Contains(conf.ExecStart, "--ipv6"), jc.IsTrue) } -func (s *MongoSuite) TestNewServiceWithJournal(c *gc.C) { - dataDir := c.MkDir() +func (s *MongoSuite) TestNewServiceWithoutIPv6(c *gc.C) { + args := s.makeConfigArgs(c.MkDir()) + args.IPv6 = false + conf := mongo.NewConf(args) + c.Assert(strings.Contains(conf.ExecStart, 
"--ipv6"), jc.IsFalse) +} - conf := mongo.NewConf(dataDir, dataDir, mongo.JujuMongod24Path, 1234, 1024, false, s.mongodVersion, true) +func (s *MongoSuite) TestNewServiceWithJournal(c *gc.C) { + args := s.makeConfigArgs(c.MkDir()) + conf := mongo.NewConf(args) c.Assert(conf.ExecStart, gc.Matches, `.* --journal.*`) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/mongo/open.go juju-core-2.0.0/src/github.com/juju/juju/mongo/open.go --- juju-core-2.0~beta15/src/github.com/juju/juju/mongo/open.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/mongo/open.go 2016-10-13 14:31:49.000000000 +0000 @@ -19,17 +19,19 @@ "github.com/juju/juju/cert" ) -// SocketTimeout should be long enough that -// even a slow mongo server will respond in that -// length of time. Since mongo servers ping themselves -// every 10 seconds, we use a value of just over 2 -// ping periods to allow for delayed pings due to -// issues such as CPU starvation etc. -const SocketTimeout = 21 * time.Second - -// defaultDialTimeout should be representative of -// the upper bound of time taken to dial a mongo -// server from within the same cloud/private network. +// SocketTimeout should be long enough that even a slow mongo server +// will respond in that length of time, and must also be long enough +// to allow for completion of heavyweight queries. +// +// Note: 1 minute is mgo's default socket timeout value. +// +// Also note: We have observed mongodb occasionally getting "stuck" +// for over 30s in the field. +const SocketTimeout = time.Minute + +// defaultDialTimeout should be representative of the upper bound of +// time taken to dial a mongo server from within the same +// cloud/private network. 
const defaultDialTimeout = 30 * time.Second // DialOpts holds configuration parameters that control the @@ -125,26 +127,27 @@ tlsConfig.RootCAs = pool tlsConfig.ServerName = "juju-mongodb" - dial := func(addr net.Addr) (net.Conn, error) { - c, err := net.Dial("tcp", addr.String()) + dial := func(server *mgo.ServerAddr) (net.Conn, error) { + addr := server.TCPAddr().String() + c, err := net.DialTimeout("tcp", addr, opts.Timeout) if err != nil { - logger.Debugf("connection failed, will retry: %v", err) + logger.Warningf("mongodb connection failed, will retry: %v", err) return nil, err } cc := tls.Client(c, tlsConfig) if err := cc.Handshake(); err != nil { - logger.Debugf("TLS handshake failed: %v", err) + logger.Warningf("TLS handshake failed: %v", err) return nil, err } - logger.Infof("dialled mongo successfully on address %q", addr) + logger.Debugf("dialled mongodb server at %q", addr) return cc, nil } return &mgo.DialInfo{ - Addrs: info.Addrs, - Timeout: opts.Timeout, - Dial: dial, - Direct: opts.Direct, + Addrs: info.Addrs, + Timeout: opts.Timeout, + DialServer: dial, + Direct: opts.Direct, }, nil } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/mongo/oplog.go juju-core-2.0.0/src/github.com/juju/juju/mongo/oplog.go --- juju-core-2.0~beta15/src/github.com/juju/juju/mongo/oplog.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/mongo/oplog.go 2016-10-13 14:31:49.000000000 +0000 @@ -10,7 +10,7 @@ "github.com/juju/errors" "gopkg.in/mgo.v2" "gopkg.in/mgo.v2/bson" - "launchpad.net/tomb" + "gopkg.in/tomb.v1" ) // OplogDoc represents a document in the oplog.rs collection. 
diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/mongo/prealloc_test.go juju-core-2.0.0/src/github.com/juju/juju/mongo/prealloc_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/mongo/prealloc_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/mongo/prealloc_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -96,7 +96,7 @@ desc: "result is non-numeric", output: `Filesystem 1K-blocks Used Available Use% Mounted on /dev/vda1 8124856 1365292 abc 18% /`, - err: `strconv.ParseInt: parsing "abc": invalid syntax`, + err: `strconv.(ParseInt|Atoi): parsing "abc": invalid syntax`, }, { desc: "not enough lines", output: "abc", diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/mongo/service.go juju-core-2.0.0/src/github.com/juju/juju/mongo/service.go --- juju-core-2.0~beta15/src/github.com/juju/juju/mongo/service.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/mongo/service.go 2016-10-13 14:31:49.000000000 +0000 @@ -11,6 +11,7 @@ "github.com/juju/errors" "github.com/juju/utils" + "github.com/juju/juju/network" "github.com/juju/juju/service" "github.com/juju/juju/service/common" ) @@ -135,34 +136,48 @@ return filepath.Join(dataDir, SharedSecretFile) } +// ConfigArgs holds the attributes of a service configuration for mongo. +type ConfigArgs struct { + DataDir, DBDir, MongoPath string + Port, OplogSizeMB int + WantNUMACtl bool + Version Version + Auth bool + IPv6 bool +} + // newConf returns the init system config for the mongo state service. 
-func newConf(dataDir, dbDir, mongoPath string, port, oplogSizeMB int, wantNumaCtl bool, version Version, auth bool) common.Conf { - mongoCmd := mongoPath + +func newConf(args ConfigArgs) common.Conf { + mongoCmd := args.MongoPath + - " --dbpath " + utils.ShQuote(dbDir) + + " --dbpath " + utils.ShQuote(args.DBDir) + " --sslOnNormalPorts" + - " --sslPEMKeyFile " + utils.ShQuote(sslKeyPath(dataDir)) + + " --sslPEMKeyFile " + utils.ShQuote(sslKeyPath(args.DataDir)) + // --sslPEMKeyPassword has to have its argument passed with = thanks to // https://bugs.launchpad.net/juju-core/+bug/1581284. " --sslPEMKeyPassword=ignored" + - " --port " + fmt.Sprint(port) + + " --port " + fmt.Sprint(args.Port) + " --syslog" + " --journal" + " --replSet " + ReplicaSetName + - " --ipv6" + " --quiet" + - " --oplogSize " + strconv.Itoa(oplogSizeMB) + " --oplogSize " + strconv.Itoa(args.OplogSizeMB) + + if args.IPv6 { + mongoCmd = mongoCmd + + " --ipv6" + } - if auth { + if args.Auth { mongoCmd = mongoCmd + " --auth" + - " --keyFile " + utils.ShQuote(sharedSecretPath(dataDir)) + " --keyFile " + utils.ShQuote(sharedSecretPath(args.DataDir)) } else { mongoCmd = mongoCmd + " --noauth" } - if version.StorageEngine != WiredTiger { + if args.Version.StorageEngine != WiredTiger { mongoCmd = mongoCmd + " --noprealloc" + " --smallfiles" @@ -171,7 +186,7 @@ " --storageEngine wiredTiger" } extraScript := "" - if wantNumaCtl { + if args.WantNUMACtl { extraScript = fmt.Sprintf(detectMultiNodeScript, multinodeVarName, multinodeVarName) mongoCmd = fmt.Sprintf(numaCtlWrap, multinodeVarName) + mongoCmd } @@ -190,7 +205,7 @@ // EnsureServiceInstalled is a convenience method to [re]create // the mongo service. 
-func EnsureServiceInstalled(dataDir string, statePort, oplogSizeMB int, setNumaControlPolicy bool, version Version, auth bool) error { +func EnsureServiceInstalled(dataDir string, statePort, oplogSizeMB int, setNUMAControlPolicy bool, version Version, auth bool) error { mongoPath, err := Path(version) if err != nil { return errors.Annotate(err, "cannot get mongo path") @@ -205,7 +220,17 @@ } } - svcConf := newConf(dataDir, dbDir, mongoPath, statePort, oplogSizeMB, setNumaControlPolicy, version, auth) + svcConf := newConf(ConfigArgs{ + DataDir: dataDir, + DBDir: dbDir, + MongoPath: mongoPath, + Port: statePort, + OplogSizeMB: oplogSizeMB, + WantNUMACtl: setNUMAControlPolicy, + Version: version, + Auth: auth, + IPv6: network.SupportsIPv6(), + }) svc, err := newService(ServiceName, svcConf) if err != nil { return errors.Trace(err) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/mongo/service_test.go juju-core-2.0.0/src/github.com/juju/juju/mongo/service_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/mongo/service_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/mongo/service_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -28,7 +28,17 @@ mongodVersion := mongo.Mongo24 port := 12345 oplogSizeMB := 10 - conf := mongo.NewConf(dataDir, dbDir, mongodPath, port, oplogSizeMB, false, mongodVersion, true) + conf := mongo.NewConf(mongo.ConfigArgs{ + DataDir: dataDir, + DBDir: dbDir, + MongoPath: mongodPath, + Port: port, + OplogSizeMB: oplogSizeMB, + WantNUMACtl: false, + Version: mongodVersion, + Auth: true, + IPv6: true, + }) expected := common.Conf{ Desc: "juju state database", @@ -46,9 +56,9 @@ " --syslog" + " --journal" + " --replSet juju" + - " --ipv6" + " --quiet" + " --oplogSize 10" + + " --ipv6" + " --auth" + " --keyFile '/var/lib/juju/shared-secret'" + " --noprealloc" + diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/network/address.go juju-core-2.0.0/src/github.com/juju/juju/network/address.go --- 
juju-core-2.0~beta15/src/github.com/juju/juju/network/address.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/network/address.go 2016-10-13 14:31:49.000000000 +0000 @@ -436,8 +436,14 @@ func publicMatch(addr Address) scopeMatch { switch addr.Scope { case ScopePublic: + if addr.Type == IPv4Address { + return exactScopeIPv4 + } return exactScope case ScopeCloudLocal, ScopeUnknown: + if addr.Type == IPv4Address { + return fallbackScopeIPv4 + } return fallbackScope } return invalidScope @@ -453,8 +459,14 @@ func cloudLocalMatch(addr Address) scopeMatch { switch addr.Scope { case ScopeCloudLocal: + if addr.Type == IPv4Address { + return exactScopeIPv4 + } return exactScope case ScopePublic, ScopeUnknown: + if addr.Type == IPv4Address { + return fallbackScopeIPv4 + } return fallbackScope } return invalidScope @@ -462,6 +474,9 @@ func cloudOrMachineLocalMatch(addr Address) scopeMatch { if addr.Scope == ScopeMachineLocal { + if addr.Type == IPv4Address { + return exactScopeIPv4 + } return exactScope } return cloudLocalMatch(addr) @@ -471,7 +486,9 @@ const ( invalidScope scopeMatch = iota + exactScopeIPv4 exactScope + fallbackScopeIPv4 fallbackScope ) @@ -498,7 +515,7 @@ matches := filterAndCollateAddressIndexes(numAddr, getAddrFunc, matchFunc) // Retrieve the indexes of the addresses with the best scope and type match. - allowedMatchTypes := []scopeMatch{exactScope, fallbackScope} + allowedMatchTypes := []scopeMatch{exactScopeIPv4, exactScope, fallbackScopeIPv4, fallbackScope} for _, matchType := range allowedMatchTypes { indexes, ok := matches[matchType] if ok && len(indexes) > 0 { @@ -513,7 +530,7 @@ matches := filterAndCollateAddressIndexes(numAddr, getAddrFunc, matchFunc) // Retrieve the indexes of the addresses with the best scope and type match. 
- allowedMatchTypes := []scopeMatch{exactScope, fallbackScope} + allowedMatchTypes := []scopeMatch{exactScopeIPv4, exactScope, fallbackScopeIPv4, fallbackScope} var prioritized []int for _, matchType := range allowedMatchTypes { indexes, ok := matches[matchType] @@ -530,7 +547,7 @@ for i := 0; i < numAddr; i++ { matchType := matchFunc(getAddrFunc(i)) switch matchType { - case exactScope, fallbackScope: + case exactScopeIPv4, exactScope, fallbackScopeIPv4, fallbackScope: matches[matchType] = append(matches[matchType], i) } } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/network/address_test.go juju-core-2.0.0/src/github.com/juju/juju/network/address_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/network/address_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/network/address_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -335,12 +335,26 @@ }, 0, }, { - "first public address is picked when both public IPs and public hostnames exist", + "public IP address is picked when both public IPs and public hostnames exist", []network.Address{ network.NewScopedAddress("10.0.0.1", network.ScopeUnknown), network.NewScopedAddress("example.com", network.ScopePublic), network.NewScopedAddress("8.8.8.8", network.ScopePublic), }, + 2, +}, { + "hostname is picked over cloud local address", + []network.Address{ + network.NewScopedAddress("10.0.0.1", network.ScopeUnknown), + network.NewScopedAddress("example.com", network.ScopePublic), + }, + 1, +}, { + "IPv4 preferred over IPv6", + []network.Address{ + network.NewScopedAddress("2001:db8::1", network.ScopePublic), + network.NewScopedAddress("8.8.8.8", network.ScopePublic), + }, 1, }} @@ -398,7 +412,6 @@ network.NewScopedAddress("2001:db8::1", network.ScopePublic), network.NewScopedAddress("fc00::1", network.ScopeCloudLocal), network.NewScopedAddress("8.8.8.8", network.ScopePublic), - network.NewScopedAddress("10.0.0.1", network.ScopeCloudLocal), }, 1, }, { @@ -407,7 +420,6 @@ 
network.NewScopedAddress("8.8.8.8", network.ScopePublic), network.NewScopedAddress("2001:db8::1", network.ScopePublic), network.NewScopedAddress("fc00::1", network.ScopeCloudLocal), - network.NewScopedAddress("10.0.0.1", network.ScopeCloudLocal), }, 2, }} @@ -423,13 +435,21 @@ } var selectInternalMachineTests = []selectTest{{ - "first cloud local address is selected", + "first cloud local IPv4 address is selected", []network.Address{ network.NewScopedAddress("fc00::1", network.ScopeCloudLocal), network.NewScopedAddress("2001:db8::1", network.ScopePublic), network.NewScopedAddress("10.0.0.1", network.ScopeCloudLocal), network.NewScopedAddress("8.8.8.8", network.ScopePublic), }, + 2, +}, { + "first cloud local address is selected", + []network.Address{ + network.NewScopedAddress("fc00::1", network.ScopeCloudLocal), + network.NewScopedAddress("2001:db8::1", network.ScopePublic), + network.NewScopedAddress("8.8.8.8", network.ScopePublic), + }, 0, }, { "first cloud local hostname is selected", @@ -448,7 +468,7 @@ }, 0, }, { - "first machine local address is selected even with public/cloud hostnames", + "first machine local IPv4 address is selected even with public/cloud hostnames", []network.Address{ network.NewScopedAddress("public.example.com", network.ScopePublic), network.NewScopedAddress("::1", network.ScopeMachineLocal), @@ -458,9 +478,19 @@ network.NewScopedAddress("fe80::1", network.ScopeLinkLocal), network.NewScopedAddress("127.0.0.2", network.ScopeMachineLocal), }, + 4, +}, { + "first machine local non-IPv4 address is selected even with public/cloud hostnames", + []network.Address{ + network.NewScopedAddress("public.example.com", network.ScopePublic), + network.NewScopedAddress("::1", network.ScopeMachineLocal), + network.NewScopedAddress("unknown.example.com", network.ScopeUnknown), + network.NewScopedAddress("cloud.internal", network.ScopeCloudLocal), + network.NewScopedAddress("fe80::1", network.ScopeLinkLocal), + }, 1, }, { - "first cloud local hostname is 
selected even with other machine/cloud addresses", + "cloud local IPv4 is selected even with other machine/cloud addresses", []network.Address{ network.NewScopedAddress("169.254.1.1", network.ScopeLinkLocal), network.NewScopedAddress("cloud-unknown.internal", network.ScopeUnknown), @@ -469,6 +499,15 @@ network.NewScopedAddress("127.0.0.1", network.ScopeMachineLocal), network.NewScopedAddress("127.0.0.2", network.ScopeMachineLocal), }, + 4, +}, { + "first cloud local hostname is selected even with other machine/cloud addresses", + []network.Address{ + network.NewScopedAddress("169.254.1.1", network.ScopeLinkLocal), + network.NewScopedAddress("cloud-unknown.internal", network.ScopeUnknown), + network.NewScopedAddress("cloud-local.internal", network.ScopeCloudLocal), + network.NewScopedAddress("fc00::1", network.ScopeCloudLocal), + }, 2, }} @@ -499,7 +538,7 @@ }, []string{"8.8.8.8:9999"}, }, { - "a cloud local IPv4 addresses are selected", + "cloud local IPv4 addresses are selected", []network.HostPort{ {network.NewScopedAddress("10.1.0.1", network.ScopeCloudLocal), 8888}, {network.NewScopedAddress("8.8.8.8", network.ScopePublic), 123}, @@ -515,14 +554,22 @@ }, []string{}, }, { - "cloud local addresses are preferred to a public addresses", + "cloud local IPv4 addresses are preferred to a public addresses", []network.HostPort{ {network.NewScopedAddress("2001:db8::1", network.ScopePublic), 123}, {network.NewScopedAddress("fc00::1", network.ScopeCloudLocal), 123}, {network.NewScopedAddress("8.8.8.8", network.ScopePublic), 123}, {network.NewScopedAddress("10.0.0.1", network.ScopeCloudLocal), 4444}, }, - []string{"[fc00::1]:123", "10.0.0.1:4444"}, + []string{"10.0.0.1:4444"}, +}, { + "cloud local IPv6 addresses are preferred to a public addresses", + []network.HostPort{ + {network.NewScopedAddress("2001:db8::1", network.ScopePublic), 123}, + {network.NewScopedAddress("fc00::1", network.ScopeCloudLocal), 123}, + {network.NewScopedAddress("8.8.8.8", network.ScopePublic), 
123}, + }, + []string{"[fc00::1]:123"}, }} func (s *AddressSuite) TestSelectInternalHostPorts(c *gc.C) { @@ -543,7 +590,7 @@ }, []string{"8.8.8.8:9999"}, }, { - "a cloud local IPv4 addresses are selected", + "cloud local IPv4 addresses are selected", []network.HostPort{ {network.NewScopedAddress("10.1.0.1", network.ScopeCloudLocal), 8888}, {network.NewScopedAddress("8.8.8.8", network.ScopePublic), 123}, @@ -566,7 +613,7 @@ {network.NewScopedAddress("8.8.8.8", network.ScopePublic), 123}, {network.NewScopedAddress("10.0.0.1", network.ScopeCloudLocal), 4444}, }, - []string{"[fc00::1]:123", "10.0.0.1:4444", "[2001:db8::1]:123", "8.8.8.8:123"}, + []string{"10.0.0.1:4444", "[fc00::1]:123", "8.8.8.8:123", "[2001:db8::1]:123"}, }} func (s *AddressSuite) TestPrioritizeInternalHostPorts(c *gc.C) { diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/network/export_test.go juju-core-2.0.0/src/github.com/juju/juju/network/export_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/network/export_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/network/export_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -3,4 +3,7 @@ package network -var NetLookupIP = &netLookupIP +var ( + NetLookupIP = &netLookupIP + NetListen = &netListen +) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/network/hostport_test.go juju-core-2.0.0/src/github.com/juju/juju/network/hostport_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/network/hostport_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/network/hostport_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -219,13 +219,13 @@ err: `cannot parse " " as address:port: missing port in address `, }, { input: ":", - err: `cannot parse ":" port: strconv.ParseInt: parsing "": invalid syntax`, + err: `cannot parse ":" port: strconv.(ParseInt|Atoi): parsing "": invalid syntax`, }, { input: "host", err: `cannot parse "host" as address:port: missing port in 
address host`, }, { input: "host:port", - err: `cannot parse "host:port" port: strconv.ParseInt: parsing "port": invalid syntax`, + err: `cannot parse "host:port" port: strconv.(ParseInt|Atoi): parsing "port": invalid syntax`, }, { input: "::1", err: `cannot parse "::1" as address:port: too many colons in address ::1`, @@ -234,7 +234,7 @@ err: `cannot parse "1.2.3.4" as address:port: missing port in address 1.2.3.4`, }, { input: "1.2.3.4:foo", - err: `cannot parse "1.2.3.4:foo" port: strconv.ParseInt: parsing "foo": invalid syntax`, + err: `cannot parse "1.2.3.4:foo" port: strconv.(ParseInt|Atoi): parsing "foo": invalid syntax`, }} { c.Logf("test %d: input %q", i, test.input) // First test all error cases with a single argument. diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/network/network.go juju-core-2.0.0/src/github.com/juju/juju/network/network.go --- juju-core-2.0~beta15/src/github.com/juju/juju/network/network.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/network/network.go 2016-10-13 14:31:49.000000000 +0000 @@ -303,6 +303,23 @@ return ipNet.String() } +// ProviderInterfaceInfo holds enough information to identify an +// interface or link layer device to a provider so that it can be +// queried or manipulated. Its initial purpose is to pass to +// provider.ReleaseContainerAddresses. +type ProviderInterfaceInfo struct { + // InterfaceName is the raw OS-specific network device name (e.g. + // "eth1", even for a VLAN eth1.42 virtual interface). + InterfaceName string + + // ProviderId is a provider-specific NIC id. + ProviderId Id + + // MACAddress is the network interface's hardware MAC address + // (e.g. "aa:bb:cc:dd:ee:ff"). + MACAddress string +} + // LXCNetDefaultConfig is the location of the default network config // of the lxc package. It's exported to allow cross-package testing. 
var LXCNetDefaultConfig = "/etc/default/lxc-net" diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/network/utils.go juju-core-2.0.0/src/github.com/juju/juju/network/utils.go --- juju-core-2.0~beta15/src/github.com/juju/juju/network/utils.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/network/utils.go 2016-10-13 14:31:49.000000000 +0000 @@ -5,7 +5,10 @@ import ( "bufio" + "io/ioutil" + "net" "os" + "path/filepath" "strings" "github.com/juju/errors" @@ -161,3 +164,93 @@ return parsedValues, nil } + +var netListen = net.Listen + +// SupportsIPv6 reports whether the platform supports IPv6 networking +// functionality. +// +// Source: https://github.com/golang/net/blob/master/internal/nettest/stack.go +func SupportsIPv6() bool { + ln, err := netListen("tcp6", "[::1]:0") + if err != nil { + return false + } + ln.Close() + return true +} + +// SysClassNetRoot is the full Linux SYSFS path containing information about +// each network interface on the system. Used as argument to +// ParseInterfaceType(). +const SysClassNetPath = "/sys/class/net" + +// ParseInterfaceType parses the DEVTYPE attribute from the Linux kernel +// userspace SYSFS location "/uevent" and returns it as +// InterfaceType. SysClassNetPath should be passed as sysPath. Returns +// UnknownInterface if the type cannot be reliably determined for any reason. 
+// +// Example call: network.ParseInterfaceType(network.SysClassNetPath, "br-eth1") +func ParseInterfaceType(sysPath, interfaceName string) InterfaceType { + const deviceType = "DEVTYPE=" + location := filepath.Join(sysPath, interfaceName, "uevent") + + data, err := ioutil.ReadFile(location) + if err != nil { + logger.Debugf("ignoring error reading %q: %v", location, err) + return UnknownInterface + } + + devtype := "" + lines := strings.Fields(string(data)) + for _, line := range lines { + if !strings.HasPrefix(line, deviceType) { + continue + } + + devtype = strings.TrimPrefix(line, deviceType) + switch devtype { + case "bridge": + return BridgeInterface + case "vlan": + return VLAN_8021QInterface + case "bond": + return BondInterface + case "": + // DEVTYPE is not present for some types, like Ethernet and loopback + // interfaces, so if missing do not try to guess. + break + } + } + + return UnknownInterface +} + +// GetBridgePorts extracts and returns the names of all interfaces configured as +// ports of the given bridgeName from the Linux kernel userspace SYSFS location +// "/brif/*". SysClassNetPath should be passed as sysPath. +// Returns an empty result if the ports cannot be determined reliably for any +// reason, or if there are no configured ports for the bridge. +// +// Example call: network.GetBridgePorts(network.SysClassNetPath, "br-eth1") +func GetBridgePorts(sysPath, bridgeName string) []string { + portsGlobPath := filepath.Join(sysPath, bridgeName, "brif", "*") + // Glob ignores I/O errors and can only return ErrBadPattern, which we treat + // as no results, but for debugging we're still logging the error. + paths, err := filepath.Glob(portsGlobPath) + if err != nil { + logger.Debugf("ignoring error traversing path %q: %v", portsGlobPath, err) + } + + if len(paths) == 0 { + return nil + } + + // We need to convert full paths like /sys/class/net/br-eth0/brif/eth0 to + // just names. 
+ names := make([]string, len(paths)) + for i := range paths { + names[i] = filepath.Base(paths[i]) + } + return names +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/network/utils_test.go juju-core-2.0.0/src/github.com/juju/juju/network/utils_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/network/utils_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/network/utils_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -6,10 +6,13 @@ package network_test import ( + "errors" "fmt" "io/ioutil" + "net" "os" "path/filepath" + "strings" "github.com/juju/testing" jc "github.com/juju/testing/checkers" @@ -119,3 +122,110 @@ SearchDomains: []string{"two", "three"}, }) } + +func (s *UtilsSuite) TestSupportsIPv6Error(c *gc.C) { + s.PatchValue(network.NetListen, func(netFamily, bindAddress string) (net.Listener, error) { + c.Check(netFamily, gc.Equals, "tcp6") + c.Check(bindAddress, gc.Equals, "[::1]:0") + return nil, errors.New("boom!") + }) + c.Check(network.SupportsIPv6(), jc.IsFalse) +} + +func (s *UtilsSuite) TestSupportsIPv6OK(c *gc.C) { + s.PatchValue(network.NetListen, func(_, _ string) (net.Listener, error) { + return &mockListener{}, nil + }) + c.Check(network.SupportsIPv6(), jc.IsTrue) +} + +func (*UtilsSuite) TestParseInterfaceType(c *gc.C) { + fakeSysPath := filepath.Join(c.MkDir(), network.SysClassNetPath) + err := os.MkdirAll(fakeSysPath, 0700) + c.Check(err, jc.ErrorIsNil) + + writeFakeUEvent := func(interfaceName string, lines ...string) string { + fakeInterfacePath := filepath.Join(fakeSysPath, interfaceName) + err := os.MkdirAll(fakeInterfacePath, 0700) + c.Check(err, jc.ErrorIsNil) + + fakeUEventPath := filepath.Join(fakeInterfacePath, "uevent") + contents := strings.Join(lines, "\n") + err = ioutil.WriteFile(fakeUEventPath, []byte(contents), 0644) + c.Check(err, jc.ErrorIsNil) + return fakeUEventPath + } + + result := network.ParseInterfaceType(fakeSysPath, "missing") + c.Check(result, gc.Equals, 
network.UnknownInterface) + + writeFakeUEvent("eth0", "IFINDEX=1", "INTERFACE=eth0") + result = network.ParseInterfaceType(fakeSysPath, "eth0") + c.Check(result, gc.Equals, network.UnknownInterface) + + fakeUEventPath := writeFakeUEvent("eth0.42", "DEVTYPE=vlan") + result = network.ParseInterfaceType(fakeSysPath, "eth0.42") + c.Check(result, gc.Equals, network.VLAN_8021QInterface) + + os.Chmod(fakeUEventPath, 0000) // permission denied error is OK + result = network.ParseInterfaceType(fakeSysPath, "eth0.42") + c.Check(result, gc.Equals, network.UnknownInterface) + + writeFakeUEvent("bond0", "DEVTYPE=bond") + result = network.ParseInterfaceType(fakeSysPath, "bond0") + c.Check(result, gc.Equals, network.BondInterface) + + writeFakeUEvent("br-ens4", "DEVTYPE=bridge") + result = network.ParseInterfaceType(fakeSysPath, "br-ens4") + c.Check(result, gc.Equals, network.BridgeInterface) + + // First DEVTYPE found wins. + writeFakeUEvent("foo", "DEVTYPE=vlan", "DEVTYPE=bridge") + result = network.ParseInterfaceType(fakeSysPath, "foo") + c.Check(result, gc.Equals, network.VLAN_8021QInterface) + + writeFakeUEvent("fake", "DEVTYPE=warp-drive") + result = network.ParseInterfaceType(fakeSysPath, "fake") + c.Check(result, gc.Equals, network.UnknownInterface) +} + +func (*UtilsSuite) TestGetBridgePorts(c *gc.C) { + fakeSysPath := filepath.Join(c.MkDir(), network.SysClassNetPath) + err := os.MkdirAll(fakeSysPath, 0700) + c.Check(err, jc.ErrorIsNil) + + writeFakePorts := func(bridgeName string, portNames ...string) { + fakePortsPath := filepath.Join(fakeSysPath, bridgeName, "brif") + err := os.MkdirAll(fakePortsPath, 0700) + c.Check(err, jc.ErrorIsNil) + + for _, portName := range portNames { + portPath := filepath.Join(fakePortsPath, portName) + err = ioutil.WriteFile(portPath, []byte(""), 0644) + c.Check(err, jc.ErrorIsNil) + } + } + + result := network.GetBridgePorts(fakeSysPath, "missing") + c.Check(result, gc.IsNil) + + writeFakePorts("br-eth0") + result = 
network.GetBridgePorts(fakeSysPath, "br-eth0") + c.Check(result, gc.IsNil) + + writeFakePorts("br-eth0", "eth0") + result = network.GetBridgePorts(fakeSysPath, "br-eth0") + c.Check(result, jc.DeepEquals, []string{"eth0"}) + + writeFakePorts("br-ovs", "eth0", "eth1", "eth2") + result = network.GetBridgePorts(fakeSysPath, "br-ovs") + c.Check(result, jc.DeepEquals, []string{"eth0", "eth1", "eth2"}) +} + +type mockListener struct { + net.Listener +} + +func (*mockListener) Close() error { + return nil +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/patches/001-mgo.v2-issue-277-fix.diff juju-core-2.0.0/src/github.com/juju/juju/patches/001-mgo.v2-issue-277-fix.diff --- juju-core-2.0~beta15/src/github.com/juju/juju/patches/001-mgo.v2-issue-277-fix.diff 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/patches/001-mgo.v2-issue-277-fix.diff 1970-01-01 00:00:00.000000000 +0000 @@ -1,106 +0,0 @@ -diff --git a/session.go b/session.go -index a8ad115..75cb838 100644 - -This applies the minimal changes to fix the mgo duplicate key error, -see https://github.com/go-mgo/mgo/pull/291 and https://github.com/go-mgo/mgo/pull/302 - -It also includes logging so we can see that the patch is applied. 
- -Diff on github: https://github.com/go-mgo/mgo/compare/v2...babbageclunk:fix-277-v2-minimal?expand=1 -Generated with "git diff v2..fix-277-v2-minimal" - -Apply from $GOPATH/src with: patch -p1 < github.com/juju/juju/patches/001-mgo.v2-issue-277-fix.diff - ---- a/gopkg.in/mgo.v2/session.go -+++ b/gopkg.in/mgo.v2/session.go -@@ -41,6 +41,7 @@ import ( - "sync" - "time" - -+ "github.com/juju/loggo" - "gopkg.in/mgo.v2/bson" - ) - -@@ -144,9 +145,18 @@ type Iter struct { - var ( - ErrNotFound = errors.New("not found") - ErrCursor = errors.New("invalid cursor") -+ -+ logPatchedOnce sync.Once -+ logger = loggo.GetLogger("mgo") - ) - --const defaultPrefetch = 0.25 -+const ( -+ defaultPrefetch = 0.25 -+ -+ // How many times we will retry an upsert if it produces duplicate -+ // key errors. -+ maxUpsertRetries = 5 -+) - - // Dial establishes a new session to the cluster identified by the given seed - // server(s). The session will enable communication with all of the servers in -@@ -410,6 +420,16 @@ func (addr *ServerAddr) TCPAddr() *net.TCPAddr { - - // DialWithInfo establishes a new session to the cluster identified by info. - func DialWithInfo(info *DialInfo) (*Session, error) { -+ // This is using loggo because that can be done here in a -+ // localised patch, while using mgo's logging would need a change -+ // in Juju to call mgo.SetLogger. It's in this short-lived patch -+ // as a stop-gap because it's proving difficult to tell if the -+ // patch is applied in a running system. If you see it in -+ // committed code then something has gone very awry - please -+ // complain loudly! 
(babbageclunk) -+ logPatchedOnce.Do(func() { -+ logger.Debugf("duplicate key error patch applied") -+ }) - addrs := make([]string, len(info.Addrs)) - for i, addr := range info.Addrs { - p := strings.LastIndexAny(addr, "]:") -@@ -2478,7 +2498,16 @@ func (c *Collection) Upsert(selector interface{}, update interface{}) (info *Cha - Flags: 1, - Upsert: true, - } -- lerr, err := c.writeOp(&op, true) -+ var lerr *LastError -+ // <= to allow for the first attempt (not a retry). -+ for i := 0; i <= maxUpsertRetries; i++ { -+ lerr, err = c.writeOp(&op, true) -+ // Retry duplicate key errors on upserts. -+ // https://docs.mongodb.com/v3.2/reference/method/db.collection.update/#use-unique-indexes -+ if !IsDup(err) { -+ break -+ } -+ } - if err == nil && lerr != nil { - info = &ChangeInfo{} - if lerr.UpdatedExisting { -@@ -4208,13 +4237,22 @@ func (q *Query) Apply(change Change, result interface{}) (info *ChangeInfo, err - session.SetMode(Strong, false) - - var doc valueResult -- err = session.DB(dbname).Run(&cmd, &doc) -- if err != nil { -- if qerr, ok := err.(*QueryError); ok && qerr.Message == "No matching object found" { -- return nil, ErrNotFound -+ for retries := 0; ; retries++ { -+ err = session.DB(dbname).Run(&cmd, &doc) -+ if err != nil { -+ if qerr, ok := err.(*QueryError); ok && qerr.Message == "No matching object found" { -+ return nil, ErrNotFound -+ } -+ if change.Upsert && IsDup(err) && retries < maxUpsertRetries { -+ // Retry duplicate key errors on upserts. -+ // https://docs.mongodb.com/v3.2/reference/method/db.collection.update/#use-unique-indexes -+ continue -+ } -+ return nil, err - } -- return nil, err -+ break // No error, so don't retry. 
- } -+ - if doc.LastError.N == 0 { - return nil, ErrNotFound - } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/patches/README.md juju-core-2.0.0/src/github.com/juju/juju/patches/README.md --- juju-core-2.0~beta15/src/github.com/juju/juju/patches/README.md 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/patches/README.md 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,13 @@ +Emergency patches for dependencies +================================== + +Files in this directory named `*.patch` or `*.diff` will be applied to +the source tree before building Juju for release. The expectation is +that these should be changes that will be accepted upstream, but that +we need to apply sooner. + +They're applied with `$GOPATH/src` as the current directory, and with +`-p1` to strip one component off the start of the file path. + +For more details, see `lp:juju-release-tools/apply_patches.py` or ask +babbageclunk or mgz in IRC. diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/patches/stats-race-fix.diff juju-core-2.0.0/src/github.com/juju/juju/patches/stats-race-fix.diff --- juju-core-2.0~beta15/src/github.com/juju/juju/patches/stats-race-fix.diff 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/patches/stats-race-fix.diff 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,178 @@ +diff --git a/stats.go b/stats.go +index 59723e6..1c72091 100644 +--- a/gopkg.in/mgo.v2/stats.go ++++ b/gopkg.in/mgo.v2/stats.go +@@ -30,43 +30,29 @@ import ( + "sync" + ) + +-var stats *Stats +-var statsMutex sync.Mutex ++var stats Stats + + func SetStats(enabled bool) { +- statsMutex.Lock() +- if enabled { +- if stats == nil { +- stats = &Stats{} +- } +- } else { +- stats = nil +- } +- statsMutex.Unlock() ++ stats.reset(enabled) + } + +-func GetStats() (snapshot Stats) { +- statsMutex.Lock() +- snapshot = *stats +- statsMutex.Unlock() +- return ++func GetStats() Stats { ++ stats.mu.RLock() ++ defer stats.mu.RUnlock() ++ return 
stats + } + + func ResetStats() { +- statsMutex.Lock() ++ // If we call ResetStats we assume you want to use stats, so we enable ++ // them. + debug("Resetting stats") +- old := stats +- stats = &Stats{} +- // These are absolute values: +- stats.Clusters = old.Clusters +- stats.SocketsInUse = old.SocketsInUse +- stats.SocketsAlive = old.SocketsAlive +- stats.SocketRefs = old.SocketRefs +- statsMutex.Unlock() +- return ++ stats.reset(true) + } + + type Stats struct { ++ mu sync.RWMutex ++ enabled bool ++ + Clusters int + MasterConns int + SlaveConns int +@@ -78,70 +64,74 @@ type Stats struct { + SocketRefs int + } + +-func (stats *Stats) cluster(delta int) { +- if stats != nil { +- statsMutex.Lock() +- stats.Clusters += delta +- statsMutex.Unlock() ++func (stats *Stats) reset(enabled bool) { ++ stats.mu.Lock() ++ defer stats.mu.Unlock() ++ ++ stats.MasterConns = 0 ++ stats.SlaveConns = 0 ++ stats.SentOps = 0 ++ stats.ReceivedOps = 0 ++ stats.ReceivedDocs = 0 ++ ++ if !enabled { ++ // These are absolute values so we don't reset them unless we are ++ // disabling stats altogether. 
++ stats.Clusters = 0 ++ stats.SocketsInUse = 0 ++ stats.SocketsAlive = 0 ++ stats.SocketRefs = 0 + } + } + ++func (stats *Stats) cluster(delta int) { ++ stats.mu.Lock() ++ stats.Clusters += delta ++ stats.mu.Unlock() ++} ++ + func (stats *Stats) conn(delta int, master bool) { +- if stats != nil { +- statsMutex.Lock() +- if master { +- stats.MasterConns += delta +- } else { +- stats.SlaveConns += delta +- } +- statsMutex.Unlock() ++ stats.mu.Lock() ++ if master { ++ stats.MasterConns += delta ++ } else { ++ stats.SlaveConns += delta + } ++ stats.mu.Unlock() + } + + func (stats *Stats) sentOps(delta int) { +- if stats != nil { +- statsMutex.Lock() +- stats.SentOps += delta +- statsMutex.Unlock() +- } ++ stats.mu.Lock() ++ stats.SentOps += delta ++ stats.mu.Unlock() + } + + func (stats *Stats) receivedOps(delta int) { +- if stats != nil { +- statsMutex.Lock() +- stats.ReceivedOps += delta +- statsMutex.Unlock() +- } ++ stats.mu.Lock() ++ stats.ReceivedOps += delta ++ stats.mu.Unlock() + } + + func (stats *Stats) receivedDocs(delta int) { +- if stats != nil { +- statsMutex.Lock() +- stats.ReceivedDocs += delta +- statsMutex.Unlock() +- } ++ stats.mu.Lock() ++ stats.ReceivedDocs += delta ++ stats.mu.Unlock() + } + + func (stats *Stats) socketsInUse(delta int) { +- if stats != nil { +- statsMutex.Lock() +- stats.SocketsInUse += delta +- statsMutex.Unlock() +- } ++ stats.mu.Lock() ++ stats.SocketsInUse += delta ++ stats.mu.Unlock() + } + + func (stats *Stats) socketsAlive(delta int) { +- if stats != nil { +- statsMutex.Lock() +- stats.SocketsAlive += delta +- statsMutex.Unlock() +- } ++ stats.mu.Lock() ++ stats.SocketsAlive += delta ++ stats.mu.Unlock() + } + + func (stats *Stats) socketRefs(delta int) { +- if stats != nil { +- statsMutex.Lock() +- stats.SocketRefs += delta +- statsMutex.Unlock() +- } ++ stats.mu.Lock() ++ stats.SocketRefs += delta ++ stats.mu.Unlock() + } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/payload/status/list.go 
juju-core-2.0.0/src/github.com/juju/juju/payload/status/list.go --- juju-core-2.0~beta15/src/github.com/juju/juju/payload/status/list.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/payload/status/list.go 2016-10-13 14:31:49.000000000 +0000 @@ -9,7 +9,7 @@ "github.com/juju/cmd" "github.com/juju/errors" - "launchpad.net/gnuflag" + "github.com/juju/gnuflag" "github.com/juju/juju/cmd/modelcmd" "github.com/juju/juju/payload" @@ -68,6 +68,7 @@ } func (c *ListCommand) SetFlags(f *gnuflag.FlagSet) { + c.ModelCommandBase.SetFlags(f) defaultFormat := "tabular" c.out.AddFlags(f, defaultFormat, map[string]cmd.Formatter{ "tabular": FormatTabular, @@ -98,6 +99,11 @@ fmt.Fprintf(ctx.Stderr, "%v\n", err) } + if len(payloads) == 0 { + ctx.Infof("No payloads to display.") + return nil + } + // Note that we do not worry about c.CompatVersion for payloads... formatter := newListFormatter(payloads) formatted := formatter.format() diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/payload/status/list_test.go juju-core-2.0.0/src/github.com/juju/juju/payload/status/list_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/payload/status/list_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/payload/status/list_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -82,9 +82,9 @@ c.Check(stdout, gc.Equals, ` [Unit Payloads] -UNIT MACHINE PAYLOAD-CLASS STATUS TYPE ID TAGS -a-application/0 1 spam running docker idspam a-tag -another-application/1 2 eggs running docker ideggs +Unit Machine Payload class Status Type Id Tags +a-application/0 1 spam running docker idspam a-tag +another-application/1 2 eggs running docker ideggs `[1:]) c.Check(stderr, gc.Equals, "") @@ -95,12 +95,8 @@ code, stdout, stderr := runList(c, command) c.Assert(code, gc.Equals, 0) - c.Check(stdout, gc.Equals, ` -[Unit Payloads] -UNIT MACHINE PAYLOAD-CLASS STATUS TYPE ID TAGS - -`[1:]) - c.Check(stderr, gc.Equals, "") + c.Check(stderr, 
gc.Equals, "No payloads to display.\n") + c.Check(stdout, gc.Equals, "") } func (s *listSuite) TestPatternsOkay(c *gc.C) { @@ -121,9 +117,9 @@ c.Check(stdout, gc.Equals, ` [Unit Payloads] -UNIT MACHINE PAYLOAD-CLASS STATUS TYPE ID TAGS -a-application/0 1 spam running docker idspam a-tag -another-application/1 2 eggs running docker ideggs a-tag +Unit Machine Payload class Status Type Id Tags +a-application/0 1 spam running docker idspam a-tag +another-application/1 2 eggs running docker ideggs a-tag `[1:]) c.Check(stderr, gc.Equals, "") @@ -158,9 +154,9 @@ formats := map[string]string{ "tabular": ` [Unit Payloads] -UNIT MACHINE PAYLOAD-CLASS STATUS TYPE ID TAGS -a-application/0 1 spam running docker idspam a-tag -another-application/1 2 eggs running docker ideggs +Unit Machine Payload class Status Type Id Tags +a-application/0 1 spam running docker idspam a-tag +another-application/1 2 eggs running docker ideggs `[1:], "yaml": ` diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/payload/status/output_tabular.go juju-core-2.0.0/src/github.com/juju/juju/payload/status/output_tabular.go --- juju-core-2.0~beta15/src/github.com/juju/juju/payload/status/output_tabular.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/payload/status/output_tabular.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,43 +4,41 @@ package status import ( - "bytes" "fmt" + "io" "strings" - "text/tabwriter" "github.com/juju/errors" + "github.com/juju/juju/cmd/output" ) const tabularSection = "[Unit Payloads]" var ( tabularColumns = []string{ - "UNIT", - "MACHINE", - "PAYLOAD-CLASS", - "STATUS", - "TYPE", - "ID", - "TAGS", // TODO(ericsnow) Chane this to "LABELS"? + "Unit", + "Machine", + "Payload class", + "Status", + "Type", + "Id", + "Tags", // TODO(ericsnow) Chane this to "LABELS"? } tabularHeader = strings.Join(tabularColumns, "\t") + "\t" tabularRow = strings.Repeat("%s\t", len(tabularColumns)) ) -// FormatTabular returns a tabular summary of payloads. 
-func FormatTabular(value interface{}) ([]byte, error) { +// FormatTabular writes a tabular summary of payloads. +func FormatTabular(writer io.Writer, value interface{}) error { payloads, valueConverted := value.([]FormattedPayload) if !valueConverted { - return nil, errors.Errorf("expected value of type %T, got %T", payloads, value) + return errors.Errorf("expected value of type %T, got %T", payloads, value) } // TODO(ericsnow) sort the rows first? - var out bytes.Buffer - // To format things into columns. - tw := tabwriter.NewWriter(&out, 0, 1, 1, ' ', 0) + tw := output.TabWriter(writer) // Write the header. fmt.Fprintln(tw, tabularSection) @@ -61,5 +59,5 @@ } tw.Flush() - return out.Bytes(), nil + return nil } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/payload/status/output_tabular_test.go juju-core-2.0.0/src/github.com/juju/juju/payload/status/output_tabular_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/payload/status/output_tabular_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/payload/status/output_tabular_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,6 +4,8 @@ package status_test import ( + "bytes" + "github.com/juju/testing" jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" @@ -21,26 +23,28 @@ payload := status.NewPayload("spam", "a-application", 1, 0) payload.Labels = []string{"a-tag", "other"} formatted := status.Formatted(payload) - data, err := status.FormatTabular(formatted) + buff := &bytes.Buffer{} + err := status.FormatTabular(buff, formatted) c.Assert(err, jc.ErrorIsNil) - c.Check(string(data), gc.Equals, ` + c.Check(buff.String(), gc.Equals, ` [Unit Payloads] -UNIT MACHINE PAYLOAD-CLASS STATUS TYPE ID TAGS -a-application/0 1 spam running docker idspam a-tag other +Unit Machine Payload class Status Type Id Tags +a-application/0 1 spam running docker idspam a-tag other `[1:]) } func (s *outputTabularSuite) TestFormatTabularMinimal(c *gc.C) { payload := 
status.NewPayload("spam", "a-application", 1, 0) formatted := status.Formatted(payload) - data, err := status.FormatTabular(formatted) + buff := &bytes.Buffer{} + err := status.FormatTabular(buff, formatted) c.Assert(err, jc.ErrorIsNil) - c.Check(string(data), gc.Equals, ` + c.Check(buff.String(), gc.Equals, ` [Unit Payloads] -UNIT MACHINE PAYLOAD-CLASS STATUS TYPE ID TAGS -a-application/0 1 spam running docker idspam +Unit Machine Payload class Status Type Id Tags +a-application/0 1 spam running docker idspam `[1:]) } @@ -65,24 +69,24 @@ p22A, p10x, ) - data, err := status.FormatTabular(formatted) + buff := &bytes.Buffer{} + err := status.FormatTabular(buff, formatted) c.Assert(err, jc.ErrorIsNil) - c.Check(string(data), gc.Equals, ` + c.Check(buff.String(), gc.Equals, ` [Unit Payloads] -UNIT MACHINE PAYLOAD-CLASS STATUS TYPE ID TAGS -a-application/0 1 spam running docker idspam a-tag -a-application/1 2 spam stopped docker idspam a-tag -a-application/1 2 spam running docker idspamB -a-application/1 2 eggs running kvm ideggs -a-application/2 2 spam running docker idspam -another-application/0 1 ham running docker idham other extra +Unit Machine Payload class Status Type Id Tags +a-application/0 1 spam running docker idspam a-tag +a-application/1 2 spam stopped docker idspam a-tag +a-application/1 2 spam running docker idspamB +a-application/1 2 eggs running kvm ideggs +a-application/2 2 spam running docker idspam +another-application/0 1 ham running docker idham other extra `[1:]) } func (s *outputTabularSuite) TestFormatTabularBadValue(c *gc.C) { bogus := "should have been []formattedPayload" - _, err := status.FormatTabular(bogus) - + err := status.FormatTabular(nil, bogus) c.Check(err, gc.ErrorMatches, `expected value of type .*`) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/permission/access.go juju-core-2.0.0/src/github.com/juju/juju/permission/access.go --- juju-core-2.0~beta15/src/github.com/juju/juju/permission/access.go 1970-01-01 
00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/permission/access.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,161 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package permission + +import ( + "github.com/juju/errors" + "github.com/juju/schema" +) + +// Access represents a level of access. +type Access string + +const ( + // NoAccess allows a user no permissions at all. + NoAccess Access = "" + + // Model Permissions + + // ReadAccess allows a user to read information about a permission subject, + // without being able to make any changes. + ReadAccess Access = "read" + + // WriteAccess allows a user to make changes to a permission subject. + WriteAccess Access = "write" + + // AdminAccess allows a user full control over the subject. + AdminAccess Access = "admin" + + // Controller permissions + + // LoginAccess allows a user to log-ing into the subject. + LoginAccess Access = "login" + + // AddModelAccess allows user to add new models in subjects supporting it. + AddModelAccess Access = "add-model" + + // SuperuserAccess allows user unrestricted permissions in the subject. + SuperuserAccess Access = "superuser" +) + +// Validate returns error if the current is not a valid access level. +func (a Access) Validate() error { + switch a { + case NoAccess, AdminAccess, ReadAccess, WriteAccess, + LoginAccess, AddModelAccess, SuperuserAccess: + return nil + } + return errors.NotValidf("access level %s", a) +} + +// ValidateModelAccess returns error if the passed access is not a valid +// model access level. +func ValidateModelAccess(access Access) error { + switch access { + case ReadAccess, WriteAccess, AdminAccess: + return nil + } + return errors.NotValidf("%q model access", access) +} + +//ValidateControllerAccess returns error if the passed access is not a valid +// controller access level. 
+func ValidateControllerAccess(access Access) error { + switch access { + case LoginAccess, AddModelAccess, SuperuserAccess: + return nil + } + return errors.NotValidf("%q controller access", access) +} + +func (a Access) controllerValue() int { + switch a { + case NoAccess: + return 0 + case LoginAccess: + return 1 + case AddModelAccess: + return 2 + case SuperuserAccess: + return 3 + default: + return -1 + } +} + +func (a Access) modelValue() int { + switch a { + case NoAccess: + return 0 + case ReadAccess: + return 1 + case WriteAccess: + return 2 + case AdminAccess: + return 3 + default: + return -1 + } +} + +// EqualOrGreaterModelAccessThan returns true if the current access is equal +// or greater than the passed in access level. +func (a Access) EqualOrGreaterModelAccessThan(access Access) bool { + v1, v2 := a.modelValue(), access.modelValue() + if v1 < 0 || v2 < 0 { + return false + } + return v1 >= v2 +} + +// GreaterModelAccessThan returns true if the current access is greater than +// the passed in access level. +func (a Access) GreaterModelAccessThan(access Access) bool { + v1, v2 := a.modelValue(), access.modelValue() + if v1 < 0 || v2 < 0 { + return false + } + return v1 > v2 +} + +// EqualOrGreaterControllerAccessThan returns true if the current access is +// equal or greater than the passed in access level. +func (a Access) EqualOrGreaterControllerAccessThan(access Access) bool { + v1, v2 := a.controllerValue(), access.controllerValue() + if v1 < 0 || v2 < 0 { + return false + } + return v1 >= v2 +} + +// GreaterControllerAccessThan returns true if the current access is +// greater than the passed in access level. +func (a Access) GreaterControllerAccessThan(access Access) bool { + v1, v2 := a.controllerValue(), access.controllerValue() + if v1 < 0 || v2 < 0 { + return false + } + return v1 > v2 +} + +// accessField returns a Checker that accepts a string value only +// and returns a valid Access or an error. 
+func accessField() schema.Checker { + return accessC{} +} + +type accessC struct{} + +func (c accessC) Coerce(v interface{}, path []string) (interface{}, error) { + s := schema.String() + in, err := s.Coerce(v, path) + if err != nil { + return nil, err + } + access := Access(in.(string)) + if err := access.Validate(); err != nil { + return nil, errors.Trace(err) + } + return access, nil +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/permission/access_test.go juju-core-2.0.0/src/github.com/juju/juju/permission/access_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/permission/access_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/permission/access_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,201 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package permission_test + +import ( + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/permission" +) + +type accessSuite struct{} + +var _ = gc.Suite(&accessSuite{}) + +func (*accessSuite) TestEqualOrGreaterModelAccessThan(c *gc.C) { + // A very boring but necessary test to test explicit responses. + var ( + undefined = permission.NoAccess + read = permission.ReadAccess + write = permission.WriteAccess + admin = permission.AdminAccess + login = permission.LoginAccess + addmodel = permission.AddModelAccess + superuser = permission.SuperuserAccess + ) + // None of the controller permissions return true for any comparison. 
+ for _, value := range []permission.Access{login, addmodel, superuser} { + c.Check(value.EqualOrGreaterModelAccessThan(undefined), jc.IsFalse) + c.Check(value.EqualOrGreaterModelAccessThan(read), jc.IsFalse) + c.Check(value.EqualOrGreaterModelAccessThan(write), jc.IsFalse) + c.Check(value.EqualOrGreaterModelAccessThan(admin), jc.IsFalse) + c.Check(value.EqualOrGreaterModelAccessThan(login), jc.IsFalse) + c.Check(value.EqualOrGreaterModelAccessThan(addmodel), jc.IsFalse) + c.Check(value.EqualOrGreaterModelAccessThan(superuser), jc.IsFalse) + } + // No comparison against a controller permission will return true + for _, value := range []permission.Access{undefined, read, write, admin} { + c.Check(value.EqualOrGreaterModelAccessThan(login), jc.IsFalse) + c.Check(value.EqualOrGreaterModelAccessThan(addmodel), jc.IsFalse) + c.Check(value.EqualOrGreaterModelAccessThan(superuser), jc.IsFalse) + } + + c.Check(undefined.EqualOrGreaterModelAccessThan(undefined), jc.IsTrue) + c.Check(undefined.EqualOrGreaterModelAccessThan(read), jc.IsFalse) + c.Check(undefined.EqualOrGreaterModelAccessThan(write), jc.IsFalse) + c.Check(undefined.EqualOrGreaterModelAccessThan(admin), jc.IsFalse) + + c.Check(read.EqualOrGreaterModelAccessThan(undefined), jc.IsTrue) + c.Check(read.EqualOrGreaterModelAccessThan(read), jc.IsTrue) + c.Check(read.EqualOrGreaterModelAccessThan(write), jc.IsFalse) + c.Check(read.EqualOrGreaterModelAccessThan(admin), jc.IsFalse) + + c.Check(write.EqualOrGreaterModelAccessThan(undefined), jc.IsTrue) + c.Check(write.EqualOrGreaterModelAccessThan(read), jc.IsTrue) + c.Check(write.EqualOrGreaterModelAccessThan(write), jc.IsTrue) + c.Check(write.EqualOrGreaterModelAccessThan(admin), jc.IsFalse) + + c.Check(admin.EqualOrGreaterModelAccessThan(undefined), jc.IsTrue) + c.Check(admin.EqualOrGreaterModelAccessThan(read), jc.IsTrue) + c.Check(admin.EqualOrGreaterModelAccessThan(write), jc.IsTrue) + c.Check(admin.EqualOrGreaterModelAccessThan(admin), jc.IsTrue) +} + +func 
(*accessSuite) TestGreaterModelAccessThan(c *gc.C) { + // A very boring but necessary test to test explicit responses. + var ( + undefined = permission.NoAccess + read = permission.ReadAccess + write = permission.WriteAccess + admin = permission.AdminAccess + login = permission.LoginAccess + addmodel = permission.AddModelAccess + superuser = permission.SuperuserAccess + ) + // None of undefined or the controller permissions return true for any comparison. + for _, value := range []permission.Access{undefined, login, addmodel, superuser} { + c.Check(value.GreaterModelAccessThan(undefined), jc.IsFalse) + c.Check(value.GreaterModelAccessThan(read), jc.IsFalse) + c.Check(value.GreaterModelAccessThan(write), jc.IsFalse) + c.Check(value.GreaterModelAccessThan(admin), jc.IsFalse) + c.Check(value.GreaterModelAccessThan(login), jc.IsFalse) + c.Check(value.GreaterModelAccessThan(addmodel), jc.IsFalse) + c.Check(value.GreaterModelAccessThan(superuser), jc.IsFalse) + } + // No comparison against a controller permission will return true + for _, value := range []permission.Access{undefined, read, write, admin} { + c.Check(value.GreaterModelAccessThan(login), jc.IsFalse) + c.Check(value.GreaterModelAccessThan(addmodel), jc.IsFalse) + c.Check(value.GreaterModelAccessThan(superuser), jc.IsFalse) + } + + c.Check(read.GreaterModelAccessThan(undefined), jc.IsTrue) + c.Check(read.GreaterModelAccessThan(read), jc.IsFalse) + c.Check(read.GreaterModelAccessThan(write), jc.IsFalse) + c.Check(read.GreaterModelAccessThan(admin), jc.IsFalse) + + c.Check(write.GreaterModelAccessThan(undefined), jc.IsTrue) + c.Check(write.GreaterModelAccessThan(read), jc.IsTrue) + c.Check(write.GreaterModelAccessThan(write), jc.IsFalse) + c.Check(write.GreaterModelAccessThan(admin), jc.IsFalse) + + c.Check(admin.GreaterModelAccessThan(undefined), jc.IsTrue) + c.Check(admin.GreaterModelAccessThan(read), jc.IsTrue) + c.Check(admin.GreaterModelAccessThan(write), jc.IsTrue) + 
c.Check(admin.GreaterModelAccessThan(admin), jc.IsFalse) +} + +func (*accessSuite) TestEqualOrGreaterControllerAccessThan(c *gc.C) { + // A very boring but necessary test to test explicit responses. + var ( + undefined = permission.NoAccess + read = permission.ReadAccess + write = permission.WriteAccess + admin = permission.AdminAccess + login = permission.LoginAccess + addmodel = permission.AddModelAccess + superuser = permission.SuperuserAccess + ) + // None of the model permissions return true for any comparison. + for _, value := range []permission.Access{read, write, admin} { + c.Check(value.EqualOrGreaterControllerAccessThan(undefined), jc.IsFalse) + c.Check(value.EqualOrGreaterControllerAccessThan(read), jc.IsFalse) + c.Check(value.EqualOrGreaterControllerAccessThan(write), jc.IsFalse) + c.Check(value.EqualOrGreaterControllerAccessThan(admin), jc.IsFalse) + c.Check(value.EqualOrGreaterControllerAccessThan(login), jc.IsFalse) + c.Check(value.EqualOrGreaterControllerAccessThan(addmodel), jc.IsFalse) + c.Check(value.EqualOrGreaterControllerAccessThan(superuser), jc.IsFalse) + } + // No comparison against a model permission will return true + for _, value := range []permission.Access{undefined, login, addmodel, superuser} { + c.Check(value.EqualOrGreaterControllerAccessThan(read), jc.IsFalse) + c.Check(value.EqualOrGreaterControllerAccessThan(write), jc.IsFalse) + c.Check(value.EqualOrGreaterControllerAccessThan(admin), jc.IsFalse) + } + + c.Check(undefined.EqualOrGreaterControllerAccessThan(undefined), jc.IsTrue) + c.Check(undefined.EqualOrGreaterControllerAccessThan(login), jc.IsFalse) + c.Check(undefined.EqualOrGreaterControllerAccessThan(addmodel), jc.IsFalse) + c.Check(undefined.EqualOrGreaterControllerAccessThan(superuser), jc.IsFalse) + + c.Check(login.EqualOrGreaterControllerAccessThan(undefined), jc.IsTrue) + c.Check(login.EqualOrGreaterControllerAccessThan(login), jc.IsTrue) + c.Check(login.EqualOrGreaterControllerAccessThan(addmodel), jc.IsFalse) + 
c.Check(login.EqualOrGreaterControllerAccessThan(superuser), jc.IsFalse) + + c.Check(addmodel.EqualOrGreaterControllerAccessThan(undefined), jc.IsTrue) + c.Check(addmodel.EqualOrGreaterControllerAccessThan(login), jc.IsTrue) + c.Check(addmodel.EqualOrGreaterControllerAccessThan(addmodel), jc.IsTrue) + c.Check(addmodel.EqualOrGreaterControllerAccessThan(superuser), jc.IsFalse) + + c.Check(superuser.EqualOrGreaterControllerAccessThan(undefined), jc.IsTrue) + c.Check(superuser.EqualOrGreaterControllerAccessThan(login), jc.IsTrue) + c.Check(superuser.EqualOrGreaterControllerAccessThan(addmodel), jc.IsTrue) + c.Check(superuser.EqualOrGreaterControllerAccessThan(superuser), jc.IsTrue) +} + +func (*accessSuite) TestGreaterControllerAccessThan(c *gc.C) { + // A very boring but necessary test to test explicit responses. + var ( + undefined = permission.NoAccess + read = permission.ReadAccess + write = permission.WriteAccess + admin = permission.AdminAccess + login = permission.LoginAccess + addmodel = permission.AddModelAccess + superuser = permission.SuperuserAccess + ) + // None of undefined or the model permissions return true for any comparison. 
+ for _, value := range []permission.Access{undefined, read, write, admin} { + c.Check(value.GreaterControllerAccessThan(undefined), jc.IsFalse) + c.Check(value.GreaterControllerAccessThan(read), jc.IsFalse) + c.Check(value.GreaterControllerAccessThan(write), jc.IsFalse) + c.Check(value.GreaterControllerAccessThan(admin), jc.IsFalse) + c.Check(value.GreaterControllerAccessThan(login), jc.IsFalse) + c.Check(value.GreaterControllerAccessThan(addmodel), jc.IsFalse) + c.Check(value.GreaterControllerAccessThan(superuser), jc.IsFalse) + } + // No comparison against a model permission will return true + for _, value := range []permission.Access{undefined, login, addmodel, superuser} { + c.Check(value.GreaterControllerAccessThan(read), jc.IsFalse) + c.Check(value.GreaterControllerAccessThan(write), jc.IsFalse) + c.Check(value.GreaterControllerAccessThan(admin), jc.IsFalse) + } + + c.Check(login.GreaterControllerAccessThan(undefined), jc.IsTrue) + c.Check(login.GreaterControllerAccessThan(login), jc.IsFalse) + c.Check(login.GreaterControllerAccessThan(addmodel), jc.IsFalse) + c.Check(login.GreaterControllerAccessThan(superuser), jc.IsFalse) + + c.Check(addmodel.GreaterControllerAccessThan(undefined), jc.IsTrue) + c.Check(addmodel.GreaterControllerAccessThan(login), jc.IsTrue) + c.Check(addmodel.GreaterControllerAccessThan(addmodel), jc.IsFalse) + c.Check(addmodel.GreaterControllerAccessThan(superuser), jc.IsFalse) + + c.Check(superuser.GreaterControllerAccessThan(undefined), jc.IsTrue) + c.Check(superuser.GreaterControllerAccessThan(login), jc.IsTrue) + c.Check(superuser.GreaterControllerAccessThan(addmodel), jc.IsTrue) + c.Check(superuser.GreaterControllerAccessThan(superuser), jc.IsFalse) +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/permission/package_test.go juju-core-2.0.0/src/github.com/juju/juju/permission/package_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/permission/package_test.go 1970-01-01 00:00:00.000000000 +0000 +++ 
juju-core-2.0.0/src/github.com/juju/juju/permission/package_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,14 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package permission_test + +import ( + "testing" + + gc "gopkg.in/check.v1" +) + +func TestAll(t *testing.T) { + gc.TestingT(t) +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/permission/useraccess.go juju-core-2.0.0/src/github.com/juju/juju/permission/useraccess.go --- juju-core-2.0~beta15/src/github.com/juju/juju/permission/useraccess.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/permission/useraccess.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,41 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package permission + +import ( + "time" + + "gopkg.in/juju/names.v2" +) + +// UserAccess represents a user access to a target whereas the user +// could represent a remote user or a user across multiple models the +// user access always represents a single user for a single target. +// There should be no more than one UserAccess per target/user pair. +// Many of these fields are storage artifacts but generate them from +// other fields implies out of band knowledge of other packages. +type UserAccess struct { + // UserID is the stored ID of the user. + UserID string + // UserTag is the tag for the user. + UserTag names.UserTag + // Object is the tag for the object of this access grant. + Object names.Tag + // Access represents the level of access subject has over object. + Access Access + // CreatedBy is the tag of the user that granted the access. + CreatedBy names.UserTag + // DateCreated is the date the user was created in UTC. + DateCreated time.Time + // DisplayName is the name we are showing for this user. + DisplayName string + // UserName is the actual username for this access. 
+ UserName string +} + +// IsEmptyUserAccess returns true if the passed UserAccess instance +// is empty. +func IsEmptyUserAccess(a UserAccess) bool { + return a == UserAccess{} +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/azure/async.go juju-core-2.0.0/src/github.com/juju/juju/provider/azure/async.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/azure/async.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/azure/async.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,70 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package azure + +import ( + "bytes" + "encoding/json" + "io/ioutil" + "net/http" + + "github.com/Azure/go-autorest/autorest" +) + +// asyncCreationRespondDecorator returns an autorest.RespondDecorator +// that replaces non-failure provisioning states with "Succeeded", to +// prevent the autorest code from blocking until the resource is completely +// provisioned. +func asyncCreationRespondDecorator(original autorest.RespondDecorator) autorest.RespondDecorator { + return func(r autorest.Responder) autorest.Responder { + return autorest.ResponderFunc(func(resp *http.Response) error { + if resp.Body != nil { + if err := overrideProvisioningState(resp); err != nil { + return err + } + } + return original(r).Respond(resp) + }) + } +} + +func overrideProvisioningState(resp *http.Response) error { + var buf bytes.Buffer + if _, err := buf.ReadFrom(resp.Body); err != nil { + return err + } + if err := resp.Body.Close(); err != nil { + return err + } + resp.Body = ioutil.NopCloser(&buf) + + body := make(map[string]interface{}) + if err := json.Unmarshal(buf.Bytes(), &body); err != nil { + // Don't treat failure to decode the body as an error, + // or we may get in the way of response handling. + return nil + } + properties, ok := body["properties"].(map[string]interface{}) + if !ok { + // No properties, nothing to do. 
+ return nil + } + provisioningState, ok := properties["provisioningState"] + if !ok { + // No provisioningState, nothing to do. + return nil + } + + switch provisioningState { + case "Canceled", "Failed", "Succeeded": + // In any of these cases, pass on the body untouched. + default: + properties["provisioningState"] = "Succeeded" + buf.Reset() + if err := json.NewEncoder(&buf).Encode(body); err != nil { + return err + } + } + return nil +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/azure/auth.go juju-core-2.0.0/src/github.com/juju/juju/provider/azure/auth.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/azure/auth.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/azure/auth.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,101 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package azure + +import ( + "net/http" + "sync" + + "github.com/Azure/azure-sdk-for-go/arm/resources/subscriptions" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/juju/errors" + + "github.com/juju/juju/environs" + "github.com/juju/juju/provider/azure/internal/azureauth" +) + +// cloudSpecAuth is an implementation of autorest.Authorizer. +type cloudSpecAuth struct { + cloud environs.CloudSpec + sender autorest.Sender + mu sync.Mutex + token *azure.ServicePrincipalToken +} + +// WithAuthorization is part of the autorest.Authorizer interface. 
+func (c *cloudSpecAuth) WithAuthorization() autorest.PrepareDecorator { + return func(p autorest.Preparer) autorest.Preparer { + return autorest.PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err != nil { + return nil, err + } + token, err := c.getToken() + if err != nil { + return nil, err + } + return autorest.CreatePreparer(token.WithAuthorization()).Prepare(r) + }) + } +} + +func (c *cloudSpecAuth) refresh() error { + token, err := c.getToken() + if err != nil { + return err + } + return token.Refresh() +} + +func (c *cloudSpecAuth) getToken() (*azure.ServicePrincipalToken, error) { + c.mu.Lock() + defer c.mu.Unlock() + if c.token != nil { + return c.token, nil + } + token, err := AuthToken(c.cloud, c.sender) + if err != nil { + return nil, errors.Trace(err) + } + c.token = token + return c.token, nil +} + +// AuthToken returns a service principal token, suitable for authorizing +// Resource Manager API requests, based on the supplied CloudSpec. +func AuthToken(cloud environs.CloudSpec, sender autorest.Sender) (*azure.ServicePrincipalToken, error) { + if authType := cloud.Credential.AuthType(); authType != clientCredentialsAuthType { + // We currently only support a single auth-type for + // non-interactive authentication. Interactive auth + // is used only to generate a service-principal. 
+ return nil, errors.NotSupportedf("auth-type %q", authType) + } + + credAttrs := cloud.Credential.Attributes() + subscriptionId := credAttrs[credAttrSubscriptionId] + appId := credAttrs[credAttrAppId] + appPassword := credAttrs[credAttrAppPassword] + client := subscriptions.Client{subscriptions.NewWithBaseURI(cloud.Endpoint)} + client.Sender = sender + oauthConfig, _, err := azureauth.OAuthConfig(client, cloud.Endpoint, subscriptionId) + if err != nil { + return nil, errors.Trace(err) + } + + resource := azureauth.TokenResource(cloud.Endpoint) + token, err := azure.NewServicePrincipalToken( + *oauthConfig, + appId, + appPassword, + resource, + ) + if err != nil { + return nil, errors.Annotate(err, "constructing service principal token") + } + if sender != nil { + token.SetSender(sender) + } + return token, nil +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/azure/auth_test.go juju-core-2.0.0/src/github.com/juju/juju/provider/azure/auth_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/azure/auth_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/azure/auth_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,64 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+ +package azure_test + +import ( + "net/http" + + "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/cloud" + "github.com/juju/juju/environs" + "github.com/juju/juju/provider/azure" + "github.com/juju/juju/provider/azure/internal/azuretesting" +) + +type AuthSuite struct { + testing.IsolationSuite + requests []*http.Request +} + +var _ = gc.Suite(&AuthSuite{}) + +func (s *AuthSuite) TestAuthTokenServicePrincipalSecret(c *gc.C) { + spec := environs.CloudSpec{ + Type: "azure", + Name: "azure", + Region: "westus", + Endpoint: "https://api.azurestack.local", + IdentityEndpoint: "https://graph.azurestack.local", + StorageEndpoint: "https://storage.azurestack.local", + Credential: fakeServicePrincipalCredential(), + } + senders := azuretesting.Senders{ + discoverAuthSender(), + } + token, err := azure.AuthToken(spec, &senders) + c.Assert(err, jc.ErrorIsNil) + c.Assert(token, gc.NotNil) +} + +func (s *AuthSuite) TestAuthTokenInteractive(c *gc.C) { + spec := environs.CloudSpec{ + Type: "azure", + Name: "azure", + Region: "westus", + Endpoint: "https://api.azurestack.local", + IdentityEndpoint: "https://graph.azurestack.local", + StorageEndpoint: "https://storage.azurestack.local", + Credential: fakeInteractiveCredential(), + } + senders := azuretesting.Senders{} + _, err := azure.AuthToken(spec, &senders) + c.Assert(err, gc.ErrorMatches, `auth-type "interactive" not supported`) +} + +func fakeInteractiveCredential() *cloud.Credential { + cred := cloud.NewCredential("interactive", map[string]string{ + "subscription-id": fakeSubscriptionId, + }) + return &cred +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/azure/config.go juju-core-2.0.0/src/github.com/juju/juju/provider/azure/config.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/azure/config.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/azure/config.go 2016-10-13 
14:31:49.000000000 +0000 @@ -39,7 +39,7 @@ type azureModelConfig struct { *config.Config - storageAccountType storage.AccountType + storageAccountType string } var knownStorageAccountTypes = []string{ @@ -121,7 +121,7 @@ azureConfig := &azureModelConfig{ newCfg, - storage.AccountType(storageAccountType), + storageAccountType, } return azureConfig, nil } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/azure/config_test.go juju-core-2.0.0/src/github.com/juju/juju/provider/azure/config_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/azure/config_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/azure/config_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,7 +4,7 @@ package azure_test import ( - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/mocks" + "github.com/Azure/go-autorest/autorest/mocks" jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" @@ -16,9 +16,7 @@ const ( fakeApplicationId = "00000000-0000-0000-0000-000000000000" - fakeTenantId = "11111111-1111-1111-1111-111111111111" fakeSubscriptionId = "22222222-2222-2222-2222-222222222222" - fakeStorageAccount = "mrblobby" fakeStorageAccountKey = "quay" ) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/azure/credentials.go juju-core-2.0.0/src/github.com/juju/juju/provider/azure/credentials.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/azure/credentials.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/azure/credentials.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,9 +4,14 @@ package azure import ( + "github.com/Azure/go-autorest/autorest" "github.com/juju/errors" + "github.com/juju/utils" + "github.com/juju/utils/clock" "github.com/juju/juju/cloud" + "github.com/juju/juju/environs" + "github.com/juju/juju/provider/azure/internal/azureauth" ) const ( @@ -14,24 +19,46 @@ credAttrSubscriptionId = 
"subscription-id" credAttrTenantId = "tenant-id" credAttrAppPassword = "application-password" + + // clientCredentialsAuthType is the auth-type for the + // "client credentials" OAuth flow, which requires a + // service principal with a password. + clientCredentialsAuthType cloud.AuthType = "service-principal-secret" + + // deviceCodeAuthType is the auth-type for the interactive + // "device code" OAuth flow. + deviceCodeAuthType cloud.AuthType = "interactive" ) // environPoviderCredentials is an implementation of // environs.ProviderCredentials for the Azure Resource // Manager cloud provider. -type environProviderCredentials struct{} +type environProviderCredentials struct { + sender autorest.Sender + requestInspector autorest.PrepareDecorator + interactiveCreateServicePrincipal azureauth.InteractiveCreateServicePrincipalFunc +} // CredentialSchemas is part of the environs.ProviderCredentials interface. func (environProviderCredentials) CredentialSchemas() map[cloud.AuthType]cloud.CredentialSchema { return map[cloud.AuthType]cloud.CredentialSchema{ - cloud.UserPassAuthType: { + // deviceCodeAuthType is the interactive device-code oauth + // flow. This is only supported on the client side; it will + // be used to generate a service principal, and transformed + // into clientCredentialsAuthType. + deviceCodeAuthType: {{ + credAttrSubscriptionId, cloud.CredentialAttr{Description: "Azure subscription ID"}, + }}, + + // clientCredentialsAuthType is the "client credentials" + // oauth flow, which requires a service principal with a + // password. 
+ clientCredentialsAuthType: { { credAttrAppId, cloud.CredentialAttr{Description: "Azure Active Directory application ID"}, }, { credAttrSubscriptionId, cloud.CredentialAttr{Description: "Azure subscription ID"}, }, { - credAttrTenantId, cloud.CredentialAttr{Description: "Azure Active Directory tenant ID"}, - }, { credAttrAppPassword, cloud.CredentialAttr{ Description: "Azure Active Directory application password", Hidden: true, @@ -45,3 +72,39 @@ func (environProviderCredentials) DetectCredentials() (*cloud.CloudCredential, error) { return nil, errors.NotFoundf("credentials") } + +// FinalizeCredential is part of the environs.ProviderCredentials interface. +func (c environProviderCredentials) FinalizeCredential( + ctx environs.FinalizeCredentialContext, + args environs.FinalizeCredentialParams, +) (*cloud.Credential, error) { + switch authType := args.Credential.AuthType(); authType { + case deviceCodeAuthType: + subscriptionId := args.Credential.Attributes()[credAttrSubscriptionId] + applicationId, password, err := c.interactiveCreateServicePrincipal( + ctx.GetStderr(), + c.sender, + c.requestInspector, + args.CloudEndpoint, + args.CloudIdentityEndpoint, + subscriptionId, + clock.WallClock, + utils.NewUUID, + ) + if err != nil { + return nil, errors.Trace(err) + } + out := cloud.NewCredential(clientCredentialsAuthType, map[string]string{ + credAttrSubscriptionId: subscriptionId, + credAttrAppId: applicationId, + credAttrAppPassword: password, + }) + out.Label = args.Credential.Label + return &out, nil + + case clientCredentialsAuthType: + return &args.Credential, nil + default: + return nil, errors.NotSupportedf("%q auth-type", authType) + } +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/azure/credentials_test.go juju-core-2.0.0/src/github.com/juju/juju/provider/azure/credentials_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/azure/credentials_test.go 2016-08-16 08:56:25.000000000 +0000 +++ 
juju-core-2.0.0/src/github.com/juju/juju/provider/azure/credentials_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,53 +4,123 @@ package azure_test import ( + "io" + + "github.com/Azure/go-autorest/autorest" "github.com/juju/errors" + "github.com/juju/testing" jc "github.com/juju/testing/checkers" + "github.com/juju/utils" + "github.com/juju/utils/clock" gc "gopkg.in/check.v1" + "github.com/juju/juju/cloud" "github.com/juju/juju/environs" envtesting "github.com/juju/juju/environs/testing" "github.com/juju/juju/provider/azure" - "github.com/juju/juju/testing" + coretesting "github.com/juju/juju/testing" ) type credentialsSuite struct { - testing.BaseSuite + testing.IsolationSuite + interactiveCreateServicePrincipalCreator provider environs.EnvironProvider } var _ = gc.Suite(&credentialsSuite{}) func (s *credentialsSuite) SetUpTest(c *gc.C) { - s.BaseSuite.SetUpTest(c) - s.provider = newProvider(c, azure.ProviderConfig{}) + s.IsolationSuite.SetUpTest(c) + s.interactiveCreateServicePrincipalCreator = interactiveCreateServicePrincipalCreator{} + s.provider = newProvider(c, azure.ProviderConfig{ + InteractiveCreateServicePrincipal: s.interactiveCreateServicePrincipalCreator.InteractiveCreateServicePrincipal, + }) } func (s *credentialsSuite) TestCredentialSchemas(c *gc.C) { - envtesting.AssertProviderAuthTypes(c, s.provider, "userpass") + envtesting.AssertProviderAuthTypes(c, s.provider, + "interactive", + "service-principal-secret", + ) } var sampleCredentialAttributes = map[string]string{ "application-id": "application", "application-password": "password", "subscription-id": "subscription", - "tenant-id": "tenant", } -func (s *credentialsSuite) TestUserPassCredentialsValid(c *gc.C) { - envtesting.AssertProviderCredentialsValid(c, s.provider, "userpass", map[string]string{ +func (s *credentialsSuite) TestServicePrincipalSecretCredentialsValid(c *gc.C) { + envtesting.AssertProviderCredentialsValid(c, s.provider, "service-principal-secret", map[string]string{ 
"application-id": "application", "application-password": "password", "subscription-id": "subscription", - "tenant-id": "tenant", }) } -func (s *credentialsSuite) TestUserPassHiddenAttributes(c *gc.C) { - envtesting.AssertProviderCredentialsAttributesHidden(c, s.provider, "userpass", "application-password") +func (s *credentialsSuite) TestServicePrincipalSecretHiddenAttributes(c *gc.C) { + envtesting.AssertProviderCredentialsAttributesHidden(c, s.provider, "service-principal-secret", "application-password") } func (s *credentialsSuite) TestDetectCredentials(c *gc.C) { _, err := s.provider.DetectCredentials() c.Assert(err, jc.Satisfies, errors.IsNotFound) } + +func (s *credentialsSuite) TestFinalizeCredentialInteractive(c *gc.C) { + in := cloud.NewCredential("interactive", map[string]string{"subscription-id": "subscription"}) + ctx := coretesting.Context(c) + out, err := s.provider.FinalizeCredential(ctx, environs.FinalizeCredentialParams{ + Credential: in, + CloudEndpoint: "https://arm.invalid", + CloudIdentityEndpoint: "https://graph.invalid", + }) + c.Assert(err, jc.ErrorIsNil) + c.Assert(out, gc.NotNil) + c.Assert(out.AuthType(), gc.Equals, cloud.AuthType("service-principal-secret")) + c.Assert(out.Attributes(), jc.DeepEquals, map[string]string{ + "application-id": "appid", + "application-password": "service-principal-password", + "subscription-id": "subscription", + }) + + s.interactiveCreateServicePrincipalCreator.CheckCallNames(c, "InteractiveCreateServicePrincipal") + args := s.interactiveCreateServicePrincipalCreator.Calls()[0].Args + c.Assert(args[3], gc.Equals, "https://arm.invalid") + c.Assert(args[4], gc.Equals, "https://graph.invalid") + c.Assert(args[5], gc.Equals, "subscription") +} + +func (s *credentialsSuite) TestFinalizeCredentialInteractiveError(c *gc.C) { + in := cloud.NewCredential("interactive", map[string]string{"subscription-id": "subscription"}) + s.interactiveCreateServicePrincipalCreator.SetErrors(errors.New("blargh")) + ctx := 
coretesting.Context(c) + _, err := s.provider.FinalizeCredential(ctx, environs.FinalizeCredentialParams{ + Credential: in, + CloudEndpoint: "https://arm.invalid", + CloudIdentityEndpoint: "https://graph.invalid", + }) + c.Assert(err, gc.ErrorMatches, "blargh") +} + +type interactiveCreateServicePrincipalCreator struct { + testing.Stub +} + +func (c *interactiveCreateServicePrincipalCreator) InteractiveCreateServicePrincipal( + stderr io.Writer, + sender autorest.Sender, + requestInspector autorest.PrepareDecorator, + resourceManagerEndpoint string, + graphEndpoint string, + subscriptionId string, + clock clock.Clock, + newUUID func() (utils.UUID, error), +) (appId, password string, _ error) { + c.MethodCall( + c, "InteractiveCreateServicePrincipal", + stderr, sender, requestInspector, resourceManagerEndpoint, + graphEndpoint, subscriptionId, clock, newUUID, + ) + return "appid", "service-principal-password", c.NextErr() +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/azure/deployments.go juju-core-2.0.0/src/github.com/juju/juju/provider/azure/deployments.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/azure/deployments.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/azure/deployments.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,42 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+ +package azure + +import ( + "github.com/Azure/azure-sdk-for-go/arm/resources/resources" + "github.com/Azure/go-autorest/autorest" + "github.com/juju/errors" + + "github.com/juju/juju/provider/azure/internal/armtemplates" +) + +func createDeployment( + callAPI callAPIFunc, + client resources.DeploymentsClient, + resourceGroup string, + deploymentName string, + t armtemplates.Template, +) error { + templateMap, err := t.Map() + if err != nil { + return errors.Trace(err) + } + deployment := resources.Deployment{ + &resources.DeploymentProperties{ + Template: &templateMap, + Mode: resources.Incremental, + }, + } + if err := callAPI(func() (autorest.Response, error) { + return client.CreateOrUpdate( + resourceGroup, + deploymentName, + deployment, + nil, // abort channel + ) + }); err != nil { + return errors.Annotatef(err, "creating deployment %q", deploymentName) + } + return nil +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/azure/environ.go juju-core-2.0.0/src/github.com/juju/juju/provider/azure/environ.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/azure/environ.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/azure/environ.go 2016-10-13 14:31:49.000000000 +0000 @@ -11,20 +11,18 @@ "strings" "sync" - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/azure" - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/to" "github.com/Azure/azure-sdk-for-go/arm/compute" "github.com/Azure/azure-sdk-for-go/arm/network" - "github.com/Azure/azure-sdk-for-go/arm/resources" + "github.com/Azure/azure-sdk-for-go/arm/resources/resources" "github.com/Azure/azure-sdk-for-go/arm/storage" azurestorage "github.com/Azure/azure-sdk-for-go/storage" + "github.com/Azure/go-autorest/autorest" + 
"github.com/Azure/go-autorest/autorest/to" "github.com/juju/errors" "github.com/juju/loggo" "github.com/juju/utils/arch" "github.com/juju/utils/os" jujuseries "github.com/juju/utils/series" - "github.com/juju/utils/set" "gopkg.in/juju/names.v2" "github.com/juju/juju/cloudconfig/instancecfg" @@ -37,7 +35,10 @@ "github.com/juju/juju/environs/tags" "github.com/juju/juju/instance" jujunetwork "github.com/juju/juju/network" + "github.com/juju/juju/provider/azure/internal/armtemplates" internalazurestorage "github.com/juju/juju/provider/azure/internal/azurestorage" + "github.com/juju/juju/provider/azure/internal/errorutils" + "github.com/juju/juju/provider/azure/internal/tracing" "github.com/juju/juju/provider/common" "github.com/juju/juju/state" "github.com/juju/juju/tools" @@ -49,6 +50,15 @@ // defaultRootDiskSize is the default root disk size to give // to a VM, if none is specified. defaultRootDiskSize = 30 * 1024 // 30 GiB + + // serviceErrorCodeDeploymentCannotBeCancelled is the error code for + // service errors in response to an attempt to cancel a deployment + // that cannot be cancelled. + serviceErrorCodeDeploymentCannotBeCancelled = "DeploymentCannotBeCancelled" + + // controllerAvailabilitySet is the name of the availability set + // used for controller machines. + controllerAvailabilitySet = "juju-controller" ) type azureEnviron struct { @@ -77,19 +87,21 @@ // envName is the name of the environment. envName string - // azure auth token, and management clients - token *azure.ServicePrincipalToken - compute compute.ManagementClient - resources resources.ManagementClient - storage storage.ManagementClient - network network.ManagementClient - storageClient azurestorage.Client - - mu sync.Mutex - config *azureModelConfig - instanceTypes map[string]instances.InstanceType - storageAccount *storage.Account - storageAccountKeys *storage.AccountKeys + // authorizer is the authorizer we use for Azure. 
+ authorizer *cloudSpecAuth + + compute compute.ManagementClient + resources resources.ManagementClient + storage storage.ManagementClient + network network.ManagementClient + storageClient azurestorage.Client + storageAccountName string + + mu sync.Mutex + config *azureModelConfig + instanceTypes map[string]instances.InstanceType + storageAccount *storage.Account + storageAccountKey *storage.AccountKey } var _ environs.Environ = (*azureEnviron)(nil) @@ -121,30 +133,32 @@ if err := env.SetConfig(cfg); err != nil { return nil, errors.Trace(err) } + modelTag := names.NewModelTag(cfg.UUID()) env.resourceGroup = resourceGroupName(modelTag, cfg.Name()) env.envName = cfg.Name() + + // We need a deterministic storage account name, so that we can + // defer creation of the storage account to the VM deployment, + // and retain the ability to create multiple deployments in + // parallel. + // + // We use the last 20 non-hyphen hex characters of the model's + // UUID as the storage account name, prefixed with "juju". The + // probability of clashing with another storage account should + // be negligible. 
+ uuidAlphaNumeric := strings.Replace(env.config.Config.UUID(), "-", "", -1) + env.storageAccountName = "juju" + uuidAlphaNumeric[len(uuidAlphaNumeric)-20:] + return &env, nil } func (env *azureEnviron) initEnviron() error { credAttrs := env.cloud.Credential.Attributes() env.subscriptionId = credAttrs[credAttrSubscriptionId] - tenantId := credAttrs[credAttrTenantId] - appId := credAttrs[credAttrAppId] - appPassword := credAttrs[credAttrAppPassword] - token, err := azure.NewServicePrincipalToken( - appId, - appPassword, - tenantId, - azure.AzureResourceManagerScope, - ) - if err != nil { - return errors.Annotate(err, "constructing service principal token") - } - env.token = token - if env.provider.config.Sender != nil { - env.token.SetSender(env.provider.config.Sender) + env.authorizer = &cloudSpecAuth{ + cloud: env.cloud, + sender: env.provider.config.Sender, } env.compute = compute.NewWithBaseURI(env.cloud.Endpoint, env.subscriptionId) @@ -158,13 +172,13 @@ "azure.network": &env.network.Client, } for id, client := range clients { - client.Authorizer = env.token + client.Authorizer = env.authorizer logger := loggo.GetLogger(id) if env.provider.config.Sender != nil { client.Sender = env.provider.config.Sender } - client.ResponseInspector = tracingRespondDecorator(logger) - client.RequestInspector = tracingPrepareDecorator(logger) + client.ResponseInspector = tracing.RespondDecorator(logger) + client.RequestInspector = tracing.PrepareDecorator(logger) if env.provider.config.RequestInspector != nil { tracer := client.RequestInspector inspector := env.provider.config.RequestInspector @@ -201,7 +215,6 @@ ctx environs.BootstrapContext, args environs.BootstrapParams, ) (*environs.BootstrapResult, error) { - if err := env.initResourceGroup(args.ControllerConfig.ControllerUUID()); err != nil { return nil, errors.Annotate(err, "creating controller resource group") } @@ -216,139 +229,48 @@ return result, nil } -// initResourceGroup creates and initialises a resource group for 
this -// environment. The resource group will have a storage account, and -// internal network and subnet. +// BootstrapMessage is part of the Environ interface. +func (env *azureEnviron) BootstrapMessage() string { + return "" +} + +// initResourceGroup creates a resource group for this environment. func (env *azureEnviron) initResourceGroup(controllerUUID string) error { location := env.location resourceGroupsClient := resources.GroupsClient{env.resources} - networkClient := env.network - storageAccountsClient := storage.AccountsClient{env.storage} env.mu.Lock() tags := tags.ResourceTags( names.NewModelTag(env.config.Config.UUID()), - names.NewModelTag(controllerUUID), + names.NewControllerTag(controllerUUID), env.config, ) - storageAccountType := env.config.storageAccountType env.mu.Unlock() logger.Debugf("creating resource group %q", env.resourceGroup) - if err := env.callAPI(func() (autorest.Response, error) { + err := env.callAPI(func() (autorest.Response, error) { group, err := resourceGroupsClient.CreateOrUpdate(env.resourceGroup, resources.ResourceGroup{ Location: to.StringPtr(location), - Tags: toTagsPtr(tags), + Tags: to.StringMapPtr(tags), }) return group.Response, err - }); err != nil { - return errors.Annotate(err, "creating resource group") - } - - // Create an internal network for all VMs in the - // resource group to connect to. - vnetPtr, err := createInternalVirtualNetwork( - env.callAPI, networkClient, env.resourceGroup, location, tags, - ) - if err != nil { - return errors.Annotate(err, "creating virtual network") - } - - _, err = createInternalSubnet( - env.callAPI, networkClient, env.resourceGroup, vnetPtr, location, tags, - ) - if err != nil { - return errors.Annotate(err, "creating subnet") - } - - // Create a storage account for the resource group. 
- if err := createStorageAccount( - env.callAPI, storageAccountsClient, storageAccountType, - env.resourceGroup, location, tags, - env.provider.config.StorageAccountNameGenerator, - ); err != nil { - return errors.Annotate(err, "creating storage account") - } - return nil -} - -func createStorageAccount( - callAPI callAPIFunc, - client storage.AccountsClient, - accountType storage.AccountType, - resourceGroup string, - location string, - tags map[string]string, - accountNameGenerator func() string, -) error { - logger.Debugf("creating storage account (finding available name)") - const maxAttempts = 10 - for remaining := maxAttempts; remaining > 0; remaining-- { - accountName := accountNameGenerator() - logger.Debugf("- checking storage account name %q", accountName) - var result storage.CheckNameAvailabilityResult - if err := callAPI(func() (autorest.Response, error) { - var err error - result, err = client.CheckNameAvailability( - storage.AccountCheckNameAvailabilityParameters{ - Name: to.StringPtr(accountName), - // Azure is a little inconsistent with when Type is - // required. It's required here. - Type: to.StringPtr("Microsoft.Storage/storageAccounts"), - }, - ) - return result.Response, err - }); err != nil { - return errors.Annotate(err, "checking account name availability") - } - if !to.Bool(result.NameAvailable) { - logger.Debugf( - "%q is not available (%v): %v", - accountName, result.Reason, result.Message, - ) - continue - } - createParams := storage.AccountCreateParameters{ - Location: to.StringPtr(location), - Tags: toTagsPtr(tags), - Properties: &storage.AccountPropertiesCreateParameters{ - AccountType: accountType, - }, - } - logger.Debugf("- creating %q storage account %q", accountType, accountName) - // TODO(axw) account creation can fail if the account name is - // available, but contains profanity. We should retry a set - // number of times even if creating fails. 
- if err := callAPI(func() (autorest.Response, error) { - result, err := client.Create(resourceGroup, accountName, createParams) - return result.Response, err - }); err != nil { - return errors.Trace(err) - } - return nil - } - return errors.New("could not find available storage account name") + }) + return errors.Annotate(err, "creating resource group") } // ControllerInstances is specified in the Environ interface. func (env *azureEnviron) ControllerInstances(controllerUUID string) ([]instance.Id, error) { - // controllers are tagged with tags.JujuIsController, so just - // list the instances in the controller resource group and pick - // those ones out. - instances, err := env.allInstances(env.resourceGroup, true) + instances, err := env.allInstances(env.resourceGroup, false, true) if err != nil { return nil, err } - var ids []instance.Id - for _, inst := range instances { - azureInstance := inst.(*azureInstance) - if toTags(azureInstance.Tags)[tags.JujuIsController] == "true" { - ids = append(ids, inst.Id()) - } - } - if len(ids) == 0 { + if len(instances) == 0 { return nil, environs.ErrNoInstances } + ids := make([]instance.Id, len(instances)) + for i, inst := range instances { + ids[i] = inst.Id() + } return ids, nil } @@ -407,9 +329,8 @@ []string{constraints.InstanceType}, []string{ constraints.Mem, - constraints.CpuCores, + constraints.Cores, constraints.Arch, - constraints.RootDisk, }, ) return validator, nil @@ -447,38 +368,22 @@ return nil, errors.New("missing controller UUID") } - location := env.location - vmClient := compute.VirtualMachinesClient{env.compute} - availabilitySetClient := compute.AvailabilitySetsClient{env.compute} - networkClient := env.network - vmImagesClient := compute.VirtualMachineImagesClient{env.compute} - vmExtensionClient := compute.VirtualMachineExtensionsClient{env.compute} - // Get the required configuration and config-dependent information // required to create the instance. 
We take the lock just once, to // ensure we obtain all information based on the same configuration. env.mu.Lock() envTags := tags.ResourceTags( names.NewModelTag(env.config.Config.UUID()), - names.NewModelTag(args.ControllerUUID), + names.NewControllerTag(args.ControllerUUID), env.config, ) + storageAccountType := env.config.storageAccountType imageStream := env.config.ImageStream() instanceTypes, err := env.getInstanceTypesLocked() if err != nil { env.mu.Unlock() return nil, errors.Trace(err) } - storageAccount, err := env.getStorageAccountLocked(false) - if err != nil { - env.mu.Unlock() - return nil, errors.Annotate(err, "getting storage account") - } - internalNetworkSubnet, err := env.getInternalSubnetLocked() - if err != nil { - env.mu.Unlock() - return nil, errors.Trace(err) - } env.mu.Unlock() // If the user has not specified a root-disk size, then @@ -492,12 +397,13 @@ } // Identify the instance type and image to provision. + series := args.Tools.OneSeries() instanceSpec, err := findInstanceSpec( - vmImagesClient, + compute.VirtualMachineImagesClient{env.compute}, instanceTypes, &instances.InstanceConstraint{ - Region: location, - Series: args.Tools.OneSeries(), + Region: env.location, + Series: series, Arches: args.Tools.Arches(), Constraints: args.Constraints, }, @@ -513,6 +419,18 @@ instanceSpec.InstanceType.RootDisk = rootDisk } + // Windows images are 127GiB, and cannot be made smaller. + const windowsMinRootDiskMB = 127 * 1024 + seriesOS, err := jujuseries.GetOSFromSeries(series) + if err != nil { + return nil, errors.Trace(err) + } + if seriesOS == os.Windows { + if instanceSpec.InstanceType.RootDisk < windowsMinRootDiskMB { + instanceSpec.InstanceType.RootDisk = windowsMinRootDiskMB + } + } + // Pick tools by filtering the available tools down to the architecture of // the image that will be provisioned. selectedTools, err := args.Tools.Match(tools.Filter{ @@ -544,27 +462,11 @@ // machine with this. 
vmTags[jujuMachineNameTag] = vmName - // If the machine will run a controller, then we need to open the - // API port for it. - var apiPortPtr *int - if args.InstanceConfig.Controller != nil { - apiPort := args.InstanceConfig.Controller.Config.APIPort() - apiPortPtr = &apiPort - } - - vm, err := createVirtualMachine( - env.resourceGroup, location, vmName, - vmTags, envTags, + if err := env.createVirtualMachine( + vmName, vmTags, envTags, instanceSpec, args.InstanceConfig, - args.DistributionGroup, - env.Instances, - apiPortPtr, internalNetworkSubnet, - storageAccount, - networkClient, vmClient, - availabilitySetClient, vmExtensionClient, - env.callAPI, - ) - if err != nil { + storageAccountType, + ); err != nil { logger.Errorf("creating instance failed, destroying: %v", err) if err := env.StopInstances(instance.Id(vmName)); err != nil { logger.Errorf("could not destroy failed virtual machine: %v", err) @@ -575,7 +477,7 @@ // Note: the instance is initialised without addresses to keep the // API chatter down. We will refresh the instance if we need to know // the addresses. - inst := &azureInstance{vm, env, nil, nil} + inst := &azureInstance{vmName, "Creating", env, nil, nil} amd64 := arch.AMD64 hc := &instance.HardwareCharacteristics{ Arch: &amd64, @@ -593,57 +495,151 @@ // // All resources created are tagged with the specified "vmTags", so if // this function fails then all resources can be deleted by tag. 
-func createVirtualMachine( - resourceGroup, location, vmName string, +func (env *azureEnviron) createVirtualMachine( + vmName string, vmTags, envTags map[string]string, instanceSpec *instances.InstanceSpec, instanceConfig *instancecfg.InstanceConfig, - distributionGroupFunc func() ([]instance.Id, error), - instancesFunc func([]instance.Id) ([]instance.Instance, error), - apiPort *int, - internalNetworkSubnet *network.Subnet, - storageAccount *storage.Account, - networkClient network.ManagementClient, - vmClient compute.VirtualMachinesClient, - availabilitySetClient compute.AvailabilitySetsClient, - vmExtensionClient compute.VirtualMachineExtensionsClient, - callAPI callAPIFunc, -) (compute.VirtualMachine, error) { + storageAccountType string, +) error { + + deploymentsClient := resources.DeploymentsClient{env.resources} - storageProfile, err := newStorageProfile( - vmName, instanceSpec, storageAccount, + var apiPort int + if instanceConfig.Controller != nil { + apiPortValue := instanceConfig.Controller.Config.APIPort() + apiPort = apiPortValue + } else { + apiPorts := instanceConfig.APIInfo.Ports() + if len(apiPorts) != 1 { + return errors.Errorf("expected one API port, found %v", apiPorts) + } + apiPort = apiPorts[0] + } + resources := networkTemplateResources(env.location, envTags, apiPort) + resources = append(resources, storageAccountTemplateResource( + env.location, envTags, + env.storageAccountName, storageAccountType, + )) + + osProfile, seriesOS, err := newOSProfile( + vmName, instanceConfig, + env.provider.config.RandomWindowsAdminPassword, ) if err != nil { - return compute.VirtualMachine{}, errors.Annotate(err, "creating storage profile") + return errors.Annotate(err, "creating OS profile") } - - osProfile, seriesOS, err := newOSProfile(vmName, instanceConfig) + storageProfile, err := newStorageProfile(vmName, env.storageAccountName, instanceSpec) if err != nil { - return compute.VirtualMachine{}, errors.Annotate(err, "creating OS profile") + return 
errors.Annotate(err, "creating storage profile") } - networkProfile, err := newNetworkProfile( - callAPI, networkClient, - vmName, apiPort, internalNetworkSubnet, - resourceGroup, location, vmTags, + var vmDependsOn []string + var availabilitySetSubResource *compute.SubResource + availabilitySetName, err := availabilitySetName( + vmName, vmTags, instanceConfig.Controller != nil, ) if err != nil { - return compute.VirtualMachine{}, errors.Annotate(err, "creating network profile") + return errors.Annotate(err, "getting availability set name") + } + if availabilitySetName != "" { + availabilitySetId := fmt.Sprintf( + `[resourceId('Microsoft.Compute/availabilitySets','%s')]`, + availabilitySetName, + ) + resources = append(resources, armtemplates.Resource{ + APIVersion: compute.APIVersion, + Type: "Microsoft.Compute/availabilitySets", + Name: availabilitySetName, + Location: env.location, + Tags: envTags, + }) + availabilitySetSubResource = &compute.SubResource{ + ID: to.StringPtr(availabilitySetId), + } + vmDependsOn = append(vmDependsOn, availabilitySetId) } - availabilitySetId, err := createAvailabilitySet( - callAPI, availabilitySetClient, - vmName, resourceGroup, location, - vmTags, envTags, - distributionGroupFunc, instancesFunc, + publicIPAddressName := vmName + "-public-ip" + publicIPAddressId := fmt.Sprintf(`[resourceId('Microsoft.Network/publicIPAddresses', '%s')]`, publicIPAddressName) + resources = append(resources, armtemplates.Resource{ + APIVersion: network.APIVersion, + Type: "Microsoft.Network/publicIPAddresses", + Name: publicIPAddressName, + Location: env.location, + Tags: vmTags, + Properties: &network.PublicIPAddressPropertiesFormat{ + PublicIPAllocationMethod: network.Dynamic, + }, + }) + + // Controller and non-controller machines are assigned to separate + // subnets. This enables us to create controller-specific NSG rules + // just by targeting the controller subnet. 
+ subnetName := internalSubnetName + subnetPrefix := internalSubnetPrefix + if instanceConfig.Controller != nil { + subnetName = controllerSubnetName + subnetPrefix = controllerSubnetPrefix + } + subnetId := fmt.Sprintf( + `[concat(resourceId('Microsoft.Network/virtualNetworks', '%s'), '/subnets/%s')]`, + internalNetworkName, subnetName, ) + + privateIP, err := machineSubnetIP(subnetPrefix, instanceConfig.MachineId) if err != nil { - return compute.VirtualMachine{}, errors.Annotate(err, "creating availability set") + return errors.Annotatef(err, "computing private IP address") } + nicName := vmName + "-primary" + nicId := fmt.Sprintf(`[resourceId('Microsoft.Network/networkInterfaces', '%s')]`, nicName) + ipConfigurations := []network.InterfaceIPConfiguration{{ + Name: to.StringPtr("primary"), + Properties: &network.InterfaceIPConfigurationPropertiesFormat{ + Primary: to.BoolPtr(true), + PrivateIPAddress: to.StringPtr(privateIP.String()), + PrivateIPAllocationMethod: network.Static, + Subnet: &network.Subnet{ID: to.StringPtr(subnetId)}, + PublicIPAddress: &network.PublicIPAddress{ + ID: to.StringPtr(publicIPAddressId), + }, + }, + }} + resources = append(resources, armtemplates.Resource{ + APIVersion: network.APIVersion, + Type: "Microsoft.Network/networkInterfaces", + Name: nicName, + Location: env.location, + Tags: vmTags, + Properties: &network.InterfacePropertiesFormat{ + IPConfigurations: &ipConfigurations, + }, + DependsOn: []string{ + publicIPAddressId, + fmt.Sprintf( + `[resourceId('Microsoft.Network/virtualNetworks', '%s')]`, + internalNetworkName, + ), + }, + }) - vmArgs := compute.VirtualMachine{ - Location: to.StringPtr(location), - Tags: toTagsPtr(vmTags), + nics := []compute.NetworkInterfaceReference{{ + ID: to.StringPtr(nicId), + Properties: &compute.NetworkInterfaceReferenceProperties{ + Primary: to.BoolPtr(true), + }, + }} + vmDependsOn = append(vmDependsOn, nicId) + vmDependsOn = append(vmDependsOn, fmt.Sprintf( + 
`[resourceId('Microsoft.Storage/storageAccounts', '%s')]`, + env.storageAccountName, + )) + resources = append(resources, armtemplates.Resource{ + APIVersion: compute.APIVersion, + Type: "Microsoft.Compute/virtualMachines", + Name: vmName, + Location: env.location, + Tags: vmTags, Properties: &compute.VirtualMachineProperties{ HardwareProfile: &compute.HardwareProfile{ VMSize: compute.VirtualMachineSizeTypes( @@ -652,95 +648,78 @@ }, StorageProfile: storageProfile, OsProfile: osProfile, - NetworkProfile: networkProfile, - AvailabilitySet: &compute.SubResource{ - ID: to.StringPtr(availabilitySetId), + NetworkProfile: &compute.NetworkProfile{ + &nics, }, + AvailabilitySet: availabilitySetSubResource, }, - } - var vm compute.VirtualMachine - if err := callAPI(func() (autorest.Response, error) { - var err error - vm, err = vmClient.CreateOrUpdate(resourceGroup, vmName, vmArgs) - return vm.Response, err - }); err != nil { - return compute.VirtualMachine{}, errors.Annotate(err, "creating virtual machine") - } + DependsOn: vmDependsOn, + }) // On Windows and CentOS, we must add the CustomScript VM // extension to run the CustomData script. 
switch seriesOS { case os.Windows, os.CentOS: - if err := createVMExtension( - callAPI, vmExtensionClient, seriesOS, - resourceGroup, vmName, location, vmTags, - ); err != nil { - return compute.VirtualMachine{}, errors.Annotate( + properties, err := vmExtensionProperties(seriesOS) + if err != nil { + return errors.Annotate( err, "creating virtual machine extension", ) } + resources = append(resources, armtemplates.Resource{ + APIVersion: compute.APIVersion, + Type: "Microsoft.Compute/virtualMachines/extensions", + Name: vmName + "/" + extensionName, + Location: env.location, + Tags: vmTags, + Properties: properties, + DependsOn: []string{"Microsoft.Compute/virtualMachines/" + vmName}, + }) + } + + logger.Debugf("- creating virtual machine deployment") + template := armtemplates.Template{Resources: resources} + // NOTE(axw) VMs take a long time to go to "Succeeded", so we do not + // block waiting for them to be fully provisioned. This means we won't + // return an error from StartInstance if the VM fails provisioning; + // we will instead report the error via the instance's status. + deploymentsClient.ResponseInspector = asyncCreationRespondDecorator( + deploymentsClient.ResponseInspector, + ) + if err := createDeployment( + env.callAPI, + deploymentsClient, + env.resourceGroup, + vmName, // deployment name + template, + ); err != nil { + return errors.Trace(err) } - return vm, nil + return nil } // createAvailabilitySet creates the availability set for a machine to use // if it doesn't already exist, and returns the availability set's ID. The // algorithm used for choosing the availability set is: -// - if there is a distribution group, use the same availability set as -// the instances in that group. 
Instances in the group may be in -// different availability sets (when multiple services colocated on a -// machine), so we pick one arbitrarily -// - if there is no distribution group, create an availability name with -// a name based on the value of the tags.JujuUnitsDeployed tag in vmTags, -// if it exists -// - if there are no units assigned to the machine, then use the "juju" -// availability set -func createAvailabilitySet( - callAPI callAPIFunc, - client compute.AvailabilitySetsClient, - vmName, resourceGroup, location string, - vmTags, envTags map[string]string, - distributionGroupFunc func() ([]instance.Id, error), - instancesFunc func([]instance.Id) ([]instance.Instance, error), +// - if the machine is a controller, use the availability set name +// "juju-controller"; +// - if the machine has units assigned, create an availability +// name with a name based on the value of the tags.JujuUnitsDeployed tag +// in vmTags, if it exists; +// - otherwise, do not assign the machine to an availability set +func availabilitySetName( + vmName string, + vmTags map[string]string, + controller bool, ) (string, error) { logger.Debugf("selecting availability set for %q", vmName) - - // First we check if there's a distribution group, and if so, - // use the availability set of the first instance we find in it. 
- var instanceIds []instance.Id - if distributionGroupFunc != nil { - var err error - instanceIds, err = distributionGroupFunc() - if err != nil { - return "", errors.Annotate( - err, "querying distribution group", - ) - } - } - instances, err := instancesFunc(instanceIds) - switch err { - case nil, environs.ErrPartialInstances, environs.ErrNoInstances: - default: - return "", errors.Annotate( - err, "querying distribution group instances", - ) - } - for _, instance := range instances { - if instance == nil { - continue - } - instance := instance.(*azureInstance) - availabilitySetSubResource := instance.Properties.AvailabilitySet - if availabilitySetSubResource == nil || availabilitySetSubResource.ID == nil { - continue - } - logger.Debugf("- selecting availability set of %q", instance.Name) - return to.String(availabilitySetSubResource.ID), nil + if controller { + return controllerAvailabilitySet, nil } // We'll have to create an availability set. Use the name of one of the // services assigned to the machine. - availabilitySetName := "juju" + var availabilitySetName string if unitNames, ok := vmTags[tags.JujuUnitsDeployed]; ok { for _, unitName := range strings.Fields(unitNames) { if !names.IsValidUnit(unitName) { @@ -748,42 +727,21 @@ } serviceName, err := names.UnitApplication(unitName) if err != nil { - return "", errors.Annotate( - err, "getting service name", - ) + return "", errors.Annotate(err, "getting service name") } availabilitySetName = serviceName break } } - - logger.Debugf("- creating availability set %q", availabilitySetName) - var availabilitySet compute.AvailabilitySet - if err := callAPI(func() (autorest.Response, error) { - var err error - availabilitySet, err = client.CreateOrUpdate( - resourceGroup, availabilitySetName, compute.AvailabilitySet{ - Location: to.StringPtr(location), - // NOTE(axw) we do *not* want to use vmTags here, - // because an availability set is shared by machines. 
- Tags: toTagsPtr(envTags), - }, - ) - return availabilitySet.Response, err - }); err != nil { - return "", errors.Annotatef( - err, "creating availability set %q", availabilitySetName, - ) - } - return to.String(availabilitySet.ID), nil + return availabilitySetName, nil } // newStorageProfile creates the storage profile for a virtual machine, // based on the series and chosen instance spec. func newStorageProfile( vmName string, + storageAccountName string, instanceSpec *instances.InstanceSpec, - storageAccount *storage.Account, ) (*compute.StorageProfile, error) { logger.Debugf("creating storage profile for %q", vmName) @@ -796,19 +754,22 @@ sku := urnParts[2] version := urnParts[3] - osDisksRoot := osDiskVhdRoot(storageAccount) + osDisksRoot := fmt.Sprintf( + `reference(resourceId('Microsoft.Storage/storageAccounts', '%s'), '%s').primaryEndpoints.blob`, + storageAccountName, storage.APIVersion, + ) osDiskName := vmName + osDiskURI := fmt.Sprintf( + `[concat(%s, '%s/%s%s')]`, + osDisksRoot, osDiskVHDContainer, osDiskName, vhdExtension, + ) osDiskSizeGB := mibToGB(instanceSpec.InstanceType.RootDisk) osDisk := &compute.OSDisk{ Name: to.StringPtr(osDiskName), CreateOption: compute.FromImage, Caching: compute.ReadWrite, - Vhd: &compute.VirtualHardDisk{ - URI: to.StringPtr( - osDisksRoot + osDiskName + vhdExtension, - ), - }, - DiskSizeGB: to.IntPtr(int(osDiskSizeGB)), + Vhd: &compute.VirtualHardDisk{URI: to.StringPtr(osDiskURI)}, + DiskSizeGB: to.Int32Ptr(int32(osDiskSizeGB)), } return &compute.StorageProfile{ ImageReference: &compute.ImageReference{ @@ -826,7 +787,11 @@ return uint64(b / (1000 * 1000 * 1000)) } -func newOSProfile(vmName string, instanceConfig *instancecfg.InstanceConfig) (*compute.OSProfile, os.OSType, error) { +func newOSProfile( + vmName string, + instanceConfig *instancecfg.InstanceConfig, + randomAdminPassword func() string, +) (*compute.OSProfile, os.OSType, error) { logger.Debugf("creating OS profile for %q", vmName) customData, err := 
providerinit.ComposeUserData(instanceConfig, nil, AzureRenderer{}) @@ -844,7 +809,7 @@ return nil, os.Unknown, errors.Trace(err) } switch seriesOS { - case os.Ubuntu, os.CentOS, os.Arch: + case os.Ubuntu, os.CentOS: // SSH keys are handled by custom data, but must also be // specified in order to forego providing a password, and // disable password authentication. @@ -876,152 +841,194 @@ // StopInstances is specified in the InstanceBroker interface. func (env *azureEnviron) StopInstances(ids ...instance.Id) error { - computeClient := env.compute - networkClient := env.network + if len(ids) == 0 { + return nil + } - // Query the instances, so we can inspect the VirtualMachines - // and delete related resources. - instances, err := env.Instances(ids) - switch err { - case environs.ErrNoInstances: + // First up, cancel the deployments. Then we can identify the resources + // that need to be deleted without racing with their creation. + var wg sync.WaitGroup + var existing int + cancelResults := make([]error, len(ids)) + for i, id := range ids { + logger.Debugf("canceling deployment for instance %q", id) + wg.Add(1) + go func(i int, id instance.Id) { + defer wg.Done() + cancelResults[i] = errors.Annotatef( + env.cancelDeployment(string(id)), + "canceling deployment %q", id, + ) + }(i, id) + } + wg.Wait() + for _, err := range cancelResults { + if err == nil { + existing++ + } else if !errors.IsNotFound(err) { + return err + } + } + if existing == 0 { + // None of the instances exist, so we can stop now. return nil - default: + } + + maybeStorageClient, err := env.getStorageClient() + if errors.IsNotFound(err) { + // It is possible, if unlikely, that the first deployment for a + // hosted model will fail or be canceled before the model's + // storage account is created. We must therefore cater for the + // account being missing or incomplete here. 
+ maybeStorageClient = nil + } else if err != nil { return errors.Trace(err) - case nil, environs.ErrPartialInstances: - // handled below - break } - storageClient, err := env.getStorageClient() + // List network interfaces and public IP addresses. + instanceNics, err := instanceNetworkInterfaces( + env.callAPI, env.resourceGroup, + network.InterfacesClient{env.network}, + ) + if err != nil { + return errors.Trace(err) + } + instancePips, err := instancePublicIPAddresses( + env.callAPI, env.resourceGroup, + network.PublicIPAddressesClient{env.network}, + ) if err != nil { return errors.Trace(err) } - for _, inst := range instances { - if inst == nil { + // Delete the deployments, virtual machines, and related resources. + deleteResults := make([]error, len(ids)) + for i, id := range ids { + if errors.IsNotFound(cancelResults[i]) { continue } - if err := deleteInstance( - inst.(*azureInstance), - env.callAPI, computeClient, networkClient, storageClient, - ); err != nil { - return errors.Annotatef(err, "deleting instance %q", inst.Id()) + // The deployment does not exist, so there's nothing more to do. + logger.Debugf("deleting instance %q", id) + wg.Add(1) + go func(i int, id instance.Id) { + defer wg.Done() + err := env.deleteVirtualMachine( + id, + maybeStorageClient, + instanceNics[id], + instancePips[id], + ) + deleteResults[i] = errors.Annotatef( + err, "deleting instance %q", id, + ) + }(i, id) + } + wg.Wait() + for _, err := range deleteResults { + if err != nil && !errors.IsNotFound(err) { + return errors.Trace(err) } } + return nil } -// deleteInstances deletes a virtual machine and all of the resources that -// it owns, and any corresponding network security rules. 
-func deleteInstance( - inst *azureInstance, - callAPI callAPIFunc, - computeClient compute.ManagementClient, - networkClient network.ManagementClient, - storageClient internalazurestorage.Client, -) error { - vmName := string(inst.Id()) - vmClient := compute.VirtualMachinesClient{computeClient} - nicClient := network.InterfacesClient{networkClient} - nsgClient := network.SecurityGroupsClient{networkClient} - securityRuleClient := network.SecurityRulesClient{networkClient} - publicIPClient := network.PublicIPAddressesClient{networkClient} - logger.Debugf("deleting instance %q", vmName) - - logger.Debugf("- deleting virtual machine") - var deleteResult autorest.Response - if err := callAPI(func() (autorest.Response, error) { +// cancelDeployment cancels a template deployment. +func (env *azureEnviron) cancelDeployment(name string) error { + deploymentsClient := resources.DeploymentsClient{env.resources} + logger.Debugf("- canceling deployment %q", name) + var cancelResult autorest.Response + if err := env.callAPI(func() (autorest.Response, error) { var err error - deleteResult, err = vmClient.Delete(inst.env.resourceGroup, vmName) - return deleteResult, err + cancelResult, err = deploymentsClient.Cancel(env.resourceGroup, name) + return cancelResult, err }); err != nil { - if deleteResult.Response == nil || deleteResult.StatusCode != http.StatusNotFound { + if cancelResult.Response != nil { + switch cancelResult.StatusCode { + case http.StatusNotFound: + return errors.NewNotFound(err, fmt.Sprintf("deployment %q not found", name)) + case http.StatusConflict: + if err, ok := errorutils.ServiceError(err); ok { + if err.Code == serviceErrorCodeDeploymentCannotBeCancelled { + // Deployments can only canceled while they're running. 
+ return nil + } + } + } + } + return errors.Annotatef(err, "canceling deployment %q", name) + } + return nil +} + +// deleteVirtualMachine deletes a virtual machine and all of the resources that +// it owns, and any corresponding network security rules. +func (env *azureEnviron) deleteVirtualMachine( + instId instance.Id, + maybeStorageClient internalazurestorage.Client, + networkInterfaces []network.Interface, + publicIPAddresses []network.PublicIPAddress, +) error { + vmClient := compute.VirtualMachinesClient{env.compute} + nicClient := network.InterfacesClient{env.network} + nsgClient := network.SecurityGroupsClient{env.network} + securityRuleClient := network.SecurityRulesClient{env.network} + pipClient := network.PublicIPAddressesClient{env.network} + deploymentsClient := resources.DeploymentsClient{env.resources} + vmName := string(instId) + + logger.Debugf("- deleting virtual machine (%s)", vmName) + if err := deleteResource(env.callAPI, vmClient, env.resourceGroup, vmName); err != nil { + if !errors.IsNotFound(err) { return errors.Annotate(err, "deleting virtual machine") } } - // Delete the VM's OS disk VHD. - logger.Debugf("- deleting OS VHD") - blobClient := storageClient.GetBlobService() - if _, err := blobClient.DeleteBlobIfExists(osDiskVHDContainer, vmName); err != nil { - return errors.Annotate(err, "deleting OS VHD") + if maybeStorageClient != nil { + logger.Debugf("- deleting OS VHD (%s)", vmName) + blobClient := maybeStorageClient.GetBlobService() + if _, err := blobClient.DeleteBlobIfExists(osDiskVHDContainer, vmName, nil); err != nil { + return errors.Annotate(err, "deleting OS VHD") + } } - // Delete network security rules that refer to the VM. 
- logger.Debugf("- deleting security rules") + logger.Debugf("- deleting security rules (%s)", vmName) if err := deleteInstanceNetworkSecurityRules( - inst.env.resourceGroup, inst.Id(), nsgClient, - securityRuleClient, inst.env.callAPI, + env.resourceGroup, instId, nsgClient, + securityRuleClient, env.callAPI, ); err != nil { return errors.Annotate(err, "deleting network security rules") } - // Detach public IPs from NICs. This must be done before public - // IPs can be deleted. In the future, VMs may not necessarily - // have a public IP, so we don't use the presence of a public - // IP to indicate the existence of an instance. - logger.Debugf("- detaching public IP addresses") - for _, nic := range inst.networkInterfaces { - if nic.Properties.IPConfigurations == nil { - continue - } - var detached bool - for i, ipConfiguration := range *nic.Properties.IPConfigurations { - if ipConfiguration.Properties.PublicIPAddress == nil { - continue - } - ipConfiguration.Properties.PublicIPAddress = nil - (*nic.Properties.IPConfigurations)[i] = ipConfiguration - detached = true - } - if detached { - if err := callAPI(func() (autorest.Response, error) { - result, err := nicClient.CreateOrUpdate( - inst.env.resourceGroup, to.String(nic.Name), nic, - ) - return result.Response, err - }); err != nil { - return errors.Annotate(err, "detaching public IP addresses") + logger.Debugf("- deleting network interfaces (%s)", vmName) + for _, nic := range networkInterfaces { + nicName := to.String(nic.Name) + logger.Tracef("deleting NIC %q", nicName) + if err := deleteResource(env.callAPI, nicClient, env.resourceGroup, nicName); err != nil { + if !errors.IsNotFound(err) { + return errors.Annotate(err, "deleting NIC") } } } - // Delete public IPs. 
- logger.Debugf("- deleting public IPs") - for _, pip := range inst.publicIPAddresses { + logger.Debugf("- deleting public IPs (%s)", vmName) + for _, pip := range publicIPAddresses { pipName := to.String(pip.Name) logger.Tracef("deleting public IP %q", pipName) - var result autorest.Response - if err := callAPI(func() (autorest.Response, error) { - var err error - result, err = publicIPClient.Delete(inst.env.resourceGroup, pipName) - return result, err - }); err != nil { - if result.Response == nil || result.StatusCode != http.StatusNotFound { + if err := deleteResource(env.callAPI, pipClient, env.resourceGroup, pipName); err != nil { + if !errors.IsNotFound(err) { return errors.Annotate(err, "deleting public IP") } } } - // Delete NICs. - // - // NOTE(axw) this *must* be deleted last, or we risk leaking resources. - logger.Debugf("- deleting network interfaces") - for _, nic := range inst.networkInterfaces { - nicName := to.String(nic.Name) - logger.Tracef("deleting NIC %q", nicName) - var result autorest.Response - if err := callAPI(func() (autorest.Response, error) { - var err error - result, err = nicClient.Delete(inst.env.resourceGroup, nicName) - return result, err - }); err != nil { - if result.Response == nil || result.StatusCode != http.StatusNotFound { - return errors.Annotate(err, "deleting NIC") - } + // The deployment must be deleted last, or we risk leaking resources. + logger.Debugf("- deleting deployment (%s)", vmName) + if err := deleteResource(env.callAPI, deploymentsClient, env.resourceGroup, vmName); err != nil { + if !errors.IsNotFound(err) { + return errors.Annotate(err, "deleting deployment") } } - return nil } @@ -1038,7 +1045,7 @@ if len(ids) == 0 { return nil, nil } - all, err := env.allInstances(resourceGroup, refreshAddresses) + all, err := env.allInstances(resourceGroup, refreshAddresses, false) if err != nil { return nil, errors.Trace(err) } @@ -1066,7 +1073,7 @@ // AllInstances is specified in the InstanceBroker interface. 
func (env *azureEnviron) AllInstances() ([]instance.Instance, error) { - return env.allInstances(env.resourceGroup, true /* refresh addresses */) + return env.allInstances(env.resourceGroup, true /* refresh addresses */, false /* all instances */) } // allInstances returns all of the instances in the given resource group, @@ -1074,82 +1081,52 @@ func (env *azureEnviron) allInstances( resourceGroup string, refreshAddresses bool, + controllerOnly bool, ) ([]instance.Instance, error) { - vmClient := compute.VirtualMachinesClient{env.compute} - nicClient := network.InterfacesClient{env.network} - pipClient := network.PublicIPAddressesClient{env.network} - - // Due to how deleting instances works, we have to get creative about - // listing instances. We list NICs and return an instance for each - // unique value of the jujuMachineNameTag tag. - // - // The machine provisioner will call AllInstances so it can delete - // unknown instances. StopInstances must delete VMs before NICs and - // public IPs, because a VM cannot have less than 1 NIC. Thus, we can - // potentially delete a VM but then fail to delete its NIC. - var nicsResult network.InterfaceListResult + deploymentsClient := resources.DeploymentsClient{env.resources} + var deploymentsResult resources.DeploymentListResult if err := env.callAPI(func() (autorest.Response, error) { var err error - nicsResult, err = nicClient.List(resourceGroup) - return nicsResult.Response, err + deploymentsResult, err = deploymentsClient.List(resourceGroup, "", nil) + return deploymentsResult.Response, err }); err != nil { - if nicsResult.Response.Response != nil && nicsResult.StatusCode == http.StatusNotFound { + if deploymentsResult.Response.Response != nil && deploymentsResult.StatusCode == http.StatusNotFound { // This will occur if the resource group does not // exist, e.g. in a fresh hosted environment. 
return nil, nil } return nil, errors.Trace(err) } - if nicsResult.Value == nil || len(*nicsResult.Value) == 0 { + if deploymentsResult.Value == nil || len(*deploymentsResult.Value) == 0 { return nil, nil } - // Create an azureInstance for each VM. - var result compute.VirtualMachineListResult - if err := env.callAPI(func() (autorest.Response, error) { - var err error - result, err = vmClient.List(resourceGroup) - return result.Response, err - }); err != nil { - return nil, errors.Annotate(err, "listing virtual machines") - } - vmNames := make(set.Strings) - var azureInstances []*azureInstance - if result.Value != nil { - azureInstances = make([]*azureInstance, len(*result.Value)) - for i, vm := range *result.Value { - inst := &azureInstance{vm, env, nil, nil} - azureInstances[i] = inst - vmNames.Add(to.String(vm.Name)) - } - } - - // Create additional azureInstances for NICs without machines. See - // comments above for rationale. This needs to happen before calling - // setInstanceAddresses, so we still associate the NICs/PIPs. 
- for _, nic := range *nicsResult.Value { - vmName, ok := toTags(nic.Tags)[jujuMachineNameTag] - if !ok || vmNames.Contains(vmName) { + azureInstances := make([]*azureInstance, 0, len(*deploymentsResult.Value)) + for _, deployment := range *deploymentsResult.Value { + name := to.String(deployment.Name) + if deployment.Properties == nil || deployment.Properties.Dependencies == nil { continue } - vm := compute.VirtualMachine{ - Name: to.StringPtr(vmName), - Properties: &compute.VirtualMachineProperties{ - ProvisioningState: to.StringPtr("Partially Deleted"), - }, + if controllerOnly && !isControllerDeployment(deployment) { + continue } - inst := &azureInstance{vm, env, nil, nil} + provisioningState := to.String(deployment.Properties.ProvisioningState) + inst := &azureInstance{name, provisioningState, env, nil, nil} azureInstances = append(azureInstances, inst) - vmNames.Add(to.String(vm.Name)) } if len(azureInstances) > 0 && refreshAddresses { if err := setInstanceAddresses( - pipClient, resourceGroup, azureInstances, nicsResult, + env.callAPI, + resourceGroup, + network.InterfacesClient{env.network}, + network.PublicIPAddressesClient{env.network}, + azureInstances, ); err != nil { return nil, errors.Trace(err) } } + instances := make([]instance.Instance, len(azureInstances)) for i, inst := range azureInstances { instances[i] = inst @@ -1157,6 +1134,26 @@ return instances, nil } +func isControllerDeployment(deployment resources.DeploymentExtended) bool { + for _, d := range *deployment.Properties.Dependencies { + if d.DependsOn == nil { + continue + } + if to.String(d.ResourceType) != "Microsoft.Compute/virtualMachines" { + continue + } + for _, on := range *d.DependsOn { + if to.String(on.ResourceType) != "Microsoft.Compute/availabilitySets" { + continue + } + if to.String(on.ResourceName) == controllerAvailabilitySet { + return true + } + } + } + return false +} + // Destroy is specified in the Environ interface. 
func (env *azureEnviron) Destroy() error { logger.Debugf("destroying model %q", env.envName) @@ -1246,7 +1243,7 @@ var result autorest.Response if err := env.callAPI(func() (autorest.Response, error) { var err error - result, err = client.Delete(resourceGroup) + result, err = client.Delete(resourceGroup, nil) return result, err }); err != nil { if result.Response == nil || result.StatusCode != http.StatusNotFound { @@ -1339,22 +1336,6 @@ return instanceTypes, nil } -// getInternalSubnetLocked queries the internal subnet for the environment. -func (env *azureEnviron) getInternalSubnetLocked() (*network.Subnet, error) { - client := network.SubnetsClient{env.network} - vnetName := internalNetworkName - subnetName := internalSubnetName - var subnet network.Subnet - if err := env.callAPI(func() (autorest.Response, error) { - var err error - subnet, err = client.Get(env.resourceGroup, vnetName, subnetName) - return subnet.Response, err - }); err != nil { - return nil, errors.Annotate(err, "getting internal subnet") - } - return &subnet, nil -} - // getStorageClient queries the storage account key, and uses it to construct // a new storage client. 
func (env *azureEnviron) getStorageClient() (internalazurestorage.Client, error) { @@ -1364,17 +1345,17 @@ if err != nil { return nil, errors.Annotate(err, "getting storage account") } - storageAccountKeys, err := env.getStorageAccountKeysLocked( + storageAccountKey, err := env.getStorageAccountKeyLocked( to.String(storageAccount.Name), false, ) if err != nil { - return nil, errors.Annotate(err, "getting storage account keys") + return nil, errors.Annotate(err, "getting storage account key") } client, err := getStorageClient( env.provider.config.NewStorageClient, env.storageEndpoint, storageAccount, - storageAccountKeys, + storageAccountKey, ) if err != nil { return nil, errors.Annotate(err, "getting storage client") @@ -1395,51 +1376,40 @@ return env.storageAccount, nil } client := storage.AccountsClient{env.storage} - var result storage.AccountListResult + var account storage.Account if err := env.callAPI(func() (autorest.Response, error) { var err error - result, err = client.List() - return result.Response, err + account, err = client.GetProperties(env.resourceGroup, env.storageAccountName) + return account.Response, err }); err != nil { - return nil, errors.Annotate(err, "listing storage accounts") - } - if result.Value == nil || len(*result.Value) == 0 { - return nil, errors.NotFoundf("storage account") - } - for _, account := range *result.Value { - if toTags(account.Tags)[tags.JujuModel] != env.config.UUID() { - continue + if account.Response.Response != nil && account.Response.StatusCode == http.StatusNotFound { + return nil, errors.NewNotFound(err, fmt.Sprintf("storage account not found")) } - env.storageAccount = &account - return &account, nil + return nil, errors.Annotate(err, "getting storage account") } - return nil, errors.NotFoundf("storage account") -} - -// getStorageAccountKeys returns the storage account keys for this -// environment's storage account. If refresh is true, cached keys -// will be refreshed. 
-func (env *azureEnviron) getStorageAccountKeys(accountName string, refresh bool) (*storage.AccountKeys, error) { - env.mu.Lock() - defer env.mu.Unlock() - return env.getStorageAccountKeysLocked(accountName, refresh) + env.storageAccount = &account + return env.storageAccount, nil } -func (env *azureEnviron) getStorageAccountKeysLocked(accountName string, refresh bool) (*storage.AccountKeys, error) { - if !refresh && env.storageAccountKeys != nil { - return env.storageAccountKeys, nil +// getStorageAccountKeyLocked returns a storage account key for this +// environment's storage account. If refresh is true, any cached key +// will be refreshed. This method assumes that env.mu is held. +func (env *azureEnviron) getStorageAccountKeyLocked(accountName string, refresh bool) (*storage.AccountKey, error) { + if !refresh && env.storageAccountKey != nil { + return env.storageAccountKey, nil } client := storage.AccountsClient{env.storage} - var listKeysResult storage.AccountKeys - if err := env.callAPI(func() (autorest.Response, error) { - var err error - listKeysResult, err = client.ListKeys(env.resourceGroup, accountName) - return listKeysResult.Response, err - }); err != nil { - return nil, errors.Annotate(err, "listing storage account keys") + key, err := getStorageAccountKey( + env.callAPI, + client, + env.resourceGroup, + accountName, + ) + if err != nil { + return nil, errors.Trace(err) } - env.storageAccountKeys = &listKeysResult - return env.storageAccountKeys, nil + env.storageAccountKey = key + return key, nil } // AgentMirror is specified in the tools.HasAgentMirror interface. 
diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/azure/environprovider.go juju-core-2.0.0/src/github.com/juju/juju/provider/azure/environprovider.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/azure/environprovider.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/azure/environprovider.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,14 +4,14 @@ package azure import ( - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest" "github.com/juju/errors" "github.com/juju/loggo" "github.com/juju/utils/clock" - "github.com/juju/juju/cloud" "github.com/juju/juju/environs" "github.com/juju/juju/environs/config" + "github.com/juju/juju/provider/azure/internal/azureauth" "github.com/juju/juju/provider/azure/internal/azurestorage" ) @@ -33,12 +33,17 @@ // clients. NewStorageClient azurestorage.NewClientFunc - // StorageAccountNameGenerator is a function returning storage - // account names. - StorageAccountNameGenerator func() string - // RetryClock is used when retrying API calls due to rate-limiting. RetryClock clock.Clock + + // RandomWindowsAdminPassword is a function used to generate + // a random password for the Windows admin user. + RandomWindowsAdminPassword func() string + + // InteractiveCreateServicePrincipal is a function used to + // interactively create/update service principals with + // password credentials. + InteractiveCreateServicePrincipal azureauth.InteractiveCreateServicePrincipalFunc } // Validate validates the Azure provider configuration. 
@@ -46,12 +51,15 @@ if cfg.NewStorageClient == nil { return errors.NotValidf("nil NewStorageClient") } - if cfg.StorageAccountNameGenerator == nil { - return errors.NotValidf("nil StorageAccountNameGenerator") - } if cfg.RetryClock == nil { return errors.NotValidf("nil RetryClock") } + if cfg.RandomWindowsAdminPassword == nil { + return errors.NotValidf("nil RandomWindowsAdminPassword") + } + if cfg.InteractiveCreateServicePrincipal == nil { + return errors.NotValidf("nil InteractiveCreateServicePrincipal") + } return nil } @@ -66,7 +74,14 @@ if err := config.Validate(); err != nil { return nil, errors.Annotate(err, "validating environ provider configuration") } - return &azureEnvironProvider{config: config}, nil + return &azureEnvironProvider{ + environProviderCredentials: environProviderCredentials{ + sender: config.Sender, + requestInspector: config.RequestInspector, + interactiveCreateServicePrincipal: config.InteractiveCreateServicePrincipal, + }, + config: config, + }, nil } // Open is part of the EnvironProvider interface. @@ -82,11 +97,6 @@ return environ, nil } -// RestrictedConfigAttributes is part of the EnvironProvider interface. -func (prov *azureEnvironProvider) RestrictedConfigAttributes() []string { - return []string{} -} - // PrepareConfig is part of the EnvironProvider interface. func (prov *azureEnvironProvider) PrepareConfig(args environs.PrepareConfigParams) (*config.Config, error) { if err := validateCloudSpec(args.Cloud); err != nil { @@ -95,11 +105,6 @@ return args.Config, nil } -// SecretAttrs is part of the EnvironProvider interface. 
-func (prov *azureEnvironProvider) SecretAttrs(cfg *config.Config) (map[string]string, error) { - return map[string]string{}, nil -} - func validateCloudSpec(spec environs.CloudSpec) error { if err := spec.Validate(); err != nil { return errors.Trace(err) @@ -107,7 +112,7 @@ if spec.Credential == nil { return errors.NotValidf("missing credential") } - if authType := spec.Credential.AuthType(); authType != cloud.UserPassAuthType { + if authType := spec.Credential.AuthType(); authType != clientCredentialsAuthType { return errors.NotSupportedf("%q auth-type", authType) } return nil @@ -119,5 +124,5 @@ // level. var verifyCredentials = func(e *azureEnviron) error { // TODO(axw) user-friendly error message - return e.token.EnsureFresh() + return e.authorizer.refresh() } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/azure/environprovider_test.go juju-core-2.0.0/src/github.com/juju/juju/provider/azure/environprovider_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/azure/environprovider_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/azure/environprovider_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,19 +4,17 @@ package azure_test import ( - "bytes" - "io/ioutil" "net/http" - "sync" "time" - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" + jujutesting "github.com/juju/testing" jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" "github.com/juju/juju/cloud" "github.com/juju/juju/environs" "github.com/juju/juju/provider/azure" + "github.com/juju/juju/provider/azure/internal/azureauth" "github.com/juju/juju/provider/azure/internal/azuretesting" "github.com/juju/juju/testing" ) @@ -34,27 +32,29 @@ func (s *environProviderSuite) SetUpTest(c *gc.C) { s.BaseSuite.SetUpTest(c) s.provider = newProvider(c, azure.ProviderConfig{ - Sender: &s.sender, - RequestInspector: requestRecorder(&s.requests), + Sender: &s.sender, + 
RequestInspector: azuretesting.RequestRecorder(&s.requests), + RandomWindowsAdminPassword: func() string { return "sorandom" }, + InteractiveCreateServicePrincipal: azureauth.InteractiveCreateServicePrincipal, }) s.spec = environs.CloudSpec{ - Type: "azure", - Name: "azure", - Region: "westus", - Endpoint: "https://api.azurestack.local", - StorageEndpoint: "https://storage.azurestack.local", - Credential: fakeUserPassCredential(), + Type: "azure", + Name: "azure", + Region: "westus", + Endpoint: "https://api.azurestack.local", + IdentityEndpoint: "https://login.azurestack.local", + StorageEndpoint: "https://storage.azurestack.local", + Credential: fakeServicePrincipalCredential(), } s.sender = nil } -func fakeUserPassCredential() *cloud.Credential { +func fakeServicePrincipalCredential() *cloud.Credential { cred := cloud.NewCredential( - cloud.UserPassAuthType, + "service-principal-secret", map[string]string{ "application-id": fakeApplicationId, "subscription-id": fakeSubscriptionId, - "tenant-id": fakeTenantId, "application-password": "opensezme", }, ) @@ -105,43 +105,14 @@ var storage azuretesting.MockStorageClient config.NewStorageClient = storage.NewClient } - if config.StorageAccountNameGenerator == nil { - config.StorageAccountNameGenerator = func() string { - return fakeStorageAccount - } - } if config.RetryClock == nil { - config.RetryClock = testing.NewClock(time.Time{}) + config.RetryClock = jujutesting.NewClock(time.Time{}) + } + if config.InteractiveCreateServicePrincipal == nil { + config.InteractiveCreateServicePrincipal = azureauth.InteractiveCreateServicePrincipal } + config.RandomWindowsAdminPassword = func() string { return "sorandom" } environProvider, err := azure.NewProvider(config) c.Assert(err, jc.ErrorIsNil) return environProvider } - -func requestRecorder(requests *[]*http.Request) autorest.PrepareDecorator { - if requests == nil { - return nil - } - var mu sync.Mutex - return func(p autorest.Preparer) autorest.Preparer { - return 
autorest.PreparerFunc(func(req *http.Request) (*http.Request, error) { - // Save the request body, since it will be consumed. - reqCopy := *req - if req.Body != nil { - var buf bytes.Buffer - if _, err := buf.ReadFrom(req.Body); err != nil { - return nil, err - } - if err := req.Body.Close(); err != nil { - return nil, err - } - reqCopy.Body = ioutil.NopCloser(&buf) - req.Body = ioutil.NopCloser(bytes.NewReader(buf.Bytes())) - } - mu.Lock() - *requests = append(*requests, &reqCopy) - mu.Unlock() - return req, nil - }) - } -} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/azure/environ_test.go juju-core-2.0.0/src/github.com/juju/juju/provider/azure/environ_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/azure/environ_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/azure/environ_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -5,6 +5,7 @@ import ( "encoding/json" + "errors" "fmt" "io/ioutil" "net/http" @@ -12,18 +13,17 @@ "reflect" "time" - autorestazure "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/azure" - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/mocks" - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/to" "github.com/Azure/azure-sdk-for-go/arm/compute" "github.com/Azure/azure-sdk-for-go/arm/network" - "github.com/Azure/azure-sdk-for-go/arm/resources" + "github.com/Azure/azure-sdk-for-go/arm/resources/resources" "github.com/Azure/azure-sdk-for-go/arm/storage" + autorestazure "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/mocks" + "github.com/Azure/go-autorest/autorest/to" gitjujutesting "github.com/juju/testing" jc "github.com/juju/testing/checkers" "github.com/juju/utils" "github.com/juju/utils/arch" - "github.com/juju/utils/series" gc "gopkg.in/check.v1" "gopkg.in/juju/names.v2" @@ -38,12 
+38,63 @@ envtools "github.com/juju/juju/environs/tools" "github.com/juju/juju/instance" "github.com/juju/juju/provider/azure" + "github.com/juju/juju/provider/azure/internal/armtemplates" + "github.com/juju/juju/provider/azure/internal/azureauth" "github.com/juju/juju/provider/azure/internal/azuretesting" "github.com/juju/juju/testing" "github.com/juju/juju/tools" "github.com/juju/version" ) +const storageAccountName = "juju400d80004b1d0d06f00d" + +var ( + quantalImageReference = compute.ImageReference{ + Publisher: to.StringPtr("Canonical"), + Offer: to.StringPtr("UbuntuServer"), + Sku: to.StringPtr("12.10"), + Version: to.StringPtr("latest"), + } + win2012ImageReference = compute.ImageReference{ + Publisher: to.StringPtr("MicrosoftWindowsServer"), + Offer: to.StringPtr("WindowsServer"), + Sku: to.StringPtr("2012-Datacenter"), + Version: to.StringPtr("latest"), + } + centos7ImageReference = compute.ImageReference{ + Publisher: to.StringPtr("OpenLogic"), + Offer: to.StringPtr("CentOS"), + Sku: to.StringPtr("7.1"), + Version: to.StringPtr("latest"), + } + + sshPublicKeys = []compute.SSHPublicKey{{ + Path: to.StringPtr("/home/ubuntu/.ssh/authorized_keys"), + KeyData: to.StringPtr(testing.FakeAuthKeys), + }} + linuxOsProfile = compute.OSProfile{ + ComputerName: to.StringPtr("machine-0"), + CustomData: to.StringPtr(""), + AdminUsername: to.StringPtr("ubuntu"), + LinuxConfiguration: &compute.LinuxConfiguration{ + DisablePasswordAuthentication: to.BoolPtr(true), + SSH: &compute.SSHConfiguration{ + PublicKeys: &sshPublicKeys, + }, + }, + } + windowsOsProfile = compute.OSProfile{ + ComputerName: to.StringPtr("machine-0"), + CustomData: to.StringPtr(""), + AdminUsername: to.StringPtr("JujuAdministrator"), + AdminPassword: to.StringPtr("sorandom"), + WindowsConfiguration: &compute.WindowsConfiguration{ + ProvisionVMAgent: to.BoolPtr(true), + EnableAutomaticUpdates: to.BoolPtr(true), + }, + } +) + type environSuite struct { testing.BaseSuite @@ -53,23 +104,16 @@ sender 
azuretesting.Senders retryClock mockClock - controllerUUID string - tags map[string]*string - group *resources.ResourceGroup - vmSizes *compute.VirtualMachineSizeListResult - storageAccounts []storage.Account - storageNameAvailabilityResult *storage.CheckNameAvailabilityResult - storageAccount *storage.Account - storageAccountKeys *storage.AccountKeys - vnet *network.VirtualNetwork - nsg *network.SecurityGroup - subnet *network.Subnet - ubuntuServerSKUs []compute.VirtualMachineImageResource - publicIPAddress *network.PublicIPAddress - oldNetworkInterfaces *network.InterfaceListResult - newNetworkInterface *network.Interface - jujuAvailabilitySet *compute.AvailabilitySet - virtualMachine *compute.VirtualMachine + controllerUUID string + envTags map[string]*string + vmTags map[string]*string + group *resources.ResourceGroup + vmSizes *compute.VirtualMachineSizeListResult + storageAccounts []storage.Account + storageAccount *storage.Account + storageAccountKeys *storage.AccountListKeysResult + ubuntuServerSKUs []compute.VirtualMachineImageResource + deployment *resources.Deployment } var _ = gc.Suite(&environSuite{}) @@ -79,87 +123,67 @@ s.storageClient = azuretesting.MockStorageClient{} s.sender = nil s.requests = nil - s.retryClock = mockClock{Clock: testing.NewClock(time.Time{})} + s.retryClock = mockClock{Clock: gitjujutesting.NewClock(time.Time{})} s.provider = newProvider(c, azure.ProviderConfig{ Sender: azuretesting.NewSerialSender(&s.sender), - RequestInspector: requestRecorder(&s.requests), + RequestInspector: azuretesting.RequestRecorder(&s.requests), NewStorageClient: s.storageClient.NewClient, - RetryClock: &testing.AutoAdvancingClock{ + RetryClock: &gitjujutesting.AutoAdvancingClock{ &s.retryClock, s.retryClock.Advance, }, + RandomWindowsAdminPassword: func() string { return "sorandom" }, + InteractiveCreateServicePrincipal: azureauth.InteractiveCreateServicePrincipal, }) - s.controllerUUID = testing.ModelTag.Id() - envTags := map[string]*string{ + 
s.controllerUUID = testing.ControllerTag.Id() + s.envTags = map[string]*string{ "juju-model-uuid": to.StringPtr(testing.ModelTag.Id()), "juju-controller-uuid": to.StringPtr(s.controllerUUID), } - s.tags = map[string]*string{ - "juju-machine-name": to.StringPtr("machine-0"), + s.vmTags = map[string]*string{ + "juju-model-uuid": to.StringPtr(testing.ModelTag.Id()), + "juju-controller-uuid": to.StringPtr(s.controllerUUID), + "juju-machine-name": to.StringPtr("machine-0"), } s.group = &resources.ResourceGroup{ Location: to.StringPtr("westus"), - Tags: &envTags, + Tags: &s.envTags, + Properties: &resources.ResourceGroupProperties{ + ProvisioningState: to.StringPtr("Succeeded"), + }, } vmSizes := []compute.VirtualMachineSize{{ Name: to.StringPtr("Standard_D1"), - NumberOfCores: to.IntPtr(1), - OsDiskSizeInMB: to.IntPtr(1047552), - ResourceDiskSizeInMB: to.IntPtr(51200), - MemoryInMB: to.IntPtr(3584), - MaxDataDiskCount: to.IntPtr(2), + NumberOfCores: to.Int32Ptr(1), + OsDiskSizeInMB: to.Int32Ptr(1047552), + ResourceDiskSizeInMB: to.Int32Ptr(51200), + MemoryInMB: to.Int32Ptr(3584), + MaxDataDiskCount: to.Int32Ptr(2), }} s.vmSizes = &compute.VirtualMachineSizeListResult{Value: &vmSizes} - s.storageNameAvailabilityResult = &storage.CheckNameAvailabilityResult{ - NameAvailable: to.BoolPtr(true), - } - s.storageAccount = &storage.Account{ Name: to.StringPtr("my-storage-account"), Type: to.StringPtr("Standard_LRS"), - Tags: &envTags, + Tags: &s.envTags, Properties: &storage.AccountProperties{ PrimaryEndpoints: &storage.Endpoints{ - Blob: to.StringPtr(fmt.Sprintf("https://%s.blob.storage.azurestack.local/", fakeStorageAccount)), + Blob: to.StringPtr(fmt.Sprintf("https://%s.blob.storage.azurestack.local/", storageAccountName)), }, + ProvisioningState: "Succeeded", }, } - s.storageAccountKeys = &storage.AccountKeys{ - Key1: to.StringPtr("key-1"), - } - - addressPrefixes := []string{"10.0.0.0/16"} - s.vnet = &network.VirtualNetwork{ - ID: to.StringPtr("juju-internal-network"), - 
Name: to.StringPtr("juju-internal-network"), - Location: to.StringPtr("westus"), - Tags: &envTags, - Properties: &network.VirtualNetworkPropertiesFormat{ - AddressSpace: &network.AddressSpace{&addressPrefixes}, - }, - } - - s.nsg = &network.SecurityGroup{ - ID: to.StringPtr(path.Join( - "/subscriptions", fakeSubscriptionId, - "resourceGroups", "juju-testenv-model-"+testing.ModelTag.Id(), - "providers/Microsoft.Network/networkSecurityGroups/juju-internal-nsg", - )), - Tags: &envTags, - } - - s.subnet = &network.Subnet{ - ID: to.StringPtr("subnet-id"), - Name: to.StringPtr("juju-internal-subnet"), - Properties: &network.SubnetPropertiesFormat{ - AddressPrefix: to.StringPtr("10.0.0.0/16"), - NetworkSecurityGroup: &network.SubResource{s.nsg.ID}, - }, + keys := []storage.AccountKey{{ + KeyName: to.StringPtr("key-1-name"), + Value: to.StringPtr("key-1"), + Permissions: storage.FULL, + }} + s.storageAccountKeys = &storage.AccountListKeysResult{ + Keys: &keys, } s.ubuntuServerSKUs = []compute.VirtualMachineImageResource{ @@ -171,126 +195,7 @@ {Name: to.StringPtr("16.04-LTS")}, } - s.publicIPAddress = &network.PublicIPAddress{ - ID: to.StringPtr("public-ip-id"), - Name: to.StringPtr("machine-0-public-ip"), - Location: to.StringPtr("westus"), - Tags: &s.tags, - Properties: &network.PublicIPAddressPropertiesFormat{ - PublicIPAllocationMethod: network.Dynamic, - IPAddress: to.StringPtr("1.2.3.4"), - }, - } - - // Existing IPs/NICs. These are the results of querying NICs so we - // can tell which IP to allocate. 
- oldIPConfigurations := []network.InterfaceIPConfiguration{{ - ID: to.StringPtr("ip-configuration-0-id"), - Name: to.StringPtr("ip-configuration-0"), - Properties: &network.InterfaceIPConfigurationPropertiesFormat{ - PrivateIPAddress: to.StringPtr("10.0.0.4"), - PrivateIPAllocationMethod: network.Static, - Subnet: &network.SubResource{ID: s.subnet.ID}, - }, - }} - oldNetworkInterfaces := []network.Interface{{ - ID: to.StringPtr("network-interface-0-id"), - Name: to.StringPtr("network-interface-0"), - Properties: &network.InterfacePropertiesFormat{ - IPConfigurations: &oldIPConfigurations, - Primary: to.BoolPtr(true), - }, - }} - s.oldNetworkInterfaces = &network.InterfaceListResult{ - Value: &oldNetworkInterfaces, - } - - // The newly created IP/NIC. - newIPConfigurations := []network.InterfaceIPConfiguration{{ - ID: to.StringPtr("ip-configuration-1-id"), - Name: to.StringPtr("primary"), - Properties: &network.InterfaceIPConfigurationPropertiesFormat{ - PrivateIPAddress: to.StringPtr("10.0.0.5"), - PrivateIPAllocationMethod: network.Static, - Subnet: &network.SubResource{ID: s.subnet.ID}, - PublicIPAddress: &network.SubResource{ID: s.publicIPAddress.ID}, - }, - }} - s.newNetworkInterface = &network.Interface{ - ID: to.StringPtr("network-interface-1-id"), - Name: to.StringPtr("network-interface-1"), - Location: to.StringPtr("westus"), - Tags: &s.tags, - Properties: &network.InterfacePropertiesFormat{ - IPConfigurations: &newIPConfigurations, - }, - } - - s.jujuAvailabilitySet = &compute.AvailabilitySet{ - ID: to.StringPtr("juju-availability-set-id"), - Name: to.StringPtr("juju"), - Location: to.StringPtr("westus"), - Tags: &envTags, - } - - sshPublicKeys := []compute.SSHPublicKey{{ - Path: to.StringPtr("/home/ubuntu/.ssh/authorized_keys"), - KeyData: to.StringPtr(testing.FakeAuthKeys), - }} - networkInterfaceReferences := []compute.NetworkInterfaceReference{{ - ID: s.newNetworkInterface.ID, - Properties: &compute.NetworkInterfaceReferenceProperties{ - Primary: 
to.BoolPtr(true), - }, - }} - s.virtualMachine = &compute.VirtualMachine{ - ID: to.StringPtr("machine-0-id"), - Name: to.StringPtr("machine-0"), - Location: to.StringPtr("westus"), - Tags: &s.tags, - Properties: &compute.VirtualMachineProperties{ - HardwareProfile: &compute.HardwareProfile{ - VMSize: "Standard_D1", - }, - StorageProfile: &compute.StorageProfile{ - ImageReference: &compute.ImageReference{ - Publisher: to.StringPtr("Canonical"), - Offer: to.StringPtr("UbuntuServer"), - Sku: to.StringPtr("12.10"), - Version: to.StringPtr("latest"), - }, - OsDisk: &compute.OSDisk{ - Name: to.StringPtr("machine-0"), - CreateOption: compute.FromImage, - Caching: compute.ReadWrite, - Vhd: &compute.VirtualHardDisk{ - URI: to.StringPtr(fmt.Sprintf( - "https://%s.blob.storage.azurestack.local/osvhds/machine-0.vhd", - fakeStorageAccount, - )), - }, - // 30 GiB is roughly 32 GB. - DiskSizeGB: to.IntPtr(32), - }, - }, - OsProfile: &compute.OSProfile{ - ComputerName: to.StringPtr("machine-0"), - CustomData: to.StringPtr(""), - AdminUsername: to.StringPtr("ubuntu"), - LinuxConfiguration: &compute.LinuxConfiguration{ - DisablePasswordAuthentication: to.BoolPtr(true), - SSH: &compute.SSHConfiguration{ - PublicKeys: &sshPublicKeys, - }, - }, - }, - NetworkProfile: &compute.NetworkProfile{ - NetworkInterfaces: &networkInterfaceReferences, - }, - AvailabilitySet: &compute.SubResource{ID: s.jujuAvailabilitySet.ID}, - ProvisioningState: to.StringPtr("Successful"), - }, - } + s.deployment = nil } func (s *environSuite) openEnviron(c *gc.C, attrs ...testing.Attrs) environs.Environ { @@ -314,7 +219,10 @@ // Force an explicit refresh of the access token, so it isn't done // implicitly during the tests. 
- *sender = azuretesting.Senders{tokenRefreshSender()} + *sender = azuretesting.Senders{ + discoverAuthSender(), + tokenRefreshSender(), + } err = azure.ForceTokenRefresh(env) c.Assert(err, jc.ErrorIsNil) return env @@ -329,18 +237,22 @@ ) environs.Environ { // Opening the environment should not incur network communication, // so we don't set s.sender until after opening. - cfg := makeTestModelConfig(c, attrs...) - *sender = azuretesting.Senders{tokenRefreshSender()} cfg, err := provider.PrepareConfig(environs.PrepareConfigParams{ - Config: cfg, + Config: makeTestModelConfig(c, attrs...), Cloud: fakeCloudSpec(), }) c.Assert(err, jc.ErrorIsNil) + env, err := provider.Open(environs.OpenParams{ Cloud: fakeCloudSpec(), Config: cfg, }) c.Assert(err, jc.ErrorIsNil) + + *sender = azuretesting.Senders{ + discoverAuthSender(), + tokenRefreshSender(), + } err = env.PrepareForBootstrap(ctx) c.Assert(err, jc.ErrorIsNil) return env @@ -348,17 +260,17 @@ func fakeCloudSpec() environs.CloudSpec { return environs.CloudSpec{ - Type: "azure", - Name: "azure", - Region: "westus", - Endpoint: "https://api.azurestack.local", - StorageEndpoint: "https://storage.azurestack.local", - Credential: fakeUserPassCredential(), + Type: "azure", + Name: "azure", + Region: "westus", + Endpoint: "https://api.azurestack.local", + IdentityEndpoint: "https://login.microsoftonline.com", + StorageEndpoint: "https://storage.azurestack.local", + Credential: fakeServicePrincipalCredential(), } } func tokenRefreshSender() *azuretesting.MockSender { - // lp:1558657 tokenRefreshSender := azuretesting.NewSenderWithValue(&autorestazure.Token{ AccessToken: "access-token", ExpiresOn: fmt.Sprint(time.Now().Add(time.Hour).Unix()), @@ -368,40 +280,35 @@ return tokenRefreshSender } +func discoverAuthSender() *azuretesting.MockSender { + const fakeTenantId = "11111111-1111-1111-1111-111111111111" + sender := mocks.NewSender() + resp := mocks.NewResponseWithStatus("", http.StatusUnauthorized) + 
mocks.SetResponseHeaderValues(resp, "WWW-Authenticate", []string{ + fmt.Sprintf( + `authorization_uri="https://testing.invalid/%s"`, + fakeTenantId, + ), + }) + sender.AppendResponse(resp) + return &azuretesting.MockSender{ + Sender: sender, + PathPattern: ".*/subscriptions/" + fakeSubscriptionId, + } +} + func (s *environSuite) initResourceGroupSenders() azuretesting.Senders { resourceGroupName := "juju-testenv-model-deadbeef-0bad-400d-8000-4b1d0d06f00d" - return azuretesting.Senders{ - s.makeSender(".*/resourcegroups/"+resourceGroupName, s.group), - s.makeSender(".*/virtualnetworks/juju-internal-network", s.vnet), - s.makeSender(".*/networkSecurityGroups/juju-internal-nsg", s.nsg), - s.makeSender(".*/virtualnetworks/juju-internal-network/subnets/juju-internal-subnet", s.subnet), - s.makeSender(".*/checkNameAvailability", s.storageNameAvailabilityResult), - s.makeSender(".*/storageAccounts/.*", s.storageAccount), - } + senders := azuretesting.Senders{s.makeSender(".*/resourcegroups/"+resourceGroupName, s.group)} + return senders } func (s *environSuite) startInstanceSenders(controller bool) azuretesting.Senders { - senders := azuretesting.Senders{ - s.vmSizesSender(), - s.storageAccountsSender(), - s.makeSender(".*/subnets/juju-internal-subnet", s.subnet), - s.makeSender(".*/Canonical/.*/UbuntuServer/skus", s.ubuntuServerSKUs), - s.makeSender(".*/publicIPAddresses/machine-0-public-ip", s.publicIPAddress), - s.makeSender(".*/networkInterfaces", s.oldNetworkInterfaces), - s.makeSender(".*/networkInterfaces/machine-0-primary", s.newNetworkInterface), - } - if controller { - senders = append(senders, - s.makeSender(".*/networkSecurityGroups/juju-internal-nsg", &network.SecurityGroup{ - Properties: &network.SecurityGroupPropertiesFormat{}, - }), - s.makeSender(".*/networkSecurityGroups/juju-internal-nsg", &network.SecurityGroup{}), - ) + senders := azuretesting.Senders{s.vmSizesSender()} + if s.ubuntuServerSKUs != nil { + senders = append(senders, 
s.makeSender(".*/Canonical/.*/UbuntuServer/skus", s.ubuntuServerSKUs)) } - senders = append(senders, - s.makeSender(".*/availabilitySets/.*", s.jujuAvailabilitySet), - s.makeSender(".*/virtualMachines/machine-0", s.virtualMachine), - ) + senders = append(senders, s.makeSender("/deployments/machine-0", s.deployment)) return senders } @@ -421,9 +328,8 @@ return s.makeSender(".*/vmSizes", s.vmSizes) } -func (s *environSuite) storageAccountsSender() *azuretesting.MockSender { - accounts := []storage.Account{*s.storageAccount} - return s.makeSender(".*/storageAccounts", storage.AccountListResult{Value: &accounts}) +func (s *environSuite) storageAccountSender() *azuretesting.MockSender { + return s.makeSender(".*/storageAccounts/"+storageAccountName, s.storageAccount) } func (s *environSuite) storageAccountKeysSender() *azuretesting.MockSender { @@ -439,19 +345,23 @@ func makeStartInstanceParams(c *gc.C, controllerUUID, series string) environs.StartInstanceParams { machineTag := names.NewMachineTag("0") apiInfo := &api.Info{ - Addrs: []string{"localhost:246"}, + Addrs: []string{"localhost:17777"}, CACert: testing.CACert, Password: "admin", Tag: machineTag, ModelTag: testing.ModelTag, } - const secureServerConnections = true icfg, err := instancecfg.NewInstanceConfig( + names.NewControllerTag(controllerUUID), machineTag.Id(), "yanonce", imagemetadata.ReleasedStream, - series, secureServerConnections, apiInfo, + series, apiInfo, ) c.Assert(err, jc.ErrorIsNil) + icfg.Tags = map[string]string{ + tags.JujuModel: testing.ModelTag.Id(), + tags.JujuController: controllerUUID, + } return environs.StartInstanceParams{ ControllerUUID: controllerUUID, @@ -488,7 +398,7 @@ type mockClock struct { gitjujutesting.Stub - *testing.Clock + *gitjujutesting.Clock } func (c *mockClock) After(d time.Duration) <-chan time.Time { @@ -506,7 +416,7 @@ env := s.openEnviron(c) sender := mocks.NewSender() - sender.EmitContent("{}") + sender.AppendResponse(mocks.NewResponseWithContent("{}")) s.sender 
= azuretesting.Senders{sender} s.requests = nil env.AllInstances() // trigger a query @@ -537,9 +447,88 @@ RootDisk: &rootDisk, CpuCores: &cpuCores, }) - requests := s.assertStartInstanceRequests(c, s.requests) - availabilitySetName := path.Base(requests.availabilitySet.URL.Path) - c.Assert(availabilitySetName, gc.Equals, "juju") + s.assertStartInstanceRequests(c, s.requests, assertStartInstanceRequestsParams{ + imageReference: &quantalImageReference, + diskSizeGB: 32, + osProfile: &linuxOsProfile, + }) +} + +func (s *environSuite) TestStartInstanceWindowsMinRootDisk(c *gc.C) { + // The minimum OS disk size for Windows machines is 127GiB. + cons := constraints.MustParse("root-disk=44G") + s.testStartInstanceWindows(c, cons, 127*1024, 136) +} + +func (s *environSuite) TestStartInstanceWindowsGrowableRootDisk(c *gc.C) { + // The OS disk size may be grown larger than 127GiB. + cons := constraints.MustParse("root-disk=200G") + s.testStartInstanceWindows(c, cons, 200*1024, 214) +} + +func (s *environSuite) testStartInstanceWindows( + c *gc.C, cons constraints.Value, + expect uint64, requestValue int, +) { + // Starting a Windows VM, we should not expect an image query. 
+ s.PatchValue(&s.ubuntuServerSKUs, nil) + + env := s.openEnviron(c) + s.sender = s.startInstanceSenders(false) + s.requests = nil + args := makeStartInstanceParams(c, s.controllerUUID, "win2012") + args.Constraints = cons + result, err := env.StartInstance(args) + c.Assert(err, jc.ErrorIsNil) + c.Assert(result, gc.NotNil) + c.Assert(result.Hardware.RootDisk, jc.DeepEquals, &expect) + + vmExtensionSettings := map[string]interface{}{ + "commandToExecute": `` + + `move C:\AzureData\CustomData.bin C:\AzureData\CustomData.ps1 && ` + + `powershell.exe -ExecutionPolicy Unrestricted -File C:\AzureData\CustomData.ps1 && ` + + `del /q C:\AzureData\CustomData.ps1`, + } + s.assertStartInstanceRequests(c, s.requests, assertStartInstanceRequestsParams{ + imageReference: &win2012ImageReference, + diskSizeGB: requestValue, + vmExtension: &compute.VirtualMachineExtensionProperties{ + Publisher: to.StringPtr("Microsoft.Compute"), + Type: to.StringPtr("CustomScriptExtension"), + TypeHandlerVersion: to.StringPtr("1.4"), + AutoUpgradeMinorVersion: to.BoolPtr(true), + Settings: &vmExtensionSettings, + }, + osProfile: &windowsOsProfile, + }) +} + +func (s *environSuite) TestStartInstanceCentOS(c *gc.C) { + // Starting a CentOS VM, we should not expect an image query. 
+ s.PatchValue(&s.ubuntuServerSKUs, nil) + + env := s.openEnviron(c) + s.sender = s.startInstanceSenders(false) + s.requests = nil + args := makeStartInstanceParams(c, s.controllerUUID, "centos7") + _, err := env.StartInstance(args) + c.Assert(err, jc.ErrorIsNil) + + vmExtensionSettings := map[string]interface{}{ + "commandToExecute": `bash -c 'base64 -d /var/lib/waagent/CustomData | bash'`, + } + s.assertStartInstanceRequests(c, s.requests, assertStartInstanceRequestsParams{ + imageReference: ¢os7ImageReference, + diskSizeGB: 32, + vmExtension: &compute.VirtualMachineExtensionProperties{ + Publisher: to.StringPtr("Microsoft.OSTCExtensions"), + Type: to.StringPtr("CustomScriptForLinux"), + TypeHandlerVersion: to.StringPtr("1.4"), + AutoUpgradeMinorVersion: to.BoolPtr(true), + Settings: &vmExtensionSettings, + }, + osProfile: &linuxOsProfile, + }) } func (s *environSuite) TestStartInstanceTooManyRequests(c *gc.C) { @@ -553,7 +542,11 @@ // Make the VirtualMachines.CreateOrUpdate call respond with // 429 (StatusTooManyRequests) failures, and then with success. 
rateLimitedSender := mocks.NewSender() - rateLimitedSender.EmitStatus("(ã€ã‚œãƒ­ã‚œ)ã€", http.StatusTooManyRequests) + rateLimitedSender.AppendAndRepeatResponse(mocks.NewResponseWithBodyAndStatus( + mocks.NewBody("{}"), // empty JSON response to appease go-autorest + http.StatusTooManyRequests, + "(ã€ã‚œãƒ­ã‚œ)ã€", + ), failures) successSender := senders[len(senders)-1] senders = senders[:len(senders)-1] for i := 0; i < failures; i++ { @@ -565,14 +558,17 @@ _, err := env.StartInstance(makeStartInstanceParams(c, s.controllerUUID, "quantal")) c.Assert(err, jc.ErrorIsNil) - c.Assert(s.requests, gc.HasLen, 9+failures) - s.assertStartInstanceRequests(c, s.requests[:9]) + c.Assert(s.requests, gc.HasLen, numExpectedStartInstanceRequests+failures) + s.assertStartInstanceRequests(c, s.requests[:numExpectedStartInstanceRequests], assertStartInstanceRequestsParams{ + imageReference: &quantalImageReference, + diskSizeGB: 32, + osProfile: &linuxOsProfile, + }) - // The last two requests should match the third-to-last, which - // is checked by assertStartInstanceRequests. - for i := 9; i < 9+failures; i++ { + // The final requests should all be identical. + for i := numExpectedStartInstanceRequests; i < numExpectedStartInstanceRequests+failures; i++ { c.Assert(s.requests[i].Method, gc.Equals, "PUT") - assertCreateVirtualMachineRequestBody(c, s.requests[i], s.virtualMachine) + c.Assert(s.requests[i].URL.Path, gc.Equals, s.requests[numExpectedStartInstanceRequests-1].URL.Path) } s.retryClock.CheckCalls(c, []gitjujutesting.StubCall{ @@ -594,11 +590,15 @@ // retrying before giving up. const failures = 8 - // Make the VirtualMachines.CreateOrUpdate call respond with - // enough 429 (StatusTooManyRequests) failures to cause the - // method to give up retrying. + // Make the VirtualMachines.Get call respond with enough 429 + // (StatusTooManyRequests) failures to cause the method to give + // up retrying. 
rateLimitedSender := mocks.NewSender() - rateLimitedSender.EmitStatus("(ã€ã‚œãƒ­ã‚œ)ã€", http.StatusTooManyRequests) + rateLimitedSender.AppendAndRepeatResponse(mocks.NewResponseWithBodyAndStatus( + mocks.NewBody("{}"), // empty JSON response to appease go-autorest + http.StatusTooManyRequests, + "(ã€ã‚œãƒ­ã‚œ)ã€", + ), failures) senders = senders[:len(senders)-1] for i := 0; i < failures; i++ { senders = append(senders, rateLimitedSender) @@ -606,7 +606,7 @@ s.sender = senders _, err := env.StartInstance(makeStartInstanceParams(c, s.controllerUUID, "quantal")) - c.Assert(err, gc.ErrorMatches, "creating virtual machine.*: max duration exceeded: .*failed with.*") + c.Assert(err, gc.ErrorMatches, `creating virtual machine "machine-0": creating deployment "machine-0": max duration exceeded: .*`) s.retryClock.CheckCalls(c, []gitjujutesting.StubCall{ {"After", []interface{}{5 * time.Second}}, // t0 + 5s @@ -627,82 +627,303 @@ func (s *environSuite) TestStartInstanceServiceAvailabilitySet(c *gc.C) { env := s.openEnviron(c) + unitsDeployed := "mysql/0 wordpress/0" + s.vmTags[tags.JujuUnitsDeployed] = &unitsDeployed s.sender = s.startInstanceSenders(false) s.requests = nil - unitsDeployed := "mysql/0 wordpress/0" params := makeStartInstanceParams(c, s.controllerUUID, "quantal") params.InstanceConfig.Tags[tags.JujuUnitsDeployed] = unitsDeployed + _, err := env.StartInstance(params) c.Assert(err, jc.ErrorIsNil) - s.tags[tags.JujuUnitsDeployed] = &unitsDeployed - requests := s.assertStartInstanceRequests(c, s.requests) - availabilitySetName := path.Base(requests.availabilitySet.URL.Path) - c.Assert(availabilitySetName, gc.Equals, "mysql") -} - -func (s *environSuite) assertStartInstanceRequests(c *gc.C, requests []*http.Request) startInstanceRequests { - // Clear the fields that don't get sent in the request. 
- s.publicIPAddress.ID = nil - s.publicIPAddress.Name = nil - s.publicIPAddress.Properties.IPAddress = nil - s.newNetworkInterface.ID = nil - s.newNetworkInterface.Name = nil - (*s.newNetworkInterface.Properties.IPConfigurations)[0].ID = nil - s.jujuAvailabilitySet.ID = nil - s.jujuAvailabilitySet.Name = nil - s.virtualMachine.ID = nil - s.virtualMachine.Name = nil - s.virtualMachine.Properties.ProvisioningState = nil + s.assertStartInstanceRequests(c, s.requests, assertStartInstanceRequestsParams{ + availabilitySetName: "mysql", + imageReference: &quantalImageReference, + diskSizeGB: 32, + osProfile: &linuxOsProfile, + }) +} - // Validate HTTP request bodies. - c.Assert(requests, gc.HasLen, 9) - c.Assert(requests[0].Method, gc.Equals, "GET") // vmSizes - c.Assert(requests[1].Method, gc.Equals, "GET") // storage accounts - c.Assert(requests[2].Method, gc.Equals, "GET") // juju-testenv-model-deadbeef-0bad-400d-8000-4b1d0d06f00d - c.Assert(requests[3].Method, gc.Equals, "GET") // skus - c.Assert(requests[4].Method, gc.Equals, "PUT") - assertRequestBody(c, requests[4], s.publicIPAddress) - c.Assert(requests[5].Method, gc.Equals, "GET") // NICs - c.Assert(requests[6].Method, gc.Equals, "PUT") - assertRequestBody(c, requests[6], s.newNetworkInterface) - c.Assert(requests[7].Method, gc.Equals, "PUT") - assertRequestBody(c, requests[7], s.jujuAvailabilitySet) - c.Assert(requests[8].Method, gc.Equals, "PUT") - assertCreateVirtualMachineRequestBody(c, requests[8], s.virtualMachine) - - return startInstanceRequests{ - vmSizes: requests[0], - storageAccounts: requests[1], - subnet: requests[2], - skus: requests[3], - publicIPAddress: requests[4], - nics: requests[5], - networkInterface: requests[6], - availabilitySet: requests[7], - virtualMachine: requests[8], - } +const numExpectedStartInstanceRequests = 3 + +type assertStartInstanceRequestsParams struct { + availabilitySetName string + imageReference *compute.ImageReference + vmExtension 
*compute.VirtualMachineExtensionProperties + diskSizeGB int + osProfile *compute.OSProfile } -func assertCreateVirtualMachineRequestBody(c *gc.C, req *http.Request, expect *compute.VirtualMachine) { - // CustomData is non-deterministic, so don't compare it. +func (s *environSuite) assertStartInstanceRequests( + c *gc.C, + requests []*http.Request, + args assertStartInstanceRequestsParams, +) startInstanceRequests { + nsgId := `[resourceId('Microsoft.Network/networkSecurityGroups', 'juju-internal-nsg')]` + securityRules := []network.SecurityRule{{ + Name: to.StringPtr("SSHInbound"), + Properties: &network.SecurityRulePropertiesFormat{ + Description: to.StringPtr("Allow SSH access to all machines"), + Protocol: network.TCP, + SourceAddressPrefix: to.StringPtr("*"), + SourcePortRange: to.StringPtr("*"), + DestinationAddressPrefix: to.StringPtr("*"), + DestinationPortRange: to.StringPtr("22"), + Access: network.Allow, + Priority: to.Int32Ptr(100), + Direction: network.Inbound, + }, + }, { + Name: to.StringPtr("JujuAPIInbound"), + Properties: &network.SecurityRulePropertiesFormat{ + Description: to.StringPtr("Allow API connections to controller machines"), + Protocol: network.TCP, + SourceAddressPrefix: to.StringPtr("*"), + SourcePortRange: to.StringPtr("*"), + DestinationAddressPrefix: to.StringPtr("192.168.16.0/20"), + DestinationPortRange: to.StringPtr("17777"), + Access: network.Allow, + Priority: to.Int32Ptr(101), + Direction: network.Inbound, + }, + }} + subnets := []network.Subnet{{ + Name: to.StringPtr("juju-internal-subnet"), + Properties: &network.SubnetPropertiesFormat{ + AddressPrefix: to.StringPtr("192.168.0.0/20"), + NetworkSecurityGroup: &network.SecurityGroup{ + ID: to.StringPtr(nsgId), + }, + }, + }, { + Name: to.StringPtr("juju-controller-subnet"), + Properties: &network.SubnetPropertiesFormat{ + AddressPrefix: to.StringPtr("192.168.16.0/20"), + NetworkSecurityGroup: &network.SecurityGroup{ + ID: to.StringPtr(nsgId), + }, + }, + }} + + subnetName := 
"juju-internal-subnet" + privateIPAddress := "192.168.0.4" + if args.availabilitySetName == "juju-controller" { + subnetName = "juju-controller-subnet" + privateIPAddress = "192.168.16.4" + } + subnetId := fmt.Sprintf( + `[concat(resourceId('Microsoft.Network/virtualNetworks', 'juju-internal-network'), '/subnets/%s')]`, + subnetName, + ) + + publicIPAddressId := `[resourceId('Microsoft.Network/publicIPAddresses', 'machine-0-public-ip')]` + + ipConfigurations := []network.InterfaceIPConfiguration{{ + Name: to.StringPtr("primary"), + Properties: &network.InterfaceIPConfigurationPropertiesFormat{ + Primary: to.BoolPtr(true), + PrivateIPAddress: to.StringPtr(privateIPAddress), + PrivateIPAllocationMethod: network.Static, + Subnet: &network.Subnet{ID: to.StringPtr(subnetId)}, + PublicIPAddress: &network.PublicIPAddress{ + ID: to.StringPtr(publicIPAddressId), + }, + }, + }} + + nicId := `[resourceId('Microsoft.Network/networkInterfaces', 'machine-0-primary')]` + nics := []compute.NetworkInterfaceReference{{ + ID: to.StringPtr(nicId), + Properties: &compute.NetworkInterfaceReferenceProperties{ + Primary: to.BoolPtr(true), + }, + }} + vmDependsOn := []string{ + nicId, + `[resourceId('Microsoft.Storage/storageAccounts', '` + storageAccountName + `')]`, + } + + addressPrefixes := []string{"192.168.0.0/20", "192.168.16.0/20"} + templateResources := []armtemplates.Resource{{ + APIVersion: network.APIVersion, + Type: "Microsoft.Network/networkSecurityGroups", + Name: "juju-internal-nsg", + Location: "westus", + Tags: to.StringMap(s.envTags), + Properties: &network.SecurityGroupPropertiesFormat{ + SecurityRules: &securityRules, + }, + }, { + APIVersion: network.APIVersion, + Type: "Microsoft.Network/virtualNetworks", + Name: "juju-internal-network", + Location: "westus", + Tags: to.StringMap(s.envTags), + Properties: &network.VirtualNetworkPropertiesFormat{ + AddressSpace: &network.AddressSpace{&addressPrefixes}, + Subnets: &subnets, + }, + DependsOn: []string{nsgId}, + }, { + 
APIVersion: storage.APIVersion, + Type: "Microsoft.Storage/storageAccounts", + Name: storageAccountName, + Location: "westus", + Tags: to.StringMap(s.envTags), + StorageSku: &storage.Sku{ + Name: storage.SkuName("Standard_LRS"), + }, + }} + + var availabilitySetSubResource *compute.SubResource + if args.availabilitySetName != "" { + availabilitySetId := fmt.Sprintf( + `[resourceId('Microsoft.Compute/availabilitySets','%s')]`, + args.availabilitySetName, + ) + templateResources = append(templateResources, armtemplates.Resource{ + APIVersion: compute.APIVersion, + Type: "Microsoft.Compute/availabilitySets", + Name: args.availabilitySetName, + Location: "westus", + Tags: to.StringMap(s.envTags), + }) + availabilitySetSubResource = &compute.SubResource{ + ID: to.StringPtr(availabilitySetId), + } + vmDependsOn = append([]string{availabilitySetId}, vmDependsOn...) + } + + templateResources = append(templateResources, []armtemplates.Resource{{ + APIVersion: network.APIVersion, + Type: "Microsoft.Network/publicIPAddresses", + Name: "machine-0-public-ip", + Location: "westus", + Tags: to.StringMap(s.vmTags), + Properties: &network.PublicIPAddressPropertiesFormat{ + PublicIPAllocationMethod: network.Dynamic, + }, + }, { + APIVersion: network.APIVersion, + Type: "Microsoft.Network/networkInterfaces", + Name: "machine-0-primary", + Location: "westus", + Tags: to.StringMap(s.vmTags), + Properties: &network.InterfacePropertiesFormat{ + IPConfigurations: &ipConfigurations, + }, + DependsOn: []string{ + publicIPAddressId, + `[resourceId('Microsoft.Network/virtualNetworks', 'juju-internal-network')]`, + }, + }, { + APIVersion: compute.APIVersion, + Type: "Microsoft.Compute/virtualMachines", + Name: "machine-0", + Location: "westus", + Tags: to.StringMap(s.vmTags), + Properties: &compute.VirtualMachineProperties{ + HardwareProfile: &compute.HardwareProfile{ + VMSize: "Standard_D1", + }, + StorageProfile: &compute.StorageProfile{ + ImageReference: args.imageReference, + OsDisk: 
&compute.OSDisk{ + Name: to.StringPtr("machine-0"), + CreateOption: compute.FromImage, + Caching: compute.ReadWrite, + Vhd: &compute.VirtualHardDisk{ + URI: to.StringPtr(fmt.Sprintf( + `[concat(reference(resourceId('Microsoft.Storage/storageAccounts', '%s'), '%s').primaryEndpoints.blob, 'osvhds/machine-0.vhd')]`, + storageAccountName, storage.APIVersion, + )), + }, + DiskSizeGB: to.Int32Ptr(int32(args.diskSizeGB)), + }, + }, + OsProfile: args.osProfile, + NetworkProfile: &compute.NetworkProfile{&nics}, + AvailabilitySet: availabilitySetSubResource, + }, + DependsOn: vmDependsOn, + }}...) + if args.vmExtension != nil { + templateResources = append(templateResources, armtemplates.Resource{ + APIVersion: compute.APIVersion, + Type: "Microsoft.Compute/virtualMachines/extensions", + Name: "machine-0/JujuCustomScriptExtension", + Location: "westus", + Tags: to.StringMap(s.vmTags), + Properties: args.vmExtension, + DependsOn: []string{"Microsoft.Compute/virtualMachines/machine-0"}, + }) + } + templateMap := map[string]interface{}{ + "$schema": "http://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", + "contentVersion": "1.0.0.0", + "resources": templateResources, + } + deployment := &resources.Deployment{ + &resources.DeploymentProperties{ + Template: &templateMap, + Mode: resources.Incremental, + }, + } + + // Validate HTTP request bodies. + var startInstanceRequests startInstanceRequests + if args.vmExtension != nil { + // It must be Windows or CentOS, so + // there should be no image query. 
+ c.Assert(requests, gc.HasLen, numExpectedStartInstanceRequests-1) + c.Assert(requests[0].Method, gc.Equals, "GET") // vmSizes + c.Assert(requests[1].Method, gc.Equals, "PUT") // create deployment + startInstanceRequests.vmSizes = requests[0] + startInstanceRequests.deployment = requests[1] + } else { + c.Assert(requests, gc.HasLen, numExpectedStartInstanceRequests) + c.Assert(requests[0].Method, gc.Equals, "GET") // vmSizes + c.Assert(requests[1].Method, gc.Equals, "GET") // skus + c.Assert(requests[2].Method, gc.Equals, "PUT") // create deployment + startInstanceRequests.vmSizes = requests[0] + startInstanceRequests.skus = requests[1] + startInstanceRequests.deployment = requests[2] + } + + // Marshal/unmarshal the deployment we expect, so it's in map form. + var expected resources.Deployment + data, err := json.Marshal(&deployment) + c.Assert(err, jc.ErrorIsNil) + err = json.Unmarshal(data, &expected) + c.Assert(err, jc.ErrorIsNil) + + // Check that we send what we expect. CustomData is non-deterministic, + // so don't compare it. // TODO(axw) shouldn't CustomData be deterministic? Look into this. 
- var virtualMachine compute.VirtualMachine - unmarshalRequestBody(c, req, &virtualMachine) - c.Assert(to.String(virtualMachine.Properties.OsProfile.CustomData), gc.Not(gc.HasLen), 0) - virtualMachine.Properties.OsProfile.CustomData = to.StringPtr("") - c.Assert(&virtualMachine, jc.DeepEquals, expect) + var actual resources.Deployment + unmarshalRequestBody(c, startInstanceRequests.deployment, &actual) + c.Assert(actual.Properties, gc.NotNil) + c.Assert(actual.Properties.Template, gc.NotNil) + resources := (*actual.Properties.Template)["resources"].([]interface{}) + c.Assert(resources, gc.HasLen, len(templateResources)) + + vmResourceIndex := len(resources) - 1 + if args.vmExtension != nil { + vmResourceIndex-- + } + vmResource := resources[vmResourceIndex].(map[string]interface{}) + vmResourceProperties := vmResource["properties"].(map[string]interface{}) + osProfile := vmResourceProperties["osProfile"].(map[string]interface{}) + osProfile["customData"] = "" + c.Assert(actual, jc.DeepEquals, expected) + + return startInstanceRequests } type startInstanceRequests struct { - vmSizes *http.Request - storageAccounts *http.Request - subnet *http.Request - skus *http.Request - publicIPAddress *http.Request - nics *http.Request - networkInterface *http.Request - availabilitySet *http.Request - virtualMachine *http.Request + vmSizes *http.Request + skus *http.Request + deployment *http.Request } func (s *environSuite) TestBootstrap(c *gc.C) { @@ -717,72 +938,30 @@ result, err := env.Bootstrap( ctx, environs.BootstrapParams{ ControllerConfig: testing.FakeControllerConfig(), - AvailableTools: makeToolsList(series.LatestLts()), + AvailableTools: makeToolsList("quantal"), + BootstrapSeries: "quantal", }, ) c.Assert(err, jc.ErrorIsNil) c.Assert(result.Arch, gc.Equals, "amd64") - c.Assert(result.Series, gc.Equals, series.LatestLts()) - - c.Assert(len(s.requests), gc.Equals, 17) - - c.Assert(s.requests[0].Method, gc.Equals, "PUT") // resource group - 
c.Assert(s.requests[1].Method, gc.Equals, "PUT") // vnet - c.Assert(s.requests[2].Method, gc.Equals, "PUT") // network security group - c.Assert(s.requests[3].Method, gc.Equals, "PUT") // subnet - c.Assert(s.requests[4].Method, gc.Equals, "POST") // check storage account name - c.Assert(s.requests[5].Method, gc.Equals, "PUT") // create storage account - - assertRequestBody(c, s.requests[0], &s.group) - - s.vnet.ID = nil - s.vnet.Name = nil - assertRequestBody(c, s.requests[1], s.vnet) - - securityRules := []network.SecurityRule{{ - Name: to.StringPtr("SSHInbound"), - Properties: &network.SecurityRulePropertiesFormat{ - Description: to.StringPtr("Allow SSH access to all machines"), - Protocol: network.TCP, - SourceAddressPrefix: to.StringPtr("*"), - SourcePortRange: to.StringPtr("*"), - DestinationAddressPrefix: to.StringPtr("*"), - DestinationPortRange: to.StringPtr("22"), - Access: network.Allow, - Priority: to.IntPtr(100), - Direction: network.Inbound, - }, - }} - assertRequestBody(c, s.requests[2], &network.SecurityGroup{ - Location: to.StringPtr("westus"), - Tags: s.nsg.Tags, - Properties: &network.SecurityGroupPropertiesFormat{ - SecurityRules: &securityRules, - }, - }) + c.Assert(result.Series, gc.Equals, "quantal") - s.subnet.ID = nil - s.subnet.Name = nil - assertRequestBody(c, s.requests[3], s.subnet) - - assertRequestBody(c, s.requests[4], &storage.AccountCheckNameAvailabilityParameters{ - Name: to.StringPtr(fakeStorageAccount), - Type: to.StringPtr("Microsoft.Storage/storageAccounts"), - }) - - assertRequestBody(c, s.requests[5], &storage.AccountCreateParameters{ - Location: to.StringPtr("westus"), - Tags: s.storageAccount.Tags, - Properties: &storage.AccountPropertiesCreateParameters{ - AccountType: "Standard_LRS", - }, + c.Assert(len(s.requests), gc.Equals, numExpectedStartInstanceRequests+1) + s.vmTags[tags.JujuIsController] = to.StringPtr("true") + s.assertStartInstanceRequests(c, s.requests[1:], assertStartInstanceRequestsParams{ + 
availabilitySetName: "juju-controller", + imageReference: &quantalImageReference, + diskSizeGB: 32, + osProfile: &linuxOsProfile, }) } func (s *environSuite) TestAllInstancesResourceGroupNotFound(c *gc.C) { env := s.openEnviron(c) sender := mocks.NewSender() - sender.EmitStatus("resource group not found", http.StatusNotFound) + sender.AppendResponse(mocks.NewResponseWithStatus( + "resource group not found", http.StatusNotFound, + )) s.sender = azuretesting.Senders{sender} _, err := env.AllInstances() c.Assert(err, jc.ErrorIsNil) @@ -790,9 +969,15 @@ func (s *environSuite) TestStopInstancesNotFound(c *gc.C) { env := s.openEnviron(c) - sender := mocks.NewSender() - sender.EmitStatus("vm not found", http.StatusNotFound) - s.sender = azuretesting.Senders{sender, sender, sender} + sender0 := mocks.NewSender() + sender0.AppendResponse(mocks.NewResponseWithStatus( + "vm not found", http.StatusNotFound, + )) + sender1 := mocks.NewSender() + sender1.AppendResponse(mocks.NewResponseWithStatus( + "vm not found", http.StatusNotFound, + )) + s.sender = azuretesting.Senders{sender0, sender1} err := env.StopInstances("a", "b") c.Assert(err, jc.ErrorIsNil) } @@ -800,52 +985,130 @@ func (s *environSuite) TestStopInstances(c *gc.C) { env := s.openEnviron(c) - // Security group has rules for machine-0 but not machine-1, and - // has a rule that doesn't match either. + // Security group has rules for machine-0, as well as a rule that doesn't match. nsg := makeSecurityGroup( - makeSecurityRule("machine-0-80", "10.0.0.4", "80"), - makeSecurityRule("machine-0-1000-2000", "10.0.0.4", "1000-2000"), - makeSecurityRule("machine-42", "10.0.0.5", "*"), + makeSecurityRule("machine-0-80", "192.168.0.4", "80"), + makeSecurityRule("machine-0-1000-2000", "192.168.0.4", "1000-2000"), + makeSecurityRule("machine-42", "192.168.0.5", "*"), ) // Create an IP configuration with a public IP reference. This will // cause an update to the NIC to detach public IPs. 
- nic0IPConfiguration := makeIPConfiguration("10.0.0.4") - nic0IPConfiguration.Properties.PublicIPAddress = &network.SubResource{} + nic0IPConfiguration := makeIPConfiguration("192.168.0.4") + nic0IPConfiguration.Properties.PublicIPAddress = &network.PublicIPAddress{} nic0 := makeNetworkInterface("nic-0", "machine-0", nic0IPConfiguration) s.sender = azuretesting.Senders{ - s.networkInterfacesSender( - nic0, - makeNetworkInterface("nic-1", "machine-1"), - makeNetworkInterface("nic-2", "machine-1"), - ), - s.virtualMachinesSender(makeVirtualMachine("machine-0")), - s.publicIPAddressesSender( - makePublicIPAddress("pip-0", "machine-0", "1.2.3.4"), - ), - s.storageAccountsSender(), + s.makeSender(".*/deployments/machine-0/cancel", nil), // POST + s.storageAccountSender(), s.storageAccountKeysSender(), + s.networkInterfacesSender(nic0), + s.publicIPAddressesSender(makePublicIPAddress("pip-0", "machine-0", "1.2.3.4")), s.makeSender(".*/virtualMachines/machine-0", nil), // DELETE s.makeSender(".*/networkSecurityGroups/juju-internal-nsg", nsg), // GET s.makeSender(".*/networkSecurityGroups/juju-internal-nsg/securityRules/machine-0-80", nil), // DELETE s.makeSender(".*/networkSecurityGroups/juju-internal-nsg/securityRules/machine-0-1000-2000", nil), // DELETE - s.makeSender(".*/networkInterfaces/nic-0", nic0), // PUT - s.makeSender(".*/publicIPAddresses/pip-0", nil), // DELETE s.makeSender(".*/networkInterfaces/nic-0", nil), // DELETE - s.makeSender(".*/virtualMachines/machine-1", nil), // DELETE - s.makeSender(".*/networkSecurityGroups/juju-internal-nsg", nsg), // GET - s.makeSender(".*/networkInterfaces/nic-1", nil), // DELETE - s.makeSender(".*/networkInterfaces/nic-2", nil), // DELETE + s.makeSender(".*/publicIPAddresses/pip-0", nil), // DELETE + s.makeSender(".*/deployments/machine-0", nil), // DELETE } - err := env.StopInstances("machine-0", "machine-1", "machine-2") + err := env.StopInstances("machine-0") c.Assert(err, jc.ErrorIsNil) s.storageClient.CheckCallNames(c, 
- "NewClient", "DeleteBlobIfExists", "DeleteBlobIfExists", + "NewClient", "DeleteBlobIfExists", ) s.storageClient.CheckCall(c, 1, "DeleteBlobIfExists", "osvhds", "machine-0") - s.storageClient.CheckCall(c, 2, "DeleteBlobIfExists", "osvhds", "machine-1") +} + +func (s *environSuite) TestStopInstancesMultiple(c *gc.C) { + env := s.openEnviron(c) + + vmDeleteSender0 := s.makeSender(".*/virtualMachines/machine-[01]", nil) + vmDeleteSender1 := s.makeSender(".*/virtualMachines/machine-[01]", nil) + vmDeleteSender0.SetError(errors.New("blargh")) + vmDeleteSender1.SetError(errors.New("blargh")) + + s.sender = azuretesting.Senders{ + s.makeSender(".*/deployments/machine-[01]/cancel", nil), // POST + s.makeSender(".*/deployments/machine-[01]/cancel", nil), // POST + + // We should only query the NICs, public IPs, and storage + // account/keys, regardless of how many instances are deleted. + s.storageAccountSender(), + s.storageAccountKeysSender(), + s.networkInterfacesSender(), + s.publicIPAddressesSender(), + + vmDeleteSender0, + vmDeleteSender1, + } + err := env.StopInstances("machine-0", "machine-1") + c.Assert(err, gc.ErrorMatches, `deleting instance "machine-[01]":.*blargh`) +} + +func (s *environSuite) TestStopInstancesDeploymentNotFound(c *gc.C) { + env := s.openEnviron(c) + + cancelSender := mocks.NewSender() + cancelSender.AppendResponse(mocks.NewResponseWithStatus( + "deployment not found", http.StatusNotFound, + )) + s.sender = azuretesting.Senders{cancelSender} + err := env.StopInstances("machine-0") + c.Assert(err, jc.ErrorIsNil) +} + +func (s *environSuite) TestStopInstancesStorageAccountNoKeys(c *gc.C) { + s.PatchValue(&s.storageAccountKeys.Keys, nil) + s.testStopInstancesStorageAccountNotFound(c) +} + +func (s *environSuite) TestStopInstancesStorageAccountNoFullKey(c *gc.C) { + keys := *s.storageAccountKeys.Keys + s.PatchValue(&keys[0].Permissions, storage.READ) + s.testStopInstancesStorageAccountNotFound(c) +} + +func (s *environSuite) 
testStopInstancesStorageAccountNotFound(c *gc.C) { + env := s.openEnviron(c) + s.sender = azuretesting.Senders{ + s.makeSender("/deployments/machine-0", s.deployment), // Cancel + s.storageAccountSender(), + s.storageAccountKeysSender(), + s.networkInterfacesSender(), // GET: no NICs + s.publicIPAddressesSender(), // GET: no public IPs + s.makeSender(".*/virtualMachines/machine-0", nil), // DELETE + s.makeSender(".*/networkSecurityGroups/juju-internal-nsg", makeSecurityGroup()), // GET: no rules + s.makeSender(".*/deployments/machine-0", nil), // DELETE + } + err := env.StopInstances("machine-0") + c.Assert(err, jc.ErrorIsNil) +} + +func (s *environSuite) TestStopInstancesStorageAccountError(c *gc.C) { + env := s.openEnviron(c) + errorSender := s.storageAccountSender() + errorSender.SetError(errors.New("blargh")) + s.sender = azuretesting.Senders{ + s.makeSender("/deployments/machine-0", s.deployment), // Cancel + errorSender, + } + err := env.StopInstances("machine-0") + c.Assert(err, gc.ErrorMatches, "getting storage account:.*blargh") +} + +func (s *environSuite) TestStopInstancesStorageAccountKeysError(c *gc.C) { + env := s.openEnviron(c) + errorSender := s.storageAccountKeysSender() + errorSender.SetError(errors.New("blargh")) + s.sender = azuretesting.Senders{ + s.makeSender("/deployments/machine-0", s.deployment), // Cancel + s.storageAccountSender(), + errorSender, + } + err := env.StopInstances("machine-0") + c.Assert(err, gc.ErrorMatches, "getting storage account key:.*blargh") } func (s *environSuite) TestConstraintsValidatorUnsupported(c *gc.C) { @@ -930,7 +1193,7 @@ c.Assert(s.requests[0].Method, gc.Equals, "GET") c.Assert(s.requests[0].URL.Query().Get("$filter"), gc.Equals, fmt.Sprintf( "tagname eq 'juju-controller-uuid' and tagvalue eq '%s'", - testing.ModelTag.Id(), + testing.ControllerTag.Id(), )) c.Assert(s.requests[1].Method, gc.Equals, "DELETE") c.Assert(s.requests[2].Method, gc.Equals, "DELETE") @@ -950,17 +1213,21 @@ } result := 
resources.ResourceGroupListResult{Value: &groups} - errorSender1 := s.makeSender(".*/resourcegroups/group[12]", nil) - errorSender1.EmitStatus("foo", http.StatusInternalServerError) - errorSender2 := s.makeSender(".*/resourcegroups/group[12]", nil) - errorSender2.EmitStatus("bar", http.StatusInternalServerError) + makeErrorSender := func(err string) *azuretesting.MockSender { + errorSender := &azuretesting.MockSender{ + Sender: mocks.NewSender(), + PathPattern: ".*/resourcegroups/group[12].*", + } + errorSender.SetError(errors.New(err)) + return errorSender + } env := s.openEnviron(c) s.requests = nil s.sender = azuretesting.Senders{ s.makeSender(".*/resourcegroups", result), // GET - errorSender1, // DELETE - errorSender2, // DELETE + makeErrorSender("foo"), // DELETE + makeErrorSender("bar"), // DELETE } destroyErr := env.DestroyController(s.controllerUUID) // checked below, once we know the order of deletions. @@ -978,8 +1245,8 @@ c.Assert(groupsDeleted, jc.SameContents, []string{"group1", "group2"}) c.Check(destroyErr, gc.ErrorMatches, - `deleting resource group "group1":.* failed with .*; `+ - `deleting resource group "group2":.* failed with .*`) - c.Check(destroyErr, gc.ErrorMatches, ".*failed with foo.*") - c.Check(destroyErr, gc.ErrorMatches, ".*failed with bar.*") + `deleting resource group "group1":.*; `+ + `deleting resource group "group2":.*`) + c.Check(destroyErr, gc.ErrorMatches, ".*foo.*") + c.Check(destroyErr, gc.ErrorMatches, ".*bar.*") } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/azure/export_test.go juju-core-2.0.0/src/github.com/juju/juju/provider/azure/export_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/azure/export_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/azure/export_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -13,5 +13,5 @@ } func ForceTokenRefresh(env environs.Environ) error { - return env.(*azureEnviron).token.Refresh() + return 
env.(*azureEnviron).authorizer.refresh() } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/azure/init.go juju-core-2.0.0/src/github.com/juju/juju/provider/azure/init.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/azure/init.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/azure/init.go 2016-10-13 14:31:49.000000000 +0000 @@ -8,6 +8,7 @@ "github.com/juju/utils/clock" "github.com/juju/juju/environs" + "github.com/juju/juju/provider/azure/internal/azureauth" "github.com/juju/juju/provider/azure/internal/azurestorage" ) @@ -27,9 +28,10 @@ func init() { environProvider, err := NewProvider(ProviderConfig{ - NewStorageClient: azurestorage.NewClient, - StorageAccountNameGenerator: RandomStorageAccountName, - RetryClock: &clock.WallClock, + NewStorageClient: azurestorage.NewClient, + RetryClock: &clock.WallClock, + RandomWindowsAdminPassword: randomAdminPassword, + InteractiveCreateServicePrincipal: azureauth.InteractiveCreateServicePrincipal, }) if err != nil { panic(err) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/azure/instance.go juju-core-2.0.0/src/github.com/juju/juju/provider/azure/instance.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/azure/instance.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/azure/instance.go 2016-10-13 14:31:49.000000000 +0000 @@ -8,10 +8,9 @@ "net/http" "strings" - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/to" - "github.com/Azure/azure-sdk-for-go/arm/compute" "github.com/Azure/azure-sdk-for-go/arm/network" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/to" "github.com/juju/errors" "github.com/juju/juju/instance" @@ -21,7 +20,8 @@ ) type azureInstance struct { - compute.VirtualMachine + vmName string + 
provisioningState string env *azureEnviron networkInterfaces []network.Interface publicIPAddresses []network.PublicIPAddress @@ -32,21 +32,39 @@ // Note: we use Name and not Id, since all VM operations are in // terms of the VM name (qualified by resource group). The ID is // an internal detail. - return instance.Id(to.String(inst.VirtualMachine.Name)) + return instance.Id(inst.vmName) } // Status is specified in the Instance interface. func (inst *azureInstance) Status() instance.InstanceStatus { - // NOTE(axw) ideally we would use the power state, but that is only - // available when using the "instance view". Instance view is only - // delivered when explicitly requested, and you can only request it - // when querying a single VM. This means the results of AllInstances - // or Instances would have the instance view missing. + instanceStatus := status.Empty + message := inst.provisioningState + switch inst.provisioningState { + case "Succeeded": + // TODO(axw) once a VM has been started, we should + // start using its power state to show if it's + // really running or not. This is just a nice to + // have, since we should not expect a VM to ever + // be stopped. + instanceStatus = status.Running + message = "" + case "Canceled", "Failed": + // TODO(axw) if the provisioning state is "Failed", then we + // should use the error message from the deployment description + // as the Message. The error details are not currently exposed + // in the Azure SDK. 
See: + // https://github.com/Azure/azure-sdk-for-go/issues/399 + instanceStatus = status.ProvisioningError + case "Running": + message = "" + fallthrough + default: + instanceStatus = status.Provisioning + } return instance.InstanceStatus{ - Status: status.StatusEmpty, - Message: to.String(inst.Properties.ProvisioningState), + Status: instanceStatus, + Message: message, } - } // setInstanceAddresses queries Azure for the NICs and public IPs associated @@ -54,69 +72,83 @@ // VirtualMachines are up-to-date, and that there are no concurrent accesses // to the instances. func setInstanceAddresses( - pipClient network.PublicIPAddressesClient, + callAPI callAPIFunc, resourceGroup string, + nicClient network.InterfacesClient, + pipClient network.PublicIPAddressesClient, instances []*azureInstance, - nicsResult network.InterfaceListResult, ) (err error) { - - instanceNics := make(map[instance.Id][]network.Interface) - instancePips := make(map[instance.Id][]network.PublicIPAddress) - for _, inst := range instances { - instanceNics[inst.Id()] = nil - instancePips[inst.Id()] = nil + instanceNics, err := instanceNetworkInterfaces( + callAPI, resourceGroup, nicClient, + ) + if err != nil { + return errors.Annotate(err, "listing network interfaces") } - - // When setAddresses returns without error, update each - // instance's network interfaces and public IP addresses. - setInstanceFields := func(inst *azureInstance) { + instancePips, err := instancePublicIPAddresses( + callAPI, resourceGroup, pipClient, + ) + if err != nil { + return errors.Annotate(err, "listing public IP addresses") + } + for _, inst := range instances { inst.networkInterfaces = instanceNics[inst.Id()] inst.publicIPAddresses = instancePips[inst.Id()] } - defer func() { - if err != nil { - return - } - for _, inst := range instances { - setInstanceFields(inst) - } - }() + return nil +} - // We do not rely on references because of how StopInstances works. 
- // In order to not leak resources we must not delete the virtual - // machine until after all of its dependencies are deleted. - // - // NICs and PIPs cannot be deleted until they have no references. - // Thus, we cannot delete a PIP until there is no reference to it - // in any NICs, and likewise we cannot delete a NIC until there - // is no reference to it in any virtual machine. - - if nicsResult.Value != nil { - for _, nic := range *nicsResult.Value { - instanceId := instance.Id(toTags(nic.Tags)[jujuMachineNameTag]) - if _, ok := instanceNics[instanceId]; !ok { - continue - } - instanceNics[instanceId] = append(instanceNics[instanceId], nic) - } +// instanceNetworkInterfaces lists all network interfaces in the resource +// group, and returns a mapping from instance ID to the network interfaces +// associated with that instance. +func instanceNetworkInterfaces( + callAPI callAPIFunc, + resourceGroup string, + nicClient network.InterfacesClient, +) (map[instance.Id][]network.Interface, error) { + var nicsResult network.InterfaceListResult + if err := callAPI(func() (autorest.Response, error) { + var err error + nicsResult, err = nicClient.List(resourceGroup) + return nicsResult.Response, err + }); err != nil { + return nil, errors.Annotate(err, "listing network interfaces") } - - pipsResult, err := pipClient.List(resourceGroup) - if err != nil { - return errors.Annotate(err, "listing public IP addresses") + if nicsResult.Value == nil || len(*nicsResult.Value) == 0 { + return nil, nil } - if pipsResult.Value != nil { - for _, pip := range *pipsResult.Value { - instanceId := instance.Id(toTags(pip.Tags)[jujuMachineNameTag]) - if _, ok := instanceNics[instanceId]; !ok { - continue - } - instancePips[instanceId] = append(instancePips[instanceId], pip) - } + instanceNics := make(map[instance.Id][]network.Interface) + for _, nic := range *nicsResult.Value { + instanceId := instance.Id(toTags(nic.Tags)[jujuMachineNameTag]) + instanceNics[instanceId] = 
append(instanceNics[instanceId], nic) } + return instanceNics, nil +} - // Fields will be assigned to instances by the deferred call. - return nil +// interfacePublicIPAddresses lists all public IP addresses in the resource +// group, and returns a mapping from instance ID to the public IP addresses +// associated with that instance. +func instancePublicIPAddresses( + callAPI callAPIFunc, + resourceGroup string, + pipClient network.PublicIPAddressesClient, +) (map[instance.Id][]network.PublicIPAddress, error) { + var pipsResult network.PublicIPAddressListResult + if err := callAPI(func() (autorest.Response, error) { + var err error + pipsResult, err = pipClient.List(resourceGroup) + return pipsResult.Response, err + }); err != nil { + return nil, errors.Annotate(err, "listing public IP addresses") + } + if pipsResult.Value == nil || len(*pipsResult.Value) == 0 { + return nil, nil + } + instancePips := make(map[instance.Id][]network.PublicIPAddress) + for _, pip := range *pipsResult.Value { + instanceId := instance.Id(toTags(pip.Tags)[jujuMachineNameTag]) + instancePips[instanceId] = append(instancePips[instanceId], pip) + } + return instancePips, nil } // Addresses is specified in the Instance interface. @@ -149,14 +181,10 @@ return addresses, nil } -// internalNetworkAddress returns the instance's jujunetwork.Address for the -// internal virtual network. This address is used to identify the machine in +// primaryNetworkAddress returns the instance's primary jujunetwork.Address for +// the internal virtual network. This address is used to identify the machine in // network security rules. 
-func (inst *azureInstance) internalNetworkAddress() (jujunetwork.Address, error) { - subscriptionId := inst.env.subscriptionId - resourceGroup := inst.env.resourceGroup - internalSubnetId := internalSubnetId(resourceGroup, subscriptionId) - +func (inst *azureInstance) primaryNetworkAddress() (jujunetwork.Address, error) { for _, nic := range inst.networkInterfaces { if nic.Properties.IPConfigurations == nil { continue @@ -165,7 +193,7 @@ if ipConfiguration.Properties.Subnet == nil { continue } - if strings.ToLower(to.String(ipConfiguration.Properties.Subnet.ID)) != strings.ToLower(internalSubnetId) { + if !to.Bool(ipConfiguration.Properties.Primary) { continue } privateIpAddress := ipConfiguration.Properties.PrivateIPAddress @@ -185,7 +213,7 @@ func (inst *azureInstance) OpenPorts(machineId string, ports []jujunetwork.PortRange) error { nsgClient := network.SecurityGroupsClient{inst.env.network} securityRuleClient := network.SecurityRulesClient{inst.env.network} - internalNetworkAddress, err := inst.internalNetworkAddress() + primaryNetworkAddress, err := inst.primaryNetworkAddress() if err != nil { return errors.Trace(err) } @@ -194,7 +222,7 @@ var nsg network.SecurityGroup if err := inst.env.callAPI(func() (autorest.Response, error) { var err error - nsg, err = nsgClient.Get(inst.env.resourceGroup, securityGroupName) + nsg, err = nsgClient.Get(inst.env.resourceGroup, securityGroupName, "") return nsg.Response, err }); err != nil { return errors.Annotate(err, "querying network security group") @@ -258,17 +286,17 @@ SourcePortRange: to.StringPtr("*"), DestinationPortRange: to.StringPtr(portRange), SourceAddressPrefix: to.StringPtr("*"), - DestinationAddressPrefix: to.StringPtr(internalNetworkAddress.Value), + DestinationAddressPrefix: to.StringPtr(primaryNetworkAddress.Value), Access: network.Allow, - Priority: to.IntPtr(priority), + Priority: to.Int32Ptr(priority), Direction: network.Inbound, }, } if err := inst.env.callAPI(func() (autorest.Response, error) { - 
result, err := securityRuleClient.CreateOrUpdate( + return securityRuleClient.CreateOrUpdate( inst.env.resourceGroup, securityGroupName, ruleName, rule, + nil, // abort channel ) - return result.Response, err }); err != nil { return errors.Annotatef(err, "creating security rule for %s", ports) } @@ -294,6 +322,7 @@ var err error result, err = securityRuleClient.Delete( inst.env.resourceGroup, securityGroupName, ruleName, + nil, // abort channel ) return result, err }); err != nil { @@ -312,7 +341,7 @@ var nsg network.SecurityGroup if err := inst.env.callAPI(func() (autorest.Response, error) { var err error - nsg, err = nsgClient.Get(inst.env.resourceGroup, securityGroupName) + nsg, err = nsgClient.Get(inst.env.resourceGroup, securityGroupName, "") return nsg.Response, err }); err != nil { return nil, errors.Annotate(err, "querying network security group") @@ -330,7 +359,7 @@ if rule.Properties.Access != network.Allow { continue } - if to.Int(rule.Properties.Priority) <= securityRuleInternalMax { + if to.Int32(rule.Properties.Priority) <= securityRuleInternalMax { continue } if !strings.HasPrefix(to.String(rule.Name), prefix) { @@ -385,7 +414,7 @@ var nsg network.SecurityGroup if err := callAPI(func() (autorest.Response, error) { var err error - nsg, err = nsgClient.Get(resourceGroup, internalSecurityGroupName) + nsg, err = nsgClient.Get(resourceGroup, internalSecurityGroupName, "") return nsg.Response, err }); err != nil { return errors.Annotate(err, "querying network security group") @@ -399,11 +428,17 @@ if !strings.HasPrefix(ruleName, prefix) { continue } - result, err := securityRuleClient.Delete( - resourceGroup, - internalSecurityGroupName, - ruleName, - ) + var result autorest.Response + err := callAPI(func() (autorest.Response, error) { + var err error + result, err = securityRuleClient.Delete( + resourceGroup, + internalSecurityGroupName, + ruleName, + nil, // abort channel + ) + return result, err + }) if err != nil { if result.Response == nil || 
result.StatusCode != http.StatusNotFound { return errors.Annotatef(err, "deleting security rule %q", ruleName) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/azure/instance_test.go juju-core-2.0.0/src/github.com/juju/juju/provider/azure/instance_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/azure/instance_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/azure/instance_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -7,10 +7,11 @@ "net/http" "path" - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/mocks" - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/to" "github.com/Azure/azure-sdk-for-go/arm/compute" "github.com/Azure/azure-sdk-for-go/arm/network" + "github.com/Azure/azure-sdk-for-go/arm/resources/resources" + "github.com/Azure/go-autorest/autorest/mocks" + "github.com/Azure/go-autorest/autorest/to" jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" @@ -18,7 +19,9 @@ "github.com/juju/juju/instance" jujunetwork "github.com/juju/juju/network" "github.com/juju/juju/provider/azure" + "github.com/juju/juju/provider/azure/internal/azureauth" "github.com/juju/juju/provider/azure/internal/azuretesting" + "github.com/juju/juju/status" "github.com/juju/juju/testing" ) @@ -29,7 +32,7 @@ requests []*http.Request sender azuretesting.Senders env environs.Environ - virtualMachines []compute.VirtualMachine + deployments []resources.DeploymentExtended networkInterfaces []network.Interface publicIPAddresses []network.PublicIPAddress } @@ -39,8 +42,10 @@ func (s *instanceSuite) SetUpTest(c *gc.C) { s.BaseSuite.SetUpTest(c) s.provider = newProvider(c, azure.ProviderConfig{ - Sender: &s.sender, - RequestInspector: requestRecorder(&s.requests), + Sender: &s.sender, + RequestInspector: azuretesting.RequestRecorder(&s.requests), + RandomWindowsAdminPassword: func() string { return "sorandom" }, + 
InteractiveCreateServicePrincipal: azureauth.InteractiveCreateServicePrincipal, }) s.env = openEnviron(c, s.provider, &s.sender) s.sender = nil @@ -49,9 +54,27 @@ makeNetworkInterface("nic-0", "machine-0"), } s.publicIPAddresses = nil - s.virtualMachines = []compute.VirtualMachine{ - makeVirtualMachine("machine-0"), - makeVirtualMachine("machine-1"), + s.deployments = []resources.DeploymentExtended{ + makeDeployment("machine-0"), + makeDeployment("machine-1"), + } +} + +func makeDeployment(name string) resources.DeploymentExtended { + dependsOn := []resources.BasicDependency{{ + ResourceType: to.StringPtr("Microsoft.Compute/availabilitySets"), + ResourceName: to.StringPtr("mysql"), + }} + dependencies := []resources.Dependency{{ + ResourceType: to.StringPtr("Microsoft.Compute/virtualMachines"), + DependsOn: &dependsOn, + }} + return resources.DeploymentExtended{ + Name: to.StringPtr(name), + Properties: &resources.DeploymentPropertiesExtended{ + ProvisioningState: to.StringPtr("Succeeded"), + Dependencies: &dependencies, + }, } } @@ -59,7 +82,7 @@ return compute.VirtualMachine{ Name: to.StringPtr(name), Properties: &compute.VirtualMachineProperties{ - ProvisioningState: to.StringPtr("Successful"), + ProvisioningState: to.StringPtr("Succeeded"), }, } } @@ -114,7 +137,7 @@ DestinationAddressPrefix: to.StringPtr(ipAddress), DestinationPortRange: to.StringPtr(ports), Access: network.Allow, - Priority: to.IntPtr(200), + Priority: to.Int32Ptr(200), Direction: network.Inbound, }, } @@ -127,29 +150,28 @@ } func (s *instanceSuite) getInstances(c *gc.C, ids ...instance.Id) []instance.Instance { + s.sender = s.getInstancesSender() + instances, err := s.env.Instances(ids) + c.Assert(err, jc.ErrorIsNil) + s.sender = azuretesting.Senders{} + s.requests = nil + return instances +} +func (s *instanceSuite) getInstancesSender() azuretesting.Senders { + deploymentsSender := azuretesting.NewSenderWithValue(&resources.DeploymentListResult{ + Value: &s.deployments, + }) + 
deploymentsSender.PathPattern = ".*/deployments" nicsSender := azuretesting.NewSenderWithValue(&network.InterfaceListResult{ Value: &s.networkInterfaces, }) nicsSender.PathPattern = ".*/networkInterfaces" - - vmsSender := azuretesting.NewSenderWithValue(&compute.VirtualMachineListResult{ - Value: &s.virtualMachines, - }) - vmsSender.PathPattern = ".*/virtualMachines" - pipsSender := azuretesting.NewSenderWithValue(&network.PublicIPAddressListResult{ Value: &s.publicIPAddresses, }) pipsSender.PathPattern = ".*/publicIPAddresses" - - s.sender = azuretesting.Senders{nicsSender, vmsSender, pipsSender} - - instances, err := s.env.Instances(ids) - c.Assert(err, jc.ErrorIsNil) - s.sender = azuretesting.Senders{} - s.requests = nil - return instances + return azuretesting.Senders{deploymentsSender, nicsSender, pipsSender} } func networkSecurityGroupSender(rules []network.SecurityRule) *azuretesting.MockSender { @@ -164,22 +186,32 @@ func (s *instanceSuite) TestInstanceStatus(c *gc.C) { inst := s.getInstance(c) - c.Assert(inst.Status().Message, gc.Equals, "Successful") + assertInstanceStatus(c, inst.Status(), status.Running, "") } -func (s *instanceSuite) TestInstanceStatusNilProvisioningState(c *gc.C) { - s.virtualMachines[0].Properties.ProvisioningState = nil +func (s *instanceSuite) TestInstanceStatusDeploymentFailed(c *gc.C) { + s.deployments[0].Properties.ProvisioningState = to.StringPtr("Failed") + inst := s.getInstance(c) + assertInstanceStatus(c, inst.Status(), status.ProvisioningError, "Failed") +} + +func (s *instanceSuite) TestInstanceStatusDeploymentCanceled(c *gc.C) { + s.deployments[0].Properties.ProvisioningState = to.StringPtr("Canceled") inst := s.getInstance(c) - c.Assert(inst.Status().Message, gc.Equals, "") + assertInstanceStatus(c, inst.Status(), status.ProvisioningError, "Canceled") } -func (s *instanceSuite) TestInstanceStatusNoVM(c *gc.C) { - // Instances will still return an instance if there's a NIC, which is - // the last thing we delete. 
If there's no VM, we return the string - // "Partially Deleted" from Instance.Status(). - s.virtualMachines = nil +func (s *instanceSuite) TestInstanceStatusNilProvisioningState(c *gc.C) { + s.deployments[0].Properties.ProvisioningState = nil inst := s.getInstance(c) - c.Assert(inst.Status().Message, gc.Equals, "Partially Deleted") + assertInstanceStatus(c, inst.Status(), status.Allocating, "") +} + +func assertInstanceStatus(c *gc.C, actual instance.InstanceStatus, status status.Status, message string) { + c.Assert(actual, jc.DeepEquals, instance.InstanceStatus{ + Status: status, + Message: message, + }) } func (s *instanceSuite) TestInstanceAddressesEmpty(c *gc.C) { @@ -260,7 +292,7 @@ Protocol: network.UDP, DestinationPortRange: to.StringPtr("*"), Access: network.Allow, - Priority: to.IntPtr(200), + Priority: to.Int32Ptr(200), Direction: network.Inbound, }, }, { @@ -269,7 +301,7 @@ Protocol: network.TCP, DestinationPortRange: to.StringPtr("1000-2000"), Access: network.Allow, - Priority: to.IntPtr(201), + Priority: to.Int32Ptr(201), Direction: network.Inbound, }, }, { @@ -278,7 +310,7 @@ Protocol: network.Asterisk, DestinationPortRange: to.StringPtr("80"), Access: network.Allow, - Priority: to.IntPtr(202), + Priority: to.Int32Ptr(202), Direction: network.Inbound, }, }, { @@ -287,7 +319,7 @@ Protocol: network.TCP, DestinationPortRange: to.StringPtr("80"), Access: network.Allow, - Priority: to.IntPtr(202), + Priority: to.Int32Ptr(202), Direction: network.Inbound, }, }, { @@ -296,7 +328,7 @@ Protocol: network.TCP, DestinationPortRange: to.StringPtr("80"), Access: network.Deny, - Priority: to.IntPtr(202), + Priority: to.Int32Ptr(202), Direction: network.Inbound, }, }, { @@ -305,7 +337,7 @@ Protocol: network.TCP, DestinationPortRange: to.StringPtr("80"), Access: network.Allow, - Priority: to.IntPtr(202), + Priority: to.Int32Ptr(202), Direction: network.Outbound, }, }, { @@ -314,7 +346,7 @@ Protocol: network.TCP, DestinationPortRange: to.StringPtr("80"), Access: 
network.Allow, - Priority: to.IntPtr(199), // internal range + Priority: to.Int32Ptr(199), // internal range Direction: network.Inbound, }, }}) @@ -345,7 +377,9 @@ inst := s.getInstance(c) sender := mocks.NewSender() notFoundSender := mocks.NewSender() - notFoundSender.EmitStatus("rule not found", http.StatusNotFound) + notFoundSender.AppendResponse(mocks.NewResponseWithStatus( + "rule not found", http.StatusNotFound, + )) s.sender = azuretesting.Senders{sender, notFoundSender} err := inst.ClosePorts("0", []jujunetwork.PortRange{{ @@ -374,8 +408,9 @@ ) ipConfiguration := network.InterfaceIPConfiguration{ Properties: &network.InterfaceIPConfigurationPropertiesFormat{ + Primary: to.BoolPtr(true), PrivateIPAddress: to.StringPtr("10.0.0.4"), - Subnet: &network.SubResource{ + Subnet: &network.Subnet{ ID: to.StringPtr(internalSubnetId), }, }, @@ -386,7 +421,7 @@ inst := s.getInstance(c) okSender := mocks.NewSender() - okSender.EmitContent("{}") + okSender.AppendResponse(mocks.NewResponseWithContent("{}")) nsgSender := networkSecurityGroupSender(nil) s.sender = azuretesting.Senders{nsgSender, okSender, okSender} @@ -415,7 +450,7 @@ DestinationPortRange: to.StringPtr("1000"), DestinationAddressPrefix: to.StringPtr("10.0.0.4"), Access: network.Allow, - Priority: to.IntPtr(200), + Priority: to.Int32Ptr(200), Direction: network.Inbound, }, }) @@ -430,7 +465,7 @@ DestinationPortRange: to.StringPtr("1000-2000"), DestinationAddressPrefix: to.StringPtr("10.0.0.4"), Access: network.Allow, - Priority: to.IntPtr(201), + Priority: to.Int32Ptr(201), Direction: network.Inbound, }, }) @@ -444,8 +479,9 @@ ) ipConfiguration := network.InterfaceIPConfiguration{ Properties: &network.InterfaceIPConfigurationPropertiesFormat{ + Primary: to.BoolPtr(true), PrivateIPAddress: to.StringPtr("10.0.0.4"), - Subnet: &network.SubResource{ + Subnet: &network.Subnet{ ID: to.StringPtr(internalSubnetId), }, }, @@ -456,14 +492,14 @@ inst := s.getInstance(c) okSender := mocks.NewSender() - 
okSender.EmitContent("{}") + okSender.AppendResponse(mocks.NewResponseWithContent("{}")) nsgSender := networkSecurityGroupSender([]network.SecurityRule{{ Name: to.StringPtr("machine-0-tcp-1000"), Properties: &network.SecurityRulePropertiesFormat{ Protocol: network.Asterisk, DestinationPortRange: to.StringPtr("1000"), Access: network.Allow, - Priority: to.IntPtr(202), + Priority: to.Int32Ptr(202), Direction: network.Inbound, }, }}) @@ -494,7 +530,7 @@ DestinationPortRange: to.StringPtr("1000-2000"), DestinationAddressPrefix: to.StringPtr("10.0.0.4"), Access: network.Allow, - Priority: to.IntPtr(200), + Priority: to.Int32Ptr(200), Direction: network.Inbound, }, }) @@ -505,6 +541,24 @@ c.Assert(err, gc.ErrorMatches, "internal network address not found") } +func (s *instanceSuite) TestAllInstances(c *gc.C) { + s.sender = s.getInstancesSender() + instances, err := s.env.AllInstances() + c.Assert(err, jc.ErrorIsNil) + c.Assert(instances, gc.HasLen, 2) + c.Assert(instances[0].Id(), gc.Equals, instance.Id("machine-0")) + c.Assert(instances[1].Id(), gc.Equals, instance.Id("machine-1")) +} + +func (s *instanceSuite) TestControllerInstances(c *gc.C) { + *(*(*s.deployments[0].Properties.Dependencies)[0].DependsOn)[0].ResourceName = "juju-controller" + s.sender = s.getInstancesSender() + ids, err := s.env.ControllerInstances("foo") + c.Assert(err, jc.ErrorIsNil) + c.Assert(ids, gc.HasLen, 1) + c.Assert(ids[0], gc.Equals, instance.Id("machine-0")) +} + var internalSecurityGroupPath = path.Join( "/subscriptions", fakeSubscriptionId, "resourceGroups", "juju-testenv-model-"+testing.ModelTag.Id(), diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/azure/instancetype.go juju-core-2.0.0/src/github.com/juju/juju/provider/azure/instancetype.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/azure/instancetype.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/azure/instancetype.go 2016-10-13 14:31:49.000000000 +0000 @@ 
-4,8 +4,8 @@ package azure import ( - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/to" "github.com/Azure/azure-sdk-for-go/arm/compute" + "github.com/Azure/go-autorest/autorest/to" "github.com/juju/errors" "github.com/juju/utils/arch" @@ -105,12 +105,12 @@ Id: sizeName, Name: sizeName, Arches: []string{arch.AMD64}, - CpuCores: uint64(to.Int(size.NumberOfCores)), - Mem: uint64(to.Int(size.MemoryInMB)), + CpuCores: uint64(to.Int32(size.NumberOfCores)), + Mem: uint64(to.Int32(size.MemoryInMB)), // NOTE(axw) size.OsDiskSizeInMB is the *maximum* // OS-disk size. When we create a VM, we can create // one that is smaller. - RootDisk: mbToMib(uint64(to.Int(size.OsDiskSizeInMB))), + RootDisk: mbToMib(uint64(to.Int32(size.OsDiskSizeInMB))), Cost: uint64(cost), VirtType: &vtype, // tags are not currently supported by azure diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/azure/internal/ad/client.go juju-core-2.0.0/src/github.com/juju/juju/provider/azure/internal/ad/client.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/azure/internal/ad/client.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/azure/internal/ad/client.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,100 @@ +// This file is based on code from Azure/azure-sdk-for-go +// and Azure/go-autorest, which are both +// Copyright Microsoft Corporation. See the LICENSE +// file in this directory for details. +// +// NOTE(axw) this file contains a client for a subset of the +// Microsoft Graph API, which is not currently supported by +// the Azure SDK. When it is, this will be deleted. 
+ +package ad + +import ( + "fmt" + "io/ioutil" + "net/http" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + + "github.com/juju/juju/version" +) + +const ( + // APIVersion is the version of the Active Directory API + APIVersion = "1.6" +) + +func UserAgent() string { + return "Juju/" + version.Current.String() +} + +type ManagementClient struct { + autorest.Client + BaseURI string + APIVersion string +} + +func NewManagementClient(baseURI string) ManagementClient { + return ManagementClient{ + Client: autorest.NewClientWithUserAgent(UserAgent()), + BaseURI: baseURI, + APIVersion: APIVersion, + } +} + +// WithOdataErrorUnlessStatusCode returns a RespondDecorator that emits an +// azure.RequestError by reading the response body unless the response HTTP status code +// is among the set passed. +// +// If there is a chance service may return responses other than the Azure error +// format and the response cannot be parsed into an error, a decoding error will +// be returned containing the response body. In any case, the Responder will +// return an error if the status code is not satisfied. +// +// If this Responder returns an error, the response body will be replaced with +// an in-memory reader, which needs no further closing. +// +// NOTE(axw) this function is based on go-autorest/autorest/azure.WithErrorUnlessStatusCode. +// The only difference is that we extract "odata.error", instead of "error", +// from the response body; and we do not extract the message, which is in a +// different format for odata.error, and irrelevant to us. +func WithOdataErrorUnlessStatusCode(codes ...int) autorest.RespondDecorator { + return func(r autorest.Responder) autorest.Responder { + return autorest.ResponderFunc(func(resp *http.Response) error { + err := r.Respond(resp) + if err == nil && !autorest.ResponseHasStatusCode(resp, codes...) 
{ + var oe odataRequestError + defer resp.Body.Close() + + // Copy and replace the Body in case it does not contain an error object. + // This will leave the Body available to the caller. + b, decodeErr := autorest.CopyAndDecode(autorest.EncodedAsJSON, resp.Body, &oe) + resp.Body = ioutil.NopCloser(&b) + if decodeErr != nil { + return fmt.Errorf("ad: error response cannot be parsed: %q error: %v", b.String(), decodeErr) + } else if oe.ServiceError == nil { + oe.ServiceError = &odataServiceError{Code: "Unknown"} + } + + e := azure.RequestError{ + ServiceError: &azure.ServiceError{Code: oe.ServiceError.Code}, + RequestID: azure.ExtractRequestID(resp), + } + if e.StatusCode == nil { + e.StatusCode = resp.StatusCode + } + err = &e + } + return err + }) + } +} + +type odataRequestError struct { + ServiceError *odataServiceError `json:"odata.error"` +} + +type odataServiceError struct { + Code string `json:"code"` +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/azure/internal/ad/LICENSE juju-core-2.0.0/src/github.com/juju/juju/provider/azure/internal/ad/LICENSE --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/azure/internal/ad/LICENSE 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/azure/internal/ad/LICENSE 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2015 Microsoft Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ See the License for the specific language governing permissions and + limitations under the License. diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/azure/internal/ad/models.go juju-core-2.0.0/src/github.com/juju/juju/provider/azure/internal/ad/models.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/azure/internal/ad/models.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/azure/internal/ad/models.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,64 @@ +// This file is based on code from Azure/azure-sdk-for-go, +// which is Copyright Microsoft Corporation. See the LICENSE +// file in this directory for details. +// +// NOTE(axw) this file contains types for a subset of the +// Microsoft Graph API, which is not currently supported by +// the Azure SDK. When it is, this will be deleted. + +package ad + +import ( + "time" + + "github.com/Azure/go-autorest/autorest" +) + +type AADObject struct { + autorest.Response `json:"-"` + ObjectID string `json:"objectId"` + ObjectType string `json:"objectType"` + DisplayName string `json:"displayName"` + UserPrincipalName string `json:"userPrincipalName"` + Mail string `json:"mail"` + MailEnabled bool `json:"mailEnabled"` + SecurityEnabled bool `json:"securityEnabled"` + SignInName string `json:"signInName"` + ServicePrincipalNames []string `json:"servicePrincipalNames"` + UserType string `json:"userType"` +} + +type PasswordCredentialsListResult struct { + autorest.Response `json:"-"` + Value []PasswordCredential `json:"value,omitempty"` +} + +type PasswordCredentialsUpdateParameters struct { + Value []PasswordCredential `json:"value,omitempty"` +} + +type PasswordCredential struct { + CustomKeyIdentifier []byte `json:"customKeyIdentifier,omitempty"` + KeyId string `json:"keyId,omitempty"` + Value string `json:"value,omitempty"` + StartDate time.Time `json:"startDate,omitempty"` + EndDate time.Time `json:"endDate,omitempty"` +} + +type 
ServicePrincipalListResult struct { + autorest.Response `json:"-"` + Value []ServicePrincipal `json:"value,omitempty"` +} + +type ServicePrincipalCreateParameters struct { + ApplicationID string `json:"appId,omitempty"` + AccountEnabled bool `json:"accountEnabled,omitempty"` + PasswordCredentials []PasswordCredential `json:"passwordCredentials,omitempty"` +} + +type ServicePrincipal struct { + autorest.Response `json:"-"` + ApplicationID string `json:"appId,omitempty"` + ObjectID string `json:"objectId,omitempty"` + AccountEnabled bool `json:"accountEnabled,omitempty"` +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/azure/internal/ad/serviceprincipals.go juju-core-2.0.0/src/github.com/juju/juju/provider/azure/internal/ad/serviceprincipals.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/azure/internal/ad/serviceprincipals.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/azure/internal/ad/serviceprincipals.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,229 @@ +// This file is based on code from Azure/azure-sdk-for-go, +// which is Copyright Microsoft Corporation. See the LICENSE +// file in this directory for details. +// +// NOTE(axw) this file contains a client for a subset of the +// Microsoft Graph API, which is not currently supported by +// the Azure SDK. When it is, this will be deleted. 
+ +package ad + +import ( + "net/http" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" +) + +type ServicePrincipalsClient struct { + ManagementClient +} + +func (client ServicePrincipalsClient) Create(parameters ServicePrincipalCreateParameters, cancel <-chan struct{}) (result ServicePrincipal, err error) { + req, err := client.CreatePreparer(parameters, cancel) + if err != nil { + return result, autorest.NewErrorWithError(err, "ad.ServicePrincipalsClient", "Create", nil, "Failure preparing request") + } + + resp, err := client.CreateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "ad.ServicePrincipalsClient", "Create", nil, "Failure sending request") + } + + result, err = client.CreateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "ad.ServicePrincipalsClient", "Create", nil, "Failure responding to request") + } + + return +} + +func (client ServicePrincipalsClient) CreatePreparer(parameters ServicePrincipalCreateParameters, cancel <-chan struct{}) (*http.Request, error) { + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/servicePrincipals"), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +func (client ServicePrincipalsClient) CreateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +func (client ServicePrincipalsClient) CreateResponder(resp *http.Response) (result ServicePrincipal, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + WithOdataErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + 
autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +func (client ServicePrincipalsClient) List(filter string) (result ServicePrincipalListResult, err error) { + req, err := client.ListPreparer(filter) + if err != nil { + return result, autorest.NewErrorWithError(err, "ad.ServicePrincipalsClient", "List", nil, "Failure preparing request") + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "ad.ServicePrincipalsClient", "List", nil, "Failure sending request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "ad.ServicePrincipalsClient", "List", nil, "Failure responding to request") + } + + return +} + +func (client ServicePrincipalsClient) ListPreparer(filter string) (*http.Request, error) { + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + if filter != "" { + queryParameters["$filter"] = autorest.Encode("query", filter) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/servicePrincipals"), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +func (client ServicePrincipalsClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +func (client ServicePrincipalsClient) ListResponder(resp *http.Response) (result ServicePrincipalListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + WithOdataErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +func (client ServicePrincipalsClient) ListPasswordCredentials(objectId string) (result PasswordCredentialsListResult, err 
error) { + req, err := client.ListPasswordCredentialsPreparer(objectId) + if err != nil { + return result, autorest.NewErrorWithError(err, "ad.ServicePrincipalsClient", "ListPasswordCredentials", nil, "Failure preparing request") + } + + resp, err := client.ListPasswordCredentialsSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "ad.ServicePrincipalsClient", "ListPasswordCredentials", nil, "Failure sending request") + } + + result, err = client.ListPasswordCredentialsResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "ad.ServicePrincipalsClient", "ListPasswordCredentials", nil, "Failure responding to request") + } + + return +} + +func (client ServicePrincipalsClient) ListPasswordCredentialsPreparer(objectId string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "objectId": autorest.Encode("path", objectId), + } + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/servicePrincipals/{objectId}/passwordCredentials", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +func (client ServicePrincipalsClient) ListPasswordCredentialsSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +func (client ServicePrincipalsClient) ListPasswordCredentialsResponder(resp *http.Response) (result PasswordCredentialsListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + WithOdataErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +func (client ServicePrincipalsClient) UpdatePasswordCredentials(objectId string, parameters 
PasswordCredentialsUpdateParameters) (result autorest.Response, err error) { + req, err := client.UpdatePasswordCredentialsPreparer(objectId, parameters) + if err != nil { + return result, autorest.NewErrorWithError(err, "ad.ServicePrincipalsClient", "UpdatePasswordCredentials", nil, "Failure preparing request") + } + + resp, err := client.UpdatePasswordCredentialsSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "ad.ServicePrincipalsClient", "UpdatePasswordCredentials", nil, "Failure sending request") + } + + result, err = client.UpdatePasswordCredentialsResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "ad.ServicePrincipalsClient", "UpdatePasswordCredentials", nil, "Failure responding to request") + } + + return +} + +func (client ServicePrincipalsClient) UpdatePasswordCredentialsPreparer(objectId string, parameters PasswordCredentialsUpdateParameters) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "objectId": autorest.Encode("path", objectId), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPatch(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/servicePrincipals/{objectId}/passwordCredentials", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +func (client ServicePrincipalsClient) UpdatePasswordCredentialsSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +func (client ServicePrincipalsClient) UpdatePasswordCredentialsResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + WithOdataErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), + 
autorest.ByClosing()) + result.Response = resp + return +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/azure/internal/ad/users.go juju-core-2.0.0/src/github.com/juju/juju/provider/azure/internal/ad/users.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/azure/internal/ad/users.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/azure/internal/ad/users.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,67 @@ +// This file is based on code from Azure/azure-sdk-for-go, +// which is Copyright Microsoft Corporation. See the LICENSE +// file in this directory for details. +// +// NOTE(axw) this file contains a client for a subset of the +// Microsoft Graph API, which is not currently supported by +// the Azure SDK. When it is, this will be deleted. + +package ad + +import ( + "net/http" + + "github.com/Azure/go-autorest/autorest" +) + +type UsersClient struct { + ManagementClient +} + +func (client UsersClient) GetCurrentUser() (result AADObject, err error) { + req, err := client.GetCurrentUserPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "ad.UsersClient", "GetCurrentUser", nil, "Failure preparing request") + } + + resp, err := client.GetCurrentUserSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "ad.UsersClient", "GetCurrentUser", nil, "Failure sending request") + } + + result, err = client.GetCurrentUserResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "ad.UsersClient", "GetCurrentUser", nil, "Failure responding to request") + } + + return +} + +func (client UsersClient) GetCurrentUserPreparer() (*http.Request, error) { + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/me"), + 
autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +func (client UsersClient) GetCurrentUserSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +func (client UsersClient) GetCurrentUserResponder(resp *http.Response) (result AADObject, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + WithOdataErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/azure/internal/armtemplates/template.go juju-core-2.0.0/src/github.com/juju/juju/provider/azure/internal/armtemplates/template.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/azure/internal/armtemplates/template.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/azure/internal/armtemplates/template.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,44 @@ +package armtemplates + +import "github.com/Azure/azure-sdk-for-go/arm/storage" + +const ( + schema = "http://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#" + contentVersion = "1.0.0.0" +) + +// Template represents an Azure Resource Manager (ARM) Template. +// See: https://azure.microsoft.com/en-us/documentation/articles/resource-group-authoring-templates/ +type Template struct { + // Resources contains the definitions of resources that will + // be created by the template. + Resources []Resource `json:"resources"` +} + +// Map returns the template as a map, suitable for use in +// azure-sdk-for-go/arm/resources/resources/DeploymentProperties.Template. +func (t *Template) Map() (map[string]interface{}, error) { + m := map[string]interface{}{ + "$schema": schema, + "contentVersion": contentVersion, + "resources": t.Resources, + } + return m, nil +} + +// Resource describes a template resource. 
For information on the +// individual fields, see https://azure.microsoft.com/en-us/documentation/articles/resource-group-authoring-templates/. +type Resource struct { + APIVersion string `json:"apiVersion"` + Type string `json:"type"` + Name string `json:"name"` + Location string `json:"location,omitempty"` + Tags map[string]string `json:"tags,omitempty"` + Comments string `json:"comments,omitempty"` + DependsOn []string `json:"dependsOn,omitempty"` + Properties interface{} `json:"properties,omitempty"` + Resources []Resource `json:"resources,omitempty"` + + // Non-uniform attributes. + StorageSku *storage.Sku `json:"sku,omitempty"` +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/azure/internal/azureauth/discovery.go juju-core-2.0.0/src/github.com/juju/juju/provider/azure/internal/azureauth/discovery.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/azure/internal/azureauth/discovery.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/azure/internal/azureauth/discovery.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,59 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package azureauth + +import ( + "net/http" + "net/url" + "regexp" + "strings" + + "github.com/Azure/azure-sdk-for-go/arm/resources/subscriptions" + "github.com/juju/errors" + "github.com/juju/utils" +) + +const authenticateHeaderKey = "WWW-Authenticate" + +var authorizationUriRegexp = regexp.MustCompile(`authorization_uri="([^"]*)"`) + +// DiscoverAuthorizationID returns the OAuth authorization URI for the given +// subscription ID. This can be used to determine the AD tenant ID. +func DiscoverAuthorizationURI(client subscriptions.Client, subscriptionID string) (*url.URL, error) { + // We make an unauthenticated request to the Azure API, which + // responds with the authentication URL with the tenant ID in it. 
+ result, err := client.Get(subscriptionID) + if err == nil { + return nil, errors.New("expected unauthorized error response") + } + if result.Response.Response == nil { + return nil, errors.Trace(err) + } + if result.StatusCode != http.StatusUnauthorized { + return nil, errors.Annotatef(err, "expected unauthorized error response, got %v", result.StatusCode) + } + + header := result.Header.Get(authenticateHeaderKey) + if header == "" { + return nil, errors.Errorf("%s header not found", authenticateHeaderKey) + } + match := authorizationUriRegexp.FindStringSubmatch(header) + if match == nil { + return nil, errors.Errorf( + "authorization_uri not found in %s header (%q)", + authenticateHeaderKey, header, + ) + } + return url.Parse(match[1]) +} + +// AuthorizationURITenantID returns the tenant ID portion of the given URL, +// which is expected to have come from DiscoverAuthorizationURI. +func AuthorizationURITenantID(url *url.URL) (string, error) { + path := strings.TrimPrefix(url.Path, "/") + if _, err := utils.UUIDFromString(path); err != nil { + return "", errors.NotValidf("authorization_uri %q", url) + } + return path, nil +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/azure/internal/azureauth/discovery_test.go juju-core-2.0.0/src/github.com/juju/juju/provider/azure/internal/azureauth/discovery_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/azure/internal/azureauth/discovery_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/azure/internal/azureauth/discovery_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,100 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+ +package azureauth_test + +import ( + "net/http" + "net/url" + + "github.com/Azure/azure-sdk-for-go/arm/resources/subscriptions" + "github.com/Azure/go-autorest/autorest/mocks" + "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/provider/azure/internal/azureauth" +) + +type DiscoverySuite struct { + testing.IsolationSuite +} + +var _ = gc.Suite(&DiscoverySuite{}) + +func (*DiscoverySuite) TestDiscoverAuthorizationURI(c *gc.C) { + sender := mocks.NewSender() + resp := mocks.NewResponseWithStatus("", http.StatusUnauthorized) + mocks.SetResponseHeaderValues(resp, "WWW-Authenticate", []string{ + `foo bar authorization_uri="https://testing.invalid/meep" baz`, + }) + sender.AppendResponse(resp) + + client := subscriptions.NewClient() + client.Sender = sender + authURI, err := azureauth.DiscoverAuthorizationURI(client, "subscription_id") + c.Assert(err, jc.ErrorIsNil) + c.Assert(authURI, jc.DeepEquals, &url.URL{ + Scheme: "https", + Host: "testing.invalid", + Path: "/meep", + }) +} + +func (*DiscoverySuite) TestDiscoverAuthorizationURIMissingHeader(c *gc.C) { + sender := mocks.NewSender() + resp := mocks.NewResponseWithStatus("", http.StatusUnauthorized) + sender.AppendResponse(resp) + + client := subscriptions.NewClient() + client.Sender = sender + _, err := azureauth.DiscoverAuthorizationURI(client, "subscription_id") + c.Assert(err, gc.ErrorMatches, `WWW-Authenticate header not found`) +} + +func (*DiscoverySuite) TestDiscoverAuthorizationURIHeaderMismatch(c *gc.C) { + sender := mocks.NewSender() + resp := mocks.NewResponseWithStatus("", http.StatusUnauthorized) + mocks.SetResponseHeaderValues(resp, "WWW-Authenticate", []string{`foo bar baz`}) + sender.AppendResponse(resp) + + client := subscriptions.NewClient() + client.Sender = sender + _, err := azureauth.DiscoverAuthorizationURI(client, "subscription_id") + c.Assert(err, gc.ErrorMatches, `authorization_uri not found in WWW-Authenticate header 
\("foo bar baz"\)`) +} + +func (*DiscoverySuite) TestDiscoverAuthorizationURIUnexpectedSuccess(c *gc.C) { + sender := mocks.NewSender() + resp := mocks.NewResponseWithStatus("", http.StatusOK) + sender.AppendResponse(resp) + + client := subscriptions.NewClient() + client.Sender = sender + _, err := azureauth.DiscoverAuthorizationURI(client, "subscription_id") + c.Assert(err, gc.ErrorMatches, "expected unauthorized error response") +} + +func (*DiscoverySuite) TestDiscoverAuthorizationURIUnexpectedStatusCode(c *gc.C) { + sender := mocks.NewSender() + resp := mocks.NewResponseWithStatus("", http.StatusNotFound) + sender.AppendResponse(resp) + + client := subscriptions.NewClient() + client.Sender = sender + _, err := azureauth.DiscoverAuthorizationURI(client, "subscription_id") + c.Assert(err, gc.ErrorMatches, "expected unauthorized error response, got 404: .*") +} + +func (*DiscoverySuite) TestAuthorizationURITenantID(c *gc.C) { + tenantId, err := azureauth.AuthorizationURITenantID(&url.URL{Path: "/3671f5a9-c0d0-472b-a80c-48135cf5a9f1"}) + c.Assert(err, jc.ErrorIsNil) + c.Assert(tenantId, gc.Equals, "3671f5a9-c0d0-472b-a80c-48135cf5a9f1") +} + +func (*DiscoverySuite) TestAuthorizationURITenantIDError(c *gc.C) { + url, err := url.Parse("https://testing.invalid/foo") + c.Assert(err, jc.ErrorIsNil) + _, err = azureauth.AuthorizationURITenantID(url) + c.Assert(err, gc.ErrorMatches, `authorization_uri "https://testing.invalid/foo" not valid`) +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/azure/internal/azureauth/interactive.go juju-core-2.0.0/src/github.com/juju/juju/provider/azure/internal/azureauth/interactive.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/azure/internal/azureauth/interactive.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/azure/internal/azureauth/interactive.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,348 @@ +// Copyright 2016 Canonical Ltd. 
+// Licensed under the AGPLv3, see LICENCE file for details. + +package azureauth + +import ( + "fmt" + "io" + "net/url" + "path" + "time" + + "github.com/Azure/azure-sdk-for-go/arm/authorization" + "github.com/Azure/azure-sdk-for-go/arm/resources/subscriptions" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/to" + "github.com/juju/errors" + "github.com/juju/loggo" + "github.com/juju/utils" + "github.com/juju/utils/clock" + + "github.com/juju/juju/provider/azure/internal/ad" + "github.com/juju/juju/provider/azure/internal/errorutils" + "github.com/juju/juju/provider/azure/internal/tracing" +) + +var logger = loggo.GetLogger("juju.provider.azure.internal.azureauth") + +const ( + // jujuApplicationId is the ID of the Azure application that we use + // for interactive authentication. When the user logs in, a service + // principal will be created in their Active Directory tenant for + // the application. + jujuApplicationId = "cbb548f1-5039-4836-af0b-727e8571f6a9" + + // passwordExpiryDuration is how long the application password we + // set will remain valid. + passwordExpiryDuration = 365 * 24 * time.Hour +) + +// InteractiveCreateServicePrincipalFunc is a function type for +// interactively creating service principals for a subscription. +type InteractiveCreateServicePrincipalFunc func( + stderr io.Writer, + sender autorest.Sender, + requestInspector autorest.PrepareDecorator, + resourceManagerEndpoint string, + graphEndpoint string, + subscriptionId string, + clock clock.Clock, + newUUID func() (utils.UUID, error), +) (appId, password string, _ error) + +// InteractiveCreateServicePrincipal interactively creates service +// principals for a subscription. 
+func InteractiveCreateServicePrincipal( + stderr io.Writer, + sender autorest.Sender, + requestInspector autorest.PrepareDecorator, + resourceManagerEndpoint string, + graphEndpoint string, + subscriptionId string, + clock clock.Clock, + newUUID func() (utils.UUID, error), +) (appId, password string, _ error) { + + subscriptionsClient := subscriptions.Client{ + subscriptions.NewWithBaseURI(resourceManagerEndpoint), + } + subscriptionsClient.Sender = sender + setClientInspectors(&subscriptionsClient.Client, requestInspector, "azure.subscriptions") + + oauthConfig, tenantId, err := OAuthConfig( + subscriptionsClient, + resourceManagerEndpoint, + subscriptionId, + ) + if err != nil { + return "", "", errors.Trace(err) + } + + client := autorest.NewClientWithUserAgent("juju") + client.Sender = sender + setClientInspectors(&client, requestInspector, "azure.autorest") + + // Perform the interactive authentication. The user will be prompted to + // open a URL and input a device code, after which they will have to + // enter their username and password if they are not already + // authenticated with Azure. + fmt.Fprintln(stderr, "Initiating interactive authentication.") + fmt.Fprintln(stderr) + armResource := TokenResource(resourceManagerEndpoint) + clientId := jujuApplicationId + deviceCode, err := azure.InitiateDeviceAuth(&client, *oauthConfig, clientId, armResource) + if err != nil { + return "", "", errors.Annotate(err, "initiating interactive authentication") + } + fmt.Fprintln(stderr, to.String(deviceCode.Message)+"\n") + token, err := azure.WaitForUserCompletion(&client, deviceCode) + if err != nil { + return "", "", errors.Annotate(err, "waiting for interactive authentication to completed") + } + + // Create service principal tokens that we can use to authorize API + // requests to Active Directory and Resource Manager. 
These tokens + // are only valid for a short amount of time, so we must create a + // service principal password that can be used to obtain new tokens. + armSpt, err := azure.NewServicePrincipalTokenFromManualToken(*oauthConfig, clientId, armResource, *token) + if err != nil { + return "", "", errors.Annotate(err, "creating temporary ARM service principal token") + } + if client.Sender != nil { + armSpt.SetSender(client.Sender) + } + if err := armSpt.Refresh(); err != nil { + return "", "", errors.Trace(err) + } + + // The application requires permissions for both ARM and AD, so we + // can use the token for both APIs. + graphResource := TokenResource(graphEndpoint) + graphToken := armSpt.Token + graphToken.Resource = graphResource + graphSpt, err := azure.NewServicePrincipalTokenFromManualToken(*oauthConfig, clientId, graphResource, graphToken) + if err != nil { + return "", "", errors.Annotate(err, "creating temporary Graph service principal token") + } + if client.Sender != nil { + graphSpt.SetSender(client.Sender) + } + if err := graphSpt.Refresh(); err != nil { + return "", "", errors.Trace(err) + } + + directoryURL, err := url.Parse(graphEndpoint) + if err != nil { + return "", "", errors.Annotate(err, "parsing identity endpoint") + } + directoryURL.Path = path.Join(directoryURL.Path, tenantId) + directoryClient := ad.NewManagementClient(directoryURL.String()) + authorizationClient := authorization.NewWithBaseURI(resourceManagerEndpoint, subscriptionId) + directoryClient.Authorizer = graphSpt + authorizationClient.Authorizer = armSpt + authorizationClient.Sender = client.Sender + directoryClient.Sender = client.Sender + setClientInspectors(&directoryClient.Client, requestInspector, "azure.directory") + setClientInspectors(&authorizationClient.Client, requestInspector, "azure.authorization") + + userObject, err := ad.UsersClient{directoryClient}.GetCurrentUser() + if err != nil { + return "", "", errors.Trace(err) + } + fmt.Fprintf(stderr, "Authenticated as 
%q.\n", userObject.DisplayName) + + fmt.Fprintln(stderr, "Creating/updating service principal.") + servicePrincipalObjectId, password, err := createOrUpdateServicePrincipal( + ad.ServicePrincipalsClient{directoryClient}, + subscriptionId, + clock, + newUUID, + ) + if err != nil { + return "", "", errors.Trace(err) + } + + fmt.Fprintln(stderr, "Assigning Owner role to service principal.") + if err := createRoleAssignment( + authorizationClient, + subscriptionId, + servicePrincipalObjectId, + newUUID, + ); err != nil { + return "", "", errors.Trace(err) + } + return jujuApplicationId, password, nil +} + +func setClientInspectors( + client *autorest.Client, + requestInspector autorest.PrepareDecorator, + loggingModule string, +) { + logger := loggo.GetLogger(loggingModule) + client.ResponseInspector = tracing.RespondDecorator(logger) + client.RequestInspector = tracing.PrepareDecorator(logger) + if requestInspector != nil { + tracer := client.RequestInspector + client.RequestInspector = func(p autorest.Preparer) autorest.Preparer { + p = tracer(p) + p = requestInspector(p) + return p + } + } +} + +func createOrUpdateServicePrincipal( + client ad.ServicePrincipalsClient, + subscriptionId string, + clock clock.Clock, + newUUID func() (utils.UUID, error), +) (servicePrincipalObjectId, password string, _ error) { + passwordCredential, err := preparePasswordCredential(clock, newUUID) + if err != nil { + return "", "", errors.Annotate(err, "preparing password credential") + } + + servicePrincipal, err := client.Create( + ad.ServicePrincipalCreateParameters{ + ApplicationID: jujuApplicationId, + AccountEnabled: true, + PasswordCredentials: []ad.PasswordCredential{passwordCredential}, + }, + nil, // abort + ) + if err != nil { + if !isMultipleObjectsWithSameKeyValueErr(err) { + return "", "", errors.Trace(err) + } + // The service principal already exists, so we'll fall out + // and update the service principal's password credentials. 
+ } else { + // The service principal was created successfully, with the + // requested password credential. + return servicePrincipal.ObjectID, passwordCredential.Value, nil + } + + // The service principal already exists, so we need to query + // its object ID, and fetch the existing password credentials + // to update. + servicePrincipal, err = getServicePrincipal(client) + if err != nil { + return "", "", errors.Trace(err) + } + if err := addServicePrincipalPasswordCredential( + client, servicePrincipal.ObjectID, + passwordCredential, + ); err != nil { + return "", "", errors.Annotate(err, "updating password credentials") + } + return servicePrincipal.ObjectID, passwordCredential.Value, nil +} + +func isMultipleObjectsWithSameKeyValueErr(err error) bool { + if err, ok := errorutils.ServiceError(err); ok { + return err.Code == "Request_MultipleObjectsWithSameKeyValue" + } + return false +} + +func preparePasswordCredential( + clock clock.Clock, + newUUID func() (utils.UUID, error), +) (ad.PasswordCredential, error) { + password, err := newUUID() + if err != nil { + return ad.PasswordCredential{}, errors.Annotate(err, "generating password") + } + passwordKeyUUID, err := newUUID() + if err != nil { + return ad.PasswordCredential{}, errors.Annotate(err, "generating password key ID") + } + startDate := clock.Now().UTC() + endDate := startDate.Add(passwordExpiryDuration) + return ad.PasswordCredential{ + CustomKeyIdentifier: []byte("juju-" + startDate.Format("20060102")), + KeyId: passwordKeyUUID.String(), + Value: password.String(), + StartDate: startDate, + EndDate: endDate, + }, nil +} + +func addServicePrincipalPasswordCredential( + client ad.ServicePrincipalsClient, + servicePrincipalObjectId string, + passwordCredential ad.PasswordCredential, +) error { + existing, err := client.ListPasswordCredentials(servicePrincipalObjectId) + if err != nil { + return errors.Trace(err) + } + passwordCredentials := append(existing.Value, passwordCredential) + _, err = 
client.UpdatePasswordCredentials( + servicePrincipalObjectId, + ad.PasswordCredentialsUpdateParameters{passwordCredentials}, + ) + return errors.Trace(err) +} + +func getServicePrincipal(client ad.ServicePrincipalsClient) (ad.ServicePrincipal, error) { + // TODO(axw) filter by Service Principal Name (SPN). + // It works without that, but the response is noisy. + result, err := client.List("") + if err != nil { + return ad.ServicePrincipal{}, errors.Annotate(err, "listing service principals") + } + for _, sp := range result.Value { + if sp.ApplicationID == jujuApplicationId { + return sp, nil + } + } + return ad.ServicePrincipal{}, errors.NotFoundf("service principal") +} + +func createRoleAssignment( + authorizationClient authorization.ManagementClient, + subscriptionId string, + servicePrincipalObjectId string, + newUUID func() (utils.UUID, error), +) error { + // Find the role definition with the name "Owner". + roleScope := path.Join("subscriptions", subscriptionId) + roleDefinitionsClient := authorization.RoleDefinitionsClient{authorizationClient} + result, err := roleDefinitionsClient.List(roleScope, "roleName eq 'Owner'") + if err != nil { + return errors.Annotate(err, "listing role definitions") + } + if result.Value == nil || len(*result.Value) == 0 { + return errors.NotFoundf("Owner role definition") + } + roleDefinitionId := (*result.Value)[0].ID + + // The UUID value for the role assignment name is unimportant. Azure + // will prevent multiple role assignments for the same role definition + // and principal pair. 
+ roleAssignmentUUID, err := newUUID() + if err != nil { + return errors.Annotate(err, "generating role assignment ID") + } + roleAssignmentsClient := authorization.RoleAssignmentsClient{authorizationClient} + roleAssignmentName := roleAssignmentUUID.String() + if _, err := roleAssignmentsClient.Create(roleScope, roleAssignmentName, authorization.RoleAssignmentCreateParameters{ + Properties: &authorization.RoleAssignmentProperties{ + RoleDefinitionID: roleDefinitionId, + PrincipalID: to.StringPtr(servicePrincipalObjectId), + }, + }); err != nil { + if err, ok := errorutils.ServiceError(err); ok { + const serviceErrorCodeRoleAssignmentExists = "RoleAssignmentExists" + if err.Code == serviceErrorCodeRoleAssignmentExists { + return nil + } + } + return errors.Annotate(err, "creating role assignment") + } + return nil +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/azure/internal/azureauth/interactive_test.go juju-core-2.0.0/src/github.com/juju/juju/provider/azure/internal/azureauth/interactive_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/azure/internal/azureauth/interactive_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/azure/internal/azureauth/interactive_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,298 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+ +package azureauth_test + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "time" + + "github.com/Azure/azure-sdk-for-go/arm/authorization" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/mocks" + "github.com/Azure/go-autorest/autorest/to" + "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + "github.com/juju/utils" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/provider/azure/internal/ad" + "github.com/juju/juju/provider/azure/internal/azureauth" + "github.com/juju/juju/provider/azure/internal/azuretesting" +) + +func clockStartTime() time.Time { + t, _ := time.Parse("2006-Jan-02 3:04am", "2016-Sep-19 9:47am") + return t +} + +type InteractiveSuite struct { + testing.IsolationSuite + clock *testing.Clock + newUUID func() (utils.UUID, error) +} + +var _ = gc.Suite(&InteractiveSuite{}) + +func deviceCodeSender() autorest.Sender { + return azuretesting.NewSenderWithValue(azure.DeviceCode{ + DeviceCode: to.StringPtr("device-code"), + Interval: to.Int64Ptr(1), // 1 second between polls + Message: to.StringPtr("open your browser, etc."), + }) +} + +func tokenSender() autorest.Sender { + return azuretesting.NewSenderWithValue(azure.Token{ + RefreshToken: "refresh-token", + ExpiresOn: fmt.Sprint(time.Now().Add(time.Hour).Unix()), + }) +} + +func passwordCredentialsListSender() autorest.Sender { + return azuretesting.NewSenderWithValue(ad.PasswordCredentialsListResult{ + Value: []ad.PasswordCredential{{ + KeyId: "password-credential-key-id", + }}, + }) +} + +func updatePasswordCredentialsSender() autorest.Sender { + sender := mocks.NewSender() + sender.AppendResponse(mocks.NewResponseWithStatus("", http.StatusNoContent)) + return sender +} + +func currentUserSender() autorest.Sender { + return azuretesting.NewSenderWithValue(ad.AADObject{ + DisplayName: "Foo Bar", + }) +} + +func createServicePrincipalSender() autorest.Sender { + return 
azuretesting.NewSenderWithValue(ad.ServicePrincipal{ + ApplicationID: "cbb548f1-5039-4836-af0b-727e8571f6a9", + ObjectID: "sp-object-id", + }) +} + +func createServicePrincipalAlreadyExistsSender() autorest.Sender { + sender := mocks.NewSender() + body := mocks.NewBody(`{"odata.error":{"code":"Request_MultipleObjectsWithSameKeyValue"}}`) + sender.AppendResponse(mocks.NewResponseWithBodyAndStatus(body, http.StatusConflict, "")) + return sender +} + +func servicePrincipalListSender() autorest.Sender { + return azuretesting.NewSenderWithValue(ad.ServicePrincipalListResult{ + Value: []ad.ServicePrincipal{{ + ApplicationID: "cbb548f1-5039-4836-af0b-727e8571f6a9", + ObjectID: "sp-object-id", + }}, + }) +} + +func roleDefinitionListSender() autorest.Sender { + roleDefinitions := []authorization.RoleDefinition{{ + ID: to.StringPtr("owner-role-id"), + Name: to.StringPtr("Owner"), + }} + return azuretesting.NewSenderWithValue(authorization.RoleDefinitionListResult{ + Value: &roleDefinitions, + }) +} + +func roleAssignmentSender() autorest.Sender { + return azuretesting.NewSenderWithValue(authorization.RoleAssignment{}) +} + +func roleAssignmentAlreadyExistsSender() autorest.Sender { + sender := mocks.NewSender() + body := mocks.NewBody(`{"error":{"code":"RoleAssignmentExists"}}`) + sender.AppendResponse(mocks.NewResponseWithBodyAndStatus(body, http.StatusConflict, "")) + return sender +} + +func (s *InteractiveSuite) SetUpTest(c *gc.C) { + s.IsolationSuite.SetUpTest(c) + uuids := []string{ + "33333333-3333-3333-3333-333333333333", // password + "44444444-4444-4444-4444-444444444444", // password key ID + "55555555-5555-5555-5555-555555555555", // role assignment ID + } + s.newUUID = func() (utils.UUID, error) { + uuid, err := utils.UUIDFromString(uuids[0]) + if err != nil { + return utils.UUID{}, err + } + uuids = uuids[1:] + return uuid, nil + } + s.clock = testing.NewClock(clockStartTime()) +} + +func (s *InteractiveSuite) TestInteractive(c *gc.C) { + + var requests 
[]*http.Request + senders := azuretesting.Senders{ + oauthConfigSender(), + deviceCodeSender(), + tokenSender(), // CheckForUserCompletion returns a token. + + // Token.Refresh returns a token. We do this + // twice: once for ARM, and once for AAD. + tokenSender(), + tokenSender(), + + currentUserSender(), + createServicePrincipalSender(), + roleDefinitionListSender(), + roleAssignmentSender(), + } + + var stderr bytes.Buffer + subscriptionId := "22222222-2222-2222-2222-222222222222" + appId, password, err := azureauth.InteractiveCreateServicePrincipal( + &stderr, + &senders, + azuretesting.RequestRecorder(&requests), + "https://arm.invalid", + "https://graph.invalid", + subscriptionId, + s.clock, + s.newUUID, + ) + c.Assert(err, jc.ErrorIsNil) + c.Assert(appId, gc.Equals, "cbb548f1-5039-4836-af0b-727e8571f6a9") + c.Assert(password, gc.Equals, "33333333-3333-3333-3333-333333333333") + c.Assert(stderr.String(), gc.Equals, ` +Initiating interactive authentication. + +open your browser, etc. + +Authenticated as "Foo Bar". +Creating/updating service principal. +Assigning Owner role to service principal. +`[1:]) + + // Token refreshes don't go through the inspectors. 
+ c.Assert(requests, gc.HasLen, 7) + c.Check(requests[0].URL.Path, gc.Equals, "/subscriptions/22222222-2222-2222-2222-222222222222") + c.Check(requests[1].URL.Path, gc.Equals, "/11111111-1111-1111-1111-111111111111/oauth2/devicecode") + c.Check(requests[2].URL.Path, gc.Equals, "/11111111-1111-1111-1111-111111111111/oauth2/token") + c.Check(requests[3].URL.Path, gc.Equals, "/11111111-1111-1111-1111-111111111111/me") + c.Check(requests[4].URL.Path, gc.Equals, "/11111111-1111-1111-1111-111111111111/servicePrincipals") + c.Check(requests[5].URL.Path, gc.Equals, "/subscriptions/22222222-2222-2222-2222-222222222222/providers/Microsoft.Authorization/roleDefinitions") + c.Check(requests[6].URL.Path, gc.Equals, "/subscriptions/22222222-2222-2222-2222-222222222222/providers/Microsoft.Authorization/roleAssignments/55555555-5555-5555-5555-555555555555") + + // The service principal creation includes the password. Check that the + // password returned from the function is the same as the one set in the + // request. 
+ var params ad.ServicePrincipalCreateParameters + err = json.NewDecoder(requests[4].Body).Decode(¶ms) + c.Assert(err, jc.ErrorIsNil) + c.Assert(params.PasswordCredentials, gc.HasLen, 1) + assertPasswordCredential(c, params.PasswordCredentials[0]) +} + +func assertPasswordCredential(c *gc.C, cred ad.PasswordCredential) { + startDate := cred.StartDate + endDate := cred.EndDate + c.Assert(startDate, gc.Equals, clockStartTime()) + c.Assert(endDate.Sub(startDate), gc.Equals, 365*24*time.Hour) + + cred.StartDate = time.Time{} + cred.EndDate = time.Time{} + c.Assert(cred, jc.DeepEquals, ad.PasswordCredential{ + CustomKeyIdentifier: []byte("juju-20160919"), + KeyId: "44444444-4444-4444-4444-444444444444", + Value: "33333333-3333-3333-3333-333333333333", + }) +} + +func (s *InteractiveSuite) TestInteractiveRoleAssignmentAlreadyExists(c *gc.C) { + var requests []*http.Request + senders := azuretesting.Senders{ + oauthConfigSender(), + deviceCodeSender(), + tokenSender(), + tokenSender(), + tokenSender(), + currentUserSender(), + createServicePrincipalSender(), + roleDefinitionListSender(), + roleAssignmentAlreadyExistsSender(), + } + _, _, err := azureauth.InteractiveCreateServicePrincipal( + ioutil.Discard, + &senders, + azuretesting.RequestRecorder(&requests), + "https://arm.invalid", + "https://graph.invalid", + "22222222-2222-2222-2222-222222222222", + s.clock, + s.newUUID, + ) + c.Assert(err, jc.ErrorIsNil) +} + +func (s *InteractiveSuite) TestInteractiveServicePrincipalAlreadyExists(c *gc.C) { + var requests []*http.Request + senders := azuretesting.Senders{ + oauthConfigSender(), + deviceCodeSender(), + tokenSender(), + tokenSender(), + tokenSender(), + currentUserSender(), + createServicePrincipalAlreadyExistsSender(), + servicePrincipalListSender(), + passwordCredentialsListSender(), + updatePasswordCredentialsSender(), + roleDefinitionListSender(), + roleAssignmentAlreadyExistsSender(), + } + _, password, err := azureauth.InteractiveCreateServicePrincipal( + 
ioutil.Discard, + &senders, + azuretesting.RequestRecorder(&requests), + "https://arm.invalid", + "https://graph.invalid", + "22222222-2222-2222-2222-222222222222", + s.clock, + s.newUUID, + ) + c.Assert(err, jc.ErrorIsNil) + c.Assert(password, gc.Equals, "33333333-3333-3333-3333-333333333333") + + c.Assert(requests, gc.HasLen, 10) + c.Check(requests[0].URL.Path, gc.Equals, "/subscriptions/22222222-2222-2222-2222-222222222222") + c.Check(requests[1].URL.Path, gc.Equals, "/11111111-1111-1111-1111-111111111111/oauth2/devicecode") + c.Check(requests[2].URL.Path, gc.Equals, "/11111111-1111-1111-1111-111111111111/oauth2/token") + c.Check(requests[3].URL.Path, gc.Equals, "/11111111-1111-1111-1111-111111111111/me") + c.Check(requests[4].URL.Path, gc.Equals, "/11111111-1111-1111-1111-111111111111/servicePrincipals") // create + c.Check(requests[5].URL.Path, gc.Equals, "/11111111-1111-1111-1111-111111111111/servicePrincipals") // list + c.Check(requests[6].URL.Path, gc.Equals, "/11111111-1111-1111-1111-111111111111/servicePrincipals/sp-object-id/passwordCredentials") // list + c.Check(requests[7].URL.Path, gc.Equals, "/11111111-1111-1111-1111-111111111111/servicePrincipals/sp-object-id/passwordCredentials") // update + c.Check(requests[8].URL.Path, gc.Equals, "/subscriptions/22222222-2222-2222-2222-222222222222/providers/Microsoft.Authorization/roleDefinitions") + c.Check(requests[9].URL.Path, gc.Equals, "/subscriptions/22222222-2222-2222-2222-222222222222/providers/Microsoft.Authorization/roleAssignments/55555555-5555-5555-5555-555555555555") + + // Make sure that we don't wipe existing password credentials, and that + // the new password credential matches the one returned from the + // function. 
+ var params ad.PasswordCredentialsUpdateParameters + err = json.NewDecoder(requests[7].Body).Decode(¶ms) + c.Assert(err, jc.ErrorIsNil) + c.Assert(params.Value, gc.HasLen, 2) + c.Assert(params.Value[0], jc.DeepEquals, ad.PasswordCredential{ + KeyId: "password-credential-key-id", + StartDate: time.Time{}.UTC(), + EndDate: time.Time{}.UTC(), + }) + assertPasswordCredential(c, params.Value[1]) +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/azure/internal/azureauth/oauth.go juju-core-2.0.0/src/github.com/juju/juju/provider/azure/internal/azureauth/oauth.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/azure/internal/azureauth/oauth.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/azure/internal/azureauth/oauth.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,41 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package azureauth + +import ( + "github.com/Azure/azure-sdk-for-go/arm/resources/subscriptions" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/juju/errors" +) + +// OAuthConfig returns an azure.OAuthConfig based on the given resource +// manager endpoint and subscription ID. This will make a request to the +// resource manager API to discover the Active Directory tenant ID. +func OAuthConfig( + client subscriptions.Client, + resourceManagerEndpoint string, + subscriptionId string, +) (*azure.OAuthConfig, string, error) { + authURI, err := DiscoverAuthorizationURI(client, subscriptionId) + if err != nil { + return nil, "", errors.Annotate(err, "detecting auth URI") + } + logger.Debugf("discovered auth URI: %s", authURI) + + // The authorization URI scheme and host identifies the AD endpoint. + // The authorization URI path identifies the AD tenant. 
+ tenantId, err := AuthorizationURITenantID(authURI) + if err != nil { + return nil, "", errors.Annotate(err, "getting tenant ID") + } + authURI.Path = "" + adEndpoint := authURI.String() + + cloudEnv := azure.Environment{ActiveDirectoryEndpoint: adEndpoint} + oauthConfig, err := cloudEnv.OAuthConfigForTenant(tenantId) + if err != nil { + return nil, "", errors.Annotate(err, "getting OAuth configuration") + } + return oauthConfig, tenantId, nil +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/azure/internal/azureauth/oauth_test.go juju-core-2.0.0/src/github.com/juju/juju/provider/azure/internal/azureauth/oauth_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/azure/internal/azureauth/oauth_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/azure/internal/azureauth/oauth_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,61 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+ +package azureauth_test + +import ( + "net/http" + "net/url" + + "github.com/Azure/azure-sdk-for-go/arm/resources/subscriptions" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/mocks" + "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/provider/azure/internal/azureauth" +) + +type OAuthConfigSuite struct { + testing.IsolationSuite +} + +var _ = gc.Suite(&OAuthConfigSuite{}) + +const fakeTenantId = "11111111-1111-1111-1111-111111111111" + +func oauthConfigSender() autorest.Sender { + sender := mocks.NewSender() + resp := mocks.NewResponseWithStatus("", http.StatusUnauthorized) + mocks.SetResponseHeaderValues(resp, "WWW-Authenticate", []string{ + `authorization_uri="https://testing.invalid/` + fakeTenantId + `"`, + }) + sender.AppendResponse(resp) + return sender +} + +func (s *OAuthConfigSuite) TestOAuthConfig(c *gc.C) { + client := subscriptions.Client{subscriptions.NewWithBaseURI("https://testing.invalid")} + client.Sender = oauthConfigSender() + cfg, tenantId, err := azureauth.OAuthConfig(client, "https://testing.invalid", "subscription-id") + c.Assert(err, jc.ErrorIsNil) + c.Assert(tenantId, gc.Equals, fakeTenantId) + + baseURL := url.URL{ + Scheme: "https", + Host: "testing.invalid", + RawQuery: "api-version=1.0", + } + expectedCfg := &azure.OAuthConfig{ + AuthorizeEndpoint: baseURL, + TokenEndpoint: baseURL, + DeviceCodeEndpoint: baseURL, + } + expectedCfg.AuthorizeEndpoint.Path = "/11111111-1111-1111-1111-111111111111/oauth2/authorize" + expectedCfg.TokenEndpoint.Path = "/11111111-1111-1111-1111-111111111111/oauth2/token" + expectedCfg.DeviceCodeEndpoint.Path = "/11111111-1111-1111-1111-111111111111/oauth2/devicecode" + + c.Assert(cfg, jc.DeepEquals, expectedCfg) +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/azure/internal/azureauth/package_test.go 
juju-core-2.0.0/src/github.com/juju/juju/provider/azure/internal/azureauth/package_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/azure/internal/azureauth/package_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/azure/internal/azureauth/package_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,14 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package azureauth_test + +import ( + "testing" + + gc "gopkg.in/check.v1" +) + +func TestPackage(t *testing.T) { + gc.TestingT(t) +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/azure/internal/azureauth/utils.go juju-core-2.0.0/src/github.com/juju/juju/provider/azure/internal/azureauth/utils.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/azure/internal/azureauth/utils.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/azure/internal/azureauth/utils.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,16 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package azureauth + +import "strings" + +// TokenResource returns a resource value suitable for auth tokens, based on +// an endpoint URI. +func TokenResource(uri string) string { + resource := uri + if !strings.HasSuffix(resource, "/") { + resource += "/" + } + return resource +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/azure/internal/azureauth/utils_test.go juju-core-2.0.0/src/github.com/juju/juju/provider/azure/internal/azureauth/utils_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/azure/internal/azureauth/utils_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/azure/internal/azureauth/utils_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,24 @@ +// Copyright 2016 Canonical Ltd. 
+// Licensed under the AGPLv3, see LICENCE file for details. + +package azureauth_test + +import ( + "github.com/juju/testing" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/provider/azure/internal/azureauth" +) + +type TokenResourceSuite struct { + testing.IsolationSuite +} + +var _ = gc.Suite(&TokenResourceSuite{}) + +func (s *TokenResourceSuite) TestTokenResource(c *gc.C) { + out := azureauth.TokenResource("https://graph.windows.net") + c.Assert(out, gc.Equals, "https://graph.windows.net/") + out = azureauth.TokenResource("https://graph.windows.net/") + c.Assert(out, gc.Equals, "https://graph.windows.net/") +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/azure/internal/azurestorage/interface.go juju-core-2.0.0/src/github.com/juju/juju/provider/azure/internal/azurestorage/interface.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/azure/internal/azurestorage/interface.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/azure/internal/azurestorage/interface.go 2016-10-13 14:31:49.000000000 +0000 @@ -33,7 +33,7 @@ // Otherwise returns false. // // See https://godoc.org/github.com/Azure/azure-sdk-for-go/storage#BlobStorageClient.DeleteBlobIfExists - DeleteBlobIfExists(container, name string) (bool, error) + DeleteBlobIfExists(container, name string, extraHeaders map[string]string) (bool, error) } // NewClientFunc is the type of the NewClient function. diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/azure/internal/azuretesting/recorder.go juju-core-2.0.0/src/github.com/juju/juju/provider/azure/internal/azuretesting/recorder.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/azure/internal/azuretesting/recorder.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/azure/internal/azuretesting/recorder.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,43 @@ +// Copyright 2015 Canonical Ltd. 
+// Licensed under the AGPLv3, see LICENCE file for details. + +package azuretesting + +import ( + "bytes" + "io/ioutil" + "net/http" + "sync" + + "github.com/Azure/go-autorest/autorest" +) + +// RequestRecorder returns an autorest.PrepareDecorator that records requests +// to ghe given slice. +func RequestRecorder(requests *[]*http.Request) autorest.PrepareDecorator { + if requests == nil { + return nil + } + var mu sync.Mutex + return func(p autorest.Preparer) autorest.Preparer { + return autorest.PreparerFunc(func(req *http.Request) (*http.Request, error) { + // Save the request body, since it will be consumed. + reqCopy := *req + if req.Body != nil { + var buf bytes.Buffer + if _, err := buf.ReadFrom(req.Body); err != nil { + return nil, err + } + if err := req.Body.Close(); err != nil { + return nil, err + } + reqCopy.Body = ioutil.NopCloser(&buf) + req.Body = ioutil.NopCloser(bytes.NewReader(buf.Bytes())) + } + mu.Lock() + *requests = append(*requests, &reqCopy) + mu.Unlock() + return req, nil + }) + } +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/azure/internal/azuretesting/senders.go juju-core-2.0.0/src/github.com/juju/juju/provider/azure/internal/azuretesting/senders.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/azure/internal/azuretesting/senders.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/azure/internal/azuretesting/senders.go 2016-10-13 14:31:49.000000000 +0000 @@ -10,10 +10,13 @@ "regexp" "sync" - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/mocks" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/mocks" + "github.com/juju/loggo" ) +var logger = loggo.GetLogger("juju.provider.azure.internal.azuretesting") + // MockSender is a wrapper around autorest/mocks.Sender, extending it with // 
request path checking to ease testing. type MockSender struct { @@ -49,7 +52,7 @@ panic(err) } sender := &MockSender{Sender: mocks.NewSender()} - sender.EmitContent(string(content)) + sender.AppendResponse(mocks.NewResponseWithContent(string(content))) return sender } @@ -58,6 +61,7 @@ type Senders []autorest.Sender func (s *Senders) Do(req *http.Request) (*http.Response, error) { + logger.Debugf("Senders.Do(%s)", req.URL) if len(*s) == 0 { response := mocks.NewResponseWithStatus("", http.StatusInternalServerError) return response, fmt.Errorf("no sender for %q", req.URL) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/azure/internal/azuretesting/storage.go juju-core-2.0.0/src/github.com/juju/juju/provider/azure/internal/azuretesting/storage.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/azure/internal/azuretesting/storage.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/azure/internal/azuretesting/storage.go 2016-10-13 14:31:49.000000000 +0000 @@ -41,7 +41,7 @@ return storage.BlobListResponse{}, c.NextErr() } -func (c *MockStorageClient) DeleteBlobIfExists(container, name string) (bool, error) { +func (c *MockStorageClient) DeleteBlobIfExists(container, name string, headers map[string]string) (bool, error) { c.MethodCall(c, "DeleteBlobIfExists", container, name) if c.DeleteBlobIfExistsFunc != nil { return c.DeleteBlobIfExistsFunc(container, name) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/azure/internal/errorutils/errors.go juju-core-2.0.0/src/github.com/juju/juju/provider/azure/internal/errorutils/errors.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/azure/internal/errorutils/errors.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/azure/internal/errorutils/errors.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,25 @@ +// Copyright 2016 Canonical Ltd. 
+// Licensed under the AGPLv3, see LICENCE file for details. + +package errorutils + +import ( + "github.com/juju/errors" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" +) + +// ServiceError returns the *azure.ServiceError underlying the +// supplied error, if any, and a bool indicating whether one +// was found. +func ServiceError(err error) (*azure.ServiceError, bool) { + err = errors.Cause(err) + if d, ok := err.(autorest.DetailedError); ok { + err = d.Original + } + if r, ok := err.(*azure.RequestError); ok { + return r.ServiceError, true + } + return nil, false +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/azure/internal/imageutils/images.go juju-core-2.0.0/src/github.com/juju/juju/provider/azure/internal/imageutils/images.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/azure/internal/imageutils/images.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/azure/internal/imageutils/images.go 2016-10-13 14:31:49.000000000 +0000 @@ -9,8 +9,8 @@ "strconv" "strings" - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/to" "github.com/Azure/azure-sdk-for-go/arm/compute" + "github.com/Azure/go-autorest/autorest/to" "github.com/juju/errors" "github.com/juju/loggo" "github.com/juju/utils/arch" diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/azure/internal/imageutils/images_test.go juju-core-2.0.0/src/github.com/juju/juju/provider/azure/internal/imageutils/images_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/azure/internal/imageutils/images_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/azure/internal/imageutils/images_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,8 +4,8 @@ package imageutils_test import ( - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/mocks" 
"github.com/Azure/azure-sdk-for-go/arm/compute" + "github.com/Azure/go-autorest/autorest/mocks" jc "github.com/juju/testing/checkers" "github.com/juju/utils/arch" gc "gopkg.in/check.v1" @@ -27,13 +27,14 @@ func (s *imageutilsSuite) SetUpTest(c *gc.C) { s.BaseSuite.SetUpTest(c) s.mockSender = mocks.NewSender() + s.client.ManagementClient = compute.New("subscription-id") s.client.Sender = s.mockSender } func (s *imageutilsSuite) TestSeriesImage(c *gc.C) { - s.mockSender.EmitContent( + s.mockSender.AppendResponse(mocks.NewResponseWithContent( `[{"name": "14.04.3"}, {"name": "14.04.1-LTS"}, {"name": "12.04.5"}]`, - ) + )) image, err := imageutils.SeriesImage("trusty", "released", "westus", s.client) c.Assert(err, jc.ErrorIsNil) c.Assert(image, gc.NotNil) @@ -45,9 +46,9 @@ } func (s *imageutilsSuite) TestSeriesImageInvalidSKU(c *gc.C) { - s.mockSender.EmitContent( + s.mockSender.AppendResponse(mocks.NewResponseWithContent( `[{"name": "12.04.invalid"}, {"name": "12.04.5-LTS"}]`, - ) + )) image, err := imageutils.SeriesImage("precise", "released", "westus", s.client) c.Assert(err, jc.ErrorIsNil) c.Assert(image, gc.NotNil) @@ -69,26 +70,27 @@ s.assertImageId(c, "centos7", "released", "OpenLogic:CentOS:7.1:latest") } -func (s *imageutilsSuite) TestSeriesImageArch(c *gc.C) { - _, err := imageutils.SeriesImage("arch", "released", "westus", s.client) - c.Assert(err, gc.ErrorMatches, "deploying Arch not supported") +func (s *imageutilsSuite) TestSeriesImageGenericLinux(c *gc.C) { + _, err := imageutils.SeriesImage("genericlinux", "released", "westus", s.client) + c.Assert(err, gc.ErrorMatches, "deploying GenericLinux not supported") } func (s *imageutilsSuite) TestSeriesImageStream(c *gc.C) { - s.mockSender.EmitContent(`[{"name": "14.04.2"}, {"name": "14.04.3-DAILY"}, {"name": "14.04.1-LTS"}]`) + s.mockSender.AppendAndRepeatResponse(mocks.NewResponseWithContent( + `[{"name": "14.04.2"}, {"name": "14.04.3-DAILY"}, {"name": "14.04.1-LTS"}]`), 2) s.assertImageId(c, "trusty", 
"daily", "Canonical:UbuntuServer:14.04.3-DAILY:latest") s.assertImageId(c, "trusty", "released", "Canonical:UbuntuServer:14.04.2:latest") } func (s *imageutilsSuite) TestSeriesImageNotFound(c *gc.C) { - s.mockSender.EmitContent(`[]`) + s.mockSender.AppendResponse(mocks.NewResponseWithContent(`[]`)) image, err := imageutils.SeriesImage("trusty", "released", "westus", s.client) c.Assert(err, gc.ErrorMatches, "selecting SKU for trusty: Ubuntu SKUs not found") c.Assert(image, gc.IsNil) } func (s *imageutilsSuite) TestSeriesImageStreamNotFound(c *gc.C) { - s.mockSender.EmitContent(`[{"name": "14.04-beta1"}]`) + s.mockSender.AppendResponse(mocks.NewResponseWithContent(`[{"name": "14.04-beta1"}]`)) _, err := imageutils.SeriesImage("trusty", "whatever", "westus", s.client) c.Assert(err, gc.ErrorMatches, "selecting SKU for trusty: Ubuntu SKUs for whatever stream not found") } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/azure/internal/iputils/iputils.go juju-core-2.0.0/src/github.com/juju/juju/provider/azure/internal/iputils/iputils.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/azure/internal/iputils/iputils.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/azure/internal/iputils/iputils.go 2016-10-13 14:31:49.000000000 +0000 @@ -47,6 +47,27 @@ return nil, errors.Errorf("no addresses available in %s", subnet) } +// NthSubnetIP returns the n'th IP address in a given subnet, where n is a +// zero-based index, zero being the first available IP address in the subnet. +// +// If n is out of range, NthSubnetIP will return nil. 
+func NthSubnetIP(subnet *net.IPNet, n int) net.IP { + ones, bits := subnet.Mask.Size() + base := ipUint32(subnet.IP) + var valid int + for i := reservedAddressRangeEnd + 1; i < (1< 31 { logger.Debugf("ignore disk with invalid LUN: %+v", disk) continue @@ -662,7 +659,7 @@ } for i, inUse := range inUse { if !inUse { - return i, nil + return int32(i), nil } } return -1, errors.New("all LUNs are in use") @@ -670,7 +667,7 @@ // diskBusAddress returns the value to use in the BusAddress field of // VolumeAttachmentInfo for a disk with the specified LUN. -func diskBusAddress(lun int) string { +func diskBusAddress(lun int32) string { return fmt.Sprintf("scsi@5:0.0.%d", lun) } @@ -726,20 +723,77 @@ newClient internalazurestorage.NewClientFunc, storageEndpoint string, storageAccount *armstorage.Account, - storageAccountKeys *armstorage.AccountKeys, + storageAccountKey *armstorage.AccountKey, ) (internalazurestorage.Client, error) { storageAccountName := to.String(storageAccount.Name) - storageAccountKey := to.String(storageAccountKeys.Key1) const useHTTPS = true return newClient( - storageAccountName, storageAccountKey, - storageEndpoint, azurestorage.DefaultAPIVersion, useHTTPS, + storageAccountName, + to.String(storageAccountKey.Value), + storageEndpoint, + azurestorage.DefaultAPIVersion, + useHTTPS, ) } -// RandomStorageAccountName returns a random storage account name. -func RandomStorageAccountName() string { - const maxStorageAccountNameLen = 24 - validRunes := append(utils.LowerAlpha, utils.Digits...) - return utils.RandomString(maxStorageAccountNameLen, validRunes) +// getStorageAccountKey returns the key for the storage account. 
+func getStorageAccountKey( + callAPI callAPIFunc, + client armstorage.AccountsClient, + resourceGroup, accountName string, +) (*armstorage.AccountKey, error) { + logger.Debugf("getting keys for storage account %q", accountName) + var listKeysResult armstorage.AccountListKeysResult + if err := callAPI(func() (autorest.Response, error) { + var err error + listKeysResult, err = client.ListKeys(resourceGroup, accountName) + return listKeysResult.Response, err + }); err != nil { + if listKeysResult.Response.Response != nil && listKeysResult.StatusCode == http.StatusNotFound { + return nil, errors.NewNotFound(err, "storage account keys not found") + } + return nil, errors.Annotate(err, "listing storage account keys") + } + if listKeysResult.Keys == nil { + return nil, errors.NotFoundf("storage account keys") + } + + // We need a storage key with full permissions. + var fullKey *armstorage.AccountKey + for _, key := range *listKeysResult.Keys { + logger.Debugf("storage account key: %#v", key) + // At least some of the time, Azure returns the permissions + // in title-case, which does not match the constant. + if strings.ToUpper(string(key.Permissions)) != string(armstorage.FULL) { + continue + } + fullKey = &key + break + } + if fullKey == nil { + return nil, errors.NotFoundf( + "storage account key with %q permission", + armstorage.FULL, + ) + } + return fullKey, nil +} + +// storageAccountTemplateResource returns a template resource definition +// for creating a storage account. 
+func storageAccountTemplateResource( + location string, + envTags map[string]string, + accountName, accountType string, +) armtemplates.Resource { + return armtemplates.Resource{ + APIVersion: armstorage.APIVersion, + Type: "Microsoft.Storage/storageAccounts", + Name: accountName, + Location: location, + Tags: envTags, + StorageSku: &armstorage.Sku{ + Name: armstorage.SkuName(accountType), + }, + } } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/azure/storage_test.go juju-core-2.0.0/src/github.com/juju/juju/provider/azure/storage_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/azure/storage_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/azure/storage_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -7,11 +7,10 @@ "fmt" "net/http" - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/to" "github.com/Azure/azure-sdk-for-go/arm/compute" - "github.com/Azure/azure-sdk-for-go/arm/network" armstorage "github.com/Azure/azure-sdk-for-go/arm/storage" azurestorage "github.com/Azure/azure-sdk-for-go/storage" + "github.com/Azure/go-autorest/autorest/to" "github.com/juju/errors" jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" @@ -19,6 +18,7 @@ "github.com/juju/juju/instance" "github.com/juju/juju/provider/azure" + "github.com/juju/juju/provider/azure/internal/azureauth" "github.com/juju/juju/provider/azure/internal/azuretesting" "github.com/juju/juju/storage" "github.com/juju/juju/testing" @@ -40,9 +40,11 @@ s.storageClient = azuretesting.MockStorageClient{} s.requests = nil envProvider := newProvider(c, azure.ProviderConfig{ - Sender: &s.sender, - NewStorageClient: s.storageClient.NewClient, - RequestInspector: requestRecorder(&s.requests), + Sender: &s.sender, + NewStorageClient: s.storageClient.NewClient, + RequestInspector: azuretesting.RequestRecorder(&s.requests), + RandomWindowsAdminPassword: func() string { return "sorandom" }, + 
InteractiveCreateServicePrincipal: azureauth.InteractiveCreateServicePrincipal, }) s.sender = nil @@ -61,34 +63,45 @@ // Force an explicit refresh of the access token, so it isn't done // implicitly during the tests. - s.sender = azuretesting.Senders{tokenRefreshSender()} + s.sender = azuretesting.Senders{ + tokenRefreshSender(), + } err = azure.ForceVolumeSourceTokenRefresh(volumeSource) c.Assert(err, jc.ErrorIsNil) return volumeSource } -func (s *storageSuite) accountsSender() *azuretesting.MockSender { +func (s *storageSuite) accountSender() *azuretesting.MockSender { envTags := map[string]*string{ "juju-model-uuid": to.StringPtr(testing.ModelTag.Id()), } - accounts := []armstorage.Account{{ - Name: to.StringPtr(fakeStorageAccount), + account := armstorage.Account{ + Name: to.StringPtr(storageAccountName), Type: to.StringPtr("Standard_LRS"), Tags: &envTags, Properties: &armstorage.AccountProperties{ PrimaryEndpoints: &armstorage.Endpoints{ - Blob: to.StringPtr(fmt.Sprintf("https://%s.blob.storage.azurestack.local/", fakeStorageAccount)), + Blob: to.StringPtr(fmt.Sprintf("https://%s.blob.storage.azurestack.local/", storageAccountName)), }, }, - }} - accountsSender := azuretesting.NewSenderWithValue(armstorage.AccountListResult{Value: &accounts}) - accountsSender.PathPattern = ".*/storageAccounts" - return accountsSender + } + accountSender := azuretesting.NewSenderWithValue(account) + accountSender.PathPattern = ".*/storageAccounts/" + storageAccountName + ".*" + return accountSender } func (s *storageSuite) accountKeysSender() *azuretesting.MockSender { - keys := armstorage.AccountKeys{Key1: to.StringPtr(fakeStorageAccountKey), Key2: to.StringPtr("key2")} - keysSender := azuretesting.NewSenderWithValue(&keys) + keys := []armstorage.AccountKey{{ + KeyName: to.StringPtr(fakeStorageAccountKey + "-name"), + Value: to.StringPtr(fakeStorageAccountKey), + Permissions: armstorage.FULL, + }, { + KeyName: to.StringPtr("key2-name"), + Value: to.StringPtr("key2"), + 
Permissions: armstorage.FULL, + }} + result := armstorage.AccountListKeysResult{Keys: &keys} + keysSender := azuretesting.NewSenderWithValue(&result) keysSender.PathPattern = ".*/storageAccounts/.*/listKeys" return keysSender } @@ -122,11 +135,11 @@ func (s *storageSuite) TestCreateVolumes(c *gc.C) { // machine-1 has a single data disk with LUN 0. - machine1DataDisks := []compute.DataDisk{{Lun: to.IntPtr(0)}} + machine1DataDisks := []compute.DataDisk{{Lun: to.Int32Ptr(0)}} // machine-2 has 32 data disks; no LUNs free. machine2DataDisks := make([]compute.DataDisk, 32) for i := range machine2DataDisks { - machine2DataDisks[i].Lun = to.IntPtr(i) + machine2DataDisks[i].Lun = to.Int32Ptr(int32(i)) } // volume-0 and volume-2 are attached to machine-0 @@ -173,17 +186,7 @@ }, }} - // There should be a couple of API calls to list instances, - // and one update per modified instance. - nics := []network.Interface{ - makeNetworkInterface("nic-0", "machine-0"), - makeNetworkInterface("nic-1", "machine-1"), - makeNetworkInterface("nic-2", "machine-2"), - } - nicsSender := azuretesting.NewSenderWithValue(network.InterfaceListResult{ - Value: &nics, - }) - nicsSender.PathPattern = `.*/Microsoft\.Network/networkInterfaces` + // There should be a one API calls to list VMs, and one update per modified instance. virtualMachinesSender := azuretesting.NewSenderWithValue(compute.VirtualMachineListResult{ Value: &virtualMachines, }) @@ -194,9 +197,8 @@ updateVirtualMachine1Sender.PathPattern = `.*/Microsoft\.Compute/virtualMachines/machine-1` volumeSource := s.volumeSource(c) s.sender = azuretesting.Senders{ - nicsSender, virtualMachinesSender, - s.accountsSender(), + s.accountSender(), updateVirtualMachine0Sender, updateVirtualMachine1Sender, } @@ -212,49 +214,48 @@ c.Check(results[4].Error, gc.ErrorMatches, "choosing LUN: all LUNs are in use") // Validate HTTP request bodies. 
- c.Assert(s.requests, gc.HasLen, 5) - c.Assert(s.requests[0].Method, gc.Equals, "GET") // list NICs - c.Assert(s.requests[1].Method, gc.Equals, "GET") // list virtual machines - c.Assert(s.requests[2].Method, gc.Equals, "GET") // list storage accounts - c.Assert(s.requests[3].Method, gc.Equals, "PUT") // update machine-0 - c.Assert(s.requests[4].Method, gc.Equals, "PUT") // update machine-1 + c.Assert(s.requests, gc.HasLen, 4) + c.Assert(s.requests[0].Method, gc.Equals, "GET") // list virtual machines + c.Assert(s.requests[1].Method, gc.Equals, "GET") // list storage accounts + c.Assert(s.requests[2].Method, gc.Equals, "PUT") // update machine-0 + c.Assert(s.requests[3].Method, gc.Equals, "PUT") // update machine-1 machine0DataDisks := []compute.DataDisk{{ - Lun: to.IntPtr(0), - DiskSizeGB: to.IntPtr(1), + Lun: to.Int32Ptr(0), + DiskSizeGB: to.Int32Ptr(1), Name: to.StringPtr("volume-0"), Vhd: &compute.VirtualHardDisk{URI: to.StringPtr(fmt.Sprintf( "https://%s.blob.storage.azurestack.local/datavhds/volume-0.vhd", - fakeStorageAccount, + storageAccountName, ))}, Caching: compute.ReadWrite, CreateOption: compute.Empty, }, { - Lun: to.IntPtr(1), - DiskSizeGB: to.IntPtr(1), + Lun: to.Int32Ptr(1), + DiskSizeGB: to.Int32Ptr(1), Name: to.StringPtr("volume-2"), Vhd: &compute.VirtualHardDisk{URI: to.StringPtr(fmt.Sprintf( "https://%s.blob.storage.azurestack.local/datavhds/volume-2.vhd", - fakeStorageAccount, + storageAccountName, ))}, Caching: compute.ReadWrite, CreateOption: compute.Empty, }} virtualMachines[0].Properties.StorageProfile.DataDisks = &machine0DataDisks - assertRequestBody(c, s.requests[3], &virtualMachines[0]) + assertRequestBody(c, s.requests[2], &virtualMachines[0]) machine1DataDisks = append(machine1DataDisks, compute.DataDisk{ - Lun: to.IntPtr(1), - DiskSizeGB: to.IntPtr(2), + Lun: to.Int32Ptr(1), + DiskSizeGB: to.Int32Ptr(2), Name: to.StringPtr("volume-1"), Vhd: &compute.VirtualHardDisk{URI: to.StringPtr(fmt.Sprintf( 
"https://%s.blob.storage.azurestack.local/datavhds/volume-1.vhd", - fakeStorageAccount, + storageAccountName, ))}, Caching: compute.ReadWrite, CreateOption: compute.Empty, }) - assertRequestBody(c, s.requests[4], &virtualMachines[1]) + assertRequestBody(c, s.requests[3], &virtualMachines[1]) } func (s *storageSuite) TestListVolumes(c *gc.C) { @@ -283,14 +284,14 @@ volumeSource := s.volumeSource(c) s.sender = azuretesting.Senders{ - s.accountsSender(), + s.accountSender(), s.accountKeysSender(), } volumeIds, err := volumeSource.ListVolumes() c.Assert(err, jc.ErrorIsNil) s.storageClient.CheckCallNames(c, "NewClient", "ListBlobs") s.storageClient.CheckCall( - c, 0, "NewClient", fakeStorageAccount, fakeStorageAccountKey, + c, 0, "NewClient", storageAccountName, fakeStorageAccountKey, "storage.azurestack.local", azurestorage.DefaultAPIVersion, true, ) s.storageClient.CheckCall(c, 1, "ListBlobs", "datavhds", azurestorage.ListBlobsParameters{}) @@ -300,7 +301,7 @@ func (s *storageSuite) TestListVolumesErrors(c *gc.C) { volumeSource := s.volumeSource(c) s.sender = azuretesting.Senders{ - s.accountsSender(), + s.accountSender(), s.accountKeysSender(), } @@ -335,14 +336,14 @@ volumeSource := s.volumeSource(c) s.sender = azuretesting.Senders{ - s.accountsSender(), + s.accountSender(), s.accountKeysSender(), } results, err := volumeSource.DescribeVolumes([]string{"volume-0", "volume-1", "volume-0", "volume-42"}) c.Assert(err, jc.ErrorIsNil) s.storageClient.CheckCallNames(c, "NewClient", "ListBlobs") s.storageClient.CheckCall( - c, 0, "NewClient", fakeStorageAccount, fakeStorageAccountKey, + c, 0, "NewClient", storageAccountName, fakeStorageAccountKey, "storage.azurestack.local", azurestorage.DefaultAPIVersion, true, ) c.Assert(results, gc.HasLen, 4) @@ -371,7 +372,7 @@ func (s *storageSuite) TestDestroyVolumes(c *gc.C) { volumeSource := s.volumeSource(c) s.sender = azuretesting.Senders{ - s.accountsSender(), + s.accountSender(), s.accountKeysSender(), } results, err := 
volumeSource.DestroyVolumes([]string{"volume-0", "volume-42"}) @@ -387,24 +388,24 @@ func (s *storageSuite) TestAttachVolumes(c *gc.C) { // machine-1 has a single data disk with LUN 0. machine1DataDisks := []compute.DataDisk{{ - Lun: to.IntPtr(0), + Lun: to.Int32Ptr(0), Name: to.StringPtr("volume-1"), Vhd: &compute.VirtualHardDisk{ URI: to.StringPtr(fmt.Sprintf( "https://%s.blob.storage.azurestack.local/datavhds/volume-1.vhd", - fakeStorageAccount, + storageAccountName, )), }, }} // machine-2 has 32 data disks; no LUNs free. machine2DataDisks := make([]compute.DataDisk, 32) for i := range machine2DataDisks { - machine2DataDisks[i].Lun = to.IntPtr(i) + machine2DataDisks[i].Lun = to.Int32Ptr(int32(i)) machine2DataDisks[i].Name = to.StringPtr(fmt.Sprintf("volume-%d", i)) machine2DataDisks[i].Vhd = &compute.VirtualHardDisk{ URI: to.StringPtr(fmt.Sprintf( "https://%s.blob.storage.azurestack.local/datavhds/volume-%d.vhd", - fakeStorageAccount, i, + storageAccountName, i, )), } } @@ -449,17 +450,7 @@ }, }} - // There should be a couple of API calls to list instances, - // and one update per modified instance. - nics := []network.Interface{ - makeNetworkInterface("nic-0", "machine-0"), - makeNetworkInterface("nic-1", "machine-1"), - makeNetworkInterface("nic-2", "machine-2"), - } - nicsSender := azuretesting.NewSenderWithValue(network.InterfaceListResult{ - Value: &nics, - }) - nicsSender.PathPattern = `.*/Microsoft\.Network/networkInterfaces` + // There should be a one API calls to list VMs, and one update per modified instance. 
virtualMachinesSender := azuretesting.NewSenderWithValue(compute.VirtualMachineListResult{ Value: &virtualMachines, }) @@ -468,9 +459,8 @@ updateVirtualMachine0Sender.PathPattern = `.*/Microsoft\.Compute/virtualMachines/machine-0` volumeSource := s.volumeSource(c) s.sender = azuretesting.Senders{ - nicsSender, virtualMachinesSender, - s.accountsSender(), + s.accountSender(), updateVirtualMachine0Sender, } @@ -485,62 +475,61 @@ c.Check(results[4].Error, gc.ErrorMatches, "choosing LUN: all LUNs are in use") // Validate HTTP request bodies. - c.Assert(s.requests, gc.HasLen, 4) - c.Assert(s.requests[0].Method, gc.Equals, "GET") // list NICs - c.Assert(s.requests[1].Method, gc.Equals, "GET") // list virtual machines - c.Assert(s.requests[2].Method, gc.Equals, "GET") // list storage accounts - c.Assert(s.requests[3].Method, gc.Equals, "PUT") // update machine-0 + c.Assert(s.requests, gc.HasLen, 3) + c.Assert(s.requests[0].Method, gc.Equals, "GET") // list virtual machines + c.Assert(s.requests[1].Method, gc.Equals, "GET") // list storage accounts + c.Assert(s.requests[2].Method, gc.Equals, "PUT") // update machine-0 machine0DataDisks := []compute.DataDisk{{ - Lun: to.IntPtr(0), + Lun: to.Int32Ptr(0), Name: to.StringPtr("volume-0"), Vhd: &compute.VirtualHardDisk{URI: to.StringPtr(fmt.Sprintf( "https://%s.blob.storage.azurestack.local/datavhds/volume-0.vhd", - fakeStorageAccount, + storageAccountName, ))}, Caching: compute.ReadWrite, CreateOption: compute.Attach, }, { - Lun: to.IntPtr(1), + Lun: to.Int32Ptr(1), Name: to.StringPtr("volume-2"), Vhd: &compute.VirtualHardDisk{URI: to.StringPtr(fmt.Sprintf( "https://%s.blob.storage.azurestack.local/datavhds/volume-2.vhd", - fakeStorageAccount, + storageAccountName, ))}, Caching: compute.ReadWrite, CreateOption: compute.Attach, }} virtualMachines[0].Properties.StorageProfile.DataDisks = &machine0DataDisks - assertRequestBody(c, s.requests[3], &virtualMachines[0]) + assertRequestBody(c, s.requests[2], &virtualMachines[0]) } func 
(s *storageSuite) TestDetachVolumes(c *gc.C) { // machine-0 has a three data disks: volume-0, volume-1 and volume-2 machine0DataDisks := []compute.DataDisk{{ - Lun: to.IntPtr(0), + Lun: to.Int32Ptr(0), Name: to.StringPtr("volume-0"), Vhd: &compute.VirtualHardDisk{ URI: to.StringPtr(fmt.Sprintf( "https://%s.blob.storage.azurestack.local/datavhds/volume-0.vhd", - fakeStorageAccount, + storageAccountName, )), }, }, { - Lun: to.IntPtr(1), + Lun: to.Int32Ptr(1), Name: to.StringPtr("volume-1"), Vhd: &compute.VirtualHardDisk{ URI: to.StringPtr(fmt.Sprintf( "https://%s.blob.storage.azurestack.local/datavhds/volume-1.vhd", - fakeStorageAccount, + storageAccountName, )), }, }, { - Lun: to.IntPtr(2), + Lun: to.Int32Ptr(2), Name: to.StringPtr("volume-2"), Vhd: &compute.VirtualHardDisk{ URI: to.StringPtr(fmt.Sprintf( "https://%s.blob.storage.azurestack.local/datavhds/volume-2.vhd", - fakeStorageAccount, + storageAccountName, )), }, }} @@ -575,16 +564,7 @@ }, }} - // There should be a couple of API calls to list instances, - // and one update per modified instance. - nics := []network.Interface{ - makeNetworkInterface("nic-0", "machine-0"), - makeNetworkInterface("nic-1", "machine-1"), - } - nicsSender := azuretesting.NewSenderWithValue(network.InterfaceListResult{ - Value: &nics, - }) - nicsSender.PathPattern = `.*/Microsoft\.Network/networkInterfaces` + // There should be a one API calls to list VMs, and one update per modified instance. virtualMachinesSender := azuretesting.NewSenderWithValue(compute.VirtualMachineListResult{ Value: &virtualMachines, }) @@ -593,9 +573,8 @@ updateVirtualMachine0Sender.PathPattern = `.*/Microsoft\.Compute/virtualMachines/machine-0` volumeSource := s.volumeSource(c) s.sender = azuretesting.Senders{ - nicsSender, virtualMachinesSender, - s.accountsSender(), + s.accountSender(), updateVirtualMachine0Sender, } @@ -609,16 +588,15 @@ c.Check(results[3], gc.ErrorMatches, "instance machine-42 not found") // Validate HTTP request bodies. 
- c.Assert(s.requests, gc.HasLen, 4) - c.Assert(s.requests[0].Method, gc.Equals, "GET") // list NICs - c.Assert(s.requests[1].Method, gc.Equals, "GET") // list virtual machines - c.Assert(s.requests[2].Method, gc.Equals, "GET") // list storage accounts - c.Assert(s.requests[3].Method, gc.Equals, "PUT") // update machine-0 + c.Assert(s.requests, gc.HasLen, 3) + c.Assert(s.requests[0].Method, gc.Equals, "GET") // list virtual machines + c.Assert(s.requests[1].Method, gc.Equals, "GET") // list storage accounts + c.Assert(s.requests[2].Method, gc.Equals, "PUT") // update machine-0 machine0DataDisks = []compute.DataDisk{ machine0DataDisks[0], machine0DataDisks[2], } virtualMachines[0].Properties.StorageProfile.DataDisks = &machine0DataDisks - assertRequestBody(c, s.requests[3], &virtualMachines[0]) + assertRequestBody(c, s.requests[2], &virtualMachines[0]) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/azure/tracing.go juju-core-2.0.0/src/github.com/juju/juju/provider/azure/tracing.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/azure/tracing.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/azure/tracing.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,46 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package azure - -import ( - "net/http" - "net/http/httputil" - - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" - "github.com/juju/loggo" -) - -// tracingPrepareDecorator returns an autorest.PrepareDecorator that -// logs requests at trace level. 
-func tracingPrepareDecorator(logger loggo.Logger) autorest.PrepareDecorator { - return func(p autorest.Preparer) autorest.Preparer { - return autorest.PreparerFunc(func(r *http.Request) (*http.Request, error) { - dump, err := httputil.DumpRequest(r, true) - if err != nil { - logger.Tracef("failed to dump request: %v", err) - logger.Tracef("%+v", r) - } else { - logger.Tracef("%s", dump) - } - return p.Prepare(r) - }) - } -} - -// tracingRespondDecorator returns an autorest.RespondDecorator that -// logs responses at trace level. -func tracingRespondDecorator(logger loggo.Logger) autorest.RespondDecorator { - return func(r autorest.Responder) autorest.Responder { - return autorest.ResponderFunc(func(resp *http.Response) error { - dump, err := httputil.DumpResponse(resp, true) - if err != nil { - logger.Tracef("failed to dump response: %v", err) - logger.Tracef("%+v", resp) - } else { - logger.Tracef("%s", dump) - } - return r.Respond(resp) - }) - } -} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/azure/utils.go juju-core-2.0.0/src/github.com/juju/juju/provider/azure/utils.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/azure/utils.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/azure/utils.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,12 +4,14 @@ package azure import ( + "fmt" "math/rand" "net/http" "time" - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/to" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/to" + "github.com/juju/errors" "github.com/juju/retry" "github.com/juju/utils" "github.com/juju/utils/clock" @@ -21,11 +23,6 @@ maxRetryDuration = 5 * time.Minute ) -func toTagsPtr(tags map[string]string) *map[string]*string { - stringPtrMap := to.StringMapPtr(tags) - return &stringPtrMap -} - func toTags(tags 
*map[string]*string) map[string]string { if tags == nil { return nil @@ -89,3 +86,25 @@ Clock: c.clock, }) } + +// deleteResource deletes a resource with the given name from the resource +// group, using the provided "Deleter". If the resource does not exist, an +// error satisfying errors.IsNotFound will be returned. +func deleteResource(callAPI callAPIFunc, deleter resourceDeleter, resourceGroup, name string) error { + var result autorest.Response + if err := callAPI(func() (autorest.Response, error) { + var err error + result, err = deleter.Delete(resourceGroup, name, nil) + return result, err + }); err != nil { + if result.Response != nil && result.StatusCode == http.StatusNotFound { + return errors.NewNotFound(err, fmt.Sprintf("resource %q not found", name)) + } + return errors.Annotate(err, "canceling deployment") + } + return nil +} + +type resourceDeleter interface { + Delete(resourceGroup, name string, cancel <-chan struct{}) (autorest.Response, error) +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/azure/vmextension.go juju-core-2.0.0/src/github.com/juju/juju/provider/azure/vmextension.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/azure/vmextension.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/azure/vmextension.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,9 +4,8 @@ package azure import ( - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" - "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/to" "github.com/Azure/azure-sdk-for-go/arm/compute" + "github.com/Azure/go-autorest/autorest/to" "github.com/juju/errors" jujuos "github.com/juju/utils/os" ) @@ -32,13 +31,9 @@ linuxCustomScriptVersion = "1.4" ) -// createVMExtension creates a CustomScript VM extension for the given VM +// vmExtension creates a CustomScript VM extension for the given VM // which will execute the CustomData on the 
machine as a script. -func createVMExtension( - callAPI callAPIFunc, - vmExtensionClient compute.VirtualMachineExtensionsClient, - os jujuos.OSType, resourceGroup, vmName, location string, vmTags map[string]string, -) error { +func vmExtensionProperties(os jujuos.OSType) (*compute.VirtualMachineExtensionProperties, error) { var commandToExecute, extensionPublisher, extensionType, extensionVersion string switch os { @@ -56,28 +51,17 @@ // Ubuntu renders CustomData as cloud-config, and interprets // it with cloud-init. Windows and CentOS do not use cloud-init // on Azure. - return errors.NotSupportedf("CustomScript extension for OS %q", os) + return nil, errors.NotSupportedf("CustomScript extension for OS %q", os) } extensionSettings := map[string]interface{}{ "commandToExecute": commandToExecute, } - extension := compute.VirtualMachineExtension{ - Location: to.StringPtr(location), - Tags: toTagsPtr(vmTags), - Properties: &compute.VirtualMachineExtensionProperties{ - Publisher: to.StringPtr(extensionPublisher), - Type: to.StringPtr(extensionType), - TypeHandlerVersion: to.StringPtr(extensionVersion), - AutoUpgradeMinorVersion: to.BoolPtr(true), - Settings: &extensionSettings, - }, - } - err := callAPI(func() (autorest.Response, error) { - result, err := vmExtensionClient.CreateOrUpdate( - resourceGroup, vmName, extensionName, extension, - ) - return result.Response, err - }) - return err + return &compute.VirtualMachineExtensionProperties{ + Publisher: to.StringPtr(extensionPublisher), + Type: to.StringPtr(extensionType), + TypeHandlerVersion: to.StringPtr(extensionVersion), + AutoUpgradeMinorVersion: to.BoolPtr(true), + Settings: &extensionSettings, + }, nil } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/cloudsigma/config_test.go juju-core-2.0.0/src/github.com/juju/juju/provider/cloudsigma/config_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/cloudsigma/config_test.go 2016-08-16 08:56:25.000000000 +0000 +++ 
juju-core-2.0.0/src/github.com/juju/juju/provider/cloudsigma/config_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -82,7 +82,10 @@ c.Logf("test %d: %s", i, test.info) attrs := validAttrs().Merge(test.insert).Delete(test.remove...) testConfig := newConfig(c, attrs) - environ, err := environs.New(environs.OpenParams{fakeCloudSpec(), testConfig}) + environ, err := environs.New(environs.OpenParams{ + Cloud: fakeCloudSpec(), + Config: testConfig, + }) if test.err == "" { c.Check(err, gc.IsNil) attrs := environ.Config().AllAttrs() @@ -148,7 +151,10 @@ baseConfig := newConfig(c, validAttrs()) for i, test := range changeConfigTests { c.Logf("test %d: %s", i, test.info) - environ, err := environs.New(environs.OpenParams{fakeCloudSpec(), baseConfig}) + environ, err := environs.New(environs.OpenParams{ + Cloud: fakeCloudSpec(), + Config: baseConfig, + }) c.Assert(err, gc.IsNil) attrs := validAttrs().Merge(test.insert).Delete(test.remove...) testConfig := newConfig(c, attrs) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/cloudsigma/credentials.go juju-core-2.0.0/src/github.com/juju/juju/provider/cloudsigma/credentials.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/cloudsigma/credentials.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/cloudsigma/credentials.go 2016-10-13 14:31:49.000000000 +0000 @@ -6,6 +6,7 @@ import ( "github.com/juju/errors" "github.com/juju/juju/cloud" + "github.com/juju/juju/environs" ) type environProviderCredentials struct{} @@ -35,3 +36,8 @@ func (environProviderCredentials) DetectCredentials() (*cloud.CloudCredential, error) { return nil, errors.NotFoundf("credentials") } + +// FinalizeCredential is part of the environs.ProviderCredentials interface. 
+func (environProviderCredentials) FinalizeCredential(_ environs.FinalizeCredentialContext, args environs.FinalizeCredentialParams) (*cloud.Credential, error) { + return &args.Credential, nil +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/cloudsigma/environcaps.go juju-core-2.0.0/src/github.com/juju/juju/provider/cloudsigma/environcaps.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/cloudsigma/environcaps.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/cloudsigma/environcaps.go 2016-10-13 14:31:49.000000000 +0000 @@ -5,31 +5,8 @@ import ( "github.com/juju/juju/constraints" - "github.com/juju/juju/environs/imagemetadata" - "github.com/juju/juju/environs/simplestreams" - "github.com/juju/juju/provider/common" ) -func (env *environ) SupportedArchitectures() ([]string, error) { - env.archMutex.Lock() - defer env.archMutex.Unlock() - if env.supportedArchitectures != nil { - return env.supportedArchitectures, nil - } - logger.Debugf("Getting supported architectures from simplestream.") - cloudSpec, err := env.Region() - if err != nil { - return nil, err - } - imageConstraint := imagemetadata.NewImageConstraint(simplestreams.LookupParams{ - CloudSpec: cloudSpec, - Stream: env.Config().ImageStream(), - }) - env.supportedArchitectures, err = common.SupportedArchitectures(env, imageConstraint) - logger.Debugf("Supported architectures: %v", env.supportedArchitectures) - return env.supportedArchitectures, err -} - var unsupportedConstraints = []string{ constraints.Container, constraints.InstanceType, @@ -42,11 +19,6 @@ func (env *environ) ConstraintsValidator() (constraints.Validator, error) { validator := constraints.NewValidator() validator.RegisterUnsupported(unsupportedConstraints) - supportedArches, err := env.SupportedArchitectures() - if err != nil { - return nil, err - } - validator.RegisterVocabulary(constraints.Arch, supportedArches) return validator, nil } diff -Nru 
juju-core-2.0~beta15/src/github.com/juju/juju/provider/cloudsigma/environ.go juju-core-2.0.0/src/github.com/juju/juju/provider/cloudsigma/environ.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/cloudsigma/environ.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/cloudsigma/environ.go 2016-10-13 14:31:49.000000000 +0000 @@ -8,7 +8,6 @@ "github.com/altoros/gosigma" "github.com/juju/errors" - "github.com/juju/utils/arch" "github.com/juju/juju/constraints" "github.com/juju/juju/environs" @@ -27,12 +26,8 @@ name string cloud environs.CloudSpec client *environClient - - lock sync.Mutex - archMutex sync.Mutex - - ecfg *environConfig - supportedArchitectures []string + lock sync.Mutex + ecfg *environConfig } // Name returns the Environ's name. @@ -98,6 +93,11 @@ return common.Bootstrap(ctx, env, params) } +// BootstrapMessage is part of the Environ interface. +func (env *environ) BootstrapMessage() string { + return "" +} + func (e *environ) ControllerInstances(controllerUUID string) ([]instance.Id, error) { return e.client.getControllerIds() } @@ -144,9 +144,8 @@ env.lock.Lock() defer env.lock.Unlock() return &simplestreams.MetadataLookupParams{ - Region: region, - Endpoint: gosigma.ResolveEndpoint(region), - Architectures: arch.AllSupportedArches, - Series: config.PreferredSeries(env.ecfg), + Region: region, + Endpoint: gosigma.ResolveEndpoint(region), + Series: config.PreferredSeries(env.ecfg), }, nil } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/cloudsigma/environ_test.go juju-core-2.0.0/src/github.com/juju/juju/provider/cloudsigma/environ_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/cloudsigma/environ_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/cloudsigma/environ_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -11,7 +11,6 @@ "github.com/juju/juju/environs" "github.com/juju/juju/environs/simplestreams" 
"github.com/juju/juju/testing" - "github.com/juju/utils/arch" ) var _ environs.Environ = (*environ)(nil) @@ -34,6 +33,9 @@ func (s *environSuite) SetUpTest(c *gc.C) { s.BaseSuite.SetUpTest(c) + s.PatchValue(&newClient, func(environs.CloudSpec, string) (*environClient, error) { + return nil, nil + }) } func (s *environSuite) TearDownTest(c *gc.C) { @@ -41,17 +43,12 @@ } func (s *environSuite) TestBase(c *gc.C) { - s.PatchValue(&newClient, func(environs.CloudSpec, string) (*environClient, error) { - return nil, nil - }) - baseConfig := newConfig(c, validAttrs().Merge(testing.Attrs{"name": "testname"})) env, err := environs.New(environs.OpenParams{ Cloud: fakeCloudSpec(), Config: baseConfig, }) c.Assert(err, gc.IsNil) - env.(*environ).supportedArchitectures = []string{arch.AMD64} cfg := env.Config() c.Assert(cfg, gc.NotNil) @@ -64,44 +61,28 @@ c.Assert(hasRegion, gc.NotNil) cloudSpec, err := hasRegion.Region() - c.Check(err, gc.IsNil) + c.Assert(err, gc.IsNil) c.Check(cloudSpec.Region, gc.Not(gc.Equals), "") c.Check(cloudSpec.Endpoint, gc.Not(gc.Equals), "") - validator, err := env.ConstraintsValidator() - c.Check(validator, gc.NotNil) - c.Check(err, gc.IsNil) - - amd64, i386 := arch.AMD64, arch.I386 - _, err = validator.Validate(constraints.Value{Arch: &amd64}) - c.Check(err, gc.IsNil) - _, err = validator.Validate(constraints.Value{Arch: &i386}) - c.Check(err, gc.ErrorMatches, "invalid constraint value: arch=i386\nvalid values are: \\[amd64\\]") - c.Check(env.OpenPorts(nil), gc.IsNil) c.Check(env.ClosePorts(nil), gc.IsNil) - ports, err := env.Ports() + c.Assert(err, gc.IsNil) c.Check(ports, gc.IsNil) - c.Check(err, gc.IsNil) } func (s *environSuite) TestUnsupportedConstraints(c *gc.C) { - s.PatchValue(&newClient, func(environs.CloudSpec, string) (*environClient, error) { - return nil, nil - }) - baseConfig := newConfig(c, validAttrs().Merge(testing.Attrs{"name": "testname"})) env, err := environs.New(environs.OpenParams{ Cloud: fakeCloudSpec(), Config: baseConfig, 
}) c.Assert(err, gc.IsNil) - env.(*environ).supportedArchitectures = []string{arch.AMD64} validator, err := env.ConstraintsValidator() + c.Assert(err, gc.IsNil) c.Check(validator, gc.NotNil) - c.Check(err, gc.IsNil) unsupported, err := validator.Validate(constraints.MustParse( "arch=amd64 tags=foo cpu-power=100 virt-type=kvm", diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/cloudsigma/instance.go juju-core-2.0.0/src/github.com/juju/juju/provider/cloudsigma/instance.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/cloudsigma/instance.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/cloudsigma/instance.go 2016-10-13 14:31:49.000000000 +0000 @@ -31,19 +31,19 @@ func (i sigmaInstance) Status() instance.InstanceStatus { entityStatus := i.server.Status() logger.Tracef("sigmaInstance.Status: %s", entityStatus) - jujuStatus := status.StatusPending + jujuStatus := status.Pending switch entityStatus { case gosigma.ServerStarting: - jujuStatus = status.StatusAllocating + jujuStatus = status.Allocating case gosigma.ServerRunning: - jujuStatus = status.StatusRunning + jujuStatus = status.Running case gosigma.ServerStopping, gosigma.ServerStopped: - jujuStatus = status.StatusEmpty + jujuStatus = status.Empty case gosigma.ServerUnavailable: // I am not sure about this one. 
- jujuStatus = status.StatusPending + jujuStatus = status.Pending default: - jujuStatus = status.StatusPending + jujuStatus = status.Pending } return instance.InstanceStatus{ diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/cloudsigma/provider.go juju-core-2.0.0/src/github.com/juju/juju/provider/cloudsigma/provider.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/cloudsigma/provider.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/cloudsigma/provider.go 2016-10-13 14:31:49.000000000 +0000 @@ -81,13 +81,6 @@ return env, nil } -// RestrictedConfigAttributes are provider specific attributes stored in -// the config that really cannot or should not be changed across -// environments running inside a single juju server. -func (environProvider) RestrictedConfigAttributes() []string { - return []string{} -} - // PrepareConfig is defined by EnvironProvider. func (environProvider) PrepareConfig(args environs.PrepareConfigParams) (*config.Config, error) { if err := validateCloudSpec(args.Cloud); err != nil { @@ -124,13 +117,6 @@ return newEcfg.Config, nil } -// SecretAttrs filters the supplied configuration returning only values -// which are considered sensitive. All of the values of these secret -// attributes need to be strings. 
-func (environProvider) SecretAttrs(cfg *config.Config) (map[string]string, error) { - return map[string]string{}, nil -} - func validateCloudSpec(spec environs.CloudSpec) error { if err := spec.Validate(); err != nil { return errors.Trace(err) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/cloudsigma/storage.go juju-core-2.0.0/src/github.com/juju/juju/provider/cloudsigma/storage.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/cloudsigma/storage.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/cloudsigma/storage.go 2016-10-13 14:31:49.000000000 +0000 @@ -10,8 +10,8 @@ ) // StorageProviderTypes implements storage.ProviderRegistry. -func (*environ) StorageProviderTypes() []storage.ProviderType { - return nil +func (*environ) StorageProviderTypes() ([]storage.ProviderType, error) { + return nil, nil } // StorageProvider implements storage.ProviderRegistry. diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/common/bootstrap.go juju-core-2.0.0/src/github.com/juju/juju/provider/common/bootstrap.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/common/bootstrap.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/common/bootstrap.go 2016-10-13 14:31:49.000000000 +0000 @@ -55,8 +55,8 @@ return bsResult, nil } -// BootstrapInstance creates a new instance with the series and architecture -// of its choice, constrained to those of the available tools, and +// BootstrapInstance creates a new instance with the series of its choice, +// constrained to those of the available tools, and // returns the instance result, series, and a function that // must be called to finalize the bootstrap process by transferring // the tools and installing the initial Juju controller. 
@@ -132,24 +132,59 @@ } maybeSetBridge(instanceConfig) - fmt.Fprintln(ctx.GetStderr(), "Launching instance") + bootstrapMsg := env.BootstrapMessage() + if bootstrapMsg != "" { + ctx.Infof(bootstrapMsg) + } + + cloudRegion := args.CloudName + if args.CloudRegion != "" { + cloudRegion += "/" + args.CloudRegion + } + fmt.Fprintf(ctx.GetStderr(), "Launching controller instance(s) on %s...\n", cloudRegion) + // Print instance status reports status changes during provisioning. + // Note the carriage returns, meaning subsequent prints are to the same + // line of stderr, not a new line. instanceStatus := func(settableStatus status.Status, info string, data map[string]interface{}) error { - fmt.Fprintf(ctx.GetStderr(), "%s \r", info) + // The data arg is not expected to be used in this case, but + // print it, rather than ignore it, if we get something. + dataString := "" + if len(data) > 0 { + dataString = fmt.Sprintf(" %v", data) + } + fmt.Fprintf(ctx.GetStderr(), " - %s%s\r", info, dataString) + return nil + } + // Likely used after the final instanceStatus call to white-out the + // current stderr line before the next use, removing any residual status + // reporting output. + statusCleanup := func(info string) error { + // The leading spaces account for the leading characters + // emitted by instanceStatus above. 
+ fmt.Fprintf(ctx.GetStderr(), " %s\r", info) return nil } result, err := env.StartInstance(environs.StartInstanceParams{ - ControllerUUID: args.ControllerConfig.ControllerUUID(), - Constraints: args.BootstrapConstraints, - Tools: availableTools, - InstanceConfig: instanceConfig, - Placement: args.Placement, - ImageMetadata: imageMetadata, - StatusCallback: instanceStatus, + ControllerUUID: args.ControllerConfig.ControllerUUID(), + Constraints: args.BootstrapConstraints, + Tools: availableTools, + InstanceConfig: instanceConfig, + Placement: args.Placement, + ImageMetadata: imageMetadata, + StatusCallback: instanceStatus, + CleanupCallback: statusCleanup, }) if err != nil { return nil, "", nil, errors.Annotate(err, "cannot start bootstrap instance") } - fmt.Fprintf(ctx.GetStderr(), " - %s\n", result.Instance.Id()) + + msg := fmt.Sprintf(" - %s (%s)", result.Instance.Id(), formatHardware(result.Hardware)) + // We need some padding below to overwrite any previous messages. + if len(msg) < 40 { + padding := make([]string, 40-len(msg)) + msg += strings.Join(padding, " ") + } + fmt.Fprintln(ctx.GetStderr(), msg) finalize := func(ctx environs.BootstrapContext, icfg *instancecfg.InstanceConfig, opts environs.BootstrapDialOpts) error { icfg.Bootstrap.BootstrapMachineInstanceId = result.Instance.Id() @@ -171,6 +206,31 @@ return result, selectedSeries, finalize, nil } +func formatHardware(hw *instance.HardwareCharacteristics) string { + if hw == nil { + return "" + } + out := make([]string, 0, 3) + if hw.Arch != nil && *hw.Arch != "" { + out = append(out, fmt.Sprintf("arch=%s", *hw.Arch)) + } + if hw.Mem != nil && *hw.Mem > 0 { + out = append(out, fmt.Sprintf("mem=%s", formatMemory(*hw.Mem))) + } + if hw.CpuCores != nil && *hw.CpuCores > 0 { + out = append(out, fmt.Sprintf("cores=%d", *hw.CpuCores)) + } + return strings.Join(out, " ") +} + +func formatMemory(m uint64) string { + if m < 1024 { + return fmt.Sprintf("%dM", m) + } + s := fmt.Sprintf("%.1f", float32(m)/1024.0) + 
return strings.TrimSuffix(s, ".0") + "G" +} + // FinishBootstrap completes the bootstrap process by connecting // to the instance via SSH and carrying out the cloud-config. // @@ -259,7 +319,9 @@ }) } -type Addresser interface { +// InstanceRefresher is the subet of the Instance interface required +// for waiting for SSH access to become availble. +type InstanceRefresher interface { // Refresh refreshes the addresses for the instance. Refresh() error @@ -267,6 +329,10 @@ // To ensure that the results are up to date, call // Refresh first. Addresses() ([]network.Address, error) + + // Status returns the provider-specific status for the + // instance. + Status() instance.InstanceStatus } type RefreshableInstance struct { @@ -414,7 +480,14 @@ // the presence of a file on the machine that contains the // machine's nonce. The "checkHostScript" is a bash script // that performs this file check. -func WaitSSH(stdErr io.Writer, interrupted <-chan os.Signal, client ssh.Client, checkHostScript string, inst Addresser, opts environs.BootstrapDialOpts) (addr string, err error) { +func WaitSSH( + stdErr io.Writer, + interrupted <-chan os.Signal, + client ssh.Client, + checkHostScript string, + inst InstanceRefresher, + opts environs.BootstrapDialOpts, +) (addr string, err error) { globalTimeout := time.After(opts.Timeout) pollAddresses := time.NewTimer(0) @@ -440,6 +513,13 @@ if err := inst.Refresh(); err != nil { return "", fmt.Errorf("refreshing addresses: %v", err) } + instanceStatus := inst.Status() + if instanceStatus.Status == status.ProvisioningError { + if instanceStatus.Message != "" { + return "", errors.Errorf("instance provisioning failed (%v)", instanceStatus.Message) + } + return "", errors.Errorf("instance provisioning failed") + } addresses, err := inst.Addresses() if err != nil { return "", fmt.Errorf("getting addresses: %v", err) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/common/bootstrap_test.go 
juju-core-2.0.0/src/github.com/juju/juju/provider/common/bootstrap_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/common/bootstrap_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/common/bootstrap_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,10 +4,13 @@ package common_test import ( + "bytes" "fmt" "os" + "strings" "time" + "github.com/juju/errors" jc "github.com/juju/testing/checkers" "github.com/juju/utils/arch" "github.com/juju/utils/series" @@ -16,6 +19,7 @@ gc "gopkg.in/check.v1" "github.com/juju/juju/cloudconfig/instancecfg" + "github.com/juju/juju/cmd/modelcmd" "github.com/juju/juju/constraints" "github.com/juju/juju/environs" "github.com/juju/juju/environs/config" @@ -24,6 +28,7 @@ "github.com/juju/juju/instance" "github.com/juju/juju/network" "github.com/juju/juju/provider/common" + "github.com/juju/juju/status" coretesting "github.com/juju/juju/testing" "github.com/juju/juju/tools" jujuversion "github.com/juju/juju/version" @@ -65,7 +70,7 @@ "name": "whatever", "type": "anything, really", "uuid": coretesting.ModelTag.Id(), - "controller-uuid": coretesting.ModelTag.Id(), + "controller-uuid": coretesting.ControllerTag.Id(), "ca-cert": coretesting.CACert, "ca-private-key": coretesting.CAKey, "authorized-keys": coretesting.FakeAuthKeys, @@ -108,12 +113,12 @@ expectedMcfg.EnableOSUpgrade = env.Config().EnableOSUpgrade() expectedMcfg.Tags = map[string]string{ "juju-model-uuid": coretesting.ModelTag.Id(), - "juju-controller-uuid": coretesting.ModelTag.Id(), + "juju-controller-uuid": coretesting.ControllerTag.Id(), "juju-is-controller": "true", } c.Assert(icfg, jc.DeepEquals, expectedMcfg) - return nil, nil, nil, fmt.Errorf("meh, not started") + return nil, nil, nil, errors.Errorf("meh, not started") } env.startInstance = startInstance @@ -213,7 +218,8 @@ config: getConfig, setConfig: setConfig, } - ctx := envtesting.BootstrapContext(c) + inner := coretesting.Context(c) + ctx := 
modelcmd.BootstrapContext(inner) result, err := common.Bootstrap(ctx, env, environs.BootstrapParams{ ControllerConfig: coretesting.FakeControllerConfig(), AvailableTools: tools.List{ @@ -228,6 +234,10 @@ c.Assert(err, jc.ErrorIsNil) c.Assert(result.Arch, gc.Equals, "ppc64el") // based on hardware characteristics c.Assert(result.Series, gc.Equals, config.PreferredSeries(mocksConfig)) + output := inner.Stderr.(*bytes.Buffer) + lines := strings.Split(output.String(), "\n") + c.Assert(len(lines), jc.GreaterThan, 1) + c.Assert(lines[0], gc.Equals, "Some message") } type neverRefreshes struct { @@ -237,6 +247,10 @@ return nil } +func (neverRefreshes) Status() instance.InstanceStatus { + return instance.InstanceStatus{} +} + type neverAddresses struct { neverRefreshes } @@ -245,6 +259,18 @@ return nil, nil } +type failsProvisioning struct { + neverAddresses + message string +} + +func (f failsProvisioning) Status() instance.InstanceStatus { + return instance.InstanceStatus{ + Status: status.ProvisioningError, + Message: f.message, + } +} + var testSSHTimeout = environs.BootstrapDialOpts{ Timeout: coretesting.ShortWait, RetryDelay: 1 * time.Millisecond, @@ -267,12 +293,20 @@ c.Check(coretesting.Stderr(ctx), gc.Matches, "Waiting for address\n") } +func (s *BootstrapSuite) TestWaitSSHNoticesProvisioningFailures(c *gc.C) { + ctx := coretesting.Context(c) + _, err := common.WaitSSH(ctx.Stderr, nil, ssh.DefaultClient, "/bin/true", failsProvisioning{}, testSSHTimeout) + c.Check(err, gc.ErrorMatches, `instance provisioning failed`) + _, err = common.WaitSSH(ctx.Stderr, nil, ssh.DefaultClient, "/bin/true", failsProvisioning{message: "blargh"}, testSSHTimeout) + c.Check(err, gc.ErrorMatches, `instance provisioning failed \(blargh\)`) +} + type brokenAddresses struct { neverRefreshes } func (brokenAddresses) Addresses() ([]network.Address, error) { - return nil, fmt.Errorf("Addresses will never work") + return nil, errors.Errorf("Addresses will never work") } func (s 
*BootstrapSuite) TestWaitSSHStopsOnBadError(c *gc.C) { @@ -343,6 +377,10 @@ return nil } +func (ac *addressesChange) Status() instance.InstanceStatus { + return instance.InstanceStatus{} +} + func (ac *addressesChange) Addresses() ([]network.Address, error) { return network.NewAddresses(ac.addrs[0]...), nil } @@ -368,3 +406,56 @@ "Waiting for address\n"+ "(.|\n)*(Attempting to connect to 0.1.2.4:22\n)+(.|\n)*") } + +type FormatHardwareSuite struct{} + +var _ = gc.Suite(&FormatHardwareSuite{}) + +func (s *FormatHardwareSuite) check(c *gc.C, hw *instance.HardwareCharacteristics, expected string) { + c.Check(common.FormatHardware(hw), gc.Equals, expected) +} + +func (s *FormatHardwareSuite) TestNil(c *gc.C) { + s.check(c, nil, "") +} + +func (s *FormatHardwareSuite) TestFieldsNil(c *gc.C) { + s.check(c, &instance.HardwareCharacteristics{}, "") +} + +func (s *FormatHardwareSuite) TestArch(c *gc.C) { + arch := "" + s.check(c, &instance.HardwareCharacteristics{Arch: &arch}, "") + arch = "amd64" + s.check(c, &instance.HardwareCharacteristics{Arch: &arch}, "arch=amd64") +} + +func (s *FormatHardwareSuite) TestCores(c *gc.C) { + var cores uint64 + s.check(c, &instance.HardwareCharacteristics{CpuCores: &cores}, "") + cores = 24 + s.check(c, &instance.HardwareCharacteristics{CpuCores: &cores}, "cores=24") +} + +func (s *FormatHardwareSuite) TestMem(c *gc.C) { + var mem uint64 + s.check(c, &instance.HardwareCharacteristics{Mem: &mem}, "") + mem = 800 + s.check(c, &instance.HardwareCharacteristics{Mem: &mem}, "mem=800M") + mem = 1024 + s.check(c, &instance.HardwareCharacteristics{Mem: &mem}, "mem=1G") + mem = 2712 + s.check(c, &instance.HardwareCharacteristics{Mem: &mem}, "mem=2.6G") +} + +func (s *FormatHardwareSuite) TestAll(c *gc.C) { + arch := "ppc64" + var cores uint64 = 2 + var mem uint64 = 123 + hw := &instance.HardwareCharacteristics{ + Arch: &arch, + CpuCores: &cores, + Mem: &mem, + } + s.check(c, hw, "arch=ppc64 mem=123M cores=2") +} diff -Nru 
juju-core-2.0~beta15/src/github.com/juju/juju/provider/common/destroy.go juju-core-2.0.0/src/github.com/juju/juju/provider/common/destroy.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/common/destroy.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/common/destroy.go 2016-10-13 14:31:49.000000000 +0000 @@ -49,7 +49,11 @@ func destroyStorage(env environs.Environ) error { logger.Infof("destroying storage") - for _, storageProviderType := range env.StorageProviderTypes() { + storageProviderTypes, err := env.StorageProviderTypes() + if err != nil { + return errors.Trace(err) + } + for _, storageProviderType := range storageProviderTypes { storageProvider, err := env.StorageProvider(storageProviderType) if err != nil { return errors.Trace(err) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/common/disk.go juju-core-2.0.0/src/github.com/juju/juju/provider/common/disk.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/common/disk.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/common/disk.go 2016-10-13 14:31:49.000000000 +0000 @@ -5,16 +5,16 @@ import ( jujuos "github.com/juju/utils/os" - "github.com/juju/utils/series" + jujuseries "github.com/juju/utils/series" ) // MinRootDiskSizeGiB is the minimum size for the root disk of an // instance, in Gigabytes. This value accommodates the anticipated // size of the initial image, any updates, and future application // data. 
-func MinRootDiskSizeGiB(ser string) uint64 { +func MinRootDiskSizeGiB(series string) uint64 { // See comment below that explains why we're ignoring the error - os, _ := series.GetOSFromSeries(ser) + os, _ := jujuseries.GetOSFromSeries(series) switch os { case jujuos.Ubuntu, jujuos.CentOS: return 8 diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/common/export_test.go juju-core-2.0.0/src/github.com/juju/juju/provider/common/export_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/common/export_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/common/export_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -6,4 +6,5 @@ var ( ConnectSSH = &connectSSH InternalAvailabilityZoneAllocations = &internalAvailabilityZoneAllocations + FormatHardware = formatHardware ) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/common/mock_test.go juju-core-2.0.0/src/github.com/juju/juju/provider/common/mock_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/common/mock_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/common/mock_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -38,10 +38,6 @@ environs.Environ // stub out other methods with panics } -func (*mockEnviron) SupportedArchitectures() ([]string, error) { - return []string{"amd64", "arm64"}, nil -} - func (env *mockEnviron) Storage() storage.Storage { return env.storage } @@ -49,6 +45,11 @@ func (env *mockEnviron) AllInstances() ([]instance.Instance, error) { return env.allInstances() } + +func (env *mockEnviron) BootstrapMessage() string { + return "Some message" +} + func (env *mockEnviron) StartInstance(args environs.StartInstanceParams) (*environs.StartInstanceResult, error) { inst, hw, networkInfo, err := env.startInstance( args.Placement, @@ -90,7 +91,7 @@ return []simplestreams.DataSource{datasource}, nil } -func (env *mockEnviron) StorageProviderTypes() 
[]jujustorage.ProviderType { +func (env *mockEnviron) StorageProviderTypes() ([]jujustorage.ProviderType, error) { return env.storageProviders.StorageProviderTypes() } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/common/provider.go juju-core-2.0.0/src/github.com/juju/juju/provider/common/provider.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/common/provider.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/common/provider.go 2016-10-13 14:31:49.000000000 +0000 @@ -7,12 +7,11 @@ "github.com/juju/errors" "github.com/juju/juju/environs" - "github.com/juju/juju/environs/imagemetadata" ) // DefaultProvider exposes the various common implementations found in // this package as methods of a single type. This facilitates treating -// the implentations as a bundle, e.g. satisfying interfaces. +// the implementations as a bundle, e.g. satisfying interfaces. type DefaultProvider struct { // Env is the Juju environment that methods target. Env environs.Environ @@ -34,13 +33,3 @@ } return nil } - -// SupportedArchitectures returns all the image architectures for env -// matching the constraints. -func (dp DefaultProvider) SupportedArchitectures(imageConstraint *imagemetadata.ImageConstraint) ([]string, error) { - arches, err := SupportedArchitectures(dp.Env, imageConstraint) - if err != nil { - return nil, errors.Trace(err) - } - return arches, nil -} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/common/state.go juju-core-2.0.0/src/github.com/juju/juju/provider/common/state.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/common/state.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/common/state.go 2016-10-13 14:31:49.000000000 +0000 @@ -130,10 +130,7 @@ } // ProviderStateInstances extracts the instance IDs from provider-state. 
-func ProviderStateInstances( - env environs.Environ, - stor storage.StorageReader, -) ([]instance.Id, error) { +func ProviderStateInstances(stor storage.StorageReader) ([]instance.Id, error) { st, err := LoadState(stor) if err != nil { return nil, err diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/common/supportedarchitectures.go juju-core-2.0.0/src/github.com/juju/juju/provider/common/supportedarchitectures.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/common/supportedarchitectures.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/common/supportedarchitectures.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,28 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package common - -import ( - "github.com/juju/utils/set" - - "github.com/juju/juju/environs" - "github.com/juju/juju/environs/imagemetadata" -) - -// SupportedArchitectures returns all the image architectures for env matching the constraints. 
-func SupportedArchitectures(env environs.Environ, imageConstraint *imagemetadata.ImageConstraint) ([]string, error) { - sources, err := environs.ImageMetadataSources(env) - if err != nil { - return nil, err - } - matchingImages, _, err := imagemetadata.Fetch(sources, imageConstraint) - if err != nil { - return nil, err - } - var arches = set.NewStrings() - for _, im := range matchingImages { - arches.Add(im.Arch) - } - return arches.SortedValues(), nil -} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/common/supportedarchitectures_test.go juju-core-2.0.0/src/github.com/juju/juju/provider/common/supportedarchitectures_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/common/supportedarchitectures_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/common/supportedarchitectures_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,97 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. 
- -package common_test - -import ( - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" - - "github.com/juju/juju/environs" - "github.com/juju/juju/environs/filestorage" - "github.com/juju/juju/environs/imagemetadata" - imagetesting "github.com/juju/juju/environs/imagemetadata/testing" - "github.com/juju/juju/environs/simplestreams" - "github.com/juju/juju/provider/common" - coretesting "github.com/juju/juju/testing" -) - -type archSuite struct { - coretesting.FakeJujuXDGDataHomeSuite -} - -var _ = gc.Suite(&archSuite{}) - -func (s *archSuite) setupMetadata(c *gc.C, arches []string) (environs.Environ, simplestreams.CloudSpec) { - imagetesting.PatchOfficialDataSources(&s.CleanupSuite, "") - env := &mockEnviron{ - config: configGetter(c), - } - - var images []*imagemetadata.ImageMetadata - for _, arch := range arches { - images = append(images, &imagemetadata.ImageMetadata{ - Id: "image-id", - Arch: arch, - RegionName: "Region", - Endpoint: "https://endpoint/", - }) - } - // Append an image from another region with some other arch to ensure it is ignored. 
- images = append(images, &imagemetadata.ImageMetadata{ - Id: "image-id", - Arch: "arch", - RegionName: "Region-Two", - Endpoint: "https://endpoint/", - }) - cloudSpec := simplestreams.CloudSpec{ - Region: "Region", - Endpoint: "https://endpoint/", - } - - metadataDir := c.MkDir() - stor, err := filestorage.NewFileStorageWriter(metadataDir) - c.Assert(err, jc.ErrorIsNil) - err = imagemetadata.MergeAndWriteMetadata("precise", images, &cloudSpec, stor) - c.Assert(err, jc.ErrorIsNil) - - id := "SupportedArchitectures" - environs.RegisterImageDataSourceFunc(id, func(environs.Environ) (simplestreams.DataSource, error) { - return simplestreams.NewURLDataSource(id, "file://"+metadataDir+"/images", false, simplestreams.DEFAULT_CLOUD_DATA, false), nil - }) - s.AddCleanup(func(*gc.C) { - environs.UnregisterImageDataSourceFunc(id) - }) - - return env, cloudSpec -} - -func (s *archSuite) TestSupportedArchitecturesNone(c *gc.C) { - env, cloudSpec := s.setupMetadata(c, nil) - imageConstraint := imagemetadata.NewImageConstraint(simplestreams.LookupParams{ - CloudSpec: cloudSpec, - }) - arches, err := common.SupportedArchitectures(env, imageConstraint) - c.Assert(err, jc.ErrorIsNil) - c.Assert(arches, gc.HasLen, 0) -} - -func (s *archSuite) TestSupportedArchitecturesOne(c *gc.C) { - env, cloudSpec := s.setupMetadata(c, []string{"ppc64el"}) - imageConstraint := imagemetadata.NewImageConstraint(simplestreams.LookupParams{ - CloudSpec: cloudSpec, - }) - arches, err := common.SupportedArchitectures(env, imageConstraint) - c.Assert(err, jc.ErrorIsNil) - c.Assert(arches, jc.SameContents, []string{"ppc64el"}) -} - -func (s *archSuite) TestSupportedArchitecturesMany(c *gc.C) { - env, cloudSpec := s.setupMetadata(c, []string{"ppc64el", "amd64"}) - imageConstraint := imagemetadata.NewImageConstraint(simplestreams.LookupParams{ - CloudSpec: cloudSpec, - }) - arches, err := common.SupportedArchitectures(env, imageConstraint) - c.Assert(err, jc.ErrorIsNil) - c.Assert(arches, jc.SameContents, 
[]string{"amd64", "ppc64el"}) -} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/dummy/config_test.go juju-core-2.0.0/src/github.com/juju/juju/provider/dummy/config_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/dummy/config_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/dummy/config_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -26,31 +26,6 @@ dummy.Reset(c) } -func (*ConfigSuite) TestSecretAttrs(c *gc.C) { - attrs := dummy.SampleConfig().Delete("secret") - ctx := envtesting.BootstrapContext(c) - env, err := bootstrap.Prepare( - ctx, jujuclienttesting.NewMemStore(), - bootstrap.PrepareParams{ - ControllerConfig: testing.FakeControllerConfig(), - ModelConfig: attrs, - ControllerName: attrs["name"].(string), - Cloud: dummy.SampleCloudSpec(), - AdminSecret: AdminSecret, - }, - ) - c.Assert(err, jc.ErrorIsNil) - defer env.Destroy() - expected := map[string]string{ - "secret": "pork", - } - cfg, err := config.New(config.NoDefaults, attrs) - c.Assert(err, jc.ErrorIsNil) - actual, err := env.Provider().SecretAttrs(cfg) - c.Assert(err, jc.ErrorIsNil) - c.Assert(actual, gc.DeepEquals, expected) -} - var firewallModeTests = []struct { configFirewallMode string firewallMode string diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/dummy/environs.go juju-core-2.0.0/src/github.com/juju/juju/provider/dummy/environs.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/dummy/environs.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/dummy/environs.go 2016-10-13 14:31:49.000000000 +0000 @@ -16,16 +16,13 @@ // // The DNS name of instances is the same as the Id, // with ".dns" appended. -// -// To avoid enumerating all possible series and architectures, -// any series or architecture with the prefix "unknown" is -// treated as bad when starting a new instance. 
package dummy import ( "fmt" "net" "os" + "runtime" "strconv" "strings" "sync" @@ -33,10 +30,12 @@ "github.com/juju/errors" "github.com/juju/loggo" + "github.com/juju/retry" "github.com/juju/schema" gitjujutesting "github.com/juju/testing" jc "github.com/juju/testing/checkers" "github.com/juju/utils/arch" + "github.com/juju/utils/clock" "github.com/juju/utils/series" gc "gopkg.in/check.v1" "gopkg.in/juju/environschema.v1" @@ -77,13 +76,15 @@ // SampleCloudSpec returns an environs.CloudSpec that can be used to // open a dummy Environ. func SampleCloudSpec() environs.CloudSpec { - cred := cloud.NewEmptyCredential() + cred := cloud.NewCredential(cloud.UserPassAuthType, map[string]string{"username": "dummy", "passeord": "secret"}) return environs.CloudSpec{ - Type: "dummy", - Name: "dummy", - Endpoint: "dummy-endpoint", - StorageEndpoint: "dummy-storage-endpoint", - Credential: &cred, + Type: "dummy", + Name: "dummy", + Endpoint: "dummy-endpoint", + IdentityEndpoint: "dummy-identity-endpoint", + Region: "dummy-region", + StorageEndpoint: "dummy-storage-endpoint", + Credential: &cred, } } @@ -147,9 +148,10 @@ } type OpDestroy struct { - Env string - Cloud environs.CloudSpec - Error error + Env string + Cloud string + CloudRegion string + Error error } type OpNetworkInterfaces struct { @@ -215,11 +217,12 @@ supportsSpaces bool supportsSpaceDiscovery bool apiPort int + controllerState *environState state map[string]*environState } -// ApiPort returns the randon api port used by the given provider instance. -func ApiPort(p environs.EnvironProvider) int { +// APIPort returns the randon api port used by the given provider instance. +func APIPort(p environs.EnvironProvider) int { return p.(*environProvider).apiPort } @@ -227,20 +230,20 @@ // It can be shared between several environ values, // so that a given environment can be opened several times. 
type environState struct { - name string - ops chan<- Operation - newStatePolicy state.NewPolicyFunc - mu sync.Mutex - maxId int // maximum instance id allocated so far. - maxAddr int // maximum allocated address last byte - insts map[instance.Id]*dummyInstance - globalPorts map[network.PortRange]bool - bootstrapped bool - apiListener net.Listener - apiServer *apiserver.Server - apiState *state.State - apiStatePool *state.StatePool - bootstrapConfig *config.Config + name string + ops chan<- Operation + newStatePolicy state.NewPolicyFunc + mu sync.Mutex + maxId int // maximum instance id allocated so far. + maxAddr int // maximum allocated address last byte + insts map[instance.Id]*dummyInstance + globalPorts map[network.PortRange]bool + bootstrapped bool + apiListener net.Listener + apiServer *apiserver.Server + apiState *state.State + apiStatePool *state.StatePool + creator string } // environ represents a client's connection to a given environment's @@ -288,6 +291,7 @@ dummy.mu.Lock() dummy.ops = discardOperations oldState := dummy.state + dummy.controllerState = nil dummy.state = make(map[string]*environState) dummy.newStatePolicy = stateenvirons.GetNewPolicyFunc( stateenvirons.GetNewEnvironFunc(environs.New), @@ -308,7 +312,17 @@ s.destroy() } if mongoAlive() { - err := gitjujutesting.MgoServer.Reset() + err := retry.Call(retry.CallArgs{ + Func: gitjujutesting.MgoServer.Reset, + // Only interested in retrying the intermittent + // 'unexpected message'. + IsFatalError: func(err error) bool { + return !strings.HasSuffix(err.Error(), "unexpected message") + }, + Delay: time.Millisecond, + Clock: clock.WallClock, + Attempts: 5, + }) c.Assert(err, jc.ErrorIsNil) } } @@ -393,12 +407,15 @@ // newState creates the state for a new environment with the given name. 
func newState(name string, ops chan<- Operation, newStatePolicy state.NewPolicyFunc) *environState { + buf := make([]byte, 8192) + buf = buf[:runtime.Stack(buf, false)] s := &environState{ name: name, ops: ops, newStatePolicy: newStatePolicy, insts: make(map[instance.Id]*dummyInstance), globalPorts: make(map[network.PortRange]bool), + creator: string(buf), } return s } @@ -511,14 +528,44 @@ return fields } -func (p *environProvider) CredentialSchemas() map[cloud.AuthType]cloud.CredentialSchema { - return map[cloud.AuthType]cloud.CredentialSchema{cloud.EmptyAuthType: {}} +var _ config.ConfigSchemaSource = (*environProvider)(nil) + +// ConfigSchema returns extra config attributes specific +// to this provider only. +func (p environProvider) ConfigSchema() schema.Fields { + return configFields +} + +// ConfigDefaults returns the default values for the +// provider specific config attributes. +func (p environProvider) ConfigDefaults() schema.Defaults { + return configDefaults +} + +func (environProvider) CredentialSchemas() map[cloud.AuthType]cloud.CredentialSchema { + return map[cloud.AuthType]cloud.CredentialSchema{ + cloud.EmptyAuthType: {}, + cloud.UserPassAuthType: { + { + "username", cloud.CredentialAttr{Description: "The username to authenticate with."}, + }, { + "password", cloud.CredentialAttr{ + Description: "The password for the specified username.", + Hidden: true, + }, + }, + }, + } } func (*environProvider) DetectCredentials() (*cloud.CloudCredential, error) { return cloud.NewEmptyCloudCredential(), nil } +func (*environProvider) FinalizeCredential(ctx environs.FinalizeCredentialContext, args environs.FinalizeCredentialParams) (*cloud.Credential, error) { + return &args.Credential, nil +} + func (*environProvider) DetectRegions() ([]cloud.Region, error) { return []cloud.Region{{Name: "dummy"}}, nil } @@ -566,72 +613,12 @@ return env, nil } -// RestrictedConfigAttributes is specified in the EnvironProvider interface. 
-func (p *environProvider) RestrictedConfigAttributes() []string { - return nil -} - // PrepareConfig is specified in the EnvironProvider interface. func (p *environProvider) PrepareConfig(args environs.PrepareConfigParams) (*config.Config, error) { - ecfg, err := p.newConfig(args.Config) - if err != nil { + if _, err := dummy.newConfig(args.Config); err != nil { return nil, err } - p.mu.Lock() - defer p.mu.Unlock() - - controllerUUID := args.ControllerUUID - if controllerUUID != args.Config.UUID() { - // NOTE: this check might appear redundant, but it's not: some tests - // (apiserver/modelmanager) inject a string value and determine that - // the config is validated later; validating here would render that - // test meaningless. - if args.Config.AllAttrs()["controller"] == true { - // NOTE: cfg.Apply *does* validate, but we're only adding a - // valid value so it doesn't matter. - return args.Config.Apply(map[string]interface{}{ - "controller": false, - }) - } - return args.Config, nil - } - - envState, ok := p.state[controllerUUID] - if ok { - // PrepareConfig is expected to return the same result given - // the same input. We assume that the args are the same for a - // previously prepared/bootstrapped controller. - return envState.bootstrapConfig, nil - } - - name := args.Config.Name() - if ecfg.controller() && len(p.state) != 0 { - for _, old := range p.state { - panic(fmt.Errorf("cannot share a state between two dummy environs; old %q; new %q", old.name, name)) - } - } - - // The environment has not been prepared, so create it and record it. - // We don't start listening for State or API connections until - // PrepareForBootstrap has been called. 
- envState = newState(name, p.ops, p.newStatePolicy) - cfg := args.Config - if ecfg.controller() { - p.apiPort = envState.listenAPI() - } - envState.bootstrapConfig = cfg - p.state[controllerUUID] = envState - return cfg, nil -} - -func (*environProvider) SecretAttrs(cfg *config.Config) (map[string]string, error) { - ecfg, err := dummy.newConfig(cfg) - if err != nil { - return nil, err - } - return map[string]string{ - "secret": ecfg.secret(), - }, nil + return args.Config, nil } // Override for testing - the data directory with which the state api server is initialised. @@ -664,11 +651,35 @@ // Create is part of the Environ interface. func (e *environ) Create(args environs.CreateParams) error { + dummy.mu.Lock() + defer dummy.mu.Unlock() + dummy.state[e.modelUUID] = newState(e.name, dummy.ops, dummy.newStatePolicy) return nil } // PrepareForBootstrap is part of the Environ interface. func (e *environ) PrepareForBootstrap(ctx environs.BootstrapContext) error { + dummy.mu.Lock() + defer dummy.mu.Unlock() + ecfg := e.ecfgUnlocked + + if ecfg.controller() && dummy.controllerState != nil { + // Because of global variables, we can only have one dummy + // controller per process. Panic if there is an attempt to + // bootstrap while there is another active controller. + old := dummy.controllerState + panic(fmt.Errorf("cannot share a state between two dummy environs; old %q; new %q: %s", old.name, e.name, old.creator)) + } + + // The environment has not been prepared, so create it and record it. + // We don't start listening for State or API connections until + // Bootstrap has been called. 
+ envState := newState(e.name, dummy.ops, dummy.newStatePolicy) + if ecfg.controller() { + dummy.apiPort = envState.listenAPI() + dummy.controllerState = envState + } + dummy.state[e.modelUUID] = envState return nil } @@ -685,7 +696,7 @@ return nil, err } if _, ok := args.ControllerConfig.CACert(); !ok { - return nil, fmt.Errorf("no CA certificate in controller configuration") + return nil, errors.New("no CA certificate in controller configuration") } logger.Infof("would pick tools from %s", availableTools) @@ -697,7 +708,7 @@ estate.mu.Lock() defer estate.mu.Unlock() if estate.bootstrapped { - return nil, fmt.Errorf("model is already bootstrapped") + return nil, errors.New("model is already bootstrapped") } // Create an instance for the bootstrap node. @@ -723,10 +734,20 @@ return err } - cloudCredentials := make(map[string]cloud.Credential) - if icfg.Bootstrap.ControllerCloudCredential != nil { - cloudCredentials[icfg.Bootstrap.ControllerCloudCredentialName] = - *icfg.Bootstrap.ControllerCloudCredential + adminUser := names.NewUserTag("admin@local") + var cloudCredentialTag names.CloudCredentialTag + if icfg.Bootstrap.ControllerCloudCredentialName != "" { + cloudCredentialTag = names.NewCloudCredentialTag(fmt.Sprintf( + "%s/%s/%s", + icfg.Bootstrap.ControllerCloudName, + adminUser.Id(), + icfg.Bootstrap.ControllerCloudCredentialName, + )) + } + + cloudCredentials := make(map[names.CloudCredentialTag]cloud.Credential) + if icfg.Bootstrap.ControllerCloudCredential != nil && icfg.Bootstrap.ControllerCloudCredentialName != "" { + cloudCredentials[cloudCredentialTag] = *icfg.Bootstrap.ControllerCloudCredential } info := stateInfo() @@ -735,14 +756,15 @@ // user is constructed with an empty password here. // It is set just below. 
st, err := state.Initialize(state.InitializeParams{ + Clock: clock.WallClock, ControllerConfig: icfg.Controller.Config, ControllerModelArgs: state.ModelArgs{ - Owner: names.NewUserTag("admin@local"), + Owner: adminUser, Config: icfg.Bootstrap.ControllerModelConfig, Constraints: icfg.Bootstrap.BootstrapMachineConstraints, CloudName: icfg.Bootstrap.ControllerCloudName, CloudRegion: icfg.Bootstrap.ControllerCloudRegion, - CloudCredential: icfg.Bootstrap.ControllerCloudCredentialName, + CloudCredential: cloudCredentialTag, StorageProviderRegistry: e, }, Cloud: icfg.Bootstrap.ControllerCloud, @@ -781,13 +803,16 @@ estate.apiStatePool = state.NewStatePool(st) estate.apiServer, err = apiserver.NewServer(st, estate.apiListener, apiserver.ServerConfig{ - Cert: []byte(testing.ServerCert), - Key: []byte(testing.ServerKey), + Clock: clock.WallClock, + Cert: testing.ServerCert, + Key: testing.ServerKey, Tag: names.NewMachineTag("0"), DataDir: DataDir, LogDir: LogDir, StatePool: estate.apiStatePool, NewObserver: func() observer.Observer { return &fakeobserver.Instance{} }, + // Should never be used but prevent external access just in case. + AutocertURL: "https://0.1.2.3/no-autocert-here", }) if err != nil { panic(err) @@ -806,6 +831,11 @@ return bsResult, nil } +// BootstrapMessage is part of the Environ interface. 
+func (e *environ) BootstrapMessage() string { + return "" +} + func (e *environ) ControllerInstances(controllerUUID string) ([]instance.Id, error) { estate, err := e.state() if err != nil { @@ -863,11 +893,13 @@ estate.mu.Lock() ops := estate.ops name := estate.name + delete(dummy.state, e.modelUUID) estate.mu.Unlock() ops <- OpDestroy{ - Env: name, - Cloud: e.cloud, - Error: res, + Env: name, + Cloud: e.cloud.Name, + CloudRegion: e.cloud.Region, + Error: res, } }() if err := e.checkBroken("Destroy"); err != nil { @@ -885,7 +917,7 @@ return err } dummy.mu.Lock() - delete(dummy.state, e.modelUUID) + dummy.controllerState = nil dummy.mu.Unlock() return nil } @@ -895,9 +927,7 @@ validator := constraints.NewValidator() validator.RegisterUnsupported([]string{constraints.CpuPower, constraints.VirtType}) validator.RegisterConflicts([]string{constraints.InstanceType}, []string{constraints.Mem}) - validator.RegisterVocabulary(constraints.Arch, []string{ - arch.AMD64, arch.I386, arch.PPC64EL, arch.ARM64, - }) + validator.RegisterVocabulary(constraints.Arch, []string{arch.AMD64, arch.ARM64, arch.I386, arch.PPC64EL}) return validator, nil } @@ -1416,7 +1446,7 @@ inst.mu.Lock() defer inst.mu.Unlock() // TODO(perrito666) add a provider status -> juju status mapping. 
- jujuStatus := status.StatusPending + jujuStatus := status.Pending if inst.status != "" { dummyStatus := status.Status(inst.status) if dummyStatus.KnownInstanceStatus() { @@ -1568,6 +1598,6 @@ return nil, errors.NotSupportedf("container address allocation") } -func (e *environ) ReleaseContainerAddresses(interfaces []network.InterfaceInfo) error { +func (e *environ) ReleaseContainerAddresses(interfaces []network.ProviderInterfaceInfo) error { return errors.NotSupportedf("container address allocation") } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/ec2/config.go juju-core-2.0.0/src/github.com/juju/juju/provider/ec2/config.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/ec2/config.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/ec2/config.go 2016-10-13 14:31:49.000000000 +0000 @@ -71,6 +71,18 @@ return fields } +// ConfigSchema returns extra config attributes specific +// to this provider only. +func (p environProvider) ConfigSchema() schema.Fields { + return configFields +} + +// ConfigDefaults returns the default values for the +// provider specific config attributes. +func (p environProvider) ConfigDefaults() schema.Defaults { + return configDefaults +} + func validateConfig(cfg, old *config.Config) (*environConfig, error) { // Check for valid changes for the base config values. 
if err := config.Validate(cfg, old); err != nil { diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/ec2/config_test.go juju-core-2.0.0/src/github.com/juju/juju/provider/ec2/config_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/ec2/config_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/ec2/config_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -31,11 +31,6 @@ var _ = gc.Suite(&ConfigSuite{}) -var configTestRegion = aws.Region{ - Name: "configtest", - EC2Endpoint: "testregion.nowhere:1234", -} - var testAuth = aws.Auth{ AccessKey: "gopher", SecretKey: "long teeth", @@ -318,13 +313,11 @@ err = utils.SetHome(home) c.Assert(err, jc.ErrorIsNil) - aws.Regions["configtest"] = configTestRegion } func (s *ConfigSuite) TearDownTest(c *gc.C) { err := utils.SetHome(s.savedHome) c.Assert(err, jc.ErrorIsNil) - delete(aws.Regions, "configtest") s.BaseSuite.TearDownTest(c) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/ec2/credentials.go juju-core-2.0.0/src/github.com/juju/juju/provider/ec2/credentials.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/ec2/credentials.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/ec2/credentials.go 2016-10-13 14:31:49.000000000 +0000 @@ -15,6 +15,7 @@ "gopkg.in/ini.v1" "github.com/juju/juju/cloud" + "github.com/juju/juju/environs" ) type environProviderCredentials struct{} @@ -126,3 +127,8 @@ user: accessKeyCredential, }}, nil } + +// FinalizeCredential is part of the environs.ProviderCredentials interface. 
+func (environProviderCredentials) FinalizeCredential(_ environs.FinalizeCredentialContext, args environs.FinalizeCredentialParams) (*cloud.Credential, error) { + return &args.Credential, nil +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/ec2/ebs.go juju-core-2.0.0/src/github.com/juju/juju/provider/ec2/ebs.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/ec2/ebs.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/ec2/ebs.go 2016-10-13 14:31:49.000000000 +0000 @@ -41,13 +41,18 @@ EBS_Encrypted = "encrypted" volumeTypeMagnetic = "magnetic" // standard - volumeTypeSsd = "ssd" // gp2 + volumeTypeSSD = "ssd" // gp2 volumeTypeProvisionedIops = "provisioned-iops" // io1 volumeTypeStandard = "standard" - volumeTypeGp2 = "gp2" - volumeTypeIo1 = "io1" + volumeTypeGP2 = "gp2" + volumeTypeIO1 = "io1" rootDiskDeviceName = "/dev/sda1" + + // defaultControllerDiskSizeMiB is the default size for the + // root disk of controller machines, if no root-disk constraint + // is specified. + defaultControllerDiskSizeMiB = 32 * 1024 ) // AWS error codes @@ -78,11 +83,11 @@ // maxMagneticVolumeSizeGiB is the maximum size for magnetic volumes in GiB. maxMagneticVolumeSizeGiB = 1024 - // minSsdVolumeSizeGiB is the minimum size for SSD volumes in GiB. - minSsdVolumeSizeGiB = 1 + // minSSDVolumeSizeGiB is the minimum size for SSD volumes in GiB. + minSSDVolumeSizeGiB = 1 - // maxSsdVolumeSizeGiB is the maximum size for SSD volumes in GiB. - maxSsdVolumeSizeGiB = 16 * 1024 + // maxSSDVolumeSizeGiB is the maximum size for SSD volumes in GiB. + maxSSDVolumeSizeGiB = 16 * 1024 // minProvisionedIopsVolumeSizeGiB is the minimum size of provisioned IOPS // volumes in GiB. @@ -115,8 +120,8 @@ var deviceInUseRegexp = regexp.MustCompile(".*Attachment point .* is already in use") // StorageProviderTypes implements storage.ProviderRegistry. 
-func (env *environ) StorageProviderTypes() []storage.ProviderType { - return []storage.ProviderType{EBS_ProviderType} +func (env *environ) StorageProviderTypes() ([]storage.ProviderType, error) { + return []storage.ProviderType{EBS_ProviderType}, nil } // StorageProvider implements storage.ProviderRegistry. @@ -137,11 +142,11 @@ var ebsConfigFields = schema.Fields{ EBS_VolumeType: schema.OneOf( schema.Const(volumeTypeMagnetic), - schema.Const(volumeTypeSsd), + schema.Const(volumeTypeSSD), schema.Const(volumeTypeProvisionedIops), schema.Const(volumeTypeStandard), - schema.Const(volumeTypeGp2), - schema.Const(volumeTypeIo1), + schema.Const(volumeTypeGP2), + schema.Const(volumeTypeIO1), ), EBS_IOPS: schema.ForceInt(), EBS_Encrypted: schema.Bool(), @@ -178,15 +183,15 @@ switch ebsConfig.volumeType { case volumeTypeMagnetic: ebsConfig.volumeType = volumeTypeStandard - case volumeTypeSsd: - ebsConfig.volumeType = volumeTypeGp2 + case volumeTypeSSD: + ebsConfig.volumeType = volumeTypeGP2 case volumeTypeProvisionedIops: - ebsConfig.volumeType = volumeTypeIo1 + ebsConfig.volumeType = volumeTypeIO1 } - if ebsConfig.iops > 0 && ebsConfig.volumeType != volumeTypeIo1 { + if ebsConfig.iops > 0 && ebsConfig.volumeType != volumeTypeIO1 { return nil, errors.Errorf("IOPS specified, but volume type is %q", volumeType) - } else if ebsConfig.iops == 0 && ebsConfig.volumeType == volumeTypeIo1 { - return nil, errors.Errorf("volume type is %q, IOPS unspecified or zero", volumeTypeIo1) + } else if ebsConfig.iops == 0 && ebsConfig.volumeType == volumeTypeIO1 { + return nil, errors.Errorf("volume type is %q, IOPS unspecified or zero", volumeTypeIO1) } return ebsConfig, nil } @@ -215,7 +220,7 @@ // DefaultPools is defined on the Provider interface. 
func (e *ebsProvider) DefaultPools() []*storage.Config { ssdPool, _ := storage.NewConfig("ebs-ssd", EBS_ProviderType, map[string]interface{}{ - EBS_VolumeType: volumeTypeSsd, + EBS_VolumeType: volumeTypeSSD, }) return []*storage.Config{ssdPool} } @@ -579,10 +584,10 @@ case volumeTypeStandard: minVolumeSize = minMagneticVolumeSizeGiB maxVolumeSize = maxMagneticVolumeSizeGiB - case volumeTypeGp2: - minVolumeSize = minSsdVolumeSizeGiB - maxVolumeSize = maxSsdVolumeSizeGiB - case volumeTypeIo1: + case volumeTypeGP2: + minVolumeSize = minSSDVolumeSizeGiB + maxVolumeSize = maxSSDVolumeSizeGiB + case volumeTypeIO1: minVolumeSize = minProvisionedIopsVolumeSizeGiB maxVolumeSize = maxProvisionedIopsVolumeSizeGiB } @@ -869,24 +874,32 @@ } } -func minRootDiskSizeMiB(ser string) uint64 { - return gibToMib(common.MinRootDiskSizeGiB(ser)) +func minRootDiskSizeMiB(series string) uint64 { + return gibToMib(common.MinRootDiskSizeGiB(series)) } // getBlockDeviceMappings translates constraints into BlockDeviceMappings. // // The first entry is always the root disk mapping, followed by instance // stores (ephemeral disks). 
-func getBlockDeviceMappings(cons constraints.Value, ser string) []ec2.BlockDeviceMapping { - rootDiskSizeMiB := minRootDiskSizeMiB(ser) +func getBlockDeviceMappings( + cons constraints.Value, + series string, + controller bool, +) []ec2.BlockDeviceMapping { + minRootDiskSizeMiB := minRootDiskSizeMiB(series) + rootDiskSizeMiB := minRootDiskSizeMiB + if controller { + rootDiskSizeMiB = defaultControllerDiskSizeMiB + } if cons.RootDisk != nil { - if *cons.RootDisk >= minRootDiskSizeMiB(ser) { + if *cons.RootDisk >= minRootDiskSizeMiB { rootDiskSizeMiB = *cons.RootDisk } else { logger.Infof( - "Ignoring root-disk constraint of %dM because it is smaller than the EC2 image size of %dM", + "Ignoring root-disk constraint of %dM because it is smaller than the minimum size %dM", *cons.RootDisk, - minRootDiskSizeMiB(ser), + minRootDiskSizeMiB, ) } } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/ec2/ebs_test.go juju-core-2.0.0/src/github.com/juju/juju/provider/ec2/ebs_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/ec2/ebs_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/ec2/ebs_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -38,8 +38,8 @@ testing.BaseSuite // TODO(axw) the EBS tests should not be embedding jujutest.Tests. jujutest.Tests - srv localServer - restoreEC2Patching func() + srv localServer + client *awsec2.EC2 instanceId string } @@ -56,7 +56,6 @@ "secret-key": "x", }, ) - s.CloudRegion = "test" // Upload arches that ec2 supports; add to this // as ec2 coverage expands. 
@@ -66,7 +65,6 @@ "secret-key": "x", "region": "test", }) - s.restoreEC2Patching = patchEC2ForTesting(c) s.BaseSuite.PatchValue(&imagemetadata.SimplestreamsImagesPublicKey, sstesting.SignedMetadataPublicKey) s.BaseSuite.PatchValue(&keys.JujuPublicKey, sstesting.SignedMetadataPublicKey) imagetesting.PatchOfficialDataSources(&s.BaseSuite.CleanupSuite, "test:") @@ -74,7 +72,6 @@ } func (s *ebsSuite) TearDownSuite(c *gc.C) { - s.restoreEC2Patching() s.Tests.TearDownSuite(c) s.BaseSuite.TearDownSuite(c) } @@ -87,6 +84,13 @@ s.srv.startServer(c) s.Tests.SetUpTest(c) s.PatchValue(&ec2.DestroyVolumeAttempt.Delay, time.Duration(0)) + + region := s.srv.region() + s.CloudRegion = region.Name + s.CloudEndpoint = region.EC2Endpoint + s.client = s.srv.client() + restoreEC2Patching := patchEC2ForTesting(c, region) + s.AddCleanup(func(c *gc.C) { restoreEC2Patching() }) } func (s *ebsSuite) TearDownTest(c *gc.C) { @@ -430,7 +434,7 @@ s.srv.ec2srv.NewInstances(1, "m1.medium", imageId, ec2test.Pending, nil) // Tag the root disk with the model UUID. 
- _, err := s.srv.client.CreateTags([]string{"vol-0"}, []awsec2.Tag{ + _, err := s.client.CreateTags([]string{"vol-0"}, []awsec2.Tag{ {tags.JujuModel, s.TestConfig["uuid"].(string)}, }) c.Assert(err, jc.ErrorIsNil) @@ -724,12 +728,32 @@ } func (*blockDeviceMappingSuite) TestGetBlockDeviceMappings(c *gc.C) { - mapping := ec2.GetBlockDeviceMappings(constraints.Value{}, "trusty") + mapping := ec2.GetBlockDeviceMappings(constraints.Value{}, "trusty", false) c.Assert(mapping, gc.DeepEquals, []awsec2.BlockDeviceMapping{{ VolumeSize: 8, DeviceName: "/dev/sda1", }, { VirtualName: "ephemeral0", + DeviceName: "/dev/sdb", + }, { + VirtualName: "ephemeral1", + DeviceName: "/dev/sdc", + }, { + VirtualName: "ephemeral2", + DeviceName: "/dev/sdd", + }, { + VirtualName: "ephemeral3", + DeviceName: "/dev/sde", + }}) +} + +func (*blockDeviceMappingSuite) TestGetBlockDeviceMappingsController(c *gc.C) { + mapping := ec2.GetBlockDeviceMappings(constraints.Value{}, "trusty", true) + c.Assert(mapping, gc.DeepEquals, []awsec2.BlockDeviceMapping{{ + VolumeSize: 32, + DeviceName: "/dev/sda1", + }, { + VirtualName: "ephemeral0", DeviceName: "/dev/sdb", }, { VirtualName: "ephemeral1", diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/ec2/environ.go juju-core-2.0.0/src/github.com/juju/juju/provider/ec2/environ.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/ec2/environ.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/ec2/environ.go 2016-10-13 14:31:49.000000000 +0000 @@ -14,10 +14,9 @@ "github.com/juju/errors" "github.com/juju/retry" "github.com/juju/utils" - "github.com/juju/utils/arch" "github.com/juju/utils/clock" + "gopkg.in/amz.v3/aws" "gopkg.in/amz.v3/ec2" - "gopkg.in/amz.v3/s3" "gopkg.in/juju/names.v2" "github.com/juju/juju/cloudconfig/instancecfg" @@ -25,13 +24,13 @@ "github.com/juju/juju/constraints" "github.com/juju/juju/environs" "github.com/juju/juju/environs/config" - 
"github.com/juju/juju/environs/imagemetadata" "github.com/juju/juju/environs/instances" "github.com/juju/juju/environs/simplestreams" "github.com/juju/juju/environs/tags" "github.com/juju/juju/instance" "github.com/juju/juju/network" "github.com/juju/juju/provider/common" + "github.com/juju/juju/provider/ec2/internal/ec2instancetypes" "github.com/juju/juju/tools" ) @@ -60,13 +59,6 @@ name string cloud environs.CloudSpec ec2 *ec2.EC2 - s3 *s3.S3 - - // archMutex gates access to supportedArchitectures - archMutex sync.Mutex - // supportedArchitectures caches the architectures - // for which images can be instantiated. - supportedArchitectures []string // ecfgMutex protects the *Unlocked fields below. ecfgMutex sync.Mutex @@ -74,6 +66,10 @@ availabilityZonesMutex sync.Mutex availabilityZones []common.AvailabilityZone + + defaultVPCMutex sync.Mutex + defaultVPCChecked bool + defaultVPC *ec2.VPC } func (e *environ) Config() *config.Config { @@ -131,32 +127,14 @@ return nil } -func (env *environ) validateVPC(logInfof func(string, ...interface{}), badge string) error { - return nil -} - // Bootstrap is part of the Environ interface. func (e *environ) Bootstrap(ctx environs.BootstrapContext, args environs.BootstrapParams) (*environs.BootstrapResult, error) { return common.Bootstrap(ctx, e, args) } -func (e *environ) getSupportedArchitectures() ([]string, error) { - e.archMutex.Lock() - defer e.archMutex.Unlock() - if e.supportedArchitectures != nil { - return e.supportedArchitectures, nil - } - // Create a filter to get all images from our region and for the correct stream. - cloudSpec, err := e.Region() - if err != nil { - return nil, err - } - imageConstraint := imagemetadata.NewImageConstraint(simplestreams.LookupParams{ - CloudSpec: cloudSpec, - Stream: e.Config().ImageStream(), - }) - e.supportedArchitectures, err = common.SupportedArchitectures(e, imageConstraint) - return e.supportedArchitectures, err +// BootstrapMessage is part of the Environ interface. 
+func (e *environ) BootstrapMessage() string { + return "" } // SupportsSpaces is specified on environs.Networking. @@ -181,15 +159,14 @@ validator := constraints.NewValidator() validator.RegisterConflicts( []string{constraints.InstanceType}, - []string{constraints.Mem, constraints.CpuCores, constraints.CpuPower}) + []string{constraints.Mem, constraints.Cores, constraints.CpuPower}) validator.RegisterUnsupported(unsupportedConstraints) - supportedArches, err := e.getSupportedArchitectures() + instanceTypes, err := e.supportedInstanceTypes() if err != nil { - return nil, err + return nil, errors.Trace(err) } - validator.RegisterVocabulary(constraints.Arch, supportedArches) - instTypeNames := make([]string, len(allInstanceTypes)) - for i, itype := range allInstanceTypes { + instTypeNames := make([]string, len(instanceTypes)) + for i, itype := range instanceTypes { instTypeNames[i] = itype.Name } validator.RegisterVocabulary(constraints.InstanceType, instTypeNames) @@ -299,7 +276,11 @@ return nil } // Constraint has an instance-type constraint so let's see if it is valid. - for _, itype := range allInstanceTypes { + instanceTypes, err := e.supportedInstanceTypes() + if err != nil { + return errors.Trace(err) + } + for _, itype := range instanceTypes { if itype.Name != *cons.InstanceType { continue } @@ -315,34 +296,32 @@ // MetadataLookupParams returns parameters which are used to query simplestreams metadata. func (e *environ) MetadataLookupParams(region string) (*simplestreams.MetadataLookupParams, error) { + var endpoint string if region == "" { region = e.cloud.Region - } - cloudSpec, err := e.cloudSpec(region) - if err != nil { - return nil, err + endpoint = e.cloud.Endpoint + } else { + // TODO(axw) 2016-10-04 #1630089 + // MetadataLookupParams needs to be updated so that providers + // are not expected to know how to map regions to endpoints. 
+ ec2Region, ok := aws.Regions[region] + if !ok { + return nil, errors.Errorf("unknown region %q", region) + } + endpoint = ec2Region.EC2Endpoint } return &simplestreams.MetadataLookupParams{ - Series: config.PreferredSeries(e.ecfg()), - Region: cloudSpec.Region, - Endpoint: cloudSpec.Endpoint, - Architectures: arch.AllSupportedArches, + Series: config.PreferredSeries(e.ecfg()), + Region: region, + Endpoint: endpoint, }, nil } // Region is specified in the HasRegion interface. func (e *environ) Region() (simplestreams.CloudSpec, error) { - return e.cloudSpec(e.cloud.Region) -} - -func (e *environ) cloudSpec(region string) (simplestreams.CloudSpec, error) { - ec2Region, ok := allRegions[region] - if !ok { - return simplestreams.CloudSpec{}, fmt.Errorf("unknown region %q", region) - } return simplestreams.CloudSpec{ - Region: region, - Endpoint: ec2Region.EC2Endpoint, + Region: e.cloud.Region, + Endpoint: e.cloud.Endpoint, }, nil } @@ -423,13 +402,23 @@ arches := args.Tools.Arches() - spec, err := findInstanceSpec(args.ImageMetadata, &instances.InstanceConstraint{ - Region: e.cloud.Region, - Series: args.InstanceConfig.Series, - Arches: arches, - Constraints: args.Constraints, - Storage: []string{ssdStorage, ebsStorage}, - }) + instanceTypes, err := e.supportedInstanceTypes() + if err != nil { + return nil, errors.Trace(err) + } + + spec, err := findInstanceSpec( + args.InstanceConfig.Controller != nil, + args.ImageMetadata, + instanceTypes, + &instances.InstanceConstraint{ + Region: e.cloud.Region, + Series: args.InstanceConfig.Series, + Arches: arches, + Constraints: args.Constraints, + Storage: []string{ssdStorage, ebsStorage}, + }, + ) if err != nil { return nil, err } @@ -465,7 +454,11 @@ return nil, errors.Annotate(err, "cannot set up groups") } - blockDeviceMappings := getBlockDeviceMappings(args.Constraints, args.InstanceConfig.Series) + blockDeviceMappings := getBlockDeviceMappings( + args.Constraints, + args.InstanceConfig.Series, + 
args.InstanceConfig.Controller != nil, + ) rootDiskSize := uint64(blockDeviceMappings[0].VolumeSize) * 1024 // If --constraints spaces=foo was passed, the provisioner will populate @@ -574,7 +567,7 @@ cfg := e.Config() tags := tags.ResourceTags( names.NewModelTag(cfg.UUID()), - names.NewModelTag(args.ControllerUUID), + names.NewControllerTag(args.ControllerUUID), cfg, ) tags[tagName] = instanceName + "-root" @@ -1398,7 +1391,6 @@ } var deleteSecurityGroupInsistently = func(inst SecurityGroupCleaner, group ec2.SecurityGroup, clock clock.Clock) error { - var lastErr error err := retry.Call(retry.CallArgs{ Attempts: 30, Delay: time.Second, @@ -1408,17 +1400,17 @@ Func: func() error { _, err := inst.DeleteSecurityGroup(group) if err == nil || isNotFoundError(err) { + logger.Debugf("deleting security group %q", group.Name) return nil } return errors.Trace(err) }, NotifyFunc: func(err error, attempt int) { - lastErr = err - logger.Infof(fmt.Sprintf("deleting security group %q, attempt %d", group.Name, attempt)) + logger.Debugf("deleting security group %q, attempt %d", group.Name, attempt) }, }) if err != nil { - return errors.Annotatef(lastErr, "cannot delete security group %q: consider deleting it manually", group.Name) + return errors.Annotatef(err, "cannot delete security group %q: consider deleting it manually", group.Name) } return nil } @@ -1548,7 +1540,7 @@ cfg := e.Config() tags := tags.ResourceTags( names.NewModelTag(cfg.UUID()), - names.NewModelTag(controllerUUID), + names.NewControllerTag(controllerUUID), cfg, ) if err := tagResources(e.ec2, tags, g.Id); err != nil { @@ -1726,6 +1718,50 @@ return nil, errors.NotSupportedf("container address allocation") } -func (e *environ) ReleaseContainerAddresses(interfaces []network.InterfaceInfo) error { +func (e *environ) ReleaseContainerAddresses(interfaces []network.ProviderInterfaceInfo) error { return errors.NotSupportedf("container address allocation") } + +func (e *environ) supportedInstanceTypes() 
([]instances.InstanceType, error) { + allInstanceTypes := ec2instancetypes.RegionInstanceTypes(e.cloud.Region) + if isVPCIDSet(e.ecfg().vpcID()) { + return allInstanceTypes, nil + } + hasDefaultVPC, err := e.hasDefaultVPC() + if err != nil { + return nil, errors.Trace(err) + } + if hasDefaultVPC { + return allInstanceTypes, nil + } + + // The region has no default VPC, and the user has not specified + // one to use. We filter out any instance types that are not + // supported in EC2-Classic. + supportedInstanceTypes := make([]instances.InstanceType, 0, len(allInstanceTypes)) + for _, instanceType := range allInstanceTypes { + if !ec2instancetypes.SupportsClassic(instanceType.Name) { + continue + } + supportedInstanceTypes = append(supportedInstanceTypes, instanceType) + } + return supportedInstanceTypes, nil +} + +func (e *environ) hasDefaultVPC() (bool, error) { + e.defaultVPCMutex.Lock() + defer e.defaultVPCMutex.Unlock() + if !e.defaultVPCChecked { + filter := ec2.NewFilter() + filter.Add("isDefault", "true") + resp, err := e.ec2.VPCs(nil, filter) + if err != nil { + return false, errors.Trace(err) + } + if len(resp.VPCs) > 0 { + e.defaultVPC = &resp.VPCs[0] + } + e.defaultVPCChecked = true + } + return e.defaultVPC != nil, nil +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/ec2/environ_test.go juju-core-2.0.0/src/github.com/juju/juju/provider/ec2/environ_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/ec2/environ_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/ec2/environ_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -9,6 +9,7 @@ "github.com/juju/juju/constraints" "github.com/juju/juju/environs" + "github.com/juju/juju/environs/config" "github.com/juju/juju/environs/simplestreams" "github.com/juju/juju/instance" "github.com/juju/juju/network" @@ -18,6 +19,7 @@ // Ensure EC2 provider supports the expected interfaces, var ( _ environs.NetworkingEnviron = (*environ)(nil) + _ 
config.ConfigSchemaSource = (*environProvider)(nil) _ simplestreams.HasRegion = (*environ)(nil) _ state.Prechecker = (*environ)(nil) _ instance.Distributor = (*environ)(nil) @@ -94,7 +96,7 @@ for _, t := range rootDiskTests { c.Logf("Test %s", t.name) cons := constraints.Value{RootDisk: t.constraint} - mappings := getBlockDeviceMappings(cons, t.series) + mappings := getBlockDeviceMappings(cons, t.series, false) expected := append([]amzec2.BlockDeviceMapping{t.device}, commonInstanceStoreDisks...) c.Assert(mappings, gc.DeepEquals, expected) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/ec2/export_test.go juju-core-2.0.0/src/github.com/juju/juju/provider/ec2/export_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/ec2/export_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/ec2/export_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,17 +4,15 @@ package ec2 import ( - "io" + "strings" "gopkg.in/amz.v3/aws" "gopkg.in/amz.v3/ec2" - "gopkg.in/amz.v3/s3" gc "gopkg.in/check.v1" "github.com/juju/juju/environs" "github.com/juju/juju/environs/imagemetadata" sstesting "github.com/juju/juju/environs/simplestreams/testing" - "github.com/juju/juju/environs/storage" "github.com/juju/juju/instance" jujustorage "github.com/juju/juju/storage" ) @@ -59,19 +57,6 @@ const VPCIDNone = vpcIDNone -// BucketStorage returns a storage instance addressing -// an arbitrary s3 bucket. -func BucketStorage(b *s3.Bucket) storage.Storage { - return &ec2storage{ - bucket: b, - } -} - -// DeleteBucket deletes the s3 bucket used by the storage instance. -func DeleteBucket(s storage.Storage) error { - return deleteBucket(s.(*ec2storage)) -} - // TODO: Apart from overriding different hardcoded hosts, these two test helpers are identical. Let's share. 
// UseTestImageData causes the given content to be served @@ -84,27 +69,8 @@ } } -func UseTestRegionData(content map[string]aws.Region) { - if content != nil { - allRegions = content - } else { - allRegions = aws.Regions - } -} - -// UseTestInstanceTypeData causes the given instance type -// cost data to be served for the "test" region. -func UseTestInstanceTypeData(content instanceTypeCost) { - if content != nil { - allRegionCosts["test"] = content - } else { - delete(allRegionCosts, "test") - } -} - var ( ShortAttempt = &shortAttempt - StorageAttempt = &storageAttempt DestroyVolumeAttempt = &destroyVolumeAttempt DeleteSecurityGroupInsistently = &deleteSecurityGroupInsistently TerminateInstancesById = &terminateInstancesById @@ -127,18 +93,6 @@ return newi } -// Access non exported methods on ec2.storage -type Storage interface { - Put(file string, r io.Reader, length int64) error - ResetMadeBucket() -} - -func (s *ec2storage) ResetMadeBucket() { - s.Lock() - defer s.Unlock() - s.madeBucket = false -} - func makeImage(id, storage, virtType, arch, version, region string) *imagemetadata.ImageMetadata { return &imagemetadata.ImageMetadata{ Id: id, @@ -155,12 +109,12 @@ var TestImageMetadata = []*imagemetadata.ImageMetadata{ // LTS-dependent requires new entries upon new LTS release. 
// 16.04:amd64 - makeImage("ami-00000133", "ssd", "pv", "amd64", "16.04", "test"), - makeImage("ami-00000139", "ebs", "pv", "amd64", "16.04", "test"), - makeImage("ami-00000135", "ssd", "hvm", "amd64", "16.04", "test"), + makeImage("ami-00000133", "ssd", "hvm", "amd64", "16.04", "test"), + makeImage("ami-00000139", "ebs", "hvm", "amd64", "16.04", "test"), + makeImage("ami-00000135", "ssd", "pv", "amd64", "16.04", "test"), // 14.04:amd64 - makeImage("ami-00000033", "ssd", "pv", "amd64", "14.04", "test"), + makeImage("ami-00000033", "ssd", "hvm", "amd64", "14.04", "test"), // 14.04:i386 makeImage("ami-00000034", "ssd", "pv", "i386", "14.04", "test"), @@ -169,43 +123,52 @@ makeImage("ami-01000035", "ssd", "hvm", "amd64", "12.10", "test"), // 12.10:i386 - makeImage("ami-01000034", "ssd", "pv", "i386", "12.10", "test"), + makeImage("ami-01000034", "ssd", "hvm", "i386", "12.10", "test"), // 13.04:i386 - makeImage("ami-02000034", "ssd", "pv", "i386", "13.04", "test"), + makeImage("ami-02000034", "ssd", "hvm", "i386", "13.04", "test"), + makeImage("ami-02000035", "ssd", "pv", "i386", "13.04", "test"), } -var TestImagesData = map[string]string{ - // LTS-dependent requires new/updated entries upon new LTS release. 
- "/streams/v1/index.json": ` - { - "index": { - "com.ubuntu.cloud:released": { - "updated": "Wed, 01 May 2013 13:31:26 +0000", - "clouds": [ - { - "region": "test", - "endpoint": "https://ec2.endpoint.com" - } - ], - "cloudname": "aws", - "datatype": "image-ids", - "format": "products:1.0", - "products": [ - "com.ubuntu.cloud:server:16.04:amd64", - "com.ubuntu.cloud:server:14.04:amd64", - "com.ubuntu.cloud:server:14.04:i386", - "com.ubuntu.cloud:server:12.10:i386", - "com.ubuntu.cloud:server:13.04:i386" - ], - "path": "streams/v1/com.ubuntu.cloud:released:aws.json" - } - }, - "updated": "Wed, 01 May 2013 13:31:26 +0000", - "format": "index:1.0" - } -`, - "/streams/v1/com.ubuntu.cloud:released:aws.json": ` +func MakeTestImageStreamsData(region aws.Region) map[string]string { + testImageMetadataIndex := strings.Replace(testImageMetadataIndex, "$REGION", region.Name, -1) + testImageMetadataIndex = strings.Replace(testImageMetadataIndex, "$ENDPOINT", region.EC2Endpoint, -1) + return map[string]string{ + "/streams/v1/index.json": testImageMetadataIndex, + "/streams/v1/com.ubuntu.cloud:released:aws.json": testImageMetadataProduct, + } +} + +// LTS-dependent requires new/updated entries upon new LTS release. 
+const testImageMetadataIndex = ` +{ + "index": { + "com.ubuntu.cloud:released": { + "updated": "Wed, 01 May 2013 13:31:26 +0000", + "clouds": [ + { + "region": "$REGION", + "endpoint": "$ENDPOINT" + } + ], + "cloudname": "aws", + "datatype": "image-ids", + "format": "products:1.0", + "products": [ + "com.ubuntu.cloud:server:16.04:amd64", + "com.ubuntu.cloud:server:14.04:amd64", + "com.ubuntu.cloud:server:14.04:i386", + "com.ubuntu.cloud:server:12.10:i386", + "com.ubuntu.cloud:server:13.04:i386" + ], + "path": "streams/v1/com.ubuntu.cloud:released:aws.json" + } + }, + "updated": "Wed, 01 May 2013 13:31:26 +0000", + "format": "index:1.0" +} +` +const testImageMetadataProduct = ` { "content_id": "com.ubuntu.cloud:released:aws", "products": { @@ -273,7 +236,7 @@ "items": { "test1peebs": { "root_store": "ssd", - "virt": "pv", + "virt": "hvm", "region": "test", "id": "ami-00000033" } @@ -392,27 +355,4 @@ }, "format": "products:1.0" } -`, -} - -var TestInstanceTypeCosts = instanceTypeCost{ - "m1.small": 60, - "m1.medium": 120, - "m1.large": 240, - "m1.xlarge": 480, - "m3.medium": 95, - "m3.large": 190, - "m3.xlarge": 385, - "m3.2xlarge": 765, - "t1.micro": 20, - "c1.medium": 145, - "c1.xlarge": 580, - "cc2.8xlarge": 2400, -} - -var TestRegions = map[string]aws.Region{ - "test": { - Name: "test", - EC2Endpoint: "https://ec2.endpoint.com", - }, -} +` diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/ec2/image.go juju-core-2.0.0/src/github.com/juju/juju/provider/ec2/image.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/ec2/image.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/ec2/image.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,18 +4,11 @@ package ec2 import ( - "fmt" - + "github.com/juju/juju/constraints" "github.com/juju/juju/environs/imagemetadata" "github.com/juju/juju/environs/instances" ) -// defaultCpuPower is larger the smallest instance's cpuPower, and no larger than -// any other instance 
type's cpuPower. It is used when no explicit CpuPower -// constraint exists, preventing the smallest instance from being chosen unless -// the user has clearly indicated that they are willing to accept poor performance. -const defaultCpuPower = 100 - // filterImages returns only that subset of the input (in the same order) that // this provider finds suitable. func filterImages(images []*imagemetadata.ImageMetadata, ic *instances.InstanceConstraint) []*imagemetadata.ImageMetadata { @@ -44,35 +37,53 @@ // findInstanceSpec returns an InstanceSpec satisfying the supplied instanceConstraint. func findInstanceSpec( + controller bool, allImageMetadata []*imagemetadata.ImageMetadata, + instanceTypes []instances.InstanceType, ic *instances.InstanceConstraint, ) (*instances.InstanceSpec, error) { logger.Debugf("received %d image(s)", len(allImageMetadata)) - // If the instance type is set, don't also set a default CPU power - // as this is implied. - cons := ic.Constraints - if cons.CpuPower == nil && (cons.InstanceType == nil || *cons.InstanceType == "") { - ic.Constraints.CpuPower = instances.CpuPower(defaultCpuPower) + if controller { + ic.Constraints = withDefaultControllerConstraints(ic.Constraints) + } else { + ic.Constraints = withDefaultNonControllerConstraints(ic.Constraints) } suitableImages := filterImages(allImageMetadata, ic) logger.Debugf("found %d suitable image(s)", len(suitableImages)) images := instances.ImageMetadataToImages(suitableImages) + return instances.FindInstanceSpec(images, ic, instanceTypes) +} - // Make a copy of the known EC2 instance types, filling in the cost for the specified region. - regionCosts := allRegionCosts[ic.Region] - if len(regionCosts) == 0 && len(allRegionCosts) > 0 { - return nil, fmt.Errorf("no instance types found in %s", ic.Region) +// withDefaultControllerConstraints returns the given constraints, +// updated to choose a default instance type appropriate for a +// controller machine. 
We use this only if the user does not specify +// any constraints that would otherwise control the instance type +// selection. +// +// At the time of writing, this will choose +// - t2.medium, for VPC +// - m3.medium, for EC2-Classic +func withDefaultControllerConstraints(cons constraints.Value) constraints.Value { + if !cons.HasInstanceType() && !cons.HasCpuCores() && !cons.HasCpuPower() && !cons.HasMem() { + var mem uint64 = 3.75 * 1024 + cons.Mem = &mem } + return cons +} - var itypesWithCosts []instances.InstanceType - for _, itype := range allInstanceTypes { - cost, ok := regionCosts[itype.Name] - if !ok { - continue - } - itWithCost := itype - itWithCost.Cost = cost - itypesWithCosts = append(itypesWithCosts, itWithCost) +// withDefaultNonControllerConstraints returns the given constraints, +// updated to choose a default instance type appropriate for a +// non-controller machine. We use this only if the user does not +// specify an instance-type, or cpu-power. +// +// At the time of writing, this will choose the cheapest non-burstable +// instance available in the account/region. At the time of writing, that +// is, for example: +// - m3.medium (for EC2-Classic) +// - c4.large (e.g. 
in ap-south-1) +func withDefaultNonControllerConstraints(cons constraints.Value) constraints.Value { + if !cons.HasInstanceType() && !cons.HasCpuPower() { + cons.CpuPower = instances.CpuPower(100) } - return instances.FindInstanceSpec(images, ic, itypesWithCosts) + return cons } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/ec2/image_test.go juju-core-2.0.0/src/github.com/juju/juju/provider/ec2/image_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/ec2/image_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/ec2/image_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -12,10 +12,8 @@ "github.com/juju/juju/constraints" "github.com/juju/juju/environs/imagemetadata" - imagetesting "github.com/juju/juju/environs/imagemetadata/testing" "github.com/juju/juju/environs/instances" - sstesting "github.com/juju/juju/environs/simplestreams/testing" - "github.com/juju/juju/juju/keys" + "github.com/juju/juju/provider/ec2/internal/ec2instancetypes" "github.com/juju/juju/testing" ) @@ -23,28 +21,6 @@ type specSuite struct { testing.BaseSuite - sstesting.TestDataSuite -} - -func (s *specSuite) SetUpSuite(c *gc.C) { - s.BaseSuite.SetUpSuite(c) - s.TestDataSuite.SetUpSuite(c) - - imagetesting.PatchOfficialDataSources(&s.CleanupSuite, "test:") - s.PatchValue(&imagemetadata.SimplestreamsImagesPublicKey, sstesting.SignedMetadataPublicKey) - s.PatchValue(&keys.JujuPublicKey, sstesting.SignedMetadataPublicKey) - - UseTestImageData(c, TestImagesData) - UseTestInstanceTypeData(TestInstanceTypeCosts) - UseTestRegionData(TestRegions) -} - -func (s *specSuite) TearDownSuite(c *gc.C) { - UseTestInstanceTypeData(nil) - UseTestImageData(c, nil) - UseTestRegionData(nil) - s.TestDataSuite.TearDownSuite(c) - s.BaseSuite.TearDownSuite(c) } var findInstanceSpecTests = []struct { @@ -64,19 +40,19 @@ }, { series: "quantal", arches: []string{"i386"}, - itype: "c1.medium", + itype: "c3.large", image: "ami-01000034", }, { 
series: "xenial", arches: []string{"amd64"}, - cons: "cpu-cores=4", - itype: "m3.xlarge", + cons: "cores=4", + itype: "c4.xlarge", image: "ami-00000133", }, { series: "xenial", arches: []string{"amd64"}, cons: "mem=10G", - itype: "m3.xlarge", + itype: "r3.large", image: "ami-00000133", }, { series: "xenial", @@ -94,14 +70,14 @@ series: "xenial", arches: []string{"amd64"}, cons: "cpu-power=800", - itype: "m3.xlarge", + itype: "c4.large", image: "ami-00000133", }, { series: "xenial", arches: []string{"amd64"}, - cons: "instance-type=m1.medium cpu-power=200", + cons: "instance-type=m1.medium cpu-power=100", itype: "m1.medium", - image: "ami-00000133", + image: "ami-00000135", }, { series: "xenial", arches: []string{"amd64"}, @@ -112,21 +88,21 @@ series: "xenial", arches: []string{"amd64"}, cons: "mem=4G root-disk=16384M", - itype: "m3.large", + itype: "m4.large", storage: []string{"ssd", "ebs"}, image: "ami-00000133", }, { series: "xenial", arches: []string{"amd64"}, cons: "mem=4G root-disk=16384M", - itype: "m3.large", + itype: "m4.large", storage: []string{"ebs", "ssd"}, image: "ami-00000139", }, { series: "xenial", arches: []string{"amd64"}, cons: "mem=4G root-disk=16384M", - itype: "m3.large", + itype: "m4.large", storage: []string{"ebs"}, image: "ami-00000139", }, { @@ -137,17 +113,17 @@ }, { series: "quantal", arches: []string{"i386"}, - itype: "c1.medium", + itype: "c3.large", image: "ami-01000034", }, { series: "quantal", - arches: both, + arches: []string{"amd64", "i386"}, cons: "arch=amd64", - itype: "cc2.8xlarge", + itype: "m3.medium", image: "ami-01000035", }, { series: "quantal", - arches: both, + arches: []string{"amd64", "i386"}, cons: "instance-type=cc2.8xlarge", itype: "cc2.8xlarge", image: "ami-01000035", @@ -174,7 +150,9 @@ c, TestImageMetadata, test.series, test.arches, ) spec, err := findInstanceSpec( + false, // non-controller imageMetadata, + ec2instancetypes.RegionInstanceTypes("test"), &instances.InstanceConstraint{ Region: "test", Series: 
test.series, @@ -197,7 +175,12 @@ } c.Check(instanceConstraint.Constraints.CpuPower, gc.IsNil) - findInstanceSpec(TestImageMetadata, instanceConstraint) + findInstanceSpec( + false, // non-controller + TestImageMetadata, + ec2instancetypes.RegionInstanceTypes("test"), + instanceConstraint, + ) c.Check(instanceConstraint.Constraints.CpuPower, gc.IsNil) } @@ -214,9 +197,9 @@ err: fmt.Sprintf(`no "%s" images in test with arches \[arm\]`, series.LatestLts()), }, { series: "raring", - arches: both, + arches: []string{"amd64", "i386"}, cons: "mem=4G", - err: `no "raring" images in test matching instance types \[m3.large m3.xlarge c1.xlarge m3.2xlarge cc2.8xlarge\]`, + err: `no "raring" images in test matching instance types \[.*\]`, }, { series: series.LatestLts(), arches: []string{"amd64"}, @@ -235,7 +218,9 @@ c, TestImageMetadata, t.series, t.arches, ) _, err := findInstanceSpec( + false, // non-controller imageMetadata, + ec2instancetypes.RegionInstanceTypes("test"), &instances.InstanceConstraint{ Region: "test", Series: t.series, diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/ec2/instance.go juju-core-2.0.0/src/github.com/juju/juju/provider/ec2/instance.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/ec2/instance.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/ec2/instance.go 2016-10-13 14:31:49.000000000 +0000 @@ -32,16 +32,16 @@ func (inst *ec2Instance) Status() instance.InstanceStatus { // pending | running | shutting-down | terminated | stopping | stopped - jujuStatus := status.StatusPending + jujuStatus := status.Pending switch inst.State.Name { case "pending": - jujuStatus = status.StatusPending + jujuStatus = status.Pending case "running": - jujuStatus = status.StatusRunning + jujuStatus = status.Running case "shutting-down", "terminated", "stopping", "stopped": - jujuStatus = status.StatusEmpty + jujuStatus = status.Empty default: - jujuStatus = status.StatusEmpty + jujuStatus = 
status.Empty } return instance.InstanceStatus{ Status: jujuStatus, diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/ec2/instancetype.go juju-core-2.0.0/src/github.com/juju/juju/provider/ec2/instancetype.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/ec2/instancetype.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/ec2/instancetype.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,998 +0,0 @@ -// Copyright 2013 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package ec2 - -import ( - "gopkg.in/amz.v3/aws" - - "github.com/juju/juju/environs/instances" - "github.com/juju/utils/arch" -) - -var ( - // Type of virtualisation used. - paravirtual = "pv" - hvm = "hvm" - - // all instance types can run amd64 images, and some can also run - // i386 ones. - amd64 = []string{arch.AMD64} - both = []string{arch.AMD64, arch.I386} -) - -// allRegions is defined here to allow tests to override the content. -var allRegions = aws.Regions - -// allInstanceTypes holds the relevant attributes of every known -// instance type. -// -// Note that while the EC2 root disk default is 8G, constraints on -// disk for amazon will simply cause the root disk to grow to match -// the constraint -var allInstanceTypes = []instances.InstanceType{ - { // General purpose, 1st generation. m1.* instance types are deprecated - // and should only be used if explicitly requested by name. 
- Name: "m1.small", - Arches: both, - CpuCores: 1, - CpuPower: instances.CpuPower(100), - Mem: 1740, - VirtType: ¶virtual, - Deprecated: true, - }, { - Name: "m1.medium", - Arches: both, - CpuCores: 1, - CpuPower: instances.CpuPower(200), - Mem: 3840, - VirtType: ¶virtual, - Deprecated: true, - }, { - Name: "m1.large", - Arches: amd64, - CpuCores: 2, - CpuPower: instances.CpuPower(400), - Mem: 7680, - VirtType: ¶virtual, - Deprecated: true, - }, { - Name: "m1.xlarge", - Arches: amd64, - CpuCores: 4, - CpuPower: instances.CpuPower(800), - Mem: 15360, - VirtType: ¶virtual, - Deprecated: true, - }, - // M4 instances are the latest generation of General Purpose - // Instances. This family provides a balance of compute, memory, - // and network resources, and it is a good choice for many - // applications. - { - Name: "m4.large", - Arches: amd64, - CpuCores: 2, - CpuPower: instances.CpuPower(650), - Mem: 8192, - VirtType: &hvm, - }, - { - Name: "m4.xlarge", - Arches: amd64, - CpuCores: 4, - CpuPower: instances.CpuPower(1300), - Mem: 16384, - VirtType: &hvm, - }, - { - Name: "m4.2xlarge", - Arches: amd64, - CpuCores: 8, - CpuPower: instances.CpuPower(2600), - Mem: 32768, - VirtType: &hvm, - }, - { - Name: "m4.4xlarge", - Arches: amd64, - CpuCores: 16, - CpuPower: instances.CpuPower(5350), - Mem: 65536, - VirtType: &hvm, - }, - { - Name: "m4.10xlarge", - Arches: amd64, - CpuCores: 40, - CpuPower: instances.CpuPower(12450), - Mem: 163840, - VirtType: &hvm, - }, - - { // General purpose, 2nd generation. 
- Name: "m3.medium", - Arches: amd64, - CpuCores: 1, - CpuPower: instances.CpuPower(300), - Mem: 3840, - VirtType: ¶virtual, - }, { - Name: "m3.large", - Arches: amd64, - CpuCores: 2, - CpuPower: instances.CpuPower(650), - Mem: 7680, - VirtType: ¶virtual, - }, { - Name: "m3.xlarge", - Arches: amd64, - CpuCores: 4, - CpuPower: instances.CpuPower(1300), - Mem: 15360, - VirtType: ¶virtual, - }, { - Name: "m3.2xlarge", - Arches: amd64, - CpuCores: 8, - CpuPower: instances.CpuPower(2600), - Mem: 30720, - VirtType: ¶virtual, - }, - - { // Compute-optimized, 1st generation. - Name: "c1.medium", - Arches: both, - CpuCores: 2, - CpuPower: instances.CpuPower(500), - Mem: 1740, - VirtType: ¶virtual, - }, { - Name: "c1.xlarge", - Arches: amd64, - CpuCores: 8, - CpuPower: instances.CpuPower(2000), - Mem: 7168, - VirtType: ¶virtual, - }, { - Name: "cc2.8xlarge", - Arches: amd64, - CpuCores: 16, - CpuPower: instances.CpuPower(8800), - Mem: 61952, - VirtType: &hvm, - }, - - { // Compute-optimized, 2nd generation. - Name: "c3.large", - Arches: amd64, - CpuCores: 2, - CpuPower: instances.CpuPower(700), - Mem: 3840, - VirtType: ¶virtual, - }, { - Name: "c3.xlarge", - Arches: amd64, - CpuCores: 4, - CpuPower: instances.CpuPower(1400), - Mem: 7680, - VirtType: ¶virtual, - }, { - Name: "c3.2xlarge", - Arches: amd64, - CpuCores: 8, - CpuPower: instances.CpuPower(2800), - Mem: 15360, - VirtType: ¶virtual, - }, { - Name: "c3.4xlarge", - Arches: amd64, - CpuCores: 16, - CpuPower: instances.CpuPower(5500), - Mem: 30720, - VirtType: ¶virtual, - }, { - Name: "c3.8xlarge", - Arches: amd64, - CpuCores: 32, - CpuPower: instances.CpuPower(10800), - Mem: 61440, - VirtType: ¶virtual, - }, - - { // GPU instances, 1st generation. - Name: "cg1.4xlarge", - Arches: amd64, - CpuCores: 8, - CpuPower: instances.CpuPower(3350), - Mem: 22528, - VirtType: &hvm, - }, - - { // GPU instances, 2nd generation. 
- Name: "g2.2xlarge", - Arches: amd64, - CpuCores: 8, - CpuPower: instances.CpuPower(2600), - Mem: 15360, - VirtType: &hvm, - }, - - { // Memory-optimized, 1st generation. - Name: "m2.xlarge", - Arches: amd64, - CpuCores: 2, - CpuPower: instances.CpuPower(650), - Mem: 17408, - VirtType: ¶virtual, - }, { - Name: "m2.2xlarge", - Arches: amd64, - CpuCores: 4, - CpuPower: instances.CpuPower(1300), - Mem: 34816, - VirtType: ¶virtual, - }, { - Name: "m2.4xlarge", - Arches: amd64, - CpuCores: 8, - CpuPower: instances.CpuPower(2600), - Mem: 69632, - VirtType: ¶virtual, - }, { - Name: "cr1.8xlarge", - Arches: amd64, - CpuCores: 16, - CpuPower: instances.CpuPower(8800), - Mem: 249856, - VirtType: &hvm, - }, - - { // Memory-optimized, 2nd generation. - Name: "r3.large", - Arches: amd64, - CpuCores: 2, - CpuPower: instances.CpuPower(650), - Mem: 15616, - VirtType: &hvm, - }, { - Name: "r3.xlarge", - Arches: amd64, - CpuCores: 4, - CpuPower: instances.CpuPower(1300), - Mem: 31232, - VirtType: &hvm, - }, { - Name: "r3.2xlarge", - Arches: amd64, - CpuCores: 8, - CpuPower: instances.CpuPower(2600), - Mem: 62464, - VirtType: &hvm, - }, { - Name: "r3.4xlarge", - Arches: amd64, - CpuCores: 16, - CpuPower: instances.CpuPower(5200), - Mem: 124928, - VirtType: &hvm, - }, { - Name: "r3.8xlarge", - Arches: amd64, - CpuCores: 32, - CpuPower: instances.CpuPower(10400), - Mem: 249856, - VirtType: &hvm, - }, - - { // Storage-optimized, 1st generation. - Name: "hi1.4xlarge", - Arches: amd64, - CpuCores: 16, - CpuPower: instances.CpuPower(3500), - Mem: 61952, - VirtType: ¶virtual, - }, - - { // Storage-optimized, 2nd generation. 
- Name: "i2.xlarge", - Arches: amd64, - CpuCores: 4, - CpuPower: instances.CpuPower(1400), - Mem: 31232, - VirtType: &hvm, - }, { - Name: "i2.2xlarge", - Arches: amd64, - CpuCores: 8, - CpuPower: instances.CpuPower(2700), - Mem: 62464, - VirtType: &hvm, - }, { - Name: "i2.4xlarge", - Arches: amd64, - CpuCores: 16, - CpuPower: instances.CpuPower(5300), - Mem: 124928, - VirtType: &hvm, - }, { - Name: "i2.8xlarge", - Arches: amd64, - CpuCores: 32, - CpuPower: instances.CpuPower(10400), - Mem: 249856, - VirtType: &hvm, - }, { - Name: "hs1.8xlarge", - Arches: amd64, - CpuCores: 16, - CpuPower: instances.CpuPower(3500), - Mem: 119808, - VirtType: ¶virtual, - }, - - { // Tiny-weeny. - Name: "t1.micro", - Arches: both, - CpuCores: 1, - // Burstable baseline is 20% - CpuPower: instances.CpuPower(20), - Mem: 613, - VirtType: ¶virtual, - }, - - { // General Purpose, 3rd generation. - Name: "t2.micro", - Arches: amd64, - CpuCores: 1, - Mem: 1024, - // Burstable baseline is 10% (from http://aws.amazon.com/ec2/faqs/#burst) - CpuPower: instances.CpuPower(10), - VirtType: &hvm, - }, - { // General Purpose, 3rd generation. - Name: "t2.small", - Arches: amd64, - CpuCores: 1, - Mem: 2048, - // Burstable baseline is 20% (from http://aws.amazon.com/ec2/faqs/#burst) - CpuPower: instances.CpuPower(20), - VirtType: &hvm, - }, - { // General Purpose, 3rd generation. - Name: "t2.medium", - Arches: amd64, - CpuCores: 2, - Mem: 4096, - // Burstable baseline is 40% (from http://aws.amazon.com/ec2/faqs/#burst) - CpuPower: instances.CpuPower(40), - VirtType: &hvm, - }, - - { // Compute-optimized, 3rd generation. 
- Name: "c4.large", - Arches: amd64, - CpuCores: 2, - Mem: 3840, - CpuPower: instances.CpuPower(800), - VirtType: &hvm, - }, { - Name: "c4.xlarge", - Arches: amd64, - CpuCores: 4, - Mem: 7680, - CpuPower: instances.CpuPower(1600), - VirtType: &hvm, - }, { - Name: "c4.2xlarge", - Arches: amd64, - CpuCores: 8, - Mem: 15360, - CpuPower: instances.CpuPower(3100), - VirtType: &hvm, - }, { - Name: "c4.4xlarge", - Arches: amd64, - CpuCores: 16, - Mem: 30720, - CpuPower: instances.CpuPower(6200), - VirtType: &hvm, - }, { - Name: "c4.8xlarge", - Arches: amd64, - // The source of this information at http://aws.amazon.com/ec2/instance-types/ - CpuCores: 36, - Mem: 61440, - CpuPower: instances.CpuPower(13200), - VirtType: &hvm, - }, -} - -type instanceTypeCost map[string]uint64 -type regionCosts map[string]instanceTypeCost - -// allRegionCosts holds the cost in USDe-3/hour for each available instance -// type in each region. -var allRegionCosts = regionCosts{ - "ap-northeast-1": { // Tokyo. - "m1.small": 61, - "m1.medium": 122, - "m1.large": 243, - "m1.xlarge": 486, - - "m3.medium": 101, - "m3.large": 203, - "m3.xlarge": 405, - "m3.2xlarge": 810, - - "m4.large": 174, - "m4.xlarge": 348, - "m4.2xlarge": 695, - "m4.4xlarge": 1391, - "m4.10xlarge": 3477, - - "c1.medium": 158, - "c1.xlarge": 632, - - "cc2.8xlarge": 2349, - - "c3.large": 128, - "c3.xlarge": 255, - "c3.2xlarge": 511, - "c3.4xlarge": 1021, - "c3.8xlarge": 2043, - - "g2.2xlarge": 898, - - "m2.xlarge": 287, - "m2.2xlarge": 575, - "m2.4xlarge": 1150, - - "cr1.8xlarge": 4105, - - "r3.large": 210, - "r3.xlarge": 420, - "r3.2xlarge": 840, - "r3.4xlarge": 1680, - "r3.8xlarge": 3360, - - "hi1.4xlarge": 3276, - - "i2.xlarge": 1001, - "i2.2xlarge": 2001, - "i2.4xlarge": 4002, - "i2.8xlarge": 8004, - - "hs1.8xlarge": 5400, - - "t1.micro": 26, - - "t2.micro": 20, - "t2.small": 40, - "t2.medium": 80, - - "c4.large": 147, - "c4.xlarge": 294, - "c4.2xlarge": 588, - "c4.4xlarge": 1176, - "c4.8xlarge": 2352, - }, - "ap-northeast-2": 
{ // Seoul. - "t2.nano": 10, - "t2.micro": 20, - "t2.small": 40, - "t2.medium": 80, - "t2.large": 160, - - "m4.large": 165, - "m4.xlarge": 331, - "m4.2xlarge": 660, - "m4.4xlarge": 1321, - "m4.10xlarge": 3303, - - "c4.large": 120, - "c4.xlarge": 239, - "c4.2xlarge": 478, - "c4.4xlarge": 955, - "c4.8xlarge": 1910, - - "r3.large": 200, - "r3.xlarge": 399, - "r3.2xlarge": 798, - "r3.4xlarge": 1596, - "r3.8xlarge": 3192, - - "i2.xlarge": 1001, - "i2.2xlarge": 2001, - "i2.4xlarge": 4002, - "i2.8xlarge": 8004, - // TODO(gz): Add d2 types below per lp:1535838 - //"d2.xlarge": 844, - //"d2.2xlarge": 1688, - //"d2.4xlarge": 3376, - //"d2.8xlarge": 6752, - }, - "ap-southeast-1": { // Singapore. - "m1.small": 58, - "m1.medium": 117, - "m1.large": 233, - "m1.xlarge": 467, - - "m3.medium": 98, - "m3.large": 196, - "m3.xlarge": 392, - "m3.2xlarge": 784, - - "m4.large": 178, - "m4.xlarge": 355, - "m4.2xlarge": 711, - "m4.4xlarge": 1421, - "m4.10xlarge": 3553, - - "c1.medium": 164, - "c1.xlarge": 655, - - "c3.large": 132, - "c3.xlarge": 265, - "c3.2xlarge": 529, - "c3.4xlarge": 1058, - "c3.8xlarge": 2117, - - "m2.xlarge": 296, - "m2.2xlarge": 592, - "m2.4xlarge": 1183, - - "r3.large": 210, - "r3.xlarge": 420, - "r3.2xlarge": 840, - "r3.4xlarge": 1680, - "r3.8xlarge": 3360, - - "i2.xlarge": 1018, - "i2.2xlarge": 2035, - "i2.4xlarge": 4070, - "i2.8xlarge": 8140, - - "hs1.8xlarge": 5570, - - "t1.micro": 20, - - "t2.micro": 20, - "t2.small": 40, - "t2.medium": 80, - - "c4.large": 152, - "c4.xlarge": 304, - "c4.2xlarge": 608, - "c4.4xlarge": 1216, - "c4.8xlarge": 2432, - - "g2.2xlarge": 1000, - }, - "ap-southeast-2": { // Sydney. 
- "m1.small": 58, - "m1.medium": 117, - "m1.large": 233, - "m1.xlarge": 467, - - "m3.medium": 98, - "m3.large": 196, - "m3.xlarge": 392, - "m3.2xlarge": 784, - - "m4.large": 168, - "m4.xlarge": 336, - "m4.2xlarge": 673, - "m4.4xlarge": 1345, - "m4.10xlarge": 3363, - - "c1.medium": 164, - "c1.xlarge": 655, - - "c3.large": 132, - "c3.xlarge": 265, - "c3.2xlarge": 529, - "c3.4xlarge": 1058, - "c3.8xlarge": 2117, - - "m2.xlarge": 296, - "m2.2xlarge": 592, - "m2.4xlarge": 1183, - - "r3.large": 210, - "r3.xlarge": 420, - "r3.2xlarge": 840, - "r3.4xlarge": 1680, - "r3.8xlarge": 3360, - - "i2.xlarge": 1018, - "i2.2xlarge": 2035, - "i2.4xlarge": 4070, - "i2.8xlarge": 8140, - - "hs1.8xlarge": 5570, - - "t1.micro": 20, - - "t2.micro": 20, - "t2.small": 40, - "t2.medium": 80, - - "c4.large": 152, - "c4.xlarge": 304, - "c4.2xlarge": 608, - "c4.4xlarge": 1216, - "c4.8xlarge": 2432, - - "g2.2xlarge": 898, - }, - "eu-west-1": { // Ireland. - "m1.small": 47, - "m1.medium": 95, - "m1.large": 190, - "m1.xlarge": 379, - - "m3.medium": 77, - "m3.large": 154, - "m3.xlarge": 308, - "m3.2xlarge": 616, - - "m4.large": 132, - "m4.xlarge": 264, - "m4.2xlarge": 528, - "m4.4xlarge": 1056, - "m4.10xlarge": 2641, - - "c1.medium": 148, - "c1.xlarge": 592, - - "cc2.8xlarge": 2250, - - "c3.large": 120, - "c3.xlarge": 239, - "c3.2xlarge": 478, - "c3.4xlarge": 956, - "c3.8xlarge": 1912, - - "cg1.4xlarge": 2360, - - "g2.2xlarge": 702, - - "m2.xlarge": 275, - "m2.2xlarge": 550, - "m2.4xlarge": 1100, - - "cr1.8xlarge": 3750, - - "r3.large": 195, - "r3.xlarge": 390, - "r3.2xlarge": 780, - "r3.4xlarge": 1560, - "r3.8xlarge": 3120, - - "hi1.4xlarge": 3100, - - "i2.xlarge": 938, - "i2.2xlarge": 1876, - "i2.4xlarge": 3751, - "i2.8xlarge": 7502, - - "hs1.8xlarge": 4900, - - "t1.micro": 20, - - "t2.micro": 14, - "t2.small": 28, - "t2.medium": 56, - - "c4.large": 132, - "c4.xlarge": 264, - "c4.2xlarge": 528, - "c4.4xlarge": 1056, - "c4.8xlarge": 2112, - }, - "sa-east-1": { // Sao Paulo. 
- "m1.small": 58, - "m1.medium": 117, - "m1.large": 233, - "m1.xlarge": 467, - - "m3.medium": 95, - "m3.large": 190, - "m3.xlarge": 381, - "m3.2xlarge": 761, - - "c1.medium": 179, - "c1.xlarge": 718, - - "m2.xlarge": 323, - "m2.2xlarge": 645, - "m2.4xlarge": 1291, - - "t1.micro": 27, - - "t2.micro": 27, - "t2.small": 54, - "t2.medium": 108, - - "c3.large": 163, - "c3.xlarge": 325, - "c3.2xlarge": 650, - "c3.4xlarge": 1300, - "c3.8xlarge": 2600, - }, - "us-east-1": { // Northern Virginia. - "m1.small": 44, - "m1.medium": 87, - "m1.large": 175, - "m1.xlarge": 350, - - "m3.medium": 70, - "m3.large": 140, - "m3.xlarge": 280, - "m3.2xlarge": 560, - - "m4.large": 120, - "m4.xlarge": 239, - "m4.2xlarge": 479, - "m4.4xlarge": 958, - "m4.10xlarge": 2394, - - "c1.medium": 130, - "c1.xlarge": 520, - - "cc2.8xlarge": 2000, - - "c3.large": 105, - "c3.xlarge": 210, - "c3.2xlarge": 420, - "c3.4xlarge": 840, - "c3.8xlarge": 1680, - - "cg1.4xlarge": 2100, - - "g2.2xlarge": 650, - - "m2.xlarge": 245, - "m2.2xlarge": 490, - "m2.4xlarge": 980, - - "cr1.8xlarge": 3500, - - "r3.large": 175, - "r3.xlarge": 350, - "r3.2xlarge": 700, - "r3.4xlarge": 1400, - "r3.8xlarge": 2800, - - "hi1.4xlarge": 3100, - - "i2.xlarge": 853, - "i2.2xlarge": 1705, - "i2.4xlarge": 3410, - "i2.8xlarge": 6820, - - "hs1.8xlarge": 4600, - - "t1.micro": 20, - - "t2.micro": 13, - "t2.small": 26, - "t2.medium": 52, - - "c4.large": 116, - "c4.xlarge": 232, - "c4.2xlarge": 464, - "c4.4xlarge": 928, - "c4.8xlarge": 1856, - }, - "us-west-1": { // Northern California. 
- "m1.small": 47, - "m1.medium": 95, - "m1.large": 190, - "m1.xlarge": 379, - - "m3.medium": 77, - "m3.large": 154, - "m3.xlarge": 308, - "m3.2xlarge": 616, - - "m4.large": 140, - "m4.xlarge": 279, - "m4.2xlarge": 559, - "m4.4xlarge": 1117, - "m4.10xlarge": 2793, - - "c1.medium": 148, - "c1.xlarge": 592, - - "c3.large": 120, - "c3.xlarge": 239, - "c3.2xlarge": 478, - "c3.4xlarge": 956, - "c3.8xlarge": 1912, - - "g2.2xlarge": 702, - - "m2.xlarge": 275, - "m2.2xlarge": 550, - "m2.4xlarge": 1100, - - "r3.large": 195, - "r3.xlarge": 390, - "r3.2xlarge": 780, - "r3.4xlarge": 1560, - "r3.8xlarge": 3120, - - "i2.xlarge": 938, - "i2.2xlarge": 1876, - "i2.4xlarge": 3751, - "i2.8xlarge": 7502, - - "t1.micro": 25, - - "t2.micro": 17, - "t2.small": 34, - "t2.medium": 68, - - "c4.large": 138, - "c4.xlarge": 276, - "c4.2xlarge": 552, - "c4.4xlarge": 1104, - "c4.8xlarge": 2208, - }, - "us-west-2": { // Oregon. - "m1.small": 44, - "m1.medium": 87, - "m1.large": 175, - "m1.xlarge": 350, - - "m3.medium": 70, - "m3.large": 140, - "m3.xlarge": 280, - "m3.2xlarge": 560, - - "m4.large": 120, - "m4.xlarge": 239, - "m4.2xlarge": 479, - "m4.4xlarge": 958, - "m4.10xlarge": 2394, - - "c1.medium": 130, - "c1.xlarge": 520, - - "cc2.8xlarge": 2000, - - "c3.large": 105, - "c3.xlarge": 210, - "c3.2xlarge": 420, - "c3.4xlarge": 840, - "c3.8xlarge": 1680, - - "g2.2xlarge": 650, - - "m2.xlarge": 245, - "m2.2xlarge": 490, - "m2.4xlarge": 980, - "cr1.8xlarge": 3500, - - "r3.large": 175, - "r3.xlarge": 350, - "r3.2xlarge": 700, - "r3.4xlarge": 1400, - "r3.8xlarge": 2800, - - "hi1.4xlarge": 3100, - - "i2.xlarge": 853, - "i2.2xlarge": 1705, - "i2.4xlarge": 3410, - "i2.8xlarge": 6820, - - "hs1.8xlarge": 4600, - - "t1.micro": 20, - - "t2.micro": 13, - "t2.small": 26, - "t2.medium": 52, - - "c4.large": 116, - "c4.xlarge": 232, - "c4.2xlarge": 464, - "c4.4xlarge": 928, - "c4.8xlarge": 1856, - }, - "eu-central-1": { // Frankfurt. 
- "t2.micro": 15, - "t2.small": 30, - "t2.medium": 60, - "t2.large": 120, - - "m3.medium": 79, - "m3.large": 158, - "m3.xlarge": 315, - "m3.2xlarge": 632, - - "m4.large": 143, - "m4.xlarge": 285, - "m4.2xlarge": 570, - "m4.4xlarge": 1140, - "m4.10xlarge": 2850, - - "c3.large": 129, - "c3.xlarge": 258, - "c3.2xlarge": 516, - "c3.4xlarge": 1032, - "c3.8xlarge": 2064, - - "c4.large": 134, - "c4.xlarge": 267, - "c4.2xlarge": 534, - "c4.4xlarge": 1069, - "c4.8xlarge": 2138, - - "r3.large": 200, - "r3.xlarge": 420, - "r3.2xlarge": 800, - "r3.4xlarge": 1600, - "r3.8xlarge": 3201, - - "i2.xlarge": 1013, - "i2.2xlarge": 2026, - "i2.4xlarge": 4051, - "i2.8xlarge": 8102, - }, - "us-gov-west-1": { // Isolated region - US GovCloud. - "t2.micro": 15, - "t2.small": 31, - "t2.medium": 62, - - "m3.medium": 84, - "m3.large": 168, - "m3.xlarge": 336, - "m3.2xlarge": 672, - - "c3.large": 126, - "c3.xlarge": 252, - "c3.2xlarge": 504, - "c3.4xlarge": 1008, - "c3.8xlarge": 2016, - - "r3.large": 210, - "r3.xlarge": 420, - "r3.2xlarge": 840, - "r3.4xlarge": 1680, - "r3.8xlarge": 3360, - - "i2.xlarge": 1023, - "i2.2xlarge": 2046, - "i2.4xlarge": 4092, - "i2.8xlarge": 8184, - - "hs1.8xlarge": 5520, - }, - "cn-north-1": { // Isolated region - China, Beijing. - // Instance type information is from - // http://www.amazonaws.cn/en/ec2/details/ - // TODO (anastasiamac 2015-03-05): - // To compensate for lack of pricing, we - // are putting in arbitrary values that reflect the relative costs. - // This is justified by the fact that, in Juju, we use instance-by-region cost - // to identify least expensive machine with desired constraints. 
- "t1.micro": 1, - - "m1.small": 2, - - "m3.medium": 3, - "m3.large": 5, - "m3.xlarge": 7, - "m3.2xlarge": 9, - - "c3.large": 4, - "c3.xlarge": 6, - "c3.2xlarge": 8, - "c3.4xlarge": 10, - "c3.8xlarge": 11, - }, -} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/ec2/internal/ec2instancetypes/doc.go juju-core-2.0.0/src/github.com/juju/juju/provider/ec2/internal/ec2instancetypes/doc.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/ec2/internal/ec2instancetypes/doc.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/ec2/internal/ec2instancetypes/doc.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,11 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +// Package ec2instancetypes contains instance type information +// for the ec2 provider, generated from the AWS Price List API. +// +// To update this package, first fetch index.json to this +// directory, and then run "go generate". The current index.json +// file can be found at: +// https://pricing.us-east-1.amazonaws.com/offers/v1.0/aws/AmazonEC2/current/index.json +package ec2instancetypes diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/ec2/internal/ec2instancetypes/generated.go juju-core-2.0.0/src/github.com/juju/juju/provider/ec2/internal/ec2instancetypes/generated.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/ec2/internal/ec2instancetypes/generated.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/ec2/internal/ec2instancetypes/generated.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,7325 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+ +package ec2instancetypes + +import ( + "github.com/juju/utils/arch" + + "github.com/juju/juju/environs/instances" +) + +var ( + paravirtual = "pv" + hvm = "hvm" + amd64 = []string{arch.AMD64} + both = []string{arch.AMD64, arch.I386} +) + +// Version: 20160901005907 +// Publication date: 2016-09-01 00:59:07 +0000 UTC +// +// This pricing list is for informational purposes only. All prices are subject to the additional terms included in the pricing pages on http://aws.amazon.com. All Free Tier prices are also subject to the terms included at https://aws.amazon.com/free/ + +var allInstanceTypes = map[string][]instances.InstanceType{ + + "ap-northeast-1": { + + // SKU: 4BJPFU3PAZJ4AKMM + // Instance family: General purpose + // Storage: 4 x 420 + { + Name: "m1.xlarge", + Arches: amd64, + CpuCores: 4, + CpuPower: instances.CpuPower(400), + Mem: 15360, + VirtType: ¶virtual, + Cost: 486, + Deprecated: true, + }, + + // SKU: 4GHFAT5CNS2FEKB2 + // Instance family: General purpose + // Storage: EBS only + { + Name: "m4.2xlarge", + Arches: amd64, + CpuCores: 8, + CpuPower: instances.CpuPower(2688), + Mem: 32768, + VirtType: &hvm, + Cost: 695, + }, + + // SKU: 4REMK3MMXCZ55ZX3 + // Instance family: Storage optimized + // Storage: 8 x 800 SSD + { + Name: "i2.8xlarge", + Arches: amd64, + CpuCores: 32, + CpuPower: instances.CpuPower(11200), + Mem: 249856, + VirtType: &hvm, + Cost: 8004, + }, + + // SKU: 6JP9PA73B58NZHUN + // Instance family: Storage optimized + // Storage: 12 x 2000 HDD + { + Name: "d2.4xlarge", + Arches: amd64, + CpuCores: 16, + CpuPower: instances.CpuPower(5376), + Mem: 124928, + VirtType: &hvm, + Cost: 3376, + }, + + // SKU: 6M27QQ6HYCNA5KGA + // Instance family: General purpose + // Storage: 1 x 4 SSD + { + Name: "m3.medium", + Arches: amd64, + CpuCores: 1, + CpuPower: instances.CpuPower(350), + Mem: 3840, + VirtType: &hvm, + Cost: 96, + }, + + // SKU: 6TMC6UD2UCCDAMNP + // Instance family: General purpose + // Storage: 2 x 420 + { + Name: "m1.large", + 
Arches: amd64, + CpuCores: 2, + CpuPower: instances.CpuPower(200), + Mem: 7680, + VirtType: ¶virtual, + Cost: 243, + Deprecated: true, + }, + + // SKU: 77K4UJJUNGQ6UXXN + // Instance family: GPU instance + // Storage: 1 x 60 SSD + { + Name: "g2.2xlarge", + Arches: amd64, + CpuCores: 8, + CpuPower: instances.CpuPower(2911), + Mem: 15360, + VirtType: &hvm, + Cost: 898, + }, + + // SKU: 7A24VVDQEZ54FYXU + // Instance family: Storage optimized + // Storage: 6 x 2000 HDD + { + Name: "d2.2xlarge", + Arches: amd64, + CpuCores: 8, + CpuPower: instances.CpuPower(2688), + Mem: 62464, + VirtType: &hvm, + Cost: 1688, + }, + + // SKU: 7KXQBZSKETPTG6QZ + // Instance family: Compute optimized + // Storage: EBS only + { + Name: "c4.large", + Arches: amd64, + CpuCores: 2, + CpuPower: instances.CpuPower(811), + Mem: 3840, + VirtType: &hvm, + Cost: 133, + }, + + // SKU: 7MYWT7Y96UT3NJ2D + // Instance family: General purpose + // Storage: EBS only + { + Name: "m4.large", + Arches: amd64, + CpuCores: 2, + CpuPower: instances.CpuPower(672), + Mem: 8192, + VirtType: &hvm, + Cost: 174, + }, + + // SKU: 8H36QJ2PHPR3SJ48 + // Instance family: Storage optimized + // Storage: 24 x 2000 + { + Name: "hs1.8xlarge", + Arches: amd64, + CpuCores: 17, + CpuPower: instances.CpuPower(4760), + Mem: 119808, + VirtType: &hvm, + Cost: 5400, + Deprecated: true, + }, + + // SKU: 9KMZWGZVTXKAQXNM + // Instance family: Memory optimized + // Storage: 1 x 160 SSD + { + Name: "r3.2xlarge", + Arches: amd64, + CpuCores: 8, + CpuPower: instances.CpuPower(2800), + Mem: 62464, + VirtType: &hvm, + Cost: 798, + }, + + // SKU: 9NSP3EV3G593P35X + // Instance family: General purpose + // Storage: EBS only + { + Name: "t2.micro", + Arches: both, + CpuCores: 1, + CpuPower: instances.CpuPower(10), + Mem: 1024, + VirtType: &hvm, + Cost: 20, + }, + + // SKU: AKQ89V8E78T6H534 + // Instance family: General purpose + // Storage: 1 x 160 + { + Name: "m1.small", + Arches: both, + CpuCores: 1, + CpuPower: instances.CpuPower(100), + 
Mem: 1741, + VirtType: ¶virtual, + Cost: 61, + Deprecated: true, + }, + + // SKU: AY6XZ64M22QQJCXE + // Instance family: General purpose + // Storage: 1 x 32 SSD + { + Name: "m3.large", + Arches: amd64, + CpuCores: 2, + CpuPower: instances.CpuPower(700), + Mem: 7680, + VirtType: &hvm, + Cost: 193, + }, + + // SKU: BQQUCAM9PFTSUNQX + // Instance family: Memory optimized + // Storage: 2 x 840 + { + Name: "m2.4xlarge", + Arches: amd64, + CpuCores: 8, + CpuPower: instances.CpuPower(800), + Mem: 70042, + VirtType: ¶virtual, + Cost: 1150, + Deprecated: true, + }, + + // SKU: BURRP7APFUCZFSZK + // Instance family: General purpose + // Storage: EBS only + { + Name: "m4.xlarge", + Arches: amd64, + CpuCores: 4, + CpuPower: instances.CpuPower(1344), + Mem: 16384, + VirtType: &hvm, + Cost: 348, + }, + + // SKU: BYV8H4R4VJNAH42Q + // Instance family: Memory optimized + // Storage: 1 x 320 SSD + { + Name: "r3.4xlarge", + Arches: amd64, + CpuCores: 16, + CpuPower: instances.CpuPower(5600), + Mem: 124928, + VirtType: &hvm, + Cost: 1596, + }, + + // SKU: CTK39QJHQN4CZ3PC + // Instance family: GPU instance + // Storage: 2 x 120 SSD + { + Name: "g2.8xlarge", + Arches: amd64, + CpuCores: 32, + CpuPower: instances.CpuPower(11647), + Mem: 61440, + VirtType: &hvm, + Cost: 3592, + }, + + // SKU: DDX2JPPMM28BXD7D + // Instance family: Memory optimized + // Storage: 2 x 320 SSD + { + Name: "r3.8xlarge", + Arches: amd64, + CpuCores: 32, + CpuPower: instances.CpuPower(11200), + Mem: 249856, + VirtType: &hvm, + Cost: 3192, + }, + + // SKU: E3J2T7B8EQDFXWDR + // Instance family: Compute optimized + // Storage: 2 x 320 SSD + { + Name: "c3.8xlarge", + Arches: amd64, + CpuCores: 32, + CpuPower: instances.CpuPower(12543), + Mem: 61440, + VirtType: &hvm, + Cost: 2043, + }, + + // SKU: E5MWNHYU3BAVZCRP + // Instance family: Compute optimized + // Storage: EBS only + { + Name: "c4.4xlarge", + Arches: amd64, + CpuCores: 16, + CpuPower: instances.CpuPower(6495), + Mem: 30720, + VirtType: &hvm, + Cost: 
1061, + }, + + // SKU: E6F66FZ47YZNXAJ2 + // Instance family: General purpose + // Storage: EBS only + { + Name: "t2.medium", + Arches: both, + CpuCores: 2, + CpuPower: instances.CpuPower(40), + Mem: 4096, + VirtType: &hvm, + Cost: 80, + }, + + // SKU: ERVWZ4V3UBYH4NQH + // Instance family: Micro instances + // Storage: EBS only + { + Name: "t1.micro", + Arches: both, + CpuCores: 1, + CpuPower: instances.CpuPower(20), + Mem: 628, + VirtType: ¶virtual, + Cost: 26, + }, + + // SKU: EZCSGZJ8PMXA2QF2 + // Instance family: Storage optimized + // Storage: 1 x 800 SSD + { + Name: "i2.xlarge", + Arches: amd64, + CpuCores: 4, + CpuPower: instances.CpuPower(1400), + Mem: 31232, + VirtType: &hvm, + Cost: 1000, + }, + + // SKU: F2RRJYX33EGMBSFR + // Instance family: General purpose + // Storage: 1 x 410 + { + Name: "m1.medium", + Arches: both, + CpuCores: 1, + CpuPower: instances.CpuPower(100), + Mem: 3840, + VirtType: ¶virtual, + Cost: 122, + Deprecated: true, + }, + + // SKU: F7XCNBBYFKX42QF3 + // Instance family: General purpose + // Storage: EBS only + { + Name: "t2.nano", + Arches: both, + CpuCores: 1, + CpuPower: instances.CpuPower(5), + Mem: 512, + VirtType: &hvm, + Cost: 10, + }, + + // SKU: FBUWUPNC8FXRUS5W + // Instance family: Storage optimized + // Storage: 4 x 800 SSD + { + Name: "i2.4xlarge", + Arches: amd64, + CpuCores: 16, + CpuPower: instances.CpuPower(5600), + Mem: 124928, + VirtType: &hvm, + Cost: 4002, + }, + + // SKU: G6G6ZNFBYMW2V8BH + // Instance family: Memory optimized + // Storage: 1 x 420 + { + Name: "m2.xlarge", + Arches: amd64, + CpuCores: 2, + CpuPower: instances.CpuPower(200), + Mem: 17511, + VirtType: ¶virtual, + Cost: 287, + Deprecated: true, + }, + + // SKU: GJHUHQSUU37VCQ5A + // Instance family: Memory optimized + // Storage: 1 x 80 SSD + { + Name: "r3.xlarge", + Arches: amd64, + CpuCores: 4, + CpuPower: instances.CpuPower(1400), + Mem: 31232, + VirtType: &hvm, + Cost: 399, + }, + + // SKU: GP8GQA2T96JQ4MBB + // Instance family: Memory 
optimized + // Storage: 2 x 120 SSD + { + Name: "cr1.8xlarge", + Arches: amd64, + CpuCores: 32, + CpuPower: instances.CpuPower(3200), + Mem: 249856, + VirtType: &hvm, + Cost: 4105, + Deprecated: true, + }, + + // SKU: HTNXMK8Z5YHMU737 + // Instance family: Compute optimized + // Storage: 2 x 40 SSD + { + Name: "c3.xlarge", + Arches: amd64, + CpuCores: 4, + CpuPower: instances.CpuPower(1567), + Mem: 7680, + VirtType: &hvm, + Cost: 255, + }, + + // SKU: J85A5X44TT267EH8 + // Instance family: General purpose + // Storage: 2 x 40 SSD + { + Name: "m3.xlarge", + Arches: amd64, + CpuCores: 4, + CpuPower: instances.CpuPower(1400), + Mem: 15360, + VirtType: &hvm, + Cost: 385, + }, + + // SKU: JTQKHD7ZTEEM4DC5 + // Instance family: General purpose + // Storage: EBS only + { + Name: "m4.10xlarge", + Arches: amd64, + CpuCores: 40, + CpuPower: instances.CpuPower(13440), + Mem: 163840, + VirtType: &hvm, + Cost: 3477, + }, + + // SKU: KM8DYQWHEC32CGGX + // Instance family: Storage optimized + // Storage: 2 x 800 SSD + { + Name: "i2.2xlarge", + Arches: amd64, + CpuCores: 8, + CpuPower: instances.CpuPower(2800), + Mem: 62464, + VirtType: &hvm, + Cost: 2001, + }, + + // SKU: M3G65XHCPFQHAQD5 + // Instance family: Storage optimized + // Storage: 2 x 1024 SSD + { + Name: "hi1.4xlarge", + Arches: amd64, + CpuCores: 16, + CpuPower: instances.CpuPower(5376), + Mem: 61952, + VirtType: &hvm, + Cost: 3276, + Deprecated: true, + }, + + // SKU: MJ7YVW9J2WD856AC + // Instance family: Memory optimized + // Storage: 2 x 1,920 + { + Name: "x1.32xlarge", + Arches: amd64, + CpuCores: 128, + CpuPower: instances.CpuPower(41216), + Mem: 1998848, + VirtType: &hvm, + Cost: 19341, + }, + + // SKU: PCB5ARVZ6TNS7A96 + // Instance family: General purpose + // Storage: 2 x 80 SSD + { + Name: "m3.2xlarge", + Arches: amd64, + CpuCores: 8, + CpuPower: instances.CpuPower(2800), + Mem: 30720, + VirtType: &hvm, + Cost: 770, + }, + + // SKU: PCNBVATW49APFGZQ + // Instance family: Compute optimized + // Storage: EBS 
only + { + Name: "c4.8xlarge", + Arches: amd64, + CpuCores: 36, + CpuPower: instances.CpuPower(14615), + Mem: 61440, + VirtType: &hvm, + Cost: 2122, + }, + + // SKU: PSF2TQK8WMUGUPYK + // Instance family: Storage optimized + // Storage: 24 x 2000 HDD + { + Name: "d2.8xlarge", + Arches: amd64, + CpuCores: 36, + CpuPower: instances.CpuPower(12096), + Mem: 249856, + VirtType: &hvm, + Cost: 6752, + }, + + // SKU: PTSCWYT4DGMHMSYG + // Instance family: Compute optimized + // Storage: 1 x 350 + { + Name: "c1.medium", + Arches: both, + CpuCores: 2, + CpuPower: instances.CpuPower(200), + Mem: 1741, + VirtType: ¶virtual, + Cost: 158, + Deprecated: true, + }, + + // SKU: Q4QTSF7H37JFW9ER + // Instance family: Compute optimized + // Storage: 2 x 16 SSD + { + Name: "c3.large", + Arches: both, + CpuCores: 2, + CpuPower: instances.CpuPower(783), + Mem: 3840, + VirtType: &hvm, + Cost: 128, + }, + + // SKU: Q5HVB8NUA7UMHV4Y + // Instance family: General purpose + // Storage: EBS only + { + Name: "t2.large", + Arches: amd64, + CpuCores: 2, + CpuPower: instances.CpuPower(60), + Mem: 8192, + VirtType: &hvm, + Cost: 160, + }, + + // SKU: Q85F79PK8VHHZT6X + // Instance family: Compute optimized + // Storage: EBS only + { + Name: "c4.2xlarge", + Arches: amd64, + CpuCores: 8, + CpuPower: instances.CpuPower(3247), + Mem: 15360, + VirtType: &hvm, + Cost: 531, + }, + + // SKU: UJB452HW969DQZFB + // Instance family: Compute optimized + // Storage: EBS only + { + Name: "c4.xlarge", + Arches: amd64, + CpuCores: 4, + CpuPower: instances.CpuPower(1623), + Mem: 7680, + VirtType: &hvm, + Cost: 265, + }, + + // SKU: UMV7384WFS5N9T5F + // Instance family: Memory optimized + // Storage: 1 x 850 + { + Name: "m2.2xlarge", + Arches: amd64, + CpuCores: 4, + CpuPower: instances.CpuPower(400), + Mem: 35021, + VirtType: ¶virtual, + Cost: 575, + Deprecated: true, + }, + + // SKU: URZU4GXQC7AT6RE9 + // Instance family: Compute optimized + // Storage: 4 x 420 + { + Name: "c1.xlarge", + Arches: amd64, + 
CpuCores: 8, + CpuPower: instances.CpuPower(800), + Mem: 7168, + VirtType: ¶virtual, + Cost: 632, + Deprecated: true, + }, + + // SKU: VWWQ7S3GZ9J8TF77 + // Instance family: Storage optimized + // Storage: 3 x 2000 HDD + { + Name: "d2.xlarge", + Arches: amd64, + CpuCores: 4, + CpuPower: instances.CpuPower(1344), + Mem: 31232, + VirtType: &hvm, + Cost: 844, + }, + + // SKU: XJ88E6MSR3AYHFXA + // Instance family: Compute optimized + // Storage: 2 x 160 SSD + { + Name: "c3.4xlarge", + Arches: amd64, + CpuCores: 16, + CpuPower: instances.CpuPower(6271), + Mem: 30720, + VirtType: &hvm, + Cost: 1020, + }, + + // SKU: XU2NYYPCRTK4T7CN + // Instance family: General purpose + // Storage: EBS only + { + Name: "m4.4xlarge", + Arches: amd64, + CpuCores: 16, + CpuPower: instances.CpuPower(5376), + Mem: 65536, + VirtType: &hvm, + Cost: 1391, + }, + + // SKU: YCYU3NQCQRYQ2TU7 + // Instance family: Memory optimized + // Storage: 1 x 32 SSD + { + Name: "r3.large", + Arches: amd64, + CpuCores: 2, + CpuPower: instances.CpuPower(700), + Mem: 15616, + VirtType: &hvm, + Cost: 200, + }, + + // SKU: YJ2E4JTYGN2FMNQM + // Instance family: Compute optimized + // Storage: 4 x 840 + { + Name: "cc2.8xlarge", + Arches: amd64, + CpuCores: 32, + CpuPower: instances.CpuPower(11647), + Mem: 61952, + VirtType: &hvm, + Cost: 2349, + Deprecated: true, + }, + + // SKU: YR67H6NVBRN37HRZ + // Instance family: Compute optimized + // Storage: 2 x 80 SSD + { + Name: "c3.2xlarge", + Arches: amd64, + CpuCores: 8, + CpuPower: instances.CpuPower(3135), + Mem: 15360, + VirtType: &hvm, + Cost: 511, + }, + + // SKU: YUYNTU8AZ9MKK68V + // Instance family: General purpose + // Storage: EBS only + { + Name: "t2.small", + Arches: both, + CpuCores: 1, + CpuPower: instances.CpuPower(20), + Mem: 2048, + VirtType: &hvm, + Cost: 40, + }, + }, + + "ap-northeast-2": { + + // SKU: 3MBNRY22Y6A2W6WY + // Instance family: General purpose + // Storage: 1 x 4 SSD + { + Name: "m3.medium", + Arches: amd64, + CpuCores: 1, + CpuPower: 
instances.CpuPower(350), + Mem: 3840, + VirtType: &hvm, + Cost: 91, + }, + + // SKU: 3UWMR4BVSMJ3PTQ5 + // Instance family: Storage optimized + // Storage: 1 x 800 SSD + { + Name: "i2.xlarge", + Arches: amd64, + CpuCores: 4, + CpuPower: instances.CpuPower(1400), + Mem: 31232, + VirtType: &hvm, + Cost: 1000, + }, + + // SKU: 45D7HY2M47KUYJXR + // Instance family: Compute optimized + // Storage: 2 x 320 SSD + { + Name: "c3.8xlarge", + Arches: amd64, + CpuCores: 32, + CpuPower: instances.CpuPower(12543), + Mem: 61440, + VirtType: &hvm, + Cost: 1839, + }, + + // SKU: 5CB9VHZSJWQTZN3W + // Instance family: Compute optimized + // Storage: EBS only + { + Name: "c4.large", + Arches: amd64, + CpuCores: 2, + CpuPower: instances.CpuPower(811), + Mem: 3840, + VirtType: &hvm, + Cost: 120, + }, + + // SKU: 5RC27Y2XYGFJVP7K + // Instance family: General purpose + // Storage: EBS only + { + Name: "m4.xlarge", + Arches: amd64, + CpuCores: 4, + CpuPower: instances.CpuPower(1344), + Mem: 16384, + VirtType: &hvm, + Cost: 331, + }, + + // SKU: 65JJWWKAHFAMNF85 + // Instance family: Memory optimized + // Storage: 2 x 1,920 + { + Name: "x1.32xlarge", + Arches: amd64, + CpuCores: 128, + CpuPower: instances.CpuPower(41216), + Mem: 1998848, + VirtType: &hvm, + Cost: 19341, + }, + + // SKU: 6K25ZNG5NAXQC5AB + // Instance family: GPU instance + // Storage: 1 x 60 SSD + { + Name: "g2.2xlarge", + Arches: amd64, + CpuCores: 8, + CpuPower: instances.CpuPower(2911), + Mem: 15360, + VirtType: &hvm, + Cost: 898, + }, + + // SKU: 6NSPY3BTJRX47KWG + // Instance family: General purpose + // Storage: EBS only + { + Name: "m4.4xlarge", + Arches: amd64, + CpuCores: 16, + CpuPower: instances.CpuPower(5376), + Mem: 65536, + VirtType: &hvm, + Cost: 1321, + }, + + // SKU: 6TYE4QER4XX5TSC5 + // Instance family: Memory optimized + // Storage: 2 x 320 SSD + { + Name: "r3.8xlarge", + Arches: amd64, + CpuCores: 32, + CpuPower: instances.CpuPower(11200), + Mem: 249856, + VirtType: &hvm, + Cost: 3192, + }, + + // 
SKU: 6U3CMEPEAZEVZSV8 + // Instance family: Memory optimized + // Storage: 1 x 32 SSD + { + Name: "r3.large", + Arches: amd64, + CpuCores: 2, + CpuPower: instances.CpuPower(700), + Mem: 15616, + VirtType: &hvm, + Cost: 200, + }, + + // SKU: 7GTTQXNXREPMU7WY + // Instance family: General purpose + // Storage: 2 x 40 SSD + { + Name: "m3.xlarge", + Arches: amd64, + CpuCores: 4, + CpuPower: instances.CpuPower(1400), + Mem: 15360, + VirtType: &hvm, + Cost: 366, + }, + + // SKU: 7MQ7AMJWV8BPWH88 + // Instance family: Compute optimized + // Storage: 2 x 160 SSD + { + Name: "c3.4xlarge", + Arches: amd64, + CpuCores: 16, + CpuPower: instances.CpuPower(6271), + Mem: 30720, + VirtType: &hvm, + Cost: 919, + }, + + // SKU: 7VFMGFAWZ9QPBHST + // Instance family: General purpose + // Storage: EBS only + { + Name: "t2.medium", + Arches: both, + CpuCores: 2, + CpuPower: instances.CpuPower(40), + Mem: 4096, + VirtType: &hvm, + Cost: 80, + }, + + // SKU: 852A82DVHUAQRBUS + // Instance family: Storage optimized + // Storage: 3 x 2000 HDD + { + Name: "d2.xlarge", + Arches: amd64, + CpuCores: 4, + CpuPower: instances.CpuPower(1344), + Mem: 31232, + VirtType: &hvm, + Cost: 844, + }, + + // SKU: 98ZFCFAZXXRGF7CG + // Instance family: GPU instance + // Storage: 2 x 120 SSD + { + Name: "g2.8xlarge", + Arches: amd64, + CpuCores: 32, + CpuPower: instances.CpuPower(11647), + Mem: 61440, + VirtType: &hvm, + Cost: 3592, + }, + + // SKU: 9XQJDHCZD834J68K + // Instance family: General purpose + // Storage: EBS only + { + Name: "m4.large", + Arches: amd64, + CpuCores: 2, + CpuPower: instances.CpuPower(672), + Mem: 8192, + VirtType: &hvm, + Cost: 165, + }, + + // SKU: BHS4CH7UVYY7QN2H + // Instance family: Storage optimized + // Storage: 4 x 800 SSD + { + Name: "i2.4xlarge", + Arches: amd64, + CpuCores: 16, + CpuPower: instances.CpuPower(5600), + Mem: 124928, + VirtType: &hvm, + Cost: 4002, + }, + + // SKU: BQDV4FCR9QJEQHQS + // Instance family: Compute optimized + // Storage: EBS only + { + Name: 
"c4.4xlarge", + Arches: amd64, + CpuCores: 16, + CpuPower: instances.CpuPower(6495), + Mem: 30720, + VirtType: &hvm, + Cost: 955, + }, + + // SKU: BRTSXYEA84EMVTVE + // Instance family: Memory optimized + // Storage: 1 x 160 SSD + { + Name: "r3.2xlarge", + Arches: amd64, + CpuCores: 8, + CpuPower: instances.CpuPower(2800), + Mem: 62464, + VirtType: &hvm, + Cost: 798, + }, + + // SKU: CEU6V4KXWNQA6DD3 + // Instance family: General purpose + // Storage: EBS only + { + Name: "t2.large", + Arches: amd64, + CpuCores: 2, + CpuPower: instances.CpuPower(60), + Mem: 8192, + VirtType: &hvm, + Cost: 160, + }, + + // SKU: CFXCUT5A22XNZ43Y + // Instance family: Compute optimized + // Storage: 2 x 16 SSD + { + Name: "c3.large", + Arches: both, + CpuCores: 2, + CpuPower: instances.CpuPower(783), + Mem: 3840, + VirtType: &hvm, + Cost: 115, + }, + + // SKU: DTEVN35HD43BM5ST + // Instance family: General purpose + // Storage: 2 x 80 SSD + { + Name: "m3.2xlarge", + Arches: amd64, + CpuCores: 8, + CpuPower: instances.CpuPower(2800), + Mem: 30720, + VirtType: &hvm, + Cost: 732, + }, + + // SKU: G6ATM6E28ZDDBNCE + // Instance family: Compute optimized + // Storage: EBS only + { + Name: "c4.xlarge", + Arches: amd64, + CpuCores: 4, + CpuPower: instances.CpuPower(1623), + Mem: 7680, + VirtType: &hvm, + Cost: 239, + }, + + // SKU: HUNMAJP6W7UHJAAG + // Instance family: Compute optimized + // Storage: EBS only + { + Name: "c4.8xlarge", + Arches: amd64, + CpuCores: 36, + CpuPower: instances.CpuPower(14615), + Mem: 61440, + VirtType: &hvm, + Cost: 1910, + }, + + // SKU: JZYQ7EEFZRRYYC5S + // Instance family: Storage optimized + // Storage: 6 x 2000 HDD + { + Name: "d2.2xlarge", + Arches: amd64, + CpuCores: 8, + CpuPower: instances.CpuPower(2688), + Mem: 62464, + VirtType: &hvm, + Cost: 1688, + }, + + // SKU: K79C7JVTDAKRA842 + // Instance family: General purpose + // Storage: 1 x 32 SSD + { + Name: "m3.large", + Arches: amd64, + CpuCores: 2, + CpuPower: instances.CpuPower(700), + Mem: 7680, + 
VirtType: &hvm, + Cost: 183, + }, + + // SKU: KGFSNH7UYJEDWTQQ + // Instance family: General purpose + // Storage: EBS only + { + Name: "t2.nano", + Arches: both, + CpuCores: 1, + CpuPower: instances.CpuPower(5), + Mem: 512, + VirtType: &hvm, + Cost: 10, + }, + + // SKU: KUKJATN7HCNF2UFT + // Instance family: General purpose + // Storage: EBS only + { + Name: "m4.2xlarge", + Arches: amd64, + CpuCores: 8, + CpuPower: instances.CpuPower(2688), + Mem: 32768, + VirtType: &hvm, + Cost: 660, + }, + + // SKU: KW2ZPRSC298WFJ94 + // Instance family: Memory optimized + // Storage: 1 x 80 SSD + { + Name: "r3.xlarge", + Arches: amd64, + CpuCores: 4, + CpuPower: instances.CpuPower(1400), + Mem: 31232, + VirtType: &hvm, + Cost: 399, + }, + + // SKU: MXYJRCMDAHMFNUAB + // Instance family: Memory optimized + // Storage: 1 x 320 SSD + { + Name: "r3.4xlarge", + Arches: amd64, + CpuCores: 16, + CpuPower: instances.CpuPower(5600), + Mem: 124928, + VirtType: &hvm, + Cost: 1596, + }, + + // SKU: N6M9WS3F7XHZ2TXS + // Instance family: Storage optimized + // Storage: 24 x 2000 HDD + { + Name: "d2.8xlarge", + Arches: amd64, + CpuCores: 36, + CpuPower: instances.CpuPower(12096), + Mem: 249856, + VirtType: &hvm, + Cost: 6752, + }, + + // SKU: P75FYMSDEYRH34VG + // Instance family: Storage optimized + // Storage: 2 x 800 SSD + { + Name: "i2.2xlarge", + Arches: amd64, + CpuCores: 8, + CpuPower: instances.CpuPower(2800), + Mem: 62464, + VirtType: &hvm, + Cost: 2001, + }, + + // SKU: PBRNAGPS98SBSDRS + // Instance family: Compute optimized + // Storage: EBS only + { + Name: "c4.2xlarge", + Arches: amd64, + CpuCores: 8, + CpuPower: instances.CpuPower(3247), + Mem: 15360, + VirtType: &hvm, + Cost: 478, + }, + + // SKU: PSBR72NYUMRACH7E + // Instance family: General purpose + // Storage: EBS only + { + Name: "t2.small", + Arches: both, + CpuCores: 1, + CpuPower: instances.CpuPower(20), + Mem: 2048, + VirtType: &hvm, + Cost: 40, + }, + + // SKU: R7GFV82WRF8QTZYP + // Instance family: Compute 
optimized + // Storage: 2 x 40 SSD + { + Name: "c3.xlarge", + Arches: amd64, + CpuCores: 4, + CpuPower: instances.CpuPower(1567), + Mem: 7680, + VirtType: &hvm, + Cost: 230, + }, + + // SKU: RM2KHQ9S45BW6B7M + // Instance family: Compute optimized + // Storage: 2 x 80 SSD + { + Name: "c3.2xlarge", + Arches: amd64, + CpuCores: 8, + CpuPower: instances.CpuPower(3135), + Mem: 15360, + VirtType: &hvm, + Cost: 460, + }, + + // SKU: WFBUYA3WPRPDVNEH + // Instance family: Storage optimized + // Storage: 12 x 2000 HDD + { + Name: "d2.4xlarge", + Arches: amd64, + CpuCores: 16, + CpuPower: instances.CpuPower(5376), + Mem: 124928, + VirtType: &hvm, + Cost: 3376, + }, + + // SKU: XCB734X2BM8PZ77F + // Instance family: General purpose + // Storage: EBS only + { + Name: "m4.10xlarge", + Arches: amd64, + CpuCores: 40, + CpuPower: instances.CpuPower(13440), + Mem: 163840, + VirtType: &hvm, + Cost: 3303, + }, + + // SKU: YG3C8Z588MN6BXGW + // Instance family: Storage optimized + // Storage: 8 x 800 SSD + { + Name: "i2.8xlarge", + Arches: amd64, + CpuCores: 32, + CpuPower: instances.CpuPower(11200), + Mem: 249856, + VirtType: &hvm, + Cost: 8004, + }, + + // SKU: YMCDAKZ8EVGJJDRM + // Instance family: General purpose + // Storage: EBS only + { + Name: "t2.micro", + Arches: both, + CpuCores: 1, + CpuPower: instances.CpuPower(10), + Mem: 1024, + VirtType: &hvm, + Cost: 20, + }, + }, + + "ap-south-1": { + + // SKU: 2BHQP3WKDU9A2DSC + // Instance family: Storage optimized + // Storage: 3 x 2000 HDD + { + Name: "d2.xlarge", + Arches: amd64, + CpuCores: 4, + CpuPower: instances.CpuPower(1344), + Mem: 31232, + VirtType: &hvm, + Cost: 827, + }, + + // SKU: 2T64ZB6E54RM9GA2 + // Instance family: Compute optimized + // Storage: EBS only + { + Name: "c4.8xlarge", + Arches: amd64, + CpuCores: 36, + CpuPower: instances.CpuPower(14615), + Mem: 61440, + VirtType: &hvm, + Cost: 2195, + }, + + // SKU: 3P5UPPTRJJQ6TKSU + // Instance family: General purpose + // Storage: EBS only + { + Name: 
"m4.large", + Arches: amd64, + CpuCores: 2, + CpuPower: instances.CpuPower(672), + Mem: 8192, + VirtType: &hvm, + Cost: 169, + }, + + // SKU: 5N383TJKMC5FSCKD + // Instance family: Memory optimized + // Storage: 2 x 320 SSD + { + Name: "r3.8xlarge", + Arches: amd64, + CpuCores: 32, + CpuPower: instances.CpuPower(11200), + Mem: 249856, + VirtType: &hvm, + Cost: 3032, + }, + + // SKU: 673MQ62EKV4VCTT8 + // Instance family: General purpose + // Storage: EBS only + { + Name: "t2.nano", + Arches: both, + CpuCores: 1, + CpuPower: instances.CpuPower(5), + Mem: 512, + VirtType: &hvm, + Cost: 9, + }, + + // SKU: 6WAFB82CP99WZXD9 + // Instance family: General purpose + // Storage: EBS only + { + Name: "t2.small", + Arches: both, + CpuCores: 1, + CpuPower: instances.CpuPower(20), + Mem: 2048, + VirtType: &hvm, + Cost: 38, + }, + + // SKU: 7HYM8MHNNFW2NN6T + // Instance family: Storage optimized + // Storage: 4 x 800 SSD + { + Name: "i2.4xlarge", + Arches: amd64, + CpuCores: 16, + CpuPower: instances.CpuPower(5600), + Mem: 124928, + VirtType: &hvm, + Cost: 3867, + }, + + // SKU: 8BG4ECAGKKNWYDVU + // Instance family: Compute optimized + // Storage: EBS only + { + Name: "c4.2xlarge", + Arches: amd64, + CpuCores: 8, + CpuPower: instances.CpuPower(3247), + Mem: 15360, + VirtType: &hvm, + Cost: 549, + }, + + // SKU: 8U4NEK2635VB7NHD + // Instance family: Memory optimized + // Storage: 1 x 160 SSD + { + Name: "r3.2xlarge", + Arches: amd64, + CpuCores: 8, + CpuPower: instances.CpuPower(2800), + Mem: 62464, + VirtType: &hvm, + Cost: 758, + }, + + // SKU: 9Q2KYTZY2YDQZCM8 + // Instance family: General purpose + // Storage: EBS only + { + Name: "m4.xlarge", + Arches: amd64, + CpuCores: 4, + CpuPower: instances.CpuPower(1344), + Mem: 16384, + VirtType: &hvm, + Cost: 337, + }, + + // SKU: AFU2HU8WVY9T6QAK + // Instance family: Storage optimized + // Storage: 2 x 800 SSD + { + Name: "i2.2xlarge", + Arches: amd64, + CpuCores: 8, + CpuPower: instances.CpuPower(2800), + Mem: 62464, + 
VirtType: &hvm, + Cost: 1933, + }, + + // SKU: CA3Y8U6BUYR54NVM + // Instance family: Memory optimized + // Storage: 1 x 320 SSD + { + Name: "r3.4xlarge", + Arches: amd64, + CpuCores: 16, + CpuPower: instances.CpuPower(5600), + Mem: 124928, + VirtType: &hvm, + Cost: 1516, + }, + + // SKU: FQ7FVC9B3R8RBBXA + // Instance family: General purpose + // Storage: EBS only + { + Name: "m4.10xlarge", + Arches: amd64, + CpuCores: 40, + CpuPower: instances.CpuPower(13440), + Mem: 163840, + VirtType: &hvm, + Cost: 3375, + }, + + // SKU: G4283CPK5MQ5QQ2A + // Instance family: Storage optimized + // Storage: 8 x 800 SSD + { + Name: "i2.8xlarge", + Arches: amd64, + CpuCores: 32, + CpuPower: instances.CpuPower(11200), + Mem: 249856, + VirtType: &hvm, + Cost: 7733, + }, + + // SKU: G69QDQNHE5SR7846 + // Instance family: Storage optimized + // Storage: 12 x 2000 HDD + { + Name: "d2.4xlarge", + Arches: amd64, + CpuCores: 16, + CpuPower: instances.CpuPower(5376), + Mem: 124928, + VirtType: &hvm, + Cost: 3306, + }, + + // SKU: GG7UHJRKQSGP364T + // Instance family: General purpose + // Storage: EBS only + { + Name: "t2.large", + Arches: amd64, + CpuCores: 2, + CpuPower: instances.CpuPower(60), + Mem: 8192, + VirtType: &hvm, + Cost: 152, + }, + + // SKU: GGTGBU32M4STN8YS + // Instance family: Storage optimized + // Storage: 6 x 2000 HDD + { + Name: "d2.2xlarge", + Arches: amd64, + CpuCores: 8, + CpuPower: instances.CpuPower(2688), + Mem: 62464, + VirtType: &hvm, + Cost: 1653, + }, + + // SKU: JXAMSKC2ZXKCA37S + // Instance family: General purpose + // Storage: EBS only + { + Name: "t2.micro", + Arches: both, + CpuCores: 1, + CpuPower: instances.CpuPower(10), + Mem: 1024, + VirtType: &hvm, + Cost: 19, + }, + + // SKU: KFTR5EQCGQ6AUYXP + // Instance family: Compute optimized + // Storage: EBS only + { + Name: "c4.large", + Arches: amd64, + CpuCores: 2, + CpuPower: instances.CpuPower(811), + Mem: 3840, + VirtType: &hvm, + Cost: 137, + }, + + // SKU: MKNAAVQMBXXNTPQQ + // Instance family: 
Memory optimized + // Storage: 1 x 32 SSD + { + Name: "r3.large", + Arches: amd64, + CpuCores: 2, + CpuPower: instances.CpuPower(700), + Mem: 15616, + VirtType: &hvm, + Cost: 190, + }, + + // SKU: NH9KFSA26V6F5742 + // Instance family: Storage optimized + // Storage: 24 x 2000 HDD + { + Name: "d2.8xlarge", + Arches: amd64, + CpuCores: 36, + CpuPower: instances.CpuPower(12096), + Mem: 249856, + VirtType: &hvm, + Cost: 6612, + }, + + // SKU: Q5HCF2WEXJ7TRHNF + // Instance family: General purpose + // Storage: EBS only + { + Name: "t2.medium", + Arches: both, + CpuCores: 2, + CpuPower: instances.CpuPower(40), + Mem: 4096, + VirtType: &hvm, + Cost: 76, + }, + + // SKU: TEV889FX73ZKZ8TU + // Instance family: Compute optimized + // Storage: EBS only + { + Name: "c4.4xlarge", + Arches: amd64, + CpuCores: 16, + CpuPower: instances.CpuPower(6495), + Mem: 30720, + VirtType: &hvm, + Cost: 1097, + }, + + // SKU: TPTBS44NNEJN3HUG + // Instance family: Storage optimized + // Storage: 1 x 800 SSD + { + Name: "i2.xlarge", + Arches: amd64, + CpuCores: 4, + CpuPower: instances.CpuPower(1400), + Mem: 31232, + VirtType: &hvm, + Cost: 967, + }, + + // SKU: TPVVGJC63DQYU7EJ + // Instance family: Compute optimized + // Storage: EBS only + { + Name: "c4.xlarge", + Arches: amd64, + CpuCores: 4, + CpuPower: instances.CpuPower(1623), + Mem: 7680, + VirtType: &hvm, + Cost: 275, + }, + + // SKU: YKUFHRZDYCT9JG3A + // Instance family: General purpose + // Storage: EBS only + { + Name: "m4.4xlarge", + Arches: amd64, + CpuCores: 16, + CpuPower: instances.CpuPower(5376), + Mem: 65536, + VirtType: &hvm, + Cost: 1350, + }, + + // SKU: ZEEU583UYCZMVJZV + // Instance family: Memory optimized + // Storage: 1 x 80 SSD + { + Name: "r3.xlarge", + Arches: amd64, + CpuCores: 4, + CpuPower: instances.CpuPower(1400), + Mem: 31232, + VirtType: &hvm, + Cost: 379, + }, + + // SKU: ZVMAFPQR3NKB6VVP + // Instance family: General purpose + // Storage: EBS only + { + Name: "m4.2xlarge", + Arches: amd64, + CpuCores: 
8, + CpuPower: instances.CpuPower(2688), + Mem: 32768, + VirtType: &hvm, + Cost: 675, + }, + }, + + "ap-southeast-1": { + + // SKU: 2B4AFZB6SYHMPZGS + // Instance family: General purpose + // Storage: 2 x 420 + { + Name: "m1.large", + Arches: amd64, + CpuCores: 2, + CpuPower: instances.CpuPower(200), + Mem: 7680, + VirtType: ¶virtual, + Cost: 233, + Deprecated: true, + }, + + // SKU: 39ZR86RYWKDSK82K + // Instance family: Memory optimized + // Storage: 1 x 420 + { + Name: "m2.xlarge", + Arches: amd64, + CpuCores: 2, + CpuPower: instances.CpuPower(200), + Mem: 17511, + VirtType: ¶virtual, + Cost: 296, + Deprecated: true, + }, + + // SKU: 3ZUGJVTA8NWE9NZT + // Instance family: Compute optimized + // Storage: EBS only + { + Name: "c4.4xlarge", + Arches: amd64, + CpuCores: 16, + CpuPower: instances.CpuPower(6495), + Mem: 30720, + VirtType: &hvm, + Cost: 1155, + }, + + // SKU: 57M4AZ4NRYTPT6NM + // Instance family: Memory optimized + // Storage: 2 x 1,920 + { + Name: "x1.32xlarge", + Arches: amd64, + CpuCores: 128, + CpuPower: instances.CpuPower(41216), + Mem: 1998848, + VirtType: &hvm, + Cost: 19341, + }, + + // SKU: 5ES8X7PS795W6ZD4 + // Instance family: General purpose + // Storage: EBS only + { + Name: "m4.4xlarge", + Arches: amd64, + CpuCores: 16, + CpuPower: instances.CpuPower(5376), + Mem: 65536, + VirtType: &hvm, + Cost: 1421, + }, + + // SKU: 6R4QVUNHTJVS9J2S + // Instance family: General purpose + // Storage: 1 x 160 + { + Name: "m1.small", + Arches: both, + CpuCores: 1, + CpuPower: instances.CpuPower(100), + Mem: 1741, + VirtType: ¶virtual, + Cost: 58, + Deprecated: true, + }, + + // SKU: 7FQD2RCMJSS57GFA + // Instance family: General purpose + // Storage: 1 x 410 + { + Name: "m1.medium", + Arches: both, + CpuCores: 1, + CpuPower: instances.CpuPower(100), + Mem: 3840, + VirtType: ¶virtual, + Cost: 117, + Deprecated: true, + }, + + // SKU: 7QHAUE39SCU6N9N9 + // Instance family: General purpose + // Storage: EBS only + { + Name: "t2.small", + Arches: both, + 
CpuCores: 1, + CpuPower: instances.CpuPower(20), + Mem: 2048, + VirtType: &hvm, + Cost: 40, + }, + + // SKU: 7TMGTEJPM5UPWQ8X + // Instance family: General purpose + // Storage: 4 x 420 + { + Name: "m1.xlarge", + Arches: amd64, + CpuCores: 4, + CpuPower: instances.CpuPower(400), + Mem: 15360, + VirtType: ¶virtual, + Cost: 467, + Deprecated: true, + }, + + // SKU: 8E9KB9CNE94Z4AHE + // Instance family: Storage optimized + // Storage: 4 x 800 SSD + { + Name: "i2.4xlarge", + Arches: amd64, + CpuCores: 16, + CpuPower: instances.CpuPower(5600), + Mem: 124928, + VirtType: &hvm, + Cost: 4070, + }, + + // SKU: 8V5MYBMPUD434579 + // Instance family: Compute optimized + // Storage: 4 x 420 + { + Name: "c1.xlarge", + Arches: amd64, + CpuCores: 8, + CpuPower: instances.CpuPower(800), + Mem: 7168, + VirtType: ¶virtual, + Cost: 655, + Deprecated: true, + }, + + // SKU: 8VCD85YY26XCKZDE + // Instance family: General purpose + // Storage: 2 x 40 SSD + { + Name: "m3.xlarge", + Arches: amd64, + CpuCores: 4, + CpuPower: instances.CpuPower(1400), + Mem: 15360, + VirtType: &hvm, + Cost: 392, + }, + + // SKU: ABMNUJ6SQ7A595A4 + // Instance family: Compute optimized + // Storage: 2 x 16 SSD + { + Name: "c3.large", + Arches: both, + CpuCores: 2, + CpuPower: instances.CpuPower(783), + Mem: 3840, + VirtType: &hvm, + Cost: 132, + }, + + // SKU: AEUJF75AZR2WPVWK + // Instance family: General purpose + // Storage: EBS only + { + Name: "m4.10xlarge", + Arches: amd64, + CpuCores: 40, + CpuPower: instances.CpuPower(13440), + Mem: 163840, + VirtType: &hvm, + Cost: 3553, + }, + + // SKU: B9DFHMNNN499Z259 + // Instance family: General purpose + // Storage: EBS only + { + Name: "m4.large", + Arches: amd64, + CpuCores: 2, + CpuPower: instances.CpuPower(672), + Mem: 8192, + VirtType: &hvm, + Cost: 178, + }, + + // SKU: DK6FJW8STXUGW6PA + // Instance family: GPU instance + // Storage: 2 x 120 SSD + { + Name: "g2.8xlarge", + Arches: amd64, + CpuCores: 32, + CpuPower: instances.CpuPower(11647), + Mem: 
61440, + VirtType: &hvm, + Cost: 4000, + }, + + // SKU: DKFKKEAW78H8X64T + // Instance family: Storage optimized + // Storage: 2 x 800 SSD + { + Name: "i2.2xlarge", + Arches: amd64, + CpuCores: 8, + CpuPower: instances.CpuPower(2800), + Mem: 62464, + VirtType: &hvm, + Cost: 2035, + }, + + // SKU: DTZY5KW9NPT6V929 + // Instance family: Storage optimized + // Storage: 3 x 2000 HDD + { + Name: "d2.xlarge", + Arches: amd64, + CpuCores: 4, + CpuPower: instances.CpuPower(1344), + Mem: 31232, + VirtType: &hvm, + Cost: 870, + }, + + // SKU: EERGZVYFKRBMSYKW + // Instance family: Storage optimized + // Storage: 24 x 2000 + { + Name: "hs1.8xlarge", + Arches: amd64, + CpuCores: 17, + CpuPower: instances.CpuPower(4760), + Mem: 119808, + VirtType: &hvm, + Cost: 5570, + Deprecated: true, + }, + + // SKU: EEUHF7PCXDQT2MYE + // Instance family: Storage optimized + // Storage: 24 x 2000 HDD + { + Name: "d2.8xlarge", + Arches: amd64, + CpuCores: 36, + CpuPower: instances.CpuPower(12096), + Mem: 249856, + VirtType: &hvm, + Cost: 6960, + }, + + // SKU: EV2HH2XUX6RZEAW3 + // Instance family: Memory optimized + // Storage: 1 x 32 SSD + { + Name: "r3.large", + Arches: amd64, + CpuCores: 2, + CpuPower: instances.CpuPower(700), + Mem: 15616, + VirtType: &hvm, + Cost: 200, + }, + + // SKU: G9Z5RTPAVX5KWH4Z + // Instance family: Compute optimized + // Storage: 2 x 40 SSD + { + Name: "c3.xlarge", + Arches: amd64, + CpuCores: 4, + CpuPower: instances.CpuPower(1567), + Mem: 7680, + VirtType: &hvm, + Cost: 265, + }, + + // SKU: GCVKBN38AXXGHBQH + // Instance family: Storage optimized + // Storage: 1 x 800 SSD + { + Name: "i2.xlarge", + Arches: amd64, + CpuCores: 4, + CpuPower: instances.CpuPower(1400), + Mem: 31232, + VirtType: &hvm, + Cost: 1018, + }, + + // SKU: J23MFJ7UXYN9HFKS + // Instance family: General purpose + // Storage: EBS only + { + Name: "t2.medium", + Arches: both, + CpuCores: 2, + CpuPower: instances.CpuPower(40), + Mem: 4096, + VirtType: &hvm, + Cost: 80, + }, + + // SKU: 
J65Z38YCBYKP7Q49 + // Instance family: Micro instances + // Storage: EBS only + { + Name: "t1.micro", + Arches: both, + CpuCores: 1, + CpuPower: instances.CpuPower(20), + Mem: 628, + VirtType: ¶virtual, + Cost: 20, + }, + + // SKU: JDH4WM7E92WUS9JS + // Instance family: Memory optimized + // Storage: 2 x 320 SSD + { + Name: "r3.8xlarge", + Arches: amd64, + CpuCores: 32, + CpuPower: instances.CpuPower(11200), + Mem: 249856, + VirtType: &hvm, + Cost: 3192, + }, + + // SKU: KCZD349CGXR5DRQ3 + // Instance family: Memory optimized + // Storage: 1 x 320 SSD + { + Name: "r3.4xlarge", + Arches: amd64, + CpuCores: 16, + CpuPower: instances.CpuPower(5600), + Mem: 124928, + VirtType: &hvm, + Cost: 1596, + }, + + // SKU: M5ZT2V2ZMSBCEB2Q + // Instance family: Storage optimized + // Storage: 12 x 2000 HDD + { + Name: "d2.4xlarge", + Arches: amd64, + CpuCores: 16, + CpuPower: instances.CpuPower(5376), + Mem: 124928, + VirtType: &hvm, + Cost: 3480, + }, + + // SKU: N55SZ6XU92JF33VX + // Instance family: General purpose + // Storage: EBS only + { + Name: "m4.xlarge", + Arches: amd64, + CpuCores: 4, + CpuPower: instances.CpuPower(1344), + Mem: 16384, + VirtType: &hvm, + Cost: 355, + }, + + // SKU: P6BPTANASQKJ8FJX + // Instance family: Memory optimized + // Storage: 1 x 80 SSD + { + Name: "r3.xlarge", + Arches: amd64, + CpuCores: 4, + CpuPower: instances.CpuPower(1400), + Mem: 31232, + VirtType: &hvm, + Cost: 399, + }, + + // SKU: PJ8AKRU5VVMS9DFN + // Instance family: Compute optimized + // Storage: 2 x 80 SSD + { + Name: "c3.2xlarge", + Arches: amd64, + CpuCores: 8, + CpuPower: instances.CpuPower(3135), + Mem: 15360, + VirtType: &hvm, + Cost: 529, + }, + + // SKU: QB3EG2XVBQ5BYA5F + // Instance family: Compute optimized + // Storage: EBS only + { + Name: "c4.large", + Arches: amd64, + CpuCores: 2, + CpuPower: instances.CpuPower(811), + Mem: 3840, + VirtType: &hvm, + Cost: 144, + }, + + // SKU: R8K75VHRAADAJJ2W + // Instance family: General purpose + // Storage: 2 x 80 SSD + { + 
Name: "m3.2xlarge", + Arches: amd64, + CpuCores: 8, + CpuPower: instances.CpuPower(2800), + Mem: 30720, + VirtType: &hvm, + Cost: 784, + }, + + // SKU: RZV9MRNEARCGY297 + // Instance family: General purpose + // Storage: EBS only + { + Name: "t2.micro", + Arches: both, + CpuCores: 1, + CpuPower: instances.CpuPower(10), + Mem: 1024, + VirtType: &hvm, + Cost: 20, + }, + + // SKU: SKTEJ2QN2YW8UFKF + // Instance family: Storage optimized + // Storage: 6 x 2000 HDD + { + Name: "d2.2xlarge", + Arches: amd64, + CpuCores: 8, + CpuPower: instances.CpuPower(2688), + Mem: 62464, + VirtType: &hvm, + Cost: 1740, + }, + + // SKU: SMHSRASDZ66J6CC3 + // Instance family: Memory optimized + // Storage: 1 x 160 SSD + { + Name: "r3.2xlarge", + Arches: amd64, + CpuCores: 8, + CpuPower: instances.CpuPower(2800), + Mem: 62464, + VirtType: &hvm, + Cost: 798, + }, + + // SKU: T2PU2JF8K7NGF3AH + // Instance family: General purpose + // Storage: EBS only + { + Name: "t2.nano", + Arches: both, + CpuCores: 1, + CpuPower: instances.CpuPower(5), + Mem: 512, + VirtType: &hvm, + Cost: 10, + }, + + // SKU: TYGKARPH33A4B8DT + // Instance family: Memory optimized + // Storage: 1 x 850 + { + Name: "m2.2xlarge", + Arches: amd64, + CpuCores: 4, + CpuPower: instances.CpuPower(400), + Mem: 35021, + VirtType: ¶virtual, + Cost: 592, + Deprecated: true, + }, + + // SKU: U9CPUKN22CXMPGRV + // Instance family: Compute optimized + // Storage: EBS only + { + Name: "c4.xlarge", + Arches: amd64, + CpuCores: 4, + CpuPower: instances.CpuPower(1623), + Mem: 7680, + VirtType: &hvm, + Cost: 289, + }, + + // SKU: UKF69K7GTUQKKRPH + // Instance family: General purpose + // Storage: EBS only + { + Name: "m4.2xlarge", + Arches: amd64, + CpuCores: 8, + CpuPower: instances.CpuPower(2688), + Mem: 32768, + VirtType: &hvm, + Cost: 711, + }, + + // SKU: UKGPAABCGR48DYC4 + // Instance family: Compute optimized + // Storage: EBS only + { + Name: "c4.2xlarge", + Arches: amd64, + CpuCores: 8, + CpuPower: instances.CpuPower(3247), + 
Mem: 15360, + VirtType: &hvm, + Cost: 578, + }, + + // SKU: UKY8RWKR7MVYC863 + // Instance family: Compute optimized + // Storage: 2 x 160 SSD + { + Name: "c3.4xlarge", + Arches: amd64, + CpuCores: 16, + CpuPower: instances.CpuPower(6271), + Mem: 30720, + VirtType: &hvm, + Cost: 1058, + }, + + // SKU: UYUWYNASFB3J75S6 + // Instance family: General purpose + // Storage: EBS only + { + Name: "t2.large", + Arches: amd64, + CpuCores: 2, + CpuPower: instances.CpuPower(60), + Mem: 8192, + VirtType: &hvm, + Cost: 160, + }, + + // SKU: VE5MWWHUXS2VR8DV + // Instance family: Storage optimized + // Storage: 8 x 800 SSD + { + Name: "i2.8xlarge", + Arches: amd64, + CpuCores: 32, + CpuPower: instances.CpuPower(11200), + Mem: 249856, + VirtType: &hvm, + Cost: 8140, + }, + + // SKU: VVKTWPMARM4HESXU + // Instance family: GPU instance + // Storage: 1 x 60 SSD + { + Name: "g2.2xlarge", + Arches: amd64, + CpuCores: 8, + CpuPower: instances.CpuPower(2911), + Mem: 15360, + VirtType: &hvm, + Cost: 1000, + }, + + // SKU: XUVJRQ9MSAQKDXE9 + // Instance family: Compute optimized + // Storage: 2 x 320 SSD + { + Name: "c3.8xlarge", + Arches: amd64, + CpuCores: 32, + CpuPower: instances.CpuPower(12543), + Mem: 61440, + VirtType: &hvm, + Cost: 2117, + }, + + // SKU: Y3RWQDFC7G8TZ3A8 + // Instance family: Memory optimized + // Storage: 2 x 840 + { + Name: "m2.4xlarge", + Arches: amd64, + CpuCores: 8, + CpuPower: instances.CpuPower(800), + Mem: 70042, + VirtType: ¶virtual, + Cost: 1183, + Deprecated: true, + }, + + // SKU: YDP6BX3WNNZ488BZ + // Instance family: Compute optimized + // Storage: 1 x 350 + { + Name: "c1.medium", + Arches: both, + CpuCores: 2, + CpuPower: instances.CpuPower(200), + Mem: 1741, + VirtType: ¶virtual, + Cost: 164, + Deprecated: true, + }, + + // SKU: YSKUUH777M98DWE4 + // Instance family: General purpose + // Storage: 1 x 32 SSD + { + Name: "m3.large", + Arches: amd64, + CpuCores: 2, + CpuPower: instances.CpuPower(700), + Mem: 7680, + VirtType: &hvm, + Cost: 196, + }, + 
+ // SKU: Z3DQKNTFUZ68H6TT + // Instance family: Compute optimized + // Storage: EBS only + { + Name: "c4.8xlarge", + Arches: amd64, + CpuCores: 36, + CpuPower: instances.CpuPower(14615), + Mem: 61440, + VirtType: &hvm, + Cost: 2310, + }, + + // SKU: ZAGTVD3ADUUPS6QV + // Instance family: General purpose + // Storage: 1 x 4 SSD + { + Name: "m3.medium", + Arches: amd64, + CpuCores: 1, + CpuPower: instances.CpuPower(350), + Mem: 3840, + VirtType: &hvm, + Cost: 98, + }, + }, + + "ap-southeast-2": { + + // SKU: 296YCXVCWAKPXKRE + // Instance family: General purpose + // Storage: EBS only + { + Name: "m4.2xlarge", + Arches: amd64, + CpuCores: 8, + CpuPower: instances.CpuPower(2688), + Mem: 32768, + VirtType: &hvm, + Cost: 673, + }, + + // SKU: 2PKSXUFC38ZY888Q + // Instance family: Compute optimized + // Storage: EBS only + { + Name: "c4.large", + Arches: amd64, + CpuCores: 2, + CpuPower: instances.CpuPower(811), + Mem: 3840, + VirtType: &hvm, + Cost: 137, + }, + + // SKU: 46ZVWU6WX68NZCE7 + // Instance family: Compute optimized + // Storage: EBS only + { + Name: "c4.8xlarge", + Arches: amd64, + CpuCores: 36, + CpuPower: instances.CpuPower(14615), + Mem: 61440, + VirtType: &hvm, + Cost: 2195, + }, + + // SKU: 4PRF9CZZBT3AM9D4 + // Instance family: Memory optimized + // Storage: 2 x 840 + { + Name: "m2.4xlarge", + Arches: amd64, + CpuCores: 8, + CpuPower: instances.CpuPower(800), + Mem: 70042, + VirtType: ¶virtual, + Cost: 1183, + Deprecated: true, + }, + + // SKU: 5BKJJZ77BSJPMR4D + // Instance family: Storage optimized + // Storage: 24 x 2000 + { + Name: "hs1.8xlarge", + Arches: amd64, + CpuCores: 17, + CpuPower: instances.CpuPower(4760), + Mem: 119808, + VirtType: &hvm, + Cost: 5570, + Deprecated: true, + }, + + // SKU: 66QVG55FP52WHCFH + // Instance family: Memory optimized + // Storage: 2 x 1,920 + { + Name: "x1.32xlarge", + Arches: amd64, + CpuCores: 128, + CpuPower: instances.CpuPower(41216), + Mem: 1998848, + VirtType: &hvm, + Cost: 19341, + }, + + // SKU: 
69UM5U8QFXRAU255 + // Instance family: General purpose + // Storage: 2 x 40 SSD + { + Name: "m3.xlarge", + Arches: amd64, + CpuCores: 4, + CpuPower: instances.CpuPower(1400), + Mem: 15360, + VirtType: &hvm, + Cost: 372, + }, + + // SKU: 6CK52R5BRMQEVGRW + // Instance family: Storage optimized + // Storage: 2 x 800 SSD + { + Name: "i2.2xlarge", + Arches: amd64, + CpuCores: 8, + CpuPower: instances.CpuPower(2800), + Mem: 62464, + VirtType: &hvm, + Cost: 2035, + }, + + // SKU: 6PB95M6GG8CNXMMR + // Instance family: Memory optimized + // Storage: 1 x 850 + { + Name: "m2.2xlarge", + Arches: amd64, + CpuCores: 4, + CpuPower: instances.CpuPower(400), + Mem: 35021, + VirtType: ¶virtual, + Cost: 592, + Deprecated: true, + }, + + // SKU: 6UHS7YAMM8JY7X52 + // Instance family: Storage optimized + // Storage: 4 x 800 SSD + { + Name: "i2.4xlarge", + Arches: amd64, + CpuCores: 16, + CpuPower: instances.CpuPower(5600), + Mem: 124928, + VirtType: &hvm, + Cost: 4070, + }, + + // SKU: 6UMTMKVFBXENW3BF + // Instance family: General purpose + // Storage: EBS only + { + Name: "m4.large", + Arches: amd64, + CpuCores: 2, + CpuPower: instances.CpuPower(672), + Mem: 8192, + VirtType: &hvm, + Cost: 168, + }, + + // SKU: 6WEMUEK6JNZU6PTC + // Instance family: General purpose + // Storage: 1 x 4 SSD + { + Name: "m3.medium", + Arches: amd64, + CpuCores: 1, + CpuPower: instances.CpuPower(350), + Mem: 3840, + VirtType: &hvm, + Cost: 93, + }, + + // SKU: 7NYHPHSMD45SYSNN + // Instance family: General purpose + // Storage: EBS only + { + Name: "t2.micro", + Arches: both, + CpuCores: 1, + CpuPower: instances.CpuPower(10), + Mem: 1024, + VirtType: &hvm, + Cost: 20, + }, + + // SKU: 8A5X9KQR4YKYYXCQ + // Instance family: General purpose + // Storage: 4 x 420 + { + Name: "m1.xlarge", + Arches: amd64, + CpuCores: 4, + CpuPower: instances.CpuPower(400), + Mem: 15360, + VirtType: ¶virtual, + Cost: 467, + Deprecated: true, + }, + + // SKU: 8XZUT4AHDH972AME + // Instance family: Compute optimized + // 
Storage: 2 x 16 SSD + { + Name: "c3.large", + Arches: both, + CpuCores: 2, + CpuPower: instances.CpuPower(783), + Mem: 3840, + VirtType: &hvm, + Cost: 132, + }, + + // SKU: 9CYSN2TKZDN6GFWQ + // Instance family: Micro instances + // Storage: EBS only + { + Name: "t1.micro", + Arches: both, + CpuCores: 1, + CpuPower: instances.CpuPower(20), + Mem: 628, + VirtType: ¶virtual, + Cost: 20, + }, + + // SKU: C4A5RM72TUGX8R5D + // Instance family: General purpose + // Storage: EBS only + { + Name: "m4.4xlarge", + Arches: amd64, + CpuCores: 16, + CpuPower: instances.CpuPower(5376), + Mem: 65536, + VirtType: &hvm, + Cost: 1345, + }, + + // SKU: CMDB58FT3PAJJNGN + // Instance family: Compute optimized + // Storage: 4 x 420 + { + Name: "c1.xlarge", + Arches: amd64, + CpuCores: 8, + CpuPower: instances.CpuPower(800), + Mem: 7168, + VirtType: ¶virtual, + Cost: 655, + Deprecated: true, + }, + + // SKU: CP3U32VDAT67RT9R + // Instance family: Storage optimized + // Storage: 6 x 2000 HDD + { + Name: "d2.2xlarge", + Arches: amd64, + CpuCores: 8, + CpuPower: instances.CpuPower(2688), + Mem: 62464, + VirtType: &hvm, + Cost: 1740, + }, + + // SKU: D29U26UAEX6WK4TW + // Instance family: General purpose + // Storage: 2 x 80 SSD + { + Name: "m3.2xlarge", + Arches: amd64, + CpuCores: 8, + CpuPower: instances.CpuPower(2800), + Mem: 30720, + VirtType: &hvm, + Cost: 745, + }, + + // SKU: DBMRRDDSPZZKNV49 + // Instance family: Memory optimized + // Storage: 1 x 320 SSD + { + Name: "r3.4xlarge", + Arches: amd64, + CpuCores: 16, + CpuPower: instances.CpuPower(5600), + Mem: 124928, + VirtType: &hvm, + Cost: 1596, + }, + + // SKU: DS7EYGXHAG6T6NTV + // Instance family: GPU instance + // Storage: 1 x 60 SSD + { + Name: "g2.2xlarge", + Arches: amd64, + CpuCores: 8, + CpuPower: instances.CpuPower(2911), + Mem: 15360, + VirtType: &hvm, + Cost: 898, + }, + + // SKU: E6JQJZ8BQHCG328E + // Instance family: General purpose + // Storage: EBS only + { + Name: "t2.small", + Arches: both, + CpuCores: 1, + 
CpuPower: instances.CpuPower(20), + Mem: 2048, + VirtType: &hvm, + Cost: 40, + }, + + // SKU: F9BAR5QA2VU3ZTBF + // Instance family: Compute optimized + // Storage: EBS only + { + Name: "c4.4xlarge", + Arches: amd64, + CpuCores: 16, + CpuPower: instances.CpuPower(6495), + Mem: 30720, + VirtType: &hvm, + Cost: 1097, + }, + + // SKU: FFBDA7VFHVPEJXS6 + // Instance family: Compute optimized + // Storage: EBS only + { + Name: "c4.2xlarge", + Arches: amd64, + CpuCores: 8, + CpuPower: instances.CpuPower(3247), + Mem: 15360, + VirtType: &hvm, + Cost: 549, + }, + + // SKU: FJURXZQ9HT9HN2YJ + // Instance family: Storage optimized + // Storage: 1 x 800 SSD + { + Name: "i2.xlarge", + Arches: amd64, + CpuCores: 4, + CpuPower: instances.CpuPower(1400), + Mem: 31232, + VirtType: &hvm, + Cost: 1018, + }, + + // SKU: FYMCPD2A3YBTSUPQ + // Instance family: Compute optimized + // Storage: 2 x 320 SSD + { + Name: "c3.8xlarge", + Arches: amd64, + CpuCores: 32, + CpuPower: instances.CpuPower(12543), + Mem: 61440, + VirtType: &hvm, + Cost: 2117, + }, + + // SKU: GKVR3QEC5B7WJXTD + // Instance family: Memory optimized + // Storage: 2 x 320 SSD + { + Name: "r3.8xlarge", + Arches: amd64, + CpuCores: 32, + CpuPower: instances.CpuPower(11200), + Mem: 249856, + VirtType: &hvm, + Cost: 3192, + }, + + // SKU: HDSPKHDAUP2HXQTR + // Instance family: Storage optimized + // Storage: 3 x 2000 HDD + { + Name: "d2.xlarge", + Arches: amd64, + CpuCores: 4, + CpuPower: instances.CpuPower(1344), + Mem: 31232, + VirtType: &hvm, + Cost: 870, + }, + + // SKU: HHJGN8MDU3U6DFE5 + // Instance family: General purpose + // Storage: 1 x 32 SSD + { + Name: "m3.large", + Arches: amd64, + CpuCores: 2, + CpuPower: instances.CpuPower(700), + Mem: 7680, + VirtType: &hvm, + Cost: 186, + }, + + // SKU: JT2PVSWTGS2BMV4D + // Instance family: General purpose + // Storage: EBS only + { + Name: "t2.large", + Arches: amd64, + CpuCores: 2, + CpuPower: instances.CpuPower(60), + Mem: 8192, + VirtType: &hvm, + Cost: 160, + }, + + 
// SKU: KEVDJ9YEEGJZZGDS + // Instance family: Storage optimized + // Storage: 24 x 2000 HDD + { + Name: "d2.8xlarge", + Arches: amd64, + CpuCores: 36, + CpuPower: instances.CpuPower(12096), + Mem: 249856, + VirtType: &hvm, + Cost: 6960, + }, + + // SKU: KNJZFWCSBKY8N4NF + // Instance family: Memory optimized + // Storage: 1 x 80 SSD + { + Name: "r3.xlarge", + Arches: amd64, + CpuCores: 4, + CpuPower: instances.CpuPower(1400), + Mem: 31232, + VirtType: &hvm, + Cost: 399, + }, + + // SKU: KWTW9RNYJG6GG3J2 + // Instance family: General purpose + // Storage: 2 x 420 + { + Name: "m1.large", + Arches: amd64, + CpuCores: 2, + CpuPower: instances.CpuPower(200), + Mem: 7680, + VirtType: ¶virtual, + Cost: 233, + Deprecated: true, + }, + + // SKU: KYSFQQQ4H28QEHFQ + // Instance family: General purpose + // Storage: EBS only + { + Name: "t2.nano", + Arches: both, + CpuCores: 1, + CpuPower: instances.CpuPower(5), + Mem: 512, + VirtType: &hvm, + Cost: 10, + }, + + // SKU: MSGAHYMZTGGJN5WS + // Instance family: Storage optimized + // Storage: 12 x 2000 HDD + { + Name: "d2.4xlarge", + Arches: amd64, + CpuCores: 16, + CpuPower: instances.CpuPower(5376), + Mem: 124928, + VirtType: &hvm, + Cost: 3480, + }, + + // SKU: N32CG42C5KFN6GDH + // Instance family: Compute optimized + // Storage: 2 x 160 SSD + { + Name: "c3.4xlarge", + Arches: amd64, + CpuCores: 16, + CpuPower: instances.CpuPower(6271), + Mem: 30720, + VirtType: &hvm, + Cost: 1058, + }, + + // SKU: N3D6SQF6HU9ENSPR + // Instance family: Memory optimized + // Storage: 1 x 32 SSD + { + Name: "r3.large", + Arches: amd64, + CpuCores: 2, + CpuPower: instances.CpuPower(700), + Mem: 15616, + VirtType: &hvm, + Cost: 200, + }, + + // SKU: R8KMJWXSQ8BJC35M + // Instance family: Compute optimized + // Storage: 2 x 80 SSD + { + Name: "c3.2xlarge", + Arches: amd64, + CpuCores: 8, + CpuPower: instances.CpuPower(3135), + Mem: 15360, + VirtType: &hvm, + Cost: 529, + }, + + // SKU: RAWDW374YPCAB65D + // Instance family: General purpose + // 
Storage: EBS only + { + Name: "m4.xlarge", + Arches: amd64, + CpuCores: 4, + CpuPower: instances.CpuPower(1344), + Mem: 16384, + VirtType: &hvm, + Cost: 336, + }, + + // SKU: RW8353QQ8DWZ4WQD + // Instance family: Storage optimized + // Storage: 8 x 800 SSD + { + Name: "i2.8xlarge", + Arches: amd64, + CpuCores: 32, + CpuPower: instances.CpuPower(11200), + Mem: 249856, + VirtType: &hvm, + Cost: 8140, + }, + + // SKU: SPJKFAB8G379JD6R + // Instance family: General purpose + // Storage: EBS only + { + Name: "m4.10xlarge", + Arches: amd64, + CpuCores: 40, + CpuPower: instances.CpuPower(13440), + Mem: 163840, + VirtType: &hvm, + Cost: 3363, + }, + + // SKU: T72BQ8E4ETD9K62R + // Instance family: Compute optimized + // Storage: 1 x 350 + { + Name: "c1.medium", + Arches: both, + CpuCores: 2, + CpuPower: instances.CpuPower(200), + Mem: 1741, + VirtType: ¶virtual, + Cost: 164, + Deprecated: true, + }, + + // SKU: TD8NW4BSBCYU646U + // Instance family: GPU instance + // Storage: 2 x 120 SSD + { + Name: "g2.8xlarge", + Arches: amd64, + CpuCores: 32, + CpuPower: instances.CpuPower(11647), + Mem: 61440, + VirtType: &hvm, + Cost: 3592, + }, + + // SKU: TPJVBXMBFDUBJM83 + // Instance family: General purpose + // Storage: 1 x 410 + { + Name: "m1.medium", + Arches: both, + CpuCores: 1, + CpuPower: instances.CpuPower(100), + Mem: 3840, + VirtType: ¶virtual, + Cost: 117, + Deprecated: true, + }, + + // SKU: V5SUYWWSC9HUZFWJ + // Instance family: Memory optimized + // Storage: 1 x 420 + { + Name: "m2.xlarge", + Arches: amd64, + CpuCores: 2, + CpuPower: instances.CpuPower(200), + Mem: 17511, + VirtType: ¶virtual, + Cost: 296, + Deprecated: true, + }, + + // SKU: VM2PFN8ME9595UGP + // Instance family: General purpose + // Storage: 1 x 160 + { + Name: "m1.small", + Arches: both, + CpuCores: 1, + CpuPower: instances.CpuPower(100), + Mem: 1741, + VirtType: ¶virtual, + Cost: 58, + Deprecated: true, + }, + + // SKU: WNYWP7QUJ3MU8NVV + // Instance family: Memory optimized + // Storage: 1 x 
160 SSD + { + Name: "r3.2xlarge", + Arches: amd64, + CpuCores: 8, + CpuPower: instances.CpuPower(2800), + Mem: 62464, + VirtType: &hvm, + Cost: 798, + }, + + // SKU: XD4VKMMZMCMZYFWJ + // Instance family: Compute optimized + // Storage: EBS only + { + Name: "c4.xlarge", + Arches: amd64, + CpuCores: 4, + CpuPower: instances.CpuPower(1623), + Mem: 7680, + VirtType: &hvm, + Cost: 275, + }, + + // SKU: XPAKDV3PWHYTJU3X + // Instance family: Compute optimized + // Storage: 2 x 40 SSD + { + Name: "c3.xlarge", + Arches: amd64, + CpuCores: 4, + CpuPower: instances.CpuPower(1567), + Mem: 7680, + VirtType: &hvm, + Cost: 265, + }, + + // SKU: ZNG78GP248PZPM6R + // Instance family: General purpose + // Storage: EBS only + { + Name: "t2.medium", + Arches: both, + CpuCores: 2, + CpuPower: instances.CpuPower(40), + Mem: 4096, + VirtType: &hvm, + Cost: 80, + }, + }, + + "eu-central-1": { + + // SKU: 3AFDFDJ9FGMNBBUZ + // Instance family: Memory optimized + // Storage: 2 x 320 SSD + { + Name: "r3.8xlarge", + Arches: amd64, + CpuCores: 32, + CpuPower: instances.CpuPower(11200), + Mem: 249856, + VirtType: &hvm, + Cost: 3201, + }, + + // SKU: 3KCGMZRVWDY4AC5R + // Instance family: Memory optimized + // Storage: 1 x 320 SSD + { + Name: "r3.4xlarge", + Arches: amd64, + CpuCores: 16, + CpuPower: instances.CpuPower(5600), + Mem: 124928, + VirtType: &hvm, + Cost: 1600, + }, + + // SKU: 4TT65SC5HVYUSGR2 + // Instance family: Storage optimized + // Storage: 4 x 800 SSD + { + Name: "i2.4xlarge", + Arches: amd64, + CpuCores: 16, + CpuPower: instances.CpuPower(5600), + Mem: 124928, + VirtType: &hvm, + Cost: 4051, + }, + + // SKU: 5P7657GQ9EZ2Z4ZY + // Instance family: General purpose + // Storage: EBS only + { + Name: "t2.medium", + Arches: both, + CpuCores: 2, + CpuPower: instances.CpuPower(40), + Mem: 4096, + VirtType: &hvm, + Cost: 60, + }, + + // SKU: 5RNA3KEVYJW8UJWT + // Instance family: Storage optimized + // Storage: 12 x 2000 HDD + { + Name: "d2.4xlarge", + Arches: amd64, + CpuCores: 
16, + CpuPower: instances.CpuPower(5376), + Mem: 124928, + VirtType: &hvm, + Cost: 3176, + }, + + // SKU: 5ZZCF2WTD3M2NVHT + // Instance family: General purpose + // Storage: 2 x 40 SSD + { + Name: "m3.xlarge", + Arches: amd64, + CpuCores: 4, + CpuPower: instances.CpuPower(1400), + Mem: 15360, + VirtType: &hvm, + Cost: 315, + }, + + // SKU: 686NEEYZAPY5GJ8N + // Instance family: Compute optimized + // Storage: 2 x 16 SSD + { + Name: "c3.large", + Arches: both, + CpuCores: 2, + CpuPower: instances.CpuPower(783), + Mem: 3840, + VirtType: &hvm, + Cost: 129, + }, + + // SKU: 6PSHDB8D545JMBBD + // Instance family: General purpose + // Storage: EBS only + { + Name: "t2.nano", + Arches: both, + CpuCores: 1, + CpuPower: instances.CpuPower(5), + Mem: 512, + VirtType: &hvm, + Cost: 7, + }, + + // SKU: 6Y959B8MKQZ55MGT + // Instance family: Storage optimized + // Storage: 6 x 2000 HDD + { + Name: "d2.2xlarge", + Arches: amd64, + CpuCores: 8, + CpuPower: instances.CpuPower(2688), + Mem: 62464, + VirtType: &hvm, + Cost: 1588, + }, + + // SKU: 7EJH5CWEXABPY2ST + // Instance family: Memory optimized + // Storage: 2 x 1,920 + { + Name: "x1.32xlarge", + Arches: amd64, + CpuCores: 128, + CpuPower: instances.CpuPower(41216), + Mem: 1998848, + VirtType: &hvm, + Cost: 18674, + }, + + // SKU: 7W6DNQ55YG9FCPXZ + // Instance family: General purpose + // Storage: EBS only + { + Name: "t2.large", + Arches: amd64, + CpuCores: 2, + CpuPower: instances.CpuPower(60), + Mem: 8192, + VirtType: &hvm, + Cost: 120, + }, + + // SKU: 8KTQAHWA58GUHDGC + // Instance family: General purpose + // Storage: 1 x 32 SSD + { + Name: "m3.large", + Arches: amd64, + CpuCores: 2, + CpuPower: instances.CpuPower(700), + Mem: 7680, + VirtType: &hvm, + Cost: 158, + }, + + // SKU: ABFDCPB959KUGRH8 + // Instance family: General purpose + // Storage: EBS only + { + Name: "m4.large", + Arches: amd64, + CpuCores: 2, + CpuPower: instances.CpuPower(672), + Mem: 8192, + VirtType: &hvm, + Cost: 143, + }, + + // SKU: 
ATHMXFEBFCM8TPWK + // Instance family: Compute optimized + // Storage: EBS only + { + Name: "c4.4xlarge", + Arches: amd64, + CpuCores: 16, + CpuPower: instances.CpuPower(6495), + Mem: 30720, + VirtType: &hvm, + Cost: 1069, + }, + + // SKU: C2EDZ5DQN8NMN54X + // Instance family: GPU instance + // Storage: 1 x 60 SSD + { + Name: "g2.2xlarge", + Arches: amd64, + CpuCores: 8, + CpuPower: instances.CpuPower(2911), + Mem: 15360, + VirtType: &hvm, + Cost: 772, + }, + + // SKU: CDQ3VSAVRNG39R6V + // Instance family: General purpose + // Storage: EBS only + { + Name: "m4.2xlarge", + Arches: amd64, + CpuCores: 8, + CpuPower: instances.CpuPower(2688), + Mem: 32768, + VirtType: &hvm, + Cost: 570, + }, + + // SKU: CNP4PV4Y2J8YZVAR + // Instance family: General purpose + // Storage: 2 x 80 SSD + { + Name: "m3.2xlarge", + Arches: amd64, + CpuCores: 8, + CpuPower: instances.CpuPower(2800), + Mem: 30720, + VirtType: &hvm, + Cost: 632, + }, + + // SKU: CU49Z77S6UH36JXW + // Instance family: General purpose + // Storage: EBS only + { + Name: "t2.small", + Arches: both, + CpuCores: 1, + CpuPower: instances.CpuPower(20), + Mem: 2048, + VirtType: &hvm, + Cost: 30, + }, + + // SKU: D8BFUEFHTHMN4XUY + // Instance family: Compute optimized + // Storage: EBS only + { + Name: "c4.8xlarge", + Arches: amd64, + CpuCores: 36, + CpuPower: instances.CpuPower(14615), + Mem: 61440, + VirtType: &hvm, + Cost: 2138, + }, + + // SKU: EF7GKFKJ3Y5DM7E9 + // Instance family: General purpose + // Storage: EBS only + { + Name: "m4.xlarge", + Arches: amd64, + CpuCores: 4, + CpuPower: instances.CpuPower(1344), + Mem: 16384, + VirtType: &hvm, + Cost: 285, + }, + + // SKU: ER456JE239VN5TQY + // Instance family: Memory optimized + // Storage: 1 x 80 SSD + { + Name: "r3.xlarge", + Arches: amd64, + CpuCores: 4, + CpuPower: instances.CpuPower(1400), + Mem: 31232, + VirtType: &hvm, + Cost: 400, + }, + + // SKU: F5FY39C3HWRVW8M7 + // Instance family: Storage optimized + // Storage: 24 x 2000 HDD + { + Name: 
"d2.8xlarge", + Arches: amd64, + CpuCores: 36, + CpuPower: instances.CpuPower(12096), + Mem: 249856, + VirtType: &hvm, + Cost: 6352, + }, + + // SKU: FECZ7UBC3GFUYSJC + // Instance family: Storage optimized + // Storage: 2 x 800 SSD + { + Name: "i2.2xlarge", + Arches: amd64, + CpuCores: 8, + CpuPower: instances.CpuPower(2800), + Mem: 62464, + VirtType: &hvm, + Cost: 2025, + }, + + // SKU: FXJNETA7Z34Z9BAR + // Instance family: Storage optimized + // Storage: 3 x 2000 HDD + { + Name: "d2.xlarge", + Arches: amd64, + CpuCores: 4, + CpuPower: instances.CpuPower(1344), + Mem: 31232, + VirtType: &hvm, + Cost: 794, + }, + + // SKU: GDZZPNEEZXAN7X9J + // Instance family: General purpose + // Storage: 1 x 4 SSD + { + Name: "m3.medium", + Arches: amd64, + CpuCores: 1, + CpuPower: instances.CpuPower(350), + Mem: 3840, + VirtType: &hvm, + Cost: 79, + }, + + // SKU: HW3SH7C5H3K3MV7E + // Instance family: Compute optimized + // Storage: 2 x 40 SSD + { + Name: "c3.xlarge", + Arches: amd64, + CpuCores: 4, + CpuPower: instances.CpuPower(1567), + Mem: 7680, + VirtType: &hvm, + Cost: 258, + }, + + // SKU: JG83GAMRHT9DJ8TH + // Instance family: Memory optimized + // Storage: 1 x 32 SSD + { + Name: "r3.large", + Arches: amd64, + CpuCores: 2, + CpuPower: instances.CpuPower(700), + Mem: 15616, + VirtType: &hvm, + Cost: 200, + }, + + // SKU: JGXEWM5NJCZJPHGG + // Instance family: General purpose + // Storage: EBS only + { + Name: "m4.4xlarge", + Arches: amd64, + CpuCores: 16, + CpuPower: instances.CpuPower(5376), + Mem: 65536, + VirtType: &hvm, + Cost: 1140, + }, + + // SKU: KYFX85FCPCCT57BD + // Instance family: Compute optimized + // Storage: 2 x 320 SSD + { + Name: "c3.8xlarge", + Arches: amd64, + CpuCores: 32, + CpuPower: instances.CpuPower(12543), + Mem: 61440, + VirtType: &hvm, + Cost: 2064, + }, + + // SKU: MB3JDB58W76ZHFT8 + // Instance family: Compute optimized + // Storage: 2 x 80 SSD + { + Name: "c3.2xlarge", + Arches: amd64, + CpuCores: 8, + CpuPower: instances.CpuPower(3135), 
+ Mem: 15360, + VirtType: &hvm, + Cost: 516, + }, + + // SKU: MTQWAHX8C4T4FYVW + // Instance family: Memory optimized + // Storage: 1 x 160 SSD + { + Name: "r3.2xlarge", + Arches: amd64, + CpuCores: 8, + CpuPower: instances.CpuPower(2800), + Mem: 62464, + VirtType: &hvm, + Cost: 800, + }, + + // SKU: N2333QQ45Q3K9RT9 + // Instance family: Storage optimized + // Storage: 1 x 800 SSD + { + Name: "i2.xlarge", + Arches: amd64, + CpuCores: 4, + CpuPower: instances.CpuPower(1400), + Mem: 31232, + VirtType: &hvm, + Cost: 1012, + }, + + // SKU: N7WSYHVUT72KMK3V + // Instance family: Compute optimized + // Storage: 2 x 160 SSD + { + Name: "c3.4xlarge", + Arches: amd64, + CpuCores: 16, + CpuPower: instances.CpuPower(6271), + Mem: 30720, + VirtType: &hvm, + Cost: 1032, + }, + + // SKU: NZS3Z83VUDZA9SPY + // Instance family: Compute optimized + // Storage: EBS only + { + Name: "c4.2xlarge", + Arches: amd64, + CpuCores: 8, + CpuPower: instances.CpuPower(3247), + Mem: 15360, + VirtType: &hvm, + Cost: 534, + }, + + // SKU: Q5D9K2QEBW7SS9YP + // Instance family: GPU instance + // Storage: 2 x 120 SSD + { + Name: "g2.8xlarge", + Arches: amd64, + CpuCores: 32, + CpuPower: instances.CpuPower(11647), + Mem: 61440, + VirtType: &hvm, + Cost: 3088, + }, + + // SKU: SMTUMBHX6YKRBJQB + // Instance family: Storage optimized + // Storage: 8 x 800 SSD + { + Name: "i2.8xlarge", + Arches: amd64, + CpuCores: 32, + CpuPower: instances.CpuPower(11200), + Mem: 249856, + VirtType: &hvm, + Cost: 8102, + }, + + // SKU: T9GCN3NZ9U6N5BGN + // Instance family: General purpose + // Storage: EBS only + { + Name: "t2.micro", + Arches: both, + CpuCores: 1, + CpuPower: instances.CpuPower(10), + Mem: 1024, + VirtType: &hvm, + Cost: 15, + }, + + // SKU: WWTVB5GY85P5FGNW + // Instance family: Compute optimized + // Storage: EBS only + { + Name: "c4.large", + Arches: amd64, + CpuCores: 2, + CpuPower: instances.CpuPower(811), + Mem: 3840, + VirtType: &hvm, + Cost: 134, + }, + + // SKU: XWVCP8TVZ3EZXHJT + // 
Instance family: General purpose + // Storage: EBS only + { + Name: "m4.10xlarge", + Arches: amd64, + CpuCores: 40, + CpuPower: instances.CpuPower(13440), + Mem: 163840, + VirtType: &hvm, + Cost: 2850, + }, + + // SKU: ZAC36C46HPYXADA7 + // Instance family: Compute optimized + // Storage: EBS only + { + Name: "c4.xlarge", + Arches: amd64, + CpuCores: 4, + CpuPower: instances.CpuPower(1623), + Mem: 7680, + VirtType: &hvm, + Cost: 267, + }, + }, + + "eu-west-1": { + + // SKU: 2D5G3BCXGXH9GCH3 + // Instance family: Memory optimized + // Storage: 2 x 1,920 + { + Name: "x1.32xlarge", + Arches: amd64, + CpuCores: 128, + CpuPower: instances.CpuPower(41216), + Mem: 1998848, + VirtType: &hvm, + Cost: 16006, + }, + + // SKU: 2SX63SRBXZK94TSA + // Instance family: Storage optimized + // Storage: 6 x 2000 HDD + { + Name: "d2.2xlarge", + Arches: amd64, + CpuCores: 8, + CpuPower: instances.CpuPower(2688), + Mem: 62464, + VirtType: &hvm, + Cost: 1470, + }, + + // SKU: 3H8WR8FBAE4DWNRB + // Instance family: GPU instance + // Storage: 2 x 840 + { + Name: "cg1.4xlarge", + Arches: amd64, + CpuCores: 16, + CpuPower: instances.CpuPower(1600), + Mem: 23040, + VirtType: &hvm, + Cost: 2360, + }, + + // SKU: 4G2Z3WVSPDEGMKFH + // Instance family: General purpose + // Storage: EBS only + { + Name: "t2.large", + Arches: amd64, + CpuCores: 2, + CpuPower: instances.CpuPower(60), + Mem: 8192, + VirtType: &hvm, + Cost: 112, + }, + + // SKU: 6FU9JEK79WWSARQ9 + // Instance family: General purpose + // Storage: EBS only + { + Name: "m4.xlarge", + Arches: amd64, + CpuCores: 4, + CpuPower: instances.CpuPower(1344), + Mem: 16384, + VirtType: &hvm, + Cost: 264, + }, + + // SKU: 6HX9NKE3BQ5V3PMJ + // Instance family: Storage optimized + // Storage: 12 x 2000 HDD + { + Name: "d2.4xlarge", + Arches: amd64, + CpuCores: 16, + CpuPower: instances.CpuPower(5376), + Mem: 124928, + VirtType: &hvm, + Cost: 2940, + }, + + // SKU: 7X4K64YA59VZZAC3 + // Instance family: General purpose + // Storage: EBS only + { + 
Name: "m4.large", + Arches: amd64, + CpuCores: 2, + CpuPower: instances.CpuPower(672), + Mem: 8192, + VirtType: &hvm, + Cost: 132, + }, + + // SKU: 8FFUWN2ESZYSB84N + // Instance family: General purpose + // Storage: EBS only + { + Name: "m4.2xlarge", + Arches: amd64, + CpuCores: 8, + CpuPower: instances.CpuPower(2688), + Mem: 32768, + VirtType: &hvm, + Cost: 528, + }, + + // SKU: 926EPQHVQ6AGDX5P + // Instance family: Compute optimized + // Storage: 2 x 160 SSD + { + Name: "c3.4xlarge", + Arches: amd64, + CpuCores: 16, + CpuPower: instances.CpuPower(6271), + Mem: 30720, + VirtType: &hvm, + Cost: 956, + }, + + // SKU: 9QYQQRQ9FD9YCPNB + // Instance family: General purpose + // Storage: 4 x 420 + { + Name: "m1.xlarge", + Arches: amd64, + CpuCores: 4, + CpuPower: instances.CpuPower(400), + Mem: 15360, + VirtType: ¶virtual, + Cost: 379, + Deprecated: true, + }, + + // SKU: 9VHN6EZGZGFZEHHK + // Instance family: Storage optimized + // Storage: 3 x 2000 HDD + { + Name: "d2.xlarge", + Arches: amd64, + CpuCores: 4, + CpuPower: instances.CpuPower(1344), + Mem: 31232, + VirtType: &hvm, + Cost: 735, + }, + + // SKU: ADT8TJSCKTFKTBMX + // Instance family: Storage optimized + // Storage: 8 x 800 SSD + { + Name: "i2.8xlarge", + Arches: amd64, + CpuCores: 32, + CpuPower: instances.CpuPower(11200), + Mem: 249856, + VirtType: &hvm, + Cost: 7502, + }, + + // SKU: BG8E99UBN6RZV6WV + // Instance family: Compute optimized + // Storage: EBS only + { + Name: "c4.4xlarge", + Arches: amd64, + CpuCores: 16, + CpuPower: instances.CpuPower(6495), + Mem: 30720, + VirtType: &hvm, + Cost: 953, + }, + + // SKU: BNSCBCWPZHWPDKKS + // Instance family: General purpose + // Storage: EBS only + { + Name: "m4.4xlarge", + Arches: amd64, + CpuCores: 16, + CpuPower: instances.CpuPower(5376), + Mem: 65536, + VirtType: &hvm, + Cost: 1056, + }, + + // SKU: C3M6ZGSU66GC75NF + // Instance family: Memory optimized + // Storage: 1 x 32 SSD + { + Name: "r3.large", + Arches: amd64, + CpuCores: 2, + CpuPower: 
instances.CpuPower(700), + Mem: 15616, + VirtType: &hvm, + Cost: 185, + }, + + // SKU: CP6AQ5U62SXMQV9P + // Instance family: Memory optimized + // Storage: 1 x 160 SSD + { + Name: "r3.2xlarge", + Arches: amd64, + CpuCores: 8, + CpuPower: instances.CpuPower(2800), + Mem: 62464, + VirtType: &hvm, + Cost: 741, + }, + + // SKU: DFX4Y9GW9C3HE99V + // Instance family: Micro instances + // Storage: EBS only + { + Name: "t1.micro", + Arches: both, + CpuCores: 1, + CpuPower: instances.CpuPower(20), + Mem: 628, + VirtType: ¶virtual, + Cost: 20, + }, + + // SKU: DYTSK9JJGPSR6VQB + // Instance family: Storage optimized + // Storage: 2 x 800 SSD + { + Name: "i2.2xlarge", + Arches: amd64, + CpuCores: 8, + CpuPower: instances.CpuPower(2800), + Mem: 62464, + VirtType: &hvm, + Cost: 1876, + }, + + // SKU: E9FTXSZ49KS3R3HY + // Instance family: GPU instance + // Storage: 2 x 120 SSD + { + Name: "g2.8xlarge", + Arches: amd64, + CpuCores: 32, + CpuPower: instances.CpuPower(11647), + Mem: 61440, + VirtType: &hvm, + Cost: 2808, + }, + + // SKU: EB2QM2B74W2YCANP + // Instance family: General purpose + // Storage: 1 x 160 + { + Name: "m1.small", + Arches: both, + CpuCores: 1, + CpuPower: instances.CpuPower(100), + Mem: 1741, + VirtType: ¶virtual, + Cost: 47, + Deprecated: true, + }, + + // SKU: EQP9JWYVRCW49MPW + // Instance family: Storage optimized + // Storage: 4 x 800 SSD + { + Name: "i2.4xlarge", + Arches: amd64, + CpuCores: 16, + CpuPower: instances.CpuPower(5600), + Mem: 124928, + VirtType: &hvm, + Cost: 3751, + }, + + // SKU: FSS42UA3US5PWMV7 + // Instance family: Memory optimized + // Storage: 2 x 120 SSD + { + Name: "cr1.8xlarge", + Arches: amd64, + CpuCores: 32, + CpuPower: instances.CpuPower(3200), + Mem: 249856, + VirtType: &hvm, + Cost: 3750, + Deprecated: true, + }, + + // SKU: HB5V2X8TXQUTDZBV + // Instance family: Compute optimized + // Storage: 1 x 350 + { + Name: "c1.medium", + Arches: both, + CpuCores: 2, + CpuPower: instances.CpuPower(200), + Mem: 1741, + VirtType: 
¶virtual, + Cost: 148, + Deprecated: true, + }, + + // SKU: HF7N6NNE7N8GDMBE + // Instance family: General purpose + // Storage: EBS only + { + Name: "t2.medium", + Arches: both, + CpuCores: 2, + CpuPower: instances.CpuPower(40), + Mem: 4096, + VirtType: &hvm, + Cost: 56, + }, + + // SKU: HG3TP7M3FQZ54HKR + // Instance family: Memory optimized + // Storage: 2 x 840 + { + Name: "m2.4xlarge", + Arches: amd64, + CpuCores: 8, + CpuPower: instances.CpuPower(800), + Mem: 70042, + VirtType: ¶virtual, + Cost: 1100, + Deprecated: true, + }, + + // SKU: JGXNGK5X7WE7K3VF + // Instance family: General purpose + // Storage: 1 x 32 SSD + { + Name: "m3.large", + Arches: amd64, + CpuCores: 2, + CpuPower: instances.CpuPower(700), + Mem: 7680, + VirtType: &hvm, + Cost: 146, + }, + + // SKU: KKQD5EPCF8JFUDDA + // Instance family: Storage optimized + // Storage: 1 x 800 SSD + { + Name: "i2.xlarge", + Arches: amd64, + CpuCores: 4, + CpuPower: instances.CpuPower(1400), + Mem: 31232, + VirtType: &hvm, + Cost: 938, + }, + + // SKU: N6KDUVR23T758UUC + // Instance family: General purpose + // Storage: EBS only + { + Name: "t2.small", + Arches: both, + CpuCores: 1, + CpuPower: instances.CpuPower(20), + Mem: 2048, + VirtType: &hvm, + Cost: 28, + }, + + // SKU: NSCRWEDQZZESFDFG + // Instance family: General purpose + // Storage: 2 x 420 + { + Name: "m1.large", + Arches: amd64, + CpuCores: 2, + CpuPower: instances.CpuPower(200), + Mem: 7680, + VirtType: ¶virtual, + Cost: 190, + Deprecated: true, + }, + + // SKU: NV44PJXFFQV9UNQZ + // Instance family: Storage optimized + // Storage: 24 x 2000 HDD + { + Name: "d2.8xlarge", + Arches: amd64, + CpuCores: 36, + CpuPower: instances.CpuPower(12096), + Mem: 249856, + VirtType: &hvm, + Cost: 5880, + }, + + // SKU: P3CTRQJY7SHQ6BJR + // Instance family: General purpose + // Storage: 2 x 80 SSD + { + Name: "m3.2xlarge", + Arches: amd64, + CpuCores: 8, + CpuPower: instances.CpuPower(2800), + Mem: 30720, + VirtType: &hvm, + Cost: 585, + }, + + // SKU: 
P75KF3MVS7BD8VRA + // Instance family: Compute optimized + // Storage: 2 x 80 SSD + { + Name: "c3.2xlarge", + Arches: amd64, + CpuCores: 8, + CpuPower: instances.CpuPower(3135), + Mem: 15360, + VirtType: &hvm, + Cost: 478, + }, + + // SKU: P7JTZV2EPW3T8GT2 + // Instance family: Memory optimized + // Storage: 1 x 850 + { + Name: "m2.2xlarge", + Arches: amd64, + CpuCores: 4, + CpuPower: instances.CpuPower(400), + Mem: 35021, + VirtType: ¶virtual, + Cost: 550, + Deprecated: true, + }, + + // SKU: PCKAVX9UQTRXBNNF + // Instance family: General purpose + // Storage: EBS only + { + Name: "m4.10xlarge", + Arches: amd64, + CpuCores: 40, + CpuPower: instances.CpuPower(13440), + Mem: 163840, + VirtType: &hvm, + Cost: 2641, + }, + + // SKU: PR4SS7VH54V5XAZZ + // Instance family: Memory optimized + // Storage: 1 x 320 SSD + { + Name: "r3.4xlarge", + Arches: amd64, + CpuCores: 16, + CpuPower: instances.CpuPower(5600), + Mem: 124928, + VirtType: &hvm, + Cost: 1482, + }, + + // SKU: PY52HJB9NWEKKBZK + // Instance family: Memory optimized + // Storage: 2 x 320 SSD + { + Name: "r3.8xlarge", + Arches: amd64, + CpuCores: 32, + CpuPower: instances.CpuPower(11200), + Mem: 249856, + VirtType: &hvm, + Cost: 2964, + }, + + // SKU: Q3BP5KJZEPCUMKM3 + // Instance family: Memory optimized + // Storage: 1 x 420 + { + Name: "m2.xlarge", + Arches: amd64, + CpuCores: 2, + CpuPower: instances.CpuPower(200), + Mem: 17511, + VirtType: ¶virtual, + Cost: 275, + Deprecated: true, + }, + + // SKU: QRP5VBPEA34W72YQ + // Instance family: General purpose + // Storage: 2 x 40 SSD + { + Name: "m3.xlarge", + Arches: amd64, + CpuCores: 4, + CpuPower: instances.CpuPower(1400), + Mem: 15360, + VirtType: &hvm, + Cost: 293, + }, + + // SKU: RASFAC97JWEGEPYS + // Instance family: Compute optimized + // Storage: 2 x 16 SSD + { + Name: "c3.large", + Arches: both, + CpuCores: 2, + CpuPower: instances.CpuPower(783), + Mem: 3840, + VirtType: &hvm, + Cost: 120, + }, + + // SKU: SDFJSCXXJEFDV7P2 + // Instance family: 
General purpose + // Storage: EBS only + { + Name: "t2.nano", + Arches: both, + CpuCores: 1, + CpuPower: instances.CpuPower(5), + Mem: 512, + VirtType: &hvm, + Cost: 7, + }, + + // SKU: STTHYT3WDDQU8UBR + // Instance family: General purpose + // Storage: EBS only + { + Name: "t2.micro", + Arches: both, + CpuCores: 1, + CpuPower: instances.CpuPower(10), + Mem: 1024, + VirtType: &hvm, + Cost: 14, + }, + + // SKU: T3ZC3B9VPS8PA59H + // Instance family: Compute optimized + // Storage: 4 x 840 + { + Name: "cc2.8xlarge", + Arches: amd64, + CpuCores: 32, + CpuPower: instances.CpuPower(11647), + Mem: 61952, + VirtType: &hvm, + Cost: 2250, + Deprecated: true, + }, + + // SKU: TDVRYW6K68T4XJHJ + // Instance family: Storage optimized + // Storage: 24 x 2000 + { + Name: "hs1.8xlarge", + Arches: amd64, + CpuCores: 17, + CpuPower: instances.CpuPower(4760), + Mem: 119808, + VirtType: &hvm, + Cost: 4900, + Deprecated: true, + }, + + // SKU: UNEZG8PVCP3RUSQG + // Instance family: General purpose + // Storage: 1 x 4 SSD + { + Name: "m3.medium", + Arches: amd64, + CpuCores: 1, + CpuPower: instances.CpuPower(350), + Mem: 3840, + VirtType: &hvm, + Cost: 73, + }, + + // SKU: V2SRX3YBPSJPD8E4 + // Instance family: Compute optimized + // Storage: EBS only + { + Name: "c4.2xlarge", + Arches: amd64, + CpuCores: 8, + CpuPower: instances.CpuPower(3247), + Mem: 15360, + VirtType: &hvm, + Cost: 477, + }, + + // SKU: V4Q928Z7YAM3TJ6X + // Instance family: Compute optimized + // Storage: EBS only + { + Name: "c4.xlarge", + Arches: amd64, + CpuCores: 4, + CpuPower: instances.CpuPower(1623), + Mem: 7680, + VirtType: &hvm, + Cost: 238, + }, + + // SKU: VM3SRW97DB2T2U8Z + // Instance family: GPU instance + // Storage: 1 x 60 SSD + { + Name: "g2.2xlarge", + Arches: amd64, + CpuCores: 8, + CpuPower: instances.CpuPower(2911), + Mem: 15360, + VirtType: &hvm, + Cost: 702, + }, + + // SKU: VPAFYT3KA5TFAK4M + // Instance family: Memory optimized + // Storage: 1 x 80 SSD + { + Name: "r3.xlarge", + Arches: 
amd64, + CpuCores: 4, + CpuPower: instances.CpuPower(1400), + Mem: 31232, + VirtType: &hvm, + Cost: 371, + }, + + // SKU: WDZRKB8HUJXEKH45 + // Instance family: Compute optimized + // Storage: 2 x 40 SSD + { + Name: "c3.xlarge", + Arches: amd64, + CpuCores: 4, + CpuPower: instances.CpuPower(1567), + Mem: 7680, + VirtType: &hvm, + Cost: 239, + }, + + // SKU: WTE2TS5FTMMJQXHK + // Instance family: Compute optimized + // Storage: 4 x 420 + { + Name: "c1.xlarge", + Arches: amd64, + CpuCores: 8, + CpuPower: instances.CpuPower(800), + Mem: 7168, + VirtType: ¶virtual, + Cost: 592, + Deprecated: true, + }, + + // SKU: XWEGA3UJZ88J37T5 + // Instance family: Compute optimized + // Storage: EBS only + { + Name: "c4.large", + Arches: amd64, + CpuCores: 2, + CpuPower: instances.CpuPower(811), + Mem: 3840, + VirtType: &hvm, + Cost: 119, + }, + + // SKU: YC9UG3ESW33SS2WK + // Instance family: General purpose + // Storage: 1 x 410 + { + Name: "m1.medium", + Arches: both, + CpuCores: 1, + CpuPower: instances.CpuPower(100), + Mem: 3840, + VirtType: ¶virtual, + Cost: 95, + Deprecated: true, + }, + + // SKU: YMCJTDYUBRJ9G3JJ + // Instance family: Compute optimized + // Storage: EBS only + { + Name: "c4.8xlarge", + Arches: amd64, + CpuCores: 36, + CpuPower: instances.CpuPower(14615), + Mem: 61440, + VirtType: &hvm, + Cost: 1906, + }, + + // SKU: YT7Q7XWV392U2M45 + // Instance family: Compute optimized + // Storage: 2 x 320 SSD + { + Name: "c3.8xlarge", + Arches: amd64, + CpuCores: 32, + CpuPower: instances.CpuPower(12543), + Mem: 61440, + VirtType: &hvm, + Cost: 1912, + }, + + // SKU: YVBHSQT9PFQ3DB5S + // Instance family: Storage optimized + // Storage: 2 x 1024 SSD + { + Name: "hi1.4xlarge", + Arches: amd64, + CpuCores: 16, + CpuPower: instances.CpuPower(5376), + Mem: 61952, + VirtType: &hvm, + Cost: 3100, + Deprecated: true, + }, + }, + + "sa-east-1": { + + // SKU: 2DQW6R4PKSZDG2T6 + // Instance family: Memory optimized + // Storage: 2 x 1,920 + { + Name: "x1.32xlarge", + Arches: 
amd64, + CpuCores: 128, + CpuPower: instances.CpuPower(41216), + Mem: 1998848, + VirtType: &hvm, + Cost: 26010, + }, + + // SKU: 3AW2EEGJZNBGCQTC + // Instance family: General purpose + // Storage: EBS only + { + Name: "t2.large", + Arches: amd64, + CpuCores: 2, + CpuPower: instances.CpuPower(60), + Mem: 8192, + VirtType: &hvm, + Cost: 216, + }, + + // SKU: 4KCYN288G4U4BEAG + // Instance family: Compute optimized + // Storage: EBS only + { + Name: "c4.8xlarge", + Arches: amd64, + CpuCores: 36, + CpuPower: instances.CpuPower(14615), + Mem: 61440, + VirtType: &hvm, + Cost: 2600, + }, + + // SKU: 5GTG8UXYNCRDW5C4 + // Instance family: Compute optimized + // Storage: EBS only + { + Name: "c4.large", + Arches: amd64, + CpuCores: 2, + CpuPower: instances.CpuPower(811), + Mem: 3840, + VirtType: &hvm, + Cost: 163, + }, + + // SKU: 5YDAVRN5B6TSD9NF + // Instance family: Compute optimized + // Storage: EBS only + { + Name: "c4.xlarge", + Arches: amd64, + CpuCores: 4, + CpuPower: instances.CpuPower(1623), + Mem: 7680, + VirtType: &hvm, + Cost: 325, + }, + + // SKU: 6TN6BMN8S44CMRDW + // Instance family: General purpose + // Storage: 1 x 410 + { + Name: "m1.medium", + Arches: both, + CpuCores: 1, + CpuPower: instances.CpuPower(100), + Mem: 3840, + VirtType: ¶virtual, + Cost: 117, + Deprecated: true, + }, + + // SKU: 72TGAF9QN2XH5C5V + // Instance family: General purpose + // Storage: 2 x 420 + { + Name: "m1.large", + Arches: amd64, + CpuCores: 2, + CpuPower: instances.CpuPower(200), + Mem: 7680, + VirtType: ¶virtual, + Cost: 233, + Deprecated: true, + }, + + // SKU: 7DYQRTNH9TX2QQCF + // Instance family: General purpose + // Storage: EBS only + { + Name: "t2.nano", + Arches: both, + CpuCores: 1, + CpuPower: instances.CpuPower(5), + Mem: 512, + VirtType: &hvm, + Cost: 13, + }, + + // SKU: 84JB45JJDJXM67K4 + // Instance family: General purpose + // Storage: EBS only + { + Name: "t2.small", + Arches: both, + CpuCores: 1, + CpuPower: instances.CpuPower(20), + Mem: 2048, + 
VirtType: &hvm, + Cost: 54, + }, + + // SKU: 8VWG8TTVN5G378AH + // Instance family: General purpose + // Storage: 4 x 420 + { + Name: "m1.xlarge", + Arches: amd64, + CpuCores: 4, + CpuPower: instances.CpuPower(400), + Mem: 15360, + VirtType: ¶virtual, + Cost: 467, + Deprecated: true, + }, + + // SKU: ADMZJH7G4TK3XW72 + // Instance family: General purpose + // Storage: 1 x 4 SSD + { + Name: "m3.medium", + Arches: amd64, + CpuCores: 1, + CpuPower: instances.CpuPower(350), + Mem: 3840, + VirtType: &hvm, + Cost: 95, + }, + + // SKU: AGV5N34XYJNRXKRG + // Instance family: Compute optimized + // Storage: 2 x 40 SSD + { + Name: "c3.xlarge", + Arches: amd64, + CpuCores: 4, + CpuPower: instances.CpuPower(1567), + Mem: 7680, + VirtType: &hvm, + Cost: 325, + }, + + // SKU: B6W433SSVKEY68BH + // Instance family: General purpose + // Storage: 2 x 80 SSD + { + Name: "m3.2xlarge", + Arches: amd64, + CpuCores: 8, + CpuPower: instances.CpuPower(2800), + Mem: 30720, + VirtType: &hvm, + Cost: 761, + }, + + // SKU: CPGF97CV44XU5R37 + // Instance family: General purpose + // Storage: 1 x 32 SSD + { + Name: "m3.large", + Arches: amd64, + CpuCores: 2, + CpuPower: instances.CpuPower(700), + Mem: 7680, + VirtType: &hvm, + Cost: 190, + }, + + // SKU: CSJECZGEN7MJ4PNS + // Instance family: Memory optimized + // Storage: 2 x 840 + { + Name: "m2.4xlarge", + Arches: amd64, + CpuCores: 8, + CpuPower: instances.CpuPower(800), + Mem: 70042, + VirtType: ¶virtual, + Cost: 1291, + Deprecated: true, + }, + + // SKU: DE4F8GSMG9ZHARG8 + // Instance family: Compute optimized + // Storage: 2 x 160 SSD + { + Name: "c3.4xlarge", + Arches: amd64, + CpuCores: 16, + CpuPower: instances.CpuPower(6271), + Mem: 30720, + VirtType: &hvm, + Cost: 1300, + }, + + // SKU: EY7JV9JX6H66P24B + // Instance family: Memory optimized + // Storage: 1 x 420 + { + Name: "m2.xlarge", + Arches: amd64, + CpuCores: 2, + CpuPower: instances.CpuPower(200), + Mem: 17511, + VirtType: ¶virtual, + Cost: 323, + Deprecated: true, + }, + + 
// SKU: FDUDDQXMYRBXXPU6 + // Instance family: General purpose + // Storage: 2 x 40 SSD + { + Name: "m3.xlarge", + Arches: amd64, + CpuCores: 4, + CpuPower: instances.CpuPower(1400), + Mem: 15360, + VirtType: &hvm, + Cost: 381, + }, + + // SKU: FSDH6G8FD9Z6EUM2 + // Instance family: Compute optimized + // Storage: 1 x 350 + { + Name: "c1.medium", + Arches: both, + CpuCores: 2, + CpuPower: instances.CpuPower(200), + Mem: 1741, + VirtType: ¶virtual, + Cost: 179, + Deprecated: true, + }, + + // SKU: H8BY38DSCNH87FAD + // Instance family: Compute optimized + // Storage: 2 x 16 SSD + { + Name: "c3.large", + Arches: both, + CpuCores: 2, + CpuPower: instances.CpuPower(783), + Mem: 3840, + VirtType: &hvm, + Cost: 163, + }, + + // SKU: HKWECXA9X8UKDCGK + // Instance family: Compute optimized + // Storage: 4 x 420 + { + Name: "c1.xlarge", + Arches: amd64, + CpuCores: 8, + CpuPower: instances.CpuPower(800), + Mem: 7168, + VirtType: ¶virtual, + Cost: 718, + Deprecated: true, + }, + + // SKU: HR6CM3GFVDT3BAMU + // Instance family: General purpose + // Storage: EBS only + { + Name: "t2.micro", + Arches: both, + CpuCores: 1, + CpuPower: instances.CpuPower(10), + Mem: 1024, + VirtType: &hvm, + Cost: 27, + }, + + // SKU: M6GCPQTQDNQK5XUW + // Instance family: General purpose + // Storage: 1 x 160 + { + Name: "m1.small", + Arches: both, + CpuCores: 1, + CpuPower: instances.CpuPower(100), + Mem: 1741, + VirtType: ¶virtual, + Cost: 58, + Deprecated: true, + }, + + // SKU: MD2REDTVEQDNK4XJ + // Instance family: Micro instances + // Storage: EBS only + { + Name: "t1.micro", + Arches: both, + CpuCores: 1, + CpuPower: instances.CpuPower(20), + Mem: 628, + VirtType: ¶virtual, + Cost: 27, + }, + + // SKU: PDY52X9T9DZY9CT5 + // Instance family: Memory optimized + // Storage: 1 x 850 + { + Name: "m2.2xlarge", + Arches: amd64, + CpuCores: 4, + CpuPower: instances.CpuPower(400), + Mem: 35021, + VirtType: ¶virtual, + Cost: 645, + Deprecated: true, + }, + + // SKU: SHPTTVUVD5P7R2FX + // Instance 
family: Compute optimized + // Storage: 2 x 320 SSD + { + Name: "c3.8xlarge", + Arches: amd64, + CpuCores: 32, + CpuPower: instances.CpuPower(12543), + Mem: 61440, + VirtType: &hvm, + Cost: 2600, + }, + + // SKU: SUWWGGR72MSFMMCK + // Instance family: Compute optimized + // Storage: 2 x 80 SSD + { + Name: "c3.2xlarge", + Arches: amd64, + CpuCores: 8, + CpuPower: instances.CpuPower(3135), + Mem: 15360, + VirtType: &hvm, + Cost: 650, + }, + + // SKU: TRBTF7WUCDPWNYFM + // Instance family: General purpose + // Storage: EBS only + { + Name: "t2.medium", + Arches: both, + CpuCores: 2, + CpuPower: instances.CpuPower(40), + Mem: 4096, + VirtType: &hvm, + Cost: 108, + }, + + // SKU: W6ARQS59M94CBPW2 + // Instance family: Compute optimized + // Storage: EBS only + { + Name: "c4.4xlarge", + Arches: amd64, + CpuCores: 16, + CpuPower: instances.CpuPower(6495), + Mem: 30720, + VirtType: &hvm, + Cost: 1300, + }, + + // SKU: W8DSYP8X87Q34DGY + // Instance family: Compute optimized + // Storage: EBS only + { + Name: "c4.2xlarge", + Arches: amd64, + CpuCores: 8, + CpuPower: instances.CpuPower(3247), + Mem: 15360, + VirtType: &hvm, + Cost: 650, + }, + + // SKU: WCYXWR44SF5RDQSK + // Instance family: Memory optimized + // Storage: 1 x 320 SSD + { + Name: "r3.4xlarge", + Arches: amd64, + CpuCores: 16, + CpuPower: instances.CpuPower(5600), + Mem: 124928, + VirtType: &hvm, + Cost: 2799, + }, + + // SKU: YW6RW65SRZ3Y2FP5 + // Instance family: Memory optimized + // Storage: 2 x 320 SSD + { + Name: "r3.8xlarge", + Arches: amd64, + CpuCores: 32, + CpuPower: instances.CpuPower(11200), + Mem: 249856, + VirtType: &hvm, + Cost: 5597, + }, + }, + + "us-east-1": { + + // SKU: 2GCTBU78G22TGEXZ + // Instance family: General purpose + // Storage: 1 x 160 + { + Name: "m1.small", + Arches: both, + CpuCores: 1, + CpuPower: instances.CpuPower(100), + Mem: 1741, + VirtType: ¶virtual, + Cost: 44, + Deprecated: true, + }, + + // SKU: 39748UVFEUKY3MVQ + // Instance family: Compute optimized + // Storage: 
EBS only + { + Name: "c4.2xlarge", + Arches: amd64, + CpuCores: 8, + CpuPower: instances.CpuPower(3247), + Mem: 15360, + VirtType: &hvm, + Cost: 419, + }, + + // SKU: 3DX9M63484ZSZFJV + // Instance family: Compute optimized + // Storage: 4 x 840 + { + Name: "cc2.8xlarge", + Arches: amd64, + CpuCores: 32, + CpuPower: instances.CpuPower(11647), + Mem: 61952, + VirtType: &hvm, + Cost: 2000, + Deprecated: true, + }, + + // SKU: 3RUU5T58T7XAFAAF + // Instance family: Memory optimized + // Storage: 2 x 120 SSD + { + Name: "cr1.8xlarge", + Arches: amd64, + CpuCores: 32, + CpuPower: instances.CpuPower(3200), + Mem: 249856, + VirtType: &hvm, + Cost: 3500, + Deprecated: true, + }, + + // SKU: 3UP33R2RXCADSPSX + // Instance family: General purpose + // Storage: EBS only + { + Name: "m4.4xlarge", + Arches: amd64, + CpuCores: 16, + CpuPower: instances.CpuPower(5376), + Mem: 65536, + VirtType: &hvm, + Cost: 958, + }, + + // SKU: 47GP959QAF69YPG5 + // Instance family: General purpose + // Storage: EBS only + { + Name: "m4.xlarge", + Arches: amd64, + CpuCores: 4, + CpuPower: instances.CpuPower(1344), + Mem: 16384, + VirtType: &hvm, + Cost: 239, + }, + + // SKU: 48VURD6MVAZ3M5JX + // Instance family: GPU instance + // Storage: 1 x 60 SSD + { + Name: "g2.2xlarge", + Arches: amd64, + CpuCores: 8, + CpuPower: instances.CpuPower(2911), + Mem: 15360, + VirtType: &hvm, + Cost: 650, + }, + + // SKU: 4C7N4APU9GEUZ6H6 + // Instance family: Compute optimized + // Storage: EBS only + { + Name: "c4.large", + Arches: amd64, + CpuCores: 2, + CpuPower: instances.CpuPower(811), + Mem: 3840, + VirtType: &hvm, + Cost: 105, + }, + + // SKU: 4J62B76AXGGMHG57 + // Instance family: Compute optimized + // Storage: EBS only + { + Name: "c4.xlarge", + Arches: amd64, + CpuCores: 4, + CpuPower: instances.CpuPower(1623), + Mem: 7680, + VirtType: &hvm, + Cost: 209, + }, + + // SKU: 4TCUDNKW7PMPSUT2 + // Instance family: Memory optimized + // Storage: 2 x 320 SSD + { + Name: "r3.8xlarge", + Arches: amd64, + 
CpuCores: 32, + CpuPower: instances.CpuPower(11200), + Mem: 249856, + VirtType: &hvm, + Cost: 2660, + }, + + // SKU: 5KHB4S5E8M74C6ES + // Instance family: Storage optimized + // Storage: 1 x 800 SSD + { + Name: "i2.xlarge", + Arches: amd64, + CpuCores: 4, + CpuPower: instances.CpuPower(1400), + Mem: 31232, + VirtType: &hvm, + Cost: 853, + }, + + // SKU: 639ZEB9D49ASFB26 + // Instance family: Micro instances + // Storage: EBS only + { + Name: "t1.micro", + Arches: both, + CpuCores: 1, + CpuPower: instances.CpuPower(20), + Mem: 628, + VirtType: ¶virtual, + Cost: 20, + }, + + // SKU: 6TEX73KEE94WMEED + // Instance family: Compute optimized + // Storage: 4 x 420 + { + Name: "c1.xlarge", + Arches: amd64, + CpuCores: 8, + CpuPower: instances.CpuPower(800), + Mem: 7168, + VirtType: ¶virtual, + Cost: 520, + Deprecated: true, + }, + + // SKU: 8QZCKNB62EDMDT63 + // Instance family: Memory optimized + // Storage: 2 x 1,920 + { + Name: "x1.32xlarge", + Arches: amd64, + CpuCores: 128, + CpuPower: instances.CpuPower(41216), + Mem: 1998848, + VirtType: &hvm, + Cost: 13338, + }, + + // SKU: 8VCNEHQMSCQS4P39 + // Instance family: General purpose + // Storage: EBS only + { + Name: "m4.large", + Arches: amd64, + CpuCores: 2, + CpuPower: instances.CpuPower(672), + Mem: 8192, + VirtType: &hvm, + Cost: 120, + }, + + // SKU: 9G23QA9CK3NU3BRY + // Instance family: Compute optimized + // Storage: EBS only + { + Name: "c4.8xlarge", + Arches: amd64, + CpuCores: 36, + CpuPower: instances.CpuPower(14615), + Mem: 61440, + VirtType: &hvm, + Cost: 1675, + }, + + // SKU: A67CJDV9B3YBP6N6 + // Instance family: GPU instance + // Storage: 2 x 120 SSD + { + Name: "g2.8xlarge", + Arches: amd64, + CpuCores: 32, + CpuPower: instances.CpuPower(11647), + Mem: 61440, + VirtType: &hvm, + Cost: 2600, + }, + + // SKU: A83EBS2T67UP72G2 + // Instance family: General purpose + // Storage: 2 x 40 SSD + { + Name: "m3.xlarge", + Arches: amd64, + CpuCores: 4, + CpuPower: instances.CpuPower(1400), + Mem: 15360, + 
VirtType: &hvm, + Cost: 266, + }, + + // SKU: AGHHWVT6KDRBWTWP + // Instance family: General purpose + // Storage: EBS only + { + Name: "t2.nano", + Arches: both, + CpuCores: 1, + CpuPower: instances.CpuPower(5), + Mem: 512, + VirtType: &hvm, + Cost: 6, + }, + + // SKU: ARPJFM962U4P5HAT + // Instance family: Compute optimized + // Storage: EBS only + { + Name: "c4.4xlarge", + Arches: amd64, + CpuCores: 16, + CpuPower: instances.CpuPower(6495), + Mem: 30720, + VirtType: &hvm, + Cost: 838, + }, + + // SKU: ASDZTDFMC5425T7P + // Instance family: General purpose + // Storage: 1 x 4 SSD + { + Name: "m3.medium", + Arches: amd64, + CpuCores: 1, + CpuPower: instances.CpuPower(350), + Mem: 3840, + VirtType: &hvm, + Cost: 67, + }, + + // SKU: B4JUK3U7ZG63RGSF + // Instance family: Compute optimized + // Storage: 2 x 160 SSD + { + Name: "c3.4xlarge", + Arches: amd64, + CpuCores: 16, + CpuPower: instances.CpuPower(6271), + Mem: 30720, + VirtType: &hvm, + Cost: 840, + }, + + // SKU: CRRB3H2DYHU6K9FV + // Instance family: Storage optimized + // Storage: 24 x 2000 + { + Name: "hs1.8xlarge", + Arches: amd64, + CpuCores: 17, + CpuPower: instances.CpuPower(4760), + Mem: 119808, + VirtType: &hvm, + Cost: 4600, + Deprecated: true, + }, + + // SKU: D5JBSPHEHDXDUWJR + // Instance family: Memory optimized + // Storage: 1 x 160 SSD + { + Name: "r3.2xlarge", + Arches: amd64, + CpuCores: 8, + CpuPower: instances.CpuPower(2800), + Mem: 62464, + VirtType: &hvm, + Cost: 665, + }, + + // SKU: EYGMRBWWFGSQBSBZ + // Instance family: General purpose + // Storage: 4 x 420 + { + Name: "m1.xlarge", + Arches: amd64, + CpuCores: 4, + CpuPower: instances.CpuPower(400), + Mem: 15360, + VirtType: ¶virtual, + Cost: 350, + Deprecated: true, + }, + + // SKU: GEDBVWHPGWMPYFMC + // Instance family: Compute optimized + // Storage: 2 x 40 SSD + { + Name: "c3.xlarge", + Arches: amd64, + CpuCores: 4, + CpuPower: instances.CpuPower(1567), + Mem: 7680, + VirtType: &hvm, + Cost: 210, + }, + + // SKU: H48ZRU3X7FXGTGQM 
+ // Instance family: Compute optimized + // Storage: 2 x 16 SSD + { + Name: "c3.large", + Arches: both, + CpuCores: 2, + CpuPower: instances.CpuPower(783), + Mem: 3840, + VirtType: &hvm, + Cost: 105, + }, + + // SKU: H6T3SYB5G6QCVMZM + // Instance family: Compute optimized + // Storage: 2 x 320 SSD + { + Name: "c3.8xlarge", + Arches: amd64, + CpuCores: 32, + CpuPower: instances.CpuPower(12543), + Mem: 61440, + VirtType: &hvm, + Cost: 1680, + }, + + // SKU: HZC9FAP4F9Y8JW67 + // Instance family: General purpose + // Storage: EBS only + { + Name: "t2.micro", + Arches: both, + CpuCores: 1, + CpuPower: instances.CpuPower(10), + Mem: 1024, + VirtType: &hvm, + Cost: 13, + }, + + // SKU: J4T9ZF4AJ2DXE7SA + // Instance family: General purpose + // Storage: EBS only + { + Name: "m4.10xlarge", + Arches: amd64, + CpuCores: 40, + CpuPower: instances.CpuPower(13440), + Mem: 163840, + VirtType: &hvm, + Cost: 2394, + }, + + // SKU: J5XXRJGFYZHJVQZJ + // Instance family: Memory optimized + // Storage: 1 x 80 SSD + { + Name: "r3.xlarge", + Arches: amd64, + CpuCores: 4, + CpuPower: instances.CpuPower(1400), + Mem: 31232, + VirtType: &hvm, + Cost: 333, + }, + + // SKU: J6U6GMEFVH686HBN + // Instance family: Memory optimized + // Storage: 1 x 420 + { + Name: "m2.xlarge", + Arches: amd64, + CpuCores: 2, + CpuPower: instances.CpuPower(200), + Mem: 17511, + VirtType: ¶virtual, + Cost: 245, + Deprecated: true, + }, + + // SKU: KG9YWSKMK27V6W6V + // Instance family: General purpose + // Storage: 2 x 420 + { + Name: "m1.large", + Arches: amd64, + CpuCores: 2, + CpuPower: instances.CpuPower(200), + Mem: 7680, + VirtType: ¶virtual, + Cost: 175, + Deprecated: true, + }, + + // SKU: MU4QGTJYWR6T73MZ + // Instance family: Storage optimized + // Storage: 2 x 800 SSD + { + Name: "i2.2xlarge", + Arches: amd64, + CpuCores: 8, + CpuPower: instances.CpuPower(2800), + Mem: 62464, + VirtType: &hvm, + Cost: 1705, + }, + + // SKU: NARXYND9H74FTC7A + // Instance family: Storage optimized + // Storage: 8 x 
800 SSD + { + Name: "i2.8xlarge", + Arches: amd64, + CpuCores: 32, + CpuPower: instances.CpuPower(11200), + Mem: 249856, + VirtType: &hvm, + Cost: 6820, + }, + + // SKU: NF67K4WANEWZZV22 + // Instance family: GPU instance + // Storage: 2 x 840 + { + Name: "cg1.4xlarge", + Arches: amd64, + CpuCores: 16, + CpuPower: instances.CpuPower(1600), + Mem: 23040, + VirtType: &hvm, + Cost: 2100, + }, + + // SKU: P63NKZQXED5H7HUK + // Instance family: Storage optimized + // Storage: 6 x 2000 HDD + { + Name: "d2.2xlarge", + Arches: amd64, + CpuCores: 8, + CpuPower: instances.CpuPower(2688), + Mem: 62464, + VirtType: &hvm, + Cost: 1380, + }, + + // SKU: QCQ27AYFPSSTJG55 + // Instance family: Memory optimized + // Storage: 1 x 850 + { + Name: "m2.2xlarge", + Arches: amd64, + CpuCores: 4, + CpuPower: instances.CpuPower(400), + Mem: 35021, + VirtType: ¶virtual, + Cost: 490, + Deprecated: true, + }, + + // SKU: QG5G45WKDWDDHTFV + // Instance family: General purpose + // Storage: EBS only + { + Name: "t2.large", + Arches: amd64, + CpuCores: 2, + CpuPower: instances.CpuPower(60), + Mem: 8192, + VirtType: &hvm, + Cost: 104, + }, + + // SKU: QSNKQ8P78YXPTAH8 + // Instance family: General purpose + // Storage: 2 x 80 SSD + { + Name: "m3.2xlarge", + Arches: amd64, + CpuCores: 8, + CpuPower: instances.CpuPower(2800), + Mem: 30720, + VirtType: &hvm, + Cost: 532, + }, + + // SKU: QY3YSEST3C6FQNQH + // Instance family: General purpose + // Storage: EBS only + { + Name: "t2.medium", + Arches: both, + CpuCores: 2, + CpuPower: instances.CpuPower(40), + Mem: 4096, + VirtType: &hvm, + Cost: 52, + }, + + // SKU: RJZ63YZJGC58TPTS + // Instance family: Storage optimized + // Storage: 2 x 1024 SSD + { + Name: "hi1.4xlarge", + Arches: amd64, + CpuCores: 16, + CpuPower: instances.CpuPower(5376), + Mem: 61952, + VirtType: &hvm, + Cost: 3100, + Deprecated: true, + }, + + // SKU: RKCQDTMY5DZS4JWT + // Instance family: Memory optimized + // Storage: 2 x 840 + { + Name: "m2.4xlarge", + Arches: amd64, + 
CpuCores: 8, + CpuPower: instances.CpuPower(800), + Mem: 70042, + VirtType: ¶virtual, + Cost: 980, + Deprecated: true, + }, + + // SKU: RSN2RZ8JSX98HFVM + // Instance family: Memory optimized + // Storage: 1 x 320 SSD + { + Name: "r3.4xlarge", + Arches: amd64, + CpuCores: 16, + CpuPower: instances.CpuPower(5600), + Mem: 124928, + VirtType: &hvm, + Cost: 1330, + }, + + // SKU: U3KDJRF6FGANNG5Z + // Instance family: Memory optimized + // Storage: 1 x 32 SSD + { + Name: "r3.large", + Arches: amd64, + CpuCores: 2, + CpuPower: instances.CpuPower(700), + Mem: 15616, + VirtType: &hvm, + Cost: 166, + }, + + // SKU: U7343ZA6ABZUXFZ9 + // Instance family: Storage optimized + // Storage: 3 x 2000 HDD + { + Name: "d2.xlarge", + Arches: amd64, + CpuCores: 4, + CpuPower: instances.CpuPower(1344), + Mem: 31232, + VirtType: &hvm, + Cost: 690, + }, + + // SKU: VA8Q43DVPX4YV6NG + // Instance family: General purpose + // Storage: EBS only + { + Name: "t2.small", + Arches: both, + CpuCores: 1, + CpuPower: instances.CpuPower(20), + Mem: 2048, + VirtType: &hvm, + Cost: 26, + }, + + // SKU: VHC3YWSZ6ZFZPJN4 + // Instance family: General purpose + // Storage: EBS only + { + Name: "m4.2xlarge", + Arches: amd64, + CpuCores: 8, + CpuPower: instances.CpuPower(2688), + Mem: 32768, + VirtType: &hvm, + Cost: 479, + }, + + // SKU: X4RWGEB2DKQGCWC2 + // Instance family: Compute optimized + // Storage: 1 x 350 + { + Name: "c1.medium", + Arches: both, + CpuCores: 2, + CpuPower: instances.CpuPower(200), + Mem: 1741, + VirtType: ¶virtual, + Cost: 130, + Deprecated: true, + }, + + // SKU: XP5P8NMSB2W7KP3U + // Instance family: Storage optimized + // Storage: 24 x 2000 HDD + { + Name: "d2.8xlarge", + Arches: amd64, + CpuCores: 36, + CpuPower: instances.CpuPower(12096), + Mem: 249856, + VirtType: &hvm, + Cost: 5520, + }, + + // SKU: YGU2QZY8VPP94FSR + // Instance family: General purpose + // Storage: 1 x 32 SSD + { + Name: "m3.large", + Arches: amd64, + CpuCores: 2, + CpuPower: instances.CpuPower(700), + 
Mem: 7680, + VirtType: &hvm, + Cost: 133, + }, + + // SKU: YUXKRQ5SQSHVKD58 + // Instance family: General purpose + // Storage: 1 x 410 + { + Name: "m1.medium", + Arches: both, + CpuCores: 1, + CpuPower: instances.CpuPower(100), + Mem: 3840, + VirtType: ¶virtual, + Cost: 87, + Deprecated: true, + }, + + // SKU: ZA47RH8PF27SDZKP + // Instance family: Compute optimized + // Storage: 2 x 80 SSD + { + Name: "c3.2xlarge", + Arches: amd64, + CpuCores: 8, + CpuPower: instances.CpuPower(3135), + Mem: 15360, + VirtType: &hvm, + Cost: 420, + }, + + // SKU: ZESHW7CZVERW2BN2 + // Instance family: Storage optimized + // Storage: 4 x 800 SSD + { + Name: "i2.4xlarge", + Arches: amd64, + CpuCores: 16, + CpuPower: instances.CpuPower(5600), + Mem: 124928, + VirtType: &hvm, + Cost: 3410, + }, + + // SKU: ZJC9VZJF5NZNYSVK + // Instance family: Storage optimized + // Storage: 12 x 2000 HDD + { + Name: "d2.4xlarge", + Arches: amd64, + CpuCores: 16, + CpuPower: instances.CpuPower(5376), + Mem: 124928, + VirtType: &hvm, + Cost: 2760, + }, + }, + + "us-gov-west-1": { + + // SKU: 28MYFN2XX2772KFF + // Instance family: Memory optimized + // Storage: 1 x 420 + { + Name: "m2.xlarge", + Arches: amd64, + CpuCores: 2, + CpuPower: instances.CpuPower(200), + Mem: 17511, + VirtType: ¶virtual, + Cost: 293, + Deprecated: true, + }, + + // SKU: 6CVNTPV3HMNBWRCF + // Instance family: General purpose + // Storage: 1 x 4 SSD + { + Name: "m3.medium", + Arches: amd64, + CpuCores: 1, + CpuPower: instances.CpuPower(350), + Mem: 3840, + VirtType: &hvm, + Cost: 84, + }, + + // SKU: 6DWB5HXXA6HTFHVP + // Instance family: General purpose + // Storage: 1 x 410 + { + Name: "m1.medium", + Arches: both, + CpuCores: 1, + CpuPower: instances.CpuPower(100), + Mem: 3840, + VirtType: ¶virtual, + Cost: 106, + Deprecated: true, + }, + + // SKU: 6J7ZTVPXJKAX6EB3 + // Instance family: General purpose + // Storage: 2 x 80 SSD + { + Name: "m3.2xlarge", + Arches: amd64, + CpuCores: 8, + CpuPower: instances.CpuPower(2800), + Mem: 
30720, + VirtType: &hvm, + Cost: 672, + }, + + // SKU: 6PMSJ4V5N26J36BG + // Instance family: Memory optimized + // Storage: 1 x 80 SSD + { + Name: "r3.xlarge", + Arches: amd64, + CpuCores: 4, + CpuPower: instances.CpuPower(1400), + Mem: 31232, + VirtType: &hvm, + Cost: 399, + }, + + // SKU: 6R6V4F6BTSJKCC7Q + // Instance family: Compute optimized + // Storage: EBS only + { + Name: "c4.4xlarge", + Arches: amd64, + CpuCores: 16, + CpuPower: instances.CpuPower(6495), + Mem: 30720, + VirtType: &hvm, + Cost: 1008, + }, + + // SKU: 6VTUAGVX6J37U9GQ + // Instance family: Compute optimized + // Storage: 2 x 160 SSD + { + Name: "c3.4xlarge", + Arches: amd64, + CpuCores: 16, + CpuPower: instances.CpuPower(6271), + Mem: 30720, + VirtType: &hvm, + Cost: 1008, + }, + + // SKU: 7BBW4T3J39FZV85H + // Instance family: General purpose + // Storage: 1 x 160 + { + Name: "m1.small", + Arches: both, + CpuCores: 1, + CpuPower: instances.CpuPower(100), + Mem: 1741, + VirtType: ¶virtual, + Cost: 53, + Deprecated: true, + }, + + // SKU: 89EV5BSMPDHAKNGR + // Instance family: Compute optimized + // Storage: 2 x 320 SSD + { + Name: "c3.8xlarge", + Arches: amd64, + CpuCores: 32, + CpuPower: instances.CpuPower(12543), + Mem: 61440, + VirtType: &hvm, + Cost: 2016, + }, + + // SKU: 98882H5A8BVY29GC + // Instance family: General purpose + // Storage: 4 x 420 + { + Name: "m1.xlarge", + Arches: amd64, + CpuCores: 4, + CpuPower: instances.CpuPower(400), + Mem: 15360, + VirtType: ¶virtual, + Cost: 423, + Deprecated: true, + }, + + // SKU: 9PTNMYF3BTTMXQXW + // Instance family: Storage optimized + // Storage: 1 x 800 SSD + { + Name: "i2.xlarge", + Arches: amd64, + CpuCores: 4, + CpuPower: instances.CpuPower(1400), + Mem: 31232, + VirtType: &hvm, + Cost: 1022, + }, + + // SKU: BP3D9JS4K9UCBBZ3 + // Instance family: General purpose + // Storage: EBS only + { + Name: "t2.large", + Arches: amd64, + CpuCores: 2, + CpuPower: instances.CpuPower(60), + Mem: 8192, + VirtType: &hvm, + Cost: 124, + }, + + // 
SKU: BXAR9D46EJJXYWD9 + // Instance family: Compute optimized + // Storage: 2 x 80 SSD + { + Name: "c3.2xlarge", + Arches: amd64, + CpuCores: 8, + CpuPower: instances.CpuPower(3135), + Mem: 15360, + VirtType: &hvm, + Cost: 504, + }, + + // SKU: CMWE8B43GS86ZUFX + // Instance family: Compute optimized + // Storage: 4 x 840 + { + Name: "cc2.8xlarge", + Arches: amd64, + CpuCores: 32, + CpuPower: instances.CpuPower(11647), + Mem: 61952, + VirtType: &hvm, + Cost: 2250, + Deprecated: true, + }, + + // SKU: CQDRUMDTUB5DGT63 + // Instance family: Memory optimized + // Storage: 1 x 850 + { + Name: "m2.2xlarge", + Arches: amd64, + CpuCores: 4, + CpuPower: instances.CpuPower(400), + Mem: 35021, + VirtType: ¶virtual, + Cost: 586, + Deprecated: true, + }, + + // SKU: CXM9YR66XHMFFJDD + // Instance family: General purpose + // Storage: 1 x 32 SSD + { + Name: "m3.large", + Arches: amd64, + CpuCores: 2, + CpuPower: instances.CpuPower(700), + Mem: 7680, + VirtType: &hvm, + Cost: 168, + }, + + // SKU: EFPYDPDNRQTJBP3V + // Instance family: Memory optimized + // Storage: 2 x 1,920 + { + Name: "x1.32xlarge", + Arches: amd64, + CpuCores: 128, + CpuPower: instances.CpuPower(41216), + Mem: 1998848, + VirtType: &hvm, + Cost: 16006, + }, + + // SKU: EG7K36A6WPQ4YM89 + // Instance family: Storage optimized + // Storage: 2 x 800 SSD + { + Name: "i2.2xlarge", + Arches: amd64, + CpuCores: 8, + CpuPower: instances.CpuPower(2800), + Mem: 62464, + VirtType: &hvm, + Cost: 2045, + }, + + // SKU: EXPVWKGGASGW4CEJ + // Instance family: Compute optimized + // Storage: EBS only + { + Name: "c4.2xlarge", + Arches: amd64, + CpuCores: 8, + CpuPower: instances.CpuPower(3247), + Mem: 15360, + VirtType: &hvm, + Cost: 504, + }, + + // SKU: FUTAFX3ARUD9RJJQ + // Instance family: General purpose + // Storage: 2 x 420 + { + Name: "m1.large", + Arches: amd64, + CpuCores: 2, + CpuPower: instances.CpuPower(200), + Mem: 7680, + VirtType: ¶virtual, + Cost: 211, + Deprecated: true, + }, + + // SKU: FZTFNX6E6VTCH3BH + 
// Instance family: Memory optimized + // Storage: 2 x 840 + { + Name: "m2.4xlarge", + Arches: amd64, + CpuCores: 8, + CpuPower: instances.CpuPower(800), + Mem: 70042, + VirtType: ¶virtual, + Cost: 1171, + Deprecated: true, + }, + + // SKU: GWPUCHQBWTA9GWE7 + // Instance family: Micro instances + // Storage: EBS only + { + Name: "t1.micro", + Arches: both, + CpuCores: 1, + CpuPower: instances.CpuPower(20), + Mem: 628, + VirtType: ¶virtual, + Cost: 24, + }, + + // SKU: K5CWXN5HSW7SME2R + // Instance family: Memory optimized + // Storage: 1 x 160 SSD + { + Name: "r3.2xlarge", + Arches: amd64, + CpuCores: 8, + CpuPower: instances.CpuPower(2800), + Mem: 62464, + VirtType: &hvm, + Cost: 798, + }, + + // SKU: KNCYTRUPV9RQUJ4J + // Instance family: Storage optimized + // Storage: 4 x 800 SSD + { + Name: "i2.4xlarge", + Arches: amd64, + CpuCores: 16, + CpuPower: instances.CpuPower(5600), + Mem: 124928, + VirtType: &hvm, + Cost: 4091, + }, + + // SKU: N5HCW4AX6C3QWS7P + // Instance family: General purpose + // Storage: EBS only + { + Name: "t2.micro", + Arches: both, + CpuCores: 1, + CpuPower: instances.CpuPower(10), + Mem: 1024, + VirtType: &hvm, + Cost: 15, + }, + + // SKU: P76AN6DXWYCD69GD + // Instance family: Memory optimized + // Storage: 1 x 32 SSD + { + Name: "r3.large", + Arches: amd64, + CpuCores: 2, + CpuPower: instances.CpuPower(700), + Mem: 15616, + VirtType: &hvm, + Cost: 200, + }, + + // SKU: Q864N6CVS6UKQ3WZ + // Instance family: Compute optimized + // Storage: EBS only + { + Name: "c4.xlarge", + Arches: amd64, + CpuCores: 4, + CpuPower: instances.CpuPower(1623), + Mem: 7680, + VirtType: &hvm, + Cost: 252, + }, + + // SKU: QFVQE44FY9YSYTTR + // Instance family: Compute optimized + // Storage: EBS only + { + Name: "c4.8xlarge", + Arches: amd64, + CpuCores: 36, + CpuPower: instances.CpuPower(14615), + Mem: 61440, + VirtType: &hvm, + Cost: 2016, + }, + + // SKU: R75SXSFUFBHZPRSS + // Instance family: General purpose + // Storage: EBS only + { + Name: "t2.nano", 
+ Arches: both, + CpuCores: 1, + CpuPower: instances.CpuPower(5), + Mem: 512, + VirtType: &hvm, + Cost: 7, + }, + + // SKU: RG9GWJUK8NQBF57M + // Instance family: Compute optimized + // Storage: 1 x 350 + { + Name: "c1.medium", + Arches: both, + CpuCores: 2, + CpuPower: instances.CpuPower(200), + Mem: 1741, + VirtType: ¶virtual, + Cost: 157, + Deprecated: true, + }, + + // SKU: RQ2E87MS29TV3CDY + // Instance family: Compute optimized + // Storage: 2 x 40 SSD + { + Name: "c3.xlarge", + Arches: amd64, + CpuCores: 4, + CpuPower: instances.CpuPower(1567), + Mem: 7680, + VirtType: &hvm, + Cost: 252, + }, + + // SKU: RY97T2T2385G4KXW + // Instance family: Memory optimized + // Storage: 1 x 320 SSD + { + Name: "r3.4xlarge", + Arches: amd64, + CpuCores: 16, + CpuPower: instances.CpuPower(5600), + Mem: 124928, + VirtType: &hvm, + Cost: 1596, + }, + + // SKU: SPN86ZCGXTJQR3TZ + // Instance family: Storage optimized + // Storage: 24 x 2000 + { + Name: "hs1.8xlarge", + Arches: amd64, + CpuCores: 17, + CpuPower: instances.CpuPower(4760), + Mem: 119808, + VirtType: &hvm, + Cost: 5520, + Deprecated: true, + }, + + // SKU: UF55C3729FAEBSYW + // Instance family: Storage optimized + // Storage: 8 x 800 SSD + { + Name: "i2.8xlarge", + Arches: amd64, + CpuCores: 32, + CpuPower: instances.CpuPower(11200), + Mem: 249856, + VirtType: &hvm, + Cost: 8183, + }, + + // SKU: VR6H7WYKMFWVPAUK + // Instance family: General purpose + // Storage: EBS only + { + Name: "t2.small", + Arches: both, + CpuCores: 1, + CpuPower: instances.CpuPower(20), + Mem: 2048, + VirtType: &hvm, + Cost: 31, + }, + + // SKU: VXKKRPEQERAMFSFJ + // Instance family: General purpose + // Storage: 2 x 40 SSD + { + Name: "m3.xlarge", + Arches: amd64, + CpuCores: 4, + CpuPower: instances.CpuPower(1400), + Mem: 15360, + VirtType: &hvm, + Cost: 336, + }, + + // SKU: W6ABH88PE5BKSJZQ + // Instance family: Memory optimized + // Storage: 2 x 320 SSD + { + Name: "r3.8xlarge", + Arches: amd64, + CpuCores: 32, + CpuPower: 
instances.CpuPower(11200), + Mem: 249856, + VirtType: &hvm, + Cost: 3192, + }, + + // SKU: XBS8PBKJMH9G6SDB + // Instance family: Compute optimized + // Storage: 4 x 420 + { + Name: "c1.xlarge", + Arches: amd64, + CpuCores: 8, + CpuPower: instances.CpuPower(800), + Mem: 7168, + VirtType: ¶virtual, + Cost: 628, + Deprecated: true, + }, + + // SKU: XYAF9UA2YWC7DNZT + // Instance family: Compute optimized + // Storage: EBS only + { + Name: "c4.large", + Arches: amd64, + CpuCores: 2, + CpuPower: instances.CpuPower(811), + Mem: 3840, + VirtType: &hvm, + Cost: 126, + }, + + // SKU: YYGVB3MJTV4Q348E + // Instance family: General purpose + // Storage: EBS only + { + Name: "t2.medium", + Arches: both, + CpuCores: 2, + CpuPower: instances.CpuPower(40), + Mem: 4096, + VirtType: &hvm, + Cost: 62, + }, + + // SKU: ZZTR42B59D85VUCY + // Instance family: Compute optimized + // Storage: 2 x 16 SSD + { + Name: "c3.large", + Arches: both, + CpuCores: 2, + CpuPower: instances.CpuPower(783), + Mem: 3840, + VirtType: &hvm, + Cost: 126, + }, + }, + + "us-west-1": { + + // SKU: 2EBX6PMG5FBY92KC + // Instance family: General purpose + // Storage: EBS only + { + Name: "m4.xlarge", + Arches: amd64, + CpuCores: 4, + CpuPower: instances.CpuPower(1344), + Mem: 16384, + VirtType: &hvm, + Cost: 279, + }, + + // SKU: 2M44JQQN3ZP9874A + // Instance family: GPU instance + // Storage: 2 x 120 SSD + { + Name: "g2.8xlarge", + Arches: amd64, + CpuCores: 32, + CpuPower: instances.CpuPower(11647), + Mem: 61440, + VirtType: &hvm, + Cost: 2808, + }, + + // SKU: 3CUFXRHG38QDZNHT + // Instance family: Storage optimized + // Storage: 8 x 800 SSD + { + Name: "i2.8xlarge", + Arches: amd64, + CpuCores: 32, + CpuPower: instances.CpuPower(11200), + Mem: 249856, + VirtType: &hvm, + Cost: 7502, + }, + + // SKU: 3MU3SGMJKBFABKE9 + // Instance family: General purpose + // Storage: EBS only + { + Name: "m4.large", + Arches: amd64, + CpuCores: 2, + CpuPower: instances.CpuPower(672), + Mem: 8192, + VirtType: &hvm, + 
Cost: 140, + }, + + // SKU: 4GAV6VD5FWD8W8B4 + // Instance family: Compute optimized + // Storage: 2 x 320 SSD + { + Name: "c3.8xlarge", + Arches: amd64, + CpuCores: 32, + CpuPower: instances.CpuPower(12543), + Mem: 61440, + VirtType: &hvm, + Cost: 1912, + }, + + // SKU: 5JEH726H8KDYDWPP + // Instance family: Micro instances + // Storage: EBS only + { + Name: "t1.micro", + Arches: both, + CpuCores: 1, + CpuPower: instances.CpuPower(20), + Mem: 628, + VirtType: ¶virtual, + Cost: 25, + }, + + // SKU: 5JQZHK4R7B7U6R3D + // Instance family: General purpose + // Storage: EBS only + { + Name: "m4.2xlarge", + Arches: amd64, + CpuCores: 8, + CpuPower: instances.CpuPower(2688), + Mem: 32768, + VirtType: &hvm, + Cost: 559, + }, + + // SKU: 6H5EA7PH345UPBVC + // Instance family: Memory optimized + // Storage: 2 x 840 + { + Name: "m2.4xlarge", + Arches: amd64, + CpuCores: 8, + CpuPower: instances.CpuPower(800), + Mem: 70042, + VirtType: ¶virtual, + Cost: 1100, + Deprecated: true, + }, + + // SKU: 7AJJ9ANNCNNX5WY6 + // Instance family: General purpose + // Storage: 1 x 410 + { + Name: "m1.medium", + Arches: both, + CpuCores: 1, + CpuPower: instances.CpuPower(100), + Mem: 3840, + VirtType: ¶virtual, + Cost: 95, + Deprecated: true, + }, + + // SKU: 7QXMEKWFBRKXCR5T + // Instance family: Compute optimized + // Storage: 2 x 80 SSD + { + Name: "c3.2xlarge", + Arches: amd64, + CpuCores: 8, + CpuPower: instances.CpuPower(3135), + Mem: 15360, + VirtType: &hvm, + Cost: 478, + }, + + // SKU: 7T9BSUQBURKAGP2T + // Instance family: General purpose + // Storage: EBS only + { + Name: "t2.medium", + Arches: both, + CpuCores: 2, + CpuPower: instances.CpuPower(40), + Mem: 4096, + VirtType: &hvm, + Cost: 68, + }, + + // SKU: 87ZU79BG86PYWTSG + // Instance family: GPU instance + // Storage: 1 x 60 SSD + { + Name: "g2.2xlarge", + Arches: amd64, + CpuCores: 8, + CpuPower: instances.CpuPower(2911), + Mem: 15360, + VirtType: &hvm, + Cost: 702, + }, + + // SKU: 8RXDZ4H62GHK7Y7N + // Instance family: 
General purpose + // Storage: 1 x 32 SSD + { + Name: "m3.large", + Arches: amd64, + CpuCores: 2, + CpuPower: instances.CpuPower(700), + Mem: 7680, + VirtType: &hvm, + Cost: 154, + }, + + // SKU: 8XWJMTS7HY3XFPBD + // Instance family: Storage optimized + // Storage: 4 x 800 SSD + { + Name: "i2.4xlarge", + Arches: amd64, + CpuCores: 16, + CpuPower: instances.CpuPower(5600), + Mem: 124928, + VirtType: &hvm, + Cost: 3751, + }, + + // SKU: A78J2XTXBB29NGT4 + // Instance family: Memory optimized + // Storage: 2 x 1,920 + { + Name: "x1.32xlarge", + Arches: amd64, + CpuCores: 128, + CpuPower: instances.CpuPower(41216), + Mem: 1998848, + VirtType: &hvm, + Cost: 17340, + }, + + // SKU: CTG879VYY65QE94C + // Instance family: General purpose + // Storage: 2 x 80 SSD + { + Name: "m3.2xlarge", + Arches: amd64, + CpuCores: 8, + CpuPower: instances.CpuPower(2800), + Mem: 30720, + VirtType: &hvm, + Cost: 616, + }, + + // SKU: CTW2PCJK622MCHPV + // Instance family: Memory optimized + // Storage: 2 x 320 SSD + { + Name: "r3.8xlarge", + Arches: amd64, + CpuCores: 32, + CpuPower: instances.CpuPower(11200), + Mem: 249856, + VirtType: &hvm, + Cost: 2964, + }, + + // SKU: D7EXY7CNAHW9BTHD + // Instance family: General purpose + // Storage: EBS only + { + Name: "t2.large", + Arches: amd64, + CpuCores: 2, + CpuPower: instances.CpuPower(60), + Mem: 8192, + VirtType: &hvm, + Cost: 136, + }, + + // SKU: EVYB78ZE853DF3CC + // Instance family: General purpose + // Storage: 2 x 40 SSD + { + Name: "m3.xlarge", + Arches: amd64, + CpuCores: 4, + CpuPower: instances.CpuPower(1400), + Mem: 15360, + VirtType: &hvm, + Cost: 308, + }, + + // SKU: F4RA9QG9BAGEHG29 + // Instance family: Storage optimized + // Storage: 24 x 2000 HDD + { + Name: "d2.8xlarge", + Arches: amd64, + CpuCores: 36, + CpuPower: instances.CpuPower(12096), + Mem: 249856, + VirtType: &hvm, + Cost: 6250, + }, + + // SKU: F5KDHTDT282JC5R3 + // Instance family: General purpose + // Storage: EBS only + { + Name: "m4.4xlarge", + Arches: 
amd64, + CpuCores: 16, + CpuPower: instances.CpuPower(5376), + Mem: 65536, + VirtType: &hvm, + Cost: 1117, + }, + + // SKU: FA7B379WCHNBVMNU + // Instance family: Compute optimized + // Storage: EBS only + { + Name: "c4.4xlarge", + Arches: amd64, + CpuCores: 16, + CpuPower: instances.CpuPower(6495), + Mem: 30720, + VirtType: &hvm, + Cost: 1049, + }, + + // SKU: G3EQX8J7UNTWEVC7 + // Instance family: Compute optimized + // Storage: EBS only + { + Name: "c4.2xlarge", + Arches: amd64, + CpuCores: 8, + CpuPower: instances.CpuPower(3247), + Mem: 15360, + VirtType: &hvm, + Cost: 524, + }, + + // SKU: GRGVZYA9QN53ASFB + // Instance family: General purpose + // Storage: 2 x 420 + { + Name: "m1.large", + Arches: amd64, + CpuCores: 2, + CpuPower: instances.CpuPower(200), + Mem: 7680, + VirtType: ¶virtual, + Cost: 190, + Deprecated: true, + }, + + // SKU: GRKGK4BN2EGBK686 + // Instance family: General purpose + // Storage: EBS only + { + Name: "t2.nano", + Arches: both, + CpuCores: 1, + CpuPower: instances.CpuPower(5), + Mem: 512, + VirtType: &hvm, + Cost: 8, + }, + + // SKU: GSN36ZXJH466ES5F + // Instance family: Compute optimized + // Storage: EBS only + { + Name: "c4.8xlarge", + Arches: amd64, + CpuCores: 36, + CpuPower: instances.CpuPower(14615), + Mem: 61440, + VirtType: &hvm, + Cost: 2098, + }, + + // SKU: H8QFMXWT89NGP6VU + // Instance family: Compute optimized + // Storage: 2 x 16 SSD + { + Name: "c3.large", + Arches: both, + CpuCores: 2, + CpuPower: instances.CpuPower(783), + Mem: 3840, + VirtType: &hvm, + Cost: 120, + }, + + // SKU: J5UNF2XTPQCS5N59 + // Instance family: Memory optimized + // Storage: 1 x 320 SSD + { + Name: "r3.4xlarge", + Arches: amd64, + CpuCores: 16, + CpuPower: instances.CpuPower(5600), + Mem: 124928, + VirtType: &hvm, + Cost: 1482, + }, + + // SKU: J6T772QKJ5B49GMC + // Instance family: General purpose + // Storage: EBS only + { + Name: "t2.micro", + Arches: both, + CpuCores: 1, + CpuPower: instances.CpuPower(10), + Mem: 1024, + VirtType: 
&hvm, + Cost: 17, + }, + + // SKU: JHV4BKWFVMXQ2T6R + // Instance family: General purpose + // Storage: EBS only + { + Name: "t2.small", + Arches: both, + CpuCores: 1, + CpuPower: instances.CpuPower(20), + Mem: 2048, + VirtType: &hvm, + Cost: 34, + }, + + // SKU: JJRB8PAXGN6JTB3D + // Instance family: General purpose + // Storage: 4 x 420 + { + Name: "m1.xlarge", + Arches: amd64, + CpuCores: 4, + CpuPower: instances.CpuPower(400), + Mem: 15360, + VirtType: ¶virtual, + Cost: 379, + Deprecated: true, + }, + + // SKU: JUT2GARXC5CE93CM + // Instance family: Compute optimized + // Storage: EBS only + { + Name: "c4.xlarge", + Arches: amd64, + CpuCores: 4, + CpuPower: instances.CpuPower(1623), + Mem: 7680, + VirtType: &hvm, + Cost: 262, + }, + + // SKU: P5VFWENV9YDAQVFH + // Instance family: General purpose + // Storage: 1 x 160 + { + Name: "m1.small", + Arches: both, + CpuCores: 1, + CpuPower: instances.CpuPower(100), + Mem: 1741, + VirtType: ¶virtual, + Cost: 47, + Deprecated: true, + }, + + // SKU: PSNEQGH9XVX3FSE8 + // Instance family: Compute optimized + // Storage: 2 x 160 SSD + { + Name: "c3.4xlarge", + Arches: amd64, + CpuCores: 16, + CpuPower: instances.CpuPower(6271), + Mem: 30720, + VirtType: &hvm, + Cost: 956, + }, + + // SKU: PYE3YUCCZCUBD7Z6 + // Instance family: Compute optimized + // Storage: EBS only + { + Name: "c4.large", + Arches: amd64, + CpuCores: 2, + CpuPower: instances.CpuPower(811), + Mem: 3840, + VirtType: &hvm, + Cost: 131, + }, + + // SKU: RMARGSE4CA952UDV + // Instance family: Memory optimized + // Storage: 1 x 80 SSD + { + Name: "r3.xlarge", + Arches: amd64, + CpuCores: 4, + CpuPower: instances.CpuPower(1400), + Mem: 31232, + VirtType: &hvm, + Cost: 371, + }, + + // SKU: RT9NWVFZWQDDRNES + // Instance family: General purpose + // Storage: 1 x 4 SSD + { + Name: "m3.medium", + Arches: amd64, + CpuCores: 1, + CpuPower: instances.CpuPower(350), + Mem: 3840, + VirtType: &hvm, + Cost: 77, + }, + + // SKU: S6XCDNADM5DDPNUP + // Instance family: 
Compute optimized + // Storage: 2 x 40 SSD + { + Name: "c3.xlarge", + Arches: amd64, + CpuCores: 4, + CpuPower: instances.CpuPower(1567), + Mem: 7680, + VirtType: &hvm, + Cost: 239, + }, + + // SKU: S7T2R43H93585V7D + // Instance family: General purpose + // Storage: EBS only + { + Name: "m4.10xlarge", + Arches: amd64, + CpuCores: 40, + CpuPower: instances.CpuPower(13440), + Mem: 163840, + VirtType: &hvm, + Cost: 2793, + }, + + // SKU: TMYBBH8MNS5KCXDG + // Instance family: Storage optimized + // Storage: 2 x 800 SSD + { + Name: "i2.2xlarge", + Arches: amd64, + CpuCores: 8, + CpuPower: instances.CpuPower(2800), + Mem: 62464, + VirtType: &hvm, + Cost: 1876, + }, + + // SKU: V3KZ88KMZGE7XS8G + // Instance family: Storage optimized + // Storage: 3 x 2000 HDD + { + Name: "d2.xlarge", + Arches: amd64, + CpuCores: 4, + CpuPower: instances.CpuPower(1344), + Mem: 31232, + VirtType: &hvm, + Cost: 781, + }, + + // SKU: VZ7V29X35F98VENC + // Instance family: Memory optimized + // Storage: 1 x 160 SSD + { + Name: "r3.2xlarge", + Arches: amd64, + CpuCores: 8, + CpuPower: instances.CpuPower(2800), + Mem: 62464, + VirtType: &hvm, + Cost: 741, + }, + + // SKU: VZA22HBW4H82PHGF + // Instance family: Memory optimized + // Storage: 1 x 850 + { + Name: "m2.2xlarge", + Arches: amd64, + CpuCores: 4, + CpuPower: instances.CpuPower(400), + Mem: 35021, + VirtType: ¶virtual, + Cost: 550, + Deprecated: true, + }, + + // SKU: WNGPF3ZVZEAVC7FH + // Instance family: Storage optimized + // Storage: 12 x 2000 HDD + { + Name: "d2.4xlarge", + Arches: amd64, + CpuCores: 16, + CpuPower: instances.CpuPower(5376), + Mem: 124928, + VirtType: &hvm, + Cost: 3125, + }, + + // SKU: X7EQJS6PVD6RDBPD + // Instance family: Storage optimized + // Storage: 1 x 800 SSD + { + Name: "i2.xlarge", + Arches: amd64, + CpuCores: 4, + CpuPower: instances.CpuPower(1400), + Mem: 31232, + VirtType: &hvm, + Cost: 938, + }, + + // SKU: XFDUQKGKAPWHG6P5 + // Instance family: Compute optimized + // Storage: 1 x 350 + { + Name: 
"c1.medium", + Arches: both, + CpuCores: 2, + CpuPower: instances.CpuPower(200), + Mem: 1741, + VirtType: ¶virtual, + Cost: 148, + Deprecated: true, + }, + + // SKU: XKAXY7525KWTBXQJ + // Instance family: Memory optimized + // Storage: 1 x 420 + { + Name: "m2.xlarge", + Arches: amd64, + CpuCores: 2, + CpuPower: instances.CpuPower(200), + Mem: 17511, + VirtType: ¶virtual, + Cost: 275, + Deprecated: true, + }, + + // SKU: YY26V92H8QNEPQER + // Instance family: Memory optimized + // Storage: 1 x 32 SSD + { + Name: "r3.large", + Arches: amd64, + CpuCores: 2, + CpuPower: instances.CpuPower(700), + Mem: 15616, + VirtType: &hvm, + Cost: 185, + }, + + // SKU: ZXHBSSRM8QPYG3GC + // Instance family: Storage optimized + // Storage: 6 x 2000 HDD + { + Name: "d2.2xlarge", + Arches: amd64, + CpuCores: 8, + CpuPower: instances.CpuPower(2688), + Mem: 62464, + VirtType: &hvm, + Cost: 1563, + }, + + // SKU: ZXWJ6NZFPEP89DVZ + // Instance family: Compute optimized + // Storage: 4 x 420 + { + Name: "c1.xlarge", + Arches: amd64, + CpuCores: 8, + CpuPower: instances.CpuPower(800), + Mem: 7168, + VirtType: ¶virtual, + Cost: 592, + Deprecated: true, + }, + }, + + "us-west-2": { + + // SKU: 2ES9C4RF3WGQZAQN + // Instance family: General purpose + // Storage: EBS only + { + Name: "t2.medium", + Arches: both, + CpuCores: 2, + CpuPower: instances.CpuPower(40), + Mem: 4096, + VirtType: &hvm, + Cost: 52, + }, + + // SKU: 2J3G8CUM4UVYVFJH + // Instance family: General purpose + // Storage: EBS only + { + Name: "t2.nano", + Arches: both, + CpuCores: 1, + CpuPower: instances.CpuPower(5), + Mem: 512, + VirtType: &hvm, + Cost: 6, + }, + + // SKU: 2JUMD5V8V9V6D9JC + // Instance family: General purpose + // Storage: 1 x 410 + { + Name: "m1.medium", + Arches: both, + CpuCores: 1, + CpuPower: instances.CpuPower(100), + Mem: 3840, + VirtType: ¶virtual, + Cost: 87, + Deprecated: true, + }, + + // SKU: 34G9YZGUJNTY6HG9 + // Instance family: Memory optimized + // Storage: 2 x 1,920 + { + Name: 
"x1.32xlarge", + Arches: amd64, + CpuCores: 128, + CpuPower: instances.CpuPower(41216), + Mem: 1998848, + VirtType: &hvm, + Cost: 13338, + }, + + // SKU: 3FTJBHZMWT7D76MD + // Instance family: Compute optimized + // Storage: EBS only + { + Name: "c4.4xlarge", + Arches: amd64, + CpuCores: 16, + CpuPower: instances.CpuPower(6495), + Mem: 30720, + VirtType: &hvm, + Cost: 838, + }, + + // SKU: 4SCSPCTHFCXYY6GT + // Instance family: General purpose + // Storage: 4 x 420 + { + Name: "m1.xlarge", + Arches: amd64, + CpuCores: 4, + CpuPower: instances.CpuPower(400), + Mem: 15360, + VirtType: ¶virtual, + Cost: 350, + Deprecated: true, + }, + + // SKU: 584JD8RT9GR57BFS + // Instance family: Compute optimized + // Storage: EBS only + { + Name: "c4.xlarge", + Arches: amd64, + CpuCores: 4, + CpuPower: instances.CpuPower(1623), + Mem: 7680, + VirtType: &hvm, + Cost: 209, + }, + + // SKU: 5EPUM8UK2RTQKW5E + // Instance family: Compute optimized + // Storage: 4 x 420 + { + Name: "c1.xlarge", + Arches: amd64, + CpuCores: 8, + CpuPower: instances.CpuPower(800), + Mem: 7168, + VirtType: ¶virtual, + Cost: 520, + Deprecated: true, + }, + + // SKU: 5WF4ZHD94FEY4YDX + // Instance family: Memory optimized + // Storage: 1 x 850 + { + Name: "m2.2xlarge", + Arches: amd64, + CpuCores: 4, + CpuPower: instances.CpuPower(400), + Mem: 35021, + VirtType: ¶virtual, + Cost: 490, + Deprecated: true, + }, + + // SKU: 672XUMHHEC7SYFT7 + // Instance family: Compute optimized + // Storage: 2 x 16 SSD + { + Name: "c3.large", + Arches: both, + CpuCores: 2, + CpuPower: instances.CpuPower(783), + Mem: 3840, + VirtType: &hvm, + Cost: 105, + }, + + // SKU: 6P434MFC33XNF65Z + // Instance family: Compute optimized + // Storage: 4 x 840 + { + Name: "cc2.8xlarge", + Arches: amd64, + CpuCores: 32, + CpuPower: instances.CpuPower(11647), + Mem: 61952, + VirtType: &hvm, + Cost: 2000, + Deprecated: true, + }, + + // SKU: 7528XD9PPHXN6NN2 + // Instance family: GPU instance + // Storage: 2 x 120 SSD + { + Name: 
"g2.8xlarge", + Arches: amd64, + CpuCores: 32, + CpuPower: instances.CpuPower(11647), + Mem: 61440, + VirtType: &hvm, + Cost: 2600, + }, + + // SKU: 7RMRB492WTPDQ5Z4 + // Instance family: Memory optimized + // Storage: 1 x 32 SSD + { + Name: "r3.large", + Arches: amd64, + CpuCores: 2, + CpuPower: instances.CpuPower(700), + Mem: 15616, + VirtType: &hvm, + Cost: 166, + }, + + // SKU: 9GHZN7VCNV2MGV4N + // Instance family: Compute optimized + // Storage: 2 x 160 SSD + { + Name: "c3.4xlarge", + Arches: amd64, + CpuCores: 16, + CpuPower: instances.CpuPower(6271), + Mem: 30720, + VirtType: &hvm, + Cost: 840, + }, + + // SKU: 9HMZJQ3SKEW4P7ST + // Instance family: Storage optimized + // Storage: 4 x 800 SSD + { + Name: "i2.4xlarge", + Arches: amd64, + CpuCores: 16, + CpuPower: instances.CpuPower(5600), + Mem: 124928, + VirtType: &hvm, + Cost: 3410, + }, + + // SKU: 9NSBRG2FE96XPHXK + // Instance family: Compute optimized + // Storage: 2 x 40 SSD + { + Name: "c3.xlarge", + Arches: amd64, + CpuCores: 4, + CpuPower: instances.CpuPower(1567), + Mem: 7680, + VirtType: &hvm, + Cost: 210, + }, + + // SKU: 9RYWCN75CJX2C238 + // Instance family: Storage optimized + // Storage: 6 x 2000 HDD + { + Name: "d2.2xlarge", + Arches: amd64, + CpuCores: 8, + CpuPower: instances.CpuPower(2688), + Mem: 62464, + VirtType: &hvm, + Cost: 1380, + }, + + // SKU: AKWXS6FJQE43VAZ9 + // Instance family: Storage optimized + // Storage: 8 x 800 SSD + { + Name: "i2.8xlarge", + Arches: amd64, + CpuCores: 32, + CpuPower: instances.CpuPower(11200), + Mem: 249856, + VirtType: &hvm, + Cost: 6820, + }, + + // SKU: B2M25Y2U9824Q5TG + // Instance family: General purpose + // Storage: EBS only + { + Name: "m4.large", + Arches: amd64, + CpuCores: 2, + CpuPower: instances.CpuPower(672), + Mem: 8192, + VirtType: &hvm, + Cost: 120, + }, + + // SKU: BMEYUTP658QKQRTP + // Instance family: General purpose + // Storage: EBS only + { + Name: "m4.4xlarge", + Arches: amd64, + CpuCores: 16, + CpuPower: 
instances.CpuPower(5376), + Mem: 65536, + VirtType: &hvm, + Cost: 958, + }, + + // SKU: BNBBBYA6WNXQ3TZV + // Instance family: Storage optimized + // Storage: 12 x 2000 HDD + { + Name: "d2.4xlarge", + Arches: amd64, + CpuCores: 16, + CpuPower: instances.CpuPower(5376), + Mem: 124928, + VirtType: &hvm, + Cost: 2760, + }, + + // SKU: CP2TNWZCKSRY486E + // Instance family: General purpose + // Storage: 2 x 80 SSD + { + Name: "m3.2xlarge", + Arches: amd64, + CpuCores: 8, + CpuPower: instances.CpuPower(2800), + Mem: 30720, + VirtType: &hvm, + Cost: 532, + }, + + // SKU: CVFJSWADA39YVNW2 + // Instance family: General purpose + // Storage: 2 x 420 + { + Name: "m1.large", + Arches: amd64, + CpuCores: 2, + CpuPower: instances.CpuPower(200), + Mem: 7680, + VirtType: ¶virtual, + Cost: 175, + Deprecated: true, + }, + + // SKU: D8RPR5AJPDXSC9DF + // Instance family: General purpose + // Storage: 1 x 160 + { + Name: "m1.small", + Arches: both, + CpuCores: 1, + CpuPower: instances.CpuPower(100), + Mem: 1741, + VirtType: ¶virtual, + Cost: 44, + Deprecated: true, + }, + + // SKU: DTDPGWV4T5RAVP44 + // Instance family: Storage optimized + // Storage: 24 x 2000 + { + Name: "hs1.8xlarge", + Arches: amd64, + CpuCores: 17, + CpuPower: instances.CpuPower(4760), + Mem: 119808, + VirtType: &hvm, + Cost: 4600, + Deprecated: true, + }, + + // SKU: DW2RY9FQP8VE6V74 + // Instance family: Storage optimized + // Storage: 24 x 2000 HDD + { + Name: "d2.8xlarge", + Arches: amd64, + CpuCores: 36, + CpuPower: instances.CpuPower(12096), + Mem: 249856, + VirtType: &hvm, + Cost: 5520, + }, + + // SKU: E7T5V224CMC9A43F + // Instance family: General purpose + // Storage: 1 x 32 SSD + { + Name: "m3.large", + Arches: amd64, + CpuCores: 2, + CpuPower: instances.CpuPower(700), + Mem: 7680, + VirtType: &hvm, + Cost: 133, + }, + + // SKU: F6544RN8RCJHYC5Z + // Instance family: Compute optimized + // Storage: 2 x 80 SSD + { + Name: "c3.2xlarge", + Arches: amd64, + CpuCores: 8, + CpuPower: 
instances.CpuPower(3135), + Mem: 15360, + VirtType: &hvm, + Cost: 420, + }, + + // SKU: GMTWE5CTY4FEUYDN + // Instance family: Memory optimized + // Storage: 1 x 160 SSD + { + Name: "r3.2xlarge", + Arches: amd64, + CpuCores: 8, + CpuPower: instances.CpuPower(2800), + Mem: 62464, + VirtType: &hvm, + Cost: 665, + }, + + // SKU: J9H28ZVG9UDW7CX4 + // Instance family: General purpose + // Storage: EBS only + { + Name: "m4.2xlarge", + Arches: amd64, + CpuCores: 8, + CpuPower: instances.CpuPower(2688), + Mem: 32768, + VirtType: &hvm, + Cost: 479, + }, + + // SKU: JH68FQ55JWMC4CG9 + // Instance family: General purpose + // Storage: 1 x 4 SSD + { + Name: "m3.medium", + Arches: amd64, + CpuCores: 1, + CpuPower: instances.CpuPower(350), + Mem: 3840, + VirtType: &hvm, + Cost: 67, + }, + + // SKU: JNZ6ESS4AS6RFUAF + // Instance family: Memory optimized + // Storage: 1 x 320 SSD + { + Name: "r3.4xlarge", + Arches: amd64, + CpuCores: 16, + CpuPower: instances.CpuPower(5600), + Mem: 124928, + VirtType: &hvm, + Cost: 1330, + }, + + // SKU: K24TXC5VMFQZ53MC + // Instance family: Compute optimized + // Storage: EBS only + { + Name: "c4.large", + Arches: amd64, + CpuCores: 2, + CpuPower: instances.CpuPower(811), + Mem: 3840, + VirtType: &hvm, + Cost: 105, + }, + + // SKU: K5MSZ8JUCECB23H9 + // Instance family: Storage optimized + // Storage: 2 x 1024 SSD + { + Name: "hi1.4xlarge", + Arches: amd64, + CpuCores: 16, + CpuPower: instances.CpuPower(5376), + Mem: 61952, + VirtType: &hvm, + Cost: 3100, + Deprecated: true, + }, + + // SKU: KCTVWQQPE9VFXHGP + // Instance family: Memory optimized + // Storage: 1 x 420 + { + Name: "m2.xlarge", + Arches: amd64, + CpuCores: 2, + CpuPower: instances.CpuPower(200), + Mem: 17511, + VirtType: ¶virtual, + Cost: 245, + Deprecated: true, + }, + + // SKU: MBANS55WTSZ5HYS8 + // Instance family: Memory optimized + // Storage: 1 x 80 SSD + { + Name: "r3.xlarge", + Arches: amd64, + CpuCores: 4, + CpuPower: instances.CpuPower(1400), + Mem: 31232, + VirtType: 
&hvm, + Cost: 333, + }, + + // SKU: MNG2Y3YRJK7GPKQR + // Instance family: Compute optimized + // Storage: 2 x 320 SSD + { + Name: "c3.8xlarge", + Arches: amd64, + CpuCores: 32, + CpuPower: instances.CpuPower(12543), + Mem: 61440, + VirtType: &hvm, + Cost: 1680, + }, + + // SKU: MWG952JV6DF8YBYE + // Instance family: General purpose + // Storage: EBS only + { + Name: "m4.10xlarge", + Arches: amd64, + CpuCores: 40, + CpuPower: instances.CpuPower(13440), + Mem: 163840, + VirtType: &hvm, + Cost: 2394, + }, + + // SKU: N4D3MGNKSH7Q9KT3 + // Instance family: General purpose + // Storage: EBS only + { + Name: "t2.micro", + Arches: both, + CpuCores: 1, + CpuPower: instances.CpuPower(10), + Mem: 1024, + VirtType: &hvm, + Cost: 13, + }, + + // SKU: N5F93UFYUKWKB8KE + // Instance family: Storage optimized + // Storage: 1 x 800 SSD + { + Name: "i2.xlarge", + Arches: amd64, + CpuCores: 4, + CpuPower: instances.CpuPower(1400), + Mem: 31232, + VirtType: &hvm, + Cost: 853, + }, + + // SKU: NA6BZ2FSPKCZGWTA + // Instance family: Memory optimized + // Storage: 2 x 320 SSD + { + Name: "r3.8xlarge", + Arches: amd64, + CpuCores: 32, + CpuPower: instances.CpuPower(11200), + Mem: 249856, + VirtType: &hvm, + Cost: 2660, + }, + + // SKU: P9ZPWZF7CCR7MS77 + // Instance family: Compute optimized + // Storage: 1 x 350 + { + Name: "c1.medium", + Arches: both, + CpuCores: 2, + CpuPower: instances.CpuPower(200), + Mem: 1741, + VirtType: ¶virtual, + Cost: 130, + Deprecated: true, + }, + + // SKU: PWGQ6MKD7A6EHVXN + // Instance family: Memory optimized + // Storage: 2 x 120 SSD + { + Name: "cr1.8xlarge", + Arches: amd64, + CpuCores: 32, + CpuPower: instances.CpuPower(3200), + Mem: 249856, + VirtType: &hvm, + Cost: 3500, + Deprecated: true, + }, + + // SKU: QBRGX479S7RZ4QEA + // Instance family: GPU instance + // Storage: 1 x 60 SSD + { + Name: "g2.2xlarge", + Arches: amd64, + CpuCores: 8, + CpuPower: instances.CpuPower(2911), + Mem: 15360, + VirtType: &hvm, + Cost: 650, + }, + + // SKU: 
QBYJCF2RGQTF5H5D + // Instance family: Memory optimized + // Storage: 2 x 840 + { + Name: "m2.4xlarge", + Arches: amd64, + CpuCores: 8, + CpuPower: instances.CpuPower(800), + Mem: 70042, + VirtType: ¶virtual, + Cost: 980, + Deprecated: true, + }, + + // SKU: UNB4R4KS4XXHQFD2 + // Instance family: Compute optimized + // Storage: EBS only + { + Name: "c4.8xlarge", + Arches: amd64, + CpuCores: 36, + CpuPower: instances.CpuPower(14615), + Mem: 61440, + VirtType: &hvm, + Cost: 1675, + }, + + // SKU: VBKTV5SAFT4WNV9X + // Instance family: General purpose + // Storage: 2 x 40 SSD + { + Name: "m3.xlarge", + Arches: amd64, + CpuCores: 4, + CpuPower: instances.CpuPower(1400), + Mem: 15360, + VirtType: &hvm, + Cost: 266, + }, + + // SKU: WE87HQHP89BK3AXK + // Instance family: General purpose + // Storage: EBS only + { + Name: "t2.large", + Arches: amd64, + CpuCores: 2, + CpuPower: instances.CpuPower(60), + Mem: 8192, + VirtType: &hvm, + Cost: 104, + }, + + // SKU: X5NPE8XF7KHV7AAD + // Instance family: General purpose + // Storage: EBS only + { + Name: "m4.xlarge", + Arches: amd64, + CpuCores: 4, + CpuPower: instances.CpuPower(1344), + Mem: 16384, + VirtType: &hvm, + Cost: 239, + }, + + // SKU: XK9YF9AJ9EBH7W4U + // Instance family: General purpose + // Storage: EBS only + { + Name: "t2.small", + Arches: both, + CpuCores: 1, + CpuPower: instances.CpuPower(20), + Mem: 2048, + VirtType: &hvm, + Cost: 26, + }, + + // SKU: XUTTHNZ5B5VJKKDE + // Instance family: Micro instances + // Storage: EBS only + { + Name: "t1.micro", + Arches: both, + CpuCores: 1, + CpuPower: instances.CpuPower(20), + Mem: 628, + VirtType: ¶virtual, + Cost: 20, + }, + + // SKU: YBN8Q7AQJD9ZT57S + // Instance family: Compute optimized + // Storage: EBS only + { + Name: "c4.2xlarge", + Arches: amd64, + CpuCores: 8, + CpuPower: instances.CpuPower(3247), + Mem: 15360, + VirtType: &hvm, + Cost: 419, + }, + + // SKU: YMWQW8W92QHE628D + // Instance family: Storage optimized + // Storage: 3 x 2000 HDD + { + Name: 
"d2.xlarge", + Arches: amd64, + CpuCores: 4, + CpuPower: instances.CpuPower(1344), + Mem: 31232, + VirtType: &hvm, + Cost: 690, + }, + + // SKU: ZKYE77DHMC32Y9BK + // Instance family: Storage optimized + // Storage: 2 x 800 SSD + { + Name: "i2.2xlarge", + Arches: amd64, + CpuCores: 8, + CpuPower: instances.CpuPower(2800), + Mem: 62464, + VirtType: &hvm, + Cost: 1705, + }, + }, +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/ec2/internal/ec2instancetypes/instancetypes.go juju-core-2.0.0/src/github.com/juju/juju/provider/ec2/internal/ec2instancetypes/instancetypes.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/ec2/internal/ec2instancetypes/instancetypes.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/ec2/internal/ec2instancetypes/instancetypes.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,60 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package ec2instancetypes + +//go:generate go run process_cost_data.go -o generated.go index.json + +import ( + "strings" + + "github.com/juju/juju/environs/instances" +) + +// RegionInstanceTypes returns the instance types for the named region. +func RegionInstanceTypes(region string) []instances.InstanceType { + // NOTE(axw) at the time of writing, there is no cost + // information for China (Beijing). For any regions + // that we don't know about, we substitute us-east-1 + // and hope that they're equivalent. + instanceTypes, ok := allInstanceTypes[region] + if !ok { + instanceTypes = allInstanceTypes["us-east-1"] + } + return instanceTypes +} + +// SupportsClassic reports whether the instance type with the given +// name can be run in EC2-Classic. +// +// At the time of writing, we know that the following instance type +// families support only VPC: C4, M4, P2, T2, X1. 
However, rather +// than hard-coding that list, we assume that any new instance type +// families support VPC only, and so we hard-code the inverse of the +// list at the time of writing. +// +// See: +// http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-vpc.html#vpc-only-instance-types +func SupportsClassic(instanceType string) bool { + parts := strings.SplitN(instanceType, ".", 2) + if len(parts) < 2 { + return false + } + switch strings.ToLower(parts[0]) { + case + "c1", "c3", + "cc2", + "cg1", + "cr1", + "d2", + "g2", + "hi1", + "hs1", + "i2", + "m1", "m2", "m3", + "r3", + "t1": + return true + } + return false +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/ec2/internal/ec2instancetypes/instancetypes_test.go juju-core-2.0.0/src/github.com/juju/juju/provider/ec2/internal/ec2instancetypes/instancetypes_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/ec2/internal/ec2instancetypes/instancetypes_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/ec2/internal/ec2instancetypes/instancetypes_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,108 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package ec2instancetypes_test + +import ( + "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + "github.com/juju/utils/set" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/provider/ec2/internal/ec2instancetypes" +) + +type InstanceTypesSuite struct { + testing.IsolationSuite +} + +var _ = gc.Suite(&InstanceTypesSuite{}) + +func (s *InstanceTypesSuite) TestRegionInstanceTypes(c *gc.C) { + // This is the set of instance type names we had hard coded previously. 
+ knownInstanceTypes := set.NewStrings( + "m1.small", "m1.medium", "m1.large", "m1.xlarge", + "m4.large", "m4.xlarge", "m4.2xlarge", "m4.4xlarge", "m4.10xlarge", + "m3.medium", "m3.large", "m3.xlarge", "m3.2xlarge", + "c1.medium", "c1.xlarge", "cc2.8xlarge", + "c3.large", "c3.xlarge", "c3.2xlarge", "c3.4xlarge", "c3.8xlarge", + "cg1.4xlarge", + "g2.2xlarge", + "m2.xlarge", "m2.2xlarge", "m2.4xlarge", "cr1.8xlarge", + "r3.large", "r3.xlarge", "r3.2xlarge", "r3.4xlarge", "r3.8xlarge", + "hi1.4xlarge", + "i2.xlarge", "i2.2xlarge", "i2.8xlarge", "hs1.8xlarge", + "t1.micro", + "t2.micro", "t2.small", "t2.medium", + "c4.large", "c4.xlarge", "c4.2xlarge", "c4.4xlarge", "c4.8xlarge", + ) + seen := make(map[string]bool) + var unknownInstanceTypes []string + instanceTypes := ec2instancetypes.RegionInstanceTypes("us-east-1") + for _, instanceType := range instanceTypes { + c.Assert(instanceType.Cost, gc.Not(gc.Equals), 0) + c.Assert(seen[instanceType.Name], jc.IsFalse) // no duplicates + seen[instanceType.Name] = true + + if !knownInstanceTypes.Contains(instanceType.Name) { + unknownInstanceTypes = append(unknownInstanceTypes, instanceType.Name) + } else { + knownInstanceTypes.Remove(instanceType.Name) + } + } + c.Assert(knownInstanceTypes, gc.HasLen, 0) // all accounted for + if len(unknownInstanceTypes) > 0 { + c.Logf("unknown instance types: %s", unknownInstanceTypes) + } +} + +func (s *InstanceTypesSuite) TestRegionInstanceTypesAvailability(c *gc.C) { + // Some instance types are only available in some regions. 
+ usWest1InstanceTypes := set.NewStrings() + usEast1InstanceTypes := set.NewStrings() + for _, instanceType := range ec2instancetypes.RegionInstanceTypes("us-west-1") { + usWest1InstanceTypes.Add(instanceType.Name) + } + for _, instanceType := range ec2instancetypes.RegionInstanceTypes("us-east-1") { + usEast1InstanceTypes.Add(instanceType.Name) + } + c.Assert( + usEast1InstanceTypes.Difference(usWest1InstanceTypes).SortedValues(), + jc.DeepEquals, + []string{"cc2.8xlarge", "cg1.4xlarge", "cr1.8xlarge", "hi1.4xlarge", "hs1.8xlarge"}, + ) +} + +func (s *InstanceTypesSuite) TestRegionInstanceTypesUnknownRegion(c *gc.C) { + instanceTypes := ec2instancetypes.RegionInstanceTypes("cn-north-1") + c.Assert(instanceTypes, jc.DeepEquals, ec2instancetypes.RegionInstanceTypes("us-east-1")) +} + +func (s *InstanceTypesSuite) TestSupportsClassic(c *gc.C) { + assertSupportsClassic := func(name string) { + c.Assert(ec2instancetypes.SupportsClassic(name), jc.IsTrue) + } + assertDoesNotSupportClassic := func(name string) { + c.Assert(ec2instancetypes.SupportsClassic(name), jc.IsFalse) + } + assertSupportsClassic("c1.medium") + assertSupportsClassic("c3.large") + assertSupportsClassic("cc2.8xlarge") + assertSupportsClassic("cg1.4xlarge") + assertSupportsClassic("cr1.8xlarge") + assertSupportsClassic("d2.8xlarge") + assertSupportsClassic("g2.2xlarge") + assertSupportsClassic("hi1.4xlarge") + assertSupportsClassic("hs1.8xlarge") + assertSupportsClassic("i2.2xlarge") + assertSupportsClassic("m1.medium") + assertSupportsClassic("m2.medium") + assertSupportsClassic("m3.medium") + assertSupportsClassic("r3.8xlarge") + assertSupportsClassic("t1.micro") + assertDoesNotSupportClassic("c4.large") + assertDoesNotSupportClassic("m4.large") + assertDoesNotSupportClassic("p2.xlarge") + assertDoesNotSupportClassic("t2.medium") + assertDoesNotSupportClassic("x1.32xlarge") +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/ec2/internal/ec2instancetypes/package_test.go 
juju-core-2.0.0/src/github.com/juju/juju/provider/ec2/internal/ec2instancetypes/package_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/ec2/internal/ec2instancetypes/package_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/ec2/internal/ec2instancetypes/package_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,14 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package ec2instancetypes_test + +import ( + "testing" + + gc "gopkg.in/check.v1" +) + +func TestPackage(t *testing.T) { + gc.TestingT(t) +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/ec2/internal/ec2instancetypes/process_cost_data.go juju-core-2.0.0/src/github.com/juju/juju/provider/ec2/internal/ec2instancetypes/process_cost_data.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/ec2/internal/ec2instancetypes/process_cost_data.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/ec2/internal/ec2instancetypes/process_cost_data.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,402 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+ +// +build ignore + +package main + +import ( + "bytes" + "encoding/json" + "flag" + "fmt" + "go/format" + "io" + "log" + "os" + "strconv" + "strings" + "text/template" + "time" + + "github.com/juju/errors" + "github.com/juju/utils" + "github.com/juju/utils/set" +) + +const ( + baseURL = `https://pricing.us-east-1.amazonaws.com` + ec2IndexPath = `/offers/v1.0/aws/AmazonEC2/current/index.json` +) + +var ( + nowish = time.Now() +) + +func Main() (int, error) { + flag.Usage = func() { + fmt.Fprintf(os.Stderr, "Usage: %s [-o outfile] [json-file]:\n", os.Args[0]) + } + + var outfilename string + flag.StringVar(&outfilename, "o", "-", "Name of a file to write the output to") + flag.Parse() + + var infilename string + var fin *os.File + switch flag.NArg() { + case 0: + infilename = "" + fin = os.Stdin + case 1: + var err error + infilename = flag.Arg(0) + fin, err = os.Open(infilename) + if err != nil { + return -1, err + } + defer fin.Close() + default: + fmt.Println(flag.Args()) + flag.Usage() + return 2, nil + } + + fout := os.Stdout + if outfilename != "-" { + var err error + fout, err = os.Create(outfilename) + if err != nil { + return -1, err + } + defer fout.Close() + } + + tmpl, err := template.New("instanceTypes").Parse(` +// Copyright {{.Year}} Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+ +package ec2instancetypes + +import ( + "github.com/juju/utils/arch" + + "github.com/juju/juju/environs/instances" +) + +var ( + paravirtual = "pv" + hvm = "hvm" + amd64 = []string{arch.AMD64} + both = []string{arch.AMD64, arch.I386} +) + +// Version: {{.Meta.Version}} +// Publication date: {{.Meta.PublicationDate}} +// +// {{.Meta.Disclaimer}} + +var allInstanceTypes = map[string][]instances.InstanceType{ +{{range $region, $instanceTypes := .InstanceTypes}} +{{printf "%q: {" $region}} +{{range $index, $instanceType := $instanceTypes}}{{with $instanceType}} + // SKU: {{.SKU}} + // Instance family: {{.InstanceFamily}} + // Storage: {{.Storage}} + { + Name: {{printf "%q" .Name}}, + Arches: {{.Arches}}, + CpuCores: {{.CpuCores}}, + CpuPower: instances.CpuPower({{.CpuPower}}), + Mem: {{.Mem}}, + VirtType: &{{.VirtType}}, + Cost: {{.Cost}}, + {{if .Deprecated}}Deprecated: true,{{end}} + }, +{{end}}{{end}} +}, +{{end}} +}`) + if err != nil { + panic(err) + } + + fmt.Fprintln(os.Stderr, "Processing", infilename) + instanceTypes, meta, err := process(fin) + if err != nil { + return -1, err + } + + templateData := struct { + Year int + InstanceTypes map[string][]instanceType + Meta metadata + }{ + nowish.Year(), + instanceTypes, + meta, + } + + var buf bytes.Buffer + if err := tmpl.Execute(&buf, templateData); err != nil { + return -1, err + } + + formatted, err := format.Source(buf.Bytes()) + if err != nil { + return -1, err + } + if _, err := fout.Write(formatted); err != nil { + return -1, err + } + + return 0, nil +} + +func process(in io.Reader) (map[string][]instanceType, metadata, error) { + var index indexFile + if err := json.NewDecoder(in).Decode(&index); err != nil { + return nil, metadata{}, err + } + meta := metadata{ + Version: index.Version, + Disclaimer: index.Disclaimer, + PublicationDate: index.PublicationDate, + } + instanceTypes := make(map[string][]instanceType) + skus := set.NewStrings() + for sku := range index.Products { + skus.Add(sku) + } + for 
_, sku := range skus.SortedValues() { + productInfo := index.Products[sku] + if productInfo.ProductFamily != "Compute Instance" { + continue + } + if productInfo.OperatingSystem != "Linux" { + // We don't care about absolute cost, so we don't need + // to include the cost of OS. + continue + } + if productInfo.Tenancy != "Shared" { + continue + } + fmt.Fprintf(os.Stderr, "- Processing %q\n", sku) + + // Some instance types support both 32-bit and 64-bit, some + // only support 64-bit. + arches := "amd64" + if productInfo.ProcessorArchitecture == "32-bit or 64-bit" { + arches = "both" + } + + // NOTE(axw) it's not really either/or. Some instance types are + // capable of launching either HVM or PV images (e.g. T1, C3). + // HVM is preferred, though, so we err on that side. + virtType := "hvm" + if isParavirtualOnly(productInfo) { + virtType = "paravirtual" + } + + memMB, err := parseMem(productInfo.Memory) + if err != nil { + return nil, metadata{}, errors.Annotate(err, "parsing mem") + } + + cpuPower, err := calculateCPUPower(productInfo) + if err != nil { + return nil, metadata{}, errors.Annotate(err, "calculating CPU power") + } + + instanceType := instanceType{ + Name: productInfo.InstanceType, + Arches: arches, + CpuCores: productInfo.VCPU, + CpuPower: cpuPower, + Mem: memMB, + VirtType: virtType, + + // Extended information + SKU: sku, + InstanceFamily: productInfo.InstanceFamily, + Storage: productInfo.Storage, + } + if strings.ToLower(productInfo.CurrentGeneration) == "no" { + instanceType.Deprecated = true + } + + // Get cost information. We only support on-demand. 
+ for skuOfferTermCode, skuTerms := range index.Terms.OnDemand[sku] { + if !skuTerms.EffectiveDate.Before(nowish) { + continue + } + fmt.Fprintf(os.Stderr, "-- Processing offer %q\n", skuOfferTermCode) + for skuOfferTermCodeRateCode, pricingDetails := range skuTerms.PriceDimensions { + fmt.Fprintf(os.Stderr, "--- Processing rate code %q\n", skuOfferTermCodeRateCode) + fmt.Fprintf(os.Stderr, " Description: %s\n", pricingDetails.Description) + fmt.Fprintf(os.Stderr, " Cost: $%f/%s\n", + pricingDetails.PricePerUnit.USD, pricingDetails.Unit, + ) + instanceType.Cost = uint64(pricingDetails.PricePerUnit.USD * 1000) + break + } + } + + region, ok := locationToRegion(productInfo.Location) + if !ok { + return nil, metadata{}, errors.Errorf("unknown location %q", productInfo.Location) + } + regionInstanceTypes := instanceTypes[region] + regionInstanceTypes = append(regionInstanceTypes, instanceType) + instanceTypes[region] = regionInstanceTypes + } + return instanceTypes, meta, nil +} + +func calculateCPUPower(info productInfo) (uint64, error) { + // T-class instances have burstable CPU. This is not captured + // in the pricing information, so we have to hard-code it. We + // will have to update this list when T3 instances come along. + switch info.InstanceType { + case "t1.micro": + return 20, nil + case "t2.nano": + return 5, nil + case "t2.micro": + return 10, nil + case "t2.small": + return 20, nil + case "t2.medium": + return 40, nil + case "t2.large": + return 60, nil + } + if info.ClockSpeed == "" { + return info.VCPU * 100, nil + } + + // If the information includes a clock speed, we use that + // to estimate the ECUs. The pricing information does not + // include the ECUs, but they're only estimates anyway. + // Amazon moved to "vCPUs" quite some time ago. 
+ clock, err := strconv.ParseFloat(strings.Fields(info.ClockSpeed)[0], 64) + if err != nil { + return 0, errors.Annotate(err, "parsing clock speed") + } + return uint64(clock * 1.4 * 100 * float64(info.VCPU)), nil +} + +func parseMem(s string) (uint64, error) { + s = strings.Replace(s, " ", "", -1) + s = strings.Replace(s, ",", "", -1) // e.g. 1,952 -> 1952 + + // Sometimes it's GiB, sometimes Gib. We don't like Gib. + s = strings.Replace(s, "Gib", "GiB", 1) + return utils.ParseSize(s) +} + +func locationToRegion(loc string) (string, bool) { + regions := map[string]string{ + "US East (N. Virginia)": "us-east-1", + "US West (N. California)": "us-west-1", + "US West (Oregon)": "us-west-2", + "Asia Pacific (Mumbai)": "ap-south-1", + "Asia Pacific (Seoul)": "ap-northeast-2", + "Asia Pacific (Singapore)": "ap-southeast-1", + "Asia Pacific (Sydney)": "ap-southeast-2", + "Asia Pacific (Tokyo)": "ap-northeast-1", + "EU (Frankfurt)": "eu-central-1", + "EU (Ireland)": "eu-west-1", + "South America (Sao Paulo)": "sa-east-1", + "AWS GovCloud (US)": "us-gov-west-1", + + // NOTE(axw) at the time of writing, there is no + // pricing information for cn-north-1. + "China (Beijing)": "cn-north-1", + } + region, ok := regions[loc] + return region, ok +} + +func isParavirtualOnly(info productInfo) bool { + // Only very old instance types are restricted to paravirtual. 
+ switch strings.SplitN(info.InstanceType, ".", 2)[0] { + case "t1", "m1", "c1", "m2": + return true + } + return false +} + +type metadata struct { + Version string + PublicationDate time.Time + Disclaimer string +} + +type indexFile struct { + Version string `json:"version"` + PublicationDate time.Time `json:"publicationDate"` + Disclaimer string `json:"disclaimer"` + Products map[string]productInfo `json:"products"` + Terms terms `json:"terms"` +} + +type productInfo struct { + ProductFamily string `json:"productFamily"` // Compute Instance + ProductAttributes `json:"attributes"` +} + +type ProductAttributes struct { + Location string `json:"location"` // e.g. US East (N. Virginia) + InstanceType string `json:"instanceType"` // e.g. t2.nano + CurrentGeneration string `json:"currentGeneration"` // Yes|No (or missing) + InstanceFamily string `json:"instanceFamily"` // e.g. Storage optimised + Storage string `json:"storage"` // e.g. 24 x 2000 + VCPU uint64 `json:"vcpu,string"` // e.g. 16 + ClockSpeed string `json:"clockSpeed"` // e.g. 2.5 GHz + Memory string `json:"memory"` // N.NN GiB + OperatingSystem string `json:"operatingSystem"` // Windows|RHEL|SUSE|Linux + Tenancy string `json:"tenancy"` // Dedicated|Host|Shared + ProcessorArchitecture string `json:"processorArchitecture"` // (32-bit or )?64-bit +} + +type terms struct { + OnDemand map[string]map[string]skuTerms `json:"OnDemand"` +} + +type skuTerms struct { + EffectiveDate time.Time `json:"effectiveDate"` + PriceDimensions map[string]pricingDetails `json:"priceDimensions"` +} + +type pricingDetails struct { + Description string `json:"description"` + Unit string `json:"unit"` + PricePerUnit struct { + USD float64 `json:"USD,string"` + } `json:"pricePerUnit"` +} + +type instanceType struct { + Name string + Arches string // amd64|both + CpuCores uint64 + Mem uint64 + Cost uint64 // paravirtual|hvm + VirtType string + CpuPower uint64 + Deprecated bool // i.e. 
not current generation + + // extended information, for comments + SKU string + InstanceFamily string + Storage string +} + +func main() { + rc, err := Main() + if err != nil { + log.Fatal(err) + } + os.Exit(rc) +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/ec2/live_test.go juju-core-2.0.0/src/github.com/juju/juju/provider/ec2/live_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/ec2/live_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/ec2/live_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -129,12 +129,11 @@ inst, hc := testing.AssertStartInstanceWithConstraints(c, t.Env, t.ControllerUUID, "30", cons) defer t.Env.StopInstances(inst.Id()) ec2inst := ec2.InstanceEC2(inst) - c.Assert(ec2inst.InstanceType, gc.Equals, "m3.large") + c.Assert(ec2inst.InstanceType, gc.Equals, "m4.large") c.Assert(*hc.Arch, gc.Equals, "amd64") - c.Assert(*hc.Mem, gc.Equals, uint64(7680)) - c.Assert(*hc.RootDisk, gc.Equals, uint64(8192)) + c.Assert(*hc.Mem, gc.Equals, uint64(8*1024)) + c.Assert(*hc.RootDisk, gc.Equals, uint64(8*1024)) c.Assert(*hc.CpuCores, gc.Equals, uint64(2)) - c.Assert(*hc.CpuPower, gc.Equals, uint64(650)) } func (t *LiveTests) TestControllerInstances(c *gc.C) { diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/ec2/local_test.go juju-core-2.0.0/src/github.com/juju/juju/provider/ec2/local_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/ec2/local_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/ec2/local_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -21,7 +21,6 @@ "gopkg.in/amz.v3/aws" amzec2 "gopkg.in/amz.v3/ec2" "gopkg.in/amz.v3/ec2/ec2test" - "gopkg.in/amz.v3/s3/s3test" gc "gopkg.in/check.v1" "gopkg.in/juju/names.v2" goyaml "gopkg.in/yaml.v2" @@ -72,8 +71,7 @@ // EC2 server that runs within the test process itself. 
type localLiveSuite struct { LiveTests - srv localServer - restoreEC2Patching func() + srv localServer } func (t *localLiveSuite) SetUpSuite(c *gc.C) { @@ -85,25 +83,28 @@ "secret-key": "x", }, ) - t.CloudRegion = "test" // Upload arches that ec2 supports; add to this // as ec2 coverage expands. t.UploadArches = []string{arch.AMD64, arch.I386} t.TestConfig = localConfigAttrs - t.restoreEC2Patching = patchEC2ForTesting(c) imagetesting.PatchOfficialDataSources(&t.BaseSuite.CleanupSuite, "test:") t.BaseSuite.PatchValue(&imagemetadata.SimplestreamsImagesPublicKey, sstesting.SignedMetadataPublicKey) t.BaseSuite.PatchValue(&keys.JujuPublicKey, sstesting.SignedMetadataPublicKey) t.BaseSuite.PatchValue(ec2.DeleteSecurityGroupInsistently, deleteSecurityGroupForTestFunc) t.srv.createRootDisks = true t.srv.startServer(c) + + region := t.srv.region() + t.CloudRegion = region.Name + t.CloudEndpoint = region.EC2Endpoint + restoreEC2Patching := patchEC2ForTesting(c, region) + t.BaseSuite.AddCleanup(func(c *gc.C) { restoreEC2Patching() }) } func (t *localLiveSuite) TearDownSuite(c *gc.C) { t.LiveTests.TearDownSuite(c) t.srv.stopServer(c) - t.restoreEC2Patching() } // localServer represents a fake EC2 server running within @@ -114,10 +115,7 @@ // instances. 
createRootDisks bool - client *amzec2.EC2 ec2srv *ec2test.Server - s3srv *s3test.Server - config *s3test.Config defaultVPC *amzec2.VPC zones []amzec2.AvailabilityZoneInfo @@ -130,31 +128,19 @@ c.Fatalf("cannot start ec2 test server: %v", err) } srv.ec2srv.SetCreateRootDisks(srv.createRootDisks) - srv.s3srv, err = s3test.NewServer(srv.config) - if err != nil { - c.Fatalf("cannot start s3 test server: %v", err) - } - aws.Regions["test"] = aws.Region{ - Name: "test", - EC2Endpoint: srv.ec2srv.URL(), - S3Endpoint: srv.s3srv.URL(), - S3LocationConstraint: true, - } srv.addSpice(c) - region := aws.Regions["test"] - signer := aws.SignV4Factory(region.Name, "ec2") - srv.client = amzec2.New(aws.Auth{}, region, signer) + region := srv.region() zones := make([]amzec2.AvailabilityZoneInfo, 3) - zones[0].Region = "test" - zones[0].Name = "test-available" + zones[0].Region = region.Name + zones[0].Name = region.Name + "-available" zones[0].State = "available" - zones[1].Region = "test" - zones[1].Name = "test-impaired" + zones[1].Region = region.Name + zones[1].Name = region.Name + "-impaired" zones[1].State = "impaired" - zones[2].Region = "test" - zones[2].Name = "test-unavailable" + zones[2].Region = region.Name + zones[2].Name = region.Name + "-unavailable" zones[2].State = "unavailable" srv.ec2srv.SetAvailabilityZones(zones) srv.ec2srv.SetInitialInstanceState(ec2test.Pending) @@ -165,6 +151,18 @@ srv.defaultVPC = &defaultVPC } +func (srv *localServer) client() *amzec2.EC2 { + region := srv.region() + return amzec2.New(aws.Auth{}, region, aws.SignV4Factory(region.Name, "ec2")) +} + +func (srv *localServer) region() aws.Region { + return aws.Region{ + Name: "test", + EC2Endpoint: srv.ec2srv.URL(), + } +} + // addSpice adds some "spice" to the local server // by adding state that may cause tests to fail. 
func (srv *localServer) addSpice(c *gc.C) { @@ -181,11 +179,6 @@ func (srv *localServer) stopServer(c *gc.C) { srv.ec2srv.Reset(false) srv.ec2srv.Quit() - srv.s3srv.Quit() - // Clear out the region because the server address is - // no longer valid. - delete(aws.Regions, "test") - srv.defaultVPC = nil } @@ -198,8 +191,8 @@ type localServerSuite struct { coretesting.BaseSuite jujutest.Tests - srv localServer - restoreEC2Patching func() + srv localServer + client *amzec2.EC2 } func (t *localServerSuite) SetUpSuite(c *gc.C) { @@ -211,13 +204,11 @@ "secret-key": "x", }, ) - t.CloudRegion = "test" // Upload arches that ec2 supports; add to this // as ec2 coverage expands. t.UploadArches = []string{arch.AMD64, arch.I386} t.TestConfig = localConfigAttrs - t.restoreEC2Patching = patchEC2ForTesting(c) imagetesting.PatchOfficialDataSources(&t.BaseSuite.CleanupSuite, "test:") t.BaseSuite.PatchValue(&imagemetadata.SimplestreamsImagesPublicKey, sstesting.SignedMetadataPublicKey) t.BaseSuite.PatchValue(&keys.JujuPublicKey, sstesting.SignedMetadataPublicKey) @@ -235,7 +226,6 @@ } func (t *localServerSuite) TearDownSuite(c *gc.C) { - t.restoreEC2Patching() t.Tests.TearDownSuite(c) t.BaseSuite.TearDownSuite(c) } @@ -243,6 +233,12 @@ func (t *localServerSuite) SetUpTest(c *gc.C) { t.BaseSuite.SetUpTest(c) t.srv.startServer(c) + region := t.srv.region() + t.CloudRegion = region.Name + t.CloudEndpoint = region.EC2Endpoint + t.client = t.srv.client() + restoreEC2Patching := patchEC2ForTesting(c, region) + t.AddCleanup(func(c *gc.C) { restoreEC2Patching() }) t.Tests.SetUpTest(c) } @@ -399,9 +395,8 @@ // check that a new instance will be started with a machine agent inst1, hc := testing.AssertStartInstance(c, env, t.ControllerUUID, "1") c.Check(*hc.Arch, gc.Equals, "amd64") - c.Check(*hc.Mem, gc.Equals, uint64(3840)) + c.Check(*hc.Mem, gc.Equals, uint64(3.75*1024)) c.Check(*hc.CpuCores, gc.Equals, uint64(1)) - c.Assert(*hc.CpuPower, gc.Equals, uint64(300)) inst = 
t.srv.ec2srv.Instance(string(inst1.Id())) c.Assert(inst, gc.NotNil) userData, err = utils.Gunzip(inst.UserData) @@ -485,9 +480,8 @@ // check that a new instance will be started with a machine agent inst1, hc := testing.AssertStartInstance(c, env, t.ControllerUUID, "1") c.Check(*hc.Arch, gc.Equals, "amd64") - c.Check(*hc.Mem, gc.Equals, uint64(3840)) + c.Check(*hc.Mem, gc.Equals, uint64(3.75*1024)) c.Check(*hc.CpuCores, gc.Equals, uint64(1)) - c.Assert(*hc.CpuPower, gc.Equals, uint64(300)) inst = t.srv.ec2srv.Instance(string(inst1.Id())) c.Assert(inst, gc.NotNil) userData, err = utils.Gunzip(inst.UserData) @@ -608,12 +602,9 @@ func (t *localServerSuite) TestDestroyHostedModelDeleteSecurityGroupInsistentlyError(c *gc.C) { env := t.prepareAndBootstrap(c) - - cfg, err := env.Config().Apply(map[string]interface{}{"controller-uuid": "7e386e08-cba7-44a4-a76e-7c1633584210"}) - c.Assert(err, jc.ErrorIsNil) - env, err = environs.New(environs.OpenParams{ + hostedEnv, err := environs.New(environs.OpenParams{ Cloud: t.CloudSpec(), - Config: cfg, + Config: env.Config(), }) c.Assert(err, jc.ErrorIsNil) @@ -623,7 +614,7 @@ ) error { return errors.New(msg) }) - err = env.Destroy() + err = hostedEnv.Destroy() c.Assert(err, gc.ErrorMatches, "cannot delete environment security groups: cannot delete default security group: "+msg) } @@ -631,13 +622,17 @@ controllerEnv := t.prepareAndBootstrap(c) // Create a hosted model environment with an instance and a volume. 
+ hostedModelUUID := "7e386e08-cba7-44a4-a76e-7c1633584210" t.srv.ec2srv.SetInitialInstanceState(ec2test.Running) cfg, err := controllerEnv.Config().Apply(map[string]interface{}{ - "uuid": "7e386e08-cba7-44a4-a76e-7c1633584210", + "uuid": hostedModelUUID, "firewall-mode": "global", }) c.Assert(err, jc.ErrorIsNil) - env, err := environs.New(environs.OpenParams{t.CloudSpec(), cfg}) + env, err := environs.New(environs.OpenParams{ + Cloud: t.CloudSpec(), + Config: cfg, + }) c.Assert(err, jc.ErrorIsNil) inst, _ := testing.AssertStartInstance(c, env, t.ControllerUUID, "0") c.Assert(err, jc.ErrorIsNil) @@ -651,7 +646,7 @@ Provider: ec2.EBS_ProviderType, ResourceTags: map[string]string{ tags.JujuController: t.ControllerUUID, - tags.JujuModel: "7e386e08-cba7-44a4-a76e-7c1633584210", + tags.JujuModel: hostedModelUUID, }, Attachment: &storage.VolumeAttachmentParams{ AttachmentParams: storage.AttachmentParams{ @@ -678,7 +673,7 @@ c.Assert(volIds, jc.SameContents, expect) } assertGroups := func(expect ...string) { - groupsResp, err := t.srv.client.SecurityGroups(nil, nil) + groupsResp, err := t.client.SecurityGroups(nil, nil) c.Assert(err, jc.ErrorIsNil) names := make([]string, len(groupsResp.Groups)) for i, group := range groupsResp.Groups { @@ -691,10 +686,10 @@ assertVolumes(volumeResults[0].Volume.VolumeId) assertGroups( "default", - "juju-"+t.ControllerUUID, - "juju-"+t.ControllerUUID+"-0", - "juju-7e386e08-cba7-44a4-a76e-7c1633584210", - "juju-7e386e08-cba7-44a4-a76e-7c1633584210-global", + "juju-"+controllerEnv.Config().UUID(), + "juju-"+controllerEnv.Config().UUID()+"-0", + "juju-"+hostedModelUUID, + "juju-"+hostedModelUUID+"-global", ) // Destroy the controller resources. 
This should destroy the hosted @@ -739,9 +734,8 @@ env := t.prepareAndBootstrap(c) _, hc := testing.AssertStartInstance(c, env, t.ControllerUUID, "1") c.Check(*hc.Arch, gc.Equals, "amd64") - c.Check(*hc.Mem, gc.Equals, uint64(3840)) + c.Check(*hc.Mem, gc.Equals, uint64(3.75*1024)) c.Check(*hc.CpuCores, gc.Equals, uint64(1)) - c.Assert(*hc.CpuPower, gc.Equals, uint64(300)) } func (t *localServerSuite) TestStartInstanceAvailZone(c *gc.C) { @@ -1159,19 +1153,56 @@ env := t.Prepare(c) validator, err := env.ConstraintsValidator() c.Assert(err, jc.ErrorIsNil) - cons := constraints.MustParse("arch=ppc64el") - _, err = validator.Validate(cons) - c.Assert(err, gc.ErrorMatches, "invalid constraint value: arch=ppc64el\nvalid values are: \\[amd64 i386\\]") - cons = constraints.MustParse("instance-type=foo") + cons := constraints.MustParse("instance-type=foo") _, err = validator.Validate(cons) c.Assert(err, gc.ErrorMatches, "invalid constraint value: instance-type=foo\nvalid values are:.*") } +func (t *localServerSuite) TestConstraintsValidatorVocabNoDefaultOrSpecifiedVPC(c *gc.C) { + t.srv.defaultVPC.IsDefault = false + err := t.srv.ec2srv.UpdateVPC(*t.srv.defaultVPC) + c.Assert(err, jc.ErrorIsNil) + + env := t.Prepare(c) + assertVPCInstanceTypeNotAvailable(c, env) +} + +func (t *localServerSuite) TestConstraintsValidatorVocabDefaultVPC(c *gc.C) { + env := t.Prepare(c) + assertVPCInstanceTypeAvailable(c, env) +} + +func (t *localServerSuite) TestConstraintsValidatorVocabSpecifiedVPC(c *gc.C) { + t.srv.defaultVPC.IsDefault = false + err := t.srv.ec2srv.UpdateVPC(*t.srv.defaultVPC) + c.Assert(err, jc.ErrorIsNil) + + t.TestConfig["vpc-id"] = t.srv.defaultVPC.Id + defer delete(t.TestConfig, "vpc-id") + + env := t.Prepare(c) + assertVPCInstanceTypeAvailable(c, env) +} + +func assertVPCInstanceTypeAvailable(c *gc.C, env environs.Environ) { + validator, err := env.ConstraintsValidator() + c.Assert(err, jc.ErrorIsNil) + _, err = 
validator.Validate(constraints.MustParse("instance-type=t2.medium")) + c.Assert(err, jc.ErrorIsNil) +} + +func assertVPCInstanceTypeNotAvailable(c *gc.C, env environs.Environ) { + validator, err := env.ConstraintsValidator() + c.Assert(err, jc.ErrorIsNil) + _, err = validator.Validate(constraints.MustParse("instance-type=t2.medium")) + c.Assert(err, gc.ErrorMatches, "invalid constraint value: instance-type=t2.medium\n.*") +} + func (t *localServerSuite) TestConstraintsMerge(c *gc.C) { env := t.Prepare(c) validator, err := env.ConstraintsValidator() c.Assert(err, jc.ErrorIsNil) - consA := constraints.MustParse("arch=amd64 mem=1G cpu-power=10 cpu-cores=2 tags=bar") + consA := constraints.MustParse("arch=amd64 mem=1G cpu-power=10 cores=2 tags=bar") consB := constraints.MustParse("arch=i386 instance-type=m1.small") cons, err := validator.Merge(consA, consB) c.Assert(err, jc.ErrorIsNil) @@ -1224,11 +1255,15 @@ } func (t *localServerSuite) TestValidateImageMetadata(c *gc.C) { + region := t.srv.region() + aws.Regions[region.Name] = t.srv.region() + defer delete(aws.Regions, region.Name) + env := t.Prepare(c) params, err := env.(simplestreams.MetadataValidator).MetadataLookupParams("test") c.Assert(err, jc.ErrorIsNil) params.Series = series.LatestLts() - params.Endpoint = "https://ec2.endpoint.com" + params.Endpoint = region.EC2Endpoint params.Sources, err = environs.ImageMetadataSources(env) c.Assert(err, jc.ErrorIsNil) image_ids, _, err := imagemetadata.ValidateImageMetadata(params) @@ -1440,15 +1475,28 @@ }) } +func (s *localServerSuite) TestBootstrapInstanceConstraints(c *gc.C) { + env := s.prepareAndBootstrap(c) + inst, hc := testing.AssertStartControllerInstance(c, env, s.ControllerUUID, "1") + ec2inst := ec2.InstanceEC2(inst) + + // Controllers should be started with a burstable + // instance if possible, and a 32 GiB disk. 
+ c.Assert(ec2inst.InstanceType, gc.Equals, "t2.medium") + c.Assert(*hc.Arch, gc.Equals, "amd64") + c.Assert(*hc.Mem, gc.Equals, uint64(4*1024)) + c.Assert(*hc.RootDisk, gc.Equals, uint64(32*1024)) + c.Assert(*hc.CpuCores, gc.Equals, uint64(2)) +} + // localNonUSEastSuite is similar to localServerSuite but the S3 mock server // behaves as if it is not in the us-east region. type localNonUSEastSuite struct { coretesting.BaseSuite sstesting.TestDataSuite - restoreEC2Patching func() - srv localServer - env environs.Environ + srv localServer + env environs.Environ } func (t *localNonUSEastSuite) SetUpSuite(c *gc.C) { @@ -1457,24 +1505,19 @@ t.PatchValue(&imagemetadata.SimplestreamsImagesPublicKey, sstesting.SignedMetadataPublicKey) t.PatchValue(&keys.JujuPublicKey, sstesting.SignedMetadataPublicKey) - - t.restoreEC2Patching = patchEC2ForTesting(c) t.BaseSuite.PatchValue(ec2.DeleteSecurityGroupInsistently, deleteSecurityGroupForTestFunc) } func (t *localNonUSEastSuite) TearDownSuite(c *gc.C) { - t.restoreEC2Patching() t.TestDataSuite.TearDownSuite(c) t.BaseSuite.TearDownSuite(c) } func (t *localNonUSEastSuite) SetUpTest(c *gc.C) { t.BaseSuite.SetUpTest(c) - t.srv.config = &s3test.Config{ - Send409Conflict: true, - } t.srv.startServer(c) + region := t.srv.region() credential := cloud.NewCredential( cloud.AccessKeyAuthType, map[string]string{ @@ -1482,6 +1525,8 @@ "secret-key": "x", }, ) + restoreEC2Patching := patchEC2ForTesting(c, region) + t.AddCleanup(func(c *gc.C) { restoreEC2Patching() }) env, err := bootstrap.Prepare( envtesting.BootstrapContext(c), @@ -1492,7 +1537,8 @@ ControllerName: localConfigAttrs["name"].(string), Cloud: environs.CloudSpec{ Type: "ec2", - Region: "test", + Region: region.Name, + Endpoint: region.EC2Endpoint, Credential: &credential, }, AdminSecret: testing.AdminSecret, @@ -1507,18 +1553,14 @@ t.BaseSuite.TearDownTest(c) } -func patchEC2ForTesting(c *gc.C) func() { - ec2.UseTestImageData(c, ec2.TestImagesData) - 
ec2.UseTestInstanceTypeData(ec2.TestInstanceTypeCosts) - ec2.UseTestRegionData(ec2.TestRegions) - restoreTimeouts := envtesting.PatchAttemptStrategies(ec2.ShortAttempt, ec2.StorageAttempt) +func patchEC2ForTesting(c *gc.C, region aws.Region) func() { + ec2.UseTestImageData(c, ec2.MakeTestImageStreamsData(region)) + restoreTimeouts := envtesting.PatchAttemptStrategies(ec2.ShortAttempt) restoreFinishBootstrap := envtesting.DisableFinishBootstrap() return func() { restoreFinishBootstrap() restoreTimeouts() ec2.UseTestImageData(c, nil) - ec2.UseTestInstanceTypeData(nil) - ec2.UseTestRegionData(nil) } } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/ec2/provider.go juju-core-2.0.0/src/github.com/juju/juju/provider/ec2/provider.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/ec2/provider.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/ec2/provider.go 2016-10-13 14:31:49.000000000 +0000 @@ -5,13 +5,12 @@ import ( "fmt" + "strings" "github.com/juju/errors" "github.com/juju/loggo" - "github.com/juju/utils/arch" "gopkg.in/amz.v3/aws" "gopkg.in/amz.v3/ec2" - "gopkg.in/amz.v3/s3" "github.com/juju/juju/cloud" "github.com/juju/juju/environs" @@ -27,13 +26,6 @@ var providerInstance environProvider -// RestrictedConfigAttributes is specified in the EnvironProvider interface. -func (p environProvider) RestrictedConfigAttributes() []string { - // TODO(dimitern): Both of these shouldn't be restricted for hosted models. - // See bug http://pad.lv/1580417 for more information. - return []string{"vpc-id-force"} -} - // Open is specified in the EnvironProvider interface. 
func (p environProvider) Open(args environs.OpenParams) (environs.Environ, error) { logger.Infof("opening model %q", args.Config.Name()) @@ -42,8 +34,17 @@ e.cloud = args.Cloud e.name = args.Config.Name() + // The endpoints in public-clouds.yaml from 2.0-rc2 + // and before were wrong, so we use whatever is defined + // in goamz/aws if available. + if isBrokenCloud(e.cloud) { + if region, ok := aws.Regions[e.cloud.Region]; ok { + e.cloud.Endpoint = region.EC2Endpoint + } + } + var err error - e.ec2, e.s3, err = awsClients(args.Cloud) + e.ec2, err = awsClient(e.cloud) if err != nil { return nil, errors.Trace(err) } @@ -54,9 +55,30 @@ return e, nil } -func awsClients(cloud environs.CloudSpec) (*ec2.EC2, *s3.S3, error) { +// isBrokenCloud reports whether the given CloudSpec is from an old, +// broken version of public-clouds.yaml. +func isBrokenCloud(cloud environs.CloudSpec) bool { + // The public-clouds.yaml from 2.0-rc2 and before was + // complete nonsense for general regions and for + // govcloud. The cn-north-1 region has a trailing slash, + // which we don't want as it means we won't match the + // simplestreams data. 
+ switch cloud.Region { + case "us-east-1", "us-west-1", "us-west-2", "eu-west-1", + "eu-central-1", "ap-southeast-1", "ap-southeast-2", + "ap-northeast-1", "ap-northeast-2", "sa-east-1": + return cloud.Endpoint == fmt.Sprintf("https://%s.aws.amazon.com/v1.2/", cloud.Region) + case "cn-north-1": + return strings.HasSuffix(cloud.Endpoint, "/") + case "us-gov-west-1": + return cloud.Endpoint == "https://ec2.us-gov-west-1.amazonaws-govcloud.com" + } + return false +} + +func awsClient(cloud environs.CloudSpec) (*ec2.EC2, error) { if err := validateCloudSpec(cloud); err != nil { - return nil, nil, errors.Annotate(err, "validating cloud spec") + return nil, errors.Annotate(err, "validating cloud spec") } credentialAttrs := cloud.Credential.Attributes() @@ -67,10 +89,12 @@ SecretKey: secretKey, } - // TODO(axw) define region in terms of EC2 and S3 endpoints. - region := aws.Regions[cloud.Region] - signer := aws.SignV4Factory(region.Name, "ec2") - return ec2.New(auth, region, signer), s3.New(auth, region), nil + region := aws.Region{ + Name: cloud.Region, + EC2Endpoint: cloud.Endpoint, + } + signer := aws.SignV4Factory(cloud.Region, "ec2") + return ec2.New(auth, region, signer), nil } // PrepareConfig is specified in the EnvironProvider interface. 
@@ -93,9 +117,6 @@ if err := c.Validate(); err != nil { return errors.Trace(err) } - if _, ok := aws.Regions[c.Region]; !ok { - return errors.NotValidf("region name %q", c.Region) - } if c.Credential == nil { return errors.NotValidf("missing credential") } @@ -120,22 +141,16 @@ if region == "" { return nil, fmt.Errorf("region must be specified") } - ec2Region, ok := allRegions[region] + ec2Region, ok := aws.Regions[region] if !ok { return nil, fmt.Errorf("unknown region %q", region) } return &simplestreams.MetadataLookupParams{ - Region: region, - Endpoint: ec2Region.EC2Endpoint, - Architectures: arch.AllSupportedArches, + Region: region, + Endpoint: ec2Region.EC2Endpoint, }, nil } -// SecretAttrs is specified in the EnvironProvider interface. -func (environProvider) SecretAttrs(cfg *config.Config) (map[string]string, error) { - return make(map[string]string), nil -} - const badAccessKey = ` Please ensure the Access Key ID you have specified is correct. You can obtain the Access Key ID via the "Security Credentials" diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/ec2/provider_test.go juju-core-2.0.0/src/github.com/juju/juju/provider/ec2/provider_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/ec2/provider_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/ec2/provider_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -6,10 +6,12 @@ import ( "github.com/juju/testing" jc "github.com/juju/testing/checkers" + "gopkg.in/amz.v3/aws" gc "gopkg.in/check.v1" "github.com/juju/juju/cloud" "github.com/juju/juju/environs" + "github.com/juju/juju/provider/ec2" coretesting "github.com/juju/juju/testing" ) @@ -52,9 +54,56 @@ c.Assert(env, gc.NotNil) } -func (s *ProviderSuite) TestOpenInvalidRegion(c *gc.C) { +func (s *ProviderSuite) TestOpenUnknownRegion(c *gc.C) { + // This test shows that we do *not* check the region names against + // anything in the client. 
That means that when new regions are + // added to AWS, we'll be able to support them. s.spec.Region = "foobar" - s.testOpenError(c, s.spec, `validating cloud spec: region name "foobar" not valid`) + _, err := s.provider.Open(environs.OpenParams{ + Cloud: s.spec, + Config: coretesting.ModelConfig(c), + }) + c.Assert(err, jc.ErrorIsNil) +} + +func (s *ProviderSuite) TestOpenKnownRegionInvalidEndpoint(c *gc.C) { + s.PatchValue(&aws.Regions, map[string]aws.Region{ + "us-east-1": { + EC2Endpoint: "https://testing.invalid", + }, + }) + s.spec.Endpoint = "https://us-east-1.aws.amazon.com/v1.2/" + + env, err := s.provider.Open(environs.OpenParams{ + Cloud: s.spec, + Config: coretesting.ModelConfig(c), + }) + c.Assert(err, jc.ErrorIsNil) + + ec2Client := ec2.EnvironEC2(env) + c.Assert(ec2Client.Region.EC2Endpoint, gc.Equals, "https://testing.invalid") +} + +func (s *ProviderSuite) TestOpenKnownRegionValidEndpoint(c *gc.C) { + // If the endpoint in the cloudspec is not known to be invalid, + // we ignore whatever is in aws.Regions. This way, if the AWS + // endpoints do ever change, we can update public-clouds.yaml + // and have it picked up. 
+ s.PatchValue(&aws.Regions, map[string]aws.Region{ + "us-east-1": { + EC2Endpoint: "https://testing.invalid", + }, + }) + s.spec.Endpoint = "https://ec2.us-east-1.amazonaws.com" + + env, err := s.provider.Open(environs.OpenParams{ + Cloud: s.spec, + Config: coretesting.ModelConfig(c), + }) + c.Assert(err, jc.ErrorIsNil) + + ec2Client := ec2.EnvironEC2(env) + c.Assert(ec2Client.Region.EC2Endpoint, gc.Equals, "https://ec2.us-east-1.amazonaws.com") } func (s *ProviderSuite) TestOpenMissingCredential(c *gc.C) { diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/ec2/securitygroups_test.go juju-core-2.0.0/src/github.com/juju/juju/provider/ec2/securitygroups_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/ec2/securitygroups_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/ec2/securitygroups_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -41,7 +41,7 @@ } func (s *SecurityGroupSuite) TestDeleteSecurityGroupSuccess(c *gc.C) { - err := s.deleteFunc(s.instanceStub, amzec2.SecurityGroup{}, coretesting.NewClock(time.Time{})) + err := s.deleteFunc(s.instanceStub, amzec2.SecurityGroup{}, testing.NewClock(time.Time{})) c.Assert(err, jc.ErrorIsNil) s.instanceStub.CheckCallNames(c, "DeleteSecurityGroup") } @@ -50,14 +50,14 @@ s.instanceStub.deleteSecurityGroup = func(group amzec2.SecurityGroup) (resp *amzec2.SimpleResp, err error) { return nil, &amzec2.Error{Code: "InvalidGroup.NotFound"} } - err := s.deleteFunc(s.instanceStub, amzec2.SecurityGroup{}, coretesting.NewClock(time.Time{})) + err := s.deleteFunc(s.instanceStub, amzec2.SecurityGroup{}, testing.NewClock(time.Time{})) c.Assert(err, jc.ErrorIsNil) s.instanceStub.CheckCallNames(c, "DeleteSecurityGroup") } func (s *SecurityGroupSuite) TestDeleteSecurityGroupFewCalls(c *gc.C) { t0 := time.Time{} - clock := autoAdvancingClock{coretesting.NewClock(t0)} + clock := autoAdvancingClock{testing.NewClock(t0)} count := 0 maxCalls := 4 expectedTimes := 
[]time.Time{ @@ -86,7 +86,7 @@ } type autoAdvancingClock struct { - *coretesting.Clock + *testing.Clock } func (c autoAdvancingClock) After(d time.Duration) <-chan time.Time { diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/ec2/storage.go juju-core-2.0.0/src/github.com/juju/juju/provider/ec2/storage.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/ec2/storage.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/ec2/storage.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,226 +0,0 @@ -// Copyright 2012, 2013 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package ec2 - -import ( - "fmt" - "io" - "net" - "sync" - "time" - - "github.com/juju/errors" - "github.com/juju/utils" - "gopkg.in/amz.v3/s3" - - "github.com/juju/juju/environs/storage" -) - -func init() { - // We will decide when to retry and under what circumstances, not s3. - // Sometimes it is expected a file may not exist and we don't want s3 - // to hold things up by unilaterally deciding to retry for no good reason. - s3.RetryAttempts(false) -} - -func NewStorage(bucket *s3.Bucket) storage.Storage { - return &ec2storage{bucket: bucket} -} - -// ec2storage implements storage.Storage on -// an ec2.bucket. -type ec2storage struct { - sync.Mutex - madeBucket bool - bucket *s3.Bucket -} - -// makeBucket makes the environent's control bucket, the -// place where bootstrap information and deployed charms -// are stored. To avoid two round trips on every PUT operation, -// we do this only once for each environ. -func (s *ec2storage) makeBucket() error { - s.Lock() - defer s.Unlock() - if s.madeBucket { - return nil - } - // PutBucket always return a 200 if we recreate an existing bucket for the - // original s3.amazonaws.com endpoint. For all other endpoints PutBucket - // returns 409 with a known subcode. 
- if err := s.bucket.PutBucket(s3.Private); err != nil && s3ErrCode(err) != "BucketAlreadyOwnedByYou" { - return err - } - - s.madeBucket = true - return nil -} - -func (s *ec2storage) Put(file string, r io.Reader, length int64) error { - if err := s.makeBucket(); err != nil { - return fmt.Errorf("cannot make S3 control bucket: %v", err) - } - err := s.bucket.PutReader(file, r, length, "binary/octet-stream", s3.Private) - if err != nil { - return fmt.Errorf("cannot write file %q to control bucket: %v", file, err) - } - return nil -} - -func (s *ec2storage) Get(file string) (r io.ReadCloser, err error) { - r, err = s.bucket.GetReader(file) - return r, maybeNotFound(err) -} - -func (s *ec2storage) URL(name string) (string, error) { - const sevenDays = 168 * time.Hour - const maxExpiratoryPeriod = sevenDays - return s.bucket.SignedURL(name, maxExpiratoryPeriod) -} - -// TODO(katco): 2016-08-09: lp:1611427 -var storageAttempt = utils.AttemptStrategy{ - Total: 5 * time.Second, - Delay: 200 * time.Millisecond, -} - -// DefaultConsistencyStrategy is specified in the StorageReader interface. -// -// TODO(katco): 2016-08-09: lp:1611427 -func (s *ec2storage) DefaultConsistencyStrategy() utils.AttemptStrategy { - return storageAttempt -} - -// ShouldRetry is specified in the StorageReader interface. -func (s *ec2storage) ShouldRetry(err error) bool { - if err == nil { - return false - } - switch err { - case io.ErrUnexpectedEOF, io.EOF: - return true - } - if s3ErrorStatusCode(err) == 404 { - return true - } - switch e := err.(type) { - case *net.DNSError: - return true - case *net.OpError: - switch e.Op { - case "read", "write": - return true - } - case *s3.Error: - switch e.Code { - case "InternalError": - return true - } - } - return false -} - -// s3ErrorStatusCode returns the HTTP status of the S3 request error, -// if it is an error from an S3 operation, or 0 if it was not. 
-func s3ErrorStatusCode(err error) int { - if err, _ := err.(*s3.Error); err != nil { - return err.StatusCode - } - return 0 -} - -// s3ErrCode returns the text status code of the S3 error code. -func s3ErrCode(err error) string { - if err, ok := err.(*s3.Error); ok { - return err.Code - } - return "" -} - -func (s *ec2storage) Remove(file string) error { - err := s.bucket.Del(file) - // If we can't delete the object because the bucket doesn't - // exist, then we don't care. - if s3ErrorStatusCode(err) == 404 { - return nil - } - return err -} - -func (s *ec2storage) List(prefix string) ([]string, error) { - // TODO cope with more than 1000 objects in the bucket. - resp, err := s.bucket.List(prefix, "", "", 0) - if err != nil { - // If the bucket is not found, it's not an error - // because it's only created when the first - // file is put. - if s3ErrorStatusCode(err) == 404 { - return nil, nil - } - return nil, err - } - var names []string - for _, key := range resp.Contents { - names = append(names, key.Key) - } - return names, nil -} - -func (s *ec2storage) RemoveAll() error { - names, err := storage.List(s, "") - if err != nil { - return err - } - // Remove all the objects in parallel to minimize round-trips. - // If we're in danger of having hundreds of objects, - // we'll want to change this to limit the number - // of concurrent operations. - var wg sync.WaitGroup - wg.Add(len(names)) - errc := make(chan error, len(names)) - for _, name := range names { - name := name - go func() { - if err := s.Remove(name); err != nil { - errc <- err - } - wg.Done() - }() - } - wg.Wait() - select { - case err := <-errc: - return fmt.Errorf("cannot delete all provider state: %v", err) - default: - } - - s.Lock() - defer s.Unlock() - // Even DelBucket fails, it won't harm if we try again - the operation - // might have succeeded even if we get an error. 
- s.madeBucket = false - err = deleteBucket(s) - err = s.bucket.DelBucket() - if s3ErrorStatusCode(err) == 404 { - return nil - } - return err -} - -func deleteBucket(s *ec2storage) (err error) { - for a := s.DefaultConsistencyStrategy().Start(); a.Next(); { - err = s.bucket.DelBucket() - if err == nil || !s.ShouldRetry(err) { - break - } - } - return err -} - -func maybeNotFound(err error) error { - if err != nil && s3ErrorStatusCode(err) == 404 { - return errors.NewNotFound(err, "") - } - return err -} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/ec2/userdata_test.go juju-core-2.0.0/src/github.com/juju/juju/provider/ec2/userdata_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/ec2/userdata_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/ec2/userdata_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -50,7 +50,7 @@ func (s *UserdataSuite) TestAmazonUnknownOS(c *gc.C) { renderer := ec2.AmazonRenderer{} cloudcfg := &cloudinittest.CloudConfig{} - result, err := renderer.Render(cloudcfg, os.Arch) + result, err := renderer.Render(cloudcfg, os.GenericLinux) c.Assert(result, gc.IsNil) - c.Assert(err, gc.ErrorMatches, "Cannot encode userdata for OS: Arch") + c.Assert(err, gc.ErrorMatches, "Cannot encode userdata for OS: GenericLinux") } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/gce/credentials.go juju-core-2.0.0/src/github.com/juju/juju/provider/gce/credentials.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/gce/credentials.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/gce/credentials.go 2016-10-13 14:31:49.000000000 +0000 @@ -14,6 +14,7 @@ "github.com/juju/utils" "github.com/juju/juju/cloud" + "github.com/juju/juju/environs" "github.com/juju/juju/provider/gce/google" ) @@ -136,3 +137,8 @@ credAttrPrivateKey: string(creds.PrivateKey), }), nil } + +// FinalizeCredential is part of the 
environs.ProviderCredentials interface. +func (environProviderCredentials) FinalizeCredential(_ environs.FinalizeCredentialContext, args environs.FinalizeCredentialParams) (*cloud.Credential, error) { + return &args.Credential, nil +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/gce/disks.go juju-core-2.0.0/src/github.com/juju/juju/provider/gce/disks.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/gce/disks.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/gce/disks.go 2016-10-13 14:31:49.000000000 +0000 @@ -21,8 +21,8 @@ ) // StorageProviderTypes implements storage.ProviderRegistry. -func (env *environ) StorageProviderTypes() []storage.ProviderType { - return []storage.ProviderType{storageProviderType} +func (env *environ) StorageProviderTypes() ([]storage.ProviderType, error) { + return []storage.ProviderType{storageProviderType}, nil } // StorageProvider implements storage.ProviderRegistry. diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/gce/environ_broker.go juju-core-2.0.0/src/github.com/juju/juju/provider/gce/environ_broker.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/gce/environ_broker.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/gce/environ_broker.go 2016-10-13 14:31:49.000000000 +0000 @@ -203,12 +203,6 @@ // See: http://cloudinit.readthedocs.org metadata[metadataKeyEncoding] = "base64" - authKeys, err := google.FormatAuthorizedKeys(args.InstanceConfig.AuthorizedKeys, "ubuntu") - if err != nil { - return nil, errors.Trace(err) - } - - metadata[metadataKeySSHKeys] = authKeys case jujuos.Windows: metadata[metadataKeyWindowsUserdata] = string(userData) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/gce/environ_broker_test.go juju-core-2.0.0/src/github.com/juju/juju/provider/gce/environ_broker_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/gce/environ_broker_test.go 
2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/gce/environ_broker_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -146,10 +146,10 @@ } func (s *environBrokerSuite) TestGetMetadataOSNotSupported(c *gc.C) { - metadata, err := gce.GetMetadata(s.StartInstArgs, jujuos.Arch) + metadata, err := gce.GetMetadata(s.StartInstArgs, jujuos.GenericLinux) c.Assert(metadata, gc.IsNil) - c.Assert(err, gc.ErrorMatches, "cannot pack metadata for os Arch on the gce provider") + c.Assert(err, gc.ErrorMatches, "cannot pack metadata for os GenericLinux on the gce provider") } var getDisksTests = []struct { diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/gce/environ.go juju-core-2.0.0/src/github.com/juju/juju/provider/gce/environ.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/gce/environ.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/gce/environ.go 2016-10-13 14:31:49.000000000 +0000 @@ -68,9 +68,6 @@ // namespace is used to create the machine and device hostnames. namespace instance.Namespace - - archLock sync.Mutex // protects supportedArchitectures - supportedArchitectures []string } // Function entry points defined as variables so they can be overridden @@ -205,6 +202,11 @@ return bootstrap(ctx, env, params) } +// BootstrapMessage is part of the Environ interface. +func (env *environ) BootstrapMessage() string { + return "" +} + // Destroy shuts down all known machines and destroys the rest of the // known environment. 
func (env *environ) Destroy() error { diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/gce/environ_policy.go juju-core-2.0.0/src/github.com/juju/juju/provider/gce/environ_policy.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/gce/environ_policy.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/gce/environ_policy.go 2016-10-13 14:31:49.000000000 +0000 @@ -7,9 +7,6 @@ "github.com/juju/errors" "github.com/juju/juju/constraints" - "github.com/juju/juju/environs/imagemetadata" - "github.com/juju/juju/environs/simplestreams" - "github.com/juju/juju/provider/common" ) // PrecheckInstance verifies that the provided series and constraints @@ -28,39 +25,6 @@ return nil } -func (env *environ) getSupportedArchitectures() ([]string, error) { - env.archLock.Lock() - defer env.archLock.Unlock() - - if env.supportedArchitectures != nil { - return env.supportedArchitectures, nil - } - - archList, err := env.lookupArchitectures() - if err != nil { - return nil, errors.Trace(err) - } - env.supportedArchitectures = archList - return archList, nil -} - -var supportedArchitectures = common.SupportedArchitectures - -func (env *environ) lookupArchitectures() ([]string, error) { - // Create a filter to get all images from our region and for the - // correct stream. - cloudSpec, err := env.Region() - if err != nil { - return nil, errors.Trace(err) - } - imageConstraint := imagemetadata.NewImageConstraint(simplestreams.LookupParams{ - CloudSpec: cloudSpec, - Stream: env.Config().ImageStream(), - }) - archList, err := supportedArchitectures(env, imageConstraint) - return archList, errors.Trace(err) -} - var unsupportedConstraints = []string{ constraints.Tags, constraints.VirtType, @@ -70,7 +34,7 @@ // instance types. See instancetypes.go. 
var instanceTypeConstraints = []string{ constraints.Arch, // Arches - constraints.CpuCores, + constraints.Cores, constraints.CpuPower, constraints.Mem, constraints.Container, // VirtType @@ -95,12 +59,6 @@ // vocab - supportedArches, err := env.getSupportedArchitectures() - if err != nil { - return nil, errors.Trace(err) - } - validator.RegisterVocabulary(constraints.Arch, supportedArches) - instTypeNames := make([]string, len(allInstanceTypes)) for i, itype := range allInstanceTypes { instTypeNames[i] = itype.Name diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/gce/environ_policy_test.go juju-core-2.0.0/src/github.com/juju/juju/provider/gce/environ_policy_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/gce/environ_policy_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/gce/environ_policy_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -6,7 +6,6 @@ import ( "github.com/juju/errors" jc "github.com/juju/testing/checkers" - "github.com/juju/utils/arch" "github.com/juju/utils/series" gc "gopkg.in/check.v1" @@ -126,8 +125,6 @@ } func (s *environPolSuite) TestConstraintsValidator(c *gc.C) { - s.FakeCommon.Arches = []string{arch.AMD64} - validator, err := s.Env.ConstraintsValidator() c.Assert(err, jc.ErrorIsNil) @@ -135,10 +132,6 @@ unsupported, err := validator.Validate(cons) c.Assert(err, jc.ErrorIsNil) c.Check(unsupported, gc.HasLen, 0) - - arm64 := arch.ARM64 - _, err = validator.Validate(constraints.Value{Arch: &arm64}) - c.Assert(err, gc.ErrorMatches, "invalid constraint value: arch=arm64\nvalid values are: \\[amd64\\]") } func (s *environPolSuite) TestConstraintsValidatorEmpty(c *gc.C) { @@ -152,8 +145,6 @@ } func (s *environPolSuite) TestConstraintsValidatorUnsupported(c *gc.C) { - s.FakeCommon.Arches = []string{arch.AMD64} - validator, err := s.Env.ConstraintsValidator() c.Assert(err, jc.ErrorIsNil) @@ -164,18 +155,6 @@ c.Check(unsupported, jc.SameContents, []string{"tags", 
"virt-type"}) } -func (s *environPolSuite) TestConstraintsValidatorVocabArch(c *gc.C) { - s.FakeCommon.Arches = []string{arch.AMD64} - - validator, err := s.Env.ConstraintsValidator() - c.Assert(err, jc.ErrorIsNil) - - cons := constraints.MustParse("arch=ppc64el") - _, err = validator.Validate(cons) - - c.Check(err, gc.ErrorMatches, "invalid constraint value: arch=ppc64el\nvalid values are:.*") -} - func (s *environPolSuite) TestConstraintsValidatorVocabInstType(c *gc.C) { validator, err := s.Env.ConstraintsValidator() c.Assert(err, jc.ErrorIsNil) @@ -197,15 +176,13 @@ } func (s *environPolSuite) TestConstraintsValidatorConflicts(c *gc.C) { - s.FakeCommon.Arches = []string{arch.AMD64} - validator, err := s.Env.ConstraintsValidator() c.Assert(err, jc.ErrorIsNil) cons := constraints.MustParse("instance-type=n1-standard-1") // We do not check arch or container since there is only one valid // value for each and will always match. - consFallback := constraints.MustParse("cpu-cores=2 cpu-power=1000 mem=10000 tags=bar") + consFallback := constraints.MustParse("cores=2 cpu-power=1000 mem=10000 tags=bar") merged, err := validator.Merge(consFallback, cons) c.Assert(err, jc.ErrorIsNil) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/gce/gce.go juju-core-2.0.0/src/github.com/juju/juju/provider/gce/gce.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/gce/gce.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/gce/gce.go 2016-10-13 14:31:49.000000000 +0000 @@ -17,9 +17,6 @@ metadataKeyEncoding = "user-data-encoding" metadataKeyWindowsUserdata = "windows-startup-script-ps1" metadataKeyWindowsSysprep = "sysprep-specialize-script-ps1" - // GCE uses this specific key for authentication (*handwaving*) - // https://cloud.google.com/compute/docs/instances#sshkeys - metadataKeySSHKeys = "sshKeys" ) const ( diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/gce/google/instance.go 
juju-core-2.0.0/src/github.com/juju/juju/provider/gce/google/instance.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/gce/google/instance.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/gce/google/instance.go 2016-10-13 14:31:49.000000000 +0000 @@ -6,9 +6,7 @@ import ( "fmt" "path" - "strings" - "github.com/juju/errors" "google.golang.org/api/compute/v1" "github.com/juju/juju/network" @@ -177,27 +175,6 @@ return gi.InstanceSummary.Metadata } -// FormatAuthorizedKeys returns our authorizedKeys with -// the username prepended to it. This is the format that -// GCE expects when we upload sshKeys metadata. The sshKeys -// metadata is what is used by our scripts and commands -// like juju ssh to connect to juju machines. -func FormatAuthorizedKeys(rawAuthorizedKeys, user string) (string, error) { - if rawAuthorizedKeys == "" { - return "", errors.New("empty rawAuthorizedKeys") - } - if user == "" { - return "", errors.New("empty user") - } - - var userKeys string - keys := strings.Split(rawAuthorizedKeys, "\n") - for _, key := range keys { - userKeys += user + ":" + key + "\n" - } - return userKeys, nil -} - // packMetadata composes the provided data into the format required // by the GCE API. 
func packMetadata(data map[string]string) *compute.Metadata { diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/gce/google/instance_test.go juju-core-2.0.0/src/github.com/juju/juju/provider/gce/google/instance_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/gce/google/instance_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/gce/google/instance_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -74,32 +74,6 @@ c.Check(metadata, jc.DeepEquals, map[string]string{"eggs": "steak"}) } -func (s *instanceSuite) TestFormatAuthorizedKeys(c *gc.C) { - formatted, err := google.FormatAuthorizedKeys("abcd", "john") - c.Assert(err, jc.ErrorIsNil) - - c.Check(formatted, gc.Equals, "john:abcd\n") -} - -func (s *instanceSuite) TestFormatAuthorizedKeysEmpty(c *gc.C) { - _, err := google.FormatAuthorizedKeys("", "john") - - c.Check(err, gc.ErrorMatches, "empty rawAuthorizedKeys") -} - -func (s *instanceSuite) TestFormatAuthorizedKeysNoUser(c *gc.C) { - _, err := google.FormatAuthorizedKeys("abcd", "") - - c.Check(err, gc.ErrorMatches, "empty user") -} - -func (s *instanceSuite) TestFormatAuthorizedKeysMultiple(c *gc.C) { - formatted, err := google.FormatAuthorizedKeys("abcd\ndcba\nqwer", "john") - c.Assert(err, jc.ErrorIsNil) - - c.Check(formatted, gc.Equals, "john:abcd\njohn:dcba\njohn:qwer\n") -} - func (s *instanceSuite) TestPackMetadata(c *gc.C) { expected := compute.Metadata{Items: []*compute.MetadataItems{{ Key: "spam", diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/gce/instance.go juju-core-2.0.0/src/github.com/juju/juju/provider/gce/instance.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/gce/instance.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/gce/instance.go 2016-10-13 14:31:49.000000000 +0000 @@ -34,16 +34,16 @@ // Status implements instance.Instance. 
func (inst *environInstance) Status() instance.InstanceStatus { instStatus := inst.base.Status() - jujuStatus := status.StatusProvisioning + jujuStatus := status.Provisioning switch instStatus { case "PROVISIONING", "STAGING": - jujuStatus = status.StatusProvisioning + jujuStatus = status.Provisioning case "RUNNING": - jujuStatus = status.StatusRunning + jujuStatus = status.Running case "STOPPING", "TERMINATED": - jujuStatus = status.StatusEmpty + jujuStatus = status.Empty default: - jujuStatus = status.StatusEmpty + jujuStatus = status.Empty } return instance.InstanceStatus{ Status: jujuStatus, diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/gce/instancetypes.go juju-core-2.0.0/src/github.com/juju/juju/provider/gce/instancetypes.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/gce/instancetypes.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/gce/instancetypes.go 2016-10-13 14:31:49.000000000 +0000 @@ -16,10 +16,9 @@ // Instance types are not associated with disks in GCE, so we do not // set RootDisk. -// TODO(ericsnow) Dynamically generate the type specs from the official -// JSON file. - -// Shared-core machine types. +// TODO(axw) 2016-10-03 #1629821 +// Query the machine types dynamically, to avoid hard-coding this +// information (if possible), or else to augment it. 
var allInstanceTypes = []instances.InstanceType{ { // Standard machine types Name: "n1-standard-1", @@ -56,6 +55,13 @@ CpuPower: instances.CpuPower(4400), Mem: 60000, VirtType: &vtype, + }, { + Name: "n1-standard-32", + Arches: arches, + CpuCores: 32, + CpuPower: instances.CpuPower(8800), + Mem: 120000, + VirtType: &vtype, }, { // High memory machine types @@ -86,6 +92,13 @@ CpuPower: instances.CpuPower(4400), Mem: 104000, VirtType: &vtype, + }, { + Name: "n1-highmem-32", + Arches: arches, + CpuCores: 32, + CpuPower: instances.CpuPower(8800), + Mem: 208000, + VirtType: &vtype, }, { // High CPU machine types @@ -116,9 +129,16 @@ CpuPower: instances.CpuPower(4400), Mem: 14400, VirtType: &vtype, + }, { + Name: "n1-highcpu-32", + Arches: arches, + CpuCores: 32, + CpuPower: instances.CpuPower(8800), + Mem: 28800, + VirtType: &vtype, }, - { // Micro and small machine types + { // Shared-core machine types. Name: "f1-micro", Arches: arches, CpuCores: 1, diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/gce/provider.go juju-core-2.0.0/src/github.com/juju/juju/provider/gce/provider.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/gce/provider.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/gce/provider.go 2016-10-13 14:31:49.000000000 +0000 @@ -5,6 +5,7 @@ import ( "github.com/juju/errors" + "github.com/juju/schema" "gopkg.in/juju/environschema.v1" "github.com/juju/juju/cloud" @@ -59,6 +60,18 @@ return fields } +// ConfigSchema returns extra config attributes specific +// to this provider only. +func (p environProvider) ConfigSchema() schema.Fields { + return configFields +} + +// ConfigDefaults returns the default values for the +// provider specific config attributes. +func (p environProvider) ConfigDefaults() schema.Defaults { + return configDefaults +} + // UpgradeModelConfig is specified in the ModelConfigUpgrader interface. 
func (environProvider) UpgradeConfig(cfg *config.Config) (*config.Config, error) { return configWithDefaults(cfg) @@ -76,11 +89,6 @@ return cfg.Apply(defaults) } -// RestrictedConfigAttributes is specified in the EnvironProvider interface. -func (environProvider) RestrictedConfigAttributes() []string { - return []string{} -} - // Validate implements environs.EnvironProvider.Validate. func (environProvider) Validate(cfg, old *config.Config) (*config.Config, error) { newCfg, err := newConfig(cfg, old) @@ -89,8 +97,3 @@ } return newCfg.config, nil } - -// SecretAttrs implements environs.EnvironProvider.SecretAttrs. -func (environProvider) SecretAttrs(cfg *config.Config) (map[string]string, error) { - return map[string]string{}, nil -} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/gce/testing_test.go juju-core-2.0.0/src/github.com/juju/juju/provider/gce/testing_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/gce/testing_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/gce/testing_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -31,6 +31,11 @@ coretools "github.com/juju/juju/tools" ) +// Ensure GCE provider supports the expected interfaces. +var ( + _ config.ConfigSchemaSource = (*environProvider)(nil) +) + // These values are fake GCE auth credentials for use in tests. 
const ( ClientName = "ba9876543210-0123456789abcdefghijklmnopqrstuv" @@ -161,15 +166,11 @@ userData, err := providerinit.ComposeUserData(instanceConfig, nil, GCERenderer{}) c.Assert(err, jc.ErrorIsNil) - authKeys, err := google.FormatAuthorizedKeys(instanceConfig.AuthorizedKeys, "ubuntu") - c.Assert(err, jc.ErrorIsNil) - s.UbuntuMetadata = map[string]string{ tags.JujuIsController: "true", tags.JujuController: s.ControllerUUID, metadataKeyCloudInit: string(userData), metadataKeyEncoding: "base64", - metadataKeySSHKeys: authKeys, } instanceConfig.Tags = map[string]string{ tags.JujuIsController: "true", @@ -303,7 +304,6 @@ s.PatchValue(&newConnection, func(google.ConnectionConfig, *google.Credentials) (gceConnection, error) { return s.FakeConn, nil }) - s.PatchValue(&supportedArchitectures, s.FakeCommon.SupportedArchitectures) s.PatchValue(&bootstrap, s.FakeCommon.Bootstrap) s.PatchValue(&destroyEnv, s.FakeCommon.Destroy) s.PatchValue(&availabilityZoneAllocations, s.FakeCommon.AvailabilityZoneAllocations) @@ -355,21 +355,12 @@ type fakeCommon struct { fake - Arches []string Arch string Series string BSFinalizer environs.BootstrapFinalizer AZInstances []common.AvailabilityZoneInstances } -func (fc *fakeCommon) SupportedArchitectures(env environs.Environ, cons *imagemetadata.ImageConstraint) ([]string, error) { - fc.addCall("SupportedArchitectures", FakeCallArgs{ - "switch": env, - "cons": cons, - }) - return fc.Arches, fc.err() -} - func (fc *fakeCommon) Bootstrap(ctx environs.BootstrapContext, env environs.Environ, params environs.BootstrapParams) (*environs.BootstrapResult, error) { fc.addCall("Bootstrap", FakeCallArgs{ "ctx": ctx, diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/gce/userdata_test.go juju-core-2.0.0/src/github.com/juju/juju/provider/gce/userdata_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/gce/userdata_test.go 2016-08-16 08:56:25.000000000 +0000 +++ 
juju-core-2.0.0/src/github.com/juju/juju/provider/gce/userdata_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -52,7 +52,7 @@ renderer := gce.GCERenderer{} cloudcfg := &cloudinittest.CloudConfig{} - result, err := renderer.Render(cloudcfg, os.Arch) + result, err := renderer.Render(cloudcfg, os.GenericLinux) c.Assert(result, gc.IsNil) - c.Assert(err, gc.ErrorMatches, "Cannot encode userdata for OS: Arch") + c.Assert(err, gc.ErrorMatches, "Cannot encode userdata for OS: GenericLinux") } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/joyent/credentials.go juju-core-2.0.0/src/github.com/juju/juju/provider/joyent/credentials.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/joyent/credentials.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/joyent/credentials.go 2016-10-13 14:31:49.000000000 +0000 @@ -7,6 +7,7 @@ "github.com/juju/errors" "github.com/juju/juju/cloud" + "github.com/juju/juju/environs" ) const ( @@ -49,3 +50,8 @@ func (environProviderCredentials) DetectCredentials() (*cloud.CloudCredential, error) { return nil, errors.NotFoundf("credentials") } + +// FinalizeCredential is part of the environs.ProviderCredentials interface. 
+func (environProviderCredentials) FinalizeCredential(_ environs.FinalizeCredentialContext, args environs.FinalizeCredentialParams) (*cloud.Credential, error) { + return &args.Credential, nil +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/joyent/environ.go juju-core-2.0.0/src/github.com/juju/juju/provider/joyent/environ.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/joyent/environ.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/joyent/environ.go 2016-10-13 14:31:49.000000000 +0000 @@ -14,7 +14,6 @@ "github.com/juju/juju/constraints" "github.com/juju/juju/environs" "github.com/juju/juju/environs/config" - "github.com/juju/juju/environs/imagemetadata" "github.com/juju/juju/environs/simplestreams" "github.com/juju/juju/environs/tags" "github.com/juju/juju/instance" @@ -28,11 +27,6 @@ cloud environs.CloudSpec compute *joyentCompute - // supportedArchitectures caches the architectures - // for which images can be instantiated. - archLock sync.Mutex - supportedArchitectures []string - lock sync.Mutex // protects ecfg ecfg *environConfig } @@ -83,27 +77,6 @@ return fmt.Errorf("invalid Joyent instance %q specified", *cons.InstanceType) } -func (env *joyentEnviron) getSupportedArchitectures() ([]string, error) { - env.archLock.Lock() - defer env.archLock.Unlock() - if env.supportedArchitectures != nil { - return env.supportedArchitectures, nil - } - cfg := env.Ecfg() - // Create a filter to get all images from our region and for the correct stream. 
- cloudSpec := simplestreams.CloudSpec{ - Region: env.cloud.Region, - Endpoint: env.cloud.Endpoint, - } - imageConstraint := imagemetadata.NewImageConstraint(simplestreams.LookupParams{ - CloudSpec: cloudSpec, - Stream: cfg.ImageStream(), - }) - var err error - env.supportedArchitectures, err = common.SupportedArchitectures(env, imageConstraint) - return env.supportedArchitectures, err -} - func (env *joyentEnviron) SetConfig(cfg *config.Config) error { env.lock.Lock() defer env.lock.Unlock() @@ -140,6 +113,11 @@ return common.Bootstrap(ctx, env, args) } +// BootstrapMessage is part of the Environ interface. +func (env *joyentEnviron) BootstrapMessage() string { + return "" +} + func (env *joyentEnviron) ControllerInstances(controllerUUID string) ([]instance.Id, error) { instanceIds := []instance.Id{} @@ -186,10 +164,9 @@ region = env.cloud.Region } return &simplestreams.MetadataLookupParams{ - Series: config.PreferredSeries(env.Ecfg()), - Region: region, - Endpoint: env.cloud.Endpoint, - Architectures: []string{"amd64", "armhf"}, + Series: config.PreferredSeries(env.Ecfg()), + Region: region, + Endpoint: env.cloud.Endpoint, }, nil } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/joyent/environ_instance.go juju-core-2.0.0/src/github.com/juju/juju/provider/joyent/environ_instance.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/joyent/environ_instance.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/joyent/environ_instance.go 2016-10-13 14:31:49.000000000 +0000 @@ -60,11 +60,6 @@ func (env *joyentEnviron) ConstraintsValidator() (constraints.Validator, error) { validator := constraints.NewValidator() validator.RegisterUnsupported(unsupportedConstraints) - supportedArches, err := env.getSupportedArchitectures() - if err != nil { - return nil, err - } - validator.RegisterVocabulary(constraints.Arch, supportedArches) packages, err := env.compute.cloudapi.ListPackages(nil) if err != nil { return 
nil, err diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/joyent/instance.go juju-core-2.0.0/src/github.com/juju/juju/provider/joyent/instance.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/joyent/instance.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/joyent/instance.go 2016-10-13 14:31:49.000000000 +0000 @@ -24,18 +24,18 @@ func (inst *joyentInstance) Status() instance.InstanceStatus { instStatus := inst.machine.State - jujuStatus := status.StatusPending + jujuStatus := status.Pending switch instStatus { case "configured", "incomplete", "unavailable", "provisioning": - jujuStatus = status.StatusAllocating + jujuStatus = status.Allocating case "ready", "running": - jujuStatus = status.StatusRunning + jujuStatus = status.Running case "halting", "stopping", "shutting_down", "off", "down", "installed", "stopped", "destroyed", "unreachable": - jujuStatus = status.StatusEmpty + jujuStatus = status.Empty case "failed": - jujuStatus = status.StatusProvisioningError + jujuStatus = status.ProvisioningError default: - jujuStatus = status.StatusEmpty + jujuStatus = status.Empty } return instance.InstanceStatus{ Status: jujuStatus, diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/joyent/local_test.go juju-core-2.0.0/src/github.com/juju/juju/provider/joyent/local_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/joyent/local_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/joyent/local_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -80,7 +80,7 @@ s.TestConfig = GetFakeConfig().Merge(coretesting.Attrs{ "image-metadata-url": "test://host", }) - s.LiveTests.UploadArches = []string{arch.AMD64} + s.PatchValue(&arch.HostArch, func() string { return arch.AMD64 }) s.AddCleanup(func(*gc.C) { envtesting.PatchAttemptStrategies(&joyent.ShortAttempt) }) } @@ -131,8 +131,7 @@ s.PatchValue(&jujuversion.Current, 
coretesting.FakeVersionNumber) s.cSrv.setupServer(c) s.AddCleanup(s.cSrv.destroyServer) - - s.Tests.ToolsFixture.UploadArches = []string{arch.AMD64} + s.PatchValue(&arch.HostArch, func() string { return arch.AMD64 }) s.Tests.SetUpTest(c) s.Credential = cloud.NewCredential(cloud.UserPassAuthType, map[string]string{ @@ -388,10 +387,7 @@ env := s.Prepare(c) validator, err := env.ConstraintsValidator() c.Assert(err, jc.ErrorIsNil) - cons := constraints.MustParse("arch=ppc64el") - _, err = validator.Validate(cons) - c.Assert(err, gc.ErrorMatches, "invalid constraint value: arch=ppc64el\nvalid values are:.*") - cons = constraints.MustParse("instance-type=foo") + cons := constraints.MustParse("instance-type=foo") _, err = validator.Validate(cons) c.Assert(err, gc.ErrorMatches, "invalid constraint value: instance-type=foo\nvalid values are:.*") } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/joyent/provider.go juju-core-2.0.0/src/github.com/juju/juju/provider/joyent/provider.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/joyent/provider.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/joyent/provider.go 2016-10-13 14:31:49.000000000 +0000 @@ -46,11 +46,6 @@ var _ simplestreams.HasRegion = (*joyentEnviron)(nil) -// RestrictedConfigAttributes is part of the EnvironProvider interface. -func (joyentProvider) RestrictedConfigAttributes() []string { - return []string{} -} - // PrepareConfig is part of the EnvironProvider interface. 
func (p joyentProvider) PrepareConfig(args environs.PrepareConfigParams) (*config.Config, error) { if err := validateCloudSpec(args.Cloud); err != nil { @@ -126,10 +121,6 @@ return cfg.Apply(newEcfg.attrs) } -func (joyentProvider) SecretAttrs(cfg *config.Config) (map[string]string, error) { - return map[string]string{}, nil -} - func GetProviderInstance() environs.EnvironProvider { return providerInstance } @@ -141,8 +132,7 @@ return nil, errors.Errorf("region must be specified") } return &simplestreams.MetadataLookupParams{ - Region: region, - Architectures: []string{"amd64", "armhf"}, + Region: region, }, nil } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/joyent/storage.go juju-core-2.0.0/src/github.com/juju/juju/provider/joyent/storage.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/joyent/storage.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/joyent/storage.go 2016-10-13 14:31:49.000000000 +0000 @@ -10,8 +10,8 @@ ) // StorageProviderTypes implements storage.ProviderRegistry. -func (*joyentEnviron) StorageProviderTypes() []storage.ProviderType { - return nil +func (*joyentEnviron) StorageProviderTypes() ([]storage.ProviderType, error) { + return nil, nil } // StorageProvider implements storage.ProviderRegistry. diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/lxd/config.go juju-core-2.0.0/src/github.com/juju/juju/provider/lxd/config.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/lxd/config.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/lxd/config.go 2016-10-13 14:31:49.000000000 +0000 @@ -6,111 +6,24 @@ package lxd import ( - "fmt" - "github.com/juju/errors" "github.com/juju/schema" "gopkg.in/juju/environschema.v1" "github.com/juju/juju/environs/config" - "github.com/juju/juju/tools/lxdclient" -) - -// TODO(ericsnow) Support providing cert/key file. - -// The LXD-specific config keys. 
-const ( - cfgRemoteURL = "remote-url" - cfgClientCert = "client-cert" - cfgClientKey = "client-key" - cfgServerPEMCert = "server-cert" ) -// configSchema defines the schema for the configuration attributes -// defined by the LXD provider. -var configSchema = environschema.Fields{ - cfgRemoteURL: { - Description: `Identifies the LXD API server to use for managing containers, if any.`, - Type: environschema.Tstring, - Immutable: true, - }, - cfgClientKey: { - Description: `The client key used for connecting to a LXD host machine.`, - Type: environschema.Tstring, - Immutable: true, - }, - cfgClientCert: { - Description: `The client cert used for connecting to a LXD host machine.`, - Type: environschema.Tstring, - Immutable: true, - }, - cfgServerPEMCert: { - Description: `The certificate of the LXD server on the host machine.`, - Type: environschema.Tstring, - Immutable: true, - }, -} - var ( - // TODO(ericsnow) Extract the defaults from configSchema as soon as - // (or if) environschema.Attr supports defaults. - - configBaseDefaults = schema.Defaults{ - cfgRemoteURL: "", - cfgClientCert: "", - cfgClientKey: "", - cfgServerPEMCert: "", - } - + configSchema = environschema.Fields{} configFields, configDefaults = func() (schema.Fields, schema.Defaults) { fields, defaults, err := configSchema.ValidationSchema() if err != nil { panic(err) } - defaults = updateDefaults(defaults, configBaseDefaults) return fields, defaults }() - - configSecretFields = []string{ - cfgClientKey, // only privileged agents should get to talk to LXD - } ) -func updateDefaults(defaults schema.Defaults, updates schema.Defaults) schema.Defaults { - updated := schema.Defaults{} - for k, v := range defaults { - updated[k] = v - } - for k, v := range updates { - // TODO(ericsnow) Delete the item if v is nil? 
- updated[k] = v - } - return updated -} - -func adjustDefaults(cfg *config.Config, defaults map[string]interface{}) (map[string]interface{}, []string) { - var unset []string - updated := make(map[string]interface{}) - for k, v := range defaults { - updated[k] = v - } - - return updated, unset -} - -// TODO(ericsnow) environschema.Fields should have this... -func ensureImmutableFields(oldAttrs, newAttrs map[string]interface{}) error { - for name, attr := range configSchema { - if !attr.Immutable { - continue - } - if newAttrs[name] != oldAttrs[name] { - return errors.Errorf("%s: cannot change from %v to %v", name, oldAttrs[name], newAttrs[name]) - } - } - return nil -} - type environConfig struct { *config.Config attrs map[string]interface{} @@ -128,41 +41,14 @@ // newValidConfig builds a new environConfig from the provided Config // and returns it. This includes applying the provided defaults // values, if any. The resulting config values are validated. -func newValidConfig(cfg *config.Config, defaults map[string]interface{}) (*environConfig, error) { - // Any auth credentials handling should happen first... - +func newValidConfig(cfg *config.Config) (*environConfig, error) { // Ensure that the provided config is valid. if err := config.Validate(cfg, nil); err != nil { return nil, errors.Trace(err) } - // Apply the defaults and coerce/validate the custom config attrs. - fixedDefaults, unset := adjustDefaults(cfg, defaults) - cfg, err := cfg.Remove(unset) - if err != nil { - return nil, errors.Trace(err) - } - validated, err := cfg.ValidateUnknownAttrs(configFields, fixedDefaults) - if err != nil { - return nil, errors.Trace(err) - } - validCfg, err := cfg.Apply(validated) - if err != nil { - return nil, errors.Trace(err) - } - // Build the config. - ecfg := newConfig(validCfg) - - // Update to defaults set via client config. 
- clientCfg, err := ecfg.clientConfig() - if err != nil { - return nil, errors.Trace(err) - } - ecfg, err = ecfg.updateForClientConfig(clientCfg) - if err != nil { - return nil, errors.Trace(err) - } + ecfg := newConfig(cfg) // Do final (more complex, provider-specific) validation. if err := ecfg.validate(); err != nil { @@ -172,153 +58,7 @@ return ecfg, nil } -func (c *environConfig) dirname() string { - // TODO(ericsnow) Put it under one of the juju/paths.*() directories. - return "" -} - -func (c *environConfig) remoteURL() string { - raw := c.attrs[cfgRemoteURL] - return raw.(string) -} - -func (c *environConfig) clientCert() string { - raw := c.attrs[cfgClientCert] - return raw.(string) -} - -func (c *environConfig) clientKey() string { - raw := c.attrs[cfgClientKey] - return raw.(string) -} - -func (c *environConfig) serverPEMCert() string { - raw := c.attrs[cfgServerPEMCert] - return raw.(string) -} - -// clientConfig builds a LXD Config based on the env config and returns it. -func (c *environConfig) clientConfig() (lxdclient.Config, error) { - remote := lxdclient.Remote{ - Name: "juju-remote", - Host: c.remoteURL(), - ServerPEMCert: c.serverPEMCert(), - } - if c.clientCert() != "" { - certPEM := []byte(c.clientCert()) - keyPEM := []byte(c.clientKey()) - cert := lxdclient.NewCert(certPEM, keyPEM) - cert.Name = fmt.Sprintf("juju cert for env %q", c.Name()) - remote.Cert = &cert - } - - cfg := lxdclient.Config{ - Remote: remote, - } - cfg, err := cfg.WithDefaults() - if err != nil { - return cfg, errors.Trace(err) - } - return cfg, nil -} - -// TODO(ericsnow) Switch to a DI testing approach and eliminiate this var. 
-var asNonLocal = lxdclient.Config.UsingTCPRemote - -func (c *environConfig) updateForClientConfig(clientCfg lxdclient.Config) (*environConfig, error) { - nonlocal, err := asNonLocal(clientCfg) - if err != nil { - return nil, errors.Trace(err) - } - clientCfg = nonlocal - - c.attrs[cfgRemoteURL] = clientCfg.Remote.Host - c.attrs[cfgServerPEMCert] = clientCfg.Remote.ServerPEMCert - - var cert lxdclient.Cert - if clientCfg.Remote.Cert != nil { - cert = *clientCfg.Remote.Cert - } - c.attrs[cfgClientCert] = string(cert.CertPEM) - c.attrs[cfgClientKey] = string(cert.KeyPEM) - - // Apply the updates. - cfg, err := c.Config.Apply(c.attrs) - if err != nil { - return nil, errors.Trace(err) - } - return newConfig(cfg), nil -} - -// secret gathers the "secret" config values and returns them. -func (c *environConfig) secret() map[string]string { - if len(configSecretFields) == 0 { - return nil - } - - secretAttrs := make(map[string]string, len(configSecretFields)) - for _, key := range configSecretFields { - secretAttrs[key] = c.attrs[key].(string) - } - return secretAttrs -} - -// validate checks more complex LCD-specific config values. +// validate validates LXD-specific configuration. func (c *environConfig) validate() error { - // All fields must be populated, even with just the default. - // TODO(ericsnow) Shouldn't configSchema support this? - for field := range configFields { - if dflt, ok := configDefaults[field]; ok && dflt == "" { - continue - } - if c.attrs[field].(string) == "" { - return errors.Errorf("%s: must not be empty", field) - } - } - - // If cert is provided then key must be (and vice versa). - if c.clientCert() == "" && c.clientKey() != "" { - return errors.Errorf("missing %s (got %s value %q)", cfgClientCert, cfgClientKey, c.clientKey()) - } - if c.clientCert() != "" && c.clientKey() == "" { - return errors.Errorf("missing %s (got %s value %q)", cfgClientKey, cfgClientCert, c.clientCert()) - } - - // Check sanity of complex provider-specific fields. 
- cfg, err := c.clientConfig() - if err != nil { - return errors.Trace(err) - } - if err := cfg.Validate(); err != nil { - return errors.Trace(err) - } - - return nil -} - -// update applies changes from the provided config to the env config. -// Changes to any immutable attributes result in an error. -func (c *environConfig) update(cfg *config.Config) error { - // Validate the updates. newValidConfig does not modify the "known" - // config attributes so it is safe to call Validate here first. - if err := config.Validate(cfg, c.Config); err != nil { - return errors.Trace(err) - } - - updates, err := newValidConfig(cfg, configDefaults) - if err != nil { - return errors.Trace(err) - } - - // Check that no immutable fields have changed. - attrs := updates.UnknownAttrs() - if err := ensureImmutableFields(c.attrs, attrs); err != nil { - return errors.Trace(err) - } - - // Apply the updates. - // TODO(ericsnow) Should updates.Config be set in instead of cfg? - c.Config = cfg - c.attrs = cfg.UnknownAttrs() return nil } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/lxd/config_test.go juju-core-2.0.0/src/github.com/juju/juju/provider/lxd/config_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/lxd/config_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/lxd/config_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -6,8 +6,6 @@ package lxd_test import ( - "fmt" - jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" "gopkg.in/juju/environschema.v1" @@ -16,7 +14,6 @@ "github.com/juju/juju/environs/config" "github.com/juju/juju/provider/lxd" "github.com/juju/juju/testing" - "github.com/juju/juju/tools/lxdclient" ) type configSuite struct { @@ -42,154 +39,7 @@ values, extras := ecfg.Values(c) c.Assert(extras, gc.HasLen, 0) - c.Check(values, jc.DeepEquals, lxd.ConfigValues{ - RemoteURL: "", - ClientCert: "", - ClientKey: "", - ServerCert: "", - }) -} - -func (s *configSuite) 
TestClientConfigLocal(c *gc.C) { - cfg := lxd.NewBaseConfig(c) - ecfg := lxd.NewConfig(cfg) - values, _ := ecfg.Values(c) - c.Assert(values.RemoteURL, gc.Equals, "") - - clientCfg, err := ecfg.ClientConfig() - c.Assert(err, jc.ErrorIsNil) - - c.Check(clientCfg, jc.DeepEquals, lxdclient.Config{ - Remote: lxdclient.Remote{ - Name: "juju-remote", - Host: "", - Protocol: lxdclient.LXDProtocol, - Cert: nil, - ServerPEMCert: "", - }, - }) -} - -func (s *configSuite) TestClientConfigNonLocal(c *gc.C) { - cfg := lxd.NewBaseConfig(c) - ecfg := lxd.NewConfig(cfg) - ecfg = ecfg.Apply(c, map[string]interface{}{ - "remote-url": "10.0.0.1", - "client-cert": "", - "client-key": "", - "server-cert": "", - }) - - clientCfg, err := ecfg.ClientConfig() - c.Assert(err, jc.ErrorIsNil) - - c.Check(clientCfg, jc.DeepEquals, lxdclient.Config{ - Remote: lxdclient.Remote{ - Name: "juju-remote", - Host: "10.0.0.1", - Protocol: lxdclient.LXDProtocol, - Cert: &lxdclient.Cert{ - Name: fmt.Sprintf("juju cert for env %q", s.config.Name()), - CertPEM: []byte(""), - KeyPEM: []byte(""), - }, - ServerPEMCert: "", - }, - }) -} - -func (s *configSuite) TestUpdateForClientConfigLocal(c *gc.C) { - cfg := lxd.NewBaseConfig(c) - ecfg := lxd.NewConfig(cfg) - - clientCfg, err := ecfg.ClientConfig() - c.Assert(err, jc.ErrorIsNil) - updated, err := ecfg.UpdateForClientConfig(clientCfg) - c.Assert(err, jc.ErrorIsNil) - - values, extras := updated.Values(c) - c.Assert(extras, gc.HasLen, 0) - - c.Check(values, jc.DeepEquals, lxd.ConfigValues{ - RemoteURL: "", - ClientCert: "", - ClientKey: "", - ServerCert: "", - }) -} - -func (s *configSuite) TestUpdateForClientConfigNonLocal(c *gc.C) { - cfg := lxd.NewBaseConfig(c) - ecfg := lxd.NewConfig(cfg) - ecfg = ecfg.Apply(c, map[string]interface{}{ - "remote-url": "10.0.0.1", - "client-cert": "", - "client-key": "", - "server-cert": "", - }) - - before, extras := ecfg.Values(c) - c.Assert(extras, gc.HasLen, 0) - - clientCfg, err := ecfg.ClientConfig() - c.Assert(err, 
jc.ErrorIsNil) - updated, err := ecfg.UpdateForClientConfig(clientCfg) - c.Assert(err, jc.ErrorIsNil) - - after, extras := updated.Values(c) - c.Assert(extras, gc.HasLen, 0) - - c.Check(before, jc.DeepEquals, lxd.ConfigValues{ - RemoteURL: "10.0.0.1", - ClientCert: "", - ClientKey: "", - ServerCert: "", - }) - c.Check(after, jc.DeepEquals, lxd.ConfigValues{ - RemoteURL: "10.0.0.1", - ClientCert: "", - ClientKey: "", - ServerCert: "", - }) -} - -func (s *configSuite) TestUpdateForClientConfigGeneratedCert(c *gc.C) { - cfg := lxd.NewBaseConfig(c) - ecfg := lxd.NewConfig(cfg) - ecfg = ecfg.Apply(c, map[string]interface{}{ - "remote-url": "10.0.0.1", - "client-cert": "", - "client-key": "", - "server-cert": "", - }) - - before, extras := ecfg.Values(c) - c.Assert(extras, gc.HasLen, 0) - - clientCfg, err := ecfg.ClientConfig() - c.Assert(err, jc.ErrorIsNil) - updated, err := ecfg.UpdateForClientConfig(clientCfg) - c.Assert(err, jc.ErrorIsNil) - - after, extras := updated.Values(c) - c.Assert(extras, gc.HasLen, 0) - - c.Check(before, jc.DeepEquals, lxd.ConfigValues{ - RemoteURL: "10.0.0.1", - ClientCert: "", - ClientKey: "", - ServerCert: "", - }) - after.CheckCert(c) - after.ClientCert = "" - after.ClientKey = "" - after.ServerCert = "" - c.Check(after, jc.DeepEquals, lxd.ConfigValues{ - RemoteURL: "10.0.0.1", - ClientCert: "", - ClientKey: "", - ServerCert: "", - }) + c.Check(values, jc.DeepEquals, lxd.ConfigValues{}) } // TODO(ericsnow) Each test only deals with a single field, so having @@ -275,34 +125,6 @@ } var newConfigTests = []configTestSpec{{ - info: "remote-url is optional", - remove: []string{"remote-url"}, - expect: testing.Attrs{"remote-url": ""}, -}, { - info: "remote-url can be empty", - insert: testing.Attrs{"remote-url": ""}, - expect: testing.Attrs{"remote-url": ""}, -}, { - info: "client-cert is optional", - remove: []string{"client-cert"}, - expect: testing.Attrs{"client-cert": ""}, -}, { - info: "client-cert can be empty", - insert: 
testing.Attrs{"client-cert": ""}, - expect: testing.Attrs{"client-cert": ""}, -}, { - info: "client-key is optional", - remove: []string{"client-key"}, - expect: testing.Attrs{"client-key": ""}, -}, { - info: "client-key can be empty", - insert: testing.Attrs{"client-key": ""}, - expect: testing.Attrs{"client-key": ""}, -}, { - info: "server-cert is optional", - remove: []string{"server-cert"}, - expect: testing.Attrs{"server-cert": ""}, -}, { info: "unknown field is not touched", insert: testing.Attrs{"unknown-field": 12345}, expect: testing.Attrs{"unknown-field": 12345}, @@ -321,7 +143,10 @@ c.Logf("test %d: %s", i, test.info) testConfig := test.newConfig(c) - environ, err := environs.New(environs.OpenParams{lxdCloudSpec(), testConfig}) + environ, err := environs.New(environs.OpenParams{ + Cloud: lxdCloudSpec(), + Config: testConfig, + }) // Check the result if test.err != "" { @@ -381,8 +206,6 @@ } } -// TODO(ericsnow) Add tests for client-cert and client-key. - var changeConfigTests = []configTestSpec{{ info: "no change, no error", expect: lxd.ConfigAttrs, @@ -421,7 +244,10 @@ for i, test := range changeConfigTests { c.Logf("test %d: %s", i, test.info) - environ, err := environs.New(environs.OpenParams{lxdCloudSpec(), s.config}) + environ, err := environs.New(environs.OpenParams{ + Cloud: lxdCloudSpec(), + Config: s.config, + }) c.Assert(err, jc.ErrorIsNil) testConfig := test.newConfig(c) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/lxd/credentials.go juju-core-2.0.0/src/github.com/juju/juju/provider/lxd/credentials.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/lxd/credentials.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/lxd/credentials.go 2016-10-13 14:31:49.000000000 +0000 @@ -7,6 +7,7 @@ import ( "github.com/juju/juju/cloud" + "github.com/juju/juju/environs" ) type environProviderCredentials struct{} @@ -16,22 +17,15 @@ // TODO (anastasiamac 2016-04-14) When/If this value 
changes, // verify that juju/juju/cloud/clouds.go#BuiltInClouds // with lxd type are up to-date. - // TODO(wallyworld) update BuiltInClouds to match when we actually take notice of TLSAuthType - return map[cloud.AuthType]cloud.CredentialSchema{ - cloud.EmptyAuthType: {}, - cloud.CertificateAuthType: { - { - cfgClientCert, cloud.CredentialAttr{Description: "The client cert used for connecting to a LXD host machine."}, - }, { - cfgClientKey, cloud.CredentialAttr{Description: "The client key used for connecting to a LXD host machine."}, - }, { - cfgServerPEMCert, cloud.CredentialAttr{Description: "The certificate of the LXD server on the host machine."}, - }, - }, - } + return map[cloud.AuthType]cloud.CredentialSchema{cloud.EmptyAuthType: {}} } // DetectCredentials is part of the environs.ProviderCredentials interface. func (environProviderCredentials) DetectCredentials() (*cloud.CloudCredential, error) { return cloud.NewEmptyCloudCredential(), nil } + +// FinalizeCredential is part of the environs.ProviderCredentials interface. 
+func (environProviderCredentials) FinalizeCredential(_ environs.FinalizeCredentialContext, args environs.FinalizeCredentialParams) (*cloud.Credential, error) { + return &args.Credential, nil +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/lxd/credentials_test.go juju-core-2.0.0/src/github.com/juju/juju/provider/lxd/credentials_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/lxd/credentials_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/lxd/credentials_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -29,7 +29,7 @@ } func (s *credentialsSuite) TestCredentialSchemas(c *gc.C) { - envtesting.AssertProviderAuthTypes(c, s.provider, "certificate", "empty") + envtesting.AssertProviderAuthTypes(c, s.provider, "empty") } func (s *credentialsSuite) TestDetectCredentials(c *gc.C) { diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/lxd/environ_broker.go juju-core-2.0.0/src/github.com/juju/juju/provider/lxd/environ_broker.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/lxd/environ_broker.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/lxd/environ_broker.go 2016-10-13 14:31:49.000000000 +0000 @@ -6,12 +6,13 @@ package lxd import ( - "fmt" "strings" "github.com/juju/errors" "github.com/juju/utils/arch" + lxdshared "github.com/lxc/lxd/shared" + "github.com/juju/juju/cloudconfig/cloudinit" "github.com/juju/juju/cloudconfig/instancecfg" "github.com/juju/juju/cloudconfig/providerinit" "github.com/juju/juju/environs" @@ -48,7 +49,7 @@ raw, err := env.newRawInstance(args) if err != nil { if args.StatusCallback != nil { - args.StatusCallback(status.StatusProvisioningError, err.Error(), nil) + args.StatusCallback(status.ProvisioningError, err.Error(), nil) } return nil, errors.Trace(err) } @@ -72,13 +73,9 @@ if err != nil { return errors.Trace(err) } - if len(tools) == 0 { - return errors.Errorf("No tools available for 
architecture %q", arch.HostArch()) - } if err := args.InstanceConfig.SetTools(tools); err != nil { return errors.Trace(err) } - logger.Debugf("tools: %#v", args.InstanceConfig.ToolsList()) if err := instancecfg.FinishInstanceConfig(args.InstanceConfig, env.ecfg.Config); err != nil { return errors.Trace(err) @@ -144,30 +141,88 @@ return nil, errors.Trace(err) } - series := args.Tools.OneSeries() + series := args.InstanceConfig.Series // TODO(jam): We should get this information from EnsureImageExists, or // something given to us from 'raw', not assume it ourselves. image := "ubuntu-" + series // TODO: support args.Constraints.Arch, we'll want to map from - var callback func(string) - if args.StatusCallback != nil { - callback = func(copyProgress string) { - args.StatusCallback(status.StatusAllocating, copyProgress, nil) + // Keep track of StatusCallback output so we may clean up later. + // This is implemented here, close to where the StatusCallback calls + // are made, instead of at a higher level in the package, so as not to + // assume that all providers will have the same need to be implemented + // in the same way. 
+ longestMsg := 0 + statusCallback := func(currentStatus status.Status, msg string) { + if args.StatusCallback != nil { + args.StatusCallback(currentStatus, msg, nil) + } + if len(msg) > longestMsg { + longestMsg = len(msg) + } + } + cleanupCallback := func() { + if args.CleanupCallback != nil { + args.CleanupCallback(strings.Repeat(" ", longestMsg)) } } - if err := env.raw.EnsureImageExists(series, imageSources, callback); err != nil { + defer cleanupCallback() + + imageCallback := func(copyProgress string) { + statusCallback(status.Allocating, copyProgress) + } + if err := env.raw.EnsureImageExists(series, imageSources, imageCallback); err != nil { return nil, errors.Trace(err) } + cleanupCallback() // Clean out any long line of completed download status - metadata, err := getMetadata(args) + cloudcfg, err := cloudinit.New(series) if err != nil { return nil, errors.Trace(err) } - //tags := []string{ - // env.globalFirewallName(), - // machineID, - //} + + var certificateFingerprint string + if args.InstanceConfig.Controller != nil { + // For controller machines, generate a certificate pair and write + // them to the instance's disk in a well-defined location, along + // with the server's certificate. + certPEM, keyPEM, err := lxdshared.GenerateMemCert(true) + if err != nil { + return nil, errors.Trace(err) + } + cert := lxdclient.NewCert(certPEM, keyPEM) + cert.Name = hostname + + // We record the certificate's fingerprint in metadata, so we can + // remove the certificate along with the instance. 
+ certificateFingerprint, err = cert.Fingerprint() + if err != nil { + return nil, errors.Trace(err) + } + + if err := env.raw.AddCert(cert); err != nil { + return nil, errors.Annotatef(err, "adding certificate %q", cert.Name) + } + serverState, err := env.raw.ServerStatus() + if err != nil { + return nil, errors.Annotate(err, "getting server status") + } + cloudcfg.AddRunTextFile(clientCertPath, string(certPEM), 0600) + cloudcfg.AddRunTextFile(clientKeyPath, string(keyPEM), 0600) + cloudcfg.AddRunTextFile(serverCertPath, serverState.Environment.Certificate, 0600) + } + + cloudcfg.SetAttr("hostname", hostname) + cloudcfg.SetAttr("manage_etc_hosts", true) + + metadata, err := getMetadata(cloudcfg, args) + if err != nil { + return nil, errors.Trace(err) + } + if certificateFingerprint != "" { + metadata[metadataKeyCertificateFingerprint] = certificateFingerprint + } + // TODO(ericsnow) Use the env ID for the network name (instead of default)? // TODO(ericsnow) Make the network name configurable? // TODO(ericsnow) Support multiple networks? @@ -187,29 +242,25 @@ "default", env.profileName(), }, - //Tags: tags, // Network is omitted (left empty). } logger.Infof("starting instance %q (image %q)...", instSpec.Name, instSpec.Image) - if args.StatusCallback != nil { - args.StatusCallback(status.StatusAllocating, "starting instance", nil) - } + + statusCallback(status.Allocating, "preparing image") inst, err := env.raw.AddInstance(instSpec) if err != nil { return nil, errors.Trace(err) } - if args.StatusCallback != nil { - args.StatusCallback(status.StatusRunning, "Container started", nil) - } + statusCallback(status.Running, "container started") return inst, nil } // getMetadata builds the raw "user-defined" metadata for the new // instance (relative to the provided args) and returns it. 
-func getMetadata(args environs.StartInstanceParams) (map[string]string, error) { +func getMetadata(cloudcfg cloudinit.CloudConfig, args environs.StartInstanceParams) (map[string]string, error) { renderer := lxdRenderer{} - uncompressed, err := providerinit.ComposeUserData(args.InstanceConfig, nil, renderer) + uncompressed, err := providerinit.ComposeUserData(args.InstanceConfig, cloudcfg, renderer) if err != nil { return nil, errors.Annotate(err, "cannot make user data") } @@ -254,19 +305,13 @@ // TODO(ericsnow) This special-case should be improved. archStr = arch.HostArch() } - - hwc, err := instance.ParseHardware( - "arch="+archStr, - fmt.Sprintf("cpu-cores=%d", raw.NumCores), - fmt.Sprintf("mem=%dM", raw.MemoryMB), - //"root-disk=", - //"tags=", - ) - if err != nil { - logger.Errorf("unexpected problem parsing hardware info: %v", err) - // Keep moving... + cores := uint64(raw.NumCores) + mem := uint64(raw.MemoryMB) + return &instance.HardwareCharacteristics{ + Arch: &archStr, + CpuCores: &cores, + Mem: &mem, } - return &hwc } // AllInstances implements environs.InstanceBroker. @@ -290,6 +335,36 @@ } prefix := env.namespace.Prefix() - err := env.raw.RemoveInstances(prefix, ids...) + err := removeInstances(env.raw, prefix, ids) return errors.Trace(err) } + +func removeInstances(raw *rawProvider, prefix string, ids []string) error { + // We must first list the instances so we can remove any + // controller certificates. 
+ allInstances, err := raw.Instances(prefix) + if err != nil { + return errors.Trace(err) + } + for _, inst := range allInstances { + certificateFingerprint := inst.Metadata()[lxdclient.CertificateFingerprintKey] + if certificateFingerprint == "" { + continue + } + var found bool + for _, id := range ids { + if inst.Name == id { + found = true + break + } + } + if !found { + continue + } + err := raw.RemoveCertByFingerprint(certificateFingerprint) + if err != nil && !errors.IsNotFound(err) { + return errors.Annotatef(err, "removing certificate for %q", inst.Name) + } + } + return raw.RemoveInstances(prefix, ids...) +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/lxd/environ_broker_test.go juju-core-2.0.0/src/github.com/juju/juju/provider/lxd/environ_broker_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/lxd/environ_broker_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/lxd/environ_broker_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -6,12 +6,14 @@ package lxd_test import ( + "github.com/juju/errors" gitjujutesting "github.com/juju/testing" jc "github.com/juju/testing/checkers" "github.com/juju/utils/arch" gc "gopkg.in/check.v1" "github.com/juju/juju/provider/lxd" + "github.com/juju/juju/tools/lxdclient" ) type environBrokerSuite struct { @@ -52,6 +54,12 @@ c.Assert(err, jc.ErrorIsNil) s.Stub.CheckCalls(c, []gitjujutesting.StubCall{{ + FuncName: "Instances", + Args: []interface{}{ + "juju-f75cba-", + []string(nil), + }, + }, { FuncName: "RemoveInstances", Args: []interface{}{ "juju-f75cba-", @@ -60,6 +68,29 @@ }}) } +func (s *environBrokerSuite) TestStopInstancesRemoveCertificate(c *gc.C) { + s.RawInstance.InstanceSummary.Metadata[lxdclient.CertificateFingerprintKey] = "foo" + s.Client.Insts = []lxdclient.Instance{*s.RawInstance} + + err := s.Env.StopInstances(s.Instance.Id()) + c.Assert(err, jc.ErrorIsNil) + + s.Stub.CheckCallNames(c, "Instances", "RemoveCertByFingerprint", 
"RemoveInstances") + s.Stub.CheckCall(c, 1, "RemoveCertByFingerprint", "foo") +} + +func (s *environBrokerSuite) TestStopInstancesRemoveCertificateNotFound(c *gc.C) { + s.RawInstance.InstanceSummary.Metadata[lxdclient.CertificateFingerprintKey] = "foo" + s.Client.Insts = []lxdclient.Instance{*s.RawInstance} + + s.Stub.SetErrors(nil, errors.NotFoundf("certificate")) + err := s.Env.StopInstances(s.Instance.Id()) + c.Assert(err, jc.ErrorIsNil) + + s.Stub.CheckCallNames(c, "Instances", "RemoveCertByFingerprint", "RemoveInstances") + s.Stub.CheckCall(c, 1, "RemoveCertByFingerprint", "foo") +} + func (s *environBrokerSuite) TestImageMetadataURL(c *gc.C) { s.UpdateConfig(c, map[string]interface{}{ "image-metadata-url": "https://my-test.com/images/", diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/lxd/environ.go juju-core-2.0.0/src/github.com/juju/juju/provider/lxd/environ.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/lxd/environ.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/lxd/environ.go 2016-10-13 14:31:49.000000000 +0000 @@ -15,8 +15,11 @@ "github.com/juju/juju/environs/tags" "github.com/juju/juju/instance" "github.com/juju/juju/provider/common" + "github.com/juju/juju/tools/lxdclient" ) +const bootstrapMessage = `To configure your system to better support LXD containers, please see: https://github.com/lxc/lxd/blob/master/doc/production-setup.md` + type baseProvider interface { // BootstrapEnv bootstraps a Juju environment. 
BootstrapEnv(environs.BootstrapContext, environs.BootstrapParams) (*environs.BootstrapResult, error) @@ -38,29 +41,32 @@ ecfg *environConfig } -type newRawProviderFunc func(*environConfig) (*rawProvider, error) +type newRawProviderFunc func(environs.CloudSpec) (*rawProvider, error) -func newEnviron(cfg *config.Config, newRawProvider newRawProviderFunc) (*environ, error) { - ecfg, err := newValidConfig(cfg, configDefaults) +func newEnviron(spec environs.CloudSpec, cfg *config.Config, newRawProvider newRawProviderFunc) (*environ, error) { + ecfg, err := newValidConfig(cfg) if err != nil { return nil, errors.Annotate(err, "invalid config") } - // Connect and authenticate. - raw, err := newRawProvider(ecfg) + namespace, err := instance.NewNamespace(cfg.UUID()) if err != nil { return nil, errors.Trace(err) } - env, err := newEnvironRaw(ecfg, raw) + raw, err := newRawProvider(spec) if err != nil { return nil, errors.Trace(err) } - env.namespace, err = instance.NewNamespace(cfg.UUID()) - if err != nil { - return nil, errors.Trace(err) + env := &environ{ + name: ecfg.Name(), + uuid: ecfg.UUID(), + raw: raw, + namespace: namespace, + ecfg: ecfg, } + env.base = common.DefaultProvider{Env: env} //TODO(wwitzel3) make sure we are also cleaning up profiles during destroy if err := env.initProfile(); err != nil { @@ -70,17 +76,6 @@ return env, nil } -func newEnvironRaw(ecfg *environConfig, raw *rawProvider) (*environ, error) { - env := &environ{ - name: ecfg.Name(), - uuid: ecfg.UUID(), - ecfg: ecfg, - raw: raw, - } - env.base = common.DefaultProvider{Env: env} - return env, nil -} - var defaultProfileConfig = map[string]string{ "boot.autostart": "true", "security.nesting": "true", @@ -117,14 +112,11 @@ func (env *environ) SetConfig(cfg *config.Config) error { env.lock.Lock() defer env.lock.Unlock() - - if env.ecfg == nil { - return errors.New("cannot set config on uninitialized env") - } - - if err := env.ecfg.update(cfg); err != nil { - return errors.Annotate(err, "invalid 
config change") + ecfg, err := newValidConfig(cfg) + if err != nil { + return errors.Trace(err) } + env.ecfg = ecfg return nil } @@ -138,36 +130,30 @@ // PrepareForBootstrap implements environs.Environ. func (env *environ) PrepareForBootstrap(ctx environs.BootstrapContext) error { - if ctx.ShouldVerifyCredentials() { - if err := env.verifyCredentials(); err != nil { - return errors.Trace(err) - } + if err := lxdclient.EnableHTTPSListener(env.raw); err != nil { + return errors.Annotate(err, "enabling HTTPS listener") } return nil } // Create implements environs.Environ. func (env *environ) Create(environs.CreateParams) error { - if err := env.verifyCredentials(); err != nil { - return errors.Trace(err) - } return nil } -// Bootstrap creates a new instance, chosing the series and arch out of -// available tools. The series and arch are returned along with a func -// that must be called to finalize the bootstrap process by transferring -// the tools and installing the initial juju controller. +// Bootstrap implements environs.Environ. func (env *environ) Bootstrap(ctx environs.BootstrapContext, params environs.BootstrapParams) (*environs.BootstrapResult, error) { - // TODO(ericsnow) Ensure currently not the root user - // if remote is local host? - // Using the Bootstrap func from provider/common should be fine. // Local provider does its own thing because it has to deal directly // with localhost rather than using SSH. return env.base.BootstrapEnv(ctx, params) } +// BootstrapMessage is part of the Environ interface. +func (env *environ) BootstrapMessage() string { + return bootstrapMessage +} + // Destroy shuts down all known machines and destroys the rest of the // known environment. func (env *environ) Destroy() error { @@ -195,9 +181,9 @@ } func (env *environ) destroyHostedModelResources(controllerUUID string) error { - // Destroy all instances where juju-controller-uuid, - // but not juju-model-uuid, matches env.uuid. 
- prefix := env.namespace.Prefix() + // Destroy all instances with juju-controller-uuid + // matching the specified UUID. + const prefix = "juju-" instances, err := env.prefixedInstances(prefix) if err != nil { return errors.Annotate(err, "listing instances") @@ -214,13 +200,8 @@ } names = append(names, string(inst.Id())) } - if err := env.raw.RemoveInstances(prefix, names...); err != nil { + if err := removeInstances(env.raw, prefix, names); err != nil { return errors.Annotate(err, "removing hosted model instances") } return nil } - -func (env *environ) verifyCredentials() error { - // TODO(ericsnow) Do something here? - return nil -} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/lxd/environ_instance_test.go juju-core-2.0.0/src/github.com/juju/juju/provider/lxd/environ_instance_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/lxd/environ_instance_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/lxd/environ_instance_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -97,7 +97,7 @@ func (s *environInstSuite) TestControllerInstancesOkay(c *gc.C) { s.Client.Insts = []lxdclient.Instance{*s.RawInstance} - ids, err := s.Env.ControllerInstances(coretesting.ModelTag.Id()) + ids, err := s.Env.ControllerInstances(coretesting.ControllerTag.Id()) c.Assert(err, jc.ErrorIsNil) c.Check(ids, jc.DeepEquals, []instance.Id{"spam"}) @@ -119,7 +119,7 @@ other := lxdclient.NewInstance(lxdclient.InstanceSummary{}, nil) s.Client.Insts = []lxdclient.Instance{*s.RawInstance, *other} - ids, err := s.Env.ControllerInstances(coretesting.ModelTag.Id()) + ids, err := s.Env.ControllerInstances(coretesting.ControllerTag.Id()) c.Assert(err, jc.ErrorIsNil) c.Check(ids, jc.DeepEquals, []instance.Id{"spam"}) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/lxd/environ_policy.go juju-core-2.0.0/src/github.com/juju/juju/provider/lxd/environ_policy.go --- 
juju-core-2.0~beta15/src/github.com/juju/juju/provider/lxd/environ_policy.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/lxd/environ_policy.go 2016-10-13 14:31:49.000000000 +0000 @@ -27,7 +27,7 @@ } var unsupportedConstraints = []string{ - constraints.CpuCores, + constraints.Cores, constraints.CpuPower, //TODO(ericsnow) Add constraints.Mem as unsupported? constraints.InstanceType, diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/lxd/environ_policy_test.go juju-core-2.0.0/src/github.com/juju/juju/provider/lxd/environ_policy_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/lxd/environ_policy_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/lxd/environ_policy_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -108,7 +108,7 @@ "tags=foo", "mem=3", "instance-type=some-type", - "cpu-cores=2", + "cores=2", "cpu-power=250", "virt-type=kvm", }, " ")) @@ -118,7 +118,7 @@ expected := []string{ "tags", "instance-type", - "cpu-cores", + "cores", "cpu-power", "virt-type", } @@ -167,12 +167,12 @@ c.Assert(err, jc.ErrorIsNil) cons := constraints.MustParse("instance-type=n1-standard-1") - consFallback := constraints.MustParse("cpu-cores=2 cpu-power=1000 mem=10000 tags=bar") + consFallback := constraints.MustParse("cores=2 cpu-power=1000 mem=10000 tags=bar") merged, err := validator.Merge(consFallback, cons) c.Assert(err, jc.ErrorIsNil) // tags is not supported, but we're not validating here... 
- expected := constraints.MustParse("instance-type=n1-standard-1 tags=bar cpu-cores=2 cpu-power=1000 mem=10000") + expected := constraints.MustParse("instance-type=n1-standard-1 tags=bar cores=2 cpu-power=1000 mem=10000") c.Check(merged, jc.DeepEquals, expected) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/lxd/environ_raw.go juju-core-2.0.0/src/github.com/juju/juju/provider/lxd/environ_raw.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/lxd/environ_raw.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/lxd/environ_raw.go 2016-10-13 14:31:49.000000000 +0000 @@ -6,20 +6,49 @@ package lxd import ( + "io/ioutil" + "os" + "path" + "strings" + "github.com/juju/errors" + "github.com/juju/utils" + "github.com/juju/utils/series" + lxdshared "github.com/lxc/lxd/shared" + "github.com/juju/juju/environs" + jujupaths "github.com/juju/juju/juju/paths" "github.com/juju/juju/network" "github.com/juju/juju/provider/common" "github.com/juju/juju/tools/lxdclient" ) +var ( + jujuConfDir = jujupaths.MustSucceed(jujupaths.ConfDir(series.LatestLts())) + clientCertPath = path.Join(jujuConfDir, "lxd-client.crt") + clientKeyPath = path.Join(jujuConfDir, "lxd-client.key") + serverCertPath = path.Join(jujuConfDir, "lxd-server.crt") +) + type rawProvider struct { + lxdCerts + lxdConfig lxdInstances lxdProfiles lxdImages common.Firewaller } +type lxdCerts interface { + AddCert(lxdclient.Cert) error + RemoveCertByFingerprint(string) error +} + +type lxdConfig interface { + ServerStatus() (*lxdshared.ServerState, error) + SetConfig(k, v string) error +} + type lxdInstances interface { Instances(string, ...string) ([]lxdclient.Instance, error) AddInstance(lxdclient.InstanceSpec) (*lxdclient.Instance, error) @@ -36,40 +65,101 @@ EnsureImageExists(series string, sources []lxdclient.Remote, copyProgressHandler func(string)) error } -func newRawProvider(ecfg *environConfig) (*rawProvider, error) { - client, err := 
newClient(ecfg) - if err != nil { - return nil, errors.Trace(err) - } - - firewaller, err := newFirewaller(ecfg) +func newRawProvider(spec environs.CloudSpec) (*rawProvider, error) { + client, err := newClient(spec, ioutil.ReadFile, utils.RunCommand) if err != nil { - return nil, errors.Trace(err) + return nil, errors.Annotate(err, "creating LXD client") } raw := &rawProvider{ + lxdCerts: client, + lxdConfig: client, lxdInstances: client, lxdProfiles: client, lxdImages: client, - Firewaller: firewaller, + Firewaller: common.NewFirewaller(), } return raw, nil } -func newClient(ecfg *environConfig) (*lxdclient.Client, error) { - clientCfg, err := ecfg.clientConfig() - if err != nil { +type readFileFunc func(string) ([]byte, error) +type runCommandFunc func(string, ...string) (string, error) + +func newClient( + spec environs.CloudSpec, + readFile readFileFunc, + runCommand runCommandFunc, +) (*lxdclient.Client, error) { + if spec.Endpoint != "" { + // We don't handle connecting to non-local lxd at present. + return nil, errors.NotValidf("endpoint %q", spec.Endpoint) + } + + config, err := getRemoteConfig(readFile, runCommand) + if errors.IsNotFound(err) { + config = &lxdclient.Config{Remote: lxdclient.Local} + } else if err != nil { return nil, errors.Trace(err) } - client, err := lxdclient.Connect(clientCfg) + client, err := lxdclient.Connect(*config, true) if err != nil { return nil, errors.Trace(err) } - return client, nil } -func newFirewaller(ecfg *environConfig) (common.Firewaller, error) { - return common.NewFirewaller(), nil +// getRemoteConfig returns a lxdclient.Config using a TCP-based remote +// if called from within an instance started by the LXD provider. Otherwise, +// it returns an errors satisfying errors.IsNotFound. 
+func getRemoteConfig(readFile readFileFunc, runCommand runCommandFunc) (*lxdclient.Config, error) { + readFileOrig := readFile + readFile = func(path string) ([]byte, error) { + data, err := readFileOrig(path) + if err != nil { + if os.IsNotExist(err) { + err = errors.NotFoundf("%s", path) + } + return nil, err + } + return data, nil + } + clientCert, err := readFile(clientCertPath) + if err != nil { + return nil, errors.Annotate(err, "reading client certificate") + } + clientKey, err := readFile(clientKeyPath) + if err != nil { + return nil, errors.Annotate(err, "reading client key") + } + serverCert, err := readFile(serverCertPath) + if err != nil { + return nil, errors.Annotate(err, "reading server certificate") + } + cert := lxdclient.NewCert(clientCert, clientKey) + hostAddress, err := getDefaultGateway(runCommand) + if err != nil { + return nil, errors.Annotate(err, "getting gateway address") + } + return &lxdclient.Config{ + lxdclient.Remote{ + Name: "remote", + Host: hostAddress, + Protocol: lxdclient.LXDProtocol, + Cert: &cert, + ServerPEMCert: string(serverCert), + }, + }, nil +} + +func getDefaultGateway(runCommand runCommandFunc) (string, error) { + out, err := runCommand("ip", "route", "list", "match", "0/0") + if err != nil { + return "", errors.Trace(err) + } + if !strings.HasPrefix(string(out), "default via") { + return "", errors.Errorf(`unexpected output from "ip route": %s`, out) + } + fields := strings.Fields(string(out)) + return fields[2], nil } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/lxd/environ_raw_test.go juju-core-2.0.0/src/github.com/juju/juju/provider/lxd/environ_raw_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/lxd/environ_raw_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/lxd/environ_raw_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,97 @@ +// Copyright 2016 Canonical Ltd. 
+// Licensed under the AGPLv3, see LICENCE file for details. + +// +build go1.3 + +package lxd + +import ( + "os" + + "github.com/juju/errors" + "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/tools/lxdclient" +) + +type environRawSuite struct { + testing.IsolationSuite + testing.Stub + readFile readFileFunc + runCommand runCommandFunc +} + +var _ = gc.Suite(&environRawSuite{}) + +func (s *environRawSuite) SetUpTest(c *gc.C) { + s.IsolationSuite.SetUpTest(c) + s.Stub.ResetCalls() + s.readFile = func(path string) ([]byte, error) { + s.AddCall("readFile", path) + if err := s.NextErr(); err != nil { + return nil, err + } + return []byte("content:" + path), nil + } + s.runCommand = func(command string, args ...string) (string, error) { + s.AddCall("runCommand", command, args) + if err := s.NextErr(); err != nil { + return "", err + } + return "default via 10.0.8.1 dev eth0", nil + } +} + +func (s *environRawSuite) TestGetRemoteConfig(c *gc.C) { + cfg, err := getRemoteConfig(s.readFile, s.runCommand) + c.Assert(err, jc.ErrorIsNil) + c.Assert(cfg, jc.DeepEquals, &lxdclient.Config{ + Remote: lxdclient.Remote{ + Name: "remote", + Host: "10.0.8.1", + Protocol: "lxd", + Cert: &lxdclient.Cert{ + CertPEM: []byte("content:/etc/juju/lxd-client.crt"), + KeyPEM: []byte("content:/etc/juju/lxd-client.key"), + }, + ServerPEMCert: "content:/etc/juju/lxd-server.crt", + }, + }) + s.Stub.CheckCalls(c, []testing.StubCall{ + {"readFile", []interface{}{"/etc/juju/lxd-client.crt"}}, + {"readFile", []interface{}{"/etc/juju/lxd-client.key"}}, + {"readFile", []interface{}{"/etc/juju/lxd-server.crt"}}, + {"runCommand", []interface{}{"ip", []string{"route", "list", "match", "0/0"}}}, + }) +} + +func (s *environRawSuite) TestGetRemoteConfigFileNotExist(c *gc.C) { + s.SetErrors(os.ErrNotExist) + _, err := getRemoteConfig(s.readFile, s.runCommand) + // os.IsNotExist is translated to errors.IsNotFound + c.Assert(err, jc.Satisfies, 
errors.IsNotFound) + c.Assert(err, gc.ErrorMatches, "reading client certificate: /etc/juju/lxd-client.crt not found") +} + +func (s *environRawSuite) TestGetRemoteConfigFileError(c *gc.C) { + s.SetErrors(nil, errors.New("i/o error")) + _, err := getRemoteConfig(s.readFile, s.runCommand) + c.Assert(err, gc.ErrorMatches, "reading client key: i/o error") +} + +func (s *environRawSuite) TestGetRemoteConfigIPRouteFormatError(c *gc.C) { + s.runCommand = func(string, ...string) (string, error) { + return "this is not the prefix you're looking for", nil + } + _, err := getRemoteConfig(s.readFile, s.runCommand) + c.Assert(err, gc.ErrorMatches, + `getting gateway address: unexpected output from "ip route": this is not the prefix you're looking for`) +} + +func (s *environRawSuite) TestGetRemoteConfigIPRouteCommandError(c *gc.C) { + s.SetErrors(nil, nil, nil, errors.New("buh bow")) + _, err := getRemoteConfig(s.readFile, s.runCommand) + c.Assert(err, gc.ErrorMatches, `getting gateway address: buh bow`) +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/lxd/environ_test.go juju-core-2.0.0/src/github.com/juju/juju/provider/lxd/environ_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/lxd/environ_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/lxd/environ_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -48,17 +48,8 @@ func (s *environSuite) TestSetConfigNoAPI(c *gc.C) { err := s.Env.SetConfig(s.Config) - c.Assert(err, jc.ErrorIsNil) - - s.Stub.CheckCallNames(c, "asNonLocal") -} -func (s *environSuite) TestSetConfigMissing(c *gc.C) { - lxd.UnsetEnvConfig(s.Env) - - err := s.Env.SetConfig(s.Config) - - c.Check(err, gc.ErrorMatches, "cannot set config on uninitialized env") + c.Assert(err, jc.ErrorIsNil) } func (s *environSuite) TestConfig(c *gc.C) { @@ -135,16 +126,16 @@ s.Stub.ResetCalls() // machine0 is in the controller model. 
- machine0 := s.NewRawInstance(c, "juju-whatever-machine-0") + machine0 := s.NewRawInstance(c, "juju-controller-machine-0") machine0.InstanceSummary.Metadata["juju-model-uuid"] = s.Config.UUID() machine0.InstanceSummary.Metadata["juju-controller-uuid"] = s.Config.UUID() // machine1 is not in the controller model, but managed // by the same controller. - machine1 := s.NewRawInstance(c, "juju-whatever-machine-1") + machine1 := s.NewRawInstance(c, "juju-hosted-machine-1") machine1.InstanceSummary.Metadata["juju-model-uuid"] = "not-" + s.Config.UUID() machine1.InstanceSummary.Metadata["juju-controller-uuid"] = s.Config.UUID() // machine2 is not managed by the same controller. - machine2 := s.NewRawInstance(c, "juju-whatever-machine-2") + machine2 := s.NewRawInstance(c, "juju-controller-machine-2") machine2.InstanceSummary.Metadata["juju-model-uuid"] = "not-" + s.Config.UUID() machine2.InstanceSummary.Metadata["juju-controller-uuid"] = "not-" + s.Config.UUID() s.Client.Insts = append(s.Client.Insts, *machine0, *machine1, *machine2) @@ -152,17 +143,20 @@ err := s.Env.DestroyController(s.Config.UUID()) c.Assert(err, jc.ErrorIsNil) - prefix := s.Prefix() fwname := common.EnvFullName(s.Env.Config().UUID()) s.Stub.CheckCalls(c, []gitjujutesting.StubCall{ {"Ports", []interface{}{fwname}}, {"Destroy", nil}, - {"Instances", []interface{}{prefix, lxdclient.AliveStatuses}}, - {"RemoveInstances", []interface{}{prefix, []string{machine1.Name}}}, + {"Instances", []interface{}{"juju-", lxdclient.AliveStatuses}}, + {"Instances", []interface{}{"juju-", []string{}}}, + {"RemoveInstances", []interface{}{"juju-", []string{machine1.Name}}}, }) } func (s *environSuite) TestPrepareForBootstrap(c *gc.C) { err := s.Env.PrepareForBootstrap(envtesting.BootstrapContext(c)) c.Assert(err, jc.ErrorIsNil) + s.Stub.CheckCalls(c, []gitjujutesting.StubCall{ + {"SetConfig", []interface{}{"core.https_address", "[::]"}}, + }) } diff -Nru 
juju-core-2.0~beta15/src/github.com/juju/juju/provider/lxd/instance.go juju-core-2.0.0/src/github.com/juju/juju/provider/lxd/instance.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/lxd/instance.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/lxd/instance.go 2016-10-13 14:31:49.000000000 +0000 @@ -35,17 +35,17 @@ // Status implements instance.Instance. func (inst *environInstance) Status() instance.InstanceStatus { - jujuStatus := status.StatusPending + jujuStatus := status.Pending instStatus := inst.raw.Status() switch instStatus { case lxdclient.StatusStarting, lxdclient.StatusStarted: - jujuStatus = status.StatusAllocating + jujuStatus = status.Allocating case lxdclient.StatusRunning: - jujuStatus = status.StatusRunning + jujuStatus = status.Running case lxdclient.StatusFreezing, lxdclient.StatusFrozen, lxdclient.StatusThawed, lxdclient.StatusStopping, lxdclient.StatusStopped: - jujuStatus = status.StatusEmpty + jujuStatus = status.Empty default: - jujuStatus = status.StatusEmpty + jujuStatus = status.Empty } return instance.InstanceStatus{ Status: jujuStatus, diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/lxd/lxd.go juju-core-2.0.0/src/github.com/juju/juju/provider/lxd/lxd.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/lxd/lxd.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/lxd/lxd.go 2016-10-13 14:31:49.000000000 +0000 @@ -13,13 +13,8 @@ // The metadata keys used when creating new instances. const ( - metadataKeyCloudInit = lxdclient.UserdataKey -) - -// Common metadata values used when creating new instances. 
-const ( - metadataValueTrue = "true" - metadataValueFalse = "false" + metadataKeyCloudInit = lxdclient.UserdataKey + metadataKeyCertificateFingerprint = lxdclient.CertificateFingerprintKey ) var ( diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/lxd/provider.go juju-core-2.0.0/src/github.com/juju/juju/provider/lxd/provider.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/lxd/provider.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/lxd/provider.go 2016-10-13 14:31:49.000000000 +0000 @@ -7,6 +7,7 @@ import ( "github.com/juju/errors" + "github.com/juju/schema" "gopkg.in/juju/environschema.v1" "github.com/juju/juju/cloud" @@ -23,60 +24,28 @@ // Open implements environs.EnvironProvider. func (environProvider) Open(args environs.OpenParams) (environs.Environ, error) { + if err := validateCloudSpec(args.Cloud); err != nil { + return nil, errors.Annotate(err, "validating cloud spec") + } // TODO(ericsnow) verify prerequisites (see provider/local/prereq.go)? - // TODO(ericsnow) do something similar to correctLocalhostURLs() - // (in provider/local/environprovider.go)? - - env, err := newEnviron(args.Config, newRawProvider) + env, err := newEnviron(args.Cloud, args.Config, newRawProvider) return env, errors.Trace(err) } // PrepareConfig implements environs.EnvironProvider. func (p environProvider) PrepareConfig(args environs.PrepareConfigParams) (*config.Config, error) { - return args.Config, nil -} - -// RestrictedConfigAttributes is specified in the EnvironProvider interface. -func (environProvider) RestrictedConfigAttributes() []string { - return []string{ - "remote-url", - "client-cert", - "client-key", - "server-cert", + if err := validateCloudSpec(args.Cloud); err != nil { + return nil, errors.Annotate(err, "validating cloud spec") } + return args.Config, nil } // Validate implements environs.EnvironProvider. 
func (environProvider) Validate(cfg, old *config.Config) (valid *config.Config, err error) { - if old == nil { - ecfg, err := newValidConfig(cfg, configDefaults) - if err != nil { - return nil, errors.Annotate(err, "invalid config") - } - return ecfg.Config, nil - } - - // The defaults should be set already, so we pass nil. - ecfg, err := newValidConfig(old, nil) - if err != nil { + if _, err := newValidConfig(cfg); err != nil { return nil, errors.Annotate(err, "invalid base config") } - - if err := ecfg.update(cfg); err != nil { - return nil, errors.Annotate(err, "invalid config change") - } - - return ecfg.Config, nil -} - -// SecretAttrs implements environs.EnvironProvider. -func (environProvider) SecretAttrs(cfg *config.Config) (map[string]string, error) { - // The defaults should be set already, so we pass nil. - ecfg, err := newValidConfig(cfg, nil) - if err != nil { - return nil, errors.Trace(err) - } - return ecfg.secret(), nil + return cfg, nil } // DetectRegions implements environs.CloudRegionDetector. @@ -95,3 +64,30 @@ } return fields } + +func validateCloudSpec(spec environs.CloudSpec) error { + if err := spec.Validate(); err != nil { + return errors.Trace(err) + } + if spec.Endpoint != "" { + return errors.NotValidf("non-empty endpoint %q", spec.Endpoint) + } + if spec.Credential != nil { + if authType := spec.Credential.AuthType(); authType != cloud.EmptyAuthType { + return errors.NotSupportedf("%q auth-type", authType) + } + } + return nil +} + +// ConfigSchema returns extra config attributes specific +// to this provider only. +func (p environProvider) ConfigSchema() schema.Fields { + return configFields +} + +// ConfigDefaults returns the default values for the +// provider specific config attributes. 
+func (p environProvider) ConfigDefaults() schema.Defaults { + return configDefaults +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/lxd/provider_test.go juju-core-2.0.0/src/github.com/juju/juju/provider/lxd/provider_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/lxd/provider_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/lxd/provider_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -26,7 +26,7 @@ func skipIfWily(c *gc.C) { if series.HostSeries() == "wily" { cfg, _ := lxdclient.Config{}.WithDefaults() - _, err := lxdclient.Connect(cfg) + _, err := lxdclient.Connect(cfg, false) // We try to create a client here. On wily this should fail, because // the default 0.20 lxd version should make juju/tools/lxdclient return // an error. @@ -74,13 +74,6 @@ c.Check(s.Config.AllAttrs(), gc.DeepEquals, validAttrs) } -func (s *providerSuite) TestSecretAttrs(c *gc.C) { - obtainedAttrs, err := s.provider.SecretAttrs(s.Config) - c.Assert(err, jc.ErrorIsNil) - - c.Check(obtainedAttrs, gc.DeepEquals, map[string]string{"client-key": ""}) -} - type ProviderFunctionalSuite struct { lxd.BaseSuite @@ -116,8 +109,32 @@ func (s *ProviderFunctionalSuite) TestPrepareConfig(c *gc.C) { cfg, err := s.provider.PrepareConfig(environs.PrepareConfigParams{ + Cloud: lxdCloudSpec(), Config: s.Config, }) c.Assert(err, jc.ErrorIsNil) c.Check(cfg, gc.NotNil) } + +func (s *ProviderFunctionalSuite) TestPrepareConfigUnsupportedAuthType(c *gc.C) { + cred := cloud.NewCredential(cloud.CertificateAuthType, nil) + _, err := s.provider.PrepareConfig(environs.PrepareConfigParams{ + Cloud: environs.CloudSpec{ + Type: "lxd", + Name: "remotehost", + Credential: &cred, + }, + }) + c.Assert(err, gc.ErrorMatches, `validating cloud spec: "certificate" auth-type not supported`) +} + +func (s *ProviderFunctionalSuite) TestPrepareConfigNonEmptyEndpoint(c *gc.C) { + _, err := s.provider.PrepareConfig(environs.PrepareConfigParams{ + 
Cloud: environs.CloudSpec{ + Type: "lxd", + Name: "remotehost", + Endpoint: "1.2.3.4", + }, + }) + c.Assert(err, gc.ErrorMatches, `validating cloud spec: non-empty endpoint "1.2.3.4" not valid`) +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/lxd/storage.go juju-core-2.0.0/src/github.com/juju/juju/provider/lxd/storage.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/lxd/storage.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/lxd/storage.go 2016-10-13 14:31:49.000000000 +0000 @@ -12,8 +12,8 @@ ) // StorageProviderTypes implements storage.ProviderRegistry. -func (*environ) StorageProviderTypes() []storage.ProviderType { - return nil +func (*environ) StorageProviderTypes() ([]storage.ProviderType, error) { + return nil, nil } // StorageProvider implements storage.ProviderRegistry. diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/lxd/testing_test.go juju-core-2.0.0/src/github.com/juju/juju/provider/lxd/testing_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/lxd/testing_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/lxd/testing_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -6,14 +6,13 @@ package lxd import ( - "crypto/tls" - "encoding/pem" "os" "github.com/juju/errors" gitjujutesting "github.com/juju/testing" jc "github.com/juju/testing/checkers" "github.com/juju/utils/arch" + "github.com/lxc/lxd/shared" gc "gopkg.in/check.v1" "github.com/juju/juju/cloudconfig/instancecfg" @@ -30,6 +29,11 @@ "github.com/juju/version" ) +// Ensure LXD provider supports the expected interfaces. +var ( + _ config.ConfigSchemaSource = (*environProvider)(nil) +) + // These values are stub LXD client credentials for use in tests. const ( PublicKey = `-----BEGIN CERTIFICATE----- @@ -71,12 +75,8 @@ // These are stub config values for use in tests. 
var ( ConfigAttrs = testing.FakeConfig().Merge(testing.Attrs{ - "type": "lxd", - "remote-url": "", - "client-cert": "", - "client-key": "", - "server-cert": "", - "uuid": "2d02eeac-9dbb-11e4-89d3-123b93f75cba", + "type": "lxd", + "uuid": "2d02eeac-9dbb-11e4-89d3-123b93f75cba", }) ) @@ -185,7 +185,7 @@ s.Metadata = map[string]string{ // userdata tags.JujuIsController: "true", - tags.JujuController: testing.ModelTag.Id(), + tags.JujuController: testing.ControllerTag.Id(), tags.JujuModel: s.Config.UUID(), metadataKeyCloudInit: string(userData), } @@ -220,7 +220,7 @@ func (s *BaseSuiteUnpatched) setConfig(c *gc.C, cfg *config.Config) { s.Config = cfg - ecfg, err := newValidConfig(cfg, configDefaults) + ecfg, err := newValidConfig(cfg) c.Assert(err, jc.ErrorIsNil) s.EnvConfig = ecfg uuid := cfg.UUID() @@ -296,7 +296,6 @@ func (s *BaseSuite) SetUpSuite(c *gc.C) { s.BaseSuiteUnpatched.SetUpSuite(c) // Do this *before* s.initEnv() gets called in BaseSuiteUnpatched.SetUpTest - s.PatchValue(&asNonLocal, s.asNonLocal) } func (s *BaseSuite) SetUpTest(c *gc.C) { @@ -309,6 +308,8 @@ // Patch out all expensive external deps. 
s.Env.raw = &rawProvider{ + lxdCerts: s.Client, + lxdConfig: s.Client, lxdInstances: s.Client, lxdImages: s.Client, Firewaller: s.Firewaller, @@ -320,18 +321,6 @@ s.Stub.CheckCalls(c, nil) } -func (s *BaseSuite) asNonLocal(clientCfg lxdclient.Config) (lxdclient.Config, error) { - if s.Stub == nil { - return clientCfg, nil - } - s.Stub.AddCall("asNonLocal", clientCfg) - if err := s.Stub.NextErr(); err != nil { - return clientCfg, errors.Trace(err) - } - - return clientCfg, nil -} - func NewBaseConfig(c *gc.C) *config.Config { var err error cfg := testing.ModelConfig(c) @@ -356,32 +345,6 @@ } type ConfigValues struct { - RemoteURL string - ClientCert string - ClientKey string - ServerCert string -} - -func (cv ConfigValues) CheckCert(c *gc.C) { - certPEM := []byte(cv.ClientCert) - keyPEM := []byte(cv.ClientKey) - - _, err := tls.X509KeyPair(certPEM, keyPEM) - c.Check(err, jc.ErrorIsNil) - - block, remainder := pem.Decode(certPEM) - c.Check(block.Type, gc.Equals, "CERTIFICATE") - c.Check(remainder, gc.HasLen, 0) - - block, remainder = pem.Decode(keyPEM) - c.Check(block.Type, gc.Equals, "RSA PRIVATE KEY") - c.Check(remainder, gc.HasLen, 0) - - if cv.ServerCert != "" { - block, remainder = pem.Decode([]byte(cv.ServerCert)) - c.Check(block.Type, gc.Equals, "CERTIFICATE") - c.Check(remainder, gc.HasLen, 1) - } } type Config struct { @@ -393,16 +356,6 @@ return &Config{ecfg} } -func NewValidConfig(cfg *config.Config) (*Config, error) { - ecfg, err := newValidConfig(cfg, nil) - return &Config{ecfg}, err -} - -func NewValidDefaultConfig(cfg *config.Config) (*Config, error) { - ecfg, err := newValidConfig(cfg, configDefaults) - return &Config{ecfg}, err -} - func (ecfg *Config) Values(c *gc.C) (ConfigValues, map[string]interface{}) { c.Assert(ecfg.attrs, jc.DeepEquals, ecfg.UnknownAttrs()) @@ -410,14 +363,6 @@ extras := make(map[string]interface{}) for k, v := range ecfg.attrs { switch k { - case cfgRemoteURL: - values.RemoteURL = v.(string) - case cfgClientCert: - 
values.ClientCert = v.(string) - case cfgClientKey: - values.ClientKey = v.(string) - case cfgServerPEMCert: - values.ServerCert = v.(string) default: extras[k] = v } @@ -435,15 +380,6 @@ return ecfg.validate() } -func (ecfg *Config) ClientConfig() (lxdclient.Config, error) { - return ecfg.clientConfig() -} - -func (ecfg *Config) UpdateForClientConfig(clientCfg lxdclient.Config) (*Config, error) { - updated, err := ecfg.updateForClientConfig(clientCfg) - return &Config{updated}, err -} - type stubCommon struct { stub *gitjujutesting.Stub @@ -524,6 +460,33 @@ }}, nil } +func (conn *StubClient) AddCert(cert lxdclient.Cert) error { + conn.AddCall("AddCert", cert) + return conn.NextErr() +} + +func (conn *StubClient) RemoveCertByFingerprint(fingerprint string) error { + conn.AddCall("RemoveCertByFingerprint", fingerprint) + return conn.NextErr() +} + +func (conn *StubClient) ServerStatus() (*shared.ServerState, error) { + conn.AddCall("ServerStatus") + if err := conn.NextErr(); err != nil { + return nil, err + } + return &shared.ServerState{ + Environment: shared.ServerStateEnvironment{ + Certificate: "server-cert", + }, + }, nil +} + +func (conn *StubClient) SetConfig(k, v string) error { + conn.AddCall("SetConfig", k, v) + return conn.NextErr() +} + // TODO(ericsnow) Move stubFirewaller to environs/testing or provider/common/testing. 
type stubFirewaller struct { diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/maas/add-juju-bridge.py juju-core-2.0.0/src/github.com/juju/juju/provider/maas/add-juju-bridge.py --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/maas/add-juju-bridge.py 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/maas/add-juju-bridge.py 2016-10-13 14:31:49.000000000 +0000 @@ -69,17 +69,26 @@ options = [] _, self.name, self.family, self.method = definition.split() self.options = options + self.is_loopback = self.method == 'loopback' self.is_bonded = [x for x in self.options if "bond-" in x] + self.has_bond_master_option, self.bond_master_options = self.has_option(['bond-master']) self.is_alias = ":" in self.name self.is_vlan = [x for x in self.options if x.startswith("vlan-raw-device")] - self.is_active = self.method == "dhcp" or self.method == "static" - self.is_bridged = [x for x in self.options if x.startswith("bridge_ports ")] + self.is_bridged, self.bridge_ports = self.has_option(['bridge_ports']) self.has_auto_stanza = None self.parent = None def __str__(self): return self.name + def has_option(self, options): + for o in self.options: + words = o.split() + ident = words[0] + if ident in options: + return True, words[1:] + return False, [] + @classmethod def prune_options(cls, options, invalid_options): result = [] @@ -90,14 +99,14 @@ return result # Returns an ordered set of stanzas to bridge this interface. - def bridge(self, prefix, bridge_name): + def _bridge(self, prefix, bridge_name): if bridge_name is None: bridge_name = prefix + self.name # Note: the testing order here is significant. 
- if not self.is_active or self.is_bridged: + if self.is_loopback or self.is_bridged or self.has_bond_master_option: return self._bridge_unchanged() elif self.is_alias: - if self.parent and self.parent.iface and (not self.parent.iface.is_active or self.parent.iface.is_bridged): + if self.parent and self.parent.iface and self.parent.iface.is_bridged: # if we didn't change the parent interface # then we don't change the aliases neither. return self._bridge_unchanged() @@ -208,6 +217,7 @@ s.iface.has_auto_stanza = s.iface.name in physical_interfaces self._connect_aliases() + self._bridged_interfaces = self._find_bridged_ifaces() def _parse_stanza(self, stanza_line, iterable): stanza_options = [] @@ -242,6 +252,15 @@ if parent_name in ifaces: alias.iface.parent = ifaces[parent_name] + def _find_bridged_ifaces(self): + bridged_ifaces = {} + for stanza in self._stanzas: + if not stanza.is_logical_interface: + continue + if stanza.iface.is_bridged: + bridged_ifaces[stanza.iface.name] = stanza.iface + return bridged_ifaces + def _physical_interfaces(self): return {x.phy.name: x.phy for x in [y for y in self._stanzas if y.is_physical_interface]} @@ -249,6 +268,32 @@ for s in self._stanzas: yield s + def _is_already_bridged(self, name, bridge_port): + iface = self._bridged_interfaces.get(name, None) + if iface: + return bridge_port in iface.bridge_ports + return False + + def bridge(self, interface_names_to_bridge, bridge_prefix, bridge_name): + bridged_stanzas = [] + for s in self.stanzas(): + if s.is_logical_interface: + if s.iface.name not in interface_names_to_bridge: + if s.iface.has_auto_stanza: + bridged_stanzas.append(AutoStanza(s.iface.name)) + bridged_stanzas.append(s) + else: + existing_bridge_name = bridge_prefix + s.iface.name + if self._is_already_bridged(existing_bridge_name, s.iface.name): + if s.iface.has_auto_stanza: + bridged_stanzas.append(AutoStanza(s.iface.name)) + bridged_stanzas.append(s) + else: + bridged_stanzas.extend(s.iface._bridge(bridge_prefix, 
bridge_name)) + elif not s.is_physical_interface: + bridged_stanzas.append(s) + return bridged_stanzas + def uniq_append(dst, src): for x in src: @@ -261,7 +306,7 @@ """Convenience function to create a new "iface" stanza. Maintains original options order but removes duplicates with the -exception of 'dns-*' options which are normlised as required by +exception of 'dns-*' options which are normalised as required by resolvconf(8) and all the dns-* options are moved to the end. """ @@ -349,42 +394,25 @@ parser.add_argument('--bridge-prefix', help="bridge prefix", type=str, required=False, default='br-') parser.add_argument('--one-time-backup', help='A one time backup of filename', action='store_true', default=True, required=False) parser.add_argument('--activate', help='activate new configuration', action='store_true', default=False, required=False) - parser.add_argument('--interface-to-bridge', help="interface to bridge", type=str, required=False) + parser.add_argument('--interfaces-to-bridge', help="interfaces to bridge; space delimited", type=str, required=True) parser.add_argument('--bridge-name', help="bridge name", type=str, required=False) parser.add_argument('filename', help="interfaces(5) based filename") return parser def main(args): - if args.bridge_name and args.interface_to_bridge is None: - sys.stderr.write("error: --interface-to-bridge required when using --bridge-name\n") - exit(1) + interfaces = args.interfaces_to_bridge.split() - if args.interface_to_bridge and args.bridge_name is None: - sys.stderr.write("error: --bridge-name required when using --interface-to-bridge\n") + if len(interfaces) == 0: + sys.stderr.write("error: no interfaces specified\n") exit(1) - stanzas = [] - config_parser = NetworkInterfaceParser(args.filename) + if args.bridge_name and len(interfaces) > 1: + sys.stderr.write("error: cannot use single bridge name '{}' against multiple interface names\n".format(args.bridge_name)) + exit(1) - # Bridging requires modifying 'auto' and 
'iface' stanzas only. - # Calling .bridge() will return a set of stanzas that cover - # both of those stanzas. The 'elif' clause catches all the other - # stanza types. The args.interface_to_bridge test is to bridge a - # single interface only, which is only used for juju < 2.0. And if - # that argument is specified then args.bridge_name takes - # precedence over any args.bridge_prefix. - - for s in config_parser.stanzas(): - if s.is_logical_interface: - if args.interface_to_bridge and args.interface_to_bridge != s.iface.name: - if s.iface.has_auto_stanza: - stanzas.append(AutoStanza(s.iface.name)) - stanzas.append(s) - else: - stanzas.extend(s.iface.bridge(args.bridge_prefix, args.bridge_name)) - elif not s.is_physical_interface: - stanzas.append(s) + parser = NetworkInterfaceParser(args.filename) + stanzas = parser.bridge(interfaces, args.bridge_prefix, args.bridge_name) if not args.activate: print_stanzas(stanzas) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/maas/bridgescript.go juju-core-2.0.0/src/github.com/juju/juju/provider/maas/bridgescript.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/maas/bridgescript.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/maas/bridgescript.go 2016-10-13 14:31:49.000000000 +0000 @@ -81,17 +81,26 @@ options = [] _, self.name, self.family, self.method = definition.split() self.options = options + self.is_loopback = self.method == 'loopback' self.is_bonded = [x for x in self.options if "bond-" in x] + self.has_bond_master_option, self.bond_master_options = self.has_option(['bond-master']) self.is_alias = ":" in self.name self.is_vlan = [x for x in self.options if x.startswith("vlan-raw-device")] - self.is_active = self.method == "dhcp" or self.method == "static" - self.is_bridged = [x for x in self.options if x.startswith("bridge_ports ")] + self.is_bridged, self.bridge_ports = self.has_option(['bridge_ports']) self.has_auto_stanza = None self.parent = 
None def __str__(self): return self.name + def has_option(self, options): + for o in self.options: + words = o.split() + ident = words[0] + if ident in options: + return True, words[1:] + return False, [] + @classmethod def prune_options(cls, options, invalid_options): result = [] @@ -102,14 +111,14 @@ return result # Returns an ordered set of stanzas to bridge this interface. - def bridge(self, prefix, bridge_name): + def _bridge(self, prefix, bridge_name): if bridge_name is None: bridge_name = prefix + self.name # Note: the testing order here is significant. - if not self.is_active or self.is_bridged: + if self.is_loopback or self.is_bridged or self.has_bond_master_option: return self._bridge_unchanged() elif self.is_alias: - if self.parent and self.parent.iface and (not self.parent.iface.is_active or self.parent.iface.is_bridged): + if self.parent and self.parent.iface and self.parent.iface.is_bridged: # if we didn't change the parent interface # then we don't change the aliases neither. 
return self._bridge_unchanged() @@ -220,6 +229,7 @@ s.iface.has_auto_stanza = s.iface.name in physical_interfaces self._connect_aliases() + self._bridged_interfaces = self._find_bridged_ifaces() def _parse_stanza(self, stanza_line, iterable): stanza_options = [] @@ -254,6 +264,15 @@ if parent_name in ifaces: alias.iface.parent = ifaces[parent_name] + def _find_bridged_ifaces(self): + bridged_ifaces = {} + for stanza in self._stanzas: + if not stanza.is_logical_interface: + continue + if stanza.iface.is_bridged: + bridged_ifaces[stanza.iface.name] = stanza.iface + return bridged_ifaces + def _physical_interfaces(self): return {x.phy.name: x.phy for x in [y for y in self._stanzas if y.is_physical_interface]} @@ -261,6 +280,32 @@ for s in self._stanzas: yield s + def _is_already_bridged(self, name, bridge_port): + iface = self._bridged_interfaces.get(name, None) + if iface: + return bridge_port in iface.bridge_ports + return False + + def bridge(self, interface_names_to_bridge, bridge_prefix, bridge_name): + bridged_stanzas = [] + for s in self.stanzas(): + if s.is_logical_interface: + if s.iface.name not in interface_names_to_bridge: + if s.iface.has_auto_stanza: + bridged_stanzas.append(AutoStanza(s.iface.name)) + bridged_stanzas.append(s) + else: + existing_bridge_name = bridge_prefix + s.iface.name + if self._is_already_bridged(existing_bridge_name, s.iface.name): + if s.iface.has_auto_stanza: + bridged_stanzas.append(AutoStanza(s.iface.name)) + bridged_stanzas.append(s) + else: + bridged_stanzas.extend(s.iface._bridge(bridge_prefix, bridge_name)) + elif not s.is_physical_interface: + bridged_stanzas.append(s) + return bridged_stanzas + def uniq_append(dst, src): for x in src: @@ -273,7 +318,7 @@ """Convenience function to create a new "iface" stanza. 
Maintains original options order but removes duplicates with the -exception of 'dns-*' options which are normlised as required by +exception of 'dns-*' options which are normalised as required by resolvconf(8) and all the dns-* options are moved to the end. """ @@ -361,42 +406,25 @@ parser.add_argument('--bridge-prefix', help="bridge prefix", type=str, required=False, default='br-') parser.add_argument('--one-time-backup', help='A one time backup of filename', action='store_true', default=True, required=False) parser.add_argument('--activate', help='activate new configuration', action='store_true', default=False, required=False) - parser.add_argument('--interface-to-bridge', help="interface to bridge", type=str, required=False) + parser.add_argument('--interfaces-to-bridge', help="interfaces to bridge; space delimited", type=str, required=True) parser.add_argument('--bridge-name', help="bridge name", type=str, required=False) parser.add_argument('filename', help="interfaces(5) based filename") return parser def main(args): - if args.bridge_name and args.interface_to_bridge is None: - sys.stderr.write("error: --interface-to-bridge required when using --bridge-name\n") - exit(1) + interfaces = args.interfaces_to_bridge.split() - if args.interface_to_bridge and args.bridge_name is None: - sys.stderr.write("error: --bridge-name required when using --interface-to-bridge\n") + if len(interfaces) == 0: + sys.stderr.write("error: no interfaces specified\n") exit(1) - stanzas = [] - config_parser = NetworkInterfaceParser(args.filename) + if args.bridge_name and len(interfaces) > 1: + sys.stderr.write("error: cannot use single bridge name '{}' against multiple interface names\n".format(args.bridge_name)) + exit(1) - # Bridging requires modifying 'auto' and 'iface' stanzas only. - # Calling .bridge() will return a set of stanzas that cover - # both of those stanzas. The 'elif' clause catches all the other - # stanza types. 
The args.interface_to_bridge test is to bridge a - # single interface only, which is only used for juju < 2.0. And if - # that argument is specified then args.bridge_name takes - # precedence over any args.bridge_prefix. - - for s in config_parser.stanzas(): - if s.is_logical_interface: - if args.interface_to_bridge and args.interface_to_bridge != s.iface.name: - if s.iface.has_auto_stanza: - stanzas.append(AutoStanza(s.iface.name)) - stanzas.append(s) - else: - stanzas.extend(s.iface.bridge(args.bridge_prefix, args.bridge_name)) - elif not s.is_physical_interface: - stanzas.append(s) + parser = NetworkInterfaceParser(args.filename) + stanzas = parser.bridge(interfaces, args.bridge_prefix, args.bridge_name) if not args.activate: print_stanzas(stanzas) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/maas/bridgescript_test.go juju-core-2.0.0/src/github.com/juju/juju/provider/maas/bridgescript_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/maas/bridgescript_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/maas/bridgescript_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -63,9 +63,8 @@ for i, python := range s.pythonVersions { c.Logf("test #%v using %s", i, python) // To simplify most cases, trim trailing new lines. - initialConfig = strings.TrimSuffix(initialConfig, "\n") expectedConfig = strings.TrimSuffix(expectedConfig, "\n") - err := ioutil.WriteFile(s.testConfigPath, []byte(initialConfig), 0644) + err := ioutil.WriteFile(s.testConfigPath, []byte(strings.TrimSuffix(initialConfig, "\n")), 0644) c.Check(err, jc.ErrorIsNil) // Run the script and verify the modified config. 
output, retcode := s.runScript(c, python, s.testConfigPath, bridgePrefix, bridgeName, interfaceToBridge) @@ -74,12 +73,12 @@ } } -func (s *bridgeConfigSuite) assertScriptWithPrefix(c *gc.C, initial, expected, prefix string) { - s.assertScript(c, initial, expected, prefix, "", "") +func (s *bridgeConfigSuite) assertScriptWithPrefix(c *gc.C, initial, expected, prefix, interfaceToBridge string) { + s.assertScript(c, initial, expected, prefix, "", interfaceToBridge) } -func (s *bridgeConfigSuite) assertScriptWithDefaultPrefix(c *gc.C, initial, expected string) { - s.assertScript(c, initial, expected, "", "", "") +func (s *bridgeConfigSuite) assertScriptWithDefaultPrefix(c *gc.C, initial, expected, interfaceToBridge string) { + s.assertScript(c, initial, expected, "", "", interfaceToBridge) } func (s *bridgeConfigSuite) assertScriptWithoutPrefix(c *gc.C, initial, expected, bridgeName, interfaceToBridge string) { @@ -90,52 +89,54 @@ for i, python := range s.pythonVersions { c.Logf("test #%v using %s", i, python) _, code := s.runScript(c, python, "", "", "", "") - c.Check(code, gc.Equals, 1) + c.Check(code, gc.Equals, 2) } } func (s *bridgeConfigSuite) TestBridgeScriptWithPrefixTransformation(c *gc.C) { for i, v := range []struct { - initial string - expected string - prefix string + interfaceToBridge string + initial string + expected string + prefix string }{ - {networkDHCPInitial, networkDHCPExpected, "test-br-"}, - {networkStaticWithAliasInitial, networkStaticWithAliasExpected, "test-br-"}, - {networkDHCPWithBondInitial, networkDHCPWithBondExpected, "test-br-"}, - {networkDualNICInitial, networkDualNICExpected, "test-br-"}, - {networkMultipleAliasesInitial, networkMultipleAliasesExpected, "test-br-"}, - {networkMultipleStaticWithAliasesInitial, networkMultipleStaticWithAliasesExpected, "test-br-"}, - {networkSmorgasboardInitial, networkSmorgasboardExpected, "juju-br-"}, - {networkStaticInitial, networkStaticExpected, "test-br-"}, - {networkVLANInitial, 
networkVLANExpected, "vlan-br-"}, - {networkWithAliasInitial, networkWithAliasExpected, "test-br-"}, + {networkDHCPInterfacesToBridge, networkDHCPInitial, networkDHCPExpected, "test-br-"}, + {networkStaticWithAliasInterfacesToBridge, networkStaticWithAliasInitial, networkStaticWithAliasExpected, "test-br-"}, + {networkDHCPWithBondInterfacesToBridge, networkDHCPWithBondInitial, networkDHCPWithBondExpected, "test-br-"}, + {networkDualNICInterfacesToBridge, networkDualNICInitial, networkDualNICExpected, "test-br-"}, + {networkMultipleAliasesInterfacesToBridge, networkMultipleAliasesInitial, networkMultipleAliasesExpected, "test-br-"}, + {networkMultipleStaticWithAliasesInterfacesToBridge, networkMultipleStaticWithAliasesInitial, networkMultipleStaticWithAliasesExpected, "test-br-"}, + {networkSmorgasboardInterfacesToBridge, networkSmorgasboardInitial, networkSmorgasboardExpected, "juju-br-"}, + {networkStaticInterfacesToBridge, networkStaticInitial, networkStaticExpected, "test-br-"}, + {networkVLANInterfacesToBridge, networkVLANInitial, networkVLANExpected, "vlan-br-"}, + {networkWithAliasInterfacesToBridge, networkWithAliasInitial, networkWithAliasExpected, "test-br-"}, } { c.Logf("test #%v - expected transformation", i) - s.assertScriptWithPrefix(c, v.initial, v.expected, v.prefix) + s.assertScriptWithPrefix(c, v.initial, v.expected, v.prefix, v.interfaceToBridge) c.Logf("test #%v - idempotent transformation", i) - s.assertScriptWithPrefix(c, v.expected, v.expected, v.prefix) + s.assertScriptWithPrefix(c, v.expected, v.expected, v.prefix, v.interfaceToBridge) } } func (s *bridgeConfigSuite) TestBridgeScriptWithDefaultPrefixTransformation(c *gc.C) { for i, v := range []struct { - initial string - expected string + interfaceToBridge string + initial string + expected string }{ - {networkLoopbackOnlyInitial, networkLoopbackOnlyExpected}, - {networkStaticBondWithVLANsInitial, networkStaticBondWithVLANsExpected}, - {networkVLANWithActiveDHCPDeviceInitial, 
networkVLANWithActiveDHCPDeviceExpected}, - {networkVLANWithInactiveDeviceInitial, networkVLANWithInactiveDeviceExpected}, - {networkVLANWithMultipleNameserversInitial, networkVLANWithMultipleNameserversExpected}, - {networkWithEmptyDNSValuesInitial, networkWithEmptyDNSValuesExpected}, - {networkWithMultipleDNSValuesInitial, networkWithMultipleDNSValuesExpected}, - {networkPartiallyBridgedInitial, networkPartiallyBridgedExpected}, + {networkLoopbackOnlyInterfacesToBridge, networkLoopbackOnlyInitial, networkLoopbackOnlyExpected}, + {networkStaticBondWithVLANsInterfacesToBridge, networkStaticBondWithVLANsInitial, networkStaticBondWithVLANsExpected}, + {networkVLANWithActiveDHCPDeviceInterfacesToBridge, networkVLANWithActiveDHCPDeviceInitial, networkVLANWithActiveDHCPDeviceExpected}, + {networkVLANWithInactiveDeviceInterfacesToBridge, networkVLANWithInactiveDeviceInitial, networkVLANWithInactiveDeviceExpected}, + {networkVLANWithMultipleNameserversInterfacesToBridge, networkVLANWithMultipleNameserversInitial, networkVLANWithMultipleNameserversExpected}, + {networkWithEmptyDNSValuesInterfacesToBridge, networkWithEmptyDNSValuesInitial, networkWithEmptyDNSValuesExpected}, + {networkWithMultipleDNSValuesInterfacesToBridge, networkWithMultipleDNSValuesInitial, networkWithMultipleDNSValuesExpected}, + {networkPartiallyBridgedInterfacesToBridge, networkPartiallyBridgedInitial, networkPartiallyBridgedExpected}, } { c.Logf("test #%v - expected transformation", i) - s.assertScriptWithDefaultPrefix(c, v.initial, v.expected) + s.assertScriptWithDefaultPrefix(c, v.initial, v.expected, v.interfaceToBridge) c.Logf("test #%v - idempotent transformation", i) - s.assertScriptWithDefaultPrefix(c, v.expected, v.expected) + s.assertScriptWithDefaultPrefix(c, v.expected, v.expected, v.interfaceToBridge) } } @@ -143,24 +144,14 @@ for i, python := range s.pythonVersions { c.Logf("test #%v using %s", i, python) output, code := s.runScript(c, python, "# no content", "", "juju-br0", "") - 
c.Check(code, gc.Equals, 1) - c.Check(strings.Trim(output, "\n"), gc.Equals, "error: --interface-to-bridge required when using --bridge-name") + c.Check(code, gc.Equals, 2) + // We match very lazily here to isolate ourselves from + // the different formatting of argparse error messages + // that has occured between Python 2 and Python 3. + c.Check(strings.Trim(output, "\n"), gc.Matches, "(\n|.)*error:.*--interfaces-to-bridge.*") } } -func (s *bridgeConfigSuite) TestBridgeScriptBridgeNameArgumentRequired(c *gc.C) { - for i, python := range s.pythonVersions { - c.Logf("test #%v using %s", i, python) - output, code := s.runScript(c, python, "# no content", "", "", "eth0") - c.Check(code, gc.Equals, 1) - c.Check(strings.Trim(output, "\n"), gc.Equals, "error: --bridge-name required when using --interface-to-bridge") - } -} - -func (s *bridgeConfigSuite) TestBridgeScriptMatchingNonExistentSpecificIface(c *gc.C) { - s.assertScriptWithoutPrefix(c, networkStaticInitial, networkStaticInitial, "juju-br0", "eth1234567890") -} - func (s *bridgeConfigSuite) TestBridgeScriptMatchingExistingSpecificIfaceButMissingAutoStanza(c *gc.C) { s.assertScriptWithoutPrefix(c, networkWithExistingSpecificIfaceInitial, networkWithExistingSpecificIfaceExpected, "juju-br0", "eth1") } @@ -179,7 +170,7 @@ } if interfaceToBridge != "" { - interfaceToBridge = fmt.Sprintf("--interface-to-bridge=%q", interfaceToBridge) + interfaceToBridge = fmt.Sprintf("--interfaces-to-bridge=%q", interfaceToBridge) } script := fmt.Sprintf("%q %q %s %s %s %q\n", pythonBinary, s.testPythonScript, bridgePrefix, bridgeName, interfaceToBridge, configFile) @@ -207,6 +198,8 @@ netmask 255.255.255.0 gateway 4.3.2.1` +const networkStaticInterfacesToBridge = "eth0" + const networkStaticExpected = `auto lo iface lo inet loopback @@ -226,6 +219,8 @@ auto eth0 iface eth0 inet dhcp` +const networkDHCPInterfacesToBridge = "eth0" + const networkDHCPExpected = `auto lo iface lo inet loopback @@ -251,6 +246,8 @@ netmask 255.255.255.0 
gateway 4.3.2.1` +const networkDualNICInterfacesToBridge = "eth0 eth1" + const networkDualNICExpected = `auto lo iface lo inet loopback @@ -287,6 +284,8 @@ iface eth0:1 inet static address 1.2.3.5` +const networkWithAliasInterfacesToBridge = "eth0 eth0:1" + const networkWithAliasExpected = `auto lo iface lo inet loopback @@ -322,6 +321,8 @@ dns-nameserver 192.168.1.142` +const networkStaticWithAliasInterfacesToBridge = "eth0 eth0:1 eth0:2" + const networkStaticWithAliasExpected = `auto lo iface lo inet loopback @@ -362,6 +363,8 @@ dns-nameservers 10.17.20.200 dns-search maas` +const networkMultipleStaticWithAliasesInterfacesToBridge = "eth0 eth0:1" + const networkMultipleStaticWithAliasesExpected = `auto eth0 iface eth0 inet manual mtu 1500 @@ -414,6 +417,8 @@ dns-nameservers 10.17.20.200 dns-search maas19` +const networkDHCPWithBondInterfacesToBridge = "bond0" + const networkDHCPWithBondExpected = `auto eth0 iface eth0 inet manual bond-lacp_rate slow @@ -474,6 +479,8 @@ dns-nameservers 10.17.20.200 dns-search maas19` +const networkMultipleAliasesInterfacesToBridge = "eth0 eth1 eth10 eth10:1 eth10:2" + const networkMultipleAliasesExpected = `auto eth0 iface eth0 inet manual @@ -605,6 +612,8 @@ dns-nameservers 10.17.20.200 dns-search maas19` +const networkSmorgasboardInterfacesToBridge = "eth4 eth5 eth6 eth6:1 eth6:2 eth6:3 eth6:4 bond0 bond1" + const networkSmorgasboardExpected = `auto eth0 iface eth0 inet manual bond-lacp_rate slow @@ -748,6 +757,8 @@ dns-nameservers 10.17.20.200 dns-search maas19` +const networkVLANInterfacesToBridge = "eth0 eth0.2 eth1.3" + const networkVLANExpected = `auto eth0 iface eth0 inet manual mtu 1500 @@ -840,6 +851,8 @@ dns-nameservers 10.245.168.2 dns-search dellstack` +const networkVLANWithMultipleNameserversInterfacesToBridge = "eth0 eth1.2667 eth1.2668 eth1.2669 eth1.2670" + const networkVLANWithMultipleNameserversExpected = `auto eth0 iface eth0 inet manual mtu 1500 @@ -915,6 +928,8 @@ const networkLoopbackOnlyInitial = `auto lo 
iface lo inet loopback` +const networkLoopbackOnlyInterfacesToBridge = "lo" + const networkLoopbackOnlyExpected = `auto lo iface lo inet loopback` @@ -966,6 +981,8 @@ dns-nameservers 10.17.20.200 dns-search maas19` +const networkStaticBondWithVLANsInterfacesToBridge = "bond0 bond0.2 bond0.3" + const networkStaticBondWithVLANsExpected = `auto eth0 iface eth0 inet manual bond-master bond0 @@ -1047,6 +1064,8 @@ dns-search maas19 ` +const networkVLANWithInactiveDeviceInterfacesToBridge = "eth0 eth1.2" + const networkVLANWithInactiveDeviceExpected = `auto eth0 iface eth0 inet manual mtu 1500 @@ -1095,6 +1114,8 @@ dns-search maas19 ` +const networkVLANWithActiveDHCPDeviceInterfacesToBridge = "eth0 eth1 eth1.2" + const networkVLANWithActiveDHCPDeviceExpected = `auto eth0 iface eth0 inet manual mtu 1500 @@ -1166,6 +1187,8 @@ dns-search ubuntu juju dns-search dellstack ubuntu dellstack` +const networkWithMultipleDNSValuesInterfacesToBridge = "eth0 eth1 eth2 eth3" + const networkWithMultipleDNSValuesExpected = `auto eth0 iface eth0 inet manual mtu 1500 @@ -1234,6 +1257,8 @@ dns-search dns-sortlist` +const networkWithEmptyDNSValuesInterfacesToBridge = "eth0 eth1" + const networkWithEmptyDNSValuesExpected = `auto eth0 iface eth0 inet manual mtu 1500 @@ -1364,6 +1389,8 @@ dns-nameservers 10.38.160.10 dns-search maas` +const networkLP1532167InterfacesToBridge = "bond0 bond0.1016 bond0.161 bond0.162 bond0.163 bond1.1017 bond1.1018 bond1.1019" + const networkLP1532167Expected = `auto eth0 iface eth0 inet manual bond-lacp_rate fast @@ -1492,6 +1519,8 @@ netmask 255.255.255.0 gateway 4.3.2.1` +const networkWithExistingSpecificIfaceInterfacesToBridge = "eth0 eth1" + const networkWithExistingSpecificIfaceExpected = `auto lo iface lo inet loopback @@ -1527,6 +1556,8 @@ netmask 255.255.255.0 gateway 4.3.2.1` +const networkPartiallyBridgedInterfacesToBridge = "br-eth0 eth1" + const networkPartiallyBridgedExpected = `auto lo iface lo inet loopback diff -Nru 
juju-core-2.0~beta15/src/github.com/juju/juju/provider/maas/config.go juju-core-2.0.0/src/github.com/juju/juju/provider/maas/config.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/maas/config.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/maas/config.go 2016-10-13 14:31:49.000000000 +0000 @@ -47,6 +47,18 @@ return fields } +// ConfigSchema returns extra config attributes specific +// to this provider only. +func (p maasEnvironProvider) ConfigSchema() schema.Fields { + return configFields +} + +// ConfigDefaults returns the default values for the +// provider specific config attributes. +func (p maasEnvironProvider) ConfigDefaults() schema.Defaults { + return configDefaults +} + func (prov maasEnvironProvider) Validate(cfg, oldCfg *config.Config) (*config.Config, error) { // Validate base configuration change before validating MAAS specifics. err := config.Validate(cfg, oldCfg) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/maas/config_test.go juju-core-2.0.0/src/github.com/juju/juju/provider/maas/config_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/maas/config_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/maas/config_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -13,6 +13,11 @@ "github.com/juju/juju/testing" ) +// Ensure MAAS provider supports the expected interfaces. 
+var ( + _ config.ConfigSchemaSource = (*maasEnvironProvider)(nil) +) + type configSuite struct { testing.BaseSuite } @@ -43,11 +48,11 @@ func (s *configSuite) SetUpTest(c *gc.C) { s.BaseSuite.SetUpTest(c) - mockCapabilities := func(client *gomaasapi.MAASObject) (set.Strings, error) { + mockCapabilities := func(*gomaasapi.MAASObject, string) (set.Strings, error) { return set.NewStrings("network-deployment-ubuntu"), nil } s.PatchValue(&GetCapabilities, mockCapabilities) - mockGetController := func(maasServer, apiKey string) (gomaasapi.Controller, error) { + mockGetController := func(string, string) (gomaasapi.Controller, error) { return nil, gomaasapi.NewUnsupportedVersionError("oops") } s.PatchValue(&GetMAAS2Controller, mockGetController) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/maas/credentials.go juju-core-2.0.0/src/github.com/juju/juju/provider/maas/credentials.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/maas/credentials.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/maas/credentials.go 2016-10-13 14:31:49.000000000 +0000 @@ -7,6 +7,7 @@ "encoding/json" "fmt" "io/ioutil" + "os" "path/filepath" "strings" @@ -14,6 +15,7 @@ "github.com/juju/utils" "github.com/juju/juju/cloud" + "github.com/juju/juju/environs" ) const ( @@ -40,6 +42,9 @@ // {"Server": "http:///MAAS", "OAuth": ""} maasrc := filepath.Join(utils.Home(), ".maasrc") fileBytes, err := ioutil.ReadFile(maasrc) + if os.IsNotExist(err) { + return nil, errors.NotFoundf("maas credentials") + } if err != nil { return nil, errors.Trace(err) } @@ -78,3 +83,8 @@ } var errMalformedMaasOAuth = errors.New("malformed maas-oauth (3 items separated by colons)") + +// FinalizeCredential is part of the environs.ProviderCredentials interface. 
+func (environProviderCredentials) FinalizeCredential(_ environs.FinalizeCredentialContext, args environs.FinalizeCredentialParams) (*cloud.Credential, error) { + return &args.Credential, nil +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/maas/credentials_test.go juju-core-2.0.0/src/github.com/juju/juju/provider/maas/credentials_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/maas/credentials_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/maas/credentials_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,6 +4,7 @@ package maas_test import ( + "github.com/juju/errors" "github.com/juju/testing" jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" @@ -75,3 +76,8 @@ expected.Label = "MAAS credential for unspecified server" c.Assert(creds.AuthCredentials["default"], jc.DeepEquals, expected) } + +func (s *credentialsSuite) TestDetectCredentialsNoFile(c *gc.C) { + _, err := s.provider.DetectCredentials() + c.Assert(err, jc.Satisfies, errors.IsNotFound) +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/maas/devices.go juju-core-2.0.0/src/github.com/juju/juju/provider/maas/devices.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/maas/devices.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/maas/devices.go 2016-10-13 14:31:49.000000000 +0000 @@ -212,23 +212,17 @@ } for _, link := range nic.Links { - switch link.Mode { - case modeUnknown: - nicInfo.ConfigType = network.ConfigUnknown - case modeDHCP: - nicInfo.ConfigType = network.ConfigDHCP - case modeStatic, modeLinkUp: - nicInfo.ConfigType = network.ConfigStatic - default: - nicInfo.ConfigType = network.ConfigManual - } + nicInfo.ConfigType = maasLinkToInterfaceConfigType(string(link.Mode)) if link.IPAddress == "" { logger.Debugf("device %q interface %q has no address", deviceID, nic.Name) + interfaceInfo = append(interfaceInfo, nicInfo) continue } + if 
link.Subnet == nil { logger.Debugf("device %q interface %q link %d missing subnet", deviceID, nic.Name, link.ID) + interfaceInfo = append(interfaceInfo, nicInfo) continue } @@ -290,21 +284,12 @@ } for _, link := range nic.Links() { - mode := maasLinkMode(link.Mode()) - switch mode { - case modeUnknown: - nicInfo.ConfigType = network.ConfigUnknown - case modeDHCP: - nicInfo.ConfigType = network.ConfigDHCP - case modeStatic, modeLinkUp: - nicInfo.ConfigType = network.ConfigStatic - default: - nicInfo.ConfigType = network.ConfigManual - } + nicInfo.ConfigType = maasLinkToInterfaceConfigType(link.Mode()) subnet := link.Subnet() if link.IPAddress() == "" || subnet == nil { logger.Debugf("device %q interface %q has no address", deviceID, nic.Name()) + interfaceInfo = append(interfaceInfo, nicInfo) continue } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/maas/environ.go juju-core-2.0.0/src/github.com/juju/juju/provider/maas/environ.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/maas/environ.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/maas/environ.go 2016-10-13 14:31:49.000000000 +0000 @@ -29,6 +29,7 @@ "github.com/juju/juju/environs" "github.com/juju/juju/environs/config" "github.com/juju/juju/environs/storage" + "github.com/juju/juju/environs/tags" "github.com/juju/juju/instance" "github.com/juju/juju/network" "github.com/juju/juju/provider/common" @@ -53,6 +54,8 @@ Delay: 200 * time.Millisecond, } +const statusPollInterval = 5 * time.Second + var ( ReleaseNodes = releaseNodes DeploymentStatusCall = deploymentStatusCall @@ -79,9 +82,6 @@ // archMutex gates access to supportedArchitectures archMutex sync.Mutex - // supportedArchitectures caches the architectures - // for which images can be instantiated. - supportedArchitectures []string // ecfgMutex protects the *Unlocked fields below. 
ecfgMutex sync.Mutex @@ -182,10 +182,41 @@ return bsResult, nil } +// BootstrapMessage is part of the Environ interface. +func (env *maasEnviron) BootstrapMessage() string { + return "" +} + // ControllerInstances is specified in the Environ interface. func (env *maasEnviron) ControllerInstances(controllerUUID string) ([]instance.Id, error) { - // TODO(wallyworld) - tag instances with controller UUID so we can use that - return common.ProviderStateInstances(env, env.Storage()) + if !env.usingMAAS2() { + return env.controllerInstances1(controllerUUID) + } + return env.controllerInstances2(controllerUUID) +} + +func (env *maasEnviron) controllerInstances1(controllerUUID string) ([]instance.Id, error) { + return common.ProviderStateInstances(env.Storage()) +} + +func (env *maasEnviron) controllerInstances2(controllerUUID string) ([]instance.Id, error) { + instances, err := env.instances2(gomaasapi.MachinesArgs{ + OwnerData: map[string]string{ + tags.JujuIsController: "true", + tags.JujuController: controllerUUID, + }, + }) + if err != nil { + return nil, errors.Trace(err) + } + if len(instances) == 0 { + return nil, environs.ErrNotBootstrapped + } + ids := make([]instance.Id, len(instances)) + for i := range instances { + ids[i] = instances[i].Id() + } + return ids, nil } // ecfg returns the environment's maasModelConfig, and protects it with a @@ -247,12 +278,12 @@ return errors.Trace(err) } env.maasClientUnlocked = gomaasapi.NewMAAS(*authClient) - caps, err := GetCapabilities(env.maasClientUnlocked) + caps, err := GetCapabilities(env.maasClientUnlocked, maasServer) if err != nil { return errors.Trace(err) } if !caps.Contains(capNetworkDeploymentUbuntu) { - return errors.NotSupportedf("MAAS 1.9 or more recent is required") + return errors.NewNotSupported(nil, "MAAS 1.9 or more recent is required") } case err != nil: return errors.Trace(err) @@ -266,20 +297,11 @@ func (env *maasEnviron) getSupportedArchitectures() ([]string, error) { env.archMutex.Lock() defer 
env.archMutex.Unlock() - if env.supportedArchitectures != nil { - return env.supportedArchitectures, nil - } - fetchArchitectures := env.allArchitecturesWithFallback if env.usingMAAS2() { fetchArchitectures = env.allArchitectures2 } - architectures, err := fetchArchitectures() - if err != nil { - return nil, errors.Trace(err) - } - env.supportedArchitectures = architectures - return env.supportedArchitectures, nil + return fetchArchitectures() } // SupportsSpaces is specified on environs.Networking. @@ -582,7 +604,7 @@ // getCapabilities asks the MAAS server for its capabilities, if // supported by the server. -func getCapabilities(client *gomaasapi.MAASObject) (set.Strings, error) { +func getCapabilities(client *gomaasapi.MAASObject, serverURL string) (set.Strings, error) { caps := make(set.Strings) var result gomaasapi.JSONObject var err error @@ -592,7 +614,12 @@ result, err = version.CallGet("", nil) if err != nil { if err, ok := errors.Cause(err).(gomaasapi.ServerError); ok && err.StatusCode == 404 { - return caps, errors.NotSupportedf("MAAS version 1.9 or more recent is required") + message := "could not connect to MAAS controller - check the endpoint is correct" + trimmedURL := strings.TrimRight(serverURL, "/") + if !strings.HasSuffix(trimmedURL, "/MAAS") { + message += " (it normally ends with /MAAS)" + } + return caps, errors.NewNotSupported(nil, message) } } else { break @@ -711,10 +738,6 @@ } if nodeName != "" { acquireParams.Hostname = nodeName - } else if cons.Arch == nil { - logger.Warningf( - "no architecture was specified, acquiring an arbitrary node", - ) } machine, constraintMatches, err := environ.maasController.AllocateMachine(acquireParams) @@ -732,6 +755,15 @@ volumes []volumeInfo, ) (gomaasapi.MAASObject, error) { + // TODO(axw) 2014-08-18 #1358219 + // We should be requesting preferred architectures if unspecified, + // like in the other providers. 
+ // + // This is slightly complicated in MAAS as there are a finite + // number of each architecture; preference may also conflict with + // other constraints, such as tags. Thus, a preference becomes a + // demand (which may fail) if not handled properly. + acquireParams := convertConstraints(cons) positiveSpaceNames, negativeSpaceNames := convertSpacesFromConstraints(cons.Spaces) positiveSpaces, negativeSpaces, err := environ.spaceNamesToSpaceInfo(positiveSpaceNames, negativeSpaceNames) @@ -750,22 +782,6 @@ } if nodeName != "" { acquireParams.Add("name", nodeName) - } else if cons.Arch == nil { - // TODO(axw) 2014-08-18 #1358219 - // We should be requesting preferred - // architectures if unspecified, like - // in the other providers. - // - // This is slightly complicated in MAAS - // as there are a finite number of each - // architecture; preference may also - // conflict with other constraints, such - // as tags. Thus, a preference becomes a - // demand (which may fail) if not handled - // properly. - logger.Warningf( - "no architecture was specified, acquiring an arbitrary node", - ) } var result gomaasapi.JSONObject @@ -928,6 +944,7 @@ return nil, errors.Annotatef(err, "cannot run instances") } } + defer func() { if err != nil { if err := environ.StopInstances(inst.Id()); err != nil { @@ -961,20 +978,30 @@ return nil, errors.Trace(err) } - cloudcfg, err := environ.newCloudinitConfig(hostname, series) + subnetsMap, err := environ.subnetToSpaceIds() if err != nil { return nil, errors.Trace(err) } - userdata, err := providerinit.ComposeUserData(args.InstanceConfig, cloudcfg, MAASRenderer{}) + + // We need to extract the names of all interfaces of the selected node, + // which are both linked to subnets, and have an IP address, in order to + // pass those to the bridge script. 
+ interfaceNamesToBridge, err := instanceConfiguredInterfaceNames(environ.usingMAAS2(), inst, subnetsMap) if err != nil { - return nil, errors.Annotatef(err, "could not compose userdata for bootstrap node") + return nil, errors.Trace(err) } - logger.Debugf("maas user data; %d bytes", len(userdata)) - subnetsMap, err := environ.subnetToSpaceIds() + cloudcfg, err := environ.newCloudinitConfig(hostname, series, interfaceNamesToBridge) if err != nil { return nil, errors.Trace(err) } + + userdata, err := providerinit.ComposeUserData(args.InstanceConfig, cloudcfg, MAASRenderer{}) + if err != nil { + return nil, errors.Annotatef(err, "could not compose userdata for bootstrap node") + } + logger.Debugf("maas user data; %d bytes", len(userdata)) + var interfaces []network.InterfaceInfo if !environ.usingMAAS2() { inst1 := inst.(*maas1Instance) @@ -986,14 +1013,16 @@ // assigned IP addresses, even when NICs are set to "auto" instead of // "static". So instead of selectedNode, which only contains the // acquire-time details (no IP addresses for NICs set to "auto" vs - // "static"), we use the up-to-date startedNode response to get the + // "static"), we use the up-to-date startedNode response to get the // interfaces. interfaces, err = maasObjectNetworkInterfaces(startedNode, subnetsMap) if err != nil { return nil, errors.Trace(err) } + environ.tagInstance1(inst1, args.InstanceConfig) } else { - startedInst, err := environ.startNode2(*inst.(*maas2Instance), series, userdata) + inst2 := inst.(*maas2Instance) + startedInst, err := environ.startNode2(*inst2, series, userdata) if err != nil { return nil, errors.Trace(err) } @@ -1001,15 +1030,10 @@ if err != nil { return nil, errors.Trace(err) } + environ.tagInstance2(inst2, args.InstanceConfig) } logger.Debugf("started instance %q", inst.Id()) - if multiwatcher.AnyJobNeedsState(args.InstanceConfig.Jobs...) 
{ - if err := common.AddStateInstance(environ.Storage(), inst.Id()); err != nil { - logger.Errorf("could not record instance in provider-state: %v", err) - } - } - requestedVolumes := make([]names.VolumeTag, len(args.Volumes)) for i, v := range args.Volumes { requestedVolumes[i] = v.Tag @@ -1035,6 +1059,72 @@ }, nil } +func instanceConfiguredInterfaceNames(usingMAAS2 bool, inst instance.Instance, subnetsMap map[string]network.Id) ([]string, error) { + var ( + interfaces []network.InterfaceInfo + err error + ) + if !usingMAAS2 { + inst1 := inst.(*maas1Instance) + interfaces, err = maasObjectNetworkInterfaces(inst1.maasObject, subnetsMap) + if err != nil { + return nil, errors.Trace(err) + } + } else { + inst2 := inst.(*maas2Instance) + interfaces, err = maas2NetworkInterfaces(inst2, subnetsMap) + if err != nil { + return nil, errors.Trace(err) + } + } + + nameToNumAliases := make(map[string]int) + var linkedNames []string + for _, iface := range interfaces { + if iface.CIDR == "" { // CIDR comes from a linked subnet. + continue + } + + switch iface.ConfigType { + case network.ConfigUnknown, network.ConfigManual: + continue // link is unconfigured + } + + finalName := iface.InterfaceName + numAliases, seen := nameToNumAliases[iface.InterfaceName] + if !seen { + nameToNumAliases[iface.InterfaceName] = 0 + } else { + numAliases++ // aliases start from 1 + finalName += fmt.Sprintf(":%d", numAliases) + nameToNumAliases[iface.InterfaceName] = numAliases + } + + linkedNames = append(linkedNames, finalName) + } + systemID := extractSystemId(inst.Id()) + logger.Infof("interface names to bridge for node %q: %v", systemID, linkedNames) + + return linkedNames, nil +} + +func (environ *maasEnviron) tagInstance1(inst *maas1Instance, instanceConfig *instancecfg.InstanceConfig) { + if !multiwatcher.AnyJobNeedsState(instanceConfig.Jobs...) 
{ + return + } + err := common.AddStateInstance(environ.Storage(), inst.Id()) + if err != nil { + logger.Errorf("could not record instance in provider-state: %v", err) + } +} + +func (environ *maasEnviron) tagInstance2(inst *maas2Instance, instanceConfig *instancecfg.InstanceConfig) { + err := inst.machine.SetOwnerData(instanceConfig.Tags) + if err != nil { + logger.Errorf("could not set owner data for instance: %v", err) + } +} + func (environ *maasEnviron) waitForNodeDeployment(id instance.Id, timeout time.Duration) error { if environ.usingMAAS2() { return environ.waitForNodeDeployment2(id, timeout) @@ -1077,10 +1167,10 @@ return errors.Trace(err) } stat := machine.Status() - if stat.Status == status.StatusRunning { + if stat.Status == status.Running { return nil } - if stat.Status == status.StatusProvisioningError { + if stat.Status == status.ProvisioningError { return errors.Errorf("instance %q failed to deploy", id) } @@ -1230,7 +1320,7 @@ // setupJujuNetworking returns a string representing the script to run // in order to prepare the Juju-specific networking config on a node. -func setupJujuNetworking() string { +func setupJujuNetworking(interfacesToBridge []string) string { // For ubuntu series < xenial we prefer python2 over python3 // as we don't want to invalidate lots of testing against // known cloud-image contents. A summary of Ubuntu releases @@ -1259,16 +1349,17 @@ if [ ! -z "${juju_networking_preferred_python_binary:-}" ]; then if [ -f %[1]q ]; then -# We are sharing this code between master, maas-spaces2 and 1.25. -# For the moment we want master and 1.25 to not bridge all interfaces. +# We are sharing this code between 2.0 and 1.25. +# For the moment we want 2.0 to bridge all interfaces linked +# to a subnet, while for 1.25 we only bridge the default route interface. # This setting allows us to easily switch the behaviour when merging # the code between those various branches. 
- juju_bridge_all_interfaces=1 - if [ $juju_bridge_all_interfaces -eq 1 ]; then - $juju_networking_preferred_python_binary %[1]q --bridge-prefix=%[2]q --one-time-backup --activate %[4]q + juju_bridge_linked_interfaces=1 + if [ $juju_bridge_linked_interfaces -eq 1 ]; then + $juju_networking_preferred_python_binary %[1]q --bridge-prefix=%[2]q --interfaces-to-bridge=%[5]q --one-time-backup --activate %[4]q else juju_ipv4_interface_to_bridge=$(ip -4 route list exact default | head -n1 | cut -d' ' -f5) - $juju_networking_preferred_python_binary %[1]q --bridge-name=%[3]q --interface-to-bridge="${juju_ipv4_interface_to_bridge:-unknown}" --one-time-backup --activate %[4]q + $juju_networking_preferred_python_binary %[1]q --bridge-name=%[3]q --interfaces-to-bridge="${juju_ipv4_interface_to_bridge:-unknown}" --one-time-backup --activate %[4]q fi fi else @@ -1277,16 +1368,18 @@ bridgeScriptPath, instancecfg.DefaultBridgePrefix, instancecfg.DefaultBridgeName, - "/etc/network/interfaces") + "/etc/network/interfaces", + strings.Join(interfacesToBridge, " "), + ) } -func renderEtcNetworkInterfacesScript() string { - return setupJujuNetworking() +func renderEtcNetworkInterfacesScript(interfacesToBridge ...string) string { + return setupJujuNetworking(interfacesToBridge) } // newCloudinitConfig creates a cloudinit.Config structure suitable as a base // for initialising a MAAS node. 
-func (environ *maasEnviron) newCloudinitConfig(hostname, forSeries string) (cloudinit.CloudConfig, error) { +func (environ *maasEnviron) newCloudinitConfig(hostname, forSeries string, interfacesToBridge []string) (cloudinit.CloudConfig, error) { cloudcfg, err := cloudinit.New(forSeries) if err != nil { return nil, err @@ -1318,7 +1411,7 @@ } cloudcfg.AddPackage("bridge-utils") cloudcfg.AddBootTextFile(bridgeScriptPath, bridgeScriptPython, 0755) - cloudcfg.AddScripts(setupJujuNetworking()) + cloudcfg.AddScripts(setupJujuNetworking(interfacesToBridge)) } return cloudcfg, nil } @@ -2074,20 +2167,29 @@ primaryNICName := interfaces[0].Name primaryNICID := strconv.Itoa(interfaces[0].ID) primaryNICSubnetCIDR := primaryNICInfo.CIDR - primaryNICVLANID := subnetCIDRToVLANID[primaryNICSubnetCIDR] - updatedPrimaryNIC, err := env.updateDeviceInterface(deviceID, primaryNICID, primaryNICName, primaryMACAddress, primaryNICVLANID) - if err != nil { - return nil, errors.Annotatef(err, "cannot update device interface %q", interfaces[0].Name) + primaryNICVLANID, hasSubnet := subnetCIDRToVLANID[primaryNICSubnetCIDR] + if hasSubnet { + updatedPrimaryNIC, err := env.updateDeviceInterface(deviceID, primaryNICID, primaryNICName, primaryMACAddress, primaryNICVLANID) + if err != nil { + return nil, errors.Annotatef(err, "cannot update device interface %q", interfaces[0].Name) + } + logger.Debugf("device %q primary interface %q updated: %+v", containerDevice.SystemID, primaryNICName, updatedPrimaryNIC) } - logger.Debugf("device %q primary interface %q updated: %+v", containerDevice.SystemID, primaryNICName, updatedPrimaryNIC) deviceNICIDs := make([]string, len(preparedInfo)) nameToParentName := make(map[string]string) for i, nic := range preparedInfo { maasNICID := "" nameToParentName[nic.InterfaceName] = nic.ParentInterfaceName + nicVLANID, knownSubnet := subnetCIDRToVLANID[nic.CIDR] if nic.InterfaceName != primaryNICName { - nicVLANID := subnetCIDRToVLANID[nic.CIDR] + if !knownSubnet { + 
logger.Warningf("NIC %v has no subnet - setting to manual and using untagged VLAN", nic.InterfaceName) + nicVLANID = primaryNICVLANID + } else { + logger.Infof("linking NIC %v to subnet %v - using static IP", nic.InterfaceName, nic.CIDR) + } + createdNIC, err := env.createDeviceInterface(deviceID, nic.InterfaceName, nic.MACAddress, nicVLANID) if err != nil { return nil, errors.Annotate(err, "creating device interface") @@ -2100,17 +2202,24 @@ deviceNICIDs[i] = maasNICID subnetID := string(nic.ProviderSubnetId) + if !knownSubnet { + continue + } + linkedInterface, err := env.linkDeviceInterfaceToSubnet(deviceID, maasNICID, subnetID, modeStatic) if err != nil { - return nil, errors.Annotate(err, "cannot link device interface to subnet") + logger.Warningf("linking NIC %v to subnet %v failed: %v", nic.InterfaceName, nic.CIDR, err) + } else { + logger.Debugf("linked device interface to subnet: %+v", linkedInterface) } - logger.Debugf("linked device interface to subnet: %+v", linkedInterface) } + finalInterfaces, err := env.deviceInterfaceInfo(deviceID, nameToParentName) if err != nil { return nil, errors.Annotate(err, "cannot get device interfaces") } logger.Debugf("allocated device interfaces: %+v", finalInterfaces) + return finalInterfaces, nil } @@ -2140,9 +2249,9 @@ logger.Debugf("primary device NIC prepared info: %+v", primaryNICInfo) primaryNICSubnetCIDR := primaryNICInfo.CIDR - subnet, ok := subnetCIDRToSubnet[primaryNICSubnetCIDR] - if !ok { - return nil, errors.Errorf("primary NIC subnet %v not found", primaryNICSubnetCIDR) + subnet, hasSubnet := subnetCIDRToSubnet[primaryNICSubnetCIDR] + if !hasSubnet { + logger.Debugf("primary device NIC %q has no linked subnet - leaving unconfigured", primaryNICInfo.InterfaceName) } primaryMACAddress := primaryNICInfo.MACAddress args := gomaasapi.MachinesArgs{ @@ -2164,7 +2273,7 @@ createDeviceArgs := gomaasapi.CreateMachineDeviceArgs{ Hostname: deviceName, MACAddress: primaryMACAddress, - Subnet: subnet, + Subnet: subnet, 
// can be nil InterfaceName: primaryNICName, } device, err := machine.CreateDevice(createDeviceArgs) @@ -2175,48 +2284,62 @@ if len(interface_set) != 1 { // Shouldn't be possible as machine.CreateDevice always returns us // one interface. - return nil, errors.Errorf("unexpected number of interfaces inresponse from creating device: %v", interface_set) + return nil, errors.Errorf("unexpected number of interfaces in response from creating device: %v", interface_set) } + primaryNICVLAN := interface_set[0].VLAN() nameToParentName := make(map[string]string) for _, nic := range preparedInfo { nameToParentName[nic.InterfaceName] = nic.ParentInterfaceName if nic.InterfaceName != primaryNICName { - subnet, ok := subnetCIDRToSubnet[nic.CIDR] - if !ok { - return nil, errors.Errorf("NIC %v subnet %v not found", nic.InterfaceName, nic.CIDR) + createArgs := gomaasapi.CreateInterfaceArgs{ + Name: nic.InterfaceName, + MTU: nic.MTU, + MACAddress: nic.MACAddress, + } + + subnet, knownSubnet := subnetCIDRToSubnet[nic.CIDR] + if !knownSubnet { + logger.Warningf("NIC %v has no subnet - setting to manual and using untagged VLAN", nic.InterfaceName) + createArgs.VLAN = primaryNICVLAN + } else { + createArgs.VLAN = subnet.VLAN() + logger.Infof("linking NIC %v to subnet %v - using static IP", nic.InterfaceName, subnet.CIDR()) } - createdNIC, err := device.CreateInterface( - gomaasapi.CreateInterfaceArgs{ - Name: nic.InterfaceName, - MACAddress: nic.MACAddress, - VLAN: subnet.VLAN(), - }) + + createdNIC, err := device.CreateInterface(createArgs) if err != nil { return nil, errors.Annotate(err, "creating device interface") } logger.Debugf("created device interface: %+v", createdNIC) + if !knownSubnet { + continue + } + linkArgs := gomaasapi.LinkSubnetArgs{ Mode: gomaasapi.LinkModeStatic, Subnet: subnet, } - err = createdNIC.LinkSubnet(linkArgs) - if err != nil { - return nil, errors.Annotate(err, "cannot link device interface to subnet") + + if err := createdNIC.LinkSubnet(linkArgs); err != 
nil { + logger.Warningf("linking NIC %v to subnet %v failed: %v", nic.InterfaceName, subnet.CIDR(), err) + } else { + logger.Debugf("linked device interface to subnet: %+v", createdNIC) } - logger.Debugf("linked device interface to subnet: %+v", createdNIC) } } + finalInterfaces, err := env.deviceInterfaceInfo2(device.SystemID(), nameToParentName) if err != nil { return nil, errors.Annotate(err, "cannot get device interfaces") } logger.Debugf("allocated device interfaces: %+v", finalInterfaces) + return finalInterfaces, nil } -func (env *maasEnviron) ReleaseContainerAddresses(interfaces []network.InterfaceInfo) error { +func (env *maasEnviron) ReleaseContainerAddresses(interfaces []network.ProviderInterfaceInfo) error { macAddresses := make([]string, len(interfaces)) for i, info := range interfaces { macAddresses[i] = info.MACAddress @@ -2254,6 +2377,12 @@ deviceIds[i] = id } + // If one device matched on multiple MAC addresses (like for + // multi-nic containers) it will be in the slice multiple + // times. Skip devices we've seen already. + deviceIdSet := set.NewStrings(deviceIds...) + deviceIds = deviceIdSet.SortedValues() + for _, id := range deviceIds { err := devicesAPI.GetSubObject(id).Delete() if err != nil { @@ -2268,7 +2397,16 @@ if err != nil { return errors.Trace(err) } + // If one device matched on multiple MAC addresses (like for + // multi-nic containers) it will be in the slice multiple + // times. Skip devices we've seen already. 
+ seen := set.NewStrings() for _, device := range devices { + if seen.Contains(device.SystemID()) { + continue + } + seen.Add(device.SystemID()) + err = device.Delete() if err != nil { return errors.Annotatef(err, "deleting device %s", device.SystemID()) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/maas/environprovider.go juju-core-2.0.0/src/github.com/juju/juju/provider/maas/environprovider.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/maas/environprovider.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/maas/environprovider.go 2016-10-13 14:31:49.000000000 +0000 @@ -43,11 +43,6 @@ var errAgentNameAlreadySet = errors.New( "maas-agent-name is already set; this should not be set by hand") -// RestrictedConfigAttributes is specified in the EnvironProvider interface. -func (p maasEnvironProvider) RestrictedConfigAttributes() []string { - return []string{} -} - // PrepareConfig is specified in the EnvironProvider interface. func (p maasEnvironProvider) PrepareConfig(args environs.PrepareConfigParams) (*config.Config, error) { if err := validateCloudSpec(args.Cloud); err != nil { @@ -81,11 +76,6 @@ return nil } -// SecretAttrs is specified in the EnvironProvider interface. -func (prov maasEnvironProvider) SecretAttrs(cfg *config.Config) (map[string]string, error) { - return map[string]string{}, nil -} - // DetectRegions is specified in the environs.CloudRegionDetector interface. 
func (p maasEnvironProvider) DetectRegions() ([]cloud.Region, error) { return nil, errors.NotFoundf("regions") diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/maas/environ_test.go juju-core-2.0.0/src/github.com/juju/juju/provider/maas/environ_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/maas/environ_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/maas/environ_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,6 +4,9 @@ package maas_test import ( + "io" + "net/http" + "net/http/httptest" stdtesting "testing" "github.com/juju/gomaasapi" @@ -11,6 +14,7 @@ "github.com/juju/utils/set" gc "gopkg.in/check.v1" + "github.com/juju/errors" "github.com/juju/juju/cloud" "github.com/juju/juju/environs" "github.com/juju/juju/environs/config" @@ -22,7 +26,6 @@ type environSuite struct { coretesting.BaseSuite envtesting.ToolsFixture - testMAASObject *gomaasapi.TestMAASObject restoreTimeouts func() } @@ -38,18 +41,16 @@ func (s *environSuite) SetUpSuite(c *gc.C) { s.restoreTimeouts = envtesting.PatchAttemptStrategies(maas.ShortAttempt) s.BaseSuite.SetUpSuite(c) - TestMAASObject := gomaasapi.NewTestMAAS("1.0") - s.testMAASObject = TestMAASObject } func (s *environSuite) SetUpTest(c *gc.C) { s.BaseSuite.SetUpTest(c) s.ToolsFixture.SetUpTest(c) - mockCapabilities := func(client *gomaasapi.MAASObject) (set.Strings, error) { + mockCapabilities := func(*gomaasapi.MAASObject, string) (set.Strings, error) { return set.NewStrings("network-deployment-ubuntu"), nil } - mockGetController := func(maasServer, apiKey string) (gomaasapi.Controller, error) { + mockGetController := func(string, string) (gomaasapi.Controller, error) { return nil, gomaasapi.NewUnsupportedVersionError("oops") } s.PatchValue(&maas.GetCapabilities, mockCapabilities) @@ -57,13 +58,11 @@ } func (s *environSuite) TearDownTest(c *gc.C) { - s.testMAASObject.TestServer.Clear() s.ToolsFixture.TearDownTest(c) s.BaseSuite.TearDownTest(c) } func 
(s *environSuite) TearDownSuite(c *gc.C) { - s.testMAASObject.Close() s.restoreTimeouts() s.BaseSuite.TearDownSuite(c) } @@ -147,10 +146,10 @@ cfg := getSimpleTestConfig(c, nil) env, err := maas.NewEnviron(getSimpleCloudSpec(), cfg) c.Assert(err, jc.ErrorIsNil) - modifyNetworkScript := maas.RenderEtcNetworkInterfacesScript() + modifyNetworkScript := maas.RenderEtcNetworkInterfacesScript("eth0", "eth1") script := expectedCloudinitConfig script = append(script, modifyNetworkScript) - cloudcfg, err := maas.NewCloudinitConfig(env, "testing.invalid", "quantal") + cloudcfg, err := maas.NewCloudinitConfig(env, "testing.invalid", "quantal", []string{"eth0", "eth1"}) c.Assert(err, jc.ErrorIsNil) c.Assert(cloudcfg.SystemUpdate(), jc.IsTrue) c.Assert(cloudcfg.RunCmds(), jc.DeepEquals, script) @@ -163,8 +162,77 @@ cfg := getSimpleTestConfig(c, attrs) env, err := maas.NewEnviron(getSimpleCloudSpec(), cfg) c.Assert(err, jc.ErrorIsNil) - cloudcfg, err := maas.NewCloudinitConfig(env, "testing.invalid", "quantal") + cloudcfg, err := maas.NewCloudinitConfig(env, "testing.invalid", "quantal", nil) c.Assert(err, jc.ErrorIsNil) c.Assert(cloudcfg.SystemUpdate(), jc.IsTrue) c.Assert(cloudcfg.RunCmds(), jc.DeepEquals, expectedCloudinitConfig) } + +func (*environSuite) TestRenderEtcNetworkInterfacesScriptMultipleNames(c *gc.C) { + script := maas.RenderEtcNetworkInterfacesScript("eth0", "eth0:1", "eth2", "eth1") + c.Check(script, jc.Contains, `--interfaces-to-bridge="eth0 eth0:1 eth2 eth1"`) + c.Check(script, jc.Contains, `--bridge-prefix="br-"`) +} + +func (*environSuite) TestRenderEtcNetworkInterfacesScriptSingleName(c *gc.C) { + script := maas.RenderEtcNetworkInterfacesScript("eth0") + c.Check(script, jc.Contains, `--interfaces-to-bridge="eth0"`) + c.Check(script, jc.Contains, `--bridge-prefix="br-"`) +} + +type badEndpointSuite struct { + coretesting.BaseSuite + + fakeServer *httptest.Server + cloudSpec environs.CloudSpec +} + +var _ = gc.Suite(&badEndpointSuite{}) + +func (s 
*badEndpointSuite) SetUpSuite(c *gc.C) { + s.BaseSuite.SetUpSuite(c) + always404 := func(w http.ResponseWriter, req *http.Request) { + w.WriteHeader(http.StatusNotFound) + io.WriteString(w, "uh-oh") + } + s.fakeServer = httptest.NewServer(http.HandlerFunc(always404)) +} + +func (s *badEndpointSuite) SetUpTest(c *gc.C) { + s.BaseSuite.SetUpTest(c) + cred := cloud.NewCredential(cloud.OAuth1AuthType, map[string]string{ + "maas-oauth": "a:b:c", + }) + s.cloudSpec = environs.CloudSpec{ + Type: "maas", + Name: "maas", + Endpoint: s.fakeServer.URL, + Credential: &cred, + } +} + +func (s *badEndpointSuite) TestBadEndpointMessageNoMAAS(c *gc.C) { + cfg := getSimpleTestConfig(c, coretesting.Attrs{}) + env, err := maas.NewEnviron(s.cloudSpec, cfg) + c.Assert(env, gc.IsNil) + c.Assert(err, gc.ErrorMatches, `could not connect to MAAS controller - check the endpoint is correct \(it normally ends with /MAAS\)`) + c.Assert(err, jc.Satisfies, errors.IsNotSupported) +} + +func (s *badEndpointSuite) TestBadEndpointMessageWithMAAS(c *gc.C) { + cfg := getSimpleTestConfig(c, coretesting.Attrs{}) + s.cloudSpec.Endpoint += "/MAAS" + env, err := maas.NewEnviron(s.cloudSpec, cfg) + c.Assert(env, gc.IsNil) + c.Assert(err, gc.ErrorMatches, `could not connect to MAAS controller - check the endpoint is correct`) + c.Assert(err, jc.Satisfies, errors.IsNotSupported) +} + +func (s *badEndpointSuite) TestBadEndpointMessageWithMAASAndSlash(c *gc.C) { + cfg := getSimpleTestConfig(c, coretesting.Attrs{}) + s.cloudSpec.Endpoint += "/MAAS/" + env, err := maas.NewEnviron(s.cloudSpec, cfg) + c.Assert(env, gc.IsNil) + c.Assert(err, gc.ErrorMatches, `could not connect to MAAS controller - check the endpoint is correct`) + c.Assert(err, jc.Satisfies, errors.IsNotSupported) +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/maas/environ_whitebox_test.go juju-core-2.0.0/src/github.com/juju/juju/provider/maas/environ_whitebox_test.go --- 
juju-core-2.0~beta15/src/github.com/juju/juju/provider/maas/environ_whitebox_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/maas/environ_whitebox_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -17,6 +17,7 @@ "github.com/juju/utils" "github.com/juju/utils/arch" "github.com/juju/utils/series" + "github.com/juju/version" gc "gopkg.in/check.v1" "gopkg.in/juju/names.v2" goyaml "gopkg.in/yaml.v2" @@ -34,7 +35,6 @@ "github.com/juju/juju/provider/common" "github.com/juju/juju/storage" coretesting "github.com/juju/juju/testing" - jujuversion "github.com/juju/juju/version" ) type environSuite struct { @@ -141,15 +141,9 @@ suite.setupFakeTools(c) env := suite.makeEnviron() // Create node 0: it will be used as the bootstrap node. - suite.testMAASObject.TestServer.NewNode(fmt.Sprintf( - `{"system_id": "node0", "hostname": "host0", "architecture": "%s/generic", "memory": 1024, "cpu_count": 1, "zone": {"name": "test_zone"}}`, - arch.HostArch()), - ) - lshwXML, err := suite.generateHWTemplate(map[string]ifaceInfo{"aa:bb:cc:dd:ee:f0": {0, "eth0", false}}) - c.Assert(err, jc.ErrorIsNil) - suite.testMAASObject.TestServer.AddNodeDetails("node0", lshwXML) + suite.newNode(c, "node0", "host0", nil) suite.addSubnet(c, 9, 9, "node0") - err = bootstrap.Bootstrap(envtesting.BootstrapContext(c), env, bootstrap.BootstrapParams{ + err := bootstrap.Bootstrap(envtesting.BootstrapContext(c), env, bootstrap.BootstrapParams{ ControllerConfig: coretesting.FakeControllerConfig(), AdminSecret: testing.AdminSecret, CAPrivateKey: coretesting.CAKey, @@ -172,19 +166,13 @@ c.Check(insts[0].Id(), gc.Equals, instanceIds[0]) // Create node 1: it will be used as instance number 1. 
- suite.testMAASObject.TestServer.NewNode(fmt.Sprintf( - `{"system_id": "node1", "hostname": "host1", "architecture": "%s/generic", "memory": 1024, "cpu_count": 1, "zone": {"name": "test_zone"}}`, - arch.HostArch()), - ) - lshwXML, err = suite.generateHWTemplate(map[string]ifaceInfo{"aa:bb:cc:dd:ee:f1": {0, "eth0", false}}) - c.Assert(err, jc.ErrorIsNil) - suite.testMAASObject.TestServer.AddNodeDetails("node1", lshwXML) + suite.newNode(c, "node1", "host1", nil) suite.addSubnet(c, 8, 8, "node1") instance, hc := testing.AssertStartInstance(c, env, suite.controllerUUID, "1") c.Assert(err, jc.ErrorIsNil) c.Check(instance, gc.NotNil) c.Assert(hc, gc.NotNil) - c.Check(hc.String(), gc.Equals, fmt.Sprintf("arch=%s cpu-cores=1 mem=1024M availability-zone=test_zone", arch.HostArch())) + c.Check(hc.String(), gc.Equals, fmt.Sprintf("arch=%s cores=1 mem=1024M availability-zone=test_zone", arch.HostArch())) // The instance number 1 has been acquired and started. actions, found = operations["node1"] @@ -227,39 +215,6 @@ return &maas1Instance{&node, nil, statusGetter} } -func (suite *environSuite) newNetwork(name string, id int, vlanTag int, defaultGateway string) *gomaasapi.MAASObject { - var vlan string - if vlanTag == 0 { - vlan = "null" - } else { - vlan = fmt.Sprintf("%d", vlanTag) - } - - if defaultGateway != "null" { - // since we use %s below only "null" (if passed) should remain unquoted. - defaultGateway = fmt.Sprintf("%q", defaultGateway) - } - - // TODO(dimitern): Use JSON tags on structs, JSON encoder, or at least - // text/template below and in similar cases. 
- input := fmt.Sprintf(`{ - "name": %q, - "ip":"192.168.%d.2", - "netmask": "255.255.255.0", - "vlan_tag": %s, - "description": "%s_%d_%d", - "default_gateway": %s - }`, - name, - id, - vlan, - name, id, vlanTag, - defaultGateway, - ) - network := suite.testMAASObject.TestServer.NewNetwork(input) - return &network -} - func (suite *environSuite) TestStopInstancesReturnsIfParameterEmpty(c *gc.C) { suite.getInstance("test1") @@ -382,15 +337,9 @@ func (suite *environSuite) TestBootstrapSucceeds(c *gc.C) { suite.setupFakeTools(c) env := suite.makeEnviron() - suite.testMAASObject.TestServer.NewNode(fmt.Sprintf( - `{"system_id": "thenode", "hostname": "host", "architecture": "%s/generic", "memory": 256, "cpu_count": 8, "zone": {"name": "test_zone"}}`, - arch.HostArch()), - ) + suite.newNode(c, "thenode", "host", nil) suite.addSubnet(c, 9, 9, "thenode") - lshwXML, err := suite.generateHWTemplate(map[string]ifaceInfo{"aa:bb:cc:dd:ee:f0": {0, "eth0", false}}) - c.Assert(err, jc.ErrorIsNil) - suite.testMAASObject.TestServer.AddNodeDetails("thenode", lshwXML) - err = bootstrap.Bootstrap(envtesting.BootstrapContext(c), env, bootstrap.BootstrapParams{ + err := bootstrap.Bootstrap(envtesting.BootstrapContext(c), env, bootstrap.BootstrapParams{ ControllerConfig: coretesting.FakeControllerConfig(), AdminSecret: testing.AdminSecret, CAPrivateKey: coretesting.CAKey, @@ -401,17 +350,11 @@ func (suite *environSuite) TestBootstrapNodeNotDeployed(c *gc.C) { suite.setupFakeTools(c) env := suite.makeEnviron() - suite.testMAASObject.TestServer.NewNode(fmt.Sprintf( - `{"system_id": "thenode", "hostname": "host", "architecture": "%s/generic", "memory": 256, "cpu_count": 8, "zone": {"name": "test_zone"}}`, - arch.HostArch()), - ) + suite.newNode(c, "thenode", "host", nil) suite.addSubnet(c, 9, 9, "thenode") - lshwXML, err := suite.generateHWTemplate(map[string]ifaceInfo{"aa:bb:cc:dd:ee:f0": {0, "eth0", false}}) - c.Assert(err, jc.ErrorIsNil) - 
suite.testMAASObject.TestServer.AddNodeDetails("thenode", lshwXML) // Ensure node will not be reported as deployed by changing its status. suite.testMAASObject.TestServer.ChangeNode("thenode", "status", "4") - err = bootstrap.Bootstrap(envtesting.BootstrapContext(c), env, bootstrap.BootstrapParams{ + err := bootstrap.Bootstrap(envtesting.BootstrapContext(c), env, bootstrap.BootstrapParams{ ControllerConfig: coretesting.FakeControllerConfig(), AdminSecret: testing.AdminSecret, CAPrivateKey: coretesting.CAKey, @@ -422,17 +365,11 @@ func (suite *environSuite) TestBootstrapNodeFailedDeploy(c *gc.C) { suite.setupFakeTools(c) env := suite.makeEnviron() - suite.testMAASObject.TestServer.NewNode(fmt.Sprintf( - `{"system_id": "thenode", "hostname": "host", "architecture": "%s/generic", "memory": 256, "cpu_count": 8, "zone": {"name": "test_zone"}}`, - arch.HostArch()), - ) + suite.newNode(c, "thenode", "host", nil) suite.addSubnet(c, 9, 9, "thenode") - lshwXML, err := suite.generateHWTemplate(map[string]ifaceInfo{"aa:bb:cc:dd:ee:f0": {0, "eth0", false}}) - c.Assert(err, jc.ErrorIsNil) - suite.testMAASObject.TestServer.AddNodeDetails("thenode", lshwXML) // Set the node status to "Failed deployment" suite.testMAASObject.TestServer.ChangeNode("thenode", "status", "11") - err = bootstrap.Bootstrap(envtesting.BootstrapContext(c), env, bootstrap.BootstrapParams{ + err := bootstrap.Bootstrap(envtesting.BootstrapContext(c), env, bootstrap.BootstrapParams{ ControllerConfig: coretesting.FakeControllerConfig(), AdminSecret: testing.AdminSecret, CAPrivateKey: coretesting.CAKey, @@ -442,19 +379,16 @@ func (suite *environSuite) TestBootstrapFailsIfNoTools(c *gc.C) { env := suite.makeEnviron() - // Disable auto-uploading by setting the agent version. 
- cfg, err := env.Config().Apply(map[string]interface{}{ - "agent-version": jujuversion.Current.String(), - }) - c.Assert(err, jc.ErrorIsNil) - err = env.SetConfig(cfg) - c.Assert(err, jc.ErrorIsNil) - err = bootstrap.Bootstrap(envtesting.BootstrapContext(c), env, bootstrap.BootstrapParams{ + vers := version.MustParse("1.2.3") + err := bootstrap.Bootstrap(envtesting.BootstrapContext(c), env, bootstrap.BootstrapParams{ ControllerConfig: coretesting.FakeControllerConfig(), AdminSecret: testing.AdminSecret, CAPrivateKey: coretesting.CAKey, + // Disable auto-uploading by setting the agent version + // to something that's not the current version. + AgentVersion: &vers, }) - c.Check(err, gc.ErrorMatches, "Juju cannot bootstrap because no tools are available for your model(.|\n)*") + c.Check(err, gc.ErrorMatches, "Juju cannot bootstrap because no agent binaries are available for your model(.|\n)*") } func (suite *environSuite) TestBootstrapFailsIfNoNodes(c *gc.C) { @@ -944,12 +878,13 @@ func (s *environSuite) newNode(c *gc.C, nodename, hostname string, attrs map[string]interface{}) { allAttrs := map[string]interface{}{ - "system_id": nodename, - "hostname": hostname, - "architecture": fmt.Sprintf("%s/generic", arch.HostArch()), - "memory": 1024, - "cpu_count": 1, - "zone": map[string]interface{}{"name": "test_zone", "description": "description"}, + "system_id": nodename, + "hostname": hostname, + "architecture": fmt.Sprintf("%s/generic", arch.HostArch()), + "memory": 1024, + "cpu_count": 1, + "zone": map[string]interface{}{"name": "test_zone", "description": "description"}, + "interface_set": exampleParsedInterfaceSetJSON, } for k, v := range attrs { allAttrs[k] = v @@ -1101,20 +1036,20 @@ func (s *environSuite) TestReleaseContainerAddresses(c *gc.C) { s.testMAASObject.TestServer.AddDevice(&gomaasapi.TestDevice{ - SystemId: "device1", - MACAddress: "mac1", + SystemId: "device1", + MACAddresses: []string{"mac1"}, }) 
s.testMAASObject.TestServer.AddDevice(&gomaasapi.TestDevice{ - SystemId: "device2", - MACAddress: "mac2", + SystemId: "device2", + MACAddresses: []string{"mac2"}, }) s.testMAASObject.TestServer.AddDevice(&gomaasapi.TestDevice{ - SystemId: "device3", - MACAddress: "mac3", + SystemId: "device3", + MACAddresses: []string{"mac3"}, }) env := s.makeEnviron() - err := env.ReleaseContainerAddresses([]network.InterfaceInfo{ + err := env.ReleaseContainerAddresses([]network.ProviderInterfaceInfo{ {MACAddress: "mac1"}, {MACAddress: "mac3"}, {MACAddress: "mac4"}, @@ -1127,3 +1062,27 @@ } c.Assert(systemIds, gc.DeepEquals, []string{"device2"}) } + +func (s *environSuite) TestReleaseContainerAddresses_HandlesDupes(c *gc.C) { + s.testMAASObject.TestServer.AddDevice(&gomaasapi.TestDevice{ + SystemId: "device1", + MACAddresses: []string{"mac1", "mac2"}, + }) + s.testMAASObject.TestServer.AddDevice(&gomaasapi.TestDevice{ + SystemId: "device3", + MACAddresses: []string{"mac3"}, + }) + + env := s.makeEnviron() + err := env.ReleaseContainerAddresses([]network.ProviderInterfaceInfo{ + {MACAddress: "mac1"}, + {MACAddress: "mac2"}, + }) + c.Assert(err, jc.ErrorIsNil) + + var systemIds []string + for systemId, _ := range s.testMAASObject.TestServer.Devices() { + systemIds = append(systemIds, systemId) + } + c.Assert(systemIds, gc.DeepEquals, []string{"device3"}) +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/maas/export_test.go juju-core-2.0.0/src/github.com/juju/juju/provider/maas/export_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/maas/export_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/maas/export_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -18,8 +18,8 @@ return env.(*maasEnviron).getMAASClient() } -func NewCloudinitConfig(env environs.Environ, hostname, series string) (cloudinit.CloudConfig, error) { - return env.(*maasEnviron).newCloudinitConfig(hostname, series) +func 
NewCloudinitConfig(env environs.Environ, hostname, series string, interfacesToBridge []string) (cloudinit.CloudConfig, error) { + return env.(*maasEnviron).newCloudinitConfig(hostname, series, interfacesToBridge) } var RenderEtcNetworkInterfacesScript = renderEtcNetworkInterfacesScript diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/maas/instance.go juju-core-2.0.0/src/github.com/juju/juju/provider/maas/instance.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/maas/instance.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/maas/instance.go 2016-10-13 14:31:49.000000000 +0000 @@ -51,26 +51,30 @@ return instance.Id(maasObject.URI().String()) } +func normalizeStatus(statusMsg string) string { + return strings.ToLower(strings.TrimSpace(statusMsg)) +} + func convertInstanceStatus(statusMsg, substatus string, id instance.Id) instance.InstanceStatus { - maasInstanceStatus := status.StatusEmpty - switch statusMsg { + maasInstanceStatus := status.Empty + switch normalizeStatus(statusMsg) { case "": logger.Debugf("unable to obtain status of instance %s", id) statusMsg = "error in getting status" - case "Deployed": - maasInstanceStatus = status.StatusRunning - case "Deploying": - maasInstanceStatus = status.StatusAllocating + case "deployed": + maasInstanceStatus = status.Running + case "deploying": + maasInstanceStatus = status.Allocating if substatus != "" { statusMsg = fmt.Sprintf("%s: %s", statusMsg, substatus) } - case "Failed Deployment": - maasInstanceStatus = status.StatusProvisioningError + case "failed deployment": + maasInstanceStatus = status.ProvisioningError if substatus != "" { statusMsg = fmt.Sprintf("%s: %s", statusMsg, substatus) } default: - maasInstanceStatus = status.StatusEmpty + maasInstanceStatus = status.Empty statusMsg = fmt.Sprintf("%s: %s", statusMsg, substatus) } return instance.InstanceStatus{ diff -Nru 
juju-core-2.0~beta15/src/github.com/juju/juju/provider/maas/instance_test.go juju-core-2.0.0/src/github.com/juju/juju/provider/maas/instance_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/maas/instance_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/maas/instance_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -217,7 +217,7 @@ hc, err := inst.hardwareCharacteristics() c.Assert(err, jc.ErrorIsNil) c.Assert(hc, gc.NotNil) - c.Assert(hc.String(), gc.Equals, `arch=amd64 cpu-cores=6 mem=16384M availability-zone=tst`) + c.Assert(hc.String(), gc.Equals, `arch=amd64 cores=6 mem=16384M availability-zone=tst`) } func (s *instanceTest) TestHardwareCharacteristicsWithTags(c *gc.C) { @@ -238,7 +238,7 @@ hc, err := inst.hardwareCharacteristics() c.Assert(err, jc.ErrorIsNil) c.Assert(hc, gc.NotNil) - c.Assert(hc.String(), gc.Equals, `arch=amd64 cpu-cores=6 mem=16384M tags=a,b availability-zone=tst`) + c.Assert(hc.String(), gc.Equals, `arch=amd64 cores=6 mem=16384M tags=a,b availability-zone=tst`) } func (s *instanceTest) TestHardwareCharacteristicsMissing(c *gc.C) { diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/maas/interfaces.go juju-core-2.0.0/src/github.com/juju/juju/provider/maas/interfaces.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/maas/interfaces.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/maas/interfaces.go 2016-10-13 14:31:49.000000000 +0000 @@ -26,6 +26,7 @@ modeStatic maasLinkMode = "static" modeDHCP maasLinkMode = "dhcp" modeLinkUp maasLinkMode = "link_up" + modeAuto maasLinkMode = "auto" ) type maasInterfaceLink struct { @@ -120,7 +121,6 @@ infos := make([]network.InterfaceInfo, 0, len(interfaces)) for i, iface := range interfaces { - // The below works for all types except bonds and their members. 
parentName := strings.Join(iface.Parents, "") var nicType network.InterfaceType @@ -153,20 +153,18 @@ NoAutoStart: !iface.Enabled, } + if len(iface.Links) == 0 { + logger.Debugf("interface %q has no links", iface.Name) + infos = append(infos, nicInfo) + continue + } + for _, link := range iface.Links { - switch link.Mode { - case modeUnknown: - nicInfo.ConfigType = network.ConfigUnknown - case modeDHCP: - nicInfo.ConfigType = network.ConfigDHCP - case modeStatic, modeLinkUp: - nicInfo.ConfigType = network.ConfigStatic - default: - nicInfo.ConfigType = network.ConfigManual - } + nicInfo.ConfigType = maasLinkToInterfaceConfigType(string(link.Mode)) - if link.IPAddress == "" { - logger.Debugf("interface %q has no address", iface.Name) + if link.IPAddress == "" && link.Subnet == nil { + logger.Debugf("interface %q link %d has neither subnet nor address", iface.Name, link.ID) + infos = append(infos, nicInfo) } else { // We set it here initially without a space, just so we don't // lose it when we have no linked subnet below. 
@@ -258,20 +256,18 @@ NoAutoStart: !iface.Enabled(), } + if len(iface.Links()) == 0 { + logger.Debugf("interface %q has no links", iface.Name()) + infos = append(infos, nicInfo) + continue + } + for _, link := range iface.Links() { - switch maasLinkMode(link.Mode()) { - case modeUnknown: - nicInfo.ConfigType = network.ConfigUnknown - case modeDHCP: - nicInfo.ConfigType = network.ConfigDHCP - case modeStatic, modeLinkUp: - nicInfo.ConfigType = network.ConfigStatic - default: - nicInfo.ConfigType = network.ConfigManual - } + nicInfo.ConfigType = maasLinkToInterfaceConfigType(link.Mode()) - if link.IPAddress() == "" { - logger.Debugf("interface %q has no address", iface.Name()) + if link.IPAddress() == "" && link.Subnet() == nil { + logger.Debugf("interface %q link %d has neither subnet nor address", iface.Name(), link.ID()) + infos = append(infos, nicInfo) } else { // We set it here initially without a space, just so we don't // lose it when we have no linked subnet below. @@ -339,3 +335,18 @@ return maasObjectNetworkInterfaces(mi.maasObject, subnetsMap) } } + +func maasLinkToInterfaceConfigType(mode string) network.InterfaceConfigType { + switch maasLinkMode(mode) { + case modeUnknown: + return network.ConfigUnknown + case modeDHCP: + return network.ConfigDHCP + case modeStatic, modeAuto: + return network.ConfigStatic + case modeLinkUp: + default: + } + + return network.ConfigManual +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/maas/interfaces_test.go juju-core-2.0.0/src/github.com/juju/juju/provider/maas/interfaces_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/maas/interfaces_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/maas/interfaces_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -8,6 +8,7 @@ "fmt" "text/template" + "github.com/juju/errors" "github.com/juju/gomaasapi" jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" @@ -271,6 +272,112 @@ } ]` +var 
exampleParsedInterfaceSetJSON = []network.InterfaceInfo{{ + DeviceIndex: 0, + MACAddress: "52:54:00:70:9b:fe", + CIDR: "10.20.19.0/24", + ProviderId: "91", + ProviderSubnetId: "3", + AvailabilityZones: nil, + VLANTag: 0, + ProviderVLANId: "5001", + ProviderAddressId: "436", + InterfaceName: "eth0", + InterfaceType: "ethernet", + Disabled: false, + NoAutoStart: false, + ConfigType: "static", + Address: network.NewAddressOnSpace("default", "10.20.19.103"), + DNSServers: network.NewAddressesOnSpace("default", "10.20.19.2", "10.20.19.3"), + DNSSearchDomains: nil, + MTU: 1500, + GatewayAddress: network.NewAddressOnSpace("default", "10.20.19.2"), +}, { + DeviceIndex: 0, + MACAddress: "52:54:00:70:9b:fe", + CIDR: "10.20.19.0/24", + ProviderId: "91", + ProviderSubnetId: "3", + AvailabilityZones: nil, + VLANTag: 0, + ProviderVLANId: "5001", + ProviderAddressId: "437", + InterfaceName: "eth0", + InterfaceType: "ethernet", + Disabled: false, + NoAutoStart: false, + ConfigType: "static", + Address: network.NewAddressOnSpace("default", "10.20.19.104"), + DNSServers: network.NewAddressesOnSpace("default", "10.20.19.2", "10.20.19.3"), + DNSSearchDomains: nil, + MTU: 1500, + GatewayAddress: network.NewAddressOnSpace("default", "10.20.19.2"), +}, { + DeviceIndex: 1, + MACAddress: "52:54:00:70:9b:fe", + CIDR: "10.50.19.0/24", + ProviderId: "150", + ProviderSubnetId: "5", + AvailabilityZones: nil, + VLANTag: 50, + ProviderVLANId: "5004", + ProviderAddressId: "517", + InterfaceName: "eth0.50", + ParentInterfaceName: "eth0", + InterfaceType: "802.1q", + Disabled: false, + NoAutoStart: false, + ConfigType: "static", + Address: network.NewAddressOnSpace("admin", "10.50.19.103"), + DNSServers: nil, + DNSSearchDomains: nil, + MTU: 1500, + GatewayAddress: network.NewAddressOnSpace("admin", "10.50.19.2"), +}, { + DeviceIndex: 2, + MACAddress: "52:54:00:70:9b:fe", + CIDR: "10.100.19.0/24", + ProviderId: "151", + ProviderSubnetId: "6", + AvailabilityZones: nil, + VLANTag: 100, + 
ProviderVLANId: "5005", + ProviderAddressId: "519", + InterfaceName: "eth0.100", + ParentInterfaceName: "eth0", + InterfaceType: "802.1q", + Disabled: false, + NoAutoStart: false, + ConfigType: "static", + Address: network.NewAddressOnSpace("public", "10.100.19.103"), + DNSServers: nil, + DNSSearchDomains: nil, + MTU: 1500, + GatewayAddress: network.NewAddressOnSpace("public", "10.100.19.2"), +}, { + DeviceIndex: 3, + MACAddress: "52:54:00:70:9b:fe", + CIDR: "10.250.19.0/24", + ProviderId: "152", + ProviderSubnetId: "8", + AvailabilityZones: nil, + VLANTag: 250, + ProviderVLANId: "5008", + ProviderAddressId: "523", + ProviderSpaceId: "3", + InterfaceName: "eth0.250", + ParentInterfaceName: "eth0", + InterfaceType: "802.1q", + Disabled: false, + NoAutoStart: false, + ConfigType: "static", + Address: newAddressOnSpaceWithId("storage", network.Id("3"), "10.250.19.103"), + DNSServers: nil, + DNSSearchDomains: nil, + MTU: 1500, + GatewayAddress: newAddressOnSpaceWithId("storage", network.Id("3"), "10.250.19.2"), +}} + func (s *interfacesSuite) TestParseInterfacesNoJSON(c *gc.C) { result, err := parseInterfaces(nil) c.Check(err, gc.ErrorMatches, "parsing interfaces: unexpected end of JSON input") @@ -449,115 +556,126 @@ subnetsMap["10.250.19.0/24"] = network.Id("3") subnetsMap["192.168.1.0/24"] = network.Id("0") - expected := []network.InterfaceInfo{{ - DeviceIndex: 0, - MACAddress: "52:54:00:70:9b:fe", - CIDR: "10.20.19.0/24", - ProviderId: "91", - ProviderSubnetId: "3", - AvailabilityZones: nil, - VLANTag: 0, - ProviderVLANId: "5001", - ProviderAddressId: "436", - InterfaceName: "eth0", - InterfaceType: "ethernet", - Disabled: false, - NoAutoStart: false, - ConfigType: "static", - Address: network.NewAddressOnSpace("default", "10.20.19.103"), - DNSServers: network.NewAddressesOnSpace("default", "10.20.19.2", "10.20.19.3"), - DNSSearchDomains: nil, - MTU: 1500, - GatewayAddress: network.NewAddressOnSpace("default", "10.20.19.2"), - }, { - DeviceIndex: 0, - MACAddress: 
"52:54:00:70:9b:fe", - CIDR: "10.20.19.0/24", - ProviderId: "91", - ProviderSubnetId: "3", - AvailabilityZones: nil, - VLANTag: 0, - ProviderVLANId: "5001", - ProviderAddressId: "437", - InterfaceName: "eth0", - InterfaceType: "ethernet", - Disabled: false, - NoAutoStart: false, - ConfigType: "static", - Address: network.NewAddressOnSpace("default", "10.20.19.104"), - DNSServers: network.NewAddressesOnSpace("default", "10.20.19.2", "10.20.19.3"), - DNSSearchDomains: nil, - MTU: 1500, - GatewayAddress: network.NewAddressOnSpace("default", "10.20.19.2"), - }, { - DeviceIndex: 1, - MACAddress: "52:54:00:70:9b:fe", - CIDR: "10.50.19.0/24", - ProviderId: "150", - ProviderSubnetId: "5", - AvailabilityZones: nil, - VLANTag: 50, - ProviderVLANId: "5004", - ProviderAddressId: "517", - InterfaceName: "eth0.50", - ParentInterfaceName: "eth0", - InterfaceType: "802.1q", - Disabled: false, - NoAutoStart: false, - ConfigType: "static", - Address: network.NewAddressOnSpace("admin", "10.50.19.103"), - DNSServers: nil, - DNSSearchDomains: nil, - MTU: 1500, - GatewayAddress: network.NewAddressOnSpace("admin", "10.50.19.2"), - }, { - DeviceIndex: 2, - MACAddress: "52:54:00:70:9b:fe", - CIDR: "10.100.19.0/24", - ProviderId: "151", - ProviderSubnetId: "6", - AvailabilityZones: nil, - VLANTag: 100, - ProviderVLANId: "5005", - ProviderAddressId: "519", - InterfaceName: "eth0.100", - ParentInterfaceName: "eth0", - InterfaceType: "802.1q", - Disabled: false, - NoAutoStart: false, - ConfigType: "static", - Address: network.NewAddressOnSpace("public", "10.100.19.103"), - DNSServers: nil, - DNSSearchDomains: nil, - MTU: 1500, - GatewayAddress: network.NewAddressOnSpace("public", "10.100.19.2"), - }, { - DeviceIndex: 3, - MACAddress: "52:54:00:70:9b:fe", - CIDR: "10.250.19.0/24", - ProviderId: "152", - ProviderSubnetId: "8", - AvailabilityZones: nil, - VLANTag: 250, - ProviderVLANId: "5008", - ProviderAddressId: "523", - ProviderSpaceId: "3", - InterfaceName: "eth0.250", - ParentInterfaceName: 
"eth0", - InterfaceType: "802.1q", - Disabled: false, - NoAutoStart: false, - ConfigType: "static", - Address: newAddressOnSpaceWithId("storage", network.Id("3"), "10.250.19.103"), - DNSServers: nil, - DNSSearchDomains: nil, - MTU: 1500, - GatewayAddress: newAddressOnSpaceWithId("storage", network.Id("3"), "10.250.19.2"), - }} - infos, err := maasObjectNetworkInterfaces(&obj, subnetsMap) c.Assert(err, jc.ErrorIsNil) - c.Check(infos, jc.DeepEquals, expected) + c.Check(infos, jc.DeepEquals, exampleParsedInterfaceSetJSON) +} + +const ( + notUsingMAAS2 = false + notUsingMAAS1 = true +) + +func (s *interfacesSuite) TestInstanceConfiguredInterfaceNamesWithExampleMAAS1InterfaceSet(c *gc.C) { + nodeJSON := fmt.Sprintf(`{ + "system_id": "foo", + "interface_set": %s + }`, exampleInterfaceSetJSON) + obj := s.testMAASObject.TestServer.NewNode(nodeJSON) + + inst := &maas1Instance{maasObject: &obj} + names, err := instanceConfiguredInterfaceNames(notUsingMAAS2, inst, nil) + c.Assert(err, jc.ErrorIsNil) + c.Check(names, jc.DeepEquals, []string{"eth0", "eth0:1", "eth0.50", "eth0.100", "eth0.250"}) +} + +func (s *interfacesSuite) TestInstanceConfiguredNamesWithoutInterfaceSetMAAS1(c *gc.C) { + nodeJSON := `{"system_id": "foo"}` + obj := s.testMAASObject.TestServer.NewNode(nodeJSON) + + inst := &maas1Instance{maasObject: &obj} + names, err := instanceConfiguredInterfaceNames(notUsingMAAS2, inst, nil) + c.Assert(err, gc.ErrorMatches, "interface_set not supported") + c.Check(err, jc.Satisfies, errors.IsNotSupported) + c.Check(names, gc.HasLen, 0) +} + +func (s *interfacesSuite) TestInstanceConfiguredInterfaceNamesPartiallyConfiguredMAAS1(c *gc.C) { + nodeJSON := `{ + "system_id": "foo", + "interface_set": [{ + "name": "eth0", + "links": [ + {"subnet": {"cidr": "1.2.3.4/5"}, "mode": "static", "ip_address": "1.2.3.4"}, + {"subnet": {"cidr": "1.2.3.4/5"}, "mode": "auto"}, + {"subnet": {"cidr": "1.2.3.4/5"}, "mode": "dhcp"} + ] + }, { + "name": "eth1", + "links": [{"mode": "link_up"}] + 
}, { + "name": "eth1.99", + "links": [{"subnet": {"cidr": "192.168.99.0/24"}, "mode": "auto"}] + }]}` + obj := s.testMAASObject.TestServer.NewNode(nodeJSON) + + inst := &maas1Instance{maasObject: &obj} + names, err := instanceConfiguredInterfaceNames(notUsingMAAS2, inst, nil) + c.Assert(err, jc.ErrorIsNil) + c.Check(names, jc.DeepEquals, []string{"eth0", "eth0:1", "eth0:2", "eth1.99"}) +} + +func (s *interfacesSuite) TestInstanceConfiguredInterfaceNamesWithoutInterfaceSetMAAS2(c *gc.C) { + inst := &maas2Instance{machine: &fakeMachine{interfaceSet: nil}} + + names, err := instanceConfiguredInterfaceNames(notUsingMAAS1, inst, nil) + c.Assert(err, jc.ErrorIsNil) + c.Check(names, gc.HasLen, 0) +} + +func (s *interfacesSuite) TestInstanceConfiguredInterfaceNamesPartiallyConfiguredMAAS2(c *gc.C) { + + subnet50 := &fakeSubnet{ + cidr: "10.50.19.0/24", + vlan: &fakeVLAN{id: 5050}, + } + subnet250 := &fakeSubnet{ + cidr: "10.250.19.0/24", + vlan: &fakeVLAN{id: 5250}, + } + + interfaces := []gomaasapi.Interface{ + &fakeInterface{ + name: "eth0", + links: []gomaasapi.Link{ + &fakeLink{mode: "link_up"}, + }, + }, + &fakeInterface{ + name: "eth0.50", + links: []gomaasapi.Link{ + &fakeLink{ + subnet: subnet50, + ipAddress: "10.50.19.103", + mode: "static", + }, + &fakeLink{ // alias :1 + subnet: subnet50, + mode: "auto", // no address yet, but will have at startNode time + }, + &fakeLink{ // alias :2 + subnet: subnet50, + mode: "dhcp", // will get address at boot via DHCP + }, + }, + }, + &fakeInterface{name: "eth0.100", links: nil}, + &fakeInterface{ + name: "eth0.250", + links: []gomaasapi.Link{ + &fakeLink{ + subnet: subnet250, + mode: "auto", + }, + }, + }, + } + + inst := &maas2Instance{machine: &fakeMachine{interfaceSet: interfaces}} + + names, err := instanceConfiguredInterfaceNames(notUsingMAAS1, inst, nil) + c.Assert(err, jc.ErrorIsNil) + c.Check(names, jc.DeepEquals, []string{"eth0.50", "eth0.50:1", "eth0.50:2", "eth0.250"}) } func (s *interfacesSuite) 
TestMAAS2NetworkInterfaces(c *gc.C) { @@ -805,8 +923,8 @@ MTU: 1500, GatewayAddress: newAddressOnSpaceWithId("storage", network.Id("3"), "10.250.19.2"), }} - - instance := &maas2Instance{machine: &fakeMachine{interfaceSet: exampleInterfaces}} + machine := &fakeMachine{interfaceSet: exampleInterfaces} + instance := &maas2Instance{machine: machine} infos, err := maas2NetworkInterfaces(instance, subnetsMap) c.Assert(err, jc.ErrorIsNil) @@ -847,8 +965,8 @@ children: []string{"eth0.100", "eth0.250", "eth0.50"}, }, } - - instance := &maas2Instance{machine: &fakeMachine{interfaceSet: exampleInterfaces}} + machine := &fakeMachine{interfaceSet: exampleInterfaces} + instance := &maas2Instance{machine: machine} expected := []network.InterfaceInfo{{ DeviceIndex: 0, diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/maas/maas2_environ_whitebox_test.go juju-core-2.0.0/src/github.com/juju/juju/provider/maas/maas2_environ_whitebox_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/maas/maas2_environ_whitebox_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/maas/maas2_environ_whitebox_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,7 +4,6 @@ package maas import ( - "bytes" "fmt" "net/http" @@ -14,6 +13,7 @@ jc "github.com/juju/testing/checkers" "github.com/juju/utils/arch" "github.com/juju/utils/set" + "github.com/juju/version" gc "gopkg.in/check.v1" "gopkg.in/juju/names.v2" goyaml "gopkg.in/yaml.v2" @@ -24,14 +24,13 @@ "github.com/juju/juju/environs" "github.com/juju/juju/environs/bootstrap" "github.com/juju/juju/environs/config" + "github.com/juju/juju/environs/tags" envjujutesting "github.com/juju/juju/environs/testing" envtools "github.com/juju/juju/environs/tools" "github.com/juju/juju/instance" jujutesting "github.com/juju/juju/juju/testing" "github.com/juju/juju/network" - "github.com/juju/juju/provider/common" coretesting "github.com/juju/juju/testing" - jujuversion 
"github.com/juju/juju/version" ) type maas2EnvironSuite struct { @@ -70,7 +69,7 @@ c.Assert(env, gc.NotNil) } -func (suite *maas2EnvironSuite) injectControllerWithSpacesAndCheck(c *gc.C, spaces []gomaasapi.Space, expected gomaasapi.AllocateMachineArgs) *maasEnviron { +func (suite *maas2EnvironSuite) injectControllerWithSpacesAndCheck(c *gc.C, spaces []gomaasapi.Space, expected gomaasapi.AllocateMachineArgs) (*maasEnviron, *fakeController) { var env *maasEnviron check := func(args gomaasapi.AllocateMachineArgs) { expected.AgentName = env.Config().UUID() @@ -87,7 +86,7 @@ suite.injectController(controller) suite.setupFakeTools(c) env = suite.makeEnviron(c, nil) - return env + return env, controller } func (suite *maas2EnvironSuite) makeEnvironWithMachines(c *gc.C, expectedSystemIDs []string, returnSystemIDs []string) *maasEnviron { @@ -309,7 +308,7 @@ } func (suite *maas2EnvironSuite) TestStartInstance(c *gc.C) { - env := suite.injectControllerWithSpacesAndCheck(c, nil, gomaasapi.AllocateMachineArgs{}) + env, _ := suite.injectControllerWithSpacesAndCheck(c, nil, gomaasapi.AllocateMachineArgs{}) params := environs.StartInstanceParams{ControllerUUID: suite.controllerUUID} result, err := jujutesting.StartInstanceWithParams(env, "1", params) @@ -317,6 +316,24 @@ c.Assert(result.Instance.Id(), gc.Equals, instance.Id("Bruce Sterling")) } +func (suite *maas2EnvironSuite) TestStartInstanceAppliesResourceTags(c *gc.C) { + env, controller := suite.injectControllerWithSpacesAndCheck(c, nil, gomaasapi.AllocateMachineArgs{}) + config := env.Config() + _, ok := config.ResourceTags() + c.Assert(ok, jc.IsTrue) + params := environs.StartInstanceParams{ControllerUUID: suite.controllerUUID} + _, err := jujutesting.StartInstanceWithParams(env, "1", params) + c.Assert(err, jc.ErrorIsNil) + + machine := controller.allocateMachine.(*fakeMachine) + machine.CheckCallNames(c, "Start", "SetOwnerData") + c.Assert(machine.Calls()[1].Args[0], gc.DeepEquals, map[string]string{ + "claude": "rains", 
+ tags.JujuController: suite.controllerUUID, + tags.JujuModel: config.UUID(), + }) +} + func (suite *maas2EnvironSuite) TestStartInstanceParams(c *gc.C) { var env *maasEnviron suite.injectController(&fakeController{ @@ -371,7 +388,7 @@ Tags: []string{"tag1", "tag3"}, NotTags: []string{"tag2", "tag4"}, } - env = suite.injectControllerWithSpacesAndCheck(c, nil, expected) + env, _ = suite.injectControllerWithSpacesAndCheck(c, nil, expected) _, err := env.acquireNode2( "", "", constraints.Value{Tags: stringslicep("tag1", "^tag2", "tag3", "^tag4")}, @@ -403,7 +420,6 @@ id: 8, }, } - } func (suite *maas2EnvironSuite) TestAcquireNodePassesPositiveAndNegativeSpaces(c *gc.C) { @@ -414,7 +430,7 @@ {Label: "1", Space: "7"}, }, } - env := suite.injectControllerWithSpacesAndCheck(c, getFourSpaces(), expected) + env, _ := suite.injectControllerWithSpacesAndCheck(c, getFourSpaces(), expected) _, err := env.acquireNode2( "", "", @@ -425,7 +441,7 @@ } func (suite *maas2EnvironSuite) TestAcquireNodeDisambiguatesNamedLabelsFromIndexedUpToALimit(c *gc.C) { - env := suite.injectControllerWithSpacesAndCheck(c, getFourSpaces(), gomaasapi.AllocateMachineArgs{}) + env, _ := suite.injectControllerWithSpacesAndCheck(c, getFourSpaces(), gomaasapi.AllocateMachineArgs{}) var shortLimit uint = 0 suite.PatchValue(&numericLabelLimit, shortLimit) @@ -624,7 +640,7 @@ NotSpace: []string{"3"}, Interfaces: []gomaasapi.InterfaceSpec{{Label: "0", Space: "2"}}, } - env := suite.injectControllerWithSpacesAndCheck(c, getTwoSpaces(), expected) + env, _ := suite.injectControllerWithSpacesAndCheck(c, getTwoSpaces(), expected) cons := constraints.Value{ Spaces: stringslicep("foo", "^bar"), } @@ -637,7 +653,7 @@ NotSpace: []string{"3"}, Interfaces: []gomaasapi.InterfaceSpec{{Label: "0", Space: "2"}}, } - env := suite.injectControllerWithSpacesAndCheck(c, getTwoSpaces(), expected) + env, _ := suite.injectControllerWithSpacesAndCheck(c, getTwoSpaces(), expected) cons := constraints.Value{ Spaces: 
stringslicep("foo-1", "^bar-3"), } @@ -1253,13 +1269,6 @@ suite.assertAllocateContainerAddressesFails(c, controller, nil, "cannot find primary interface for container") } -func (suite *maas2EnvironSuite) TestAllocateContainerAddressesPrimaryInterfaceSubnetMissing(c *gc.C) { - controller := &fakeController{} - prepared := []network.InterfaceInfo{{InterfaceName: "eth0"}} - errorMatches := "primary NIC subnet not found" - suite.assertAllocateContainerAddressesFails(c, controller, prepared, errorMatches) -} - func makeFakeSubnet(id int) fakeSubnet { return fakeSubnet{ id: id, @@ -1344,12 +1353,48 @@ c.Assert(maasArgs, jc.DeepEquals, expected) } -func (suite *maas2EnvironSuite) TestAllocateContainerAddressesSecondNICSubnetMissing(c *gc.C) { +func (suite *maas2EnvironSuite) TestAllocateContainerAddressesSubnetMissing(c *gc.C) { subnet := makeFakeSubnet(3) var env *maasEnviron device := &fakeDevice{ - interfaceSet: []gomaasapi.Interface{&fakeInterface{}}, - systemID: "foo", + Stub: &testing.Stub{}, + interfaceSet: []gomaasapi.Interface{ + &fakeInterface{ + id: 93, + name: "eth0", + type_: "physical", + enabled: true, + macAddress: "53:54:00:70:9b:ff", + vlan: &fakeVLAN{vid: 0}, + links: []gomaasapi.Link{ + &fakeLink{ + id: 480, + mode: "link_up", + }, + }, + parents: []string{}, + children: []string{}, + Stub: &testing.Stub{}, + }, + }, + interface_: &fakeInterface{ + id: 94, + name: "eth1", + type_: "physical", + enabled: true, + macAddress: "53:54:00:70:9b:f1", + vlan: &fakeVLAN{vid: 0}, + links: []gomaasapi.Link{ + &fakeLink{ + id: 481, + mode: "link_up", + }, + }, + parents: []string{}, + children: []string{}, + Stub: &testing.Stub{}, + }, + systemID: "foo", } machine := &fakeMachine{ Stub: &testing.Stub{}, @@ -1357,6 +1402,7 @@ createDevice: device, } controller := &fakeController{ + Stub: &testing.Stub{}, machines: []gomaasapi.Machine{machine}, spaces: []gomaasapi.Space{ fakeSpace{ @@ -1365,16 +1411,40 @@ subnets: []gomaasapi.Subnet{subnet}, }, }, + devices: 
[]gomaasapi.Device{device}, } suite.injectController(controller) env = suite.makeEnviron(c, nil) prepared := []network.InterfaceInfo{ - {InterfaceName: "eth0", CIDR: "10.20.19.0/24", MACAddress: "DEADBEEF"}, - {InterfaceName: "eth1", CIDR: "10.20.20.0/24", MACAddress: "DEADBEEE"}, + {InterfaceName: "eth0", CIDR: "", MACAddress: "DEADBEEF"}, + {InterfaceName: "eth1", CIDR: "", MACAddress: "DEADBEEE"}, } ignored := names.NewMachineTag("1/lxd/0") - _, err := env.AllocateContainerAddresses(instance.Id("1"), ignored, prepared) - c.Assert(err, gc.ErrorMatches, "NIC eth1 subnet 10.20.20.0/24 not found") + allocated, err := env.AllocateContainerAddresses(instance.Id("1"), ignored, prepared) + c.Assert(err, jc.ErrorIsNil) + c.Assert(allocated, jc.DeepEquals, []network.InterfaceInfo{{ + MACAddress: "53:54:00:70:9b:ff", + ProviderId: "93", + ProviderVLANId: "0", + VLANTag: 0, + InterfaceName: "eth0", + InterfaceType: "ethernet", + Disabled: false, + NoAutoStart: false, + ConfigType: "manual", + MTU: 1500, + }, { + MACAddress: "53:54:00:70:9b:f1", + ProviderId: "94", + ProviderVLANId: "0", + VLANTag: 0, + InterfaceName: "eth1", + InterfaceType: "ethernet", + Disabled: false, + NoAutoStart: false, + ConfigType: "manual", + MTU: 1500, + }}) } func (suite *maas2EnvironSuite) TestAllocateContainerAddressesCreateInterfaceError(c *gc.C) { @@ -1442,6 +1512,7 @@ createDevice: device, } controller := &fakeController{ + Stub: &testing.Stub{}, machines: []gomaasapi.Machine{machine}, spaces: []gomaasapi.Space{ fakeSpace{ @@ -1450,6 +1521,7 @@ subnets: []gomaasapi.Subnet{subnet, subnet2}, }, }, + devices: []gomaasapi.Device{device}, } suite.injectController(controller) env = suite.makeEnviron(c, nil) @@ -1458,8 +1530,34 @@ {InterfaceName: "eth1", CIDR: "10.20.20.0/24", MACAddress: "DEADBEEE"}, } ignored := names.NewMachineTag("1/lxd/0") - _, err := env.AllocateContainerAddresses(instance.Id("1"), ignored, prepared) - c.Assert(err, gc.ErrorMatches, "cannot link device interface to subnet: 
boom") + allocated, err := env.AllocateContainerAddresses(instance.Id("1"), ignored, prepared) + c.Assert(err, jc.ErrorIsNil) + c.Assert(allocated, jc.DeepEquals, []network.InterfaceInfo{{ + CIDR: "", + ProviderId: "0", + ProviderSubnetId: "", + ProviderVLANId: "0", + VLANTag: 0, + InterfaceName: "", + InterfaceType: "ethernet", + ConfigType: "", + MTU: 1500, + Disabled: true, + NoAutoStart: true, + }, { + CIDR: "", + ProviderId: "0", + ProviderSubnetId: "", + ProviderVLANId: "0", + VLANTag: 0, + InterfaceName: "", + InterfaceType: "ethernet", + ConfigType: "", + MTU: 1500, + Disabled: true, + NoAutoStart: true, + }}) + args := getArgs(c, interface_.Calls()) maasArgs, ok := args.(gomaasapi.LinkSubnetArgs) c.Assert(ok, jc.IsTrue) @@ -1502,16 +1600,15 @@ }) c.Assert(err, jc.ErrorIsNil) - machine.Stub.CheckCallNames(c, "Start") - controller.Stub.CheckCallNames(c, "GetFile", "AddFile") - addFileArgs, ok := controller.Stub.Calls()[1].Args[0].(gomaasapi.AddFileArgs) + machine.Stub.CheckCallNames(c, "Start", "SetOwnerData") + ownerData, ok := machine.Stub.Calls()[1].Args[0].(map[string]string) c.Assert(ok, jc.IsTrue) - - // Make it look like the right state was written to the file. - buffer := new(bytes.Buffer) - buffer.ReadFrom(addFileArgs.Reader) - file.contents = buffer.Bytes() - c.Check(string(buffer.Bytes()), gc.Equals, "state-instances:\n- gus\n") + c.Assert(ownerData, gc.DeepEquals, map[string]string{ + "claude": "rains", + tags.JujuController: suite.controllerUUID, + tags.JujuIsController: "true", + tags.JujuModel: env.Config().UUID(), + }) // Test the instance id is correctly recorded for the bootstrap node. // Check that ControllerInstances returns the id of the bootstrap machine. 
@@ -1533,9 +1630,9 @@ instance, hc := jujutesting.AssertStartInstance(c, env, suite.controllerUUID, "1") c.Check(instance, gc.NotNil) c.Assert(hc, gc.NotNil) - c.Check(hc.String(), gc.Equals, fmt.Sprintf("arch=%s cpu-cores=1 mem=1024M availability-zone=test_zone", arch.HostArch())) + c.Check(hc.String(), gc.Equals, fmt.Sprintf("arch=%s cores=1 mem=1024M availability-zone=test_zone", arch.HostArch())) - node1.Stub.CheckCallNames(c, "Start") + node1.Stub.CheckCallNames(c, "Start", "SetOwnerData") startArgs, ok := node1.Stub.Calls()[0].Args[0].(gomaasapi.StartArgs) c.Assert(ok, jc.IsTrue) @@ -1563,28 +1660,27 @@ _, err := env.ControllerInstances(suite.controllerUUID) c.Assert(err, gc.Equals, environs.ErrNotBootstrapped) - tests := [][]instance.Id{{}, {"inst-0"}, {"inst-0", "inst-1"}} - for _, expected := range tests { - state, err := goyaml.Marshal(&common.BootstrapState{StateInstances: expected}) - c.Assert(err, jc.ErrorIsNil) + controller.machinesArgsCheck = func(args gomaasapi.MachinesArgs) { + c.Assert(args, gc.DeepEquals, gomaasapi.MachinesArgs{ + OwnerData: map[string]string{ + tags.JujuIsController: "true", + tags.JujuController: suite.controllerUUID, + }, + }) + } - controller.files = []gomaasapi.File{&fakeFile{ - name: coretesting.ModelTag.Id() + "-provider-state", - contents: state, - }} + tests := [][]instance.Id{{"inst-0"}, {"inst-0", "inst-1"}} + for _, expected := range tests { + controller.machines = make([]gomaasapi.Machine, len(expected)) + for i := range expected { + controller.machines[i] = newFakeMachine(string(expected[i]), "", "") + } controllerInstances, err := env.ControllerInstances(suite.controllerUUID) c.Assert(err, jc.ErrorIsNil) c.Assert(controllerInstances, jc.SameContents, expected) } } -func (suite *maas2EnvironSuite) TestControllerInstancesFailsIfNoStateInstances(c *gc.C) { - env := suite.makeEnviron(c, - newFakeControllerWithErrors(gomaasapi.NewNoMatchError("state"))) - _, err := env.ControllerInstances(suite.controllerUUID) - 
c.Check(err, gc.Equals, environs.ErrNotBootstrapped) -} - func (suite *maas2EnvironSuite) TestDestroy(c *gc.C) { file1 := &fakeFile{name: coretesting.ModelTag.Id() + "-provider-state"} file2 := &fakeFile{name: coretesting.ModelTag.Id() + "-horace"} @@ -1608,19 +1704,16 @@ func (suite *maas2EnvironSuite) TestBootstrapFailsIfNoTools(c *gc.C) { env := suite.makeEnviron(c, newFakeController()) - // Disable auto-uploading by setting the agent version. - cfg, err := env.Config().Apply(map[string]interface{}{ - "agent-version": jujuversion.Current.String(), - }) - c.Assert(err, jc.ErrorIsNil) - err = env.SetConfig(cfg) - c.Assert(err, jc.ErrorIsNil) - err = bootstrap.Bootstrap(envjujutesting.BootstrapContext(c), env, bootstrap.BootstrapParams{ + vers := version.MustParse("1.2.3") + err := bootstrap.Bootstrap(envjujutesting.BootstrapContext(c), env, bootstrap.BootstrapParams{ ControllerConfig: coretesting.FakeControllerConfig(), AdminSecret: jujutesting.AdminSecret, CAPrivateKey: coretesting.CAKey, + // Disable auto-uploading by setting the agent version + // to something that's not the current version. 
+ AgentVersion: &vers, }) - c.Check(err, gc.ErrorMatches, "Juju cannot bootstrap because no tools are available for your model(.|\n)*") + c.Check(err, gc.ErrorMatches, "Juju cannot bootstrap because no agent binaries are available for your model(.|\n)*") } func (suite *maas2EnvironSuite) TestBootstrapFailsIfNoNodes(c *gc.C) { @@ -1676,13 +1769,13 @@ } func (suite *maas2EnvironSuite) TestReleaseContainerAddresses(c *gc.C) { - dev1 := newFakeDeviceWithMAC("eleven") - dev2 := newFakeDeviceWithMAC("will") + dev1 := newFakeDevice("a", "eleven") + dev2 := newFakeDevice("b", "will") controller := newFakeController() controller.devices = []gomaasapi.Device{dev1, dev2} env := suite.makeEnviron(c, controller) - err := env.ReleaseContainerAddresses([]network.InterfaceInfo{ + err := env.ReleaseContainerAddresses([]network.ProviderInterfaceInfo{ {MACAddress: "will"}, {MACAddress: "dustin"}, {MACAddress: "eleven"}, @@ -1698,22 +1791,42 @@ dev2.CheckCallNames(c, "Delete") } +func (suite *maas2EnvironSuite) TestReleaseContainerAddresses_HandlesDupes(c *gc.C) { + dev1 := newFakeDevice("a", "eleven") + controller := newFakeController() + controller.devices = []gomaasapi.Device{dev1, dev1} + + env := suite.makeEnviron(c, controller) + err := env.ReleaseContainerAddresses([]network.ProviderInterfaceInfo{ + {MACAddress: "will"}, + {MACAddress: "eleven"}, + }) + c.Assert(err, jc.ErrorIsNil) + + args, ok := getArgs(c, controller.Calls()).(gomaasapi.DevicesArgs) + c.Assert(ok, jc.IsTrue) + expected := gomaasapi.DevicesArgs{MACAddresses: []string{"will", "eleven"}} + c.Assert(args, gc.DeepEquals, expected) + + dev1.CheckCallNames(c, "Delete") +} + func (suite *maas2EnvironSuite) TestReleaseContainerAddressesErrorGettingDevices(c *gc.C) { controller := newFakeControllerWithErrors(errors.New("Everything done broke")) env := suite.makeEnviron(c, controller) - err := env.ReleaseContainerAddresses([]network.InterfaceInfo{{MACAddress: "anything"}}) + err := 
env.ReleaseContainerAddresses([]network.ProviderInterfaceInfo{{MACAddress: "anything"}}) c.Assert(err, gc.ErrorMatches, "Everything done broke") } func (suite *maas2EnvironSuite) TestReleaseContainerAddressesErrorDeletingDevice(c *gc.C) { - dev1 := newFakeDeviceWithMAC("eleven") + dev1 := newFakeDevice("a", "eleven") dev1.systemID = "hopper" dev1.SetErrors(errors.New("don't delete me")) controller := newFakeController() controller.devices = []gomaasapi.Device{dev1} env := suite.makeEnviron(c, controller) - err := env.ReleaseContainerAddresses([]network.InterfaceInfo{ + err := env.ReleaseContainerAddresses([]network.ProviderInterfaceInfo{ {MACAddress: "eleven"}, }) c.Assert(err, gc.ErrorMatches, "deleting device hopper: don't delete me") @@ -1724,9 +1837,10 @@ dev1.CheckCallNames(c, "Delete") } -func newFakeDeviceWithMAC(macAddress string) *fakeDevice { +func newFakeDevice(systemID, macAddress string) *fakeDevice { return &fakeDevice{ - Stub: &testing.Stub{}, + Stub: &testing.Stub{}, + systemID: systemID, interface_: &fakeInterface{ Stub: &testing.Stub{}, macAddress: macAddress, diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/maas/maas2instance.go juju-core-2.0.0/src/github.com/juju/juju/provider/maas/maas2instance.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/maas/maas2instance.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/maas/maas2instance.go 2016-10-13 14:31:49.000000000 +0000 @@ -65,7 +65,10 @@ // Status returns a juju status based on the maas instance returned // status message. func (mi *maas2Instance) Status() instance.InstanceStatus { - // TODO (babbageclunk): this should rerequest to get live status. + // A fresh status is not obtained here because the interface it is intended + to satisfy gets a new maas2Instance before each call, using a fresh status + would cause us to mask errors since this interface does not contemplate + returning them. 
statusName := mi.machine.StatusName() statusMsg := mi.machine.StatusMessage() return convertInstanceStatus(statusName, statusMsg, mi.Id()) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/maas/maas2instance_test.go juju-core-2.0.0/src/github.com/juju/juju/provider/maas/maas2instance_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/maas/maas2instance_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/maas/maas2instance_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -19,21 +19,24 @@ var _ = gc.Suite(&maas2InstanceSuite{}) func (s *maas2InstanceSuite) TestString(c *gc.C) { - instance := &maas2Instance{machine: &fakeMachine{hostname: "peewee", systemID: "herman"}} + machine := &fakeMachine{hostname: "peewee", systemID: "herman"} + instance := &maas2Instance{machine: machine} c.Assert(instance.String(), gc.Equals, "peewee:herman") } func (s *maas2InstanceSuite) TestID(c *gc.C) { - thing := &maas2Instance{machine: &fakeMachine{systemID: "herman"}} + machine := &fakeMachine{systemID: "herman"} + thing := &maas2Instance{machine: machine} c.Assert(thing.Id(), gc.Equals, instance.Id("herman")) } func (s *maas2InstanceSuite) TestAddresses(c *gc.C) { - instance := &maas2Instance{machine: &fakeMachine{ipAddresses: []string{ + machine := &fakeMachine{ipAddresses: []string{ "0.0.0.0", "1.2.3.4", "127.0.0.1", - }}} + }} + instance := &maas2Instance{machine: machine} expectedAddresses := []network.Address{ network.NewAddress("0.0.0.0"), network.NewAddress("1.2.3.4"), @@ -45,26 +48,30 @@ } func (s *maas2InstanceSuite) TestZone(c *gc.C) { - instance := &maas2Instance{machine: &fakeMachine{zoneName: "inflatable"}} + machine := &fakeMachine{zoneName: "inflatable"} + instance := &maas2Instance{machine: machine} zone, err := instance.zone() c.Assert(err, jc.ErrorIsNil) c.Assert(zone, gc.Equals, "inflatable") } func (s *maas2InstanceSuite) TestStatusSuccess(c *gc.C) { - thing := &maas2Instance{machine: 
&fakeMachine{statusMessage: "Wexler", statusName: "Deploying"}} + machine := &fakeMachine{statusMessage: "Wexler", statusName: "Deploying"} + thing := &maas2Instance{machine: machine} result := thing.Status() - c.Assert(result, jc.DeepEquals, instance.InstanceStatus{status.StatusAllocating, "Deploying: Wexler"}) + c.Assert(result, jc.DeepEquals, instance.InstanceStatus{status.Allocating, "Deploying: Wexler"}) } func (s *maas2InstanceSuite) TestStatusError(c *gc.C) { - thing := &maas2Instance{machine: &fakeMachine{statusMessage: "", statusName: ""}} + machine := &fakeMachine{statusMessage: "", statusName: ""} + thing := &maas2Instance{machine: machine} result := thing.Status() c.Assert(result, jc.DeepEquals, instance.InstanceStatus{"", "error in getting status"}) } func (s *maas2InstanceSuite) TestHostname(c *gc.C) { - thing := &maas2Instance{machine: &fakeMachine{hostname: "saul-goodman"}} + machine := &fakeMachine{hostname: "saul-goodman"} + thing := &maas2Instance{machine: machine} result, err := thing.hostname() c.Assert(err, jc.ErrorIsNil) c.Assert(result, gc.Equals, "saul-goodman") diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/maas/maas2_test.go juju-core-2.0.0/src/github.com/juju/juju/provider/maas/maas2_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/maas/maas2_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/maas/maas2_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -184,6 +184,14 @@ return c.NextErr() } +type fakeMachineOnlyController struct { + machines []gomaasapi.Machine +} + +func (f *fakeMachineOnlyController) Machines(gomaasapi.MachinesArgs) ([]gomaasapi.Machine, error) { + return f.machines, nil +} + type fakeBootResource struct { gomaasapi.BootResource name string @@ -229,6 +237,11 @@ return m.tags } +func (m *fakeMachine) SetOwnerData(data map[string]string) error { + m.MethodCall(m, "SetOwnerData", data) + return m.NextErr() +} + func (m *fakeMachine) CPUCount() 
int { return m.cpuCount } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/maas/maas_test.go juju-core-2.0.0/src/github.com/juju/juju/provider/maas/maas_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/maas/maas_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/maas/maas_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -95,10 +95,10 @@ func (s *providerSuite) SetUpTest(c *gc.C) { s.baseProviderSuite.SetUpTest(c) - mockCapabilities := func(client *gomaasapi.MAASObject) (set.Strings, error) { + mockCapabilities := func(*gomaasapi.MAASObject, string) (set.Strings, error) { return set.NewStrings("network-deployment-ubuntu"), nil } - mockGetController := func(maasServer, apiKey string) (gomaasapi.Controller, error) { + mockGetController := func(string, string) (gomaasapi.Controller, error) { return nil, gomaasapi.NewUnsupportedVersionError("oops") } s.PatchValue(&GetCapabilities, mockCapabilities) @@ -120,6 +120,9 @@ var maasEnvAttrs = coretesting.Attrs{ "name": "test-env", "type": "maas", + config.ResourceTagsKey: map[string]string{ + "claude": "rains", + }, } // makeEnviron creates a functional maasEnviron for a test. 
diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/maas/userdata_test.go juju-core-2.0.0/src/github.com/juju/juju/provider/maas/userdata_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/maas/userdata_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/maas/userdata_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -52,7 +52,7 @@ func (s *RenderersSuite) TestMAASUnknownOS(c *gc.C) { renderer := maas.MAASRenderer{} cloudcfg := &cloudinittest.CloudConfig{} - result, err := renderer.Render(cloudcfg, os.Arch) + result, err := renderer.Render(cloudcfg, os.GenericLinux) c.Assert(result, gc.IsNil) - c.Assert(err, gc.ErrorMatches, "Cannot encode userdata for OS: Arch") + c.Assert(err, gc.ErrorMatches, "Cannot encode userdata for OS: GenericLinux") } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/maas/volumes.go juju-core-2.0.0/src/github.com/juju/juju/provider/maas/volumes.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/maas/volumes.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/maas/volumes.go 2016-10-13 14:31:49.000000000 +0000 @@ -34,8 +34,8 @@ ) // StorageProviderTypes implements storage.ProviderRegistry. -func (*maasEnviron) StorageProviderTypes() []storage.ProviderType { - return []storage.ProviderType{maasStorageProviderType} +func (*maasEnviron) StorageProviderTypes() ([]storage.ProviderType, error) { + return []storage.ProviderType{maasStorageProviderType}, nil } // StorageProvider implements storage.ProviderRegistry. 
diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/manual/config.go juju-core-2.0.0/src/github.com/juju/juju/provider/manual/config.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/manual/config.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/manual/config.go 2016-10-13 14:31:49.000000000 +0000 @@ -10,12 +10,8 @@ ) var ( - configFields = schema.Fields{ - "bootstrap-user": schema.String(), - } - configDefaults = schema.Defaults{ - "bootstrap-user": "", - } + configFields = schema.Fields{} + configDefaults = schema.Defaults{} ) type environConfig struct { @@ -26,7 +22,3 @@ func newModelConfig(config *config.Config, attrs map[string]interface{}) *environConfig { return &environConfig{Config: config, attrs: attrs} } - -func (c *environConfig) bootstrapUser() string { - return c.attrs["bootstrap-user"].(string) -} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/manual/config_test.go juju-core-2.0.0/src/github.com/juju/juju/provider/manual/config_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/manual/config_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/manual/config_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,9 +4,6 @@ package manual import ( - "fmt" - "regexp" - jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" @@ -34,9 +31,8 @@ "name": "test", "type": "manual", "uuid": coretesting.ModelTag.Id(), - "controller-uuid": coretesting.ModelTag.Id(), + "controller-uuid": coretesting.ControllerTag.Id(), "firewall-mode": "instance", - "bootstrap-user": "", // While the ca-cert bits aren't entirely minimal, they avoid the need // to set up a fake home. 
"ca-cert": coretesting.CACert, @@ -58,35 +54,3 @@ c.Assert(err, jc.ErrorIsNil) return envConfig } - -func (s *configSuite) TestConfigMutability(c *gc.C) { - testConfig := MinimalConfig(c) - valid, err := manualProvider{}.Validate(testConfig, nil) - c.Assert(err, jc.ErrorIsNil) - unknownAttrs := valid.UnknownAttrs() - - // Make sure the immutable values can't be changed. It'd be nice to be - // able to change these, but that would involve somehow updating the - // machine agent's config/upstart config. - oldConfig := testConfig - for k, v := range map[string]interface{}{ - "bootstrap-user": "new-username", - } { - testConfig = MinimalConfig(c) - testConfig, err = testConfig.Apply(map[string]interface{}{k: v}) - c.Assert(err, jc.ErrorIsNil) - _, err := manualProvider{}.Validate(testConfig, oldConfig) - oldv := unknownAttrs[k] - errmsg := fmt.Sprintf("cannot change %s from %q to %q", k, oldv, v) - c.Assert(err, gc.ErrorMatches, regexp.QuoteMeta(errmsg)) - } -} - -func (s *configSuite) TestBootstrapUser(c *gc.C) { - values := MinimalConfigValues() - testConfig := getModelConfig(c, values) - c.Assert(testConfig.bootstrapUser(), gc.Equals, "") - values["bootstrap-user"] = "ubuntu" - testConfig = getModelConfig(c, values) - c.Assert(testConfig.bootstrapUser(), gc.Equals, "ubuntu") -} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/manual/credentials.go juju-core-2.0.0/src/github.com/juju/juju/provider/manual/credentials.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/manual/credentials.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/manual/credentials.go 2016-10-13 14:31:49.000000000 +0000 @@ -5,6 +5,7 @@ import ( "github.com/juju/juju/cloud" + "github.com/juju/juju/environs" ) type environProviderCredentials struct{} @@ -18,3 +19,8 @@ func (environProviderCredentials) DetectCredentials() (*cloud.CloudCredential, error) { return cloud.NewEmptyCloudCredential(), nil } + +// FinalizeCredential is 
part of the environs.ProviderCredentials interface. +func (environProviderCredentials) FinalizeCredential(_ environs.FinalizeCredentialContext, args environs.FinalizeCredentialParams) (*cloud.Credential, error) { + return &args.Credential, nil +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/manual/environ.go juju-core-2.0.0/src/github.com/juju/juju/provider/manual/environ.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/manual/environ.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/manual/environ.go 2016-10-13 14:31:49.000000000 +0000 @@ -15,6 +15,8 @@ "github.com/juju/errors" "github.com/juju/loggo" "github.com/juju/utils" + "github.com/juju/utils/arch" + "github.com/juju/utils/featureflag" "github.com/juju/utils/ssh" "github.com/juju/juju/agent" @@ -23,6 +25,7 @@ "github.com/juju/juju/environs" "github.com/juju/juju/environs/config" "github.com/juju/juju/environs/manual" + "github.com/juju/juju/feature" "github.com/juju/juju/instance" "github.com/juju/juju/juju/names" "github.com/juju/juju/mongo" @@ -38,15 +41,19 @@ ) var ( - logger = loggo.GetLogger("juju.provider.manual") - manualCheckProvisioned = manual.CheckProvisioned - manualDetectSeriesAndHardwareCharacteristics = manual.DetectSeriesAndHardwareCharacteristics + logger = loggo.GetLogger("juju.provider.manual") + manualCheckProvisioned = manual.CheckProvisioned ) type manualEnviron struct { - host string - cfgmutex sync.Mutex - cfg *environConfig + host string + user string + mu sync.Mutex + cfg *environConfig + // hw and series are detected by running a script on the + // target machine. We cache these, as they should not change. 
+ hw *instance.HardwareCharacteristics + series string } var errNoStartInstance = errors.New("manual provider cannot start instances") @@ -70,9 +77,9 @@ } func (e *manualEnviron) envConfig() (cfg *environConfig) { - e.cfgmutex.Lock() + e.mu.Lock() cfg = e.cfg - e.cfgmutex.Unlock() + e.mu.Unlock() return cfg } @@ -82,7 +89,7 @@ // PrepareForBootstrap is part of the Environ interface. func (e *manualEnviron) PrepareForBootstrap(ctx environs.BootstrapContext) error { - if err := ensureBootstrapUbuntuUser(ctx, e.host, e.envConfig()); err != nil { + if err := ensureBootstrapUbuntuUser(ctx, e.host, e.user, e.envConfig()); err != nil { return err } return nil @@ -102,13 +109,13 @@ if provisioned { return nil, manual.ErrProvisioned } - hc, series, err := manualDetectSeriesAndHardwareCharacteristics(e.host) + hw, series, err := e.seriesAndHardwareCharacteristics() if err != nil { return nil, err } finalize := func(ctx environs.BootstrapContext, icfg *instancecfg.InstanceConfig, _ environs.BootstrapDialOpts) error { icfg.Bootstrap.BootstrapMachineInstanceId = BootstrapInstanceId - icfg.Bootstrap.BootstrapMachineHardwareCharacteristics = &hc + icfg.Bootstrap.BootstrapMachineHardwareCharacteristics = hw if err := instancecfg.FinishInstanceConfig(icfg, e.Config()); err != nil { return err } @@ -116,17 +123,21 @@ } result := &environs.BootstrapResult{ - Arch: *hc.Arch, + Arch: *hw.Arch, Series: series, Finalize: finalize, } return result, nil } +// BootstrapMessage is part of the Environ interface. +func (e *manualEnviron) BootstrapMessage() string { + return "" +} + // ControllerInstances is specified in the Environ interface. func (e *manualEnviron) ControllerInstances(controllerUUID string) ([]instance.Id, error) { - arg0 := filepath.Base(os.Args[0]) - if arg0 != names.Jujud { + if !isRunningController() { // Not running inside the controller, so we must // verify the host. 
if err := e.verifyBootstrapHost(); err != nil { @@ -148,7 +159,7 @@ utils.ShQuote(agentsDir), noAgentDir, ) - out, err := runSSHCommand( + out, _, err := runSSHCommand( "ubuntu@"+e.host, []string{"/bin/bash"}, stdin, @@ -168,8 +179,8 @@ } func (e *manualEnviron) SetConfig(cfg *config.Config) error { - e.cfgmutex.Lock() - defer e.cfgmutex.Unlock() + e.mu.Lock() + defer e.mu.Unlock() _, err := manualProvider{}.validate(cfg, e.cfg.Config) if err != nil { return err @@ -200,7 +211,7 @@ return instances, err } -var runSSHCommand = func(host string, command []string, stdin string) (stdout string, err error) { +var runSSHCommand = func(host string, command []string, stdin string) (stdout, stderr string, err error) { cmd := ssh.Command(host, command, nil) cmd.Stdin = strings.NewReader(stdin) var stdoutBuf, stderrBuf bytes.Buffer @@ -210,21 +221,56 @@ if stderr := strings.TrimSpace(stderrBuf.String()); len(stderr) > 0 { err = errors.Annotate(err, stderr) } - return "", err + return "", "", err } - return stdoutBuf.String(), nil + return stdoutBuf.String(), stderrBuf.String(), nil } +// Destroy implements the Environ interface. func (e *manualEnviron) Destroy() error { + // There is nothing we can do for manual environments, + // except when destroying the controller as a whole + // (see DestroyController below). + return nil +} + +// DestroyController implements the Environ interface. +func (e *manualEnviron) DestroyController(controllerUUID string) error { script := ` set -x touch %s -pkill -%d jujud && exit -stop %s +# If jujud is running, we then wait for a while for it to stop. +stopped=0 +if pkill -%d jujud; then + for i in ` + "`seq 1 30`" + `; do + if pgrep jujud > /dev/null ; then + sleep 1 + else + echo "jujud stopped" + stopped=1 + break + fi + done +fi +if [ $stopped -ne 1 ]; then + # If jujud didn't stop nicely, we kill it hard here. 
+ %spkill -9 jujud + service %s stop +fi rm -f /etc/init/juju* +rm -f /etc/systemd/system/juju* rm -fr %s %s exit 0 ` + var diagnostics string + if featureflag.Enabled(feature.DeveloperMode) { + diagnostics = ` + echo "Dump engine report and goroutines for stuck jujud" + source /etc/profile.d/juju-introspection.sh + juju-engine-report + juju-goroutines +` + } script = fmt.Sprintf( script, // WARNING: this is linked with the use of uninstallFile in @@ -235,22 +281,21 @@ agent.UninstallFile, )), terminationworker.TerminationSignal, + diagnostics, mongo.ServiceName, utils.ShQuote(agent.DefaultPaths.DataDir), utils.ShQuote(agent.DefaultPaths.LogDir), ) - _, err := runSSHCommand( + logger.Tracef("destroy controller script: %s", script) + stdout, stderr, err := runSSHCommand( "ubuntu@"+e.host, []string{"sudo", "/bin/bash"}, script, ) + logger.Debugf("script stdout: \n%s", stdout) + logger.Debugf("script stderr: \n%s", stderr) return err } -// DestroyController implements the Environ interface. -func (e *manualEnviron) DestroyController(controllerUUID string) error { - return e.Destroy() -} - func (*manualEnviron) PrecheckInstance(series string, _ constraints.Value, placement string) error { return errors.New(`use "juju add-machine ssh:[user@]" to provision machines`) } @@ -266,9 +311,34 @@ func (e *manualEnviron) ConstraintsValidator() (constraints.Validator, error) { validator := constraints.NewValidator() validator.RegisterUnsupported(unsupportedConstraints) + if isRunningController() { + validator.UpdateVocabulary(constraints.Arch, []string{arch.HostArch()}) + } else { + // We're running outside of the Juju controller, so we must + // SSH to the machine and detect its architecture. 
+ hw, _, err := e.seriesAndHardwareCharacteristics() + if err != nil { + return nil, errors.Trace(err) + } + validator.UpdateVocabulary(constraints.Arch, []string{*hw.Arch}) + } return validator, nil } +func (e *manualEnviron) seriesAndHardwareCharacteristics() (_ *instance.HardwareCharacteristics, series string, _ error) { + e.mu.Lock() + defer e.mu.Unlock() + if e.hw != nil { + return e.hw, e.series, nil + } + hw, series, err := manual.DetectSeriesAndHardwareCharacteristics(e.host) + if err != nil { + return nil, "", errors.Trace(err) + } + e.hw, e.series = &hw, series + return e.hw, e.series, nil +} + func (e *manualEnviron) OpenPorts(ports []network.PortRange) error { return nil } @@ -284,3 +354,7 @@ func (*manualEnviron) Provider() environs.EnvironProvider { return manualProvider{} } + +func isRunningController() bool { + return filepath.Base(os.Args[0]) == names.Jujud +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/manual/environ_test.go juju-core-2.0.0/src/github.com/juju/juju/provider/manual/environ_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/manual/environ_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/manual/environ_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -9,10 +9,12 @@ "github.com/juju/errors" "github.com/juju/testing" jc "github.com/juju/testing/checkers" + "github.com/juju/utils/arch" gc "gopkg.in/check.v1" "github.com/juju/juju/constraints" "github.com/juju/juju/environs" + "github.com/juju/juju/environs/manual" "github.com/juju/juju/instance" coretesting "github.com/juju/juju/testing" ) @@ -73,26 +75,43 @@ c.Assert(instances[0], gc.IsNil) } -func (s *environSuite) TestDestroy(c *gc.C) { - var resultStderr string +func (s *environSuite) TestDestroyController(c *gc.C) { + var resultStdout string var resultErr error - runSSHCommandTesting := func(host string, command []string, stdin string) (string, error) { + runSSHCommandTesting := func(host string, 
command []string, stdin string) (string, string, error) { c.Assert(host, gc.Equals, "ubuntu@hostname") c.Assert(command, gc.DeepEquals, []string{"sudo", "/bin/bash"}) - c.Assert(stdin, jc.DeepEquals, ` + c.Assert(stdin, gc.Equals, ` set -x touch '/var/lib/juju/uninstall-agent' -pkill -6 jujud && exit -stop juju-db +# If jujud is running, we then wait for a while for it to stop. +stopped=0 +if pkill -6 jujud; then + for i in `+"`seq 1 30`"+`; do + if pgrep jujud > /dev/null ; then + sleep 1 + else + echo "jujud stopped" + stopped=1 + break + fi + done +fi +if [ $stopped -ne 1 ]; then + # If jujud didn't stop nicely, we kill it hard here. + pkill -9 jujud + service juju-db stop +fi rm -f /etc/init/juju* +rm -f /etc/systemd/system/juju* rm -fr '/var/lib/juju' '/var/log/juju' exit 0 `) - return resultStderr, resultErr + return resultStdout, "", resultErr } s.PatchValue(&runSSHCommand, runSSHCommandTesting) type test struct { - stderr string + stdout string err error match string } @@ -103,8 +122,8 @@ } for i, t := range tests { c.Logf("test %d: %v", i, t) - resultStderr, resultErr = t.stderr, t.err - err := s.env.Destroy() + resultStdout, resultErr = t.stdout, t.err + err := s.env.DestroyController("controller-uuid") if t.match == "" { c.Assert(err, jc.ErrorIsNil) } else { @@ -119,14 +138,36 @@ } func (s *environSuite) TestConstraintsValidator(c *gc.C) { + s.PatchValue(&manual.DetectSeriesAndHardwareCharacteristics, + func(string) (instance.HardwareCharacteristics, string, error) { + amd64 := "amd64" + return instance.HardwareCharacteristics{ + Arch: &amd64, + }, "", nil + }, + ) + validator, err := s.env.ConstraintsValidator() c.Assert(err, jc.ErrorIsNil) - cons := constraints.MustParse("arch=amd64 instance-type=foo tags=bar cpu-power=10 cpu-cores=2 mem=1G virt-type=kvm") + cons := constraints.MustParse("arch=amd64 instance-type=foo tags=bar cpu-power=10 cores=2 mem=1G virt-type=kvm") unsupported, err := validator.Validate(cons) c.Assert(err, jc.ErrorIsNil) 
c.Assert(unsupported, jc.SameContents, []string{"cpu-power", "instance-type", "tags", "virt-type"}) } +func (s *environSuite) TestConstraintsValidatorInsideController(c *gc.C) { + // Patch os.Args so it appears that we're running in "jujud", and then + // patch the host arch so it looks like we're running arm64. + s.PatchValue(&os.Args, []string{"/some/where/containing/jujud", "whatever"}) + s.PatchValue(&arch.HostArch, func() string { return arch.ARM64 }) + + validator, err := s.env.ConstraintsValidator() + c.Assert(err, jc.ErrorIsNil) + cons := constraints.MustParse("arch=arm64") + _, err = validator.Validate(cons) + c.Assert(err, jc.ErrorIsNil) +} + type controllerInstancesSuite struct { baseEnvironSuite } @@ -136,8 +177,8 @@ func (s *controllerInstancesSuite) TestControllerInstances(c *gc.C) { var outputResult string var errResult error - runSSHCommandTesting := func(host string, command []string, stdin string) (string, error) { - return outputResult, errResult + runSSHCommandTesting := func(host string, command []string, stdin string) (string, string, error) { + return outputResult, "", errResult } s.PatchValue(&runSSHCommand, runSSHCommandTesting) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/manual/instance.go juju-core-2.0.0/src/github.com/juju/juju/provider/manual/instance.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/manual/instance.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/manual/instance.go 2016-10-13 14:31:49.000000000 +0000 @@ -22,7 +22,7 @@ // We asume that if we are deploying in manual provider the // underlying machine is clearly running. 
return instance.InstanceStatus{ - Status: status.StatusRunning, + Status: status.Running, } } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/manual/provider.go juju-core-2.0.0/src/github.com/juju/juju/provider/manual/provider.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/manual/provider.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/manual/provider.go 2016-10-13 14:31:49.000000000 +0000 @@ -5,6 +5,7 @@ import ( "fmt" + "strings" "github.com/juju/errors" @@ -23,8 +24,8 @@ var initUbuntuUser = manual.InitUbuntuUser -func ensureBootstrapUbuntuUser(ctx environs.BootstrapContext, host string, cfg *environConfig) error { - err := initUbuntuUser(host, cfg.bootstrapUser(), cfg.AuthorizedKeys(), ctx.GetStdin(), ctx.GetStdout()) +func ensureBootstrapUbuntuUser(ctx environs.BootstrapContext, host, user string, cfg *environConfig) error { + err := initUbuntuUser(host, user, cfg.AuthorizedKeys(), ctx.GetStdin(), ctx.GetStdout()) if err != nil { logger.Errorf("initializing ubuntu user: %v", err) return err @@ -33,11 +34,6 @@ return nil } -// RestrictedConfigAttributes is specified in the EnvironProvider interface. -func (p manualProvider) RestrictedConfigAttributes() []string { - return []string{"bootstrap-user"} -} - // DetectRegions is specified in the environs.CloudRegionDetector interface. func (p manualProvider) DetectRegions() ([]cloud.Region, error) { return nil, errors.NotFoundf("regions") @@ -45,11 +41,8 @@ // PrepareConfig is specified in the EnvironProvider interface. 
func (p manualProvider) PrepareConfig(args environs.PrepareConfigParams) (*config.Config, error) { - if args.Cloud.Endpoint == "" { - return nil, errors.Errorf( - "missing address of host to bootstrap: " + - `please specify "juju bootstrap manual/"`, - ) + if err := validateCloudSpec(args.Cloud); err != nil { + return nil, errors.Trace(err) } envConfig, err := p.validate(args.Config, nil) if err != nil { @@ -70,21 +63,25 @@ // with their defaults in the result; we don't wnat that in // Open. envConfig := newModelConfig(args.Config, args.Config.UnknownAttrs()) - return p.open(args.Cloud.Endpoint, envConfig) + host, user := args.Cloud.Endpoint, "" + if i := strings.IndexRune(host, '@'); i >= 0 { + user, host = host[:i], host[i+1:] + } + return p.open(host, user, envConfig) } func validateCloudSpec(spec environs.CloudSpec) error { if spec.Endpoint == "" { return errors.Errorf( "missing address of host to bootstrap: " + - `please specify "juju bootstrap manual/"`, + `please specify "juju bootstrap manual/[user@]"`, ) } return nil } -func (p manualProvider) open(host string, cfg *environConfig) (environs.Environ, error) { - env := &manualEnviron{host: host, cfg: cfg} +func (p manualProvider) open(host, user string, cfg *environConfig) (environs.Environ, error) { + env := &manualEnviron{host: host, user: user, cfg: cfg} // Need to call SetConfig to initialise storage. if err := env.SetConfig(cfg.Config); err != nil { return nil, err @@ -109,20 +106,6 @@ return nil, err } envConfig := newModelConfig(cfg, validated) - // Check various immutable attributes. - if old != nil { - oldEnvConfig, err := p.validate(old, nil) - if err != nil { - return nil, err - } - for _, key := range [...]string{ - "bootstrap-user", - } { - if err = checkImmutableString(envConfig, oldEnvConfig, key); err != nil { - return nil, err - } - } - } // If the user hasn't already specified a value, set it to the // given value. 
@@ -148,8 +131,3 @@ } return cfg.Apply(envConfig.attrs) } - -func (p manualProvider) SecretAttrs(cfg *config.Config) (map[string]string, error) { - attrs := make(map[string]string) - return attrs, nil -} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/manual/provider_test.go juju-core-2.0.0/src/github.com/juju/juju/provider/manual/provider_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/manual/provider_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/manual/provider_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -39,10 +39,16 @@ s.CheckCall(c, 0, "InitUbuntuUser", "endpoint", "", "", ctx.GetStdin(), ctx.GetStdout()) } +func (s *providerSuite) TestPrepareForBootstrapUserHost(c *gc.C) { + ctx, err := s.testPrepareForBootstrap(c, "user@host", "") + c.Assert(err, jc.ErrorIsNil) + s.CheckCall(c, 0, "InitUbuntuUser", "host", "user", "", ctx.GetStdin(), ctx.GetStdout()) +} + func (s *providerSuite) TestPrepareForBootstrapNoCloudEndpoint(c *gc.C) { _, err := s.testPrepareForBootstrap(c, "", "region") c.Assert(err, gc.ErrorMatches, - `missing address of host to bootstrap: please specify "juju bootstrap manual/"`) + `missing address of host to bootstrap: please specify "juju bootstrap manual/\[user@\]"`) } func (s *providerSuite) testPrepareForBootstrap(c *gc.C, endpoint, region string) (environs.BootstrapContext, error) { diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/manual/storage.go juju-core-2.0.0/src/github.com/juju/juju/provider/manual/storage.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/manual/storage.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/manual/storage.go 2016-10-13 14:31:49.000000000 +0000 @@ -10,8 +10,8 @@ ) // StorageProviderTypes implements storage.ProviderRegistry. 
-func (*manualEnviron) StorageProviderTypes() []storage.ProviderType { - return nil +func (*manualEnviron) StorageProviderTypes() ([]storage.ProviderType, error) { + return nil, nil } // StorageProvider implements storage.ProviderRegistry. diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/openstack/cinder.go juju-core-2.0.0/src/github.com/juju/juju/provider/openstack/cinder.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/openstack/cinder.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/openstack/cinder.go 2016-10-13 14:31:49.000000000 +0000 @@ -33,8 +33,14 @@ ) // StorageProviderTypes implements storage.ProviderRegistry. -func (*Environ) StorageProviderTypes() []storage.ProviderType { - return []storage.ProviderType{CinderProviderType} +func (env *Environ) StorageProviderTypes() ([]storage.ProviderType, error) { + var types []storage.ProviderType + if _, err := env.cinderProvider(); err == nil { + types = append(types, CinderProviderType) + } else if !errors.IsNotSupported(err) { + return nil, errors.Trace(err) + } + return types, nil } // StorageProvider implements storage.ProviderRegistry. 
@@ -46,35 +52,37 @@ } func (env *Environ) cinderProvider() (*cinderProvider, error) { - env.ecfgMutex.Lock() - envName := env.ecfgUnlocked.Config.Name() - modelUUID := env.ecfgUnlocked.Config.UUID() - env.ecfgMutex.Unlock() - storageAdapter, err := newOpenstackStorage(env) if err != nil { return nil, errors.Trace(err) } - return &cinderProvider{storageAdapter, envName, modelUUID}, nil + return &cinderProvider{ + storageAdapter: storageAdapter, + envName: env.name, + modelUUID: env.uuid, + namespace: env.namespace, + }, nil } var newOpenstackStorage = func(env *Environ) (OpenstackStorage, error) { env.ecfgMutex.Lock() - authClient := env.client - envNovaClient := env.novaUnlocked - env.ecfgMutex.Unlock() + defer env.ecfgMutex.Unlock() - endpointUrl, err := getVolumeEndpointURL(authClient, env.cloud.Region) - if err != nil { + if env.volumeURL == nil { + url, err := getVolumeEndpointURL(env.client, env.cloud.Region) if errors.IsNotFound(err) { - return nil, errors.NewNotSupported(err, "volumes not supported") + // No volume endpoint found; Cinder is not supported. 
+ return nil, errors.NotSupportedf("volumes") + } else if err != nil { + return nil, errors.Trace(err) } - return nil, errors.Annotate(err, "getting volume endpoint") + env.volumeURL = url + logger.Debugf("volume URL: %v", url) } return &openstackStorageAdapter{ - cinderClient{cinder.Basic(endpointUrl, authClient.TenantId(), authClient.Token)}, - novaClient{envNovaClient}, + cinderClient{cinder.Basic(env.volumeURL, env.client.TenantId(), env.client.Token)}, + novaClient{env.novaUnlocked}, }, nil } @@ -82,6 +90,7 @@ storageAdapter OpenstackStorage envName string modelUUID string + namespace instance.Namespace } var _ storage.Provider = (*cinderProvider)(nil) @@ -100,6 +109,7 @@ storageAdapter: p.storageAdapter, envName: p.envName, modelUUID: p.modelUUID, + namespace: p.namespace, } return source, nil } @@ -144,6 +154,7 @@ storageAdapter OpenstackStorage envName string // non unique, informational only modelUUID string + namespace instance.Namespace } var _ storage.VolumeSource = (*cinderVolumeSource)(nil) @@ -171,7 +182,7 @@ // The Cinder documentation incorrectly states the // size parameter is in GB. It is actually GiB. Size: int(math.Ceil(float64(arg.Size / 1024))), - Name: resourceName(arg.Tag, s.envName), + Name: resourceName(s.namespace, s.envName, arg.Tag.String()), // TODO(axw) use the AZ of the initially attached machine. AvailabilityZone: "", Metadata: metadata, @@ -469,10 +480,17 @@ } type endpointResolver interface { + Authenticate() error + IsAuthenticated() bool EndpointsForRegion(region string) identity.ServiceURLs } func getVolumeEndpointURL(client endpointResolver, region string) (*url.URL, error) { + if !client.IsAuthenticated() { + if err := authenticateClient(client); err != nil { + return nil, errors.Trace(err) + } + } endpointMap := client.EndpointsForRegion(region) // The cinder openstack charm appends 'v2' to the type for the v2 api. 
endpoint, ok := endpointMap["volumev2"] diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/openstack/cinder_internal_test.go juju-core-2.0.0/src/github.com/juju/juju/provider/openstack/cinder_internal_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/openstack/cinder_internal_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/openstack/cinder_internal_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,64 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package openstack + +import ( + "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + "gopkg.in/goose.v1/client" + "gopkg.in/goose.v1/identity" + + "github.com/juju/juju/environs" +) + +// TODO(axw) 2016-10-03 #1629721 +// Change this to an external test, which will +// require refactoring the provider code to make +// it more easily testable. + +type cinderInternalSuite struct { + testing.IsolationSuite +} + +var _ = gc.Suite(&cinderInternalSuite{}) + +func (s *cinderInternalSuite) TestStorageProviderTypes(c *gc.C) { + env := &Environ{ + cloud: environs.CloudSpec{ + Region: "foo", + }, + client: &testAuthClient{ + regionEndpoints: map[string]identity.ServiceURLs{ + "foo": {"volumev2": "https://bar.invalid"}, + }, + }} + types, err := env.StorageProviderTypes() + c.Assert(err, jc.ErrorIsNil) + c.Assert(types, gc.HasLen, 1) +} + +func (s *cinderInternalSuite) TestStorageProviderTypesNotSupported(c *gc.C) { + env := &Environ{client: &testAuthClient{}} + types, err := env.StorageProviderTypes() + c.Assert(err, jc.ErrorIsNil) + c.Assert(types, gc.HasLen, 0) +} + +type testAuthClient struct { + client.AuthenticatingClient + regionEndpoints map[string]identity.ServiceURLs +} + +func (r *testAuthClient) IsAuthenticated() bool { + return true +} + +func (r *testAuthClient) TenantId() string { + return "tenant-id" +} + +func (r *testAuthClient) 
EndpointsForRegion(region string) identity.ServiceURLs { + return r.regionEndpoints[region] +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/openstack/cinder_test.go juju-core-2.0.0/src/github.com/juju/juju/provider/openstack/cinder_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/openstack/cinder_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/openstack/cinder_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -504,15 +504,28 @@ } type testEndpointResolver struct { + authenticated bool regionEndpoints map[string]identity.ServiceURLs } -func (r testEndpointResolver) EndpointsForRegion(region string) identity.ServiceURLs { +func (r *testEndpointResolver) IsAuthenticated() bool { + return r.authenticated +} + +func (r *testEndpointResolver) Authenticate() error { + r.authenticated = true + return nil +} + +func (r *testEndpointResolver) EndpointsForRegion(region string) identity.ServiceURLs { + if !r.authenticated { + return identity.ServiceURLs{} + } return r.regionEndpoints[region] } func (s *cinderVolumeSourceSuite) TestGetVolumeEndpointVolume(c *gc.C) { - client := testEndpointResolver{regionEndpoints: map[string]identity.ServiceURLs{ + client := &testEndpointResolver{regionEndpoints: map[string]identity.ServiceURLs{ "west": map[string]string{"volume": "http://cinder.testing/v1"}, }} url, err := openstack.GetVolumeEndpointURL(client, "west") @@ -521,7 +534,7 @@ } func (s *cinderVolumeSourceSuite) TestGetVolumeEndpointVolumeV2(c *gc.C) { - client := testEndpointResolver{regionEndpoints: map[string]identity.ServiceURLs{ + client := &testEndpointResolver{regionEndpoints: map[string]identity.ServiceURLs{ "west": map[string]string{"volumev2": "http://cinder.testing/v2"}, }} url, err := openstack.GetVolumeEndpointURL(client, "west") @@ -530,7 +543,7 @@ } func (s *cinderVolumeSourceSuite) TestGetVolumeEndpointPreferV2(c *gc.C) { - client := testEndpointResolver{regionEndpoints: 
map[string]identity.ServiceURLs{ + client := &testEndpointResolver{regionEndpoints: map[string]identity.ServiceURLs{ "south": map[string]string{ "volume": "http://cinder.testing/v1", "volumev2": "http://cinder.testing/v2", @@ -542,7 +555,7 @@ } func (s *cinderVolumeSourceSuite) TestGetVolumeEndpointMissing(c *gc.C) { - client := testEndpointResolver{} + client := &testEndpointResolver{} url, err := openstack.GetVolumeEndpointURL(client, "east") c.Assert(err, gc.ErrorMatches, `endpoint "volume" in region "east" not found`) c.Assert(err, jc.Satisfies, errors.IsNotFound) @@ -550,7 +563,7 @@ } func (s *cinderVolumeSourceSuite) TestGetVolumeEndpointBadURL(c *gc.C) { - client := testEndpointResolver{regionEndpoints: map[string]identity.ServiceURLs{ + client := &testEndpointResolver{regionEndpoints: map[string]identity.ServiceURLs{ "north": map[string]string{"volumev2": "some %4"}, }} url, err := openstack.GetVolumeEndpointURL(client, "north") diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/openstack/config.go juju-core-2.0.0/src/github.com/juju/juju/provider/openstack/config.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/openstack/config.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/openstack/config.go 2016-10-13 14:31:49.000000000 +0000 @@ -27,6 +27,12 @@ }, } +var configDefaults = schema.Defaults{ + "use-floating-ip": false, + "use-default-secgroup": false, + "network": "", +} + var configFields = func() schema.Fields { fs, _, err := configSchema.ValidationSchema() if err != nil { @@ -69,6 +75,18 @@ return fields } +// ConfigSchema returns extra config attributes specific +// to this provider only. +func (p EnvironProvider) ConfigSchema() schema.Fields { + return configFields +} + +// ConfigDefaults returns the default values for the +// provider specific config attributes. 
+func (p EnvironProvider) ConfigDefaults() schema.Defaults { + return configDefaults +} + func (p EnvironProvider) Validate(cfg, old *config.Config) (valid *config.Config, err error) { // Check for valid changes for the base config values. if err := config.Validate(cfg, old); err != nil { @@ -88,7 +106,7 @@ msg := fmt.Sprintf( "Config attribute %q (%v) is deprecated and ignored.\n"+ "Your cloud provider should have set up image metadata to provide the correct image id\n"+ - "for your chosen series and archietcure. If this is a private Openstack deployment without\n"+ + "for your chosen series and architecture. If this is a private Openstack deployment without\n"+ "existing image metadata, please run 'juju-metadata help' to see how suitable image"+ "metadata can be generated.", "default-image-id", defaultImageId) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/openstack/config_test.go juju-core-2.0.0/src/github.com/juju/juju/provider/openstack/config_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/openstack/config_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/openstack/config_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -116,7 +116,7 @@ func (s *ConfigSuite) SetUpTest(c *gc.C) { s.BaseSuite.SetUpTest(c) - s.PatchValue(&authenticateClient, func(*Environ) error { return nil }) + s.PatchValue(&authenticateClient, func(authenticator) error { return nil }) } var configTests = []configTest{ diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/openstack/credentials.go juju-core-2.0.0/src/github.com/juju/juju/provider/openstack/credentials.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/openstack/credentials.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/openstack/credentials.go 2016-10-13 14:31:49.000000000 +0000 @@ -15,15 +15,16 @@ "gopkg.in/ini.v1" "github.com/juju/juju/cloud" + "github.com/juju/juju/environs" ) 
const ( - credAttrTenantName = "tenant-name" - credAttrUserName = "username" - credAttrPassword = "password" - credAttrDomainName = "domain-name" - credAttrAccessKey = "access-key" - credAttrSecretKey = "secret-key" + CredAttrTenantName = "tenant-name" + CredAttrUserName = "username" + CredAttrPassword = "password" + CredAttrDomainName = "domain-name" + CredAttrAccessKey = "access-key" + CredAttrSecretKey = "secret-key" ) type OpenstackCredentials struct{} @@ -33,16 +34,16 @@ return map[cloud.AuthType]cloud.CredentialSchema{ cloud.UserPassAuthType: { { - credAttrUserName, cloud.CredentialAttr{Description: "The username to authenticate with."}, + CredAttrUserName, cloud.CredentialAttr{Description: "The username to authenticate with."}, }, { - credAttrPassword, cloud.CredentialAttr{ + CredAttrPassword, cloud.CredentialAttr{ Description: "The password for the specified username.", Hidden: true, }, }, { - credAttrTenantName, cloud.CredentialAttr{Description: "The OpenStack tenant name."}, + CredAttrTenantName, cloud.CredentialAttr{Description: "The OpenStack tenant name."}, }, { - credAttrDomainName, cloud.CredentialAttr{ + CredAttrDomainName, cloud.CredentialAttr{ Description: "The OpenStack domain name.", Optional: true, }, @@ -50,14 +51,14 @@ }, cloud.AccessKeyAuthType: { { - credAttrAccessKey, cloud.CredentialAttr{Description: "The access key to authenticate with."}, + CredAttrAccessKey, cloud.CredentialAttr{Description: "The access key to authenticate with."}, }, { - credAttrSecretKey, cloud.CredentialAttr{ + CredAttrSecretKey, cloud.CredentialAttr{ Description: "The secret key to authenticate with.", Hidden: true, }, }, { - credAttrTenantName, cloud.CredentialAttr{Description: "The OpenStack tenant name."}, + CredAttrTenantName, cloud.CredentialAttr{Description: "The OpenStack tenant name."}, }, }, } @@ -125,19 +126,19 @@ credential = cloud.NewCredential( cloud.UserPassAuthType, map[string]string{ - credAttrUserName: creds.User, - credAttrPassword: creds.Secrets, 
- credAttrTenantName: creds.TenantName, - credAttrDomainName: creds.DomainName, + CredAttrUserName: creds.User, + CredAttrPassword: creds.Secrets, + CredAttrTenantName: creds.TenantName, + CredAttrDomainName: creds.DomainName, }, ) } else { credential = cloud.NewCredential( cloud.AccessKeyAuthType, map[string]string{ - credAttrAccessKey: creds.User, - credAttrSecretKey: creds.Secrets, - credAttrTenantName: creds.TenantName, + CredAttrAccessKey: creds.User, + CredAttrSecretKey: creds.Secrets, + CredAttrTenantName: creds.TenantName, }, ) } @@ -148,3 +149,8 @@ credential.Label = fmt.Sprintf("openstack region %q project %q user %q", region, creds.TenantName, user) return &credential, user, creds.Region, nil } + +// FinalizeCredential is part of the environs.ProviderCredentials interface. +func (OpenstackCredentials) FinalizeCredential(_ environs.FinalizeCredentialContext, args environs.FinalizeCredentialParams) (*cloud.Credential, error) { + return &args.Credential, nil +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/openstack/export_test.go juju-core-2.0.0/src/github.com/juju/juju/provider/openstack/export_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/openstack/export_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/openstack/export_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -77,7 +77,20 @@ func NewCinderVolumeSource(s OpenstackStorage) storage.VolumeSource { const envName = "testenv" modelUUID := testing.ModelTag.Id() - return &cinderVolumeSource{s, envName, modelUUID} + return &cinderVolumeSource{ + storageAdapter: s, + envName: envName, + modelUUID: modelUUID, + namespace: fakeNamespace{}, + } +} + +type fakeNamespace struct { + instance.Namespace +} + +func (fakeNamespace) Value(s string) string { + return "juju-" + s } // Include images for arches currently supported. 
i386 is no longer diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/openstack/local_test.go juju-core-2.0.0/src/github.com/juju/juju/provider/openstack/local_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/openstack/local_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/openstack/local_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -427,6 +427,13 @@ c.Assert(hc.CpuPower, gc.IsNil) } +func (s *localServerSuite) TestInstanceName(c *gc.C) { + inst, _ := testing.AssertStartInstance(c, s.env, s.ControllerUUID, "100") + serverDetail := openstack.InstanceServerDetail(inst) + envName := s.env.Config().Name() + c.Assert(serverDetail.Name, gc.Matches, "juju-06f00d-"+envName+"-100") +} + func (s *localServerSuite) TestStartInstanceNetwork(c *gc.C) { cfg, err := s.env.Config().Apply(coretesting.Attrs{ // A label that corresponds to a nova test service network @@ -602,8 +609,8 @@ testing.AssertStartInstance(c, env, s.ControllerUUID, hostedModelInstanceName) modelUUID := env.Config().UUID() allControllerSecurityGroups := []string{ - "default", fmt.Sprintf("juju-%v-%v", s.ControllerUUID, s.ControllerUUID), - fmt.Sprintf("juju-%v-%v-%v", s.ControllerUUID, s.ControllerUUID, controllerInstanceName), + "default", fmt.Sprintf("juju-%v-%v", s.ControllerUUID, controllerEnv.Config().UUID()), + fmt.Sprintf("juju-%v-%v-%v", s.ControllerUUID, controllerEnv.Config().UUID(), controllerInstanceName), } allHostedModelSecurityGroups := []string{ "default", fmt.Sprintf("juju-%v-%v", s.ControllerUUID, modelUUID), @@ -630,8 +637,8 @@ testing.AssertStartInstance(c, env, s.ControllerUUID, hostedModelInstanceName) modelUUID := env.Config().UUID() allControllerSecurityGroups := []string{ - "default", fmt.Sprintf("juju-%v-%v", s.ControllerUUID, s.ControllerUUID), - fmt.Sprintf("juju-%v-%v-%v", s.ControllerUUID, s.ControllerUUID, controllerInstanceName), + "default", fmt.Sprintf("juju-%v-%v", s.ControllerUUID, 
controllerEnv.Config().UUID()), + fmt.Sprintf("juju-%v-%v-%v", s.ControllerUUID, controllerEnv.Config().UUID(), controllerInstanceName), } allHostedModelSecurityGroups := []string{ "default", fmt.Sprintf("juju-%v-%v", s.ControllerUUID, modelUUID), @@ -698,7 +705,7 @@ func (s *localServerSuite) TestInstanceStatus(c *gc.C) { // goose's test service always returns ACTIVE state. inst, _ := testing.AssertStartInstance(c, s.env, s.ControllerUUID, "100") - c.Assert(inst.Status().Status, gc.Equals, status.StatusRunning) + c.Assert(inst.Status().Status, gc.Equals, status.Running) err := s.env.StopInstances(inst.Id()) c.Assert(err, jc.ErrorIsNil) } @@ -776,7 +783,6 @@ func (s *localServerSuite) TestInstancesBuildSpawning(c *gc.C) { coretesting.SkipIfPPC64EL(c, "lp:1425242") - // HP servers are available once they are BUILD(spawning). cleanup := s.srv.Nova.RegisterControlPoint( "addServer", func(sc hook.ServiceControl, args ...interface{}) error { @@ -988,12 +994,7 @@ validator, err := env.ConstraintsValidator() c.Assert(err, jc.ErrorIsNil) - // i386 is a valid arch, but is no longer supported. No image - // data was created for it for the test. 
- cons := constraints.MustParse("arch=i386") - _, err = validator.Validate(cons) - c.Assert(err, gc.ErrorMatches, "invalid constraint value: arch=i386\nvalid values are: \\[amd64 arm64 ppc64el s390x\\]") - cons = constraints.MustParse("instance-type=foo") + cons := constraints.MustParse("instance-type=foo") _, err = validator.Validate(cons) c.Assert(err, gc.ErrorMatches, "invalid constraint value: instance-type=foo\nvalid values are:.*") @@ -1010,7 +1011,7 @@ consB := constraints.MustParse("instance-type=m1.small") cons, err := validator.Merge(consA, consB) c.Assert(err, jc.ErrorIsNil) - c.Assert(cons, gc.DeepEquals, constraints.MustParse("instance-type=m1.small")) + c.Assert(cons, gc.DeepEquals, constraints.MustParse("arch=amd64 instance-type=m1.small")) } func (s *localServerSuite) TestFindImageInstanceConstraint(c *gc.C) { @@ -1764,7 +1765,7 @@ jc.DeepEquals, map[string]string{ "juju-model-uuid": coretesting.ModelTag.Id(), - "juju-controller-uuid": coretesting.ModelTag.Id(), + "juju-controller-uuid": coretesting.ControllerTag.Id(), "juju-is-controller": "true", }, ) @@ -1784,7 +1785,7 @@ jc.DeepEquals, map[string]string{ "juju-model-uuid": coretesting.ModelTag.Id(), - "juju-controller-uuid": coretesting.ModelTag.Id(), + "juju-controller-uuid": coretesting.ControllerTag.Id(), "juju-is-controller": "true", extraKey: extraValue, }, diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/openstack/provider.go juju-core-2.0.0/src/github.com/juju/juju/provider/openstack/provider.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/openstack/provider.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/openstack/provider.go 2016-10-13 14:31:49.000000000 +0000 @@ -16,14 +16,12 @@ "github.com/juju/errors" "github.com/juju/loggo" "github.com/juju/utils" - "github.com/juju/utils/arch" "github.com/juju/version" "gopkg.in/goose.v1/cinder" "gopkg.in/goose.v1/client" gooseerrors "gopkg.in/goose.v1/errors" 
"gopkg.in/goose.v1/identity" "gopkg.in/goose.v1/nova" - "gopkg.in/juju/names.v2" "github.com/juju/juju/cloud" "github.com/juju/juju/cloudconfig/instancecfg" @@ -31,7 +29,6 @@ "github.com/juju/juju/constraints" "github.com/juju/juju/environs" "github.com/juju/juju/environs/config" - "github.com/juju/juju/environs/imagemetadata" "github.com/juju/juju/environs/instances" "github.com/juju/juju/environs/simplestreams" "github.com/juju/juju/environs/tags" @@ -51,7 +48,10 @@ FirewallerFactory FirewallerFactory } -var _ environs.EnvironProvider = (*EnvironProvider)(nil) +var ( + _ environs.EnvironProvider = (*EnvironProvider)(nil) + _ environs.ProviderSchema = (*EnvironProvider)(nil) +) var providerInstance *EnvironProvider = &EnvironProvider{ OpenstackCredentials{}, @@ -61,11 +61,11 @@ var makeServiceURL = client.AuthenticatingClient.MakeServiceURL -// Use shortAttempt to poll for short-term events. -// TODO: This was kept to a long timeout because Nova needs more time than EC2. -// For example, HP Cloud takes around 9.1 seconds (10 samples) to return a -// BUILD(spawning) status. But storage delays are handled separately now, and +// TODO: shortAttempt was kept to a long timeout because Nova needs +// more time than EC2. Storage delays are handled separately now, and // perhaps other polling attempts can time out faster. + +// shortAttempt is used when polling for short-term events in tests. 
var shortAttempt = utils.AttemptStrategy{ Total: 15 * time.Second, Delay: 200 * time.Millisecond, @@ -76,25 +76,26 @@ if err := validateCloudSpec(args.Cloud); err != nil { return nil, errors.Annotate(err, "validating cloud spec") } + uuid := args.Config.UUID() + namespace, err := instance.NewNamespace(uuid) + if err != nil { + return nil, errors.Annotate(err, "creating instance namespace") + } e := &Environ{ - name: args.Config.Name(), - cloud: args.Cloud, + name: args.Config.Name(), + uuid: uuid, + cloud: args.Cloud, + namespace: namespace, } e.firewaller = p.FirewallerFactory.GetFirewaller(e) e.configurator = p.Configurator - err := e.SetConfig(args.Config) - if err != nil { + if err := e.SetConfig(args.Config); err != nil { return nil, err } return e, nil } -// RestrictedConfigAttributes is specified in the EnvironProvider interface. -func (p EnvironProvider) RestrictedConfigAttributes() []string { - return []string{} -} - // DetectRegions implements environs.CloudRegionDetector. func (EnvironProvider) DetectRegions() ([]cloud.Region, error) { // If OS_REGION_NAME and OS_AUTH_URL are both set, @@ -138,15 +139,10 @@ return nil, errors.Errorf("region must be specified") } return &simplestreams.MetadataLookupParams{ - Region: region, - Architectures: arch.AllSupportedArches, + Region: region, }, nil } -func (p EnvironProvider) SecretAttrs(cfg *config.Config) (map[string]string, error) { - return make(map[string]string), nil -} - func (p EnvironProvider) newConfig(cfg *config.Config) (*environConfig, error) { valid, err := p.Validate(cfg, nil) if err != nil { @@ -156,19 +152,16 @@ } type Environ struct { - name string - cloud environs.CloudSpec - - // archMutex gates access to cachedSupportedArchitectures - archMutex sync.Mutex - // cachedSupportedArchitectures caches the architectures - // for which images can be instantiated. 
- cachedSupportedArchitectures []string + name string + uuid string + cloud environs.CloudSpec + namespace instance.Namespace ecfgMutex sync.Mutex ecfgUnlocked *environConfig client client.AuthenticatingClient novaUnlocked *nova.Client + volumeURL *url.URL // keystoneImageDataSource caches the result of getKeystoneImageSource. keystoneImageDataSourceMutex sync.Mutex @@ -230,23 +223,23 @@ func (inst *openstackInstance) Status() instance.InstanceStatus { instStatus := inst.getServerDetail().Status - jujuStatus := status.StatusPending + jujuStatus := status.Pending switch instStatus { case nova.StatusActive: - jujuStatus = status.StatusRunning + jujuStatus = status.Running case nova.StatusError: - jujuStatus = status.StatusProvisioningError + jujuStatus = status.ProvisioningError case nova.StatusBuild, nova.StatusBuildSpawning, nova.StatusDeleted, nova.StatusHardReboot, nova.StatusPassword, nova.StatusReboot, nova.StatusRebuild, nova.StatusRescue, nova.StatusResize, nova.StatusShutoff, nova.StatusSuspended, nova.StatusVerifyResize: - jujuStatus = status.StatusEmpty + jujuStatus = status.Empty case nova.StatusUnknown: - jujuStatus = status.StatusUnknown + jujuStatus = status.Unknown default: - jujuStatus = status.StatusEmpty + jujuStatus = status.Empty } return instance.InstanceStatus{ Status: jujuStatus, @@ -376,13 +369,8 @@ validator := constraints.NewValidator() validator.RegisterConflicts( []string{constraints.InstanceType}, - []string{constraints.Mem, constraints.Arch, constraints.RootDisk, constraints.CpuCores}) + []string{constraints.Mem, constraints.RootDisk, constraints.Cores}) validator.RegisterUnsupported(unsupportedConstraints) - supportedArches, err := e.supportedArchitectures() - if err != nil { - return nil, err - } - validator.RegisterVocabulary(constraints.Arch, supportedArches) novaClient := e.nova() flavors, err := novaClient.ListFlavorsDetail() if err != nil { @@ -397,25 +385,6 @@ return validator, nil } -func (e *Environ) supportedArchitectures() 
([]string, error) { - e.archMutex.Lock() - defer e.archMutex.Unlock() - if e.cachedSupportedArchitectures != nil { - return e.cachedSupportedArchitectures, nil - } - // Create a filter to get all images from our region and for the correct stream. - cloudSpec, err := e.Region() - if err != nil { - return nil, err - } - imageConstraint := imagemetadata.NewImageConstraint(simplestreams.LookupParams{ - CloudSpec: cloudSpec, - Stream: e.Config().ImageStream(), - }) - e.cachedSupportedArchitectures, err = common.SupportedArchitectures(e, imageConstraint) - return e.cachedSupportedArchitectures, err -} - var novaListAvailabilityZones = (*nova.Client).ListAvailabilityZones type openstackAvailabilityZone struct { @@ -522,7 +491,7 @@ // PrepareForBootstrap is part of the Environ interface. func (e *Environ) PrepareForBootstrap(ctx environs.BootstrapContext) error { // Verify credentials. - if err := authenticateClient(e); err != nil { + if err := authenticateClient(e.client); err != nil { return err } return nil @@ -531,7 +500,7 @@ // Create is part of the Environ interface. func (e *Environ) Create(environs.CreateParams) error { // Verify credentials. - if err := authenticateClient(e); err != nil { + if err := authenticateClient(e.client); err != nil { return err } // TODO(axw) 2016-08-04 #1609643 @@ -543,12 +512,17 @@ // The client's authentication may have been reset when finding tools if the agent-version // attribute was updated so we need to re-authenticate. This will be a no-op if already authenticated. // An authenticated client is needed for the URL() call below. - if err := authenticateClient(e); err != nil { + if err := authenticateClient(e.client); err != nil { return nil, err } return common.Bootstrap(ctx, e, args) } +// BootstrapMessage is part of the Environ interface. 
+func (e *Environ) BootstrapMessage() string { + return "" +} + func (e *Environ) ControllerInstances(controllerUUID string) ([]instance.Id, error) { // Find all instances tagged with tags.JujuIsController. instances, err := e.allControllerManagedInstances(controllerUUID, e.ecfg().useFloatingIP()) @@ -577,7 +551,7 @@ cred := identity.Credentials{ Region: spec.Region, URL: spec.Endpoint, - TenantName: credAttrs[credAttrTenantName], + TenantName: credAttrs[CredAttrTenantName], } // AuthType is validated when the environment is opened, so it's known @@ -586,16 +560,16 @@ switch spec.Credential.AuthType() { case cloud.UserPassAuthType: // TODO(axw) we need a way of saying to use legacy auth. - cred.User = credAttrs[credAttrUserName] - cred.Secrets = credAttrs[credAttrPassword] - cred.DomainName = credAttrs[credAttrDomainName] + cred.User = credAttrs[CredAttrUserName] + cred.Secrets = credAttrs[CredAttrPassword] + cred.DomainName = credAttrs[CredAttrDomainName] authMode = identity.AuthUserPass if cred.DomainName != "" { authMode = identity.AuthUserPassV3 } case cloud.AccessKeyAuthType: - cred.User = credAttrs[credAttrAccessKey] - cred.Secrets = credAttrs[credAttrSecretKey] + cred.User = credAttrs[CredAttrAccessKey] + cred.Secrets = credAttrs[CredAttrSecretKey] authMode = identity.AuthKeyPair } return cred, authMode @@ -654,8 +628,12 @@ return client, nil } -var authenticateClient = func(e *Environ) error { - err := e.client.Authenticate() +type authenticator interface { + Authenticate() error +} + +var authenticateClient = func(auth authenticator) error { + err := auth.Authenticate() if err != nil { // Log the error in case there are any useful hints, // but provide a readable and helpful error message @@ -735,7 +713,7 @@ return *datasource, nil } if !e.client.IsAuthenticated() { - if err := authenticateClient(e); err != nil { + if err := authenticateClient(e.client); err != nil { return nil, err } } @@ -960,8 +938,9 @@ groupNames = append(groupNames, 
nova.SecurityGroupName{g.Name}) } machineName := resourceName( - names.NewMachineTag(args.InstanceConfig.MachineId), - e.Config().UUID(), + e.namespace, + e.name, + args.InstanceConfig.MachineId, ) tryStartNovaInstance := func( @@ -1075,8 +1054,6 @@ func (e *Environ) isAliveServer(server nova.ServerDetail) bool { switch server.Status { - // HPCloud uses "BUILD(spawning)" as an intermediate BUILD state - // once networking is available. case nova.StatusActive, nova.StatusBuild, nova.StatusBuildSpawning, nova.StatusShutoff, nova.StatusSuspended: return true } @@ -1098,20 +1075,24 @@ } return wantedServers, nil } - // List all servers that may be in the environment - servers, err := e.nova().ListServersDetail(e.machinesFilter()) + // List all instances in the environment. + instances, err := e.AllInstances() if err != nil { return nil, err } - // Create a set of the ids of servers that are wanted - idSet := make(map[string]struct{}, len(ids)) - for _, id := range ids { - idSet[string(id)] = struct{}{} - } // Return only servers with the wanted ids that are currently alive - for _, server := range servers { - if _, ok := idSet[server.Id]; ok && e.isAliveServer(server) { - wantedServers = append(wantedServers, server) + for _, inst := range instances { + inst := inst.(*openstackInstance) + serverDetail := *inst.serverDetail + if !e.isAliveServer(serverDetail) { + continue + } + for _, id := range ids { + if inst.Id() != id { + continue + } + wantedServers = append(wantedServers, serverDetail) + break } } return wantedServers, nil @@ -1193,16 +1174,15 @@ // AllInstances returns all instances in this environment. 
func (e *Environ) AllInstances() ([]instance.Instance, error) { - filter := e.machinesFilter() tagFilter := tagValue{tags.JujuModel, e.ecfg().UUID()} - return e.allInstances(filter, tagFilter, e.ecfg().useFloatingIP()) + return e.allInstances(tagFilter, e.ecfg().useFloatingIP()) } // allControllerManagedInstances returns all instances managed by this // environment's controller, matching the optionally specified filter. func (e *Environ) allControllerManagedInstances(controllerUUID string, updateFloatingIPAddresses bool) ([]instance.Instance, error) { tagFilter := tagValue{tags.JujuController, controllerUUID} - return e.allInstances(nil, tagFilter, updateFloatingIPAddresses) + return e.allInstances(tagFilter, updateFloatingIPAddresses) } type tagValue struct { @@ -1211,8 +1191,8 @@ // allControllerManagedInstances returns all instances managed by this // environment's controller, matching the optionally specified filter. -func (e *Environ) allInstances(filter *nova.Filter, tagFilter tagValue, updateFloatingIPAddresses bool) ([]instance.Instance, error) { - servers, err := e.nova().ListServersDetail(filter) +func (e *Environ) allInstances(tagFilter tagValue, updateFloatingIPAddresses bool) ([]instance.Instance, error) { + servers, err := e.nova().ListServersDetail(jujuMachineFilter()) if err != nil { return nil, err } @@ -1315,15 +1295,16 @@ return volIds, nil } -func resourceName(tag names.Tag, envName string) string { - return fmt.Sprintf("juju-%s-%s", envName, tag) +func resourceName(namespace instance.Namespace, envName, resourceId string) string { + return namespace.Value(envName + "-" + resourceId) } -// machinesFilter returns a nova.Filter matching all machines in the environment. -func (e *Environ) machinesFilter() *nova.Filter { +// jujuMachineFilter returns a nova.Filter matching machines created by Juju. +// The machines are not filtered to any particular environment. To do that, +// instance tags must be compared. 
+func jujuMachineFilter() *nova.Filter { filter := nova.NewFilter() - modelUUID := e.Config().UUID() - filter.Set(nova.FilterServer, fmt.Sprintf("juju-%s-machine-\\d*", modelUUID)) + filter.Set(nova.FilterServer, "juju-.*") return filter } @@ -1407,10 +1388,9 @@ return nil, err } return &simplestreams.MetadataLookupParams{ - Series: config.PreferredSeries(e.ecfg()), - Region: cloudSpec.Region, - Endpoint: cloudSpec.Endpoint, - Architectures: arch.AllSupportedArches, + Series: config.PreferredSeries(e.ecfg()), + Region: cloudSpec.Region, + Endpoint: cloudSpec.Endpoint, }, nil } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/openstack/provider_test.go juju-core-2.0.0/src/github.com/juju/juju/provider/openstack/provider_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/openstack/provider_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/openstack/provider_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -54,12 +54,7 @@ networks: []string{"private"}, expected: "192.168.0.1", }, { - summary: "private only, both IPv6 and IPv4", - private: []nova.IPAddress{{6, "fc00::1"}, {4, "192.168.0.1"}}, - networks: []string{"private"}, - expected: "fc00::1", -}, { - summary: "private IPv4 plus (HP cloud)", + summary: "private IPv4 plus (what HP cloud used to do)", private: []nova.IPAddress{{4, "10.0.0.1"}, {4, "8.8.4.4"}}, networks: []string{"private"}, expected: "8.8.4.4", @@ -79,11 +74,6 @@ networks: []string{"", "public"}, expected: "8.8.8.8", }, { - summary: "public only, both IPv6 and IPv4", - public: []nova.IPAddress{{6, "2001:db8::1"}, {4, "8.8.8.8"}}, - networks: []string{"", "public"}, - expected: "2001:db8::1", -}, { summary: "public and private both IPv4", private: []nova.IPAddress{{4, "10.0.0.4"}}, public: []nova.IPAddress{{4, "8.8.4.4"}}, @@ -114,12 +104,6 @@ networks: []string{"private", "public"}, expected: "8.8.8.8", }, { - summary: "public, private, and localhost - both IPv6 and IPv4", 
- private: []nova.IPAddress{{6, "::1"}, {6, "fc00::1"}, {4, "127.0.0.4"}, {4, "192.168.0.1"}}, - public: []nova.IPAddress{{6, "2001:db8::1"}, {4, "8.8.8.8"}}, - networks: []string{"private", "public"}, - expected: "2001:db8::1", -}, { summary: "custom only IPv4", private: []nova.IPAddress{{4, "192.168.0.1"}}, networks: []string{"special"}, @@ -135,11 +119,6 @@ networks: []string{"special"}, expected: "192.168.0.1", }, { - summary: "custom only - both IPv6 and IPv4", - private: []nova.IPAddress{{6, "fc00::1"}, {4, "192.168.0.1"}}, - networks: []string{"special"}, - expected: "fc00::1", -}, { summary: "custom and public IPv4", private: []nova.IPAddress{{4, "172.16.0.1"}}, public: []nova.IPAddress{{4, "8.8.8.8"}}, @@ -158,12 +137,6 @@ networks: []string{"special", "public"}, expected: "8.8.8.8", }, { - summary: "custom and public - both IPv6 and IPv4", - private: []nova.IPAddress{{6, "fc00::1"}, {4, "172.16.0.1"}}, - public: []nova.IPAddress{{6, "2001:db8::1"}, {4, "8.8.8.8"}}, - networks: []string{"special", "public"}, - expected: "2001:db8::1", -}, { summary: "floating and public, same address", floatingIP: "8.8.8.8", public: []nova.IPAddress{{4, "8.8.8.8"}}, @@ -209,7 +182,7 @@ } } addr := InstanceAddress(t.floatingIP, addresses) - c.Assert(addr, gc.Equals, t.expected) + c.Check(addr, gc.Equals, t.expected) } } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/openstack/userdata_test.go juju-core-2.0.0/src/github.com/juju/juju/provider/openstack/userdata_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/openstack/userdata_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/openstack/userdata_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -47,7 +47,7 @@ func (s *UserdataSuite) TestOpenstackUnknownOS(c *gc.C) { renderer := openstack.OpenstackRenderer{} cloudcfg := &cloudinittest.CloudConfig{} - result, err := renderer.Render(cloudcfg, os.Arch) + result, err := renderer.Render(cloudcfg, 
os.GenericLinux) c.Assert(result, gc.IsNil) - c.Assert(err, gc.ErrorMatches, "Cannot encode userdata for OS: Arch") + c.Assert(err, gc.ErrorMatches, "Cannot encode userdata for OS: GenericLinux") } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/rackspace/credentials.go juju-core-2.0.0/src/github.com/juju/juju/provider/rackspace/credentials.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/rackspace/credentials.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/rackspace/credentials.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,56 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package rackspace + +import ( + "github.com/juju/juju/cloud" + "github.com/juju/juju/provider/openstack" +) + +// Credentials represents openstack credentials specifically tailored +// to rackspace. Mostly this means that they're appropriate for the v2 API, and +// thus there's no domain name. +type Credentials struct { + openstack.OpenstackCredentials +} + +// CredentialSchemas is part of the environs.ProviderCredentials interface. +func (Credentials) CredentialSchemas() map[cloud.AuthType]cloud.CredentialSchema { + return map[cloud.AuthType]cloud.CredentialSchema{ + cloud.UserPassAuthType: { + { + Name: openstack.CredAttrUserName, + CredentialAttr: cloud.CredentialAttr{Description: "The username to authenticate with."}, + }, { + Name: openstack.CredAttrPassword, + CredentialAttr: cloud.CredentialAttr{ + Description: "The password for the specified username.", + Hidden: true, + }, + }, { + Name: openstack.CredAttrTenantName, + CredentialAttr: cloud.CredentialAttr{Description: "The OpenStack tenant name."}, + }, + }, + } +} + +// DetectCredentials is part of the environs.ProviderCredentials interface. 
+func (c Credentials) DetectCredentials() (*cloud.CloudCredential, error) { + result, err := c.OpenstackCredentials.DetectCredentials() + if err != nil { + return nil, err + } + + delete(result.AuthCredentials, string(cloud.AccessKeyAuthType)) + + // delete domain name from creds, since rackspace doesn't use it, and it + // confuses our code. + for k, v := range result.AuthCredentials { + attr := v.Attributes() + delete(attr, openstack.CredAttrDomainName) + result.AuthCredentials[k] = cloud.NewCredential(v.AuthType(), attr) + } + return result, nil +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/rackspace/credentials_test.go juju-core-2.0.0/src/github.com/juju/juju/provider/rackspace/credentials_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/rackspace/credentials_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/rackspace/credentials_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,47 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+ +package rackspace_test + +import ( + "os" + + "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/provider/openstack" + "github.com/juju/juju/provider/rackspace" +) + +var _ = gc.Suite(&CredentialSuite{}) + +type CredentialSuite struct { + testing.IsolationSuite +} + +func (CredentialSuite) TestCredentialSchemasNoDomain(c *gc.C) { + schemas := rackspace.Credentials{}.CredentialSchemas() + for name, schema := range schemas { + for _, attr := range schema { + if attr.Name == openstack.CredAttrDomainName { + c.Fatalf("schema %q has domain name attribute", name) + } + } + } +} + +func (CredentialSuite) TestDetectCredentialsNoDomain(c *gc.C) { + os.Setenv("OS_USERNAME", "foo") + os.Setenv("OS_TENANT_NAME", "baz") + os.Setenv("OS_PASSWORD", "bar") + os.Setenv("OS_DOMAIN_NAME", "domain") + result, err := rackspace.Credentials{}.DetectCredentials() + c.Assert(err, jc.ErrorIsNil) + for _, v := range result.AuthCredentials { + attr := v.Attributes() + if _, ok := attr[openstack.CredAttrDomainName]; ok { + c.Fatal("Domain name exists in rackspace creds and should not.") + } + } +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/rackspace/environ.go juju-core-2.0.0/src/github.com/juju/juju/provider/rackspace/environ.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/rackspace/environ.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/rackspace/environ.go 2016-10-13 14:31:49.000000000 +0000 @@ -30,6 +30,11 @@ return bootstrap(ctx, e, params) } +// BootstrapMessage is part of the Environ interface. +func (e environ) BootstrapMessage() string { + return "" +} + var waitSSH = common.WaitSSH // StartInstance implements environs.Environ. 
diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/rackspace/environ_test.go juju-core-2.0.0/src/github.com/juju/juju/provider/rackspace/environ_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/rackspace/environ_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/rackspace/environ_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -51,7 +51,7 @@ func (s *environSuite) TestStartInstance(c *gc.C) { configurator := &fakeConfigurator{} - s.PatchValue(rackspace.WaitSSH, func(stdErr io.Writer, interrupted <-chan os.Signal, client ssh.Client, checkHostScript string, inst common.Addresser, timeout environs.BootstrapDialOpts) (addr string, err error) { + s.PatchValue(rackspace.WaitSSH, func(stdErr io.Writer, interrupted <-chan os.Signal, client ssh.Client, checkHostScript string, inst common.InstanceRefresher, timeout environs.BootstrapDialOpts) (addr string, err error) { addresses, err := inst.Addresses() if err != nil { return "", err @@ -65,7 +65,7 @@ "name": "some-name", "type": "some-type", "uuid": testing.ModelTag.Id(), - "controller-uuid": testing.ModelTag.Id(), + "controller-uuid": testing.ControllerTag.Id(), "authorized-keys": "key", }) c.Assert(err, gc.IsNil) @@ -124,6 +124,10 @@ return nil, nil } +func (e *fakeEnviron) BootstrapMessage() string { + return "" +} + func (e *fakeEnviron) StartInstance(args environs.StartInstanceParams) (*environs.StartInstanceResult, error) { e.Push("StartInstance", args) return &environs.StartInstanceResult{ @@ -205,9 +209,9 @@ return nil } -func (e *fakeEnviron) StorageProviderTypes() []storage.ProviderType { +func (e *fakeEnviron) StorageProviderTypes() ([]storage.ProviderType, error) { e.Push("StorageProviderTypes") - return nil + return nil, nil } func (e *fakeEnviron) StorageProvider(t storage.ProviderType) (storage.Provider, error) { @@ -281,7 +285,7 @@ func (e *fakeInstance) Status() instance.InstanceStatus { e.Push("Status") return 
instance.InstanceStatus{ - Status: status.StatusProvisioning, + Status: status.Provisioning, Message: "a message", } } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/rackspace/init.go juju-core-2.0.0/src/github.com/juju/juju/provider/rackspace/init.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/rackspace/init.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/rackspace/init.go 2016-10-13 14:31:49.000000000 +0000 @@ -14,7 +14,7 @@ func init() { osProvider := openstack.EnvironProvider{ - openstack.OpenstackCredentials{}, + Credentials{}, &rackspaceConfigurator{}, &firewallerFactory{}, } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/rackspace/provider_test.go juju-core-2.0.0/src/github.com/juju/juju/provider/rackspace/provider_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/rackspace/provider_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/rackspace/provider_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -32,7 +32,7 @@ "name": "some-name", "type": "some-type", "uuid": coretesting.ModelTag.Id(), - "controller-uuid": coretesting.ModelTag.Id(), + "controller-uuid": coretesting.ControllerTag.Id(), "authorized-keys": "key", }) c.Check(err, gc.IsNil) @@ -65,11 +65,6 @@ return nil, nil } -func (p *fakeProvider) RestrictedConfigAttributes() []string { - p.MethodCall(p, "RestrictedConfigAttributes") - return nil -} - func (p *fakeProvider) PrepareForCreateEnvironment(controllerUUID string, cfg *config.Config) (*config.Config, error) { p.MethodCall(p, "PrepareForCreateEnvironment", controllerUUID, cfg) return nil, nil @@ -90,11 +85,6 @@ return cfg, nil } -func (p *fakeProvider) SecretAttrs(cfg *config.Config) (map[string]string, error) { - p.MethodCall(p, "SecretAttrs", cfg) - return nil, nil -} - func (p *fakeProvider) CredentialSchemas() map[cloud.AuthType]cloud.CredentialSchema { p.MethodCall(p, 
"CredentialSchemas") return nil @@ -104,3 +94,8 @@ p.MethodCall(p, "DetectCredentials") return nil, errors.NotFoundf("credentials") } + +func (p *fakeProvider) FinalizeCredential(ctx environs.FinalizeCredentialContext, args environs.FinalizeCredentialParams) (*cloud.Credential, error) { + p.MethodCall(p, "FinalizeCredential", ctx, args) + return &args.Credential, nil +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/vsphere/credentials.go juju-core-2.0.0/src/github.com/juju/juju/provider/vsphere/credentials.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/vsphere/credentials.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/vsphere/credentials.go 2016-10-13 14:31:49.000000000 +0000 @@ -9,6 +9,7 @@ "github.com/juju/errors" "github.com/juju/juju/cloud" + "github.com/juju/juju/environs" ) const ( @@ -38,3 +39,8 @@ func (environProviderCredentials) DetectCredentials() (*cloud.CloudCredential, error) { return nil, errors.NotFoundf("credentials") } + +// FinalizeCredential is part of the environs.ProviderCredentials interface. 
+func (environProviderCredentials) FinalizeCredential(_ environs.FinalizeCredentialContext, args environs.FinalizeCredentialParams) (*cloud.Credential, error) { + return &args.Credential, nil +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/vsphere/environ_broker_test.go juju-core-2.0.0/src/github.com/juju/juju/provider/vsphere/environ_broker_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/vsphere/environ_broker_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/vsphere/environ_broker_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -46,7 +46,7 @@ s.FakeAvailabilityZones(client, "z1") s.FakeAvailabilityZones(client, "z1") s.FakeAvailabilityZones(client, "z1") - s.FakeCreateInstance(client, s.ServerUrl, c) + s.FakeCreateInstance(client, s.ServerURL, c) } func (s *environBrokerSuite) CreateStartInstanceArgs(c *gc.C) environs.StartInstanceParams { @@ -180,7 +180,7 @@ client := vsphere.ExposeEnvFakeClient(s.Env) s.FakeAvailabilityZones(client, "z1", "z2") s.FakeAvailabilityZones(client, "z1", "z2") - s.FakeCreateInstance(client, s.ServerUrl, c) + s.FakeCreateInstance(client, s.ServerURL, c) startInstArgs := s.CreateStartInstanceArgs(c) startInstArgs.Placement = "zone=z2" _, err := s.Env.StartInstance(startInstArgs) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/vsphere/environ.go juju-core-2.0.0/src/github.com/juju/juju/provider/vsphere/environ.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/vsphere/environ.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/vsphere/environ.go 2016-10-13 14:31:49.000000000 +0000 @@ -26,14 +26,14 @@ cloud environs.CloudSpec client *client - archLock sync.Mutex // archLock protects access to the following fields. - supportedArchitectures []string - // namespace is used to create the machine and device hostnames. 
namespace instance.Namespace lock sync.Mutex // lock protects access the following fields. ecfg *environConfig + + archLock sync.Mutex + supportedArchitectures []string } func newEnviron(cloud environs.CloudSpec, cfg *config.Config) (*environ, error) { @@ -116,6 +116,11 @@ return Bootstrap(ctx, env, params) } +// BootstrapMessage is part of the Environ interface. +func (env *environ) BootstrapMessage() string { + return "" +} + //this variable is exported, because it has to be rewritten in external unit tests var DestroyEnv = common.Destroy diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/vsphere/environ_network.go juju-core-2.0.0/src/github.com/juju/juju/provider/vsphere/environ_network.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/vsphere/environ_network.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/vsphere/environ_network.go 2016-10-13 14:31:49.000000000 +0000 @@ -18,12 +18,22 @@ return false, errors.NotSupportedf("spaces") } -// Subnets implements environs.Environ. +// SupportsSpaceDiscovery implements environs.Networking. +func (env *environ) SupportsSpaceDiscovery() (bool, error) { + return false, errors.NotSupportedf("spaces") +} + +// Spaces implements environs.Networking. +func (env *environ) Spaces() ([]network.SpaceInfo, error) { + return nil, errors.NotSupportedf("spaces") +} + +// Subnets implements environs.Networking. func (env *environ) Subnets(inst instance.Id, ids []network.Id) ([]network.SubnetInfo, error) { return env.client.Subnets(inst, ids) } -// NetworkInterfaces implements environs.Environ. +// NetworkInterfaces implements environs.Networking. func (env *environ) NetworkInterfaces(inst instance.Id) ([]network.InterfaceInfo, error) { return env.client.GetNetworkInterfaces(inst, env.ecfg) } @@ -55,6 +65,6 @@ } // ReleaseContainerAddresses implements environs.Networking. 
-func (e *environ) ReleaseContainerAddresses(interfaces []network.InterfaceInfo) error { +func (e *environ) ReleaseContainerAddresses(interfaces []network.ProviderInterfaceInfo) error { return errors.NotSupportedf("container address allocation") } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/vsphere/environ_policy.go juju-core-2.0.0/src/github.com/juju/juju/provider/vsphere/environ_policy.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/vsphere/environ_policy.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/vsphere/environ_policy.go 2016-10-13 14:31:49.000000000 +0000 @@ -26,7 +26,9 @@ return nil } -func (env *environ) getSupportedArchitectures() ([]string, error) { +// supportedArchitectures returns the image architectures which can +// be hosted by this environment. +func (env *environ) allSupportedArchitectures() ([]string, error) { env.archLock.Lock() defer env.archLock.Unlock() @@ -74,19 +76,13 @@ // validate and merge constraints. 
func (env *environ) ConstraintsValidator() (constraints.Validator, error) { validator := constraints.NewValidator() - - // unsupported - validator.RegisterUnsupported(unsupportedConstraints) - // vocab - - supportedArches, err := env.getSupportedArchitectures() + supportedArches, err := env.allSupportedArchitectures() if err != nil { return nil, errors.Trace(err) } validator.RegisterVocabulary(constraints.Arch, supportedArches) - return validator, nil } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/vsphere/environ_policy_test.go juju-core-2.0.0/src/github.com/juju/juju/provider/vsphere/environ_policy_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/vsphere/environ_policy_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/vsphere/environ_policy_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -57,7 +57,6 @@ cons := constraints.MustParse("arch=ppc64el") _, err = validator.Validate(cons) - c.Check(err, gc.ErrorMatches, "invalid constraint value: arch=ppc64el\nvalid values are:.*") } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/vsphere/environ_test.go juju-core-2.0.0/src/github.com/juju/juju/provider/vsphere/environ_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/vsphere/environ_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/vsphere/environ_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -53,3 +53,9 @@ err := s.Env.PrepareForBootstrap(envtesting.BootstrapContext(c)) c.Check(err, jc.ErrorIsNil) } + +func (s *environSuite) TestSupportsNetworking(c *gc.C) { + var _ environs.Networking = s.Env + _, ok := environs.SupportsNetworking(s.Env) + c.Assert(ok, jc.IsTrue) +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/vsphere/fake_methods_test.go juju-core-2.0.0/src/github.com/juju/juju/provider/vsphere/fake_methods_test.go --- 
juju-core-2.0~beta15/src/github.com/juju/juju/provider/vsphere/fake_methods_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/vsphere/fake_methods_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -230,8 +230,8 @@ }) } -func (s *BaseSuite) FakeCreateInstance(c *fakeClient, serverUrl string, checker *gc.C) { - s.FakeImportOvf(c, serverUrl, checker) +func (s *BaseSuite) FakeCreateInstance(c *fakeClient, serverURL string, checker *gc.C) { + s.FakeImportOvf(c, serverURL, checker) powerOnTask := types.ManagedObjectReference{} c.SetProxyHandler("PowerOnVM_Task", func(req, res soap.HasFault) { resBody := res.(*methods.PowerOnVM_TaskBody) @@ -292,7 +292,7 @@ }) } -func (s *BaseSuite) FakeImportOvf(c *fakeClient, serverUrl string, checker *gc.C) { +func (s *BaseSuite) FakeImportOvf(c *fakeClient, serverURL string, checker *gc.C) { c.SetPropertyProxyHandler("FakeDatacenter", RetrieveDatacenterProperties) c.SetProxyHandler("CreateImportSpec", func(req, res soap.HasFault) { resBody := res.(*methods.CreateImportSpecBody) @@ -352,7 +352,7 @@ DeviceUrl: []types.HttpNfcLeaseDeviceUrl{ types.HttpNfcLeaseDeviceUrl{ ImportKey: "key1", - Url: serverUrl + "/disk-device/", + Url: serverURL + "/disk-device/", }, }, }, diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/vsphere/image_metadata.go juju-core-2.0.0/src/github.com/juju/juju/provider/vsphere/image_metadata.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/vsphere/image_metadata.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/vsphere/image_metadata.go 2016-10-13 14:31:49.000000000 +0000 @@ -20,7 +20,7 @@ */ type OvaFileMetadata struct { - Url string + URL string Arch string `json:"arch"` Size int `json:"size"` Path string `json:"path"` @@ -88,7 +88,7 @@ if file.FileType == "ova" { //ignore error for url data source url, _ := source.URL(file.Path) - file.Url = url + file.URL = url matchingImages = 
append(matchingImages, file) } } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/vsphere/instance.go juju-core-2.0.0/src/github.com/juju/juju/provider/vsphere/instance.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/vsphere/instance.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/vsphere/instance.go 2016-10-13 14:31:49.000000000 +0000 @@ -41,7 +41,7 @@ // but that method does not exist. // return inst.base.Status() return instance.InstanceStatus{ - Status: status.StatusPending, + Status: status.Pending, } } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/vsphere/ova_import_manager.go juju-core-2.0.0/src/github.com/juju/juju/provider/vsphere/ova_import_manager.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/vsphere/ova_import_manager.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/vsphere/ova_import_manager.go 2016-10-13 14:31:49.000000000 +0000 @@ -64,7 +64,7 @@ logger.Errorf("can't remove temp directory, error: %s", err.Error()) } }() - ovf, err := m.downloadOva(basePath, instSpec.img.Url) + ovf, err := m.downloadOva(basePath, instSpec.img.URL) if err != nil { return nil, errors.Trace(err) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/vsphere/provider.go juju-core-2.0.0/src/github.com/juju/juju/provider/vsphere/provider.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/vsphere/provider.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/vsphere/provider.go 2016-10-13 14:31:49.000000000 +0000 @@ -40,11 +40,6 @@ return args.Config, nil } -// RestrictedConfigAttributes is specified in the EnvironProvider interface. -func (environProvider) RestrictedConfigAttributes() []string { - return []string{} -} - // Validate implements environs.EnvironProvider. 
func (environProvider) Validate(cfg, old *config.Config) (valid *config.Config, err error) { if old == nil { @@ -68,11 +63,6 @@ return ecfg.Config, nil } -// SecretAttrs implements environs.EnvironProvider. -func (environProvider) SecretAttrs(cfg *config.Config) (map[string]string, error) { - return map[string]string{}, nil -} - func validateCloudSpec(spec environs.CloudSpec) error { if err := spec.Validate(); err != nil { return errors.Trace(err) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/vsphere/storage.go juju-core-2.0.0/src/github.com/juju/juju/provider/vsphere/storage.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/vsphere/storage.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/vsphere/storage.go 2016-10-13 14:31:49.000000000 +0000 @@ -1,6 +1,8 @@ // Copyright 2016 Canonical Ltd. // Licensed under the AGPLv3, see LICENCE file for details. +// +build !gccgo + package vsphere import ( @@ -10,8 +12,8 @@ ) // StorageProviderTypes implements storage.ProviderRegistry. -func (*environ) StorageProviderTypes() []storage.ProviderType { - return nil +func (*environ) StorageProviderTypes() ([]storage.ProviderType, error) { + return nil, nil } // StorageProvider implements storage.ProviderRegistry. 
diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/provider/vsphere/testing_test.go juju-core-2.0.0/src/github.com/juju/juju/provider/vsphere/testing_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/provider/vsphere/testing_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/provider/vsphere/testing_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -18,7 +18,6 @@ "github.com/juju/govmomi/vim25/methods" "github.com/juju/govmomi/vim25/soap" "github.com/juju/govmomi/vim25/types" - gitjujutesting "github.com/juju/testing" jc "github.com/juju/testing/checkers" "golang.org/x/net/context" gc "gopkg.in/check.v1" @@ -26,7 +25,6 @@ "github.com/juju/juju/cloud" "github.com/juju/juju/environs" "github.com/juju/juju/environs/config" - "github.com/juju/juju/juju/osenv" "github.com/juju/juju/testing" ) @@ -57,24 +55,23 @@ } type BaseSuite struct { - gitjujutesting.IsolationSuite + testing.FakeJujuXDGDataHomeSuite Config *config.Config EnvConfig *environConfig Env *environ ServeMux *http.ServeMux - ServerUrl string + ServerURL string } func (s *BaseSuite) SetUpTest(c *gc.C) { - s.IsolationSuite.SetUpTest(c) + s.FakeJujuXDGDataHomeSuite.SetUpTest(c) s.PatchValue(&newConnection, newFakeConnection) s.initEnv(c) - s.setUpHttpProxy(c) + s.setUpHTTPProxy(c) s.FakeMetadataServer() - osenv.SetJujuXDGDataHome(c.MkDir()) } func (s *BaseSuite) initEnv(c *gc.C) { @@ -103,19 +100,19 @@ s.setConfig(c, cfg) } -func (s *BaseSuite) setUpHttpProxy(c *gc.C) { +func (s *BaseSuite) setUpHTTPProxy(c *gc.C) { s.ServeMux = http.NewServeMux() server := httptest.NewServer(s.ServeMux) - s.ServerUrl = server.URL + s.ServerURL = server.URL cfg, _ := s.Config.Apply(map[string]interface{}{"image-metadata-url": server.URL}) s.setConfig(c, cfg) } -type fakeApiHandler func(req, res soap.HasFault) +type fakeAPIHandler func(req, res soap.HasFault) type fakePropertiesHandler func(req, res *methods.RetrievePropertiesBody) -type fakeApiCall struct { - handler 
fakeApiHandler +type fakeAPICall struct { + handler fakeAPIHandler method string } @@ -125,7 +122,7 @@ } type fakeClient struct { - handlers []fakeApiCall + handlers []fakeAPICall propertyHandlers []fakePropertiesCall } @@ -155,8 +152,8 @@ return nil } -func (c *fakeClient) SetProxyHandler(method string, handler fakeApiHandler) { - c.handlers = append(c.handlers, fakeApiCall{method: method, handler: handler}) +func (c *fakeClient) SetProxyHandler(method string, handler fakeAPIHandler) { + c.handlers = append(c.handlers, fakeAPICall{method: method, handler: handler}) } func (c *fakeClient) SetPropertyProxyHandler(obj string, handler fakePropertiesHandler) { @@ -165,7 +162,7 @@ var newFakeConnection = func(url *url.URL) (*govmomi.Client, error) { fakeClient := &fakeClient{ - handlers: make([]fakeApiCall, 0, 100), + handlers: make([]fakeAPICall, 0, 100), propertyHandlers: make([]fakePropertiesCall, 0, 100), } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/README.md juju-core-2.0.0/src/github.com/juju/juju/README.md --- juju-core-2.0~beta15/src/github.com/juju/juju/README.md 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/README.md 2016-10-13 14:31:49.000000000 +0000 @@ -3,7 +3,7 @@ juju is devops distilled. -Juju enables you to use [Charms](http://juju.ubuntu.com/charms) to deploy your application architectures to EC2, OpenStack, +Juju enables you to use [Charms](http://jujucharms.com/charms) to deploy your application architectures to EC2, OpenStack, Azure, HP your data center and even your own Ubuntu based laptop. Moving between models is simple giving you the flexibility to switch hosts whenever you want — for free. @@ -118,21 +118,6 @@ juju switch local sudo juju bootstrap ---upload-tools --------------- - -The `juju` client program, and the juju 'tools' are deployed in lockstep. When a -release of `juju` is made, the compiled tools matching that version of juju -are extracted and uploaded to a known location. 
This consumes a release version -number, and implies that no tools are available for the next, development, version -of juju. Therefore, when using the development version of juju you will need to -pass an additional flag, `--upload-tools` to instruct the `juju` client to build -a set of tools from source and upload them to the model as part of the -bootstrap process. - - juju bootstrap -m your-model --upload-tools {--debug} - - Installing bash completion for juju =================================== @@ -143,3 +128,24 @@ juju status , juju ssh , juju terminate-machine , etc), by parsing cached `juju status` output for speedup. It also does command flags completion by parsing `juju help ...` output. + +Building Juju as a Snap Package +=============================== + +Building +-------- +This requires the godeps plugin -- make sure your snapcraft version is > 2.13.1. Run snapcraft at the root of the repository. A snap will build. + +Current State +------------- +Needs devmode per the known issues below. The resulting snap itself works perfectly in developer mode. Do note however credentials are not shared with a debian packaged juju, and any installed juju's will own `juju` on your `$PATH` over the snap. 
+ +Known Issues +---------------- + * Missing support for abstract mutex socket (https://bugs.launchpad.net/snappy/+bug/1604967) + * Needs LXD interface + * Needs SSH interface (https://bugs.launchpad.net/snappy/+bug/1606574) + * Bash completion doesn't work (https://launchpad.net/bugs/1612303) + * Snap doesn't use local source as part for snapcraft + + diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/resource/cmd/list_charm_resources.go juju-core-2.0.0/src/github.com/juju/juju/resource/cmd/list_charm_resources.go --- juju-core-2.0~beta15/src/github.com/juju/juju/resource/cmd/list_charm_resources.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/resource/cmd/list_charm_resources.go 2016-10-13 14:31:49.000000000 +0000 @@ -6,37 +6,29 @@ import ( "github.com/juju/cmd" "github.com/juju/errors" + "github.com/juju/gnuflag" "gopkg.in/juju/charm.v6-unstable" charmresource "gopkg.in/juju/charm.v6-unstable/resource" csparams "gopkg.in/juju/charmrepo.v2-unstable/csclient/params" - "launchpad.net/gnuflag" "github.com/juju/juju/charmstore" "github.com/juju/juju/cmd/modelcmd" ) -// CharmCommandBase exposes the functionality of charmcmd.CommandBase -// needed here. -type CharmCommandBase interface { - // Connect connects to the charm store and returns a client. - // cmd.Context needs to be passed in so that we can do authentication - // via the cli if available. - Connect(*cmd.Context) (CharmResourceLister, error) -} - -// CharmResourceLister has the charm store API methods needed by ListCharmResourcesCommand. -type CharmResourceLister interface { - // ListResources lists the resources for each of the identified charms. - ListResources([]charmstore.CharmID) ([][]charmresource.Resource, error) - - // Close closes the client. - Close() error +// CharmResourceLister lists resources for the given charm ids. 
+type ResourceLister interface { + ListResources(ids []charmstore.CharmID) ([][]charmresource.Resource, error) } // ListCharmResourcesCommand implements the "juju charm resources" command. type ListCharmResourcesCommand struct { modelcmd.ModelCommandBase - CharmCommandBase + + // ResourceLister is called by Run to list charm resources. The + // default implementation uses juju/juju/charmstore.Client, but + // it may be set to mock out the call to that method. + ResourceLister ResourceLister + out cmd.Output channel string charm string @@ -44,11 +36,10 @@ // NewListCharmResourcesCommand returns a new command that lists resources defined // by a charm. -func NewListCharmResourcesCommand(base CharmCommandBase) *ListCharmResourcesCommand { - cmd := &ListCharmResourcesCommand{ - CharmCommandBase: base, - } - return cmd +func NewListCharmResourcesCommand() *ListCharmResourcesCommand { + var c ListCharmResourcesCommand + c.ResourceLister = &c + return &c } var listCharmResourcesDoc = ` @@ -81,6 +72,7 @@ // SetFlags implements cmd.Command. func (c *ListCharmResourcesCommand) SetFlags(f *gnuflag.FlagSet) { + c.ModelCommandBase.SetFlags(f) defaultFormat := "tabular" c.out.AddFlags(f, defaultFormat, map[string]cmd.Formatter{ "tabular": FormatCharmTabular, @@ -100,7 +92,6 @@ if err := cmd.CheckEmpty(args[1:]); err != nil { return errors.Trace(err) } - return nil } @@ -108,13 +99,6 @@ func (c *ListCharmResourcesCommand) Run(ctx *cmd.Context) error { // TODO(ericsnow) Adjust this to the charm store. - apiclient, err := c.Connect(ctx) - if err != nil { - // TODO(ericsnow) Return a more user-friendly error? 
- return errors.Trace(err) - } - defer apiclient.Close() - charmURLs, err := resolveCharms([]string{c.charm}) if err != nil { return errors.Trace(err) @@ -125,13 +109,19 @@ charms[i] = charmstore.CharmID{URL: id, Channel: csparams.Channel(c.channel)} } - resources, err := apiclient.ListResources(charms) + resources, err := c.ResourceLister.ListResources(charms) if err != nil { return errors.Trace(err) } if len(resources) != 1 { return errors.New("got bad data from charm store") } + res := resources[0] + + if len(res) == 0 && c.out.Name() == "tabular" { + ctx.Infof("No resources to display.") + return nil + } // Note that we do not worry about c.CompatVersion // for show-charm-resources... @@ -140,6 +130,20 @@ return c.out.Write(ctx, formatted) } +// ListCharmResources implements CharmResourceLister by getting the charmstore client +// from the command's ModelCommandBase. +func (c *ListCharmResourcesCommand) ListResources(ids []charmstore.CharmID) ([][]charmresource.Resource, error) { + bakeryClient, err := c.BakeryClient() + if err != nil { + return nil, errors.Trace(err) + } + client, err := charmstore.NewCustomClient(bakeryClient, nil) + if err != nil { + return nil, errors.Trace(err) + } + return client.ListResources(ids) +} + func resolveCharms(charms []string) ([]*charm.URL, error) { var charmURLs []*charm.URL for _, raw := range charms { diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/resource/cmd/list_charm_resources_test.go juju-core-2.0.0/src/github.com/juju/juju/resource/cmd/list_charm_resources_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/resource/cmd/list_charm_resources_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/resource/cmd/list_charm_resources_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -7,7 +7,6 @@ "strings" jujucmd "github.com/juju/cmd" - "github.com/juju/errors" "github.com/juju/juju/charmstore" "github.com/juju/testing" jc "github.com/juju/testing/checkers" @@ -32,15 +31,6 @@ 
s.client = &stubCharmStore{stub: s.stub} } -func (s *ListCharmSuite) newAPIClient(c *ListCharmResourcesCommand) (CharmResourceLister, error) { - s.stub.AddCall("newAPIClient", c) - if err := s.stub.NextErr(); err != nil { - return nil, errors.Trace(err) - } - - return s.client, nil -} - func (s *ListCharmSuite) TestInfo(c *gc.C) { var command ListCharmResourcesCommand info := command.Info() @@ -77,32 +67,24 @@ resources[0].Revision = 2 s.client.ReturnListResources = [][]charmresource.Resource{resources} - command := NewListCharmResourcesCommand(s.client) + command := NewListCharmResourcesCommand() + command.ResourceLister = s.client code, stdout, stderr := runCmd(c, command, "cs:a-charm") c.Check(code, gc.Equals, 0) c.Check(stdout, gc.Equals, ` -RESOURCE REVISION -website 2 -music 1 +Resource Revision +website 2 +music 1 `[1:]) c.Check(stderr, gc.Equals, "") s.stub.CheckCallNames(c, - "Connect", "ListResources", - "Close", ) - s.stub.CheckCall(c, 1, "ListResources", []charmstore.CharmID{ + s.stub.CheckCall(c, 0, "ListResources", []charmstore.CharmID{ { - URL: &charm.URL{ - Schema: "cs", - User: "", - Name: "a-charm", - Revision: -1, - Series: "", - Channel: "", - }, + URL: charm.MustParseURL("cs:a-charm"), Channel: "stable", }, }) @@ -111,16 +93,14 @@ func (s *ListCharmSuite) TestNoResources(c *gc.C) { s.client.ReturnListResources = [][]charmresource.Resource{{}} - command := NewListCharmResourcesCommand(s.client) + command := NewListCharmResourcesCommand() + command.ResourceLister = s.client code, stdout, stderr := runCmd(c, command, "cs:a-charm") c.Check(code, gc.Equals, 0) - c.Check(stdout, gc.Equals, ` -RESOURCE REVISION - -`[1:]) - c.Check(stderr, gc.Equals, "") - s.stub.CheckCallNames(c, "Connect", "ListResources", "Close") + c.Check(stderr, gc.Equals, "No resources to display.\n") + c.Check(stdout, gc.Equals, "") + s.stub.CheckCallNames(c, "ListResources") } func (s *ListCharmSuite) TestOutputFormats(c *gc.C) { @@ -136,9 +116,9 @@ formats := 
map[string]string{ "tabular": ` -RESOURCE REVISION -website 1 -music 1 +Resource Revision +website 1 +music 1 `[1:], "yaml": ` @@ -185,7 +165,8 @@ } for format, expected := range formats { c.Logf("checking format %q", format) - command := NewListCharmResourcesCommand(s.client) + command := NewListCharmResourcesCommand() + command.ResourceLister = s.client args := []string{ "--format", format, "cs:a-charm", @@ -208,7 +189,8 @@ charmRes(c, "music", ".mp3", "mp3 of your backing vocals", string(fp2.Bytes())), } s.client.ReturnListResources = [][]charmresource.Resource{resources} - command := NewListCharmResourcesCommand(s.client) + command := NewListCharmResourcesCommand() + command.ResourceLister = s.client code, _, stderr := runCmd(c, command, "--channel", "development", diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/resource/cmd/output_tabular.go juju-core-2.0.0/src/github.com/juju/juju/resource/cmd/output_tabular.go --- juju-core-2.0~beta15/src/github.com/juju/juju/resource/cmd/output_tabular.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/resource/cmd/output_tabular.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,31 +4,30 @@ package cmd import ( - "bytes" "fmt" + "io" "sort" - "text/tabwriter" + "github.com/juju/ansiterm" "github.com/juju/errors" + "github.com/juju/juju/cmd/output" ) // FormatCharmTabular returns a tabular summary of charm resources. -func FormatCharmTabular(value interface{}) ([]byte, error) { +func FormatCharmTabular(writer io.Writer, value interface{}) error { resources, valueConverted := value.([]FormattedCharmResource) if !valueConverted { - return nil, errors.Errorf("expected value of type %T, got %T", resources, value) + return errors.Errorf("expected value of type %T, got %T", resources, value) } // TODO(ericsnow) sort the rows first? - var out bytes.Buffer - // To format things into columns. - tw := tabwriter.NewWriter(&out, 0, 1, 1, ' ', 0) + tw := output.TabWriter(writer) // Write the header. 
// We do not print a section label. - fmt.Fprintln(tw, "RESOURCE\tREVISION") + fmt.Fprintln(tw, "Resource\tRevision") // Print each info to its own row. for _, res := range resources { @@ -40,33 +39,35 @@ } tw.Flush() - return out.Bytes(), nil + return nil } // FormatSvcTabular returns a tabular summary of resources. -func FormatSvcTabular(value interface{}) ([]byte, error) { +func FormatSvcTabular(writer io.Writer, value interface{}) error { switch resources := value.(type) { case FormattedServiceInfo: - return formatServiceTabular(resources), nil + formatServiceTabular(writer, resources) + return nil case []FormattedUnitResource: - return formatUnitTabular(resources), nil + formatUnitTabular(writer, resources) + return nil case FormattedServiceDetails: - return formatServiceDetailTabular(resources), nil + formatServiceDetailTabular(writer, resources) + return nil case FormattedUnitDetails: - return formatUnitDetailTabular(resources), nil + formatUnitDetailTabular(writer, resources) + return nil default: - return nil, errors.Errorf("unexpected type for data: %T", resources) + return errors.Errorf("unexpected type for data: %T", resources) } } -func formatServiceTabular(info FormattedServiceInfo) []byte { +func formatServiceTabular(writer io.Writer, info FormattedServiceInfo) { // TODO(ericsnow) sort the rows first? - var out bytes.Buffer - - fmt.Fprintln(&out, "[Service]") - tw := tabwriter.NewWriter(&out, 0, 1, 1, ' ', 0) - fmt.Fprintln(tw, "RESOURCE\tSUPPLIED BY\tREVISION") + fmt.Fprintln(writer, "[Service]") + tw := output.TabWriter(writer) + fmt.Fprintln(tw, "Resource\tSupplied by\tRevision") // Print each info to its own row. for _, r := range info.Resources { @@ -83,16 +84,14 @@ // with the below fmt.Fprintlns. 
tw.Flush() - writeUpdates(info.Updates, &out, tw) - - return out.Bytes() + writeUpdates(info.Updates, writer, tw) } -func writeUpdates(updates []FormattedCharmResource, out *bytes.Buffer, tw *tabwriter.Writer) { +func writeUpdates(updates []FormattedCharmResource, out io.Writer, tw *ansiterm.TabWriter) { if len(updates) > 0 { fmt.Fprintln(out, "") fmt.Fprintln(out, "[Updates Available]") - fmt.Fprintln(tw, "RESOURCE\tREVISION") + fmt.Fprintln(tw, "Resource\tRevision") for _, r := range updates { fmt.Fprintf(tw, "%v\t%v\n", r.Name, @@ -104,19 +103,17 @@ tw.Flush() } -func formatUnitTabular(resources []FormattedUnitResource) []byte { +func formatUnitTabular(writer io.Writer, resources []FormattedUnitResource) { // TODO(ericsnow) sort the rows first? - var out bytes.Buffer - - fmt.Fprintln(&out, "[Unit]") + fmt.Fprintln(writer, "[Unit]") // To format things into columns. - tw := tabwriter.NewWriter(&out, 0, 1, 1, ' ', 0) + tw := output.TabWriter(writer) // Write the header. // We do not print a section label. - fmt.Fprintln(tw, "RESOURCE\tREVISION") + fmt.Fprintln(tw, "Resource\tRevision") // Print each info to its own row. for _, r := range resources { @@ -127,23 +124,20 @@ ) } tw.Flush() - - return out.Bytes() } -func formatServiceDetailTabular(resources FormattedServiceDetails) []byte { +func formatServiceDetailTabular(writer io.Writer, resources FormattedServiceDetails) { // note that the unit resource can be a zero value here, to indicate that // the unit has not downloaded that resource yet. - var out bytes.Buffer - fmt.Fprintln(&out, "[Units]") + fmt.Fprintln(writer, "[Units]") sort.Sort(byUnitID(resources.Resources)) // To format things into columns. - tw := tabwriter.NewWriter(&out, 0, 1, 1, ' ', 0) + tw := output.TabWriter(writer) // Write the header. 
- fmt.Fprintln(tw, "UNIT\tRESOURCE\tREVISION\tEXPECTED") + fmt.Fprintln(tw, "Unit\tResource\tRevision\tExpected") for _, r := range resources.Resources { fmt.Fprintf(tw, "%v\t%v\t%v\t%v\n", @@ -155,24 +149,21 @@ } tw.Flush() - writeUpdates(resources.Updates, &out, tw) - - return out.Bytes() + writeUpdates(resources.Updates, writer, tw) } -func formatUnitDetailTabular(resources FormattedUnitDetails) []byte { +func formatUnitDetailTabular(writer io.Writer, resources FormattedUnitDetails) { // note that the unit resource can be a zero value here, to indicate that // the unit has not downloaded that resource yet. - var out bytes.Buffer - fmt.Fprintln(&out, "[Unit]") + fmt.Fprintln(writer, "[Unit]") sort.Sort(byUnitID(resources)) // To format things into columns. - tw := tabwriter.NewWriter(&out, 0, 1, 1, ' ', 0) + tw := output.TabWriter(writer) // Write the header. - fmt.Fprintln(tw, "RESOURCE\tREVISION\tEXPECTED") + fmt.Fprintln(tw, "Resource\tRevision\tExpected") for _, r := range resources { fmt.Fprintf(tw, "%v\t%v\t%v\n", @@ -182,7 +173,6 @@ ) } tw.Flush() - return out.Bytes() } type byUnitID []FormattedDetailResource diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/resource/cmd/output_tabular_test.go juju-core-2.0.0/src/github.com/juju/juju/resource/cmd/output_tabular_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/resource/cmd/output_tabular_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/resource/cmd/output_tabular_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,6 +4,7 @@ package cmd import ( + "bytes" "time" "github.com/juju/testing" @@ -20,16 +21,21 @@ testing.IsolationSuite } +func (s *CharmTabularSuite) formatTabular(c *gc.C, value interface{}) string { + out := &bytes.Buffer{} + err := FormatCharmTabular(out, value) + c.Assert(err, jc.ErrorIsNil) + return out.String() +} + func (s *CharmTabularSuite) TestFormatCharmTabularOkay(c *gc.C) { res := charmRes(c, "spam", ".tgz", "...", "") formatted := 
[]FormattedCharmResource{FormatCharmResource(res)} - data, err := FormatCharmTabular(formatted) - c.Assert(err, jc.ErrorIsNil) - - c.Check(string(data), gc.Equals, ` -RESOURCE REVISION -spam 1 + data := s.formatTabular(c, formatted) + c.Check(data, gc.Equals, ` +Resource Revision +spam 1 `[1:]) } @@ -37,12 +43,10 @@ res := charmRes(c, "spam", "", "", "") formatted := []FormattedCharmResource{FormatCharmResource(res)} - data, err := FormatCharmTabular(formatted) - c.Assert(err, jc.ErrorIsNil) - - c.Check(string(data), gc.Equals, ` -RESOURCE REVISION -spam 1 + data := s.formatTabular(c, formatted) + c.Check(data, gc.Equals, ` +Resource Revision +spam 1 `[1:]) } @@ -51,12 +55,10 @@ res.Origin = charmresource.OriginUpload formatted := []FormattedCharmResource{FormatCharmResource(res)} - data, err := FormatCharmTabular(formatted) - c.Assert(err, jc.ErrorIsNil) - - c.Check(string(data), gc.Equals, ` -RESOURCE REVISION -spam 1 + data := s.formatTabular(c, formatted) + c.Check(data, gc.Equals, ` +Resource Revision +spam 1 `[1:]) } @@ -70,23 +72,20 @@ } formatted[1].Revision = 2 - data, err := FormatCharmTabular(formatted) - c.Assert(err, jc.ErrorIsNil) - - c.Check(string(data), gc.Equals, ` -RESOURCE REVISION -spam 1 -eggs 2 -somethingbig 1 -song 1 -avatar 1 + data := s.formatTabular(c, formatted) + c.Check(data, gc.Equals, ` +Resource Revision +spam 1 +eggs 2 +somethingbig 1 +song 1 +avatar 1 `[1:]) } func (s *CharmTabularSuite) TestFormatCharmTabularBadValue(c *gc.C) { bogus := "should have been something else" - _, err := FormatCharmTabular(bogus) - + err := FormatCharmTabular(nil, bogus) c.Check(err, gc.ErrorMatches, `expected value of type .*`) } @@ -96,6 +95,13 @@ testing.IsolationSuite } +func (s *SvcTabularSuite) formatTabular(c *gc.C, value interface{}) string { + out := &bytes.Buffer{} + err := FormatSvcTabular(out, value) + c.Assert(err, jc.ErrorIsNil) + return out.String() +} + func (s *SvcTabularSuite) TestFormatServiceOkay(c *gc.C) { res := resource.Resource{ 
@@ -114,13 +120,11 @@ Resources: []FormattedSvcResource{FormatSvcResource(res)}, } - data, err := FormatSvcTabular(formatted) - c.Assert(err, jc.ErrorIsNil) - - c.Check(string(data), gc.Equals, ` + data := s.formatTabular(c, formatted) + c.Check(data, gc.Equals, ` [Service] -RESOURCE SUPPLIED BY REVISION -openjdk charmstore 7 +Resource Supplied by Revision +openjdk charmstore 7 `[1:]) } @@ -142,13 +146,11 @@ FormattedUnitResource(FormatSvcResource(res)), } - data, err := FormatSvcTabular(formatted) - c.Assert(err, jc.ErrorIsNil) - - c.Check(string(data), gc.Equals, ` + data := s.formatTabular(c, formatted) + c.Check(data, gc.Equals, ` [Unit] -RESOURCE REVISION -openjdk 7 +Resource Revision +openjdk 7 `[1:]) } @@ -246,37 +248,29 @@ }) c.Assert(err, jc.ErrorIsNil) - data, err := FormatSvcTabular(formatted) - c.Assert(err, jc.ErrorIsNil) - + data := s.formatTabular(c, formatted) // Notes: sorted by name, then by revision, newest first. - c.Check(string(data), gc.Equals, ` + c.Check(data, gc.Equals, ` [Service] -RESOURCE SUPPLIED BY REVISION -openjdk charmstore 7 -website upload - -openjdk2 charmstore 8 -website2 Bill User 2012-12-12T12:12 +Resource Supplied by Revision +openjdk charmstore 7 +website upload - +openjdk2 charmstore 8 +website2 Bill User 2012-12-12T12:12 [Updates Available] -RESOURCE REVISION -openjdk 10 +Resource Revision +openjdk 10 `[1:]) } func (s *SvcTabularSuite) TestFormatSvcTabularBadValue(c *gc.C) { bogus := "should have been something else" - _, err := FormatSvcTabular(bogus) + err := FormatSvcTabular(nil, bogus) c.Check(err, gc.ErrorMatches, `unexpected type for data: string`) } -var _ = gc.Suite(&DetailsTabularSuite{}) - -type DetailsTabularSuite struct { - testing.IsolationSuite -} - -func (s *DetailsTabularSuite) TestFormatServiceDetailsOkay(c *gc.C) { +func (s *SvcTabularSuite) TestFormatServiceDetailsOkay(c *gc.C) { res := charmRes(c, "spam", ".tgz", "...", "") updates := []FormattedCharmResource{FormatCharmResource(res)} @@ -302,22 
+296,20 @@ Updates: updates, } - output, err := FormatSvcTabular(data) - c.Assert(err, jc.ErrorIsNil) - - c.Assert(string(output), gc.Equals, ` + output := s.formatTabular(c, data) + c.Assert(output, gc.Equals, ` [Units] -UNIT RESOURCE REVISION EXPECTED -5 config combRev2 combRev3 -10 data combRev1 combRev1 (fetching: 17%) +Unit Resource Revision Expected +5 config combRev2 combRev3 +10 data combRev1 combRev1 (fetching: 17%) [Updates Available] -RESOURCE REVISION -spam 1 +Resource Revision +spam 1 `[1:]) } -func (s *DetailsTabularSuite) TestFormatUnitDetailsOkay(c *gc.C) { +func (s *SvcTabularSuite) TestFormatUnitDetailsOkay(c *gc.C) { data := FormattedUnitDetails{ { UnitID: "svc/10", @@ -337,14 +329,12 @@ }, } - output, err := FormatSvcTabular(data) - c.Assert(err, jc.ErrorIsNil) - - c.Assert(string(output), gc.Equals, ` + output := s.formatTabular(c, data) + c.Assert(output, gc.Equals, ` [Unit] -RESOURCE REVISION EXPECTED -config combRev2 combRev3 (fetching: 91%) -data combRev1 combRev1 +Resource Revision Expected +config combRev2 combRev3 (fetching: 91%) +data combRev1 combRev1 `[1:]) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/resource/cmd/show_service.go juju-core-2.0.0/src/github.com/juju/juju/resource/cmd/show_service.go --- juju-core-2.0~beta15/src/github.com/juju/juju/resource/cmd/show_service.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/resource/cmd/show_service.go 2016-10-13 14:31:49.000000000 +0000 @@ -6,8 +6,8 @@ import ( "github.com/juju/cmd" "github.com/juju/errors" + "github.com/juju/gnuflag" "gopkg.in/juju/names.v2" - "launchpad.net/gnuflag" "github.com/juju/juju/cmd/modelcmd" "github.com/juju/juju/resource" @@ -62,11 +62,12 @@ // SetFlags implements cmd.Command.SetFlags. 
func (c *ShowServiceCommand) SetFlags(f *gnuflag.FlagSet) { - const defaultFlag = "tabular" - c.out.AddFlags(f, defaultFlag, map[string]cmd.Formatter{ - defaultFlag: FormatSvcTabular, - "yaml": cmd.FormatYaml, - "json": cmd.FormatJson, + c.ModelCommandBase.SetFlags(f) + const defaultFormat = "tabular" + c.out.AddFlags(f, defaultFormat, map[string]cmd.Formatter{ + defaultFormat: FormatSvcTabular, + "yaml": cmd.FormatYaml, + "json": cmd.FormatJson, }) f.BoolVar(&c.details, "details", false, "show detailed information about resources used by each unit.") @@ -113,18 +114,25 @@ return errors.Errorf("bad data returned from server") } v := vals[0] + if unit == "" { return c.formatServiceResources(ctx, v) } return c.formatUnitResources(ctx, unit, service, v) } +const noResources = "No resources to display." + func (c *ShowServiceCommand) formatServiceResources(ctx *cmd.Context, sr resource.ServiceResources) error { if c.details { formatted, err := FormatServiceDetails(sr) if err != nil { return errors.Trace(err) } + if len(formatted.Resources) == 0 && len(formatted.Updates) == 0 { + ctx.Infof(noResources) + return nil + } return c.out.Write(ctx, formatted) } @@ -133,10 +141,19 @@ if err != nil { return errors.Trace(err) } + if len(formatted.Resources) == 0 && len(formatted.Updates) == 0 { + ctx.Infof(noResources) + return nil + } return c.out.Write(ctx, formatted) } func (c *ShowServiceCommand) formatUnitResources(ctx *cmd.Context, unit, service string, sr resource.ServiceResources) error { + if len(sr.UnitResources) == 0 { + ctx.Infof(noResources) + return nil + } + if c.details { formatted, err := detailedResources(unit, sr) if err != nil { @@ -149,6 +166,7 @@ if err != nil { return errors.Trace(err) } + res := make([]FormattedUnitResource, len(resources)) for i, r := range resources { diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/resource/cmd/show_service_test.go juju-core-2.0.0/src/github.com/juju/juju/resource/cmd/show_service_test.go --- 
juju-core-2.0~beta15/src/github.com/juju/juju/resource/cmd/show_service_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/resource/cmd/show_service_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -175,15 +175,15 @@ c.Check(stdout, gc.Equals, ` [Service] -RESOURCE SUPPLIED BY REVISION -openjdk charmstore 7 -website upload - -rsc1234 charmstore 15 -website2 Bill User 2012-12-12T12:12 +Resource Supplied by Revision +openjdk charmstore 7 +website upload - +rsc1234 charmstore 15 +website2 Bill User 2012-12-12T12:12 [Updates Available] -RESOURCE REVISION -openjdk 10 +Resource Revision +openjdk 10 `[1:]) @@ -238,9 +238,9 @@ c.Check(stdout, gc.Equals, ` [Unit] -RESOURCE REVISION -rsc1234 15 -website2 2012-12-12T12:12 +Resource Revision +rsc1234 15 +website2 2012-12-12T12:12 `[1:]) @@ -402,13 +402,13 @@ c.Check(stdout, gc.Equals, ` [Units] -UNIT RESOURCE REVISION EXPECTED -5 alpha 10 15 -5 beta 2012-12-12T12:12 2012-12-12T12:12 -5 charlie 2011-11-11T11:11 2012-12-12T12:12 (fetching: 2%) -10 alpha 10 15 (fetching: 15%) -10 beta - 2012-12-12T12:12 -10 charlie 2011-11-11T11:11 2012-12-12T12:12 (fetching: 9%) +Unit Resource Revision Expected +5 alpha 10 15 +5 beta 2012-12-12T12:12 2012-12-12T12:12 +5 charlie 2011-11-11T11:11 2012-12-12T12:12 (fetching: 2%) +10 alpha 10 15 (fetching: 15%) +10 beta - 2012-12-12T12:12 +10 charlie 2011-11-11T11:11 2012-12-12T12:12 (fetching: 9%) `[1:]) @@ -542,10 +542,10 @@ c.Check(stdout, gc.Equals, ` [Unit] -RESOURCE REVISION EXPECTED -alpha 10 15 -beta - 2012-12-12T12:12 -charlie 2011-11-11T11:11 2012-12-12T12:12 (fetching: 0%) +Resource Revision Expected +alpha 10 15 +beta - 2012-12-12T12:12 +charlie 2011-11-11T11:11 2012-12-12T12:12 (fetching: 0%) `[1:]) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/resource/cmd/stub_test.go juju-core-2.0.0/src/github.com/juju/juju/resource/cmd/stub_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/resource/cmd/stub_test.go 2016-08-16 08:56:25.000000000 
+0000 +++ juju-core-2.0.0/src/github.com/juju/juju/resource/cmd/stub_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -6,7 +6,6 @@ import ( "io" - "github.com/juju/cmd" "github.com/juju/errors" "github.com/juju/juju/charmstore" "github.com/juju/testing" @@ -19,15 +18,6 @@ ReturnListResources [][]charmresource.Resource } -func (s *stubCharmStore) Connect(ctx *cmd.Context) (CharmResourceLister, error) { - s.stub.AddCall("Connect", ctx) - if err := s.stub.NextErr(); err != nil { - return nil, errors.Trace(err) - } - - return s, nil -} - func (s *stubCharmStore) ListResources(charms []charmstore.CharmID) ([][]charmresource.Resource, error) { s.stub.AddCall("ListResources", charms) if err := s.stub.NextErr(); err != nil { @@ -37,15 +27,6 @@ return s.ReturnListResources, nil } -func (s *stubCharmStore) Close() error { - s.stub.AddCall("Close") - if err := s.stub.NextErr(); err != nil { - return errors.Trace(err) - } - - return nil -} - type stubAPIClient struct { stub *testing.Stub } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/resource/context/cmd/get.go juju-core-2.0.0/src/github.com/juju/juju/resource/context/cmd/get.go --- juju-core-2.0~beta15/src/github.com/juju/juju/resource/context/cmd/get.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/resource/context/cmd/get.go 2016-10-13 14:31:49.000000000 +0000 @@ -8,15 +8,16 @@ "github.com/juju/cmd" "github.com/juju/errors" + "github.com/juju/juju/worker/uniter/runner/jujuc" ) // GetCmdName is the name of the resource-get command. const GetCmdName = "resource-get" // NewGetCmd creates a new GetCmd for the given hook context. -func NewGetCmd(c HookContext) (*GetCmd, error) { +func NewGetCmd(c jujuc.ContextComponent) (*GetCmd, error) { return &GetCmd{ - hookContext: c, + compContext: c, }, nil } @@ -24,7 +25,7 @@ type GetCmd struct { cmd.CommandBase - hookContext HookContext + compContext jujuc.ContextComponent resourceName string } @@ -88,7 +89,11 @@ // Run implements cmd.Command. 
func (c GetCmd) Run(ctx *cmd.Context) error { - filePath, err := c.hookContext.Download(c.resourceName) + hookContext, ok := c.compContext.(downloader) + if !ok { + return errors.Errorf("invalid component context") + } + filePath, err := hookContext.Download(c.resourceName) if err != nil { return errors.Annotate(err, "could not download resource") } @@ -98,3 +103,7 @@ } return nil } + +type downloader interface { + Download(name string) (string, error) +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/resource/context/cmd/get_test.go juju-core-2.0.0/src/github.com/juju/juju/resource/context/cmd/get_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/resource/context/cmd/get_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/resource/context/cmd/get_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -63,7 +63,7 @@ func (s *GetCmdSuite) TestRunOkay(c *gc.C) { getCmd := GetCmd{ - hookContext: s.hctx, + compContext: s.hctx, resourceName: "spam", } const expected = "/var/lib/juju/agents/unit-foo-1/resources/spam/a-file.tgz" @@ -81,7 +81,7 @@ func (s *GetCmdSuite) TestRunDownloadFailure(c *gc.C) { getCmd := GetCmd{ - hookContext: s.hctx, + compContext: s.hctx, resourceName: "spam", } failure := errors.New("") diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/resource/context/cmd/stub_test.go juju-core-2.0.0/src/github.com/juju/juju/resource/context/cmd/stub_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/resource/context/cmd/stub_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/resource/context/cmd/stub_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -22,3 +22,5 @@ return s.ReturnDownload, nil } + +func (s *stubHookContext) Flush() error { return nil } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/resource/resourceadapters/apiclient.go juju-core-2.0.0/src/github.com/juju/juju/resource/resourceadapters/apiclient.go --- 
juju-core-2.0~beta15/src/github.com/juju/juju/resource/resourceadapters/apiclient.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/resource/resourceadapters/apiclient.go 2016-10-13 14:31:49.000000000 +0000 @@ -6,7 +6,6 @@ import ( "github.com/juju/errors" - "github.com/juju/juju/api" "github.com/juju/juju/api/base" "github.com/juju/juju/resource" "github.com/juju/juju/resource/api/client" @@ -16,16 +15,7 @@ // NewAPIClient is mostly a copy of the newClient code in // component/all/resources.go. It lives here because it simplifies this code // immensely. -func NewAPIClient(newAPICaller func() (api.Connection, error)) (*client.Client, error) { - apiCaller, err := newAPICaller() - if err != nil { - return nil, errors.Trace(err) - } - - return newAPIClient(apiCaller) -} - -func newAPIClient(apiCaller api.Connection) (*client.Client, error) { +func NewAPIClient(apiCaller base.APICallCloser) (*client.Client, error) { caller := base.NewFacadeCallerForVersion(apiCaller, resource.FacadeName, server.Version) httpClient, err := apiCaller.HTTPClient() diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/resource/resourceadapters/cmd.go juju-core-2.0.0/src/github.com/juju/juju/resource/resourceadapters/cmd.go --- juju-core-2.0~beta15/src/github.com/juju/juju/resource/resourceadapters/cmd.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/resource/resourceadapters/cmd.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,32 +0,0 @@ -// Copyright 2016 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package resourceadapters - -import ( - "io" - - jujucmd "github.com/juju/cmd" - "github.com/juju/errors" - - "github.com/juju/juju/charmstore" - "github.com/juju/juju/cmd/juju/charmcmd" - "github.com/juju/juju/resource/cmd" -) - -// CharmCmdBase is an adapter for charmcmd.CommandBase. -type CharmCmdBase struct { - *charmcmd.CommandBase -} - -// Connect implements cmd.CommandBase. 
-func (c *CharmCmdBase) Connect(ctx *jujucmd.Context) (cmd.CharmResourceLister, error) { - client, closer, err := c.CommandBase.Connect(ctx) - if err != nil { - return nil, errors.Trace(err) - } - return struct { - charmstore.Client - io.Closer - }{client, closer}, nil -} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/resource/resourceadapters/deploy.go juju-core-2.0.0/src/github.com/juju/juju/resource/resourceadapters/deploy.go --- juju-core-2.0~beta15/src/github.com/juju/juju/resource/resourceadapters/deploy.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/resource/resourceadapters/deploy.go 2016-10-13 14:31:49.000000000 +0000 @@ -10,17 +10,40 @@ charmresource "gopkg.in/juju/charm.v6-unstable/resource" "gopkg.in/macaroon.v1" - "github.com/juju/juju/api" + "github.com/juju/juju/api/base" "github.com/juju/juju/charmstore" "github.com/juju/juju/resource/api/client" "github.com/juju/juju/resource/cmd" ) +// DeployResourcesFunc is the function type of DeployResources. +type DeployResourcesFunc func( + applicationID string, + chID charmstore.CharmID, + csMac *macaroon.Macaroon, + filesAndRevisions map[string]string, + resources map[string]charmresource.Meta, + conn base.APICallCloser, +) (ids map[string]string, err error) + // DeployResources uploads the bytes for the given files to the server and // creates pending resource metadata for the all resource mentioned in the // metadata. It returns a map of resource name to pending resource IDs. 
-func DeployResources(applicationID string, chID charmstore.CharmID, csMac *macaroon.Macaroon, filesAndRevisions map[string]string, resources map[string]charmresource.Meta, conn api.Connection) (ids map[string]string, err error) { - client, err := newAPIClient(conn) +func DeployResources( + applicationID string, + chID charmstore.CharmID, + csMac *macaroon.Macaroon, + filesAndRevisions map[string]string, + resources map[string]charmresource.Meta, + conn base.APICallCloser, +) (ids map[string]string, err error) { + + if len(filesAndRevisions)+len(resources) == 0 { + // Nothing to upload. + return nil, nil + } + + client, err := NewAPIClient(conn) if err != nil { return nil, errors.Trace(err) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/rpc/rpcreflect/value.go juju-core-2.0.0/src/github.com/juju/juju/rpc/rpcreflect/value.go --- juju-core-2.0~beta15/src/github.com/juju/juju/rpc/rpcreflect/value.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/rpc/rpcreflect/value.go 2016-10-13 14:31:49.000000000 +0000 @@ -107,6 +107,20 @@ return caller, nil } +// killer is the same interface as rpc.Killer, but redeclared +// here to avoid cyclic dependency. +type killer interface { + Kill() +} + +// Kill implements rpc.Killer.Kill by calling Kill on the root +// value if it implements Killer. 
+func (v Value) Kill() { + if killer, ok := v.rootValue.Interface().(killer); ok { + killer.Kill() + } +} + func (caller methodCaller) Call(objId string, arg reflect.Value) (reflect.Value, error) { obj, err := caller.rootMethod.Call(caller.rootValue, objId) if err != nil { diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/rpc/rpc_test.go juju-core-2.0.0/src/github.com/juju/juju/rpc/rpc_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/rpc/rpc_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/rpc/rpc_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -276,7 +276,7 @@ InterfaceMethods } -type CustomMethodFinder struct { +type CustomRoot struct { root *Root } @@ -310,7 +310,10 @@ return c.objMethod.Call(obj, arg) } -func (cc *CustomMethodFinder) FindMethod( +func (cc *CustomRoot) Kill() { +} + +func (cc *CustomRoot) FindMethod( rootMethodName string, version int, objMethodName string, ) ( rpcreflect.MethodCaller, error, @@ -563,8 +566,8 @@ }) } -func (*rpcSuite) TestCustomMethodFinderV0(c *gc.C) { - root := &CustomMethodFinder{SimpleRoot()} +func (*rpcSuite) TestCustomRootV0(c *gc.C) { + root := &CustomRoot{SimpleRoot()} client, srvDone, serverNotifier := newRPCClientServer(c, root, nil, false) defer closeClient(c, client, srvDone) // V0 of MultiVersion implements only VariableMethods1.Call0r1. @@ -589,8 +592,8 @@ }) } -func (*rpcSuite) TestCustomMethodFinderV1(c *gc.C) { - root := &CustomMethodFinder{SimpleRoot()} +func (*rpcSuite) TestCustomRootV1(c *gc.C) { + root := &CustomRoot{SimpleRoot()} client, srvDone, serverNotifier := newRPCClientServer(c, root, nil, false) defer closeClient(c, client, srvDone) // V1 of MultiVersion implements only VariableMethods2.Call1r1. 
@@ -615,8 +618,8 @@ }) } -func (*rpcSuite) TestCustomMethodFinderV2(c *gc.C) { - root := &CustomMethodFinder{SimpleRoot()} +func (*rpcSuite) TestCustomRootV2(c *gc.C) { + root := &CustomRoot{SimpleRoot()} client, srvDone, serverNotifier := newRPCClientServer(c, root, nil, false) defer closeClient(c, client, srvDone) p := testCallParams{ @@ -642,8 +645,8 @@ }) } -func (*rpcSuite) TestCustomMethodFinderUnknownVersion(c *gc.C) { - root := &CustomMethodFinder{SimpleRoot()} +func (*rpcSuite) TestCustomRootUnknownVersion(c *gc.C) { + root := &CustomRoot{SimpleRoot()} client, srvDone, _ := newRPCClientServer(c, root, nil, false) defer closeClient(c, client, srvDone) var r stringVal @@ -979,30 +982,6 @@ c.Assert(err, jc.ErrorIsNil) } -type KillerCleanerRoot struct { - events []string - Root -} - -func (r *KillerCleanerRoot) Kill() { - r.events = append(r.events, "kill") -} - -func (r *KillerCleanerRoot) Cleanup() { - r.events = append(r.events, "cleanup") -} - -func (*rpcSuite) TestRootIsKilledAndCleaned(c *gc.C) { - root := &KillerCleanerRoot{} - client, srvDone, _ := newRPCClientServer(c, root, nil, false) - err := client.Close() - c.Assert(err, jc.ErrorIsNil) - err = chanReadError(c, srvDone, "server done") - c.Assert(err, jc.ErrorIsNil) - // Kill should happen first. 
- c.Assert(root.events, jc.DeepEquals, []string{"kill", "cleanup"}) -} - func (*rpcSuite) TestBidirectional(c *gc.C) { srvRoot := &Root{} client, srvDone, _ := newRPCClientServer(c, srvRoot, nil, true) @@ -1086,7 +1065,7 @@ } } -func (*rpcSuite) TestCodeNotImplementedMatchesApiserverParams(c *gc.C) { +func (*rpcSuite) TestCodeNotImplementedMatchesAPIserverParams(c *gc.C) { c.Assert(rpc.CodeNotImplemented, gc.Equals, params.CodeNotImplemented) } @@ -1127,8 +1106,8 @@ role = roleBoth } rpcConn := rpc.NewConn(NewJSONCodec(conn, role), serverNotifier) - if custroot, ok := root.(*CustomMethodFinder); ok { - rpcConn.ServeFinder(custroot, tfErr) + if custroot, ok := root.(*CustomRoot); ok { + rpcConn.ServeRoot(custroot, tfErr) custroot.root.conn = rpcConn } else { rpcConn.Serve(root, tfErr) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/rpc/server.go juju-core-2.0.0/src/github.com/juju/juju/rpc/server.go --- juju-core-2.0~beta15/src/github.com/juju/juju/rpc/server.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/rpc/server.go 2016-10-13 14:31:49.000000000 +0000 @@ -118,16 +118,9 @@ // mutex guards the following values. mutex sync.Mutex - // methodFinder is used to lookup methods to serve RPC requests. May be - // nil if nothing is being served. - methodFinder MethodFinder - - // root is the current object that we are serving. - // If it implements the Killer interface, Kill will be called on it - // as the connection shuts down. - // If it implements the Cleaner interface, Cleanup will be called on - // it after the connection has shut down. - root interface{} + // root represents the current root object that serves the RPC requests. + // It may be nil if nothing is being served. + root Root // transformErrors is used to transform returned errors. 
transformErrors func(error) error @@ -223,33 +216,31 @@ func (conn *Conn) Serve(root interface{}, transformErrors func(error) error) { rootValue := rpcreflect.ValueOf(reflect.ValueOf(root)) if rootValue.IsValid() { - conn.serve(rootValue, root, transformErrors) + conn.serve(rootValue, transformErrors) } else { - conn.serve(nil, nil, transformErrors) + conn.serve(nil, transformErrors) } } -// ServeFinder serves RPC requests on the connection by invoking methods retrieved -// from root. Note that it does not start the connection running, though -// it may be called once the connection is already started. +// ServeRoot is like Serve except that it gives the root object dynamic +// control over what methods are available instead of using reflection +// on the type. // // The server executes each client request by calling FindMethod to obtain a // method to invoke. It invokes that method with the request parameters, // possibly returning some result. // -// root can optionally implement the Killer method. If implemented, when the -// connection is closed, root.Kill() will be called. -func (conn *Conn) ServeFinder(finder MethodFinder, transformErrors func(error) error) { - conn.serve(finder, finder, transformErrors) +// The Kill method will be called when the connection is closed. 
+func (conn *Conn) ServeRoot(root Root, transformErrors func(error) error) { + conn.serve(root, transformErrors) } -func (conn *Conn) serve(methodFinder MethodFinder, root interface{}, transformErrors func(error) error) { +func (conn *Conn) serve(root Root, transformErrors func(error) error) { if transformErrors == nil { transformErrors = noopTransform } conn.mutex.Lock() defer conn.mutex.Unlock() - conn.methodFinder = methodFinder conn.root = root conn.transformErrors = transformErrors } @@ -287,7 +278,9 @@ return nil } conn.closing = true - conn.killRequests() + if conn.root != nil { + conn.root.Kill() + } conn.mutex.Unlock() // Wait for any outstanding server requests to complete @@ -296,31 +289,13 @@ // Closing the codec should cause the input loop to terminate. if err := conn.codec.Close(); err != nil { - logger.Infof("error closing codec: %v", err) + logger.Debugf("error closing codec: %v", err) } <-conn.dead - conn.mutex.Lock() - conn.cleanRoot() - conn.mutex.Unlock() - return conn.inputLoopError } -// Kill server requests if appropriate. Client requests will be -// terminated when the input loop finishes. -func (conn *Conn) killRequests() { - if killer, ok := conn.root.(Killer); ok { - killer.Kill() - } -} - -func (conn *Conn) cleanRoot() { - if cleaner, ok := conn.root.(Cleaner); ok { - cleaner.Cleanup() - } -} - // ErrorCoder represents an any error that has an associated // error code. An error code is a short string that represents the // kind of an error. @@ -328,10 +303,11 @@ ErrorCode() string } -// MethodFinder represents a type that can be used to lookup a Method and place +// Root represents a type that can be used to lookup a Method and place // calls on that method. 
-type MethodFinder interface { +type Root interface { FindMethod(rootName string, version int, methodName string) (rpcreflect.MethodCaller, error) + Killer } // Killer represents a type that can be asked to abort any outstanding @@ -340,12 +316,6 @@ Kill() } -// Cleaner represents a type that can be asked to clean up after -// itself once the connection has closed. -type Cleaner interface { - Cleanup() -} - // input reads messages from the connection and handles them // appropriately. func (conn *Conn) input() { @@ -488,14 +458,14 @@ // a boundRequest that can call those methods. func (conn *Conn) bindRequest(hdr *Header) (boundRequest, error) { conn.mutex.Lock() - methodFinder := conn.methodFinder + root := conn.root transformErrors := conn.transformErrors conn.mutex.Unlock() - if methodFinder == nil { + if root == nil { return boundRequest{}, errors.New("no service") } - caller, err := methodFinder.FindMethod( + caller, err := root.FindMethod( hdr.Request.Type, hdr.Request.Version, hdr.Request.Action) if err != nil { if _, ok := err.(*rpcreflect.CallNotImplementedError); ok { diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/scripts/setup-lxd.sh juju-core-2.0.0/src/github.com/juju/juju/scripts/setup-lxd.sh --- juju-core-2.0~beta15/src/github.com/juju/juju/scripts/setup-lxd.sh 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/scripts/setup-lxd.sh 2016-10-13 14:31:49.000000000 +0000 @@ -1,12 +1,27 @@ -#!/bin/sh +#!/bin/bash # Copyright 2016 Canonical Ltd. # Licensed under the AGPLv3, see LICENCE file for details. -set -ex +set -eux # Do the manual steps a user has to run on a fresh system to get an lxd # bridge so the juju lxd provider can function. Taken from changes made # to cloud-init to do approximately this. +VERSION=$(lxd --version) + +# LXD 2.3+ needs lxdbr0 setup via lxc. +if [[ "$VERSION" > "2.2" ]]; then + if [[ ! $(lxc network list | grep lxdbr0) ]]; then + # Configure a known address ranges for lxdbr0. 
+ lxc network create lxdbr0 \ + ipv4.address=10.0.8.1/24 ipv4.nat=true \ + ipv6.address=none ipv6.nat=false + fi + lxc network show lxdbr0 + exit 0 +fi + +# LXD 2.2 and earlier use debconf to create and configure the network. debconf-communicate << EOF set lxd/setup-bridge true set lxd/bridge-domain lxd diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/scripts/verify.bash juju-core-2.0.0/src/github.com/juju/juju/scripts/verify.bash --- juju-core-2.0~beta15/src/github.com/juju/juju/scripts/verify.bash 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/scripts/verify.bash 2016-10-13 14:31:49.000000000 +0000 @@ -22,19 +22,47 @@ echo "checking: go vet ..." + +# Define additional Printf style functions to check. These add to the +# default list of standard library functions that go vet already has. +logging_prints="\ +Tracef +Debugf +Infof +Warningf +Errorf +Criticalf +" + +error_prints="\ +AlreadyExistsf +BadRequestf +MethodNotAllowedf +NotAssignedf +NotFoundf +NotImplementedf +NotProvisionedf +NotSupportedf +NotValidf +Unauthorizedf +UserNotFoundf +" + +# Under Go 1.6, the vet docs say that -printfuncs takes each print +# function in "name:N" format. This has changed in Go 1.7 and doesn't +# actually seem to make a difference under 1.6 either don't bother. +all_prints=`echo $logging_prints $error_prints | tr " " ,` + go tool vet \ - -methods \ - -printf \ - -rangeloops \ - -printfuncs 'ErrorContextf:1,notFoundf:0,badReqErrorf:0,Commitf:0,Snapshotf:0,Debugf:0,Infof:0,Warningf:0,Errorf:0,Criticalf:0,Tracef:0' \ + -all \ + -composites=false \ + -copylocks=false \ + -printfuncs=$all_prints \ . || [ -n "$IGNORE_VET_WARNINGS" ] echo "checking: go build ..." -# check this branch builds cleanly go build github.com/juju/juju/... echo "checking: tests are wired up ..." 
-# check that all tests are wired up ./scripts/checktesting.bash - diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/scripts/win-installer/README.txt juju-core-2.0.0/src/github.com/juju/juju/scripts/win-installer/README.txt --- juju-core-2.0~beta15/src/github.com/juju/juju/scripts/win-installer/README.txt 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/scripts/win-installer/README.txt 2016-10-13 14:31:49.000000000 +0000 @@ -1,133 +1,20 @@ -Introduction to Juju +Congratulations on installing the Juju client software! -This tutorial will show you how to get started with Juju, including installing, configuring and bootstrapping a new Juju model. Before you start you will need: +Note that any configuration files created by Juju will be stored in the +'%APPDATA%\Juju' directory. You may wish to include this in regular backups. -* An Ubuntu, Windows or OSX machine to install the client on. +In order to deploy applications to a cloud you will also need: -* A model which can provide a new server with an Ubuntu cloud operating system image on-demand. This includes services such as Microsoft Azure, Amazon EC2, HP Cloud, or an OpenStack installation. +* Credentials for a cloud provider which can instantiate a suitable operating +system image on-demand. This includes services such as Microsoft Azure, +Amazon EC2, Google Compute Engine, or an OpenStack installation. -* An SSH key-pair. Juju expects to find ssh keys called id_rsa and id_rsa.pub in a .ssh folder in your home directory. +* An SSH key-pair. Juju expects to find ssh keys called id_rsa and id_rsa.pub +in a directory called '.ssh' in your home directory. There are instructions on +how to generate these keys at: -Configuring + https://jujucharms.com/docs/stable/getting-started-keygen-win -Now the Juju software is installed, it needs to be configured to use your particular cloud provider. 
This is done by generating and editing a file, "environments.yaml", which will live in your %LOCALAPPDATA%\Juju directory. You can generate the models file manually, but Juju also includes a boilerplate configuration option that will flesh out most of the file for you and minimise the amount of work (and potential errors). +To continue, please follow the online documentation at: -To generate an initial config file, you simply need to run: - -> juju generate-config - -This causes the file to be written to your %LOCALAPPDATA%\Juju directory if an environments.yaml file does not already exist. It will also create the %LOCALAPPDATA%\Juju directory if that does not exist. - -This file will contain sample profiles for different types of cloud services, but you will need to edit the files to provide specific information for your cloud provider. Sections are created for Amazon (AWS) services, HPCloud and a generic OpenStack instance. For more specifics on what needs to be changed, see https://juju.ubuntu.com/docs/getting-started.html - -Testing your setup - -Once you have installed and configured Juju, it is probably a good idea to take it for a bit of a test drive and check that everything is working as expected. Because Juju makes it really easy to deploy services, this is actually quick and straightforward. - - -The first thing to do is set up a bootstrap model. This is an instance in the cloud that Juju will use to deploy and control other services with. It will be created according to the configuration you have provided, and your SSH key will automatically be uploaded so that Juju can communicate securely with the bootstrap instance. - -> juju bootstrap - -Note: If you have multiple models configured, you can choose which one to address with a particular command by adding the -e switch followed by the model name, E.g. "-e hpcloud". - -You may have to wait a few moments for this command to return, as it needs to perform various tasks and contact your cloud provider. 
- -Assuming it returns successfully (otherwise see common error messages and what to do about them - https://juju.ubuntu.com/docs/getting-started.html#errors), we can now deploy some services and explore the basic operations of Juju. - -To start with, we will deploy Wordpress, by running this command: - -> juju deploy wordpress - -Now juju will fetch the Wordpress charm and use it, through the bootstrap instance to request and deploy whatever resources it needs to set up this service. - -Wordpress needs a database though, so we will also deploy one of those: - -> juju deploy mysql - -Once again, juju will do whatever is necessary to deploy this service for you, and it may take some time for the command to return. - -Note: If you want to get more information on what is actually happening, or to help resolve problems, you can add the -v switch to the juju command to get verbose output. - -Although we have deployed Wordpress and a MySQL database, they are not linked together in any way yet. To do this we should run: - -> juju add-relation wordpress mysql - -This command uses information provided by the relevant charms to associate these services with each other in whatever way makes sense. There is much more to be said about linking services together which is covered in the juju command documentation, but for the moment, we just need to know that it will link these services together. Juju command documentation: https://juju.ubuntu.com/docs/getting-started.html#add-relation - -In order to make our Wordpress public, we now need to expose this service: - -> juju expose wordpress - -This service will now be configured to respond to web requests, so visitors can see it. But where exactly is it? If we run the juju status command, we will be able to see what services are running, and where they are located. 
- -> juju status - -The output from this command should look something like this: - -> juju status -machines: - "0": - agent-state: started - agent-version: 1.10.0 - dns-name: ec2-50-16-167-135.compute-1.amazonaws.com - instance-id: i-781bf614 - series: precise - "1": - agent-state: started - agent-version: 1.10.0 - dns-name: ec2-23-22-225-54.compute-1.amazonaws.com - instance-id: i-9e8927f6 - series: precise - "2": - agent-state: started - agent-version: 1.10.0 - dns-name: ec2-54-224-220-210.compute-1.amazonaws.com - instance-id: i-5c440436 - series: precise -services: - mysql: - charm: cs:precise/mysql-18 - exposed: false - relations: - db: - - wordpress - units: - mysql/0: - agent-state: started - agent-version: 1.10.0 - machine: "1" - public-address: ec2-23-22-225-54.compute-1.amazonaws.com - wordpress: - charm: cs:precise/wordpress-12 - exposed: true - relations: - db: - - mysql - loadbalancer: - - wordpress - units: - wordpress/0: - agent-state: started - agent-version: 1.10.0 - machine: "2" - public-address: ec2-54-224-220-210.compute-1.amazonaws.com - - -There is quite a lot of information here. the first section, titled machines:, details all the instances which are currently running. For each you will see the version of Juju they are running, their hostname, instance id and the series or version of Ubuntu they are running. - -After that, the sections list the services which are currently deployed. The information here differs slightly according to the service and how it is configured. It will however, always list the charm that was used to deploy the service, whether it is exposed or not, its address and whatever relationships exist. - -From this status readout, we can see that wordpress is exposed and ready. If we simply copy the address into a web browser, we should be able to see it running. - -Congratulations, you have just deployed a service with Juju! 
- -Now you are ready to deploy whatever service you really want from the 100s available at the Juju Charm Store: http://jujucharms.com/ - -To remove all current deployments and clear up everything in your cloud, you can run the command: - -> juju destroy-model - -This will remove everything, including the bootstrap node. - -To learn more about charms, including configuring options and managing running systems, you should continue to read the charm documentation here: https://juju.ubuntu.com/docs/charms.html + https://jujucharms.com/docs/stable/getting-started-other diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/scripts/win-installer/setup.iss juju-core-2.0.0/src/github.com/juju/juju/scripts/win-installer/setup.iss --- juju-core-2.0~beta15/src/github.com/juju/juju/scripts/win-installer/setup.iss 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/scripts/win-installer/setup.iss 2016-10-13 14:31:49.000000000 +0000 @@ -2,9 +2,9 @@ ; SEE THE DOCUMENTATION FOR DETAILS ON CREATING INNO SETUP SCRIPT FILES! #define MyAppName "Juju" -#define MyAppVersion "2.0-beta15" +#define MyAppVersion "2.0.0" #define MyAppPublisher "Canonical, Ltd" -#define MyAppURL "http://juju.ubuntu.com/" +#define MyAppURL "http://jujucharms.com/" #define MyAppExeName "juju.exe" [Setup] diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/snapcraft.yaml juju-core-2.0.0/src/github.com/juju/juju/snapcraft.yaml --- juju-core-2.0~beta15/src/github.com/juju/juju/snapcraft.yaml 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/snapcraft.yaml 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,24 @@ +name: juju +version: 2.0.0 +summary: juju client +description: Through the use of charms, juju provides you with shareable, re-usable, and repeatable expressions of devops best practices. 
+confinement: devmode + +apps: + juju: + command: bin/juju + plugs: [network, network-bind, home] + +parts: + juju: + plugin: godeps + go-importpath: github.com/juju/juju + source: https://github.com/juju/juju.git + source-type: git + #If you want to grab a specific revision tag, include it here + #source-tag: juju-2.0.0 + snap: + - -bin/filetoconst + - -bin/winuserdata + #If you are releasing a build with public streams, also remove the agent + #- -bin/jujud diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/action.go juju-core-2.0.0/src/github.com/juju/juju/state/action.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/action.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/action.go 2016-10-13 14:31:49.000000000 +0000 @@ -192,7 +192,7 @@ Assert: bson.D{{"status", ActionPending}}, Update: bson.D{{"$set", bson.D{ {"status", ActionRunning}, - {"started", nowToTheSecond()}, + {"started", a.st.NowToTheSecond()}, }}}, }}) if err != nil { @@ -225,7 +225,7 @@ {"status", finalStatus}, {"message", message}, {"results", results}, - {"completed", nowToTheSecond()}, + {"completed", a.st.NowToTheSecond()}, }}}, }, { C: actionNotificationsC, @@ -261,7 +261,7 @@ Receiver: receiverTag.Id(), Name: actionName, Parameters: parameters, - Enqueued: nowToTheSecond(), + Enqueued: st.NowToTheSecond(), Status: ActionPending, }, actionNotificationDoc{ DocId: st.docID(prefix + actionId.String()), @@ -291,6 +291,24 @@ return newAction(st, doc), nil } +// AllActions returns all Actions. 
+func (st *State) AllActions() ([]Action, error) { + actionLogger.Tracef("AllActions()") + actions, closer := st.getCollection(actionsC) + defer closer() + + results := []Action{} + docs := []actionDoc{} + err := actions.Find(nil).All(&docs) + if err != nil { + return nil, errors.Annotatef(err, "cannot get all actions") + } + for _, doc := range docs { + results = append(results, newAction(st, doc)) + } + return results, nil +} + // ActionByTag returns an Action given an ActionTag. func (st *State) ActionByTag(tag names.ActionTag) (Action, error) { return st.Action(tag.Id()) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/action_test.go juju-core-2.0.0/src/github.com/juju/juju/state/action_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/action_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/action_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -47,23 +47,23 @@ s.actionlessService = s.AddTestingService(c, "actionless", s.actionlessCharm) c.Assert(err, jc.ErrorIsNil) - sUrl, _ := s.service.CharmURL() - c.Assert(sUrl, gc.NotNil) - actionlessSUrl, _ := s.actionlessService.CharmURL() - c.Assert(actionlessSUrl, gc.NotNil) + sURL, _ := s.service.CharmURL() + c.Assert(sURL, gc.NotNil) + actionlessSURL, _ := s.actionlessService.CharmURL() + c.Assert(actionlessSURL, gc.NotNil) s.unit, err = s.service.AddUnit() c.Assert(err, jc.ErrorIsNil) c.Assert(s.unit.Series(), gc.Equals, "quantal") - err = s.unit.SetCharmURL(sUrl) + err = s.unit.SetCharmURL(sURL) c.Assert(err, jc.ErrorIsNil) s.unit2, err = s.service.AddUnit() c.Assert(err, jc.ErrorIsNil) c.Assert(s.unit2.Series(), gc.Equals, "quantal") - err = s.unit2.SetCharmURL(sUrl) + err = s.unit2.SetCharmURL(sURL) c.Assert(err, jc.ErrorIsNil) s.charmlessUnit, err = s.service.AddUnit() @@ -74,7 +74,7 @@ c.Assert(err, jc.ErrorIsNil) c.Assert(s.actionlessUnit.Series(), gc.Equals, "quantal") - err = s.actionlessUnit.SetCharmURL(actionlessSUrl) + err = 
s.actionlessUnit.SetCharmURL(actionlessSURL) c.Assert(err, jc.ErrorIsNil) } @@ -131,7 +131,7 @@ expectedErr: "validation failed: \\(root\\)\\.outfile : must be of type string, given 5", }} { c.Logf("Test %d: should %s", i, t.should) - before := state.NowToTheSecond() + before := s.State.NowToTheSecond() later := before.Add(testing.LongWait) // Copy params over into empty premade map for comparison later @@ -161,7 +161,7 @@ // Enqueued time should be within a reasonable time of the beginning // of the test - now := state.NowToTheSecond() + now := s.State.NowToTheSecond() c.Check(action.Enqueued(), jc.TimeBetween(before, now)) c.Check(action.Enqueued(), jc.TimeBetween(before, later)) continue @@ -278,15 +278,15 @@ svc := s.AddTestingService(c, svcName, ch) // Get its charm URL - sUrl, _ := svc.CharmURL() - c.Assert(sUrl, gc.NotNil) + sURL, _ := svc.CharmURL() + c.Assert(sURL, gc.NotNil) // Add a unit var err error u, err := svc.AddUnit() c.Assert(err, jc.ErrorIsNil) c.Assert(u.Series(), gc.Equals, "quantal") - err = u.SetCharmURL(sUrl) + err = u.SetCharmURL(sURL) c.Assert(err, jc.ErrorIsNil) units[name] = u diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/addmachine.go juju-core-2.0.0/src/github.com/juju/juju/state/addmachine.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/addmachine.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/addmachine.go 2016-10-13 14:31:49.000000000 +0000 @@ -6,7 +6,6 @@ import ( "fmt" "strconv" - "time" "github.com/juju/errors" "github.com/juju/replicaset" @@ -436,6 +435,10 @@ // thing to do when none is available. 
privateAddr, _ := network.SelectInternalAddress(template.Addresses, false) publicAddr, _ := network.SelectPublicAddress(template.Addresses) + logger.Infof( + "new machine %q has preferred addresses: private %q, public %q", + id, privateAddr, publicAddr, + ) return &machineDoc{ DocID: st.docID(id), Id: id, @@ -458,16 +461,16 @@ // into the database, based on the given template. Only the constraints are // taken from the template. func (st *State) insertNewMachineOps(mdoc *machineDoc, template MachineTemplate) (prereqOps []txn.Op, machineOp txn.Op, err error) { + now := st.clock.Now() machineStatusDoc := statusDoc{ - Status: status.StatusPending, + Status: status.Pending, ModelUUID: st.ModelUUID(), - // TODO(fwereade): 2016-03-17 lp:1558657 - Updated: time.Now().UnixNano(), + Updated: now.UnixNano(), } instanceStatusDoc := statusDoc{ - Status: status.StatusPending, + Status: status.Pending, ModelUUID: st.ModelUUID(), - Updated: time.Now().UnixNano(), + Updated: now.UnixNano(), } prereqOps, machineOp = st.baseNewMachineOps( @@ -533,6 +536,31 @@ filesystemAttachments map[names.FilesystemTag]FilesystemAttachmentParams } +func combineMachineStorageParams(lhs, rhs *machineStorageParams) *machineStorageParams { + out := &machineStorageParams{} + out.volumes = append(lhs.volumes[:], rhs.volumes...) + out.filesystems = append(lhs.filesystems[:], rhs.filesystems...) 
+ if lhs.volumeAttachments != nil || rhs.volumeAttachments != nil { + out.volumeAttachments = make(map[names.VolumeTag]VolumeAttachmentParams) + for k, v := range lhs.volumeAttachments { + out.volumeAttachments[k] = v + } + for k, v := range rhs.volumeAttachments { + out.volumeAttachments[k] = v + } + } + if lhs.filesystemAttachments != nil || rhs.filesystemAttachments != nil { + out.filesystemAttachments = make(map[names.FilesystemTag]FilesystemAttachmentParams) + for k, v := range lhs.filesystemAttachments { + out.filesystemAttachments[k] = v + } + for k, v := range rhs.filesystemAttachments { + out.filesystemAttachments[k] = v + } + } + return out +} + // machineStorageOps creates txn.Ops for creating volumes, filesystems, // and attachments to the specified machine. The results are the txn.Ops, // and the tags of volumes and filesystems newly attached to the machine. diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/address.go juju-core-2.0.0/src/github.com/juju/juju/state/address.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/address.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/address.go 2016-10-13 14:31:49.000000000 +0000 @@ -9,6 +9,7 @@ "strconv" "github.com/juju/errors" + statetxn "github.com/juju/txn" "gopkg.in/mgo.v2/bson" "gopkg.in/mgo.v2/txn" @@ -104,16 +105,20 @@ type apiHostPortsDoc struct { APIHostPorts [][]hostPort `bson:"apihostports"` + TxnRevno int64 `bson:"txn-revno"` } // SetAPIHostPorts sets the addresses of the API server instances. // Each server is represented by one element in the top level slice. 
func (st *State) SetAPIHostPorts(netHostsPorts [][]network.HostPort) error { + controllers, closer := st.getCollection(controllersC) + defer closer() doc := apiHostPortsDoc{ APIHostPorts: fromNetworkHostsPorts(netHostsPorts), } buildTxn := func(attempt int) ([]txn.Op, error) { - existing, err := st.APIHostPorts() + var existingDoc apiHostPortsDoc + err := controllers.Find(bson.D{{"_id", apiHostPortsKey}}).One(&existingDoc) if err != nil { return nil, err } @@ -121,13 +126,16 @@ C: controllersC, Id: apiHostPortsKey, Assert: bson.D{{ - "apihostports", fromNetworkHostsPorts(existing), + "txn-revno", existingDoc.TxnRevno, }}, } - if !hostsPortsEqual(netHostsPorts, existing) { + hostPorts := networkHostsPorts(existingDoc.APIHostPorts) + if !hostsPortsEqual(netHostsPorts, hostPorts) { op.Update = bson.D{{ "$set", bson.D{{"apihostports", doc.APIHostPorts}}, }} + } else { + return nil, statetxn.ErrNoOperations } return []txn.Op{op}, nil } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/address_test.go juju-core-2.0.0/src/github.com/juju/juju/state/address_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/address_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/address_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -7,7 +7,6 @@ jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" - "github.com/juju/juju/juju/testing" "github.com/juju/juju/network" "github.com/juju/juju/state" "github.com/juju/juju/testing/factory" @@ -40,13 +39,13 @@ } type ControllerAddressesSuite struct { - testing.JujuConnSuite + ConnSuite } var _ = gc.Suite(&ControllerAddressesSuite{}) func (s *ControllerAddressesSuite) SetUpTest(c *gc.C) { - s.JujuConnSuite.SetUpTest(c) + s.ConnSuite.SetUpTest(c) // Make sure there is a machine with manage state in existence. 
machine := s.Factory.MakeMachine(c, &factory.MachineParams{ Jobs: []state.MachineJob{state.JobManageModel, state.JobHostUnits}, diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/allcollections.go juju-core-2.0.0/src/github.com/juju/juju/state/allcollections.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/allcollections.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/allcollections.go 2016-10-13 14:31:49.000000000 +0000 @@ -183,19 +183,19 @@ indexes: bakerystorage.MongoIndexes(), }, - // ----------------- - - // Local collections - // ================= - - // This collection holds users related to a model and will be usde as one - // of the intersection axis of permissionsC - modelUsersC: {}, - // This collection is basically a standard SQL intersection table; it // references the global records of the users allowed access to a // given operation. - permissionsC: {}, + permissionsC: { + global: true, + }, + + // This collection holds information cached by autocert certificate + // acquisition. + autocertCacheC: { + global: true, + rawAccess: true, + }, // This collection holds the last time the model user connected // to the model. @@ -203,6 +203,15 @@ rawAccess: true, }, + // ----------------- + + // Local collections + // ================= + + // This collection holds users related to a model and will be used as one + // of the intersection axis of permissionsC + modelUsersC: {}, + // This collection contains governors that prevent certain kinds of // changes from being accepted. blocksC: {}, @@ -249,8 +258,8 @@ assignUnitC: {}, // meterStatusC is the collection used to store meter status information. - meterStatusC: {}, - settingsrefsC: {}, + meterStatusC: {}, + refcountsC: {}, relationsC: { indexes: []mgo.Index{{ Key: []string{"model-uuid", "endpoints.relationname"}, @@ -371,7 +380,9 @@ }, // This collection holds information about cloud image metadata. 
- cloudimagemetadataC: {}, + cloudimagemetadataC: { + global: true, + }, // ---------------------- @@ -396,6 +407,7 @@ actionresultsC = "actionresults" actionsC = "actions" annotationsC = "annotations" + autocertCacheC = "autocertCache" assignUnitC = "assignUnits" auditingC = "audit.log" bakeryStorageItemsC = "bakeryStorageItems" @@ -443,7 +455,7 @@ applicationsC = "applications" endpointBindingsC = "endpointbindings" settingsC = "settings" - settingsrefsC = "settingsrefs" + refcountsC = "refcounts" sshHostKeysC = "sshhostkeys" spacesC = "spaces" statusesC = "statuses" diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/allwatcher.go juju-core-2.0.0/src/github.com/juju/juju/state/allwatcher.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/allwatcher.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/allwatcher.go 2016-10-13 14:31:49.000000000 +0000 @@ -6,7 +6,6 @@ import ( "reflect" "strings" - "time" "github.com/juju/errors" "gopkg.in/juju/names.v2" @@ -117,7 +116,7 @@ Name: e.Name, Life: multiwatcher.Life(e.Life.String()), Owner: e.Owner, - ControllerUUID: e.ServerUUID, + ControllerUUID: e.ControllerUUID, }) return nil } @@ -423,10 +422,9 @@ // Not sure how status can even return NotFound as it is created // with the application initially. For now, we'll log the error as per // the above and return Unknown. - // TODO(fwereade): 2016-03-17 lp:1558657 - now := time.Now() + now := st.clock.Now() info.Status = multiwatcher.StatusInfo{ - Current: status.StatusUnknown, + Current: status.Unknown, Since: &now, Data: normaliseStatusData(nil), } @@ -648,13 +646,13 @@ // Unit or workload status - display the agent status or any error. // NOTE: thumper 2016-06-27, this is truely horrible, and we are lying to our users. // however, this is explicitly what has been asked for as much as we dislike it. 
- if strings.HasSuffix(id, "#charm") || s.Status == status.StatusError { + if strings.HasSuffix(id, "#charm") || s.Status == status.Error { newInfo.WorkloadStatus = s.toStatusInfo() } else { newInfo.AgentStatus = s.toStatusInfo() // If the unit was in error and now it's not, we need to reset its // status back to what was previously recorded. - if newInfo.WorkloadStatus.Current == status.StatusError { + if newInfo.WorkloadStatus.Current == status.Error { newInfo.WorkloadStatus.Current = unitStatus.Status newInfo.WorkloadStatus.Message = unitStatus.Message newInfo.WorkloadStatus.Data = unitStatus.Data diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/allwatcher_internal_test.go juju-core-2.0.0/src/github.com/juju/juju/state/allwatcher_internal_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/allwatcher_internal_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/allwatcher_internal_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -6,7 +6,6 @@ import ( "fmt" "sort" - "time" "github.com/juju/errors" "github.com/juju/loggo" @@ -57,7 +56,7 @@ "uuid": utils.MustNewUUID().String(), }) _, st, err := s.state.NewModel(ModelArgs{ - CloudName: "dummy", Config: cfg, Owner: s.owner, + CloudName: "dummy", CloudRegion: "dummy-region", Config: cfg, Owner: s.owner, StorageProviderRegistry: storage.StaticProviderRegistry{}, }) c.Assert(err, jc.ErrorIsNil) @@ -100,11 +99,11 @@ Id: "0", InstanceId: "i-machine-0", AgentStatus: multiwatcher.StatusInfo{ - Current: status.StatusPending, + Current: status.Pending, Data: map[string]interface{}{}, }, InstanceStatus: multiwatcher.StatusInfo{ - Current: status.StatusPending, + Current: status.Pending, Data: map[string]interface{}{}, }, Life: multiwatcher.Life("alive"), @@ -135,8 +134,8 @@ Config: charm.Settings{"blog-title": "boring"}, Subordinate: false, Status: multiwatcher.StatusInfo{ - Current: "unknown", - Message: "Waiting for agent initialization to finish", + Current: 
"waiting", + Message: "waiting for machine", Data: map[string]interface{}{}, }, }) @@ -158,8 +157,8 @@ Config: charm.Settings{}, Subordinate: true, Status: multiwatcher.StatusInfo{ - Current: "unknown", - Message: "Waiting for agent initialization to finish", + Current: "waiting", + Message: "waiting for machine", Data: map[string]interface{}{}, }, }) @@ -195,8 +194,8 @@ Ports: []multiwatcher.Port{}, Subordinate: false, WorkloadStatus: multiwatcher.StatusInfo{ - Current: "unknown", - Message: "Waiting for agent initialization to finish", + Current: "waiting", + Message: "waiting for machine", Data: map[string]interface{}{}, }, AgentStatus: multiwatcher.StatusInfo{ @@ -216,9 +215,9 @@ err = m.SetProvisioned(instance.Id("i-"+m.Tag().String()), "fake_nonce", nil) c.Assert(err, jc.ErrorIsNil) - now := time.Now() + now := testing.ZeroTime() sInfo := status.StatusInfo{ - Status: status.StatusError, + Status: status.Error, Message: m.Tag().String(), Since: &now, } @@ -231,12 +230,12 @@ Id: fmt.Sprint(i + 1), InstanceId: "i-" + m.Tag().String(), AgentStatus: multiwatcher.StatusInfo{ - Current: status.StatusError, + Current: status.Error, Message: m.Tag().String(), Data: map[string]interface{}{}, }, InstanceStatus: multiwatcher.StatusInfo{ - Current: status.StatusPending, + Current: status.Pending, Data: map[string]interface{}{}, }, Life: multiwatcher.Life("alive"), @@ -276,8 +275,8 @@ Ports: []multiwatcher.Port{}, Subordinate: true, WorkloadStatus: multiwatcher.StatusInfo{ - Current: "unknown", - Message: "Waiting for agent initialization to finish", + Current: "waiting", + Message: "waiting for machine", Data: map[string]interface{}{}, }, AgentStatus: multiwatcher.StatusInfo{ @@ -610,8 +609,8 @@ Ports: []multiwatcher.Port{{"tcp", 12345}}, PortRanges: []multiwatcher.PortRange{{12345, 12345, "tcp"}}, WorkloadStatus: multiwatcher.StatusInfo{ - Current: "unknown", - Message: "Waiting for agent initialization to finish", + Current: "waiting", + Message: "waiting for machine", 
Data: map[string]interface{}{}, }, AgentStatus: multiwatcher.StatusInfo{ @@ -646,8 +645,8 @@ Ports: []multiwatcher.Port{}, PortRanges: []multiwatcher.PortRange{}, WorkloadStatus: multiwatcher.StatusInfo{ - Current: "unknown", - Message: "Waiting for agent initialization to finish", + Current: "waiting", + Message: "waiting for machine", Data: map[string]interface{}{}, }, AgentStatus: multiwatcher.StatusInfo{ @@ -725,18 +724,18 @@ // Expect to see events for the already created machines first. deltas := tw.All(2) - now := time.Now() + now := testing.ZeroTime() checkDeltasEqual(c, deltas, []multiwatcher.Delta{{ Entity: &multiwatcher.MachineInfo{ ModelUUID: s.state.ModelUUID(), Id: "0", AgentStatus: multiwatcher.StatusInfo{ - Current: status.StatusPending, + Current: status.Pending, Data: map[string]interface{}{}, Since: &now, }, InstanceStatus: multiwatcher.StatusInfo{ - Current: status.StatusPending, + Current: status.Pending, Data: map[string]interface{}{}, Since: &now, }, @@ -752,12 +751,12 @@ ModelUUID: s.state.ModelUUID(), Id: "1", AgentStatus: multiwatcher.StatusInfo{ - Current: status.StatusPending, + Current: status.Pending, Data: map[string]interface{}{}, Since: &now, }, InstanceStatus: multiwatcher.StatusInfo{ - Current: status.StatusPending, + Current: status.Pending, Data: map[string]interface{}{}, Since: &now, }, @@ -781,12 +780,12 @@ ModelUUID: s.state.ModelUUID(), Id: "1", AgentStatus: multiwatcher.StatusInfo{ - Current: status.StatusPending, + Current: status.Pending, Data: map[string]interface{}{}, Since: &now, }, InstanceStatus: multiwatcher.StatusInfo{ - Current: status.StatusPending, + Current: status.Pending, Data: map[string]interface{}{}, Since: &now, }, @@ -809,12 +808,12 @@ ModelUUID: s.state.ModelUUID(), Id: "1", AgentStatus: multiwatcher.StatusInfo{ - Current: status.StatusPending, + Current: status.Pending, Data: map[string]interface{}{}, Since: &now, }, InstanceStatus: multiwatcher.StatusInfo{ - Current: status.StatusPending, + Current: 
status.Pending, Data: map[string]interface{}{}, Since: &now, }, @@ -861,12 +860,12 @@ Id: "0", InstanceId: "i-0", AgentStatus: multiwatcher.StatusInfo{ - Current: status.StatusPending, + Current: status.Pending, Data: map[string]interface{}{}, Since: &now, }, InstanceStatus: multiwatcher.StatusInfo{ - Current: status.StatusPending, + Current: status.Pending, Data: map[string]interface{}{}, Since: &now, }, @@ -889,12 +888,12 @@ ModelUUID: s.state.ModelUUID(), Id: "2", AgentStatus: multiwatcher.StatusInfo{ - Current: status.StatusPending, + Current: status.Pending, Data: map[string]interface{}{}, Since: &now, }, InstanceStatus: multiwatcher.StatusInfo{ - Current: status.StatusPending, + Current: status.Pending, Data: map[string]interface{}{}, Since: &now, }, @@ -913,8 +912,8 @@ Life: "alive", Config: make(map[string]interface{}), Status: multiwatcher.StatusInfo{ - Current: "unknown", - Message: "Waiting for agent initialization to finish", + Current: "waiting", + Message: "waiting for machine", Data: map[string]interface{}{}, }, }, @@ -926,8 +925,8 @@ Series: "quantal", MachineId: "2", WorkloadStatus: multiwatcher.StatusInfo{ - Current: "unknown", - Message: "Waiting for agent initialization to finish", + Current: "waiting", + Message: "waiting for machine", Data: map[string]interface{}{}, }, AgentStatus: multiwatcher.StatusInfo{ @@ -1020,9 +1019,9 @@ m, err := st.Machine("0") c.Assert(err, jc.ErrorIsNil) - now := time.Now() + now := testing.ZeroTime() sInfo := status.StatusInfo{ - Status: status.StatusError, + Status: status.Error, Message: "pete tong", Since: &now, } @@ -1279,7 +1278,7 @@ initialContents: []multiwatcher.EntityInfo{&multiwatcher.ApplicationInfo{ ModelUUID: st.ModelUUID(), Name: "wordpress", - Constraints: constraints.MustParse("mem=99M cpu-cores=2 cpu-power=4"), + Constraints: constraints.MustParse("mem=99M cores=2 cpu-power=4"), }}, change: watcher.Change{ C: "constraints", @@ -1410,11 +1409,11 @@ ModelUUID: st0.ModelUUID(), Id: "0", AgentStatus: 
multiwatcher.StatusInfo{ - Current: status.StatusPending, + Current: status.Pending, Data: map[string]interface{}{}, }, InstanceStatus: multiwatcher.StatusInfo{ - Current: status.StatusPending, + Current: status.Pending, Data: map[string]interface{}{}, }, Life: multiwatcher.Life("alive"), @@ -1429,11 +1428,11 @@ ModelUUID: st1.ModelUUID(), Id: "0", AgentStatus: multiwatcher.StatusInfo{ - Current: status.StatusPending, + Current: status.Pending, Data: map[string]interface{}{}, }, InstanceStatus: multiwatcher.StatusInfo{ - Current: status.StatusPending, + Current: status.Pending, Data: map[string]interface{}{}, }, Life: multiwatcher.Life("alive"), @@ -1456,11 +1455,11 @@ ModelUUID: st1.ModelUUID(), Id: "0", AgentStatus: multiwatcher.StatusInfo{ - Current: status.StatusPending, + Current: status.Pending, Data: map[string]interface{}{}, }, InstanceStatus: multiwatcher.StatusInfo{ - Current: status.StatusPending, + Current: status.Pending, Data: map[string]interface{}{}, }, Life: multiwatcher.Life("dying"), @@ -1482,11 +1481,11 @@ ModelUUID: st1.ModelUUID(), Id: "0", AgentStatus: multiwatcher.StatusInfo{ - Current: status.StatusPending, + Current: status.Pending, Data: map[string]interface{}{}, }, InstanceStatus: multiwatcher.StatusInfo{ - Current: status.StatusPending, + Current: status.Pending, Data: map[string]interface{}{}, }, Life: multiwatcher.Life("dead"), @@ -1534,11 +1533,11 @@ Id: "0", InstanceId: "i-0", AgentStatus: multiwatcher.StatusInfo{ - Current: status.StatusPending, + Current: status.Pending, Data: map[string]interface{}{}, }, InstanceStatus: multiwatcher.StatusInfo{ - Current: status.StatusPending, + Current: status.Pending, Data: map[string]interface{}{}, }, Life: multiwatcher.Life("alive"), @@ -1560,11 +1559,11 @@ ModelUUID: st1.ModelUUID(), Id: "1", AgentStatus: multiwatcher.StatusInfo{ - Current: status.StatusPending, + Current: status.Pending, Data: map[string]interface{}{}, }, InstanceStatus: multiwatcher.StatusInfo{ - Current: 
status.StatusPending, + Current: status.Pending, Data: map[string]interface{}{}, }, Life: multiwatcher.Life("alive"), @@ -1582,8 +1581,8 @@ Life: "alive", Config: make(map[string]interface{}), Status: multiwatcher.StatusInfo{ - Current: "unknown", - Message: "Waiting for agent initialization to finish", + Current: "waiting", + Message: "waiting for machine", Data: map[string]interface{}{}, }, }, @@ -1595,8 +1594,8 @@ Series: "quantal", MachineId: "1", WorkloadStatus: multiwatcher.StatusInfo{ - Current: "unknown", - Message: "Waiting for agent initialization to finish", + Current: "waiting", + Message: "waiting for machine", Data: map[string]interface{}{}, }, AgentStatus: multiwatcher.StatusInfo{ @@ -1618,11 +1617,11 @@ ModelUUID: st2.ModelUUID(), Id: "0", AgentStatus: multiwatcher.StatusInfo{ - Current: status.StatusPending, + Current: status.Pending, Data: map[string]interface{}{}, }, InstanceStatus: multiwatcher.StatusInfo{ - Current: status.StatusPending, + Current: status.Pending, Data: map[string]interface{}{}, }, Life: multiwatcher.Life("alive"), @@ -1735,7 +1734,7 @@ } func testChangeMachines(c *gc.C, runChangeTests func(*gc.C, []changeTestFunc)) { - now := time.Now() + now := testing.ZeroTime() changeTestFuncs := []changeTestFunc{ func(c *gc.C, st *State) changeTestCase { return changeTestCase{ @@ -1768,9 +1767,9 @@ func(c *gc.C, st *State) changeTestCase { m, err := st.AddMachine("quantal", JobHostUnits) c.Assert(err, jc.ErrorIsNil) - now := time.Now() + now := testing.ZeroTime() sInfo := status.StatusInfo{ - Status: status.StatusError, + Status: status.Error, Message: "failure", Since: &now, } @@ -1788,12 +1787,12 @@ ModelUUID: st.ModelUUID(), Id: "0", AgentStatus: multiwatcher.StatusInfo{ - Current: status.StatusError, + Current: status.Error, Message: "failure", Data: map[string]interface{}{}, }, InstanceStatus: multiwatcher.StatusInfo{ - Current: status.StatusPending, + Current: status.Pending, Data: map[string]interface{}{}, }, Life: 
multiwatcher.Life("alive"), @@ -1819,13 +1818,13 @@ ModelUUID: st.ModelUUID(), Id: "0", AgentStatus: multiwatcher.StatusInfo{ - Current: status.StatusError, + Current: status.Error, Message: "another failure", Data: map[string]interface{}{}, Since: &now, }, InstanceStatus: multiwatcher.StatusInfo{ - Current: status.StatusPending, + Current: status.Pending, Data: map[string]interface{}{}, Since: &now, }, @@ -1841,12 +1840,12 @@ Id: "0", InstanceId: "i-0", AgentStatus: multiwatcher.StatusInfo{ - Current: status.StatusError, + Current: status.Error, Message: "another failure", Data: map[string]interface{}{}, }, InstanceStatus: multiwatcher.StatusInfo{ - Current: status.StatusPending, + Current: status.Pending, Data: map[string]interface{}{}, }, Life: multiwatcher.Life("alive"), @@ -1865,7 +1864,7 @@ ModelUUID: st.ModelUUID(), Id: "0", AgentStatus: multiwatcher.StatusInfo{ - Current: status.StatusError, + Current: status.Error, Message: "failure", Data: map[string]interface{}{}, Since: &now, @@ -1880,7 +1879,7 @@ ModelUUID: st.ModelUUID(), Id: "0", AgentStatus: multiwatcher.StatusInfo{ - Current: status.StatusError, + Current: status.Error, Message: "failure", Data: map[string]interface{}{}, }, @@ -1889,9 +1888,9 @@ func(c *gc.C, st *State) changeTestCase { m, err := st.AddMachine("quantal", JobHostUnits) c.Assert(err, jc.ErrorIsNil) - now := time.Now() + now := testing.ZeroTime() sInfo := status.StatusInfo{ - Status: status.StatusStarted, + Status: status.Started, Message: "", Since: &now, } @@ -1904,7 +1903,7 @@ ModelUUID: st.ModelUUID(), Id: "0", AgentStatus: multiwatcher.StatusInfo{ - Current: status.StatusError, + Current: status.Error, Message: "failure", Data: map[string]interface{}{}, Since: &now, @@ -1919,7 +1918,7 @@ ModelUUID: st.ModelUUID(), Id: "0", AgentStatus: multiwatcher.StatusInfo{ - Current: status.StatusStarted, + Current: status.Started, Data: make(map[string]interface{}), }, }}} @@ -2026,8 +2025,8 @@ MinUnits: 42, Config: charm.Settings{}, Status: 
multiwatcher.StatusInfo{ - Current: "unknown", - Message: "Waiting for agent initialization to finish", + Current: "waiting", + Message: "waiting for machine", Data: map[string]interface{}{}, }, }}} @@ -2275,7 +2274,7 @@ initialContents: []multiwatcher.EntityInfo{&multiwatcher.ApplicationInfo{ ModelUUID: st.ModelUUID(), Name: "wordpress", - Constraints: constraints.MustParse("mem=99M cpu-cores=2 cpu-power=4"), + Constraints: constraints.MustParse("mem=99M cores=2 cpu-power=4"), }}, change: watcher.Change{ C: "constraints", @@ -2293,7 +2292,7 @@ } func testChangeUnits(c *gc.C, owner names.UserTag, runChangeTests func(*gc.C, []changeTestFunc)) { - now := time.Now() + now := testing.ZeroTime() changeTestFuncs := []changeTestFunc{ func(c *gc.C, st *State) changeTestCase { return changeTestCase{ @@ -2331,9 +2330,9 @@ c.Assert(err, jc.ErrorIsNil) err = u.OpenPorts("tcp", 5555, 5558) c.Assert(err, jc.ErrorIsNil) - now := time.Now() + now := testing.ZeroTime() sInfo := status.StatusInfo{ - Status: status.StatusError, + Status: status.Error, Message: "failure", Since: &now, } @@ -2505,8 +2504,8 @@ Series: "quantal", MachineId: "0", WorkloadStatus: multiwatcher.StatusInfo{ - Current: "unknown", - Message: "Waiting for agent initialization to finish", + Current: "waiting", + Message: "waiting for machine", Data: map[string]interface{}{}, }, AgentStatus: multiwatcher.StatusInfo{ @@ -2536,9 +2535,9 @@ privateAddress := network.NewScopedAddress("private", network.ScopeCloudLocal) err = m.SetProviderAddresses(publicAddress, privateAddress) c.Assert(err, jc.ErrorIsNil) - now := time.Now() + now := testing.ZeroTime() sInfo := status.StatusInfo{ - Status: status.StatusError, + Status: status.Error, Message: "failure", Since: &now, } @@ -2627,9 +2626,9 @@ wordpress := AddTestingService(c, st, "wordpress", AddTestingCharm(c, st, "wordpress")) u, err := wordpress.AddUnit() c.Assert(err, jc.ErrorIsNil) - now := time.Now() + now := testing.ZeroTime() sInfo := status.StatusInfo{ - Status: 
status.StatusIdle, + Status: status.Idle, Message: "", Since: &now, } @@ -2680,16 +2679,16 @@ wordpress := AddTestingService(c, st, "wordpress", AddTestingCharm(c, st, "wordpress")) u, err := wordpress.AddUnit() c.Assert(err, jc.ErrorIsNil) - now := time.Now() + now := testing.ZeroTime() sInfo := status.StatusInfo{ - Status: status.StatusIdle, + Status: status.Idle, Message: "", Since: &now, } err = u.SetAgentStatus(sInfo) c.Assert(err, jc.ErrorIsNil) sInfo = status.StatusInfo{ - Status: status.StatusMaintenance, + Status: status.Maintenance, Message: "doing work", Since: &now, } @@ -2740,9 +2739,9 @@ wordpress := AddTestingService(c, st, "wordpress", AddTestingCharm(c, st, "wordpress")) u, err := wordpress.AddUnit() c.Assert(err, jc.ErrorIsNil) - now := time.Now() + now := testing.ZeroTime() sInfo := status.StatusInfo{ - Status: status.StatusError, + Status: status.Error, Message: "hook error", Data: map[string]interface{}{ "1st-key": "one", @@ -2800,9 +2799,9 @@ wordpress := AddTestingService(c, st, "wordpress", AddTestingCharm(c, st, "wordpress")) u, err := wordpress.AddUnit() c.Assert(err, jc.ErrorIsNil) - now := time.Now() + now := testing.ZeroTime() sInfo := status.StatusInfo{ - Status: status.StatusActive, + Status: status.Active, Message: "", Since: &now, } @@ -2937,8 +2936,8 @@ Ports: []multiwatcher.Port{}, PortRanges: []multiwatcher.PortRange{}, WorkloadStatus: multiwatcher.StatusInfo{ - Current: "unknown", - Message: "Waiting for agent initialization to finish", + Current: "waiting", + Message: "waiting for machine", Data: map[string]interface{}{}, }, AgentStatus: multiwatcher.StatusInfo{ @@ -2969,8 +2968,8 @@ Ports: []multiwatcher.Port{{"tcp", 12345}}, PortRanges: []multiwatcher.PortRange{{12345, 12345, "tcp"}}, WorkloadStatus: multiwatcher.StatusInfo{ - Current: "unknown", - Message: "Waiting for agent initialization to finish", + Current: "waiting", + Message: "waiting for machine", Data: map[string]interface{}{}, }, AgentStatus: 
multiwatcher.StatusInfo{ @@ -3001,8 +3000,8 @@ Ports: []multiwatcher.Port{}, PortRanges: []multiwatcher.PortRange{}, WorkloadStatus: multiwatcher.StatusInfo{ - Current: "unknown", - Message: "Waiting for agent initialization to finish", + Current: "waiting", + Message: "waiting for machine", Data: map[string]interface{}{}, }, AgentStatus: multiwatcher.StatusInfo{ @@ -3030,8 +3029,8 @@ Ports: []multiwatcher.Port{}, PortRanges: []multiwatcher.PortRange{}, WorkloadStatus: multiwatcher.StatusInfo{ - Current: "unknown", - Message: "Waiting for agent initialization to finish", + Current: "waiting", + Message: "waiting for machine", Data: map[string]interface{}{}, }, AgentStatus: multiwatcher.StatusInfo{ @@ -3107,7 +3106,7 @@ break done } } - case <-time.After(maxDuration): + case <-tw.st.clock.After(maxDuration): // timed out break done } @@ -3128,7 +3127,7 @@ if len(d) > 0 { c.Error("change detected") } - case <-time.After(testing.ShortWait): + case <-tw.st.clock.After(testing.ShortWait): // expected } } @@ -3136,7 +3135,7 @@ func (tw *testWatcher) AssertChanges(c *gc.C, expected int) { var count int tw.st.StartSync() - maxWait := time.After(testing.LongWait) + maxWait := tw.st.clock.After(testing.LongWait) done: for { select { diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/annotations_test.go juju-core-2.0.0/src/github.com/juju/juju/state/annotations_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/annotations_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/annotations_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -181,7 +181,7 @@ }) owner := names.NewUserTag("test@remote") env, st, err := s.State.NewModel(state.ModelArgs{ - CloudName: "dummy", Config: cfg, Owner: owner, + CloudName: "dummy", CloudRegion: "dummy-region", Config: cfg, Owner: owner, StorageProviderRegistry: storage.StaticProviderRegistry{}, }) c.Assert(err, jc.ErrorIsNil) diff -Nru 
juju-core-2.0~beta15/src/github.com/juju/juju/state/application.go juju-core-2.0.0/src/github.com/juju/juju/state/application.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/application.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/application.go 2016-10-13 14:31:49.000000000 +0000 @@ -9,7 +9,6 @@ "sort" "strconv" "strings" - "time" "github.com/juju/errors" jujutxn "github.com/juju/txn" @@ -62,21 +61,21 @@ } // Name returns the application name. -func (s *Application) Name() string { - return s.doc.Name +func (a *Application) Name() string { + return a.doc.Name } // Tag returns a name identifying the service. // The returned name will be different from other Tag values returned by any // other entities from the same state. -func (s *Application) Tag() names.Tag { - return s.ApplicationTag() +func (a *Application) Tag() names.Tag { + return a.ApplicationTag() } // ApplicationTag returns the more specific ApplicationTag rather than the generic // Tag. -func (s *Application) ApplicationTag() names.ApplicationTag { - return names.NewApplicationTag(s.Name()) +func (a *Application) ApplicationTag() names.ApplicationTag { + return names.NewApplicationTag(a.Name()) } // applicationGlobalKey returns the global database key for the application @@ -86,28 +85,38 @@ } // globalKey returns the global database key for the application. -func (s *Application) globalKey() string { - return applicationGlobalKey(s.doc.Name) +func (a *Application) globalKey() string { + return applicationGlobalKey(a.doc.Name) } -func applicationSettingsKey(applicationname string, curl *charm.URL) string { - return fmt.Sprintf("a#%s#%s", applicationname, curl) +func applicationSettingsKey(appName string, curl *charm.URL) string { + return fmt.Sprintf("a#%s#%s", appName, curl) } // settingsKey returns the charm-version-specific settings collection // key for the application. 
-func (s *Application) settingsKey() string { - return applicationSettingsKey(s.doc.Name, s.doc.CharmURL) +func (a *Application) settingsKey() string { + return applicationSettingsKey(a.doc.Name, a.doc.CharmURL) +} + +func applicationStorageConstraintsKey(appName string, curl *charm.URL) string { + return fmt.Sprintf("asc#%s#%s", appName, curl) +} + +// storageConstraintsKey returns the charm-version-specific storage +// constraints collection key for the application. +func (a *Application) storageConstraintsKey() string { + return applicationStorageConstraintsKey(a.doc.Name, a.doc.CharmURL) } // Series returns the specified series for this charm. -func (s *Application) Series() string { - return s.doc.Series +func (a *Application) Series() string { + return a.doc.Series } // Life returns whether the application is Alive, Dying or Dead. -func (s *Application) Life() Life { - return s.doc.Life +func (a *Application) Life() Life { + return a.doc.Life } var errRefresh = stderrors.New("state seems inconsistent, refresh and try again") @@ -115,15 +124,15 @@ // Destroy ensures that the application and all its relations will be removed at // some point; if the application has no units, and no relation involving the // application has any units in scope, they are all removed immediately. -func (s *Application) Destroy() (err error) { - defer errors.DeferredAnnotatef(&err, "cannot destroy application %q", s) +func (a *Application) Destroy() (err error) { + defer errors.DeferredAnnotatef(&err, "cannot destroy application %q", a) defer func() { if err == nil { // This is a white lie; the document might actually be removed. 
- s.doc.Life = Dying + a.doc.Life = Dying } }() - svc := &Application{st: s.st, doc: s.doc} + svc := &Application{st: a.st, doc: a.doc} buildTxn := func(attempt int) ([]txn.Op, error) { if attempt > 0 { if err := svc.Refresh(); errors.IsNotFound(err) { @@ -143,30 +152,30 @@ } return nil, jujutxn.ErrTransientFailure } - return s.st.run(buildTxn) + return a.st.run(buildTxn) } // destroyOps returns the operations required to destroy the service. If it // returns errRefresh, the application should be refreshed and the destruction // operations recalculated. -func (s *Application) destroyOps() ([]txn.Op, error) { - if s.doc.Life == Dying { +func (a *Application) destroyOps() ([]txn.Op, error) { + if a.doc.Life == Dying { return nil, errAlreadyDying } - rels, err := s.Relations() + rels, err := a.Relations() if err != nil { return nil, err } - if len(rels) != s.doc.RelationCount { + if len(rels) != a.doc.RelationCount { // This is just an early bail out. The relations obtained may still // be wrong, but that situation will be caught by a combination of // asserts on relationcount and on each known relation, below. return nil, errRefresh } - ops := []txn.Op{minUnitsRemoveOp(s.st, s.doc.Name)} + ops := []txn.Op{minUnitsRemoveOp(a.st, a.doc.Name)} removeCount := 0 for _, rel := range rels { - relOps, isRemove, err := rel.destroyOps(s.doc.Name) + relOps, isRemove, err := rel.destroyOps(a.doc.Name) if err == errAlreadyDying { relOps = []txn.Op{{ C: relationsC, @@ -182,16 +191,20 @@ ops = append(ops, relOps...) } // TODO(ericsnow) Use a generic registry instead. - resOps, err := removeResourcesOps(s.st, s.doc.Name) + resOps, err := removeResourcesOps(a.st, a.doc.Name) if err != nil { return nil, errors.Trace(err) } ops = append(ops, resOps...) // If the application has no units, and all its known relations will be // removed, the application can also be removed. 
- if s.doc.UnitCount == 0 && s.doc.RelationCount == removeCount { + if a.doc.UnitCount == 0 && a.doc.RelationCount == removeCount { hasLastRefs := bson.D{{"life", Alive}, {"unitcount", 0}, {"relationcount", removeCount}} - return append(ops, s.removeOps(hasLastRefs)...), nil + removeOps, err := a.removeOps(hasLastRefs) + if err != nil { + return nil, errors.Trace(err) + } + return append(ops, removeOps...), nil } // In all other cases, application removal will be handled as a consequence // of the removal of the last unit or relation referencing it. If any @@ -202,14 +215,14 @@ // will be caught by virtue of being a remove. notLastRefs := bson.D{ {"life", Alive}, - {"relationcount", s.doc.RelationCount}, + {"relationcount", a.doc.RelationCount}, } // With respect to unit count, a changing value doesn't matter, so long // as the count's equality with zero does not change, because all we care // about is that *some* unit is, or is not, keeping the application from // being removed: the difference between 1 unit and 1000 is irrelevant. - if s.doc.UnitCount > 0 { - ops = append(ops, s.st.newCleanupOp(cleanupUnitsForDyingService, s.doc.Name)) + if a.doc.UnitCount > 0 { + ops = append(ops, newCleanupOp(cleanupUnitsForDyingService, a.doc.Name)) notLastRefs = append(notLastRefs, bson.D{{"unitcount", bson.D{{"$gt", 0}}}}...) } else { notLastRefs = append(notLastRefs, bson.D{{"unitcount", 0}}...) @@ -221,7 +234,7 @@ } return append(ops, txn.Op{ C: applicationsC, - Id: s.doc.DocID, + Id: a.doc.DocID, Assert: notLastRefs, Update: update, }), nil @@ -245,120 +258,125 @@ // removeOps returns the operations required to remove the service. Supplied // asserts will be included in the operation on the application document. 
-func (s *Application) removeOps(asserts bson.D) []txn.Op { - settingsDocID := s.st.docID(s.settingsKey()) +func (a *Application) removeOps(asserts bson.D) ([]txn.Op, error) { ops := []txn.Op{ { C: applicationsC, - Id: s.doc.DocID, + Id: a.doc.DocID, Assert: asserts, Remove: true, }, { - C: settingsrefsC, - Id: settingsDocID, - Remove: true, - }, { C: settingsC, - Id: settingsDocID, + Id: a.settingsKey(), Remove: true, }, - removeEndpointBindingsOp(s.globalKey()), - removeStorageConstraintsOp(s.globalKey()), - removeConstraintsOp(s.st, s.globalKey()), - annotationRemoveOp(s.st, s.globalKey()), - removeLeadershipSettingsOp(s.Name()), - removeStatusOp(s.st, s.globalKey()), - removeModelServiceRefOp(s.st, s.Name()), - } - // For local charms, we also delete the charm itself since the - // charm is associated 1:1 with the service. Each different deploy - // of a local charm creates a new copy with a different revision. - if s.doc.CharmURL.Schema == "local" { - ops = append(ops, s.st.newCleanupOp(cleanupCharmForDyingService, s.doc.CharmURL.String())) } - return ops + // Note that appCharmDecRefOps might not catch the final decref + // when run in a transaction that decrefs more than once. In + // this case, luckily, we can be sure that we unconditionally + // need finalAppCharmRemoveOps; and we trust that it's written + // such that it's safe to run multiple times. + name := a.doc.Name + curl := a.doc.CharmURL + charmOps, err := appCharmDecRefOps(a.st, name, curl) + if err != nil { + return nil, errors.Trace(err) + } + ops = append(ops, charmOps...) + ops = append(ops, finalAppCharmRemoveOps(name, curl)...) + + globalKey := a.globalKey() + ops = append(ops, + removeEndpointBindingsOp(globalKey), + removeConstraintsOp(a.st, globalKey), + annotationRemoveOp(a.st, globalKey), + removeLeadershipSettingsOp(name), + removeStatusOp(a.st, globalKey), + removeModelServiceRefOp(a.st, name), + ) + return ops, nil } // IsExposed returns whether this application is exposed. 
The explicitly open // ports (with open-port) for exposed services may be accessed from machines // outside of the local deployment network. See SetExposed and ClearExposed. -func (s *Application) IsExposed() bool { - return s.doc.Exposed +func (a *Application) IsExposed() bool { + return a.doc.Exposed } // SetExposed marks the application as exposed. // See ClearExposed and IsExposed. -func (s *Application) SetExposed() error { - return s.setExposed(true) +func (a *Application) SetExposed() error { + return a.setExposed(true) } // ClearExposed removes the exposed flag from the service. // See SetExposed and IsExposed. -func (s *Application) ClearExposed() error { - return s.setExposed(false) +func (a *Application) ClearExposed() error { + return a.setExposed(false) } -func (s *Application) setExposed(exposed bool) (err error) { +func (a *Application) setExposed(exposed bool) (err error) { ops := []txn.Op{{ C: applicationsC, - Id: s.doc.DocID, + Id: a.doc.DocID, Assert: isAliveDoc, Update: bson.D{{"$set", bson.D{{"exposed", exposed}}}}, }} - if err := s.st.runTransaction(ops); err != nil { - return fmt.Errorf("cannot set exposed flag for application %q to %v: %v", s, exposed, onAbort(err, errNotAlive)) + if err := a.st.runTransaction(ops); err != nil { + return errors.Errorf("cannot set exposed flag for application %q to %v: %v", a, exposed, onAbort(err, errNotAlive)) } - s.doc.Exposed = exposed + a.doc.Exposed = exposed return nil } // Charm returns the service's charm and whether units should upgrade to that // charm even if they are in an error state. -func (s *Application) Charm() (ch *Charm, force bool, err error) { +func (a *Application) Charm() (ch *Charm, force bool, err error) { // We don't worry about the channel since we aren't interacting // with the charm store here. 
- ch, err = s.st.Charm(s.doc.CharmURL) + ch, err = a.st.Charm(a.doc.CharmURL) if err != nil { return nil, false, err } - return ch, s.doc.ForceCharm, nil + return ch, a.doc.ForceCharm, nil } // IsPrincipal returns whether units of the application can // have subordinate units. -func (s *Application) IsPrincipal() bool { - return !s.doc.Subordinate +func (a *Application) IsPrincipal() bool { + return !a.doc.Subordinate } // CharmModifiedVersion increases whenever the service's charm is changed in any // way. -func (s *Application) CharmModifiedVersion() int { - return s.doc.CharmModifiedVersion +func (a *Application) CharmModifiedVersion() int { + return a.doc.CharmModifiedVersion } // CharmURL returns the service's charm URL, and whether units should upgrade // to the charm with that URL even if they are in an error state. -func (s *Application) CharmURL() (curl *charm.URL, force bool) { - return s.doc.CharmURL, s.doc.ForceCharm +func (a *Application) CharmURL() (curl *charm.URL, force bool) { + return a.doc.CharmURL, a.doc.ForceCharm } // Channel identifies the charm store channel from which the service's // charm was deployed. It is only needed when interacting with the charm // store. -func (s *Application) Channel() csparams.Channel { - return csparams.Channel(s.doc.Channel) +func (a *Application) Channel() csparams.Channel { + return csparams.Channel(a.doc.Channel) } // Endpoints returns the service's currently available relation endpoints. -func (s *Application) Endpoints() (eps []Endpoint, err error) { - ch, _, err := s.Charm() +func (a *Application) Endpoints() (eps []Endpoint, err error) { + ch, _, err := a.Charm() if err != nil { return nil, err } collect := func(role charm.RelationRole, rels map[string]charm.Relation) { for _, rel := range rels { eps = append(eps, Endpoint{ - ApplicationName: s.doc.Name, + ApplicationName: a.doc.Name, Relation: rel, }) } @@ -380,8 +398,8 @@ } // Endpoint returns the relation endpoint with the supplied name, if it exists. 
-func (s *Application) Endpoint(relationName string) (Endpoint, error) { - eps, err := s.Endpoints() +func (a *Application) Endpoint(relationName string) (Endpoint, error) { + eps, err := a.Endpoints() if err != nil { return Endpoint{}, err } @@ -390,17 +408,17 @@ return ep, nil } } - return Endpoint{}, fmt.Errorf("application %q has no %q relation", s, relationName) + return Endpoint{}, errors.Errorf("application %q has no %q relation", a, relationName) } // extraPeerRelations returns only the peer relations in newMeta not // present in the service's current charm meta data. -func (s *Application) extraPeerRelations(newMeta *charm.Meta) map[string]charm.Relation { +func (a *Application) extraPeerRelations(newMeta *charm.Meta) map[string]charm.Relation { if newMeta == nil { // This should never happen, since we're checking the charm in SetCharm already. panic("newMeta is nil") } - ch, _, err := s.Charm() + ch, _, err := a.Charm() if err != nil { return nil } @@ -415,14 +433,14 @@ return extraPeers } -func (s *Application) checkRelationsOps(ch *Charm, relations []*Relation) ([]txn.Op, error) { +func (a *Application) checkRelationsOps(ch *Charm, relations []*Relation) ([]txn.Op, error) { asserts := make([]txn.Op, 0, len(relations)) // All relations must still exist and their endpoints are implemented by the charm. 
for _, rel := range relations { - if ep, err := rel.Endpoint(s.doc.Name); err != nil { + if ep, err := rel.Endpoint(a.doc.Name); err != nil { return nil, err } else if !ep.ImplementedBy(ch) { - return nil, fmt.Errorf("cannot upgrade application %q to charm %q: would break relation %q", s, ch, rel) + return nil, errors.Errorf("would break relation %q", rel) } asserts = append(asserts, txn.Op{ C: relationsC, @@ -433,15 +451,9 @@ return asserts, nil } -func (s *Application) checkStorageUpgrade(newMeta *charm.Meta) (_ []txn.Op, err error) { - defer errors.DeferredAnnotatef(&err, "cannot upgrade application %q to charm %q", s, newMeta.Name) - ch, _, err := s.Charm() - if err != nil { - return nil, errors.Trace(err) - } - oldMeta := ch.Meta() +func (a *Application) checkStorageUpgrade(newMeta, oldMeta *charm.Meta, units []*Unit) (_ []txn.Op, err error) { + // Make sure no storage instances are added or removed. var ops []txn.Op - var units []*Unit for name, oldStorageMeta := range oldMeta.Storage { if _, ok := newMeta.Storage[name]; ok { continue @@ -453,70 +465,24 @@ // are no instances of the store, it can safely be // removed. if oldStorageMeta.Shared { - n, err := s.st.countEntityStorageInstancesForName( - s.Tag(), name, - ) + op, n, err := a.st.countEntityStorageInstances(a.Tag(), name) if err != nil { return nil, errors.Trace(err) } if n > 0 { return nil, errors.Errorf("in-use storage %q removed", name) } - // TODO(axw) if/when it is possible to - // add shared storage instance to an - // application post-deployment, we must - // include a txn.Op here that asserts - // that the number of instances is zero. 
+ ops = append(ops, op) } else { - if units == nil { - var err error - units, err = s.AllUnits() + for _, u := range units { + op, n, err := a.st.countEntityStorageInstances(u.Tag(), name) if err != nil { return nil, errors.Trace(err) } - ops = append(ops, txn.Op{ - C: applicationsC, - Id: s.doc.DocID, - Assert: bson.D{{"unitcount", len(units)}}, - }) - for _, unit := range units { - // Here we check that the storage - // attachment count remains the same. - // To get around the ABA problem, we - // also add ops for the individual - // attachments below. - ops = append(ops, txn.Op{ - C: unitsC, - Id: unit.doc.DocID, - Assert: bson.D{{ - "storageattachmentcount", - unit.doc.StorageAttachmentCount, - }}, - }) - } - } - for _, unit := range units { - attachments, err := s.st.UnitStorageAttachments(unit.UnitTag()) - if err != nil { - return nil, errors.Trace(err) - } - for _, attachment := range attachments { - storageTag := attachment.StorageInstance() - storageName, err := names.StorageName(storageTag.Id()) - if err != nil { - return nil, errors.Trace(err) - } - if storageName == name { - return nil, errors.Errorf("in-use storage %q removed", name) - } - // We assert that other storage attachments still exist to - // avoid the ABA problem. - ops = append(ops, txn.Op{ - C: storageAttachmentsC, - Id: storageAttachmentId(unit.Name(), storageTag.Id()), - Assert: txn.DocExists, - }) + if n > 0 { + return nil, errors.Errorf("in-use storage %q removed", name) } + ops = append(ops, op) } } } @@ -526,15 +492,6 @@ for name, newStorageMeta := range newMeta.Storage { oldStorageMeta, ok := oldMeta.Storage[name] if !ok { - if newStorageMeta.CountMin > 0 { - return nil, errors.Errorf("required storage %q added", name) - } - // New storage is fine as long as it is not required. - // - // TODO(axw) introduce a way of adding storage at - // upgrade time. We should also look at supplying - // a way of adding/changing other things during - // upgrade, e.g. changing application config. 
continue } if newStorageMeta.Type != oldStorageMeta.Type { @@ -561,12 +518,6 @@ name, oldStorageMeta.Location, newStorageMeta.Location, ) } - if newStorageMeta.CountMin > oldStorageMeta.CountMin { - return nil, errors.Errorf( - "existing storage %q range contracted: min increased from %d to %d", - name, oldStorageMeta.CountMin, newStorageMeta.CountMin, - ) - } if less(newStorageMeta.CountMax, oldStorageMeta.CountMax) { var oldCountMax interface{} = oldStorageMeta.CountMax if oldStorageMeta.CountMax == -1 { @@ -582,7 +533,7 @@ // from being a singleton to multiple, since then the // location has a different meaning. return nil, errors.Errorf( - "existing storage %q with location changed from singleton to multiple", + "existing storage %q with location changed from single to multiple", name, ) } @@ -592,45 +543,137 @@ // changeCharmOps returns the operations necessary to set a service's // charm URL to a new value. -func (s *Application) changeCharmOps(ch *Charm, channel string, forceUnits bool, resourceIDs map[string]string) ([]txn.Op, error) { +func (a *Application) changeCharmOps( + ch *Charm, + channel string, + updatedSettings charm.Settings, + forceUnits bool, + resourceIDs map[string]string, + updatedStorageConstraints map[string]StorageConstraints, +) ([]txn.Op, error) { // Build the new application config from what can be used of the old one. var newSettings charm.Settings - oldSettings, err := readSettings(s.st, settingsC, s.settingsKey()) + oldSettings, err := readSettings(a.st, settingsC, a.settingsKey()) if err == nil { // Filter the old settings through to get the new settings. newSettings = ch.Config().FilterSettings(oldSettings.Map()) + for k, v := range updatedSettings { + newSettings[k] = v + } } else if errors.IsNotFound(err) { - // No old settings, start with empty new settings. - newSettings = make(charm.Settings) + // No old settings, start with the updated settings. 
+ newSettings = updatedSettings } else { return nil, errors.Trace(err) } // Create or replace application settings. var settingsOp txn.Op - newKey := applicationSettingsKey(s.doc.Name, ch.URL()) - if _, err := readSettings(s.st, settingsC, newKey); errors.IsNotFound(err) { + newSettingsKey := applicationSettingsKey(a.doc.Name, ch.URL()) + if _, err := readSettings(a.st, settingsC, newSettingsKey); errors.IsNotFound(err) { // No settings for this key yet, create it. - settingsOp = createSettingsOp(settingsC, newKey, newSettings) + settingsOp = createSettingsOp(settingsC, newSettingsKey, newSettings) } else if err != nil { return nil, errors.Trace(err) } else { // Settings exist, just replace them with the new ones. - settingsOp, _, err = replaceSettingsOp(s.st, settingsC, newKey, newSettings) + settingsOp, _, err = replaceSettingsOp(a.st, settingsC, newSettingsKey, newSettings) if err != nil { return nil, errors.Trace(err) } } - // Add or create a reference to the new settings doc. - incOp, err := settingsIncRefOp(s.st, s.doc.Name, ch.URL(), true) + // Make sure no units are added or removed while the upgrade + // transaction is being executed. This allows us to make + // changes to units during the upgrade, e.g. add storage + // to existing units, or remove optional storage so long as + // it is unreferenced. + units, err := a.AllUnits() + if err != nil { + return nil, errors.Trace(err) + } + unitOps := make([]txn.Op, len(units)) + for i, u := range units { + unitOps[i] = txn.Op{ + C: unitsC, + Id: u.doc.DocID, + Assert: txn.DocExists, + } + } + unitOps = append(unitOps, txn.Op{ + C: applicationsC, + Id: a.doc.DocID, + Assert: bson.D{{"unitcount", len(units)}}, + }) + + // Check storage to ensure no referenced storage is removed, or changed + // in an incompatible way. 
We do this before computing the new storage + // constraints, as incompatible charm changes will otherwise yield + // confusing error messages that would suggest the user has supplied + // invalid constraints. + oldCharm, _, err := a.Charm() + if err != nil { + return nil, errors.Trace(err) + } + oldMeta := oldCharm.Meta() + checkStorageOps, err := a.checkStorageUpgrade(ch.Meta(), oldMeta, units) + if err != nil { + return nil, errors.Trace(err) + } + + // Create or replace storage constraints. We take the existing storage + // constraints, remove any keys that are no longer referenced by the + // charm, and update the constraints that the user has specified. + var storageConstraintsOp txn.Op + oldStorageConstraints, err := a.StorageConstraints() + if err != nil { + return nil, errors.Trace(err) + } + newStorageConstraints := oldStorageConstraints + for name, cons := range updatedStorageConstraints { + newStorageConstraints[name] = cons + } + for name := range newStorageConstraints { + if _, ok := ch.Meta().Storage[name]; !ok { + delete(newStorageConstraints, name) + } + } + if err := addDefaultStorageConstraints(a.st, newStorageConstraints, ch.Meta()); err != nil { + return nil, errors.Annotate(err, "adding default storage constraints") + } + if err := validateStorageConstraints(a.st, newStorageConstraints, ch.Meta()); err != nil { + return nil, errors.Annotate(err, "validating storage constraints") + } + newStorageConstraintsKey := applicationStorageConstraintsKey(a.doc.Name, ch.URL()) + if _, err := readStorageConstraints(a.st, newStorageConstraintsKey); errors.IsNotFound(err) { + storageConstraintsOp = createStorageConstraintsOp( + newStorageConstraintsKey, newStorageConstraints, + ) + } else if err != nil { + return nil, errors.Trace(err) + } else { + storageConstraintsOp = replaceStorageConstraintsOp( + newStorageConstraintsKey, newStorageConstraints, + ) + } + + // Upgrade charm storage. 
+ upgradeStorageOps, err := a.upgradeStorageOps(ch.Meta(), oldMeta, units, newStorageConstraints) + if err != nil { + return nil, errors.Trace(err) + } + + // Add or create a reference to the new charm, settings, + // and storage constraints docs. + incOps, err := appCharmIncRefOps(a.st, a.doc.Name, ch.URL(), true) if err != nil { return nil, errors.Trace(err) } var decOps []txn.Op - // Drop the reference to the old settings doc (if they exist). + // Drop the references to the old settings, storage constraints, + // and charm docs (if the refs actually exist yet). if oldSettings != nil { - decOps, err = settingsDecRefOps(s.st, s.doc.Name, s.doc.CharmURL) // current charm + decOps, err = appCharmDecRefOps(a.st, a.doc.Name, a.doc.CharmURL) // current charm if err != nil { return nil, errors.Trace(err) } @@ -638,21 +681,21 @@ // Build the transaction. var ops []txn.Op - differentCharm := bson.D{{"charmurl", bson.D{{"$ne", ch.URL()}}}} if oldSettings != nil { // Old settings shouldn't change (when they exist). ops = append(ops, oldSettings.assertUnchangedOp()) } + ops = append(ops, unitOps...) + ops = append(ops, incOps...) ops = append(ops, []txn.Op{ // Create or replace new settings. settingsOp, - // Increment the ref count. - incOp, + // Create storage constraints. + storageConstraintsOp, // Update the charm URL and force flag (if relevant). { - C: applicationsC, - Id: s.doc.DocID, - Assert: append(notDeadDoc, differentCharm...), + C: applicationsC, + Id: a.doc.DocID, Update: bson.D{{"$set", bson.D{ {"charmurl", ch.URL()}, {"cs-channel", channel}, @@ -660,19 +703,21 @@ }}}, }, }...) + ops = append(ops, checkStorageOps...) + ops = append(ops, upgradeStorageOps...) - ops = append(ops, incCharmModifiedVersionOps(s.doc.DocID)...) + ops = append(ops, incCharmModifiedVersionOps(a.doc.DocID)...) // Add any extra peer relations that need creation. 
- newPeers := s.extraPeerRelations(ch.Meta()) - peerOps, err := s.st.addPeerRelationsOps(s.doc.Name, newPeers) + newPeers := a.extraPeerRelations(ch.Meta()) + peerOps, err := a.st.addPeerRelationsOps(a.doc.Name, newPeers) if err != nil { return nil, errors.Trace(err) } if len(resourceIDs) > 0 { // Collect pending resource resolution operations. - resOps, err := s.resolveResourceOps(resourceIDs) + resOps, err := a.resolveResourceOps(resourceIDs) if err != nil { return nil, errors.Trace(err) } @@ -680,7 +725,7 @@ } // Get all relations - we need to check them later. - relations, err := s.Relations() + relations, err := a.Relations() if err != nil { return nil, errors.Trace(err) } @@ -691,12 +736,12 @@ // Update the relation count as well. ops = append(ops, txn.Op{ C: applicationsC, - Id: s.doc.DocID, + Id: a.doc.DocID, Assert: append(notDeadDoc, sameRelCount...), Update: bson.D{{"$inc", bson.D{{"relationcount", len(newPeers)}}}}, }) // Check relations to ensure no active relations are removed. - relOps, err := s.checkRelationsOps(ch, relations) + relOps, err := a.checkRelationsOps(ch, relations) if err != nil { return nil, errors.Trace(err) } @@ -706,7 +751,7 @@ // // TODO(dimitern): Once upgrade-charm accepts --bind like deploy, pass the // given bindings below, instead of nil. - endpointBindingsOp, err := updateEndpointBindingsOp(s.st, s.globalKey(), nil, ch.Meta()) + endpointBindingsOp, err := updateEndpointBindingsOp(a.st, a.globalKey(), nil, ch.Meta()) if err == nil { ops = append(ops, endpointBindingsOp) } else if !errors.IsNotFound(err) && err != jujutxn.ErrNoOperations { @@ -716,18 +761,41 @@ return nil, errors.Trace(err) } - // Check storage to ensure no storage is removed, and no required - // storage is added for which there are no constraints. - storageOps, err := s.checkStorageUpgrade(ch.Meta()) - if err != nil { - return nil, errors.Trace(err) - } - ops = append(ops, storageOps...) - - // And finally, decrement the old settings. 
+ // And finally, decrement the old charm and settings. return append(ops, decOps...), nil } +func (a *Application) upgradeStorageOps( + meta, oldMeta *charm.Meta, + units []*Unit, + allStorageCons map[string]StorageConstraints, +) (_ []txn.Op, err error) { + // For each store, ensure that every unit has the minimum requirements. + // If a unit has an existing store, but its minimum count has been + // increased, we only add the shortfall; we do not necessarily add as + // many instances as are specified in the storage constraints. + var ops []txn.Op + for name, cons := range allStorageCons { + for _, u := range units { + countMin := meta.Storage[name].CountMin + if _, ok := oldMeta.Storage[name]; !ok { + // The store did not exist previously, so we + // create the full amount specified in the + // cosntraints. + countMin = int(cons.Count) + } + unitOps, err := a.st.addUnitStorageOps( + meta, u, name, cons, countMin, + ) + if err != nil { + return nil, errors.Trace(err) + } + ops = append(ops, unitOps...) + } + } + return ops, nil +} + // incCharmModifiedVersionOps returns the operations necessary to increment // the CharmModifiedVersion field for the given service. func incCharmModifiedVersionOps(serviceID string) []txn.Op { @@ -739,51 +807,68 @@ }} } -func (s *Application) resolveResourceOps(resourceIDs map[string]string) ([]txn.Op, error) { +func (a *Application) resolveResourceOps(resourceIDs map[string]string) ([]txn.Op, error) { // Collect pending resource resolution operations. - resources, err := s.st.Resources() + resources, err := a.st.Resources() if err != nil { return nil, errors.Trace(err) } - return resources.NewResolvePendingResourcesOps(s.doc.Name, resourceIDs) + return resources.NewResolvePendingResourcesOps(a.doc.Name, resourceIDs) } -// SetCharmConfig sets the charm for the application. +// SetCharmConfig contains the parameters for Application.SetCharm. type SetCharmConfig struct { - // Charm is the new charm to use for the application. 
+ // Charm is the new charm to use for the application. New units + // will be started with this charm, and existing units will be + // upgraded to use it. Charm *Charm + // Channel is the charm store channel from which charm was pulled. Channel csparams.Channel + + // ConfigSettings is the charm config settings to apply when upgrading + // the charm. + ConfigSettings charm.Settings + // ForceUnits forces the upgrade on units in an error state. - ForceUnits bool `json:"forceunits"` - // ForceSeries forces the use of the charm even if it doesn't match the - // series of the unit. - ForceSeries bool `json:"forceseries"` + ForceUnits bool + + // ForceSeries forces the use of the charm even if it is not one of + // the charm's supported series. + ForceSeries bool + // ResourceIDs is a map of resource names to resource IDs to activate during // the upgrade. - ResourceIDs map[string]string `json:"resourceids"` + ResourceIDs map[string]string + + // StorageConstraints contains the constraints to add or update when + // upgrading the charm. + // + // Any existing storage instances for the named stores will be + // unaffected; the storage constraints will only be used for + // provisioning new storage instances. + StorageConstraints map[string]StorageConstraints } -// SetCharm changes the charm for the application. New units will be started with -// this charm, and existing units will be upgraded to use it. -// If forceUnits is true, units will be upgraded even if they are in an error state. -// If forceSeries is true, the charm will be used even if it's the service's series -// is not supported by the charm. -func (s *Application) SetCharm(cfg SetCharmConfig) error { - if cfg.Charm.Meta().Subordinate != s.doc.Subordinate { - return errors.Errorf("cannot change a service's subordinacy") +// SetCharm changes the charm for the application. 
+func (a *Application) SetCharm(cfg SetCharmConfig) (err error) { + defer errors.DeferredAnnotatef( + &err, "cannot upgrade application %q to charm %q", a, cfg.Charm, + ) + if cfg.Charm.Meta().Subordinate != a.doc.Subordinate { + return errors.Errorf("cannot change an application's subordinacy") } // For old style charms written for only one series, we still retain // this check. Newer charms written for multi-series have a URL // with series = "". if cfg.Charm.URL().Series != "" { - if cfg.Charm.URL().Series != s.doc.Series { - return errors.Errorf("cannot change a service's series") + if cfg.Charm.URL().Series != a.doc.Series { + return errors.Errorf("cannot change an application's series") } } else if !cfg.ForceSeries { supported := false for _, series := range cfg.Charm.Meta().Series { - if series == s.doc.Series { + if series == a.doc.Series { supported = true break } @@ -793,13 +878,13 @@ if len(cfg.Charm.Meta().Series) > 0 { supportedSeries = strings.Join(cfg.Charm.Meta().Series, ", ") } - return errors.Errorf("cannot upgrade charm, only these series are supported: %v", supportedSeries) + return errors.Errorf("only these series are supported: %v", supportedSeries) } } else { // Even with forceSeries=true, we do not allow a charm to be used which is for // a different OS. This assumes the charm declares it has supported series which // we can check for OS compatibility. Otherwise, we just accept the series supplied. - currentOS, err := series.GetOSFromSeries(s.doc.Series) + currentOS, err := series.GetOSFromSeries(a.doc.Series) if err != nil { // We don't expect an error here but there's not much we can // do to recover. 
@@ -818,153 +903,141 @@ } } if !supportedOS && len(supportedSeries) > 0 { - return errors.Errorf("cannot upgrade charm, OS %q not supported by charm", currentOS) + return errors.Errorf("OS %q not supported by charm", currentOS) } } - services, closer := s.st.getCollection(applicationsC) - defer closer() + updatedSettings, err := cfg.Charm.Config().ValidateSettings(cfg.ConfigSettings) + if err != nil { + return errors.Annotate(err, "validating config settings") + } - // this value holds the *previous* charm modified version, before this - // transaction commits. - var charmModifiedVersion int + var newCharmModifiedVersion int channel := string(cfg.Channel) + acopy := &Application{a.st, a.doc} buildTxn := func(attempt int) ([]txn.Op, error) { + a := acopy if attempt > 0 { - // NOTE: We're explicitly allowing SetCharm to succeed - // when the application is Dying, because service/charm - // upgrades should still be allowed to apply to dying - // services and units, so that bugs in departed/broken - // hooks can be addressed at runtime. - if notDead, err := isNotDeadWithSession(services, s.doc.DocID); err != nil { + if err := a.Refresh(); err != nil { return nil, errors.Trace(err) - } else if !notDead { - return nil, ErrDead } } - // We can't update the in-memory application doc inside the transaction, so - // we manually udpate it at the end of the SetCharm method. However, we - // have no way of knowing what the charmModifiedVersion will be, since - // it's just incrementing the value in the DB (and that might be out of - // step with the value we have in memory). What we have to do is read - // the DB, store the charmModifiedVersion we get, run the transaction, - // assert in the transaction that the charmModifiedVersion hasn't - // changed since we retrieved it, and then we know what its value must - // be after this transaction ends. It's hacky, but there's no real - // other way to do it, thanks to the way mgo's transactions work. 
- var doc applicationDoc - err := services.FindId(s.doc.DocID).One(&doc) - var charmModifiedVersion int - switch { - case err == mgo.ErrNotFound: - // 0 is correct, since no previous charm existed. - case err != nil: - return nil, errors.Annotate(err, "can't open previous copy of charm") - default: - charmModifiedVersion = doc.CharmModifiedVersion + // NOTE: We're explicitly allowing SetCharm to succeed + // when the application is Dying, because service/charm + // upgrades should still be allowed to apply to dying + // services and units, so that bugs in departed/broken + // hooks can be addressed at runtime. + if a.Life() == Dead { + return nil, ErrDead } + + // Record the current value of charmModifiedVersion, so we can + // set the value on the method receiver's in-memory document + // structure. We increment the version only when we change the + // charm URL. + newCharmModifiedVersion = a.doc.CharmModifiedVersion + ops := []txn.Op{{ - C: applicationsC, - Id: s.doc.DocID, - Assert: bson.D{{"charmmodifiedversion", charmModifiedVersion}}, + C: applicationsC, + Id: a.doc.DocID, + Assert: append(notDeadDoc, bson.DocElem{ + "charmmodifiedversion", a.doc.CharmModifiedVersion, + }), }} - // Make sure the application doesn't have this charm already. - sel := bson.D{{"_id", s.doc.DocID}, {"charmurl", cfg.Charm.URL()}} - count, err := services.Find(sel).Count() - if err != nil { - return nil, errors.Trace(err) - } - if count > 0 { + if a.doc.CharmURL.String() == cfg.Charm.URL().String() { // Charm URL already set; just update the force flag and channel. - sameCharm := bson.D{{"charmurl", cfg.Charm.URL()}} - ops = append(ops, []txn.Op{{ - C: applicationsC, - Id: s.doc.DocID, - Assert: append(notDeadDoc, sameCharm...), + ops = append(ops, txn.Op{ + C: applicationsC, + Id: a.doc.DocID, Update: bson.D{{"$set", bson.D{ {"cs-channel", channel}, {"forcecharm", cfg.ForceUnits}, }}}, - }}...) + }) } else { - // Change the charm URL. 
- chng, err := s.changeCharmOps(cfg.Charm, channel, cfg.ForceUnits, cfg.ResourceIDs) + chng, err := a.changeCharmOps( + cfg.Charm, + channel, + updatedSettings, + cfg.ForceUnits, + cfg.ResourceIDs, + cfg.StorageConstraints, + ) if err != nil { return nil, errors.Trace(err) } ops = append(ops, chng...) + newCharmModifiedVersion++ } return ops, nil } - err := s.st.run(buildTxn) - if err == nil { - s.doc.CharmURL = cfg.Charm.URL() - s.doc.Channel = channel - s.doc.ForceCharm = cfg.ForceUnits - s.doc.CharmModifiedVersion = charmModifiedVersion + 1 + if err := a.st.run(buildTxn); err != nil { + return err } - return err + a.doc.CharmURL = cfg.Charm.URL() + a.doc.Channel = channel + a.doc.ForceCharm = cfg.ForceUnits + a.doc.CharmModifiedVersion = newCharmModifiedVersion + return nil } // String returns the application name. -func (s *Application) String() string { - return s.doc.Name +func (a *Application) String() string { + return a.doc.Name } // Refresh refreshes the contents of the Service from the underlying // state. It returns an error that satisfies errors.IsNotFound if the // application has been removed. -func (s *Application) Refresh() error { - services, closer := s.st.getCollection(applicationsC) +func (a *Application) Refresh() error { + services, closer := a.st.getCollection(applicationsC) defer closer() - err := services.FindId(s.doc.DocID).One(&s.doc) + err := services.FindId(a.doc.DocID).One(&a.doc) if err == mgo.ErrNotFound { - return errors.NotFoundf("application %q", s) + return errors.NotFoundf("application %q", a) } if err != nil { - return fmt.Errorf("cannot refresh application %q: %v", s, err) + return errors.Errorf("cannot refresh application %q: %v", a, err) } return nil } // newUnitName returns the next unit name. 
-func (s *Application) newUnitName() (string, error) { - unitSeq, err := s.st.sequence(s.Tag().String()) +func (a *Application) newUnitName() (string, error) { + unitSeq, err := a.st.sequence(a.Tag().String()) if err != nil { return "", errors.Trace(err) } - name := s.doc.Name + "/" + strconv.Itoa(unitSeq) + name := a.doc.Name + "/" + strconv.Itoa(unitSeq) return name, nil } -const MessageWaitForAgentInit = "Waiting for agent initialization to finish" - // addUnitOps returns a unique name for a new unit, and a list of txn operations // necessary to create that unit. The principalName param must be non-empty if // and only if s is a subordinate service. Only one subordinate of a given // application will be assigned to a given principal. The asserts param can be used // to include additional assertions for the application document. This method // assumes that the application already exists in the db. -func (s *Application) addUnitOps(principalName string, asserts bson.D) (string, []txn.Op, error) { +func (a *Application) addUnitOps(principalName string, asserts bson.D) (string, []txn.Op, error) { var cons constraints.Value - if !s.doc.Subordinate { - scons, err := s.Constraints() + if !a.doc.Subordinate { + scons, err := a.Constraints() if errors.IsNotFound(err) { - return "", nil, errors.NotFoundf("application %q", s.Name()) + return "", nil, errors.NotFoundf("application %q", a.Name()) } if err != nil { return "", nil, err } - cons, err = s.st.resolveConstraints(scons) + cons, err = a.st.resolveConstraints(scons) if err != nil { return "", nil, err } } - storageCons, err := s.StorageConstraints() + storageCons, err := a.StorageConstraints() if err != nil { return "", nil, err } @@ -973,13 +1046,13 @@ principalName: principalName, storageCons: storageCons, } - names, ops, err := s.addUnitOpsWithCons(args) + names, ops, err := a.addUnitOpsWithCons(args) if err != nil { return names, ops, err } // we verify the application is alive asserts = append(isAliveDoc, 
asserts...) - ops = append(ops, s.incUnitCountOp(asserts)) + ops = append(ops, a.incUnitCountOp(asserts)) return names, ops, err } @@ -991,99 +1064,125 @@ // addServiceUnitOps is just like addUnitOps but explicitly takes a // constraints value (this is used at application creation time). -func (s *Application) addServiceUnitOps(args applicationAddUnitOpsArgs) (string, []txn.Op, error) { - names, ops, err := s.addUnitOpsWithCons(args) +func (a *Application) addServiceUnitOps(args applicationAddUnitOpsArgs) (string, []txn.Op, error) { + names, ops, err := a.addUnitOpsWithCons(args) if err == nil { - ops = append(ops, s.incUnitCountOp(nil)) + ops = append(ops, a.incUnitCountOp(nil)) } return names, ops, err } // addUnitOpsWithCons is a helper method for returning addUnitOps. -func (s *Application) addUnitOpsWithCons(args applicationAddUnitOpsArgs) (string, []txn.Op, error) { - if s.doc.Subordinate && args.principalName == "" { - return "", nil, fmt.Errorf("application is a subordinate") - } else if !s.doc.Subordinate && args.principalName != "" { - return "", nil, fmt.Errorf("application is not a subordinate") +func (a *Application) addUnitOpsWithCons(args applicationAddUnitOpsArgs) (string, []txn.Op, error) { + if a.doc.Subordinate && args.principalName == "" { + return "", nil, errors.New("application is a subordinate") + } else if !a.doc.Subordinate && args.principalName != "" { + return "", nil, errors.New("application is not a subordinate") } - name, err := s.newUnitName() + name, err := a.newUnitName() if err != nil { return "", nil, err } + unitTag := names.NewUnitTag(name) - // Create instances of the charm's declared stores. - storageOps, numStorageAttachments, err := s.unitStorageOps(name, args.storageCons) + charm, _, err := a.Charm() + if err != nil { + return "", nil, err + } + + // Add storage instances/attachments for the unit. If the + // application is subordinate, we'll add the machine storage + // if the principal is assigned to a machine. 
Otherwise, we + // will add the subordinate's storage along with the principal's + // when the principal is assigned to a machine. + var machineAssignable machineAssignable + if a.doc.Subordinate { + pu, err := a.st.Unit(args.principalName) + if err != nil { + return "", nil, errors.Trace(err) + } + machineAssignable = pu + } + storageOps, numStorageAttachments, err := createStorageOps( + a.st, + unitTag, + charm.Meta(), + args.storageCons, + a.doc.Series, + machineAssignable, + ) if err != nil { return "", nil, errors.Trace(err) } - docID := s.st.docID(name) + docID := a.st.docID(name) globalKey := unitGlobalKey(name) agentGlobalKey := unitAgentGlobalKey(name) udoc := &unitDoc{ DocID: docID, Name: name, - Application: s.doc.Name, - Series: s.doc.Series, + Application: a.doc.Name, + Series: a.doc.Series, Life: Alive, Principal: args.principalName, StorageAttachmentCount: numStorageAttachments, } - // TODO(fwereade): 2016-03-17 lp:1558657 - now := time.Now() + now := a.st.clock.Now() agentStatusDoc := statusDoc{ - Status: status.StatusAllocating, + Status: status.Allocating, Updated: now.UnixNano(), } unitStatusDoc := statusDoc{ - // TODO(fwereade): this violates the spec. Should be "waiting". - Status: status.StatusUnknown, - StatusInfo: MessageWaitForAgentInit, + Status: status.Waiting, + StatusInfo: status.MessageWaitForMachine, Updated: now.UnixNano(), } workloadVersionDoc := statusDoc{ - Status: status.StatusUnknown, + Status: status.Unknown, Updated: now.UnixNano(), } - ops := addUnitOps(s.st, addUnitOpsArgs{ + ops, err := addUnitOps(a.st, addUnitOpsArgs{ unitDoc: udoc, agentStatusDoc: agentStatusDoc, workloadStatusDoc: unitStatusDoc, workloadVersionDoc: workloadVersionDoc, meterStatusDoc: &meterStatusDoc{Code: MeterNotSet.String()}, }) + if err != nil { + return "", nil, errors.Trace(err) + } ops = append(ops, storageOps...) 
- if s.doc.Subordinate { + if a.doc.Subordinate { ops = append(ops, txn.Op{ C: unitsC, - Id: s.st.docID(args.principalName), + Id: a.st.docID(args.principalName), Assert: append(isAliveDoc, bson.DocElem{ - "subordinates", bson.D{{"$not", bson.RegEx{Pattern: "^" + s.doc.Name + "/"}}}, + "subordinates", bson.D{{"$not", bson.RegEx{Pattern: "^" + a.doc.Name + "/"}}}, }), Update: bson.D{{"$addToSet", bson.D{{"subordinates", name}}}}, }) } else { - ops = append(ops, createConstraintsOp(s.st, agentGlobalKey, args.cons)) + ops = append(ops, createConstraintsOp(a.st, agentGlobalKey, args.cons)) } // At the last moment we still have the statusDocs in scope, set the initial // history entries. This is risky, and may lead to extra entries, but that's // an intrinsic problem with mixing txn and non-txn ops -- we can't sync // them cleanly. - probablyUpdateStatusHistory(s.st, globalKey, unitStatusDoc) - probablyUpdateStatusHistory(s.st, agentGlobalKey, agentStatusDoc) - probablyUpdateStatusHistory(s.st, globalWorkloadVersionKey(name), workloadVersionDoc) + probablyUpdateStatusHistory(a.st, globalKey, unitStatusDoc) + probablyUpdateStatusHistory(a.st, agentGlobalKey, agentStatusDoc) + probablyUpdateStatusHistory(a.st, globalWorkloadVersionKey(name), workloadVersionDoc) return name, ops, nil } // incUnitCountOp returns the operation to increment the service's unit count. -func (s *Application) incUnitCountOp(asserts bson.D) txn.Op { +func (a *Application) incUnitCountOp(asserts bson.D) txn.Op { op := txn.Op{ C: applicationsC, - Id: s.doc.DocID, + Id: a.doc.DocID, Update: bson.D{{"$inc", bson.D{{"unitcount", 1}}}}, } if len(asserts) > 0 { @@ -1092,67 +1191,43 @@ return op } -// unitStorageOps returns operations for creating storage -// instances and attachments for a new unit. unitStorageOps -// returns the number of initial storage attachments, to -// initialise the unit's storage attachment refcount. 
-func (s *Application) unitStorageOps(unitName string, cons map[string]StorageConstraints) (ops []txn.Op, numStorageAttachments int, err error) { - charm, _, err := s.Charm() - if err != nil { - return nil, -1, err - } - meta := charm.Meta() - url := charm.URL() - tag := names.NewUnitTag(unitName) - // TODO(wallyworld) - record constraints info in data model - size and pool name - ops, numStorageAttachments, err = createStorageOps( - s.st, tag, meta, url, cons, - s.doc.Series, - false, // unit is not assigned yet; don't create machine storage - ) - if err != nil { - return nil, -1, errors.Trace(err) - } - return ops, numStorageAttachments, nil -} - // AddUnit adds a new principal unit to the service. -func (s *Application) AddUnit() (unit *Unit, err error) { - defer errors.DeferredAnnotatef(&err, "cannot add unit to application %q", s) - name, ops, err := s.addUnitOps("", nil) +func (a *Application) AddUnit() (unit *Unit, err error) { + defer errors.DeferredAnnotatef(&err, "cannot add unit to application %q", a) + name, ops, err := a.addUnitOps("", nil) if err != nil { return nil, err } - if err := s.st.runTransaction(ops); err == txn.ErrAborted { - if alive, err := isAlive(s.st, applicationsC, s.doc.DocID); err != nil { + if err := a.st.runTransaction(ops); err == txn.ErrAborted { + if alive, err := isAlive(a.st, applicationsC, a.doc.DocID); err != nil { return nil, err } else if !alive { - return nil, fmt.Errorf("application is not alive") + return nil, errors.New("application is not alive") } - return nil, fmt.Errorf("inconsistent state") + return nil, errors.New("inconsistent state") } else if err != nil { return nil, err } - return s.st.Unit(name) + return a.st.Unit(name) } // removeUnitOps returns the operations necessary to remove the supplied unit, // assuming the supplied asserts apply to the unit document. 
-func (s *Application) removeUnitOps(u *Unit, asserts bson.D) ([]txn.Op, error) { - ops, err := u.destroyHostOps(s) +func (a *Application) removeUnitOps(u *Unit, asserts bson.D) ([]txn.Op, error) { + ops, err := u.destroyHostOps(a) if err != nil { return nil, err } - portsOps, err := removePortsForUnitOps(s.st, u) + portsOps, err := removePortsForUnitOps(a.st, u) if err != nil { return nil, err } - storageInstanceOps, err := removeStorageInstancesOps(s.st, u.Tag()) + storageInstanceOps, err := removeStorageInstancesOps(a.st, u.Tag()) if err != nil { return nil, err } - resOps, err := removeUnitResourcesOps(s.st, u.doc.Application, u.doc.Name) + resOps, err := removeUnitResourcesOps(a.st, u.doc.Application, u.doc.Name) if err != nil { return nil, errors.Trace(err) } @@ -1169,17 +1244,17 @@ Assert: append(observedFieldsMatch, asserts...), Remove: true, }, - removeMeterStatusOp(s.st, u.globalMeterStatusKey()), - removeStatusOp(s.st, u.globalAgentKey()), - removeStatusOp(s.st, u.globalKey()), - removeConstraintsOp(s.st, u.globalAgentKey()), - annotationRemoveOp(s.st, u.globalKey()), - s.st.newCleanupOp(cleanupRemovedUnit, u.doc.Name), + removeMeterStatusOp(a.st, u.globalMeterStatusKey()), + removeStatusOp(a.st, u.globalAgentKey()), + removeStatusOp(a.st, u.globalKey()), + removeConstraintsOp(a.st, u.globalAgentKey()), + annotationRemoveOp(a.st, u.globalKey()), + newCleanupOp(cleanupRemovedUnit, u.doc.Name), ) ops = append(ops, portsOps...) ops = append(ops, storageInstanceOps...) if u.doc.CharmURL != nil { - decOps, err := settingsDecRefOps(s.st, s.doc.Name, u.doc.CharmURL) + decOps, err := appCharmDecRefOps(a.st, a.doc.Name, u.doc.CharmURL) if errors.IsNotFound(err) { return nil, errRefresh } else if err != nil { @@ -1187,16 +1262,20 @@ } ops = append(ops, decOps...) 
} - if s.doc.Life == Dying && s.doc.RelationCount == 0 && s.doc.UnitCount == 1 { + if a.doc.Life == Dying && a.doc.RelationCount == 0 && a.doc.UnitCount == 1 { hasLastRef := bson.D{{"life", Dying}, {"relationcount", 0}, {"unitcount", 1}} - return append(ops, s.removeOps(hasLastRef)...), nil + removeOps, err := a.removeOps(hasLastRef) + if err != nil { + return nil, errors.Trace(err) + } + return append(ops, removeOps...), nil } svcOp := txn.Op{ C: applicationsC, - Id: s.doc.DocID, + Id: a.doc.DocID, Update: bson.D{{"$inc", bson.D{{"unitcount", -1}}}}, } - if s.doc.Life == Alive { + if a.doc.Life == Alive { svcOp.Assert = bson.D{{"life", Alive}, {"unitcount", bson.D{{"$gt", 0}}}} } else { svcOp.Assert = bson.D{ @@ -1229,8 +1308,8 @@ } // AllUnits returns all units of the service. -func (s *Application) AllUnits() (units []*Unit, err error) { - return allUnits(s.st, s.doc.Name) +func (a *Application) AllUnits() (units []*Unit, err error) { + return allUnits(a.st, a.doc.Name) } func allUnits(st *State, application string) (units []*Unit, err error) { @@ -1240,7 +1319,7 @@ docs := []unitDoc{} err = unitsCollection.Find(bson.D{{"application", application}}).All(&docs) if err != nil { - return nil, fmt.Errorf("cannot get all units from application %q: %v", application, err) + return nil, errors.Errorf("cannot get all units from application %q: %v", application, err) } for i := range docs { units = append(units, newUnit(st, &docs[i])) @@ -1249,8 +1328,8 @@ } // Relations returns a Relation for every relation the application is in. -func (s *Application) Relations() (relations []*Relation, err error) { - return applicationRelations(s.st, s.doc.Name) +func (a *Application) Relations() (relations []*Relation, err error) { + return applicationRelations(a.st, a.doc.Name) } func applicationRelations(st *State, name string) (relations []*Relation, err error) { @@ -1271,8 +1350,8 @@ // ConfigSettings returns the raw user configuration for the application's charm. 
// Unset values are omitted. -func (s *Application) ConfigSettings() (charm.Settings, error) { - settings, err := readSettings(s.st, settingsC, s.settingsKey()) +func (a *Application) ConfigSettings() (charm.Settings, error) { + settings, err := readSettings(a.st, settingsC, a.settingsKey()) if err != nil { return nil, err } @@ -1281,8 +1360,8 @@ // UpdateConfigSettings changes a service's charm config settings. Values set // to nil will be deleted; unknown and invalid values will return an error. -func (s *Application) UpdateConfigSettings(changes charm.Settings) error { - charm, _, err := s.Charm() +func (a *Application) UpdateConfigSettings(changes charm.Settings) error { + charm, _, err := a.Charm() if err != nil { return err } @@ -1294,7 +1373,7 @@ // about every use case. This needs to be resolved some time; but at // least the settings docs are keyed by charm url as well as service // name, so the actual impact of a race is non-threatening. - node, err := readSettings(s.st, settingsC, s.settingsKey()) + node, err := readSettings(a.st, settingsC, a.settingsKey()) if err != nil { return err } @@ -1311,12 +1390,12 @@ // LeaderSettings returns a service's leader settings. If nothing has been set // yet, it will return an empty map; this is not an error. -func (s *Application) LeaderSettings() (map[string]string, error) { +func (a *Application) LeaderSettings() (map[string]string, error) { // There's no compelling reason to have these methods on Service -- and // thus require an extra db read to access them -- but it stops the State // type getting even more cluttered. 
- doc, err := readSettingsDoc(s.st, settingsC, leadershipSettingsKey(s.doc.Name)) + doc, err := readSettingsDoc(a.st, settingsC, leadershipSettingsKey(a.doc.Name)) if errors.IsNotFound(err) { return nil, errors.NotFoundf("application") } else if err != nil { @@ -1339,7 +1418,7 @@ // UpdateLeaderSettings updates the service's leader settings with the supplied // values, but will fail (with a suitable error) if the supplied Token loses // validity. Empty values in the supplied map will be cleared in the database. -func (s *Application) UpdateLeaderSettings(token leadership.Token, updates map[string]string) error { +func (a *Application) UpdateLeaderSettings(token leadership.Token, updates map[string]string) error { // There's no compelling reason to have these methods on Service -- and // thus require an extra db read to access them -- but it stops the State // type getting even more cluttered. @@ -1347,7 +1426,7 @@ // We can calculate the actual update ahead of time; it's not dependent // upon the current state of the document. (*Writing* it should depend // on document state, but that's handled below.) - key := leadershipSettingsKey(s.doc.Name) + key := leadershipSettingsKey(a.doc.Name) sets := bson.M{} unsets := bson.M{} for unescapedKey, value := range updates { @@ -1378,7 +1457,7 @@ // Read the current document state so we can abort if there's // no actual change; and the version number so we can assert // on it and prevent these settings from landing late. 
- doc, err := readSettingsDoc(s.st, settingsC, key) + doc, err := readSettingsDoc(a.st, settingsC, key) if errors.IsNotFound(err) { return nil, errors.NotFoundf("application") } else if err != nil { @@ -1394,55 +1473,55 @@ Update: update, }}, nil } - return s.st.run(buildTxnWithLeadership(buildTxn, token)) + return a.st.run(buildTxnWithLeadership(buildTxn, token)) } var ErrSubordinateConstraints = stderrors.New("constraints do not apply to subordinate services") // Constraints returns the current application constraints. -func (s *Application) Constraints() (constraints.Value, error) { - if s.doc.Subordinate { +func (a *Application) Constraints() (constraints.Value, error) { + if a.doc.Subordinate { return constraints.Value{}, ErrSubordinateConstraints } - return readConstraints(s.st, s.globalKey()) + return readConstraints(a.st, a.globalKey()) } // SetConstraints replaces the current application constraints. -func (s *Application) SetConstraints(cons constraints.Value) (err error) { - unsupported, err := s.st.validateConstraints(cons) +func (a *Application) SetConstraints(cons constraints.Value) (err error) { + unsupported, err := a.st.validateConstraints(cons) if len(unsupported) > 0 { logger.Warningf( - "setting constraints on application %q: unsupported constraints: %v", s.Name(), strings.Join(unsupported, ",")) + "setting constraints on application %q: unsupported constraints: %v", a.Name(), strings.Join(unsupported, ",")) } else if err != nil { return err } - if s.doc.Subordinate { + if a.doc.Subordinate { return ErrSubordinateConstraints } defer errors.DeferredAnnotatef(&err, "cannot set constraints") - if s.doc.Life != Alive { + if a.doc.Life != Alive { return errNotAlive } ops := []txn.Op{{ C: applicationsC, - Id: s.doc.DocID, + Id: a.doc.DocID, Assert: isAliveDoc, }} - ops = append(ops, setConstraintsOp(s.st, s.globalKey(), cons)) - return onAbort(s.st.runTransaction(ops), errNotAlive) + ops = append(ops, setConstraintsOp(a.st, a.globalKey(), cons)) + 
return onAbort(a.st.runTransaction(ops), errNotAlive) } // EndpointBindings returns the mapping for each endpoint name and the space // name it is bound to (or empty if unspecified). When no bindings are stored // for the application, defaults are returned. -func (s *Application) EndpointBindings() (map[string]string, error) { +func (a *Application) EndpointBindings() (map[string]string, error) { // We don't need the TxnRevno below. - bindings, _, err := readEndpointBindings(s.st, s.globalKey()) + bindings, _, err := readEndpointBindings(a.st, a.globalKey()) if err != nil && !errors.IsNotFound(err) { return nil, errors.Trace(err) } if bindings == nil { - bindings, err = s.defaultEndpointBindings() + bindings, err = a.defaultEndpointBindings() if err != nil { return nil, errors.Trace(err) } @@ -1453,12 +1532,12 @@ // defaultEndpointBindings returns a map with each endpoint from the current // charm metadata bound to an empty space. If no charm URL is set yet, it // returns an empty map. -func (s *Application) defaultEndpointBindings() (map[string]string, error) { - if s.doc.CharmURL == nil { +func (a *Application) defaultEndpointBindings() (map[string]string, error) { + if a.doc.CharmURL == nil { return map[string]string{}, nil } - charm, _, err := s.Charm() + charm, _, err := a.Charm() if err != nil { return nil, errors.Trace(err) } @@ -1467,15 +1546,15 @@ } // MetricCredentials returns any metric credentials associated with this service. -func (s *Application) MetricCredentials() []byte { - return s.doc.MetricCredentials +func (a *Application) MetricCredentials() []byte { + return a.doc.MetricCredentials } // SetMetricCredentials updates the metric credentials associated with this service. 
-func (s *Application) SetMetricCredentials(b []byte) error { +func (a *Application) SetMetricCredentials(b []byte) error { buildTxn := func(attempt int) ([]txn.Op, error) { if attempt > 0 { - alive, err := isAlive(s.st, applicationsC, s.doc.DocID) + alive, err := isAlive(a.st, applicationsC, a.doc.DocID) if err != nil { return nil, errors.Trace(err) } else if !alive { @@ -1485,123 +1564,42 @@ ops := []txn.Op{ { C: applicationsC, - Id: s.doc.DocID, + Id: a.doc.DocID, Assert: isAliveDoc, Update: bson.M{"$set": bson.M{"metric-credentials": b}}, }, } return ops, nil } - if err := s.st.run(buildTxn); err != nil { + if err := a.st.run(buildTxn); err != nil { if err == errNotAlive { return errors.New("cannot update metric credentials: application " + err.Error()) } return errors.Annotatef(err, "cannot update metric credentials") } - s.doc.MetricCredentials = b + a.doc.MetricCredentials = b return nil } -func (s *Application) StorageConstraints() (map[string]StorageConstraints, error) { - return readStorageConstraints(s.st, s.globalKey()) -} - -// settingsIncRefOp returns an operation that increments the ref count -// of the application settings identified by applicationname and curl. If -// canCreate is false, a missing document will be treated as an error; -// otherwise, it will be created with a ref count of 1. 
-func settingsIncRefOp(st *State, applicationname string, curl *charm.URL, canCreate bool) (txn.Op, error) { - settingsrefs, closer := st.getCollection(settingsrefsC) - defer closer() - - key := applicationSettingsKey(applicationname, curl) - if count, err := settingsrefs.FindId(key).Count(); err != nil { - return txn.Op{}, err - } else if count == 0 { - if !canCreate { - return txn.Op{}, errors.NotFoundf("application %q settings for charm %q", applicationname, curl) - } - return txn.Op{ - C: settingsrefsC, - Id: st.docID(key), - Assert: txn.DocMissing, - Insert: settingsRefsDoc{ - RefCount: 1, - ModelUUID: st.ModelUUID()}, - }, nil - } - return txn.Op{ - C: settingsrefsC, - Id: st.docID(key), - Assert: txn.DocExists, - Update: bson.D{{"$inc", bson.D{{"refcount", 1}}}}, - }, nil -} - -// settingsDecRefOps returns a list of operations that decrement the -// ref count of the application settings identified by applicationname and -// curl. If the ref count is set to zero, the appropriate setting and -// ref count documents will both be deleted. -func settingsDecRefOps(st *State, applicationname string, curl *charm.URL) ([]txn.Op, error) { - settingsrefs, closer := st.getCollection(settingsrefsC) - defer closer() - - key := applicationSettingsKey(applicationname, curl) - var doc settingsRefsDoc - if err := settingsrefs.FindId(key).One(&doc); err == mgo.ErrNotFound { - return nil, errors.NotFoundf("application %q settings for charm %q", applicationname, curl) +// StorageConstraints returns the storage constraints for the application. 
+func (a *Application) StorageConstraints() (map[string]StorageConstraints, error) { + cons, err := readStorageConstraints(a.st, a.storageConstraintsKey()) + if errors.IsNotFound(err) { + return nil, nil } else if err != nil { - return nil, err - } - docID := st.docID(key) - if doc.RefCount == 1 { - return []txn.Op{{ - C: settingsrefsC, - Id: docID, - Assert: bson.D{{"refcount", 1}}, - Remove: true, - }, { - C: settingsC, - Id: docID, - Remove: true, - }}, nil + return nil, errors.Trace(err) } - return []txn.Op{{ - C: settingsrefsC, - Id: docID, - Assert: bson.D{{"refcount", bson.D{{"$gt", 1}}}}, - Update: bson.D{{"$inc", bson.D{{"refcount", -1}}}}, - }}, nil -} - -// settingsRefsDoc holds the number of units and services using the -// settings document identified by the document's id. Every time a -// application upgrades its charm the settings doc ref count for the new -// charm url is incremented, and the old settings is ref count is -// decremented. When a unit upgrades to the new charm, the old service -// settings ref count is decremented and the ref count of the new -// charm settings is incremented. The last unit upgrading to the new -// charm is responsible for deleting the old charm's settings doc. -// -// Note: We're not using the settingsDoc for this because changing -// just the ref count is not considered a change worth reporting -// to watchers and firing config-changed hooks. -// -// There is an implicit _id field here, which mongo creates, which is -// always the same as the settingsDoc's id. -type settingsRefsDoc struct { - RefCount int - ModelUUID string `bson:"model-uuid"` + return cons, nil } // Status returns the status of the service. // Only unit leaders are allowed to set the status of the service. // If no status is recorded, then there are no unit leaders and the // status is derived from the unit status values. 
-func (s *Application) Status() (status.StatusInfo, error) { - statuses, closer := s.st.getCollection(statusesC) +func (a *Application) Status() (status.StatusInfo, error) { + statuses, closer := a.st.getCollection(statusesC) defer closer() - query := statuses.Find(bson.D{{"_id", s.globalKey()}, {"neverset", true}}) + query := statuses.Find(bson.D{{"_id", a.globalKey()}, {"neverset", true}}) if count, err := query.Count(); err != nil { return status.StatusInfo{}, errors.Trace(err) } else if count != 0 { @@ -1615,26 +1613,26 @@ // charm's hooks exists or sets an application status*. This logic should be // removed as soon as possible, and the responsibilities implemented in // the right places rather than being applied at seeming random. - units, err := s.AllUnits() + units, err := a.AllUnits() if err != nil { return status.StatusInfo{}, err } - logger.Tracef("service %q has %d units", s.Name(), len(units)) + logger.Tracef("service %q has %d units", a.Name(), len(units)) if len(units) > 0 { - return s.deriveStatus(units) + return a.deriveStatus(units) } } - return getStatus(s.st, s.globalKey(), "application") + return getStatus(a.st, a.globalKey(), "application") } // SetStatus sets the status for the application. -func (s *Application) SetStatus(statusInfo status.StatusInfo) error { +func (a *Application) SetStatus(statusInfo status.StatusInfo) error { if !status.ValidWorkloadStatus(statusInfo.Status) { return errors.Errorf("cannot set invalid status %q", statusInfo.Status) } - return setStatus(s.st, setStatusParams{ + return setStatus(a.st, setStatusParams{ badge: "application", - globalKey: s.globalKey(), + globalKey: a.globalKey(), status: statusInfo.Status, message: statusInfo.Message, rawData: statusInfo.Data, @@ -1645,22 +1643,22 @@ // StatusHistory returns a slice of at most filter.Size StatusInfo items // or items as old as filter.Date or items newer than now - filter.Delta time // representing past statuses for this service. 
-func (s *Application) StatusHistory(filter status.StatusHistoryFilter) ([]status.StatusInfo, error) { +func (a *Application) StatusHistory(filter status.StatusHistoryFilter) ([]status.StatusInfo, error) { args := &statusHistoryArgs{ - st: s.st, - globalKey: s.globalKey(), + st: a.st, + globalKey: a.globalKey(), filter: filter, } return statusHistory(args) } // ServiceAndUnitsStatus returns the status for this application and all its units. -func (s *Application) ServiceAndUnitsStatus() (status.StatusInfo, map[string]status.StatusInfo, error) { - serviceStatus, err := s.Status() +func (a *Application) ServiceAndUnitsStatus() (status.StatusInfo, map[string]status.StatusInfo, error) { + serviceStatus, err := a.Status() if err != nil { return status.StatusInfo{}, nil, errors.Trace(err) } - units, err := s.AllUnits() + units, err := a.AllUnits() if err != nil { return status.StatusInfo{}, nil, err } @@ -1676,7 +1674,7 @@ } -func (s *Application) deriveStatus(units []*Unit) (status.StatusInfo, error) { +func (a *Application) deriveStatus(units []*Unit) (status.StatusInfo, error) { var result status.StatusInfo for _, unit := range units { currentSeverity := statusServerities[result.Status] @@ -1698,22 +1696,21 @@ // statusSeverities holds status values with a severity measure. // Status values with higher severity are used in preference to others. 
var statusServerities = map[status.Status]int{ - status.StatusError: 100, - status.StatusBlocked: 90, - status.StatusWaiting: 80, - status.StatusMaintenance: 70, - status.StatusTerminated: 60, - status.StatusActive: 50, - status.StatusUnknown: 40, + status.Error: 100, + status.Blocked: 90, + status.Waiting: 80, + status.Maintenance: 70, + status.Terminated: 60, + status.Active: 50, + status.Unknown: 40, } type addApplicationOpsArgs struct { - applicationDoc *applicationDoc - statusDoc statusDoc - constraints constraints.Value - storage map[string]StorageConstraints - settings map[string]interface{} - settingsRefCount int + applicationDoc *applicationDoc + statusDoc statusDoc + constraints constraints.Value + storage map[string]StorageConstraints + settings map[string]interface{} // These are nil when adding a new service, and most likely // non-nil when migrating. leadershipSettings map[string]interface{} @@ -1723,32 +1720,33 @@ // services collection, along with all the associated expected other service // entries. This method is used by both the *State.AddService method and the // migration import code. 
-func addApplicationOps(st *State, args addApplicationOpsArgs) []txn.Op { +func addApplicationOps(st *State, args addApplicationOpsArgs) ([]txn.Op, error) { svc := newApplication(st, args.applicationDoc) + charmRefOps, err := appCharmIncRefOps(st, args.applicationDoc.Name, args.applicationDoc.CharmURL, true) + if err != nil { + return nil, errors.Trace(err) + } + globalKey := svc.globalKey() settingsKey := svc.settingsKey() + storageConstraintsKey := svc.storageConstraintsKey() leadershipKey := leadershipSettingsKey(svc.Name()) - return []txn.Op{ + ops := []txn.Op{ createConstraintsOp(st, globalKey, args.constraints), - createStorageConstraintsOp(globalKey, args.storage), + createStorageConstraintsOp(storageConstraintsKey, args.storage), createSettingsOp(settingsC, settingsKey, args.settings), createSettingsOp(settingsC, leadershipKey, args.leadershipSettings), createStatusOp(st, globalKey, args.statusDoc), addModelServiceRefOp(st, svc.Name()), - { - C: settingsrefsC, - Id: settingsKey, - Assert: txn.DocMissing, - Insert: settingsRefsDoc{ - RefCount: args.settingsRefCount, - }, - }, { - C: applicationsC, - Id: svc.Name(), - Assert: txn.DocMissing, - Insert: args.applicationDoc, - }, } + ops = append(ops, charmRefOps...) 
+ ops = append(ops, txn.Op{ + C: applicationsC, + Id: svc.Name(), + Assert: txn.DocMissing, + Insert: args.applicationDoc, + }) + return ops, nil } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/application_test.go juju-core-2.0.0/src/github.com/juju/juju/state/application_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/application_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/application_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -6,7 +6,6 @@ import ( "fmt" "sort" - "time" "github.com/juju/errors" "github.com/juju/loggo" @@ -15,7 +14,6 @@ "github.com/juju/utils/set" gc "gopkg.in/check.v1" "gopkg.in/juju/charm.v6-unstable" - "gopkg.in/mgo.v2" "gopkg.in/mgo.v2/bson" "gopkg.in/mgo.v2/txn" @@ -23,18 +21,19 @@ "github.com/juju/juju/state" "github.com/juju/juju/state/testing" "github.com/juju/juju/status" + coretesting "github.com/juju/juju/testing" "github.com/juju/juju/testing/factory" ) -type ServiceSuite struct { +type ApplicationSuite struct { ConnSuite charm *state.Charm mysql *state.Application } -var _ = gc.Suite(&ServiceSuite{}) +var _ = gc.Suite(&ApplicationSuite{}) -func (s *ServiceSuite) SetUpTest(c *gc.C) { +func (s *ApplicationSuite) SetUpTest(c *gc.C) { s.ConnSuite.SetUpTest(c) s.policy.GetConstraintsValidator = func() (constraints.Validator, error) { validator := constraints.NewValidator() @@ -46,7 +45,7 @@ s.mysql = s.AddTestingService(c, "mysql", s.charm) } -func (s *ServiceSuite) TestSetCharm(c *gc.C) { +func (s *ApplicationSuite) TestSetCharm(c *gc.C) { ch, force, err := s.mysql.Charm() c.Assert(err, jc.ErrorIsNil) c.Assert(ch.URL(), gc.DeepEquals, s.charm.URL()) @@ -73,7 +72,43 @@ c.Assert(force, jc.IsTrue) } -func (s *ServiceSuite) TestSetCharmLegacy(c *gc.C) { +func (s *ApplicationSuite) TestSetCharmCharmSettings(c *gc.C) { + newCh := s.AddConfigCharm(c, "mysql", stringConfig, 2) + err := s.mysql.SetCharm(state.SetCharmConfig{ + Charm: newCh, + ConfigSettings: 
charm.Settings{"key": "value"}, + }) + c.Assert(err, jc.ErrorIsNil) + + settings, err := s.mysql.ConfigSettings() + c.Assert(err, jc.ErrorIsNil) + c.Assert(settings, jc.DeepEquals, charm.Settings{"key": "value"}) + + newCh = s.AddConfigCharm(c, "mysql", newStringConfig, 3) + err = s.mysql.SetCharm(state.SetCharmConfig{ + Charm: newCh, + ConfigSettings: charm.Settings{"other": "one"}, + }) + c.Assert(err, jc.ErrorIsNil) + + settings, err = s.mysql.ConfigSettings() + c.Assert(err, jc.ErrorIsNil) + c.Assert(settings, jc.DeepEquals, charm.Settings{ + "key": "value", + "other": "one", + }) +} + +func (s *ApplicationSuite) TestSetCharmCharmSettingsInvalid(c *gc.C) { + newCh := s.AddConfigCharm(c, "mysql", stringConfig, 2) + err := s.mysql.SetCharm(state.SetCharmConfig{ + Charm: newCh, + ConfigSettings: charm.Settings{"key": 123.45}, + }) + c.Assert(err, gc.ErrorMatches, `cannot upgrade application "mysql" to charm "local:quantal/quantal-mysql-2": validating config settings: option "key" expected string, got 123.45`) +} + +func (s *ApplicationSuite) TestSetCharmLegacy(c *gc.C) { chDifferentSeries := state.AddTestingCharmForSeries(c, s.State, "precise", "mysql") cfg := state.SetCharmConfig{ @@ -81,10 +116,10 @@ ForceSeries: true, } err := s.mysql.SetCharm(cfg) - c.Assert(err, gc.ErrorMatches, "cannot change a service's series") + c.Assert(err, gc.ErrorMatches, `cannot upgrade application "mysql" to charm "local:precise/precise-mysql-1": cannot change an application's series`) } -func (s *ServiceSuite) TestClientServiceSetCharmUnsupportedSeries(c *gc.C) { +func (s *ApplicationSuite) TestClientServiceSetCharmUnsupportedSeries(c *gc.C) { ch := state.AddTestingCharmMultiSeries(c, s.State, "multi-series") svc := state.AddTestingServiceForSeries(c, s.State, "precise", "application", ch) @@ -93,10 +128,10 @@ Charm: chDifferentSeries, } err := svc.SetCharm(cfg) - c.Assert(err, gc.ErrorMatches, "cannot upgrade charm, only these series are supported: trusty, wily") + c.Assert(err, 
gc.ErrorMatches, `cannot upgrade application "application" to charm "cs:multi-series2-8": only these series are supported: trusty, wily`) } -func (s *ServiceSuite) TestClientServiceSetCharmUnsupportedSeriesForce(c *gc.C) { +func (s *ApplicationSuite) TestClientServiceSetCharmUnsupportedSeriesForce(c *gc.C) { ch := state.AddTestingCharmMultiSeries(c, s.State, "multi-series") svc := state.AddTestingServiceForSeries(c, s.State, "precise", "application", ch) @@ -114,7 +149,7 @@ c.Assert(ch.URL().String(), gc.Equals, "cs:multi-series2-8") } -func (s *ServiceSuite) TestClientServiceSetCharmWrongOS(c *gc.C) { +func (s *ApplicationSuite) TestClientServiceSetCharmWrongOS(c *gc.C) { ch := state.AddTestingCharmMultiSeries(c, s.State, "multi-series") svc := state.AddTestingServiceForSeries(c, s.State, "precise", "application", ch) @@ -124,22 +159,22 @@ ForceSeries: true, } err := svc.SetCharm(cfg) - c.Assert(err, gc.ErrorMatches, `cannot upgrade charm, OS "Ubuntu" not supported by charm`) + c.Assert(err, gc.ErrorMatches, `cannot upgrade application "application" to charm "cs:multi-series-windows-1": OS "Ubuntu" not supported by charm`) } -func (s *ServiceSuite) TestSetCharmPreconditions(c *gc.C) { +func (s *ApplicationSuite) TestSetCharmPreconditions(c *gc.C) { logging := s.AddTestingCharm(c, "logging") cfg := state.SetCharmConfig{Charm: logging} err := s.mysql.SetCharm(cfg) - c.Assert(err, gc.ErrorMatches, "cannot change a service's subordinacy") + c.Assert(err, gc.ErrorMatches, `cannot upgrade application "mysql" to charm "local:quantal/quantal-logging-1": cannot change an application's subordinacy`) othermysql := s.AddSeriesCharm(c, "mysql", "otherseries") cfg2 := state.SetCharmConfig{Charm: othermysql} err = s.mysql.SetCharm(cfg2) - c.Assert(err, gc.ErrorMatches, "cannot change a service's series") + c.Assert(err, gc.ErrorMatches, `cannot upgrade application "mysql" to charm "local:otherseries/otherseries-mysql-1": cannot change an application's series`) } -func (s 
*ServiceSuite) TestSetCharmUpdatesBindings(c *gc.C) { +func (s *ApplicationSuite) TestSetCharmUpdatesBindings(c *gc.C) { _, err := s.State.AddSpace("db", "", nil, false) c.Assert(err, jc.ErrorIsNil) _, err = s.State.AddSpace("client", "", nil, true) @@ -173,7 +208,7 @@ }) } -func (s *ServiceSuite) TestSetCharmWithWeirdlyNamedEndpoints(c *gc.C) { +func (s *ApplicationSuite) TestSetCharmWithWeirdlyNamedEndpoints(c *gc.C) { // This test ensures if special characters appear in endpoint names of the // charm metadata, they are properly escaped before saving to mongo, and // unescaped when read back. @@ -328,7 +363,7 @@ meta: metaExtraEndpoints, }} -func (s *ServiceSuite) TestSetCharmChecksEndpointsWithoutRelations(c *gc.C) { +func (s *ApplicationSuite) TestSetCharmChecksEndpointsWithoutRelations(c *gc.C) { revno := 2 ms := s.AddMetaCharm(c, "mysql", metaBase, revno) svc := s.AddTestingService(c, "fakemysql", ms) @@ -353,7 +388,7 @@ c.Assert(err, jc.ErrorIsNil) } -func (s *ServiceSuite) TestSetCharmChecksEndpointsWithRelations(c *gc.C) { +func (s *ApplicationSuite) TestSetCharmChecksEndpointsWithRelations(c *gc.C) { revno := 2 providerCharm := s.AddMetaCharm(c, "mysql", metaDifferentProvider, revno) providerSvc := s.AddTestingService(c, "myprovider", providerCharm) @@ -442,7 +477,7 @@ endconfig: floatConfig, }} -func (s *ServiceSuite) TestSetCharmConfig(c *gc.C) { +func (s *ApplicationSuite) TestSetCharmConfig(c *gc.C) { charms := map[string]*state.Charm{ stringConfig: s.AddConfigCharm(c, "wordpress", stringConfig, 1), emptyConfig: s.AddConfigCharm(c, "wordpress", emptyConfig, 2), @@ -489,7 +524,7 @@ } } -func (s *ServiceSuite) TestSetCharmWithDyingService(c *gc.C) { +func (s *ApplicationSuite) TestSetCharmWithDyingService(c *gc.C) { sch := s.AddMetaCharm(c, "mysql", metaBase, 2) _, err := s.mysql.AddUnit() @@ -505,7 +540,7 @@ c.Assert(err, jc.ErrorIsNil) } -func (s *ServiceSuite) TestSequenceUnitIdsAfterDestroy(c *gc.C) { +func (s *ApplicationSuite) 
TestSequenceUnitIdsAfterDestroy(c *gc.C) { unit, err := s.mysql.AddUnit() c.Assert(err, jc.ErrorIsNil) c.Assert(unit.Name(), gc.Equals, "mysql/0") @@ -519,7 +554,7 @@ c.Assert(unit.Name(), gc.Equals, "mysql/1") } -func (s *ServiceSuite) TestSequenceUnitIds(c *gc.C) { +func (s *ApplicationSuite) TestSequenceUnitIds(c *gc.C) { unit, err := s.mysql.AddUnit() c.Assert(err, jc.ErrorIsNil) c.Assert(unit.Name(), gc.Equals, "mysql/0") @@ -528,7 +563,7 @@ c.Assert(unit.Name(), gc.Equals, "mysql/1") } -func (s *ServiceSuite) TestSetCharmWhenDead(c *gc.C) { +func (s *ApplicationSuite) TestSetCharmWhenDead(c *gc.C) { sch := s.AddMetaCharm(c, "mysql", metaBase, 2) defer state.SetBeforeHooks(c, s.State, func() { @@ -555,10 +590,10 @@ ForceUnits: true, } err := s.mysql.SetCharm(cfg) - c.Assert(err, gc.Equals, state.ErrDead) + c.Assert(errors.Cause(err), gc.Equals, state.ErrDead) } -func (s *ServiceSuite) TestSetCharmWithRemovedService(c *gc.C) { +func (s *ApplicationSuite) TestSetCharmWithRemovedService(c *gc.C) { sch := s.AddMetaCharm(c, "mysql", metaBase, 2) err := s.mysql.Destroy() @@ -571,10 +606,10 @@ } err = s.mysql.SetCharm(cfg) - c.Assert(err, gc.Equals, state.ErrDead) + c.Assert(err, jc.Satisfies, errors.IsNotFound) } -func (s *ServiceSuite) TestSetCharmWhenRemoved(c *gc.C) { +func (s *ApplicationSuite) TestSetCharmWhenRemoved(c *gc.C) { sch := s.AddMetaCharm(c, "mysql", metaBase, 2) defer state.SetBeforeHooks(c, s.State, func() { @@ -588,10 +623,10 @@ ForceUnits: true, } err := s.mysql.SetCharm(cfg) - c.Assert(err, gc.Equals, state.ErrDead) + c.Assert(err, jc.Satisfies, errors.IsNotFound) } -func (s *ServiceSuite) TestSetCharmWhenDyingIsOK(c *gc.C) { +func (s *ApplicationSuite) TestSetCharmWhenDyingIsOK(c *gc.C) { sch := s.AddMetaCharm(c, "mysql", metaBase, 2) defer state.SetBeforeHooks(c, s.State, func() { @@ -611,7 +646,7 @@ assertLife(c, s.mysql, state.Dying) } -func (s *ServiceSuite) TestSetCharmRetriesWithSameCharmURL(c *gc.C) { +func (s *ApplicationSuite) 
TestSetCharmRetriesWithSameCharmURL(c *gc.C) { sch := s.AddMetaCharm(c, "mysql", metaBase, 2) defer state.SetTestHooks(c, s.State, @@ -656,7 +691,7 @@ c.Assert(err, jc.ErrorIsNil) } -func (s *ServiceSuite) TestSetCharmRetriesWhenOldSettingsChanged(c *gc.C) { +func (s *ApplicationSuite) TestSetCharmRetriesWhenOldSettingsChanged(c *gc.C) { revno := 2 // revno 1 is used by SetUpSuite oldCh := s.AddConfigCharm(c, "mysql", stringConfig, revno) newCh := s.AddConfigCharm(c, "mysql", stringConfig, revno+1) @@ -680,7 +715,7 @@ c.Assert(err, jc.ErrorIsNil) } -func (s *ServiceSuite) TestSetCharmRetriesWhenBothOldAndNewSettingsChanged(c *gc.C) { +func (s *ApplicationSuite) TestSetCharmRetriesWhenBothOldAndNewSettingsChanged(c *gc.C) { revno := 2 // revno 1 is used by SetUpSuite oldCh := s.AddConfigCharm(c, "mysql", stringConfig, revno) newCh := s.AddConfigCharm(c, "mysql", stringConfig, revno+1) @@ -792,7 +827,7 @@ c.Assert(err, jc.ErrorIsNil) } -func (s *ServiceSuite) TestSetCharmRetriesWhenOldBindingsChanged(c *gc.C) { +func (s *ApplicationSuite) TestSetCharmRetriesWhenOldBindingsChanged(c *gc.C) { revno := 2 // revno 1 is used by SetUpSuite mysqlKey := state.ApplicationGlobalKey(s.mysql.Name()) oldCharm := s.AddMetaCharm(c, "mysql", metaDifferentRequirer, revno) @@ -917,7 +952,7 @@ update: charm.Settings{"skill-level": nil}, }} -func (s *ServiceSuite) TestUpdateConfigSettings(c *gc.C) { +func (s *ApplicationSuite) TestUpdateConfigSettings(c *gc.C) { sch := s.AddTestingCharm(c, "dummy") for i, t := range serviceUpdateConfigSettingsTests { c.Logf("test %d. 
%s", i, t.about) @@ -946,7 +981,7 @@ func assertNoSettingsRef(c *gc.C, st *state.State, svcName string, sch *state.Charm) { _, err := state.ServiceSettingsRefCount(st, svcName, sch.URL()) - c.Assert(err, gc.Equals, mgo.ErrNotFound) + c.Assert(errors.Cause(err), jc.Satisfies, errors.IsNotFound) } func assertSettingsRef(c *gc.C, st *state.State, svcName string, sch *state.Charm, refcount int) { @@ -955,7 +990,7 @@ c.Assert(rc, gc.Equals, refcount) } -func (s *ServiceSuite) TestSettingsRefCountWorks(c *gc.C) { +func (s *ApplicationSuite) TestSettingsRefCountWorks(c *gc.C) { // This test ensures the service settings per charm URL are // properly reference counted. oldCh := s.AddConfigCharm(c, "wordpress", emptyConfig, 1) @@ -1032,6 +1067,68 @@ c.Assert(err, jc.ErrorIsNil) assertNoSettingsRef(c, s.State, svcName, oldCh) assertNoSettingsRef(c, s.State, svcName, newCh) + + // Having studiously avoided triggering cleanups throughout, + // invoke them now and check that the charms are cleaned up + // correctly -- and that a storm of cleanups for the same + // charm are not a problem. + err = s.State.Cleanup() + c.Assert(err, jc.ErrorIsNil) + err = oldCh.Refresh() + c.Assert(err, jc.Satisfies, errors.IsNotFound) + err = newCh.Refresh() + c.Assert(err, jc.Satisfies, errors.IsNotFound) +} + +func (s *ApplicationSuite) TestSettingsRefCreateRace(c *gc.C) { + oldCh := s.AddConfigCharm(c, "wordpress", emptyConfig, 1) + newCh := s.AddConfigCharm(c, "wordpress", emptyConfig, 2) + appName := "mywp" + + app := s.AddTestingService(c, appName, oldCh) + unit, err := app.AddUnit() + c.Assert(err, jc.ErrorIsNil) + + // just before setting the unit charm url, switch the service + // away from the original charm, causing the attempt to fail + // (because the settings have gone away; it's the unit's job to + // fail out and handle the new charm when it comes back up + // again). 
+ dropSettings := func() { + cfg := state.SetCharmConfig{Charm: newCh} + err = app.SetCharm(cfg) + c.Assert(err, jc.ErrorIsNil) + } + defer state.SetBeforeHooks(c, s.State, dropSettings).Check() + + err = unit.SetCharmURL(oldCh.URL()) + c.Check(err, gc.ErrorMatches, "settings reference: does not exist") +} + +func (s *ApplicationSuite) TestSettingsRefRemoveRace(c *gc.C) { + oldCh := s.AddConfigCharm(c, "wordpress", emptyConfig, 1) + newCh := s.AddConfigCharm(c, "wordpress", emptyConfig, 2) + appName := "mywp" + + app := s.AddTestingService(c, appName, oldCh) + unit, err := app.AddUnit() + c.Assert(err, jc.ErrorIsNil) + + // just before updating the app charm url, set that charm url on + // a unit to block the removal. + grabReference := func() { + err := unit.SetCharmURL(oldCh.URL()) + c.Assert(err, jc.ErrorIsNil) + } + defer state.SetBeforeHooks(c, s.State, grabReference).Check() + + cfg := state.SetCharmConfig{Charm: newCh} + err = app.SetCharm(cfg) + c.Assert(err, jc.ErrorIsNil) + + // check refs to both settings exist + assertSettingsRef(c, s.State, appName, oldCh, 1) + assertSettingsRef(c, s.State, appName, newCh, 1) } const mysqlBaseMeta = ` @@ -1051,7 +1148,7 @@ loadbalancer: phony ` -func (s *ServiceSuite) assertServiceRelations(c *gc.C, svc *state.Application, expectedKeys ...string) []*state.Relation { +func (s *ApplicationSuite) assertApplicationRelations(c *gc.C, svc *state.Application, expectedKeys ...string) []*state.Relation { rels, err := svc.Relations() c.Assert(err, jc.ErrorIsNil) if len(rels) == 0 { @@ -1066,23 +1163,23 @@ return rels } -func (s *ServiceSuite) TestNewPeerRelationsAddedOnUpgrade(c *gc.C) { +func (s *ApplicationSuite) TestNewPeerRelationsAddedOnUpgrade(c *gc.C) { // Original mysql charm has no peer relations. oldCh := s.AddMetaCharm(c, "mysql", mysqlBaseMeta+onePeerMeta, 2) newCh := s.AddMetaCharm(c, "mysql", mysqlBaseMeta+twoPeersMeta, 3) // No relations joined yet. 
- s.assertServiceRelations(c, s.mysql) + s.assertApplicationRelations(c, s.mysql) cfg := state.SetCharmConfig{Charm: oldCh} err := s.mysql.SetCharm(cfg) c.Assert(err, jc.ErrorIsNil) - s.assertServiceRelations(c, s.mysql, "mysql:cluster") + s.assertApplicationRelations(c, s.mysql, "mysql:cluster") cfg = state.SetCharmConfig{Charm: newCh} err = s.mysql.SetCharm(cfg) c.Assert(err, jc.ErrorIsNil) - rels := s.assertServiceRelations(c, s.mysql, "mysql:cluster", "mysql:loadbalancer") + rels := s.assertApplicationRelations(c, s.mysql, "mysql:cluster", "mysql:loadbalancer") // Check state consistency by attempting to destroy the service. err = s.mysql.Destroy() @@ -1107,11 +1204,11 @@ } } -func (s *ServiceSuite) TestTag(c *gc.C) { +func (s *ApplicationSuite) TestTag(c *gc.C) { c.Assert(s.mysql.Tag().String(), gc.Equals, "application-mysql") } -func (s *ServiceSuite) TestMysqlEndpoints(c *gc.C) { +func (s *ApplicationSuite) TestMysqlEndpoints(c *gc.C) { _, err := s.mysql.Endpoint("mysql") c.Assert(err, gc.ErrorMatches, `application "mysql" has no "mysql" relation`) @@ -1136,7 +1233,7 @@ c.Assert(eps, gc.DeepEquals, []state.Endpoint{jiEP, serverEP}) } -func (s *ServiceSuite) TestRiakEndpoints(c *gc.C) { +func (s *ApplicationSuite) TestRiakEndpoints(c *gc.C) { riak := s.AddTestingService(c, "myriak", s.AddTestingCharm(c, "riak")) _, err := riak.Endpoint("garble") @@ -1188,7 +1285,7 @@ c.Assert(eps, gc.DeepEquals, []state.Endpoint{adminEP, endpointEP, jiEP, ringEP}) } -func (s *ServiceSuite) TestWordpressEndpoints(c *gc.C) { +func (s *ApplicationSuite) TestWordpressEndpoints(c *gc.C) { wordpress := s.AddTestingService(c, "wordpress", s.AddTestingCharm(c, "wordpress")) _, err := wordpress.Endpoint("nonsense") @@ -1266,7 +1363,7 @@ c.Assert(eps, gc.DeepEquals, []state.Endpoint{cacheEP, dbEP, jiEP, ldEP, mpEP, urlEP}) } -func (s *ServiceSuite) TestServiceRefresh(c *gc.C) { +func (s *ApplicationSuite) TestServiceRefresh(c *gc.C) { s1, err := s.State.Application(s.mysql.Name()) 
c.Assert(err, jc.ErrorIsNil) @@ -1296,7 +1393,7 @@ c.Assert(err, jc.Satisfies, errors.IsNotFound) } -func (s *ServiceSuite) TestServiceExposed(c *gc.C) { +func (s *ApplicationSuite) TestServiceExposed(c *gc.C) { // Check that querying for the exposed flag works correctly. c.Assert(s.mysql.IsExposed(), jc.IsFalse) @@ -1343,7 +1440,7 @@ c.Assert(err, gc.ErrorMatches, notAliveErr) } -func (s *ServiceSuite) TestAddUnit(c *gc.C) { +func (s *ApplicationSuite) TestAddUnit(c *gc.C) { // Check that principal units can be added on their own. unitZero, err := s.mysql.AddUnit() c.Assert(err, jc.ErrorIsNil) @@ -1395,7 +1492,7 @@ c.Assert(id, gc.Equals, m.Id()) } -func (s *ServiceSuite) TestAddUnitWhenNotAlive(c *gc.C) { +func (s *ApplicationSuite) TestAddUnitWhenNotAlive(c *gc.C) { u, err := s.mysql.AddUnit() c.Assert(err, jc.ErrorIsNil) err = s.mysql.Destroy() @@ -1410,7 +1507,7 @@ c.Assert(err, gc.ErrorMatches, `cannot add unit to application "mysql": application "mysql" not found`) } -func (s *ServiceSuite) TestReadUnit(c *gc.C) { +func (s *ApplicationSuite) TestReadUnit(c *gc.C) { _, err := s.mysql.AddUnit() c.Assert(err, jc.ErrorIsNil) _, err = s.mysql.AddUnit() @@ -1435,7 +1532,7 @@ c.Assert(sortedUnitNames(units), gc.DeepEquals, []string{"mysql/0", "mysql/1"}) } -func (s *ServiceSuite) TestReadUnitWhenDying(c *gc.C) { +func (s *ApplicationSuite) TestReadUnitWhenDying(c *gc.C) { // Test that we can still read units when the service is Dying... 
unit, err := s.mysql.AddUnit() c.Assert(err, jc.ErrorIsNil) @@ -1462,7 +1559,7 @@ c.Assert(err, jc.ErrorIsNil) } -func (s *ServiceSuite) TestDestroySimple(c *gc.C) { +func (s *ApplicationSuite) TestDestroySimple(c *gc.C) { err := s.mysql.Destroy() c.Assert(err, jc.ErrorIsNil) c.Assert(s.mysql.Life(), gc.Equals, state.Dying) @@ -1470,7 +1567,7 @@ c.Assert(err, jc.Satisfies, errors.IsNotFound) } -func (s *ServiceSuite) TestDestroyStillHasUnits(c *gc.C) { +func (s *ApplicationSuite) TestDestroyStillHasUnits(c *gc.C) { unit, err := s.mysql.AddUnit() c.Assert(err, jc.ErrorIsNil) err = s.mysql.Destroy() @@ -1489,7 +1586,7 @@ c.Assert(err, jc.Satisfies, errors.IsNotFound) } -func (s *ServiceSuite) TestDestroyOnceHadUnits(c *gc.C) { +func (s *ApplicationSuite) TestDestroyOnceHadUnits(c *gc.C) { unit, err := s.mysql.AddUnit() c.Assert(err, jc.ErrorIsNil) err = unit.EnsureDead() @@ -1504,7 +1601,7 @@ c.Assert(err, jc.Satisfies, errors.IsNotFound) } -func (s *ServiceSuite) TestDestroyStaleNonZeroUnitCount(c *gc.C) { +func (s *ApplicationSuite) TestDestroyStaleNonZeroUnitCount(c *gc.C) { unit, err := s.mysql.AddUnit() c.Assert(err, jc.ErrorIsNil) err = s.mysql.Refresh() @@ -1521,7 +1618,7 @@ c.Assert(err, jc.Satisfies, errors.IsNotFound) } -func (s *ServiceSuite) TestDestroyStaleZeroUnitCount(c *gc.C) { +func (s *ApplicationSuite) TestDestroyStaleZeroUnitCount(c *gc.C) { unit, err := s.mysql.AddUnit() c.Assert(err, jc.ErrorIsNil) @@ -1545,7 +1642,7 @@ c.Assert(err, jc.Satisfies, errors.IsNotFound) } -func (s *ServiceSuite) TestDestroyWithRemovableRelation(c *gc.C) { +func (s *ApplicationSuite) TestDestroyWithRemovableRelation(c *gc.C) { wordpress := s.AddTestingService(c, "wordpress", s.AddTestingCharm(c, "wordpress")) eps, err := s.State.InferEndpoints("wordpress", "mysql") c.Assert(err, jc.ErrorIsNil) @@ -1562,15 +1659,15 @@ c.Assert(err, jc.Satisfies, errors.IsNotFound) } -func (s *ServiceSuite) TestDestroyWithReferencedRelation(c *gc.C) { +func (s *ApplicationSuite) 
TestDestroyWithReferencedRelation(c *gc.C) { s.assertDestroyWithReferencedRelation(c, true) } -func (s *ServiceSuite) TestDestroyWithReferencedRelationStaleCount(c *gc.C) { +func (s *ApplicationSuite) TestDestroyWithReferencedRelationStaleCount(c *gc.C) { s.assertDestroyWithReferencedRelation(c, false) } -func (s *ServiceSuite) assertDestroyWithReferencedRelation(c *gc.C, refresh bool) { +func (s *ApplicationSuite) assertDestroyWithReferencedRelation(c *gc.C, refresh bool) { wordpress := s.AddTestingService(c, "wordpress", s.AddTestingCharm(c, "wordpress")) eps, err := s.State.InferEndpoints("wordpress", "mysql") c.Assert(err, jc.ErrorIsNil) @@ -1618,7 +1715,7 @@ c.Assert(err, jc.Satisfies, errors.IsNotFound) } -func (s *ServiceSuite) TestDestroyQueuesUnitCleanup(c *gc.C) { +func (s *ApplicationSuite) TestDestroyQueuesUnitCleanup(c *gc.C) { // Add 5 units; block quick-remove of mysql/1 and mysql/3 units := make([]*state.Unit, 5) for i := range units { @@ -1671,7 +1768,7 @@ c.Assert(dirty, jc.IsFalse) } -func (s *ServiceSuite) TestRemoveServiceMachine(c *gc.C) { +func (s *ApplicationSuite) TestRemoveServiceMachine(c *gc.C) { unit, err := s.mysql.AddUnit() c.Assert(err, jc.ErrorIsNil) machine, err := s.State.AddMachine("quantal", state.JobHostUnits) @@ -1688,7 +1785,7 @@ assertLife(c, machine, state.Dying) } -func (s *ServiceSuite) TestRemoveQueuesLocalCharmCleanup(c *gc.C) { +func (s *ApplicationSuite) TestRemoveQueuesLocalCharmCleanup(c *gc.C) { // Check state is clean. dirty, err := s.State.NeedsCleanup() c.Assert(err, jc.ErrorIsNil) @@ -1705,23 +1802,17 @@ err = s.State.Cleanup() c.Assert(err, jc.ErrorIsNil) + // Check charm removed + err = s.charm.Refresh() + c.Check(err, jc.Satisfies, errors.IsNotFound) + // Check we're now clean. 
dirty, err = s.State.NeedsCleanup() c.Assert(err, jc.ErrorIsNil) c.Assert(dirty, jc.IsFalse) } -func (s *ServiceSuite) TestRemoveStoreCharmNoCleanup(c *gc.C) { - ch := state.AddTestingCharmMultiSeries(c, s.State, "multi-series") - svc := state.AddTestingServiceForSeries(c, s.State, "precise", "application", ch) - - err := svc.Destroy() - dirty, err := s.State.NeedsCleanup() - c.Assert(err, jc.ErrorIsNil) - c.Assert(dirty, jc.IsFalse) -} - -func (s *ServiceSuite) TestReadUnitWithChangingState(c *gc.C) { +func (s *ApplicationSuite) TestReadUnitWithChangingState(c *gc.C) { // Check that reading a unit after removing the service // fails nicely. err := s.mysql.Destroy() @@ -1736,7 +1827,7 @@ return &val } -func (s *ServiceSuite) TestConstraints(c *gc.C) { +func (s *ApplicationSuite) TestConstraints(c *gc.C) { // Constraints are initially empty (for now). cons, err := s.mysql.Constraints() c.Assert(err, jc.ErrorIsNil) @@ -1774,13 +1865,13 @@ c.Assert(&cons6, jc.Satisfies, constraints.IsEmpty) } -func (s *ServiceSuite) TestSetInvalidConstraints(c *gc.C) { +func (s *ApplicationSuite) TestSetInvalidConstraints(c *gc.C) { cons := constraints.MustParse("mem=4G instance-type=foo") err := s.mysql.SetConstraints(cons) c.Assert(err, gc.ErrorMatches, `ambiguous constraints: "instance-type" overlaps with "mem"`) } -func (s *ServiceSuite) TestSetUnsupportedConstraintsWarning(c *gc.C) { +func (s *ApplicationSuite) TestSetUnsupportedConstraintsWarning(c *gc.C) { defer loggo.ResetWriters() logger := loggo.GetLogger("test") logger.SetLogLevel(loggo.DEBUG) @@ -1799,7 +1890,7 @@ c.Assert(scons, gc.DeepEquals, cons) } -func (s *ServiceSuite) TestConstraintsLifecycle(c *gc.C) { +func (s *ApplicationSuite) TestConstraintsLifecycle(c *gc.C) { // Dying. 
unit, err := s.mysql.AddUnit() c.Assert(err, jc.ErrorIsNil) @@ -1823,7 +1914,7 @@ c.Assert(err, gc.ErrorMatches, `constraints not found`) } -func (s *ServiceSuite) TestSubordinateConstraints(c *gc.C) { +func (s *ApplicationSuite) TestSubordinateConstraints(c *gc.C) { loggingCh := s.AddTestingCharm(c, "logging") logging := s.AddTestingService(c, "logging", loggingCh) @@ -1834,7 +1925,7 @@ c.Assert(err, gc.Equals, state.ErrSubordinateConstraints) } -func (s *ServiceSuite) TestWatchUnitsBulkEvents(c *gc.C) { +func (s *ApplicationSuite) TestWatchUnitsBulkEvents(c *gc.C) { // Alive unit... alive, err := s.mysql.AddUnit() c.Assert(err, jc.ErrorIsNil) @@ -1881,7 +1972,7 @@ wc.AssertNoChange() } -func (s *ServiceSuite) TestWatchUnitsLifecycle(c *gc.C) { +func (s *ApplicationSuite) TestWatchUnitsLifecycle(c *gc.C) { // Empty initial event when no units. w := s.mysql.WatchUnits() defer testing.AssertStop(c, w) @@ -1929,7 +2020,7 @@ wc.AssertNoChange() } -func (s *ServiceSuite) TestWatchRelations(c *gc.C) { +func (s *ApplicationSuite) TestWatchRelations(c *gc.C) { // TODO(fwereade) split this test up a bit. 
w := s.mysql.WatchRelations() defer testing.AssertStop(c, w) @@ -2030,7 +2121,7 @@ } } -func (s *ServiceSuite) TestWatchService(c *gc.C) { +func (s *ApplicationSuite) TestWatchService(c *gc.C) { w := s.mysql.Watch() defer testing.AssertStop(c, w) @@ -2069,7 +2160,7 @@ testing.NewNotifyWatcherC(c, s.State, w).AssertOneChange() } -func (s *ServiceSuite) TestMetricCredentials(c *gc.C) { +func (s *ApplicationSuite) TestMetricCredentials(c *gc.C) { err := s.mysql.SetMetricCredentials([]byte("hello there")) c.Assert(err, jc.ErrorIsNil) c.Assert(s.mysql.MetricCredentials(), gc.DeepEquals, []byte("hello there")) @@ -2078,7 +2169,7 @@ c.Assert(service.MetricCredentials(), gc.DeepEquals, []byte("hello there")) } -func (s *ServiceSuite) TestMetricCredentialsOnDying(c *gc.C) { +func (s *ApplicationSuite) TestMetricCredentialsOnDying(c *gc.C) { _, err := s.mysql.AddUnit() c.Assert(err, jc.ErrorIsNil) err = s.mysql.SetMetricCredentials([]byte("set before dying")) @@ -2090,10 +2181,10 @@ c.Assert(err, gc.ErrorMatches, "cannot update metric credentials: application not found or not alive") } -func (s *ServiceSuite) testStatus(c *gc.C, status1, status2, expected status.Status) { +func (s *ApplicationSuite) testStatus(c *gc.C, status1, status2, expected status.Status) { u1, err := s.mysql.AddUnit() c.Assert(err, jc.ErrorIsNil) - now := time.Now() + now := coretesting.ZeroTime() sInfo := status.StatusInfo{ Status: status1, Message: "status 1", @@ -2109,7 +2200,7 @@ Message: "status 2", Since: &now, } - if status2 == status.StatusError { + if status2 == status.Error { err = u2.SetAgentStatus(sInfo) } else { err = u2.SetStatus(sInfo) @@ -2127,17 +2218,17 @@ }) } -func (s *ServiceSuite) TestStatus(c *gc.C) { +func (s *ApplicationSuite) TestStatus(c *gc.C) { for _, t := range []struct{ status1, status2, expected status.Status }{ - {status.StatusActive, status.StatusWaiting, status.StatusWaiting}, - {status.StatusMaintenance, status.StatusWaiting, status.StatusWaiting}, - 
{status.StatusActive, status.StatusBlocked, status.StatusBlocked}, - {status.StatusWaiting, status.StatusBlocked, status.StatusBlocked}, - {status.StatusMaintenance, status.StatusBlocked, status.StatusBlocked}, - {status.StatusMaintenance, status.StatusError, status.StatusError}, - {status.StatusBlocked, status.StatusError, status.StatusError}, - {status.StatusWaiting, status.StatusError, status.StatusError}, - {status.StatusActive, status.StatusError, status.StatusError}, + {status.Active, status.Waiting, status.Waiting}, + {status.Maintenance, status.Waiting, status.Waiting}, + {status.Active, status.Blocked, status.Blocked}, + {status.Waiting, status.Blocked, status.Blocked}, + {status.Maintenance, status.Blocked, status.Blocked}, + {status.Maintenance, status.Error, status.Error}, + {status.Blocked, status.Error, status.Error}, + {status.Waiting, status.Error, status.Error}, + {status.Active, status.Error, status.Error}, } { s.testStatus(c, t.status1, t.status2, t.expected) } @@ -2193,11 +2284,13 @@ type: filesystem ` -const oneRequiredSharedStorageMeta = ` +const oneOptionalSharedStorageMeta = ` storage: data0: type: block shared: true + multiple: + range: 0- ` const oneRequiredReadOnlyStorageMeta = ` @@ -2237,7 +2330,7 @@ `[1:], minStr, maxStr) } -func (s *ServiceSuite) setCharmFromMeta(c *gc.C, oldMeta, newMeta string) error { +func (s *ApplicationSuite) setCharmFromMeta(c *gc.C, oldMeta, newMeta string) error { oldCh := s.AddMetaCharm(c, "mysql", oldMeta, 2) newCh := s.AddMetaCharm(c, "mysql", newMeta, 3) svc := s.AddTestingService(c, "test", oldCh) @@ -2246,7 +2339,7 @@ return svc.SetCharm(cfg) } -func (s *ServiceSuite) TestSetCharmOptionalUnusedStorageRemoved(c *gc.C) { +func (s *ApplicationSuite) TestSetCharmOptionalUnusedStorageRemoved(c *gc.C) { err := s.setCharmFromMeta(c, mysqlBaseMeta+oneRequiredOneOptionalStorageMeta, mysqlBaseMeta+oneRequiredStorageMeta, @@ -2256,7 +2349,7 @@ // as it is not in use. 
} -func (s *ServiceSuite) TestSetCharmOptionalUsedStorageRemoved(c *gc.C) { +func (s *ApplicationSuite) TestSetCharmOptionalUsedStorageRemoved(c *gc.C) { oldMeta := mysqlBaseMeta + oneRequiredOneOptionalStorageMeta newMeta := mysqlBaseMeta + oneRequiredStorageMeta oldCh := s.AddMetaCharm(c, "mysql", oldMeta, 2) @@ -2276,26 +2369,58 @@ }).Check() cfg := state.SetCharmConfig{Charm: newCh} err := svc.SetCharm(cfg) - c.Assert(err, gc.ErrorMatches, `cannot upgrade application "test" to charm "mysql": in-use storage "data1" removed`) + c.Assert(err, gc.ErrorMatches, `cannot upgrade application "test" to charm "local:quantal/quantal-mysql-3": in-use storage "data1" removed`) } -func (s *ServiceSuite) TestSetCharmRequiredStorageRemoved(c *gc.C) { +func (s *ApplicationSuite) TestSetCharmRequiredStorageRemoved(c *gc.C) { err := s.setCharmFromMeta(c, mysqlBaseMeta+oneRequiredStorageMeta, mysqlBaseMeta, ) - c.Assert(err, gc.ErrorMatches, `cannot upgrade application "test" to charm "mysql": required storage "data0" removed`) + c.Assert(err, gc.ErrorMatches, `cannot upgrade application "test" to charm "local:quantal/quantal-mysql-3": required storage "data0" removed`) } -func (s *ServiceSuite) TestSetCharmRequiredStorageAdded(c *gc.C) { - err := s.setCharmFromMeta(c, - mysqlBaseMeta+oneRequiredStorageMeta, - mysqlBaseMeta+twoRequiredStorageMeta, - ) - c.Assert(err, gc.ErrorMatches, `cannot upgrade application "test" to charm "mysql": required storage "data1" added`) +func (s *ApplicationSuite) TestSetCharmRequiredStorageAddedDefaultConstraints(c *gc.C) { + oldCh := s.AddMetaCharm(c, "mysql", mysqlBaseMeta+oneRequiredStorageMeta, 2) + newCh := s.AddMetaCharm(c, "mysql", mysqlBaseMeta+twoRequiredStorageMeta, 3) + svc := s.AddTestingService(c, "test", oldCh) + u, err := svc.AddUnit() + c.Assert(err, jc.ErrorIsNil) + + cfg := state.SetCharmConfig{Charm: newCh} + err = svc.SetCharm(cfg) + c.Assert(err, jc.ErrorIsNil) + + // Check that the new required storage was added for the unit. 
+ attachments, err := s.State.UnitStorageAttachments(u.UnitTag()) + c.Assert(err, jc.ErrorIsNil) + c.Assert(attachments, gc.HasLen, 2) +} + +func (s *ApplicationSuite) TestSetCharmStorageAddedUserSpecifiedConstraints(c *gc.C) { + oldCh := s.AddMetaCharm(c, "mysql", mysqlBaseMeta+oneRequiredStorageMeta, 2) + newCh := s.AddMetaCharm(c, "mysql", mysqlBaseMeta+twoOptionalStorageMeta, 3) + svc := s.AddTestingService(c, "test", oldCh) + u, err := svc.AddUnit() + c.Assert(err, jc.ErrorIsNil) + + cfg := state.SetCharmConfig{ + Charm: newCh, + StorageConstraints: map[string]state.StorageConstraints{ + "data1": {Count: 3}, + }, + } + err = svc.SetCharm(cfg) + c.Assert(err, jc.ErrorIsNil) + + // Check that new storage was added for the unit, based on the + // constraints specified in SetCharmConfig. + attachments, err := s.State.UnitStorageAttachments(u.UnitTag()) + c.Assert(err, jc.ErrorIsNil) + c.Assert(attachments, gc.HasLen, 4) } -func (s *ServiceSuite) TestSetCharmOptionalStorageAdded(c *gc.C) { +func (s *ApplicationSuite) TestSetCharmOptionalStorageAdded(c *gc.C) { err := s.setCharmFromMeta(c, mysqlBaseMeta+oneRequiredStorageMeta, mysqlBaseMeta+twoOptionalStorageMeta, @@ -2303,7 +2428,7 @@ c.Assert(err, jc.ErrorIsNil) } -func (s *ServiceSuite) TestSetCharmStorageCountMinDecreased(c *gc.C) { +func (s *ApplicationSuite) TestSetCharmStorageCountMinDecreased(c *gc.C) { err := s.setCharmFromMeta(c, mysqlBaseMeta+oneRequiredStorageMeta+storageRange(2, 3), mysqlBaseMeta+oneRequiredStorageMeta+storageRange(1, 3), @@ -2311,71 +2436,72 @@ c.Assert(err, jc.ErrorIsNil) } -func (s *ServiceSuite) TestSetCharmStorageCountMinIncreased(c *gc.C) { +func (s *ApplicationSuite) TestSetCharmStorageCountMinIncreased(c *gc.C) { err := s.setCharmFromMeta(c, mysqlBaseMeta+oneRequiredStorageMeta+storageRange(1, 3), mysqlBaseMeta+oneRequiredStorageMeta+storageRange(2, 3), ) - c.Assert(err, gc.ErrorMatches, `cannot upgrade application "test" to charm "mysql": existing storage "data0" range 
contracted: min increased from 1 to 2`) + // User must increase the storage constraints from 1 to 2. + c.Assert(err, gc.ErrorMatches, `cannot upgrade application "test" to charm "local:quantal/quantal-mysql-3": validating storage constraints: charm "mysql" store "data0": 2 instances required, 1 specified`) } -func (s *ServiceSuite) TestSetCharmStorageCountMaxDecreased(c *gc.C) { +func (s *ApplicationSuite) TestSetCharmStorageCountMaxDecreased(c *gc.C) { err := s.setCharmFromMeta(c, mysqlBaseMeta+oneRequiredStorageMeta+storageRange(1, 2), mysqlBaseMeta+oneRequiredStorageMeta+storageRange(1, 1), ) - c.Assert(err, gc.ErrorMatches, `cannot upgrade application "test" to charm "mysql": existing storage "data0" range contracted: max decreased from 2 to 1`) + c.Assert(err, gc.ErrorMatches, `cannot upgrade application "test" to charm "local:quantal/quantal-mysql-3": existing storage "data0" range contracted: max decreased from 2 to 1`) } -func (s *ServiceSuite) TestSetCharmStorageCountMaxUnboundedToBounded(c *gc.C) { +func (s *ApplicationSuite) TestSetCharmStorageCountMaxUnboundedToBounded(c *gc.C) { err := s.setCharmFromMeta(c, mysqlBaseMeta+oneRequiredStorageMeta+storageRange(1, -1), mysqlBaseMeta+oneRequiredStorageMeta+storageRange(1, 999), ) - c.Assert(err, gc.ErrorMatches, `cannot upgrade application "test" to charm "mysql": existing storage "data0" range contracted: max decreased from \ to 999`) + c.Assert(err, gc.ErrorMatches, `cannot upgrade application "test" to charm "local:quantal/quantal-mysql-3": existing storage "data0" range contracted: max decreased from \ to 999`) } -func (s *ServiceSuite) TestSetCharmStorageTypeChanged(c *gc.C) { +func (s *ApplicationSuite) TestSetCharmStorageTypeChanged(c *gc.C) { err := s.setCharmFromMeta(c, mysqlBaseMeta+oneRequiredStorageMeta, mysqlBaseMeta+oneRequiredFilesystemStorageMeta, ) - c.Assert(err, gc.ErrorMatches, `cannot upgrade application "test" to charm "mysql": existing storage "data0" type changed from "block" to 
"filesystem"`) + c.Assert(err, gc.ErrorMatches, `cannot upgrade application "test" to charm "local:quantal/quantal-mysql-3": existing storage "data0" type changed from "block" to "filesystem"`) } -func (s *ServiceSuite) TestSetCharmStorageSharedChanged(c *gc.C) { +func (s *ApplicationSuite) TestSetCharmStorageSharedChanged(c *gc.C) { err := s.setCharmFromMeta(c, - mysqlBaseMeta+oneRequiredStorageMeta, - mysqlBaseMeta+oneRequiredSharedStorageMeta, + mysqlBaseMeta+oneOptionalStorageMeta, + mysqlBaseMeta+oneOptionalSharedStorageMeta, ) - c.Assert(err, gc.ErrorMatches, `cannot upgrade application "test" to charm "mysql": existing storage "data0" shared changed from false to true`) + c.Assert(err, gc.ErrorMatches, `cannot upgrade application "test" to charm "local:quantal/quantal-mysql-3": existing storage "data0" shared changed from false to true`) } -func (s *ServiceSuite) TestSetCharmStorageReadOnlyChanged(c *gc.C) { +func (s *ApplicationSuite) TestSetCharmStorageReadOnlyChanged(c *gc.C) { err := s.setCharmFromMeta(c, mysqlBaseMeta+oneRequiredStorageMeta, mysqlBaseMeta+oneRequiredReadOnlyStorageMeta, ) - c.Assert(err, gc.ErrorMatches, `cannot upgrade application "test" to charm "mysql": existing storage "data0" read-only changed from false to true`) + c.Assert(err, gc.ErrorMatches, `cannot upgrade application "test" to charm "local:quantal/quantal-mysql-3": existing storage "data0" read-only changed from false to true`) } -func (s *ServiceSuite) TestSetCharmStorageLocationChanged(c *gc.C) { +func (s *ApplicationSuite) TestSetCharmStorageLocationChanged(c *gc.C) { err := s.setCharmFromMeta(c, mysqlBaseMeta+oneRequiredFilesystemStorageMeta, mysqlBaseMeta+oneRequiredLocationStorageMeta, ) - c.Assert(err, gc.ErrorMatches, `cannot upgrade application "test" to charm "mysql": existing storage "data0" location changed from "" to "/srv"`) + c.Assert(err, gc.ErrorMatches, `cannot upgrade application "test" to charm "local:quantal/quantal-mysql-3": existing storage "data0" 
location changed from "" to "/srv"`) } -func (s *ServiceSuite) TestSetCharmStorageWithLocationSingletonToMultipleAdded(c *gc.C) { +func (s *ApplicationSuite) TestSetCharmStorageWithLocationSingletonToMultipleAdded(c *gc.C) { err := s.setCharmFromMeta(c, mysqlBaseMeta+oneRequiredLocationStorageMeta, mysqlBaseMeta+oneMultipleLocationStorageMeta, ) - c.Assert(err, gc.ErrorMatches, `cannot upgrade application "test" to charm "mysql": existing storage "data0" with location changed from singleton to multiple`) + c.Assert(err, gc.ErrorMatches, `cannot upgrade application "test" to charm "local:quantal/quantal-mysql-3": existing storage "data0" with location changed from single to multiple`) } -func (s *ServiceSuite) assertServiceRemovedWithItsBindings(c *gc.C, service *state.Application) { +func (s *ApplicationSuite) assertApplicationRemovedWithItsBindings(c *gc.C, service *state.Application) { // Removing the service removes the bindings with it. err := service.Destroy() c.Assert(err, jc.ErrorIsNil) @@ -2384,15 +2510,15 @@ state.AssertEndpointBindingsNotFoundForService(c, service) } -func (s *ServiceSuite) TestEndpointBindingsReturnsDefaultsWhenNotFound(c *gc.C) { +func (s *ApplicationSuite) TestEndpointBindingsReturnsDefaultsWhenNotFound(c *gc.C) { ch := s.AddMetaCharm(c, "mysql", metaBase, 42) service := s.AddTestingServiceWithBindings(c, "yoursql", ch, nil) state.RemoveEndpointBindingsForService(c, service) - s.assertServiceHasOnlyDefaultEndpointBindings(c, service) + s.assertApplicationHasOnlyDefaultEndpointBindings(c, service) } -func (s *ServiceSuite) assertServiceHasOnlyDefaultEndpointBindings(c *gc.C, service *state.Application) { +func (s *ApplicationSuite) assertApplicationHasOnlyDefaultEndpointBindings(c *gc.C, service *state.Application) { charm, _, err := service.Charm() c.Assert(err, jc.ErrorIsNil) @@ -2413,17 +2539,17 @@ } } -func (s *ServiceSuite) TestEndpointBindingsJustDefaults(c *gc.C) { +func (s *ApplicationSuite) TestEndpointBindingsJustDefaults(c 
*gc.C) { // With unspecified bindings, all endpoints are explicitly bound to the // default space when saved in state. ch := s.AddMetaCharm(c, "mysql", metaBase, 42) service := s.AddTestingServiceWithBindings(c, "yoursql", ch, nil) - s.assertServiceHasOnlyDefaultEndpointBindings(c, service) - s.assertServiceRemovedWithItsBindings(c, service) + s.assertApplicationHasOnlyDefaultEndpointBindings(c, service) + s.assertApplicationRemovedWithItsBindings(c, service) } -func (s *ServiceSuite) TestEndpointBindingsWithExplictOverrides(c *gc.C) { +func (s *ApplicationSuite) TestEndpointBindingsWithExplictOverrides(c *gc.C) { _, err := s.State.AddSpace("db", "", nil, true) c.Assert(err, jc.ErrorIsNil) _, err = s.State.AddSpace("ha", "", nil, false) @@ -2444,10 +2570,10 @@ "cluster": "ha", }) - s.assertServiceRemovedWithItsBindings(c, service) + s.assertApplicationRemovedWithItsBindings(c, service) } -func (s *ServiceSuite) TestSetCharmExtraBindingsUseDefaults(c *gc.C) { +func (s *ApplicationSuite) TestSetCharmExtraBindingsUseDefaults(c *gc.C) { _, err := s.State.AddSpace("db", "", nil, true) c.Assert(err, jc.ErrorIsNil) @@ -2486,10 +2612,10 @@ } c.Assert(setBindings, jc.DeepEquals, effectiveNew) - s.assertServiceRemovedWithItsBindings(c, service) + s.assertApplicationRemovedWithItsBindings(c, service) } -func (s *ServiceSuite) TestSetCharmHandlesMissingBindingsAsDefaults(c *gc.C) { +func (s *ApplicationSuite) TestSetCharmHandlesMissingBindingsAsDefaults(c *gc.C) { oldCharm := s.AddMetaCharm(c, "mysql", metaDifferentProvider, 69) service := s.AddTestingServiceWithBindings(c, "theirsql", oldCharm, nil) state.RemoveEndpointBindingsForService(c, service) @@ -2514,5 +2640,5 @@ } c.Assert(setBindings, jc.DeepEquals, effectiveNew) - s.assertServiceRemovedWithItsBindings(c, service) + s.assertApplicationRemovedWithItsBindings(c, service) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/assign_test.go juju-core-2.0.0/src/github.com/juju/juju/state/assign_test.go --- 
juju-core-2.0~beta15/src/github.com/juju/juju/state/assign_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/assign_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -7,7 +7,7 @@ "fmt" "sort" "strconv" - "time" + "time" // Only used to Sleep(). jc "github.com/juju/testing/checkers" "github.com/juju/txn" @@ -222,7 +222,7 @@ scons := constraints.MustParse("mem=2G cpu-power=400") err := s.wordpress.SetConstraints(scons) c.Assert(err, jc.ErrorIsNil) - econs := constraints.MustParse("mem=4G cpu-cores=2") + econs := constraints.MustParse("mem=4G cores=2") err = s.State.SetModelConstraints(econs) c.Assert(err, jc.ErrorIsNil) @@ -405,7 +405,7 @@ scons := constraints.MustParse("mem=2G cpu-power=400") err := s.wordpress.SetConstraints(scons) c.Assert(err, jc.ErrorIsNil) - econs := constraints.MustParse("mem=4G cpu-cores=2") + econs := constraints.MustParse("mem=4G cores=2") err = s.State.SetModelConstraints(econs) c.Assert(err, jc.ErrorIsNil) @@ -417,7 +417,7 @@ scons = constraints.MustParse("mem=6G cpu-power=800") err = s.wordpress.SetConstraints(scons) c.Assert(err, jc.ErrorIsNil) - econs = constraints.MustParse("cpu-cores=4") + econs = constraints.MustParse("cores=4") err = s.State.SetModelConstraints(econs) c.Assert(err, jc.ErrorIsNil) @@ -432,7 +432,7 @@ c.Assert(err, jc.ErrorIsNil) mcons, err := machine.Constraints() c.Assert(err, jc.ErrorIsNil) - expect := constraints.MustParse("mem=2G cpu-cores=2 cpu-power=400") + expect := constraints.MustParse("mem=2G cores=2 cpu-power=400") c.Assert(mcons, gc.DeepEquals, expect) } @@ -852,7 +852,7 @@ assignOk: false, }, { unitConstraints: "arch=amd64", - hardwareCharacteristics: "cpu-cores=1", + hardwareCharacteristics: "cores=1", assignOk: false, }, { unitConstraints: "arch=amd64", @@ -868,7 +868,7 @@ assignOk: false, }, { unitConstraints: "mem=4G", - hardwareCharacteristics: "cpu-cores=1", + hardwareCharacteristics: "cores=1", assignOk: false, }, { unitConstraints: "mem=4G", @@ -879,15 
+879,15 @@ hardwareCharacteristics: "mem=2G", assignOk: false, }, { - unitConstraints: "cpu-cores=2", - hardwareCharacteristics: "cpu-cores=2", + unitConstraints: "cores=2", + hardwareCharacteristics: "cores=2", assignOk: true, }, { - unitConstraints: "cpu-cores=2", - hardwareCharacteristics: "cpu-cores=1", + unitConstraints: "cores=2", + hardwareCharacteristics: "cores=1", assignOk: false, }, { - unitConstraints: "cpu-cores=2", + unitConstraints: "cores=2", hardwareCharacteristics: "mem=4G", assignOk: false, }, { @@ -915,12 +915,12 @@ hardwareCharacteristics: "root-disk=8192", assignOk: true, }, { - unitConstraints: "arch=amd64 mem=4G cpu-cores=2 root-disk=8192", - hardwareCharacteristics: "arch=amd64 mem=8G cpu-cores=2 root-disk=8192 cpu-power=50", + unitConstraints: "arch=amd64 mem=4G cores=2 root-disk=8192", + hardwareCharacteristics: "arch=amd64 mem=8G cores=2 root-disk=8192 cpu-power=50", assignOk: true, }, { - unitConstraints: "arch=amd64 mem=4G cpu-cores=2 root-disk=8192", - hardwareCharacteristics: "arch=amd64 mem=8G cpu-cores=1 root-disk=4096 cpu-power=50", + unitConstraints: "arch=amd64 mem=4G cores=2 root-disk=8192", + hardwareCharacteristics: "arch=amd64 mem=8G cores=1 root-disk=4096 cpu-power=50", assignOk: false, }, } @@ -1252,7 +1252,11 @@ func (s *assignCleanSuite) TestAssignUnitPolicyConcurrently(c *gc.C) { _, err := s.State.AddMachine("quantal", state.JobManageModel) // bootstrap machine c.Assert(err, jc.ErrorIsNil) - us := make([]*state.Unit, 50) + unitCount := 50 + if raceDetector { + unitCount = 10 + } + us := make([]*state.Unit, unitCount) for i := range us { us[i], err = s.wordpress.AddUnit() c.Assert(err, jc.ErrorIsNil) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/autocertcache.go juju-core-2.0.0/src/github.com/juju/juju/state/autocertcache.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/autocertcache.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/autocertcache.go 
2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,73 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package state + +import ( + "github.com/juju/errors" + "golang.org/x/crypto/acme/autocert" + "golang.org/x/net/context" + "gopkg.in/mgo.v2" + + "github.com/juju/juju/mongo" +) + +// AutocertCache returns an implementation +// of autocert.Cache backed by the state. +func (st *State) AutocertCache() autocert.Cache { + return autocertCache{st} +} + +type autocertCache struct { + st *State +} + +type autocertCacheDoc struct { + Name string `bson:"_id"` + Data []byte `bson:"data"` +} + +// Put implements autocert.Cache.Put. +func (cache autocertCache) Put(ctx context.Context, name string, data []byte) error { + coll, closeColl := cache.coll() + defer closeColl() + _, err := coll.UpsertId(name, autocertCacheDoc{ + Name: name, + Data: data, + }) + if err != nil { + return errors.Annotatef(err, "cannot store autocert key %q", name) + } + return nil +} + +// Get implements autocert.Cache.Get. +func (cache autocertCache) Get(ctx context.Context, name string) ([]byte, error) { + coll, closeColl := cache.coll() + defer closeColl() + var doc autocertCacheDoc + err := coll.FindId(name).One(&doc) + if err == nil { + return doc.Data, nil + } + if errors.Cause(err) == mgo.ErrNotFound { + return nil, autocert.ErrCacheMiss + } + return nil, errors.Annotatef(err, "cannot get autocert key %q", name) +} + +// Delete implements autocert.Cache.Delete. 
+func (cache autocertCache) Delete(ctx context.Context, name string) error { + coll, closeColl := cache.coll() + defer closeColl() + err := coll.RemoveId(name) + if err == nil || errors.Cause(err) == mgo.ErrNotFound { + return nil + } + return errors.Annotatef(err, "cannot delete autocert key %q", name) +} + +func (cache autocertCache) coll() (mongo.WriteCollection, func()) { + coll, closer := cache.st.getCollection(autocertCacheC) + return coll.Writeable(), closer +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/autocertcache_test.go juju-core-2.0.0/src/github.com/juju/juju/state/autocertcache_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/autocertcache_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/autocertcache_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,94 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package state_test + +import ( + jc "github.com/juju/testing/checkers" + "golang.org/x/crypto/acme/autocert" + "golang.org/x/net/context" + gc "gopkg.in/check.v1" + + statetesting "github.com/juju/juju/state/testing" +) + +type autocertCacheSuite struct { + statetesting.StateSuite +} + +var _ = gc.Suite(&autocertCacheSuite{}) + +func (s *autocertCacheSuite) TestCachePutGet(c *gc.C) { + ctx := context.Background() + cache := s.State.AutocertCache() + + err := cache.Put(ctx, "a", []byte("aval")) + c.Assert(err, jc.ErrorIsNil) + err = cache.Put(ctx, "b", []byte("bval")) + c.Assert(err, jc.ErrorIsNil) + + // Check that we can get the existing entries. 
+ data, err := cache.Get(ctx, "a") + c.Assert(err, jc.ErrorIsNil) + c.Assert(string(data), gc.Equals, "aval") + + data, err = cache.Get(ctx, "b") + c.Assert(err, jc.ErrorIsNil) + c.Assert(string(data), gc.Equals, "bval") +} + +func (s *autocertCacheSuite) TestGetNonexistentEntry(c *gc.C) { + ctx := context.Background() + cache := s.State.AutocertCache() + + // Getting a non-existent entry must return ErrCacheMiss. + data, err := cache.Get(ctx, "c") + c.Assert(err, gc.Equals, autocert.ErrCacheMiss) + c.Assert(data, gc.IsNil) +} + +func (s *autocertCacheSuite) TestDelete(c *gc.C) { + ctx := context.Background() + cache := s.State.AutocertCache() + + err := cache.Put(ctx, "a", []byte("aval")) + c.Assert(err, jc.ErrorIsNil) + err = cache.Put(ctx, "b", []byte("bval")) + c.Assert(err, jc.ErrorIsNil) + + // Check that we can delete an entry. + err = cache.Delete(ctx, "b") + c.Assert(err, jc.ErrorIsNil) + + data, err := cache.Get(ctx, "b") + c.Assert(err, gc.Equals, autocert.ErrCacheMiss) + c.Assert(data, gc.IsNil) + + // Check that the non-deleted entry is still there. 
+ data, err = cache.Get(ctx, "a") + c.Assert(err, jc.ErrorIsNil) + c.Assert(string(data), gc.Equals, "aval") +} + +func (s *autocertCacheSuite) TestDeleteNonexistentEntry(c *gc.C) { + ctx := context.Background() + cache := s.State.AutocertCache() + + err := cache.Delete(ctx, "a") + c.Assert(err, jc.ErrorIsNil) +} + +func (s *autocertCacheSuite) TestPutExistingEntry(c *gc.C) { + ctx := context.Background() + cache := s.State.AutocertCache() + + err := cache.Put(ctx, "a", []byte("aval")) + c.Assert(err, jc.ErrorIsNil) + + err = cache.Put(ctx, "a", []byte("aval2")) + c.Assert(err, jc.ErrorIsNil) + + data, err := cache.Get(ctx, "a") + c.Assert(err, jc.ErrorIsNil) + c.Assert(string(data), gc.Equals, "aval2") +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/backend.go juju-core-2.0.0/src/github.com/juju/juju/state/backend.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/backend.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/backend.go 2016-10-13 14:31:49.000000000 +0000 @@ -22,6 +22,7 @@ localID(string) string strictLocalID(string) (string, error) getCollection(name string) (mongo.Collection, func()) + getCollectionFor(modelUUID, name string) (mongo.Collection, func()) } // isLocalID returns a watcher filter func that rejects ids not specific @@ -77,3 +78,12 @@ func (st *State) getCollection(name string) (mongo.Collection, func()) { return st.database.GetCollection(name) } + +func (st *State) getCollectionFor(modelUUID, name string) (mongo.Collection, func()) { + database, dbcloser := st.database.CopyForModel(modelUUID) + collection, closer := database.GetCollection(name) + return collection, func() { + closer() + dbcloser() + } +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/backups/backups_linux.go juju-core-2.0.0/src/github.com/juju/juju/state/backups/backups_linux.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/backups/backups_linux.go 2016-08-16 08:56:25.000000000 +0000 
+++ juju-core-2.0.0/src/github.com/juju/juju/state/backups/backups_linux.go 2016-10-13 14:31:49.000000000 +0000 @@ -32,7 +32,7 @@ } var numaCtlPolicy bool - if numaCtlString := agentConfig.Value(agent.NumaCtlPreference); numaCtlString != "" { + if numaCtlString := agentConfig.Value(agent.NUMACtlPreference); numaCtlString != "" { var err error if numaCtlPolicy, err = strconv.ParseBool(numaCtlString); err != nil { return errors.Annotatef(err, "invalid numactl preference: %q", numaCtlString) @@ -44,14 +44,20 @@ return errors.Errorf("agent config has no state serving info") } - err := mongo.EnsureServiceInstalled(agentConfig.DataDir(), + if err := mongo.EnsureServiceInstalled(agentConfig.DataDir(), si.StatePort, oplogSize, numaCtlPolicy, agentConfig.MongoVersion(), true, - ) - return errors.Annotate(err, "cannot ensure that mongo service start/stop scripts are in place") + ); err != nil { + return errors.Annotate(err, "cannot ensure that mongo service start/stop scripts are in place") + } + // Installing a service will not automatically restart it. 
+ if err := mongo.StartService(); err != nil { + return errors.Annotate(err, "failed to start mongo") + } + return nil } // Restore handles either returning or creating a controller to a backed up status: @@ -100,8 +106,13 @@ return nil, errors.Annotate(err, "cannot load old agent config from disk") } + logger.Infof("stopping juju-db") + if err = mongo.StopService(); err != nil { + return nil, errors.Annotate(err, "failed to stop mongo") + } + // delete all the files to be replaced - if err := PrepareMachineForRestore(); err != nil { + if err := PrepareMachineForRestore(oldAgentConfig.MongoVersion()); err != nil { return nil, errors.Annotate(err, "cannot delete existing files") } logger.Infof("deleted old files to place new") @@ -218,7 +229,7 @@ return nil, errors.Errorf("cannot retrieve info to connect to mongo") } - st, err := newStateConnection(agentConfig.Model(), mgoInfo) + st, err := newStateConnection(agentConfig.Controller(), agentConfig.Model(), mgoInfo) if err != nil { return nil, errors.Trace(err) } @@ -229,20 +240,47 @@ return nil, errors.Trace(err) } + logger.Infof("updating local machine addresses") err = updateMachineAddresses(machine, args.PrivateAddress, args.PublicAddress) if err != nil { return nil, errors.Annotate(err, "cannot update api server machine addresses") } + // Update the APIHostPorts as well. Under normal circumstances the API + // Host Ports are only set during bootstrap and by the peergrouper worker. + // Unfortunately right now, the peer grouper is busy restarting and isn't + // guaranteed to set the host ports before the remote machines we are + // about to tell about us. If it doesn't, the remote machine gets its + // agent.conf file updated with this new machine's IP address, it then + // starts, and the "api-address-updater" worker asks for the api host + // ports, and gets told the old IP address of the machine that was backed + // up. 
It then writes this incorrect file to its agent.conf file, which + // causes it to attempt to reconnect to the api server. Unfortunately it + // now has the wrong address and can never get the correct one. + // So, we set it explicitly here. + if err := st.SetAPIHostPorts([][]network.HostPort{APIHostPorts}); err != nil { + return nil, errors.Annotate(err, "cannot update api server host ports") + } // update all agents known to the new controller. // TODO(perrito666): We should never stop process because of this. // updateAllMachines will not return errors for individual // agent update failures - machines, err := st.AllMachines() + models, err := st.AllModels() if err != nil { return nil, errors.Trace(err) } - if err := updateAllMachines(args.PrivateAddress, machines); err != nil { + machines := []machineModel{} + for _, model := range models { + machinesForModel, err := st.AllMachinesFor(model.UUID()) + if err != nil { + return nil, errors.Trace(err) + } + for _, machine := range machinesForModel { + machines = append(machines, machineModel{machine: machine, model: model}) + } + } + logger.Infof("updating other machine addresses") + if err := updateAllMachines(args.PrivateAddress, args.PublicAddress, machines); err != nil { return nil, errors.Annotate(err, "cannot update agents") } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/backups/backups_test.go juju-core-2.0.0/src/github.com/juju/juju/state/backups/backups_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/backups/backups_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/backups/backups_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -6,15 +6,17 @@ import ( "bytes" "io/ioutil" - "time" + "time" // Only used for time types. 
"github.com/juju/errors" jc "github.com/juju/testing/checkers" "github.com/juju/utils/set" gc "gopkg.in/check.v1" + "github.com/juju/juju/mongo" "github.com/juju/juju/state/backups" backupstesting "github.com/juju/juju/state/backups/testing" + "github.com/juju/juju/testing" ) type backupsSuite struct { @@ -35,7 +37,7 @@ s.Storage.ID = id s.Storage.Meta = backupstesting.NewMetadataStarted() s.Storage.Meta.SetID(id) - stored := time.Now().UTC() + stored := testing.NonZeroTime().UTC() s.Storage.Meta.SetStored(&stored) return &stored } @@ -53,7 +55,7 @@ paths := backups.Paths{DataDir: "/var/lib/juju"} targets := set.NewStrings("juju", "admin") - dbInfo := backups.DBInfo{"a", "b", "c", targets} + dbInfo := backups.DBInfo{"a", "b", "c", targets, mongo.Mongo32wt} meta := backupstesting.NewMetadataStarted() meta.Notes = "some notes" err := s.api.Create(meta, &paths, &dbInfo) @@ -92,7 +94,7 @@ // Run the backup. paths := backups.Paths{DataDir: "/var/lib/juju"} targets := set.NewStrings("juju", "admin") - dbInfo := backups.DBInfo{"a", "b", "c", targets} + dbInfo := backups.DBInfo{"a", "b", "c", targets, mongo.Mongo32wt} meta := backupstesting.NewMetadataStarted() backupstesting.SetOrigin(meta, "", "", "") meta.Notes = "some notes" diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/backups/db_dump_test.go juju-core-2.0.0/src/github.com/juju/juju/state/backups/db_dump_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/backups/db_dump_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/backups/db_dump_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -11,6 +11,7 @@ "github.com/juju/utils/set" gc "gopkg.in/check.v1" + "github.com/juju/juju/mongo" "github.com/juju/juju/state/backups" "github.com/juju/juju/testing" ) @@ -30,7 +31,7 @@ s.BaseSuite.SetUpTest(c) targets := set.NewStrings("juju", "admin") - s.dbInfo = &backups.DBInfo{"a", "b", "c", targets} + s.dbInfo = &backups.DBInfo{"a", "b", "c", targets, 
mongo.Mongo24} s.targets = targets s.dumpDir = c.MkDir() } @@ -95,6 +96,35 @@ err := dumper.Dump(s.dumpDir) c.Assert(err, jc.ErrorIsNil) + + s.checkDBs(c, "juju", "admin") + s.checkStripped(c, "backups") +} + +func (s *dumpSuite) TestDumpStrippedAdmin(c *gc.C) { + s.dbInfo.MongoVersion = mongo.Mongo32wt + s.dbInfo.Targets = set.NewStrings("juju") + s.patch(c) + dumper := s.prep(c, "juju") + s.prepDB(c, "backups") // ignored + s.prepDB(c, "admin") // ignored + + err := dumper.Dump(s.dumpDir) + c.Assert(err, jc.ErrorIsNil) + + s.checkDBs(c, "juju") + s.checkStripped(c, "backups") + s.checkStripped(c, "admin") +} + +func (s *dumpSuite) TestDumpNotStrippedAdmin(c *gc.C) { + s.patch(c) + dumper := s.prep(c, "juju") + s.prepDB(c, "backups") // ignored + s.prepDB(c, "admin") + + err := dumper.Dump(s.dumpDir) + c.Assert(err, jc.ErrorIsNil) s.checkDBs(c, "juju", "admin") s.checkStripped(c, "backups") diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/backups/db.go juju-core-2.0.0/src/github.com/juju/juju/state/backups/db.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/backups/db.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/backups/db.go 2016-10-13 14:31:49.000000000 +0000 @@ -42,10 +42,13 @@ Password string // Targets is a list of databases to dump. Targets set.Strings + // MongoVersion the version of the running mongo db. + MongoVersion mongo.Version } // ignoredDatabases is the list of databases that should not be -// backed up. +// backed up, admin might be removed later, after determining +// mongo version. var ignoredDatabases = set.NewStrings( "admin", storageDBName, @@ -59,16 +62,17 @@ // NewDBInfo returns the information needed by backups to dump // the database. 
-func NewDBInfo(mgoInfo *mongo.MongoInfo, session DBSession) (*DBInfo, error) { +func NewDBInfo(mgoInfo *mongo.MongoInfo, session DBSession, version mongo.Version) (*DBInfo, error) { targets, err := getBackupTargetDatabases(session) if err != nil { return nil, errors.Trace(err) } info := DBInfo{ - Address: mgoInfo.Addrs[0], - Password: mgoInfo.Password, - Targets: targets, + Address: mgoInfo.Addrs[0], + Password: mgoInfo.Password, + Targets: targets, + MongoVersion: version, } // TODO(dfc) Backup should take a Tag. @@ -183,6 +187,11 @@ // Strip the ignored database from the dump dir. ignored := found.Difference(md.Targets) + // Admin must be removed only if the mongo version is 3.x or + // above, since 2.x will not restore properly without admin. + if md.DBInfo.MongoVersion.NewerThan(mongo.Mongo26) == -1 { + ignored.Remove("admin") + } err = stripIgnored(ignored, baseDumpDir) return errors.Trace(err) } @@ -326,6 +335,8 @@ } installedMongo := mongoInstalledVersion() + logger.Debugf("args: is %#v", args) + logger.Infof("installed mongo is %s", installedMongo) // NewerThan will check Major and Minor so migration between micro versions // will work, before changing this bewar, Mongo has been known to break // compatibility between minors. @@ -366,7 +377,7 @@ // https://jira.mongodb.org/browse/TOOLS-939 -- not guaranteed // to *help* with lp:1605653, but observed not to hurt. // - // The value of 100 was chosen because it's more pessimistic + // The value of 10 was chosen because it's more pessimistic // than the "1000" that many report success using in the bug. 
options := []string{ "--ssl", @@ -376,7 +387,7 @@ "--password", md.Password, "--drop", "--oplogReplay", - "--batchSize", "100", + "--batchSize", "10", dumpDir, } return options @@ -486,6 +497,7 @@ } func (md *mongoRestorer32) Restore(dumpDir string, dialInfo *mgo.DialInfo) error { + logger.Debugf("start restore, dumpDir %s", dumpDir) if err := md.ensureOplogPermissions(dialInfo); err != nil { return errors.Annotate(err, "setting special user permission in db") } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/backups/db_info_test.go juju-core-2.0.0/src/github.com/juju/juju/state/backups/db_info_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/backups/db_info_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/backups/db_info_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -39,7 +39,7 @@ Tag: tag, Password: "eggs", } - dbInfo, err := backups.NewDBInfo(mgoInfo, &session) + dbInfo, err := backups.NewDBInfo(mgoInfo, &session, mongo.Mongo32wt) c.Assert(err, jc.ErrorIsNil) c.Check(dbInfo.Address, gc.Equals, "localhost:8080") @@ -56,7 +56,7 @@ }, Password: "eggs", } - dbInfo, err := backups.NewDBInfo(mgoInfo, &session) + dbInfo, err := backups.NewDBInfo(mgoInfo, &session, mongo.Mongo32wt) c.Assert(err, jc.ErrorIsNil) c.Check(dbInfo.Username, gc.Equals, "") diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/backups/db_restore_test.go juju-core-2.0.0/src/github.com/juju/juju/state/backups/db_restore_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/backups/db_restore_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/backups/db_restore_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -115,7 +115,7 @@ c.Assert(err, jc.ErrorIsNil) c.Assert(ranCommand, gc.Equals, "/a/fake/mongorestore") - c.Assert(ranWithArgs, gc.DeepEquals, []string{"--ssl", "--authenticationDatabase", "admin", "--host", "127.0.0.1", "--username", "fakeUsername", 
"--password", "fakePassword", "--drop", "--oplogReplay", "--batchSize", "100", "fakePath"}) + c.Assert(ranWithArgs, gc.DeepEquals, []string{"--ssl", "--authenticationDatabase", "admin", "--host", "127.0.0.1", "--username", "fakeUsername", "--password", "fakePassword", "--drop", "--oplogReplay", "--batchSize", "10", "fakePath"}) user := &mgo.User{Username: "machine-0", Password: "fakePassword"} c.Assert(mgoDb.user, gc.DeepEquals, user) c.Assert(mgoSession.closed, jc.IsTrue) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/backups/export_test.go juju-core-2.0.0/src/github.com/juju/juju/state/backups/export_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/backups/export_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/backups/export_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -7,7 +7,7 @@ "bytes" "io" "io/ioutil" - "time" + "time" // Only used for time types. "github.com/juju/errors" "github.com/juju/testing" diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/backups/files.go juju-core-2.0.0/src/github.com/juju/juju/state/backups/files.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/backups/files.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/backups/files.go 2016-10-13 14:31:49.000000000 +0000 @@ -9,6 +9,8 @@ "sort" "github.com/juju/errors" + + "github.com/juju/juju/mongo" ) // TODO(ericsnow) lp-1392876 @@ -96,14 +98,23 @@ // replaceableFoldersFunc will return a map with the folders that need to // be replaced so they can be deleted prior to a restore. 
-func replaceableFoldersFunc() (map[string]os.FileMode, error) { +// Mongo 2.4 requires that the database directory be removed, while +// Mongo 3.2 requires that it not be removed +func replaceableFoldersFunc(dataDir string, mongoVersion mongo.Version) (map[string]os.FileMode, error) { replaceables := map[string]os.FileMode{} - for _, replaceable := range []string{ - filepath.Join(dataDir, "db"), + // NOTE: never put dataDir in here directly as that will unconditionally + // remove the database. + dirs := []string{ filepath.Join(dataDir, "init"), - dataDir, - } { + filepath.Join(dataDir, "tools"), + filepath.Join(dataDir, "agents"), + } + if mongoVersion.Major == 2 { + dirs = append(dirs, filepath.Join(dataDir, "db")) + } + + for _, replaceable := range dirs { dirStat, err := os.Stat(replaceable) if os.IsNotExist(err) { continue @@ -124,8 +135,8 @@ // directories that are to contain new files; this is to avoid // possible mixup from new/old files that lead to an inconsistent // restored state machine. 
-func PrepareMachineForRestore() error { - replaceFolders, err := replaceableFolders() +func PrepareMachineForRestore(mongoVersion mongo.Version) error { + replaceFolders, err := replaceableFolders(dataDir, mongoVersion) if err != nil { return errors.Annotate(err, "cannot retrieve the list of folders to be cleaned before restore") } @@ -144,6 +155,7 @@ if !fmode.IsDir() { continue } + logger.Debugf("removing dir: %s", toBeRecreated) if err := os.RemoveAll(toBeRecreated); err != nil { return errors.Trace(err) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/backups/files_test.go juju-core-2.0.0/src/github.com/juju/juju/state/backups/files_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/backups/files_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/backups/files_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -16,6 +16,7 @@ "github.com/juju/utils/set" gc "gopkg.in/check.v1" + "github.com/juju/juju/mongo" "github.com/juju/juju/state/backups" "github.com/juju/juju/testing" ) @@ -142,7 +143,7 @@ c.Assert(err, jc.ErrorIsNil) defer fhr.Close() - s.PatchValue(backups.ReplaceableFolders, func() (map[string]os.FileMode, error) { + s.PatchValue(backups.ReplaceableFolders, func(_ string, _ mongo.Version) (map[string]os.FileMode, error) { replaceables := map[string]os.FileMode{} for _, replaceable := range []string{ recreatableFolder, @@ -157,7 +158,7 @@ return replaceables, nil }) - err = backups.PrepareMachineForRestore() + err = backups.PrepareMachineForRestore(mongo.Version{}) c.Assert(err, jc.ErrorIsNil) _, err = os.Stat(deletableFolder) @@ -175,6 +176,40 @@ c.Assert(recreatableFolder1Info.Sys().(*syscall.Stat_t).Ino, gc.Not(gc.Equals), recreatedFolder1Info.Sys().(*syscall.Stat_t).Ino) } +func (s *filesSuite) setupReplaceableFolders(c *gc.C) string { + dataDir := c.MkDir() + c.Assert(os.Mkdir(filepath.Join(dataDir, "init"), 0640), jc.ErrorIsNil) + c.Assert(os.Mkdir(filepath.Join(dataDir, "tools"), 
0660), jc.ErrorIsNil) + c.Assert(os.Mkdir(filepath.Join(dataDir, "agents"), 0600), jc.ErrorIsNil) + c.Assert(os.Mkdir(filepath.Join(dataDir, "db"), 0600), jc.ErrorIsNil) + return dataDir +} + +func (s *filesSuite) TestReplaceableFoldersMongo2(c *gc.C) { + dataDir := s.setupReplaceableFolders(c) + + result, err := (*backups.ReplaceableFolders)(dataDir, mongo.Version{Major: 2, Minor: 4}) + c.Assert(err, jc.ErrorIsNil) + c.Assert(result, jc.DeepEquals, map[string]os.FileMode{ + filepath.Join(dataDir, "init"): 0640 | os.ModeDir, + filepath.Join(dataDir, "tools"): 0660 | os.ModeDir, + filepath.Join(dataDir, "agents"): 0600 | os.ModeDir, + filepath.Join(dataDir, "db"): 0600 | os.ModeDir, + }) +} + +func (s *filesSuite) TestReplaceableFoldersMongo3(c *gc.C) { + dataDir := s.setupReplaceableFolders(c) + + result, err := (*backups.ReplaceableFolders)(dataDir, mongo.Version{Major: 3, Minor: 2}) + c.Assert(err, jc.ErrorIsNil) + c.Assert(result, jc.DeepEquals, map[string]os.FileMode{ + filepath.Join(dataDir, "init"): 0640 | os.ModeDir, + filepath.Join(dataDir, "tools"): 0660 | os.ModeDir, + filepath.Join(dataDir, "agents"): 0600 | os.ModeDir, + }) +} + func (s *filesSuite) TestGetFilesToBackUpMachine10(c *gc.C) { paths := backups.Paths{ DataDir: "/var/lib/juju", diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/backups/metadata_test.go juju-core-2.0.0/src/github.com/juju/juju/state/backups/metadata_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/backups/metadata_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/backups/metadata_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -7,7 +7,7 @@ "bytes" "os" "path/filepath" - "time" + "time" // Only used for time types and funcs, not Now(). 
jc "github.com/juju/testing/checkers" "github.com/juju/version" @@ -116,7 +116,7 @@ c.Check(meta.ChecksumFormat(), gc.Equals, "SHA-1, base64 encoded") c.Check(meta.Size(), gc.Equals, int64(17)) c.Check(meta.Stored(), gc.IsNil) - c.Check(meta.Started.Unix(), gc.Equals, int64(time.Time{}.Unix())) + c.Check(meta.Started.Unix(), gc.Equals, int64(testing.ZeroTime().Unix())) c.Check(meta.Finished.Unix(), gc.Equals, finished) c.Check(meta.Notes, gc.Equals, "") c.Check(meta.Origin.Model, gc.Equals, backups.UnknownString) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/backups/restore.go juju-core-2.0.0/src/github.com/juju/juju/state/backups/restore.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/backups/restore.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/backups/restore.go 2016-10-13 14:31:49.000000000 +0000 @@ -141,7 +141,7 @@ var environsGetNewPolicyFunc = stateenvirons.GetNewPolicyFunc // newStateConnection tries to connect to the newly restored controller. -func newStateConnection(modelTag names.ModelTag, info *mongo.MongoInfo) (*state.State, error) { +func newStateConnection(controllerTag names.ControllerTag, modelTag names.ModelTag, info *mongo.MongoInfo) (*state.State, error) { // We need to retry here to allow mongo to come up on the restored controller. // The connection might succeed due to the mongo dial retries but there may still // be a problem issuing database commands. 
@@ -157,7 +157,7 @@ attempt := utils.AttemptStrategy{Delay: newStateConnDelay, Min: newStateConnMinAttempts} getEnviron := stateenvirons.GetNewEnvironFunc(environs.New) for a := attempt.Start(); a.Next(); { - st, err = state.Open(modelTag, info, mongoDefaultDialOpts(), environsGetNewPolicyFunc(getEnviron)) + st, err = state.Open(modelTag, controllerTag, info, mongoDefaultDialOpts(), environsGetNewPolicyFunc(getEnviron)) if err == nil { return st, nil } @@ -166,28 +166,37 @@ return st, errors.Annotate(err, "cannot open state") } +type machineModel struct { + machine *state.Machine + model *state.Model +} + // updateAllMachines finds all machines and resets the stored state address // in each of them. The address does not include the port. // It is too late to go back and errors in a couple of agents have // better chance of being fixed by the user, if we were to fail // we risk an inconsistent controller because of one unresponsive // agent, we should nevertheless return the err info to the user. -func updateAllMachines(privateAddress string, machines []*state.Machine) error { +func updateAllMachines(privateAddress, publicAddress string, machines []machineModel) error { var machineUpdating sync.WaitGroup - for key := range machines { - // key is used to have machine be scope bound to the loop iteration. - machine := machines[key] + for _, item := range machines { + machine := item.machine // A newly resumed controller requires no updating, and more // than one controller is not yet supported by this code. 
if machine.IsManager() || machine.Life() == state.Dead { continue } machineUpdating.Add(1) - go func() { + go func(machine *state.Machine, model *state.Model) { defer machineUpdating.Done() - err := runMachineUpdate(machine, setAgentAddressScript(privateAddress)) - logger.Errorf("failed updating machine: %v", err) - }() + logger.Debugf("updating addresses for machine %s in model %s/%s", machine.Tag().Id(), model.Owner().Id(), model.Name()) + // TODO: thumper 2016-09-20 + // runMachineUpdate only handles linux machines, what about windows? + err := runMachineUpdate(machine, setAgentAddressScript(privateAddress, publicAddress)) + if err != nil { + logger.Errorf("failed updating machine: %v", err) + } + }(machine, item.model) } machineUpdating.Wait() @@ -203,13 +212,17 @@ cd /var/lib/juju/agents for agent in * do - status jujud-$agent| grep -q "^jujud-$agent start" > /dev/null - if [ $? -eq 0 ]; then - initctl stop jujud-$agent - fi + service jujud-$agent stop > /dev/null + + # The below statement will work in cases where there + # is a private address for the api server only + # or where there are a private and a public, which are + # the two common cases. sed -i.old -r "/^(stateaddresses|apiaddresses):/{ n s/- .*(:[0-9]+)/- {{.Address}}\1/ + n + s/- .*(:[0-9]+)/- {{.PubAddress}}\1/ }" $agent/agent.conf # If we're processing a unit agent's directly @@ -221,22 +234,17 @@ then find $agent/state/relations -type f -exec sed -i -r 's/change-version: [0-9]+$/change-version: 0/' {} \; fi - # Just in case is a stale unit - status jujud-$agent| grep -q "^jujud-$agent stop" > /dev/null - if [ $? -eq 0 ]; then - initctl start jujud-$agent - systemctl stop jujud-$agent - systemctl start jujud-$agent - fi + service jujud-$agent start > /dev/null done `)) // setAgentAddressScript generates an ssh script argument to update state addresses. 
-func setAgentAddressScript(stateAddr string) string { +func setAgentAddressScript(stateAddr, statePubAddr string) string { var buf bytes.Buffer err := agentAddressAndRelationsTemplate.Execute(&buf, struct { - Address string - }{stateAddr}) + Address string + PubAddress string + }{stateAddr, statePubAddr}) if err != nil { panic(errors.Annotate(err, "template error")) } @@ -265,10 +273,14 @@ sshOptions := ssh.Options{} sshOptions.SetIdentities("/var/lib/juju/system-identity") userCmd := sshCommand(userAddr, []string{"sudo", "-n", "bash", "-c " + utils.ShQuote(script)}, &sshOptions) + var stdoutBuf bytes.Buffer var stderrBuf bytes.Buffer + userCmd.Stdout = &stdoutBuf userCmd.Stderr = &stderrBuf + logger.Debugf("updating %s, script:\n%s", addr, script) if err := userCmd.Run(); err != nil { return errors.Annotatef(err, "ssh command failed: %q", stderrBuf.String()) } + logger.Debugf("result %s\nstdout: \n%s\nstderr: %s", addr, stdoutBuf.String(), stderrBuf.String()) return nil } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/backups/restore_test.go juju-core-2.0.0/src/github.com/juju/juju/state/backups/restore_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/backups/restore_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/backups/restore_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -120,27 +120,13 @@ "SixtNewControllerAddress:30308", } for _, address := range testServerAddresses { - template := setAgentAddressScript(address) + template := setAgentAddressScript(address, address) expectedString := fmt.Sprintf("\t\ts/- .*(:[0-9]+)/- %s\\1/\n", address) logger.Infof(fmt.Sprintf("Testing with address %q", address)) c.Assert(strings.Contains(template, expectedString), gc.Equals, true) } } -var caCertPEM = ` ------BEGIN CERTIFICATE----- -MIIBnTCCAUmgAwIBAgIBADALBgkqhkiG9w0BAQUwJjENMAsGA1UEChMEanVqdTEV -MBMGA1UEAxMManVqdSB0ZXN0aW5nMB4XDTEyMTExNDE0Mzg1NFoXDTIyMTExNDE0 
-NDM1NFowJjENMAsGA1UEChMEanVqdTEVMBMGA1UEAxMManVqdSB0ZXN0aW5nMFow -CwYJKoZIhvcNAQEBA0sAMEgCQQCCOOpn9aWKcKr2GQGtygwD7PdfNe1I9BYiPAqa -2I33F5+6PqFdfujUKvoyTJI6XG4Qo/CECaaN9smhyq9DxzMhAgMBAAGjZjBkMA4G -A1UdDwEB/wQEAwIABDASBgNVHRMBAf8ECDAGAQH/AgEBMB0GA1UdDgQWBBQQDswP -FQGeGMeTzPbHW62EZbbTJzAfBgNVHSMEGDAWgBQQDswPFQGeGMeTzPbHW62EZbbT -JzALBgkqhkiG9w0BAQUDQQAqZzN0DqUyEfR8zIanozyD2pp10m9le+ODaKZDDNfH -8cB2x26F1iZ8ccq5IC2LtQf1IKJnpTcYlLuDvW6yB96g ------END CERTIFICATE----- -` - func (r *RestoreSuite) TestNewDialInfo(c *gc.C) { cases := []struct { @@ -185,19 +171,20 @@ }, UpgradedToVersion: jujuversion.Current, Tag: machineTag, + Controller: coretesting.ControllerTag, Model: coretesting.ModelTag, Password: "placeholder", Nonce: "dummyNonce", StateAddresses: []string{"fakeStateAddress:1234"}, APIAddresses: []string{"fakeAPIAddress:12345"}, - CACert: caCertPEM, + CACert: coretesting.CACert, } statePort := 12345 privateAddress := "dummyPrivateAddress" servingInfo := params.StateServingInfo{ APIPort: 1234, StatePort: statePort, - Cert: caCertPEM, + Cert: coretesting.CACert, CAPrivateKey: "a ca key", PrivateKey: "a key", SharedSecret: "a secret", @@ -262,7 +249,11 @@ c.Assert(err, jc.ErrorIsNil) defer server.DestroyWithLog() - st := statetesting.Initialize(c, names.NewLocalUserTag("test-admin"), nil, nil, nil) + st := statetesting.InitializeWithArgs(c, + statetesting.InitializeArgs{ + Owner: names.NewLocalUserTag("test-admin"), + Clock: gitjujutesting.NewClock(coretesting.NonZeroTime()), + }) c.Assert(st.Close(), jc.ErrorIsNil) r.PatchValue(&mongoDefaultDialOpts, mongotest.DialOpts) @@ -271,7 +262,7 @@ ) state.NewPolicyFunc { return nil }) - st, err = newStateConnection(st.ModelTag(), statetesting.NewMongoInfo()) + st, err = newStateConnection(st.ControllerTag(), st.ModelTag(), statetesting.NewMongoInfo()) c.Assert(err, jc.ErrorIsNil) c.Assert(st.Close(), jc.ErrorIsNil) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/bakerystorage/storage_test.go 
juju-core-2.0.0/src/github.com/juju/juju/state/bakerystorage/storage_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/bakerystorage/storage_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/bakerystorage/storage_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -5,7 +5,7 @@ import ( "errors" - "time" + "time" // Only used for time types. gitjujutesting "github.com/juju/testing" jc "github.com/juju/testing/checkers" @@ -71,7 +71,7 @@ store, err := New(s.config) c.Assert(err, jc.ErrorIsNil) - expiryTime := time.Now().Add(24 * time.Hour) + expiryTime := testing.NonZeroTime().Add(24 * time.Hour) store = store.ExpireAt(expiryTime) err = store.Put("foo", "bar") diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/binarystorage.go juju-core-2.0.0/src/github.com/juju/juju/state/binarystorage.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/binarystorage.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/binarystorage.go 2016-10-13 14:31:49.000000000 +0000 @@ -24,7 +24,11 @@ // This is a hosted model. Hosted models have their own tools // catalogue, which we combine with the controller's. - controllerSt, err := st.ForModel(st.controllerTag) + controllerModel, err := st.ControllerModel() + if err != nil { + return nil, errors.Trace(err) + } + controllerSt, err := st.ForModel(controllerModel.ModelTag()) if err != nil { return nil, errors.Trace(err) } @@ -50,11 +54,15 @@ // GUIStorage returns a new binarystorage.StorageCloser that stores GUI archive // metadata in the "juju" database "guimetadata" collection. 
func (st *State) GUIStorage() (binarystorage.StorageCloser, error) { - return st.newBinaryStorageCloser(guimetadataC, st.controllerTag.Id()), nil + controllerModel, err := st.ControllerModel() + if err != nil { + return nil, errors.Trace(err) + } + return st.newBinaryStorageCloser(guimetadataC, controllerModel.UUID()), nil } func (st *State) newBinaryStorageCloser(collectionName, uuid string) binarystorage.StorageCloser { - db, closer1 := st.database.CopySession() + db, closer1 := st.database.Copy() metadataCollection, closer2 := db.GetCollection(collectionName) txnRunner, closer3 := db.TransactionRunner() closer := func() { diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/binarystorage_test.go juju-core-2.0.0/src/github.com/juju/juju/state/binarystorage_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/binarystorage_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/binarystorage_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -84,9 +84,10 @@ "uuid": s.modelUUID, }) _, s.st, err = s.State.NewModel(state.ModelArgs{ - CloudName: "dummy", - Config: cfg, - Owner: names.NewLocalUserTag("test-admin"), + CloudName: "dummy", + CloudRegion: "dummy-region", + Config: cfg, + Owner: names.NewLocalUserTag("test-admin"), StorageProviderRegistry: storage.StaticProviderRegistry{}, }) c.Assert(err, jc.ErrorIsNil) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/block.go juju-core-2.0.0/src/github.com/juju/juju/state/block.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/block.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/block.go 2016-10-13 14:31:49.000000000 +0000 @@ -32,6 +32,8 @@ // Message returns explanation that accompanies this block. Message() string + + updateMessageOp(string) ([]txn.Op, error) } // BlockType specifies block type for enum benefit. 
@@ -117,6 +119,15 @@ Message string `bson:"message,omitempty"` } +func (b *block) updateMessageOp(message string) ([]txn.Op, error) { + return []txn.Op{{ + C: blocksC, + Id: b.doc.DocID, + Assert: txn.DocExists, + Update: bson.D{{"$set", bson.D{{"message", message}}}}, + }}, nil +} + // Id is part of the state.Block interface. func (b *block) Id() string { return b.doc.DocID @@ -244,14 +255,14 @@ // Only one instance of each block type can exist in model. func setModelBlock(st *State, t BlockType, msg string) error { buildTxn := func(attempt int) ([]txn.Op, error) { - _, exists, err := st.GetBlockForType(t) + block, exists, err := st.GetBlockForType(t) if err != nil { return nil, errors.Trace(err) } // Cannot create blocks of the same type more than once per model. // Cannot update current blocks. if exists { - return nil, errors.Errorf("block %v is already ON", t.String()) + return block.updateMessageOp(msg) } return createModelBlockOps(st, t, msg) } @@ -310,5 +321,6 @@ Remove: true, }}, nil } - return nil, errors.Errorf("block %v is already OFF", t.String()) + // If the block doesn't exist, we're all good. 
+ return nil, nil } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/block_test.go juju-core-2.0.0/src/github.com/juju/juju/state/block_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/block_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/block_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,9 +4,8 @@ package state_test import ( - "fmt" + "strings" - "github.com/juju/errors" jc "github.com/juju/testing/checkers" "github.com/juju/utils" gc "gopkg.in/check.v1" @@ -33,6 +32,10 @@ c.Assert(all, gc.HasLen, 0) } +func (s *blockSuite) TestNoInitialBlocks(c *gc.C) { + assertNoEnvBlock(c, s.State) +} + func (s *blockSuite) assertNoTypedBlock(c *gc.C, t state.BlockType) { one, found, err := s.State.GetBlockForType(t) c.Assert(err, jc.ErrorIsNil) @@ -40,70 +43,51 @@ c.Assert(one, gc.IsNil) } -func assertEnvHasBlock(c *gc.C, st *state.State, t state.BlockType, msg string) { - dBlock, found, err := st.GetBlockForType(t) +func (s *blockSuite) assertModelHasBlock(c *gc.C, st *state.State, t state.BlockType, msg string) { + block, found, err := st.GetBlockForType(t) c.Assert(err, jc.ErrorIsNil) c.Assert(found, jc.IsTrue) - c.Assert(dBlock, gc.NotNil) - c.Assert(dBlock.Type(), gc.DeepEquals, t) - tag, err := dBlock.Tag() + c.Assert(block, gc.NotNil) + c.Assert(block.Type(), gc.Equals, t) + tag, err := block.Tag() c.Assert(err, jc.ErrorIsNil) - c.Assert(tag, gc.DeepEquals, st.ModelTag()) - c.Assert(dBlock.Message(), gc.DeepEquals, msg) + c.Assert(tag, gc.Equals, st.ModelTag()) + c.Assert(block.Message(), gc.Equals, msg) } -func (s *blockSuite) switchOnBlock(c *gc.C, t state.BlockType) string { - msg := "" - err := s.State.SwitchBlockOn(t, msg) +func (s *blockSuite) switchOnBlock(c *gc.C, t state.BlockType, message ...string) { + m := strings.Join(message, " ") + err := s.State.SwitchBlockOn(state.DestroyBlock, m) c.Assert(err, jc.ErrorIsNil) +} - assertEnvHasBlock(c, s.State, t, msg) - return msg +func (s 
*blockSuite) TestSwitchOnBlock(c *gc.C) { + s.switchOnBlock(c, state.DestroyBlock, "some message") + s.assertModelHasBlock(c, s.State, state.DestroyBlock, "some message") +} + +func (s *blockSuite) TestSwitchOnBlockAlreadyOn(c *gc.C) { + s.switchOnBlock(c, state.DestroyBlock, "first message") + s.switchOnBlock(c, state.DestroyBlock, "second message") + s.assertModelHasBlock(c, s.State, state.DestroyBlock, "second message") } func (s *blockSuite) switchOffBlock(c *gc.C, t state.BlockType) { err := s.State.SwitchBlockOff(t) c.Assert(err, jc.ErrorIsNil) - assertNoEnvBlock(c, s.State) - s.assertNoTypedBlock(c, t) -} - -func (s *blockSuite) assertBlocked(c *gc.C, t state.BlockType) { - msg := s.switchOnBlock(c, t) - - expectedErr := fmt.Sprintf(".*block %v is already ON.*", t.String()) - // cannot duplicate - err := s.State.SwitchBlockOn(t, msg) - c.Assert(errors.Cause(err), gc.ErrorMatches, expectedErr) - - // cannot update - err = s.State.SwitchBlockOn(t, "Test block update") - c.Assert(errors.Cause(err), gc.ErrorMatches, expectedErr) - - s.switchOffBlock(c, t) - - err = s.State.SwitchBlockOff(t) - expectedErr = fmt.Sprintf(".*block %v is already OFF.*", t.String()) - c.Assert(errors.Cause(err), gc.ErrorMatches, expectedErr) } -func (s *blockSuite) TestNewModelNotBlocked(c *gc.C) { +func (s *blockSuite) TestSwitchOffBlockNoBlock(c *gc.C) { + s.switchOffBlock(c, state.DestroyBlock) assertNoEnvBlock(c, s.State) s.assertNoTypedBlock(c, state.DestroyBlock) - s.assertNoTypedBlock(c, state.RemoveBlock) - s.assertNoTypedBlock(c, state.ChangeBlock) } -func (s *blockSuite) TestDestroyBlocked(c *gc.C) { - s.assertBlocked(c, state.DestroyBlock) -} - -func (s *blockSuite) TestRemoveBlocked(c *gc.C) { - s.assertBlocked(c, state.RemoveBlock) -} - -func (s *blockSuite) TestChangeBlocked(c *gc.C) { - s.assertBlocked(c, state.ChangeBlock) +func (s *blockSuite) TestSwitchOffBlock(c *gc.C) { + s.switchOnBlock(c, state.DestroyBlock) + s.switchOffBlock(c, state.DestroyBlock) + 
assertNoEnvBlock(c, s.State) + s.assertNoTypedBlock(c, state.DestroyBlock) } func (s *blockSuite) TestNonsenseBlocked(c *gc.C) { @@ -125,7 +109,7 @@ msg := "another env tst" err := st2.SwitchBlockOn(t, msg) c.Assert(err, jc.ErrorIsNil) - assertEnvHasBlock(c, st2, t, msg) + s.assertModelHasBlock(c, st2, t, msg) //check correct env has it assertNoEnvBlock(c, s.State) @@ -196,7 +180,7 @@ }) owner := names.NewUserTag("test@remote") env, st, err := s.State.NewModel(state.ModelArgs{ - CloudName: "dummy", Config: cfg, Owner: owner, + CloudName: "dummy", CloudRegion: "dummy-region", Config: cfg, Owner: owner, StorageProviderRegistry: storage.StaticProviderRegistry{}, }) c.Assert(err, jc.ErrorIsNil) @@ -209,12 +193,12 @@ t := state.DestroyBlock err := s.State.SwitchBlockOn(t, msg) c.Assert(err, jc.ErrorIsNil) - assertEnvHasBlock(c, s.State, t, msg) + s.assertModelHasBlock(c, s.State, t, msg) } defer state.SetBeforeHooks(c, s.State, switchBlockOn).Check() msg := "concurrency tst" t := state.RemoveBlock err := s.State.SwitchBlockOn(t, msg) c.Assert(err, jc.ErrorIsNil) - assertEnvHasBlock(c, s.State, t, msg) + s.assertModelHasBlock(c, s.State, t, msg) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/charm.go juju-core-2.0.0/src/github.com/juju/juju/state/charm.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/charm.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/charm.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,7 +4,6 @@ package state import ( - "net/url" "regexp" "github.com/juju/errors" @@ -47,9 +46,33 @@ // charmDoc represents the internal state of a charm in MongoDB. 
type charmDoc struct { - DocID string `bson:"_id"` - URL *charm.URL `bson:"url"` // DANGEROUS see below - ModelUUID string `bson:"model-uuid"` + DocID string `bson:"_id"` + URL *charm.URL `bson:"url"` // DANGEROUS see charm.* fields below + + // Life manages charm lifetime in the usual way, but only local + // charms can actually be "destroyed"; store charms are + // immortal. When a local charm is removed, its document is left + // in place, with Life set to Dead, to ensure we don't + // accidentally reuse the charm URL, which must be unique within + // a model. + // + // Note that this aligns with the existing contract implied by + // Dead: that most clients should see it as not existing at all. + // Nothing strictly obliges us to clean up the doc. + Life Life `bson:"life"` + + // These fields are flags; if any of them is set, the charm + // cannot actually be safely used for anything. + PendingUpload bool `bson:"pendingupload"` + Placeholder bool `bson:"placeholder"` + + // These fields control access to the charm archive. + BundleSha256 string `bson:"bundlesha256"` + StoragePath string `bson:"storagepath"` + Macaroon []byte `bson:"macaroon"` + + // The remaining fields hold data sufficient to define a + // charm.Charm. // TODO(fwereade) 2015-06-18 lp:1467964 // DANGEROUS: our schema can change any time the charm package changes, @@ -63,17 +86,6 @@ Config *charm.Config `bson:"config"` Actions *charm.Actions `bson:"actions"` Metrics *charm.Metrics `bson:"metrics"` - - // DEPRECATED: BundleURL is deprecated, and exists here - // only for migration purposes. We should remove this - // when migrations are no longer necessary. - BundleURL *url.URL `bson:"bundleurl,omitempty"` - - BundleSha256 string `bson:"bundlesha256"` - StoragePath string `bson:"storagepath"` - PendingUpload bool `bson:"pendingupload"` - Placeholder bool `bson:"placeholder"` - Macaroon []byte `bson:"macaroon"` } // CharmInfo contains all the data necessary to store a charm's metadata. 
@@ -95,7 +107,6 @@ doc := charmDoc{ DocID: info.ID.String(), URL: info.ID, - ModelUUID: st.ModelTag().Id(), Meta: info.Charm.Meta(), Config: safeConfig(info.Charm), Metrics: info.Charm.Metrics(), @@ -110,7 +121,7 @@ } doc.Macaroon = mac } - return insertAnyCharmOps(&doc) + return insertAnyCharmOps(st, &doc) } // insertPlaceholderCharmOps returns the txn operations necessary to insert a @@ -120,10 +131,9 @@ if curl == nil { return nil, errors.New("*charm.URL was nil") } - return insertAnyCharmOps(&charmDoc{ + return insertAnyCharmOps(st, &charmDoc{ DocID: curl.String(), URL: curl, - ModelUUID: st.ModelTag().Id(), Placeholder: true, }) } @@ -135,32 +145,71 @@ if curl == nil { return nil, errors.New("*charm.URL was nil") } - return insertAnyCharmOps(&charmDoc{ + return insertAnyCharmOps(st, &charmDoc{ DocID: curl.String(), URL: curl, - ModelUUID: st.ModelTag().Id(), PendingUpload: true, }) } // insertAnyCharmOps returns the txn operations necessary to insert the supplied // charm document. 
-func insertAnyCharmOps(cdoc *charmDoc) ([]txn.Op, error) { - return []txn.Op{{ +func insertAnyCharmOps(st modelBackend, cdoc *charmDoc) ([]txn.Op, error) { + + charms, closer := st.getCollection(charmsC) + defer closer() + + life, err := nsLife.read(charms, cdoc.DocID) + if errors.IsNotFound(err) { + // everything is as it should be + } else if err != nil { + return nil, errors.Trace(err) + } else if life == Dead { + return nil, errors.New("url already consumed") + } else { + return nil, errors.New("already exists") + } + charmOp := txn.Op{ C: charmsC, Id: cdoc.DocID, Assert: txn.DocMissing, Insert: cdoc, - }}, nil + } + + refcounts, closer := st.getCollection(refcountsC) + defer closer() + + charmKey := charmGlobalKey(cdoc.URL) + refOp, required, err := nsRefcounts.LazyCreateOp(refcounts, charmKey) + if err != nil { + return nil, errors.Trace(err) + } else if required { + return []txn.Op{refOp, charmOp}, nil + } + return []txn.Op{charmOp}, nil } // updateCharmOps returns the txn operations necessary to update the charm // document with the supplied data, so long as the supplied assert still holds // true. func updateCharmOps( - st *State, info CharmInfo, assert interface{}, + st *State, info CharmInfo, assert bson.D, ) ([]txn.Op, error) { + charms, closer := st.getCollection(charmsC) + defer closer() + + charmKey := info.ID.String() + op, err := nsLife.aliveOp(charms, charmKey) + if err != nil { + return nil, errors.Annotate(err, "charm") + } + lifeAssert, ok := op.Assert.(bson.D) + if !ok { + return nil, errors.Errorf("expected bson.D, got %#v", op.Assert) + } + op.Assert = append(lifeAssert, assert...) 
+ data := bson.D{ {"meta", info.Charm.Meta()}, {"config", safeConfig(info.Charm)}, @@ -171,7 +220,6 @@ {"pendingupload", false}, {"placeholder", false}, } - if len(info.Macaroon) > 0 { mac, err := info.Macaroon.MarshalBinary() if err != nil { @@ -180,13 +228,8 @@ data = append(data, bson.DocElem{"macaroon", mac}) } - updateFields := bson.D{{"$set", data}} - return []txn.Op{{ - C: charmsC, - Id: info.ID.String(), - Assert: assert, - Update: updateFields, - }}, nil + op.Update = bson.D{{"$set", data}} + return []txn.Op{op}, nil } // convertPlaceholderCharmOps returns the txn operations necessary to convert @@ -222,13 +265,22 @@ if err != nil { return nil, errors.Trace(err) } + + refcounts, closer := st.getCollection(refcountsC) + defer closer() + var ops []txn.Op for _, doc := range docs { if doc.URL.Revision >= curl.Revision { continue } - ops = append(ops, txn.Op{ - C: charmsC, + key := charmGlobalKey(doc.URL) + refOp, err := nsRefcounts.RemoveOp(refcounts, key, 0) + if err != nil { + return nil, errors.Trace(err) + } + ops = append(ops, refOp, txn.Op{ + C: charms.Name(), Id: doc.DocID, Assert: stillPlaceholder, Remove: true, @@ -280,6 +332,82 @@ return names.NewCharmTag(c.URL().String()) } +// Life returns the charm's life state. +func (c *Charm) Life() Life { + return c.doc.Life +} + +// Refresh loads fresh charm data from the database. In practice, the +// only observable change should be to its Life value. +func (c *Charm) Refresh() error { + ch, err := c.st.Charm(c.doc.URL) + if err != nil { + return errors.Trace(err) + } + c.doc = ch.doc + return nil +} + +// Destroy sets the charm to Dying and prevents it from being used by +// applications or units. It only works on local charms, and only when +// the charm is not referenced by any application. 
+func (c *Charm) Destroy() error { + buildTxn := func(_ int) ([]txn.Op, error) { + ops, err := charmDestroyOps(c.st, c.doc.URL) + switch errors.Cause(err) { + case nil: + case errNotAlive: + return nil, jujutxn.ErrNoOperations + default: + return nil, errors.Trace(err) + } + return ops, nil + } + if err := c.st.run(buildTxn); err != nil { + return errors.Trace(err) + } + c.doc.Life = Dying + return nil +} + +// Remove will delete the charm's stored archive and render the charm +// inaccessible to future clients. It will fail unless the charm is +// already Dying (indicating that someone has called Destroy). +func (c *Charm) Remove() error { + switch c.doc.Life { + case Alive: + return errors.New("still alive") + case Dead: + return nil + } + + stor := storage.NewStorage(c.st.ModelUUID(), c.st.MongoSession()) + err := stor.Remove(c.doc.StoragePath) + if errors.IsNotFound(err) { + // Not a problem, but we might still need to run the + // transaction further down to complete the process. + } else if err != nil { + return errors.Annotate(err, "deleting archive") + } + + buildTxn := func(_ int) ([]txn.Op, error) { + ops, err := charmRemoveOps(c.st, c.doc.URL) + switch errors.Cause(err) { + case nil: + case errAlreadyDead: + return nil, jujutxn.ErrNoOperations + default: + return nil, errors.Trace(err) + } + return ops, nil + } + if err := c.st.run(buildTxn); err != nil { + return errors.Trace(err) + } + c.doc.Life = Dead + return nil +} + // charmGlobalKey returns the global database key for the charm // with the given url. func charmGlobalKey(charmURL *charm.URL) string { @@ -333,15 +461,6 @@ return c.doc.StoragePath } -// BundleURL returns the url to the charm bundle in -// the provider storage. -// -// DEPRECATED: this is only to be used for migrating -// charm archives to model storage. -func (c *Charm) BundleURL() *url.URL { - return c.doc.BundleURL -} - // BundleSha256 returns the SHA256 digest of the charm bundle bytes. 
func (c *Charm) BundleSha256() string { return c.doc.BundleSha256 @@ -382,7 +501,7 @@ SHA256: c.BundleSha256(), Macaroon: m, } - ops, err := updateCharmOps(c.st, info, txn.DocExists) + ops, err := updateCharmOps(c.st, info, nil) if err != nil { return errors.Trace(err) } @@ -392,15 +511,6 @@ return nil } -// deleteCharmArchive deletes a charm archive from blob storage. -func (st *State) deleteCharmArchive(curl *charm.URL, storagePath string) error { - stor := storage.NewStorage(st.ModelUUID(), st.MongoSession()) - if err := stor.Remove(storagePath); err != nil { - return errors.Annotate(err, "cannot delete charm from storage") - } - return nil -} - // AddCharm adds the ch charm with curl to the state. // On success the newly added charm state is returned. func (st *State) AddCharm(info CharmInfo) (stch *Charm, err error) { @@ -453,7 +563,7 @@ defer closer() var cdoc charmDoc var charms []*Charm - iter := charmsCollection.Find(nil).Iter() + iter := charmsCollection.Find(nsLife.notDead()).Iter() for iter.Next(&cdoc) { ch := newCharm(st, &cdoc) charms = append(charms, ch) @@ -473,6 +583,7 @@ {"placeholder", bson.D{{"$ne", true}}}, {"pendingupload", bson.D{{"$ne", true}}}, } + what = append(what, nsLife.notDead()...) err := charms.Find(what).One(&cdoc) if err == mgo.ErrNotFound { return nil, errors.NotFoundf("charm %q", curl) @@ -518,7 +629,7 @@ // in state for the charm. // // The url's schema must be "local" and it must include a revision. -func (st *State) PrepareLocalCharmUpload(curl *charm.URL) (chosenUrl *charm.URL, err error) { +func (st *State) PrepareLocalCharmUpload(curl *charm.URL) (chosenURL *charm.URL, err error) { // Perform a few sanity checks first. if curl.Schema != "local" { return nil, errors.Errorf("expected charm URL with local schema, got %q", curl) @@ -555,11 +666,11 @@ // More recent revision exists in state, pick the next. 
chosenRevision = maxRevision + 1 } - chosenUrl = curl.WithRevision(chosenRevision) - return insertPendingCharmOps(st, chosenUrl) + chosenURL = curl.WithRevision(chosenRevision) + return insertPendingCharmOps(st, chosenURL) } if err = st.run(buildTxn); err == nil { - return chosenUrl, nil + return chosenURL, nil } return nil, errors.Trace(err) } @@ -596,11 +707,10 @@ case err == mgo.ErrNotFound: uploadedCharm = charmDoc{ DocID: st.docID(curl.String()), - ModelUUID: st.ModelTag().Id(), URL: curl, PendingUpload: true, } - return insertAnyCharmOps(&uploadedCharm) + return insertAnyCharmOps(st, &uploadedCharm) case err != nil: return nil, errors.Trace(err) case uploadedCharm.Placeholder: diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/charmref.go juju-core-2.0.0/src/github.com/juju/juju/state/charmref.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/charmref.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/charmref.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,175 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package state + +import ( + "github.com/juju/errors" + "gopkg.in/juju/charm.v6-unstable" + "gopkg.in/mgo.v2/txn" +) + +var errCharmInUse = errors.New("charm in use") + +// appCharmIncRefOps returns the operations necessary to record a reference +// to a charm and its per-application settings and storage constraints +// documents. It will fail if the charm is not Alive. +func appCharmIncRefOps(st modelBackend, appName string, curl *charm.URL, canCreate bool) ([]txn.Op, error) { + + charms, closer := st.getCollection(charmsC) + defer closer() + + // If we're migrating. charm document will not be present. But + // if we're not migrating, we need to check the charm is alive. 
+ var checkOps []txn.Op + count, err := charms.FindId(curl.String()).Count() + if err != nil { + return nil, errors.Annotate(err, "charm") + } else if count != 0 { + checkOp, err := nsLife.aliveOp(charms, curl.String()) + if err != nil { + return nil, errors.Annotate(err, "charm") + } + checkOps = []txn.Op{checkOp} + } + + refcounts, closer := st.getCollection(refcountsC) + defer closer() + + getIncRefOp := nsRefcounts.CreateOrIncRefOp + if !canCreate { + getIncRefOp = nsRefcounts.StrictIncRefOp + } + settingsKey := applicationSettingsKey(appName, curl) + settingsOp, err := getIncRefOp(refcounts, settingsKey, 1) + if err != nil { + return nil, errors.Annotate(err, "settings reference") + } + storageConstraintsKey := applicationStorageConstraintsKey(appName, curl) + storageConstraintsOp, err := getIncRefOp(refcounts, storageConstraintsKey, 1) + if err != nil { + return nil, errors.Annotate(err, "storage constraints reference") + } + charmKey := charmGlobalKey(curl) + charmOp, err := getIncRefOp(refcounts, charmKey, 1) + if err != nil { + return nil, errors.Annotate(err, "charm reference") + } + + return append(checkOps, settingsOp, storageConstraintsOp, charmOp), nil +} + +// appCharmDecRefOps returns the operations necessary to delete a +// reference to a charm and its per-application settings and storage +// constraints document. If no references to a given (app, charm) pair +// remain, the operations returned will also remove the settings and +// storage constraints documents for that pair, and schedule a cleanup +// to see if the charm itself is now unreferenced and can be tidied +// away itself. 
+func appCharmDecRefOps(st modelBackend, appName string, curl *charm.URL) ([]txn.Op, error) { + + refcounts, closer := st.getCollection(refcountsC) + defer closer() + + charmKey := charmGlobalKey(curl) + charmOp, err := nsRefcounts.AliveDecRefOp(refcounts, charmKey) + if err != nil { + return nil, errors.Annotate(err, "charm reference") + } + + settingsKey := applicationSettingsKey(appName, curl) + settingsOp, isFinal, err := nsRefcounts.DyingDecRefOp(refcounts, settingsKey) + if err != nil { + return nil, errors.Annotatef(err, "settings reference %s", settingsKey) + } + + storageConstraintsKey := applicationStorageConstraintsKey(appName, curl) + storageConstraintsOp, _, err := nsRefcounts.DyingDecRefOp(refcounts, storageConstraintsKey) + if err != nil { + return nil, errors.Annotatef(err, "storage constraints reference %s", storageConstraintsKey) + } + + ops := []txn.Op{settingsOp, storageConstraintsOp, charmOp} + if isFinal { + // XXX(fwereade): this construction, in common with ~all + // our refcount logic, is safe in parallel but not in + // serial. If this logic is used twice while composing a + // single transaction, the removal won't be triggered. + // see `Application.removeOps` for the workaround. + ops = append(ops, finalAppCharmRemoveOps(appName, curl)...) + } + return ops, nil +} + +// finalAppCharmRemoveOps returns operations to delete the settings +// and storage constraints documents and queue a charm cleanup. 
+func finalAppCharmRemoveOps(appName string, curl *charm.URL) []txn.Op { + settingsKey := applicationSettingsKey(appName, curl) + removeSettingsOp := txn.Op{ + C: settingsC, + Id: settingsKey, + Remove: true, + } + storageConstraintsKey := applicationStorageConstraintsKey(appName, curl) + removeStorageConstraintsOp := removeStorageConstraintsOp(storageConstraintsKey) + cleanupOp := newCleanupOp(cleanupCharm, curl.String()) + return []txn.Op{removeSettingsOp, removeStorageConstraintsOp, cleanupOp} +} + +// charmDestroyOps implements the logic of charm.Destroy. +func charmDestroyOps(st modelBackend, curl *charm.URL) ([]txn.Op, error) { + + if curl.Schema != "local" { + // local charms keep a document around to prevent reuse + // of charm URLs, which several components believe to be + // unique keys (this is always true within a model). + // + // it's not so much that it's bad to delete store + // charms; but we don't have a way to reinstate them + // once purged, so we don't allow removal in the first + // place. + return nil, errors.New("cannot destroy non-local charms") + } + + charms, closer := st.getCollection(charmsC) + defer closer() + + charmKey := curl.String() + charmOp, err := nsLife.destroyOp(charms, charmKey, nil) + if err != nil { + return nil, errors.Annotate(err, "charm") + } + + refcounts, closer := st.getCollection(refcountsC) + defer closer() + + refcountKey := charmGlobalKey(curl) + refcountOp, err := nsRefcounts.RemoveOp(refcounts, refcountKey, 0) + switch errors.Cause(err) { + case nil: + case errRefcountChanged: + return nil, errCharmInUse + default: + return nil, errors.Annotate(err, "charm reference") + } + + return []txn.Op{charmOp, refcountOp}, nil +} + +// charmRemoveOps implements the logic of charm.Remove. 
+func charmRemoveOps(st modelBackend, curl *charm.URL) ([]txn.Op, error) { + + charms, closer := st.getCollection(charmsC) + defer closer() + + // NOTE: we do *not* actually remove the charm document, to + // prevent its URL from being recycled, and breaking caches. + // The "remove" terminology refers to the client's view of the + // change (after which the charm really will be inaccessible). + charmKey := curl.String() + charmOp, err := nsLife.dieOp(charms, charmKey, nil) + if err != nil { + return nil, errors.Annotate(err, "charm") + } + return []txn.Op{charmOp}, nil +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/charm_test.go juju-core-2.0.0/src/github.com/juju/juju/state/charm_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/charm_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/charm_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -7,35 +7,63 @@ "bytes" "fmt" "path/filepath" + "strings" "github.com/juju/errors" jc "github.com/juju/testing/checkers" - "github.com/juju/txn" "github.com/juju/utils" gc "gopkg.in/check.v1" "gopkg.in/juju/charm.v6-unstable" "gopkg.in/macaroon.v1" "gopkg.in/mgo.v2" - "gopkg.in/mgo.v2/bson" "github.com/juju/juju/state" + "github.com/juju/juju/state/storage" "github.com/juju/juju/testcharms" + "github.com/juju/juju/testing/factory" ) type CharmSuite struct { ConnSuite - curl *charm.URL + charm *state.Charm + curl *charm.URL } var _ = gc.Suite(&CharmSuite{}) func (s *CharmSuite) SetUpTest(c *gc.C) { s.ConnSuite.SetUpTest(c) - added := s.AddTestingCharm(c, "dummy") - s.curl = added.URL() + s.charm = s.AddTestingCharm(c, "dummy") + s.curl = s.charm.URL() } -func (s *CharmSuite) TestCharm(c *gc.C) { +func (s *CharmSuite) destroy(c *gc.C) { + err := s.charm.Destroy() + c.Assert(err, jc.ErrorIsNil) +} + +func (s *CharmSuite) remove(c *gc.C) { + s.destroy(c) + err := s.charm.Remove() + c.Assert(err, jc.ErrorIsNil) +} + +func (s *CharmSuite) checkRemoved(c *gc.C) { 
+ _, err := s.State.Charm(s.curl) + c.Check(err, gc.ErrorMatches, `charm ".*" not found`) + c.Check(err, jc.Satisfies, errors.IsNotFound) +} + +func (s *CharmSuite) TestAliveCharm(c *gc.C) { + s.testCharm(c) +} + +func (s *CharmSuite) TestDyingCharm(c *gc.C) { + s.destroy(c) + s.testCharm(c) +} + +func (s *CharmSuite) testCharm(c *gc.C) { dummy, err := s.State.Charm(s.curl) c.Assert(err, jc.ErrorIsNil) c.Assert(dummy.URL().String(), gc.Equals, s.curl.String()) @@ -76,6 +104,23 @@ }) } +func (s *CharmSuite) TestRemovedCharmNotFound(c *gc.C) { + s.remove(c) + s.checkRemoved(c) +} + +func (s *CharmSuite) TestRemovedCharmNotListed(c *gc.C) { + s.remove(c) + charms, err := s.State.AllCharms() + c.Check(err, jc.ErrorIsNil) + c.Check(charms, gc.HasLen, 0) +} + +func (s *CharmSuite) TestRemoveWithoutDestroy(c *gc.C) { + err := s.charm.Remove() + c.Assert(err, gc.ErrorMatches, "still alive") +} + func (s *CharmSuite) TestCharmNotFound(c *gc.C) { curl := charm.MustParseURL("local:anotherseries/dummy-1") _, err := s.State.Charm(curl) @@ -99,6 +144,134 @@ return info } +func (s *CharmSuite) TestDestroyStoreCharm(c *gc.C) { + info := s.dummyCharm(c, "cs:precise/dummy-2") + sch, err := s.State.AddCharm(info) + c.Assert(err, jc.ErrorIsNil) + err = sch.Destroy() + c.Assert(err, gc.ErrorMatches, "cannot destroy non-local charms") +} + +func (s *CharmSuite) TestRemoveDeletesStorage(c *gc.C) { + // We normally don't actually set up charm storage in state + // tests, but we need it here. 
+ path := s.charm.StoragePath() + stor := storage.NewStorage(s.State.ModelUUID(), s.State.MongoSession()) + err := stor.Put(path, strings.NewReader("abc"), 3) + c.Assert(err, jc.ErrorIsNil) + + s.destroy(c) + closer, _, err := stor.Get(path) + c.Assert(err, jc.ErrorIsNil) + closer.Close() + + s.remove(c) + _, _, err = stor.Get(path) + c.Assert(err, jc.Satisfies, errors.IsNotFound) +} + +func (s *CharmSuite) TestReferenceDyingCharm(c *gc.C) { + + s.destroy(c) + + args := state.AddApplicationArgs{ + Name: "blah", + Charm: s.charm, + } + _, err := s.State.AddApplication(args) + c.Check(err, gc.ErrorMatches, `cannot add application "blah": charm: not found or not alive`) +} + +func (s *CharmSuite) TestReferenceDyingCharmRace(c *gc.C) { + + defer state.SetBeforeHooks(c, s.State, func() { + s.destroy(c) + }).Check() + + args := state.AddApplicationArgs{ + Name: "blah", + Charm: s.charm, + } + _, err := s.State.AddApplication(args) + // bad message: see lp:1621754. should match + // TestReferenceDyingCharm above. 
+ c.Check(err, gc.ErrorMatches, `cannot add application "blah": application already exists`) +} + +func (s *CharmSuite) TestDestroyReferencedCharm(c *gc.C) { + s.Factory.MakeApplication(c, &factory.ApplicationParams{ + Charm: s.charm, + }) + + err := s.charm.Destroy() + c.Check(err, gc.ErrorMatches, "charm in use") +} + +func (s *CharmSuite) TestDestroyReferencedCharmRace(c *gc.C) { + + defer state.SetBeforeHooks(c, s.State, func() { + s.Factory.MakeApplication(c, &factory.ApplicationParams{ + Charm: s.charm, + }) + }).Check() + + err := s.charm.Destroy() + c.Check(err, gc.ErrorMatches, "charm in use") +} + +func (s *CharmSuite) TestDestroyUnreferencedCharm(c *gc.C) { + app := s.Factory.MakeApplication(c, &factory.ApplicationParams{ + Charm: s.charm, + }) + err := app.Destroy() + c.Assert(err, jc.ErrorIsNil) + + err = s.charm.Destroy() + c.Assert(err, jc.ErrorIsNil) +} + +func (s *CharmSuite) TestDestroyUnitReferencedCharm(c *gc.C) { + app := s.Factory.MakeApplication(c, &factory.ApplicationParams{ + Charm: s.charm, + }) + unit := s.Factory.MakeUnit(c, &factory.UnitParams{ + Application: app, + SetCharmURL: true, + }) + + // set app charm to something different + info := s.dummyCharm(c, "cs:quantal/dummy-2") + newCh, err := s.State.AddCharm(info) + c.Assert(err, jc.ErrorIsNil) + err = app.SetCharm(state.SetCharmConfig{Charm: newCh}) + c.Assert(err, jc.ErrorIsNil) + + // unit should still reference original charm until updated + err = s.charm.Destroy() + c.Assert(err, gc.ErrorMatches, "charm in use") + err = unit.SetCharmURL(info.ID) + c.Assert(err, jc.ErrorIsNil) + err = s.charm.Destroy() + c.Assert(err, jc.ErrorIsNil) +} + +func (s *CharmSuite) TestDestroyFinalUnitReference(c *gc.C) { + app := s.Factory.MakeApplication(c, &factory.ApplicationParams{ + Charm: s.charm, + }) + unit := s.Factory.MakeUnit(c, &factory.UnitParams{ + Application: app, + SetCharmURL: true, + }) + + err := app.Destroy() + c.Assert(err, jc.ErrorIsNil) + removeUnit(c, unit) + + 
assertCleanupCount(c, s.State, 1) + s.checkRemoved(c) +} + func (s *CharmSuite) TestAddCharm(c *gc.C) { // Check that adding charms from scratch works correctly. info := s.dummyCharm(c, "") @@ -212,6 +385,15 @@ c.Assert(curl.Revision, gc.Equals, 1234) } +func (s *CharmSuite) TestPrepareLocalCharmUploadRemoved(c *gc.C) { + // Remove the fixture charm and try to re-add it; it gets a new + // revision. + s.remove(c) + curl, err := s.State.PrepareLocalCharmUpload(s.curl) + c.Assert(err, jc.ErrorIsNil) + c.Assert(curl.Revision, gc.Equals, s.curl.Revision+1) +} + func (s *CharmSuite) TestPrepareStoreCharmUpload(c *gc.C) { // First test the sanity checks. sch, err := s.State.PrepareStoreCharmUpload(charm.MustParseURL("cs:quantal/dummy")) @@ -243,37 +425,6 @@ schCopy, err = s.State.PrepareStoreCharmUpload(info.ID) c.Assert(err, jc.ErrorIsNil) c.Assert(sch, jc.DeepEquals, schCopy) - - // Finally, try poking around the state with a placeholder and - // bundlesha256 to make sure we do the right thing. 
- curl := info.ID.WithRevision(999) - first := txn.TestHook{ - Before: func() { - err := s.State.AddStoreCharmPlaceholder(curl) - c.Assert(err, jc.ErrorIsNil) - }, - After: func() { - err := s.charms.RemoveId(state.DocID(s.State, curl.String())) - c.Assert(err, jc.ErrorIsNil) - }, - } - second := txn.TestHook{ - Before: func() { - err := s.State.AddStoreCharmPlaceholder(curl) - c.Assert(err, jc.ErrorIsNil) - }, - After: func() { - err := s.charms.UpdateId(state.DocID(s.State, curl.String()), bson.D{{"$set", bson.D{ - {"bundlesha256", "fake"}}, - }}) - c.Assert(err, jc.ErrorIsNil) - }, - } - defer state.SetTestHooks(c, s.State, first, second, first).Check() - - _, err = s.State.PrepareStoreCharmUpload(curl) - cause := errors.Cause(err) - c.Assert(cause, gc.Equals, txn.ErrExcessiveContention) } func (s *CharmSuite) TestUpdateUploadedCharm(c *gc.C) { diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/cleanup.go juju-core-2.0.0/src/github.com/juju/juju/state/cleanup.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/cleanup.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/cleanup.go 2016-10-13 14:31:49.000000000 +0000 @@ -20,7 +20,7 @@ // SCHEMACHANGE: the names are expressive, the values not so much. cleanupRelationSettings cleanupKind = "settings" cleanupUnitsForDyingService cleanupKind = "units" - cleanupCharmForDyingService cleanupKind = "charm" + cleanupCharm cleanupKind = "charm" cleanupDyingUnit cleanupKind = "dyingUnit" cleanupRemovedUnit cleanupKind = "removedUnit" cleanupServicesForDyingModel cleanupKind = "applications" @@ -33,23 +33,22 @@ cleanupMachinesForDyingModel cleanupKind = "modelMachines" ) -// cleanupDoc represents a potentially large set of documents that should be -// removed. +// cleanupDoc originally represented a set of documents that should be +// removed, but the Prefix field no longer means anything more than +// "what will be passed to the cleanup func". 
type cleanupDoc struct { - DocID string `bson:"_id"` - ModelUUID string `bson:"model-uuid"` - Kind cleanupKind - Prefix string + DocID string `bson:"_id"` + Kind cleanupKind `bson:"kind"` + Prefix string `bson:"prefix"` } // newCleanupOp returns a txn.Op that creates a cleanup document with a unique // id and the supplied kind and prefix. -func (st *State) newCleanupOp(kind cleanupKind, prefix string) txn.Op { +func newCleanupOp(kind cleanupKind, prefix string) txn.Op { doc := &cleanupDoc{ - DocID: st.docID(fmt.Sprint(bson.NewObjectId())), - ModelUUID: st.ModelUUID(), - Kind: kind, - Prefix: prefix, + DocID: fmt.Sprint(bson.NewObjectId()), + Kind: kind, + Prefix: prefix, } return txn.Op{ C: cleanupsC, @@ -84,8 +83,8 @@ switch doc.Kind { case cleanupRelationSettings: err = st.cleanupRelationSettings(doc.Prefix) - case cleanupCharmForDyingService: - err = st.cleanupCharmForDyingService(doc.Prefix) + case cleanupCharm: + err = st.cleanupCharm(doc.Prefix) case cleanupUnitsForDyingService: err = st.cleanupUnitsForDyingService(doc.Prefix) case cleanupDyingUnit: @@ -118,7 +117,7 @@ } } if err != nil { - logger.Errorf("cleanup failed: %v", err) + logger.Errorf("cleanup failed for %v(%q): %v", doc.Kind, doc.Prefix, err) continue } ops := []txn.Op{{ @@ -151,20 +150,9 @@ } func (st *State) cleanupRelationSettings(prefix string) error { - settings, closer := st.getCollection(settingsC) - defer closer() - // Documents marked for cleanup are not otherwise referenced in the - // system, and will not be under watch, and are therefore safe to - // delete directly. 
- settingsW := settings.Writeable() - - sel := bson.D{{"_id", bson.D{{"$regex", "^" + st.docID(prefix)}}}} - if count, err := settingsW.Find(sel).Count(); err != nil { - return fmt.Errorf("cannot detect cleanup targets: %v", err) - } else if count != 0 { - if _, err := settingsW.RemoveAll(sel); err != nil { - return fmt.Errorf("cannot remove documents marked for cleanup: %v", err) - } + change := relationSettingsCleanupChange{Prefix: st.docID(prefix)} + if err := Apply(st.database, change); err != nil { + return errors.Trace(err) } return nil } @@ -185,9 +173,9 @@ return nil } -// cleanupMachinesForDyingModel sets all non-manager, non-manual -// machines to Dying, if they are not already Dying or Dead. It's expected to -// be used when a model is destroyed. +// cleanupMachinesForDyingModel sets all non-manager machines to Dying, +// if they are not already Dying or Dead. It's expected to be used when +// a model is destroyed. func (st *State) cleanupMachinesForDyingModel() (err error) { // This won't miss machines, because a Dying model cannot have // machines added to it. But we do have to remove the machines themselves @@ -205,12 +193,19 @@ } manual, err := m.IsManual() if err != nil { - return err - } else if manual { - continue + return errors.Trace(err) } - err = m.ForceDestroy() - if err != nil { + destroy := m.ForceDestroy + if manual { + // Manually added machines should never be force- + // destroyed automatically. That should be a user- + // driven decision, since it may leak applications + // and resources on the machine. If something is + // stuck, then the user can still force-destroy + // the manual machines. 
+ destroy = m.Destroy + } + if err := destroy(); err != nil { return errors.Trace(err) } } @@ -260,21 +255,39 @@ return nil } -func (st *State) cleanupCharmForDyingService(charmURL string) error { +// cleanupCharm is speculative: it can abort without error for many +// reasons, because it's triggered somewhat overenthusiastically for +// simplicity's sake. +func (st *State) cleanupCharm(charmURL string) error { curl, err := charm.ParseURL(charmURL) if err != nil { return errors.Annotatef(err, "invalid charm URL %v", charmURL) } + if curl.Schema != "local" { + // No cleanup necessary or possible. + return nil + } + ch, err := st.Charm(curl) if errors.IsNotFound(err) { // Charm already removed. return nil + } else if err != nil { + return errors.Annotate(err, "reading charm") } - if err != nil { - return errors.Annotate(err, "cannot read charm record from state") + + err = ch.Destroy() + switch errors.Cause(err) { + case nil: + case errCharmInUse: + // No cleanup necessary at this time. + return nil + default: + return errors.Annotate(err, "destroying charm") } - if err := st.deleteCharmArchive(curl, ch.StoragePath()); err != nil && !errors.IsNotFound(err) { - return errors.Annotate(err, "cannot remove charm archive from storage") + + if err := ch.Remove(); err != nil { + return errors.Trace(err) } return nil } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/cleanup_test.go juju-core-2.0.0/src/github.com/juju/juju/state/cleanup_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/cleanup_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/cleanup_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -119,11 +119,19 @@ } func (s *CleanupSuite) TestCleanupModelMachines(c *gc.C) { - // Create a state and hosted machine. + // Create a controller machine, and manual and non-manual + // workload machine. 
stateMachine, err := s.State.AddMachine("quantal", state.JobManageModel) c.Assert(err, jc.ErrorIsNil) machine, err := s.State.AddMachine("quantal", state.JobHostUnits) c.Assert(err, jc.ErrorIsNil) + manualMachine, err := s.State.AddOneMachine(state.MachineTemplate{ + Series: "quantal", + Jobs: []state.MachineJob{state.JobHostUnits}, + InstanceId: "inst-ance", + Nonce: "manual:foo", + }) + c.Assert(err, jc.ErrorIsNil) // Create a relation with a unit in scope and assigned to the hosted machine. pr := NewPeerRelation(c, s.State) @@ -150,6 +158,7 @@ // ...but that the machine remains, and is Dead, ready for removal by the // provisioner. assertLife(c, machine, state.Dead) + assertLife(c, manualMachine, state.Dying) assertLife(c, stateMachine, state.Alive) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/clock.go juju-core-2.0.0/src/github.com/juju/juju/state/clock.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/clock.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/clock.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,20 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package state - -import ( - "github.com/juju/utils/clock" -) - -// GetClock exists to allow us to patch out time-handling; specifically -// for the worker/uniter tests that want to know what happens when leases -// expire unexpectedly. -// -// TODO(fwereade): lp:1479653 -// This is *clearly* a bad idea, and we should be injecting the dependency -// explicitly -- and using an injected clock across the codebase -- but, -// time pressure. 
-var GetClock = func() clock.Clock { - return clock.WallClock -} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/cloudcredentials.go juju-core-2.0.0/src/github.com/juju/juju/state/cloudcredentials.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/cloudcredentials.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/cloudcredentials.go 2016-10-13 14:31:49.000000000 +0000 @@ -7,8 +7,10 @@ "fmt" "github.com/juju/errors" + jujutxn "github.com/juju/txn" "github.com/juju/utils/set" "gopkg.in/juju/names.v2" + mgo "gopkg.in/mgo.v2" "gopkg.in/mgo.v2/bson" "gopkg.in/mgo.v2/txn" @@ -21,10 +23,30 @@ Owner string `bson:"owner"` Cloud string `bson:"cloud"` Name string `bson:"name"` + Revoked bool `bson:"revoked"` AuthType string `bson:"auth-type"` Attributes map[string]string `bson:"attributes,omitempty"` } +// CloudCredential returns the cloud credential for the given tag. +func (st *State) CloudCredential(tag names.CloudCredentialTag) (cloud.Credential, error) { + coll, cleanup := st.getCollection(cloudCredentialsC) + defer cleanup() + + var doc cloudCredentialDoc + err := coll.FindId(cloudCredentialDocID(tag)).One(&doc) + if err == mgo.ErrNotFound { + return cloud.Credential{}, errors.NotFoundf( + "cloud credential %q", tag.Id(), + ) + } else if err != nil { + return cloud.Credential{}, errors.Annotatef( + err, "getting cloud credential %q", tag.Id(), + ) + } + return doc.toCredential(), nil +} + // CloudCredentials returns the user's cloud credentials for a given cloud, // keyed by credential name. 
func (st *State) CloudCredentials(user names.UserTag, cloudName string) (map[string]cloud.Credential, error) { @@ -34,26 +56,30 @@ var doc cloudCredentialDoc credentials := make(map[string]cloud.Credential) iter := coll.Find(bson.D{ - {"owner", user.Canonical()}, + {"owner", user.Id()}, {"cloud", cloudName}, }).Iter() for iter.Next(&doc) { - credentials[doc.Name] = doc.toCredential() + tag, err := doc.cloudCredentialTag() + if err != nil { + return nil, errors.Trace(err) + } + credentials[tag.Id()] = doc.toCredential() } if err := iter.Err(); err != nil { return nil, errors.Annotatef( err, "cannot get cloud credentials for user %q, cloud %q", - user.Canonical(), cloudName, + user.Id(), cloudName, ) } return credentials, nil } -// UpdateCloudCredentials updates the user's cloud credentials. Any existing -// credentials with the same names will be replaced, and any other credentials -// not in the updated set will be untouched. -func (st *State) UpdateCloudCredentials(user names.UserTag, cloudName string, credentials map[string]cloud.Credential) error { +// UpdateCloudCredential adds or updates a cloud credential with the given tag. 
+func (st *State) UpdateCloudCredential(tag names.CloudCredentialTag, credential cloud.Credential) error { + credentials := map[names.CloudCredentialTag]cloud.Credential{tag: credential} buildTxn := func(attempt int) ([]txn.Op, error) { + cloudName := tag.Cloud().Id() cloud, err := st.Cloud(cloudName) if err != nil { return nil, errors.Trace(err) @@ -62,72 +88,109 @@ if err != nil { return nil, errors.Annotate(err, "validating cloud credentials") } - existingCreds, err := st.CloudCredentials(user, cloudName) - if err != nil { + _, err = st.CloudCredential(tag) + if err != nil && !errors.IsNotFound(err) { return nil, errors.Maskf(err, "fetching cloud credentials") } - for credName, cred := range credentials { - if _, ok := existingCreds[credName]; ok { - ops = append(ops, updateCloudCredentialOp(user, cloudName, credName, cred)) - } else { - ops = append(ops, createCloudCredentialOp(user, cloudName, credName, cred)) - } + if err == nil { + ops = append(ops, updateCloudCredentialOp(tag, credential)) + } else { + ops = append(ops, createCloudCredentialOp(tag, credential)) } return ops, nil } if err := st.run(buildTxn); err != nil { - return errors.Annotatef( - err, "updating cloud credentials for user %q, cloud %q", - user.String(), cloudName, - ) + return errors.Annotate(err, "updating cloud credentials") + } + return nil +} + +// RemoveCloudCredential removes a cloud credential with the given tag. +func (st *State) RemoveCloudCredential(tag names.CloudCredentialTag) error { + buildTxn := func(attempt int) ([]txn.Op, error) { + _, err := st.CloudCredential(tag) + if errors.IsNotFound(err) { + return nil, jujutxn.ErrNoOperations + } + if err != nil { + return nil, errors.Trace(err) + } + return removeCloudCredentialOps(tag), nil + } + if err := st.run(buildTxn); err != nil { + return errors.Annotate(err, "removing cloud credential") } return nil } // createCloudCredentialOp returns a txn.Op that will create // a cloud credential. 
-func createCloudCredentialOp(user names.UserTag, cloudName, credName string, cred cloud.Credential) txn.Op { +func createCloudCredentialOp(tag names.CloudCredentialTag, cred cloud.Credential) txn.Op { return txn.Op{ C: cloudCredentialsC, - Id: cloudCredentialDocID(user, cloudName, credName), + Id: cloudCredentialDocID(tag), Assert: txn.DocMissing, Insert: &cloudCredentialDoc{ - Owner: user.Canonical(), - Cloud: cloudName, - Name: credName, + Owner: tag.Owner().Id(), + Cloud: tag.Cloud().Id(), + Name: tag.Name(), AuthType: string(cred.AuthType()), Attributes: cred.Attributes(), + Revoked: cred.Revoked, }, } } // updateCloudCredentialOp returns a txn.Op that will update // a cloud credential. -func updateCloudCredentialOp(user names.UserTag, cloudName, credName string, cred cloud.Credential) txn.Op { +func updateCloudCredentialOp(tag names.CloudCredentialTag, cred cloud.Credential) txn.Op { return txn.Op{ C: cloudCredentialsC, - Id: cloudCredentialDocID(user, cloudName, credName), + Id: cloudCredentialDocID(tag), Assert: txn.DocExists, Update: bson.D{{"$set", bson.D{ {"auth-type", string(cred.AuthType())}, {"attributes", cred.Attributes()}, + {"revoked", cred.Revoked}, }}}, } } -func cloudCredentialDocID(user names.UserTag, cloudName, credentialName string) string { - return fmt.Sprintf("%s#%s#%s", user.Canonical(), cloudName, credentialName) +// removeCloudCredentialOps returns the txn.Ops that will remove +// a cloud credential. 
+func removeCloudCredentialOps(tag names.CloudCredentialTag) []txn.Op { + return []txn.Op{{ + C: cloudCredentialsC, + Id: cloudCredentialDocID(tag), + Assert: txn.DocExists, + Remove: true, + }} +} + +func cloudCredentialDocID(tag names.CloudCredentialTag) string { + return fmt.Sprintf("%s#%s#%s", tag.Cloud().Id(), tag.Owner().Id(), tag.Name()) +} + +func (c cloudCredentialDoc) cloudCredentialTag() (names.CloudCredentialTag, error) { + ownerTag := names.NewUserTag(c.Owner) + id := fmt.Sprintf("%s/%s/%s", c.Cloud, ownerTag.Id(), c.Name) + if !names.IsValidCloudCredential(id) { + return names.CloudCredentialTag{}, errors.NotValidf("cloud credential ID") + } + return names.NewCloudCredentialTag(id), nil } func (c cloudCredentialDoc) toCredential() cloud.Credential { out := cloud.NewCredential(cloud.AuthType(c.AuthType), c.Attributes) + out.Revoked = c.Revoked out.Label = c.Name return out } // validateCloudCredentials checks that the supplied cloud credentials are // valid for use with the controller's cloud, and returns a set of txn.Ops -// to assert the same in a transaction. +// to assert the same in a transaction. The map keys are the cloud credential +// IDs. // // TODO(rogpeppe) We're going to a lot of effort here to assert that a // cloud's auth types haven't changed since we looked at them a moment @@ -136,9 +199,19 @@ // cloud's auth type would invalidate all existing credentials and would // usually involve a new provider version and juju binary too, so // perhaps all this code is unnecessary. 
-func validateCloudCredentials(cloud cloud.Cloud, cloudName string, credentials map[string]cloud.Credential) ([]txn.Op, error) { +func validateCloudCredentials( + cloud cloud.Cloud, + cloudName string, + credentials map[names.CloudCredentialTag]cloud.Credential, +) ([]txn.Op, error) { requiredAuthTypes := make(set.Strings) - for name, credential := range credentials { + for tag, credential := range credentials { + if tag.Cloud().Id() != cloudName { + return nil, errors.NewNotValid(nil, fmt.Sprintf( + "credential %q for non-matching cloud is not valid (expected %q)", + tag.Id(), cloudName, + )) + } var found bool for _, authType := range cloud.AuthTypes { if credential.AuthType() == authType { @@ -149,7 +222,7 @@ if !found { return nil, errors.NewNotValid(nil, fmt.Sprintf( "credential %q with auth-type %q is not supported (expected one of %q)", - name, credential.AuthType(), cloud.AuthTypes, + tag.Id(), credential.AuthType(), cloud.AuthTypes, )) } requiredAuthTypes.Add(string(credential.AuthType())) @@ -164,3 +237,16 @@ } return ops, nil } + +// WatchCredential returns a new NotifyWatcher watching for +// changes to the specified credential. 
+func (st *State) WatchCredential(cred names.CloudCredentialTag) NotifyWatcher { + filter := func(rawId interface{}) bool { + id, ok := rawId.(string) + if !ok { + return false + } + return id == cloudCredentialDocID(cred) + } + return newNotifyCollWatcher(st, cloudCredentialsC, filter) +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/cloudcredentials_test.go juju-core-2.0.0/src/github.com/juju/juju/state/cloudcredentials_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/cloudcredentials_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/cloudcredentials_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,11 +4,14 @@ package state_test import ( + "github.com/juju/errors" jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" "gopkg.in/juju/names.v2" "github.com/juju/juju/cloud" + "github.com/juju/juju/state" + statetesting "github.com/juju/juju/state/testing" ) type CloudCredentialsSuite struct { @@ -17,40 +20,71 @@ var _ = gc.Suite(&CloudCredentialsSuite{}) -func (s *CloudCredentialsSuite) TestUpdateCloudCredentialsNew(c *gc.C) { +func (s *CloudCredentialsSuite) TestUpdateCloudCredentialNew(c *gc.C) { err := s.State.AddCloud("stratus", cloud.Cloud{ Type: "low", AuthTypes: cloud.AuthTypes{cloud.AccessKeyAuthType, cloud.UserPassAuthType}, }) c.Assert(err, jc.ErrorIsNil) - creds := map[string]cloud.Credential{ - "cred1": cloud.NewCredential(cloud.AccessKeyAuthType, map[string]string{ - "foo": "foo val", - "bar": "bar val", - }), - "cred2": cloud.NewCredential(cloud.AccessKeyAuthType, map[string]string{ - "a": "a val", - "b": "b val", - }), - "cred3": cloud.NewCredential(cloud.UserPassAuthType, map[string]string{ - "user": "bob", - "password": "bob's password", - }), - } - addCredLabels(creds) + cred := cloud.NewCredential(cloud.AccessKeyAuthType, map[string]string{ + "foo": "foo val", + "bar": "bar val", + }) + tag := names.NewCloudCredentialTag("stratus/bob/foobar") + err = 
s.State.UpdateCloudCredential(tag, cred) + c.Assert(err, jc.ErrorIsNil) + + // The retrieved credentials have labels although cloud.NewCredential + // doesn't have them, so add it to the expected value. + cred.Label = "foobar" + + out, err := s.State.CloudCredential(tag) + c.Assert(err, jc.ErrorIsNil) + c.Assert(out, jc.DeepEquals, cred) +} - err = s.State.UpdateCloudCredentials(names.NewUserTag("bob"), "stratus", creds) +func (s *CloudCredentialsSuite) TestUpdateCloudCredentialsExisting(c *gc.C) { + err := s.State.AddCloud("stratus", cloud.Cloud{ + Type: "low", + AuthTypes: cloud.AuthTypes{cloud.AccessKeyAuthType, cloud.UserPassAuthType}, + }) + c.Assert(err, jc.ErrorIsNil) + + cred := cloud.NewCredential(cloud.AccessKeyAuthType, map[string]string{ + "foo": "foo val", + "bar": "bar val", + }) + tag := names.NewCloudCredentialTag("stratus/bob/foobar") + err = s.State.UpdateCloudCredential(tag, cred) c.Assert(err, jc.ErrorIsNil) + + cred = cloud.NewCredential(cloud.UserPassAuthType, map[string]string{ + "user": "bob's nephew", + "password": "simple", + }) + cred.Revoked = true + err = s.State.UpdateCloudCredential(tag, cred) + c.Assert(err, jc.ErrorIsNil) + // The retrieved credentials have labels although cloud.NewCredential - // doesn't have them, so add them. - for name, cred := range creds { - cred.Label = name - creds[name] = cred - } - creds1, err := s.State.CloudCredentials(names.NewUserTag("bob"), "stratus") + // doesn't have them, so add it to the expected value. 
+ cred.Label = "foobar" + + out, err := s.State.CloudCredential(tag) c.Assert(err, jc.ErrorIsNil) - c.Assert(creds1, jc.DeepEquals, creds) + c.Assert(out, jc.DeepEquals, cred) +} + +func (s *CloudCredentialsSuite) TestUpdateCloudCredentialInvalidAuthType(c *gc.C) { + err := s.State.AddCloud("stratus", cloud.Cloud{ + Type: "low", + AuthTypes: cloud.AuthTypes{cloud.AccessKeyAuthType}, + }) + tag := names.NewCloudCredentialTag("stratus/bob/foobar") + cred := cloud.NewCredential(cloud.UserPassAuthType, nil) + err = s.State.UpdateCloudCredential(tag, cred) + c.Assert(err, gc.ErrorMatches, `updating cloud credentials: validating cloud credentials: credential "stratus/bob/foobar" with auth-type "userpass" is not supported \(expected one of \["access-key"\]\)`) } func (s *CloudCredentialsSuite) TestCloudCredentialsEmpty(c *gc.C) { @@ -59,81 +93,118 @@ c.Assert(creds, gc.HasLen, 0) } -func (s *CloudCredentialsSuite) TestUpdateCloudCredentialsExisting(c *gc.C) { +func (s *CloudCredentialsSuite) TestCloudCredentials(c *gc.C) { err := s.State.AddCloud("stratus", cloud.Cloud{ Type: "low", AuthTypes: cloud.AuthTypes{cloud.AccessKeyAuthType, cloud.UserPassAuthType}, }) c.Assert(err, jc.ErrorIsNil) - err = s.State.UpdateCloudCredentials(names.NewUserTag("bob"), "stratus", map[string]cloud.Credential{ - "cred1": cloud.NewCredential(cloud.AccessKeyAuthType, map[string]string{ - "foo": "foo val", - "bar": "bar val", - }), - "cred2": cloud.NewCredential(cloud.AccessKeyAuthType, map[string]string{ - "a": "a val", - "b": "b val", - }), - "cred3": cloud.NewCredential(cloud.UserPassAuthType, map[string]string{ - "user": "bob", - "password": "bob's password", - }), - }) - c.Assert(err, jc.ErrorIsNil) - err = s.State.UpdateCloudCredentials(names.NewUserTag("bob"), "stratus", map[string]cloud.Credential{ - "cred1": cloud.NewCredential(cloud.UserPassAuthType, map[string]string{ - "user": "bob's nephew", - "password": "simple", - }), - "cred2": cloud.NewCredential(cloud.AccessKeyAuthType, 
map[string]string{ - "b": "new b val", - }), - "cred4": cloud.NewCredential(cloud.AccessKeyAuthType, map[string]string{ - "d": "d val", - }), + otherUser := s.Factory.MakeUser(c, nil).UserTag() + + tag1 := names.NewCloudCredentialTag("stratus/bob/bobcred1") + cred1 := cloud.NewCredential(cloud.AccessKeyAuthType, map[string]string{ + "foo": "foo val", + "bar": "bar val", }) + err = s.State.UpdateCloudCredential(tag1, cred1) c.Assert(err, jc.ErrorIsNil) - expect := map[string]cloud.Credential{ - "cred1": cloud.NewCredential(cloud.UserPassAuthType, map[string]string{ - "user": "bob's nephew", - "password": "simple", - }), - "cred2": cloud.NewCredential(cloud.AccessKeyAuthType, map[string]string{ - "b": "new b val", - }), - "cred3": cloud.NewCredential(cloud.UserPassAuthType, map[string]string{ - "user": "bob", - "password": "bob's password", - }), - "cred4": cloud.NewCredential(cloud.AccessKeyAuthType, map[string]string{ - "d": "d val", - }), - } - addCredLabels(expect) - - creds1, err := s.State.CloudCredentials(names.NewUserTag("bob"), "stratus") + tag2 := names.NewCloudCredentialTag("stratus/" + otherUser.Id() + "/foobar") + tag3 := names.NewCloudCredentialTag("stratus/bob/bobcred2") + cred2 := cloud.NewCredential(cloud.AccessKeyAuthType, map[string]string{ + "baz": "baz val", + "qux": "qux val", + }) + err = s.State.UpdateCloudCredential(tag2, cred2) c.Assert(err, jc.ErrorIsNil) - c.Assert(creds1, jc.DeepEquals, expect) + err = s.State.UpdateCloudCredential(tag3, cred2) + c.Assert(err, jc.ErrorIsNil) + + cred1.Label = "bobcred1" + cred2.Label = "bobcred2" + + for _, userName := range []string{"bob", "bob"} { + creds, err := s.State.CloudCredentials(names.NewUserTag(userName), "stratus") + c.Assert(err, jc.ErrorIsNil) + c.Assert(creds, jc.DeepEquals, map[string]cloud.Credential{ + tag1.Id(): cred1, + tag3.Id(): cred2, + }) + } } -func (s *CloudCredentialsSuite) TestUpdateCloudCredentialsInvalidAuthType(c *gc.C) { +func (s *CloudCredentialsSuite) 
TestRemoveCredentials(c *gc.C) { + // Create it. err := s.State.AddCloud("stratus", cloud.Cloud{ Type: "low", - AuthTypes: cloud.AuthTypes{cloud.AccessKeyAuthType}, + AuthTypes: cloud.AuthTypes{cloud.AccessKeyAuthType, cloud.UserPassAuthType}, }) - err = s.State.UpdateCloudCredentials(names.NewUserTag("bob"), "stratus", map[string]cloud.Credential{ - "cred1": cloud.NewCredential(cloud.UserPassAuthType, nil), + c.Assert(err, jc.ErrorIsNil) + + tag := names.NewCloudCredentialTag("stratus/bob/bobcred1") + cred := cloud.NewCredential(cloud.AccessKeyAuthType, map[string]string{ + "foo": "foo val", + "bar": "bar val", }) - c.Assert(err, gc.ErrorMatches, `updating cloud credentials for user "user-bob", cloud "stratus": validating cloud credentials: credential "cred1" with auth-type "userpass" is not supported \(expected one of \["access-key"\]\)`) + err = s.State.UpdateCloudCredential(tag, cred) + c.Assert(err, jc.ErrorIsNil) + _, err = s.State.CloudCredential(tag) + c.Assert(err, jc.ErrorIsNil) + + // Remove it. + err = s.State.RemoveCloudCredential(tag) + c.Assert(err, jc.ErrorIsNil) + + // Check it. + _, err = s.State.CloudCredential(tag) + c.Assert(err, jc.Satisfies, errors.IsNotFound) } -// addCredLabels adds labels to all the given credentials, because -// the labels are present when the credentials are returned from the -// state but not when created with NewCredential. 
-func addCredLabels(creds map[string]cloud.Credential) { - for name, cred := range creds { - cred.Label = name - creds[name] = cred - } +func (s *CloudCredentialsSuite) createCredentialWatcher(c *gc.C, st *state.State, cred names.CloudCredentialTag) ( + state.NotifyWatcher, statetesting.NotifyWatcherC, +) { + w := st.WatchCredential(cred) + s.AddCleanup(func(c *gc.C) { statetesting.AssertStop(c, w) }) + return w, statetesting.NewNotifyWatcherC(c, st, w) +} + +func (s *CloudCredentialsSuite) TestWatchCredential(c *gc.C) { + cred := names.NewCloudCredentialTag("dummy/fred/default") + w, wc := s.createCredentialWatcher(c, s.State, cred) + wc.AssertOneChange() // Initial event. + + // Create + dummyCred := cloud.NewCredential(cloud.EmptyAuthType, nil) + err := s.State.UpdateCloudCredential(cred, dummyCred) + c.Assert(err, jc.ErrorIsNil) + wc.AssertOneChange() + + // Revoke + dummyCred.Revoked = true + err = s.State.UpdateCloudCredential(cred, dummyCred) + c.Assert(err, jc.ErrorIsNil) + wc.AssertOneChange() + + // Remove. + err = s.State.RemoveCloudCredential(cred) + c.Assert(err, jc.ErrorIsNil) + wc.AssertOneChange() + + statetesting.AssertStop(c, w) + wc.AssertClosed() +} + +func (s *CloudCredentialsSuite) TestWatchCredentialIgnoresOther(c *gc.C) { + cred := names.NewCloudCredentialTag("dummy/fred/default") + w, wc := s.createCredentialWatcher(c, s.State, cred) + wc.AssertOneChange() // Initial event. 
+ + anotherCred := names.NewCloudCredentialTag("dummy/mary/default") + dummyCred := cloud.NewCredential(cloud.EmptyAuthType, nil) + err := s.State.UpdateCloudCredential(anotherCred, dummyCred) + c.Assert(err, jc.ErrorIsNil) + wc.AssertNoChange() + + statetesting.AssertStop(c, w) + wc.AssertClosed() } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/cloud.go juju-core-2.0.0/src/github.com/juju/juju/state/cloud.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/cloud.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/cloud.go 2016-10-13 14:31:49.000000000 +0000 @@ -6,6 +6,7 @@ import ( "github.com/juju/errors" "github.com/juju/utils/set" + "gopkg.in/juju/names.v2" "gopkg.in/mgo.v2" "gopkg.in/mgo.v2/txn" @@ -19,19 +20,21 @@ // cloudDoc records information about the cloud that the controller operates in. type cloudDoc struct { - DocID string `bson:"_id"` - Name string `bson:"name"` - Type string `bson:"type"` - AuthTypes []string `bson:"auth-types"` - Endpoint string `bson:"endpoint"` - StorageEndpoint string `bson:"storage-endpoint,omitempty"` - Regions map[string]cloudRegionSubdoc `bson:"regions,omitempty"` + DocID string `bson:"_id"` + Name string `bson:"name"` + Type string `bson:"type"` + AuthTypes []string `bson:"auth-types"` + Endpoint string `bson:"endpoint"` + IdentityEndpoint string `bson:"identity-endpoint,omitempty"` + StorageEndpoint string `bson:"storage-endpoint,omitempty"` + Regions map[string]cloudRegionSubdoc `bson:"regions,omitempty"` } // cloudRegionSubdoc records information about cloud regions. 
type cloudRegionSubdoc struct { - Endpoint string `bson:"endpoint,omitempty"` - StorageEndpoint string `bson:"storage-endpoint,omitempty"` + Endpoint string `bson:"endpoint,omitempty"` + IdentityEndpoint string `bson:"identity-endpoint,omitempty"` + StorageEndpoint string `bson:"storage-endpoint,omitempty"` } // createCloudOp returns a list of txn.Ops that will initialize @@ -45,6 +48,7 @@ for _, region := range cloud.Regions { regions[region.Name] = cloudRegionSubdoc{ region.Endpoint, + region.IdentityEndpoint, region.StorageEndpoint, } } @@ -53,12 +57,13 @@ Id: cloudName, Assert: txn.DocMissing, Insert: &cloudDoc{ - Name: cloudName, - Type: cloud.Type, - AuthTypes: authTypes, - Endpoint: cloud.Endpoint, - StorageEndpoint: cloud.StorageEndpoint, - Regions: regions, + Name: cloudName, + Type: cloud.Type, + AuthTypes: authTypes, + Endpoint: cloud.Endpoint, + IdentityEndpoint: cloud.IdentityEndpoint, + StorageEndpoint: cloud.StorageEndpoint, + Regions: regions, }, } } @@ -78,19 +83,37 @@ regions[i] = cloud.Region{ name, region.Endpoint, + region.IdentityEndpoint, region.StorageEndpoint, } } return cloud.Cloud{ - d.Type, - authTypes, - d.Endpoint, - d.StorageEndpoint, - regions, - nil, // Config is not stored, only relevant to bootstrap + Type: d.Type, + AuthTypes: authTypes, + Endpoint: d.Endpoint, + IdentityEndpoint: d.IdentityEndpoint, + StorageEndpoint: d.StorageEndpoint, + Regions: regions, } } +// Clouds returns the definitions for all clouds in the controller. +func (st *State) Clouds() (map[names.CloudTag]cloud.Cloud, error) { + coll, cleanup := st.getCollection(cloudsC) + defer cleanup() + + var doc cloudDoc + clouds := make(map[names.CloudTag]cloud.Cloud) + iter := coll.Find(nil).Iter() + for iter.Next(&doc) { + clouds[names.NewCloudTag(doc.Name)] = doc.toCloud() + } + if err := iter.Err(); err != nil { + return nil, errors.Annotate(err, "getting clouds") + } + return clouds, nil +} + // Cloud returns the controller's cloud definition. 
func (st *State) Cloud(name string) (cloud.Cloud, error) { coll, cleanup := st.getCollection(cloudsC) @@ -137,3 +160,8 @@ // need a new "policy". return nil } + +// regionSettingsGlobalKey concatenates the cloud a hash and the region string. +func regionSettingsGlobalKey(cloud, region string) string { + return cloud + "#" + region +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/cloudimagemetadata/image.go juju-core-2.0.0/src/github.com/juju/juju/state/cloudimagemetadata/image.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/cloudimagemetadata/image.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/cloudimagemetadata/image.go 2016-10-13 14:31:49.000000000 +0000 @@ -19,7 +19,6 @@ var logger = loggo.GetLogger("juju.state.cloudimagemetadata") type storage struct { - modelUUID string collection string store DataStore } @@ -28,8 +27,8 @@ // NewStorage constructs a new Storage that stores image metadata // in the provided data store. -func NewStorage(modelUUID, collectionName string, store DataStore) Storage { - return &storage{modelUUID, collectionName, store} +func NewStorage(collectionName string, store DataStore) Storage { + return &storage{collectionName, store} } var emptyMetadata = Metadata{} @@ -158,12 +157,26 @@ return old.metadata(), nil } +// AllCloudImageMetadata returns all cloud image metadata in the model. +func (s *storage) AllCloudImageMetadata() ([]Metadata, error) { + coll, closer := s.store.GetCollection(s.collection) + defer closer() + + results := []Metadata{} + docs := []imagesMetadataDoc{} + err := coll.Find(nil).All(&docs) + if err != nil { + return nil, errors.Annotatef(err, "cannot get all image metadata") + } + for _, doc := range docs { + results = append(results, doc.metadata()) + } + return results, nil +} + // imagesMetadataDoc results in immutable records. Updates are effectively // a delate and an insert. 
type imagesMetadataDoc struct { - // ModelUUID is the model identifier. - ModelUUID string `bson:"model-uuid"` - // Id contains unique key for cloud image metadata. // This is an amalgamation of all deterministic metadata attributes to ensure // that there can be a public and custom image for the same attributes set. @@ -223,6 +236,7 @@ }, m.Priority, m.ImageId, + m.DateCreated, } if m.RootStorageSize != 0 { r.RootStorageSize = &m.RootStorageSize @@ -231,8 +245,12 @@ } func (s *storage) mongoDoc(m Metadata) imagesMetadataDoc { + dateCreated := m.DateCreated + if dateCreated == 0 { + // TODO(fwereade): 2016-03-17 lp:1558657 + dateCreated = time.Now().UnixNano() + } r := imagesMetadataDoc{ - ModelUUID: s.modelUUID, Id: buildKey(m), Stream: m.Stream, Region: m.Region, @@ -242,10 +260,9 @@ VirtType: m.VirtType, RootStorageType: m.RootStorageType, ImageId: m.ImageId, - // TODO(fwereade): 2016-03-17 lp:1558657 - DateCreated: time.Now().UnixNano(), - Source: m.Source, - Priority: m.Priority, + DateCreated: dateCreated, + Source: m.Source, + Priority: m.Priority, } if m.RootStorageSize != nil { r.RootStorageSize = *m.RootStorageSize @@ -269,13 +286,24 @@ if m.Series == "" { return errors.NotValidf("missing series: metadata for image %v", m.ImageId) } - v, err := series.SeriesVersion(m.Series) if err != nil { return err } - m.Version = v + + if m.Stream == "" { + return errors.NotValidf("missing stream: metadata for image %v", m.ImageId) + } + if m.Source == "" { + return errors.NotValidf("missing source: metadata for image %v", m.ImageId) + } + if m.Arch == "" { + return errors.NotValidf("missing architecture: metadata for image %v", m.ImageId) + } + if m.Region == "" { + return errors.NotValidf("missing region: metadata for image %v", m.ImageId) + } return nil } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/cloudimagemetadata/image_test.go juju-core-2.0.0/src/github.com/juju/juju/state/cloudimagemetadata/image_test.go --- 
juju-core-2.0~beta15/src/github.com/juju/juju/state/cloudimagemetadata/image_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/cloudimagemetadata/image_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -16,6 +16,7 @@ "github.com/juju/juju/mongo" "github.com/juju/juju/state/cloudimagemetadata" + coretesting "github.com/juju/juju/testing" ) type cloudImageMetadataSuite struct { @@ -28,7 +29,6 @@ var _ = gc.Suite(&cloudImageMetadataSuite{}) const ( - envName = "test-model" collectionName = "test-collection" ) @@ -38,7 +38,7 @@ db := s.MgoSuite.Session.DB("juju") s.access = NewTestMongo(db) - s.storage = cloudimagemetadata.NewStorage(envName, collectionName, s.access) + s.storage = cloudimagemetadata.NewStorage(collectionName, s.access) } func (s *cloudImageMetadataSuite) TestSaveMetadata(c *gc.C) { @@ -50,6 +50,7 @@ Arch: "arch", VirtType: "virtType-test", RootStorageType: "rootStorageType-test", + Source: "test", } attrs2 := cloudimagemetadata.MetadataAttributes{ Stream: "chalk", @@ -57,16 +58,34 @@ Version: "12.04", Series: "precise", Arch: "amd64", + Source: "test", } added := []cloudimagemetadata.Metadata{ - {attrs1, 0, "1"}, - {attrs2, 0, "2"}, + {attrs1, 0, "1", 0}, + {attrs2, 0, "2", 0}, } s.assertRecordMetadata(c, added[0]) s.assertRecordMetadata(c, added[1]) s.assertMetadataRecorded(c, cloudimagemetadata.MetadataAttributes{}, added...) 
} +func (s *cloudImageMetadataSuite) TestSaveMetadataWithDateCreated(c *gc.C) { + attrs := cloudimagemetadata.MetadataAttributes{ + Stream: "stream", + Region: "region-test", + Version: "14.04", + Series: "trusty", + Arch: "arch", + VirtType: "virtType-test", + RootStorageType: "rootStorageType-test", + Source: "test", + } + now := coretesting.NonZeroTime().UnixNano() + metadata := cloudimagemetadata.Metadata{attrs, 0, "1", now} + s.assertRecordMetadata(c, metadata) + s.assertMetadataRecorded(c, cloudimagemetadata.MetadataAttributes{}, metadata) +} + func (s *cloudImageMetadataSuite) TestFindMetadataNotFound(c *gc.C) { s.assertNoMetadata(c) @@ -78,8 +97,9 @@ Series: "trusty", Arch: "arch", VirtType: "virtType", + Source: "test", RootStorageType: "rootStorageType"} - m := cloudimagemetadata.Metadata{attrs, 0, "1"} + m := cloudimagemetadata.Metadata{attrs, 0, "1", 0} s.assertRecordMetadata(c, m) // ...but look for something else. @@ -115,9 +135,10 @@ Series: "trusty", Arch: "arch", VirtType: "virtType", + Source: "test", RootStorageType: "rootStorageType"} - m := cloudimagemetadata.Metadata{attrs, 0, "1"} + m := cloudimagemetadata.Metadata{attrs, 0, "1", 0} _, err := s.storage.FindMetadata(buildAttributesFilter(attrs)) c.Assert(err, jc.Satisfies, errors.IsNotFound) @@ -127,7 +148,7 @@ s.assertMetadataRecorded(c, attrs, expected...) 
attrs.Stream = "another_stream" - m = cloudimagemetadata.Metadata{attrs, 0, "2"} + m = cloudimagemetadata.Metadata{attrs, 0, "2", 0} s.assertRecordMetadata(c, m) expected = append(expected, m) @@ -141,9 +162,11 @@ Version: "14.04", Series: "trusty", Arch: "arch", + Source: "test", + Region: "wonder", } - metadata0 := cloudimagemetadata.Metadata{attrs, 0, "1"} - metadata1 := cloudimagemetadata.Metadata{attrs, 0, "1"} + metadata0 := cloudimagemetadata.Metadata{attrs, 0, "1", 0} + metadata1 := cloudimagemetadata.Metadata{attrs, 0, "1", 0} s.assertRecordMetadata(c, metadata0) s.assertRecordMetadata(c, metadata1) @@ -156,9 +179,11 @@ Version: "14.04", Series: "trusty", Arch: "arch", + Source: "test", + Region: "wonder", } - metadata0 := cloudimagemetadata.Metadata{attrs, 0, "1"} - metadata1 := cloudimagemetadata.Metadata{attrs, 0, "12"} + metadata0 := cloudimagemetadata.Metadata{attrs, 0, "1", 0} + metadata1 := cloudimagemetadata.Metadata{attrs, 0, "12", 0} s.assertRecordMetadata(c, metadata0) s.assertMetadataRecorded(c, attrs, metadata0) @@ -173,9 +198,11 @@ Version: "14.04", Series: "trusty", Arch: "arch", + Region: "wonder", + Source: "test", } - metadata0 := cloudimagemetadata.Metadata{attrs, 0, "0"} - metadata1 := cloudimagemetadata.Metadata{attrs, 0, "1"} + metadata0 := cloudimagemetadata.Metadata{attrs, 0, "0", 0} + metadata1 := cloudimagemetadata.Metadata{attrs, 0, "1", 0} metadata1.Stream = "scream" s.assertConcurrentSave(c, @@ -193,9 +220,11 @@ Version: "14.04", Series: "trusty", Arch: "arch", + Source: "test", + Region: "wonder", } - metadata0 := cloudimagemetadata.Metadata{attrs, 0, "0"} - metadata1 := cloudimagemetadata.Metadata{attrs, 0, "1"} + metadata0 := cloudimagemetadata.Metadata{attrs, 0, "0", 0} + metadata1 := cloudimagemetadata.Metadata{attrs, 0, "1", 0} s.assertConcurrentSave(c, metadata0, // add this one @@ -210,8 +239,10 @@ Version: "14.04", Series: "trusty", Arch: "arch", + Source: "test", + Region: "wonder", } - metadata0 := 
cloudimagemetadata.Metadata{attrs, 0, "0"} + metadata0 := cloudimagemetadata.Metadata{attrs, 0, "0", 0} s.assertConcurrentSave(c, metadata0, // add this one @@ -227,11 +258,12 @@ Series: "trusty", Arch: "arch", Source: "public", + Region: "wonder", } - metadata0 := cloudimagemetadata.Metadata{attrs, 0, "0"} + metadata0 := cloudimagemetadata.Metadata{attrs, 0, "0", 0} attrs.Source = "custom" - metadata1 := cloudimagemetadata.Metadata{attrs, 0, "0"} + metadata1 := cloudimagemetadata.Metadata{attrs, 0, "0", 0} s.assertConcurrentSave(c, metadata0, @@ -246,8 +278,10 @@ Stream: "stream", Series: "trusty", Arch: "arch", + Source: "test", + Region: "wonder", } - metadata0 := cloudimagemetadata.Metadata{attrs, 0, "1"} + metadata0 := cloudimagemetadata.Metadata{attrs, 0, "1", 0} s.assertRecordMetadata(c, metadata0) } @@ -255,8 +289,10 @@ attrs := cloudimagemetadata.MetadataAttributes{ Stream: "stream", Arch: "arch", + Source: "test", + Region: "wonder", } - metadata0 := cloudimagemetadata.Metadata{attrs, 0, "1"} + metadata0 := cloudimagemetadata.Metadata{attrs, 0, "1", 0} err := s.storage.SaveMetadata([]cloudimagemetadata.Metadata{metadata0}) c.Assert(err, gc.ErrorMatches, regexp.QuoteMeta(`missing series: metadata for image 1 not valid`)) } @@ -266,12 +302,61 @@ Stream: "stream", Series: "blah", Arch: "arch", + Source: "test", } - metadata0 := cloudimagemetadata.Metadata{attrs, 0, "1"} + metadata0 := cloudimagemetadata.Metadata{attrs, 0, "1", 0} err := s.storage.SaveMetadata([]cloudimagemetadata.Metadata{metadata0}) c.Assert(err, gc.ErrorMatches, regexp.QuoteMeta(`unknown version for series: "blah"`)) } +func (s *cloudImageMetadataSuite) TestSaveMetadataNoStreamPassed(c *gc.C) { + attrs := cloudimagemetadata.MetadataAttributes{ + Arch: "arch", + Source: "test", + Series: "trusty", + Region: "wonder", + } + metadata0 := cloudimagemetadata.Metadata{attrs, 0, "1", 0} + err := s.storage.SaveMetadata([]cloudimagemetadata.Metadata{metadata0}) + c.Assert(err, gc.ErrorMatches, 
regexp.QuoteMeta(`missing stream: metadata for image 1 not valid`)) +} + +func (s *cloudImageMetadataSuite) TestSaveMetadataNoSourcePassed(c *gc.C) { + attrs := cloudimagemetadata.MetadataAttributes{ + Stream: "stream", + Arch: "arch", + Series: "trusty", + Region: "wonder", + } + metadata0 := cloudimagemetadata.Metadata{attrs, 0, "1", 0} + err := s.storage.SaveMetadata([]cloudimagemetadata.Metadata{metadata0}) + c.Assert(err, gc.ErrorMatches, regexp.QuoteMeta(`missing source: metadata for image 1 not valid`)) +} + +func (s *cloudImageMetadataSuite) TestSaveMetadataNoArchitecturePassed(c *gc.C) { + attrs := cloudimagemetadata.MetadataAttributes{ + Stream: "stream", + Source: "test", + Series: "trusty", + Region: "wonder", + } + metadata0 := cloudimagemetadata.Metadata{attrs, 0, "1", 0} + err := s.storage.SaveMetadata([]cloudimagemetadata.Metadata{metadata0}) + c.Assert(err, gc.ErrorMatches, regexp.QuoteMeta(`missing architecture: metadata for image 1 not valid`)) +} + +func (s *cloudImageMetadataSuite) TestSaveMetadataNoRegionPassed(c *gc.C) { + attrs := cloudimagemetadata.MetadataAttributes{ + Stream: "stream", + Arch: "arch", + Source: "test", + Series: "trusty", + } + metadata0 := cloudimagemetadata.Metadata{attrs, 0, "1", 0} + err := s.storage.SaveMetadata([]cloudimagemetadata.Metadata{metadata0}) + c.Assert(err, gc.ErrorMatches, regexp.QuoteMeta(`missing region: metadata for image 1 not valid`)) +} + func (s *cloudImageMetadataSuite) assertConcurrentSave(c *gc.C, metadata0, metadata1 cloudimagemetadata.Metadata, expected ...cloudimagemetadata.Metadata) { addMetadata := func() { s.assertRecordMetadata(c, metadata0) @@ -303,7 +388,17 @@ // Compare maps by key; order of slices does not matter c.Assert(groups, gc.HasLen, len(metadata)) for source, expectedMetadata := range groups { - c.Assert(metadata[source], jc.SameContents, expectedMetadata) + actual := metadata[source] + if len(actual) == len(expectedMetadata) { + for i, image := range actual { + if 
expectedMetadata[i].DateCreated == 0 { + // Copy the creation date across as this will have been + // generated. + expectedMetadata[i].DateCreated = image.DateCreated + } + } + } + c.Assert(actual, jc.SameContents, expectedMetadata) } } @@ -319,19 +414,20 @@ Series: "trusty", Arch: arch1, VirtType: "virtType-test", + Source: "test", RootStorageType: "rootStorageType-test"} - added := cloudimagemetadata.Metadata{attrs, 0, "1"} + added := cloudimagemetadata.Metadata{attrs, 0, "1", 0} s.assertRecordMetadata(c, added) s.assertMetadataRecorded(c, attrs, added) - addedNonUnique := cloudimagemetadata.Metadata{attrs, 0, "21"} + addedNonUnique := cloudimagemetadata.Metadata{attrs, 0, "21", 0} s.assertRecordMetadata(c, addedNonUnique) s.assertMetadataRecorded(c, attrs, addedNonUnique) arch2 := "anotherArch" attrs.Arch = arch2 - added2 := cloudimagemetadata.Metadata{attrs, 0, "21"} + added2 := cloudimagemetadata.Metadata{attrs, 0, "21", 0} s.assertRecordMetadata(c, added2) s.assertMetadataRecorded(c, attrs, added2) @@ -353,9 +449,10 @@ Series: "trusty", Arch: "arch", VirtType: "virtType-test", + Source: "test", RootStorageType: "rootStorageType-test"} - added := cloudimagemetadata.Metadata{attrs, 0, "1"} + added := cloudimagemetadata.Metadata{attrs, 0, "1", 0} s.assertRecordMetadata(c, added) s.assertMetadataRecorded(c, attrs, added) @@ -376,9 +473,10 @@ Series: "trusty", Arch: "arch", VirtType: "virtType-test", + Source: "test", RootStorageType: "rootStorageType-test"} - added := cloudimagemetadata.Metadata{attrs, 0, "1"} + added := cloudimagemetadata.Metadata{attrs, 0, "1", 0} s.assertRecordMetadata(c, added) s.assertMetadataRecorded(c, attrs, added) @@ -399,9 +497,10 @@ Series: "trusty", Arch: "arch", VirtType: "virtType-test", + Source: "test", RootStorageType: "rootStorageType-test"} - added := cloudimagemetadata.Metadata{attrs, 0, "1"} + added := cloudimagemetadata.Metadata{attrs, 0, "1", 0} s.assertRecordMetadata(c, added) s.assertMetadataRecorded(c, attrs, added) @@ 
-457,9 +556,10 @@ Series: "trusty", Arch: "arch", VirtType: "virtType-test", + Source: "test", RootStorageType: "rootStorageType-test"} - added := cloudimagemetadata.Metadata{attrs, 0, imageId} + added := cloudimagemetadata.Metadata{attrs, 0, imageId, 0} s.assertRecordMetadata(c, added) s.assertMetadataRecorded(c, attrs, added) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/cloudimagemetadata/interface.go juju-core-2.0.0/src/github.com/juju/juju/state/cloudimagemetadata/interface.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/cloudimagemetadata/interface.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/cloudimagemetadata/interface.go 2016-10-13 14:31:49.000000000 +0000 @@ -51,6 +51,10 @@ // ImageId contains image identifier. ImageId string + + // DateCreated contains the time and date the image was created. This + // is populated when the Metadata is saved. + DateCreated int64 } // Storage provides methods for storing and retrieving cloud image metadata. @@ -71,6 +75,10 @@ // SupportedArchitectures returns collection of unique architectures // that stored metadata contains. SupportedArchitectures(criteria MetadataFilter) ([]string, error) + + // AllCloudImageMetadata returns all the cloud image metadata in the + // model. + AllCloudImageMetadata() ([]Metadata, error) } // DataStore exposes data store operations for use by the cloud image metadata package. diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/cloudimagemetadata/internal_test.go juju-core-2.0.0/src/github.com/juju/juju/state/cloudimagemetadata/internal_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/cloudimagemetadata/internal_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/cloudimagemetadata/internal_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,41 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+ +package cloudimagemetadata + +import ( + "github.com/juju/utils/set" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/testing" +) + +type cloudImageMetadataSuite struct{} + +var _ = gc.Suite(&cloudImageMetadataSuite{}) + +func (s *cloudImageMetadataSuite) TestCloudImageMetadataDocFields(c *gc.C) { + ignored := set.NewStrings("Id") + migrated := set.NewStrings( + "Stream", + "Region", + "Version", + "Series", + "Arch", + "VirtType", + "RootStorageType", + "RootStorageSize", + "Source", + "Priority", + "ImageId", + "DateCreated", + ) + fields := migrated.Union(ignored) + expected := testing.GetExportedFields(imagesMetadataDoc{}) + unknown := expected.Difference(fields) + removed := fields.Difference(expected) + // If this test fails, it means that extra fields have been added to the + // doc without thinking about the migration implications. + c.Check(unknown, gc.HasLen, 0) + c.Assert(removed, gc.HasLen, 0) +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/cloud_test.go juju-core-2.0.0/src/github.com/juju/juju/state/cloud_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/cloud_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/cloud_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -7,6 +7,7 @@ "github.com/juju/errors" jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" + "gopkg.in/juju/names.v2" "github.com/juju/juju/cloud" ) @@ -17,6 +18,25 @@ var _ = gc.Suite(&CloudSuite{}) +var lowCloud = cloud.Cloud{ + Type: "low", + AuthTypes: cloud.AuthTypes{cloud.AccessKeyAuthType, cloud.UserPassAuthType}, + Endpoint: "global-endpoint", + IdentityEndpoint: "identity-endpoint", + StorageEndpoint: "storage-endpoint", + Regions: []cloud.Region{{ + Name: "region1", + Endpoint: "region1-endpoint", + IdentityEndpoint: "region1-identity", + StorageEndpoint: "region1-storage", + }, { + Name: "region2", + Endpoint: "region2-endpoint", + IdentityEndpoint: "region2-identity", + StorageEndpoint: 
"region2-storage", + }}, +} + func (s *CloudSuite) TestCloudNotFound(c *gc.C) { cld, err := s.State.Cloud("unknown") c.Assert(err, gc.ErrorMatches, `cloud "unknown" not found`) @@ -24,27 +44,26 @@ c.Assert(err, jc.Satisfies, errors.IsNotFound) } +func (s *CloudSuite) TestClouds(c *gc.C) { + dummyCloud, err := s.State.Cloud("dummy") + c.Assert(err, jc.ErrorIsNil) + err = s.State.AddCloud("stratus", lowCloud) + c.Assert(err, jc.ErrorIsNil) + + clouds, err := s.State.Clouds() + c.Assert(err, jc.ErrorIsNil) + c.Assert(clouds, jc.DeepEquals, map[names.CloudTag]cloud.Cloud{ + names.NewCloudTag("dummy"): dummyCloud, + names.NewCloudTag("stratus"): lowCloud, + }) +} + func (s *CloudSuite) TestAddCloud(c *gc.C) { - cld := cloud.Cloud{ - Type: "low", - AuthTypes: cloud.AuthTypes{cloud.AccessKeyAuthType, cloud.UserPassAuthType}, - Endpoint: "global-endpoint", - StorageEndpoint: "global-storage", - Regions: []cloud.Region{{ - Name: "region1", - Endpoint: "region1-endpoint", - StorageEndpoint: "region1-storage", - }, { - Name: "region2", - Endpoint: "region2-endpoint", - StorageEndpoint: "region2-storage", - }}, - } - err := s.State.AddCloud("stratus", cld) + err := s.State.AddCloud("stratus", lowCloud) c.Assert(err, jc.ErrorIsNil) - cld1, err := s.State.Cloud("stratus") + cloud, err := s.State.Cloud("stratus") c.Assert(err, jc.ErrorIsNil) - c.Assert(cld1, jc.DeepEquals, cld) + c.Assert(cloud, jc.DeepEquals, lowCloud) } func (s *CloudSuite) TestAddCloudDuplicate(c *gc.C) { diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/conn_test.go juju-core-2.0.0/src/github.com/juju/juju/state/conn_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/conn_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/conn_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -34,8 +34,6 @@ } func (cs *ConnSuite) SetUpTest(c *gc.C) { - c.Log("SetUpTest") - cs.policy = statetesting.MockPolicy{ GetStorageProviderRegistry: func() 
(storage.ProviderRegistry, error) { return dummy.StorageProviders(), nil @@ -58,8 +56,6 @@ cs.services = jujuDB.C("applications") cs.units = jujuDB.C("units") cs.controllers = jujuDB.C("controllers") - - c.Log("SetUpTest done") } func (s *ConnSuite) AddTestingCharm(c *gc.C, name string) *state.Charm { @@ -116,7 +112,7 @@ }) otherOwner := names.NewLocalUserTag("test-admin") _, otherState, err := s.State.NewModel(state.ModelArgs{ - CloudName: "dummy", Config: cfg, Owner: otherOwner, + CloudName: "dummy", CloudRegion: "dummy-region", Config: cfg, Owner: otherOwner, StorageProviderRegistry: storage.StaticProviderRegistry{}, }) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/conn_wallclock_test.go juju-core-2.0.0/src/github.com/juju/juju/state/conn_wallclock_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/conn_wallclock_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/conn_wallclock_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,124 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package state_test + +import ( + jc "github.com/juju/testing/checkers" + "github.com/juju/utils" + gc "gopkg.in/check.v1" + "gopkg.in/juju/names.v2" + "gopkg.in/mgo.v2" + + "github.com/juju/juju/provider/dummy" + "github.com/juju/juju/state" + statetesting "github.com/juju/juju/state/testing" + "github.com/juju/juju/storage" + "github.com/juju/juju/testing" +) + +// ConnWithWallClockSuite provides the infrastructure for all other test suites +// (StateSuite, CharmSuite, MachineSuite, etc). This should be deprecated in +// favour of ConnSuite, and tests updated to use the testing clock provided in +// ConnSuite via StateSuite. 
+type ConnWithWallClockSuite struct { + statetesting.StateWithWallClockSuite + annotations *mgo.Collection + charms *mgo.Collection + machines *mgo.Collection + instanceData *mgo.Collection + relations *mgo.Collection + services *mgo.Collection + units *mgo.Collection + controllers *mgo.Collection + policy statetesting.MockPolicy + modelTag names.ModelTag +} + +func (cs *ConnWithWallClockSuite) SetUpTest(c *gc.C) { + cs.policy = statetesting.MockPolicy{ + GetStorageProviderRegistry: func() (storage.ProviderRegistry, error) { + return dummy.StorageProviders(), nil + }, + } + cs.StateWithWallClockSuite.NewPolicy = func(*state.State) state.Policy { + return &cs.policy + } + + cs.StateWithWallClockSuite.SetUpTest(c) + + cs.modelTag = cs.State.ModelTag() + + jujuDB := cs.MgoSuite.Session.DB("juju") + cs.annotations = jujuDB.C("annotations") + cs.charms = jujuDB.C("charms") + cs.machines = jujuDB.C("machines") + cs.instanceData = jujuDB.C("instanceData") + cs.relations = jujuDB.C("relations") + cs.services = jujuDB.C("applications") + cs.units = jujuDB.C("units") + cs.controllers = jujuDB.C("controllers") +} + +func (s *ConnWithWallClockSuite) AddTestingCharm(c *gc.C, name string) *state.Charm { + return state.AddTestingCharm(c, s.State, name) +} + +func (s *ConnWithWallClockSuite) AddTestingService(c *gc.C, name string, ch *state.Charm) *state.Application { + return state.AddTestingService(c, s.State, name, ch) +} + +func (s *ConnWithWallClockSuite) AddTestingServiceWithStorage(c *gc.C, name string, ch *state.Charm, storage map[string]state.StorageConstraints) *state.Application { + return state.AddTestingServiceWithStorage(c, s.State, name, ch, storage) +} + +func (s *ConnWithWallClockSuite) AddTestingServiceWithBindings(c *gc.C, name string, ch *state.Charm, bindings map[string]string) *state.Application { + return state.AddTestingServiceWithBindings(c, s.State, name, ch, bindings) +} + +func (s *ConnWithWallClockSuite) AddSeriesCharm(c *gc.C, name, series string) 
*state.Charm { + return state.AddCustomCharm(c, s.State, name, "", "", series, -1) +} + +// AddConfigCharm clones a testing charm, replaces its config with +// the given YAML string and adds it to the state, using the given +// revision. +func (s *ConnWithWallClockSuite) AddConfigCharm(c *gc.C, name, configYaml string, revision int) *state.Charm { + return state.AddCustomCharm(c, s.State, name, "config.yaml", configYaml, "quantal", revision) +} + +// AddActionsCharm clones a testing charm, replaces its actions schema with +// the given YAML, and adds it to the state, using the given revision. +func (s *ConnWithWallClockSuite) AddActionsCharm(c *gc.C, name, actionsYaml string, revision int) *state.Charm { + return state.AddCustomCharm(c, s.State, name, "actions.yaml", actionsYaml, "quantal", revision) +} + +// AddMetaCharm clones a testing charm, replaces its metadata with the +// given YAML string and adds it to the state, using the given revision. +func (s *ConnWithWallClockSuite) AddMetaCharm(c *gc.C, name, metaYaml string, revision int) *state.Charm { + return state.AddCustomCharm(c, s.State, name, "metadata.yaml", metaYaml, "quantal", revision) +} + +// AddMetricsCharm clones a testing charm, replaces its metrics declaration with the +// given YAML string and adds it to the state, using the given revision. +func (s *ConnWithWallClockSuite) AddMetricsCharm(c *gc.C, name, metricsYaml string, revision int) *state.Charm { + return state.AddCustomCharm(c, s.State, name, "metrics.yaml", metricsYaml, "quantal", revision) +} + +// NewStateForModelNamed returns an new model with the given modelName, which +// has a unique UUID, and does not need to be closed when the test completes. 
+func (s *ConnWithWallClockSuite) NewStateForModelNamed(c *gc.C, modelName string) *state.State { + cfg := testing.CustomModelConfig(c, testing.Attrs{ + "name": modelName, + "uuid": utils.MustNewUUID().String(), + }) + otherOwner := names.NewLocalUserTag("test-admin") + _, otherState, err := s.State.NewModel(state.ModelArgs{ + CloudName: "dummy", CloudRegion: "dummy-region", Config: cfg, Owner: otherOwner, + StorageProviderRegistry: storage.StaticProviderRegistry{}, + }) + + c.Assert(err, jc.ErrorIsNil) + s.AddCleanup(func(*gc.C) { otherState.Close() }) + return otherState +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/constraintsvalidation_test.go juju-core-2.0.0/src/github.com/juju/juju/state/constraintsvalidation_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/constraintsvalidation_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/constraintsvalidation_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -81,84 +81,84 @@ }, { about: "(implicitly) empty constraints never override explictly set fallbacks", consToSet: "", - consFallback: "arch=amd64 cpu-cores=42 mem=2G tags=foo", + consFallback: "arch=amd64 cores=42 mem=2G tags=foo", - effectiveModelCons: "arch=amd64 cpu-cores=42 mem=2G tags=foo", + effectiveModelCons: "arch=amd64 cores=42 mem=2G tags=foo", effectiveServiceCons: "", // set as given. - effectiveUnitCons: "arch=amd64 cpu-cores=42 mem=2G tags=foo", + effectiveUnitCons: "arch=amd64 cores=42 mem=2G tags=foo", // set as given, then merged with fallbacks; since consToSet is // empty, the effective values inherit everything from fallbacks; // like the unit, but only because the service constraints are // also empty. 
- effectiveMachineCons: "arch=amd64 cpu-cores=42 mem=2G tags=foo", + effectiveMachineCons: "arch=amd64 cores=42 mem=2G tags=foo", }, { about: "(explicitly) empty constraints are OK and stored as given", - consToSet: "cpu-cores= cpu-power= root-disk= instance-type= container= tags= spaces=", + consToSet: "cores= cpu-power= root-disk= instance-type= container= tags= spaces=", consFallback: "", effectiveModelCons: "", - effectiveServiceCons: "cpu-cores= cpu-power= root-disk= instance-type= container= tags= spaces=", - effectiveUnitCons: "cpu-cores= cpu-power= root-disk= instance-type= container= tags= spaces=", - effectiveMachineCons: "cpu-cores= cpu-power= instance-type= root-disk= tags= spaces=", // container= is dropped + effectiveServiceCons: "cores= cpu-power= root-disk= instance-type= container= tags= spaces=", + effectiveUnitCons: "cores= cpu-power= root-disk= instance-type= container= tags= spaces=", + effectiveMachineCons: "cores= cpu-power= instance-type= root-disk= tags= spaces=", // container= is dropped }, { about: "(explicitly) empty fallback constraints are OK and stored as given", consToSet: "", - consFallback: "cpu-cores= cpu-power= root-disk= instance-type= container= tags= spaces=", + consFallback: "cores= cpu-power= root-disk= instance-type= container= tags= spaces=", - effectiveModelCons: "cpu-cores= cpu-power= root-disk= instance-type= container= tags= spaces=", + effectiveModelCons: "cores= cpu-power= root-disk= instance-type= container= tags= spaces=", effectiveServiceCons: "", - effectiveUnitCons: "cpu-cores= cpu-power= root-disk= instance-type= container= tags= spaces=", - effectiveMachineCons: "cpu-cores= cpu-power= instance-type= root-disk= tags= spaces=", // container= is dropped + effectiveUnitCons: "cores= cpu-power= root-disk= instance-type= container= tags= spaces=", + effectiveMachineCons: "cores= cpu-power= instance-type= root-disk= tags= spaces=", // container= is dropped }, { about: "(explicitly) empty constraints and fallbacks are 
OK and stored as given", - consToSet: "arch= mem= cpu-cores= container=", - consFallback: "cpu-cores= cpu-power= root-disk= instance-type= container= tags= spaces=", - effectiveModelCons: "cpu-cores= cpu-power= root-disk= instance-type= container= tags= spaces=", - effectiveServiceCons: "arch= mem= cpu-cores= container=", - effectiveUnitCons: "arch= container= cpu-cores= cpu-power= mem= root-disk= tags= spaces=", - effectiveMachineCons: "arch= cpu-cores= cpu-power= mem= root-disk= tags= spaces=", // container= is dropped + consToSet: "arch= mem= cores= container=", + consFallback: "cores= cpu-power= root-disk= instance-type= container= tags= spaces=", + effectiveModelCons: "cores= cpu-power= root-disk= instance-type= container= tags= spaces=", + effectiveServiceCons: "arch= mem= cores= container=", + effectiveUnitCons: "arch= container= cores= cpu-power= mem= root-disk= tags= spaces=", + effectiveMachineCons: "arch= cores= cpu-power= mem= root-disk= tags= spaces=", // container= is dropped }, { about: "(explicitly) empty constraints override set fallbacks for deployment and provisioning", - consToSet: "cpu-cores= arch= spaces= cpu-power=", - consFallback: "cpu-cores=42 arch=amd64 tags=foo spaces=default,^dmz mem=4G", + consToSet: "cores= arch= spaces= cpu-power=", + consFallback: "cores=42 arch=amd64 tags=foo spaces=default,^dmz mem=4G", - effectiveModelCons: "cpu-cores=42 arch=amd64 tags=foo spaces=default,^dmz mem=4G", - effectiveServiceCons: "cpu-cores= arch= spaces= cpu-power=", - effectiveUnitCons: "arch= cpu-cores= cpu-power= mem=4G tags=foo spaces=", - effectiveMachineCons: "arch= cpu-cores= cpu-power= mem=4G tags=foo spaces=", + effectiveModelCons: "cores=42 arch=amd64 tags=foo spaces=default,^dmz mem=4G", + effectiveServiceCons: "cores= arch= spaces= cpu-power=", + effectiveUnitCons: "arch= cores= cpu-power= mem=4G tags=foo spaces=", + effectiveMachineCons: "arch= cores= cpu-power= mem=4G tags=foo spaces=", // we're also checking if m.SetConstraints() does 
the same with // regards to the effective constraints as AddMachine(), because // some of these tests proved they had different behavior (i.e. // container= was not set to empty) }, { about: "non-empty constraints always override empty or unset fallbacks", - consToSet: "cpu-cores=42 root-disk=20G arch=amd64 tags=foo,bar", - consFallback: "cpu-cores= arch= tags=", + consToSet: "cores=42 root-disk=20G arch=amd64 tags=foo,bar", + consFallback: "cores= arch= tags=", - effectiveModelCons: "cpu-cores= arch= tags=", - effectiveServiceCons: "cpu-cores=42 root-disk=20G arch=amd64 tags=foo,bar", - effectiveUnitCons: "cpu-cores=42 root-disk=20G arch=amd64 tags=foo,bar", - effectiveMachineCons: "cpu-cores=42 root-disk=20G arch=amd64 tags=foo,bar", + effectiveModelCons: "cores= arch= tags=", + effectiveServiceCons: "cores=42 root-disk=20G arch=amd64 tags=foo,bar", + effectiveUnitCons: "cores=42 root-disk=20G arch=amd64 tags=foo,bar", + effectiveMachineCons: "cores=42 root-disk=20G arch=amd64 tags=foo,bar", }, { about: "non-empty constraints always override set fallbacks", - consToSet: "cpu-cores=42 root-disk=20G arch=amd64 tags=foo,bar", - consFallback: "cpu-cores=12 root-disk=10G arch=i386 tags=bar", + consToSet: "cores=42 root-disk=20G arch=amd64 tags=foo,bar", + consFallback: "cores=12 root-disk=10G arch=i386 tags=bar", - effectiveModelCons: "cpu-cores=12 root-disk=10G arch=i386 tags=bar", - effectiveServiceCons: "cpu-cores=42 root-disk=20G arch=amd64 tags=foo,bar", - effectiveUnitCons: "cpu-cores=42 root-disk=20G arch=amd64 tags=foo,bar", - effectiveMachineCons: "cpu-cores=42 root-disk=20G arch=amd64 tags=foo,bar", + effectiveModelCons: "cores=12 root-disk=10G arch=i386 tags=bar", + effectiveServiceCons: "cores=42 root-disk=20G arch=amd64 tags=foo,bar", + effectiveUnitCons: "cores=42 root-disk=20G arch=amd64 tags=foo,bar", + effectiveMachineCons: "cores=42 root-disk=20G arch=amd64 tags=foo,bar", }, { about: "non-empty constraints override conflicting set fallbacks", - 
consToSet: "mem=8G arch=amd64 cpu-cores=4 tags=bar", + consToSet: "mem=8G arch=amd64 cores=4 tags=bar", consFallback: "instance-type=small cpu-power=1000", // instance-type conflicts mem, arch effectiveModelCons: "instance-type=small cpu-power=1000", - effectiveServiceCons: "mem=8G arch=amd64 cpu-cores=4 tags=bar", + effectiveServiceCons: "mem=8G arch=amd64 cores=4 tags=bar", // both of the following contain the explicitly set constraints after // resolving any conflicts with fallbacks (by dropping them). - effectiveUnitCons: "mem=8G arch=amd64 cpu-cores=4 tags=bar cpu-power=1000", - effectiveMachineCons: "mem=8G arch=amd64 cpu-cores=4 tags=bar cpu-power=1000", + effectiveUnitCons: "mem=8G arch=amd64 cores=4 tags=bar cpu-power=1000", + effectiveMachineCons: "mem=8G arch=amd64 cores=4 tags=bar cpu-power=1000", }, { about: "set fallbacks are overriden the same way for provisioning and deployment", consToSet: "tags= cpu-power= spaces=bar", diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/controller.go juju-core-2.0.0/src/github.com/juju/juju/state/controller.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/controller.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/controller.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,6 +4,8 @@ package state import ( + "fmt" + "github.com/juju/errors" jujucontroller "github.com/juju/juju/controller" @@ -17,6 +19,12 @@ controllerGlobalKey = "c" ) +// controllerKey will return the key for a given controller using the +// controller uuid and the controllerGlobalKey. +func controllerKey(controllerUUID string) string { + return fmt.Sprintf("%s#%s", controllerGlobalKey, controllerUUID) +} + // ControllerConfig returns the config values for the controller. 
func (st *State) ControllerConfig() (jujucontroller.Config, error) { settings, err := readSettings(st, controllersC, controllerSettingsGlobalKey) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/controller_test.go juju-core-2.0.0/src/github.com/juju/juju/state/controller_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/controller_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/controller_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -23,12 +23,16 @@ controllerSettings, err := s.State.ReadSettings(state.ControllersC, "controllerSettings") c.Assert(err, jc.ErrorIsNil) - optional := func(attr string) bool { - return attr == controller.IdentityURL || attr == controller.IdentityPublicKey + optional := map[string]bool{ + controller.IdentityURL: true, + controller.IdentityPublicKey: true, + controller.AutocertURLKey: true, + controller.AutocertDNSNameKey: true, + controller.AllowModelAccessKey: true, } for _, controllerAttr := range controller.ControllerOnlyConfigAttributes { v, ok := controllerSettings.Get(controllerAttr) - if !optional(controllerAttr) { + if !optional[controllerAttr] { c.Assert(ok, jc.IsTrue) c.Assert(v, gc.Not(gc.Equals), "") } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/controlleruser.go juju-core-2.0.0/src/github.com/juju/juju/state/controlleruser.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/controlleruser.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/controlleruser.go 2016-10-13 14:31:49.000000000 +0000 @@ -9,20 +9,22 @@ "time" "github.com/juju/errors" - "github.com/juju/juju/core/description" "gopkg.in/juju/names.v2" "gopkg.in/mgo.v2" "gopkg.in/mgo.v2/txn" + + "github.com/juju/juju/permission" ) -const defaultControllerPermission = description.LoginAccess +const defaultControllerPermission = permission.LoginAccess // setAccess changes the user's access permissions on the controller. 
-func (st *State) setControllerAccess(access description.Access, userGlobalKey string) error { - if err := access.Validate(); err != nil { +func (st *State) setControllerAccess(access permission.Access, userGlobalKey string) error { + if err := permission.ValidateControllerAccess(access); err != nil { return errors.Trace(err) } - op := updatePermissionOp(controllerGlobalKey, userGlobalKey, access) + op := updatePermissionOp(controllerKey(st.ControllerUUID()), userGlobalKey, access) + err := st.runTransaction([]txn.Op{op}) if err == txn.ErrAborted { return errors.NotFoundf("existing permissions") @@ -36,10 +38,10 @@ controllerUsers, closer := st.getCollection(controllerUsersC) defer closer() - username := strings.ToLower(user.Canonical()) + username := strings.ToLower(user.Id()) err := controllerUsers.FindId(username).One(&controllerUser) if err == mgo.ErrNotFound { - return userAccessDoc{}, errors.NotFoundf("controller user %q", user.Canonical()) + return userAccessDoc{}, errors.NotFoundf("controller user %q", user.Id()) } // DateCreated is inserted as UTC, but read out as local time. So we // convert it back to UTC here. 
@@ -47,18 +49,18 @@ return controllerUser, nil } -func createControllerUserOps(controllerUUID string, user, createdBy names.UserTag, displayName string, dateCreated time.Time, access description.Access) []txn.Op { - creatorname := createdBy.Canonical() +func createControllerUserOps(controllerUUID string, user, createdBy names.UserTag, displayName string, dateCreated time.Time, access permission.Access) []txn.Op { + creatorname := createdBy.Id() doc := &userAccessDoc{ ID: userAccessID(user), ObjectUUID: controllerUUID, - UserName: user.Canonical(), + UserName: user.Id(), DisplayName: displayName, CreatedBy: creatorname, DateCreated: dateCreated, } ops := []txn.Op{ - createPermissionOp(controllerGlobalKey, userGlobalKey(userAccessID(user)), access), + createPermissionOp(controllerKey(controllerUUID), userGlobalKey(userAccessID(user)), access), { C: controllerUsersC, Id: userAccessID(user), @@ -69,10 +71,9 @@ return ops } -// RemoveControllerUser removes a user from the database. -func (st *State) removeControllerUser(user names.UserTag) error { - ops := []txn.Op{ - removePermissionOp(controllerGlobalKey, userGlobalKey(userAccessID(user))), +func removeControllerUserOps(controllerUUID string, user names.UserTag) []txn.Op { + return []txn.Op{ + removePermissionOp(controllerKey(controllerUUID), userGlobalKey(userAccessID(user))), { C: controllerUsersC, Id: userAccessID(user), @@ -80,9 +81,14 @@ Remove: true, }} +} + +// RemoveControllerUser removes a user from the database. 
+func (st *State) removeControllerUser(user names.UserTag) error { + ops := removeControllerUserOps(st.ControllerUUID(), user) err := st.runTransaction(ops) if err == txn.ErrAborted { - err = errors.NewNotFound(nil, fmt.Sprintf("controller user %q does not exist", user.Canonical())) + err = errors.NewNotFound(nil, fmt.Sprintf("controller user %q does not exist", user.Id())) } if err != nil { return errors.Trace(err) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/controlleruser_test.go juju-core-2.0.0/src/github.com/juju/juju/state/controlleruser_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/controlleruser_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/controlleruser_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -9,7 +9,7 @@ gc "gopkg.in/check.v1" "gopkg.in/juju/names.v2" - "github.com/juju/juju/core/description" + "github.com/juju/juju/permission" "github.com/juju/juju/testing/factory" ) @@ -20,7 +20,7 @@ var _ = gc.Suite(&ControllerUserSuite{}) type accessAwareUser interface { - Access() description.Access + Access() permission.Access } func (s *ControllerUserSuite) TestDefaultAccessControllerUser(c *gc.C) { @@ -34,7 +34,7 @@ ctag := names.NewControllerTag(s.State.ControllerUUID()) controllerUser, err := s.State.UserAccess(userTag, ctag) c.Assert(err, jc.ErrorIsNil) - c.Assert(controllerUser.Access, gc.Equals, description.LoginAccess) + c.Assert(controllerUser.Access, gc.Equals, permission.LoginAccess) } func (s *ControllerUserSuite) TestSetAccessControllerUser(c *gc.C) { @@ -48,12 +48,12 @@ ctag := names.NewControllerTag(s.State.ControllerUUID()) controllerUser, err := s.State.UserAccess(userTag, ctag) c.Assert(err, jc.ErrorIsNil) - c.Assert(controllerUser.Access, gc.Equals, description.LoginAccess) + c.Assert(controllerUser.Access, gc.Equals, permission.LoginAccess) - s.State.SetUserAccess(userTag, ctag, description.AddModelAccess) + s.State.SetUserAccess(userTag, ctag, 
permission.AddModelAccess) controllerUser, err = s.State.UserAccess(user.UserTag(), ctag) - c.Assert(controllerUser.Access, gc.Equals, description.AddModelAccess) + c.Assert(controllerUser.Access, gc.Equals, permission.AddModelAccess) } func (s *ControllerUserSuite) TestRemoveControllerUser(c *gc.C) { diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/database.go juju-core-2.0.0/src/github.com/juju/juju/state/database.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/database.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/database.go 2016-10-13 14:31:49.000000000 +0000 @@ -22,13 +22,20 @@ // Database exposes the mongodb capabilities that most of state should see. type Database interface { - // CopySession returns a matching Database with its own session, and a + // Copy returns a matching Database with its own session, and a // func that must be called when the Database is no longer needed. // // GetCollection and TransactionRunner results from the resulting Database // will all share a session; this does not absolve you of responsibility // for calling those collections' closers. - CopySession() (Database, SessionCloser) + Copy() (Database, SessionCloser) + + // CopyForModel returns a matching Database with its own session and + // its own modelUUID and a func that must be called when the Database is no + // longer needed. + // + // Same warnings apply for CopyForModel than for Copy. + CopyForModel(modelUUID string) (Database, SessionCloser) // GetCollection returns the named Collection, and a func that must be // called when the Collection is no longer needed. The returned Collection @@ -73,7 +80,7 @@ // Apply runs the supplied Change against the supplied Database. If it // returns no error, the change succeeded. 
func Apply(db Database, change Change) error { - db, closer := db.CopySession() + db, closer := db.Copy() defer closer() buildTxn := func(int) ([]txn.Op, error) { @@ -191,20 +198,30 @@ runner jujutxn.Runner // ownSession is used to avoid copying additional sessions in a database - // resulting from CopySession. + // resulting from Copy. ownSession bool } -// CopySession is part of the Database interface. -func (db *database) CopySession() (Database, SessionCloser) { +func (db *database) copySession(modelUUID string) (*database, SessionCloser) { session := db.raw.Session.Copy() return &database{ raw: db.raw.With(session), schema: db.schema, - modelUUID: db.modelUUID, + modelUUID: modelUUID, runner: db.runner, ownSession: true, }, session.Close + +} + +// Copy is part of the Database interface. +func (db *database) Copy() (Database, SessionCloser) { + return db.copySession(db.modelUUID) +} + +// CopyForModel is part of the Database interface. +func (db *database) CopyForModel(modelUUID string) (Database, SessionCloser) { + return db.copySession(modelUUID) } // GetCollection is part of the Database interface. diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/dump.go juju-core-2.0.0/src/github.com/juju/juju/state/dump.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/dump.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/dump.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,65 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package state + +import "github.com/juju/errors" + +// DumpAll returns a map of collection names to a slice of documents +// in that collection. Every document that is related to the current +// model is returned in the map. +func (st *State) DumpAll() (map[string]interface{}, error) { + result := make(map[string]interface{}) + // Add in the model document itself. 
+ doc, err := getModelDoc(st) + if err != nil { + return nil, err + } + result[modelsC] = doc + for name, info := range allCollections() { + if !info.global { + docs, err := getAllModelDocs(st, name) + if err != nil { + return nil, errors.Trace(err) + } + if len(docs) > 0 { + result[name] = docs + } + } + } + return result, nil +} + +func getModelDoc(st *State) (map[string]interface{}, error) { + coll, closer := st.getCollection(modelsC) + defer closer() + + var doc map[string]interface{} + if err := coll.FindId(st.ModelUUID()).One(&doc); err != nil { + return nil, errors.Annotatef(err, "reading model %q", st.ModelUUID()) + } + return doc, nil + +} + +func getAllModelDocs(st *State, collectionName string) ([]map[string]interface{}, error) { + coll, closer := st.getCollection(collectionName) + defer closer() + + var ( + result []map[string]interface{} + doc map[string]interface{} + ) + // Always output in id order. + iter := coll.Find(nil).Sort("_id").Iter() + defer iter.Close() + for iter.Next(&doc) { + result = append(result, doc) + doc = nil + } + + if err := iter.Err(); err != nil { + return nil, errors.Annotatef(err, "reading collection %q", collectionName) + } + return result, nil +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/dump_test.go juju-core-2.0.0/src/github.com/juju/juju/state/dump_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/dump_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/dump_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,34 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+ +package state_test + +import ( + jc "github.com/juju/testing/checkers" + "github.com/juju/utils/set" + gc "gopkg.in/check.v1" +) + +type dumpSuite struct { + ConnSuite +} + +var _ = gc.Suite(&dumpSuite{}) + +func (s *dumpSuite) TestDumpAll(c *gc.C) { + value, err := s.State.DumpAll() + c.Assert(err, jc.ErrorIsNil) + + models, ok := value["models"].(map[string]interface{}) + c.Assert(ok, jc.IsTrue) + c.Assert(models["name"], gc.Equals, "testenv") + + initialCollections := set.NewStrings() + for name := range value { + initialCollections.Add(name) + } + // check that there are some other collections there + c.Check(initialCollections.Contains("modelusers"), jc.IsTrue) + c.Check(initialCollections.Contains("leases"), jc.IsTrue) + c.Check(initialCollections.Contains("statuses"), jc.IsTrue) +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/export_test.go juju-core-2.0.0/src/github.com/juju/juju/state/export_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/export_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/export_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -7,10 +7,11 @@ "fmt" "io/ioutil" "path/filepath" - "time" + "time" // Only used for time types. 
"github.com/juju/errors" "github.com/juju/loggo" + "github.com/juju/testing" jc "github.com/juju/testing/checkers" jujutxn "github.com/juju/txn" txntesting "github.com/juju/txn/testing" @@ -21,12 +22,15 @@ "gopkg.in/mgo.v2/bson" "gopkg.in/mgo.v2/txn" - "github.com/juju/juju/core/description" "github.com/juju/juju/core/lease" "github.com/juju/juju/mongo" + "github.com/juju/juju/mongo/utils" "github.com/juju/juju/network" + "github.com/juju/juju/permission" + "github.com/juju/juju/status" "github.com/juju/juju/testcharms" "github.com/juju/juju/version" + "github.com/juju/juju/worker" ) const ( @@ -49,7 +53,6 @@ ControllerAvailable = &controllerAvailable GetOrCreatePorts = getOrCreatePorts GetPorts = getPorts - NowToTheSecond = nowToTheSecond AddVolumeOps = (*State).addVolumeOps CombineMeterStatus = combineMeterStatus ApplicationGlobalKey = applicationGlobalKey @@ -105,16 +108,12 @@ return m.String() } -func ServiceSettingsRefCount(st *State, applicationname string, curl *charm.URL) (int, error) { - settingsRefsCollection, closer := st.getCollection(settingsrefsC) +func ServiceSettingsRefCount(st *State, appName string, curl *charm.URL) (int, error) { + refcounts, closer := st.getCollection(refcountsC) defer closer() - key := applicationSettingsKey(applicationname, curl) - var doc settingsRefsDoc - if err := settingsRefsCollection.FindId(key).One(&doc); err == nil { - return doc.RefCount, nil - } - return 0, mgo.ErrNotFound + key := applicationSettingsKey(appName, curl) + return nsRefcounts.read(refcounts, key) } func AddTestingCharm(c *gc.C, st *State, name string) *Charm { @@ -460,7 +459,7 @@ var ActionNotificationIdToActionId = actionNotificationIdToActionId -func UpdateModelUserLastConnection(st *State, e description.UserAccess, when time.Time) error { +func UpdateModelUserLastConnection(st *State, e permission.UserAccess, when time.Time) error { return st.updateLastModelConnection(e.UserTag, when) } @@ -492,3 +491,75 @@ } return client.Leases(), nil } + +func 
StorageAttachmentCount(instance StorageInstance) int { + internal, ok := instance.(*storageInstance) + if !ok { + return -1 + } + return internal.doc.AttachmentCount +} + +func ResetMigrationMode(c *gc.C, st *State) { + ops := []txn.Op{{ + C: modelsC, + Id: st.ModelUUID(), + Assert: txn.DocExists, + Update: bson.M{ + "$set": bson.M{"migration-mode": MigrationModeNone}, + }, + }} + err := st.runTransaction(ops) + c.Assert(err, jc.ErrorIsNil) +} + +// PrimeUnitStatusHistory will add count history elements, advancing the test clock by +// one second for each entry. +func PrimeUnitStatusHistory( + c *gc.C, clock *testing.Clock, + unit *Unit, statusVal status.Status, + count, batchSize int, + nextData func(int) map[string]interface{}, +) { + globalKey := unit.globalKey() + + history, closer := unit.st.getCollection(statusesHistoryC) + defer closer() + historyW := history.Writeable() + + var data map[string]interface{} + for i := 0; i < count; { + var docs []interface{} + for j := 0; j < batchSize && i < count; j++ { + clock.Advance(time.Second) + if nextData != nil { + data = utils.EscapeKeys(nextData(i)) + } + docs = append(docs, &historicalStatusDoc{ + Status: statusVal, + StatusData: data, + Updated: clock.Now().UnixNano(), + GlobalKey: globalKey, + }) + // Seems like you can't increment two values in one loop + i++ + } + err := historyW.Insert(docs...) + c.Assert(err, jc.ErrorIsNil) + } + // Set the status for the unit itself. + doc := statusDoc{ + Status: statusVal, + StatusData: data, + Updated: clock.Now().UnixNano(), + } + buildTxn := updateStatusSource(unit.st, globalKey, doc) + err := unit.st.run(buildTxn) + c.Assert(err, jc.ErrorIsNil) +} + +// GetInternalWorkers returns the internal workers managed by a State +// to allow inspection in tests. 
+func GetInternalWorkers(st *State) worker.Worker { + return st.workers +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/filesystem.go juju-core-2.0.0/src/github.com/juju/juju/state/filesystem.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/filesystem.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/filesystem.go 2016-10-13 14:31:49.000000000 +0000 @@ -614,7 +614,7 @@ }} } hasAttachments := bson.D{{"attachmentcount", bson.D{{"$gt", 0}}}} - cleanupOp := st.newCleanupOp(cleanupAttachmentsForDyingFilesystem, f.doc.FilesystemId) + cleanupOp := newCleanupOp(cleanupAttachmentsForDyingFilesystem, f.doc.FilesystemId) return []txn.Op{{ C: filesystemsC, Id: f.doc.FilesystemId, @@ -754,9 +754,8 @@ } status := statusDoc{ - Status: status.StatusPending, - // TODO(fwereade): 2016-03-17 lp:1558657 - Updated: time.Now().UnixNano(), + Status: status.Pending, + Updated: st.clock.Now().UnixNano(), } doc := filesystemDoc{ FilesystemId: filesystemId, @@ -1147,12 +1146,12 @@ // SetFilesystemStatus sets the status of the specified filesystem. func (st *State) SetFilesystemStatus(tag names.FilesystemTag, fsStatus status.Status, info string, data map[string]interface{}, updated *time.Time) error { switch fsStatus { - case status.StatusAttaching, status.StatusAttached, status.StatusDetaching, status.StatusDetached, status.StatusDestroying: - case status.StatusError: + case status.Attaching, status.Attached, status.Detaching, status.Detached, status.Destroying: + case status.Error: if info == "" { return errors.Errorf("cannot set status %q without info", fsStatus) } - case status.StatusPending: + case status.Pending: // If a filesystem is not yet provisioned, we allow its status // to be set back to pending (when a retry is to occur). 
v, err := st.Filesystem(tag) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/imagestorage/image_test.go juju-core-2.0.0/src/github.com/juju/juju/state/imagestorage/image_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/imagestorage/image_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/imagestorage/image_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -10,7 +10,7 @@ "io/ioutil" "strings" stdtesting "testing" - "time" + "time" // Only used for time types. "github.com/juju/errors" gitjujutesting "github.com/juju/testing" @@ -76,8 +76,11 @@ func checkMetadata(c *gc.C, fromDb, metadata *imagestorage.Metadata) { c.Assert(fromDb.Created.IsZero(), jc.IsFalse) + // We don't want Now() here, we want NonZeroTime().Add(...). Before + // that can happen, we need to look at AddImage for its Created + // timestamp. c.Assert(fromDb.Created.Before(time.Now()), jc.IsTrue) - fromDb.Created = time.Time{} + fromDb.Created = testing.ZeroTime() c.Assert(metadata, gc.DeepEquals, fromDb) } @@ -406,7 +409,7 @@ Size: size, SHA256: checksum, Path: path, - Created: time.Now(), + Created: testing.NonZeroTime(), SourceURL: sourceURL, } err := s.metadataCollection.Insert(&doc) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/initialize_test.go juju-core-2.0.0/src/github.com/juju/juju/state/initialize_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/initialize_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/initialize_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -6,12 +6,14 @@ import ( gitjujutesting "github.com/juju/testing" jc "github.com/juju/testing/checkers" + "github.com/juju/utils/clock" gc "gopkg.in/check.v1" "gopkg.in/juju/names.v2" "github.com/juju/juju/cloud" "github.com/juju/juju/constraints" "github.com/juju/juju/controller" + "github.com/juju/juju/environs" "github.com/juju/juju/environs/config" "github.com/juju/juju/mongo/mongotest" 
"github.com/juju/juju/state" @@ -46,6 +48,7 @@ func (s *InitializeSuite) openState(c *gc.C, modelTag names.ModelTag) { st, err := state.Open( modelTag, + testing.ControllerTag, statetesting.NewMongoInfo(), mongotest.DialOpts(), state.NewPolicyFunc(nil), @@ -68,6 +71,12 @@ uuid := cfg.UUID() owner := names.NewLocalUserTag("initialize-admin") + userPassCredentialTag := names.NewCloudCredentialTag( + "dummy/" + owner.Id() + "/some-credential", + ) + emptyCredentialTag := names.NewCloudCredentialTag( + "dummy/" + owner.Id() + "/empty-credential", + ) userpassCredential := cloud.NewCredential( cloud.UserPassAuthType, map[string]string{ @@ -75,24 +84,24 @@ "password": "hunter2", }, ) - userpassCredential.Label = "some-credential" + userpassCredential.Label = userPassCredentialTag.Name() emptyCredential := cloud.NewEmptyCredential() - emptyCredential.Label = "empty-credential" - cloudCredentialsIn := map[string]cloud.Credential{ - userpassCredential.Label: userpassCredential, - emptyCredential.Label: emptyCredential, + emptyCredential.Label = emptyCredentialTag.Name() + cloudCredentialsIn := map[names.CloudCredentialTag]cloud.Credential{ + userPassCredentialTag: userpassCredential, + emptyCredentialTag: emptyCredential, } controllerCfg := testing.FakeControllerConfig() - controllerCfg["controller-uuid"] = uuid st, err := state.Initialize(state.InitializeParams{ + Clock: clock.WallClock, ControllerConfig: controllerCfg, ControllerModelArgs: state.ModelArgs{ Owner: owner, Config: cfg, CloudName: "dummy", - CloudRegion: "some-region", - CloudCredential: "some-credential", + CloudRegion: "dummy-region", + CloudCredential: userPassCredentialTag, StorageProviderRegistry: storage.StaticProviderRegistry{}, }, CloudName: "dummy", @@ -101,7 +110,7 @@ AuthTypes: []cloud.AuthType{ cloud.EmptyAuthType, cloud.UserPassAuthType, }, - Regions: []cloud.Region{{Name: "some-region"}}, + Regions: []cloud.Region{{Name: "dummy-region"}}, }, CloudCredentials: cloudCredentialsIn, MongoInfo: 
statetesting.NewMongoInfo(), @@ -129,7 +138,7 @@ model, err := s.State.Model() c.Assert(err, jc.ErrorIsNil) c.Assert(model.Tag(), gc.Equals, modelTag) - c.Assert(model.CloudRegion(), gc.Equals, "some-region") + c.Assert(model.CloudRegion(), gc.Equals, "dummy-region") // Check that the owner has been created. c.Assert(model.Owner(), gc.Equals, owner) // Check that the owner can be retrieved by the tag. @@ -160,18 +169,25 @@ // Check that the model's cloud and credential names are as // expected, and the owner's cloud credentials are initialised. c.Assert(model.Cloud(), gc.Equals, "dummy") - c.Assert(model.CloudCredential(), gc.Equals, "some-credential") + credentialTag, ok := model.CloudCredential() + c.Assert(ok, jc.IsTrue) + c.Assert(credentialTag, gc.Equals, userPassCredentialTag) cloudCredentials, err := s.State.CloudCredentials(model.Owner(), "dummy") c.Assert(err, jc.ErrorIsNil) - c.Assert(cloudCredentials, jc.DeepEquals, cloudCredentialsIn) + expectedCred := make(map[string]cloud.Credential, len(cloudCredentialsIn)) + for tag, cred := range cloudCredentialsIn { + expectedCred[tag.Id()] = cred + } + c.Assert(cloudCredentials, jc.DeepEquals, expectedCred) } func (s *InitializeSuite) TestInitializeWithInvalidCredentialType(c *gc.C) { owner := names.NewLocalUserTag("initialize-admin") modelCfg := testing.ModelConfig(c) controllerCfg := testing.FakeControllerConfig() - controllerCfg["controller-uuid"] = modelCfg.UUID() + credentialTag := names.NewCloudCredentialTag("dummy/" + owner.Id() + "/borken") _, err := state.Initialize(state.InitializeParams{ + Clock: clock.WallClock, ControllerConfig: controllerCfg, ControllerModelArgs: state.ModelArgs{ CloudName: "dummy", @@ -186,14 +202,14 @@ cloud.AccessKeyAuthType, cloud.OAuth1AuthType, }, }, - CloudCredentials: map[string]cloud.Credential{ - "borken": cloud.NewCredential(cloud.UserPassAuthType, nil), + CloudCredentials: map[names.CloudCredentialTag]cloud.Credential{ + credentialTag: 
cloud.NewCredential(cloud.UserPassAuthType, nil), }, MongoInfo: statetesting.NewMongoInfo(), MongoDialOpts: mongotest.DialOpts(), }) c.Assert(err, gc.ErrorMatches, - `validating initialization args: validating cloud credentials: credential "borken" with auth-type "userpass" is not supported \(expected one of \["access-key" "oauth1"\]\)`, + `validating initialization args: validating cloud credentials: credential "dummy/initialize-admin/borken" with auth-type "userpass" is not supported \(expected one of \["access-key" "oauth1"\]\)`, ) } @@ -206,9 +222,9 @@ } owner := names.NewLocalUserTag("initialize-admin") controllerCfg := testing.FakeControllerConfig() - controllerCfg["controller-uuid"] = uuid st, err := state.Initialize(state.InitializeParams{ + Clock: clock.WallClock, ControllerConfig: controllerCfg, ControllerModelArgs: state.ModelArgs{ CloudName: "dummy", @@ -258,9 +274,9 @@ mgoInfo := statetesting.NewMongoInfo() dialOpts := mongotest.DialOpts() controllerCfg := testing.FakeControllerConfig() - controllerCfg["controller-uuid"] = cfg.UUID() args := state.InitializeParams{ + Clock: clock.WallClock, ControllerConfig: controllerCfg, ControllerModelArgs: state.ModelArgs{ CloudName: "dummy", @@ -316,12 +332,13 @@ owner := names.NewLocalUserTag("initialize-admin") controllerCfg := testing.FakeControllerConfig() - controllerCfg["controller-uuid"] = good.UUID() args := state.InitializeParams{ + Clock: clock.WallClock, ControllerConfig: controllerCfg, ControllerModelArgs: state.ModelArgs{ CloudName: "dummy", + CloudRegion: "dummy-region", Owner: owner, Config: bad, StorageProviderRegistry: storage.StaticProviderRegistry{}, @@ -330,6 +347,7 @@ Cloud: cloud.Cloud{ Type: "dummy", AuthTypes: []cloud.AuthType{cloud.EmptyAuthType}, + Regions: []cloud.Region{{Name: "dummy-region"}}, }, MongoInfo: statetesting.NewMongoInfo(), MongoDialOpts: mongotest.DialOpts(), @@ -366,8 +384,8 @@ modelCfg := testing.ModelConfig(c) controllerCfg := testing.FakeControllerConfig() - 
controllerCfg["controller-uuid"] = modelCfg.UUID() args := state.InitializeParams{ + Clock: clock.WallClock, ControllerConfig: controllerCfg, ControllerModelArgs: state.ModelArgs{ CloudName: "dummy", @@ -391,3 +409,168 @@ c.Assert(err, gc.ErrorMatches, "local cloud config cannot contain .*") } } + +func (s *InitializeSuite) TestInitializeWithCloudRegionConfig(c *gc.C) { + cfg := testing.ModelConfig(c) + uuid := cfg.UUID() + + // Phony region-config + regionInheritedConfigIn := cloud.RegionConfig{ + "a-region": cloud.Attrs{ + "a-key": "a-value", + }, + "b-region": cloud.Attrs{ + "b-key": "b-value", + }, + } + owner := names.NewLocalUserTag("initialize-admin") + controllerCfg := testing.FakeControllerConfig() + + st, err := state.Initialize(state.InitializeParams{ + Clock: clock.WallClock, + ControllerConfig: controllerCfg, + ControllerModelArgs: state.ModelArgs{ + CloudName: "dummy", + Owner: owner, + Config: cfg, + StorageProviderRegistry: storage.StaticProviderRegistry{}, + }, + CloudName: "dummy", + Cloud: cloud.Cloud{ + Type: "dummy", + AuthTypes: []cloud.AuthType{cloud.EmptyAuthType}, + RegionConfig: regionInheritedConfigIn, // Init with phony region-config + }, + MongoInfo: statetesting.NewMongoInfo(), + MongoDialOpts: mongotest.DialOpts(), + }) + c.Assert(err, jc.ErrorIsNil) + c.Assert(st, gc.NotNil) + modelTag := st.ModelTag() + c.Assert(modelTag.Id(), gc.Equals, uuid) + err = st.Close() + c.Assert(err, jc.ErrorIsNil) + + s.openState(c, modelTag) + + for k := range regionInheritedConfigIn { + // Query for config for each region + regionInheritedConfig, err := state.ReadSettings( + s.State, state.GlobalSettingsC, + "dummy#"+k) + c.Assert(err, jc.ErrorIsNil) + c.Assert( + cloud.Attrs(regionInheritedConfig.Map()), + jc.DeepEquals, + regionInheritedConfigIn[k]) + } +} + +func (s *InitializeSuite) TestInitializeWithCloudRegionMisses(c *gc.C) { + cfg := testing.ModelConfig(c) + uuid := cfg.UUID() + controllerInheritedConfigIn := map[string]interface{}{ + 
"no-proxy": "local", + } + // Phony region-config + regionInheritedConfigIn := cloud.RegionConfig{ + "a-region": cloud.Attrs{ + "no-proxy": "a-value", + }, + "b-region": cloud.Attrs{ + "no-proxy": "b-value", + }, + } + owner := names.NewLocalUserTag("initialize-admin") + controllerCfg := testing.FakeControllerConfig() + + st, err := state.Initialize(state.InitializeParams{ + Clock: clock.WallClock, + ControllerConfig: controllerCfg, + ControllerModelArgs: state.ModelArgs{ + CloudName: "dummy", + Owner: owner, + Config: cfg, + StorageProviderRegistry: storage.StaticProviderRegistry{}, + }, + CloudName: "dummy", + Cloud: cloud.Cloud{ + Type: "dummy", + AuthTypes: []cloud.AuthType{cloud.EmptyAuthType}, + RegionConfig: regionInheritedConfigIn, // Init with phony region-config + }, + ControllerInheritedConfig: controllerInheritedConfigIn, + MongoInfo: statetesting.NewMongoInfo(), + MongoDialOpts: mongotest.DialOpts(), + }) + c.Assert(err, jc.ErrorIsNil) + c.Assert(st, gc.NotNil) + modelTag := st.ModelTag() + c.Assert(modelTag.Id(), gc.Equals, uuid) + err = st.Close() + c.Assert(err, jc.ErrorIsNil) + + s.openState(c, modelTag) + + var attrs map[string]interface{} + rspec := &environs.RegionSpec{Cloud: "dummy", Region: "c-region"} + got, err := s.State.ComposeNewModelConfig(attrs, rspec) + c.Check(err, jc.ErrorIsNil) + c.Assert(got["no-proxy"], gc.Equals, "local") +} + +func (s *InitializeSuite) TestInitializeWithCloudRegionHits(c *gc.C) { + cfg := testing.ModelConfig(c) + uuid := cfg.UUID() + + controllerInheritedConfigIn := map[string]interface{}{ + "no-proxy": "local", + } + // Phony region-config + regionInheritedConfigIn := cloud.RegionConfig{ + "a-region": cloud.Attrs{ + "no-proxy": "a-value", + }, + "b-region": cloud.Attrs{ + "no-proxy": "b-value", + }, + } + owner := names.NewLocalUserTag("initialize-admin") + controllerCfg := testing.FakeControllerConfig() + + st, err := state.Initialize(state.InitializeParams{ + Clock: clock.WallClock, + ControllerConfig: 
controllerCfg, + ControllerModelArgs: state.ModelArgs{ + CloudName: "dummy", + Owner: owner, + Config: cfg, + StorageProviderRegistry: storage.StaticProviderRegistry{}, + }, + CloudName: "dummy", + Cloud: cloud.Cloud{ + Type: "dummy", + AuthTypes: []cloud.AuthType{cloud.EmptyAuthType}, + RegionConfig: regionInheritedConfigIn, // Init with phony region-config + }, + ControllerInheritedConfig: controllerInheritedConfigIn, + MongoInfo: statetesting.NewMongoInfo(), + MongoDialOpts: mongotest.DialOpts(), + }) + c.Assert(err, jc.ErrorIsNil) + c.Assert(st, gc.NotNil) + modelTag := st.ModelTag() + c.Assert(modelTag.Id(), gc.Equals, uuid) + err = st.Close() + c.Assert(err, jc.ErrorIsNil) + + s.openState(c, modelTag) + + var attrs map[string]interface{} + for r := range regionInheritedConfigIn { + rspec := &environs.RegionSpec{Cloud: "dummy", Region: r} + got, err := s.State.ComposeNewModelConfig(attrs, rspec) + c.Check(err, jc.ErrorIsNil) + c.Assert(got["no-proxy"], gc.Equals, regionInheritedConfigIn[r]["no-proxy"]) + } +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/interface.go juju-core-2.0.0/src/github.com/juju/juju/state/interface.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/interface.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/interface.go 2016-10-13 14:31:49.000000000 +0000 @@ -103,7 +103,8 @@ // about clouds and credentials. 
type CloudAccessor interface { Cloud(cloud string) (cloud.Cloud, error) - CloudCredentials(user names.UserTag, cloud string) (map[string]cloud.Credential, error) + Clouds() (map[names.CloudTag]cloud.Cloud, error) + CloudCredential(tag names.CloudCredentialTag) (cloud.Credential, error) } // ModelAccessor defines the methods needed to watch for model diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/internal/audit/audit_test.go juju-core-2.0.0/src/github.com/juju/juju/state/internal/audit/audit_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/internal/audit/audit_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/internal/audit/audit_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,11 +4,10 @@ package audit_test import ( - "time" - "github.com/juju/errors" "github.com/juju/testing" jc "github.com/juju/testing/checkers" + "github.com/juju/utils" "github.com/juju/version" gc "gopkg.in/check.v1" "gopkg.in/mgo.v2/bson" @@ -16,7 +15,7 @@ "github.com/juju/juju/audit" mongoutils "github.com/juju/juju/mongo/utils" stateaudit "github.com/juju/juju/state/internal/audit" - "github.com/juju/utils" + coretesting "github.com/juju/juju/testing" ) type AuditSuite struct { @@ -30,7 +29,7 @@ requested := audit.AuditEntry{ JujuServerVersion: version.MustParse("1.0.0"), ModelUUID: utils.MustNewUUID().String(), - Timestamp: time.Now().UTC(), + Timestamp: coretesting.NonZeroTime().UTC(), RemoteAddress: "8.8.8.8", OriginType: "user", OriginName: "bob", @@ -89,7 +88,7 @@ auditEntry := audit.AuditEntry{ JujuServerVersion: version.MustParse("1.0.0"), ModelUUID: uuid.String(), - Timestamp: time.Now().UTC(), + Timestamp: coretesting.NonZeroTime().UTC(), RemoteAddress: "8.8.8.8", OriginType: "user", OriginName: "bob", diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/internal_test.go juju-core-2.0.0/src/github.com/juju/juju/state/internal_test.go --- 
juju-core-2.0~beta15/src/github.com/juju/juju/state/internal_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/internal_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -7,6 +7,7 @@ "github.com/juju/errors" jujutesting "github.com/juju/testing" jc "github.com/juju/testing/checkers" + "github.com/juju/utils/clock" gc "gopkg.in/check.v1" "gopkg.in/juju/names.v2" @@ -57,11 +58,12 @@ } modelCfg := testing.ModelConfig(c) controllerCfg := testing.FakeControllerConfig() - controllerCfg["controller-uuid"] = modelCfg.UUID() st, err := Initialize(InitializeParams{ + Clock: clock.WallClock, ControllerConfig: controllerCfg, ControllerModelArgs: ModelArgs{ CloudName: "dummy", + CloudRegion: "dummy-region", Owner: s.owner, Config: modelCfg, StorageProviderRegistry: provider.CommonStorageProviders(), @@ -70,6 +72,11 @@ Cloud: cloud.Cloud{ Type: "dummy", AuthTypes: []cloud.AuthType{cloud.EmptyAuthType}, + Regions: []cloud.Region{ + cloud.Region{ + Name: "dummy-region", + }, + }, }, MongoInfo: info, MongoDialOpts: mongotest.DialOpts(), @@ -108,3 +115,7 @@ func (internalStatePolicy) StorageProviderRegistry() (storage.ProviderRegistry, error) { return provider.CommonStorageProviders(), nil } + +func (internalStatePolicy) ProviderConfigSchemaSource() (config.ConfigSchemaSource, error) { + return nil, errors.NotImplementedf("ConfigSchemaSource") +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/leadership.go juju-core-2.0.0/src/github.com/juju/juju/state/leadership.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/leadership.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/leadership.go 2016-10-13 14:31:49.000000000 +0000 @@ -37,25 +37,6 @@ return leadershipChecker{st.workers.LeadershipManager()} } -// HackLeadership stops the state's internal leadership manager to prevent it -// from interfering with apiserver shutdown. 
-func (st *State) HackLeadership() { - // TODO(fwereade): 2015-08-07 lp:1482634 - // obviously, this should not exist: it's a quick hack to address lp:1481368 in - // 1.24.4, and should be quickly replaced with something that isn't so heinous. - // - // But. - // - // I *believe* that what it'll take to fix this is to extract the mongo-session- - // opening from state.Open, so we can create a mongosessioner Manifold on which - // state, leadership, watching, tools storage, etc etc etc can all independently - // depend. (Each dependency would/should have a separate session so they can - // close them all on their own schedule, without panics -- but the failure of - // the shared component should successfully goose them all into shutting down, - // in parallel, of their own accord.) - st.workers.Kill() -} - // buildTxnWithLeadership returns a transaction source that combines the supplied source // with checks and asserts on the supplied token. func buildTxnWithLeadership(buildTxn jujutxn.TransactionSource, token leadership.Token) jujutxn.TransactionSource { diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/lease/client_assert_test.go juju-core-2.0.0/src/github.com/juju/juju/state/lease/client_assert_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/lease/client_assert_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/lease/client_assert_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,7 +4,7 @@ package lease_test import ( - "time" + "time" // Only used for time types. 
jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/lease/client_operation_test.go juju-core-2.0.0/src/github.com/juju/juju/state/lease/client_operation_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/lease/client_operation_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/lease/client_operation_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,7 +4,7 @@ package lease_test import ( - "time" + "time" // Only used for time types. jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/lease/client_race_test.go juju-core-2.0.0/src/github.com/juju/juju/state/lease/client_race_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/lease/client_race_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/lease/client_race_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,7 +4,7 @@ package lease_test import ( - "time" + "time" // Only used for time types. jc "github.com/juju/testing/checkers" jujutxn "github.com/juju/txn" diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/lease/client_remote_test.go juju-core-2.0.0/src/github.com/juju/juju/state/lease/client_remote_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/lease/client_remote_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/lease/client_remote_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,7 +4,7 @@ package lease_test import ( - "time" + "time" // Only used for time types. 
jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/lease/client_validation_test.go juju-core-2.0.0/src/github.com/juju/juju/state/lease/client_validation_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/lease/client_validation_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/lease/client_validation_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,7 +4,7 @@ package lease_test import ( - "time" + "time" // Only used for time types. gc "gopkg.in/check.v1" diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/lease/fixture_test.go juju-core-2.0.0/src/github.com/juju/juju/state/lease/fixture_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/lease/fixture_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/lease/fixture_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -5,7 +5,7 @@ import ( "fmt" - "time" + "time" // Only used for time types and Parse(). jujutesting "github.com/juju/testing" jc "github.com/juju/testing/checkers" diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/lease/skew_test.go juju-core-2.0.0/src/github.com/juju/juju/state/lease/skew_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/lease/skew_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/lease/skew_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,12 +4,13 @@ package lease_test import ( - "time" + "time" // Only used for time types. "github.com/juju/testing" gc "gopkg.in/check.v1" "github.com/juju/juju/state/lease" + coretesting "github.com/juju/juju/testing" ) type SkewSuite struct { @@ -19,7 +20,7 @@ var _ = gc.Suite(&SkewSuite{}) func (s *SkewSuite) TestZero(c *gc.C) { - now := time.Now() + now := coretesting.ZeroTime() // The zero Skew should act as unskewed. 
skew := lease.Skew{} @@ -29,7 +30,7 @@ } func (s *SkewSuite) TestApparentPastWrite(c *gc.C) { - now := time.Now() + now := coretesting.ZeroTime() c.Logf("now: %s", now) oneSecondAgo := now.Add(-time.Second) threeSecondsAgo := now.Add(-3 * time.Second) @@ -60,7 +61,7 @@ } func (s *SkewSuite) TestApparentFutureWrite(c *gc.C) { - now := time.Now() + now := coretesting.ZeroTime() c.Logf("now: %s", now) oneSecondAgo := now.Add(-time.Second) threeSecondsAgo := now.Add(-3 * time.Second) @@ -92,7 +93,7 @@ } func (s *SkewSuite) TestBracketedWrite(c *gc.C) { - now := time.Now() + now := coretesting.ZeroTime() c.Logf("now: %s", now) oneSecondAgo := now.Add(-time.Second) twoSecondsAgo := now.Add(-2 * time.Second) @@ -130,7 +131,7 @@ // This is a straight copy of TestBracketedWrite, with strange timezones // inserted to check that they don't affect the results at all. - now := time.Now() + now := coretesting.ZeroTime() c.Logf("now: %s", now) oneSecondAgo := now.Add(-time.Second) twoSecondsAgo := now.Add(-2 * time.Second) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/life.go juju-core-2.0.0/src/github.com/juju/juju/state/life.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/life.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/life.go 2016-10-13 14:31:49.000000000 +0000 @@ -41,7 +41,9 @@ errDeadOrGone = errors.New("neither alive nor dying") errAlreadyDying = errors.New("already dying") + errAlreadyDead = errors.New("already dead") errAlreadyRemoved = errors.New("already removed") + errNotDying = errors.New("not dying") ) // Living describes state entities with a lifecycle. 
diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/life_ns.go juju-core-2.0.0/src/github.com/juju/juju/state/life_ns.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/life_ns.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/life_ns.go 2016-10-13 14:31:49.000000000 +0000 @@ -1,7 +1,11 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + package state import ( "github.com/juju/errors" + "gopkg.in/mgo.v2" "gopkg.in/mgo.v2/bson" "gopkg.in/mgo.v2/txn" @@ -20,25 +24,133 @@ // to be good ideas, and should ideally be extended as we continue. var nsLife = nsLife_{} +// destroyOp returns errNotAlive if the identified entity is not Alive; +// or a txn.Op that will fail if the condition no longer holds, and +// otherwise set Life to Dying and make any other updates supplied in +// update. +func (nsLife_) destroyOp(entities mongo.Collection, docID string, update bson.D) (txn.Op, error) { + op, err := nsLife.aliveOp(entities, docID) + if err != nil { + return txn.Op{}, errors.Trace(err) + } + setDying := bson.D{{"$set", bson.D{{"life", Dying}}}} + op.Update = append(setDying, update...) + return op, nil +} + +// dieOp returns errNotDying if the identified entity is Alive, or +// errAlreadyDead if the entity is Dead or gone; or a txn.Op that will +// fail if the condition no longer holds, and otherwise set Life to +// Dead, and make any other updates supplied in update. 
+func (nsLife_) dieOp(entities mongo.Collection, docID string, update bson.D) (txn.Op, error) { + life, err := nsLife.read(entities, docID) + if errors.IsNotFound(err) { + return txn.Op{}, errAlreadyDead + } else if err != nil { + return txn.Op{}, errors.Trace(err) + } + switch life { + case Alive: + return txn.Op{}, errNotDying + case Dead: + return txn.Op{}, errAlreadyDead + } + setDead := bson.D{{"$set", bson.D{{"life", Dead}}}} + return txn.Op{ + C: entities.Name(), + Id: docID, + Assert: nsLife.dying(), + Update: append(setDead, update...), + }, nil +} + +// aliveOp returns errNotAlive if the identified entity is not Alive; or +// a txn.Op that will fail if the condition no longer holds. +func (nsLife_) aliveOp(entities mongo.Collection, docID string) (txn.Op, error) { + op, err := nsLife.checkOp(entities, docID, nsLife.alive()) + switch errors.Cause(err) { + case nil: + case errCheckFailed: + return txn.Op{}, errNotAlive + default: + return txn.Op{}, errors.Trace(err) + } + return op, nil +} + +// dyingOp returns errNotDying if the identified entity is not Dying; or +// a txn.Op that will fail if the condition no longer holds. +func (nsLife_) dyingOp(entities mongo.Collection, docID string) (txn.Op, error) { + op, err := nsLife.checkOp(entities, docID, nsLife.dying()) + switch errors.Cause(err) { + case nil: + case errCheckFailed: + return txn.Op{}, errNotDying + default: + return txn.Op{}, errors.Trace(err) + } + return op, nil +} + // notDeadOp returns errDeadOrGone if the identified entity is not Alive // or Dying, or a txn.Op that will fail if the condition no longer // holds. func (nsLife_) notDeadOp(entities mongo.Collection, docID string) (txn.Op, error) { - notDead := nsLife.notDead() - sel := append(bson.D{{"_id", docID}}, notDead...) 
+ op, err := nsLife.checkOp(entities, docID, nsLife.notDead()) + switch errors.Cause(err) { + case nil: + case errCheckFailed: + return txn.Op{}, errDeadOrGone + default: + return txn.Op{}, errors.Trace(err) + } + return op, nil +} + +var errCheckFailed = errors.New("check failed") + +func (nsLife_) checkOp(entities mongo.Collection, docID string, check bson.D) (txn.Op, error) { + sel := append(bson.D{{"_id", docID}}, check...) count, err := entities.Find(sel).Count() if err != nil { return txn.Op{}, errors.Trace(err) } else if count == 0 { - return txn.Op{}, errDeadOrGone + return txn.Op{}, errCheckFailed } return txn.Op{ C: entities.Name(), Id: docID, - Assert: notDead, + Assert: check, }, nil } +func (nsLife_) read(entities mongo.Collection, docID string) (Life, error) { + var doc struct { + Life Life `bson:"life"` + } + err := entities.FindId(docID).One(&doc) + switch errors.Cause(err) { + case nil: + case mgo.ErrNotFound: + return Dead, errors.NotFoundf("entity") + default: + return Dead, errors.Trace(err) + } + return doc.Life, nil +} + +// alive returns a selector that matches only documents whose life +// field is set to Alive. +func (nsLife_) alive() bson.D { + return bson.D{{"life", Alive}} +} + +// dying returns a selector that matches only documents whose life +// field is set to Dying. +func (nsLife_) dying() bson.D { + return bson.D{{"life", Dying}} +} + // notDead returns a selector that matches only documents whose life // field is not set to Dead. 
func (nsLife_) notDead() bson.D { diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/linklayerdevices_test.go juju-core-2.0.0/src/github.com/juju/juju/state/linklayerdevices_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/linklayerdevices_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/linklayerdevices_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -12,6 +12,7 @@ jujutxn "github.com/juju/txn" gc "gopkg.in/check.v1" + "github.com/juju/juju/container" "github.com/juju/juju/instance" "github.com/juju/juju/network" "github.com/juju/juju/state" @@ -488,6 +489,33 @@ } } +func (s *linkLayerDevicesStateSuite) TestMachineAllProviderInterfaceInfos(c *gc.C) { + err := s.machine.SetLinkLayerDevices(state.LinkLayerDeviceArgs{ + Name: "sara-lynn", + MACAddress: "ab:cd:ef:01:23:45", + ProviderID: "thing1", + Type: state.EthernetDevice, + }, state.LinkLayerDeviceArgs{ + Name: "bojack", + MACAddress: "ab:cd:ef:01:23:46", + ProviderID: "thing2", + Type: state.EthernetDevice, + }) + c.Assert(err, jc.ErrorIsNil) + + results, err := s.machine.AllProviderInterfaceInfos() + c.Assert(err, jc.ErrorIsNil) + c.Assert(results, jc.SameContents, []network.ProviderInterfaceInfo{{ + InterfaceName: "sara-lynn", + MACAddress: "ab:cd:ef:01:23:45", + ProviderId: "thing1", + }, { + InterfaceName: "bojack", + MACAddress: "ab:cd:ef:01:23:46", + ProviderId: "thing2", + }}) +} + func (s *linkLayerDevicesStateSuite) assertNoDevicesOnMachine(c *gc.C, machine *state.Machine) { s.assertAllLinkLayerDevicesOnMachineMatchCount(c, machine, 0) } @@ -752,6 +780,10 @@ } func (s *linkLayerDevicesStateSuite) TestSetLinkLayerDevicesAllowsParentBridgeDeviceForContainerDevice(c *gc.C) { + // Add default bridges per container type to ensure they will not be skipped + // when deciding which host bridges to use for the container NICs. 
+ s.addParentBridgeDeviceWithContainerDevicesAsChildren(c, container.DefaultLxdBridge, "vethX", 1) + s.addParentBridgeDeviceWithContainerDevicesAsChildren(c, container.DefaultKvmBridge, "vethY", 1) parentDevice, _ := s.addParentBridgeDeviceWithContainerDevicesAsChildren(c, "br-eth1.250", "eth", 1) childDevice, err := s.containerMachine.LinkLayerDevice("eth0") c.Assert(err, jc.ErrorIsNil) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/logs.go juju-core-2.0.0/src/github.com/juju/juju/state/logs.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/logs.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/logs.go 2016-10-13 14:31:49.000000000 +0000 @@ -21,7 +21,7 @@ "gopkg.in/juju/names.v2" "gopkg.in/mgo.v2" "gopkg.in/mgo.v2/bson" - "launchpad.net/tomb" + "gopkg.in/tomb.v1" "github.com/juju/juju/mongo" ) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/logs_test.go juju-core-2.0.0/src/github.com/juju/juju/state/logs_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/logs_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/logs_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -121,10 +121,7 @@ } func (s *LogsSuite) TestAllLastSentLogTrackerSetGet(c *gc.C) { - st, err := s.State.ForModel(names.NewModelTag(s.State.ControllerUUID())) - c.Assert(err, jc.ErrorIsNil) - defer st.Close() - tracker, err := state.NewAllLastSentLogTracker(st, "test-sink") + tracker, err := state.NewAllLastSentLogTracker(s.State, "test-sink") c.Assert(err, jc.ErrorIsNil) defer tracker.Close() @@ -170,7 +167,7 @@ func (s *LogsSuite) TestDbLogger(c *gc.C) { logger := state.NewDbLogger(s.State, names.NewMachineTag("22"), jujuversion.Current) defer logger.Close() - t0 := time.Now().Truncate(time.Millisecond) // MongoDB only stores timestamps with ms precision. + t0 := coretesting.ZeroTime().Truncate(time.Millisecond) // MongoDB only stores timestamps with ms precision. 
logger.Log(t0, "some.where", "foo.go:99", loggo.INFO, "all is well") t1 := t0.Add(time.Second) logger.Log(t1, "else.where", "bar.go:42", loggo.ERROR, "oh noes") @@ -205,7 +202,7 @@ c.Assert(err, jc.ErrorIsNil) } - now := time.Now() + now := coretesting.NonZeroTime() maxLogTime := now.Add(-time.Minute) log(now, "keep") log(maxLogTime.Add(time.Second), "keep") @@ -230,7 +227,7 @@ func (s *LogsSuite) TestPruneLogsBySize(c *gc.C) { // Set up 3 models and generate different amounts of logs // for them. - now := time.Now().Truncate(time.Millisecond) + now := coretesting.NonZeroTime().Truncate(time.Millisecond) s0 := s.State startingLogsS0 := 10 @@ -247,7 +244,7 @@ s.generateLogs(c, s2, now, startingLogsS2) // Prune logs collection back to 1 MiB. - tsNoPrune := time.Now().Add(-3 * 24 * time.Hour) + tsNoPrune := coretesting.NonZeroTime().Add(-3 * 24 * time.Hour) err := state.PruneLogs(s.State, tsNoPrune, 1) c.Assert(err, jc.ErrorIsNil) @@ -290,7 +287,7 @@ } type LogTailerSuite struct { - ConnSuite + ConnWithWallClockSuite logsColl *mgo.Collection oplogColl *mgo.Collection otherState *state.State @@ -299,7 +296,7 @@ var _ = gc.Suite(&LogTailerSuite{}) func (s *LogTailerSuite) SetUpTest(c *gc.C) { - s.ConnSuite.SetUpTest(c) + s.ConnWithWallClockSuite.SetUpTest(c) session := s.State.MongoSession() s.logsColl = session.DB("logs").C("logs") @@ -323,7 +320,7 @@ func (s *LogTailerSuite) TestTimeFiltering(c *gc.C) { // Add 10 logs that shouldn't be returned. - threshT := time.Now() + threshT := coretesting.NonZeroTime() s.writeLogsT(c, threshT.Add(-5*time.Second), threshT.Add(-time.Millisecond), 5, logTemplate{Message: "dont want"}, @@ -543,7 +540,7 @@ s.writeLogs(c, 2, expected) // Write a log entry that's only in the oplog. 
- doc := s.logTemplateToDoc(logTemplate{Message: "dont want"}, time.Now()) + doc := s.logTemplateToDoc(logTemplate{Message: "dont want"}, coretesting.ZeroTime()) err := s.writeLogToOplog(doc) c.Assert(err, jc.ErrorIsNil) @@ -769,7 +766,7 @@ // the supplied template. As well as writing to the logs collection, // entries are also made into the fake oplog collection. func (s *LogTailerSuite) writeLogs(c *gc.C, count int, lt logTemplate) { - t := time.Now() + t := coretesting.ZeroTime() s.writeLogsT(c, t, t, count, lt) } @@ -793,9 +790,9 @@ // oplog collection. func (s *LogTailerSuite) writeLogToOplog(doc interface{}) error { return s.oplogColl.Insert(bson.D{ - {"ts", bson.MongoTimestamp(time.Now().Unix() << 32)}, // an approximation which will do - {"h", rand.Int63()}, // again, a suitable fake - {"op", "i"}, // this will always be an insert + {"ts", bson.MongoTimestamp(coretesting.ZeroTime().Unix() << 32)}, // an approximation which will do + {"h", rand.Int63()}, // again, a suitable fake + {"op", "i"}, // this will always be an insert {"ns", "logs.logs"}, {"o", doc}, }) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/machine.go juju-core-2.0.0/src/github.com/juju/juju/state/machine.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/machine.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/machine.go 2016-10-13 14:31:49.000000000 +0000 @@ -116,10 +116,6 @@ PasswordHash string Clean bool - // TODO(axw) 2015-06-22 #1467379 - // We need an upgrade step to populate "volumes" and "filesystems" - // for entities created in 1.24. - // // Volumes contains the names of volumes attached to the machine. Volumes []string `bson:"volumes,omitempty"` // Filesystems contains the names of filesystems attached to the machine. 
@@ -490,7 +486,7 @@ C: machinesC, Id: m.doc.DocID, Assert: bson.D{{"jobs", bson.D{{"$nin", []MachineJob{JobManageModel}}}}}, - }, m.st.newCleanupOp(cleanupForceDestroyedMachine, m.doc.Id)}, nil + }, newCleanupOp(cleanupForceDestroyedMachine, m.doc.Id)}, nil } // EnsureDead sets the machine lifecycle to Dead if it is Alive or Dying. @@ -627,7 +623,7 @@ {{"principals", bson.D{{"$exists", false}}}}, }, } - cleanupOp := m.st.newCleanupOp(cleanupDyingMachine, m.doc.Id) + cleanupOp := newCleanupOp(cleanupDyingMachine, m.doc.Id) // multiple attempts: one with original data, one with refreshed data, and a final // one intended to determine the cause of failure of the preceding attempt. buildTxn := func(attempt int) ([]txn.Op, error) { @@ -1430,10 +1426,20 @@ *field = stateAddresses if changedPrivate { + oldPrivate := m.doc.PreferredPrivateAddress.networkAddress() m.doc.PreferredPrivateAddress = newPrivate + logger.Infof( + "machine %q preferred private address changed from %q to %q", + m.Id(), oldPrivate, newPrivate.networkAddress(), + ) } if changedPublic { + oldPublic := m.doc.PreferredPublicAddress.networkAddress() m.doc.PreferredPublicAddress = newPublic + logger.Infof( + "machine %q preferred public address changed from %q to %q", + m.Id(), oldPublic, newPublic.networkAddress(), + ) } return nil } @@ -1520,12 +1526,12 @@ // SetStatus sets the status of the machine. func (m *Machine) SetStatus(statusInfo status.StatusInfo) error { switch statusInfo.Status { - case status.StatusStarted, status.StatusStopped: - case status.StatusError: + case status.Started, status.Stopped: + case status.Error: if statusInfo.Message == "" { return errors.Errorf("cannot set status %q without info", statusInfo.Status) } - case status.StatusPending: + case status.Pending: // If a machine is not yet provisioned, we allow its status // to be set back to pending (when a retry is to occur). 
_, err := m.InstanceId() @@ -1534,7 +1540,7 @@ break } fallthrough - case status.StatusDown: + case status.Down: return errors.Errorf("cannot set status %q", statusInfo.Status) default: return errors.Errorf("cannot set invalid status %q", statusInfo.Status) @@ -1650,12 +1656,11 @@ logger.Errorf("finding status of container %v to mark as invalid: %v", containerId, err) continue } - if statusInfo.Status == status.StatusPending { + if statusInfo.Status == status.Pending { containerType := ContainerTypeFromId(containerId) - // TODO(perrito666) 2016-05-02 lp:1558657 - now := time.Now() + now := m.st.clock.Now() s := status.StatusInfo{ - Status: status.StatusError, + Status: status.Error, Message: "unsupported container", Data: map[string]interface{}{"type": containerType}, Since: &now, diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/machine_linklayerdevices.go juju-core-2.0.0/src/github.com/juju/juju/state/machine_linklayerdevices.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/machine_linklayerdevices.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/machine_linklayerdevices.go 2016-10-13 14:31:49.000000000 +0000 @@ -14,6 +14,7 @@ "gopkg.in/mgo.v2/bson" "gopkg.in/mgo.v2/txn" + "github.com/juju/juju/container" "github.com/juju/juju/network" ) @@ -80,6 +81,24 @@ return errors.Trace(iter.Close()) } +// AllProviderInterfaceInfos returns the provider details for all of +// the link layer devices belonging to this machine. These can be used +// to identify the devices when interacting with the provider +// directly (for example, releasing container addresses). 
+func (m *Machine) AllProviderInterfaceInfos() ([]network.ProviderInterfaceInfo, error) { + devices, err := m.AllLinkLayerDevices() + if err != nil { + return nil, errors.Trace(err) + } + result := make([]network.ProviderInterfaceInfo, len(devices)) + for i, device := range devices { + result[i].InterfaceName = device.Name() + result[i].MACAddress = device.MACAddress() + result[i].ProviderId = device.ProviderID() + } + return result, nil +} + // RemoveAllLinkLayerDevices removes all existing link-layer devices of the // machine in a single transaction. No error is returned when some or all of the // devices were already removed. @@ -904,9 +923,19 @@ bridgeDeviceNames := make([]string, 0, len(allDevices)) for _, hostDevice := range allDevices { - if hostDevice.Type() == BridgeDevice { - bridgeDevicesByName[hostDevice.Name()] = hostDevice - bridgeDeviceNames = append(bridgeDeviceNames, hostDevice.Name()) + deviceType, name := hostDevice.Type(), hostDevice.Name() + // Since the default bridges (for each container type) are + // machine-local, and there's neither a way (at least not yet) nor any + // point in allocating addresses from the (machine-local) subnets + // configured on those bridges, we need to ignore them below. 
+ if deviceType == BridgeDevice { + switch name { + case container.DefaultLxdBridge, container.DefaultKvmBridge: + logger.Debugf("skipping host bridge %q", name) + continue + } + bridgeDevicesByName[name] = hostDevice + bridgeDeviceNames = append(bridgeDeviceNames, name) } } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/machine_test.go juju-core-2.0.0/src/github.com/juju/juju/state/machine_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/machine_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/machine_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -5,7 +5,6 @@ import ( "sort" - "time" "github.com/juju/errors" "github.com/juju/loggo" @@ -604,7 +603,7 @@ func (s *MachineSuite) TestSetMongoPassword(c *gc.C) { info := testing.NewMongoInfo() - st, err := state.Open(s.modelTag, info, mongotest.DialOpts(), state.NewPolicyFunc(nil)) + st, err := state.Open(s.modelTag, s.State.ControllerTag(), info, mongotest.DialOpts(), state.NewPolicyFunc(nil)) c.Assert(err, jc.ErrorIsNil) defer func() { // Remove the admin password so that the test harness can reset the state. @@ -629,13 +628,13 @@ // Check that we cannot log in with the wrong password. info.Tag = ent.Tag() info.Password = "bar" - err = tryOpenState(s.modelTag, info) + err = tryOpenState(s.modelTag, s.State.ControllerTag(), info) c.Check(errors.Cause(err), jc.Satisfies, errors.IsUnauthorized) c.Check(err, gc.ErrorMatches, `cannot log in to admin database as "machine-0": unauthorized mongo access: .*`) // Check that we can log in with the correct password. info.Password = "foo" - st1, err := state.Open(s.modelTag, info, mongotest.DialOpts(), state.NewPolicyFunc(nil)) + st1, err := state.Open(s.modelTag, s.State.ControllerTag(), info, mongotest.DialOpts(), state.NewPolicyFunc(nil)) c.Assert(err, jc.ErrorIsNil) defer st1.Close() @@ -648,18 +647,18 @@ // Check that we cannot log in with the old password. 
info.Password = "foo" - err = tryOpenState(s.modelTag, info) + err = tryOpenState(s.modelTag, s.State.ControllerTag(), info) c.Check(errors.Cause(err), jc.Satisfies, errors.IsUnauthorized) c.Check(err, gc.ErrorMatches, `cannot log in to admin database as "machine-0": unauthorized mongo access: .*`) // Check that we can log in with the correct password. info.Password = "bar" - err = tryOpenState(s.modelTag, info) + err = tryOpenState(s.modelTag, s.State.ControllerTag(), info) c.Assert(err, jc.ErrorIsNil) // Check that the administrator can still log in. info.Tag, info.Password = nil, "admin-secret" - err = tryOpenState(s.modelTag, info) + err = tryOpenState(s.modelTag, s.State.ControllerTag(), info) c.Assert(err, jc.ErrorIsNil) } @@ -901,9 +900,9 @@ err := s.machine.SetProvisioned("umbrella/0", "fake_nonce", nil) c.Assert(err, jc.ErrorIsNil) - now := time.Now() + now := coretesting.ZeroTime() sInfo := status.StatusInfo{ - Status: status.StatusRunning, + Status: status.Running, Message: "alive", Since: &now, } @@ -915,7 +914,7 @@ c.Assert(err, jc.ErrorIsNil) machineStatus, err := s.machine.InstanceStatus() c.Assert(err, jc.ErrorIsNil) - c.Assert(machineStatus.Status, gc.DeepEquals, status.StatusRunning) + c.Assert(machineStatus.Status, gc.DeepEquals, status.Running) c.Assert(machineStatus.Message, gc.DeepEquals, "alive") } @@ -1116,7 +1115,7 @@ // Unit.Watch // State.WatchForModelConfigChanges // Unit.WatchConfigSettings - testWatcherDiesWhenStateCloses(c, s.modelTag, func(c *gc.C, st *state.State) waiter { + testWatcherDiesWhenStateCloses(c, s.modelTag, s.State.ControllerTag(), func(c *gc.C, st *state.State) waiter { m, err := st.Machine(s.machine.Id()) c.Assert(err, jc.ErrorIsNil) w := m.Watch() @@ -1154,9 +1153,9 @@ wc.AssertNoChange() // Change the unit; no change. 
- now := time.Now() + now := coretesting.ZeroTime() sInfo := status.StatusInfo{ - Status: status.StatusIdle, + Status: status.Idle, Message: "", Since: &now, } @@ -1190,7 +1189,7 @@ // Change the subordinate; no change. sInfo = status.StatusInfo{ - Status: status.StatusIdle, + Status: status.Idle, Message: "", Since: &now, } @@ -1235,7 +1234,7 @@ func (s *MachineSuite) TestWatchPrincipalUnitsDiesOnStateClose(c *gc.C) { // This test is testing logic in watcher.unitsWatcher, which // is also used by Unit.WatchSubordinateUnits. - testWatcherDiesWhenStateCloses(c, s.modelTag, func(c *gc.C, st *state.State) waiter { + testWatcherDiesWhenStateCloses(c, s.modelTag, s.State.ControllerTag(), func(c *gc.C, st *state.State) waiter { m, err := st.Machine(s.machine.Id()) c.Assert(err, jc.ErrorIsNil) w := m.WatchPrincipalUnits() @@ -1269,9 +1268,9 @@ wc.AssertNoChange() // Change the unit; no change. - now := time.Now() + now := coretesting.ZeroTime() sInfo := status.StatusInfo{ - Status: status.StatusIdle, + Status: status.Idle, Message: "", Since: &now, } @@ -1306,7 +1305,7 @@ // Change the subordinate; no change. sInfo = status.StatusInfo{ - Status: status.StatusIdle, + Status: status.Idle, Message: "", Since: &now, } @@ -1350,7 +1349,7 @@ } func (s *MachineSuite) TestWatchUnitsDiesOnStateClose(c *gc.C) { - testWatcherDiesWhenStateCloses(c, s.modelTag, func(c *gc.C, st *state.State) waiter { + testWatcherDiesWhenStateCloses(c, s.modelTag, s.State.ControllerTag(), func(c *gc.C, st *state.State) waiter { m, err := st.Machine(s.machine.Id()) c.Assert(err, jc.ErrorIsNil) w := m.WatchUnits() @@ -2177,14 +2176,14 @@ c.Assert(err, jc.ErrorIsNil) statusInfo, err := supportedContainer.Status() c.Assert(err, jc.ErrorIsNil) - c.Assert(statusInfo.Status, gc.Equals, status.StatusPending) + c.Assert(statusInfo.Status, gc.Equals, status.Pending) // An unsupported (lxd) container will have an error status. 
err = container.Refresh() c.Assert(err, jc.ErrorIsNil) statusInfo, err = container.Status() c.Assert(err, jc.ErrorIsNil) - c.Assert(statusInfo.Status, gc.Equals, status.StatusError) + c.Assert(statusInfo.Status, gc.Equals, status.Error) c.Assert(statusInfo.Message, gc.Equals, "unsupported container") c.Assert(statusInfo.Data, gc.DeepEquals, map[string]interface{}{"type": "lxd"}) } @@ -2212,7 +2211,7 @@ c.Assert(err, jc.ErrorIsNil) statusInfo, err := container.Status() c.Assert(err, jc.ErrorIsNil) - c.Assert(statusInfo.Status, gc.Equals, status.StatusError) + c.Assert(statusInfo.Status, gc.Equals, status.Error) c.Assert(statusInfo.Message, gc.Equals, "unsupported container") containerType := state.ContainerTypeFromId(container.Id()) c.Assert(statusInfo.Data, gc.DeepEquals, map[string]interface{}{"type": string(containerType)}) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/meterstatus_test.go juju-core-2.0.0/src/github.com/juju/juju/state/meterstatus_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/meterstatus_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/meterstatus_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -108,7 +108,7 @@ c.Assert(err, jc.ErrorIsNil) watcher := s.unit.WatchMeterStatus() assertMeterStatusChanged(c, watcher) - err = mm.SetLastSuccessfulSend(time.Now()) + err = mm.SetLastSuccessfulSend(testing.NonZeroTime()) c.Assert(err, jc.ErrorIsNil) for i := 0; i < 3; i++ { err := mm.IncrementConsecutiveErrors() @@ -124,7 +124,7 @@ c.Assert(err, jc.ErrorIsNil) watcher := s.unit.WatchMeterStatus() assertMeterStatusChanged(c, watcher) - err = mm.SetLastSuccessfulSend(time.Now()) + err = mm.SetLastSuccessfulSend(testing.NonZeroTime()) c.Assert(err, jc.ErrorIsNil) for i := 0; i < 3; i++ { err := mm.IncrementConsecutiveErrors() @@ -155,7 +155,7 @@ } func assertMetricsManagerAmberState(c *gc.C, metricsManager *state.MetricsManager) { - err := 
metricsManager.SetLastSuccessfulSend(time.Now()) + err := metricsManager.SetLastSuccessfulSend(testing.NonZeroTime()) c.Assert(err, jc.ErrorIsNil) for i := 0; i < 3; i++ { err := metricsManager.IncrementConsecutiveErrors() @@ -168,7 +168,7 @@ // TODO (mattyw) This function could be moved into a metricsmanager testing package. func assertMetricsManagerRedState(c *gc.C, metricsManager *state.MetricsManager) { // To enter the red state we need to set a last successful send as over 1 week ago - err := metricsManager.SetLastSuccessfulSend(time.Now().Add(-8 * 24 * time.Hour)) + err := metricsManager.SetLastSuccessfulSend(testing.NonZeroTime().Add(-8 * 24 * time.Hour)) c.Assert(err, jc.ErrorIsNil) for i := 0; i < 3; i++ { err := metricsManager.IncrementConsecutiveErrors() diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/metrics.go juju-core-2.0.0/src/github.com/juju/juju/state/metrics.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/metrics.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/metrics.go 2016-10-13 14:31:49.000000000 +0000 @@ -5,6 +5,7 @@ import ( "encoding/json" + "sort" "time" "github.com/juju/errors" @@ -36,7 +37,7 @@ UUID string `bson:"_id"` ModelUUID string `bson:"model-uuid"` Unit string `bson:"unit"` - CharmUrl string `bson:"charmurl"` + CharmURL string `bson:"charmurl"` Sent bool `bson:"sent"` DeleteTime time.Time `bson:"delete-time"` Created time.Time `bson:"created"` @@ -51,13 +52,21 @@ Time time.Time `bson:"time"` } +type byTime []Metric + +func (t byTime) Len() int { return len(t) } +func (t byTime) Swap(i, j int) { t[i], t[j] = t[j], t[i] } +func (t byTime) Less(i, j int) bool { + return t[i].Time.Before(t[j].Time) +} + // validate checks that the MetricBatch contains valid metrics. 
func (m *MetricBatch) validate() error { - charmUrl, err := charm.ParseURL(m.doc.CharmUrl) + charmURL, err := charm.ParseURL(m.doc.CharmURL) if err != nil { return errors.Trace(err) } - chrm, err := m.st.Charm(charmUrl) + chrm, err := m.st.Charm(charmURL) if err != nil { return errors.Trace(err) } @@ -108,7 +117,7 @@ UUID: batch.UUID, ModelUUID: st.ModelUUID(), Unit: batch.Unit.Id(), - CharmUrl: charmURL.String(), + CharmURL: charmURL.String(), Sent: false, Created: batch.Created, Metrics: batch.Metrics, @@ -171,15 +180,11 @@ return results, nil } -func (st *State) queryLocalMetricBatches(query bson.M) ([]MetricBatch, error) { +func (st *State) queryMetricBatches(query bson.M) ([]MetricBatch, error) { c, closer := st.getCollection(metricsC) defer closer() docs := []metricBatchDoc{} - if query == nil { - query = bson.M{} - } - query["charmurl"] = bson.M{"$regex": "^local:"} - err := c.Find(query).All(&docs) + err := c.Find(query).Sort("created").All(&docs) if err != nil { return nil, errors.Trace(err) } @@ -190,13 +195,22 @@ return results, nil } -// MetricBatchesUnit returns metric batches for the given unit. +// MetricBatchesForUnit returns metric batches for the given unit. func (st *State) MetricBatchesForUnit(unit string) ([]MetricBatch, error) { - return st.queryLocalMetricBatches(bson.M{"unit": unit}) + _, err := st.Unit(unit) + if err != nil { + return nil, errors.Trace(err) + } + return st.queryMetricBatches(bson.M{"unit": unit}) } -// MetricBatchesUnit returns metric batches for the given application. -func (st *State) MetricBatchesForService(application string) ([]MetricBatch, error) { +// MetricBatchesForModel returns metric batches for all the units in the model. +func (st *State) MetricBatchesForModel() ([]MetricBatch, error) { + return st.queryMetricBatches(bson.M{"model-uuid": st.ModelUUID()}) +} + +// MetricBatchesForApplication returns metric batches for the given application. 
+func (st *State) MetricBatchesForApplication(application string) ([]MetricBatch, error) { svc, err := st.Application(application) if err != nil { return nil, errors.Trace(err) @@ -209,7 +223,7 @@ for i, u := range units { unitNames[i] = bson.M{"unit": u.Name()} } - return st.queryLocalMetricBatches(bson.M{"$or": unitNames}) + return st.queryMetricBatches(bson.M{"$or": unitNames}) } // MetricBatch returns the metric batch with the given id. @@ -230,8 +244,7 @@ // CleanupOldMetrics looks for metrics that are 24 hours old (or older) // and have been sent. Any metrics it finds are deleted. func (st *State) CleanupOldMetrics() error { - // TODO(fwereade): 2016-03-17 lp:1558657 - now := time.Now() + now := st.clock.Now() metrics, closer := st.getCollection(metricsC) defer closer() // Nothing else in the system will interact with sent metrics, and nothing needs @@ -240,6 +253,7 @@ metricsW := metrics.Writeable() // TODO (mattyw) iter over this. info, err := metricsW.RemoveAll(bson.M{ + "model-uuid": st.ModelUUID(), "sent": true, "delete-time": bson.M{"$lte": now}, }) @@ -255,9 +269,12 @@ var docs []metricBatchDoc c, closer := st.getCollection(metricsC) defer closer() - err := c.Find(bson.M{ - "sent": false, - }).Limit(batchSize).All(&docs) + + q := bson.M{ + "model-uuid": st.ModelUUID(), + "sent": false, + } + err := c.Find(q).Limit(batchSize).All(&docs) if err != nil { return nil, errors.Trace(err) } @@ -277,7 +294,8 @@ c, closer := st.getCollection(metricsC) defer closer() return c.Find(bson.M{ - "sent": false, + "model-uuid": st.ModelUUID(), + "sent": false, }).Count() } @@ -288,7 +306,8 @@ c, closer := st.getCollection(metricsC) defer closer() return c.Find(bson.M{ - "sent": true, + "model-uuid": st.ModelUUID(), + "sent": true, }).Count() } @@ -315,7 +334,7 @@ // CharmURL returns the charm url for the charm this metric was generated in. 
func (m *MetricBatch) CharmURL() string { - return m.doc.CharmUrl + return m.doc.CharmURL } // Created returns the time this metric batch was created. @@ -336,6 +355,24 @@ return result } +// UniqueMetrics returns only the last value for each +// metric key in this batch. +func (m *MetricBatch) UniqueMetrics() []Metric { + metrics := m.Metrics() + sort.Sort(byTime(metrics)) + uniq := map[string]Metric{} + for _, m := range metrics { + uniq[m.Key] = m + } + results := make([]Metric, len(uniq)) + i := 0 + for _, m := range uniq { + results[i] = m + i++ + } + return results +} + // SetSent marks the metric has having been sent at // the specified time. func (m *MetricBatch) SetSent(t time.Time) error { @@ -370,8 +407,7 @@ // SetMetricBatchesSent sets sent on each MetricBatch corresponding to the uuids provided. func (st *State) SetMetricBatchesSent(batchUUIDs []string) error { - // TODO(fwereade): 2016-03-17 lp:1558657 - deleteTime := time.Now().UTC().Add(CleanupAge) + deleteTime := st.clock.Now().UTC().Add(CleanupAge) ops := setSentOps(batchUUIDs, deleteTime) if err := st.runTransaction(ops); err != nil { return errors.Annotatef(err, "cannot set metric sent in bulk call") diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/metricsmanager.go juju-core-2.0.0/src/github.com/juju/juju/state/metricsmanager.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/metricsmanager.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/metricsmanager.go 2016-10-13 14:31:49.000000000 +0000 @@ -152,8 +152,7 @@ } func (m *MetricsManager) gracePeriodExceeded() bool { - // TODO(fwereade): 2016-03-17 lp:1558657 - now := time.Now() + now := m.st.clock.Now() t := m.LastSuccessfulSend().Add(m.GracePeriod()) return t.Before(now) || t.Equal(now) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/metricsmanager_test.go juju-core-2.0.0/src/github.com/juju/juju/state/metricsmanager_test.go --- 
juju-core-2.0~beta15/src/github.com/juju/juju/state/metricsmanager_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/metricsmanager_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -11,6 +11,7 @@ "github.com/juju/juju/state" testing "github.com/juju/juju/state/testing" + coretesting "github.com/juju/juju/testing" ) type metricsManagerSuite struct { @@ -42,7 +43,7 @@ c.Assert(err, jc.ErrorIsNil) err = mm.IncrementConsecutiveErrors() c.Assert(err, jc.ErrorIsNil) - now := time.Now().Round(time.Second).UTC() + now := coretesting.ZeroTime().Round(time.Second).UTC() err = mm.SetLastSuccessfulSend(now) c.Assert(err, jc.ErrorIsNil) c.Assert(mm.LastSuccessfulSend(), gc.DeepEquals, now) @@ -92,7 +93,7 @@ c.Assert(err, jc.ErrorIsNil) status := mm.MeterStatus() c.Assert(status.Code, gc.Equals, state.MeterGreen) - now := time.Now() + now := coretesting.NonZeroTime() err = mm.SetLastSuccessfulSend(now) c.Assert(err, jc.ErrorIsNil) for i := 0; i < 3; i++ { diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/metrics_test.go juju-core-2.0.0/src/github.com/juju/juju/state/metrics_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/metrics_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/metrics_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -13,13 +13,14 @@ "gopkg.in/juju/names.v2" "github.com/juju/juju/state" + "github.com/juju/juju/testing" "github.com/juju/juju/testing/factory" ) type MetricSuite struct { ConnSuite unit *state.Unit - service *state.Application + application *state.Application meteredCharm *state.Charm } @@ -27,11 +28,13 @@ func (s *MetricSuite) SetUpTest(c *gc.C) { s.ConnSuite.SetUpTest(c) - s.assertAddUnit(c) + s.meteredCharm = s.Factory.MakeCharm(c, &factory.CharmParams{Name: "metered", URL: "cs:quantal/metered"}) + s.application = s.Factory.MakeApplication(c, &factory.ApplicationParams{Charm: s.meteredCharm}) + s.unit = s.Factory.MakeUnit(c, 
&factory.UnitParams{Application: s.application, SetCharmURL: true}) } func (s *MetricSuite) TestAddNoMetrics(c *gc.C) { - now := state.NowToTheSecond() + now := s.State.NowToTheSecond() _, err := s.State.AddMetrics(state.BatchParam{ UUID: utils.MustNewUUID().String(), CharmURL: s.meteredCharm.URL().String(), @@ -53,14 +56,8 @@ c.Assert(err, jc.ErrorIsNil) } -func (s *MetricSuite) assertAddUnit(c *gc.C) { - s.meteredCharm = s.Factory.MakeCharm(c, &factory.CharmParams{Name: "metered", URL: "cs:quantal/metered"}) - s.service = s.Factory.MakeApplication(c, &factory.ApplicationParams{Charm: s.meteredCharm}) - s.unit = s.Factory.MakeUnit(c, &factory.UnitParams{Application: s.service, SetCharmURL: true}) -} - func (s *MetricSuite) TestAddMetric(c *gc.C) { - now := state.NowToTheSecond() + now := s.State.NowToTheSecond() modelUUID := s.State.ModelUUID() m := state.Metric{"pings", "5", now} metricBatch, err := s.State.AddMetrics( @@ -99,7 +96,7 @@ func (s *MetricSuite) TestAddMetricNonExistentUnit(c *gc.C) { removeUnit(c, s.unit) - now := state.NowToTheSecond() + now := s.State.NowToTheSecond() m := state.Metric{"pings", "5", now} unitTag := names.NewUnitTag("test/0") _, err := s.State.AddMetrics( @@ -116,7 +113,7 @@ func (s *MetricSuite) TestAddMetricDeadUnit(c *gc.C) { ensureUnitDead(c, s.unit) - now := state.NowToTheSecond() + now := s.State.NowToTheSecond() m := state.Metric{"pings", "5", now} _, err := s.State.AddMetrics( state.BatchParam{ @@ -131,7 +128,7 @@ } func (s *MetricSuite) TestSetMetricSent(c *gc.C) { - now := state.NowToTheSecond() + now := s.State.NowToTheSecond() m := state.Metric{"pings", "5", now} added, err := s.State.AddMetrics( state.BatchParam{ @@ -145,7 +142,7 @@ c.Assert(err, jc.ErrorIsNil) saved, err := s.State.MetricBatch(added.UUID()) c.Assert(err, jc.ErrorIsNil) - err = saved.SetSent(time.Now()) + err = saved.SetSent(testing.NonZeroTime()) c.Assert(err, jc.ErrorIsNil) c.Assert(saved.Sent(), jc.IsTrue) saved, err = 
s.State.MetricBatch(added.UUID()) @@ -154,8 +151,8 @@ } func (s *MetricSuite) TestCleanupMetrics(c *gc.C) { - oldTime := time.Now().Add(-(time.Hour * 25)) - now := time.Now() + oldTime := testing.NonZeroTime().Add(-(time.Hour * 25)) + now := testing.NonZeroTime() m := state.Metric{"pings", "5", oldTime} oldMetric1, err := s.State.AddMetrics( state.BatchParam{ @@ -167,7 +164,7 @@ }, ) c.Assert(err, jc.ErrorIsNil) - oldMetric1.SetSent(time.Now().Add(-25 * time.Hour)) + oldMetric1.SetSent(testing.NonZeroTime().Add(-25 * time.Hour)) oldMetric2, err := s.State.AddMetrics( state.BatchParam{ @@ -179,7 +176,7 @@ }, ) c.Assert(err, jc.ErrorIsNil) - oldMetric2.SetSent(time.Now().Add(-25 * time.Hour)) + oldMetric2.SetSent(testing.NonZeroTime().Add(-25 * time.Hour)) m = state.Metric{"pings", "5", now} newMetric, err := s.State.AddMetrics( @@ -192,7 +189,7 @@ }, ) c.Assert(err, jc.ErrorIsNil) - newMetric.SetSent(time.Now()) + newMetric.SetSent(testing.NonZeroTime()) err = s.State.CleanupOldMetrics() c.Assert(err, jc.ErrorIsNil) @@ -212,7 +209,7 @@ } func (s *MetricSuite) TestCleanupMetricsIgnoreNotSent(c *gc.C) { - oldTime := time.Now().Add(-(time.Hour * 25)) + oldTime := testing.NonZeroTime().Add(-(time.Hour * 25)) m := state.Metric{"pings", "5", oldTime} oldMetric, err := s.State.AddMetrics( state.BatchParam{ @@ -225,7 +222,7 @@ ) c.Assert(err, jc.ErrorIsNil) - now := time.Now() + now := testing.NonZeroTime() m = state.Metric{"pings", "5", now} newMetric, err := s.State.AddMetrics( state.BatchParam{ @@ -237,7 +234,7 @@ }, ) c.Assert(err, jc.ErrorIsNil) - newMetric.SetSent(time.Now()) + newMetric.SetSent(testing.NonZeroTime()) err = s.State.CleanupOldMetrics() c.Assert(err, jc.ErrorIsNil) @@ -249,7 +246,7 @@ } func (s *MetricSuite) TestAllMetricBatches(c *gc.C) { - now := state.NowToTheSecond() + now := s.State.NowToTheSecond() m := state.Metric{"pings", "5", now} _, err := s.State.AddMetrics( state.BatchParam{ @@ -271,15 +268,15 @@ } func (s *MetricSuite) 
TestAllMetricBatchesCustomCharmURLAndUUID(c *gc.C) { - now := state.NowToTheSecond() + now := s.State.NowToTheSecond() m := state.Metric{"pings", "5", now} uuid := utils.MustNewUUID().String() - charmUrl := "cs:quantal/metered" + charmURL := "cs:quantal/metered" _, err := s.State.AddMetrics( state.BatchParam{ UUID: uuid, Created: now, - CharmURL: charmUrl, + CharmURL: charmURL, Metrics: []state.Metric{m}, Unit: s.unit.UnitTag(), }, @@ -290,15 +287,15 @@ c.Assert(metricBatches, gc.HasLen, 1) c.Assert(metricBatches[0].Unit(), gc.Equals, "metered/0") c.Assert(metricBatches[0].UUID(), gc.Equals, uuid) - c.Assert(metricBatches[0].CharmURL(), gc.Equals, charmUrl) + c.Assert(metricBatches[0].CharmURL(), gc.Equals, charmURL) c.Assert(metricBatches[0].Sent(), jc.IsFalse) c.Assert(metricBatches[0].Metrics(), gc.HasLen, 1) } func (s *MetricSuite) TestMetricCredentials(c *gc.C) { - now := state.NowToTheSecond() + now := s.State.NowToTheSecond() m := state.Metric{"pings", "5", now} - err := s.service.SetMetricCredentials([]byte("hello there")) + err := s.application.SetMetricCredentials([]byte("hello there")) c.Assert(err, gc.IsNil) _, err = s.State.AddMetrics( state.BatchParam{ @@ -319,7 +316,7 @@ // TestCountMetrics asserts the correct values are returned // by CountOfUnsentMetrics and CountOfSentMetrics. 
func (s *MetricSuite) TestCountMetrics(c *gc.C) { - now := time.Now() + now := testing.NonZeroTime() m := []state.Metric{{Key: "pings", Value: "123", Time: now}} s.Factory.MakeMetric(c, &factory.MetricParams{Unit: s.unit, Sent: false, Time: &now, Metrics: m}) s.Factory.MakeMetric(c, &factory.MetricParams{Unit: s.unit, Sent: false, Time: &now, Metrics: m}) @@ -334,7 +331,7 @@ } func (s *MetricSuite) TestSetMetricBatchesSent(c *gc.C) { - now := time.Now() + now := testing.NonZeroTime() metrics := make([]*state.MetricBatch, 3) for i := range metrics { m := []state.Metric{{Key: "pings", Value: "123", Time: now}} @@ -353,7 +350,7 @@ } func (s *MetricSuite) TestMetricsToSend(c *gc.C) { - now := state.NowToTheSecond() + now := s.State.NowToTheSecond() m := []state.Metric{{Key: "pings", Value: "123", Time: now}} s.Factory.MakeMetric(c, &factory.MetricParams{Unit: s.unit, Sent: false, Time: &now, Metrics: m}) s.Factory.MakeMetric(c, &factory.MetricParams{Unit: s.unit, Sent: false, Time: &now, Metrics: m}) @@ -365,7 +362,7 @@ // TestMetricsToSendBatches checks that metrics are properly batched. 
func (s *MetricSuite) TestMetricsToSendBatches(c *gc.C) { - now := state.NowToTheSecond() + now := s.State.NowToTheSecond() for i := 0; i < 6; i++ { m := []state.Metric{{Key: "pings", Value: "123", Time: now}} s.Factory.MakeMetric(c, &factory.MetricParams{Unit: s.unit, Sent: false, Time: &now, Metrics: m}) @@ -391,15 +388,15 @@ func (s *MetricSuite) TestMetricValidation(c *gc.C) { nonMeteredUnit := s.Factory.MakeUnit(c, &factory.UnitParams{SetCharmURL: true}) - meteredService := s.Factory.MakeApplication(c, &factory.ApplicationParams{Name: "metered-service", Charm: s.meteredCharm}) - meteredUnit := s.Factory.MakeUnit(c, &factory.UnitParams{Application: meteredService, SetCharmURL: true}) - dyingUnit, err := meteredService.AddUnit() + meteredApplication := s.Factory.MakeApplication(c, &factory.ApplicationParams{Name: "metered-service", Charm: s.meteredCharm}) + meteredUnit := s.Factory.MakeUnit(c, &factory.UnitParams{Application: meteredApplication, SetCharmURL: true}) + dyingUnit, err := meteredApplication.AddUnit() c.Assert(err, jc.ErrorIsNil) err = dyingUnit.SetCharmURL(s.meteredCharm.URL()) c.Assert(err, jc.ErrorIsNil) err = dyingUnit.Destroy() c.Assert(err, jc.ErrorIsNil) - now := time.Now() + now := testing.NonZeroTime() tests := []struct { about string metrics []state.Metric @@ -467,68 +464,8 @@ } } -func (s *MetricSuite) TestMetricsAcrossEnvironments(c *gc.C) { - now := state.NowToTheSecond().Add(-48 * time.Hour) - m := state.Metric{"pings", "5", now} - m1, err := s.State.AddMetrics( - state.BatchParam{ - UUID: utils.MustNewUUID().String(), - Created: now, - CharmURL: s.meteredCharm.URL().String(), - Metrics: []state.Metric{m}, - Unit: s.unit.UnitTag(), - }, - ) - c.Assert(err, jc.ErrorIsNil) - - st := s.Factory.MakeModel(c, nil) - defer st.Close() - f := factory.NewFactory(st) - meteredCharm := f.MakeCharm(c, &factory.CharmParams{Name: "metered", URL: "cs:quantal/metered"}) - service := f.MakeApplication(c, &factory.ApplicationParams{Charm: meteredCharm}) - 
unit := f.MakeUnit(c, &factory.UnitParams{Application: service, SetCharmURL: true}) - m2, err := s.State.AddMetrics( - state.BatchParam{ - UUID: utils.MustNewUUID().String(), - Created: now, - CharmURL: s.meteredCharm.URL().String(), - Metrics: []state.Metric{m}, - Unit: unit.UnitTag(), - }, - ) - c.Assert(err, jc.ErrorIsNil) - - batches, err := s.State.AllMetricBatches() - c.Assert(err, jc.ErrorIsNil) - c.Assert(batches, gc.HasLen, 2) - - unsent, err := s.State.CountOfUnsentMetrics() - c.Assert(err, jc.ErrorIsNil) - c.Assert(unsent, gc.Equals, 2) - - toSend, err := s.State.MetricsToSend(10) - c.Assert(err, jc.ErrorIsNil) - c.Assert(toSend, gc.HasLen, 2) - - err = m1.SetSent(time.Now().Add(-25 * time.Hour)) - c.Assert(err, jc.ErrorIsNil) - err = m2.SetSent(time.Now().Add(-25 * time.Hour)) - c.Assert(err, jc.ErrorIsNil) - - sent, err := s.State.CountOfSentMetrics() - c.Assert(err, jc.ErrorIsNil) - c.Assert(sent, gc.Equals, 2) - - err = s.State.CleanupOldMetrics() - c.Assert(err, jc.ErrorIsNil) - - batches, err = s.State.AllMetricBatches() - c.Assert(err, jc.ErrorIsNil) - c.Assert(batches, gc.HasLen, 0) -} - func (s *MetricSuite) TestAddMetricDuplicateUUID(c *gc.C) { - now := state.NowToTheSecond() + now := s.State.NowToTheSecond() mUUID := utils.MustNewUUID().String() _, err := s.State.AddMetrics( state.BatchParam{ @@ -577,7 +514,7 @@ } for _, test := range tests { c.Logf("running test: %v", test.about) - now := state.NowToTheSecond() + now := s.State.NowToTheSecond() modelUUID := s.State.ModelUUID() m := state.Metric{"juju-units", test.value, now} metricBatch, err := s.State.AddMetrics( @@ -619,8 +556,8 @@ } } -func (s *MetricSuite) TestUnitMetricBatchesReturnsJustLocal(c *gc.C) { - now := state.NowToTheSecond() +func (s *MetricSuite) TestUnitMetricBatchesMatchesAllCharms(c *gc.C) { + now := s.State.NowToTheSecond() m := state.Metric{"pings", "5", now} _, err := s.State.AddMetrics( state.BatchParam{ @@ -633,8 +570,8 @@ ) c.Assert(err, jc.ErrorIsNil) 
localMeteredCharm := s.Factory.MakeCharm(c, &factory.CharmParams{Name: "metered", URL: "local:quantal/metered"}) - service := s.Factory.MakeApplication(c, &factory.ApplicationParams{Name: "localmetered", Charm: localMeteredCharm}) - unit := s.Factory.MakeUnit(c, &factory.UnitParams{Application: service, SetCharmURL: true}) + application := s.Factory.MakeApplication(c, &factory.ApplicationParams{Name: "localmetered", Charm: localMeteredCharm}) + unit := s.Factory.MakeUnit(c, &factory.UnitParams{Application: application, SetCharmURL: true}) _, err = s.State.AddMetrics( state.BatchParam{ UUID: utils.MustNewUUID().String(), @@ -647,15 +584,25 @@ c.Assert(err, jc.ErrorIsNil) metricBatches, err := s.State.MetricBatchesForUnit("metered/0") - c.Assert(metricBatches, gc.HasLen, 0) + c.Assert(metricBatches, gc.HasLen, 1) metricBatches, err = s.State.MetricBatchesForUnit("localmetered/0") c.Assert(metricBatches, gc.HasLen, 1) } +func (s *MetricSuite) TestNoSuchUnitMetricBatches(c *gc.C) { + _, err := s.State.MetricBatchesForUnit("chimerical-unit/0") + c.Assert(err, gc.ErrorMatches, `unit "chimerical-unit/0" not found`) +} + +func (s *MetricSuite) TestNoSuchApplicationMetricBatches(c *gc.C) { + _, err := s.State.MetricBatchesForApplication("unicorn-app") + c.Assert(err, gc.ErrorMatches, `application "unicorn-app" not found`) +} + type MetricLocalCharmSuite struct { ConnSuite unit *state.Unit - service *state.Application + application *state.Application meteredCharm *state.Charm } @@ -663,17 +610,13 @@ func (s *MetricLocalCharmSuite) SetUpTest(c *gc.C) { s.ConnSuite.SetUpTest(c) - s.assertAddLocalUnit(c) -} - -func (s *MetricLocalCharmSuite) assertAddLocalUnit(c *gc.C) { s.meteredCharm = s.Factory.MakeCharm(c, &factory.CharmParams{Name: "metered", URL: "local:quantal/metered"}) - s.service = s.Factory.MakeApplication(c, &factory.ApplicationParams{Charm: s.meteredCharm}) - s.unit = s.Factory.MakeUnit(c, &factory.UnitParams{Application: s.service, SetCharmURL: true}) + 
s.application = s.Factory.MakeApplication(c, &factory.ApplicationParams{Charm: s.meteredCharm}) + s.unit = s.Factory.MakeUnit(c, &factory.UnitParams{Application: s.application, SetCharmURL: true}) } func (s *MetricLocalCharmSuite) TestUnitMetricBatches(c *gc.C) { - now := state.NowToTheSecond() + now := s.State.NowToTheSecond() m := state.Metric{"pings", "5", now} m2 := state.Metric{"pings", "10", now} _, err := s.State.AddMetrics( @@ -686,7 +629,7 @@ }, ) c.Assert(err, jc.ErrorIsNil) - newUnit, err := s.service.AddUnit() + newUnit, err := s.application.AddUnit() c.Assert(err, jc.ErrorIsNil) _, err = s.State.AddMetrics( state.BatchParam{ @@ -702,24 +645,24 @@ metricBatches, err := s.State.MetricBatchesForUnit("metered/0") c.Assert(err, jc.ErrorIsNil) c.Assert(metricBatches, gc.HasLen, 1) - c.Assert(metricBatches[0].Unit(), gc.Equals, "metered/0") - c.Assert(metricBatches[0].CharmURL(), gc.Equals, "local:quantal/metered") - c.Assert(metricBatches[0].Sent(), jc.IsFalse) + c.Check(metricBatches[0].Unit(), gc.Equals, "metered/0") + c.Check(metricBatches[0].CharmURL(), gc.Equals, "local:quantal/metered") + c.Check(metricBatches[0].Sent(), jc.IsFalse) c.Assert(metricBatches[0].Metrics(), gc.HasLen, 1) - c.Assert(metricBatches[0].Metrics()[0].Value, gc.Equals, "5") + c.Check(metricBatches[0].Metrics()[0].Value, gc.Equals, "5") metricBatches, err = s.State.MetricBatchesForUnit("metered/1") c.Assert(err, jc.ErrorIsNil) c.Assert(metricBatches, gc.HasLen, 1) - c.Assert(metricBatches[0].Unit(), gc.Equals, "metered/1") - c.Assert(metricBatches[0].CharmURL(), gc.Equals, "local:quantal/metered") - c.Assert(metricBatches[0].Sent(), jc.IsFalse) + c.Check(metricBatches[0].Unit(), gc.Equals, "metered/1") + c.Check(metricBatches[0].CharmURL(), gc.Equals, "local:quantal/metered") + c.Check(metricBatches[0].Sent(), jc.IsFalse) c.Assert(metricBatches[0].Metrics(), gc.HasLen, 1) - c.Assert(metricBatches[0].Metrics()[0].Value, gc.Equals, "10") + c.Check(metricBatches[0].Metrics()[0].Value, 
gc.Equals, "10") } -func (s *MetricLocalCharmSuite) TestServiceMetricBatches(c *gc.C) { - now := state.NowToTheSecond() +func (s *MetricLocalCharmSuite) TestApplicationMetricBatches(c *gc.C) { + now := s.State.NowToTheSecond() m := state.Metric{"pings", "5", now} m2 := state.Metric{"pings", "10", now} _, err := s.State.AddMetrics( @@ -732,7 +675,7 @@ }, ) c.Assert(err, jc.ErrorIsNil) - newUnit, err := s.service.AddUnit() + newUnit, err := s.application.AddUnit() c.Assert(err, jc.ErrorIsNil) _, err = s.State.AddMetrics( state.BatchParam{ @@ -745,25 +688,177 @@ ) c.Assert(err, jc.ErrorIsNil) - metricBatches, err := s.State.MetricBatchesForService("metered") + metricBatches, err := s.State.MetricBatchesForApplication("metered") c.Assert(err, jc.ErrorIsNil) c.Assert(metricBatches, gc.HasLen, 2) - c.Assert(metricBatches[0].Unit(), gc.Equals, "metered/0") - c.Assert(metricBatches[0].CharmURL(), gc.Equals, "local:quantal/metered") - c.Assert(metricBatches[0].Sent(), jc.IsFalse) + c.Check(metricBatches[0].Unit(), gc.Equals, "metered/0") + c.Check(metricBatches[0].CharmURL(), gc.Equals, "local:quantal/metered") + c.Check(metricBatches[0].Sent(), jc.IsFalse) c.Assert(metricBatches[0].Metrics(), gc.HasLen, 1) - c.Assert(metricBatches[0].Metrics()[0].Value, gc.Equals, "5") + c.Check(metricBatches[0].Metrics()[0].Value, gc.Equals, "5") - c.Assert(metricBatches[1].Unit(), gc.Equals, "metered/1") - c.Assert(metricBatches[1].CharmURL(), gc.Equals, "local:quantal/metered") - c.Assert(metricBatches[1].Sent(), jc.IsFalse) + c.Check(metricBatches[1].Unit(), gc.Equals, "metered/1") + c.Check(metricBatches[1].CharmURL(), gc.Equals, "local:quantal/metered") + c.Check(metricBatches[1].Sent(), jc.IsFalse) c.Assert(metricBatches[1].Metrics(), gc.HasLen, 1) - c.Assert(metricBatches[1].Metrics()[0].Value, gc.Equals, "10") + c.Check(metricBatches[1].Metrics()[0].Value, gc.Equals, "10") +} + +func (s *MetricLocalCharmSuite) TestModelMetricBatches(c *gc.C) { + now := s.State.NowToTheSecond() + 
// Add 2 metric batches to a single unit. + m := state.Metric{"pings", "5", now} + m2 := state.Metric{"pings", "10", now} + _, err := s.State.AddMetrics( + state.BatchParam{ + UUID: utils.MustNewUUID().String(), + Created: now, + CharmURL: s.meteredCharm.URL().String(), + Metrics: []state.Metric{m}, + Unit: s.unit.UnitTag(), + }, + ) + c.Assert(err, jc.ErrorIsNil) + newUnit, err := s.application.AddUnit() + c.Assert(err, jc.ErrorIsNil) + _, err = s.State.AddMetrics( + state.BatchParam{ + UUID: utils.MustNewUUID().String(), + Created: now.Add(time.Second), + CharmURL: s.meteredCharm.URL().String(), + Metrics: []state.Metric{m2}, + Unit: newUnit.UnitTag(), + }, + ) + c.Assert(err, jc.ErrorIsNil) + + // Create a new model and add a metric batch. + st := s.Factory.MakeModel(c, nil) + defer st.Close() + f := factory.NewFactory(st) + meteredCharm := f.MakeCharm(c, &factory.CharmParams{Name: "metered", URL: "cs:quantal/metered"}) + service := f.MakeApplication(c, &factory.ApplicationParams{Charm: meteredCharm}) + unit := f.MakeUnit(c, &factory.UnitParams{Application: service, SetCharmURL: true}) + _, err = st.AddMetrics( + state.BatchParam{ + UUID: utils.MustNewUUID().String(), + Created: now, + CharmURL: meteredCharm.URL().String(), + Metrics: []state.Metric{m}, + Unit: unit.UnitTag(), + }, + ) + c.Assert(err, jc.ErrorIsNil) + + // We expect 2 metric batches in our first model. 
+ metricBatches, err := s.State.MetricBatchesForModel() + c.Assert(err, jc.ErrorIsNil) + c.Assert(metricBatches, gc.HasLen, 2) + + var first, second state.MetricBatch + for _, m := range metricBatches { + if m.Unit() == "metered/0" { + first = m + } + if m.Unit() == "metered/1" { + second = m + } + } + c.Assert(first, gc.NotNil) + c.Assert(second, gc.NotNil) + + c.Check(first.Unit(), gc.Equals, "metered/0") + c.Check(first.CharmURL(), gc.Equals, "local:quantal/metered") + c.Check(first.ModelUUID(), gc.Equals, s.State.ModelUUID()) + c.Check(first.Sent(), jc.IsFalse) + c.Assert(first.Metrics(), gc.HasLen, 1) + c.Check(first.Metrics()[0].Value, gc.Equals, "5") + + c.Check(second.Unit(), gc.Equals, "metered/1") + c.Check(second.CharmURL(), gc.Equals, "local:quantal/metered") + c.Check(second.ModelUUID(), gc.Equals, s.State.ModelUUID()) + c.Check(second.Sent(), jc.IsFalse) + c.Assert(second.Metrics(), gc.HasLen, 1) + c.Check(second.Metrics()[0].Value, gc.Equals, "10") + + // And a single metric batch in the second model. 
+ metricBatches, err = st.MetricBatchesForModel() + c.Assert(err, jc.ErrorIsNil) + c.Assert(metricBatches, gc.HasLen, 1) +} + +func (s *MetricLocalCharmSuite) TestMetricsSorted(c *gc.C) { + newUnit, err := s.application.AddUnit() + c.Assert(err, jc.ErrorIsNil) + + t0 := time.Date(2016, time.August, 16, 16, 00, 35, 0, time.Local) + var times []time.Time + for i := 0; i < 3; i++ { + times = append(times, t0.Add(time.Minute*time.Duration(i))) + } + + for _, t := range times { + _, err := s.State.AddMetrics( + state.BatchParam{ + UUID: utils.MustNewUUID().String(), + Created: t, + CharmURL: s.meteredCharm.URL().String(), + Metrics: []state.Metric{{"pings", "5", t}}, + Unit: s.unit.UnitTag(), + }, + ) + c.Assert(err, jc.ErrorIsNil) + + _, err = s.State.AddMetrics( + state.BatchParam{ + UUID: utils.MustNewUUID().String(), + Created: t, + CharmURL: s.meteredCharm.URL().String(), + Metrics: []state.Metric{{"pings", "10", t}}, + Unit: newUnit.UnitTag(), + }, + ) + c.Assert(err, jc.ErrorIsNil) + } + + metricBatches, err := s.State.MetricBatchesForUnit("metered/0") + c.Assert(err, jc.ErrorIsNil) + assertMetricBatchesTimeAscending(c, metricBatches) + + metricBatches, err = s.State.MetricBatchesForUnit("metered/1") + c.Assert(err, jc.ErrorIsNil) + assertMetricBatchesTimeAscending(c, metricBatches) + + metricBatches, err = s.State.MetricBatchesForApplication("metered") + c.Assert(err, jc.ErrorIsNil) + assertMetricBatchesTimeAscending(c, metricBatches) + + metricBatches, err = s.State.MetricBatchesForModel() + c.Assert(err, jc.ErrorIsNil) + assertMetricBatchesTimeAscending(c, metricBatches) + +} + +func assertMetricBatchesTimeAscending(c *gc.C, batches []state.MetricBatch) { + var tPrev time.Time + + for i := range batches { + if i > 0 { + afterOrEqualPrev := func(t time.Time) bool { + return t.After(tPrev) || t.Equal(tPrev) + } + desc := gc.Commentf("%+v <= %+v", batches[i-1], batches[i]) + c.Assert(batches[i].Created(), jc.Satisfies, afterOrEqualPrev, desc) + 
c.Assert(batches[i].Metrics(), gc.HasLen, 1) + c.Assert(batches[i].Metrics()[0].Time, jc.Satisfies, afterOrEqualPrev, desc) + } + tPrev = batches[i].Created() + } } -func (s *MetricLocalCharmSuite) TestUnitMetricBatchesReturnsJustLocal(c *gc.C) { - now := state.NowToTheSecond() +func (s *MetricLocalCharmSuite) TestUnitMetricBatchesReturnsAllCharms(c *gc.C) { + now := s.State.NowToTheSecond() m := state.Metric{"pings", "5", now} _, err := s.State.AddMetrics( state.BatchParam{ @@ -776,8 +871,8 @@ ) c.Assert(err, jc.ErrorIsNil) csMeteredCharm := s.Factory.MakeCharm(c, &factory.CharmParams{Name: "metered", URL: "cs:quantal/metered"}) - service := s.Factory.MakeApplication(c, &factory.ApplicationParams{Name: "csmetered", Charm: csMeteredCharm}) - unit := s.Factory.MakeUnit(c, &factory.UnitParams{Application: service, SetCharmURL: true}) + application := s.Factory.MakeApplication(c, &factory.ApplicationParams{Name: "csmetered", Charm: csMeteredCharm}) + unit := s.Factory.MakeUnit(c, &factory.UnitParams{Application: application, SetCharmURL: true}) _, err = s.State.AddMetrics( state.BatchParam{ UUID: utils.MustNewUUID().String(), @@ -792,5 +887,143 @@ metricBatches, err := s.State.MetricBatchesForUnit("metered/0") c.Assert(metricBatches, gc.HasLen, 1) metricBatches, err = s.State.MetricBatchesForUnit("csmetered/0") - c.Assert(metricBatches, gc.HasLen, 0) + c.Assert(metricBatches, gc.HasLen, 1) +} + +func (s *MetricLocalCharmSuite) TestUnique(c *gc.C) { + t0 := s.State.NowToTheSecond() + t1 := t0.Add(time.Second) + batch, err := s.State.AddMetrics( + state.BatchParam{ + UUID: utils.MustNewUUID().String(), + Created: t0, + CharmURL: s.meteredCharm.URL().String(), + Metrics: []state.Metric{{ + Key: "pings", + Value: "1", + Time: t0, + }, { + Key: "pings", + Value: "2", + Time: t1, + }, { + Key: "juju-units", + Value: "1", + Time: t1, + }, { + Key: "juju-units", + Value: "2", + Time: t0, + }}, + Unit: s.unit.UnitTag(), + }, + ) + c.Assert(err, jc.ErrorIsNil) + metrics := 
batch.UniqueMetrics() + c.Assert(metrics, gc.HasLen, 2) + c.Assert(metrics, jc.SameContents, []state.Metric{{ + Key: "pings", + Value: "2", + Time: t1, + }, { + Key: "juju-units", + Value: "1", + Time: t1, + }}) +} + +type modelData struct { + state *state.State + application *state.Application + unit *state.Unit + meteredCharm *state.Charm +} + +type CrossModelMetricSuite struct { + ConnSuite + models []modelData +} + +var _ = gc.Suite(&CrossModelMetricSuite{}) + +func (s *CrossModelMetricSuite) SetUpTest(c *gc.C) { + s.ConnSuite.SetUpTest(c) + // Set up two models. + s.models = make([]modelData, 2) + var cleanup func(*gc.C) + for i := 0; i < 2; i++ { + s.models[i], cleanup = mustCreateMeteredModel(c, s.Factory) + s.AddCleanup(cleanup) + } +} + +func mustCreateMeteredModel(c *gc.C, stateFactory *factory.Factory) (modelData, func(*gc.C)) { + st := stateFactory.MakeModel(c, nil) + localFactory := factory.NewFactory(st) + + meteredCharm := localFactory.MakeCharm(c, &factory.CharmParams{Name: "metered", URL: "cs:quantal/metered"}) + application := localFactory.MakeApplication(c, &factory.ApplicationParams{Charm: meteredCharm}) + unit := localFactory.MakeUnit(c, &factory.UnitParams{Application: application, SetCharmURL: true}) + cleanup := func(*gc.C) { st.Close() } + return modelData{ + state: st, + application: application, + unit: unit, + meteredCharm: meteredCharm, + }, cleanup +} + +func (s *CrossModelMetricSuite) TestMetricsAcrossEnvironments(c *gc.C) { + now := s.State.NowToTheSecond().Add(-48 * time.Hour) + m := state.Metric{"pings", "5", now} + m1, err := s.models[0].state.AddMetrics( + state.BatchParam{ + UUID: utils.MustNewUUID().String(), + Created: now, + CharmURL: s.models[0].meteredCharm.URL().String(), + Metrics: []state.Metric{m}, + Unit: s.models[0].unit.UnitTag(), + }, + ) + c.Assert(err, jc.ErrorIsNil) + + m2, err := s.models[1].state.AddMetrics( + state.BatchParam{ + UUID: utils.MustNewUUID().String(), + Created: now, + CharmURL: 
s.models[1].meteredCharm.URL().String(), + Metrics: []state.Metric{m}, + Unit: s.models[1].unit.UnitTag(), + }, + ) + c.Assert(err, jc.ErrorIsNil) + + batches, err := s.State.AllMetricBatches() + c.Assert(err, jc.ErrorIsNil) + c.Assert(batches, gc.HasLen, 2) + + unsent, err := s.models[0].state.CountOfUnsentMetrics() + c.Assert(err, jc.ErrorIsNil) + c.Assert(unsent, gc.Equals, 1) + + toSend, err := s.models[0].state.MetricsToSend(10) + c.Assert(err, jc.ErrorIsNil) + c.Assert(toSend, gc.HasLen, 1) + + err = m1.SetSent(testing.NonZeroTime().Add(-25 * time.Hour)) + c.Assert(err, jc.ErrorIsNil) + err = m2.SetSent(testing.NonZeroTime().Add(-25 * time.Hour)) + c.Assert(err, jc.ErrorIsNil) + + sent, err := s.models[0].state.CountOfSentMetrics() + c.Assert(err, jc.ErrorIsNil) + c.Assert(sent, gc.Equals, 1) + + err = s.models[0].state.CleanupOldMetrics() + c.Assert(err, jc.ErrorIsNil) + + // The metric from model s.models[1] should still be in place. + batches, err = s.State.AllMetricBatches() + c.Assert(err, jc.ErrorIsNil) + c.Assert(batches, gc.HasLen, 1) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/migration_export.go juju-core-2.0.0/src/github.com/juju/juju/state/migration_export.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/migration_export.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/migration_export.go 2016-10-13 14:31:49.000000000 +0000 @@ -14,6 +14,8 @@ "gopkg.in/mgo.v2/bson" "github.com/juju/juju/core/description" + "github.com/juju/juju/payload" + "github.com/juju/juju/storage/poolmanager" ) // Export the current model for the State. 
@@ -37,6 +39,9 @@ if err := export.readAllSettings(); err != nil { return nil, errors.Trace(err) } + if err := export.readAllStorageConstraints(); err != nil { + return nil, errors.Trace(err) + } if err := export.readAllAnnotations(); err != nil { return nil, errors.Trace(err) } @@ -108,6 +113,14 @@ return nil, errors.Trace(err) } + if err := export.actions(); err != nil { + return nil, errors.Trace(err) + } + + if err := export.cloudimagemetadata(); err != nil { + return nil, errors.Trace(err) + } + if err := export.model.Validate(); err != nil { return nil, errors.Trace(err) } @@ -123,11 +136,12 @@ model description.Model logger loggo.Logger - annotations map[string]annotatorDoc - constraints map[string]bson.M - modelSettings map[string]settingsDoc - status map[string]bson.M - statusHistory map[string][]historicalStatusDoc + annotations map[string]annotatorDoc + constraints map[string]bson.M + modelSettings map[string]settingsDoc + modelStorageConstraints map[string]storageConstraintsDoc + status map[string]bson.M + statusHistory map[string][]historicalStatusDoc // Map of application name to units. Populated as part // of the applications export. 
units map[string][]*Unit @@ -184,7 +198,7 @@ CreatedBy: user.CreatedBy, DateCreated: user.DateCreated, LastConnection: lastConn, - Access: user.Access, + Access: string(user.Access), } e.model.AddUser(arg) } @@ -448,22 +462,22 @@ } e.logger.Debugf("found %d applications", len(applications)) - refcounts, err := e.readAllSettingsRefCounts() + e.units, err = e.readAllUnits() if err != nil { return errors.Trace(err) } - e.units, err = e.readAllUnits() + meterStatus, err := e.readAllMeterStatus() if err != nil { return errors.Trace(err) } - meterStatus, err := e.readAllMeterStatus() + leaders, err := e.st.ApplicationLeaders() if err != nil { return errors.Trace(err) } - leaders, err := e.readApplicationLeaders() + payloads, err := e.readAllPayloads() if err != nil { return errors.Trace(err) } @@ -471,41 +485,84 @@ for _, application := range applications { applicationUnits := e.units[application.Name()] leader := leaders[application.Name()] - if err := e.addApplication(application, refcounts, applicationUnits, meterStatus, leader); err != nil { + if err := e.addApplication(addApplicationContext{ + application: application, + units: applicationUnits, + meterStatus: meterStatus, + leader: leader, + payloads: payloads, + }); err != nil { return errors.Trace(err) } } return nil } -func (e *exporter) readApplicationLeaders() (map[string]string, error) { - client, err := e.st.getLeadershipLeaseClient() +func (e *exporter) readAllStorageConstraints() error { + coll, closer := e.st.getCollection(storageConstraintsC) + defer closer() + + storageConstraints := make(map[string]storageConstraintsDoc) + var doc storageConstraintsDoc + iter := coll.Find(nil).Iter() + defer iter.Close() + for iter.Next(&doc) { + storageConstraints[e.st.localID(doc.DocID)] = doc + } + if err := iter.Err(); err != nil { + return errors.Annotate(err, "failed to read storage constraints") + } + e.logger.Debugf("read %d storage constraint documents", len(storageConstraints)) + e.modelStorageConstraints = 
storageConstraints + return nil +} + +func (e *exporter) storageConstraints(doc storageConstraintsDoc) map[string]description.StorageConstraintArgs { + result := make(map[string]description.StorageConstraintArgs) + for key, value := range doc.Constraints { + result[key] = description.StorageConstraintArgs{ + Pool: value.Pool, + Size: value.Size, + Count: value.Count, + } + } + return result +} + +func (e *exporter) readAllPayloads() (map[string][]payload.FullPayloadInfo, error) { + result := make(map[string][]payload.FullPayloadInfo) + all, err := ModelPayloads{db: e.st.database}.ListAll() if err != nil { return nil, errors.Trace(err) } - leases := client.Leases() - result := make(map[string]string, len(leases)) - for key, value := range leases { - result[key] = value.Holder + for _, payload := range all { + result[payload.Unit] = append(result[payload.Unit], payload) } return result, nil } -func (e *exporter) addApplication(application *Application, refcounts map[string]int, units []*Unit, meterStatus map[string]*meterStatusDoc, leader string) error { +type addApplicationContext struct { + application *Application + units []*Unit + meterStatus map[string]*meterStatusDoc + leader string + payloads map[string][]payload.FullPayloadInfo +} + +func (e *exporter) addApplication(ctx addApplicationContext) error { + application := ctx.application + appName := application.Name() settingsKey := application.settingsKey() - leadershipKey := leadershipSettingsKey(application.Name()) + leadershipKey := leadershipSettingsKey(appName) + storageConstraintsKey := application.storageConstraintsKey() applicationSettingsDoc, found := e.modelSettings[settingsKey] if !found { - return errors.Errorf("missing settings for application %q", application.Name()) - } - refCount, found := refcounts[settingsKey] - if !found { - return errors.Errorf("missing settings refcount for application %q", application.Name()) + return errors.Errorf("missing settings for application %q", appName) } 
leadershipSettingsDoc, found := e.modelSettings[leadershipKey] if !found { - return errors.Errorf("missing leadership settings for application %q", application.Name()) + return errors.Errorf("missing leadership settings for application %q", appName) } args := description.ApplicationArgs{ @@ -519,17 +576,19 @@ Exposed: application.doc.Exposed, MinUnits: application.doc.MinUnits, Settings: applicationSettingsDoc.Settings, - SettingsRefCount: refCount, - Leader: leader, + Leader: ctx.leader, LeadershipSettings: leadershipSettingsDoc.Settings, MetricsCredentials: application.doc.MetricCredentials, } + if constraints, found := e.modelStorageConstraints[storageConstraintsKey]; found { + args.StorageConstraints = e.storageConstraints(constraints) + } exApplication := e.model.AddApplication(args) // Find the current application status. globalKey := application.globalKey() statusArgs, err := e.statusArgs(globalKey) if err != nil { - return errors.Annotatef(err, "status for application %s", application.Name()) + return errors.Annotatef(err, "status for application %s", appName) } exApplication.SetStatus(statusArgs) exApplication.SetStatusHistory(e.statusHistoryArgs(globalKey)) @@ -541,9 +600,9 @@ } exApplication.SetConstraints(constraintsArgs) - for _, unit := range units { + for _, unit := range ctx.units { agentKey := unit.globalAgentKey() - unitMeterStatus, found := meterStatus[agentKey] + unitMeterStatus, found := ctx.meterStatus[agentKey] if !found { return errors.Errorf("missing meter status for unit %s", unit.Name()) } @@ -569,6 +628,11 @@ } } exUnit := exApplication.AddUnit(args) + + if err := e.setUnitPayloads(exUnit, ctx.payloads[unit.UnitTag().Id()]); err != nil { + return errors.Trace(err) + } + // workload uses globalKey, agent uses globalAgentKey, // workload version uses globalWorkloadVersionKey. 
globalKey := unit.globalKey() @@ -612,6 +676,25 @@ return nil } +func (e *exporter) setUnitPayloads(exUnit description.Unit, payloads []payload.FullPayloadInfo) error { + unitID := exUnit.Tag().Id() + machineID := exUnit.Machine().Id() + for _, payload := range payloads { + if payload.Machine != machineID { + return errors.NotValidf("payload for unit %q reports wrong machine %q (should be %q)", unitID, payload.Machine, machineID) + } + args := description.PayloadArgs{ + Name: payload.Name, + Type: payload.Type, + RawID: payload.ID, + State: payload.Status, + Labels: payload.Labels, + } + exUnit.AddPayload(args) + } + return nil +} + func (e *exporter) relations() error { rels, err := e.st.AllRelations() if err != nil { @@ -765,6 +848,55 @@ return nil } +func (e *exporter) cloudimagemetadata() error { + cloudimagemetadata, err := e.st.CloudImageMetadataStorage.AllCloudImageMetadata() + if err != nil { + return errors.Trace(err) + } + e.logger.Debugf("read %d cloudimagemetadata", len(cloudimagemetadata)) + for _, metadata := range cloudimagemetadata { + e.model.AddCloudImageMetadata(description.CloudImageMetadataArgs{ + Stream: metadata.Stream, + Region: metadata.Region, + Version: metadata.Version, + Series: metadata.Series, + Arch: metadata.Arch, + VirtType: metadata.VirtType, + RootStorageType: metadata.RootStorageType, + RootStorageSize: metadata.RootStorageSize, + DateCreated: metadata.DateCreated, + Source: metadata.Source, + Priority: metadata.Priority, + ImageId: metadata.ImageId, + }) + } + return nil +} + +func (e *exporter) actions() error { + actions, err := e.st.AllActions() + if err != nil { + return errors.Trace(err) + } + e.logger.Debugf("read %d actions", len(actions)) + for _, action := range actions { + results, message := action.Results() + e.model.AddAction(description.ActionArgs{ + Receiver: action.Receiver(), + Name: action.Name(), + Parameters: action.Parameters(), + Enqueued: action.Enqueued(), + Started: action.Started(), + Completed: 
action.Completed(), + Status: string(action.Status()), + Results: results, + Message: message, + Id: action.Id(), + }) + } + return nil +} + func (e *exporter) readAllRelationScopes() (set.Strings, error) { relationScopes, closer := e.st.getCollection(relationScopesC) defer closer() @@ -1068,34 +1200,6 @@ return result, nil } -func (e *exporter) readAllSettingsRefCounts() (map[string]int, error) { - refCounts, closer := e.st.getCollection(settingsrefsC) - defer closer() - - var docs []bson.M - err := refCounts.Find(nil).All(&docs) - if err != nil { - return nil, errors.Annotate(err, "failed to read settings refcount collection") - } - - e.logger.Debugf("read %d settings refcount documents", len(docs)) - result := make(map[string]int) - for _, doc := range docs { - docId, ok := doc["_id"].(string) - if !ok { - return nil, errors.Errorf("expected string, got %s (%T)", doc["_id"], doc["_id"]) - } - id := e.st.localID(docId) - count, ok := doc["refcount"].(int) - if !ok { - return nil, errors.Errorf("expected int, got %s (%T)", doc["refcount"], doc["refcount"]) - } - result[id] = count - } - - return result, nil -} - func (e *exporter) logExtras() { // As annotations are saved into the model, they are removed from the // exporter's map. If there are any left at the end, we are missing @@ -1114,6 +1218,12 @@ if err := e.filesystems(); err != nil { return errors.Trace(err) } + if err := e.storageInstances(); err != nil { + return errors.Trace(err) + } + if err := e.storagePools(); err != nil { + return errors.Trace(err) + } return nil } @@ -1145,7 +1255,15 @@ args := description.VolumeArgs{ Tag: vol.VolumeTag(), Binding: vol.LifeBinding(), - // TODO: add storage link + } + if tag, err := vol.StorageInstance(); err == nil { + // only returns an error when no storage tag. + args.Storage = tag + } else { + if !errors.IsNotAssigned(err) { + // This is an unexpected error. 
+ return errors.Trace(err) + } } logger.Debugf("addVolume: %#v", vol.doc) if info, err := vol.Info(); err == nil { @@ -1322,3 +1440,95 @@ e.logger.Debugf("read %d filesystem attachment documents", count) return result, nil } + +func (e *exporter) storageInstances() error { + coll, closer := e.st.getCollection(storageInstancesC) + defer closer() + + attachments, err := e.readStorageAttachments() + if err != nil { + return errors.Trace(err) + } + + var doc storageInstanceDoc + iter := coll.Find(nil).Sort("_id").Iter() + defer iter.Close() + for iter.Next(&doc) { + instance := &storageInstance{e.st, doc} + if err := e.addStorage(instance, attachments[doc.Id]); err != nil { + return errors.Trace(err) + } + } + if err := iter.Err(); err != nil { + return errors.Annotate(err, "failed to read storage instances") + } + return nil +} + +func (e *exporter) addStorage(instance *storageInstance, attachments []names.UnitTag) error { + args := description.StorageArgs{ + Tag: instance.StorageTag(), + Kind: instance.Kind().String(), + Owner: instance.Owner(), + Name: instance.StorageName(), + Attachments: attachments, + } + e.model.AddStorage(args) + return nil +} + +func (e *exporter) readStorageAttachments() (map[string][]names.UnitTag, error) { + coll, closer := e.st.getCollection(storageAttachmentsC) + defer closer() + + result := make(map[string][]names.UnitTag) + var doc storageAttachmentDoc + var count int + iter := coll.Find(nil).Iter() + defer iter.Close() + for iter.Next(&doc) { + unit := names.NewUnitTag(doc.Unit) + result[doc.StorageInstance] = append(result[doc.StorageInstance], unit) + count++ + } + if err := iter.Err(); err != nil { + return nil, errors.Annotate(err, "failed to read storage attachments") + } + e.logger.Debugf("read %d storage attachment documents", count) + return result, nil +} + +func (e *exporter) storagePools() error { + registry, err := e.st.storageProviderRegistry() + if err != nil { + return errors.Annotate(err, "getting provider registry") + 
} + pm := poolmanager.New(storagePoolSettingsManager{e: e}, registry) + poolConfigs, err := pm.List() + if err != nil { + return errors.Annotate(err, "listing pools") + } + for _, cfg := range poolConfigs { + e.model.AddStoragePool(description.StoragePoolArgs{ + Name: cfg.Name(), + Provider: string(cfg.Provider()), + Attributes: cfg.Attrs(), + }) + } + return nil +} + +type storagePoolSettingsManager struct { + poolmanager.SettingsManager + e *exporter +} + +func (m storagePoolSettingsManager) ListSettings(keyPrefix string) (map[string]map[string]interface{}, error) { + result := make(map[string]map[string]interface{}) + for key, doc := range m.e.modelSettings { + if strings.HasPrefix(key, keyPrefix) { + result[key] = doc.Settings + } + } + return result, nil +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/migration_export_test.go juju-core-2.0.0/src/github.com/juju/juju/state/migration_export_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/migration_export_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/migration_export_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -10,13 +10,20 @@ jc "github.com/juju/testing/checkers" "github.com/juju/version" gc "gopkg.in/check.v1" + "gopkg.in/juju/charm.v6-unstable" "gopkg.in/juju/names.v2" "github.com/juju/juju/constraints" "github.com/juju/juju/core/description" "github.com/juju/juju/network" + "github.com/juju/juju/payload" + "github.com/juju/juju/permission" + "github.com/juju/juju/provider/dummy" "github.com/juju/juju/state" + "github.com/juju/juju/state/cloudimagemetadata" "github.com/juju/juju/status" + "github.com/juju/juju/storage/poolmanager" + "github.com/juju/juju/storage/provider" "github.com/juju/juju/testing/factory" ) @@ -34,18 +41,18 @@ "another": "one", } -type MigrationSuite struct { - ConnSuite +type MigrationBaseSuite struct { + ConnWithWallClockSuite } -func (s *MigrationSuite) setLatestTools(c *gc.C, latestTools 
version.Number) { +func (s *MigrationBaseSuite) setLatestTools(c *gc.C, latestTools version.Number) { dbModel, err := s.State.Model() c.Assert(err, jc.ErrorIsNil) err = dbModel.UpdateLatestToolsVersion(latestTools) c.Assert(err, jc.ErrorIsNil) } -func (s *MigrationSuite) setRandSequenceValue(c *gc.C, name string) int { +func (s *MigrationBaseSuite) setRandSequenceValue(c *gc.C, name string) int { var value int var err error count := rand.Intn(5) + 1 @@ -57,13 +64,13 @@ return value + 1 } -func (s *MigrationSuite) primeStatusHistory(c *gc.C, entity statusSetter, statusVal status.Status, count int) { +func (s *MigrationBaseSuite) primeStatusHistory(c *gc.C, entity statusSetter, statusVal status.Status, count int) { primeStatusHistory(c, entity, statusVal, count, func(i int) map[string]interface{} { return map[string]interface{}{"index": count - i} }, 0) } -func (s *MigrationSuite) makeApplicationWithLeader(c *gc.C, applicationname string, count int, leader int) { +func (s *MigrationBaseSuite) makeApplicationWithLeader(c *gc.C, applicationname string, count int, leader int) { c.Assert(leader < count, jc.IsTrue) units := make([]*state.Unit, count) application := s.Factory.MakeApplication(c, &factory.ApplicationParams{ @@ -84,8 +91,37 @@ c.Assert(err, jc.ErrorIsNil) } +func (s *MigrationBaseSuite) makeUnitWithStorage(c *gc.C) (*state.Application, *state.Unit, names.StorageTag) { + pool := "loop-pool" + kind := "block" + // Create a default pool for block devices. + pm := poolmanager.New(state.NewStateSettings(s.State), dummy.StorageProviders()) + _, err := pm.Create(pool, provider.LoopProviderType, map[string]interface{}{}) + c.Assert(err, jc.ErrorIsNil) + + // There are test charms called "storage-block" and + // "storage-filesystem" which are what you'd expect. 
+ ch := s.AddTestingCharm(c, "storage-"+kind) + storage := map[string]state.StorageConstraints{ + "data": makeStorageCons(pool, 1024, 1), + } + service := s.AddTestingServiceWithStorage(c, "storage-"+kind, ch, storage) + unit, err := service.AddUnit() + + machine := s.Factory.MakeMachine(c, nil) + err = unit.AssignToMachine(machine) + c.Assert(err, jc.ErrorIsNil) + + c.Assert(err, jc.ErrorIsNil) + storageTag := names.NewStorageTag("data/0") + agentVersion := version.MustParseBinary("2.0.1-quantal-and64") + err = unit.SetAgentVersion(agentVersion) + c.Assert(err, jc.ErrorIsNil) + return service, unit, storageTag +} + type MigrationExportSuite struct { - MigrationSuite + MigrationBaseSuite } var _ = gc.Suite(&MigrationExportSuite{}) @@ -146,17 +182,17 @@ func (s *MigrationExportSuite) TestModelUsers(c *gc.C) { // Make sure we have some last connection times for the admin user, // and create a few other users. - lastConnection := state.NowToTheSecond() + lastConnection := s.State.NowToTheSecond() owner, err := s.State.UserAccess(s.Owner, s.State.ModelTag()) c.Assert(err, jc.ErrorIsNil) err = state.UpdateModelUserLastConnection(s.State, owner, lastConnection) c.Assert(err, jc.ErrorIsNil) bobTag := names.NewUserTag("bob@external") - bob, err := s.State.AddModelUser(state.UserAccessSpec{ + bob, err := s.State.AddModelUser(s.State.ModelUUID(), state.UserAccessSpec{ User: bobTag, CreatedBy: s.Owner, - Access: description.ReadAccess, + Access: permission.ReadAccess, }) c.Assert(err, jc.ErrorIsNil) err = state.UpdateModelUserLastConnection(s.State, bob, lastConnection) @@ -177,14 +213,14 @@ c.Assert(exportedAdmin.CreatedBy(), gc.Equals, s.Owner) c.Assert(exportedAdmin.DateCreated(), gc.Equals, owner.DateCreated) c.Assert(exportedAdmin.LastConnection(), gc.Equals, lastConnection) - c.Assert(exportedAdmin.Access(), gc.Equals, description.AdminAccess) + c.Assert(exportedAdmin.Access(), gc.Equals, "admin") c.Assert(exportedBob.Name(), gc.Equals, bobTag) 
c.Assert(exportedBob.DisplayName(), gc.Equals, "") c.Assert(exportedBob.CreatedBy(), gc.Equals, s.Owner) c.Assert(exportedBob.DateCreated(), gc.Equals, bob.DateCreated) c.Assert(exportedBob.LastConnection(), gc.Equals, lastConnection) - c.Assert(exportedBob.Access(), gc.Equals, description.ReadAccess) + c.Assert(exportedBob.Access(), gc.Equals, "read") } func (s *MigrationExportSuite) TestMachines(c *gc.C) { @@ -203,7 +239,7 @@ nested := s.Factory.MakeMachineNested(c, machine1.Id(), nil) err := s.State.SetAnnotations(machine1, testAnnotations) c.Assert(err, jc.ErrorIsNil) - s.primeStatusHistory(c, machine1, status.StatusStarted, addedHistoryCount) + s.primeStatusHistory(c, machine1, status.Started, addedHistoryCount) model, err := s.State.Export() c.Assert(err, jc.ErrorIsNil) @@ -231,7 +267,7 @@ history := exported.StatusHistory() c.Assert(history, gc.HasLen, expectedHistoryCount) - s.checkStatusHistory(c, history[:addedHistoryCount], status.StatusStarted) + s.checkStatusHistory(c, history[:addedHistoryCount], status.Started) containers := exported.Containers() c.Assert(containers, gc.HasLen, 1) @@ -307,7 +343,7 @@ c.Assert(err, jc.ErrorIsNil) err = s.State.SetAnnotations(application, testAnnotations) c.Assert(err, jc.ErrorIsNil) - s.primeStatusHistory(c, application, status.StatusActive, addedHistoryCount) + s.primeStatusHistory(c, application, status.Active, addedHistoryCount) model, err := s.State.Export() c.Assert(err, jc.ErrorIsNil) @@ -324,7 +360,6 @@ c.Assert(exported.Settings(), jc.DeepEquals, map[string]interface{}{ "foo": "bar", }) - c.Assert(exported.SettingsRefCount(), gc.Equals, 1) c.Assert(exported.LeadershipSettings(), jc.DeepEquals, map[string]interface{}{ "leader": "true", }) @@ -340,7 +375,7 @@ history := exported.StatusHistory() c.Assert(history, gc.HasLen, expectedHistoryCount) - s.checkStatusHistory(c, history[:addedHistoryCount], status.StatusActive) + s.checkStatusHistory(c, history[:addedHistoryCount], status.Active) } func (s 
*MigrationExportSuite) TestMultipleApplications(c *gc.C) { @@ -367,8 +402,8 @@ } err = s.State.SetAnnotations(unit, testAnnotations) c.Assert(err, jc.ErrorIsNil) - s.primeStatusHistory(c, unit, status.StatusActive, addedHistoryCount) - s.primeStatusHistory(c, unit.Agent(), status.StatusIdle, addedHistoryCount) + s.primeStatusHistory(c, unit, status.Active, addedHistoryCount) + s.primeStatusHistory(c, unit.Agent(), status.Idle, addedHistoryCount) model, err := s.State.Export() c.Assert(err, jc.ErrorIsNil) @@ -396,11 +431,11 @@ workloadHistory := exported.WorkloadStatusHistory() c.Assert(workloadHistory, gc.HasLen, expectedHistoryCount) - s.checkStatusHistory(c, workloadHistory[:addedHistoryCount], status.StatusActive) + s.checkStatusHistory(c, workloadHistory[:addedHistoryCount], status.Active) agentHistory := exported.AgentStatusHistory() c.Assert(agentHistory, gc.HasLen, expectedHistoryCount) - s.checkStatusHistory(c, agentHistory[:addedHistoryCount], status.StatusIdle) + s.checkStatusHistory(c, agentHistory[:addedHistoryCount], status.Idle) versionHistory := exported.WorkloadVersionHistory() // There are extra entries at the start that we don't care about. 
@@ -642,6 +677,64 @@ c.Assert(key.Keys(), jc.DeepEquals, []string{"bam", "mam"}) } +func (s *MigrationExportSuite) TestCloudImageMetadatas(c *gc.C) { + storageSize := uint64(3) + attrs := cloudimagemetadata.MetadataAttributes{ + Stream: "stream", + Region: "region-test", + Version: "14.04", + Series: "trusty", + Arch: "arch", + VirtType: "virtType-test", + RootStorageType: "rootStorageType-test", + RootStorageSize: &storageSize, + Source: "test", + } + metadata := []cloudimagemetadata.Metadata{{attrs, 2, "1", 2}} + + err := s.State.CloudImageMetadataStorage.SaveMetadata(metadata) + c.Assert(err, jc.ErrorIsNil) + + model, err := s.State.Export() + c.Assert(err, jc.ErrorIsNil) + + images := model.CloudImageMetadata() + c.Assert(images, gc.HasLen, 1) + image := images[0] + c.Check(image.Stream(), gc.Equals, "stream") + c.Check(image.Region(), gc.Equals, "region-test") + c.Check(image.Version(), gc.Equals, "14.04") + c.Check(image.Arch(), gc.Equals, "arch") + c.Check(image.VirtType(), gc.Equals, "virtType-test") + c.Check(image.RootStorageType(), gc.Equals, "rootStorageType-test") + value, ok := image.RootStorageSize() + c.Assert(ok, jc.IsTrue) + c.Assert(value, gc.Equals, uint64(3)) + c.Check(image.Source(), gc.Equals, "test") + c.Check(image.Priority(), gc.Equals, 2) + c.Check(image.ImageId(), gc.Equals, "1") + c.Check(image.DateCreated(), gc.Equals, int64(2)) +} + +func (s *MigrationExportSuite) TestActions(c *gc.C) { + machine := s.Factory.MakeMachine(c, &factory.MachineParams{ + Constraints: constraints.MustParse("arch=amd64 mem=8G"), + }) + _, err := s.State.EnqueueAction(machine.MachineTag(), "foo", nil) + c.Assert(err, jc.ErrorIsNil) + + model, err := s.State.Export() + c.Assert(err, jc.ErrorIsNil) + + actions := model.Actions() + c.Assert(actions, gc.HasLen, 1) + action := actions[0] + c.Check(action.Receiver(), gc.Equals, machine.Id()) + c.Check(action.Name(), gc.Equals, "foo") + c.Check(action.Status(), gc.Equals, "pending") + c.Check(action.Message(), 
gc.Equals, "") +} + type goodToken struct{} // Check implements leadership.Token @@ -804,3 +897,96 @@ status := provisioned.Status() c.Check(status.Value(), gc.Equals, "pending") } + +func (s *MigrationExportSuite) TestStorage(c *gc.C) { + _, u, storageTag := s.makeUnitWithStorage(c) + + model, err := s.State.Export() + c.Assert(err, jc.ErrorIsNil) + + apps := model.Applications() + c.Assert(apps, gc.HasLen, 1) + constraints := apps[0].StorageConstraints() + c.Assert(constraints, gc.HasLen, 2) + cons, found := constraints["data"] + c.Assert(found, jc.IsTrue) + c.Check(cons.Pool(), gc.Equals, "loop-pool") + c.Check(cons.Size(), gc.Equals, uint64(0x400)) + c.Check(cons.Count(), gc.Equals, uint64(1)) + cons, found = constraints["allecto"] + c.Assert(found, jc.IsTrue) + c.Check(cons.Pool(), gc.Equals, "loop") + c.Check(cons.Size(), gc.Equals, uint64(0x400)) + c.Check(cons.Count(), gc.Equals, uint64(0)) + + storages := model.Storages() + c.Assert(storages, gc.HasLen, 1) + + storage := storages[0] + + c.Check(storage.Tag(), gc.Equals, storageTag) + c.Check(storage.Kind(), gc.Equals, "block") + owner, err := storage.Owner() + c.Check(err, jc.ErrorIsNil) + c.Check(owner, gc.Equals, u.Tag()) + c.Check(storage.Name(), gc.Equals, "data") + c.Check(storage.Attachments(), jc.DeepEquals, []names.UnitTag{ + u.UnitTag(), + }) +} + +func (s *MigrationExportSuite) TestStoragePools(c *gc.C) { + pm := poolmanager.New(state.NewStateSettings(s.State), provider.CommonStorageProviders()) + _, err := pm.Create("test-pool", provider.LoopProviderType, map[string]interface{}{ + "value": 42, + }) + c.Assert(err, jc.ErrorIsNil) + + model, err := s.State.Export() + c.Assert(err, jc.ErrorIsNil) + + pools := model.StoragePools() + c.Assert(pools, gc.HasLen, 1) + pool := pools[0] + c.Assert(pool.Name(), gc.Equals, "test-pool") + c.Assert(pool.Provider(), gc.Equals, "loop") + c.Assert(pool.Attributes(), jc.DeepEquals, map[string]interface{}{ + "value": 42, + }) +} + +func (s *MigrationExportSuite) 
TestPayloads(c *gc.C) { + unit := s.Factory.MakeUnit(c, nil) + up, err := s.State.UnitPayloads(unit) + c.Assert(err, jc.ErrorIsNil) + original := payload.Payload{ + PayloadClass: charm.PayloadClass{ + Name: "something", + Type: "special", + }, + ID: "42", + Status: "running", + Labels: []string{"foo", "bar"}, + } + err = up.Track(original) + c.Assert(err, jc.ErrorIsNil) + + model, err := s.State.Export() + c.Assert(err, jc.ErrorIsNil) + + applications := model.Applications() + c.Assert(applications, gc.HasLen, 1) + + units := applications[0].Units() + c.Assert(units, gc.HasLen, 1) + + payloads := units[0].Payloads() + c.Assert(payloads, gc.HasLen, 1) + + payload := payloads[0] + c.Check(payload.Name(), gc.Equals, original.Name) + c.Check(payload.Type(), gc.Equals, original.Type) + c.Check(payload.RawID(), gc.Equals, original.ID) + c.Check(payload.State(), gc.Equals, original.Status) + c.Check(payload.Labels(), jc.DeepEquals, original.Labels) +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/migration_import.go juju-core-2.0.0/src/github.com/juju/juju/state/migration_import.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/migration_import.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/migration_import.go 2016-10-13 14:31:49.000000000 +0000 @@ -19,8 +19,12 @@ "github.com/juju/juju/environs/config" "github.com/juju/juju/instance" "github.com/juju/juju/network" + "github.com/juju/juju/payload" + "github.com/juju/juju/permission" + "github.com/juju/juju/state/cloudimagemetadata" "github.com/juju/juju/status" "github.com/juju/juju/storage" + "github.com/juju/juju/storage/poolmanager" "github.com/juju/juju/tools" ) @@ -67,7 +71,7 @@ if err != nil { return nil, nil, errors.Trace(err) } - logger.Debugf("model created %s/%s", dbModel.Owner().Canonical(), dbModel.Name()) + logger.Debugf("model created %s/%s", dbModel.Owner().Id(), dbModel.Name()) defer func() { if err != nil { newSt.Close() @@ -95,6 +99,12 @@ 
if err := restore.sshHostKeys(); err != nil { return nil, nil, errors.Annotate(err, "sshHostKeys") } + if err := restore.cloudimagemetadata(); err != nil { + return nil, nil, errors.Annotate(err, "cloudimagemetadata") + } + if err := restore.actions(); err != nil { + return nil, nil, errors.Annotate(err, "actions") + } if err := restore.modelUsers(); err != nil { return nil, nil, errors.Annotate(err, "modelUsers") @@ -222,7 +232,7 @@ user.CreatedBy(), user.DisplayName(), user.DateCreated(), - user.Access())..., + permission.Access(user.Access()))..., ) } if err := i.st.runTransaction(ops); err != nil { @@ -270,7 +280,6 @@ // - adds status doc // - adds machine block devices doc - // TODO: consider filesystems and volumes mStatus := m.Status() if mStatus == nil { return errors.NotValidf("missing status") @@ -286,7 +295,7 @@ // (a card exists for the work). Fake it for now. instanceStatusDoc := statusDoc{ ModelUUID: i.st.ModelUUID(), - Status: status.StatusStarted, + Status: status.Started, } cons := i.constraints(m.Constraints()) prereqOps, machineOp := i.st.baseNewMachineOps( @@ -445,6 +454,7 @@ if err != nil { return nil, errors.Trace(err) } + machineTag := m.Tag() return &machineDoc{ DocID: i.st.docID(id), Id: id, @@ -459,7 +469,9 @@ NoVote: true, // State servers can't be migrated yet. HasVote: false, // State servers can't be migrated yet. 
PasswordHash: m.PasswordHash(), - Clean: true, // check this later + Clean: !i.machineHasUnits(machineTag), + Volumes: i.machineVolumes(machineTag), + Filesystems: i.machineFilesystems(machineTag), Addresses: i.makeAddresses(m.ProviderAddresses()), MachineAddresses: i.makeAddresses(m.MachineAddresses()), PreferredPrivateAddress: i.makeAddress(m.PreferredPrivateAddress()), @@ -470,6 +482,41 @@ }, nil } +func (i *importer) machineHasUnits(tag names.MachineTag) bool { + for _, app := range i.model.Applications() { + for _, unit := range app.Units() { + if unit.Machine() == tag { + return true + } + } + } + return false +} + +func (i *importer) machineVolumes(tag names.MachineTag) []string { + var result []string + for _, volume := range i.model.Volumes() { + for _, attachment := range volume.Attachments() { + if attachment.Machine() == tag { + result = append(result, volume.Tag().Id()) + } + } + } + return result +} + +func (i *importer) machineFilesystems(tag names.MachineTag) []string { + var result []string + for _, filesystem := range i.model.Filesystems() { + for _, attachment := range filesystem.Attachments() { + if attachment.Machine() == tag { + result = append(result, filesystem.Tag().Id()) + } + } + } + return result +} + func (i *importer) makeMachineJobs(jobs []string) ([]MachineJob, error) { // At time of writing, there are three valid jobs. If any jobs gets // deprecated or changed in the future, older models that specify those @@ -567,7 +614,7 @@ } func (i *importer) application(s description.Application) error { - // Import this application, then soon, its units. + // Import this application, then its units. i.logger.Debugf("importing application %s", s.Name()) // 1. construct an applicationDoc @@ -584,15 +631,17 @@ statusDoc := i.makeStatusDoc(status) // TODO: update never set malarky... maybe... 
- ops := addApplicationOps(i.st, addApplicationOpsArgs{ - applicationDoc: sdoc, - statusDoc: statusDoc, - constraints: i.constraints(s.Constraints()), - // storage TODO, + ops, err := addApplicationOps(i.st, addApplicationOpsArgs{ + applicationDoc: sdoc, + statusDoc: statusDoc, + constraints: i.constraints(s.Constraints()), + storage: i.storageConstraints(s.StorageConstraints()), settings: s.Settings(), - settingsRefCount: s.SettingsRefCount(), leadershipSettings: s.LeadershipSettings(), }) + if err != nil { + return errors.Trace(err) + } if err := i.st.runTransaction(ops); err != nil { return errors.Trace(err) @@ -626,6 +675,21 @@ return nil } +func (i *importer) storageConstraints(cons map[string]description.StorageConstraint) map[string]StorageConstraints { + if len(cons) == 0 { + return nil + } + result := make(map[string]StorageConstraints) + for key, value := range cons { + result[key] = StorageConstraints{ + Pool: value.Pool(), + Size: value.Size(), + Count: value.Count(), + } + } + return result +} + func (i *importer) unit(s description.Application, u description.Unit) error { i.logger.Debugf("importing unit %s", u.Name()) @@ -649,16 +713,16 @@ workloadStatusDoc := i.makeStatusDoc(workloadStatus) workloadVersion := u.WorkloadVersion() - versionStatus := status.StatusActive + versionStatus := status.Active if workloadVersion == "" { - versionStatus = status.StatusUnknown + versionStatus = status.Unknown } workloadVersionDoc := statusDoc{ Status: versionStatus, StatusInfo: workloadVersion, } - ops := addUnitOps(i.st, addUnitOpsArgs{ + ops, err := addUnitOps(i.st, addUnitOpsArgs{ unitDoc: udoc, agentStatusDoc: agentStatusDoc, workloadStatusDoc: workloadStatusDoc, @@ -668,6 +732,9 @@ Info: u.MeterStatusInfo(), }, }) + if err != nil { + return errors.Trace(err) + } // If the unit is a principal, add it to its machine. 
if u.Principal().Id() == "" { @@ -688,6 +755,7 @@ } if err := i.st.runTransaction(ops); err != nil { + i.logger.Debugf("failed ops: %#v", ops) return errors.Trace(err) } @@ -706,12 +774,38 @@ if err := i.importStatusHistory(unit.globalWorkloadVersionKey(), u.WorkloadVersionHistory()); err != nil { return errors.Trace(err) } + if err := i.importUnitPayloads(unit, u.Payloads()); err != nil { + return errors.Trace(err) + } + + return nil +} + +func (i *importer) importUnitPayloads(unit *Unit, payloads []description.Payload) error { + up, err := i.st.UnitPayloads(unit) + if err != nil { + return errors.Trace(err) + } + + for _, p := range payloads { + if err := up.Track(payload.Payload{ + PayloadClass: charm.PayloadClass{ + Name: p.Name(), + Type: p.Type(), + }, + ID: p.RawID(), + Status: p.State(), + Labels: p.Labels(), + }); err != nil { + return errors.Trace(err) + } + } return nil } func (i *importer) makeApplicationDoc(s description.Application) (*applicationDoc, error) { - charmUrl, err := charm.ParseURL(s.CharmURL()) + charmURL, err := charm.ParseURL(s.CharmURL()) if err != nil { return nil, errors.Trace(err) } @@ -720,7 +814,7 @@ Name: s.Name(), Series: s.Series(), Subordinate: s.Subordinate(), - CharmURL: charmUrl, + CharmURL: charmURL, Channel: s.Channel(), CharmModifiedVersion: s.CharmModifiedVersion(), ForceCharm: s.ForceCharm(), @@ -753,7 +847,7 @@ // the charm url for each unit rather than grabbing the applications charm url. // Currently the units charm url matching the application is a precondiation // to migration. 
- charmUrl, err := charm.ParseURL(s.CharmURL()) + charmURL, err := charm.ParseURL(s.CharmURL()) if err != nil { return nil, errors.Trace(err) } @@ -766,20 +860,32 @@ } return &unitDoc{ - Name: u.Name(), - Application: s.Name(), - Series: s.Series(), - CharmURL: charmUrl, - Principal: u.Principal().Id(), - Subordinates: subordinates, - // StorageAttachmentCount int `bson:"storageattachmentcount"` - MachineId: u.Machine().Id(), - Tools: i.makeTools(u.Tools()), - Life: Alive, - PasswordHash: u.PasswordHash(), + Name: u.Name(), + Application: s.Name(), + Series: s.Series(), + CharmURL: charmURL, + Principal: u.Principal().Id(), + Subordinates: subordinates, + StorageAttachmentCount: i.unitStorageAttachmentCount(u.Tag()), + MachineId: u.Machine().Id(), + Tools: i.makeTools(u.Tools()), + Life: Alive, + PasswordHash: u.PasswordHash(), }, nil } +func (i *importer) unitStorageAttachmentCount(unit names.UnitTag) int { + count := 0 + for _, storage := range i.model.Storages() { + for _, tag := range storage.Attachments() { + if tag == unit { + count++ + } + } + } + return count +} + func (i *importer) relations() error { i.logger.Debugf("importing relations") for _, r := range i.model.Relations() { @@ -1067,6 +1173,87 @@ return nil } +func (i *importer) cloudimagemetadata() error { + i.logger.Debugf("importing cloudimagemetadata") + images := i.model.CloudImageMetadata() + metadatas := make([]cloudimagemetadata.Metadata, len(images)) + for index, image := range images { + metadatas[index] = cloudimagemetadata.Metadata{ + cloudimagemetadata.MetadataAttributes{ + Source: image.Source(), + Stream: image.Stream(), + Region: image.Region(), + Version: image.Version(), + Series: image.Series(), + Arch: image.Arch(), + RootStorageType: image.RootStorageType(), + VirtType: image.VirtType(), + }, + image.Priority(), + image.ImageId(), + image.DateCreated(), + } + } + err := i.st.CloudImageMetadataStorage.SaveMetadata(metadatas) + if err != nil { + i.logger.Errorf("error importing 
cloudimagemetadata %v: %s", images, err) + return errors.Trace(err) + } + i.logger.Debugf("importing cloudimagemetadata succeeded") + return nil +} + +func (i *importer) actions() error { + i.logger.Debugf("importing actions") + for _, action := range i.model.Actions() { + err := i.addAction(action) + if err != nil { + i.logger.Errorf("error importing action %v: %s", action, err) + return errors.Trace(err) + } + } + i.logger.Debugf("importing actions succeeded") + return nil +} + +func (i *importer) addAction(action description.Action) error { + modelUUID := i.st.ModelUUID() + newDoc := &actionDoc{ + DocId: i.st.docID(action.Id()), + ModelUUID: modelUUID, + Receiver: action.Receiver(), + Name: action.Name(), + Parameters: action.Parameters(), + Enqueued: action.Enqueued(), + Results: action.Results(), + Message: action.Message(), + Started: action.Started(), + Completed: action.Completed(), + Status: ActionStatus(action.Status()), + } + prefix := ensureActionMarker(action.Receiver()) + notificationDoc := &actionNotificationDoc{ + DocId: i.st.docID(prefix + action.Id()), + ModelUUID: modelUUID, + Receiver: action.Receiver(), + ActionID: action.Id(), + } + ops := []txn.Op{{ + C: actionsC, + Id: newDoc.DocId, + Insert: newDoc, + }, { + C: actionNotificationsC, + Id: notificationDoc.DocId, + Insert: notificationDoc, + }} + + if err := i.st.runTransaction(ops); err != nil { + return errors.Trace(err) + } + return nil +} + func (i *importer) importStatusHistory(globalKey string, history []description.Status) error { docs := make([]interface{}, len(history)) for i, statusVal := range history { @@ -1131,12 +1318,75 @@ } func (i *importer) storage() error { + if err := i.storageInstances(); err != nil { + return errors.Annotate(err, "storage instances") + } if err := i.volumes(); err != nil { return errors.Annotate(err, "volumes") } if err := i.filesystems(); err != nil { return errors.Annotate(err, "filesystems") } + if err := i.storagePools(); err != nil { + return 
errors.Annotate(err, "storage pools") + } + return nil +} + +func (i *importer) storageInstances() error { + i.logger.Debugf("importing storage instances") + for _, storage := range i.model.Storages() { + err := i.addStorageInstance(storage) + if err != nil { + i.logger.Errorf("error importing storage %s: %s", storage.Tag(), err) + return errors.Trace(err) + } + } + i.logger.Debugf("importing storage instances succeeded") + return nil +} + +func (i *importer) addStorageInstance(storage description.Storage) error { + kind := parseStorageKind(storage.Kind()) + if kind == StorageKindUnknown { + return errors.Errorf("storage kind %q is unknown", storage.Kind()) + } + owner, err := storage.Owner() + if err != nil { + return errors.Annotate(err, "storage owner") + } + attachments := storage.Attachments() + tag := storage.Tag() + var ops []txn.Op + for _, unit := range attachments { + ops = append(ops, createStorageAttachmentOp(tag, unit)) + } + doc := &storageInstanceDoc{ + Id: storage.Tag().Id(), + Kind: kind, + Owner: owner.String(), + StorageName: storage.Name(), + AttachmentCount: len(attachments), + } + ops = append(ops, txn.Op{ + C: storageInstancesC, + Id: tag.Id(), + Assert: txn.DocMissing, + Insert: doc, + }) + + refcounts, closer := i.st.getCollection(refcountsC) + defer closer() + storageRefcountKey := entityStorageRefcountKey(owner, storage.Name()) + incRefOp, err := nsRefcounts.CreateOrIncRefOp(refcounts, storageRefcountKey, 1) + if err != nil { + return errors.Trace(err) + } + ops = append(ops, incRefOp) + + if err := i.st.runTransaction(ops); err != nil { + return errors.Trace(err) + } return nil } @@ -1182,9 +1432,8 @@ } } doc := volumeDoc{ - Name: tag.Id(), - // TODO: add storage ID - // StorageId: ..., + Name: tag.Id(), + StorageId: volume.Storage().Id(), // Life: ..., // TODO: import life, default is Alive Binding: binding, Params: params, @@ -1333,3 +1582,19 @@ }, } } + +func (i *importer) storagePools() error { + registry, err := 
i.st.storageProviderRegistry() + if err != nil { + return errors.Annotate(err, "getting provider registry") + } + pm := poolmanager.New(NewStateSettings(i.st), registry) + + for _, pool := range i.model.StoragePools() { + _, err := pm.Create(pool.Name(), storage.ProviderType(pool.Provider()), pool.Attributes()) + if err != nil { + return errors.Annotatef(err, "creating pool %q", pool.Name()) + } + } + return nil +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/migration_import_test.go juju-core-2.0.0/src/github.com/juju/juju/state/migration_import_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/migration_import_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/migration_import_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -5,25 +5,32 @@ import ( "fmt" - "time" + "time" // only uses time.Time values "github.com/juju/errors" jc "github.com/juju/testing/checkers" "github.com/juju/utils" "github.com/juju/version" gc "gopkg.in/check.v1" + "gopkg.in/juju/charm.v6-unstable" "gopkg.in/juju/names.v2" "github.com/juju/juju/constraints" "github.com/juju/juju/core/description" "github.com/juju/juju/network" + "github.com/juju/juju/payload" + "github.com/juju/juju/permission" "github.com/juju/juju/state" + "github.com/juju/juju/state/cloudimagemetadata" "github.com/juju/juju/status" + "github.com/juju/juju/storage/poolmanager" + "github.com/juju/juju/storage/provider" + coretesting "github.com/juju/juju/testing" "github.com/juju/juju/testing/factory" ) type MigrationImportSuite struct { - MigrationSuite + MigrationBaseSuite } var _ = gc.Suite(&MigrationImportSuite{}) @@ -138,12 +145,12 @@ c.Assert(blocks[0].Message(), gc.Equals, "locked down") } -func (s *MigrationImportSuite) newModelUser(c *gc.C, name string, readOnly bool, lastConnection time.Time) description.UserAccess { - access := description.AdminAccess +func (s *MigrationImportSuite) newModelUser(c *gc.C, name string, readOnly bool, 
lastConnection time.Time) permission.UserAccess { + access := permission.AdminAccess if readOnly { - access = description.ReadAccess + access = permission.ReadAccess } - user, err := s.State.AddModelUser(state.UserAccessSpec{ + user, err := s.State.AddModelUser(s.State.ModelUUID(), state.UserAccessSpec{ User: names.NewUserTag(name), CreatedBy: s.Owner, Access: access, @@ -156,7 +163,7 @@ return user } -func (s *MigrationImportSuite) AssertUserEqual(c *gc.C, newUser, oldUser description.UserAccess) { +func (s *MigrationImportSuite) AssertUserEqual(c *gc.C, newUser, oldUser permission.UserAccess) { c.Assert(newUser.UserName, gc.Equals, oldUser.UserName) c.Assert(newUser.DisplayName, gc.Equals, oldUser.DisplayName) c.Assert(newUser.CreatedBy, gc.Equals, oldUser.CreatedBy) @@ -182,16 +189,16 @@ err := s.State.RemoveUserAccess(s.Owner, s.modelTag) c.Assert(err, jc.ErrorIsNil) - lastConnection := state.NowToTheSecond() + lastConnection := s.State.NowToTheSecond() bravo := s.newModelUser(c, "bravo@external", false, lastConnection) charlie := s.newModelUser(c, "charlie@external", true, lastConnection) - delta := s.newModelUser(c, "delta@external", true, time.Time{}) + delta := s.newModelUser(c, "delta@external", true, coretesting.ZeroTime()) newModel, newSt := s.importModel(c) // Check the import values of the users. - for _, user := range []description.UserAccess{bravo, charlie, delta} { + for _, user := range []permission.UserAccess{bravo, charlie, delta} { newUser, err := newSt.UserAccess(user.UserTag, newModel.Tag()) c.Assert(err, jc.ErrorIsNil) s.AssertUserEqual(c, newUser, user) @@ -230,7 +237,7 @@ }) err := s.State.SetAnnotations(machine1, testAnnotations) c.Assert(err, jc.ErrorIsNil) - s.primeStatusHistory(c, machine1, status.StatusStarted, 5) + s.primeStatusHistory(c, machine1, status.Started, 5) // machine1 should have some instance data. 
hardware, err := machine1.HardwareCharacteristics() @@ -323,7 +330,7 @@ c.Assert(application.SetExposed(), jc.ErrorIsNil) err = s.State.SetAnnotations(application, testAnnotations) c.Assert(err, jc.ErrorIsNil) - s.primeStatusHistory(c, application, status.StatusActive, 5) + s.primeStatusHistory(c, application, status.Active, 5) allApplications, err := s.State.AllApplications() c.Assert(err, jc.ErrorIsNil) @@ -400,8 +407,8 @@ c.Assert(err, jc.ErrorIsNil) err = s.State.SetAnnotations(exported, testAnnotations) c.Assert(err, jc.ErrorIsNil) - s.primeStatusHistory(c, exported, status.StatusActive, 5) - s.primeStatusHistory(c, exported.Agent(), status.StatusIdle, 5) + s.primeStatusHistory(c, exported, status.Active, 5) + s.primeStatusHistory(c, exported.Agent(), status.Idle, 5) _, newSt := s.importModel(c) @@ -700,6 +707,66 @@ c.Assert(keys, jc.DeepEquals, state.SSHHostKeys{"bam", "mam"}) } +func (s *MigrationImportSuite) TestCloudImageMetadata(c *gc.C) { + storageSize := uint64(3) + attrs := cloudimagemetadata.MetadataAttributes{ + Stream: "stream", + Region: "region-test", + Version: "14.04", + Series: "trusty", + Arch: "arch", + VirtType: "virtType-test", + RootStorageType: "rootStorageType-test", + RootStorageSize: &storageSize, + Source: "test", + } + metadata := []cloudimagemetadata.Metadata{{attrs, 2, "1", 2}} + + err := s.State.CloudImageMetadataStorage.SaveMetadata(metadata) + c.Assert(err, jc.ErrorIsNil) + + _, newSt := s.importModel(c) + defer func() { + c.Assert(newSt.Close(), jc.ErrorIsNil) + }() + + images, err := s.State.CloudImageMetadataStorage.AllCloudImageMetadata() + c.Assert(err, jc.ErrorIsNil) + c.Assert(images, gc.HasLen, 1) + image := images[0] + c.Check(image.Stream, gc.Equals, "stream") + c.Check(image.Region, gc.Equals, "region-test") + c.Check(image.Version, gc.Equals, "14.04") + c.Check(image.Arch, gc.Equals, "arch") + c.Check(image.VirtType, gc.Equals, "virtType-test") + c.Check(image.RootStorageType, gc.Equals, "rootStorageType-test") + 
c.Check(*image.RootStorageSize, gc.Equals, uint64(3)) + c.Check(image.Source, gc.Equals, "test") + c.Check(image.Priority, gc.Equals, 2) + c.Check(image.ImageId, gc.Equals, "1") + c.Check(image.DateCreated, gc.Equals, int64(2)) +} + +func (s *MigrationImportSuite) TestAction(c *gc.C) { + machine := s.Factory.MakeMachine(c, &factory.MachineParams{ + Constraints: constraints.MustParse("arch=amd64 mem=8G"), + }) + _, err := s.State.EnqueueAction(machine.MachineTag(), "foo", nil) + c.Assert(err, jc.ErrorIsNil) + + _, newSt := s.importModel(c) + defer func() { + c.Assert(newSt.Close(), jc.ErrorIsNil) + }() + + actions, _ := newSt.AllActions() + c.Assert(actions, gc.HasLen, 1) + action := actions[0] + c.Check(action.Receiver(), gc.Equals, machine.Id()) + c.Check(action.Name(), gc.Equals, "foo") + c.Check(action.Status(), gc.Equals, state.ActionPending) +} + func (s *MigrationImportSuite) TestVolumes(c *gc.C) { machine := s.Factory.MakeMachine(c, &factory.MachineParams{ Volumes: []state.MachineVolumeParams{{ @@ -830,6 +897,109 @@ c.Check(attParams.ReadOnly, jc.IsTrue) } +func (s *MigrationImportSuite) TestStorage(c *gc.C) { + app, u, storageTag := s.makeUnitWithStorage(c) + original, err := s.State.StorageInstance(storageTag) + c.Assert(err, jc.ErrorIsNil) + originalCount := state.StorageAttachmentCount(original) + c.Assert(originalCount, gc.Equals, 1) + originalAttachments, err := s.State.StorageAttachments(storageTag) + c.Assert(err, jc.ErrorIsNil) + c.Assert(originalAttachments, gc.HasLen, 1) + c.Assert(originalAttachments[0].Unit(), gc.Equals, u.UnitTag()) + appName := app.Name() + + _, newSt := s.importModel(c) + + app, err = newSt.Application(appName) + c.Assert(err, jc.ErrorIsNil) + cons, err := app.StorageConstraints() + c.Assert(err, jc.ErrorIsNil) + c.Check(cons, jc.DeepEquals, map[string]state.StorageConstraints{ + "data": {Pool: "loop-pool", Size: 0x400, Count: 1}, + "allecto": {Pool: "loop", Size: 0x400}, + }) + + instance, err := 
newSt.StorageInstance(storageTag) + c.Assert(err, jc.ErrorIsNil) + + c.Check(instance.Tag(), gc.Equals, original.Tag()) + c.Check(instance.Kind(), gc.Equals, original.Kind()) + c.Check(instance.Life(), gc.Equals, original.Life()) + c.Check(instance.StorageName(), gc.Equals, original.StorageName()) + c.Check(state.StorageAttachmentCount(instance), gc.Equals, originalCount) + + attachments, err := newSt.StorageAttachments(storageTag) + + c.Assert(attachments, gc.HasLen, 1) + c.Assert(attachments[0].Unit(), gc.Equals, u.UnitTag()) +} + +func (s *MigrationImportSuite) TestStoragePools(c *gc.C) { + pm := poolmanager.New(state.NewStateSettings(s.State), provider.CommonStorageProviders()) + _, err := pm.Create("test-pool", provider.LoopProviderType, map[string]interface{}{ + "value": 42, + }) + c.Assert(err, jc.ErrorIsNil) + + _, newSt := s.importModel(c) + + pm = poolmanager.New(state.NewStateSettings(newSt), provider.CommonStorageProviders()) + pools, err := pm.List() + c.Assert(err, jc.ErrorIsNil) + c.Assert(pools, gc.HasLen, 1) + + pool := pools[0] + c.Assert(pool.Name(), gc.Equals, "test-pool") + c.Assert(pool.Provider(), gc.Equals, provider.LoopProviderType) + c.Assert(pool.Attrs(), jc.DeepEquals, map[string]interface{}{ + "value": 42, + }) +} + +func (s *MigrationImportSuite) TestPayloads(c *gc.C) { + originalUnit := s.Factory.MakeUnit(c, nil) + unitID := originalUnit.UnitTag().Id() + up, err := s.State.UnitPayloads(originalUnit) + c.Assert(err, jc.ErrorIsNil) + original := payload.Payload{ + PayloadClass: charm.PayloadClass{ + Name: "something", + Type: "special", + }, + ID: "42", + Status: "running", + Labels: []string{"foo", "bar"}, + } + err = up.Track(original) + c.Assert(err, jc.ErrorIsNil) + + _, newSt := s.importModel(c) + + unit, err := newSt.Unit(unitID) + c.Assert(err, jc.ErrorIsNil) + + up, err = newSt.UnitPayloads(unit) + c.Assert(err, jc.ErrorIsNil) + + result, err := up.List() + c.Assert(err, jc.ErrorIsNil) + c.Assert(result, gc.HasLen, 1) + 
c.Assert(result[0].Payload, gc.NotNil) + + payload := result[0].Payload + + machineID, err := unit.AssignedMachineId() + c.Check(err, jc.ErrorIsNil) + c.Check(payload.Name, gc.Equals, original.Name) + c.Check(payload.Type, gc.Equals, original.Type) + c.Check(payload.ID, gc.Equals, original.ID) + c.Check(payload.Status, gc.Equals, original.Status) + c.Check(payload.Labels, jc.DeepEquals, original.Labels) + c.Check(payload.Unit, gc.Equals, unitID) + c.Check(payload.Machine, gc.Equals, machineID) +} + // newModel replaces the uuid and name of the config attributes so we // can use all the other data to validate imports. An owner and name of the // model are unique together in a controller. diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/migration_internal_test.go juju-core-2.0.0/src/github.com/juju/juju/state/migration_internal_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/migration_internal_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/migration_internal_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,11 +4,11 @@ package state import ( - "reflect" - "github.com/juju/utils/set" gc "gopkg.in/check.v1" "gopkg.in/juju/charm.v6-unstable" + + "github.com/juju/juju/testing" ) type MigrationSuite struct{} @@ -19,6 +19,7 @@ completedCollections := set.NewStrings( annotationsC, blocksC, + cloudimagemetadataC, constraintsC, modelsC, modelUsersC, @@ -35,14 +36,12 @@ machinesC, openedPortsC, - // service / unit + // application / unit leasesC, applicationsC, unitsC, meterStatusC, // red / green status for metrics of units - - // settings reference counts are only used for applications - settingsrefsC, + payloadsC, // relation relationsC, @@ -56,8 +55,19 @@ // storage blockDevicesC, + + // cloudimagemetadata + cloudimagemetadataC, + + // actions + actionsC, + + // storage filesystemsC, filesystemAttachmentsC, + storageAttachmentsC, + storageConstraintsC, + storageInstancesC, volumesC, 
volumeAttachmentsC, ) @@ -67,6 +77,9 @@ // machine removals. cleanupsC, machineRemovalsC, + // The autocert cache is non-critical. After migration + // you'll just need to acquire new certificates. + autocertCacheC, // We don't export the controller model at this stage. controllersC, // Clouds aren't migrated. They must exist in the @@ -92,6 +105,9 @@ metricsC, // Backup and restore information is not migrated. restoreInfoC, + // reference counts are implementation details that should be + // reconstructed on the other side. + refcountsC, // upgradeInfoC is used to coordinate upgrades and schema migrations, // and aren't needed for model migrations. upgradeInfoC, @@ -136,6 +152,9 @@ // These are recreated whilst migrating other network entities. providerIDsC, linkLayerDevicesRefsC, + + // Recreated whilst migrating actions. + actionNotificationsC, ) // THIS SET WILL BE REMOVED WHEN MIGRATIONS ARE COMPLETE @@ -143,27 +162,14 @@ // model configuration globalSettingsC, - // model - cloudimagemetadataC, - // machine rebootC, // service / unit charmsC, - "payloads", "resources", endpointBindingsC, - // storage - storageInstancesC, - storageAttachmentsC, - storageConstraintsC, - - // actions - actionsC, - actionNotificationsC, - // uncategorised metricsManagerC, // should really be copied across auditingC, @@ -194,9 +200,9 @@ "Name", // Life will always be alive, or we won't be migrating. "Life", - // ServerUUID is recreated when the new model is created in the - // new controller (yay name changes). - "ServerUUID", + // ControllerUUID is recreated when the new model + // is created in the new controller (yay name changes). 
+ "ControllerUUID", "MigrationMode", "Owner", @@ -208,7 +214,7 @@ s.AssertExportedFields(c, modelDoc{}, fields) } -func (s *MigrationSuite) TestEnvUserDocFields(c *gc.C) { +func (s *MigrationSuite) TestUserAccessDocFields(c *gc.C) { fields := set.NewStrings( // ID is the same as UserName (but lowercased) "ID", @@ -249,7 +255,7 @@ } func (s *MigrationSuite) TestMachineDocFields(c *gc.C) { - fields := set.NewStrings( + ignored := set.NewStrings( // DocID is the env + machine id "DocID", // ID is the machine id @@ -259,13 +265,24 @@ "ModelUUID", // Life is always alive, confirmed by export precheck. "Life", - + // NoVote and HasVote only matter for machines with manage state job + // and we don't support migrating the controller model. + "NoVote", + "HasVote", + // Ignored at this stage, could be an issue if mongo 3.0 isn't + // available. + "StopMongoUntilVersion", + ) + migrated := set.NewStrings( "Addresses", "ContainerType", "Jobs", "MachineAddresses", "Nonce", "PasswordHash", + "Clean", + "Volumes", + "Filesystems", "Placement", "PreferredPrivateAddress", "PreferredPublicAddress", @@ -274,19 +291,8 @@ "SupportedContainers", "SupportedContainersKnown", "Tools", - - // Ignored at this stage, could be an issue if mongo 3.0 isn't - // available. 
- "StopMongoUntilVersion", - ) - todo := set.NewStrings( - "Volumes", - "NoVote", - "Clean", - "Filesystems", - "HasVote", ) - s.AssertExportedFields(c, machineDoc{}, fields.Union(todo)) + s.AssertExportedFields(c, machineDoc{}, migrated.Union(ignored)) } func (s *MigrationSuite) TestInstanceDataFields(c *gc.C) { @@ -311,7 +317,7 @@ s.AssertExportedFields(c, instanceData{}, fields) } -func (s *MigrationSuite) TestServiceDocFields(c *gc.C) { +func (s *MigrationSuite) TestApplicationDocFields(c *gc.C) { ignored := set.NewStrings( // DocID is the env + name "DocID", @@ -343,47 +349,30 @@ s.AssertExportedFields(c, applicationDoc{}, migrated.Union(ignored)) } -func (s *MigrationSuite) TestSettingsRefsDocFields(c *gc.C) { - fields := set.NewStrings( - // ModelUUID shouldn't be exported, and is inherited - // from the model definition. - "ModelUUID", - - "RefCount", - ) - s.AssertExportedFields(c, settingsRefsDoc{}, fields) -} - func (s *MigrationSuite) TestUnitDocFields(c *gc.C) { - fields := set.NewStrings( - // DocID itself isn't migrated - "DocID", - "Name", - // ModelUUID shouldn't be exported, and is inherited - // from the model definition. + ignored := set.NewStrings( "ModelUUID", + "DocID", + "Life", // Application is implicit in the migration structure through containment. "Application", - // Series, CharmURL, and Channel also come from the service. + // Resolved is not migrated as we check that all is good before we start. + "Resolved", + // Series and CharmURL also come from the service. "Series", "CharmURL", + "TxnRevno", + ) + migrated := set.NewStrings( + "Name", "Principal", "Subordinates", + "StorageAttachmentCount", "MachineId", - // Resolved is not migrated as we check that all is good before we start. - "Resolved", "Tools", - // Life isn't migrated as we only migrate live things. - "Life", - // TxnRevno isn't migrated. 
- "TxnRevno", "PasswordHash", ) - todo := set.NewStrings( - "StorageAttachmentCount", - ) - - s.AssertExportedFields(c, unitDoc{}, fields.Union(todo)) + s.AssertExportedFields(c, unitDoc{}, migrated.Union(ignored)) } func (s *MigrationSuite) TestPortsDocFields(c *gc.C) { @@ -647,6 +636,25 @@ s.AssertExportedFields(c, sshHostKeysDoc{}, migrated.Union(ignored)) } +func (s *MigrationSuite) TestActionDocFields(c *gc.C) { + ignored := set.NewStrings( + "ModelUUID", + ) + migrated := set.NewStrings( + "DocId", + "Receiver", + "Name", + "Enqueued", + "Started", + "Completed", + "Parameters", + "Results", + "Message", + "Status", + ) + s.AssertExportedFields(c, actionDoc{}, migrated.Union(ignored)) +} + func (s *MigrationSuite) TestVolumeDocFields(c *gc.C) { ignored := set.NewStrings( "ModelUUID", @@ -655,13 +663,13 @@ ) migrated := set.NewStrings( "Name", + "StorageId", "AttachmentCount", // through count of attachment instances "Binding", "Info", "Params", ) - todo := set.NewStrings("StorageId") - s.AssertExportedFields(c, volumeDoc{}, migrated.Union(ignored).Union(todo)) + s.AssertExportedFields(c, volumeDoc{}, migrated.Union(ignored)) // The info and params fields ar structs. 
s.AssertExportedFields(c, VolumeInfo{}, set.NewStrings( "HardwareId", "Size", "Pool", "VolumeId", "Persistent")) @@ -732,8 +740,63 @@ "Location", "ReadOnly")) } +func (s *MigrationSuite) TestStorageInstanceDocFields(c *gc.C) { + ignored := set.NewStrings( + "ModelUUID", + "DocID", + "Life", + ) + migrated := set.NewStrings( + "Id", + "Kind", + "Owner", + "StorageName", + "AttachmentCount", // through count of attachment instances + ) + s.AssertExportedFields(c, storageInstanceDoc{}, migrated.Union(ignored)) +} + +func (s *MigrationSuite) TestStorageAttachmentDocFields(c *gc.C) { + ignored := set.NewStrings( + "ModelUUID", + "DocID", + "Life", + ) + migrated := set.NewStrings( + "Unit", + "StorageInstance", + ) + s.AssertExportedFields(c, storageAttachmentDoc{}, migrated.Union(ignored)) +} + +func (s *MigrationSuite) TestStorageConstraintsDocFields(c *gc.C) { + ignored := set.NewStrings( + "ModelUUID", + "DocID", + ) + migrated := set.NewStrings( + "Constraints", + ) + s.AssertExportedFields(c, storageConstraintsDoc{}, migrated.Union(ignored)) +} + +func (s *MigrationSuite) TestPayloadDocFields(c *gc.C) { + definedThroughContainment := set.NewStrings( + "UnitID", + "MachineID", + ) + migrated := set.NewStrings( + "Name", + "Type", + "RawID", + "State", + "Labels", + ) + s.AssertExportedFields(c, payloadDoc{}, migrated.Union(definedThroughContainment)) +} + func (s *MigrationSuite) AssertExportedFields(c *gc.C, doc interface{}, fields set.Strings) { - expected := getExportedFields(doc) + expected := testing.GetExportedFields(doc) unknown := expected.Difference(fields) removed := fields.Difference(expected) // If this test fails, it means that extra fields have been added to the @@ -741,20 +804,3 @@ c.Check(unknown, gc.HasLen, 0) c.Assert(removed, gc.HasLen, 0) } - -func getExportedFields(arg interface{}) set.Strings { - t := reflect.TypeOf(arg) - result := set.NewStrings() - - count := t.NumField() - for i := 0; i < count; i++ { - f := t.Field(i) - // empty PkgPath 
means exported field. - // see https://golang.org/pkg/reflect/#StructField - if f.PkgPath == "" { - result.Add(f.Name) - } - } - - return result -} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/modelconfig.go juju-core-2.0.0/src/github.com/juju/juju/state/modelconfig.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/modelconfig.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/modelconfig.go 2016-10-13 14:31:49.000000000 +0000 @@ -5,8 +5,10 @@ import ( "github.com/juju/errors" + "github.com/juju/schema" "github.com/juju/juju/controller" + "github.com/juju/juju/environs" "github.com/juju/juju/environs/config" ) @@ -49,7 +51,11 @@ // inheritedConfigAttributes returns the merged collection of inherited config // values used as model defaults when adding models or unsetting values. func (st *State) inheritedConfigAttributes() (map[string]interface{}, error) { - configSources := modelConfigSources(st) + rspec, err := st.regionSpec() + if err != nil { + return nil, errors.Trace(err) + } + configSources := modelConfigSources(st, rspec) values := make(attrValues) for _, src := range configSources { cfg, err := src.sourceFunc() @@ -76,7 +82,11 @@ // Read all of the current inherited config values so // we can dynamically reflect the origin of the model config. - configSources := modelConfigSources(st) + rspec, err := st.regionSpec() + if err != nil { + return nil, errors.Trace(err) + } + configSources := modelConfigSources(st, rspec) sourceNames := make([]string, 0, len(configSources)) sourceAttrs := make([]attrValues, 0, len(configSources)) for _, src := range configSources { @@ -124,10 +134,25 @@ } // UpdateModelConfigDefaultValues updates the inherited settings used when creating a new model. 
-func (st *State) UpdateModelConfigDefaultValues(attrs map[string]interface{}, removed []string) error { - settings, err := readSettings(st, globalSettingsC, controllerInheritedSettingsGlobalKey) +func (st *State) UpdateModelConfigDefaultValues(attrs map[string]interface{}, removed []string, regionSpec *environs.RegionSpec) error { + var key string + + if regionSpec != nil { + key = regionSettingsGlobalKey(regionSpec.Cloud, regionSpec.Region) + } else { + key = controllerInheritedSettingsGlobalKey + } + settings, err := readSettings(st, globalSettingsC, key) if err != nil { - return errors.Trace(err) + if !errors.IsNotFound(err) { + return errors.Trace(err) + } + // We haven't created settings for this region yet. + _, err := createSettings(st, globalSettingsC, key, attrs) + if err != nil { + return errors.Trace(err) + } + return nil } // TODO(axw) 2013-12-6 #1167616 @@ -156,8 +181,61 @@ // ModelConfigDefaultValues returns the default config values to be used // when creating a new model, and the origin of those values. 
-func (st *State) ModelConfigDefaultValues() (config.ConfigValues, error) { - return st.modelConfigValues(nil) +func (st *State) ModelConfigDefaultValues() (config.ModelDefaultAttributes, error) { + model, err := st.Model() + if err != nil { + return nil, errors.Trace(err) + } + cloudName := model.Cloud() + cloud, err := st.Cloud(cloudName) + if err != nil { + return nil, errors.Trace(err) + } + + result := make(config.ModelDefaultAttributes) + // Juju defaults + defaultAttrs, err := st.defaultInheritedConfig() + if err != nil { + return nil, errors.Trace(err) + } + for k, v := range defaultAttrs { + result[k] = config.AttributeDefaultValues{Default: v} + } + // Controller config + ciCfg, err := st.controllerInheritedConfig() + if err != nil && !errors.IsNotFound(err) { + return nil, errors.Trace(err) + + } + for k, v := range ciCfg { + if ds, ok := result[k]; ok { + ds.Controller = v + result[k] = ds + } else { + result[k] = config.AttributeDefaultValues{Controller: v} + } + } + // Region config + for _, region := range cloud.Regions { + rspec := &environs.RegionSpec{Cloud: cloudName, Region: region.Name} + riCfg, err := st.regionInheritedConfig(rspec)() + if err != nil { + if errors.IsNotFound(err) { + continue + } + return nil, errors.Trace(err) + } + for k, v := range riCfg { + regCfg := config.RegionDefaultValue{Name: region.Name, Value: v} + if ds, ok := result[k]; ok { + ds.Regions = append(result[k].Regions, regCfg) + result[k] = ds + } else { + result[k] = config.AttributeDefaultValues{Regions: []config.RegionDefaultValue{regCfg}} + } + } + } + return result, nil } // checkControllerInheritedConfig returns an error if the shared local cloud config is definitely invalid. @@ -281,11 +359,11 @@ // sources, in hierarchical order. Starting from the first source, // config is retrieved and each subsequent source adds to the // overall config values, later values override earlier ones. 
-func modelConfigSources(st *State) []modelConfigSource { +func modelConfigSources(st *State, regionSpec *environs.RegionSpec) []modelConfigSource { return []modelConfigSource{ - {config.JujuDefaultSource, func() (attrValues, error) { return config.ConfigDefaults(), nil }}, + {config.JujuDefaultSource, st.defaultInheritedConfig}, {config.JujuControllerSource, st.controllerInheritedConfig}, - // We will also support local cloud region, tenant, user etc + {config.JujuRegionSource, st.regionInheritedConfig(regionSpec)}, } } @@ -294,6 +372,30 @@ controllerInheritedSettingsGlobalKey = "controller" ) +// defaultInheritedConfig returns config values which are defined +// as defaults in either Juju or the state's environ provider. +func (st *State) defaultInheritedConfig() (attrValues, error) { + var defaults = make(map[string]interface{}) + for k, v := range config.ConfigDefaults() { + defaults[k] = v + } + providerDefaults, err := st.environsProviderConfigSchemaSource() + if errors.IsNotImplemented(err) { + return defaults, nil + } else if err != nil { + return nil, errors.Trace(err) + } + fields := schema.FieldMap(providerDefaults.ConfigSchema(), providerDefaults.ConfigDefaults()) + if coercedAttrs, err := fields.Coerce(defaults, nil); err != nil { + return nil, errors.Trace(err) + } else { + for k, v := range coercedAttrs.(map[string]interface{}) { + defaults[k] = v + } + } + return defaults, nil +} + // controllerInheritedConfig returns the inherited config values // sourced from the local cloud config. func (st *State) controllerInheritedConfig() (attrValues, error) { @@ -304,6 +406,48 @@ return settings.Map(), nil } +// regionInheritedConfig returns the configuration attributes for the region in +// the cloud where the model is targeted. 
+func (st *State) regionInheritedConfig(regionSpec *environs.RegionSpec) func() (attrValues, error) { + if regionSpec == nil { + return func() (attrValues, error) { + return nil, errors.New( + "no environs.RegionSpec provided") + } + } + if regionSpec.Region == "" { + // It is expected that not all clouds have regions. So return not found + // if there is not a region here. + return func() (attrValues, error) { + return nil, errors.NotFoundf("region") + } + } + return func() (attrValues, error) { + settings, err := readSettings(st, + globalSettingsC, + regionSettingsGlobalKey(regionSpec.Cloud, regionSpec.Region), + ) + if err != nil { + return nil, errors.Trace(err) + } + return settings.Map(), nil + } +} + +// regionSpec returns a suitable environs.RegionSpec for use in +// regionInheritedConfig. +func (st *State) regionSpec() (*environs.RegionSpec, error) { + model, err := st.Model() + if err != nil { + return nil, errors.Trace(err) + } + rspec := &environs.RegionSpec{ + Cloud: model.Cloud(), + Region: model.CloudRegion(), + } + return rspec, nil +} + // composeModelConfigAttributes returns a set of model config settings composed from known // sources of default values overridden by model specific attributes. func composeModelConfigAttributes( @@ -335,7 +479,7 @@ // ComposeNewModelConfig returns a complete map of config attributes suitable for // creating a new model, by combining user specified values with system defaults. -func (st *State) ComposeNewModelConfig(modelAttr map[string]interface{}) (map[string]interface{}, error) { - configSources := modelConfigSources(st) +func (st *State) ComposeNewModelConfig(modelAttr map[string]interface{}, regionSpec *environs.RegionSpec) (map[string]interface{}, error) { + configSources := modelConfigSources(st, regionSpec) return composeModelConfigAttributes(modelAttr, configSources...) 
} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/modelconfig_test.go juju-core-2.0.0/src/github.com/juju/juju/state/modelconfig_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/modelconfig_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/modelconfig_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -13,7 +13,9 @@ gc "gopkg.in/check.v1" "gopkg.in/juju/names.v2" + "github.com/juju/juju/cloud" "github.com/juju/juju/constraints" + "github.com/juju/juju/environs" "github.com/juju/juju/environs/config" "github.com/juju/juju/mongo/mongotest" "github.com/juju/juju/state" @@ -32,6 +34,17 @@ s.ControllerInheritedConfig = map[string]interface{}{ "apt-mirror": "http://cloud-mirror", } + s.RegionConfig = cloud.RegionConfig{ + "nether-region": cloud.Attrs{ + "apt-mirror": "http://nether-region-mirror", + "no-proxy": "nether-proxy", + }, + "dummy-region": cloud.Attrs{ + "no-proxy": "dummy-proxy", + "image-stream": "dummy-image-stream", + "whimsy-key": "whimsy-value", + }, + } s.ConnSuite.SetUpTest(c) s.policy.GetConstraintsValidator = func() (constraints.Validator, error) { validator := constraints.NewValidator() @@ -39,6 +52,9 @@ validator.RegisterUnsupported([]string{constraints.CpuPower}) return validator, nil } + s.policy.GetProviderConfigSchemaSource = func() (config.ConfigSchemaSource, error) { + return &statetesting.MockConfigSchemaSource{}, nil + } } func (s *ModelConfigSuite) TestAdditionalValidation(c *gc.C) { @@ -98,12 +114,68 @@ "name": "test", "resource-tags": map[string]string{"a": "b", "c": "d"}, } - cfgAttrs, err := s.State.ComposeNewModelConfig(attrs) + + cfgAttrs, err := s.State.ComposeNewModelConfig( + attrs, &environs.RegionSpec{ + Cloud: "dummy", + Region: "dummy-region"}) + c.Assert(err, jc.ErrorIsNil) + expectedCfg, err := config.New(config.UseDefaults, attrs) + c.Assert(err, jc.ErrorIsNil) + expected := expectedCfg.AllAttrs() + expected["apt-mirror"] = "http://cloud-mirror" + 
expected["providerAttr"] = "vulch" + expected["whimsy-key"] = "whimsy-value" + expected["image-stream"] = "dummy-image-stream" + expected["no-proxy"] = "dummy-proxy" + // config.New() adds logging-config so remove it. + expected["logging-config"] = "" + c.Assert(cfgAttrs, jc.DeepEquals, expected) +} + +func (s *ModelConfigSuite) TestComposeNewModelConfigRegionMisses(c *gc.C) { + attrs := map[string]interface{}{ + "authorized-keys": "different-keys", + "arbitrary-key": "shazam!", + "uuid": testing.ModelTag.Id(), + "type": "dummy", + "name": "test", + "resource-tags": map[string]string{"a": "b", "c": "d"}, + } + rspec := &environs.RegionSpec{Cloud: "dummy", Region: "dummy-region"} + cfgAttrs, err := s.State.ComposeNewModelConfig(attrs, rspec) c.Assert(err, jc.ErrorIsNil) expectedCfg, err := config.New(config.UseDefaults, attrs) c.Assert(err, jc.ErrorIsNil) expected := expectedCfg.AllAttrs() expected["apt-mirror"] = "http://cloud-mirror" + expected["providerAttr"] = "vulch" + expected["whimsy-key"] = "whimsy-value" + expected["no-proxy"] = "dummy-proxy" + expected["image-stream"] = "dummy-image-stream" + // config.New() adds logging-config so remove it. 
+ expected["logging-config"] = "" + c.Assert(cfgAttrs, jc.DeepEquals, expected) +} + +func (s *ModelConfigSuite) TestComposeNewModelConfigRegionInherits(c *gc.C) { + attrs := map[string]interface{}{ + "authorized-keys": "different-keys", + "arbitrary-key": "shazam!", + "uuid": testing.ModelTag.Id(), + "type": "dummy", + "name": "test", + "resource-tags": map[string]string{"a": "b", "c": "d"}, + } + rspec := &environs.RegionSpec{Cloud: "dummy", Region: "nether-region"} + cfgAttrs, err := s.State.ComposeNewModelConfig(attrs, rspec) + c.Assert(err, jc.ErrorIsNil) + expectedCfg, err := config.New(config.UseDefaults, attrs) + c.Assert(err, jc.ErrorIsNil) + expected := expectedCfg.AllAttrs() + expected["no-proxy"] = "nether-proxy" + expected["apt-mirror"] = "http://nether-region-mirror" + expected["providerAttr"] = "vulch" // config.New() adds logging-config so remove it. expected["logging-config"] = "" c.Assert(cfgAttrs, jc.DeepEquals, expected) @@ -117,18 +189,22 @@ func (s *ModelConfigSuite) TestUpdateModelConfigRemoveInherited(c *gc.C) { attrs := map[string]interface{}{ - "apt-mirror": "http://different-mirror", + "apt-mirror": "http://different-mirror", // controller "arbitrary-key": "shazam!", + "providerAttr": "beef", // provider + "whimsy-key": "eggs", // region } err := s.State.UpdateModelConfig(attrs, nil, nil) c.Assert(err, jc.ErrorIsNil) - err = s.State.UpdateModelConfig(nil, []string{"apt-mirror", "arbitrary-key"}, nil) + err = s.State.UpdateModelConfig(nil, []string{"apt-mirror", "arbitrary-key", "providerAttr", "whimsy-key"}, nil) c.Assert(err, jc.ErrorIsNil) cfg, err := s.State.ModelConfig() c.Assert(err, jc.ErrorIsNil) allAttrs := cfg.AllAttrs() c.Assert(allAttrs["apt-mirror"], gc.Equals, "http://cloud-mirror") + c.Assert(allAttrs["providerAttr"], gc.Equals, "vulch") + c.Assert(allAttrs["whimsy-key"], gc.Equals, "whimsy-value") _, ok := allAttrs["arbitrary-key"] c.Assert(ok, jc.IsFalse) } @@ -159,20 +235,23 @@ func (s *ModelConfigSuite) 
TestUpdateModelConfigPreferredOverRemove(c *gc.C) { attrs := map[string]interface{}{ - "apt-mirror": "http://different-mirror", + "apt-mirror": "http://different-mirror", // controller "arbitrary-key": "shazam!", + "providerAttr": "beef", // provider } err := s.State.UpdateModelConfig(attrs, nil, nil) c.Assert(err, jc.ErrorIsNil) err = s.State.UpdateModelConfig(map[string]interface{}{ - "apt-mirror": "http://another-mirror", + "apt-mirror": "http://another-mirror", + "providerAttr": "pork", }, []string{"apt-mirror", "arbitrary-key"}, nil) c.Assert(err, jc.ErrorIsNil) cfg, err := s.State.ModelConfig() c.Assert(err, jc.ErrorIsNil) allAttrs := cfg.AllAttrs() c.Assert(allAttrs["apt-mirror"], gc.Equals, "http://another-mirror") + c.Assert(allAttrs["providerAttr"], gc.Equals, "pork") _, ok := allAttrs["arbitrary-key"] c.Assert(ok, jc.IsFalse) } @@ -188,6 +267,12 @@ "apt-mirror": "http://cloud-mirror", "http-proxy": "http://proxy", } + s.RegionConfig = cloud.RegionConfig{ + "dummy-region": cloud.Attrs{ + "apt-mirror": "http://dummy-mirror", + "no-proxy": "dummy-proxy", + }, + } s.ConnSuite.SetUpTest(c) localControllerSettings, err := s.State.ReadSettings(state.GlobalSettingsC, state.ControllerInheritedSettingsGlobalKey) @@ -236,7 +321,7 @@ }) owner := names.NewUserTag("test@remote") _, st, err := s.State.NewModel(state.ModelArgs{ - Config: cfg, Owner: owner, CloudName: "dummy", + Config: cfg, Owner: owner, CloudName: "dummy", CloudRegion: "nether-region", StorageProviderRegistry: storage.StaticProviderRegistry{}, }) c.Assert(err, jc.ErrorIsNil) @@ -306,22 +391,30 @@ } func (s *ModelConfigSourceSuite) TestModelConfigDefaults(c *gc.C) { - expectedValues := make(config.ConfigValues) + expectedValues := make(config.ModelDefaultAttributes) for attr, val := range config.ConfigDefaults() { - source := "default" - expectedValues[attr] = config.ConfigValue{ - Value: val, - Source: source, + expectedValues[attr] = config.AttributeDefaultValues{ + Default: val, } } - 
expectedValues["http-proxy"] = config.ConfigValue{ - Value: "http://proxy", - Source: "controller", - } - expectedValues["apt-mirror"] = config.ConfigValue{ - Value: "http://mirror", - Source: "controller", - } + ds := expectedValues["http-proxy"] + ds.Controller = "http://proxy" + expectedValues["http-proxy"] = ds + + ds = expectedValues["apt-mirror"] + ds.Controller = "http://mirror" + ds.Regions = []config.RegionDefaultValue{{ + Name: "dummy-region", + Value: "http://dummy-mirror", + }} + expectedValues["apt-mirror"] = ds + + ds = expectedValues["no-proxy"] + ds.Regions = []config.RegionDefaultValue{{ + Name: "dummy-region", + Value: "dummy-proxy"}} + expectedValues["no-proxy"] = ds + sources, err := s.State.ModelConfigDefaultValues() c.Assert(err, jc.ErrorIsNil) c.Assert(sources, jc.DeepEquals, expectedValues) @@ -333,34 +426,147 @@ "http-proxy": "http://http-proxy", "https-proxy": "https://https-proxy", } - err := s.State.UpdateModelConfigDefaultValues(attrs, nil) + err := s.State.UpdateModelConfigDefaultValues(attrs, nil, nil) c.Assert(err, jc.ErrorIsNil) attrs = map[string]interface{}{ "apt-mirror": "http://different-mirror", } - err = s.State.UpdateModelConfigDefaultValues(attrs, []string{"http-proxy", "https-proxy"}) + err = s.State.UpdateModelConfigDefaultValues(attrs, []string{"http-proxy", "https-proxy"}, nil) c.Assert(err, jc.ErrorIsNil) info := statetesting.NewMongoInfo() - anotherState, err := state.Open(s.modelTag, info, mongotest.DialOpts(), state.NewPolicyFunc(nil)) + anotherState, err := state.Open(s.modelTag, s.State.ControllerTag(), info, mongotest.DialOpts(), state.NewPolicyFunc(nil)) c.Assert(err, jc.ErrorIsNil) defer anotherState.Close() cfg, err := anotherState.ModelConfigDefaultValues() c.Assert(err, jc.ErrorIsNil) - expectedValues := make(config.ConfigValues) + expectedValues := make(config.ModelDefaultAttributes) for attr, val := range config.ConfigDefaults() { - expectedValues[attr] = config.ConfigValue{ - Value: val, - Source: 
"default", + expectedValues[attr] = config.AttributeDefaultValues{ + Default: val, } } delete(expectedValues, "http-mirror") delete(expectedValues, "https-mirror") - expectedValues["apt-mirror"] = config.ConfigValue{ - Value: "http://different-mirror", - Source: "controller", + expectedValues["apt-mirror"] = config.AttributeDefaultValues{ + Controller: "http://different-mirror", + Default: "", + Regions: []config.RegionDefaultValue{{ + Name: "dummy-region", + Value: "http://dummy-mirror", + }}} + expectedValues["no-proxy"] = config.AttributeDefaultValues{ + Default: "", + Regions: []config.RegionDefaultValue{{ + Name: "dummy-region", + Value: "dummy-proxy", + }}} + c.Assert(cfg, jc.DeepEquals, expectedValues) +} + +func (s *ModelConfigSourceSuite) TestUpdateModelConfigRegionDefaults(c *gc.C) { + // The test env is setup with dummy/dummy-region having a no-proxy + // dummy-proxy value and nether-region with a nether-proxy value. + // + // First we change the no-proxy setting in dummy-region + attrs := map[string]interface{}{ + "no-proxy": "changed-proxy", + } + + rspec, err := environs.NewRegionSpec("dummy", "dummy-region") + c.Assert(err, jc.ErrorIsNil) + + err = s.State.UpdateModelConfigDefaultValues(attrs, nil, rspec) + c.Assert(err, jc.ErrorIsNil) + + // Then check in another state. 
+ info := statetesting.NewMongoInfo() + anotherState, err := state.Open(s.modelTag, s.State.ControllerTag(), info, mongotest.DialOpts(), state.NewPolicyFunc(nil)) + c.Assert(err, jc.ErrorIsNil) + defer anotherState.Close() + + cfg, err := anotherState.ModelConfigDefaultValues() + c.Assert(err, jc.ErrorIsNil) + expectedValues := make(config.ModelDefaultAttributes) + for attr, val := range config.ConfigDefaults() { + expectedValues[attr] = config.AttributeDefaultValues{ + Default: val, + } + } + expectedValues["http-proxy"] = config.AttributeDefaultValues{ + Controller: "http://proxy", + Default: "", + } + expectedValues["apt-mirror"] = config.AttributeDefaultValues{ + Controller: "http://mirror", + Default: "", + Regions: []config.RegionDefaultValue{{ + Name: "dummy-region", + Value: "http://dummy-mirror", + }}} + expectedValues["no-proxy"] = config.AttributeDefaultValues{ + Default: "", + Regions: []config.RegionDefaultValue{{ + Name: "dummy-region", + Value: "changed-proxy", + }}} + c.Assert(cfg, jc.DeepEquals, expectedValues) + + // remove the dummy-region setting + err = s.State.UpdateModelConfigDefaultValues(nil, []string{"no-proxy"}, rspec) + + // and check again + cfg, err = anotherState.ModelConfigDefaultValues() + c.Assert(err, jc.ErrorIsNil) + cfg, err = anotherState.ModelConfigDefaultValues() + c.Assert(err, jc.ErrorIsNil) + expectedValues = make(config.ModelDefaultAttributes) + for attr, val := range config.ConfigDefaults() { + expectedValues[attr] = config.AttributeDefaultValues{ + Default: val, + } } + expectedValues["http-proxy"] = config.AttributeDefaultValues{ + Controller: "http://proxy", + Default: "", + } + expectedValues["apt-mirror"] = config.AttributeDefaultValues{ + Controller: "http://mirror", + Default: "", + Regions: []config.RegionDefaultValue{{ + Name: "dummy-region", + Value: "http://dummy-mirror", + }}} c.Assert(cfg, jc.DeepEquals, expectedValues) } + +func (s *ModelConfigSourceSuite) TestUpdateModelConfigDefaultValuesUnknownRegion(c 
*gc.C) { + // Set up settings to create + attrs := map[string]interface{}{ + "no-proxy": "changed-proxy", + } + + rspec, err := environs.NewRegionSpec("dummy", "unused-region") + c.Assert(err, jc.ErrorIsNil) + + // We add this to the unused-region which has not been created in mongo + // yet. + err = s.State.UpdateModelConfigDefaultValues(attrs, nil, rspec) + c.Assert(err, jc.ErrorIsNil) + + // Then check config. + cfg, err := s.State.ModelConfigDefaultValues() + c.Assert(err, jc.ErrorIsNil) + c.Assert(cfg["no-proxy"], jc.DeepEquals, config.AttributeDefaultValues{ + Default: "", + Controller: nil, + Regions: []config.RegionDefaultValue{ + config.RegionDefaultValue{ + Name: "dummy-region", + Value: "dummy-proxy"}, + config.RegionDefaultValue{ + Name: "unused-region", + Value: "changed-proxy"}}}) +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/model.go juju-core-2.0.0/src/github.com/juju/juju/state/model.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/model.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/model.go 2016-10-13 14:31:49.000000000 +0000 @@ -5,7 +5,6 @@ import ( "fmt" - "strings" "github.com/juju/errors" jujutxn "github.com/juju/txn" @@ -17,9 +16,9 @@ jujucloud "github.com/juju/juju/cloud" "github.com/juju/juju/constraints" - "github.com/juju/juju/core/description" "github.com/juju/juju/environs/config" "github.com/juju/juju/mongo" + "github.com/juju/juju/permission" "github.com/juju/juju/status" "github.com/juju/juju/storage" ) @@ -28,13 +27,18 @@ // settings and constraints. const modelGlobalKey = "e" +// modelKey will create the kei for a given model using the modelGlobalKey. +func modelKey(modelUUID string) string { + return fmt.Sprintf("%s#%s", modelGlobalKey, modelUUID) +} + // MigrationMode specifies where the Model is with respect to migration. 
type MigrationMode string const ( - // MigrationModeActive is the default mode for a model and reflects - // a model that is active within its controller. - MigrationModeActive MigrationMode = "" + // MigrationModeNone is the default mode for a model and reflects + // that it isn't involved with a model migration. + MigrationModeNone MigrationMode = "" // MigrationModeExporting reflects a model that is in the process of being // exported from one controller to another. @@ -57,12 +61,12 @@ // modelDoc represents the internal state of the model in MongoDB. type modelDoc struct { - UUID string `bson:"_id"` - Name string - Life Life - Owner string `bson:"owner"` - ServerUUID string `bson:"server-uuid"` - MigrationMode MigrationMode `bson:"migration-mode"` + UUID string `bson:"_id"` + Name string + Life Life + Owner string `bson:"owner"` + ControllerUUID string `bson:"controller-uuid"` + MigrationMode MigrationMode `bson:"migration-mode"` // Cloud is the name of the cloud to which the model is deployed. Cloud string `bson:"cloud"` @@ -71,7 +75,7 @@ // deployed. This will be empty for clouds that do not support regions. CloudRegion string `bson:"cloud-region,omitempty"` - // CloudCredential is the name of the cloud credential that is used + // CloudCredential is the ID of the cloud credential that is used // for managing cloud resources for this model. This will be empty // for clouds that do not require credentials. CloudCredential string `bson:"cloud-credential,omitempty"` @@ -89,8 +93,8 @@ // Machines contains the names of the top-level machines in the model. Machines []string `bson:"machines"` - // Services contains the names of the services in the model. - Services []string `bson:"applications"` + // Applicatons contains the names of the applications in the model. + Applications []string `bson:"applications"` } // ControllerModel returns the model that was bootstrapped. 
@@ -137,14 +141,14 @@ models, closer := st.getCollection(modelsC) defer closer() - var envDocs []modelDoc - err := models.Find(nil).All(&envDocs) + var modelDocs []modelDoc + err := models.Find(nil).Sort("name", "owner").All(&modelDocs) if err != nil { return nil, err } - result := make([]*Model, len(envDocs)) - for i, doc := range envDocs { + result := make([]*Model, len(modelDocs)) + for i, doc := range modelDocs { result[i] = &Model{st: st, doc: doc} } @@ -160,10 +164,10 @@ // deployed. This will be empty for clouds that do not support regions. CloudRegion string - // CloudCredential is the name of the cloud credential that will be - // used for managing cloud resources for this model. This will be empty - // for clouds that do not require credentials. - CloudCredential string + // CloudCredential is the tag of the cloud credential that will be + // used for managing cloud resources for this model. This will be + // empty for clouds that do not require credentials. + CloudCredential names.CloudCredentialTag // Config is the model config. 
Config *config.Config @@ -187,8 +191,8 @@ if m.Config == nil { return errors.NotValidf("nil Config") } - if m.CloudName == "" { - return errors.NotValidf("empty Cloud Name") + if !names.IsValidCloud(m.CloudName) { + return errors.NotValidf("Cloud Name %q", m.CloudName) } if m.Owner == (names.UserTag{}) { return errors.NotValidf("empty Owner") @@ -197,7 +201,7 @@ return errors.NotValidf("nil StorageProviderRegistry") } switch m.MigrationMode { - case MigrationModeActive, MigrationModeImporting: + case MigrationModeNone, MigrationModeImporting: default: return errors.NotValidf("initial migration mode %q", m.MigrationMode) } @@ -247,7 +251,7 @@ return nil, nil, errors.Trace(err) } assertCloudCredentialOp, err := validateCloudCredential( - controllerCloud, args.CloudName, cloudCredentials, args.CloudCredential, owner, + controllerCloud, args.CloudName, cloudCredentials, args.CloudCredential, ) if err != nil { return nil, nil, errors.Trace(err) @@ -261,7 +265,7 @@ uuid := args.Config.UUID() session := st.session.Copy() - newSt, err := newState(names.NewModelTag(uuid), session, st.mongoInfo, st.newPolicy) + newSt, err := newState(names.NewModelTag(uuid), controllerInfo.ModelTag, session, st.mongoInfo, st.newPolicy, st.clock) if err != nil { return nil, nil, errors.Annotate(err, "could not create state for new model") } @@ -270,9 +274,9 @@ newSt.Close() } }() - newSt.controllerTag = st.controllerTag + newSt.controllerModelTag = st.controllerModelTag - modelOps, err := newSt.modelSetupOps(args, nil) + modelOps, err := newSt.modelSetupOps(st.controllerTag.Id(), args, nil) if err != nil { return nil, nil, errors.Annotate(err, "failed to create new model") } @@ -293,13 +297,13 @@ models, closer := st.getCollection(modelsC) defer closer() envCount, countErr := models.Find(bson.D{ - {"owner", owner.Canonical()}, + {"owner", owner.Id()}, {"name", name}}, ).Count() if countErr != nil { err = errors.Trace(countErr) } else if envCount > 0 { - err = errors.AlreadyExistsf("model %q 
for %s", name, owner.Canonical()) + err = errors.AlreadyExistsf("model %q for %s", name, owner.Id()) } else { err = errors.New("model already exists") } @@ -317,6 +321,10 @@ if err != nil { return nil, nil, errors.Trace(err) } + _, err = newSt.SetUserAccess(newModel.Owner(), newModel.ModelTag(), permission.AdminAccess) + if err != nil { + return nil, nil, errors.Annotate(err, "granting admin permission to the owner") + } return newModel, newSt, nil } @@ -353,21 +361,35 @@ // validateCloudCredential validates the given cloud credential // name against the provided cloud definition and credentials, // and returns a txn.Op to include in a transaction to assert the -// same. +// same. A user is supplied, for which access to the credential +// will be asserted. func validateCloudCredential( cloud jujucloud.Cloud, cloudName string, cloudCredentials map[string]jujucloud.Credential, - cloudCredentialName string, - cloudCredentialOwner names.UserTag, + cloudCredential names.CloudCredentialTag, ) (txn.Op, error) { - if cloudCredentialName != "" { - if _, ok := cloudCredentials[cloudCredentialName]; !ok { - return txn.Op{}, errors.NotFoundf("credential %q", cloudCredentialName) + if cloudCredential != (names.CloudCredentialTag{}) { + if cloudCredential.Cloud().Id() != cloudName { + return txn.Op{}, errors.NotValidf("credential %q", cloudCredential.Id()) + } + var found bool + for tag := range cloudCredentials { + if tag == cloudCredential.Id() { + found = true + break + } + } + if !found { + return txn.Op{}, errors.NotFoundf("credential %q", cloudCredential.Id()) } + // NOTE(axw) if we add ACLs for credentials, + // we'll need to check access here. The map + // we check above contains only the credentials + // that the model owner has access to. 
return txn.Op{ C: cloudCredentialsC, - Id: cloudCredentialDocID(cloudCredentialOwner, cloudName, cloudCredentialName), + Id: cloudCredentialDocID(cloudCredential), Assert: txn.DocExists, }, nil } @@ -401,10 +423,10 @@ return names.NewModelTag(m.doc.UUID) } -// ControllerTag is the model tag for the controller that the model is +// ControllerTag is the tag for the controller that the model is // running within. -func (m *Model) ControllerTag() names.ModelTag { - return names.NewModelTag(m.doc.ServerUUID) +func (m *Model) ControllerTag() names.ControllerTag { + return names.NewControllerTag(m.doc.ControllerUUID) } // UUID returns the universally unique identifier of the model. @@ -415,7 +437,7 @@ // ControllerUUID returns the universally unique identifier of the controller // in which the model is running. func (m *Model) ControllerUUID() string { - return m.doc.ServerUUID + return m.doc.ControllerUUID } // Name returns the human friendly name of the model. @@ -433,10 +455,13 @@ return m.doc.CloudRegion } -// CloudCredential returns the name of the cloud credential used for managing the -// model's cloud resources. -func (m *Model) CloudCredential() string { - return m.doc.CloudCredential +// CloudCredential returns the tag of the cloud credential used for managing the +// model's cloud resources, and a boolean indicating whether a credential is set. +func (m *Model) CloudCredential() (names.CloudCredentialTag, bool) { + if names.IsValidCloudCredential(m.doc.CloudCredential) { + return names.NewCloudCredentialTag(m.doc.CloudCredential), true + } + return names.CloudCredentialTag{}, false } // MigrationMode returns whether the model is active or being migrated. @@ -586,7 +611,7 @@ } // Users returns a slice of all users for this model. 
-func (m *Model) Users() ([]description.UserAccess, error) { +func (m *Model) Users() ([]permission.UserAccess, error) { if m.st.ModelUUID() != m.UUID() { return nil, errors.New("cannot lookup model users outside the current model") } @@ -599,8 +624,20 @@ return nil, errors.Trace(err) } - var modelUsers []description.UserAccess + var modelUsers []permission.UserAccess for _, doc := range userDocs { + // check if the User belonging to this model user has + // been deleted, in this case we should not return it. + userTag := names.NewUserTag(doc.UserName) + if userTag.IsLocal() { + _, err := m.st.User(userTag) + if errors.IsUserNotFound(err) { + continue + } + if err != nil { + return nil, errors.Trace(err) + } + } mu, err := NewModelUserAccess(m.st, doc) if err != nil { return nil, errors.Trace(err) @@ -611,6 +648,10 @@ return modelUsers, nil } +func (m *Model) isControllerModel() bool { + return m.st.controllerModelTag.Id() == m.doc.UUID +} + // Destroy sets the models's lifecycle to Dying, preventing // addition of services or machines to state. If called on // an empty hosted model, the lifecycle will be advanced @@ -621,7 +662,7 @@ // error satisfying IsHasHostedsError. func (m *Model) Destroy() error { ensureNoHostedModels := false - if m.doc.UUID == m.doc.ServerUUID { + if m.isControllerModel() { ensureNoHostedModels = true } return m.destroy(ensureNoHostedModels) @@ -701,10 +742,6 @@ } defer closeState() - if err := ensureDestroyable(st); err != nil { - return nil, errors.Trace(err) - } - // Check if the model is empty. If it is, we can advance the model's // lifecycle state directly to Dead. checkEmptyErr := m.checkEmpty() @@ -725,7 +762,7 @@ {"applications", bson.D{{"$size", 0}}}, }, }} - if modelUUID != m.doc.ServerUUID { + if !m.isControllerModel() { // The model is empty, and is not the controller // model, so we can move it straight to Dead. 
nextLife = Dead @@ -786,7 +823,7 @@ prereqOps = append(prereqOps, assertHostedModelsOp(aliveEmpty+dead)) } - timeOfDying := nowToTheSecond() + timeOfDying := st.NowToTheSecond() modelUpdateValues := bson.D{ {"life", nextLife}, {"time-of-dying", timeOfDying}, @@ -808,8 +845,8 @@ // arbitrarily long delays, we need to make sure every op // causes a state change that's still consistent; so we make // sure the cleanup ops are the last thing that will execute. - if modelUUID == m.doc.ServerUUID { - cleanupOp := st.newCleanupOp(cleanupModelsForDyingController, modelUUID) + if m.isControllerModel() { + cleanupOp := newCleanupOp(cleanupModelsForDyingController, modelUUID) ops = append(ops, cleanupOp) } if !isEmpty { @@ -819,15 +856,15 @@ // hosted model in the course of destroying the controller. In // that case we'll get errors if we try to enqueue hosted-model // cleanups, because the cleanups collection is non-global. - cleanupMachinesOp := st.newCleanupOp(cleanupMachinesForDyingModel, modelUUID) - cleanupServicesOp := st.newCleanupOp(cleanupServicesForDyingModel, modelUUID) + cleanupMachinesOp := newCleanupOp(cleanupMachinesForDyingModel, modelUUID) + cleanupServicesOp := newCleanupOp(cleanupServicesForDyingModel, modelUUID) ops = append(ops, cleanupMachinesOp, cleanupServicesOp) } return append(prereqOps, ops...), nil } // checkEmpty checks that the machine is empty of any entities that may -// require external resource cleanup. If the machine is not empty, then +// require external resource cleanup. If the model is not empty, then // an error will be returned. 
func (m *Model) checkEmpty() error { st, closeState, err := m.getState() @@ -849,8 +886,8 @@ if n := len(doc.Machines); n > 0 { return errors.Errorf("model not empty, found %d machine(s)", n) } - if n := len(doc.Services); n > 0 { - return errors.Errorf("model not empty, found %d services(s)", n) + if n := len(doc.Applications); n > 0 { + return errors.Errorf("model not empty, found %d applications(s)", n) } return nil } @@ -889,70 +926,24 @@ } } -// checkManualMachines checks if any of the machines in the slice were -// manually provisioned, and are non-manager machines. These machines -// must (currently) be manually destroyed via destroy-machine before -// destroy-model can successfully complete. -func checkManualMachines(machines []*Machine) error { - var ids []string - for _, m := range machines { - if m.IsManager() { - continue - } - manual, err := m.IsManual() - if err != nil { - return errors.Trace(err) - } - if manual { - ids = append(ids, m.Id()) - } - } - if len(ids) > 0 { - return errors.Errorf("manually provisioned machines must first be destroyed with `juju destroy-machine %s`", strings.Join(ids, " ")) - } - return nil -} - -// ensureDestroyable an error if any manual machines or persistent volumes are -// found. -func ensureDestroyable(st *State) error { - - // TODO(waigani) bug #1475212: Model destroy can miss manual - // machines. We need to be able to assert the absence of these as - // part of the destroy txn, but in order to do this manual machines - // need to add refcounts to their models. - - // Check for manual machines. We bail out if there are any, - // to stop the user from prematurely hobbling the model. - machines, err := st.AllMachines() - if err != nil { - return errors.Trace(err) - } - - if err := checkManualMachines(machines); err != nil { - return errors.Trace(err) - } - - return nil -} - // createModelOp returns the operation needed to create // an model document with the given name and UUID. 
func createModelOp( owner names.UserTag, - name, uuid, server, cloudName, cloudRegion, cloudCredential string, + name, uuid, controllerUUID, cloudName, cloudRegion string, + cloudCredential names.CloudCredentialTag, migrationMode MigrationMode, ) txn.Op { doc := &modelDoc{ UUID: uuid, Name: name, Life: Alive, - Owner: owner.Canonical(), - ServerUUID: server, + Owner: owner.Id(), + ControllerUUID: controllerUUID, MigrationMode: migrationMode, Cloud: cloudName, CloudRegion: cloudRegion, - CloudCredential: cloudCredential, + CloudCredential: cloudCredential.Id(), } return txn.Op{ C: modelsC, @@ -1022,7 +1013,7 @@ func createUniqueOwnerModelNameOp(owner names.UserTag, envName string) txn.Op { return txn.Op{ C: usermodelnameC, - Id: userModelNameIndex(owner.Canonical(), envName), + Id: userModelNameIndex(owner.Id(), envName), Assert: txn.DocMissing, Insert: bson.M{}, } @@ -1062,7 +1053,7 @@ return txn.Op{ C: modelsC, Id: modelUUID, - Assert: append(isAliveDoc, bson.DocElem{"migration-mode", MigrationModeActive}), + Assert: append(isAliveDoc, bson.DocElem{"migration-mode", MigrationModeNone}), } } @@ -1072,7 +1063,7 @@ return errors.Errorf("model %q is no longer alive", model.Name()) } else if err != nil { return errors.Annotate(err, "unable to read model") - } else if mode := model.MigrationMode(); mode != MigrationModeActive { + } else if mode := model.MigrationMode(); mode != MigrationModeNone { return errors.Errorf("model %q is being migrated", model.Name()) } return nil diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/modelmigration.go juju-core-2.0.0/src/github.com/juju/juju/state/modelmigration.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/modelmigration.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/modelmigration.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,6 +4,7 @@ package state import ( + "encoding/json" "fmt" "strconv" "strings" @@ -12,6 +13,7 @@ "github.com/juju/errors" 
"github.com/juju/utils/set" "gopkg.in/juju/names.v2" + "gopkg.in/macaroon.v1" "gopkg.in/mgo.v2" "gopkg.in/mgo.v2/bson" "gopkg.in/mgo.v2/txn" @@ -32,6 +34,10 @@ // ModelUUID returns the UUID for the model being migrated. ModelUUID() string + // ExternalControl returns true if the model migration should be + // managed by an external process. + ExternalControl() bool + // Attempt returns the migration attempt identifier. This // increments for each migration attempt for the model. Attempt() (int, error) @@ -74,15 +80,15 @@ // current progress of the migration. SetStatusMessage(text string) error - // MinionReport records a report from a migration minion worker - // about the success or failure to complete its actions for a - // given migration phase. - MinionReport(tag names.Tag, phase migration.Phase, success bool) error - - // GetMinionReports returns details of the minions that have - // reported success or failure for the current migration phase, as - // well as those which are yet to report. - GetMinionReports() (*MinionReports, error) + // SubmitMinionReport records a report from a migration minion + // worker about the success or failure to complete its actions for + // a given migration phase. + SubmitMinionReport(tag names.Tag, phase migration.Phase, success bool) error + + // MinionReports returns details of the minions that have reported + // success or failure for the current migration phase, as well as + // those which are yet to report. + MinionReports() (*MinionReports, error) // WatchMinionReports returns a notify watcher which triggers when // a migration minion has reported back about the success or failure @@ -125,6 +131,10 @@ // migration. It should be in "user@domain" format. InitiatedBy string `bson:"initiated-by"` + // ExternalControl is true if the migration will be controlled by + // an external process, instead of the migrationmaster worker. 
+ ExternalControl bool `bson:"external-control"` + // TargetController holds the UUID of the target controller. TargetController string `bson:"target-controller"` @@ -142,7 +152,11 @@ // TargetPassword holds the password to use with TargetAuthTag // when authenticating. - TargetPassword string `bson:"target-password"` + TargetPassword string `bson:"target-password,omitempty"` + + // TargetMacaroons holds the macaroons to use with TargetAuthTag + // when authenticating. + TargetMacaroons string `bson:"target-macaroons,omitempty"` } // modelMigStatusDoc tracks the progress of a migration attempt for a @@ -202,6 +216,11 @@ return mig.doc.ModelUUID } +// ExternalControl implements ModelMigration. +func (mig *modelMigration) ExternalControl() bool { + return mig.doc.ExternalControl +} + // Attempt implements ModelMigration. func (mig *modelMigration) Attempt() (int, error) { attempt, err := strconv.Atoi(mig.st.localID(mig.doc.Id)) @@ -257,18 +276,23 @@ if err != nil { return nil, errors.Trace(err) } + macs, err := jsonToMacaroons(mig.doc.TargetMacaroons) + if err != nil { + return nil, errors.Trace(err) + } return &migration.TargetInfo{ - ControllerTag: names.NewModelTag(mig.doc.TargetController), + ControllerTag: names.NewControllerTag(mig.doc.TargetController), Addrs: mig.doc.TargetAddrs, CACert: mig.doc.TargetCACert, AuthTag: authTag, Password: mig.doc.TargetPassword, + Macaroons: macs, }, nil } // SetPhase implements ModelMigration. func (mig *modelMigration) SetPhase(nextPhase migration.Phase) error { - now := GetClock().Now().UnixNano() + now := mig.st.clock.Now().UnixNano() phase, err := mig.Phase() if err != nil { @@ -302,7 +326,7 @@ Id: mig.doc.ModelUUID, Assert: txn.DocExists, Update: bson.M{ - "$set": bson.M{"migration-mode": MigrationModeActive}, + "$set": bson.M{"migration-mode": MigrationModeNone}, }, }) } @@ -353,8 +377,8 @@ return nil } -// MinionReport implements ModelMigration. 
-func (mig *modelMigration) MinionReport(tag names.Tag, phase migration.Phase, success bool) error { +// SubmitMinionReport implements ModelMigration. +func (mig *modelMigration) SubmitMinionReport(tag names.Tag, phase migration.Phase, success bool) error { globalKey, err := agentTagToGlobalKey(tag) if err != nil { return errors.Trace(err) @@ -365,7 +389,7 @@ MigrationId: mig.Id(), Phase: phase.String(), EntityKey: globalKey, - Time: GetClock().Now().UnixNano(), + Time: mig.st.clock.Now().UnixNano(), Success: success, } ops := []txn.Op{{ @@ -394,8 +418,8 @@ return nil } -// GetMinionReports implements ModelMigration. -func (mig *modelMigration) GetMinionReports() (*MinionReports, error) { +// MinionReports implements ModelMigration. +func (mig *modelMigration) MinionReports() (*MinionReports, error) { all, err := mig.getAllAgents() if err != nil { return nil, errors.Trace(err) @@ -533,26 +557,27 @@ return nil } -// ModelMigrationSpec holds the information required to create a +// MigrationSpec holds the information required to create a // ModelMigration instance. -type ModelMigrationSpec struct { - InitiatedBy names.UserTag - TargetInfo migration.TargetInfo +type MigrationSpec struct { + InitiatedBy names.UserTag + TargetInfo migration.TargetInfo + ExternalControl bool } -// Validate returns an error if the ModelMigrationSpec contains bad +// Validate returns an error if the MigrationSpec contains bad // data. Nil is returned otherwise. -func (spec *ModelMigrationSpec) Validate() error { +func (spec *MigrationSpec) Validate() error { if !names.IsValidUser(spec.InitiatedBy.Id()) { return errors.NotValidf("InitiatedBy") } return spec.TargetInfo.Validate() } -// CreateModelMigration initialises state that tracks a model -// migration. It will return an error if there is already a -// model migration in progress. -func (st *State) CreateModelMigration(spec ModelMigrationSpec) (ModelMigration, error) { +// CreateMigration initialises state that tracks a model migration. 
It +// will return an error if there is already a model migration in +// progress. +func (st *State) CreateMigration(spec MigrationSpec) (ModelMigration, error) { if st.IsController() { return nil, errors.New("controllers can't be migrated") } @@ -563,7 +588,7 @@ return nil, errors.Trace(err) } - now := GetClock().Now().UnixNano() + now := st.clock.Now().UnixNano() modelUUID := st.ModelUUID() var doc modelMigDoc var statusDoc modelMigStatusDoc @@ -576,12 +601,17 @@ return nil, errors.New("model is not alive") } - if isActive, err := st.IsModelMigrationActive(); err != nil { + if isActive, err := st.IsMigrationActive(); err != nil { return nil, errors.Trace(err) } else if isActive { return nil, errors.New("already in progress") } + macsJSON, err := macaroonsToJSON(spec.TargetInfo.Macaroons) + if err != nil { + return nil, errors.Trace(err) + } + seq, err := st.sequence("modelmigration") if err != nil { return nil, errors.Trace(err) @@ -592,11 +622,13 @@ Id: id, ModelUUID: modelUUID, InitiatedBy: spec.InitiatedBy.Id(), + ExternalControl: spec.ExternalControl, TargetController: spec.TargetInfo.ControllerTag.Id(), TargetAddrs: spec.TargetInfo.Addrs, TargetCACert: spec.TargetInfo.CACert, TargetAuthTag: spec.TargetInfo.AuthTag.String(), TargetPassword: spec.TargetInfo.Password, + TargetMacaroons: macsJSON, } statusDoc = modelMigStatusDoc{ Id: id, @@ -641,44 +673,83 @@ }, nil } -func checkTargetController(st *State, targetControllerTag names.ModelTag) error { +func macaroonsToJSON(m []macaroon.Slice) (string, error) { + if len(m) == 0 { + return "", nil + } + j, err := json.Marshal(m) + if err != nil { + return "", errors.Annotate(err, "marshalling macaroons") + } + return string(j), nil +} + +func jsonToMacaroons(raw string) ([]macaroon.Slice, error) { + if raw == "" { + return nil, nil + } + var macs []macaroon.Slice + if err := json.Unmarshal([]byte(raw), &macs); err != nil { + return nil, errors.Annotate(err, "unmarshalling macaroon") + } + return macs, nil +} + +func 
checkTargetController(st *State, targetControllerTag names.ControllerTag) error { currentController, err := st.ControllerModel() if err != nil { return errors.Annotate(err, "failed to load existing controller model") } - if targetControllerTag == currentController.ModelTag() { + if targetControllerTag == currentController.ControllerTag() { return errors.New("model already attached to target controller") } return nil } -// LatestModelMigration returns the most recent ModelMigration for a -// model (if any). -func (st *State) LatestModelMigration() (ModelMigration, error) { +// LatestMigration returns the most recent ModelMigration for a model +// (if any). +func (st *State) LatestMigration() (ModelMigration, error) { migColl, closer := st.getCollection(migrationsC) defer closer() query := migColl.Find(bson.M{"model-uuid": st.ModelUUID()}) query = query.Sort("-_id").Limit(1) - mig, err := st.modelMigrationFromQuery(query) + mig, err := st.migrationFromQuery(query) + if err != nil { + return nil, errors.Trace(err) + } + + // Hide previous migrations for models which have been migrated + // away from a model and then migrated back. + phase, err := mig.Phase() if err != nil { return nil, errors.Trace(err) } + if phase == migration.DONE { + model, err := st.Model() + if err != nil { + return nil, errors.Trace(err) + } + if model.MigrationMode() == MigrationModeNone { + return nil, errors.NotFoundf("migration") + } + } + return mig, nil } -// ModelMigration retrieves a specific ModelMigration by its -// id. See also LatestModelMigration. -func (st *State) ModelMigration(id string) (ModelMigration, error) { +// Migration retrieves a specific ModelMigration by its id. See also +// LatestMigration. 
+func (st *State) Migration(id string) (ModelMigration, error) { migColl, closer := st.getCollection(migrationsC) defer closer() - mig, err := st.modelMigrationFromQuery(migColl.FindId(id)) + mig, err := st.migrationFromQuery(migColl.FindId(id)) if err != nil { return nil, errors.Trace(err) } return mig, nil } -func (st *State) modelMigrationFromQuery(query mongo.Query) (ModelMigration, error) { +func (st *State) migrationFromQuery(query mongo.Query) (ModelMigration, error) { var doc modelMigDoc err := query.One(&doc) if err == mgo.ErrNotFound { @@ -704,12 +775,19 @@ }, nil } -// IsModelMigrationActive return true if a migration is in progress for +// IsMigrationActive returns true if a migration is in progress for // the model associated with the State. -func (st *State) IsModelMigrationActive() (bool, error) { +func (st *State) IsMigrationActive() (bool, error) { + return IsMigrationActive(st, st.ModelUUID()) +} + +// IsMigrationActive returns true if a migration is in progress for +// the model with the given UUID. The State provided need not be for +// the model in question. 
+func IsMigrationActive(st *State, modelUUID string) (bool, error) { active, closer := st.getCollection(migrationsActiveC) defer closer() - n, err := active.FindId(st.ModelUUID()).Count() + n, err := active.FindId(modelUUID).Count() if err != nil { return false, errors.Trace(err) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/modelmigration_test.go juju-core-2.0.0/src/github.com/juju/juju/state/modelmigration_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/modelmigration_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/modelmigration_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -8,43 +8,45 @@ "time" "github.com/juju/errors" + jujutesting "github.com/juju/testing" jc "github.com/juju/testing/checkers" "github.com/juju/utils" - "github.com/juju/utils/clock" gc "gopkg.in/check.v1" "gopkg.in/juju/names.v2" + "gopkg.in/macaroon.v1" "github.com/juju/juju/core/migration" "github.com/juju/juju/state" statetesting "github.com/juju/juju/state/testing" - coretesting "github.com/juju/juju/testing" "github.com/juju/juju/testing/factory" ) -type ModelMigrationSuite struct { +type MigrationSuite struct { ConnSuite State2 *state.State - clock *coretesting.Clock - stdSpec state.ModelMigrationSpec + clock *jujutesting.Clock + stdSpec state.MigrationSpec } -var _ = gc.Suite(new(ModelMigrationSuite)) +var _ = gc.Suite(new(MigrationSuite)) -func (s *ModelMigrationSuite) SetUpTest(c *gc.C) { +func (s *MigrationSuite) SetUpTest(c *gc.C) { s.ConnSuite.SetUpTest(c) - s.clock = coretesting.NewClock(time.Now().Truncate(time.Second)) - s.PatchValue(&state.GetClock, func() clock.Clock { - return s.clock - }) + s.clock = jujutesting.NewClock(time.Now().Truncate(time.Second)) + err := s.State.SetClockForTesting(s.clock) + c.Assert(err, jc.ErrorIsNil) // Create a hosted model to migrate. 
s.State2 = s.Factory.MakeModel(c, nil) s.AddCleanup(func(*gc.C) { s.State2.Close() }) - targetControllerTag := names.NewModelTag(utils.MustNewUUID().String()) + targetControllerTag := names.NewControllerTag(utils.MustNewUUID().String()) + + mac, err := macaroon.New([]byte("secret"), "id", "location") + c.Assert(err, jc.ErrorIsNil) // Plausible migration arguments to test with. - s.stdSpec = state.ModelMigrationSpec{ + s.stdSpec = state.MigrationSpec{ InitiatedBy: names.NewUserTag("admin"), TargetInfo: migration.TargetInfo{ ControllerTag: targetControllerTag, @@ -52,16 +54,17 @@ CACert: "cert", AuthTag: names.NewUserTag("user"), Password: "password", + Macaroons: []macaroon.Slice{{mac}}, }, } } -func (s *ModelMigrationSuite) TestCreate(c *gc.C) { +func (s *MigrationSuite) TestCreate(c *gc.C) { model, err := s.State2.Model() c.Assert(err, jc.ErrorIsNil) - c.Assert(model.MigrationMode(), gc.Equals, state.MigrationModeActive) + c.Assert(model.MigrationMode(), gc.Equals, state.MigrationModeNone) - mig, err := s.State2.CreateModelMigration(s.stdSpec) + mig, err := s.State2.CreateMigration(s.stdSpec) c.Assert(err, jc.ErrorIsNil) c.Check(mig.ModelUUID(), gc.Equals, s.State2.ModelUUID()) @@ -72,6 +75,7 @@ c.Check(mig.EndTime().IsZero(), jc.IsTrue) c.Check(mig.StatusMessage(), gc.Equals, "starting") c.Check(mig.InitiatedBy(), gc.Equals, "admin") + c.Check(mig.ExternalControl(), jc.IsFalse) info, err := mig.TargetInfo() c.Assert(err, jc.ErrorIsNil) @@ -86,23 +90,51 @@ c.Check(model.MigrationMode(), gc.Equals, state.MigrationModeExporting) } -func (s *ModelMigrationSuite) TestIdSequencesAreIndependent(c *gc.C) { +func (s *MigrationSuite) TestCreateExternalControl(c *gc.C) { + spec := s.stdSpec + spec.ExternalControl = true + mig, err := s.State2.CreateMigration(spec) + c.Assert(err, jc.ErrorIsNil) + c.Check(mig.ModelUUID(), gc.Equals, s.State2.ModelUUID()) + c.Check(mig.ExternalControl(), jc.IsTrue) +} + +func (s *MigrationSuite) TestIsMigrationActive(c *gc.C) { + check := 
func(expected bool) { + isActive, err := s.State2.IsMigrationActive() + c.Assert(err, jc.ErrorIsNil) + c.Check(isActive, gc.Equals, expected) + + isActive2, err := state.IsMigrationActive(s.State, s.State2.ModelUUID()) + c.Assert(err, jc.ErrorIsNil) + c.Check(isActive2, gc.Equals, expected) + } + + check(false) + + _, err := s.State2.CreateMigration(s.stdSpec) + c.Assert(err, jc.ErrorIsNil) + + check(true) +} + +func (s *MigrationSuite) TestIdSequencesAreIndependent(c *gc.C) { st2 := s.State2 st3 := s.Factory.MakeModel(c, nil) s.AddCleanup(func(*gc.C) { st3.Close() }) - mig2, err := st2.CreateModelMigration(s.stdSpec) + mig2, err := st2.CreateMigration(s.stdSpec) c.Assert(err, jc.ErrorIsNil) checkIdAndAttempt(c, mig2, 0) - mig3, err := st3.CreateModelMigration(s.stdSpec) + mig3, err := st3.CreateMigration(s.stdSpec) c.Assert(err, jc.ErrorIsNil) checkIdAndAttempt(c, mig3, 0) } -func (s *ModelMigrationSuite) TestIdSequencesIncrement(c *gc.C) { +func (s *MigrationSuite) TestIdSequencesIncrement(c *gc.C) { for attempt := 0; attempt < 3; attempt++ { - mig, err := s.State2.CreateModelMigration(s.stdSpec) + mig, err := s.State2.CreateMigration(s.stdSpec) c.Assert(err, jc.ErrorIsNil) checkIdAndAttempt(c, mig, attempt) c.Check(mig.SetPhase(migration.ABORT), jc.ErrorIsNil) @@ -110,17 +142,17 @@ } } -func (s *ModelMigrationSuite) TestIdSequencesIncrementOnlyWhenNecessary(c *gc.C) { +func (s *MigrationSuite) TestIdSequencesIncrementOnlyWhenNecessary(c *gc.C) { // Ensure that sequence numbers aren't "used up" unnecessarily // when the create txn is going to fail. - mig, err := s.State2.CreateModelMigration(s.stdSpec) + mig, err := s.State2.CreateMigration(s.stdSpec) c.Assert(err, jc.ErrorIsNil) checkIdAndAttempt(c, mig, 0) // This attempt will fail because a migration is already in // progress. 
- _, err = s.State2.CreateModelMigration(s.stdSpec) + _, err = s.State2.CreateMigration(s.stdSpec) c.Assert(err, gc.ErrorMatches, ".+already in progress") // Now abort the migration and create another. The Id sequence @@ -128,28 +160,28 @@ c.Assert(mig.SetPhase(migration.ABORT), jc.ErrorIsNil) c.Assert(mig.SetPhase(migration.ABORTDONE), jc.ErrorIsNil) - mig, err = s.State2.CreateModelMigration(s.stdSpec) + mig, err = s.State2.CreateMigration(s.stdSpec) c.Assert(err, jc.ErrorIsNil) checkIdAndAttempt(c, mig, 1) } -func (s *ModelMigrationSuite) TestSpecValidation(c *gc.C) { +func (s *MigrationSuite) TestSpecValidation(c *gc.C) { tests := []struct { label string - tweakSpec func(*state.ModelMigrationSpec) + tweakSpec func(*state.MigrationSpec) errorPattern string }{{ "invalid InitiatedBy", - func(spec *state.ModelMigrationSpec) { + func(spec *state.MigrationSpec) { spec.InitiatedBy = names.UserTag{} }, "InitiatedBy not valid", }, { "TargetInfo is validated", - func(spec *state.ModelMigrationSpec) { - spec.TargetInfo.Password = "" + func(spec *state.MigrationSpec) { + spec.TargetInfo.CACert = "" }, - "empty Password not valid", + "empty CACert not valid", }} for _, test := range tests { c.Logf("---- %s -----------", test.label) @@ -163,88 +195,88 @@ c.Check(errors.IsNotValid(err), jc.IsTrue) c.Check(err, gc.ErrorMatches, test.errorPattern) - // Ensure that CreateModelMigration rejects the bad spec too. - mig, err := s.State2.CreateModelMigration(spec) + // Ensure that CreateMigration rejects the bad spec too. 
+ mig, err := s.State2.CreateMigration(spec) c.Check(mig, gc.IsNil) c.Check(errors.IsNotValid(err), jc.IsTrue) c.Check(err, gc.ErrorMatches, test.errorPattern) } } -func (s *ModelMigrationSuite) TestCreateWithControllerModel(c *gc.C) { +func (s *MigrationSuite) TestCreateWithControllerModel(c *gc.C) { // This is the State for the controller - mig, err := s.State.CreateModelMigration(s.stdSpec) + mig, err := s.State.CreateMigration(s.stdSpec) c.Check(mig, gc.IsNil) c.Check(err, gc.ErrorMatches, "controllers can't be migrated") } -func (s *ModelMigrationSuite) TestCreateMigrationInProgress(c *gc.C) { - mig, err := s.State2.CreateModelMigration(s.stdSpec) +func (s *MigrationSuite) TestCreateMigrationInProgress(c *gc.C) { + mig, err := s.State2.CreateMigration(s.stdSpec) c.Assert(mig, gc.Not(gc.IsNil)) c.Assert(err, jc.ErrorIsNil) - mig2, err := s.State2.CreateModelMigration(s.stdSpec) + mig2, err := s.State2.CreateMigration(s.stdSpec) c.Check(mig2, gc.IsNil) c.Check(err, gc.ErrorMatches, "failed to create migration: already in progress") } -func (s *ModelMigrationSuite) TestCreateMigrationRace(c *gc.C) { +func (s *MigrationSuite) TestCreateMigrationRace(c *gc.C) { defer state.SetBeforeHooks(c, s.State2, func() { - mig, err := s.State2.CreateModelMigration(s.stdSpec) + mig, err := s.State2.CreateMigration(s.stdSpec) c.Assert(mig, gc.Not(gc.IsNil)) c.Assert(err, jc.ErrorIsNil) }).Check() - mig, err := s.State2.CreateModelMigration(s.stdSpec) + mig, err := s.State2.CreateMigration(s.stdSpec) c.Check(mig, gc.IsNil) c.Check(err, gc.ErrorMatches, "failed to create migration: already in progress") } -func (s *ModelMigrationSuite) TestCreateMigrationWhenModelNotAlive(c *gc.C) { +func (s *MigrationSuite) TestCreateMigrationWhenModelNotAlive(c *gc.C) { // Set the hosted model to Dying. 
model, err := s.State2.Model() c.Assert(err, jc.ErrorIsNil) c.Assert(model.Destroy(), jc.ErrorIsNil) - mig, err := s.State2.CreateModelMigration(s.stdSpec) + mig, err := s.State2.CreateMigration(s.stdSpec) c.Check(mig, gc.IsNil) c.Check(err, gc.ErrorMatches, "failed to create migration: model is not alive") } -func (s *ModelMigrationSuite) TestMigrationToSameController(c *gc.C) { +func (s *MigrationSuite) TestMigrationToSameController(c *gc.C) { spec := s.stdSpec - spec.TargetInfo.ControllerTag = s.State.ModelTag() + spec.TargetInfo.ControllerTag = s.State.ControllerTag() - mig, err := s.State2.CreateModelMigration(spec) + mig, err := s.State2.CreateMigration(spec) c.Check(mig, gc.IsNil) c.Check(err, gc.ErrorMatches, "model already attached to target controller") } -func (s *ModelMigrationSuite) TestLatestModelMigration(c *gc.C) { - mig1, err := s.State2.CreateModelMigration(s.stdSpec) +func (s *MigrationSuite) TestLatestMigration(c *gc.C) { + mig1, err := s.State2.CreateMigration(s.stdSpec) c.Assert(err, jc.ErrorIsNil) - mig2, err := s.State2.LatestModelMigration() + mig2, err := s.State2.LatestMigration() c.Assert(err, jc.ErrorIsNil) c.Assert(mig1.Id(), gc.Equals, mig2.Id()) } -func (s *ModelMigrationSuite) TestLatestModelMigrationNotExist(c *gc.C) { - mig, err := s.State.LatestModelMigration() +func (s *MigrationSuite) TestLatestMigrationNotExist(c *gc.C) { + mig, err := s.State.LatestMigration() c.Check(mig, gc.IsNil) c.Check(errors.IsNotFound(err), jc.IsTrue) } -func (s *ModelMigrationSuite) TestGetsLatestAttempt(c *gc.C) { +func (s *MigrationSuite) TestGetsLatestAttempt(c *gc.C) { modelUUID := s.State2.ModelUUID() for i := 0; i < 10; i++ { c.Logf("loop %d", i) - _, err := s.State2.CreateModelMigration(s.stdSpec) + _, err := s.State2.CreateMigration(s.stdSpec) c.Assert(err, jc.ErrorIsNil) - mig, err := s.State2.LatestModelMigration() + mig, err := s.State2.LatestMigration() c.Check(mig.Id(), gc.Equals, fmt.Sprintf("%s:%d", modelUUID, i)) 
c.Assert(mig.SetPhase(migration.ABORT), jc.ErrorIsNil) @@ -252,50 +284,85 @@ } } -func (s *ModelMigrationSuite) TestModelMigration(c *gc.C) { - mig1, err := s.State2.CreateModelMigration(s.stdSpec) +func (s *MigrationSuite) TestLatestMigrationPreviousMigration(c *gc.C) { + // Check the scenario of a model having been migrated away and + // then migrated back. The previous migration shouldn't be + // reported by LatestMigration. + + // Make it appear as if the model has been successfully + // migrated. Don't actually remove model documents to simulate it + // having been migrated back to the controller. + phases := []migration.Phase{ + migration.IMPORT, + migration.VALIDATION, + migration.SUCCESS, + migration.LOGTRANSFER, + migration.REAP, + migration.DONE, + } + mig, err := s.State2.CreateMigration(s.stdSpec) + c.Assert(err, jc.ErrorIsNil) + for _, phase := range phases { + c.Assert(mig.SetPhase(phase), jc.ErrorIsNil) + } + state.ResetMigrationMode(c, s.State2) + + // Previous migration shouldn't be reported. + _, err = s.State2.LatestMigration() + c.Check(errors.IsNotFound(err), jc.IsTrue) + + // Start a new migration attempt, which should be reported. 
+ mig2, err := s.State2.CreateMigration(s.stdSpec) + c.Assert(err, jc.ErrorIsNil) + + mig2b, err := s.State2.LatestMigration() + c.Check(err, jc.ErrorIsNil) + c.Check(mig2b.Id(), gc.Equals, mig2.Id()) +} + +func (s *MigrationSuite) TestMigration(c *gc.C) { + mig1, err := s.State2.CreateMigration(s.stdSpec) c.Assert(err, jc.ErrorIsNil) - mig2, err := s.State2.ModelMigration(mig1.Id()) + mig2, err := s.State2.Migration(mig1.Id()) c.Check(err, jc.ErrorIsNil) c.Check(mig1.Id(), gc.Equals, mig2.Id()) c.Check(mig2.StartTime(), gc.Equals, s.clock.Now()) } -func (s *ModelMigrationSuite) TestModelMigrationNotFound(c *gc.C) { - _, err := s.State2.ModelMigration("does not exist") +func (s *MigrationSuite) TestMigrationNotFound(c *gc.C) { + _, err := s.State2.Migration("does not exist") c.Check(err, jc.Satisfies, errors.IsNotFound) c.Check(err, gc.ErrorMatches, "migration not found") } -func (s *ModelMigrationSuite) TestRefresh(c *gc.C) { - mig1, err := s.State2.CreateModelMigration(s.stdSpec) +func (s *MigrationSuite) TestRefresh(c *gc.C) { + mig1, err := s.State2.CreateMigration(s.stdSpec) c.Assert(err, jc.ErrorIsNil) - mig2, err := s.State2.LatestModelMigration() + mig2, err := s.State2.LatestMigration() c.Assert(err, jc.ErrorIsNil) - err = mig1.SetPhase(migration.PRECHECK) + err = mig1.SetPhase(migration.IMPORT) c.Assert(err, jc.ErrorIsNil) assertPhase(c, mig2, migration.QUIESCE) err = mig2.Refresh() c.Assert(err, jc.ErrorIsNil) - assertPhase(c, mig2, migration.PRECHECK) + assertPhase(c, mig2, migration.IMPORT) } -func (s *ModelMigrationSuite) TestSuccessfulPhaseTransitions(c *gc.C) { +func (s *MigrationSuite) TestSuccessfulPhaseTransitions(c *gc.C) { st := s.State2 - mig, err := st.CreateModelMigration(s.stdSpec) + mig, err := st.CreateMigration(s.stdSpec) c.Assert(err, jc.ErrorIsNil) c.Assert(mig, gc.NotNil) - mig2, err := st.LatestModelMigration() + mig2, err := st.LatestMigration() c.Assert(err, jc.ErrorIsNil) phases := []migration.Phase{ - migration.PRECHECK, 
migration.IMPORT, migration.VALIDATION, migration.SUCCESS, @@ -342,8 +409,8 @@ s.assertMigrationCleanedUp(c, mig) } -func (s *ModelMigrationSuite) TestABORTCleanup(c *gc.C) { - mig, err := s.State2.CreateModelMigration(s.stdSpec) +func (s *MigrationSuite) TestABORTCleanup(c *gc.C) { + mig, err := s.State2.CreateMigration(s.stdSpec) c.Assert(err, jc.ErrorIsNil) s.clock.Advance(time.Millisecond) @@ -356,16 +423,15 @@ // Model should be set back to active. model, err := s.State2.Model() c.Assert(err, jc.ErrorIsNil) - c.Assert(model.MigrationMode(), gc.Equals, state.MigrationModeActive) + c.Assert(model.MigrationMode(), gc.Equals, state.MigrationModeNone) } -func (s *ModelMigrationSuite) TestREAPFAILEDCleanup(c *gc.C) { - mig, err := s.State2.CreateModelMigration(s.stdSpec) +func (s *MigrationSuite) TestREAPFAILEDCleanup(c *gc.C) { + mig, err := s.State2.CreateMigration(s.stdSpec) c.Assert(err, jc.ErrorIsNil) // Advance the migration to REAPFAILED. phases := []migration.Phase{ - migration.PRECHECK, migration.IMPORT, migration.VALIDATION, migration.SUCCESS, @@ -381,46 +447,46 @@ s.assertMigrationCleanedUp(c, mig) } -func (s *ModelMigrationSuite) assertMigrationCleanedUp(c *gc.C, mig state.ModelMigration) { +func (s *MigrationSuite) assertMigrationCleanedUp(c *gc.C, mig state.ModelMigration) { c.Assert(mig.PhaseChangedTime(), gc.Equals, s.clock.Now()) c.Assert(mig.EndTime(), gc.Equals, s.clock.Now()) assertMigrationNotActive(c, s.State2) } -func (s *ModelMigrationSuite) TestIllegalPhaseTransition(c *gc.C) { - mig, err := s.State2.CreateModelMigration(s.stdSpec) +func (s *MigrationSuite) TestIllegalPhaseTransition(c *gc.C) { + mig, err := s.State2.CreateMigration(s.stdSpec) c.Assert(err, jc.ErrorIsNil) err = mig.SetPhase(migration.SUCCESS) c.Check(err, gc.ErrorMatches, "illegal phase change: QUIESCE -> SUCCESS") } -func (s *ModelMigrationSuite) TestPhaseChangeRace(c *gc.C) { - mig, err := s.State2.CreateModelMigration(s.stdSpec) +func (s *MigrationSuite) 
TestPhaseChangeRace(c *gc.C) { + mig, err := s.State2.CreateMigration(s.stdSpec) c.Assert(mig, gc.Not(gc.IsNil)) defer state.SetBeforeHooks(c, s.State2, func() { - mig, err := s.State2.LatestModelMigration() + mig, err := s.State2.LatestMigration() c.Assert(err, jc.ErrorIsNil) - c.Assert(mig.SetPhase(migration.PRECHECK), jc.ErrorIsNil) + c.Assert(mig.SetPhase(migration.IMPORT), jc.ErrorIsNil) }).Check() - err = mig.SetPhase(migration.PRECHECK) + err = mig.SetPhase(migration.IMPORT) c.Assert(err, gc.ErrorMatches, "phase already changed") assertPhase(c, mig, migration.QUIESCE) // After a refresh it the phase change should be ok. c.Assert(mig.Refresh(), jc.ErrorIsNil) - err = mig.SetPhase(migration.PRECHECK) + err = mig.SetPhase(migration.IMPORT) c.Assert(err, jc.ErrorIsNil) - assertPhase(c, mig, migration.PRECHECK) + assertPhase(c, mig, migration.IMPORT) } -func (s *ModelMigrationSuite) TestStatusMessage(c *gc.C) { - mig, err := s.State2.CreateModelMigration(s.stdSpec) +func (s *MigrationSuite) TestStatusMessage(c *gc.C) { + mig, err := s.State2.CreateMigration(s.stdSpec) c.Assert(mig, gc.Not(gc.IsNil)) - mig2, err := s.State2.LatestModelMigration() + mig2, err := s.State2.LatestMigration() c.Assert(err, jc.ErrorIsNil) c.Check(mig.StatusMessage(), gc.Equals, "starting") @@ -435,13 +501,13 @@ c.Check(mig2.StatusMessage(), gc.Equals, "foo bar") } -func (s *ModelMigrationSuite) TestWatchForModelMigration(c *gc.C) { +func (s *MigrationSuite) TestWatchForMigration(c *gc.C) { // Start watching for migration. w, wc := s.createMigrationWatcher(c, s.State2) wc.AssertOneChange() // Create the migration - should be reported. 
- mig, err := s.State2.CreateModelMigration(s.stdSpec) + mig, err := s.State2.CreateMigration(s.stdSpec) c.Assert(err, jc.ErrorIsNil) wc.AssertOneChange() @@ -457,9 +523,9 @@ wc.AssertClosed() } -func (s *ModelMigrationSuite) TestWatchForModelMigrationInProgress(c *gc.C) { +func (s *MigrationSuite) TestWatchForMigrationInProgress(c *gc.C) { // Create a migration. - _, err := s.State2.CreateModelMigration(s.stdSpec) + _, err := s.State2.CreateMigration(s.stdSpec) c.Assert(err, jc.ErrorIsNil) // Start watching for a migration - the in progress one should be reported. @@ -467,7 +533,7 @@ wc.AssertOneChange() } -func (s *ModelMigrationSuite) TestWatchForModelMigrationMultiModel(c *gc.C) { +func (s *MigrationSuite) TestWatchForMigrationMultiModel(c *gc.C) { _, wc2 := s.createMigrationWatcher(c, s.State2) wc2.AssertOneChange() @@ -479,32 +545,32 @@ wc3.AssertOneChange() // Create a migration for 2. - _, err := s.State2.CreateModelMigration(s.stdSpec) + _, err := s.State2.CreateMigration(s.stdSpec) c.Assert(err, jc.ErrorIsNil) wc2.AssertOneChange() wc3.AssertNoChange() // Create a migration for 3. - _, err = State3.CreateModelMigration(s.stdSpec) + _, err = State3.CreateMigration(s.stdSpec) c.Assert(err, jc.ErrorIsNil) wc2.AssertNoChange() wc3.AssertOneChange() } -func (s *ModelMigrationSuite) createMigrationWatcher(c *gc.C, st *state.State) ( +func (s *MigrationSuite) createMigrationWatcher(c *gc.C, st *state.State) ( state.NotifyWatcher, statetesting.NotifyWatcherC, ) { - w := st.WatchForModelMigration() + w := st.WatchForMigration() s.AddCleanup(func(c *gc.C) { statetesting.AssertStop(c, w) }) return w, statetesting.NewNotifyWatcherC(c, st, w) } -func (s *ModelMigrationSuite) TestWatchMigrationStatus(c *gc.C) { +func (s *MigrationSuite) TestWatchMigrationStatus(c *gc.C) { w, wc := s.createStatusWatcher(c, s.State2) wc.AssertOneChange() // Initial event. // Create a migration. 
- mig, err := s.State2.CreateModelMigration(s.stdSpec) + mig, err := s.State2.CreateMigration(s.stdSpec) c.Assert(err, jc.ErrorIsNil) wc.AssertOneChange() @@ -515,12 +581,12 @@ wc.AssertOneChange() // Start another. - mig2, err := s.State2.CreateModelMigration(s.stdSpec) + mig2, err := s.State2.CreateMigration(s.stdSpec) c.Assert(err, jc.ErrorIsNil) wc.AssertOneChange() // Change phase. - c.Assert(mig2.SetPhase(migration.PRECHECK), jc.ErrorIsNil) + c.Assert(mig2.SetPhase(migration.IMPORT), jc.ErrorIsNil) wc.AssertOneChange() // End it. @@ -531,9 +597,9 @@ wc.AssertClosed() } -func (s *ModelMigrationSuite) TestWatchMigrationStatusPreexisting(c *gc.C) { +func (s *MigrationSuite) TestWatchMigrationStatusPreexisting(c *gc.C) { // Create an aborted migration. - mig, err := s.State2.CreateModelMigration(s.stdSpec) + mig, err := s.State2.CreateMigration(s.stdSpec) c.Assert(err, jc.ErrorIsNil) c.Assert(mig.SetPhase(migration.ABORT), jc.ErrorIsNil) @@ -541,7 +607,7 @@ wc.AssertOneChange() } -func (s *ModelMigrationSuite) TestWatchMigrationStatusMultiModel(c *gc.C) { +func (s *MigrationSuite) TestWatchMigrationStatusMultiModel(c *gc.C) { _, wc2 := s.createStatusWatcher(c, s.State2) wc2.AssertOneChange() // initial event @@ -553,13 +619,13 @@ wc3.AssertOneChange() // initial event // Create a migration for 2. - mig, err := s.State2.CreateModelMigration(s.stdSpec) + mig, err := s.State2.CreateMigration(s.stdSpec) c.Assert(err, jc.ErrorIsNil) wc2.AssertOneChange() wc3.AssertNoChange() // Create a migration for 3. - _, err = State3.CreateModelMigration(s.stdSpec) + _, err = State3.CreateMigration(s.stdSpec) c.Assert(err, jc.ErrorIsNil) wc2.AssertNoChange() wc3.AssertOneChange() @@ -571,7 +637,7 @@ wc3.AssertNoChange() } -func (s *ModelMigrationSuite) TestMinionReports(c *gc.C) { +func (s *MigrationSuite) TestMinionReports(c *gc.C) { // Create some machines and units to report with. 
factory2 := factory.NewFactory(s.State2) m0 := factory2.MakeMachine(c, nil) @@ -579,80 +645,80 @@ m1 := factory2.MakeMachine(c, nil) m2 := factory2.MakeMachine(c, nil) - mig, err := s.State2.CreateModelMigration(s.stdSpec) + mig, err := s.State2.CreateMigration(s.stdSpec) c.Assert(err, jc.ErrorIsNil) const phase = migration.QUIESCE - c.Assert(mig.MinionReport(m0.Tag(), phase, true), jc.ErrorIsNil) - c.Assert(mig.MinionReport(m1.Tag(), phase, false), jc.ErrorIsNil) - c.Assert(mig.MinionReport(u0.Tag(), phase, true), jc.ErrorIsNil) + c.Assert(mig.SubmitMinionReport(m0.Tag(), phase, true), jc.ErrorIsNil) + c.Assert(mig.SubmitMinionReport(m1.Tag(), phase, false), jc.ErrorIsNil) + c.Assert(mig.SubmitMinionReport(u0.Tag(), phase, true), jc.ErrorIsNil) - reports, err := mig.GetMinionReports() + reports, err := mig.MinionReports() c.Assert(err, jc.ErrorIsNil) c.Check(reports.Succeeded, jc.SameContents, []names.Tag{m0.Tag(), u0.Tag()}) c.Check(reports.Failed, jc.SameContents, []names.Tag{m1.Tag()}) c.Check(reports.Unknown, jc.SameContents, []names.Tag{m2.Tag()}) } -func (s *ModelMigrationSuite) TestDuplicateMinionReportsSameSuccess(c *gc.C) { +func (s *MigrationSuite) TestDuplicateMinionReportsSameSuccess(c *gc.C) { // It should be OK for a minion report to arrive more than once // for the same migration, agent and phase as long as the value of // "success" is the same. 
- mig, err := s.State2.CreateModelMigration(s.stdSpec) + mig, err := s.State2.CreateMigration(s.stdSpec) c.Assert(err, jc.ErrorIsNil) tag := names.NewMachineTag("42") - c.Check(mig.MinionReport(tag, migration.QUIESCE, true), jc.ErrorIsNil) - c.Check(mig.MinionReport(tag, migration.QUIESCE, true), jc.ErrorIsNil) + c.Check(mig.SubmitMinionReport(tag, migration.QUIESCE, true), jc.ErrorIsNil) + c.Check(mig.SubmitMinionReport(tag, migration.QUIESCE, true), jc.ErrorIsNil) } -func (s *ModelMigrationSuite) TestDuplicateMinionReportsDifferingSuccess(c *gc.C) { +func (s *MigrationSuite) TestDuplicateMinionReportsDifferingSuccess(c *gc.C) { // It is not OK for a minion report to arrive more than once for // the same migration, agent and phase when the "success" value // changes. - mig, err := s.State2.CreateModelMigration(s.stdSpec) + mig, err := s.State2.CreateMigration(s.stdSpec) c.Assert(err, jc.ErrorIsNil) tag := names.NewMachineTag("42") - c.Check(mig.MinionReport(tag, migration.QUIESCE, true), jc.ErrorIsNil) - err = mig.MinionReport(tag, migration.QUIESCE, false) + c.Check(mig.SubmitMinionReport(tag, migration.QUIESCE, true), jc.ErrorIsNil) + err = mig.SubmitMinionReport(tag, migration.QUIESCE, false) c.Check(err, gc.ErrorMatches, fmt.Sprintf("conflicting reports received for %s/QUIESCE/machine-42", mig.Id())) } -func (s *ModelMigrationSuite) TestMinionReportWithOldPhase(c *gc.C) { +func (s *MigrationSuite) TestMinionReportWithOldPhase(c *gc.C) { // It is OK for a report to arrive for even a migration has moved // on. - mig, err := s.State2.CreateModelMigration(s.stdSpec) + mig, err := s.State2.CreateMigration(s.stdSpec) c.Assert(err, jc.ErrorIsNil) // Get another reference to the same migration. - migalt, err := s.State2.LatestModelMigration() + migalt, err := s.State2.LatestMigration() c.Assert(err, jc.ErrorIsNil) // Confirm that there's no reports when starting. 
- reports, err := mig.GetMinionReports() + reports, err := mig.MinionReports() c.Assert(err, jc.ErrorIsNil) c.Check(reports.Succeeded, gc.HasLen, 0) // Advance the migration - c.Assert(mig.SetPhase(migration.PRECHECK), jc.ErrorIsNil) + c.Assert(mig.SetPhase(migration.IMPORT), jc.ErrorIsNil) // Submit minion report for the old phase. tag := names.NewMachineTag("42") - c.Assert(mig.MinionReport(tag, migration.QUIESCE, true), jc.ErrorIsNil) + c.Assert(mig.SubmitMinionReport(tag, migration.QUIESCE, true), jc.ErrorIsNil) // The report should still have been recorded. - reports, err = migalt.GetMinionReports() + reports, err = migalt.MinionReports() c.Assert(err, jc.ErrorIsNil) c.Check(reports.Succeeded, jc.SameContents, []names.Tag{tag}) } -func (s *ModelMigrationSuite) TestMinionReportWithInactiveMigration(c *gc.C) { +func (s *MigrationSuite) TestMinionReportWithInactiveMigration(c *gc.C) { // Create a migration. - mig, err := s.State2.CreateModelMigration(s.stdSpec) + mig, err := s.State2.CreateMigration(s.stdSpec) c.Assert(err, jc.ErrorIsNil) // Get another reference to the same migration. - migalt, err := s.State2.LatestModelMigration() + migalt, err := s.State2.LatestMigration() c.Assert(err, jc.ErrorIsNil) // Abort the migration. @@ -660,34 +726,34 @@ c.Assert(mig.SetPhase(migration.ABORTDONE), jc.ErrorIsNil) // Confirm that there's no reports when starting. - reports, err := mig.GetMinionReports() + reports, err := mig.MinionReports() c.Assert(err, jc.ErrorIsNil) c.Check(reports.Succeeded, gc.HasLen, 0) // Submit a minion report for it. tag := names.NewMachineTag("42") - c.Assert(mig.MinionReport(tag, migration.QUIESCE, true), jc.ErrorIsNil) + c.Assert(mig.SubmitMinionReport(tag, migration.QUIESCE, true), jc.ErrorIsNil) // The report should still have been recorded. 
- reports, err = migalt.GetMinionReports() + reports, err = migalt.MinionReports() c.Assert(err, jc.ErrorIsNil) c.Check(reports.Succeeded, jc.SameContents, []names.Tag{tag}) } -func (s *ModelMigrationSuite) TestWatchMinionReports(c *gc.C) { +func (s *MigrationSuite) TestWatchMinionReports(c *gc.C) { mig, wc := s.createMigAndWatchReports(c, s.State2) wc.AssertOneChange() // initial event // A report should trigger the watcher. - c.Assert(mig.MinionReport(names.NewMachineTag("0"), migration.QUIESCE, true), jc.ErrorIsNil) + c.Assert(mig.SubmitMinionReport(names.NewMachineTag("0"), migration.QUIESCE, true), jc.ErrorIsNil) wc.AssertOneChange() // A report for a different phase shouldn't trigger the watcher. - c.Assert(mig.MinionReport(names.NewMachineTag("1"), migration.IMPORT, true), jc.ErrorIsNil) + c.Assert(mig.SubmitMinionReport(names.NewMachineTag("1"), migration.IMPORT, true), jc.ErrorIsNil) wc.AssertNoChange() } -func (s *ModelMigrationSuite) TestWatchMinionReportsMultiModel(c *gc.C) { +func (s *MigrationSuite) TestWatchMinionReportsMultiModel(c *gc.C) { mig, wc := s.createMigAndWatchReports(c, s.State2) wc.AssertOneChange() // initial event @@ -697,16 +763,16 @@ wc3.AssertOneChange() // initial event // Ensure the correct watchers are triggered. 
- c.Assert(mig.MinionReport(names.NewMachineTag("0"), migration.QUIESCE, true), jc.ErrorIsNil) + c.Assert(mig.SubmitMinionReport(names.NewMachineTag("0"), migration.QUIESCE, true), jc.ErrorIsNil) wc.AssertOneChange() wc3.AssertNoChange() - c.Assert(mig3.MinionReport(names.NewMachineTag("0"), migration.QUIESCE, true), jc.ErrorIsNil) + c.Assert(mig3.SubmitMinionReport(names.NewMachineTag("0"), migration.QUIESCE, true), jc.ErrorIsNil) wc.AssertNoChange() wc3.AssertOneChange() } -func (s *ModelMigrationSuite) createStatusWatcher(c *gc.C, st *state.State) ( +func (s *MigrationSuite) createStatusWatcher(c *gc.C, st *state.State) ( state.NotifyWatcher, statetesting.NotifyWatcherC, ) { w := st.WatchMigrationStatus() @@ -714,10 +780,10 @@ return w, statetesting.NewNotifyWatcherC(c, st, w) } -func (s *ModelMigrationSuite) createMigAndWatchReports(c *gc.C, st *state.State) ( +func (s *MigrationSuite) createMigAndWatchReports(c *gc.C, st *state.State) ( state.ModelMigration, statetesting.NotifyWatcherC, ) { - mig, err := st.CreateModelMigration(s.stdSpec) + mig, err := st.CreateMigration(s.stdSpec) c.Assert(err, jc.ErrorIsNil) w, err := mig.WatchMinionReports() @@ -743,7 +809,7 @@ } func isMigrationActive(c *gc.C, st *state.State) bool { - isActive, err := st.IsModelMigrationActive() + isActive, err := st.IsMigrationActive() c.Assert(err, jc.ErrorIsNil) return isActive } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/model_test.go juju-core-2.0.0/src/github.com/juju/juju/state/model_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/model_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/model_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -5,19 +5,19 @@ import ( "fmt" - "time" "github.com/juju/errors" gitjujutesting "github.com/juju/testing" jc "github.com/juju/testing/checkers" "github.com/juju/utils" + "github.com/juju/utils/clock" gc "gopkg.in/check.v1" "gopkg.in/juju/names.v2" 
"github.com/juju/juju/cloud" - "github.com/juju/juju/core/description" "github.com/juju/juju/environs/config" "github.com/juju/juju/mongo/mongotest" + "github.com/juju/juju/permission" "github.com/juju/juju/state" statetesting "github.com/juju/juju/state/testing" "github.com/juju/juju/storage" @@ -37,22 +37,17 @@ expectedTag := names.NewModelTag(model.UUID()) c.Assert(model.Tag(), gc.Equals, expectedTag) - c.Assert(model.ControllerTag(), gc.Equals, expectedTag) + c.Assert(model.ControllerTag(), gc.Equals, s.State.ControllerTag()) c.Assert(model.Name(), gc.Equals, "testenv") c.Assert(model.Owner(), gc.Equals, s.Owner) c.Assert(model.Life(), gc.Equals, state.Alive) - c.Assert(model.MigrationMode(), gc.Equals, state.MigrationModeActive) + c.Assert(model.MigrationMode(), gc.Equals, state.MigrationModeNone) } func (s *ModelSuite) TestModelDestroy(c *gc.C) { env, err := s.State.Model() c.Assert(err, jc.ErrorIsNil) - now := state.NowToTheSecond() - s.PatchValue(&state.NowToTheSecond, func() time.Time { - return now - }) - err = env.Destroy() c.Assert(err, jc.ErrorIsNil) err = env.Refresh() @@ -65,9 +60,10 @@ owner := names.NewUserTag("non-existent@local") _, _, err := s.State.NewModel(state.ModelArgs{ - CloudName: "dummy", - Config: cfg, - Owner: owner, + CloudName: "dummy", + CloudRegion: "dummy-region", + Config: cfg, + Owner: owner, StorageProviderRegistry: storage.StaticProviderRegistry{}, }) c.Assert(err, gc.ErrorMatches, `cannot create model: user "non-existent" not found`) @@ -79,9 +75,10 @@ // Create the first model. 
_, st1, err := s.State.NewModel(state.ModelArgs{ - CloudName: "dummy", - Config: cfg, - Owner: owner, + CloudName: "dummy", + CloudRegion: "dummy-region", + Config: cfg, + Owner: owner, StorageProviderRegistry: storage.StaticProviderRegistry{}, }) c.Assert(err, jc.ErrorIsNil) @@ -96,12 +93,13 @@ "uuid": newUUID.String(), }) _, _, err = s.State.NewModel(state.ModelArgs{ - CloudName: "dummy", - Config: cfg2, - Owner: owner, + CloudName: "dummy", + CloudRegion: "dummy-region", + Config: cfg2, + Owner: owner, StorageProviderRegistry: storage.StaticProviderRegistry{}, }) - errMsg := fmt.Sprintf("model %q for %s already exists", cfg2.Name(), owner.Canonical()) + errMsg := fmt.Sprintf("model %q for %s already exists", cfg2.Name(), owner.Id()) c.Assert(err, gc.ErrorMatches, errMsg) c.Assert(errors.IsAlreadyExists(err), jc.IsTrue) @@ -120,9 +118,10 @@ // We should now be able to create the other model. env2, st2, err := s.State.NewModel(state.ModelArgs{ - CloudName: "dummy", - Config: cfg2, - Owner: owner, + CloudName: "dummy", + CloudRegion: "dummy-region", + Config: cfg2, + Owner: owner, StorageProviderRegistry: storage.StaticProviderRegistry{}, }) c.Assert(err, jc.ErrorIsNil) @@ -136,9 +135,10 @@ owner := names.NewUserTag("test@remote") model, st, err := s.State.NewModel(state.ModelArgs{ - CloudName: "dummy", - Config: cfg, - Owner: owner, + CloudName: "dummy", + CloudRegion: "dummy-region", + Config: cfg, + Owner: owner, StorageProviderRegistry: storage.StaticProviderRegistry{}, }) c.Assert(err, jc.ErrorIsNil) @@ -148,7 +148,7 @@ assertModelMatches := func(model *state.Model) { c.Assert(model.UUID(), gc.Equals, modelTag.Id()) c.Assert(model.Tag(), gc.Equals, modelTag) - c.Assert(model.ControllerTag(), gc.Equals, s.modelTag) + c.Assert(model.ControllerTag(), gc.Equals, s.State.ControllerTag()) c.Assert(model.Owner(), gc.Equals, owner) c.Assert(model.Name(), gc.Equals, "testing") c.Assert(model.Life(), gc.Equals, state.Alive) @@ -183,6 +183,7 @@ env, st, err := 
s.State.NewModel(state.ModelArgs{ CloudName: "dummy", + CloudRegion: "dummy-region", Config: cfg, Owner: owner, MigrationMode: state.MigrationModeImporting, @@ -199,9 +200,10 @@ owner := names.NewUserTag("test@remote") env, st, err := s.State.NewModel(state.ModelArgs{ - CloudName: "dummy", - Config: cfg, - Owner: owner, + CloudName: "dummy", + CloudRegion: "dummy-region", + Config: cfg, + Owner: owner, StorageProviderRegistry: storage.StaticProviderRegistry{}, }) c.Assert(err, jc.ErrorIsNil) @@ -213,23 +215,24 @@ } func (s *ModelSuite) TestControllerModel(c *gc.C) { - env, err := s.State.ControllerModel() + model, err := s.State.ControllerModel() c.Assert(err, jc.ErrorIsNil) - expectedTag := names.NewModelTag(env.UUID()) - c.Assert(env.Tag(), gc.Equals, expectedTag) - c.Assert(env.ControllerTag(), gc.Equals, expectedTag) - c.Assert(env.Name(), gc.Equals, "testenv") - c.Assert(env.Owner(), gc.Equals, s.Owner) - c.Assert(env.Life(), gc.Equals, state.Alive) + expectedTag := names.NewModelTag(model.UUID()) + c.Assert(model.Tag(), gc.Equals, expectedTag) + c.Assert(model.ControllerTag(), gc.Equals, s.State.ControllerTag()) + c.Assert(model.Name(), gc.Equals, "testenv") + c.Assert(model.Owner(), gc.Equals, s.Owner) + c.Assert(model.Life(), gc.Equals, state.Alive) } func (s *ModelSuite) TestControllerModelAccessibleFromOtherModels(c *gc.C) { cfg, _ := s.createTestModelConfig(c) _, st, err := s.State.NewModel(state.ModelArgs{ - CloudName: "dummy", - Config: cfg, - Owner: names.NewUserTag("test@remote"), + CloudName: "dummy", + CloudRegion: "dummy-region", + Config: cfg, + Owner: names.NewUserTag("test@remote"), StorageProviderRegistry: storage.StaticProviderRegistry{}, }) c.Assert(err, jc.ErrorIsNil) @@ -423,8 +426,7 @@ c.Assert(err, jc.ErrorIsNil) c.Assert(controllerEnv.DestroyIncludingHosted(), jc.ErrorIsNil) - assertCleanupRuns(c, s.State) - assertDoesNotNeedCleanup(c, s.State) + assertCleanupCount(c, s.State, 2) assertAllMachinesDeadAndRemove(c, s.State) 
assertEnv(controllerEnv, s.State, state.Dying, 0, 0) @@ -704,6 +706,26 @@ c.Assert(err, gc.ErrorMatches, "cannot lookup model users outside the current model") } +func (s *ModelSuite) TestListUsersIgnoredDeletedUsers(c *gc.C) { + model, err := s.State.Model() + c.Assert(err, jc.ErrorIsNil) + + expectedUsers := addModelUsers(c, s.State) + + obtainedUsers, err := model.Users() + c.Assert(err, jc.ErrorIsNil) + assertObtainedUsersMatchExpectedUsers(c, obtainedUsers, expectedUsers) + + lastUser := obtainedUsers[len(obtainedUsers)-1] + err = s.State.RemoveUser(lastUser.UserTag) + c.Assert(err, jc.ErrorIsNil) + expectedAfterDeletion := obtainedUsers[:len(obtainedUsers)-1] + + obtainedUsers, err = model.Users() + c.Assert(err, jc.ErrorIsNil) + assertObtainedUsersMatchExpectedUsers(c, obtainedUsers, expectedAfterDeletion) +} + func (s *ModelSuite) TestListUsersTwoModels(c *gc.C) { env, err := s.State.Model() c.Assert(err, jc.ErrorIsNil) @@ -727,14 +749,14 @@ assertObtainedUsersMatchExpectedUsers(c, obtainedUsersOtherEnv, expectedUsersOtherEnv) } -func addModelUsers(c *gc.C, st *state.State) (expected []description.UserAccess) { +func addModelUsers(c *gc.C, st *state.State) (expected []permission.UserAccess) { // get the model owner testAdmin := names.NewUserTag("test-admin") owner, err := st.UserAccess(testAdmin, st.ModelTag()) c.Assert(err, jc.ErrorIsNil) f := factory.NewFactory(st) - return []description.UserAccess{ + return []permission.UserAccess{ // we expect the owner to be an existing model user owner, // add new users to the model @@ -744,7 +766,7 @@ } } -func assertObtainedUsersMatchExpectedUsers(c *gc.C, obtainedUsers, expectedUsers []description.UserAccess) { +func assertObtainedUsersMatchExpectedUsers(c *gc.C, obtainedUsers, expectedUsers []permission.UserAccess) { c.Assert(len(obtainedUsers), gc.Equals, len(expectedUsers)) for i, obtained := range obtainedUsers { c.Assert(obtained.Object.Id(), gc.Equals, expectedUsers[i].Object.Id()) @@ -764,14 +786,14 @@ 
c.Assert(envs, gc.HasLen, 3) var obtained []string for _, env := range envs { - obtained = append(obtained, fmt.Sprintf("%s/%s", env.Owner().Canonical(), env.Name())) + obtained = append(obtained, fmt.Sprintf("%s/%s", env.Owner().Id(), env.Name())) } expected := []string{ - "test-admin@local/testenv", "bob@remote/test", "mary@remote/test", + "test-admin/testenv", } - c.Assert(obtained, jc.SameContents, expected) + c.Assert(obtained, jc.DeepEquals, expected) } func (s *ModelSuite) TestHostedModelCount(c *gc.C) { @@ -825,17 +847,17 @@ defer st.Close() cfg, _ := createTestModelConfig(c, st.ModelUUID()) _, _, err := st.NewModel(state.ModelArgs{ - CloudName: "dummy", - Config: cfg, - Owner: owner, - CloudRegion: "missing-region", + CloudName: "dummy", + CloudRegion: "dummy-region", + Config: cfg, + Owner: owner, StorageProviderRegistry: storage.StaticProviderRegistry{}, }) - c.Assert(err, gc.ErrorMatches, `region "missing-region" not found \(expected one of \["some-region"\]\)`) + c.Assert(err, gc.ErrorMatches, `region "dummy-region" not found \(expected one of \["some-region"\]\)`) } func (s *ModelCloudValidationSuite) TestNewModelMissingCloudRegion(c *gc.C) { - st, owner := s.initializeState(c, []cloud.Region{{Name: "some-region"}}, []cloud.AuthType{cloud.EmptyAuthType}, nil) + st, owner := s.initializeState(c, []cloud.Region{{Name: "dummy-region"}}, []cloud.AuthType{cloud.EmptyAuthType}, nil) defer st.Close() cfg, _ := createTestModelConfig(c, st.ModelUUID()) _, _, err := st.NewModel(state.ModelArgs{ @@ -848,63 +870,99 @@ } func (s *ModelCloudValidationSuite) TestNewModelUnknownCloudCredential(c *gc.C) { + regions := []cloud.Region{cloud.Region{Name: "dummy-region"}} + controllerCredentialTag := names.NewCloudCredentialTag("dummy/test@remote/controller-credential") st, owner := s.initializeState( - c, nil, []cloud.AuthType{cloud.UserPassAuthType}, map[string]cloud.Credential{ - "controller-credentials": cloud.NewCredential(cloud.UserPassAuthType, nil), + c, regions, 
[]cloud.AuthType{cloud.UserPassAuthType}, map[names.CloudCredentialTag]cloud.Credential{ + controllerCredentialTag: cloud.NewCredential(cloud.UserPassAuthType, nil), }, ) defer st.Close() + unknownCredentialTag := names.NewCloudCredentialTag("dummy/" + owner.Id() + "/unknown-credential") cfg, _ := createTestModelConfig(c, st.ModelUUID()) _, _, err := st.NewModel(state.ModelArgs{ CloudName: "dummy", + CloudRegion: "dummy-region", Config: cfg, Owner: owner, - CloudCredential: "unknown-credential", + CloudCredential: unknownCredentialTag, StorageProviderRegistry: storage.StaticProviderRegistry{}, }) - c.Assert(err, gc.ErrorMatches, `credential "unknown-credential" not found`) + c.Assert(err, gc.ErrorMatches, `credential "dummy/test@remote/unknown-credential" not found`) } func (s *ModelCloudValidationSuite) TestNewModelMissingCloudCredential(c *gc.C) { + regions := []cloud.Region{cloud.Region{Name: "dummy-region"}} + controllerCredentialTag := names.NewCloudCredentialTag("dummy/test@remote/controller-credential") st, owner := s.initializeState( - c, nil, []cloud.AuthType{cloud.UserPassAuthType}, map[string]cloud.Credential{ - "controller-credentials": cloud.NewCredential(cloud.UserPassAuthType, nil), + c, regions, []cloud.AuthType{cloud.UserPassAuthType}, map[names.CloudCredentialTag]cloud.Credential{ + controllerCredentialTag: cloud.NewCredential(cloud.UserPassAuthType, nil), }, ) defer st.Close() cfg, _ := createTestModelConfig(c, st.ModelUUID()) _, _, err := st.NewModel(state.ModelArgs{ - CloudName: "dummy", - Config: cfg, - Owner: owner, + CloudName: "dummy", + CloudRegion: "dummy-region", + Config: cfg, + Owner: owner, StorageProviderRegistry: storage.StaticProviderRegistry{}, }) c.Assert(err, gc.ErrorMatches, "missing CloudCredential not valid") } func (s *ModelCloudValidationSuite) TestNewModelMissingCloudCredentialSupportsEmptyAuth(c *gc.C) { - st, owner := s.initializeState(c, nil, []cloud.AuthType{cloud.EmptyAuthType}, nil) + regions := []cloud.Region{ + 
cloud.Region{ + Name: "dummy-region", + Endpoint: "dummy-endpoint", + IdentityEndpoint: "dummy-identity-endpoint", + StorageEndpoint: "dummy-storage-endpoint", + }, + } + st, owner := s.initializeState(c, regions, []cloud.AuthType{cloud.EmptyAuthType}, nil) defer st.Close() cfg, _ := createTestModelConfig(c, st.ModelUUID()) cfg, err := cfg.Apply(map[string]interface{}{"name": "whatever"}) c.Assert(err, jc.ErrorIsNil) _, newSt, err := st.NewModel(state.ModelArgs{ - CloudName: "dummy", Config: cfg, Owner: owner, + CloudName: "dummy", CloudRegion: "dummy-region", Config: cfg, Owner: owner, StorageProviderRegistry: storage.StaticProviderRegistry{}, }) c.Assert(err, jc.ErrorIsNil) newSt.Close() } +func (s *ModelCloudValidationSuite) TestNewModelOtherUserCloudCredential(c *gc.C) { + controllerCredentialTag := names.NewCloudCredentialTag("dummy/test@remote/controller-credential") + st, _ := s.initializeState( + c, nil, []cloud.AuthType{cloud.UserPassAuthType}, map[names.CloudCredentialTag]cloud.Credential{ + controllerCredentialTag: cloud.NewCredential(cloud.UserPassAuthType, nil), + }, + ) + defer st.Close() + owner := factory.NewFactory(st).MakeUser(c, nil).UserTag() + cfg, _ := createTestModelConfig(c, st.ModelUUID()) + _, _, err := st.NewModel(state.ModelArgs{ + CloudName: "dummy", + Config: cfg, + Owner: owner, + CloudCredential: controllerCredentialTag, + StorageProviderRegistry: storage.StaticProviderRegistry{}, + }) + c.Assert(err, gc.ErrorMatches, `credential "dummy/test@remote/controller-credential" not found`) +} + func (s *ModelCloudValidationSuite) initializeState( c *gc.C, regions []cloud.Region, authTypes []cloud.AuthType, - credentials map[string]cloud.Credential, + credentials map[names.CloudCredentialTag]cloud.Credential, ) (*state.State, names.UserTag) { owner := names.NewUserTag("test@remote") cfg, _ := createTestModelConfig(c, "") - var controllerRegion, controllerCredential string + var controllerRegion string + var controllerCredential 
names.CloudCredentialTag if len(regions) > 0 { controllerRegion = regions[0].Name } @@ -914,8 +972,8 @@ } } controllerCfg := testing.FakeControllerConfig() - controllerCfg["controller-uuid"] = cfg.UUID() st, err := state.Initialize(state.InitializeParams{ + Clock: clock.WallClock, ControllerConfig: controllerCfg, ControllerModelArgs: state.ModelArgs{ Owner: owner, diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/modeluser.go juju-core-2.0.0/src/github.com/juju/juju/state/modeluser.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/modeluser.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/modeluser.go 2016-10-13 14:31:49.000000000 +0000 @@ -14,7 +14,7 @@ "gopkg.in/mgo.v2/bson" "gopkg.in/mgo.v2/txn" - "github.com/juju/juju/core/description" + "github.com/juju/juju/permission" ) // modelUserLastConnectionDoc is updated by the apiserver whenever the user @@ -30,9 +30,12 @@ } // setModelAccess changes the user's access permissions on the model. 
-func (st *State) setModelAccess(access description.Access, userGlobalKey string) error { - op := updatePermissionOp(modelGlobalKey, userGlobalKey, access) - err := st.runTransaction([]txn.Op{op}) +func (st *State) setModelAccess(access permission.Access, userGlobalKey, modelUUID string) error { + if err := permission.ValidateModelAccess(access); err != nil { + return errors.Trace(err) + } + op := updatePermissionOp(modelKey(modelUUID), userGlobalKey, access) + err := st.runTransactionFor(modelUUID, []txn.Op{op}) if err == txn.ErrAborted { return errors.NotFoundf("existing permissions") } @@ -45,7 +48,7 @@ lastConnections, closer := st.getRawCollection(modelUserLastConnectionC) defer closer() - username := user.Canonical() + username := user.Id() var lastConn modelUserLastConnectionDoc err := lastConnections.FindId(st.docID(username)).Select(bson.D{{"last-connection", 1}}).One(&lastConn) if err != nil { @@ -76,7 +79,7 @@ // UpdateLastModelConnection updates the last connection time of the model user. func (st *State) UpdateLastModelConnection(user names.UserTag) error { - return st.updateLastModelConnection(user, nowToTheSecond()) + return st.updateLastModelConnection(user, st.NowToTheSecond()) } func (st *State) updateLastModelConnection(user names.UserTag, when time.Time) error { @@ -91,9 +94,9 @@ session.SetSafe(&mgo.Safe{}) lastConn := modelUserLastConnectionDoc{ - ID: st.docID(strings.ToLower(user.Canonical())), + ID: st.docID(strings.ToLower(user.Id())), ModelUUID: st.ModelUUID(), - UserName: user.Canonical(), + UserName: user.Id(), LastConnection: when, } _, err := lastConnectionsW.UpsertId(lastConn.ID, lastConn) @@ -101,34 +104,38 @@ } // ModelUser a model userAccessDoc. 
-func (st *State) modelUser(user names.UserTag) (userAccessDoc, error) { +func (st *State) modelUser(modelUUID string, user names.UserTag) (userAccessDoc, error) { modelUser := userAccessDoc{} - modelUsers, closer := st.getCollection(modelUsersC) + modelUsers, closer := st.getCollectionFor(modelUUID, modelUsersC) defer closer() - username := strings.ToLower(user.Canonical()) + username := strings.ToLower(user.Id()) err := modelUsers.FindId(username).One(&modelUser) if err == mgo.ErrNotFound { return userAccessDoc{}, errors.NotFoundf("model user %q", username) } + if err != nil { + return userAccessDoc{}, errors.Trace(err) + } // DateCreated is inserted as UTC, but read out as local time. So we // convert it back to UTC here. modelUser.DateCreated = modelUser.DateCreated.UTC() return modelUser, nil } -func createModelUserOps(modelUUID string, user, createdBy names.UserTag, displayName string, dateCreated time.Time, access description.Access) []txn.Op { - creatorname := createdBy.Canonical() +func createModelUserOps(modelUUID string, user, createdBy names.UserTag, displayName string, dateCreated time.Time, access permission.Access) []txn.Op { + creatorname := createdBy.Id() doc := &userAccessDoc{ ID: userAccessID(user), ObjectUUID: modelUUID, - UserName: user.Canonical(), + UserName: user.Id(), DisplayName: displayName, CreatedBy: creatorname, DateCreated: dateCreated, } + ops := []txn.Op{ - createPermissionOp(modelGlobalKey, userGlobalKey(userAccessID(user)), access), + createPermissionOp(modelKey(modelUUID), userGlobalKey(userAccessID(user)), access), { C: modelUsersC, Id: userAccessID(user), @@ -136,24 +143,26 @@ Insert: doc, }, } - return ops } -// removeModelUser removes a user from the database. 
-func (st *State) removeModelUser(user names.UserTag) error { - ops := []txn.Op{ - removePermissionOp(modelGlobalKey, userGlobalKey(userAccessID(user))), +func removeModelUserOps(modelUUID string, user names.UserTag) []txn.Op { + return []txn.Op{ + removePermissionOp(modelKey(modelUUID), userGlobalKey(userAccessID(user))), { C: modelUsersC, Id: userAccessID(user), Assert: txn.DocExists, Remove: true, }} +} +// removeModelUser removes a user from the database. +func (st *State) removeModelUser(user names.UserTag) error { + ops := removeModelUserOps(st.ModelUUID(), user) err := st.runTransaction(ops) if err == txn.ErrAborted { - err = errors.NewNotFound(nil, fmt.Sprintf("model user %q does not exist", user.Canonical())) + err = errors.NewNotFound(nil, fmt.Sprintf("model user %q does not exist", user.Id())) } if err != nil { return errors.Trace(err) @@ -175,10 +184,10 @@ defer lastConnCloser() lastConnDoc := modelUserLastConnectionDoc{} - id := ensureModelUUID(e.ModelTag().Id(), strings.ToLower(e.User.Canonical())) + id := ensureModelUUID(e.ModelTag().Id(), strings.ToLower(e.User.Id())) err := lastConnections.FindId(id).Select(bson.D{{"last-connection", 1}}).One(&lastConnDoc) if (err != nil && err != mgo.ErrNotFound) || lastConnDoc.LastConnection.IsZero() { - return time.Time{}, errors.Trace(NeverConnectedError(e.User.Canonical())) + return time.Time{}, errors.Trace(NeverConnectedError(e.User.Id())) } return lastConnDoc.LastConnection, nil @@ -195,7 +204,7 @@ defer userCloser() var userSlice []userAccessDoc - err := modelUsers.Find(bson.D{{"user", user.Canonical()}}).Select(bson.D{{"object-uuid", 1}, {"_id", 1}}).All(&userSlice) + err := modelUsers.Find(bson.D{{"user", user.Id()}}).Select(bson.D{{"object-uuid", 1}, {"_id", 1}}).All(&userSlice) if err != nil { return nil, err } @@ -214,35 +223,14 @@ return result, nil } -// IsControllerAdministrator returns true if the user specified has access to the -// controller model (the system model). 
-func (st *State) IsControllerAdministrator(user names.UserTag) (bool, error) { - ssinfo, err := st.ControllerInfo() - if err != nil { - return false, errors.Annotate(err, "could not get controller info") +// IsControllerAdmin returns true if the user specified has Super User Access. +func (st *State) IsControllerAdmin(user names.UserTag) (bool, error) { + ua, err := st.UserAccess(user, st.ControllerTag()) + if errors.IsNotFound(err) { + return false, nil } - - serverUUID := ssinfo.ModelTag.Id() - - modelPermission, closer := st.getRawCollection(permissionsC) - defer closer() - - username := strings.ToLower(user.Canonical()) - subjectGlobalKey := userGlobalKey(username) - - // TODO(perrito666) 20160606 this is prone to errors, it will just - // yield ErrPerm and be hard to trace, use ModelUser and Permission. - // TODO(perrito666) 20160614 since last change on this query it has - // too much out of band knowledge, it should go away when controller - // permissions are implemented. - count, err := modelPermission.Find(bson.D{ - {"_id", fmt.Sprintf("%s:%s", serverUUID, permissionID(modelGlobalKey, subjectGlobalKey))}, - {"object-global-key", modelGlobalKey}, - {"subject-global-key", subjectGlobalKey}, - {"access", description.AdminAccess}, - }).Count() if err != nil { return false, errors.Trace(err) } - return count == 1, nil + return ua.Access == permission.SuperuserAccess, nil } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/modeluser_test.go juju-core-2.0.0/src/github.com/juju/juju/state/modeluser_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/modeluser_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/modeluser_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -13,7 +13,7 @@ gc "gopkg.in/check.v1" "gopkg.in/juju/names.v2" - "github.com/juju/juju/core/description" + "github.com/juju/juju/permission" "github.com/juju/juju/state" "github.com/juju/juju/storage" "github.com/juju/juju/testing" @@ 
-27,7 +27,7 @@ var _ = gc.Suite(&ModelUserSuite{}) func (s *ModelUserSuite) TestAddModelUser(c *gc.C) { - now := state.NowToTheSecond() + now := s.State.NowToTheSecond() user := s.Factory.MakeUser(c, &factory.UserParams{ Name: "validusername", @@ -35,19 +35,20 @@ }) createdBy := s.Factory.MakeUser(c, &factory.UserParams{Name: "createdby"}) modelUser, err := s.State.AddModelUser( + s.State.ModelUUID(), state.UserAccessSpec{ User: user.UserTag(), CreatedBy: createdBy.UserTag(), - Access: description.WriteAccess, + Access: permission.WriteAccess, }) c.Assert(err, jc.ErrorIsNil) - c.Assert(modelUser.UserID, gc.Equals, fmt.Sprintf("%s:validusername@local", s.modelTag.Id())) + c.Assert(modelUser.UserID, gc.Equals, fmt.Sprintf("%s:validusername", s.modelTag.Id())) c.Assert(modelUser.Object, gc.Equals, s.modelTag) - c.Assert(modelUser.UserName, gc.Equals, "validusername@local") + c.Assert(modelUser.UserName, gc.Equals, "validusername") c.Assert(modelUser.DisplayName, gc.Equals, user.DisplayName()) - c.Assert(modelUser.Access, gc.Equals, description.WriteAccess) - c.Assert(modelUser.CreatedBy.Id(), gc.Equals, "createdby@local") + c.Assert(modelUser.Access, gc.Equals, permission.WriteAccess) + c.Assert(modelUser.CreatedBy.Id(), gc.Equals, "createdby") c.Assert(modelUser.DateCreated.Equal(now) || modelUser.DateCreated.After(now), jc.IsTrue) when, err := s.State.LastModelConnection(modelUser.UserTag) c.Assert(err, jc.Satisfies, state.IsNeverConnectedError) @@ -55,12 +56,12 @@ modelUser, err = s.State.UserAccess(user.UserTag(), s.State.ModelTag()) c.Assert(err, jc.ErrorIsNil) - c.Assert(modelUser.UserID, gc.Equals, fmt.Sprintf("%s:validusername@local", s.modelTag.Id())) + c.Assert(modelUser.UserID, gc.Equals, fmt.Sprintf("%s:validusername", s.modelTag.Id())) c.Assert(modelUser.Object, gc.Equals, s.modelTag) - c.Assert(modelUser.UserName, gc.Equals, "validusername@local") + c.Assert(modelUser.UserName, gc.Equals, "validusername") c.Assert(modelUser.DisplayName, gc.Equals, 
user.DisplayName()) - c.Assert(modelUser.Access, gc.Equals, description.WriteAccess) - c.Assert(modelUser.CreatedBy.Id(), gc.Equals, "createdby@local") + c.Assert(modelUser.Access, gc.Equals, permission.WriteAccess) + c.Assert(modelUser.CreatedBy.Id(), gc.Equals, "createdby") c.Assert(modelUser.DateCreated.Equal(now) || modelUser.DateCreated.After(now), jc.IsTrue) when, err = s.State.LastModelConnection(modelUser.UserTag) c.Assert(err, jc.Satisfies, state.IsNeverConnectedError) @@ -75,22 +76,23 @@ }) createdBy := s.Factory.MakeUser(c, &factory.UserParams{Name: "createdby"}) modelUser, err := s.State.AddModelUser( + s.State.ModelUUID(), state.UserAccessSpec{ User: user.UserTag(), CreatedBy: createdBy.UserTag(), - Access: description.ReadAccess, + Access: permission.ReadAccess, }) c.Assert(err, jc.ErrorIsNil) - c.Assert(modelUser.UserName, gc.Equals, "validusername@local") + c.Assert(modelUser.UserName, gc.Equals, "validusername") c.Assert(modelUser.DisplayName, gc.Equals, user.DisplayName()) - c.Assert(modelUser.Access, gc.Equals, description.ReadAccess) + c.Assert(modelUser.Access, gc.Equals, permission.ReadAccess) // Make sure that it is set when we read the user out. 
modelUser, err = s.State.UserAccess(user.UserTag(), s.State.ModelTag()) c.Assert(err, jc.ErrorIsNil) - c.Assert(modelUser.UserName, gc.Equals, "validusername@local") - c.Assert(modelUser.Access, gc.Equals, description.ReadAccess) + c.Assert(modelUser.UserName, gc.Equals, "validusername") + c.Assert(modelUser.Access, gc.Equals, permission.ReadAccess) } func (s *ModelUserSuite) TestAddReadWriteModelUser(c *gc.C) { @@ -101,22 +103,23 @@ }) createdBy := s.Factory.MakeUser(c, &factory.UserParams{Name: "createdby"}) modelUser, err := s.State.AddModelUser( + s.State.ModelUUID(), state.UserAccessSpec{ User: user.UserTag(), CreatedBy: createdBy.UserTag(), - Access: description.WriteAccess, + Access: permission.WriteAccess, }) c.Assert(err, jc.ErrorIsNil) - c.Assert(modelUser.UserName, gc.Equals, "validusername@local") + c.Assert(modelUser.UserName, gc.Equals, "validusername") c.Assert(modelUser.DisplayName, gc.Equals, user.DisplayName()) - c.Assert(modelUser.Access, gc.Equals, description.WriteAccess) + c.Assert(modelUser.Access, gc.Equals, permission.WriteAccess) // Make sure that it is set when we read the user out. 
modelUser, err = s.State.UserAccess(user.UserTag(), s.State.ModelTag()) c.Assert(err, jc.ErrorIsNil) - c.Assert(modelUser.UserName, gc.Equals, "validusername@local") - c.Assert(modelUser.Access, gc.Equals, description.WriteAccess) + c.Assert(modelUser.UserName, gc.Equals, "validusername") + c.Assert(modelUser.Access, gc.Equals, permission.WriteAccess) } func (s *ModelUserSuite) TestAddAdminModelUser(c *gc.C) { @@ -127,22 +130,23 @@ }) createdBy := s.Factory.MakeUser(c, &factory.UserParams{Name: "createdby"}) modelUser, err := s.State.AddModelUser( + s.State.ModelUUID(), state.UserAccessSpec{ User: user.UserTag(), CreatedBy: createdBy.UserTag(), - Access: description.AdminAccess, + Access: permission.AdminAccess, }) c.Assert(err, jc.ErrorIsNil) - c.Assert(modelUser.UserName, gc.Equals, "validusername@local") + c.Assert(modelUser.UserName, gc.Equals, "validusername") c.Assert(modelUser.DisplayName, gc.Equals, user.DisplayName()) - c.Assert(modelUser.Access, gc.Equals, description.AdminAccess) + c.Assert(modelUser.Access, gc.Equals, permission.AdminAccess) // Make sure that it is set when we read the user out. 
modelUser, err = s.State.UserAccess(user.UserTag(), s.State.ModelTag()) c.Assert(err, jc.ErrorIsNil) - c.Assert(modelUser.UserName, gc.Equals, "validusername@local") - c.Assert(modelUser.Access, gc.Equals, description.AdminAccess) + c.Assert(modelUser.UserName, gc.Equals, "validusername") + c.Assert(modelUser.Access, gc.Equals, permission.AdminAccess) } func (s *ModelUserSuite) TestDefaultAccessModelUser(c *gc.C) { @@ -153,13 +157,14 @@ }) createdBy := s.Factory.MakeUser(c, &factory.UserParams{Name: "createdby"}) modelUser, err := s.State.AddModelUser( + s.State.ModelUUID(), state.UserAccessSpec{ User: user.UserTag(), CreatedBy: createdBy.UserTag(), - Access: description.ReadAccess, + Access: permission.ReadAccess, }) c.Assert(err, jc.ErrorIsNil) - c.Assert(modelUser.Access, gc.Equals, description.ReadAccess) + c.Assert(modelUser.Access, gc.Equals, permission.ReadAccess) } func (s *ModelUserSuite) TestSetAccessModelUser(c *gc.C) { @@ -170,29 +175,32 @@ }) createdBy := s.Factory.MakeUser(c, &factory.UserParams{Name: "createdby"}) modelUser, err := s.State.AddModelUser( + s.State.ModelUUID(), state.UserAccessSpec{ User: user.UserTag(), CreatedBy: createdBy.UserTag(), - Access: description.AdminAccess, + Access: permission.AdminAccess, }) c.Assert(err, jc.ErrorIsNil) - c.Assert(modelUser.Access, gc.Equals, description.AdminAccess) + c.Assert(modelUser.Access, gc.Equals, permission.AdminAccess) - s.State.SetUserAccess(modelUser.UserTag, s.State.ModelTag(), description.ReadAccess) + s.State.SetUserAccess(modelUser.UserTag, s.State.ModelTag(), permission.ReadAccess) modelUser, err = s.State.UserAccess(user.UserTag(), s.State.ModelTag()) - c.Assert(modelUser.Access, gc.Equals, description.ReadAccess) + c.Assert(modelUser.Access, gc.Equals, permission.ReadAccess) } func (s *ModelUserSuite) TestCaseUserNameVsId(c *gc.C) { model, err := s.State.Model() c.Assert(err, jc.ErrorIsNil) - user, err := s.State.AddModelUser(state.UserAccessSpec{ - User: 
names.NewUserTag("Bob@RandomProvider"), - CreatedBy: model.Owner(), - Access: description.ReadAccess, - }) + user, err := s.State.AddModelUser( + s.State.ModelUUID(), + state.UserAccessSpec{ + User: names.NewUserTag("Bob@RandomProvider"), + CreatedBy: model.Owner(), + Access: permission.ReadAccess, + }) c.Assert(err, gc.IsNil) c.Assert(user.UserName, gc.Equals, "Bob@RandomProvider") c.Assert(user.UserID, gc.Equals, state.DocID(s.State, "bob@randomprovider")) @@ -203,11 +211,13 @@ c.Assert(err, jc.ErrorIsNil) s.Factory.MakeModelUser(c, &factory.ModelUserParams{User: "Bob@ubuntuone"}) - _, err = s.State.AddModelUser(state.UserAccessSpec{ - User: names.NewUserTag("boB@ubuntuone"), - CreatedBy: model.Owner(), - Access: description.ReadAccess, - }) + _, err = s.State.AddModelUser( + s.State.ModelUUID(), + state.UserAccessSpec{ + User: names.NewUserTag("boB@ubuntuone"), + CreatedBy: model.Owner(), + Access: permission.ReadAccess, + }) c.Assert(err, gc.ErrorMatches, `user access "boB@ubuntuone" already exists`) c.Assert(errors.IsAlreadyExists(err), jc.IsTrue) } @@ -253,19 +263,25 @@ func (s *ModelUserSuite) TestAddModelNoUserFails(c *gc.C) { createdBy := s.Factory.MakeUser(c, &factory.UserParams{Name: "createdby"}) - _, err := s.State.AddModelUser(state.UserAccessSpec{ - User: names.NewLocalUserTag("validusername"), - CreatedBy: createdBy.UserTag(), - Access: description.ReadAccess}) + _, err := s.State.AddModelUser( + s.State.ModelUUID(), + state.UserAccessSpec{ + User: names.NewLocalUserTag("validusername"), + CreatedBy: createdBy.UserTag(), + Access: permission.ReadAccess, + }) c.Assert(err, gc.ErrorMatches, `user "validusername" does not exist locally: user "validusername" not found`) } func (s *ModelUserSuite) TestAddModelNoCreatedByUserFails(c *gc.C) { user := s.Factory.MakeUser(c, &factory.UserParams{Name: "validusername"}) - _, err := s.State.AddModelUser(state.UserAccessSpec{ - User: user.UserTag(), - CreatedBy: names.NewLocalUserTag("createdby"), - Access: 
description.ReadAccess}) + _, err := s.State.AddModelUser( + s.State.ModelUUID(), + state.UserAccessSpec{ + User: user.UserTag(), + CreatedBy: names.NewLocalUserTag("createdby"), + Access: permission.ReadAccess, + }) c.Assert(err, gc.ErrorMatches, `createdBy user "createdby" does not exist locally: user "createdby" not found`) } @@ -288,7 +304,7 @@ } func (s *ModelUserSuite) TestUpdateLastConnection(c *gc.C) { - now := state.NowToTheSecond() + now := s.State.NowToTheSecond() createdBy := s.Factory.MakeUser(c, &factory.UserParams{Name: "createdby"}) user := s.Factory.MakeUser(c, &factory.UserParams{Name: "validusername", Creator: createdBy.Tag()}) modelUser, err := s.State.UserAccess(user.UserTag(), s.State.ModelTag()) @@ -303,7 +319,7 @@ } func (s *ModelUserSuite) TestUpdateLastConnectionTwoModelUsers(c *gc.C) { - now := state.NowToTheSecond() + now := s.State.NowToTheSecond() // Create a user and add them to the inital model. createdBy := s.Factory.MakeUser(c, &factory.UserParams{Name: "createdby"}) @@ -314,10 +330,13 @@ // Create a second model and add the same user to this. st2 := s.Factory.MakeModel(c, nil) defer st2.Close() - modelUser2, err := st2.AddModelUser(state.UserAccessSpec{ - User: user.UserTag(), - CreatedBy: createdBy.UserTag(), - Access: description.ReadAccess}) + modelUser2, err := st2.AddModelUser( + st2.ModelUUID(), + state.UserAccessSpec{ + User: user.UserTag(), + CreatedBy: createdBy.UserTag(), + Access: permission.ReadAccess, + }) c.Assert(err, jc.ErrorIsNil) // Now we have two model users with the same username. Ensure we get @@ -333,7 +352,7 @@ // Try to get last connection for modelUser2. As they have never connected, // we expect to get an error. _, err = st2.LastModelConnection(modelUser2.UserTag) - c.Assert(err, gc.ErrorMatches, `never connected: "validusername@local"`) + c.Assert(err, gc.ErrorMatches, `never connected: "validusername"`) // Connect modelUser2 and get last connection. 
err = s.State.UpdateLastModelConnection(modelUser2.UserTag) @@ -384,7 +403,7 @@ "uuid": uuid.String(), }) model, st, err := s.State.NewModel(state.ModelArgs{ - CloudName: "dummy", Config: cfg, Owner: owner, + CloudName: "dummy", CloudRegion: "dummy-region", Config: cfg, Owner: owner, StorageProviderRegistry: storage.StaticProviderRegistry{}, }) c.Assert(err, jc.ErrorIsNil) @@ -413,9 +432,12 @@ newEnv, err := envState.Model() c.Assert(err, jc.ErrorIsNil) - _, err = envState.AddModelUser(state.UserAccessSpec{ - User: user, CreatedBy: newEnv.Owner(), - Access: description.ReadAccess}) + _, err = envState.AddModelUser( + envState.ModelUUID(), + state.UserAccessSpec{ + User: user, CreatedBy: newEnv.Owner(), + Access: permission.ReadAccess, + }) c.Assert(err, jc.ErrorIsNil) return newEnv } @@ -450,38 +472,38 @@ } } -func (s *ModelUserSuite) TestIsControllerAdministrator(c *gc.C) { - isAdmin, err := s.State.IsControllerAdministrator(s.Owner) +func (s *ModelUserSuite) TestIsControllerAdmin(c *gc.C) { + isAdmin, err := s.State.IsControllerAdmin(s.Owner) c.Assert(err, jc.ErrorIsNil) c.Assert(isAdmin, jc.IsTrue) user := s.Factory.MakeUser(c, &factory.UserParams{NoModelUser: true}) - isAdmin, err = s.State.IsControllerAdministrator(user.UserTag()) + isAdmin, err = s.State.IsControllerAdmin(user.UserTag()) c.Assert(err, jc.ErrorIsNil) c.Assert(isAdmin, jc.IsFalse) - s.Factory.MakeModelUser(c, &factory.ModelUserParams{User: user.UserTag().Canonical()}) - isAdmin, err = s.State.IsControllerAdministrator(user.UserTag()) + s.State.SetUserAccess(user.UserTag(), s.State.ControllerTag(), permission.SuperuserAccess) + isAdmin, err = s.State.IsControllerAdmin(user.UserTag()) c.Assert(err, jc.ErrorIsNil) c.Assert(isAdmin, jc.IsTrue) - readonly := s.Factory.MakeModelUser(c, &factory.ModelUserParams{Access: description.ReadAccess}) - isAdmin, err = s.State.IsControllerAdministrator(readonly.UserTag) + readonly := s.Factory.MakeModelUser(c, &factory.ModelUserParams{Access: 
permission.ReadAccess}) + isAdmin, err = s.State.IsControllerAdmin(readonly.UserTag) c.Assert(err, jc.ErrorIsNil) c.Assert(isAdmin, jc.IsFalse) } -func (s *ModelUserSuite) TestIsControllerAdministratorFromOtherState(c *gc.C) { +func (s *ModelUserSuite) TestIsControllerAdminFromOtherState(c *gc.C) { user := s.Factory.MakeUser(c, &factory.UserParams{NoModelUser: true}) otherState := s.Factory.MakeModel(c, &factory.ModelParams{Owner: user.UserTag()}) defer otherState.Close() - isAdmin, err := otherState.IsControllerAdministrator(user.UserTag()) + isAdmin, err := otherState.IsControllerAdmin(user.UserTag()) c.Assert(err, jc.ErrorIsNil) c.Assert(isAdmin, jc.IsFalse) - isAdmin, err = otherState.IsControllerAdministrator(s.Owner) + isAdmin, err = otherState.IsControllerAdmin(s.Owner) c.Assert(err, jc.ErrorIsNil) c.Assert(isAdmin, jc.IsTrue) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/multiwatcher.go juju-core-2.0.0/src/github.com/juju/juju/state/multiwatcher.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/multiwatcher.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/multiwatcher.go 2016-10-13 14:31:49.000000000 +0000 @@ -9,7 +9,7 @@ "reflect" "github.com/juju/errors" - "launchpad.net/tomb" + "gopkg.in/tomb.v1" "github.com/juju/juju/state/multiwatcher" "github.com/juju/juju/state/watcher" @@ -495,6 +495,8 @@ a.latestRevno++ entry.revno = a.latestRevno entry.info = info + // The app might have been removed and re-added. 
+ entry.removed = false a.list.MoveToFront(elem) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/multiwatcher_internal_test.go juju-core-2.0.0/src/github.com/juju/juju/state/multiwatcher_internal_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/multiwatcher_internal_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/multiwatcher_internal_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -91,6 +91,45 @@ info: &multiwatcher.MachineInfo{Id: "1"}, }}, }, { + about: "mark application removed then update", + change: func(all *multiwatcherStore) { + all.Update(&multiwatcher.ApplicationInfo{ModelUUID: "uuid0", Name: "logging"}) + all.Update(&multiwatcher.ApplicationInfo{ModelUUID: "uuid0", Name: "wordpress"}) + StoreIncRef(all, multiwatcher.EntityId{"application", "uuid0", "logging"}) + all.Remove(multiwatcher.EntityId{"application", "uuid0", "logging"}) + all.Update(&multiwatcher.ApplicationInfo{ + ModelUUID: "uuid0", + Name: "wordpress", + Exposed: true, + }) + all.Update(&multiwatcher.ApplicationInfo{ + ModelUUID: "uuid0", + Name: "logging", + Exposed: true, + }) + }, + expectRevno: 5, + expectContents: []entityEntry{{ + revno: 4, + creationRevno: 2, + removed: false, + refCount: 0, + info: &multiwatcher.ApplicationInfo{ + ModelUUID: "uuid0", + Name: "wordpress", + Exposed: true, + }}, { + revno: 5, + creationRevno: 1, + removed: false, + refCount: 1, + info: &multiwatcher.ApplicationInfo{ + ModelUUID: "uuid0", + Name: "logging", + Exposed: true, + }, + }}, +}, { about: "mark removed on existing entry", change: func(all *multiwatcherStore) { all.Update(&multiwatcher.MachineInfo{ModelUUID: "uuid", Id: "0"}) @@ -701,14 +740,9 @@ c.Check(sm.Stop(), gc.IsNil) }() w := &Multiwatcher{all: sm} - done := make(chan struct{}) - go func() { - checkNext(c, w, nil, ErrStopped.Error()) - done <- struct{}{} - }() err := w.Stop() c.Assert(err, jc.ErrorIsNil) - <-done + checkNext(c, w, nil, ErrStopped.Error()) } 
func (*storeManagerSuite) TestMultiwatcherStopBecauseStoreManagerError(c *gc.C) { diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/open.go juju-core-2.0.0/src/github.com/juju/juju/state/open.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/open.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/open.go 2016-10-13 14:31:49.000000000 +0000 @@ -6,19 +6,20 @@ import ( "fmt" "strings" - "time" "github.com/juju/errors" "github.com/juju/utils" + "github.com/juju/utils/clock" "gopkg.in/juju/names.v2" "gopkg.in/mgo.v2" "gopkg.in/mgo.v2/txn" "github.com/juju/juju/cloud" "github.com/juju/juju/controller" - "github.com/juju/juju/core/description" + "github.com/juju/juju/environs" "github.com/juju/juju/environs/config" "github.com/juju/juju/mongo" + "github.com/juju/juju/permission" "github.com/juju/juju/status" "github.com/juju/juju/storage" "github.com/juju/juju/storage/poolmanager" @@ -34,27 +35,37 @@ // may be provided. // // Open returns unauthorizedError if access is unauthorized. -func Open(tag names.ModelTag, info *mongo.MongoInfo, opts mongo.DialOpts, newPolicy NewPolicyFunc) (*State, error) { - st, err := open(tag, info, opts, newPolicy) +func Open( + controllerModelTag names.ModelTag, + controllerTag names.ControllerTag, + info *mongo.MongoInfo, opts mongo.DialOpts, + newPolicy NewPolicyFunc, +) (*State, error) { + st, err := open(controllerModelTag, info, opts, newPolicy, clock.WallClock) if err != nil { return nil, errors.Trace(err) } if _, err := st.Model(); err != nil { if err := st.Close(); err != nil { - logger.Errorf("closing State for %s: %v", tag, err) + logger.Errorf("closing State for %s: %v", controllerModelTag, err) } - return nil, errors.Annotatef(err, "cannot read model %s", tag.Id()) + return nil, errors.Annotatef(err, "cannot read model %s", controllerModelTag.Id()) } // State should only be Opened on behalf of a controller environ; all // other *States should be created via ForModel. 
- if err := st.start(tag); err != nil { + if err := st.start(controllerTag); err != nil { return nil, errors.Trace(err) } return st, nil } -func open(tag names.ModelTag, info *mongo.MongoInfo, opts mongo.DialOpts, newPolicy NewPolicyFunc) (*State, error) { +func open( + controllerModelTag names.ModelTag, + info *mongo.MongoInfo, opts mongo.DialOpts, + newPolicy NewPolicyFunc, + clock clock.Clock, +) (*State, error) { logger.Infof("opening state, mongo addresses: %q; entity %v", info.Addrs, info.Tag) logger.Debugf("dialing mongo") session, err := mongo.DialWithInfo(info.Info, opts) @@ -70,21 +81,7 @@ } logger.Debugf("mongodb login successful") - // In rare circumstances, we may be upgrading from pre-1.23, and not have the - // model UUID available. In that case we need to infer what it might be; - // we depend on the assumption that this is the only circumstance in which - // the the UUID might not be known. - if tag.Id() == "" { - logger.Warningf("creating state without model tag; inferring bootstrap model") - ssInfo, err := readRawControllerInfo(session) - if err != nil { - session.Close() - return nil, errors.Trace(err) - } - tag = ssInfo.ModelTag - } - - st, err := newState(tag, session, info, newPolicy) + st, err := newState(controllerModelTag, controllerModelTag, session, info, newPolicy, clock) if err != nil { return nil, errors.Trace(err) } @@ -108,6 +105,10 @@ // InitializeParams contains the parameters for initializing the state database. type InitializeParams struct { + // Clock wraps all calls time. Real uses use clock.WallClock, + // tests may override with a testing clock. + Clock clock.Clock + // ControllerModelArgs contains the arguments for creating // the controller model. ControllerModelArgs ModelArgs @@ -122,7 +123,7 @@ // CloudCredentials contains the credentials for the owner of // the controller model to store in the controller. 
- CloudCredentials map[string]cloud.Credential + CloudCredentials map[names.CloudCredentialTag]cloud.Credential // ControllerConfig contains config attributes for // the controller. @@ -132,6 +133,10 @@ // models on the specified cloud. ControllerInheritedConfig map[string]interface{} + // RegionInheritedConfig contains region specific configuration for + // models running on specific cloud regions. + RegionInheritedConfig cloud.RegionConfig + // NewPolicy is a function that returns the set of state policies // to apply. NewPolicy NewPolicyFunc @@ -147,16 +152,19 @@ // Validate checks that the state initialization parameters are valid. func (p InitializeParams) Validate() error { + if p.Clock == nil { + return errors.NotValidf("missing clock") + } if err := p.ControllerModelArgs.Validate(); err != nil { return errors.Trace(err) } - if p.ControllerModelArgs.MigrationMode != MigrationModeActive { + if p.ControllerModelArgs.MigrationMode != MigrationModeNone { return errors.NotValidf("migration mode %q", p.ControllerModelArgs.MigrationMode) } uuid := p.ControllerModelArgs.Config.UUID() controllerUUID := p.ControllerConfig.ControllerUUID() - if uuid != controllerUUID { - return errors.NotValidf("mismatching uuid (%v) and controller-uuid (%v)", uuid, controllerUUID) + if uuid == controllerUUID { + return errors.NotValidf("same controller model uuid (%v) and controller-uuid (%v)", uuid, controllerUUID) } if p.MongoInfo == nil { return errors.NotValidf("nil MongoInfo") @@ -176,12 +184,15 @@ if _, err := validateCloudCredentials(p.Cloud, p.CloudName, p.CloudCredentials); err != nil { return errors.Annotate(err, "validating cloud credentials") } + creds := make(map[string]cloud.Credential, len(p.CloudCredentials)) + for tag, cred := range p.CloudCredentials { + creds[tag.Id()] = cred + } if _, err := validateCloudCredential( p.Cloud, p.CloudName, - p.CloudCredentials, + creds, p.ControllerModelArgs.CloudCredential, - p.ControllerModelArgs.Owner, ); err != nil { return 
errors.Annotate(err, "validating controller model cloud credential") } @@ -199,7 +210,7 @@ // When creating the controller model, the new model // UUID is also used as the controller UUID. modelTag := names.NewModelTag(args.ControllerModelArgs.Config.UUID()) - st, err := open(modelTag, args.MongoInfo, args.MongoDialOpts, args.NewPolicy) + st, err := open(modelTag, args.MongoInfo, args.MongoDialOpts, args.NewPolicy, args.Clock) if err != nil { return nil, errors.Trace(err) } @@ -210,7 +221,7 @@ } } }() - st.controllerTag = modelTag + st.controllerModelTag = modelTag // A valid model is used as a signal that the // state has already been initalized. If this is the case @@ -223,7 +234,13 @@ logger.Infof("initializing controller model %s", modelTag.Id()) - modelOps, err := st.modelSetupOps(args.ControllerModelArgs, args.ControllerInheritedConfig) + modelOps, err := st.modelSetupOps( + args.ControllerConfig.ControllerUUID(), + args.ControllerModelArgs, + &lineage{ + ControllerConfig: args.ControllerInheritedConfig, + RegionConfig: args.RegionInheritedConfig, + }) if err != nil { return nil, errors.Trace(err) } @@ -232,7 +249,14 @@ return nil, err } - ops := createInitialUserOps(st.ControllerUUID(), args.ControllerModelArgs.Owner, args.MongoInfo.Password, salt) + dateCreated := st.NowToTheSecond() + ops := createInitialUserOps( + args.ControllerConfig.ControllerUUID(), + args.ControllerModelArgs.Owner, + args.MongoInfo.Password, + salt, + dateCreated, + ) ops = append(ops, txn.Op{ @@ -266,58 +290,63 @@ createSettingsOp(controllersC, controllerSettingsGlobalKey, args.ControllerConfig), createSettingsOp(globalSettingsC, controllerInheritedSettingsGlobalKey, args.ControllerInheritedConfig), ) - for credName, cred := range args.CloudCredentials { - ops = append(ops, createCloudCredentialOp( - args.ControllerModelArgs.Owner, - args.CloudName, - credName, - cred, - )) + for k, v := range args.Cloud.RegionConfig { + // Create an entry keyed on cloudname#, value for each region 
in + // region-config. The values here are themselves + // map[string]interface{}. + ops = append(ops, createSettingsOp(globalSettingsC, regionSettingsGlobalKey(args.CloudName, k), v)) + } + + for tag, cred := range args.CloudCredentials { + ops = append(ops, createCloudCredentialOp(tag, cred)) } ops = append(ops, modelOps...) if err := st.runTransaction(ops); err != nil { return nil, errors.Trace(err) } - if err := st.start(modelTag); err != nil { + controllerTag := names.NewControllerTag(args.ControllerConfig.ControllerUUID()) + if err := st.start(controllerTag); err != nil { return nil, errors.Trace(err) } return st, nil } +// lineage is a composite of inheritable properties for the extent of +// passing them into modelSetupOps. +type lineage struct { + ControllerConfig map[string]interface{} + RegionConfig cloud.RegionConfig +} + // modelSetupOps returns the transactions necessary to set up a model. -func (st *State) modelSetupOps(args ModelArgs, controllerInheritedConfig map[string]interface{}) ([]txn.Op, error) { - if err := checkControllerInheritedConfig(controllerInheritedConfig); err != nil { - return nil, errors.Trace(err) +func (st *State) modelSetupOps(controllerUUID string, args ModelArgs, inherited *lineage) ([]txn.Op, error) { + if inherited != nil { + if err := checkControllerInheritedConfig(inherited.ControllerConfig); err != nil { + return nil, errors.Trace(err) + } } if err := checkModelConfig(args.Config); err != nil { return nil, errors.Trace(err) } - controllerUUID := st.controllerTag.Id() + controllerModelUUID := st.controllerModelTag.Id() modelUUID := args.Config.UUID() modelStatusDoc := statusDoc{ ModelUUID: modelUUID, - // TODO(fwereade): 2016-03-17 lp:1558657 - Updated: time.Now().UnixNano(), - // TODO(axw) 2016-04-13 lp:1569632 - // We need to decide how we will - // represent migration in model status. 
- Status: status.StatusAvailable, + Updated: st.clock.Now().UnixNano(), + Status: status.Available, } - // When creating the controller model, the new model - // UUID is also used as the controller UUID. - isHostedModel := controllerUUID != modelUUID - modelUserOps := createModelUserOps( - modelUUID, args.Owner, args.Owner, args.Owner.Name(), nowToTheSecond(), description.AdminAccess, + modelUUID, args.Owner, args.Owner, args.Owner.Name(), st.NowToTheSecond(), permission.AdminAccess, ) ops := []txn.Op{ createStatusOp(st, modelGlobalKey, modelStatusDoc), createConstraintsOp(st, modelGlobalKey, args.Constraints), } - if isHostedModel { + // Inc ref count for hosted models. + if controllerModelUUID != modelUUID { ops = append(ops, incHostedModelCountOp()) } @@ -333,7 +362,7 @@ // is being initialised and there won't be any config sources // in state. var configSources []modelConfigSource - if len(controllerInheritedConfig) > 0 { + if inherited != nil { configSources = []modelConfigSource{ { name: config.JujuDefaultSource, @@ -343,10 +372,18 @@ { name: config.JujuControllerSource, sourceFunc: modelConfigSourceFunc(func() (attrValues, error) { - return controllerInheritedConfig, nil - })}} + return inherited.ControllerConfig, nil + })}, + { + name: config.JujuRegionSource, + sourceFunc: modelConfigSourceFunc(func() (attrValues, error) { + // We return the values specific to this region for this model. + return attrValues(inherited.RegionConfig[args.CloudRegion]), nil + })}, + } } else { - configSources = modelConfigSources(st) + rspec := &environs.RegionSpec{Cloud: args.CloudName, Region: args.CloudRegion} + configSources = modelConfigSources(st, rspec) } modelCfg, err := composeModelConfigAttributes(args.Config.AllAttrs(), configSources...) 
if err != nil { @@ -373,7 +410,11 @@ func (st *State) createDefaultStoragePoolsOps(registry storage.ProviderRegistry) ([]txn.Op, error) { m := poolmanager.MemSettings{make(map[string]map[string]interface{})} pm := poolmanager.New(m, registry) - for _, providerType := range registry.StorageProviderTypes() { + providerTypes, err := registry.StorageProviderTypes() + if err != nil { + return nil, errors.Trace(err) + } + for _, providerType := range providerTypes { p, err := registry.StorageProvider(providerType) if err != nil { return nil, errors.Trace(err) @@ -428,7 +469,12 @@ // // newState takes responsibility for the supplied *mgo.Session, and will // close it if it cannot be returned under the aegis of a *State. -func newState(modelTag names.ModelTag, session *mgo.Session, mongoInfo *mongo.MongoInfo, newPolicy NewPolicyFunc) (_ *State, err error) { +func newState( + modelTag, controllerModelTag names.ModelTag, + session *mgo.Session, mongoInfo *mongo.MongoInfo, + newPolicy NewPolicyFunc, + clock clock.Clock, +) (_ *State, err error) { defer func() { if err != nil { @@ -448,11 +494,13 @@ // Create State. st := &State{ - modelTag: modelTag, - mongoInfo: mongoInfo, - session: session, - database: database, - newPolicy: newPolicy, + clock: clock, + modelTag: modelTag, + controllerModelTag: controllerModelTag, + mongoInfo: mongoInfo, + session: session, + database: database, + newPolicy: newPolicy, } if newPolicy != nil { st.policy = newPolicy(st) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/payloads_ns.go juju-core-2.0.0/src/github.com/juju/juju/state/payloads_ns.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/payloads_ns.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/payloads_ns.go 2016-10-13 14:31:49.000000000 +0000 @@ -1,3 +1,6 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+ package state import ( diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/persistence.go juju-core-2.0.0/src/github.com/juju/juju/state/persistence.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/persistence.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/persistence.go 2016-10-13 14:31:49.000000000 +0000 @@ -113,5 +113,5 @@ // NewCleanupOp creates a mgo transaction operation that queues up // some cleanup action in state. func (sp *statePersistence) NewCleanupOp(kind, prefix string) txn.Op { - return sp.st.newCleanupOp(cleanupKind(kind), prefix) + return newCleanupOp(cleanupKind(kind), prefix) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/policy.go juju-core-2.0.0/src/github.com/juju/juju/state/policy.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/policy.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/policy.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,8 +4,6 @@ package state import ( - "fmt" - "github.com/juju/errors" "github.com/juju/juju/constraints" @@ -32,6 +30,10 @@ // Prechecker returns a Prechecker or an error. Prechecker() (Prechecker, error) + // ProviderConfigSchemaSource returns a config.ConfigSchemaSource + // for the environ provider, or an error. + ProviderConfigSchemaSource() (config.ConfigSchemaSource, error) + // ConfigValidator returns a config.Validator or an error. 
ConfigValidator() (config.Validator, error) @@ -72,7 +74,7 @@ return err } if prechecker == nil { - return fmt.Errorf("policy returned nil prechecker without an error") + return errors.New("policy returned nil prechecker without an error") } return prechecker.PrecheckInstance(series, cons, placement) } @@ -89,7 +91,7 @@ } else if err != nil { return nil, err } else if validator == nil { - return nil, fmt.Errorf("policy returned nil constraints validator without an error") + return nil, errors.New("policy returned nil constraints validator without an error") } } else { validator = constraints.NewValidator() @@ -160,7 +162,7 @@ return nil, err } if configValidator == nil { - return nil, fmt.Errorf("policy returned nil configValidator without an error") + return nil, errors.New("policy returned nil configValidator without an error") } return configValidator.Validate(cfg, old) } @@ -171,3 +173,10 @@ } return st.policy.StorageProviderRegistry() } + +func (st *State) environsProviderConfigSchemaSource() (config.ConfigSchemaSource, error) { + if st.policy == nil { + return nil, errors.NotImplementedf("config.ProviderConfigSchemaSource") + } + return st.policy.ProviderConfigSchemaSource() +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/pool.go juju-core-2.0.0/src/github.com/juju/juju/state/pool.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/pool.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/pool.go 2016-10-13 14:31:49.000000000 +0000 @@ -15,20 +15,31 @@ func NewStatePool(systemState *State) *StatePool { return &StatePool{ systemState: systemState, - pool: make(map[string]*State), + pool: make(map[string]*PoolItem), } } -// StatePool is a simple cache of State instances for multiple models. +// PoolItem holds a State and tracks how many requests are using it +// and whether it's been marked for removal. 
+type PoolItem struct { + state *State + references uint + remove bool +} + +// StatePool is a cache of State instances for multiple +// models. Clients should call Release when they have finished with any +// state. type StatePool struct { systemState *State // mu protects pool mu sync.Mutex - pool map[string]*State + pool map[string]*PoolItem } -// Get returns a State for a given model from the pool, creating -// one if required. +// Get returns a State for a given model from the pool, creating one +// if required. If the State has been marked for removal because there +// are outstanding uses, an error will be returned. func (p *StatePool) Get(modelUUID string) (*State, error) { if modelUUID == p.systemState.ModelUUID() { return p.systemState, nil @@ -37,36 +48,113 @@ p.mu.Lock() defer p.mu.Unlock() - st, ok := p.pool[modelUUID] + item, ok := p.pool[modelUUID] + if ok && item.remove { + // We don't want to allow increasing the refcount of a model + // that's been removed. + return nil, errors.Errorf("model %v has been removed", modelUUID) + } if ok { - return st, nil + item.references++ + return item.state, nil } st, err := p.systemState.ForModel(names.NewModelTag(modelUUID)) if err != nil { return nil, errors.Annotatef(err, "failed to create state for model %v", modelUUID) } - p.pool[modelUUID] = st + p.pool[modelUUID] = &PoolItem{state: st, references: 1} return st, nil } +// Release indicates that the client has finished using the State. If the +// state has been marked for removal, it will be closed and removed +// when the final Release is done. +func (p *StatePool) Release(modelUUID string) error { + if modelUUID == p.systemState.ModelUUID() { + // We don't maintain a refcount for the controller. 
+ return nil + } + + p.mu.Lock() + defer p.mu.Unlock() + + item, ok := p.pool[modelUUID] + if !ok { + return errors.Errorf("unable to return unknown model %v to the pool", modelUUID) + } + if item.references == 0 { + return errors.Errorf("state pool refcount for model %v is already 0", modelUUID) + } + item.references-- + return p.maybeRemoveItem(modelUUID, item) +} + +// Remove takes the state out of the pool and closes it, or marks it +// for removal if it's currently being used (indicated by Gets without +// corresponding Releases). +func (p *StatePool) Remove(modelUUID string) error { + if modelUUID == p.systemState.ModelUUID() { + // We don't manage the controller state. + return nil + } + + p.mu.Lock() + defer p.mu.Unlock() + + item, ok := p.pool[modelUUID] + if !ok { + // Don't require the client to keep track of what we've seen - + // ignore unknown model uuids. + return nil + } + item.remove = true + return p.maybeRemoveItem(modelUUID, item) +} + +func (p *StatePool) maybeRemoveItem(modelUUID string, item *PoolItem) error { + if item.remove && item.references == 0 { + delete(p.pool, modelUUID) + return item.state.Close() + } + return nil +} + // SystemState returns the State passed in to NewStatePool. func (p *StatePool) SystemState() *State { return p.systemState } +// KillWorkers tells the internal worker for all cached State +// instances in the pool to die. +func (p *StatePool) KillWorkers() { + p.mu.Lock() + defer p.mu.Unlock() + for _, item := range p.pool { + item.state.KillWorkers() + } +} + // Close closes all State instances in the pool. 
func (p *StatePool) Close() error { p.mu.Lock() defer p.mu.Unlock() var lastErr error - for _, st := range p.pool { - err := st.Close() + for _, item := range p.pool { + if item.references != 0 || item.remove { + logger.Warningf( + "state for %v leaked from pool - references: %v, removed: %v", + item.state.ModelUUID(), + item.references, + item.remove, + ) + } + err := item.state.Close() if err != nil { lastErr = err } } - p.pool = make(map[string]*State) + p.pool = make(map[string]*PoolItem) return errors.Annotate(lastErr, "at least one error closing a state") } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/pool_test.go juju-core-2.0.0/src/github.com/juju/juju/state/pool_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/pool_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/pool_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,16 +4,20 @@ package state_test import ( + "fmt" + jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" "github.com/juju/juju/state" statetesting "github.com/juju/juju/state/testing" + "github.com/juju/juju/worker/workertest" ) type statePoolSuite struct { statetesting.StateSuite State1, State2 *state.State + Pool *state.StatePool ModelUUID, ModelUUID1, ModelUUID2 string } @@ -30,63 +34,75 @@ s.State2 = s.Factory.MakeModel(c, nil) s.AddCleanup(func(*gc.C) { s.State2.Close() }) s.ModelUUID2 = s.State2.ModelUUID() + + s.Pool = state.NewStatePool(s.State) + s.AddCleanup(func(*gc.C) { s.Pool.Close() }) } func (s *statePoolSuite) TestGet(c *gc.C) { - p := state.NewStatePool(s.State) - defer p.Close() - - st1, err := p.Get(s.ModelUUID1) + st1, err := s.Pool.Get(s.ModelUUID1) c.Assert(err, jc.ErrorIsNil) c.Assert(st1.ModelUUID(), gc.Equals, s.ModelUUID1) - st2, err := p.Get(s.ModelUUID2) + st2, err := s.Pool.Get(s.ModelUUID2) c.Assert(err, jc.ErrorIsNil) c.Assert(st2.ModelUUID(), gc.Equals, s.ModelUUID2) // Check that the same instances are returned - // when a State 
for the same env is re-requested. - st1_, err := p.Get(s.ModelUUID1) + // when a State for the same model is re-requested. + st1_, err := s.Pool.Get(s.ModelUUID1) c.Assert(err, jc.ErrorIsNil) c.Assert(st1_, gc.Equals, st1) - st2_, err := p.Get(s.ModelUUID2) + st2_, err := s.Pool.Get(s.ModelUUID2) c.Assert(err, jc.ErrorIsNil) c.Assert(st2_, gc.Equals, st2) } -func (s *statePoolSuite) TestGetWithControllerEnv(c *gc.C) { - p := state.NewStatePool(s.State) - defer p.Close() - - // When a State for the controller env is requested, the same +func (s *statePoolSuite) TestGetWithControllerModel(c *gc.C) { + // When a State for the controller model is requested, the same // State that was original passed in should be returned. - st0, err := p.Get(s.ModelUUID) + st0, err := s.Pool.Get(s.ModelUUID) c.Assert(err, jc.ErrorIsNil) c.Assert(st0, gc.Equals, s.State) } -func (s *statePoolSuite) TestSystemState(c *gc.C) { - p := state.NewStatePool(s.State) - defer p.Close() - - st0 := p.SystemState() +func (s *statePoolSuite) TestGetSystemState(c *gc.C) { + st0 := s.Pool.SystemState() c.Assert(st0, gc.Equals, s.State) } -func (s *statePoolSuite) TestClose(c *gc.C) { - p := state.NewStatePool(s.State) - defer p.Close() +func (s *statePoolSuite) TestKillWorkers(c *gc.C) { + // Get some State instances via the pool and extract their + // internal workers. + st1, err := s.Pool.Get(s.ModelUUID1) + c.Assert(err, jc.ErrorIsNil) + w1 := state.GetInternalWorkers(st1) + workertest.CheckAlive(c, w1) + st2, err := s.Pool.Get(s.ModelUUID1) + c.Assert(err, jc.ErrorIsNil) + w2 := state.GetInternalWorkers(st2) + workertest.CheckAlive(c, w2) + + // Now kill their workers. + s.Pool.KillWorkers() + + // Ensure the internal workers for each State died. + c.Check(workertest.CheckKilled(c, w1), jc.ErrorIsNil) + c.Check(workertest.CheckKilled(c, w2), jc.ErrorIsNil) +} + +func (s *statePoolSuite) TestClose(c *gc.C) { // Get some State instances. 
- st1, err := p.Get(s.ModelUUID1) + st1, err := s.Pool.Get(s.ModelUUID1) c.Assert(err, jc.ErrorIsNil) - st2, err := p.Get(s.ModelUUID1) + st2, err := s.Pool.Get(s.ModelUUID1) c.Assert(err, jc.ErrorIsNil) // Now close them. - err = p.Close() + err = s.Pool.Close() c.Assert(err, jc.ErrorIsNil) // Confirm that controller State isn't closed. @@ -95,11 +111,102 @@ // Ensure that new ones are returned if further States are // requested. - st1_, err := p.Get(s.ModelUUID1) + st1_, err := s.Pool.Get(s.ModelUUID1) c.Assert(err, jc.ErrorIsNil) c.Assert(st1_, gc.Not(gc.Equals), st1) - st2_, err := p.Get(s.ModelUUID2) + st2_, err := s.Pool.Get(s.ModelUUID2) c.Assert(err, jc.ErrorIsNil) c.Assert(st2_, gc.Not(gc.Equals), st2) } + +func (s *statePoolSuite) TestReleaseSystemState(c *gc.C) { + // Doesn't maintain a refcount for the system state. + err := s.Pool.Release(s.ModelUUID) + c.Assert(err, jc.ErrorIsNil) +} + +func (s *statePoolSuite) TestReleaseUnknownModel(c *gc.C) { + err := s.Pool.Release("deadbeef") + c.Assert(err, gc.ErrorMatches, "unable to return unknown model deadbeef to the pool") +} + +func (s *statePoolSuite) TestTooManyReleases(c *gc.C) { + _, err := s.Pool.Get(s.ModelUUID1) + c.Assert(err, jc.ErrorIsNil) + err = s.Pool.Release(s.ModelUUID1) + c.Assert(err, jc.ErrorIsNil) + err = s.Pool.Release(s.ModelUUID1) + c.Assert(err, gc.ErrorMatches, fmt.Sprintf( + "state pool refcount for model %s is already 0", s.ModelUUID1)) +} + +func (s *statePoolSuite) TestRemoveSystemStateUUID(c *gc.C) { + err := s.Pool.Remove(s.ModelUUID) + c.Assert(err, jc.ErrorIsNil) + assertNotClosed(c, s.State) +} + +func (s *statePoolSuite) TestRemoveNonExistentModel(c *gc.C) { + err := s.Pool.Remove("abaddad") + // Allow models that haven't been seen by state to be removed. 
+ c.Assert(err, jc.ErrorIsNil) +} + +func assertNotClosed(c *gc.C, st *state.State) { + _, err := st.Model() + c.Assert(err, jc.ErrorIsNil) +} + +func assertClosed(c *gc.C, st *state.State) { + w := state.GetInternalWorkers(st) + c.Check(workertest.CheckKilled(c, w), jc.ErrorIsNil) +} + +func (s *statePoolSuite) TestRemoveWithNoRefsCloses(c *gc.C) { + st, err := s.Pool.Get(s.ModelUUID1) + c.Assert(err, jc.ErrorIsNil) + err = s.Pool.Release(s.ModelUUID1) + c.Assert(err, jc.ErrorIsNil) + + // Confirm the state isn't closed. + assertNotClosed(c, st) + + err = s.Pool.Remove(s.ModelUUID1) + c.Assert(err, jc.ErrorIsNil) + + assertClosed(c, st) +} + +func (s *statePoolSuite) TestRemoveWithRefsClosesOnLastRelease(c *gc.C) { + st, err := s.Pool.Get(s.ModelUUID1) + c.Assert(err, jc.ErrorIsNil) + _, err = s.Pool.Get(s.ModelUUID1) + c.Assert(err, jc.ErrorIsNil) + // Now there are two references to the state. + // Sanity check! + assertNotClosed(c, st) + + // Doesn't close while there are refs still held. + err = s.Pool.Remove(s.ModelUUID1) + c.Assert(err, jc.ErrorIsNil) + assertNotClosed(c, st) + + err = s.Pool.Release(s.ModelUUID1) + c.Assert(err, jc.ErrorIsNil) + // Hasn't been closed - still one outstanding reference. + assertNotClosed(c, st) + + // Should be closed when it's released back into the pool. 
+ err = s.Pool.Release(s.ModelUUID1) + c.Assert(err, jc.ErrorIsNil) + assertClosed(c, st) +} + +func (s *statePoolSuite) TestGetRemovedNotAllowed(c *gc.C) { + _, err := s.Pool.Get(s.ModelUUID1) + c.Assert(err, jc.ErrorIsNil) + err = s.Pool.Remove(s.ModelUUID1) + _, err = s.Pool.Get(s.ModelUUID1) + c.Assert(err, gc.ErrorMatches, fmt.Sprintf("model %v has been removed", s.ModelUUID1)) +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/prechecker_test.go juju-core-2.0.0/src/github.com/juju/juju/state/prechecker_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/prechecker_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/prechecker_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -102,7 +102,7 @@ err := s.State.SetModelConstraints(envCons) c.Assert(err, jc.ErrorIsNil) oneJob := []state.MachineJob{state.JobHostUnits} - extraCons := constraints.MustParse("cpu-cores=4") + extraCons := constraints.MustParse("cores=4") template := state.MachineTemplate{ Series: "precise", Constraints: extraCons, diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/presence/presence.go juju-core-2.0.0/src/github.com/juju/juju/state/presence/presence.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/presence/presence.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/presence/presence.go 2016-10-13 14:31:49.000000000 +0000 @@ -19,7 +19,7 @@ "gopkg.in/juju/names.v2" "gopkg.in/mgo.v2" "gopkg.in/mgo.v2/bson" - "launchpad.net/tomb" + "gopkg.in/tomb.v1" ) var logger = loggo.GetLogger("juju.state.presence") diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/presence/presence_test.go juju-core-2.0.0/src/github.com/juju/juju/state/presence/presence_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/presence/presence_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/presence/presence_test.go 2016-10-13 
14:31:49.000000000 +0000 @@ -15,7 +15,7 @@ gc "gopkg.in/check.v1" "gopkg.in/juju/names.v2" "gopkg.in/mgo.v2" - "launchpad.net/tomb" + "gopkg.in/tomb.v1" "github.com/juju/juju/state/presence" "github.com/juju/juju/testing" diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/raceoff_test.go juju-core-2.0.0/src/github.com/juju/juju/state/raceoff_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/raceoff_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/raceoff_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,8 @@ +// Copyright 2012, 2013 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +// +build !race + +package state_test + +const raceDetector = false diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/raceon_test.go juju-core-2.0.0/src/github.com/juju/juju/state/raceon_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/raceon_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/raceon_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,8 @@ +// Copyright 2012, 2013 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +// +build race + +package state_test + +const raceDetector = true diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/refcounts_ns.go juju-core-2.0.0/src/github.com/juju/juju/state/refcounts_ns.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/refcounts_ns.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/refcounts_ns.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,229 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package state + +import ( + "github.com/juju/errors" + "github.com/juju/juju/mongo" + "gopkg.in/mgo.v2" + "gopkg.in/mgo.v2/bson" + "gopkg.in/mgo.v2/txn" +) + +// refcountDoc holds a reference count. 
Refcounts are important to juju +// because mgo/txn offers no other mechanisms for safely coordinating +// deletion of unreferenced documents. +// +// TODO(fwereade) 2016-08-11 lp:1612163 +// +// There are several places that use ad-hoc refcounts (application +// UnitCount and RelationCount; and model refs; and many many more) +// which should (1) be using separate refcount docs instead of dumping +// them in entity docs and (2) be using *this* refcount functionality +// rather than building their own ad-hoc variants. +type refcountDoc struct { + + // The _id field should hold some globalKey to identify what's + // being referenced, but there's no reason to express it in this + // document directly. + + // RefCount holds the reference count for whatever this doc is + // referencing. + RefCount int `bson:"refcount"` +} + +var ( + errRefcountChanged = errors.New("refcount changed") +) + +// nsRefcounts exposes methods for safely manipulating reference count +// documents. (You can also manipulate them unsafely via the Just* +// methods that don't keep track of DB state.) +var nsRefcounts = nsRefcounts_{} + +// nsRefcounts_ backs nsRefcounts. +type nsRefcounts_ struct{} + +// LazyCreateOp returns a txn.Op that creates a refcount document; or +// false if the document already exists. +func (ns nsRefcounts_) LazyCreateOp(coll mongo.Collection, key string) (txn.Op, bool, error) { + if exists, err := ns.exists(coll, key); err != nil { + return txn.Op{}, false, errors.Trace(err) + } else if exists { + return txn.Op{}, false, nil + } + return ns.JustCreateOp(coll.Name(), key, 0), true, nil +} + +// StrictCreateOp returns a txn.Op that creates a refcount document as +// configured, or an error if the document already exists. 
+func (ns nsRefcounts_) StrictCreateOp(coll mongo.Collection, key string, value int) (txn.Op, error) { + if exists, err := ns.exists(coll, key); err != nil { + return txn.Op{}, errors.Trace(err) + } else if exists { + return txn.Op{}, errors.New("refcount already exists") + } + return ns.JustCreateOp(coll.Name(), key, value), nil +} + +// CreateOrIncRefOp returns a txn.Op that creates a refcount document as +// configured with a specified value; or increments any such refcount doc +// that already exists. +func (ns nsRefcounts_) CreateOrIncRefOp(coll mongo.Collection, key string, n int) (txn.Op, error) { + if exists, err := ns.exists(coll, key); err != nil { + return txn.Op{}, errors.Trace(err) + } else if !exists { + return ns.JustCreateOp(coll.Name(), key, n), nil + } + return ns.JustIncRefOp(coll.Name(), key, n), nil +} + +// StrictIncRefOp returns a txn.Op that increments the value of a +// refcount doc, or returns an error if it does not exist. +func (ns nsRefcounts_) StrictIncRefOp(coll mongo.Collection, key string, n int) (txn.Op, error) { + if exists, err := ns.exists(coll, key); err != nil { + return txn.Op{}, errors.Trace(err) + } else if !exists { + return txn.Op{}, errors.New("does not exist") + } + return ns.JustIncRefOp(coll.Name(), key, n), nil +} + +// AliveDecRefOp returns a txn.Op that decrements the value of a +// refcount doc, or an error if the doc does not exist or the count +// would go below 0. +func (ns nsRefcounts_) AliveDecRefOp(coll mongo.Collection, key string) (txn.Op, error) { + if refcount, err := ns.read(coll, key); err != nil { + return txn.Op{}, errors.Trace(err) + } else if refcount < 1 { + return txn.Op{}, errors.New("cannot decRef below 0") + } + return ns.justDecRefOp(coll.Name(), key, 0), nil +} + +// DyingDecRefOp returns a txn.Op that decrements the value of a +// refcount doc and deletes it if the count reaches 0; if the Op will +// cause a delete, the bool result will be true. 
It will return an error +// if the doc does not exist or the count would go below 0. +func (ns nsRefcounts_) DyingDecRefOp(coll mongo.Collection, key string) (txn.Op, bool, error) { + refcount, err := ns.read(coll, key) + if err != nil { + return txn.Op{}, false, errors.Trace(err) + } + if refcount < 1 { + return txn.Op{}, false, errors.New("cannot decRef below 0") + } else if refcount > 1 { + return ns.justDecRefOp(coll.Name(), key, 1), false, nil + } + return ns.JustRemoveOp(coll.Name(), key, 1), true, nil +} + +// RemoveOp returns a txn.Op that removes a refcount doc so long as its +// refcount is the supplied value, or an error. +func (ns nsRefcounts_) RemoveOp(coll mongo.Collection, key string, value int) (txn.Op, error) { + refcount, err := ns.read(coll, key) + if err != nil { + return txn.Op{}, errors.Trace(err) + } + if refcount != value { + return txn.Op{}, errRefcountChanged + } + return ns.JustRemoveOp(coll.Name(), key, value), nil +} + +// CurrentOp returns the current reference count value, and a txn.Op that +// asserts that the refcount has that value, or an error. If the refcount +// doc does not exist, then the op will assert that the document does not +// exist instead, and no error is returned. +func (ns nsRefcounts_) CurrentOp(coll mongo.Collection, key string) (txn.Op, int, error) { + refcount, err := ns.read(coll, key) + if errors.IsNotFound(err) { + return txn.Op{ + C: coll.Name(), + Id: key, + Assert: txn.DocMissing, + }, 0, nil + } + if err != nil { + return txn.Op{}, -1, errors.Trace(err) + } + return txn.Op{ + C: coll.Name(), + Id: key, + Assert: bson.D{{"refcount", refcount}}, + }, refcount, nil +} + +// JustCreateOp returns a txn.Op that creates a refcount document as +// configured, *without* checking database state for sanity first. +// You should avoid using this method in most cases. 
+func (nsRefcounts_) JustCreateOp(collName, key string, value int) txn.Op { + return txn.Op{ + C: collName, + Id: key, + Assert: txn.DocMissing, + Insert: bson.D{{"refcount", value}}, + } +} + +// JustIncRefOp returns a txn.Op that increments a refcount document by +// the specified amount, as configured, *without* checking database state +// for sanity first. You should avoid using this method in most cases. +func (nsRefcounts_) JustIncRefOp(collName, key string, n int) txn.Op { + return txn.Op{ + C: collName, + Id: key, + Assert: txn.DocExists, + Update: bson.D{{"$inc", bson.D{{"refcount", n}}}}, + } +} + +// JustRemoveOp returns a txn.Op that deletes a refcount doc so long as +// the refcount matches count. You should avoid using this method in +// most cases. +func (ns nsRefcounts_) JustRemoveOp(collName, key string, count int) txn.Op { + op := txn.Op{ + C: collName, + Id: key, + Remove: true, + } + if count >= 0 { + op.Assert = bson.D{{"refcount", count}} + } + return op +} + +// justDecRefOp returns a txn.Op that decrements a refcount document by +// 1, as configured, allowing it to drop no lower than limit; which must +// not be less than zero. It's unexported, meaningless though that may +// be, to encourage clients to *really* not use it: too many ways to +// mess it up if you're not precisely aware of the context. +func (nsRefcounts_) justDecRefOp(collName, key string, limit int) txn.Op { + return txn.Op{ + C: collName, + Id: key, + Assert: bson.D{{"refcount", bson.D{{"$gt", limit}}}}, + Update: bson.D{{"$inc", bson.D{{"refcount", -1}}}}, + } +} + +// exists returns whether the identified refcount doc exists. +func (nsRefcounts_) exists(coll mongo.Collection, key string) (bool, error) { + count, err := coll.FindId(key).Count() + if err != nil { + return false, errors.Trace(err) + } + return count != 0, nil +} + +// read returns the value stored in the identified refcount doc. 
+func (nsRefcounts_) read(coll mongo.Collection, key string) (int, error) { + var doc refcountDoc + if err := coll.FindId(key).One(&doc); err == mgo.ErrNotFound { + return 0, errors.NotFoundf("refcount %q", key) + } else if err != nil { + return 0, errors.Trace(err) + } + return doc.RefCount, nil +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/relation.go juju-core-2.0.0/src/github.com/juju/juju/state/relation.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/relation.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/relation.go 2016-10-13 14:31:49.000000000 +0000 @@ -200,7 +200,11 @@ hasLastRef := bson.D{{"life", Dying}, {"unitcount", 0}, {"relationcount", 1}} removable := append(bson.D{{"_id", ep.ApplicationName}}, hasLastRef...) if err := applications.Find(removable).One(&svc.doc); err == nil { - ops = append(ops, svc.removeOps(hasLastRef)...) + appRemoveOps, err := svc.removeOps(hasLastRef) + if err != nil { + return nil, errors.Trace(err) + } + ops = append(ops, appRemoveOps...) continue } else if err != mgo.ErrNotFound { return nil, err @@ -220,7 +224,7 @@ Update: bson.D{{"$inc", bson.D{{"relationcount", -1}}}}, }) } - cleanupOp := r.st.newCleanupOp(cleanupRelationSettings, fmt.Sprintf("r#%d#", r.Id())) + cleanupOp := newCleanupOp(cleanupRelationSettings, fmt.Sprintf("r#%d#", r.Id())) return append(ops, cleanupOp), nil } @@ -291,3 +295,36 @@ scope: strings.Join(scope, "#"), }, nil } + +// relationSettingsCleanupChange removes the settings doc. +type relationSettingsCleanupChange struct { + Prefix string +} + +// Prepare is part of the Change interface. 
+func (change relationSettingsCleanupChange) Prepare(db Database) ([]txn.Op, error) { + settings, closer := db.GetCollection(settingsC) + defer closer() + sel := bson.D{{"_id", bson.D{{"$regex", "^" + change.Prefix}}}} + var docs []struct { + DocID string `bson:"_id"` + } + err := settings.Find(sel).Select(bson.D{{"_id", 1}}).All(&docs) + if err != nil { + return nil, errors.Trace(err) + } + if len(docs) == 0 { + return nil, ErrChangeComplete + } + + ops := make([]txn.Op, len(docs)) + for i, doc := range docs { + ops[i] = txn.Op{ + C: settingsC, + Id: doc.DocID, + Remove: true, + } + } + return ops, nil + +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/relationunit_test.go juju-core-2.0.0/src/github.com/juju/juju/state/relationunit_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/relationunit_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/relationunit_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -410,7 +410,7 @@ } func (s *StateSuite) TestWatchWatchScopeDiesOnStateClose(c *gc.C) { - testWatcherDiesWhenStateCloses(c, s.modelTag, func(c *gc.C, st *state.State) waiter { + testWatcherDiesWhenStateCloses(c, s.modelTag, s.State.ControllerTag(), func(c *gc.C, st *state.State) waiter { pr := NewPeerRelation(c, st) w := pr.ru0.WatchScope() <-w.Changes() diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/resources_mongo_test.go juju-core-2.0.0/src/github.com/juju/juju/state/resources_mongo_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/resources_mongo_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/resources_mongo_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -5,7 +5,6 @@ import ( "strings" - "time" "github.com/juju/testing" jc "github.com/juju/testing/checkers" @@ -13,6 +12,7 @@ charmresource "gopkg.in/juju/charm.v6-unstable/resource" "github.com/juju/juju/resource" + coretesting "github.com/juju/juju/testing" ) type 
ResourcesMongoSuite struct { @@ -25,7 +25,7 @@ content := "some data\n..." fp, err := charmresource.GenerateFingerprint(strings.NewReader(content)) c.Assert(err, jc.ErrorIsNil) - now := time.Now().UTC() + now := coretesting.ZeroTime() applicationID := "a-application" docID := applicationResourceID("spam") @@ -80,7 +80,7 @@ content := "some data\n..." fp, err := charmresource.GenerateFingerprint(strings.NewReader(content)) c.Assert(err, jc.ErrorIsNil) - now := time.Now().UTC() + now := coretesting.ZeroTime() applicationID := "a-application" docID := applicationResourceID("spam") @@ -129,7 +129,7 @@ content := "some data\n..." fp, err := charmresource.GenerateFingerprint(strings.NewReader(content)) c.Assert(err, jc.ErrorIsNil) - now := time.Now().UTC() + now := coretesting.ZeroTime() applicationID := "a-application" docID := pendingResourceID("spam", "some-unique-ID-001") @@ -182,7 +182,7 @@ content := "some data\n..." fp, err := charmresource.GenerateFingerprint(strings.NewReader(content)) c.Assert(err, jc.ErrorIsNil) - now := time.Now().UTC() + now := coretesting.NonZeroTime() res, err := doc2resource(resourceDoc{ DocID: docID, @@ -233,7 +233,7 @@ content := "some data\n..." fp, err := charmresource.GenerateFingerprint(strings.NewReader(content)) c.Assert(err, jc.ErrorIsNil) - now := time.Now().UTC() + now := coretesting.NonZeroTime() res, err := doc2basicResource(resourceDoc{ DocID: docID, @@ -285,7 +285,7 @@ content := "some data\n..." fp, err := charmresource.GenerateFingerprint(strings.NewReader(content)) c.Assert(err, jc.ErrorIsNil) - now := time.Now().UTC() + now := coretesting.NonZeroTime() res, err := doc2basicResource(resourceDoc{ DocID: docID, @@ -329,7 +329,7 @@ content := "some data\n..." 
fp, err := charmresource.GenerateFingerprint(strings.NewReader(content)) c.Assert(err, jc.ErrorIsNil) - now := time.Now().UTC() + now := coretesting.ZeroTime() applicationID := "a-application" docID := applicationResourceID("spam") @@ -387,7 +387,7 @@ content := "some data\n..." fp, err := charmresource.GenerateFingerprint(strings.NewReader(content)) c.Assert(err, jc.ErrorIsNil) - now := time.Now().UTC() + now := coretesting.NonZeroTime() res, err := doc2basicResource(resourceDoc{ DocID: docID, @@ -505,7 +505,7 @@ content := "some data\n..." fp, err := charmresource.GenerateFingerprint(strings.NewReader(content)) c.Assert(err, jc.ErrorIsNil) - now := time.Now().UTC() + now := coretesting.ZeroTime() applicationID := "a-application" id := applicationID + "/spam" diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/resources_persistence_test.go juju-core-2.0.0/src/github.com/juju/juju/state/resources_persistence_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/resources_persistence_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/resources_persistence_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -17,6 +17,7 @@ "github.com/juju/juju/resource" "github.com/juju/juju/resource/resourcetesting" "github.com/juju/juju/state/statetest" + coretesting "github.com/juju/juju/testing" ) var _ = gc.Suite(&ResourcePersistenceSuite{}) @@ -121,7 +122,7 @@ func (s *ResourcePersistenceSuite) TestListResourcesBadDoc(c *gc.C) { _, docs := newPersistenceResources(c, "a-application", "spam", "eggs") - docs[0].Timestamp = time.Time{} + docs[0].Timestamp = coretesting.ZeroTime() s.base.ReturnAll = docs p := NewResourcePersistence(s.base) @@ -227,7 +228,7 @@ func (s *ResourcePersistenceSuite) TestStageResourceBadResource(c *gc.C) { res, _ := newPersistenceResource(c, "a-application", "spam") - res.Resource.Timestamp = time.Time{} + res.Resource.Timestamp = coretesting.ZeroTime() p := NewResourcePersistence(s.base) _, err := 
p.StageResource(res.Resource, res.storagePath) @@ -300,13 +301,13 @@ } func (s *ResourcePersistenceSuite) TestSetCharmStoreResourceOkay(c *gc.C) { - lastPolled := time.Now().UTC() + lastPolled := coretesting.NonZeroTime().UTC() applicationname := "a-application" res, doc := newPersistenceResource(c, applicationname, "spam") expected := doc // a copy expected.DocID += "#charmstore" expected.Username = "" - expected.Timestamp = time.Time{} + expected.Timestamp = coretesting.ZeroTime() expected.StoragePath = "" expected.LastPolled = lastPolled p := NewResourcePersistence(s.base) @@ -414,7 +415,7 @@ func (s *ResourcePersistenceSuite) TestSetUnitResourceBadResource(c *gc.C) { res, doc := newPersistenceUnitResource(c, "a-application", "a-application/0", "spam") s.base.ReturnOne = doc - res.Timestamp = time.Time{} + res.Timestamp = coretesting.ZeroTime() p := NewResourcePersistence(s.base) err := p.SetUnitResource("a-application/0", res) @@ -466,6 +467,9 @@ s.base.ReturnOne = doc p := NewResourcePersistence(s.base) + // TODO(macgreagoir) We need to keep using time.Now() for now, while we + // have NewResolvePendingResourceOps returning LastPolled based on + // timeNow(). lp:1558657 lastPolled := time.Now().UTC().Round(time.Second) ops, err := p.NewResolvePendingResourceOps(stored.ID, stored.PendingID) @@ -474,7 +478,7 @@ csresourceDoc := expected csresourceDoc.DocID = "resource#a-application/spam#charmstore" csresourceDoc.Username = "" - csresourceDoc.Timestamp = time.Time{} + csresourceDoc.Timestamp = coretesting.ZeroTime() csresourceDoc.StoragePath = "" csresourceDoc.LastPolled = lastPolled @@ -527,6 +531,9 @@ s.stub.SetErrors(nil, notFound) p := NewResourcePersistence(s.base) + // TODO(macgreagoir) We need to keep using time.Now() for now, while we + // have NewResolvePendingResourceOps returning LastPolled based on + // timeNow(). 
lp:1558657 lastPolled := time.Now().UTC().Round(time.Second) ops, err := p.NewResolvePendingResourceOps(stored.ID, stored.PendingID) c.Assert(err, jc.ErrorIsNil) @@ -537,7 +544,7 @@ csresourceDoc := expected csresourceDoc.DocID = "resource#a-application/spam#charmstore" csresourceDoc.Username = "" - csresourceDoc.Timestamp = time.Time{} + csresourceDoc.Timestamp = coretesting.ZeroTime() csresourceDoc.StoragePath = "" csresourceDoc.LastPolled = lastPolled @@ -594,9 +601,9 @@ csDoc := doc // a copy csDoc.DocID += "#charmstore" csDoc.Username = "" - csDoc.Timestamp = time.Time{} + csDoc.Timestamp = coretesting.ZeroTime() csDoc.StoragePath = "" - csDoc.LastPolled = time.Now().UTC() + csDoc.LastPolled = coretesting.NonZeroTime().UTC() docs = append(docs, csDoc) } return svcResources, docs diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/resources_test.go juju-core-2.0.0/src/github.com/juju/juju/state/resources_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/resources_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/resources_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -5,7 +5,7 @@ import ( "bytes" - "time" + "time" // Only using time func. 
jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" @@ -14,6 +14,7 @@ "github.com/juju/juju/component/all" "github.com/juju/juju/resource" "github.com/juju/juju/resource/resourcetesting" + "github.com/juju/juju/testing" ) func init() { @@ -48,7 +49,7 @@ c.Assert(err, jc.ErrorIsNil) csResources := []charmresource.Resource{res.Resource} - err = st.SetCharmStoreResources("a-application", csResources, time.Now()) + err = st.SetCharmStoreResources("a-application", csResources, testing.NonZeroTime()) c.Assert(err, jc.ErrorIsNil) resources, err = st.ListResources("a-application") diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/singular_test.go juju-core-2.0.0/src/github.com/juju/juju/state/singular_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/singular_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/singular_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -7,31 +7,22 @@ "time" jc "github.com/juju/testing/checkers" - "github.com/juju/utils/clock" gc "gopkg.in/check.v1" "github.com/juju/juju/core/lease" - "github.com/juju/juju/state" coretesting "github.com/juju/juju/testing" ) type SingularSuite struct { - clock *coretesting.Clock ConnSuite } var _ = gc.Suite(&SingularSuite{}) -func (s *SingularSuite) SetUpSuite(c *gc.C) { - s.ConnSuite.SetUpSuite(c) - s.PatchValue(&state.GetClock, func() clock.Clock { - return s.clock - }) -} - func (s *SingularSuite) SetUpTest(c *gc.C) { - s.clock = coretesting.NewClock(time.Now()) s.ConnSuite.SetUpTest(c) + err := s.State.SetClockForTesting(s.Clock) + c.Assert(err, jc.ErrorIsNil) } func (s *SingularSuite) TestClaimBadLease(c *gc.C) { @@ -68,22 +59,22 @@ claimer := s.State.SingularClaimer() err := claimer.Claim(s.modelTag.Id(), "machine-123", time.Minute) c.Assert(err, jc.ErrorIsNil) - wait := make(chan error) go func() { + s.Clock.Advance(coretesting.ShortWait) wait <- claimer.WaitUntilExpired(s.modelTag.Id()) }() select { case err := <-wait: 
c.Fatalf("expired early with %v", err) - case <-time.After(coretesting.ShortWait): + case <-s.Clock.After(coretesting.ShortWait): } - s.clock.Advance(time.Hour) + s.Clock.Advance(time.Hour) select { case err := <-wait: c.Check(err, jc.ErrorIsNil) - case <-time.After(coretesting.LongWait): + case <-s.Clock.After(coretesting.LongWait): c.Fatalf("never expired") } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/stateenvirons/config.go juju-core-2.0.0/src/github.com/juju/juju/state/stateenvirons/config.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/stateenvirons/config.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/stateenvirons/config.go 2016-10-13 14:31:49.000000000 +0000 @@ -26,17 +26,16 @@ } cloudName := model.Cloud() regionName := model.CloudRegion() - credentialName := model.CloudCredential() - modelOwner := model.Owner() - return CloudSpec(g.State, cloudName, regionName, credentialName, modelOwner) + credentialTag, _ := model.CloudCredential() + return CloudSpec(g.State, cloudName, regionName, credentialTag) } // CloudSpec returns an environs.CloudSpec from a *state.State, // given the cloud, region and credential names. 
func CloudSpec( accessor state.CloudAccessor, - cloudName, regionName, credentialName string, - credentialOwner names.UserTag, + cloudName, regionName string, + credentialTag names.CloudCredentialTag, ) (environs.CloudSpec, error) { modelCloud, err := accessor.Cloud(cloudName) if err != nil { @@ -44,16 +43,11 @@ } var credential *cloud.Credential - if credentialName != "" { - credentials, err := accessor.CloudCredentials(credentialOwner, cloudName) + if credentialTag != (names.CloudCredentialTag{}) { + credentialValue, err := accessor.CloudCredential(credentialTag) if err != nil { return environs.CloudSpec{}, errors.Trace(err) } - var ok bool - credentialValue, ok := credentials[credentialName] - if !ok { - return environs.CloudSpec{}, errors.NotFoundf("credential %q", credentialName) - } credential = &credentialValue } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/stateenvirons/config_test.go juju-core-2.0.0/src/github.com/juju/juju/state/stateenvirons/config_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/stateenvirons/config_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/stateenvirons/config_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -6,6 +6,7 @@ import ( jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" + "gopkg.in/juju/names.v2" "github.com/juju/juju/cloud" "github.com/juju/juju/environs" @@ -39,15 +40,14 @@ func (s *environSuite) TestCloudSpec(c *gc.C) { owner := s.Factory.MakeUser(c, nil).UserTag() emptyCredential := cloud.NewEmptyCredential() - err := s.State.UpdateCloudCredentials(owner, "dummy", map[string]cloud.Credential{ - "empty-credential": emptyCredential, - }) + tag := names.NewCloudCredentialTag("dummy/" + owner.Id() + "/empty-credential") + err := s.State.UpdateCloudCredential(tag, emptyCredential) c.Assert(err, jc.ErrorIsNil) st := s.Factory.MakeModel(c, &factory.ModelParams{ Name: "foo", CloudName: "dummy", - CloudCredential: "empty-credential", + 
CloudCredential: tag, Owner: owner, }) defer st.Close() @@ -56,8 +56,12 @@ cloudSpec, err := stateenvirons.EnvironConfigGetter{st}.CloudSpec(st.ModelTag()) c.Assert(err, jc.ErrorIsNil) c.Assert(cloudSpec, jc.DeepEquals, environs.CloudSpec{ - Type: "dummy", - Name: "dummy", - Credential: &emptyCredential, + Type: "dummy", + Name: "dummy", + Region: "dummy-region", + Endpoint: "dummy-endpoint", + IdentityEndpoint: "dummy-identity-endpoint", + StorageEndpoint: "dummy-storage-endpoint", + Credential: &emptyCredential, }) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/stateenvirons/policy.go juju-core-2.0.0/src/github.com/juju/juju/state/stateenvirons/policy.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/stateenvirons/policy.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/stateenvirons/policy.go 2016-10-13 14:31:49.000000000 +0000 @@ -40,16 +40,19 @@ // ConfigValidator implements state.Policy. func (p environStatePolicy) ConfigValidator() (config.Validator, error) { - model, err := p.st.Model() + return environProvider(p.st) +} + +// ProviderConfigSchemaSource implements state.Policy. +func (p environStatePolicy) ProviderConfigSchemaSource() (config.ConfigSchemaSource, error) { + provider, err := environProvider(p.st) if err != nil { - return nil, errors.Annotate(err, "getting model") + return nil, errors.Trace(err) } - cloud, err := p.st.Cloud(model.Cloud()) - if err != nil { - return nil, errors.Annotate(err, "getting cloud") + if cs, ok := provider.(config.ConfigSchemaSource); ok { + return cs, nil } - // EnvironProvider implements state.ConfigValidator. - return environs.Provider(cloud.Type) + return nil, errors.NotImplementedf("config.ConfigSource") } // ConstraintsValidator implements state.Policy. 
@@ -87,3 +90,16 @@ func NewStorageProviderRegistry(env environs.Environ) storage.ProviderRegistry { return storage.ChainedProviderRegistry{env, provider.CommonStorageProviders()} } + +func environProvider(st *state.State) (environs.EnvironProvider, error) { + model, err := st.Model() + if err != nil { + return nil, errors.Annotate(err, "getting model") + } + cloud, err := st.Cloud(model.Cloud()) + if err != nil { + return nil, errors.Annotate(err, "getting cloud") + } + // EnvironProvider implements state.ConfigValidator. + return environs.Provider(cloud.Type) +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/state.go juju-core-2.0.0/src/github.com/juju/juju/state/state.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/state.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/state.go 2016-10-13 14:31:49.000000000 +0000 @@ -19,6 +19,7 @@ "github.com/juju/loggo" jujutxn "github.com/juju/txn" "github.com/juju/utils" + "github.com/juju/utils/clock" "github.com/juju/utils/os" "github.com/juju/utils/series" "github.com/juju/utils/set" @@ -37,6 +38,7 @@ "github.com/juju/juju/instance" "github.com/juju/juju/mongo" "github.com/juju/juju/network" + "github.com/juju/juju/permission" "github.com/juju/juju/state/cloudimagemetadata" stateaudit "github.com/juju/juju/state/internal/audit" statelease "github.com/juju/juju/state/lease" @@ -74,13 +76,15 @@ // State represents the state of an model // managed by juju. type State struct { - modelTag names.ModelTag - controllerTag names.ModelTag - mongoInfo *mongo.MongoInfo - session *mgo.Session - database Database - policy Policy - newPolicy NewPolicyFunc + clock clock.Clock + modelTag names.ModelTag + controllerModelTag names.ModelTag + controllerTag names.ControllerTag + mongoInfo *mongo.MongoInfo + session *mgo.Session + database Database + policy Policy + newPolicy NewPolicyFunc // cloudName is the name of the cloud on which the model // represented by this state runs. 
@@ -133,14 +137,21 @@ // IsController returns true if this state instance has the bootstrap // model UUID. func (st *State) IsController() bool { - return st.modelTag == st.controllerTag + return st.modelTag == st.controllerModelTag } -// ControllerUUID returns the model UUID for the controller model +// ControllerUUID returns the UUID for the controller // of this state instance. func (st *State) ControllerUUID() string { return st.controllerTag.Id() } +func (st *State) ControllerTag() names.ControllerTag { + return st.controllerTag +} + +func ControllerAccess(st *State, tag names.Tag) (permission.UserAccess, error) { + return st.UserAccess(tag.(names.UserTag), st.controllerTag) +} // RemoveAllModelDocs removes all documents from multi-model // collections. The model should be put into a dying state before call @@ -177,38 +188,11 @@ } func (st *State) removeAllModelDocs(modelAssertion bson.D) error { - env, err := st.Model() - if err != nil { - return errors.Trace(err) - } - id := userModelNameIndex(env.Owner().Canonical(), env.Name()) - ops := []txn.Op{{ - // Cleanup the owner:envName unique key. - C: usermodelnameC, - Id: id, - Remove: true, - }, { - C: modelEntityRefsC, - Id: st.ModelUUID(), - Remove: true, - }, { - C: modelsC, - Id: st.ModelUUID(), - Assert: modelAssertion, - Remove: true, - }} - if !st.IsController() { - ops = append(ops, decHostedModelCountOp()) - } + modelUUID := st.ModelUUID() - var rawCollections []string // Remove each collection in its own transaction. for name, info := range st.database.Schema() { - if info.global { - continue - } - if info.rawAccess { - rawCollections = append(rawCollections, name) + if info.global || info.rawAccess { continue } @@ -219,7 +203,7 @@ // Make sure we gate everything on the model assertion. ops = append([]txn.Op{{ C: modelsC, - Id: st.ModelUUID(), + Id: modelUUID, Assert: modelAssertion, }}, ops...) 
err = st.runTransaction(ops) @@ -227,14 +211,53 @@ return errors.Trace(err) } } - // Now remove raw collections - for _, name := range rawCollections { - if err := st.removeAllInCollectionRaw(name); err != nil { - return errors.Trace(err) + + // Remove from the raw (non-transactional) collections. + for name, info := range st.database.Schema() { + if !info.global && info.rawAccess { + if err := st.removeAllInCollectionRaw(name); err != nil { + return errors.Trace(err) + } } } - // Run the remaining ops to remove the model. + // Remove all user permissions for the model. + permPattern := bson.M{ + "_id": bson.M{"$regex": "^" + permissionID(modelKey(modelUUID), "")}, + } + ops, err := st.removeInCollectionOps(permissionsC, permPattern) + if err != nil { + return errors.Trace(err) + } + err = st.runTransaction(ops) + if err != nil { + return errors.Trace(err) + } + + // Now remove the model. + env, err := st.Model() + if err != nil { + return errors.Trace(err) + } + id := userModelNameIndex(env.Owner().Id(), env.Name()) + ops = []txn.Op{{ + // Cleanup the owner:envName unique key. + C: usermodelnameC, + Id: id, + Remove: true, + }, { + C: modelEntityRefsC, + Id: modelUUID, + Remove: true, + }, { + C: modelsC, + Id: modelUUID, + Assert: modelAssertion, + Remove: true, + }} + if !st.IsController() { + ops = append(ops, decHostedModelCountOp()) + } return st.runTransaction(ops) } @@ -250,11 +273,17 @@ // removeAllInCollectionOps appends to ops operations to // remove all the documents in the given named collection. func (st *State) removeAllInCollectionOps(name string) ([]txn.Op, error) { + return st.removeInCollectionOps(name, nil) +} + +// removeInCollectionOps generates operations to remove all documents +// from the named collection matching a specific selector. 
+func (st *State) removeInCollectionOps(name string, sel interface{}) ([]txn.Op, error) { coll, closer := st.getCollection(name) defer closer() var ids []bson.M - err := coll.Find(nil).Select(bson.D{{"_id", 1}}).All(&ids) + err := coll.Find(sel).Select(bson.D{{"_id", 1}}).All(&ids) if err != nil { return nil, errors.Trace(err) } @@ -271,10 +300,10 @@ // ForModel returns a connection to mongo for the specified model. The // connection uses the same credentials and policy as the existing connection. -func (st *State) ForModel(model names.ModelTag) (*State, error) { +func (st *State) ForModel(modelTag names.ModelTag) (*State, error) { session := st.session.Copy() newSt, err := newState( - model, session, st.mongoInfo, st.newPolicy, + modelTag, st.controllerModelTag, session, st.mongoInfo, st.newPolicy, st.clock, ) if err != nil { return nil, errors.Trace(err) @@ -291,7 +320,7 @@ // * creating cloud metadata storage // // start will close the *State if it fails. -func (st *State) start(controllerTag names.ModelTag) (err error) { +func (st *State) start(controllerTag names.ControllerTag) (err error) { defer func() { if err == nil { return @@ -323,15 +352,14 @@ // now we've set up leaseClientId, we can use workersFactory logger.Infof("starting standard state workers") - clock := GetClock() factory := workersFactory{ st: st, - clock: clock, + clock: st.clock, } workers, err := workers.NewRestartWorkers(workers.RestartConfig{ Factory: factory, Logger: loggo.GetLogger(logger.Name() + ".workers"), - Clock: clock, + Clock: st.clock, Delay: time.Second, }) if err != nil { @@ -341,7 +369,6 @@ logger.Infof("creating cloud image metadata storage") st.CloudImageMetadataStorage = cloudimagemetadata.NewStorage( - st.ModelUUID(), cloudimagemetadataC, &environMongo{st}, ) @@ -350,13 +377,48 @@ return nil } +// KillWorkers tells the state's internal workers to die. This is +// mainly used to kill the leadership manager to prevent it from +// interfering with apiserver shutdown. 
+func (st *State) KillWorkers() { + // TODO(fwereade): 2015-08-07 lp:1482634 + // obviously, this should not exist: it's a quick hack to address lp:1481368 in + // 1.24.4, and should be quickly replaced with something that isn't so heinous. + // + // But. + // + // I *believe* that what it'll take to fix this is to extract the mongo-session- + // opening from state.Open, so we can create a mongosessioner Manifold on which + // state, leadership, watching, tools storage, etc etc etc can all independently + // depend. (Each dependency would/should have a separate session so they can + // close them all on their own schedule, without panics -- but the failure of + // the shared component should successfully goose them all into shutting down, + // in parallel, of their own accord.) + st.workers.Kill() +} + +// ApplicationLeaders returns a map of the application name to the +// unit name that is the current leader. +func (st *State) ApplicationLeaders() (map[string]string, error) { + client, err := st.getLeadershipLeaseClient() + if err != nil { + return nil, errors.Trace(err) + } + leases := client.Leases() + result := make(map[string]string, len(leases)) + for key, value := range leases { + result[key] = value.Holder + } + return result, nil +} + func (st *State) getLeadershipLeaseClient() (lease.Client, error) { client, err := statelease.NewClient(statelease.ClientConfig{ Id: st.leaseClientId, Namespace: applicationLeadershipNamespace, Collection: leasesC, Mongo: &environMongo{st}, - Clock: GetClock(), + Clock: st.clock, }) if err != nil { return nil, errors.Annotatef(err, "cannot create leadership lease client") @@ -370,7 +432,7 @@ Namespace: singularControllerNamespace, Collection: leasesC, Mongo: &environMongo{st}, - Clock: GetClock(), + Clock: st.clock, }) if err != nil { return nil, errors.Annotatef(err, "cannot create singular lease client") @@ -448,7 +510,7 @@ // with various collections in a single session, so don't want to call // getCollection multiple 
times. func (st *State) newDB() (Database, func()) { - return st.database.CopySession() + return st.database.Copy() } // Ping probes the state's database connection to ensure @@ -657,22 +719,34 @@ return writeConstraints(st, modelGlobalKey, cons) } -// AllMachines returns all machines in the model -// ordered by id. -func (st *State) AllMachines() (machines []*Machine, err error) { - machinesCollection, closer := st.getCollection(machinesC) - defer closer() - +func (st *State) allMachines(machinesCollection mongo.Collection) ([]*Machine, error) { mdocs := machineDocSlice{} - err = machinesCollection.Find(nil).All(&mdocs) + err := machinesCollection.Find(nil).All(&mdocs) if err != nil { return nil, errors.Annotatef(err, "cannot get all machines") } sort.Sort(mdocs) - for _, doc := range mdocs { - machines = append(machines, newMachine(st, &doc)) + machines := make([]*Machine, len(mdocs)) + for i, doc := range mdocs { + machines[i] = newMachine(st, &doc) } - return + return machines, nil +} + +// AllMachines returns all machines in the model +// ordered by id. +func (st *State) AllMachines() ([]*Machine, error) { + machinesCollection, closer := st.getCollection(machinesC) + defer closer() + return st.allMachines(machinesCollection) +} + +// AllMachinesFor returns all machines for the model represented +// by the given modeluuid +func (st *State) AllMachinesFor(modelUUID string) ([]*Machine, error) { + machinesCollection, closer := st.getCollectionFor(modelUUID, machinesC) + defer closer() + return st.allMachines(machinesCollection) } type machineDocSlice []machineDoc @@ -833,7 +907,7 @@ case names.UserTag: coll = usersC if !tag.IsLocal() { - return "", nil, fmt.Errorf("%q is not a local user", tag.Canonical()) + return "", nil, fmt.Errorf("%q is not a local user", tag.Id()) } id = tag.Name() case names.RelationTag: @@ -1047,14 +1121,10 @@ } statusDoc := statusDoc{ - ModelUUID: st.ModelUUID(), - // TODO(fwereade): this violates the spec. Should be "waiting". 
- // Implemented like this to be consistent with incorrect add-unit - // behaviour. - Status: status.StatusUnknown, - StatusInfo: MessageWaitForAgentInit, - // TODO(fwereade): 2016-03-17 lp:1558657 - Updated: time.Now().UnixNano(), + ModelUUID: st.ModelUUID(), + Status: status.Waiting, + StatusInfo: status.MessageWaitForMachine, + Updated: st.clock.Now().UnixNano(), // This exists to preserve questionable unit-aggregation behaviour // while we work out how to switch to an implementation that makes // sense. It is also set in AddMissingServiceStatuses. @@ -1063,19 +1133,21 @@ // The addServiceOps does not include the environment alive assertion, // so we add it here. - ops := append( - []txn.Op{ - assertModelActiveOp(st.ModelUUID()), - endpointBindingsOp, - }, - addApplicationOps(st, addApplicationOpsArgs{ - applicationDoc: svcDoc, - statusDoc: statusDoc, - constraints: args.Constraints, - storage: args.Storage, - settings: map[string]interface{}(args.Settings), - settingsRefCount: 1, - })...) + ops := []txn.Op{ + assertModelActiveOp(st.ModelUUID()), + endpointBindingsOp, + } + addOps, err := addApplicationOps(st, addApplicationOpsArgs{ + applicationDoc: svcDoc, + statusDoc: statusDoc, + constraints: args.Constraints, + storage: args.Storage, + settings: map[string]interface{}(args.Settings), + }) + if err != nil { + return nil, errors.Trace(err) + } + ops = append(ops, addOps...) // Collect peer relation addition operations. // @@ -1120,6 +1192,12 @@ if err := checkModelActive(st); err != nil { return nil, errors.Trace(err) } + // TODO(fwereade): 2016-09-09 lp:1621754 + // This is not always correct -- there are a million + // operations collected in this func, not *all* of them + // imply that this is the problem. (e.g. 
the charm being + // destroyed just as we add application will fail, but + // not because "application already exists") return nil, errors.Errorf("application already exists") } else if err != nil { return nil, errors.Trace(err) @@ -1703,12 +1781,12 @@ } return u.AssignToMachine(m) case AssignClean: - if _, err = u.AssignToCleanMachine(); err != noCleanMachines { + if _, err = u.AssignToCleanMachine(); errors.Cause(err) != noCleanMachines { return errors.Trace(err) } return u.AssignToNewMachineOrContainer() case AssignCleanEmpty: - if _, err = u.AssignToCleanEmptyMachine(); err != noCleanMachines { + if _, err = u.AssignToCleanEmptyMachine(); errors.Cause(err) != noCleanMachines { return errors.Trace(err) } return u.AssignToNewMachineOrContainer() @@ -1996,3 +2074,21 @@ } return p + key[2:], true } + +// SetClockForTesting is an exported function to allow other packages +// to set the internal clock for the State instance. It is named such +// that it should be obvious if it is ever called from a non-test package. +func (st *State) SetClockForTesting(clock clock.Clock) error { + st.clock = clock + // Need to restart the lease workers so they get the new clock. + st.workers.Kill() + err := st.workers.Wait() + if err != nil { + return errors.Trace(err) + } + err = st.start(st.controllerTag) + if err != nil { + return errors.Trace(err) + } + return nil +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/state_leader_test.go juju-core-2.0.0/src/github.com/juju/juju/state/state_leader_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/state_leader_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/state_leader_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,38 +4,29 @@ package state_test import ( - "time" + "time" // Only used for time types. 
"github.com/juju/errors" jc "github.com/juju/testing/checkers" - "github.com/juju/utils/clock" gc "gopkg.in/check.v1" "gopkg.in/mgo.v2/txn" "github.com/juju/juju/core/leadership" - "github.com/juju/juju/state" coretesting "github.com/juju/juju/testing" ) type LeadershipSuite struct { ConnSuite - clock *coretesting.Clock checker leadership.Checker claimer leadership.Claimer } var _ = gc.Suite(&LeadershipSuite{}) -func (s *LeadershipSuite) SetUpSuite(c *gc.C) { - s.ConnSuite.SetUpSuite(c) - s.PatchValue(&state.GetClock, func() clock.Clock { - return s.clock - }) -} - func (s *LeadershipSuite) SetUpTest(c *gc.C) { - s.clock = coretesting.NewClock(time.Now()) s.ConnSuite.SetUpTest(c) + err := s.State.SetClockForTesting(s.Clock) + c.Assert(err, jc.ErrorIsNil) s.checker = s.State.LeadershipChecker() s.claimer = s.State.LeadershipClaimer() } @@ -121,25 +112,41 @@ c.Check(ops2, gc.IsNil) } -func (s *LeadershipSuite) TestHackLeadershipUnblocksClaimer(c *gc.C) { +func (s *LeadershipSuite) TestKillWorkersUnblocksClaimer(c *gc.C) { err := s.claimer.ClaimLeadership("blah", "blah/0", time.Minute) c.Assert(err, jc.ErrorIsNil) - s.State.HackLeadership() + s.State.KillWorkers() + s.Clock.Advance(coretesting.LongWait) select { case err := <-s.expiryChan("blah"): c.Check(err, gc.ErrorMatches, "lease manager stopped") - case <-time.After(coretesting.LongWait): + case <-s.Clock.After(coretesting.LongWait): c.Fatalf("timed out while waiting for unblock") } } +func (s *LeadershipSuite) TestApplicationLeaders(c *gc.C) { + err := s.claimer.ClaimLeadership("blah", "blah/0", time.Minute) + c.Assert(err, jc.ErrorIsNil) + err = s.claimer.ClaimLeadership("application", "application/1", time.Minute) + c.Assert(err, jc.ErrorIsNil) + + leaders, err := s.State.ApplicationLeaders() + c.Assert(err, jc.ErrorIsNil) + c.Assert(leaders, jc.DeepEquals, map[string]string{ + "application": "application/1", + "blah": "blah/0", + }) +} + func (s *LeadershipSuite) expire(c *gc.C, applicationname string) { - 
s.clock.Advance(time.Hour) + s.Clock.Advance(time.Hour) + s.Session.Fsync(false) select { case err := <-s.expiryChan(applicationname): c.Assert(err, jc.ErrorIsNil) - case <-time.After(coretesting.LongWait): + case <-s.Clock.After(coretesting.LongWait): c.Fatalf("never unblocked") } } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/state_test.go juju-core-2.0.0/src/github.com/juju/juju/state/state_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/state_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/state_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -17,6 +17,7 @@ "github.com/juju/txn" "github.com/juju/utils" "github.com/juju/utils/arch" + "github.com/juju/utils/clock" "github.com/juju/utils/series" "github.com/juju/version" gc "gopkg.in/check.v1" @@ -33,6 +34,7 @@ "github.com/juju/juju/mongo" "github.com/juju/juju/mongo/mongotest" "github.com/juju/juju/network" + "github.com/juju/juju/permission" "github.com/juju/juju/state" "github.com/juju/juju/state/multiwatcher" statetesting "github.com/juju/juju/state/testing" @@ -56,7 +58,7 @@ func preventUnitDestroyRemove(c *gc.C, u *state.Unit) { now := time.Now() sInfo := status.StatusInfo{ - Status: status.StatusIdle, + Status: status.Idle, Message: "", Since: &now, } @@ -137,24 +139,16 @@ func (s *StateSuite) TestDialAgain(c *gc.C) { // Ensure idempotent operations on Dial are working fine. 
for i := 0; i < 2; i++ { - st, err := state.Open(s.modelTag, statetesting.NewMongoInfo(), mongotest.DialOpts(), nil) + st, err := state.Open(s.modelTag, s.State.ControllerTag(), statetesting.NewMongoInfo(), mongotest.DialOpts(), nil) c.Assert(err, jc.ErrorIsNil) c.Assert(st.Close(), gc.IsNil) } } -func (s *StateSuite) TestOpenAcceptsMissingModelTag(c *gc.C) { - st, err := state.Open(names.ModelTag{}, statetesting.NewMongoInfo(), mongotest.DialOpts(), nil) - c.Assert(err, jc.ErrorIsNil) - - c.Check(st.ModelTag(), gc.Equals, s.modelTag) - c.Check(st.Close(), jc.ErrorIsNil) -} - func (s *StateSuite) TestOpenRequiresExtantModelTag(c *gc.C) { uuid := utils.MustNewUUID() tag := names.NewModelTag(uuid.String()) - st, err := state.Open(tag, statetesting.NewMongoInfo(), mongotest.DialOpts(), nil) + st, err := state.Open(tag, s.State.ControllerTag(), statetesting.NewMongoInfo(), mongotest.DialOpts(), nil) if !c.Check(st, gc.IsNil) { c.Check(st.Close(), jc.ErrorIsNil) } @@ -163,7 +157,7 @@ } func (s *StateSuite) TestOpenSetsModelTag(c *gc.C) { - st, err := state.Open(s.modelTag, statetesting.NewMongoInfo(), mongotest.DialOpts(), nil) + st, err := state.Open(s.modelTag, s.State.ControllerTag(), statetesting.NewMongoInfo(), mongotest.DialOpts(), nil) c.Assert(err, jc.ErrorIsNil) defer st.Close() @@ -176,7 +170,7 @@ func (s *StateSuite) TestNoModelDocs(c *gc.C) { c.Assert(s.State.EnsureModelRemoved(), gc.ErrorMatches, - fmt.Sprintf("found documents for model with uuid %s: 1 constraints doc, 2 leases doc, 1 modelusers doc, 2 permissions doc, 1 settings doc, 1 statuses doc", s.State.ModelUUID())) + fmt.Sprintf("found documents for model with uuid %s: 1 constraints doc, 2 leases doc, 1 modelusers doc, 1 settings doc, 1 statuses doc", s.State.ModelUUID())) } func (s *StateSuite) TestMongoSession(c *gc.C) { @@ -499,7 +493,7 @@ now := time.Now() sInfo := status.StatusInfo{ - Status: status.StatusError, + Status: status.Error, Message: "some status", Since: &now, } @@ -868,7 +862,7 @@ 
err := s.State.SetModelConstraints(constraints.MustParse("mem=4G")) c.Assert(err, jc.ErrorIsNil) oneJob := []state.MachineJob{state.JobHostUnits} - extraCons := constraints.MustParse("cpu-cores=4") + extraCons := constraints.MustParse("cores=4") m, err := s.State.AddOneMachine(state.MachineTemplate{ Series: "quantal", Constraints: extraCons, @@ -878,7 +872,7 @@ c.Assert(m.Id(), gc.Equals, "0") c.Assert(m.Series(), gc.Equals, "quantal") c.Assert(m.Jobs(), gc.DeepEquals, oneJob) - expectedCons := constraints.MustParse("cpu-cores=4 mem=4G") + expectedCons := constraints.MustParse("cores=4 mem=4G") mcons, err := m.Constraints() c.Assert(err, jc.ErrorIsNil) c.Assert(mcons, gc.DeepEquals, expectedCons) @@ -1362,6 +1356,9 @@ mysql, err := s.State.AddApplication(state.AddApplicationArgs{Name: "mysql", Charm: ch}) c.Assert(err, jc.ErrorIsNil) c.Assert(mysql.Name(), gc.Equals, "mysql") + sInfo, err := mysql.Status() + c.Assert(sInfo.Status, gc.Equals, status.Waiting) + c.Assert(sInfo.Message, gc.Equals, "waiting for machine") // Check that retrieving the new created services works correctly. 
wordpress, err = s.State.Application("wordpress") @@ -2018,7 +2015,7 @@ // Service.WatchRelations // State.WatchEnviron // Machine.WatchContainers - testWatcherDiesWhenStateCloses(c, s.modelTag, func(c *gc.C, st *state.State) waiter { + testWatcherDiesWhenStateCloses(c, s.modelTag, s.State.ControllerTag(), func(c *gc.C, st *state.State) waiter { w := st.WatchServices() <-w.Changes() return w @@ -2353,6 +2350,7 @@ func (s *StateSuite) insertFakeModelDocs(c *gc.C, st *state.State) string { // insert one doc for each multiEnvCollection var ops []mgotxn.Op + modelUUID := st.ModelUUID() for _, collName := range state.MultiEnvCollections() { // skip adding constraints, modelUser and settings as they were added when the // model was created @@ -2365,16 +2363,18 @@ err := coll.Insert(bson.M{ "_id": state.DocID(st, "arbitraryid"), - "model-uuid": st.ModelUUID(), + "model-uuid": modelUUID, }) c.Assert(err, jc.ErrorIsNil) } else { ops = append(ops, mgotxn.Op{ C: collName, Id: state.DocID(st, "arbitraryid"), - Insert: bson.M{"model-uuid": st.ModelUUID()}}) + Insert: bson.M{"model-uuid": modelUUID}, + }) } } + err := state.RunTransaction(st, ops) c.Assert(err, jc.ErrorIsNil) @@ -2389,7 +2389,19 @@ model, err := st.Model() c.Assert(err, jc.ErrorIsNil) - return state.UserModelNameIndex(model.Owner().Canonical(), model.Name()) + + // Add a model user whose permissions should get removed + // when the model is. 
+ _, err = s.State.AddModelUser( + s.State.ModelUUID(), + state.UserAccessSpec{ + User: names.NewUserTag("amelia@external"), + CreatedBy: s.Owner, + Access: permission.ReadAccess, + }) + c.Assert(err, jc.ErrorIsNil) + + return state.UserModelNameIndex(model.Owner().Id(), model.Name()) } type checkUserModelNameArgs struct { @@ -2423,6 +2435,14 @@ c.Assert(err, jc.ErrorIsNil) c.Assert(n, gc.Equals, 0) } + + // ensure user permissions for the model are removed + permPattern := fmt.Sprintf("^%s#%s#", state.ModelGlobalKey, st.ModelUUID()) + permissions, closer := state.GetCollection(st, "permissions") + defer closer() + permCount, err := permissions.Find(bson.M{"_id": bson.M{"$regex": permPattern}}).Count() + c.Assert(err, jc.ErrorIsNil) + c.Check(permCount, gc.Equals, 0) } func (s *StateSuite) TestRemoveAllModelDocs(c *gc.C) { @@ -2613,8 +2633,8 @@ c.Assert(relation1, jc.DeepEquals, relation3) } -func tryOpenState(modelTag names.ModelTag, info *mongo.MongoInfo) error { - st, err := state.Open(modelTag, info, mongotest.DialOpts(), nil) +func tryOpenState(modelTag names.ModelTag, controllerTag names.ControllerTag, info *mongo.MongoInfo) error { + st, err := state.Open(modelTag, controllerTag, info, mongotest.DialOpts(), nil) if err == nil { err = st.Close() } @@ -2624,24 +2644,24 @@ func (s *StateSuite) TestOpenWithoutSetMongoPassword(c *gc.C) { info := statetesting.NewMongoInfo() info.Tag, info.Password = names.NewUserTag("arble"), "bar" - err := tryOpenState(s.modelTag, info) + err := tryOpenState(s.modelTag, s.State.ControllerTag(), info) c.Check(errors.Cause(err), jc.Satisfies, errors.IsUnauthorized) c.Check(err, gc.ErrorMatches, `cannot log in to admin database as "user-arble": unauthorized mongo access: .*`) info.Tag, info.Password = names.NewUserTag("arble"), "" - err = tryOpenState(s.modelTag, info) + err = tryOpenState(s.modelTag, s.State.ControllerTag(), info) c.Check(errors.Cause(err), jc.Satisfies, errors.IsUnauthorized) c.Check(err, gc.ErrorMatches, `cannot 
log in to admin database as "user-arble": unauthorized mongo access: .*`) info.Tag, info.Password = nil, "" - err = tryOpenState(s.modelTag, info) + err = tryOpenState(s.modelTag, s.State.ControllerTag(), info) c.Check(err, jc.ErrorIsNil) } func (s *StateSuite) TestOpenBadAddress(c *gc.C) { info := statetesting.NewMongoInfo() info.Addrs = []string{"0.1.2.3:1234"} - st, err := state.Open(testing.ModelTag, info, mongo.DialOpts{ + st, err := state.Open(testing.ModelTag, testing.ControllerTag, info, mongo.DialOpts{ Timeout: 1 * time.Millisecond, }, nil) if err == nil { @@ -2657,7 +2677,7 @@ info.Addrs = []string{"0.1.2.3:1234"} t0 := time.Now() - st, err := state.Open(testing.ModelTag, info, mongo.DialOpts{ + st, err := state.Open(testing.ModelTag, testing.ControllerTag, info, mongo.DialOpts{ Timeout: 1 * time.Millisecond, }, nil) if err == nil { @@ -2792,8 +2812,8 @@ c.Assert(e.Tag(), gc.Equals, env.Tag()) } else if kind == names.UserTagKind { // Test the fully qualified username rather than the tag structure itself. 
- expected := test.tag.(names.UserTag).Canonical() - c.Assert(e.Tag().(names.UserTag).Canonical(), gc.Equals, expected) + expected := test.tag.(names.UserTag).Id() + c.Assert(e.Tag().(names.UserTag).Id(), gc.Equals, expected) } else { c.Assert(e.Tag(), gc.Equals, test.tag) } @@ -2911,7 +2931,7 @@ } func (s *StateSuite) TestWatchCleanupsDiesOnStateClose(c *gc.C) { - testWatcherDiesWhenStateCloses(c, s.modelTag, func(c *gc.C, st *state.State) waiter { + testWatcherDiesWhenStateCloses(c, s.modelTag, s.State.ControllerTag(), func(c *gc.C, st *state.State) waiter { w := st.WatchCleanups() <-w.Changes() return w @@ -3034,7 +3054,7 @@ } func (s *StateSuite) TestWatchMinUnitsDiesOnStateClose(c *gc.C) { - testWatcherDiesWhenStateCloses(c, s.modelTag, func(c *gc.C, st *state.State) waiter { + testWatcherDiesWhenStateCloses(c, s.modelTag, s.State.ControllerTag(), func(c *gc.C, st *state.State) waiter { w := st.WatchMinUnits() <-w.Changes() return w @@ -3327,8 +3347,8 @@ // event, otherwise the watcher's initialisation logic may // interact with the closed state, causing it to return an // unexpected error (often "Closed explictly"). 
-func testWatcherDiesWhenStateCloses(c *gc.C, modelTag names.ModelTag, startWatcher func(c *gc.C, st *state.State) waiter) { - st, err := state.Open(modelTag, statetesting.NewMongoInfo(), mongotest.DialOpts(), nil) +func testWatcherDiesWhenStateCloses(c *gc.C, modelTag names.ModelTag, controllerTag names.ControllerTag, startWatcher func(c *gc.C, st *state.State) waiter) { + st, err := state.Open(modelTag, controllerTag, statetesting.NewMongoInfo(), mongotest.DialOpts(), nil) c.Assert(err, jc.ErrorIsNil) watcher := startWatcher(c, st) err = st.Close() @@ -3366,7 +3386,7 @@ c.Assert(err, jc.ErrorIsNil) c.Assert(info, jc.DeepEquals, expected) - st, err := state.Open(s.modelTag, statetesting.NewMongoInfo(), mongotest.DialOpts(), nil) + st, err := state.Open(s.modelTag, s.State.ControllerTag(), statetesting.NewMongoInfo(), mongotest.DialOpts(), nil) c.Assert(err, jc.ErrorIsNil) defer st.Close() @@ -4015,7 +4035,7 @@ } func (s *StateSuite) TestNowToTheSecond(c *gc.C) { - t := state.NowToTheSecond() + t := s.State.NowToTheSecond() rounded := t.Round(time.Second) c.Assert(t, gc.DeepEquals, rounded) } @@ -4128,8 +4148,8 @@ } cfg := testing.ModelConfig(c) controllerCfg := testing.FakeControllerConfig() - controllerCfg["controller-uuid"] = cfg.UUID() st, err := state.Initialize(state.InitializeParams{ + Clock: clock.WallClock, ControllerConfig: controllerCfg, ControllerModelArgs: state.ModelArgs{ CloudName: "dummy", @@ -4158,7 +4178,7 @@ err = st.MongoSession().DB("admin").Login("admin", "foo") c.Assert(err, jc.ErrorIsNil) - err = tryOpenState(st.ModelTag(), noAuthInfo) + err = tryOpenState(st.ModelTag(), st.ControllerTag(), noAuthInfo) c.Check(errors.Cause(err), jc.Satisfies, errors.IsUnauthorized) // note: collections are set up in arbitrary order, proximate cause of // failure may differ. @@ -4172,6 +4192,6 @@ // creating users. 
There were some checks for unsetting the // password and then creating the state in an older version of // this test, but they couldn't be made to work with 3.2. - err = tryOpenState(st.ModelTag(), &passwordOnlyInfo) + err = tryOpenState(st.ModelTag(), st.ControllerTag(), &passwordOnlyInfo) c.Assert(err, jc.ErrorIsNil) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/status_filesystem_test.go juju-core-2.0.0/src/github.com/juju/juju/state/status_filesystem_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/status_filesystem_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/status_filesystem_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,13 +4,12 @@ package state_test import ( - "time" - jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" "github.com/juju/juju/state" "github.com/juju/juju/status" + "github.com/juju/juju/testing" ) type FilesystemStatusSuite struct { @@ -53,16 +52,16 @@ func (s *FilesystemStatusSuite) checkInitialStatus(c *gc.C) { statusInfo, err := s.filesystem.Status() c.Check(err, jc.ErrorIsNil) - c.Check(statusInfo.Status, gc.Equals, status.StatusPending) + c.Check(statusInfo.Status, gc.Equals, status.Pending) c.Check(statusInfo.Message, gc.Equals, "") c.Check(statusInfo.Data, gc.HasLen, 0) c.Check(statusInfo.Since, gc.NotNil) } func (s *FilesystemStatusSuite) TestSetErrorStatusWithoutInfo(c *gc.C) { - now := time.Now() + now := testing.ZeroTime() sInfo := status.StatusInfo{ - Status: status.StatusError, + Status: status.Error, Message: "", Since: &now, } @@ -73,7 +72,7 @@ } func (s *FilesystemStatusSuite) TestSetUnknownStatus(c *gc.C) { - now := time.Now() + now := testing.ZeroTime() sInfo := status.StatusInfo{ Status: status.Status("vliegkat"), Message: "orville", @@ -86,9 +85,9 @@ } func (s *FilesystemStatusSuite) TestSetOverwritesData(c *gc.C) { - now := time.Now() + now := testing.ZeroTime() sInfo := status.StatusInfo{ - Status: status.StatusAttaching, + 
Status: status.Attaching, Message: "blah", Data: map[string]interface{}{ "pew.pew": "zap", @@ -106,9 +105,9 @@ } func (s *FilesystemStatusSuite) checkGetSetStatus(c *gc.C) { - now := time.Now() + now := testing.ZeroTime() sInfo := status.StatusInfo{ - Status: status.StatusAttaching, + Status: status.Attaching, Message: "blah", Data: map[string]interface{}{ "$foo.bar.baz": map[string]interface{}{ @@ -125,7 +124,7 @@ statusInfo, err := filesystem.Status() c.Check(err, jc.ErrorIsNil) - c.Check(statusInfo.Status, gc.Equals, status.StatusAttaching) + c.Check(statusInfo.Status, gc.Equals, status.Attaching) c.Check(statusInfo.Message, gc.Equals, "blah") c.Check(statusInfo.Data, jc.DeepEquals, map[string]interface{}{ "$foo.bar.baz": map[string]interface{}{ @@ -163,9 +162,9 @@ func (s *FilesystemStatusSuite) TestGetSetStatusGone(c *gc.C) { s.obliterateFilesystem(c, s.filesystem.FilesystemTag()) - now := time.Now() + now := testing.ZeroTime() sInfo := status.StatusInfo{ - Status: status.StatusAttaching, + Status: status.Attaching, Message: "not really", Since: &now, } @@ -178,9 +177,9 @@ } func (s *FilesystemStatusSuite) TestSetStatusPendingUnprovisioned(c *gc.C) { - now := time.Now() + now := testing.ZeroTime() sInfo := status.StatusInfo{ - Status: status.StatusPending, + Status: status.Pending, Message: "still", Since: &now, } @@ -193,9 +192,9 @@ FilesystemId: "fs-id", }) c.Assert(err, jc.ErrorIsNil) - now := time.Now() + now := testing.ZeroTime() sInfo := status.StatusInfo{ - Status: status.StatusPending, + Status: status.Pending, Message: "", Since: &now, } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/status.go juju-core-2.0.0/src/github.com/juju/juju/state/status.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/status.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/status.go 2016-10-13 14:31:49.000000000 +0000 @@ -263,9 +263,8 @@ defer closer() // Status Record Age - // TODO(perrito666): 2016-04-26 
lp:1558657 if maxHistoryTime > 0 { - t := time.Now().Add(-maxHistoryTime) + t := st.clock.Now().Add(-maxHistoryTime) _, err := history.RemoveAll(bson.D{ {"updated", bson.M{"$lt": t.UnixNano()}}, }) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/status_history_test.go juju-core-2.0.0/src/github.com/juju/juju/state/status_history_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/status_history_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/status_history_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -6,25 +6,31 @@ import ( "time" + "github.com/juju/testing" jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" "github.com/juju/juju/state" statetesting "github.com/juju/juju/state/testing" "github.com/juju/juju/status" + coretesting "github.com/juju/juju/testing" "github.com/juju/juju/testing/factory" ) type StatusHistorySuite struct { - statetesting.StateSuite + // TODO Migrate to StateSuite (with testing clock). 
+ statetesting.StateWithWallClockSuite } var _ = gc.Suite(&StatusHistorySuite{}) func (s *StatusHistorySuite) TestPruneStatusHistoryBySize(c *gc.C) { + clock := testing.NewClock(coretesting.NonZeroTime()) + err := s.State.SetClockForTesting(clock) + c.Assert(err, jc.ErrorIsNil) service := s.Factory.MakeApplication(c, nil) unit := s.Factory.MakeUnit(c, &factory.UnitParams{Application: service}) - primeUnitStatusHistory(c, unit, 20000, 0) + state.PrimeUnitStatusHistory(c, clock, unit, status.Active, 20000, 1000, nil) history, err := unit.StatusHistory(status.StatusHistoryFilter{Size: 25000}) c.Assert(err, jc.ErrorIsNil) @@ -148,20 +154,20 @@ twoDaysAgo := now.Add(-twoDaysBack) threeDaysAgo := now.Add(-threeDaysBack) sInfo := status.StatusInfo{ - Status: status.StatusActive, + Status: status.Active, Message: "current status", Since: &now, } err := unit.SetStatus(sInfo) c.Assert(err, jc.ErrorIsNil) sInfo = status.StatusInfo{ - Status: status.StatusActive, + Status: status.Active, Message: "2 days ago", Since: &twoDaysAgo, } unit.SetStatus(sInfo) sInfo = status.StatusInfo{ - Status: status.StatusActive, + Status: status.Active, Message: "3 days ago", Since: &threeDaysAgo, } @@ -170,7 +176,7 @@ c.Assert(err, jc.ErrorIsNil) c.Assert(history, gc.HasLen, 4) c.Assert(history[0].Message, gc.Equals, "current status") - c.Assert(history[1].Message, gc.Equals, "Waiting for agent initialization to finish") + c.Assert(history[1].Message, gc.Equals, "waiting for machine") c.Assert(history[2].Message, gc.Equals, "2 days ago") c.Assert(history[3].Message, gc.Equals, "3 days ago") now = now.Add(10 * time.Second) // lets add some padding to prevent races here. @@ -181,7 +187,7 @@ c.Assert(err, jc.ErrorIsNil) c.Assert(history, gc.HasLen, 2) c.Assert(history[0].Message, gc.Equals, "current status") - c.Assert(history[1].Message, gc.Equals, "Waiting for agent initialization to finish") + c.Assert(history[1].Message, gc.Equals, "waiting for machine") // logs up to one day back, using date. 
yesterday := now.Add(-(time.Hour * 24)) @@ -189,14 +195,14 @@ c.Assert(err, jc.ErrorIsNil) c.Assert(history, gc.HasLen, 2) c.Assert(history[0].Message, gc.Equals, "current status") - c.Assert(history[1].Message, gc.Equals, "Waiting for agent initialization to finish") + c.Assert(history[1].Message, gc.Equals, "waiting for machine") // Logs up to two days ago, using delta. history, err = unit.StatusHistory(status.StatusHistoryFilter{Delta: &twoDaysBack}) c.Assert(err, jc.ErrorIsNil) c.Assert(history, gc.HasLen, 2) c.Assert(history[0].Message, gc.Equals, "current status") - c.Assert(history[1].Message, gc.Equals, "Waiting for agent initialization to finish") + c.Assert(history[1].Message, gc.Equals, "waiting for machine") // Logs up to two days ago, using date. @@ -204,14 +210,14 @@ c.Assert(err, jc.ErrorIsNil) c.Assert(history, gc.HasLen, 2) c.Assert(history[0].Message, gc.Equals, "current status") - c.Assert(history[1].Message, gc.Equals, "Waiting for agent initialization to finish") + c.Assert(history[1].Message, gc.Equals, "waiting for machine") // Logs up to three days ago, using delta. history, err = unit.StatusHistory(status.StatusHistoryFilter{Delta: &threeDaysBack}) c.Assert(err, jc.ErrorIsNil) c.Assert(history, gc.HasLen, 3) c.Assert(history[0].Message, gc.Equals, "current status") - c.Assert(history[1].Message, gc.Equals, "Waiting for agent initialization to finish") + c.Assert(history[1].Message, gc.Equals, "waiting for machine") c.Assert(history[2].Message, gc.Equals, "2 days ago") // Logs up to three days ago, using date. 
@@ -219,6 +225,6 @@ c.Assert(err, jc.ErrorIsNil) c.Assert(history, gc.HasLen, 3) c.Assert(history[0].Message, gc.Equals, "current status") - c.Assert(history[1].Message, gc.Equals, "Waiting for agent initialization to finish") + c.Assert(history[1].Message, gc.Equals, "waiting for machine") c.Assert(history[2].Message, gc.Equals, "2 days ago") } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/status_machine_test.go juju-core-2.0.0/src/github.com/juju/juju/state/status_machine_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/status_machine_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/status_machine_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,13 +4,12 @@ package state_test import ( - "time" - jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" "github.com/juju/juju/state" "github.com/juju/juju/status" + "github.com/juju/juju/testing" ) type MachineStatusSuite struct { @@ -32,16 +31,16 @@ func (s *MachineStatusSuite) checkInitialStatus(c *gc.C) { statusInfo, err := s.machine.Status() c.Check(err, jc.ErrorIsNil) - c.Check(statusInfo.Status, gc.Equals, status.StatusPending) + c.Check(statusInfo.Status, gc.Equals, status.Pending) c.Check(statusInfo.Message, gc.Equals, "") c.Check(statusInfo.Data, gc.HasLen, 0) c.Check(statusInfo.Since, gc.NotNil) } func (s *MachineStatusSuite) TestSetErrorStatusWithoutInfo(c *gc.C) { - now := time.Now() + now := testing.ZeroTime() sInfo := status.StatusInfo{ - Status: status.StatusError, + Status: status.Error, Message: "", Since: &now, } @@ -52,9 +51,9 @@ } func (s *MachineStatusSuite) TestSetDownStatus(c *gc.C) { - now := time.Now() + now := testing.ZeroTime() sInfo := status.StatusInfo{ - Status: status.StatusDown, + Status: status.Down, Message: "", Since: &now, } @@ -65,7 +64,7 @@ } func (s *MachineStatusSuite) TestSetUnknownStatus(c *gc.C) { - now := time.Now() + now := testing.ZeroTime() sInfo := status.StatusInfo{ Status: 
status.Status("vliegkat"), Message: "orville", @@ -78,9 +77,9 @@ } func (s *MachineStatusSuite) TestSetOverwritesData(c *gc.C) { - now := time.Now() + now := testing.ZeroTime() sInfo := status.StatusInfo{ - Status: status.StatusStarted, + Status: status.Started, Message: "blah", Data: map[string]interface{}{ "pew.pew": "zap", @@ -98,9 +97,9 @@ } func (s *MachineStatusSuite) checkGetSetStatus(c *gc.C) { - now := time.Now() + now := testing.ZeroTime() sInfo := status.StatusInfo{ - Status: status.StatusStarted, + Status: status.Started, Message: "blah", Data: map[string]interface{}{ "$foo.bar.baz": map[string]interface{}{ @@ -117,7 +116,7 @@ statusInfo, err := machine.Status() c.Check(err, jc.ErrorIsNil) - c.Check(statusInfo.Status, gc.Equals, status.StatusStarted) + c.Check(statusInfo.Status, gc.Equals, status.Started) c.Check(statusInfo.Message, gc.Equals, "blah") c.Check(statusInfo.Data, jc.DeepEquals, map[string]interface{}{ "$foo.bar.baz": map[string]interface{}{ @@ -150,9 +149,9 @@ err = s.machine.Remove() c.Assert(err, jc.ErrorIsNil) - now := time.Now() + now := testing.ZeroTime() sInfo := status.StatusInfo{ - Status: status.StatusStarted, + Status: status.Started, Message: "not really", Since: &now, } @@ -165,9 +164,9 @@ } func (s *MachineStatusSuite) TestSetStatusPendingProvisioned(c *gc.C) { - now := time.Now() + now := testing.ZeroTime() sInfo := status.StatusInfo{ - Status: status.StatusPending, + Status: status.Pending, Message: "", Since: &now, } @@ -178,9 +177,9 @@ func (s *MachineStatusSuite) TestSetStatusPendingUnprovisioned(c *gc.C) { machine, err := s.State.AddMachine("quantal", state.JobHostUnits) c.Assert(err, jc.ErrorIsNil) - now := time.Now() + now := testing.ZeroTime() sInfo := status.StatusInfo{ - Status: status.StatusPending, + Status: status.Pending, Message: "", Since: &now, } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/status_model_test.go juju-core-2.0.0/src/github.com/juju/juju/state/status_model_test.go --- 
juju-core-2.0~beta15/src/github.com/juju/juju/state/status_model_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/status_model_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,13 +4,12 @@ package state_test import ( - "time" - jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" "github.com/juju/juju/state" "github.com/juju/juju/status" + "github.com/juju/juju/testing" "github.com/juju/juju/testing/factory" ) @@ -46,14 +45,14 @@ func (s *ModelStatusSuite) checkInitialStatus(c *gc.C) { statusInfo, err := s.model.Status() c.Check(err, jc.ErrorIsNil) - c.Check(statusInfo.Status, gc.Equals, status.StatusAvailable) + c.Check(statusInfo.Status, gc.Equals, status.Available) c.Check(statusInfo.Message, gc.Equals, "") c.Check(statusInfo.Data, gc.HasLen, 0) c.Check(statusInfo.Since, gc.NotNil) } func (s *ModelStatusSuite) TestSetUnknownStatus(c *gc.C) { - now := time.Now() + now := testing.ZeroTime() sInfo := status.StatusInfo{ Status: status.Status("vliegkat"), Message: "orville", @@ -66,9 +65,9 @@ } func (s *ModelStatusSuite) TestSetOverwritesData(c *gc.C) { - now := time.Now() + now := testing.ZeroTime() sInfo := status.StatusInfo{ - Status: status.StatusAvailable, + Status: status.Available, Message: "blah", Data: map[string]interface{}{ "pew.pew": "zap", @@ -109,9 +108,9 @@ err = s.st.RemoveAllModelDocs() c.Assert(err, jc.ErrorIsNil) - now := time.Now() + now := testing.ZeroTime() sInfo := status.StatusInfo{ - Status: status.StatusAvailable, + Status: status.Available, Message: "not really", Since: &now, } @@ -123,9 +122,9 @@ } func (s *ModelStatusSuite) checkGetSetStatus(c *gc.C) { - now := time.Now() + now := testing.ZeroTime() sInfo := status.StatusInfo{ - Status: status.StatusAvailable, + Status: status.Available, Message: "blah", Data: map[string]interface{}{ "$foo.bar.baz": map[string]interface{}{ @@ -141,7 +140,7 @@ statusInfo, err := model.Status() c.Check(err, jc.ErrorIsNil) - c.Check(statusInfo.Status, 
gc.Equals, status.StatusAvailable) + c.Check(statusInfo.Status, gc.Equals, status.Available) c.Check(statusInfo.Message, gc.Equals, "blah") c.Check(statusInfo.Data, jc.DeepEquals, map[string]interface{}{ "$foo.bar.baz": map[string]interface{}{ diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/status_service_test.go juju-core-2.0.0/src/github.com/juju/juju/state/status_service_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/status_service_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/status_service_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,13 +4,14 @@ package state_test import ( - "time" + "time" // Only used for time types. jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" "github.com/juju/juju/state" "github.com/juju/juju/status" + "github.com/juju/juju/testing" ) type ServiceStatusSuite struct { @@ -36,7 +37,7 @@ } func (s *ServiceStatusSuite) TestSetUnknownStatus(c *gc.C) { - now := time.Now() + now := testing.ZeroTime() sInfo := status.StatusInfo{ Status: status.Status("vliegkat"), Message: "orville", @@ -49,9 +50,9 @@ } func (s *ServiceStatusSuite) TestSetOverwritesData(c *gc.C) { - now := time.Now() + now := testing.ZeroTime() sInfo := status.StatusInfo{ - Status: status.StatusActive, + Status: status.Active, Message: "healthy", Data: map[string]interface{}{ "pew.pew": "zap", @@ -69,9 +70,9 @@ } func (s *ServiceStatusSuite) checkGetSetStatus(c *gc.C) { - now := time.Now() + now := testing.ZeroTime() sInfo := status.StatusInfo{ - Status: status.StatusActive, + Status: status.Active, Message: "healthy", Data: map[string]interface{}{ "$ping": map[string]interface{}{ @@ -88,7 +89,7 @@ statusInfo, err := service.Status() c.Check(err, jc.ErrorIsNil) - c.Check(statusInfo.Status, gc.Equals, status.StatusActive) + c.Check(statusInfo.Status, gc.Equals, status.Active) c.Check(statusInfo.Message, gc.Equals, "healthy") c.Check(statusInfo.Data, jc.DeepEquals, 
map[string]interface{}{ "$ping": map[string]interface{}{ @@ -111,9 +112,9 @@ err := s.service.Destroy() c.Assert(err, jc.ErrorIsNil) - now := time.Now() + now := testing.ZeroTime() sInfo := status.StatusInfo{ - Status: status.StatusActive, + Status: status.Active, Message: "not really", Since: &now, } @@ -126,9 +127,9 @@ } func (s *ServiceStatusSuite) TestSetStatusSince(c *gc.C) { - now := time.Now() + now := testing.ZeroTime() sInfo := status.StatusInfo{ - Status: status.StatusMaintenance, + Status: status.Maintenance, Message: "", Since: &now, } @@ -143,7 +144,7 @@ // Setting the same status a second time also updates the timestamp. now = now.Add(1 * time.Second) sInfo = status.StatusInfo{ - Status: status.StatusMaintenance, + Status: status.Maintenance, Message: "", Since: &now, } @@ -163,7 +164,7 @@ addUnit := func(unitStatus status.Status) *state.Unit { unit, err := s.service.AddUnit() c.Assert(err, gc.IsNil) - now := time.Now() + now := testing.ZeroTime() sInfo := status.StatusInfo{ Status: unitStatus, Message: "blam", @@ -173,19 +174,19 @@ c.Assert(err, gc.IsNil) return unit } - blockedUnit := addUnit(status.StatusBlocked) - waitingUnit := addUnit(status.StatusWaiting) - maintenanceUnit := addUnit(status.StatusMaintenance) - terminatedUnit := addUnit(status.StatusTerminated) - activeUnit := addUnit(status.StatusActive) - unknownUnit := addUnit(status.StatusUnknown) + blockedUnit := addUnit(status.Blocked) + waitingUnit := addUnit(status.Waiting) + maintenanceUnit := addUnit(status.Maintenance) + terminatedUnit := addUnit(status.Terminated) + activeUnit := addUnit(status.Active) + unknownUnit := addUnit(status.Unknown) // ...and create one with error status by setting it on the agent :-/. 
errorUnit, err := s.service.AddUnit() c.Assert(err, gc.IsNil) - now := time.Now() + now := testing.ZeroTime() sInfo := status.StatusInfo{ - Status: status.StatusError, + Status: status.Error, Message: "blam", Since: &now, } @@ -207,28 +208,28 @@ err = unit.Remove() c.Assert(err, jc.ErrorIsNil) } - checkAndRemove(errorUnit, status.StatusError) - checkAndRemove(blockedUnit, status.StatusBlocked) - checkAndRemove(waitingUnit, status.StatusWaiting) - checkAndRemove(maintenanceUnit, status.StatusMaintenance) - checkAndRemove(terminatedUnit, status.StatusTerminated) - checkAndRemove(activeUnit, status.StatusActive) - checkAndRemove(unknownUnit, status.StatusUnknown) + checkAndRemove(errorUnit, status.Error) + checkAndRemove(blockedUnit, status.Blocked) + checkAndRemove(waitingUnit, status.Waiting) + checkAndRemove(maintenanceUnit, status.Maintenance) + checkAndRemove(terminatedUnit, status.Terminated) + checkAndRemove(activeUnit, status.Active) + checkAndRemove(unknownUnit, status.Unknown) } func (s *ServiceStatusSuite) TestServiceStatusOverridesDerivedStatus(c *gc.C) { unit, err := s.service.AddUnit() c.Assert(err, gc.IsNil) - now := time.Now() + now := testing.ZeroTime() sInfo := status.StatusInfo{ - Status: status.StatusBlocked, + Status: status.Blocked, Message: "pow", Since: &now, } err = unit.SetStatus(sInfo) c.Assert(err, gc.IsNil) sInfo = status.StatusInfo{ - Status: status.StatusMaintenance, + Status: status.Maintenance, Message: "zot", Since: &now, } @@ -237,5 +238,5 @@ info, err := s.service.Status() c.Check(err, jc.ErrorIsNil) - c.Check(info.Status, gc.Equals, status.StatusMaintenance) + c.Check(info.Status, gc.Equals, status.Maintenance) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/status_unitagent_test.go juju-core-2.0.0/src/github.com/juju/juju/state/status_unitagent_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/status_unitagent_test.go 2016-08-16 08:56:25.000000000 +0000 +++ 
juju-core-2.0.0/src/github.com/juju/juju/state/status_unitagent_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,13 +4,14 @@ package state_test import ( - "time" + "time" // Only used for time types. jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" "github.com/juju/juju/state" "github.com/juju/juju/status" + "github.com/juju/juju/testing" ) type StatusUnitAgentSuite struct { @@ -38,7 +39,7 @@ } func (s *StatusUnitAgentSuite) TestSetUnknownStatus(c *gc.C) { - now := time.Now() + now := testing.ZeroTime() sInfo := status.StatusInfo{ Status: status.Status("vliegkat"), Message: "orville", @@ -51,9 +52,9 @@ } func (s *StatusUnitAgentSuite) TestSetErrorStatusWithoutInfo(c *gc.C) { - now := time.Now() + now := testing.ZeroTime() sInfo := status.StatusInfo{ - Status: status.StatusError, + Status: status.Error, Message: "", Since: &now, } @@ -64,9 +65,9 @@ } func (s *StatusUnitAgentSuite) TestSetOverwritesData(c *gc.C) { - now := time.Now() + now := testing.ZeroTime() sInfo := status.StatusInfo{ - Status: status.StatusIdle, + Status: status.Idle, Message: "something", Data: map[string]interface{}{ "pew.pew": "zap", @@ -84,9 +85,9 @@ } func (s *StatusUnitAgentSuite) checkGetSetStatus(c *gc.C) { - now := time.Now() + now := testing.ZeroTime() sInfo := status.StatusInfo{ - Status: status.StatusIdle, + Status: status.Idle, Message: "something", Data: map[string]interface{}{ "$foo": "bar", @@ -106,7 +107,7 @@ statusInfo, err := agent.Status() c.Check(err, jc.ErrorIsNil) - c.Check(statusInfo.Status, gc.Equals, status.StatusIdle) + c.Check(statusInfo.Status, gc.Equals, status.Idle) c.Check(statusInfo.Message, gc.Equals, "something") c.Check(statusInfo.Data, jc.DeepEquals, map[string]interface{}{ "$foo": "bar", @@ -143,9 +144,9 @@ err := s.unit.Destroy() c.Assert(err, jc.ErrorIsNil) - now := time.Now() + now := testing.ZeroTime() sInfo := status.StatusInfo{ - Status: status.StatusIdle, + Status: status.Idle, Message: "not really", Since: &now, } @@ -158,9 +159,9 @@ } 
func (s *StatusUnitAgentSuite) TestGetSetErrorStatus(c *gc.C) { - now := time.Now() + now := testing.ZeroTime() sInfo := status.StatusInfo{ - Status: status.StatusError, + Status: status.Error, Message: "test-hook failed", Data: map[string]interface{}{ "foo": "bar", @@ -173,7 +174,7 @@ // Agent error is reported as unit error. statusInfo, err := s.unit.Status() c.Check(err, jc.ErrorIsNil) - c.Check(statusInfo.Status, gc.Equals, status.StatusError) + c.Check(statusInfo.Status, gc.Equals, status.Error) c.Check(statusInfo.Message, gc.Equals, "test-hook failed") c.Check(statusInfo.Data, gc.DeepEquals, map[string]interface{}{ "foo": "bar", @@ -182,7 +183,7 @@ // For agents, error is reported as idle. statusInfo, err = s.agent.Status() c.Check(err, jc.ErrorIsNil) - c.Check(statusInfo.Status, gc.Equals, status.StatusIdle) + c.Check(statusInfo.Status, gc.Equals, status.Idle) c.Check(statusInfo.Message, gc.Equals, "") c.Check(statusInfo.Data, gc.HasLen, 0) } @@ -192,9 +193,9 @@ } func (s *StatusUnitAgentSuite) TestSetAgentStatusSince(c *gc.C) { - now := time.Now() + now := testing.ZeroTime() sInfo := status.StatusInfo{ - Status: status.StatusIdle, + Status: status.Idle, Message: "", Since: &now, } @@ -209,7 +210,7 @@ // Setting the same status a second time also updates the timestamp. now = now.Add(1 * time.Second) sInfo = status.StatusInfo{ - Status: status.StatusIdle, + Status: status.Idle, Message: "", Since: &now, } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/status_unit_test.go juju-core-2.0.0/src/github.com/juju/juju/state/status_unit_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/status_unit_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/status_unit_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,13 +4,14 @@ package state_test import ( - "time" + "time" // Only used for time types. 
jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" "github.com/juju/juju/state" "github.com/juju/juju/status" + "github.com/juju/juju/testing" ) type UnitStatusSuite struct { @@ -36,7 +37,7 @@ } func (s *UnitStatusSuite) TestSetUnknownStatus(c *gc.C) { - now := time.Now() + now := testing.ZeroTime() sInfo := status.StatusInfo{ Status: status.Status("vliegkat"), Message: "orville", @@ -49,9 +50,9 @@ } func (s *UnitStatusSuite) TestSetOverwritesData(c *gc.C) { - now := time.Now() + now := testing.ZeroTime() sInfo := status.StatusInfo{ - Status: status.StatusActive, + Status: status.Active, Message: "healthy", Data: map[string]interface{}{ "pew.pew": "zap", @@ -69,9 +70,9 @@ } func (s *UnitStatusSuite) checkGetSetStatus(c *gc.C) { - now := time.Now() + now := testing.ZeroTime() sInfo := status.StatusInfo{ - Status: status.StatusActive, + Status: status.Active, Message: "healthy", Data: map[string]interface{}{ "$ping": map[string]interface{}{ @@ -87,7 +88,7 @@ statusInfo, err := unit.Status() c.Check(err, jc.ErrorIsNil) - c.Check(statusInfo.Status, gc.Equals, status.StatusActive) + c.Check(statusInfo.Status, gc.Equals, status.Active) c.Check(statusInfo.Message, gc.Equals, "healthy") c.Check(statusInfo.Data, jc.DeepEquals, map[string]interface{}{ "$ping": map[string]interface{}{ @@ -122,9 +123,9 @@ err := s.unit.Destroy() c.Assert(err, jc.ErrorIsNil) - now := time.Now() + now := testing.ZeroTime() sInfo := status.StatusInfo{ - Status: status.StatusActive, + Status: status.Active, Message: "not really", Since: &now, } @@ -137,9 +138,9 @@ } func (s *UnitStatusSuite) TestSetUnitStatusSince(c *gc.C) { - now := time.Now() + now := testing.ZeroTime() sInfo := status.StatusInfo{ - Status: status.StatusMaintenance, + Status: status.Maintenance, Message: "", Since: &now, } @@ -154,7 +155,7 @@ // Setting the same status a second time also updates the timestamp. 
now = now.Add(1 * time.Second) sInfo = status.StatusInfo{ - Status: status.StatusMaintenance, + Status: status.Maintenance, Message: "", Since: &now, } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/status_util_test.go juju-core-2.0.0/src/github.com/juju/juju/state/status_util_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/status_util_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/status_util_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -43,40 +43,40 @@ } func checkInitialWorkloadStatus(c *gc.C, statusInfo status.StatusInfo) { - c.Check(statusInfo.Status, gc.Equals, status.StatusUnknown) - c.Check(statusInfo.Message, gc.Equals, "Waiting for agent initialization to finish") + c.Check(statusInfo.Status, gc.Equals, status.Waiting) + c.Check(statusInfo.Message, gc.Equals, "waiting for machine") c.Check(statusInfo.Data, gc.HasLen, 0) c.Check(statusInfo.Since, gc.NotNil) } func primeUnitStatusHistory(c *gc.C, unit *state.Unit, count int, delta time.Duration) { - primeStatusHistory(c, unit, status.StatusActive, count, func(i int) map[string]interface{} { + primeStatusHistory(c, unit, status.Active, count, func(i int) map[string]interface{} { return map[string]interface{}{"$foo": i, "$delta": delta} }, delta) } func checkPrimedUnitStatus(c *gc.C, statusInfo status.StatusInfo, expect int, expectDelta time.Duration) { - c.Check(statusInfo.Status, gc.Equals, status.StatusActive) + c.Check(statusInfo.Status, gc.Equals, status.Active) c.Check(statusInfo.Message, gc.Equals, "") c.Check(statusInfo.Data, jc.DeepEquals, map[string]interface{}{"$foo": expect, "$delta": int64(expectDelta)}) c.Check(statusInfo.Since, gc.NotNil) } func checkInitialUnitAgentStatus(c *gc.C, statusInfo status.StatusInfo) { - c.Check(statusInfo.Status, gc.Equals, status.StatusAllocating) + c.Check(statusInfo.Status, gc.Equals, status.Allocating) c.Check(statusInfo.Message, gc.Equals, "") c.Check(statusInfo.Data, gc.HasLen, 0) 
c.Assert(statusInfo.Since, gc.NotNil) } func primeUnitAgentStatusHistory(c *gc.C, agent *state.UnitAgent, count int, delta time.Duration) { - primeStatusHistory(c, agent, status.StatusExecuting, count, func(i int) map[string]interface{} { + primeStatusHistory(c, agent, status.Executing, count, func(i int) map[string]interface{} { return map[string]interface{}{"$bar": i, "$delta": delta} }, delta) } func checkPrimedUnitAgentStatus(c *gc.C, statusInfo status.StatusInfo, expect int, expectDelta time.Duration) { - c.Check(statusInfo.Status, gc.Equals, status.StatusExecuting) + c.Check(statusInfo.Status, gc.Equals, status.Executing) c.Check(statusInfo.Message, gc.Equals, "") c.Check(statusInfo.Data, jc.DeepEquals, map[string]interface{}{"$bar": expect, "$delta": int64(expectDelta)}) c.Check(statusInfo.Since, gc.NotNil) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/status_volume_test.go juju-core-2.0.0/src/github.com/juju/juju/state/status_volume_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/status_volume_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/status_volume_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,13 +4,12 @@ package state_test import ( - "time" - jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" "github.com/juju/juju/state" "github.com/juju/juju/status" + "github.com/juju/juju/testing" ) type VolumeStatusSuite struct { @@ -53,16 +52,16 @@ func (s *VolumeStatusSuite) checkInitialStatus(c *gc.C) { statusInfo, err := s.volume.Status() c.Check(err, jc.ErrorIsNil) - c.Check(statusInfo.Status, gc.Equals, status.StatusPending) + c.Check(statusInfo.Status, gc.Equals, status.Pending) c.Check(statusInfo.Message, gc.Equals, "") c.Check(statusInfo.Data, gc.HasLen, 0) c.Check(statusInfo.Since, gc.NotNil) } func (s *VolumeStatusSuite) TestSetErrorStatusWithoutInfo(c *gc.C) { - now := time.Now() + now := testing.ZeroTime() sInfo := status.StatusInfo{ - Status: 
status.StatusError, + Status: status.Error, Message: "", Since: &now, } @@ -73,7 +72,7 @@ } func (s *VolumeStatusSuite) TestSetUnknownStatus(c *gc.C) { - now := time.Now() + now := testing.ZeroTime() sInfo := status.StatusInfo{ Status: status.Status("vliegkat"), Message: "orville", @@ -86,9 +85,9 @@ } func (s *VolumeStatusSuite) TestSetOverwritesData(c *gc.C) { - now := time.Now() + now := testing.ZeroTime() sInfo := status.StatusInfo{ - Status: status.StatusAttaching, + Status: status.Attaching, Message: "blah", Data: map[string]interface{}{ "pew.pew": "zap", @@ -98,13 +97,13 @@ err := s.volume.SetStatus(sInfo) c.Check(err, jc.ErrorIsNil) - s.checkGetSetStatus(c, status.StatusAttaching) + s.checkGetSetStatus(c, status.Attaching) } func (s *VolumeStatusSuite) TestGetSetStatusAlive(c *gc.C) { validStatuses := []status.Status{ - status.StatusAttaching, status.StatusAttached, status.StatusDetaching, - status.StatusDetached, status.StatusDestroying, + status.Attaching, status.Attached, status.Detaching, + status.Detached, status.Destroying, } for _, status := range validStatuses { s.checkGetSetStatus(c, status) @@ -112,7 +111,7 @@ } func (s *VolumeStatusSuite) checkGetSetStatus(c *gc.C, volumeStatus status.Status) { - now := time.Now() + now := testing.ZeroTime() sInfo := status.StatusInfo{ Status: volumeStatus, Message: "blah", @@ -145,7 +144,7 @@ err := s.State.DestroyVolume(s.volume.VolumeTag()) c.Assert(err, jc.ErrorIsNil) - s.checkGetSetStatus(c, status.StatusAttaching) + s.checkGetSetStatus(c, status.Attaching) } func (s *VolumeStatusSuite) TestGetSetStatusDead(c *gc.C) { @@ -163,15 +162,15 @@ // NOTE: it would be more technically correct to reject status updates // while Dead, but it's easier and clearer, not to mention more efficient, // to just depend on status doc existence. 
- s.checkGetSetStatus(c, status.StatusAttaching) + s.checkGetSetStatus(c, status.Attaching) } func (s *VolumeStatusSuite) TestGetSetStatusGone(c *gc.C) { s.obliterateVolume(c, s.volume.VolumeTag()) - now := time.Now() + now := testing.ZeroTime() sInfo := status.StatusInfo{ - Status: status.StatusAttaching, + Status: status.Attaching, Message: "not really", Since: &now, } @@ -184,9 +183,9 @@ } func (s *VolumeStatusSuite) TestSetStatusPendingUnprovisioned(c *gc.C) { - now := time.Now() + now := testing.ZeroTime() sInfo := status.StatusInfo{ - Status: status.StatusPending, + Status: status.Pending, Message: "still", Since: &now, } @@ -199,9 +198,9 @@ VolumeId: "vol-ume", }) c.Assert(err, jc.ErrorIsNil) - now := time.Now() + now := testing.ZeroTime() sInfo := status.StatusInfo{ - Status: status.StatusPending, + Status: status.Pending, Message: "", Since: &now, } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/storage.go juju-core-2.0.0/src/github.com/juju/juju/state/storage.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/storage.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/storage.go 2016-10-13 14:31:49.000000000 +0000 @@ -33,7 +33,7 @@ // Kind returns the storage instance kind. Kind() StorageKind - // Owner returns the tag of the service or unit that owns this storage + // Owner returns the tag of the application or unit that owns this storage // instance. Owner() names.Tag @@ -44,9 +44,6 @@ // Life reports whether the storage instance is Alive, Dying or Dead. Life() Life - - // CharmURL returns the charm URL that this storage instance was created with. - CharmURL() *charm.URL } // StorageAttachment represents the state of a unit's attachment to a storage @@ -80,6 +77,31 @@ doc storageInstanceDoc } +// String returns a human readable string represting the type. 
+func (k StorageKind) String() string { + switch k { + case StorageKindBlock: + return "block" + case StorageKindFilesystem: + return "filesystem" + default: + return "unknown" + } +} + +// parseStorageKind is used by the migration code to go from the +// string representation back to the enum. +func parseStorageKind(value string) StorageKind { + switch value { + case "block": + return StorageKindBlock + case "filesystem": + return StorageKindFilesystem + default: + return StorageKindUnknown + } +} + func (s *storageInstance) Tag() names.Tag { return s.StorageTag() } @@ -110,9 +132,11 @@ return s.doc.Life } -// CharmURL returns the charm URL that this storage instance was created with. -func (s *storageInstance) CharmURL() *charm.URL { - return s.doc.CharmURL +// entityStorageRefcountKey returns a key for refcounting charm storage +// for a specific entity. Each time a storage instance is created, the +// named store's refcount is incremented; and decremented when removed. +func entityStorageRefcountKey(owner names.Tag, storageName string) string { + return fmt.Sprintf("storage#%s#%s", owner.String(), storageName) } // storageInstanceDoc describes a charm storage instance. @@ -126,7 +150,6 @@ Owner string `bson:"owner"` StorageName string `bson:"storagename"` AttachmentCount int `bson:"attachmentcount"` - CharmURL *charm.URL `bson:"charmurl"` } type storageAttachment struct { @@ -241,7 +264,7 @@ // remove the storage instance immediately. hasNoAttachments := bson.D{{"attachmentcount", 0}} assert := append(hasNoAttachments, isAliveDoc...) - return removeStorageInstanceOps(st, s.StorageTag(), assert) + return removeStorageInstanceOps(st, s.Owner(), s.StorageTag(), assert) } // There are still attachments: the storage instance will be removed // when the last attachment is removed. 
We schedule a cleanup to destroy @@ -252,7 +275,7 @@ } update := bson.D{{"$set", bson.D{{"life", Dying}}}} ops := []txn.Op{ - st.newCleanupOp(cleanupAttachmentsForDyingStorage, s.doc.Id), + newCleanupOp(cleanupAttachmentsForDyingStorage, s.doc.Id), { C: storageInstancesC, Id: s.doc.Id, @@ -267,9 +290,11 @@ // tag from state, if the specified assertions hold true. func removeStorageInstanceOps( st *State, + owner names.Tag, tag names.StorageTag, assert bson.D, ) ([]txn.Op, error) { + ops := []txn.Op{{ C: storageInstancesC, Id: tag.Id(), @@ -311,9 +336,31 @@ } else if !errors.IsNotFound(err) { return nil, errors.Trace(err) } + + // Decrement the charm storage reference count. + refcounts, closer := st.getCollection(refcountsC) + defer closer() + storageName, err := names.StorageName(tag.Id()) + if err != nil { + return nil, errors.Trace(err) + } + storageRefcountKey := entityStorageRefcountKey(owner, storageName) + decRefOp, _, err := nsRefcounts.DyingDecRefOp(refcounts, storageRefcountKey) + if err != nil { + return nil, errors.Trace(err) + } + ops = append(ops, decRefOp) + return ops, nil } +// machineAssignable is used by createStorageOps to determine what machine +// storage needs to be created. This is implemented by Unit. +type machineAssignable interface { + machine() (*Machine, error) + noAssignedMachineOp() txn.Op +} + // createStorageOps returns txn.Ops for creating storage instances // and attachments for the newly created unit or service. // @@ -329,14 +376,17 @@ // instances to be created, keyed on the storage name. These constraints // will be correlated with the charm storage metadata for validation // and supplementing. +// +// maybeMachineAssignable may be nil, or an machineAssignable which +// describes the entity's machine assignment. If the entity is assigned +// to a machine, then machine storage will be created. 
func createStorageOps( st *State, - entity names.Tag, + entityTag names.Tag, charmMeta *charm.Meta, - curl *charm.URL, cons map[string]StorageConstraints, series string, - machineOpsNeeded bool, + maybeMachineAssignable machineAssignable, ) (ops []txn.Op, numStorageAttachments int, err error) { type template struct { @@ -346,12 +396,12 @@ } createdShared := false - switch entity := entity.(type) { + switch entityTag := entityTag.(type) { case names.ApplicationTag: createdShared = true case names.UnitTag: default: - return nil, -1, errors.Errorf("expected application or unit tag, got %T", entity) + return nil, -1, errors.Errorf("expected application or unit tag, got %T", entityTag) } // Create storage instances in order of name, to simplify testing. @@ -367,6 +417,9 @@ if !ok { return nil, -1, errors.NotFoundf("charm storage %q", store) } + if cons.Count == 0 { + continue + } if createdShared != charmStorage.Shared { // services only get shared storage instances, // units only get non-shared storage instances. @@ -379,9 +432,12 @@ }) } - ops = make([]txn.Op, 0, len(templates)*2) + refcounts, closer := st.getCollection(refcountsC) + defer closer() + + ops = make([]txn.Op, 0, len(templates)*3) for _, t := range templates { - owner := entity.String() + owner := entityTag.String() var kind StorageKind switch t.meta.Type { case charm.StorageBlock: @@ -392,6 +448,17 @@ return nil, -1, errors.Errorf("unknown storage type %q", t.meta.Type) } + // Increment reference counts for the named storage for each + // instance we create. We'll use the reference counts to ensure + // we don't exceed limits when adding storage, and for + // maintaining model integrity during charm upgrades. 
+ storageRefcountKey := entityStorageRefcountKey(entityTag, t.storageName) + incRefOp, err := nsRefcounts.CreateOrIncRefOp(refcounts, storageRefcountKey, int(t.cons.Count)) + if err != nil { + return nil, -1, errors.Trace(err) + } + ops = append(ops, incRefOp) + for i := uint64(0); i < t.cons.Count; i++ { id, err := newStorageInstanceId(st, t.storageName) if err != nil { @@ -402,13 +469,27 @@ Kind: kind, Owner: owner, StorageName: t.storageName, - CharmURL: curl, } - if unit, ok := entity.(names.UnitTag); ok { + var machineOps []txn.Op + if unitTag, ok := entityTag.(names.UnitTag); ok { doc.AttachmentCount = 1 storage := names.NewStorageTag(id) - ops = append(ops, createStorageAttachmentOp(storage, unit)) + ops = append(ops, createStorageAttachmentOp(storage, unitTag)) numStorageAttachments++ + + if maybeMachineAssignable != nil { + var err error + machineOps, err = unitAssignedMachineStorageOps( + st, unitTag, charmMeta, cons, series, + &storageInstance{st, *doc}, + maybeMachineAssignable, + ) + if err != nil { + return nil, -1, errors.Annotatef( + err, "creating machine storage for storage %s", id, + ) + } + } } ops = append(ops, txn.Op{ C: storageInstancesC, @@ -416,19 +497,7 @@ Assert: txn.DocMissing, Insert: doc, }) - if machineOpsNeeded { - machineOps, err := unitAssignedMachineStorageOps( - st, entity, charmMeta, cons, series, - &storageInstance{st, *doc}, - ) - if err == nil { - ops = append(ops, machineOps...) - } else if !errors.IsNotAssigned(err) { - return nil, -1, errors.Annotatef( - err, "creating machine storage for storage %s", id, - ) - } - } + ops = append(ops, machineOps...) } } @@ -445,31 +514,33 @@ // unitAssignedMachineStorageOps returns ops for creating volumes, filesystems // and their attachments to the machine that the specified unit is assigned to, // corresponding to the specified storage instance. +// +// If the unit is not assigned to a machine, then ops will be returned to assert +// this, and no error will be returned. 
func unitAssignedMachineStorageOps( st *State, - entity names.Tag, + unitTag names.UnitTag, charmMeta *charm.Meta, cons map[string]StorageConstraints, series string, storage StorageInstance, + machineAssignable machineAssignable, ) (ops []txn.Op, err error) { - tag, ok := entity.(names.UnitTag) - if !ok { - return nil, errors.NotSupportedf("dynamic creation of shared storage") - } storageParams, err := machineStorageParamsForStorageInstance( - st, charmMeta, tag, series, cons, storage, + st, charmMeta, unitTag, series, cons, storage, ) if err != nil { return nil, errors.Trace(err) } - u, err := st.Unit(tag.Id()) - if err != nil { - return nil, errors.Trace(err) - } - m, err := u.machine() + m, err := machineAssignable.machine() if err != nil { + if errors.IsNotAssigned(err) { + // The unit is not assigned to a machine; return + // txn.Op that ensures that this remains the case + // until the transaction is committed. + return []txn.Op{machineAssignable.noAssignedMachineOp()}, nil + } return nil, errors.Trace(err) } @@ -679,7 +750,9 @@ // Either the storage instance is dying, or its owner // is a unit; in either case, no more attachments can // be added to the instance, so it can be removed. - siOps, err := removeStorageInstanceOps(st, si.StorageTag(), hasLastRef) + siOps, err := removeStorageInstanceOps( + st, si.Owner(), si.StorageTag(), hasLastRef, + ) if err != nil { return nil, errors.Trace(err) } @@ -725,13 +798,14 @@ if err != nil { return nil, errors.Annotatef(err, "cannot get storage instances for %s", owner) } - ops := make([]txn.Op, len(docs)) - for i, doc := range docs { - ops[i] = txn.Op{ - C: storageInstancesC, - Id: doc.Id, - Remove: true, + ops := make([]txn.Op, 0, len(docs)) + for _, doc := range docs { + tag := names.NewStorageTag(doc.Id) + storageInstanceOps, err := removeStorageInstanceOps(st, owner, tag, nil) + if err != nil { + return nil, errors.Trace(err) } + ops = append(ops, storageInstanceOps...) 
} return ops, nil } @@ -768,6 +842,15 @@ } } +func replaceStorageConstraintsOp(key string, cons map[string]StorageConstraints) txn.Op { + return txn.Op{ + C: storageConstraintsC, + Id: key, + Assert: txn.DocExists, + Update: bson.D{{"$set", bson.D{{"constraints", cons}}}}, + } +} + func removeStorageConstraintsOp(key string) txn.Op { return txn.Op{ C: storageConstraintsC, @@ -783,7 +866,7 @@ var doc storageConstraintsDoc err := coll.FindId(key).One(&doc) if err == mgo.ErrNotFound { - return nil, nil + return nil, errors.NotFoundf("storage constraints for %q", key) } if err != nil { return nil, errors.Annotatef(err, "cannot get storage constraints for %q", key) @@ -1049,11 +1132,11 @@ } // AddStorageForUnit adds storage instances to given unit as specified. -// Missing storage constraints are populated -// based on model defaults. Storage store name is used to retrieve -// existing storage instances for this store. -// Combination of existing storage instances and -// anticipated additional storage instances is validated against storage +// +// Missing storage constraints are populated based on model defaults. +// Storage store name is used to retrieve existing storage instances +// for this store. Combination of existing storage instances and +// anticipated additional storage instances is validated against the // store as specified in the charm. 
func (st *State) AddStorageForUnit( tag names.UnitTag, name string, cons StorageConstraints, @@ -1062,151 +1145,149 @@ if err != nil { return errors.Trace(err) } - - s, err := u.Application() - if err != nil { - return errors.Annotatef(err, "getting service for unit %v", u.Tag().Id()) + buildTxn := func(attempt int) ([]txn.Op, error) { + if attempt > 0 { + if err := u.Refresh(); err != nil { + return nil, errors.Trace(err) + } + } + return st.addStorageForUnitOps(u, name, cons) } - ch, _, err := s.Charm() - if err != nil { - return errors.Annotatef(err, "getting charm for unit %q", u.Tag().Id()) + if err := st.run(buildTxn); err != nil { + return errors.Annotatef(err, "adding storage to unit %s", u) } - - return st.addStorageForUnit(ch, u, name, cons) + return nil } // addStorage adds storage instances to given unit as specified. -func (st *State) addStorageForUnit( - ch *Charm, u *Unit, - name string, cons StorageConstraints, -) error { - all, err := u.StorageConstraints() - if err != nil { - return errors.Annotatef(err, "getting existing storage directives for %s", u.Tag().Id()) +func (st *State) addStorageForUnitOps( + u *Unit, + storageName string, + cons StorageConstraints, +) ([]txn.Op, error) { + if u.Life() != Alive { + return nil, unitNotAliveErr } - // Check storage name was declared. - _, exists := all[name] - if !exists { - return errors.NotFoundf("charm storage %q", name) + // Storage addition is based on the charm metadata, so make sure that + // the charm URL for the unit or application does not change during + // the transaction. If the unit does not have a charm URL set yet, + // then we use the application's charm URL. 
+ ops := []txn.Op{{ + C: unitsC, + Id: u.doc.Name, + Assert: bson.D{{"charmurl", u.doc.CharmURL}}, + }} + curl, ok := u.CharmURL() + if !ok { + a, err := u.Application() + if err != nil { + return nil, errors.Annotatef(err, "getting application for unit %v", u.doc.Name) + } + curl = a.doc.CharmURL + ops = append(ops, txn.Op{ + C: applicationsC, + Id: a.doc.Name, + Assert: bson.D{{"charmurl", curl}}, + }) + } + ch, err := st.Charm(curl) + if err != nil { + return nil, errors.Trace(err) + } + charmMeta := ch.Meta() + charmStorageMeta, ok := charmMeta.Storage[storageName] + if !ok { + return nil, errors.NotFoundf("charm storage %q", storageName) } // Populate missing configuration parameters with default values. - conf, err := st.ModelConfig() + modelConfig, err := st.ModelConfig() if err != nil { - return errors.Trace(err) + return nil, errors.Trace(err) } completeCons, err := storageConstraintsWithDefaults( - conf, - ch.Meta().Storage[name], - name, cons, + modelConfig, + charmStorageMeta, + storageName, + cons, ) if err != nil { - return errors.Trace(err) + return nil, errors.Trace(err) } // This can happen for charm stores that specify instances range from 0, // and no count was specified at deploy as storage constraints for this store, // and no count was specified to storage add as a contraint either. 
if cons.Count == 0 { - return errors.NotValidf("adding storage where instance count is 0") + return nil, errors.NotValidf("adding storage where instance count is 0") } - buildTxn := func(attempt int) ([]txn.Op, error) { - if attempt > 0 { - if err := u.Refresh(); err != nil { - return nil, errors.Trace(err) - } - } - if u.Life() != Alive { - return nil, unitNotAliveErr - } - err = st.validateUnitStorage(ch.Meta(), u, name, completeCons) - if err != nil { - return nil, errors.Trace(err) - } - ops, err := st.constructAddUnitStorageOps(ch, u, name, completeCons) - if err != nil { - return nil, errors.Trace(err) - } - return ops, nil - } - if err := st.run(buildTxn); err != nil { - return errors.Annotatef(err, "adding storage to unit %s", u) + addUnitStorageOps, err := st.addUnitStorageOps(charmMeta, u, storageName, completeCons, -1) + if err != nil { + return nil, errors.Trace(err) } - return nil + ops = append(ops, addUnitStorageOps...) + return ops, nil } -func (st *State) validateUnitStorage( - charmMeta *charm.Meta, u *Unit, name string, cons StorageConstraints, -) error { - // Storage directive may provide storage instance count - // which combined with existing storage instance may exceed - // number of storage instances specified by charm. - // We must take it into account when validating. - currentCount, err := st.countEntityStorageInstancesForName(u.Tag(), name) +// addUnitStorageOps returns transaction ops to create storage for the given +// unit. If countMin is non-negative, the Count field of the constraints will +// be ignored, and as many storage instances as necessary to make up the +// shortfall will be created. 
+func (st *State) addUnitStorageOps( + charmMeta *charm.Meta, + u *Unit, + storageName string, + cons StorageConstraints, + countMin int, +) ([]txn.Op, error) { + currentCountOp, currentCount, err := st.countEntityStorageInstances(u.Tag(), storageName) if err != nil { - return errors.Trace(err) + return nil, errors.Trace(err) + } + ops := []txn.Op{currentCountOp} + if countMin >= 0 { + if currentCount >= countMin { + return ops, nil + } + cons.Count = uint64(countMin - currentCount) } - cons.Count = cons.Count + currentCount - err = validateStorageConstraintsAgainstCharm( - st, - map[string]StorageConstraints{name: cons}, - charmMeta) - if err != nil { - return errors.Trace(err) + consTotal := cons + consTotal.Count += uint64(currentCount) + if err := validateStorageConstraintsAgainstCharm(st, + map[string]StorageConstraints{storageName: consTotal}, + charmMeta, + ); err != nil { + return nil, errors.Trace(err) } - return nil -} -func (st *State) constructAddUnitStorageOps( - ch *Charm, u *Unit, name string, cons StorageConstraints, -) ([]txn.Op, error) { // Create storage db operations storageOps, _, err := createStorageOps( st, u.Tag(), - ch.Meta(), - ch.URL(), - map[string]StorageConstraints{name: cons}, + charmMeta, + map[string]StorageConstraints{storageName: cons}, u.Series(), - true, // create machine storage + u, ) if err != nil { return nil, errors.Trace(err) } - - // Update storage attachment count. 
- priorCount := u.doc.StorageAttachmentCount - newCount := priorCount + int(cons.Count) - - attachmentsUnchanged := bson.D{{"storageattachmentcount", priorCount}} - ops := []txn.Op{{ + ops = append(ops, txn.Op{ C: unitsC, Id: u.doc.DocID, - Assert: append(attachmentsUnchanged, isAliveDoc...), - Update: bson.D{{"$set", - bson.D{{"storageattachmentcount", newCount}}}}, - }} + Assert: isAliveDoc, + Update: bson.D{{"$inc", + bson.D{{"storageattachmentcount", int(cons.Count)}}}}, + }) return append(ops, storageOps...), nil } -func (st *State) countEntityStorageInstancesForName( - tag names.Tag, - name string, -) (uint64, error) { - storageCollection, closer := st.getCollection(storageInstancesC) +func (st *State) countEntityStorageInstances(owner names.Tag, name string) (txn.Op, int, error) { + refcounts, closer := st.getCollection(refcountsC) defer closer() - criteria := bson.D{{ - "$and", []bson.D{ - bson.D{{"owner", tag.String()}}, - bson.D{{"storagename", name}}, - }, - }} - result, err := storageCollection.Find(criteria).Count() - if err != nil { - return 0, err - } - return uint64(result), err + key := entityStorageRefcountKey(owner, name) + return nsRefcounts.CurrentOp(refcounts, key) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/storage_test.go juju-core-2.0.0/src/github.com/juju/juju/state/storage_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/storage_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/storage_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -25,12 +25,6 @@ "github.com/juju/juju/testing/factory" ) -type StorageStateSuite struct { - StorageStateSuiteBase -} - -var _ = gc.Suite(&StorageStateSuite{}) - type StorageStateSuiteBase struct { ConnSuite } @@ -110,7 +104,7 @@ return s.AddTestingServiceWithStorage(c, "storage-"+kind+"2", ch, storageCons) } -func (s *StorageStateSuite) storageInstanceExists(c *gc.C, tag names.StorageTag) bool { +func (s *StorageStateSuiteBase) 
storageInstanceExists(c *gc.C, tag names.StorageTag) bool { _, err := state.TxnRevno( s.State, state.StorageInstancesC, @@ -335,6 +329,12 @@ return state.StorageConstraints{Pool: pool, Size: size, Count: count} } +type StorageStateSuite struct { + StorageStateSuiteBase +} + +var _ = gc.Suite(&StorageStateSuite{}) + func (s *StorageStateSuite) TestAddServiceStorageConstraintsDefault(c *gc.C) { ch := s.AddTestingCharm(c, "storage-block") storageBlock, err := s.State.AddApplication(state.AddApplicationArgs{Name: "storage-block", Charm: ch}) @@ -524,7 +524,6 @@ c.Assert(err, jc.ErrorIsNil) count[storageInstance.StorageName()]++ c.Assert(storageInstance.Kind(), gc.Equals, state.StorageKindBlock) - c.Assert(storageInstance.CharmURL(), gc.DeepEquals, ch.URL()) } c.Assert(count, gc.DeepEquals, map[string]int{ "multi1to10": 1, @@ -943,3 +942,90 @@ // - StorageInstance without attachments is removed by Destroy // - concurrent add-unit and StorageAttachment removal does not // remove storage instance. + +type StorageSubordinateStateSuite struct { + StorageStateSuiteBase + + mysql *state.Application + mysqlUnit *state.Unit + mysqlRelunit *state.RelationUnit + subordinateApplication *state.Application + relation *state.Relation +} + +var _ = gc.Suite(&StorageSubordinateStateSuite{}) + +func (s *StorageSubordinateStateSuite) SetUpTest(c *gc.C) { + s.StorageStateSuiteBase.SetUpTest(c) + + var err error + storageCharm := s.AddTestingCharm(c, "storage-filesystem-subordinate") + s.subordinateApplication = s.AddTestingService(c, "storage-filesystem-subordinate", storageCharm) + s.mysql = s.AddTestingService(c, "mysql", s.AddTestingCharm(c, "mysql")) + s.mysqlUnit, err = s.mysql.AddUnit() + c.Assert(err, jc.ErrorIsNil) + + eps, err := s.State.InferEndpoints("mysql", "storage-filesystem-subordinate") + c.Assert(err, jc.ErrorIsNil) + s.relation, err = s.State.AddRelation(eps...) 
+ c.Assert(err, jc.ErrorIsNil) + s.mysqlRelunit, err = s.relation.Unit(s.mysqlUnit) + c.Assert(err, jc.ErrorIsNil) +} + +func (s *StorageSubordinateStateSuite) TestSubordinateStoragePrincipalUnassigned(c *gc.C) { + storageTag := names.NewStorageTag("data/0") + exists := s.storageInstanceExists(c, storageTag) + c.Assert(exists, jc.IsFalse) + + err := s.mysqlRelunit.EnterScope(nil) + c.Assert(err, jc.ErrorIsNil) + + // The subordinate unit will have been created, along with its storage. + exists = s.storageInstanceExists(c, storageTag) + c.Assert(exists, jc.IsTrue) + + // The principal unit is not yet assigned to a machine, so there should + // be no filesystem associated with the storage instance yet. + _, err = s.State.StorageInstanceFilesystem(storageTag) + c.Assert(err, jc.Satisfies, errors.IsNotFound) + + // Assigning the principal unit to a machine should cause the subordinate + // unit's machine storage to be created. + err = s.State.AssignUnit(s.mysqlUnit, state.AssignCleanEmpty) + c.Assert(err, jc.ErrorIsNil) + _ = s.storageInstanceFilesystem(c, storageTag) +} + +func (s *StorageSubordinateStateSuite) TestSubordinateStoragePrincipalAssigned(c *gc.C) { + err := s.State.AssignUnit(s.mysqlUnit, state.AssignCleanEmpty) + c.Assert(err, jc.ErrorIsNil) + + err = s.mysqlRelunit.EnterScope(nil) + c.Assert(err, jc.ErrorIsNil) + + // The subordinate unit will have been created, along with its storage. + storageTag := names.NewStorageTag("data/0") + exists := s.storageInstanceExists(c, storageTag) + c.Assert(exists, jc.IsTrue) + + // The principal unit was assigned to a machine when the subordinate + // unit was created, so there should be a filesystem associated with + // the storage instance now. + _ = s.storageInstanceFilesystem(c, storageTag) +} + +func (s *StorageSubordinateStateSuite) TestSubordinateStoragePrincipalAssignRace(c *gc.C) { + // Add the subordinate before attempting to commit the transaction + // that assigns the unit to a machine. 
The transaction should fail + // and be reattempted with the knowledge of the subordinate, and + // add the subordinate's storage. + defer state.SetBeforeHooks(c, s.State, func() { + err := s.mysqlRelunit.EnterScope(nil) + c.Assert(err, jc.ErrorIsNil) + }).Check() + + err := s.State.AssignUnit(s.mysqlUnit, state.AssignCleanEmpty) + c.Assert(err, jc.ErrorIsNil) + _ = s.storageInstanceFilesystem(c, names.NewStorageTag("data/0")) +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/testing/conn.go juju-core-2.0.0/src/github.com/juju/juju/state/testing/conn.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/testing/conn.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/testing/conn.go 2016-10-13 14:31:49.000000000 +0000 @@ -6,6 +6,7 @@ import ( jujutesting "github.com/juju/testing" jc "github.com/juju/testing/checkers" + "github.com/juju/utils/clock" gc "gopkg.in/check.v1" "gopkg.in/juju/names.v2" @@ -20,35 +21,82 @@ "github.com/juju/juju/testing" ) +type InitializeArgs struct { + Owner names.UserTag + InitialConfig *config.Config + ControllerInheritedConfig map[string]interface{} + RegionConfig cloud.RegionConfig + NewPolicy state.NewPolicyFunc + Clock clock.Clock +} + // Initialize initializes the state and returns it. If state was not // already initialized, and cfg is nil, the minimal default model // configuration will be used. 
-func Initialize(c *gc.C, owner names.UserTag, cfg *config.Config, controllerInheritedConfig map[string]interface{}, newPolicy state.NewPolicyFunc) *state.State { - if cfg == nil { - cfg = testing.ModelConfig(c) +// This provides for tests still using a real clock from utils as tests are +// migrated to use the testing clock +func Initialize(c *gc.C, owner names.UserTag, cfg *config.Config, controllerInheritedConfig map[string]interface{}, regionConfig cloud.RegionConfig, newPolicy state.NewPolicyFunc) *state.State { + return InitializeWithArgs(c, InitializeArgs{ + Owner: owner, + InitialConfig: cfg, + ControllerInheritedConfig: controllerInheritedConfig, + RegionConfig: regionConfig, + NewPolicy: newPolicy, + Clock: &clock.WallClock, + }) +} + +// InitializeWithArgs initializes the state and returns it. If state was not +// already initialized, and args.Config is nil, the minimal default model +// configuration will be used. +func InitializeWithArgs(c *gc.C, args InitializeArgs) *state.State { + if args.InitialConfig == nil { + args.InitialConfig = testing.ModelConfig(c) } mgoInfo := NewMongoInfo() dialOpts := mongotest.DialOpts() controllerCfg := testing.FakeControllerConfig() - controllerCfg["controller-uuid"] = cfg.UUID() st, err := state.Initialize(state.InitializeParams{ + Clock: args.Clock, ControllerConfig: controllerCfg, ControllerModelArgs: state.ModelArgs{ - CloudName: "dummy", - Config: cfg, - Owner: owner, + CloudName: "dummy", + CloudRegion: "dummy-region", + Config: args.InitialConfig, + Owner: args.Owner, StorageProviderRegistry: StorageProviders(), }, - ControllerInheritedConfig: controllerInheritedConfig, + ControllerInheritedConfig: args.ControllerInheritedConfig, CloudName: "dummy", Cloud: cloud.Cloud{ Type: "dummy", AuthTypes: []cloud.AuthType{cloud.EmptyAuthType}, + Regions: []cloud.Region{ + cloud.Region{ + Name: "dummy-region", + Endpoint: "dummy-endpoint", + IdentityEndpoint: "dummy-identity-endpoint", + StorageEndpoint: 
"dummy-storage-endpoint", + }, + cloud.Region{ + Name: "nether-region", + Endpoint: "nether-endpoint", + IdentityEndpoint: "nether-identity-endpoint", + StorageEndpoint: "nether-storage-endpoint", + }, + cloud.Region{ + Name: "unused-region", + Endpoint: "unused-endpoint", + IdentityEndpoint: "unused-identity-endpoint", + StorageEndpoint: "unused-storage-endpoint", + }, + }, + RegionConfig: args.RegionConfig, }, MongoInfo: mgoInfo, MongoDialOpts: dialOpts, - NewPolicy: newPolicy, + NewPolicy: args.NewPolicy, }) c.Assert(err, jc.ErrorIsNil) return st @@ -97,5 +145,5 @@ owner := names.NewLocalUserTag("test-admin") cfg := testing.ModelConfig(c) newPolicy := func(*state.State) state.Policy { return &MockPolicy{} } - return Initialize(c, owner, cfg, nil, newPolicy) + return Initialize(c, owner, cfg, nil, nil, newPolicy) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/testing/policy.go juju-core-2.0.0/src/github.com/juju/juju/state/testing/policy.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/testing/policy.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/testing/policy.go 2016-10-13 14:31:49.000000000 +0000 @@ -5,6 +5,8 @@ import ( "github.com/juju/errors" + "github.com/juju/schema" + "gopkg.in/juju/environschema.v1" "github.com/juju/juju/constraints" "github.com/juju/juju/environs/config" @@ -14,11 +16,12 @@ ) type MockPolicy struct { - GetPrechecker func() (state.Prechecker, error) - GetConfigValidator func() (config.Validator, error) - GetConstraintsValidator func() (constraints.Validator, error) - GetInstanceDistributor func() (instance.Distributor, error) - GetStorageProviderRegistry func() (storage.ProviderRegistry, error) + GetPrechecker func() (state.Prechecker, error) + GetConfigValidator func() (config.Validator, error) + GetProviderConfigSchemaSource func() (config.ConfigSchemaSource, error) + GetConstraintsValidator func() (constraints.Validator, error) + GetInstanceDistributor func() 
(instance.Distributor, error) + GetStorageProviderRegistry func() (storage.ProviderRegistry, error) } func (p *MockPolicy) Prechecker() (state.Prechecker, error) { @@ -55,3 +58,31 @@ } return nil, errors.NotImplementedf("StorageProviderRegistry") } + +func (p *MockPolicy) ProviderConfigSchemaSource() (config.ConfigSchemaSource, error) { + if p.GetProviderConfigSchemaSource != nil { + return p.GetProviderConfigSchemaSource() + } + return nil, errors.NotImplementedf("ProviderConfigSchemaSource") +} + +type MockConfigSchemaSource struct{} + +func (m *MockConfigSchemaSource) ConfigSchema() schema.Fields { + configSchema := environschema.Fields{ + "providerAttr": { + Type: environschema.Tstring, + }, + } + fs, _, err := configSchema.ValidationSchema() + if err != nil { + panic(err) + } + return fs +} + +func (m *MockConfigSchemaSource) ConfigDefaults() schema.Defaults { + return schema.Defaults{ + "providerAttr": "vulch", + } +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/testing/suite.go juju-core-2.0.0/src/github.com/juju/juju/state/testing/suite.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/testing/suite.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/testing/suite.go 2016-10-13 14:31:49.000000000 +0000 @@ -8,6 +8,7 @@ gc "gopkg.in/check.v1" "gopkg.in/juju/names.v2" + "github.com/juju/juju/cloud" "github.com/juju/juju/environs/config" "github.com/juju/juju/state" "github.com/juju/juju/testing" @@ -27,6 +28,8 @@ Factory *factory.Factory InitialConfig *config.Config ControllerInheritedConfig map[string]interface{} + RegionConfig cloud.RegionConfig + Clock *jujutesting.Clock } func (s *StateSuite) SetUpSuite(c *gc.C) { @@ -44,7 +47,15 @@ s.BaseSuite.SetUpTest(c) s.Owner = names.NewLocalUserTag("test-admin") - s.State = Initialize(c, s.Owner, s.InitialConfig, s.ControllerInheritedConfig, s.NewPolicy) + s.Clock = jujutesting.NewClock(testing.NonZeroTime()) + s.State = InitializeWithArgs(c, 
InitializeArgs{ + Owner: s.Owner, + InitialConfig: s.InitialConfig, + ControllerInheritedConfig: s.ControllerInheritedConfig, + RegionConfig: s.RegionConfig, + NewPolicy: s.NewPolicy, + Clock: s.Clock, + }) s.AddCleanup(func(*gc.C) { s.State.Close() }) s.Factory = factory.NewFactory(s.State) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/testing/suite_wallclock.go juju-core-2.0.0/src/github.com/juju/juju/state/testing/suite_wallclock.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/testing/suite_wallclock.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/testing/suite_wallclock.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,58 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package testing + +import ( + "github.com/juju/testing" + gc "gopkg.in/check.v1" + "gopkg.in/juju/names.v2" + + "github.com/juju/juju/cloud" + "github.com/juju/juju/environs/config" + "github.com/juju/juju/state" + coretesting "github.com/juju/juju/testing" + "github.com/juju/juju/testing/factory" +) + +var _ = gc.Suite(&StateWithWallClockSuite{}) + +// StateWithWallClockSuite provides setup and teardown for tests that require a +// state.State. This should be deprecated in favour of StateSuite, and tests +// updated to use the testing clock StateSuite provides. 
+type StateWithWallClockSuite struct { + testing.MgoSuite + coretesting.BaseSuite + NewPolicy state.NewPolicyFunc + State *state.State + Owner names.UserTag + Factory *factory.Factory + InitialConfig *config.Config + ControllerInheritedConfig map[string]interface{} + RegionConfig cloud.RegionConfig +} + +func (s *StateWithWallClockSuite) SetUpSuite(c *gc.C) { + s.MgoSuite.SetUpSuite(c) + s.BaseSuite.SetUpSuite(c) +} + +func (s *StateWithWallClockSuite) TearDownSuite(c *gc.C) { + s.BaseSuite.TearDownSuite(c) + s.MgoSuite.TearDownSuite(c) +} + +func (s *StateWithWallClockSuite) SetUpTest(c *gc.C) { + s.MgoSuite.SetUpTest(c) + s.BaseSuite.SetUpTest(c) + + s.Owner = names.NewLocalUserTag("test-admin") + s.State = Initialize(c, s.Owner, s.InitialConfig, s.ControllerInheritedConfig, s.RegionConfig, s.NewPolicy) + s.AddCleanup(func(*gc.C) { s.State.Close() }) + s.Factory = factory.NewFactory(s.State) +} + +func (s *StateWithWallClockSuite) TearDownTest(c *gc.C) { + s.BaseSuite.TearDownTest(c) + s.MgoSuite.TearDownTest(c) +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/txns.go juju-core-2.0.0/src/github.com/juju/juju/state/txns.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/txns.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/txns.go 2016-10-13 14:31:49.000000000 +0000 @@ -29,6 +29,16 @@ return runner.RunTransaction(ops) } +// runTransaction is a convenience method delegating to the state's Database +// for the model with the given modelUUID. +func (st *State) runTransactionFor(modelUUID string, ops []txn.Op) error { + database, dbcloser := st.database.CopyForModel(modelUUID) + defer dbcloser() + runner, closer := database.TransactionRunner() + defer closer() + return runner.RunTransaction(ops) +} + // runRawTransaction is a convenience method that will run a single // transaction using a "raw" transaction runner that won't perform // model filtering. 
@@ -47,6 +57,16 @@ defer closer() return runner.Run(transactions) } + +// runForModel is a convenience method that delegates to a Database for a different +// modelUUID. +func (st *State) runForModel(modelUUID string, transactions jujutxn.TransactionSource) error { + database, dbcloser := st.database.CopyForModel(modelUUID) + defer dbcloser() + runner, closer := database.TransactionRunner() + defer closer() + return runner.Run(transactions) +} // ResumeTransactions resumes all pending transactions. func (st *State) ResumeTransactions() error { diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/undertaker.go juju-core-2.0.0/src/github.com/juju/juju/state/undertaker.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/undertaker.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/undertaker.go 2016-10-13 14:31:49.000000000 +0000 @@ -46,7 +46,7 @@ Assert: isDyingDoc, Update: bson.M{"$set": bson.M{ "life": Dead, - "time-of-death": nowToTheSecond(), + "time-of-death": st.NowToTheSecond(), }}, }} return ops, nil diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/unitagent.go juju-core-2.0.0/src/github.com/juju/juju/state/unitagent.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/unitagent.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/unitagent.go 2016-10-13 14:31:49.000000000 +0000 @@ -43,9 +43,9 @@ // be in error state, but the state model more correctly records the agent // itself as being in error. So we'll do that model translation here. // TODO(fwereade): this should absolutely not be happpening in the model. - if info.Status == status.StatusError { + if info.Status == status.Error { return status.StatusInfo{ - Status: status.StatusIdle, + Status: status.Idle, Message: "", Data: map[string]interface{}{}, Since: info.Since, @@ -58,12 +58,12 @@ // allow to pass additional helpful status data. 
func (u *UnitAgent) SetStatus(unitAgentStatus status.StatusInfo) (err error) { switch unitAgentStatus.Status { - case status.StatusIdle, status.StatusExecuting, status.StatusRebooting, status.StatusFailed: - case status.StatusError: + case status.Idle, status.Executing, status.Rebooting, status.Failed: + case status.Error: if unitAgentStatus.Message == "" { return errors.Errorf("cannot set status %q without info", unitAgentStatus.Status) } - case status.StatusAllocating, status.StatusLost: + case status.Allocating, status.Lost: return errors.Errorf("cannot set status %q", unitAgentStatus.Status) default: return errors.Errorf("cannot set invalid status %q", unitAgentStatus.Status) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/unit.go juju-core-2.0.0/src/github.com/juju/juju/state/unit.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/unit.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/unit.go 2016-10-13 14:31:49.000000000 +0000 @@ -211,12 +211,11 @@ // Store in status rather than an attribute of the unit doc - we // want to avoid everything being an attr of the main docs to // stop a swarm of watchers being notified for irrelevant changes. - // TODO(babbageclunk) lp:1558657 - should use clock stored on unit - now := time.Now() + now := u.st.clock.Now() return setStatus(u.st, setStatusParams{ badge: "workload", globalKey: u.globalWorkloadVersionKey(), - status: status.StatusActive, + status: status.Active, message: version, updated: &now, }) @@ -390,7 +389,7 @@ // the number of tests that have to change and defer that improvement to // its own CL. 
minUnitsOp := minUnitsTriggerOp(u.st, u.ApplicationName()) - cleanupOp := u.st.newCleanupOp(cleanupDyingUnit, u.doc.Name) + cleanupOp := newCleanupOp(cleanupDyingUnit, u.doc.Name) setDyingOp := txn.Op{ C: unitsC, Id: u.doc.DocID, @@ -413,14 +412,14 @@ } else if agentErr != nil { return nil, errors.Trace(agentErr) } - if agentStatusInfo.Status != status.StatusAllocating { + if agentStatusInfo.Status != status.Allocating { return setDyingOps, nil } ops := []txn.Op{{ C: statusesC, Id: u.st.docID(agentStatusDocId), - Assert: bson.D{{"status", status.StatusAllocating}}, + Assert: bson.D{{"status", status.Allocating}}, }, minUnitsOp} removeAsserts := append(isAliveDoc, bson.DocElem{ "$and", []bson.D{ @@ -448,7 +447,7 @@ Update: bson.D{{"$pull", bson.D{{"subordinates", u.doc.Name}}}}, }}, nil } else if u.doc.MachineId == "" { - unitLogger.Errorf("unit %v unassigned", u) + unitLogger.Tracef("unit %v unassigned", u) return nil, nil } @@ -737,6 +736,8 @@ } // machine returns the unit's machine. +// +// machine is part of the machineAssignable interface. func (u *Unit) machine() (*Machine, error) { id, err := u.AssignedMachineId() if err != nil { @@ -749,11 +750,24 @@ return m, nil } +// noAssignedMachineOp is part of the machineAssignable interface. +func (u *Unit) noAssignedMachineOp() txn.Op { + id := u.doc.DocID + if u.doc.Principal != "" { + id = u.doc.Principal + } + return txn.Op{ + C: unitsC, + Id: id, + Assert: bson.D{{"machineid", ""}}, + } +} + // PublicAddress returns the public address of the unit. 
func (u *Unit) PublicAddress() (network.Address, error) { m, err := u.machine() if err != nil { - unitLogger.Errorf("%v", err) + unitLogger.Tracef("%v", err) return network.Address{}, errors.Trace(err) } return m.PublicAddress() @@ -763,7 +777,7 @@ func (u *Unit) PrivateAddress() (network.Address, error) { m, err := u.machine() if err != nil { - unitLogger.Errorf("%v", err) + unitLogger.Tracef("%v", err) return network.Address{}, errors.Trace(err) } return m.PrivateAddress() @@ -855,7 +869,7 @@ if err != nil { return status.StatusInfo{}, err } - if info.Status != status.StatusError { + if info.Status != status.Error { info, err = getStatus(u.st, u.globalKey(), "unit") if err != nil { return status.StatusInfo{}, err @@ -1095,24 +1109,23 @@ } // Add a reference to the service settings for the new charm. - incOp, err := settingsIncRefOp(u.st, u.doc.Application, curl, false) + incOps, err := appCharmIncRefOps(u.st, u.doc.Application, curl, false) if err != nil { return nil, errors.Trace(err) } // Set the new charm URL. differentCharm := bson.D{{"charmurl", bson.D{{"$ne", curl}}}} - ops := []txn.Op{ - incOp, - { + ops := append(incOps, + txn.Op{ C: unitsC, Id: u.doc.DocID, Assert: append(notDeadDoc, differentCharm...), Update: bson.D{{"$set", bson.D{{"charmurl", curl}}}}, - }} + }) if u.doc.CharmURL != nil { // Drop the reference to the old charm. - decOps, err := settingsDecRefOps(u.st, u.doc.Application, u.doc.CharmURL) + decOps, err := appCharmDecRefOps(u.st, u.doc.Application, u.doc.CharmURL) if err != nil { return nil, errors.Trace(err) } @@ -1219,17 +1232,16 @@ inUseErr = errors.New("machine is not unused") ) -// assignToMachine is the internal version of AssignToMachine, -// also used by AssignToUnusedMachine. It returns specific errors -// in some cases: -// - machineNotAliveErr when the machine is not alive. -// - unitNotAliveErr when the unit is not alive. 
-// - alreadyAssignedErr when the unit has already been assigned -// - inUseErr when the machine already has a unit assigned (if unused is true) +// assignToMachine is the internal version of AssignToMachine. func (u *Unit) assignToMachine(m *Machine, unused bool) (err error) { - originalm := m buildTxn := func(attempt int) ([]txn.Op, error) { + u, m := u, m // don't change outer vars if attempt > 0 { + var err error + u, err = u.st.Unit(u.Name()) + if err != nil { + return nil, errors.Trace(err) + } m, err = u.st.Machine(m.Id()) if err != nil { return nil, errors.Trace(err) @@ -1238,15 +1250,19 @@ return u.assignToMachineOps(m, unused) } if err := u.st.run(buildTxn); err != nil { - // Don't wrap the error, as we want to return specific values - // as described in the doc comment. - return err + return errors.Trace(err) } - u.doc.MachineId = originalm.doc.Id - originalm.doc.Clean = false + u.doc.MachineId = m.doc.Id + m.doc.Clean = false return nil } +// assignToMachineOps returns txn.Ops to assign a unit to a machine. +// assignToMachineOps returns specific errors in some cases: +// - machineNotAliveErr when the machine is not alive. +// - unitNotAliveErr when the unit is not alive. +// - alreadyAssignedErr when the unit has already been assigned +// - inUseErr when the machine already has a unit assigned (if unused is true) func (u *Unit) assignToMachineOps(m *Machine, unused bool) ([]txn.Op, error) { if u.Life() != Alive { return nil, unitNotAliveErr @@ -1290,12 +1306,17 @@ } storageOps = append(storageOps, attachmentOps...) - assert := append(isAliveDoc, bson.D{ - {"$or", []bson.D{ + assert := append(isAliveDoc, bson.D{{ + // The unit's subordinates must not change while we're + // assigning it to a machine, to ensure machine storage + // is created for subordinate units. + "subordinates", u.doc.Subordinates, + }, { + "$or", []bson.D{ {{"machineid", ""}}, {{"machineid", m.Id()}}, - }}, - }...) + }, + }}...) 
massert := isAliveDoc if unused { massert = append(massert, bson.D{{"clean", bson.D{{"$ne", false}}}}...) @@ -1466,9 +1487,21 @@ return u.assignToMachine(m, false) } -// assignToNewMachine assigns the unit to a machine created according to -// the supplied params, with the supplied constraints. -func (u *Unit) assignToNewMachine(template MachineTemplate, parentId string, containerType instance.ContainerType) error { +// assignToNewMachineOps returns txn.Ops to assign the unit to a machine +// created according to the supplied params, with the supplied constraints. +func (u *Unit) assignToNewMachineOps( + template MachineTemplate, + parentId string, + containerType instance.ContainerType, +) (*Machine, []txn.Op, error) { + + if u.Life() != Alive { + return nil, nil, unitNotAliveErr + } + if u.doc.MachineId != "" { + return nil, nil, alreadyAssignedErr + } + template.principals = []string{u.doc.Name} template.Dirty = true @@ -1482,7 +1515,7 @@ mdoc, ops, err = u.st.addMachineOps(template) case parentId == "": if containerType == "" { - return fmt.Errorf("assignToNewMachine called without container type (should never happen)") + return nil, nil, errors.New("assignToNewMachine called without container type (should never happen)") } // The new parent machine is clean and only hosts units, // regardless of its child. @@ -1490,14 +1523,28 @@ parentParams.Jobs = []MachineJob{JobHostUnits} mdoc, ops, err = u.st.addMachineInsideNewMachineOps(template, parentParams, containerType) default: - // Container type is specified but no parent id. mdoc, ops, err = u.st.addMachineInsideMachineOps(template, parentId, containerType) } if err != nil { - return err + return nil, nil, err } + // Ensure the host machine is really clean. 
if parentId != "" { + mparent, err := u.st.Machine(parentId) + if err != nil { + return nil, nil, err + } + if !mparent.Clean() { + return nil, nil, machineNotCleanErr + } + containers, err := mparent.Containers() + if err != nil { + return nil, nil, err + } + if len(containers) > 0 { + return nil, nil, machineNotCleanErr + } parentDocId := u.st.docID(parentId) ops = append(ops, txn.Op{ C: machinesC, @@ -1509,9 +1556,15 @@ Assert: bson.D{hasNoContainersTerm}, }) } - isUnassigned := bson.D{{"machineid", ""}} + // The unit's subordinates must not change while we're + // assigning it to a machine, to ensure machine storage + // is created for subordinate units. + subordinatesUnchanged := bson.D{{"subordinates", u.doc.Subordinates}} + isUnassigned := bson.D{{"machineid", ""}} asserts := append(isAliveDoc, isUnassigned...) + asserts = append(asserts, subordinatesUnchanged...) + ops = append(ops, txn.Op{ C: unitsC, Id: u.doc.DocID, @@ -1520,51 +1573,7 @@ }, removeStagedAssignmentOp(u.doc.DocID), ) - - err = u.st.runTransaction(ops) - if err == nil { - u.doc.MachineId = mdoc.Id - return nil - } else if err != txn.ErrAborted { - return err - } - - // If we assume that the machine ops will never give us an - // operation that would fail (because the machine id(s) that it - // chooses are unique), then the only reasons that the - // transaction could have been aborted are: - // * the unit is no longer alive - // * the unit has been assigned to a different machine - // * the parent machine we want to create a container on was - // clean but became dirty - unit, err := u.st.Unit(u.Name()) - if err != nil { - return err - } - switch { - case unit.Life() != Alive: - return unitNotAliveErr - case unit.doc.MachineId != "": - return alreadyAssignedErr - } - if parentId == "" { - return fmt.Errorf("cannot add top level machine: transaction aborted for unknown reason") - } - m, err := u.st.Machine(parentId) - if err != nil { - return err - } - if !m.Clean() { - return 
machineNotCleanErr - } - containers, err := m.Containers() - if err != nil { - return err - } - if len(containers) > 0 { - return machineNotCleanErr - } - return fmt.Errorf("cannot add container within machine: transaction aborted for unknown reason") + return &Machine{u.st, *mdoc}, ops, nil } // Constraints returns the unit's deployment constraints. @@ -1610,25 +1619,43 @@ defer closer() var host machineDoc if err := machinesCollection.Find(query).One(&host); err == mgo.ErrNotFound { - // No existing clean, empty machine so create a new one. - // The container constraint will be used by AssignToNewMachine to create the required container. + // No existing clean, empty machine so create a new one. The + // container constraint will be used by AssignToNewMachine to + // create the required container. return u.AssignToNewMachine() } else if err != nil { return err } - template := MachineTemplate{ - Series: u.doc.Series, - Constraints: *cons, - Jobs: []MachineJob{JobHostUnits}, - } - err = u.assignToNewMachine(template, host.Id, *cons.Container) - if err == machineNotCleanErr { - // The clean machine was used before we got a chance to use it so just - // stick the unit on a new machine. - return u.AssignToNewMachine() + var m *Machine + buildTxn := func(attempt int) ([]txn.Op, error) { + var err error + u := u // don't change outer var + if attempt > 0 { + u, err = u.st.Unit(u.Name()) + if err != nil { + return nil, errors.Trace(err) + } + } + template := MachineTemplate{ + Series: u.doc.Series, + Constraints: *cons, + Jobs: []MachineJob{JobHostUnits}, + } + var ops []txn.Op + m, ops, err = u.assignToNewMachineOps(template, host.Id, *cons.Container) + return ops, err } - return err + if err := u.st.run(buildTxn); err != nil { + if errors.Cause(err) == machineNotCleanErr { + // The clean machine was used before we got a chance + // to use it so just stick the unit on a new machine. 
+ return u.AssignToNewMachine() + } + return errors.Trace(err) + } + u.doc.MachineId = m.doc.Id + return nil } // AssignToNewMachine assigns the unit to a new machine, with constraints @@ -1639,31 +1666,49 @@ if u.doc.Principal != "" { return fmt.Errorf("unit is a subordinate") } - // Get the ops necessary to create a new machine, and the machine doc that - // will be added with those operations (which includes the machine id). - cons, err := u.Constraints() - if err != nil { - return err - } - var containerType instance.ContainerType - // Configure to create a new container if required. - if cons.HasContainer() { - containerType = *cons.Container + var m *Machine + buildTxn := func(attempt int) ([]txn.Op, error) { + var err error + u := u // don't change outer var + if attempt > 0 { + u, err = u.st.Unit(u.Name()) + if err != nil { + return nil, errors.Trace(err) + } + } + cons, err := u.Constraints() + if err != nil { + return nil, err + } + var containerType instance.ContainerType + if cons.HasContainer() { + containerType = *cons.Container + } + storageParams, err := u.machineStorageParams() + if err != nil { + return nil, errors.Trace(err) + } + template := MachineTemplate{ + Series: u.doc.Series, + Constraints: *cons, + Jobs: []MachineJob{JobHostUnits}, + Volumes: storageParams.volumes, + VolumeAttachments: storageParams.volumeAttachments, + Filesystems: storageParams.filesystems, + FilesystemAttachments: storageParams.filesystemAttachments, + } + // Get the ops necessary to create a new machine, and the + // machine doc that will be added with those operations + // (which includes the machine id). 
+ var ops []txn.Op + m, ops, err = u.assignToNewMachineOps(template, "", containerType) + return ops, err } - storageParams, err := u.machineStorageParams() - if err != nil { + if err := u.st.run(buildTxn); err != nil { return errors.Trace(err) } - template := MachineTemplate{ - Series: u.doc.Series, - Constraints: *cons, - Jobs: []MachineJob{JobHostUnits}, - Volumes: storageParams.volumes, - VolumeAttachments: storageParams.volumeAttachments, - Filesystems: storageParams.filesystems, - FilesystemAttachments: storageParams.filesystemAttachments, - } - return u.assignToNewMachine(template, "", containerType) + u.doc.MachineId = m.doc.Id + return nil } type byStorageInstance []StorageAttachment @@ -1679,17 +1724,37 @@ // and volume/filesystem attachments for a machine that the unit will be // assigned to. func (u *Unit) machineStorageParams() (*machineStorageParams, error) { - storageAttachments, err := u.st.UnitStorageAttachments(u.UnitTag()) + params, err := unitMachineStorageParams(u) if err != nil { - return nil, errors.Annotate(err, "getting storage attachments") + return nil, errors.Trace(err) } - svc, err := u.Application() + for _, name := range u.doc.Subordinates { + sub, err := u.st.Unit(name) + if err != nil { + return nil, errors.Trace(err) + } + subParams, err := unitMachineStorageParams(sub) + if err != nil { + return nil, errors.Trace(err) + } + params = combineMachineStorageParams(params, subParams) + } + return params, nil +} + +func unitMachineStorageParams(u *Unit) (*machineStorageParams, error) { + storageAttachments, err := u.st.UnitStorageAttachments(u.UnitTag()) if err != nil { - return nil, errors.Trace(err) + return nil, errors.Annotate(err, "getting storage attachments") } - curl, _ := svc.CharmURL() + curl := u.doc.CharmURL if curl == nil { - return nil, errors.Errorf("no URL set for application %q", svc.Name()) + var err error + app, err := u.Application() + if err != nil { + return nil, errors.Trace(err) + } + curl, _ = app.CharmURL() } 
ch, err := u.st.Charm(curl) if err != nil { @@ -1955,7 +2020,34 @@ // assignToCleanMaybeEmptyMachine implements AssignToCleanMachine and AssignToCleanEmptyMachine. // A 'machine' may be a machine instance or container depending on the service constraints. -func (u *Unit) assignToCleanMaybeEmptyMachine(requireEmpty bool) (m *Machine, err error) { +func (u *Unit) assignToCleanMaybeEmptyMachine(requireEmpty bool) (*Machine, error) { + var m *Machine + buildTxn := func(attempt int) ([]txn.Op, error) { + var err error + u := u // don't change outer var + if attempt > 0 { + u, err = u.st.Unit(u.Name()) + if err != nil { + return nil, errors.Trace(err) + } + } + var ops []txn.Op + m, ops, err = u.assignToCleanMaybeEmptyMachineOps(requireEmpty) + return ops, err + } + if err := u.st.run(buildTxn); err != nil { + return nil, errors.Trace(err) + } + u.doc.MachineId = m.doc.Id + m.doc.Clean = false + return m, nil +} + +func (u *Unit) assignToCleanMaybeEmptyMachineOps(requireEmpty bool) (_ *Machine, _ []txn.Op, err error) { + failure := func(err error) (*Machine, []txn.Op, error) { + return nil, nil, err + } + context := "clean" if requireEmpty { context += ", empty" @@ -1965,7 +2057,7 @@ if u.doc.Principal != "" { err = fmt.Errorf("unit is a subordinate") assignContextf(&err, u.Name(), context) - return nil, err + return failure(err) } // If required storage is not all dynamic, then assigning @@ -1973,31 +2065,31 @@ storageParams, err := u.machineStorageParams() if err != nil { assignContextf(&err, u.Name(), context) - return nil, err + return failure(err) } storagePools, err := machineStoragePools(u.st, storageParams) if err != nil { assignContextf(&err, u.Name(), context) - return nil, err + return failure(err) } if err := validateDynamicStoragePools(u.st, storagePools); err != nil { if errors.IsNotSupported(err) { - return nil, noCleanMachines + return failure(noCleanMachines) } assignContextf(&err, u.Name(), context) - return nil, err + return failure(err) } // Get the 
unit constraints to see what deployment requirements we have to adhere to. cons, err := u.Constraints() if err != nil { assignContextf(&err, u.Name(), context) - return nil, err + return failure(err) } query, err := u.findCleanMachineQuery(requireEmpty, cons) if err != nil { assignContextf(&err, u.Name(), context) - return nil, err + return failure(err) } // Find all of the candidate machines, and associated @@ -2009,7 +2101,7 @@ var mdocs []*machineDoc if err := machinesCollection.Find(query).All(&mdocs); err != nil { assignContextf(&err, u.Name(), context) - return nil, err + return failure(err) } var unprovisioned []*Machine var instances []instance.Id @@ -2021,7 +2113,7 @@ unprovisioned = append(unprovisioned, m) } else if err != nil { assignContextf(&err, u.Name(), context) - return nil, err + return failure(err) } else { instances = append(instances, instance) instanceMachines[instance] = m @@ -2037,7 +2129,7 @@ // must be maintained. if instances, err = distributeUnit(u, instances); err != nil { assignContextf(&err, u.Name(), context) - return nil, err + return failure(err) } machines := make([]*Machine, len(instances), len(instances)+len(unprovisioned)) for i, instance := range instances { @@ -2045,7 +2137,7 @@ if !ok { err := fmt.Errorf("invalid instance returned: %v", instance) assignContextf(&err, u.Name(), context) - return nil, err + return failure(err) } machines[i] = m } @@ -2066,20 +2158,20 @@ continue } assignContextf(&err, u.Name(), context) - return nil, err + return failure(err) } - err := u.assignToMachine(m, true) + ops, err := u.assignToMachineOps(m, true) if err == nil { - return m, nil + return m, ops, nil } switch errors.Cause(err) { case inUseErr, machineNotAliveErr: default: assignContextf(&err, u.Name(), context) - return nil, err + return failure(err) } } - return nil, noCleanMachines + return failure(noCleanMachines) } // UnassignFromMachine removes the assignment between this unit and the @@ -2208,7 +2300,7 @@ // reestablish normal 
workflow. The retryHooks parameter informs // whether to attempt to reexecute previous failed hooks or to continue // as if they had succeeded before. -func (u *Unit) Resolve(retryHooks bool) error { +func (u *Unit) Resolve(noretryHooks bool) error { // We currently check agent status to see if a unit is // in error state. As the new Juju Health work is completed, // this will change to checking the unit status. @@ -2216,12 +2308,12 @@ if err != nil { return err } - if statusInfo.Status != status.StatusError { + if statusInfo.Status != status.Error { return errors.Errorf("unit %q is not in an error state", u) } - mode := ResolvedNoHooks - if retryHooks { - mode = ResolvedRetryHooks + mode := ResolvedRetryHooks + if noretryHooks { + mode = ResolvedNoHooks } return u.SetResolved(mode) } @@ -2279,9 +2371,21 @@ // StorageConstraints returns the unit's storage constraints. func (u *Unit) StorageConstraints() (map[string]StorageConstraints, error) { - // TODO(axw) eventually we should be able to override service - // storage constraints at the unit level. - return readStorageConstraints(u.st, applicationGlobalKey(u.doc.Application)) + if u.doc.CharmURL == nil { + app, err := u.st.Application(u.doc.Application) + if err != nil { + return nil, errors.Trace(err) + } + return app.StorageConstraints() + } + key := applicationStorageConstraintsKey(u.doc.Application, u.doc.CharmURL) + cons, err := readStorageConstraints(u.st, key) + if errors.IsNotFound(err) { + return nil, nil + } else if err != nil { + return nil, errors.Trace(err) + } + return cons, nil } type addUnitOpsArgs struct { @@ -2296,23 +2400,39 @@ // collection, along with all the associated expected other unit entries. This // method is used by both the *Service.addUnitOpsWithCons method and the // migration import code. 
-func addUnitOps(st *State, args addUnitOpsArgs) []txn.Op { +func addUnitOps(st *State, args addUnitOpsArgs) ([]txn.Op, error) { name := args.unitDoc.Name agentGlobalKey := unitAgentGlobalKey(name) + // TODO: consider the constraints op // TODO: consider storageOps - return []txn.Op{ + prereqOps := []txn.Op{ createStatusOp(st, unitGlobalKey(name), args.workloadStatusDoc), createStatusOp(st, agentGlobalKey, args.agentStatusDoc), createStatusOp(st, globalWorkloadVersionKey(name), args.workloadVersionDoc), createMeterStatusOp(st, agentGlobalKey, args.meterStatusDoc), - { - C: unitsC, - Id: name, - Assert: txn.DocMissing, - Insert: args.unitDoc, - }, } + + // Freshly-created units will not have a charm URL set; migrated + // ones will, and they need to maintain their refcounts. If we + // relax the restrictions on migrating apps mid-upgrade, this + // will need to be more sophisticated, because it might need to + // create the settings doc. + if curl := args.unitDoc.CharmURL; curl != nil { + appName := args.unitDoc.Application + charmRefOps, err := appCharmIncRefOps(st, appName, curl, false) + if err != nil { + return nil, errors.Trace(err) + } + prereqOps = append(prereqOps, charmRefOps...) + } + + return append(prereqOps, txn.Op{ + C: unitsC, + Id: name, + Assert: txn.DocMissing, + Insert: args.unitDoc, + }), nil } // HistoryGetter allows getting the status history based on some identifying key. diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/unit_test.go juju-core-2.0.0/src/github.com/juju/juju/state/unit_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/unit_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/unit_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -5,7 +5,7 @@ import ( "strconv" - "time" + "time" // Only used for time types. 
"github.com/juju/errors" jc "github.com/juju/testing/checkers" @@ -683,9 +683,9 @@ func (s *UnitSuite) TestDestroySetStatusRetry(c *gc.C) { defer state.SetRetryHooks(c, s.State, func() { - now := time.Now() + now := coretesting.NonZeroTime() sInfo := status.StatusInfo{ - Status: status.StatusIdle, + Status: status.Idle, Message: "", Since: &now, } @@ -796,18 +796,18 @@ status status.Status info string }{{ - status.StatusExecuting, "blah", + status.Executing, "blah", }, { - status.StatusIdle, "blah", + status.Idle, "blah", }, { - status.StatusFailed, "blah", + status.Failed, "blah", }, { - status.StatusRebooting, "blah", + status.Rebooting, "blah", }} { c.Logf("test %d: %s", i, test.status) unit, err := s.service.AddUnit() c.Assert(err, jc.ErrorIsNil) - now := time.Now() + now := coretesting.NonZeroTime() sInfo := status.StatusInfo{ Status: test.status, Message: test.info, @@ -920,9 +920,9 @@ err = s.unit.Resolve(true) c.Assert(err, gc.ErrorMatches, `unit "wordpress/0" is not in an error state`) - now := time.Now() + now := coretesting.NonZeroTime() sInfo := status.StatusInfo{ - Status: status.StatusError, + Status: status.Error, Message: "gaaah", Since: &now, } @@ -932,7 +932,7 @@ c.Assert(err, jc.ErrorIsNil) err = s.unit.Resolve(true) c.Assert(err, gc.ErrorMatches, `cannot set resolved mode for unit "wordpress/0": already resolved`) - c.Assert(s.unit.Resolved(), gc.Equals, state.ResolvedNoHooks) + c.Assert(s.unit.Resolved(), gc.Equals, state.ResolvedRetryHooks) err = s.unit.ClearResolved() c.Assert(err, jc.ErrorIsNil) @@ -940,7 +940,7 @@ c.Assert(err, jc.ErrorIsNil) err = s.unit.Resolve(false) c.Assert(err, gc.ErrorMatches, `cannot set resolved mode for unit "wordpress/0": already resolved`) - c.Assert(s.unit.Resolved(), gc.Equals, state.ResolvedRetryHooks) + c.Assert(s.unit.Resolved(), gc.Equals, state.ResolvedNoHooks) } func (s *UnitSuite) TestGetSetClearResolved(c *gc.C) { diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/upgrade.go 
juju-core-2.0.0/src/github.com/juju/juju/state/upgrade.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/upgrade.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/upgrade.go 2016-10-13 14:31:49.000000000 +0000 @@ -262,12 +262,11 @@ } doc := upgradeInfoDoc{ - Id: currentUpgradeId, - PreviousVersion: previousVersion, - TargetVersion: targetVersion, - Status: UpgradePending, - // TODO(fwereade): 2016-03-17 lp:1558657 - Started: time.Now().UTC(), + Id: currentUpgradeId, + PreviousVersion: previousVersion, + TargetVersion: targetVersion, + Status: UpgradePending, + Started: st.clock.Now().UTC(), ControllersReady: []string{machineId}, } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/upgrades.go juju-core-2.0.0/src/github.com/juju/juju/state/upgrades.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/upgrades.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/upgrades.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,57 +4,31 @@ package state import ( - "time" + "strings" "github.com/juju/errors" "github.com/juju/loggo" "gopkg.in/juju/names.v2" "gopkg.in/mgo.v2/bson" "gopkg.in/mgo.v2/txn" - - "github.com/juju/juju/status" ) var upgradesLogger = loggo.GetLogger("juju.state.upgrade") -func AddPreferredAddressesToMachines(st *State) error { - machines, err := st.AllMachines() - if err != nil { - return errors.Trace(err) - } - - for _, machine := range machines { - if machine.Life() == Dead { - continue - } - // Setting the addresses is enough to trigger setting the preferred - // addresses. - err = machine.SetMachineAddresses(machine.MachineAddresses()...) - if err != nil { - return errors.Trace(err) - } - err := machine.SetProviderAddresses(machine.ProviderAddresses()...) - if err != nil { - return errors.Trace(err) - } - } - return nil -} - -// runForAllEnvStates will run runner function for every env passing a state -// for that env. 
-func runForAllEnvStates(st *State, runner func(st *State) error) error { - environments, closer := st.getCollection(modelsC) +// runForAllModelStates will run runner function for every model passing a state +// for that model. +func runForAllModelStates(st *State, runner func(st *State) error) error { + models, closer := st.getCollection(modelsC) defer closer() - var envDocs []bson.M - err := environments.Find(nil).Select(bson.M{"_id": 1}).All(&envDocs) + var modelDocs []bson.M + err := models.Find(nil).Select(bson.M{"_id": 1}).All(&modelDocs) if err != nil { return errors.Annotate(err, "failed to read models") } - for _, envDoc := range envDocs { - modelUUID := envDoc["_id"].(string) + for _, modelDoc := range modelDocs { + modelUUID := modelDoc["_id"].(string) envSt, err := st.ForModel(names.NewModelTag(modelUUID)) if err != nil { return errors.Annotatef(err, "failed to open model %q", modelUUID) @@ -67,136 +41,51 @@ return nil } -// AddFilesystemStatus ensures each filesystem has a status doc. -func AddFilesystemStatus(st *State) error { - return runForAllEnvStates(st, func(st *State) error { - filesystems, err := st.AllFilesystems() - if err != nil { - return errors.Trace(err) - } - var ops []txn.Op - for _, filesystem := range filesystems { - _, err := filesystem.Status() - if err == nil { - continue - } - if !errors.IsNotFound(err) { - return errors.Annotate(err, "getting status") - } - status, err := upgradingFilesystemStatus(st, filesystem) - if err != nil { - return errors.Annotate(err, "deciding filesystem status") - } - // TODO(perrito666) 2016-05-02 lp:1558657 - ops = append(ops, createStatusOp(st, filesystem.globalKey(), statusDoc{ - Status: status, - Updated: time.Now().UnixNano(), - })) +// readBsonDField returns the value of a given field in a bson.D. 
+func readBsonDField(d bson.D, name string) (interface{}, bool) { + for i := range d { + field := &d[i] + if field.Name == name { + return field.Value, true } - if len(ops) > 0 { - return errors.Trace(st.runTransaction(ops)) - } - return nil - }) + } + return nil, false } -// If the filesystem has not been provisioned, then it should be Pending; -// if it has been provisioned, but there is an unprovisioned attachment, then -// it should be Attaching; otherwise it is Attached. -func upgradingFilesystemStatus(st *State, filesystem Filesystem) (status.Status, error) { - if _, err := filesystem.Info(); errors.IsNotProvisioned(err) { - return status.StatusPending, nil - } - attachments, err := st.FilesystemAttachments(filesystem.FilesystemTag()) - if err != nil { - return "", errors.Trace(err) - } - for _, attachment := range attachments { - _, err := attachment.Info() - if errors.IsNotProvisioned(err) { - return status.StatusAttaching, nil +// replaceBsonDField replaces a field in bson.D. +func replaceBsonDField(d bson.D, name string, value interface{}) error { + for i, field := range d { + if field.Name == name { + newField := field + newField.Value = value + d[i] = newField + return nil } } - return status.StatusAttached, nil + return errors.NotFoundf("field %q", name) } -// MigrateSettingsSchema migrates the schema of the settings collection, -// moving non-reserved keys at the top-level into a subdoc, and introducing -// a top-level "version" field with the initial value matching txn-revno. -// -// This migration takes place both before and after model-uuid migration, -// to get the correct txn-revno value. -func MigrateSettingsSchema(st *State) error { - coll, closer := st.getRawCollection(settingsC) +// RenameAddModelPermission renames any permissions called addmodel to add-model. 
+func RenameAddModelPermission(st *State) error { + coll, closer := st.getRawCollection(permissionsC) defer closer() + upgradesLogger.Infof("migrating addmodel permission") - upgradesLogger.Debugf("migrating schema of the %s collection", settingsC) - iter := coll.Find(nil).Iter() + iter := coll.Find(bson.M{"access": "addmodel"}).Iter() defer iter.Close() - var ops []txn.Op var doc bson.M for iter.Next(&doc) { - if !settingsDocNeedsMigration(doc) { - continue + id, ok := doc["_id"] + if !ok { + return errors.New("no id found in permission doc") } - id := doc["_id"] - txnRevno := doc["txn-revno"].(int64) - - // Remove reserved attributes; we'll move the remaining - // ones to the "settings" subdoc. - delete(doc, "model-uuid") - delete(doc, "_id") - delete(doc, "txn-revno") - delete(doc, "txn-queue") - - // If there exists a setting by the name "settings", - // we must remove it first, or it will collide with - // the dotted-notation $sets. - if _, ok := doc["settings"]; ok { - ops = append(ops, txn.Op{ - C: settingsC, - Id: id, - Assert: txn.DocExists, - Update: bson.D{{"$unset", bson.D{{"settings", 1}}}}, - }) - } - - var update bson.D - for key, value := range doc { - if key != "settings" && key != "version" { - // Don't try to unset these fields, - // as we've unset "settings" above - // already, and we'll overwrite - // "version" below. - update = append(update, bson.DocElem{ - "$unset", bson.D{{key, 1}}, - }) - } - update = append(update, bson.DocElem{ - "$set", bson.D{{"settings." + key, value}}, - }) - } - if len(update) == 0 { - // If there are no settings, then we need - // to add an empty "settings" map so we - // can tell for next time that migration - // is complete, and don't move the "version" - // field we add. 
- update = bson.D{{ - "$set", bson.D{{"settings", bson.M{}}}, - }} - } - update = append(update, bson.DocElem{ - "$set", bson.D{{"version", txnRevno}}, - }) - ops = append(ops, txn.Op{ - C: settingsC, + C: permissionsC, Id: id, Assert: txn.DocExists, - Update: update, + Update: bson.D{{"$set", bson.D{{"access", "add-model"}}}}, }) } if err := iter.Err(); err != nil { @@ -205,48 +94,129 @@ return st.runRawTransaction(ops) } -func settingsDocNeedsMigration(doc bson.M) bool { - // It is not possible for there to exist a settings value - // with type bson.M, so we know that it is the new settings - // field and not just a setting with the name "settings". - if _, ok := doc["settings"].(bson.M); ok { - return false +// StripLocalUserDomain removes any @local suffix from any relevant document field values. +func StripLocalUserDomain(st *State) error { + var ops []txn.Op + more, err := stripLocalFromFields(st, cloudCredentialsC, "_id", "owner") + if err != nil { + return err } - return true -} + ops = append(ops, more...) -func addDefaultBindingsToServices(st *State) error { - applications, err := st.AllApplications() + more, err = stripLocalFromFields(st, modelsC, "owner", "cloud-credential") if err != nil { - return errors.Trace(err) + return err } + ops = append(ops, more...) - upgradesLogger.Debugf("adding default endpoint bindings to applications (where missing)") - ops := make([]txn.Op, 0, len(applications)) - for _, application := range applications { - ch, _, err := application.Charm() - if err != nil { - return errors.Annotatef(err, "cannot get charm for application %q", application.Name()) + more, err = stripLocalFromFields(st, usermodelnameC, "_id") + if err != nil { + return err + } + ops = append(ops, more...) + + more, err = stripLocalFromFields(st, controllerUsersC, "_id", "user", "createdby") + if err != nil { + return err + } + ops = append(ops, more...) 
+ + more, err = stripLocalFromFields(st, modelUsersC, "_id", "user", "createdby") + if err != nil { + return err + } + ops = append(ops, more...) + + more, err = stripLocalFromFields(st, permissionsC, "_id", "subject-global-key") + if err != nil { + return err + } + ops = append(ops, more...) + + more, err = stripLocalFromFields(st, modelUserLastConnectionC, "_id", "user") + if err != nil { + return err + } + ops = append(ops, more...) + return st.runRawTransaction(ops) +} + +func stripLocalFromFields(st *State, collName string, fields ...string) ([]txn.Op, error) { + coll, closer := st.getRawCollection(collName) + defer closer() + upgradesLogger.Infof("migrating document fields of the %s collection", collName) + + iter := coll.Find(nil).Iter() + defer iter.Close() + var ops []txn.Op + var doc bson.D + for iter.Next(&doc) { + // Get a copy of the current doc id so we can see if it has changed. + var newId interface{} + id, ok := readBsonDField(doc, "_id") + if ok { + newId = id } - if _, err := application.EndpointBindings(); err == nil { - upgradesLogger.Debugf("application %q already has bindings (skipping)", application.Name()) - continue - } else if !errors.IsNotFound(err) { - return errors.Annotatef(err, "checking application %q for existing bindings", application.Name()) + + // Take a copy of the current doc fields. + newDoc := make(bson.D, len(doc)) + for i, f := range doc { + newDoc[i] = f } - // Passing nil for the bindings map will use the defaults. - createOp, err := createEndpointBindingsOp(st, application.globalKey(), nil, ch.Meta()) - if err != nil { - return errors.Annotatef(err, "setting default endpoint bindings for application %q", application.Name()) + + // Iterate over the fields that need to be updated and + // record any updates to be made. 
+ var update bson.D + for _, field := range fields { + isId := field == "_id" + fieldVal, ok := readBsonDField(doc, field) + if !ok { + continue + } + updatedVal := strings.Replace(fieldVal.(string), "@local", "", -1) + if err := replaceBsonDField(newDoc, field, updatedVal); err != nil { + return nil, err + } + if isId { + newId = updatedVal + } else { + if fieldVal != updatedVal { + update = append(update, bson.DocElem{ + "$set", bson.D{{field, updatedVal}}, + }) + } + } } - ops = append(ops, createOp) - } - return st.runTransaction(ops) -} -// AddDefaultEndpointBindingsToServices adds default endpoint bindings for each -// service. As long as the service has a charm URL set, each charm endpoint will -// be bound to the default space. -func AddDefaultEndpointBindingsToServices(st *State) error { - return runForAllEnvStates(st, addDefaultBindingsToServices) + // For documents where the id has not changed, we can + // use an update operation. + if newId == id { + if len(update) > 0 { + ops = append(ops, txn.Op{ + C: collName, + Id: id, + Assert: txn.DocExists, + Update: update, + }) + } + } else { + // Where the id has changed, we need to remove the old and + // insert the new document. + ops = append(ops, []txn.Op{{ + C: collName, + Id: id, + Assert: txn.DocExists, + Remove: true, + }, { + C: collName, + Id: newId, + Assert: txn.DocMissing, + Insert: newDoc, + }}...) 
+ } + } + if err := iter.Err(); err != nil { + return nil, errors.Trace(err) + } + return ops, nil } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/upgrades_test.go juju-core-2.0.0/src/github.com/juju/juju/state/upgrades_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/upgrades_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/upgrades_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -6,16 +6,10 @@ import ( "time" - "github.com/juju/errors" jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" "gopkg.in/mgo.v2" "gopkg.in/mgo.v2/bson" - "gopkg.in/mgo.v2/txn" - - "github.com/juju/juju/core/description" - "github.com/juju/juju/network" - "github.com/juju/juju/status" ) type upgradesSuite struct { @@ -24,564 +18,346 @@ var _ = gc.Suite(&upgradesSuite{}) -func (s *upgradesSuite) addLegacyDoc(c *gc.C, collName string, legacyDoc bson.M) { - ops := []txn.Op{{ - C: collName, - Id: legacyDoc["_id"], - Assert: txn.DocMissing, - Insert: legacyDoc, - }} - err := s.state.runRawTransaction(ops) - c.Assert(err, jc.ErrorIsNil) -} - -func (s *upgradesSuite) FindId(c *gc.C, coll *mgo.Collection, id interface{}, doc interface{}) { - err := coll.FindId(id).One(doc) - c.Assert(err, jc.ErrorIsNil) -} - -func (s *upgradesSuite) removePreferredAddressFields(c *gc.C, machine *Machine) { - machinesCol, closer := s.state.getRawCollection(machinesC) +func (s *upgradesSuite) TestStripLocalUserDomainCredentials(c *gc.C) { + coll, closer := s.state.getRawCollection(cloudCredentialsC) defer closer() - - err := machinesCol.Update( - bson.D{{"_id", s.state.docID(machine.Id())}}, - bson.D{{"$unset", bson.D{{"preferredpublicaddress", ""}}}}, - ) - c.Assert(err, jc.ErrorIsNil) - err = machinesCol.Update( - bson.D{{"_id", s.state.docID(machine.Id())}}, - bson.D{{"$unset", bson.D{{"preferredprivateaddress", ""}}}}, + err := coll.Insert( + cloudCredentialDoc{ + DocID: "aws#admin@local#default", + Owner: "user-admin@local", + 
Name: "default", + Cloud: "cloud-aws", + AuthType: "userpass", + Attributes: map[string]string{"user": "fred"}, + }, + cloudCredentialDoc{ + DocID: "aws#fred#default", + Owner: "user-mary@external", + Name: "default", + Cloud: "cloud-aws", + AuthType: "userpass", + Attributes: map[string]string{"user": "fred"}, + }, ) c.Assert(err, jc.ErrorIsNil) -} - -func (s *upgradesSuite) setPreferredAddressFields(c *gc.C, machine *Machine, addr string) { - machinesCol, closer := s.state.getRawCollection(machinesC) - defer closer() - stateAddr := fromNetworkAddress(network.NewAddress(addr), OriginUnknown) - err := machinesCol.Update( - bson.D{{"_id", s.state.docID(machine.Id())}}, - bson.D{{"$set", bson.D{{"preferredpublicaddress", stateAddr}}}}, - ) - c.Assert(err, jc.ErrorIsNil) - err = machinesCol.Update( - bson.D{{"_id", s.state.docID(machine.Id())}}, - bson.D{{"$set", bson.D{{"preferredprivateaddress", stateAddr}}}}, - ) - c.Assert(err, jc.ErrorIsNil) + expected := []bson.M{{ + "_id": "aws#admin#default", + "owner": "user-admin", + "cloud": "cloud-aws", + "name": "default", + "revoked": false, + "auth-type": "userpass", + "attributes": bson.M{"user": "fred"}, + }, { + "_id": "aws#fred#default", + "owner": "user-mary@external", + "cloud": "cloud-aws", + "name": "default", + "revoked": false, + "auth-type": "userpass", + "attributes": bson.M{"user": "fred"}, + }} + s.assertStrippedUserData(c, coll, expected) } -func assertMachineAddresses(c *gc.C, machine *Machine, publicAddress, privateAddress string) { - err := machine.Refresh() - c.Assert(err, jc.ErrorIsNil) - addr, err := machine.PublicAddress() - if publicAddress != "" { - c.Assert(err, jc.ErrorIsNil) - } else { - c.Assert(err, jc.Satisfies, network.IsNoAddressError) - } - c.Assert(addr.Value, gc.Equals, publicAddress) - privAddr, err := machine.PrivateAddress() - if privateAddress != "" { - c.Assert(err, jc.ErrorIsNil) - } else { - c.Assert(err, jc.Satisfies, network.IsNoAddressError) - } - c.Assert(privAddr.Value, 
gc.Equals, privateAddress) -} +func (s *upgradesSuite) TestStripLocalUserDomainModels(c *gc.C) { + coll, closer := s.state.getRawCollection(modelsC) + defer closer() -func (s *upgradesSuite) createMachinesWithAddresses(c *gc.C) []*Machine { - _, err := s.state.AddMachine("quantal", JobManageModel) - c.Assert(err, jc.ErrorIsNil) - _, err = s.state.AddMachines([]MachineTemplate{ - {Series: "quantal", Jobs: []MachineJob{JobHostUnits}}, - {Series: "quantal", Jobs: []MachineJob{JobHostUnits}}, - {Series: "quantal", Jobs: []MachineJob{JobHostUnits}}, - }...) - c.Assert(err, jc.ErrorIsNil) - machines, err := s.state.AllMachines() + var initialModels []bson.M + err := coll.Find(nil).Sort("_id").All(&initialModels) c.Assert(err, jc.ErrorIsNil) - c.Assert(machines, gc.HasLen, 4) + c.Assert(initialModels, gc.HasLen, 1) - m1 := machines[0] - m2 := machines[1] - m4 := machines[3] - err = m1.SetProviderAddresses(network.NewAddress("8.8.8.8")) - c.Assert(err, jc.ErrorIsNil) - err = m2.SetMachineAddresses(network.NewAddress("10.0.0.1")) - c.Assert(err, jc.ErrorIsNil) - err = m2.SetProviderAddresses(network.NewAddress("10.0.0.2"), network.NewAddress("8.8.4.4")) + err = coll.Insert( + modelDoc{ + UUID: "0000-dead-beaf-0001", + Owner: "user-admin@local", + Name: "controller", + ControllerUUID: "deadbeef-1bad-500d-9000-4b1d0d06f00d", + Cloud: "cloud-aws", + CloudRegion: "us-west-1", + CloudCredential: "aws#fred@local#default", + }, + modelDoc{ + UUID: "0000-dead-beaf-0002", + Owner: "user-mary@external", + Name: "default", + ControllerUUID: "deadbeef-1bad-500d-9000-4b1d0d06f00d", + Cloud: "cloud-aws", + CloudRegion: "us-west-1", + CloudCredential: "aws#mary@external#default", + }, + ) c.Assert(err, jc.ErrorIsNil) - // Attempting to set the addresses of a dead machine will fail, so we - // include a dead machine to make sure the upgrade step can cope. 
- err = m4.SetProviderAddresses(network.NewAddress("8.8.8.8")) - c.Assert(err, jc.ErrorIsNil) - err = m4.EnsureDead() - c.Assert(err, jc.ErrorIsNil) + initialModel := initialModels[0] + delete(initialModel, "txn-queue") + delete(initialModel, "txn-revno") + initialModel["owner"] = "test-admin" - // Delete the preferred address fields. - for _, machine := range machines { - s.removePreferredAddressFields(c, machine) + expected := []bson.M{{ + "_id": "0000-dead-beaf-0001", + "owner": "user-admin", + "cloud": "cloud-aws", + "name": "controller", + "cloud-region": "us-west-1", + "cloud-credential": "aws#fred#default", + "controller-uuid": "deadbeef-1bad-500d-9000-4b1d0d06f00d", + "life": 0, + "migration-mode": "", + }, { + "_id": "0000-dead-beaf-0002", + "owner": "user-mary@external", + "cloud": "cloud-aws", + "name": "default", + "cloud-region": "us-west-1", + "cloud-credential": "aws#mary@external#default", + "controller-uuid": "deadbeef-1bad-500d-9000-4b1d0d06f00d", + "life": 0, + "migration-mode": "", + }, + initialModel, } - return machines -} - -func (s *upgradesSuite) TestAddPreferredAddressesToMachines(c *gc.C) { - machines := s.createMachinesWithAddresses(c) - m1 := machines[0] - m2 := machines[1] - m3 := machines[2] - - err := AddPreferredAddressesToMachines(s.state) - c.Assert(err, jc.ErrorIsNil) - assertMachineAddresses(c, m1, "8.8.8.8", "8.8.8.8") - assertMachineAddresses(c, m2, "8.8.4.4", "10.0.0.2") - assertMachineAddresses(c, m3, "", "") + s.assertStrippedUserData(c, coll, expected) } -func (s *upgradesSuite) TestAddPreferredAddressesToMachinesIdempotent(c *gc.C) { - machines := s.createMachinesWithAddresses(c) - m1 := machines[0] - m2 := machines[1] - m3 := machines[2] +func (s *upgradesSuite) TestStripLocalUserDomainModelNames(c *gc.C) { + coll, closer := s.state.getRawCollection(usermodelnameC) + defer closer() - err := AddPreferredAddressesToMachines(s.state) + err := coll.Insert( + bson.M{"_id": "fred@local:test"}, + bson.M{"_id": 
"mary@external:test2"}, + ) c.Assert(err, jc.ErrorIsNil) - assertMachineAddresses(c, m1, "8.8.8.8", "8.8.8.8") - assertMachineAddresses(c, m2, "8.8.4.4", "10.0.0.2") - assertMachineAddresses(c, m3, "", "") - - err = AddPreferredAddressesToMachines(s.state) - c.Assert(err, jc.ErrorIsNil) + expected := []bson.M{{ + "_id": "fred:test", + }, { + "_id": "mary@external:test2", + }, { + "_id": "test-admin:testenv", + }} - assertMachineAddresses(c, m1, "8.8.8.8", "8.8.8.8") - assertMachineAddresses(c, m2, "8.8.4.4", "10.0.0.2") - assertMachineAddresses(c, m3, "", "") + s.assertStrippedUserData(c, coll, expected) } -func (s *upgradesSuite) TestAddPreferredAddressesToMachinesUpdatesExistingFields(c *gc.C) { - machines := s.createMachinesWithAddresses(c) - m1 := machines[0] - m2 := machines[1] - m3 := machines[2] - s.setPreferredAddressFields(c, m1, "1.1.2.2") - s.setPreferredAddressFields(c, m2, "1.1.2.2") - s.setPreferredAddressFields(c, m3, "1.1.2.2") - - assertMachineInitial := func(m *Machine) { - err := m.Refresh() - c.Assert(err, jc.ErrorIsNil) - addr, err := m.PublicAddress() - c.Assert(err, jc.ErrorIsNil) - c.Assert(addr.Value, gc.Equals, "1.1.2.2") - addr, err = m.PrivateAddress() - c.Assert(err, jc.ErrorIsNil) - c.Assert(addr.Value, gc.Equals, "1.1.2.2") - } - assertMachineInitial(m1) - assertMachineInitial(m2) - assertMachineInitial(m3) - - err := AddPreferredAddressesToMachines(s.state) - c.Assert(err, jc.ErrorIsNil) - - assertMachineAddresses(c, m1, "8.8.8.8", "8.8.8.8") - assertMachineAddresses(c, m2, "8.8.4.4", "10.0.0.2") - assertMachineAddresses(c, m3, "", "") +func (s *upgradesSuite) TestStripLocalUserDomainControllerUser(c *gc.C) { + s.assertStripLocalUserDomainUserAccess(c, controllerUsersC) } -func (s *upgradesSuite) readDocIDs(c *gc.C, coll, regex string) []string { - settings, closer := s.state.getRawCollection(coll) - defer closer() - var docs []bson.M - err := settings.Find(bson.D{{"_id", bson.D{{"$regex", regex}}}}).All(&docs) - c.Assert(err, 
jc.ErrorIsNil) - var actualDocIDs []string - for _, doc := range docs { - actualDocIDs = append(actualDocIDs, doc["_id"].(string)) - } - return actualDocIDs +func (s *upgradesSuite) TestStripLocalUserDomainModelUser(c *gc.C) { + s.assertStripLocalUserDomainUserAccess(c, modelUsersC) } -func (s *upgradesSuite) getDocMap(c *gc.C, docID, collection string) (map[string]interface{}, error) { - docMap := map[string]interface{}{} - coll, closer := s.state.getRawCollection(collection) +func (s *upgradesSuite) assertStripLocalUserDomainUserAccess(c *gc.C, collName string) { + coll, closer := s.state.getRawCollection(collName) defer closer() - err := coll.Find(bson.D{{"_id", docID}}).One(&docMap) - return docMap, err -} - -func unsetField(st *State, id, collection, field string) error { - return st.runTransaction( - []txn.Op{{ - C: collection, - Id: id, - Update: bson.D{{"$unset", bson.D{{field, nil}}}}, - }, - }) -} - -func setupMachineBoundStorageTests(c *gc.C, st *State) (*Machine, Volume, Filesystem, func() error) { - // Make an unprovisioned machine with storage for tests to use. - // TODO(axw) extend testing/factory to allow creating unprovisioned - // machines. 
- m, err := st.AddOneMachine(MachineTemplate{ - Series: "quantal", - Jobs: []MachineJob{JobHostUnits}, - Volumes: []MachineVolumeParams{ - {Volume: VolumeParams{Pool: "loop", Size: 2048}}, - }, - Filesystems: []MachineFilesystemParams{ - {Filesystem: FilesystemParams{Pool: "rootfs", Size: 2048}}, - }, - }) - c.Assert(err, jc.ErrorIsNil) - va, err := m.VolumeAttachments() - c.Assert(err, jc.ErrorIsNil) - c.Assert(va, gc.HasLen, 1) - v, err := st.Volume(va[0].Volume()) - c.Assert(err, jc.ErrorIsNil) - - fa, err := st.MachineFilesystemAttachments(m.MachineTag()) - c.Assert(err, jc.ErrorIsNil) - c.Assert(fa, gc.HasLen, 1) - f, err := st.Filesystem(fa[0].Filesystem()) + var initialUsers []bson.M + err := coll.Find(nil).Sort("_id").All(&initialUsers) c.Assert(err, jc.ErrorIsNil) - - return m, v, f, m.Destroy -} - -func (s *upgradesSuite) TestAddFilesystemStatus(c *gc.C) { - _, _, filesystem, cleanup := setupMachineBoundStorageTests(c, s.state) - defer cleanup() - - removeStatusDoc(c, s.state, filesystem) - _, err := filesystem.Status() - c.Assert(err, jc.Satisfies, errors.IsNotFound) - s.assertAddFilesystemStatus(c, filesystem, status.StatusPending) -} - -func (s *upgradesSuite) TestAddFilesystemStatusDoesNotOverwrite(c *gc.C) { - _, _, filesystem, cleanup := setupMachineBoundStorageTests(c, s.state) - defer cleanup() + c.Assert(initialUsers, gc.HasLen, 1) now := time.Now() - sInfo := status.StatusInfo{ - Status: status.StatusDestroying, - Message: "", - Since: &now, - } - err := filesystem.SetStatus(sInfo) + err = coll.Insert( + userAccessDoc{ + ID: "zfred@local", + ObjectUUID: "uuid1", + UserName: "fred@local", + DisplayName: "Fred", + CreatedBy: "admin@local", + DateCreated: now, + }, + userAccessDoc{ + ID: "zmary@external", + ObjectUUID: "uuid2", + UserName: "mary@external", + DisplayName: "Mary", + CreatedBy: "admin@local", + DateCreated: now, + }, + ) c.Assert(err, jc.ErrorIsNil) - s.assertAddFilesystemStatus(c, filesystem, status.StatusDestroying) -} -func (s 
*upgradesSuite) TestAddFilesystemStatusProvisioned(c *gc.C) { - _, _, filesystem, cleanup := setupMachineBoundStorageTests(c, s.state) - defer cleanup() - - err := s.state.SetFilesystemInfo(filesystem.FilesystemTag(), FilesystemInfo{ - FilesystemId: "fs", - }) - c.Assert(err, jc.ErrorIsNil) - removeStatusDoc(c, s.state, filesystem) - s.assertAddFilesystemStatus(c, filesystem, status.StatusAttaching) + initialUser := initialUsers[0] + delete(initialUser, "txn-queue") + delete(initialUser, "txn-revno") + initialCreated := initialUser["datecreated"].(time.Time) + initialUser["datecreated"] = initialCreated.Truncate(time.Millisecond) + + roundedNow := now.Truncate(time.Millisecond) + expected := []bson.M{ + initialUser, + { + "_id": "zfred", + "object-uuid": "uuid1", + "user": "fred", + "displayname": "Fred", + "createdby": "admin", + "datecreated": roundedNow, + }, { + "_id": "zmary@external", + "object-uuid": "uuid2", + "user": "mary@external", + "displayname": "Mary", + "createdby": "admin", + "datecreated": roundedNow, + }, + } + s.assertStrippedUserData(c, coll, expected) } -func (s *upgradesSuite) TestAddFilesystemStatusAttached(c *gc.C) { - machine, _, filesystem, cleanup := setupMachineBoundStorageTests(c, s.state) - defer cleanup() - - err := machine.SetProvisioned("fake", "fake", nil) - c.Assert(err, jc.ErrorIsNil) +func (s *upgradesSuite) TestStripLocalUserDomainPermissions(c *gc.C) { + coll, closer := s.state.getRawCollection(permissionsC) + defer closer() - err = s.state.SetFilesystemInfo(filesystem.FilesystemTag(), FilesystemInfo{ - FilesystemId: "fs", - }) + var initialPermissions []bson.M + err := coll.Find(nil).Sort("_id").All(&initialPermissions) c.Assert(err, jc.ErrorIsNil) + c.Assert(initialPermissions, gc.HasLen, 2) - err = s.state.SetFilesystemAttachmentInfo( - machine.MachineTag(), - filesystem.FilesystemTag(), - FilesystemAttachmentInfo{}, + err = coll.Insert( + permissionDoc{ + ID: "uuid#fred@local", + ObjectGlobalKey: "c#uuid", + 
SubjectGlobalKey: "fred@local", + Access: "addmodel", + }, + permissionDoc{ + ID: "uuid#mary@external", + ObjectGlobalKey: "c#uuid", + SubjectGlobalKey: "mary@external", + Access: "addmodel", + }, ) c.Assert(err, jc.ErrorIsNil) - removeStatusDoc(c, s.state, filesystem) - s.assertAddFilesystemStatus(c, filesystem, status.StatusAttached) -} - -func (s *upgradesSuite) assertAddFilesystemStatus(c *gc.C, filesystem Filesystem, expect status.Status) { - err := AddFilesystemStatus(s.state) - c.Assert(err, jc.ErrorIsNil) - - info, err := filesystem.Status() - c.Assert(err, jc.ErrorIsNil) - c.Assert(info.Status, gc.Equals, expect) -} + for i, inital := range initialPermissions { + perm := inital + delete(perm, "txn-queue") + delete(perm, "txn-revno") + initialPermissions[i] = perm + } -func removeStatusDoc(c *gc.C, st *State, g GlobalEntity) { - op := removeStatusOp(st, g.globalKey()) - err := st.runTransaction([]txn.Op{op}) - c.Assert(err, jc.ErrorIsNil) + expected := []bson.M{initialPermissions[0], initialPermissions[1], { + "_id": "uuid#fred", + "object-global-key": "c#uuid", + "subject-global-key": "fred", + "access": "addmodel", + }, { + "_id": "uuid#mary@external", + "object-global-key": "c#uuid", + "subject-global-key": "mary@external", + "access": "addmodel", + }} + s.assertStrippedUserData(c, coll, expected) } -func (s *upgradesSuite) TestMigrateSettingsSchema(c *gc.C) { - // Insert test documents. - settingsColl, closer := s.state.getRawCollection(settingsC) +func (s *upgradesSuite) TestStripLocalUserDomainLastConnection(c *gc.C) { + coll, closer := s.state.getRawCollection(modelUserLastConnectionC) defer closer() - err := settingsColl.Insert( - bson.D{ - // Post-model-uuid migration, with no settings. - {"_id", "1"}, - {"model-uuid", "model-uuid"}, - {"txn-revno", int64(99)}, - {"txn-queue", []string{}}, - }, - bson.D{ - // Post-model-uuid migration, with settings. One - // of the settings is called "settings", and - // one "version". 
- {"_id", "2"}, - {"model-uuid", "model-uuid"}, - {"txn-revno", int64(99)}, - {"txn-queue", []string{}}, - {"settings", int64(123)}, - {"version", "onetwothree"}, - }, - bson.D{ - // Pre-model-uuid migration, with no settings. - {"_id", "3"}, - {"txn-revno", int64(99)}, - {"txn-queue", []string{}}, - }, - bson.D{ - // Pre-model-uuid migration, with settings. - {"_id", "4"}, - {"txn-revno", int64(99)}, - {"txn-queue", []string{}}, - {"settings", int64(123)}, - {"version", "onetwothree"}, - }, - bson.D{ - // Already migrated, with no settings. - {"_id", "5"}, - {"model-uuid", "model-uuid"}, - {"txn-revno", int64(99)}, - {"txn-queue", []string{}}, - {"version", int64(98)}, - {"settings", map[string]interface{}{}}, - }, - bson.D{ - // Already migrated, with settings. - {"_id", "6"}, - {"model-uuid", "model-uuid"}, - {"txn-revno", int64(99)}, - {"txn-queue", []string{}}, - {"version", int64(98)}, - {"settings", bson.D{ - {"settings", int64(123)}, - {"version", "onetwothree"}, - }}, + + now := time.Now() + err := coll.Insert( + modelUserLastConnectionDoc{ + ID: "fred@local", + ModelUUID: "uuid", + UserName: "fred@local", + LastConnection: now, + }, + modelUserLastConnectionDoc{ + ID: "mary@external", + ModelUUID: "uuid", + UserName: "mary@external", + LastConnection: now, }, ) c.Assert(err, jc.ErrorIsNil) - // Expected docs, excluding txn-queu which we cannot predict. 
+ roundedNow := now.Truncate(time.Millisecond) expected := []bson.M{{ - "_id": "1", - "model-uuid": "model-uuid", - "txn-revno": int64(100), - "settings": bson.M{}, - "version": int64(99), - }, { - "_id": "2", - "model-uuid": "model-uuid", - "txn-revno": int64(101), - "settings": bson.M{ - "settings": int64(123), - "version": "onetwothree", - }, - "version": int64(99), - }, { - "_id": "3", - "txn-revno": int64(100), - "settings": bson.M{}, - "version": int64(99), + "_id": "fred", + "model-uuid": "uuid", + "user": "fred", + "last-connection": roundedNow, }, { - "_id": "4", - "txn-revno": int64(101), - "settings": bson.M{ - "settings": int64(123), - "version": "onetwothree", - }, - "version": int64(99), - }, { - "_id": "5", - "model-uuid": "model-uuid", - "txn-revno": int64(99), - "version": int64(98), - "settings": bson.M{}, - }, { - "_id": "6", - "model-uuid": "model-uuid", - "txn-revno": int64(99), - "version": int64(98), - "settings": bson.M{ - "settings": int64(123), - "version": "onetwothree", - }, + "_id": "mary@external", + "model-uuid": "uuid", + "user": "mary@external", + "last-connection": roundedNow, }} + s.assertStrippedUserData(c, coll, expected) +} + +func (s *upgradesSuite) assertStrippedUserData(c *gc.C, coll *mgo.Collection, expected []bson.M) { + s.assertUpgradedData(c, StripLocalUserDomain, coll, expected) +} +func (s *upgradesSuite) assertUpgradedData(c *gc.C, upgrade func(*State) error, coll *mgo.Collection, expected []bson.M) { // Two rounds to check idempotency. 
for i := 0; i < 2; i++ { - err = MigrateSettingsSchema(s.state) + err := upgrade(s.state) c.Assert(err, jc.ErrorIsNil) var docs []bson.M - err = settingsColl.Find( - bson.D{{"model-uuid", bson.D{{"$ne", s.state.ModelUUID()}}}}, - ).Sort("_id").Select(bson.M{"txn-queue": 0}).All(&docs) + err = coll.Find(nil).Sort("_id").All(&docs) c.Assert(err, jc.ErrorIsNil) + for i, d := range docs { + doc := d + delete(doc, "txn-queue") + delete(doc, "txn-revno") + docs[i] = doc + } c.Assert(docs, jc.DeepEquals, expected) } } -func (s *upgradesSuite) setupAddDefaultEndpointBindingsToServices(c *gc.C) []*Application { - // Add an owner user. - stateOwner, err := s.state.AddUser("bob", "notused", "notused", "bob") - c.Assert(err, jc.ErrorIsNil) - ownerTag := stateOwner.UserTag() - _, err = s.state.AddModelUser(UserAccessSpec{ - User: ownerTag, - CreatedBy: ownerTag, - DisplayName: "", - Access: description.ReadAccess, - }) - c.Assert(err, jc.ErrorIsNil) - - // Add a couple of test spaces - _, err = s.state.AddSpace("db", "", nil, false) - c.Assert(err, jc.ErrorIsNil) - _, err = s.state.AddSpace("apps", "", nil, true) - c.Assert(err, jc.ErrorIsNil) - - // Add some testing charms for the services. - charms := []*Charm{ - AddTestingCharm(c, s.state, "wordpress"), - AddTestingCharm(c, s.state, "mysql"), - } - - // Add a few services using the charms above: with no bindings, with just - // defaults, and with explicitly given bindings. For the first case we need - // to manually remove the added default bindings. 
- wpBindings := map[string]string{ - "db": "db", - "url": "apps", - } - msBindings := map[string]string{ - "server": "db", - } - services := []*Application{ - AddTestingService(c, s.state, "wp-no-bindings", charms[0]), - AddTestingService(c, s.state, "ms-no-bindings", charms[1]), - - AddTestingService(c, s.state, "wp-default-bindings", charms[0]), - AddTestingService(c, s.state, "ms-default-bindings", charms[1]), - - AddTestingServiceWithBindings(c, s.state, "wp-given-bindings", charms[0], wpBindings), - AddTestingServiceWithBindings(c, s.state, "ms-given-bindings", charms[1], msBindings), - } +func (s *upgradesSuite) TestRenameAddModelPermission(c *gc.C) { + coll, closer := s.state.getRawCollection(permissionsC) + defer closer() - // Drop the added endpoint bindings doc directly for the first two services. - ops := []txn.Op{ - removeEndpointBindingsOp(services[0].globalKey()), - removeEndpointBindingsOp(services[1].globalKey()), - } - err = s.state.runTransaction(ops) + var initialPermissions []bson.M + err := coll.Find(nil).Sort("_id").All(&initialPermissions) c.Assert(err, jc.ErrorIsNil) + c.Assert(initialPermissions, gc.HasLen, 2) - return services -} - -func (s *upgradesSuite) getServicesBindings(c *gc.C, services []*Application) map[string]map[string]string { - currentBindings := make(map[string]map[string]string, len(services)) - for i := range services { - applicationname := services[i].Name() - serviceBindings, err := services[i].EndpointBindings() - if err != nil { - c.Fatalf("unexpected error getting service %q bindings: %v", applicationname, err) - } - currentBindings[applicationname] = serviceBindings - } - return currentBindings -} - -func (s *upgradesSuite) testAddDefaultEndpointBindingsToServices(c *gc.C, runTwice bool) { - services := s.setupAddDefaultEndpointBindingsToServices(c) - initialBindings := s.getServicesBindings(c, services) - wpAllDefaults := map[string]string{ - // relation names - "url": "", - "logging-dir": "", - "monitoring-port": 
"", - "db": "", - "cache": "", - // extra-bindings - "db-client": "", - "admin-api": "", - "foo-bar": "", - } - msAllDefaults := map[string]string{ - "server": "", - } - expectedInitialAndFinal := map[string]map[string]string{ - "wp-no-bindings": wpAllDefaults, - "wp-default-bindings": wpAllDefaults, - "wp-given-bindings": map[string]string{ - "url": "apps", - "logging-dir": "", - "monitoring-port": "", - "db": "db", - "cache": "", - "db-client": "", - "admin-api": "", - "foo-bar": "", - }, - - "ms-no-bindings": msAllDefaults, - "ms-default-bindings": msAllDefaults, - "ms-given-bindings": map[string]string{ - "server": "db", + err = coll.Insert( + permissionDoc{ + ID: "uuid#fred", + ObjectGlobalKey: "c#uuid", + SubjectGlobalKey: "fred", + Access: "superuser", + }, + permissionDoc{ + ID: "uuid#mary@external", + ObjectGlobalKey: "c#uuid", + SubjectGlobalKey: "mary@external", + Access: "addmodel", }, - } - c.Assert(initialBindings, jc.DeepEquals, expectedInitialAndFinal) - - assertFinalBindings := func() { - finalBindings := s.getServicesBindings(c, services) - c.Assert(finalBindings, jc.DeepEquals, expectedInitialAndFinal) - } - err := AddDefaultEndpointBindingsToServices(s.state) + ) c.Assert(err, jc.ErrorIsNil) - assertFinalBindings() - if runTwice { - err = AddDefaultEndpointBindingsToServices(s.state) - c.Assert(err, jc.ErrorIsNil, gc.Commentf("idempotency check failed!")) - assertFinalBindings() + for i, inital := range initialPermissions { + perm := inital + delete(perm, "txn-queue") + delete(perm, "txn-revno") + initialPermissions[i] = perm } -} -func (s *upgradesSuite) TestAddDefaultEndpointBindingsToServices(c *gc.C) { - s.testAddDefaultEndpointBindingsToServices(c, false) -} - -func (s *upgradesSuite) TestAddDefaultEndpointBindingsToServicesIdempotent(c *gc.C) { - s.testAddDefaultEndpointBindingsToServices(c, true) + expected := []bson.M{initialPermissions[0], initialPermissions[1], { + "_id": "uuid#fred", + "object-global-key": "c#uuid", + 
"subject-global-key": "fred", + "access": "superuser", + }, { + "_id": "uuid#mary@external", + "object-global-key": "c#uuid", + "subject-global-key": "mary@external", + "access": "add-model", + }} + s.assertUpgradedData(c, RenameAddModelPermission, coll, expected) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/upgrade_test.go juju-core-2.0.0/src/github.com/juju/juju/state/upgrade_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/upgrade_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/upgrade_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -5,7 +5,7 @@ import ( "fmt" - "time" + "time" // Only used for time types. "github.com/juju/errors" jc "github.com/juju/testing/checkers" diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/useraccess.go juju-core-2.0.0/src/github.com/juju/juju/state/useraccess.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/useraccess.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/useraccess.go 2016-10-13 14:31:49.000000000 +0000 @@ -8,11 +8,11 @@ "strings" "time" + "github.com/juju/errors" "gopkg.in/juju/names.v2" "gopkg.in/mgo.v2/txn" - "github.com/juju/errors" - "github.com/juju/juju/core/description" + "github.com/juju/juju/permission" ) type userAccessDoc struct { @@ -30,31 +30,41 @@ User names.UserTag CreatedBy names.UserTag DisplayName string - Access description.Access + Access permission.Access } -// AddModelUser adds a new user for the current model to the database. -func (st *State) AddModelUser(spec UserAccessSpec) (description.UserAccess, error) { - if err := description.ValidateModelAccess(spec.Access); err != nil { - return description.UserAccess{}, errors.Annotate(err, "adding model user") +// userAccessTarget defines the target of a user access granting. 
+type userAccessTarget struct { + uuid string + globalKey string +} + +// AddModelUser adds a new user for the model identified by modelUUID to the database. +func (st *State) AddModelUser(modelUUID string, spec UserAccessSpec) (permission.UserAccess, error) { + if err := permission.ValidateModelAccess(spec.Access); err != nil { + return permission.UserAccess{}, errors.Annotate(err, "adding model user") + } + target := userAccessTarget{ + uuid: modelUUID, + globalKey: modelGlobalKey, } - return st.addUserAccess(spec, modelGlobalKey) + return st.addUserAccess(spec, target) } // AddControllerUser adds a new user for the curent controller to the database. -func (st *State) AddControllerUser(spec UserAccessSpec) (description.UserAccess, error) { - if err := description.ValidateControllerAccess(spec.Access); err != nil { - return description.UserAccess{}, errors.Annotate(err, "adding controller user") +func (st *State) AddControllerUser(spec UserAccessSpec) (permission.UserAccess, error) { + if err := permission.ValidateControllerAccess(spec.Access); err != nil { + return permission.UserAccess{}, errors.Annotate(err, "adding controller user") } - return st.addUserAccess(spec, controllerGlobalKey) + return st.addUserAccess(spec, userAccessTarget{globalKey: controllerGlobalKey}) } -func (st *State) addUserAccess(spec UserAccessSpec, targetGlobalKey string) (description.UserAccess, error) { +func (st *State) addUserAccess(spec UserAccessSpec, target userAccessTarget) (permission.UserAccess, error) { // Ensure local user exists in state before adding them as an model user. 
if spec.User.IsLocal() { localUser, err := st.User(spec.User) if err != nil { - return description.UserAccess{}, errors.Annotate(err, fmt.Sprintf("user %q does not exist locally", spec.User.Name())) + return permission.UserAccess{}, errors.Annotate(err, fmt.Sprintf("user %q does not exist locally", spec.User.Name())) } if spec.DisplayName == "" { spec.DisplayName = localUser.DisplayName() @@ -64,7 +74,7 @@ // Ensure local createdBy user exists. if spec.CreatedBy.IsLocal() { if _, err := st.User(spec.CreatedBy); err != nil { - return description.UserAccess{}, errors.Annotatef(err, "createdBy user %q does not exist locally", spec.CreatedBy.Name()) + return permission.UserAccess{}, errors.Annotatef(err, "createdBy user %q does not exist locally", spec.CreatedBy.Name()) } } var ( @@ -72,66 +82,66 @@ err error targetTag names.Tag ) - switch targetGlobalKey { + switch target.globalKey { case modelGlobalKey: ops = createModelUserOps( - st.ModelUUID(), + target.uuid, spec.User, spec.CreatedBy, spec.DisplayName, - nowToTheSecond(), + st.NowToTheSecond(), spec.Access) - targetTag = st.ModelTag() + targetTag = names.NewModelTag(target.uuid) case controllerGlobalKey: ops = createControllerUserOps( st.ControllerUUID(), spec.User, spec.CreatedBy, spec.DisplayName, - nowToTheSecond(), + st.NowToTheSecond(), spec.Access) - targetTag = names.NewControllerTag(st.ControllerUUID()) + targetTag = st.controllerTag default: - return description.UserAccess{}, errors.NotSupportedf("user access global key %q", targetGlobalKey) + return permission.UserAccess{}, errors.NotSupportedf("user access global key %q", target.globalKey) } - err = st.runTransaction(ops) + err = st.runTransactionFor(target.uuid, ops) if err == txn.ErrAborted { - err = errors.AlreadyExistsf("user access %q", spec.User.Canonical()) + err = errors.AlreadyExistsf("user access %q", spec.User.Id()) } if err != nil { - return description.UserAccess{}, errors.Trace(err) + return permission.UserAccess{}, errors.Trace(err) } 
return st.UserAccess(spec.User, targetTag) } // userAccessID returns the document id of the user access. func userAccessID(user names.UserTag) string { - username := user.Canonical() + username := user.Id() return strings.ToLower(username) } -// NewModelUserAccess returns a new description.UserAccess for the given userDoc and +// NewModelUserAccess returns a new permission.UserAccess for the given userDoc and // current Model. -func NewModelUserAccess(st *State, userDoc userAccessDoc) (description.UserAccess, error) { - perm, err := st.userPermission(modelGlobalKey, userGlobalKey(strings.ToLower(userDoc.UserName))) +func NewModelUserAccess(st *State, userDoc userAccessDoc) (permission.UserAccess, error) { + perm, err := st.userPermission(modelKey(userDoc.ObjectUUID), userGlobalKey(strings.ToLower(userDoc.UserName))) if err != nil { - return description.UserAccess{}, errors.Annotate(err, "obtaining model permission") + return permission.UserAccess{}, errors.Annotate(err, "obtaining model permission") } return newUserAccess(perm, userDoc, names.NewModelTag(userDoc.ObjectUUID)), nil } -// NewControllerUserAccess returns a new description.UserAccess for the given userDoc and +// NewControllerUserAccess returns a new permission.UserAccess for the given userDoc and // current Controller. 
-func NewControllerUserAccess(st *State, userDoc userAccessDoc) (description.UserAccess, error) { - perm, err := st.userPermission(controllerGlobalKey, userGlobalKey(strings.ToLower(userDoc.UserName))) +func NewControllerUserAccess(st *State, userDoc userAccessDoc) (permission.UserAccess, error) { + perm, err := st.controllerUserPermission(controllerKey(st.ControllerUUID()), userGlobalKey(strings.ToLower(userDoc.UserName))) if err != nil { - return description.UserAccess{}, errors.Annotate(err, "obtaining controller permission") + return permission.UserAccess{}, errors.Annotate(err, "obtaining controller permission") } return newUserAccess(perm, userDoc, names.NewControllerTag(userDoc.ObjectUUID)), nil } -func newUserAccess(perm *permission, userDoc userAccessDoc, object names.Tag) description.UserAccess { - return description.UserAccess{ +func newUserAccess(perm *userPermission, userDoc userAccessDoc, object names.Tag) permission.UserAccess { + return permission.UserAccess{ UserID: userDoc.ID, UserTag: names.NewUserTag(userDoc.UserName), Object: object, @@ -143,15 +153,22 @@ } } -// UserAccess returns a new description.UserAccess for the passed subject and target. -func (st *State) UserAccess(subject names.UserTag, target names.Tag) (description.UserAccess, error) { +// UserAccess returns a new permission.UserAccess for the passed subject and target. 
+func (st *State) UserAccess(subject names.UserTag, target names.Tag) (permission.UserAccess, error) { + if subject.IsLocal() { + _, err := st.User(subject) + if err != nil { + return permission.UserAccess{}, errors.Trace(err) + } + } + var ( userDoc userAccessDoc err error ) switch target.Kind() { case names.ModelTagKind: - userDoc, err = st.modelUser(subject) + userDoc, err = st.modelUser(target.Id(), subject) if err == nil { return NewModelUserAccess(st, userDoc) } @@ -161,27 +178,27 @@ return NewControllerUserAccess(st, userDoc) } default: - return description.UserAccess{}, errors.NotValidf("%q as a target", target.Kind()) + return permission.UserAccess{}, errors.NotValidf("%q as a target", target.Kind()) } - return description.UserAccess{}, errors.Trace(err) + return permission.UserAccess{}, errors.Trace(err) } // SetUserAccess sets level on to . -func (st *State) SetUserAccess(subject names.UserTag, target names.Tag, access description.Access) (description.UserAccess, error) { +func (st *State) SetUserAccess(subject names.UserTag, target names.Tag, access permission.Access) (permission.UserAccess, error) { err := access.Validate() if err != nil { - return description.UserAccess{}, errors.Trace(err) + return permission.UserAccess{}, errors.Trace(err) } switch target.Kind() { case names.ModelTagKind: - err = st.setModelAccess(access, userGlobalKey(userAccessID(subject))) + err = st.setModelAccess(access, userGlobalKey(userAccessID(subject)), target.Id()) case names.ControllerTagKind: err = st.setControllerAccess(access, userGlobalKey(userAccessID(subject))) default: - return description.UserAccess{}, errors.NotValidf("%q as a target", target.Kind()) + return permission.UserAccess{}, errors.NotValidf("%q as a target", target.Kind()) } if err != nil { - return description.UserAccess{}, errors.Trace(err) + return permission.UserAccess{}, errors.Trace(err) } return st.UserAccess(subject, target) } diff -Nru 
juju-core-2.0~beta15/src/github.com/juju/juju/state/user.go juju-core-2.0.0/src/github.com/juju/juju/state/user.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/user.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/user.go 2016-10-13 14:31:49.000000000 +0000 @@ -2,7 +2,7 @@ // Licensed under the AGPLv3, see LICENCE file for details. // NOTE: the users that are being stored in the database here are only -// the local users, like "admin" or "bob" (@local). In the world +// the local users, like "admin" or "bob". In the world // where we have external user providers hooked up, there are no records // in the database for users that are authenticated elsewhere. @@ -21,6 +21,8 @@ "gopkg.in/mgo.v2" "gopkg.in/mgo.v2/bson" "gopkg.in/mgo.v2/txn" + + "github.com/juju/juju/permission" ) const userGlobalKeyPrefix = "us" @@ -70,7 +72,7 @@ } nameToLower := strings.ToLower(name) - dateCreated := nowToTheSecond() + dateCreated := st.NowToTheSecond() user := &User{ st: st, doc: userDoc{ @@ -144,13 +146,11 @@ }} return ops, nil } - return st.run(buildTxn) } -func createInitialUserOps(controllerUUID string, user names.UserTag, password, salt string) []txn.Op { +func createInitialUserOps(controllerUUID string, user names.UserTag, password, salt string, dateCreated time.Time) []txn.Op { nameToLower := strings.ToLower(user.Name()) - dateCreated := nowToTheSecond() doc := userDoc{ DocID: nameToLower, Name: user.Name(), @@ -171,7 +171,8 @@ names.NewUserTag(user.Name()), user.Name(), dateCreated, - defaultControllerPermission) + // first user is controller admin. + permission.SuperuserAccess) ops = append(ops, controllerUserOps...) return ops @@ -198,7 +199,7 @@ // User returns the state User for the given name. 
func (st *State) User(tag names.UserTag) (*User, error) { if !tag.IsLocal() { - return nil, errors.NotFoundf("user %q", tag.Canonical()) + return nil, errors.NotFoundf("user %q", tag.Id()) } user := &User{st: st} if err := st.getUser(tag.Name(), &user.doc); err != nil { @@ -290,9 +291,9 @@ LastLogin time.Time `bson:"last-login"` } -// String returns "@local" where is the Name of the user. +// String returns "" where is the Name of the user. func (u *User) String() string { - return u.UserTag().Canonical() + return u.UserTag().Id() } // Name returns the User name. @@ -346,13 +347,13 @@ return lastLogin.LastLogin.UTC(), nil } -// nowToTheSecond returns the current time in UTC to the nearest second. -// We use this for a time source that is not more precise than we can -// handle. When serializing time in and out of mongo, we lose enough -// precision that it's misleading to store any more than precision to -// the second. -// TODO(fwereade): 2016-03-17 lp:1558657 -var nowToTheSecond = func() time.Time { return time.Now().Round(time.Second).UTC() } +// NowToTheSecond returns the current time in UTC to the nearest second. We use +// this for a time source that is not more precise than we can handle. When +// serializing time in and out of mongo, we lose enough precision that it's +// misleading to store any more than precision to the second. +func (st *State) NowToTheSecond() time.Time { + return st.clock.Now().Round(time.Second).UTC() +} // NeverLoggedInError is used to indicate that a user has never logged in. 
type NeverLoggedInError string @@ -388,7 +389,7 @@ lastLogin := userLastLoginDoc{ DocID: u.doc.DocID, ModelUUID: u.st.ModelUUID(), - LastLogin: nowToTheSecond(), + LastLogin: u.st.NowToTheSecond(), } _, err = lastLoginsW.UpsertId(lastLogin.DocID, lastLogin) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/user_internal_test.go juju-core-2.0.0/src/github.com/juju/juju/state/user_internal_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/user_internal_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/user_internal_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -9,7 +9,8 @@ gc "gopkg.in/check.v1" "gopkg.in/juju/names.v2" - "github.com/juju/juju/core/description" + "github.com/juju/juju/permission" + "github.com/juju/juju/testing" ) type internalUserSuite struct { @@ -20,7 +21,7 @@ func (s *internalUserSuite) TestCreateInitialUserOps(c *gc.C) { tag := names.NewUserTag("AdMiN") - ops := createInitialUserOps(s.state.ControllerUUID(), tag, "abc", "salt") + ops := createInitialUserOps(s.state.ControllerUUID(), tag, "abc", "salt", testing.ZeroTime()) c.Assert(ops, gc.HasLen, 3) op := ops[0] c.Assert(op.Id, gc.Equals, "admin") @@ -33,19 +34,19 @@ // controller user permissions op = ops[1] permdoc := op.Insert.(*permissionDoc) - c.Assert(permdoc.Access, gc.Equals, string(description.LoginAccess)) - c.Assert(permdoc.ID, gc.Equals, permissionID(controllerGlobalKey, userGlobalKey(strings.ToLower(tag.Canonical())))) - c.Assert(permdoc.SubjectGlobalKey, gc.Equals, userGlobalKey(strings.ToLower(tag.Canonical()))) - c.Assert(permdoc.ObjectGlobalKey, gc.Equals, controllerGlobalKey) + c.Assert(permdoc.Access, gc.Equals, string(permission.SuperuserAccess)) + c.Assert(permdoc.ID, gc.Equals, permissionID(controllerKey(s.state.ControllerUUID()), userGlobalKey(strings.ToLower(tag.Id())))) + c.Assert(permdoc.SubjectGlobalKey, gc.Equals, userGlobalKey(strings.ToLower(tag.Id()))) + c.Assert(permdoc.ObjectGlobalKey, 
gc.Equals, controllerKey(s.state.ControllerUUID())) // controller user op = ops[2] cudoc := op.Insert.(*userAccessDoc) - c.Assert(cudoc.ID, gc.Equals, "admin@local") + c.Assert(cudoc.ID, gc.Equals, "admin") c.Assert(cudoc.ObjectUUID, gc.Equals, s.state.ControllerUUID()) - c.Assert(cudoc.UserName, gc.Equals, "AdMiN@local") + c.Assert(cudoc.UserName, gc.Equals, "AdMiN") c.Assert(cudoc.DisplayName, gc.Equals, "AdMiN") - c.Assert(cudoc.CreatedBy, gc.Equals, "AdMiN@local") + c.Assert(cudoc.CreatedBy, gc.Equals, "AdMiN") } func (s *internalUserSuite) TestCaseNameVsId(c *gc.C) { diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/userpermission.go juju-core-2.0.0/src/github.com/juju/juju/state/userpermission.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/userpermission.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/userpermission.go 2016-10-13 14:31:49.000000000 +0000 @@ -11,12 +11,12 @@ "gopkg.in/mgo.v2/bson" "gopkg.in/mgo.v2/txn" - "github.com/juju/juju/core/description" + "github.com/juju/juju/permission" ) // permission represents the permission a user has // on a given scope. -type permission struct { +type userPermission struct { doc permissionDoc } @@ -31,75 +31,85 @@ Access string `bson:"access"` } -func stringToAccess(a string) description.Access { - return description.Access(a) +func stringToAccess(a string) permission.Access { + return permission.Access(a) } -func accessToString(a description.Access) string { +func accessToString(a permission.Access) string { return string(a) } // userPermission returns a Permission for the given Subject and User. 
-func (st *State) userPermission(objectKey, subjectKey string) (*permission, error) { - userPermission := &permission{} +func (st *State) userPermission(objectGlobalKey, subjectGlobalKey string) (*userPermission, error) { + result := &userPermission{} permissions, closer := st.getCollection(permissionsC) defer closer() - id := permissionID(objectKey, subjectKey) - err := permissions.FindId(st.docID(id)).One(&userPermission.doc) + id := permissionID(objectGlobalKey, subjectGlobalKey) + err := permissions.FindId(id).One(&result.doc) if err == mgo.ErrNotFound { return nil, errors.NotFoundf("user permissions for user %q", id) } - return userPermission, nil - + return result, nil } -// globalUserPermission returns a Permission for the given Subject and User. -func (st *State) globalUserPermission(objectKey, subjectKey string) (*permission, error) { - userPermission := &permission{} - permissions, closer := st.getRawCollection(permissionsC) +// controllerUserPermission returns a Permission for the given Subject and User. +func (st *State) controllerUserPermission(objectGlobalKey, subjectGlobalKey string) (*userPermission, error) { + result := &userPermission{} + + permissions, closer := st.getCollection(permissionsC) defer closer() - id := permissionID(objectKey, subjectKey) - err := permissions.FindId(st.docID(id)).One(&userPermission.doc) + id := permissionID(objectGlobalKey, subjectGlobalKey) + err := permissions.FindId(id).One(&result.doc) if err == mgo.ErrNotFound { return nil, errors.NotFoundf("user permissions for user %q", id) } - return userPermission, nil - + return result, nil } // isReadOnly returns whether or not the user has write access or only // read access to the model. 
-func (p *permission) isReadOnly() bool { - return stringToAccess(p.doc.Access) == description.UndefinedAccess || stringToAccess(p.doc.Access) == description.ReadAccess +func (p *userPermission) isReadOnly() bool { + return stringToAccess(p.doc.Access) == permission.NoAccess || stringToAccess(p.doc.Access) == permission.ReadAccess } // isAdmin is a convenience method that -// returns whether or not the user has description.AdminAccess. -func (p *permission) isAdmin() bool { - return stringToAccess(p.doc.Access) == description.AdminAccess +// returns whether or not the user has permission.AdminAccess. +func (p *userPermission) isAdmin() bool { + return stringToAccess(p.doc.Access) == permission.AdminAccess } // isReadWrite is a convenience method that -// returns whether or not the user has description.WriteAccess. -func (p *permission) isReadWrite() bool { - return stringToAccess(p.doc.Access) == description.WriteAccess +// returns whether or not the user has permission.WriteAccess. +func (p *userPermission) isReadWrite() bool { + return stringToAccess(p.doc.Access) == permission.WriteAccess } -func (p *permission) access() description.Access { +func (p *userPermission) access() permission.Access { return stringToAccess(p.doc.Access) } -func permissionID(objectKey, subjectKey string) string { - // example: e#us#jim - // e: model global key (its always e). - // us: user key prefix. - // jim: an arbitrary username. - return fmt.Sprintf("%s#%s", objectKey, subjectKey) +func permissionID(objectGlobalKey, subjectGlobalKey string) string { + // example: e#:deadbeef#us#jim + // e: object global key + // deadbeef: object uuid + // us#jim: subject global key + // the first element (e in this example) is the global key for the object + // (model in this example) + // the second, is the : prefixed model uuid + // the third, in this example is a user with name jim, hence the globalKey + // ( a user global key) being us#jim. 
+ // another example, now with controller and user maria: + // c#:deadbeef#us#maria + // c: object global key, in this case controller. + // :deadbeef controller uuid + // us#maria: its the user global key for maria. + // if this where for model, it would be e#us#maria + return fmt.Sprintf("%s#%s", objectGlobalKey, subjectGlobalKey) } -func updatePermissionOp(objectGlobalKey, subjectGlobalKey string, access description.Access) txn.Op { +func updatePermissionOp(objectGlobalKey, subjectGlobalKey string, access permission.Access) txn.Op { return txn.Op{ C: permissionsC, Id: permissionID(objectGlobalKey, subjectGlobalKey), @@ -117,7 +127,7 @@ } } -func createPermissionOp(objectGlobalKey, subjectGlobalKey string, access description.Access) txn.Op { +func createPermissionOp(objectGlobalKey, subjectGlobalKey string, access permission.Access) txn.Op { doc := &permissionDoc{ ID: permissionID(objectGlobalKey, subjectGlobalKey), SubjectGlobalKey: subjectGlobalKey, diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/user_test.go juju-core-2.0.0/src/github.com/juju/juju/state/user_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/user_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/user_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -14,7 +14,9 @@ gc "gopkg.in/check.v1" "gopkg.in/juju/names.v2" + "github.com/juju/juju/permission" "github.com/juju/juju/state" + "github.com/juju/juju/testing" "github.com/juju/juju/testing/factory" ) @@ -47,7 +49,7 @@ password := "password" creator := "admin" - now := time.Now().Round(time.Second).UTC() + now := testing.NonZeroTime().Round(time.Second).UTC() user, err := s.State.AddUser(name, displayName, password, creator) c.Assert(err, jc.ErrorIsNil) @@ -88,11 +90,11 @@ func (s *UserSuite) TestString(c *gc.C) { user := s.Factory.MakeUser(c, &factory.UserParams{Name: "foo"}) - c.Assert(user.String(), gc.Equals, "foo@local") + c.Assert(user.String(), gc.Equals, "foo") } func (s 
*UserSuite) TestUpdateLastLogin(c *gc.C) { - now := time.Now().Round(time.Second).UTC() + now := testing.NonZeroTime().Round(time.Second).UTC() user := s.Factory.MakeUser(c, nil) err := user.UpdateLastLogin() c.Assert(err, jc.ErrorIsNil) @@ -226,6 +228,39 @@ c.Check(err, jc.Satisfies, errors.IsUserNotFound) } +func (s *UserSuite) TestRemoveUserRemovesUserAccess(c *gc.C) { + user := s.Factory.MakeUser(c, &factory.UserParams{Password: "so sekrit"}) + + // Assert user exists and can authenticate. + c.Assert(user.PasswordValid("so sekrit"), jc.IsTrue) + + s.State.SetUserAccess(user.UserTag(), s.State.ModelTag(), permission.AdminAccess) + s.State.SetUserAccess(user.UserTag(), s.State.ControllerTag(), permission.SuperuserAccess) + + uam, err := s.State.UserAccess(user.UserTag(), s.State.ModelTag()) + c.Assert(err, jc.ErrorIsNil) + c.Assert(uam.Access, gc.Equals, permission.AdminAccess) + + uac, err := s.State.UserAccess(user.UserTag(), s.State.ControllerTag()) + c.Assert(err, jc.ErrorIsNil) + c.Assert(uac.Access, gc.Equals, permission.SuperuserAccess) + + // Look for the user. + u, err := s.State.User(user.UserTag()) + c.Check(err, jc.ErrorIsNil) + c.Assert(u, jc.DeepEquals, user) + + // Remove the user. 
+ err = s.State.RemoveUser(user.UserTag()) + c.Check(err, jc.ErrorIsNil) + + uam, err = s.State.UserAccess(user.UserTag(), s.State.ModelTag()) + c.Assert(err, gc.ErrorMatches, fmt.Sprintf("%q user not found", user.UserTag().Name())) + + uac, err = s.State.UserAccess(user.UserTag(), s.State.ControllerTag()) + c.Assert(err, gc.ErrorMatches, fmt.Sprintf("%q user not found", user.UserTag().Name())) +} + func (s *UserSuite) TestDisable(c *gc.C) { user := s.Factory.MakeUser(c, &factory.UserParams{Password: "a-password"}) c.Assert(user.IsDisabled(), jc.IsFalse) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/volume.go juju-core-2.0.0/src/github.com/juju/juju/state/volume.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/volume.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/volume.go 2016-10-13 14:31:49.000000000 +0000 @@ -673,7 +673,7 @@ Update: bson.D{{"$set", bson.D{{"life", Dead}}}}, }} } - cleanupOp := st.newCleanupOp(cleanupAttachmentsForDyingVolume, v.doc.Name) + cleanupOp := newCleanupOp(cleanupAttachmentsForDyingVolume, v.doc.Name) hasAttachments := bson.D{{"attachmentcount", bson.D{{"$gt", 0}}}} return []txn.Op{{ C: volumesC, @@ -747,9 +747,8 @@ return nil, names.VolumeTag{}, errors.Annotate(err, "cannot generate volume name") } status := statusDoc{ - Status: status.StatusPending, - // TODO(fwereade): 2016-03-17 lp:1558657 - Updated: time.Now().UnixNano(), + Status: status.Pending, + Updated: st.clock.Now().UnixNano(), } doc := volumeDoc{ Name: name, @@ -1040,12 +1039,12 @@ // SetVolumeStatus sets the status of the specified volume. 
func (st *State) SetVolumeStatus(tag names.VolumeTag, volumeStatus status.Status, info string, data map[string]interface{}, updated *time.Time) error { switch volumeStatus { - case status.StatusAttaching, status.StatusAttached, status.StatusDetaching, status.StatusDetached, status.StatusDestroying: - case status.StatusError: + case status.Attaching, status.Attached, status.Detaching, status.Detached, status.Destroying: + case status.Error: if info == "" { return errors.Errorf("cannot set status %q without info", volumeStatus) } - case status.StatusPending: + case status.Pending: // If a volume is not yet provisioned, we allow its status // to be set back to pending (when a retry is to occur). v, err := st.Volume(tag) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/watcher/helpers.go juju-core-2.0.0/src/github.com/juju/juju/state/watcher/helpers.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/watcher/helpers.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/watcher/helpers.go 2016-10-13 14:31:49.000000000 +0000 @@ -5,7 +5,7 @@ import ( "github.com/juju/errors" - "launchpad.net/tomb" + "gopkg.in/tomb.v1" ) // Stopper is implemented by all watchers. 
diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/watcher/helpers_test.go juju-core-2.0.0/src/github.com/juju/juju/state/watcher/helpers_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/watcher/helpers_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/watcher/helpers_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -7,7 +7,7 @@ stderrors "errors" gc "gopkg.in/check.v1" - "launchpad.net/tomb" + "gopkg.in/tomb.v1" "github.com/juju/juju/state/watcher" ) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/watcher/watcher.go juju-core-2.0.0/src/github.com/juju/juju/state/watcher/watcher.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/watcher/watcher.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/watcher/watcher.go 2016-10-13 14:31:49.000000000 +0000 @@ -15,7 +15,7 @@ "github.com/juju/loggo" "gopkg.in/mgo.v2" "gopkg.in/mgo.v2/bson" - "launchpad.net/tomb" + "gopkg.in/tomb.v1" ) var logger = loggo.GetLogger("juju.state.watcher") diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/watcher/watcher_test.go juju-core-2.0.0/src/github.com/juju/juju/state/watcher/watcher_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/watcher/watcher_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/watcher/watcher_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -12,7 +12,7 @@ gc "gopkg.in/check.v1" "gopkg.in/mgo.v2" "gopkg.in/mgo.v2/txn" - "launchpad.net/tomb" + "gopkg.in/tomb.v1" "github.com/juju/juju/state/watcher" "github.com/juju/juju/testing" diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/watcher.go juju-core-2.0.0/src/github.com/juju/juju/state/watcher.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/watcher.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/watcher.go 2016-10-13 14:31:49.000000000 +0000 @@ -16,7 +16,7 @@ 
"gopkg.in/juju/names.v2" "gopkg.in/mgo.v2" "gopkg.in/mgo.v2/bson" - "launchpad.net/tomb" + "gopkg.in/tomb.v1" "github.com/juju/juju/instance" "github.com/juju/juju/mongo" @@ -2399,10 +2399,10 @@ } } -// WatchForModelMigration returns a notify watcher which reports when +// WatchForMigration returns a notify watcher which reports when // a migration is in progress for the model associated with the // State. -func (st *State) WatchForModelMigration() NotifyWatcher { +func (st *State) WatchForMigration() NotifyWatcher { return newMigrationActiveWatcher(st) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/workers/fixture_test.go juju-core-2.0.0/src/github.com/juju/juju/state/workers/fixture_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/workers/fixture_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/workers/fixture_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -8,11 +8,12 @@ "github.com/juju/errors" "github.com/juju/loggo" + "github.com/juju/testing" jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" "github.com/juju/juju/state/workers" - "github.com/juju/juju/testing" + jujutesting "github.com/juju/juju/testing" "github.com/juju/juju/worker" "github.com/juju/juju/worker/workertest" ) @@ -39,7 +40,7 @@ select { case worker := <-ch: return worker - case <-time.After(testing.LongWait): + case <-time.After(jujutesting.LongWait): c.Fatalf("expected worker never started") } panic("unreachable") // I hate doing this :-|. @@ -297,6 +298,9 @@ // types (as returned by the factory methods) and also wraps the // `expect` worker. func IsWorker(wrapped interface{}, expect worker.Worker) bool { + if w, ok := wrapped.(workers.DynamicLeaseManager); ok { + wrapped = w.Underlying() + } var actual worker.Worker switch wrapped := wrapped.(type) { case fakeLeaseWorker: @@ -336,13 +340,13 @@ // IsWorker, or until it times out. 
func WaitWorker(c *gc.C, getter func() interface{}, expect worker.Worker) { var delay time.Duration - timeout := time.After(testing.LongWait) + timeout := time.After(jujutesting.LongWait) for { select { case <-timeout: c.Fatalf("expected worker") case <-time.After(delay): - delay = testing.ShortWait + delay = jujutesting.ShortWait } if IsWorker(getter(), expect) { return @@ -353,7 +357,7 @@ // WaitAlarms waits until the supplied clock has sent count values on // its Alarms channel. func WaitAlarms(c *gc.C, clock *testing.Clock, count int) { - timeout := time.After(testing.LongWait) + timeout := time.After(jujutesting.LongWait) for i := 0; i < count; i++ { select { case <-timeout: diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/workers/restart.go juju-core-2.0.0/src/github.com/juju/juju/state/workers/restart.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/workers/restart.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/workers/restart.go 2016-10-13 14:31:49.000000000 +0000 @@ -11,6 +11,7 @@ "github.com/juju/loggo" "github.com/juju/utils/clock" + "github.com/juju/juju/core/lease" "github.com/juju/juju/worker" "github.com/juju/juju/worker/catacomb" ) @@ -104,16 +105,12 @@ // LeadershipManager is part of the Workers interface. func (rw *RestartWorkers) LeadershipManager() LeaseManager { - rw.mu.Lock() - defer rw.mu.Unlock() - return rw.workers.leadershipWorker + return DynamicLeaseManager{&rw.mu, &rw.workers.leadershipWorker} } // SingularManager is part of the Workers interface. func (rw *RestartWorkers) SingularManager() LeaseManager { - rw.mu.Lock() - defer rw.mu.Unlock() - return rw.workers.singularWorker + return DynamicLeaseManager{&rw.mu, &rw.workers.singularWorker} } // Kill is part of the worker.Worker interface. 
@@ -305,3 +302,33 @@ r.current = r.next r.next = nil } + +// DynamicLeaseManager is a workers.LeaseManager that calls a given function +// to acquire a fresh LeaseManager for each method call. This enables us to +// hide the fact that workers returned from RestartManager may become stale. +type DynamicLeaseManager struct { + mu *sync.Mutex + w *LeaseWorker +} + +// Claim is part of the lease.Claimer interface. +func (d DynamicLeaseManager) Claim(leaseName, holderName string, duration time.Duration) error { + return d.Underlying().Claim(leaseName, holderName, duration) +} + +// WaitUntilExpired is part of the lease.Claimer interface. +func (d DynamicLeaseManager) WaitUntilExpired(leaseName string) error { + return d.Underlying().WaitUntilExpired(leaseName) +} + +// Token is part of the lease.Checker interface. +func (d DynamicLeaseManager) Token(leaseName, holderName string) lease.Token { + return d.Underlying().Token(leaseName, holderName) +} + +// Underlying returns the current underlying LeaseManager. 
+func (d DynamicLeaseManager) Underlying() LeaseManager { + d.mu.Lock() + defer d.mu.Unlock() + return *d.w +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/state/workers/restart_test.go juju-core-2.0.0/src/github.com/juju/juju/state/workers/restart_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/state/workers/restart_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/state/workers/restart_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -175,6 +175,7 @@ fix := BasicFixture() fix.LW_errors = []error{errors.New("oof"), nil} fix.RunRestart(c, func(ctx Context, rw *workers.RestartWorkers) { + origw := rw.LeadershipManager() w := NextWorker(c, ctx.LWs()) c.Assert(w, gc.NotNil) AssertWorker(c, rw.LeadershipManager(), w) @@ -187,6 +188,11 @@ c.Assert(w, gc.NotNil) WaitWorker(c, LM_getter(rw), w2) + // The new worker should underlie the originally + // acquired leadership manager, so that restarts + // do not require callers to acquire a new manager + AssertWorker(c, origw, w2) + workertest.CleanKill(c, rw) }) } @@ -195,6 +201,7 @@ fix := BasicFixture() fix.SW_errors = []error{errors.New("oof"), nil} fix.RunRestart(c, func(ctx Context, rw *workers.RestartWorkers) { + origw := rw.SingularManager() w := NextWorker(c, ctx.SWs()) c.Assert(w, gc.NotNil) AssertWorker(c, rw.SingularManager(), w) @@ -207,6 +214,11 @@ c.Assert(w, gc.NotNil) WaitWorker(c, SM_getter(rw), w2) + // The new worker should underlie the originally + // acquired singular manager, so that restarts + // do not require callers to acquire a new manager + AssertWorker(c, origw, w2) + workertest.CleanKill(c, rw) }) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/status/status.go juju-core-2.0.0/src/github.com/juju/juju/status/status.go --- juju-core-2.0~beta15/src/github.com/juju/juju/status/status.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/status/status.go 2016-10-13 14:31:49.000000000 +0000 @@ -46,173 
+46,168 @@ const ( // Status values common to machine and unit agents. - // StatusError means the entity requires human intervention + // Error means the entity requires human intervention // in order to operate correctly. - StatusError Status = "error" + Error Status = "error" - // StatusStarted is set when: + // Started is set when: // The entity is actively participating in the model. // For unit agents, this is a state we preserve for backwards // compatibility with scripts during the life of Juju 1.x. // In Juju 2.x, the agent-state will remain “active†and scripts // will watch the unit-state instead for signals of service readiness. - StatusStarted Status = "started" + Started Status = "started" ) const ( // Status values specific to machine agents. - // StatusPending is set when: + // Pending is set when: // The machine is not yet participating in the model. - StatusPending Status = "pending" + Pending Status = "pending" - // StatusStopped is set when: + // Stopped is set when: // The machine's agent will perform no further action, other than // to set the unit to Dead at a suitable moment. - StatusStopped Status = "stopped" + Stopped Status = "stopped" - // StatusDown is set when: + // Down is set when: // The machine ought to be signalling activity, but it cannot be // detected. - StatusDown Status = "down" + Down Status = "down" ) const ( // Status values specific to unit agents. - // StatusAllocating is set when: + // Allocating is set when: // The machine on which a unit is to be hosted is still being // spun up in the cloud. - StatusAllocating Status = "allocating" + Allocating Status = "allocating" - // StatusRebooting is set when: + // Rebooting is set when: // The machine on which this agent is running is being rebooted. // The juju-agent should move from rebooting to idle when the reboot is complete. 
- StatusRebooting Status = "rebooting" + Rebooting Status = "rebooting" - // StatusExecuting is set when: + // Executing is set when: // The agent is running a hook or action. The human-readable message should reflect // which hook or action is being run. - StatusExecuting Status = "executing" + Executing Status = "executing" - // StatusIdle is set when: + // Idle is set when: // Once the agent is installed and running it will notify the Juju server and its state // becomes "idle". It will stay "idle" until some action (e.g. it needs to run a hook) or // error (e.g it loses contact with the Juju server) moves it to a different state. - StatusIdle Status = "idle" + Idle Status = "idle" - // StatusFailed is set when: + // Failed is set when: // The unit agent has failed in some way,eg the agent ought to be signalling // activity, but it cannot be detected. It might also be that the unit agent // detected an unrecoverable condition and managed to tell the Juju server about it. - StatusFailed Status = "failed" + Failed Status = "failed" - // StatusLost is set when: + // Lost is set when: // The juju agent has has not communicated with the juju server for an unexpectedly long time; // the unit agent ought to be signalling activity, but none has been detected. - StatusLost Status = "lost" + Lost Status = "lost" ) const ( // Status values specific to services and units, reflecting the // state of the software itself. - // StatusMaintenance is set when: + // Maintenance is set when: // The unit is not yet providing services, but is actively doing stuff // in preparation for providing those services. // This is a "spinning" state, not an error state. // It reflects activity on the unit itself, not on peers or related units. 
- StatusMaintenance Status = "maintenance" + Maintenance Status = "maintenance" - // StatusTerminated is set when: + // Terminated is set when: // This unit used to exist, we have a record of it (perhaps because of storage // allocated for it that was flagged to survive it). Nonetheless, it is now gone. - StatusTerminated Status = "terminated" + Terminated Status = "terminated" - // StatusUnknown is set when: + // Unknown is set when: // A unit-agent has finished calling install, config-changed, and start, // but the charm has not called status-set yet. - StatusUnknown Status = "unknown" + Unknown Status = "unknown" - // StatusWaiting is set when: + // Waiting is set when: // The unit is unable to progress to an active state because a service to // which it is related is not running. - StatusWaiting Status = "waiting" + Waiting Status = "waiting" - // StatusBlocked is set when: + // Blocked is set when: // The unit needs manual intervention to get back to the Running state. - StatusBlocked Status = "blocked" + Blocked Status = "blocked" - // StatusActive is set when: + // Active is set when: // The unit believes it is correctly offering all the services it has // been asked to offer. - StatusActive Status = "active" + Active Status = "active" ) const ( // Status values specific to storage. - // StatusAttaching indicates that the storage is being attached + // Attaching indicates that the storage is being attached // to a machine. - StatusAttaching Status = "attaching" + Attaching Status = "attaching" - // StatusAttached indicates that the storage is attached to a + // Attached indicates that the storage is attached to a // machine. - StatusAttached Status = "attached" + Attached Status = "attached" - // StatusDetaching indicates that the storage is being detached + // Detaching indicates that the storage is being detached // from a machine. 
- StatusDetaching Status = "detaching" + Detaching Status = "detaching" - // StatusDetached indicates that the storage is not attached to + // Detached indicates that the storage is not attached to // any machine. - StatusDetached Status = "detached" + Detached Status = "detached" ) const ( // Status values specific to models. - // StatusAvailable indicates that the model is available for use. - StatusAvailable Status = "available" + // Available indicates that the model is available for use. + Available Status = "available" ) const ( // Status values that are common to several entities. - // StatusDestroying indicates that the entity is being destroyed. + // Destroying indicates that the entity is being destroyed. // // This is valid for volumes, filesystems, and models. - StatusDestroying Status = "destroying" + Destroying Status = "destroying" ) // InstanceStatus const ( - StatusEmpty Status = "" - StatusProvisioning Status = "allocating" - StatusRunning Status = "running" - StatusProvisioningError Status = "provisioning error" + Empty Status = "" + Provisioning Status = "allocating" + Running Status = "running" + ProvisioningError Status = "provisioning error" ) const ( - MessageInstalling = "installing charm software" - - // StorageReadyMessage is the message set to the agent status when all storage - // attachments are properly done. - StorageReadyMessage = "storage ready" - - // PreparingStorageMessage is the message set to the agent status before trying - // to attach storages. 
- PreparingStorageMessage = "preparing storage" + MessageWaitForMachine = "waiting for machine" + MessageInstallingAgent = "installing agent" + MessageInitializingAgent = "agent initializing" + MessageInstallingCharm = "installing charm software" ) func (status Status) KnownInstanceStatus() bool { switch status { case - StatusPending, - StatusProvisioningError, - StatusAllocating, - StatusRunning, - StatusUnknown: + Pending, + ProvisioningError, + Allocating, + Running, + Unknown: return true } return false @@ -224,12 +219,12 @@ func (status Status) KnownAgentStatus() bool { switch status { case - StatusAllocating, - StatusError, - StatusFailed, - StatusRebooting, - StatusExecuting, - StatusIdle: + Allocating, + Error, + Failed, + Rebooting, + Executing, + Idle: return true } return false @@ -243,7 +238,7 @@ return true } switch status { - case StatusError: // include error so that we can filter on what the spec says is valid + case Error: // include error so that we can filter on what the spec says is valid return true default: return false @@ -255,12 +250,12 @@ func ValidWorkloadStatus(status Status) bool { switch status { case - StatusBlocked, - StatusMaintenance, - StatusWaiting, - StatusActive, - StatusUnknown, - StatusTerminated: + Blocked, + Maintenance, + Waiting, + Active, + Unknown, + Terminated: return true default: return false @@ -279,8 +274,8 @@ func ValidModelStatus(status Status) bool { switch status { case - StatusAvailable, - StatusDestroying: + Available, + Destroying: return true default: return false diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/status/status_history.go juju-core-2.0.0/src/github.com/juju/juju/status/status_history.go --- juju-core-2.0~beta15/src/github.com/juju/juju/status/status_history.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/status/status_history.go 2016-10-13 14:31:49.000000000 +0000 @@ -92,7 +92,7 @@ var repeat int var i int repeatStatus := DetailedStatus{ - Status: 
StatusIdle, + Status: Idle, Info: "", Since: &now, } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/status/status_test.go juju-core-2.0.0/src/github.com/juju/juju/status/status_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/status/status_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/status/status_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -21,47 +21,47 @@ since := time.Now() statuses := status.History{ { - Status: status.StatusActive, + Status: status.Active, Info: "unique status one", Since: &since, }, { - Status: status.StatusActive, + Status: status.Active, Info: "unique status two", Since: &since, }, { - Status: status.StatusActive, + Status: status.Active, Info: "unique status three", Since: &since, }, { - Status: status.StatusExecuting, + Status: status.Executing, Info: "repeated status one", Since: &since, }, { - Status: status.StatusIdle, + Status: status.Idle, Info: "repeated status two", Since: &since, }, { - Status: status.StatusExecuting, + Status: status.Executing, Info: "repeated status one", Since: &since, }, { - Status: status.StatusIdle, + Status: status.Idle, Info: "repeated status two", Since: &since, }, { - Status: status.StatusExecuting, + Status: status.Executing, Info: "repeated status one", Since: &since, }, { - Status: status.StatusIdle, + Status: status.Idle, Info: "repeated status two", Since: &since, }, @@ -72,32 +72,32 @@ newStatuses[5].Since = &since expectedStatuses := status.History{ { - Status: status.StatusActive, + Status: status.Active, Info: "unique status one", Since: &since, }, { - Status: status.StatusActive, + Status: status.Active, Info: "unique status two", Since: &since, }, { - Status: status.StatusActive, + Status: status.Active, Info: "unique status three", Since: &since, }, { - Status: status.StatusExecuting, + Status: status.Executing, Info: "repeated status one", Since: &since, }, { - Status: status.StatusIdle, + Status: status.Idle, Info: "repeated 
status two", Since: &since, }, { - Status: status.StatusIdle, + Status: status.Idle, Info: "last 2 statuses repeated 2 times", Since: &since, }, diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/storage/interface.go juju-core-2.0.0/src/github.com/juju/juju/storage/interface.go --- juju-core-2.0~beta15/src/github.com/juju/juju/storage/interface.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/storage/interface.go 2016-10-13 14:31:49.000000000 +0000 @@ -27,7 +27,11 @@ type ProviderRegistry interface { // StorageProviderTypes returns the storage provider types // contained within this registry. - StorageProviderTypes() []ProviderType + // + // Determining the supported storage providers may be dynamic. + // Multiple calls for the same registry must return consistent + // results. + StorageProviderTypes() ([]ProviderType, error) // StorageProvider returns the storage provider with the given // provider type. StorageProvider must return an errors satisfying diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/storage/looputil/loop.go juju-core-2.0.0/src/github.com/juju/juju/storage/looputil/loop.go --- juju-core-2.0~beta15/src/github.com/juju/juju/storage/looputil/loop.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/storage/looputil/loop.go 2016-10-13 14:31:49.000000000 +0000 @@ -64,6 +64,9 @@ if !strings.HasPrefix(info.backingFile, prefix) { continue } + if info.backingInode == 0 { + continue + } rootedBackingFile := path.Join(rootfs, info.backingFile) st, err := m.stat(rootedBackingFile) if os.IsNotExist(err) { @@ -110,7 +113,8 @@ // e.g. 
"/dev/loop0: [0021]:7504142 (/tmp/test.dat)" // "/dev/loop0: [002f]:7504142 (/tmp/test.dat (deleted))" -var loopDeviceInfoRegexp = regexp.MustCompile(`^([^ ]+): \[[[:xdigit:]]+\]:(\d+) \((.*?)(?: \(.*\))?\)$`) +// "/dev/loop0: []: (/var/lib/lxc-btrfs.img)" +var loopDeviceInfoRegexp = regexp.MustCompile(`^([^ ]+): \[[[:xdigit:]]*\]:(\d*) \((.*?)(?: \(.*\))?\)$`) func parseLoopDeviceInfo(line string) (loopDeviceInfo, error) { submatch := loopDeviceInfoRegexp.FindStringSubmatch(line) @@ -119,9 +123,15 @@ } name := submatch[1] backingFile := submatch[3] - backingInode, err := strconv.ParseUint(submatch[2], 10, 64) - if err != nil { - return loopDeviceInfo{}, errors.Annotate(err, "parsing inode") + var ( + backingInode uint64 + err error + ) + if submatch[2] != "" { + backingInode, err = strconv.ParseUint(submatch[2], 10, 64) + if err != nil { + return loopDeviceInfo{}, errors.Annotate(err, "parsing inode") + } } return loopDeviceInfo{name, backingFile, backingInode}, nil } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/storage/looputil/loop_test.go juju-core-2.0.0/src/github.com/juju/juju/storage/looputil/loop_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/storage/looputil/loop_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/storage/looputil/loop_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -187,6 +187,15 @@ }) } +func (s *LoopUtilSuite) TestDetachLoopDevicesListEmptyInodeOK(c *gc.C) { + commands := &mockRunCommand{c: c} + defer commands.assertDrained() + commands.expect("losetup", "-a").respond("/dev/loop0: []: (/var/lib/lxc-btrfs.img)", nil) + m := looputil.NewTestLoopDeviceManager(commands.run, nil, nil) + err := m.DetachLoopDevices("", "") + c.Assert(err, jc.ErrorIsNil) +} + type mockFileInfo struct { os.FileInfo inode uint64 diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/storage/provider/common_test.go juju-core-2.0.0/src/github.com/juju/juju/storage/provider/common_test.go --- 
juju-core-2.0~beta15/src/github.com/juju/juju/storage/provider/common_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/storage/provider/common_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -21,7 +21,9 @@ func (s *providerCommonSuite) TestCommonProvidersExported(c *gc.C) { registry := provider.CommonStorageProviders() var common []storage.ProviderType - for _, pType := range registry.StorageProviderTypes() { + pTypes, err := registry.StorageProviderTypes() + c.Assert(err, jc.ErrorIsNil) + for _, pType := range pTypes { common = append(common, pType) p, err := registry.StorageProvider(pType) c.Assert(err, jc.ErrorIsNil) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/storage/registries.go juju-core-2.0.0/src/github.com/juju/juju/storage/registries.go --- juju-core-2.0~beta15/src/github.com/juju/juju/storage/registries.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/storage/registries.go 2016-10-13 14:31:49.000000000 +0000 @@ -15,12 +15,16 @@ type ChainedProviderRegistry []ProviderRegistry // StorageProviderTypes implements ProviderRegistry. -func (r ChainedProviderRegistry) StorageProviderTypes() []ProviderType { +func (r ChainedProviderRegistry) StorageProviderTypes() ([]ProviderType, error) { var result []ProviderType for _, r := range r { - result = append(result, r.StorageProviderTypes()...) + types, err := r.StorageProviderTypes() + if err != nil { + return nil, errors.Trace(err) + } + result = append(result, types...) } - return result + return result, nil } // StorageProvider implements ProviderRegistry. @@ -46,7 +50,7 @@ } // StorageProviderTypes implements ProviderRegistry. 
-func (r StaticProviderRegistry) StorageProviderTypes() []ProviderType { +func (r StaticProviderRegistry) StorageProviderTypes() ([]ProviderType, error) { typeStrings := make([]string, 0, len(r.Providers)) for t := range r.Providers { typeStrings = append(typeStrings, string(t)) @@ -56,7 +60,7 @@ for i, s := range typeStrings { types[i] = ProviderType(s) } - return types + return types, nil } // StorageProvider implements ProviderRegistry. diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/testcharms/charm.go juju-core-2.0.0/src/github.com/juju/juju/testcharms/charm.go --- juju-core-2.0~beta15/src/github.com/juju/juju/testcharms/charm.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/testcharms/charm.go 2016-10-13 14:31:49.000000000 +0000 @@ -19,6 +19,25 @@ // Repo provides access to the test charm repository. var Repo = testing.NewRepo("charm-repo", "quantal") +// UploadCharmWithMeta pushes a new charm to the charmstore. +// The uploaded charm takes the supplied charmURL with metadata.yaml and metrics.yaml +// to define the charm, rather than relying on the charm to exist on disk. +// This allows you to create charm definitions directly in yaml and have them uploaded +// here for us in tests. +// +// For convenience the charm is also made public +func UploadCharmWithMeta(c *gc.C, client *csclient.Client, charmURL, meta, metrics string, revision int) (*charm.URL, charm.Charm) { + ch := testing.NewCharm(c, testing.CharmSpec{ + Meta: meta, + Metrics: metrics, + Revision: revision, + }) + chURL, err := client.UploadCharm(charm.MustParseURL(charmURL), ch) + c.Assert(err, jc.ErrorIsNil) + SetPublic(c, client, chURL) + return chURL, ch +} + // UploadCharm uploads a charm using the given charm store client, and returns // the resulting charm URL and charm. 
// diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/testcharms/charm-repo/quantal/metered/metrics.yaml juju-core-2.0.0/src/github.com/juju/juju/testcharms/charm-repo/quantal/metered/metrics.yaml --- juju-core-2.0~beta15/src/github.com/juju/juju/testcharms/charm-repo/quantal/metered/metrics.yaml 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/testcharms/charm-repo/quantal/metered/metrics.yaml 2016-10-13 14:31:49.000000000 +0000 @@ -1,3 +1,5 @@ +plan: + required: true metrics: pings: type: gauge diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/testcharms/charm-repo/quantal/mysql/icon.svg juju-core-2.0.0/src/github.com/juju/juju/testcharms/charm-repo/quantal/mysql/icon.svg --- juju-core-2.0~beta15/src/github.com/juju/juju/testcharms/charm-repo/quantal/mysql/icon.svg 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/testcharms/charm-repo/quantal/mysql/icon.svg 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,335 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + image/svg+xml + + + + + + + + + + + + + diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/testcharms/charm-repo/quantal/storage-filesystem-subordinate/hooks/install juju-core-2.0.0/src/github.com/juju/juju/testcharms/charm-repo/quantal/storage-filesystem-subordinate/hooks/install --- juju-core-2.0~beta15/src/github.com/juju/juju/testcharms/charm-repo/quantal/storage-filesystem-subordinate/hooks/install 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/testcharms/charm-repo/quantal/storage-filesystem-subordinate/hooks/install 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,2 @@ +#!/bin/bash +echo "Done!" 
diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/testcharms/charm-repo/quantal/storage-filesystem-subordinate/metadata.yaml juju-core-2.0.0/src/github.com/juju/juju/testcharms/charm-repo/quantal/storage-filesystem-subordinate/metadata.yaml --- juju-core-2.0~beta15/src/github.com/juju/juju/testcharms/charm-repo/quantal/storage-filesystem-subordinate/metadata.yaml 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/testcharms/charm-repo/quantal/storage-filesystem-subordinate/metadata.yaml 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,14 @@ +name: storage-filesystem-subordinate +summary: A charm needing filesystem storage +description: See above +subordinate: true +requires: + info: + interface: juju-info + scope: container +storage: + data: + type: filesystem + read-only: true + multiple: + range: 1+ diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/testcharms/charm-repo/quantal/storage-filesystem-subordinate/revision juju-core-2.0.0/src/github.com/juju/juju/testcharms/charm-repo/quantal/storage-filesystem-subordinate/revision --- juju-core-2.0~beta15/src/github.com/juju/juju/testcharms/charm-repo/quantal/storage-filesystem-subordinate/revision 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/testcharms/charm-repo/quantal/storage-filesystem-subordinate/revision 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1 @@ +1 \ No newline at end of file diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/testing/base.go juju-core-2.0.0/src/github.com/juju/juju/testing/base.go --- juju-core-2.0~beta15/src/github.com/juju/juju/testing/base.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/testing/base.go 2016-10-13 14:31:49.000000000 +0000 @@ -6,6 +6,7 @@ import ( "fmt" "os" + "reflect" "runtime" "strings" "time" @@ -18,6 +19,7 @@ "github.com/juju/utils/featureflag" jujuos "github.com/juju/utils/os" "github.com/juju/utils/series" + "github.com/juju/utils/set" gc 
"gopkg.in/check.v1" "github.com/juju/juju/juju/osenv" @@ -33,7 +35,6 @@ // github.com/juju/testing, and this suite will be removed. // Do not use JujuOSEnvSuite when writing new tests. type JujuOSEnvSuite struct { - oldJujuXDGDataHome string oldHomeEnv string oldEnvironment map[string]string initialFeatureFlags string @@ -55,7 +56,7 @@ os.Setenv(name, "") } s.oldHomeEnv = utils.Home() - s.oldJujuXDGDataHome = osenv.SetJujuXDGDataHome("") + os.Setenv(osenv.JujuXDGDataHomeEnvKey, c.MkDir()) err := utils.SetHome("") c.Assert(err, jc.ErrorIsNil) @@ -75,7 +76,6 @@ } err := utils.SetHome(s.oldHomeEnv) c.Assert(err, jc.ErrorIsNil) - osenv.SetJujuXDGDataHome(s.oldJujuXDGDataHome) } // SkipIfPPC64EL skips the test if the arch is PPC64EL and the @@ -249,3 +249,21 @@ } return s, nil } + +// GetExportedFields return the exported fields of a struct. +func GetExportedFields(arg interface{}) set.Strings { + t := reflect.TypeOf(arg) + result := set.NewStrings() + + count := t.NumField() + for i := 0; i < count; i++ { + f := t.Field(i) + // empty PkgPath means exported field. + // see https://golang.org/pkg/reflect/#StructField + if f.PkgPath == "" { + result.Add(f.Name) + } + } + + return result +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/testing/base_test.go juju-core-2.0.0/src/github.com/juju/juju/testing/base_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/testing/base_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/testing/base_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,43 +0,0 @@ -// Copyright 2013 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. 
- -package testing_test - -import ( - "os" - - jc "github.com/juju/testing/checkers" - "github.com/juju/utils" - gc "gopkg.in/check.v1" - - "github.com/juju/juju/juju/osenv" - "github.com/juju/juju/testing" -) - -type TestingBaseSuite struct { - testing.BaseSuite -} - -var _ = gc.Suite(&TestingBaseSuite{}) - -func (s *TestingBaseSuite) SetUpTest(c *gc.C) { - err := utils.SetHome(home) - c.Assert(err, jc.ErrorIsNil) - os.Setenv("JUJU_DATA", jujuXDGDataHome) - osenv.SetJujuXDGDataHome(jujuXDGDataHome) - - s.BaseSuite.SetUpTest(c) -} - -func (s *TestingBaseSuite) TearDownTest(c *gc.C) { - s.BaseSuite.TearDownTest(c) - - // Test that the environment is restored. - c.Assert(utils.Home(), gc.Equals, home) - c.Assert(os.Getenv("JUJU_DATA"), gc.Equals, jujuXDGDataHome) -} - -func (s *TestingBaseSuite) TestFakeHomeReplacesEnvironment(c *gc.C) { - c.Assert(utils.Home(), gc.Not(gc.Equals), home) - c.Assert(os.Getenv("JUJU_DATA"), gc.Equals, "") -} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/testing/clock.go juju-core-2.0.0/src/github.com/juju/juju/testing/clock.go --- juju-core-2.0~beta15/src/github.com/juju/juju/testing/clock.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/testing/clock.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,248 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package testing - -import ( - "sort" - "sync" - "time" - - "github.com/juju/testing" - "github.com/juju/utils/clock" -) - -// timerClock exposes the underlying Clock's capabilities to a Timer. -type timerClock interface { - reset(id int, d time.Duration) bool - stop(id int) bool -} - -// Timer implements a mock clock.Timer for testing purposes. -type Timer struct { - ID int - clock timerClock -} - -// Reset is part of the clock.Timer interface. -func (t *Timer) Reset(d time.Duration) bool { - return t.clock.reset(t.ID, d) -} - -// Stop is part of the clock.Timer interface. 
-func (t *Timer) Stop() bool { - return t.clock.stop(t.ID) -} - -// stoppedTimer is a no-op implementation of clock.Timer. -type stoppedTimer struct{} - -// Reset is part of the clock.Timer interface. -func (stoppedTimer) Reset(time.Duration) bool { return false } - -// Stop is part of the clock.Timer interface. -func (stoppedTimer) Stop() bool { return false } - -// Clock implements a mock clock.Clock for testing purposes. -type Clock struct { - mu sync.Mutex - now time.Time - alarms []alarm - currentAlarmID int - notifyAlarms chan struct{} -} - -// NewClock returns a new clock set to the supplied time. If your SUT needs to -// call After, AfterFunc, or Timer.Reset more than 1024 times: (1) you have -// probably written a bad test; and (2) you'll need to read from the Alarms -// chan to keep the buffer clear. -func NewClock(now time.Time) *Clock { - return &Clock{ - now: now, - notifyAlarms: make(chan struct{}, 1024), - } -} - -// Now is part of the clock.Clock interface. -func (clock *Clock) Now() time.Time { - clock.mu.Lock() - defer clock.mu.Unlock() - return clock.now -} - -// After is part of the clock.Clock interface. -func (clock *Clock) After(d time.Duration) <-chan time.Time { - defer clock.notifyAlarm() - clock.mu.Lock() - defer clock.mu.Unlock() - notify := make(chan time.Time, 1) - if d <= 0 { - notify <- clock.now - } else { - clock.setAlarm(clock.now.Add(d), func() { notify <- clock.now }) - } - return notify -} - -// AfterFunc is part of the clock.Clock interface. -func (clock *Clock) AfterFunc(d time.Duration, f func()) clock.Timer { - defer clock.notifyAlarm() - clock.mu.Lock() - defer clock.mu.Unlock() - if d <= 0 { - f() - return &stoppedTimer{} - } - id := clock.setAlarm(clock.now.Add(d), f) - return &Timer{id, clock} -} - -// Advance advances the result of Now by the supplied duration, and sends -// the "current" time on all alarms which are no longer "in the future". 
-func (clock *Clock) Advance(d time.Duration) { - clock.mu.Lock() - defer clock.mu.Unlock() - clock.now = clock.now.Add(d) - triggered := 0 - for _, alarm := range clock.alarms { - if clock.now.Before(alarm.time) { - break - } - alarm.trigger() - triggered++ - } - clock.alarms = clock.alarms[triggered:] -} - -// Alarms returns a channel on which you can read one value for every call to -// After and AfterFunc; and for every successful Timer.Reset backed by this -// Clock. It might not be elegant but it's necessary when testing time logic -// that runs on a goroutine other than that of the test. -func (clock *Clock) Alarms() <-chan struct{} { - return clock.notifyAlarms -} - -// reset is the underlying implementation of clock.Timer.Reset, which may be -// called by any Timer backed by this Clock. -func (clock *Clock) reset(id int, d time.Duration) bool { - clock.mu.Lock() - defer clock.mu.Unlock() - - for i, alarm := range clock.alarms { - if id == alarm.ID { - defer clock.notifyAlarm() - clock.alarms[i].time = clock.now.Add(d) - sort.Sort(byTime(clock.alarms)) - return true - } - } - return false -} - -// stop is the underlying implementation of clock.Timer.Reset, which may be -// called by any Timer backed by this Clock. -func (clock *Clock) stop(id int) bool { - clock.mu.Lock() - defer clock.mu.Unlock() - - for i, alarm := range clock.alarms { - if id == alarm.ID { - clock.alarms = removeFromSlice(clock.alarms, i) - return true - } - } - return false -} - -// setAlarm adds an alarm at time t. -// It also sorts the alarms and increments the current ID by 1. -func (clock *Clock) setAlarm(t time.Time, trigger func()) int { - alarm := alarm{ - time: t, - trigger: trigger, - ID: clock.currentAlarmID, - } - clock.alarms = append(clock.alarms, alarm) - sort.Sort(byTime(clock.alarms)) - clock.currentAlarmID = clock.currentAlarmID + 1 - return alarm.ID -} - -// notifyAlarm sends a value on the channel exposed by Alarms(). 
-func (clock *Clock) notifyAlarm() { - select { - case clock.notifyAlarms <- struct{}{}: - default: - panic("alarm notification buffer full") - } -} - -// alarm records the time at which we're expected to execute trigger. -type alarm struct { - ID int - time time.Time - trigger func() -} - -// byTime is used to sort alarms by time. -type byTime []alarm - -func (a byTime) Len() int { return len(a) } -func (a byTime) Less(i, j int) bool { return a[i].time.Before(a[j].time) } -func (a byTime) Swap(i, j int) { a[i], a[j] = a[j], a[i] } - -// removeFromSlice removes item at the specified index from the slice. -func removeFromSlice(sl []alarm, index int) []alarm { - return append(sl[:index], sl[index+1:]...) -} - -type StubClock struct { - *testing.Stub - - ReturnNow time.Time - ReturnAfter <-chan time.Time - ReturnAfterFunc clock.Timer -} - -func NewStubClock(stub *testing.Stub) *StubClock { - return &StubClock{ - Stub: stub, - } -} - -func (s *StubClock) Now() time.Time { - s.AddCall("Now") - s.NextErr() // pop one off - return s.ReturnNow -} - -func (s *StubClock) After(d time.Duration) <-chan time.Time { - s.AddCall("After", d) - s.NextErr() // pop one off - return s.ReturnAfter -} - -func (s *StubClock) AfterFunc(d time.Duration, f func()) clock.Timer { - s.AddCall("AfterFunc", d, f) - s.NextErr() // pop one off - return s.ReturnAfterFunc -} - -// AutoAdvancingClock wraps a clock.Clock, calling the Advance -// function whenever After or AfterFunc are called. 
-type AutoAdvancingClock struct { - clock.Clock - Advance func(time.Duration) -} - -func (c *AutoAdvancingClock) After(d time.Duration) <-chan time.Time { - ch := c.Clock.After(d) - c.Advance(d) - return ch -} - -func (c *AutoAdvancingClock) AfterFunc(d time.Duration, f func()) clock.Timer { - t := c.Clock.AfterFunc(d, f) - c.Advance(d) - return t -} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/testing/cmdblockhelper.go juju-core-2.0.0/src/github.com/juju/juju/testing/cmdblockhelper.go --- juju-core-2.0~beta15/src/github.com/juju/juju/testing/cmdblockhelper.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/testing/cmdblockhelper.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,71 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package testing + +import ( + "strings" + + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/api/base" + "github.com/juju/juju/api/block" +) + +// CmdBlockHelper is a helper struct used to block commands. +type CmdBlockHelper struct { + blockClient *block.Client +} + +// NewCmdBlockHelper creates a block switch used in testing +// to manage desired juju blocks. +func NewCmdBlockHelper(api base.APICallCloser) CmdBlockHelper { + return CmdBlockHelper{ + blockClient: block.NewClient(api), + } +} + +// on switches on desired block and +// asserts that no errors were encountered. +func (s *CmdBlockHelper) on(c *gc.C, blockType, msg string) { + c.Assert(s.blockClient.SwitchBlockOn(blockType, msg), gc.IsNil) +} + +// BlockAllChanges switches changes block on. +// This prevents all changes to juju environment. +func (s *CmdBlockHelper) BlockAllChanges(c *gc.C, msg string) { + s.on(c, "BlockChange", msg) +} + +// BlockRemoveObject switches remove block on. 
+// This prevents any object/entity removal on juju environment +func (s *CmdBlockHelper) BlockRemoveObject(c *gc.C, msg string) { + s.on(c, "BlockRemove", msg) +} + +// BlockDestroyModel switches destroy block on. +// This prevents juju environment destruction. +func (s *CmdBlockHelper) BlockDestroyModel(c *gc.C, msg string) { + s.on(c, "BlockDestroy", msg) +} + +func (s *CmdBlockHelper) Close() { + s.blockClient.Close() +} + +// AssertBlocked is going to be removed as soon as all cmd tests mock out API. +// the corect method to call will become AssertOperationWasBlocked. +func (s *CmdBlockHelper) AssertBlocked(c *gc.C, err error, msg string) { + c.Assert(err.Error(), jc.Contains, "disabled") + stripped := strings.Replace(c.GetTestLog(), "\n", "", -1) + c.Check(stripped, gc.Matches, msg) +} + +func AssertOperationWasBlocked(c *gc.C, err error, msg string) { + c.Assert(err.Error(), jc.Contains, "disabled") + // msg is logged + stripped := strings.Replace(c.GetTestLog(), "\n", "", -1) + c.Check(stripped, gc.Matches, msg) + c.Check(stripped, jc.Contains, "disabled") +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/testing/cmd.go juju-core-2.0.0/src/github.com/juju/juju/testing/cmd.go --- juju-core-2.0~beta15/src/github.com/juju/juju/testing/cmd.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/testing/cmd.go 2016-10-13 14:31:49.000000000 +0000 @@ -7,12 +7,11 @@ "bytes" "fmt" "io/ioutil" - "strings" "github.com/juju/cmd" + "github.com/juju/gnuflag" jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" - "launchpad.net/gnuflag" ) // NewFlagSet creates a new flag set using the standard options, particularly @@ -87,15 +86,6 @@ return context, com.Run(context) } -// RunCommandInDir works like RunCommand, but runs with a context that uses dir. 
-func RunCommandInDir(c *gc.C, com cmd.Command, args []string, dir string) (*cmd.Context, error) { - if err := InitCommand(com, args); err != nil { - return nil, err - } - var context = ContextForDir(c, dir) - return context, com.Run(context) -} - // TestInit checks that a command initialises correctly with the given set of // arguments. func TestInit(c *gc.C, com cmd.Command, args []string, errPat string) { @@ -106,16 +96,3 @@ c.Assert(err, jc.ErrorIsNil) } } - -// ExtractCommandsFromHelpOutput takes the standard output from the -// command context and looks for the "commands:" string and returns the -// commands output after that. -func ExtractCommandsFromHelpOutput(ctx *cmd.Context) []string { - var namesFound []string - commandHelp := strings.SplitAfter(Stdout(ctx), "commands:")[1] - commandHelp = strings.TrimSpace(commandHelp) - for _, line := range strings.Split(commandHelp, "\n") { - namesFound = append(namesFound, strings.TrimSpace(strings.Split(line, " - ")[0])) - } - return namesFound -} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/testing/environ.go juju-core-2.0.0/src/github.com/juju/juju/testing/environ.go --- juju-core-2.0~beta15/src/github.com/juju/juju/testing/environ.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/testing/environ.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,8 +4,6 @@ package testing import ( - "os" - gitjujutesting "github.com/juju/testing" jc "github.com/juju/testing/checkers" "github.com/juju/utils" @@ -17,7 +15,6 @@ "github.com/juju/juju/controller" "github.com/juju/juju/environs/config" - "github.com/juju/juju/juju/osenv" ) // FakeAuthKeys holds the authorized key used for testing @@ -38,11 +35,14 @@ // ModelTag is a defined known valid UUID that can be used in testing. var ModelTag = names.NewModelTag("deadbeef-0bad-400d-8000-4b1d0d06f00d") +// ControllerTag is a defined known valid UUID that can be used in testing. 
+var ControllerTag = names.NewControllerTag("deadbeef-1bad-500d-9000-4b1d0d06f00d") + // FakeControllerConfig() returns an environment configuration // that is expected to be found in state for a fake controller. func FakeControllerConfig() controller.Config { return controller.Config{ - "controller-uuid": ModelTag.Id(), + "controller-uuid": ControllerTag.Id(), "ca-cert": CACert, "state-port": 1234, "api-port": 17777, @@ -99,20 +99,14 @@ type FakeJujuXDGDataHomeSuite struct { JujuOSEnvSuite gitjujutesting.FakeHomeSuite - oldJujuXDGDataHome string } func (s *FakeJujuXDGDataHomeSuite) SetUpTest(c *gc.C) { s.JujuOSEnvSuite.SetUpTest(c) s.FakeHomeSuite.SetUpTest(c) - jujuXDGDataHome := gitjujutesting.JujuXDGDataHomePath() - err := os.MkdirAll(jujuXDGDataHome, 0700) - c.Assert(err, jc.ErrorIsNil) - s.oldJujuXDGDataHome = osenv.SetJujuXDGDataHome(jujuXDGDataHome) } func (s *FakeJujuXDGDataHomeSuite) TearDownTest(c *gc.C) { - osenv.SetJujuXDGDataHome(s.oldJujuXDGDataHome) s.FakeHomeSuite.TearDownTest(c) s.JujuOSEnvSuite.TearDownTest(c) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/testing/environ_test.go juju-core-2.0.0/src/github.com/juju/juju/testing/environ_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/testing/environ_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/testing/environ_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,15 +4,11 @@ package testing_test import ( - "os" - - gitjujutesting "github.com/juju/testing" jc "github.com/juju/testing/checkers" "github.com/juju/utils" gc "gopkg.in/check.v1" "gopkg.in/juju/names.v2" - "github.com/juju/juju/juju/osenv" "github.com/juju/juju/testing" ) @@ -22,35 +18,6 @@ var _ = gc.Suite(&fakeHomeSuite{}) -func (s *fakeHomeSuite) SetUpTest(c *gc.C) { - err := utils.SetHome(home) - c.Assert(err, jc.ErrorIsNil) - os.Setenv("JUJU_DATA", jujuXDGDataHome) - osenv.SetJujuXDGDataHome(jujuXDGDataHome) - - s.FakeJujuXDGDataHomeSuite.SetUpTest(c) -} - -func (s 
*fakeHomeSuite) TearDownTest(c *gc.C) { - s.FakeJujuXDGDataHomeSuite.TearDownTest(c) - - // Test that the environment is restored. - c.Assert(utils.Home(), gc.Equals, jujuXDGDataHome) - c.Assert(os.Getenv("JUJU_DATA"), gc.Equals, jujuXDGDataHome) - c.Assert(osenv.JujuXDGDataHome(), gc.Equals, jujuXDGDataHome) -} - -func (s *fakeHomeSuite) TestFakeHomeSetsUpJujuXDGDataHome(c *gc.C) { - jujuDir := gitjujutesting.JujuXDGDataHomePath() - c.Assert(jujuDir, jc.IsDirectory) -} - -func (s *fakeHomeSuite) TestFakeHomeSetsConfigJujuXDGDataHome(c *gc.C) { - s.PatchEnvironment(osenv.XDGDataHome, "") - expected := gitjujutesting.JujuXDGDataHomePath() - c.Assert(osenv.JujuXDGDataHome(), gc.Equals, expected) -} - func (s *fakeHomeSuite) TestModelTagValid(c *gc.C) { asString := testing.ModelTag.String() tag, err := names.ParseModelTag(asString) @@ -58,6 +25,6 @@ c.Assert(tag, gc.Equals, testing.ModelTag) } -func (s *fakeHomeSuite) TestEnvironUUIDValid(c *gc.C) { +func (s *fakeHomeSuite) TestModelUUIDValid(c *gc.C) { c.Assert(utils.IsValidUUIDString(testing.ModelTag.Id()), jc.IsTrue) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/testing/factory/factory.go juju-core-2.0.0/src/github.com/juju/juju/testing/factory/factory.go --- juju-core-2.0~beta15/src/github.com/juju/juju/testing/factory/factory.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/testing/factory/factory.go 2016-10-13 14:31:49.000000000 +0000 @@ -19,9 +19,9 @@ "gopkg.in/juju/names.v2" "github.com/juju/juju/constraints" - "github.com/juju/juju/core/description" "github.com/juju/juju/instance" "github.com/juju/juju/network" + "github.com/juju/juju/permission" "github.com/juju/juju/state" "github.com/juju/juju/status" "github.com/juju/juju/storage" @@ -54,7 +54,7 @@ Creator names.Tag NoModelUser bool Disabled bool - Access description.Access + Access permission.Access } // ModelUserParams defines the parameters for creating an environment user. 
@@ -62,7 +62,7 @@ User string DisplayName string CreatedBy names.Tag - Access description.Access + Access permission.Access } // CharmParams defines the parameters for creating a charm. @@ -126,7 +126,7 @@ ConfigAttrs testing.Attrs CloudName string CloudRegion string - CloudCredential string + CloudCredential names.CloudCredentialTag StorageProviderRegistry storage.ProviderRegistry } @@ -181,15 +181,15 @@ c.Assert(err, jc.ErrorIsNil) params.Creator = env.Owner() } - if params.Access == description.UndefinedAccess { - params.Access = description.AdminAccess + if params.Access == permission.NoAccess { + params.Access = permission.AdminAccess } creatorUserTag := params.Creator.(names.UserTag) user, err := factory.st.AddUser( params.Name, params.DisplayName, params.Password, creatorUserTag.Name()) c.Assert(err, jc.ErrorIsNil) if !params.NoModelUser { - _, err := factory.st.AddModelUser(state.UserAccessSpec{ + _, err := factory.st.AddModelUser(factory.st.ModelUUID(), state.UserAccessSpec{ User: user.UserTag(), CreatedBy: names.NewUserTag(user.CreatedBy()), DisplayName: params.DisplayName, @@ -208,19 +208,19 @@ // attributes of ModelUserParams that are the default empty values, some // meaningful valid values are used instead. If params is not specified, // defaults are used. 
-func (factory *Factory) MakeModelUser(c *gc.C, params *ModelUserParams) description.UserAccess { +func (factory *Factory) MakeModelUser(c *gc.C, params *ModelUserParams) permission.UserAccess { if params == nil { params = &ModelUserParams{} } if params.User == "" { user := factory.MakeUser(c, &UserParams{NoModelUser: true}) - params.User = user.UserTag().Canonical() + params.User = user.UserTag().Id() } if params.DisplayName == "" { params.DisplayName = uniqueString("display name") } - if params.Access == description.UndefinedAccess { - params.Access = description.AdminAccess + if params.Access == permission.NoAccess { + params.Access = permission.AdminAccess } if params.CreatedBy == nil { env, err := factory.st.Model() @@ -228,7 +228,7 @@ params.CreatedBy = env.Owner() } createdByUserTag := params.CreatedBy.(names.UserTag) - modelUser, err := factory.st.AddModelUser(state.UserAccessSpec{ + modelUser, err := factory.st.AddModelUser(factory.st.ModelUUID(), state.UserAccessSpec{ User: names.NewUserTag(params.User), CreatedBy: createdByUserTag, DisplayName: params.DisplayName, @@ -316,6 +316,28 @@ // The machine and its password are returned. func (factory *Factory) MakeMachineReturningPassword(c *gc.C, params *MachineParams) (*state.Machine, string) { params = factory.paramsFillDefaults(c, params) + return factory.makeMachineReturningPassword(c, params, true) +} + +// MakeUnprovisionedMachineReturningPassword will add a machine with values +// defined in params. For some values in params, if they are missing, some +// meaningful empty values will be set. If params is not specified, defaults +// are used. The machine and its password are returned; the machine will not +// be provisioned. 
+func (factory *Factory) MakeUnprovisionedMachineReturningPassword(c *gc.C, params *MachineParams) (*state.Machine, string) { + if params != nil { + c.Assert(params.Nonce, gc.Equals, "") + c.Assert(params.InstanceId, gc.Equals, instance.Id("")) + c.Assert(params.Characteristics, gc.IsNil) + } + params = factory.paramsFillDefaults(c, params) + params.Nonce = "" + params.InstanceId = "" + params.Characteristics = nil + return factory.makeMachineReturningPassword(c, params, false) +} + +func (factory *Factory) makeMachineReturningPassword(c *gc.C, params *MachineParams, setProvisioned bool) (*state.Machine, string) { machineTemplate := state.MachineTemplate{ Series: params.Series, Jobs: params.Jobs, @@ -325,8 +347,10 @@ } machine, err := factory.st.AddOneMachine(machineTemplate) c.Assert(err, jc.ErrorIsNil) - err = machine.SetProvisioned(params.InstanceId, params.Nonce, params.Characteristics) - c.Assert(err, jc.ErrorIsNil) + if setProvisioned { + err = machine.SetProvisioned(params.InstanceId, params.Nonce, params.Characteristics) + c.Assert(err, jc.ErrorIsNil) + } err = machine.SetPassword(params.Password) c.Assert(err, jc.ErrorIsNil) if len(params.Addresses) > 0 { @@ -575,6 +599,9 @@ if params.CloudName == "" { params.CloudName = "dummy" } + if params.CloudRegion == "" { + params.CloudRegion = "dummy-region" + } if params.Owner == nil { origEnv, err := factory.st.Model() c.Assert(err, jc.ErrorIsNil) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/testing/factory/factory_test.go juju-core-2.0.0/src/github.com/juju/juju/testing/factory/factory_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/testing/factory/factory_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/testing/factory/factory_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -15,8 +15,8 @@ "gopkg.in/juju/charm.v6-unstable" "gopkg.in/juju/names.v2" - "github.com/juju/juju/core/description" "github.com/juju/juju/instance" + 
"github.com/juju/juju/permission" "github.com/juju/juju/state" statetesting "github.com/juju/juju/state/testing" "github.com/juju/juju/storage" @@ -153,7 +153,7 @@ saved, err := s.State.UserAccess(modelUser.UserTag, modelUser.Object) c.Assert(err, jc.ErrorIsNil) c.Assert(saved.Object.Id(), gc.Equals, modelUser.Object.Id()) - c.Assert(saved.UserName, gc.Equals, "foobar123@local") + c.Assert(saved.UserName, gc.Equals, "foobar123") c.Assert(saved.DisplayName, gc.Equals, modelUser.DisplayName) c.Assert(saved.CreatedBy, gc.Equals, modelUser.CreatedBy) } @@ -175,8 +175,8 @@ saved, err := s.State.UserAccess(modelUser.UserTag, s.State.ModelTag()) c.Assert(err, jc.ErrorIsNil) c.Assert(saved.Object.Id(), gc.Equals, modelUser.Object.Id()) - c.Assert(saved.UserName, gc.Equals, "foobar@local") - c.Assert(saved.CreatedBy.Id(), gc.Equals, "createdby@local") + c.Assert(saved.UserName, gc.Equals, "foobar") + c.Assert(saved.CreatedBy.Id(), gc.Equals, "createdby") c.Assert(saved.DisplayName, gc.Equals, "Foo Bar") } @@ -191,7 +191,7 @@ c.Assert(invalidFunc, gc.PanicMatches, `interface conversion: .*`) saved, err := s.State.UserAccess(names.NewLocalUserTag("bob"), s.State.ModelTag()) c.Assert(err, jc.Satisfies, errors.IsNotFound) - c.Assert(saved, gc.DeepEquals, description.UserAccess{}) + c.Assert(saved, gc.DeepEquals, permission.UserAccess{}) } func (s *factorySuite) TestMakeModelUserNonLocalUser(c *gc.C) { @@ -207,7 +207,7 @@ c.Assert(saved.Object.Id(), gc.Equals, modelUser.Object.Id()) c.Assert(saved.UserName, gc.Equals, "foobar@ubuntuone") c.Assert(saved.DisplayName, gc.Equals, "Foo Bar") - c.Assert(saved.CreatedBy.Canonical(), gc.Equals, creator.UserTag().Canonical()) + c.Assert(saved.CreatedBy.Id(), gc.Equals, creator.UserTag().Id()) } func (s *factorySuite) TestMakeMachineNil(c *gc.C) { diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/testing/time.go juju-core-2.0.0/src/github.com/juju/juju/testing/time.go --- juju-core-2.0~beta15/src/github.com/juju/juju/testing/time.go 
1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/testing/time.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,19 @@ +package testing + +import ( + "time" +) + +// ZeroTime can be used in tests instead of time.Now() when the returned +// time.Time value is not relevant. +// +// Example: instead of now := time.Now() use now := testing.ZeroTime(). +func ZeroTime() time.Time { + return time.Time{} +} + +// NonZeroTime can be used in tests instead of time.Now() when the returned +// time.Time value must be non-zero (its IsZero() method returns false). +func NonZeroTime() time.Time { + return time.Unix(0, 1) // 1 nanosecond since epoch +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/testing/vars_unix_test.go juju-core-2.0.0/src/github.com/juju/juju/testing/vars_unix_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/testing/vars_unix_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/testing/vars_unix_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,10 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Copyright 2014 Cloudbase Solutions SRL -// Licensed under the AGPLv3, see LICENCE file for details. - -// +build !windows - -package testing_test - -var home = "/home/eric" -var jujuXDGDataHome = "/home/eric" diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/testing/vars_windows_test.go juju-core-2.0.0/src/github.com/juju/juju/testing/vars_windows_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/testing/vars_windows_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/testing/vars_windows_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,10 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Copyright 2014 Cloudbase Solutions SRL -// Licensed under the AGPLv3, see LICENCE file for details. 
- -// +build windows - -package testing_test - -var home = "C:\\home\\eric" -var jujuXDGDataHome = "C:\\home\\eric" diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/tools/lxdclient/addserver.go juju-core-2.0.0/src/github.com/juju/juju/tools/lxdclient/addserver.go --- juju-core-2.0~beta15/src/github.com/juju/juju/tools/lxdclient/addserver.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/tools/lxdclient/addserver.go 2016-10-13 14:31:49.000000000 +0000 @@ -88,7 +88,7 @@ return "", errors.Trace(err) } - // TODO(ericsnow) Use remoteUrl.String() + // TODO(ericsnow) Use remoteURL.String() return fmt.Sprintf("%s://%s%s", remoteURL.Scheme, remoteURL.Host, remoteURL.Path), nil } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/tools/lxdclient/client_cert.go juju-core-2.0.0/src/github.com/juju/juju/tools/lxdclient/client_cert.go --- juju-core-2.0~beta15/src/github.com/juju/juju/tools/lxdclient/client_cert.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/tools/lxdclient/client_cert.go 2016-10-13 14:31:49.000000000 +0000 @@ -7,8 +7,10 @@ import ( "crypto/x509" + "net/http" "github.com/juju/errors" + "github.com/lxc/lxd" "github.com/lxc/lxd/shared" ) @@ -36,36 +38,12 @@ return nil } -// ListCerts returns the list of cert fingerprints from the server. -func (c certClient) ListCerts() ([]string, error) { - certs, err := c.raw.CertificateList() - if err != nil { - return nil, errors.Trace(err) - } - - var fingerprints []string - for _, cert := range certs { - fingerprints = append(fingerprints, cert.Fingerprint) - } - return fingerprints, nil -} - -// RemoveCert removes the cert from the server. 
-func (c certClient) RemoveCert(cert *Cert) error { - fingerprint, err := cert.Fingerprint() - if err != nil { - return errors.Trace(err) - } - - if err := c.raw.CertificateRemove(fingerprint); err != nil { - return errors.Trace(err) - } - return nil -} - // RemoveCertByFingerprint removes the cert from the server. func (c certClient) RemoveCertByFingerprint(fingerprint string) error { if err := c.raw.CertificateRemove(fingerprint); err != nil { + if err == lxd.LXDErrors[http.StatusNotFound] { + return errors.NotFoundf("certificate with fingerprint %q", fingerprint) + } return errors.Trace(err) } return nil diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/tools/lxdclient/client.go juju-core-2.0.0/src/github.com/juju/juju/tools/lxdclient/client.go --- juju-core-2.0~beta15/src/github.com/juju/juju/tools/lxdclient/client.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/tools/lxdclient/client.go 2016-10-13 14:31:49.000000000 +0000 @@ -97,6 +97,7 @@ *profileClient *instanceClient *imageClient + *networkClient baseURL string defaultProfileBridgeName string } @@ -111,7 +112,7 @@ // Connect opens an API connection to LXD and returns a high-level // Client wrapper around that connection. -func Connect(cfg Config) (*Client, error) { +func Connect(cfg Config, verifyBridgeConfig bool) (*Client, error) { if err := cfg.Validate(); err != nil { return nil, errors.Trace(err) } @@ -123,12 +124,24 @@ return nil, errors.Trace(err) } - // If this is the LXD provider on the localhost, let's do an extra check to - // make sure the default profile has a correctly configured bridge, and - // which one is it. 
+ networkAPISupported := false + if cfg.Remote.Protocol != SimplestreamsProtocol { + status, err := raw.ServerStatus() + if err != nil { + return nil, errors.Trace(err) + } + + if lxdshared.StringInSlice("network", status.APIExtensions) { + networkAPISupported = true + } + } + var bridgeName string - if remoteID == remoteIDForLocal { - bridgeName, err = verifyDefaultProfileBridgeConfig(raw) + if remoteID == remoteIDForLocal && verifyBridgeConfig { + // If this is the LXD provider on the localhost, let's do an extra check to + // make sure the default profile has a correctly configured bridge, and + // which one is it. + bridgeName, err = verifyDefaultProfileBridgeConfig(raw, networkAPISupported) if err != nil { return nil, errors.Trace(err) } @@ -140,6 +153,7 @@ profileClient: &profileClient{raw}, instanceClient: &instanceClient{raw, remoteID}, imageClient: &imageClient{raw, connectToRaw}, + networkClient: &networkClient{raw, networkAPISupported}, baseURL: raw.BaseURL, defaultProfileBridgeName: bridgeName, } @@ -148,36 +162,21 @@ var lxdNewClientFromInfo = lxd.NewClientFromInfo -func isSupportedLxdVersion(version string) bool { - var major, minor, micro int - var err error - +func isSupportedAPIVersion(version string) bool { versionParts := strings.Split(version, ".") - if len(versionParts) < 3 { + if len(versionParts) < 2 { + logger.Warningf("LXD API version %q: expected format .", version) return false } - major, err = strconv.Atoi(versionParts[0]) + major, err := strconv.Atoi(versionParts[0]) if err != nil { + logger.Warningf("LXD API version %q: unexpected major number: %v", version, err) return false } - minor, err = strconv.Atoi(versionParts[1]) - if err != nil { - return false - } - - micro, err = strconv.Atoi(versionParts[2]) - if err != nil { - return false - } - - if major < 2 { - return false - } - - /* disallow 2.0.0.rc4 and friends */ - if major == 2 && minor == 0 && micro == 0 && len(versionParts) > 3 { + if major < 1 { + logger.Warningf("LXD API 
version %q: expected major version 1 or later", version) return false } @@ -242,8 +241,10 @@ return nil, errors.Trace(err) } - if !isSupportedLxdVersion(status.Environment.ServerVersion) { - return nil, errors.Errorf("lxd version %s, juju needs at least 2.0.0", status.Environment.ServerVersion) + if !isSupportedAPIVersion(status.APIVersion) { + logger.Warningf("trying to use unsupported LXD API version %q", status.APIVersion) + } else { + logger.Infof("using LXD API version %q", status.APIVersion) } } @@ -254,7 +255,7 @@ // network bridge configured on the "default" profile. Additionally, if the // default bridge bridge is used, its configuration in LXDBridgeFile is also // inspected to make sure it has a chance to work. -func verifyDefaultProfileBridgeConfig(client *lxd.Client) (string, error) { +func verifyDefaultProfileBridgeConfig(client *lxd.Client, networkAPISupported bool) (string, error) { const ( defaultProfileName = "default" configTypeKey = "type" @@ -270,11 +271,24 @@ return "", errors.Trace(err) } - // If the default profile doesn't have eth0 in it, then the user has messed - // with it, so let's just use whatever they set up. eth0, ok := config.Devices[configEth0] if !ok { + /* on lxd >= 2.3, there is nothing in the default profile + * w.r.t. eth0, because there is no lxdbr0 by default. Let's + * handle this case and configure one now. 
+ */ + if networkAPISupported { + if err := CreateDefaultBridgeInDefaultProfile(client); err != nil { + return "", errors.Annotate(err, "couldn't create default bridge") + } + + return network.DefaultLXDBridge, nil + } return "", errors.Errorf("unexpected LXD %q profile config without eth0: %+v", defaultProfileName, config) + } else if networkAPISupported { + if err := checkBridgeConfig(client, eth0[configParentKey]); err != nil { + return "", err + } } // If eth0 is there, but not with the expected attributes, likewise fail @@ -292,6 +306,13 @@ return bridgeName, nil } + /* if the network API is supported, that means the lxd-bridge config + * file has been obsoleted so we don't need to check it for correctness + */ + if networkAPISupported { + return bridgeName, nil + } + bridgeConfig, err := ioutil.ReadFile(LXDBridgeFile) if os.IsNotExist(err) { return "", bridgeConfigError("lxdbr0 configured but no config file found at " + LXDBridgeFile) @@ -315,6 +336,17 @@ and then bootstrap again.`, err) } +func ipv6BridgeConfigError(filename string) error { + return errors.Errorf(`%s has IPv6 enabled. +Juju doesn't currently support IPv6. 
+ +IPv6 can be disabled by running: + + sudo dpkg-reconfigure -p medium lxd + +and then bootstrap again.`, filename) +} + func checkLXDBridgeConfiguration(conf string) error { foundSubnetConfig := false for _, line := range strings.Split(conf, "\n") { @@ -337,11 +369,16 @@ if name != network.DefaultLXDBridge { return bridgeConfigError(fmt.Sprintf(LXDBridgeFile+" has a bridge named %s, not lxdbr0", name)) } - } else if strings.HasPrefix(line, "LXD_IPV4_ADDR=") || strings.HasPrefix(line, "LXD_IPV6_ADDR=") { - contents := strings.Trim(line[len("LXD_IPVN_ADDR="):], " \"") + } else if strings.HasPrefix(line, "LXD_IPV4_ADDR=") { + contents := strings.Trim(line[len("LXD_IPV4_ADDR="):], " \"") if len(contents) > 0 { foundSubnetConfig = true } + } else if strings.HasPrefix(line, "LXD_IPV6_ADDR=") { + contents := strings.Trim(line[len("LXD_IPV6_ADDR="):], " \"") + if len(contents) > 0 { + return ipv6BridgeConfigError(LXDBridgeFile) + } } } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/tools/lxdclient/client_image.go juju-core-2.0.0/src/github.com/juju/juju/tools/lxdclient/client_image.go --- juju-core-2.0~beta15/src/github.com/juju/juju/tools/lxdclient/client_image.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/tools/lxdclient/client_image.go 2016-10-13 14:31:49.000000000 +0000 @@ -98,18 +98,7 @@ // at private methods so we can't easily tweak it. name := i.ImageNameForSeries(series) - // TODO(jam) Add a flag to not trust local aliases, which would allow - // non-state machines to only trust the alias that is set on the state - // machines. 
- // if IgnoreLocalAliases {} - target := i.raw.GetAlias(name) - if target != "" { - // GetAlias returns "" if the alias is not found, else it - // returns the Target of the alias (the hash) - return nil - } - - var lastErr error + lastErr := errors.New("image not imported!") for _, remote := range sources { source, err := i.connectToSource(remote) if err != nil { @@ -144,7 +133,7 @@ context: fmt.Sprintf("copying image for %s from %s: %%s", name, source.URL()), forward: forwarder.Forward, } - err = source.CopyImage(target, i.raw, []string{name}, adapter.copyProgress) + err = source.CopyImage(series, i.raw, []string{name}, adapter.copyProgress) return errors.Annotatef(err, "unable to get LXD image for %s", name) } return lastErr diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/tools/lxdclient/client_image_test.go juju-core-2.0.0/src/github.com/juju/juju/tools/lxdclient/client_image_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/tools/lxdclient/client_image_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/tools/lxdclient/client_image_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -33,7 +33,7 @@ stub: s.Stub, url: "https://match", aliases: map[string]string{ - "trusty": "deadbeef", + "trusty": "trusty-alias", }, } s.remoteWithNothing = &stubRemoteClient{ @@ -127,6 +127,7 @@ } func (s *imageSuite) TestEnsureImageExistsAlreadyPresent(c *gc.C) { + connector := MakeConnector(s.Stub, s.remoteWithTrusty) raw := &stubClient{ stub: s.Stub, Aliases: map[string]string{ @@ -134,11 +135,11 @@ }, } client := &imageClient{ - raw: raw, + raw: raw, + connectToSource: connector.connectToSource, } - err := client.EnsureImageExists("trusty", nil, nil) + err := client.EnsureImageExists("trusty", []Remote{s.remoteWithTrusty.AsRemote()}, nil) c.Assert(err, jc.ErrorIsNil) - s.Stub.CheckCall(c, 0, "GetAlias", "ubuntu-trusty") } func (s *imageSuite) TestEnsureImageExistsFirstRemote(c *gc.C) { @@ -158,10 +159,6 @@ c.Assert(err, 
jc.ErrorIsNil) // We didn't find it locally s.Stub.CheckCalls(c, []testing.StubCall{ - { // Check if we already have 'ubuntu-trusty' locally - FuncName: "GetAlias", - Args: []interface{}{"ubuntu-trusty"}, - }, { // We didn't so connect to the first remote FuncName: "connectToSource", Args: []interface{}{"https://match"}, @@ -172,12 +169,12 @@ }, { // So Copy the Image FuncName: "CopyImage", - Args: []interface{}{"deadbeef", []string{"ubuntu-trusty"}}, + Args: []interface{}{"trusty", []string{"ubuntu-trusty"}}, }, }) // We've updated the aliases c.Assert(raw.Aliases, gc.DeepEquals, map[string]string{ - "ubuntu-trusty": "deadbeef", + "ubuntu-trusty": "trusty", }) } @@ -197,16 +194,12 @@ Protocol: SimplestreamsProtocol, } s.Stub.ResetCalls() - s.Stub.SetErrors(nil, errors.Errorf("unable-to-connect")) + s.Stub.SetErrors(errors.Errorf("unable-to-connect")) remotes := []Remote{badRemote, s.remoteWithTrusty.AsRemote()} err := client.EnsureImageExists("trusty", remotes, nil) c.Assert(err, jc.ErrorIsNil) // We didn't find it locally s.Stub.CheckCalls(c, []testing.StubCall{ - { // Check if we already have 'ubuntu-trusty' locally - FuncName: "GetAlias", - Args: []interface{}{"ubuntu-trusty"}, - }, { // We didn't so connect to the first remote FuncName: "connectToSource", Args: []interface{}{"https://nosuch-remote.invalid"}, @@ -221,12 +214,12 @@ }, { // So Copy the Image FuncName: "CopyImage", - Args: []interface{}{"deadbeef", []string{"ubuntu-trusty"}}, + Args: []interface{}{"trusty", []string{"ubuntu-trusty"}}, }, }) // We've updated the aliases c.Assert(raw.Aliases, gc.DeepEquals, map[string]string{ - "ubuntu-trusty": "deadbeef", + "ubuntu-trusty": "trusty", }) } @@ -247,10 +240,6 @@ c.Assert(err, jc.ErrorIsNil) // We didn't find it locally s.Stub.CheckCalls(c, []testing.StubCall{ - { // Check if we already have 'ubuntu-trusty' locally - FuncName: "GetAlias", - Args: []interface{}{"ubuntu-trusty"}, - }, { // We didn't so connect to the first remote FuncName: 
"connectToSource", Args: []interface{}{s.remoteWithNothing.URL()}, @@ -269,12 +258,12 @@ }, { // So Copy the Image FuncName: "CopyImage", - Args: []interface{}{"deadbeef", []string{"ubuntu-trusty"}}, + Args: []interface{}{"trusty", []string{"ubuntu-trusty"}}, }, }) // We've updated the aliases c.Assert(raw.Aliases, gc.DeepEquals, map[string]string{ - "ubuntu-trusty": "deadbeef", + "ubuntu-trusty": "trusty", }) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/tools/lxdclient/client_instance.go juju-core-2.0.0/src/github.com/juju/juju/tools/lxdclient/client_instance.go --- juju-core-2.0~beta15/src/github.com/juju/juju/tools/lxdclient/client_instance.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/tools/lxdclient/client_instance.go 2016-10-13 14:31:49.000000000 +0000 @@ -6,7 +6,10 @@ package lxdclient import ( + "bytes" "fmt" + "io" + "os" "strings" "github.com/juju/errors" @@ -20,19 +23,29 @@ type Device map[string]string type Devices map[string]Device +type File struct { + Content []byte + Path string + GID int + UID int + Mode os.FileMode +} +type Files []File + // TODO(ericsnow) We probably need to address some of the things that // get handled in container/lxc/clonetemplate.go. 
type rawInstanceClient interface { ListContainers() ([]shared.ContainerInfo, error) ContainerInfo(name string) (*shared.ContainerInfo, error) - Init(name string, imgremote string, image string, profiles *[]string, config map[string]string, ephem bool) (*lxd.Response, error) + Init(name string, imgremote string, image string, profiles *[]string, config map[string]string, devices shared.Devices, ephem bool) (*lxd.Response, error) Action(name string, action shared.ContainerAction, timeout int, force bool, stateful bool) (*lxd.Response, error) Delete(name string) (*lxd.Response, error) WaitForSuccess(waitURL string) error ContainerState(name string) (*shared.ContainerState, error) ContainerDeviceAdd(container, devname, devtype string, props []string) (*lxd.Response, error) + PushFile(container, path string, gid int, uid int, mode string, buf io.ReadSeeker) error } type instanceClient struct { @@ -65,8 +78,17 @@ // TODO(ericsnow) Copy the image first? + lxdDevices := make(shared.Devices, len(spec.Devices)) + for name, device := range spec.Devices { + lxdDevice := make(shared.Device, len(device)) + for key, value := range device { + lxdDevice[key] = value + } + lxdDevices[name] = lxdDevice + } + config := spec.config() - resp, err := client.raw.Init(spec.Name, imageRemote, imageAlias, profiles, config, spec.Ephemeral) + resp, err := client.raw.Init(spec.Name, imageRemote, imageAlias, profiles, config, lxdDevices, spec.Ephemeral) if err != nil { return errors.Trace(err) } @@ -80,21 +102,12 @@ return errors.Trace(err) } - for name, device := range spec.Devices { - deviceType, ok := device["type"] - if !ok { - continue - } - props := deviceProperties(device) - logger.Infof("adding device=%s, type=%s with properties=%q to container %s", - name, deviceType, props, spec.Name) - resp, err := client.raw.ContainerDeviceAdd(spec.Name, name, deviceType, props) + for _, file := range spec.Files { + logger.Infof("pushing file %q to container %q", file.Path, spec.Name) + err := 
client.raw.PushFile(spec.Name, file.Path, file.GID, file.UID, file.Mode.String(), bytes.NewReader(file.Content)) if err != nil { return errors.Trace(err) } - if err := client.raw.WaitForSuccess(resp.Operation); err != nil { - return errors.Trace(err) - } } return nil diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/tools/lxdclient/client_network.go juju-core-2.0.0/src/github.com/juju/juju/tools/lxdclient/client_network.go --- juju-core-2.0~beta15/src/github.com/juju/juju/tools/lxdclient/client_network.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/tools/lxdclient/client_network.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,117 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +// +build go1.3 + +package lxdclient + +import ( + "fmt" + + "github.com/juju/errors" + "github.com/lxc/lxd" + "github.com/lxc/lxd/shared" + + "github.com/juju/juju/network" +) + +type rawNetworkClient interface { + NetworkCreate(name string, config map[string]string) error + NetworkGet(name string) (shared.NetworkConfig, error) +} + +type networkClient struct { + raw rawNetworkClient + supported bool +} + +// NetworkCreate creates the specified network. +func (c *networkClient) NetworkCreate(name string, config map[string]string) error { + if !c.supported { + return errors.NotSupportedf("network API not supported on this remote") + } + + return c.raw.NetworkCreate(name, config) +} + +// NetworkGet returns the specified network's configuration. 
+func (c *networkClient) NetworkGet(name string) (shared.NetworkConfig, error) { + if !c.supported { + return shared.NetworkConfig{}, errors.NotSupportedf("network API not supported on this remote") + } + + return c.raw.NetworkGet(name) +} + +type creator interface { + rawNetworkClient + ProfileDeviceAdd(profile, devname, devtype string, props []string) (*lxd.Response, error) + ProfileConfig(profile string) (*shared.ProfileConfig, error) +} + +func checkBridgeConfig(client rawNetworkClient, bridge string) error { + n, err := client.NetworkGet(bridge) + if err != nil { + return err + } + + if n.Config["ipv6.address"] != "none" { + return errors.Errorf(`juju doesn't support ipv6. Please disable LXD's IPV6: + + $ lxc network set %s ipv6.address none + +and rebootstrap`, bridge) + } + + return nil +} + +// CreateDefaultBridgeInDefaultProfile creates a default bridge if it doesn't +// exist and (if necessary) inserts it into the default profile. +func CreateDefaultBridgeInDefaultProfile(client creator) error { + /* create the default bridge if it doesn't exist */ + n, err := client.NetworkGet(network.DefaultLXDBridge) + if err != nil { + err := client.NetworkCreate(network.DefaultLXDBridge, map[string]string{ + "ipv6.address": "none", + "ipv6.nat": "false", + }) + if err != nil { + return err + } + + n, err = client.NetworkGet(network.DefaultLXDBridge) + if err != nil { + return err + } + } else { + if err := checkBridgeConfig(client, network.DefaultLXDBridge); err != nil { + return err + } + } + + nicType := "macvlan" + if n.Type == "bridge" { + nicType = "bridged" + } + + props := []string{fmt.Sprintf("nictype=%s", nicType), fmt.Sprintf("parent=%s", network.DefaultLXDBridge)} + + config, err := client.ProfileConfig("default") + if err != nil { + return err + } + + _, ok := config.Devices["eth0"] + if ok { + /* don't configure an eth0 if it already exists */ + return nil + } + + _, err = client.ProfileDeviceAdd("default", "eth0", "nic", props) + if err != nil { + 
return err + } + + return nil +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/tools/lxdclient/client_profile.go juju-core-2.0.0/src/github.com/juju/juju/tools/lxdclient/client_profile.go --- juju-core-2.0~beta15/src/github.com/juju/juju/tools/lxdclient/client_profile.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/tools/lxdclient/client_profile.go 2016-10-13 14:31:49.000000000 +0000 @@ -8,15 +8,17 @@ import ( "github.com/juju/errors" "github.com/lxc/lxd" + "github.com/lxc/lxd/shared" ) type rawProfileClient interface { ProfileCreate(name string) error - ListProfiles() ([]string, error) + ListProfiles() ([]shared.ProfileConfig, error) SetProfileConfigItem(profile, key, value string) error GetProfileConfig(profile string) (map[string]string, error) ProfileDelete(profile string) error ProfileDeviceAdd(profile, devname, devtype string, props []string) (*lxd.Response, error) + ProfileConfig(profile string) (*shared.ProfileConfig, error) } type profileClient struct { @@ -65,7 +67,7 @@ return false, errors.Trace(err) } for _, profile := range profiles { - if profile == name { + if profile.Name == name { return true, nil } } @@ -89,3 +91,7 @@ } return config, nil } + +func (p profileClient) ProfileConfig(profile string) (*shared.ProfileConfig, error) { + return p.raw.ProfileConfig(profile) +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/tools/lxdclient/client_test.go juju-core-2.0.0/src/github.com/juju/juju/tools/lxdclient/client_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/tools/lxdclient/client_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/tools/lxdclient/client_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -46,7 +46,7 @@ /* ECONNREFUSED because it's not a socket (mimics behavior of a socket * with nobody listening) */ - _, err = Connect(cfg) + _, err = Connect(cfg, false) c.Assert(err.Error(), gc.Equals, `can't connect to the local LXD server: LXD refused 
connections; is LXD running? Please configure LXD by running: @@ -56,7 +56,7 @@ /* EACCESS because we can't read/write */ c.Assert(f.Chmod(0400), jc.ErrorIsNil) - _, err = Connect(cfg) + _, err = Connect(cfg, false) c.Assert(err.Error(), gc.Equals, `can't connect to the local LXD server: Permisson denied, are you in the lxd group? Please configure LXD by running: @@ -66,7 +66,7 @@ /* ENOENT because it doesn't exist */ c.Assert(os.RemoveAll(f.Name()), jc.ErrorIsNil) - _, err = Connect(cfg) + _, err = Connect(cfg, false) c.Assert(err.Error(), gc.Equals, `can't connect to the local LXD server: LXD socket not found; is LXD installed & running? Please install LXD by running: @@ -79,7 +79,7 @@ // Yes, the error message actually matters here... this is being displayed // to the user. cs.PatchValue(&lxdNewClientFromInfo, fakeNewClientFromInfo) - _, err = Connect(cfg) + _, err = Connect(cfg, false) c.Assert(err.Error(), gc.Equals, `can't connect to the local LXD server: boo! Please install LXD by running: @@ -174,6 +174,22 @@ and then bootstrap again.`) + ipv6 := ` +USE_LXD_BRIDGE="true" +LXD_BRIDGE="lxdbr0" +LXD_IPV6_ADDR="2001:470:b368:4242::1" +` + + err = checkLXDBridgeConfiguration(ipv6) + c.Assert(err.Error(), gc.Equals, LXDBridgeFile+` has IPv6 enabled. +Juju doesn't currently support IPv6. 
+ +IPv6 can be disabled by running: + + sudo dpkg-reconfigure -p medium lxd + +and then bootstrap again.`) + } func (cs *ConnectSuite) TestRemoteConnectError(c *gc.C) { @@ -191,16 +207,34 @@ }, }.WithDefaults() c.Assert(err, jc.ErrorIsNil) - _, err = Connect(cfg) + _, err = Connect(cfg, false) c.Assert(errors.Cause(err), gc.Equals, testerr) } -func (cs *ConnectSuite) TestVersionCheck(c *gc.C) { - c.Assert(isSupportedLxdVersion("2.0.0"), jc.IsTrue) - c.Assert(isSupportedLxdVersion("2.0.0.rc4"), jc.IsFalse) - c.Assert(isSupportedLxdVersion("0.19"), jc.IsFalse) - c.Assert(isSupportedLxdVersion("2.0.1"), jc.IsTrue) +func (*ConnectSuite) CheckLogContains(c *gc.C, suffix string) { + c.Check(c.GetTestLog(), gc.Matches, "(?s).*WARNING juju.tools.lxdclient "+suffix+".*") +} + +func (*ConnectSuite) CheckVersionSupported(c *gc.C, version string, supported bool) { + c.Check(isSupportedAPIVersion(version), gc.Equals, supported) +} + +func (cs *ConnectSuite) TestBadVersionChecks(c *gc.C) { + cs.CheckVersionSupported(c, "foo", false) + cs.CheckLogContains(c, `LXD API version "foo": expected format \.`) + + cs.CheckVersionSupported(c, "a.b", false) + cs.CheckLogContains(c, `LXD API version "a.b": unexpected major number: strconv.(ParseInt|Atoi): parsing "a": invalid syntax`) + + cs.CheckVersionSupported(c, "0.9", false) + cs.CheckLogContains(c, `LXD API version "0.9": expected major version 1 or later`) +} + +func (cs *ConnectSuite) TestGoodVersionChecks(c *gc.C) { + cs.CheckVersionSupported(c, "1.0", true) + cs.CheckVersionSupported(c, "2.0", true) + cs.CheckVersionSupported(c, "2.1", true) } var testerr = errors.Errorf("boo!") diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/tools/lxdclient/config.go juju-core-2.0.0/src/github.com/juju/juju/tools/lxdclient/config.go --- juju-core-2.0~beta15/src/github.com/juju/juju/tools/lxdclient/config.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/tools/lxdclient/config.go 2016-10-13 
14:31:49.000000000 +0000 @@ -38,76 +38,3 @@ return nil } - -// UsingTCPRemote converts the config into a "non-local" version. An -// already non-local remote is left alone. -// -// For a "local" remote (see Local), the remote is changed to a one -// with the host set to the IP address of the local lxcbr0 bridge -// interface. The LXD server is also set up for remote access, exposing -// the TCP port and adding a certificate for remote access. -func (cfg Config) UsingTCPRemote() (Config, error) { - // Note that cfg is a value receiver, so it is an implicit copy. - - if !cfg.Remote.isLocal() { - return cfg, nil - } - - client, err := Connect(cfg) - if err != nil { - return cfg, errors.Trace(err) - } - - if _, err := client.ServerStatus(); err != nil { - return cfg, errors.Trace(err) - } - - // If the default profile's bridge was never used before, the next call with - // also activate it and get its address. - remote, err := cfg.Remote.UsingTCP(client.defaultProfileBridgeName) - if err != nil { - return cfg, errors.Trace(err) - } - - // Update the server config and authorized certs. - serverCert, err := prepareRemote(client, remote.Cert) - if err != nil { - return cfg, errors.Trace(err) - } - // Note: jam 2016-02-25 setting ServerPEMCert feels like something - // that would have been done in UsingTCP. However, we can't know the - // server's certificate until we've actually connected to it, which - // happens in prepareRemote - remote.ServerPEMCert = serverCert - - cfg.Remote = remote - return cfg, nil -} - -func prepareRemote(client *Client, newCert *Cert) (string, error) { - // Make sure the LXD service is configured to listen to local https - // requests, rather than only via the Unix socket. - // TODO: jam 2016-02-25 This tells LXD to listen on all addresses, - // which does expose the LXD to outside requests. It would - // probably be better to only tell LXD to listen for requests on - // the loopback and LXC bridges that we are using. 
- if err := client.SetConfig("core.https_address", "[::]"); err != nil { - return "", errors.Trace(err) - } - - if newCert == nil { - return "", nil - } - - // Make sure the LXD service will allow our certificate to connect - if err := client.AddCert(*newCert); err != nil { - return "", errors.Trace(err) - } - - st, err := client.ServerStatus() - if err != nil { - return "", errors.Trace(err) - } - - return st.Environment.Certificate, nil -} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/tools/lxdclient/config_test.go juju-core-2.0.0/src/github.com/juju/juju/tools/lxdclient/config_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/tools/lxdclient/config_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/tools/lxdclient/config_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -8,7 +8,6 @@ import ( "github.com/juju/errors" jc "github.com/juju/testing/checkers" - "github.com/juju/utils/set" gc "gopkg.in/check.v1" "github.com/juju/juju/tools/lxdclient" @@ -16,7 +15,6 @@ var ( _ = gc.Suite(&configSuite{}) - _ = gc.Suite(&configFunctionalSuite{}) ) type configBaseSuite struct { @@ -103,87 +101,3 @@ c.Check(err, jc.Satisfies, errors.IsNotValid) } - -func (s *configSuite) TestUsingTCPRemoteNoop(c *gc.C) { - cfg := lxdclient.Config{ - Remote: s.remote, - } - nonlocal, err := cfg.UsingTCPRemote() - c.Assert(err, jc.ErrorIsNil) - - c.Check(nonlocal, jc.DeepEquals, cfg) -} - -type configFunctionalSuite struct { - configBaseSuite - - client *lxdclient.Client -} - -func (s *configFunctionalSuite) SetUpTest(c *gc.C) { - s.configBaseSuite.SetUpTest(c) - - s.client = newLocalClient(c) - c.Logf("connected to %v", s.client) - - if s.client != nil { - origCerts, err := s.client.ListCerts() - c.Assert(err, jc.ErrorIsNil) - s.AddCleanup(func(c *gc.C) { - certs, err := s.client.ListCerts() - c.Assert(err, jc.ErrorIsNil) - - orig := set.NewStrings(origCerts...) 
- added := set.NewStrings(certs...).Difference(orig) - for _, fingerprint := range added.Values() { - err := s.client.RemoveCertByFingerprint(fingerprint) - if err != nil { - c.Logf("could not remove cert %q: %v", fingerprint, err) - } - } - }) - } -} - -func (s *configFunctionalSuite) TestUsingTCPRemote(c *gc.C) { - if s.client == nil { - c.Skip("LXD not running locally") - } - // We can't just pass the testingCert as part of the Local connection, - // because Validate() doesn't like Local remotes that have - // Certificates. - lxdclient.PatchGenerateCertificate(&s.CleanupSuite, testingCert, testingKey) - - cfg := lxdclient.Config{ - Remote: lxdclient.Local, - } - nonlocal, err := cfg.UsingTCPRemote() - c.Assert(err, jc.ErrorIsNil) - - checkValidRemote(c, &nonlocal.Remote) - c.Check(nonlocal, jc.DeepEquals, lxdclient.Config{ - Remote: lxdclient.Remote{ - Name: lxdclient.Local.Name, - Host: nonlocal.Remote.Host, - Cert: nonlocal.Remote.Cert, - Protocol: lxdclient.LXDProtocol, - ServerPEMCert: nonlocal.Remote.ServerPEMCert, - }, - }) - c.Check(nonlocal.Remote.Host, gc.Not(gc.Equals), "") - c.Check(nonlocal.Remote.Cert.CertPEM, gc.Not(gc.Equals), "") - c.Check(nonlocal.Remote.Cert.KeyPEM, gc.Not(gc.Equals), "") - c.Check(nonlocal.Remote.ServerPEMCert, gc.Not(gc.Equals), "") - // TODO(ericsnow) Check that the server has the certs. 
-} - -func newLocalClient(c *gc.C) *lxdclient.Client { - client, err := lxdclient.Connect(lxdclient.Config{ - Remote: lxdclient.Local, - }) - if err != nil { - c.Log(err) - return nil - } - return client -} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/tools/lxdclient/instance.go juju-core-2.0.0/src/github.com/juju/juju/tools/lxdclient/instance.go --- juju-core-2.0~beta15/src/github.com/juju/juju/tools/lxdclient/instance.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/tools/lxdclient/instance.go 2016-10-13 14:31:49.000000000 +0000 @@ -26,6 +26,11 @@ // Also see https://github.com/lxc/lxd/blob/master/specs/configuration.md. UserdataKey = "user-data" + // CertificateFingerprintKey is a key that we define to associate + // a certificate fingerprint with an instance. We use this to clean + // up certificates when removing controller instances. + CertificateFingerprintKey = "certificate-fingerprint" + megabyte = 1024 * 1024 ) @@ -76,9 +81,13 @@ // Metadata is the instance metadata. Metadata map[string]string - // Devices to be added at container initialisation time + // Devices to be added at container initialisation time. Devices + // Files to be pushed after initialisation has completed but + // before the container is started. + Files + // TODO(ericsnow) Other possible fields: // Disks // Networks diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/tools/lxdclient/remote.go juju-core-2.0.0/src/github.com/juju/juju/tools/lxdclient/remote.go --- juju-core-2.0~beta15/src/github.com/juju/juju/tools/lxdclient/remote.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/tools/lxdclient/remote.go 2016-10-13 14:31:49.000000000 +0000 @@ -7,7 +7,6 @@ import ( "github.com/juju/errors" - "github.com/juju/utils" lxdshared "github.com/lxc/lxd/shared" ) @@ -37,6 +36,9 @@ SimplestreamsProtocol Protocol = "simplestreams" ) +/* The "releases" stream for images. 
This consists of blessed releases by the + * Canonical team. + */ var CloudImagesRemote = Remote{ Name: "cloud-images.ubuntu.com", Host: "https://cloud-images.ubuntu.com/releases", @@ -45,8 +47,21 @@ ServerPEMCert: "", } -var generateCertificate = lxdshared.GenerateMemCert -var DefaultImageSources = []Remote{CloudImagesRemote} +/* The "daily" stream. This consists of images that are built from the daily + * package builds. These images have not been independently tested, but in + * theory "should" be good, since they're build from packages from the released + * archive. + */ +var CloudImagesDailyRemote = Remote{ + Name: "cloud-images.ubuntu.com", + Host: "https://cloud-images.ubuntu.com/daily", + Protocol: SimplestreamsProtocol, + Cert: nil, + ServerPEMCert: "", +} + +var generateCertificate = func() ([]byte, []byte, error) { return lxdshared.GenerateMemCert(true) } +var DefaultImageSources = []Remote{CloudImagesRemote, CloudImagesDailyRemote} // Remote describes a LXD "remote" server for a client. In // particular it holds the information needed for the client @@ -171,32 +186,3 @@ return nil } - -// UsingTCP converts the remote into a non-local version. For non-local remotes -// this is a no-op. -// -// For a "local" remote (see Local), the remote is changed to a one with the -// host set to the first IPv4 address assigned to the given bridgeName. The -// remote is also set up for remote access, setting the cert if not already set. -func (r Remote) UsingTCP(bridgeName string) (Remote, error) { - // Note that r is a value receiver, so it is an implicit copy. - - if !r.isLocal() { - return r, nil - } - - address, err := utils.GetAddressForInterface(bridgeName) - if err != nil { - return r, errors.Trace(err) - } - r.Host = address - - // TODO(ericsnow) Change r.Name if "local"? Prepend "juju-"? 
- - r, err = r.WithDefaults() - if err != nil { - return r, errors.Trace(err) - } - - return r, nil -} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/tools/lxdclient/remote_test.go juju-core-2.0.0/src/github.com/juju/juju/tools/lxdclient/remote_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/tools/lxdclient/remote_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/tools/lxdclient/remote_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -17,7 +17,6 @@ var ( _ = gc.Suite(&remoteSuite{}) - _ = gc.Suite(&remoteFunctionalSuite{}) ) type remoteSuite struct { @@ -406,48 +405,6 @@ c.Check(id, gc.Equals, "local") } -func (s *remoteSuite) TestUsingTCPNoop(c *gc.C) { - remote := lxdclient.Remote{ - Name: "my-remote", - Host: "some-host", - Protocol: lxdclient.LXDProtocol, - Cert: s.Cert, - } - nonlocal, err := remote.UsingTCP("") - c.Assert(err, jc.ErrorIsNil) - - c.Check(nonlocal, jc.DeepEquals, remote) -} - -type remoteFunctionalSuite struct { - lxdclient.BaseSuite -} - -func (s *remoteFunctionalSuite) TestUsingTCP(c *gc.C) { - lxdclient.PatchGenerateCertificate(&s.CleanupSuite, testingCert, testingKey) - - remote := lxdclient.Remote{ - Name: "my-remote", - Host: "", - Cert: nil, - } - nonlocal, err := remote.UsingTCP("lo") - c.Assert(err, jc.ErrorIsNil) - - checkValidRemote(c, &nonlocal) - c.Check(nonlocal, jc.DeepEquals, lxdclient.Remote{ - Name: "my-remote", - Host: nonlocal.Host, - Protocol: lxdclient.LXDProtocol, - Cert: nonlocal.Cert, - }) -} - -func checkValidRemote(c *gc.C, remote *lxdclient.Remote) { - c.Check(remote.Host, jc.Satisfies, isValidAddr) - checkValidCert(c, remote.Cert) -} - func isValidAddr(value interface{}) bool { addr, ok := value.(string) if !ok { diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/tools/lxdclient/utils.go juju-core-2.0.0/src/github.com/juju/juju/tools/lxdclient/utils.go --- juju-core-2.0~beta15/src/github.com/juju/juju/tools/lxdclient/utils.go 2016-08-16 08:56:25.000000000 
+0000 +++ juju-core-2.0.0/src/github.com/juju/juju/tools/lxdclient/utils.go 2016-10-13 14:31:49.000000000 +0000 @@ -60,3 +60,20 @@ return running, nil } + +// EnableHTTPSListener configures LXD to listen for HTTPS requests, +// rather than only via the Unix socket. +func EnableHTTPSListener(client interface { + SetConfig(k, v string) error +}) error { + // Make sure the LXD service is configured to listen to local https + // requests, rather than only via the Unix socket. + // TODO: jam 2016-02-25 This tells LXD to listen on all addresses, + // which does expose the LXD to outside requests. It would + // probably be better to only tell LXD to listen for requests on + // the loopback and LXC bridges that we are using. + if err := client.SetConfig("core.https_address", "[::]"); err != nil { + return errors.Trace(err) + } + return nil +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/tools/lxdclient/utils_test.go juju-core-2.0.0/src/github.com/juju/juju/tools/lxdclient/utils_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/tools/lxdclient/utils_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/tools/lxdclient/utils_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,47 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+ +// +build go1.3 + +package lxdclient_test + +import ( + "errors" + + "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/tools/lxdclient" +) + +var ( + _ = gc.Suite(&utilsSuite{}) +) + +type utilsSuite struct { + lxdclient.BaseSuite +} + +func (s *utilsSuite) TestEnableHTTPSListener(c *gc.C) { + var client mockConfigSetter + err := lxdclient.EnableHTTPSListener(&client) + c.Assert(err, jc.ErrorIsNil) + client.CheckCall(c, 0, "SetConfig", "core.https_address", "[::]") +} + +func (s *utilsSuite) TestEnableHTTPSListenerError(c *gc.C) { + var client mockConfigSetter + client.SetErrors(errors.New("uh oh")) + err := lxdclient.EnableHTTPSListener(&client) + c.Assert(err, gc.ErrorMatches, "uh oh") +} + +type mockConfigSetter struct { + testing.Stub +} + +func (m *mockConfigSetter) SetConfig(k, v string) error { + m.MethodCall(m, "SetConfig", k, v) + return m.NextErr() +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/upgrades/operations.go juju-core-2.0.0/src/github.com/juju/juju/upgrades/operations.go --- juju-core-2.0~beta15/src/github.com/juju/juju/upgrades/operations.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/upgrades/operations.go 2016-10-13 14:31:49.000000000 +0000 @@ -18,11 +18,7 @@ // (below). var stateUpgradeOperations = func() []Operation { steps := []Operation{ - // Replace when we have upgrades to do - upgradeToVersion{ - version.MustParse("1.26-placeholder1"), - []Step{}, - }, + upgradeToVersion{version.MustParse("2.0.0"), stateStepsFor20()}, } return steps } @@ -32,11 +28,7 @@ // state-based operations above, ordering is important. 
var upgradeOperations = func() []Operation { steps := []Operation{ - // Replace when we have upgrades to do - upgradeToVersion{ - version.MustParse("1.26-placeholder1"), - []Step{}, - }, + upgradeToVersion{version.MustParse("2.0.0"), stepsFor20()}, } return steps } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/upgrades/steps_20.go juju-core-2.0.0/src/github.com/juju/juju/upgrades/steps_20.go --- juju-core-2.0~beta15/src/github.com/juju/juju/upgrades/steps_20.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/upgrades/steps_20.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,50 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package upgrades + +import ( + "os" + "path/filepath" + + "github.com/juju/juju/state" +) + +// stateStepsFor20 returns upgrade steps for Juju 2.0 that manipulate state directly. +func stateStepsFor20() []Step { + return []Step{ + &upgradeStep{ + description: "strip @local from local user names", + targets: []Target{DatabaseMaster}, + run: func(context Context) error { + return state.StripLocalUserDomain(context.State()) + }, + }, + &upgradeStep{ + description: "rename addmodel permission to add-model", + targets: []Target{DatabaseMaster}, + run: func(context Context) error { + return state.RenameAddModelPermission(context.State()) + }, + }, + } +} + +// stepsFor20 returns upgrade steps for Juju 2.0 that only need the API. +func stepsFor20() []Step { + return []Step{ + &upgradeStep{ + description: "remove apiserver charm get cache", + targets: []Target{Controller}, + run: removeCharmGetCache, + }, + } +} + +// removeCharmGetCache removes the cache directory that was previously +// used by the charms API endpoint. It is no longer necessary. 
+func removeCharmGetCache(context Context) error { + dataDir := context.AgentConfig().DataDir() + cacheDir := filepath.Join(dataDir, "charm-get-cache") + return os.RemoveAll(cacheDir) +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/upgrades/steps_20_test.go juju-core-2.0.0/src/github.com/juju/juju/upgrades/steps_20_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/upgrades/steps_20_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/upgrades/steps_20_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,74 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package upgrades_test + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + + jc "github.com/juju/testing/checkers" + "github.com/juju/version" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/testing" + "github.com/juju/juju/upgrades" +) + +var v200 = version.MustParse("2.0.0") + +type steps20Suite struct { + testing.BaseSuite +} + +var _ = gc.Suite(&steps20Suite{}) + +func (s *steps20Suite) TestStripLocalUserDomain(c *gc.C) { + step := findStateStep(c, v200, "strip @local from local user names") + // Logic for step itself is tested in state package. + c.Assert(step.Targets(), jc.DeepEquals, []upgrades.Target{upgrades.DatabaseMaster}) +} + +func (s *steps20Suite) TestRenameAddModelPermission(c *gc.C) { + step := findStateStep(c, v200, "rename addmodel permission to add-model") + // Logic for step itself is tested in state package. + c.Assert(step.Targets(), jc.DeepEquals, []upgrades.Target{upgrades.DatabaseMaster}) +} + +func (s *steps20Suite) TestCharmGetCacheDir(c *gc.C) { + // Create a cache directory with some stuff in it. 
+ dataDir := c.MkDir() + cacheDir := filepath.Join(dataDir, "charm-get-cache") + c.Assert(os.MkdirAll(cacheDir, 0777), jc.ErrorIsNil) + err := ioutil.WriteFile(filepath.Join(cacheDir, "stuff"), []byte("things"), 0777) + c.Assert(err, jc.ErrorIsNil) + + step := findStep(c, v200, "remove apiserver charm get cache") + + check := func() { + context := &mockContext{ + agentConfig: &mockAgentConfig{dataDir: dataDir}, + } + err = step.Run(context) + c.Assert(err, jc.ErrorIsNil) + + // Cache directory should be gone, but data dir should still be there. + c.Check(pathExists(cacheDir), jc.IsFalse) + c.Check(pathExists(dataDir), jc.IsTrue) + } + + check() + check() // Check OK when directory not present +} + +func pathExists(p string) bool { + _, err := os.Stat(p) + if err == nil { + return true + } else if os.IsNotExist(err) { + return false + } + panic(fmt.Sprintf("stat for %q failed: %v", p, err)) +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/upgrades/upgrade_test.go juju-core-2.0.0/src/github.com/juju/juju/upgrades/upgrade_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/upgrades/upgrade_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/upgrades/upgrade_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -30,43 +30,32 @@ coretesting.MgoTestPackage(t) } -// assertStateSteps is a helper that ensures that the given -// state-based upgrade steps match what is expected for that version -// and that the steps have been added to the global upgrade operations -// list. -func assertStateSteps(c *gc.C, ver version.Number, expectedSteps []string) { - findAndCheckSteps(c, (*upgrades.StateUpgradeOperations)(), ver, expectedSteps) -} - -// assertSteps is a helper that ensures that the given API-based -// upgrade steps match what is expected for that version and that the -// steps have been added to the global upgrade operations list. 
-func assertSteps(c *gc.C, ver version.Number, expectedSteps []string) { - findAndCheckSteps(c, (*upgrades.UpgradeOperations)(), ver, expectedSteps) -} - -func findAndCheckSteps(c *gc.C, ops []upgrades.Operation, ver version.Number, expectedSteps []string) { - for _, op := range ops { +func findStep(c *gc.C, ver version.Number, description string) upgrades.Step { + for _, op := range (*upgrades.UpgradeOperations)() { if op.TargetVersion() == ver { - assertExpectedSteps(c, op.Steps(), expectedSteps) - return + for _, step := range op.Steps() { + if step.Description() == description { + return step + } + } } } - if len(expectedSteps) > 0 { - c.Fatal("upgrade operations for this version are not hooked up") - } + c.Fatalf("could not find step %q for %s", description, ver) + return nil } -// assertExpectedSteps is a helper function used to check that the upgrade steps match -// what is expected for a version. -func assertExpectedSteps(c *gc.C, steps []upgrades.Step, expectedSteps []string) { - c.Assert(steps, gc.HasLen, len(expectedSteps)) - - var stepNames = make([]string, len(steps)) - for i, step := range steps { - stepNames[i] = step.Description() +func findStateStep(c *gc.C, ver version.Number, description string) upgrades.Step { + for _, op := range (*upgrades.StateUpgradeOperations)() { + if op.TargetVersion() == ver { + for _, step := range op.Steps() { + if step.Description() == description { + return step + } + } + } } - c.Assert(stepNames, gc.DeepEquals, expectedSteps) + c.Fatalf("could not find state step %q for %s", description, ver) + return nil } type upgradeSuite struct { @@ -597,7 +586,7 @@ s.checkContextRestriction(c, "API not available from this context") } -func (s *upgradeSuite) TestApiStepsGetRestrictedContext(c *gc.C) { +func (s *upgradeSuite) TestAPIStepsGetRestrictedContext(c *gc.C) { s.PatchValue(upgrades.StateUpgradeOperations, func() []upgrades.Operation { return nil }) @@ -669,14 +658,14 @@ func (s *upgradeSuite) 
TestStateUpgradeOperationsVersions(c *gc.C) { versions := extractUpgradeVersions(c, (*upgrades.StateUpgradeOperations)()) c.Assert(versions, gc.DeepEquals, []string{ - "1.26-placeholder1", + "2.0.0", }) } func (s *upgradeSuite) TestUpgradeOperationsVersions(c *gc.C) { versions := extractUpgradeVersions(c, (*upgrades.UpgradeOperations)()) c.Assert(versions, gc.DeepEquals, []string{ - "1.26-placeholder1", + "2.0.0", }) } @@ -685,7 +674,7 @@ for _, utv := range ops { vers := utv.TargetVersion() // Upgrade steps should only be targeted at final versions (not alpha/beta). - c.Check(vers.Tag, gc.Equals, "placeholder") + c.Check(vers.Tag, gc.Equals, "") versions = append(versions, vers.String()) } return versions diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/version/version.go juju-core-2.0.0/src/github.com/juju/juju/version/version.go --- juju-core-2.0~beta15/src/github.com/juju/juju/version/version.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/version/version.go 2016-10-13 14:31:49.000000000 +0000 @@ -19,7 +19,7 @@ // The presence and format of this constant is very important. // The debian/rules build recipe uses this value for the version // number of the release package. -const version = "2.0-beta15" +const version = "2.0.0" // The version that we switched over from old style numbering to new style. 
var switchOverVersion = semversion.MustParse("1.19.9") diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/watcher/legacy/notifyworker.go juju-core-2.0.0/src/github.com/juju/juju/watcher/legacy/notifyworker.go --- juju-core-2.0~beta15/src/github.com/juju/juju/watcher/legacy/notifyworker.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/watcher/legacy/notifyworker.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,7 +4,7 @@ package legacy import ( - "launchpad.net/tomb" + "gopkg.in/tomb.v1" "github.com/juju/juju/state" "github.com/juju/juju/state/watcher" diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/watcher/legacy/notifyworker_test.go juju-core-2.0.0/src/github.com/juju/juju/watcher/legacy/notifyworker_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/watcher/legacy/notifyworker_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/watcher/legacy/notifyworker_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -10,7 +10,7 @@ jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" - "launchpad.net/tomb" + "gopkg.in/tomb.v1" "github.com/juju/juju/state" "github.com/juju/juju/state/watcher" diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/watcher/legacy/stringsworker.go juju-core-2.0.0/src/github.com/juju/juju/watcher/legacy/stringsworker.go --- juju-core-2.0~beta15/src/github.com/juju/juju/watcher/legacy/stringsworker.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/watcher/legacy/stringsworker.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,7 +4,7 @@ package legacy import ( - "launchpad.net/tomb" + "gopkg.in/tomb.v1" "github.com/juju/juju/state" "github.com/juju/juju/state/watcher" diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/watcher/legacy/stringsworker_test.go juju-core-2.0.0/src/github.com/juju/juju/watcher/legacy/stringsworker_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/watcher/legacy/stringsworker_test.go 
2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/watcher/legacy/stringsworker_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -10,7 +10,7 @@ jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" - "launchpad.net/tomb" + "gopkg.in/tomb.v1" "github.com/juju/juju/state" "github.com/juju/juju/state/watcher" diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/watcher/notify_test.go juju-core-2.0.0/src/github.com/juju/juju/watcher/notify_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/watcher/notify_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/watcher/notify_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -10,7 +10,7 @@ "github.com/juju/errors" jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" - "launchpad.net/tomb" + "gopkg.in/tomb.v1" coretesting "github.com/juju/juju/testing" "github.com/juju/juju/watcher" diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/watcher/strings_test.go juju-core-2.0.0/src/github.com/juju/juju/watcher/strings_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/watcher/strings_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/watcher/strings_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -10,7 +10,7 @@ "github.com/juju/errors" jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" - "launchpad.net/tomb" + "gopkg.in/tomb.v1" coretesting "github.com/juju/juju/testing" "github.com/juju/juju/watcher" diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/agent/manifold.go juju-core-2.0.0/src/github.com/juju/juju/worker/agent/manifold.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/agent/manifold.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/agent/manifold.go 2016-10-13 14:31:49.000000000 +0000 @@ -5,7 +5,7 @@ import ( "github.com/juju/errors" - "launchpad.net/tomb" + "gopkg.in/tomb.v1" "github.com/juju/juju/agent" 
"github.com/juju/juju/worker" diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/apiaddressupdater/manifold.go juju-core-2.0.0/src/github.com/juju/juju/worker/apiaddressupdater/manifold.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/apiaddressupdater/manifold.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/apiaddressupdater/manifold.go 2016-10-13 14:31:49.000000000 +0000 @@ -5,11 +5,11 @@ import ( "github.com/juju/errors" - "github.com/juju/juju/api/machiner" "gopkg.in/juju/names.v2" "github.com/juju/juju/agent" "github.com/juju/juju/api/base" + "github.com/juju/juju/api/machiner" "github.com/juju/juju/api/uniter" "github.com/juju/juju/cmd/jujud/agent/engine" "github.com/juju/juju/worker" @@ -17,16 +17,16 @@ ) // ManifoldConfig defines the names of the manifolds on which a Manifold will depend. -type ManifoldConfig engine.AgentApiManifoldConfig +type ManifoldConfig engine.AgentAPIManifoldConfig // Manifold returns a dependency manifold that runs an API address updater worker, // using the resource names defined in the supplied config. func Manifold(config ManifoldConfig) dependency.Manifold { - typedConfig := engine.AgentApiManifoldConfig(config) - return engine.AgentApiManifold(typedConfig, newWorker) + typedConfig := engine.AgentAPIManifoldConfig(config) + return engine.AgentAPIManifold(typedConfig, newWorker) } -// newWorker trivially wraps NewAPIAddressUpdater for use in a engine.AgentApiManifold. +// newWorker trivially wraps NewAPIAddressUpdater for use in a engine.AgentAPIManifold. // It's not tested at the moment, because the scaffolding necessary is too // unwieldy/distracting to introduce at this point. 
var newWorker = func(a agent.Agent, apiCaller base.APICaller) (worker.Worker, error) { diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/apicaller/connect.go juju-core-2.0.0/src/github.com/juju/juju/worker/apicaller/connect.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/apicaller/connect.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/apicaller/connect.go 2016-10-13 14:31:49.000000000 +0000 @@ -24,7 +24,7 @@ // // TODO(katco): 2016-08-09: lp:1611427 checkProvisionedStrategy = utils.AttemptStrategy{ - Total: 1 * time.Minute, + Total: 10 * time.Minute, Delay: 5 * time.Second, } @@ -44,21 +44,6 @@ ErrChangedPassword = errors.New("insecure password replaced; retry") ) -// APIOpen is an api.OpenFunc that wraps api.Open, and handles the edge -// case where a model has jumping several versions and doesn't yet have -// the model UUID cached in the agent config; in which case we fall back -// to login version 1. -// -// You probably want to use this in ManifoldConfig; *we* probably want to -// put this particular hack inside api.Open, but I seem to recall there -// being some complication last time I thought that was a good idea. -func APIOpen(info *api.Info, opts api.DialOpts) (api.Connection, error) { - if info.ModelTag.Id() == "" { - return api.OpenWithVersion(info, opts, 1) - } - return api.Open(info, opts) -} - // OnlyConnect logs into the API using the supplied agent's credentials. 
func OnlyConnect(a agent.Agent, apiOpen api.OpenFunc) (api.Connection, error) { agentConfig := a.CurrentConfig() @@ -163,8 +148,6 @@ // // * returns ErrConnectImpossible if the agent entity is dead or // unauthorized for all known passwords; -// * if the agent's config does not specify a model, tries to record the -// model we just connected to; // * replaces insecure credentials with freshly (locally) generated ones // (and returns ErrPasswordChanged, expecting to be reinvoked); // * unconditionally resets the remote-state password to its current value @@ -209,14 +192,6 @@ } }() - // Update the agent config if necessary; this should just read the - // conn's properties, rather than making api calls, so we don't - // need to think about facades yet. - if err := maybeSetAgentModelTag(a, conn); err != nil { - // apperently it's fine for this to fail - logger.Errorf("maybeSetAgentModelTag failed: %v", err) - } - // newConnFacade is patched out in export_test, because exhaustion. // proper config/params struct would be better. facade, err := newConnFacade(conn) @@ -267,26 +242,6 @@ return conn, nil } -// maybeSetAgentModelTag tries to update the agent configuration if -// it's missing a model tag. It doesn't *really* matter if it fails, -// because we can demonstrably connect without it, so we log any -// errors encountered and never return any to the client. -func maybeSetAgentModelTag(a agent.Agent, conn api.Connection) error { - if a.CurrentConfig().Model().Id() == "" { - err := a.ChangeConfig(func(setter agent.ConfigSetter) error { - modelTag, err := conn.ModelTag() - if err != nil { - return errors.Annotate(err, "no model uuid set on api") - } - return setter.Migrate(agent.MigrateParams{ - Model: modelTag, - }) - }) - return errors.Annotate(err, "unable to save model uuid") - } - return nil -} - // changePassword generates a new random password and records it in // local agent configuration and on the remote state server. 
The supplied // oldPassword -- which must be the current valid password -- is set as a diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/apicaller/connect_test.go juju-core-2.0.0/src/github.com/juju/juju/worker/apicaller/connect_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/apicaller/connect_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/apicaller/connect_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -11,7 +11,6 @@ gc "gopkg.in/check.v1" "gopkg.in/juju/names.v2" - "github.com/juju/juju/agent" "github.com/juju/juju/api" apiagent "github.com/juju/juju/api/agent" "github.com/juju/juju/apiserver/common" @@ -71,68 +70,6 @@ }}) } -func (*ScaryConnectSuite) TestModelTagCannotChangeConfig(c *gc.C) { - stub := checkModelTagUpdate(c, errors.New("oh noes")) - stub.CheckCallNames(c, - "ChangeConfig", - "Life", "SetPassword", - ) -} - -func (*ScaryConnectSuite) TestModelTagCannotGetTag(c *gc.C) { - stub := checkModelTagUpdate(c, nil, errors.New("oh noes")) - stub.CheckCallNames(c, - "ChangeConfig", "ModelTag", - "Life", "SetPassword", - ) -} - -func (*ScaryConnectSuite) TestModelTagCannotMigrate(c *gc.C) { - stub := checkModelTagUpdate(c, nil, nil, errors.New("oh noes")) - stub.CheckCallNames(c, - "ChangeConfig", "ModelTag", "Migrate", - "Life", "SetPassword", - ) - c.Check(stub.Calls()[2].Args, jc.DeepEquals, []interface{}{ - agent.MigrateParams{Model: coretesting.ModelTag}, - }) -} - -func (*ScaryConnectSuite) TestModelTagSuccess(c *gc.C) { - stub := checkModelTagUpdate(c) - stub.CheckCallNames(c, - "ChangeConfig", "ModelTag", "Migrate", - "Life", "SetPassword", - ) - c.Check(stub.Calls()[2].Args, jc.DeepEquals, []interface{}{ - agent.MigrateParams{Model: coretesting.ModelTag}, - }) -} - -func checkModelTagUpdate(c *gc.C, errs ...error) *testing.Stub { - // success case; just a little failure we don't mind, otherwise - // equivalent to testEntityFine. 
- stub := &testing.Stub{} - stub.SetErrors(errs...) // from ChangeConfig - expectConn := &mockConn{stub: stub} - apiOpen := func(info *api.Info, opts api.DialOpts) (api.Connection, error) { - return expectConn, nil - } - - entity := names.NewApplicationTag("omg") - connect := func() (api.Connection, error) { - return apicaller.ScaryConnect(&mockAgent{ - stub: stub, - // no model set; triggers ChangeConfig - entity: entity, - }, apiOpen) - } - conn, err := lifeTest(c, stub, apiagent.Alive, connect) - c.Check(conn, gc.Equals, expectConn) - c.Check(err, jc.ErrorIsNil) - return stub -} - func (*ScaryConnectSuite) TestEntityDead(c *gc.C) { // permanent failure case stub := &testing.Stub{} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/apicaller/manifold.go juju-core-2.0.0/src/github.com/juju/juju/worker/apicaller/manifold.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/apicaller/manifold.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/apicaller/manifold.go 2016-10-13 14:31:49.000000000 +0000 @@ -81,7 +81,7 @@ } else if err != nil { return nil, errors.Annotate(err, "cannot open api") } - return newApiConnWorker(conn), nil + return newAPIConnWorker(conn), nil } } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/apicaller/util_test.go juju-core-2.0.0/src/github.com/juju/juju/worker/apicaller/util_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/apicaller/util_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/apicaller/util_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -76,11 +76,6 @@ agent.ConfigSetter } -func (mock *mockSetter) Migrate(params agent.MigrateParams) error { - mock.stub.AddCall("Migrate", params) - return mock.stub.NextErr() -} - func (mock *mockSetter) SetOldPassword(pw string) { mock.stub.AddCall("SetOldPassword", pw) mock.stub.PopNoErr() @@ -94,15 +89,16 @@ type mockConn struct { stub *testing.Stub 
api.Connection - broken chan struct{} + controllerOnly bool + broken chan struct{} } -func (mock *mockConn) ModelTag() (names.ModelTag, error) { +func (mock *mockConn) ModelTag() (names.ModelTag, bool) { mock.stub.AddCall("ModelTag") - if err := mock.stub.NextErr(); err != nil { - return names.ModelTag{}, err + if mock.controllerOnly { + return names.ModelTag{}, false } - return coretesting.ModelTag, nil + return coretesting.ModelTag, true } func (mock *mockConn) Broken() <-chan struct{} { @@ -152,9 +148,8 @@ } func lifeTest(c *gc.C, stub *testing.Stub, life apiagent.Life, test func() (api.Connection, error)) (api.Connection, error) { - expectConn := &mockConn{stub: stub} newFacade := func(apiCaller base.APICaller) (apiagent.ConnFacade, error) { - c.Check(apiCaller, jc.DeepEquals, expectConn) + c.Check(apiCaller, gc.FitsTypeOf, (*mockConn)(nil)) return newMockConnFacade(stub, life), nil } unpatch := testing.PatchValue(apicaller.NewConnFacade, newFacade) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/apicaller/worker.go juju-core-2.0.0/src/github.com/juju/juju/worker/apicaller/worker.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/apicaller/worker.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/apicaller/worker.go 2016-10-13 14:31:49.000000000 +0000 @@ -6,7 +6,7 @@ import ( "github.com/juju/errors" "github.com/juju/loggo" - "launchpad.net/tomb" + "gopkg.in/tomb.v1" "github.com/juju/juju/api" "github.com/juju/juju/worker" @@ -14,7 +14,7 @@ var logger = loggo.GetLogger("juju.worker.apicaller") -// newApiConnWorker returns a worker that exists for as long as the associated +// newAPIConnWorker returns a worker that exists for as long as the associated // connection, and provides access to a base.APICaller via its manifold's Output // func. If the worker is killed, the connection will be closed; and if the // connection is broken, the worker will be killed. 
@@ -22,7 +22,7 @@ // The lack of error return is considered and intentional; it signals the // transfer of responsibility for the connection from the caller to the // worker. -func newApiConnWorker(conn api.Connection) worker.Worker { +func newAPIConnWorker(conn api.Connection) worker.Worker { w := &apiConnWorker{conn: conn} go func() { defer w.tomb.Done() diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/apiconfigwatcher/manifold.go juju-core-2.0.0/src/github.com/juju/juju/worker/apiconfigwatcher/manifold.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/apiconfigwatcher/manifold.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/apiconfigwatcher/manifold.go 2016-10-13 14:31:49.000000000 +0000 @@ -9,7 +9,7 @@ "github.com/juju/errors" "github.com/juju/loggo" "github.com/juju/utils/voyeur" - "launchpad.net/tomb" + "gopkg.in/tomb.v1" "github.com/juju/juju/agent" "github.com/juju/juju/worker" diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/applicationscaler/manifold.go juju-core-2.0.0/src/github.com/juju/juju/worker/applicationscaler/manifold.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/applicationscaler/manifold.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/applicationscaler/manifold.go 2016-10-13 14:31:49.000000000 +0000 @@ -33,8 +33,8 @@ // Manifold returns a dependency.Manifold that runs an applicationscaler worker. 
func Manifold(config ManifoldConfig) dependency.Manifold { - return engine.ApiManifold( - engine.ApiManifoldConfig{config.APICallerName}, + return engine.APIManifold( + engine.APIManifoldConfig{config.APICallerName}, config.start, ) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/authenticationworker/manifold.go juju-core-2.0.0/src/github.com/juju/juju/worker/authenticationworker/manifold.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/authenticationworker/manifold.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/authenticationworker/manifold.go 2016-10-13 14:31:49.000000000 +0000 @@ -15,14 +15,14 @@ ) // ManifoldConfig defines the names of the manifolds on which a Manifold will depend. -type ManifoldConfig engine.AgentApiManifoldConfig +type ManifoldConfig engine.AgentAPIManifoldConfig // Manifold returns a dependency manifold that runs a authenticationworker worker, // using the resource names defined in the supplied config. 
func Manifold(config ManifoldConfig) dependency.Manifold { - typedConfig := engine.AgentApiManifoldConfig(config) + typedConfig := engine.AgentAPIManifoldConfig(config) - return engine.AgentApiManifold(typedConfig, newWorker) + return engine.AgentAPIManifold(typedConfig, newWorker) } func newWorker(a agent.Agent, apiCaller base.APICaller) (worker.Worker, error) { diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/authenticationworker/worker.go juju-core-2.0.0/src/github.com/juju/juju/worker/authenticationworker/worker.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/authenticationworker/worker.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/authenticationworker/worker.go 2016-10-13 14:31:49.000000000 +0000 @@ -12,7 +12,7 @@ "github.com/juju/utils/set" "github.com/juju/utils/ssh" "gopkg.in/juju/names.v2" - "launchpad.net/tomb" + "gopkg.in/tomb.v1" "github.com/juju/juju/agent" "github.com/juju/juju/api/keyupdater" diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/authenticationworker/worker_test.go juju-core-2.0.0/src/github.com/juju/juju/worker/authenticationworker/worker_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/authenticationworker/worker_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/authenticationworker/worker_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -28,7 +28,7 @@ jujutesting.JujuConnSuite stateMachine *state.Machine machine *state.Machine - keyupdaterApi *keyupdater.State + keyupdaterAPI *keyupdater.State existingEnvKey string existingKeys []string @@ -61,8 +61,8 @@ var apiRoot api.Connection apiRoot, s.machine = s.OpenAPIAsNewMachine(c) c.Assert(apiRoot, gc.NotNil) - s.keyupdaterApi = keyupdater.NewState(apiRoot) - c.Assert(s.keyupdaterApi, gc.NotNil) + s.keyupdaterAPI = keyupdater.NewState(apiRoot) + c.Assert(s.keyupdaterAPI, gc.NotNil) } func stop(c *gc.C, w worker.Worker) { @@ -110,7 +110,7 @@ } func 
(s *workerSuite) TestKeyUpdateRetainsExisting(c *gc.C) { - authWorker, err := authenticationworker.NewWorker(s.keyupdaterApi, agentConfig(c, s.machine.Tag().(names.MachineTag))) + authWorker, err := authenticationworker.NewWorker(s.keyupdaterAPI, agentConfig(c, s.machine.Tag().(names.MachineTag))) c.Assert(err, jc.ErrorIsNil) defer stop(c, authWorker) @@ -124,7 +124,7 @@ newKey := sshtesting.ValidKeyThree.Key + " user@host" s.setAuthorisedKeys(c, newKey) - authWorker, err := authenticationworker.NewWorker(s.keyupdaterApi, agentConfig(c, s.machine.Tag().(names.MachineTag))) + authWorker, err := authenticationworker.NewWorker(s.keyupdaterAPI, agentConfig(c, s.machine.Tag().(names.MachineTag))) c.Assert(err, jc.ErrorIsNil) defer stop(c, authWorker) @@ -133,7 +133,7 @@ } func (s *workerSuite) TestDeleteKey(c *gc.C) { - authWorker, err := authenticationworker.NewWorker(s.keyupdaterApi, agentConfig(c, s.machine.Tag().(names.MachineTag))) + authWorker, err := authenticationworker.NewWorker(s.keyupdaterAPI, agentConfig(c, s.machine.Tag().(names.MachineTag))) c.Assert(err, jc.ErrorIsNil) defer stop(c, authWorker) @@ -149,7 +149,7 @@ } func (s *workerSuite) TestMultipleChanges(c *gc.C) { - authWorker, err := authenticationworker.NewWorker(s.keyupdaterApi, agentConfig(c, s.machine.Tag().(names.MachineTag))) + authWorker, err := authenticationworker.NewWorker(s.keyupdaterAPI, agentConfig(c, s.machine.Tag().(names.MachineTag))) c.Assert(err, jc.ErrorIsNil) defer stop(c, authWorker) s.waitSSHKeys(c, append(s.existingKeys, s.existingEnvKey)) @@ -163,7 +163,7 @@ } func (s *workerSuite) TestWorkerRestart(c *gc.C) { - authWorker, err := authenticationworker.NewWorker(s.keyupdaterApi, agentConfig(c, s.machine.Tag().(names.MachineTag))) + authWorker, err := authenticationworker.NewWorker(s.keyupdaterAPI, agentConfig(c, s.machine.Tag().(names.MachineTag))) c.Assert(err, jc.ErrorIsNil) defer stop(c, authWorker) s.waitSSHKeys(c, append(s.existingKeys, s.existingEnvKey)) @@ -175,7 +175,7 
@@ s.setAuthorisedKeys(c, sshtesting.ValidKeyThree.Key+" yetanother@host") // Restart the worker and check that the ssh auth keys are as expected. - authWorker, err = authenticationworker.NewWorker(s.keyupdaterApi, agentConfig(c, s.machine.Tag().(names.MachineTag))) + authWorker, err = authenticationworker.NewWorker(s.keyupdaterAPI, agentConfig(c, s.machine.Tag().(names.MachineTag))) c.Assert(err, jc.ErrorIsNil) defer stop(c, authWorker) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/catacomb/catacomb.go juju-core-2.0.0/src/github.com/juju/juju/worker/catacomb/catacomb.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/catacomb/catacomb.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/catacomb/catacomb.go 2016-10-13 14:31:49.000000000 +0000 @@ -9,7 +9,7 @@ "sync/atomic" "github.com/juju/errors" - "launchpad.net/tomb" + "gopkg.in/tomb.v1" "github.com/juju/juju/worker" ) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/catacomb/catacomb_test.go juju-core-2.0.0/src/github.com/juju/juju/worker/catacomb/catacomb_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/catacomb/catacomb_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/catacomb/catacomb_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -10,7 +10,7 @@ "github.com/juju/testing" jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" - "launchpad.net/tomb" + "gopkg.in/tomb.v1" "github.com/juju/juju/worker" "github.com/juju/juju/worker/catacomb" diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/catacomb/fixture_test.go juju-core-2.0.0/src/github.com/juju/juju/worker/catacomb/fixture_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/catacomb/fixture_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/catacomb/fixture_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -8,7 +8,7 @@ jc 
"github.com/juju/testing/checkers" gc "gopkg.in/check.v1" - "launchpad.net/tomb" + "gopkg.in/tomb.v1" coretesting "github.com/juju/juju/testing" "github.com/juju/juju/worker" diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/charmrevision/worker.go juju-core-2.0.0/src/github.com/juju/juju/worker/charmrevision/worker.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/charmrevision/worker.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/charmrevision/worker.go 2016-10-13 14:31:49.000000000 +0000 @@ -8,7 +8,7 @@ "github.com/juju/errors" "github.com/juju/utils/clock" - "launchpad.net/tomb" + "gopkg.in/tomb.v1" "github.com/juju/juju/worker" ) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/charmrevision/worker_test.go juju-core-2.0.0/src/github.com/juju/juju/worker/charmrevision/worker_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/charmrevision/worker_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/charmrevision/worker_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -45,7 +45,9 @@ fix := newFixture(time.Minute) fix.cleanTest(c, func(_ worker.Worker) { fix.waitCall(c) - fix.clock.Advance(time.Minute) + if err := fix.clock.WaitAdvance(time.Minute, 1*time.Second, 1); err != nil { + c.Fatal(err) + } fix.waitCall(c) fix.waitNoCall(c) }) @@ -84,14 +86,14 @@ // workerFixture isolates a charmrevision worker for testing. 
type workerFixture struct { revisionUpdater mockRevisionUpdater - clock *coretesting.Clock + clock *testing.Clock period time.Duration } func newFixture(period time.Duration) workerFixture { return workerFixture{ revisionUpdater: newMockRevisionUpdater(), - clock: coretesting.NewClock(time.Now()), + clock: testing.NewClock(coretesting.ZeroTime()), period: period, } } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/cleaner/cleaner_test.go juju-core-2.0.0/src/github.com/juju/juju/worker/cleaner/cleaner_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/cleaner/cleaner_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/cleaner/cleaner_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -9,7 +9,7 @@ jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" - "launchpad.net/tomb" + "gopkg.in/tomb.v1" coretesting "github.com/juju/juju/testing" "github.com/juju/juju/watcher" diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/cleaner/manifold.go juju-core-2.0.0/src/github.com/juju/juju/worker/cleaner/manifold.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/cleaner/manifold.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/cleaner/manifold.go 2016-10-13 14:31:49.000000000 +0000 @@ -14,12 +14,12 @@ ) // ManifoldConfig describes the resources used by the cleanup worker. -type ManifoldConfig engine.ApiManifoldConfig +type ManifoldConfig engine.APIManifoldConfig // Manifold returns a Manifold that encapsulates the cleanup worker. 
func Manifold(config ManifoldConfig) dependency.Manifold { - return engine.ApiManifold( - engine.ApiManifoldConfig(config), + return engine.APIManifold( + engine.APIManifoldConfig(config), manifoldStart, ) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/dblogpruner/worker.go juju-core-2.0.0/src/github.com/juju/juju/worker/dblogpruner/worker.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/dblogpruner/worker.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/dblogpruner/worker.go 2016-10-13 14:31:49.000000000 +0000 @@ -7,7 +7,7 @@ "time" "github.com/juju/errors" - "launchpad.net/tomb" + "gopkg.in/tomb.v1" "github.com/juju/juju/state" "github.com/juju/juju/worker" diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/dependency/engine.go juju-core-2.0.0/src/github.com/juju/juju/worker/dependency/engine.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/dependency/engine.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/dependency/engine.go 2016-10-13 14:31:49.000000000 +0000 @@ -9,7 +9,7 @@ "github.com/juju/errors" "github.com/juju/loggo" "github.com/juju/utils/set" - "launchpad.net/tomb" + "gopkg.in/tomb.v1" "github.com/juju/juju/worker" ) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/dependency/util_test.go juju-core-2.0.0/src/github.com/juju/juju/worker/dependency/util_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/dependency/util_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/dependency/util_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -9,7 +9,7 @@ "github.com/juju/errors" jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" - "launchpad.net/tomb" + "gopkg.in/tomb.v1" coretesting "github.com/juju/juju/testing" "github.com/juju/juju/worker" diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/deployer/deployer.go 
juju-core-2.0.0/src/github.com/juju/juju/worker/deployer/deployer.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/deployer/deployer.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/deployer/deployer.go 2016-10-13 14:31:49.000000000 +0000 @@ -15,6 +15,7 @@ "github.com/juju/juju/agent" apideployer "github.com/juju/juju/api/deployer" "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/status" "github.com/juju/juju/watcher" "github.com/juju/juju/worker" ) @@ -151,6 +152,9 @@ if d.deployed.Contains(unit.Name()) { panic("must not re-deploy a deployed unit") } + if err := unit.SetStatus(status.Waiting, status.MessageInstallingAgent, nil); err != nil { + return errors.Trace(err) + } logger.Infof("deploying unit %q", unitName) initialPassword, err := utils.RandomPassword() if err != nil { diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/deployer/deployer_test.go juju-core-2.0.0/src/github.com/juju/juju/worker/deployer/deployer_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/deployer/deployer_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/deployer/deployer_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -79,7 +79,7 @@ // Cause a unit to become Dying, and check no change. 
now := time.Now() sInfo := status.StatusInfo{ - Status: status.StatusIdle, + Status: status.Idle, Message: "", Since: &now, } @@ -108,6 +108,21 @@ c.Assert(u1.Life(), gc.Equals, state.Dying) } +func (s *deployerSuite) TestInitialStatusMessages(c *gc.C) { + svc := s.AddTestingService(c, "wordpress", s.AddTestingCharm(c, "wordpress")) + u0, err := svc.AddUnit() + c.Assert(err, jc.ErrorIsNil) + + dep, _ := s.makeDeployerAndContext(c) + defer stop(c, dep) + err = u0.AssignToMachine(s.machine) + c.Assert(err, jc.ErrorIsNil) + s.waitFor(c, unitStatus(u0, status.StatusInfo{ + Status: status.Waiting, + Message: "installing agent", + })) +} + func (s *deployerSuite) TestRemoveNonAlivePrincipals(c *gc.C) { // Create a service, and a couple of units. svc := s.AddTestingService(c, "wordpress", s.AddTestingCharm(c, "wordpress")) @@ -128,7 +143,7 @@ // would happen if it were possible to have a dying unit in this situation. now := time.Now() sInfo := status.StatusInfo{ - Status: status.StatusIdle, + Status: status.Idle, Message: "", Since: &now, } @@ -265,6 +280,14 @@ } } +func unitStatus(u *state.Unit, statusInfo status.StatusInfo) func(*gc.C) bool { + return func(c *gc.C) bool { + sInfo, err := u.Status() + c.Assert(err, jc.ErrorIsNil) + return sInfo.Status == statusInfo.Status && sInfo.Message == statusInfo.Message + } +} + func stop(c *gc.C, w worker.Worker) { c.Assert(worker.Stop(w), gc.IsNil) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/deployer/manifold.go juju-core-2.0.0/src/github.com/juju/juju/worker/deployer/manifold.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/deployer/manifold.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/deployer/manifold.go 2016-10-13 14:31:49.000000000 +0000 @@ -27,14 +27,14 @@ // Manifold returns a dependency manifold that runs a deployer worker, // using the resource names defined in the supplied config. 
func Manifold(config ManifoldConfig) dependency.Manifold { - typedConfig := engine.AgentApiManifoldConfig{ + typedConfig := engine.AgentAPIManifoldConfig{ AgentName: config.AgentName, APICallerName: config.APICallerName, } - return engine.AgentApiManifold(typedConfig, config.newWorker) + return engine.AgentAPIManifold(typedConfig, config.newWorker) } -// newWorker trivially wraps NewDeployer for use in a engine.AgentApiManifold. +// newWorker trivially wraps NewDeployer for use in a engine.AgentAPIManifold. // // It's not tested at the moment, because the scaffolding // necessary is too unwieldy/distracting to introduce at this point. diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/deployer/simple.go juju-core-2.0.0/src/github.com/juju/juju/worker/deployer/simple.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/deployer/simple.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/deployer/simple.go 2016-10-13 14:31:49.000000000 +0000 @@ -142,6 +142,7 @@ Tag: tag, Password: initialPassword, Nonce: "unused", + Controller: ctx.agentConfig.Controller(), Model: ctx.agentConfig.Model(), // TODO: remove the state addresses here and test when api only. 
StateAddresses: result.StateAddresses, diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/deployer/simple_test.go juju-core-2.0.0/src/github.com/juju/juju/worker/deployer/simple_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/deployer/simple_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/deployer/simple_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -327,6 +327,10 @@ return testing.ModelTag } +func (mock *mockConfig) Controller() names.ControllerTag { + return testing.ControllerTag +} + func (mock *mockConfig) CACert() string { return testing.CACert } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/diskmanager/lsblk.go juju-core-2.0.0/src/github.com/juju/juju/worker/diskmanager/lsblk.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/diskmanager/lsblk.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/diskmanager/lsblk.go 2016-10-13 14:31:49.000000000 +0000 @@ -111,9 +111,7 @@ // host, but the devices will typically not be present. continue } else if err != nil { - logger.Errorf( - "error checking if %q is in use: %v", dev.DeviceName, err, - ) + logger.Infof("could not check if %q is in use: %v", dev.DeviceName, err) // We cannot detect, so err on the side of caution and default to // "in use" so the device cannot be used. dev.InUse = true diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/diskmanager/manifold.go juju-core-2.0.0/src/github.com/juju/juju/worker/diskmanager/manifold.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/diskmanager/manifold.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/diskmanager/manifold.go 2016-10-13 14:31:49.000000000 +0000 @@ -16,16 +16,16 @@ ) // ManifoldConfig defines the names of the manifolds on which a Manifold will depend. 
-type ManifoldConfig engine.AgentApiManifoldConfig +type ManifoldConfig engine.AgentAPIManifoldConfig // Manifold returns a dependency manifold that runs a diskmanager worker, // using the resource names defined in the supplied config. func Manifold(config ManifoldConfig) dependency.Manifold { - typedConfig := engine.AgentApiManifoldConfig(config) - return engine.AgentApiManifold(typedConfig, newWorker) + typedConfig := engine.AgentAPIManifoldConfig(config) + return engine.AgentAPIManifold(typedConfig, newWorker) } -// newWorker trivially wraps NewWorker for use in a engine.AgentApiManifold. +// newWorker trivially wraps NewWorker for use in a engine.AgentAPIManifold. func newWorker(a agent.Agent, apiCaller base.APICaller) (worker.Worker, error) { t := a.CurrentConfig().Tag() tag, ok := t.(names.MachineTag) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/environ/environ.go juju-core-2.0.0/src/github.com/juju/juju/worker/environ/environ.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/environ/environ.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/environ/environ.go 2016-10-13 14:31:49.000000000 +0000 @@ -5,6 +5,7 @@ import ( "github.com/juju/errors" + "gopkg.in/juju/names.v2" "github.com/juju/juju/environs" "github.com/juju/juju/watcher" @@ -16,6 +17,7 @@ type ConfigObserver interface { environs.EnvironConfigGetter WatchForModelConfigChanges() (watcher.NotifyWatcher, error) + WatchCredential(tag names.CloudCredentialTag) (watcher.NotifyWatcher, error) } // Config describes the dependencies of a Tracker. 
diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/environ/environ_test.go juju-core-2.0.0/src/github.com/juju/juju/worker/environ/environ_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/environ/environ_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/environ/environ_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -152,7 +152,7 @@ c.Assert(err, jc.ErrorIsNil) defer workertest.DirtyKill(c, tracker) - context.CloseNotify() + context.CloseModelConfigNotify() err = workertest.CheckKilled(c, tracker) c.Check(err, gc.ErrorMatches, "environ config watch closed") context.CheckCallNames(c, "ModelConfig", "CloudSpec", "WatchForModelConfigChanges") @@ -173,7 +173,7 @@ c.Check(err, jc.ErrorIsNil) defer workertest.DirtyKill(c, tracker) - context.SendNotify() + context.SendModelConfigNotify() err = workertest.CheckKilled(c, tracker) c.Check(err, gc.ErrorMatches, "cannot read environ config: blam ouch") context.CheckCallNames(c, "ModelConfig", "CloudSpec", "WatchForModelConfigChanges", "ModelConfig") @@ -194,7 +194,7 @@ c.Check(err, jc.ErrorIsNil) defer workertest.DirtyKill(c, tracker) - context.SendNotify() + context.SendModelConfigNotify() err = workertest.CheckKilled(c, tracker) c.Check(err, gc.ErrorMatches, "cannot update environ config: SetConfig is broken") context.CheckCallNames(c, "ModelConfig", "CloudSpec", "WatchForModelConfigChanges", "ModelConfig") @@ -223,7 +223,7 @@ timeout := time.After(coretesting.LongWait) attempt := time.After(0) - context.SendNotify() + context.SendModelConfigNotify() for { select { case <-attempt: diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/environ/fixture_test.go juju-core-2.0.0/src/github.com/juju/juju/worker/environ/fixture_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/environ/fixture_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/environ/fixture_test.go 2016-10-13 
14:31:49.000000000 +0000 @@ -38,11 +38,12 @@ } type runContext struct { - mu sync.Mutex - stub testing.Stub - cloud environs.CloudSpec - config map[string]interface{} - watcher *notifyWatcher + mu sync.Mutex + stub testing.Stub + cloud environs.CloudSpec + config map[string]interface{} + watcher *notifyWatcher + credWatcher *notifyWatcher } // SetConfig updates the configuration returned by ModelConfig. @@ -74,20 +75,20 @@ return config.New(config.NoDefaults, context.config) } -// KillNotify kills the watcher returned from WatchForModelConfigChanges with +// KillModelConfigNotify kills the watcher returned from WatchForModelConfigChanges with // the error configured in the enclosing fixture. -func (context *runContext) KillNotify() { +func (context *runContext) KillModelConfigNotify() { context.watcher.Kill() } -// SendNotify sends a value on the channel used by WatchForModelConfigChanges +// SendModelConfigNotify sends a value on the channel used by WatchForModelConfigChanges // results. -func (context *runContext) SendNotify() { +func (context *runContext) SendModelConfigNotify() { context.watcher.changes <- struct{}{} } -// CloseNotify closes the channel used by WatchForModelConfigChanges results. -func (context *runContext) CloseNotify() { +// CloseModelConfigNotify closes the channel used by WatchForModelConfigChanges results. +func (context *runContext) CloseModelConfigNotify() { close(context.watcher.changes) } @@ -99,6 +100,34 @@ if err := context.stub.NextErr(); err != nil { return nil, err } + return context.watcher, nil +} + +// KillCredentialNotify kills the watcher returned from WatchCredentialChanges with +// the error configured in the enclosing fixture. +func (context *runContext) KillCredentialNotify() { + context.credWatcher.Kill() +} + +// SendCredentialNotify sends a value on the channel used by WatchCredentialChanges +// results. 
+func (context *runContext) SendCredentialNotify() { + context.credWatcher.changes <- struct{}{} +} + +// CloseCredentialNotify closes the channel used by WatchCredentialChanges results. +func (context *runContext) CloseCredentialNotify() { + close(context.credWatcher.changes) +} + +// WatchCredential is part of the environ.ConfigObserver interface. +func (context *runContext) WatchCredential(cred names.CloudCredentialTag) (watcher.NotifyWatcher, error) { + context.mu.Lock() + defer context.mu.Unlock() + context.stub.AddCall("WatchCredential") + if err := context.stub.NextErr(); err != nil { + return nil, err + } return context.watcher, nil } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/environ/wait.go juju-core-2.0.0/src/github.com/juju/juju/worker/environ/wait.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/environ/wait.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/environ/wait.go 2016-10-13 14:31:49.000000000 +0000 @@ -31,6 +31,7 @@ // responsible for detecting and handling any watcher errors that may occur, // whether this func succeeds or fails. 
func WaitForEnviron( + // TODO(wallyworld) - pass in credential watcher w watcher.NotifyWatcher, getter environs.EnvironConfigGetter, newEnviron environs.NewEnvironFunc, diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/environ/wait_test.go juju-core-2.0.0/src/github.com/juju/juju/worker/environ/wait_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/environ/wait_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/environ/wait_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -58,7 +58,7 @@ c.Check(err, gc.ErrorMatches, "environ config watch closed") }() - context.CloseNotify() + context.CloseModelConfigNotify() select { case <-done: case <-time.After(coretesting.LongWait): @@ -86,7 +86,7 @@ c.Check(err, gc.ErrorMatches, "cannot read environ config: biff zonk") }() - context.SendNotify() + context.SendModelConfigNotify() select { case <-done: case <-time.After(coretesting.LongWait): @@ -121,7 +121,7 @@ } }() - context.SendNotify() + context.SendModelConfigNotify() select { case <-time.After(coretesting.ShortWait): case <-done: @@ -131,7 +131,7 @@ context.SetConfig(c, coretesting.Attrs{ "name": "expected-name", }) - context.SendNotify() + context.SendModelConfigNotify() select { case <-done: case <-time.After(coretesting.LongWait): diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/firewaller/firewaller_test.go juju-core-2.0.0/src/github.com/juju/juju/worker/firewaller/firewaller_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/firewaller/firewaller_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/firewaller/firewaller_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -114,8 +114,8 @@ } } -func (s *firewallerBaseSuite) addUnit(c *gc.C, svc *state.Application) (*state.Unit, *state.Machine) { - units, err := juju.AddUnits(s.State, svc, 1, nil) +func (s *firewallerBaseSuite) addUnit(c *gc.C, app *state.Application) (*state.Unit, 
*state.Machine) { + units, err := juju.AddUnits(s.State, app, app.Name(), 1, nil) c.Assert(err, jc.ErrorIsNil) u := units[0] id, err := u.AssignedMachineId() @@ -158,8 +158,8 @@ c.Assert(err, jc.ErrorIsNil) defer statetesting.AssertKillAndWait(c, fw) - svc := s.AddTestingService(c, "wordpress", s.charm) - u, m := s.addUnit(c, svc) + app := s.AddTestingService(c, "wordpress", s.charm) + u, m := s.addUnit(c, app) inst := s.startInstance(c, m) err = u.OpenPort("tcp", 80) @@ -180,11 +180,11 @@ c.Assert(err, jc.ErrorIsNil) defer statetesting.AssertKillAndWait(c, fw) - svc := s.AddTestingService(c, "wordpress", s.charm) + app := s.AddTestingService(c, "wordpress", s.charm) - err = svc.SetExposed() + err = app.SetExposed() c.Assert(err, jc.ErrorIsNil) - u, m := s.addUnit(c, svc) + u, m := s.addUnit(c, app) inst := s.startInstance(c, m) err = u.OpenPorts("tcp", 80, 90) @@ -205,23 +205,23 @@ c.Assert(err, jc.ErrorIsNil) defer statetesting.AssertKillAndWait(c, fw) - svc1 := s.AddTestingService(c, "wordpress", s.charm) - err = svc1.SetExposed() + app1 := s.AddTestingService(c, "wordpress", s.charm) + err = app1.SetExposed() c.Assert(err, jc.ErrorIsNil) - u1, m1 := s.addUnit(c, svc1) + u1, m1 := s.addUnit(c, app1) inst1 := s.startInstance(c, m1) err = u1.OpenPort("tcp", 80) c.Assert(err, jc.ErrorIsNil) err = u1.OpenPort("tcp", 8080) c.Assert(err, jc.ErrorIsNil) - svc2 := s.AddTestingService(c, "mysql", s.charm) + app2 := s.AddTestingService(c, "mysql", s.charm) c.Assert(err, jc.ErrorIsNil) - err = svc2.SetExposed() + err = app2.SetExposed() c.Assert(err, jc.ErrorIsNil) - u2, m2 := s.addUnit(c, svc2) + u2, m2 := s.addUnit(c, app2) inst2 := s.startInstance(c, m2) err = u2.OpenPort("tcp", 3306) c.Assert(err, jc.ErrorIsNil) @@ -243,15 +243,15 @@ c.Assert(err, jc.ErrorIsNil) defer statetesting.AssertKillAndWait(c, fw) - svc := s.AddTestingService(c, "wordpress", s.charm) - err = svc.SetExposed() + app := s.AddTestingService(c, "wordpress", s.charm) + err = app.SetExposed() 
c.Assert(err, jc.ErrorIsNil) // add a unit but don't start its instance yet. - u1, m1 := s.addUnit(c, svc) + u1, m1 := s.addUnit(c, app) // add another unit and start its instance, so that // we're sure the firewaller has seen the first instance. - u2, m2 := s.addUnit(c, svc) + u2, m2 := s.addUnit(c, app) inst2 := s.startInstance(c, m2) err = u2.OpenPort("tcp", 80) c.Assert(err, jc.ErrorIsNil) @@ -268,16 +268,16 @@ c.Assert(err, jc.ErrorIsNil) defer statetesting.AssertKillAndWait(c, fw) - svc := s.AddTestingService(c, "wordpress", s.charm) - err = svc.SetExposed() + app := s.AddTestingService(c, "wordpress", s.charm) + err = app.SetExposed() c.Assert(err, jc.ErrorIsNil) - u1, m1 := s.addUnit(c, svc) + u1, m1 := s.addUnit(c, app) inst1 := s.startInstance(c, m1) err = u1.OpenPort("tcp", 80) c.Assert(err, jc.ErrorIsNil) - u2, m2 := s.addUnit(c, svc) + u2, m2 := s.addUnit(c, app) inst2 := s.startInstance(c, m2) err = u2.OpenPort("tcp", 80) c.Assert(err, jc.ErrorIsNil) @@ -295,10 +295,10 @@ } func (s *InstanceModeSuite) TestStartWithState(c *gc.C) { - svc := s.AddTestingService(c, "wordpress", s.charm) - err := svc.SetExposed() + app := s.AddTestingService(c, "wordpress", s.charm) + err := app.SetExposed() c.Assert(err, jc.ErrorIsNil) - u, m := s.addUnit(c, svc) + u, m := s.addUnit(c, app) inst := s.startInstance(c, m) err = u.OpenPort("tcp", 80) @@ -316,7 +316,7 @@ s.assertPorts(c, inst, m.Id(), []network.PortRange{{80, 80, "tcp"}, {8080, 8080, "tcp"}}) - err = svc.SetExposed() + err = app.SetExposed() c.Assert(err, jc.ErrorIsNil) } @@ -325,8 +325,8 @@ c.Assert(err, jc.ErrorIsNil) inst := s.startInstance(c, m) - svc := s.AddTestingService(c, "wordpress", s.charm) - err = svc.SetExposed() + app := s.AddTestingService(c, "wordpress", s.charm) + err = app.SetExposed() c.Assert(err, jc.ErrorIsNil) // Starting the firewaller, no open ports. @@ -337,7 +337,7 @@ s.assertPorts(c, inst, m.Id(), nil) // Complete steps to open port. 
- u, err := svc.AddUnit() + u, err := app.AddUnit() c.Assert(err, jc.ErrorIsNil) err = u.AssignToMachine(m) c.Assert(err, jc.ErrorIsNil) @@ -352,8 +352,8 @@ c.Assert(err, jc.ErrorIsNil) inst := s.startInstance(c, m) - svc := s.AddTestingService(c, "wordpress", s.charm) - u, err := svc.AddUnit() + app := s.AddTestingService(c, "wordpress", s.charm) + u, err := app.AddUnit() c.Assert(err, jc.ErrorIsNil) err = u.AssignToMachine(m) c.Assert(err, jc.ErrorIsNil) @@ -368,7 +368,7 @@ s.assertPorts(c, inst, m.Id(), nil) // Expose service. - err = svc.SetExposed() + err = app.SetExposed() c.Assert(err, jc.ErrorIsNil) s.assertPorts(c, inst, m.Id(), []network.PortRange{{80, 80, "tcp"}}) } @@ -378,9 +378,9 @@ c.Assert(err, jc.ErrorIsNil) defer statetesting.AssertKillAndWait(c, fw) - svc := s.AddTestingService(c, "wordpress", s.charm) + app := s.AddTestingService(c, "wordpress", s.charm) - u, m := s.addUnit(c, svc) + u, m := s.addUnit(c, app) inst := s.startInstance(c, m) err = u.OpenPort("tcp", 80) c.Assert(err, jc.ErrorIsNil) @@ -391,13 +391,13 @@ s.assertPorts(c, inst, m.Id(), nil) // SeExposed opens the ports. - err = svc.SetExposed() + err = app.SetExposed() c.Assert(err, jc.ErrorIsNil) s.assertPorts(c, inst, m.Id(), []network.PortRange{{80, 80, "tcp"}, {8080, 8080, "tcp"}}) // ClearExposed closes the ports again. 
- err = svc.ClearExposed() + err = app.ClearExposed() c.Assert(err, jc.ErrorIsNil) s.assertPorts(c, inst, m.Id(), nil) @@ -408,16 +408,16 @@ c.Assert(err, jc.ErrorIsNil) defer statetesting.AssertKillAndWait(c, fw) - svc := s.AddTestingService(c, "wordpress", s.charm) - err = svc.SetExposed() + app := s.AddTestingService(c, "wordpress", s.charm) + err = app.SetExposed() c.Assert(err, jc.ErrorIsNil) - u1, m1 := s.addUnit(c, svc) + u1, m1 := s.addUnit(c, app) inst1 := s.startInstance(c, m1) err = u1.OpenPort("tcp", 80) c.Assert(err, jc.ErrorIsNil) - u2, m2 := s.addUnit(c, svc) + u2, m2 := s.addUnit(c, app) inst2 := s.startInstance(c, m2) err = u2.OpenPort("tcp", 80) c.Assert(err, jc.ErrorIsNil) @@ -440,11 +440,11 @@ c.Assert(err, jc.ErrorIsNil) defer statetesting.AssertKillAndWait(c, fw) - svc := s.AddTestingService(c, "wordpress", s.charm) - err = svc.SetExposed() + app := s.AddTestingService(c, "wordpress", s.charm) + err = app.SetExposed() c.Assert(err, jc.ErrorIsNil) - u, m := s.addUnit(c, svc) + u, m := s.addUnit(c, app) inst := s.startInstance(c, m) err = u.OpenPort("tcp", 80) c.Assert(err, jc.ErrorIsNil) @@ -456,7 +456,7 @@ c.Assert(err, jc.ErrorIsNil) err = u.Remove() c.Assert(err, jc.ErrorIsNil) - err = svc.Destroy() + err = app.Destroy() c.Assert(err, jc.ErrorIsNil) s.assertPorts(c, inst, m.Id(), nil) } @@ -466,20 +466,20 @@ c.Assert(err, jc.ErrorIsNil) defer statetesting.AssertKillAndWait(c, fw) - svc1 := s.AddTestingService(c, "wordpress", s.charm) - err = svc1.SetExposed() + app1 := s.AddTestingService(c, "wordpress", s.charm) + err = app1.SetExposed() c.Assert(err, jc.ErrorIsNil) - u1, m1 := s.addUnit(c, svc1) + u1, m1 := s.addUnit(c, app1) inst1 := s.startInstance(c, m1) err = u1.OpenPort("tcp", 80) c.Assert(err, jc.ErrorIsNil) - svc2 := s.AddTestingService(c, "mysql", s.charm) - err = svc2.SetExposed() + app2 := s.AddTestingService(c, "mysql", s.charm) + err = app2.SetExposed() c.Assert(err, jc.ErrorIsNil) - u2, m2 := s.addUnit(c, svc2) + u2, m2 := 
s.addUnit(c, app2) inst2 := s.startInstance(c, m2) err = u2.OpenPort("tcp", 3306) c.Assert(err, jc.ErrorIsNil) @@ -492,14 +492,14 @@ c.Assert(err, jc.ErrorIsNil) err = u2.Remove() c.Assert(err, jc.ErrorIsNil) - err = svc2.Destroy() + err = app2.Destroy() c.Assert(err, jc.ErrorIsNil) err = u1.EnsureDead() c.Assert(err, jc.ErrorIsNil) err = u1.Remove() c.Assert(err, jc.ErrorIsNil) - err = svc1.Destroy() + err = app1.Destroy() c.Assert(err, jc.ErrorIsNil) s.assertPorts(c, inst1, m1.Id(), nil) @@ -511,11 +511,11 @@ c.Assert(err, jc.ErrorIsNil) defer statetesting.AssertKillAndWait(c, fw) - svc := s.AddTestingService(c, "wordpress", s.charm) - err = svc.SetExposed() + app := s.AddTestingService(c, "wordpress", s.charm) + err = app.SetExposed() c.Assert(err, jc.ErrorIsNil) - u, m := s.addUnit(c, svc) + u, m := s.addUnit(c, app) inst := s.startInstance(c, m) err = u.OpenPort("tcp", 80) c.Assert(err, jc.ErrorIsNil) @@ -527,7 +527,7 @@ c.Assert(err, jc.ErrorIsNil) err = u.Remove() c.Assert(err, jc.ErrorIsNil) - err = svc.Destroy() + err = app.Destroy() c.Assert(err, jc.ErrorIsNil) // Kill machine. 
@@ -544,11 +544,11 @@ c.Assert(err, jc.ErrorIsNil) defer statetesting.AssertKillAndWait(c, fw) - svc := s.AddTestingService(c, "wordpress", s.charm) - err = svc.SetExposed() + app := s.AddTestingService(c, "wordpress", s.charm) + err = app.SetExposed() c.Assert(err, jc.ErrorIsNil) - u, m := s.addUnit(c, svc) + u, m := s.addUnit(c, app) inst := s.startInstance(c, m) err = u.OpenPort("tcp", 80) c.Assert(err, jc.ErrorIsNil) @@ -573,10 +573,10 @@ } func (s *InstanceModeSuite) TestStartWithStateOpenPortsBroken(c *gc.C) { - svc := s.AddTestingService(c, "wordpress", s.charm) - err := svc.SetExposed() + app := s.AddTestingService(c, "wordpress", s.charm) + err := app.SetExposed() c.Assert(err, jc.ErrorIsNil) - u, m := s.addUnit(c, svc) + u, m := s.addUnit(c, app) inst := s.startInstance(c, m) err = u.OpenPort("tcp", 80) @@ -631,23 +631,23 @@ c.Assert(err, jc.ErrorIsNil) defer statetesting.AssertKillAndWait(c, fw) - svc1 := s.AddTestingService(c, "wordpress", s.charm) - err = svc1.SetExposed() + app1 := s.AddTestingService(c, "wordpress", s.charm) + err = app1.SetExposed() c.Assert(err, jc.ErrorIsNil) - u1, m1 := s.addUnit(c, svc1) + u1, m1 := s.addUnit(c, app1) s.startInstance(c, m1) err = u1.OpenPorts("tcp", 80, 90) c.Assert(err, jc.ErrorIsNil) err = u1.OpenPort("tcp", 8080) c.Assert(err, jc.ErrorIsNil) - svc2 := s.AddTestingService(c, "moinmoin", s.charm) + app2 := s.AddTestingService(c, "moinmoin", s.charm) c.Assert(err, jc.ErrorIsNil) - err = svc2.SetExposed() + err = app2.SetExposed() c.Assert(err, jc.ErrorIsNil) - u2, m2 := s.addUnit(c, svc2) + u2, m2 := s.addUnit(c, app2) s.startInstance(c, m2) err = u2.OpenPorts("tcp", 80, 90) c.Assert(err, jc.ErrorIsNil) @@ -675,8 +675,8 @@ c.Assert(err, jc.ErrorIsNil) s.startInstance(c, m) - svc := s.AddTestingService(c, "wordpress", s.charm) - u, err := svc.AddUnit() + app := s.AddTestingService(c, "wordpress", s.charm) + u, err := app.AddUnit() c.Assert(err, jc.ErrorIsNil) err = u.AssignToMachine(m) c.Assert(err, 
jc.ErrorIsNil) @@ -691,7 +691,7 @@ s.assertEnvironPorts(c, nil) // Expose service. - err = svc.SetExposed() + err = app.SetExposed() c.Assert(err, jc.ErrorIsNil) s.assertEnvironPorts(c, []network.PortRange{{80, 80, "tcp"}}) } @@ -701,11 +701,11 @@ fw, err := firewaller.NewFirewaller(s.firewaller) c.Assert(err, jc.ErrorIsNil) - svc := s.AddTestingService(c, "wordpress", s.charm) - err = svc.SetExposed() + app := s.AddTestingService(c, "wordpress", s.charm) + err = app.SetExposed() c.Assert(err, jc.ErrorIsNil) - u, m := s.addUnit(c, svc) + u, m := s.addUnit(c, app) s.startInstance(c, m) err = u.OpenPorts("tcp", 80, 90) c.Assert(err, jc.ErrorIsNil) @@ -736,11 +736,11 @@ fw, err := firewaller.NewFirewaller(s.firewaller) c.Assert(err, jc.ErrorIsNil) - svc := s.AddTestingService(c, "wordpress", s.charm) - err = svc.SetExposed() + app := s.AddTestingService(c, "wordpress", s.charm) + err = app.SetExposed() c.Assert(err, jc.ErrorIsNil) - u, m := s.addUnit(c, svc) + u, m := s.addUnit(c, app) s.startInstance(c, m) err = u.OpenPort("tcp", 80) c.Assert(err, jc.ErrorIsNil) @@ -753,7 +753,7 @@ err = worker.Stop(fw) c.Assert(err, jc.ErrorIsNil) - err = svc.ClearExposed() + err = app.ClearExposed() c.Assert(err, jc.ErrorIsNil) // Start firewaller and check port. 
@@ -769,11 +769,11 @@ fw, err := firewaller.NewFirewaller(s.firewaller) c.Assert(err, jc.ErrorIsNil) - svc1 := s.AddTestingService(c, "wordpress", s.charm) - err = svc1.SetExposed() + app1 := s.AddTestingService(c, "wordpress", s.charm) + err = app1.SetExposed() c.Assert(err, jc.ErrorIsNil) - u1, m1 := s.addUnit(c, svc1) + u1, m1 := s.addUnit(c, app1) s.startInstance(c, m1) err = u1.OpenPort("tcp", 80) c.Assert(err, jc.ErrorIsNil) @@ -786,11 +786,11 @@ err = worker.Stop(fw) c.Assert(err, jc.ErrorIsNil) - svc2 := s.AddTestingService(c, "moinmoin", s.charm) - err = svc2.SetExposed() + app2 := s.AddTestingService(c, "moinmoin", s.charm) + err = app2.SetExposed() c.Assert(err, jc.ErrorIsNil) - u2, m2 := s.addUnit(c, svc2) + u2, m2 := s.addUnit(c, app2) s.startInstance(c, m2) err = u2.OpenPort("tcp", 80) c.Assert(err, jc.ErrorIsNil) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/firewaller/manifold.go juju-core-2.0.0/src/github.com/juju/juju/worker/firewaller/manifold.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/firewaller/manifold.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/firewaller/manifold.go 2016-10-13 14:31:49.000000000 +0000 @@ -14,12 +14,12 @@ ) // ManifoldConfig describes the resources used by the firewaller worker. -type ManifoldConfig engine.ApiManifoldConfig +type ManifoldConfig engine.APIManifoldConfig // Manifold returns a Manifold that encapsulates the firewaller worker. 
func Manifold(config ManifoldConfig) dependency.Manifold { - return engine.ApiManifold( - engine.ApiManifoldConfig(config), + return engine.APIManifold( + engine.APIManifoldConfig(config), manifoldStart, ) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/fortress/fortress.go juju-core-2.0.0/src/github.com/juju/juju/worker/fortress/fortress.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/fortress/fortress.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/fortress/fortress.go 2016-10-13 14:31:49.000000000 +0000 @@ -7,7 +7,7 @@ "sync" "github.com/juju/errors" - "launchpad.net/tomb" + "gopkg.in/tomb.v1" ) // fortress coordinates between clients that access it as a Guard and as a Guest. diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/gate/flag.go juju-core-2.0.0/src/github.com/juju/juju/worker/gate/flag.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/gate/flag.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/gate/flag.go 2016-10-13 14:31:49.000000000 +0000 @@ -5,7 +5,7 @@ import ( "github.com/juju/errors" - "launchpad.net/tomb" + "gopkg.in/tomb.v1" "github.com/juju/juju/cmd/jujud/agent/engine" "github.com/juju/juju/worker" diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/gate/manifold.go juju-core-2.0.0/src/github.com/juju/juju/worker/gate/manifold.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/gate/manifold.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/gate/manifold.go 2016-10-13 14:31:49.000000000 +0000 @@ -7,7 +7,7 @@ "sync" "github.com/juju/errors" - "launchpad.net/tomb" + "gopkg.in/tomb.v1" "github.com/juju/juju/worker" "github.com/juju/juju/worker/dependency" diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/hostkeyreporter/worker.go juju-core-2.0.0/src/github.com/juju/juju/worker/hostkeyreporter/worker.go --- 
juju-core-2.0~beta15/src/github.com/juju/juju/worker/hostkeyreporter/worker.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/hostkeyreporter/worker.go 2016-10-13 14:31:49.000000000 +0000 @@ -10,7 +10,7 @@ "github.com/juju/errors" "github.com/juju/loggo" - "launchpad.net/tomb" + "gopkg.in/tomb.v1" "github.com/juju/juju/worker" "github.com/juju/juju/worker/dependency" diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/identityfilewriter/manifold.go juju-core-2.0.0/src/github.com/juju/juju/worker/identityfilewriter/manifold.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/identityfilewriter/manifold.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/identityfilewriter/manifold.go 2016-10-13 14:31:49.000000000 +0000 @@ -18,16 +18,16 @@ ) // ManifoldConfig defines the names of the manifolds on which a Manifold will depend. -type ManifoldConfig engine.AgentApiManifoldConfig +type ManifoldConfig engine.AgentAPIManifoldConfig // Manifold returns a dependency manifold that runs an identity file writer worker, // using the resource names defined in the supplied config. func Manifold(config ManifoldConfig) dependency.Manifold { - typedConfig := engine.AgentApiManifoldConfig(config) - return engine.AgentApiManifold(typedConfig, newWorker) + typedConfig := engine.AgentAPIManifoldConfig(config) + return engine.AgentAPIManifold(typedConfig, newWorker) } -// newWorker trivially wraps NewWorker for use in a engine.AgentApiManifold. +// newWorker trivially wraps NewWorker for use in a engine.AgentAPIManifold. 
func newWorker(a agent.Agent, apiCaller base.APICaller) (worker.Worker, error) { cfg := a.CurrentConfig() diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/identityfilewriter/manifold_test.go juju-core-2.0.0/src/github.com/juju/juju/worker/identityfilewriter/manifold_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/identityfilewriter/manifold_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/identityfilewriter/manifold_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -37,8 +37,8 @@ } func (s *ManifoldSuite) TestMachine(c *gc.C) { - config := identityfilewriter.ManifoldConfig(enginetest.AgentApiManifoldTestConfig()) - _, err := enginetest.RunAgentApiManifold( + config := identityfilewriter.ManifoldConfig(enginetest.AgentAPIManifoldTestConfig()) + _, err := enginetest.RunAgentAPIManifold( identityfilewriter.Manifold(config), &fakeAgent{tag: names.NewMachineTag("42")}, mockAPICaller(multiwatcher.JobManageModel)) @@ -47,8 +47,8 @@ } func (s *ManifoldSuite) TestMachineNotModelManagerErrors(c *gc.C) { - config := identityfilewriter.ManifoldConfig(enginetest.AgentApiManifoldTestConfig()) - _, err := enginetest.RunAgentApiManifold( + config := identityfilewriter.ManifoldConfig(enginetest.AgentAPIManifoldTestConfig()) + _, err := enginetest.RunAgentAPIManifold( identityfilewriter.Manifold(config), &fakeAgent{tag: names.NewMachineTag("42")}, mockAPICaller(multiwatcher.JobHostUnits)) @@ -57,8 +57,8 @@ } func (s *ManifoldSuite) TestNonMachineAgent(c *gc.C) { - config := identityfilewriter.ManifoldConfig(enginetest.AgentApiManifoldTestConfig()) - _, err := enginetest.RunAgentApiManifold( + config := identityfilewriter.ManifoldConfig(enginetest.AgentAPIManifoldTestConfig()) + _, err := enginetest.RunAgentAPIManifold( identityfilewriter.Manifold(config), &fakeAgent{tag: names.NewUnitTag("foo/0")}, mockAPICaller("")) diff -Nru 
juju-core-2.0~beta15/src/github.com/juju/juju/worker/instancepoller/aggregate_test.go juju-core-2.0.0/src/github.com/juju/juju/worker/instancepoller/aggregate_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/instancepoller/aggregate_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/instancepoller/aggregate_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -9,6 +9,7 @@ "time" "github.com/juju/errors" + jujutesting "github.com/juju/testing" jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" @@ -48,7 +49,7 @@ } func (t *testInstance) Status() instance.InstanceStatus { - return instance.InstanceStatus{Status: status.StatusUnknown, Message: t.status} + return instance.InstanceStatus{Status: status.Unknown, Message: t.status} } type testInstanceGetter struct { @@ -90,7 +91,7 @@ // We setup a couple variables here so that we can use them locally without // type assertions. Then we use them in the aggregatorConfig. testGetter := new(testInstanceGetter) - clock := testing.NewClock(time.Now()) + clock := jujutesting.NewClock(time.Now()) delay := time.Minute cfg := aggregatorConfig{ Clock: clock, @@ -136,7 +137,7 @@ // We setup a couple variables here so that we can use them locally without // type assertions. Then we use them in the aggregatorConfig. testGetter := new(testInstanceGetter) - clock := testing.NewClock(time.Now()) + clock := jujutesting.NewClock(time.Now()) delay := time.Minute cfg := aggregatorConfig{ Clock: clock, @@ -196,7 +197,7 @@ func (s *aggregateSuite) TestKillingWorkerKillsPendinReqs(c *gc.C) { // Setup local variables. testGetter := new(testInstanceGetter) - clock := testing.NewClock(time.Now()) + clock := jujutesting.NewClock(time.Now()) delay := time.Minute cfg := aggregatorConfig{ Clock: clock, @@ -251,7 +252,7 @@ func (s *aggregateSuite) TestMultipleBatches(c *gc.C) { // Setup some local variables. 
testGetter := new(testInstanceGetter) - clock := testing.NewClock(time.Now()) + clock := jujutesting.NewClock(time.Now()) delay := time.Second cfg := aggregatorConfig{ Clock: clock, @@ -334,7 +335,7 @@ func (s *aggregateSuite) TestInstancesErrors(c *gc.C) { // Setup local variables. testGetter := new(testInstanceGetter) - clock := testing.NewClock(time.Now()) + clock := jujutesting.NewClock(time.Now()) delay := time.Millisecond cfg := aggregatorConfig{ Clock: clock, @@ -373,7 +374,7 @@ func (s *aggregateSuite) TestPartialInstanceErrors(c *gc.C) { testGetter := new(testInstanceGetter) - clock := testing.NewClock(time.Now()) + clock := jujutesting.NewClock(time.Now()) delay := time.Second cfg := aggregatorConfig{ @@ -432,7 +433,7 @@ c.Assert(testGetter.counter, gc.Equals, int32(1)) } -func waitAlarms(c *gc.C, clock *testing.Clock, count int) { +func waitAlarms(c *gc.C, clock *jujutesting.Clock, count int) { timeout := time.After(testing.LongWait) for i := 0; i < count; i++ { select { diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/instancepoller/machine_test.go juju-core-2.0.0/src/github.com/juju/juju/worker/instancepoller/machine_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/instancepoller/machine_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/instancepoller/machine_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -13,7 +13,9 @@ "sync/atomic" "time" + gitjujutesting "github.com/juju/testing" jc "github.com/juju/testing/checkers" + "github.com/juju/utils/clock" gc "gopkg.in/check.v1" "gopkg.in/juju/names.v2" @@ -49,7 +51,7 @@ s.PatchValue(&ShortPoll, coretesting.ShortWait/10) s.PatchValue(&LongPoll, coretesting.ShortWait/10) - go runMachine(context, m, nil, died) + go runMachine(context, m, nil, died, clock.WallClock) time.Sleep(coretesting.ShortWait) killMachineLoop(c, m, context.dyingc, died) @@ -59,10 +61,36 @@ c.Assert(m.instStatusInfo, gc.Equals, "running") } +func (s *machineSuite) 
TestSetsInstanceInfoDeadMachineInitially(c *gc.C) { + context := &testMachineContext{ + getInstanceInfo: instanceInfoGetter(c, "i1234", testAddrs, "deleting", nil), + dyingc: make(chan struct{}), + } + m := &testMachine{ + tag: names.NewMachineTag("99"), + instanceId: "i1234", + refresh: func() error { return nil }, + life: params.Dead, + } + died := make(chan machine) + // Change the poll intervals to be short, so that we know + // that we've polled (probably) at least a few times. + s.PatchValue(&ShortPoll, coretesting.ShortWait/10) + s.PatchValue(&LongPoll, coretesting.ShortWait/10) + + go runMachine(context, m, nil, died, clock.WallClock) + time.Sleep(coretesting.ShortWait) + + killMachineLoop(c, m, context.dyingc, died) + c.Assert(context.killErr, gc.Equals, nil) + c.Assert(m.setAddressCount, gc.Equals, 0) + c.Assert(m.instStatusInfo, gc.Equals, "deleting") +} + func (s *machineSuite) TestShortPollIntervalWhenNoAddress(c *gc.C) { s.PatchValue(&ShortPoll, 1*time.Millisecond) s.PatchValue(&LongPoll, coretesting.LongWait) - count := countPolls(c, nil, "i1234", "running", status.StatusStarted) + count := countPolls(c, nil, "i1234", "running", status.Started) c.Assert(count, jc.GreaterThan, 2) } @@ -76,17 +104,77 @@ func (s *machineSuite) TestShortPollIntervalWhenNotStarted(c *gc.C) { s.PatchValue(&ShortPoll, 1*time.Millisecond) s.PatchValue(&LongPoll, coretesting.LongWait) - count := countPolls(c, testAddrs, "i1234", "pending", status.StatusPending) + count := countPolls(c, testAddrs, "i1234", "pending", status.Pending) c.Assert(count, jc.GreaterThan, 2) } func (s *machineSuite) TestShortPollIntervalWhenNotProvisioned(c *gc.C) { s.PatchValue(&ShortPoll, 1*time.Millisecond) s.PatchValue(&LongPoll, coretesting.LongWait) - count := countPolls(c, testAddrs, "", "pending", status.StatusPending) + count := countPolls(c, testAddrs, "", "pending", status.Pending) c.Assert(count, gc.Equals, 0) } +func (s *machineSuite) TestNoPollWhenNotProvisioned(c *gc.C) { + 
s.PatchValue(&ShortPoll, 1*time.Millisecond) + s.PatchValue(&LongPoll, coretesting.LongWait) + + polled := make(chan struct{}, 1) + getInstanceInfo := func(id instance.Id) (instanceInfo, error) { + select { + case polled <- struct{}{}: + default: + } + return instanceInfo{testAddrs, instance.InstanceStatus{Status: status.Unknown, Message: "pending"}}, nil + } + context := &testMachineContext{ + getInstanceInfo: getInstanceInfo, + dyingc: make(chan struct{}), + } + m := &testMachine{ + tag: names.NewMachineTag("99"), + instanceId: instance.Id(""), + refresh: func() error { return nil }, + addresses: testAddrs, + life: params.Alive, + status: "pending", + } + died := make(chan machine) + + clock := gitjujutesting.NewClock(time.Time{}) + changed := make(chan struct{}) + go runMachine(context, m, changed, died, clock) + + expectPoll := func() { + // worker should be waiting for ShortPoll + select { + case <-clock.Alarms(): + case <-time.After(coretesting.LongWait): + c.Fatalf("expected time-based polling") + } + clock.Advance(ShortPoll) + } + + expectPoll() + expectPoll() + select { + case <-polled: + c.Fatalf("unexpected instance poll") + case <-time.After(coretesting.ShortWait): + } + + m.setInstanceId("inst-ance") + expectPoll() + select { + case <-polled: + case <-time.After(coretesting.LongWait): + c.Fatalf("expected instance poll") + } + + killMachineLoop(c, m, context.dyingc, died) + c.Assert(context.killErr, gc.Equals, nil) +} + func (s *machineSuite) TestShortPollIntervalExponent(c *gc.C) { s.PatchValue(&ShortPoll, 1*time.Microsecond) s.PatchValue(&LongPoll, coretesting.LongWait) @@ -97,7 +185,7 @@ // ShortPollBackoff of ShortWait/ShortPoll, given that sleep will // sleep for at least the requested interval. 
maxCount := int(math.Log(float64(coretesting.ShortWait)/float64(ShortPoll))/math.Log(ShortPollBackoff) + 1) - count := countPolls(c, nil, "i1234", "", status.StatusStarted) + count := countPolls(c, nil, "i1234", "", status.Started) c.Assert(count, jc.GreaterThan, 2) c.Assert(count, jc.LessThan, maxCount) c.Logf("actual count: %v; max %v", count, maxCount) @@ -106,7 +194,7 @@ func (s *machineSuite) TestLongPollIntervalWhenHasAllInstanceInfo(c *gc.C) { s.PatchValue(&ShortPoll, coretesting.LongWait) s.PatchValue(&LongPoll, 1*time.Millisecond) - count := countPolls(c, testAddrs, "i1234", "running", status.StatusStarted) + count := countPolls(c, testAddrs, "i1234", "running", status.Started) c.Assert(count, jc.GreaterThan, 2) } @@ -122,7 +210,7 @@ if addrs == nil { return instanceInfo{}, fmt.Errorf("no instance addresses available") } - return instanceInfo{addrs, instance.InstanceStatus{Status: status.StatusUnknown, Message: instStatus}}, nil + return instanceInfo{addrs, instance.InstanceStatus{Status: status.Unknown, Message: instStatus}}, nil } context := &testMachineContext{ getInstanceInfo: getInstanceInfo, @@ -138,7 +226,7 @@ } died := make(chan machine) - go runMachine(context, m, nil, died) + go runMachine(context, m, nil, died, clock.WallClock) time.Sleep(coretesting.ShortWait) killMachineLoop(c, m, context.dyingc, died) @@ -164,7 +252,7 @@ } died := make(chan machine) changed := make(chan struct{}) - go runMachine(context, m, changed, died) + go runMachine(context, m, changed, died, clock.WallClock) select { case <-died: c.Fatalf("machine died prematurely") @@ -241,7 +329,7 @@ mutate(m, expectErr) died := make(chan machine) changed := make(chan struct{}, 1) - go runMachine(context, m, changed, died) + go runMachine(context, m, changed, died, clock.WallClock) changed <- struct{}{} select { case <-died: @@ -267,7 +355,7 @@ return func(id instance.Id) (instanceInfo, error) { c.Check(id, gc.Equals, expectId) - return instanceInfo{addrs, 
instance.InstanceStatus{Status: status.StatusUnknown, Message: instanceStatus}}, err + return instanceInfo{addrs, instance.InstanceStatus{Status: status.Unknown, Message: instanceStatus}}, err } } @@ -328,6 +416,8 @@ } func (m *testMachine) InstanceId() (instance.Id, error) { + m.mu.Lock() + defer m.mu.Unlock() if m.instanceId == "" { err := ¶ms.Error{ Code: params.CodeNotProvisioned, @@ -338,6 +428,12 @@ return m.instanceId, m.instanceIdErr } +func (m *testMachine) setInstanceId(id instance.Id) { + m.mu.Lock() + defer m.mu.Unlock() + m.instanceId = id +} + // This is stubbed out for testing. var MachineStatus = func(m *testMachine) (params.StatusResult, error) { return params.StatusResult{Status: m.status.String()}, nil diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/instancepoller/updater.go juju-core-2.0.0/src/github.com/juju/juju/worker/instancepoller/updater.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/instancepoller/updater.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/instancepoller/updater.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,11 +4,11 @@ package instancepoller import ( - "fmt" "time" "github.com/juju/errors" "github.com/juju/loggo" + "github.com/juju/utils/clock" "gopkg.in/juju/names.v2" "github.com/juju/juju/apiserver/params" @@ -137,7 +137,8 @@ } c = make(chan struct{}) p.machines[tag] = c - go runMachine(p.context.newMachineContext(), m, c, p.machineDead) + // TODO(fwereade): 2016-03-17 lp:1558657 + go runMachine(p.context.newMachineContext(), m, c, p.machineDead, clock.WallClock) } else { select { case <-p.context.dying(): @@ -151,7 +152,7 @@ // runMachine processes the address and status publishing for a given machine. // We assume that the machine is alive when this is first called. 
-func runMachine(context machineContext, m machine, changed <-chan struct{}, died chan<- machine) { +func runMachine(context machineContext, m machine, changed <-chan struct{}, died chan<- machine, clock clock.Clock) { defer func() { // We can't just send on the died channel because the // central loop might be trying to write to us on the @@ -164,53 +165,63 @@ } } }() - if err := machineLoop(context, m, changed); err != nil { + if err := machineLoop(context, m, changed, clock); err != nil { context.kill(err) } } -func machineLoop(context machineContext, m machine, changed <-chan struct{}) error { +func machineLoop(context machineContext, m machine, lifeChanged <-chan struct{}, clock clock.Clock) error { // Use a short poll interval when initially waiting for // a machine's address and machine agent to start, and a long one when it already // has an address and the machine agent is started. pollInterval := ShortPoll - pollInstance := true - for { - if pollInstance { - instInfo, err := pollInstanceInfo(context, m) - if err != nil && !params.IsCodeNotProvisioned(err) { - return err + pollInstance := func() error { + instInfo, err := pollInstanceInfo(context, m) + if err != nil { + return err + } + + machineStatus := status.Pending + if err == nil { + if statusInfo, err := m.Status(); err != nil { + logger.Warningf("cannot get current machine status for machine %v: %v", m.Id(), err) + } else { + // TODO(perrito666) add status validation. + machineStatus = status.Status(statusInfo.Status) } - machineStatus := status.StatusPending - if err == nil { - if statusInfo, err := m.Status(); err != nil { - logger.Warningf("cannot get current machine status for machine %v: %v", m.Id(), err) - } else { - // TODO(perrito666) add status validation. 
- machineStatus = status.Status(statusInfo.Status) - } + } + + // the extra condition below (checking allocating/pending) is here to improve user experience + // without it the instance status will say "pending" for +10 minutes after the agent comes up to "started" + if instInfo.status.Status != status.Allocating && instInfo.status.Status != status.Pending { + if len(instInfo.addresses) > 0 && machineStatus == status.Started { + // We've got at least one address and a status and instance is started, so poll infrequently. + pollInterval = LongPoll + } else if pollInterval < LongPoll { + // We have no addresses or not started - poll increasingly rarely + // until we do. + pollInterval = time.Duration(float64(pollInterval) * ShortPollBackoff) } - // the extra condition below (checking allocating/pending) is here to improve user experience - // without it the instance status will say "pending" for +10 minutes after the agent comes up to "started" - if instInfo.status.Status != status.StatusAllocating && instInfo.status.Status != status.StatusPending { - if len(instInfo.addresses) > 0 && machineStatus == status.StatusStarted { - // We've got at least one address and a status and instance is started, so poll infrequently. - pollInterval = LongPoll - } else if pollInterval < LongPoll { - // We have no addresses or not started - poll increasingly rarely - // until we do. 
- pollInterval = time.Duration(float64(pollInterval) * ShortPollBackoff) + } + return nil + } + + shouldPollInstance := true + for { + if shouldPollInstance { + if err := pollInstance(); err != nil { + if !params.IsCodeNotProvisioned(err) { + return errors.Trace(err) } } - pollInstance = false + shouldPollInstance = false } select { case <-context.dying(): return context.errDying() - case <-time.After(pollInterval): - // TODO(fwereade): 2016-03-17 lp:1558657 - pollInstance = true - case <-changed: + case <-clock.After(pollInterval): + shouldPollInstance = true + case <-lifeChanged: if err := m.Refresh(); err != nil { return err } @@ -228,26 +239,25 @@ instId, err := m.InstanceId() // We can't ask the machine for its addresses if it isn't provisioned yet. if params.IsCodeNotProvisioned(err) { - return instInfo, err + return instanceInfo{}, err } if err != nil { - return instInfo, fmt.Errorf("cannot get machine's instance id: %v", err) + return instanceInfo{}, errors.Annotate(err, "cannot get machine's instance id") } instInfo, err = context.instanceInfo(instId) if err != nil { // TODO (anastasiamac 2016-02-01) This does not look like it needs to be removed now. if params.IsCodeNotImplemented(err) { - return instInfo, err + return instanceInfo{}, err } logger.Warningf("cannot get instance info for instance %q: %v", instId, err) return instInfo, nil } - instStat, err := m.InstanceStatus() - if err != nil { + if instStat, err := m.InstanceStatus(); err != nil { // This should never occur since the machine is provisioned. // But just in case, we reset polled status so we try again next time. logger.Warningf("cannot get current instance status for machine %v: %v", m.Id(), err) - instInfo.status = instance.InstanceStatus{status.StatusUnknown, ""} + instInfo.status = instance.InstanceStatus{status.Unknown, ""} } else { // TODO(perrito666) add status validation. 
currentInstStatus := instance.InstanceStatus{ @@ -258,20 +268,25 @@ logger.Infof("machine %q instance status changed from %q to %q", m.Id(), currentInstStatus, instInfo.status) if err = m.SetInstanceStatus(instInfo.status.Status, instInfo.status.Message, nil); err != nil { logger.Errorf("cannot set instance status on %q: %v", m, err) + return instanceInfo{}, err } } + } - providerAddresses, err := m.ProviderAddresses() - if err != nil { - return instInfo, err - } - if !addressesEqual(providerAddresses, instInfo.addresses) { - logger.Infof("machine %q has new addresses: %v", m.Id(), instInfo.addresses) - if err = m.SetProviderAddresses(instInfo.addresses...); err != nil { - logger.Errorf("cannot set addresses on %q: %v", m, err) + if m.Life() != params.Dead { + providerAddresses, err := m.ProviderAddresses() + if err != nil { + return instanceInfo{}, err + } + if !addressesEqual(providerAddresses, instInfo.addresses) { + logger.Infof("machine %q has new addresses: %v", m.Id(), instInfo.addresses) + if err := m.SetProviderAddresses(instInfo.addresses...); err != nil { + logger.Errorf("cannot set addresses on %q: %v", m, err) + return instanceInfo{}, err + } } } - return instInfo, err + return instInfo, nil } // addressesEqual compares the addresses of the machine and the instance information. diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/instancepoller/updater_test.go juju-core-2.0.0/src/github.com/juju/juju/worker/instancepoller/updater_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/instancepoller/updater_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/instancepoller/updater_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -97,7 +97,7 @@ // Signal that we're in Status. 
waitStatus <- struct{}{} return params.StatusResult{ - Status: status.StatusPending.String(), + Status: status.Pending.String(), Info: "", Data: map[string]interface{}{}, Since: nil, diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/instancepoller/worker_test.go juju-core-2.0.0/src/github.com/juju/juju/worker/instancepoller/worker_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/instancepoller/worker_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/instancepoller/worker_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -101,7 +101,7 @@ } instanceStatus, err := m.InstanceStatus() c.Logf("instance message is: %q", instanceStatus.Info) - c.Assert(instanceStatus.Status, gc.Equals, status.StatusPending.String()) + c.Assert(instanceStatus.Status, gc.Equals, status.Pending.String()) stm, err := s.State.Machine(m.Id()) c.Assert(err, jc.ErrorIsNil) return len(stm.Addresses()) == 0 @@ -128,7 +128,7 @@ } // Machines in second half still have no addresses, nor status. instanceStatus, err := m.InstanceStatus() - c.Assert(instanceStatus.Status, gc.Equals, status.StatusPending.String()) + c.Assert(instanceStatus.Status, gc.Equals, status.Pending.String()) stm, err := s.State.Machine(m.Id()) c.Assert(err, jc.ErrorIsNil) return len(stm.Addresses()) == 0 diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/introspection/manifold.go juju-core-2.0.0/src/github.com/juju/juju/worker/introspection/manifold.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/introspection/manifold.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/introspection/manifold.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,53 +0,0 @@ -// Copyright 2016 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. 
- -package introspection - -import ( - "runtime" - - "github.com/juju/errors" - "github.com/juju/loggo" - - "github.com/juju/juju/agent" - "github.com/juju/juju/worker" - "github.com/juju/juju/worker/dependency" -) - -var logger = loggo.GetLogger("juju.worker.introspection") - -// ManifoldConfig describes the resources required to construct the -// introspection worker. -type ManifoldConfig struct { - AgentName string - WorkerFunc func(Config) (worker.Worker, error) -} - -// Manifold returns a Manifold which encapsulates the introspection worker. -func Manifold(config ManifoldConfig) dependency.Manifold { - return dependency.Manifold{ - Inputs: []string{config.AgentName}, - Start: func(context dependency.Context) (worker.Worker, error) { - // Since the worker listens on an abstract domain socket, this - // is only available on linux. - if runtime.GOOS != "linux" { - logger.Debugf("introspection worker not supported on %q", runtime.GOOS) - return nil, dependency.ErrUninstall - } - - var a agent.Agent - if err := context.Get(config.AgentName, &a); err != nil { - return nil, errors.Trace(err) - } - - socketName := "jujud-" + a.CurrentConfig().Tag().String() - w, err := config.WorkerFunc(Config{ - SocketName: socketName, - }) - if err != nil { - return nil, errors.Trace(err) - } - return w, nil - }, - } -} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/introspection/manifold_test.go juju-core-2.0.0/src/github.com/juju/juju/worker/introspection/manifold_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/introspection/manifold_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/introspection/manifold_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,132 +0,0 @@ -// Copyright 2016 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. 
- -package introspection_test - -import ( - "runtime" - - "github.com/juju/errors" - "github.com/juju/testing" - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" - "gopkg.in/juju/names.v2" - - "github.com/juju/juju/agent" - "github.com/juju/juju/api/base" - "github.com/juju/juju/worker" - "github.com/juju/juju/worker/dependency" - dt "github.com/juju/juju/worker/dependency/testing" - "github.com/juju/juju/worker/introspection" -) - -type ManifoldSuite struct { - testing.IsolationSuite - manifold dependency.Manifold - startErr error -} - -var _ = gc.Suite(&ManifoldSuite{}) - -func (s *ManifoldSuite) SetUpTest(c *gc.C) { - s.IsolationSuite.SetUpTest(c) - s.startErr = nil - s.manifold = introspection.Manifold(introspection.ManifoldConfig{ - AgentName: "agent-name", - WorkerFunc: func(cfg introspection.Config) (worker.Worker, error) { - if s.startErr != nil { - return nil, s.startErr - } - return &dummyWorker{config: cfg}, nil - }, - }) -} - -func (s *ManifoldSuite) TestInputs(c *gc.C) { - c.Check(s.manifold.Inputs, jc.DeepEquals, []string{"agent-name"}) -} - -func (s *ManifoldSuite) TestStartNonLinux(c *gc.C) { - if runtime.GOOS == "linux" { - c.Skip("testing for non-linux") - } - - context := dt.StubContext(nil, map[string]interface{}{ - "agent-name": dependency.ErrMissing, - }) - - worker, err := s.manifold.Start(context) - c.Check(worker, gc.IsNil) - c.Check(err, gc.Equals, dependency.ErrUninstall) -} - -func (s *ManifoldSuite) TestStartAgentMissing(c *gc.C) { - if runtime.GOOS != "linux" { - c.Skip("introspection worker not supported on non-linux") - } - - context := dt.StubContext(nil, map[string]interface{}{ - "agent-name": dependency.ErrMissing, - }) - - worker, err := s.manifold.Start(context) - c.Check(worker, gc.IsNil) - c.Check(errors.Cause(err), gc.Equals, dependency.ErrMissing) -} - -func (s *ManifoldSuite) TestStartError(c *gc.C) { - if runtime.GOOS != "linux" { - c.Skip("introspection worker not supported on non-linux") - } - - s.startErr 
= errors.New("boom") - context := dt.StubContext(nil, map[string]interface{}{ - "agent-name": &dummyAgent{}, - }) - - worker, err := s.manifold.Start(context) - c.Check(worker, gc.IsNil) - c.Check(err, gc.ErrorMatches, "boom") -} - -func (s *ManifoldSuite) TestStartSuccess(c *gc.C) { - if runtime.GOOS != "linux" { - c.Skip("introspection worker not supported on non-linux") - } - - context := dt.StubContext(nil, map[string]interface{}{ - "agent-name": &dummyAgent{}, - }) - - worker, err := s.manifold.Start(context) - c.Check(err, jc.ErrorIsNil) - dummy, ok := worker.(*dummyWorker) - c.Assert(ok, jc.IsTrue) - c.Assert(dummy.config.SocketName, gc.Equals, "jujud-machine-42") -} - -type dummyAgent struct { - agent.Agent -} - -func (*dummyAgent) CurrentConfig() agent.Config { - return &dummyConfig{} -} - -type dummyConfig struct { - agent.Config -} - -func (*dummyConfig) Tag() names.Tag { - return names.NewMachineTag("42") -} - -type dummyApiCaller struct { - base.APICaller -} - -type dummyWorker struct { - worker.Worker - - config introspection.Config -} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/introspection/script.go juju-core-2.0.0/src/github.com/juju/juju/worker/introspection/script.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/introspection/script.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/introspection/script.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,86 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package introspection + +import ( + "io/ioutil" + "path" + "runtime" + + "github.com/juju/errors" +) + +var ( + profileDir = "/etc/profile.d" + bashFuncsFilename = "juju-introspection.sh" +) + +// WriteProfileFunctions writes the bashFuncs below to a file in the +// /etc/profile.d directory so all bash terminals can easily access the +// introspection worker. 
+func WriteProfileFunctions() error { + if runtime.GOOS != "linux" { + logger.Debugf("skipping profile funcs install") + return nil + } + filename := profileFilename() + if err := ioutil.WriteFile(filename, []byte(bashFuncs), 0644); err != nil { + return errors.Annotate(err, "writing introspection bash funcs") + } + return nil +} + +func profileFilename() string { + return path.Join(profileDir, bashFuncsFilename) +} + +const bashFuncs = ` +jujuAgentCall () { + local agent=$1 + shift + local path= + for i in "$@"; do + path="$path/$i" + done + echo -e "GET $path HTTP/1.0\r\n" | socat abstract-connect:jujud-$agent STDIO +} + +jujuMachineAgentName () { + local machine=` + "`ls -d /var/lib/juju/agents/machine*`" + ` + machine=` + "`basename $machine`" + ` + echo $machine +} + +jujuMachineOrUnit () { + # First arg is the path, second is optional agent name. + if [ "$#" -gt 2 ]; then + echo "expected no args (for machine agent) or one (unit agent)" + return 1 + fi + local agent=$(jujuMachineAgentName) + if [ "$#" -eq 2 ]; then + agent=$2 + fi + jujuAgentCall $agent $1 +} + +juju-goroutines () { + jujuMachineOrUnit debug/pprof/goroutine?debug=1 $@ +} + +juju-heap-profile () { + jujuMachineOrUnit debug/pprof/heap?debug=1 $@ +} + +juju-engine-report () { + jujuMachineOrUnit depengine/ $@ +} + +export -f jujuAgentCall +export -f jujuMachineAgentName +export -f jujuMachineOrUnit +export -f juju-goroutines +export -f juju-heap-profile +export -f juju-engine-report +` diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/introspection/script_test.go juju-core-2.0.0/src/github.com/juju/juju/worker/introspection/script_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/introspection/script_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/introspection/script_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,45 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+ +package introspection + +import ( + "io/ioutil" + "runtime" + + "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" +) + +type profileSuite struct { + testing.IsolationSuite +} + +var _ = gc.Suite(&profileSuite{}) + +func (*profileSuite) TestProfileFilename(c *gc.C) { + c.Assert(profileFilename(), gc.Equals, "/etc/profile.d/juju-introspection.sh") +} + +func (*profileSuite) TestNonLinux(c *gc.C) { + if runtime.GOOS == "linux" { + c.Skip("testing non-linux") + } + err := WriteProfileFunctions() + c.Assert(err, jc.ErrorIsNil) +} + +func (s *profileSuite) TestLinux(c *gc.C) { + if runtime.GOOS != "linux" { + c.Skip("testing linux") + } + dir := c.MkDir() + s.PatchValue(&profileDir, dir) + err := WriteProfileFunctions() + c.Assert(err, jc.ErrorIsNil) + + content, err := ioutil.ReadFile(profileFilename()) + c.Assert(err, jc.ErrorIsNil) + c.Assert(string(content), gc.Equals, bashFuncs) +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/introspection/socket.go juju-core-2.0.0/src/github.com/juju/juju/worker/introspection/socket.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/introspection/socket.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/introspection/socket.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,20 +4,33 @@ package introspection import ( + "fmt" "net" "net/http" "runtime" "github.com/juju/errors" - "launchpad.net/tomb" + "github.com/juju/loggo" + "gopkg.in/tomb.v1" + "gopkg.in/yaml.v2" "github.com/juju/juju/worker" "github.com/juju/juju/worker/introspection/pprof" ) +var logger = loggo.GetLogger("juju.worker.introspection") + +// DepEngineReporter provides insight into the running dependency engine of the agent. +type DepEngineReporter interface { + // Report returns a map describing the state of the receiver. It is expected + // to be goroutine-safe. 
+ Report() map[string]interface{} +} + // Config describes the arguments required to create the introspection worker. type Config struct { SocketName string + Reporter DepEngineReporter } // Validate checks the config values to assert they are valid to create the worker. @@ -32,6 +45,8 @@ type socketListener struct { tomb tomb.Tomb listener *net.UnixListener + reporter DepEngineReporter + done chan struct{} } // NewWorker starts an http server listening on an abstract domain socket @@ -58,6 +73,8 @@ w := &socketListener{ listener: l, + reporter: config.Reporter, + done: make(chan struct{}), } go w.serve() go w.run() @@ -70,21 +87,26 @@ mux.Handle("/debug/pprof/cmdline", http.HandlerFunc(pprof.Cmdline)) mux.Handle("/debug/pprof/profile", http.HandlerFunc(pprof.Profile)) mux.Handle("/debug/pprof/symbol", http.HandlerFunc(pprof.Symbol)) + mux.Handle("/depengine/", http.HandlerFunc(w.depengineReport)) srv := http.Server{ Handler: mux, } logger.Debugf("stats worker now servering") + defer logger.Debugf("stats worker servering finished") + defer close(w.done) srv.Serve(w.listener) - logger.Debugf("stats worker servering finished") } func (w *socketListener) run() { + defer w.tomb.Done() + defer logger.Debugf("stats worker finished") <-w.tomb.Dying() logger.Debugf("stats worker closing listener") w.listener.Close() - w.tomb.Done() + // Don't mark the worker as done until the serve goroutine has finished. + <-w.done } // Kill implements worker.Worker. 
@@ -96,3 +118,22 @@ func (w *socketListener) Wait() error { return w.tomb.Wait() } + +func (s *socketListener) depengineReport(w http.ResponseWriter, r *http.Request) { + if s.reporter == nil { + w.WriteHeader(http.StatusNotFound) + fmt.Fprintln(w, "missing reporter") + return + } + bytes, err := yaml.Marshal(s.reporter.Report()) + if err != nil { + w.WriteHeader(http.StatusInternalServerError) + fmt.Fprintf(w, "error: %v\n", err) + return + } + + w.Header().Set("Content-Type", "text/plain; charset=utf-8") + + fmt.Fprint(w, "Dependency Engine Report\n\n") + w.Write(bytes) +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/introspection/socket_test.go juju-core-2.0.0/src/github.com/juju/juju/worker/introspection/socket_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/introspection/socket_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/introspection/socket_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -9,6 +9,7 @@ "fmt" "io/ioutil" "net" + "os" "regexp" "runtime" @@ -48,8 +49,9 @@ type introspectionSuite struct { testing.IsolationSuite - name string - worker worker.Worker + name string + worker worker.Worker + reporter introspection.DepEngineReporter } var _ = gc.Suite(&introspectionSuite{}) @@ -58,12 +60,17 @@ if runtime.GOOS != "linux" { c.Skip("introspection worker not supported on non-linux") } - s.IsolationSuite.SetUpTest(c) + s.reporter = nil + s.worker = nil + s.startWorker(c) +} - s.name = "introspection-test" +func (s *introspectionSuite) startWorker(c *gc.C) { + s.name = fmt.Sprintf("introspection-test-%d", os.Getpid()) w, err := introspection.NewWorker(introspection.Config{ SocketName: s.name, + Reporter: s.reporter, }) c.Assert(err, jc.ErrorIsNil) s.worker = w @@ -89,7 +96,7 @@ func (s *introspectionSuite) TestCmdLine(c *gc.C) { buf := s.call(c, "/debug/pprof/cmdline") c.Assert(buf, gc.NotNil) - matches(c, buf, 
".*github.com/juju/juju/worker/introspection/_test/introspection.test") + matches(c, buf, ".*/introspection.test") } func (s *introspectionSuite) TestGoroutineProfile(c *gc.C) { @@ -98,6 +105,28 @@ matches(c, buf, `^goroutine profile: total \d+`) } +func (s *introspectionSuite) TestMissingReporter(c *gc.C) { + buf := s.call(c, "/depengine/") + matches(c, buf, "404 Not Found") + matches(c, buf, "missing reporter") +} + +func (s *introspectionSuite) TestEngineReporter(c *gc.C) { + // We need to make sure the existing worker is shut down + // so we can connect to the socket. + workertest.CheckKill(c, s.worker) + s.reporter = &reporter{ + values: map[string]interface{}{ + "working": true, + }, + } + s.startWorker(c) + buf := s.call(c, "/depengine/") + + matches(c, buf, "200 OK") + matches(c, buf, "working: true") +} + // matches fails if regex is not found in the contents of b. // b is expected to be the response from the pprof http server, and will // contain some HTTP preamble that should be ignored. @@ -113,3 +142,11 @@ } c.Fatalf("%q did not match regex %q", string(b), regex) } + +type reporter struct { + values map[string]interface{} +} + +func (r *reporter) Report() map[string]interface{} { + return r.values +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/leadership/manifold.go juju-core-2.0.0/src/github.com/juju/juju/worker/leadership/manifold.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/leadership/manifold.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/leadership/manifold.go 2016-10-13 14:31:49.000000000 +0000 @@ -8,6 +8,7 @@ "time" "github.com/juju/errors" + "github.com/juju/utils/clock" "gopkg.in/juju/names.v2" "github.com/juju/juju/agent" @@ -22,6 +23,7 @@ type ManifoldConfig struct { AgentName string APICallerName string + Clock clock.Clock LeadershipGuarantee time.Duration } @@ -42,6 +44,9 @@ // named in the supplied config. 
func startFunc(config ManifoldConfig) dependency.StartFunc { return func(context dependency.Context) (worker.Worker, error) { + if config.Clock == nil { + return nil, errors.NotValidf("missing Clock") + } var agent agent.Agent if err := context.Get(config.AgentName, &agent); err != nil { return nil, err @@ -50,7 +55,7 @@ if err := context.Get(config.APICallerName, &apiCaller); err != nil { return nil, err } - return NewManifoldWorker(agent, apiCaller, config.LeadershipGuarantee) + return NewManifoldWorker(agent, apiCaller, config.Clock, config.LeadershipGuarantee) } } @@ -58,14 +63,14 @@ // exists primarily to be patched out via NewManifoldWorker for ease of testing, // and is not itself directly tested. It would almost certainly be better to // pass the constructor dependencies in as explicit manifold config. -var NewManifoldWorker = func(agent agent.Agent, apiCaller base.APICaller, guarantee time.Duration) (worker.Worker, error) { +var NewManifoldWorker = func(agent agent.Agent, apiCaller base.APICaller, clock clock.Clock, guarantee time.Duration) (worker.Worker, error) { tag := agent.CurrentConfig().Tag() unitTag, ok := tag.(names.UnitTag) if !ok { return nil, fmt.Errorf("expected a unit tag; got %q", tag) } claimer := leadership.NewClient(apiCaller) - return NewTracker(unitTag, claimer, guarantee), nil + return NewTracker(unitTag, claimer, clock, guarantee), nil } // outputFunc extracts the coreleadership.Tracker from a *Tracker passed in as a Worker. 
diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/leadership/manifold_test.go juju-core-2.0.0/src/github.com/juju/juju/worker/leadership/manifold_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/leadership/manifold_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/leadership/manifold_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -9,6 +9,7 @@ "github.com/juju/errors" "github.com/juju/testing" jc "github.com/juju/testing/checkers" + "github.com/juju/utils/clock" gc "gopkg.in/check.v1" "github.com/juju/juju/agent" @@ -34,6 +35,7 @@ s.manifold = leadership.Manifold(leadership.ManifoldConfig{ AgentName: "agent-name", APICallerName: "api-caller-name", + Clock: clock.WallClock, LeadershipGuarantee: 123456 * time.Millisecond, }) } @@ -42,10 +44,19 @@ c.Check(s.manifold.Inputs, jc.DeepEquals, []string{"agent-name", "api-caller-name"}) } +func (s *ManifoldSuite) TestStartClockMissing(c *gc.C) { + manifold := leadership.Manifold(leadership.ManifoldConfig{}) + context := dt.StubContext(nil, nil) + worker, err := manifold.Start(context) + c.Check(worker, gc.IsNil) + c.Check(err.Error(), gc.Equals, "missing Clock not valid") + c.Check(err, jc.Satisfies, errors.IsNotValid) +} + func (s *ManifoldSuite) TestStartAgentMissing(c *gc.C) { context := dt.StubContext(nil, map[string]interface{}{ "agent-name": dependency.ErrMissing, - "api-caller-name": &dummyApiCaller{}, + "api-caller-name": &dummyAPICaller{}, }) worker, err := s.manifold.Start(context) @@ -53,7 +64,7 @@ c.Check(err, gc.Equals, dependency.ErrMissing) } -func (s *ManifoldSuite) TestStartApiCallerMissing(c *gc.C) { +func (s *ManifoldSuite) TestStartAPICallerMissing(c *gc.C) { context := dt.StubContext(nil, map[string]interface{}{ "agent-name": &dummyAgent{}, "api-caller-name": dependency.ErrMissing, @@ -66,13 +77,13 @@ func (s *ManifoldSuite) TestStartError(c *gc.C) { dummyAgent := &dummyAgent{} - dummyApiCaller := &dummyApiCaller{} + 
dummyAPICaller := &dummyAPICaller{} context := dt.StubContext(nil, map[string]interface{}{ "agent-name": dummyAgent, - "api-caller-name": dummyApiCaller, + "api-caller-name": dummyAPICaller, }) - s.PatchValue(&leadership.NewManifoldWorker, func(a agent.Agent, apiCaller base.APICaller, guarantee time.Duration) (worker.Worker, error) { - s.AddCall("newManifoldWorker", a, apiCaller, guarantee) + s.PatchValue(&leadership.NewManifoldWorker, func(a agent.Agent, apiCaller base.APICaller, clock clock.Clock, guarantee time.Duration) (worker.Worker, error) { + s.AddCall("newManifoldWorker", a, apiCaller, clock, guarantee) return nil, errors.New("blammo") }) @@ -81,20 +92,20 @@ c.Check(err, gc.ErrorMatches, "blammo") s.CheckCalls(c, []testing.StubCall{{ FuncName: "newManifoldWorker", - Args: []interface{}{dummyAgent, dummyApiCaller, 123456 * time.Millisecond}, + Args: []interface{}{dummyAgent, dummyAPICaller, clock.WallClock, 123456 * time.Millisecond}, }}) } func (s *ManifoldSuite) TestStartSuccess(c *gc.C) { dummyAgent := &dummyAgent{} - dummyApiCaller := &dummyApiCaller{} + dummyAPICaller := &dummyAPICaller{} context := dt.StubContext(nil, map[string]interface{}{ "agent-name": dummyAgent, - "api-caller-name": dummyApiCaller, + "api-caller-name": dummyAPICaller, }) dummyWorker := &dummyWorker{} - s.PatchValue(&leadership.NewManifoldWorker, func(a agent.Agent, apiCaller base.APICaller, guarantee time.Duration) (worker.Worker, error) { - s.AddCall("newManifoldWorker", a, apiCaller, guarantee) + s.PatchValue(&leadership.NewManifoldWorker, func(a agent.Agent, apiCaller base.APICaller, clock clock.Clock, guarantee time.Duration) (worker.Worker, error) { + s.AddCall("newManifoldWorker", a, apiCaller, clock, guarantee) return dummyWorker, nil }) @@ -103,7 +114,7 @@ c.Check(worker, gc.Equals, dummyWorker) s.CheckCalls(c, []testing.StubCall{{ FuncName: "newManifoldWorker", - Args: []interface{}{dummyAgent, dummyApiCaller, 123456 * time.Millisecond}, + Args: []interface{}{dummyAgent, 
dummyAPICaller, clock.WallClock, 123456 * time.Millisecond}, }}) } @@ -133,7 +144,7 @@ agent.Agent } -type dummyApiCaller struct { +type dummyAPICaller struct { base.APICaller } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/leadership/tracker.go juju-core-2.0.0/src/github.com/juju/juju/worker/leadership/tracker.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/leadership/tracker.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/leadership/tracker.go 2016-10-13 14:31:49.000000000 +0000 @@ -8,8 +8,9 @@ "github.com/juju/errors" "github.com/juju/loggo" + "github.com/juju/utils/clock" "gopkg.in/juju/names.v2" - "launchpad.net/tomb" + "gopkg.in/tomb.v1" "github.com/juju/juju/core/leadership" ) @@ -21,6 +22,7 @@ claimer leadership.Claimer unitName string applicationName string + clock clock.Clock duration time.Duration isMinion bool @@ -41,13 +43,14 @@ // leadership for the duration supplied here without generating additional // calls to the supplied manager (which may very well be on the other side of // a network connection). 
-func NewTracker(tag names.UnitTag, claimer leadership.Claimer, duration time.Duration) *Tracker { +func NewTracker(tag names.UnitTag, claimer leadership.Claimer, clock clock.Clock, duration time.Duration) *Tracker { unitName := tag.Id() serviceName, _ := names.UnitApplication(unitName) t := &Tracker{ unitName: unitName, applicationName: serviceName, claimer: claimer, + clock: clock, duration: duration, claimTickets: make(chan chan bool), waitLeaderTickets: make(chan chan bool), @@ -163,8 +166,7 @@ func (t *Tracker) refresh() error { logger.Debugf("checking %s for %s leadership", t.unitName, t.applicationName) leaseDuration := 2 * t.duration - // TODO(fwereade): 2016-03-17 lp:1558657 - untilTime := time.Now().Add(leaseDuration) + untilTime := t.clock.Now().Add(leaseDuration) err := t.claimer.ClaimLeadership(t.applicationName, t.unitName, leaseDuration) switch { case err == nil: @@ -182,8 +184,7 @@ logger.Infof("%s will renew %s leadership at %s", t.unitName, t.applicationName, renewTime) t.isMinion = false t.claimLease = nil - // TODO(fwereade): 2016-03-17 lp:1558657 - t.renewLease = time.After(renewTime.Sub(time.Now())) + t.renewLease = t.clock.After(renewTime.Sub(t.clock.Now())) for len(t.waitingLeader) > 0 { logger.Debugf("notifying %s ticket of impending %s leadership", t.unitName, t.applicationName) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/leadership/tracker_test.go juju-core-2.0.0/src/github.com/juju/juju/worker/leadership/tracker_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/leadership/tracker_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/leadership/tracker_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -8,7 +8,6 @@ "github.com/juju/errors" "github.com/juju/testing" - jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" "gopkg.in/juju/names.v2" @@ -16,30 +15,40 @@ coretesting "github.com/juju/juju/testing" "github.com/juju/juju/worker" 
"github.com/juju/juju/worker/leadership" + "github.com/juju/juju/worker/workertest" ) type TrackerSuite struct { testing.IsolationSuite unitTag names.UnitTag claimer *StubClaimer + clock *testing.Clock } var _ = gc.Suite(&TrackerSuite{}) const ( - trackerDuration = coretesting.ShortWait + trackerDuration = 30 * time.Second leaseDuration = trackerDuration * 2 ) -func refreshes(count int) time.Duration { +func (s *TrackerSuite) refreshes(count int) { + halfDuration := trackerDuration / 2 halfRefreshes := (2 * count) + 1 - twiceDuration := trackerDuration * time.Duration(halfRefreshes) - return twiceDuration / 2 + // The worker often checks against the current time + // and adds delay to that time. Here we advance the clock + // in small jumps, and then wait a short time to allow the + // worker to do stuff. + for i := 0; i < halfRefreshes; i++ { + s.clock.Advance(halfDuration) + <-time.After(coretesting.ShortWait) + } } func (s *TrackerSuite) SetUpTest(c *gc.C) { s.IsolationSuite.SetUpTest(c) s.unitTag = names.NewUnitTag("led-service/123") + s.clock = testing.NewClock(time.Date(2016, 10, 9, 12, 0, 0, 0, time.UTC)) s.claimer = &StubClaimer{ Stub: &testing.Stub{}, releases: make(chan struct{}), @@ -64,21 +73,39 @@ } } +func (s *TrackerSuite) newTrackerInner() *leadership.Tracker { + return leadership.NewTracker(s.unitTag, s.claimer, s.clock, trackerDuration) +} + +func (s *TrackerSuite) newTracker() *leadership.Tracker { + tracker := s.newTrackerInner() + s.AddCleanup(func(c *gc.C) { + workertest.CleanKill(c, tracker) + }) + return tracker +} + +func (s *TrackerSuite) newTrackerDirtyKill() *leadership.Tracker { + tracker := s.newTrackerInner() + s.AddCleanup(func(c *gc.C) { + workertest.DirtyKill(c, tracker) + }) + return tracker +} + func (s *TrackerSuite) TestApplicationName(c *gc.C) { - tracker := leadership.NewTracker(s.unitTag, s.claimer, trackerDuration) - defer assertStop(c, tracker) + tracker := s.newTracker() c.Assert(tracker.ApplicationName(), gc.Equals, 
"led-service") } func (s *TrackerSuite) TestOnLeaderSuccess(c *gc.C) { - tracker := leadership.NewTracker(s.unitTag, s.claimer, trackerDuration) - defer assertStop(c, tracker) + tracker := s.newTracker() // Check the ticket succeeds. assertClaimLeader(c, tracker, true) // Stop the tracker before trying to look at its stub. - assertStop(c, tracker) + workertest.CleanKill(c, tracker) s.claimer.CheckCalls(c, []testing.StubCall{{ FuncName: "ClaimLeadership", Args: []interface{}{ @@ -89,14 +116,13 @@ func (s *TrackerSuite) TestOnLeaderFailure(c *gc.C) { s.claimer.Stub.SetErrors(coreleadership.ErrClaimDenied, nil) - tracker := leadership.NewTracker(s.unitTag, s.claimer, trackerDuration) - defer assertStop(c, tracker) + tracker := s.newTracker() // Check the ticket fails. assertClaimLeader(c, tracker, false) // Stop the tracker before trying to look at its mocks. - assertStop(c, tracker) + workertest.CleanKill(c, tracker) // Unblock the release goroutine, lest data races. s.unblockRelease(c) @@ -116,8 +142,7 @@ func (s *TrackerSuite) TestOnLeaderError(c *gc.C) { s.claimer.Stub.SetErrors(errors.New("pow")) - tracker := leadership.NewTracker(s.unitTag, s.claimer, trackerDuration) - defer worker.Stop(tracker) + tracker := s.newTrackerDirtyKill() // Check the ticket fails. assertClaimLeader(c, tracker, false) @@ -135,19 +160,18 @@ func (s *TrackerSuite) TestLoseLeadership(c *gc.C) { s.claimer.Stub.SetErrors(nil, coreleadership.ErrClaimDenied, nil) - tracker := leadership.NewTracker(s.unitTag, s.claimer, trackerDuration) - defer assertStop(c, tracker) + tracker := s.newTracker() // Check the first ticket succeeds. assertClaimLeader(c, tracker, true) // Wait long enough for a single refresh, to trigger ErrClaimDenied; then // check the next ticket fails. - <-time.After(refreshes(1)) + s.refreshes(1) assertClaimLeader(c, tracker, false) // Stop the tracker before trying to look at its stub. 
- assertStop(c, tracker) + workertest.CleanKill(c, tracker) // Unblock the release goroutine, lest data races. s.unblockRelease(c) @@ -172,8 +196,7 @@ func (s *TrackerSuite) TestGainLeadership(c *gc.C) { s.claimer.Stub.SetErrors(coreleadership.ErrClaimDenied, nil, nil) - tracker := leadership.NewTracker(s.unitTag, s.claimer, trackerDuration) - defer assertStop(c, tracker) + tracker := s.newTracker() // Check initial ticket fails. assertClaimLeader(c, tracker, false) @@ -181,14 +204,14 @@ // Unblock the release goroutine... s.unblockRelease(c) - // ...and, uh, voodoo sleep a bit, but not long enough to trigger a refresh... - <-time.After(refreshes(0)) + // advance the clock a small amount, but not enough to trigger a check + s.refreshes(0) // ...then check the next ticket succeeds. assertClaimLeader(c, tracker, true) // Stop the tracker before trying to look at its stub. - assertStop(c, tracker) + workertest.CleanKill(c, tracker) s.claimer.CheckCalls(c, []testing.StubCall{{ FuncName: "ClaimLeadership", Args: []interface{}{ @@ -211,8 +234,7 @@ s.claimer.Stub.SetErrors( coreleadership.ErrClaimDenied, nil, coreleadership.ErrClaimDenied, nil, ) - tracker := leadership.NewTracker(s.unitTag, s.claimer, trackerDuration) - defer assertStop(c, tracker) + tracker := s.newTracker() // Check initial ticket fails. assertClaimLeader(c, tracker, false) @@ -220,18 +242,18 @@ // Unblock the release goroutine... s.unblockRelease(c) - // ...and, uh, voodoo sleep a bit, but not long enough to trigger a refresh... - <-time.After(refreshes(0)) + // advance the clock a small amount, but not enough to trigger a check + s.refreshes(0) // ...then check the next ticket fails again. assertClaimLeader(c, tracker, false) - // This time, sleep long enough that a refresh would trigger if it were + // This time, advance far enough that a refresh would trigger if it were // going to... - <-time.After(refreshes(1)) + s.refreshes(1) // ...but it won't, because we Stop the tracker... 
- assertStop(c, tracker) + workertest.CleanKill(c, tracker) // ...and clear out the release goroutine before we look at the stub. s.unblockRelease(c) @@ -260,14 +282,13 @@ } func (s *TrackerSuite) TestWaitLeaderAlreadyLeader(c *gc.C) { - tracker := leadership.NewTracker(s.unitTag, s.claimer, trackerDuration) - defer assertStop(c, tracker) + tracker := s.newTracker() // Check the ticket succeeds. assertWaitLeader(c, tracker, true) // Stop the tracker before trying to look at its stub. - assertStop(c, tracker) + workertest.CleanKill(c, tracker) s.claimer.CheckCalls(c, []testing.StubCall{{ FuncName: "ClaimLeadership", Args: []interface{}{ @@ -278,8 +299,7 @@ func (s *TrackerSuite) TestWaitLeaderBecomeLeader(c *gc.C) { s.claimer.Stub.SetErrors(coreleadership.ErrClaimDenied, nil, nil) - tracker := leadership.NewTracker(s.unitTag, s.claimer, trackerDuration) - defer assertStop(c, tracker) + tracker := s.newTracker() // Check initial ticket fails. assertWaitLeader(c, tracker, false) @@ -287,14 +307,14 @@ // Unblock the release goroutine... s.unblockRelease(c) - // ...and, uh, voodoo sleep a bit, but not long enough to trigger a refresh... - <-time.After(refreshes(0)) + // advance the clock a small amount, but not enough to trigger a check + s.refreshes(0) // ...then check the next ticket succeeds. assertWaitLeader(c, tracker, true) // Stop the tracker before trying to look at its stub. - assertStop(c, tracker) + workertest.CleanKill(c, tracker) s.claimer.CheckCalls(c, []testing.StubCall{{ FuncName: "ClaimLeadership", Args: []interface{}{ @@ -315,15 +335,14 @@ func (s *TrackerSuite) TestWaitLeaderNeverBecomeLeader(c *gc.C) { s.claimer.Stub.SetErrors(coreleadership.ErrClaimDenied, nil) - tracker := leadership.NewTracker(s.unitTag, s.claimer, trackerDuration) - defer assertStop(c, tracker) + tracker := s.newTracker() // Check initial ticket fails. assertWaitLeader(c, tracker, false) // Get a new ticket and stop the tracker while it's pending. 
ticket := tracker.WaitLeader() - assertStop(c, tracker) + workertest.CleanKill(c, tracker) // Check the ticket got closed without sending true. assertTicket(c, ticket, false) @@ -347,14 +366,13 @@ func (s *TrackerSuite) TestWaitMinionAlreadyMinion(c *gc.C) { s.claimer.Stub.SetErrors(coreleadership.ErrClaimDenied, nil) - tracker := leadership.NewTracker(s.unitTag, s.claimer, trackerDuration) - defer assertStop(c, tracker) + tracker := s.newTracker() // Check initial ticket is closed immediately. assertWaitLeader(c, tracker, false) // Stop the tracker before trying to look at its stub. - assertStop(c, tracker) + workertest.CleanKill(c, tracker) s.claimer.CheckCalls(c, []testing.StubCall{{ FuncName: "ClaimLeadership", Args: []interface{}{ @@ -370,19 +388,18 @@ func (s *TrackerSuite) TestWaitMinionBecomeMinion(c *gc.C) { s.claimer.Stub.SetErrors(nil, coreleadership.ErrClaimDenied, nil) - tracker := leadership.NewTracker(s.unitTag, s.claimer, trackerDuration) - defer assertStop(c, tracker) + tracker := s.newTracker() // Check the first ticket stays open. assertWaitMinion(c, tracker, false) // Wait long enough for a single refresh, to trigger ErrClaimDenied; then // check the next ticket is closed. - <-time.After(refreshes(1)) + s.refreshes(1) assertWaitMinion(c, tracker, true) // Stop the tracker before trying to look at its stub. - assertStop(c, tracker) + workertest.CleanKill(c, tracker) // Unblock the release goroutine, lest data races. 
s.unblockRelease(c) @@ -406,14 +423,16 @@ } func (s *TrackerSuite) TestWaitMinionNeverBecomeMinion(c *gc.C) { - tracker := leadership.NewTracker(s.unitTag, s.claimer, trackerDuration) - defer assertStop(c, tracker) + tracker := s.newTracker() ticket := tracker.WaitMinion() + s.refreshes(2) + select { - case <-time.After(refreshes(2)): case <-ticket.Ready(): c.Fatalf("got unexpected readiness: %v", ticket.Wait()) + default: + // fallthrough } s.claimer.CheckCalls(c, []testing.StubCall{{ @@ -451,7 +470,7 @@ return } select { - case <-time.After(trackerDuration / 4): + case <-time.After(coretesting.ShortWait): // This wait needs to be small, compared to the resolution we run the // tests at, so as not to disturb client timing too much. case <-ticket.Ready(): @@ -467,7 +486,7 @@ return } select { - case <-time.After(trackerDuration / 4): + case <-time.After(coretesting.ShortWait): // This wait needs to be small, compared to the resolution we run the // tests at, so as not to disturb client timing too much. 
case <-ticket.Ready(): @@ -484,7 +503,3 @@ c.Assert(ticket.Wait(), gc.Equals, expect) } } - -func assertStop(c *gc.C, w worker.Worker) { - c.Assert(worker.Stop(w), jc.ErrorIsNil) -} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/lease/fixture_test.go juju-core-2.0.0/src/github.com/juju/juju/worker/lease/fixture_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/lease/fixture_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/lease/fixture_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -6,11 +6,12 @@ import ( "time" + "github.com/juju/testing" jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" corelease "github.com/juju/juju/core/lease" - "github.com/juju/juju/testing" + coretesting "github.com/juju/juju/testing" "github.com/juju/juju/worker/lease" ) @@ -97,7 +98,7 @@ } func waitAlarms(c *gc.C, clock *testing.Clock, count int) { - timeout := time.After(testing.LongWait) + timeout := time.After(coretesting.LongWait) for i := 0; i < count; i++ { select { case <-clock.Alarms(): diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/lease/manager_block_test.go juju-core-2.0.0/src/github.com/juju/juju/worker/lease/manager_block_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/lease/manager_block_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/lease/manager_block_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -11,7 +11,6 @@ gc "gopkg.in/check.v1" corelease "github.com/juju/juju/core/lease" - coretesting "github.com/juju/juju/testing" "github.com/juju/juju/worker/lease" ) @@ -23,7 +22,7 @@ func (s *WaitUntilExpiredSuite) TestLeadershipNotHeld(c *gc.C) { fix := &Fixture{} - fix.RunTest(c, func(manager *lease.Manager, _ *coretesting.Clock) { + fix.RunTest(c, func(manager *lease.Manager, _ *testing.Clock) { blockTest := newBlockTest(manager, "redis") err := blockTest.assertUnblocked(c) c.Check(err, jc.ErrorIsNil) @@ -48,7 
+47,7 @@ }, }}, } - fix.RunTest(c, func(manager *lease.Manager, clock *coretesting.Clock) { + fix.RunTest(c, func(manager *lease.Manager, clock *testing.Clock) { blockTest := newBlockTest(manager, "redis") blockTest.assertBlocked(c) @@ -81,7 +80,7 @@ }, }}, } - fix.RunTest(c, func(manager *lease.Manager, clock *coretesting.Clock) { + fix.RunTest(c, func(manager *lease.Manager, clock *testing.Clock) { blockTest := newBlockTest(manager, "redis") blockTest.assertBlocked(c) @@ -106,7 +105,7 @@ }, }}, } - fix.RunTest(c, func(manager *lease.Manager, clock *coretesting.Clock) { + fix.RunTest(c, func(manager *lease.Manager, clock *testing.Clock) { blockTest := newBlockTest(manager, "redis") blockTest.assertBlocked(c) @@ -149,7 +148,7 @@ err: corelease.ErrInvalid, }}, } - fix.RunTest(c, func(manager *lease.Manager, clock *coretesting.Clock) { + fix.RunTest(c, func(manager *lease.Manager, clock *testing.Clock) { redisTest1 := newBlockTest(manager, "redis") redisTest1.assertBlocked(c) redisTest2 := newBlockTest(manager, "redis") @@ -180,7 +179,7 @@ }, }, } - fix.RunTest(c, func(manager *lease.Manager, _ *coretesting.Clock) { + fix.RunTest(c, func(manager *lease.Manager, _ *testing.Clock) { blockTest := newBlockTest(manager, "redis") blockTest.assertBlocked(c) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/lease/manager_check_test.go juju-core-2.0.0/src/github.com/juju/juju/worker/lease/manager_check_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/lease/manager_check_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/lease/manager_check_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -12,7 +12,6 @@ gc "gopkg.in/check.v1" corelease "github.com/juju/juju/core/lease" - coretesting "github.com/juju/juju/testing" "github.com/juju/juju/worker/lease" ) @@ -32,7 +31,7 @@ }, }, } - fix.RunTest(c, func(manager *lease.Manager, _ *coretesting.Clock) { + fix.RunTest(c, func(manager *lease.Manager, _ 
*testing.Clock) { token := manager.Token("redis", "redis/0") err := token.Check(nil) c.Check(err, jc.ErrorIsNil) @@ -52,7 +51,7 @@ }, }}, } - fix.RunTest(c, func(manager *lease.Manager, _ *coretesting.Clock) { + fix.RunTest(c, func(manager *lease.Manager, _ *testing.Clock) { token := manager.Token("redis", "redis/0") err := token.Check(nil) c.Check(err, jc.ErrorIsNil) @@ -72,7 +71,7 @@ }, }}, } - fix.RunTest(c, func(manager *lease.Manager, _ *coretesting.Clock) { + fix.RunTest(c, func(manager *lease.Manager, _ *testing.Clock) { token := manager.Token("redis", "redis/0") err := token.Check(nil) c.Check(err, jc.ErrorIsNil) @@ -85,7 +84,7 @@ method: "Refresh", }}, } - fix.RunTest(c, func(manager *lease.Manager, _ *coretesting.Clock) { + fix.RunTest(c, func(manager *lease.Manager, _ *testing.Clock) { token := manager.Token("redis", "redis/0") err := token.Check(nil) c.Check(errors.Cause(err), gc.Equals, corelease.ErrNotHeld) @@ -105,7 +104,7 @@ }, }}, } - fix.RunTest(c, func(manager *lease.Manager, _ *coretesting.Clock) { + fix.RunTest(c, func(manager *lease.Manager, _ *testing.Clock) { token := manager.Token("redis", "redis/0") err := token.Check(nil) c.Check(errors.Cause(err), gc.Equals, corelease.ErrNotHeld) @@ -120,7 +119,7 @@ }}, expectDirty: true, } - fix.RunTest(c, func(manager *lease.Manager, _ *coretesting.Clock) { + fix.RunTest(c, func(manager *lease.Manager, _ *testing.Clock) { token := manager.Token("redis", "redis/0") c.Check(token.Check(nil), gc.ErrorMatches, "lease manager stopped") err := manager.Wait() diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/lease/manager_claim_test.go juju-core-2.0.0/src/github.com/juju/juju/worker/lease/manager_claim_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/lease/manager_claim_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/lease/manager_claim_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -12,7 +12,6 @@ gc "gopkg.in/check.v1" corelease 
"github.com/juju/juju/core/lease" - coretesting "github.com/juju/juju/testing" "github.com/juju/juju/worker/lease" ) @@ -29,7 +28,7 @@ args: []interface{}{"redis", corelease.Request{"redis/0", time.Minute}}, }}, } - fix.RunTest(c, func(manager *lease.Manager, _ *coretesting.Clock) { + fix.RunTest(c, func(manager *lease.Manager, _ *testing.Clock) { err := manager.Claim("redis", "redis/0", time.Minute) c.Check(err, jc.ErrorIsNil) }) @@ -52,7 +51,7 @@ args: []interface{}{"redis", corelease.Request{"redis/0", time.Minute}}, }}, } - fix.RunTest(c, func(manager *lease.Manager, _ *coretesting.Clock) { + fix.RunTest(c, func(manager *lease.Manager, _ *testing.Clock) { err := manager.Claim("redis", "redis/0", time.Minute) c.Check(err, jc.ErrorIsNil) }) @@ -72,7 +71,7 @@ }, }}, } - fix.RunTest(c, func(manager *lease.Manager, _ *coretesting.Clock) { + fix.RunTest(c, func(manager *lease.Manager, _ *testing.Clock) { err := manager.Claim("redis", "redis/0", time.Minute) c.Check(err, gc.Equals, corelease.ErrClaimDenied) }) @@ -87,7 +86,7 @@ }}, expectDirty: true, } - fix.RunTest(c, func(manager *lease.Manager, _ *coretesting.Clock) { + fix.RunTest(c, func(manager *lease.Manager, _ *testing.Clock) { err := manager.Claim("redis", "redis/0", time.Minute) c.Check(err, gc.ErrorMatches, "lease manager stopped") err = manager.Wait() @@ -108,7 +107,7 @@ args: []interface{}{"redis", corelease.Request{"redis/0", time.Minute}}, }}, } - fix.RunTest(c, func(manager *lease.Manager, _ *coretesting.Clock) { + fix.RunTest(c, func(manager *lease.Manager, _ *testing.Clock) { err := manager.Claim("redis", "redis/0", time.Minute) c.Check(err, jc.ErrorIsNil) }) @@ -134,7 +133,7 @@ args: []interface{}{"redis", corelease.Request{"redis/0", time.Minute}}, }}, } - fix.RunTest(c, func(manager *lease.Manager, _ *coretesting.Clock) { + fix.RunTest(c, func(manager *lease.Manager, _ *testing.Clock) { err := manager.Claim("redis", "redis/0", time.Minute) c.Check(err, jc.ErrorIsNil) }) @@ -160,7 +159,7 @@ }, }}, 
} - fix.RunTest(c, func(manager *lease.Manager, _ *coretesting.Clock) { + fix.RunTest(c, func(manager *lease.Manager, _ *testing.Clock) { err := manager.Claim("redis", "redis/0", time.Minute) c.Check(err, gc.Equals, corelease.ErrClaimDenied) }) @@ -181,7 +180,7 @@ }}, expectDirty: true, } - fix.RunTest(c, func(manager *lease.Manager, _ *coretesting.Clock) { + fix.RunTest(c, func(manager *lease.Manager, _ *testing.Clock) { err := manager.Claim("redis", "redis/0", time.Minute) c.Check(err, gc.ErrorMatches, "lease manager stopped") err = manager.Wait() @@ -198,7 +197,7 @@ }, }, } - fix.RunTest(c, func(manager *lease.Manager, _ *coretesting.Clock) { + fix.RunTest(c, func(manager *lease.Manager, _ *testing.Clock) { err := manager.Claim("redis", "redis/0", time.Minute) c.Check(err, gc.Equals, corelease.ErrClaimDenied) }) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/lease/manager_expire_test.go juju-core-2.0.0/src/github.com/juju/juju/worker/lease/manager_expire_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/lease/manager_expire_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/lease/manager_expire_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -12,7 +12,6 @@ gc "gopkg.in/check.v1" corelease "github.com/juju/juju/core/lease" - coretesting "github.com/juju/juju/testing" "github.com/juju/juju/worker/lease" ) @@ -37,7 +36,7 @@ }, }}, } - fix.RunTest(c, func(_ *lease.Manager, _ *coretesting.Clock) {}) + fix.RunTest(c, func(_ *lease.Manager, _ *testing.Clock) {}) } func (s *ExpireSuite) TestStartup_ExpiryInFuture(c *gc.C) { @@ -46,7 +45,7 @@ "redis": corelease.Info{Expiry: offset(time.Second)}, }, } - fix.RunTest(c, func(_ *lease.Manager, clock *coretesting.Clock) { + fix.RunTest(c, func(_ *lease.Manager, clock *testing.Clock) { clock.Advance(almostSeconds(1)) }) } @@ -66,14 +65,14 @@ }, }}, } - fix.RunTest(c, func(_ *lease.Manager, clock *coretesting.Clock) { + fix.RunTest(c, func(_ 
*lease.Manager, clock *testing.Clock) { clock.Advance(time.Second) }) } func (s *ExpireSuite) TestStartup_NoExpiry_NotLongEnough(c *gc.C) { fix := &Fixture{} - fix.RunTest(c, func(_ *lease.Manager, clock *coretesting.Clock) { + fix.RunTest(c, func(_ *lease.Manager, clock *testing.Clock) { clock.Advance(almostSeconds(3600)) }) } @@ -98,7 +97,7 @@ }, }}, } - fix.RunTest(c, func(_ *lease.Manager, clock *coretesting.Clock) { + fix.RunTest(c, func(_ *lease.Manager, clock *testing.Clock) { clock.Advance(time.Hour) }) } @@ -119,7 +118,7 @@ }, }}, } - fix.RunTest(c, func(_ *lease.Manager, clock *coretesting.Clock) { + fix.RunTest(c, func(_ *lease.Manager, clock *testing.Clock) { clock.Advance(time.Second) }) } @@ -140,7 +139,7 @@ }, }}, } - fix.RunTest(c, func(_ *lease.Manager, clock *coretesting.Clock) { + fix.RunTest(c, func(_ *lease.Manager, clock *testing.Clock) { clock.Advance(time.Second) }) } @@ -159,7 +158,7 @@ }}, expectDirty: true, } - fix.RunTest(c, func(manager *lease.Manager, clock *coretesting.Clock) { + fix.RunTest(c, func(manager *lease.Manager, clock *testing.Clock) { clock.Advance(time.Second) err := manager.Wait() c.Check(err, gc.ErrorMatches, "snarfblat hobalob") @@ -179,7 +178,7 @@ }, }}, } - fix.RunTest(c, func(manager *lease.Manager, clock *coretesting.Clock) { + fix.RunTest(c, func(manager *lease.Manager, clock *testing.Clock) { // Ask for a minute, actually get 63s. Don't expire early. err := manager.Claim("redis", "redis/0", time.Minute) c.Assert(err, jc.ErrorIsNil) @@ -208,7 +207,7 @@ }, }}, } - fix.RunTest(c, func(manager *lease.Manager, clock *coretesting.Clock) { + fix.RunTest(c, func(manager *lease.Manager, clock *testing.Clock) { // Ask for a minute, actually get 63s. Expire on time. 
err := manager.Claim("redis", "redis/0", time.Minute) c.Assert(err, jc.ErrorIsNil) @@ -235,7 +234,7 @@ }, }}, } - fix.RunTest(c, func(manager *lease.Manager, clock *coretesting.Clock) { + fix.RunTest(c, func(manager *lease.Manager, clock *testing.Clock) { // Ask for a minute, actually get 63s. Don't expire early. err := manager.Claim("redis", "redis/0", time.Minute) c.Assert(err, jc.ErrorIsNil) @@ -270,7 +269,7 @@ }, }}, } - fix.RunTest(c, func(manager *lease.Manager, clock *coretesting.Clock) { + fix.RunTest(c, func(manager *lease.Manager, clock *testing.Clock) { // Ask for a minute, actually get 63s. Expire on time. err := manager.Claim("redis", "redis/0", time.Minute) c.Assert(err, jc.ErrorIsNil) @@ -324,7 +323,7 @@ }}, expectDirty: true, } - fix.RunTest(c, func(manager *lease.Manager, clock *coretesting.Clock) { + fix.RunTest(c, func(manager *lease.Manager, clock *testing.Clock) { clock.Advance(5 * time.Second) err := manager.Wait() c.Check(err, gc.ErrorMatches, "what is this\\?") diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/lease/manager_validation_test.go juju-core-2.0.0/src/github.com/juju/juju/worker/lease/manager_validation_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/lease/manager_validation_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/lease/manager_validation_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -13,7 +13,6 @@ gc "gopkg.in/check.v1" corelease "github.com/juju/juju/core/lease" - coretesting "github.com/juju/juju/testing" "github.com/juju/juju/worker/lease" ) @@ -59,7 +58,7 @@ manager, err := lease.NewManager(lease.ManagerConfig{ Client: NewClient(nil, nil), Secretary: struct{ lease.Secretary }{}, - Clock: coretesting.NewClock(time.Now()), + Clock: testing.NewClock(time.Now()), }) c.Check(err, gc.ErrorMatches, "non-positive MaxSleep not valid") c.Check(err, jc.Satisfies, errors.IsNotValid) @@ -69,7 +68,7 @@ func (s *ValidationSuite) TestNegativeMaxSleep(c 
*gc.C) { manager, err := lease.NewManager(lease.ManagerConfig{ Client: NewClient(nil, nil), - Clock: coretesting.NewClock(time.Now()), + Clock: testing.NewClock(time.Now()), Secretary: struct{ lease.Secretary }{}, MaxSleep: -time.Nanosecond, }) @@ -80,7 +79,7 @@ func (s *ValidationSuite) TestClaim_LeaseName(c *gc.C) { fix := &Fixture{} - fix.RunTest(c, func(manager *lease.Manager, _ *coretesting.Clock) { + fix.RunTest(c, func(manager *lease.Manager, _ *testing.Clock) { err := manager.Claim("INVALID", "bar/0", time.Minute) c.Check(err, gc.ErrorMatches, `cannot claim lease "INVALID": name not valid`) c.Check(err, jc.Satisfies, errors.IsNotValid) @@ -89,7 +88,7 @@ func (s *ValidationSuite) TestClaim_HolderName(c *gc.C) { fix := &Fixture{} - fix.RunTest(c, func(manager *lease.Manager, _ *coretesting.Clock) { + fix.RunTest(c, func(manager *lease.Manager, _ *testing.Clock) { err := manager.Claim("foo", "INVALID", time.Minute) c.Check(err, gc.ErrorMatches, `cannot claim lease for holder "INVALID": name not valid`) c.Check(err, jc.Satisfies, errors.IsNotValid) @@ -98,7 +97,7 @@ func (s *ValidationSuite) TestClaim_Duration(c *gc.C) { fix := &Fixture{} - fix.RunTest(c, func(manager *lease.Manager, _ *coretesting.Clock) { + fix.RunTest(c, func(manager *lease.Manager, _ *testing.Clock) { err := manager.Claim("foo", "bar/0", time.Second) c.Check(err, gc.ErrorMatches, `cannot claim lease for 1s: time not valid`) c.Check(err, jc.Satisfies, errors.IsNotValid) @@ -107,7 +106,7 @@ func (s *ValidationSuite) TestToken_LeaseName(c *gc.C) { fix := &Fixture{} - fix.RunTest(c, func(manager *lease.Manager, _ *coretesting.Clock) { + fix.RunTest(c, func(manager *lease.Manager, _ *testing.Clock) { token := manager.Token("INVALID", "bar/0") err := token.Check(nil) c.Check(err, gc.ErrorMatches, `cannot check lease "INVALID": name not valid`) @@ -117,7 +116,7 @@ func (s *ValidationSuite) TestToken_HolderName(c *gc.C) { fix := &Fixture{} - fix.RunTest(c, func(manager *lease.Manager, _ 
*coretesting.Clock) { + fix.RunTest(c, func(manager *lease.Manager, _ *testing.Clock) { token := manager.Token("foo", "INVALID") err := token.Check(nil) c.Check(err, gc.ErrorMatches, `cannot check holder "INVALID": name not valid`) @@ -144,7 +143,7 @@ }, }}, } - fix.RunTest(c, func(manager *lease.Manager, _ *coretesting.Clock) { + fix.RunTest(c, func(manager *lease.Manager, _ *testing.Clock) { token := manager.Token("redis", "redis/0") err := token.Check(&expectKey) cause := errors.Cause(err) @@ -154,7 +153,7 @@ func (s *ValidationSuite) TestWaitUntilExpired_LeaseName(c *gc.C) { fix := &Fixture{} - fix.RunTest(c, func(manager *lease.Manager, _ *coretesting.Clock) { + fix.RunTest(c, func(manager *lease.Manager, _ *testing.Clock) { err := manager.WaitUntilExpired("INVALID") c.Check(err, gc.ErrorMatches, `cannot wait for lease "INVALID" expiry: name not valid`) c.Check(err, jc.Satisfies, errors.IsNotValid) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/logforwarder/logforwarder.go juju-core-2.0.0/src/github.com/juju/juju/worker/logforwarder/logforwarder.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/logforwarder/logforwarder.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/logforwarder/logforwarder.go 2016-10-13 14:31:49.000000000 +0000 @@ -9,7 +9,7 @@ "github.com/juju/errors" "github.com/juju/loggo" - "launchpad.net/tomb" + "gopkg.in/tomb.v1" "github.com/juju/juju/api/base" "github.com/juju/juju/apiserver/params" diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/logger/logger_test.go juju-core-2.0.0/src/github.com/juju/juju/worker/logger/logger_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/logger/logger_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/logger/logger_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -28,7 +28,7 @@ type LoggerSuite struct { testing.JujuConnSuite - loggerApi *apilogger.State + loggerAPI 
*apilogger.State machine *state.Machine } @@ -38,8 +38,8 @@ s.JujuConnSuite.SetUpTest(c) apiConn, machine := s.OpenAPIAsNewMachine(c) // Create the machiner API facade. - s.loggerApi = apilogger.NewState(apiConn) - c.Assert(s.loggerApi, gc.NotNil) + s.loggerAPI = apilogger.NewState(apiConn) + c.Assert(s.loggerAPI, gc.NotNil) s.machine = machine } @@ -76,7 +76,7 @@ func (s *LoggerSuite) makeLogger(c *gc.C) (worker.Worker, *mockConfig) { config := agentConfig(c, s.machine.Tag()) - w, err := logger.NewLogger(s.loggerApi, config) + w, err := logger.NewLogger(s.loggerAPI, config) c.Assert(err, jc.ErrorIsNil) return w, config } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/logger/manifold.go juju-core-2.0.0/src/github.com/juju/juju/worker/logger/manifold.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/logger/manifold.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/logger/manifold.go 2016-10-13 14:31:49.000000000 +0000 @@ -14,16 +14,16 @@ // ManifoldConfig defines the names of the manifolds on which a // Manifold will depend. -type ManifoldConfig engine.AgentApiManifoldConfig +type ManifoldConfig engine.AgentAPIManifoldConfig // Manifold returns a dependency manifold that runs a logger // worker, using the resource names defined in the supplied config. func Manifold(config ManifoldConfig) dependency.Manifold { - typedConfig := engine.AgentApiManifoldConfig(config) - return engine.AgentApiManifold(typedConfig, newWorker) + typedConfig := engine.AgentAPIManifoldConfig(config) + return engine.AgentAPIManifold(typedConfig, newWorker) } -// newWorker trivially wraps NewLogger to specialise a engine.AgentApiManifold. +// newWorker trivially wraps NewLogger to specialise a engine.AgentAPIManifold. 
var newWorker = func(a agent.Agent, apiCaller base.APICaller) (worker.Worker, error) { currentConfig := a.CurrentConfig() loggerFacade := logger.NewState(apiCaller) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/logsender/manifold.go juju-core-2.0.0/src/github.com/juju/juju/worker/logsender/manifold.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/logsender/manifold.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/logsender/manifold.go 2016-10-13 14:31:49.000000000 +0000 @@ -21,10 +21,10 @@ // Manifold returns a dependency manifold that runs a logger // worker, using the resource names defined in the supplied config. func Manifold(config ManifoldConfig) dependency.Manifold { - typedConfig := engine.ApiManifoldConfig{ + typedConfig := engine.APIManifoldConfig{ APICallerName: config.APICallerName, } - return engine.ApiManifold(typedConfig, config.newWorker) + return engine.APIManifold(typedConfig, config.newWorker) } func (config ManifoldConfig) newWorker(apiCaller base.APICaller) (worker.Worker, error) { diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/machineactions/manifold.go juju-core-2.0.0/src/github.com/juju/juju/worker/machineactions/manifold.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/machineactions/manifold.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/machineactions/manifold.go 2016-10-13 14:31:49.000000000 +0000 @@ -23,7 +23,7 @@ NewWorker func(WorkerConfig) (worker.Worker, error) } -// start is used by engine.AgentApiManifold to create a StartFunc. +// start is used by engine.AgentAPIManifold to create a StartFunc. func (config ManifoldConfig) start(a agent.Agent, apiCaller base.APICaller) (worker.Worker, error) { machineTag, ok := a.CurrentConfig().Tag().(names.MachineTag) if !ok { @@ -39,9 +39,9 @@ // Manifold returns a dependency.Manifold as configured. 
func Manifold(config ManifoldConfig) dependency.Manifold { - typedConfig := engine.AgentApiManifoldConfig{ + typedConfig := engine.AgentAPIManifoldConfig{ AgentName: config.AgentName, APICallerName: config.APICallerName, } - return engine.AgentApiManifold(typedConfig, config.start) + return engine.AgentAPIManifold(typedConfig, config.start) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/machiner/machiner.go juju-core-2.0.0/src/github.com/juju/juju/worker/machiner/machiner.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/machiner/machiner.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/machiner/machiner.go 2016-10-13 14:31:49.000000000 +0000 @@ -99,7 +99,7 @@ } // Mark the machine as started and log it. - if err := m.SetStatus(status.StatusStarted, "", nil); err != nil { + if err := m.SetStatus(status.Started, "", nil); err != nil { return nil, errors.Annotatef(err, "%s failed to set status started", mr.config.Tag) } logger.Infof("%q started", mr.config.Tag) @@ -157,7 +157,7 @@ life := mr.machine.Life() if life == params.Alive { - observedConfig, err := getObservedNetworkConfig() + observedConfig, err := getObservedNetworkConfig(networkingcommon.DefaultNetworkConfigSource()) if err != nil { return errors.Annotate(err, "cannot discover observed network config") } else if len(observedConfig) == 0 { @@ -173,7 +173,7 @@ return nil } logger.Debugf("%q is now %s", mr.config.Tag, life) - if err := mr.machine.SetStatus(status.StatusStopped, "", nil); err != nil { + if err := mr.machine.SetStatus(status.Stopped, "", nil); err != nil { return errors.Annotatef(err, "%s failed to set status stopped", mr.config.Tag) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/machiner/machiner_test.go juju-core-2.0.0/src/github.com/juju/juju/worker/machiner/machiner_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/machiner/machiner_test.go 2016-08-16 08:56:25.000000000 +0000 +++ 
juju-core-2.0.0/src/github.com/juju/juju/worker/machiner/machiner_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -18,6 +18,7 @@ "github.com/juju/juju/api" apimachiner "github.com/juju/juju/api/machiner" + "github.com/juju/juju/apiserver/common/networkingcommon" "github.com/juju/juju/apiserver/params" "github.com/juju/juju/juju/testing" "github.com/juju/juju/network" @@ -54,7 +55,7 @@ s.PatchValue(machiner.InterfaceAddrs, func() ([]net.Addr, error) { return s.addresses, nil }) - s.PatchValue(machiner.GetObservedNetworkConfig, func() ([]params.NetworkConfig, error) { + s.PatchValue(machiner.GetObservedNetworkConfig, func(_ networkingcommon.NetworkConfigSource) ([]params.NetworkConfig, error) { return nil, nil }) } @@ -82,18 +83,18 @@ func (s *MachinerSuite) TestMachinerMachineNotFound(c *gc.C) { // Accessing the machine initially yields "not found or unauthorized". // We don't know which, so we don't report that the machine is dead. - var machineDead machineDeathTracker - w, err := machiner.NewMachiner(machiner.Config{ - s.accessor, s.machineTag, false, - machineDead.machineDead, - }) - c.Assert(err, jc.ErrorIsNil) s.accessor.machine.SetErrors( nil, // SetMachineAddresses nil, // SetStatus nil, // Watch ¶ms.Error{Code: params.CodeNotFound}, // Refresh ) + var machineDead machineDeathTracker + w, err := machiner.NewMachiner(machiner.Config{ + s.accessor, s.machineTag, false, + machineDead.machineDead, + }) + c.Assert(err, jc.ErrorIsNil) s.accessor.machine.watcher.changes <- struct{}{} err = stopWorker(w) c.Assert(errors.Cause(err), gc.Equals, worker.ErrTerminateAgent) @@ -101,11 +102,6 @@ } func (s *MachinerSuite) TestMachinerSetStatusStopped(c *gc.C) { - w, err := machiner.NewMachiner(machiner.Config{ - MachineAccessor: s.accessor, - Tag: s.machineTag, - }) - c.Assert(err, jc.ErrorIsNil) s.accessor.machine.life = params.Dying s.accessor.machine.SetErrors( nil, // SetMachineAddresses @@ -114,6 +110,11 @@ nil, // Refresh errors.New("cannot set status"), // SetStatus 
(stopped) ) + w, err := machiner.NewMachiner(machiner.Config{ + MachineAccessor: s.accessor, + Tag: s.machineTag, + }) + c.Assert(err, jc.ErrorIsNil) s.accessor.machine.watcher.changes <- struct{}{} err = stopWorker(w) c.Assert( @@ -130,18 +131,13 @@ ) s.accessor.machine.CheckCall( c, 5, "SetStatus", - status.StatusStopped, + status.Stopped, "", map[string]interface{}(nil), ) } func (s *MachinerSuite) TestMachinerMachineEnsureDeadError(c *gc.C) { - w, err := machiner.NewMachiner(machiner.Config{ - MachineAccessor: s.accessor, - Tag: s.machineTag, - }) - c.Assert(err, jc.ErrorIsNil) s.accessor.machine.life = params.Dying s.accessor.machine.SetErrors( nil, // SetMachineAddresses @@ -151,6 +147,11 @@ nil, // SetStatus errors.New("cannot ensure machine is dead"), // EnsureDead ) + w, err := machiner.NewMachiner(machiner.Config{ + MachineAccessor: s.accessor, + Tag: s.machineTag, + }) + c.Assert(err, jc.ErrorIsNil) s.accessor.machine.watcher.changes <- struct{}{} err = stopWorker(w) c.Check( @@ -160,11 +161,6 @@ } func (s *MachinerSuite) TestMachinerMachineAssignedUnits(c *gc.C) { - w, err := machiner.NewMachiner(machiner.Config{ - MachineAccessor: s.accessor, - Tag: s.machineTag, - }) - c.Assert(err, jc.ErrorIsNil) s.accessor.machine.life = params.Dying s.accessor.machine.SetErrors( nil, // SetMachineAddresses @@ -174,6 +170,11 @@ nil, // SetStatus ¶ms.Error{Code: params.CodeHasAssignedUnits}, // EnsureDead ) + w, err := machiner.NewMachiner(machiner.Config{ + MachineAccessor: s.accessor, + Tag: s.machineTag, + }) + c.Assert(err, jc.ErrorIsNil) s.accessor.machine.watcher.changes <- struct{}{} err = stopWorker(w) @@ -231,7 +232,7 @@ }, { FuncName: "SetStatus", Args: []interface{}{ - status.StatusStarted, + status.Started, "", map[string]interface{}(nil), }, @@ -244,7 +245,7 @@ }, { FuncName: "SetStatus", Args: []interface{}{ - status.StatusStopped, + status.Stopped, "", map[string]interface{}(nil), }, @@ -266,6 +267,8 @@ machinerState *apimachiner.State machine 
*state.Machine apiMachine *apimachiner.Machine + + getObservedNetworkConfigError error } var _ = gc.Suite(&MachinerStateSuite{}) @@ -291,8 +294,9 @@ return nil, nil }) s.PatchValue(&network.LXCNetDefaultConfig, "") - s.PatchValue(machiner.GetObservedNetworkConfig, func() ([]params.NetworkConfig, error) { - return nil, nil + s.getObservedNetworkConfigError = nil + s.PatchValue(machiner.GetObservedNetworkConfig, func(_ networkingcommon.NetworkConfigSource) ([]params.NetworkConfig, error) { + return nil, s.getObservedNetworkConfigError }) } @@ -367,20 +371,20 @@ func (s *MachinerStateSuite) TestStartSetsStatus(c *gc.C) { statusInfo, err := s.machine.Status() c.Assert(err, jc.ErrorIsNil) - c.Assert(statusInfo.Status, gc.Equals, status.StatusPending) + c.Assert(statusInfo.Status, gc.Equals, status.Pending) c.Assert(statusInfo.Message, gc.Equals, "") mr := s.makeMachiner(c, false, nil) defer worker.Stop(mr) - s.waitMachineStatus(c, s.machine, status.StatusStarted) + s.waitMachineStatus(c, s.machine, status.Started) } func (s *MachinerStateSuite) TestSetsStatusWhenDying(c *gc.C) { mr := s.makeMachiner(c, false, nil) defer worker.Stop(mr) c.Assert(s.machine.Destroy(), jc.ErrorIsNil) - s.waitMachineStatus(c, s.machine, status.StatusStopped) + s.waitMachineStatus(c, s.machine, status.Stopped) } func (s *MachinerStateSuite) TestSetDead(c *gc.C) { @@ -427,7 +431,19 @@ s.State.StartSync() c.Assert(mr.Wait(), gc.Equals, worker.ErrTerminateAgent) c.Assert(bool(machineDead), jc.IsTrue) +} +func (s *MachinerStateSuite) TestAliveErrorGetObservedNetworkConfig(c *gc.C) { + s.getObservedNetworkConfigError = errors.New("no config!") + var machineDead machineDeathTracker + mr := s.makeMachiner(c, false, machineDead.machineDead) + defer worker.Stop(mr) + s.State.StartSync() + + c.Assert(mr.Wait(), gc.ErrorMatches, "cannot discover observed network config: no config!") + c.Assert(s.machine.Refresh(), jc.ErrorIsNil) + c.Assert(s.machine.Life(), gc.Equals, state.Alive) + 
c.Assert(bool(machineDead), jc.IsFalse) } func (s *MachinerStateSuite) setupSetMachineAddresses(c *gc.C, ignore bool) { diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/machiner/manifold.go juju-core-2.0.0/src/github.com/juju/juju/worker/machiner/manifold.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/machiner/manifold.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/machiner/manifold.go 2016-10-13 14:31:49.000000000 +0000 @@ -18,16 +18,16 @@ // ManifoldConfig defines the names of the manifolds on which a // Manifold will depend. -type ManifoldConfig engine.AgentApiManifoldConfig +type ManifoldConfig engine.AgentAPIManifoldConfig // Manifold returns a dependency manifold that runs a machiner worker, using // the resource names defined in the supplied config. func Manifold(config ManifoldConfig) dependency.Manifold { - typedConfig := engine.AgentApiManifoldConfig(config) - return engine.AgentApiManifold(typedConfig, newWorker) + typedConfig := engine.AgentAPIManifoldConfig(config) + return engine.AgentAPIManifold(typedConfig, newWorker) } -// newWorker non-trivially wraps NewMachiner to specialise a engine.AgentApiManifold. +// newWorker non-trivially wraps NewMachiner to specialise a engine.AgentAPIManifold. // // TODO(waigani) This function is currently covered by functional tests // under the machine agent. Add unit tests once infrastructure to do so is diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/machineundertaker/manifold.go juju-core-2.0.0/src/github.com/juju/juju/worker/machineundertaker/manifold.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/machineundertaker/manifold.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/machineundertaker/manifold.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,51 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+ +package machineundertaker + +import ( + "github.com/juju/errors" + + "github.com/juju/juju/api/base" + "github.com/juju/juju/api/machineundertaker" + "github.com/juju/juju/api/watcher" + "github.com/juju/juju/environs" + "github.com/juju/juju/worker" + "github.com/juju/juju/worker/dependency" +) + +// ManifoldConfig defines the machine undertaker's configuration and +// dependencies. +type ManifoldConfig struct { + APICallerName string + EnvironName string + + NewWorker func(Facade, environs.Environ) (worker.Worker, error) +} + +// Manifold returns a dependency.Manifold that runs a machine +// undertaker. +func Manifold(config ManifoldConfig) dependency.Manifold { + return dependency.Manifold{ + Inputs: []string{config.APICallerName, config.EnvironName}, + Start: func(context dependency.Context) (worker.Worker, error) { + var apiCaller base.APICaller + if err := context.Get(config.APICallerName, &apiCaller); err != nil { + return nil, errors.Trace(err) + } + var environ environs.Environ + if err := context.Get(config.EnvironName, &environ); err != nil { + return nil, errors.Trace(err) + } + api, err := machineundertaker.NewAPI(apiCaller, watcher.NewNotifyWatcher) + if err != nil { + return nil, errors.Trace(err) + } + w, err := config.NewWorker(api, environ) + if err != nil { + return nil, errors.Trace(err) + } + return w, nil + }, + } +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/machineundertaker/manifold_test.go juju-core-2.0.0/src/github.com/juju/juju/worker/machineundertaker/manifold_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/machineundertaker/manifold_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/machineundertaker/manifold_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,100 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+ +package machineundertaker_test + +import ( + "github.com/juju/errors" + "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + "gopkg.in/juju/names.v2" + + "github.com/juju/juju/api/base" + apitesting "github.com/juju/juju/api/base/testing" + "github.com/juju/juju/environs" + "github.com/juju/juju/worker" + "github.com/juju/juju/worker/dependency" + dt "github.com/juju/juju/worker/dependency/testing" + "github.com/juju/juju/worker/machineundertaker" +) + +type manifoldSuite struct { + testing.IsolationSuite +} + +var _ = gc.Suite(&manifoldSuite{}) + +func (*manifoldSuite) TestMissingCaller(c *gc.C) { + manifold := makeManifold(nil, nil) + result, err := manifold.Start(dt.StubContext(nil, map[string]interface{}{ + "the-caller": dependency.ErrMissing, + "the-environ": &fakeEnviron{}, + })) + c.Assert(result, gc.IsNil) + c.Assert(errors.Cause(err), gc.Equals, dependency.ErrMissing) +} + +func (*manifoldSuite) TestMissingEnviron(c *gc.C) { + manifold := makeManifold(nil, nil) + result, err := manifold.Start(dt.StubContext(nil, map[string]interface{}{ + "the-caller": &fakeAPICaller{}, + "the-environ": dependency.ErrMissing, + })) + c.Assert(result, gc.IsNil) + c.Assert(errors.Cause(err), gc.Equals, dependency.ErrMissing) +} + +func (*manifoldSuite) TestAPIError(c *gc.C) { + manifold := makeManifold(nil, nil) + result, err := manifold.Start(dt.StubContext(nil, map[string]interface{}{ + "the-caller": &fakeAPICaller{}, + "the-environ": &fakeEnviron{}, + })) + c.Assert(result, gc.IsNil) + c.Assert(err, gc.ErrorMatches, "machine undertaker client requires a model API connection") +} + +func (*manifoldSuite) TestWorkerError(c *gc.C) { + manifold := makeManifold(nil, errors.New("boglodite")) + result, err := manifold.Start(dt.StubContext(nil, map[string]interface{}{ + "the-caller": apitesting.APICallerFunc(nil), + "the-environ": &fakeEnviron{}, + })) + c.Assert(result, gc.IsNil) + c.Assert(err, gc.ErrorMatches, "boglodite") +} + +func 
(*manifoldSuite) TestSuccess(c *gc.C) { + w := fakeWorker{name: "Boris"} + manifold := makeManifold(&w, nil) + result, err := manifold.Start(dt.StubContext(nil, map[string]interface{}{ + "the-caller": apitesting.APICallerFunc(nil), + "the-environ": &fakeEnviron{}, + })) + c.Assert(err, jc.ErrorIsNil) + c.Assert(result, gc.DeepEquals, &w) +} + +func makeManifold(workerResult worker.Worker, workerError error) dependency.Manifold { + return machineundertaker.Manifold(machineundertaker.ManifoldConfig{ + APICallerName: "the-caller", + EnvironName: "the-environ", + NewWorker: func(machineundertaker.Facade, environs.Environ) (worker.Worker, error) { + return workerResult, workerError + }, + }) +} + +type fakeAPICaller struct { + base.APICaller +} + +func (c *fakeAPICaller) ModelTag() (names.ModelTag, bool) { + return names.ModelTag{}, false +} + +type fakeWorker struct { + worker.Worker + name string +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/machineundertaker/package_test.go juju-core-2.0.0/src/github.com/juju/juju/worker/machineundertaker/package_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/machineundertaker/package_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/machineundertaker/package_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,14 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+ +package machineundertaker_test + +import ( + stdtesting "testing" + + gc "gopkg.in/check.v1" +) + +func TestPackage(t *stdtesting.T) { + gc.TestingT(t) +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/machineundertaker/undertaker.go juju-core-2.0.0/src/github.com/juju/juju/worker/machineundertaker/undertaker.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/machineundertaker/undertaker.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/machineundertaker/undertaker.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,126 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package machineundertaker + +import ( + "github.com/juju/errors" + "github.com/juju/loggo" + "gopkg.in/juju/names.v2" + + "github.com/juju/juju/environs" + "github.com/juju/juju/network" + "github.com/juju/juju/watcher" + "github.com/juju/juju/worker" +) + +var logger = loggo.GetLogger("juju.worker.machineundertaker") + +// Facade defines the interface we require from the machine undertaker +// facade. +type Facade interface { + WatchMachineRemovals() (watcher.NotifyWatcher, error) + AllMachineRemovals() ([]names.MachineTag, error) + GetProviderInterfaceInfo(names.MachineTag) ([]network.ProviderInterfaceInfo, error) + CompleteRemoval(names.MachineTag) error +} + +// AddressReleaser defines the interface we need from the environment +// networking. +type AddressReleaser interface { + ReleaseContainerAddresses([]network.ProviderInterfaceInfo) error +} + +// MachineUndertaker is responsible for doing any provider-level +// cleanup needed and then removing the machine. +type Undertaker struct { + API Facade + Releaser AddressReleaser +} + +// NewWorker returns a machine undertaker worker that will watch for +// machines that need to be removed and remove them, cleaning up any +// necessary provider-level resources first. 
+func NewWorker(api Facade, env environs.Environ) (worker.Worker, error) { + envNetworking, _ := environs.SupportsNetworking(env) + w, err := watcher.NewNotifyWorker(watcher.NotifyConfig{ + Handler: &Undertaker{API: api, Releaser: envNetworking}, + }) + if err != nil { + return nil, errors.Trace(err) + } + return w, nil +} + +// Setup (part of watcher.NotifyHandler) starts watching for machine +// removals. +func (u *Undertaker) SetUp() (watcher.NotifyWatcher, error) { + logger.Infof("setting up machine undertaker") + return u.API.WatchMachineRemovals() +} + +// Handle (part of watcher.NotifyHandler) cleans up provider resources +// and removes machines that have been marked for removal. +func (u *Undertaker) Handle(<-chan struct{}) error { + removals, err := u.API.AllMachineRemovals() + if err != nil { + return errors.Trace(err) + } + logger.Debugf("handling removals: %v", removals) + // TODO(babbageclunk): shuffle the removals so if there's a + // problem with one others can still get past? + for _, machine := range removals { + err := u.MaybeReleaseAddresses(machine) + if err != nil { + logger.Errorf("couldn't release addresses for %s: %s", machine, err) + continue + } + err = u.API.CompleteRemoval(machine) + if err != nil { + logger.Errorf("couldn't complete removal for %s: %s", machine, err) + } else { + logger.Debugf("completed removal: %s", machine) + } + } + return nil +} + +// MaybeReleaseAddresses releases any addresses that have been +// allocated to this machine by the provider (if the provider supports +// that). +func (u *Undertaker) MaybeReleaseAddresses(machine names.MachineTag) error { + if u.Releaser == nil { + // This environ doesn't support releasing addresses. + return nil + } + if !names.IsContainerMachine(machine.Id()) { + // At the moment, only containers need their addresses releasing. 
+ return nil + } + interfaceInfos, err := u.API.GetProviderInterfaceInfo(machine) + if err != nil { + return errors.Trace(err) + } + if len(interfaceInfos) == 0 { + logger.Debugf("%s has no addresses to release", machine) + return nil + } + err = u.Releaser.ReleaseContainerAddresses(interfaceInfos) + // Some providers say they support networking but don't + // actually support container addressing; don't freak out + // about those. + if errors.IsNotSupported(err) { + logger.Debugf("%s has addresses but provider doesn't support releasing them", machine) + } else if err != nil { + return errors.Trace(err) + } + return nil +} + +// Teardown (part of watcher.NotifyHandler) is an opportunity to stop +// or release any resources created in SetUp other than the watcher, +// which watcher.NotifyWorker takes care of for us. +func (u *Undertaker) TearDown() error { + logger.Infof("tearing down machine undertaker") + return nil +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/machineundertaker/undertaker_test.go juju-core-2.0.0/src/github.com/juju/juju/worker/machineundertaker/undertaker_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/machineundertaker/undertaker_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/machineundertaker/undertaker_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,338 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+ +package machineundertaker_test + +import ( + "github.com/juju/errors" + "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + "gopkg.in/juju/names.v2" + "gopkg.in/tomb.v1" + + "github.com/juju/juju/environs" + "github.com/juju/juju/network" + "github.com/juju/juju/watcher" + "github.com/juju/juju/worker" + "github.com/juju/juju/worker/machineundertaker" + "github.com/juju/juju/worker/workertest" +) + +type undertakerSuite struct { + testing.IsolationSuite +} + +var _ = gc.Suite(&undertakerSuite{}) + +// Some tests to check that the handler is wired up to the +// NotifyWorker first. + +func (s *undertakerSuite) TestErrorWatching(c *gc.C) { + api := s.makeAPIWithWatcher() + api.SetErrors(errors.New("blam")) + w, err := machineundertaker.NewWorker(api, &fakeEnviron{}) + c.Assert(err, jc.ErrorIsNil) + err = workertest.CheckKilled(c, w) + c.Check(err, gc.ErrorMatches, "blam") + api.CheckCallNames(c, "WatchMachineRemovals") +} + +func (s *undertakerSuite) TestErrorGettingRemovals(c *gc.C) { + api := s.makeAPIWithWatcher() + api.SetErrors(nil, errors.New("explodo")) + w, err := machineundertaker.NewWorker(api, &fakeEnviron{}) + c.Assert(err, jc.ErrorIsNil) + err = workertest.CheckKilled(c, w) + c.Check(err, gc.ErrorMatches, "explodo") + api.CheckCallNames(c, "WatchMachineRemovals", "AllMachineRemovals") +} + +// It's really fiddly trying to test the code behind the worker, so +// the rest of the tests use the Undertaker directly to test the +// Handle and MaybeReleaseAddresses methods. This is much simpler +// because everything happens in the same goroutine (and it's safe +// since all of the clever/tricky lifecycle management is taken care +// of in NotifyWorker instead). 
+ +func (*undertakerSuite) TestMaybeReleaseAddresses_NoNetworking(c *gc.C) { + api := fakeAPI{Stub: &testing.Stub{}} + u := machineundertaker.Undertaker{API: &api} + err := u.MaybeReleaseAddresses(names.NewMachineTag("3")) + c.Assert(err, jc.ErrorIsNil) + api.CheckCallNames(c) +} + +func (*undertakerSuite) TestMaybeReleaseAddresses_NotContainer(c *gc.C) { + api := fakeAPI{Stub: &testing.Stub{}} + releaser := fakeReleaser{} + u := machineundertaker.Undertaker{ + API: &api, + Releaser: &releaser, + } + err := u.MaybeReleaseAddresses(names.NewMachineTag("4")) + c.Assert(err, jc.ErrorIsNil) + api.CheckCallNames(c) +} + +func (*undertakerSuite) TestMaybeReleaseAddresses_ErrorGettingInfo(c *gc.C) { + api := fakeAPI{Stub: &testing.Stub{}} + api.SetErrors(errors.New("a funny thing happened on the way")) + releaser := fakeReleaser{} + u := machineundertaker.Undertaker{ + API: &api, + Releaser: &releaser, + } + err := u.MaybeReleaseAddresses(names.NewMachineTag("4/lxd/2")) + c.Assert(err, gc.ErrorMatches, "a funny thing happened on the way") +} + +func (*undertakerSuite) TestMaybeReleaseAddresses_NoAddresses(c *gc.C) { + api := fakeAPI{Stub: &testing.Stub{}} + releaser := fakeReleaser{Stub: &testing.Stub{}} + u := machineundertaker.Undertaker{ + API: &api, + Releaser: &releaser, + } + err := u.MaybeReleaseAddresses(names.NewMachineTag("4/lxd/4")) + c.Assert(err, jc.ErrorIsNil) + releaser.CheckCallNames(c) +} + +func (*undertakerSuite) TestMaybeReleaseAddresses_NotSupported(c *gc.C) { + api := fakeAPI{ + Stub: &testing.Stub{}, + interfaces: map[string][]network.ProviderInterfaceInfo{ + "4/lxd/4": []network.ProviderInterfaceInfo{ + {InterfaceName: "chloe"}, + }, + }, + } + releaser := fakeReleaser{Stub: &testing.Stub{}} + releaser.SetErrors(errors.NotSupportedf("this sort of thing")) + u := machineundertaker.Undertaker{ + API: &api, + Releaser: &releaser, + } + err := u.MaybeReleaseAddresses(names.NewMachineTag("4/lxd/4")) + c.Assert(err, jc.ErrorIsNil) + releaser.CheckCall(c, 
0, "ReleaseContainerAddresses", + []network.ProviderInterfaceInfo{{InterfaceName: "chloe"}}, + ) +} + +func (*undertakerSuite) TestMaybeReleaseAddresses_ErrorReleasing(c *gc.C) { + api := fakeAPI{ + Stub: &testing.Stub{}, + interfaces: map[string][]network.ProviderInterfaceInfo{ + "4/lxd/4": []network.ProviderInterfaceInfo{ + {InterfaceName: "chloe"}, + }, + }, + } + releaser := fakeReleaser{Stub: &testing.Stub{}} + releaser.SetErrors(errors.New("something unexpected")) + u := machineundertaker.Undertaker{ + API: &api, + Releaser: &releaser, + } + err := u.MaybeReleaseAddresses(names.NewMachineTag("4/lxd/4")) + c.Assert(err, gc.ErrorMatches, "something unexpected") + releaser.CheckCall(c, 0, "ReleaseContainerAddresses", + []network.ProviderInterfaceInfo{{InterfaceName: "chloe"}}, + ) +} + +func (*undertakerSuite) TestMaybeReleaseAddresses_Success(c *gc.C) { + api := fakeAPI{ + Stub: &testing.Stub{}, + interfaces: map[string][]network.ProviderInterfaceInfo{ + "4/lxd/4": []network.ProviderInterfaceInfo{ + {InterfaceName: "chloe"}, + }, + }, + } + releaser := fakeReleaser{Stub: &testing.Stub{}} + u := machineundertaker.Undertaker{ + API: &api, + Releaser: &releaser, + } + err := u.MaybeReleaseAddresses(names.NewMachineTag("4/lxd/4")) + c.Assert(err, jc.ErrorIsNil) + releaser.CheckCall(c, 0, "ReleaseContainerAddresses", + []network.ProviderInterfaceInfo{{InterfaceName: "chloe"}}, + ) +} + +func (*undertakerSuite) TestHandle_CompletesRemoval(c *gc.C) { + api := fakeAPI{ + Stub: &testing.Stub{}, + removals: []string{"3", "4/lxd/4"}, + interfaces: map[string][]network.ProviderInterfaceInfo{ + "4/lxd/4": []network.ProviderInterfaceInfo{ + {InterfaceName: "chloe"}, + }, + }, + } + releaser := fakeReleaser{Stub: &testing.Stub{}} + u := machineundertaker.Undertaker{ + API: &api, + Releaser: &releaser, + } + err := u.Handle(nil) + c.Assert(err, jc.ErrorIsNil) + + c.Assert(releaser.Calls(), gc.HasLen, 1) + releaser.CheckCall(c, 0, "ReleaseContainerAddresses", + 
[]network.ProviderInterfaceInfo{{InterfaceName: "chloe"}}, + ) + + checkRemovalsMatch(c, api.Stub, "3", "4/lxd/4") +} + +func (*undertakerSuite) TestHandle_NoRemovalOnErrorReleasing(c *gc.C) { + api := fakeAPI{ + Stub: &testing.Stub{}, + removals: []string{"3", "4/lxd/4", "5"}, + interfaces: map[string][]network.ProviderInterfaceInfo{ + "4/lxd/4": []network.ProviderInterfaceInfo{ + {InterfaceName: "chloe"}, + }, + }, + } + releaser := fakeReleaser{Stub: &testing.Stub{}} + releaser.SetErrors(errors.New("couldn't release address")) + u := machineundertaker.Undertaker{ + API: &api, + Releaser: &releaser, + } + err := u.Handle(nil) + c.Assert(err, jc.ErrorIsNil) + + c.Assert(releaser.Calls(), gc.HasLen, 1) + releaser.CheckCall(c, 0, "ReleaseContainerAddresses", + []network.ProviderInterfaceInfo{{InterfaceName: "chloe"}}, + ) + + checkRemovalsMatch(c, api.Stub, "3", "5") +} + +func (*undertakerSuite) TestHandle_ErrorOnRemoval(c *gc.C) { + api := fakeAPI{ + Stub: &testing.Stub{}, + removals: []string{"3", "4/lxd/4"}, + } + api.SetErrors(nil, errors.New("couldn't remove machine 3")) + u := machineundertaker.Undertaker{API: &api} + err := u.Handle(nil) + c.Assert(err, jc.ErrorIsNil) + checkRemovalsMatch(c, api.Stub, "3", "4/lxd/4") +} + +func checkRemovalsMatch(c *gc.C, stub *testing.Stub, expected ...string) { + var completedRemovals []string + for _, call := range stub.Calls() { + if call.FuncName == "CompleteRemoval" { + machineId := call.Args[0].(names.MachineTag).Id() + completedRemovals = append(completedRemovals, machineId) + } + } + c.Check(completedRemovals, gc.DeepEquals, expected) +} + +func (s *undertakerSuite) makeAPIWithWatcher() *fakeAPI { + return &fakeAPI{ + Stub: &testing.Stub{}, + watcher: s.newMockNotifyWatcher(), + } +} + +func (s *undertakerSuite) newMockNotifyWatcher() *mockNotifyWatcher { + m := &mockNotifyWatcher{ + changes: make(chan struct{}, 1), + } + go func() { + defer m.tomb.Done() + defer m.tomb.Kill(nil) + <-m.tomb.Dying() + }() + 
s.AddCleanup(func(c *gc.C) { + err := worker.Stop(m) + c.Check(err, jc.ErrorIsNil) + }) + m.Change() + return m +} + +type fakeEnviron struct { + environs.NetworkingEnviron +} + +type fakeNoNetworkingEnviron struct { + environs.Environ +} + +type fakeReleaser struct { + *testing.Stub +} + +func (r *fakeReleaser) ReleaseContainerAddresses(interfaces []network.ProviderInterfaceInfo) error { + r.Stub.AddCall("ReleaseContainerAddresses", interfaces) + return r.Stub.NextErr() +} + +type fakeAPI struct { + machineundertaker.Facade + + *testing.Stub + watcher *mockNotifyWatcher + removals []string + interfaces map[string][]network.ProviderInterfaceInfo +} + +func (a *fakeAPI) WatchMachineRemovals() (watcher.NotifyWatcher, error) { + a.Stub.AddCall("WatchMachineRemovals") + return a.watcher, a.Stub.NextErr() +} + +func (a *fakeAPI) AllMachineRemovals() ([]names.MachineTag, error) { + a.Stub.AddCall("AllMachineRemovals") + result := make([]names.MachineTag, len(a.removals)) + for i := range a.removals { + result[i] = names.NewMachineTag(a.removals[i]) + } + return result, a.Stub.NextErr() +} + +func (a *fakeAPI) GetProviderInterfaceInfo(machine names.MachineTag) ([]network.ProviderInterfaceInfo, error) { + a.Stub.AddCall("GetProviderInterfaceInfo", machine) + return a.interfaces[machine.Id()], a.Stub.NextErr() +} + +func (a *fakeAPI) CompleteRemoval(machine names.MachineTag) error { + a.Stub.AddCall("CompleteRemoval", machine) + return a.Stub.NextErr() +} + +type mockNotifyWatcher struct { + watcher.NotifyWatcher + + tomb tomb.Tomb + changes chan struct{} +} + +func (m *mockNotifyWatcher) Kill() { + m.tomb.Kill(nil) +} + +func (m *mockNotifyWatcher) Wait() error { + return m.tomb.Wait() +} + +func (m *mockNotifyWatcher) Changes() watcher.NotifyChannel { + return m.changes +} + +func (m *mockNotifyWatcher) Change() { + m.changes <- struct{}{} +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/meterstatus/isolated.go 
juju-core-2.0.0/src/github.com/juju/juju/worker/meterstatus/isolated.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/meterstatus/isolated.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/meterstatus/isolated.go 2016-10-13 14:31:49.000000000 +0000 @@ -9,7 +9,7 @@ "github.com/juju/errors" "github.com/juju/utils/clock" "gopkg.in/juju/charm.v6-unstable/hooks" - "launchpad.net/tomb" + "gopkg.in/tomb.v1" "github.com/juju/juju/worker" "github.com/juju/juju/worker/uniter/runner/context" diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/meterstatus/isolated_test.go juju-core-2.0.0/src/github.com/juju/juju/worker/meterstatus/isolated_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/meterstatus/isolated_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/meterstatus/isolated_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -30,7 +30,7 @@ stub *testing.Stub dataDir string - clk *coretesting.Clock + clk *testing.Clock hookRan chan struct{} triggersCreated chan struct{} @@ -58,7 +58,7 @@ return meterstatus.GetTriggers(state, status, disconectedAt, clk, amber, red) } - s.clk = coretesting.NewClock(time.Now()) + s.clk = testing.NewClock(time.Now()) wrk, err := meterstatus.NewIsolatedStatusWorker( meterstatus.IsolatedConfig{ Runner: &stubRunner{stub: s.stub, ran: s.hookRan}, @@ -91,13 +91,13 @@ expected: "clock not provided", }, { cfg: meterstatus.IsolatedConfig{ - Clock: coretesting.NewClock(time.Now()), + Clock: testing.NewClock(time.Now()), StateFile: meterstatus.NewStateFile(path.Join(s.dataDir, "meter-status.yaml")), }, expected: "hook runner not provided", }, { cfg: meterstatus.IsolatedConfig{ - Clock: coretesting.NewClock(time.Now()), + Clock: testing.NewClock(time.Now()), Runner: &stubRunner{stub: s.stub}, }, expected: "state file not provided", diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/meterstatus/manifold_test.go 
juju-core-2.0.0/src/github.com/juju/juju/worker/meterstatus/manifold_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/meterstatus/manifold_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/meterstatus/manifold_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -47,7 +47,7 @@ AgentName: "agent-name", APICallerName: "apicaller-name", MachineLockName: "machine-lock-name", - Clock: coretesting.NewClock(time.Now()), + Clock: testing.NewClock(time.Now()), NewHookRunner: meterstatus.NewHookRunner, NewMeterStatusAPIClient: msapi.NewClient, diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/meterstatus/triggers_test.go juju-core-2.0.0/src/github.com/juju/juju/worker/meterstatus/triggers_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/meterstatus/triggers_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/meterstatus/triggers_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -6,6 +6,7 @@ import ( "time" + "github.com/juju/testing" "github.com/juju/utils/clock" gc "gopkg.in/check.v1" @@ -40,7 +41,7 @@ meterstatus.Uninitialized, "GREEN", now, - coretesting.NewClock(now), + testing.NewClock(now), func(c *gc.C, amber, red <-chan time.Time) { c.Check(amber, gc.NotNil) c.Check(red, gc.NotNil) @@ -49,7 +50,7 @@ meterstatus.Uninitialized, "AMBER", now, - coretesting.NewClock(now), + testing.NewClock(now), func(c *gc.C, amber, red <-chan time.Time) { c.Check(amber, gc.NotNil) c.Check(red, gc.NotNil) @@ -58,7 +59,7 @@ meterstatus.Uninitialized, "RED", now, - coretesting.NewClock(now), + testing.NewClock(now), func(c *gc.C, amber, red <-chan time.Time) { c.Check(amber, gc.IsNil) c.Check(red, gc.NotNil) @@ -67,7 +68,7 @@ meterstatus.WaitingAmber, "GREEN", now, - coretesting.NewClock(now), + testing.NewClock(now), func(c *gc.C, amber, red <-chan time.Time) { c.Check(amber, gc.NotNil) c.Check(red, gc.NotNil) @@ -76,7 +77,7 @@ meterstatus.WaitingAmber, "AMBER", 
now, - coretesting.NewClock(now), + testing.NewClock(now), func(c *gc.C, amber, red <-chan time.Time) { c.Check(amber, gc.NotNil) c.Check(red, gc.NotNil) @@ -85,7 +86,7 @@ meterstatus.WaitingAmber, "RED", now, - coretesting.NewClock(now), + testing.NewClock(now), func(c *gc.C, amber, red <-chan time.Time) { c.Check(amber, gc.IsNil) c.Check(red, gc.NotNil) @@ -94,7 +95,7 @@ meterstatus.WaitingAmber, "GREEN", now.Add(-(testAmberGracePeriod + fudge)), - coretesting.NewClock(now), + testing.NewClock(now), func(c *gc.C, amber, red <-chan time.Time) { c.Check(amber, gc.NotNil) c.Check(red, gc.NotNil) @@ -103,7 +104,7 @@ meterstatus.WaitingAmber, "AMBER", now.Add(-(testAmberGracePeriod + fudge)), - coretesting.NewClock(now), + testing.NewClock(now), func(c *gc.C, amber, red <-chan time.Time) { c.Check(amber, gc.NotNil) c.Check(red, gc.NotNil) @@ -112,7 +113,7 @@ meterstatus.WaitingAmber, "RED", now.Add(-(testAmberGracePeriod + fudge)), - coretesting.NewClock(now), + testing.NewClock(now), func(c *gc.C, amber, red <-chan time.Time) { c.Check(amber, gc.IsNil) c.Check(red, gc.NotNil) @@ -121,7 +122,7 @@ meterstatus.WaitingRed, "AMBER", now.Add(-(testAmberGracePeriod + fudge)), - coretesting.NewClock(now), + testing.NewClock(now), func(c *gc.C, amber, red <-chan time.Time) { c.Check(amber, gc.IsNil) c.Check(red, gc.NotNil) @@ -130,7 +131,7 @@ meterstatus.WaitingRed, "AMBER", now.Add(-(testRedGracePeriod + fudge)), - coretesting.NewClock(now), + testing.NewClock(now), func(c *gc.C, amber, red <-chan time.Time) { c.Check(amber, gc.IsNil) c.Check(red, gc.NotNil) @@ -139,7 +140,7 @@ meterstatus.WaitingRed, "RED", now.Add(-(testRedGracePeriod + fudge)), - coretesting.NewClock(now), + testing.NewClock(now), func(c *gc.C, amber, red <-chan time.Time) { c.Check(amber, gc.IsNil) c.Check(red, gc.NotNil) @@ -148,7 +149,7 @@ meterstatus.Done, "RED", now.Add(-(testRedGracePeriod + fudge)), - coretesting.NewClock(now), + testing.NewClock(now), func(c *gc.C, amber, red <-chan time.Time) { 
c.Check(amber, gc.IsNil) c.Check(red, gc.IsNil) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/metrics/collect/manifold.go juju-core-2.0.0/src/github.com/juju/juju/worker/metrics/collect/manifold.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/metrics/collect/manifold.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/metrics/collect/manifold.go 2016-10-13 14:31:49.000000000 +0000 @@ -32,12 +32,12 @@ ) const ( - defaultPeriod = 5 * time.Minute defaultSocketName = "metrics-collect.socket" ) var ( - logger = loggo.GetLogger("juju.worker.metrics.collect") + logger = loggo.GetLogger("juju.worker.metrics.collect") + defaultPeriod = 5 * time.Minute // errMetricsNotDefined is returned when the charm the uniter is running does // not declared any metrics. diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/metrics/sender/manifold.go juju-core-2.0.0/src/github.com/juju/juju/worker/metrics/sender/manifold.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/metrics/sender/manifold.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/metrics/sender/manifold.go 2016-10-13 14:31:49.000000000 +0000 @@ -24,9 +24,6 @@ newMetricAdderClient = func(apiCaller base.APICaller) metricsadder.MetricsAdderClient { return metricsadder.NewClient(apiCaller) } -) - -const ( period = time.Minute * 5 ) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/metrics/sender/manifold_test.go juju-core-2.0.0/src/github.com/juju/juju/worker/metrics/sender/manifold_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/metrics/sender/manifold_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/metrics/sender/manifold_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -121,9 +121,9 @@ return 42 } -func (s *stubAPICaller) ModelTag() (names.ModelTag, error) { +func (s *stubAPICaller) ModelTag() (names.ModelTag, bool) { 
s.MethodCall(s, "ModelTag") - return names.NewModelTag("foobar"), nil + return names.NewModelTag("foobar"), true } func (s *stubAPICaller) ConnectStream(string, url.Values) (base.Stream, error) { diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/metrics/spool/listener.go juju-core-2.0.0/src/github.com/juju/juju/worker/metrics/spool/listener.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/metrics/spool/listener.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/metrics/spool/listener.go 2016-10-13 14:31:49.000000000 +0000 @@ -8,7 +8,7 @@ "time" "github.com/juju/errors" - "launchpad.net/tomb" + "gopkg.in/tomb.v1" "github.com/juju/juju/juju/sockets" "github.com/juju/juju/worker" diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/metrics/spool/manifold.go juju-core-2.0.0/src/github.com/juju/juju/worker/metrics/spool/manifold.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/metrics/spool/manifold.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/metrics/spool/manifold.go 2016-10-13 14:31:49.000000000 +0000 @@ -13,7 +13,7 @@ "github.com/juju/errors" corecharm "gopkg.in/juju/charm.v6-unstable" - "launchpad.net/tomb" + "gopkg.in/tomb.v1" "github.com/juju/juju/agent" "github.com/juju/juju/cmd/jujud/agent/engine" diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/metricworker/manifold.go juju-core-2.0.0/src/github.com/juju/juju/worker/metricworker/manifold.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/metricworker/manifold.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/metricworker/manifold.go 2016-10-13 14:31:49.000000000 +0000 @@ -14,12 +14,12 @@ ) // ManifoldConfig describes the resources used by metrics workers. -type ManifoldConfig engine.ApiManifoldConfig +type ManifoldConfig engine.APIManifoldConfig // Manifold returns a Manifold that encapsulates various metrics workers. 
func Manifold(config ManifoldConfig) dependency.Manifold { - return engine.ApiManifold( - engine.ApiManifoldConfig(config), + return engine.APIManifold( + engine.APIManifoldConfig(config), manifoldStart, ) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/migrationflag/manifold.go juju-core-2.0.0/src/github.com/juju/juju/worker/migrationflag/manifold.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/migrationflag/manifold.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/migrationflag/manifold.go 2016-10-13 14:31:49.000000000 +0000 @@ -52,9 +52,9 @@ if err != nil { return nil, errors.Trace(err) } - modelTag, err := apiCaller.ModelTag() - if err != nil { - return nil, errors.Trace(err) + modelTag, ok := apiCaller.ModelTag() + if !ok { + return nil, errors.New("API connection is controller-only (should never happen)") } worker, err := config.NewWorker(Config{ Facade: facade, diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/migrationflag/util_test.go juju-core-2.0.0/src/github.com/juju/juju/worker/migrationflag/util_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/migrationflag/util_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/migrationflag/util_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -171,6 +171,6 @@ } // ModelTag is part of the base.APICaller interface. 
-func (*stubCaller) ModelTag() (names.ModelTag, error) { - return names.NewModelTag(validUUID), nil +func (*stubCaller) ModelTag() (names.ModelTag, bool) { + return names.NewModelTag(validUUID), true } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/migrationmaster/worker.go juju-core-2.0.0/src/github.com/juju/juju/worker/migrationmaster/worker.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/migrationmaster/worker.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/migrationmaster/worker.go 2016-10-13 14:31:49.000000000 +0000 @@ -53,9 +53,9 @@ // active for the model associated with the API connection. Watch() (watcher.NotifyWatcher, error) - // GetMigrationStatus returns the details and progress of the - // latest model migration. - GetMigrationStatus() (coremigration.MigrationStatus, error) + // MigrationStatus returns the details and progress of the latest + // model migration. + MigrationStatus() (coremigration.MigrationStatus, error) // SetPhase updates the phase of the currently active model // migration. @@ -65,6 +65,13 @@ // progress of a migration. SetStatusMessage(string) error + // Prechecks performs pre-migration checks on the model and + // (source) controller. + Prechecks() error + + // ModelInfo returns basic information about the model to be migrated. + ModelInfo() (coremigration.ModelInfo, error) + // Export returns a serialized representation of the model // associated with the API connection. Export() (coremigration.SerializedModel, error) @@ -77,9 +84,9 @@ // minion has made a report for the current migration phase. WatchMinionReports() (watcher.NotifyWatcher, error) - // GetMinionReports returns details of the reports made by migration + // MinionReports returns details of the reports made by migration // minions to the controller for the current migration phase. 
- GetMinionReports() (coremigration.MinionReports, error) + MinionReports() (coremigration.MinionReports, error) } // Config defines the operation of a Worker. @@ -181,18 +188,22 @@ return errors.Trace(err) } + if status.ExternalControl { + err := w.waitForMigrationEnd() + return errors.Trace(err) + } + phase := status.Phase + for { var err error switch phase { case coremigration.QUIESCE: - phase, err = w.doQUIESCE() - case coremigration.PRECHECK: - phase, err = w.doPRECHECK() + phase, err = w.doQUIESCE(status) case coremigration.IMPORT: phase, err = w.doIMPORT(status.TargetInfo, status.ModelUUID) case coremigration.VALIDATION: - phase, err = w.doVALIDATION(status.TargetInfo, status.ModelUUID) + phase, err = w.doVALIDATION(status) case coremigration.SUCCESS: phase, err = w.doSUCCESS(status) case coremigration.LOGTRANSFER: @@ -247,6 +258,10 @@ w.setStatusAndLog(w.logger.Infof, s, a...) } +func (w *Worker) setWarningStatus(s string, a ...interface{}) { + w.setStatusAndLog(w.logger.Warningf, s, a...) +} + func (w *Worker) setErrorStatus(s string, a ...interface{}) { w.setStatusAndLog(w.logger.Errorf, s, a...) } @@ -267,22 +282,56 @@ return errors.Annotate(err, "failed to set status message") } -func (w *Worker) doQUIESCE() (coremigration.Phase, error) { - // TODO(mjs) - Wait for all agents to report back. - // w.setInfoStatus("model quiescing to readonly mode") - return coremigration.PRECHECK, nil -} +func (w *Worker) doQUIESCE(status coremigration.MigrationStatus) (coremigration.Phase, error) { + err := w.prechecks(status) + if err != nil { + w.setErrorStatus(err.Error()) + return coremigration.ABORT, nil + } + + ok, err := w.waitForMinions(status, failFast, "quiescing") + if err != nil { + return coremigration.UNKNOWN, errors.Trace(err) + } + if !ok { + return coremigration.ABORT, nil + } -func (w *Worker) doPRECHECK() (coremigration.Phase, error) { - // TODO(mjs) - To be implemented. 
- // w.setInfoStatus("performing prechecks") return coremigration.IMPORT, nil } +func (w *Worker) prechecks(status coremigration.MigrationStatus) error { + w.setInfoStatus("performing source prechecks") + err := w.config.Facade.Prechecks() + if err != nil { + return errors.Annotate(err, "source prechecks failed") + } + + w.setInfoStatus("performing target prechecks") + model, err := w.config.Facade.ModelInfo() + if err != nil { + return errors.Annotate(err, "failed to obtain model info during prechecks") + } + conn, err := w.openAPIConn(status.TargetInfo) + if err != nil { + return errors.Annotate(err, "failed to connect to target controller during prechecks") + } + defer conn.Close() + + if conn.ControllerTag() != status.TargetInfo.ControllerTag { + return errors.Errorf("unexpected target controller UUID (got %s, expected %s)", + conn.ControllerTag(), status.TargetInfo.ControllerTag) + } + + targetClient := migrationtarget.NewClient(conn) + err = targetClient.Prechecks(model) + return errors.Annotate(err, "target prechecks failed") +} + func (w *Worker) doIMPORT(targetInfo coremigration.TargetInfo, modelUUID string) (coremigration.Phase, error) { err := w.transferModel(targetInfo, modelUUID) if err != nil { - w.setErrorStatus("model export failed: %v", err) + w.setErrorStatus("model data transfer failed, %v", err) return coremigration.ABORT, nil } return coremigration.VALIDATION, nil @@ -325,13 +374,21 @@ return errors.Annotate(err, "failed migration binaries") } -func (w *Worker) doVALIDATION(targetInfo coremigration.TargetInfo, modelUUID string) (coremigration.Phase, error) { - // TODO(mjs) - Wait for all agents to report back. +func (w *Worker) doVALIDATION(status coremigration.MigrationStatus) (coremigration.Phase, error) { + // Wait for agents to complete their validation checks. 
+ ok, err := w.waitForMinions(status, failFast, "validating") + if err != nil { + return coremigration.UNKNOWN, errors.Trace(err) + } + if !ok { + return coremigration.ABORT, nil + } - // Once all agents have validated, activate the model. - err := w.activateModel(targetInfo, modelUUID) + // Once all agents have validated, activate the model in the + // target controller. + err = w.activateModel(status.TargetInfo, status.ModelUUID) if err != nil { - w.setErrorStatus("model activation failed %v", err) + w.setErrorStatus("model activation failed, %v", err) return coremigration.ABORT, nil } return coremigration.SUCCESS, nil @@ -351,17 +408,14 @@ } func (w *Worker) doSUCCESS(status coremigration.MigrationStatus) (coremigration.Phase, error) { - err := w.waitForMinions(status, waitForAll, "successful") - switch errors.Cause(err) { - case nil, errMinionReportFailed, errMinionReportTimeout: - // There's no turning back from SUCCESS - any problems should - // have been picked up in VALIDATION. After the minion wait in - // the SUCCESS phase, the migration can only proceed to - // LOGTRANSFER. - return coremigration.LOGTRANSFER, nil - default: - return coremigration.SUCCESS, errors.Trace(err) + _, err := w.waitForMinions(status, waitForAll, "successful") + if err != nil { + return coremigration.UNKNOWN, errors.Trace(err) } + // There's no turning back from SUCCESS - any problems should have + // been picked up in VALIDATION. After the minion wait in the + // SUCCESS phase, the migration can only proceed to LOGTRANSFER. 
+ return coremigration.LOGTRANSFER, nil } func (w *Worker) doLOGTRANSFER() (coremigration.Phase, error) { @@ -371,7 +425,7 @@ } func (w *Worker) doREAP() (coremigration.Phase, error) { - w.setInfoStatus("successful: removing model from source controller") + w.setInfoStatus("successful, removing model from source controller") err := w.config.Facade.Reap() if err != nil { return coremigration.REAPFAILED, errors.Trace(err) @@ -380,11 +434,11 @@ } func (w *Worker) doABORT(targetInfo coremigration.TargetInfo, modelUUID string) (coremigration.Phase, error) { - w.setInfoStatus("aborted: removing model from target controller") + w.setInfoStatus("aborted, removing model from target controller") if err := w.removeImportedModel(targetInfo, modelUUID); err != nil { // This isn't fatal. Removing the imported model is a best // efforts attempt so just report the error and proceed. - w.setErrorStatus("failed to remove model from target controller: %v", err) + w.setWarningStatus("failed to remove model from target controller, %v", err) } return coremigration.ABORTDONE, nil } @@ -420,7 +474,7 @@ case <-watcher.Changes(): } - status, err := w.config.Facade.GetMigrationStatus() + status, err := w.config.Facade.MigrationStatus() switch { case params.IsCodeNotFound(err): // There's never been a migration. @@ -443,28 +497,63 @@ } } +func (w *Worker) waitForMigrationEnd() error { + w.logger.Infof("migration is externally managed. 
waiting for completion") + watcher, err := w.config.Facade.Watch() + if err != nil { + return errors.Annotate(err, "watching for migration") + } + if err := w.catacomb.Add(watcher); err != nil { + return errors.Trace(err) + } + defer watcher.Kill() + + for { + select { + case <-w.catacomb.Dying(): + return w.catacomb.ErrDying() + case <-watcher.Changes(): + } + + status, err := w.config.Facade.MigrationStatus() + if err != nil { + return errors.Annotate(err, "retrieving migration status") + } + w.logger.Infof("migration phase is now %v", status.Phase) + if status.Phase.IsTerminal() { + if modelHasMigrated(status.Phase) { + w.logger.Infof("migration is complete") + return ErrMigrated + } + w.logger.Infof("migration has aborted") + return ErrInactive + } + } +} + // Possible values for waitForMinion's waitPolicy argument. const failFast = false // Stop waiting at first minion failure report const waitForAll = true // Wait for all minion reports to arrive (or timeout) -var errMinionReportTimeout = errors.New("timed out waiting for all agents to report") -var errMinionReportFailed = errors.New("one or more agents failed a migration phase") - -func (w *Worker) waitForMinions(status coremigration.MigrationStatus, waitPolicy bool, infoPrefix string) error { +func (w *Worker) waitForMinions( + status coremigration.MigrationStatus, + waitPolicy bool, + infoPrefix string, +) (success bool, err error) { clk := w.config.Clock maxWait := maxMinionWait - clk.Now().Sub(status.PhaseChangedTime) timeout := clk.After(maxWait) - w.setInfoStatus("%s: waiting for agents to report back", infoPrefix) + w.setInfoStatus("%s, waiting for agents to report back", infoPrefix) w.logger.Infof("waiting for agents to report back for migration phase %s (will wait up to %s)", status.Phase, truncDuration(maxWait)) watch, err := w.config.Facade.WatchMinionReports() if err != nil { - return errors.Trace(err) + return false, errors.Trace(err) } if err := w.catacomb.Add(watch); err != nil { - return 
errors.Trace(err) + return false, errors.Trace(err) } logProgress := clk.After(minionWaitLogInterval) @@ -473,38 +562,44 @@ for { select { case <-w.catacomb.Dying(): - return w.catacomb.ErrDying() + return false, w.catacomb.ErrDying() case <-timeout: - w.logger.Errorf(formatMinionTimeout(reports, status)) - return errors.Trace(errMinionReportTimeout) + w.logger.Errorf(formatMinionTimeout(reports, status, infoPrefix)) + w.setErrorStatus("%s, timed out waiting for agents to report", infoPrefix) + return false, nil case <-watch.Changes(): var err error - reports, err = w.config.Facade.GetMinionReports() + reports, err = w.config.Facade.MinionReports() if err != nil { - return errors.Trace(err) + return false, errors.Trace(err) } if err := validateMinionReports(reports, status); err != nil { - return errors.Trace(err) + return false, errors.Trace(err) } failures := len(reports.FailedMachines) + len(reports.FailedUnits) if failures > 0 { - w.logger.Errorf(formatMinionFailure(reports)) + w.logger.Errorf(formatMinionFailure(reports, infoPrefix)) + w.setErrorStatus("%s, some agents reported failure", infoPrefix) if waitPolicy == failFast { - return errors.Trace(errMinionReportFailed) + return false, nil } } if reports.UnknownCount == 0 { - w.logger.Infof(formatMinionWaitDone(reports)) + msg := formatMinionWaitDone(reports, infoPrefix) if failures > 0 { - return errors.Trace(errMinionReportFailed) + w.logger.Errorf(msg) + w.setErrorStatus("%s, some agents reported failure", infoPrefix) + return false, nil } - return nil + w.logger.Infof(msg) + w.setInfoStatus("%s, all agents reported success", infoPrefix) + return true, nil } case <-logProgress: - w.setInfoStatus("%s: ", infoPrefix, formatMinionWaitUpdate(reports)) + w.setInfoStatus("%s, %s", infoPrefix, formatMinionWaitUpdate(reports)) logProgress = clk.After(minionWaitLogInterval) } } @@ -526,30 +621,40 @@ return nil } -func formatMinionTimeout(reports coremigration.MinionReports, status coremigration.MigrationStatus) 
string { +func formatMinionTimeout( + reports coremigration.MinionReports, + status coremigration.MigrationStatus, + infoPrefix string, +) string { if reports.IsZero() { return fmt.Sprintf("no agents reported in time") } - msg := "%s agents failed to report in time for migration phase %s including:" + var fails []string if len(reports.SomeUnknownMachines) > 0 { - msg += fmt.Sprintf("machines: %s;", strings.Join(reports.SomeUnknownMachines, ", ")) + fails = append(fails, fmt.Sprintf("machines: %s", strings.Join(reports.SomeUnknownMachines, ","))) } if len(reports.SomeUnknownUnits) > 0 { - msg += fmt.Sprintf(" units: %s", strings.Join(reports.SomeUnknownUnits, ", ")) + fails = append(fails, fmt.Sprintf("units: %s", strings.Join(reports.SomeUnknownUnits, ","))) } - return msg + return fmt.Sprintf("%d agents failed to report in time for %q phase (including %s)", + reports.UnknownCount, infoPrefix, strings.Join(fails, "; ")) } -func formatMinionFailure(reports coremigration.MinionReports) string { - msg := fmt.Sprintf("some agents failed %s: ", reports.Phase) +func formatMinionFailure(reports coremigration.MinionReports, infoPrefix string) string { + var fails []string if len(reports.FailedMachines) > 0 { - msg += fmt.Sprintf("failed machines: %s; ", strings.Join(reports.FailedMachines, ", ")) + fails = append(fails, fmt.Sprintf("machines: %s", strings.Join(reports.FailedMachines, ","))) } if len(reports.FailedUnits) > 0 { - msg += fmt.Sprintf("failed units: %s", strings.Join(reports.FailedUnits, ", ")) + fails = append(fails, fmt.Sprintf("units: %s", strings.Join(reports.FailedUnits, ","))) } - return msg + return fmt.Sprintf("agents failed phase %q (%s)", infoPrefix, strings.Join(fails, "; ")) +} + +func formatMinionWaitDone(reports coremigration.MinionReports, infoPrefix string) string { + return fmt.Sprintf("completed waiting for agents to report for %q, %d succeeded, %d failed", + infoPrefix, reports.SuccessCount, 
len(reports.FailedMachines)+len(reports.FailedUnits)) } func formatMinionWaitUpdate(reports coremigration.MinionReports) string { @@ -566,27 +671,20 @@ return msg } -func formatMinionWaitDone(reports coremigration.MinionReports) string { - return fmt.Sprintf("completed waiting for agents to report for %s: %d succeeded, %d failed", - reports.Phase, reports.SuccessCount, len(reports.FailedMachines)+len(reports.FailedUnits)) -} - func (w *Worker) openAPIConn(targetInfo coremigration.TargetInfo) (api.Connection, error) { return w.openAPIConnForModel(targetInfo, "") } func (w *Worker) openAPIConnForModel(targetInfo coremigration.TargetInfo, modelUUID string) (api.Connection, error) { apiInfo := &api.Info{ - Addrs: targetInfo.Addrs, - CACert: targetInfo.CACert, - Tag: targetInfo.AuthTag, - Password: targetInfo.Password, - ModelTag: names.NewModelTag(modelUUID), - } - // Use zero DialOpts (no retries) because the worker must stay - // responsive to Kill requests. We don't want it to be blocked by - // a long set of retry attempts. 
- return w.config.APIOpen(apiInfo, api.DialOpts{}) + Addrs: targetInfo.Addrs, + CACert: targetInfo.CACert, + Tag: targetInfo.AuthTag, + Password: targetInfo.Password, + ModelTag: names.NewModelTag(modelUUID), + Macaroons: targetInfo.Macaroons, + } + return w.config.APIOpen(apiInfo, migration.ControllerDialOpts()) } func modelHasMigrated(phase coremigration.Phase) bool { diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/migrationmaster/worker_test.go juju-core-2.0.0/src/github.com/juju/juju/worker/migrationmaster/worker_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/migrationmaster/worker_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/migrationmaster/worker_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -14,6 +14,7 @@ "github.com/juju/version" gc "gopkg.in/check.v1" "gopkg.in/juju/names.v2" + "gopkg.in/macaroon.v1" "github.com/juju/juju/api" "github.com/juju/juju/apiserver/params" @@ -29,23 +30,27 @@ type Suite struct { coretesting.BaseSuite - clock *coretesting.Clock + clock *jujutesting.Clock stub *jujutesting.Stub connection *stubConnection connectionErr error - masterFacade *stubMasterFacade + facade *stubMasterFacade config migrationmaster.Config } var _ = gc.Suite(&Suite{}) var ( - fakeModelBytes = []byte("model") - modelTag = names.NewModelTag("model-uuid") - modelTagString = modelTag.String() + fakeModelBytes = []byte("model") + targetControllerTag = names.NewControllerTag("controller-uuid") + modelUUID = "model-uuid" + modelTag = names.NewModelTag(modelUUID) + modelName = "model-name" + ownerTag = names.NewUserTag("owner") + modelVersion = version.MustParse("1.2.4") // Define stub calls that commonly appear in tests here to allow reuse. 
- apiOpenCallController = jujutesting.StubCall{ + apiOpenControllerCall = jujutesting.StubCall{ "apiOpen", []interface{}{ &api.Info{ @@ -54,10 +59,10 @@ Tag: names.NewUserTag("admin"), Password: "secret", }, - api.DialOpts{}, + migration.ControllerDialOpts(), }, } - apiOpenCallModel = jujutesting.StubCall{ + apiOpenModelCall = jujutesting.StubCall{ "apiOpen", []interface{}{ &api.Info{ @@ -67,45 +72,72 @@ Password: "secret", ModelTag: modelTag, }, - api.DialOpts{}, + migration.ControllerDialOpts(), }, } importCall = jujutesting.StubCall{ - "APICall:MigrationTarget.Import", + "MigrationTarget.Import", []interface{}{ params.SerializedModel{Bytes: fakeModelBytes}, }, } activateCall = jujutesting.StubCall{ - "APICall:MigrationTarget.Activate", + "MigrationTarget.Activate", []interface{}{ - params.ModelArgs{ModelTag: modelTagString}, + params.ModelArgs{ModelTag: modelTag.String()}, }, } - connCloseCall = jujutesting.StubCall{"Connection.Close", nil} - abortCall = jujutesting.StubCall{ - "APICall:MigrationTarget.Abort", + apiCloseCall = jujutesting.StubCall{"Connection.Close", nil} + abortCall = jujutesting.StubCall{ + "MigrationTarget.Abort", []interface{}{ - params.ModelArgs{ModelTag: modelTagString}, + params.ModelArgs{ModelTag: modelTag.String()}, }, } + watchStatusLockdownCalls = []jujutesting.StubCall{ + {"facade.Watch", nil}, + {"facade.MigrationStatus", nil}, + {"guard.Lockdown", nil}, + } + prechecksCalls = []jujutesting.StubCall{ + {"facade.Prechecks", nil}, + {"facade.ModelInfo", nil}, + apiOpenControllerCall, + {"MigrationTarget.Prechecks", []interface{}{params.MigrationModelInfo{ + UUID: modelUUID, + Name: modelName, + OwnerTag: ownerTag.String(), + AgentVersion: modelVersion, + }}}, + apiCloseCall, + } + abortCalls = []jujutesting.StubCall{ + {"facade.SetPhase", []interface{}{coremigration.ABORT}}, + apiOpenControllerCall, + abortCall, + apiCloseCall, + {"facade.SetPhase", []interface{}{coremigration.ABORTDONE}}, + } ) func (s *Suite) SetUpTest(c *gc.C) { 
s.BaseSuite.SetUpTest(c) - s.clock = coretesting.NewClock(time.Now()) + s.clock = jujutesting.NewClock(time.Now()) s.stub = new(jujutesting.Stub) - s.connection = &stubConnection{stub: s.stub} + s.connection = &stubConnection{ + stub: s.stub, + controllerTag: targetControllerTag, + } s.connectionErr = nil - s.masterFacade = newStubMasterFacade(s.stub, s.clock.Now()) + s.facade = newStubMasterFacade(s.stub, s.clock.Now()) // The default worker Config used by most of the tests. Tests may // tweak parts of this as needed. s.config = migrationmaster.Config{ ModelUUID: utils.MustNewUUID().String(), - Facade: s.masterFacade, + Facade: s.facade, Guard: newStubGuard(s.stub), APIOpen: s.apiOpen, UploadBinaries: nullUploadBinaries, @@ -123,373 +155,407 @@ return s.connection, nil } -func (s *Suite) triggerMigration() { - select { - case s.masterFacade.watcherChanges <- struct{}{}: - default: - panic("migration watcher channel unexpectedly closed") - } - -} - -func (s *Suite) triggerMinionReports() { - select { - case s.masterFacade.minionReportsChanges <- struct{}{}: - default: - panic("minion reports watcher channel unexpectedly closed") +func (s *Suite) makeStatus(phase coremigration.Phase) coremigration.MigrationStatus { + return coremigration.MigrationStatus{ + MigrationId: "model-uuid:2", + ModelUUID: "model-uuid", + Phase: phase, + PhaseChangedTime: s.clock.Now(), + TargetInfo: coremigration.TargetInfo{ + ControllerTag: targetControllerTag, + Addrs: []string{"1.2.3.4:5"}, + CACert: "cert", + AuthTag: names.NewUserTag("admin"), + Password: "secret", + }, } } func (s *Suite) TestSuccessfulMigration(c *gc.C) { + s.facade.queueStatus(s.makeStatus(coremigration.QUIESCE)) + s.facade.queueMinionReports(makeMinionReports(coremigration.QUIESCE)) + s.facade.queueMinionReports(makeMinionReports(coremigration.VALIDATION)) + s.facade.queueMinionReports(makeMinionReports(coremigration.SUCCESS)) s.config.UploadBinaries = makeStubUploadBinaries(s.stub) - worker, err := 
migrationmaster.New(s.config) - c.Assert(err, jc.ErrorIsNil) - defer workertest.DirtyKill(c, worker) - s.triggerMigration() - s.triggerMinionReports() - err = workertest.CheckKilled(c, worker) - c.Assert(errors.Cause(err), gc.Equals, migrationmaster.ErrMigrated) + s.checkWorkerReturns(c, migrationmaster.ErrMigrated) // Observe that the migration was seen, the model exported, an API // connection to the target controller was made, the model was // imported and then the migration completed. - s.stub.CheckCalls(c, []jujutesting.StubCall{ - {"masterFacade.Watch", nil}, - {"masterFacade.GetMigrationStatus", nil}, - {"guard.Lockdown", nil}, - {"masterFacade.SetPhase", []interface{}{coremigration.PRECHECK}}, - {"masterFacade.SetPhase", []interface{}{coremigration.IMPORT}}, - {"masterFacade.Export", nil}, - apiOpenCallController, - importCall, - apiOpenCallModel, - {"UploadBinaries", []interface{}{ - []string{"charm0", "charm1"}, - fakeCharmDownloader, - map[version.Binary]string{ - version.MustParseBinary("2.1.0-trusty-amd64"): "/tools/0", - }, - fakeToolsDownloader, - }}, - connCloseCall, // for target model - connCloseCall, // for target controller - {"masterFacade.SetPhase", []interface{}{coremigration.VALIDATION}}, - apiOpenCallController, - activateCall, - connCloseCall, - {"masterFacade.SetPhase", []interface{}{coremigration.SUCCESS}}, - {"masterFacade.WatchMinionReports", nil}, - {"masterFacade.GetMinionReports", nil}, - {"masterFacade.SetPhase", []interface{}{coremigration.LOGTRANSFER}}, - {"masterFacade.SetPhase", []interface{}{coremigration.REAP}}, - {"masterFacade.Reap", nil}, - {"masterFacade.SetPhase", []interface{}{coremigration.DONE}}, - }) + s.stub.CheckCalls(c, joinCalls( + // Wait for migration to start. 
+ watchStatusLockdownCalls, + + // QUIESCE + prechecksCalls, + []jujutesting.StubCall{ + {"facade.WatchMinionReports", nil}, + {"facade.MinionReports", nil}, + {"facade.SetPhase", []interface{}{coremigration.IMPORT}}, + + //IMPORT + {"facade.Export", nil}, + apiOpenControllerCall, + importCall, + apiOpenModelCall, + {"UploadBinaries", []interface{}{ + []string{"charm0", "charm1"}, + fakeCharmDownloader, + map[version.Binary]string{ + version.MustParseBinary("2.1.0-trusty-amd64"): "/tools/0", + }, + fakeToolsDownloader, + }}, + apiCloseCall, // for target model + apiCloseCall, // for target controller + {"facade.SetPhase", []interface{}{coremigration.VALIDATION}}, + + // VALIDATION + {"facade.WatchMinionReports", nil}, + {"facade.MinionReports", nil}, + apiOpenControllerCall, + activateCall, + apiCloseCall, + {"facade.SetPhase", []interface{}{coremigration.SUCCESS}}, + + // SUCCESS + {"facade.WatchMinionReports", nil}, + {"facade.MinionReports", nil}, + {"facade.SetPhase", []interface{}{coremigration.LOGTRANSFER}}, + + // LOGTRANSFER + {"facade.SetPhase", []interface{}{coremigration.REAP}}, + + // REAP + {"facade.Reap", nil}, + {"facade.SetPhase", []interface{}{coremigration.DONE}}, + }), + ) } func (s *Suite) TestMigrationResume(c *gc.C) { // Test that a partially complete migration can be resumed. 
- worker, err := migrationmaster.New(s.config) - c.Assert(err, jc.ErrorIsNil) - defer workertest.DirtyKill(c, worker) - s.masterFacade.status.Phase = coremigration.SUCCESS - s.triggerMigration() - s.triggerMinionReports() + s.facade.queueStatus(s.makeStatus(coremigration.SUCCESS)) + s.facade.queueMinionReports(makeMinionReports(coremigration.SUCCESS)) - err = workertest.CheckKilled(c, worker) - c.Assert(errors.Cause(err), gc.Equals, migrationmaster.ErrMigrated) - - s.stub.CheckCalls(c, []jujutesting.StubCall{ - {"masterFacade.Watch", nil}, - {"masterFacade.GetMigrationStatus", nil}, - {"guard.Lockdown", nil}, - {"masterFacade.WatchMinionReports", nil}, - {"masterFacade.GetMinionReports", nil}, - {"masterFacade.SetPhase", []interface{}{coremigration.LOGTRANSFER}}, - {"masterFacade.SetPhase", []interface{}{coremigration.REAP}}, - {"masterFacade.Reap", nil}, - {"masterFacade.SetPhase", []interface{}{coremigration.DONE}}, - }) + s.checkWorkerReturns(c, migrationmaster.ErrMigrated) + s.stub.CheckCalls(c, joinCalls( + watchStatusLockdownCalls, + []jujutesting.StubCall{ + {"facade.WatchMinionReports", nil}, + {"facade.MinionReports", nil}, + {"facade.SetPhase", []interface{}{coremigration.LOGTRANSFER}}, + {"facade.SetPhase", []interface{}{coremigration.REAP}}, + {"facade.Reap", nil}, + {"facade.SetPhase", []interface{}{coremigration.DONE}}, + }, + )) } func (s *Suite) TestPreviouslyAbortedMigration(c *gc.C) { - s.masterFacade.status.Phase = coremigration.ABORTDONE - s.triggerMigration() + s.facade.queueStatus(s.makeStatus(coremigration.ABORTDONE)) worker, err := migrationmaster.New(s.config) c.Assert(err, jc.ErrorIsNil) defer workertest.CleanKill(c, worker) s.waitForStubCalls(c, []string{ - "masterFacade.Watch", - "masterFacade.GetMigrationStatus", + "facade.Watch", + "facade.MigrationStatus", "guard.Unlock", }) } func (s *Suite) TestPreviouslyCompletedMigration(c *gc.C) { - s.masterFacade.status.Phase = coremigration.DONE - s.triggerMigration() - worker, err := 
migrationmaster.New(s.config) - c.Assert(err, jc.ErrorIsNil) - defer workertest.DirtyKill(c, worker) - - err = workertest.CheckKilled(c, worker) - c.Assert(errors.Cause(err), gc.Equals, migrationmaster.ErrMigrated) - + s.facade.queueStatus(s.makeStatus(coremigration.DONE)) + s.checkWorkerReturns(c, migrationmaster.ErrMigrated) s.stub.CheckCalls(c, []jujutesting.StubCall{ - {"masterFacade.Watch", nil}, - {"masterFacade.GetMigrationStatus", nil}, + {"facade.Watch", nil}, + {"facade.MigrationStatus", nil}, }) } func (s *Suite) TestWatchFailure(c *gc.C) { - s.masterFacade.watchErr = errors.New("boom") - worker, err := migrationmaster.New(s.config) - c.Assert(err, jc.ErrorIsNil) - defer workertest.DirtyKill(c, worker) - err = workertest.CheckKilled(c, worker) - c.Assert(err, gc.ErrorMatches, "watching for migration: boom") + s.facade.watchErr = errors.New("boom") + s.checkWorkerErr(c, "watching for migration: boom") } func (s *Suite) TestStatusError(c *gc.C) { - s.masterFacade.statusErr = errors.New("splat") - worker, err := migrationmaster.New(s.config) - c.Assert(err, jc.ErrorIsNil) - defer workertest.DirtyKill(c, worker) - s.triggerMigration() - - err = workertest.CheckKilled(c, worker) + s.facade.queueStatus(s.makeStatus(coremigration.QUIESCE)) + s.facade.statusErr = errors.New("splat") + s.checkWorkerErr(c, "retrieving migration status: splat") s.stub.CheckCalls(c, []jujutesting.StubCall{ - {"masterFacade.Watch", nil}, - {"masterFacade.GetMigrationStatus", nil}, + {"facade.Watch", nil}, + {"facade.MigrationStatus", nil}, }) } func (s *Suite) TestStatusNotFound(c *gc.C) { - s.masterFacade.statusErr = ¶ms.Error{Code: params.CodeNotFound} - s.triggerMigration() + s.facade.statusErr = ¶ms.Error{Code: params.CodeNotFound} + s.facade.triggerWatcher() worker, err := migrationmaster.New(s.config) c.Assert(err, jc.ErrorIsNil) defer workertest.CleanKill(c, worker) s.waitForStubCalls(c, []string{ - "masterFacade.Watch", - "masterFacade.GetMigrationStatus", + "facade.Watch", + 
"facade.MigrationStatus", "guard.Unlock", }) } func (s *Suite) TestUnlockError(c *gc.C) { - s.masterFacade.statusErr = ¶ms.Error{Code: params.CodeNotFound} + s.facade.statusErr = ¶ms.Error{Code: params.CodeNotFound} + s.facade.triggerWatcher() guard := newStubGuard(s.stub) guard.unlockErr = errors.New("pow") s.config.Guard = guard - worker, err := migrationmaster.New(s.config) - c.Assert(err, jc.ErrorIsNil) - defer workertest.DirtyKill(c, worker) - s.triggerMigration() - - err = workertest.CheckKilled(c, worker) - c.Check(err, gc.ErrorMatches, "pow") + s.checkWorkerErr(c, "pow") s.stub.CheckCalls(c, []jujutesting.StubCall{ - {"masterFacade.Watch", nil}, - {"masterFacade.GetMigrationStatus", nil}, + {"facade.Watch", nil}, + {"facade.MigrationStatus", nil}, {"guard.Unlock", nil}, }) } func (s *Suite) TestLockdownError(c *gc.C) { + s.facade.queueStatus(s.makeStatus(coremigration.QUIESCE)) guard := newStubGuard(s.stub) guard.lockdownErr = errors.New("biff") s.config.Guard = guard - worker, err := migrationmaster.New(s.config) - c.Assert(err, jc.ErrorIsNil) - defer workertest.DirtyKill(c, worker) - s.triggerMigration() - err = workertest.CheckKilled(c, worker) - c.Check(err, gc.ErrorMatches, "biff") + s.checkWorkerErr(c, "biff") + s.stub.CheckCalls(c, watchStatusLockdownCalls) +} - s.stub.CheckCalls(c, []jujutesting.StubCall{ - {"masterFacade.Watch", nil}, - {"masterFacade.GetMigrationStatus", nil}, - {"guard.Lockdown", nil}, +func (s *Suite) TestQUIESCEMinionWaitWatchError(c *gc.C) { + s.checkMinionWaitWatchError(c, coremigration.QUIESCE) +} + +func (s *Suite) TestQUIESCEMinionWaitGetError(c *gc.C) { + s.checkMinionWaitGetError(c, coremigration.QUIESCE) +} + +func (s *Suite) TestQUIESCEFailedAgent(c *gc.C) { + s.facade.queueStatus(s.makeStatus(coremigration.QUIESCE)) + s.facade.queueMinionReports(coremigration.MinionReports{ + MigrationId: "model-uuid:2", + Phase: coremigration.QUIESCE, + FailedMachines: []string{"42"}, // a machine failed }) + + 
s.checkWorkerReturns(c, migrationmaster.ErrInactive) + s.stub.CheckCalls(c, joinCalls( + watchStatusLockdownCalls, + prechecksCalls, + []jujutesting.StubCall{ + {"facade.WatchMinionReports", nil}, + {"facade.MinionReports", nil}, + }, + abortCalls, + )) } -func (s *Suite) TestExportFailure(c *gc.C) { - s.masterFacade.exportErr = errors.New("boom") - worker, err := migrationmaster.New(s.config) - c.Assert(err, jc.ErrorIsNil) - defer workertest.DirtyKill(c, worker) - s.triggerMigration() +func (s *Suite) TestQUIESCEWrongController(c *gc.C) { + s.facade.queueStatus(s.makeStatus(coremigration.QUIESCE)) + s.connection.controllerTag = names.NewControllerTag("another-controller") + + s.checkWorkerReturns(c, migrationmaster.ErrInactive) + s.stub.CheckCalls(c, joinCalls( + watchStatusLockdownCalls, + []jujutesting.StubCall{ + {"facade.Prechecks", nil}, + {"facade.ModelInfo", nil}, + apiOpenControllerCall, + apiCloseCall, + }, + abortCalls, + )) +} - err = workertest.CheckKilled(c, worker) - c.Assert(err, gc.Equals, migrationmaster.ErrInactive) +func (s *Suite) TestQUIESCESourceChecksFail(c *gc.C) { + s.facade.queueStatus(s.makeStatus(coremigration.QUIESCE)) + s.facade.prechecksErr = errors.New("boom") + + s.checkWorkerReturns(c, migrationmaster.ErrInactive) + s.stub.CheckCalls(c, joinCalls( + watchStatusLockdownCalls, + []jujutesting.StubCall{{"facade.Prechecks", nil}}, + abortCalls, + )) +} + +func (s *Suite) TestQUIESCEModelInfoFail(c *gc.C) { + s.facade.queueStatus(s.makeStatus(coremigration.QUIESCE)) + s.facade.modelInfoErr = errors.New("boom") + + s.checkWorkerReturns(c, migrationmaster.ErrInactive) + s.stub.CheckCalls(c, joinCalls( + watchStatusLockdownCalls, + []jujutesting.StubCall{ + {"facade.Prechecks", nil}, + {"facade.ModelInfo", nil}, + }, + abortCalls, + )) +} - s.stub.CheckCalls(c, []jujutesting.StubCall{ - {"masterFacade.Watch", nil}, - {"masterFacade.GetMigrationStatus", nil}, - {"guard.Lockdown", nil}, - {"masterFacade.SetPhase", 
[]interface{}{coremigration.PRECHECK}}, - {"masterFacade.SetPhase", []interface{}{coremigration.IMPORT}}, - {"masterFacade.Export", nil}, - {"masterFacade.SetPhase", []interface{}{coremigration.ABORT}}, - apiOpenCallController, - abortCall, - connCloseCall, - {"masterFacade.SetPhase", []interface{}{coremigration.ABORTDONE}}, - }) +func (s *Suite) TestQUIESCETargetChecksFail(c *gc.C) { + s.facade.queueStatus(s.makeStatus(coremigration.QUIESCE)) + s.connection.prechecksErr = errors.New("boom") + + s.checkWorkerReturns(c, migrationmaster.ErrInactive) + s.stub.CheckCalls(c, joinCalls( + watchStatusLockdownCalls, + prechecksCalls, + abortCalls, + )) +} + +func (s *Suite) TestExportFailure(c *gc.C) { + s.facade.queueStatus(s.makeStatus(coremigration.IMPORT)) + s.facade.exportErr = errors.New("boom") + + s.checkWorkerReturns(c, migrationmaster.ErrInactive) + s.stub.CheckCalls(c, joinCalls( + watchStatusLockdownCalls, + []jujutesting.StubCall{ + {"facade.Export", nil}, + }, + abortCalls, + )) } func (s *Suite) TestAPIOpenFailure(c *gc.C) { - worker, err := migrationmaster.New(s.config) - c.Assert(err, jc.ErrorIsNil) - defer workertest.DirtyKill(c, worker) + s.facade.queueStatus(s.makeStatus(coremigration.IMPORT)) s.connectionErr = errors.New("boom") - s.triggerMigration() - - err = workertest.CheckKilled(c, worker) - c.Assert(err, gc.Equals, migrationmaster.ErrInactive) - s.stub.CheckCalls(c, []jujutesting.StubCall{ - {"masterFacade.Watch", nil}, - {"masterFacade.GetMigrationStatus", nil}, - {"guard.Lockdown", nil}, - {"masterFacade.SetPhase", []interface{}{coremigration.PRECHECK}}, - {"masterFacade.SetPhase", []interface{}{coremigration.IMPORT}}, - {"masterFacade.Export", nil}, - apiOpenCallController, - {"masterFacade.SetPhase", []interface{}{coremigration.ABORT}}, - apiOpenCallController, - {"masterFacade.SetPhase", []interface{}{coremigration.ABORTDONE}}, - }) + s.checkWorkerReturns(c, migrationmaster.ErrInactive) + s.stub.CheckCalls(c, joinCalls( + 
watchStatusLockdownCalls, + []jujutesting.StubCall{ + {"facade.Export", nil}, + apiOpenControllerCall, + {"facade.SetPhase", []interface{}{coremigration.ABORT}}, + apiOpenControllerCall, + {"facade.SetPhase", []interface{}{coremigration.ABORTDONE}}, + }, + )) } func (s *Suite) TestImportFailure(c *gc.C) { - worker, err := migrationmaster.New(s.config) - c.Assert(err, jc.ErrorIsNil) - defer workertest.DirtyKill(c, worker) + s.facade.queueStatus(s.makeStatus(coremigration.IMPORT)) s.connection.importErr = errors.New("boom") - s.triggerMigration() - err = workertest.CheckKilled(c, worker) - c.Assert(err, gc.Equals, migrationmaster.ErrInactive) + s.checkWorkerReturns(c, migrationmaster.ErrInactive) + s.stub.CheckCalls(c, joinCalls( + watchStatusLockdownCalls, + []jujutesting.StubCall{ + {"facade.Export", nil}, + apiOpenControllerCall, + importCall, + apiCloseCall, + }, + abortCalls, + )) +} - s.stub.CheckCalls(c, []jujutesting.StubCall{ - {"masterFacade.Watch", nil}, - {"masterFacade.GetMigrationStatus", nil}, - {"guard.Lockdown", nil}, - {"masterFacade.SetPhase", []interface{}{coremigration.PRECHECK}}, - {"masterFacade.SetPhase", []interface{}{coremigration.IMPORT}}, - {"masterFacade.Export", nil}, - apiOpenCallController, - importCall, - connCloseCall, - {"masterFacade.SetPhase", []interface{}{coremigration.ABORT}}, - apiOpenCallController, - abortCall, - connCloseCall, - {"masterFacade.SetPhase", []interface{}{coremigration.ABORTDONE}}, - }) +func (s *Suite) TestVALIDATIONMinionWaitWatchError(c *gc.C) { + s.checkMinionWaitWatchError(c, coremigration.VALIDATION) } -func (s *Suite) TestMinionWaitWatchError(c *gc.C) { - worker, err := migrationmaster.New(s.config) - c.Assert(err, jc.ErrorIsNil) - defer workertest.DirtyKill(c, worker) - s.masterFacade.minionReportsWatchErr = errors.New("boom") - s.masterFacade.status.Phase = coremigration.SUCCESS - s.triggerMigration() +func (s *Suite) TestVALIDATIONMinionWaitGetError(c *gc.C) { + s.checkMinionWaitGetError(c, 
coremigration.VALIDATION) +} - err = workertest.CheckKilled(c, worker) - c.Assert(err, gc.ErrorMatches, "boom") +func (s *Suite) TestVALIDATIONFailedAgent(c *gc.C) { + s.facade.queueStatus(s.makeStatus(coremigration.VALIDATION)) + s.facade.queueMinionReports(coremigration.MinionReports{ + MigrationId: "model-uuid:2", + Phase: coremigration.VALIDATION, + FailedMachines: []string{"42"}, // a machine failed + }) + + s.checkWorkerReturns(c, migrationmaster.ErrInactive) + s.stub.CheckCalls(c, joinCalls( + watchStatusLockdownCalls, + []jujutesting.StubCall{ + {"facade.WatchMinionReports", nil}, + {"facade.MinionReports", nil}, + }, + abortCalls, + )) } -func (s *Suite) TestMinionWaitGetError(c *gc.C) { - worker, err := migrationmaster.New(s.config) - c.Assert(err, jc.ErrorIsNil) - defer workertest.DirtyKill(c, worker) - s.masterFacade.minionReportsErr = errors.New("boom") - s.masterFacade.status.Phase = coremigration.SUCCESS - s.triggerMigration() - s.triggerMinionReports() +func (s *Suite) TestSUCCESSMinionWaitWatchError(c *gc.C) { + s.checkMinionWaitWatchError(c, coremigration.SUCCESS) +} - err = workertest.CheckKilled(c, worker) - c.Assert(err, gc.ErrorMatches, "boom") +func (s *Suite) TestSUCCESSMinionWaitGetError(c *gc.C) { + s.checkMinionWaitGetError(c, coremigration.SUCCESS) } -func (s *Suite) TestMinionWaitSUCCESSFailedMachine(c *gc.C) { +func (s *Suite) TestSUCCESSMinionWaitFailedMachine(c *gc.C) { // With the SUCCESS phase the master should wait for all reports, // continuing even if some minions report failure. 
- - s.masterFacade.minionReports.FailedMachines = []string{"42"} - worker, err := migrationmaster.New(s.config) - c.Assert(err, jc.ErrorIsNil) - defer workertest.DirtyKill(c, worker) - s.masterFacade.status.Phase = coremigration.SUCCESS - s.triggerMigration() - s.triggerMinionReports() - - err = workertest.CheckKilled(c, worker) - c.Assert(err, gc.Equals, migrationmaster.ErrMigrated) - - s.stub.CheckCalls(c, []jujutesting.StubCall{ - {"masterFacade.Watch", nil}, - {"masterFacade.GetMigrationStatus", nil}, - {"guard.Lockdown", nil}, - {"masterFacade.WatchMinionReports", nil}, - {"masterFacade.GetMinionReports", nil}, - {"masterFacade.SetPhase", []interface{}{coremigration.LOGTRANSFER}}, - {"masterFacade.SetPhase", []interface{}{coremigration.REAP}}, - {"masterFacade.Reap", nil}, - {"masterFacade.SetPhase", []interface{}{coremigration.DONE}}, - }) + s.facade.queueStatus(s.makeStatus(coremigration.SUCCESS)) + s.facade.queueMinionReports(coremigration.MinionReports{ + MigrationId: "model-uuid:2", + Phase: coremigration.SUCCESS, + FailedMachines: []string{"42"}, + }) + + s.checkWorkerReturns(c, migrationmaster.ErrMigrated) + s.stub.CheckCalls(c, joinCalls( + watchStatusLockdownCalls, + []jujutesting.StubCall{ + {"facade.WatchMinionReports", nil}, + {"facade.MinionReports", nil}, + {"facade.SetPhase", []interface{}{coremigration.LOGTRANSFER}}, + {"facade.SetPhase", []interface{}{coremigration.REAP}}, + {"facade.Reap", nil}, + {"facade.SetPhase", []interface{}{coremigration.DONE}}, + }, + )) } -func (s *Suite) TestMinionWaitSUCCESSFailedUnit(c *gc.C) { - // See note for TestMinionWaitSUCCESSFailedMachine above. 
- - s.masterFacade.minionReports.FailedUnits = []string{"foo/2"} - worker, err := migrationmaster.New(s.config) - c.Assert(err, jc.ErrorIsNil) - defer workertest.DirtyKill(c, worker) - s.masterFacade.status.Phase = coremigration.SUCCESS - s.triggerMigration() - s.triggerMinionReports() - - err = workertest.CheckKilled(c, worker) - c.Assert(err, gc.Equals, migrationmaster.ErrMigrated) - - s.stub.CheckCalls(c, []jujutesting.StubCall{ - {"masterFacade.Watch", nil}, - {"masterFacade.GetMigrationStatus", nil}, - {"guard.Lockdown", nil}, - {"masterFacade.WatchMinionReports", nil}, - {"masterFacade.GetMinionReports", nil}, - {"masterFacade.SetPhase", []interface{}{coremigration.LOGTRANSFER}}, - {"masterFacade.SetPhase", []interface{}{coremigration.REAP}}, - {"masterFacade.Reap", nil}, - {"masterFacade.SetPhase", []interface{}{coremigration.DONE}}, - }) +func (s *Suite) TestSUCCESSMinionWaitFailedUnit(c *gc.C) { + // See note for TestMinionWaitFailedMachine above. + s.facade.queueStatus(s.makeStatus(coremigration.SUCCESS)) + s.facade.queueMinionReports(coremigration.MinionReports{ + MigrationId: "model-uuid:2", + Phase: coremigration.SUCCESS, + FailedUnits: []string{"foo/2"}, + }) + + s.checkWorkerReturns(c, migrationmaster.ErrMigrated) + s.stub.CheckCalls(c, joinCalls( + watchStatusLockdownCalls, + []jujutesting.StubCall{ + {"facade.WatchMinionReports", nil}, + {"facade.MinionReports", nil}, + {"facade.SetPhase", []interface{}{coremigration.LOGTRANSFER}}, + {"facade.SetPhase", []interface{}{coremigration.REAP}}, + {"facade.Reap", nil}, + {"facade.SetPhase", []interface{}{coremigration.DONE}}, + }, + )) } -func (s *Suite) TestMinionWaitSUCCESSTimeout(c *gc.C) { +func (s *Suite) TestSUCCESSMinionWaitTimeout(c *gc.C) { // The SUCCESS phase is special in that even if some minions fail // to report the migration should continue. There's no turning // back from SUCCESS. 
+ s.facade.queueStatus(s.makeStatus(coremigration.SUCCESS)) + worker, err := migrationmaster.New(s.config) c.Assert(err, jc.ErrorIsNil) defer workertest.DirtyKill(c, worker) - s.masterFacade.status.Phase = coremigration.SUCCESS - s.triggerMigration() - select { case <-s.clock.Alarms(): case <-time.After(coretesting.LongWait): @@ -502,53 +568,141 @@ err = workertest.CheckKilled(c, worker) c.Assert(err, gc.Equals, migrationmaster.ErrMigrated) - s.stub.CheckCalls(c, []jujutesting.StubCall{ - {"masterFacade.Watch", nil}, - {"masterFacade.GetMigrationStatus", nil}, - {"guard.Lockdown", nil}, - {"masterFacade.WatchMinionReports", nil}, - {"masterFacade.SetPhase", []interface{}{coremigration.LOGTRANSFER}}, - {"masterFacade.SetPhase", []interface{}{coremigration.REAP}}, - {"masterFacade.Reap", nil}, - {"masterFacade.SetPhase", []interface{}{coremigration.DONE}}, - }) + s.stub.CheckCalls(c, joinCalls( + watchStatusLockdownCalls, + []jujutesting.StubCall{ + {"facade.WatchMinionReports", nil}, + {"facade.SetPhase", []interface{}{coremigration.LOGTRANSFER}}, + {"facade.SetPhase", []interface{}{coremigration.REAP}}, + {"facade.Reap", nil}, + {"facade.SetPhase", []interface{}{coremigration.DONE}}, + }, + )) } func (s *Suite) TestMinionWaitWrongPhase(c *gc.C) { - worker, err := migrationmaster.New(s.config) - c.Assert(err, jc.ErrorIsNil) - defer workertest.DirtyKill(c, worker) - s.masterFacade.status.Phase = coremigration.SUCCESS - s.triggerMigration() + s.facade.queueStatus(s.makeStatus(coremigration.SUCCESS)) // Have the phase in the minion reports be different from the // migration status. This shouldn't happen but the migrationmaster // should handle it. 
- s.masterFacade.minionReports.Phase = coremigration.PRECHECK - s.triggerMinionReports() + s.facade.queueMinionReports(makeMinionReports(coremigration.IMPORT)) - err = workertest.CheckKilled(c, worker) - c.Assert(err, gc.ErrorMatches, `minion reports phase \(PRECHECK\) does not match migration phase \(SUCCESS\)`) + s.checkWorkerErr(c, + `minion reports phase \(IMPORT\) does not match migration phase \(SUCCESS\)`) } func (s *Suite) TestMinionWaitMigrationIdChanged(c *gc.C) { - worker, err := migrationmaster.New(s.config) - c.Assert(err, jc.ErrorIsNil) - defer workertest.DirtyKill(c, worker) - s.masterFacade.status.Phase = coremigration.SUCCESS - s.triggerMigration() + s.facade.queueStatus(s.makeStatus(coremigration.SUCCESS)) // Have the migration id in the minion reports be different from // the migration status. This shouldn't happen but the // migrationmaster should handle it. - s.masterFacade.minionReports.MigrationId = "blah" - s.triggerMinionReports() + s.facade.queueMinionReports(coremigration.MinionReports{ + MigrationId: "blah", + Phase: coremigration.SUCCESS, + }) - err = workertest.CheckKilled(c, worker) - c.Assert(err, gc.ErrorMatches, + s.checkWorkerErr(c, "unexpected migration id in minion reports, got blah, expected model-uuid:2") } +func (s *Suite) TestAPIConnectWithMacaroon(c *gc.C) { + // Use ABORT because it involves an API connection to the target + // and is convenient. + status := s.makeStatus(coremigration.ABORT) + + // Set up macaroon based auth to the target. 
+ mac, err := macaroon.New([]byte("secret"), "id", "location") + c.Assert(err, jc.ErrorIsNil) + macs := []macaroon.Slice{{mac}} + status.TargetInfo.Password = "" + status.TargetInfo.Macaroons = macs + + s.facade.queueStatus(status) + + s.checkWorkerReturns(c, migrationmaster.ErrInactive) + s.stub.CheckCalls(c, joinCalls( + watchStatusLockdownCalls, + []jujutesting.StubCall{ + { + "apiOpen", + []interface{}{ + &api.Info{ + Addrs: []string{"1.2.3.4:5"}, + CACert: "cert", + Tag: names.NewUserTag("admin"), + Macaroons: macs, // <--- + }, + migration.ControllerDialOpts(), + }, + }, + abortCall, + apiCloseCall, + {"facade.SetPhase", []interface{}{coremigration.ABORTDONE}}, + }, + )) +} + +func (s *Suite) TestExternalControl(c *gc.C) { + status := s.makeStatus(coremigration.QUIESCE) + status.ExternalControl = true + s.facade.queueStatus(status) + + status.Phase = coremigration.DONE + s.facade.queueStatus(status) + + s.checkWorkerReturns(c, migrationmaster.ErrMigrated) + s.stub.CheckCalls(c, joinCalls( + // Wait for migration to start. + watchStatusLockdownCalls, + + // Wait for migration to end. + []jujutesting.StubCall{ + {"facade.Watch", nil}, + {"facade.MigrationStatus", nil}, + }, + )) +} + +func (s *Suite) TestExternalControlABORT(c *gc.C) { + status := s.makeStatus(coremigration.QUIESCE) + status.ExternalControl = true + s.facade.queueStatus(status) + + status.Phase = coremigration.ABORTDONE + s.facade.queueStatus(status) + + s.checkWorkerReturns(c, migrationmaster.ErrInactive) + s.stub.CheckCalls(c, joinCalls( + // Wait for migration to start. + watchStatusLockdownCalls, + + // Wait for migration to end. 
+ []jujutesting.StubCall{ + {"facade.Watch", nil}, + {"facade.MigrationStatus", nil}, + }, + )) +} + +func (s *Suite) checkWorkerReturns(c *gc.C, expected error) { + err := s.runWorker(c) + c.Check(errors.Cause(err), gc.Equals, expected) +} + +func (s *Suite) checkWorkerErr(c *gc.C, expected string) { + err := s.runWorker(c) + c.Check(err, gc.ErrorMatches, expected) +} + +func (s *Suite) runWorker(c *gc.C) error { + w, err := migrationmaster.New(s.config) + c.Assert(err, jc.ErrorIsNil) + defer workertest.DirtyKill(c, w) + return workertest.CheckKilled(c, w) +} + func (s *Suite) waitForStubCalls(c *gc.C, expectedCallNames []string) { var callNames []string for a := coretesting.LongAttempt.Start(); a.Next(); { @@ -557,7 +711,24 @@ return } } - c.Fatalf("failed to see expected calls. saw: %v", callNames) + c.Fatalf("failed to see expected calls\nobtained: %v\nexpected: %v", + callNames, expectedCallNames) +} + +func (s *Suite) checkMinionWaitWatchError(c *gc.C, phase coremigration.Phase) { + s.facade.minionReportsWatchErr = errors.New("boom") + s.facade.queueStatus(s.makeStatus(phase)) + + s.checkWorkerErr(c, "boom") +} + +func (s *Suite) checkMinionWaitGetError(c *gc.C, phase coremigration.Phase) { + s.facade.queueStatus(s.makeStatus(phase)) + + s.facade.minionReportsErr = errors.New("boom") + s.facade.triggerMinionReports() + + s.checkWorkerErr(c, "boom") } func stubCallNames(stub *jujutesting.Stub) []string { @@ -592,31 +763,10 @@ return &stubMasterFacade{ stub: stub, watcherChanges: make(chan struct{}, 999), - status: coremigration.MigrationStatus{ - MigrationId: "model-uuid:2", - ModelUUID: "model-uuid", - Phase: coremigration.QUIESCE, - PhaseChangedTime: now, - TargetInfo: coremigration.TargetInfo{ - ControllerTag: names.NewModelTag("controller-uuid"), - Addrs: []string{"1.2.3.4:5"}, - CACert: "cert", - AuthTag: names.NewUserTag("admin"), - Password: "secret", - }, - }, // Give minionReportsChanges a larger-than-required buffer to // support waits at a number of 
phases. minionReportsChanges: make(chan struct{}, 999), - - // Default to happy state. Test may wish to tweak. - minionReports: coremigration.MinionReports{ - MigrationId: "model-uuid:2", - Phase: coremigration.SUCCESS, - SuccessCount: 5, - UnknownCount: 0, - }, } } @@ -627,53 +777,110 @@ watcherChanges chan struct{} watchErr error - status coremigration.MigrationStatus + status []coremigration.MigrationStatus statusErr error - exportErr error + prechecksErr error + modelInfoErr error + exportErr error minionReportsChanges chan struct{} minionReportsWatchErr error - minionReports coremigration.MinionReports + minionReports []coremigration.MinionReports minionReportsErr error } -func (c *stubMasterFacade) Watch() (watcher.NotifyWatcher, error) { - c.stub.AddCall("masterFacade.Watch") - if c.watchErr != nil { - return nil, c.watchErr +func (f *stubMasterFacade) triggerWatcher() { + select { + case f.watcherChanges <- struct{}{}: + default: + panic("migration watcher channel unexpectedly closed") + } +} + +func (f *stubMasterFacade) queueStatus(status coremigration.MigrationStatus) { + f.status = append(f.status, status) + f.triggerWatcher() +} + +func (f *stubMasterFacade) triggerMinionReports() { + select { + case f.minionReportsChanges <- struct{}{}: + default: + panic("minion reports watcher channel unexpectedly closed") } - return newMockWatcher(c.watcherChanges), nil } -func (c *stubMasterFacade) GetMigrationStatus() (coremigration.MigrationStatus, error) { - c.stub.AddCall("masterFacade.GetMigrationStatus") - if c.statusErr != nil { - return coremigration.MigrationStatus{}, c.statusErr +func (f *stubMasterFacade) queueMinionReports(r coremigration.MinionReports) { + f.minionReports = append(f.minionReports, r) + f.triggerMinionReports() +} + +func (f *stubMasterFacade) Watch() (watcher.NotifyWatcher, error) { + f.stub.AddCall("facade.Watch") + if f.watchErr != nil { + return nil, f.watchErr } - return c.status, nil + return newMockWatcher(f.watcherChanges), nil 
} -func (c *stubMasterFacade) WatchMinionReports() (watcher.NotifyWatcher, error) { - c.stub.AddCall("masterFacade.WatchMinionReports") - if c.minionReportsWatchErr != nil { - return nil, c.minionReportsWatchErr +func (f *stubMasterFacade) MigrationStatus() (coremigration.MigrationStatus, error) { + f.stub.AddCall("facade.MigrationStatus") + if f.statusErr != nil { + return coremigration.MigrationStatus{}, f.statusErr } - return newMockWatcher(c.minionReportsChanges), nil + if len(f.status) == 0 { + panic("no status queued to report") + } + out := f.status[0] + f.status = f.status[1:] + return out, nil } -func (c *stubMasterFacade) GetMinionReports() (coremigration.MinionReports, error) { - c.stub.AddCall("masterFacade.GetMinionReports") - if c.minionReportsErr != nil { - return coremigration.MinionReports{}, c.minionReportsErr +func (f *stubMasterFacade) WatchMinionReports() (watcher.NotifyWatcher, error) { + f.stub.AddCall("facade.WatchMinionReports") + if f.minionReportsWatchErr != nil { + return nil, f.minionReportsWatchErr } - return c.minionReports, nil + return newMockWatcher(f.minionReportsChanges), nil } -func (c *stubMasterFacade) Export() (coremigration.SerializedModel, error) { - c.stub.AddCall("masterFacade.Export") - if c.exportErr != nil { - return coremigration.SerializedModel{}, c.exportErr +func (f *stubMasterFacade) MinionReports() (coremigration.MinionReports, error) { + f.stub.AddCall("facade.MinionReports") + if f.minionReportsErr != nil { + return coremigration.MinionReports{}, f.minionReportsErr + } + if len(f.minionReports) == 0 { + return coremigration.MinionReports{}, errors.NotFoundf("reports") + + } + r := f.minionReports[0] + f.minionReports = f.minionReports[1:] + return r, nil +} + +func (f *stubMasterFacade) Prechecks() error { + f.stub.AddCall("facade.Prechecks") + return f.prechecksErr +} + +func (f *stubMasterFacade) ModelInfo() (coremigration.ModelInfo, error) { + f.stub.AddCall("facade.ModelInfo") + if f.modelInfoErr != nil { + 
return coremigration.ModelInfo{}, f.modelInfoErr + } + return coremigration.ModelInfo{ + UUID: modelUUID, + Name: modelName, + Owner: ownerTag, + AgentVersion: modelVersion, + }, nil +} + +func (f *stubMasterFacade) Export() (coremigration.SerializedModel, error) { + f.stub.AddCall("facade.Export") + if f.exportErr != nil { + return coremigration.SerializedModel{}, f.exportErr } return coremigration.SerializedModel{ Bytes: fakeModelBytes, @@ -684,17 +891,17 @@ }, nil } -func (c *stubMasterFacade) SetPhase(phase coremigration.Phase) error { - c.stub.AddCall("masterFacade.SetPhase", phase) +func (f *stubMasterFacade) SetPhase(phase coremigration.Phase) error { + f.stub.AddCall("facade.SetPhase", phase) return nil } -func (c *stubMasterFacade) SetStatusMessage(message string) error { +func (f *stubMasterFacade) SetStatusMessage(message string) error { return nil } -func (c *stubMasterFacade) Reap() error { - c.stub.AddCall("masterFacade.Reap") +func (f *stubMasterFacade) Reap() error { + f.stub.AddCall("facade.Reap") return nil } @@ -716,8 +923,10 @@ type stubConnection struct { api.Connection - stub *jujutesting.Stub - importErr error + stub *jujutesting.Stub + prechecksErr error + importErr error + controllerTag names.ControllerTag } func (c *stubConnection) BestFacadeVersion(string) int { @@ -725,10 +934,12 @@ } func (c *stubConnection) APICall(objType string, version int, id, request string, params, response interface{}) error { - c.stub.AddCall("APICall:"+objType+"."+request, params) + c.stub.AddCall(objType+"."+request, params) if objType == "MigrationTarget" { switch request { + case "Prechecks": + return c.prechecksErr case "Import": return c.importErr case "Activate": @@ -749,6 +960,10 @@ return nil } +func (c *stubConnection) ControllerTag() names.ControllerTag { + return c.controllerTag +} + func makeStubUploadBinaries(stub *jujutesting.Stub) func(migration.UploadBinariesConfig) error { return func(config migration.UploadBinariesConfig) error { 
stub.AddCall( @@ -771,3 +986,19 @@ var fakeCharmDownloader = struct{ migration.CharmDownloader }{} var fakeToolsDownloader = struct{ migration.ToolsDownloader }{} + +func joinCalls(allCalls ...[]jujutesting.StubCall) (out []jujutesting.StubCall) { + for _, calls := range allCalls { + out = append(out, calls...) + } + return +} + +func makeMinionReports(p coremigration.Phase) coremigration.MinionReports { + return coremigration.MinionReports{ + MigrationId: "model-uuid:2", + Phase: p, + SuccessCount: 5, + UnknownCount: 0, + } +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/migrationminion/manifold.go juju-core-2.0.0/src/github.com/juju/juju/worker/migrationminion/manifold.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/migrationminion/manifold.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/migrationminion/manifold.go 2016-10-13 14:31:49.000000000 +0000 @@ -6,6 +6,7 @@ import ( "github.com/juju/errors" "github.com/juju/juju/agent" + "github.com/juju/juju/api" "github.com/juju/juju/api/base" "github.com/juju/juju/worker" "github.com/juju/juju/worker/dependency" @@ -15,9 +16,11 @@ // ManifoldConfig defines the names of the manifolds on which a // Worker manifold will depend. 
type ManifoldConfig struct { - AgentName string - APICallerName string - FortressName string + AgentName string + APICallerName string + FortressName string + APIOpen func(*api.Info, api.DialOpts) (api.Connection, error) + ValidateMigration func(base.APICaller) error NewFacade func(base.APICaller) (Facade, error) NewWorker func(Config) (worker.Worker, error) @@ -34,6 +37,12 @@ if config.FortressName == "" { return errors.NotValidf("empty FortressName") } + if config.APIOpen == nil { + return errors.NotValidf("nil APIOpen") + } + if config.ValidateMigration == nil { + return errors.NotValidf("nil ValidateMigration") + } if config.NewFacade == nil { return errors.NotValidf("nil NewFacade") } @@ -66,9 +75,11 @@ return nil, errors.Trace(err) } worker, err := config.NewWorker(Config{ - Agent: agent, - Facade: facade, - Guard: guard, + Agent: agent, + Facade: facade, + Guard: guard, + APIOpen: config.APIOpen, + ValidateMigration: config.ValidateMigration, }) if err != nil { return nil, errors.Trace(err) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/migrationminion/validate_test.go juju-core-2.0.0/src/github.com/juju/juju/worker/migrationminion/validate_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/migrationminion/validate_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/migrationminion/validate_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -6,6 +6,8 @@ import ( "github.com/juju/errors" "github.com/juju/juju/agent" + "github.com/juju/juju/api" + "github.com/juju/juju/api/base" "github.com/juju/juju/worker/fortress" "github.com/juju/juju/worker/migrationminion" "github.com/juju/testing" @@ -30,23 +32,37 @@ checkNotValid(c, config, "nil Agent not valid") } +func (*ValidateSuite) TestMissingFacade(c *gc.C) { + config := validConfig() + config.Facade = nil + checkNotValid(c, config, "nil Facade not valid") +} + func (*ValidateSuite) TestMissingGuard(c *gc.C) { config := validConfig() 
config.Guard = nil checkNotValid(c, config, "nil Guard not valid") } -func (*ValidateSuite) TestMissingFacade(c *gc.C) { +func (*ValidateSuite) TestMissingAPIOpen(c *gc.C) { config := validConfig() - config.Facade = nil - checkNotValid(c, config, "nil Facade not valid") + config.APIOpen = nil + checkNotValid(c, config, "nil APIOpen not valid") +} + +func (*ValidateSuite) TestMissingValidateMigration(c *gc.C) { + config := validConfig() + config.ValidateMigration = nil + checkNotValid(c, config, "nil ValidateMigration not valid") } func validConfig() migrationminion.Config { return migrationminion.Config{ - Agent: struct{ agent.Agent }{}, - Guard: struct{ fortress.Guard }{}, - Facade: struct{ migrationminion.Facade }{}, + Agent: struct{ agent.Agent }{}, + Guard: struct{ fortress.Guard }{}, + Facade: struct{ migrationminion.Facade }{}, + APIOpen: func(*api.Info, api.DialOpts) (api.Connection, error) { return nil, nil }, + ValidateMigration: func(base.APICaller) error { return nil }, } } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/migrationminion/worker.go juju-core-2.0.0/src/github.com/juju/juju/worker/migrationminion/worker.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/migrationminion/worker.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/migrationminion/worker.go 2016-10-13 14:31:49.000000000 +0000 @@ -8,6 +8,8 @@ "github.com/juju/loggo" "github.com/juju/juju/agent" + "github.com/juju/juju/api" + "github.com/juju/juju/api/base" "github.com/juju/juju/core/migration" "github.com/juju/juju/network" "github.com/juju/juju/watcher" @@ -26,9 +28,11 @@ // Config defines the operation of a Worker. 
type Config struct { - Agent agent.Agent - Facade Facade - Guard fortress.Guard + Agent agent.Agent + Facade Facade + Guard fortress.Guard + APIOpen func(*api.Info, api.DialOpts) (api.Connection, error) + ValidateMigration func(base.APICaller) error } // Validate returns an error if config cannot drive a Worker. @@ -42,6 +46,12 @@ if config.Guard == nil { return errors.NotValidf("nil Guard") } + if config.APIOpen == nil { + return errors.NotValidf("nil APIOpen") + } + if config.ValidateMigration == nil { + return errors.NotValidf("nil ValidateMigration") + } return nil } @@ -109,6 +119,8 @@ return w.config.Guard.Unlock() } + // Ensure that all workers related to migration fortress have + // stopped and aren't allowed to restart. err := w.config.Guard.Lockdown(w.catacomb.Dying()) if errors.Cause(err) == fortress.ErrAborted { return w.catacomb.ErrDying() @@ -117,16 +129,12 @@ } switch status.Phase { + case migration.QUIESCE: + err = w.doQUIESCE(status) + case migration.VALIDATION: + err = w.doVALIDATION(status) case migration.SUCCESS: - // Report first because the config update in doSUCCESS will - // cause the API connection to drop. The SUCCESS phase is the - // point of no return anyway. - if err := w.report(status, true); err != nil { - return errors.Trace(err) - } - if err = w.doSUCCESS(status); err != nil { - return errors.Trace(err) - } + err = w.doSUCCESS(status) default: // The minion doesn't need to do anything for other // migration phases. @@ -134,11 +142,60 @@ return errors.Trace(err) } +func (w *Worker) doQUIESCE(status watcher.MigrationStatus) error { + // Report that the minion is ready and that all workers that + // should be shut down have done so. + return w.report(status, true) +} + +func (w *Worker) doVALIDATION(status watcher.MigrationStatus) error { + err := w.validate(status) + if err != nil { + // Don't return this error just log it and report to the + // migrationmaster that things didn't work out. 
+ logger.Errorf("validation failed: %v", err) + } + return w.report(status, err == nil) +} + +func (w *Worker) validate(status watcher.MigrationStatus) error { + agentConf := w.config.Agent.CurrentConfig() + apiInfo, ok := agentConf.APIInfo() + if !ok { + return errors.New("no API connection details") + } + apiInfo.Addrs = status.TargetAPIAddrs + apiInfo.CACert = status.TargetCACert + + // Use zero DialOpts (no retries) because the worker must stay + // responsive to Kill requests. We don't want it to be blocked by + // a long set of retry attempts. + conn, err := w.config.APIOpen(apiInfo, api.DialOpts{}) + if err != nil { + // Don't return this error just log it and report to the + // migrationmaster that things didn't work out. + return errors.Annotate(err, "failed to open API to target controller") + } + defer conn.Close() + + // Ask the agent to confirm that things look ok. + err = w.config.ValidateMigration(conn) + return errors.Trace(err) +} + func (w *Worker) doSUCCESS(status watcher.MigrationStatus) error { hps, err := apiAddrsToHostPorts(status.TargetAPIAddrs) if err != nil { return errors.Annotate(err, "converting API addresses") } + + // Report first because the config update that's about to happen + // will cause the API connection to drop. The SUCCESS phase is the + // point of no return anyway. 
+ if err := w.report(status, true); err != nil { + return errors.Trace(err) + } + err = w.config.Agent.ChangeConfig(func(conf agent.ConfigSetter) error { conf.SetAPIHostPorts(hps) conf.SetCACert(status.TargetCACert) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/migrationminion/worker_test.go juju-core-2.0.0/src/github.com/juju/juju/worker/migrationminion/worker_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/migrationminion/worker_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/migrationminion/worker_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,6 +4,7 @@ package migrationminion_test import ( + "reflect" "sync" "time" @@ -11,8 +12,11 @@ jujutesting "github.com/juju/testing" jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" + "gopkg.in/juju/names.v2" "github.com/juju/juju/agent" + "github.com/juju/juju/api" + "github.com/juju/juju/api/base" "github.com/juju/juju/core/migration" "github.com/juju/juju/network" coretesting "github.com/juju/juju/testing" @@ -23,8 +27,17 @@ "github.com/juju/juju/worker/workertest" ) +var ( + modelTag = names.NewModelTag("model-uuid") + addrs = []string{"1.1.1.1:1111", "2.2.2.2:2222"} + agentTag = names.NewMachineTag("42") + agentPassword = "sekret" + caCert = "cert" +) + type Suite struct { coretesting.BaseSuite + config migrationminion.Config stub *jujutesting.Stub client *stubMinionClient guard *stubGuard @@ -39,14 +52,25 @@ s.client = newStubMinionClient(s.stub) s.guard = newStubGuard(s.stub) s.agent = newStubAgent() + s.config = migrationminion.Config{ + Facade: s.client, + Guard: s.guard, + Agent: s.agent, + APIOpen: s.apiOpen, + ValidateMigration: func(base.APICaller) error { + s.stub.AddCall("ValidateMigration") + return nil + }, + } +} + +func (s *Suite) apiOpen(info *api.Info, dialOpts api.DialOpts) (api.Connection, error) { + s.stub.AddCall("API open", info) + return &stubConnection{stub: s.stub}, nil } func (s *Suite) 
TestStartAndStop(c *gc.C) { - w, err := migrationminion.New(migrationminion.Config{ - Facade: s.client, - Guard: s.guard, - Agent: s.agent, - }) + w, err := migrationminion.New(s.config) c.Assert(err, jc.ErrorIsNil) workertest.CleanKill(c, w) s.stub.CheckCallNames(c, "Watch") @@ -54,11 +78,7 @@ func (s *Suite) TestWatchFailure(c *gc.C) { s.client.watchErr = errors.New("boom") - w, err := migrationminion.New(migrationminion.Config{ - Facade: s.client, - Guard: s.guard, - Agent: s.agent, - }) + w, err := migrationminion.New(s.config) c.Assert(err, jc.ErrorIsNil) err = workertest.CheckKilled(c, w) c.Check(err, gc.ErrorMatches, "setting up watcher: boom") @@ -66,11 +86,7 @@ func (s *Suite) TestClosedWatcherChannel(c *gc.C) { close(s.client.watcher.changes) - w, err := migrationminion.New(migrationminion.Config{ - Facade: s.client, - Guard: s.guard, - Agent: s.agent, - }) + w, err := migrationminion.New(s.config) c.Assert(err, jc.ErrorIsNil) err = workertest.CheckKilled(c, w) c.Check(err, gc.ErrorMatches, "watcher channel closed") @@ -81,11 +97,7 @@ Phase: migration.NONE, } s.guard.unlockErr = errors.New("squish") - w, err := migrationminion.New(migrationminion.Config{ - Facade: s.client, - Guard: s.guard, - Agent: s.agent, - }) + w, err := migrationminion.New(s.config) c.Assert(err, jc.ErrorIsNil) err = workertest.CheckKilled(c, w) @@ -98,11 +110,7 @@ Phase: migration.QUIESCE, } s.guard.lockdownErr = errors.New("squash") - w, err := migrationminion.New(migrationminion.Config{ - Facade: s.client, - Guard: s.guard, - Agent: s.agent, - }) + w, err := migrationminion.New(s.config) c.Assert(err, jc.ErrorIsNil) err = workertest.CheckKilled(c, w) @@ -130,30 +138,113 @@ c.Logf("checking %s", phase) s.stub.ResetCalls() s.client.watcher.changes <- watcher.MigrationStatus{Phase: phase} - w, err := migrationminion.New(migrationminion.Config{ - Facade: s.client, - Guard: s.guard, - Agent: s.agent, - }) + w, err := migrationminion.New(s.config) c.Assert(err, jc.ErrorIsNil) 
workertest.CheckAlive(c, w) workertest.CleanKill(c, w) s.stub.CheckCallNames(c, "Watch", "Unlock") } +func (s *Suite) TestQUIESCE(c *gc.C) { + s.client.watcher.changes <- watcher.MigrationStatus{ + MigrationId: "id", + Phase: migration.QUIESCE, + } + w, err := migrationminion.New(s.config) + c.Assert(err, jc.ErrorIsNil) + defer workertest.CleanKill(c, w) + + s.waitForStubCalls(c, []string{ + "Watch", + "Lockdown", + "Report", + }) + s.stub.CheckCall(c, 2, "Report", "id", migration.QUIESCE, true) +} + +func (s *Suite) TestVALIDATION(c *gc.C) { + s.client.watcher.changes <- watcher.MigrationStatus{ + MigrationId: "id", + Phase: migration.VALIDATION, + TargetAPIAddrs: addrs, + TargetCACert: caCert, + } + w, err := migrationminion.New(s.config) + c.Assert(err, jc.ErrorIsNil) + defer workertest.CleanKill(c, w) + + s.waitForStubCalls(c, []string{ + "Watch", + "Lockdown", + "API open", + "ValidateMigration", + "API close", + "Report", + }) + s.stub.CheckCall(c, 2, "API open", &api.Info{ + ModelTag: modelTag, + Tag: agentTag, + Password: agentPassword, + Addrs: addrs, + CACert: caCert, + }) + s.stub.CheckCall(c, 5, "Report", "id", migration.VALIDATION, true) +} + +func (s *Suite) TestVALIDATIONCantConnect(c *gc.C) { + s.client.watcher.changes <- watcher.MigrationStatus{ + MigrationId: "id", + Phase: migration.VALIDATION, + } + s.config.APIOpen = func(*api.Info, api.DialOpts) (api.Connection, error) { + s.stub.AddCall("API open") + return nil, errors.New("boom") + } + w, err := migrationminion.New(s.config) + c.Assert(err, jc.ErrorIsNil) + defer workertest.CleanKill(c, w) + + s.waitForStubCalls(c, []string{ + "Watch", + "Lockdown", + "API open", + "Report", + }) + s.stub.CheckCall(c, 3, "Report", "id", migration.VALIDATION, false) +} + +func (s *Suite) TestVALIDATIONFail(c *gc.C) { + s.client.watcher.changes <- watcher.MigrationStatus{ + MigrationId: "id", + Phase: migration.VALIDATION, + } + s.config.ValidateMigration = func(base.APICaller) error { + 
s.stub.AddCall("ValidateMigration") + return errors.New("boom") + } + w, err := migrationminion.New(s.config) + c.Assert(err, jc.ErrorIsNil) + defer workertest.CleanKill(c, w) + + s.waitForStubCalls(c, []string{ + "Watch", + "Lockdown", + "API open", + "ValidateMigration", + "API close", + "Report", + }) + s.stub.CheckCall(c, 5, "Report", "id", migration.VALIDATION, false) +} + func (s *Suite) TestSUCCESS(c *gc.C) { - addrs := []string{"1.1.1.1:1", "9.9.9.9:9"} s.client.watcher.changes <- watcher.MigrationStatus{ MigrationId: "id", Phase: migration.SUCCESS, TargetAPIAddrs: addrs, - TargetCACert: "top secret", + TargetCACert: caCert, } - w, err := migrationminion.New(migrationminion.Config{ - Facade: s.client, - Guard: s.guard, - Agent: s.agent, - }) + w, err := migrationminion.New(s.config) c.Assert(err, jc.ErrorIsNil) select { @@ -163,11 +254,31 @@ } workertest.CleanKill(c, w) c.Assert(s.agent.conf.addrs, gc.DeepEquals, addrs) - c.Assert(s.agent.conf.caCert, gc.DeepEquals, "top secret") + c.Assert(s.agent.conf.caCert, gc.DeepEquals, caCert) s.stub.CheckCallNames(c, "Watch", "Lockdown", "Report") s.stub.CheckCall(c, 2, "Report", "id", migration.SUCCESS, true) } +func (s *Suite) waitForStubCalls(c *gc.C, expectedCallNames []string) { + var callNames []string + for a := coretesting.LongAttempt.Start(); a.Next(); { + callNames = stubCallNames(s.stub) + if reflect.DeepEqual(callNames, expectedCallNames) { + return + } + } + c.Fatalf("failed to see expected calls. 
saw: %v", callNames) +} + +// Make this a feature of stub +func stubCallNames(stub *jujutesting.Stub) []string { + var out []string + for _, call := range stub.Calls() { + out = append(out, call.FuncName) + } + return out +} + func newStubGuard(stub *jujutesting.Stub) *stubGuard { return &stubGuard{stub: stub} } @@ -239,7 +350,7 @@ type stubAgent struct { agent.Agent configChanged chan bool - conf stubConfig + conf stubAgentConfig } func (ma *stubAgent) CurrentConfig() agent.Config { @@ -251,7 +362,7 @@ return f(&ma.conf) } -type stubConfig struct { +type stubAgentConfig struct { agent.ConfigSetter mu sync.Mutex @@ -259,13 +370,13 @@ caCert string } -func (mc *stubConfig) setAddresses(addrs ...string) { +func (mc *stubAgentConfig) setAddresses(addrs ...string) { mc.mu.Lock() defer mc.mu.Unlock() mc.addrs = append([]string(nil), addrs...) } -func (mc *stubConfig) SetAPIHostPorts(servers [][]network.HostPort) { +func (mc *stubAgentConfig) SetAPIHostPorts(servers [][]network.HostPort) { mc.mu.Lock() defer mc.mu.Unlock() mc.addrs = nil @@ -276,8 +387,30 @@ } } -func (mc *stubConfig) SetCACert(cert string) { +func (mc *stubAgentConfig) SetCACert(cert string) { mc.mu.Lock() defer mc.mu.Unlock() mc.caCert = cert } + +func (mc *stubAgentConfig) APIInfo() (*api.Info, bool) { + mc.mu.Lock() + defer mc.mu.Unlock() + return &api.Info{ + Addrs: mc.addrs, + CACert: mc.caCert, + ModelTag: modelTag, + Tag: agentTag, + Password: agentPassword, + }, true +} + +type stubConnection struct { + api.Connection + stub *jujutesting.Stub +} + +func (c *stubConnection) Close() error { + c.stub.AddCall("API close") + return nil +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/modelworkermanager/modelworkermanager.go juju-core-2.0.0/src/github.com/juju/juju/worker/modelworkermanager/modelworkermanager.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/modelworkermanager/modelworkermanager.go 2016-08-16 08:56:25.000000000 +0000 +++ 
juju-core-2.0.0/src/github.com/juju/juju/worker/modelworkermanager/modelworkermanager.go 2016-10-13 14:31:49.000000000 +0000 @@ -24,19 +24,23 @@ // NewWorkerFunc should return a worker responsible for running // all a model's required workers; and for returning nil when // there's no more model to manage. -type NewWorkerFunc func(modelUUID string) (worker.Worker, error) +type NewWorkerFunc func(controllerUUID, modelUUID string) (worker.Worker, error) // Config holds the dependencies and configuration necessary to run // a model worker manager. type Config struct { - Backend Backend - NewWorker NewWorkerFunc - ErrorDelay time.Duration + ControllerUUID string + Backend Backend + NewWorker NewWorkerFunc + ErrorDelay time.Duration } // Validate returns an error if config cannot be expected to drive // a functional model worker manager. func (config Config) Validate() error { + if config.ControllerUUID == "" { + return errors.NotValidf("missing controller UUID") + } if config.Backend == nil { return errors.NotValidf("nil Backend") } @@ -103,8 +107,8 @@ if !ok { return errors.New("changes stopped") } - for _, uuid := range uuids { - if err := m.ensure(uuid); err != nil { + for _, modelUUID := range uuids { + if err := m.ensure(m.config.ControllerUUID, modelUUID); err != nil { return errors.Trace(err) } } @@ -112,20 +116,20 @@ } } -func (m *modelWorkerManager) ensure(uuid string) error { - starter := m.starter(uuid) - if err := m.runner.StartWorker(uuid, starter); err != nil { +func (m *modelWorkerManager) ensure(controllerUUID, modelUUID string) error { + starter := m.starter(controllerUUID, modelUUID) + if err := m.runner.StartWorker(modelUUID, starter); err != nil { return errors.Trace(err) } return nil } -func (m *modelWorkerManager) starter(uuid string) func() (worker.Worker, error) { +func (m *modelWorkerManager) starter(controllerUUID, modelUUID string) func() (worker.Worker, error) { return func() (worker.Worker, error) { - logger.Debugf("starting workers for %s", 
uuid) - worker, err := m.config.NewWorker(uuid) + logger.Debugf("starting workers for model %q", modelUUID) + worker, err := m.config.NewWorker(controllerUUID, modelUUID) if err != nil { - return nil, errors.Annotatef(err, "cannot manage model %q", uuid) + return nil, errors.Annotatef(err, "cannot manage model %q", modelUUID) } return worker, nil } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/modelworkermanager/modelworkermanager_test.go juju-core-2.0.0/src/github.com/juju/juju/worker/modelworkermanager/modelworkermanager_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/modelworkermanager/modelworkermanager_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/modelworkermanager/modelworkermanager_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -10,7 +10,7 @@ "github.com/juju/testing" jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" - "launchpad.net/tomb" + "gopkg.in/tomb.v1" "github.com/juju/juju/state" coretesting "github.com/juju/juju/testing" @@ -146,9 +146,10 @@ func (s *suite) runKillTest(c *gc.C, kill killFunc, test testFunc) { backend := newMockBackend() config := modelworkermanager.Config{ - Backend: backend, - NewWorker: s.startModelWorker, - ErrorDelay: time.Millisecond, + ControllerUUID: coretesting.ControllerTag.Id(), + Backend: backend, + NewWorker: s.startModelWorker, + ErrorDelay: time.Millisecond, } w, err := modelworkermanager.New(config) c.Assert(err, jc.ErrorIsNil) @@ -156,8 +157,8 @@ test(w, backend) } -func (s *suite) startModelWorker(uuid string) (worker.Worker, error) { - worker := newMockWorker(uuid) +func (s *suite) startModelWorker(controllerUUID, modelUUID string) (worker.Worker, error) { + worker := newMockWorker(controllerUUID, modelUUID) s.workerC <- worker return worker, nil } @@ -199,8 +200,8 @@ } } -func newMockWorker(uuid string) *mockWorker { - w := &mockWorker{uuid: uuid} +func newMockWorker(_, modelUUID string) *mockWorker { + w := 
&mockWorker{uuid: modelUUID} go func() { defer w.tomb.Done() <-w.tomb.Dying() diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/peergrouper/mock_test.go juju-core-2.0.0/src/github.com/juju/juju/worker/peergrouper/mock_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/peergrouper/mock_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/peergrouper/mock_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -15,7 +15,7 @@ "github.com/juju/errors" "github.com/juju/replicaset" "github.com/juju/utils/voyeur" - "launchpad.net/tomb" + "gopkg.in/tomb.v1" "github.com/juju/juju/apiserver/common/networkingcommon" "github.com/juju/juju/apiserver/testing" @@ -359,7 +359,7 @@ func (m *fakeMachine) Status() (status.StatusInfo, error) { return status.StatusInfo{ - Status: status.StatusStarted, + Status: status.Started, }, nil } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/peergrouper/worker.go juju-core-2.0.0/src/github.com/juju/juju/worker/peergrouper/worker.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/peergrouper/worker.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/peergrouper/worker.go 2016-10-13 14:31:49.000000000 +0000 @@ -288,7 +288,7 @@ if err != nil { return false, errors.Annotatef(err, "cannot get status for machine %q", id) } - if machineStatus.Status == status.StatusStarted { + if machineStatus.Status == status.Started { logger.Debugf("machine %q has started, adding it to peergrouper list", id) tracker, err := newMachineTracker(stm, w.machineChanges) if err != nil { diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/periodicworker.go juju-core-2.0.0/src/github.com/juju/juju/worker/periodicworker.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/periodicworker.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/periodicworker.go 2016-10-13 14:31:49.000000000 +0000 @@ -7,7 
+7,7 @@ "errors" "time" - "launchpad.net/tomb" + "gopkg.in/tomb.v1" ) // ErrKilled can be returned by the PeriodicWorkerCall to signify that diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/provisioner/broker_test.go juju-core-2.0.0/src/github.com/juju/juju/worker/provisioner/broker_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/provisioner/broker_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/provisioner/broker_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -147,7 +147,7 @@ // To isolate the tests from the host's architecture, we override it here. s.PatchValue(&arch.HostArch, func() string { return arch.AMD64 }) apiInfo := jujutesting.FakeAPIInfo(machineId) - instanceConfig, err := instancecfg.NewInstanceConfig(machineId, machineNonce, "released", "quantal", true, apiInfo) + instanceConfig, err := instancecfg.NewInstanceConfig(coretesting.ControllerTag, machineId, machineNonce, "released", "quantal", apiInfo) c.Assert(err, jc.ErrorIsNil) return instanceConfig } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/provisioner/container_initialisation_test.go juju-core-2.0.0/src/github.com/juju/juju/worker/provisioner/container_initialisation_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/provisioner/container_initialisation_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/provisioner/container_initialisation_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -134,7 +134,7 @@ c.Assert(container.Remove(), gc.IsNil) c.Assert(host.EnsureDead(), gc.IsNil) s.checkStopInstances(c, inst) - s.waitRemoved(c, host) + s.waitForRemovalMark(c, host) } func (s *ContainerSetupSuite) assertContainerProvisionerStarted( @@ -365,27 +365,11 @@ } func getContainerInstance() (cont []ContainerInstance, err error) { - current_os, err := series.GetOSFromSeries(series.HostSeries()) - if err != nil { - return nil, err + cont = 
[]ContainerInstance{ + {instance.KVM, [][]string{ + {"uvtool-libvirt"}, + {"uvtool"}, + }}, } - - switch current_os { - case jujuos.CentOS: - cont = []ContainerInstance{ - {instance.KVM, [][]string{ - {"uvtool-libvirt"}, - {"uvtool"}, - }}, - } - default: - cont = []ContainerInstance{ - {instance.KVM, [][]string{ - {"uvtool-libvirt"}, - {"uvtool"}, - }}, - } - } - return cont, nil } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/provisioner/kvm-broker_test.go juju-core-2.0.0/src/github.com/juju/juju/worker/provisioner/kvm-broker_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/provisioner/kvm-broker_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/provisioner/kvm-broker_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -82,6 +82,7 @@ Nonce: "nonce", APIAddresses: []string{"10.0.0.1:1234"}, CACert: coretesting.CACert, + Controller: coretesting.ControllerTag, Model: coretesting.ModelTag, }) c.Assert(err, jc.ErrorIsNil) @@ -339,7 +340,7 @@ // ...and removed, along with the machine, when the machine is Dead. 
c.Assert(container.EnsureDead(), gc.IsNil) s.expectStopped(c, instId) - s.waitRemoved(c, container) + s.waitForRemovalMark(c, container) } func (s *kvmProvisionerSuite) TestKVMProvisionerObservesConfigChanges(c *gc.C) { diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/provisioner/lxd-broker_test.go juju-core-2.0.0/src/github.com/juju/juju/worker/provisioner/lxd-broker_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/provisioner/lxd-broker_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/provisioner/lxd-broker_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -56,6 +56,7 @@ Nonce: "nonce", APIAddresses: []string{"10.0.0.1:1234"}, CACert: coretesting.CACert, + Controller: coretesting.ControllerTag, Model: coretesting.ModelTag, }) c.Assert(err, jc.ErrorIsNil) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/provisioner/provisioner.go juju-core-2.0.0/src/github.com/juju/juju/worker/provisioner/provisioner.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/provisioner/provisioner.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/provisioner/provisioner.go 2016-10-13 14:31:49.000000000 +0000 @@ -149,10 +149,6 @@ return nil, errors.Annotate(err, "could not retrieve the controller config.") } - secureServerConnection := false - if info, ok := p.agentConfig.StateServingInfo(); ok { - secureServerConnection = info.CAPrivateKey != "" - } task, err := NewProvisionerTask( controllerCfg.ControllerUUID(), machineTag, @@ -164,7 +160,6 @@ p.broker, auth, modelCfg.ImageStream(), - secureServerConnection, RetryStrategy{retryDelay: retryStrategyDelay, retryCount: retryStrategyCount}, ) if err != nil { diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/provisioner/provisioner_task.go juju-core-2.0.0/src/github.com/juju/juju/worker/provisioner/provisioner_task.go --- 
juju-core-2.0~beta15/src/github.com/juju/juju/worker/provisioner/provisioner_task.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/provisioner/provisioner_task.go 2016-10-13 14:31:49.000000000 +0000 @@ -16,6 +16,7 @@ "github.com/juju/juju/apiserver/params" "github.com/juju/juju/cloudconfig/instancecfg" "github.com/juju/juju/constraints" + "github.com/juju/juju/controller" "github.com/juju/juju/controller/authentication" "github.com/juju/juju/environs" "github.com/juju/juju/environs/config" @@ -32,6 +33,7 @@ "github.com/juju/juju/watcher" "github.com/juju/juju/worker" "github.com/juju/juju/worker/catacomb" + "github.com/juju/juju/wrench" "github.com/juju/version" ) @@ -69,7 +71,6 @@ broker environs.InstanceBroker, auth authentication.AuthenticationProvider, imageStream string, - secureServerConnection bool, retryStartInstanceStrategy RetryStrategy, ) (ProvisionerTask, error) { machineChanges := machineWatcher.Changes() @@ -92,7 +93,6 @@ harvestModeChan: make(chan config.HarvestMode, 1), machines: make(map[string]*apiprovisioner.Machine), imageStream: imageStream, - secureServerConnection: secureServerConnection, retryStartInstanceStrategy: retryStartInstanceStrategy, } err := catacomb.Invoke(catacomb.Plan{ @@ -117,7 +117,6 @@ catacomb catacomb.Catacomb auth authentication.AuthenticationProvider imageStream string - secureServerConnection bool harvestMode config.HarvestMode harvestModeChan chan config.HarvestMode retryStartInstanceStrategy RetryStrategy @@ -205,7 +204,7 @@ continue } machine := machines[i] - if err := machine.SetStatus(status.StatusPending, "", nil); err != nil { + if err := machine.SetStatus(status.Pending, "", nil); err != nil { logger.Errorf("cannot reset status of machine %q: %v", statusResult.Id, err) continue } @@ -273,7 +272,7 @@ // Remove any dead machines from state. 
for _, machine := range dead { logger.Infof("removing dead machine %q", machine) - if err := machine.Remove(); err != nil { + if err := machine.MarkForRemoval(); err != nil { logger.Errorf("failed to remove dead machine %q", machine) } delete(task.machines, machine.Id()) @@ -398,7 +397,7 @@ logger.Infof("cannot get machine id:%s, details:%v, err:%v", machine.Id(), machine, err) return None, nil } - if machineStatus == status.StatusPending { + if machineStatus == status.Pending { logger.Infof("found machine pending provisioning id:%s, details:%v", machine.Id(), machine) return Pending, nil } @@ -467,6 +466,10 @@ if len(instances) == 0 { return nil } + if wrench.IsActive("provisioner", "stop-instances") { + return errors.New("wrench in the works") + } + ids := make([]instance.Id, len(instances)) for i, inst := range instances { ids[i] = inst.Id() @@ -498,11 +501,11 @@ nonce := fmt.Sprintf("%s:%s", task.machineTag, uuid) instanceConfig, err := instancecfg.NewInstanceConfig( + names.NewControllerTag(controller.Config(pInfo.ControllerConfig).ControllerUUID()), machine.Id(), nonce, task.imageStream, pInfo.Series, - task.secureServerConnection, apiInfo, ) if err != nil { @@ -682,7 +685,7 @@ func (task *provisionerTask) setErrorStatus(message string, machine *apiprovisioner.Machine, err error) error { logger.Errorf(message, machine, err) - if err1 := machine.SetStatus(status.StatusError, err.Error(), nil); err1 != nil { + if err1 := machine.SetStatus(status.Error, err.Error(), nil); err1 != nil { // Something is wrong with this machine, better report it back. 
return errors.Annotatef(err1, "cannot set error status for machine %q", machine) } @@ -709,7 +712,7 @@ logger.Warningf("%v", errors.Annotate(err, "starting instance")) retryMsg := fmt.Sprintf("will retry to start instance in %v", task.retryStartInstanceStrategy.retryDelay) - if err2 := machine.SetStatus(status.StatusPending, retryMsg, nil); err2 != nil { + if err2 := machine.SetStatus(status.Pending, retryMsg, nil); err2 != nil { logger.Errorf("%v", err2) } logger.Infof(retryMsg) @@ -722,8 +725,8 @@ } networkConfig := networkingcommon.NetworkConfigFromInterfaceInfo(result.NetworkInfo) - volumes := volumesToApiserver(result.Volumes) - volumeNameToAttachmentInfo := volumeAttachmentsToApiserver(result.VolumeAttachments) + volumes := volumesToAPIserver(result.Volumes) + volumeNameToAttachmentInfo := volumeAttachmentsToAPIserver(result.VolumeAttachments) if err := machine.SetInstanceInfo( result.Instance.Id(), @@ -777,7 +780,7 @@ } } -func volumesToApiserver(volumes []storage.Volume) []params.Volume { +func volumesToAPIserver(volumes []storage.Volume) []params.Volume { result := make([]params.Volume, len(volumes)) for i, v := range volumes { result[i] = params.Volume{ @@ -793,7 +796,7 @@ return result } -func volumeAttachmentsToApiserver(attachments []storage.VolumeAttachment) map[string]params.VolumeAttachmentInfo { +func volumeAttachmentsToAPIserver(attachments []storage.VolumeAttachment) map[string]params.VolumeAttachmentInfo { result := make(map[string]params.VolumeAttachmentInfo) for _, a := range attachments { result[a.Volume.String()] = params.VolumeAttachmentInfo{ diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/provisioner/provisioner_test.go juju-core-2.0.0/src/github.com/juju/juju/worker/provisioner/provisioner_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/provisioner/provisioner_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/provisioner/provisioner_test.go 2016-10-13 
14:31:49.000000000 +0000 @@ -107,7 +107,7 @@ func (s *CommonProvisionerSuite) SetUpSuite(c *gc.C) { s.JujuConnSuite.SetUpSuite(c) - s.defaultConstraints = constraints.MustParse("arch=amd64 mem=4G cpu-cores=1 root-disk=8G") + s.defaultConstraints = constraints.MustParse("arch=amd64 mem=4G cores=1 root-disk=8G") } func (s *CommonProvisionerSuite) SetUpTest(c *gc.C) { @@ -117,16 +117,17 @@ imagetesting.PatchOfficialDataSources(&s.CleanupSuite, "") // We want an image to start test instances err := s.State.CloudImageMetadataStorage.SaveMetadata([]cloudimagemetadata.Metadata{{ - cloudimagemetadata.MetadataAttributes{ + MetadataAttributes: cloudimagemetadata.MetadataAttributes{ Region: "region", Series: "trusty", Arch: "amd64", VirtType: "", RootStorageType: "", Source: "test", + Stream: "released", }, - 10, - "-999", + Priority: 10, + ImageId: "-999", }}) c.Assert(err, jc.ErrorIsNil) @@ -334,12 +335,11 @@ } } -func (s *CommonProvisionerSuite) waitMachine(c *gc.C, m *state.Machine, check func() bool) { +func (s *CommonProvisionerSuite) waitForWatcher(c *gc.C, w state.NotifyWatcher, name string, check func() bool) { // TODO(jam): We need to grow a new method on NotifyWatcherC // that calls StartSync while waiting for changes, then // waitMachine and waitHardwareCharacteristics can use that // instead - w := m.Watch() defer stop(c, w) timeout := time.After(coretesting.LongWait) resync := time.After(0) @@ -353,40 +353,29 @@ resync = time.After(coretesting.ShortWait) s.BackingState.StartSync() case <-timeout: - c.Fatalf("machine %v wait timed out", m) + c.Fatalf("%v wait timed out", name) } } } func (s *CommonProvisionerSuite) waitHardwareCharacteristics(c *gc.C, m *state.Machine, check func() bool) { w := m.WatchHardwareCharacteristics() - defer stop(c, w) - timeout := time.After(coretesting.LongWait) - resync := time.After(0) - for { - select { - case <-w.Changes(): - if check() { - return - } - case <-resync: - resync = time.After(coretesting.ShortWait) - 
s.BackingState.StartSync() - case <-timeout: - c.Fatalf("hardware characteristics for machine %v wait timed out", m) - } - } + name := fmt.Sprintf("hardware characteristics for machine %v", m) + s.waitForWatcher(c, w, name, check) } -// waitRemoved waits for the supplied machine to be removed from state. -func (s *CommonProvisionerSuite) waitRemoved(c *gc.C, m *state.Machine) { - s.waitMachine(c, m, func() bool { - err := m.Refresh() - if errors.IsNotFound(err) { - return true - } +// waitForRemovalMark waits for the supplied machine to be marked for removal. +func (s *CommonProvisionerSuite) waitForRemovalMark(c *gc.C, m *state.Machine) { + w := s.BackingState.WatchMachineRemovals() + name := fmt.Sprintf("machine %v marked for removal", m) + s.waitForWatcher(c, w, name, func() bool { + removals, err := s.BackingState.AllMachineRemovals() c.Assert(err, jc.ErrorIsNil) - c.Logf("machine %v is still %s", m, m.Life()) + for _, removal := range removals { + if removal == m.Id() { + return true + } + } return false }) } @@ -457,14 +446,14 @@ // ...and removed, along with the machine, when the machine is Dead. c.Assert(m.EnsureDead(), gc.IsNil) s.checkStopInstances(c, instance) - s.waitRemoved(c, m) + s.waitForRemovalMark(c, m) } func (s *ProvisionerSuite) TestConstraints(c *gc.C) { // Create a machine with non-standard constraints. m, err := s.addMachine() c.Assert(err, jc.ErrorIsNil) - cons := constraints.MustParse("mem=8G arch=amd64 cpu-cores=2 root-disk=10G") + cons := constraints.MustParse("mem=8G arch=amd64 cores=2 root-disk=10G") err = m.SetConstraints(cons) c.Assert(err, jc.ErrorIsNil) @@ -538,11 +527,11 @@ // And check the machine status is set to error. 
statusInfo, err := m.Status() c.Assert(err, jc.ErrorIsNil) - if statusInfo.Status == status.StatusPending { + if statusInfo.Status == status.Pending { time.Sleep(coretesting.ShortWait) continue } - c.Assert(statusInfo.Status, gc.Equals, status.StatusError) + c.Assert(statusInfo.Status, gc.Equals, status.Error) c.Assert(statusInfo.Message, gc.Equals, "no matching tools available") break } @@ -585,11 +574,11 @@ // And check the machine status is set to error. statusInfo, err := m.Status() c.Assert(err, jc.ErrorIsNil) - if statusInfo.Status == status.StatusPending { + if statusInfo.Status == status.Pending { time.Sleep(coretesting.ShortWait) continue } - c.Assert(statusInfo.Status, gc.Equals, status.StatusError) + c.Assert(statusInfo.Status, gc.Equals, status.Error) // check that the status matches the error message c.Assert(statusInfo.Message, gc.Equals, destroyError.Error()) return @@ -646,7 +635,7 @@ stop(c, p) statusInfo, err := m.Status() c.Assert(err, jc.ErrorIsNil) - c.Assert(statusInfo.Status, gc.Equals, status.StatusPending) + c.Assert(statusInfo.Status, gc.Equals, status.Pending) s.checkNoOperations(c) } @@ -675,7 +664,7 @@ c.Assert(container.Remove(), gc.IsNil) c.Assert(m.EnsureDead(), gc.IsNil) s.checkStopInstances(c, inst) - s.waitRemoved(c, m) + s.waitForRemovalMark(c, m) } func (s *ProvisionerSuite) TestProvisioningDoesNotOccurForKVM(c *gc.C) { @@ -703,7 +692,7 @@ c.Assert(container.Remove(), gc.IsNil) c.Assert(m.EnsureDead(), gc.IsNil) s.checkStopInstances(c, inst) - s.waitRemoved(c, m) + s.waitForRemovalMark(c, m) } type MachineClassifySuite struct { @@ -755,23 +744,23 @@ var machineClassificationTests = []machineClassificationTest{{ description: "Dead machine is dead", life: params.Dead, - status: status.StatusStarted, + status: status.Started, classification: provisioner.Dead, }, { description: "Dying machine can carry on dying", life: params.Dying, - status: status.StatusStarted, + status: status.Started, classification: provisioner.None, }, { 
description: "Dying unprovisioned machine is ensured dead", life: params.Dying, - status: status.StatusStarted, + status: status.Started, classification: provisioner.Dead, idErr: params.CodeNotProvisioned, }, { description: "Can't load provisioned dying machine", life: params.Dying, - status: status.StatusStarted, + status: status.Started, classification: provisioner.None, idErr: params.CodeNotFound, expectErrCode: params.CodeNotFound, @@ -779,14 +768,14 @@ }, { description: "Alive machine is not provisioned - pending", life: params.Alive, - status: status.StatusPending, + status: status.Pending, classification: provisioner.Pending, idErr: params.CodeNotProvisioned, expectErrFmt: "found machine pending provisioning id:%s.*", }, { description: "Alive, pending machine not found", life: params.Alive, - status: status.StatusPending, + status: status.Pending, classification: provisioner.None, idErr: params.CodeNotFound, expectErrCode: params.CodeNotFound, @@ -800,7 +789,7 @@ }, { description: "Dying machine fails to ensure dead", life: params.Dying, - status: status.StatusStarted, + status: status.Started, classification: provisioner.None, idErr: params.CodeNotProvisioned, expectErrCode: params.CodeNotFound, @@ -811,14 +800,14 @@ var machineClassificationTestsRequireMaintenance = machineClassificationTest{ description: "Machine needs maintaining", life: params.Alive, - status: status.StatusStarted, + status: status.Started, classification: provisioner.Maintain, } var machineClassificationTestsNoMaintenance = machineClassificationTest{ description: "Machine doesn't need maintaining", life: params.Alive, - status: status.StatusStarted, + status: status.Started, classification: provisioner.None, } @@ -899,7 +888,7 @@ // Cleanup. 
c.Assert(m.EnsureDead(), gc.IsNil) s.checkStopInstances(c, inst) - s.waitRemoved(c, m) + s.waitForRemovalMark(c, m) } func (s *ProvisionerSuite) testProvisioningFailsAndSetsErrorStatusForConstraints( @@ -922,11 +911,11 @@ for time.Since(t0) < coretesting.LongWait { statusInfo, err := machine.Status() c.Assert(err, jc.ErrorIsNil) - if statusInfo.Status == status.StatusPending { + if statusInfo.Status == status.Pending { time.Sleep(coretesting.ShortWait) continue } - c.Assert(statusInfo.Status, gc.Equals, status.StatusError) + c.Assert(statusInfo.Status, gc.Equals, status.Error) c.Assert(statusInfo.Message, gc.Equals, expectedErrorStatus) break } @@ -1019,7 +1008,7 @@ // Cleanup. c.Assert(m.EnsureDead(), gc.IsNil) s.checkStopInstances(c, inst) - s.waitRemoved(c, m) + s.waitForRemovalMark(c, m) } func (s *ProvisionerSuite) TestProvisioningDoesNotProvisionTheSameMachineAfterRestart(c *gc.C) { @@ -1071,7 +1060,7 @@ p = s.newEnvironProvisioner(c) defer stop(c, p) s.checkNoOperations(c) - s.waitRemoved(c, m1) + s.waitForRemovalMark(c, m1) // verify the other one's still fine err = m0.Refresh() @@ -1150,7 +1139,6 @@ broker, auth, imagemetadata.ReleasedStream, - true, retryStrategy, ) c.Assert(err, jc.ErrorIsNil) @@ -1199,7 +1187,7 @@ // When only harvesting unknown machines, only one of the machines // is stopped. s.checkStopSomeInstances(c, []instance.Instance{i1}, []instance.Instance{i0}) - s.waitRemoved(c, m0) + s.waitForRemovalMark(c, m0) } func (s *ProvisionerSuite) TestHarvestDestroyedReapsOnlyDestroyed(c *gc.C) { @@ -1225,7 +1213,7 @@ // When only harvesting destroyed machines, only one of the // machines is stopped. s.checkStopSomeInstances(c, []instance.Instance{i0}, []instance.Instance{i1}) - s.waitRemoved(c, m0) + s.waitForRemovalMark(c, m0) } func (s *ProvisionerSuite) TestHarvestAllReapsAllTheThings(c *gc.C) { @@ -1250,7 +1238,7 @@ // Everything must die! 
s.checkStopSomeInstances(c, []instance.Instance{i0, i1}, []instance.Instance{}) - s.waitRemoved(c, m0) + s.waitForRemovalMark(c, m0) } func (s *ProvisionerSuite) TestProvisionerRetriesTransientErrors(c *gc.C) { @@ -1284,7 +1272,7 @@ case <-time.After(coretesting.ShortWait): now := time.Now() sInfo := status.StatusInfo{ - Status: status.StatusError, + Status: status.Error, Message: "info", Data: map[string]interface{}{"transient": true}, Since: &now, @@ -1300,7 +1288,7 @@ // Machine 4 is never provisioned. statusInfo, err := m4.Status() c.Assert(err, jc.ErrorIsNil) - c.Assert(statusInfo.Status, gc.Equals, status.StatusError) + c.Assert(statusInfo.Status, gc.Equals, status.Error) _, err = m4.InstanceId() c.Assert(err, jc.Satisfies, errors.IsNotProvisioned) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/proxyupdater/manifold_test.go juju-core-2.0.0/src/github.com/juju/juju/worker/proxyupdater/manifold_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/proxyupdater/manifold_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/proxyupdater/manifold_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -88,7 +88,7 @@ s.startErr = errors.New("boom") context := dt.StubContext(nil, map[string]interface{}{ "agent-name": &dummyAgent{}, - "api-caller-name": &dummyApiCaller{}, + "api-caller-name": &dummyAPICaller{}, }) worker, err := s.manifold().Start(context) @@ -99,7 +99,7 @@ func (s *ManifoldSuite) TestStartSuccess(c *gc.C) { context := dt.StubContext(nil, map[string]interface{}{ "agent-name": &dummyAgent{}, - "api-caller-name": &dummyApiCaller{}, + "api-caller-name": &dummyAPICaller{}, }) worker, err := s.manifold().Start(context) @@ -130,11 +130,11 @@ return names.NewMachineTag("42") } -type dummyApiCaller struct { +type dummyAPICaller struct { base.APICaller } -func (*dummyApiCaller) BestFacadeVersion(_ string) int { +func (*dummyAPICaller) BestFacadeVersion(_ string) int { return 42 } diff -Nru 
juju-core-2.0~beta15/src/github.com/juju/juju/worker/reboot/reboot.go juju-core-2.0.0/src/github.com/juju/juju/worker/reboot/reboot.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/reboot/reboot.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/reboot/reboot.go 2016-10-13 14:31:49.000000000 +0000 @@ -11,7 +11,7 @@ "github.com/juju/mutex" "github.com/juju/utils/clock" "gopkg.in/juju/names.v2" - "launchpad.net/tomb" + "gopkg.in/tomb.v1" "github.com/juju/juju/agent" "github.com/juju/juju/api/reboot" diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/resumer/manifold.go juju-core-2.0.0/src/github.com/juju/juju/worker/resumer/manifold.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/resumer/manifold.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/resumer/manifold.go 2016-10-13 14:31:49.000000000 +0000 @@ -29,7 +29,7 @@ NewWorker func(Config) (worker.Worker, error) } -// newWorker is an engine.AgentApiStartFunc that draws context from the +// newWorker is an engine.AgentAPIStartFunc that draws context from the // ManifoldConfig on which it is defined. func (config ManifoldConfig) newWorker(a agent.Agent, apiCaller base.APICaller) (worker.Worker, error) { @@ -74,11 +74,11 @@ // Manifold returns a dependency manifold that runs a resumer worker, // using the resources named or defined in the supplied config. 
func Manifold(config ManifoldConfig) dependency.Manifold { - aaConfig := engine.AgentApiManifoldConfig{ + aaConfig := engine.AgentAPIManifoldConfig{ AgentName: config.AgentName, APICallerName: config.APICallerName, } - return engine.AgentApiManifold(aaConfig, config.newWorker) + return engine.AgentAPIManifold(aaConfig, config.newWorker) } // isModelManager returns whether the agent has JobManageModel, diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/resumer/resumer_test.go juju-core-2.0.0/src/github.com/juju/juju/worker/resumer/resumer_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/resumer/resumer_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/resumer/resumer_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -24,7 +24,7 @@ func (*ResumerSuite) TestImmediateFailure(c *gc.C) { fix := newFixture(errors.New("zap")) - stub := fix.Run(c, func(_ *coretesting.Clock, worker *resumer.Resumer) { + stub := fix.Run(c, func(_ *testing.Clock, worker *resumer.Resumer) { err := workertest.CheckKilled(c, worker) c.Check(err, gc.ErrorMatches, "cannot resume transactions: zap") }) @@ -33,7 +33,7 @@ func (*ResumerSuite) TestWaitsToResume(c *gc.C) { fix := newFixture(nil, errors.New("unexpected")) - stub := fix.Run(c, func(clock *coretesting.Clock, worker *resumer.Resumer) { + stub := fix.Run(c, func(clock *testing.Clock, worker *resumer.Resumer) { waitAlarms(c, clock, 2) clock.Advance(time.Hour - time.Nanosecond) workertest.CheckAlive(c, worker) @@ -44,7 +44,7 @@ func (*ResumerSuite) TestResumesAfterWait(c *gc.C) { fix := newFixture(nil, nil, errors.New("unexpected")) - stub := fix.Run(c, func(clock *coretesting.Clock, worker *resumer.Resumer) { + stub := fix.Run(c, func(clock *testing.Clock, worker *resumer.Resumer) { waitAlarms(c, clock, 2) clock.Advance(time.Hour) waitAlarms(c, clock, 1) @@ -55,7 +55,7 @@ func (*ResumerSuite) TestSeveralResumes(c *gc.C) { fix := newFixture(nil, nil, nil, 
errors.New("unexpected")) - stub := fix.Run(c, func(clock *coretesting.Clock, worker *resumer.Resumer) { + stub := fix.Run(c, func(clock *testing.Clock, worker *resumer.Resumer) { waitAlarms(c, clock, 2) clock.Advance(time.Hour) waitAlarms(c, clock, 1) @@ -74,13 +74,13 @@ errors []error } -type TestFunc func(*coretesting.Clock, *resumer.Resumer) +type TestFunc func(*testing.Clock, *resumer.Resumer) func (fix fixture) Run(c *gc.C, test TestFunc) *testing.Stub { stub := &testing.Stub{} stub.SetErrors(fix.errors...) - clock := coretesting.NewClock(time.Now()) + clock := testing.NewClock(time.Now()) facade := newMockFacade(stub) worker, err := resumer.NewResumer(resumer.Config{ @@ -108,7 +108,7 @@ return mock.stub.NextErr() } -func waitAlarms(c *gc.C, clock *coretesting.Clock, count int) { +func waitAlarms(c *gc.C, clock *testing.Clock, count int) { timeout := time.After(coretesting.LongWait) for i := 0; i < count; i++ { select { diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/retrystrategy/manifold.go juju-core-2.0.0/src/github.com/juju/juju/worker/retrystrategy/manifold.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/retrystrategy/manifold.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/retrystrategy/manifold.go 2016-10-13 14:31:49.000000000 +0000 @@ -26,11 +26,11 @@ // Manifold returns a dependency manifold that runs a hook retry strategy worker, // using the agent name and the api connection resources named in the supplied config. 
func Manifold(config ManifoldConfig) dependency.Manifold { - typedConfig := engine.AgentApiManifoldConfig{ + typedConfig := engine.AgentAPIManifoldConfig{ AgentName: config.AgentName, APICallerName: config.APICallerName, } - manifold := engine.AgentApiManifold(typedConfig, config.start) + manifold := engine.AgentAPIManifold(typedConfig, config.start) manifold.Output = config.output return manifold } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/runner.go juju-core-2.0.0/src/github.com/juju/juju/worker/runner.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/runner.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/runner.go 2016-10-13 14:31:49.000000000 +0000 @@ -7,7 +7,7 @@ "time" "github.com/juju/errors" - "launchpad.net/tomb" + "gopkg.in/tomb.v1" ) // RestartDelay holds the length of time that a worker diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/runner_test.go juju-core-2.0.0/src/github.com/juju/juju/worker/runner_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/runner_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/runner_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -11,7 +11,7 @@ "github.com/juju/errors" jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" - "launchpad.net/tomb" + "gopkg.in/tomb.v1" "github.com/juju/juju/testing" "github.com/juju/juju/worker" diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/simpleworker.go juju-core-2.0.0/src/github.com/juju/juju/worker/simpleworker.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/simpleworker.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/simpleworker.go 2016-10-13 14:31:49.000000000 +0000 @@ -3,7 +3,7 @@ package worker -import "launchpad.net/tomb" +import "gopkg.in/tomb.v1" // simpleWorker implements the worker returned by NewSimpleWorker. 
type simpleWorker struct { diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/singular/fixture_test.go juju-core-2.0.0/src/github.com/juju/juju/worker/singular/fixture_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/singular/fixture_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/singular/fixture_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -32,11 +32,11 @@ return fix } -type testFunc func(*singular.FlagWorker, *coretesting.Clock, func()) +type testFunc func(*singular.FlagWorker, *testing.Clock, func()) func (fix *fixture) Run(c *gc.C, test testFunc) { facade := newStubFacade(&fix.Stub) - clock := coretesting.NewClock(time.Now()) + clock := testing.NewClock(time.Now()) flagWorker, err := singular.NewFlagWorker(singular.FlagConfig{ Facade: facade, Clock: clock, diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/singular/flag_test.go juju-core-2.0.0/src/github.com/juju/juju/worker/singular/flag_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/singular/flag_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/singular/flag_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -11,7 +11,6 @@ jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" - coretesting "github.com/juju/juju/testing" "github.com/juju/juju/worker/singular" "github.com/juju/juju/worker/workertest" ) @@ -37,7 +36,7 @@ func (s *FlagSuite) TestClaimFailure(c *gc.C) { fix := newFixture(c, errClaimDenied, nil) - fix.Run(c, func(flag *singular.FlagWorker, _ *coretesting.Clock, _ func()) { + fix.Run(c, func(flag *singular.FlagWorker, _ *testing.Clock, _ func()) { c.Check(flag.Check(), jc.IsFalse) workertest.CheckAlive(c, flag) }) @@ -46,7 +45,7 @@ func (s *FlagSuite) TestClaimFailureWaitError(c *gc.C) { fix := newFixture(c, errClaimDenied, errors.New("glug")) - fix.Run(c, func(flag *singular.FlagWorker, _ *coretesting.Clock, unblock func()) { + fix.Run(c, 
func(flag *singular.FlagWorker, _ *testing.Clock, unblock func()) { c.Check(flag.Check(), jc.IsFalse) unblock() err := workertest.CheckKilled(c, flag) @@ -57,7 +56,7 @@ func (s *FlagSuite) TestClaimFailureWaitSuccess(c *gc.C) { fix := newFixture(c, errClaimDenied, nil) - fix.Run(c, func(flag *singular.FlagWorker, _ *coretesting.Clock, unblock func()) { + fix.Run(c, func(flag *singular.FlagWorker, _ *testing.Clock, unblock func()) { c.Check(flag.Check(), jc.IsFalse) unblock() err := workertest.CheckKilled(c, flag) @@ -68,7 +67,7 @@ func (s *FlagSuite) TestClaimSuccess(c *gc.C) { fix := newFixture(c, nil, errors.New("should not happen")) - fix.Run(c, func(flag *singular.FlagWorker, clock *coretesting.Clock, unblock func()) { + fix.Run(c, func(flag *singular.FlagWorker, clock *testing.Clock, unblock func()) { <-clock.Alarms() clock.Advance(29 * time.Second) workertest.CheckAlive(c, flag) @@ -78,7 +77,7 @@ func (s *FlagSuite) TestClaimSuccessThenFailure(c *gc.C) { fix := newFixture(c, nil, errClaimDenied) - fix.Run(c, func(flag *singular.FlagWorker, clock *coretesting.Clock, unblock func()) { + fix.Run(c, func(flag *singular.FlagWorker, clock *testing.Clock, unblock func()) { <-clock.Alarms() clock.Advance(30 * time.Second) err := workertest.CheckKilled(c, flag) @@ -89,7 +88,7 @@ func (s *FlagSuite) TestClaimSuccessesThenError(c *gc.C) { fix := newFixture(c) - fix.Run(c, func(flag *singular.FlagWorker, clock *coretesting.Clock, unblock func()) { + fix.Run(c, func(flag *singular.FlagWorker, clock *testing.Clock, unblock func()) { <-clock.Alarms() clock.Advance(time.Minute) <-clock.Alarms() diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/singular/manifold_test.go juju-core-2.0.0/src/github.com/juju/juju/worker/singular/manifold_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/singular/manifold_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/singular/manifold_test.go 2016-10-13 
14:31:49.000000000 +0000 @@ -14,7 +14,6 @@ "github.com/juju/juju/api/base" "github.com/juju/juju/cmd/jujud/agent/engine" - coretesting "github.com/juju/juju/testing" "github.com/juju/juju/worker" "github.com/juju/juju/worker/dependency" dt "github.com/juju/juju/worker/dependency/testing" @@ -48,7 +47,7 @@ func (s *ManifoldSuite) TestOutputBadResult(c *gc.C) { manifold := singular.Manifold(singular.ManifoldConfig{}) fix := newFixture(c) - fix.Run(c, func(flag *singular.FlagWorker, _ *coretesting.Clock, _ func()) { + fix.Run(c, func(flag *singular.FlagWorker, _ *testing.Clock, _ func()) { var out interface{} err := manifold.Output(flag, &out) c.Check(err, gc.ErrorMatches, `expected out to be a \*Flag; got a .*`) @@ -59,7 +58,7 @@ func (s *ManifoldSuite) TestOutputSuccess(c *gc.C) { manifold := singular.Manifold(singular.ManifoldConfig{}) fix := newFixture(c) - fix.Run(c, func(flag *singular.FlagWorker, _ *coretesting.Clock, _ func()) { + fix.Run(c, func(flag *singular.FlagWorker, _ *testing.Clock, _ func()) { var out engine.Flag err := manifold.Output(flag, &out) c.Check(err, jc.ErrorIsNil) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/state/manifold.go juju-core-2.0.0/src/github.com/juju/juju/worker/state/manifold.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/state/manifold.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/state/manifold.go 2016-10-13 14:31:49.000000000 +0000 @@ -8,7 +8,7 @@ "github.com/juju/errors" "github.com/juju/loggo" - "launchpad.net/tomb" + "gopkg.in/tomb.v1" coreagent "github.com/juju/juju/agent" "github.com/juju/juju/state" diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/stateconfigwatcher/manifold.go juju-core-2.0.0/src/github.com/juju/juju/worker/stateconfigwatcher/manifold.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/stateconfigwatcher/manifold.go 2016-08-16 08:56:25.000000000 +0000 +++ 
juju-core-2.0.0/src/github.com/juju/juju/worker/stateconfigwatcher/manifold.go 2016-10-13 14:31:49.000000000 +0000 @@ -8,7 +8,7 @@ "github.com/juju/loggo" "github.com/juju/utils/voyeur" "gopkg.in/juju/names.v2" - "launchpad.net/tomb" + "gopkg.in/tomb.v1" "github.com/juju/juju/agent" "github.com/juju/juju/worker" diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/storageprovisioner/filesystem_ops.go juju-core-2.0.0/src/github.com/juju/juju/worker/storageprovisioner/filesystem_ops.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/storageprovisioner/filesystem_ops.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/storageprovisioner/filesystem_ops.go 2016-10-13 14:31:49.000000000 +0000 @@ -44,7 +44,7 @@ } statuses = append(statuses, params.EntityStatusArgs{ Tag: filesystemParams[i].Tag.String(), - Status: status.StatusError.String(), + Status: status.Error.String(), Info: err.Error(), }) logger.Debugf( @@ -63,7 +63,7 @@ for i, result := range results { statuses = append(statuses, params.EntityStatusArgs{ Tag: filesystemParams[i].Tag.String(), - Status: status.StatusAttaching.String(), + Status: status.Attaching.String(), }) entityStatus := &statuses[len(statuses)-1] if result.Error != nil { @@ -74,7 +74,7 @@ // that we will retry. When we distinguish between // transient and permanent errors, we will set the // status to "error" for permanent errors. - entityStatus.Status = status.StatusPending.String() + entityStatus.Status = status.Pending.String() entityStatus.Info = result.Error.Error() logger.Debugf( "failed to create %s: %v", @@ -148,7 +148,7 @@ p := filesystemAttachmentParams[i] statuses = append(statuses, params.EntityStatusArgs{ Tag: p.Filesystem.String(), - Status: status.StatusAttached.String(), + Status: status.Attached.String(), }) entityStatus := &statuses[len(statuses)-1] if result.Error != nil { @@ -163,7 +163,7 @@ // indicate that we will retry. 
When we distinguish // between transient and permanent errors, we will // set the status to "error" for permanent errors. - entityStatus.Status = status.StatusAttaching.String() + entityStatus.Status = status.Attaching.String() entityStatus.Info = result.Error.Error() logger.Debugf( "failed to attach %s to %s: %v", @@ -216,7 +216,7 @@ } statuses = append(statuses, params.EntityStatusArgs{ Tag: filesystemParams[i].Tag.String(), - Status: status.StatusError.String(), + Status: status.Error.String(), Info: err.Error(), }) logger.Debugf( @@ -250,7 +250,7 @@ reschedule = append(reschedule, ops[tag]) statuses = append(statuses, params.EntityStatusArgs{ Tag: tag.String(), - Status: status.StatusDestroying.String(), + Status: status.Destroying.String(), Info: err.Error(), }) } @@ -297,7 +297,7 @@ // attachment, we'll have to check if // there are any other attachments // before saying the status "detached". - Status: status.StatusDetached.String(), + Status: status.Detached.String(), }) id := params.MachineStorageId{ MachineTag: p.Machine.String(), @@ -306,7 +306,7 @@ entityStatus := &statuses[len(statuses)-1] if err != nil { reschedule = append(reschedule, ops[id]) - entityStatus.Status = status.StatusDetaching.String() + entityStatus.Status = status.Detaching.String() entityStatus.Info = err.Error() logger.Debugf( "failed to detach %s from %s: %v", diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/storageprovisioner/internal/schedule/schedule_test.go juju-core-2.0.0/src/github.com/juju/juju/worker/storageprovisioner/internal/schedule/schedule_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/storageprovisioner/internal/schedule/schedule_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/storageprovisioner/internal/schedule/schedule_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -6,6 +6,7 @@ import ( "time" + jujutesting "github.com/juju/testing" jc "github.com/juju/testing/checkers" gc 
"gopkg.in/check.v1" @@ -20,13 +21,13 @@ var _ = gc.Suite(&scheduleSuite{}) func (*scheduleSuite) TestNextNoEvents(c *gc.C) { - s := schedule.NewSchedule(coretesting.NewClock(time.Time{})) + s := schedule.NewSchedule(jujutesting.NewClock(time.Time{})) next := s.Next() c.Assert(next, gc.IsNil) } func (*scheduleSuite) TestNext(c *gc.C) { - clock := coretesting.NewClock(time.Time{}) + clock := jujutesting.NewClock(time.Time{}) now := clock.Now() s := schedule.NewSchedule(clock) @@ -51,13 +52,13 @@ } func (*scheduleSuite) TestReadyNoEvents(c *gc.C) { - s := schedule.NewSchedule(coretesting.NewClock(time.Time{})) + s := schedule.NewSchedule(jujutesting.NewClock(time.Time{})) ready := s.Ready(time.Now()) c.Assert(ready, gc.HasLen, 0) } func (*scheduleSuite) TestAdd(c *gc.C) { - clock := coretesting.NewClock(time.Time{}) + clock := jujutesting.NewClock(time.Time{}) now := clock.Now() s := schedule.NewSchedule(clock) @@ -80,7 +81,7 @@ } func (*scheduleSuite) TestRemove(c *gc.C) { - clock := coretesting.NewClock(time.Time{}) + clock := jujutesting.NewClock(time.Time{}) now := clock.Now() s := schedule.NewSchedule(clock) @@ -94,11 +95,11 @@ } func (*scheduleSuite) TestRemoveKeyNotFound(c *gc.C) { - s := schedule.NewSchedule(coretesting.NewClock(time.Time{})) + s := schedule.NewSchedule(jujutesting.NewClock(time.Time{})) s.Remove("0") // does not explode } -func assertNextOp(c *gc.C, s *schedule.Schedule, clock *coretesting.Clock, d time.Duration) { +func assertNextOp(c *gc.C, s *schedule.Schedule, clock *jujutesting.Clock, d time.Duration) { next := s.Next() c.Assert(next, gc.NotNil) if d > 0 { @@ -122,7 +123,7 @@ } } -func assertReady(c *gc.C, s *schedule.Schedule, clock *coretesting.Clock, expect ...interface{}) { +func assertReady(c *gc.C, s *schedule.Schedule, clock *jujutesting.Clock, expect ...interface{}) { ready := s.Ready(clock.Now()) c.Assert(ready, jc.DeepEquals, expect) } diff -Nru 
juju-core-2.0~beta15/src/github.com/juju/juju/worker/storageprovisioner/manifold_machine.go juju-core-2.0.0/src/github.com/juju/juju/worker/storageprovisioner/manifold_machine.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/storageprovisioner/manifold_machine.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/storageprovisioner/manifold_machine.go 2016-10-13 14:31:49.000000000 +0000 @@ -62,9 +62,9 @@ // MachineManifold returns a dependency.Manifold that runs a storage provisioner. func MachineManifold(config MachineManifoldConfig) dependency.Manifold { - typedConfig := engine.AgentApiManifoldConfig{ + typedConfig := engine.AgentAPIManifoldConfig{ AgentName: config.AgentName, APICallerName: config.APICallerName, } - return engine.AgentApiManifold(typedConfig, config.newWorker) + return engine.AgentAPIManifold(typedConfig, config.newWorker) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/storageprovisioner/manifold_machine_test.go juju-core-2.0.0/src/github.com/juju/juju/worker/storageprovisioner/manifold_machine_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/storageprovisioner/manifold_machine_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/storageprovisioner/manifold_machine_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -17,7 +17,6 @@ "github.com/juju/juju/apiserver/params" "github.com/juju/juju/cmd/jujud/agent/engine/enginetest" "github.com/juju/juju/state/multiwatcher" - coretesting "github.com/juju/juju/testing" "github.com/juju/juju/worker" "github.com/juju/juju/worker/dependency" "github.com/juju/juju/worker/storageprovisioner" @@ -42,16 +41,16 @@ return nil, nil }, ) - config := enginetest.AgentApiManifoldTestConfig() + config := enginetest.AgentAPIManifoldTestConfig() s.config = storageprovisioner.MachineManifoldConfig{ AgentName: config.AgentName, APICallerName: config.APICallerName, - Clock: 
coretesting.NewClock(defaultClockStart), + Clock: testing.NewClock(defaultClockStart), } } func (s *MachineManifoldSuite) TestMachine(c *gc.C) { - _, err := enginetest.RunAgentApiManifold( + _, err := enginetest.RunAgentAPIManifold( storageprovisioner.MachineManifold(s.config), &fakeAgent{tag: names.NewMachineTag("42")}, &fakeAPIConn{}) @@ -61,7 +60,7 @@ func (s *MachineManifoldSuite) TestMissingClock(c *gc.C) { s.config.Clock = nil - _, err := enginetest.RunAgentApiManifold( + _, err := enginetest.RunAgentAPIManifold( storageprovisioner.MachineManifold(s.config), &fakeAgent{tag: names.NewMachineTag("42")}, &fakeAPIConn{}) @@ -70,7 +69,7 @@ } func (s *MachineManifoldSuite) TestUnit(c *gc.C) { - _, err := enginetest.RunAgentApiManifold( + _, err := enginetest.RunAgentAPIManifold( storageprovisioner.MachineManifold(s.config), &fakeAgent{tag: names.NewUnitTag("foo/0")}, &fakeAPIConn{}) @@ -79,7 +78,7 @@ } func (s *MachineManifoldSuite) TestNonAgent(c *gc.C) { - _, err := enginetest.RunAgentApiManifold( + _, err := enginetest.RunAgentAPIManifold( storageprovisioner.MachineManifold(s.config), &fakeAgent{tag: names.NewUserTag("foo")}, &fakeAPIConn{}) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/storageprovisioner/mock_test.go juju-core-2.0.0/src/github.com/juju/juju/worker/storageprovisioner/mock_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/storageprovisioner/mock_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/storageprovisioner/mock_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -738,6 +738,10 @@ return ch } +func (c *mockClock) NewTimer(d time.Duration) clock.Timer { + return mockTimer{time.NewTimer(0)} +} + func (c *mockClock) AfterFunc(d time.Duration, f func()) clock.Timer { c.MethodCall(c, "AfterFunc", d, f) if c.onAfterFunc != nil { @@ -746,7 +750,15 @@ if d > 0 { c.now = c.now.Add(d) } - return time.AfterFunc(0, f) + return mockTimer{time.AfterFunc(0, f)} +} + +type mockTimer 
struct { + *time.Timer +} + +func (t mockTimer) Chan() <-chan time.Time { + return t.C } type mockStatusSetter struct { diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/storageprovisioner/volume_ops.go juju-core-2.0.0/src/github.com/juju/juju/worker/storageprovisioner/volume_ops.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/storageprovisioner/volume_ops.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/storageprovisioner/volume_ops.go 2016-10-13 14:31:49.000000000 +0000 @@ -38,7 +38,7 @@ } statuses = append(statuses, params.EntityStatusArgs{ Tag: volumeParams[i].Tag.String(), - Status: status.StatusError.String(), + Status: status.Error.String(), Info: err.Error(), }) logger.Debugf( @@ -57,7 +57,7 @@ for i, result := range results { statuses = append(statuses, params.EntityStatusArgs{ Tag: volumeParams[i].Tag.String(), - Status: status.StatusAttaching.String(), + Status: status.Attaching.String(), }) entityStatus := &statuses[len(statuses)-1] if result.Error != nil { @@ -68,7 +68,7 @@ // that we will retry. When we distinguish between // transient and permanent errors, we will set the // status to "error" for permanent errors. 
- entityStatus.Status = status.StatusPending.String() + entityStatus.Status = status.Pending.String() entityStatus.Info = result.Error.Error() logger.Debugf( "failed to create %s: %v", @@ -79,7 +79,7 @@ } volumes = append(volumes, *result.Volume) if result.VolumeAttachment != nil { - entityStatus.Status = status.StatusAttached.String() + entityStatus.Status = status.Attached.String() volumeAttachments = append(volumeAttachments, *result.VolumeAttachment) } } @@ -147,7 +147,7 @@ p := volumeAttachmentParams[i] statuses = append(statuses, params.EntityStatusArgs{ Tag: p.Volume.String(), - Status: status.StatusAttached.String(), + Status: status.Attached.String(), }) entityStatus := &statuses[len(statuses)-1] if result.Error != nil { @@ -162,7 +162,7 @@ // indicate that we will retry. When we distinguish // between transient and permanent errors, we will // set the status to "error" for permanent errors. - entityStatus.Status = status.StatusAttaching.String() + entityStatus.Status = status.Attaching.String() entityStatus.Info = result.Error.Error() logger.Debugf( "failed to attach %s to %s: %v", @@ -212,7 +212,7 @@ } statuses = append(statuses, params.EntityStatusArgs{ Tag: volumeParams[i].Tag.String(), - Status: status.StatusError.String(), + Status: status.Error.String(), Info: err.Error(), }) logger.Debugf( @@ -246,7 +246,7 @@ reschedule = append(reschedule, ops[tag]) statuses = append(statuses, params.EntityStatusArgs{ Tag: tag.String(), - Status: status.StatusDestroying.String(), + Status: status.Destroying.String(), Info: err.Error(), }) } @@ -289,7 +289,7 @@ // attachment, we'll have to check if // there are any other attachments // before saying the status "detached". 
- Status: status.StatusDetached.String(), + Status: status.Detached.String(), }) id := params.MachineStorageId{ MachineTag: p.Machine.String(), @@ -298,7 +298,7 @@ entityStatus := &statuses[len(statuses)-1] if err != nil { reschedule = append(reschedule, ops[id]) - entityStatus.Status = status.StatusDetaching.String() + entityStatus.Status = status.Detaching.String() entityStatus.Info = err.Error() logger.Debugf( "failed to detach %s from %s: %v", diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/terminationworker/worker.go juju-core-2.0.0/src/github.com/juju/juju/worker/terminationworker/worker.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/terminationworker/worker.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/terminationworker/worker.go 2016-10-13 14:31:49.000000000 +0000 @@ -8,7 +8,7 @@ "os/signal" "syscall" - "launchpad.net/tomb" + "gopkg.in/tomb.v1" "github.com/juju/juju/worker" ) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/toolsversionchecker/manifold.go juju-core-2.0.0/src/github.com/juju/juju/worker/toolsversionchecker/manifold.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/toolsversionchecker/manifold.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/toolsversionchecker/manifold.go 2016-10-13 14:31:49.000000000 +0000 @@ -20,13 +20,13 @@ ) // ManifoldConfig defines the names of the manifolds on which a Manifold will depend. -type ManifoldConfig engine.AgentApiManifoldConfig +type ManifoldConfig engine.AgentAPIManifoldConfig // Manifold returns a dependency manifold that runs a toolsversionchecker worker, // using the api connection resource named in the supplied config. 
func Manifold(config ManifoldConfig) dependency.Manifold { - typedConfig := engine.AgentApiManifoldConfig(config) - return engine.AgentApiManifold(typedConfig, newWorker) + typedConfig := engine.AgentAPIManifoldConfig(config) + return engine.AgentAPIManifold(typedConfig, newWorker) } func newWorker(a agent.Agent, apiCaller base.APICaller) (worker.Worker, error) { diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/toolsversionchecker/manifold_test.go juju-core-2.0.0/src/github.com/juju/juju/worker/toolsversionchecker/manifold_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/toolsversionchecker/manifold_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/toolsversionchecker/manifold_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -37,8 +37,8 @@ } func (s *ManifoldSuite) TestMachine(c *gc.C) { - config := toolsversionchecker.ManifoldConfig(enginetest.AgentApiManifoldTestConfig()) - _, err := enginetest.RunAgentApiManifold( + config := toolsversionchecker.ManifoldConfig(enginetest.AgentAPIManifoldTestConfig()) + _, err := enginetest.RunAgentAPIManifold( toolsversionchecker.Manifold(config), &fakeAgent{tag: names.NewMachineTag("42")}, mockAPICaller(multiwatcher.JobManageModel)) @@ -47,8 +47,8 @@ } func (s *ManifoldSuite) TestMachineNotModelManagerErrors(c *gc.C) { - config := toolsversionchecker.ManifoldConfig(enginetest.AgentApiManifoldTestConfig()) - _, err := enginetest.RunAgentApiManifold( + config := toolsversionchecker.ManifoldConfig(enginetest.AgentAPIManifoldTestConfig()) + _, err := enginetest.RunAgentAPIManifold( toolsversionchecker.Manifold(config), &fakeAgent{tag: names.NewMachineTag("42")}, mockAPICaller(multiwatcher.JobHostUnits)) @@ -57,8 +57,8 @@ } func (s *ManifoldSuite) TestNonMachineAgent(c *gc.C) { - config := toolsversionchecker.ManifoldConfig(enginetest.AgentApiManifoldTestConfig()) - _, err := enginetest.RunAgentApiManifold( + config := 
toolsversionchecker.ManifoldConfig(enginetest.AgentAPIManifoldTestConfig()) + _, err := enginetest.RunAgentAPIManifold( toolsversionchecker.Manifold(config), &fakeAgent{tag: names.NewUnitTag("foo/0")}, mockAPICaller("")) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/txnpruner/txnpruner.go juju-core-2.0.0/src/github.com/juju/juju/worker/txnpruner/txnpruner.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/txnpruner/txnpruner.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/txnpruner/txnpruner.go 2016-10-13 14:31:49.000000000 +0000 @@ -7,6 +7,7 @@ "time" "github.com/juju/errors" + "github.com/juju/utils/clock" "github.com/juju/juju/worker" ) @@ -19,22 +20,15 @@ // New returns a worker which periodically prunes the data for // completed transactions. -func New(tp TransactionPruner, interval time.Duration) worker.Worker { +func New(tp TransactionPruner, interval time.Duration, clock clock.Clock) worker.Worker { return worker.NewSimpleWorker(func(stopCh <-chan struct{}) error { - // Use a timer rather than a ticker because pruning could - // sometimes take a while and we don't want pruning attempts - // to occur back-to-back. 
- // TODO(fwereade): 2016-03-17 lp:1558657 - timer := time.NewTimer(interval) - defer timer.Stop() for { select { - case <-timer.C: + case <-clock.After(interval): err := tp.MaybePruneTransactions() if err != nil { return errors.Annotate(err, "pruning failed, txnpruner stopping") } - timer.Reset(interval) case <-stopCh: return nil } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/txnpruner/txnpruner_test.go juju-core-2.0.0/src/github.com/juju/juju/worker/txnpruner/txnpruner_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/txnpruner/txnpruner_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/txnpruner/txnpruner_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -6,57 +6,66 @@ import ( "time" + "github.com/juju/testing" jc "github.com/juju/testing/checkers" + "github.com/juju/utils/clock" gc "gopkg.in/check.v1" - "github.com/juju/juju/testing" + coretesting "github.com/juju/juju/testing" "github.com/juju/juju/worker/txnpruner" ) type TxnPrunerSuite struct { - testing.BaseSuite + coretesting.BaseSuite } var _ = gc.Suite(&TxnPrunerSuite{}) func (s *TxnPrunerSuite) TestPrunes(c *gc.C) { fakePruner := newFakeTransactionPruner() - interval := 10 * time.Millisecond - p := txnpruner.New(fakePruner, interval) + testClock := testing.NewClock(time.Now()) + interval := time.Minute + p := txnpruner.New(fakePruner, interval, testClock) defer p.Kill() - var t0 time.Time + select { + case <-testClock.Alarms(): + case <-time.After(coretesting.LongWait): + c.Fatalf("timed out waiting for worker to stat") + } + c.Logf("pruner running and waiting: %s (%s)", testClock.Now(), time.Now()) + // Show that we prune every minute for i := 0; i < 5; i++ { + testClock.Advance(interval) + c.Logf("loop %d: %s (%s)", i, testClock.Now(), time.Now()) select { case <-fakePruner.pruneCh: - t1 := time.Now() - if i > 0 { - // Check that pruning runs at the expected interval - // (but not the first time around as we don't know - // 
when the worker actually started). - td := t1.Sub(t0) - c.Assert(td >= interval, jc.IsTrue, gc.Commentf("td=%s", td)) - } - t0 = t1 - case <-time.After(testing.LongWait): + case <-time.After(coretesting.LongWait): c.Fatal("timed out waiting for pruning to happen") } + // Now we need to wait for the txn pruner to call clock.After again + // before we advance the clock, or it will be waiting for the wrong time. + select { + case <-testClock.Alarms(): + case <-time.After(coretesting.LongWait): + c.Fatalf("timed out waiting for worker to loop around") + } } } func (s *TxnPrunerSuite) TestStops(c *gc.C) { success := make(chan bool) check := func() { - p := txnpruner.New(newFakeTransactionPruner(), time.Minute) + p := txnpruner.New(newFakeTransactionPruner(), time.Minute, clock.WallClock) p.Kill() - c.Assert(p.Wait(), jc.ErrorIsNil) + c.Check(p.Wait(), jc.ErrorIsNil) success <- true } go check() select { case <-success: - case <-time.After(testing.LongWait): + case <-time.After(coretesting.LongWait): c.Fatal("timed out waiting for worker to stop") } } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/undertaker/undertaker.go juju-core-2.0.0/src/github.com/juju/juju/worker/undertaker/undertaker.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/undertaker/undertaker.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/undertaker/undertaker.go 2016-10-13 14:31:49.000000000 +0000 @@ -97,7 +97,7 @@ // checking the emptiness criteria before // attempting to remove the model. if err := u.setStatus( - status.StatusDestroying, + status.Destroying, "cleaning up cloud resources", ); err != nil { return errors.Trace(err) @@ -125,7 +125,7 @@ // Now the model is known to be hosted and dead, we can tidy up any // provider resources it might have used. 
if err := u.setStatus( - status.StatusDestroying, "tearing down cloud environment", + status.Destroying, "tearing down cloud environment", ); err != nil { return errors.Trace(err) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/undertaker/undertaker_test.go juju-core-2.0.0/src/github.com/juju/juju/worker/undertaker/undertaker_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/undertaker/undertaker_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/undertaker/undertaker_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -72,11 +72,11 @@ workertest.CheckKilled(c, w) }) stub.CheckCall( - c, 1, "SetStatus", status.StatusDestroying, + c, 1, "SetStatus", status.Destroying, "cleaning up cloud resources", map[string]interface{}(nil), ) stub.CheckCall( - c, 4, "SetStatus", status.StatusDestroying, + c, 4, "SetStatus", status.Destroying, "tearing down cloud environment", map[string]interface{}(nil), ) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/unitassigner/manifold.go juju-core-2.0.0/src/github.com/juju/juju/worker/unitassigner/manifold.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/unitassigner/manifold.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/unitassigner/manifold.go 2016-10-13 14:31:49.000000000 +0000 @@ -14,12 +14,12 @@ ) // ManifoldConfig describes the resources used by a unitassigner worker. -type ManifoldConfig engine.ApiManifoldConfig +type ManifoldConfig engine.APIManifoldConfig // Manifold returns a Manifold that runs a unitassigner worker. 
func Manifold(config ManifoldConfig) dependency.Manifold { - return engine.ApiManifold( - engine.ApiManifoldConfig(config), + return engine.APIManifold( + engine.APIManifoldConfig(config), manifoldStart, ) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/unitassigner/unitassigner.go juju-core-2.0.0/src/github.com/juju/juju/worker/unitassigner/unitassigner.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/unitassigner/unitassigner.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/unitassigner/unitassigner.go 2016-10-13 14:31:49.000000000 +0000 @@ -77,7 +77,7 @@ for unit, err := range failures { args.Entities[x] = params.EntityStatusArgs{ Tag: unit, - Status: status.StatusError.String(), + Status: status.Error.String(), Info: err.Error(), } x++ diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/unitassigner/unitassigner_test.go juju-core-2.0.0/src/github.com/juju/juju/worker/unitassigner/unitassigner_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/unitassigner/unitassigner_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/unitassigner/unitassigner_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -63,7 +63,7 @@ c.Assert(entities, gc.HasLen, 1) c.Assert(entities[0], gc.DeepEquals, params.EntityStatusArgs{ Tag: "unit-foo-0", - Status: status.StatusError.String(), + Status: status.Error.String(), Info: e.Error(), }) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/actions/resolver.go juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/actions/resolver.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/actions/resolver.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/actions/resolver.go 2016-10-13 14:31:49.000000000 +0000 @@ -52,14 +52,13 @@ return opFactory.NewAction(nextAction) } case operation.RunAction: - // TODO(fwereade): we *should* 
handle interrupted actions, and make sure - // they're marked as failed, but that's not for now. if localState.Hook != nil { logger.Infof("found incomplete action %q; ignoring", localState.ActionId) logger.Infof("recommitting prior %q hook", localState.Hook.Kind) return opFactory.NewSkipHook(*localState.Hook) } else { logger.Infof("%q hook is nil", operation.RunAction) + return opFactory.NewFailAction(*localState.ActionId) } case operation.Continue: return opFactory.NewAction(nextAction) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/agent.go juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/agent.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/agent.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/agent.go 2016-10-13 14:31:49.000000000 +0000 @@ -25,7 +25,7 @@ if err == nil { return } - err2 := setAgentStatus(u, status.StatusFailed, userMessage, nil) + err2 := setAgentStatus(u, status.Failed, userMessage, nil) if err2 != nil { logger.Errorf("updating agent status: %v", err2) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/charm/bundles.go juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/charm/bundles.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/charm/bundles.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/charm/bundles.go 2016-10-13 14:31:49.000000000 +0000 @@ -19,7 +19,7 @@ type Downloader interface { // Download starts a new charm archive download, waits for it to // complete, and returns the local name of the file. 
- Download(req downloader.Request, abort <-chan struct{}) (string, error) + Download(req downloader.Request) (string, error) } // BundlesDir is responsible for storing and retrieving charm bundles @@ -36,7 +36,6 @@ HostnameVerification: utils.NoVerifySSLHostnames, }) } - return &BundlesDir{ path: path, downloader: dlr, @@ -71,11 +70,12 @@ expectedSha256, err := info.ArchiveSha256() req := downloader.Request{ URL: curl, - TargetDir: d.downloadsPath(), + TargetDir: downloadsPath(d.path), Verify: downloader.NewSha256Verifier(expectedSha256), + Abort: abort, } logger.Infof("downloading %s from API server", info.URL()) - filename, err := d.downloader.Download(req, abort) + filename, err := d.downloader.Download(req) if err != nil { return errors.Annotatef(err, "failed to download charm %q from API server", info.URL()) } @@ -103,8 +103,16 @@ return path.Join(d.path, charm.Quote(url.String())) } +// ClearDownloads removes any entries in the temporary bundle download +// directory. It is intended to be called on uniter startup. +func ClearDownloads(bundlesDir string) error { + downloadDir := downloadsPath(bundlesDir) + err := os.RemoveAll(downloadDir) + return errors.Annotate(err, "unable to clear bundle downloads") +} + // downloadsPath returns the path to the directory into which charms are // downloaded. 
-func (d *BundlesDir) downloadsPath() string { - return path.Join(d.path, "downloads") +func downloadsPath(bunsDir string) string { + return path.Join(bunsDir, "downloads") } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/charm/bundles_test.go juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/charm/bundles_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/charm/bundles_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/charm/bundles_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,10 +4,12 @@ package charm_test import ( + "io/ioutil" "os" "path/filepath" "regexp" + jujutesting "github.com/juju/testing" jc "github.com/juju/testing/checkers" "github.com/juju/utils" gc "gopkg.in/check.v1" @@ -98,12 +100,18 @@ func (s *BundlesDirSuite) TestGet(c *gc.C) { basedir := c.MkDir() - bunsdir := filepath.Join(basedir, "random", "bundles") + bunsDir := filepath.Join(basedir, "random", "bundles") downloader := api.NewCharmDownloader(s.st.Client()) - d := charm.NewBundlesDir(bunsdir, downloader) + d := charm.NewBundlesDir(bunsDir, downloader) + + checkDownloadsEmpty := func() { + files, err := ioutil.ReadDir(filepath.Join(bunsDir, "downloads")) + c.Assert(err, jc.ErrorIsNil) + c.Check(files, gc.HasLen, 0) + } // Check it doesn't get created until it's needed. - _, err := os.Stat(bunsdir) + _, err := os.Stat(bunsDir) c.Assert(err, jc.Satisfies, os.IsNotExist) // Add a charm to state that we can try to get. @@ -111,32 +119,37 @@ // Try to get the charm when the content doesn't match. 
_, err = d.Read(&fakeBundleInfo{apiCharm, nil, "..."}, nil) - c.Assert(err, gc.ErrorMatches, regexp.QuoteMeta(`failed to download charm "cs:quantal/dummy-1" from API server: `)+`expected sha256 "...", got ".*"`) + c.Check(err, gc.ErrorMatches, regexp.QuoteMeta(`failed to download charm "cs:quantal/dummy-1" from API server: `)+`expected sha256 "...", got ".*"`) + checkDownloadsEmpty() // Try to get a charm whose bundle doesn't exist. otherURL := corecharm.MustParseURL("cs:quantal/spam-1") _, err = d.Read(&fakeBundleInfo{apiCharm, otherURL, ""}, nil) - c.Assert(err, gc.ErrorMatches, regexp.QuoteMeta(`failed to download charm "cs:quantal/spam-1" from API server: `)+`.* not found`) + c.Check(err, gc.ErrorMatches, regexp.QuoteMeta(`failed to download charm "cs:quantal/spam-1" from API server: `)+`.* not found`) + checkDownloadsEmpty() // Get a charm whose bundle exists and whose content matches. ch, err := d.Read(apiCharm, nil) c.Assert(err, jc.ErrorIsNil) assertCharm(c, ch, sch) + checkDownloadsEmpty() // Get the same charm again, without preparing a response from the server. ch, err = d.Read(apiCharm, nil) c.Assert(err, jc.ErrorIsNil) assertCharm(c, ch, sch) + checkDownloadsEmpty() // Check the abort chan is honoured. 
- err = os.RemoveAll(bunsdir) + err = os.RemoveAll(bunsDir) c.Assert(err, jc.ErrorIsNil) abort := make(chan struct{}) close(abort) ch, err = d.Read(apiCharm, abort) - c.Assert(ch, gc.IsNil) - c.Assert(err, gc.ErrorMatches, regexp.QuoteMeta(`failed to download charm "cs:quantal/dummy-1" from API server: aborted`)) + c.Check(ch, gc.IsNil) + c.Check(err, gc.ErrorMatches, regexp.QuoteMeta(`failed to download charm "cs:quantal/dummy-1" from API server: download aborted`)) + checkDownloadsEmpty() } func assertCharm(c *gc.C, bun charm.Bundle, sch *state.Charm) { @@ -145,3 +158,48 @@ c.Assert(actual.Meta(), gc.DeepEquals, sch.Meta()) c.Assert(actual.Config(), gc.DeepEquals, sch.Config()) } + +type ClearDownloadsSuite struct { + jujutesting.IsolationSuite +} + +var _ = gc.Suite(&ClearDownloadsSuite{}) + +func (s *ClearDownloadsSuite) TestWorks(c *gc.C) { + baseDir := c.MkDir() + bunsDir := filepath.Join(baseDir, "bundles") + downloadDir := filepath.Join(bunsDir, "downloads") + c.Assert(os.MkdirAll(downloadDir, 0777), jc.ErrorIsNil) + c.Assert(ioutil.WriteFile(filepath.Join(downloadDir, "stuff"), []byte("foo"), 0755), jc.ErrorIsNil) + c.Assert(ioutil.WriteFile(filepath.Join(downloadDir, "thing"), []byte("bar"), 0755), jc.ErrorIsNil) + + err := charm.ClearDownloads(bunsDir) + c.Assert(err, jc.ErrorIsNil) + checkMissing(c, downloadDir) +} + +func (s *ClearDownloadsSuite) TestEmptyOK(c *gc.C) { + baseDir := c.MkDir() + bunsDir := filepath.Join(baseDir, "bundles") + downloadDir := filepath.Join(bunsDir, "downloads") + c.Assert(os.MkdirAll(downloadDir, 0777), jc.ErrorIsNil) + + err := charm.ClearDownloads(bunsDir) + c.Assert(err, jc.ErrorIsNil) + checkMissing(c, downloadDir) +} + +func (s *ClearDownloadsSuite) TestMissingOK(c *gc.C) { + baseDir := c.MkDir() + bunsDir := filepath.Join(baseDir, "bundles") + + err := charm.ClearDownloads(bunsDir) + c.Assert(err, jc.ErrorIsNil) +} + +func checkMissing(c *gc.C, p string) { + _, err := os.Stat(p) + if !os.IsNotExist(err) { + 
c.Fatalf("checking %s is missing: %v", p, err) + } +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/charm/charm.go juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/charm/charm.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/charm/charm.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/charm/charm.go 2016-10-13 14:31:49.000000000 +0000 @@ -66,14 +66,6 @@ // can be resolved by user intervention will be signalled by returning // ErrConflict. Deploy() error - - // NotifyRevert must be called when a conflicted deploy is abandoned, in - // preparation for a new upgrade. - NotifyRevert() error - - // NotifyResolved must be called when the cause of a deploy conflict has - // been resolved, and a new deploy attempt will be made. - NotifyResolved() error } // ErrConflict indicates that an upgrade failed and cannot be resolved diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/charm/converter.go juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/charm/converter.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/charm/converter.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/charm/converter.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,119 +4,21 @@ package charm import ( - "fmt" "os" "path/filepath" "github.com/juju/utils/set" "github.com/juju/utils/symlink" - "gopkg.in/juju/charm.v6-unstable" ) -// NewDeployer returns a Deployer of whatever kind is currently in use for the -// supplied paths, or a manifest deployer if none exists yet. It is a var so -// that it can be patched for uniter tests. +// NewDeployer returns a manifest deployer. It is a var so that it can be +// patched for uniter tests. 
var NewDeployer = newDeployer func newDeployer(charmPath, dataPath string, bundles BundleReader) (Deployer, error) { - gitDeployer := NewGitDeployer(charmPath, dataPath, bundles).(*gitDeployer) - if exists, err := gitDeployer.current.Exists(); err != nil { - return nil, err - } else if exists { - return gitDeployer, nil - } return NewManifestDeployer(charmPath, dataPath, bundles), nil } -// FixDeployer ensures that the supplied Deployer address points to a manifest -// deployer. If a git deployer is passed into FixDeployer, it will be converted -// to a manifest deployer, and the git deployer data will be removed. The charm -// is assumed to be in a stable state; this should not be called if there is any -// chance the git deployer is partway through an upgrade, or in a conflicted state. -// It is a var so that it can be patched for uniter tests. -var FixDeployer = fixDeployer - -func fixDeployer(deployer *Deployer) error { - if manifestDeployer, ok := (*deployer).(*manifestDeployer); ok { - // This works around a race at the very end of this func, in which - // the process could have been killed after removing the "current" - // symlink but before removing the orphan repos from the data dir. - collectGitOrphans(manifestDeployer.dataPath) - return nil - } - gitDeployer, ok := (*deployer).(*gitDeployer) - if !ok { - return fmt.Errorf("cannot fix unknown deployer type: %T", *deployer) - } - logger.Infof("converting git-based deployer to manifest deployer") - manifestDeployer := &manifestDeployer{ - charmPath: gitDeployer.target.Path(), - dataPath: gitDeployer.dataPath, - bundles: gitDeployer.bundles, - } - - // Ensure that the staged charm matches the deployed charm: it's possible - // that the uniter was stopped after staging, but before deploying, a new - // bundle. 
- deployedURL, err := ReadCharmURL(manifestDeployer.CharmPath(CharmURLPath)) - if err != nil && !os.IsNotExist(err) { - return err - } - - // If we deployed something previously, we need to copy some state over. - if deployedURL != nil { - if err := ensureCurrentGitCharm(gitDeployer, deployedURL); err != nil { - return err - } - // Now we know we've got the right stuff checked out in gitDeployer.current, - // we can turn that into a manifest that will be used in future upgrades... - // even if users desparate for space deleted the original bundle. - manifest, err := gitManifest(gitDeployer.current.Path()) - if err != nil { - return err - } - if err := manifestDeployer.storeManifest(deployedURL, manifest); err != nil { - return err - } - } - - // We're left with the staging repo and a symlink to it. We decide deployer - // type by checking existence of the symlink's target, so we start off by - // trashing the symlink itself; collectGitOrphans will then delete all the - // original deployer's repos. - if err := os.RemoveAll(gitDeployer.current.Path()); err != nil { - return err - } - // Note potential race alluded to at the start of this func. - collectGitOrphans(gitDeployer.dataPath) - - // Phew. Done. - *deployer = manifestDeployer - return nil -} - -// ensureCurrentGitCharm checks out progressively earlier versions of the -// gitDeployer's current staging repo, until it finds one in which the -// content of charmURLPath matches the supplied charm URL. 
-func ensureCurrentGitCharm(gitDeployer *gitDeployer, expectURL *charm.URL) error { - i := 1 - repo := gitDeployer.current - for { - stagedURL, err := gitDeployer.current.ReadCharmURL() - if err != nil { - return err - } - logger.Debugf("staged url: %s", stagedURL) - if *stagedURL == *expectURL { - return nil - } - if err := repo.cmd("checkout", fmt.Sprintf("master~%d", i)); err != nil { - return err - } - i++ - } -} - // gitManifest returns every file path in the supplied directory, *except* for: // * paths below .git, because we don't need to track every file: we just // want them all gone diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/charm/converter_test.go juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/charm/converter_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/charm/converter_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/charm/converter_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,131 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. 
- -package charm_test - -import ( - jc "github.com/juju/testing/checkers" - ft "github.com/juju/testing/filetesting" - gc "gopkg.in/check.v1" - - "github.com/juju/juju/testing" - "github.com/juju/juju/worker/uniter/charm" -) - -type ConverterSuite struct { - testing.GitSuite - targetPath string - dataPath string - bundles *bundleReader -} - -var _ = gc.Suite(&ConverterSuite{}) - -func (s *ConverterSuite) SetUpTest(c *gc.C) { - testing.SkipIfGitNotAvailable(c) - s.GitSuite.SetUpTest(c) - s.targetPath = c.MkDir() - s.dataPath = c.MkDir() - s.bundles = &bundleReader{} -} - -func (s *ConverterSuite) TestNewDeployerCreatesManifestDeployer(c *gc.C) { - deployer, err := charm.NewDeployer(s.targetPath, s.dataPath, s.bundles) - c.Assert(err, jc.ErrorIsNil) - c.Assert(deployer, jc.Satisfies, charm.IsManifestDeployer) -} - -func (s *ConverterSuite) TestNewDeployerCreatesGitDeployerOnceStaged(c *gc.C) { - gitDeployer := charm.NewGitDeployer(s.targetPath, s.dataPath, s.bundles) - info := s.bundles.AddBundle(c, charmURL(1), mockBundle{}) - err := gitDeployer.Stage(info, nil) - c.Assert(err, jc.ErrorIsNil) - - deployer, err := charm.NewDeployer(s.targetPath, s.dataPath, s.bundles) - c.Assert(err, jc.ErrorIsNil) - c.Assert(deployer, jc.Satisfies, charm.IsGitDeployer) -} - -func (s *ConverterSuite) TestConvertGitDeployerBeforeDeploy(c *gc.C) { - gitDeployer := charm.NewGitDeployer(s.targetPath, s.dataPath, s.bundles) - info := s.bundles.AddBundle(c, charmURL(1), mockBundle{}) - err := gitDeployer.Stage(info, nil) - c.Assert(err, jc.ErrorIsNil) - - deployer, err := charm.NewDeployer(s.targetPath, s.dataPath, s.bundles) - c.Assert(err, jc.ErrorIsNil) - err = charm.FixDeployer(&deployer) - c.Assert(err, jc.ErrorIsNil) - c.Assert(deployer, jc.Satisfies, charm.IsManifestDeployer) - ft.Removed{"current"}.Check(c, s.dataPath) - - err = deployer.Stage(info, nil) - c.Assert(err, jc.ErrorIsNil) - err = deployer.Deploy() - c.Assert(err, jc.ErrorIsNil) - ft.Removed{".git"}.Check(c, 
s.targetPath) -} - -func (s *ConverterSuite) TestConvertGitDeployerAfterDeploy(c *gc.C) { - gitDeployer := charm.NewGitDeployer(s.targetPath, s.dataPath, s.bundles) - info1 := s.bundles.AddBundle(c, charmURL(1), mockBundle{}) - err := gitDeployer.Stage(info1, nil) - c.Assert(err, jc.ErrorIsNil) - err = gitDeployer.Deploy() - c.Assert(err, jc.ErrorIsNil) - - deployer, err := charm.NewDeployer(s.targetPath, s.dataPath, s.bundles) - c.Assert(err, jc.ErrorIsNil) - err = charm.FixDeployer(&deployer) - c.Assert(err, jc.ErrorIsNil) - c.Assert(deployer, jc.Satisfies, charm.IsManifestDeployer) - ft.Removed{"current"}.Check(c, s.dataPath) - ft.Dir{"manifests", 0755}.Check(c, s.dataPath) - - info2 := s.bundles.AddBundle(c, charmURL(2), mockBundle{}) - err = deployer.Stage(info2, nil) - c.Assert(err, jc.ErrorIsNil) - err = deployer.Deploy() - c.Assert(err, jc.ErrorIsNil) - ft.Removed{".git"}.Check(c, s.targetPath) -} - -func (s *ConverterSuite) TestPathological(c *gc.C) { - initial := s.bundles.AddCustomBundle(c, charmURL(1), func(path string) { - ft.File{"common", "initial", 0644}.Create(c, path) - ft.File{"initial", "blah", 0644}.Create(c, path) - }) - staged := s.bundles.AddCustomBundle(c, charmURL(2), func(path string) { - ft.File{"common", "staged", 0644}.Create(c, path) - ft.File{"user", "badwrong", 0644}.Create(c, path) - }) - final := s.bundles.AddCustomBundle(c, charmURL(3), func(path string) { - ft.File{"common", "final", 0644}.Create(c, path) - ft.File{"final", "blah", 0644}.Create(c, path) - }) - - gitDeployer := charm.NewGitDeployer(s.targetPath, s.dataPath, s.bundles) - err := gitDeployer.Stage(initial, nil) - c.Assert(err, jc.ErrorIsNil) - err = gitDeployer.Deploy() - c.Assert(err, jc.ErrorIsNil) - - preserveUser := ft.File{"user", "preserve", 0644}.Create(c, s.targetPath) - err = gitDeployer.Stage(staged, nil) - c.Assert(err, jc.ErrorIsNil) - - deployer, err := charm.NewDeployer(s.targetPath, s.dataPath, s.bundles) - c.Assert(err, jc.ErrorIsNil) - err = 
charm.FixDeployer(&deployer) - c.Assert(err, jc.ErrorIsNil) - - err = deployer.Stage(final, nil) - c.Assert(err, jc.ErrorIsNil) - err = deployer.Deploy() - c.Assert(err, jc.ErrorIsNil) - ft.Removed{".git"}.Check(c, s.targetPath) - ft.Removed{"initial"}.Check(c, s.targetPath) - ft.Removed{"staged"}.Check(c, s.targetPath) - ft.File{"common", "final", 0644}.Check(c, s.targetPath) - preserveUser.Check(c, s.targetPath) -} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/charm/export_test.go juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/charm/export_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/charm/export_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/charm/export_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,24 +0,0 @@ -// Copyright 2012-2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package charm - -// exported so we can get the deployer path from tests. -func GitDeployerDataPath(d Deployer) string { - return d.(*gitDeployer).dataPath -} - -// exported so we can get the deployer current git repo from tests. -func GitDeployerCurrent(d Deployer) *GitDir { - return d.(*gitDeployer).current -} - -func IsGitDeployer(d Deployer) bool { - _, ok := d.(*gitDeployer) - return ok -} - -func IsManifestDeployer(d Deployer) bool { - _, ok := d.(*manifestDeployer) - return ok -} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/charm/git_deployer.go juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/charm/git_deployer.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/charm/git_deployer.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/charm/git_deployer.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,233 +0,0 @@ -// Copyright 2012-2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. 
- -package charm - -import ( - "fmt" - "io/ioutil" - "os" - "path/filepath" - "time" - - "github.com/juju/utils/symlink" -) - -const ( - gitUpdatePrefix = "update-" - gitInstallPrefix = "install-" - gitCurrentPath = "current" -) - -// gitDeployer maintains a git repository tracking a series of charm versions, -// and can install and upgrade charm deployments to the current version. -type gitDeployer struct { - target *GitDir - dataPath string - bundles BundleReader - current *GitDir -} - -// NewGitDeployer creates a new Deployer which stores its state in dataPath, -// and installs or upgrades the charm at charmPath. -func NewGitDeployer(charmPath, dataPath string, bundles BundleReader) Deployer { - return &gitDeployer{ - target: NewGitDir(charmPath), - dataPath: dataPath, - bundles: bundles, - current: NewGitDir(filepath.Join(dataPath, gitCurrentPath)), - } -} - -func (d *gitDeployer) Stage(info BundleInfo, abort <-chan struct{}) error { - // Make sure we've got an actual bundle available. - bundle, err := d.bundles.Read(info, abort) - if err != nil { - return err - } - - // Read present state of current. - if err := os.MkdirAll(d.dataPath, 0755); err != nil { - return err - } - defer collectGitOrphans(d.dataPath) - srcExists, err := d.current.Exists() - if err != nil { - return err - } - url := info.URL() - if srcExists { - prevURL, err := d.current.ReadCharmURL() - if err != nil { - return err - } - if *url == *prevURL { - return nil - } - } - - // Prepare a fresh repository for the update, using current's history - // if it exists. - updatePath, err := d.newDir(gitUpdatePrefix) - if err != nil { - return err - } - var repo *GitDir - if srcExists { - repo, err = d.current.Clone(updatePath) - } else { - repo = NewGitDir(updatePath) - err = repo.Init() - } - if err != nil { - return err - } - - // Write the desired new state and commit. 
- if err = bundle.ExpandTo(updatePath); err != nil { - return err - } - if err = repo.WriteCharmURL(url); err != nil { - return err - } - if err = repo.Snapshotf("Imported charm %q.", url); err != nil { - return err - } - - // Atomically rename fresh repository to current. - tmplink := filepath.Join(updatePath, "tmplink") - if err = symlink.New(updatePath, tmplink); err != nil { - return err - } - return os.Rename(tmplink, d.current.Path()) -} - -func (d *gitDeployer) Deploy() (err error) { - defer func() { - if err == ErrConflict { - logger.Warningf("charm deployment completed with conflicts") - } else if err != nil { - err = fmt.Errorf("charm deployment failed: %s", err) - logger.Errorf("%v", err) - } else { - logger.Infof("charm deployment succeeded") - } - }() - if exists, err := d.current.Exists(); err != nil { - return err - } else if !exists { - return fmt.Errorf("no charm set") - } - if exists, err := d.target.Exists(); err != nil { - return err - } else if !exists { - return d.install() - } - return d.upgrade() -} - -func (d *gitDeployer) NotifyRevert() error { - return d.target.Revert() -} - -func (d *gitDeployer) NotifyResolved() error { - return d.target.Snapshotf("Upgrade conflict resolved.") -} - -// install creates a new deployment of current, and atomically moves it to -// target. -func (d *gitDeployer) install() error { - defer collectGitOrphans(d.dataPath) - logger.Infof("preparing new charm deployment") - url, err := d.current.ReadCharmURL() - if err != nil { - return err - } - installPath, err := d.newDir(gitInstallPrefix) - if err != nil { - return err - } - repo := NewGitDir(installPath) - if err = repo.Init(); err != nil { - return err - } - if err = repo.Pull(d.current); err != nil { - return err - } - if err = repo.Snapshotf("Deployed charm %q.", url); err != nil { - return err - } - logger.Infof("deploying charm") - return os.Rename(installPath, d.target.Path()) -} - -// upgrade pulls from current into target. 
If target has local changes, but -// no conflicts, it will be snapshotted before any changes are made. -func (d *gitDeployer) upgrade() error { - logger.Infof("preparing charm upgrade") - url, err := d.current.ReadCharmURL() - if err != nil { - return err - } - if err := d.target.Init(); err != nil { - return err - } - if dirty, err := d.target.Dirty(); err != nil { - return err - } else if dirty { - if conflicted, err := d.target.Conflicted(); err != nil { - return err - } else if !conflicted { - logger.Infof("snapshotting dirty charm before upgrade") - if err = d.target.Snapshotf("Pre-upgrade snapshot."); err != nil { - return err - } - } - } - logger.Infof("deploying charm") - if err := d.target.Pull(d.current); err != nil { - return err - } - return d.target.Snapshotf("Upgraded charm to %q.", url) -} - -// collectGitOrphans deletes all repos in dataPath except the one pointed to by -// a git deployer's "current" symlink. -// Errors are generally ignored; some are logged. If current does not exist, *all* -// repos are orphans, and all will be deleted; this should only be the case when -// converting a gitDeployer to a manifestDeployer. -func collectGitOrphans(dataPath string) { - current, err := symlink.Read(filepath.Join(dataPath, gitCurrentPath)) - if os.IsNotExist(err) { - logger.Debugf("no current staging repo") - } else if err != nil { - logger.Warningf("cannot read current staging repo: %v", err) - return - } else if !filepath.IsAbs(current) { - current = filepath.Join(dataPath, current) - } - orphans, err := filepath.Glob(filepath.Join(dataPath, fmt.Sprintf("%s*", gitUpdatePrefix))) - if err != nil { - return - } - installOrphans, err := filepath.Glob(filepath.Join(dataPath, fmt.Sprintf("%s*", gitInstallPrefix))) - if err != nil { - return - } - orphans = append(orphans, installOrphans...) 
- for _, repoPath := range orphans { - if repoPath != dataPath && repoPath != current { - if err = os.RemoveAll(repoPath); err != nil { - logger.Warningf("failed to remove orphan repo at %s: %s", repoPath, err) - } - } - } -} - -// newDir creates a new timestamped directory with the given prefix. It -// assumes that the deployer will not need to create more than 10 -// directories in any given second. -func (d *gitDeployer) newDir(prefix string) (string, error) { - // TODO(fwereade): 2016-03-17 lp:1558657 - return ioutil.TempDir(d.dataPath, prefix+time.Now().Format("20060102-150405")) -} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/charm/git_deployer_test.go juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/charm/git_deployer_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/charm/git_deployer_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/charm/git_deployer_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,220 +0,0 @@ -// Copyright 2012-2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. 
- -package charm_test - -import ( - "io/ioutil" - "path/filepath" - - jc "github.com/juju/testing/checkers" - "github.com/juju/utils/symlink" - gc "gopkg.in/check.v1" - corecharm "gopkg.in/juju/charm.v6-unstable" - - "github.com/juju/juju/testing" - "github.com/juju/juju/worker/uniter/charm" -) - -type GitDeployerSuite struct { - testing.GitSuite - bundles *bundleReader - targetPath string - deployer charm.Deployer -} - -var _ = gc.Suite(&GitDeployerSuite{}) - -func (s *GitDeployerSuite) SetUpTest(c *gc.C) { - testing.SkipIfGitNotAvailable(c) - s.GitSuite.SetUpTest(c) - s.bundles = &bundleReader{} - s.targetPath = filepath.Join(c.MkDir(), "target") - deployerPath := filepath.Join(c.MkDir(), "deployer") - s.deployer = charm.NewGitDeployer(s.targetPath, deployerPath, s.bundles) -} - -func (s *GitDeployerSuite) TestUnsetCharm(c *gc.C) { - err := s.deployer.Deploy() - c.Assert(err, gc.ErrorMatches, "charm deployment failed: no charm set") -} - -func (s *GitDeployerSuite) TestInstall(c *gc.C) { - // Prepare. - info := s.bundles.AddCustomBundle(c, corecharm.MustParseURL("cs:s/c-1"), func(path string) { - err := ioutil.WriteFile(filepath.Join(path, "some-file"), []byte("hello"), 0644) - c.Assert(err, jc.ErrorIsNil) - }) - err := s.deployer.Stage(info, nil) - c.Assert(err, jc.ErrorIsNil) - checkCleanup(c, s.deployer) - - // Install. - err = s.deployer.Deploy() - c.Assert(err, jc.ErrorIsNil) - checkCleanup(c, s.deployer) - - // Check content. 
- data, err := ioutil.ReadFile(filepath.Join(s.targetPath, "some-file")) - c.Assert(err, jc.ErrorIsNil) - c.Assert(string(data), gc.Equals, "hello") - - target := charm.NewGitDir(s.targetPath) - url, err := target.ReadCharmURL() - c.Assert(err, jc.ErrorIsNil) - c.Assert(url, gc.DeepEquals, corecharm.MustParseURL("cs:s/c-1")) - lines, err := target.Log() - c.Assert(err, jc.ErrorIsNil) - c.Assert(lines, gc.HasLen, 2) - c.Assert(lines[0], gc.Matches, `[0-9a-f]{7} Deployed charm "cs:s/c-1"\.`) - c.Assert(lines[1], gc.Matches, `[0-9a-f]{7} Imported charm "cs:s/c-1"\.`) -} - -func (s *GitDeployerSuite) TestUpgrade(c *gc.C) { - // Install. - info1 := s.bundles.AddCustomBundle(c, corecharm.MustParseURL("cs:s/c-1"), func(path string) { - err := ioutil.WriteFile(filepath.Join(path, "some-file"), []byte("hello"), 0644) - c.Assert(err, jc.ErrorIsNil) - err = symlink.New("./some-file", filepath.Join(path, "a-symlink")) - c.Assert(err, jc.ErrorIsNil) - }) - err := s.deployer.Stage(info1, nil) - c.Assert(err, jc.ErrorIsNil) - err = s.deployer.Deploy() - c.Assert(err, jc.ErrorIsNil) - - // Upgrade. - info2 := s.bundles.AddCustomBundle(c, corecharm.MustParseURL("cs:s/c-2"), func(path string) { - err := ioutil.WriteFile(filepath.Join(path, "some-file"), []byte("goodbye"), 0644) - c.Assert(err, jc.ErrorIsNil) - err = ioutil.WriteFile(filepath.Join(path, "a-symlink"), []byte("not any more!"), 0644) - c.Assert(err, jc.ErrorIsNil) - }) - err = s.deployer.Stage(info2, nil) - c.Assert(err, jc.ErrorIsNil) - checkCleanup(c, s.deployer) - err = s.deployer.Deploy() - c.Assert(err, jc.ErrorIsNil) - checkCleanup(c, s.deployer) - - // Check content. 
- data, err := ioutil.ReadFile(filepath.Join(s.targetPath, "some-file")) - c.Assert(err, jc.ErrorIsNil) - c.Assert(string(data), gc.Equals, "goodbye") - data, err = ioutil.ReadFile(filepath.Join(s.targetPath, "a-symlink")) - c.Assert(err, jc.ErrorIsNil) - c.Assert(string(data), gc.Equals, "not any more!") - - target := charm.NewGitDir(s.targetPath) - url, err := target.ReadCharmURL() - c.Assert(err, jc.ErrorIsNil) - c.Assert(url, gc.DeepEquals, corecharm.MustParseURL("cs:s/c-2")) - lines, err := target.Log() - c.Assert(err, jc.ErrorIsNil) - c.Assert(lines, gc.HasLen, 5) - c.Assert(lines[0], gc.Matches, `[0-9a-f]{7} Upgraded charm to "cs:s/c-2".`) -} - -func (s *GitDeployerSuite) TestConflictRevertResolve(c *gc.C) { - // Install. - info1 := s.bundles.AddCustomBundle(c, corecharm.MustParseURL("cs:s/c-1"), func(path string) { - err := ioutil.WriteFile(filepath.Join(path, "some-file"), []byte("hello"), 0644) - c.Assert(err, jc.ErrorIsNil) - }) - err := s.deployer.Stage(info1, nil) - c.Assert(err, jc.ErrorIsNil) - err = s.deployer.Deploy() - c.Assert(err, jc.ErrorIsNil) - - // Mess up target. - err = ioutil.WriteFile(filepath.Join(s.targetPath, "some-file"), []byte("mu!"), 0644) - c.Assert(err, jc.ErrorIsNil) - - // Upgrade. - info2 := s.bundles.AddCustomBundle(c, corecharm.MustParseURL("cs:s/c-2"), func(path string) { - err := ioutil.WriteFile(filepath.Join(path, "some-file"), []byte("goodbye"), 0644) - c.Assert(err, jc.ErrorIsNil) - }) - err = s.deployer.Stage(info2, nil) - c.Assert(err, jc.ErrorIsNil) - err = s.deployer.Deploy() - c.Assert(err, gc.Equals, charm.ErrConflict) - checkCleanup(c, s.deployer) - - // Check state. - target := charm.NewGitDir(s.targetPath) - conflicted, err := target.Conflicted() - c.Assert(err, jc.ErrorIsNil) - c.Assert(conflicted, jc.IsTrue) - - // Revert and check initial content. 
- err = s.deployer.NotifyRevert() - c.Assert(err, jc.ErrorIsNil) - data, err := ioutil.ReadFile(filepath.Join(s.targetPath, "some-file")) - c.Assert(err, jc.ErrorIsNil) - c.Assert(string(data), gc.Equals, "mu!") - conflicted, err = target.Conflicted() - c.Assert(err, jc.ErrorIsNil) - c.Assert(conflicted, jc.IsFalse) - - // Try to upgrade again. - err = s.deployer.Deploy() - c.Assert(err, gc.Equals, charm.ErrConflict) - conflicted, err = target.Conflicted() - c.Assert(err, jc.ErrorIsNil) - c.Assert(conflicted, jc.IsTrue) - checkCleanup(c, s.deployer) - - // And again. - err = s.deployer.Deploy() - c.Assert(err, gc.Equals, charm.ErrConflict) - conflicted, err = target.Conflicted() - c.Assert(err, jc.ErrorIsNil) - c.Assert(conflicted, jc.IsTrue) - checkCleanup(c, s.deployer) - - // Manually resolve, and commit. - err = ioutil.WriteFile(filepath.Join(target.Path(), "some-file"), []byte("nu!"), 0644) - c.Assert(err, jc.ErrorIsNil) - err = s.deployer.NotifyResolved() - c.Assert(err, jc.ErrorIsNil) - conflicted, err = target.Conflicted() - c.Assert(err, jc.ErrorIsNil) - c.Assert(conflicted, jc.IsFalse) - - // Try a final upgrade to the same charm and check it doesn't write anything - // except the upgrade log line. - err = s.deployer.Deploy() - c.Assert(err, jc.ErrorIsNil) - checkCleanup(c, s.deployer) - - data, err = ioutil.ReadFile(filepath.Join(target.Path(), "some-file")) - c.Assert(err, jc.ErrorIsNil) - c.Assert(string(data), gc.Equals, "nu!") - conflicted, err = target.Conflicted() - c.Assert(err, jc.ErrorIsNil) - c.Assert(conflicted, jc.IsFalse) - lines, err := target.Log() - c.Assert(err, jc.ErrorIsNil) - c.Assert(lines[0], gc.Matches, `[0-9a-f]{7} Upgraded charm to "cs:s/c-2".`) -} - -func checkCleanup(c *gc.C, d charm.Deployer) { - // Only one update dir should exist and be pointed to by the 'current' - // symlink since extra ones should have been cleaned up by - // cleanupOrphans. 
- deployerPath := charm.GitDeployerDataPath(d) - updateDirs, err := filepath.Glob(filepath.Join(deployerPath, "update-*")) - c.Assert(err, jc.ErrorIsNil) - c.Assert(updateDirs, gc.HasLen, 1) - deployerCurrent := charm.GitDeployerCurrent(d) - current, err := symlink.Read(deployerCurrent.Path()) - c.Assert(err, jc.ErrorIsNil) - c.Assert(updateDirs[0], gc.Equals, current) - - // No install dirs should be left behind since the one created is - // renamed to the target path. - installDirs, err := filepath.Glob(filepath.Join(deployerPath, "install-*")) - c.Assert(err, jc.ErrorIsNil) - c.Assert(installDirs, gc.HasLen, 0) -} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/charm/git.go juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/charm/git.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/charm/git.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/charm/git.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,241 +0,0 @@ -// Copyright 2012-2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package charm - -import ( - "fmt" - "io" - "io/ioutil" - "os" - "os/exec" - "path/filepath" - "strings" - - "gopkg.in/juju/charm.v6-unstable" -) - -// GitDir exposes a specialized subset of git operations on a directory. -type GitDir struct { - path string -} - -// NewGitDir creates a new GitDir at path. It does not touch the filesystem. -func NewGitDir(path string) *GitDir { - return &GitDir{path} -} - -// Path returns the directory path. -func (d *GitDir) Path() string { - return d.path -} - -// Exists returns true if the directory exists. -func (d *GitDir) Exists() (bool, error) { - fi, err := os.Stat(d.path) - if err != nil { - if os.IsNotExist(err) { - return false, nil - } - return false, err - } - if fi.IsDir() { - return true, nil - } - return false, fmt.Errorf("%q is not a directory", d.path) -} - -// Init ensures that a git repository exists in the directory. 
-func (d *GitDir) Init() error { - if err := os.MkdirAll(d.path, 0755); err != nil { - return err - } - commands := [][]string{ - {"init"}, - {"config", "user.email", "juju@localhost"}, - {"config", "user.name", "juju"}, - } - for _, args := range commands { - if err := d.cmd(args...); err != nil { - return err - } - } - return nil -} - -// AddAll ensures that the next commit will reflect the current contents of -// the directory. Empty directories will be preserved by inserting and tracking -// empty files named .empty. -func (d *GitDir) AddAll() error { - walker := func(path string, fi os.FileInfo, err error) error { - if err != nil { - return err - } - if !fi.IsDir() { - return nil - } - f, err := os.Open(path) - if err != nil { - return err - } - defer f.Close() - if _, err := f.Readdir(1); err != nil { - if err == io.EOF { - empty := filepath.Join(path, ".empty") - return ioutil.WriteFile(empty, nil, 0644) - } - return err - } - return nil - } - if err := filepath.Walk(d.path, walker); err != nil { - return err - } - - // special handling for addall, since there is an error condition that - // we need to suppress - return d.addAll() -} - -// addAll runs "git add -A ."" and swallows errors about no matching files. This -// is to replicate the behavior of older versions of git that returned no error -// in that situation. -func (d *GitDir) addAll() error { - args := []string{"add", "-A", "."} - cmd := exec.Command("git", args...) - cmd.Dir = d.path - if out, err := cmd.CombinedOutput(); err != nil { - output := string(out) - // Swallow this specific error. It's a change in behavior from older - // versions of git, and we want AddAll to be able to be used on empty - // directories. - if !strings.Contains(output, "pathspec '.' did not match any files") { - return d.logError(err, string(out), args...) - } - } - return nil -} - -// Commitf commits a new revision to the repository with the supplied message. 
-func (d *GitDir) Commitf(format string, args ...interface{}) error { - return d.cmd("commit", "--allow-empty", "-m", fmt.Sprintf(format, args...)) -} - -// Snapshotf adds all changes made since the last commit, including deletions -// and empty directories, and commits them using the supplied message. -func (d *GitDir) Snapshotf(format string, args ...interface{}) error { - if err := d.AddAll(); err != nil { - return err - } - return d.Commitf(format, args...) -} - -// Clone creates a new GitDir at the specified path, with history cloned -// from the existing GitDir. It does not check out any files. -func (d *GitDir) Clone(path string) (*GitDir, error) { - if err := d.cmd("clone", "--no-checkout", ".", path); err != nil { - return nil, err - } - return &GitDir{path}, nil -} - -// Pull pulls from the supplied GitDir. -func (d *GitDir) Pull(src *GitDir) error { - err := d.cmd("pull", src.path) - if err != nil { - if conflicted, e := d.Conflicted(); e == nil && conflicted { - return ErrConflict - } - } - return err -} - -// Dirty returns true if the directory contains any uncommitted local changes. -func (d *GitDir) Dirty() (bool, error) { - statuses, err := d.statuses() - if err != nil { - return false, err - } - return len(statuses) != 0, nil -} - -// Conflicted returns true if the directory contains any conflicts. -func (d *GitDir) Conflicted() (bool, error) { - statuses, err := d.statuses() - if err != nil { - return false, err - } - for _, st := range statuses { - switch st { - case "AA", "DD", "UU", "AU", "UA", "DU", "UD": - return true, nil - } - } - return false, nil -} - -// Revert removes unversioned files and reverts everything else to its state -// as of the most recent commit. -func (d *GitDir) Revert() error { - if err := d.cmd("reset", "--hard", "ORIG_HEAD"); err != nil { - return err - } - return d.cmd("clean", "-f", "-f", "-d") -} - -// Log returns a highly compacted history of the directory. 
-func (d *GitDir) Log() ([]string, error) { - cmd := exec.Command("git", "--no-pager", "log", "--oneline") - cmd.Dir = d.path - out, err := cmd.Output() - if err != nil { - return nil, err - } - trim := strings.TrimRight(string(out), "\n") - return strings.Split(trim, "\n"), nil -} - -// cmd runs the specified command inside the directory. Errors will be logged -// in detail. -func (d *GitDir) cmd(args ...string) error { - cmd := exec.Command("git", args...) - cmd.Dir = d.path - if out, err := cmd.CombinedOutput(); err != nil { - return d.logError(err, string(out), args...) - } - return nil -} - -func (d *GitDir) logError(err error, output string, args ...string) error { - logger.Errorf("git command failed: %s\npath: %s\nargs: %#v\n%s", - err, d.path, args, output) - return fmt.Errorf("git %s failed: %s", args[0], err) -} - -// statuses returns a list of XY-coded git statuses for the files in the directory. -func (d *GitDir) statuses() ([]string, error) { - cmd := exec.Command("git", "status", "--porcelain") - cmd.Dir = d.path - out, err := cmd.Output() - if err != nil { - return nil, fmt.Errorf("git status failed: %v", err) - } - statuses := []string{} - for _, line := range strings.Split(string(out), "\n") { - if line != "" { - statuses = append(statuses, line[:2]) - } - } - return statuses, nil -} - -// ReadCharmURL reads the charm identity file from the GitDir. -func (d *GitDir) ReadCharmURL() (*charm.URL, error) { - path := filepath.Join(d.path, CharmURLPath) - return ReadCharmURL(path) -} - -// WriteCharmURL writes the charm identity file into the GitDir. 
-func (d *GitDir) WriteCharmURL(url *charm.URL) error { - return WriteCharmURL(filepath.Join(d.path, CharmURLPath), url) -} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/charm/git_test.go juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/charm/git_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/charm/git_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/charm/git_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,229 +0,0 @@ -// Copyright 2012-2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -// +build !windows - -package charm_test - -import ( - "io/ioutil" - "os" - "os/exec" - "path/filepath" - - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" - corecharm "gopkg.in/juju/charm.v6-unstable" - - "github.com/juju/juju/testing" - "github.com/juju/juju/worker/uniter/charm" -) - -var curl = corecharm.MustParseURL("cs:series/blah-blah-123") - -type GitDirSuite struct { - testing.GitSuite -} - -var _ = gc.Suite(&GitDirSuite{}) - -func (s *GitDirSuite) TestInitConfig(c *gc.C) { - base := c.MkDir() - repo := charm.NewGitDir(filepath.Join(base, "repo")) - err := repo.Init() - c.Assert(err, jc.ErrorIsNil) - - cmd := exec.Command("git", "config", "--list", "--local") - cmd.Dir = repo.Path() - out, err := cmd.Output() - c.Assert(err, jc.ErrorIsNil) - outstr := string(out) - c.Assert(outstr, jc.Contains, "user.email=juju@localhost") - c.Assert(outstr, jc.Contains, "user.name=juju") -} - -func (s *GitDirSuite) TestCreate(c *gc.C) { - base := c.MkDir() - repo := charm.NewGitDir(filepath.Join(base, "repo")) - exists, err := repo.Exists() - c.Assert(err, jc.ErrorIsNil) - c.Assert(exists, jc.IsFalse) - - err = ioutil.WriteFile(repo.Path(), nil, 0644) - c.Assert(err, jc.ErrorIsNil) - _, err = repo.Exists() - c.Assert(err, gc.ErrorMatches, `".*/repo" is not a directory`) - err = os.Remove(repo.Path()) - c.Assert(err, jc.ErrorIsNil) - 
- err = os.Chmod(base, 0555) - c.Assert(err, jc.ErrorIsNil) - defer os.Chmod(base, 0755) - err = repo.Init() - c.Assert(err, gc.ErrorMatches, ".* permission denied") - exists, err = repo.Exists() - c.Assert(err, jc.ErrorIsNil) - c.Assert(exists, jc.IsFalse) - - err = os.Chmod(base, 0755) - c.Assert(err, jc.ErrorIsNil) - err = repo.Init() - c.Assert(err, jc.ErrorIsNil) - exists, err = repo.Exists() - c.Assert(err, jc.ErrorIsNil) - c.Assert(exists, jc.IsTrue) - - _, err = repo.ReadCharmURL() - c.Assert(err, jc.Satisfies, os.IsNotExist) - - err = repo.Init() - c.Assert(err, jc.ErrorIsNil) -} - -func (s *GitDirSuite) TestAddCommitPullRevert(c *gc.C) { - target := charm.NewGitDir(c.MkDir()) - err := target.Init() - c.Assert(err, jc.ErrorIsNil) - err = ioutil.WriteFile(filepath.Join(target.Path(), "initial"), []byte("initial"), 0644) - c.Assert(err, jc.ErrorIsNil) - err = target.WriteCharmURL(curl) - c.Assert(err, jc.ErrorIsNil) - err = target.AddAll() - c.Assert(err, jc.ErrorIsNil) - dirty, err := target.Dirty() - c.Assert(err, jc.ErrorIsNil) - c.Assert(dirty, jc.IsTrue) - err = target.Commitf("initial") - c.Assert(err, jc.ErrorIsNil) - dirty, err = target.Dirty() - c.Assert(err, jc.ErrorIsNil) - c.Assert(dirty, jc.IsFalse) - - source := newRepo(c) - err = target.Pull(source) - c.Assert(err, jc.ErrorIsNil) - url, err := target.ReadCharmURL() - c.Assert(err, jc.ErrorIsNil) - c.Assert(url, gc.DeepEquals, curl) - fi, err := os.Stat(filepath.Join(target.Path(), "some-dir")) - c.Assert(err, jc.ErrorIsNil) - c.Assert(fi, jc.Satisfies, os.FileInfo.IsDir) - data, err := ioutil.ReadFile(filepath.Join(target.Path(), "some-file")) - c.Assert(err, jc.ErrorIsNil) - c.Assert(string(data), gc.Equals, "hello") - dirty, err = target.Dirty() - c.Assert(err, jc.ErrorIsNil) - c.Assert(dirty, jc.IsFalse) - - err = ioutil.WriteFile(filepath.Join(target.Path(), "another-file"), []byte("blah"), 0644) - c.Assert(err, jc.ErrorIsNil) - dirty, err = target.Dirty() - c.Assert(err, jc.ErrorIsNil) - 
c.Assert(dirty, jc.IsTrue) - err = source.AddAll() - c.Assert(err, jc.ErrorIsNil) - dirty, err = target.Dirty() - c.Assert(err, jc.ErrorIsNil) - c.Assert(dirty, jc.IsTrue) - - err = target.Revert() - c.Assert(err, jc.ErrorIsNil) - _, err = os.Stat(filepath.Join(target.Path(), "some-file")) - c.Assert(err, jc.Satisfies, os.IsNotExist) - _, err = os.Stat(filepath.Join(target.Path(), "some-dir")) - c.Assert(err, jc.Satisfies, os.IsNotExist) - data, err = ioutil.ReadFile(filepath.Join(target.Path(), "initial")) - c.Assert(err, jc.ErrorIsNil) - c.Assert(string(data), gc.Equals, "initial") - dirty, err = target.Dirty() - c.Assert(err, jc.ErrorIsNil) - c.Assert(dirty, jc.IsFalse) -} - -func (s *GitDirSuite) TestClone(c *gc.C) { - repo, err := newRepo(c).Clone(c.MkDir()) - c.Assert(err, jc.ErrorIsNil) - _, err = os.Stat(filepath.Join(repo.Path(), "some-file")) - c.Assert(err, jc.Satisfies, os.IsNotExist) - _, err = os.Stat(filepath.Join(repo.Path(), "some-dir")) - c.Assert(err, jc.Satisfies, os.IsNotExist) - dirty, err := repo.Dirty() - c.Assert(err, jc.ErrorIsNil) - c.Assert(dirty, jc.IsTrue) - - err = repo.AddAll() - c.Assert(err, jc.ErrorIsNil) - dirty, err = repo.Dirty() - c.Assert(err, jc.ErrorIsNil) - c.Assert(dirty, jc.IsTrue) - err = repo.Commitf("blank overwrite") - c.Assert(err, jc.ErrorIsNil) - dirty, err = repo.Dirty() - c.Assert(err, jc.ErrorIsNil) - c.Assert(dirty, jc.IsFalse) - - lines, err := repo.Log() - c.Assert(err, jc.ErrorIsNil) - c.Assert(lines, gc.HasLen, 2) - c.Assert(lines[0], gc.Matches, "[a-f0-9]{7} blank overwrite") - c.Assert(lines[1], gc.Matches, "[a-f0-9]{7} im in ur repo committin ur files") -} - -func (s *GitDirSuite) TestConflictRevert(c *gc.C) { - source := newRepo(c) - updated, err := source.Clone(c.MkDir()) - c.Assert(err, jc.ErrorIsNil) - err = ioutil.WriteFile(filepath.Join(updated.Path(), "some-dir"), []byte("hello"), 0644) - c.Assert(err, jc.ErrorIsNil) - err = updated.Snapshotf("potential conflict src") - c.Assert(err, 
jc.ErrorIsNil) - conflicted, err := updated.Conflicted() - c.Assert(err, jc.ErrorIsNil) - c.Assert(conflicted, jc.IsFalse) - - target := charm.NewGitDir(c.MkDir()) - err = target.Init() - c.Assert(err, jc.ErrorIsNil) - err = target.Pull(source) - c.Assert(err, jc.ErrorIsNil) - err = ioutil.WriteFile(filepath.Join(target.Path(), "some-dir", "conflicting-file"), []byte("hello"), 0644) - c.Assert(err, jc.ErrorIsNil) - err = target.Snapshotf("potential conflict dst") - c.Assert(err, jc.ErrorIsNil) - conflicted, err = target.Conflicted() - c.Assert(err, jc.ErrorIsNil) - c.Assert(conflicted, jc.IsFalse) - - err = target.Pull(updated) - c.Assert(err, gc.Equals, charm.ErrConflict) - conflicted, err = target.Conflicted() - c.Assert(err, jc.ErrorIsNil) - c.Assert(conflicted, jc.IsTrue) - dirty, err := target.Dirty() - c.Assert(err, jc.ErrorIsNil) - c.Assert(dirty, jc.IsTrue) - - err = target.Revert() - c.Assert(err, jc.ErrorIsNil) - conflicted, err = target.Conflicted() - c.Assert(err, jc.ErrorIsNil) - c.Assert(conflicted, jc.IsFalse) - dirty, err = target.Dirty() - c.Assert(err, jc.ErrorIsNil) - c.Assert(dirty, jc.IsFalse) -} - -func newRepo(c *gc.C) *charm.GitDir { - repo := charm.NewGitDir(c.MkDir()) - err := repo.Init() - c.Assert(err, jc.ErrorIsNil) - err = os.Mkdir(filepath.Join(repo.Path(), "some-dir"), 0755) - c.Assert(err, jc.ErrorIsNil) - err = ioutil.WriteFile(filepath.Join(repo.Path(), "some-file"), []byte("hello"), 0644) - c.Assert(err, jc.ErrorIsNil) - err = repo.AddAll() - c.Assert(err, jc.ErrorIsNil) - err = repo.Commitf("im in ur repo committin ur %s", "files") - c.Assert(err, jc.ErrorIsNil) - return repo -} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/charm/manifest_deployer.go juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/charm/manifest_deployer.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/charm/manifest_deployer.go 2016-08-16 08:56:25.000000000 +0000 +++ 
juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/charm/manifest_deployer.go 2016-10-13 14:31:49.000000000 +0000 @@ -108,19 +108,6 @@ return d.finishDeploy() } -func (d *manifestDeployer) NotifyResolved() error { - // Maybe it is resolved, maybe not. We'll find out soon enough, but we - // don't need to take any action now; if it's not, we'll just ErrConflict - // out of Deploy again. - return nil -} - -func (d *manifestDeployer) NotifyRevert() error { - // The Deploy implementation always effectively reverts when required - // anyway, so we need take no action right now. - return nil -} - // startDeploy persists the fact that we've started deploying the staged bundle. func (d *manifestDeployer) startDeploy() error { logger.Debugf("preparing to deploy charm %q", d.staged.url) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/charm/manifest_deployer_test.go juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/charm/manifest_deployer_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/charm/manifest_deployer_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/charm/manifest_deployer_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -211,8 +211,6 @@ // ...and we want to verify that if we "fix the errors" and redeploy the // same charm... failDeploy = false - err = s.deployer.NotifyResolved() - c.Assert(err, jc.ErrorIsNil) err = s.deployer.Deploy() c.Assert(err, jc.ErrorIsNil) @@ -248,11 +246,6 @@ err = s.deployer.Deploy() c.Assert(err, gc.Equals, charm.ErrConflict) - // Notify the Deployer that it'll be expected to revert the changes from - // the last attempt. - err = s.deployer.NotifyRevert() - c.Assert(err, jc.ErrorIsNil) - // Create a charm upgrade that creates a bunch of different files, without // error, and deploy it; check user files are preserved, and nothing from // charm 1 or 2 is. 
diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/deployer.go juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/deployer.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/deployer.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/deployer.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,51 +0,0 @@ -// Copyright 2012-2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package uniter - -import ( - "github.com/juju/errors" - - "github.com/juju/juju/worker/uniter/charm" -) - -// deployerProxy exists because we're not yet comfortable that we can safely -// drop support for charm.gitDeployer. If we can, then the uniter doesn't -// need a deployer reference at all: and we can drop the Fix method, and even -// the Notify* methods on the Deployer interface, and simply hand the -// deployer we create over to the operationFactory at creation and forget -// about it. But. -// -// We will never be *completely* certain that gitDeployer can be dropped, -// because it's not done as an upgrade step (because we can't replace the -// deployer while conflicted, and upgrades are not gated on no-conflicts); -// and so long as there's a reasonable possibility that someone *might* have -// been running a pre-1.19.1 environment, and have either upgraded directly -// in a conflict state *or* have upgraded stepwise without fixing a conflict -// state, we should keep this complexity. -// -// In practice, that possibility is growing ever more remote, but we're not -// ready to pull the trigger yet. -type deployerProxy struct { - charm.Deployer -} - -// Fix replaces a git-based charm deployer with a manifest-based one, if -// necessary. It should not be called unless the existing charm deployment -// is known to be in a stable state. 
-func (d *deployerProxy) Fix() error { - if err := charm.FixDeployer(&d.Deployer); err != nil { - return errors.Annotatef(err, "cannot convert git deployment to manifest deployment") - } - return nil -} - -// NotifyRevert is part of the charm.Deployer interface. -func (d *deployerProxy) NotifyRevert() error { - if err := d.Deployer.NotifyRevert(); err != nil { - return err - } - // Now we've reverted, we can guarantee that the deployer is in a sane state; - // it's a great time to replace the git deployer (if we're still using it). - return d.Fix() -} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/hook/hooktesting/source.go juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/hook/hooktesting/source.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/hook/hooktesting/source.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/hook/hooktesting/source.go 2016-10-13 14:31:49.000000000 +0000 @@ -5,7 +5,7 @@ import ( "gopkg.in/juju/charm.v6-unstable/hooks" - "launchpad.net/tomb" + "gopkg.in/tomb.v1" "github.com/juju/juju/worker/uniter/hook" ) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/hook/peeker.go juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/hook/peeker.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/hook/peeker.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/hook/peeker.go 2016-10-13 14:31:49.000000000 +0000 @@ -5,7 +5,7 @@ import ( "github.com/juju/errors" - "launchpad.net/tomb" + "gopkg.in/tomb.v1" "github.com/juju/juju/state/watcher" ) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/hook/sender.go juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/hook/sender.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/hook/sender.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/hook/sender.go 
2016-10-13 14:31:49.000000000 +0000 @@ -5,7 +5,7 @@ import ( "github.com/juju/errors" - "launchpad.net/tomb" + "gopkg.in/tomb.v1" "github.com/juju/juju/state/watcher" ) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/op_callbacks.go juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/op_callbacks.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/op_callbacks.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/op_callbacks.go 2016-10-13 14:31:49.000000000 +0000 @@ -120,5 +120,5 @@ // SetExecutingStatus is part of the operation.Callbacks interface. func (opc *operationCallbacks) SetExecutingStatus(message string) error { - return setAgentStatus(opc.u, status.StatusExecuting, message, nil) + return setAgentStatus(opc.u, status.Executing, message, nil) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/operation/deploy.go juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/operation/deploy.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/operation/deploy.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/operation/deploy.go 2016-10-13 14:31:49.000000000 +0000 @@ -51,16 +51,6 @@ if err := d.checkAlreadyDone(state); err != nil { return nil, errors.Trace(err) } - if d.revert { - if err := d.deployer.NotifyRevert(); err != nil { - return nil, errors.Trace(err) - } - } - if d.resolved { - if err := d.deployer.NotifyResolved(); err != nil { - return nil, errors.Trace(err) - } - } info, err := d.callbacks.GetArchiveInfo(d.charmURL) if err != nil { return nil, errors.Trace(err) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/operation/deploy_test.go juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/operation/deploy_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/operation/deploy_test.go 2016-08-16 08:56:25.000000000 +0000 +++ 
juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/operation/deploy_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -68,38 +68,6 @@ ) } -func (s *DeploySuite) testNotifyDeployerError( - c *gc.C, newDeploy newDeploy, expectNotifyRevert bool, -) { - callbacks := &DeployCallbacks{} - deployer := &MockDeployer{} - expectCall := &MockNoArgs{err: errors.New("snh")} - if expectNotifyRevert { - deployer.MockNotifyRevert = expectCall - } else { - deployer.MockNotifyResolved = expectCall - } - factory := operation.NewFactory(operation.FactoryParams{ - Callbacks: callbacks, - Deployer: deployer, - }) - op, err := newDeploy(factory, curl("cs:quantal/hive-23")) - c.Assert(err, jc.ErrorIsNil) - - newState, err := op.Prepare(operation.State{}) - c.Check(newState, gc.IsNil) - c.Check(err, gc.ErrorMatches, "snh") - c.Check(expectCall.called, jc.IsTrue) -} - -func (s *DeploySuite) TestNotifyDeployerError_RevertUpgrade(c *gc.C) { - s.testNotifyDeployerError(c, (operation.Factory).NewRevertUpgrade, true) -} - -func (s *DeploySuite) TestNotifyDeployerError_ResolvedUpgrade(c *gc.C) { - s.testNotifyDeployerError(c, (operation.Factory).NewResolvedUpgrade, false) -} - func (s *DeploySuite) testPrepareArchiveInfoError(c *gc.C, newDeploy newDeploy) { callbacks := &DeployCallbacks{ MockGetArchiveInfo: &MockGetArchiveInfo{err: errors.New("pew")}, diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/operation/factory.go juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/operation/factory.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/operation/factory.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/operation/factory.go 2016-10-13 14:31:49.000000000 +0000 @@ -105,6 +105,17 @@ }, nil } +// NewFailAction is part of the factory interface. 
+func (f *factory) NewFailAction(actionId string) (Operation, error) { + if !names.IsValidAction(actionId) { + return nil, errors.Errorf("invalid action id %q", actionId) + } + return &failAction{ + actionId: actionId, + callbacks: f.config.Callbacks, + }, nil +} + // NewCommands is part of the Factory interface. func (f *factory) NewCommands(args CommandArgs, sendResponse CommandResponseFunc) (Operation, error) { if args.Commands == "" { diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/operation/failaction.go juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/operation/failaction.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/operation/failaction.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/operation/failaction.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,54 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package operation + +import ( + "fmt" +) + +type failAction struct { + actionId string + callbacks Callbacks + name string + RequiresMachineLock +} + +// String is part of the Operation interface. +func (fa *failAction) String() string { + return fmt.Sprintf("fail action %s", fa.actionId) +} + +// Prepare is part of the Operation interface. +func (fa *failAction) Prepare(state State) (*State, error) { + return stateChange{ + Kind: RunAction, + Step: Pending, + ActionId: &fa.actionId, + Hook: state.Hook, + }.apply(state), nil +} + +// Execute is part of the Operation interface. +func (fa *failAction) Execute(state State) (*State, error) { + if err := fa.callbacks.FailAction(fa.actionId, "action terminated"); err != nil { + return nil, err + } + + return stateChange{ + Kind: RunAction, + Step: Done, + ActionId: &fa.actionId, + Hook: state.Hook, + }.apply(state), nil +} + +// Commit preserves the recorded hook, and returns a neutral state. +// Commit is part of the Operation interface. 
+func (fa *failAction) Commit(state State) (*State, error) { + return stateChange{ + Kind: continuationKind(state), + Step: Pending, + Hook: state.Hook, + }.apply(state), nil +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/operation/failaction_test.go juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/operation/failaction_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/operation/failaction_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/operation/failaction_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,160 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package operation_test + +import ( + "github.com/juju/errors" + "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + "gopkg.in/juju/charm.v6-unstable/hooks" + + "github.com/juju/juju/worker/uniter/hook" + "github.com/juju/juju/worker/uniter/operation" +) + +type FailActionSuite struct { + testing.IsolationSuite +} + +var _ = gc.Suite(&FailActionSuite{}) + +func (s *FailActionSuite) TestPrepare(c *gc.C) { + factory := operation.NewFactory(operation.FactoryParams{}) + op, err := factory.NewFailAction(someActionId) + c.Assert(err, jc.ErrorIsNil) + + newState, err := op.Prepare(operation.State{}) + c.Assert(err, jc.ErrorIsNil) + c.Assert(newState, jc.DeepEquals, &operation.State{ + Kind: operation.RunAction, + Step: operation.Pending, + ActionId: &someActionId, + }) +} + +func (s *FailActionSuite) TestExecuteSuccess(c *gc.C) { + var stateChangeTests = []struct { + description string + before operation.State + after operation.State + }{{ + description: "empty state", + after: operation.State{ + Kind: operation.RunAction, + Step: operation.Done, + ActionId: &someActionId, + }, + }, { + description: "preserves appropriate fields", + before: overwriteState, + after: operation.State{ + Kind: operation.RunAction, + 
Step: operation.Done, + ActionId: &someActionId, + Hook: &hook.Info{Kind: hooks.Install}, + Started: true, + }, + }} + + for i, test := range stateChangeTests { + c.Logf("test %d: %s", i, test.description) + callbacks := &RunActionCallbacks{MockFailAction: &MockFailAction{}} + factory := operation.NewFactory(operation.FactoryParams{ + Callbacks: callbacks, + }) + op, err := factory.NewFailAction(someActionId) + c.Assert(err, jc.ErrorIsNil) + midState, err := op.Prepare(test.before) + c.Assert(midState, gc.NotNil) + c.Assert(err, jc.ErrorIsNil) + + newState, err := op.Execute(*midState) + c.Assert(err, jc.ErrorIsNil) + c.Assert(newState, jc.DeepEquals, &test.after) + c.Assert(*callbacks.MockFailAction.gotMessage, gc.Equals, "action terminated") + c.Assert(*callbacks.MockFailAction.gotActionId, gc.Equals, someActionId) + } +} + +func (s *FailActionSuite) TestExecuteFail(c *gc.C) { + st := operation.State{ + Kind: operation.RunAction, + Step: operation.Done, + ActionId: &someActionId, + } + callbacks := &RunActionCallbacks{MockFailAction: &MockFailAction{err: errors.New("squelch")}} + factory := operation.NewFactory(operation.FactoryParams{ + Callbacks: callbacks, + }) + op, err := factory.NewFailAction(someActionId) + c.Assert(err, jc.ErrorIsNil) + midState, err := op.Prepare(st) + c.Assert(midState, gc.NotNil) + c.Assert(err, jc.ErrorIsNil) + + _, err = op.Execute(*midState) + c.Assert(err, gc.ErrorMatches, "squelch") +} + +func (s *FailActionSuite) TestCommit(c *gc.C) { + var stateChangeTests = []struct { + description string + before operation.State + after operation.State + }{{ + description: "empty state", + after: operation.State{ + Kind: operation.Continue, + Step: operation.Pending, + }, + }, { + description: "preserves only appropriate fields, no hook", + before: operation.State{ + Kind: operation.Continue, + Step: operation.Pending, + Started: true, + CharmURL: curl("cs:quantal/wordpress-2"), + ActionId: &randomActionId, + }, + after: operation.State{ + 
Kind: operation.Continue, + Step: operation.Pending, + Started: true, + }, + }, { + description: "preserves only appropriate fields, with hook", + before: operation.State{ + Kind: operation.Continue, + Step: operation.Pending, + Started: true, + CharmURL: curl("cs:quantal/wordpress-2"), + ActionId: &randomActionId, + Hook: &hook.Info{Kind: hooks.Install}, + }, + after: operation.State{ + Kind: operation.RunHook, + Step: operation.Pending, + Hook: &hook.Info{Kind: hooks.Install}, + Started: true, + }, + }} + + for i, test := range stateChangeTests { + c.Logf("test %d: %s", i, test.description) + factory := operation.NewFactory(operation.FactoryParams{}) + op, err := factory.NewFailAction(someActionId) + c.Assert(err, jc.ErrorIsNil) + + newState, err := op.Commit(test.before) + c.Assert(newState, jc.DeepEquals, &test.after) + } +} + +func (s *FailActionSuite) TestNeedsGlobalMachineLock(c *gc.C) { + factory := operation.NewFactory(operation.FactoryParams{}) + op, err := factory.NewFailAction(someActionId) + c.Assert(err, jc.ErrorIsNil) + c.Assert(op.NeedsGlobalMachineLock(), jc.IsTrue) +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/operation/interface.go juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/operation/interface.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/operation/interface.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/operation/interface.go 2016-10-13 14:31:49.000000000 +0000 @@ -90,6 +90,9 @@ // NewAction creates an operation to execute the supplied action. NewAction(actionId string) (Operation, error) + // NewFailAction creates an operation that marks an action as failed. + NewFailAction(actionId string) (Operation, error) + // NewCommands creates an operation to execute the supplied script in the // indicated relation context, and pass the results back over the supplied // func. 
diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/operation/runhook.go juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/operation/runhook.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/operation/runhook.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/operation/runhook.go 2016-10-13 14:31:49.000000000 +0000 @@ -134,12 +134,12 @@ switch rh.info.Kind { case hooks.Install: err = rh.runner.Context().SetUnitStatus(jujuc.StatusInfo{ - Status: string(status.StatusMaintenance), - Info: "installing charm software", + Status: string(status.Maintenance), + Info: status.MessageInstallingCharm, }) case hooks.Stop: err = rh.runner.Context().SetUnitStatus(jujuc.StatusInfo{ - Status: string(status.StatusMaintenance), + Status: string(status.Maintenance), Info: "cleaning up prior to charm deletion", }) } @@ -161,7 +161,7 @@ case hooks.Stop: // Charm is no longer of this world. err = rh.runner.Context().SetUnitStatus(jujuc.StatusInfo{ - Status: string(status.StatusTerminated), + Status: string(status.Terminated), }) case hooks.Start: if hasRunStatusSet { @@ -171,7 +171,7 @@ // We've finished the start hook and the charm has not updated its // own status so we'll set it to unknown. 
err = rh.runner.Context().SetUnitStatus(jujuc.StatusInfo{ - Status: string(status.StatusUnknown), + Status: string(status.Unknown), }) } if err != nil { diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/operation/util_test.go juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/operation/util_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/operation/util_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/operation/util_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -100,14 +100,6 @@ return d.MockDeploy.Call() } -func (d *MockDeployer) NotifyRevert() error { - return d.MockNotifyRevert.Call() -} - -func (d *MockDeployer) NotifyResolved() error { - return d.MockNotifyResolved.Call() -} - type MockFailAction struct { gotActionId *string gotMessage *string diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/relation/relations_test.go juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/relation/relations_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/relation/relations_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/relation/relations_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -59,7 +59,7 @@ err error } -func uniterApiCall(request string, args, result interface{}, err error) apiCall { +func uniterAPICall(request string, args, result interface{}, err error) apiCall { return apiCall{ request: request, args: args, @@ -123,8 +123,8 @@ var numCalls int32 unitEntity := params.Entities{Entities: []params.Entity{params.Entity{Tag: "unit-wordpress-0"}}} apiCaller := mockAPICaller(c, &numCalls, - uniterApiCall("Life", unitEntity, params.LifeResults{Results: []params.LifeResult{{Life: params.Alive}}}, nil), - uniterApiCall("JoinedRelations", unitEntity, params.StringsResults{Results: []params.StringsResult{{Result: []string{}}}}, nil), + uniterAPICall("Life", unitEntity, 
params.LifeResults{Results: []params.LifeResult{{Life: params.Alive}}}, nil), + uniterAPICall("JoinedRelations", unitEntity, params.StringsResults{Results: []params.StringsResult{{Result: []string{}}}}, nil), ) st := uniter.NewState(apiCaller, unitTag) r, err := relation.NewRelations(st, unitTag, s.stateDir, s.relationsDir, abort) @@ -162,12 +162,12 @@ } apiCaller := mockAPICaller(c, &numCalls, - uniterApiCall("Life", unitEntity, params.LifeResults{Results: []params.LifeResult{{Life: params.Alive}}}, nil), - uniterApiCall("JoinedRelations", unitEntity, params.StringsResults{Results: []params.StringsResult{{Result: []string{"relation-wordpress:db mysql:db"}}}}, nil), - uniterApiCall("Relation", relationUnits, relationResults, nil), - uniterApiCall("Relation", relationUnits, relationResults, nil), - uniterApiCall("Watch", unitEntity, params.NotifyWatchResults{Results: []params.NotifyWatchResult{{NotifyWatcherId: "1"}}}, nil), - uniterApiCall("EnterScope", relationUnits, params.ErrorResults{Results: []params.ErrorResult{{}}}, nil), + uniterAPICall("Life", unitEntity, params.LifeResults{Results: []params.LifeResult{{Life: params.Alive}}}, nil), + uniterAPICall("JoinedRelations", unitEntity, params.StringsResults{Results: []params.StringsResult{{Result: []string{"relation-wordpress:db mysql:db"}}}}, nil), + uniterAPICall("Relation", relationUnits, relationResults, nil), + uniterAPICall("Relation", relationUnits, relationResults, nil), + uniterAPICall("Watch", unitEntity, params.NotifyWatchResults{Results: []params.NotifyWatchResult{{NotifyWatcherId: "1"}}}, nil), + uniterAPICall("EnterScope", relationUnits, params.ErrorResults{Results: []params.ErrorResult{{}}}, nil), ) st := uniter.NewState(apiCaller, unitTag) r, err := relation.NewRelations(st, unitTag, s.stateDir, s.relationsDir, abort) @@ -191,9 +191,9 @@ var numCalls int32 unitEntity := params.Entities{Entities: []params.Entity{params.Entity{Tag: "unit-wordpress-0"}}} apiCaller := mockAPICaller(c, &numCalls, - 
uniterApiCall("Life", unitEntity, params.LifeResults{Results: []params.LifeResult{{Life: params.Alive}}}, nil), - uniterApiCall("JoinedRelations", unitEntity, params.StringsResults{Results: []params.StringsResult{{Result: []string{}}}}, nil), - uniterApiCall("GetPrincipal", unitEntity, params.StringBoolResults{Results: []params.StringBoolResult{{Result: "", Ok: false}}}, nil), + uniterAPICall("Life", unitEntity, params.LifeResults{Results: []params.LifeResult{{Life: params.Alive}}}, nil), + uniterAPICall("JoinedRelations", unitEntity, params.StringsResults{Results: []params.StringsResult{{Result: []string{}}}}, nil), + uniterAPICall("GetPrincipal", unitEntity, params.StringBoolResults{Results: []params.StringBoolResult{{Result: "", Ok: false}}}, nil), ) st := uniter.NewState(apiCaller, unitTag) r, err := relation.NewRelations(st, unitTag, s.stateDir, s.relationsDir, abort) @@ -211,7 +211,7 @@ c.Assert(errors.Cause(err), gc.Equals, resolver.ErrNoOperation) } -func relationJoinedApiCalls() []apiCall { +func relationJoinedAPICalls() []apiCall { unitEntity := params.Entities{Entities: []params.Entity{params.Entity{Tag: "unit-wordpress-0"}}} relationResults := params.RelationResults{ Results: []params.RelationResult{ @@ -229,14 +229,14 @@ {Relation: "relation-wordpress.db#mysql.db", Unit: "unit-wordpress-0"}, }} apiCalls := []apiCall{ - uniterApiCall("Life", unitEntity, params.LifeResults{Results: []params.LifeResult{{Life: params.Alive}}}, nil), - uniterApiCall("JoinedRelations", unitEntity, params.StringsResults{Results: []params.StringsResult{{Result: []string{}}}}, nil), - uniterApiCall("RelationById", params.RelationIds{RelationIds: []int{1}}, relationResults, nil), - uniterApiCall("Relation", relationUnits, relationResults, nil), - uniterApiCall("Relation", relationUnits, relationResults, nil), - uniterApiCall("Watch", unitEntity, params.NotifyWatchResults{Results: []params.NotifyWatchResult{{NotifyWatcherId: "1"}}}, nil), - uniterApiCall("EnterScope", 
relationUnits, params.ErrorResults{Results: []params.ErrorResult{{}}}, nil), - uniterApiCall("GetPrincipal", unitEntity, params.StringBoolResults{Results: []params.StringBoolResult{{Result: "", Ok: false}}}, nil), + uniterAPICall("Life", unitEntity, params.LifeResults{Results: []params.LifeResult{{Life: params.Alive}}}, nil), + uniterAPICall("JoinedRelations", unitEntity, params.StringsResults{Results: []params.StringsResult{{Result: []string{}}}}, nil), + uniterAPICall("RelationById", params.RelationIds{RelationIds: []int{1}}, relationResults, nil), + uniterAPICall("Relation", relationUnits, relationResults, nil), + uniterAPICall("Relation", relationUnits, relationResults, nil), + uniterAPICall("Watch", unitEntity, params.NotifyWatchResults{Results: []params.NotifyWatchResult{{NotifyWatcherId: "1"}}}, nil), + uniterAPICall("EnterScope", relationUnits, params.ErrorResults{Results: []params.ErrorResult{{}}}, nil), + uniterAPICall("GetPrincipal", unitEntity, params.StringBoolResults{Results: []params.StringBoolResult{{Result: "", Ok: false}}}, nil), } return apiCalls } @@ -282,7 +282,7 @@ func (s *relationsSuite) TestHookRelationJoined(c *gc.C) { var numCalls int32 - s.assertHookRelationJoined(c, &numCalls, relationJoinedApiCalls()...) + s.assertHookRelationJoined(c, &numCalls, relationJoinedAPICalls()...) 
} func (s *relationsSuite) assertHookRelationChanged( @@ -314,19 +314,19 @@ c.Assert(err, jc.ErrorIsNil) } -func getPrincipalApiCalls(numCalls int32) []apiCall { +func getPrincipalAPICalls(numCalls int32) []apiCall { unitEntity := params.Entities{Entities: []params.Entity{params.Entity{Tag: "unit-wordpress-0"}}} result := make([]apiCall, numCalls) for i := int32(0); i < numCalls; i++ { - result[i] = uniterApiCall("GetPrincipal", unitEntity, params.StringBoolResults{Results: []params.StringBoolResult{{Result: "", Ok: false}}}, nil) + result[i] = uniterAPICall("GetPrincipal", unitEntity, params.StringBoolResults{Results: []params.StringBoolResult{{Result: "", Ok: false}}}, nil) } return result } func (s *relationsSuite) TestHookRelationChanged(c *gc.C) { var numCalls int32 - apiCalls := relationJoinedApiCalls() - apiCalls = append(apiCalls, getPrincipalApiCalls(3)...) + apiCalls := relationJoinedAPICalls() + apiCalls = append(apiCalls, getPrincipalAPICalls(3)...) r := s.assertHookRelationJoined(c, &numCalls, apiCalls...) // There will be an initial relation-changed regardless of @@ -397,17 +397,17 @@ func (s *relationsSuite) TestHookRelationDeparted(c *gc.C) { var numCalls int32 - apiCalls := relationJoinedApiCalls() + apiCalls := relationJoinedAPICalls() - apiCalls = append(apiCalls, getPrincipalApiCalls(2)...) + apiCalls = append(apiCalls, getPrincipalAPICalls(2)...) s.assertHookRelationDeparted(c, &numCalls, apiCalls...) } func (s *relationsSuite) TestHookRelationBroken(c *gc.C) { var numCalls int32 - apiCalls := relationJoinedApiCalls() + apiCalls := relationJoinedAPICalls() - apiCalls = append(apiCalls, getPrincipalApiCalls(3)...) + apiCalls = append(apiCalls, getPrincipalAPICalls(3)...) r := s.assertHookRelationDeparted(c, &numCalls, apiCalls...) 
localState := resolver.LocalState{ @@ -431,12 +431,12 @@ func (s *relationsSuite) TestCommitHook(c *gc.C) { var numCalls int32 - apiCalls := relationJoinedApiCalls() + apiCalls := relationJoinedAPICalls() relationUnits := params.RelationUnits{RelationUnits: []params.RelationUnit{ {Relation: "relation-wordpress.db#mysql.db", Unit: "unit-wordpress-0"}, }} apiCalls = append(apiCalls, - uniterApiCall("LeaveScope", relationUnits, params.ErrorResults{Results: []params.ErrorResult{{}}}, nil), + uniterAPICall("LeaveScope", relationUnits, params.ErrorResults{Results: []params.ErrorResult{{}}}, nil), ) stateFile := filepath.Join(s.relationsDir, "1", "wordpress") c.Assert(stateFile, jc.DoesNotExist) @@ -487,14 +487,14 @@ {Relation: "relation-wordpress.juju-info#juju-info.juju-info", Unit: "unit-wordpress-0"}, }} apiCalls := []apiCall{ - uniterApiCall("Life", unitEntity, params.LifeResults{Results: []params.LifeResult{{Life: params.Alive}}}, nil), - uniterApiCall("JoinedRelations", unitEntity, params.StringsResults{Results: []params.StringsResult{{Result: []string{}}}}, nil), - uniterApiCall("RelationById", params.RelationIds{RelationIds: []int{1}}, relationResults, nil), - uniterApiCall("Relation", relationUnits, relationResults, nil), - uniterApiCall("Relation", relationUnits, relationResults, nil), - uniterApiCall("Watch", unitEntity, params.NotifyWatchResults{Results: []params.NotifyWatchResult{{NotifyWatcherId: "1"}}}, nil), - uniterApiCall("EnterScope", relationUnits, params.ErrorResults{Results: []params.ErrorResult{{}}}, nil), - uniterApiCall("GetPrincipal", unitEntity, params.StringBoolResults{Results: []params.StringBoolResult{{Result: "", Ok: false}}}, nil), + uniterAPICall("Life", unitEntity, params.LifeResults{Results: []params.LifeResult{{Life: params.Alive}}}, nil), + uniterAPICall("JoinedRelations", unitEntity, params.StringsResults{Results: []params.StringsResult{{Result: []string{}}}}, nil), + uniterAPICall("RelationById", params.RelationIds{RelationIds: 
[]int{1}}, relationResults, nil), + uniterAPICall("Relation", relationUnits, relationResults, nil), + uniterAPICall("Relation", relationUnits, relationResults, nil), + uniterAPICall("Watch", unitEntity, params.NotifyWatchResults{Results: []params.NotifyWatchResult{{NotifyWatcherId: "1"}}}, nil), + uniterAPICall("EnterScope", relationUnits, params.ErrorResults{Results: []params.ErrorResult{{}}}, nil), + uniterAPICall("GetPrincipal", unitEntity, params.StringBoolResults{Results: []params.StringBoolResult{{Result: "", Ok: false}}}, nil), } var numCalls int32 diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/remotestate/watcher_test.go juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/remotestate/watcher_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/remotestate/watcher_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/remotestate/watcher_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -6,19 +6,20 @@ import ( "time" + "github.com/juju/testing" jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" "gopkg.in/juju/charm.v6-unstable" "gopkg.in/juju/names.v2" "github.com/juju/juju/apiserver/params" - "github.com/juju/juju/testing" + coretesting "github.com/juju/juju/testing" "github.com/juju/juju/watcher" "github.com/juju/juju/worker/uniter/remotestate" ) type WatcherSuite struct { - testing.BaseSuite + coretesting.BaseSuite st *mockState leadership *mockLeadershipTracker @@ -555,12 +556,12 @@ // for a specific number is very likely to start failing intermittently // again, as in lp:1604955, if the SUT undergoes even subtle changes. 
func (s *WatcherSuite) waitAlarmsStable(c *gc.C) { - timeout := time.After(testing.LongWait) + timeout := time.After(coretesting.LongWait) for i := 0; ; i++ { c.Logf("waiting for alarm %d", i) select { case <-s.clock.Alarms(): - case <-time.After(testing.ShortWait): + case <-time.After(coretesting.ShortWait): return case <-timeout: c.Fatalf("never stopped setting alarms") diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/resolver.go juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/resolver.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/resolver.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/resolver.go 2016-10-13 14:31:49.000000000 +0000 @@ -18,7 +18,6 @@ type ResolverConfig struct { ClearResolved func() error ReportHookError func(hook.Info) error - FixDeployer func() error ShouldRetryHooks bool StartRetryHookTimer func() StopRetryHookTimer func() @@ -66,12 +65,6 @@ return nil, resolver.ErrRestart } - if localState.Kind == operation.Continue { - if err := s.config.FixDeployer(); err != nil { - return nil, errors.Trace(err) - } - } - if s.retryHookTimerStarted && (localState.Kind != operation.RunHook || localState.Step != operation.Pending) { // The hook-retry timer is running, but there is no pending // hook operation. 
We're not in an error state, so stop the diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/resolver_test.go juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/resolver_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/resolver_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/resolver_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -62,7 +62,6 @@ s.resolverConfig = uniter.ResolverConfig{ ClearResolved: func() error { return s.clearResolved() }, ReportHookError: func(info hook.Info) error { return s.reportHookError(info) }, - FixDeployer: func() error { return nil }, StartRetryHookTimer: func() { s.stub.AddCall("StartRetryHookTimer") }, StopRetryHookTimer: func() { s.stub.AddCall("StopRetryHookTimer") }, ShouldRetryHooks: true, diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/runlistener.go juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/runlistener.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/runlistener.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/runlistener.go 2016-10-13 14:31:49.000000000 +0000 @@ -11,7 +11,7 @@ "net/rpc" "sync" - "launchpad.net/tomb" + "gopkg.in/tomb.v1" "github.com/juju/errors" "github.com/juju/utils/exec" diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/runner/context/contextfactory_test.go juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/runner/context/contextfactory_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/runner/context/contextfactory_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/runner/context/contextfactory_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -21,7 +21,6 @@ "github.com/juju/juju/state" "github.com/juju/juju/storage" "github.com/juju/juju/testcharms" - coretesting "github.com/juju/juju/testing" 
"github.com/juju/juju/worker/uniter/hook" "github.com/juju/juju/worker/uniter/runner/context" runnertesting "github.com/juju/juju/worker/uniter/runner/testing" @@ -48,7 +47,7 @@ s.getRelationInfos, s.storage, s.paths, - coretesting.NewClock(time.Time{}), + testing.NewClock(time.Time{}), ) c.Assert(err, jc.ErrorIsNil) s.factory = contextFactory @@ -210,7 +209,7 @@ s.getRelationInfos, s.storage, s.paths, - coretesting.NewClock(time.Time{}), + testing.NewClock(time.Time{}), ) c.Assert(err, jc.ErrorIsNil) ctx, err := contextFactory.HookContext(hook.Info{ diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/runner/context/context.go juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/runner/context/context.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/runner/context/context.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/runner/context/context.go 2016-10-13 14:31:49.000000000 +0000 @@ -616,7 +616,7 @@ case jujuc.RebootNow: *err = ErrRequeueAndReboot } - err2 := ctx.unit.SetUnitStatus(status.StatusRebooting, "", nil) + err2 := ctx.unit.SetUnitStatus(status.Rebooting, "", nil) if err2 != nil { logger.Errorf("updating agent status: %v", err2) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/runner/context/context_test.go juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/runner/context/context_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/runner/context/context_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/runner/context/context_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -154,13 +154,14 @@ ctx := s.GetContext(c, -1, "") unitStatus, err := ctx.UnitStatus() c.Check(err, jc.ErrorIsNil) - c.Check(unitStatus.Status, gc.Equals, "unknown") + c.Check(unitStatus.Status, gc.Equals, "waiting") + c.Check(unitStatus.Info, gc.Equals, "waiting for machine") c.Check(unitStatus.Data, 
gc.DeepEquals, map[string]interface{}{}) // Change remote state. now := time.Now() sInfo := status.StatusInfo{ - Status: status.StatusActive, + Status: status.Active, Message: "it works", Since: &now, } @@ -170,7 +171,8 @@ // Local view is unchanged. unitStatus, err = ctx.UnitStatus() c.Check(err, jc.ErrorIsNil) - c.Check(unitStatus.Status, gc.Equals, "unknown") + c.Check(unitStatus.Status, gc.Equals, "waiting") + c.Check(unitStatus.Info, gc.Equals, "waiting for machine") c.Check(unitStatus.Data, gc.DeepEquals, map[string]interface{}{}) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/runner/context/util_test.go juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/runner/context/util_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/runner/context/util_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/runner/context/util_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -25,7 +25,6 @@ "github.com/juju/juju/state" "github.com/juju/juju/state/multiwatcher" "github.com/juju/juju/storage" - coretesting "github.com/juju/juju/testing" "github.com/juju/juju/worker/uniter/runner/context" "github.com/juju/juju/worker/uniter/runner/jujuc" runnertesting "github.com/juju/juju/worker/uniter/runner/testing" @@ -33,7 +32,7 @@ var noProxies = proxy.Settings{} var apiAddrs = []string{"a1:123", "a2:123"} -var expectedApiAddrs = strings.Join(apiAddrs, " ") +var expectedAPIAddrs = strings.Join(apiAddrs, " ") // HookContextSuite contains shared setup for various other test suites. Test // methods should not be added to this type, because they'll get run repeatedly. 
@@ -45,12 +44,12 @@ relch *state.Charm relunits map[int]*state.RelationUnit storage *runnertesting.StorageContextAccessor - clock *coretesting.Clock + clock *jujutesting.Clock st api.Connection uniter *uniter.State apiUnit *uniter.Unit - meteredApiUnit *uniter.Unit + meteredAPIUnit *uniter.Unit meteredCharm *state.Charm apiRelunits map[int]*uniter.RelationUnit BlockHelper @@ -90,7 +89,7 @@ c.Assert(err, jc.ErrorIsNil) meteredState := s.OpenAPIAs(c, meteredUnit.Tag(), password) meteredUniter, err := meteredState.Uniter() - s.meteredApiUnit, err = meteredUniter.Unit(meteredUnit.Tag().(names.UnitTag)) + s.meteredAPIUnit, err = meteredUniter.Unit(meteredUnit.Tag().(names.UnitTag)) c.Assert(err, jc.ErrorIsNil) // Note: The unit must always have a charm URL set, because this @@ -115,7 +114,7 @@ }, } - s.clock = coretesting.NewClock(time.Time{}) + s.clock = jujutesting.NewClock(time.Time{}) } func (s *HookContextSuite) GetContext( @@ -220,7 +219,7 @@ relctxs[relId] = context.NewContextRelation(relUnit, cache) } - context, err := context.NewHookContext(s.meteredApiUnit, facade, "TestCtx", uuid, + context, err := context.NewHookContext(s.meteredAPIUnit, facade, "TestCtx", uuid, "test-model-name", relid, remote, relctxs, apiAddrs, proxies, canAddMetrics, metrics, nil, s.machine.Tag().(names.MachineTag), paths, s.clock) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/runner/debug/export_test.go juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/runner/debug/export_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/runner/debug/export_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/runner/debug/export_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -0,0 +1,16 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+ +package debug + +import "io" + +// FindSessionWithWriter returns the server session with the writer +// specified so the run output can be captured for tests. +func (c *HooksContext) FindSessionWithWriter(writer io.Writer) (*ServerSession, error) { + session, err := c.FindSession() + if session != nil { + session.output = writer + } + return session, err +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/runner/debug/server.go juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/runner/debug/server.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/runner/debug/server.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/runner/debug/server.go 2016-10-13 14:31:49.000000000 +0000 @@ -6,6 +6,7 @@ import ( "bytes" "errors" + "io" "io/ioutil" "os" "os/exec" @@ -18,6 +19,8 @@ type ServerSession struct { *HooksContext hooks set.Strings + + output io.Writer } // MatchHook returns true if the specified hook name matches @@ -40,6 +43,10 @@ cmd.Env = env cmd.Dir = charmDir cmd.Stdin = bytes.NewBufferString(debugHooksServerScript) + if s.output != nil { + cmd.Stdout = s.output + cmd.Stderr = s.output + } if err := cmd.Start(); err != nil { return err } @@ -76,7 +83,7 @@ return nil, err } hooks := set.NewStrings(args.Hooks...) - session := &ServerSession{c, hooks} + session := &ServerSession{HooksContext: c, hooks: hooks} return session, nil } @@ -99,7 +106,7 @@ 3. CTRL+a is tmux prefix. 
More help and info is available in the online documentation: -https://juju.ubuntu.com/docs/authors-hook-debug.html +https://jujucharms.com/docs/authors-hook-debug.html END diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/runner/debug/server_test.go juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/runner/debug/server_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/runner/debug/server_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/runner/debug/server_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -4,6 +4,7 @@ package debug import ( + "bytes" "fmt" "io/ioutil" "os" @@ -142,7 +143,8 @@ func (s *DebugHooksServerSuite) TestRunHook(c *gc.C) { err := ioutil.WriteFile(s.ctx.ClientFileLock(), []byte{}, 0777) c.Assert(err, jc.ErrorIsNil) - session, err := s.ctx.FindSession() + var output bytes.Buffer + session, err := s.ctx.FindSessionWithWriter(&output) c.Assert(session, gc.NotNil) c.Assert(err, jc.ErrorIsNil) @@ -154,6 +156,8 @@ // exit cleanly (as if the PID were real and no longer running). cmd := exec.Command("flock", s.ctx.ClientExitFileLock(), "-c", "sleep 5s") c.Assert(cmd.Start(), gc.IsNil) + defer cmd.Process.Kill() // kill flock + ch := make(chan error) go func() { ch <- session.RunHook(hookName, s.tmpdir, os.Environ()) @@ -162,12 +166,14 @@ // Wait until either we find the debug dir, or the flock is released. ticker := time.Tick(10 * time.Millisecond) var debugdir os.FileInfo + timeout := time.After(testing.LongWait) for debugdir == nil { select { + case <-timeout: + c.Fatal("test timed out") case err = <-ch: // flock was released before we found the debug dir. - c.Error("could not find hook.sh") - + c.Fatalf("could not find hook.sh\nerr: %v\noutput: %s", err, output.String()) case <-ticker: tmpdir, err := os.Open(s.tmpdir) if err != nil { @@ -203,9 +209,12 @@ // RunHook should complete without waiting to be // killed, and despite the exit lock being held. 
- err = <-ch - c.Assert(err, jc.ErrorIsNil) - cmd.Process.Kill() // kill flock + select { + case err = <-ch: + c.Assert(err, jc.ErrorIsNil) + case <-time.After(testing.LongWait): + c.Fatal("RunHook did not complete") + } } func (s *DebugHooksServerSuite) verifyEnvshFile(c *gc.C, envshPath string, hookName string) { diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/runner/factory_test.go juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/runner/factory_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/runner/factory_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/runner/factory_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -9,6 +9,7 @@ "time" "github.com/juju/errors" + "github.com/juju/testing" jc "github.com/juju/testing/checkers" "github.com/juju/utils" gc "gopkg.in/check.v1" @@ -16,7 +17,6 @@ "gopkg.in/juju/names.v2" "github.com/juju/juju/state" - "github.com/juju/juju/testing" "github.com/juju/juju/worker/uniter/hook" "github.com/juju/juju/worker/uniter/runner" "github.com/juju/juju/worker/uniter/runner/context" diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/runner/jujuc/action-fail.go juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/runner/jujuc/action-fail.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/runner/jujuc/action-fail.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/runner/jujuc/action-fail.go 2016-10-13 14:31:49.000000000 +0000 @@ -5,7 +5,7 @@ import ( "github.com/juju/cmd" - "launchpad.net/gnuflag" + "github.com/juju/gnuflag" ) // ActionFailCommand implements the action-fail command. 
diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/runner/jujuc/action-get.go juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/runner/jujuc/action-get.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/runner/jujuc/action-get.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/runner/jujuc/action-get.go 2016-10-13 14:31:49.000000000 +0000 @@ -7,7 +7,7 @@ "strings" "github.com/juju/cmd" - "launchpad.net/gnuflag" + "github.com/juju/gnuflag" ) // ActionGetCommand implements the action-get command. diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/runner/jujuc/action-get_test.go juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/runner/jujuc/action-get_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/runner/jujuc/action-get_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/runner/jujuc/action-get_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -179,7 +179,7 @@ summary: "a simple map of one value to one key", args: []string{"--format", "json"}, actionParams: actionGetTestMaps[0], - out: "{\"outfile\":\"foo.bz2\"}\n", + out: `{"outfile":"foo.bz2"}` + "\n", }, { summary: "an entire map", args: []string{}, @@ -257,10 +257,11 @@ ctx := testing.Context(c) code := cmd.Main(com, ctx, t.args) c.Check(code, gc.Equals, t.code) - c.Check(bufferString(ctx.Stdout), gc.Equals, t.out) if code == 0 { + c.Check(bufferString(ctx.Stdout), gc.Equals, t.out) c.Check(bufferString(ctx.Stderr), gc.Equals, "") } else { + c.Check(bufferString(ctx.Stdout), gc.Equals, "") expect := fmt.Sprintf(`(\n)*error: %s\n`, t.errMsg) c.Check(bufferString(ctx.Stderr), gc.Matches, expect) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/runner/jujuc/action-set.go juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/runner/jujuc/action-set.go --- 
juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/runner/jujuc/action-set.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/runner/jujuc/action-set.go 2016-10-13 14:31:49.000000000 +0000 @@ -9,7 +9,7 @@ "strings" "github.com/juju/cmd" - "launchpad.net/gnuflag" + "github.com/juju/gnuflag" ) var keyRule = regexp.MustCompile("^[a-z0-9](?:[a-z0-9-]*[a-z0-9])?$") diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/runner/jujuc/config-get.go juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/runner/jujuc/config-get.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/runner/jujuc/config-get.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/runner/jujuc/config-get.go 2016-10-13 14:31:49.000000000 +0000 @@ -7,7 +7,7 @@ "fmt" "github.com/juju/cmd" - "launchpad.net/gnuflag" + "github.com/juju/gnuflag" ) // ConfigGetCommand implements the config-get command. 
diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/runner/jujuc/config-get_test.go juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/runner/jujuc/config-get_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/runner/jujuc/config-get_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/runner/jujuc/config-get_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -47,9 +47,9 @@ c.Assert(err, jc.ErrorIsNil) ctx := testing.Context(c) code := cmd.Main(com, ctx, t.args) - c.Assert(code, gc.Equals, 0) - c.Assert(bufferString(ctx.Stderr), gc.Equals, "") - c.Assert(bufferString(ctx.Stdout), gc.Matches, t.out) + c.Check(code, gc.Equals, 0) + c.Check(bufferString(ctx.Stderr), gc.Equals, "") + c.Check(bufferString(ctx.Stdout), gc.Matches, t.out) } } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/runner/jujuc/is-leader.go juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/runner/jujuc/is-leader.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/runner/jujuc/is-leader.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/runner/jujuc/is-leader.go 2016-10-13 14:31:49.000000000 +0000 @@ -6,7 +6,7 @@ import ( "github.com/juju/cmd" "github.com/juju/errors" - "launchpad.net/gnuflag" + "github.com/juju/gnuflag" ) // isLeaderCommand implements the is-leader command. 
diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/runner/jujuc/is-leader_test.go juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/runner/jujuc/is-leader_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/runner/jujuc/is-leader_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/runner/jujuc/is-leader_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -6,7 +6,6 @@ import ( "github.com/juju/cmd" "github.com/juju/errors" - jujutesting "github.com/juju/testing" jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" @@ -15,7 +14,7 @@ ) type isLeaderSuite struct { - jujutesting.IsolationSuite + testing.BaseSuite } var _ = gc.Suite(&isLeaderSuite{}) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/runner/jujuc/juju-log.go juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/runner/jujuc/juju-log.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/runner/jujuc/juju-log.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/runner/jujuc/juju-log.go 2016-10-13 14:31:49.000000000 +0000 @@ -9,8 +9,8 @@ "github.com/juju/cmd" "github.com/juju/errors" + "github.com/juju/gnuflag" "github.com/juju/loggo" - "launchpad.net/gnuflag" ) // JujuLogCommand implements the juju-log command. diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/runner/jujuc/leader-get.go juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/runner/jujuc/leader-get.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/runner/jujuc/leader-get.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/runner/jujuc/leader-get.go 2016-10-13 14:31:49.000000000 +0000 @@ -8,7 +8,7 @@ "github.com/juju/cmd" "github.com/juju/errors" - "launchpad.net/gnuflag" + "github.com/juju/gnuflag" ) // leaderGetCommand implements the leader-get command. 
diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/runner/jujuc/leader-get_test.go juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/runner/jujuc/leader-get_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/runner/jujuc/leader-get_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/runner/jujuc/leader-get_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -6,7 +6,6 @@ import ( "github.com/juju/cmd" "github.com/juju/errors" - jujutesting "github.com/juju/testing" jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" @@ -15,13 +14,14 @@ ) type leaderGetSuite struct { - jujutesting.IsolationSuite + testing.BaseSuite command cmd.Command } var _ = gc.Suite(&leaderGetSuite{}) func (s *leaderGetSuite) SetUpTest(c *gc.C) { + s.BaseSuite.SetUpTest(c) var err error s.command, err = jujuc.NewLeaderGetCommand(nil) c.Assert(err, jc.ErrorIsNil) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/runner/jujuc/network-get.go juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/runner/jujuc/network-get.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/runner/jujuc/network-get.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/runner/jujuc/network-get.go 2016-10-13 14:31:49.000000000 +0000 @@ -8,7 +8,7 @@ "github.com/juju/cmd" "github.com/juju/errors" - "launchpad.net/gnuflag" + "github.com/juju/gnuflag" ) // NetworkGetCommand implements the network-get command. 
diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/runner/jujuc/opened-ports.go juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/runner/jujuc/opened-ports.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/runner/jujuc/opened-ports.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/runner/jujuc/opened-ports.go 2016-10-13 14:31:49.000000000 +0000 @@ -5,7 +5,7 @@ import ( "github.com/juju/cmd" - "launchpad.net/gnuflag" + "github.com/juju/gnuflag" ) // OpenedPortsCommand implements the opened-ports command. diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/runner/jujuc/ports.go juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/runner/jujuc/ports.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/runner/jujuc/ports.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/runner/jujuc/ports.go 2016-10-13 14:31:49.000000000 +0000 @@ -11,7 +11,7 @@ "github.com/juju/cmd" "github.com/juju/errors" - "launchpad.net/gnuflag" + "github.com/juju/gnuflag" ) const ( diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/runner/jujuc/reboot.go juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/runner/jujuc/reboot.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/runner/jujuc/reboot.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/runner/jujuc/reboot.go 2016-10-13 14:31:49.000000000 +0000 @@ -7,7 +7,7 @@ import ( "github.com/juju/cmd" "github.com/juju/errors" - "launchpad.net/gnuflag" + "github.com/juju/gnuflag" ) // JujuRebootCommand implements the juju-reboot command. 
diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/runner/jujuc/reboot_test.go juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/runner/jujuc/reboot_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/runner/jujuc/reboot_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/runner/jujuc/reboot_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -6,9 +6,9 @@ import ( "github.com/juju/cmd" + "github.com/juju/gnuflag" jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" - "launchpad.net/gnuflag" "github.com/juju/juju/testing" "github.com/juju/juju/worker/uniter/runner/jujuc" diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/runner/jujuc/relation-get.go juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/runner/jujuc/relation-get.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/runner/jujuc/relation-get.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/runner/jujuc/relation-get.go 2016-10-13 14:31:49.000000000 +0000 @@ -8,7 +8,7 @@ "github.com/juju/cmd" "github.com/juju/errors" - "launchpad.net/gnuflag" + "github.com/juju/gnuflag" "github.com/juju/juju/apiserver/params" ) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/runner/jujuc/relation-get_test.go juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/runner/jujuc/relation-get_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/runner/jujuc/relation-get_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/runner/jujuc/relation-get_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -152,8 +152,8 @@ if code == 0 { c.Check(bufferString(ctx.Stderr), gc.Equals, "") expect := t.out - if expect != "" { - expect = expect + "\n" + if len(expect) > 0 { + expect += "\n" } c.Check(bufferString(ctx.Stdout), gc.Equals, expect) } else { diff -Nru 
juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/runner/jujuc/relation-ids.go juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/runner/jujuc/relation-ids.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/runner/jujuc/relation-ids.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/runner/jujuc/relation-ids.go 2016-10-13 14:31:49.000000000 +0000 @@ -9,7 +9,7 @@ "github.com/juju/cmd" "github.com/juju/errors" - "launchpad.net/gnuflag" + "github.com/juju/gnuflag" ) // RelationIdsCommand implements the relation-ids command. diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/runner/jujuc/relation-list.go juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/runner/jujuc/relation-list.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/runner/jujuc/relation-list.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/runner/jujuc/relation-list.go 2016-10-13 14:31:49.000000000 +0000 @@ -8,7 +8,7 @@ "github.com/juju/cmd" "github.com/juju/errors" - "launchpad.net/gnuflag" + "github.com/juju/gnuflag" ) // RelationListCommand implements the relation-list command. 
diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/runner/jujuc/relation-list_test.go juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/runner/jujuc/relation-list_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/runner/jujuc/relation-list_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/runner/jujuc/relation-list_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -121,16 +121,16 @@ c.Logf(bufferString(ctx.Stderr)) c.Assert(code, gc.Equals, t.code) if code == 0 { - c.Assert(bufferString(ctx.Stderr), gc.Equals, "") + c.Check(bufferString(ctx.Stderr), gc.Equals, "") expect := t.out if expect != "" { - expect = expect + "\n" + expect += "\n" } - c.Assert(bufferString(ctx.Stdout), gc.Equals, expect) + c.Check(bufferString(ctx.Stdout), gc.Equals, expect) } else { - c.Assert(bufferString(ctx.Stdout), gc.Equals, "") + c.Check(bufferString(ctx.Stdout), gc.Equals, "") expect := fmt.Sprintf(`(.|\n)*error: %s\n`, t.out) - c.Assert(bufferString(ctx.Stderr), gc.Matches, expect) + c.Check(bufferString(ctx.Stderr), gc.Matches, expect) } } } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/runner/jujuc/relation-set.go juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/runner/jujuc/relation-set.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/runner/jujuc/relation-set.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/runner/jujuc/relation-set.go 2016-10-13 14:31:49.000000000 +0000 @@ -10,9 +10,9 @@ "github.com/juju/cmd" "github.com/juju/errors" + "github.com/juju/gnuflag" "github.com/juju/utils/keyvalues" goyaml "gopkg.in/yaml.v2" - "launchpad.net/gnuflag" ) const relationSetDoc = ` diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/runner/jujuc/server.go juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/runner/jujuc/server.go --- 
juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/runner/jujuc/server.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/runner/jujuc/server.go 2016-10-13 14:31:49.000000000 +0000 @@ -171,7 +171,10 @@ } j.mu.Lock() defer j.mu.Unlock() - logger.Infof("running hook tool %q %q", req.CommandName, req.Args) + // Beware, reducing the log level of the following line will lead + // to passwords leaking if passed as args. + logger.Tracef("running hook tool %q %q", req.CommandName, req.Args) + logger.Tracef("running hook tool %q", req.CommandName) logger.Debugf("hook context id %q; dir %q", req.ContextId, req.Dir) wrapper := &cmdWrapper{c, nil} resp.Code = cmd.Main(wrapper, ctx, req.Args) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/runner/jujuc/server_test.go juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/runner/jujuc/server_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/runner/jujuc/server_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/runner/jujuc/server_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -16,10 +16,10 @@ "time" "github.com/juju/cmd" + "github.com/juju/gnuflag" jc "github.com/juju/testing/checkers" "github.com/juju/utils/exec" gc "gopkg.in/check.v1" - "launchpad.net/gnuflag" "github.com/juju/juju/juju/sockets" "github.com/juju/juju/testing" diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/runner/jujuc/status-get.go juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/runner/jujuc/status-get.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/runner/jujuc/status-get.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/runner/jujuc/status-get.go 2016-10-13 14:31:49.000000000 +0000 @@ -6,7 +6,7 @@ import ( "github.com/juju/cmd" "github.com/juju/errors" - "launchpad.net/gnuflag" + "github.com/juju/gnuflag" 
"github.com/juju/juju/status" ) @@ -79,7 +79,7 @@ serviceStatus, err := c.ctx.ApplicationStatus() if err != nil { if errors.IsNotImplemented(err) { - return c.out.Write(ctx, status.StatusUnknown) + return c.out.Write(ctx, status.Unknown) } return errors.Annotatef(err, "finding service status") } @@ -111,7 +111,7 @@ unitStatus, err := c.ctx.UnitStatus() if err != nil { if errors.IsNotImplemented(err) { - return c.out.Write(ctx, status.StatusUnknown) + return c.out.Write(ctx, status.Unknown) } return errors.Annotatef(err, "finding workload status") } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/runner/jujuc/status-get_test.go juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/runner/jujuc/status-get_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/runner/jujuc/status-get_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/runner/jujuc/status-get_test.go 2016-10-13 14:31:49.000000000 +0000 @@ -42,7 +42,6 @@ }{ {[]string{"--format", "json", "--include-data"}, formatJson, statusAttributes}, {[]string{"--format", "yaml"}, formatYaml, map[string]interface{}{"status": "error"}}, - {[]string{}, -1, "error\n"}, } func setFakeStatus(ctx *Context) { diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/runner/jujuc/status-set.go juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/runner/jujuc/status-set.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/runner/jujuc/status-set.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/runner/jujuc/status-set.go 2016-10-13 14:31:49.000000000 +0000 @@ -6,7 +6,7 @@ import ( "github.com/juju/cmd" "github.com/juju/errors" - "launchpad.net/gnuflag" + "github.com/juju/gnuflag" "github.com/juju/juju/status" ) @@ -40,10 +40,10 @@ } var validStatus = []status.Status{ - status.StatusMaintenance, - status.StatusBlocked, - status.StatusWaiting, - 
status.StatusActive, + status.Maintenance, + status.Blocked, + status.Waiting, + status.Active, } func (c *StatusSetCommand) SetFlags(f *gnuflag.FlagSet) { diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/runner/jujuc/storage-get.go juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/runner/jujuc/storage-get.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/runner/jujuc/storage-get.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/runner/jujuc/storage-get.go 2016-10-13 14:31:49.000000000 +0000 @@ -6,8 +6,8 @@ import ( "github.com/juju/cmd" "github.com/juju/errors" + "github.com/juju/gnuflag" "gopkg.in/juju/names.v2" - "launchpad.net/gnuflag" ) // StorageGetCommand implements the storage-get command. diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/runner/jujuc/storage-list.go juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/runner/jujuc/storage-list.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/runner/jujuc/storage-list.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/runner/jujuc/storage-list.go 2016-10-13 14:31:49.000000000 +0000 @@ -6,8 +6,8 @@ import ( "github.com/juju/cmd" "github.com/juju/errors" + "github.com/juju/gnuflag" "gopkg.in/juju/names.v2" - "launchpad.net/gnuflag" ) // StorageListCommand implements the storage-list command. 
diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/runner/jujuc/unit-get.go juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/runner/jujuc/unit-get.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/runner/jujuc/unit-get.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/runner/jujuc/unit-get.go 2016-10-13 14:31:49.000000000 +0000 @@ -8,7 +8,7 @@ "github.com/juju/cmd" "github.com/juju/errors" - "launchpad.net/gnuflag" + "github.com/juju/gnuflag" ) // UnitGetCommand implements the unit-get command. diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/runner/util_test.go juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/runner/util_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/runner/util_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/runner/util_test.go 2016-10-13 14:31:50.000000000 +0000 @@ -11,6 +11,7 @@ "strings" "time" + jujutesting "github.com/juju/testing" jc "github.com/juju/testing/checkers" "github.com/juju/utils" "github.com/juju/utils/fs" @@ -25,7 +26,6 @@ "github.com/juju/juju/state" "github.com/juju/juju/storage" "github.com/juju/juju/testcharms" - coretesting "github.com/juju/juju/testing" "github.com/juju/juju/worker/uniter/runner" "github.com/juju/juju/worker/uniter/runner/context" runnertesting "github.com/juju/juju/worker/uniter/runner/testing" @@ -105,7 +105,7 @@ s.getRelationInfos, s.storage, s.paths, - coretesting.NewClock(time.Time{}), + jujutesting.NewClock(time.Time{}), ) c.Assert(err, jc.ErrorIsNil) diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/uniter.go juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/uniter.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/uniter.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/uniter.go 2016-10-13 14:31:50.000000000 +0000 
@@ -70,7 +70,6 @@ lastReportedStatus status.Status lastReportedMessage string - deployer *deployerProxy operationFactory operation.Factory operationExecutor operation.Executor newOperationExecutor NewExecutorFunc @@ -257,7 +256,7 @@ // error state. return nil } - return setAgentStatus(u, status.StatusIdle, "", nil) + return setAgentStatus(u, status.Idle, "", nil) } clearResolved := func() error { @@ -277,7 +276,6 @@ uniterResolver := NewUniterResolver(ResolverConfig{ ClearResolved: clearResolved, ReportHookError: u.reportHookError, - FixDeployer: u.deployer.Fix, ShouldRetryHooks: u.hookRetryStrategy.ShouldRetry, StartRetryHookTimer: retryHookTimer.Start, StopRetryHookTimer: retryHookTimer.Reset, @@ -337,7 +335,7 @@ // handling is outside of the resolver's control. if operation.IsDeployConflictError(cause) { localState.Conflicted = true - err = setAgentStatus(u, status.StatusError, "upgrade failed", nil) + err = setAgentStatus(u, status.Error, "upgrade failed", nil) } else { reportAgentError(u, "resolver loop error", err) } @@ -399,6 +397,21 @@ // and inescapable, whereas this one is not. return worker.ErrTerminateAgent } + // If initialising for the first time after deploying, update the status. + currentStatus, err := u.unit.UnitStatus() + if err != nil { + return err + } + // TODO(fwereade/wallyworld): we should have an explicit place in the model + // to tell us when we've hit this point, instead of piggybacking on top of + // status and/or status history. + // If the previous status was waiting for machine, we transition to the next step. 
+ if currentStatus.Status == string(status.Waiting) && + (currentStatus.Info == status.MessageWaitForMachine || currentStatus.Info == status.MessageInstallingAgent) { + if err := u.unit.SetUnitStatus(status.Waiting, status.MessageInitializingAgent, nil); err != nil { + return errors.Trace(err) + } + } if err := jujuc.EnsureSymlinks(u.paths.ToolsDir); err != nil { return err } @@ -423,6 +436,9 @@ u.commands = runcommands.NewCommands() u.commandChannel = make(chan string) + if err := charm.ClearDownloads(u.paths.State.BundlesDir); err != nil { + logger.Warningf(err.Error()) + } deployer, err := charm.NewDeployer( u.paths.State.CharmDir, u.paths.State.DeployerDir, @@ -431,7 +447,6 @@ if err != nil { return errors.Annotatef(err, "cannot create deployer") } - u.deployer = &deployerProxy{deployer} contextFactory, err := context.NewContextFactory( u.st, unitTag, u.leadershipTracker, u.relations.GetInfo, u.storage, u.paths, u.clock, ) @@ -445,7 +460,7 @@ return errors.Trace(err) } u.operationFactory = operation.NewFactory(operation.FactoryParams{ - Deployer: u.deployer, + Deployer: deployer, RunnerFactory: runnerFactory, Callbacks: &operationCallbacks{u}, Abort: u.catacomb.Dying(), @@ -550,5 +565,5 @@ } statusData["hook"] = hookName statusMessage := fmt.Sprintf("hook failed: %q", hookName) - return setAgentStatus(u, status.StatusError, statusMessage, statusData) + return setAgentStatus(u, status.Error, statusMessage, statusData) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/uniter_test.go juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/uniter_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/uniter_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/uniter_test.go 2016-10-13 14:31:50.000000000 +0000 @@ -6,7 +6,6 @@ import ( "fmt" "io" - "io/ioutil" "os" "os/exec" "path/filepath" @@ -17,9 +16,9 @@ "github.com/juju/errors" "github.com/juju/mutex" + jujutesting 
"github.com/juju/testing" jc "github.com/juju/testing/checkers" ft "github.com/juju/testing/filetesting" - "github.com/juju/utils/clock" gc "gopkg.in/check.v1" corecharm "gopkg.in/juju/charm.v6-unstable" @@ -46,7 +45,7 @@ var _ = gc.Suite(&UniterSuite{}) -var leaseClock *coretesting.Clock +var leaseClock *jujutesting.Clock // This guarantees that we get proper platform // specific error directly from their source @@ -69,16 +68,6 @@ s.oldLcAll = os.Getenv("LC_ALL") os.Setenv("LC_ALL", "en_US") s.unitDir = filepath.Join(s.dataDir, "agents", "unit-u-0") - - zone, err := time.LoadLocation("") - c.Assert(err, jc.ErrorIsNil) - now := time.Date(2030, 11, 11, 11, 11, 11, 11, zone) - leaseClock = coretesting.NewClock(now) - oldGetClock := state.GetClock - state.GetClock = func() clock.Clock { - return leaseClock - } - s.AddCleanup(func(*gc.C) { state.GetClock = oldGetClock }) all.RegisterForServer() } @@ -89,9 +78,15 @@ } func (s *UniterSuite) SetUpTest(c *gc.C) { + zone, err := time.LoadLocation("") + c.Assert(err, jc.ErrorIsNil) + now := time.Date(2030, 11, 11, 11, 11, 11, 11, zone) + leaseClock = jujutesting.NewClock(now) s.updateStatusHookTicker = newManualTicker() s.GitSuite.SetUpTest(c) s.JujuConnSuite.SetUpTest(c) + err = s.State.SetClockForTesting(leaseClock) + c.Assert(err, jc.ErrorIsNil) } func (s *UniterSuite) TearDownTest(c *gc.C) { @@ -155,6 +150,23 @@ }) } +func (s *UniterSuite) TestPreviousDownloadsCleared(c *gc.C) { + s.runUniterTests(c, []uniterTest{ + ut( + "Ensure stale download files are cleared on uniter startup", + createCharm{}, + serveCharm{}, + ensureStateWorker{}, + createServiceAndUnit{}, + createDownloads{}, + startUniter{}, + waitAddresses{}, + waitUnitAgent{status: status.Idle}, + verifyDownloadsCleared{}, + ), + }) +} + func (s *UniterSuite) TestUniterBootstrap(c *gc.C) { //TODO(bogdanteleaga): Fix this on windows if runtime.GOOS == "windows" { @@ -179,6 +191,44 @@ }) } +type noopExecutor struct { + operation.Executor +} + +func (m 
*noopExecutor) Run(op operation.Operation) error { + return errors.New("some error occurred") +} + +func (s *UniterSuite) TestUniterStartupStatus(c *gc.C) { + executorFunc := func(stateFilePath string, getInstallCharm func() (*corecharm.URL, error), acquireLock func() (mutex.Releaser, error)) (operation.Executor, error) { + e, err := operation.NewExecutor(stateFilePath, getInstallCharm, acquireLock) + c.Assert(err, jc.ErrorIsNil) + return &mockExecutor{e}, nil + } + s.runUniterTests(c, []uniterTest{ + ut( + "unit status and message at startup", + createCharm{}, + serveCharm{}, + ensureStateWorker{}, + createServiceAndUnit{}, + startUniter{ + newExecutorFunc: executorFunc, + }, + waitUnitAgent{ + statusGetter: unitStatusGetter, + status: status.Waiting, + info: status.MessageInitializingAgent, + }, + waitUnitAgent{ + status: status.Failed, + info: "resolver loop error", + }, + expectError{".*some error occurred.*"}, + ), + }) +} + func (s *UniterSuite) TestUniterInstallHook(c *gc.C) { s.runUniterTests(c, []uniterTest{ ut( @@ -188,11 +238,11 @@ resolveError{state.ResolvedNoHooks}, waitUnitAgent{ - status: status.StatusIdle, + status: status.Idle, }, waitUnitAgent{ statusGetter: unitStatusGetter, - status: status.StatusUnknown, + status: status.Unknown, }, waitHooks{"leader-elected", "config-changed", "start"}, ), ut( @@ -203,7 +253,7 @@ resolveError{state.ResolvedRetryHooks}, waitUnitAgent{ statusGetter: unitStatusGetter, - status: status.StatusError, + status: status.Error, info: `hook failed: "install"`, data: map[string]interface{}{ "hook": "install", @@ -215,7 +265,7 @@ resolveError{state.ResolvedRetryHooks}, waitUnitAgent{ - status: status.StatusIdle, + status: status.Idle, }, waitHooks{"install", "leader-elected", "config-changed", "start"}, ), @@ -230,7 +280,7 @@ serveCharm{}, createUniter{}, waitHooks(startupHooks(false)), - waitUnitAgent{status: status.StatusIdle}, + waitUnitAgent{status: status.Idle}, updateStatusHookTick{}, waitHooks{"update-status"}, ), 
@@ -249,7 +299,7 @@ // Resolve and hook should run. resolveError{state.ResolvedNoHooks}, waitUnitAgent{ - status: status.StatusIdle, + status: status.Idle, }, waitHooks{}, updateStatusHookTick{}, @@ -267,11 +317,11 @@ resolveError{state.ResolvedNoHooks}, waitUnitAgent{ - status: status.StatusIdle, + status: status.Idle, }, waitUnitAgent{ statusGetter: unitStatusGetter, - status: status.StatusMaintenance, + status: status.Maintenance, info: "installing charm software", }, waitHooks{"config-changed"}, @@ -284,7 +334,7 @@ resolveError{state.ResolvedRetryHooks}, waitUnitAgent{ statusGetter: unitStatusGetter, - status: status.StatusError, + status: status.Error, info: `hook failed: "start"`, data: map[string]interface{}{ "hook": "start", @@ -296,7 +346,7 @@ fixHook{"start"}, resolveError{state.ResolvedRetryHooks}, waitUnitAgent{ - status: status.StatusIdle, + status: status.Idle, }, waitHooks{"start", "config-changed"}, verifyRunning{}, @@ -313,7 +363,7 @@ createUniter{}, waitUnitAgent{ statusGetter: unitStatusGetter, - status: status.StatusError, + status: status.Error, info: `hook failed: "install"`, data: map[string]interface{}{ "hook": "install", @@ -322,7 +372,7 @@ resolveError{state.ResolvedNoHooks}, waitUnitAgent{ statusGetter: unitStatusGetter, - status: status.StatusError, + status: status.Error, info: `hook failed: "leader-elected"`, data: map[string]interface{}{ "hook": "leader-elected", @@ -331,7 +381,7 @@ resolveError{state.ResolvedNoHooks}, waitUnitAgent{ statusGetter: unitStatusGetter, - status: status.StatusError, + status: status.Error, info: `hook failed: "config-changed"`, data: map[string]interface{}{ "hook": "config-changed", @@ -340,7 +390,7 @@ resolveError{state.ResolvedNoHooks}, waitUnitAgent{ statusGetter: unitStatusGetter, - status: status.StatusError, + status: status.Error, info: `hook failed: "start"`, data: map[string]interface{}{ "hook": "start", @@ -363,11 +413,11 @@ fixHook{"config-changed"}, resolveError{state.ResolvedNoHooks}, 
waitUnitAgent{ - status: status.StatusIdle, + status: status.Idle, }, waitUnitAgent{ statusGetter: unitStatusGetter, - status: status.StatusUnknown, + status: status.Unknown, }, // TODO(axw) confirm with fwereade that this is correct. // Previously we would see "start", "config-changed". @@ -387,7 +437,7 @@ resolveError{state.ResolvedRetryHooks}, waitUnitAgent{ statusGetter: unitStatusGetter, - status: status.StatusError, + status: status.Error, info: `hook failed: "config-changed"`, data: map[string]interface{}{ "hook": "config-changed", @@ -399,7 +449,7 @@ fixHook{"config-changed"}, resolveError{state.ResolvedRetryHooks}, waitUnitAgent{ - status: status.StatusIdle, + status: status.Idle, }, waitHooks{"config-changed", "start"}, verifyRunning{}, @@ -413,7 +463,7 @@ serveCharm{}, createUniter{}, waitUnitAgent{ - status: status.StatusIdle, + status: status.Idle, }, waitHooks{"install", "leader-elected", "config-changed", "start"}, assertYaml{"charm/config.out", map[string]interface{}{ @@ -452,7 +502,7 @@ waitAddresses{}, waitHooks{}, lock.release(), - waitUnitAgent{status: status.StatusIdle}, + waitUnitAgent{status: status.Idle}, waitHooks{"install", "leader-elected", "config-changed", "start"}, ), }) @@ -501,12 +551,12 @@ createCharm{revision: 1}, upgradeCharm{revision: 1}, waitUnitAgent{ - status: status.StatusIdle, + status: status.Idle, charm: 1, }, waitUnitAgent{ statusGetter: unitStatusGetter, - status: status.StatusUnknown, + status: status.Unknown, charm: 1, }, waitHooks{"upgrade-charm", "config-changed"}, @@ -524,12 +574,12 @@ createCharm{revision: 1}, upgradeCharm{revision: 1, forced: true}, waitUnitAgent{ - status: status.StatusIdle, + status: status.Idle, charm: 1, }, waitUnitAgent{ statusGetter: unitStatusGetter, - status: status.StatusUnknown, + status: status.Unknown, charm: 1, }, waitHooks{"upgrade-charm", "config-changed"}, @@ -548,7 +598,7 @@ upgradeCharm{revision: 1}, waitUnitAgent{ statusGetter: unitStatusGetter, - status: status.StatusError, + 
status: status.Error, info: `hook failed: "upgrade-charm"`, data: map[string]interface{}{ "hook": "upgrade-charm", @@ -561,12 +611,12 @@ resolveError{state.ResolvedNoHooks}, waitUnitAgent{ - status: status.StatusIdle, + status: status.Idle, charm: 1, }, waitUnitAgent{ statusGetter: unitStatusGetter, - status: status.StatusUnknown, + status: status.Unknown, charm: 1, }, waitHooks{"config-changed"}, @@ -584,7 +634,7 @@ upgradeCharm{revision: 1}, waitUnitAgent{ statusGetter: unitStatusGetter, - status: status.StatusError, + status: status.Error, info: `hook failed: "upgrade-charm"`, data: map[string]interface{}{ "hook": "upgrade-charm", @@ -598,7 +648,7 @@ resolveError{state.ResolvedRetryHooks}, waitUnitAgent{ statusGetter: unitStatusGetter, - status: status.StatusError, + status: status.Error, info: `hook failed: "upgrade-charm"`, data: map[string]interface{}{ "hook": "upgrade-charm", @@ -611,7 +661,7 @@ fixHook{"upgrade-charm"}, resolveError{state.ResolvedRetryHooks}, waitUnitAgent{ - status: status.StatusIdle, + status: status.Idle, charm: 1, }, waitHooks{"upgrade-charm", "config-changed"}, @@ -676,7 +726,7 @@ createCharm{customize: appendResource}, serveCharm{}, createUniter{}, - waitUnitAgent{status: status.StatusIdle}, + waitUnitAgent{status: status.Idle}, waitHooks(startupHooks(false)), verifyCharm{}, @@ -710,11 +760,11 @@ serveCharm{}, createUniter{}, waitUnitAgent{ - status: status.StatusIdle, + status: status.Idle, }, waitUnitAgent{ statusGetter: unitStatusGetter, - status: status.StatusUnknown, + status: status.Unknown, }, waitHooks{"install", "leader-elected", "config-changed", "start"}, @@ -727,12 +777,12 @@ serveCharm{}, upgradeCharm{revision: 1}, waitUnitAgent{ - status: status.StatusIdle, + status: status.Idle, charm: 1, }, waitUnitAgent{ statusGetter: unitStatusGetter, - status: status.StatusUnknown, + status: status.Unknown, charm: 1, }, waitHooks{"upgrade-charm", "config-changed"}, @@ -792,7 +842,7 @@ upgradeCharm{revision: 1}, waitUnitAgent{ 
statusGetter: unitStatusGetter, - status: status.StatusError, + status: status.Error, info: `hook failed: "start"`, data: map[string]interface{}{ "hook": "start", @@ -804,12 +854,12 @@ resolveError{state.ResolvedNoHooks}, waitUnitAgent{ - status: status.StatusIdle, + status: status.Idle, charm: 1, }, waitUnitAgent{ statusGetter: unitStatusGetter, - status: status.StatusMaintenance, + status: status.Maintenance, info: "installing charm software", charm: 1, }, @@ -833,7 +883,7 @@ // useful to wait until that point... waitUnitAgent{ statusGetter: unitStatusGetter, - status: status.StatusError, + status: status.Error, info: `hook failed: "start"`, data: map[string]interface{}{ "hook": "start", @@ -849,7 +899,7 @@ resolveError{state.ResolvedNoHooks}, waitUnitAgent{ - status: status.StatusIdle, + status: status.Idle, charm: 1, }, waitHooks{"config-changed"}, @@ -858,127 +908,6 @@ }) } -func (s *UniterSuite) TestUniterDeployerConversion(c *gc.C) { - coretesting.SkipIfGitNotAvailable(c) - - deployerConversionTests := []uniterTest{ - ut( - "install normally, check not using git", - quickStart{}, - verifyCharm{ - checkFiles: ft.Entries{ft.Removed{".git"}}, - }, - ), ut( - "install with git, restart in steady state", - prepareGitUniter{[]stepper{ - quickStart{}, - verifyGitCharm{}, - stopUniter{}, - }}, - startUniter{}, - waitHooks{"config-changed"}, - - // At this point, the deployer has been converted, but the - // charm directory itself hasn't; the *next* deployment will - // actually hit the charm directory and strip out the git - // stuff. 
- createCharm{revision: 1}, - upgradeCharm{revision: 1}, - waitHooks{"upgrade-charm", "config-changed"}, - waitUnitAgent{ - status: status.StatusIdle, - charm: 1, - }, - waitUnitAgent{ - statusGetter: unitStatusGetter, - status: status.StatusUnknown, - charm: 1, - }, - verifyCharm{ - revision: 1, - checkFiles: ft.Entries{ft.Removed{".git"}}, - }, - verifyRunning{}, - ), ut( - "install with git, get conflicted, mark resolved", - prepareGitUniter{[]stepper{ - startGitUpgradeError{}, - stopUniter{}, - }}, - startUniter{}, - - resolveError{state.ResolvedNoHooks}, - waitHooks{"upgrade-charm", "config-changed"}, - waitUnitAgent{ - status: status.StatusIdle, - charm: 1, - }, - waitUnitAgent{ - statusGetter: unitStatusGetter, - status: status.StatusUnknown, - charm: 1, - }, - verifyCharm{revision: 1}, - verifyRunning{}, - - // Due to the uncertainties around marking upgrade conflicts resolved, - // the charm directory again remains unconverted, although the deployer - // should have been fixed. Again, we check this by running another - // upgrade and verifying the .git dir is then removed. 
- createCharm{revision: 2}, - upgradeCharm{revision: 2}, - waitHooks{"upgrade-charm", "config-changed"}, - waitUnitAgent{ - status: status.StatusIdle, - charm: 2, - }, - waitUnitAgent{ - statusGetter: unitStatusGetter, - status: status.StatusUnknown, - charm: 2, - }, - verifyCharm{ - revision: 2, - checkFiles: ft.Entries{ft.Removed{".git"}}, - }, - verifyRunning{}, - ), ut( - "install with git, get conflicted, force an upgrade", - prepareGitUniter{[]stepper{ - startGitUpgradeError{}, - stopUniter{}, - }}, - startUniter{}, - - createCharm{ - revision: 2, - customize: func(c *gc.C, ctx *context, path string) { - ft.File{"data", "OVERWRITE!", 0644}.Create(c, path) - }, - }, - serveCharm{}, - upgradeCharm{revision: 2, forced: true}, - waitHooks{"upgrade-charm", "config-changed"}, - waitUnitAgent{ - status: status.StatusIdle, - charm: 2, - }, - - // A forced upgrade allows us to swap out the git deployer *and* - // the .git dir inside the charm immediately; check we did so. - verifyCharm{ - revision: 2, - checkFiles: ft.Entries{ - ft.Removed{".git"}, - ft.File{"data", "OVERWRITE!", 0644}, - }, - }, - verifyRunning{}, - ), - } - s.runUniterTests(c, deployerConversionTests) -} - func (s *UniterSuite) TestUniterUpgradeConflicts(c *gc.C) { coretesting.SkipIfPPC64EL(c, "lp:1448308") //TODO(bogdanteleaga): Fix this on windows @@ -996,12 +925,12 @@ resolveError{state.ResolvedNoHooks}, waitHooks{"upgrade-charm", "config-changed"}, waitUnitAgent{ - status: status.StatusIdle, + status: status.Idle, charm: 1, }, waitUnitAgent{ statusGetter: unitStatusGetter, - status: status.StatusUnknown, + status: status.Unknown, charm: 1, }, verifyCharm{revision: 1}, @@ -1016,12 +945,12 @@ upgradeCharm{revision: 2, forced: true}, waitHooks{"upgrade-charm", "config-changed"}, waitUnitAgent{ - status: status.StatusIdle, + status: status.Idle, charm: 2, }, waitUnitAgent{ statusGetter: unitStatusGetter, - status: status.StatusUnknown, + status: status.Unknown, charm: 2, }, verifyCharm{revision: 2}, 
@@ -1045,142 +974,6 @@ }) } -func (s *UniterSuite) TestUniterUpgradeGitConflicts(c *gc.C) { - coretesting.SkipIfGitNotAvailable(c) - - // These tests are copies of the old git-deployer-related tests, to test that - // the uniter with the manifest-deployer work patched out still works how it - // used to; thus demonstrating that the *other* tests that verify manifest - // deployer behaviour in the presence of an old git deployer are working against - // an accurate representation of the base state. - // The only actual behaviour change is that we no longer commit changes after - // each hook execution; this is reflected by checking that it's dirty in a couple - // of places where we once checked it was not. - - s.runUniterTests(c, []uniterTest{ - // Upgrade scenarios - handling conflicts. - ugt( - "upgrade: conflicting files", - startGitUpgradeError{}, - - // NOTE: this is just dumbly committing the conflicts, but AFAICT this - // is the only reasonable solution; if the user tells us it's resolved - // we have to take their word for it. 
- resolveError{state.ResolvedNoHooks}, - waitHooks{"upgrade-charm", "config-changed"}, - waitUnitAgent{ - status: status.StatusIdle, - charm: 1, - }, - waitUnitAgent{ - statusGetter: unitStatusGetter, - status: status.StatusUnknown, - charm: 1, - }, - verifyGitCharm{revision: 1}, - ), ugt( - `upgrade: conflicting directories`, - createCharm{ - customize: func(c *gc.C, ctx *context, path string) { - err := os.Mkdir(filepath.Join(path, "data"), 0755) - c.Assert(err, jc.ErrorIsNil) - appendHook(c, path, "start", "echo DATA > data/newfile") - }, - }, - serveCharm{}, - createUniter{}, - waitUnitAgent{ - status: status.StatusIdle, - }, - waitUnitAgent{ - statusGetter: unitStatusGetter, - status: status.StatusUnknown, - }, - waitHooks{"install", "leader-elected", "config-changed", "start"}, - verifyGitCharm{dirty: true}, - - createCharm{ - revision: 1, - customize: func(c *gc.C, ctx *context, path string) { - data := filepath.Join(path, "data") - err := ioutil.WriteFile(data, []byte("ha ha"), 0644) - c.Assert(err, jc.ErrorIsNil) - }, - }, - serveCharm{}, - upgradeCharm{revision: 1}, - waitUnitAgent{ - statusGetter: unitStatusGetter, - status: status.StatusError, - info: "upgrade failed", - charm: 1, - }, - verifyWaiting{}, - verifyGitCharm{dirty: true}, - - resolveError{state.ResolvedNoHooks}, - waitHooks{"upgrade-charm", "config-changed"}, - waitUnitAgent{ - status: status.StatusIdle, - charm: 1, - }, - verifyGitCharm{revision: 1}, - ), ugt( - "upgrade conflict resolved with forced upgrade", - startGitUpgradeError{}, - createCharm{ - revision: 2, - customize: func(c *gc.C, ctx *context, path string) { - otherdata := filepath.Join(path, "otherdata") - err := ioutil.WriteFile(otherdata, []byte("blah"), 0644) - c.Assert(err, jc.ErrorIsNil) - }, - }, - serveCharm{}, - upgradeCharm{revision: 2, forced: true}, - waitUnitAgent{ - status: status.StatusIdle, - charm: 2, - }, waitUnitAgent{ - statusGetter: unitStatusGetter, - status: status.StatusUnknown, - charm: 2, - }, - 
waitHooks{"upgrade-charm", "config-changed"}, - verifyGitCharm{revision: 2}, - custom{func(c *gc.C, ctx *context) { - // otherdata should exist (in v2) - otherdata, err := ioutil.ReadFile(filepath.Join(ctx.path, "charm", "otherdata")) - c.Assert(err, jc.ErrorIsNil) - c.Assert(string(otherdata), gc.Equals, "blah") - - // ignore should not (only in v1) - _, err = os.Stat(filepath.Join(ctx.path, "charm", "ignore")) - c.Assert(err, jc.Satisfies, os.IsNotExist) - - // data should contain what was written in the start hook - data, err := ioutil.ReadFile(filepath.Join(ctx.path, "charm", "data")) - c.Assert(err, jc.ErrorIsNil) - c.Assert(string(data), gc.Equals, "STARTDATA\n") - }}, - ), ugt( - "upgrade conflict unit dying", - startGitUpgradeError{}, - unitDying, - verifyWaiting{}, - resolveError{state.ResolvedNoHooks}, - waitHooks{"upgrade-charm", "config-changed", "leader-settings-changed", "stop"}, - waitUniterDead{}, - ), ugt( - "upgrade conflict unit dead", - startGitUpgradeError{}, - unitDead, - waitUniterDead{}, - waitHooks{}, - ), - }) -} - func (s *UniterSuite) TestUniterRelations(c *gc.C) { waitDyingHooks := custom{func(c *gc.C, ctx *context) { // There is no ordering relationship between relation hooks and @@ -1298,11 +1091,11 @@ serveCharm{}, createUniter{}, waitUnitAgent{ - status: status.StatusIdle, + status: status.Idle, }, waitUnitAgent{ statusGetter: unitStatusGetter, - status: status.StatusUnknown, + status: status.Unknown, }, waitHooks{"install", "leader-elected", "config-changed", "start"}, addRelation{waitJoin: true}, @@ -1339,7 +1132,7 @@ startupRelationError{"db-relation-joined"}, waitUnitAgent{ statusGetter: unitStatusGetter, - status: status.StatusError, + status: status.Error, info: `hook failed: "db-relation-joined"`, data: map[string]interface{}{ "hook": "db-relation-joined", @@ -1352,7 +1145,7 @@ startupRelationError{"db-relation-changed"}, waitUnitAgent{ statusGetter: unitStatusGetter, - status: status.StatusError, + status: status.Error, 
info: `hook failed: "db-relation-changed"`, data: map[string]interface{}{ "hook": "db-relation-changed", @@ -1367,7 +1160,7 @@ removeRelationUnit{"mysql/0"}, waitUnitAgent{ statusGetter: unitStatusGetter, - status: status.StatusError, + status: status.Error, info: `hook failed: "db-relation-departed"`, data: map[string]interface{}{ "hook": "db-relation-departed", @@ -1383,7 +1176,7 @@ relationDying, waitUnitAgent{ statusGetter: unitStatusGetter, - status: status.StatusError, + status: status.Error, info: `hook failed: "db-relation-broken"`, data: map[string]interface{}{ "hook": "db-relation-broken", @@ -1409,10 +1202,10 @@ createServiceAndUnit{}, startUniter{}, waitAddresses{}, - waitUnitAgent{status: status.StatusIdle}, + waitUnitAgent{status: status.Idle}, waitUnitAgent{ statusGetter: unitStatusGetter, - status: status.StatusUnknown, + status: status.Unknown, }, waitHooks{"install", "leader-elected", "config-changed", "start"}, verifyCharm{}, @@ -1422,10 +1215,10 @@ results: map[string]interface{}{}, status: params.ActionCompleted, }}}, - waitUnitAgent{status: status.StatusIdle}, + waitUnitAgent{status: status.Idle}, waitUnitAgent{ statusGetter: unitStatusGetter, - status: status.StatusUnknown, + status: status.Unknown, }, ), ut( "action-fail causes the action to fail with a message", @@ -1440,10 +1233,10 @@ createServiceAndUnit{}, startUniter{}, waitAddresses{}, - waitUnitAgent{status: status.StatusIdle}, + waitUnitAgent{status: status.Idle}, waitUnitAgent{ statusGetter: unitStatusGetter, - status: status.StatusUnknown, + status: status.Unknown, }, waitHooks{"install", "leader-elected", "config-changed", "start"}, verifyCharm{}, @@ -1456,9 +1249,9 @@ message: "I'm afraid I can't let you do that, Dave.", status: params.ActionFailed, }}}, - waitUnitAgent{status: status.StatusIdle}, waitUnitAgent{ + waitUnitAgent{status: status.Idle}, waitUnitAgent{ statusGetter: unitStatusGetter, - status: status.StatusUnknown, + status: status.Unknown, }, ), ut( "action-fail with 
the wrong arguments fails but is not an error", @@ -1473,10 +1266,10 @@ createServiceAndUnit{}, startUniter{}, waitAddresses{}, - waitUnitAgent{status: status.StatusIdle}, + waitUnitAgent{status: status.Idle}, waitUnitAgent{ statusGetter: unitStatusGetter, - status: status.StatusUnknown, + status: status.Unknown, }, waitHooks{"install", "leader-elected", "config-changed", "start"}, verifyCharm{}, @@ -1489,10 +1282,10 @@ message: "A real message", status: params.ActionFailed, }}}, - waitUnitAgent{status: status.StatusIdle}, + waitUnitAgent{status: status.Idle}, waitUnitAgent{ statusGetter: unitStatusGetter, - status: status.StatusUnknown, + status: status.Unknown, }, ), ut( "actions with correct params passed are not an error", @@ -1507,10 +1300,10 @@ createServiceAndUnit{}, startUniter{}, waitAddresses{}, - waitUnitAgent{status: status.StatusIdle}, + waitUnitAgent{status: status.Idle}, waitUnitAgent{ statusGetter: unitStatusGetter, - status: status.StatusUnknown, + status: status.Unknown, }, waitHooks{"install", "leader-elected", "config-changed", "start"}, verifyCharm{}, @@ -1532,10 +1325,10 @@ }, status: params.ActionCompleted, }}}, - waitUnitAgent{status: status.StatusIdle}, + waitUnitAgent{status: status.Idle}, waitUnitAgent{ statusGetter: unitStatusGetter, - status: status.StatusUnknown, + status: status.Unknown, }, ), ut( "actions with incorrect params passed are not an error but fail", @@ -1550,10 +1343,10 @@ createServiceAndUnit{}, startUniter{}, waitAddresses{}, - waitUnitAgent{status: status.StatusIdle}, + waitUnitAgent{status: status.Idle}, waitUnitAgent{ statusGetter: unitStatusGetter, - status: status.StatusUnknown, + status: status.Unknown, }, waitHooks{"install", "leader-elected", "config-changed", "start"}, verifyCharm{}, @@ -1567,10 +1360,10 @@ status: params.ActionFailed, message: `cannot run "snapshot" action: validation failed: (root).outfile : must be of type string, given 2`, }}}, - waitUnitAgent{status: status.StatusIdle}, + 
waitUnitAgent{status: status.Idle}, waitUnitAgent{ statusGetter: unitStatusGetter, - status: status.StatusUnknown, + status: status.Unknown, }, ), ut( "actions not defined in actions.yaml fail without causing a uniter error", @@ -1584,10 +1377,10 @@ createServiceAndUnit{}, startUniter{}, waitAddresses{}, - waitUnitAgent{status: status.StatusIdle}, + waitUnitAgent{status: status.Idle}, waitUnitAgent{ statusGetter: unitStatusGetter, - status: status.StatusUnknown, + status: status.Unknown, }, waitHooks{"install", "leader-elected", "config-changed", "start"}, verifyCharm{}, @@ -1598,10 +1391,10 @@ status: params.ActionFailed, message: `cannot run "snapshot" action: not defined`, }}}, - waitUnitAgent{status: status.StatusIdle}, + waitUnitAgent{status: status.Idle}, waitUnitAgent{ statusGetter: unitStatusGetter, - status: status.StatusUnknown, + status: status.Unknown, }, ), ut( "pending actions get consumed", @@ -1619,10 +1412,10 @@ addAction{"action-log", nil}, startUniter{}, waitAddresses{}, - waitUnitAgent{status: status.StatusIdle}, + waitUnitAgent{status: status.Idle}, waitUnitAgent{ statusGetter: unitStatusGetter, - status: status.StatusUnknown, + status: status.Unknown, }, waitHooks{"install", "leader-elected", "config-changed", "start"}, verifyCharm{}, @@ -1639,10 +1432,10 @@ results: map[string]interface{}{}, status: params.ActionCompleted, }}}, - waitUnitAgent{status: status.StatusIdle}, + waitUnitAgent{status: status.Idle}, waitUnitAgent{ statusGetter: unitStatusGetter, - status: status.StatusUnknown, + status: status.Unknown, }, ), ut( "actions not implemented fail but are not errors", @@ -1656,10 +1449,10 @@ createServiceAndUnit{}, startUniter{}, waitAddresses{}, - waitUnitAgent{status: status.StatusIdle}, + waitUnitAgent{status: status.Idle}, waitUnitAgent{ statusGetter: unitStatusGetter, - status: status.StatusUnknown, + status: status.Unknown, }, waitHooks{"install", "leader-elected", "config-changed", "start"}, verifyCharm{}, @@ -1670,10 +1463,10 @@ 
status: params.ActionFailed, message: `action not implemented on unit "u/0"`, }}}, - waitUnitAgent{status: status.StatusIdle}, + waitUnitAgent{status: status.Idle}, waitUnitAgent{ statusGetter: unitStatusGetter, - status: status.StatusUnknown, + status: status.Unknown, }, ), ut( "actions may run from ModeHookError, but do not clear the error", @@ -1687,11 +1480,9 @@ addAction{"action-log", nil}, waitUnitAgent{ statusGetter: unitStatusGetter, - status: status.StatusError, + status: status.Error, info: `hook failed: "start"`, - data: map[string]interface{}{ - "hook": "start", - }, + data: map[string]interface{}{"hook": "start"}, }, waitActionResults{[]actionResult{{ name: "action-log", @@ -1700,16 +1491,16 @@ }}}, waitUnitAgent{ statusGetter: unitStatusGetter, - status: status.StatusError, + status: status.Error, info: `hook failed: "start"`, data: map[string]interface{}{"hook": "start"}, }, verifyWaiting{}, resolveError{state.ResolvedNoHooks}, - waitUnitAgent{status: status.StatusIdle}, + waitUnitAgent{status: status.Idle}, waitUnitAgent{ statusGetter: unitStatusGetter, - status: status.StatusMaintenance, + status: status.Maintenance, info: "installing charm software", }, ), @@ -1816,11 +1607,11 @@ startUniter{}, waitAddresses{}, waitUnitAgent{ - status: status.StatusIdle, + status: status.Idle, }, waitUnitAgent{ statusGetter: unitStatusGetter, - status: status.StatusUnknown, + status: status.Unknown, }, waitActionResults{[]actionResult{{ name: "action-reboot", @@ -1852,11 +1643,11 @@ waitHooks{"install"}, startUniter{}, waitUnitAgent{ - status: status.StatusIdle, + status: status.Idle, }, waitUnitAgent{ statusGetter: unitStatusGetter, - status: status.StatusUnknown, + status: status.Unknown, }, waitHooks{"leader-elected", "config-changed", "start"}, )}) @@ -1881,11 +1672,11 @@ waitHooks{"install"}, startUniter{}, waitUnitAgent{ - status: status.StatusIdle, + status: status.Idle, }, waitUnitAgent{ statusGetter: unitStatusGetter, - status: status.StatusUnknown, + 
status: status.Unknown, }, waitHooks{"install", "leader-elected", "config-changed", "start"}, )}) @@ -1908,7 +1699,7 @@ waitAddresses{}, waitUnitAgent{ statusGetter: unitStatusGetter, - status: status.StatusError, + status: status.Error, info: fmt.Sprintf(`hook failed: "install"`), }, ), @@ -1924,7 +1715,7 @@ createUniter{}, waitUnitAgent{ statusGetter: unitStatusGetter, - status: status.StatusError, + status: status.Error, info: `hook failed: "config-changed"`, data: map[string]interface{}{ "hook": "config-changed", @@ -1933,7 +1724,7 @@ runCommands{"exit 0"}, waitUnitAgent{ statusGetter: unitStatusGetter, - status: status.StatusError, + status: status.Error, info: `hook failed: "config-changed"`, data: map[string]interface{}{ "hook": "config-changed", @@ -2148,7 +1939,7 @@ serveCharm{}, createUniter{executorFunc: executorFunc}, waitUnitAgent{ - status: status.StatusFailed, + status: status.Failed, info: "resolver loop error", }, expectError{".*some error occurred.*"}, diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/util_test.go juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/util_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/uniter/util_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/uniter/util_test.go 2016-10-13 14:31:50.000000000 +0000 @@ -8,7 +8,6 @@ "fmt" "io/ioutil" "os" - "os/exec" "path/filepath" "reflect" "runtime" @@ -47,7 +46,6 @@ "github.com/juju/juju/worker" "github.com/juju/juju/worker/fortress" "github.com/juju/juju/worker/uniter" - "github.com/juju/juju/worker/uniter/charm" "github.com/juju/juju/worker/uniter/operation" ) @@ -615,7 +613,7 @@ step(c, ctx, createUniter{}) step(c, ctx, waitUnitAgent{ statusGetter: unitStatusGetter, - status: status.StatusError, + status: status.Error, info: fmt.Sprintf(`hook failed: %q`, s.badHook), }) for _, hook := range startupHooks(false) { @@ -638,7 +636,7 @@ step(c, ctx, createUniter{}) step(c, ctx, waitUnitAgent{ 
statusGetter: unitStatusGetter, - status: status.StatusError, + status: status.Error, info: fmt.Sprintf(`hook failed: %q`, s.badHook), }) for _, hook := range startupHooks(false) { @@ -651,6 +649,30 @@ step(c, ctx, verifyCharm{}) } +type createDownloads struct{} + +func (s createDownloads) step(c *gc.C, ctx *context) { + dir := downloadDir(ctx) + c.Assert(os.MkdirAll(dir, 0775), jc.ErrorIsNil) + c.Assert( + ioutil.WriteFile(filepath.Join(dir, "foo"), []byte("bar"), 0775), + jc.ErrorIsNil, + ) +} + +type verifyDownloadsCleared struct{} + +func (s verifyDownloadsCleared) step(c *gc.C, ctx *context) { + files, err := ioutil.ReadDir(downloadDir(ctx)) + c.Assert(err, jc.ErrorIsNil) + c.Check(files, gc.HasLen, 0) +} + +func downloadDir(ctx *context) string { + paths := uniter.NewPaths(ctx.dataDir, ctx.unit.UnitTag()) + return filepath.Join(paths.State.BundlesDir, "downloads") +} + type quickStart struct { minion bool } @@ -659,7 +681,7 @@ step(c, ctx, createCharm{}) step(c, ctx, serveCharm{}) step(c, ctx, createUniter{minion: s.minion}) - step(c, ctx, waitUnitAgent{status: status.StatusIdle}) + step(c, ctx, waitUnitAgent{status: status.Idle}) step(c, ctx, waitHooks(startupHooks(s.minion))) step(c, ctx, verifyCharm{}) } @@ -682,7 +704,7 @@ step(c, ctx, createCharm{badHooks: []string{s.badHook}}) step(c, ctx, serveCharm{}) step(c, ctx, createUniter{}) - step(c, ctx, waitUnitAgent{status: status.StatusIdle}) + step(c, ctx, waitUnitAgent{status: status.Idle}) step(c, ctx, waitHooks(startupHooks(false))) step(c, ctx, verifyCharm{}) step(c, ctx, addRelation{}) @@ -1019,7 +1041,7 @@ serveCharm{}, createUniter{}, waitUnitAgent{ - status: status.StatusIdle, + status: status.Idle, }, waitHooks(startupHooks(false)), verifyCharm{}, @@ -1029,7 +1051,7 @@ upgradeCharm{revision: 1}, waitUnitAgent{ statusGetter: unitStatusGetter, - status: status.StatusError, + status: status.Error, info: "upgrade failed", charm: 1, }, @@ -1049,7 +1071,7 @@ verifyCharmSteps := []stepper{ waitUnitAgent{ 
statusGetter: unitStatusGetter, - status: status.StatusError, + status: status.Error, info: "upgrade failed", charm: s.revision, }, @@ -1064,7 +1086,7 @@ // upgrade; and thus puts us in an unexpected state for future steps. now := time.Now() sInfo := status.StatusInfo{ - Status: status.StatusIdle, + Status: status.Idle, Message: "", Since: &now, } @@ -1693,117 +1715,6 @@ // Lockdown implements fortress.Guard. func (*mockCharmDirGuard) Lockdown(_ fortress.Abort) error { return nil } -// prepareGitUniter runs a sequence of uniter tests with the manifest deployer -// replacement logic patched out, simulating the effect of running an older -// version of juju that exclusively used a git deployer. This is useful both -// for testing the new deployer-replacement code *and* for running the old -// tests against the new, patched code to check that the tweaks made to -// accommodate the manifest deployer do not change the original behaviour as -// simulated by the patched-out code. -type prepareGitUniter struct { - prepSteps []stepper -} - -func (s prepareGitUniter) step(c *gc.C, ctx *context) { - c.Assert(ctx.uniter, gc.IsNil, gc.Commentf("please don't try to patch stuff while the uniter's running")) - newDeployer := func(charmPath, dataPath string, bundles charm.BundleReader) (charm.Deployer, error) { - return charm.NewGitDeployer(charmPath, dataPath, bundles), nil - } - restoreNewDeployer := gt.PatchValue(&charm.NewDeployer, newDeployer) - defer restoreNewDeployer() - - fixDeployer := func(deployer *charm.Deployer) error { - return nil - } - restoreFixDeployer := gt.PatchValue(&charm.FixDeployer, fixDeployer) - defer restoreFixDeployer() - - for _, prepStep := range s.prepSteps { - step(c, ctx, prepStep) - } - if ctx.uniter != nil { - step(c, ctx, stopUniter{}) - } -} - -func ugt(summary string, steps ...stepper) uniterTest { - return ut(summary, prepareGitUniter{steps}) -} - -type verifyGitCharm struct { - revision int - dirty bool -} - -func (s verifyGitCharm) step(c 
*gc.C, ctx *context) { - charmPath := filepath.Join(ctx.path, "charm") - if !s.dirty { - revisionPath := filepath.Join(charmPath, "revision") - content, err := ioutil.ReadFile(revisionPath) - c.Assert(err, jc.ErrorIsNil) - c.Assert(string(content), gc.Equals, strconv.Itoa(s.revision)) - err = ctx.unit.Refresh() - c.Assert(err, jc.ErrorIsNil) - url, ok := ctx.unit.CharmURL() - c.Assert(ok, jc.IsTrue) - c.Assert(url, gc.DeepEquals, curl(s.revision)) - } - - // Before we try to check the git status, make sure expected hooks are all - // complete, to prevent the test and the uniter interfering with each other. - step(c, ctx, waitHooks{}) - step(c, ctx, waitHooks{}) - cmd := exec.Command("git", "status") - cmd.Dir = filepath.Join(ctx.path, "charm") - out, err := cmd.CombinedOutput() - c.Assert(err, jc.ErrorIsNil) - cmp := gc.Matches - if s.dirty { - cmp = gc.Not(gc.Matches) - } - c.Assert(string(out), cmp, "(# )?On branch master\nnothing to commit.*\n") -} - -type startGitUpgradeError struct{} - -func (s startGitUpgradeError) step(c *gc.C, ctx *context) { - steps := []stepper{ - createCharm{ - customize: func(c *gc.C, ctx *context, path string) { - appendHook(c, path, "start", "echo STARTDATA > data") - }, - }, - serveCharm{}, - createUniter{}, - waitUnitAgent{ - status: status.StatusIdle, - }, - waitHooks(startupHooks(false)), - verifyGitCharm{dirty: true}, - - createCharm{ - revision: 1, - customize: func(c *gc.C, ctx *context, path string) { - ft.File{"data", "ha ha", 0644}.Create(c, path) - ft.File{"ignore", "anything", 0644}.Create(c, path) - }, - }, - serveCharm{}, - upgradeCharm{revision: 1}, - waitUnitAgent{ - statusGetter: unitStatusGetter, - status: status.StatusError, - info: "upgrade failed", - charm: 1, - }, - verifyWaiting{}, - verifyGitCharm{dirty: true}, - } - for _, s_ := range steps { - step(c, ctx, s_) - } -} - type provisionStorage struct{} func (s provisionStorage) step(c *gc.C, ctx *context) { diff -Nru 
juju-core-2.0~beta15/src/github.com/juju/juju/worker/upgradesteps/worker.go juju-core-2.0.0/src/github.com/juju/juju/worker/upgradesteps/worker.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/upgradesteps/worker.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/upgradesteps/worker.go 2016-10-13 14:31:50.000000000 +0000 @@ -11,7 +11,7 @@ "github.com/juju/loggo" "github.com/juju/utils" "gopkg.in/juju/names.v2" - "launchpad.net/tomb" + "gopkg.in/tomb.v1" "github.com/juju/juju/agent" "github.com/juju/juju/api" @@ -223,7 +223,7 @@ } else { // Upgrade succeeded - signal that the upgrade is complete. logger.Infof("upgrade to %v completed successfully.", w.toVersion) - w.machine.SetStatus(status.StatusStarted, "", nil) + w.machine.SetStatus(status.Started, "", nil) w.upgradeComplete.Unlock() } return nil @@ -343,7 +343,7 @@ // designed to be called via a machine agent's ChangeConfig method. func (w *upgradesteps) runUpgradeSteps(agentConfig agent.ConfigSetter) error { var upgradeErr error - w.machine.SetStatus(status.StatusStarted, fmt.Sprintf("upgrading to %v", w.toVersion), nil) + w.machine.SetStatus(status.Started, fmt.Sprintf("upgrading to %v", w.toVersion), nil) context := upgrades.NewContext(agentConfig, w.apiConn, w.st) logger.Infof("starting upgrade from %v to %v for %q", w.fromVersion, w.toVersion, w.tag) @@ -377,7 +377,7 @@ } logger.Errorf("upgrade from %v to %v for %q failed (%s): %v", w.fromVersion, w.toVersion, w.tag, retryText, err) - w.machine.SetStatus(status.StatusError, + w.machine.SetStatus(status.Error, fmt.Sprintf("upgrade to %v failed (%s): %v", w.toVersion, retryText, err), nil) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/upgradesteps/worker_test.go juju-core-2.0.0/src/github.com/juju/juju/worker/upgradesteps/worker_test.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/upgradesteps/worker_test.go 2016-08-16 08:56:25.000000000 +0000 +++ 
juju-core-2.0.0/src/github.com/juju/juju/worker/upgradesteps/worker_test.go 2016-10-13 14:31:50.000000000 +0000 @@ -77,7 +77,7 @@ // Allow tests to make the API connection appear to be dead. s.connectionDead = false - s.PatchValue(&cmdutil.ConnectionIsDead, func(loggo.Logger, cmdutil.Pinger) bool { + s.PatchValue(&cmdutil.ConnectionIsDead, func(loggo.Logger, cmdutil.Breakable) bool { return s.connectionDead }) @@ -239,7 +239,7 @@ c.Assert(doneLock.IsUnlocked(), jc.IsFalse) } -func (s *UpgradeSuite) TestApiConnectionFailure(c *gc.C) { +func (s *UpgradeSuite) TestAPIConnectionFailure(c *gc.C) { // This test checks what happens when an upgrade fails because the // connection to mongo has gone away. This will happen when the // mongo master changes. In this case we want the upgrade worker @@ -292,7 +292,7 @@ "aborted wait for other controllers:" + causeMsg}, }) c.Assert(statusCalls, jc.DeepEquals, []StatusCall{{ - status.StatusError, + status.Error, fmt.Sprintf( "upgrade to %s failed (giving up): aborted wait for other controllers:"+causeMsg, jujuversion.Current), @@ -389,7 +389,7 @@ statusMessage := fmt.Sprintf( `upgrade to %s failed (giving up): %s`, jujuversion.Current, causeMessage) c.Assert(statusCalls, jc.DeepEquals, []StatusCall{{ - status.StatusError, statusMessage, + status.Error, statusMessage, }}) } @@ -414,7 +414,7 @@ newPolicy := stateenvirons.GetNewPolicyFunc( stateenvirons.GetNewEnvironFunc(environs.New), ) - st, err := state.Open(s.State.ModelTag(), mongoInfo, mongotest.DialOpts(), newPolicy) + st, err := state.Open(s.State.ModelTag(), s.State.ControllerTag(), mongoInfo, mongotest.DialOpts(), newPolicy) if err != nil { return nil, err } @@ -502,22 +502,22 @@ func (s *UpgradeSuite) makeExpectedStatusCalls(retryCount int, expectFail bool, failReason string) []StatusCall { calls := []StatusCall{{ - status.StatusStarted, + status.Started, fmt.Sprintf("upgrading to %s", jujuversion.Current), }} for i := 0; i < retryCount; i++ { calls = append(calls, 
StatusCall{ - status.StatusError, + status.Error, fmt.Sprintf("upgrade to %s failed (will retry): %s", jujuversion.Current, failReason), }) } if expectFail { calls = append(calls, StatusCall{ - status.StatusError, + status.Error, fmt.Sprintf("upgrade to %s failed (giving up): %s", jujuversion.Current, failReason), }) } else { - calls = append(calls, StatusCall{status.StatusStarted, ""}) + calls = append(calls, StatusCall{status.Started, ""}) } return calls } diff -Nru juju-core-2.0~beta15/src/github.com/juju/juju/worker/workertest/workers.go juju-core-2.0.0/src/github.com/juju/juju/worker/workertest/workers.go --- juju-core-2.0~beta15/src/github.com/juju/juju/worker/workertest/workers.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/juju/worker/workertest/workers.go 2016-10-13 14:31:50.000000000 +0000 @@ -4,7 +4,7 @@ package workertest import ( - "launchpad.net/tomb" + "gopkg.in/tomb.v1" "github.com/juju/juju/worker" ) diff -Nru juju-core-2.0~beta15/src/github.com/juju/loggo/dependencies.tsv juju-core-2.0.0/src/github.com/juju/loggo/dependencies.tsv --- juju-core-2.0~beta15/src/github.com/juju/loggo/dependencies.tsv 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/loggo/dependencies.tsv 2016-10-13 14:32:22.000000000 +0000 @@ -0,0 +1,5 @@ +github.com/juju/ansiterm git c368f42cb4b32a70389cded05c7345d9ccdce889 2016-08-17T02:52:20Z +github.com/lunixbochs/vtclean git 4fbf7632a2c6d3fbdb9931439bdbbeded02cbe36 2016-01-25T03:51:06Z +github.com/mattn/go-colorable git ed8eb9e318d7a84ce5915b495b7d35e0cfe7b5a8 2016-07-31T23:54:17Z +github.com/mattn/go-isatty git 66b8e73f3f5cda9f96b69efd03dd3d7fc4a5cdb8 2016-08-06T12:27:52Z +gopkg.in/check.v1 git 4f90aeace3a26ad7021961c297b22c42160c7b25 2016-01-05T16:49:36Z diff -Nru juju-core-2.0~beta15/src/github.com/juju/loggo/example/first.go juju-core-2.0.0/src/github.com/juju/loggo/example/first.go --- juju-core-2.0~beta15/src/github.com/juju/loggo/example/first.go 2016-08-16 
08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/loggo/example/first.go 2016-10-13 14:32:22.000000000 +0000 @@ -22,6 +22,10 @@ first.Infof(message) } +func FirstDebug(message string) { + first.Debugf(message) +} + func FirstTrace(message string) { first.Tracef(message) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/loggo/example/main.go juju-core-2.0.0/src/github.com/juju/loggo/example/main.go --- juju-core-2.0~beta15/src/github.com/juju/loggo/example/main.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/loggo/example/main.go 2016-10-13 14:32:22.000000000 +0000 @@ -28,12 +28,14 @@ FirstError("first error") FirstWarning("first warning") FirstInfo("first info") + FirstDebug("first debug") FirstTrace("first trace") - SecondCritical("first critical") - SecondError("first error") - SecondWarning("first warning") - SecondInfo("first info") - SecondTrace("first trace") + SecondCritical("second critical") + SecondError("second error") + SecondWarning("second warning") + SecondInfo("second info") + SecondDebug("second debug") + SecondTrace("second trace") } diff -Nru juju-core-2.0~beta15/src/github.com/juju/loggo/example/second.go juju-core-2.0.0/src/github.com/juju/loggo/example/second.go --- juju-core-2.0~beta15/src/github.com/juju/loggo/example/second.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/loggo/example/second.go 2016-10-13 14:32:22.000000000 +0000 @@ -22,6 +22,9 @@ second.Infof(message) } +func SecondDebug(message string) { + second.Debugf(message) +} func SecondTrace(message string) { second.Tracef(message) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/loggo/formatter.go juju-core-2.0.0/src/github.com/juju/loggo/formatter.go --- juju-core-2.0~beta15/src/github.com/juju/loggo/formatter.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/loggo/formatter.go 2016-10-13 14:32:22.000000000 +0000 @@ -5,6 +5,7 @@ import ( "fmt" + "os" "path/filepath" 
"time" ) @@ -19,3 +20,19 @@ filename := filepath.Base(entry.Filename) return fmt.Sprintf("%s %s %s %s:%d %s", ts, entry.Level, entry.Module, filename, entry.Line, entry.Message) } + +// TimeFormat is the time format used for the default writer. +// This can be set with the environment variable LOGGO_TIME_FORMAT. +var TimeFormat = initTimeFormat() + +func initTimeFormat() string { + format := os.Getenv("LOGGO_TIME_FORMAT") + if format != "" { + return format + } + return "15:04:05" +} + +func formatTime(ts time.Time) string { + return ts.Format(TimeFormat) +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/loggo/.gitignore juju-core-2.0.0/src/github.com/juju/loggo/.gitignore --- juju-core-2.0~beta15/src/github.com/juju/loggo/.gitignore 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/loggo/.gitignore 2016-10-13 14:32:22.000000000 +0000 @@ -0,0 +1 @@ +example/example diff -Nru juju-core-2.0~beta15/src/github.com/juju/loggo/level.go juju-core-2.0.0/src/github.com/juju/loggo/level.go --- juju-core-2.0~beta15/src/github.com/juju/loggo/level.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/loggo/level.go 2016-10-13 14:32:22.000000000 +0000 @@ -69,6 +69,27 @@ } } +// Short returns a five character string to use in +// aligned logging output. +func (level Level) Short() string { + switch level { + case TRACE: + return "TRACE" + case DEBUG: + return "DEBUG" + case INFO: + return "INFO " + case WARNING: + return "WARN " + case ERROR: + return "ERROR" + case CRITICAL: + return "CRITC" + default: + return " " + } +} + // get atomically gets the value of the given level. 
func (level *Level) get() Level { return Level(atomic.LoadUint32((*uint32)(level))) diff -Nru juju-core-2.0~beta15/src/github.com/juju/loggo/README.md juju-core-2.0.0/src/github.com/juju/loggo/README.md --- juju-core-2.0~beta15/src/github.com/juju/loggo/README.md 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/loggo/README.md 2016-10-13 14:32:22.000000000 +0000 @@ -49,10 +49,35 @@ ``` go const DefaultWriterName = "default" ``` -DefaultWriterName is the name of the writer default writer for +DefaultWriterName is the name of the default writer for a Context. +## Variables +``` go +var ( + // SeverityColor defines the colors for the levels output by the ColorWriter. + SeverityColor = map[Level]*ansiterm.Context{ + TRACE: ansiterm.Foreground(ansiterm.Default), + DEBUG: ansiterm.Foreground(ansiterm.Green), + INFO: ansiterm.Foreground(ansiterm.BrightBlue), + WARNING: ansiterm.Foreground(ansiterm.Yellow), + ERROR: ansiterm.Foreground(ansiterm.BrightRed), + CRITICAL: &ansiterm.Context{ + Foreground: ansiterm.White, + Background: ansiterm.Red, + }, + } + // LocationColor defines the colors for the location output by the ColorWriter. + LocationColor = ansiterm.Foreground(ansiterm.BrightBlue) +) +``` +``` go +var TimeFormat = initTimeFormat() +``` +TimeFormat is the time format used for the default writer. +This can be set with the environment variable LOGGO_TIME_FORMAT. + ## func ConfigureLoggers ``` go @@ -77,7 +102,10 @@ ``` DefaultFormatter returns the parameters separated by spaces except for filename and line which are separated by a colon. The timestamp is shown -to second resolution in UTC. +to second resolution in UTC. 
For example: + + + 2016-07-02 15:04:05 ## func LoggerInfo @@ -94,17 +122,19 @@ ``` go func RegisterWriter(name string, writer Writer) error ``` -RegisterWriter adds the writer to the list of writers to the DefaultContext +RegisterWriter adds the writer to the list of writers in the DefaultContext that get notified when logging. If there is already a registered writer with that name, an error is returned. -## func ResetLoggers +## func ResetLogging ``` go -func ResetLoggers() +func ResetLogging() ``` ResetLogging iterates through the known modules and sets the levels of all -to UNSPECIFIED, except for which is set to WARNING. +to UNSPECIFIED, except for which is set to WARNING. The call also +removes all writers in the DefaultContext and puts the original default +writer back as the only writer. ## func ResetWriters @@ -129,17 +159,17 @@ -### func ParseConfigurationString +### func ParseConfigString ``` go -func ParseConfigurationString(specification string) (Config, error) +func ParseConfigString(specification string) (Config, error) ``` -ParseConfigurationString parses a logger configuration string into a map of -logger names and their associated log level. This method is provided to -allow other programs to pre-validate a configuration string rather than -just calling ConfigureLoggers. +ParseConfigString parses a logger configuration string into a map of logger +names and their associated log level. This method is provided to allow +other programs to pre-validate a configuration string rather than just +calling ConfigureLoggers. -Loggers are colon- or semicolon-separated; each module is specified as -=. White space outside of module names and levels is +Logging modules are colon- or semicolon-separated; each module is specified +as =. White space outside of module names and levels is ignored. The root module is specified with the name "". As a special case, a log level may be specified on its own. 
@@ -159,7 +189,7 @@ func (c Config) String() string ``` String returns a logger configuration string that may be parsed -using ParseLoggersConfig. +using ParseConfigurationString. @@ -189,9 +219,9 @@ ### func NewContext ``` go -func NewContext(rootLevel Level, defaultWriter Writer) *Context +func NewContext(rootLevel Level) *Context ``` -NewLoggers returns a new Context with a possible default writer set. +NewLoggers returns a new Context with no writers set. If the root level is UNSPECIFIED, WARNING is used. @@ -201,7 +231,7 @@ ``` go func (c *Context) AddWriter(name string, writer Writer) error ``` -AddWriter adds an writer to the list to be called for each logging call. +AddWriter adds a writer to the list to be called for each logging call. The name cannot be empty, and the writer cannot be nil. If an existing writer exists with the specified name, an error is returned. @@ -256,7 +286,7 @@ ``` go func (c *Context) ReplaceWriter(name string, writer Writer) (Writer, error) ``` -ReplaceWriter is a convenience function that does the equivalent of RemoveWriter +ReplaceWriter is a convenience method that does the equivalent of RemoveWriter followed by AddWriter with the same name. The replaced writer is returned. @@ -274,8 +304,7 @@ ``` go func (c *Context) ResetWriters() ``` -ResetWriters is generally only used in testing and removes all the writers, and -adds back in the default writer if one was specified when the Context was created. +ResetWriters is generally only used in testing and removes all the writers. @@ -346,6 +375,15 @@ +### func (Level) Short +``` go +func (level Level) Short() string +``` +Short returns a five character string to use in +aligned logging output. + + + ### func (Level) String ``` go func (level Level) String() string @@ -601,12 +639,11 @@ ## type Writer ``` go type Writer interface { - // Write writes a message to the Writer with the given - // level and module name. 
The filename and line hold - // the file name and line number of the code that is - // generating the log message; the time stamp holds - // the time the log message was generated, and - // message holds the log message itself. + // Write writes a message to the Writer with the given level and module + // name. The filename and line hold the file name and line number of the + // code that is generating the log message; the time stamp holds the time + // the log message was generated, and message holds the log message + // itself. Write(entry Entry) } ``` @@ -620,21 +657,29 @@ +### func NewColorWriter +``` go +func NewColorWriter(writer io.Writer) Writer +``` +NewColorWriter will write out colored severity levels if the writer is +outputting to a terminal. + + ### func NewMinimumLevelWriter ``` go func NewMinimumLevelWriter(writer Writer, minLevel Level) Writer ``` NewMinLevelWriter returns a Writer that will only pass on the Write calls -to the provided writer if the log level is at or above the specified minimul level. +to the provided writer if the log level is at or above the specified +minimum level. ### func NewSimpleWriter ``` go func NewSimpleWriter(writer io.Writer, formatter func(entry Entry) string) Writer ``` -NewSimpleWriter returns a new writer that writes -log messages to the given io.Writer formatting the -messages with the given formatter. +NewSimpleWriter returns a new writer that writes log messages to the given +io.Writer formatting the messages with the given formatter. 
### func RemoveWriter diff -Nru juju-core-2.0~beta15/src/github.com/juju/loggo/writer.go juju-core-2.0.0/src/github.com/juju/loggo/writer.go --- juju-core-2.0~beta15/src/github.com/juju/loggo/writer.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/loggo/writer.go 2016-10-13 14:32:22.000000000 +0000 @@ -7,6 +7,9 @@ "fmt" "io" "os" + "path/filepath" + + "github.com/juju/ansiterm" ) // DefaultWriterName is the name of the default writer for @@ -66,5 +69,45 @@ } func defaultWriter() Writer { - return NewSimpleWriter(os.Stderr, DefaultFormatter) + return NewColorWriter(os.Stderr) +} + +type colorWriter struct { + writer *ansiterm.Writer +} + +var ( + // SeverityColor defines the colors for the levels output by the ColorWriter. + SeverityColor = map[Level]*ansiterm.Context{ + TRACE: ansiterm.Foreground(ansiterm.Default), + DEBUG: ansiterm.Foreground(ansiterm.Green), + INFO: ansiterm.Foreground(ansiterm.BrightBlue), + WARNING: ansiterm.Foreground(ansiterm.Yellow), + ERROR: ansiterm.Foreground(ansiterm.BrightRed), + CRITICAL: &ansiterm.Context{ + Foreground: ansiterm.White, + Background: ansiterm.Red, + }, + } + // LocationColor defines the colors for the location output by the ColorWriter. + LocationColor = ansiterm.Foreground(ansiterm.BrightBlue) +) + +// NewColorWriter will write out colored severity levels if the writer is +// outputting to a terminal. +func NewColorWriter(writer io.Writer) Writer { + return &colorWriter{ansiterm.NewWriter(writer)} +} + +// Write implements Writer. 
+func (w *colorWriter) Write(entry Entry) { + ts := formatTime(entry.Timestamp) + // Just get the basename from the filename + filename := filepath.Base(entry.Filename) + + fmt.Fprintf(w.writer, "%s ", ts) + SeverityColor[entry.Level].Fprintf(w.writer, entry.Level.Short()) + fmt.Fprintf(w.writer, " %s ", entry.Module) + LocationColor.Fprintf(w.writer, "%s:%d ", filename, entry.Line) + fmt.Fprintln(w.writer, entry.Message) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/persistent-cookiejar/jar.go juju-core-2.0.0/src/github.com/juju/persistent-cookiejar/jar.go --- juju-core-2.0~beta15/src/github.com/juju/persistent-cookiejar/jar.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/persistent-cookiejar/jar.go 2016-10-13 14:32:26.000000000 +0000 @@ -396,6 +396,32 @@ } } +// RemoveAllHost removes any cookies from the jar that were set for the given host. +func (j *Jar) RemoveAllHost(host string) { + host, err := canonicalHost(host) + if err != nil { + return + } + key := jarKey(host, j.psList) + + j.mu.Lock() + defer j.mu.Unlock() + + expired := time.Now().Add(-1 * time.Second) + submap := j.entries[key] + for id, e := range submap { + if e.CanonicalHost == host { + // Save some space by deleting the value when the cookie + // expires. We can't delete the cookie itself because then + // we wouldn't know that the cookie had expired when + // we merge with another cookie jar. + e.Value = "" + e.Expires = expired + submap[id] = e + } + } +} + // SetCookies implements the SetCookies method of the http.CookieJar interface. // // It does nothing if the URL's scheme is not HTTP or HTTPS. 
diff -Nru juju-core-2.0~beta15/src/github.com/juju/persistent-cookiejar/jar_test.go juju-core-2.0.0/src/github.com/juju/persistent-cookiejar/jar_test.go --- juju-core-2.0~beta15/src/github.com/juju/persistent-cookiejar/jar_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/persistent-cookiejar/jar_test.go 2016-10-13 14:32:26.000000000 +0000 @@ -1964,6 +1964,87 @@ } } +func TestRemoveAll(t *testing.T) { + testRemoveAll(t, mustParseURL("https://www.apple.com"), "www.apple.com", true) +} + +func TestRemoveAllRoot(t *testing.T) { + testRemoveAll(t, mustParseURL("https://www.apple.com"), "apple.com", false) +} + +func TestRemoveAllDifferent(t *testing.T) { + testRemoveAll(t, mustParseURL("https://www.apple.com"), "foo.apple.com", false) +} + +func TestRemoveAllWithPort(t *testing.T) { + testRemoveAll(t, mustParseURL("https://www.apple.com"), "www.apple.com:80", true) +} + +func TestRemoveAllIP(t *testing.T) { + testRemoveAll(t, mustParseURL("https://10.1.1.1"), "10.1.1.1", true) +} + +func testRemoveAll(t *testing.T, setURL *url.URL, removeHost string, shouldRemove bool) { + jar := newTestJar("") + google := mustParseURL("https://www.google.com") + jar.SetCookies( + google, + []*http.Cookie{ + &http.Cookie{ + Name: "test-cookie", + Value: "test-value", + Expires: time.Now().Add(24 * time.Hour), + }, + &http.Cookie{ + Name: "test-cookie2", + Value: "test-value", + Expires: time.Now().Add(24 * time.Hour), + }, + }, + ) + onlyGoogle := jar.AllCookies() + if len(onlyGoogle) != 2 { + t.Fatalf("Expected 2 cookies, got %d", len(onlyGoogle)) + } + + jar.SetCookies( + setURL, + []*http.Cookie{ + &http.Cookie{ + Name: "test-cookie3", + Value: "test-value", + Expires: time.Now().Add(24 * time.Hour), + }, + &http.Cookie{ + Name: "test-cookie4", + Value: "test-value", + Expires: time.Now().Add(24 * time.Hour), + }, + }, + ) + withSet := jar.AllCookies() + if len(withSet) != 4 { + t.Fatalf("Expected 4 cookies, got %d", len(withSet)) + } + 
jar.RemoveAllHost(removeHost) + after := jar.AllCookies() + if !shouldRemove { + if len(after) != len(withSet) { + t.Fatalf("Expected %d cookies, got %d", len(withSet), len(after)) + } + return + } + if len(after) != len(onlyGoogle) { + t.Fatalf("Expected %d cookies, got %d", len(onlyGoogle), len(after)) + } + if !cookiesEqual(onlyGoogle[0], after[0]) { + t.Fatalf("Expected %v, got %v", onlyGoogle[0], after[0]) + } + if !cookiesEqual(onlyGoogle[1], after[1]) { + t.Fatalf("Expected %v, got %v", onlyGoogle[1], after[1]) + } +} + func cookiesEqual(a, b *http.Cookie) bool { return a.Name == b.Name && a.Value == b.Value && diff -Nru juju-core-2.0~beta15/src/github.com/juju/retry/.gitignore juju-core-2.0.0/src/github.com/juju/retry/.gitignore --- juju-core-2.0~beta15/src/github.com/juju/retry/.gitignore 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/retry/.gitignore 2016-10-13 14:32:08.000000000 +0000 @@ -0,0 +1 @@ +*.test diff -Nru juju-core-2.0~beta15/src/github.com/juju/romulus/cmd/agree/agree.go juju-core-2.0.0/src/github.com/juju/romulus/cmd/agree/agree.go --- juju-core-2.0~beta15/src/github.com/juju/romulus/cmd/agree/agree.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/romulus/cmd/agree/agree.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,224 +0,0 @@ -// Copyright 2016 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package agree - -import ( - "bufio" - "bytes" - "fmt" - "os" - "os/exec" - "strings" - - "github.com/juju/cmd" - "github.com/juju/errors" - "github.com/juju/juju/cmd/modelcmd" - "github.com/juju/terms-client/api" - "github.com/juju/terms-client/api/wireformat" - "gopkg.in/juju/charm.v6-unstable" - "launchpad.net/gnuflag" -) - -var ( - clientNew = api.NewClient -) - -const agreeDoc = ` -Agree to the terms required by a charm. - -When deploying a charm that requires agreement to terms, use 'juju agree' to -view the terms and agree to them. 
Then the charm may be deployed. - -Once you have agreed to terms, you will not be prompted to view them again. - -Examples: - # Displays terms for somePlan revision 1 and prompts for agreement. - juju agree somePlan/1 - - # Displays the terms for revision 1 of somePlan, revision 2 of otherPlan, - # and prompts for agreement. - juju agree somePlan/1 otherPlan/2 - - # Agrees to the terms without prompting. - juju agree somePlan/1 otherPlan/2 --yes -` - -// NewAgreeCommand returns a new command that can be -// used to create user agreements. -func NewAgreeCommand() cmd.Command { - return &agreeCommand{} -} - -type term struct { - owner string - name string - revision int -} - -// agreeCommand creates a user agreement to the specified terms. -type agreeCommand struct { - modelcmd.JujuCommandBase - out cmd.Output - - terms []term - termIds []string - SkipTermContent bool -} - -// SetFlags implements Command.SetFlags. -func (c *agreeCommand) SetFlags(f *gnuflag.FlagSet) { - f.BoolVar(&c.SkipTermContent, "yes", false, "Agree to terms non interactively") - c.out.AddFlags(f, "json", cmd.DefaultFormatters) -} - -// Info implements Command.Info. -func (c *agreeCommand) Info() *cmd.Info { - return &cmd.Info{ - Name: "agree", - Args: "", - Purpose: "Agree to terms.", - Doc: agreeDoc, - } -} - -// Init read and verifies the arguments. -func (c *agreeCommand) Init(args []string) error { - if len(args) < 1 { - return errors.New("missing arguments") - } - - for _, t := range args { - termId, err := charm.ParseTerm(t) - if err != nil { - return errors.Annotate(err, "invalid term format") - } - if termId.Revision == 0 { - return errors.Errorf("must specify a valid term revision %q", t) - } - c.terms = append(c.terms, term{owner: termId.Owner, name: termId.Name, revision: termId.Revision}) - c.termIds = append(c.termIds, t) - } - if len(c.terms) == 0 { - return errors.New("must specify a valid term revision") - } - return nil -} - -// Run implements Command.Run. 
-func (c *agreeCommand) Run(ctx *cmd.Context) error { - client, err := c.BakeryClient() - if err != nil { - return errors.Trace(err) - } - - termsClient, err := clientNew(api.HTTPClient(client)) - if err != nil { - return err - } - - if c.SkipTermContent { - err := saveAgreements(ctx, termsClient, c.terms) - if err != nil { - return errors.Trace(err) - } - return nil - } - - needAgreement := []wireformat.GetTermsResponse{} - terms, err := termsClient.GetUnsignedTerms(&wireformat.CheckAgreementsRequest{ - Terms: c.termIds, - }) - if err != nil { - return errors.Annotate(err, "failed to retrieve terms") - } - needAgreement = append(needAgreement, terms...) - - if len(needAgreement) == 0 { - fmt.Fprintf(ctx.Stdout, "Already agreed\n") - return nil - } - - err = printTerms(ctx, needAgreement) - if err != nil { - return errors.Trace(err) - } - fmt.Fprintf(ctx.Stdout, "Do you agree to the displayed terms? (Y/n): ") - answer, err := userAnswer() - if err != nil { - return errors.Trace(err) - } - - agreedTerms := make([]term, len(needAgreement)) - for i, t := range needAgreement { - agreedTerms[i] = term{owner: t.Owner, name: t.Name, revision: t.Revision} - } - - answer = strings.TrimSpace(answer) - if userAgrees(answer) { - err = saveAgreements(ctx, termsClient, agreedTerms) - if err != nil { - return errors.Trace(err) - } - } else { - fmt.Fprintf(ctx.Stdout, "You didn't agree to the presented terms.\n") - return nil - } - - return nil -} - -func saveAgreements(ctx *cmd.Context, termsClient api.Client, ts []term) error { - agreements := make([]wireformat.SaveAgreement, len(ts)) - for i, t := range ts { - agreements[i] = wireformat.SaveAgreement{ - TermOwner: t.owner, - TermName: t.name, - TermRevision: t.revision, - } - } - response, err := termsClient.SaveAgreement(&wireformat.SaveAgreements{Agreements: agreements}) - if err != nil { - return errors.Annotate(err, "failed to save user agreement") - } - for _, agreement := range response.Agreements { - _, err = 
fmt.Fprintf(ctx.Stdout, "Agreed to revision %v of %v for Juju users\n", agreement.Revision, agreement.Term) - if err != nil { - return errors.Trace(err) - } - } - return nil -} - -var userAnswer = func() (string, error) { - return bufio.NewReader(os.Stdin).ReadString('\n') -} - -func printTerms(ctx *cmd.Context, terms []wireformat.GetTermsResponse) error { - output := "" - for _, t := range terms { - output += fmt.Sprintf(` -=== %v/%v: %v === -%v -======== -`, t.Name, t.Revision, t.CreatedOn, t.Content) - } - buffer := bytes.NewReader([]byte(output)) - less := exec.Command("less") - less.Args = []string{"less", "-P", "Press 'q' to quit after you've read the terms."} - less.Stdout = ctx.Stdout - less.Stdin = buffer - err := less.Run() - if err != nil { - fmt.Fprintf(ctx.Stdout, output) - return errors.Annotate(err, "failed to print plan") - } - return nil -} - -func userAgrees(input string) bool { - if input == "y" || input == "Y" || input == "" { - return true - } - return false -} diff -Nru juju-core-2.0~beta15/src/github.com/juju/romulus/cmd/agree/agree_test.go juju-core-2.0.0/src/github.com/juju/romulus/cmd/agree/agree_test.go --- juju-core-2.0~beta15/src/github.com/juju/romulus/cmd/agree/agree_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/romulus/cmd/agree/agree_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,233 +0,0 @@ -// Copyright 2016 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. 
- -package agree_test - -import ( - "sync" - "testing" - - "github.com/juju/cmd/cmdtesting" - coretesting "github.com/juju/juju/testing" - "github.com/juju/terms-client/api" - "github.com/juju/terms-client/api/wireformat" - jujutesting "github.com/juju/testing" - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" - - "github.com/juju/romulus/cmd/agree" -) - -func TestPackage(t *testing.T) { - gc.TestingT(t) -} - -var _ = gc.Suite(&agreeSuite{}) - -var testTerms = "Test Terms" - -type agreeSuite struct { - client *mockClient - coretesting.FakeJujuXDGDataHomeSuite -} - -func (s *agreeSuite) SetUpTest(c *gc.C) { - s.FakeJujuXDGDataHomeSuite.SetUpTest(c) - s.client = &mockClient{} - - jujutesting.PatchValue(agree.ClientNew, func(...api.ClientOption) (api.Client, error) { - return s.client, nil - }) -} - -func (s *agreeSuite) TestAgreementNothingToSign(c *gc.C) { - jujutesting.PatchValue(agree.UserAnswer, func() (string, error) { - return "y", nil - }) - - s.client.user = "test-user" - s.client.setUnsignedTerms([]wireformat.GetTermsResponse{}) - - ctx, err := cmdtesting.RunCommand(c, agree.NewAgreeCommand(), "test-term/1") - c.Assert(err, jc.ErrorIsNil) - c.Assert(cmdtesting.Stdout(ctx), gc.Equals, `Already agreed -`) -} -func (s *agreeSuite) TestAgreement(c *gc.C) { - var answer string - jujutesting.PatchValue(agree.UserAnswer, func() (string, error) { - return answer, nil - }) - - s.client.user = "test-user" - s.client.setUnsignedTerms([]wireformat.GetTermsResponse{{ - Name: "test-term", - Revision: 1, - Content: testTerms, - }}) - tests := []struct { - about string - args []string - err string - stdout string - answer string - apiCalls []jujutesting.StubCall - }{{ - about: "everything works", - args: []string{"test-term/1", "--yes"}, - stdout: "Agreed to revision 1 of test-term for Juju users\n", - apiCalls: []jujutesting.StubCall{{FuncName: "SaveAgreement", Args: []interface{}{&wireformat.SaveAgreements{Agreements: []wireformat.SaveAgreement{{TermName: 
"test-term", TermRevision: 1}}}}}}, - }, { - about: "everything works with owner term", - args: []string{"owner/test-term/1", "--yes"}, - stdout: "Agreed to revision 1 of test-term for Juju users\n", - apiCalls: []jujutesting.StubCall{{FuncName: "SaveAgreement", Args: []interface{}{&wireformat.SaveAgreements{Agreements: []wireformat.SaveAgreement{{TermOwner: "owner", TermName: "test-term", TermRevision: 1}}}}}}, - }, { - about: "cannot parse revision number", - args: []string{"test-term/abc"}, - err: `must specify a valid term revision "test-term/abc"`, - }, { - about: "missing arguments", - args: []string{}, - err: "missing arguments", - }, { - about: "everything works - user accepts", - args: []string{"test-term/1"}, - answer: "y", - stdout: ` -=== test-term/1: 0001-01-01 00:00:00 +0000 UTC === -Test Terms -======== -Do you agree to the displayed terms? (Y/n): Agreed to revision 1 of test-term for Juju users -`, - apiCalls: []jujutesting.StubCall{{ - FuncName: "GetUnunsignedTerms", Args: []interface{}{ - &wireformat.CheckAgreementsRequest{Terms: []string{"test-term/1"}}, - }, - }, { - FuncName: "SaveAgreement", Args: []interface{}{ - &wireformat.SaveAgreements{Agreements: []wireformat.SaveAgreement{{TermName: "test-term", TermRevision: 1}}}, - }, - }}, - }, { - about: "everything works - user refuses", - args: []string{"test-term/1"}, - answer: "n", - stdout: ` -=== test-term/1: 0001-01-01 00:00:00 +0000 UTC === -Test Terms -======== -Do you agree to the displayed terms? (Y/n): You didn't agree to the presented terms. 
-`, - apiCalls: []jujutesting.StubCall{{ - FuncName: "GetUnunsignedTerms", Args: []interface{}{ - &wireformat.CheckAgreementsRequest{Terms: []string{"test-term/1"}}, - }, - }}, - }, { - about: "must not accept 0 revision", - args: []string{"test-term/0", "--yes"}, - err: `must specify a valid term revision "test-term/0"`, - }, { - about: "user accepts, multiple terms", - args: []string{"test-term/1", "test-term/2"}, - answer: "y", - stdout: ` -=== test-term/1: 0001-01-01 00:00:00 +0000 UTC === -Test Terms -======== -Do you agree to the displayed terms? (Y/n): Agreed to revision 1 of test-term for Juju users -`, - apiCalls: []jujutesting.StubCall{ - { - FuncName: "GetUnunsignedTerms", Args: []interface{}{ - &wireformat.CheckAgreementsRequest{Terms: []string{"test-term/1", "test-term/2"}}, - }, - }, { - FuncName: "SaveAgreement", Args: []interface{}{ - &wireformat.SaveAgreements{Agreements: []wireformat.SaveAgreement{ - {TermName: "test-term", TermRevision: 1}, - }}, - }, - }}, - }, { - about: "valid then unknown arguments", - args: []string{"test-term/1", "unknown", "arguments"}, - err: `must specify a valid term revision "unknown"`, - }, { - about: "user accepts all the terms", - args: []string{"test-term/1", "test-term/2", "--yes"}, - stdout: `Agreed to revision 1 of test-term for Juju users -Agreed to revision 2 of test-term for Juju users -`, - apiCalls: []jujutesting.StubCall{ - {FuncName: "SaveAgreement", Args: []interface{}{&wireformat.SaveAgreements{ - Agreements: []wireformat.SaveAgreement{ - {TermName: "test-term", TermRevision: 1}, - {TermName: "test-term", TermRevision: 2}, - }}}}}, - }, - } - for i, test := range tests { - s.client.ResetCalls() - c.Logf("running test %d: %s", i, test.about) - if test.answer != "" { - answer = test.answer - } - ctx, err := cmdtesting.RunCommand(c, agree.NewAgreeCommand(), test.args...) 
- if test.err != "" { - c.Assert(err, gc.ErrorMatches, test.err) - } else { - c.Assert(err, jc.ErrorIsNil) - } - if ctx != nil { - c.Assert(cmdtesting.Stdout(ctx), gc.Equals, test.stdout) - } - if len(test.apiCalls) > 0 { - s.client.CheckCalls(c, test.apiCalls) - } - } -} - -type mockClient struct { - api.Client - jujutesting.Stub - - lock sync.Mutex - user string - terms []wireformat.GetTermsResponse - unsignedTerms []wireformat.GetTermsResponse -} - -func (c *mockClient) setUnsignedTerms(t []wireformat.GetTermsResponse) { - c.lock.Lock() - defer c.lock.Unlock() - c.unsignedTerms = t -} - -// SaveAgreement saves user's agreement to the specified -// revision of the terms documents -func (c *mockClient) SaveAgreement(p *wireformat.SaveAgreements) (*wireformat.SaveAgreementResponses, error) { - c.AddCall("SaveAgreement", p) - responses := make([]wireformat.AgreementResponse, len(p.Agreements)) - for i, agreement := range p.Agreements { - responses[i] = wireformat.AgreementResponse{ - User: c.user, - Term: agreement.TermName, - Revision: agreement.TermRevision, - } - } - return &wireformat.SaveAgreementResponses{responses}, nil -} - -func (c *mockClient) GetUnsignedTerms(p *wireformat.CheckAgreementsRequest) ([]wireformat.GetTermsResponse, error) { - c.MethodCall(c, "GetUnunsignedTerms", p) - r := make([]wireformat.GetTermsResponse, len(c.unsignedTerms)) - copy(r, c.unsignedTerms) - return r, nil -} - -func (c *mockClient) GetUsersAgreements() ([]wireformat.AgreementResponse, error) { - c.MethodCall(c, "GetUsersAgreements") - return []wireformat.AgreementResponse{}, nil -} diff -Nru juju-core-2.0~beta15/src/github.com/juju/romulus/cmd/agree/export.go juju-core-2.0.0/src/github.com/juju/romulus/cmd/agree/export.go --- juju-core-2.0~beta15/src/github.com/juju/romulus/cmd/agree/export.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/romulus/cmd/agree/export.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,12 +0,0 @@ -// Copyright 2016 Canonical 
Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package agree - -// These two var are exported becuase they are useful in tests outside of this -// package. Unless you are writing a test you shouldn't be using either of these -// values. -var ( - ClientNew = &clientNew - UserAnswer = &userAnswer -) diff -Nru juju-core-2.0~beta15/src/github.com/juju/romulus/cmd/allocate/allocate.go juju-core-2.0.0/src/github.com/juju/romulus/cmd/allocate/allocate.go --- juju-core-2.0~beta15/src/github.com/juju/romulus/cmd/allocate/allocate.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/romulus/cmd/allocate/allocate.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,140 +0,0 @@ -// Copyright 2016 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package allocate - -import ( - "fmt" - "regexp" - "strings" - - "launchpad.net/gnuflag" - - "github.com/juju/cmd" - "github.com/juju/errors" - "github.com/juju/juju/cmd/modelcmd" - "github.com/juju/utils" - "gopkg.in/macaroon-bakery.v1/httpbakery" - - api "github.com/juju/romulus/api/budget" -) - -var budgetWithLimitRe = regexp.MustCompile(`^[a-zA-Z0-9\-]+:[0-9]+$`) - -type allocateCommand struct { - modelcmd.ModelCommandBase - api apiClient - Budget string - ModelUUID string - Services []string - Limit string -} - -// NewAllocateCommand returns a new allocateCommand -func NewAllocateCommand() modelcmd.ModelCommand { - return &allocateCommand{} -} - -const doc = ` -Allocate budget for the specified applications, replacing any prior allocations -made for the specified applications. - -Examples: - # Assigns application "db" to an allocation on budget "somebudget" with - # the limit "42". - juju allocate somebudget:42 db - - # Application names assume the current selected model, unless otherwise - # specified with: - juju allocate -m [ ... - - # Models may also be referenced by UUID when necessary: - juju allocate --model-uuid ... 
-` - -// SetFlags implements cmd.Command.SetFlags. -func (c *allocateCommand) SetFlags(f *gnuflag.FlagSet) { - f.StringVar(&c.ModelUUID, "model-uuid", "", "Model UUID of allocation") -} - -// Info implements cmd.Command.Info. -func (c *allocateCommand) Info() *cmd.Info { - return &cmd.Info{ - Name: "allocate", - Args: ": [ ...]", - Purpose: "Allocate budget to applications.", - Doc: doc, - } -} - -// Init implements cmd.Command.Init. -func (c *allocateCommand) Init(args []string) error { - if len(args) < 2 { - return errors.New("budget and application name required") - } - budgetWithLimit := args[0] - var err error - c.Budget, c.Limit, err = parseBudgetWithLimit(budgetWithLimit) - if err != nil { - return errors.Annotate(err, `expected args in the form "budget:limit [application ...]"`) - } - if c.ModelUUID == "" { - c.ModelUUID, err = c.modelUUID() - if err != nil { - return err - } - } else { - if !utils.IsValidUUIDString(c.ModelUUID) { - return errors.NotValidf("model UUID %q", c.ModelUUID) - } - } - - c.Services = args[1:] - return nil -} - -// Run implements cmd.Command.Run and has most of the logic for the run command. 
-func (c *allocateCommand) Run(ctx *cmd.Context) error { - client, err := c.BakeryClient() - if err != nil { - return errors.Annotate(err, "failed to create an http client") - } - api, err := c.newAPIClient(client) - if err != nil { - return errors.Annotate(err, "failed to create an api client") - } - resp, err := api.CreateAllocation(c.Budget, c.Limit, c.ModelUUID, c.Services) - if err != nil { - return errors.Annotate(err, "failed to create allocation") - } - fmt.Fprintln(ctx.Stdout, resp) - return nil -} - -func (c *allocateCommand) modelUUID() (string, error) { - model, err := c.ClientStore().ModelByName(c.ControllerName(), c.ModelName()) - if err != nil { - return "", errors.Trace(err) - } - return model.ModelUUID, nil -} - -func parseBudgetWithLimit(bl string) (string, string, error) { - if !budgetWithLimitRe.MatchString(bl) { - return "", "", errors.New("invalid budget specification, expecting :") - } - parts := strings.Split(bl, ":") - return parts[0], parts[1], nil -} - -func (c *allocateCommand) newAPIClient(bakery *httpbakery.Client) (apiClient, error) { - if c.api != nil { - return c.api, nil - } - c.api = api.NewClient(bakery) - return c.api, nil -} - -type apiClient interface { - CreateAllocation(string, string, string, []string) (string, error) -} diff -Nru juju-core-2.0~beta15/src/github.com/juju/romulus/cmd/allocate/allocate_test.go juju-core-2.0.0/src/github.com/juju/romulus/cmd/allocate/allocate_test.go --- juju-core-2.0~beta15/src/github.com/juju/romulus/cmd/allocate/allocate_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/romulus/cmd/allocate/allocate_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,147 +0,0 @@ -// Copyright 2016 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. 
- -package allocate_test - -import ( - "github.com/juju/cmd" - "github.com/juju/cmd/cmdtesting" - "github.com/juju/errors" - "github.com/juju/juju/jujuclient" - "github.com/juju/juju/jujuclient/jujuclienttesting" - coretesting "github.com/juju/juju/testing" - "github.com/juju/testing" - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" - - "github.com/juju/romulus/cmd/allocate" -) - -var _ = gc.Suite(&allocateSuite{}) - -type allocateSuite struct { - coretesting.FakeJujuXDGDataHomeSuite - stub *testing.Stub - mockAPI *mockapi - store jujuclient.ClientStore -} - -func (s *allocateSuite) SetUpTest(c *gc.C) { - s.FakeJujuXDGDataHomeSuite.SetUpTest(c) - s.store = &jujuclienttesting.MemStore{ - Controllers: map[string]jujuclient.ControllerDetails{ - "controller": {}, - }, - Models: map[string]*jujuclient.ControllerModels{ - "controller": { - Models: map[string]jujuclient.ModelDetails{ - "model": {"model-uuid"}, - }, - CurrentModel: "model", - }, - }, - Accounts: map[string]jujuclient.AccountDetails{ - "controller": { - User: "admin@local", - }, - }, - } - s.stub = &testing.Stub{} - s.mockAPI = newMockAPI(s.stub) -} - -func (s *allocateSuite) run(c *gc.C, args ...string) (*cmd.Context, error) { - alloc := allocate.NewAllocateCommandForTest(s.mockAPI, s.store) - a := []string{"-m", "controller:model"} - a = append(a, args...) - return cmdtesting.RunCommand(c, alloc, a...) 
-} - -func (s *allocateSuite) TestAllocate(c *gc.C) { - s.mockAPI.resp = "allocation updated" - ctx, err := s.run(c, "name:100", "db") - c.Assert(err, jc.ErrorIsNil) - c.Assert(cmdtesting.Stdout(ctx), jc.DeepEquals, "allocation updated\n") - s.mockAPI.CheckCall(c, 0, "CreateAllocation", "name", "100", "model-uuid", []string{"db"}) -} - -func (s *allocateSuite) TestAllocateAPIError(c *gc.C) { - s.stub.SetErrors(errors.New("something failed")) - _, err := s.run(c, "name:100", "db") - c.Assert(err, gc.ErrorMatches, "failed to create allocation: something failed") - s.mockAPI.CheckCall(c, 0, "CreateAllocation", "name", "100", "model-uuid", []string{"db"}) -} - -func (s *allocateSuite) TestAllocateZero(c *gc.C) { - s.mockAPI.resp = "allocation updated" - _, err := s.run(c, "name:0", "db") - c.Assert(err, jc.ErrorIsNil) - s.mockAPI.CheckCall(c, 0, "CreateAllocation", "name", "0", "model-uuid", []string{"db"}) -} - -func (s *allocateSuite) TestAllocateModelUUID(c *gc.C) { - s.mockAPI.resp = "allocation updated" - _, err := s.run(c, "name:0", "--model-uuid", "30f7a9f2-220d-4268-b336-35e7daacae79", "db") - c.Assert(err, jc.ErrorIsNil) - s.mockAPI.CheckCall(c, 0, "CreateAllocation", "name", "0", "30f7a9f2-220d-4268-b336-35e7daacae79", []string{"db"}) -} - -func (s *allocateSuite) TestAllocateErrors(c *gc.C) { - tests := []struct { - about string - args []string - expectedError string - }{{ - about: "no args", - args: []string{}, - expectedError: "budget and application name required", - }, { - about: "budget without allocation limit", - args: []string{"name", "db"}, - expectedError: `expected args in the form "budget:limit \[application ...\]": invalid budget specification, expecting :`, - }, { - about: "application not specified", - args: []string{"name:100"}, - expectedError: "budget and application name required", - }, { - about: "negative allocation limit", - args: []string{"name:-100", "db"}, - expectedError: `expected args in the form "budget:limit \[application 
...\]": invalid budget specification, expecting :`, - }, { - about: "non-numeric allocation limit", - args: []string{"name:abcd", "db"}, - expectedError: `expected args in the form "budget:limit \[application ...\]": invalid budget specification, expecting :`, - }, { - about: "empty allocation limit", - args: []string{"name:", "db"}, - expectedError: `expected args in the form "budget:limit \[application ...\]": invalid budget specification, expecting :`, - }, { - about: "invalid model UUID", - args: []string{"--model-uuid", "nope", "name:100", "db"}, - expectedError: `model UUID "nope" not valid`, - }, { - about: "arguments in wrong order", - args: []string{"name:", "db:50"}, - expectedError: `expected args in the form "budget:limit \[application ...\]": invalid budget specification, expecting :`, - }} - for i, test := range tests { - c.Logf("test %d: %s", i, test.about) - _, err := s.run(c, test.args...) - c.Check(err, gc.ErrorMatches, test.expectedError) - s.mockAPI.CheckNoCalls(c) - } -} - -func newMockAPI(s *testing.Stub) *mockapi { - return &mockapi{Stub: s} -} - -type mockapi struct { - *testing.Stub - resp string -} - -func (api *mockapi) CreateAllocation(name, limit, modelUUID string, services []string) (string, error) { - api.MethodCall(api, "CreateAllocation", name, limit, modelUUID, services) - return api.resp, api.NextErr() -} diff -Nru juju-core-2.0~beta15/src/github.com/juju/romulus/cmd/allocate/export_test.go juju-core-2.0.0/src/github.com/juju/romulus/cmd/allocate/export_test.go --- juju-core-2.0~beta15/src/github.com/juju/romulus/cmd/allocate/export_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/romulus/cmd/allocate/export_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,16 +0,0 @@ -// Copyright 2016 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. 
- -package allocate - -import ( - "github.com/juju/cmd" - "github.com/juju/juju/cmd/modelcmd" - "github.com/juju/juju/jujuclient" -) - -func NewAllocateCommandForTest(api apiClient, store jujuclient.ClientStore) cmd.Command { - c := &allocateCommand{api: api} - c.SetClientStore(store) - return modelcmd.Wrap(c) -} diff -Nru juju-core-2.0~beta15/src/github.com/juju/romulus/cmd/allocate/package_test.go juju-core-2.0.0/src/github.com/juju/romulus/cmd/allocate/package_test.go --- juju-core-2.0~beta15/src/github.com/juju/romulus/cmd/allocate/package_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/romulus/cmd/allocate/package_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,14 +0,0 @@ -// Copyright 2016 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package allocate_test - -import ( - stdtesting "testing" - - gc "gopkg.in/check.v1" -) - -func TestAll(t *stdtesting.T) { - gc.TestingT(t) -} diff -Nru juju-core-2.0~beta15/src/github.com/juju/romulus/cmd/commands/commands.go juju-core-2.0.0/src/github.com/juju/romulus/cmd/commands/commands.go --- juju-core-2.0~beta15/src/github.com/juju/romulus/cmd/commands/commands.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/romulus/cmd/commands/commands.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,51 +0,0 @@ -// Copyright 2016 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -// Package commands provides functionality for registering all the romulus commands. 
-package commands - -import ( - "github.com/juju/cmd" - "github.com/juju/juju/cmd/modelcmd" - - "github.com/juju/romulus/cmd/agree" - "github.com/juju/romulus/cmd/allocate" - "github.com/juju/romulus/cmd/createbudget" - "github.com/juju/romulus/cmd/listagreements" - "github.com/juju/romulus/cmd/listbudgets" - "github.com/juju/romulus/cmd/listplans" - "github.com/juju/romulus/cmd/setbudget" - "github.com/juju/romulus/cmd/setplan" - "github.com/juju/romulus/cmd/showbudget" - "github.com/juju/romulus/cmd/updateallocation" -) - -type commandRegister interface { - Register(cmd.Command) -} - -// RegisterAll registers all romulus commands with the -// provided command registry. -func RegisterAll(r commandRegister) { - register := func(c cmd.Command) { - switch c := c.(type) { - case modelcmd.ModelCommand: - r.Register(modelcmd.Wrap(c)) - case modelcmd.CommandBase: - r.Register(modelcmd.WrapBase(c)) - default: - r.Register(c) - } - - } - register(agree.NewAgreeCommand()) - register(listagreements.NewListAgreementsCommand()) - register(allocate.NewAllocateCommand()) - register(listbudgets.NewListBudgetsCommand()) - register(createbudget.NewCreateBudgetCommand()) - register(listplans.NewListPlansCommand()) - register(setbudget.NewSetBudgetCommand()) - register(setplan.NewSetPlanCommand()) - register(showbudget.NewShowBudgetCommand()) - register(updateallocation.NewUpdateAllocationCommand()) -} diff -Nru juju-core-2.0~beta15/src/github.com/juju/romulus/cmd/commands/commands_test.go juju-core-2.0.0/src/github.com/juju/romulus/cmd/commands/commands_test.go --- juju-core-2.0~beta15/src/github.com/juju/romulus/cmd/commands/commands_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/romulus/cmd/commands/commands_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,46 +0,0 @@ -// Copyright 2016 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. 
- -package commands_test - -import ( - stdtesting "testing" - - "github.com/juju/cmd" - gc "gopkg.in/check.v1" - - "github.com/juju/romulus/cmd/commands" -) - -type commandSuite struct{} - -var _ = gc.Suite(&commandSuite{}) - -type mockRegister struct { - commands []string -} - -func (m *mockRegister) Register(command cmd.Command) { - m.commands = append(m.commands, command.Info().Name) -} - -func TestAll(t *stdtesting.T) { - gc.TestingT(t) -} - -func (s *commandSuite) TestRegister(c *gc.C) { - m := &mockRegister{} - commands.RegisterAll(m) - c.Assert(m.commands, gc.DeepEquals, []string{ - "agree", - "agreements", - "allocate", - "budgets", - "create-budget", - "plans", - "set-budget", - "set-plan", - "show-budget", - "update-allocation", - }) -} diff -Nru juju-core-2.0~beta15/src/github.com/juju/romulus/cmd/createbudget/createbudget.go juju-core-2.0.0/src/github.com/juju/romulus/cmd/createbudget/createbudget.go --- juju-core-2.0~beta15/src/github.com/juju/romulus/cmd/createbudget/createbudget.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/romulus/cmd/createbudget/createbudget.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,85 +0,0 @@ -// Copyright 2016 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package createbudget - -import ( - "fmt" - "strconv" - - "github.com/juju/cmd" - "github.com/juju/errors" - "github.com/juju/juju/cmd/modelcmd" - "gopkg.in/macaroon-bakery.v1/httpbakery" - - api "github.com/juju/romulus/api/budget" -) - -type createBudgetCommand struct { - modelcmd.JujuCommandBase - Name string - Value string -} - -// NewCreateBudgetCommand returns a new createBudgetCommand -func NewCreateBudgetCommand() cmd.Command { - return &createBudgetCommand{} -} - -const doc = ` -Create a new budget with monthly limit. - -Examples: - # Creates a budget named 'qa' with a limit of 42. - juju create-budget qa 42 -` - -// Info implements cmd.Command.Info. 
-func (c *createBudgetCommand) Info() *cmd.Info { - return &cmd.Info{ - Name: "create-budget", - Purpose: "Create a new budget.", - Doc: doc, - } -} - -// Init implements cmd.Command.Init. -func (c *createBudgetCommand) Init(args []string) error { - if len(args) < 2 { - return errors.New("name and value required") - } - c.Name, c.Value = args[0], args[1] - if _, err := strconv.ParseInt(c.Value, 10, 32); err != nil { - return errors.New("budget value needs to be a whole number") - } - return cmd.CheckEmpty(args[2:]) -} - -// Run implements cmd.Command.Run and has most of the logic for the run command. -func (c *createBudgetCommand) Run(ctx *cmd.Context) error { - client, err := c.BakeryClient() - if err != nil { - return errors.Annotate(err, "failed to create an http client") - } - api, err := newAPIClient(client) - if err != nil { - return errors.Annotate(err, "failed to create an api client") - } - resp, err := api.CreateBudget(c.Name, c.Value) - if err != nil { - return errors.Annotate(err, "failed to create the budget") - } - fmt.Fprintln(ctx.Stdout, resp) - return nil -} - -var newAPIClient = newAPIClientImpl - -func newAPIClientImpl(c *httpbakery.Client) (apiClient, error) { - client := api.NewClient(c) - return client, nil -} - -type apiClient interface { - CreateBudget(name string, limit string) (string, error) -} diff -Nru juju-core-2.0~beta15/src/github.com/juju/romulus/cmd/createbudget/createbudget_test.go juju-core-2.0.0/src/github.com/juju/romulus/cmd/createbudget/createbudget_test.go --- juju-core-2.0~beta15/src/github.com/juju/romulus/cmd/createbudget/createbudget_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/romulus/cmd/createbudget/createbudget_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,95 +0,0 @@ -// Copyright 2016 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. 
- -package createbudget_test - -import ( - "github.com/juju/cmd/cmdtesting" - "github.com/juju/errors" - coretesting "github.com/juju/juju/testing" - "github.com/juju/testing" - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" - - "github.com/juju/romulus/cmd/createbudget" -) - -var _ = gc.Suite(&createBudgetSuite{}) - -type createBudgetSuite struct { - coretesting.FakeJujuXDGDataHomeSuite - stub *testing.Stub - mockAPI *mockapi -} - -func (s *createBudgetSuite) SetUpTest(c *gc.C) { - s.FakeJujuXDGDataHomeSuite.SetUpTest(c) - s.stub = &testing.Stub{} - s.mockAPI = newMockAPI(s.stub) - s.PatchValue(createbudget.NewAPIClient, createbudget.APIClientFnc(s.mockAPI)) -} - -func (s *createBudgetSuite) TestCreateBudget(c *gc.C) { - s.mockAPI.resp = "name budget set to 5" - createCmd := createbudget.NewCreateBudgetCommand() - ctx, err := cmdtesting.RunCommand(c, createCmd, "name", "5") - c.Assert(err, jc.ErrorIsNil) - c.Assert(cmdtesting.Stdout(ctx), jc.DeepEquals, "name budget set to 5\n") - s.mockAPI.CheckCall(c, 0, "CreateBudget", "name", "5") -} - -func (s *createBudgetSuite) TestCreateBudgetAPIError(c *gc.C) { - s.mockAPI.SetErrors(errors.New("something failed")) - createCmd := createbudget.NewCreateBudgetCommand() - _, err := cmdtesting.RunCommand(c, createCmd, "name", "5") - c.Assert(err, gc.ErrorMatches, "failed to create the budget: something failed") - s.mockAPI.CheckCall(c, 0, "CreateBudget", "name", "5") -} - -func (s *createBudgetSuite) TestCreateBudgetErrors(c *gc.C) { - tests := []struct { - about string - args []string - expectedError string - }{ - { - about: "test value needs to be a number", - args: []string{"name", "badvalue"}, - expectedError: "budget value needs to be a whole number", - }, - { - about: "value is missing", - args: []string{"name"}, - expectedError: "name and value required", - }, - { - about: "no args", - args: []string{}, - expectedError: "name and value required", - }, - } - for i, test := range tests { - c.Logf("test %d: 
%s", i, test.about) - if test.expectedError != "" { - s.mockAPI.SetErrors(errors.New(test.expectedError)) - } - createCmd := createbudget.NewCreateBudgetCommand() - _, err := cmdtesting.RunCommand(c, createCmd, test.args...) - c.Assert(err, gc.ErrorMatches, test.expectedError) - s.mockAPI.CheckNoCalls(c) - } -} - -func newMockAPI(s *testing.Stub) *mockapi { - return &mockapi{Stub: s} -} - -type mockapi struct { - *testing.Stub - resp string -} - -func (api *mockapi) CreateBudget(name, value string) (string, error) { - api.MethodCall(api, "CreateBudget", name, value) - return api.resp, api.NextErr() -} diff -Nru juju-core-2.0~beta15/src/github.com/juju/romulus/cmd/createbudget/export_test.go juju-core-2.0.0/src/github.com/juju/romulus/cmd/createbudget/export_test.go --- juju-core-2.0~beta15/src/github.com/juju/romulus/cmd/createbudget/export_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/romulus/cmd/createbudget/export_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,18 +0,0 @@ -// Copyright 2016 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package createbudget - -import ( - "gopkg.in/macaroon-bakery.v1/httpbakery" -) - -var ( - NewAPIClient = &newAPIClient -) - -func APIClientFnc(api apiClient) func(*httpbakery.Client) (apiClient, error) { - return func(*httpbakery.Client) (apiClient, error) { - return api, nil - } -} diff -Nru juju-core-2.0~beta15/src/github.com/juju/romulus/cmd/createbudget/package_test.go juju-core-2.0.0/src/github.com/juju/romulus/cmd/createbudget/package_test.go --- juju-core-2.0~beta15/src/github.com/juju/romulus/cmd/createbudget/package_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/romulus/cmd/createbudget/package_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,14 +0,0 @@ -// Copyright 2016 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. 
- -package createbudget_test - -import ( - stdtesting "testing" - - gc "gopkg.in/check.v1" -) - -func TestAll(t *stdtesting.T) { - gc.TestingT(t) -} diff -Nru juju-core-2.0~beta15/src/github.com/juju/romulus/cmd/listagreements/export_test.go juju-core-2.0.0/src/github.com/juju/romulus/cmd/listagreements/export_test.go --- juju-core-2.0~beta15/src/github.com/juju/romulus/cmd/listagreements/export_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/romulus/cmd/listagreements/export_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,8 +0,0 @@ -// Copyright 2016 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package listagreements - -var ( - NewClient = &newClient -) diff -Nru juju-core-2.0~beta15/src/github.com/juju/romulus/cmd/listagreements/listagreements.go juju-core-2.0.0/src/github.com/juju/romulus/cmd/listagreements/listagreements.go --- juju-core-2.0~beta15/src/github.com/juju/romulus/cmd/listagreements/listagreements.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/romulus/cmd/listagreements/listagreements.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,100 +0,0 @@ -// Copyright 2016 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package listagreements - -import ( - "encoding/json" - - "github.com/juju/cmd" - "github.com/juju/errors" - "github.com/juju/juju/cmd/modelcmd" - "github.com/juju/terms-client/api" - "github.com/juju/terms-client/api/wireformat" - "gopkg.in/macaroon-bakery.v1/httpbakery" - "launchpad.net/gnuflag" -) - -var ( - newClient = func(client *httpbakery.Client) (TermsServiceClient, error) { - return api.NewClient(api.HTTPClient(client)) - } -) - -// TermsServiceClient defines methods needed for the Terms Service CLI -// commands. -type TermsServiceClient interface { - GetUsersAgreements() ([]wireformat.AgreementResponse, error) -} - -const listAgreementsDoc = ` -List terms the user has agreed to. 
-` - -// NewListAgreementsCommand returns a new command that can be -// used to list agreements a user has made. -func NewListAgreementsCommand() *listAgreementsCommand { - return &listAgreementsCommand{} -} - -type term struct { - name string - revision int -} - -var _ cmd.Command = (*listAgreementsCommand)(nil) - -// listAgreementsCommand creates a user agreement to the specified -// Terms and Conditions document. -type listAgreementsCommand struct { - modelcmd.JujuCommandBase - out cmd.Output -} - -// SetFlags implements Command.SetFlags. -func (c *listAgreementsCommand) SetFlags(f *gnuflag.FlagSet) { - c.out.AddFlags(f, "json", map[string]cmd.Formatter{ - "json": formatJSON, - "yaml": cmd.FormatYaml, - }) -} - -// Info implements Command.Info. -func (c *listAgreementsCommand) Info() *cmd.Info { - return &cmd.Info{ - Name: "agreements", - Purpose: "List user's agreements.", - Doc: listAgreementsDoc, - Aliases: []string{"list-agreements"}, - } -} - -// Run implements Command.Run. -func (c *listAgreementsCommand) Run(ctx *cmd.Context) error { - client, err := c.BakeryClient() - if err != nil { - return errors.Annotate(err, "failed to create an http client") - } - - apiClient, err := newClient(client) - if err != nil { - return errors.Annotate(err, "failed to create a terms API client") - } - - agreements, err := apiClient.GetUsersAgreements() - if err != nil { - return errors.Annotate(err, "failed to list user agreements") - } - if agreements == nil { - agreements = []wireformat.AgreementResponse{} - } - err = c.out.Write(ctx, agreements) - if err != nil { - return errors.Mask(err) - } - return nil -} - -func formatJSON(value interface{}) ([]byte, error) { - return json.MarshalIndent(value, "", " ") -} diff -Nru juju-core-2.0~beta15/src/github.com/juju/romulus/cmd/listagreements/listagreements_test.go juju-core-2.0.0/src/github.com/juju/romulus/cmd/listagreements/listagreements_test.go --- 
juju-core-2.0~beta15/src/github.com/juju/romulus/cmd/listagreements/listagreements_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/romulus/cmd/listagreements/listagreements_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,153 +0,0 @@ -// Copyright 2016 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package listagreements_test - -import ( - "errors" - "time" - - "github.com/juju/cmd/cmdtesting" - coretesting "github.com/juju/juju/testing" - "github.com/juju/terms-client/api/wireformat" - jujutesting "github.com/juju/testing" - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" - "gopkg.in/macaroon-bakery.v1/httpbakery" - - "github.com/juju/romulus/cmd/listagreements" -) - -var _ = gc.Suite(&listAgreementsSuite{}) - -var testTermsAndConditions = "Test Terms and Conditions" - -type listAgreementsSuite struct { - coretesting.FakeJujuXDGDataHomeSuite - client *mockClient -} - -func (s *listAgreementsSuite) SetUpTest(c *gc.C) { - s.FakeJujuXDGDataHomeSuite.SetUpTest(c) - s.client = &mockClient{} - - jujutesting.PatchValue(listagreements.NewClient, func(_ *httpbakery.Client) (listagreements.TermsServiceClient, error) { - return s.client, nil - }) -} - -const ( - expectedListAgreementsJSONOutput = `[ - { - "user": "test-user", - "term": "test-term", - "revision": 1, - "created-on": "2015-12-25T00:00:00Z" - } -] -` - expectedListAgreementsJSONOutputWithOwner = `[ - { - "user": "test-user", - "owner": "owner", - "term": "test-term", - "revision": 1, - "created-on": "2015-12-25T00:00:00Z" - } -] -` -) - -func (s *listAgreementsSuite) TestGetUsersAgreements(c *gc.C) { - ctx, err := cmdtesting.RunCommand(c, listagreements.NewListAgreementsCommand()) - c.Assert(err, jc.ErrorIsNil) - c.Assert(cmdtesting.Stdout(ctx), gc.Equals, `[] -`) - c.Assert(s.client.called, jc.IsTrue) - - s.client.setError("well, this is embarassing") - ctx, err = cmdtesting.RunCommand(c, 
listagreements.NewListAgreementsCommand()) - c.Assert(err, gc.ErrorMatches, "failed to list user agreements: well, this is embarassing") - c.Assert(s.client.called, jc.IsTrue) - - agreements := []wireformat.AgreementResponse{{ - User: "test-user", - Term: "test-term", - Revision: 1, - CreatedOn: time.Date(2015, 12, 25, 0, 0, 0, 0, time.UTC), - }} - s.client.setAgreements(agreements) - - ctx, err = cmdtesting.RunCommand(c, listagreements.NewListAgreementsCommand()) - c.Assert(err, jc.ErrorIsNil) - c.Assert(ctx, gc.NotNil) - c.Assert(cmdtesting.Stdout(ctx), gc.Equals, expectedListAgreementsJSONOutput) - c.Assert(s.client.called, jc.IsTrue) - - ctx, err = cmdtesting.RunCommand(c, listagreements.NewListAgreementsCommand(), "--format", "yaml") - c.Assert(err, jc.ErrorIsNil) - c.Assert(ctx, gc.NotNil) - c.Assert(cmdtesting.Stdout(ctx), gc.Equals, "- user: test-user\n term: test-term\n revision: 1\n createdon: 2015-12-25T00:00:00Z\n") - c.Assert(s.client.called, jc.IsTrue) -} - -func (s *listAgreementsSuite) TestGetUsersAgreementsWithTermOwner(c *gc.C) { - ctx, err := cmdtesting.RunCommand(c, listagreements.NewListAgreementsCommand()) - c.Assert(err, jc.ErrorIsNil) - c.Assert(cmdtesting.Stdout(ctx), gc.Equals, `[] -`) - c.Assert(s.client.called, jc.IsTrue) - - s.client.setError("well, this is embarassing") - ctx, err = cmdtesting.RunCommand(c, listagreements.NewListAgreementsCommand()) - c.Assert(err, gc.ErrorMatches, "failed to list user agreements: well, this is embarassing") - c.Assert(s.client.called, jc.IsTrue) - - agreements := []wireformat.AgreementResponse{{ - User: "test-user", - Owner: "owner", - Term: "test-term", - Revision: 1, - CreatedOn: time.Date(2015, 12, 25, 0, 0, 0, 0, time.UTC), - }} - s.client.setAgreements(agreements) - - ctx, err = cmdtesting.RunCommand(c, listagreements.NewListAgreementsCommand()) - c.Assert(err, jc.ErrorIsNil) - c.Assert(ctx, gc.NotNil) - c.Assert(cmdtesting.Stdout(ctx), gc.Equals, expectedListAgreementsJSONOutputWithOwner) - 
c.Assert(s.client.called, jc.IsTrue) - - ctx, err = cmdtesting.RunCommand(c, listagreements.NewListAgreementsCommand(), "--format", "yaml") - c.Assert(err, jc.ErrorIsNil) - c.Assert(ctx, gc.NotNil) - c.Assert(cmdtesting.Stdout(ctx), gc.Equals, "- user: test-user\n owner: owner\n term: test-term\n revision: 1\n createdon: 2015-12-25T00:00:00Z\n") - c.Assert(s.client.called, jc.IsTrue) -} - -type mockClient struct { - called bool - - agreements []wireformat.AgreementResponse - err string -} - -func (c *mockClient) setAgreements(agreements []wireformat.AgreementResponse) { - c.agreements = agreements - c.called = false - c.err = "" -} - -func (c *mockClient) setError(err string) { - c.err = err - c.called = false - c.agreements = nil -} - -func (c *mockClient) GetUsersAgreements() ([]wireformat.AgreementResponse, error) { - c.called = true - if c.err != "" { - return nil, errors.New(c.err) - } - return c.agreements, nil -} diff -Nru juju-core-2.0~beta15/src/github.com/juju/romulus/cmd/listagreements/package_test.go juju-core-2.0.0/src/github.com/juju/romulus/cmd/listagreements/package_test.go --- juju-core-2.0~beta15/src/github.com/juju/romulus/cmd/listagreements/package_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/romulus/cmd/listagreements/package_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,14 +0,0 @@ -// Copyright 2016 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. 
- -package listagreements_test - -import ( - stdtesting "testing" - - gc "gopkg.in/check.v1" -) - -func TestAll(t *stdtesting.T) { - gc.TestingT(t) -} diff -Nru juju-core-2.0~beta15/src/github.com/juju/romulus/cmd/listbudgets/export_test.go juju-core-2.0.0/src/github.com/juju/romulus/cmd/listbudgets/export_test.go --- juju-core-2.0~beta15/src/github.com/juju/romulus/cmd/listbudgets/export_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/romulus/cmd/listbudgets/export_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,20 +0,0 @@ -// Copyright 2016 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package listbudgets - -import ( - "gopkg.in/macaroon-bakery.v1/httpbakery" -) - -var ( - NewAPIClient = &newAPIClient -) - -// APIClientFnc returns a function that returns the provided apiClient -// and can be used to patch the NewAPIClient variable for tests. -func APIClientFnc(api apiClient) func(*httpbakery.Client) (apiClient, error) { - return func(*httpbakery.Client) (apiClient, error) { - return api, nil - } -} diff -Nru juju-core-2.0~beta15/src/github.com/juju/romulus/cmd/listbudgets/list-budgets.go juju-core-2.0.0/src/github.com/juju/romulus/cmd/listbudgets/list-budgets.go --- juju-core-2.0~beta15/src/github.com/juju/romulus/cmd/listbudgets/list-budgets.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/romulus/cmd/listbudgets/list-budgets.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,115 +0,0 @@ -// Copyright 2016 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. 
- -package listbudgets - -import ( - "sort" - - "github.com/gosuri/uitable" - "github.com/juju/cmd" - "github.com/juju/errors" - "github.com/juju/juju/cmd/modelcmd" - "gopkg.in/macaroon-bakery.v1/httpbakery" - "launchpad.net/gnuflag" - - api "github.com/juju/romulus/api/budget" - wireformat "github.com/juju/romulus/wireformat/budget" -) - -// NewListBudgetsCommand returns a new command that is used -// to list budgets a user has access to. -func NewListBudgetsCommand() modelcmd.CommandBase { - return &listBudgetsCommand{} -} - -type listBudgetsCommand struct { - modelcmd.JujuCommandBase - - out cmd.Output -} - -const listBudgetsDoc = ` -List the available budgets. - -Examples: - juju budgets -` - -// Info implements cmd.Command.Info. -func (c *listBudgetsCommand) Info() *cmd.Info { - return &cmd.Info{ - Name: "budgets", - Purpose: "List budgets.", - Doc: listBudgetsDoc, - Aliases: []string{"list-budgets"}, - } -} - -// SetFlags implements cmd.Command.SetFlags. -func (c *listBudgetsCommand) SetFlags(f *gnuflag.FlagSet) { - c.out.AddFlags(f, "tabular", map[string]cmd.Formatter{ - "tabular": formatTabular, - "json": cmd.FormatJson, - }) -} - -func (c *listBudgetsCommand) Run(ctx *cmd.Context) error { - client, err := c.BakeryClient() - if err != nil { - return errors.Annotate(err, "failed to create an http client") - } - api, err := newAPIClient(client) - if err != nil { - return errors.Annotate(err, "failed to create an api client") - } - budgets, err := api.ListBudgets() - if err != nil { - return errors.Annotate(err, "failed to retrieve budgets") - } - if budgets == nil { - return errors.New("no budget information available") - } - err = c.out.Write(ctx, budgets) - if err != nil { - return errors.Trace(err) - } - return nil -} - -// formatTabular returns a tabular view of available budgets. 
-func formatTabular(value interface{}) ([]byte, error) { - b, ok := value.(*wireformat.ListBudgetsResponse) - if !ok { - return nil, errors.Errorf("expected value of type %T, got %T", b, value) - } - sort.Sort(b.Budgets) - - table := uitable.New() - table.MaxColWidth = 50 - table.Wrap = true - for _, col := range []int{1, 2, 3, 4} { - table.RightAlign(col) - } - - table.AddRow("BUDGET", "MONTHLY", "ALLOCATED", "AVAILABLE", "SPENT") - for _, budgetEntry := range b.Budgets { - table.AddRow(budgetEntry.Budget, budgetEntry.Limit, budgetEntry.Allocated, budgetEntry.Available, budgetEntry.Consumed) - } - table.AddRow("TOTAL", b.Total.Limit, b.Total.Allocated, b.Total.Available, b.Total.Consumed) - table.AddRow("", "", "", "", "") - table.AddRow("Credit limit:", b.Credit, "", "", "") - return []byte(table.String()), nil -} - -var newAPIClient = newAPIClientImpl - -func newAPIClientImpl(c *httpbakery.Client) (apiClient, error) { - client := api.NewClient(c) - return client, nil -} - -type apiClient interface { - // ListBudgets returns a list of budgets a user has access to. - ListBudgets() (*wireformat.ListBudgetsResponse, error) -} diff -Nru juju-core-2.0~beta15/src/github.com/juju/romulus/cmd/listbudgets/list-budgets_test.go juju-core-2.0.0/src/github.com/juju/romulus/cmd/listbudgets/list-budgets_test.go --- juju-core-2.0~beta15/src/github.com/juju/romulus/cmd/listbudgets/list-budgets_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/romulus/cmd/listbudgets/list-budgets_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,150 +0,0 @@ -// Copyright 2016 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. 
- -package listbudgets_test - -import ( - "github.com/juju/cmd/cmdtesting" - "github.com/juju/errors" - coretesting "github.com/juju/juju/testing" - "github.com/juju/testing" - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" - - "github.com/juju/romulus/cmd/listbudgets" - "github.com/juju/romulus/wireformat/budget" -) - -var _ = gc.Suite(&listBudgetsSuite{}) - -type listBudgetsSuite struct { - coretesting.FakeJujuXDGDataHomeSuite - stub *testing.Stub - mockAPI *mockapi -} - -func (s *listBudgetsSuite) SetUpTest(c *gc.C) { - s.FakeJujuXDGDataHomeSuite.SetUpTest(c) - s.stub = &testing.Stub{} - s.mockAPI = &mockapi{Stub: s.stub} - s.PatchValue(listbudgets.NewAPIClient, listbudgets.APIClientFnc(s.mockAPI)) -} - -func (s *listBudgetsSuite) TestUnexpectedParameters(c *gc.C) { - listBudgets := listbudgets.NewListBudgetsCommand() - _, err := cmdtesting.RunCommand(c, listBudgets, "unexpected") - c.Assert(err, gc.ErrorMatches, `unrecognized args: \["unexpected"\]`) -} - -func (s *listBudgetsSuite) TestAPIError(c *gc.C) { - s.mockAPI.SetErrors(errors.New("well, this is embarrassing")) - listBudgets := listbudgets.NewListBudgetsCommand() - _, err := cmdtesting.RunCommand(c, listBudgets) - c.Assert(err, gc.ErrorMatches, "failed to retrieve budgets: well, this is embarrassing") -} - -func (s *listBudgetsSuite) TestListBudgetsOutput(c *gc.C) { - s.mockAPI.result = &budget.ListBudgetsResponse{ - Budgets: budget.BudgetSummaries{ - budget.BudgetSummary{ - Owner: "bob", - Budget: "personal", - Limit: "50", - Allocated: "30", - Unallocated: "20", - Available: "45", - Consumed: "5", - }, - budget.BudgetSummary{ - Owner: "bob", - Budget: "work", - Limit: "200", - Allocated: "100", - Unallocated: "100", - Available: "150", - Consumed: "50", - }, - budget.BudgetSummary{ - Owner: "bob", - Budget: "team", - Limit: "50", - Allocated: "10", - Unallocated: "40", - Available: "40", - Consumed: "10", - }, - }, - Total: budget.BudgetTotals{ - Limit: "300", - Allocated: "140", - 
Available: "235", - Unallocated: "160", - Consumed: "65", - }, - Credit: "400", - } - // Expected command output. Make sure budgets are sorted alphabetically. - expected := "" + - "BUDGET \tMONTHLY\tALLOCATED\tAVAILABLE\tSPENT\n" + - "personal \t 50\t 30\t 45\t 5\n" + - "team \t 50\t 10\t 40\t 10\n" + - "work \t 200\t 100\t 150\t 50\n" + - "TOTAL \t 300\t 140\t 235\t 65\n" + - " \t \t \t \t \n" + - "Credit limit:\t 400\t \t \t \n" - - listBudgets := listbudgets.NewListBudgetsCommand() - - ctx, err := cmdtesting.RunCommand(c, listBudgets) - c.Assert(err, jc.ErrorIsNil) - c.Assert(cmdtesting.Stdout(ctx), jc.DeepEquals, expected) - s.mockAPI.CheckCallNames(c, "ListBudgets") -} - -func (s *listBudgetsSuite) TestListBudgetsOutputNoBudgets(c *gc.C) { - s.mockAPI.result = &budget.ListBudgetsResponse{ - Budgets: budget.BudgetSummaries{}, - Total: budget.BudgetTotals{ - Limit: "0", - Allocated: "0", - Available: "0", - Unallocated: "0", - Consumed: "0", - }, - Credit: "0", - } - expected := "" + - "BUDGET \tMONTHLY\tALLOCATED\tAVAILABLE\tSPENT\n" + - "TOTAL \t 0\t 0\t 0\t 0\n" + - " \t \t \t \t \n" + - "Credit limit:\t 0\t \t \t \n" - - listBudgets := listbudgets.NewListBudgetsCommand() - - ctx, err := cmdtesting.RunCommand(c, listBudgets) - c.Assert(err, jc.ErrorIsNil) - c.Assert(cmdtesting.Stdout(ctx), jc.DeepEquals, expected) - s.mockAPI.CheckCallNames(c, "ListBudgets") -} - -func (s *listBudgetsSuite) TestListBudgetsNoOutput(c *gc.C) { - listBudgets := listbudgets.NewListBudgetsCommand() - - ctx, err := cmdtesting.RunCommand(c, listBudgets) - c.Assert(err, gc.ErrorMatches, `no budget information available`) - c.Assert(cmdtesting.Stdout(ctx), jc.DeepEquals, ``) - s.mockAPI.CheckCallNames(c, "ListBudgets") -} - -type mockapi struct { - *testing.Stub - result *budget.ListBudgetsResponse -} - -func (api *mockapi) ListBudgets() (*budget.ListBudgetsResponse, error) { - api.AddCall("ListBudgets") - if err := api.NextErr(); err != nil { - return nil, err - } - return 
api.result, nil -} diff -Nru juju-core-2.0~beta15/src/github.com/juju/romulus/cmd/listbudgets/package_test.go juju-core-2.0.0/src/github.com/juju/romulus/cmd/listbudgets/package_test.go --- juju-core-2.0~beta15/src/github.com/juju/romulus/cmd/listbudgets/package_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/romulus/cmd/listbudgets/package_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,14 +0,0 @@ -// Copyright 2016 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package listbudgets_test - -import ( - stdtesting "testing" - - gc "gopkg.in/check.v1" -) - -func TestAll(t *stdtesting.T) { - gc.TestingT(t) -} diff -Nru juju-core-2.0~beta15/src/github.com/juju/romulus/cmd/listplans/export_test.go juju-core-2.0.0/src/github.com/juju/romulus/cmd/listplans/export_test.go --- juju-core-2.0~beta15/src/github.com/juju/romulus/cmd/listplans/export_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/romulus/cmd/listplans/export_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,20 +0,0 @@ -// Copyright 2016 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package listplans - -import ( - "gopkg.in/macaroon-bakery.v1/httpbakery" -) - -var ( - NewClient = &newClient -) - -// APIClientFnc returns a function that returns the provided apiClient -// and can be used to patch the NewAPIClient variable for tests. 
-func APIClientFnc(api apiClient) func(client *httpbakery.Client) (apiClient, error) { - return func(*httpbakery.Client) (apiClient, error) { - return api, nil - } -} diff -Nru juju-core-2.0~beta15/src/github.com/juju/romulus/cmd/listplans/list_plans.go juju-core-2.0.0/src/github.com/juju/romulus/cmd/listplans/list_plans.go --- juju-core-2.0~beta15/src/github.com/juju/romulus/cmd/listplans/list_plans.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/romulus/cmd/listplans/list_plans.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,222 +0,0 @@ -// Copyright 2016 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -// The listplans package contains implementation of the command that -// can be used to list plans that are available for a charm. -package listplans - -import ( - "bytes" - "fmt" - "io" - "io/ioutil" - "strings" - "text/tabwriter" - - "github.com/gosuri/uitable" - "github.com/juju/cmd" - "github.com/juju/errors" - "github.com/juju/juju/cmd/modelcmd" - "gopkg.in/macaroon-bakery.v1/httpbakery" - "gopkg.in/yaml.v2" - "launchpad.net/gnuflag" - - api "github.com/juju/romulus/api/plan" - rcmd "github.com/juju/romulus/cmd" - wireformat "github.com/juju/romulus/wireformat/plan" -) - -// apiClient defines the interface of the plan api client need by this command. -type apiClient interface { - // GetAssociatedPlans returns the plans associated with the charm. - GetAssociatedPlans(charmURL string) ([]wireformat.Plan, error) -} - -var newClient = func(client *httpbakery.Client) (apiClient, error) { - return api.NewClient(api.HTTPClient(client)) -} - -const listPlansDoc = ` -List plans available for the specified charm. 
- -Examples: - juju plans cs:webapp -` - -// ListPlansCommand retrieves plans that are available for the specified charm -type ListPlansCommand struct { - modelcmd.JujuCommandBase - - out cmd.Output - CharmURL string - - CharmResolver rcmd.CharmResolver -} - -// NewListPlansCommand creates a new ListPlansCommand. -func NewListPlansCommand() modelcmd.CommandBase { - return &ListPlansCommand{ - CharmResolver: rcmd.NewCharmStoreResolver(), - } -} - -// Info implements Command.Info. -func (c *ListPlansCommand) Info() *cmd.Info { - return &cmd.Info{ - Name: "plans", - Args: "", - Purpose: "List plans.", - Doc: listPlansDoc, - Aliases: []string{"list-plans"}, - } -} - -// Init reads and verifies the cli arguments for the ListPlansCommand -func (c *ListPlansCommand) Init(args []string) error { - if len(args) == 0 { - return errors.New("missing arguments") - } - charmURL, args := args[0], args[1:] - if err := cmd.CheckEmpty(args); err != nil { - return errors.Errorf("unknown command line arguments: " + strings.Join(args, ",")) - } - c.CharmURL = charmURL - return nil -} - -// SetFlags implements Command.SetFlags. -func (c *ListPlansCommand) SetFlags(f *gnuflag.FlagSet) { - c.JujuCommandBase.SetFlags(f) - defaultFormat := "tabular" - c.out.AddFlags(f, defaultFormat, map[string]cmd.Formatter{ - "yaml": cmd.FormatYaml, - "json": cmd.FormatJson, - "smart": cmd.FormatSmart, - "summary": formatSummary, - "tabular": formatTabular, - }) -} - -// Run implements Command.Run. -// Retrieves the plan from the plans service. The set of plans to be -// retrieved can be limited using the plan and isv flags. 
-func (c *ListPlansCommand) Run(ctx *cmd.Context) (rErr error) { - client, err := c.BakeryClient() - if err != nil { - return errors.Annotate(err, "failed to create an http client") - } - - resolvedUrl, err := c.CharmResolver.Resolve(client.VisitWebPage, client.Client, c.CharmURL) - if err != nil { - return errors.Annotatef(err, "failed to resolve charmURL %v", c.CharmURL) - } - c.CharmURL = resolvedUrl - - apiClient, err := newClient(client) - if err != nil { - return errors.Annotate(err, "failed to create a plan API client") - } - - plans, err := apiClient.GetAssociatedPlans(c.CharmURL) - if err != nil { - return errors.Annotate(err, "failed to retrieve plans") - } - - output := make([]plan, len(plans)) - for i, p := range plans { - outputPlan := plan{ - URL: p.URL, - } - def, err := readPlan(bytes.NewBufferString(p.Definition)) - if err != nil { - return errors.Annotate(err, "failed to parse plan definition") - } - if def.Description != nil { - outputPlan.Price = def.Description.Price - outputPlan.Description = def.Description.Text - } - output[i] = outputPlan - } - err = c.out.Write(ctx, output) - if err != nil { - return errors.Trace(err) - } - - return nil -} - -type plan struct { - URL string `json:"plan" yaml:"plan"` - Price string `json:"price" yaml:"price"` - Description string `json:"description" yaml:"description"` -} - -// formatSummary returns a summary of available plans. 
-func formatSummary(value interface{}) ([]byte, error) { - plans, ok := value.([]plan) - if !ok { - return nil, errors.Errorf("expected value of type %T, got %T", plans, value) - } - var out bytes.Buffer - tw := tabwriter.NewWriter(&out, 0, 1, 1, ' ', 0) - p := func(values ...interface{}) { - for _, v := range values { - fmt.Fprintf(tw, "%s\t", v) - } - fmt.Fprintln(tw) - } - p("PLAN", "PRICE") - for _, plan := range plans { - p(plan.URL, plan.Price) - } - err := tw.Flush() - if err != nil { - return nil, errors.Trace(err) - } - - return out.Bytes(), nil -} - -// formatTabular returns a tabular summary of available plans. -func formatTabular(value interface{}) ([]byte, error) { - plans, ok := value.([]plan) - if !ok { - return nil, errors.Errorf("expected value of type %T, got %T", plans, value) - } - - table := uitable.New() - table.MaxColWidth = 50 - table.Wrap = true - - table.AddRow("PLAN", "PRICE", "DESCRIPTION") - for _, plan := range plans { - table.AddRow(plan.URL, plan.Price, plan.Description) - } - - return []byte(table.String()), nil -} - -type planModel struct { - Description *descriptionModel `json:"description,omitempty"` -} - -// descriptionModel provides a human readable description of the plan. -type descriptionModel struct { - Price string `json:"price,omitempty"` - Text string `json:"text,omitempty"` -} - -// readPlan reads, parses and returns a planModel struct representation. 
-func readPlan(r io.Reader) (plan *planModel, err error) { - data, err := ioutil.ReadAll(r) - if err != nil { - return - } - - var doc planModel - err = yaml.Unmarshal(data, &doc) - if err != nil { - return - } - return &doc, nil -} diff -Nru juju-core-2.0~beta15/src/github.com/juju/romulus/cmd/listplans/list_plans_test.go juju-core-2.0.0/src/github.com/juju/romulus/cmd/listplans/list_plans_test.go --- juju-core-2.0~beta15/src/github.com/juju/romulus/cmd/listplans/list_plans_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/romulus/cmd/listplans/list_plans_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,237 +0,0 @@ -// Copyright 2016 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package listplans_test - -import ( - "net/http" - "net/url" - "time" - - "github.com/juju/cmd/cmdtesting" - coretesting "github.com/juju/juju/testing" - "github.com/juju/testing" - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" - - api "github.com/juju/romulus/api/plan" - "github.com/juju/romulus/cmd/listplans" - wireformat "github.com/juju/romulus/wireformat/plan" -) - -var ( - testPlan1 = ` - description: - text: | - Lorem ipsum dolor sit amet, - consectetur adipiscing elit. - Nunc pretium purus nec magna faucibus, sed eleifend dui fermentum. Nulla nec ornare lorem, sed imperdiet turpis. Nam auctor quis massa et commodo. Maecenas in magna erat. Duis non iaculis risus, a malesuada quam. Sed quis commodo sapien. Suspendisse laoreet diam eu interdum tristique. Class aptent taciti sociosqu ad litora torquent per conubia nostra, per inceptos himenaeos. - Donec eu nunc quis eros fermentum porta non ut justo. Donec ut tempus sapien. Suspendisse bibendum fermentum eros, id feugiat justo elementum quis. Quisque vel volutpat risus. Aenean pellentesque ultrices consequat. Maecenas luctus, augue vitae ullamcorper vulputate, purus ligula accumsan diam, ut efficitur diam tellus ac nibh. 
Cras eros ligula, mattis in ex quis, porta efficitur quam. Donec porta, est ut interdum blandit, enim est elementum sapien, quis congue orci dui et nulla. Maecenas vehicula malesuada vehicula. Phasellus sapien ante, semper eu ornare sed, vulputate id nunc. Maecenas in orci mollis, sagittis lorem quis, ultrices metus. Integer molestie tempor augue, pulvinar blandit sapien ultricies eget. - Fusce sed tellus sit amet tortor mollis pellentesque. Nulla tempus sem tellus, vitae tempor ipsum scelerisque eu. Cras tempor, tellus nec pretium egestas, felis massa luctus velit, vitae feugiat nunc velit ac tellus. Maecenas quis nisi diam. Sed pulvinar suscipit nibh sit amet cursus. Ut sem orci, consequat id pretium id, lacinia id nisl. Maecenas id quam at nisi eleifend porta. Vestibulum at ligula arcu. Quisque tincidunt pulvinar egestas. Ut suscipit ornare ligula a fermentum. Morbi ante justo, condimentum ut risus vitae, molestie elementum elit. Curabitur malesuada commodo diam sed ultrices. Vestibulum tincidunt turpis at ultricies fermentum. Morbi ipsum felis, laoreet quis risus id, ornare elementum urna. Morbi ultrices porttitor pulvinar. Maecenas facilisis velit sit amet tellus feugiat iaculis. 
- metrics: - pings: - unit: - transform: max - period: hour - gaps: zero -` - testPlan2 = ` - metrics: - pongs: - unit: - transform: max - period: hour - gaps: zero -` -) - -type ListPlansCommandSuite struct { - coretesting.FakeJujuXDGDataHomeSuite - mockAPI *mockapi - stub *testing.Stub -} - -var _ = gc.Suite(&ListPlansCommandSuite{}) - -func (s *ListPlansCommandSuite) SetUpTest(c *gc.C) { - s.FakeJujuXDGDataHomeSuite.SetUpTest(c) - s.stub = &testing.Stub{} - s.mockAPI = newMockAPI(s.stub) - s.PatchValue(listplans.NewClient, listplans.APIClientFnc(s.mockAPI)) -} - -func (s *ListPlansCommandSuite) TestTabularOutput(c *gc.C) { - listPlans := &listplans.ListPlansCommand{ - CharmResolver: &mockCharmResolver{ - ResolvedURL: "series/some-charm-url", - Stub: s.stub, - }, - } - ctx, err := cmdtesting.RunCommand(c, listPlans, "some-charm-url") - c.Assert(err, jc.ErrorIsNil) - c.Assert(cmdtesting.Stdout(ctx), gc.Equals, - `PLAN PRICE DESCRIPTION -bob/test-plan-1 Lorem ipsum dolor sit amet, - consectetur adipiscing elit. - Nunc pretium purus nec magna faucibus, sed - eleifend dui fermentum. Nulla nec ornare lorem, - sed imperdiet turpis. Nam auctor quis massa et - commodo. Maecenas in magna erat. Duis non iaculis - risus, a malesuada quam. Sed quis commodo sapien. - Suspendisse laoreet diam eu interdum tristique. - Class aptent taciti sociosqu ad litora torquent - per conubia nostra, per inceptos himenaeos. - Donec eu nunc quis eros fermentum porta non ut - justo. Donec ut tempus sapien. Suspendisse - bibendum fermentum eros, id feugiat justo - elementum quis. Quisque vel volutpat risus. Aenean - pellentesque ultrices consequat. Maecenas luctus, - augue vitae ullamcorper vulputate, purus ligula - accumsan diam, ut efficitur diam tellus ac nibh. - Cras eros ligula, mattis in ex quis, porta - efficitur quam. Donec porta, est ut interdum - blandit, enim est elementum sapien, quis congue - orci dui et nulla. Maecenas vehicula malesuada - vehicula. 
Phasellus sapien ante, semper eu ornare - sed, vulputate id nunc. Maecenas in orci mollis, - sagittis lorem quis, ultrices metus. Integer - molestie tempor augue, pulvinar blandit sapien - ultricies eget. - Fusce sed tellus sit amet tortor mollis - pellentesque. Nulla tempus sem tellus, vitae - tempor ipsum scelerisque eu. Cras tempor, tellus - nec pretium egestas, felis massa luctus velit, - vitae feugiat nunc velit ac tellus. Maecenas quis - nisi diam. Sed pulvinar suscipit nibh sit amet - cursus. Ut sem orci, consequat id pretium id, - lacinia id nisl. Maecenas id quam at nisi eleifend - porta. Vestibulum at ligula arcu. Quisque - tincidunt pulvinar egestas. Ut suscipit ornare - ligula a fermentum. Morbi ante justo, condimentum - ut risus vitae, molestie elementum elit. Curabitur - malesuada commodo diam sed ultrices. Vestibulum - tincidunt turpis at ultricies fermentum. Morbi - ipsum felis, laoreet quis risus id, ornare - elementum urna. Morbi ultrices porttitor pulvinar. - Maecenas facilisis velit sit amet tellus feugiat - iaculis. 
- -carol/test-plan-2 -`) -} - -func (s *ListPlansCommandSuite) TestGetCommands(c *gc.C) { - tests := []struct { - about string - args []string - err string - resolvedCharmURL string - apiCall []interface{} - }{{ - about: "charm url is resolved", - args: []string{"some-charm-url"}, - resolvedCharmURL: "series/some-charm-url-1", - apiCall: []interface{}{"series/some-charm-url-1"}, - }, { - about: "everything works - default format", - args: []string{"some-charm-url"}, - apiCall: []interface{}{"some-charm-url"}, - }, { - about: "everything works - yaml", - args: []string{"some-charm-url", "--format", "yaml"}, - apiCall: []interface{}{"some-charm-url"}, - }, { - about: "everything works - smart", - args: []string{"some-charm-url", "--format", "smart"}, - apiCall: []interface{}{"some-charm-url"}, - }, { - about: "everything works - json", - args: []string{"some-charm-url", "--format", "json"}, - apiCall: []interface{}{"some-charm-url"}, - }, { - about: "everything works - summary", - args: []string{"some-charm-url", "--format", "summary"}, - apiCall: []interface{}{"some-charm-url"}, - }, { - about: "everything works - tabular", - args: []string{"some-charm-url", "--format", "tabular"}, - apiCall: []interface{}{"some-charm-url"}, - }, { - about: "missing argument", - args: []string{}, - err: `missing arguments`, - apiCall: []interface{}{}, - }, { - about: "unknown arguments", - args: []string{"some-charm-url", "extra", "arguments"}, - err: `unknown command line arguments: extra,arguments`, - apiCall: []interface{}{}, - }, - } - - for i, t := range tests { - c.Logf("Running test %d %s", i, t.about) - s.mockAPI.reset() - - listPlans := &listplans.ListPlansCommand{ - CharmResolver: &mockCharmResolver{ - ResolvedURL: t.resolvedCharmURL, - Stub: s.stub, - }, - } - _, err := cmdtesting.RunCommand(c, listPlans, t.args...) 
- if t.err != "" { - c.Assert(err, gc.ErrorMatches, t.err) - } else { - c.Assert(err, jc.ErrorIsNil) - s.mockAPI.CheckCall(c, 0, "Resolve", t.args[0]) - s.mockAPI.CheckCall(c, 1, "GetAssociatedPlans", t.apiCall...) - } - } -} - -// mockapi mocks the plan service api -type mockapi struct { - *testing.Stub - api.Client -} - -func newMockAPI(s *testing.Stub) *mockapi { - return &mockapi{Stub: s} -} - -// Get implements the Get function of the api.PlanClient interface. -// TODO (domas) : fix once querying by charm url is in place -func (m *mockapi) GetAssociatedPlans(charmURL string) ([]wireformat.Plan, error) { - m.AddCall("GetAssociatedPlans", charmURL) - p1 := wireformat.Plan{ - URL: "bob/test-plan-1", - Definition: testPlan1, - CreatedOn: time.Date(2015, 0, 0, 0, 0, 0, 0, time.UTC).Format(time.RFC3339), - } - p2 := wireformat.Plan{ - URL: "carol/test-plan-2", - Definition: testPlan2, - CreatedOn: time.Date(2015, 0, 0, 0, 0, 0, 0, time.UTC).Format(time.RFC3339), - } - return []wireformat.Plan{p1, p2}, m.NextErr() -} - -func (m *mockapi) reset() { - m.ResetCalls() -} - -// mockCharmResolver is a mock implementation of cmd.CharmResolver. -type mockCharmResolver struct { - *testing.Stub - ResolvedURL string -} - -// Resolve implements cmd.CharmResolver. -func (r *mockCharmResolver) Resolve(_ func(*url.URL) error, _ *http.Client, charmURL string) (string, error) { - r.AddCall("Resolve", charmURL) - if r.ResolvedURL != "" { - return r.ResolvedURL, r.NextErr() - } - return charmURL, r.NextErr() -} diff -Nru juju-core-2.0~beta15/src/github.com/juju/romulus/cmd/listplans/package_test.go juju-core-2.0.0/src/github.com/juju/romulus/cmd/listplans/package_test.go --- juju-core-2.0~beta15/src/github.com/juju/romulus/cmd/listplans/package_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/romulus/cmd/listplans/package_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,14 +0,0 @@ -// Copyright 2016 Canonical Ltd. 
-// Licensed under the AGPLv3, see LICENCE file for details. - -package listplans_test - -import ( - stdtesting "testing" - - gc "gopkg.in/check.v1" -) - -func TestAll(t *stdtesting.T) { - gc.TestingT(t) -} diff -Nru juju-core-2.0~beta15/src/github.com/juju/romulus/cmd/resolve.go juju-core-2.0.0/src/github.com/juju/romulus/cmd/resolve.go --- juju-core-2.0~beta15/src/github.com/juju/romulus/cmd/resolve.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/romulus/cmd/resolve.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,55 +0,0 @@ -// Copyright 2016 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package cmd - -import ( - "net/http" - "net/url" - - "github.com/juju/errors" - "gopkg.in/juju/charm.v6-unstable" - "gopkg.in/juju/charmrepo.v2-unstable" - "gopkg.in/juju/charmrepo.v2-unstable/csclient" -) - -// CharmResolver interface defines the functionality to resolve a charm URL. -type CharmResolver interface { - // Resolve resolves the charm URL. - Resolve(visitWebPage func(*url.URL) error, client *http.Client, charmURL string) (string, error) -} - -// CharmStoreResolver implements the CharmResolver interface. -type CharmStoreResolver struct { - csURL string -} - -// NewCharmStoreResolver creates a new charm store resolver. -func NewCharmStoreResolver() *CharmStoreResolver { - return &CharmStoreResolver{ - csURL: csclient.ServerURL, - } -} - -// Resolve implements the CharmResolver interface. 
-func (r *CharmStoreResolver) Resolve(visitWebPage func(*url.URL) error, client *http.Client, charmURL string) (string, error) { - repo := charmrepo.NewCharmStore(charmrepo.NewCharmStoreParams{ - URL: r.csURL, - HTTPClient: client, - VisitWebPage: visitWebPage, - }) - - curl, err := charm.ParseURL(charmURL) - if err != nil { - return "", errors.Annotate(err, "could not parse charm url") - } - // ignore local charm urls - if curl.Schema == "local" { - return charmURL, nil - } - resolvedURL, _, err := repo.Resolve(curl) - if err != nil { - return "", errors.Trace(err) - } - return resolvedURL.String(), nil -} diff -Nru juju-core-2.0~beta15/src/github.com/juju/romulus/cmd/setbudget/export_test.go juju-core-2.0.0/src/github.com/juju/romulus/cmd/setbudget/export_test.go --- juju-core-2.0~beta15/src/github.com/juju/romulus/cmd/setbudget/export_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/romulus/cmd/setbudget/export_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,18 +0,0 @@ -// Copyright 2016 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package setbudget - -import ( - "gopkg.in/macaroon-bakery.v1/httpbakery" -) - -var ( - NewAPIClient = &newAPIClient -) - -func APIClientFnc(api apiClient) func(*httpbakery.Client) (apiClient, error) { - return func(*httpbakery.Client) (apiClient, error) { - return api, nil - } -} diff -Nru juju-core-2.0~beta15/src/github.com/juju/romulus/cmd/setbudget/package_test.go juju-core-2.0.0/src/github.com/juju/romulus/cmd/setbudget/package_test.go --- juju-core-2.0~beta15/src/github.com/juju/romulus/cmd/setbudget/package_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/romulus/cmd/setbudget/package_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,14 +0,0 @@ -// Copyright 2016 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. 
- -package setbudget_test - -import ( - stdtesting "testing" - - gc "gopkg.in/check.v1" -) - -func TestAll(t *stdtesting.T) { - gc.TestingT(t) -} diff -Nru juju-core-2.0~beta15/src/github.com/juju/romulus/cmd/setbudget/setbudget.go juju-core-2.0.0/src/github.com/juju/romulus/cmd/setbudget/setbudget.go --- juju-core-2.0~beta15/src/github.com/juju/romulus/cmd/setbudget/setbudget.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/romulus/cmd/setbudget/setbudget.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,86 +0,0 @@ -// Copyright 2016 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package setbudget - -import ( - "fmt" - "strconv" - - "github.com/juju/cmd" - "github.com/juju/errors" - "github.com/juju/juju/cmd/modelcmd" - "gopkg.in/macaroon-bakery.v1/httpbakery" - - api "github.com/juju/romulus/api/budget" -) - -type setBudgetCommand struct { - modelcmd.JujuCommandBase - Name string - Value string -} - -// NewSetBudgetCommand returns a new setBudgetCommand. -func NewSetBudgetCommand() cmd.Command { - return &setBudgetCommand{} -} - -const doc = ` -Set the monthly budget limit. - -Examples: - # Sets the monthly limit for budget named 'personal' to 96. - juju set-budget personal 96 -` - -// Info implements cmd.Command.Info. -func (c *setBudgetCommand) Info() *cmd.Info { - return &cmd.Info{ - Name: "set-budget", - Args: " ", - Purpose: "Set the budget limit.", - Doc: doc, - } -} - -// Init implements cmd.Command.Init. -func (c *setBudgetCommand) Init(args []string) error { - if len(args) < 2 { - return errors.New("name and value required") - } - c.Name, c.Value = args[0], args[1] - if _, err := strconv.ParseInt(c.Value, 10, 32); err != nil { - return errors.New("budget value needs to be a whole number") - } - return cmd.CheckEmpty(args[2:]) -} - -// Run implements cmd.Command.Run and contains most of the setbudget logic. 
-func (c *setBudgetCommand) Run(ctx *cmd.Context) error { - client, err := c.BakeryClient() - if err != nil { - return errors.Annotate(err, "failed to create an http client") - } - api, err := newAPIClient(client) - if err != nil { - return errors.Annotate(err, "failed to create an api client") - } - resp, err := api.SetBudget(c.Name, c.Value) - if err != nil { - return errors.Annotate(err, "failed to set the budget") - } - fmt.Fprintln(ctx.Stdout, resp) - return nil -} - -var newAPIClient = newAPIClientImpl - -func newAPIClientImpl(c *httpbakery.Client) (apiClient, error) { - client := api.NewClient(c) - return client, nil -} - -type apiClient interface { - SetBudget(string, string) (string, error) -} diff -Nru juju-core-2.0~beta15/src/github.com/juju/romulus/cmd/setbudget/setbudget_test.go juju-core-2.0.0/src/github.com/juju/romulus/cmd/setbudget/setbudget_test.go --- juju-core-2.0~beta15/src/github.com/juju/romulus/cmd/setbudget/setbudget_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/romulus/cmd/setbudget/setbudget_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,94 +0,0 @@ -// Copyright 2016 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. 
- -package setbudget_test - -import ( - "github.com/juju/cmd/cmdtesting" - "github.com/juju/errors" - coretesting "github.com/juju/juju/testing" - "github.com/juju/testing" - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" - - "github.com/juju/romulus/cmd/setbudget" -) - -var _ = gc.Suite(&setBudgetSuite{}) - -type setBudgetSuite struct { - coretesting.FakeJujuXDGDataHomeSuite - stub *testing.Stub - mockAPI *mockapi -} - -func (s *setBudgetSuite) SetUpTest(c *gc.C) { - s.FakeJujuXDGDataHomeSuite.SetUpTest(c) - s.stub = &testing.Stub{} - s.mockAPI = newMockAPI(s.stub) - s.PatchValue(setbudget.NewAPIClient, setbudget.APIClientFnc(s.mockAPI)) -} - -func (s *setBudgetSuite) TestSetBudget(c *gc.C) { - s.mockAPI.resp = "name budget set to 5" - set := setbudget.NewSetBudgetCommand() - ctx, err := cmdtesting.RunCommand(c, set, "name", "5") - c.Assert(err, jc.ErrorIsNil) - c.Assert(cmdtesting.Stdout(ctx), jc.DeepEquals, "name budget set to 5\n") - s.mockAPI.CheckCall(c, 0, "SetBudget", "name", "5") -} - -func (s *setBudgetSuite) TestSetBudgetAPIError(c *gc.C) { - s.stub.SetErrors(errors.New("something failed")) - set := setbudget.NewSetBudgetCommand() - _, err := cmdtesting.RunCommand(c, set, "name", "5") - c.Assert(err, gc.ErrorMatches, "failed to set the budget: something failed") - s.mockAPI.CheckCall(c, 0, "SetBudget", "name", "5") -} - -func (s *setBudgetSuite) TestSetBudgetErrors(c *gc.C) { - tests := []struct { - about string - args []string - expectedError string - }{ - { - about: "value needs to be a number", - args: []string{"name", "badvalue"}, - expectedError: "budget value needs to be a whole number", - }, - { - about: "value is missing", - args: []string{"name"}, - expectedError: "name and value required", - }, - { - about: "no args", - args: []string{}, - expectedError: "name and value required", - }, - } - for i, test := range tests { - c.Logf("test %d: %s", i, test.about) - s.stub.SetErrors(errors.New(test.expectedError)) - defer 
s.mockAPI.ResetCalls() - set := setbudget.NewSetBudgetCommand() - _, err := cmdtesting.RunCommand(c, set, test.args...) - c.Assert(err, gc.ErrorMatches, test.expectedError) - s.mockAPI.CheckNoCalls(c) - } -} - -func newMockAPI(s *testing.Stub) *mockapi { - return &mockapi{Stub: s} -} - -type mockapi struct { - *testing.Stub - resp string -} - -func (api *mockapi) SetBudget(name, value string) (string, error) { - api.MethodCall(api, "SetBudget", name, value) - return api.resp, api.NextErr() -} diff -Nru juju-core-2.0~beta15/src/github.com/juju/romulus/cmd/setplan/export_test.go juju-core-2.0.0/src/github.com/juju/romulus/cmd/setplan/export_test.go --- juju-core-2.0~beta15/src/github.com/juju/romulus/cmd/setplan/export_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/romulus/cmd/setplan/export_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,20 +0,0 @@ -// Copyright 2016 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package setplan - -import ( - api "github.com/juju/romulus/api/plan" -) - -var ( - NewAuthorizationClient = &newAuthorizationClient -) - -// APIClientFnc returns a function that returns the provided apiClient -// and can be used to patch the NewAPIClient variable for tests. -func APIClientFnc(client authorizationClient) func(...api.ClientOption) (authorizationClient, error) { - return func(...api.ClientOption) (authorizationClient, error) { - return client, nil - } -} diff -Nru juju-core-2.0~beta15/src/github.com/juju/romulus/cmd/setplan/set_plan.go juju-core-2.0.0/src/github.com/juju/romulus/cmd/setplan/set_plan.go --- juju-core-2.0~beta15/src/github.com/juju/romulus/cmd/setplan/set_plan.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/romulus/cmd/setplan/set_plan.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,123 +0,0 @@ -// Copyright 2016 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. 
- -// The setplan package contains the implementation of the juju set-plan -// command. -package setplan - -import ( - "encoding/json" - "net/url" - - "github.com/juju/cmd" - "github.com/juju/errors" - "github.com/juju/juju/api/application" - "github.com/juju/juju/cmd/modelcmd" - "gopkg.in/juju/names.v2" - "gopkg.in/macaroon.v1" - - api "github.com/juju/romulus/api/plan" -) - -// authorizationClient defines the interface of an api client that -// the comand uses to create an authorization macaroon. -type authorizationClient interface { - // Authorize returns the authorization macaroon for the specified environment, - // charm url, application name and plan. - Authorize(environmentUUID, charmURL, applicationName, plan string, visitWebPage func(*url.URL) error) (*macaroon.Macaroon, error) -} - -var newAuthorizationClient = func(options ...api.ClientOption) (authorizationClient, error) { - return api.NewAuthorizationClient(options...) -} - -// NewSetPlanCommand returns a new command that is used to set metric credentials for a -// deployed application. -func NewSetPlanCommand() cmd.Command { - return modelcmd.Wrap(&setPlanCommand{}) -} - -// setPlanCommand is a command-line tool for setting -// Application.MetricCredential for development & demonstration purposes. -type setPlanCommand struct { - modelcmd.ModelCommandBase - - Application string - Plan string -} - -// Info implements cmd.Command. -func (c *setPlanCommand) Info() *cmd.Info { - return &cmd.Info{ - Name: "set-plan", - Args: " ", - Purpose: "Set the plan for an application.", - Doc: ` -Set the plan for the deployed application, effective immediately. - -The specified plan name must be a valid plan that is offered for this -particular charm. Use "juju list-plans " for more information. - -Examples: - juju set-plan myapp example/uptime -`, - } -} - -// Init implements cmd.Command. 
-func (c *setPlanCommand) Init(args []string) error { - if len(args) < 2 { - return errors.New("need to specify application name and plan url") - } - - applicationName := args[0] - if !names.IsValidApplication(applicationName) { - return errors.Errorf("invalid application name %q", applicationName) - } - - c.Plan = args[1] - c.Application = applicationName - - return c.ModelCommandBase.Init(args[2:]) -} - -func (c *setPlanCommand) requestMetricCredentials(client *application.Client, ctx *cmd.Context) ([]byte, error) { - envUUID := client.ModelUUID() - charmURL, err := client.GetCharmURL(c.Application) - if err != nil { - return nil, errors.Trace(err) - } - - hc, err := c.BakeryClient() - if err != nil { - return nil, errors.Trace(err) - } - authClient, err := newAuthorizationClient(api.HTTPClient(hc)) - if err != nil { - return nil, errors.Trace(err) - } - m, err := authClient.Authorize(envUUID, charmURL.String(), c.Application, c.Plan, hc.VisitWebPage) - if err != nil { - return nil, errors.Trace(err) - } - ms := macaroon.Slice{m} - return json.Marshal(ms) -} - -// Run implements cmd.Command. -func (c *setPlanCommand) Run(ctx *cmd.Context) error { - root, err := c.NewAPIRoot() - if err != nil { - return errors.Trace(err) - } - client := application.NewClient(root) - credentials, err := c.requestMetricCredentials(client, ctx) - if err != nil { - return errors.Trace(err) - } - err = client.SetMetricCredentials(c.Application, credentials) - if err != nil { - return errors.Trace(err) - } - return nil -} diff -Nru juju-core-2.0~beta15/src/github.com/juju/romulus/cmd/setplan/set_plan_test.go juju-core-2.0.0/src/github.com/juju/romulus/cmd/setplan/set_plan_test.go --- juju-core-2.0~beta15/src/github.com/juju/romulus/cmd/setplan/set_plan_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/romulus/cmd/setplan/set_plan_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,180 +0,0 @@ -// Copyright 2016 Canonical Ltd. 
-// Licensed under the AGPLv3, see LICENCE file for details. - -package setplan_test - -import ( - "encoding/json" - "fmt" - "net/url" - stdtesting "testing" - - "github.com/juju/cmd/cmdtesting" - "github.com/juju/errors" - jjjtesting "github.com/juju/juju/juju/testing" - "github.com/juju/juju/state" - "github.com/juju/juju/testcharms" - jjtesting "github.com/juju/juju/testing" - "github.com/juju/testing" - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" - "gopkg.in/juju/charm.v6-unstable" - "gopkg.in/macaroon-bakery.v1/bakery" - "gopkg.in/macaroon-bakery.v1/bakery/checkers" - "gopkg.in/macaroon.v1" - - "github.com/juju/romulus/cmd/setplan" -) - -func TestPackage(t *stdtesting.T) { - jjtesting.MgoTestPackage(t) -} - -var _ = gc.Suite(&setPlanCommandSuite{}) - -type setPlanCommandSuite struct { - jjjtesting.JujuConnSuite - - mockAPI *mockapi - charmURL string -} - -func (s *setPlanCommandSuite) SetUpTest(c *gc.C) { - s.JujuConnSuite.SetUpTest(c) - - ch := testcharms.Repo.CharmDir("dummy") - curl := charm.MustParseURL( - fmt.Sprintf("local:quantal/%s-%d", ch.Meta().Name, ch.Revision()), - ) - s.charmURL = curl.String() - charmInfo := state.CharmInfo{ - Charm: ch, - ID: curl, - StoragePath: "dummy-path", - SHA256: "dummy-1", - } - dummyCharm, err := s.State.AddCharm(charmInfo) - c.Assert(err, jc.ErrorIsNil) - s.AddTestingService(c, "mysql", dummyCharm) - - mockAPI, err := newMockAPI() - c.Assert(err, jc.ErrorIsNil) - s.mockAPI = mockAPI - - s.PatchValue(setplan.NewAuthorizationClient, setplan.APIClientFnc(s.mockAPI)) -} - -func (s setPlanCommandSuite) TestSetPlanCommand(c *gc.C) { - tests := []struct { - about string - plan string - application string - err string - apiErr error - apiCalls []testing.StubCall - }{{ - about: "all is well", - plan: "bob/default", - application: "mysql", - apiCalls: []testing.StubCall{{ - FuncName: "Authorize", - Args: []interface{}{ - s.State.ModelUUID(), - s.charmURL, - "mysql", - }, - }}, - }, { - about: "invalid 
application name", - plan: "bob/default", - application: "mysql-0", - err: "invalid application name \"mysql-0\"", - }, { - about: "unknown application", - plan: "bob/default", - application: "wordpress", - err: "application \"wordpress\" not found.*", - }, { - about: "unknown application", - plan: "bob/default", - application: "mysql", - apiErr: errors.New("some strange error"), - err: "some strange error", - }, - } - for i, test := range tests { - c.Logf("running test %d: %v", i, test.about) - s.mockAPI.ResetCalls() - if test.apiErr != nil { - s.mockAPI.SetErrors(test.apiErr) - } - _, err := cmdtesting.RunCommand(c, setplan.NewSetPlanCommand(), test.application, test.plan) - if test.err == "" { - c.Assert(err, jc.ErrorIsNil) - c.Assert(s.mockAPI.Calls(), gc.HasLen, 1) - s.mockAPI.CheckCalls(c, test.apiCalls) - - app, err := s.State.Application("mysql") - c.Assert(err, jc.ErrorIsNil) - svcMacaroon := app.MetricCredentials() - data, err := json.Marshal(macaroon.Slice{s.mockAPI.macaroon}) - c.Assert(err, jc.ErrorIsNil) - c.Assert(svcMacaroon, gc.DeepEquals, data) - } else { - c.Assert(err, gc.ErrorMatches, test.err) - c.Assert(s.mockAPI.Calls(), gc.HasLen, 0) - } - } -} - -func (s *setPlanCommandSuite) TestNoArgs(c *gc.C) { - _, err := cmdtesting.RunCommand(c, setplan.NewSetPlanCommand()) - c.Assert(err, gc.ErrorMatches, "need to specify application name and plan url") -} - -func newMockAPI() (*mockapi, error) { - kp, err := bakery.GenerateKey() - if err != nil { - return nil, errors.Trace(err) - } - svc, err := bakery.NewService(bakery.NewServiceParams{ - Location: "omnibus", - Key: kp, - }) - if err != nil { - return nil, errors.Trace(err) - } - return &mockapi{ - service: svc, - }, nil -} - -type mockapi struct { - testing.Stub - - service *bakery.Service - macaroon *macaroon.Macaroon -} - -func (m *mockapi) Authorize(modelUUID, charmURL, applicationName, plan string, visitWebPage func(*url.URL) error) (*macaroon.Macaroon, error) { - err := m.NextErr() - if err 
!= nil { - return nil, errors.Trace(err) - } - m.AddCall("Authorize", modelUUID, charmURL, applicationName) - macaroon, err := m.service.NewMacaroon( - "", - nil, - []checkers.Caveat{ - checkers.DeclaredCaveat("environment", modelUUID), - checkers.DeclaredCaveat("charm", charmURL), - checkers.DeclaredCaveat("service", applicationName), - checkers.DeclaredCaveat("plan", plan), - }, - ) - if err != nil { - return nil, errors.Trace(err) - } - m.macaroon = macaroon - return m.macaroon, nil -} diff -Nru juju-core-2.0~beta15/src/github.com/juju/romulus/cmd/showbudget/export_test.go juju-core-2.0.0/src/github.com/juju/romulus/cmd/showbudget/export_test.go --- juju-core-2.0~beta15/src/github.com/juju/romulus/cmd/showbudget/export_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/romulus/cmd/showbudget/export_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,29 +0,0 @@ -// Copyright 2016 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package showbudget - -import ( - "gopkg.in/macaroon-bakery.v1/httpbakery" -) - -var ( - NewBudgetAPIClient = &newBudgetAPIClient - NewAPIClient = &newAPIClient -) - -// APIClientFnc returns a function that returns the provided APIClient -// and can be used to patch the NewAPIClient variable in tests -func NewAPIClientFnc(api APIClient) func(*showBudgetCommand) (APIClient, error) { - return func(*showBudgetCommand) (APIClient, error) { - return api, nil - } -} - -// BudgetAPIClientFnc returns a function that returns the provided budgetAPIClient -// and can be used to patch the NewBudgetAPIClient variable for tests. 
-func BudgetAPIClientFnc(api budgetAPIClient) func(*httpbakery.Client) (budgetAPIClient, error) { - return func(*httpbakery.Client) (budgetAPIClient, error) { - return api, nil - } -} diff -Nru juju-core-2.0~beta15/src/github.com/juju/romulus/cmd/showbudget/package_test.go juju-core-2.0.0/src/github.com/juju/romulus/cmd/showbudget/package_test.go --- juju-core-2.0~beta15/src/github.com/juju/romulus/cmd/showbudget/package_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/romulus/cmd/showbudget/package_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,14 +0,0 @@ -// Copyright 2016 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package showbudget_test - -import ( - stdtesting "testing" - - gc "gopkg.in/check.v1" -) - -func TestAll(t *stdtesting.T) { - gc.TestingT(t) -} diff -Nru juju-core-2.0~beta15/src/github.com/juju/romulus/cmd/showbudget/show_budget.go juju-core-2.0.0/src/github.com/juju/romulus/cmd/showbudget/show_budget.go --- juju-core-2.0~beta15/src/github.com/juju/romulus/cmd/showbudget/show_budget.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/romulus/cmd/showbudget/show_budget.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,187 +0,0 @@ -// Copyright 2016 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package showbudget - -import ( - "sort" - - "github.com/gosuri/uitable" - "github.com/juju/cmd" - "github.com/juju/errors" - "github.com/juju/juju/api/modelmanager" - "github.com/juju/juju/apiserver/params" - "github.com/juju/juju/cmd/modelcmd" - "github.com/juju/loggo" - "gopkg.in/juju/names.v2" - "gopkg.in/macaroon-bakery.v1/httpbakery" - "launchpad.net/gnuflag" - - api "github.com/juju/romulus/api/budget" - wireformat "github.com/juju/romulus/wireformat/budget" -) - -var logger = loggo.GetLogger("romulus.cmd.showbudget") - -// NewShowBudgetCommand returns a new command that is used -// to show details of the specified wireformat. 
-func NewShowBudgetCommand() modelcmd.CommandBase { - return &showBudgetCommand{} -} - -type showBudgetCommand struct { - modelcmd.ModelCommandBase - - out cmd.Output - budget string -} - -const showBudgetDoc = ` -Display budget usage information. - -Examples: - juju show-budget personal -` - -// Info implements cmd.Command.Info. -func (c *showBudgetCommand) Info() *cmd.Info { - return &cmd.Info{ - Name: "show-budget", - Args: "", - Purpose: "Show details about a budget.", - Doc: showBudgetDoc, - } -} - -// Init implements cmd.Command.Init. -func (c *showBudgetCommand) Init(args []string) error { - if len(args) < 1 { - return errors.New("missing arguments") - } - c.budget, args = args[0], args[1:] - - return cmd.CheckEmpty(args) -} - -// SetFlags implements cmd.Command.SetFlags. -func (c *showBudgetCommand) SetFlags(f *gnuflag.FlagSet) { - c.out.AddFlags(f, "tabular", map[string]cmd.Formatter{ - "tabular": formatTabular, - "json": cmd.FormatJson, - }) -} - -func (c *showBudgetCommand) Run(ctx *cmd.Context) error { - client, err := c.BakeryClient() - if err != nil { - return errors.Annotate(err, "failed to create an http client") - } - api, err := newBudgetAPIClient(client) - if err != nil { - return errors.Annotate(err, "failed to create an api client") - } - budget, err := api.GetBudget(c.budget) - if err != nil { - return errors.Annotate(err, "failed to retrieve the budget") - } - c.resolveModelNames(budget) - err = c.out.Write(ctx, budget) - return errors.Trace(err) -} - -// resolveModelNames is a best-effort method to resolve model names - if we -// encounter any error, we do not issue an error. 
-func (c *showBudgetCommand) resolveModelNames(budget *wireformat.BudgetWithAllocations) { - models := make([]names.ModelTag, len(budget.Allocations)) - for i, allocation := range budget.Allocations { - models[i] = names.NewModelTag(allocation.Model) - } - client, err := newAPIClient(c) - if err != nil { - logger.Errorf("failed to open the API client: %v", err) - return - } - modelInfoSlice, err := client.ModelInfo(models) - if err != nil { - logger.Errorf("failed to retrieve model info: %v", err) - return - } - for j, info := range modelInfoSlice { - if info.Error != nil { - logger.Errorf("failed to get model info for model %q: %v", models[j], info.Error) - continue - } - for i, allocation := range budget.Allocations { - if info.Result.UUID == allocation.Model { - budget.Allocations[i].Model = info.Result.Name - } - } - } -} - -// formatTabular returns a tabular view of available budgets. -func formatTabular(value interface{}) ([]byte, error) { - b, ok := value.(*wireformat.BudgetWithAllocations) - if !ok { - return nil, errors.Errorf("expected value of type %T, got %T", b, value) - } - - table := uitable.New() - table.MaxColWidth = 50 - table.Wrap = true - for _, col := range []int{2, 3, 5} { - table.RightAlign(col) - } - - table.AddRow("MODEL", "SERVICES", "SPENT", "ALLOCATED", "BY", "USAGE") - for _, allocation := range b.Allocations { - firstLine := true - // We'll sort the service names to avoid nondeterministic - // command output. 
- services := make([]string, 0, len(allocation.Services)) - for serviceName, _ := range allocation.Services { - services = append(services, serviceName) - } - sort.Strings(services) - for _, serviceName := range services { - service, _ := allocation.Services[serviceName] - if firstLine { - table.AddRow(allocation.Model, serviceName, service.Consumed, allocation.Limit, allocation.Owner, allocation.Usage) - firstLine = false - continue - } - table.AddRow("", serviceName, service.Consumed, "", "") - } - - } - table.AddRow("", "", "", "", "") - table.AddRow("TOTAL", "", b.Total.Consumed, b.Total.Allocated, "", b.Total.Usage) - table.AddRow("BUDGET", "", "", b.Limit, "") - table.AddRow("UNALLOCATED", "", "", b.Total.Unallocated, "") - return []byte(table.String()), nil -} - -var newAPIClient = newAPIClientImpl - -func newAPIClientImpl(c *showBudgetCommand) (APIClient, error) { - root, err := c.NewAPIRoot() - if err != nil { - return nil, errors.Trace(err) - } - return modelmanager.NewClient(root), nil -} - -type APIClient interface { - ModelInfo(tags []names.ModelTag) ([]params.ModelInfoResult, error) -} - -var newBudgetAPIClient = newBudgetAPIClientImpl - -func newBudgetAPIClientImpl(c *httpbakery.Client) (budgetAPIClient, error) { - client := api.NewClient(c) - return client, nil -} - -type budgetAPIClient interface { - GetBudget(string) (*wireformat.BudgetWithAllocations, error) -} diff -Nru juju-core-2.0~beta15/src/github.com/juju/romulus/cmd/showbudget/show_budget_test.go juju-core-2.0.0/src/github.com/juju/romulus/cmd/showbudget/show_budget_test.go --- juju-core-2.0~beta15/src/github.com/juju/romulus/cmd/showbudget/show_budget_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/romulus/cmd/showbudget/show_budget_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,202 +0,0 @@ -// Copyright 2016 Canonical Ltd. 
-// Licensed under the AGPLv3, see LICENCE file for details.s - -package showbudget_test - -import ( - "github.com/juju/cmd/cmdtesting" - "github.com/juju/errors" - "github.com/juju/juju/apiserver/params" - coretesting "github.com/juju/juju/testing" - "github.com/juju/testing" - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" - "gopkg.in/juju/names.v2" - - "github.com/juju/romulus/cmd/showbudget" - "github.com/juju/romulus/wireformat/budget" -) - -var _ = gc.Suite(&showBudgetSuite{}) - -type showBudgetSuite struct { - coretesting.FakeJujuXDGDataHomeSuite - stub *testing.Stub - mockBudgetAPI *mockBudgetAPI - mockAPI *mockAPI -} - -func (s *showBudgetSuite) SetUpTest(c *gc.C) { - s.CleanupSuite.SetUpTest(c) - s.FakeJujuXDGDataHomeSuite.SetUpTest(c) - s.stub = &testing.Stub{} - s.mockBudgetAPI = &mockBudgetAPI{s.stub} - s.mockAPI = &mockAPI{s.stub} - s.PatchValue(showbudget.NewBudgetAPIClient, showbudget.BudgetAPIClientFnc(s.mockBudgetAPI)) - s.PatchValue(showbudget.NewAPIClient, showbudget.NewAPIClientFnc(s.mockAPI)) -} - -func (s *showBudgetSuite) TestShowBudgetCommand(c *gc.C) { - tests := []struct { - about string - args []string - err string - budget string - apierr string - resolveerr string - output string - }{{ - about: "missing argument", - err: `missing arguments`, - }, { - about: "unknown arguments", - args: []string{"my-special-budget", "extra", "arguments"}, - err: `unrecognized args: \["extra" "arguments"\]`, - }, { - about: "api error", - args: []string{"personal"}, - apierr: "well, this is embarrassing", - err: "failed to retrieve the budget: well, this is embarrassing", - }, { - about: "all ok", - args: []string{"personal"}, - budget: "personal", - output: "" + - "MODEL \tSERVICES \tSPENT\tALLOCATED\tBY \tUSAGE\n" + - "model.joe \tmysql \t 200\t 1200\tuser.joe \t 42%\n" + - " \twordpress\t 300\t \t \n" + - "model.jess \tlandscape\t 600\t 1000\tuser.jess\t 60%\n" + - "uuid3 \tmysql \t 10\t 100\tuser.bob \t 10%\n" + - " \t \t \t \t \n" + 
- "TOTAL \t \t 1110\t 2300\t \t 48%\n" + - "BUDGET \t \t \t 4000\t \n" + - "UNALLOCATED\t \t \t 1700\t \n", - }, { - about: "all ok", - args: []string{"personal"}, - budget: "personal", - resolveerr: "test error", - output: "" + - "MODEL \tSERVICES \tSPENT\tALLOCATED\tBY \tUSAGE\n" + - "uuid1 \tmysql \t 200\t 1200\tuser.joe \t 42%\n" + - " \twordpress\t 300\t \t \n" + - "uuid2 \tlandscape\t 600\t 1000\tuser.jess\t 60%\n" + - "uuid3 \tmysql \t 10\t 100\tuser.bob \t 10%\n" + - " \t \t \t \t \n" + - "TOTAL \t \t 1110\t 2300\t \t 48%\n" + - "BUDGET \t \t \t 4000\t \n" + - "UNALLOCATED\t \t \t 1700\t \n", - }, - } - - for i, test := range tests { - c.Logf("running test %d: %v", i, test.about) - s.mockAPI.ResetCalls() - - errs := []error{} - if test.apierr != "" { - errs = append(errs, errors.New(test.apierr)) - } else { - errs = append(errs, nil) - } - if test.resolveerr != "" { - errs = append(errs, errors.New(test.resolveerr)) - } else { - errs = append(errs, nil) - } - s.mockAPI.SetErrors(errs...) - - showBudget := showbudget.NewShowBudgetCommand() - - ctx, err := cmdtesting.RunCommand(c, showBudget, test.args...) 
- if test.err == "" { - c.Assert(err, jc.ErrorIsNil) - s.stub.CheckCalls(c, []testing.StubCall{ - {"GetBudget", []interface{}{test.budget}}, - {"ModelInfo", []interface{}{[]names.ModelTag{names.NewModelTag("uuid1"), names.NewModelTag("uuid2"), names.NewModelTag("uuid3")}}}, - }) - output := cmdtesting.Stdout(ctx) - c.Assert(output, gc.Equals, test.output) - } else { - c.Assert(err, gc.ErrorMatches, test.err) - } - } -} - -type mockAPI struct { - *testing.Stub -} - -func (api *mockAPI) ModelInfo(tags []names.ModelTag) ([]params.ModelInfoResult, error) { - api.AddCall("ModelInfo", tags) - return []params.ModelInfoResult{{ - Result: ¶ms.ModelInfo{ - Name: "model.jess", - UUID: "uuid2", - }, - }, { - Result: ¶ms.ModelInfo{ - Name: "model.joe", - UUID: "uuid1", - }, - }, { - Error: ¶ms.Error{ - Message: "not found", - }, - }, - }, api.NextErr() -} - -type mockBudgetAPI struct { - *testing.Stub -} - -func (api *mockBudgetAPI) GetBudget(name string) (*budget.BudgetWithAllocations, error) { - api.AddCall("GetBudget", name) - return &budget.BudgetWithAllocations{ - Limit: "4000", - Total: budget.BudgetTotals{ - Allocated: "2300", - Unallocated: "1700", - Available: "1190", - Consumed: "1110", - Usage: "48%", - }, - Allocations: []budget.Allocation{{ - Owner: "user.joe", - Limit: "1200", - Consumed: "500", - Usage: "42%", - Model: "uuid1", - Services: map[string]budget.ServiceAllocation{ - "wordpress": budget.ServiceAllocation{ - Consumed: "300", - }, - "mysql": budget.ServiceAllocation{ - Consumed: "200", - }, - }, - }, { - Owner: "user.jess", - Limit: "1000", - Consumed: "600", - Usage: "60%", - Model: "uuid2", - Services: map[string]budget.ServiceAllocation{ - "landscape": budget.ServiceAllocation{ - Consumed: "600", - }, - }, - }, { - Owner: "user.bob", - Limit: "100", - Consumed: "10", - Usage: "10%", - Model: "uuid3", - Services: map[string]budget.ServiceAllocation{ - "mysql": budget.ServiceAllocation{ - Consumed: "10", - }, - }, - }}}, api.NextErr() -} diff -Nru 
juju-core-2.0~beta15/src/github.com/juju/romulus/cmd/updateallocation/export_test.go juju-core-2.0.0/src/github.com/juju/romulus/cmd/updateallocation/export_test.go --- juju-core-2.0~beta15/src/github.com/juju/romulus/cmd/updateallocation/export_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/romulus/cmd/updateallocation/export_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,16 +0,0 @@ -// Copyright 2016 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package updateallocation - -import ( - "github.com/juju/cmd" - "github.com/juju/juju/cmd/modelcmd" - "github.com/juju/juju/jujuclient" -) - -func NewUpdateAllocateCommandForTest(api apiClient, store jujuclient.ClientStore) cmd.Command { - c := &updateAllocationCommand{api: api} - c.SetClientStore(store) - return modelcmd.Wrap(c) -} diff -Nru juju-core-2.0~beta15/src/github.com/juju/romulus/cmd/updateallocation/package_test.go juju-core-2.0.0/src/github.com/juju/romulus/cmd/updateallocation/package_test.go --- juju-core-2.0~beta15/src/github.com/juju/romulus/cmd/updateallocation/package_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/romulus/cmd/updateallocation/package_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,14 +0,0 @@ -// Copyright 2016 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. 
- -package updateallocation_test - -import ( - stdtesting "testing" - - gc "gopkg.in/check.v1" -) - -func TestAll(t *stdtesting.T) { - gc.TestingT(t) -} diff -Nru juju-core-2.0~beta15/src/github.com/juju/romulus/cmd/updateallocation/updateallocation.go juju-core-2.0.0/src/github.com/juju/romulus/cmd/updateallocation/updateallocation.go --- juju-core-2.0~beta15/src/github.com/juju/romulus/cmd/updateallocation/updateallocation.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/romulus/cmd/updateallocation/updateallocation.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,101 +0,0 @@ -// Copyright 2016 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -// Package updateallocation defines the command used to update allocations. -package updateallocation - -import ( - "fmt" - "strconv" - - "github.com/juju/cmd" - "github.com/juju/errors" - "github.com/juju/juju/cmd/modelcmd" - "gopkg.in/macaroon-bakery.v1/httpbakery" - - api "github.com/juju/romulus/api/budget" -) - -type updateAllocationCommand struct { - modelcmd.ModelCommandBase - api apiClient - Name string - Value string -} - -// NewUpdateAllocationCommand returns a new updateAllocationCommand. -func NewUpdateAllocationCommand() modelcmd.ModelCommand { - return &updateAllocationCommand{} -} - -func (c *updateAllocationCommand) newAPIClient(bakery *httpbakery.Client) (apiClient, error) { - if c.api != nil { - return c.api, nil - } - c.api = api.NewClient(bakery) - return c.api, nil -} - -type apiClient interface { - UpdateAllocation(string, string, string) (string, error) -} - -const doc = ` -Updates an existing allocation on an application. - -Examples: - # Sets the allocation for the wordpress application to 10. - juju update-allocation wordpress 10 -` - -// Info implements cmd.Command.Info. 
-func (c *updateAllocationCommand) Info() *cmd.Info { - return &cmd.Info{ - Name: "update-allocation", - Args: " ", - Purpose: "Update an allocation.", - Doc: doc, - } -} - -// Init implements cmd.Command.Init. -func (c *updateAllocationCommand) Init(args []string) error { - if len(args) < 2 { - return errors.New("application and value required") - } - c.Name, c.Value = args[0], args[1] - if _, err := strconv.ParseInt(c.Value, 10, 32); err != nil { - return errors.New("value needs to be a whole number") - } - return cmd.CheckEmpty(args[2:]) -} - -func (c *updateAllocationCommand) modelUUID() (string, error) { - model, err := c.ClientStore().ModelByName(c.ControllerName(), c.ModelName()) - if err != nil { - return "", errors.Trace(err) - } - return model.ModelUUID, nil -} - -// Run implements cmd.Command.Run and contains most of the setbudget logic. -func (c *updateAllocationCommand) Run(ctx *cmd.Context) error { - modelUUID, err := c.modelUUID() - if err != nil { - return errors.Annotate(err, "failed to get model uuid") - } - client, err := c.BakeryClient() - if err != nil { - return errors.Annotate(err, "failed to create an http client") - } - api, err := c.newAPIClient(client) - if err != nil { - return errors.Annotate(err, "failed to create an api client") - } - resp, err := api.UpdateAllocation(modelUUID, c.Name, c.Value) - if err != nil { - return errors.Annotate(err, "failed to update the allocation") - } - fmt.Fprintln(ctx.Stdout, resp) - return nil -} diff -Nru juju-core-2.0~beta15/src/github.com/juju/romulus/cmd/updateallocation/updateallocation_test.go juju-core-2.0.0/src/github.com/juju/romulus/cmd/updateallocation/updateallocation_test.go --- juju-core-2.0~beta15/src/github.com/juju/romulus/cmd/updateallocation/updateallocation_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/romulus/cmd/updateallocation/updateallocation_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,118 +0,0 @@ -// Copyright 2016 Canonical Ltd. 
-// Licensed under the AGPLv3, see LICENCE file for details. - -package updateallocation_test - -import ( - "github.com/juju/cmd" - "github.com/juju/cmd/cmdtesting" - "github.com/juju/errors" - "github.com/juju/juju/jujuclient" - "github.com/juju/juju/jujuclient/jujuclienttesting" - coretesting "github.com/juju/juju/testing" - "github.com/juju/testing" - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" - - "github.com/juju/romulus/cmd/updateallocation" -) - -var _ = gc.Suite(&updateAllocationSuite{}) - -type updateAllocationSuite struct { - coretesting.FakeJujuXDGDataHomeSuite - stub *testing.Stub - mockAPI *mockapi - store jujuclient.ClientStore -} - -func (s *updateAllocationSuite) SetUpTest(c *gc.C) { - s.FakeJujuXDGDataHomeSuite.SetUpTest(c) - s.store = &jujuclienttesting.MemStore{ - Controllers: map[string]jujuclient.ControllerDetails{ - "controller": {}, - }, - Models: map[string]*jujuclient.ControllerModels{ - "controller": { - Models: map[string]jujuclient.ModelDetails{ - "model": {"model-uuid"}, - }, - CurrentModel: "model", - }, - }, - Accounts: map[string]jujuclient.AccountDetails{ - "controller": { - User: "admin@local", - }, - }, - } - s.stub = &testing.Stub{} - s.mockAPI = newMockAPI(s.stub) -} - -func (s *updateAllocationSuite) run(c *gc.C, args ...string) (*cmd.Context, error) { - updateAlloc := updateallocation.NewUpdateAllocateCommandForTest(s.mockAPI, s.store) - a := []string{"-m", "controller:model"} - a = append(a, args...) - return cmdtesting.RunCommand(c, updateAlloc, a...) 
-} - -func (s *updateAllocationSuite) TestUpdateAllocation(c *gc.C) { - s.mockAPI.resp = "name budget set to 5" - ctx, err := s.run(c, "name", "5") - c.Assert(err, jc.ErrorIsNil) - c.Assert(cmdtesting.Stdout(ctx), jc.DeepEquals, "name budget set to 5\n") - s.mockAPI.CheckCall(c, 0, "UpdateAllocation", "model-uuid", "name", "5") -} - -func (s *updateAllocationSuite) TestUpdateAllocationAPIError(c *gc.C) { - s.stub.SetErrors(errors.New("something failed")) - _, err := s.run(c, "name", "5") - c.Assert(err, gc.ErrorMatches, "failed to update the allocation: something failed") - s.mockAPI.CheckCall(c, 0, "UpdateAllocation", "model-uuid", "name", "5") -} - -func (s *updateAllocationSuite) TestUpdateAllocationErrors(c *gc.C) { - tests := []struct { - about string - args []string - expectedError string - }{ - { - about: "value needs to be a number", - args: []string{"name", "badvalue"}, - expectedError: "value needs to be a whole number", - }, - { - about: "value is missing", - args: []string{"name"}, - expectedError: "application and value required", - }, - { - about: "no args", - args: []string{}, - expectedError: "application and value required", - }, - } - for i, test := range tests { - s.mockAPI.ResetCalls() - c.Logf("test %d: %s", i, test.about) - _, err := s.run(c, test.args...) 
- c.Check(err, gc.ErrorMatches, test.expectedError) - s.mockAPI.CheckNoCalls(c) - } -} - -func newMockAPI(s *testing.Stub) *mockapi { - return &mockapi{Stub: s} -} - -type mockapi struct { - *testing.Stub - resp string -} - -func (api *mockapi) UpdateAllocation(modelUUID, name, value string) (string, error) { - api.MethodCall(api, "UpdateAllocation", modelUUID, name, value) - return api.resp, api.NextErr() -} diff -Nru juju-core-2.0~beta15/src/github.com/juju/romulus/dependencies.tsv juju-core-2.0.0/src/github.com/juju/romulus/dependencies.tsv --- juju-core-2.0~beta15/src/github.com/juju/romulus/dependencies.tsv 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/romulus/dependencies.tsv 2016-10-13 14:32:10.000000000 +0000 @@ -1,51 +1,18 @@ -github.com/bmizerany/pat git c068ca2f0aacee5ac3681d68e4d0a003b7d1fd2c 2016-02-17T10:32:42Z -github.com/coreos/go-systemd git 7b2428fec40033549c68f54e26e89e7ca9a9ce31 2016-02-02T21:14:25Z -github.com/dustin/go-humanize git 145fabdb1ab757076a70a886d092a3af27f66f4c 2014-12-28T07:11:48Z -github.com/gabriel-samfira/sys git 9ddc60d56b511544223adecea68da1e4f2153beb 2015-06-08T13:21:19Z -github.com/godbus/dbus git 32c6cc29c14570de4cf6d7e7737d68fb2d01ad15 2016-05-06T22:25:50Z -github.com/gorilla/schema git 08023a0215e7fc27a9aecd8b8c50913c40019478 2016-04-26T23:15:12Z -github.com/gosuri/uitable git 36ee7e946282a3fb1cfecd476ddc9b35d8847e42 2016-04-04T20:39:58Z -github.com/juju/bundlechanges git 8d99dd2a94d7b4fd975a152238d0e19d0c4a6cf1 2016-06-15T07:19:43Z -github.com/juju/cmd git a11ae7a7436c133e799f025998cbbefd3f6eef7e 2016-06-01T03:55:01Z github.com/juju/errors git 1b5e39b83d1835fa480e0c2ddefb040ee82d58b3 2015-09-16T12:56:42Z -github.com/juju/go4 git 40d72ab9641a2a8c36a9c46a51e28367115c8e59 2016-02-22T16:32:58Z -github.com/juju/gojsonpointer git afe8b77aa08f272b49e01b82de78510c11f61500 2015-02-04T19:46:29Z -github.com/juju/gojsonreference git f0d24ac5ee330baa21721cdff56d45e4ee42628e 2015-02-04T19:46:33Z 
-github.com/juju/gojsonschema git e1ad140384f254c82f89450d9a7c8dd38a632838 2015-03-12T17:00:16Z -github.com/juju/httpprof git 14bf14c307672fd2456bdbf35d19cf0ccd3cf565 2014-12-17T16:00:36Z github.com/juju/httprequest git 796aaafaf712f666df58d31a482c51233038bf9f 2016-05-03T15:03:27Z -github.com/juju/idmclient git 3dda079a75cccb85083d4c3877e638f5d6ab79c2 2016-05-26T05:00:34Z -github.com/juju/juju git ef5ff9f3160a51e1964943532191df95644665d5 2016-07-14T10:19:34Z -github.com/juju/loggo git 8477fc936adf0e382d680310047ca27e128a309a 2015-05-27T03:58:39Z -github.com/juju/mutex git 59c26ee163447c5c57f63ff71610d433862013de 2016-06-17T01:09:07Z -github.com/juju/persistent-cookiejar git e710b897c13ca52828ca2fc9769465186fd6d15c 2016-03-31T17:12:27Z -github.com/juju/replicaset git fb7294cf57a1e2f08a57691f1246d129a87ab7e8 2015-05-08T02:21:43Z -github.com/juju/rfc git ebdbbdb950cd039a531d15cdc2ac2cbd94f068ee 2016-07-11T02:42:13Z -github.com/juju/schema git 075de04f9b7d7580d60a1e12a0b3f50bb18e6998 2016-04-20T04:42:03Z -github.com/juju/terms-client git 8b4b1f20960150c2529db2003824014490291299 2016-08-08T09:34:01Z -github.com/juju/testing git ccf839b5a07a7a05009f8fa3ec41cd05fb2e0b08 2016-06-24T20:35:24Z -github.com/juju/txn git 99ec629d0066a4d73c54d8e021a7fc1dc07df614 2015-06-09T16:58:27Z -github.com/juju/usso git 68a59c96c178fbbad65926e7f93db50a2cd14f33 2016-04-01T10:44:24Z -github.com/juju/utils git 6219812829a3542c827c76cc75f416d4e6c94335 2016-07-08T10:00:56Z +github.com/juju/loggo git 15901ae4de786d05edae84a27c93d3fbef66c91e 2016-08-04T22:15:26Z +github.com/juju/testing git d325c22badd4ba3a5fde01d479b188c7a06df755 2016-08-02T03:47:59Z +github.com/juju/utils git bdb77b07e7e3f77463d10d2b554cd1b7a78009a2 2016-08-15T11:38:39Z github.com/juju/version git 4ae6172c00626779a5a462c3e3d22fc0e889431a 2016-06-03T19:49:58Z github.com/juju/webbrowser git 54b8c57083b4afb7dc75da7f13e2967b2606a507 2016-03-09T14:36:29Z github.com/julienschmidt/httprouter git 77a895ad01ebc98a4dc95d8355bc825ce80a56f6 
2015-10-13T22:55:20Z -github.com/mattn/go-runewidth git d96d1bd051f2bd9e7e43d602782b37b93b1b5666 2015-11-18T07:21:59Z github.com/rogpeppe/fastuuid git 6724a57986aff9bff1a1770e9347036def7c89f6 2015-01-06T09:32:20Z golang.org/x/crypto git aedad9a179ec1ea11b7064c57cbc6dc30d7724ec 2015-08-30T18:06:42Z golang.org/x/net git ea47fc708ee3e20177f3ca3716217c4ab75942cb 2015-08-29T23:03:18Z gopkg.in/check.v1 git 4f90aeace3a26ad7021961c297b22c42160c7b25 2016-01-05T16:49:36Z gopkg.in/errgo.v1 git 66cb46252b94c1f3d65646f54ee8043ab38d766c 2015-10-07T15:31:57Z -gopkg.in/juju/blobstore.v2 git 51fa6e26128d74e445c72d3a91af555151cc3654 2016-01-25T02:37:03Z -gopkg.in/juju/charm.v6-unstable git a3bb92d047b0892452b6a39ece59b4d3a2ac35b9 2016-07-22T08:34:31Z -gopkg.in/juju/charmrepo.v2-unstable git 6e6733987fb03100f30e494cc1134351fe4a593b 2016-05-30T23:07:41Z -gopkg.in/juju/environschema.v1 git 7359fc7857abe2b11b5b3e23811a9c64cb6b01e0 2015-11-04T11:58:10Z -gopkg.in/juju/names.v2 git 5426d66579afd36fc63d809dd58806806c2f161f 2016-06-23T03:33:52Z -gopkg.in/macaroon-bakery.v1 git b097c9d99b2537efaf54492e08f7e148f956ba51 2016-05-24T09:38:11Z +gopkg.in/juju/names.v2 git 3e0d33a444fec55aea7269b849eb22da41e73072 2016-07-18T22:31:20Z +gopkg.in/macaroon-bakery.v1 git 469b44e6f1f9479e115c8ae879ef80695be624d5 2016-06-22T12:14:21Z gopkg.in/macaroon.v1 git ab3940c6c16510a850e1c2dd628b919f0f3f1464 2015-01-21T11:42:31Z -gopkg.in/mgo.v2 git 4d04138ffef2791c479c0c8bbffc30b34081b8d9 2015-10-26T16:34:53Z -gopkg.in/natefinch/lumberjack.v2 git 514cbda263a734ae8caac038dadf05f8f3f9f738 2016-01-25T11:17:49Z -gopkg.in/natefinch/npipe.v2 git c1b8fa8bdccecb0b8db834ee0b92fdbcfa606dd6 2016-06-21T03:49:01Z +gopkg.in/mgo.v2 git 29cc868a5ca65f401ff318143f9408d02f4799cc 2016-06-09T18:00:28Z gopkg.in/yaml.v2 git a83829b6f1293c91addabc89d0571c246397bbf4 2016-03-01T20:40:22Z -launchpad.net/gnuflag bzr roger.peppe@canonical.com-20140716064605-pk32dnmfust02yab 13 -launchpad.net/tomb bzr 
gustavo@niemeyer.net-20140529072043-hzcrlnl3ygvg914q 18 diff -Nru juju-core-2.0~beta15/src/github.com/juju/testing/clock.go juju-core-2.0.0/src/github.com/juju/testing/clock.go --- juju-core-2.0~beta15/src/github.com/juju/testing/clock.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/testing/clock.go 2016-10-13 14:32:08.000000000 +0000 @@ -8,57 +8,51 @@ "sync" "time" + "github.com/juju/errors" "github.com/juju/utils/clock" ) -// timerClock exposes the underlying Clock's capabilities to a Timer. -type timerClock interface { - reset(id int, d time.Duration) bool - stop(id int) bool -} - -// Timer implements a mock clock.Timer for testing purposes. -type Timer struct { - ID int - clock timerClock +// timer implements a mock clock.Timer for testing purposes. +type timer struct { + deadline time.Time + clock *Clock + c chan time.Time + // trigger is called when the timer expires. It is + // called with the clock mutex held and will not block. + trigger func() } // Reset is part of the clock.Timer interface. -func (t *Timer) Reset(d time.Duration) bool { - return t.clock.reset(t.ID, d) +func (t *timer) Reset(d time.Duration) bool { + return t.clock.reset(t, d) } // Stop is part of the clock.Timer interface. -func (t *Timer) Stop() bool { - return t.clock.stop(t.ID) +func (t *timer) Stop() bool { + return t.clock.stop(t) } -// stoppedTimer is a no-op implementation of clock.Timer. -type stoppedTimer struct{} - -// Reset is part of the clock.Timer interface. -func (stoppedTimer) Reset(time.Duration) bool { return false } - -// Stop is part of the clock.Timer interface. -func (stoppedTimer) Stop() bool { return false } +// Chan is part of the clock.Timer interface. +func (t *timer) Chan() <-chan time.Time { + return t.c +} // Clock implements a mock clock.Clock for testing purposes. 
type Clock struct { - mu sync.Mutex - now time.Time - alarms []alarm - currentAlarmID int - notifyAlarms chan struct{} + mu sync.Mutex + now time.Time + waiting []*timer // timers waiting to fire, sorted by deadline. + notifyAlarms chan struct{} } // NewClock returns a new clock set to the supplied time. If your SUT needs to -// call After, AfterFunc, or Timer.Reset more than 1024 times: (1) you have -// probably written a bad test; and (2) you'll need to read from the Alarms -// chan to keep the buffer clear. +// call After, AfterFunc, NewTimer or Timer.Reset more than 10000 times: (1) +// you have probably written a bad test; and (2) you'll need to read from the +// Alarms chan to keep the buffer clear. func NewClock(now time.Time) *Clock { return &Clock{ now: now, - notifyAlarms: make(chan struct{}, 1024), + notifyAlarms: make(chan struct{}, 10000), } } @@ -71,29 +65,36 @@ // After is part of the clock.Clock interface. func (clock *Clock) After(d time.Duration) <-chan time.Time { - defer clock.notifyAlarm() - clock.mu.Lock() - defer clock.mu.Unlock() - notify := make(chan time.Time, 1) - if d <= 0 { - notify <- clock.now - } else { - clock.setAlarm(clock.now.Add(d), func() { notify <- clock.now }) - } - return notify + return clock.NewTimer(d).Chan() +} + +func (clock *Clock) NewTimer(d time.Duration) clock.Timer { + c := make(chan time.Time, 1) + return clock.addAlarm(d, c, func() { + c <- clock.now + }) } // AfterFunc is part of the clock.Clock interface. 
func (clock *Clock) AfterFunc(d time.Duration, f func()) clock.Timer { + return clock.addAlarm(d, nil, func() { + go f() + }) +} + +func (clock *Clock) addAlarm(d time.Duration, c chan time.Time, trigger func()) *timer { defer clock.notifyAlarm() clock.mu.Lock() defer clock.mu.Unlock() - if d <= 0 { - f() - return &stoppedTimer{} - } - id := clock.setAlarm(clock.now.Add(d), f) - return &Timer{id, clock} + t := &timer{ + c: c, + deadline: clock.now.Add(d), + clock: clock, + trigger: trigger, + } + clock.addTimer(t) + clock.triggerAll() + return t } // Advance advances the result of Now by the supplied duration, and sends @@ -102,15 +103,43 @@ clock.mu.Lock() defer clock.mu.Unlock() clock.now = clock.now.Add(d) - triggered := 0 - for _, alarm := range clock.alarms { - if clock.now.Before(alarm.time) { - break + if len(clock.waiting) == 0 { + logger.Debugf("advancing a clock that has nothing waiting: cf. https://github.com/juju/juju/wiki/Intermittent-failures") + } + clock.triggerAll() +} + +// WaitAdvance functions the same as Advance, but only if there is n timers in +// clock.waiting. This came about while fixing lp:1607044 intermittent +// failures. It turns out that testing.Clock.Advance might advance the time +// and trigger notifications before triggers are set. So we wait a limited time +// 'w' for 'n' timers to show up in clock.waiting, and if they do we advance +// 'd'. +func (clock *Clock) WaitAdvance(d, w time.Duration, n int) error { + if w == 0 { + w = time.Second + } + pause := w / 10 + for i := 0; i < 10; i++ { + if clock.hasNWaiters(n) { + clock.Advance(d) + return nil } - alarm.trigger() - triggered++ + time.Sleep(pause) } - clock.alarms = clock.alarms[triggered:] + clock.mu.Lock() + got := len(clock.waiting) + clock.mu.Unlock() + return errors.Errorf( + "got %d timers added after waiting %s: wanted %d", got, w.String(), n) +} + +// hasNWaiters checks if the clock currently has 'n' timers waiting to fire. 
+func (clock *Clock) hasNWaiters(n int) bool { + clock.mu.Lock() + hasWaiters := len(clock.waiting) == n + clock.mu.Unlock() + return hasWaiters } // Alarms returns a channel on which you can read one value for every call to @@ -121,50 +150,60 @@ return clock.notifyAlarms } +// triggerAll triggers any alarms that are currently due and removes them +// from clock.waiting. +func (clock *Clock) triggerAll() { + triggered := 0 + for _, t := range clock.waiting { + if clock.now.Before(t.deadline) { + break + } + t.trigger() + triggered++ + } + clock.waiting = clock.waiting[triggered:] +} + // reset is the underlying implementation of clock.Timer.Reset, which may be // called by any Timer backed by this Clock. -func (clock *Clock) reset(id int, d time.Duration) bool { +func (clock *Clock) reset(t *timer, d time.Duration) bool { + defer clock.notifyAlarm() clock.mu.Lock() defer clock.mu.Unlock() - for i, alarm := range clock.alarms { - if id == alarm.ID { - defer clock.notifyAlarm() - clock.alarms[i].time = clock.now.Add(d) - sort.Sort(byTime(clock.alarms)) - return true + found := false + for _, wt := range clock.waiting { + if wt == t { + found = true } } - return false + if !found { + clock.waiting = append(clock.waiting, t) + } + t.deadline = clock.now.Add(d) + sort.Sort(byDeadline(clock.waiting)) + return found } // stop is the underlying implementation of clock.Timer.Reset, which may be // called by any Timer backed by this Clock. -func (clock *Clock) stop(id int) bool { +func (clock *Clock) stop(t *timer) bool { clock.mu.Lock() defer clock.mu.Unlock() - for i, alarm := range clock.alarms { - if id == alarm.ID { - clock.alarms = removeFromSlice(clock.alarms, i) + for i, wt := range clock.waiting { + if wt == t { + clock.waiting = removeFromSlice(clock.waiting, i) return true } } return false } -// setAlarm adds an alarm at time t. -// It also sorts the alarms and increments the current ID by 1. 
-func (clock *Clock) setAlarm(t time.Time, trigger func()) int { - alarm := alarm{ - time: t, - trigger: trigger, - ID: clock.currentAlarmID, - } - clock.alarms = append(clock.alarms, alarm) - sort.Sort(byTime(clock.alarms)) - clock.currentAlarmID = clock.currentAlarmID + 1 - return alarm.ID +// addTimer adds an alarm at time t. +func (clock *Clock) addTimer(t *timer) { + clock.waiting = append(clock.waiting, t) + sort.Sort(byDeadline(clock.waiting)) } // notifyAlarm sends a value on the channel exposed by Alarms(). @@ -176,22 +215,15 @@ } } -// alarm records the time at which we're expected to execute trigger. -type alarm struct { - ID int - time time.Time - trigger func() -} - -// byTime is used to sort alarms by time. -type byTime []alarm +// byDeadline is used to sort alarms by time. +type byDeadline []*timer -func (a byTime) Len() int { return len(a) } -func (a byTime) Less(i, j int) bool { return a[i].time.Before(a[j].time) } -func (a byTime) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a byDeadline) Len() int { return len(a) } +func (a byDeadline) Less(i, j int) bool { return a[i].deadline.Before(a[j].deadline) } +func (a byDeadline) Swap(i, j int) { a[i], a[j] = a[j], a[i] } // removeFromSlice removes item at the specified index from the slice. -func removeFromSlice(sl []alarm, index int) []alarm { +func removeFromSlice(sl []*timer, index int) []*timer { return append(sl[:index], sl[index+1:]...) } diff -Nru juju-core-2.0~beta15/src/github.com/juju/testing/clock_test.go juju-core-2.0.0/src/github.com/juju/testing/clock_test.go --- juju-core-2.0~beta15/src/github.com/juju/testing/clock_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/testing/clock_test.go 2016-10-13 14:32:08.000000000 +0000 @@ -0,0 +1,229 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+ +package testing_test + +import ( + "sync" + "time" + + gc "gopkg.in/check.v1" + + "github.com/juju/testing" + jc "github.com/juju/testing/checkers" +) + +type clockSuite struct { + testing.LoggingSuite +} + +var _ = gc.Suite(&clockSuite{}) + +func (*clockSuite) TestNow(c *gc.C) { + t0 := time.Now() + cl := testing.NewClock(t0) + c.Assert(cl.Now(), gc.Equals, t0) +} + +var ( + shortWait = 50 * time.Millisecond + longWait = time.Second +) + +func (*clockSuite) TestAdvanceLogs(c *gc.C) { + t0 := time.Now() + cl := testing.NewClock(t0) + + // Shouldn't log anything. + t := cl.After(time.Second) + cl.Advance(time.Minute) + <-t + c.Check(c.GetTestLog(), jc.DeepEquals, "") + + // Should log since nothing's waiting. + cl.Advance(time.Hour) + c.Check(c.GetTestLog(), jc.Contains, "advancing a clock that has nothing waiting: cf. https://github.com/juju/juju/wiki/Intermittent-failures") +} + +func (*clockSuite) TestWaitAdvance(c *gc.C) { + t0 := time.Now() + cl := testing.NewClock(t0) + + // Test that no timers errors out. + err := cl.WaitAdvance(time.Millisecond, 10*time.Millisecond, 1) + c.Check(err, gc.ErrorMatches, "got 0 timers added after waiting 10ms: wanted 1") + + // Test that a timer doesn't error. 
+ _ = cl.After(time.Nanosecond) + err = cl.WaitAdvance(time.Millisecond, 10*time.Millisecond, 1) + c.Check(err, jc.ErrorIsNil) +} + +func (*clockSuite) TestAdvanceWithAfter(c *gc.C) { + t0 := time.Now() + cl := testing.NewClock(t0) + ch := cl.After(time.Second) + select { + case <-ch: + c.Fatalf("received unexpected event") + case <-time.After(shortWait): + } + + cl.Advance(time.Second - 1) + + select { + case <-ch: + c.Fatalf("received unexpected event") + case <-time.After(shortWait): + } + + cl.Advance(1) + + select { + case <-ch: + case <-time.After(longWait): + c.Fatalf("expected event to be triggered") + } + + cl.Advance(time.Second) + select { + case <-ch: + c.Fatalf("received unexpected event") + case <-time.After(shortWait): + } + + // Test that we can do it again + ch = cl.After(time.Second) + cl.Advance(2 * time.Second) + select { + case <-ch: + case <-time.After(longWait): + c.Fatalf("expected event to be triggered") + } + c.Assert(cl.Now().UTC(), gc.Equals, t0.Add(4*time.Second).UTC()) +} + +func (*clockSuite) TestAdvanceWithAfterFunc(c *gc.C) { + // Most of the details have been checked in TestAdvanceWithAfter, + // so just check that AfterFunc is wired up correctly. 
+ t0 := time.Now() + cl := testing.NewClock(t0) + fired := make(chan struct{}) + cl.AfterFunc(time.Second, func() { + close(fired) + }) + cl.Advance(2 * time.Second) + select { + case <-fired: + case <-time.After(longWait): + c.Fatalf("expected event to be triggered") + } +} + +func (*clockSuite) TestAfterFuncStop(c *gc.C) { + t0 := time.Now() + cl := testing.NewClock(t0) + fired := make(chan struct{}) + timer := cl.AfterFunc(time.Second, func() { + close(fired) + }) + cl.Advance(50 * time.Millisecond) + timer.Stop() + select { + case <-fired: + c.Fatalf("received unexpected event") + case <-time.After(shortWait): + } +} + +func (*clockSuite) TestNewTimerReset(c *gc.C) { + t0 := time.Now() + cl := testing.NewClock(t0) + timer := cl.NewTimer(time.Second) + cl.Advance(time.Second) + select { + case t := <-timer.Chan(): + c.Assert(t.UTC(), gc.Equals, t0.Add(time.Second).UTC()) + case <-time.After(longWait): + c.Fatalf("expected event to be triggered") + } + + timer.Reset(50 * time.Millisecond) + cl.Advance(100 * time.Millisecond) + select { + case t := <-timer.Chan(): + c.Assert(t.UTC(), gc.Equals, t0.Add(time.Second+100*time.Millisecond).UTC()) + case <-time.After(longWait): + c.Fatalf("expected event to be triggered") + } +} + +func (*clockSuite) TestMultipleWaiters(c *gc.C) { + var wg sync.WaitGroup + t0 := time.Date(2000, 01, 01, 01, 0, 0, 0, time.UTC) + cl := testing.NewClock(t0) + + total := 0 + start := func(f func()) { + total++ + wg.Add(1) + go func() { + defer wg.Done() + f() + }() + } + start(func() { + <-cl.After(50 * time.Millisecond) + }) + start(func() { + ch := make(chan struct{}) + cl.AfterFunc(100*time.Millisecond, func() { + close(ch) + }) + <-ch + }) + start(func() { + timer := cl.NewTimer(150 * time.Millisecond) + <-timer.Chan() + timer.Reset(50 * time.Millisecond) + <-timer.Chan() + }) + + done := make(chan struct{}) + go func() { + wg.Wait() + close(done) + }() + + // Wait for all the alarms to be waited on. 
+ for i := 0; i < total; i++ { + select { + case <-cl.Alarms(): + case <-time.After(longWait): + c.Fatalf("expected a notification on the alarms channel") + } + } + select { + case <-cl.Alarms(): + c.Fatalf("unexpected extra notification on alarms channel") + case <-time.After(shortWait): + } + + cl.Advance(150 * time.Millisecond) + + // Wait for the extra notification after reset. + select { + case <-cl.Alarms(): + case <-time.After(longWait): + c.Fatalf("expected a notification on the alarms channel") + } + + cl.Advance(50 * time.Millisecond) + + select { + case <-done: + case <-time.After(longWait): + c.Fatalf("expected all waits to complete") + } + +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/testing/dependencies.tsv juju-core-2.0.0/src/github.com/juju/testing/dependencies.tsv --- juju-core-2.0~beta15/src/github.com/juju/testing/dependencies.tsv 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/testing/dependencies.tsv 2016-10-13 14:32:08.000000000 +0000 @@ -0,0 +1,9 @@ +github.com/juju/errors git 1b5e39b83d1835fa480e0c2ddefb040ee82d58b3 2015-09-16T12:56:42Z +github.com/juju/loggo git 15901ae4de786d05edae84a27c93d3fbef66c91e 2016-08-04T22:15:26Z +github.com/juju/retry git 62c62032529169c7ec02fa48f93349604c345e1f 2015-10-29T02:48:21Z +github.com/juju/utils git d3ba4256601ad03bb65877692da522c2b3b08dc9 2016-09-26T12:49:57Z +github.com/juju/version git 4ae6172c00626779a5a462c3e3d22fc0e889431a 2016-06-03T19:49:58Z +golang.org/x/crypto git aedad9a179ec1ea11b7064c57cbc6dc30d7724ec 2015-08-30T18:06:42Z +gopkg.in/check.v1 git 4f90aeace3a26ad7021961c297b22c42160c7b25 2016-01-05T16:49:36Z +gopkg.in/mgo.v2 git 4d04138ffef2791c479c0c8bbffc30b34081b8d9 2015-10-26T16:34:53Z +gopkg.in/yaml.v2 git a83829b6f1293c91addabc89d0571c246397bbf4 2016-03-01T20:40:22Z diff -Nru juju-core-2.0~beta15/src/github.com/juju/testing/mgo.go juju-core-2.0.0/src/github.com/juju/testing/mgo.go --- juju-core-2.0~beta15/src/github.com/juju/testing/mgo.go 2016-08-16 
08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/testing/mgo.go 2016-10-13 14:32:08.000000000 +0000 @@ -27,8 +27,10 @@ "github.com/juju/errors" "github.com/juju/loggo" + "github.com/juju/retry" jc "github.com/juju/testing/checkers" "github.com/juju/utils" + "github.com/juju/utils/clock" "github.com/juju/version" gc "gopkg.in/check.v1" "gopkg.in/mgo.v2" @@ -277,9 +279,9 @@ } listening <- err } - // Capture the last 20 lines of output from mongod, to log + // Capture the last 100 lines of output from mongod, to log // in the event of unclean exit. - lines := readLastLines(prefix, io.MultiReader(&buf, out), 20) + lines := readLastLines(prefix, io.MultiReader(&buf, out), 100) err = server.Wait() exitErr, _ := err.(*exec.ExitError) if err == nil || exitErr != nil && exitErr.Exited() { @@ -420,7 +422,19 @@ gc.TestingT(t) } +type mgoLogger struct { + logger loggo.Logger +} + +// Output implements the mgo log_Logger interface. +func (s *mgoLogger) Output(calldepth int, message string) error { + s.logger.LogCallf(calldepth, loggo.TRACE, message) + return nil +} + func (s *MgoSuite) SetUpSuite(c *gc.C) { + mgo.SetLogger(&mgoLogger{loggo.GetLogger("mgo")}) + mgo.SetDebug(true) if MgoServer.addr == "" { c.Fatalf("No Mongo Server Address, MgoSuite tests must be run with MgoTestPackage") } @@ -481,11 +495,29 @@ err := MgoServer.Reset() c.Assert(err, jc.ErrorIsNil) utils.FastInsecureHash = false + mgo.SetDebug(false) + mgo.SetLogger(nil) } // Dial returns a new connection to the MongoDB server. func (inst *MgoInstance) Dial() (*mgo.Session, error) { - return mgo.DialWithInfo(inst.DialInfo()) + var session *mgo.Session + err := retry.Call(retry.CallArgs{ + Func: func() error { + var err error + session, err = mgo.DialWithInfo(inst.DialInfo()) + return err + }, + // Only interested in retrying the intermittent + // 'unexpected message'. 
+ IsFatalError: func(err error) bool { + return !strings.HasSuffix(err.Error(), "unexpected message") + }, + Delay: time.Millisecond, + Clock: clock.WallClock, + Attempts: 5, + }) + return session, err } // DialInfo returns information suitable for dialling the @@ -740,6 +772,7 @@ // If the server has already been destroyed for testing purposes, // just start it again. if inst.Addr() == "" { + logger.Debugf("restarting mongo instance") err := inst.Start(inst.certs) return errors.Annotatef(err, "inst.Start(%v) failed", inst.certs) } @@ -754,7 +787,17 @@ err := MgoServer.EnsureRunning() c.Assert(err, jc.ErrorIsNil) - if err = s.Session.Ping(); err != nil { + // If the Session we have doesn't know about + // the address of the server, then we should reconnect. + foundAddress := false + for _, addr := range s.Session.LiveServers() { + if addr == MgoServer.Addr() { + foundAddress = true + break + } + } + + if !foundAddress { // The test has killed the server - reconnect. s.Session.Close() s.Session, err = MgoServer.Dial() @@ -781,6 +824,38 @@ } } +// ProxiedSession represents a mongo session that's +// proxied through a TCPProxy instance. +type ProxiedSession struct { + *mgo.Session + *TCPProxy +} + +// NewProxiedSession returns a ProxiedSession instance that holds a +// mgo.Session that directs through a TCPProxy instance to the testing +// mongoDB server, and the proxy instance itself. This allows tests to +// check what happens when mongo connections are broken. +// +// The returned value should be closed after use. +func NewProxiedSession(c *gc.C) *ProxiedSession { + mgoInfo := MgoServer.DialInfo() + c.Assert(mgoInfo.Addrs, gc.HasLen, 1) + proxy := NewTCPProxy(c, mgoInfo.Addrs[0]) + mgoInfo.Addrs = []string{proxy.Addr()} + session, err := mgo.DialWithInfo(mgoInfo) + c.Assert(err, gc.IsNil) + return &ProxiedSession{ + Session: session, + TCPProxy: proxy, + } +} + +// Close closes s.Session and s.TCPProxy. 
+func (s *ProxiedSession) Close() { + s.Session.Close() + s.TCPProxy.Close() +} + // FindTCPPort finds an unused TCP port and returns it. // Use of this function has an inherent race condition - another // process may claim the port before we try to use it. diff -Nru juju-core-2.0~beta15/src/github.com/juju/testing/mgo_test.go juju-core-2.0.0/src/github.com/juju/testing/mgo_test.go --- juju-core-2.0~beta15/src/github.com/juju/testing/mgo_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/testing/mgo_test.go 2016-10-13 14:32:08.000000000 +0000 @@ -11,6 +11,7 @@ "gopkg.in/mgo.v2/bson" "github.com/juju/testing" + jc "github.com/juju/testing/checkers" ) type mgoSuite struct { @@ -84,3 +85,14 @@ c.Assert(err, gc.IsNil) session.Close() } + +func (s *mgoSuite) TestNewProxiedSession(c *gc.C) { + session := testing.NewProxiedSession(c) + defer session.Close() + err := session.Ping() + c.Assert(err, jc.ErrorIsNil) + session.CloseConns() + err = session.Ping() + // There's no consistent error in this case. + c.Assert(err, gc.Not(gc.Equals), nil) +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/txn/prune.go juju-core-2.0.0/src/github.com/juju/txn/prune.go --- juju-core-2.0~beta15/src/github.com/juju/txn/prune.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/txn/prune.go 2016-10-13 14:31:52.000000000 +0000 @@ -44,7 +44,7 @@ if required { started := time.Now() - err := pruneTxns(txnsPrune.Database, txns) + err := PruneTxns(txnsPrune.Database, txns) if err != nil { return err } @@ -114,16 +114,21 @@ return txnsName + ".prune" } -// pruneTxns removes applied and aborted entries from the txns +// PruneTxns removes applied and aborted entries from the txns // collection that are no longer referenced by any document. // // Warning: this is a fairly heavyweight activity and therefore should // be done infrequently. // +// PruneTxns is the low-level pruning function that does the actual +// pruning work. 
It only exposed for external utilities to +// call. Typical usage should be via Runner.MaybePruneTransactions +// which wraps PruneTxns, only calling it when really necessary. +// // TODO(mjs) - this knows way too much about mgo/txn's internals and // with a bit of luck something like this will one day be part of // mgo/txn. -func pruneTxns(db *mgo.Database, txns *mgo.Collection) error { +func PruneTxns(db *mgo.Database, txns *mgo.Collection) error { present := struct{}{} // Load the ids of all completed txns and all collections @@ -140,6 +145,7 @@ Ops []txn.Op `bson:"o"` } + logger.Debugf("loading all completed transactions") completed := bson.M{ "s": bson.M{"$in": []int{taborted, tapplied}}, } @@ -153,6 +159,8 @@ if err := iter.Close(); err != nil { return fmt.Errorf("failed to read all txns: %v", err) } + logger.Debugf("found %d completed transactions across %d collections", + len(txnIds), len(collNames)) // Transactions may also be referenced in the stash. collNames["txns.stash"] = present @@ -166,6 +174,7 @@ // removal of transactions run while pruning executes. // for collName := range collNames { + logger.Tracef("checking %s for transaction references", collName) coll := db.C(collName) var tDoc struct { Queue []string `bson:"txn-queue"` @@ -182,10 +191,11 @@ } // Remove the unreferenced transactions. - err := bulkRemoveTxns(txns, txnIds) - if err != nil { + logger.Debugf("%d transactions to remove", len(txnIds)) + if err := bulkRemoveTxns(txns, txnIds); err != nil { return fmt.Errorf("txn removal failed: %v", err) } + logger.Debugf("completed transactions pruned") return nil } @@ -199,23 +209,26 @@ // be significantly more efficient than removing one document per // remove query while also not trigger query document size limits. 
func bulkRemoveTxns(txns *mgo.Collection, txnIds map[bson.ObjectId]struct{}) error { + removeCount := 0 removeTxns := func(ids []bson.ObjectId) error { _, err := txns.RemoveAll(bson.M{"_id": bson.M{"$in": ids}}) switch err { case nil, mgo.ErrNotFound: // It's OK for txns to no longer exist. Another process // may have concurrently pruned them. + removeCount += len(ids) + logger.Tracef("%d completed transactions removed", removeCount) return nil default: return err } } - const chunkMax = 1024 - chunk := make([]bson.ObjectId, 0, chunkMax) + const chunkSize = 1e5 + chunk := make([]bson.ObjectId, 0, chunkSize) for txnId := range txnIds { chunk = append(chunk, txnId) - if len(chunk) == chunkMax { + if len(chunk) == chunkSize { if err := removeTxns(chunk); err != nil { return err } diff -Nru juju-core-2.0~beta15/src/github.com/juju/txn/prune_test.go juju-core-2.0.0/src/github.com/juju/txn/prune_test.go --- juju-core-2.0~beta15/src/github.com/juju/txn/prune_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/txn/prune_test.go 2016-10-13 14:31:52.000000000 +0000 @@ -7,7 +7,7 @@ "time" "github.com/juju/loggo" - jujutesting "github.com/juju/testing" + "github.com/juju/testing" jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" "gopkg.in/mgo.v2" @@ -18,7 +18,8 @@ ) type PruneSuite struct { - jujutesting.MgoSuite + testing.IsolationSuite + testing.MgoSuite db *mgo.Database txns *mgo.Collection runner *txn.Runner @@ -26,7 +27,19 @@ var _ = gc.Suite(&PruneSuite{}) +func (s *PruneSuite) SetUpSuite(c *gc.C) { + s.IsolationSuite.SetUpSuite(c) + s.MgoSuite.SetUpSuite(c) +} + +func (s *PruneSuite) TearDownSuite(c *gc.C) { + txn.SetChaos(txn.Chaos{}) + s.MgoSuite.TearDownSuite(c) + s.IsolationSuite.TearDownSuite(c) +} + func (s *PruneSuite) SetUpTest(c *gc.C) { + s.IsolationSuite.SetUpTest(c) s.MgoSuite.SetUpTest(c) txn.SetChaos(txn.Chaos{}) @@ -35,9 +48,9 @@ s.runner = txn.NewRunner(s.txns) } -func (s *PruneSuite) TearDownSuite(c *gc.C) { - 
txn.SetChaos(txn.Chaos{}) - s.MgoSuite.TearDownSuite(c) +func (s *PruneSuite) TearDownTest(c *gc.C) { + s.MgoSuite.TearDownTest(c) + s.IsolationSuite.TearDownTest(c) } func (s *PruneSuite) maybePrune(c *gc.C, pruneFactor float32) { @@ -385,7 +398,7 @@ c.Assert(err, jc.ErrorIsNil) var tw loggo.TestWriter - c.Assert(loggo.RegisterWriter("test", &tw, loggo.WARNING), gc.IsNil) + c.Assert(loggo.RegisterWriter("test", loggo.NewMinimumLevelWriter(&tw, loggo.WARNING)), gc.IsNil) defer loggo.RemoveWriter("test") // Pruning should occur when "last" pointer is broken. diff -Nru juju-core-2.0~beta15/src/github.com/juju/txn/txn.go juju-core-2.0.0/src/github.com/juju/txn/txn.go --- juju-core-2.0~beta15/src/github.com/juju/txn/txn.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/txn/txn.go 2016-10-13 14:31:52.000000000 +0000 @@ -14,9 +14,11 @@ import ( stderrors "errors" + "strings" "github.com/juju/loggo" "gopkg.in/mgo.v2" + "gopkg.in/mgo.v2/bson" "gopkg.in/mgo.v2/txn" ) @@ -76,11 +78,18 @@ MaybePruneTransactions(pruneFactor float32) error } +type txnRunner interface { + Run([]txn.Op, bson.ObjectId, interface{}) error + ResumeAll() error +} + type transactionRunner struct { db *mgo.Database transactionCollectionName string changeLogName string testHooks chan ([]TestHook) + + newRunner func() txnRunner } var _ Runner = (*transactionRunner)(nil) @@ -119,10 +128,11 @@ } txnRunner.testHooks = make(chan ([]TestHook), 1) txnRunner.testHooks <- nil + txnRunner.newRunner = txnRunner.newRunnerImpl return txnRunner } -func (tr *transactionRunner) newRunner() *txn.Runner { +func (tr *transactionRunner) newRunnerImpl() txnRunner { db := tr.db runner := txn.NewRunner(db.C(tr.transactionCollectionName)) runner.ChangeLog(db.C(tr.changeLogName)) @@ -145,7 +155,13 @@ if err := tr.RunTransaction(ops); err == nil { return nil } else if err != txn.ErrAborted { - return err + // Mongo very occasionally returns an intermittent + // "unexpected message" error. 
Retry those. + // However if this is the last time, return that error + // rather than the excessive contention error. + if !strings.HasSuffix(err.Error(), "unexpected message") || i == (nrRetries-1) { + return err + } } } return ErrExcessiveContention diff -Nru juju-core-2.0~beta15/src/github.com/juju/txn/txn_test.go juju-core-2.0.0/src/github.com/juju/txn/txn_test.go --- juju-core-2.0~beta15/src/github.com/juju/txn/txn_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/txn/txn_test.go 2016-10-13 14:31:52.000000000 +0000 @@ -4,6 +4,8 @@ package txn_test import ( + "errors" + "github.com/juju/testing" gc "gopkg.in/check.v1" "gopkg.in/mgo.v2" @@ -197,6 +199,7 @@ func (s *txnSuite) TestExcessiveContention(c *gc.C) { maxAttempt := 0 + // This keeps failing because the Assert is wrong. buildTxn := func(attempt int) ([]txn.Op, error) { maxAttempt = attempt ops := []txn.Op{{ @@ -247,3 +250,59 @@ err = s.collection.FindId("1").One(&found) c.Assert(found, gc.DeepEquals, doc) } + +func (s *txnSuite) TestRunFailureIntermittentUnexpectedMessage(c *gc.C) { + runner := jujutxn.NewRunner(jujutxn.RunnerParams{}) + fake := &fakeRunner{errors: []error{errors.New("unexpected message")}} + jujutxn.SetRunnerFunc(runner, fake.new) + tries := 0 + // Doesn't matter what this returns as long as it isn't an error. + buildTxn := func(attempt int) ([]txn.Op, error) { + tries++ + return nil, nil + } + err := runner.Run(buildTxn) + c.Check(err, gc.Equals, nil) + c.Check(tries, gc.Equals, 2) +} + +func (s *txnSuite) TestRunFailureAlwaysUnexpectedMessage(c *gc.C) { + runner := jujutxn.NewRunner(jujutxn.RunnerParams{}) + fake := &fakeRunner{errors: []error{ + errors.New("unexpected message"), + errors.New("unexpected message"), + errors.New("unexpected message"), + errors.New("unexpected message"), + }} + jujutxn.SetRunnerFunc(runner, fake.new) + tries := 0 + // Doesn't matter what this returns as long as it isn't an error. 
+ buildTxn := func(attempt int) ([]txn.Op, error) { + tries++ + return nil, nil + } + err := runner.Run(buildTxn) + c.Check(err, gc.ErrorMatches, "unexpected message") + c.Check(tries, gc.Equals, 3) +} + +type fakeRunner struct { + jujutxn.TxnRunner + errors []error +} + +// Since a new transaction runner is created each time the code +// is retried, we want to have a single source of errors, so make the +// fake a factory that returns itself. +func (f *fakeRunner) new() jujutxn.TxnRunner { + return f +} + +func (f *fakeRunner) Run([]txn.Op, bson.ObjectId, interface{}) error { + if len(f.errors) == 0 { + return nil + } + err := f.errors[0] + f.errors = f.errors[1:] + return err +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/utils/arch/arch.go juju-core-2.0.0/src/github.com/juju/utils/arch/arch.go --- juju-core-2.0~beta15/src/github.com/juju/utils/arch/arch.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/utils/arch/arch.go 2016-10-13 14:32:08.000000000 +0000 @@ -1,5 +1,5 @@ // Copyright 2014-2016 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. +// Licensed under the LGPLv3, see LICENCE file for details. package arch diff -Nru juju-core-2.0~beta15/src/github.com/juju/utils/arch/arch_test.go juju-core-2.0.0/src/github.com/juju/utils/arch/arch_test.go --- juju-core-2.0~beta15/src/github.com/juju/utils/arch/arch_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/utils/arch/arch_test.go 2016-10-13 14:32:08.000000000 +0000 @@ -1,5 +1,5 @@ // Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. +// Licensed under the LGPLv3, see LICENCE file for details. 
package arch_test diff -Nru juju-core-2.0~beta15/src/github.com/juju/utils/arch/package_test.go juju-core-2.0.0/src/github.com/juju/utils/arch/package_test.go --- juju-core-2.0~beta15/src/github.com/juju/utils/arch/package_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/utils/arch/package_test.go 2016-10-13 14:32:08.000000000 +0000 @@ -1,5 +1,5 @@ // Copyright 2013 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. +// Licensed under the LGPLv3, see LICENCE file for details. package arch_test diff -Nru juju-core-2.0~beta15/src/github.com/juju/utils/bzr/bzr.go juju-core-2.0.0/src/github.com/juju/utils/bzr/bzr.go --- juju-core-2.0~beta15/src/github.com/juju/utils/bzr/bzr.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/utils/bzr/bzr.go 2016-10-13 14:32:08.000000000 +0000 @@ -1,5 +1,5 @@ // Copyright 2013 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. +// Licensed under the LGPLv3, see LICENCE file for details. // Package bzr offers an interface to manage branches of the Bazaar VCS. package bzr diff -Nru juju-core-2.0~beta15/src/github.com/juju/utils/bzr/bzr_test.go juju-core-2.0.0/src/github.com/juju/utils/bzr/bzr_test.go --- juju-core-2.0~beta15/src/github.com/juju/utils/bzr/bzr_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/utils/bzr/bzr_test.go 2016-10-13 14:32:08.000000000 +0000 @@ -1,6 +1,6 @@ // Copyright 2014 Canonical Ltd. // Copyright 2014 Cloudbase Solutions SRL -// Licensed under the AGPLv3, see LICENCE file for details. +// Licensed under the LGPLv3, see LICENCE file for details. 
package bzr_test diff -Nru juju-core-2.0~beta15/src/github.com/juju/utils/bzr/bzr_unix_test.go juju-core-2.0.0/src/github.com/juju/utils/bzr/bzr_unix_test.go --- juju-core-2.0~beta15/src/github.com/juju/utils/bzr/bzr_unix_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/utils/bzr/bzr_unix_test.go 2016-10-13 14:32:08.000000000 +0000 @@ -1,6 +1,6 @@ // Copyright 2014 Canonical Ltd. // Copyright 2014 Cloudbase Solutions SRL -// Licensed under the AGPLv3, see LICENCE file for details. +// Licensed under the LGPLv3, see LICENCE file for details. // +build !windows diff -Nru juju-core-2.0~beta15/src/github.com/juju/utils/bzr/bzr_windows_test.go juju-core-2.0.0/src/github.com/juju/utils/bzr/bzr_windows_test.go --- juju-core-2.0~beta15/src/github.com/juju/utils/bzr/bzr_windows_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/utils/bzr/bzr_windows_test.go 2016-10-13 14:32:08.000000000 +0000 @@ -1,6 +1,6 @@ // Copyright 2014 Canonical Ltd. // Copyright 2014 Cloudbase Solutions SRL -// Licensed under the AGPLv3, see LICENCE file for details. +// Licensed under the LGPLv3, see LICENCE file for details. // +build windows diff -Nru juju-core-2.0~beta15/src/github.com/juju/utils/cache/cache.go juju-core-2.0.0/src/github.com/juju/utils/cache/cache.go --- juju-core-2.0~beta15/src/github.com/juju/utils/cache/cache.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/utils/cache/cache.go 2016-10-13 14:32:08.000000000 +0000 @@ -40,6 +40,17 @@ // Instead, we move items from old to new when they're accessed // and throw away the old map at refresh time. old, new map[Key]entry + + inFlight map[Key]*fetchCall +} + +// fetch represents an in-progress fetch call. If a cache Get request +// is made for an item that is currently being fetched, this will +// be used to avoid an extra call to the fetch function. 
+type fetchCall struct { + wg sync.WaitGroup + val interface{} + err error } // New returns a new Cache that will cache items for @@ -50,7 +61,8 @@ // time, so will expire immediately, causing the new // map to be created. return &Cache{ - maxAge: maxAge, + maxAge: maxAge, + inFlight: make(map[Key]*fetchCall), } } @@ -91,15 +103,37 @@ if val, ok := c.cachedValue(key, now); ok { return val, nil } + c.mu.Lock() + if f, ok := c.inFlight[key]; ok { + // There's already an in-flight request for the key, so wait + // for that to complete and use its results. + c.mu.Unlock() + f.wg.Wait() + // The value will have been added to the cache by the first fetch, + // so no need to add it here. + if f.err == nil { + return f.val, nil + } + return nil, errgo.Mask(f.err, errgo.Any) + } + var f fetchCall + f.wg.Add(1) + c.inFlight[key] = &f + // Mark the request as done when we return, and after + // the value has been added to the cache. + defer f.wg.Done() + // Fetch the data without the mutex held // so that one slow fetch doesn't hold up // all the other cache accesses. + c.mu.Unlock() val, err := fetch() - if err != nil { - // TODO consider caching cache misses. - return nil, errgo.Mask(err, errgo.Any) - } - if c.maxAge < 2*time.Nanosecond { + c.mu.Lock() + defer c.mu.Unlock() + + // Set the result in the fetchCall so that other calls can see it. + f.val, f.err = val, err + if err == nil && c.maxAge >= 2*time.Nanosecond { // If maxAge is < 2ns then the expiry code will panic because the // actual expiry time will be maxAge - a random value in the // interval [0, maxAge/2). If maxAge is < 2ns then this requires @@ -108,22 +142,16 @@ // This value is so small that there's no need to cache anyway, // which makes tests more obviously deterministic when using // a zero expiry time. - return val, nil + c.new[key] = entry{ + value: val, + expire: now.Add(c.maxAge - time.Duration(rand.Int63n(int64(c.maxAge/2)))), + } } - c.mu.Lock() - defer c.mu.Unlock() - // Add the new cache entry. 
Because it's quite likely that a - // large number of cache entries will be initially fetched at - // the same time, we want to avoid a thundering herd of fetches - // when they all expire at the same time, so we set the expiry - // time to a random interval between [now + t.maxAge/2, now + - // t.maxAge] and so they'll be spread over time without - // compromising the maxAge value. - c.new[key] = entry{ - value: val, - expire: now.Add(c.maxAge - time.Duration(rand.Int63n(int64(c.maxAge/2)))), + delete(c.inFlight, key) + if err == nil { + return f.val, nil } - return val, nil + return nil, errgo.Mask(f.err, errgo.Any) } // cachedValue returns any cached value for the given key diff -Nru juju-core-2.0~beta15/src/github.com/juju/utils/cache/cache_test.go juju-core-2.0.0/src/github.com/juju/utils/cache/cache_test.go --- juju-core-2.0~beta15/src/github.com/juju/utils/cache/cache_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/utils/cache/cache_test.go 2016-10-13 14:32:08.000000000 +0000 @@ -241,6 +241,52 @@ c.Assert(total, gc.Equals, N) } +func (*suite) TestSingleFlight(c *gc.C) { + p := cache.New(time.Minute) + start := make(chan struct{}) + var wg sync.WaitGroup + + wg.Add(1) + go func() { + defer wg.Done() + x, err := p.Get("x", func() (interface{}, error) { + start <- struct{}{} + <-start + return 99, nil + }) + c.Check(x, gc.Equals, 99) + c.Check(err, gc.Equals, nil) + + }() + // Wait for the fetch to start. + <-start + wg.Add(1) + go func() { + defer wg.Done() + x, err := p.Get("x", func() (interface{}, error) { + c.Errorf("fetch function unexpectedly called with inflight request") + return 55, nil + }) + c.Check(x, gc.Equals, 99) + c.Check(err, gc.Equals, nil) + }() + + // Check that we can still get other values while the + // other fetches are in progress. 
+ y, err := p.Get("y", func() (interface{}, error) { + return 88, nil + }) + c.Check(y, gc.Equals, 88) + c.Check(err, gc.Equals, nil) + + // Let the original fetch proceed, which should let the other one + // succeed too, but sleep for a little bit to let the second goroutine + // actually initiate its request. + time.Sleep(time.Millisecond) + start <- struct{}{} + wg.Wait() +} + var errUnexpectedFetch = errgo.New("fetch called unexpectedly") func fetchError(err error) func() (interface{}, error) { diff -Nru juju-core-2.0~beta15/src/github.com/juju/utils/clock/clock.go juju-core-2.0.0/src/github.com/juju/utils/clock/clock.go --- juju-core-2.0~beta15/src/github.com/juju/utils/clock/clock.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/utils/clock/clock.go 2016-10-13 14:32:08.000000000 +0000 @@ -1,5 +1,5 @@ // Copyright 2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. +// Licensed under the LGPLv3, see LICENCE file for details. package clock @@ -7,7 +7,6 @@ // Clock provides an interface for dealing with clocks. type Clock interface { - // Now returns the current clock time. Now() time.Time @@ -17,7 +16,11 @@ // AfterFunc waits for the duration to elapse and then calls f in its own goroutine. // It returns a Timer that can be used to cancel the call using its Stop method. - AfterFunc(time.Duration, func()) Timer + AfterFunc(d time.Duration, f func()) Timer + + // NewTimer creates a new Timer that will send the current time + // on its channel after at least duration d. + NewTimer(d time.Duration) Timer } // Alarm returns a channel that will have the time sent on it at some point @@ -32,6 +35,10 @@ // A Timer must be created with AfterFunc. // This interface follows time.Timer's methods but provides easier mocking. type Timer interface { + // When the Timer expires, the current time will be sent on the + // channel returned from Chan, unless the Timer was created by + // AfterFunc. 
+ Chan() <-chan time.Time // Reset changes the timer to expire after duration d. // It returns true if the timer had been active, false if diff -Nru juju-core-2.0~beta15/src/github.com/juju/utils/clock/wall.go juju-core-2.0.0/src/github.com/juju/utils/clock/wall.go --- juju-core-2.0~beta15/src/github.com/juju/utils/clock/wall.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/utils/clock/wall.go 2016-10-13 14:32:08.000000000 +0000 @@ -1,5 +1,5 @@ // Copyright 2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. +// Licensed under the LGPLv3, see LICENCE file for details. package clock @@ -10,6 +10,9 @@ // WallClock exposes wall-clock time via the Clock interface. var WallClock wallClock +// ensure that WallClock does actually implement the Clock interface. +var _ Clock = WallClock + // WallClock exposes wall-clock time as returned by time.Now. type wallClock struct{} @@ -18,12 +21,27 @@ return time.Now() } -// Alarm returns a channel that will send a value at some point after -// the supplied time. +// After implements Clock.After. func (wallClock) After(d time.Duration) <-chan time.Time { return time.After(d) } +// AfterFunc implements Clock.AfterFunc. func (wallClock) AfterFunc(d time.Duration, f func()) Timer { - return time.AfterFunc(d, f) + return wallTimer{time.AfterFunc(d, f)} +} + +// NewTimer implements Clock.NewTimer. +func (wallClock) NewTimer(d time.Duration) Timer { + return wallTimer{time.NewTimer(d)} +} + +// wallTimer implements the Timer interface. +type wallTimer struct { + *time.Timer +} + +// Chan implements Timer.Chan. 
+func (t wallTimer) Chan() <-chan time.Time { + return t.C } diff -Nru juju-core-2.0~beta15/src/github.com/juju/utils/dependencies.tsv juju-core-2.0.0/src/github.com/juju/utils/dependencies.tsv --- juju-core-2.0~beta15/src/github.com/juju/utils/dependencies.tsv 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/utils/dependencies.tsv 2016-10-13 14:32:08.000000000 +0000 @@ -1,9 +1,12 @@ -github.com/juju/cmd git ca63dd8ba13f8fbbbe16a917696a7ce68cc3dc0b 2016-03-31T03:26:51Z +github.com/juju/cmd git 7c57a7d5a20602e4563a83f2d530283ca0e6f481 2016-08-10T12:53:08Z github.com/juju/errors git 1b5e39b83d1835fa480e0c2ddefb040ee82d58b3 2015-09-16T12:56:42Z +github.com/juju/gnuflag git 4e76c56581859c14d9d87e1ddbe29e1c0f10195f 2016-08-09T16:52:14Z github.com/juju/httpprof git 14bf14c307672fd2456bdbf35d19cf0ccd3cf565 2014-12-17T16:00:36Z github.com/juju/httprequest git 89d547093c45e293599088cc63e805c6f1205dc0 2016-03-02T10:09:58Z -github.com/juju/loggo git 8477fc936adf0e382d680310047ca27e128a309a 2015-05-27T03:58:39Z -github.com/juju/testing git 162fafccebf20a4207ab93d63b986c230e3f4d2e 2016-04-04T09:43:17Z +github.com/juju/loggo git 15901ae4de786d05edae84a27c93d3fbef66c91e 2016-08-04T22:15:26Z +github.com/juju/retry git 62c62032529169c7ec02fa48f93349604c345e1f 2015-10-29T02:48:21Z +github.com/juju/testing git 7177264a582e2c00d08277eaa91d88f8eb0fd869 2016-09-26T12:59:16Z +github.com/juju/version git 4ae6172c00626779a5a462c3e3d22fc0e889431a 2016-06-03T19:49:58Z github.com/julienschmidt/httprouter git 77a895ad01ebc98a4dc95d8355bc825ce80a56f6 2015-10-13T22:55:20Z golang.org/x/crypto git aedad9a179ec1ea11b7064c57cbc6dc30d7724ec 2015-08-30T18:06:42Z golang.org/x/net git ea47fc708ee3e20177f3ca3716217c4ab75942cb 2015-08-29T23:03:18Z @@ -11,6 +14,5 @@ gopkg.in/errgo.v1 git 66cb46252b94c1f3d65646f54ee8043ab38d766c 2015-10-07T15:31:57Z gopkg.in/juju/names.v2 git e38bc90539f22af61a9c656d35068bd5f0a5b30a 2016-05-25T23:07:23Z gopkg.in/mgo.v2 git 
4d04138ffef2791c479c0c8bbffc30b34081b8d9 2015-10-26T16:34:53Z +gopkg.in/tomb.v1 git dd632973f1e7218eb1089048e0798ec9ae7dceb8 2014-10-24T13:56:13Z gopkg.in/yaml.v2 git a83829b6f1293c91addabc89d0571c246397bbf4 2016-03-01T20:40:22Z -launchpad.net/gnuflag bzr roger.peppe@canonical.com-20140716064605-pk32dnmfust02yab 13 -launchpad.net/tomb bzr gustavo@niemeyer.net-20140529072043-hzcrlnl3ygvg914q 18 diff -Nru juju-core-2.0~beta15/src/github.com/juju/utils/export_test.go juju-core-2.0.0/src/github.com/juju/utils/export_test.go --- juju-core-2.0~beta15/src/github.com/juju/utils/export_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/utils/export_test.go 2016-10-13 14:32:08.000000000 +0000 @@ -10,11 +10,11 @@ var ( GOMAXPROCS = &gomaxprocs NumCPU = &numCPU - Dial = dial - NetDial = &netDial ResolveSudoByFunc = resolveSudo ) func ExposeBackoffTimerDuration(bot *BackoffTimer) time.Duration { return bot.currentDuration } + +var IsLocalAddr = isLocalAddr diff -Nru juju-core-2.0~beta15/src/github.com/juju/utils/http-1_4.go juju-core-2.0.0/src/github.com/juju/utils/http-1_4.go --- juju-core-2.0~beta15/src/github.com/juju/utils/http-1_4.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/utils/http-1_4.go 2016-10-13 14:32:08.000000000 +0000 @@ -0,0 +1,24 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the LGPLv3, see LICENCE file for details. + +//+build !go1.7 + +package utils + +import ( + "fmt" + "net" + "net/http" +) + +// installHTTPDialShim patches the default HTTP transport so +// that it fails when an attempt is made to dial a non-local +// host. 
+func installHTTPDialShim(t *http.Transport) { + t.Dial = func(network, addr string) (net.Conn, error) { + if !OutgoingAccessAllowed && !isLocalAddr(addr) { + return nil, fmt.Errorf("access to address %q not allowed", addr) + } + return net.Dial(network, addr) + } +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/utils/http-1_7.go juju-core-2.0.0/src/github.com/juju/utils/http-1_7.go --- juju-core-2.0~beta15/src/github.com/juju/utils/http-1_7.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/utils/http-1_7.go 2016-10-13 14:32:08.000000000 +0000 @@ -0,0 +1,35 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the LGPLv3, see LICENCE file for details. + +//+build go1.7 + +package utils + +import ( + "context" + "fmt" + "net" + "net/http" + "time" +) + +var ctxtDialer = &net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, +} + +// installHTTPDialShim patches the default HTTP transport so +// that it fails when an attempt is made to dial a non-local +// host. +// +// Note that this is Go version dependent because in Go 1.7 and above, +// the DialContext field was introduced (and set in http.DefaultTransport) +// which overrides the Dial field. 
+func installHTTPDialShim(t *http.Transport) { + t.DialContext = func(ctxt context.Context, network, addr string) (net.Conn, error) { + if !OutgoingAccessAllowed && !isLocalAddr(addr) { + return nil, fmt.Errorf("access to address %q not allowed", addr) + } + return ctxtDialer.DialContext(ctxt, network, addr) + } +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/utils/http.go juju-core-2.0.0/src/github.com/juju/utils/http.go --- juju-core-2.0~beta15/src/github.com/juju/utils/http.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/utils/http.go 2016-10-13 14:32:08.000000000 +0000 @@ -17,12 +17,8 @@ var insecureClientMutex = sync.Mutex{} func init() { - // See https://code.google.com/p/go/issues/detail?id=4677 - // We need to force the connection to close each time so that we don't - // hit the above Go bug. defaultTransport := http.DefaultTransport.(*http.Transport) - defaultTransport.DisableKeepAlives = true - defaultTransport.Dial = dial + installHTTPDialShim(defaultTransport) registerFileProtocol(defaultTransport) } @@ -112,21 +108,10 @@ // localhost can be dialled. var OutgoingAccessAllowed = true -// Override for tests. 
-var netDial = net.Dial - -func dial(network, addr string) (net.Conn, error) { - if !OutgoingAccessAllowed { - host, _, err := net.SplitHostPort(addr) - if err != nil { - return netDial(network, addr) - } - if host != "localhost" { - ip := net.ParseIP(host) - if ip == nil || !ip.IsLoopback() { - return nil, fmt.Errorf("access to address %q not allowed", addr) - } - } +func isLocalAddr(addr string) bool { + host, _, err := net.SplitHostPort(addr) + if err != nil { + return false } - return netDial(network, addr) + return host == "localhost" || net.ParseIP(host).IsLoopback() } diff -Nru juju-core-2.0~beta15/src/github.com/juju/utils/http_test.go juju-core-2.0.0/src/github.com/juju/utils/http_test.go --- juju-core-2.0~beta15/src/github.com/juju/utils/http_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/utils/http_test.go 2016-10-13 14:32:08.000000000 +0000 @@ -5,14 +5,12 @@ import ( "encoding/base64" - "net" "net/http" "net/http/httptest" "os" "strings" "github.com/juju/testing" - jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" "github.com/juju/utils" @@ -144,52 +142,63 @@ } } -type dialSuite struct { +type httpDialSuite struct { testing.IsolationSuite } -var _ = gc.Suite(&dialSuite{}) +var _ = gc.Suite(&httpDialSuite{}) -func (s *dialSuite) TestDialRejectsNonLocal(c *gc.C) { +func (s *httpDialSuite) TestDefaultClientNoAccess(c *gc.C) { s.PatchValue(&utils.OutgoingAccessAllowed, false) - _, err := utils.Dial("tcp", "10.0.0.1:80") - c.Assert(err, gc.ErrorMatches, `access to address "10.0.0.1:80" not allowed`) - _, err = utils.Dial("tcp", "somehost:80") - c.Assert(err, gc.ErrorMatches, `access to address "somehost:80" not allowed`) + _, err := http.Get("http://0.1.2.3:1234") + c.Assert(err, gc.ErrorMatches, `.*access to address "0.1.2.3:1234" not allowed`) } -func (s *dialSuite) assertDial(c *gc.C, addr string) { - dialed := false - s.PatchValue(utils.NetDial, func(network, addr string) (net.Conn, error) { - 
c.Assert(network, gc.Equals, "tcp") - c.Assert(addr, gc.Equals, addr) - dialed = true - return nil, nil - }) - _, err := utils.Dial("tcp", addr) - c.Assert(err, gc.IsNil) - c.Assert(dialed, jc.IsTrue) +func (s *httpDialSuite) TestInsecureClientNoAccess(c *gc.C) { + s.PatchValue(&utils.OutgoingAccessAllowed, false) + _, err := utils.GetNonValidatingHTTPClient().Get("http://0.1.2.3:1234") + c.Assert(err, gc.ErrorMatches, `.*access to address "0.1.2.3:1234" not allowed`) } -func (s *dialSuite) TestDialAllowsNonLocal(c *gc.C) { - s.PatchValue(&utils.OutgoingAccessAllowed, true) - s.assertDial(c, "10.0.0.1:80") +func (s *httpDialSuite) TestSecureClientNoAccess(c *gc.C) { + s.PatchValue(&utils.OutgoingAccessAllowed, false) + _, err := utils.GetValidatingHTTPClient().Get("http://0.1.2.3:1234") + c.Assert(err, gc.ErrorMatches, `.*access to address "0.1.2.3:1234" not allowed`) } -func (s *dialSuite) TestDialAllowsLocal(c *gc.C) { - s.PatchValue(&utils.OutgoingAccessAllowed, false) - s.assertDial(c, "127.0.0.1:1234") - s.assertDial(c, "localhost:1234") +func (s *httpDialSuite) TestDefaultClientAllowAccess(c *gc.C) { + _, err := http.Get("http://0.1.2.3:1234") + c.Assert(err, gc.ErrorMatches, `Get http://0.1.2.3:1234: dial tcp 0.1.2.3:1234: connect: .*`) } -func (s *dialSuite) TestInsecureClientNoAccess(c *gc.C) { - s.PatchValue(&utils.OutgoingAccessAllowed, false) - _, err := utils.GetNonValidatingHTTPClient().Get("http://10.0.0.1:1234") - c.Assert(err, gc.ErrorMatches, `.*access to address "10.0.0.1:1234" not allowed`) +func (s *httpDialSuite) TestInsecureClientAllowAccess(c *gc.C) { + _, err := utils.GetNonValidatingHTTPClient().Get("http://0.1.2.3:1234") + c.Assert(err, gc.ErrorMatches, `Get http://0.1.2.3:1234: dial tcp 0.1.2.3:1234: connect: .*`) } -func (s *dialSuite) TestSecureClientNoAccess(c *gc.C) { - s.PatchValue(&utils.OutgoingAccessAllowed, false) - _, err := utils.GetValidatingHTTPClient().Get("http://10.0.0.1:1234") - c.Assert(err, gc.ErrorMatches, `.*access 
to address "10.0.0.1:1234" not allowed`) +func (s *httpDialSuite) TestSecureClientAllowAccess(c *gc.C) { + _, err := utils.GetValidatingHTTPClient().Get("http://0.1.2.3:1234") + c.Assert(err, gc.ErrorMatches, `Get http://0.1.2.3:1234: dial tcp 0.1.2.3:1234: connect: .*`) +} + +var isLocalAddrTests = []struct { + addr string + isLocal bool +}{ + {"localhost:456", true}, + {"127.0.0.1:1234", true}, + {"[::1]:4567", true}, + {"localhost:smtp", true}, + {"123.45.67.5", false}, + {"0.1.2.3", false}, + {"10.0.43.6:12345", false}, + {":456", false}, + {"12xz4.5.6", false}, +} + +func (s *httpDialSuite) TestIsLocalAddr(c *gc.C) { + for i, test := range isLocalAddrTests { + c.Logf("test %d: %v", i, test.addr) + c.Assert(utils.IsLocalAddr(test.addr), gc.Equals, test.isLocal) + } } diff -Nru juju-core-2.0~beta15/src/github.com/juju/utils/multireader.go juju-core-2.0.0/src/github.com/juju/utils/multireader.go --- juju-core-2.0~beta15/src/github.com/juju/utils/multireader.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/utils/multireader.go 2016-10-13 14:32:08.000000000 +0000 @@ -1,3 +1,6 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the LGPLv3, see LICENCE file for details. + package utils import ( diff -Nru juju-core-2.0~beta15/src/github.com/juju/utils/multireader_test.go juju-core-2.0.0/src/github.com/juju/utils/multireader_test.go --- juju-core-2.0~beta15/src/github.com/juju/utils/multireader_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/utils/multireader_test.go 2016-10-13 14:32:08.000000000 +0000 @@ -1,3 +1,6 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the LGPLv3, see LICENCE file for details. 
+ package utils_test import ( diff -Nru juju-core-2.0~beta15/src/github.com/juju/utils/naturalsort.go juju-core-2.0.0/src/github.com/juju/utils/naturalsort.go --- juju-core-2.0~beta15/src/github.com/juju/utils/naturalsort.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/utils/naturalsort.go 2016-10-13 14:32:08.000000000 +0000 @@ -1,5 +1,5 @@ // Copyright 2016 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. +// Licensed under the LGPLv3, see LICENCE file for details. package utils diff -Nru juju-core-2.0~beta15/src/github.com/juju/utils/naturalsort_test.go juju-core-2.0.0/src/github.com/juju/utils/naturalsort_test.go --- juju-core-2.0~beta15/src/github.com/juju/utils/naturalsort_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/utils/naturalsort_test.go 2016-10-13 14:32:08.000000000 +0000 @@ -1,5 +1,5 @@ // Copyright 2016 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. +// Licensed under the LGPLv3, see LICENCE file for details. package utils_test diff -Nru juju-core-2.0~beta15/src/github.com/juju/utils/os/os_darwin.go juju-core-2.0.0/src/github.com/juju/utils/os/os_darwin.go --- juju-core-2.0~beta15/src/github.com/juju/utils/os/os_darwin.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/utils/os/os_darwin.go 2016-10-13 14:32:08.000000000 +0000 @@ -1,5 +1,5 @@ // Copyright 2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. +// Licensed under the LGPLv3, see LICENCE file for details. package os diff -Nru juju-core-2.0~beta15/src/github.com/juju/utils/os/os.go juju-core-2.0.0/src/github.com/juju/utils/os/os.go --- juju-core-2.0~beta15/src/github.com/juju/utils/os/os.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/utils/os/os.go 2016-10-13 14:32:08.000000000 +0000 @@ -1,5 +1,5 @@ // Copyright 2015 Canonical Ltd. 
-// Licensed under the AGPLv3, see LICENCE file for details. +// Licensed under the LGPLv3, see LICENCE file for details. // Package os provides access to operating system related configuration. package os @@ -14,7 +14,7 @@ Windows OSX CentOS - Arch + GenericLinux ) func (t OSType) String() string { @@ -27,8 +27,26 @@ return "OSX" case CentOS: return "CentOS" - case Arch: - return "Arch" + case GenericLinux: + return "GenericLinux" } return "Unknown" } + +// EquivalentTo returns true if the OS type is equivalent to another +// OS type. +func (t OSType) EquivalentTo(t2 OSType) bool { + if t == t2 { + return true + } + return t.IsLinux() && t2.IsLinux() +} + +// IsLinux returns true if the OS type is a Linux variant. +func (t OSType) IsLinux() bool { + switch t { + case Ubuntu, CentOS, GenericLinux: + return true + } + return false +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/utils/os/os_linux.go juju-core-2.0.0/src/github.com/juju/utils/os/os_linux.go --- juju-core-2.0~beta15/src/github.com/juju/utils/os/os_linux.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/utils/os/os_linux.go 2016-10-13 14:32:08.000000000 +0000 @@ -1,5 +1,5 @@ // Copyright 2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. +// Licensed under the LGPLv3, see LICENCE file for details. 
package os @@ -29,10 +29,6 @@ return os } -var defaultVersionIDs = map[string]string{ - "arch": "rolling", -} - func updateOS(f string) (OSType, error) { values, err := ReadOSRelease(f) if err != nil { @@ -41,12 +37,10 @@ switch values["ID"] { case strings.ToLower(Ubuntu.String()): return Ubuntu, nil - case strings.ToLower(Arch.String()): - return Arch, nil case strings.ToLower(CentOS.String()): return CentOS, nil default: - return Unknown, nil + return GenericLinux, nil } } @@ -67,15 +61,8 @@ } values[c[0]] = strings.Trim(c[1], "\t '\"") } - id, ok := values["ID"] - if !ok { + if _, ok := values["ID"]; !ok { return nil, errors.New("OS release file is missing ID") } - if _, ok := values["VERSION_ID"]; !ok { - values["VERSION_ID"], ok = defaultVersionIDs[id] - if !ok { - return nil, errors.New("OS release file is missing VERSION_ID") - } - } return values, nil } diff -Nru juju-core-2.0~beta15/src/github.com/juju/utils/os/os_test.go juju-core-2.0.0/src/github.com/juju/utils/os/os_test.go --- juju-core-2.0~beta15/src/github.com/juju/utils/os/os_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/utils/os/os_test.go 2016-10-13 14:32:08.000000000 +0000 @@ -1,11 +1,12 @@ // Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. +// Licensed under the LGPLv3, see LICENCE file for details. package os import ( "runtime" + jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" ) @@ -22,10 +23,35 @@ case "darwin": c.Assert(os, gc.Equals, OSX) case "linux": - if os != Ubuntu && os != CentOS && os != Arch { + // TODO(mjs) - this should really do more by patching out + // osReleaseFile and testing the corner cases. 
+ switch os { + case Ubuntu, CentOS, GenericLinux: + default: c.Fatalf("unknown linux version: %v", os) } default: c.Fatalf("unsupported operating system: %v", runtime.GOOS) } } + +func (s *osSuite) TestEquivalentTo(c *gc.C) { + c.Check(Ubuntu.EquivalentTo(CentOS), jc.IsTrue) + c.Check(Ubuntu.EquivalentTo(GenericLinux), jc.IsTrue) + c.Check(GenericLinux.EquivalentTo(Ubuntu), jc.IsTrue) + c.Check(CentOS.EquivalentTo(CentOS), jc.IsTrue) + + c.Check(OSX.EquivalentTo(Ubuntu), jc.IsFalse) + c.Check(OSX.EquivalentTo(Windows), jc.IsFalse) + c.Check(GenericLinux.EquivalentTo(OSX), jc.IsFalse) +} + +func (s *osSuite) TestIsLinux(c *gc.C) { + c.Check(Ubuntu.IsLinux(), jc.IsTrue) + c.Check(CentOS.IsLinux(), jc.IsTrue) + c.Check(GenericLinux.IsLinux(), jc.IsTrue) + + c.Check(OSX.IsLinux(), jc.IsFalse) + c.Check(Windows.IsLinux(), jc.IsFalse) + c.Check(Unknown.IsLinux(), jc.IsFalse) +} diff -Nru juju-core-2.0~beta15/src/github.com/juju/utils/os/os_unknown.go juju-core-2.0.0/src/github.com/juju/utils/os/os_unknown.go --- juju-core-2.0~beta15/src/github.com/juju/utils/os/os_unknown.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/utils/os/os_unknown.go 2016-10-13 14:32:08.000000000 +0000 @@ -1,5 +1,5 @@ // Copyright 2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. +// Licensed under the LGPLv3, see LICENCE file for details. // +build !windows,!darwin,!linux diff -Nru juju-core-2.0~beta15/src/github.com/juju/utils/os/os_windows.go juju-core-2.0.0/src/github.com/juju/utils/os/os_windows.go --- juju-core-2.0~beta15/src/github.com/juju/utils/os/os_windows.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/utils/os/os_windows.go 2016-10-13 14:32:08.000000000 +0000 @@ -1,5 +1,5 @@ // Copyright 2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. +// Licensed under the LGPLv3, see LICENCE file for details. 
package os diff -Nru juju-core-2.0~beta15/src/github.com/juju/utils/os/package_test.go juju-core-2.0.0/src/github.com/juju/utils/os/package_test.go --- juju-core-2.0~beta15/src/github.com/juju/utils/os/package_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/utils/os/package_test.go 2016-10-13 14:32:08.000000000 +0000 @@ -1,5 +1,5 @@ // Copyright 2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. +// Licensed under the LGPLv3, see LICENCE file for details. package os diff -Nru juju-core-2.0~beta15/src/github.com/juju/utils/parallel/try.go juju-core-2.0.0/src/github.com/juju/utils/parallel/try.go --- juju-core-2.0~beta15/src/github.com/juju/utils/parallel/try.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/utils/parallel/try.go 2016-10-13 14:32:08.000000000 +0000 @@ -8,7 +8,7 @@ "io" "sync" - "launchpad.net/tomb" + "gopkg.in/tomb.v1" ) var ( diff -Nru juju-core-2.0~beta15/src/github.com/juju/utils/randomstring.go juju-core-2.0.0/src/github.com/juju/utils/randomstring.go --- juju-core-2.0~beta15/src/github.com/juju/utils/randomstring.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/utils/randomstring.go 2016-10-13 14:32:08.000000000 +0000 @@ -1,5 +1,5 @@ // Copyright 2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. +// Licensed under the LGPLv3, see LICENCE file for details. package utils diff -Nru juju-core-2.0~beta15/src/github.com/juju/utils/randomstring_test.go juju-core-2.0.0/src/github.com/juju/utils/randomstring_test.go --- juju-core-2.0~beta15/src/github.com/juju/utils/randomstring_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/utils/randomstring_test.go 2016-10-13 14:32:08.000000000 +0000 @@ -1,6 +1,6 @@ // Copyright 2015 Canonical Ltd. // Copyright 2015 Cloudbase Solutions SRL -// Licensed under the AGPLv3, see LICENCE file for details. 
+// Licensed under the LGPLv3, see LICENCE file for details. package utils_test diff -Nru juju-core-2.0~beta15/src/github.com/juju/utils/series/export_linux_test.go juju-core-2.0.0/src/github.com/juju/utils/series/export_linux_test.go --- juju-core-2.0~beta15/src/github.com/juju/utils/series/export_linux_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/utils/series/export_linux_test.go 2016-10-13 14:32:08.000000000 +0000 @@ -1,5 +1,5 @@ // Copyright 2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. +// Licensed under the LGPLv3, see LICENCE file for details. package series diff -Nru juju-core-2.0~beta15/src/github.com/juju/utils/series/export_test.go juju-core-2.0.0/src/github.com/juju/utils/series/export_test.go --- juju-core-2.0~beta15/src/github.com/juju/utils/series/export_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/utils/series/export_test.go 2016-10-13 14:32:08.000000000 +0000 @@ -1,5 +1,5 @@ // Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. +// Licensed under the LGPLv3, see LICENCE file for details. package series diff -Nru juju-core-2.0~beta15/src/github.com/juju/utils/series/export_windows_test.go juju-core-2.0.0/src/github.com/juju/utils/series/export_windows_test.go --- juju-core-2.0~beta15/src/github.com/juju/utils/series/export_windows_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/utils/series/export_windows_test.go 2016-10-13 14:32:08.000000000 +0000 @@ -1,6 +1,6 @@ // Copyright 2015 Canonical Ltd. // Copyright 2015 Cloudbase Solutions SRL -// Licensed under the AGPLv3, see LICENCE file for details. +// Licensed under the LGPLv3, see LICENCE file for details. 
package series diff -Nru juju-core-2.0~beta15/src/github.com/juju/utils/series/package_test.go juju-core-2.0.0/src/github.com/juju/utils/series/package_test.go --- juju-core-2.0~beta15/src/github.com/juju/utils/series/package_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/utils/series/package_test.go 2016-10-13 14:32:08.000000000 +0000 @@ -1,5 +1,5 @@ // Copyright 2013 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. +// Licensed under the LGPLv3, see LICENCE file for details. package series_test diff -Nru juju-core-2.0~beta15/src/github.com/juju/utils/series/series_darwin.go juju-core-2.0.0/src/github.com/juju/utils/series/series_darwin.go --- juju-core-2.0~beta15/src/github.com/juju/utils/series/series_darwin.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/utils/series/series_darwin.go 2016-10-13 14:32:08.000000000 +0000 @@ -1,5 +1,5 @@ // Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. +// Licensed under the LGPLv3, see LICENCE file for details. package series diff -Nru juju-core-2.0~beta15/src/github.com/juju/utils/series/series_darwin_test.go juju-core-2.0.0/src/github.com/juju/utils/series/series_darwin_test.go --- juju-core-2.0~beta15/src/github.com/juju/utils/series/series_darwin_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/utils/series/series_darwin_test.go 2016-10-13 14:32:08.000000000 +0000 @@ -1,5 +1,5 @@ // Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. +// Licensed under the LGPLv3, see LICENCE file for details. 
package series diff -Nru juju-core-2.0~beta15/src/github.com/juju/utils/series/series.go juju-core-2.0.0/src/github.com/juju/utils/series/series.go --- juju-core-2.0~beta15/src/github.com/juju/utils/series/series.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/utils/series/series.go 2016-10-13 14:32:08.000000000 +0000 @@ -1,5 +1,5 @@ // Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. +// Licensed under the LGPLv3, see LICENCE file for details. // series provides helpers for determining the series of // a host, and translating from os to series. @@ -15,11 +15,15 @@ "github.com/juju/utils/os" ) -var logger = loggo.GetLogger("juju.juju.series") - -var HostSeries = hostSeries +const ( + genericLinuxSeries = "genericlinux" + genericLinuxVersion = "genericlinux" +) var ( + logger = loggo.GetLogger("juju.juju.series") + HostSeries = hostSeries + seriesOnce sync.Once series string // filled in by the first call to hostSeries ) diff -Nru juju-core-2.0~beta15/src/github.com/juju/utils/series/series_linux.go juju-core-2.0.0/src/github.com/juju/utils/series/series_linux.go --- juju-core-2.0~beta15/src/github.com/juju/utils/series/series_linux.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/utils/series/series_linux.go 2016-10-13 14:32:08.000000000 +0000 @@ -1,5 +1,5 @@ // Copyright 2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. +// Licensed under the LGPLv3, see LICENCE file for details. 
package series @@ -33,13 +33,11 @@ switch values["ID"] { case strings.ToLower(jujuos.Ubuntu.String()): return getValue(ubuntuSeries, values["VERSION_ID"]) - case strings.ToLower(jujuos.Arch.String()): - return getValue(archSeries, values["VERSION_ID"]) case strings.ToLower(jujuos.CentOS.String()): codename := fmt.Sprintf("%s%s", values["ID"], values["VERSION_ID"]) return getValue(centosSeries, codename) default: - return "unknown", nil + return genericLinuxSeries, nil } } @@ -49,7 +47,7 @@ return serie, nil } } - return "unknown", errors.New("Could not determine series") + return "unknown", errors.New("could not determine series") } // ReleaseVersion looks for the value of VERSION_ID in the content of diff -Nru juju-core-2.0~beta15/src/github.com/juju/utils/series/series_linux_test.go juju-core-2.0.0/src/github.com/juju/utils/series/series_linux_test.go --- juju-core-2.0~beta15/src/github.com/juju/utils/series/series_linux_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/utils/series/series_linux_test.go 2016-10-13 14:32:08.000000000 +0000 @@ -1,5 +1,5 @@ // Copyright 2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. +// Licensed under the LGPLv3, see LICENCE file for details. 
package series_test @@ -168,17 +168,6 @@ "centos7", "", }, { - `NAME="Arch Linux" -ID=arch -PRETTY_NAME="Arch Linux" -ANSI_COLOR="0;36" -HOME_URL="https://www.archlinux.org/" -SUPPORT_URL="https://bbs.archlinux.org/" -BUG_REPORT_URL="https://bugs.archlinux.org/" -`, - "arch", - "", -}, { `NAME="Ubuntu" VERSION="14.04.1 LTS, Trusty Tahr" ID=ubuntu @@ -192,22 +181,46 @@ "trusty", "", }, { + `NAME="Arch Linux" +ID=arch +PRETTY_NAME="Arch Linux" +ANSI_COLOR="0;36" +HOME_URL="https://www.archlinux.org/" +SUPPORT_URL="https://bbs.archlinux.org/" +BUG_REPORT_URL="https://bugs.archlinux.org/" +`, + "genericlinux", "", - "unknown", - "OS release file is missing ID", }, { - `NAME="CentOS Linux" -ID="centos" + `NAME=Fedora +VERSION="24 (Twenty Four)" +ID=fedora +VERSION_ID=24 +PRETTY_NAME="Fedora 24 (Twenty Four)" +CPE_NAME="cpe:/o:fedoraproject:fedora:24" +HOME_URL="https://fedoraproject.org/" +BUG_REPORT_URL="https://bugzilla.redhat.com/" `, - "unknown", - "OS release file is missing VERSION_ID", + "genericlinux", + "", }, { `NAME="SuSE Linux" ID="SuSE" VERSION_ID="12" `, - "unknown", + "genericlinux", "", +}, { + + "", + "unknown", + "OS release file is missing ID", +}, { + `NAME="CentOS Linux" +ID="centos" +`, + "unknown", + "could not determine series", }, } diff -Nru juju-core-2.0~beta15/src/github.com/juju/utils/series/series_nonlinux.go juju-core-2.0.0/src/github.com/juju/utils/series/series_nonlinux.go --- juju-core-2.0~beta15/src/github.com/juju/utils/series/series_nonlinux.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/utils/series/series_nonlinux.go 2016-10-13 14:32:08.000000000 +0000 @@ -1,5 +1,5 @@ // Copyright 2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. +// Licensed under the LGPLv3, see LICENCE file for details. 
// +build !linux diff -Nru juju-core-2.0~beta15/src/github.com/juju/utils/series/series_test.go juju-core-2.0.0/src/github.com/juju/utils/series/series_test.go --- juju-core-2.0~beta15/src/github.com/juju/utils/series/series_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/utils/series/series_test.go 2016-10-13 14:32:08.000000000 +0000 @@ -1,5 +1,5 @@ // Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. +// Licensed under the LGPLv3, see LICENCE file for details. package series_test diff -Nru juju-core-2.0~beta15/src/github.com/juju/utils/series/series_windows.go juju-core-2.0.0/src/github.com/juju/utils/series/series_windows.go --- juju-core-2.0~beta15/src/github.com/juju/utils/series/series_windows.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/utils/series/series_windows.go 2016-10-13 14:32:08.000000000 +0000 @@ -1,6 +1,6 @@ // Copyright 2015 Canonical Ltd. // Copyright 2015 Cloudbase Solutions SRL -// Licensed under the AGPLv3, see LICENCE file for details. +// Licensed under the LGPLv3, see LICENCE file for details. package series diff -Nru juju-core-2.0~beta15/src/github.com/juju/utils/series/series_windows_test.go juju-core-2.0.0/src/github.com/juju/utils/series/series_windows_test.go --- juju-core-2.0~beta15/src/github.com/juju/utils/series/series_windows_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/utils/series/series_windows_test.go 2016-10-13 14:32:08.000000000 +0000 @@ -1,6 +1,6 @@ // Copyright 2015 Canonical Ltd. // Copyright 2015 Cloudbase Solutions SRL -// Licensed under the AGPLv3, see LICENCE file for details. +// Licensed under the LGPLv3, see LICENCE file for details. 
package series_test diff -Nru juju-core-2.0~beta15/src/github.com/juju/utils/series/supportedseries.go juju-core-2.0.0/src/github.com/juju/utils/series/supportedseries.go --- juju-core-2.0~beta15/src/github.com/juju/utils/series/supportedseries.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/utils/series/supportedseries.go 2016-10-13 14:32:08.000000000 +0000 @@ -1,5 +1,5 @@ // Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. +// Licensed under the LGPLv3, see LICENCE file for details. package series @@ -47,38 +47,34 @@ return ok } -var defaultVersionIDs = map[string]string{ - "arch": "rolling", -} - // seriesVersions provides a mapping between series names and versions. // The values here are current as of the time of writing. On Ubuntu systems, we update // these values from /usr/share/distro-info/ubuntu.csv to ensure we have the latest values. // On non-Ubuntu systems, these values provide a nice fallback option. // Exported so tests can change the values to ensure the distro-info lookup works. 
var seriesVersions = map[string]string{ - "precise": "12.04", - "quantal": "12.10", - "raring": "13.04", - "saucy": "13.10", - "trusty": "14.04", - "utopic": "14.10", - "vivid": "15.04", - "wily": "15.10", - "xenial": "16.04", - "win2008r2": "win2008r2", - "win2012hvr2": "win2012hvr2", - "win2012hv": "win2012hv", - "win2012r2": "win2012r2", - "win2012": "win2012", - "win2016": "win2016", - "win2016nano": "win2016nano", - "win7": "win7", - "win8": "win8", - "win81": "win81", - "win10": "win10", - "centos7": "centos7", - "arch": "rolling", + "precise": "12.04", + "quantal": "12.10", + "raring": "13.04", + "saucy": "13.10", + "trusty": "14.04", + "utopic": "14.10", + "vivid": "15.04", + "wily": "15.10", + "xenial": "16.04", + "win2008r2": "win2008r2", + "win2012hvr2": "win2012hvr2", + "win2012hv": "win2012hv", + "win2012r2": "win2012r2", + "win2012": "win2012", + "win2016": "win2016", + "win2016nano": "win2016nano", + "win7": "win7", + "win8": "win8", + "win81": "win81", + "win10": "win10", + "centos7": "centos7", + genericLinuxSeries: genericLinuxVersion, } // versionSeries provides a mapping between versions and series names. 
@@ -88,10 +84,6 @@ "centos7": "centos7", } -var archSeries = map[string]string{ - "arch": "rolling", -} - var ubuntuSeries = map[string]string{ "precise": "12.04", "quantal": "12.10", @@ -189,8 +181,8 @@ if _, ok := centosSeries[series]; ok { return os.CentOS, nil } - if _, ok := archSeries[series]; ok { - return os.Arch, nil + if series == genericLinuxSeries { + return os.GenericLinux, nil } for _, val := range windowsVersions { if val == series { diff -Nru juju-core-2.0~beta15/src/github.com/juju/utils/series/supportedseries_linux_test.go juju-core-2.0.0/src/github.com/juju/utils/series/supportedseries_linux_test.go --- juju-core-2.0~beta15/src/github.com/juju/utils/series/supportedseries_linux_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/utils/series/supportedseries_linux_test.go 2016-10-13 14:32:08.000000000 +0000 @@ -1,5 +1,5 @@ // Copyright 2013 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. +// Licensed under the LGPLv3, see LICENCE file for details. package series_test diff -Nru juju-core-2.0~beta15/src/github.com/juju/utils/series/supportedseries_test.go juju-core-2.0.0/src/github.com/juju/utils/series/supportedseries_test.go --- juju-core-2.0~beta15/src/github.com/juju/utils/series/supportedseries_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/utils/series/supportedseries_test.go 2016-10-13 14:32:08.000000000 +0000 @@ -1,5 +1,5 @@ // Copyright 2013 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. +// Licensed under the LGPLv3, see LICENCE file for details. 
package series_test @@ -44,8 +44,8 @@ series: "centos7", want: os.CentOS, }, { - series: "arch", - want: os.Arch, + series: "genericlinux", + want: os.GenericLinux, }, { series: "", err: "series \"\" not valid", @@ -72,13 +72,13 @@ func setSeriesTestData() { series.SetSeriesVersions(map[string]string{ - "trusty": "14.04", - "utopic": "14.10", - "win7": "win7", - "win81": "win81", - "win2016nano": "win2016nano", - "centos7": "centos7", - "arch": "rolling", + "trusty": "14.04", + "utopic": "14.10", + "win7": "win7", + "win81": "win81", + "win2016nano": "win2016nano", + "centos7": "centos7", + "genericlinux": "genericlinux", }) } @@ -90,8 +90,8 @@ c.Assert(supported, jc.SameContents, []string{"win7", "win81", "win2016nano"}) supported = series.OSSupportedSeries(os.CentOS) c.Assert(supported, jc.SameContents, []string{"centos7"}) - supported = series.OSSupportedSeries(os.Arch) - c.Assert(supported, jc.SameContents, []string{"arch"}) + supported = series.OSSupportedSeries(os.GenericLinux) + c.Assert(supported, jc.SameContents, []string{"genericlinux"}) } func (s *supportedSeriesSuite) TestVersionSeriesValid(c *gc.C) { diff -Nru juju-core-2.0~beta15/src/github.com/juju/utils/series/supportedseries_windows_test.go juju-core-2.0.0/src/github.com/juju/utils/series/supportedseries_windows_test.go --- juju-core-2.0~beta15/src/github.com/juju/utils/series/supportedseries_windows_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/utils/series/supportedseries_windows_test.go 2016-10-13 14:32:08.000000000 +0000 @@ -1,6 +1,6 @@ // Copyright 2014 Canonical Ltd. // Copyright 2014 Cloudbase Solutions SRL -// Licensed under the AGPLv3, see LICENCE file for details. +// Licensed under the LGPLv3, see LICENCE file for details. 
package series_test @@ -29,7 +29,7 @@ func (s *supportedSeriesWindowsSuite) TestSupportedSeries(c *gc.C) { expectedSeries := []string{ - "arch", + "genericlinux", "centos7", "precise", diff -Nru juju-core-2.0~beta15/src/github.com/juju/utils/ssh/authorisedkeys.go juju-core-2.0.0/src/github.com/juju/utils/ssh/authorisedkeys.go --- juju-core-2.0~beta15/src/github.com/juju/utils/ssh/authorisedkeys.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/utils/ssh/authorisedkeys.go 2016-10-13 14:32:08.000000000 +0000 @@ -1,5 +1,5 @@ // Copyright 2013 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. +// Licensed under the LGPLv3, see LICENCE file for details. package ssh diff -Nru juju-core-2.0~beta15/src/github.com/juju/utils/ssh/authorisedkeys_test.go juju-core-2.0.0/src/github.com/juju/utils/ssh/authorisedkeys_test.go --- juju-core-2.0~beta15/src/github.com/juju/utils/ssh/authorisedkeys_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/utils/ssh/authorisedkeys_test.go 2016-10-13 14:32:08.000000000 +0000 @@ -1,5 +1,5 @@ // Copyright 2013 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. +// Licensed under the LGPLv3, see LICENCE file for details. package ssh_test diff -Nru juju-core-2.0~beta15/src/github.com/juju/utils/ssh/clientkeys.go juju-core-2.0.0/src/github.com/juju/utils/ssh/clientkeys.go --- juju-core-2.0~beta15/src/github.com/juju/utils/ssh/clientkeys.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/utils/ssh/clientkeys.go 2016-10-13 14:32:08.000000000 +0000 @@ -1,5 +1,5 @@ // Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. +// Licensed under the LGPLv3, see LICENCE file for details. 
package ssh diff -Nru juju-core-2.0~beta15/src/github.com/juju/utils/ssh/clientkeys_test.go juju-core-2.0.0/src/github.com/juju/utils/ssh/clientkeys_test.go --- juju-core-2.0~beta15/src/github.com/juju/utils/ssh/clientkeys_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/utils/ssh/clientkeys_test.go 2016-10-13 14:32:08.000000000 +0000 @@ -1,5 +1,5 @@ // Copyright 2013 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. +// Licensed under the LGPLv3, see LICENCE file for details. package ssh_test diff -Nru juju-core-2.0~beta15/src/github.com/juju/utils/ssh/export_test.go juju-core-2.0.0/src/github.com/juju/utils/ssh/export_test.go --- juju-core-2.0~beta15/src/github.com/juju/utils/ssh/export_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/utils/ssh/export_test.go 2016-10-13 14:32:08.000000000 +0000 @@ -1,5 +1,5 @@ // Copyright 2013 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. +// Licensed under the LGPLv3, see LICENCE file for details. package ssh diff -Nru juju-core-2.0~beta15/src/github.com/juju/utils/ssh/fakes_test.go juju-core-2.0.0/src/github.com/juju/utils/ssh/fakes_test.go --- juju-core-2.0~beta15/src/github.com/juju/utils/ssh/fakes_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/utils/ssh/fakes_test.go 2016-10-13 14:32:08.000000000 +0000 @@ -1,5 +1,5 @@ // Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. +// Licensed under the LGPLv3, see LICENCE file for details. 
package ssh_test diff -Nru juju-core-2.0~beta15/src/github.com/juju/utils/ssh/fingerprint.go juju-core-2.0.0/src/github.com/juju/utils/ssh/fingerprint.go --- juju-core-2.0~beta15/src/github.com/juju/utils/ssh/fingerprint.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/utils/ssh/fingerprint.go 2016-10-13 14:32:08.000000000 +0000 @@ -1,5 +1,5 @@ // Copyright 2013 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. +// Licensed under the LGPLv3, see LICENCE file for details. package ssh diff -Nru juju-core-2.0~beta15/src/github.com/juju/utils/ssh/fingerprint_test.go juju-core-2.0.0/src/github.com/juju/utils/ssh/fingerprint_test.go --- juju-core-2.0~beta15/src/github.com/juju/utils/ssh/fingerprint_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/utils/ssh/fingerprint_test.go 2016-10-13 14:32:08.000000000 +0000 @@ -1,5 +1,5 @@ // Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. +// Licensed under the LGPLv3, see LICENCE file for details. package ssh_test diff -Nru juju-core-2.0~beta15/src/github.com/juju/utils/ssh/generate.go juju-core-2.0.0/src/github.com/juju/utils/ssh/generate.go --- juju-core-2.0~beta15/src/github.com/juju/utils/ssh/generate.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/utils/ssh/generate.go 2016-10-13 14:32:08.000000000 +0000 @@ -1,5 +1,5 @@ // Copyright 2013 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. +// Licensed under the LGPLv3, see LICENCE file for details. 
package ssh diff -Nru juju-core-2.0~beta15/src/github.com/juju/utils/ssh/generate_test.go juju-core-2.0.0/src/github.com/juju/utils/ssh/generate_test.go --- juju-core-2.0~beta15/src/github.com/juju/utils/ssh/generate_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/utils/ssh/generate_test.go 2016-10-13 14:32:08.000000000 +0000 @@ -1,5 +1,5 @@ // Copyright 2013 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. +// Licensed under the LGPLv3, see LICENCE file for details. package ssh_test diff -Nru juju-core-2.0~beta15/src/github.com/juju/utils/ssh/package_test.go juju-core-2.0.0/src/github.com/juju/utils/ssh/package_test.go --- juju-core-2.0~beta15/src/github.com/juju/utils/ssh/package_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/utils/ssh/package_test.go 2016-10-13 14:32:08.000000000 +0000 @@ -1,5 +1,5 @@ // Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. +// Licensed under the LGPLv3, see LICENCE file for details. package ssh_test diff -Nru juju-core-2.0~beta15/src/github.com/juju/utils/ssh/run.go juju-core-2.0.0/src/github.com/juju/utils/ssh/run.go --- juju-core-2.0~beta15/src/github.com/juju/utils/ssh/run.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/utils/ssh/run.go 2016-10-13 14:32:08.000000000 +0000 @@ -1,5 +1,5 @@ // Copyright 2013 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. +// Licensed under the LGPLv3, see LICENCE file for details. package ssh diff -Nru juju-core-2.0~beta15/src/github.com/juju/utils/ssh/run_test.go juju-core-2.0.0/src/github.com/juju/utils/ssh/run_test.go --- juju-core-2.0~beta15/src/github.com/juju/utils/ssh/run_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/utils/ssh/run_test.go 2016-10-13 14:32:08.000000000 +0000 @@ -1,5 +1,5 @@ // Copyright 2013 Canonical Ltd. 
-// Licensed under the AGPLv3, see LICENCE file for details. +// Licensed under the LGPLv3, see LICENCE file for details. package ssh_test diff -Nru juju-core-2.0~beta15/src/github.com/juju/utils/ssh/ssh.go juju-core-2.0.0/src/github.com/juju/utils/ssh/ssh.go --- juju-core-2.0~beta15/src/github.com/juju/utils/ssh/ssh.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/utils/ssh/ssh.go 2016-10-13 14:32:08.000000000 +0000 @@ -1,5 +1,5 @@ // Copyright 2013 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. +// Licensed under the LGPLv3, see LICENCE file for details. // Package ssh contains utilities for dealing with SSH connections, // key management, and so on. All SSH-based command executions in diff -Nru juju-core-2.0~beta15/src/github.com/juju/utils/ssh/ssh_gocrypto.go juju-core-2.0.0/src/github.com/juju/utils/ssh/ssh_gocrypto.go --- juju-core-2.0~beta15/src/github.com/juju/utils/ssh/ssh_gocrypto.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/utils/ssh/ssh_gocrypto.go 2016-10-13 14:32:08.000000000 +0000 @@ -1,5 +1,5 @@ // Copyright 2013 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. +// Licensed under the LGPLv3, see LICENCE file for details. package ssh diff -Nru juju-core-2.0~beta15/src/github.com/juju/utils/ssh/ssh_gocrypto_test.go juju-core-2.0.0/src/github.com/juju/utils/ssh/ssh_gocrypto_test.go --- juju-core-2.0~beta15/src/github.com/juju/utils/ssh/ssh_gocrypto_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/utils/ssh/ssh_gocrypto_test.go 2016-10-13 14:32:08.000000000 +0000 @@ -1,5 +1,5 @@ // Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. +// Licensed under the LGPLv3, see LICENCE file for details. 
package ssh_test diff -Nru juju-core-2.0~beta15/src/github.com/juju/utils/ssh/ssh_openssh.go juju-core-2.0.0/src/github.com/juju/utils/ssh/ssh_openssh.go --- juju-core-2.0~beta15/src/github.com/juju/utils/ssh/ssh_openssh.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/utils/ssh/ssh_openssh.go 2016-10-13 14:32:08.000000000 +0000 @@ -1,5 +1,5 @@ // Copyright 2013 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. +// Licensed under the LGPLv3, see LICENCE file for details. package ssh diff -Nru juju-core-2.0~beta15/src/github.com/juju/utils/ssh/ssh_test.go juju-core-2.0.0/src/github.com/juju/utils/ssh/ssh_test.go --- juju-core-2.0~beta15/src/github.com/juju/utils/ssh/ssh_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/utils/ssh/ssh_test.go 2016-10-13 14:32:08.000000000 +0000 @@ -1,5 +1,5 @@ // Copyright 2013 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. +// Licensed under the LGPLv3, see LICENCE file for details. // +build !windows diff -Nru juju-core-2.0~beta15/src/github.com/juju/utils/ssh/testing/keys.go juju-core-2.0.0/src/github.com/juju/utils/ssh/testing/keys.go --- juju-core-2.0~beta15/src/github.com/juju/utils/ssh/testing/keys.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/utils/ssh/testing/keys.go 2016-10-13 14:32:08.000000000 +0000 @@ -1,5 +1,5 @@ // Copyright 2013 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. +// Licensed under the LGPLv3, see LICENCE file for details. 
package testing diff -Nru juju-core-2.0~beta15/src/github.com/juju/utils/symlink/zsymlink_windows_386.go juju-core-2.0.0/src/github.com/juju/utils/symlink/zsymlink_windows_386.go --- juju-core-2.0~beta15/src/github.com/juju/utils/symlink/zsymlink_windows_386.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/utils/symlink/zsymlink_windows_386.go 2016-10-13 14:32:08.000000000 +0000 @@ -1,3 +1,7 @@ +// Copyright 2014 Canonical Ltd. +// Copyright 2014 Cloudbase Solutions SRL +// Licensed under the LGPLv3, see LICENCE file for details. + // mksyscall_windows.pl -l32 symlink/symlink_windows.go // MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT diff -Nru juju-core-2.0~beta15/src/github.com/juju/utils/symlink/zsymlink_windows_amd64.go juju-core-2.0.0/src/github.com/juju/utils/symlink/zsymlink_windows_amd64.go --- juju-core-2.0~beta15/src/github.com/juju/utils/symlink/zsymlink_windows_amd64.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/utils/symlink/zsymlink_windows_amd64.go 2016-10-13 14:32:08.000000000 +0000 @@ -1,3 +1,7 @@ +// Copyright 2014 Canonical Ltd. +// Copyright 2014 Cloudbase Solutions SRL +// Licensed under the LGPLv3, see LICENCE file for details. 
+ // mksyscall_windows.pl symlink/symlink_windows.go // MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT diff -Nru juju-core-2.0~beta15/src/github.com/juju/utils/tailer/tailer.go juju-core-2.0.0/src/github.com/juju/utils/tailer/tailer.go --- juju-core-2.0~beta15/src/github.com/juju/utils/tailer/tailer.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/utils/tailer/tailer.go 2016-10-13 14:32:08.000000000 +0000 @@ -10,7 +10,7 @@ "os" "time" - "launchpad.net/tomb" + "gopkg.in/tomb.v1" ) const ( diff -Nru juju-core-2.0~beta15/src/github.com/juju/utils/timer.go juju-core-2.0.0/src/github.com/juju/utils/timer.go --- juju-core-2.0~beta15/src/github.com/juju/utils/timer.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/utils/timer.go 2016-10-13 14:32:08.000000000 +0000 @@ -1,6 +1,6 @@ // Copyright 2015 Canonical Ltd. // Copyright 2015 Cloudbase Solutions SRL -// Licensed under the AGPLv3, see LICENCE file for details. +// Licensed under the LGPLv3, see LICENCE file for details. package utils @@ -15,6 +15,8 @@ // after a internally stored duration. The steps as well as min and max // durations are declared upon initialization and depend on // the particular implementation. +// +// TODO(katco): 2016-08-09: This type is deprecated: lp:1611427 type Countdown interface { // Reset stops the timer and resets its duration to the minimum one. // Start must be called to start the timer again. @@ -30,6 +32,8 @@ // A backoff timer starts at min and gets multiplied by factor // until it reaches max. Jitter determines whether a small // randomization is added to the duration. +// +// TODO(katco): 2016-08-09: This type is deprecated: lp:1611427 func NewBackoffTimer(config BackoffTimerConfig) *BackoffTimer { return &BackoffTimer{ config: config, @@ -41,6 +45,8 @@ // A backoff timer starts at min and gets multiplied by factor // until it reaches max. Jitter determines whether a small // randomization is added to the duration. 
+// +// TODO(katco): 2016-08-09: This type is deprecated: lp:1611427 type BackoffTimer struct { config BackoffTimerConfig @@ -50,6 +56,8 @@ // BackoffTimerConfig is a helper struct for backoff timer // that encapsulates config information. +// +// TODO(katco): 2016-08-09: This type is deprecated: lp:1611427 type BackoffTimerConfig struct { // The minimum duration after which Func is called. Min time.Duration diff -Nru juju-core-2.0~beta15/src/github.com/juju/utils/timer_test.go juju-core-2.0.0/src/github.com/juju/utils/timer_test.go --- juju-core-2.0~beta15/src/github.com/juju/utils/timer_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/utils/timer_test.go 2016-10-13 14:32:08.000000000 +0000 @@ -30,6 +30,10 @@ return true } +func (t *TestStdTimer) Chan() <-chan time.Time { + panic("should not be called") +} + type timerSuite struct { baseSuite testing.CleanupSuite timer *utils.BackoffTimer @@ -54,6 +58,9 @@ // These 2 methods are not used here but are needed to satisfy the intergface func (c *mockClock) Now() time.Time { return time.Now() } func (c *mockClock) After(d time.Duration) <-chan time.Time { return time.After(d) } +func (c *mockClock) NewTimer(d time.Duration) clock.Timer { + panic("should not be called") +} func (c *mockClock) AfterFunc(d time.Duration, f func()) clock.Timer { *c.afterFuncCalls++ diff -Nru juju-core-2.0~beta15/src/github.com/juju/utils/tls.go juju-core-2.0.0/src/github.com/juju/utils/tls.go --- juju-core-2.0~beta15/src/github.com/juju/utils/tls.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/utils/tls.go 2016-10-13 14:32:08.000000000 +0000 @@ -19,9 +19,9 @@ Proxy: http.ProxyFromEnvironment, TLSClientConfig: tlsConfig, DisableKeepAlives: true, - Dial: dial, TLSHandshakeTimeout: 10 * time.Second, } + installHTTPDialShim(transport) registerFileProtocol(transport) return transport } diff -Nru juju-core-2.0~beta15/src/github.com/juju/utils/uptime/zuptime_windows_386.go 
juju-core-2.0.0/src/github.com/juju/utils/uptime/zuptime_windows_386.go --- juju-core-2.0~beta15/src/github.com/juju/utils/uptime/zuptime_windows_386.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/utils/uptime/zuptime_windows_386.go 2016-10-13 14:32:08.000000000 +0000 @@ -1,3 +1,7 @@ +// Copyright 2014 Canonical Ltd. +// Copyright 2014 Cloudbase Solutions SRL +// Licensed under the LGPLv3, see LICENCE file for details. + // mksyscall_windows.pl -l32 uptime_windows.go // MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT diff -Nru juju-core-2.0~beta15/src/github.com/juju/utils/uptime/zuptime_windows_amd64.go juju-core-2.0.0/src/github.com/juju/utils/uptime/zuptime_windows_amd64.go --- juju-core-2.0~beta15/src/github.com/juju/utils/uptime/zuptime_windows_amd64.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/utils/uptime/zuptime_windows_amd64.go 2016-10-13 14:32:08.000000000 +0000 @@ -1,3 +1,7 @@ +// Copyright 2014 Canonical Ltd. +// Copyright 2014 Cloudbase Solutions SRL +// Licensed under the LGPLv3, see LICENCE file for details. + // mksyscall_windows.pl uptime_windows.go // MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT diff -Nru juju-core-2.0~beta15/src/github.com/juju/utils/zfile_windows.go juju-core-2.0.0/src/github.com/juju/utils/zfile_windows.go --- juju-core-2.0~beta15/src/github.com/juju/utils/zfile_windows.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/utils/zfile_windows.go 2016-10-13 14:32:08.000000000 +0000 @@ -1,3 +1,6 @@ +// Copyright 2013 Canonical Ltd. +// Licensed under the LGPLv3, see LICENCE file for details. 
+ // mksyscall_windows.pl -l32 file_windows.go // MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT diff -Nru juju-core-2.0~beta15/src/github.com/juju/version/.gitignore juju-core-2.0.0/src/github.com/juju/version/.gitignore --- juju-core-2.0~beta15/src/github.com/juju/version/.gitignore 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/version/.gitignore 2016-10-13 14:32:08.000000000 +0000 @@ -0,0 +1,24 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof diff -Nru juju-core-2.0~beta15/src/github.com/juju/webbrowser/.gitignore juju-core-2.0.0/src/github.com/juju/webbrowser/.gitignore --- juju-core-2.0~beta15/src/github.com/juju/webbrowser/.gitignore 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/juju/webbrowser/.gitignore 2016-10-13 14:32:34.000000000 +0000 @@ -0,0 +1,24 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof diff -Nru juju-core-2.0~beta15/src/github.com/lunixbochs/vtclean/io.go juju-core-2.0.0/src/github.com/lunixbochs/vtclean/io.go --- juju-core-2.0~beta15/src/github.com/lunixbochs/vtclean/io.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lunixbochs/vtclean/io.go 2016-10-13 14:32:29.000000000 +0000 @@ -0,0 +1,93 @@ +package vtclean + +import ( + "bufio" + "bytes" + "io" +) + +type reader struct { + io.Reader + scanner *bufio.Scanner + buf []byte + + color bool +} + +func NewReader(r io.Reader, color bool) io.Reader { + return &reader{Reader: r, color: color} +} + +func (r *reader) scan() bool { + if 
r.scanner == nil { + r.scanner = bufio.NewScanner(r.Reader) + } + if len(r.buf) > 0 { + return true + } + if r.scanner.Scan() { + r.buf = []byte(Clean(r.scanner.Text(), r.color) + "\n") + return true + } + return false +} + +func (r *reader) fill(p []byte) int { + n := len(r.buf) + copy(p, r.buf) + if len(p) < len(r.buf) { + r.buf = r.buf[len(p):] + n = len(p) + } else { + r.buf = nil + } + return n +} + +func (r *reader) Read(p []byte) (int, error) { + n := r.fill(p) + if n < len(p) { + if !r.scan() { + if n == 0 { + return 0, io.EOF + } + return n, nil + } + n += r.fill(p[n:]) + } + return n, nil +} + +type writer struct { + io.Writer + buf []byte + color bool +} + +func NewWriter(w io.Writer, color bool) io.WriteCloser { + return &writer{Writer: w, color: color} +} + +func (w *writer) Write(p []byte) (int, error) { + buf := append(w.buf, p...) + lines := bytes.Split(buf, []byte("\n")) + if len(lines) > 0 { + last := len(lines) - 1 + w.buf = lines[last] + count := 0 + for _, line := range lines[:last] { + n, err := w.Writer.Write([]byte(Clean(string(line), w.color) + "\n")) + count += n + if err != nil { + return count, err + } + } + } + return len(p), nil +} + +func (w *writer) Close() error { + cl := Clean(string(w.buf), w.color) + _, err := w.Writer.Write([]byte(cl)) + return err +} diff -Nru juju-core-2.0~beta15/src/github.com/lunixbochs/vtclean/LICENSE juju-core-2.0.0/src/github.com/lunixbochs/vtclean/LICENSE --- juju-core-2.0~beta15/src/github.com/lunixbochs/vtclean/LICENSE 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lunixbochs/vtclean/LICENSE 2016-10-13 14:32:29.000000000 +0000 @@ -0,0 +1,19 @@ +Copyright (c) 2015 Ryan Hileman + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or 
sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff -Nru juju-core-2.0~beta15/src/github.com/lunixbochs/vtclean/line.go juju-core-2.0.0/src/github.com/lunixbochs/vtclean/line.go --- juju-core-2.0~beta15/src/github.com/lunixbochs/vtclean/line.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lunixbochs/vtclean/line.go 2016-10-13 14:32:29.000000000 +0000 @@ -0,0 +1,107 @@ +package vtclean + +type char struct { + char byte + vt100 []byte +} + +func chars(p []byte) []char { + tmp := make([]char, len(p)) + for i, v := range p { + tmp[i].char = v + } + return tmp +} + +type lineEdit struct { + buf []char + pos, size int + vt100 []byte +} + +func newLineEdit(length int) *lineEdit { + return &lineEdit{buf: make([]char, length)} +} + +func (l *lineEdit) Vt100(p []byte) { + l.vt100 = p +} + +func (l *lineEdit) Move(x int) { + if x < 0 && l.pos <= -x { + l.pos = 0 + } else if x > 0 && l.pos+x > l.size { + l.pos = l.size + } else { + l.pos += x + } +} + +func (l *lineEdit) Write(p []byte) { + c := chars(p) + if len(c) > 0 { + c[0].vt100 = l.vt100 + l.vt100 = nil + } + if len(l.buf)-l.pos < len(c) { + l.buf = append(l.buf[:l.pos], c...) 
+ } else { + copy(l.buf[l.pos:], c) + } + l.pos += len(c) + if l.pos > l.size { + l.size = l.pos + } +} + +func (l *lineEdit) Insert(p []byte) { + c := chars(p) + if len(c) > 0 { + c[0].vt100 = l.vt100 + l.vt100 = nil + } + l.size += len(c) + c = append(c, l.buf[l.pos:]...) + l.buf = append(l.buf[:l.pos], c...) +} + +func (l *lineEdit) Delete(n int) { + most := l.size - l.pos + if n > most { + n = most + } + copy(l.buf[l.pos:], l.buf[l.pos+n:]) + l.size -= n +} + +func (l *lineEdit) Clear() { + for i := 0; i < len(l.buf); i++ { + l.buf[i].char = ' ' + } +} +func (l *lineEdit) ClearLeft() { + for i := 0; i < l.pos+1; i++ { + l.buf[i].char = ' ' + } +} +func (l *lineEdit) ClearRight() { + l.size = l.pos +} + +func (l *lineEdit) Bytes() []byte { + length := 0 + buf := l.buf[:l.size] + for _, v := range buf { + length += 1 + len(v.vt100) + } + tmp := make([]byte, 0, length) + for _, v := range buf { + tmp = append(tmp, v.vt100...) + tmp = append(tmp, v.char) + } + return tmp +} + +func (l *lineEdit) String() string { + return string(l.Bytes()) +} diff -Nru juju-core-2.0~beta15/src/github.com/lunixbochs/vtclean/README.md juju-core-2.0.0/src/github.com/lunixbochs/vtclean/README.md --- juju-core-2.0~beta15/src/github.com/lunixbochs/vtclean/README.md 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lunixbochs/vtclean/README.md 2016-10-13 14:32:29.000000000 +0000 @@ -0,0 +1,44 @@ +vtclean +---- + +Clean up raw terminal output by stripping escape sequences, optionally preserving color. + +Get it: `go get github.com/lunixbochs/vtclean/vtclean` + +API: + + import "github.com/lunixbochs/vtclean" + vtclean.Clean(line string, color bool) string + +Command line example: + + $ echo -e '\x1b[1;32mcolor example + color forced to stop at end of line + backspace is ba\b\bgood + no beeps!\x07\x07' | ./vtclean -color + + color example + color forced to stop at end of line + backspace is good + no beeps! 
+ +Go example: + + package main + + import ( + "fmt" + "github.com/lunixbochs/vtclean" + ) + + func main() { + line := vtclean.Clean( + "\033[1;32mcolor, " + + "curs\033[Aor, " + + "backspace\b\b\b\b\b\b\b\b\b\b\b\033[K", false) + fmt.Println(line) + } + +Output: + + color, cursor diff -Nru juju-core-2.0~beta15/src/github.com/lunixbochs/vtclean/regex.txt juju-core-2.0.0/src/github.com/lunixbochs/vtclean/regex.txt --- juju-core-2.0~beta15/src/github.com/lunixbochs/vtclean/regex.txt 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lunixbochs/vtclean/regex.txt 2016-10-13 14:32:29.000000000 +0000 @@ -0,0 +1,14 @@ +this is the source definitions for the scary escape code regex + +# from tests in Terminal.app, this regex should cover all basic \e[ and \e] cases +^([\[\]]([\d\?]+)?(;[\d\?]+)*)?. + +# this catches any case the above does not +# make sure to not include any special characters the main regex finds (like ?) +\[[^a-zA-Z0-9@\?]+. + +# esc + paren + any single char +[\(\)]. 
+ +# didn't re-check this one (not included) +[\[K]\d+;\d+ diff -Nru juju-core-2.0~beta15/src/github.com/lunixbochs/vtclean/vtclean/vtclean.go juju-core-2.0.0/src/github.com/lunixbochs/vtclean/vtclean/vtclean.go --- juju-core-2.0~beta15/src/github.com/lunixbochs/vtclean/vtclean/vtclean.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lunixbochs/vtclean/vtclean/vtclean.go 2016-10-13 14:32:29.000000000 +0000 @@ -0,0 +1,17 @@ +package main + +import ( + "flag" + "github.com/lunixbochs/vtclean" + "io" + "os" +) + +func main() { + color := flag.Bool("color", false, "enable color") + flag.Parse() + + stdout := vtclean.NewWriter(os.Stdout, *color) + defer stdout.Close() + io.Copy(stdout, os.Stdin) +} diff -Nru juju-core-2.0~beta15/src/github.com/lunixbochs/vtclean/vtclean.go juju-core-2.0.0/src/github.com/lunixbochs/vtclean/vtclean.go --- juju-core-2.0~beta15/src/github.com/lunixbochs/vtclean/vtclean.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lunixbochs/vtclean/vtclean.go 2016-10-13 14:32:29.000000000 +0000 @@ -0,0 +1,81 @@ +package vtclean + +import ( + "bytes" + "regexp" + "strconv" +) + +// see regex.txt for a slightly separated version of this regex +var vt100re = regexp.MustCompile(`^\033([\[\]]([\d\?]+)?(;[\d\?]+)*)?(.)`) +var vt100exc = regexp.MustCompile(`^\033(\[[^a-zA-Z0-9@\?]+|[\(\)]).`) + +func Clean(line string, color bool) string { + var edit = newLineEdit(len(line)) + lineb := []byte(line) + + hadColor := false + for i := 0; i < len(lineb); { + c := lineb[i] + switch c { + case '\b': + edit.Move(-1) + case '\033': + // set terminal title + if bytes.HasPrefix(lineb[i:], []byte("\x1b]0;")) { + pos := bytes.Index(lineb[i:], []byte("\a")) + if pos != -1 { + i += pos + 1 + continue + } + } + if m := vt100exc.Find(lineb[i:]); m != nil { + i += len(m) + } else if m := vt100re.FindSubmatch(lineb[i:]); m != nil { + i += len(m[0]) + num := string(m[2]) + n, err := strconv.Atoi(num) + if err != nil || n > 10000 { 
+ n = 1 + } + switch m[4][0] { + case 'm': + if color { + hadColor = true + edit.Vt100(m[0]) + } + case '@': + edit.Insert(bytes.Repeat([]byte{' '}, n)) + case 'C': + edit.Move(n) + case 'D': + edit.Move(-n) + case 'P': + edit.Delete(n) + case 'K': + switch num { + case "", "0": + edit.ClearRight() + case "1": + edit.ClearLeft() + case "2": + edit.Clear() + } + } + } else { + i += 1 + } + continue + default: + if c == '\n' || c >= ' ' { + edit.Write([]byte{c}) + } + } + i += 1 + } + out := edit.Bytes() + if hadColor { + out = append(out, []byte("\033[0m")...) + } + return string(out) +} diff -Nru juju-core-2.0~beta15/src/github.com/lunixbochs/vtclean/vtclean_test.go juju-core-2.0.0/src/github.com/lunixbochs/vtclean/vtclean_test.go --- juju-core-2.0~beta15/src/github.com/lunixbochs/vtclean/vtclean_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lunixbochs/vtclean/vtclean_test.go 2016-10-13 14:32:29.000000000 +0000 @@ -0,0 +1,74 @@ +package vtclean + +import ( + "testing" +) + +var tests = map[string]string{ + // "set title" special case + "\x1b]0;asdjklfasdkljf\atest": "test", + + // basic escape + "\033[12laaa": "aaa", + "\033[?1049laaa": "aaa", + + // for the second regex + "a\033[!pa": "aa", + + // backspace and clear + "aaa\b\bb": "aba", + "aaa\b\b\033[K": "a", + "aaa\b\b\033[1K": " a", + "aaa\b\b\033[2Ka": " a ", + + // character movement + "aaa\033[2Db": "aba", + "aaa\033[4D\033[2Cb": "aab", + "aaa\033[4D\033[1Cb": "aba", + "aaa\033[1Cb": "aaab", + + // vt52 + "aaa\033D\033Db": "aba", + "a\033@b": "ab", + + // delete and insert + "aaa\b\b\033[2@": "a aa", + "aaa\b\b\033[P": "aa", + "aaa\b\b\033[4P": "a", + + // strip color + "aaa \033[25;25mtest": "aaa test", +} + +var colorTests = map[string]string{ + "aaa \033[25;25mtest": "aaa \033[25;25mtest\x1b[0m", +} + +func TestMain(t *testing.T) { + for a, b := range tests { + tmp := Clean(a, false) + if tmp != b { + t.Logf("Clean() failed: %#v -> %#v != %#v\n", a, tmp, b) + t.Fail() + } 
+ } +} + +func TestColor(t *testing.T) { + for a, b := range colorTests { + tmp := Clean(a, true) + if tmp != b { + t.Logf("Clean() failed: %#v -> %#v != %#v\n", a, tmp, b) + t.Fail() + } + } +} + +func TestWriteBounds(t *testing.T) { + l := &lineEdit{buf: nil} + s := "asdf" + l.Write([]byte(s)) + if l.String() != s { + t.Fatalf("l.String(): %#v != %#v", l.String(), s) + } +} diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/client.go juju-core-2.0.0/src/github.com/lxc/lxd/client.go --- juju-core-2.0~beta15/src/github.com/lxc/lxd/client.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/client.go 2016-10-13 14:31:53.000000000 +0000 @@ -2,6 +2,7 @@ import ( "bytes" + "crypto/sha256" "crypto/x509" "encoding/base64" "encoding/json" @@ -19,6 +20,7 @@ "path/filepath" "strconv" "strings" + "syscall" "github.com/gorilla/websocket" @@ -94,6 +96,15 @@ return &op, nil } +func (r *Response) MetadataAsStringSlice() ([]string, error) { + sl := []string{} + if err := json.Unmarshal(r.Metadata, &sl); err != nil { + return nil, err + } + + return sl, nil +} + // ParseResponse parses a lxd style response out of an http.Response. Note that // this does _not_ automatically convert error responses to golang errors. To // do that, use ParseError. 
Internal client library uses should probably use @@ -110,7 +121,7 @@ if err != nil { return nil, err } - shared.Debugf("Raw response: %s", string(s)) + shared.LogDebugf("Raw response: %s", string(s)) if err := json.Unmarshal(s, &ret); err != nil { return nil, err @@ -186,6 +197,17 @@ info.ClientPEMKey = string(keyBytes) } + // Read the client key (if it exists) + clientCaPath := path.Join(config.ConfigDir, "client.ca") + if shared.PathExists(clientCaPath) { + caBytes, err := ioutil.ReadFile(clientCaPath) + if err != nil { + return nil, err + } + + info.ClientPEMCa = string(caBytes) + } + // Read the server certificate (if it exists) serverCertPath := config.ServerCertPath(remote) if shared.PathExists(serverCertPath) { @@ -222,6 +244,8 @@ ClientPEMCert string // ClientPEMKey is the PEM encoded private bytes of the client's key associated with its certificate ClientPEMKey string + // ClientPEMCa is the PEM encoded client certificate authority (if any) + ClientPEMCa string // ServerPEMCert is the PEM encoded server certificate that we are // connecting to. It can be the empty string if we do not know the // server's certificate yet. 
@@ -241,18 +265,17 @@ // unix:///path/to/socket // unix:/path/to/socket // unix:path/to/socket - path := strings.TrimPrefix(remote.Addr, "unix:") - if strings.HasPrefix(path, "///") { - // translate unix:///path/to, to just "/path/to" - path = path[2:] - } + path := strings.TrimPrefix(strings.TrimPrefix(remote.Addr, "unix:"), "//") raddr, err := net.ResolveUnixAddr("unix", path) if err != nil { return nil, err } return net.DialUnix("unix", nil, raddr) } - c.Http.Transport = &http.Transport{Dial: uDial} + c.Http.Transport = &http.Transport{ + Dial: uDial, + DisableKeepAlives: true, + } c.websocketDialer.NetDial = uDial c.Remote = remote @@ -264,16 +287,17 @@ return nil } -func connectViaHttp(c *Client, remote *RemoteConfig, clientCert, clientKey, serverCert string) error { - tlsconfig, err := shared.GetTLSConfigMem(clientCert, clientKey, serverCert) +func connectViaHttp(c *Client, remote *RemoteConfig, clientCert, clientKey, clientCA, serverCert string) error { + tlsconfig, err := shared.GetTLSConfigMem(clientCert, clientKey, clientCA, serverCert) if err != nil { return err } tr := &http.Transport{ - TLSClientConfig: tlsconfig, - Dial: shared.RFC3493Dialer, - Proxy: shared.ProxyFromEnvironment, + TLSClientConfig: tlsconfig, + Dial: shared.RFC3493Dialer, + Proxy: shared.ProxyFromEnvironment, + DisableKeepAlives: true, } c.websocketDialer.NetDial = shared.RFC3493Dialer @@ -307,7 +331,7 @@ if strings.HasPrefix(info.RemoteConfig.Addr, "unix:") { err = connectViaUnix(c, &info.RemoteConfig) } else { - err = connectViaHttp(c, &info.RemoteConfig, info.ClientPEMCert, info.ClientPEMKey, info.ServerPEMCert) + err = connectViaHttp(c, &info.RemoteConfig, info.ClientPEMCert, info.ClientPEMKey, info.ClientPEMCa, info.ServerPEMCert) } if err != nil { return nil, err @@ -378,7 +402,7 @@ return nil, err } - shared.Debugf("Putting %s to %s", buf.String(), uri) + shared.LogDebugf("Putting %s to %s", buf.String(), uri) req, err := http.NewRequest("PUT", uri, &buf) if err != nil { @@ 
-404,7 +428,7 @@ return nil, err } - shared.Debugf("Posting %s to %s", buf.String(), uri) + shared.LogDebugf("Posting %s to %s", buf.String(), uri) req, err := http.NewRequest("POST", uri, &buf) if err != nil { @@ -454,7 +478,7 @@ return nil, err } - shared.Debugf("Deleting %s to %s", buf.String(), uri) + shared.LogDebugf("Deleting %s to %s", buf.String(), uri) req, err := http.NewRequest("DELETE", uri, &buf) if err != nil { @@ -471,24 +495,40 @@ return HoistResponse(resp, rtype) } -func (c *Client) websocket(operation string, secret string) (*websocket.Conn, error) { +func (c *Client) Websocket(operation string, secret string) (*websocket.Conn, error) { query := url.Values{"secret": []string{secret}} url := c.BaseWSURL + path.Join(operation, "websocket") + "?" + query.Encode() return WebsocketDial(c.websocketDialer, url) } func (c *Client) url(elem ...string) string { + // Normalize the URL path := strings.Join(elem, "/") + entries := []string{} + fields := strings.Split(path, "/") + for i, entry := range fields { + if entry == "" && i+1 < len(fields) { + continue + } + + entries = append(entries, entry) + } + path = strings.Join(entries, "/") + + // Assemble the final URL uri := c.BaseURL + "/" + path + // Aliases may contain a trailing slash if strings.HasPrefix(path, "1.0/images/aliases") { return uri } + // File paths may contain a trailing slash if strings.Contains(path, "?") { return uri } + // Nothing else should contain a trailing slash return strings.TrimSuffix(uri, "/") } @@ -500,13 +540,54 @@ return c.baseGet(c.url(shared.APIVersion)) } +// GetLocalLXDErr determines whether or not an error is likely due to a +// local LXD configuration issue, and if so, returns the underlying error. +// GetLocalLXDErr can be used to provide customized error messages to help +// the user identify basic system issues, e.g. LXD daemon not running. 
+// +// Returns syscall.ENOENT, syscall.ECONNREFUSED or syscall.EACCES when a +// local LXD configuration issue is detected, nil otherwise. +func GetLocalLXDErr(err error) error { + t, ok := err.(*url.Error) + if !ok { + return nil + } + + u, ok := t.Err.(*net.OpError) + if !ok { + return nil + } + + if u.Op == "dial" && u.Net == "unix" { + var lxdErr error + + sysErr, ok := u.Err.(*os.SyscallError) + if ok { + lxdErr = sysErr.Err + } else { + // syscall.Errno may be returned on some systems, e.g. CentOS + lxdErr, ok = u.Err.(syscall.Errno) + if !ok { + return nil + } + } + + switch lxdErr { + case syscall.ENOENT, syscall.ECONNREFUSED, syscall.EACCES: + return lxdErr + } + } + + return nil +} + func (c *Client) AmTrusted() bool { resp, err := c.GetServerConfig() if err != nil { return false } - shared.Debugf("%s", resp) + shared.LogDebugf("%s", resp) jmap, err := resp.MetadataAsMap() if err != nil { @@ -527,7 +608,7 @@ return false } - shared.Debugf("%s", resp) + shared.LogDebugf("%s", resp) jmap, err := resp.MetadataAsMap() if err != nil { @@ -580,27 +661,25 @@ return err } - if c.Remote.Protocol != "simplestreams" { - if !info.Public { - var secret string + if c.Remote.Protocol != "simplestreams" && !info.Public { + var secret string - resp, err := c.post("images/"+image+"/secret", nil, Async) - if err != nil { - return err - } - - op, err := resp.MetadataAsOperation() - if err != nil { - return err - } + resp, err := c.post("images/"+image+"/secret", nil, Async) + if err != nil { + return err + } - secret, err = op.Metadata.GetString("secret") - if err != nil { - return err - } + op, err := resp.MetadataAsOperation() + if err != nil { + return err + } - source["secret"] = secret + secret, err = op.Metadata.GetString("secret") + if err != nil { + return err } + + source["secret"] = secret source["fingerprint"] = image } @@ -644,6 +723,8 @@ go dest.Monitor([]string{"operation"}, handler) } + fingerprint := info.Fingerprint + for _, addr := range addresses { 
sourceUrl := "https://" + addr @@ -657,11 +738,18 @@ operation = resp.Operation - err = dest.WaitForSuccess(resp.Operation) + op, err := dest.WaitForSuccessOp(resp.Operation) if err != nil { return err } + if op.Metadata != nil { + value, err := op.Metadata.GetString("fingerprint") + if err == nil { + fingerprint = value + } + } + break } @@ -673,7 +761,7 @@ if copy_aliases { for _, alias := range info.Aliases { dest.DeleteAlias(alias.Name) - err = dest.PostAlias(alias.Name, alias.Description, info.Fingerprint) + err = dest.PostAlias(alias.Name, alias.Description, fingerprint) if err != nil { return fmt.Errorf("Error adding alias %s: %s", alias.Name, err) } @@ -683,7 +771,7 @@ /* add new aliases */ for _, alias := range aliases { dest.DeleteAlias(alias) - err = dest.PostAlias(alias, alias, info.Fingerprint) + err = dest.PostAlias(alias, alias, fingerprint) if err != nil { return fmt.Errorf("Error adding alias %s: %s\n", alias, err) } @@ -749,7 +837,7 @@ return "", fmt.Errorf("Invalid multipart image") } - rootfsTarf, err := os.OpenFile(filepath.Join(part.FileName()), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600) + rootfsTarf, err := os.OpenFile(filepath.Join(target, part.FileName()), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600) if err != nil { return "", err } @@ -770,75 +858,110 @@ if target == "-" { wr = os.Stdout destpath = "stdout" - } else if fi, err := os.Stat(target); err == nil { - // file exists, so check if folder - switch mode := fi.Mode(); { - case mode.IsDir(): - // save in directory, header content-disposition can not be null - // and will have a filename - cd := strings.Split(raw.Header["Content-Disposition"][0], "=") - - // write filename from header - destpath = filepath.Join(target, cd[1]) - f, err := os.Create(destpath) - defer f.Close() - - if err != nil { - return "", err - } - - wr = f - - default: - // overwrite file - destpath = target - f, err := os.OpenFile(destpath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600) - defer f.Close() + } else { + _, 
cdParams, err := mime.ParseMediaType(raw.Header.Get("Content-Disposition")) + if err != nil { + return "", err + } + filename, ok := cdParams["filename"] + if !ok { + return "", fmt.Errorf("No filename in Content-Disposition header.") + } - if err != nil { - return "", err + if shared.IsDir(target) { + // The target is a directory, use the filename verbatim from the + // Content-Disposition header + destpath = filepath.Join(target, filename) + } else { + // The target is a file, parse the extension from the source filename + // and append it to the target filename. + ext := filepath.Ext(filename) + if strings.HasSuffix(filename, fmt.Sprintf(".tar%s", ext)) { + ext = fmt.Sprintf(".tar%s", ext) } - - wr = f + destpath = fmt.Sprintf("%s%s", target, ext) } - } else { - // write as simple file - destpath = target - f, err := os.Create(destpath) - defer f.Close() - wr = f + f, err := os.OpenFile(destpath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600) if err != nil { return "", err } + defer f.Close() + + wr = f } _, err = io.Copy(wr, raw.Body) - if err != nil { return "", err } - // it streams to stdout or file, so no response returned return destpath, nil } -func (c *Client) PostImageURL(imageFile string, public bool, aliases []string) (string, error) { +func (c *Client) PostImageURL(imageFile string, properties []string, public bool, aliases []string, progressHandler func(progress string)) (string, error) { if c.Remote.Public { return "", fmt.Errorf("This function isn't supported by public remotes.") } + imgProperties := map[string]string{} + for _, entry := range properties { + fields := strings.SplitN(entry, "=", 2) + if len(fields) != 2 { + return "", fmt.Errorf("Invalid image property: %s", entry) + } + + imgProperties[fields[0]] = fields[1] + } + source := shared.Jmap{ "type": "url", "mode": "pull", "url": imageFile} - body := shared.Jmap{"public": public, "source": source} + body := shared.Jmap{"public": public, "properties": imgProperties, "source": source} + + 
operation := "" + handler := func(msg interface{}) { + if msg == nil { + return + } + + event := msg.(map[string]interface{}) + if event["type"].(string) != "operation" { + return + } + + if event["metadata"] == nil { + return + } + + md := event["metadata"].(map[string]interface{}) + if !strings.HasSuffix(operation, md["id"].(string)) { + return + } + + if md["metadata"] == nil { + return + } + + opMd := md["metadata"].(map[string]interface{}) + _, ok := opMd["download_progress"] + if ok { + progressHandler(opMd["download_progress"].(string)) + } + } + + if progressHandler != nil { + go c.Monitor([]string{"operation"}, handler) + } resp, err := c.post("images", body, Async) if err != nil { return "", err } + operation = resp.Operation + op, err := c.WaitFor(resp.Operation) if err != nil { return "", err @@ -1197,7 +1320,7 @@ // Init creates a container from either a fingerprint or an alias; you must // provide at least one. -func (c *Client) Init(name string, imgremote string, image string, profiles *[]string, config map[string]string, ephem bool) (*Response, error) { +func (c *Client) Init(name string, imgremote string, image string, profiles *[]string, config map[string]string, devices shared.Devices, ephem bool) (*Response, error) { if c.Remote.Public { return nil, fmt.Errorf("This function isn't supported by public remotes.") } @@ -1299,6 +1422,10 @@ body["config"] = config } + if devices != nil { + body["devices"] = devices + } + if ephem { body["ephemeral"] = ephem } @@ -1409,6 +1536,11 @@ "environment": env, } + if width > 0 && height > 0 { + body["width"] = width + body["height"] = height + } + resp, err := c.post(fmt.Sprintf("containers/%s/exec", name), body, Async) if err != nil { return -1, err @@ -1429,7 +1561,7 @@ if controlHandler != nil { var control *websocket.Conn if wsControl, ok := fds["control"]; ok { - control, err = c.websocket(resp.Operation, wsControl.(string)) + control, err = c.Websocket(resp.Operation, wsControl.(string)) if err != nil { 
return -1, err } @@ -1438,12 +1570,12 @@ go controlHandler(c, control) } - conn, err := c.websocket(resp.Operation, fds["0"].(string)) + conn, err := c.Websocket(resp.Operation, fds["0"].(string)) if err != nil { return -1, err } - shared.WebsocketSendStream(conn, stdin) + shared.WebsocketSendStream(conn, stdin, -1) <-shared.WebsocketRecvStream(stdout, conn) conn.Close() @@ -1451,17 +1583,17 @@ conns := make([]*websocket.Conn, 3) dones := make([]chan bool, 3) - conns[0], err = c.websocket(resp.Operation, fds[strconv.Itoa(0)].(string)) + conns[0], err = c.Websocket(resp.Operation, fds[strconv.Itoa(0)].(string)) if err != nil { return -1, err } defer conns[0].Close() - dones[0] = shared.WebsocketSendStream(conns[0], stdin) + dones[0] = shared.WebsocketSendStream(conns[0], stdin, -1) outputs := []io.WriteCloser{stdout, stderr} for i := 1; i < 3; i++ { - conns[i], err = c.websocket(resp.Operation, fds[strconv.Itoa(i)].(string)) + conns[i], err = c.Websocket(resp.Operation, fds[strconv.Itoa(i)].(string)) if err != nil { return -1, err } @@ -1556,6 +1688,15 @@ return nil, err } + // Fill in certificate fingerprint if not provided + if ss.Environment.CertificateFingerprint == "" && ss.Environment.Certificate != "" { + pemCertificate, _ := pem.Decode([]byte(ss.Environment.Certificate)) + if pemCertificate != nil { + digest := sha256.Sum256(pemCertificate.Bytes) + ss.Environment.CertificateFingerprint = fmt.Sprintf("%x", digest) + } + } + return &ss, nil } @@ -1630,7 +1771,7 @@ return &ct, nil } -func (c *Client) PushFile(container string, p string, gid int, uid int, mode os.FileMode, buf io.ReadSeeker) error { +func (c *Client) PushFile(container string, p string, gid int, uid int, mode string, buf io.ReadSeeker) error { if c.Remote.Public { return fmt.Errorf("This function isn't supported by public remotes.") } @@ -1643,10 +1784,43 @@ return err } req.Header.Set("User-Agent", shared.UserAgent) + req.Header.Set("X-LXD-type", "file") + if mode != "" { + 
req.Header.Set("X-LXD-mode", mode) + } + if uid != -1 { + req.Header.Set("X-LXD-uid", strconv.FormatUint(uint64(uid), 10)) + } + if gid != -1 { + req.Header.Set("X-LXD-gid", strconv.FormatUint(uint64(gid), 10)) + } + + raw, err := c.Http.Do(req) + if err != nil { + return err + } + + _, err = HoistResponse(raw, Sync) + return err +} + +func (c *Client) Mkdir(container string, p string, mode os.FileMode) error { + if c.Remote.Public { + return fmt.Errorf("This function isn't supported by public remotes.") + } + + query := url.Values{"path": []string{p}} + uri := c.url(shared.APIVersion, "containers", container, "files") + "?" + query.Encode() + + req, err := http.NewRequest("POST", uri, nil) + if err != nil { + return err + } + + req.Header.Set("User-Agent", shared.UserAgent) + req.Header.Set("X-LXD-type", "directory") req.Header.Set("X-LXD-mode", fmt.Sprintf("%04o", mode.Perm())) - req.Header.Set("X-LXD-uid", strconv.FormatUint(uint64(uid), 10)) - req.Header.Set("X-LXD-gid", strconv.FormatUint(uint64(gid), 10)) raw, err := c.Http.Do(req) if err != nil { @@ -1657,9 +1831,73 @@ return err } -func (c *Client) PullFile(container string, p string) (int, int, os.FileMode, io.ReadCloser, error) { +func (c *Client) MkdirP(container string, p string, mode os.FileMode) error { + if c.Remote.Public { + return fmt.Errorf("This function isn't supported by public remotes.") + } + + parts := strings.Split(p, "/") + i := len(parts) + + for ; i >= 1; i-- { + cur := filepath.Join(parts[:i]...) + _, _, _, type_, _, _, err := c.PullFile(container, cur) + if err != nil { + continue + } + + if type_ != "directory" { + return fmt.Errorf("%s is not a directory", cur) + } + + i++ + break + } + + for ; i <= len(parts); i++ { + cur := filepath.Join(parts[:i]...) 
+ if err := c.Mkdir(container, cur, mode); err != nil { + return err + } + } + + return nil +} + +func (c *Client) RecursivePushFile(container string, source string, target string) error { if c.Remote.Public { - return 0, 0, 0, nil, fmt.Errorf("This function isn't supported by public remotes.") + return fmt.Errorf("This function isn't supported by public remotes.") + } + + sourceDir := filepath.Dir(source) + + sendFile := func(p string, fInfo os.FileInfo, err error) error { + if err != nil { + return fmt.Errorf("got error sending path %s: %s", p, err) + } + + targetPath := path.Join(target, p[len(sourceDir):]) + if fInfo.IsDir() { + return c.Mkdir(container, targetPath, fInfo.Mode()) + } + + f, err := os.Open(p) + if err != nil { + return err + } + defer f.Close() + + mode, uid, gid := shared.GetOwnerMode(fInfo) + + return c.PushFile(container, targetPath, gid, uid, fmt.Sprintf("0%o", mode), f) + } + + return filepath.Walk(source, sendFile) +} + +func (c *Client) PullFile(container string, p string) (int, int, int, string, io.ReadCloser, []string, error) { + if c.Remote.Public { + return 0, 0, 0, "", nil, nil, fmt.Errorf("This function isn't supported by public remotes.") } uri := c.url(shared.APIVersion, "containers", container, "files") @@ -1667,12 +1905,73 @@ r, err := c.getRaw(uri + "?" 
+ query.Encode()) if err != nil { - return 0, 0, 0, nil, err + return 0, 0, 0, "", nil, nil, err } - uid, gid, mode := shared.ParseLXDFileHeaders(r.Header) + uid, gid, mode, type_ := shared.ParseLXDFileHeaders(r.Header) + if type_ == "directory" { + resp, err := HoistResponse(r, Sync) + if err != nil { + return 0, 0, 0, "", nil, nil, err + } + + entries, err := resp.MetadataAsStringSlice() + if err != nil { + return 0, 0, 0, "", nil, nil, err + } - return uid, gid, mode, r.Body, nil + return uid, gid, mode, type_, nil, entries, nil + } else if type_ == "file" { + return uid, gid, mode, type_, r.Body, nil, nil + } else { + return 0, 0, 0, "", nil, nil, fmt.Errorf("unknown file type '%s'", type_) + } +} + +func (c *Client) RecursivePullFile(container string, p string, targetDir string) error { + if c.Remote.Public { + return fmt.Errorf("This function isn't supported by public remotes.") + } + + _, _, mode, type_, buf, entries, err := c.PullFile(container, p) + if err != nil { + return err + } + + target := filepath.Join(targetDir, filepath.Base(p)) + + if type_ == "directory" { + if err := os.Mkdir(target, os.FileMode(mode)); err != nil { + return err + } + + for _, ent := range entries { + nextP := path.Join(p, ent) + if err := c.RecursivePullFile(container, nextP, target); err != nil { + return err + } + } + } else if type_ == "file" { + f, err := os.Create(target) + if err != nil { + return err + } + defer f.Close() + + err = f.Chmod(os.FileMode(mode)) + if err != nil { + return err + } + + _, err = io.Copy(f, buf) + if err != nil { + return err + } + } else { + return fmt.Errorf("unknown file type '%s'", type_) + } + + return nil } func (c *Client) GetMigrationSourceWS(container string) (*Response, error) { @@ -1694,19 +1993,35 @@ return c.post(url, body, Async) } -func (c *Client) MigrateFrom(name string, operation string, certificate string, secrets map[string]string, architecture string, config map[string]string, devices shared.Devices, profiles []string, 
baseImage string, ephemeral bool) (*Response, error) { +func (c *Client) MigrateFrom(name string, operation string, certificate string, + sourceSecrets map[string]string, architecture string, config map[string]string, + devices shared.Devices, profiles []string, + baseImage string, ephemeral bool, push bool, sourceClient *Client, + sourceOperation string) (*Response, error) { if c.Remote.Public { return nil, fmt.Errorf("This function isn't supported by public remotes.") } source := shared.Jmap{ - "type": "migration", - "mode": "pull", - "operation": operation, - "certificate": certificate, - "secrets": secrets, - "base-image": baseImage, + "type": "migration", + "base-image": baseImage, } + + if push { + source["mode"] = "push" + source["live"] = false + // If the criu secret is present we know that live migration is + // required. + if _, ok := sourceSecrets["criu"]; ok { + source["live"] = true + } + } else { + source["mode"] = "pull" + source["secrets"] = sourceSecrets + source["operation"] = operation + source["certificate"] = certificate + } + body := shared.Jmap{ "architecture": architecture, "config": config, @@ -1717,6 +2032,140 @@ "source": source, } + if source["mode"] == "push" { + // Check source server secrets. + sourceControlSecret, ok := sourceSecrets["control"] + if !ok { + return nil, fmt.Errorf("Missing control secret") + } + sourceFsSecret, ok := sourceSecrets["fs"] + if !ok { + return nil, fmt.Errorf("Missing fs secret") + } + + criuSecret := false + sourceCriuSecret, ok := sourceSecrets["criu"] + if ok { + criuSecret = true + } + + // Connect to source server websockets. 
+ sourceControlConn, err := sourceClient.Websocket(sourceOperation, sourceControlSecret) + if err != nil { + return nil, err + } + sourceFsConn, err := sourceClient.Websocket(sourceOperation, sourceFsSecret) + if err != nil { + return nil, err + } + + var sourceCriuConn *websocket.Conn + if criuSecret { + sourceCriuConn, err = sourceClient.Websocket(sourceOperation, sourceCriuSecret) + if err != nil { + return nil, err + } + } + + // Post to target server and request and retrieve a set of + // websockets + secrets matching those of the source server. + resp, err := c.post("containers", body, Async) + if err != nil { + return nil, err + } + + destSecrets := map[string]string{} + op, err := resp.MetadataAsOperation() + if err != nil { + return nil, err + } + for k, v := range *op.Metadata { + destSecrets[k] = v.(string) + } + + destControlSecret, ok := destSecrets["control"] + if !ok { + return nil, fmt.Errorf("Missing control secret") + } + destFsSecret, ok := destSecrets["fs"] + if !ok { + return nil, fmt.Errorf("Missing fs secret") + } + destCriuSecret, ok := destSecrets["criu"] + if criuSecret && !ok || !criuSecret && ok { + return nil, fmt.Errorf("Missing criu secret") + } + + // Connect to dest server websockets. + destControlConn, err := c.Websocket(resp.Operation, destControlSecret) + if err != nil { + return nil, err + } + destFsConn, err := c.Websocket(resp.Operation, destFsSecret) + if err != nil { + return nil, err + } + + var destCriuConn *websocket.Conn + if criuSecret { + destCriuConn, err = c.Websocket(resp.Operation, destCriuSecret) + if err != nil { + return nil, err + } + } + + // Let client shovel data from src to dest server. 
+ capacity := 4 + if criuSecret { + capacity += 2 + } + syncChan := make(chan error, capacity) + defer close(syncChan) + + proxy := func(src *websocket.Conn, dest *websocket.Conn) { + for { + mt, r, err := src.NextReader() + if err != nil { + if err != io.EOF { + syncChan <- err + break + } + } + + w, err := dest.NextWriter(mt) + if err != nil { + syncChan <- err + break + } + + if _, err := io.Copy(w, r); err != nil { + syncChan <- err + break + } + + if err := w.Close(); err != nil { + syncChan <- err + break + } + } + } + + go proxy(sourceControlConn, destControlConn) + go proxy(destControlConn, sourceControlConn) + go proxy(sourceFsConn, destFsConn) + go proxy(destFsConn, sourceFsConn) + if criuSecret { + go proxy(sourceCriuConn, destCriuConn) + go proxy(destCriuConn, sourceCriuConn) + } + + for i := 0; i < cap(syncChan); i++ { + <-syncChan + } + + return resp, nil + } + return c.post("containers", body, Async) } @@ -1752,7 +2201,7 @@ * "//operations/" in it; we chop off the leading / and pass * it to url directly. 
*/ - shared.Debugf(path.Join(waitURL[1:], "wait")) + shared.LogDebugf(path.Join(waitURL[1:], "wait")) resp, err := c.baseGet(c.url(waitURL, "wait")) if err != nil { return nil, err @@ -1774,6 +2223,19 @@ return fmt.Errorf(op.Err) } +func (c *Client) WaitForSuccessOp(waitURL string) (*shared.Operation, error) { + op, err := c.WaitFor(waitURL) + if err != nil { + return nil, err + } + + if op.StatusCode == shared.Success { + return op, nil + } + + return op, fmt.Errorf(op.Err) +} + func (c *Client) RestoreSnapshot(container string, snapshotName string, stateful bool) (*Response, error) { if c.Remote.Public { return nil, fmt.Errorf("This function isn't supported by public remotes.") @@ -1812,6 +2274,31 @@ return result, nil } +func (c *Client) SnapshotInfo(snapName string) (*shared.SnapshotInfo, error) { + if c.Remote.Public { + return nil, fmt.Errorf("This function isn't supported by public remotes.") + } + + pieces := strings.SplitN(snapName, shared.SnapshotDelimiter, 2) + if len(pieces) != 2 { + return nil, fmt.Errorf("invalid snapshot name %s", snapName) + } + + qUrl := fmt.Sprintf("containers/%s/snapshots/%s", pieces[0], pieces[1]) + resp, err := c.get(qUrl) + if err != nil { + return nil, err + } + + var result shared.SnapshotInfo + + if err := json.Unmarshal(resp.Metadata, &result); err != nil { + return nil, err + } + + return &result, nil +} + func (c *Client) GetServerConfigString() ([]string, error) { var resp []string @@ -1967,7 +2454,7 @@ st, err := c.ProfileConfig(profile) if err != nil { - shared.Debugf("Error getting profile %s to update", profile) + shared.LogDebugf("Error getting profile %s to update", profile) return err } @@ -1994,48 +2481,25 @@ return err } -func (c *Client) ListProfiles() ([]string, error) { +func (c *Client) ListProfiles() ([]shared.ProfileConfig, error) { if c.Remote.Public { return nil, fmt.Errorf("This function isn't supported by public remotes.") } - resp, err := c.get("profiles") + resp, err := c.get("profiles?recursion=1") 
if err != nil { return nil, err } - var result []string - - if err := json.Unmarshal(resp.Metadata, &result); err != nil { + profiles := []shared.ProfileConfig{} + if err := json.Unmarshal(resp.Metadata, &profiles); err != nil { return nil, err } - names := []string{} - - for _, url := range result { - toScan := strings.Replace(url, "/", " ", -1) - version := "" - name := "" - count, err := fmt.Sscanf(toScan, " %s profiles %s", &version, &name) - if err != nil { - return nil, err - } - - if count != 2 { - return nil, fmt.Errorf("bad profile url %s", url) - } - - if version != shared.APIVersion { - return nil, fmt.Errorf("bad version in profile url") - } - - names = append(names, name) - } - - return names, nil + return profiles, nil } -func (c *Client) ApplyProfile(container, profile string) (*Response, error) { +func (c *Client) AssignProfile(container, profile string) (*Response, error) { if c.Remote.Public { return nil, fmt.Errorf("This function isn't supported by public remotes.") } @@ -2045,7 +2509,11 @@ return nil, err } - st.Profiles = strings.Split(profile, ",") + if profile != "" { + st.Profiles = strings.Split(profile, ",") + } else { + st.Profiles = nil + } return c.put(fmt.Sprintf("containers/%s", container), st, Async) } @@ -2060,9 +2528,14 @@ return nil, err } - delete(st.Devices, devname) + for n, _ := range st.Devices { + if n == devname { + delete(st.Devices, n) + return c.put(fmt.Sprintf("containers/%s", container), st, Async) + } + } - return c.put(fmt.Sprintf("containers/%s", container), st, Async) + return nil, fmt.Errorf("Device doesn't exist.") } func (c *Client) ContainerDeviceAdd(container, devname, devtype string, props []string) (*Response, error) { @@ -2129,10 +2602,11 @@ for n, _ := range st.Devices { if n == devname { delete(st.Devices, n) + return c.put(fmt.Sprintf("profiles/%s", profile), st, Sync) } } - return c.put(fmt.Sprintf("profiles/%s", profile), st, Sync) + return nil, fmt.Errorf("Device doesn't exist.") } func (c *Client) 
ProfileDeviceAdd(profile, devname, devtype string, props []string) (*Response, error) { @@ -2232,17 +2706,21 @@ return op.Metadata, nil } -func (c *Client) ImageFromContainer(cname string, public bool, aliases []string, properties map[string]string) (string, error) { +func (c *Client) ImageFromContainer(cname string, public bool, aliases []string, properties map[string]string, compression_algorithm string) (string, error) { if c.Remote.Public { return "", fmt.Errorf("This function isn't supported by public remotes.") } - source := shared.Jmap{"type": "container", "name": cname} if shared.IsSnapshot(cname) { source["type"] = "snapshot" } + body := shared.Jmap{"public": public, "source": source, "properties": properties} + if compression_algorithm != "" { + body["compression_algorithm"] = compression_algorithm + } + resp, err := c.post("images", body, Async) if err != nil { return "", err @@ -2269,3 +2747,73 @@ return fingerprint, nil } + +// Network functions +func (c *Client) NetworkCreate(name string, config map[string]string) error { + if c.Remote.Public { + return fmt.Errorf("This function isn't supported by public remotes.") + } + + body := shared.Jmap{"name": name, "config": config} + + _, err := c.post("networks", body, Sync) + return err +} + +func (c *Client) NetworkGet(name string) (shared.NetworkConfig, error) { + if c.Remote.Public { + return shared.NetworkConfig{}, fmt.Errorf("This function isn't supported by public remotes.") + } + + resp, err := c.get(fmt.Sprintf("networks/%s", name)) + if err != nil { + return shared.NetworkConfig{}, err + } + + network := shared.NetworkConfig{} + if err := json.Unmarshal(resp.Metadata, &network); err != nil { + return shared.NetworkConfig{}, err + } + + return network, nil +} + +func (c *Client) NetworkPut(name string, network shared.NetworkConfig) error { + if c.Remote.Public { + return fmt.Errorf("This function isn't supported by public remotes.") + } + + if network.Name != name { + return fmt.Errorf("Cannot 
change network name") + } + + _, err := c.put(fmt.Sprintf("networks/%s", name), network, Sync) + return err +} + +func (c *Client) NetworkDelete(name string) error { + if c.Remote.Public { + return fmt.Errorf("This function isn't supported by public remotes.") + } + + _, err := c.delete(fmt.Sprintf("networks/%s", name), nil, Sync) + return err +} + +func (c *Client) ListNetworks() ([]shared.NetworkConfig, error) { + if c.Remote.Public { + return nil, fmt.Errorf("This function isn't supported by public remotes.") + } + + resp, err := c.get("networks?recursion=1") + if err != nil { + return nil, err + } + + networks := []shared.NetworkConfig{} + if err := json.Unmarshal(resp.Metadata, &networks); err != nil { + return nil, err + } + + return networks, nil +} diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/client_test.go juju-core-2.0.0/src/github.com/lxc/lxd/client_test.go --- juju-core-2.0~beta15/src/github.com/lxc/lxd/client_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/client_test.go 2016-10-13 14:31:53.000000000 +0000 @@ -0,0 +1,52 @@ +package lxd + +import ( + "fmt" + "io/ioutil" + "os" + "syscall" + "testing" +) + +func assertNoError(t *testing.T, err error, msg string) { + if err != nil { + t.Fatalf("Error: %s, action: %s", err, msg) + } +} + +func TestLocalLXDError(t *testing.T) { + f, err := ioutil.TempFile("", "lxd-test.socket") + assertNoError(t, err, "ioutil.TempFile to create fake socket file") + defer os.RemoveAll(f.Name()) + + c := &Client{ + Name: "test", + Config: DefaultConfig, + Remote: &RemoteConfig{ + Addr: fmt.Sprintf("unix:%s", f.Name()), + Static: true, + Public: false, + }, + } + runTest := func(exp error) { + lxdErr := GetLocalLXDErr(connectViaUnix(c, c.Remote)) + if lxdErr != exp { + t.Fatalf("GetLocalLXDErr returned the wrong error, EXPECTED: %s, ACTUAL: %s", exp, lxdErr) + } + } + + // The fake socket file should mimic a socket with nobody listening. 
+ runTest(syscall.ECONNREFUSED) + + // Remove R/W permissions to mimic the user not having lxd group permissions. + // Skip this test for root, as root ignores permissions and connect will fail + // with ECONNREFUSED instead of EACCES. + if os.Geteuid() != 0 { + assertNoError(t, f.Chmod(0100), "f.Chmod on fake socket file") + runTest(syscall.EACCES) + } + + // Remove the fake socket to mimic LXD not being installed. + assertNoError(t, os.RemoveAll(f.Name()), "osRemoveAll on fake socket file") + runTest(syscall.ENOENT) +} diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/config/bash/lxd-client juju-core-2.0.0/src/github.com/lxc/lxd/config/bash/lxd-client --- juju-core-2.0~beta15/src/github.com/lxc/lxd/config/bash/lxd-client 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/config/bash/lxd-client 2016-10-13 14:31:53.000000000 +0000 @@ -3,8 +3,14 @@ { _lxd_names() { + local state=$1 + local keys=$2 + + local cmd="lxc list --fast" + [ -n "$state" ] && cmd="$cmd | grep -E '$state'" + COMPREPLY=( $( compgen -W \ - "$( lxc list | tail -n +4 | awk '{print $2}' | egrep -v '^(\||^$)' )" "$cur" ) + "$( eval $cmd | grep -Ev '(+--|NAME)' | awk '{print $2}' ) $keys" "$cur" ) ) } @@ -15,85 +21,227 @@ ) } - local cur prev + _lxd_remotes() + { + COMPREPLY=( $( compgen -W \ + "$( lxc remote list | tail -n +4 | awk '{print $2}' | egrep -v '^(\||^$)' )" "$cur" ) + ) + } + + _lxd_profiles() + { + COMPREPLY=( $( compgen -W "$( lxc profile list | tail -n +4 | awk '{print $2}' | egrep -v '^(\||^$)' )" "$cur" ) ) + } + + _lxd_networks() + { + COMPREPLY=( $( compgen -W \ + "$( lxc network list | tail -n +4 | awk '{print $2}' | egrep -v '^(\||^$)' )" "$cur" ) + ) + } COMPREPLY=() - cur=${COMP_WORDS[COMP_CWORD]} - prev=${COMP_WORDS[COMP_CWORD-1]} + # ignore special --foo args + if [[ ${COMP_WORDS[COMP_CWORD]} == -* ]]; then + return 0 + fi + lxc_cmds="config copy delete exec file help image info init launch \ - list move profile publish remote restart restore 
snapshot start stop \ - version" + list move network profile publish remote restart restore shell snapshot \ + start stop version" + + global_keys="core.https_address core.https_allowd_origin \ + core.https_allowed_methods core.https_allowed_headers \ + core.https_allowed_credentials core.proxy_https \ + core.proxy_http core.proxy_ignore_host core.trust_password \ + storage.lvm_vg_name storage.lvm_thinpool_name storage.lvm_fstype \ + storage.lvm_volume_size storage.lvm_mount_options storage.zfs_pool_name \ + storage.zfs_remove_snapshots storage.zfs_use_refquota \ + images.compression_algorithm \ + images.remote_cache_expiry images.auto_update_interval \ + images.auto_update_cached" + + container_keys="boot.autostart boot.autostart.delay boot.autostart.priority \ + boot.host_shutdown_timeout limits.cpu limits.cpu.allowance limits.cpu.priority \ + limits.disk.priority limits.memory limits.memory.enforce limits.memory.swap \ + limits.memory.swap.priority limits.network.priority limits.processes \ + linux.kernel_modules raw.apparmor raw.lxc raw.seccomp security.nesting \ + security.privileged security.syscalls.blacklist_default \ + security.syscalls.blacklist_compat security.syscalls.blacklist \ + security.syscalls.whitelist volatile.apply_template volatile.base_image \ + volatile.last_state.idmap volatile.last_state.power user.network_mode \ + user.meta-data user.user-data user.vendor-data" + + networks_keys="bridge.driver bridge.external_interfaces bridge.mtu bridge.mode \ + fan.underlay_subnet fan.overlay_subnet fan.type ipv4.address ipv4.nat ipv4.dhcp \ + ipv4.dhcp.ranges ipv4.routing ipv6.address ipv6.nat ipv6.dhcp ipv6.dhcp.stateful \ + ipv6.dhcp.ranges ipv6.routing dns.domain dns.mode raw.dnsmasq" if [ $COMP_CWORD -eq 1 ]; then - COMPREPLY=( $(compgen -W "$lxc_cmds" -- $cur) ) - elif [ $COMP_CWORD -eq 2 ]; then - case "$prev" in - "config") - COMPREPLY=( $(compgen -W "device edit get set show trust" -- $cur) ) - ;; - "copy") - _lxd_names - ;; - "delete") - 
_lxd_names - ;; - "exec") - _lxd_names - ;; - "file") - COMPREPLY=( $(compgen -W "pull push edit" -- $cur) ) - ;; - "help") - COMPREPLY=( $(compgen -W "$lxc_cmds" -- $cur) ) - ;; - "image") - COMPREPLY=( $(compgen -W "import copy delete edit export info list show alias" -- $cur) ) - ;; - "info") - _lxd_names - ;; - "init") - _lxd_images - ;; - "launch") - _lxd_images - ;; - "move") - _lxd_names - ;; - "profile") - COMPREPLY=( $(compgen -W \ - "list show create edit copy get set delete apply device" -- $cur) ) - ;; - "publish") - _lxd_names - ;; - "remote") - COMPREPLY=( $(compgen -W \ - "add remove list rename set-url set-default get-default" -- $cur) ) - ;; - "restart") - _lxd_names - ;; - "restore") - _lxd_names - ;; - "snapshot") - _lxd_names - ;; - "start") - # should check if containers are stopped - _lxd_names - ;; - "stop") - # should check if containers are started - _lxd_names - ;; - *) - ;; - esac + COMPREPLY=( $(compgen -W "$lxc_cmds" -- ${COMP_WORDS[COMP_CWORD]}) ) + return 0 + fi + + local no_dashargs + cur=${COMP_WORDS[COMP_CWORD]} + + no_dashargs=(${COMP_WORDS[@]// -*}) + pos=$((COMP_CWORD - (${#COMP_WORDS[@]} - ${#no_dashargs[@]}))) + if [ -z "$cur" ]; then + pos=$(($pos + 1)) fi + case ${no_dashargs[1]} in + "config") + case $pos in + 2) + COMPREPLY=( $(compgen -W "device edit get set show trust" -- $cur) ) + ;; + 3) + case ${no_dashargs[2]} in + "trust") + COMPREPLY=( $(compgen -W "list add remove" -- $cur) ) + ;; + "device") + COMPREPLY=( $(compgen -W "add get set unset list show remove" -- $cur) ) + ;; + "show"|"edit") + _lxd_names + ;; + "get"|"set"|"unset") + _lxd_names "" "$global_keys" + ;; + esac + ;; + 4) + case ${no_dashargs[2]} in + "trust") + _lxd_remotes + ;; + "device") + _lxd_names + ;; + "get"|"set"|"unset") + COMPREPLY=( $(compgen -W "$container_keys" -- $cur) ) + ;; + esac + ;; + esac + ;; + "copy") + if [ $pos -lt 4 ]; then + _lxd_names + fi + ;; + "delete") + _lxd_names + ;; + "exec") + _lxd_names "RUNNING" + ;; + "file") + 
COMPREPLY=( $(compgen -W "pull push edit" -- $cur) ) + ;; + "help") + COMPREPLY=( $(compgen -W "$lxc_cmds" -- $cur) ) + ;; + "image") + COMPREPLY=( $(compgen -W "import copy delete edit export info list show alias" -- $cur) ) + ;; + "info") + _lxd_names + ;; + "init") + _lxd_images + ;; + "launch") + _lxd_images + ;; + "move") + _lxd_names + ;; + "network") + case $pos in + 2) + COMPREPLY=( $(compgen -W "list show create get set unset delete edit attach attach-profile detach detach-profile" -- $cur) ) + ;; + 3) + case ${no_dashargs[2]} in + "show"|"get"|"set"|"unset"|"delete"|"edit"|"attach"|"attach-profile"|"detach"|"detach-profile") + _lxd_networks + ;; + esac + ;; + 4) + case ${no_dashargs[2]} in + "get"|"set"|"unset") + COMPREPLY=( $(compgen -W "$networks_keys" -- $cur) ) + ;; + "attach"|"detach"|"detach-profile") + _lxd_names + ;; + "attach-profile") + _lxd_profiles + ;; + esac + esac + ;; + "profile") + case $pos in + 2) + COMPREPLY=( $(compgen -W "list copy delete apply device edit get set show" -- $cur) ) + ;; + 3) + case ${no_dashargs[2]} in + "device") + COMPREPLY=( $(compgen -W "add get set unset list show remove" -- $cur) ) + ;; + *) + _lxd_profiles + ;; + esac + ;; + 4) + case ${no_dashargs[2]} in + "device") + _lxd_profiles + ;; + *) + COMPREPLY=( $(compgen -W "$container_keys" -- $cur) ) + ;; + esac + ;; + esac + ;; + "publish") + _lxd_names + ;; + "remote") + COMPREPLY=( $(compgen -W \ + "add remove list rename set-url set-default get-default" -- $cur) ) + ;; + "restart") + _lxd_names + ;; + "restore") + _lxd_names + ;; + "shell") + _lxd_names "RUNNING" + ;; + "snapshot") + _lxd_names + ;; + "start") + _lxd_names "STOPPED|FROZEN" + ;; + "stop") + _lxd_names "RUNNING" + ;; + *) + ;; + esac + return 0 } diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/config.go juju-core-2.0.0/src/github.com/lxc/lxd/config.go --- juju-core-2.0~beta15/src/github.com/lxc/lxd/config.go 2016-08-16 08:56:25.000000000 +0000 +++ 
juju-core-2.0.0/src/github.com/lxc/lxd/config.go 2016-10-13 14:31:53.000000000 +0000 @@ -52,8 +52,9 @@ Public: false} var ImagesRemote = RemoteConfig{ - Addr: "https://images.linuxcontainers.org", - Public: true} + Addr: "https://images.linuxcontainers.org", + Public: true, + Protocol: "simplestreams"} var UbuntuRemote = RemoteConfig{ Addr: "https://cloud-images.ubuntu.com/releases", @@ -80,9 +81,7 @@ var DefaultConfig = Config{ Remotes: DefaultRemotes, - DefaultRemote: "local", - Aliases: map[string]string{}, -} + DefaultRemote: "local"} // LoadConfig reads the configuration from the config path; if the path does // not exist, it returns a default configuration. diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/doc/api-extensions.md juju-core-2.0.0/src/github.com/lxc/lxd/doc/api-extensions.md --- juju-core-2.0~beta15/src/github.com/lxc/lxd/doc/api-extensions.md 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/doc/api-extensions.md 2016-10-13 14:31:53.000000000 +0000 @@ -0,0 +1,137 @@ +# API extensions + +The changes below were introduced to the LXD API after the 1.0 API was finalized. + +They are all backward compatible and can be detected by client tools by +looking at the api\_extensions field in GET /1.0/. + + +## storage\_zfs\_remove\_snapshots +A storage.zfs\_remove\_snapshots daemon configuration key was introduced. + +It's a boolean that defaults to false and that when set to true instructs LXD +to remove any needed snapshot when attempting to restore another. + +This is needed as ZFS will only let you restore the latest snapshot. + +## container\_host\_shutdown\_timeout +A boot.host\_shutdown\_timeout container configuration key was introduced. + +It's an integer which indicates how long LXD should wait for the container +to stop before killing it. + +Its value is only used on clean LXD daemon shutdown. It defaults to 30s. 
+ +## container\_syscall\_filtering +A number of new syscalls related container configuration keys were introduced. + + * security.syscalls.blacklist\_default + * security.syscalls.blacklist\_compat + * security.syscalls.blacklist + * security.syscalls.whitelist + +See configuration.md for how to use them. + +## auth\_pki +This indicates support for PKI authentication mode. + +In this mode, the client and server both must use certificates issued by the same PKI. + +See lxd-ssl-authentication.md for details. + +## container\_last\_used\_at +A last\_used\_at field was added to the /1.0/containers/\ GET endpoint. + +It is a timestamp of the last time the container was started. + +If a container has been created but not started yet, last\_used\_at field +will be 1970-01-01T00:00:00Z + +## etag +Add support for the ETag header on all relevant endpoints. + +This adds the following HTTP header on answers to GET: + - ETag (SHA-256 of user modifiable content) + +And adds support for the following HTTP header on PUT requests: + - If-Match (ETag value retrieved through previous GET) + +This makes it possible to GET a LXD object, modify it and PUT it without +risking to hit a race condition where LXD or another client modified the +object in the meantime. + +## patch +Add support for the HTTP PATCH method. + +PATCH allows for partial update of an object in place of PUT. + +## https\_allowed\_credentials +To use LXD API with all Web Browsers (via SPAs) you must send credentials +(certificate) with each XHR (in order for this to happen, you should set +["withCredentials=true"](https://developer.mozilla.org/en-US/docs/Web/API/XMLHttpRequest/withCredentials) +flag to each XHR Request). + +Some browsers like Firefox and Safari can't accept server response without +`Access-Control-Allow-Credentials: true` header. To ensure that the server will +return a response with that header, set `core.https_allowed_credentials=true`. 
+ +## image\_compression\_algorithm +This adds support for a compression\_algorithm property when creating an image (POST to /1.0/images). + +Setting this property overrides the server default value (images.compression\_algorithm). + +## directory\_manipulation +This allows for creating and listing directories via the LXD API, and exports +the file type via the X-LXD-type header, which can be either "file" or +"directory" right now. + +## container\_cpu\_time +This adds support for retrieving cpu time for a running container. + +## storage\_zfs\_use\_refquota +Introduces a new server property "storage.zfs\_use\_refquota" which instructs LXD +to set the "refquota" property instead of "quota" when setting a size limit +on a container. LXD will also then use "usedbydataset" in place of "used" +when being queried about disk utilization. + +This effectively controls whether disk usage by snapshots should be +considered as part of the container's disk space usage. + +## storage\_lvm\_mount\_options +Adds a new "storage.lvm\_mount\_options" daemon configuration option +which defaults to "discard" and allows the user to set addition mount +options for the filesystem used by the LVM LV. + +## network +Network management API for LXD. + +This includes: + * Addition of the "managed" property on /1.0/networks entries + * All the network configuration options (see configuration.md for details) + * POST /1.0/networks (see rest-api.md for details) + * PUT /1.0/networks/ (see rest-api.md for details) + * PATCH /1.0/networks/ (see rest-api.md for details) + * DELETE /1.0/networks/ (see rest-api.md for details) + * ipv4.address property on "nic" type devices (when nictype is "bridged") + * ipv6.address property on "nic" type devices (when nictype is "bridged") + * security.mac\_filtering property on "nic" type devices (when nictype is "bridged") + +## profile\_usedby +Adds a new used\_by field to profile entries listing the containers that are using it. 
+ +## container\_push +When a container is created in push mode, the client serves as a proxy between +the source and target server. This is useful in cases where the target server +is behind a NAT or firewall and cannot directly communicate with the source +server and operate in pull mode. + +## container\_exec\_recording +Introduces a new boolean "record-output" parameter to +/1.0/containers//exec which when set to "true" and combined +with "wait-for-websocket" set to false, will record stdout and stderr to +disk and make them available through the logs interface. + +The URL to the recorded output is included in the operation metadata +once the command is done running. + +That output will expire similarly to other log files, typically after 48 hours. diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/doc/configuration.md juju-core-2.0.0/src/github.com/lxc/lxd/doc/configuration.md --- juju-core-2.0~beta15/src/github.com/lxc/lxd/doc/configuration.md 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/doc/configuration.md 2016-10-13 14:31:53.000000000 +0000 @@ -17,24 +17,29 @@ - images (image configuration) - storage (storage configuration) -Key | Type | Default | Description -:-- | :--- | :------ | :---------- -core.https\_address | string | - | Address to bind for the remote API -core.https\_allowed\_origin | string | - | Access-Control-Allow-Origin http header value -core.https\_allowed\_methods | string | - | Access-Control-Allow-Methods http header value -core.https\_allowed\_headers | string | - | Access-Control-Allow-Headers http header value -core.proxy\_https | string | - | https proxy to use, if any (falls back to HTTPS_PROXY environment variable) -core.proxy\_http | string | - | http proxy to use, if any (falls back to HTTP_PROXY environment variable) -core.proxy\_ignore\_hosts | string | - | hosts which don't need the proxy for use (similar format to NO_PROXY, e.g. 
1.2.3.4,1.2.3.5, falls back to NO_PROXY environment varialbe) -core.trust\_password | string | - | Password to be provided by clients to setup a trust -storage.lvm\_vg\_name | string | - | LVM Volume Group name to be used for container and image storage. A default Thin Pool is created using 100% of the free space in the Volume Group, unless `storage.lvm_thinpool_name` is set. -storage.lvm\_thinpool\_name | string | "LXDPool" | LVM Thin Pool to use within the Volume Group specified in `storage.lvm_vg_name`, if the default pool parameters are undesirable. -storage.lvm\_fstype | string | ext4 | Format LV with filesystem, for now it's value can be only ext4 (default) or xfs. -storage.zfs\_pool\_name | string | - | ZFS pool name -images.compression\_algorithm | string | gzip | Compression algorithm to use for new images (bzip2, gzip, lzma, xz or none) -images.remote\_cache\_expiry | integer | 10 | Number of days after which an unused cached remote image will be flushed -images.auto\_update\_interval | integer | 6 | Interval in hours at which to look for update to cached images (0 disables it) -images.auto\_update\_cached | boolean | true | Whether to automatically update any image that LXD caches +Key | Type | Default | API extension | Description +:-- | :--- | :------ | :------------ | :---------- +core.https\_address | string | - | - | Address to bind for the remote API +core.https\_allowed\_origin | string | - | - | Access-Control-Allow-Origin http header value +core.https\_allowed\_methods | string | - | - | Access-Control-Allow-Methods http header value +core.https\_allowed\_headers | string | - | - | Access-Control-Allow-Headers http header value +core.https\_allowed\_credentials| boolean | - | - | Whether to set Access-Control-Allow-Credentials http header value to "true" +core.proxy\_https | string | - | - | https proxy to use, if any (falls back to HTTPS\_PROXY environment variable) +core.proxy\_http | string | - | - | http proxy to use, if any (falls back to 
HTTP\_PROXY environment variable) +core.proxy\_ignore\_hosts | string | - | - | hosts which don't need the proxy for use (similar format to NO\_PROXY, e.g. 1.2.3.4,1.2.3.5, falls back to NO\_PROXY environment variable) +core.trust\_password | string | - | - | Password to be provided by clients to setup a trust +storage.lvm\_vg\_name | string | - | - | LVM Volume Group name to be used for container and image storage. A default Thin Pool is created using 100% of the free space in the Volume Group, unless `storage.lvm_thinpool_name` is set. +storage.lvm\_thinpool\_name | string | "LXDPool" | - | LVM Thin Pool to use within the Volume Group specified in `storage.lvm_vg_name`, if the default pool parameters are undesirable. +storage.lvm\_fstype | string | ext4 | - | Format LV with filesystem, for now it's value can be only ext4 (default) or xfs. +storage.lvm\_mount\_options | string | discard | storage\_lvm\_mount\_options | Mount options for the LV filesystem +storage.lvm\_volume\_size | string | 10GiB | - | Size of the logical volume +storage.zfs\_pool\_name | string | - | - | ZFS pool name +storage.zfs\_remove\_snapshots | boolean | false | storage\_zfs\_remove\_snapshots | Automatically remove any needed snapshot when attempting a container restore +storage.zfs\_use\_refquota | boolean | false | storage\_zfs\_use\_refquota | Don't include snapshots as part of container quota (size property) or in reported disk usage +images.compression\_algorithm | string | gzip | - | Compression algorithm to use for new images (bzip2, gzip, lzma, xz or none) +images.remote\_cache\_expiry | integer | 10 | - | Number of days after which an unused cached remote image will be flushed +images.auto\_update\_interval | integer | 6 | - | Interval in hours at which to look for update to cached images (0 disables it) +images.auto\_update\_cached | boolean | true | - | Whether to automatically update any image that LXD caches Those keys can be set using the lxc tool with: @@ -62,28 +67,34 @@ 
The currently supported keys are: -Key | Type | Default | Live update | Description -:-- | :--- | :------ | :---------- | :---------- -boot.autostart | boolean | false | n/a | Always start the container when LXD starts -boot.autostart.delay | integer | 0 | n/a | Number of seconds to wait after the container started before starting the next one -boot.autostart.priority | integer | 0 | n/a | What order to start the containers in (starting with highest) -environment.\* | string | - | yes (exec) | key/value environment variables to export to the container and set on exec -limits.cpu | string | - (all) | yes | Number or range of CPUs to expose to the container -limits.cpu.allowance | string | 100% | yes | How much of the CPU can be used. Can be a percentage (e.g. 50%) for a soft limit or hard a chunk of time (25ms/100ms) -limits.cpu.priority | integer | 10 (maximum) | yes | CPU scheduling priority compared to other containers sharing the same CPUs (overcommit) -limits.disk.priority | integer | 5 (medium) | yes | When under load, how much priority to give to the container's I/O requests -limits.memory | string | - (all) | yes | Percentage of the host's memory or fixed value in bytes (supports kB, MB, GB, TB, PB and EB suffixes) -limits.memory.enforce | string | hard | yes | If hard, container can't exceed its memory limit. If soft, the container can exceed its memory limit when extra host memory is available. 
-limits.memory.swap | boolean | true | yes | Whether to allow some of the container's memory to be swapped out to disk -limits.memory.swap.priority | integer | 10 (maximum) | yes | The higher this is set, the least likely the container is to be swapped to disk -limits.network.priority | integer | 0 (minimum) | yes | When under load, how much priority to give to the container's network requests -limits.processes | integer | - (max) | yes | Maximum number of processes that can run in the container -linux.kernel\_modules | string | - | yes | Comma separated list of kernel modules to load before starting the container -raw.apparmor | blob | - | yes | Apparmor profile entries to be appended to the generated profile -raw.lxc | blob | - | no | Raw LXC configuration to be appended to the generated one -security.nesting | boolean | false | yes | Support running lxd (nested) inside the container -security.privileged | boolean | false | no | Runs the container in privileged mode -user.\* | string | - | n/a | Free form user key/value storage (can be used in search) +Key | Type | Default | Live update | API extension | Description +:-- | :--- | :------ | :---------- | :------------ | :---------- +boot.autostart | boolean | - | n/a | - | Always start the container when LXD starts (if not set, restore last state) +boot.autostart.delay | integer | 0 | n/a | - | Number of seconds to wait after the container started before starting the next one +boot.autostart.priority | integer | 0 | n/a | - | What order to start the containers in (starting with highest) +boot.host\_shutdown\_timeout | integer | 30 | yes | container\_host\_shutdown\_timeout | Seconds to wait for container to shutdown before it is force stopped +environment.\* | string | - | yes (exec) | - | key/value environment variables to export to the container and set on exec +limits.cpu | string | - (all) | yes | - | Number or range of CPUs to expose to the container +limits.cpu.allowance | string | 100% | yes | - | How much 
of the CPU can be used. Can be a percentage (e.g. 50%) for a soft limit or hard a chunk of time (25ms/100ms) +limits.cpu.priority | integer | 10 (maximum) | yes | - | CPU scheduling priority compared to other containers sharing the same CPUs (overcommit) (integer between 0 and 10) +limits.disk.priority | integer | 5 (medium) | yes | - | When under load, how much priority to give to the container's I/O requests (integer between 0 and 10) +limits.memory | string | - (all) | yes | - | Percentage of the host's memory or fixed value in bytes (supports kB, MB, GB, TB, PB and EB suffixes) +limits.memory.enforce | string | hard | yes | - | If hard, container can't exceed its memory limit. If soft, the container can exceed its memory limit when extra host memory is available. +limits.memory.swap | boolean | true | yes | - | Whether to allow some of the container's memory to be swapped out to disk +limits.memory.swap.priority | integer | 10 (maximum) | yes | - | The higher this is set, the least likely the container is to be swapped to disk (integer between 0 and 10) +limits.network.priority | integer | 0 (minimum) | yes | - | When under load, how much priority to give to the container's network requests (integer between 0 and 10) +limits.processes | integer | - (max) | yes | - | Maximum number of processes that can run in the container +linux.kernel\_modules | string | - | yes | - | Comma separated list of kernel modules to load before starting the container +raw.apparmor | blob | - | yes | - | Apparmor profile entries to be appended to the generated profile +raw.lxc | blob | - | no | - | Raw LXC configuration to be appended to the generated one +raw.seccomp | blob | - | no | container\_syscall\_filtering | Raw Seccomp configuration +security.nesting | boolean | false | yes | - | Support running lxd (nested) inside the container +security.privileged | boolean | false | no | - | Runs the container in privileged mode +security.syscalls.blacklist\_default | boolean | true | no 
| container\_syscall\_filtering | Enables the default syscall blacklist +security.syscalls.blacklist\_compat | boolean | false | no | container\_syscall\_filtering | On x86\_64 this enables blocking of compat\_\* syscalls, it is a no-op on other arches +security.syscalls.blacklist | string | - | no | container\_syscall\_filtering | A '\n' separated list of syscalls to blacklist +security.syscalls.whitelist | string | - | no | container\_syscall\_filtering | A '\n' separated list of syscalls to whitelist (mutually exclusive with security.syscalls.blacklist\*) +user.\* | string | - | n/a | - |Free form user key/value storage (can be used in search) The following volatile keys are currently internally used by LXD: @@ -91,6 +102,7 @@ :-- | :--- | :------ | :---------- volatile.\.hwaddr | string | - | Network device MAC address (when no hwaddr property is set on the device itself) volatile.\.name | string | - | Network device name (when no name propery is set on the device itself) +volatile.apply\_template | string | - | The name of a template hook which should be triggered upon next startup volatile.base\_image | string | - | The hash of the image the container was created from, if any. 
volatile.last\_state.idmap | string | - | Serialized container uid/gid map volatile.last\_state.power | string | - | Container state as of last host shutdown @@ -134,6 +146,8 @@ - /dev/tty (character device) - /dev/random (character device) - /dev/urandom (character device) + - /dev/net/tun (character device) + - /dev/fuse (character device) - lo (network interface) Anything else has to be defined in the container configuration or in one @@ -181,17 +195,20 @@ Different network interface types have different additional properties, the current list is: -Key | Type | Default | Required | Used by | Description -:-- | :-- | :-- | :-- | :-- | :-- -nictype | string | - | yes | all | The device type, one of "physical", "bridged", "macvlan" or "p2p" -limits.ingress | string | - | no | bridged, p2p | I/O limit in bit/s (supports kbit, Mbit, Gbit suffixes) -limits.egress | string | - | no | bridged, p2p | I/O limit in bit/s (supports kbit, Mbit, Gbit suffixes) -limits.max | string | - | no | bridged, p2p | Same as modifying both limits.read and limits.write -name | string | kernel assigned | no | all | The name of the interface inside the container -host\_name | string | randomly assigned | no | bridged, p2p, macvlan | The name of the interface inside the host -hwaddr | string | randomly assigned | no | all | The MAC address of the new interface -mtu | integer | parent MTU | no | all | The MTU of the new interface -parent | string | - | yes | physical, bridged, macvlan | The name of the host device or bridge +Key | Type | Default | Required | Used by | API extension | Description +:-- | :-- | :-- | :-- | :-- | :-- | :-- +nictype | string | - | yes | all | - | The device type, one of "physical", "bridged", "macvlan" or "p2p" +limits.ingress | string | - | no | bridged, p2p | - | I/O limit in bit/s (supports kbit, Mbit, Gbit suffixes) +limits.egress | string | - | no | bridged, p2p | - | I/O limit in bit/s (supports kbit, Mbit, Gbit suffixes) +limits.max | string | - | no | 
bridged, p2p | - | Same as modifying both limits.read and limits.write +name | string | kernel assigned | no | all | - | The name of the interface inside the container +host\_name | string | randomly assigned | no | bridged, p2p, macvlan | - | The name of the interface inside the host +hwaddr | string | randomly assigned | no | all | - | The MAC address of the new interface +mtu | integer | parent MTU | no | all | - | The MTU of the new interface +parent | string | - | yes | physical, bridged, macvlan | - | The name of the host device or bridge +ipv4.address | string | - | no | bridged | network | An IPv4 address to assign to the container through DHCP +ipv6.address | string | - | no | bridged | network | An IPv6 address to assign to the container through DHCP +security.mac\_filtering | boolean | false | no | bridged | network | Prevent the container from spoofing another's MAC address ### Type: disk Disk entries are essentially mountpoints inside the container. They can @@ -231,7 +248,7 @@ mode | int | 0660 | no | Mode of the device in the container ### Type: unix-block -Unix block device entries simply make the requested character device +Unix block device entries simply make the requested block device appear in the container's /dev and allow read/write operations to it. The following properties exist: @@ -245,6 +262,21 @@ gid | int | 0 | no | GID of the device owner in the container mode | int | 0660 | no | Mode of the device in the container +### Type: usb +USB device entries simply make the requested USB device appear in the +container. + +The following properties exist: + +Key | Type | Default | Required | Description +:-- | :-- | :-- | :-- | :-- +vendorid | string | - | yes | The vendor id of the USB device. +productid | string | - | no | The product id of the USB device. 
+uid | int | 0 | no | UID of the device owner in the container +gid | int | 0 | no | GID of the device owner in the container +mode | int | 0660 | no | Mode of the device in the container +required | boolean | false | no | Whether or not this device is required to start the container. (The default is no, and all devices are hot-pluggable.) + ## Profiles Profiles can store any configuration that a container can (key/value or devices) and any number of profiles can be applied to a container. @@ -256,47 +288,62 @@ coming from the profiles. -If not present, LXD will create a "default" profile which comes with a -network interface connected to LXD's default bridge (lxcbr0). +If not present, LXD will create a "default" profile. The "default" profile is set for any new container created which doesn't specify a different profiles list. -## JSON representation -A representation of a container using all the different types of -configurations would look like: - - { - 'name': "my-container", - 'profiles': ["default"], - 'architecture': 'x86_64', - 'config': { - 'limits.cpu': '3', - 'security.privileged': 'true' - }, - 'devices': { - 'nic-lxcbr0': { - 'type': 'none' - }, - 'nic-mybr0': { - 'type': 'nic', - 'mtu': '9000', - 'parent': 'mybr0' - }, - 'rootfs': { - 'type': 'disk', - 'path': '/', - 'source': 'UUID=8f7fdf5e-dc60-4524-b9fe-634f82ac2fb6' - }, - }, - 'status': { - 'status': "Running", - 'status_code': 103, - 'ips': [{'interface': "eth0", - 'protocol': "INET6", - 'address': "2001:470:b368:1020:1::2"}, - {'interface': "eth0", - 'protocol': "INET", - 'address': "172.16.15.30"}]} - } +# Network configuration +LXD supports creating and managing bridges, below is a list of the +configuration options supported for those bridges. + +Note that this feature was introduced as part of API extension "network". 
+ +The key/value configuration is namespaced with the following namespaces +currently supported: + - bridge (L2 interface configuration) + - fan (configuration specific to the Ubuntu FAN overlay) + - tunnel (cross-host tunneling configuration) + - ipv4 (L3 IPv4 configuration) + - ipv6 (L3 IPv6 configuration) + - dns (DNS server and resolution configuration) + - raw (raw configuration file content) + - user (free form key/value for user metadata) + +It is expected that IP addresses and subnets are given using CIDR notation (1.1.1.1/24 or fd80:1234::1/64). +The exception being tunnel local and remote addresses which are just plain addresses (1.1.1.1 or fd80:1234::1). + +Key | Type | Condition | Default | Description +:-- | :-- | :-- | :-- | :-- +bridge.driver | string | - | native | Bridge driver ("native" or "openvswitch") +bridge.external\_interfaces | string | - | - | Comma separate list of unconfigured network interfaces to include in the bridge +bridge.mtu | integer | - | 1500 | Bridge MTU (default varies if tunnel or fan setup) +bridge.mode | string | - | standard | Bridge operation mode ("standard" or "fan") +fan.underlay\_subnet | string | fan mode | default gateway subnet | Subnet to use as the underlay for the FAN (CIDR notation) +fan.overlay\_subnet | string | fan mode | 240.0.0.0/8 | Subnet to use as the overlay for the FAN (CIDR notation) +fan.type | string | fan mode | vxlan | The tunneling type for the FAN ("vxlan" or "ipip") +tunnel.NAME.protocol | string | standard mode | - | Tunneling protocol ("vxlan" or "gre") +tunnel.NAME.local | string | gre or vxlan | - | Local address for the tunnel (not necessary for multicast vxlan) +tunnel.NAME.remote | string | gre or vxlan | - | Remote address for the tunnel (not necessary for multicast vxlan) +tunnel.NAME.group | string | vxlan | 239.0.0.1 | Multicast address for vxlan (used if local and remote aren't set) +tunnel.NAME.port | integer | vxlan | 0 | Specific port to use for the vxlan tunnel +tunnel.NAME.id 
| integer | vxlan | 0 | Specific tunnel ID to use for the vxlan tunnel +ipv4.address | string | standard mode | random unused subnet | IPv4 address for the bridge (CIDR notation). Use "none" to turn off IPv4 or "auto" to generate a new one +ipv4.nat | boolean | ipv4 address | false | Whether to NAT (will default to true if unset and a random ipv4.address is generated) +ipv4.dhcp | boolean | ipv4 address | true | Whether to allocate addresses using DHCP +ipv4.dhcp.ranges | string | ipv4 dhcp | all addresses | Comma separated list of IP ranges to use for DHCP (FIRST-LAST format) +ipv4.routing | boolean | ipv4 address | true | Whether to route traffic in and out of the bridge +ipv6.address | string | standard mode | random unused subnet | IPv6 address for the bridge (CIDR notation). Use "none" to turn off IPv6 or "auto" to generate a new one +ipv6.nat | boolean | ipv6 address | false | Whether to NAT (will default to true if unset and a random ipv6.address is generated) +ipv6.dhcp | boolean | ipv6 address | true | Whether to provide additional network configuration over DHCP +ipv6.dhcp.stateful | boolean | ipv6 dhcp | false | Whether to allocate addresses using DHCP +ipv6.dhcp.ranges | string | ipv6 stateful dhcp | all addresses | Comma separated list of IPv6 ranges to use for DHCP (FIRST-LAST format) +ipv6.routing | boolean | ipv6 address | true | Whether to route traffic in and out of the bridge +dns.domain | string | - | lxd | Domain to advertise to DHCP clients and use for DNS resolution +dns.mode | string | - | managed | DNS registration mode ("none" for no DNS record, "managed" for LXD generated static records or "dynamic" for client generated records) +raw.dnsmasq | string | - | - | Additional dnsmasq configuration to append to the configuration + + +Those keys can be set using the lxc tool with: + lxc network set diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/doc/environment.md juju-core-2.0.0/src/github.com/lxc/lxd/doc/environment.md --- 
juju-core-2.0~beta15/src/github.com/lxc/lxd/doc/environment.md 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/doc/environment.md 2016-10-13 14:31:53.000000000 +0000 @@ -3,6 +3,8 @@ the user's environment and to turn some advanced features on and off. # Common +Name | Description +:--- | :---- LXD\_DIR | The LXD data directory PATH | List of paths to look into when resolving binaries http\_proxy | Proxy server URL for HTTP @@ -20,4 +22,3 @@ :--- | :---- LXD\_SECURITY\_APPARMOR | If set to "false", forces AppArmor off LXD\_LXC\_TEMPLATE\_CONFIG | Path to the LXC template configuration directory -LXD\_LVM\_LVSIZE | Size of the default LVM LV (10Gib by default) diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/doc/image-handling.md juju-core-2.0.0/src/github.com/lxc/lxd/doc/image-handling.md --- juju-core-2.0~beta15/src/github.com/lxc/lxd/doc/image-handling.md 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/doc/image-handling.md 2016-10-13 14:31:53.000000000 +0000 @@ -24,7 +24,7 @@ remote server and was requested through an alias will be automatically updated by LXD. This can be changed with images.auto\_update\_cached. -On startup and then every 3 hours (unless images.auto\_update\_interval +On startup and then every 6 hours (unless images.auto\_update\_interval is set), the LXD daemon will go look for more recent version of all the images in the store which are marked as auto-update and have a recorded source server. @@ -128,3 +128,6 @@ As a general rule, you should never template a file which is owned by a package or is otherwise expected to be overwritten by normal operation of the container. + +For convenience the following functions are exported to pongo templates: + - config\_get("user.foo", "bar") => Returns the value of "user.foo" or "bar" if unset. 
diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/doc/lxd-ssl-authentication.md juju-core-2.0.0/src/github.com/lxc/lxd/doc/lxd-ssl-authentication.md --- juju-core-2.0~beta15/src/github.com/lxc/lxd/doc/lxd-ssl-authentication.md 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/doc/lxd-ssl-authentication.md 2016-10-13 14:31:53.000000000 +0000 @@ -56,8 +56,8 @@ In that mode, any connection to a LXD daemon will be done using the preseeded CA certificate. If the server certificate isn't signed by the -CA, or if it has been revoked, the connection will simply go through the -normal authentication mechanism. +CA, the connection will simply go through the normal authentication +mechanism. If the server certificate is valid and signed by the CA, then the connection continues without prompting the user for the certificate. @@ -67,6 +67,15 @@ and the client can now connect to the server without having to provide any additional credentials. +Enabling PKI mode is done by adding a client.ca file in the +client's configuration directory (~/.config/lxc) and a server.ca file in +the server's configuration directory (/var/lib/lxd). Then a client +certificate must be issued by the CA for the client and a server +certificate for the server. Those must then replace the existing +pre-generated files. + +After this is done, restarting the server will have it run in PKI mode. + # Password prompt To establish a new trust relationship, a password must be set on the server and send by the client when adding itself. 
diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/doc/production-setup.md juju-core-2.0.0/src/github.com/lxc/lxd/doc/production-setup.md --- juju-core-2.0~beta15/src/github.com/lxc/lxd/doc/production-setup.md 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/doc/production-setup.md 2016-10-13 14:31:53.000000000 +0000 @@ -0,0 +1,79 @@ +# Introduction +So you've made it past trying out [LXD live online](https://linuxcontainers.org/lxd/try-it/), +or on a server scavenged from random parts. You like what you see, +and now you want to try doing some serious work with LXD. + +With the vanilla installation of Ubuntu Server 16.04, some modifications +to the server configuration will be needed, to avoid common pitfalls when +using containers that require tens of thousands of file operations. + + +## Common errors that may be encountered + +`Failed to allocate directory watch: Too many open files` + +` : Too many open files` + +`failed to open stream: Too many open files in...` + + +# Server Changes +## /etc/security/limits.conf + +Domain | Type | Item | Value | Default | Description +:----- | :--- | :---- | :-------- | :-------- | :---------- +* | soft | nofile | 1048576 | unset | maximum number of open files +* | hard | nofile | 1048576 | unset | maximum number of open files +root | soft | nofile | 1048576 | unset | maximum number of open files +root | hard | nofile | 1048576 | unset | maximum number of open files +* | soft | memlock | unlimited | unset | maximum locked-in-memory address space (KB) +* | hard | memlock | unlimited | unset | maximum locked-in-memory address space (KB) + + +## /etc/sysctl.conf + +Parameter | Value | Default | Description +:----- | :--- | :--- | :--- +fs.inotify.max\_queued\_events | 1048576 | 16384 | This specifies an upper limit on the number of events that can be queued to the corresponding inotify instance. 
[1] +fs.inotify.max\_user\_instances | 1048576 | 128 | This specifies an upper limit on the number of inotify instances that can be created per real user ID. [1] +fs.inotify.max\_user\_watches | 1048576 | 8192 | This specifies an upper limit on the number of watches that can be created per real user ID. [1] +vm.max\_map\_count | 262144 | 65530 | This file contains the maximum number of memory map areas a process may have. Memory map areas are used as a side-effect of calling malloc, directly by mmap and mprotect, and also when loading shared libraries. + + +Then, reboot the server. + + +[1]: http://man7.org/linux/man-pages/man7/inotify.7.html + +## Network Bandwidth Tweaking +If you have at least 1GbE NIC on your lxd host with a lot of local activity (container - container connections, or host - container connections), or you have 1GbE or better internet connection on your lxd host it is worth playing with txqueuelen. These settings work even better with 10GbE NIC. + +### Server Changes + +#### txqueuelen + +You need to change `txqueuelen` of your real NIC to 10000 (not sure about the best possible value for you), and change lxdbr0 interface `txqueuelen` to 10000. +In Debian-based distros you can change `txqueuelen` permanently in `/etc/network/interfaces` +You can add for ex.: `up ip link set eth0 txqueuelen 10000` to your interface configuration to set txqueuelen value on boot. +You can set txqueuelen temporarily (for test purposes) with `ifconfig interfacename# txqueuelen 10000` + +#### /etc/sysctl.conf + +You also need to increase `net.core.netdev_max_backlog` value. +You can add `net.core.netdev_max_backlog = 182757` to `/etc/sysctl.conf` to set it permanently (after reboot) +You can set `netdev_max_backlog` temporarily (for test purposes) with `echo 182757 > /proc/sys/net/core/netdev_max_backlog` +Note: You may find this value too high; most people prefer to set `netdev_max_backlog` = `net.ipv4.tcp_mem` min. value. 
+For example I use these values `net.ipv4.tcp_mem = 182757 243679 365514` + +### Containers changes + +You also need to change txqueuelen value for all your ethernet interfaces in containers. +In Debian-based distros you can change txqueuelen permanently in `/etc/network/interfaces` +You can add for ex.: `up ip link set eth0 txqueuelen 10000` to your interface configuration to set txqueuelen value on boot. + +### Notes regarding this change + +10000 txqueuelen value commonly used with 10GbE NICs. Basically small txqueuelen values used with slow devices with a high latency, and higher with devices with low latency. I personally have like 3-5% improvement with these settings for local (host with container, container vs container) and internet connections. Good thing about txqueuelen value tweak, the more containers you use, the more you can benefit from this tweak. And you can always temporarily set these values and check this tweak in your environment without lxd host reboot. + + + diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/doc/rest-api.md juju-core-2.0.0/src/github.com/lxc/lxd/doc/rest-api.md --- juju-core-2.0~beta15/src/github.com/lxc/lxd/doc/rest-api.md 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/doc/rest-api.md 2016-10-13 14:31:53.000000000 +0000 @@ -135,7 +135,6 @@ 400 | Failure 401 | Cancelled - # Recursion To optimize queries of large lists, recursion is implemented for collections. A "recursion" argument can be passed to a GET query against a collection. 
@@ -216,7 +215,7 @@ { "api_extensions": [], # List of API extensions added after the API was marked stable - "api_status": "development", # API implementation status (one of, development, stable or deprecated) + "api_status": "stable", # API implementation status (one of, development, stable or deprecated) "api_version": "1.0", # The API version as a string "auth": "trusted", # Authentication state, one of "guest", "untrusted" or "trusted" "config": { # Host configuration @@ -251,14 +250,14 @@ { "api_extensions": [], # List of API extensions added after the API was marked stable - "api_status": "development", # API implementation status (one of, development, stable or deprecated) + "api_status": "stable", # API implementation status (one of, development, stable or deprecated) "api_version": "1.0", # The API version as a string "auth": "guest", # Authentication state, one of "guest", "untrusted" or "trusted" "public": false, # Whether the server should be treated as a public (read-only) remote by the client } -### PUT - * Description: Updates the server configuration or other properties +### PUT (ETag supported) + * Description: Replaces the server configuration or other properties * Authentication: trusted * Operation: sync * Return: standard return value or standard error @@ -272,6 +271,21 @@ } } +### PATCH (ETag supported) + * Description: Updates the server configuration or other properties + * Introduced: with API extension "patch" + * Authentication: trusted + * Operation: sync + * Return: standard return value or standard error + +Input (updates only the listed keys, rest remains intact): + + { + "config": { + "core.trust_password": "my-new-password" + } + } + ## /1.0/certificates ### GET * Description: list of trusted certificates @@ -296,7 +310,7 @@ { "type": "client", # Certificate type (keyring), currently only client "certificate": "PEM certificate", # If provided, a valid x509 certificate. 
If not, the client certificate of the connection will be used - "name": "foo" # An optional name for the certificate. If nothing is provided, the host in the TLS header for the request is used. + "name": "foo", # An optional name for the certificate. If nothing is provided, the host in the TLS header for the request is used. "password": "server-trust-password" # The trust password for that server (only required if untrusted) } @@ -311,7 +325,8 @@ { "type": "client", - "certificate": "PEM certificate" + "certificate": "PEM certificate", + "name": "foo", "fingerprint": "SHA256 Hash of the raw certificate" } @@ -356,6 +371,12 @@ "profiles": ["default"], # List of profiles "ephemeral": true, # Whether to destroy the container on shutdown "config": {"limits.cpu": "2"}, # Config override. + "devices": { # optional list of devices the container should have + "rootfs": { + "path": "/dev/kvm", + "type": "unix-char" + }, + }, "source": {"type": "image", # Can be: "image", "migration", "copy" or "none" "alias": "ubuntu/devel"}, # Name of the alias } @@ -368,6 +389,12 @@ "profiles": ["default"], # List of profiles "ephemeral": true, # Whether to destroy the container on shutdown "config": {"limits.cpu": "2"}, # Config override. + "devices": { # optional list of devices the container should have + "rootfs": { + "path": "/dev/kvm", + "type": "unix-char" + }, + }, "source": {"type": "image", # Can be: "image", "migration", "copy" or "none" "fingerprint": "SHA-256"}, # Fingerprint } @@ -380,6 +407,12 @@ "profiles": ["default"], # List of profiles "ephemeral": true, # Whether to destroy the container on shutdown "config": {"limits.cpu": "2"}, # Config override. 
+ "devices": { # optional list of devices the container should have + "rootfs": { + "path": "/dev/kvm", + "type": "unix-char" + }, + }, "source": {"type": "image", # Can be: "image", "migration", "copy" or "none" "properties": { # Properties "os": "ubuntu", @@ -396,6 +429,12 @@ "profiles": ["default"], # List of profiles "ephemeral": true, # Whether to destroy the container on shutdown "config": {"limits.cpu": "2"}, # Config override. + "devices": { # optional list of devices the container should have + "rootfs": { + "path": "/dev/kvm", + "type": "unix-char" + }, + }, "source": {"type": "none"}, # Can be: "image", "migration", "copy" or "none" } @@ -407,6 +446,12 @@ "profiles": ["default"], # List of profiles "ephemeral": true, # Whether to destroy the container on shutdown "config": {"limits.cpu": "2"}, # Config override. + "devices": { # optional list of devices the container should have + "rootfs": { + "path": "/dev/kvm", + "type": "unix-char" + }, + }, "source": {"type": "image", # Can be: "image", "migration", "copy" or "none" "mode": "pull", # One of "local" (default) or "pull" "server": "https://10.0.2.3:8443", # Remote server (pull mode only) @@ -415,7 +460,6 @@ "alias": "ubuntu/devel"}, # Name of the alias } - Input (using a private remote image after having obtained a secret for that image): { @@ -424,6 +468,12 @@ "profiles": ["default"], # List of profiles "ephemeral": true, # Whether to destroy the container on shutdown "config": {"limits.cpu": "2"}, # Config override. 
+ "devices": { # optional list of devices the container should have + "rootfs": { + "path": "/dev/kvm", + "type": "unix-char" + }, + }, "source": {"type": "image", # Can be: "image", "migration", "copy" or "none" "mode": "pull", # One of "local" (default) or "pull" "server": "https://10.0.2.3:8443", # Remote server (pull mode only) @@ -440,8 +490,14 @@ "profiles": ["default"], # List of profiles "ephemeral": true, # Whether to destroy the container on shutdown "config": {"limits.cpu": "2"}, # Config override. + "devices": { # optional list of devices the container should have + "rootfs": { + "path": "/dev/kvm", + "type": "unix-char" + }, + }, "source": {"type": "migration", # Can be: "image", "migration", "copy" or "none" - "mode": "pull", # Only "pull" is supported for now + "mode": "pull", # "pull" and "push" is supported for now "operation": "https://10.0.2.3:8443/1.0/operations/", # Full URL to the remote operation (pull mode only) "certificate": "PEM certificate", # Optional PEM certificate. If not mentioned, system CA is used. "base-image": "", # Optional, the base image the container was created from @@ -462,6 +518,25 @@ "source": "my-old-container"} # Name of the source container } +Input (using a remote container, in push mode sent over the migration websocket via client proxying): + + { + "name": "my-new-container", # 64 chars max, ASCII, no slash, no colon and no comma + "architecture": "x86_64", + "profiles": ["default"], # List of profiles + "ephemeral": true, # Whether to destroy the container on shutdown + "config": {"limits.cpu": "2"}, # Config override. 
+ "devices": { # optional list of devices the container should have + "rootfs": { + "path": "/dev/kvm", + "type": "unix-char" + }, + }, + "source": {"type": "migration", # Can be: "image", "migration", "copy" or "none" + "mode": "push", # "pull" and "push" are supported + "base-image": "", # Optional, the base image the container was created from + "live": true # Whether migration is performed live + } ## /1.0/containers/\ ### GET @@ -496,7 +571,7 @@ "eth0": { "name": "eth0", "nictype": "bridged", - "parent": "lxcbr0", + "parent": "lxdbr0", "type": "nic" }, "root": { @@ -504,6 +579,7 @@ "type": "disk" } }, + "last_used_at": "2016-02-16T01:05:05Z", "name": "my-container", "profiles": [ "default" @@ -513,9 +589,8 @@ "status_code": 103 } - -### PUT - * Description: update container configuration or restore snapshot +### PUT (ETag supported) + * Description: replaces container configuration or restore snapshot * Authentication: trusted * Operation: async * Return: background operation or standard error @@ -541,7 +616,6 @@ ] } - Takes the same structure as that returned by GET but doesn't allow name changes (see POST below) or changes to the status sub-dict (since that's read-only). 
@@ -552,6 +626,27 @@ "restore": "snapshot-name" } +### PATCH (ETag supported) + * Description: update container configuration + * Introduced: with API extension "patch" + * Authentication: trusted + * Operation: sync + * Return: standard return value or standard error + +Input: + + { + "config": { + "limits.cpu": "4" + }, + "devices": { + "rootfs": { + "size": "5GB" + } + }, + "ephemeral": true + } + ### POST * Description: used to rename/migrate the container * Authentication: trusted @@ -604,6 +699,8 @@ * Operation: sync * Return: dict representing current state +Output: + { "type": "sync", "status": "Success", @@ -611,6 +708,9 @@ "metadata": { "status": "Running", "status_code": 103, + "cpu": { + "usage": 4986019722 + }, "disk": { "root": { "usage": 422330368 @@ -677,7 +777,7 @@ "state": "up", "type": "loopback" }, - "lxcbr0": { + "lxdbr0": { "addresses": [ { "family": "inet", @@ -743,7 +843,6 @@ } } - ### PUT * Description: change the container state * Authentication: trusted @@ -761,15 +860,18 @@ ## /1.0/containers/\/files ### GET (?path=/path/inside/the/container) - * Description: download a file from the container + * Description: download a file or directory listing from the container * Authentication: trusted * Operation: sync - * Return: Raw file or standard error + * Return: if the type of the file is a directory, the return is a sync + response with a list of the directory contents as metadata, otherwise it is + the raw contents of the file. The following headers will be set (on top of standard size and mimetype headers): * X-LXD-uid: 0 * X-LXD-gid: 0 * X-LXD-mode: 0700 + * X-LXD-type: one of "directory" or "file" This is designed to be easily usable from the command line or even a web browser. 
@@ -839,7 +941,7 @@ "eth0": { "name": "eth0", "nictype": "bridged", - "parent": "lxcbr0", + "parent": "lxdbr0", "type": "nic" }, "root": { @@ -858,7 +960,7 @@ "eth0": { "name": "eth0", "nictype": "bridged", - "parent": "lxcbr0", + "parent": "lxdbr0", "type": "nic" }, "root": { @@ -926,6 +1028,7 @@ "command": ["/bin/bash"], # Command and arguments "environment": {}, # Optional extra environment variables to set "wait-for-websocket": false, # Whether to wait for a connection before starting the process + "record-output": false, # Whether to store stdout and stderr (only valid with wait-for-websocket set to false) "interactive": true, # Whether to allocate a pts device instead of PIPEs "width": 80, # Initial width of the terminal (optional) "height": 25, # Initial height of the terminal (optional) @@ -933,7 +1036,11 @@ `wait-for-websocket` indicates whether the operation should block and wait for a websocket connection to start (so that users can pass stdin and read -stdout), or simply run to completion with /dev/null as stdin and stdout. +stdout), or start immediately. + +If starting immediately, /dev/null will be used for stdin, stdout and +stderr. That's unless record-output is set to true, in which case, +stdout and stderr will be redirected to a log file. If interactive is set to true, a single websocket is returned and is mapped to a pts device for stdin, stdout and stderr of the execed process. 
@@ -965,7 +1072,6 @@ } } - When the exec command finishes, its exit status is available from the operation's metadata: @@ -1043,7 +1149,6 @@ } } - ## /1.0/images ### GET * Description: list of images (public or private) @@ -1102,13 +1207,14 @@ In the source container case, the following dict must be used: { - "filename": filename, # Used for export (optional) - "public": true, # Whether the image can be downloaded by untrusted users (defaults to false) - "properties": { # Image properties (optional) + "compression_algorithm": "xz", # Override the compression algorithm for the image (optional) + "filename": filename, # Used for export (optional) + "public": true, # Whether the image can be downloaded by untrusted users (defaults to false) + "properties": { # Image properties (optional) "os": "Ubuntu" }, "source": { - "type": "container", # One of "container" or "snapshot" + "type": "container", # One of "container" or "snapshot" "name": "abc" } } @@ -1127,7 +1233,6 @@ } } - After the input is received by LXD, a background operation is started which will add the image to the store and possibly do some backend filesystem-specific optimizations. @@ -1186,8 +1291,8 @@ HTTP code for this should be 202 (Accepted). 
-### PUT - * Description: Updates the image properties +### PUT (ETag supported) + * Description: Replaces the image properties, update information and visibility * Authentication: trusted * Operation: sync * Return: standard return value or standard error @@ -1205,6 +1310,23 @@ "public": true, } +### PATCH (ETag supported) + * Description: Updates the image properties, update information and visibility + * Introduced: with API extension "patch" + * Authentication: trusted + * Operation: sync + * Return: standard return value or standard error + +Input: + + { + "properties": { + "os": "ubuntu", + "release": "trusty" + }, + "public": true, + } + ## /1.0/images/\/export ### GET (optional ?secret=SECRET) * Description: Download the image tarball @@ -1220,7 +1342,6 @@ token which it'll then pass to the target LXD. That target LXD will then GET the image as a guest, passing the secret token. - ## /1.0/images/\/secret ### POST * Description: Generate a random token and tell LXD to expect it be used by a guest @@ -1283,14 +1404,15 @@ * Return: dict representing an alias description and target Output: + { "name": "test", "description": "my description", "target": "c9b6e738fae75286d52f497415463a8ecc61bbcb046536f220d797b0e500a41f" } -### PUT - * Description: Updates the alias target or description +### PUT (ETag supported) + * Description: Replaces the alias target or description * Authentication: trusted * Operation: sync * Return: standard return value or standard error @@ -1302,6 +1424,19 @@ "target": "54c8caac1f61901ed86c68f24af5f5d3672bdc62c71d04f06df3a59e95684473" } +### PATCH (ETag supported) + * Description: Updates the alias target or description + * Introduced: with API extension "patch" + * Authentication: trusted + * Operation: sync + * Return: standard return value or standard error + +Input: + + { + "description": "New description" + } + ### POST * Description: rename an alias * Authentication: trusted @@ -1335,10 +1470,28 @@ * Return: list of URLs for networks 
that are current defined on the host [ - "/1.0/networks/eth0",, - "/1.0/networks/lxcbr0" + "/1.0/networks/eth0", + "/1.0/networks/lxdbr0" ] +### POST + * Description: define a new network + * Introduced: with API extension "network" + * Authentication: trusted + * Operation: sync + * Return: standard return value or standard error + +Input: + + { + "name": "my-network", + "config": { + "ipv4.address": "none", + "ipv6.address": "2001:470:b368:4242::1/64", + "ipv6.nat": "true" + } + } + ## /1.0/networks/\ ### GET * Description: information about a network @@ -1347,13 +1500,84 @@ * Return: dict representing a network { - "name": "lxcbr0", + "config": {}, + "name": "lxdbr0", + "managed": false, "type": "bridge", "used_by": [ "/1.0/containers/blah" ] } +### POST + * Description: rename a network + * Introduced: with API extension "network" + * Authentication: trusted + * Operation: sync + * Return: standard return value or standard error + +Input (rename a network): + + { + "name": "new-name" + } + + +HTTP return value must be 204 (No content) and Location must point to +the renamed resource. + +Renaming to an existing name must return the 409 (Conflict) HTTP code. + +### PUT (ETag supported) + * Description: replace the network information + * Introduced: with API extension "network" + * Authentication: trusted + * Operation: sync + * Return: standard return value or standard error + +Input: + + { + "config": { + "bridge.driver": "openvswitch", + "ipv4.address": "10.0.3.1/24", + "ipv6.address": "fd1:6997:4939:495d::1/64" + } + } + +Same dict as used for initial creation and coming from GET. Only the +config is used, everything else is ignored. 
+ +### PATCH (ETag supported) + * Description: update the network information + * Introduced: with API extension "network" + * Authentication: trusted + * Operation: sync + * Return: standard return value or standard error + +Input: + + { + "config": { + "dns.mode": "dynamic" + } + } + + +### DELETE + * Description: remove a network + * Introduced: with API extension "network" + * Authentication: trusted + * Operation: sync + * Return: standard return value or standard error + +Input (none at present): + + { + } + +HTTP code for this should be 202 (Accepted). + ## /1.0/operations ### GET * Description: list of operations @@ -1445,7 +1669,6 @@ "/1.0/profiles/default" ] - ### POST * Description: define a new profile * Authentication: trusted @@ -1488,11 +1711,14 @@ "path": "/dev/kvm", "type": "unix-char" } - } + }, + "used_by": [ + "/1.0/containers/blah" + ] } -### PUT - * Description: update the profile +### PUT (ETag supported) + * Description: replace the profile information * Authentication: trusted * Operation: sync * Return: standard return value or standard error @@ -1515,6 +1741,27 @@ Same dict as used for initial creation and coming from GET. The name property can't be changed (see POST for that). +### PATCH (ETag supported) + * Description: update the profile information + * Introduced: with API extension "patch" + * Authentication: trusted + * Operation: sync + * Return: standard return value or standard error + +Input: + + { + "config": { + "limits.memory": "4GB" + }, + "description": "Some description string", + "devices": { + "kvm": { + "path": "/dev/kvm", + "type": "unix-char" + } + } + } ### POST * Description: rename a profile @@ -1528,13 +1775,11 @@ "name": "new-name" } - HTTP return value must be 204 (No content) and Location must point to the renamed resource. Renaming to an existing name must return the 409 (Conflict) HTTP code. 
- ### DELETE * Description: remove a profile * Authentication: trusted diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/doc/storage-backends.md juju-core-2.0.0/src/github.com/lxc/lxd/doc/storage-backends.md --- juju-core-2.0~beta15/src/github.com/lxc/lxd/doc/storage-backends.md 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/doc/storage-backends.md 2016-10-13 14:31:53.000000000 +0000 @@ -38,6 +38,7 @@ - The btrfs backend is automatically used if /var/lib/lxd is on a btrfs filesystem. - Uses a subvolume per container, image and snapshot, creating btrfs snapshots when creating a new object. + - When using for nesting, the host btrfs filesystem must be mounted with the "user\_subvol\_rm\_allowed" mount option. ### LVM @@ -63,3 +64,11 @@ one. You can however create new containers from older snapshots which makes it possible to confirm the snapshots is indeed what you want to restore before you remove the newer snapshots. + + Also note that container copies use ZFS snapshots, so you also cannot + restore a container to a snapshot taken before the last copy without + having to also delete container copies. + + Copying the wanted snapshot into a new container and then deleting + the old container does however work, at the cost of losing any other + snapshot the container may have had. 
diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/.gitignore juju-core-2.0.0/src/github.com/lxc/lxd/.gitignore --- juju-core-2.0~beta15/src/github.com/lxc/lxd/.gitignore 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/.gitignore 2016-10-13 14:31:53.000000000 +0000 @@ -0,0 +1,12 @@ +*.swp +po/*.mo +po/*.po~ +lxd-*.tar.gz +.vagrant +test/deps/devlxd-client +*~ +tags + +# For Atom ctags +.tags +.tags1 diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/lxc/action.go juju-core-2.0.0/src/github.com/lxc/lxd/lxc/action.go --- juju-core-2.0~beta15/src/github.com/lxc/lxd/lxc/action.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/lxc/action.go 2016-10-13 14:31:53.000000000 +0000 @@ -10,14 +10,15 @@ ) type actionCmd struct { - action shared.ContainerAction - hasTimeout bool - visible bool - name string - timeout int - force bool - stateful bool - stateless bool + action shared.ContainerAction + hasTimeout bool + visible bool + name string + timeout int + force bool + stateful bool + stateless bool + additionalHelp string } func (c *actionCmd) showByDefault() bool { @@ -25,19 +26,24 @@ } func (c *actionCmd) usage() string { + if c.additionalHelp != "" { + c.additionalHelp = fmt.Sprintf("\n\n%s", c.additionalHelp) + } + return fmt.Sprintf(i18n.G( `Changes state of one or more containers to %s. 
-lxc %s [...]`), c.name, c.name) +lxc %s [...]%s`), c.name, c.name, c.additionalHelp) } func (c *actionCmd) flags() { if c.hasTimeout { gnuflag.IntVar(&c.timeout, "timeout", -1, i18n.G("Time to wait for the container before killing it.")) + gnuflag.BoolVar(&c.force, "f", false, i18n.G("Force the container to shutdown.")) gnuflag.BoolVar(&c.force, "force", false, i18n.G("Force the container to shutdown.")) - gnuflag.BoolVar(&c.stateful, "stateful", false, i18n.G("Store the container state (only for stop).")) - gnuflag.BoolVar(&c.stateless, "stateless", false, i18n.G("Ignore the container state (only forstart).")) } + gnuflag.BoolVar(&c.stateful, "stateful", false, i18n.G("Store the container state (only for stop).")) + gnuflag.BoolVar(&c.stateless, "stateless", false, i18n.G("Ignore the container state (only for start).")) } func (c *actionCmd) run(config *lxd.Config, args []string) error { @@ -63,7 +69,7 @@ return fmt.Errorf(i18n.G("Must supply container name for: ")+"\"%s\"", nameArg) } - if c.action == shared.Start || c.action == shared.Stop { + if c.action == shared.Start { current, err := d.ContainerInfo(name) if err != nil { return err @@ -90,7 +96,7 @@ } if err := d.WaitForSuccess(resp.Operation); err != nil { - return fmt.Errorf("%s\n"+i18n.G("Try `lxc info --show-log %s` for more info"), err, name) + return fmt.Errorf("%s\n"+i18n.G("Try `lxc info --show-log %s` for more info"), err, nameArg) } } return nil diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/lxc/config.go juju-core-2.0.0/src/github.com/lxc/lxd/lxc/config.go --- juju-core-2.0~beta15/src/github.com/lxc/lxd/lxc/config.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/lxc/config.go 2016-10-13 14:31:53.000000000 +0000 @@ -21,7 +21,6 @@ ) type configCmd struct { - httpAddr string expanded bool } @@ -81,7 +80,7 @@ Examples: To mount host's /share/c1 onto /opt in the container: - lxc config device add [remote:]container1 disk source=/share/c1 path=opt + lxc config 
device add [remote:]container1 disk source=/share/c1 path=opt To set an lxc config value: lxc config set [remote:] raw.lxc 'lxc.aa_allow_incomplete = 1' @@ -250,6 +249,10 @@ fp := cert.Fingerprint[0:12] certBlock, _ := pem.Decode([]byte(cert.Certificate)) + if certBlock == nil { + return fmt.Errorf(i18n.G("Invalid certificate")) + } + cert, err := x509.ParseCertificate(certBlock.Bytes) if err != nil { return err @@ -341,16 +344,43 @@ brief := config.Brief() data, err = yaml.Marshal(&brief) } else { - config, err := d.ContainerInfo(container) - if err != nil { - return err - } + var brief shared.BriefContainerInfo + if shared.IsSnapshot(container) { + config, err := d.SnapshotInfo(container) + if err != nil { + return err + } - brief := config.Brief() - if c.expanded { - brief = config.BriefExpanded() + brief = shared.BriefContainerInfo{ + Profiles: config.Profiles, + Config: config.Config, + Devices: config.Devices, + Ephemeral: config.Ephemeral, + } + if c.expanded { + brief = shared.BriefContainerInfo{ + Profiles: config.Profiles, + Config: config.ExpandedConfig, + Devices: config.ExpandedDevices, + Ephemeral: config.Ephemeral, + } + } + } else { + config, err := d.ContainerInfo(container) + if err != nil { + return err + } + + brief = config.Brief() + if c.expanded { + brief = config.BriefExpanded() + } } + data, err = yaml.Marshal(&brief) + if err != nil { + return err + } } fmt.Printf("%s", data) diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/lxc/copy.go juju-core-2.0.0/src/github.com/lxc/lxd/lxc/copy.go --- juju-core-2.0~beta15/src/github.com/lxc/lxd/lxc/copy.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/lxc/copy.go 2016-10-13 14:31:53.000000000 +0000 @@ -11,7 +11,9 @@ ) type copyCmd struct { - ephem bool + profArgs profileList + confArgs configList + ephem bool } func (c *copyCmd) showByDefault() bool { @@ -22,10 +24,14 @@ return i18n.G( `Copy containers within or in between lxd instances. 
-lxc copy [remote:] [remote:] [--ephemeral|e]`) +lxc copy [remote:] [[remote:]] [--ephemeral|e] [--profile|-p ...] [--config|-c ...]`) } func (c *copyCmd) flags() { + gnuflag.Var(&c.confArgs, "config", i18n.G("Config key/value to apply to the new container")) + gnuflag.Var(&c.confArgs, "c", i18n.G("Config key/value to apply to the new container")) + gnuflag.Var(&c.profArgs, "profile", i18n.G("Profile to apply to the new container")) + gnuflag.Var(&c.profArgs, "p", i18n.G("Profile to apply to the new container")) gnuflag.BoolVar(&c.ephem, "ephemeral", false, i18n.G("Ephemeral container")) gnuflag.BoolVar(&c.ephem, "e", false, i18n.G("Ephemeral container")) } @@ -38,7 +44,7 @@ return fmt.Errorf(i18n.G("you must specify a source container name")) } - if destName == "" { + if destName == "" && destResource != "" { destName = sourceName } @@ -47,7 +53,12 @@ return err } - status := &shared.ContainerInfo{} + var status struct { + Architecture string + Devices shared.Devices + Config map[string]string + Profiles []string + } // TODO: presumably we want to do this for copying snapshots too? We // need to think a bit more about how we track the baseImage in the @@ -56,18 +67,44 @@ baseImage := "" if !shared.IsSnapshot(sourceName) { - status, err = source.ContainerInfo(sourceName) + result, err := source.ContainerInfo(sourceName) if err != nil { return err } - baseImage = status.Config["volatile.base_image"] + status.Architecture = result.Architecture + status.Devices = result.Devices + status.Config = result.Config + status.Profiles = result.Profiles + + } else { + result, err := source.SnapshotInfo(sourceName) + if err != nil { + return err + } + + status.Architecture = result.Architecture + status.Devices = result.Devices + status.Config = result.Config + status.Profiles = result.Profiles + } + + if c.profArgs != nil { + status.Profiles = append(status.Profiles, c.profArgs...) 
+ } + + if configMap != nil { + for key, value := range configMap { + status.Config[key] = value + } + } + + baseImage = status.Config["volatile.base_image"] - if !keepVolatile { - for k := range status.Config { - if strings.HasPrefix(k, "volatile") { - delete(status.Config, k) - } + if !keepVolatile { + for k := range status.Config { + if strings.HasPrefix(k, "volatile") { + delete(status.Config, k) } } } @@ -83,7 +120,27 @@ return err } - return source.WaitForSuccess(cp.Operation) + err = source.WaitForSuccess(cp.Operation) + if err != nil { + return err + } + + if destResource == "" { + op, err := cp.MetadataAsOperation() + if err != nil { + return fmt.Errorf(i18n.G("didn't get any affected image, container or snapshot from server")) + } + + containers, ok := op.Resources["containers"] + if !ok || len(containers) == 0 { + return fmt.Errorf(i18n.G("didn't get any affected image, container or snapshot from server")) + } + + fields := strings.Split(containers[0], "/") + fmt.Printf(i18n.G("Container name is: %s")+"\n", fields[len(fields)-1]) + } + + return nil } dest, err := lxd.NewClient(config, destRemote) @@ -92,11 +149,17 @@ } sourceProfs := shared.NewStringSet(status.Profiles) - destProfs, err := dest.ListProfiles() + destProfs := []string{} + + profiles, err := dest.ListProfiles() if err != nil { return err } + for _, profile := range profiles { + destProfs = append(destProfs, profile.Name) + } + if !sourceProfs.IsSubset(shared.NewStringSet(destProfs)) { return fmt.Errorf(i18n.G("not all the profiles from the source exist on the target")) } @@ -147,15 +210,33 @@ var migration *lxd.Response sourceWSUrl := "https://" + addr + sourceWSResponse.Operation - migration, err = dest.MigrateFrom(destName, sourceWSUrl, source.Certificate, secrets, status.Architecture, status.Config, status.Devices, status.Profiles, baseImage, ephemeral == 1) + migration, err = dest.MigrateFrom(destName, sourceWSUrl, source.Certificate, secrets, status.Architecture, status.Config, 
status.Devices, status.Profiles, baseImage, ephemeral == 1, false, source, sourceWSResponse.Operation) if err != nil { continue } + // If push mode is implemented then MigrateFrom will return a + // non-waitable operation. So this needs to be conditionalized + // on pull mode. if err = dest.WaitForSuccess(migration.Operation); err != nil { return err } + if destResource == "" { + op, err := migration.MetadataAsOperation() + if err != nil { + return fmt.Errorf(i18n.G("didn't get any affected image, container or snapshot from server")) + } + + containers, ok := op.Resources["containers"] + if !ok || len(containers) == 0 { + return fmt.Errorf(i18n.G("didn't get any affected image, container or snapshot from server")) + } + + fields := strings.Split(containers[0], "/") + fmt.Printf(i18n.G("Container name is: %s")+"\n", fields[len(fields)-1]) + } + return nil } @@ -163,7 +244,7 @@ } func (c *copyCmd) run(config *lxd.Config, args []string) error { - if len(args) != 2 { + if len(args) < 1 { return errArgs } @@ -172,5 +253,9 @@ ephem = 1 } + if len(args) < 2 { + return c.copyContainer(config, args[0], "", false, ephem) + } + return c.copyContainer(config, args[0], args[1], false, ephem) } diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/lxc/exec.go juju-core-2.0.0/src/github.com/lxc/lxd/lxc/exec.go --- juju-core-2.0~beta15/src/github.com/lxc/lxd/lxc/exec.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/lxc/exec.go 2016-10-13 14:31:53.000000000 +0000 @@ -45,7 +45,7 @@ return i18n.G( `Execute the specified command in a container. -lxc exec [remote:]container [--mode=auto|interactive|non-interactive] [--env EDITOR=/usr/bin/vim]... +lxc exec [remote:]container [--mode=auto|interactive|non-interactive] [--env EDITOR=/usr/bin/vim]... 
[--] Mode defaults to non-interactive, interactive mode is selected if both stdin AND stdout are terminals (stderr is ignored).`) } @@ -61,7 +61,7 @@ return err } - shared.Debugf("Window size is now: %dx%d", width, height) + shared.LogDebugf("Window size is now: %dx%d", width, height) w, err := control.NextWriter(websocket.TextMessage) if err != nil { @@ -96,11 +96,8 @@ } env := map[string]string{"HOME": "/root", "USER": "root"} - myEnv := os.Environ() - for _, ent := range myEnv { - if strings.HasPrefix(ent, "TERM=") { - env["TERM"] = ent[len("TERM="):] - } + if myTerm, ok := os.LookupEnv("TERM"); ok { + env["TERM"] = myTerm } for _, arg := range c.envArgs { @@ -162,7 +159,6 @@ termios.Restore(cfd, oldttystate) } - /* we get the result of waitpid() here so we need to transform it */ - os.Exit(ret >> 8) + os.Exit(ret) return fmt.Errorf(i18n.G("unreachable return reached")) } diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/lxc/exec_unix.go juju-core-2.0.0/src/github.com/lxc/lxd/lxc/exec_unix.go --- juju-core-2.0~beta15/src/github.com/lxc/lxd/lxc/exec_unix.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/lxc/exec_unix.go 2016-10-13 14:31:53.000000000 +0000 @@ -25,11 +25,11 @@ for { sig := <-ch - shared.Debugf("Received '%s signal', updating window geometry.", sig) + shared.LogDebugf("Received '%s signal', updating window geometry.", sig) err := c.sendTermSize(control) if err != nil { - shared.Debugf("error setting term size %s", err) + shared.LogDebugf("error setting term size %s", err) break } } diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/lxc/exec_windows.go juju-core-2.0.0/src/github.com/lxc/lxd/lxc/exec_windows.go --- juju-core-2.0~beta15/src/github.com/lxc/lxd/lxc/exec_windows.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/lxc/exec_windows.go 2016-10-13 14:31:53.000000000 +0000 @@ -34,6 +34,6 @@ // won't work quite correctly. 
err := c.sendTermSize(control) if err != nil { - shared.Debugf("error setting term size %s", err) + shared.LogDebugf("error setting term size %s", err) } } diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/lxc/file.go juju-core-2.0.0/src/github.com/lxc/lxd/lxc/file.go --- juju-core-2.0~beta15/src/github.com/lxc/lxd/lxc/file.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/lxc/file.go 2016-10-13 14:31:53.000000000 +0000 @@ -22,6 +22,10 @@ uid int gid int mode string + + recursive bool + + mkdirs bool } func (c *fileCmd) showByDefault() bool { @@ -32,20 +36,33 @@ return i18n.G( `Manage files on a container. -lxc file pull [...] -lxc file push [--uid=UID] [--gid=GID] [--mode=MODE] [...] +lxc file pull [-r|--recursive] [...] +lxc file push [-r|--recursive] [-p|create-dirs] [--uid=UID] [--gid=GID] [--mode=MODE] [...] lxc file edit - in the case of pull, in the case of push and in the case of edit are /`) + in the case of pull, in the case of push and in the case of edit are / + +Examples: + +To push /etc/hosts into the container foo: + lxc file push /etc/hosts foo/etc/hosts + +To pull /etc/hosts from the container: + lxc file pull foo/etc/hosts . 
+`) } func (c *fileCmd) flags() { gnuflag.IntVar(&c.uid, "uid", -1, i18n.G("Set the file's uid on push")) gnuflag.IntVar(&c.gid, "gid", -1, i18n.G("Set the file's gid on push")) gnuflag.StringVar(&c.mode, "mode", "", i18n.G("Set the file's perms on push")) + gnuflag.BoolVar(&c.recursive, "recursive", false, i18n.G("Recursively push or pull files")) + gnuflag.BoolVar(&c.recursive, "r", false, i18n.G("Recursively push or pull files")) + gnuflag.BoolVar(&c.mkdirs, "create-dirs", false, i18n.G("Create any directories necessary")) + gnuflag.BoolVar(&c.mkdirs, "p", false, i18n.G("Create any directories necessary")) } -func (c *fileCmd) push(config *lxd.Config, args []string) error { +func (c *fileCmd) push(config *lxd.Config, send_file_perms bool, args []string) error { if len(args) < 2 { return errArgs } @@ -65,8 +82,19 @@ return err } + var sourcefilenames []string + for _, fname := range args[:len(args)-1] { + if !strings.HasPrefix(fname, "--") { + sourcefilenames = append(sourcefilenames, fname) + } + } + mode := os.FileMode(0755) if c.mode != "" { + if len(c.mode) == 3 { + c.mode = "0" + c.mode + } + m, err := strconv.ParseInt(c.mode, 0, 0) if err != nil { return err @@ -74,6 +102,26 @@ mode = os.FileMode(m) } + if c.recursive { + if c.uid != -1 || c.gid != -1 || c.mode != "" { + return fmt.Errorf(i18n.G("can't supply uid/gid/mode in recursive mode")) + } + + for _, fname := range sourcefilenames { + if c.mkdirs { + if err := d.MkdirP(container, fname, mode); err != nil { + return err + } + } + + if err := d.RecursivePushFile(container, fname, pathSpec[1]); err != nil { + return err + } + } + + return nil + } + uid := 0 if c.uid >= 0 { uid = c.uid @@ -86,13 +134,6 @@ _, targetfilename := filepath.Split(targetPath) - var sourcefilenames []string - for _, fname := range args[:len(args)-1] { - if !strings.HasPrefix(fname, "--") { - sourcefilenames = append(sourcefilenames, fname) - } - } - if (targetfilename != "") && (len(sourcefilenames) > 1) { return errArgs } @@ 
-121,26 +162,37 @@ fpath = path.Join(fpath, path.Base(f.Name())) } - if c.mode == "" || c.uid == -1 || c.gid == -1 { - fMode, fUid, fGid, err := c.getOwner(f) - if err != nil { + if c.mkdirs { + if err := d.MkdirP(container, filepath.Dir(fpath), mode); err != nil { return err } + } - if c.mode == "" { - mode = fMode - } - - if c.uid == -1 { - uid = fUid + if send_file_perms { + if c.mode == "" || c.uid == -1 || c.gid == -1 { + fMode, fUid, fGid, err := c.getOwner(f) + if err != nil { + return err + } + + if c.mode == "" { + mode = fMode + } + + if c.uid == -1 { + uid = fUid + } + + if c.gid == -1 { + gid = fGid + } } - if c.gid == -1 { - gid = fGid - } + err = d.PushFile(container, fpath, gid, uid, fmt.Sprintf("%04o", mode.Perm()), f) + } else { + err = d.PushFile(container, fpath, -1, -1, "", f) } - err = d.PushFile(container, fpath, gid, uid, mode, f) if err != nil { return err } @@ -163,17 +215,18 @@ /* * If the path exists, just use it. If it doesn't exist, it might be a - * directory in one of two cases: + * directory in one of three cases: * 1. Someone explicitly put "/" at the end * 2. Someone provided more than one source. In this case the target * should be a directory so we can save all the files into it. + * 3. 
We are dealing with recursive copy */ if err == nil { targetIsDir = sb.IsDir() if !targetIsDir && len(args)-1 > 1 { return fmt.Errorf(i18n.G("More than one file to download, but target is not a directory")) } - } else if strings.HasSuffix(target, string(os.PathSeparator)) || len(args)-1 > 1 { + } else if strings.HasSuffix(target, string(os.PathSeparator)) || len(args)-1 > 1 || c.recursive { if err := os.MkdirAll(target, 0755); err != nil { return err } @@ -192,11 +245,23 @@ return err } - _, _, _, buf, err := d.PullFile(container, pathSpec[1]) + if c.recursive { + if err := d.RecursivePullFile(container, pathSpec[1], target); err != nil { + return err + } + + continue + } + + _, _, mode, type_, buf, _, err := d.PullFile(container, pathSpec[1]) if err != nil { return err } + if type_ == "directory" { + return fmt.Errorf(i18n.G("can't pull a directory without --recursive")) + } + var targetPath string if targetIsDir { targetPath = path.Join(target, path.Base(pathSpec[1])) @@ -213,6 +278,11 @@ return err } defer f.Close() + + err = f.Chmod(os.FileMode(mode)) + if err != nil { + return err + } } _, err = io.Copy(f, buf) @@ -229,9 +299,13 @@ return errArgs } + if c.recursive { + return fmt.Errorf(i18n.G("recursive edit doesn't make sense :(")) + } + // If stdin isn't a terminal, read text from it if !termios.IsTerminal(int(syscall.Stdin)) { - return c.push(config, append([]string{os.Stdin.Name()}, args[0])) + return c.push(config, false, append([]string{os.Stdin.Name()}, args[0])) } // Create temp file @@ -252,7 +326,7 @@ return err } - err = c.push(config, append([]string{fname}, args[0])) + err = c.push(config, false, append([]string{fname}, args[0])) if err != nil { return err } @@ -267,7 +341,7 @@ switch args[0] { case "push": - return c.push(config, args[1:]) + return c.push(config, true, args[1:]) case "pull": return c.pull(config, args[1:]) case "edit": diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/lxc/finger.go 
juju-core-2.0.0/src/github.com/lxc/lxd/lxc/finger.go --- juju-core-2.0~beta15/src/github.com/lxc/lxd/lxc/finger.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/lxc/finger.go 2016-10-13 14:31:53.000000000 +0000 @@ -5,9 +5,7 @@ "github.com/lxc/lxd/shared/i18n" ) -type fingerCmd struct { - httpAddr string -} +type fingerCmd struct{} func (c *fingerCmd) showByDefault() bool { return false diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/lxc/help.go juju-core-2.0.0/src/github.com/lxc/lxd/lxc/help.go --- juju-core-2.0~beta15/src/github.com/lxc/lxd/lxc/help.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/lxc/help.go 2016-10-13 14:31:53.000000000 +0000 @@ -39,7 +39,7 @@ if !ok { fmt.Fprintf(os.Stderr, i18n.G("error: unknown command: %s")+"\n", name) } else { - fmt.Fprintf(os.Stderr, cmd.usage()+"\n") + fmt.Fprintf(os.Stdout, cmd.usage()+"\n") } } return nil @@ -64,6 +64,7 @@ fmt.Println(" --all " + i18n.G("Print less common commands.")) fmt.Println(" --debug " + i18n.G("Print debug information.")) fmt.Println(" --verbose " + i18n.G("Print verbose information.")) + fmt.Println(" --version " + i18n.G("Show client version.")) fmt.Println() fmt.Println(i18n.G("Environment:")) fmt.Println(" LXD_CONF " + i18n.G("Path to an alternate client configuration directory.")) diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/lxc/image.go juju-core-2.0.0/src/github.com/lxc/lxd/lxc/image.go --- juju-core-2.0~beta15/src/github.com/lxc/lxd/lxc/image.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/lxc/image.go 2016-10-13 14:31:53.000000000 +0000 @@ -1,6 +1,7 @@ package main import ( + "encoding/json" "fmt" "io/ioutil" "os" @@ -73,6 +74,7 @@ publicImage bool copyAliases bool autoUpdate bool + format string } func (c *imageCmd) showByDefault() bool { @@ -108,7 +110,7 @@ hash or alias name (if one is set). 
-lxc image import [rootfs tarball|URL] [remote:] [--public] [--created-at=ISO-8601] [--expires-at=ISO-8601] [--fingerprint=FINGERPRINT] [prop=value] +lxc image import [rootfs tarball|URL] [remote:] [--public] [--created-at=ISO-8601] [--expires-at=ISO-8601] [--fingerprint=FINGERPRINT] [--alias=ALIAS].. [prop=value] Import an image tarball (or tarballs) into the LXD image store. lxc image copy [remote:] : [--alias=ALIAS].. [--copy-aliases] [--public] [--auto-update] @@ -117,16 +119,25 @@ The auto-update flag instructs the server to keep this image up to date. It requires the source to be an alias and for it to be public. -lxc image delete [remote:] - Delete an image from the LXD image store. +lxc image delete [remote:] [remote:][...] + Delete one or more images from the LXD image store. -lxc image export [remote:] +lxc image export [remote:] [target] Export an image from the LXD image store into a distributable tarball. + The output target is optional and defaults to the working directory. + The target may be an existing directory, file name, or "-" to specify + stdout. The target MUST be a directory when exporting a split image. + If the target is a directory, the image's name (each part's name for + split images) as found in the database will be used for the exported + image. If the target is a file (not a directory and not stdout), then + the appropriate extension will be appended to the provided file name + based on the algorithm used to compress the image. + lxc image info [remote:] Print everything LXD knows about a given image. -lxc image list [remote:] [filter] +lxc image list [remote:] [filter] [--format table|json] List images in the LXD image store. Filters may be of the = form for property based filtering, or part of the image hash or part of the image alias name. @@ -145,8 +156,8 @@ lxc image alias delete [remote:] Delete an alias. -lxc image alias list [remote:] - List the aliases. +lxc image alias list [remote:] [filter] + List the aliases. 
Filters may be part of the image hash or part of the image alias name. `) } @@ -155,18 +166,33 @@ gnuflag.BoolVar(&c.copyAliases, "copy-aliases", false, i18n.G("Copy aliases from source")) gnuflag.BoolVar(&c.autoUpdate, "auto-update", false, i18n.G("Keep the image up to date after initial copy")) gnuflag.Var(&c.addAliases, "alias", i18n.G("New alias to define at target")) + gnuflag.StringVar(&c.format, "format", "table", i18n.G("Format")) } func (c *imageCmd) doImageAlias(config *lxd.Config, args []string) error { var remote string switch args[1] { case "list": - /* alias list [:] */ + filters := []string{} + if len(args) > 2 { - remote, _ = config.ParseRemoteAndContainer(args[2]) + result := strings.SplitN(args[2], ":", 2) + if len(result) == 1 { + filters = append(filters, args[2]) + remote, _ = config.ParseRemoteAndContainer("") + } else { + remote, _ = config.ParseRemoteAndContainer(args[2]) + } } else { remote, _ = config.ParseRemoteAndContainer("") } + + if len(args) > 3 { + for _, filter := range args[3:] { + filters = append(filters, filter) + } + } + d, err := lxd.NewClient(config, remote) if err != nil { return err @@ -177,7 +203,7 @@ return err } - c.showAliases(resp) + c.showAliases(resp, filters) return nil case "create": @@ -261,24 +287,30 @@ return err case "delete": - /* delete [:] */ + /* delete [:] [:][...] 
*/ if len(args) < 2 { return errArgs } - remote, inName := config.ParseRemoteAndContainer(args[1]) - if inName == "" { - inName = "default" - } + for _, arg := range args[1:] { + remote, inName := config.ParseRemoteAndContainer(arg) + if inName == "" { + inName = "default" + } - d, err := lxd.NewClient(config, remote) - if err != nil { - return err + d, err := lxd.NewClient(config, remote) + if err != nil { + return err + } + + image := c.dereferenceAlias(d, inName) + err = d.DeleteImage(image) + if err != nil { + return err + } } - image := c.dereferenceAlias(d, inName) - err = d.DeleteImage(image) - return err + return nil case "info": if len(args) < 2 { @@ -376,7 +408,8 @@ } if imageFile == "" { - return errArgs + imageFile = args[1] + properties = properties[1:] } d, err := lxd.NewClient(config, remote) @@ -392,7 +425,11 @@ } if strings.HasPrefix(imageFile, "https://") { - fingerprint, err = d.PostImageURL(imageFile, c.publicImage, c.addAliases) + progressHandler := func(progress string) { + fmt.Printf(i18n.G("Importing the image: %s")+"\r", progress) + } + + fingerprint, err = d.PostImageURL(imageFile, properties, c.publicImage, c.addAliases, progressHandler) } else if strings.HasPrefix(imageFile, "http://") { return fmt.Errorf(i18n.G("Only https:// is supported for remote image import.")) } else { @@ -432,11 +469,20 @@ return err } - images, err := d.ListImages() + var images []shared.ImageInfo + allImages, err := d.ListImages() if err != nil { return err } + for _, image := range allImages { + if !c.imageShouldShow(filters, &image) { + continue + } + + images = append(images, image) + } + return c.showImages(images, filters) case "edit": @@ -558,52 +604,71 @@ } func (c *imageCmd) showImages(images []shared.ImageInfo, filters []string) error { - data := [][]string{} - for _, image := range images { - if !c.imageShouldShow(filters, &image) { - continue - } + switch c.format { + case listFormatTable: + data := [][]string{} + for _, image := range images { + if 
!c.imageShouldShow(filters, &image) { + continue + } - shortest := c.shortestAlias(image.Aliases) - if len(image.Aliases) > 1 { - shortest = fmt.Sprintf(i18n.G("%s (%d more)"), shortest, len(image.Aliases)-1) - } - fp := image.Fingerprint[0:12] - public := i18n.G("no") - description := c.findDescription(image.Properties) + shortest := c.shortestAlias(image.Aliases) + if len(image.Aliases) > 1 { + shortest = fmt.Sprintf(i18n.G("%s (%d more)"), shortest, len(image.Aliases)-1) + } + fp := image.Fingerprint[0:12] + public := i18n.G("no") + description := c.findDescription(image.Properties) - if image.Public { - public = i18n.G("yes") - } + if image.Public { + public = i18n.G("yes") + } - const layout = "Jan 2, 2006 at 3:04pm (MST)" - uploaded := image.UploadDate.UTC().Format(layout) - size := fmt.Sprintf("%.2fMB", float64(image.Size)/1024.0/1024.0) - data = append(data, []string{shortest, fp, public, description, image.Architecture, size, uploaded}) + const layout = "Jan 2, 2006 at 3:04pm (MST)" + uploaded := image.UploadDate.UTC().Format(layout) + size := fmt.Sprintf("%.2fMB", float64(image.Size)/1024.0/1024.0) + data = append(data, []string{shortest, fp, public, description, image.Architecture, size, uploaded}) + } + + table := tablewriter.NewWriter(os.Stdout) + table.SetAutoWrapText(false) + table.SetAlignment(tablewriter.ALIGN_LEFT) + table.SetRowLine(true) + table.SetHeader([]string{ + i18n.G("ALIAS"), + i18n.G("FINGERPRINT"), + i18n.G("PUBLIC"), + i18n.G("DESCRIPTION"), + i18n.G("ARCH"), + i18n.G("SIZE"), + i18n.G("UPLOAD DATE")}) + sort.Sort(SortImage(data)) + table.AppendBulk(data) + table.Render() + case listFormatJSON: + data := make([]*shared.ImageInfo, len(images)) + for i := range images { + data[i] = &images[i] + } + enc := json.NewEncoder(os.Stdout) + err := enc.Encode(data) + if err != nil { + return err + } + default: + return fmt.Errorf("invalid format %q", c.format) } - table := tablewriter.NewWriter(os.Stdout) - table.SetAutoWrapText(false) - 
table.SetAlignment(tablewriter.ALIGN_LEFT) - table.SetRowLine(true) - table.SetHeader([]string{ - i18n.G("ALIAS"), - i18n.G("FINGERPRINT"), - i18n.G("PUBLIC"), - i18n.G("DESCRIPTION"), - i18n.G("ARCH"), - i18n.G("SIZE"), - i18n.G("UPLOAD DATE")}) - sort.Sort(SortImage(data)) - table.AppendBulk(data) - table.Render() - return nil } -func (c *imageCmd) showAliases(aliases shared.ImageAliases) error { +func (c *imageCmd) showAliases(aliases shared.ImageAliases, filters []string) error { data := [][]string{} for _, alias := range aliases { + if !c.aliasShouldShow(filters, &alias) { + continue + } + data = append(data, []string{alias.Name, alias.Target[0:12], alias.Description}) } @@ -743,3 +808,17 @@ return true } + +func (c *imageCmd) aliasShouldShow(filters []string, state *shared.ImageAliasesEntry) bool { + if len(filters) == 0 { + return true + } + + for _, filter := range filters { + if strings.Contains(state.Name, filter) || strings.Contains(state.Target, filter) { + return true + } + } + + return false +} diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/lxc/info.go juju-core-2.0.0/src/github.com/lxc/lxd/lxc/info.go --- juju-core-2.0~beta15/src/github.com/lxc/lxd/lxc/info.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/lxc/info.go 2016-10-13 14:31:53.000000000 +0000 @@ -23,11 +23,13 @@ func (c *infoCmd) usage() string { return i18n.G( - `List information on containers. + `List information on LXD servers and containers. -This will support remotes and images as well, but only containers for now. 
+For a container: + lxc info [:]container [--show-log] -lxc info [:]container [--show-log]`) +For a server: + lxc info [:]`) } func (c *infoCmd) flags() { @@ -85,6 +87,9 @@ const layout = "2006/01/02 15:04 UTC" fmt.Printf(i18n.G("Name: %s")+"\n", ct.Name) + if d.Remote != nil && d.Remote.Addr != "" { + fmt.Printf(i18n.G("Remote: %s")+"\n", d.Remote.Addr) + } fmt.Printf(i18n.G("Architecture: %s")+"\n", ct.Architecture) if ct.CreationDate.UTC().Unix() != 0 { fmt.Printf(i18n.G("Created: %s")+"\n", ct.CreationDate.UTC().Format(layout)) @@ -102,14 +107,16 @@ // IP addresses ipInfo := "" - for netName, net := range cs.Network { - vethStr := "" - if net.HostName != "" { - vethStr = fmt.Sprintf("\t%s", net.HostName) - } - - for _, addr := range net.Addresses { - ipInfo += fmt.Sprintf(" %s:\t%s\t%s%s\n", netName, addr.Family, addr.Address, vethStr) + if cs.Network != nil { + for netName, net := range cs.Network { + vethStr := "" + if net.HostName != "" { + vethStr = fmt.Sprintf("\t%s", net.HostName) + } + + for _, addr := range net.Addresses { + ipInfo += fmt.Sprintf(" %s:\t%s\t%s%s\n", netName, addr.Family, addr.Address, vethStr) + } } } @@ -124,9 +131,11 @@ // Disk usage diskInfo := "" - for entry, disk := range cs.Disk { - if disk.Usage != 0 { - diskInfo += fmt.Sprintf(" %s: %s\n", entry, shared.GetByteSizeString(disk.Usage)) + if cs.Disk != nil { + for entry, disk := range cs.Disk { + if disk.Usage != 0 { + diskInfo += fmt.Sprintf(" %s: %s\n", entry, shared.GetByteSizeString(disk.Usage)) + } } } @@ -135,6 +144,17 @@ fmt.Printf(diskInfo) } + // CPU usage + cpuInfo := "" + if cs.CPU.Usage != 0 { + cpuInfo += fmt.Sprintf(" %s: %v\n", i18n.G("CPU usage (in seconds)"), cs.CPU.Usage/1000000000) + } + + if cpuInfo != "" { + fmt.Println(i18n.G(" CPU usage:")) + fmt.Printf(cpuInfo) + } + // Memory usage memoryInfo := "" if cs.Memory.Usage != 0 { @@ -160,12 +180,14 @@ // Network usage networkInfo := "" - for netName, net := range cs.Network { - networkInfo += fmt.Sprintf(" 
%s:\n", netName) - networkInfo += fmt.Sprintf(" %s: %s\n", i18n.G("Bytes received"), shared.GetByteSizeString(net.Counters.BytesReceived)) - networkInfo += fmt.Sprintf(" %s: %s\n", i18n.G("Bytes sent"), shared.GetByteSizeString(net.Counters.BytesSent)) - networkInfo += fmt.Sprintf(" %s: %d\n", i18n.G("Packets received"), net.Counters.PacketsReceived) - networkInfo += fmt.Sprintf(" %s: %d\n", i18n.G("Packets sent"), net.Counters.PacketsReceived) + if cs.Network != nil { + for netName, net := range cs.Network { + networkInfo += fmt.Sprintf(" %s:\n", netName) + networkInfo += fmt.Sprintf(" %s: %s\n", i18n.G("Bytes received"), shared.GetByteSizeString(net.Counters.BytesReceived)) + networkInfo += fmt.Sprintf(" %s: %s\n", i18n.G("Bytes sent"), shared.GetByteSizeString(net.Counters.BytesSent)) + networkInfo += fmt.Sprintf(" %s: %d\n", i18n.G("Packets received"), net.Counters.PacketsReceived) + networkInfo += fmt.Sprintf(" %s: %d\n", i18n.G("Packets sent"), net.Counters.PacketsSent) + } } if networkInfo != "" { @@ -185,7 +207,9 @@ if first_snapshot { fmt.Println(i18n.G("Snapshots:")) } - fmt.Printf(" %s", snap.Name) + + fields := strings.Split(snap.Name, shared.SnapshotDelimiter) + fmt.Printf(" %s", fields[len(fields)-1]) if snap.CreationDate.UTC().Unix() != 0 { fmt.Printf(" ("+i18n.G("taken at %s")+")", snap.CreationDate.UTC().Format(layout)) diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/lxc/init.go juju-core-2.0.0/src/github.com/lxc/lxd/lxc/init.go --- juju-core-2.0~beta15/src/github.com/lxc/lxd/lxc/init.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/lxc/init.go 2016-10-13 14:31:53.000000000 +0000 @@ -63,6 +63,7 @@ profArgs profileList confArgs configList ephem bool + network string } func (c *initCmd) showByDefault() bool { @@ -73,7 +74,7 @@ return i18n.G( `Initialize a container from a particular image. -lxc init [remote:] [remote:][] [--ephemeral|-e] [--profile|-p ...] [--config|-c ...] 
+lxc init [remote:] [remote:][] [--ephemeral|-e] [--profile|-p ...] [--config|-c ...] [--network|-n ] Initializes a container using the specified image and name. @@ -137,6 +138,8 @@ gnuflag.Var(&c.profArgs, "p", i18n.G("Profile to apply to the new container")) gnuflag.BoolVar(&c.ephem, "ephemeral", false, i18n.G("Ephemeral container")) gnuflag.BoolVar(&c.ephem, "e", false, i18n.G("Ephemeral container")) + gnuflag.StringVar(&c.network, "network", "", i18n.G("Network name")) + gnuflag.StringVar(&c.network, "n", "", i18n.G("Network name")) } func (c *initCmd) run(config *lxd.Config, args []string) error { @@ -177,12 +180,27 @@ fmt.Printf(i18n.G("Creating %s")+"\n", name) } + iremote, image = c.guessImage(config, d, remote, iremote, image) + + devicesMap := map[string]shared.Device{} + if c.network != "" { + network, err := d.NetworkGet(c.network) + if err != nil { + return err + } + + if network.Type == "bridge" { + devicesMap[c.network] = shared.Device{"type": "nic", "nictype": "bridge", "parent": c.network} + } else { + devicesMap[c.network] = shared.Device{"type": "nic", "nictype": "macvlan", "parent": c.network} + } + } + if !initRequestedEmptyProfiles && len(profiles) == 0 { - resp, err = d.Init(name, iremote, image, nil, configMap, c.ephem) + resp, err = d.Init(name, iremote, image, nil, configMap, devicesMap, c.ephem) } else { - resp, err = d.Init(name, iremote, image, &profiles, configMap, c.ephem) + resp, err = d.Init(name, iremote, image, &profiles, configMap, devicesMap, c.ephem) } - if err != nil { return err } @@ -209,6 +227,9 @@ fmt.Printf(i18n.G("Container name is: %s")+"\n", fields[len(fields)-1]) } } + + c.checkNetwork(d, name) + return nil } @@ -252,3 +273,44 @@ } go d.Monitor([]string{"operation"}, handler) } + +func (c *initCmd) guessImage(config *lxd.Config, d *lxd.Client, remote string, iremote string, image string) (string, string) { + if remote != iremote { + return iremote, image + } + + _, ok := config.Remotes[image] + if !ok { + return 
iremote, image + } + + target := d.GetAlias(image) + if target != "" { + return iremote, image + } + + _, err := d.GetImageInfo(image) + if err == nil { + return iremote, image + } + + fmt.Fprintf(os.Stderr, i18n.G("The local image '%s' couldn't be found, trying '%s:' instead.")+"\n", image, image) + return image, "default" +} + +func (c *initCmd) checkNetwork(d *lxd.Client, name string) { + ct, err := d.ContainerInfo(name) + if err != nil { + return + } + + for _, d := range ct.ExpandedDevices { + if d["type"] == "nic" { + return + } + } + + fmt.Fprintf(os.Stderr, "\n"+i18n.G("The container you are starting doesn’t have any network attached to it.")+"\n") + fmt.Fprintf(os.Stderr, " "+i18n.G("To create a new network, use: lxc network create")+"\n") + fmt.Fprintf(os.Stderr, " "+i18n.G("To assign a network to a container, use: lxc network assign")+"\n\n") +} diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/lxc/launch.go juju-core-2.0.0/src/github.com/lxc/lxd/lxc/launch.go --- juju-core-2.0~beta15/src/github.com/lxc/lxd/lxc/launch.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/lxc/launch.go 2016-10-13 14:31:53.000000000 +0000 @@ -22,7 +22,7 @@ return i18n.G( `Launch a container from a particular image. -lxc launch [remote:] [remote:][] [--ephemeral|-e] [--profile|-p ...] [--config|-c ...] +lxc launch [remote:] [remote:][] [--ephemeral|-e] [--profile|-p ...] [--config|-c ...] [--network|-n ] Launches a container using the specified image and name. @@ -30,7 +30,7 @@ Specifying "-p" with no argument will result in no profile. 
Example: -lxc launch ubuntu u1`) +lxc launch ubuntu:16.04 u1`) } func (c *launchCmd) flags() { @@ -43,6 +43,8 @@ gnuflag.Var(&c.init.profArgs, "p", i18n.G("Profile to apply to the new container")) gnuflag.BoolVar(&c.init.ephem, "ephemeral", false, i18n.G("Ephemeral container")) gnuflag.BoolVar(&c.init.ephem, "e", false, i18n.G("Ephemeral container")) + gnuflag.StringVar(&c.init.network, "network", "", i18n.G("Network name")) + gnuflag.StringVar(&c.init.network, "n", "", i18n.G("Network name")) } func (c *launchCmd) run(config *lxd.Config, args []string) error { @@ -75,10 +77,26 @@ profiles = append(profiles, p) } + iremote, image = c.init.guessImage(config, d, remote, iremote, image) + + devicesMap := map[string]shared.Device{} + if c.init.network != "" { + network, err := d.NetworkGet(c.init.network) + if err != nil { + return err + } + + if network.Type == "bridge" { + devicesMap[c.init.network] = shared.Device{"type": "nic", "nictype": "bridge", "parent": c.init.network} + } else { + devicesMap[c.init.network] = shared.Device{"type": "nic", "nictype": "macvlan", "parent": c.init.network} + } + } + if !initRequestedEmptyProfiles && len(profiles) == 0 { - resp, err = d.Init(name, iremote, image, nil, configMap, c.init.ephem) + resp, err = d.Init(name, iremote, image, nil, configMap, devicesMap, c.init.ephem) } else { - resp, err = d.Init(name, iremote, image, &profiles, configMap, c.init.ephem) + resp, err = d.Init(name, iremote, image, &profiles, configMap, devicesMap, c.init.ephem) } if err != nil { @@ -119,6 +137,8 @@ return err } + c.init.checkNetwork(d, name) + fmt.Printf(i18n.G("Starting %s")+"\n", name) resp, err = d.Action(name, shared.Start, -1, false, false) if err != nil { @@ -127,7 +147,12 @@ err = d.WaitForSuccess(resp.Operation) if err != nil { - return fmt.Errorf("%s\n"+i18n.G("Try `lxc info --show-log %s` for more info"), err, name) + prettyName := name + if remote != "" { + prettyName = fmt.Sprintf("%s:%s", remote, name) + } + + return 
fmt.Errorf("%s\n"+i18n.G("Try `lxc info --show-log %s` for more info"), err, prettyName) } return nil diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/lxc/list.go juju-core-2.0.0/src/github.com/lxc/lxd/lxc/list.go --- juju-core-2.0~beta15/src/github.com/lxc/lxd/lxc/list.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/lxc/list.go 2016-10-13 14:31:53.000000000 +0000 @@ -1,10 +1,12 @@ package main import ( + "encoding/json" "fmt" "os" "regexp" "sort" + "strconv" "strings" "sync" @@ -47,9 +49,15 @@ return a[i][0] < a[j][0] } +const ( + listFormatTable = "table" + listFormatJSON = "json" +) + type listCmd struct { - chosenColumnRunes string - fast bool + columnsRaw string + fast bool + format string } func (c *listCmd) showByDefault() bool { @@ -60,21 +68,30 @@ return i18n.G( `Lists the available resources. -lxc list [resource] [filters] [-c columns] [--fast] +lxc list [resource] [filters] [--format table|json] [-c columns] [--fast] The filters are: -* A single keyword like "web" which will list any container with "web" in its name. +* A single keyword like "web" which will list any container with a name starting by "web". +* A regular expression on the container name. (e.g. .*web.*01$) * A key/value pair referring to a configuration item. For those, the namespace can be abreviated to the smallest unambiguous identifier: -* "user.blah=abc" will list all containers with the "blah" user property set to "abc" -* "u.blah=abc" will do the same -* "security.privileged=1" will list all privileged containers -* "s.privileged=1" will do the same + * "user.blah=abc" will list all containers with the "blah" user property set to "abc". + * "u.blah=abc" will do the same + * "security.privileged=1" will list all privileged containers + * "s.privileged=1" will do the same +* A regular expression matching a configuration item or its value. (e.g. 
volatile.eth0.hwaddr=00:16:3e:.*) + +The -c option takes a comma separated list of arguments that control +which container attributes to output when displaying in table format. +Column arguments are either pre-defined shorthand chars (see below), +or (extended) config keys. Commas between consecutive shorthand chars +are optional. -The columns are: +Pre-defined shorthand chars: * 4 - IPv4 address * 6 - IPv6 address * a - architecture * c - creation date +* l - last used date * n - name * p - pid of container init process * P - profiles @@ -82,13 +99,28 @@ * S - number of snapshots * t - type (persistent or ephemeral) +Config key syntax: key[:name][:maxWidth] +* key - The (extended) config key to display +* name - Name to display in the column header, defaults to the key + if not specified or if empty (to allow defining maxWidth + without a custom name, e.g. user.key::0) +* maxWidth - Max width of the column (longer results are truncated). + -1 == unlimited + 0 == width of column header + >0 == max width in chars + Default is -1 (unlimited) + Default column layout: ns46tS -Fast column layout: nsacPt`) +Fast column layout: nsacPt + +Example: lxc list -c n,volatile.base_image:"BASE IMAGE":0,s46,volatile.eth0.hwaddr:MAC +`) } func (c *listCmd) flags() { - gnuflag.StringVar(&c.chosenColumnRunes, "c", "ns46tS", i18n.G("Columns")) - gnuflag.StringVar(&c.chosenColumnRunes, "columns", "ns46tS", i18n.G("Columns")) + gnuflag.StringVar(&c.columnsRaw, "c", "ns46tS", i18n.G("Columns")) + gnuflag.StringVar(&c.columnsRaw, "columns", "ns46tS", i18n.G("Columns")) + gnuflag.StringVar(&c.format, "format", "table", i18n.G("Format")) gnuflag.BoolVar(&c.fast, "fast", false, i18n.G("Fast mode (same as --columns=nsacPt")) } @@ -124,7 +156,7 @@ } found := false - for configKey, configValue := range state.Config { + for configKey, configValue := range state.ExpandedConfig { if c.dotPrefixMatch(key, configKey) { //try to test filter value as a regexp regexpValue := value @@ -148,11 +180,25 @@ 
} } + if state.ExpandedConfig[key] == value { + return true + } + if !found { return false } } else { - if !strings.Contains(state.Name, filter) { + regexpValue := filter + if !(strings.Contains(filter, "^") || strings.Contains(filter, "$")) { + regexpValue = "^" + regexpValue + "$" + } + + r, err := regexp.Compile(regexpValue) + if err == nil && r.MatchString(state.Name) == true { + return true + } + + if !strings.HasPrefix(state.Name, filter) { return false } } @@ -185,6 +231,12 @@ for i := 0; i < threads; i++ { cStatesWg.Add(1) go func() { + d, err := lxd.NewClient(&d.Config, d.Name) + if err != nil { + cStatesWg.Done() + return + } + for { cName, more := <-cStatesQueue if !more { @@ -205,6 +257,12 @@ cSnapshotsWg.Add(1) go func() { + d, err := lxd.NewClient(&d.Config, d.Name) + if err != nil { + cSnapshotsWg.Done() + return + } + for { cName, more := <-cSnapshotsQueue if !more { @@ -225,13 +283,11 @@ } for _, cInfo := range cinfos { - if !c.shouldShow(filters, &cInfo) { - continue - } - for _, column := range columns { if column.NeedsState && cInfo.IsActive() { + cStatesLock.Lock() _, ok := cStates[cInfo.Name] + cStatesLock.Unlock() if ok { continue } @@ -244,7 +300,9 @@ } if column.NeedsSnapshots { + cSnapshotsLock.Lock() _, ok := cSnapshots[cInfo.Name] + cSnapshotsLock.Unlock() if ok { continue } @@ -263,31 +321,54 @@ cStatesWg.Wait() cSnapshotsWg.Wait() - data := [][]string{} - for _, cInfo := range cinfos { - if !c.shouldShow(filters, &cInfo) { - continue - } + switch c.format { + case listFormatTable: + data := [][]string{} + for _, cInfo := range cinfos { + if !c.shouldShow(filters, &cInfo) { + continue + } - col := []string{} - for _, column := range columns { - col = append(col, column.Data(cInfo, cStates[cInfo.Name], cSnapshots[cInfo.Name])) + col := []string{} + for _, column := range columns { + col = append(col, column.Data(cInfo, cStates[cInfo.Name], cSnapshots[cInfo.Name])) + } + data = append(data, col) + } + + table := 
tablewriter.NewWriter(os.Stdout) + table.SetAutoWrapText(false) + table.SetAlignment(tablewriter.ALIGN_LEFT) + table.SetRowLine(true) + table.SetHeader(headers) + sort.Sort(byName(data)) + table.AppendBulk(data) + table.Render() + case listFormatJSON: + data := make([]listContainerItem, len(cinfos)) + for i := range cinfos { + data[i].ContainerInfo = &cinfos[i] + data[i].State = cStates[cinfos[i].Name] + data[i].Snapshots = cSnapshots[cinfos[i].Name] + } + enc := json.NewEncoder(os.Stdout) + err := enc.Encode(data) + if err != nil { + return err } - data = append(data, col) + default: + return fmt.Errorf("invalid format %q", c.format) } - table := tablewriter.NewWriter(os.Stdout) - table.SetAutoWrapText(false) - table.SetAlignment(tablewriter.ALIGN_LEFT) - table.SetRowLine(true) - table.SetHeader(headers) - sort.Sort(byName(data)) - table.AppendBulk(data) - table.Render() - return nil } +type listContainerItem struct { + *shared.ContainerInfo + State *shared.ContainerState `json:"state"` + Snapshots []shared.SnapshotInfo `json:"snapshots"` +} + func (c *listCmd) run(config *lxd.Config, args []string) error { var remote string name := "" @@ -296,7 +377,7 @@ if len(args) != 0 { filters = args - if strings.Contains(args[0], ":") { + if strings.Contains(args[0], ":") && !strings.Contains(args[0], "=") { remote, name = config.ParseRemoteAndContainer(args[0]) filters = args[1:] } else if !strings.Contains(args[0], "=") { @@ -304,6 +385,7 @@ name = args[0] } } + filters = append(filters, name) if remote == "" { remote = config.DefaultRemote @@ -320,21 +402,29 @@ return err } - if name == "" { - cts = ctslist - } else { - for _, cinfo := range ctslist { - if len(cinfo.Name) >= len(name) && cinfo.Name[0:len(name)] == name { - cts = append(cts, cinfo) - } + for _, cinfo := range ctslist { + if !c.shouldShow(filters, &cinfo) { + continue } + + cts = append(cts, cinfo) } - columns_map := map[rune]column{ + columns, err := c.parseColumns() + if err != nil { + return err + } + + 
return c.listContainers(d, cts, filters, columns) +} + +func (c *listCmd) parseColumns() ([]column, error) { + columnsShorthandMap := map[rune]column{ '4': column{i18n.G("IPV4"), c.IP4ColumnData, true, false}, '6': column{i18n.G("IPV6"), c.IP6ColumnData, true, false}, 'a': column{i18n.G("ARCHITECTURE"), c.ArchitectureColumnData, false, false}, 'c': column{i18n.G("CREATED AT"), c.CreatedColumnData, false, false}, + 'l': column{i18n.G("LAST USED AT"), c.LastUsedColumnData, false, false}, 'n': column{i18n.G("NAME"), c.nameColumnData, false, false}, 'p': column{i18n.G("PID"), c.PIDColumnData, true, false}, 'P': column{i18n.G("PROFILES"), c.ProfilesColumnData, false, false}, @@ -344,19 +434,79 @@ } if c.fast { - c.chosenColumnRunes = "nsacPt" + c.columnsRaw = "nsacPt" } + columnList := strings.Split(c.columnsRaw, ",") + columns := []column{} - for _, columnRune := range c.chosenColumnRunes { - if column, ok := columns_map[columnRune]; ok { - columns = append(columns, column) + for _, columnEntry := range columnList { + if columnEntry == "" { + return nil, fmt.Errorf("Empty column entry (redundant, leading or trailing command) in '%s'", c.columnsRaw) + } + + // Config keys always contain a period, parse anything without a + // period as a series of shorthand runes. 
+ if !strings.Contains(columnEntry, ".") { + for _, columnRune := range columnEntry { + if column, ok := columnsShorthandMap[columnRune]; ok { + columns = append(columns, column) + } else { + return nil, fmt.Errorf("Unknown column shorthand char '%c' in '%s'", columnRune, columnEntry) + } + } } else { - return fmt.Errorf("%s does contain invalid column characters\n", c.chosenColumnRunes) + cc := strings.Split(columnEntry, ":") + if len(cc) > 3 { + return nil, fmt.Errorf("Invalid config key column format (too many fields): '%s'", columnEntry) + } + + k := cc[0] + if _, err := shared.ConfigKeyChecker(k); err != nil { + return nil, fmt.Errorf("Invalid config key '%s' in '%s'", k, columnEntry) + } + + column := column{Name: k} + if len(cc) > 1 { + if len(cc[1]) == 0 && len(cc) != 3 { + return nil, fmt.Errorf("Invalid name in '%s', empty string is only allowed when defining maxWidth", columnEntry) + } + column.Name = cc[1] + } + + maxWidth := -1 + if len(cc) > 2 { + temp, err := strconv.ParseInt(cc[2], 10, 64) + if err != nil { + return nil, fmt.Errorf("Invalid max width (must be an integer) '%s' in '%s'", cc[2], columnEntry) + } + if temp < -1 { + return nil, fmt.Errorf("Invalid max width (must -1, 0 or a positive integer) '%s' in '%s'", cc[2], columnEntry) + } + if temp == 0 { + maxWidth = len(column.Name) + } else { + maxWidth = int(temp) + } + } + + column.Data = func(cInfo shared.ContainerInfo, cState *shared.ContainerState, cSnaps []shared.SnapshotInfo) string { + v, ok := cInfo.Config[k] + if !ok { + v, ok = cInfo.ExpandedConfig[k] + } + + // Truncate the data according to the max width. A negative max width + // indicates there is no effective limit. 
+ if maxWidth > 0 && len(v) > maxWidth { + return v[:maxWidth] + } + return v + } + columns = append(columns, column) } } - - return c.listContainers(d, cts, filters, columns) + return columns, nil } func (c *listCmd) nameColumnData(cInfo shared.ContainerInfo, cState *shared.ContainerState, cSnaps []shared.SnapshotInfo) string { @@ -368,7 +518,7 @@ } func (c *listCmd) IP4ColumnData(cInfo shared.ContainerInfo, cState *shared.ContainerState, cSnaps []shared.SnapshotInfo) string { - if cInfo.IsActive() { + if cInfo.IsActive() && cState != nil && cState.Network != nil { ipv4s := []string{} for netName, net := range cState.Network { if net.Type == "loopback" { @@ -392,7 +542,7 @@ } func (c *listCmd) IP6ColumnData(cInfo shared.ContainerInfo, cState *shared.ContainerState, cSnaps []shared.SnapshotInfo) string { - if cInfo.IsActive() { + if cInfo.IsActive() && cState != nil && cState.Network != nil { ipv6s := []string{} for netName, net := range cState.Network { if net.Type == "loopback" { @@ -424,11 +574,15 @@ } func (c *listCmd) numberSnapshotsColumnData(cInfo shared.ContainerInfo, cState *shared.ContainerState, cSnaps []shared.SnapshotInfo) string { - return fmt.Sprintf("%d", len(cSnaps)) + if cSnaps != nil { + return fmt.Sprintf("%d", len(cSnaps)) + } + + return "" } func (c *listCmd) PIDColumnData(cInfo shared.ContainerInfo, cState *shared.ContainerState, cSnaps []shared.SnapshotInfo) string { - if cInfo.IsActive() { + if cInfo.IsActive() && cState != nil { return fmt.Sprintf("%d", cState.Pid) } @@ -451,4 +605,14 @@ } return "" +} + +func (c *listCmd) LastUsedColumnData(cInfo shared.ContainerInfo, cState *shared.ContainerState, cSnaps []shared.SnapshotInfo) string { + layout := "2006/01/02 15:04 UTC" + + if !cInfo.LastUsedDate.IsZero() && cInfo.LastUsedDate.UTC().Unix() != 0 { + return cInfo.LastUsedDate.UTC().Format(layout) + } + + return "" } diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/lxc/list_test.go 
juju-core-2.0.0/src/github.com/lxc/lxd/lxc/list_test.go --- juju-core-2.0~beta15/src/github.com/lxc/lxd/lxc/list_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/lxc/list_test.go 2016-10-13 14:31:53.000000000 +0000 @@ -1,6 +1,10 @@ package main import ( + "bytes" + "math/rand" + "strconv" + "strings" "testing" "github.com/lxc/lxd/shared" @@ -23,7 +27,7 @@ state := &shared.ContainerInfo{ Name: "foo", - Config: map[string]string{ + ExpandedConfig: map[string]string{ "security.privileged": "1", "user.blah": "abc", }, @@ -45,3 +49,157 @@ t.Errorf("value filter didn't work") } } + +// Used by TestColumns and TestInvalidColumns +const shorthand = "46aclnpPsSt" +const alphanum = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" + +func TestColumns(t *testing.T) { + keys := make([]string, 0, len(shared.KnownContainerConfigKeys)) + for k := range shared.KnownContainerConfigKeys { + keys = append(keys, k) + } + + randShorthand := func(buffer *bytes.Buffer) { + buffer.WriteByte(shorthand[rand.Intn(len(shorthand))]) + } + + randString := func(buffer *bytes.Buffer) { + l := rand.Intn(20) + if l == 0 { + l = rand.Intn(20) + 20 + } + for i := 0; i < l; i++ { + buffer.WriteByte(alphanum[rand.Intn(len(alphanum))]) + } + } + + randConfigKey := func(buffer *bytes.Buffer) { + // Unconditionally prepend a comma so that we don't create an invalid + // column string, redundant commas will be handled immediately prior + // to parsing the string. 
+ buffer.WriteRune(',') + + switch rand.Intn(4) { + case 0: + buffer.WriteString(keys[rand.Intn(len(keys))]) + case 1: + buffer.WriteString("user.") + randString(buffer) + case 2: + buffer.WriteString("environment.") + randString(buffer) + case 3: + if rand.Intn(2) == 0 { + buffer.WriteString("volatile.") + randString(buffer) + buffer.WriteString(".hwaddr") + } else { + buffer.WriteString("volatile.") + randString(buffer) + buffer.WriteString(".name") + } + } + + // Randomize the optional fields in a single shot. Empty names are legal + // when specifying the max width, append an extra colon in this case. + opt := rand.Intn(8) + if opt&1 != 0 { + buffer.WriteString(":") + randString(buffer) + } else if opt != 0 { + buffer.WriteString(":") + } + + switch opt { + case 2, 3: + buffer.WriteString(":-1") + case 4, 5: + buffer.WriteString(":0") + case 6, 7: + buffer.WriteRune(':') + buffer.WriteString(strconv.FormatUint(uint64(rand.Uint32()), 10)) + } + + // Unconditionally append a comma so that we don't create an invalid + // column string, redundant commas will be handled immediately prior + // to parsing the string. + buffer.WriteRune(',') + } + + for i := 0; i < 1000; i++ { + go func() { + var buffer bytes.Buffer + + l := rand.Intn(10) + if l == 0 { + l = rand.Intn(10) + 10 + } + + num := l + for j := 0; j < l; j++ { + switch rand.Intn(5) { + case 0: + if buffer.Len() > 0 { + buffer.WriteRune(',') + num-- + } else { + randShorthand(&buffer) + } + + case 1, 2: + randShorthand(&buffer) + case 3, 4: + randConfigKey(&buffer) + } + } + + // Generate the column string, removing any leading, trailing or duplicate commas. + raw := shared.RemoveDuplicatesFromString(strings.Trim(buffer.String(), ","), ",") + + list := listCmd{columnsRaw: raw} + + columns, err := list.parseColumns() + if err != nil { + t.Errorf("Failed to parse columns string. Input: %s, Error: %s", raw, err) + } + if len(columns) != num { + t.Errorf("Did not generate correct number of columns. 
Expected: %d, Actual: %d, Input: %s", num, len(columns), raw) + } + }() + } +} + +func TestInvalidColumns(t *testing.T) { + run := func(raw string) { + list := listCmd{columnsRaw: raw} + _, err := list.parseColumns() + if err == nil { + t.Errorf("Expected error from parseColumns, received nil. Input: %s", raw) + } + } + + for _, v := range alphanum { + if !strings.ContainsRune(shorthand, v) { + run(string(v)) + } + } + + run(",") + run(",a") + run("a,") + run("4,,6") + run(".") + run(":") + run("::") + run(".key:") + run("user.key:") + run("user.key::") + run(":user.key") + run(":user.key:0") + run("user.key::-2") + run("user.key:name:-2") + run("volatile") + run("base_image") + run("volatile.image") +} diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/lxc/main.go juju-core-2.0.0/src/github.com/lxc/lxd/lxc/main.go --- juju-core-2.0~beta15/src/github.com/lxc/lxd/lxc/main.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/lxc/main.go 2016-10-13 14:31:53.000000000 +0000 @@ -2,8 +2,6 @@ import ( "fmt" - "net" - "net/url" "os" "os/exec" "path" @@ -21,28 +19,16 @@ func main() { if err := run(); err != nil { - // The action we take depends on the error we get. 
msg := fmt.Sprintf(i18n.G("error: %v"), err) - switch t := err.(type) { - case *url.Error: - switch u := t.Err.(type) { - case *net.OpError: - if u.Op == "dial" && u.Net == "unix" { - switch errno := u.Err.(type) { - case syscall.Errno: - switch errno { - case syscall.ENOENT: - msg = i18n.G("LXD socket not found; is LXD running?") - case syscall.ECONNREFUSED: - msg = i18n.G("Connection refused; is LXD running?") - case syscall.EACCES: - msg = i18n.G("Permisson denied, are you in the lxd group?") - default: - msg = fmt.Sprintf("%d %s", uintptr(errno), errno.Error()) - } - } - } - } + + lxdErr := lxd.GetLocalLXDErr(err) + switch lxdErr { + case syscall.ENOENT: + msg = i18n.G("LXD socket not found; is LXD installed and running?") + case syscall.ECONNREFUSED: + msg = i18n.G("Connection refused; is LXD running?") + case syscall.EACCES: + msg = i18n.G("Permission denied, are you in the lxd group?") } fmt.Fprintln(os.Stderr, fmt.Sprintf("%s", msg)) @@ -80,6 +66,10 @@ os.Args[1] = "version" } + if len(os.Args) == 2 && os.Args[1] == "--man" { + os.Args[1] = "manpage" + } + if len(os.Args) < 2 { commands["help"].run(nil, nil) os.Exit(1) @@ -131,16 +121,20 @@ return err } - certf := config.ConfigPath("client.crt") - keyf := config.ConfigPath("client.key") - - if !*forceLocal && os.Args[0] != "help" && os.Args[0] != "version" && (!shared.PathExists(certf) || !shared.PathExists(keyf)) { - fmt.Fprintf(os.Stderr, i18n.G("Generating a client certificate. This may take a minute...")+"\n") + // If the user is running a command that may attempt to connect to the local daemon + // and this is the first time the client has been run by the user, then check to see + // if LXD has been properly configured. Don't display the message if the var path + // does not exist (LXD not installed), as the user may be targeting a remote daemon. 
+ if os.Args[0] != "help" && os.Args[0] != "version" && shared.PathExists(shared.VarPath("")) && !shared.PathExists(config.ConfigDir) { - err = shared.FindOrGenCert(certf, keyf) + // Create the config dir so that we don't get in here again for this user. + err = os.MkdirAll(config.ConfigDir, 0750) if err != nil { return err } + + fmt.Fprintf(os.Stderr, i18n.G("If this is your first time using LXD, you should also run: sudo lxd init")+"\n") + fmt.Fprintf(os.Stderr, i18n.G("To start your first container, try: lxc launch ubuntu:16.04")+"\n\n") } err = cmd.run(config, gnuflag.Args()) @@ -165,64 +159,113 @@ } var commands = map[string]command{ - "config": &configCmd{}, - "copy": ©Cmd{}, - "delete": &deleteCmd{}, - "exec": &execCmd{}, - "file": &fileCmd{}, - "finger": &fingerCmd{}, - "help": &helpCmd{}, - "image": &imageCmd{}, - "info": &infoCmd{}, - "init": &initCmd{}, - "launch": &launchCmd{}, - "list": &listCmd{}, - "monitor": &monitorCmd{}, - "move": &moveCmd{}, - "pause": &actionCmd{shared.Freeze, false, false, "pause", -1, false, false, false}, - "profile": &profileCmd{}, - "publish": &publishCmd{}, - "remote": &remoteCmd{}, - "restart": &actionCmd{shared.Restart, true, true, "restart", -1, false, false, false}, + "config": &configCmd{}, + "copy": ©Cmd{}, + "delete": &deleteCmd{}, + "exec": &execCmd{}, + "file": &fileCmd{}, + "finger": &fingerCmd{}, + "help": &helpCmd{}, + "image": &imageCmd{}, + "info": &infoCmd{}, + "init": &initCmd{}, + "launch": &launchCmd{}, + "list": &listCmd{}, + "manpage": &manpageCmd{}, + "monitor": &monitorCmd{}, + "move": &moveCmd{}, + "network": &networkCmd{}, + "pause": &actionCmd{ + action: shared.Freeze, + name: "pause", + additionalHelp: i18n.G("The opposite of `lxc pause` is `lxc start`."), + }, + "profile": &profileCmd{}, + "publish": &publishCmd{}, + "remote": &remoteCmd{}, + "restart": &actionCmd{ + action: shared.Restart, + hasTimeout: true, + visible: true, + name: "restart", + timeout: -1, + }, "restore": &restoreCmd{}, 
"snapshot": &snapshotCmd{}, - "start": &actionCmd{shared.Start, false, true, "start", -1, false, false, false}, - "stop": &actionCmd{shared.Stop, true, true, "stop", -1, false, false, false}, - "version": &versionCmd{}, + "start": &actionCmd{ + action: shared.Start, + visible: true, + name: "start", + }, + "stop": &actionCmd{ + action: shared.Stop, + hasTimeout: true, + visible: true, + name: "stop", + timeout: -1, + }, + "version": &versionCmd{}, +} + +// defaultAliases contains LXC's built-in command line aliases. The built-in +// aliases are checked only if no user-defined alias was found. +var defaultAliases = map[string]string{ + "shell": "exec @ARGS@ -- login -f root", + + "cp": "copy", + "ls": "list", + "mv": "move", + "rename": "move", + "rm": "delete", + + "image cp": "image copy", + "image ls": "image list", + "image rm": "image delete", + + "image alias ls": "image alias list", + "image alias rm": "image alias delete", + + "remote ls": "remote list", + "remote mv": "remote rename", + "remote rm": "remote remove", + + "config device ls": "config device list", + "config device rm": "config device remove", } var errArgs = fmt.Errorf(i18n.G("wrong number of subcommand arguments")) -func expandAlias(config *lxd.Config, origArgs []string) ([]string, bool) { +func findAlias(aliases map[string]string, origArgs []string) ([]string, []string, bool) { foundAlias := false aliasKey := []string{} aliasValue := []string{} - for k, v := range config.Aliases { - matches := false + for k, v := range aliases { + foundAlias = true for i, key := range strings.Split(k, " ") { - if len(origArgs) <= i+1 { - break - } - - if origArgs[i+1] == key { - matches = true - aliasKey = strings.Split(k, " ") - aliasValue = strings.Split(v, " ") + if len(origArgs) <= i+1 || origArgs[i+1] != key { + foundAlias = false break } } - if !matches { - continue + if foundAlias { + aliasKey = strings.Split(k, " ") + aliasValue = strings.Split(v, " ") + break } - - foundAlias = true - break } + 
return aliasKey, aliasValue, foundAlias +} + +func expandAlias(config *lxd.Config, origArgs []string) ([]string, bool) { + aliasKey, aliasValue, foundAlias := findAlias(config.Aliases, origArgs) if !foundAlias { - return []string{}, false + aliasKey, aliasValue, foundAlias = findAlias(defaultAliases, origArgs) + if !foundAlias { + return []string{}, false + } } newArgs := []string{origArgs[0]} diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/lxc/manpage.go juju-core-2.0.0/src/github.com/lxc/lxd/lxc/manpage.go --- juju-core-2.0~beta15/src/github.com/lxc/lxd/lxc/manpage.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/lxc/manpage.go 2016-10-13 14:31:53.000000000 +0000 @@ -0,0 +1,48 @@ +package main + +import ( + "fmt" + "sort" + + "github.com/lxc/lxd" + "github.com/lxc/lxd/shared/i18n" +) + +type manpageCmd struct{} + +func (c *manpageCmd) showByDefault() bool { + return false +} + +func (c *manpageCmd) usage() string { + return i18n.G( + `Prints all the subcommands help.`) +} + +func (c *manpageCmd) flags() { +} + +func (c *manpageCmd) run(_ *lxd.Config, args []string) error { + if len(args) > 0 { + return errArgs + } + + keys := []string{} + for k, _ := range commands { + keys = append(keys, k) + } + sort.Strings(keys) + + header := false + for _, k := range keys { + if header { + fmt.Printf("\n\n") + } + + fmt.Printf("### lxc %s\n", k) + commands["help"].run(nil, []string{k}) + header = true + } + + return nil +} diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/lxc/move.go juju-core-2.0.0/src/github.com/lxc/lxd/lxc/move.go --- juju-core-2.0~beta15/src/github.com/lxc/lxd/lxc/move.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/lxc/move.go 2016-10-13 14:31:53.000000000 +0000 @@ -6,7 +6,6 @@ ) type moveCmd struct { - httpAddr string } func (c *moveCmd) showByDefault() bool { diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/lxc/network.go 
juju-core-2.0.0/src/github.com/lxc/lxd/lxc/network.go --- juju-core-2.0~beta15/src/github.com/lxc/lxd/lxc/network.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/lxc/network.go 2016-10-13 14:31:53.000000000 +0000 @@ -0,0 +1,475 @@ +package main + +import ( + "fmt" + "io/ioutil" + "os" + "sort" + "strings" + "syscall" + + "github.com/olekukonko/tablewriter" + "gopkg.in/yaml.v2" + + "github.com/lxc/lxd" + "github.com/lxc/lxd/shared" + "github.com/lxc/lxd/shared/i18n" + "github.com/lxc/lxd/shared/termios" +) + +type networkCmd struct { +} + +func (c *networkCmd) showByDefault() bool { + return true +} + +func (c *networkCmd) networkEditHelp() string { + return i18n.G( + `### This is a yaml representation of the network. +### Any line starting with a '# will be ignored. +### +### A network consists of a set of configuration items. +### +### An example would look like: +### name: lxdbr0 +### config: +### ipv4.address: 10.62.42.1/24 +### ipv4.nat: true +### ipv6.address: fd00:56ad:9f7a:9800::1/64 +### ipv6.nat: true +### managed: true +### type: bridge +### +### Note that only the configuration can be changed.`) +} + +func (c *networkCmd) usage() string { + return i18n.G( + `Manage networks. + +lxc network list List available networks. +lxc network show Show details of a network. +lxc network create [key=value]... Create a network. +lxc network get Get network configuration. +lxc network set Set network configuration. +lxc network unset Unset network configuration. +lxc network delete Delete a network. +lxc network edit + Edit network, either by launching external editor or reading STDIN. 
+ Example: lxc network edit # launch editor + cat network.yml | lxc network edit # read from network.yml + +lxc network attach [device name] +lxc network attach-profile [device name] + +lxc network detach [device name] +lxc network detach-profile [device name] +`) +} + +func (c *networkCmd) flags() {} + +func (c *networkCmd) run(config *lxd.Config, args []string) error { + if len(args) < 1 { + return errArgs + } + + if args[0] == "list" { + return c.doNetworkList(config, args) + } + + if len(args) < 2 { + return errArgs + } + + remote, network := config.ParseRemoteAndContainer(args[1]) + client, err := lxd.NewClient(config, remote) + if err != nil { + return err + } + + switch args[0] { + case "attach": + return c.doNetworkAttach(client, network, args[2:]) + case "attach-profile": + return c.doNetworkAttachProfile(client, network, args[2:]) + case "create": + return c.doNetworkCreate(client, network, args[2:]) + case "delete": + return c.doNetworkDelete(client, network) + case "detach": + return c.doNetworkDetach(client, network, args[2:]) + case "detach-profile": + return c.doNetworkDetachProfile(client, network, args[2:]) + case "edit": + return c.doNetworkEdit(client, network) + case "get": + return c.doNetworkGet(client, network, args[2:]) + case "set": + return c.doNetworkSet(client, network, args[2:]) + case "unset": + return c.doNetworkSet(client, network, args[2:]) + case "show": + return c.doNetworkShow(client, network) + default: + return errArgs + } +} + +func (c *networkCmd) doNetworkAttach(client *lxd.Client, name string, args []string) error { + if len(args) < 1 || len(args) > 2 { + return errArgs + } + + container := args[0] + devName := name + if len(args) > 1 { + devName = args[1] + } + + network, err := client.NetworkGet(name) + if err != nil { + return err + } + + nicType := "macvlan" + if network.Type == "bridge" { + nicType = "bridged" + } + + props := []string{fmt.Sprintf("nictype=%s", nicType), fmt.Sprintf("parent=%s", name)} + resp, err := 
client.ContainerDeviceAdd(container, devName, "nic", props) + if err != nil { + return err + } + + return client.WaitForSuccess(resp.Operation) +} + +func (c *networkCmd) doNetworkAttachProfile(client *lxd.Client, name string, args []string) error { + if len(args) < 1 || len(args) > 2 { + return errArgs + } + + profile := args[0] + devName := name + if len(args) > 1 { + devName = args[1] + } + + network, err := client.NetworkGet(name) + if err != nil { + return err + } + + nicType := "macvlan" + if network.Type == "bridge" { + nicType = "bridged" + } + + props := []string{fmt.Sprintf("nictype=%s", nicType), fmt.Sprintf("parent=%s", name)} + _, err = client.ProfileDeviceAdd(profile, devName, "nic", props) + return err +} + +func (c *networkCmd) doNetworkCreate(client *lxd.Client, name string, args []string) error { + config := map[string]string{} + + for i := 0; i < len(args); i++ { + entry := strings.SplitN(args[i], "=", 2) + if len(entry) < 2 { + return errArgs + } + + config[entry[0]] = entry[1] + } + + err := client.NetworkCreate(name, config) + if err == nil { + fmt.Printf(i18n.G("Network %s created")+"\n", name) + } + + return err +} + +func (c *networkCmd) doNetworkDetach(client *lxd.Client, name string, args []string) error { + if len(args) < 1 || len(args) > 2 { + return errArgs + } + + containerName := args[0] + devName := "" + if len(args) > 1 { + devName = args[1] + } + + container, err := client.ContainerInfo(containerName) + if err != nil { + return err + } + + if devName == "" { + for n, d := range container.Devices { + if d["type"] == "nic" && d["parent"] == name { + if devName != "" { + return fmt.Errorf(i18n.G("More than one device matches, specify the device name.")) + } + + devName = n + } + } + } + + if devName == "" { + return fmt.Errorf(i18n.G("No device found for this network")) + } + + device, ok := container.Devices[devName] + if !ok { + return fmt.Errorf(i18n.G("The specified device doesn't exist")) + } + + if device["type"] != "nic" || 
device["parent"] != name { + return fmt.Errorf(i18n.G("The specified device doesn't match the network")) + } + + resp, err := client.ContainerDeviceDelete(containerName, devName) + if err != nil { + return err + } + + return client.WaitForSuccess(resp.Operation) +} + +func (c *networkCmd) doNetworkDetachProfile(client *lxd.Client, name string, args []string) error { + if len(args) < 1 || len(args) > 2 { + return errArgs + } + + profileName := args[0] + devName := "" + if len(args) > 1 { + devName = args[1] + } + + profile, err := client.ProfileConfig(profileName) + if err != nil { + return err + } + + if devName == "" { + for n, d := range profile.Devices { + if d["type"] == "nic" && d["parent"] == name { + if devName != "" { + return fmt.Errorf(i18n.G("More than one device matches, specify the device name.")) + } + + devName = n + } + } + } + + if devName == "" { + return fmt.Errorf(i18n.G("No device found for this network")) + } + + device, ok := profile.Devices[devName] + if !ok { + return fmt.Errorf(i18n.G("The specified device doesn't exist")) + } + + if device["type"] != "nic" || device["parent"] != name { + return fmt.Errorf(i18n.G("The specified device doesn't match the network")) + } + + _, err = client.ProfileDeviceDelete(profileName, devName) + return err +} + +func (c *networkCmd) doNetworkDelete(client *lxd.Client, name string) error { + err := client.NetworkDelete(name) + if err == nil { + fmt.Printf(i18n.G("Network %s deleted")+"\n", name) + } + + return err +} + +func (c *networkCmd) doNetworkEdit(client *lxd.Client, name string) error { + // If stdin isn't a terminal, read text from it + if !termios.IsTerminal(int(syscall.Stdin)) { + contents, err := ioutil.ReadAll(os.Stdin) + if err != nil { + return err + } + + newdata := shared.NetworkConfig{} + err = yaml.Unmarshal(contents, &newdata) + if err != nil { + return err + } + return client.NetworkPut(name, newdata) + } + + // Extract the current value + network, err := client.NetworkGet(name) + if 
err != nil { + return err + } + + data, err := yaml.Marshal(&network) + if err != nil { + return err + } + + // Spawn the editor + content, err := shared.TextEditor("", []byte(c.networkEditHelp()+"\n\n"+string(data))) + if err != nil { + return err + } + + for { + // Parse the text received from the editor + newdata := shared.NetworkConfig{} + err = yaml.Unmarshal(content, &newdata) + if err == nil { + err = client.NetworkPut(name, newdata) + } + + // Respawn the editor + if err != nil { + fmt.Fprintf(os.Stderr, i18n.G("Config parsing error: %s")+"\n", err) + fmt.Println(i18n.G("Press enter to open the editor again")) + + _, err := os.Stdin.Read(make([]byte, 1)) + if err != nil { + return err + } + + content, err = shared.TextEditor("", content) + if err != nil { + return err + } + continue + } + break + } + return nil +} + +func (c *networkCmd) doNetworkGet(client *lxd.Client, name string, args []string) error { + // we shifted @args so so it should read "" + if len(args) != 1 { + return errArgs + } + + resp, err := client.NetworkGet(name) + if err != nil { + return err + } + + for k, v := range resp.Config { + if k == args[0] { + fmt.Printf("%s\n", v) + } + } + return nil +} + +func (c *networkCmd) doNetworkList(config *lxd.Config, args []string) error { + var remote string + if len(args) > 1 { + var name string + remote, name = config.ParseRemoteAndContainer(args[1]) + if name != "" { + return fmt.Errorf(i18n.G("Cannot provide container name to list")) + } + } else { + remote = config.DefaultRemote + } + + client, err := lxd.NewClient(config, remote) + if err != nil { + return err + } + + networks, err := client.ListNetworks() + if err != nil { + return err + } + + data := [][]string{} + for _, network := range networks { + if shared.StringInSlice(network.Type, []string{"loopback", "unknown"}) { + continue + } + + strManaged := i18n.G("NO") + if network.Managed { + strManaged = i18n.G("YES") + } + + strUsedBy := fmt.Sprintf("%d", len(network.UsedBy)) + data = 
append(data, []string{network.Name, network.Type, strManaged, strUsedBy}) + } + + table := tablewriter.NewWriter(os.Stdout) + table.SetAutoWrapText(false) + table.SetAlignment(tablewriter.ALIGN_LEFT) + table.SetRowLine(true) + table.SetHeader([]string{ + i18n.G("NAME"), + i18n.G("TYPE"), + i18n.G("MANAGED"), + i18n.G("USED BY")}) + sort.Sort(byName(data)) + table.AppendBulk(data) + table.Render() + + return nil +} + +func (c *networkCmd) doNetworkSet(client *lxd.Client, name string, args []string) error { + // we shifted @args so so it should read " []" + if len(args) < 1 { + return errArgs + } + + network, err := client.NetworkGet(name) + if err != nil { + return err + } + + key := args[0] + var value string + if len(args) < 2 { + value = "" + } else { + value = args[1] + } + + if !termios.IsTerminal(int(syscall.Stdin)) && value == "-" { + buf, err := ioutil.ReadAll(os.Stdin) + if err != nil { + return fmt.Errorf("Can't read from stdin: %s", err) + } + value = string(buf[:]) + } + + network.Config[key] = value + + return client.NetworkPut(name, network) +} + +func (c *networkCmd) doNetworkShow(client *lxd.Client, name string) error { + network, err := client.NetworkGet(name) + if err != nil { + return err + } + + data, err := yaml.Marshal(&network) + fmt.Printf("%s", data) + + return nil +} diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/lxc/profile.go juju-core-2.0.0/src/github.com/lxc/lxd/lxc/profile.go --- juju-core-2.0~beta15/src/github.com/lxc/lxd/lxc/profile.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/lxc/profile.go 2016-10-13 14:31:53.000000000 +0000 @@ -4,9 +4,10 @@ "fmt" "io/ioutil" "os" - "strings" + "sort" "syscall" + "github.com/olekukonko/tablewriter" "gopkg.in/yaml.v2" "github.com/lxc/lxd" @@ -16,7 +17,6 @@ ) type profileCmd struct { - httpAddr string } func (c *profileCmd) showByDefault() bool { @@ -38,7 +38,7 @@ ### devices: ### eth0: ### nictype: bridged -### parent: lxcbr0 +### parent: lxdbr0 ### type: 
nic ### ### Note that the name is shown but cannot be changed`) @@ -54,19 +54,25 @@ lxc profile copy Copy the profile to the specified remote. lxc profile get Get profile configuration. lxc profile set Set profile configuration. +lxc profile unset Unset profile configuration. lxc profile delete Delete a profile. lxc profile edit Edit profile, either by launching external editor or reading STDIN. Example: lxc profile edit # launch editor cat profile.yml | lxc profile edit # read from profile.yml -lxc profile apply - Apply a comma-separated list of profiles to a container, in order. + +lxc profile assign + Assign a comma-separated list of profiles to a container, in order. All profiles passed in this call (and only those) will be applied - to the specified container. - Example: lxc profile apply foo default,bar # Apply default and bar - lxc profile apply foo default # Only default is active - lxc profile apply '' # no profiles are applied anymore - lxc profile apply bar,default # Apply default second now + to the specified container, i.e. it sets the list of profiles exactly to + those specified in this command. To add/remove a particular profile from a + container, use {add|remove} below. + Example: lxc profile assign foo default,bar # Apply default and bar + lxc profile assign foo default # Only default is active + lxc profile assign '' # no profiles are applied anymore + lxc profile assign bar,default # Apply default second now +lxc profile add # add a profile to a container +lxc profile remove # remove the profile from a container Devices: lxc profile device list List devices in the given profile. 
@@ -110,7 +116,29 @@ return c.doProfileDevice(config, args) case "edit": return c.doProfileEdit(client, profile) - case "apply": + case "apply", "assign": + container := profile + switch len(args) { + case 2: + profile = "" + case 3: + profile = args[2] + default: + return errArgs + } + return c.doProfileAssign(client, container, profile) + case "add": + container := profile + switch len(args) { + case 2: + profile = "" + case 3: + profile = args[2] + default: + return errArgs + } + return c.doProfileAdd(client, container, profile) + case "remove": container := profile switch len(args) { case 2: @@ -120,7 +148,7 @@ default: return errArgs } - return c.doProfileApply(client, container, profile) + return c.doProfileRemove(client, container, profile) case "get": return c.doProfileGet(client, profile, args[2:]) case "set": @@ -214,8 +242,8 @@ return err } -func (c *profileCmd) doProfileApply(client *lxd.Client, d string, p string) error { - resp, err := client.ApplyProfile(d, p) +func (c *profileCmd) doProfileAssign(client *lxd.Client, d string, p string) error { + resp, err := client.AssignProfile(d, p) if err != nil { return err } @@ -225,9 +253,58 @@ if p == "" { p = i18n.G("(none)") } - fmt.Printf(i18n.G("Profile %s applied to %s")+"\n", p, d) + fmt.Printf(i18n.G("Profiles %s applied to %s")+"\n", p, d) + } + + return err +} + +func (c *profileCmd) doProfileAdd(client *lxd.Client, d string, p string) error { + ct, err := client.ContainerInfo(d) + if err != nil { + return err + } + + ct.Profiles = append(ct.Profiles, p) + + err = client.UpdateContainerConfig(d, ct.Brief()) + if err != nil { + return err + } + + fmt.Printf(i18n.G("Profile %s added to %s")+"\n", p, d) + + return err +} + +func (c *profileCmd) doProfileRemove(client *lxd.Client, d string, p string) error { + ct, err := client.ContainerInfo(d) + if err != nil { + return err + } + + if !shared.StringInSlice(p, ct.Profiles) { + return fmt.Errorf("Profile %s isn't currently applied to %s", p, d) + } + + 
profiles := []string{} + for _, profile := range ct.Profiles { + if profile == p { + continue + } + + profiles = append(profiles, profile) + } + + ct.Profiles = profiles + + err = client.UpdateContainerConfig(d, ct.Brief()) + if err != nil { + return err } + fmt.Printf(i18n.G("Profile %s removed from %s")+"\n", p, d) + return err } @@ -355,6 +432,23 @@ if err != nil { return err } - fmt.Printf("%s\n", strings.Join(profiles, "\n")) + + data := [][]string{} + for _, profile := range profiles { + strUsedBy := fmt.Sprintf("%d", len(profile.UsedBy)) + data = append(data, []string{profile.Name, strUsedBy}) + } + + table := tablewriter.NewWriter(os.Stdout) + table.SetAutoWrapText(false) + table.SetAlignment(tablewriter.ALIGN_LEFT) + table.SetRowLine(true) + table.SetHeader([]string{ + i18n.G("NAME"), + i18n.G("USED BY")}) + sort.Sort(byName(data)) + table.AppendBulk(data) + table.Render() + return nil } diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/lxc/publish.go juju-core-2.0.0/src/github.com/lxc/lxd/lxc/publish.go --- juju-core-2.0~beta15/src/github.com/lxc/lxd/lxc/publish.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/lxc/publish.go 2016-10-13 14:31:53.000000000 +0000 @@ -12,9 +12,10 @@ ) type publishCmd struct { - pAliases aliasList // aliasList defined in lxc/image.go - makePublic bool - Force bool + pAliases aliasList // aliasList defined in lxc/image.go + compression_algorithm string + makePublic bool + Force bool } func (c *publishCmd) showByDefault() bool { @@ -33,6 +34,7 @@ gnuflag.Var(&c.pAliases, "alias", i18n.G("New alias to define at target")) gnuflag.BoolVar(&c.Force, "force", false, i18n.G("Stop the container if currently running")) gnuflag.BoolVar(&c.Force, "f", false, i18n.G("Stop the container if currently running")) + gnuflag.StringVar(&c.compression_algorithm, "compression", "", i18n.G("Define a compression algorithm: for image or none")) } func (c *publishCmd) run(config *lxd.Config, args []string) error { 
@@ -134,7 +136,7 @@ // Optimized local publish if cRemote == iRemote { - fp, err = d.ImageFromContainer(cName, c.makePublic, c.pAliases, properties) + fp, err = d.ImageFromContainer(cName, c.makePublic, c.pAliases, properties, c.compression_algorithm) if err != nil { return err } @@ -142,7 +144,7 @@ return nil } - fp, err = s.ImageFromContainer(cName, false, nil, properties) + fp, err = s.ImageFromContainer(cName, false, nil, properties, c.compression_algorithm) if err != nil { return err } diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/lxc/remote.go juju-core-2.0.0/src/github.com/lxc/lxd/lxc/remote.go --- juju-core-2.0~beta15/src/github.com/lxc/lxd/lxc/remote.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/lxc/remote.go 2016-10-13 14:31:53.000000000 +0000 @@ -24,7 +24,6 @@ ) type remoteCmd struct { - httpAddr string acceptCert bool password string public bool @@ -39,8 +38,8 @@ return i18n.G( `Manage remote LXD servers. -lxc remote add [--accept-certificate] [--password=PASSWORD] - [--public] [--protocol=PROTOCOL] Add the remote at . +lxc remote add [] [--accept-certificate] [--password=PASSWORD] + [--public] [--protocol=PROTOCOL] Add the remote at . lxc remote remove Remove the remote . lxc remote list List all remotes. lxc remote rename Rename remote to . @@ -56,9 +55,24 @@ gnuflag.BoolVar(&c.public, "public", false, i18n.G("Public image server")) } +func generateClientCertificate(config *lxd.Config) error { + // Generate a client certificate if necessary. The default repositories are + // either local or public, neither of which requires a client certificate. + // Generation of the cert is delayed to avoid unnecessary overhead, e.g in + // testing scenarios where only the default repositories are used. + certf := config.ConfigPath("client.crt") + keyf := config.ConfigPath("client.key") + if !shared.PathExists(certf) || !shared.PathExists(keyf) { + fmt.Fprintf(os.Stderr, i18n.G("Generating a client certificate. 
This may take a minute...")+"\n") + + return shared.FindOrGenCert(certf, keyf, true) + } + return nil +} + func getRemoteCertificate(address string) (*x509.Certificate, error) { // Setup a permissive TLS config - tlsConfig, err := shared.GetTLSConfig("", "", nil) + tlsConfig, err := shared.GetTLSConfig("", "", "", nil) if err != nil { return nil, err } @@ -111,6 +125,12 @@ return nil } + // Fix broken URL parser + if !strings.Contains(addr, "://") && remoteURL.Scheme != "" && remoteURL.Scheme != "unix" && remoteURL.Host == "" { + remoteURL.Host = addr + remoteURL.Scheme = "" + } + if remoteURL.Scheme != "" { if remoteURL.Scheme != "unix" && remoteURL.Scheme != "https" { return fmt.Errorf(i18n.G("Invalid URL scheme \"%s\" in \"%s\""), remoteURL.Scheme, addr) @@ -120,7 +140,7 @@ } else if addr[0] == '/' { rScheme = "unix" } else { - if !shared.PathExists(addr) { + if !shared.IsUnixSocket(addr) { rScheme = "https" } else { rScheme = "unix" @@ -142,17 +162,7 @@ } if rScheme == "unix" { - if addr[0:5] == "unix:" { - if addr[0:7] == "unix://" { - if len(addr) > 8 { - rHost = addr[8:] - } else { - rHost = "" - } - } else { - rHost = addr[6:] - } - } + rHost = strings.TrimPrefix(strings.TrimPrefix(addr, "unix:"), "//") rPort = "" } @@ -166,7 +176,15 @@ addr = rScheme + "://" + rHost } - /* Actually add the remote */ + // Finally, actually add the remote, almost... If the remote is a private + // HTTPS server then we need to ensure we have a client certificate before + // adding the remote server. 
+ if rScheme != "unix" && !public { + err = generateClientCertificate(config) + if err != nil { + return err + } + } config.Remotes[server] = lxd.RemoteConfig{Addr: addr, Protocol: protocol} remote := config.ParseRemote(server) @@ -175,7 +193,7 @@ return err } - if len(addr) > 5 && addr[0:5] == "unix:" { + if strings.HasPrefix(addr, "unix:") { // NewClient succeeded so there was a lxd there (we fingered // it) so just accept it return nil @@ -276,7 +294,7 @@ func (c *remoteCmd) removeCertificate(config *lxd.Config, remote string) { certf := config.ServerCertPath(remote) - shared.Debugf("Trying to remove %s", certf) + shared.LogDebugf("Trying to remove %s", certf) os.Remove(certf) } @@ -288,18 +306,24 @@ switch args[0] { case "add": - if len(args) < 3 { + if len(args) < 2 { return errArgs } - if rc, ok := config.Remotes[args[1]]; ok { - return fmt.Errorf(i18n.G("remote %s exists as <%s>"), args[1], rc.Addr) + remote := args[1] + fqdn := args[1] + if len(args) > 2 { + fqdn = args[2] + } + + if rc, ok := config.Remotes[remote]; ok { + return fmt.Errorf(i18n.G("remote %s exists as <%s>"), remote, rc.Addr) } - err := c.addServer(config, args[1], args[2], c.acceptCert, c.password, c.public, c.protocol) + err := c.addServer(config, remote, fqdn, c.acceptCert, c.password, c.public, c.protocol) if err != nil { - delete(config.Remotes, args[1]) - c.removeCertificate(config, args[1]) + delete(config.Remotes, remote) + c.removeCertificate(config, remote) return err } diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/lxc/version.go juju-core-2.0.0/src/github.com/lxc/lxd/lxc/version.go --- juju-core-2.0~beta15/src/github.com/lxc/lxd/lxc/version.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/lxc/version.go 2016-10-13 14:31:53.000000000 +0000 @@ -11,12 +11,12 @@ type versionCmd struct{} func (c *versionCmd) showByDefault() bool { - return true + return false } func (c *versionCmd) usage() string { return i18n.G( - `Prints the version number 
of LXD. + `Prints the version number of this client tool. lxc version`) } diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/lxd/api_1.0.go juju-core-2.0.0/src/github.com/lxc/lxd/lxd/api_1.0.go --- juju-core-2.0~beta15/src/github.com/lxc/lxd/lxd/api_1.0.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/lxd/api_1.0.go 2016-10-13 14:31:53.000000000 +0000 @@ -5,6 +5,7 @@ "fmt" "net/http" "os" + "reflect" "syscall" "gopkg.in/lxc/go-lxc.v2" @@ -44,9 +45,38 @@ func api10Get(d *Daemon, r *http.Request) Response { body := shared.Jmap{ - "api_extensions": []string{}, - "api_status": "development", - "api_version": shared.APIVersion, + /* List of API extensions in the order they were added. + * + * The following kind of changes require an addition to api_extensions: + * - New configuration key + * - New valid values for a configuration key + * - New REST API endpoint + * - New argument inside an existing REST API call + * - New HTTPs authentication mechanisms or protocols + */ + "api_extensions": []string{ + "storage_zfs_remove_snapshots", + "container_host_shutdown_timeout", + "container_syscall_filtering", + "auth_pki", + "container_last_used_at", + "etag", + "patch", + "usb_devices", + "https_allowed_credentials", + "image_compression_algorithm", + "directory_manipulation", + "container_cpu_time", + "storage_zfs_use_refquota", + "storage_lvm_mount_options", + "network", + "profile_usedby", + "container_push", + "container_exec_recording", + }, + + "api_status": "stable", + "api_version": shared.APIVersion, } if d.isTrustedClient(r) { @@ -125,29 +155,13 @@ body["environment"] = env body["public"] = false - - serverConfig, err := d.ConfigValuesGet() - if err != nil { - return InternalError(err) - } - - config := shared.Jmap{} - - for key, value := range serverConfig { - if key == "core.trust_password" { - config[key] = true - } else { - config[key] = value - } - } - - body["config"] = config + body["config"] = daemonConfigRender() } else { 
body["auth"] = "untrusted" body["public"] = false } - return SyncResponse(true, body) + return SyncResponseETag(true, body, body["config"]) } type apiPut struct { @@ -160,12 +174,58 @@ return InternalError(err) } + err = etagCheck(r, oldConfig) + if err != nil { + return PreconditionFailed(err) + } + req := apiPut{} + if err := shared.ReadToJSON(r.Body, &req); err != nil { + return BadRequest(err) + } + + return doApi10Update(d, oldConfig, req) +} + +func api10Patch(d *Daemon, r *http.Request) Response { + oldConfig, err := dbConfigValuesGet(d.db) + if err != nil { + return InternalError(err) + } + + err = etagCheck(r, oldConfig) + if err != nil { + return PreconditionFailed(err) + } + req := apiPut{} if err := shared.ReadToJSON(r.Body, &req); err != nil { return BadRequest(err) } + if req.Config == nil { + return EmptySyncResponse + } + + for k, v := range oldConfig { + _, ok := req.Config[k] + if !ok { + req.Config[k] = v + } + } + + return doApi10Update(d, oldConfig, req) +} + +func doApi10Update(d *Daemon, oldConfig map[string]string, req apiPut) Response { + // Deal with special keys + for k, v := range req.Config { + config := daemonConfig[k] + if config != nil && config.hiddenValue && v == true { + req.Config[k] = oldConfig[k] + } + } + // Diff the configs changedConfig := map[string]interface{}{} for key, value := range oldConfig { @@ -180,84 +240,30 @@ } } - for key, value := range changedConfig { - if value == nil { - value = "" + for key, valueRaw := range changedConfig { + if valueRaw == nil { + valueRaw = "" } - if !d.ConfigKeyIsValid(key) { - return BadRequest(fmt.Errorf("Bad server config key: '%s'", key)) + s := reflect.ValueOf(valueRaw) + if !s.IsValid() || s.Kind() != reflect.String { + return BadRequest(fmt.Errorf("Invalid value type for '%s'", key)) } - if key == "core.trust_password" { - if value == true { - continue - } - - err := d.PasswordSet(value.(string)) - if err != nil { - return InternalError(err) - } - } else if key == 
"storage.lvm_vg_name" { - err := storageLVMSetVolumeGroupNameConfig(d, value.(string)) - if err != nil { - return InternalError(err) - } - if err = d.SetupStorageDriver(); err != nil { - return InternalError(err) - } - } else if key == "storage.lvm_thinpool_name" { - err := storageLVMSetThinPoolNameConfig(d, value.(string)) - if err != nil { - return InternalError(err) - } - } else if key == "storage.lvm_fstype" { - err := storageLVMSetFsTypeConfig(d, value.(string)) - if err != nil { - return InternalError(err) - } - } else if key == "storage.zfs_pool_name" { - err := storageZFSSetPoolNameConfig(d, value.(string)) - if err != nil { - return InternalError(err) - } - if err = d.SetupStorageDriver(); err != nil { - return InternalError(err) - } - } else if key == "core.https_address" { - old_address, err := d.ConfigValueGet("core.https_address") - if err != nil { - return InternalError(err) - } - - err = d.UpdateHTTPsPort(old_address, value.(string)) - if err != nil { - return InternalError(err) - } + value := valueRaw.(string) - err = d.ConfigValueSet(key, value.(string)) - if err != nil { - return InternalError(err) - } - } else if key == "core.proxy_https" || key == "core.proxy_http" || key == "core.proxy_ignore_hosts" { - err = d.ConfigValueSet(key, value.(string)) - if err != nil { - return InternalError(err) - } + confKey, ok := daemonConfig[key] + if !ok { + return BadRequest(fmt.Errorf("Bad server config key: '%s'", key)) + } - d.updateProxy() - } else { - err := d.ConfigValueSet(key, value.(string)) - if err != nil { - return InternalError(err) - } - if key == "images.remote_cache_expiry" { - d.pruneChan <- true - } + err := confKey.Set(d, value) + if err != nil { + return SmartError(err) } } return EmptySyncResponse } -var api10Cmd = Command{name: "", untrustedGet: true, get: api10Get, put: api10Put} +var api10Cmd = Command{name: "", untrustedGet: true, get: api10Get, put: api10Put, patch: api10Patch} diff -Nru 
juju-core-2.0~beta15/src/github.com/lxc/lxd/lxd/apparmor.go juju-core-2.0.0/src/github.com/lxc/lxd/lxd/apparmor.go --- juju-core-2.0~beta15/src/github.com/lxc/lxd/lxd/apparmor.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/lxd/apparmor.go 2016-10-13 14:31:53.000000000 +0000 @@ -23,16 +23,223 @@ var aaPath = shared.VarPath("security", "apparmor") -const NESTING_AA_PROFILE = ` +const AA_PROFILE_BASE = ` + ### Base profile + capability, + dbus, + file, + network, + umount, + + # Allow us to receive signals from anywhere. + signal (receive), + + # Allow us to send signals to ourselves + signal peer=@{profile_name}, + + # Allow other processes to read our /proc entries, futexes, perf tracing and + # kcmp for now (they will need 'read' in the first place). Administrators can + # override with: + # deny ptrace (readby) ... + ptrace (readby), + + # Allow other processes to trace us by default (they will need 'trace' in + # the first place). Administrators can override with: + # deny ptrace (tracedby) ... + ptrace (tracedby), + + # Allow us to ptrace ourselves + ptrace peer=@{profile_name}, + + # ignore DENIED message on / remount + deny mount options=(ro, remount) -> /, + deny mount options=(ro, remount, silent) -> /, + + # allow tmpfs mounts everywhere + mount fstype=tmpfs, + + # allow hugetlbfs mounts everywhere + mount fstype=hugetlbfs, + + # allow mqueue mounts everywhere + mount fstype=mqueue, + + # allow fuse mounts everywhere + mount fstype=fuse, + mount fstype=fuse.*, + + # deny access under /proc/bus to avoid e.g. 
messing with pci devices directly + deny @{PROC}/bus/** wklx, + + # deny writes in /proc/sys/fs but allow binfmt_misc to be mounted + mount fstype=binfmt_misc -> /proc/sys/fs/binfmt_misc/, + deny @{PROC}/sys/fs/** wklx, + + # allow efivars to be mounted, writing to it will be blocked though + mount fstype=efivarfs -> /sys/firmware/efi/efivars/, + + # block some other dangerous paths + deny @{PROC}/kcore rwklx, + deny @{PROC}/kmem rwklx, + deny @{PROC}/mem rwklx, + deny @{PROC}/sysrq-trigger rwklx, + + # deny writes in /sys except for /sys/fs/cgroup, also allow + # fusectl, securityfs and debugfs to be mounted there (read-only) + mount fstype=fusectl -> /sys/fs/fuse/connections/, + mount fstype=securityfs -> /sys/kernel/security/, + mount fstype=debugfs -> /sys/kernel/debug/, + deny mount fstype=debugfs -> /var/lib/ureadahead/debugfs/, + mount fstype=proc -> /proc/, + mount fstype=sysfs -> /sys/, + mount options=(rw, nosuid, nodev, noexec, remount) -> /sys/, + deny /sys/firmware/efi/efivars/** rwklx, + # note, /sys/kernel/security/** handled below + mount options=(move) /sys/fs/cgroup/cgmanager/ -> /sys/fs/cgroup/cgmanager.lower/, + mount options=(ro, nosuid, nodev, noexec, remount, strictatime) -> /sys/fs/cgroup/, + + # deny reads from debugfs + deny /sys/kernel/debug/{,**} rwklx, + + # allow paths to be made slave, shared, private or unbindable + # FIXME: This currently doesn't work due to the apparmor parser treating those as allowing all mounts. 
+# mount options=(rw,make-slave) -> **, +# mount options=(rw,make-rslave) -> **, +# mount options=(rw,make-shared) -> **, +# mount options=(rw,make-rshared) -> **, +# mount options=(rw,make-private) -> **, +# mount options=(rw,make-rprivate) -> **, +# mount options=(rw,make-unbindable) -> **, +# mount options=(rw,make-runbindable) -> **, + + # allow bind-mounts of anything except /proc, /sys and /dev + mount options=(rw,bind) /[^spd]*{,/**}, + mount options=(rw,bind) /d[^e]*{,/**}, + mount options=(rw,bind) /de[^v]*{,/**}, + mount options=(rw,bind) /dev/.[^l]*{,/**}, + mount options=(rw,bind) /dev/.l[^x]*{,/**}, + mount options=(rw,bind) /dev/.lx[^c]*{,/**}, + mount options=(rw,bind) /dev/.lxc?*{,/**}, + mount options=(rw,bind) /dev/[^.]*{,/**}, + mount options=(rw,bind) /dev?*{,/**}, + mount options=(rw,bind) /p[^r]*{,/**}, + mount options=(rw,bind) /pr[^o]*{,/**}, + mount options=(rw,bind) /pro[^c]*{,/**}, + mount options=(rw,bind) /proc?*{,/**}, + mount options=(rw,bind) /s[^y]*{,/**}, + mount options=(rw,bind) /sy[^s]*{,/**}, + mount options=(rw,bind) /sys?*{,/**}, + + # allow moving mounts except for /proc, /sys and /dev + mount options=(rw,move) /[^spd]*{,/**}, + mount options=(rw,move) /d[^e]*{,/**}, + mount options=(rw,move) /de[^v]*{,/**}, + mount options=(rw,move) /dev/.[^l]*{,/**}, + mount options=(rw,move) /dev/.l[^x]*{,/**}, + mount options=(rw,move) /dev/.lx[^c]*{,/**}, + mount options=(rw,move) /dev/.lxc?*{,/**}, + mount options=(rw,move) /dev/[^.]*{,/**}, + mount options=(rw,move) /dev?*{,/**}, + mount options=(rw,move) /p[^r]*{,/**}, + mount options=(rw,move) /pr[^o]*{,/**}, + mount options=(rw,move) /pro[^c]*{,/**}, + mount options=(rw,move) /proc?*{,/**}, + mount options=(rw,move) /s[^y]*{,/**}, + mount options=(rw,move) /sy[^s]*{,/**}, + mount options=(rw,move) /sys?*{,/**}, + + # generated by: lxc-generate-aa-rules.py container-rules.base + deny /proc/sys/[^kn]*{,/**} wklx, + deny /proc/sys/k[^e]*{,/**} wklx, + deny /proc/sys/ke[^r]*{,/**} 
wklx, + deny /proc/sys/ker[^n]*{,/**} wklx, + deny /proc/sys/kern[^e]*{,/**} wklx, + deny /proc/sys/kerne[^l]*{,/**} wklx, + deny /proc/sys/kernel/[^smhd]*{,/**} wklx, + deny /proc/sys/kernel/d[^o]*{,/**} wklx, + deny /proc/sys/kernel/do[^m]*{,/**} wklx, + deny /proc/sys/kernel/dom[^a]*{,/**} wklx, + deny /proc/sys/kernel/doma[^i]*{,/**} wklx, + deny /proc/sys/kernel/domai[^n]*{,/**} wklx, + deny /proc/sys/kernel/domain[^n]*{,/**} wklx, + deny /proc/sys/kernel/domainn[^a]*{,/**} wklx, + deny /proc/sys/kernel/domainna[^m]*{,/**} wklx, + deny /proc/sys/kernel/domainnam[^e]*{,/**} wklx, + deny /proc/sys/kernel/domainname?*{,/**} wklx, + deny /proc/sys/kernel/h[^o]*{,/**} wklx, + deny /proc/sys/kernel/ho[^s]*{,/**} wklx, + deny /proc/sys/kernel/hos[^t]*{,/**} wklx, + deny /proc/sys/kernel/host[^n]*{,/**} wklx, + deny /proc/sys/kernel/hostn[^a]*{,/**} wklx, + deny /proc/sys/kernel/hostna[^m]*{,/**} wklx, + deny /proc/sys/kernel/hostnam[^e]*{,/**} wklx, + deny /proc/sys/kernel/hostname?*{,/**} wklx, + deny /proc/sys/kernel/m[^s]*{,/**} wklx, + deny /proc/sys/kernel/ms[^g]*{,/**} wklx, + deny /proc/sys/kernel/msg*/** wklx, + deny /proc/sys/kernel/s[^he]*{,/**} wklx, + deny /proc/sys/kernel/se[^m]*{,/**} wklx, + deny /proc/sys/kernel/sem*/** wklx, + deny /proc/sys/kernel/sh[^m]*{,/**} wklx, + deny /proc/sys/kernel/shm*/** wklx, + deny /proc/sys/kernel?*{,/**} wklx, + deny /proc/sys/n[^e]*{,/**} wklx, + deny /proc/sys/ne[^t]*{,/**} wklx, + deny /proc/sys/net?*{,/**} wklx, + deny /sys/[^fdck]*{,/**} wklx, + deny /sys/c[^l]*{,/**} wklx, + deny /sys/cl[^a]*{,/**} wklx, + deny /sys/cla[^s]*{,/**} wklx, + deny /sys/clas[^s]*{,/**} wklx, + deny /sys/class/[^n]*{,/**} wklx, + deny /sys/class/n[^e]*{,/**} wklx, + deny /sys/class/ne[^t]*{,/**} wklx, + deny /sys/class/net?*{,/**} wklx, + deny /sys/class?*{,/**} wklx, + deny /sys/d[^e]*{,/**} wklx, + deny /sys/de[^v]*{,/**} wklx, + deny /sys/dev[^i]*{,/**} wklx, + deny /sys/devi[^c]*{,/**} wklx, + deny /sys/devic[^e]*{,/**} wklx, + 
deny /sys/device[^s]*{,/**} wklx, + deny /sys/devices/[^v]*{,/**} wklx, + deny /sys/devices/v[^i]*{,/**} wklx, + deny /sys/devices/vi[^r]*{,/**} wklx, + deny /sys/devices/vir[^t]*{,/**} wklx, + deny /sys/devices/virt[^u]*{,/**} wklx, + deny /sys/devices/virtu[^a]*{,/**} wklx, + deny /sys/devices/virtua[^l]*{,/**} wklx, + deny /sys/devices/virtual/[^n]*{,/**} wklx, + deny /sys/devices/virtual/n[^e]*{,/**} wklx, + deny /sys/devices/virtual/ne[^t]*{,/**} wklx, + deny /sys/devices/virtual/net?*{,/**} wklx, + deny /sys/devices/virtual?*{,/**} wklx, + deny /sys/devices?*{,/**} wklx, + deny /sys/f[^s]*{,/**} wklx, + deny /sys/fs/[^c]*{,/**} wklx, + deny /sys/fs/c[^g]*{,/**} wklx, + deny /sys/fs/cg[^r]*{,/**} wklx, + deny /sys/fs/cgr[^o]*{,/**} wklx, + deny /sys/fs/cgro[^u]*{,/**} wklx, + deny /sys/fs/cgrou[^p]*{,/**} wklx, + deny /sys/fs/cgroup?*{,/**} wklx, + deny /sys/fs?*{,/**} wklx, +` + +const AA_PROFILE_NESTING = ` pivot_root, + ptrace, + signal, + + deny /dev/.lxd/proc/** rw, + deny /dev/.lxd/sys/** rw, + mount /var/lib/lxd/shmounts/ -> /var/lib/lxd/shmounts/, mount none -> /var/lib/lxd/shmounts/, mount fstype=proc -> /usr/lib/*/lxc/**, mount fstype=sysfs -> /usr/lib/*/lxc/**, mount options=(rw,bind), mount options=(rw,rbind), - deny /dev/.lxd/proc/** rw, - deny /dev/.lxd/sys/** rw, mount options=(rw,make-rshared), # there doesn't seem to be a way to ask for: @@ -41,65 +248,146 @@ # So allow all mounts until that is straightened out: mount, mount options=bind /var/lib/lxd/shmounts/** -> /var/lib/lxd/**, - # lxc-container-default-with-nesting also inherited these - # from start-container, and seems to need them. 
- ptrace, - signal, ` -const DEFAULT_AA_PROFILE = ` -#include -profile "%s" flags=(attach_disconnected,mediate_deleted) { - #include - - # Special exception for cgroup namespaces - %s +const AA_PROFILE_UNPRIVILEGED = ` + pivot_root, - # user input raw.apparmor below here - %s + mount options=(rw,make-slave) -> **, + mount options=(rw,make-rslave) -> **, + mount options=(rw,make-shared) -> **, + mount options=(rw,make-rshared) -> **, + mount options=(rw,make-private) -> **, + mount options=(rw,make-rprivate) -> **, + mount options=(rw,make-unbindable) -> **, + mount options=(rw,make-runbindable) -> **, - # nesting support goes here if needed - %s - change_profile -> "%s", -}` + mount options=(rw,bind), + mount options=(rw,rbind), +` -func AAProfileFull(c container) string { - lxddir := shared.VarPath("") - if len(c.Name())+len(lxddir)+7 >= 253 { +func mkApparmorName(name string) string { + if len(name)+7 >= 253 { hash := sha256.New() - io.WriteString(hash, lxddir) - lxddir = fmt.Sprintf("%x", hash.Sum(nil)) + io.WriteString(hash, name) + return fmt.Sprintf("%x", hash.Sum(nil)) } + return name +} + +func AANamespace(c container) string { + /* / is not allowed in apparmor namespace names; let's also trim the + * leading / so it doesn't look like "-var-lib-lxd" + */ + lxddir := strings.Replace(strings.Trim(shared.VarPath(""), "/"), "/", "-", -1) + lxddir = mkApparmorName(lxddir) return fmt.Sprintf("lxd-%s_<%s>", c.Name(), lxddir) } -func AAProfileShort(c container) string { - return fmt.Sprintf("lxd-%s", c.Name()) +func AAProfileFull(c container) string { + lxddir := shared.VarPath("") + lxddir = mkApparmorName(lxddir) + return fmt.Sprintf("lxd-%s_<%s>", c.Name(), lxddir) } -func AAProfileCgns() string { - if shared.PathExists("/proc/self/ns/cgroup") { - return " mount fstype=cgroup -> /sys/fs/cgroup/**," - } - return "" +func AAProfileShort(c container) string { + return fmt.Sprintf("lxd-%s", c.Name()) } // getProfileContent generates the apparmor profile template 
from the given // container. This includes the stock lxc includes as well as stuff from // raw.apparmor. func getAAProfileContent(c container) string { - rawApparmor, ok := c.ExpandedConfig()["raw.apparmor"] - if !ok { - rawApparmor = "" + profile := strings.TrimLeft(AA_PROFILE_BASE, "\n") + + // Apply new features + if aaParserSupports("unix") { + profile += ` + ### Feature: unix + # Allow receive via unix sockets from anywhere + unix (receive), + + # Allow all unix in the container + unix peer=(label=@{profile_name}), +` + } + + // Apply cgns bits + if shared.PathExists("/proc/self/ns/cgroup") { + profile += "\n ### Feature: cgroup namespace\n" + profile += " mount fstype=cgroup -> /sys/fs/cgroup/**,\n" + } + + if aaStacking { + profile += "\n ### Feature: apparmor stacking\n" + + if c.IsPrivileged() { + profile += "\n ### Configuration: apparmor loading disabled in privileged containers\n" + profile += " deny /sys/k*{,/**} rwklx,\n" + } else { + profile += ` ### Configuration: apparmor loading in unprivileged containers + deny /sys/k[^e]*{,/**} wklx, + deny /sys/ke[^r]*{,/**} wklx, + deny /sys/ker[^n]*{,/**} wklx, + deny /sys/kern[^e]*{,/**} wklx, + deny /sys/kerne[^l]*{,/**} wklx, + deny /sys/kernel/[^s]*{,/**} wklx, + deny /sys/kernel/s[^e]*{,/**} wklx, + deny /sys/kernel/se[^c]*{,/**} wklx, + deny /sys/kernel/sec[^u]*{,/**} wklx, + deny /sys/kernel/secu[^r]*{,/**} wklx, + deny /sys/kernel/secur[^i]*{,/**} wklx, + deny /sys/kernel/securi[^t]*{,/**} wklx, + deny /sys/kernel/securit[^y]*{,/**} wklx, + deny /sys/kernel/security/[^a]*{,/**} wklx, + deny /sys/kernel/security/a[^p]*{,/**} wklx, + deny /sys/kernel/security/ap[^p]*{,/**} wklx, + deny /sys/kernel/security/app[^a]*{,/**} wklx, + deny /sys/kernel/security/appa[^r]*{,/**} wklx, + deny /sys/kernel/security/appar[^m]*{,/**} wklx, + deny /sys/kernel/security/apparm[^o]*{,/**} wklx, + deny /sys/kernel/security/apparmo[^r]*{,/**} wklx, + deny /sys/kernel/security/apparmor?*{,/**} wklx, + deny 
/sys/kernel/security?*{,/**} wklx, + deny /sys/kernel?*{,/**} wklx, +` + profile += fmt.Sprintf(" change_profile -> \":%s://*\",\n", AANamespace(c)) + } + } else { + profile += "\n ### Feature: apparmor stacking (not present)\n" + profile += " deny /sys/k*{,/**} rwklx,\n" } - nesting := "" if c.IsNesting() { - nesting = NESTING_AA_PROFILE + // Apply nesting bits + profile += "\n ### Configuration: nesting\n" + profile += strings.TrimLeft(AA_PROFILE_NESTING, "\n") + if !aaStacking || c.IsPrivileged() { + profile += fmt.Sprintf(" change_profile -> \"%s\",\n", AAProfileFull(c)) + } } - return fmt.Sprintf(DEFAULT_AA_PROFILE, AAProfileFull(c), AAProfileCgns(), rawApparmor, nesting, AAProfileFull(c)) + if !c.IsPrivileged() { + // Apply unprivileged bits + profile += "\n ### Configuration: unprivileged containers\n" + profile += strings.TrimLeft(AA_PROFILE_UNPRIVILEGED, "\n") + } + + // Append raw.apparmor + rawApparmor, ok := c.ExpandedConfig()["raw.apparmor"] + if ok { + profile += "\n ### Configuration: raw.apparmor\n" + for _, line := range strings.Split(strings.Trim(rawApparmor, "\n"), "\n") { + profile += fmt.Sprintf(" %s\n", line) + } + } + + return fmt.Sprintf(`#include +profile "%s" flags=(attach_disconnected,mediate_deleted) { +%s +} +`, AAProfileFull(c), strings.Trim(profile, "\n")) } func runApparmor(command string, c container) error { @@ -115,13 +403,26 @@ output, err := cmd.CombinedOutput() if err != nil { - shared.Log.Error("Running apparmor", + shared.LogError("Running apparmor", log.Ctx{"action": command, "output": string(output), "err": err}) } return err } +func mkApparmorNamespace(namespace string) error { + if !aaStacking { + return nil + } + + p := path.Join("/sys/kernel/security/apparmor/policy/namespaces", namespace) + if err := os.Mkdir(p, 0755); !os.IsExist(err) { + return err + } + + return nil +} + // Ensure that the container's policy is loaded into the kernel so the // container can boot. 
func AALoadProfile(c container) error { @@ -129,6 +430,10 @@ return nil } + if err := mkApparmorNamespace(AANamespace(c)); err != nil { + return err + } + /* In order to avoid forcing a profile parse (potentially slow) on * every container start, let's use apparmor's binary policy cache, * which checks mtime of the files to figure out if the policy needs to @@ -165,19 +470,27 @@ return runApparmor(APPARMOR_CMD_LOAD, c) } -// Ensure that the container's policy is unloaded to free kernel memory. This -// does not delete the policy from disk or cache. -func AAUnloadProfile(c container) error { +// Ensure that the container's policy namespace is unloaded to free kernel +// memory. This does not delete the policy from disk or cache. +func AADestroy(c container) error { if !aaAdmin { return nil } + if aaStacking { + p := path.Join("/sys/kernel/security/apparmor/policy/namespaces", AANamespace(c)) + if err := os.Remove(p); err != nil { + shared.LogError("error removing apparmor namespace", log.Ctx{"err": err, "ns": p}) + } + } + return runApparmor(APPARMOR_CMD_UNLOAD, c) } // Parse the profile without loading it into the kernel. 
func AAParseProfile(c container) error { if !aaAvailable { + return nil } return runApparmor(APPARMOR_CMD_PARSE, c) @@ -204,3 +517,36 @@ } return "" } + +func aaParserSupports(feature string) bool { + out, err := exec.Command("apparmor_parser", "--version").CombinedOutput() + if err != nil { + return false + } + + major := 0 + minor := 0 + micro := 0 + + _, err = fmt.Sscanf(strings.Split(string(out), "\n")[0], "AppArmor parser version %d.%d.%d", &major, &minor, &micro) + if err != nil { + return false + } + + switch feature { + case "unix": + if major < 2 { + return false + } + + if major == 2 && minor < 10 { + return false + } + + if major == 2 && minor == 10 && micro < 95 { + return false + } + } + + return true +} diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/lxd/certificates.go juju-core-2.0.0/src/github.com/lxc/lxd/lxd/certificates.go --- juju-core-2.0~beta15/src/github.com/lxc/lxd/lxd/certificates.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/lxd/certificates.go 2016-10-13 14:31:53.000000000 +0000 @@ -63,15 +63,20 @@ dbCerts, err := dbCertsGet(d.db) if err != nil { - shared.Logf("Error reading certificates from database: %s", err) + shared.LogInfof("Error reading certificates from database: %s", err) return } for _, dbCert := range dbCerts { certBlock, _ := pem.Decode([]byte(dbCert.Certificate)) + if certBlock == nil { + shared.LogInfof("Error decoding certificate for %s: %s", dbCert.Name, err) + continue + } + cert, err := x509.ParseCertificate(certBlock.Bytes) if err != nil { - shared.Logf("Error reading certificate for %s: %s", dbCert.Name, err) + shared.LogInfof("Error reading certificate for %s: %s", dbCert.Name, err) continue } d.clientCerts = append(d.clientCerts, *cert) @@ -91,20 +96,25 @@ } func certificatesPost(d *Daemon, r *http.Request) Response { + // Parse the request req := certificatesPostBody{} - if err := shared.ReadToJSON(r.Body, &req); err != nil { return BadRequest(err) } + // Access check + if 
!d.isTrustedClient(r) && d.PasswordCheck(req.Password) != nil { + return Forbidden + } + if req.Type != "client" { return BadRequest(fmt.Errorf("Unknown request type %s", req.Type)) } + // Extract the certificate var cert *x509.Certificate var name string if req.Certificate != "" { - data, err := base64.StdEncoding.DecodeString(req.Certificate) if err != nil { return BadRequest(err) @@ -115,9 +125,7 @@ return BadRequest(err) } name = req.Name - } else if r.TLS != nil { - if len(r.TLS.PeerCertificates) < 1 { return BadRequest(fmt.Errorf("No client certificate provided")) } @@ -136,14 +144,10 @@ fingerprint := certGenerateFingerprint(cert) for _, existingCert := range d.clientCerts { if fingerprint == certGenerateFingerprint(&existingCert) { - return EmptySyncResponse + return BadRequest(fmt.Errorf("Certificate already in trust store")) } } - if !d.isTrustedClient(r) && !d.PasswordCheck(req.Password) { - return Forbidden - } - err := saveCert(d, name, cert) if err != nil { return SmartError(err) @@ -151,18 +155,10 @@ d.clientCerts = append(d.clientCerts, *cert) - return EmptySyncResponse + return SyncResponseLocation(true, nil, fmt.Sprintf("/%s/certificates/%s", shared.APIVersion, fingerprint)) } -var certificatesCmd = Command{ - "certificates", - false, - true, - certificatesGet, - nil, - certificatesPost, - nil, -} +var certificatesCmd = Command{name: "certificates", untrustedPost: true, get: certificatesGet, post: certificatesPost} func certificateFingerprintGet(d *Daemon, r *http.Request) Response { fingerprint := mux.Vars(r)["fingerprint"] @@ -211,12 +207,4 @@ return EmptySyncResponse } -var certificateFingerprintCmd = Command{ - "certificates/{fingerprint}", - false, - false, - certificateFingerprintGet, - nil, - nil, - certificateFingerprintDelete, -} +var certificateFingerprintCmd = Command{name: "certificates/{fingerprint}", get: certificateFingerprintGet, delete: certificateFingerprintDelete} diff -Nru 
juju-core-2.0~beta15/src/github.com/lxc/lxd/lxd/cgroup.go juju-core-2.0.0/src/github.com/lxc/lxd/lxd/cgroup.go --- juju-core-2.0~beta15/src/github.com/lxc/lxd/lxd/cgroup.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/lxd/cgroup.go 2016-10-13 14:31:53.000000000 +0000 @@ -0,0 +1,61 @@ +package main + +import ( + "bufio" + "io/ioutil" + "os" + "path" + "strings" +) + +func getInitCgroupPath(controller string) string { + f, err := os.Open("/proc/1/cgroup") + if err != nil { + return "/" + } + defer f.Close() + + scan := bufio.NewScanner(f) + for scan.Scan() { + line := scan.Text() + + fields := strings.Split(line, ":") + if len(fields) != 3 { + return "/" + } + + if fields[2] != controller { + continue + } + + initPath := string(fields[3]) + + // ignore trailing /init.scope if it is there + dir, file := path.Split(initPath) + if file == "init.scope" { + return dir + } else { + return initPath + } + } + + return "/" +} + +func cGroupGet(controller, cgroup, file string) (string, error) { + initPath := getInitCgroupPath(controller) + path := path.Join("/sys/fs/cgroup", controller, initPath, cgroup, file) + + contents, err := ioutil.ReadFile(path) + if err != nil { + return "", err + } + return strings.Trim(string(contents), "\n"), nil +} + +func cGroupSet(controller, cgroup, file string, value string) error { + initPath := getInitCgroupPath(controller) + path := path.Join("/sys/fs/cgroup", controller, initPath, cgroup, file) + + return ioutil.WriteFile(path, []byte(value), 0755) +} diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/lxd/container_exec.go juju-core-2.0.0/src/github.com/lxc/lxd/lxd/container_exec.go --- juju-core-2.0~beta15/src/github.com/lxc/lxd/lxd/container_exec.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/lxd/container_exec.go 2016-10-13 14:31:53.000000000 +0000 @@ -6,42 +6,36 @@ "io/ioutil" "net/http" "os" + "path/filepath" "strconv" "strings" "sync" "github.com/gorilla/mux" 
"github.com/gorilla/websocket" - "gopkg.in/lxc/go-lxc.v2" "github.com/lxc/lxd/shared" + + log "gopkg.in/inconshreveable/log15.v2" ) type commandPostContent struct { - Command []string `json:"command"` - WaitForWS bool `json:"wait-for-websocket"` - Interactive bool `json:"interactive"` - Environment map[string]string `json:"environment"` - Width int `json:"width"` - Height int `json:"height"` -} - -func runCommand(container *lxc.Container, command []string, options lxc.AttachOptions) (int, error) { - status, err := container.RunCommandStatus(command, options) - if err != nil { - shared.Debugf("Failed running command: %q", err.Error()) - return 0, err - } - - return status, nil + Command []string `json:"command"` + WaitForWS bool `json:"wait-for-websocket"` + RecordOutput bool `json:"record-output"` + Interactive bool `json:"interactive"` + Environment map[string]string `json:"environment"` + Width int `json:"width"` + Height int `json:"height"` } type execWs struct { - command []string - container *lxc.Container + command []string + container container + env map[string]string + rootUid int rootGid int - options lxc.AttachOptions conns map[int]*websocket.Conn connsLock sync.Mutex allConnected chan bool @@ -109,13 +103,18 @@ var ttys []*os.File var ptys []*os.File + var stdin *os.File + var stdout *os.File + var stderr *os.File + if s.interactive { ttys = make([]*os.File, 1) ptys = make([]*os.File, 1) ptys[0], ttys[0], err = shared.OpenPty(s.rootUid, s.rootGid) - s.options.StdinFd = ttys[0].Fd() - s.options.StdoutFd = ttys[0].Fd() - s.options.StderrFd = ttys[0].Fd() + + stdin = ttys[0] + stdout = ttys[0] + stderr = ttys[0] if s.width > 0 && s.height > 0 { shared.SetSize(int(ptys[0].Fd()), s.width, s.height) @@ -129,9 +128,10 @@ return err } } - s.options.StdinFd = ptys[0].Fd() - s.options.StdoutFd = ttys[1].Fd() - s.options.StderrFd = ttys[2].Fd() + + stdin = ptys[0] + stdout = ttys[1] + stderr = ttys[2] } controlExit := make(chan bool) @@ -155,39 +155,39 @@ } if err 
!= nil { - shared.Debugf("Got error getting next reader %s", err) + shared.LogDebugf("Got error getting next reader %s", err) break } buf, err := ioutil.ReadAll(r) if err != nil { - shared.Debugf("Failed to read message %s", err) + shared.LogDebugf("Failed to read message %s", err) break } command := shared.ContainerExecControl{} if err := json.Unmarshal(buf, &command); err != nil { - shared.Debugf("Failed to unmarshal control socket command: %s", err) + shared.LogDebugf("Failed to unmarshal control socket command: %s", err) continue } if command.Command == "window-resize" { winchWidth, err := strconv.Atoi(command.Args["width"]) if err != nil { - shared.Debugf("Unable to extract window width: %s", err) + shared.LogDebugf("Unable to extract window width: %s", err) continue } winchHeight, err := strconv.Atoi(command.Args["height"]) if err != nil { - shared.Debugf("Unable to extract window height: %s", err) + shared.LogDebugf("Unable to extract window height: %s", err) continue } err = shared.SetSize(int(ptys[0].Fd()), winchWidth, winchHeight) if err != nil { - shared.Debugf("Failed to set window size to: %dx%d", winchWidth, winchHeight) + shared.LogDebugf("Failed to set window size to: %dx%d", winchWidth, winchHeight) continue } } @@ -208,7 +208,7 @@ <-shared.WebsocketRecvStream(ttys[i], s.conns[i]) ttys[i].Close() } else { - <-shared.WebsocketSendStream(s.conns[i], ptys[i]) + <-shared.WebsocketSendStream(s.conns[i], ptys[i], -1) ptys[i].Close() wgEOF.Done() } @@ -216,11 +216,7 @@ } } - cmdResult, cmdErr := runCommand( - s.container, - s.command, - s.options, - ) + cmdResult, cmdErr := s.container.Exec(s.command, s.env, stdin, stdout, stderr) for _, tty := range ttys { tty.Close() @@ -274,22 +270,25 @@ return BadRequest(err) } - opts := lxc.DefaultAttachOptions - opts.ClearEnv = true - opts.Env = []string{} + env := map[string]string{} for k, v := range c.ExpandedConfig() { if strings.HasPrefix(k, "environment.") { - opts.Env = append(opts.Env, fmt.Sprintf("%s=%s", 
strings.TrimPrefix(k, "environment."), v)) + env[strings.TrimPrefix(k, "environment.")] = v } } if post.Environment != nil { for k, v := range post.Environment { - if k == "HOME" { - opts.Cwd = v - } - opts.Env = append(opts.Env, fmt.Sprintf("%s=%s", k, v)) + env[k] = v + } + } + + _, ok := env["PATH"] + if !ok { + env["PATH"] = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + if shared.PathExists(fmt.Sprintf("%s/snap/bin", c.RootfsPath())) { + env["PATH"] = fmt.Sprintf("%s:/snap/bin", env["PATH"]) } } @@ -310,7 +309,6 @@ ws.allConnected = make(chan bool, 1) ws.controlConnected = make(chan bool, 1) ws.interactive = post.Interactive - ws.options = opts for i := -1; i < len(ws.conns)-1; i++ { ws.fds[i], err = shared.RandomCryptoString() if err != nil { @@ -319,7 +317,9 @@ } ws.command = post.Command - ws.container = c.LXContainerGet() + ws.container = c + ws.env = env + ws.width = post.Width ws.height = post.Height @@ -335,18 +335,43 @@ } run := func(op *operation) error { - nullDev, err := os.OpenFile(os.DevNull, os.O_RDWR, 0666) - if err != nil { - return err + var cmdErr error + var cmdResult int + metadata := shared.Jmap{} + + if post.RecordOutput { + // Prepare stdout and stderr recording + stdout, err := os.OpenFile(filepath.Join(c.LogPath(), fmt.Sprintf("exec_%s.stdout", op.id)), os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666) + if err != nil { + return err + } + defer stdout.Close() + + stderr, err := os.OpenFile(filepath.Join(c.LogPath(), fmt.Sprintf("exec_%s.stderr", op.id)), os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666) + if err != nil { + return err + } + defer stderr.Close() + + // Run the command + cmdResult, cmdErr = c.Exec(post.Command, env, nil, stdout, stderr) + + // Update metadata with the right URLs + metadata["return"] = cmdResult + metadata["output"] = shared.Jmap{ + "1": fmt.Sprintf("/%s/containers/%s/logs/%s", shared.APIVersion, c.Name(), filepath.Base(stdout.Name())), + "2": fmt.Sprintf("/%s/containers/%s/logs/%s", shared.APIVersion, 
c.Name(), filepath.Base(stderr.Name())), + } + } else { + cmdResult, cmdErr = c.Exec(post.Command, env, nil, nil, nil) + metadata["return"] = cmdResult } - defer nullDev.Close() - nullfd := nullDev.Fd() - opts.StdinFd = nullfd - opts.StdoutFd = nullfd - opts.StderrFd = nullfd + err = op.UpdateMetadata(metadata) + if err != nil { + shared.LogError("error updating metadata for cmd", log.Ctx{"err": err, "cmd": post.Command}) + } - _, cmdErr := runCommand(c.LXContainerGet(), post.Command, opts) return cmdErr } diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/lxd/container_file.go juju-core-2.0.0/src/github.com/lxc/lxd/lxd/container_file.go --- juju-core-2.0~beta15/src/github.com/lxc/lxd/lxd/container_file.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/lxd/container_file.go 2016-10-13 14:31:53.000000000 +0000 @@ -7,8 +7,6 @@ "net/http" "os" "path/filepath" - "strconv" - "syscall" "github.com/gorilla/mux" @@ -51,63 +49,68 @@ } defer temp.Close() - // Pul the file from the container - err = c.FilePull(path, temp.Name()) - if err != nil { - return InternalError(err) - } - - // Get file attributes - fi, err := temp.Stat() + // Pull the file from the container + uid, gid, mode, type_, dirEnts, err := c.FilePull(path, temp.Name()) if err != nil { return SmartError(err) } - /* - * Unfortunately, there's no portable way to do this: - * https://groups.google.com/forum/#!topic/golang-nuts/tGYjYyrwsGM - * https://groups.google.com/forum/#!topic/golang-nuts/ywS7xQYJkHY - */ - sb := fi.Sys().(*syscall.Stat_t) headers := map[string]string{ - "X-LXD-uid": strconv.FormatUint(uint64(sb.Uid), 10), - "X-LXD-gid": strconv.FormatUint(uint64(sb.Gid), 10), - "X-LXD-mode": fmt.Sprintf("%04o", fi.Mode()&os.ModePerm), + "X-LXD-uid": fmt.Sprintf("%d", uid), + "X-LXD-gid": fmt.Sprintf("%d", gid), + "X-LXD-mode": fmt.Sprintf("%04o", mode), + "X-LXD-type": type_, + } + + if type_ == "file" { + // Make a file response struct + files := make([]fileResponseEntry, 
1) + files[0].identifier = filepath.Base(path) + files[0].path = temp.Name() + files[0].filename = filepath.Base(path) + + return FileResponse(r, files, headers, true) + } else if type_ == "directory" { + return SyncResponseHeaders(true, dirEnts, headers) + } else { + return InternalError(fmt.Errorf("bad file type %s", type_)) } - - // Make a file response struct - files := make([]fileResponseEntry, 1) - files[0].identifier = filepath.Base(path) - files[0].path = temp.Name() - files[0].filename = filepath.Base(path) - - return FileResponse(r, files, headers, true) } func containerFilePut(c container, path string, r *http.Request) Response { // Extract file ownership and mode from headers - uid, gid, mode := shared.ParseLXDFileHeaders(r.Header) + uid, gid, mode, type_ := shared.ParseLXDFileHeaders(r.Header) - // Write file content to a tempfile - temp, err := ioutil.TempFile("", "lxd_forkputfile_") - if err != nil { - return InternalError(err) + if type_ == "file" { + // Write file content to a tempfile + temp, err := ioutil.TempFile("", "lxd_forkputfile_") + if err != nil { + return InternalError(err) + } + defer func() { + temp.Close() + os.Remove(temp.Name()) + }() + + _, err = io.Copy(temp, r.Body) + if err != nil { + return InternalError(err) + } + + // Transfer the file into the container + err = c.FilePush(temp.Name(), path, uid, gid, mode) + if err != nil { + return InternalError(err) + } + + return EmptySyncResponse + } else if type_ == "directory" { + err := c.FilePush("", path, uid, gid, mode) + if err != nil { + return InternalError(err) + } + return EmptySyncResponse + } else { + return InternalError(fmt.Errorf("bad file type %s", type_)) } - defer func() { - temp.Close() - os.Remove(temp.Name()) - }() - - _, err = io.Copy(temp, r.Body) - if err != nil { - return InternalError(err) - } - - // Transfer the file into the container - err = c.FilePush(temp.Name(), path, uid, gid, mode) - if err != nil { - return InternalError(err) - } - - return 
EmptySyncResponse } diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/lxd/container_get.go juju-core-2.0.0/src/github.com/lxc/lxd/lxd/container_get.go --- juju-core-2.0~beta15/src/github.com/lxc/lxd/lxd/container_get.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/lxd/container_get.go 2016-10-13 14:31:53.000000000 +0000 @@ -13,10 +13,10 @@ return SmartError(err) } - state, err := c.Render() + state, etag, err := c.Render() if err != nil { return InternalError(err) } - return SyncResponse(true, state) + return SyncResponseETag(true, state, etag) } diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/lxd/container.go juju-core-2.0.0/src/github.com/lxc/lxd/lxd/container.go --- juju-core-2.0~beta15/src/github.com/lxc/lxd/lxd/container.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/lxd/container.go 2016-10-13 14:31:53.000000000 +0000 @@ -10,8 +10,6 @@ "gopkg.in/lxc/go-lxc.v2" "github.com/lxc/lxd/shared" - - log "gopkg.in/inconshreveable/log15.v2" ) // Helper functions @@ -37,71 +35,28 @@ return nil } -func containerValidConfigKey(k string) bool { - switch k { - case "boot.autostart": - return true - case "boot.autostart.delay": - return true - case "boot.autostart.priority": - return true - case "limits.cpu": - return true - case "limits.cpu.allowance": - return true - case "limits.cpu.priority": - return true - case "limits.disk.priority": - return true - case "limits.memory": - return true - case "limits.memory.enforce": - return true - case "limits.memory.swap": - return true - case "limits.memory.swap.priority": - return true - case "limits.network.priority": - return true - case "limits.processes": - return true - case "linux.kernel_modules": - return true - case "security.privileged": - return true - case "security.nesting": - return true - case "raw.apparmor": - return true - case "raw.lxc": - return true - case "volatile.base_image": - return true - case "volatile.last_state.idmap": - 
return true - case "volatile.last_state.power": - return true +func containerValidConfigKey(d *Daemon, key string, value string) error { + f, err := shared.ConfigKeyChecker(key) + if err != nil { + return err } - - if strings.HasPrefix(k, "volatile.") { - if strings.HasSuffix(k, ".hwaddr") { - return true - } - - if strings.HasSuffix(k, ".name") { - return true - } + if err = f(value); err != nil { + return err } - - if strings.HasPrefix(k, "environment.") { - return true + if key == "raw.lxc" { + return lxcValidConfig(value) } - - if strings.HasPrefix(k, "user.") { - return true + if key == "security.syscalls.blacklist_compat" { + for _, arch := range d.architectures { + if arch == shared.ARCH_64BIT_INTEL_X86 || + arch == shared.ARCH_64BIT_ARMV8_LITTLE_ENDIAN || + arch == shared.ARCH_64BIT_POWERPC_BIG_ENDIAN { + return nil + } + } + return fmt.Errorf("security.syscalls.blacklist_compat isn't supported on this architecture") } - - return false + return nil } func containerValidDeviceConfigKey(t, k string) bool { @@ -110,24 +65,7 @@ } switch t { - case "unix-char": - switch k { - case "gid": - return true - case "major": - return true - case "minor": - return true - case "mode": - return true - case "path": - return true - case "uid": - return true - default: - return false - } - case "unix-block": + case "unix-char", "unix-block": switch k { case "gid": return true @@ -164,6 +102,12 @@ return true case "parent": return true + case "ipv4.address": + return true + case "ipv6.address": + return true + case "security.mac_filtering": + return true default: return false } @@ -190,6 +134,23 @@ default: return false } + case "usb": + switch k { + case "vendorid": + return true + case "productid": + return true + case "mode": + return true + case "gid": + return true + case "uid": + return true + case "required": + return true + default: + return false + } case "none": return false default: @@ -197,26 +158,34 @@ } } -func containerValidConfig(config map[string]string, 
profile bool, expanded bool) error { +func containerValidConfig(d *Daemon, config map[string]string, profile bool, expanded bool) error { if config == nil { return nil } - for k, _ := range config { + for k, v := range config { if profile && strings.HasPrefix(k, "volatile.") { return fmt.Errorf("Volatile keys can only be set on containers.") } - if k == "raw.lxc" { - err := lxcValidConfig(config["raw.lxc"]) - if err != nil { - return err - } + err := containerValidConfigKey(d, k, v) + if err != nil { + return err } + } - if !containerValidConfigKey(k) { - return fmt.Errorf("Bad key: %s", k) - } + _, rawSeccomp := config["raw.seccomp"] + _, whitelist := config["security.syscalls.whitelist"] + _, blacklist := config["security.syscalls.blacklist"] + blacklistDefault := shared.IsTrue(config["security.syscalls.blacklist_default"]) + blacklistCompat := shared.IsTrue(config["security.syscalls.blacklist_compat"]) + + if rawSeccomp && (whitelist || blacklist || blacklistDefault || blacklistCompat) { + return fmt.Errorf("raw.seccomp is mutually exclusive with security.syscalls*") + } + + if whitelist && (blacklist || blacklistDefault || blacklistCompat) { + return fmt.Errorf("security.syscalls.whitelist is mutually exclusive with security.syscalls.blacklist*") } return nil @@ -229,7 +198,15 @@ } // Check each device individually - for _, m := range devices { + for name, m := range devices { + if m["type"] == "" { + return fmt.Errorf("Missing device type for device '%s'", name) + } + + if !shared.StringInSlice(m["type"], []string{"none", "nic", "disk", "unix-char", "unix-block", "usb"}) { + return fmt.Errorf("Invalid device type for device '%s'", name) + } + for k, _ := range m { if !containerValidDeviceConfigKey(m["type"], k) { return fmt.Errorf("Invalid device configuration key for %s: %s", m["type"], k) @@ -272,6 +249,10 @@ if m["path"] == "" { return fmt.Errorf("Unix device entry is missing the required \"path\" property.") } + } else if m["type"] == "usb" { + if 
m["vendorid"] == "" { + return fmt.Errorf("Missing vendorid for USB device.") + } } else if m["type"] == "none" { continue } else { @@ -305,6 +286,7 @@ BaseImage string Config map[string]string CreationDate time.Time + LastUsedDate time.Time Ctype containerType Devices shared.Devices Ephemeral bool @@ -324,8 +306,10 @@ // Snapshots & migration Restore(sourceContainer container) error - Checkpoint(opts lxc.CheckpointOptions) error - StartFromMigration(imagesDir string) error + /* actionScript here is a script called action.sh in the stateDir, to + * be passed to CRIU as --action-script + */ + Migrate(cmd uint, stateDir string, function string, stop bool, actionScript bool) error Snapshots() ([]container, error) // Config handling @@ -341,11 +325,14 @@ ConfigKeySet(key string, value string) error // File handling - FilePull(srcpath string, dstpath string) error - FilePush(srcpath string, dstpath string, uid int, gid int, mode os.FileMode) error + FilePull(srcpath string, dstpath string) (int, int, os.FileMode, string, []string, error) + FilePush(srcpath string, dstpath string, uid int, gid int, mode int) error + + // Command execution + Exec(command []string, env map[string]string, stdin *os.File, stdout *os.File, stderr *os.File) (int, error) // Status - Render() (interface{}, error) + Render() (interface{}, interface{}, error) RenderState() (*shared.ContainerState, error) IsPrivileged() bool IsRunning() bool @@ -364,6 +351,7 @@ Name() string Architecture() int CreationDate() time.Time + LastUsedDate() time.Time ExpandedConfig() map[string]string ExpandedDevices() shared.Devices LocalConfig() map[string]string @@ -381,7 +369,6 @@ LogPath() string // FIXME: Those should be internal functions - LXContainerGet() *lxc.Container StorageStart() error StorageStop() error Storage() storage @@ -488,6 +475,10 @@ return nil, fmt.Errorf("Container not running, cannot do stateful snapshot") } + if err := findCriu("snapshot"); err != nil { + return nil, err + } + stateDir := 
sourceContainer.StatePath() err := os.MkdirAll(stateDir, 0700) if err != nil { @@ -504,13 +495,7 @@ * after snapshotting will fail. */ - opts := lxc.CheckpointOptions{Directory: stateDir, Stop: false, Verbose: true} - err = sourceContainer.Checkpoint(opts) - err2 := CollectCRIULogFile(sourceContainer, stateDir, "snapshot", "dump") - if err2 != nil { - shared.Log.Warn("failed to collect criu log file", log.Ctx{"error": err2}) - } - + err = sourceContainer.Migrate(lxc.MIGRATE_DUMP, stateDir, "snapshot", false, false) if err != nil { os.RemoveAll(sourceContainer.StatePath()) return nil, err @@ -568,7 +553,7 @@ } // Validate container config - err := containerValidConfig(args.Config, false, false) + err := containerValidConfig(d, args.Config, false, false) if err != nil { return nil, err } @@ -621,6 +606,7 @@ return nil, err } args.CreationDate = dbArgs.CreationDate + args.LastUsedDate = dbArgs.LastUsedDate return containerLXCCreate(d, args) } diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/lxd/container_logs.go juju-core-2.0.0/src/github.com/lxc/lxd/lxd/container_logs.go --- juju-core-2.0~beta15/src/github.com/lxc/lxd/lxd/container_logs.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/lxd/container_logs.go 2016-10-13 14:31:53.000000000 +0000 @@ -34,6 +34,10 @@ } for _, f := range dents { + if !validLogFileName(f.Name()) { + continue + } + result = append(result, fmt.Sprintf("/%s/containers/%s/logs/%s", shared.APIVersion, name, f.Name())) } @@ -52,7 +56,8 @@ return fname == "lxc.log" || fname == "lxc.conf" || strings.HasPrefix(fname, "migration_") || - strings.HasPrefix(fname, "snapshot_") + strings.HasPrefix(fname, "snapshot_") || + strings.HasPrefix(fname, "exec_") } func containerLogGet(d *Daemon, r *http.Request) Response { diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/lxd/container_lxc.go juju-core-2.0.0/src/github.com/lxc/lxd/lxd/container_lxc.go --- juju-core-2.0~beta15/src/github.com/lxc/lxd/lxd/container_lxc.go 
2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/lxd/container_lxc.go 2016-10-13 14:31:53.000000000 +0000 @@ -2,6 +2,7 @@ import ( "archive/tar" + "bufio" "encoding/json" "fmt" "io" @@ -27,9 +28,55 @@ log "gopkg.in/inconshreveable/log15.v2" ) -// Global variables -var lxcStoppingContainersLock sync.Mutex -var lxcStoppingContainers map[int]*sync.WaitGroup = make(map[int]*sync.WaitGroup) +// Operation locking +type lxcContainerOperation struct { + action string + chanDone chan error + err error + id int + timeout int +} + +func (op *lxcContainerOperation) Create(id int, action string, timeout int) *lxcContainerOperation { + op.id = id + op.action = action + op.timeout = timeout + op.chanDone = make(chan error, 0) + + if timeout > 1 { + go func(op *lxcContainerOperation) { + time.Sleep(time.Second * time.Duration(op.timeout)) + op.Done(fmt.Errorf("Container %s operation timed out after %d seconds", op.action, op.timeout)) + }(op) + } + + return op +} + +func (op *lxcContainerOperation) Wait() error { + <-op.chanDone + + return op.err +} + +func (op *lxcContainerOperation) Done(err error) { + lxcContainerOperationsLock.Lock() + defer lxcContainerOperationsLock.Unlock() + + // Check if already done + runningOp, ok := lxcContainerOperations[op.id] + if !ok || runningOp != op { + return + } + + op.err = err + close(op.chanDone) + + delete(lxcContainerOperations, op.id) +} + +var lxcContainerOperationsLock sync.Mutex +var lxcContainerOperations map[int]*lxcContainerOperation = make(map[int]*lxcContainerOperation) // Helper functions func lxcSetConfigItem(c *lxc.Container, key string, value string) error { @@ -99,6 +146,7 @@ cType: args.Ctype, stateful: args.Stateful, creationDate: args.CreationDate, + lastUsedDate: args.LastUsedDate, profiles: args.Profiles, localConfig: args.Config, localDevices: args.Devices, @@ -116,7 +164,8 @@ // Look for a rootfs entry rootfs := false - for _, m := range c.expandedDevices { + for _, name := range 
c.expandedDevices.DeviceNames() { + m := c.expandedDevices[name] if m["type"] == "disk" && m["path"] == "/" { rootfs = true break @@ -151,7 +200,7 @@ } // Validate expanded config - err = containerValidConfig(c.expandedConfig, false, true) + err = containerValidConfig(d, c.expandedConfig, false, true) if err != nil { c.Delete() return nil, err @@ -183,6 +232,9 @@ return nil, err } + // Update lease files + networkUpdateStatic(d) + return c, nil } @@ -196,6 +248,7 @@ architecture: args.Architecture, cType: args.Ctype, creationDate: args.CreationDate, + lastUsedDate: args.LastUsedDate, profiles: args.Profiles, localConfig: args.Config, localDevices: args.Devices, @@ -223,6 +276,7 @@ architecture int cType containerType creationDate time.Time + lastUsedDate time.Time ephemeral bool id int name string @@ -243,6 +297,51 @@ storage storage } +func (c *containerLXC) createOperation(action string, timeout int) (*lxcContainerOperation, error) { + op, _ := c.getOperation("") + if op != nil { + return nil, fmt.Errorf("Container is already running a %s operation", op.action) + } + + lxcContainerOperationsLock.Lock() + defer lxcContainerOperationsLock.Unlock() + + op = &lxcContainerOperation{} + op.Create(c.id, action, timeout) + lxcContainerOperations[c.id] = op + + return lxcContainerOperations[c.id], nil +} + +func (c *containerLXC) getOperation(action string) (*lxcContainerOperation, error) { + lxcContainerOperationsLock.Lock() + defer lxcContainerOperationsLock.Unlock() + + op := lxcContainerOperations[c.id] + + if op == nil { + return nil, fmt.Errorf("No running %s container operation", action) + } + + if action != "" && op.action != action { + return nil, fmt.Errorf("Container is running a %s operation, not a %s operation", op.action, action) + } + + return op, nil +} + +func (c *containerLXC) waitOperation() error { + op, _ := c.getOperation("") + if op != nil { + err := op.Wait() + if err != nil { + return err + } + } + + return nil +} + func (c *containerLXC) init() 
error { // Compute the expanded config and device list err := c.expandConfig() @@ -267,6 +366,11 @@ } func (c *containerLXC) initLXC() error { + // No need to go through all that for snapshots + if c.IsSnapshot() { + return nil + } + // Check if being called from a hook if c.fromHook { return fmt.Errorf("You can't use go-lxc from inside a LXC hook.") @@ -284,7 +388,12 @@ } // Base config - err = lxcSetConfigItem(cc, "lxc.cap.drop", "mac_admin mac_override sys_time sys_module sys_rawio") + toDrop := "sys_time sys_module sys_rawio" + if !aaStacking || c.IsPrivileged() { + toDrop = toDrop + " mac_admin mac_override" + } + + err = lxcSetConfigItem(cc, "lxc.cap.drop", toDrop) if err != nil { return err } @@ -319,6 +428,8 @@ } bindMounts := []string{ + "/dev/fuse", + "/dev/net/tun", "/proc/sys/fs/binfmt_misc", "/sys/firmware/efi/efivars", "/sys/fs/fuse/connections", @@ -336,9 +447,20 @@ } for _, mnt := range bindMounts { - err = lxcSetConfigItem(cc, "lxc.mount.entry", fmt.Sprintf("%s %s none rbind,create=dir,optional", mnt, strings.TrimPrefix(mnt, "/"))) - if err != nil { - return err + if !shared.PathExists(mnt) { + continue + } + + if shared.IsDir(mnt) { + err = lxcSetConfigItem(cc, "lxc.mount.entry", fmt.Sprintf("%s %s none rbind,create=dir,optional", mnt, strings.TrimPrefix(mnt, "/"))) + if err != nil { + return err + } + } else { + err = lxcSetConfigItem(cc, "lxc.mount.entry", fmt.Sprintf("%s %s none bind,create=file,optional", mnt, strings.TrimPrefix(mnt, "/"))) + if err != nil { + return err + } } } @@ -362,7 +484,23 @@ return err } - for _, dev := range []string{"c *:* m", "b *:* m", "c 5:0 rwm", "c 5:1 rwm", "c 1:5 rwm", "c 1:7 rwm", "c 1:3 rwm", "c 1:8 rwm", "c 1:9 rwm", "c 5:2 rwm", "c 136:* rwm"} { + devices := []string{ + "b *:* m", // Allow mknod of block devices + "c *:* m", // Allow mknod of char devices + "c 136:* rwm", // /dev/pts devices + "c 1:3 rwm", // /dev/null + "c 1:5 rwm", // /dev/zero + "c 1:7 rwm", // /dev/full + "c 1:8 rwm", // /dev/random + 
"c 1:9 rwm", // /dev/urandom + "c 5:0 rwm", // /dev/tty + "c 5:1 rwm", // /dev/console + "c 5:2 rwm", // /dev/ptmx + "c 10:229 rwm", // /dev/fuse + "c 10:200 rwm", // /dev/net/tun + } + + for _, dev := range devices { err = lxcSetConfigItem(cc, "lxc.cgroup.devices.allow", dev) if err != nil { return err @@ -414,12 +552,12 @@ } // Setup the hooks - err = lxcSetConfigItem(cc, "lxc.hook.pre-start", fmt.Sprintf("%s callhook %s %d start", c.daemon.execPath, shared.VarPath(""), c.id)) + err = lxcSetConfigItem(cc, "lxc.hook.pre-start", fmt.Sprintf("%s callhook %s %d start", execPath, shared.VarPath(""), c.id)) if err != nil { return err } - err = lxcSetConfigItem(cc, "lxc.hook.post-stop", fmt.Sprintf("%s callhook %s %d stop", c.daemon.execPath, shared.VarPath(""), c.id)) + err = lxcSetConfigItem(cc, "lxc.hook.post-stop", fmt.Sprintf("%s callhook %s %d stop", execPath, shared.VarPath(""), c.id)) if err != nil { return err } @@ -454,17 +592,32 @@ } } else { // If not currently confined, use the container's profile - err := lxcSetConfigItem(cc, "lxc.aa_profile", AAProfileFull(c)) + profile := AAProfileFull(c) + + /* In the nesting case, we want to enable the inside + * LXD to load its profile. Unprivileged containers can + * load profiles, but privileged containers cannot, so + * let's not use a namespace so they can fall back to + * the old way of nesting, i.e. using the parent's + * profile. 
+ */ + if aaStacking && (!c.IsNesting() || !c.IsPrivileged()) { + profile = fmt.Sprintf("%s//&:%s:", profile, AANamespace(c)) + } + + err := lxcSetConfigItem(cc, "lxc.aa_profile", profile) if err != nil { return err } } } - // Setup Seccomp - err = lxcSetConfigItem(cc, "lxc.seccomp", SeccompProfilePath(c)) - if err != nil { - return err + // Setup Seccomp if necessary + if ContainerNeedsSeccomp(c) { + err = lxcSetConfigItem(cc, "lxc.seccomp", SeccompProfilePath(c)) + if err != nil { + return err + } } // Setup idmap @@ -523,7 +676,7 @@ return err } } else { - if memorySwap != "false" && cgSwapAccounting { + if cgSwapAccounting && (memorySwap == "" || shared.IsTrue(memorySwap)) { err = lxcSetConfigItem(cc, "lxc.cgroup.memory.limit_in_bytes", fmt.Sprintf("%d", valueInt)) if err != nil { return err @@ -542,7 +695,7 @@ } // Configure the swappiness - if memorySwap == "false" { + if memorySwap != "" && !shared.IsTrue(memorySwap) { err = lxcSetConfigItem(cc, "lxc.cgroup.memory.swappiness", "0") if err != nil { return err @@ -601,14 +754,21 @@ return err } - err = lxcSetConfigItem(cc, "lxc.cgroup.blkio.weight", fmt.Sprintf("%d", priorityInt*100)) + // Minimum valid value is 10 + priority := priorityInt * 100 + if priority == 0 { + priority = 10 + } + + err = lxcSetConfigItem(cc, "lxc.cgroup.blkio.weight", fmt.Sprintf("%d", priority)) if err != nil { return err } } hasDiskLimits := false - for _, m := range c.expandedDevices { + for _, name := range c.expandedDevices.DeviceNames() { + m := c.expandedDevices[name] if m["type"] != "disk" { continue } @@ -674,7 +834,8 @@ } // Setup devices - for k, m := range c.expandedDevices { + for _, k := range c.expandedDevices.DeviceNames() { + m := c.expandedDevices[k] if shared.StringInSlice(m["type"], []string{"unix-char", "unix-block"}) { // Prepare all the paths srcPath := m["path"] @@ -730,8 +891,16 @@ } // Host Virtual NIC name + vethName := "" if m["host_name"] != "" { - err = lxcSetConfigItem(cc, "lxc.network.veth.pair", 
m["host_name"]) + vethName = m["host_name"] + } else if shared.IsTrue(m["security.mac_filtering"]) { + // We need a known device name for MAC filtering + vethName = deviceNextVeth() + } + + if vethName != "" { + err = lxcSetConfigItem(cc, "lxc.network.veth.pair", vethName) if err != nil { return err } @@ -768,13 +937,22 @@ devPath := filepath.Join(c.DevicesPath(), devName) // Various option checks - isOptional := m["optional"] == "1" || m["optional"] == "true" - isReadOnly := m["readonly"] == "1" || m["readonly"] == "true" - isRecursive := m["recursive"] == "1" || m["recursive"] == "true" + isOptional := shared.IsTrue(m["optional"]) + isReadOnly := shared.IsTrue(m["readonly"]) + isRecursive := shared.IsTrue(m["recursive"]) isFile := !shared.IsDir(srcPath) && !deviceIsBlockdev(srcPath) // Deal with a rootfs if tgtPath == "" { + // Set the rootfs backend type if supported (must happen before any other lxc.rootfs) + err := lxcSetConfigItem(cc, "lxc.rootfs.backend", "dir") + if err == nil { + value := cc.ConfigItem("lxc.rootfs.backend") + if len(value) == 0 || value[0] != "dir" { + lxcSetConfigItem(cc, "lxc.rootfs.backend", "") + } + } + // Set the rootfs path err = lxcSetConfigItem(cc, "lxc.rootfs", c.RootfsPath()) if err != nil { @@ -909,6 +1087,25 @@ return "", fmt.Errorf("The container is already running") } + // Sanity checks for devices + for _, name := range c.expandedDevices.DeviceNames() { + m := c.expandedDevices[name] + switch m["type"] { + case "disk": + if m["source"] != "" && !shared.PathExists(m["source"]) { + return "", fmt.Errorf("Missing source '%s' for disk '%s'", m["source"], name) + } + case "nic": + if m["parent"] != "" && !shared.PathExists(fmt.Sprintf("/sys/class/net/%s", m["parent"])) { + return "", fmt.Errorf("Missing parent '%s' for nic '%s'", m["parent"], name) + } + case "unix-char", "unix-block": + if m["path"] != "" && m["major"] == "" && m["minor"] == "" && !shared.PathExists(m["path"]) { + return "", fmt.Errorf("Missing source '%s' for 
device '%s'", m["path"], name) + } + } + } + // Load any required kernel modules kernelModules := c.expandedConfig["linux.kernel_modules"] if kernelModules != "" { @@ -941,7 +1138,7 @@ } if !reflect.DeepEqual(idmap, lastIdmap) { - shared.Debugf("Container idmap changed, remapping") + shared.LogDebugf("Container idmap changed, remapping") err := c.StorageStart() if err != nil { @@ -963,6 +1160,34 @@ return "", err } } + + var mode os.FileMode + var uid int + var gid int + + if c.IsPrivileged() { + mode = 0700 + } else { + mode = 0755 + if idmap != nil { + uid, gid = idmap.ShiftIntoNs(0, 0) + } + } + + err = os.Chmod(c.Path(), mode) + if err != nil { + return "", err + } + + err = os.Chown(c.Path(), uid, gid) + if err != nil { + return "", err + } + + err = c.StorageStop() + if err != nil { + return "", err + } } err = c.ConfigKeySet("volatile.last_state.idmap", jsonIdmap) @@ -978,25 +1203,85 @@ // Cleanup any existing leftover devices c.removeUnixDevices() c.removeDiskDevices() + c.removeNetworkFilters() + + var usbs []usbDevice // Create the devices - for k, m := range c.expandedDevices { + for _, k := range c.expandedDevices.DeviceNames() { + m := c.expandedDevices[k] if shared.StringInSlice(m["type"], []string{"unix-char", "unix-block"}) { // Unix device - devPath, err := c.createUnixDevice(k, m) + devPath, err := c.createUnixDevice(m) if err != nil { return "", err } - // Add the new device cgroup rule - dType, dMajor, dMinor, err := deviceGetAttributes(devPath) - if err != nil { - return "", err + if c.IsPrivileged() && !runningInUserns && cgDevicesController { + // Add the new device cgroup rule + dType, dMajor, dMinor, err := deviceGetAttributes(devPath) + if err != nil { + return "", err + } + + err = lxcSetConfigItem(c.c, "lxc.cgroup.devices.allow", fmt.Sprintf("%s %d:%d rwm", dType, dMajor, dMinor)) + if err != nil { + return "", fmt.Errorf("Failed to add cgroup rule for device") + } + } + } else if m["type"] == "usb" { + if usbs == nil { + usbs, err = 
deviceLoadUsb() + if err != nil { + return "", err + } } - err = lxcSetConfigItem(c.c, "lxc.cgroup.devices.allow", fmt.Sprintf("%s %d:%d rwm", dType, dMajor, dMinor)) - if err != nil { - return "", fmt.Errorf("Failed to add cgroup rule for device") + created := false + + for _, usb := range usbs { + if usb.vendor != m["vendorid"] || (m["productid"] != "" && usb.product != m["productid"]) { + continue + } + + if c.IsPrivileged() && !runningInUserns && cgDevicesController { + err = lxcSetConfigItem(c.c, "lxc.cgroup.devices.allow", fmt.Sprintf("c %d:%d rwm", usb.major, usb.minor)) + if err != nil { + return "", err + } + } + + temp := shared.Device{} + if err := shared.DeepCopy(&m, &temp); err != nil { + return "", err + } + + temp["major"] = fmt.Sprintf("%d", usb.major) + temp["minor"] = fmt.Sprintf("%d", usb.minor) + temp["path"] = usb.path + + /* it's ok to fail, the device might be hot plugged later */ + _, err := c.createUnixDevice(temp) + if err != nil { + shared.LogDebug("failed to create usb device", log.Ctx{"err": err, "device": k}) + continue + } + + created = true + + /* if the create was successful, let's bind mount it */ + srcPath := usb.path + tgtPath := strings.TrimPrefix(srcPath, "/") + devName := fmt.Sprintf("unix.%s", strings.Replace(tgtPath, "/", "-", -1)) + devPath := filepath.Join(c.DevicesPath(), devName) + err = lxcSetConfigItem(c.c, "lxc.mount.entry", fmt.Sprintf("%s %s none bind,create=file", devPath, tgtPath)) + if err != nil { + return "", err + } + } + + if !created && shared.IsTrue(m["required"]) { + return "", fmt.Errorf("couldn't create usb device %s", k) } } else if m["type"] == "disk" { // Disk device @@ -1006,6 +1291,44 @@ return "", err } } + } else if m["type"] == "nic" { + if m["nictype"] == "bridged" && shared.IsTrue(m["security.mac_filtering"]) { + m, err = c.fillNetworkDevice(k, m) + if err != nil { + return "", err + } + + // Read device name from config + vethName := "" + for i := 0; i < len(c.c.ConfigItem("lxc.network")); i++ 
{ + val := c.c.ConfigItem(fmt.Sprintf("lxc.network.%d.hwaddr", i)) + if len(val) == 0 || val[0] != m["hwaddr"] { + continue + } + + val = c.c.ConfigItem(fmt.Sprintf("lxc.network.%d.link", i)) + if len(val) == 0 || val[0] != m["parent"] { + continue + } + + val = c.c.ConfigItem(fmt.Sprintf("lxc.network.%d.veth.pair", i)) + if len(val) == 0 { + continue + } + + vethName = val[0] + break + } + + if vethName == "" { + return "", fmt.Errorf("Failed to find device name for mac_filtering") + } + + err = c.createNetworkFilter(vethName, m["parent"], m["hwaddr"]) + if err != nil { + return "", err + } + } } } @@ -1027,7 +1350,8 @@ // Cleanup any leftover volatile entries netNames := []string{} - for k, v := range c.expandedDevices { + for _, k := range c.expandedDevices.DeviceNames() { + v := c.expandedDevices[k] if v["type"] == "nic" { netNames = append(netNames, k) } @@ -1069,36 +1393,46 @@ delete(c.expandedConfig, k) } - // Generate the LXC config - f, err := ioutil.TempFile("", "lxd_lxc_startconfig_") - if err != nil { - return "", err + // Rotate the log file + logfile := c.LogFilePath() + if shared.PathExists(logfile) { + os.Remove(logfile + ".old") + err := os.Rename(logfile, logfile+".old") + if err != nil { + return "", err + } } - configPath := f.Name() - if err = f.Chmod(0600); err != nil { - f.Close() + // Generate the LXC config + configPath := filepath.Join(c.LogPath(), "lxc.conf") + err = c.c.SaveConfigFile(configPath) + if err != nil { os.Remove(configPath) return "", err } - f.Close() - err = c.c.SaveConfigFile(configPath) + // Update time container was last started + err = dbContainerLastUsedUpdate(c.daemon.db, c.id, time.Now().UTC()) if err != nil { - os.Remove(configPath) - return "", err + fmt.Printf("Error updating last used: %v", err) } return configPath, nil } func (c *containerLXC) Start(stateful bool) error { - // Wait for container tear down to finish - lxcStoppingContainersLock.Lock() - wgStopping, stopping := lxcStoppingContainers[c.id] - 
lxcStoppingContainersLock.Unlock() - if stopping { - wgStopping.Wait() + var ctxMap log.Ctx + + // Setup a new operation + op, err := c.createOperation("start", 30) + if err != nil { + return err + } + defer op.Done(nil) + + err = setupSharedMounts() + if err != nil { + return fmt.Errorf("Daemon failed to setup shared mounts base: %s.\nDoes security.nesting need to be turned on?", err) } // Run the shared start code @@ -1107,39 +1441,38 @@ return err } + ctxMap = log.Ctx{"name": c.name, + "action": op.action, + "created": c.creationDate, + "ephemeral": c.ephemeral, + "used": c.lastUsedDate, + "stateful": stateful} + + shared.LogInfo("Starting container", ctxMap) + // If stateful, restore now if stateful { if !c.stateful { return fmt.Errorf("Container has no existing state to restore.") } - if !c.IsPrivileged() { - if err := c.IdmapSet().ShiftRootfs(c.StatePath()); err != nil { - return err - } + err := c.Migrate(lxc.MIGRATE_RESTORE, c.StatePath(), "snapshot", false, false) + if err != nil && !c.IsRunning() { + return err } - out, err := exec.Command( - c.daemon.execPath, - "forkmigrate", - c.name, - c.daemon.lxcpath, - configPath, - c.StatePath()).CombinedOutput() - if string(out) != "" { - for _, line := range strings.Split(strings.TrimRight(string(out), "\n"), "\n") { - shared.Debugf("forkmigrate: %s", line) - } - } - CollectCRIULogFile(c, c.StatePath(), "snapshot", "restore") + os.RemoveAll(c.StatePath()) + c.stateful = false + err = dbContainerSetStateful(c.daemon.db, c.id, false) if err != nil { + shared.LogError("Failed starting container", ctxMap) return err } - os.RemoveAll(c.StatePath()) - c.stateful = false - return dbContainerSetStateful(c.daemon.db, c.id, false) + shared.LogInfo("Started container", ctxMap) + + return err } else if c.stateful { /* stateless start required when we have state, let's delete it */ err := os.RemoveAll(c.StatePath()) @@ -1156,62 +1489,60 @@ // Start the LXC container out, err := exec.Command( - c.daemon.execPath, + execPath, 
"forkstart", c.name, c.daemon.lxcpath, configPath).CombinedOutput() + // Capture debug output if string(out) != "" { for _, line := range strings.Split(strings.TrimRight(string(out), "\n"), "\n") { - shared.Debugf("forkstart: %s", line) + shared.LogDebugf("forkstart: %s", line) } } - if err != nil { - return fmt.Errorf( - "Error calling 'lxd forkstart %s %s %s': err='%v'", - c.name, - c.daemon.lxcpath, - filepath.Join(c.LogPath(), "lxc.conf"), - err) - } - - return nil -} + if err != nil && !c.IsRunning() { + // Attempt to extract the LXC errors + lxcLog := "" + logPath := filepath.Join(c.LogPath(), "lxc.log") + if shared.PathExists(logPath) { + logContent, err := ioutil.ReadFile(logPath) + if err == nil { + for _, line := range strings.Split(string(logContent), "\n") { + fields := strings.Fields(line) + if len(fields) < 4 { + continue + } -func (c *containerLXC) StartFromMigration(imagesDir string) error { - // Run the shared start code - configPath, err := c.startCommon() - if err != nil { - return err - } + // We only care about errors + if fields[2] != "ERROR" { + continue + } - // Start the LXC container - out, err := exec.Command( - c.daemon.execPath, - "forkmigrate", - c.name, - c.daemon.lxcpath, - configPath, - imagesDir).CombinedOutput() + // Prepend the line break + if len(lxcLog) == 0 { + lxcLog += "\n" + } - if string(out) != "" { - for _, line := range strings.Split(strings.TrimRight(string(out), "\n"), "\n") { - shared.Debugf("forkmigrate: %s", line) + lxcLog += fmt.Sprintf(" %s\n", strings.Join(fields[0:], " ")) + } + } } - } - if err != nil { + shared.LogError("Failed starting container", ctxMap) + + // Return the actual error return fmt.Errorf( - "Error calling 'lxd forkmigrate %s %s %s %s': err='%v'", + "Error calling 'lxd forkstart %s %s %s': err='%v'%s", c.name, c.daemon.lxcpath, filepath.Join(c.LogPath(), "lxc.conf"), - imagesDir, - err) + err, lxcLog) } + shared.LogInfo("Started container", ctxMap) + return nil } @@ -1233,7 +1564,24 @@ } // 
Template anything that needs templating - err = c.TemplateApply("start") + key := "volatile.apply_template" + if c.localConfig[key] != "" { + // Run any template that needs running + err = c.templateApplyNow(c.localConfig[key]) + if err != nil { + c.StorageStop() + return err + } + + // Remove the volatile key from the DB + err := dbContainerConfigRemove(c.daemon.db, c.id, key) + if err != nil { + c.StorageStop() + return err + } + } + + err = c.templateApplyNow("start") if err != nil { c.StorageStop() return err @@ -1248,13 +1596,14 @@ c.fromHook = false err := c.setNetworkPriority() if err != nil { - shared.Log.Error("Failed to apply network priority", log.Ctx{"container": c.name, "err": err}) + shared.LogError("Failed to apply network priority", log.Ctx{"container": c.name, "err": err}) } }(c) } // Apply network limits - for name, m := range c.expandedDevices { + for _, name := range c.expandedDevices.DeviceNames() { + m := c.expandedDevices[name] if m["type"] != "nic" { continue } @@ -1267,43 +1616,38 @@ c.fromHook = false err = c.setNetworkLimits(name, m) if err != nil { - shared.Log.Error("Failed to apply network limits", log.Ctx{"container": c.name, "err": err}) + shared.LogError("Failed to apply network limits", log.Ctx{"container": c.name, "err": err}) } }(c, name, m) } + // Record current state + err = dbContainerSetState(c.daemon.db, c.id, "RUNNING") + if err != nil { + return err + } + return nil } -// Container shutdown locking -func (c *containerLXC) setupStopping() *sync.WaitGroup { - // Handle locking - lxcStoppingContainersLock.Lock() - defer lxcStoppingContainersLock.Unlock() - - // Existing entry - wg, stopping := lxcStoppingContainers[c.id] - if stopping { - return wg +// Stop functions +func (c *containerLXC) Stop(stateful bool) error { + var ctxMap log.Ctx + // Setup a new operation + op, err := c.createOperation("stop", 30) + if err != nil { + return err } - // Setup new entry - lxcStoppingContainers[c.id] = &sync.WaitGroup{} - - go func(wg 
*sync.WaitGroup, id int) { - wg.Wait() + ctxMap = log.Ctx{"name": c.name, + "action": op.action, + "created": c.creationDate, + "ephemeral": c.ephemeral, + "used": c.lastUsedDate, + "stateful": stateful} - lxcStoppingContainersLock.Lock() - defer lxcStoppingContainersLock.Unlock() + shared.LogInfo("Stopping container", ctxMap) - delete(lxcStoppingContainers, id) - }(lxcStoppingContainers[c.id], c.id) - - return lxcStoppingContainers[c.id] -} - -// Stop functions -func (c *containerLXC) Stop(stateful bool) error { // Handle stateful stop if stateful { // Cleanup any existing state @@ -1312,91 +1656,107 @@ err := os.MkdirAll(stateDir, 0700) if err != nil { + op.Done(err) + shared.LogError("Failed stopping container", ctxMap) return err } // Checkpoint - opts := lxc.CheckpointOptions{Directory: stateDir, Stop: true, Verbose: true} - err = c.Checkpoint(opts) - err2 := CollectCRIULogFile(c, stateDir, "snapshot", "dump") - if err2 != nil { - shared.Log.Warn("failed to collect criu log file", log.Ctx{"error": err2}) - } - + err = c.Migrate(lxc.MIGRATE_DUMP, stateDir, "snapshot", true, false) if err != nil { + op.Done(err) + shared.LogError("Failed stopping container", ctxMap) return err } c.stateful = true err = dbContainerSetStateful(c.daemon.db, c.id, true) if err != nil { + op.Done(err) + shared.LogError("Failed stopping container", ctxMap) return err } + op.Done(nil) + shared.LogInfo("Stopped container", ctxMap) return nil } // Load the go-lxc struct - err := c.initLXC() + err = c.initLXC() if err != nil { + op.Done(err) + shared.LogError("Failed stopping container", ctxMap) return err } // Attempt to freeze the container first, helps massively with fork bombs c.Freeze() - // Handle locking - wg := c.setupStopping() - - // Stop the container - wg.Add(1) if err := c.c.Stop(); err != nil { - wg.Done() + op.Done(err) + shared.LogError("Failed stopping container", ctxMap) return err } - // Mark ourselves as done - wg.Done() - - // Wait for any other teardown routines to 
finish - wg.Wait() + err = op.Wait() + if err != nil { + shared.LogError("Failed stopping container", ctxMap) + return err + } - return nil + shared.LogInfo("Stopped container", ctxMap) + return err } func (c *containerLXC) Shutdown(timeout time.Duration) error { - // Load the go-lxc struct - err := c.initLXC() + var ctxMap log.Ctx + + // Setup a new operation + op, err := c.createOperation("shutdown", 30) if err != nil { return err } - // Handle locking - wg := c.setupStopping() + ctxMap = log.Ctx{"name": c.name, + "action": op.action, + "created": c.creationDate, + "ephemeral": c.ephemeral, + "used": c.lastUsedDate, + "timeout": timeout} + + shared.LogInfo("Shutting down container", ctxMap) + + // Load the go-lxc struct + err = c.initLXC() + if err != nil { + op.Done(err) + shared.LogError("Failed shutting down container", ctxMap) + return err + } - // Shutdown the container - wg.Add(1) if err := c.c.Shutdown(timeout); err != nil { - wg.Done() + op.Done(err) + shared.LogError("Failed shutting down container", ctxMap) return err } - // Mark ourselves as done - wg.Done() + err = op.Wait() + if err != nil { + shared.LogError("Failed shutting down container", ctxMap) + return err + } - // Wait for any other teardown routines to finish - wg.Wait() + shared.LogInfo("Shut down container", ctxMap) - return nil + return err } func (c *containerLXC) OnStop(target string) error { - // Get locking - lxcStoppingContainersLock.Lock() - wg, stopping := lxcStoppingContainers[c.id] - lxcStoppingContainersLock.Unlock() - if wg != nil { - wg.Add(1) + // Get operation + op, _ := c.getOperation("") + if op != nil && !shared.StringInSlice(op.action, []string{"stop", "shutdown"}) { + return fmt.Errorf("Container is already running a %s operation", op.action) } // Make sure we can't call go-lxc functions by mistake @@ -1405,25 +1765,28 @@ // Stop the storage for this container err := c.StorageStop() if err != nil { + if op != nil { + op.Done(err) + } + return err } - // Unlock the 
apparmor profile - err = AAUnloadProfile(c) - if err != nil { - return err + // Unload the apparmor profile + if err := AADestroy(c); err != nil { + shared.LogError("failed to destroy apparmor namespace", log.Ctx{"container": c.Name(), "err": err}) } // FIXME: The go routine can go away once we can rely on LXC_TARGET - go func(c *containerLXC, target string, wg *sync.WaitGroup) { + go func(c *containerLXC, target string, op *lxcContainerOperation) { c.fromHook = false // Unlock on return - if wg != nil { - defer wg.Done() + if op != nil { + defer op.Done(nil) } - if target == "unknown" && stopping { + if target == "unknown" && op != nil { target = "stop" } @@ -1447,13 +1810,19 @@ // Clean all the unix devices err = c.removeUnixDevices() if err != nil { - shared.Log.Error("Unable to remove unix devices", log.Ctx{"err": err}) + shared.LogError("Unable to remove unix devices", log.Ctx{"err": err}) } // Clean all the disk devices err = c.removeDiskDevices() if err != nil { - shared.Log.Error("Unable to remove disk devices", log.Ctx{"err": err}) + shared.LogError("Unable to remove disk devices", log.Ctx{"err": err}) + } + + // Clean all network filters + err = c.removeNetworkFilters() + if err != nil { + shared.LogError("Unable to remove network filters", log.Ctx{"err": err}) } // Reboot the container @@ -1482,34 +1851,71 @@ // Trigger a rebalance deviceTaskSchedulerTrigger("container", c.name, "stopped") + // Record current state + err = dbContainerSetState(c.daemon.db, c.id, "STOPPED") + if err != nil { + return + } + // Destroy ephemeral containers if c.ephemeral { c.Delete() } - }(c, target, wg) + }(c, target, op) return nil } // Freezer functions func (c *containerLXC) Freeze() error { + ctxMap := log.Ctx{"name": c.name, + "created": c.creationDate, + "ephemeral": c.ephemeral, + "used": c.lastUsedDate} + + shared.LogInfo("Freezing container", ctxMap) + // Load the go-lxc struct err := c.initLXC() if err != nil { + shared.LogError("Failed freezing container", 
ctxMap) return err } - return c.c.Freeze() + err = c.c.Freeze() + if err != nil { + shared.LogError("Failed freezing container", ctxMap) + return err + } + + shared.LogInfo("Froze container", ctxMap) + + return err } func (c *containerLXC) Unfreeze() error { + ctxMap := log.Ctx{"name": c.name, + "created": c.creationDate, + "ephemeral": c.ephemeral, + "used": c.lastUsedDate} + + shared.LogInfo("Unfreezing container", ctxMap) + // Load the go-lxc struct err := c.initLXC() if err != nil { + shared.LogError("Failed unfreezing container", ctxMap) return err } - return c.c.Unfreeze() + err = c.c.Unfreeze() + if err != nil { + shared.LogError("Failed unfreezing container", ctxMap) + } + + shared.LogInfo("Unfroze container", ctxMap) + + return err } var LxcMonitorStateError = fmt.Errorf("Monitor is hung") @@ -1517,6 +1923,10 @@ // Get lxc container state, with 1 second timeout // If we don't get a reply, assume the lxc monitor is hung func (c *containerLXC) getLxcState() (lxc.State, error) { + if c.IsSnapshot() { + return lxc.StateMap["STOPPED"], nil + } + monitor := make(chan lxc.State, 1) go func(c *lxc.Container) { @@ -1531,16 +1941,19 @@ } } -func (c *containerLXC) Render() (interface{}, error) { +func (c *containerLXC) Render() (interface{}, interface{}, error) { // Load the go-lxc struct err := c.initLXC() if err != nil { - return nil, err + return nil, nil, err } // Ignore err as the arch string on error is correct (unknown) architectureName, _ := shared.ArchitectureName(c.architecture) + // Prepare the ETag + etag := []interface{}{c.architecture, c.localConfig, c.localDevices, c.ephemeral, c.profiles} + if c.IsSnapshot() { return &shared.SnapshotInfo{ Architecture: architectureName, @@ -1550,15 +1963,16 @@ Ephemeral: c.ephemeral, ExpandedConfig: c.expandedConfig, ExpandedDevices: c.expandedDevices, + LastUsedDate: c.lastUsedDate, Name: c.name, Profiles: c.profiles, Stateful: c.stateful, - }, nil + }, etag, nil } else { // FIXME: Render shouldn't directly access 
the go-lxc struct cState, err := c.getLxcState() if err != nil { - return nil, err + return nil, nil, err } statusCode := shared.FromLXCState(int(cState)) @@ -1570,12 +1984,13 @@ Ephemeral: c.ephemeral, ExpandedConfig: c.expandedConfig, ExpandedDevices: c.expandedDevices, + LastUsedDate: c.lastUsedDate, Name: c.name, Profiles: c.profiles, Status: statusCode.String(), StatusCode: statusCode, Stateful: c.stateful, - }, nil + }, etag, nil } } @@ -1598,6 +2013,7 @@ if c.IsRunning() { pid := c.InitPID() + status.CPU = c.cpuState() status.Disk = c.diskState() status.Memory = c.memoryState() status.Network = c.networkState() @@ -1630,33 +2046,44 @@ } func (c *containerLXC) Restore(sourceContainer container) error { + var ctxMap log.Ctx + // Check if we can restore the container err := c.storage.ContainerCanRestore(c, sourceContainer) if err != nil { return err } + /* let's also check for CRIU if necessary, before doing a bunch of + * filesystem manipulations + */ + if shared.PathExists(c.StatePath()) { + if err := findCriu("snapshot"); err != nil { + return err + } + } + // Stop the container wasRunning := false if c.IsRunning() { wasRunning = true if err := c.Stop(false); err != nil { - shared.Log.Error( - "Could not stop container", - log.Ctx{ - "container": c.Name(), - "err": err}) return err } } + ctxMap = log.Ctx{"name": c.name, + "created": c.creationDate, + "ephemeral": c.ephemeral, + "used": c.lastUsedDate, + "source": sourceContainer.Name()} + + shared.LogInfo("Restoring container", ctxMap) + // Restore the rootfs err = c.storage.ContainerRestore(c, sourceContainer) if err != nil { - shared.Log.Error("Restoring the filesystem failed", - log.Ctx{ - "source": sourceContainer.Name(), - "destination": c.Name()}) + shared.LogError("Failed restoring container filesystem", ctxMap) return err } @@ -1671,43 +2098,14 @@ err = c.Update(args, false) if err != nil { - shared.Log.Error("Restoring the configuration failed", - log.Ctx{ - "source": sourceContainer.Name(), - 
"destination": c.Name()}) - + shared.LogError("Failed restoring container configuration", ctxMap) return err } // If the container wasn't running but was stateful, should we restore // it as running? if shared.PathExists(c.StatePath()) { - configPath, err := c.startCommon() - if err != nil { - return err - } - - if !c.IsPrivileged() { - if err := c.IdmapSet().ShiftRootfs(c.StatePath()); err != nil { - return err - } - } - - out, err := exec.Command( - c.daemon.execPath, - "forkmigrate", - c.name, - c.daemon.lxcpath, - configPath, - c.StatePath()).CombinedOutput() - if string(out) != "" { - for _, line := range strings.Split(strings.TrimRight(string(out), "\n"), "\n") { - shared.Debugf("forkmigrate: %s", line) - } - } - CollectCRIULogFile(c, c.StatePath(), "snapshot", "restore") - - if err != nil { + if err := c.Migrate(lxc.MIGRATE_RESTORE, c.StatePath(), "snapshot", false, false); err != nil { return err } @@ -1715,21 +2113,26 @@ // this in snapshots. err2 := os.RemoveAll(c.StatePath()) if err2 != nil { - shared.Log.Error("failed to delete snapshot state", "path", c.StatePath(), "err", err2) + shared.LogError("Failed to delete snapshot state", log.Ctx{"path": c.StatePath(), "err": err2}) } if err != nil { + shared.LogInfo("Failed restoring container", ctxMap) return err } + shared.LogInfo("Restored container", ctxMap) return nil } // Restart the container if wasRunning { + shared.LogInfo("Restored container", ctxMap) return c.Start(false) } + shared.LogInfo("Restored container", ctxMap) + return nil } @@ -1737,28 +2140,36 @@ // Unmount any leftovers c.removeUnixDevices() c.removeDiskDevices() + c.removeNetworkFilters() // Remove the security profiles AADeleteProfile(c) SeccompDeleteProfile(c) // Remove the devices path - os.RemoveAll(c.DevicesPath()) + os.Remove(c.DevicesPath()) // Remove the shmounts path os.RemoveAll(shared.VarPath("shmounts", c.Name())) } func (c *containerLXC) Delete() error { + ctxMap := log.Ctx{"name": c.name, + "created": c.creationDate, + 
"ephemeral": c.ephemeral, + "used": c.lastUsedDate} + + shared.LogInfo("Deleting container", ctxMap) + if c.IsSnapshot() { // Remove the snapshot if err := c.storage.ContainerSnapshotDelete(c); err != nil { - shared.Log.Warn("failed to delete snapshot", "name", c.Name(), "err", err) + shared.LogWarn("Failed to delete snapshot", log.Ctx{"name": c.Name(), "err": err}) } } else { // Remove all snapshot if err := containerDeleteSnapshots(c.daemon, c.Name()); err != nil { - shared.Log.Warn("failed to delete snapshots", "name", c.Name(), "err", err) + shared.LogWarn("Failed to delete snapshots", log.Ctx{"name": c.Name(), "err": err}) } // Clean things up @@ -1767,6 +2178,7 @@ // Delete the container from disk if shared.PathExists(c.Path()) { if err := c.storage.ContainerDelete(c); err != nil { + shared.LogError("Failed deleting container", ctxMap) return err } } @@ -1774,14 +2186,27 @@ // Remove the database record if err := dbContainerRemove(c.daemon.db, c.Name()); err != nil { + shared.LogError("Failed deleting container", ctxMap) return err } + // Update lease files + networkUpdateStatic(c.daemon) + + shared.LogInfo("Deleted container", ctxMap) + return nil } func (c *containerLXC) Rename(newName string) error { oldName := c.Name() + ctxMap := log.Ctx{"name": c.name, + "created": c.creationDate, + "ephemeral": c.ephemeral, + "used": c.lastUsedDate, + "newname": newName} + + shared.LogInfo("Renaming container", ctxMap) // Sanity checks if !c.IsSnapshot() && !shared.ValidHostname(newName) { @@ -1789,7 +2214,7 @@ } if c.IsRunning() { - return fmt.Errorf("renaming of running container not allowed") + return fmt.Errorf("Renaming of running container not allowed") } // Clean things up @@ -1797,24 +2222,30 @@ // Rename the logging path os.RemoveAll(shared.LogPath(newName)) - err := os.Rename(c.LogPath(), shared.LogPath(newName)) - if err != nil { - return err + if shared.PathExists(c.LogPath()) { + err := os.Rename(c.LogPath(), shared.LogPath(newName)) + if err != nil { + 
shared.LogError("Failed renaming container", ctxMap) + return err + } } // Rename the storage entry if c.IsSnapshot() { if err := c.storage.ContainerSnapshotRename(c, newName); err != nil { + shared.LogError("Failed renaming container", ctxMap) return err } } else { if err := c.storage.ContainerRename(c, newName); err != nil { + shared.LogError("Failed renaming container", ctxMap) return err } } // Rename the database entry if err := dbContainerRename(c.daemon.db, oldName, newName); err != nil { + shared.LogError("Failed renaming container", ctxMap) return err } @@ -1822,6 +2253,7 @@ // Rename all the snapshots results, err := dbContainerGetSnapshots(c.daemon.db, oldName) if err != nil { + shared.LogError("Failed renaming container", ctxMap) return err } @@ -1830,6 +2262,7 @@ baseSnapName := filepath.Base(sname) newSnapshotName := newName + shared.SnapshotDelimiter + baseSnapName if err := dbContainerRename(c.daemon.db, sname, newSnapshotName); err != nil { + shared.LogError("Failed renaming container", ctxMap) return err } } @@ -1841,6 +2274,8 @@ // Invalidate the go-lxc cache c.c = nil + shared.LogInfo("Renamed container", ctxMap) + return nil } @@ -1913,7 +2348,7 @@ } // Validate the new config - err := containerValidConfig(args.Config, false, false) + err := containerValidConfig(c.daemon, args.Config, false, false) if err != nil { return err } @@ -2002,18 +2437,25 @@ return err } - // Define a function which reverts everything - undoChanges := func() { - c.architecture = oldArchitecture - c.ephemeral = oldEphemeral - c.expandedConfig = oldExpandedConfig - c.expandedDevices = oldExpandedDevices - c.localConfig = oldLocalConfig - c.localDevices = oldLocalDevices - c.profiles = oldProfiles - c.initLXC() - deviceTaskSchedulerTrigger("container", c.name, "changed") - } + // Define a function which reverts everything. Defer this function + // so that it doesn't need to be explicitly called in every failing + // return path. 
Track whether or not we want to undo the changes + // using a closure. + undoChanges := true + defer func() { + if undoChanges { + c.architecture = oldArchitecture + c.ephemeral = oldEphemeral + c.expandedConfig = oldExpandedConfig + c.expandedDevices = oldExpandedDevices + c.localConfig = oldLocalConfig + c.localDevices = oldLocalDevices + c.profiles = oldProfiles + c.c = nil + c.initLXC() + deviceTaskSchedulerTrigger("container", c.name, "changed") + } + }() // Apply the various changes c.architecture = args.Architecture @@ -2025,19 +2467,11 @@ // Expand the config and refresh the LXC config err = c.expandConfig() if err != nil { - undoChanges() return err } err = c.expandDevices() if err != nil { - undoChanges() - return err - } - - err = c.initLXC() - if err != nil { - undoChanges() return err } @@ -2063,16 +2497,21 @@ removeDevices, addDevices, updateDevices := oldExpandedDevices.Update(c.expandedDevices) // Do some validation of the config diff - err = containerValidConfig(c.expandedConfig, false, true) + err = containerValidConfig(c.daemon, c.expandedConfig, false, true) if err != nil { - undoChanges() return err } // Do some validation of the devices diff err = containerValidDevices(c.expandedDevices, false, true) if err != nil { - undoChanges() + return err + } + + // Run through initLXC to catch anything we missed + c.c = nil + err = c.initLXC() + if err != nil { return err } @@ -2081,7 +2520,6 @@ if key == "raw.apparmor" || key == "security.nesting" { err = AAParseProfile(c) if err != nil { - undoChanges() return err } } @@ -2100,13 +2538,11 @@ if m["size"] != oldRootfsSize { size, err := shared.ParseByteSizeString(m["size"]) if err != nil { - undoChanges() return err } err = c.storage.ContainerSetQuota(c, size) if err != nil { - undoChanges() return err } } @@ -2124,7 +2560,8 @@ } var newRootfs shared.Device - for _, m := range c.expandedDevices { + for _, name := range c.expandedDevices.DeviceNames() { + m := c.expandedDevices[name] if m["type"] == 
"disk" && m["path"] == "/" { newRootfs = m break @@ -2132,7 +2569,6 @@ } if oldRootfs["source"] != newRootfs["source"] { - undoChanges() return fmt.Errorf("Cannot change the rootfs path of a running container") } @@ -2144,7 +2580,6 @@ // Update the AppArmor profile err = AALoadProfile(c) if err != nil { - undoChanges() return err } } else if key == "linux.kernel_modules" && value != "" { @@ -2152,7 +2587,6 @@ module = strings.TrimPrefix(module, " ") out, err := exec.Command("modprobe", module).CombinedOutput() if err != nil { - undoChanges() return fmt.Errorf("Failed to load kernel module '%s': %s", module, out) } } @@ -2170,7 +2604,13 @@ } } - err = c.CGroupSet("blkio.weight", fmt.Sprintf("%d", priorityInt*100)) + // Minimum valid value is 10 + priority := priorityInt * 100 + if priority == 0 { + priority = 10 + } + + err = c.CGroupSet("blkio.weight", fmt.Sprintf("%d", priority)) if err != nil { return err } @@ -2203,7 +2643,6 @@ } else { valueInt, err := shared.ParseByteSizeString(memory) if err != nil { - undoChanges() return err } memory = fmt.Sprintf("%d", valueInt) @@ -2213,20 +2652,17 @@ if cgSwapAccounting { err = c.CGroupSet("memory.memsw.limit_in_bytes", "-1") if err != nil { - undoChanges() return err } } err = c.CGroupSet("memory.limit_in_bytes", "-1") if err != nil { - undoChanges() return err } err = c.CGroupSet("memory.soft_limit_in_bytes", "-1") if err != nil { - undoChanges() return err } @@ -2235,25 +2671,21 @@ // Set new limit err = c.CGroupSet("memory.soft_limit_in_bytes", memory) if err != nil { - undoChanges() return err } } else { - if memorySwap != "false" && cgSwapAccounting { + if cgSwapAccounting && (memorySwap == "" || shared.IsTrue(memorySwap)) { err = c.CGroupSet("memory.limit_in_bytes", memory) if err != nil { - undoChanges() return err } err = c.CGroupSet("memory.memsw.limit_in_bytes", memory) if err != nil { - undoChanges() return err } } else { err = c.CGroupSet("memory.limit_in_bytes", memory) if err != nil { - undoChanges() 
return err } } @@ -2263,10 +2695,9 @@ if key == "limits.memory.swap" || key == "limits.memory.swap.priority" { memorySwap := c.expandedConfig["limits.memory.swap"] memorySwapPriority := c.expandedConfig["limits.memory.swap.priority"] - if memorySwap == "false" { + if memorySwap != "" && !shared.IsTrue(memorySwap) { err = c.CGroupSet("memory.swappiness", "0") if err != nil { - undoChanges() return err } } else { @@ -2274,14 +2705,12 @@ if memorySwapPriority != "" { priority, err = strconv.Atoi(memorySwapPriority) if err != nil { - undoChanges() return err } } err = c.CGroupSet("memory.swappiness", fmt.Sprintf("%d", 60-10+priority)) if err != nil { - undoChanges() return err } } @@ -2303,25 +2732,21 @@ // Apply new CPU limits cpuShares, cpuCfsQuota, cpuCfsPeriod, err := deviceParseCPU(c.expandedConfig["limits.cpu.allowance"], c.expandedConfig["limits.cpu.priority"]) if err != nil { - undoChanges() return err } err = c.CGroupSet("cpu.shares", cpuShares) if err != nil { - undoChanges() return err } err = c.CGroupSet("cpu.cfs_period_us", cpuCfsPeriod) if err != nil { - undoChanges() return err } err = c.CGroupSet("cpu.cfs_quota_us", cpuCfsQuota) if err != nil { - undoChanges() return err } } else if key == "limits.processes" { @@ -2332,67 +2757,97 @@ if value == "" { err = c.CGroupSet("pids.max", "max") if err != nil { - undoChanges() return err } } else { valueInt, err := strconv.ParseInt(value, 10, 64) if err != nil { - undoChanges() return err } err = c.CGroupSet("pids.max", fmt.Sprintf("%d", valueInt)) if err != nil { - undoChanges() return err } } } } + var usbs []usbDevice + // Live update the devices for k, m := range removeDevices { if shared.StringInSlice(m["type"], []string{"unix-char", "unix-block"}) { - err = c.removeUnixDevice(k, m) + err = c.removeUnixDevice(m) if err != nil { - undoChanges() return err } } else if m["type"] == "disk" && m["path"] != "/" { err = c.removeDiskDevice(k, m) if err != nil { - undoChanges() return err } } else if m["type"] == 
"nic" { err = c.removeNetworkDevice(k, m) if err != nil { - undoChanges() return err } + } else if m["type"] == "usb" { + if usbs == nil { + usbs, err = deviceLoadUsb() + if err != nil { + return err + } + } + + /* if the device isn't present, we don't need to remove it */ + for _, usb := range usbs { + if usb.vendor != m["vendorid"] || (m["productid"] != "" && usb.product != m["productid"]) { + continue + } + + err := c.removeUSBDevice(m, usb) + if err != nil { + return err + } + } } } for k, m := range addDevices { if shared.StringInSlice(m["type"], []string{"unix-char", "unix-block"}) { - err = c.insertUnixDevice(k, m) + err = c.insertUnixDevice(m) if err != nil { - undoChanges() return err } } else if m["type"] == "disk" && m["path"] != "/" { err = c.insertDiskDevice(k, m) if err != nil { - undoChanges() return err } } else if m["type"] == "nic" { err = c.insertNetworkDevice(k, m) if err != nil { - undoChanges() return err } + } else if m["type"] == "usb" { + if usbs == nil { + usbs, err = deviceLoadUsb() + if err != nil { + return err + } + } + + for _, usb := range usbs { + if usb.vendor != m["vendorid"] || (m["productid"] != "" && usb.product != m["productid"]) { + continue + } + + err = c.insertUSBDevice(m, usb) + if err != nil { + shared.LogError("failed to insert usb device", log.Ctx{"err": err, "usb": usb, "container": c.Name()}) + } + } } } @@ -2401,9 +2856,9 @@ if m["type"] == "disk" { updateDiskLimit = true } else if m["type"] == "nic" { + // Refresh tc limits err = c.setNetworkLimits(k, m) if err != nil { - undoChanges() return err } } @@ -2413,32 +2868,27 @@ if updateDiskLimit && cgBlkioController { diskLimits, err := c.getDiskLimits() if err != nil { - undoChanges() return err } for block, limit := range diskLimits { err = c.CGroupSet("blkio.throttle.read_bps_device", fmt.Sprintf("%s %d", block, limit.readBps)) if err != nil { - undoChanges() return err } err = c.CGroupSet("blkio.throttle.read_iops_device", fmt.Sprintf("%s %d", block, 
limit.readIops)) if err != nil { - undoChanges() return err } err = c.CGroupSet("blkio.throttle.write_bps_device", fmt.Sprintf("%s %d", block, limit.writeBps)) if err != nil { - undoChanges() return err } err = c.CGroupSet("blkio.throttle.write_iops_device", fmt.Sprintf("%s %d", block, limit.writeIops)) if err != nil { - undoChanges() return err } } @@ -2448,61 +2898,78 @@ // Finally, apply the changes to the database tx, err := dbBegin(c.daemon.db) if err != nil { - undoChanges() return err } err = dbContainerConfigClear(tx, c.id) if err != nil { tx.Rollback() - undoChanges() return err } err = dbContainerConfigInsert(tx, c.id, args.Config) if err != nil { tx.Rollback() - undoChanges() return err } err = dbContainerProfilesInsert(tx, c.id, args.Profiles) if err != nil { tx.Rollback() - undoChanges() return err } err = dbDevicesAdd(tx, "container", int64(c.id), args.Devices) if err != nil { tx.Rollback() - undoChanges() return err } err = dbContainerUpdate(tx, c.id, c.architecture, c.ephemeral) if err != nil { tx.Rollback() - undoChanges() return err } if err := txCommit(tx); err != nil { - undoChanges() return err } + // Update network leases + needsUpdate := false + for _, m := range updateDevices { + if m["type"] == "nic" && m["nictype"] == "bridged" { + needsUpdate = true + break + } + } + + if needsUpdate { + networkUpdateStatic(c.daemon) + } + + // Success, update the closure to mark that the changes should be kept. 
+ undoChanges = false + return nil } func (c *containerLXC) Export(w io.Writer) error { + ctxMap := log.Ctx{"name": c.name, + "created": c.creationDate, + "ephemeral": c.ephemeral, + "used": c.lastUsedDate} + if c.IsRunning() { return fmt.Errorf("Cannot export a running container as an image") } + shared.LogInfo("Exporting container", ctxMap) + // Start the storage err := c.StorageStart() if err != nil { + shared.LogError("Failed exporting container", ctxMap) return err } defer c.StorageStop() @@ -2510,11 +2977,13 @@ // Unshift the container idmap, err := c.LastIdmapSet() if err != nil { + shared.LogError("Failed exporting container", ctxMap) return err } if idmap != nil { if err := idmap.UnshiftRootfs(c.RootfsPath()); err != nil { + shared.LogError("Failed exporting container", ctxMap) return err } @@ -2533,7 +3002,7 @@ writeToTar := func(path string, fi os.FileInfo, err error) error { if err := c.tarStoreFile(linkmap, offset, tw, path, fi); err != nil { - shared.Debugf("Error tarring up %s: %s", path, err) + shared.LogDebugf("Error tarring up %s: %s", path, err) return err } return nil @@ -2543,12 +3012,13 @@ fnam := filepath.Join(cDir, "metadata.yaml") if !shared.PathExists(fnam) { // Generate a new metadata.yaml - f, err := ioutil.TempFile("", "lxd_lxd_metadata_") + tempDir, err := ioutil.TempDir("", "lxd_lxd_metadata_") if err != nil { tw.Close() + shared.LogError("Failed exporting container", ctxMap) return err } - defer os.Remove(f.Name()) + defer os.RemoveAll(tempDir) // Get the container's architecture var arch string @@ -2557,6 +3027,7 @@ parent, err := containerLoadByName(c.daemon, parentName) if err != nil { tw.Close() + shared.LogError("Failed exporting container", ctxMap) return err } @@ -2568,6 +3039,7 @@ if arch == "" { arch, err = shared.ArchitectureName(c.daemon.architectures[0]) if err != nil { + shared.LogError("Failed exporting container", ctxMap) return err } } @@ -2577,70 +3049,261 @@ meta.Architecture = arch meta.CreationDate = 
time.Now().UTC().Unix() - data, err := yaml.Marshal(&meta) + data, err := yaml.Marshal(&meta) + if err != nil { + tw.Close() + shared.LogError("Failed exporting container", ctxMap) + return err + } + + // Write the actual file + fnam = filepath.Join(tempDir, "metadata.yaml") + err = ioutil.WriteFile(fnam, data, 0644) + if err != nil { + tw.Close() + shared.LogError("Failed exporting container", ctxMap) + return err + } + + fi, err := os.Lstat(fnam) + if err != nil { + tw.Close() + shared.LogError("Failed exporting container", ctxMap) + return err + } + + tmpOffset := len(path.Dir(fnam)) + 1 + if err := c.tarStoreFile(linkmap, tmpOffset, tw, fnam, fi); err != nil { + tw.Close() + shared.LogDebugf("Error writing to tarfile: %s", err) + shared.LogError("Failed exporting container", ctxMap) + return err + } + } else { + // Include metadata.yaml in the tarball + fi, err := os.Lstat(fnam) + if err != nil { + tw.Close() + shared.LogDebugf("Error statting %s during export", fnam) + shared.LogError("Failed exporting container", ctxMap) + return err + } + + if err := c.tarStoreFile(linkmap, offset, tw, fnam, fi); err != nil { + tw.Close() + shared.LogDebugf("Error writing to tarfile: %s", err) + shared.LogError("Failed exporting container", ctxMap) + return err + } + } + + // Include all the rootfs files + fnam = c.RootfsPath() + filepath.Walk(fnam, writeToTar) + + // Include all the templates + fnam = c.TemplatesPath() + if shared.PathExists(fnam) { + filepath.Walk(fnam, writeToTar) + } + + err = tw.Close() + if err != nil { + shared.LogError("Failed exporting container", ctxMap) + } + + shared.LogInfo("Exported container", ctxMap) + return err +} + +func collectCRIULogFile(c container, imagesDir string, function string, method string) error { + t := time.Now().Format(time.RFC3339) + newPath := shared.LogPath(c.Name(), fmt.Sprintf("%s_%s_%s.log", function, method, t)) + return shared.FileCopy(filepath.Join(imagesDir, fmt.Sprintf("%s.log", method)), newPath) +} + +func 
getCRIULogErrors(imagesDir string, method string) (string, error) { + f, err := os.Open(path.Join(imagesDir, fmt.Sprintf("%s.log", method))) + if err != nil { + return "", err + } + + defer f.Close() + + scanner := bufio.NewScanner(f) + ret := []string{} + for scanner.Scan() { + line := scanner.Text() + if strings.Contains(line, "Error") || strings.Contains(line, "Warn") { + ret = append(ret, scanner.Text()) + } + } + + return strings.Join(ret, "\n"), nil +} + +func findCriu(host string) error { + _, err := exec.LookPath("criu") + if err != nil { + return fmt.Errorf("CRIU is required for live migration but its binary couldn't be found on the %s server. Is it installed in LXD's path?", host) + } + + return nil +} + +func (c *containerLXC) Migrate(cmd uint, stateDir string, function string, stop bool, actionScript bool) error { + ctxMap := log.Ctx{"name": c.name, + "created": c.creationDate, + "ephemeral": c.ephemeral, + "used": c.lastUsedDate, + "statedir": stateDir, + "actionscript": actionScript, + "stop": stop} + + if err := findCriu(function); err != nil { + return err + } + + shared.LogInfo("Migrating container", ctxMap) + + prettyCmd := "" + switch cmd { + case lxc.MIGRATE_PRE_DUMP: + prettyCmd = "pre-dump" + case lxc.MIGRATE_DUMP: + prettyCmd = "dump" + case lxc.MIGRATE_RESTORE: + prettyCmd = "restore" + default: + prettyCmd = "unknown" + shared.LogWarn("unknown migrate call", log.Ctx{"cmd": cmd}) + } + + preservesInodes := c.storage.PreservesInodes() + /* This feature was only added in 2.0.1, let's not ask for it + * before then or migrations will fail. + */ + if !lxc.VersionAtLeast(2, 0, 1) { + preservesInodes = false + } + + var migrateErr error + + /* For restore, we need an extra fork so that we daemonize monitor + * instead of having it be a child of LXD, so let's hijack the command + * here and do the extra fork. 
+ */ + if cmd == lxc.MIGRATE_RESTORE { + // Run the shared start + _, err := c.startCommon() if err != nil { - tw.Close() return err } - // Write the actual file - f.Write(data) - f.Close() + /* + * For unprivileged containers we need to shift the + * perms on the images images so that they can be + * opened by the process after it is in its user + * namespace. + */ + if !c.IsPrivileged() { + if err := c.IdmapSet().ShiftRootfs(stateDir); err != nil { + return err + } + } - fi, err := os.Lstat(f.Name()) - if err != nil { - tw.Close() - return err + configPath := filepath.Join(c.LogPath(), "lxc.conf") + + var out []byte + out, migrateErr = exec.Command( + execPath, + "forkmigrate", + c.name, + c.daemon.lxcpath, + configPath, + stateDir, + fmt.Sprintf("%v", preservesInodes)).CombinedOutput() + + if string(out) != "" { + for _, line := range strings.Split(strings.TrimRight(string(out), "\n"), "\n") { + shared.LogDebugf("forkmigrate: %s", line) + } } - tmpOffset := len(path.Dir(f.Name())) + 1 - if err := c.tarStoreFile(linkmap, tmpOffset, tw, f.Name(), fi); err != nil { - shared.Debugf("Error writing to tarfile: %s", err) - tw.Close() - return err + if migrateErr != nil && !c.IsRunning() { + migrateErr = fmt.Errorf( + "Error calling 'lxd forkmigrate %s %s %s %s': err='%v' out='%v'", + c.name, + c.daemon.lxcpath, + filepath.Join(c.LogPath(), "lxc.conf"), + stateDir, + err, + string(out)) } - fnam = f.Name() } else { - // Include metadata.yaml in the tarball - fi, err := os.Lstat(fnam) + err := c.initLXC() if err != nil { - shared.Debugf("Error statting %s during export", fnam) - tw.Close() return err } - if err := c.tarStoreFile(linkmap, offset, tw, fnam, fi); err != nil { - shared.Debugf("Error writing to tarfile: %s", err) - tw.Close() - return err + script := "" + if actionScript { + script = filepath.Join(stateDir, "action.sh") + } + + // TODO: make this configurable? 
Ultimately I think we don't + // want to do that; what we really want to do is have "modes" + // of criu operation where one is "make this succeed" and the + // other is "make this fast". Anyway, for now, let's choose a + // really big size so it almost always succeeds, even if it is + // slow. + ghostLimit := uint64(256 * 1024 * 1024) + + opts := lxc.MigrateOptions{ + Stop: stop, + Directory: stateDir, + Verbose: true, + PreservesInodes: preservesInodes, + ActionScript: script, + GhostLimit: ghostLimit, } + + migrateErr = c.c.Migrate(cmd, opts) } - // Include all the rootfs files - fnam = c.RootfsPath() - filepath.Walk(fnam, writeToTar) + collectErr := collectCRIULogFile(c, stateDir, function, prettyCmd) + if collectErr != nil { + shared.LogError("Error collecting checkpoint log file", log.Ctx{"err": collectErr}) + } - // Include all the templates - fnam = c.TemplatesPath() - if shared.PathExists(fnam) { - filepath.Walk(fnam, writeToTar) + if migrateErr != nil { + log, err2 := getCRIULogErrors(stateDir, prettyCmd) + if err2 == nil { + shared.LogInfo("Failed migrating container", ctxMap) + migrateErr = fmt.Errorf("%s %s failed\n%s", function, prettyCmd, log) + } } - return tw.Close() + shared.LogInfo("Migrated container", ctxMap) + + return migrateErr } -func (c *containerLXC) Checkpoint(opts lxc.CheckpointOptions) error { - // Load the go-lxc struct - err := c.initLXC() - if err != nil { - return err +func (c *containerLXC) TemplateApply(trigger string) error { + // "create" and "copy" are deferred until next start + if shared.StringInSlice(trigger, []string{"create", "copy"}) { + // The two events are mutually exclusive so only keep the last one + err := c.ConfigKeySet("volatile.apply_template", trigger) + if err != nil { + return err + } } - return c.c.Checkpoint(opts) + return c.templateApplyNow(trigger) } -func (c *containerLXC) TemplateApply(trigger string) error { +func (c *containerLXC) templateApplyNow(trigger string) error { // If there's no metadata, 
just return fname := filepath.Join(c.Path(), "metadata.yaml") if !shared.PathExists(fname) { @@ -2775,18 +3438,18 @@ return nil } -func (c *containerLXC) FilePull(srcpath string, dstpath string) error { +func (c *containerLXC) FilePull(srcpath string, dstpath string) (int, int, os.FileMode, string, []string, error) { // Setup container storage if needed if !c.IsRunning() { err := c.StorageStart() if err != nil { - return err + return -1, -1, 0, "", nil, err } } // Get the file from the container out, err := exec.Command( - c.daemon.execPath, + execPath, "forkgetfile", c.RootfsPath(), fmt.Sprintf("%d", c.InitPID()), @@ -2798,23 +3461,85 @@ if !c.IsRunning() { err := c.StorageStop() if err != nil { - return err + return -1, -1, 0, "", nil, err } } + uid := -1 + gid := -1 + mode := -1 + type_ := "unknown" + var dirEnts []string + + var errStr string + // Process forkgetfile response - if string(out) != "" { - if strings.HasPrefix(string(out), "error:") { - return fmt.Errorf(strings.TrimPrefix(strings.TrimSuffix(string(out), "\n"), "error: ")) + for _, line := range strings.Split(strings.TrimRight(string(out), "\n"), "\n") { + if line == "" { + continue } - for _, line := range strings.Split(strings.TrimRight(string(out), "\n"), "\n") { - shared.Debugf("forkgetfile: %s", line) + // Extract errors + if strings.HasPrefix(line, "error: ") { + errStr = strings.TrimPrefix(line, "error: ") + continue } + + if strings.HasPrefix(line, "errno: ") { + errno := strings.TrimPrefix(line, "errno: ") + if errno == "2" { + return -1, -1, 0, "", nil, os.ErrNotExist + } + return -1, -1, 0, "", nil, fmt.Errorf(errStr) + } + + // Extract the uid + if strings.HasPrefix(line, "uid: ") { + uid, err = strconv.Atoi(strings.TrimPrefix(line, "uid: ")) + if err != nil { + return -1, -1, 0, "", nil, err + } + + continue + } + + // Extract the gid + if strings.HasPrefix(line, "gid: ") { + gid, err = strconv.Atoi(strings.TrimPrefix(line, "gid: ")) + if err != nil { + return -1, -1, 0, "", nil, err + 
} + + continue + } + + // Extract the mode + if strings.HasPrefix(line, "mode: ") { + mode, err = strconv.Atoi(strings.TrimPrefix(line, "mode: ")) + if err != nil { + return -1, -1, 0, "", nil, err + } + + continue + } + + if strings.HasPrefix(line, "type: ") { + type_ = strings.TrimPrefix(line, "type: ") + continue + } + + if strings.HasPrefix(line, "entry: ") { + ent := strings.TrimPrefix(line, "entry: ") + ent = strings.Replace(ent, "\x00", "\n", -1) + dirEnts = append(dirEnts, ent) + continue + } + + shared.LogDebugf("forkgetfile: %s", line) } if err != nil { - return fmt.Errorf( + return -1, -1, 0, "", nil, fmt.Errorf( "Error calling 'lxd forkgetfile %s %d %s': err='%v'", dstpath, c.InitPID(), @@ -2822,10 +3547,23 @@ err) } - return nil + // Unmap uid and gid if needed + idmapset, err := c.LastIdmapSet() + if err != nil { + return -1, -1, 0, "", nil, err + } + + if idmapset != nil { + uid, gid = idmapset.ShiftFromNs(uid, gid) + } + + return uid, gid, os.FileMode(mode), type_, dirEnts, nil } -func (c *containerLXC) FilePush(srcpath string, dstpath string, uid int, gid int, mode os.FileMode) error { +func (c *containerLXC) FilePush(srcpath string, dstpath string, uid int, gid int, mode int) error { + var rootUid = 0 + var rootGid = 0 + // Map uid and gid if needed idmapset, err := c.LastIdmapSet() if err != nil { @@ -2834,6 +3572,7 @@ if idmapset != nil { uid, gid = idmapset.ShiftIntoNs(uid, gid) + rootUid, rootGid = idmapset.ShiftIntoNs(0, 0) } // Setup container storage if needed @@ -2846,7 +3585,7 @@ // Push the file to the container out, err := exec.Command( - c.daemon.execPath, + execPath, "forkputfile", c.RootfsPath(), fmt.Sprintf("%d", c.InitPID()), @@ -2854,7 +3593,10 @@ dstpath, fmt.Sprintf("%d", uid), fmt.Sprintf("%d", gid), - fmt.Sprintf("%d", mode&os.ModePerm), + fmt.Sprintf("%d", mode), + fmt.Sprintf("%d", rootUid), + fmt.Sprintf("%d", rootGid), + fmt.Sprintf("%d", int(os.FileMode(0640)&os.ModePerm)), ).CombinedOutput() // Tear down container 
storage if needed @@ -2872,7 +3614,7 @@ } for _, line := range strings.Split(strings.TrimRight(string(out), "\n"), "\n") { - shared.Debugf("forkgetfile: %s", line) + shared.LogDebugf("forkgetfile: %s", line) } } @@ -2891,10 +3633,75 @@ return nil } +func (c *containerLXC) Exec(command []string, env map[string]string, stdin *os.File, stdout *os.File, stderr *os.File) (int, error) { + envSlice := []string{} + + for k, v := range env { + envSlice = append(envSlice, fmt.Sprintf("%s=%s", k, v)) + } + + args := []string{execPath, "forkexec", c.name, c.daemon.lxcpath, filepath.Join(c.LogPath(), "lxc.conf")} + + args = append(args, "--") + args = append(args, "env") + args = append(args, envSlice...) + + args = append(args, "--") + args = append(args, "cmd") + args = append(args, command...) + + cmd := exec.Cmd{} + cmd.Path = execPath + cmd.Args = args + cmd.Stdin = stdin + cmd.Stdout = stdout + cmd.Stderr = stderr + + shared.LogInfo("Executing command", log.Ctx{"environment": envSlice, "args": args}) + + err := cmd.Run() + if err != nil { + exitErr, ok := err.(*exec.ExitError) + if ok { + status, ok := exitErr.Sys().(syscall.WaitStatus) + if ok { + shared.LogInfo("Executed command", log.Ctx{"environment": envSlice, "args": args, "exit_status": status.ExitStatus()}) + return status.ExitStatus(), nil + } + } + + shared.LogInfo("Failed executing command", log.Ctx{"environment": envSlice, "args": args, "err": err}) + return -1, err + } + + shared.LogInfo("Executed command", log.Ctx{"environment": envSlice, "args": args}) + return 0, nil +} + +func (c *containerLXC) cpuState() shared.ContainerStateCPU { + cpu := shared.ContainerStateCPU{} + + if !cgCpuacctController { + return cpu + } + + // CPU usage in seconds + value, err := c.CGroupGet("cpuacct.usage") + valueInt, err := strconv.ParseInt(value, 10, 64) + if err != nil { + valueInt = -1 + } + + cpu.Usage = valueInt + + return cpu +} + func (c *containerLXC) diskState() map[string]shared.ContainerStateDisk { disk := 
map[string]shared.ContainerStateDisk{} - for name, d := range c.expandedDevices { + for _, name := range c.expandedDevices.DeviceNames() { + d := c.expandedDevices[name] if d["type"] != "disk" { continue } @@ -2971,13 +3778,13 @@ // Get the network state from the container out, err := exec.Command( - c.daemon.execPath, + execPath, "forkgetnet", fmt.Sprintf("%d", pid)).CombinedOutput() // Process forkgetnet response if err != nil { - shared.Log.Error("Error calling 'lxd forkgetnet", log.Ctx{"container": c.name, "output": string(out), "pid": pid}) + shared.LogError("Error calling 'lxd forkgetnet", log.Ctx{"container": c.name, "output": string(out), "pid": pid}) return result } @@ -2985,7 +3792,7 @@ err = json.Unmarshal(out, &networks) if err != nil { - shared.Log.Error("Failure to read forkgetnet json", log.Ctx{"container": c.name, "err": err}) + shared.LogError("Failure to read forkgetnet json", log.Ctx{"container": c.name, "err": err}) return result } @@ -3089,7 +3896,12 @@ } } - // TODO: handle xattrs + // Handle xattrs. 
+ hdr.Xattrs, err = shared.GetAllXattr(path) + if err != nil { + return err + } + if err := tw.WriteHeader(hdr); err != nil { return fmt.Errorf("error writing header: %s", err) } @@ -3168,11 +3980,11 @@ mntsrc := filepath.Join("/dev/.lxd-mounts", filepath.Base(tmpMount)) pidStr := fmt.Sprintf("%d", pid) - out, err := exec.Command(c.daemon.execPath, "forkmount", pidStr, mntsrc, target).CombinedOutput() + out, err := exec.Command(execPath, "forkmount", pidStr, mntsrc, target).CombinedOutput() if string(out) != "" { for _, line := range strings.Split(strings.TrimRight(string(out), "\n"), "\n") { - shared.Debugf("forkmount: %s", line) + shared.LogDebugf("forkmount: %s", line) } } @@ -3198,11 +4010,11 @@ // Remove the mount from the container pidStr := fmt.Sprintf("%d", pid) - out, err := exec.Command(c.daemon.execPath, "forkumount", pidStr, mount).CombinedOutput() + out, err := exec.Command(execPath, "forkumount", pidStr, mount).CombinedOutput() if string(out) != "" { for _, line := range strings.Split(strings.TrimRight(string(out), "\n"), "\n") { - shared.Debugf("forkumount: %s", line) + shared.LogDebugf("forkumount: %s", line) } } @@ -3218,7 +4030,7 @@ } // Unix devices handling -func (c *containerLXC) createUnixDevice(name string, m shared.Device) (string, error) { +func (c *containerLXC) createUnixDevice(m shared.Device) (string, error) { var err error var major, minor int @@ -3228,15 +4040,24 @@ devName := fmt.Sprintf("unix.%s", strings.Replace(tgtPath, "/", "-", -1)) devPath := filepath.Join(c.DevicesPath(), devName) + // Extra checks for nesting + if runningInUserns { + for key, value := range m { + if shared.StringInSlice(key, []string{"major", "minor", "mode", "uid", "gid"}) && value != "" { + return "", fmt.Errorf("The \"%s\" property may not be set when adding a device to a nested container", key) + } + } + } + // Get the major/minor of the device we want to create if m["major"] == "" && m["minor"] == "" { // If no major and minor are set, use those from the 
device on the host _, major, minor, err = deviceGetAttributes(srcPath) if err != nil { - return "", fmt.Errorf("Failed to get device attributes: %s", err) + return "", fmt.Errorf("Failed to get device attributes for %s: %s", m["path"], err) } } else if m["major"] == "" || m["minor"] == "" { - return "", fmt.Errorf("Both major and minor must be supplied for devices") + return "", fmt.Errorf("Both major and minor must be supplied for device: %s", m["path"]) } else { major, err = strconv.Atoi(m["major"]) if err != nil { @@ -3293,6 +4114,10 @@ // Clean any existing entry if shared.PathExists(devPath) { + if runningInUserns { + syscall.Unmount(devPath, syscall.MNT_DETACH) + } + err = os.Remove(devPath) if err != nil { return "", fmt.Errorf("Failed to remove existing entry: %s", err) @@ -3300,37 +4125,50 @@ } // Create the new entry - if err := syscall.Mknod(devPath, uint32(mode), minor|(major<<8)); err != nil { - return "", fmt.Errorf("Failed to create device %s for %s: %s", devPath, m["path"], err) - } + if !runningInUserns { + if err := syscall.Mknod(devPath, uint32(mode), minor|(major<<8)); err != nil { + return "", fmt.Errorf("Failed to create device %s for %s: %s", devPath, m["path"], err) + } - if err := os.Chown(devPath, uid, gid); err != nil { - return "", fmt.Errorf("Failed to chown device %s: %s", devPath, err) - } + if err := os.Chown(devPath, uid, gid); err != nil { + return "", fmt.Errorf("Failed to chown device %s: %s", devPath, err) + } - // Needed as mknod respects the umask - if err := os.Chmod(devPath, mode); err != nil { - return "", fmt.Errorf("Failed to chmod device %s: %s", devPath, err) - } + // Needed as mknod respects the umask + if err := os.Chmod(devPath, mode); err != nil { + return "", fmt.Errorf("Failed to chmod device %s: %s", devPath, err) + } - if c.idmapset != nil { - if err := c.idmapset.ShiftFile(devPath); err != nil { - // uidshift failing is weird, but not a big problem. 
Log and proceed - shared.Debugf("Failed to uidshift device %s: %s\n", m["path"], err) + if c.idmapset != nil { + if err := c.idmapset.ShiftFile(devPath); err != nil { + // uidshift failing is weird, but not a big problem. Log and proceed + shared.LogDebugf("Failed to uidshift device %s: %s\n", m["path"], err) + } + } + } else { + f, err := os.Create(devPath) + if err != nil { + return "", err + } + f.Close() + + err = deviceMountDisk(srcPath, devPath, false, false) + if err != nil { + return "", err } } return devPath, nil } -func (c *containerLXC) insertUnixDevice(name string, m shared.Device) error { +func (c *containerLXC) insertUnixDevice(m shared.Device) error { // Check that the container is running if !c.IsRunning() { return fmt.Errorf("Can't insert device into stopped container") } // Create the device on the host - devPath, err := c.createUnixDevice(name, m) + devPath, err := c.createUnixDevice(m) if err != nil { return fmt.Errorf("Failed to setup device: %s", err) } @@ -3348,14 +4186,29 @@ return fmt.Errorf("Failed to get device attributes: %s", err) } - if err := c.CGroupSet("devices.allow", fmt.Sprintf("%s %d:%d rwm", dType, dMajor, dMinor)); err != nil { - return fmt.Errorf("Failed to add cgroup rule for device") + if c.IsPrivileged() && !runningInUserns && cgDevicesController { + if err := c.CGroupSet("devices.allow", fmt.Sprintf("%s %d:%d rwm", dType, dMajor, dMinor)); err != nil { + return fmt.Errorf("Failed to add cgroup rule for device") + } } return nil } -func (c *containerLXC) removeUnixDevice(name string, m shared.Device) error { +func (c *containerLXC) insertUSBDevice(m shared.Device, usb usbDevice) error { + temp := shared.Device{} + if err := shared.DeepCopy(&m, &temp); err != nil { + return err + } + + temp["major"] = fmt.Sprintf("%d", usb.major) + temp["minor"] = fmt.Sprintf("%d", usb.minor) + temp["path"] = usb.path + + return c.insertUnixDevice(temp) +} + +func (c *containerLXC) removeUnixDevice(m shared.Device) error { // Check that 
the container is running pid := c.InitPID() if pid == -1 { @@ -3374,9 +4227,11 @@ return err } - err = c.CGroupSet("devices.deny", fmt.Sprintf("%s %d:%d rwm", dType, dMajor, dMinor)) - if err != nil { - return err + if c.IsPrivileged() && !runningInUserns && cgDevicesController { + err = c.CGroupSet("devices.deny", fmt.Sprintf("%s %d:%d rwm", dType, dMajor, dMinor)) + if err != nil { + return err + } } // Remove the bind-mount from the container @@ -3395,6 +4250,10 @@ } // Remove the host side + if runningInUserns { + syscall.Unmount(devPath, syscall.MNT_DETACH) + } + err = os.Remove(devPath) if err != nil { return err @@ -3403,6 +4262,36 @@ return nil } +func (c *containerLXC) removeUSBDevice(m shared.Device, usb usbDevice) error { + pid := c.InitPID() + if pid == -1 { + return fmt.Errorf("Can't remove device from stopped container") + } + + temp := shared.Device{} + if err := shared.DeepCopy(&m, &temp); err != nil { + return err + } + + temp["major"] = fmt.Sprintf("%d", usb.major) + temp["minor"] = fmt.Sprintf("%d", usb.minor) + temp["path"] = usb.path + + err := c.removeUnixDevice(temp) + if err != nil { + shared.LogError("failed to remove usb device", log.Ctx{"err": err, "usb": usb, "container": c.Name()}) + return err + } + + /* ok to fail here, there may be other usb + * devices on this bus still left in the + * container + */ + dir := fmt.Sprintf("/proc/%d/root/%s", pid, filepath.Dir(usb.path)) + os.Remove(dir) + return nil +} + func (c *containerLXC) removeUnixDevices() error { // Check that we indeed have devices to remove if !shared.PathExists(c.DevicesPath()) { @@ -3423,9 +4312,10 @@ } // Remove the entry - err := os.Remove(filepath.Join(c.DevicesPath(), f.Name())) + devicePath := filepath.Join(c.DevicesPath(), f.Name()) + err := os.Remove(devicePath) if err != nil { - return err + shared.LogError("failed removing unix device", log.Ctx{"err": err, "path": devicePath}) } } @@ -3460,7 +4350,7 @@ } if m["nictype"] == "bridged" { - err = exec.Command("ip", 
"link", "set", n1, "master", m["parent"]).Run() + err = networkAttachInterface(m["parent"], n1) if err != nil { deviceRemoveInterface(n2) return "", fmt.Errorf("Failed to add interface to bridge: %s", err) @@ -3502,6 +4392,14 @@ return "", fmt.Errorf("Failed to bring up the interface: %s", err) } + // Set the filter + if m["nictype"] == "bridged" && shared.IsTrue(m["security.mac_filtering"]) { + err = c.createNetworkFilter(dev, m["parent"], m["hwaddr"]) + if err != nil { + return "", err + } + } + return dev, nil } @@ -3517,7 +4415,8 @@ devNames := []string{} // Include all static interface names - for _, v := range c.expandedDevices { + for _, k := range c.expandedDevices.DeviceNames() { + v := c.expandedDevices[k] if v["name"] != "" && !shared.StringInSlice(v["name"], devNames) { devNames = append(devNames, v["name"]) } @@ -3639,6 +4538,70 @@ return newDevice, nil } +func (c *containerLXC) createNetworkFilter(name string, bridge string, hwaddr string) error { + err := shared.RunCommand("ebtables", "-A", "FORWARD", "-s", "!", hwaddr, "-i", name, "-o", bridge, "-j", "DROP") + if err != nil { + return err + } + + err = shared.RunCommand("ebtables", "-A", "INPUT", "-s", "!", hwaddr, "-i", name, "-j", "DROP") + if err != nil { + return err + } + + return nil +} + +func (c *containerLXC) removeNetworkFilter(hwaddr string, bridge string) error { + out, err := exec.Command("ebtables", "-L", "--Lmac2", "--Lx").Output() + for _, line := range strings.Split(string(out), "\n") { + line = strings.TrimSpace(line) + fields := strings.Fields(line) + + if len(fields) == 12 { + match := []string{"ebtables", "-t", "filter", "-A", "INPUT", "-s", "!", hwaddr, "-i", fields[9], "-j", "DROP"} + if reflect.DeepEqual(fields, match) { + fields[3] = "-D" + err = shared.RunCommand(fields[0], fields[1:]...) 
+ if err != nil { + return err + } + } + } else if len(fields) == 14 { + match := []string{"ebtables", "-t", "filter", "-A", "FORWARD", "-s", "!", hwaddr, "-i", fields[9], "-o", bridge, "-j", "DROP"} + if reflect.DeepEqual(fields, match) { + fields[3] = "-D" + err = shared.RunCommand(fields[0], fields[1:]...) + if err != nil { + return err + } + } + } + } + + return nil +} + +func (c *containerLXC) removeNetworkFilters() error { + for k, m := range c.expandedDevices { + m, err := c.fillNetworkDevice(k, m) + if err != nil { + return err + } + + if m["type"] != "nic" || m["nictype"] != "bridged" { + continue + } + + err = c.removeNetworkFilter(m["hwaddr"], m["parent"]) + if err != nil { + return err + } + } + + return nil +} + func (c *containerLXC) insertNetworkDevice(name string, m shared.Device) error { // Load the go-lxc struct err := c.initLXC() @@ -3652,8 +4615,8 @@ return nil } - if m["hwaddr"] == "" || m["name"] == "" { - return fmt.Errorf("wtf? hwaddr=%s name=%s", m["hwaddr"], m["name"]) + if m["parent"] != "" && !shared.PathExists(fmt.Sprintf("/sys/class/net/%s", m["parent"])) { + return fmt.Errorf("Parent device '%s' doesn't exist", m["parent"]) } // Return empty list if not running @@ -3719,6 +4682,14 @@ deviceRemoveInterface(hostName) } + // Remove any filter + if m["nictype"] == "bridged" { + err = c.removeNetworkFilter(m["hwaddr"], m["parent"]) + if err != nil { + return err + } + } + return nil } @@ -3731,9 +4702,9 @@ devPath := filepath.Join(c.DevicesPath(), devName) // Check if read-only - isOptional := m["optional"] == "1" || m["optional"] == "true" - isReadOnly := m["readonly"] == "1" || m["readonly"] == "true" - isRecursive := m["recursive"] == "1" || m["recursive"] == "true" + isOptional := shared.IsTrue(m["optional"]) + isReadOnly := shared.IsTrue(m["readonly"]) + isRecursive := shared.IsTrue(m["recursive"]) isFile := !shared.IsDir(srcPath) && !deviceIsBlockdev(srcPath) // Check if the source exists @@ -3741,7 +4712,7 @@ if isOptional { return 
"", nil } - return "", fmt.Errorf("Source path doesn't exist") + return "", fmt.Errorf("Source path %s doesn't exist for device %s", srcPath, name) } // Create the devices directory if missing @@ -3790,7 +4761,7 @@ return fmt.Errorf("Can't insert device into stopped container") } - isRecursive := m["recursive"] == "1" || m["recursive"] == "true" + isRecursive := shared.IsTrue(m["recursive"]) // Create the device on the host devPath, err := c.createDiskDevice(name, m) @@ -3873,9 +4844,10 @@ _ = syscall.Unmount(filepath.Join(c.DevicesPath(), f.Name()), syscall.MNT_DETACH) // Remove the entry - err := os.Remove(filepath.Join(c.DevicesPath(), f.Name())) + diskPath := filepath.Join(c.DevicesPath(), f.Name()) + err := os.Remove(diskPath) if err != nil { - return err + shared.LogError("Failed to remove disk device path", log.Ctx{"err": err, "path": diskPath}) } } @@ -3914,7 +4886,8 @@ // Process all the limits blockLimits := map[string][]deviceBlockLimit{} - for _, m := range c.expandedDevices { + for _, k := range c.expandedDevices.DeviceNames() { + m := c.expandedDevices[k] if m["type"] != "disk" { continue } @@ -4088,7 +5061,8 @@ } } - for k, dev := range c.expandedDevices { + for _, k := range c.expandedDevices.DeviceNames() { + dev := c.expandedDevices[k] if dev["type"] != "nic" { continue } @@ -4212,23 +5186,11 @@ } func (c *containerLXC) IsNesting() bool { - switch strings.ToLower(c.expandedConfig["security.nesting"]) { - case "1": - return true - case "true": - return true - } - return false + return shared.IsTrue(c.expandedConfig["security.nesting"]) } func (c *containerLXC) IsPrivileged() bool { - switch strings.ToLower(c.expandedConfig["security.privileged"]) { - case "1": - return true - case "true": - return true - } - return false + return shared.IsTrue(c.expandedConfig["security.privileged"]) } func (c *containerLXC) IsRunning() bool { @@ -4248,6 +5210,9 @@ func (c *containerLXC) CreationDate() time.Time { return c.creationDate } +func (c *containerLXC) 
LastUsedDate() time.Time { + return c.lastUsedDate +} func (c *containerLXC) ExpandedConfig() map[string]string { return c.expandedConfig } @@ -4302,18 +5267,6 @@ return lastIdmap, nil } -func (c *containerLXC) LXContainerGet() *lxc.Container { - // FIXME: This function should go away - - // Load the go-lxc struct - err := c.initLXC() - if err != nil { - return nil - } - - return c.c -} - func (c *containerLXC) Daemon() *Daemon { // FIXME: This function should go away return c.daemon @@ -4367,5 +5320,13 @@ } func (c *containerLXC) StatePath() string { - return filepath.Join(c.RootfsPath(), "state") + /* FIXME: backwards compatibility: we used to use Join(RootfsPath(), + * "state"), which was bad. Let's just check to see if that directory + * exists. + */ + oldStatePath := filepath.Join(c.RootfsPath(), "state") + if shared.IsDir(oldStatePath) { + return oldStatePath + } + return filepath.Join(c.Path(), "state") } diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/lxd/container_patch.go juju-core-2.0.0/src/github.com/lxc/lxd/lxd/container_patch.go --- juju-core-2.0~beta15/src/github.com/lxc/lxd/lxd/container_patch.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/lxd/container_patch.go 2016-10-13 14:31:53.000000000 +0000 @@ -0,0 +1,112 @@ +package main + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + + "github.com/gorilla/mux" + "github.com/lxc/lxd/shared" +) + +func containerPatch(d *Daemon, r *http.Request) Response { + // Get the container + name := mux.Vars(r)["name"] + c, err := containerLoadByName(d, name) + if err != nil { + return NotFound + } + + // Validate the ETag + etag := []interface{}{c.Architecture(), c.LocalConfig(), c.LocalDevices(), c.IsEphemeral(), c.Profiles()} + err = etagCheck(r, etag) + if err != nil { + return PreconditionFailed(err) + } + + body, err := ioutil.ReadAll(r.Body) + if err != nil { + return InternalError(err) + } + + rdr1 := ioutil.NopCloser(bytes.NewBuffer(body)) + 
rdr2 := ioutil.NopCloser(bytes.NewBuffer(body)) + + reqRaw := shared.Jmap{} + if err := json.NewDecoder(rdr1).Decode(&reqRaw); err != nil { + return BadRequest(err) + } + + req := containerPutReq{} + if err := json.NewDecoder(rdr2).Decode(&req); err != nil { + return BadRequest(err) + } + + if req.Restore != "" { + return BadRequest(fmt.Errorf("Can't call PATCH in restore mode.")) + } + + // Check if architecture was passed + var architecture int + _, err = reqRaw.GetString("architecture") + if err != nil { + architecture = c.Architecture() + } else { + architecture, err = shared.ArchitectureId(req.Architecture) + if err != nil { + architecture = 0 + } + } + + // Check if ephemeral was passed + _, err = reqRaw.GetBool("ephemeral") + if err != nil { + req.Ephemeral = c.IsEphemeral() + } + + // Check if profiles was passed + if req.Profiles == nil { + req.Profiles = c.Profiles() + } + + // Check if config was passed + if req.Config == nil { + req.Config = c.LocalConfig() + } else { + for k, v := range c.LocalConfig() { + _, ok := req.Config[k] + if !ok { + req.Config[k] = v + } + } + } + + // Check if devices was passed + if req.Devices == nil { + req.Devices = c.LocalDevices() + } else { + for k, v := range c.LocalDevices() { + _, ok := req.Devices[k] + if !ok { + req.Devices[k] = v + } + } + } + + // Update container configuration + args := containerArgs{ + Architecture: architecture, + Config: req.Config, + Devices: req.Devices, + Ephemeral: req.Ephemeral, + Profiles: req.Profiles} + + err = c.Update(args, false) + if err != nil { + return SmartError(err) + } + + return EmptySyncResponse +} diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/lxd/container_post.go juju-core-2.0.0/src/github.com/lxc/lxd/lxd/container_post.go --- juju-core-2.0~beta15/src/github.com/lxc/lxd/lxd/container_post.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/lxd/container_post.go 2016-10-13 14:31:53.000000000 +0000 @@ -47,6 +47,12 @@ return 
OperationResponse(op) } + // Check that the name isn't already in use + id, _ := dbContainerId(d.db, body.Name) + if id > 0 { + return Conflict + } + run := func(*operation) error { return c.Rename(body.Name) } diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/lxd/container_put.go juju-core-2.0.0/src/github.com/lxc/lxd/lxd/container_put.go --- juju-core-2.0~beta15/src/github.com/lxc/lxd/lxd/container_put.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/lxd/container_put.go 2016-10-13 14:31:53.000000000 +0000 @@ -26,12 +26,20 @@ * the named snapshot */ func containerPut(d *Daemon, r *http.Request) Response { + // Get the container name := mux.Vars(r)["name"] c, err := containerLoadByName(d, name) if err != nil { return NotFound } + // Validate the ETag + etag := []interface{}{c.Architecture(), c.LocalConfig(), c.LocalDevices(), c.IsEphemeral(), c.Profiles()} + err = etagCheck(r, etag) + if err != nil { + return PreconditionFailed(err) + } + configRaw := containerPutReq{} if err := json.NewDecoder(r.Body).Decode(&configRaw); err != nil { return BadRequest(err) @@ -86,7 +94,7 @@ snap = name + shared.SnapshotDelimiter + snap } - shared.Log.Info( + shared.LogInfo( "RESTORE => Restoring snapshot", log.Ctx{ "snapshot": snap, @@ -94,7 +102,7 @@ c, err := containerLoadByName(d, name) if err != nil { - shared.Log.Error( + shared.LogError( "RESTORE => loadcontainerLXD() failed", log.Ctx{ "container": name, diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/lxd/containers_get.go juju-core-2.0.0/src/github.com/lxc/lxd/lxd/containers_get.go --- juju-core-2.0~beta15/src/github.com/lxc/lxd/lxd/containers_get.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/lxd/containers_get.go 2016-10-13 14:31:53.000000000 +0000 @@ -15,7 +15,7 @@ return SyncResponse(true, result) } if !isDbLockedError(err) { - shared.Debugf("DBERR: containersGet: error %q", err) + shared.LogDebugf("DBERR: containersGet: error %q", err) 
return InternalError(err) } // 1 s may seem drastic, but we really don't want to thrash @@ -23,7 +23,7 @@ time.Sleep(100 * time.Millisecond) } - shared.Debugf("DBERR: containersGet, db is locked") + shared.LogDebugf("DBERR: containersGet, db is locked") shared.PrintStack() return InternalError(fmt.Errorf("DB is locked")) } @@ -69,7 +69,7 @@ return nil, err } - cts, err := c.Render() + cts, _, err := c.Render() if err != nil { return nil, err } diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/lxd/containers.go juju-core-2.0.0/src/github.com/lxc/lxd/lxd/containers.go --- juju-core-2.0~beta15/src/github.com/lxc/lxd/lxd/containers.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/lxd/containers.go 2016-10-13 14:31:53.000000000 +0000 @@ -5,6 +5,7 @@ "os" "sort" "strconv" + "strings" "sync" "syscall" "time" @@ -28,6 +29,7 @@ put: containerPut, delete: containerDelete, post: containerPost, + patch: containerPatch, } var containerStateCmd = Command{ @@ -84,6 +86,7 @@ } func containersRestart(d *Daemon) error { + // Get all the containers result, err := dbContainersList(d.db, cTypeRegular) if err != nil { return err @@ -102,6 +105,7 @@ sort.Sort(containerAutostartList(containers)) + // Restart the containers for _, c := range containers { config := c.ExpandedConfig() lastState := config["volatile.last_state.power"] @@ -109,7 +113,7 @@ autoStart := config["boot.autostart"] autoStartDelay := config["boot.autostart.delay"] - if lastState == "RUNNING" || autoStart == "true" { + if shared.IsTrue(autoStart) || (autoStart == "" && lastState == "RUNNING") { if c.IsRunning() { continue } @@ -123,38 +127,65 @@ } } + // Reset the recorded state (to ensure it's up to date) _, err = dbExec(d.db, "DELETE FROM containers_config WHERE key='volatile.last_state.power'") if err != nil { return err } + for _, c := range containers { + err = c.ConfigKeySet("volatile.last_state.power", c.State()) + if err != nil { + return err + } + } + return nil } func 
containersShutdown(d *Daemon) error { + var wg sync.WaitGroup + + // Get all the containers results, err := dbContainersList(d.db, cTypeRegular) if err != nil { return err } - var wg sync.WaitGroup + // Reset all container states + _, err = dbExec(d.db, "DELETE FROM containers_config WHERE key='volatile.last_state.power'") + if err != nil { + return err + } for _, r := range results { + // Load the container c, err := containerLoadByName(d, r) if err != nil { return err } + // Record the current state err = c.ConfigKeySet("volatile.last_state.power", c.State()) - if err != nil { return err } + // Stop the container if c.IsRunning() { + // Determinate how long to wait for the container to shutdown cleanly + var timeoutSeconds int + value, ok := c.ExpandedConfig()["boot.host_shutdown_timeout"] + if ok { + timeoutSeconds, _ = strconv.Atoi(value) + } else { + timeoutSeconds = 30 + } + + // Stop the container wg.Add(1) go func() { - c.Shutdown(time.Second * 30) + c.Shutdown(time.Second * time.Duration(timeoutSeconds)) c.Stop(false) wg.Done() }() @@ -166,7 +197,7 @@ } func containerDeleteSnapshots(d *Daemon, cname string) error { - shared.Log.Debug("containerDeleteSnapshots", + shared.LogDebug("containerDeleteSnapshots", log.Ctx{"container": cname}) results, err := dbContainerGetSnapshots(d.db, cname) @@ -177,7 +208,7 @@ for _, sname := range results { sc, err := containerLoadByName(d, sname) if err != nil { - shared.Log.Error( + shared.LogError( "containerDeleteSnapshots: Failed to load the snapshotcontainer", log.Ctx{"container": cname, "snapshot": sname}) @@ -185,7 +216,7 @@ } if err := sc.Delete(); err != nil { - shared.Log.Error( + shared.LogError( "containerDeleteSnapshots: Failed to delete a snapshotcontainer", log.Ctx{"container": cname, "snapshot": sname, "err": err}) } @@ -198,8 +229,6 @@ * This is called by lxd when called as "lxd forkstart " * 'forkstart' is used instead of just 'start' in the hopes that people * do not accidentally type 'lxd start' instead 
of 'lxc start' - * - * We expect to read the lxcconfig over fd 3. */ func startContainer(args []string) error { if len(args) != 4 { @@ -241,8 +270,91 @@ syscall.Dup3(int(logFile.Fd()), 2, 0) } - // Move the config so we can inspect it on failure - shared.FileMove(configPath, shared.LogPath(name, "lxc.conf")) - return c.Start() } + +/* + * This is called by lxd when called as "lxd forkexec " + */ +func execContainer(args []string) (int, error) { + if len(args) < 6 { + return -1, fmt.Errorf("Bad arguments: %q", args) + } + + name := args[1] + lxcpath := args[2] + configPath := args[3] + + c, err := lxc.NewContainer(name, lxcpath) + if err != nil { + return -1, fmt.Errorf("Error initializing container for start: %q", err) + } + + err = c.LoadConfigFile(configPath) + if err != nil { + return -1, fmt.Errorf("Error opening startup config file: %q", err) + } + + syscall.Dup3(int(os.Stdin.Fd()), 200, 0) + syscall.Dup3(int(os.Stdout.Fd()), 201, 0) + syscall.Dup3(int(os.Stderr.Fd()), 202, 0) + + syscall.Close(int(os.Stdin.Fd())) + syscall.Close(int(os.Stdout.Fd())) + syscall.Close(int(os.Stderr.Fd())) + + opts := lxc.DefaultAttachOptions + opts.ClearEnv = true + opts.StdinFd = 200 + opts.StdoutFd = 201 + opts.StderrFd = 202 + + logPath := shared.LogPath(name, "forkexec.log") + if shared.PathExists(logPath) { + os.Remove(logPath) + } + + logFile, err := os.OpenFile(logPath, os.O_WRONLY|os.O_CREATE|os.O_SYNC, 0644) + if err == nil { + syscall.Dup3(int(logFile.Fd()), 1, 0) + syscall.Dup3(int(logFile.Fd()), 2, 0) + } + + env := []string{} + cmd := []string{} + + section := "" + for _, arg := range args[5:len(args)] { + // The "cmd" section must come last as it may contain a -- + if arg == "--" && section != "cmd" { + section = "" + continue + } + + if section == "" { + section = arg + continue + } + + if section == "env" { + fields := strings.SplitN(arg, "=", 2) + if len(fields) == 2 && fields[0] == "HOME" { + opts.Cwd = fields[1] + } + env = append(env, arg) + } else if section 
== "cmd" { + cmd = append(cmd, arg) + } else { + return -1, fmt.Errorf("Invalid exec section: %s", section) + } + } + + opts.Env = env + + status, err := c.RunCommandStatus(cmd, opts) + if err != nil { + return -1, fmt.Errorf("Failed running command: %q", err) + } + + return status >> 8, nil +} diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/lxd/container_snapshot.go juju-core-2.0.0/src/github.com/lxc/lxd/lxd/container_snapshot.go --- juju-core-2.0~beta15/src/github.com/lxc/lxd/lxd/container_snapshot.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/lxd/container_snapshot.go 2016-10-13 14:31:53.000000000 +0000 @@ -44,7 +44,7 @@ url := fmt.Sprintf("/%s/containers/%s/snapshots/%s", shared.APIVersion, cname, snapName) resultString = append(resultString, url) } else { - render, err := snap.Render() + render, _, err := snap.Render() if err != nil { continue } @@ -174,7 +174,7 @@ case "GET": return snapshotGet(sc, snapshotName) case "POST": - return snapshotPost(r, sc, containerName) + return snapshotPost(d, r, sc, containerName) case "DELETE": return snapshotDelete(sc, snapshotName) default: @@ -183,7 +183,7 @@ } func snapshotGet(sc container, name string) Response { - render, err := sc.Render() + render, _, err := sc.Render() if err != nil { return SmartError(err) } @@ -191,7 +191,7 @@ return SyncResponse(true, render.(*shared.SnapshotInfo)) } -func snapshotPost(r *http.Request, sc container, containerName string) Response { +func snapshotPost(d *Daemon, r *http.Request, sc container, containerName string) Response { raw := shared.Jmap{} if err := json.NewDecoder(r.Body).Decode(&raw); err != nil { return BadRequest(err) @@ -220,8 +220,16 @@ return BadRequest(err) } + fullName := containerName + shared.SnapshotDelimiter + newName + + // Check that the name isn't already in use + id, _ := dbContainerId(d.db, fullName) + if id > 0 { + return Conflict + } + rename := func(op *operation) error { - return sc.Rename(containerName + 
shared.SnapshotDelimiter + newName) + return sc.Rename(fullName) } resources := map[string][]string{} diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/lxd/containers_post.go juju-core-2.0.0/src/github.com/lxc/lxd/lxd/containers_post.go --- juju-core-2.0~beta15/src/github.com/lxc/lxd/lxd/containers_post.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/lxd/containers_post.go 2016-10-13 14:31:53.000000000 +0000 @@ -20,11 +20,12 @@ Certificate string `json:"certificate"` /* for "image" type */ - Alias string `json:"alias"` - Fingerprint string `json:"fingerprint"` - Server string `json:"server"` - Secret string `json:"secret"` - Protocol string `json:"protocol"` + Alias string `json:"alias"` + Fingerprint string `json:"fingerprint"` + Properties map[string]string `json:"properties"` + Server string `json:"server"` + Secret string `json:"secret"` + Protocol string `json:"protocol"` /* * for "migration" and "copy" types, as an optimization users can @@ -42,6 +43,8 @@ /* for "copy" type */ Source string `json:"source"` + /* for "migration" type. Whether the migration is live. 
*/ + Live bool `json:"live"` } type containerPostReq struct { @@ -73,14 +76,57 @@ } } else if req.Source.Fingerprint != "" { hash = req.Source.Fingerprint + } else if req.Source.Properties != nil { + if req.Source.Server != "" { + return BadRequest(fmt.Errorf("Property match is only supported for local images")) + } + + hashes, err := dbImagesGet(d.db, false) + if err != nil { + return InternalError(err) + } + + var image *shared.ImageInfo + + for _, hash := range hashes { + _, img, err := dbImageGet(d.db, hash, false, true) + if err != nil { + continue + } + + if image != nil && img.CreationDate.Before(image.CreationDate) { + continue + } + + match := true + for key, value := range req.Source.Properties { + if img.Properties[key] != value { + match = false + break + } + } + + if !match { + continue + } + + image = img + } + + if image == nil { + return BadRequest(fmt.Errorf("No matching image could be found")) + } + + hash = image.Fingerprint } else { - return BadRequest(fmt.Errorf("must specify one of alias or fingerprint for init from image")) + return BadRequest(fmt.Errorf("Must specify one of alias, fingerprint or properties for init from image")) } run := func(op *operation) error { if req.Source.Server != "" { - updateCached, _ := d.ConfigValueGet("images.auto_update_cached") - hash, err = d.ImageDownload(op, req.Source.Server, req.Source.Protocol, req.Source.Certificate, req.Source.Secret, hash, true, updateCached != "false") + hash, err = d.ImageDownload( + op, req.Source.Server, req.Source.Protocol, req.Source.Certificate, req.Source.Secret, + hash, true, daemonConfig["images.auto_update_cached"].GetBool()) if err != nil { return err } @@ -157,7 +203,7 @@ } func createFromMigration(d *Daemon, req *containerPostReq) Response { - if req.Source.Mode != "pull" { + if req.Source.Mode != "pull" && req.Source.Mode != "push" { return NotImplemented } @@ -166,85 +212,95 @@ architecture = 0 } - run := func(op *operation) error { - args := containerArgs{ - 
Architecture: architecture, - BaseImage: req.Source.BaseImage, - Config: req.Config, - Ctype: cTypeRegular, - Devices: req.Devices, - Ephemeral: req.Ephemeral, - Name: req.Name, - Profiles: req.Profiles, - } - - var c container - _, _, err := dbImageGet(d.db, req.Source.BaseImage, false, true) - - /* Only create a container from an image if we're going to - * rsync over the top of it. In the case of a better file - * transfer mechanism, let's just use that. - * - * TODO: we could invent some negotiation here, where if the - * source and sink both have the same image, we can clone from - * it, but we have to know before sending the snapshot that - * we're sending the whole thing or just a delta from the - * image, so one extra negotiation round trip is needed. An - * alternative is to move actual container object to a later - * point and just negotiate it over the migration control - * socket. Anyway, it'll happen later :) - */ - if err == nil && d.Storage.MigrationType() == MigrationFSType_RSYNC { - c, err = containerCreateFromImage(d, args, req.Source.BaseImage) - if err != nil { - return err - } - } else { - c, err = containerCreateAsEmpty(d, args) - if err != nil { - return err - } - } + args := containerArgs{ + Architecture: architecture, + BaseImage: req.Source.BaseImage, + Config: req.Config, + Ctype: cTypeRegular, + Devices: req.Devices, + Ephemeral: req.Ephemeral, + Name: req.Name, + Profiles: req.Profiles, + } - var cert *x509.Certificate - if req.Source.Certificate != "" { - certBlock, _ := pem.Decode([]byte(req.Source.Certificate)) + var c container + _, _, err = dbImageGet(d.db, req.Source.BaseImage, false, true) - cert, err = x509.ParseCertificate(certBlock.Bytes) - if err != nil { - return err - } + /* Only create a container from an image if we're going to + * rsync over the top of it. In the case of a better file + * transfer mechanism, let's just use that. 
+ * + * TODO: we could invent some negotiation here, where if the + * source and sink both have the same image, we can clone from + * it, but we have to know before sending the snapshot that + * we're sending the whole thing or just a delta from the + * image, so one extra negotiation round trip is needed. An + * alternative is to move actual container object to a later + * point and just negotiate it over the migration control + * socket. Anyway, it'll happen later :) + */ + if err == nil && d.Storage.MigrationType() == MigrationFSType_RSYNC { + c, err = containerCreateFromImage(d, args, req.Source.BaseImage) + if err != nil { + return InternalError(err) } - - config, err := shared.GetTLSConfig("", "", cert) + } else { + c, err = containerCreateAsEmpty(d, args) if err != nil { - c.Delete() - return err + return InternalError(err) } + } - migrationArgs := MigrationSinkArgs{ - Url: req.Source.Operation, - Dialer: websocket.Dialer{ - TLSClientConfig: config, - NetDial: shared.RFC3493Dialer}, - Container: c, - Secrets: req.Source.Websockets, + var cert *x509.Certificate + if req.Source.Certificate != "" { + certBlock, _ := pem.Decode([]byte(req.Source.Certificate)) + if certBlock == nil { + return InternalError(fmt.Errorf("Invalid certificate")) } - sink, err := NewMigrationSink(&migrationArgs) + cert, err = x509.ParseCertificate(certBlock.Bytes) if err != nil { - c.Delete() - return err + return InternalError(err) } + } + + config, err := shared.GetTLSConfig("", "", "", cert) + if err != nil { + c.Delete() + return InternalError(err) + } + push := false + if req.Source.Mode == "push" { + push = true + } + + migrationArgs := MigrationSinkArgs{ + Url: req.Source.Operation, + Dialer: websocket.Dialer{ + TLSClientConfig: config, + NetDial: shared.RFC3493Dialer}, + Container: c, + Secrets: req.Source.Websockets, + Push: push, + Live: req.Source.Live, + } + + sink, err := NewMigrationSink(&migrationArgs) + if err != nil { + c.Delete() + return InternalError(err) + } + + 
run := func(op *operation) error { // Start the storage for this container (LVM mount/umount) c.StorageStart() // And finaly run the migration. - err = sink() + err = sink.Do(op) if err != nil { c.StorageStop() - shared.Log.Error("Error during migration sink", "err", err) + shared.LogError("Error during migration sink", log.Ctx{"err": err}) c.Delete() return fmt.Errorf("Error transferring container data: %s", err) } @@ -262,9 +318,17 @@ resources := map[string][]string{} resources["containers"] = []string{req.Name} - op, err := operationCreate(operationClassTask, resources, nil, run, nil, nil) - if err != nil { - return InternalError(err) + var op *operation + if push { + op, err = operationCreate(operationClassWebsocket, resources, sink.Metadata(), run, nil, sink.Connect) + if err != nil { + return InternalError(err) + } + } else { + op, err = operationCreate(operationClassTask, resources, nil, run, nil, nil) + if err != nil { + return InternalError(err) + } } return OperationResponse(op) @@ -289,7 +353,7 @@ for key, value := range sourceConfig { if len(key) > 8 && key[0:8] == "volatile" && key[9:] != "base_image" { - shared.Log.Debug("Skipping volatile key from copy source", + shared.LogDebug("Skipping volatile key from copy source", log.Ctx{"key": key}) continue } @@ -339,7 +403,7 @@ } func containersPost(d *Daemon, r *http.Request) Response { - shared.Debugf("Responding to container create") + shared.LogDebugf("Responding to container create") req := containerPostReq{} if err := json.NewDecoder(r.Body).Decode(&req); err != nil { @@ -347,8 +411,24 @@ } if req.Name == "" { - req.Name = strings.ToLower(petname.Generate(2, "-")) - shared.Debugf("No name provided, creating %s", req.Name) + cs, err := dbContainersList(d.db, cTypeRegular) + if err != nil { + return InternalError(err) + } + + i := 0 + for { + i++ + req.Name = strings.ToLower(petname.Generate(2, "-")) + if !shared.StringInSlice(req.Name, cs) { + break + } + + if i > 100 { + return 
InternalError(fmt.Errorf("couldn't generate a new unique name after 100 tries")) + } + } + shared.LogDebugf("No name provided, creating %s", req.Name) } if req.Devices == nil { diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/lxd/container_state.go juju-core-2.0.0/src/github.com/lxc/lxd/lxd/container_state.go --- juju-core-2.0~beta15/src/github.com/lxc/lxd/lxd/container_state.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/lxd/container_state.go 2016-10-13 14:31:53.000000000 +0000 @@ -46,6 +46,9 @@ return BadRequest(err) } + // Don't mess with containers while in setup mode + <-d.readyChan + c, err := containerLoadByName(d, name) if err != nil { return SmartError(err) @@ -85,6 +88,13 @@ } } else { do = func(op *operation) error { + if c.IsFrozen() { + err := c.Unfreeze() + if err != nil { + return err + } + } + err = c.Shutdown(time.Duration(raw.Timeout) * time.Second) if err != nil { return err @@ -105,11 +115,16 @@ return err } } else { + if c.IsFrozen() { + return fmt.Errorf("container is not running") + } + err = c.Shutdown(time.Duration(raw.Timeout) * time.Second) if err != nil { return err } } + err = c.Start(false) if err != nil { return err diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/lxd/container_test.go juju-core-2.0.0/src/github.com/lxc/lxd/lxd/container_test.go --- juju-core-2.0~beta15/src/github.com/lxc/lxd/lxd/container_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/lxd/container_test.go 2016-10-13 14:31:53.000000000 +0000 @@ -81,7 +81,7 @@ suite.True(c.IsPrivileged(), "This container should be privileged.") - out, err := c.Render() + out, _, err := c.Render() suite.Req.Nil(err) state := out.(*shared.ContainerInfo) diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/lxd/daemon_config.go juju-core-2.0.0/src/github.com/lxc/lxd/lxd/daemon_config.go --- juju-core-2.0~beta15/src/github.com/lxc/lxd/lxd/daemon_config.go 1970-01-01 00:00:00.000000000 +0000 +++ 
juju-core-2.0.0/src/github.com/lxc/lxd/lxd/daemon_config.go 2016-10-13 14:31:53.000000000 +0000 @@ -0,0 +1,325 @@ +package main + +import ( + "crypto/rand" + "database/sql" + "encoding/hex" + "fmt" + "io" + "os/exec" + "strconv" + "strings" + "sync" + + "golang.org/x/crypto/scrypt" + log "gopkg.in/inconshreveable/log15.v2" + + "github.com/lxc/lxd/shared" +) + +var daemonConfigLock sync.Mutex +var daemonConfig map[string]*daemonConfigKey + +type daemonConfigKey struct { + valueType string + defaultValue string + validValues []string + currentValue string + hiddenValue bool + + validator func(d *Daemon, key string, value string) error + setter func(d *Daemon, key string, value string) (string, error) + trigger func(d *Daemon, key string, value string) +} + +func (k *daemonConfigKey) name() string { + name := "" + + // Look for a matching entry in daemonConfig + daemonConfigLock.Lock() + for key, value := range daemonConfig { + if value == k { + name = key + break + } + } + daemonConfigLock.Unlock() + + return name +} + +func (k *daemonConfigKey) Validate(d *Daemon, value string) error { + // No need to validate when unsetting + if value == "" { + return nil + } + + // Validate booleans + if k.valueType == "bool" && !shared.StringInSlice(strings.ToLower(value), []string{"true", "false", "1", "0", "yes", "no", "on", "off"}) { + return fmt.Errorf("Invalid value for a boolean: %s", value) + } + + // Validate integers + if k.valueType == "int" { + _, err := strconv.ParseInt(value, 10, 64) + if err != nil { + return err + } + } + + // Check against valid values + if k.validValues != nil && !shared.StringInSlice(value, k.validValues) { + return fmt.Errorf("Invalid value, only the following values are allowed: %s", k.validValues) + } + + // Run external validation function + if k.validator != nil { + err := k.validator(d, k.name(), value) + if err != nil { + return err + } + } + + return nil +} + +func (k *daemonConfigKey) Set(d *Daemon, value string) error { + var name 
string + + // Check if we are actually changing things + oldValue := k.currentValue + if oldValue == value { + return nil + } + + // Validate the new value + err := k.Validate(d, value) + if err != nil { + return err + } + + // Run external setting function + if k.setter != nil { + value, err = k.setter(d, k.name(), value) + if err != nil { + return err + } + } + + // Get the configuration key and make sure daemonConfig is sane + name = k.name() + if name == "" { + return fmt.Errorf("Corrupted configuration cache") + } + + // Actually apply the change + daemonConfigLock.Lock() + k.currentValue = value + daemonConfigLock.Unlock() + + err = dbConfigValueSet(d.db, name, value) + if err != nil { + return err + } + + return nil +} + +func (k *daemonConfigKey) Get() string { + value := k.currentValue + + // Get the default value if not set + if value == "" { + value = k.defaultValue + } + + return value +} + +func (k *daemonConfigKey) GetBool() bool { + value := k.currentValue + + // Get the default value if not set + if value == "" { + value = k.defaultValue + } + + // Convert to boolean + return shared.IsTrue(value) +} + +func (k *daemonConfigKey) GetInt64() int64 { + value := k.currentValue + + // Get the default value if not set + if value == "" { + value = k.defaultValue + } + + // Convert to int64 + ret, _ := strconv.ParseInt(value, 10, 64) + return ret +} + +func daemonConfigInit(db *sql.DB) error { + // Set all the keys + daemonConfig = map[string]*daemonConfigKey{ + "core.https_address": &daemonConfigKey{valueType: "string", setter: daemonConfigSetAddress}, + "core.https_allowed_headers": &daemonConfigKey{valueType: "string"}, + "core.https_allowed_methods": &daemonConfigKey{valueType: "string"}, + "core.https_allowed_origin": &daemonConfigKey{valueType: "string"}, + "core.https_allowed_credentials": &daemonConfigKey{valueType: "bool"}, + "core.proxy_http": &daemonConfigKey{valueType: "string", setter: daemonConfigSetProxy}, + "core.proxy_https": 
&daemonConfigKey{valueType: "string", setter: daemonConfigSetProxy}, + "core.proxy_ignore_hosts": &daemonConfigKey{valueType: "string", setter: daemonConfigSetProxy}, + "core.trust_password": &daemonConfigKey{valueType: "string", hiddenValue: true, setter: daemonConfigSetPassword}, + + "images.auto_update_cached": &daemonConfigKey{valueType: "bool", defaultValue: "true"}, + "images.auto_update_interval": &daemonConfigKey{valueType: "int", defaultValue: "6"}, + "images.compression_algorithm": &daemonConfigKey{valueType: "string", validator: daemonConfigValidateCompression, defaultValue: "gzip"}, + "images.remote_cache_expiry": &daemonConfigKey{valueType: "int", defaultValue: "10", trigger: daemonConfigTriggerExpiry}, + + "storage.lvm_fstype": &daemonConfigKey{valueType: "string", defaultValue: "ext4", validValues: []string{"ext4", "xfs"}}, + "storage.lvm_mount_options": &daemonConfigKey{valueType: "string", defaultValue: "discard"}, + "storage.lvm_thinpool_name": &daemonConfigKey{valueType: "string", defaultValue: "LXDPool", validator: storageLVMValidateThinPoolName}, + "storage.lvm_vg_name": &daemonConfigKey{valueType: "string", validator: storageLVMValidateVolumeGroupName, setter: daemonConfigSetStorage}, + "storage.lvm_volume_size": &daemonConfigKey{valueType: "string", defaultValue: "10GiB"}, + "storage.zfs_pool_name": &daemonConfigKey{valueType: "string", validator: storageZFSValidatePoolName, setter: daemonConfigSetStorage}, + "storage.zfs_remove_snapshots": &daemonConfigKey{valueType: "bool"}, + "storage.zfs_use_refquota": &daemonConfigKey{valueType: "bool"}, + } + + // Load the values from the DB + dbValues, err := dbConfigValuesGet(db) + if err != nil { + return err + } + + daemonConfigLock.Lock() + for k, v := range dbValues { + _, ok := daemonConfig[k] + if !ok { + shared.LogError("Found invalid configuration key in database", log.Ctx{"key": k}) + } + + daemonConfig[k].currentValue = v + } + daemonConfigLock.Unlock() + + return nil +} + +func 
daemonConfigRender() map[string]interface{} { + config := map[string]interface{}{} + + // Turn the config into a JSON-compatible map + for k, v := range daemonConfig { + value := v.Get() + if value != v.defaultValue { + if v.hiddenValue { + config[k] = true + } else { + config[k] = value + } + } + } + + return config +} + +func daemonConfigSetPassword(d *Daemon, key string, value string) (string, error) { + // Nothing to do on unset + if value == "" { + return value, nil + } + + // Hash the password + buf := make([]byte, 32) + _, err := io.ReadFull(rand.Reader, buf) + if err != nil { + return "", err + } + + hash, err := scrypt.Key([]byte(value), buf, 1<<14, 8, 1, 64) + if err != nil { + return "", err + } + + buf = append(buf, hash...) + value = hex.EncodeToString(buf) + + return value, nil +} + +func daemonConfigSetStorage(d *Daemon, key string, value string) (string, error) { + // The storage driver looks at daemonConfig so just set it temporarily + daemonConfigLock.Lock() + oldValue := daemonConfig[key].Get() + daemonConfig[key].currentValue = value + daemonConfigLock.Unlock() + + defer func() { + daemonConfigLock.Lock() + daemonConfig[key].currentValue = oldValue + daemonConfigLock.Unlock() + }() + + // Update the current storage driver + err := d.SetupStorageDriver() + if err != nil { + return "", err + } + + return value, nil +} + +func daemonConfigSetAddress(d *Daemon, key string, value string) (string, error) { + // Update the current https address + err := d.UpdateHTTPsPort(value) + if err != nil { + return "", err + } + + return value, nil +} + +func daemonConfigSetProxy(d *Daemon, key string, value string) (string, error) { + // Get the current config + config := map[string]string{} + config["core.proxy_https"] = daemonConfig["core.proxy_https"].Get() + config["core.proxy_http"] = daemonConfig["core.proxy_http"].Get() + config["core.proxy_ignore_hosts"] = daemonConfig["core.proxy_ignore_hosts"].Get() + + // Apply the change + config[key] = value + + // 
Update the cached proxy function + d.proxy = shared.ProxyFromConfig( + config["core.proxy_https"], + config["core.proxy_http"], + config["core.proxy_ignore_hosts"], + ) + + // Clear the simplestreams cache as it's tied to the old proxy config + imageStreamCacheLock.Lock() + for k, _ := range imageStreamCache { + delete(imageStreamCache, k) + } + imageStreamCacheLock.Unlock() + + return value, nil +} + +func daemonConfigTriggerExpiry(d *Daemon, key string, value string) { + // Trigger an image pruning run + d.pruneChan <- true +} + +func daemonConfigValidateCompression(d *Daemon, key string, value string) error { + if value == "none" { + return nil + } + + _, err := exec.LookPath(value) + return err +} diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/lxd/daemon.go juju-core-2.0.0/src/github.com/lxc/lxd/lxd/daemon.go --- juju-core-2.0~beta15/src/github.com/lxc/lxd/lxd/daemon.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/lxd/daemon.go 2016-10-13 14:31:53.000000000 +0000 @@ -2,7 +2,6 @@ import ( "bytes" - "crypto/rand" "crypto/tls" "crypto/x509" "database/sql" @@ -10,6 +9,7 @@ "encoding/pem" "fmt" "io" + "io/ioutil" "net" "net/http" "net/url" @@ -24,7 +24,6 @@ "golang.org/x/crypto/scrypt" - "github.com/coreos/go-systemd/activation" "github.com/gorilla/mux" _ "github.com/mattn/go-sqlite3" "github.com/syndtr/gocapability/capability" @@ -41,10 +40,12 @@ var aaAdmin = true var aaAvailable = true var aaConfined = false +var aaStacking = false // CGroup var cgBlkioController = false var cgCpuController = false +var cgCpuacctController = false var cgCpusetController = false var cgDevicesController = false var cgMemoryController = false @@ -55,11 +56,6 @@ // UserNS var runningInUserns = false -const ( - pwSaltBytes = 32 - pwHashBytes = 64 -) - type Socket struct { Socket net.Listener CloseOnExit bool @@ -80,7 +76,6 @@ pruneChan chan bool shutdownChan chan bool resetAutoUpdateChan chan bool - execPath string Storage storage @@ -89,8 
+84,6 @@ devlxd *net.UnixListener - configValues map[string]string - MockMode bool SetupMode bool @@ -111,14 +104,7 @@ put func(d *Daemon, r *http.Request) Response post func(d *Daemon, r *http.Request) Response delete func(d *Daemon, r *http.Request) Response -} - -func (d *Daemon) updateProxy() { - d.proxy = shared.ProxyFromConfig( - d.configValues["core.proxy_https"], - d.configValues["core.proxy_http"], - d.configValues["core.proxy_ignore_hosts"], - ) + patch func(d *Daemon, r *http.Request) Response } func (d *Daemon) httpGetSync(url string, certificate string) (*lxd.Response, error) { @@ -127,6 +113,9 @@ var cert *x509.Certificate if certificate != "" { certBlock, _ := pem.Decode([]byte(certificate)) + if certBlock == nil { + return nil, fmt.Errorf("Invalid certificate") + } cert, err = x509.ParseCertificate(certBlock.Bytes) if err != nil { @@ -134,15 +123,16 @@ } } - tlsConfig, err := shared.GetTLSConfig("", "", cert) + tlsConfig, err := shared.GetTLSConfig("", "", "", cert) if err != nil { return nil, err } tr := &http.Transport{ - TLSClientConfig: tlsConfig, - Dial: shared.RFC3493Dialer, - Proxy: d.proxy, + TLSClientConfig: tlsConfig, + Dial: shared.RFC3493Dialer, + Proxy: d.proxy, + DisableKeepAlives: true, } myhttp := http.Client{ @@ -179,6 +169,9 @@ var cert *x509.Certificate if certificate != "" { certBlock, _ := pem.Decode([]byte(certificate)) + if certBlock == nil { + return nil, fmt.Errorf("Invalid certificate") + } cert, err = x509.ParseCertificate(certBlock.Bytes) if err != nil { @@ -186,15 +179,16 @@ } } - tlsConfig, err := shared.GetTLSConfig("", "", cert) + tlsConfig, err := shared.GetTLSConfig("", "", "", cert) if err != nil { return nil, err } tr := &http.Transport{ - TLSClientConfig: tlsConfig, - Dial: shared.RFC3493Dialer, - Proxy: d.proxy, + TLSClientConfig: tlsConfig, + Dial: shared.RFC3493Dialer, + Proxy: d.proxy, + DisableKeepAlives: true, } myhttp := http.Client{ Transport: tr, @@ -227,9 +221,8 @@ func readMyCert() (string, string, 
error) { certf := shared.VarPath("server.crt") keyf := shared.VarPath("server.key") - shared.Log.Info("Looking for existing certificates", log.Ctx{"cert": certf, "key": keyf}) - - err := shared.FindOrGenCert(certf, keyf) + shared.LogDebug("Looking for existing certificates", log.Ctx{"cert": certf, "key": keyf}) + err := shared.FindOrGenCert(certf, keyf, false) return certf, keyf, err } @@ -239,14 +232,17 @@ // Unix socket return true } + if r.TLS == nil { return false } + for i := range r.TLS.PeerCertificates { if d.CheckTrustState(*r.TLS.PeerCertificates[i]) { return true } } + return false } @@ -263,6 +259,7 @@ func (d *Daemon) isRecursionRequest(r *http.Request) bool { recursionStr := r.FormValue("recursion") + recursion, err := strconv.Atoi(recursionStr) if err != nil { return false @@ -283,19 +280,19 @@ w.Header().Set("Content-Type", "application/json") if d.isTrustedClient(r) { - shared.Log.Info( + shared.LogDebug( "handling", log.Ctx{"method": r.Method, "url": r.URL.RequestURI(), "ip": r.RemoteAddr}) } else if r.Method == "GET" && c.untrustedGet { - shared.Log.Info( + shared.LogDebug( "allowing untrusted GET", log.Ctx{"url": r.URL.RequestURI(), "ip": r.RemoteAddr}) } else if r.Method == "POST" && c.untrustedPost { - shared.Log.Info( + shared.LogDebug( "allowing untrusted POST", log.Ctx{"url": r.URL.RequestURI(), "ip": r.RemoteAddr}) } else { - shared.Log.Warn( + shared.LogWarn( "rejecting request from untrusted client", log.Ctx{"ip": r.RemoteAddr}) Forbidden.Render(w) @@ -335,6 +332,10 @@ if c.delete != nil { resp = c.delete(d, r) } + case "PATCH": + if c.patch != nil { + resp = c.patch(d, r) + } default: resp = NotFound } @@ -342,7 +343,7 @@ if err := resp.Render(w); err != nil { err := InternalError(err).Render(w) if err != nil { - shared.Log.Error("Failed writing error for error, giving up") + shared.LogErrorf("Failed writing error for error, giving up") } } @@ -361,34 +362,29 @@ } func (d *Daemon) SetupStorageDriver() error { - lvmVgName, err := 
d.ConfigValueGet("storage.lvm_vg_name") - if err != nil { - return fmt.Errorf("Couldn't read config: %s", err) - } + var err error - zfsPoolName, err := d.ConfigValueGet("storage.zfs_pool_name") - if err != nil { - return fmt.Errorf("Couldn't read config: %s", err) - } + lvmVgName := daemonConfig["storage.lvm_vg_name"].Get() + zfsPoolName := daemonConfig["storage.zfs_pool_name"].Get() if lvmVgName != "" { d.Storage, err = newStorage(d, storageTypeLvm) if err != nil { - shared.Logf("Could not initialize storage type LVM: %s - falling back to dir", err) + shared.LogErrorf("Could not initialize storage type LVM: %s - falling back to dir", err) } else { return nil } } else if zfsPoolName != "" { d.Storage, err = newStorage(d, storageTypeZfs) if err != nil { - shared.Logf("Could not initialize storage type ZFS: %s - falling back to dir", err) + shared.LogErrorf("Could not initialize storage type ZFS: %s - falling back to dir", err) } else { return nil } } else if d.BackingFs == "btrfs" { d.Storage, err = newStorage(d, storageTypeBtrfs) if err != nil { - shared.Logf("Could not initialize storage type btrfs: %s - falling back to dir", err) + shared.LogErrorf("Could not initialize storage type btrfs: %s - falling back to dir", err) } else { return nil } @@ -399,7 +395,22 @@ return err } +// have we setup shared mounts? 
+var sharedMounted bool +var sharedMountsLock sync.Mutex + func setupSharedMounts() error { + if sharedMounted { + return nil + } + + sharedMountsLock.Lock() + defer sharedMountsLock.Unlock() + + if sharedMounted { + return nil + } + path := shared.VarPath("shmounts") isShared, err := shared.IsOnSharedMount(path) @@ -410,6 +421,7 @@ if isShared { // / may already be ms-shared, or shmounts may have // been mounted by a previous lxd run + sharedMounted = true return nil } @@ -422,15 +434,16 @@ return err } + sharedMounted = true return nil } func (d *Daemon) ListenAddresses() ([]string, error) { addresses := make([]string, 0) - value, err := d.ConfigValueGet("core.https_address") - if err != nil || value == "" { - return addresses, err + value := daemonConfig["core.https_address"].Get() + if value == "" { + return addresses, nil } localHost, localPort, err := net.SplitHostPort(value) @@ -485,7 +498,9 @@ return addresses, nil } -func (d *Daemon) UpdateHTTPsPort(oldAddress string, newAddress string) error { +func (d *Daemon) UpdateHTTPsPort(newAddress string) error { + oldAddress := daemonConfig["core.https_address"].Get() + if oldAddress == newAddress { return nil } @@ -545,14 +560,8 @@ d.shutdownChan = make(chan bool) /* Set the executable path */ - absPath, err := os.Readlink("/proc/self/exe") - if err != nil { - return err - } - d.execPath = absPath - /* Set the LVM environment */ - err = os.Setenv("LVM_SUPPRESS_FD_WARNINGS", "1") + err := os.Setenv("LVM_SUPPRESS_FD_WARNINGS", "1") if err != nil { return err } @@ -567,13 +576,13 @@ /* Print welcome message */ if d.MockMode { - shared.Log.Info("LXD is starting in mock mode", + shared.LogInfo("LXD is starting in mock mode", log.Ctx{"path": shared.VarPath("")}) } else if d.SetupMode { - shared.Log.Info("LXD is starting in setup mode", + shared.LogInfo("LXD is starting in setup mode", log.Ctx{"path": shared.VarPath("")}) } else { - shared.Log.Info("LXD is starting in normal mode", + shared.LogInfo("LXD is starting in 
normal mode", log.Ctx{"path": shared.VarPath("")}) } @@ -584,31 +593,31 @@ if aaAvailable && os.Getenv("LXD_SECURITY_APPARMOR") == "false" { aaAvailable = false aaAdmin = false - shared.Log.Warn("AppArmor support has been manually disabled") + shared.LogWarnf("AppArmor support has been manually disabled") } if aaAvailable && !shared.IsDir("/sys/kernel/security/apparmor") { aaAvailable = false aaAdmin = false - shared.Log.Warn("AppArmor support has been disabled because of lack of kernel support") + shared.LogWarnf("AppArmor support has been disabled because of lack of kernel support") } _, err = exec.LookPath("apparmor_parser") if aaAvailable && err != nil { aaAvailable = false aaAdmin = false - shared.Log.Warn("AppArmor support has been disabled because 'apparmor_parser' couldn't be found") + shared.LogWarnf("AppArmor support has been disabled because 'apparmor_parser' couldn't be found") } /* Detect AppArmor admin support */ if aaAdmin && !haveMacAdmin() { aaAdmin = false - shared.Log.Warn("Per-container AppArmor profiles are disabled because the mac_admin capability is missing.") + shared.LogWarnf("Per-container AppArmor profiles are disabled because the mac_admin capability is missing.") } if aaAdmin && runningInUserns { aaAdmin = false - shared.Log.Warn("Per-container AppArmor profiles are disabled because LXD is running in an unprivileged container.") + shared.LogWarnf("Per-container AppArmor profiles are disabled because LXD is running in an unprivileged container.") } /* Detect AppArmor confinment */ @@ -616,49 +625,100 @@ profile := aaProfile() if profile != "unconfined" && profile != "" { aaConfined = true - shared.Log.Warn("Per-container AppArmor profiles are disabled because LXD is already protected by AppArmor.") + shared.LogWarnf("Per-container AppArmor profiles are disabled because LXD is already protected by AppArmor.") + } + } + + if aaAvailable { + canStack := func() bool { + contentBytes, err := 
ioutil.ReadFile("/sys/kernel/security/apparmor/features/domain/stack") + if err != nil { + return false + } + + if string(contentBytes) != "yes\n" { + return false + } + + contentBytes, err = ioutil.ReadFile("/sys/kernel/security/apparmor/features/domain/version") + if err != nil { + return false + } + + content := string(contentBytes) + + parts := strings.Split(strings.TrimSpace(content), ".") + + if len(parts) == 0 { + shared.LogWarn("unknown apparmor domain version", log.Ctx{"version": content}) + return false + } + + major, err := strconv.Atoi(parts[0]) + if err != nil { + shared.LogWarn("unknown apparmor domain version", log.Ctx{"version": content}) + return false + } + + minor := 0 + if len(parts) == 2 { + minor, err = strconv.Atoi(parts[1]) + if err != nil { + shared.LogWarn("unknown apparmor domain version", log.Ctx{"version": content}) + return false + } + } + + return major >= 1 && minor >= 2 } + + aaStacking = canStack() } /* Detect CGroup support */ cgBlkioController = shared.PathExists("/sys/fs/cgroup/blkio/") if !cgBlkioController { - shared.Log.Warn("Couldn't find the CGroup blkio controller, I/O limits will be ignored.") + shared.LogWarnf("Couldn't find the CGroup blkio controller, I/O limits will be ignored.") } cgCpuController = shared.PathExists("/sys/fs/cgroup/cpu/") if !cgCpuController { - shared.Log.Warn("Couldn't find the CGroup CPU controller, CPU time limits will be ignored.") + shared.LogWarnf("Couldn't find the CGroup CPU controller, CPU time limits will be ignored.") + } + + cgCpuacctController = shared.PathExists("/sys/fs/cgroup/cpuacct/") + if !cgCpuacctController { + shared.LogWarnf("Couldn't find the CGroup CPUacct controller, CPU accounting will not be available.") } cgCpusetController = shared.PathExists("/sys/fs/cgroup/cpuset/") if !cgCpusetController { - shared.Log.Warn("Couldn't find the CGroup CPUset controller, CPU pinning will be ignored.") + shared.LogWarnf("Couldn't find the CGroup CPUset controller, CPU pinning will be 
ignored.") } cgDevicesController = shared.PathExists("/sys/fs/cgroup/devices/") if !cgDevicesController { - shared.Log.Warn("Couldn't find the CGroup devices controller, device access control won't work.") + shared.LogWarnf("Couldn't find the CGroup devices controller, device access control won't work.") } cgMemoryController = shared.PathExists("/sys/fs/cgroup/memory/") if !cgMemoryController { - shared.Log.Warn("Couldn't find the CGroup memory controller, memory limits will be ignored.") + shared.LogWarnf("Couldn't find the CGroup memory controller, memory limits will be ignored.") } cgNetPrioController = shared.PathExists("/sys/fs/cgroup/net_prio/") if !cgNetPrioController { - shared.Log.Warn("Couldn't find the CGroup network class controller, network limits will be ignored.") + shared.LogWarnf("Couldn't find the CGroup network class controller, network limits will be ignored.") } cgPidsController = shared.PathExists("/sys/fs/cgroup/pids/") if !cgPidsController { - shared.Log.Warn("Couldn't find the CGroup pids controller, process limits will be ignored.") + shared.LogWarnf("Couldn't find the CGroup pids controller, process limits will be ignored.") } cgSwapAccounting = shared.PathExists("/sys/fs/cgroup/memory/memory.memsw.limit_in_bytes") if !cgSwapAccounting { - shared.Log.Warn("CGroup memory swap accounting is disabled, swap limits will be ignored.") + shared.LogWarnf("CGroup memory swap accounting is disabled, swap limits will be ignored.") } /* Get the list of supported architectures */ @@ -716,18 +776,18 @@ /* Detect the filesystem */ d.BackingFs, err = filesystemDetect(d.lxcpath) if err != nil { - shared.Log.Error("Error detecting backing fs", log.Ctx{"err": err}) + shared.LogError("Error detecting backing fs", log.Ctx{"err": err}) } /* Read the uid/gid allocation */ d.IdmapSet, err = shared.DefaultIdmapSet() if err != nil { - shared.Log.Warn("Error reading idmap", log.Ctx{"err": err.Error()}) - shared.Log.Warn("Only privileged containers will be able to 
run") + shared.LogWarn("Error reading idmap", log.Ctx{"err": err.Error()}) + shared.LogWarnf("Only privileged containers will be able to run") } else { - shared.Log.Info("Default uid/gid map:") + shared.LogInfof("Default uid/gid map:") for _, lxcmap := range d.IdmapSet.ToLxcString() { - shared.Log.Info(strings.TrimRight(" - "+lxcmap, "\n")) + shared.LogInfof(strings.TrimRight(" - "+lxcmap, "\n")) } } @@ -737,33 +797,62 @@ return err } - /* Setup the storage driver */ + /* Load all config values from the database */ + err = daemonConfigInit(d.db) + if err != nil { + return err + } + if !d.MockMode { + /* Setup the storage driver */ err = d.SetupStorageDriver() if err != nil { return fmt.Errorf("Failed to setup storage: %s", err) } - } - /* Load all config values from the database */ - _, err = d.ConfigValuesGet() - if err != nil { - return err + /* Apply all patches */ + err = patchesApplyAll(d) + if err != nil { + return err + } + + /* Setup the networks */ + err = networkStartup(d) + if err != nil { + return err + } } + /* Log expiry */ + go func() { + t := time.NewTicker(24 * time.Hour) + for { + shared.LogInfof("Expiring log files") + + err := d.ExpireLogs() + if err != nil { + shared.LogError("Failed to expire logs", log.Ctx{"err": err}) + } + + shared.LogInfof("Done expiring log files") + <-t.C + } + }() + /* set the initial proxy function based on config values in the DB */ - d.updateProxy() + d.proxy = shared.ProxyFromConfig( + daemonConfig["core.proxy_https"].Get(), + daemonConfig["core.proxy_http"].Get(), + daemonConfig["core.proxy_ignore_hosts"].Get(), + ) /* Setup /dev/lxd */ + shared.LogInfof("Starting /dev/lxd handler") d.devlxd, err = createAndBindDevLxd() if err != nil { return err } - if err := setupSharedMounts(); err != nil { - return err - } - if !d.MockMode { /* Start the scheduler */ go deviceEventListener(d) @@ -786,10 +875,25 @@ MinVersion: tls.VersionTLS12, MaxVersion: tls.VersionTLS12, CipherSuites: []uint16{ - 
tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, - tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256}, + tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, + tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA}, PreferServerCipherSuites: true, } + + if shared.PathExists(shared.VarPath("server.ca")) { + ca, err := shared.ReadCert(shared.VarPath("server.ca")) + if err != nil { + return err + } + + caPool := x509.NewCertPool() + caPool.AddCert(ca) + tlsConfig.RootCAs = caPool + tlsConfig.ClientCAs = caPool + + shared.LogInfof("LXD is in CA mode, only CA-signed certificates will be allowed") + } + tlsConfig.BuildNameToCertificate() d.tlsConfig = tlsConfig @@ -815,18 +919,14 @@ } d.mux.NotFoundHandler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - shared.Log.Debug("Sending top level 404", log.Ctx{"url": r.URL}) + shared.LogInfo("Sending top level 404", log.Ctx{"url": r.URL}) w.Header().Set("Content-Type", "application/json") NotFound.Render(w) }) - listeners, err := activation.Listeners(false) - if err != nil { - return err - } - + listeners := d.GetListeners() if len(listeners) > 0 { - shared.Log.Info("LXD is socket activated") + shared.LogInfof("LXD is socket activated") for _, listener := range listeners { if shared.PathExists(listener.Addr().String()) { @@ -837,7 +937,7 @@ } } } else { - shared.Log.Info("LXD isn't socket activated") + shared.LogInfof("LXD isn't socket activated") localSocketPath := shared.VarPath("unix.socket") @@ -846,7 +946,7 @@ if shared.PathExists(localSocketPath) { _, err := lxd.NewClient(&lxd.DefaultConfig, "local") if err != nil { - shared.Log.Debug("Detected stale unix socket, deleting") + shared.LogDebugf("Detected stale unix socket, deleting") // Connecting failed, so let's delete the socket and // listen on it ourselves. 
err = os.Remove(localSocketPath) @@ -889,11 +989,7 @@ d.UnixSocket = &Socket{Socket: unixl, CloseOnExit: true} } - listenAddr, err := d.ConfigValueGet("core.https_address") - if err != nil { - return err - } - + listenAddr := daemonConfig["core.https_address"].Get() if listenAddr != "" { _, _, err := net.SplitHostPort(listenAddr) if err != nil { @@ -902,10 +998,10 @@ tcpl, err := tls.Listen("tcp", listenAddr, d.tlsConfig) if err != nil { - shared.Log.Error("cannot listen on https socket, skipping...", log.Ctx{"err": err}) + shared.LogError("cannot listen on https socket, skipping...", log.Ctx{"err": err}) } else { if d.TCPSocket != nil { - shared.Log.Info("Replacing systemd TCP socket by configure one") + shared.LogInfof("Replacing inherited TCP socket with configured one") d.TCPSocket.Socket.Close() } d.TCPSocket = &Socket{Socket: tcpl, CloseOnExit: true} @@ -913,14 +1009,14 @@ } d.tomb.Go(func() error { - shared.Log.Info("REST API daemon:") + shared.LogInfof("REST API daemon:") if d.UnixSocket != nil { - shared.Log.Info(" - binding Unix socket", log.Ctx{"socket": d.UnixSocket.Socket.Addr()}) + shared.LogInfo(" - binding Unix socket", log.Ctx{"socket": d.UnixSocket.Socket.Addr()}) d.tomb.Go(func() error { return http.Serve(d.UnixSocket.Socket, &lxdHttpServer{d.mux, d}) }) } if d.TCPSocket != nil { - shared.Log.Info(" - binding TCP socket", log.Ctx{"socket": d.TCPSocket.Socket.Addr()}) + shared.LogInfo(" - binding TCP socket", log.Ctx{"socket": d.TCPSocket.Socket.Addr()}) d.tomb.Go(func() error { return http.Serve(d.TCPSocket.Socket, &lxdHttpServer{d.mux, d}) }) } @@ -967,18 +1063,9 @@ autoUpdateImages(d) for { - interval, _ := d.ConfigValueGet("images.auto_update_interval") - if interval == "" { - interval = "6" - } - - intervalInt, err := strconv.Atoi(interval) - if err != nil { - intervalInt = 0 - } - - if intervalInt > 0 { - timer := time.NewTimer(time.Duration(intervalInt) * time.Hour) + interval := daemonConfig["images.auto_update_interval"].GetInt64() + if 
interval > 0 { + timer := time.NewTimer(time.Duration(interval) * time.Hour) timeChan := timer.C select { @@ -1011,11 +1098,11 @@ func (d *Daemon) CheckTrustState(cert x509.Certificate) bool { for k, v := range d.clientCerts { if bytes.Compare(cert.Raw, v.Raw) == 0 { - shared.Log.Debug("Found cert", log.Ctx{"k": k}) + shared.LogDebug("Found cert", log.Ctx{"k": k}) return true } - shared.Log.Debug("Client cert != key", log.Ctx{"k": k}) } + return false } @@ -1047,34 +1134,37 @@ forceStop := false d.tomb.Kill(errStop) - shared.Log.Info("Stopping REST API handler:") + shared.LogInfof("Stopping REST API handler:") for _, socket := range []*Socket{d.TCPSocket, d.UnixSocket} { if socket == nil { continue } if socket.CloseOnExit { - shared.Log.Info(" - closing socket", log.Ctx{"socket": socket.Socket.Addr()}) + shared.LogInfo(" - closing socket", log.Ctx{"socket": socket.Socket.Addr()}) socket.Socket.Close() } else { - shared.Log.Info(" - skipping socket-activated socket", log.Ctx{"socket": socket.Socket.Addr()}) + shared.LogInfo(" - skipping socket-activated socket", log.Ctx{"socket": socket.Socket.Addr()}) forceStop = true } } if n, err := d.numRunningContainers(); err != nil || n == 0 { - shared.Log.Debug("Unmounting shmounts") + shared.LogInfof("Unmounting shmounts") syscall.Unmount(shared.VarPath("shmounts"), syscall.MNT_DETACH) + + shared.LogInfof("Done unmounting shmounts") } else { - shared.Debugf("Not unmounting shmounts (containers are still running)") + shared.LogDebugf("Not unmounting shmounts (containers are still running)") } - shared.Log.Debug("Closing the database") + shared.LogInfof("Closing the database") d.db.Close() - shared.Log.Debug("Stopping /dev/lxd handler") + shared.LogInfof("Stopping /dev/lxd handler") d.devlxd.Close() + shared.LogInfof("Stopped /dev/lxd handler") if d.MockMode || forceStop { return nil @@ -1088,158 +1178,150 @@ return err } -// ConfigKeyIsValid returns if the given key is a known config value. 
-func (d *Daemon) ConfigKeyIsValid(key string) bool { - switch key { - case "core.https_address": - return true - case "core.https_allowed_origin": - return true - case "core.https_allowed_methods": - return true - case "core.https_allowed_headers": - return true - case "core.proxy_https": - return true - case "core.proxy_http": - return true - case "core.proxy_ignore_hosts": - return true - case "core.trust_password": - return true - case "storage.lvm_vg_name": - return true - case "storage.lvm_thinpool_name": - return true - case "storage.lvm_fstype": - return true - case "storage.zfs_pool_name": - return true - case "images.remote_cache_expiry": - return true - case "images.compression_algorithm": - return true - case "images.auto_update_interval": - return true - case "images.auto_update_cached": - return true - } - - return false -} +func (d *Daemon) PasswordCheck(password string) error { + value := daemonConfig["core.trust_password"].Get() -// ConfigValueGet returns a config value from the memory, -// calls ConfigValuesGet if required. -// It returns a empty result if the config key isn't given. -func (d *Daemon) ConfigValueGet(key string) (string, error) { - if d.configValues == nil { - if _, err := d.ConfigValuesGet(); err != nil { - return "", err - } + // No password set + if value == "" { + return fmt.Errorf("No password is set") } - if val, ok := d.configValues[key]; ok { - return val, nil + // Compare the password + buff, err := hex.DecodeString(value) + if err != nil { + return err } - return "", nil -} + salt := buff[0:32] + hash, err := scrypt.Key([]byte(password), salt, 1<<14, 8, 1, 64) + if err != nil { + return err + } -// ConfigValuesGet fetches all config values and stores them in memory. 
-func (d *Daemon) ConfigValuesGet() (map[string]string, error) { - if d.configValues == nil { - var err error - d.configValues, err = dbConfigValuesGet(d.db) - if err != nil { - return d.configValues, err - } + if !bytes.Equal(hash, buff[32:]) { + return fmt.Errorf("Bad password provided") } - return d.configValues, nil + return nil } -// ConfigValueSet sets a new or updates a config value, -// it updates the value in the DB and in memory. -func (d *Daemon) ConfigValueSet(key string, value string) error { - if err := dbConfigValueSet(d.db, key, value); err != nil { +func (d *Daemon) ExpireLogs() error { + entries, err := ioutil.ReadDir(shared.LogPath()) + if err != nil { return err } - if d.configValues == nil { - if _, err := d.ConfigValuesGet(); err != nil { - return err - } - } - - if value == "" { - delete(d.configValues, key) - } else { - d.configValues[key] = value + result, err := dbContainersList(d.db, cTypeRegular) + if err != nil { + return err } - return nil -} + newestFile := func(path string, dir os.FileInfo) time.Time { + newest := dir.ModTime() -// PasswordSet sets the password to the new value. -func (d *Daemon) PasswordSet(password string) error { - shared.Log.Info("Setting new https password") - var value = password - if password != "" { - buf := make([]byte, pwSaltBytes) - _, err := io.ReadFull(rand.Reader, buf) + entries, err := ioutil.ReadDir(path) if err != nil { - return err + return newest } - hash, err := scrypt.Key([]byte(password), buf, 1<<14, 8, 1, pwHashBytes) - if err != nil { - return err + for _, entry := range entries { + if entry.ModTime().After(newest) { + newest = entry.ModTime() + } } - buf = append(buf, hash...) 
- value = hex.EncodeToString(buf) + return newest } - err := d.ConfigValueSet("core.trust_password", value) - if err != nil { - return err + for _, entry := range entries { + // Check if the container still exists + if shared.StringInSlice(entry.Name(), result) { + // Remove any log file which wasn't modified in the past 48 hours + logs, err := ioutil.ReadDir(shared.LogPath(entry.Name())) + if err != nil { + return err + } + + for _, logfile := range logs { + path := shared.LogPath(entry.Name(), logfile.Name()) + + // Always keep the LXC config + if logfile.Name() == "lxc.conf" { + continue + } + + // Deal with directories (snapshots) + if logfile.IsDir() { + newest := newestFile(path, logfile) + if time.Since(newest).Hours() >= 48 { + os.RemoveAll(path) + if err != nil { + return err + } + } + + continue + } + + // Individual files + if time.Since(logfile.ModTime()).Hours() >= 48 { + err := os.Remove(path) + if err != nil { + return err + } + } + } + } else { + // Empty directory if unchanged in the past 24 hours + path := shared.LogPath(entry.Name()) + newest := newestFile(path, entry) + if time.Since(newest).Hours() >= 24 { + err := os.RemoveAll(path) + if err != nil { + return err + } + } + } } return nil } -// PasswordCheck checks if the given password is the same -// as we have in the DB. 
-func (d *Daemon) PasswordCheck(password string) bool { - value, err := d.ConfigValueGet("core.trust_password") +func (d *Daemon) GetListeners() []net.Listener { + defer func() { + os.Unsetenv("LISTEN_PID") + os.Unsetenv("LISTEN_FDS") + }() + + pid, err := strconv.Atoi(os.Getenv("LISTEN_PID")) if err != nil { - shared.Log.Error("verifyAdminPwd", log.Ctx{"err": err}) - return false + return nil } - // No password set - if value == "" { - return false + if pid != os.Getpid() { + return nil } - buff, err := hex.DecodeString(value) + fds, err := strconv.Atoi(os.Getenv("LISTEN_FDS")) if err != nil { - shared.Log.Error("hex decode failed", log.Ctx{"err": err}) - return false + return nil } - salt := buff[0:pwSaltBytes] - hash, err := scrypt.Key([]byte(password), salt, 1<<14, 8, 1, pwHashBytes) - if err != nil { - shared.Log.Error("Failed to create hash to check", log.Ctx{"err": err}) - return false - } - if !bytes.Equal(hash, buff[pwSaltBytes:]) { - shared.Log.Error("Bad password received", log.Ctx{"err": err}) - return false + listeners := []net.Listener{} + + for i := 3; i < 3+fds; i++ { + syscall.CloseOnExec(i) + + file := os.NewFile(uintptr(i), fmt.Sprintf("inherited-fd%d", i)) + listener, err := net.FileListener(file) + if err != nil { + continue + } + + listeners = append(listeners, listener) } - shared.Log.Debug("Verified the admin password") - return true + + return listeners } type lxdHttpServer struct { @@ -1248,22 +1330,27 @@ } func (s *lxdHttpServer) ServeHTTP(rw http.ResponseWriter, req *http.Request) { - allowedOrigin, _ := s.d.ConfigValueGet("core.https_allowed_origin") + allowedOrigin := daemonConfig["core.https_allowed_origin"].Get() origin := req.Header.Get("Origin") if allowedOrigin != "" && origin != "" { rw.Header().Set("Access-Control-Allow-Origin", allowedOrigin) } - allowedMethods, _ := s.d.ConfigValueGet("core.https_allowed_methods") + allowedMethods := daemonConfig["core.https_allowed_methods"].Get() if allowedMethods != "" && origin != "" { 
rw.Header().Set("Access-Control-Allow-Methods", allowedMethods) } - allowedHeaders, _ := s.d.ConfigValueGet("core.https_allowed_headers") + allowedHeaders := daemonConfig["core.https_allowed_headers"].Get() if allowedHeaders != "" && origin != "" { rw.Header().Set("Access-Control-Allow-Headers", allowedHeaders) } + allowedCredentials := daemonConfig["core.https_allowed_credentials"].GetBool() + if allowedCredentials { + rw.Header().Set("Access-Control-Allow-Credentials", "true") + } + // OPTIONS request don't need any further processing if req.Method == "OPTIONS" { return diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/lxd/daemon_images.go juju-core-2.0.0/src/github.com/lxc/lxd/lxd/daemon_images.go --- juju-core-2.0~beta15/src/github.com/lxc/lxd/lxd/daemon_images.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/lxd/daemon_images.go 2016-10-13 14:31:53.000000000 +0000 @@ -29,6 +29,7 @@ func (d *Daemon) ImageDownload(op *operation, server string, protocol string, certificate string, secret string, alias string, forContainer bool, autoUpdate bool) (string, error) { var err error var ss *shared.SimpleStreams + var ctxMap log.Ctx if protocol == "" { protocol = "lxd" @@ -50,7 +51,7 @@ entry = &imageStreamCacheEntry{ss: ss, expiry: time.Now().Add(time.Hour)} imageStreamCache[server] = entry } else { - shared.Debugf("Using SimpleStreams cache entry for %s, expires at %s", server, entry.expiry) + shared.LogDebugf("Using SimpleStreams cache entry for %s, expires at %s", server, entry.expiry) ss = entry.ss } imageStreamCacheLock.Unlock() @@ -77,39 +78,35 @@ } if _, _, err := dbImageGet(d.db, fp, false, false); err == nil { - shared.Log.Debug("Image already exists in the db", log.Ctx{"image": fp}) + shared.LogDebug("Image already exists in the db", log.Ctx{"image": fp}) // already have it return fp, nil } - shared.Log.Info( - "Image not in the db, downloading it", - log.Ctx{"image": fp, "server": server}) - // Now check if we already 
downloading the image d.imagesDownloadingLock.RLock() if waitChannel, ok := d.imagesDownloading[fp]; ok { // We already download the image d.imagesDownloadingLock.RUnlock() - shared.Log.Info( + shared.LogDebug( "Already downloading the image, waiting for it to succeed", log.Ctx{"image": fp}) // Wait until the download finishes (channel closes) if _, ok := <-waitChannel; ok { - shared.Log.Warn("Value transmitted over image lock semaphore?") + shared.LogWarnf("Value transmitted over image lock semaphore?") } if _, _, err := dbImageGet(d.db, fp, false, true); err != nil { - shared.Log.Error( + shared.LogError( "Previous download didn't succeed", log.Ctx{"image": fp}) return "", fmt.Errorf("Previous download didn't succeed") } - shared.Log.Info( + shared.LogDebug( "Previous download succeeded", log.Ctx{"image": fp}) @@ -118,9 +115,13 @@ d.imagesDownloadingLock.RUnlock() - shared.Log.Info( - "Downloading the image", - log.Ctx{"image": fp}) + if op == nil { + ctxMap = log.Ctx{"alias": alias, "server": server} + } else { + ctxMap = log.Ctx{"trigger": op.url, "image": fp, "operation": op.id, "alias": alias, "server": server} + } + + shared.LogInfo("Downloading image", ctxMap) // Add the download to the queue d.imagesDownloadingLock.Lock() @@ -179,7 +180,7 @@ resp, err := d.httpGetSync(url, certificate) if err != nil { - shared.Log.Error( + shared.LogError( "Failed to download image metadata", log.Ctx{"image": fp, "err": err}) @@ -237,6 +238,8 @@ } } + shared.LogInfo("Image downloaded", ctxMap) + if forContainer { return fp, dbImageLastAccessInit(d.db, fp) } @@ -246,7 +249,7 @@ raw, err := d.httpGetFile(exporturl, certificate) if err != nil { - shared.Log.Error( + shared.LogError( "Failed to download image", log.Ctx{"image": fp, "err": err}) return "", err @@ -267,7 +270,7 @@ // Get the metadata tarball part, err := mr.NextPart() if err != nil { - shared.Log.Error( + shared.LogError( "Invalid multipart image", log.Ctx{"image": fp, "err": err}) @@ -275,7 +278,7 @@ } if 
part.FormName() != "metadata" { - shared.Log.Error( + shared.LogError( "Invalid multipart image", log.Ctx{"image": fp, "err": err}) @@ -285,7 +288,7 @@ destName = filepath.Join(destDir, info.Fingerprint) f, err := os.Create(destName) if err != nil { - shared.Log.Error( + shared.LogError( "Failed to save image", log.Ctx{"image": fp, "err": err}) @@ -296,7 +299,7 @@ f.Close() if err != nil { - shared.Log.Error( + shared.LogError( "Failed to save image", log.Ctx{"image": fp, "err": err}) @@ -306,7 +309,7 @@ // Get the rootfs tarball part, err = mr.NextPart() if err != nil { - shared.Log.Error( + shared.LogError( "Invalid multipart image", log.Ctx{"image": fp, "err": err}) @@ -314,7 +317,7 @@ } if part.FormName() != "rootfs" { - shared.Log.Error( + shared.LogError( "Invalid multipart image", log.Ctx{"image": fp}) return "", fmt.Errorf("Invalid multipart image") @@ -323,7 +326,7 @@ destName = filepath.Join(destDir, info.Fingerprint+".rootfs") f, err = os.Create(destName) if err != nil { - shared.Log.Error( + shared.LogError( "Failed to save image", log.Ctx{"image": fp, "err": err}) return "", err @@ -333,7 +336,7 @@ f.Close() if err != nil { - shared.Log.Error( + shared.LogError( "Failed to save image", log.Ctx{"image": fp, "err": err}) return "", err @@ -343,7 +346,7 @@ f, err := os.Create(destName) if err != nil { - shared.Log.Error( + shared.LogError( "Failed to save image", log.Ctx{"image": fp, "err": err}) @@ -354,7 +357,7 @@ f.Close() if err != nil { - shared.Log.Error( + shared.LogError( "Failed to save image", log.Ctx{"image": fp, "err": err}) return "", err @@ -382,7 +385,7 @@ _, err = imageBuildFromInfo(d, info) if err != nil { - shared.Log.Error( + shared.LogError( "Failed to create image", log.Ctx{"image": fp, "err": err}) @@ -401,9 +404,7 @@ } } - shared.Log.Info( - "Download succeeded", - log.Ctx{"image": fp}) + shared.LogInfo("Image downloaded", ctxMap) if forContainer { return fp, dbImageLastAccessInit(d.db, fp) diff -Nru 
juju-core-2.0~beta15/src/github.com/lxc/lxd/lxd/daemon_test.go juju-core-2.0.0/src/github.com/lxc/lxd/lxd/daemon_test.go --- juju-core-2.0~beta15/src/github.com/lxc/lxd/lxd/daemon_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/lxd/daemon_test.go 2016-10-13 14:31:53.000000000 +0000 @@ -4,23 +4,24 @@ var err error d := suite.d - err = d.ConfigValueSet("storage.lvm_vg_name", "foo") + err = daemonConfig["core.trust_password"].Set(d, "foo") suite.Req.Nil(err) - val, err := d.ConfigValueGet("storage.lvm_vg_name") - suite.Req.Nil(err) - suite.Req.Equal(val, "foo") + val := daemonConfig["core.trust_password"].Get() + suite.Req.Equal(len(val), 192) - err = d.ConfigValueSet("storage.lvm_vg_name", "") - suite.Req.Nil(err) + valMap := daemonConfigRender() + value, present := valMap["core.trust_password"] + suite.Req.True(present) + suite.Req.Equal(value, true) - val, err = d.ConfigValueGet("storage.lvm_vg_name") + err = daemonConfig["core.trust_password"].Set(d, "") suite.Req.Nil(err) - suite.Req.Equal(val, "") - valMap, err := d.ConfigValuesGet() - suite.Req.Nil(err) + val = daemonConfig["core.trust_password"].Get() + suite.Req.Equal(val, "") - _, present := valMap["storage.lvm_vg_name"] + valMap = daemonConfigRender() + _, present = valMap["core.trust_password"] suite.Req.False(present) } diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/lxd/db_containers.go juju-core-2.0.0/src/github.com/lxc/lxd/lxd/db_containers.go --- juju-core-2.0~beta15/src/github.com/lxc/lxd/lxd/db_containers.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/lxd/db_containers.go 2016-10-13 14:31:53.000000000 +0000 @@ -62,14 +62,16 @@ } func dbContainerGet(db *sql.DB, name string) (containerArgs, error) { + var used *time.Time // Hold the db-returned time + args := containerArgs{} args.Name = name ephemInt := -1 statefulInt := -1 - q := "SELECT id, architecture, type, ephemeral, stateful, creation_date FROM containers WHERE 
name=?" + q := "SELECT id, architecture, type, ephemeral, stateful, creation_date, last_use_date FROM containers WHERE name=?" arg1 := []interface{}{name} - arg2 := []interface{}{&args.Id, &args.Architecture, &args.Ctype, &ephemInt, &statefulInt, &args.CreationDate} + arg2 := []interface{}{&args.Id, &args.Architecture, &args.Ctype, &ephemInt, &statefulInt, &args.CreationDate, &used} err := dbQueryRowScan(db, q, arg1, arg2) if err != nil { return args, err @@ -87,6 +89,12 @@ args.Stateful = true } + if used != nil { + args.LastUsedDate = *used + } else { + args.LastUsedDate = time.Unix(0, 0).UTC() + } + config, err := dbContainerConfig(db, args.Id) if err != nil { return args, err @@ -135,15 +143,16 @@ } args.CreationDate = time.Now().UTC() + args.LastUsedDate = time.Unix(0, 0).UTC() - str := fmt.Sprintf("INSERT INTO containers (name, architecture, type, ephemeral, creation_date, stateful) VALUES (?, ?, ?, ?, ?, ?)") + str := fmt.Sprintf("INSERT INTO containers (name, architecture, type, ephemeral, creation_date, last_use_date, stateful) VALUES (?, ?, ?, ?, ?, ?, ?)") stmt, err := tx.Prepare(str) if err != nil { tx.Rollback() return 0, err } defer stmt.Close() - result, err := stmt.Exec(args.Name, args.Architecture, args.Ctype, ephemInt, args.CreationDate.Unix(), statefulInt) + result, err := stmt.Exec(args.Name, args.Architecture, args.Ctype, ephemInt, args.CreationDate.Unix(), args.LastUsedDate.Unix(), statefulInt) if err != nil { tx.Rollback() return 0, err @@ -206,7 +215,7 @@ for k, v := range config { _, err := stmt.Exec(id, k, v) if err != nil { - shared.Debugf("Error adding configuration item %s = %s to container %d", + shared.LogDebugf("Error adding configuration item %s = %s to container %d", k, v, id) return err } @@ -242,7 +251,7 @@ for _, p := range profiles { _, err = stmt.Exec(id, p, applyOrder) if err != nil { - shared.Debugf("Error adding profile %s to container: %s", + shared.LogDebugf("Error adding profile %s to container: %s", p, err) return err } 
@@ -323,6 +332,43 @@ return ret, nil } +func dbContainerSetState(db *sql.DB, id int, state string) error { + tx, err := dbBegin(db) + if err != nil { + return err + } + + // Clear any existing entry + str := fmt.Sprintf("DELETE FROM containers_config WHERE container_id = ? AND key = 'volatile.last_state.power'") + stmt, err := tx.Prepare(str) + if err != nil { + tx.Rollback() + return err + } + defer stmt.Close() + + if _, err := stmt.Exec(id); err != nil { + tx.Rollback() + return err + } + + // Insert the new one + str = fmt.Sprintf("INSERT INTO containers_config (container_id, key, value) VALUES (?, 'volatile.last_state.power', ?)") + stmt, err = tx.Prepare(str) + if err != nil { + tx.Rollback() + return err + } + defer stmt.Close() + + if _, err = stmt.Exec(id, state); err != nil { + tx.Rollback() + return err + } + + return txCommit(tx) +} + func dbContainerRename(db *sql.DB, oldName string, newName string) error { tx, err := dbBegin(db) if err != nil { @@ -337,7 +383,7 @@ } defer stmt.Close() - shared.Log.Debug( + shared.LogDebug( "Calling SQL Query", log.Ctx{ "query": "UPDATE containers SET name = ? WHERE name = ?", @@ -371,6 +417,12 @@ return nil } +func dbContainerLastUsedUpdate(db *sql.DB, id int, date time.Time) error { + stmt := `UPDATE containers SET last_use_date=? 
WHERE id=?` + _, err := dbExec(db, stmt, date, id) + return err +} + func dbContainerGetSnapshots(db *sql.DB, name string) ([]string, error) { result := []string{} diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/lxd/db_devices.go juju-core-2.0.0/src/github.com/lxc/lxd/lxd/db_devices.go --- juju-core-2.0~beta15/src/github.com/lxc/lxd/lxd/db_devices.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/lxd/db_devices.go 2016-10-13 14:31:53.000000000 +0000 @@ -21,6 +21,8 @@ return "unix-char", nil case 4: return "unix-block", nil + case 5: + return "usb", nil default: return "", fmt.Errorf("Invalid device type %d", t) } @@ -38,6 +40,8 @@ return 3, nil case "unix-block": return 4, nil + case "usb": + return 5, nil default: return -1, fmt.Errorf("Invalid device type %s", t) } diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/lxd/db.go juju-core-2.0.0/src/github.com/lxc/lxd/lxd/db.go --- juju-core-2.0~beta15/src/github.com/lxc/lxd/lxd/db.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/lxd/db.go 2016-10-13 14:31:53.000000000 +0000 @@ -34,8 +34,6 @@ // Profiles will contain a list of all Profiles. type Profiles []Profile -const DB_CURRENT_VERSION int = 29 - // CURRENT_SCHEMA contains the current SQLite SQL Schema. 
const CURRENT_SCHEMA string = ` CREATE TABLE IF NOT EXISTS certificates ( @@ -60,6 +58,7 @@ ephemeral INTEGER NOT NULL DEFAULT 0, stateful INTEGER NOT NULL DEFAULT 0, creation_date DATETIME, + last_use_date DATETIME, UNIQUE (name) ); CREATE TABLE IF NOT EXISTS containers_config ( @@ -135,6 +134,25 @@ alias VARCHAR(255) NOT NULL, FOREIGN KEY (image_id) REFERENCES images (id) ON DELETE CASCADE ); +CREATE TABLE IF NOT EXISTS networks ( + id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, + name VARCHAR(255) NOT NULL, + UNIQUE (name) +); +CREATE TABLE IF NOT EXISTS networks_config ( + id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, + network_id INTEGER NOT NULL, + key VARCHAR(255) NOT NULL, + value TEXT, + UNIQUE (network_id, key), + FOREIGN KEY (network_id) REFERENCES networks (id) ON DELETE CASCADE +); +CREATE TABLE IF NOT EXISTS patches ( + id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, + name VARCHAR(255) NOT NULL, + applied_at DATETIME NOT NULL, + UNIQUE (name) +); CREATE TABLE IF NOT EXISTS profiles ( id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, name VARCHAR(255) NOT NULL, @@ -187,7 +205,7 @@ // There isn't an entry for schema version, let's put it in. insertStmt := `INSERT INTO schema (version, updated_at) values (?, strftime("%s"));` - _, err = db.Exec(insertStmt, DB_CURRENT_VERSION) + _, err = db.Exec(insertStmt, dbGetLatestSchema()) if err != nil { return err } @@ -211,6 +229,10 @@ return v } +func dbGetLatestSchema() int { + return dbUpdates[len(dbUpdates)-1].version +} + // Create a database connection object and return it. func initializeDbObject(d *Daemon, path string) (err error) { var openPath string @@ -236,13 +258,10 @@ // Run PRAGMA statements now since they are *per-connection*. 
d.db.Exec("PRAGMA foreign_keys=ON;") // This allows us to use ON DELETE CASCADE - v := dbGetSchema(d.db) - - if v != DB_CURRENT_VERSION { - err = dbUpdate(d, v) - if err != nil { - return err - } + // Apply any update + err = dbUpdatesApplyAll(d) + if err != nil { + return err } return nil @@ -278,13 +297,13 @@ return tx, nil } if !isDbLockedError(err) { - shared.Debugf("DbBegin: error %q", err) + shared.LogDebugf("DbBegin: error %q", err) return nil, err } time.Sleep(100 * time.Millisecond) } - shared.Debugf("DbBegin: DB still locked") + shared.LogDebugf("DbBegin: DB still locked") shared.PrintStack() return nil, fmt.Errorf("DB is locked") } @@ -296,13 +315,13 @@ return nil } if !isDbLockedError(err) { - shared.Debugf("Txcommit: error %q", err) + shared.LogDebugf("Txcommit: error %q", err) return err } time.Sleep(100 * time.Millisecond) } - shared.Debugf("Txcommit: db still locked") + shared.LogDebugf("Txcommit: db still locked") shared.PrintStack() return fmt.Errorf("DB is locked") } @@ -322,7 +341,7 @@ time.Sleep(100 * time.Millisecond) } - shared.Debugf("DbQueryRowScan: query %q args %q, DB still locked", q, args) + shared.LogDebugf("DbQueryRowScan: query %q args %q, DB still locked", q, args) shared.PrintStack() return fmt.Errorf("DB is locked") } @@ -334,13 +353,13 @@ return result, nil } if !isDbLockedError(err) { - shared.Debugf("DbQuery: query %q error %q", q, err) + shared.LogDebugf("DbQuery: query %q error %q", q, err) return nil, err } time.Sleep(100 * time.Millisecond) } - shared.Debugf("DbQuery: query %q args %q, DB still locked", q, args) + shared.LogDebugf("DbQuery: query %q args %q, DB still locked", q, args) shared.PrintStack() return nil, fmt.Errorf("DB is locked") } @@ -362,6 +381,9 @@ case int: integer := 0 ptrargs[i] = &integer + case int64: + integer := int64(0) + ptrargs[i] = &integer default: return [][]interface{}{}, fmt.Errorf("Bad interface type: %s", t) } @@ -377,6 +399,8 @@ newargs[i] = *ptrargs[i].(*string) case int: newargs[i] = 
*ptrargs[i].(*int) + case int64: + newargs[i] = *ptrargs[i].(*int64) default: return [][]interface{}{}, fmt.Errorf("Bad interface type: %s", t) } @@ -409,13 +433,13 @@ return result, nil } if !isDbLockedError(err) { - shared.Debugf("DbQuery: query %q error %q", q, err) + shared.LogDebugf("DbQuery: query %q error %q", q, err) return nil, err } time.Sleep(100 * time.Millisecond) } - shared.Debugf("DbQueryscan: query %q inargs %q, DB still locked", q, inargs) + shared.LogDebugf("DbQueryscan: query %q inargs %q, DB still locked", q, inargs) shared.PrintStack() return nil, fmt.Errorf("DB is locked") } @@ -427,13 +451,13 @@ return result, nil } if !isDbLockedError(err) { - shared.Debugf("DbExec: query %q error %q", q, err) + shared.LogDebugf("DbExec: query %q error %q", q, err) return nil, err } time.Sleep(100 * time.Millisecond) } - shared.Debugf("DbExec: query %q args %q, DB still locked", q, args) + shared.LogDebugf("DbExec: query %q args %q, DB still locked", q, args) shared.PrintStack() return nil, fmt.Errorf("DB is locked") } diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/lxd/db_images.go juju-core-2.0.0/src/github.com/lxc/lxd/lxd/db_images.go --- juju-core-2.0~beta15/src/github.com/lxc/lxd/lxd/db_images.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/lxd/db_images.go 2016-10-13 14:31:53.000000000 +0000 @@ -38,7 +38,7 @@ return results, nil } -func dbImagesGetExpired(db *sql.DB, expiry int) ([]string, error) { +func dbImagesGetExpired(db *sql.DB, expiry int64) ([]string, error) { q := `SELECT fingerprint FROM images WHERE cached=1 AND creation_date<=strftime('%s', date('now', '-` + fmt.Sprintf("%d", expiry) + ` day'))` var fp string diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/lxd/db_networks.go juju-core-2.0.0/src/github.com/lxc/lxd/lxd/db_networks.go --- juju-core-2.0~beta15/src/github.com/lxc/lxd/lxd/db_networks.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/lxd/db_networks.go 
2016-10-13 14:31:53.000000000 +0000 @@ -0,0 +1,269 @@ +package main + +import ( + "database/sql" + "fmt" + "strings" + + _ "github.com/mattn/go-sqlite3" + + "github.com/lxc/lxd/shared" +) + +func dbNetworks(db *sql.DB) ([]string, error) { + q := fmt.Sprintf("SELECT name FROM networks") + inargs := []interface{}{} + var name string + outfmt := []interface{}{name} + result, err := dbQueryScan(db, q, inargs, outfmt) + if err != nil { + return []string{}, err + } + + response := []string{} + for _, r := range result { + response = append(response, r[0].(string)) + } + + return response, nil +} + +func dbNetworkGet(db *sql.DB, network string) (int64, *shared.NetworkConfig, error) { + id := int64(-1) + + q := "SELECT id FROM networks WHERE name=?" + arg1 := []interface{}{network} + arg2 := []interface{}{&id} + err := dbQueryRowScan(db, q, arg1, arg2) + if err != nil { + return -1, nil, err + } + + config, err := dbNetworkConfigGet(db, id) + if err != nil { + return -1, nil, err + } + + return id, &shared.NetworkConfig{ + Name: network, + Managed: true, + Type: "bridge", + Config: config, + }, nil +} + +func dbNetworkGetInterface(db *sql.DB, devName string) (int64, *shared.NetworkConfig, error) { + id := int64(-1) + name := "" + value := "" + + q := "SELECT networks.id, networks.name, networks_config.value FROM networks LEFT JOIN networks_config ON networks.id=networks_config.network_id WHERE networks_config.key=\"bridge.external_interfaces\"" + arg1 := []interface{}{} + arg2 := []interface{}{id, name, value} + result, err := dbQueryScan(db, q, arg1, arg2) + if err != nil { + return -1, nil, err + } + + for _, r := range result { + for _, entry := range strings.Split(r[2].(string), ",") { + entry = strings.TrimSpace(entry) + + if entry == devName { + id = r[0].(int64) + name = r[1].(string) + } + } + } + + if id == -1 { + return -1, nil, fmt.Errorf("No network found for interface: %s", devName) + } + + config, err := dbNetworkConfigGet(db, id) + if err != nil { + return 
-1, nil, err + } + + return id, &shared.NetworkConfig{ + Name: name, + Managed: true, + Type: "bridge", + Config: config, + }, nil +} + +func dbNetworkConfigGet(db *sql.DB, id int64) (map[string]string, error) { + var key, value string + query := ` + SELECT + key, value + FROM networks_config + WHERE network_id=?` + inargs := []interface{}{id} + outfmt := []interface{}{key, value} + results, err := dbQueryScan(db, query, inargs, outfmt) + if err != nil { + return nil, fmt.Errorf("Failed to get network '%d'", id) + } + + if len(results) == 0 { + /* + * If we didn't get any rows here, let's check to make sure the + * network really exists; if it doesn't, let's send back a 404. + */ + query := "SELECT id FROM networks WHERE id=?" + var r int + results, err := dbQueryScan(db, query, []interface{}{id}, []interface{}{r}) + if err != nil { + return nil, err + } + + if len(results) == 0 { + return nil, NoSuchObjectError + } + } + + config := map[string]string{} + + for _, r := range results { + key = r[0].(string) + value = r[1].(string) + + config[key] = value + } + + return config, nil +} + +func dbNetworkCreate(db *sql.DB, name string, config map[string]string) (int64, error) { + tx, err := dbBegin(db) + if err != nil { + return -1, err + } + + result, err := tx.Exec("INSERT INTO networks (name) VALUES (?)", name) + if err != nil { + tx.Rollback() + return -1, err + } + + id, err := result.LastInsertId() + if err != nil { + tx.Rollback() + return -1, err + } + + err = dbNetworkConfigAdd(tx, id, config) + if err != nil { + tx.Rollback() + return -1, err + } + + err = txCommit(tx) + if err != nil { + return -1, err + } + + return id, nil +} + +func dbNetworkUpdate(db *sql.DB, name string, config map[string]string) error { + id, _, err := dbNetworkGet(db, name) + if err != nil { + return err + } + + tx, err := dbBegin(db) + if err != nil { + return err + } + + err = dbNetworkConfigClear(tx, id) + if err != nil { + tx.Rollback() + return err + } + + err = 
dbNetworkConfigAdd(tx, id, config) + if err != nil { + tx.Rollback() + return err + } + + return txCommit(tx) +} + +func dbNetworkConfigAdd(tx *sql.Tx, id int64, config map[string]string) error { + str := fmt.Sprintf("INSERT INTO networks_config (network_id, key, value) VALUES(?, ?, ?)") + stmt, err := tx.Prepare(str) + defer stmt.Close() + + for k, v := range config { + if v == "" { + continue + } + + _, err = stmt.Exec(id, k, v) + if err != nil { + return err + } + } + + return nil +} + +func dbNetworkConfigClear(tx *sql.Tx, id int64) error { + _, err := tx.Exec("DELETE FROM networks_config WHERE network_id=?", id) + if err != nil { + return err + } + + return nil +} + +func dbNetworkDelete(db *sql.DB, name string) error { + id, _, err := dbNetworkGet(db, name) + if err != nil { + return err + } + + tx, err := dbBegin(db) + if err != nil { + return err + } + + _, err = tx.Exec("DELETE FROM networks WHERE id=?", id) + if err != nil { + tx.Rollback() + return err + } + + err = dbNetworkConfigClear(tx, id) + if err != nil { + tx.Rollback() + return err + } + + return txCommit(tx) +} + +func dbNetworkRename(db *sql.DB, oldName string, newName string) error { + id, _, err := dbNetworkGet(db, oldName) + if err != nil { + return err + } + + tx, err := dbBegin(db) + if err != nil { + return err + } + + _, err = tx.Exec("UPDATE networks SET name=? 
WHERE id=?", newName, id) + if err != nil { + tx.Rollback() + return err + } + + return txCommit(tx) +} diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/lxd/db_patches.go juju-core-2.0.0/src/github.com/lxc/lxd/lxd/db_patches.go --- juju-core-2.0~beta15/src/github.com/lxc/lxd/lxd/db_patches.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/lxd/db_patches.go 2016-10-13 14:31:53.000000000 +0000 @@ -0,0 +1,30 @@ +package main + +import ( + "database/sql" + "fmt" +) + +func dbPatches(db *sql.DB) ([]string, error) { + inargs := []interface{}{} + outfmt := []interface{}{""} + + query := fmt.Sprintf("SELECT name FROM patches") + result, err := dbQueryScan(db, query, inargs, outfmt) + if err != nil { + return []string{}, err + } + + response := []string{} + for _, r := range result { + response = append(response, r[0].(string)) + } + + return response, nil +} + +func dbPatchesMarkApplied(db *sql.DB, patch string) error { + stmt := `INSERT INTO patches (name, applied_at) VALUES (?, strftime("%s"));` + _, err := db.Exec(stmt, patch) + return err +} diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/lxd/db_profiles.go juju-core-2.0.0/src/github.com/lxc/lxd/lxd/db_profiles.go --- juju-core-2.0~beta15/src/github.com/lxc/lxd/lxd/db_profiles.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/lxd/db_profiles.go 2016-10-13 14:31:53.000000000 +0000 @@ -104,14 +104,7 @@ return nil } - // TODO: We should scan for bridges and use the best available as default. 
- devices := shared.Devices{ - "eth0": shared.Device{ - "name": "eth0", - "type": "nic", - "nictype": "bridged", - "parent": "lxcbr0"}} - id, err := dbProfileCreate(db, "default", "Default LXD profile", map[string]string{}, devices) + id, err := dbProfileCreate(db, "default", "Default LXD profile", map[string]string{}, shared.Devices{}) if err != nil { return err } @@ -130,16 +123,12 @@ config := map[string]string{ "security.nesting": "true", "linux.kernel_modules": "overlay, nf_nat"} - fusedev := map[string]string{ - "path": "/dev/fuse", - "type": "unix-char", - } aadisable := map[string]string{ "path": "/sys/module/apparmor/parameters/enabled", "type": "disk", "source": "/dev/null", } - devices := map[string]shared.Device{"fuse": fusedev, "aadisable": aadisable} + devices := map[string]shared.Device{"aadisable": aadisable} _, err = dbProfileCreate(db, "docker", "Profile supporting docker in containers", config, devices) return err @@ -191,19 +180,33 @@ } func dbProfileDelete(db *sql.DB, name string) error { + id, _, err := dbProfileGet(db, name) + if err != nil { + return err + } + tx, err := dbBegin(db) if err != nil { return err } - _, err = tx.Exec("DELETE FROM profiles WHERE name=?", name) + + _, err = tx.Exec("DELETE FROM profiles WHERE id=?", id) if err != nil { tx.Rollback() return err } - err = txCommit(tx) + err = dbProfileConfigClear(tx, id) + if err != nil { + return err + } + + _, err = tx.Exec("DELETE FROM containers_profiles WHERE profile_id=?", id) + if err != nil { + return err + } - return err + return txCommit(tx) } func dbProfileUpdate(db *sql.DB, name string, newName string) error { diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/lxd/db_test.go juju-core-2.0.0/src/github.com/lxc/lxd/lxd/db_test.go --- juju-core-2.0~beta15/src/github.com/lxc/lxd/lxd/db_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/lxd/db_test.go 2016-10-13 14:31:53.000000000 +0000 @@ -22,7 +22,7 @@ INSERT INTO images_properties 
(image_id, type, key, value) VALUES (1, 0, 'thekey', 'some value'); INSERT INTO profiles_config (profile_id, key, value) VALUES (3, 'thekey', 'thevalue'); INSERT INTO profiles_devices (profile_id, name, type) VALUES (3, 'devicename', 1); - INSERT INTO profiles_devices_config (profile_device_id, key, value) VALUES (4, 'devicekey', 'devicevalue'); + INSERT INTO profiles_devices_config (profile_device_id, key, value) VALUES (2, 'devicekey', 'devicevalue'); ` // This Helper will initialize a test in-memory DB. @@ -262,7 +262,7 @@ } // Run the upgrade from V6 code - err = dbUpdateFromV6(d.db) + err = dbUpdateFromV6(5, 6, d) // Make sure the inserted data is still there. statements = `SELECT count(*) FROM containers_config;` @@ -374,16 +374,17 @@ // Let's run the schema upgrades. d := &Daemon{MockMode: true} d.db = db - err = dbUpdate(d, 1) + daemonConfigInit(db) + err = dbUpdatesApplyAll(d) if err != nil { t.Error("Error upgrading database schema!") t.Fatal(err) } result := dbGetSchema(db) - if result != DB_CURRENT_VERSION { - t.Fatal(fmt.Sprintf("The schema is not at the latest version after update! Found: %d, should be: %d", result, DB_CURRENT_VERSION)) + if result != dbGetLatestSchema() { + t.Fatal(fmt.Sprintf("The schema is not at the latest version after update! Found: %d, should be: %d", result, dbGetLatestSchema())) } // Make sure there are 0 containers_config entries left. 
diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/lxd/db_update.go juju-core-2.0.0/src/github.com/lxc/lxd/lxd/db_update.go --- juju-core-2.0~beta15/src/github.com/lxc/lxd/lxd/db_update.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/lxd/db_update.go 2016-10-13 14:31:53.000000000 +0000 @@ -1,41 +1,239 @@ package main import ( - "database/sql" "encoding/hex" "fmt" + "io/ioutil" "os" "os/exec" "path/filepath" "strconv" "strings" + "syscall" "github.com/lxc/lxd/shared" log "gopkg.in/inconshreveable/log15.v2" ) -func dbUpdateFromV28(db *sql.DB) error { +/* Database updates are one-time actions that are needed to move an + existing database from one version of the schema to the next. + + Those updates are applied at startup time before anything else in LXD + is initialized. This means that they should be entirely + self-contained and not touch anything but the database. + + Calling LXD functions isn't allowed as such functions may themselves + depend on a newer DB schema and so would fail when upgrading a very old + version of LXD. + + DO NOT USE this mechanism for one-time actions which do not involve + changes to the database schema. Use patches instead. + + Only append to the updates list, never remove entries and never re-order them. 
+*/ + +var dbUpdates = []dbUpdate{ + dbUpdate{version: 1, run: dbUpdateFromV0}, + dbUpdate{version: 2, run: dbUpdateFromV1}, + dbUpdate{version: 3, run: dbUpdateFromV2}, + dbUpdate{version: 4, run: dbUpdateFromV3}, + dbUpdate{version: 5, run: dbUpdateFromV4}, + dbUpdate{version: 6, run: dbUpdateFromV5}, + dbUpdate{version: 7, run: dbUpdateFromV6}, + dbUpdate{version: 8, run: dbUpdateFromV7}, + dbUpdate{version: 9, run: dbUpdateFromV8}, + dbUpdate{version: 10, run: dbUpdateFromV9}, + dbUpdate{version: 11, run: dbUpdateFromV10}, + dbUpdate{version: 12, run: dbUpdateFromV11}, + dbUpdate{version: 13, run: dbUpdateFromV12}, + dbUpdate{version: 14, run: dbUpdateFromV13}, + dbUpdate{version: 15, run: dbUpdateFromV14}, + dbUpdate{version: 16, run: dbUpdateFromV15}, + dbUpdate{version: 17, run: dbUpdateFromV16}, + dbUpdate{version: 18, run: dbUpdateFromV17}, + dbUpdate{version: 19, run: dbUpdateFromV18}, + dbUpdate{version: 20, run: dbUpdateFromV19}, + dbUpdate{version: 21, run: dbUpdateFromV20}, + dbUpdate{version: 22, run: dbUpdateFromV21}, + dbUpdate{version: 23, run: dbUpdateFromV22}, + dbUpdate{version: 24, run: dbUpdateFromV23}, + dbUpdate{version: 25, run: dbUpdateFromV24}, + dbUpdate{version: 26, run: dbUpdateFromV25}, + dbUpdate{version: 27, run: dbUpdateFromV26}, + dbUpdate{version: 28, run: dbUpdateFromV27}, + dbUpdate{version: 29, run: dbUpdateFromV28}, + dbUpdate{version: 30, run: dbUpdateFromV29}, + dbUpdate{version: 31, run: dbUpdateFromV30}, + dbUpdate{version: 32, run: dbUpdateFromV31}, + dbUpdate{version: 33, run: dbUpdateFromV32}, + dbUpdate{version: 34, run: dbUpdateFromV33}, +} + +type dbUpdate struct { + version int + run func(previousVersion int, version int, d *Daemon) error +} + +func (u *dbUpdate) apply(currentVersion int, d *Daemon) error { + // Get the current schema version + + shared.LogDebugf("Updating DB schema from %d to %d", currentVersion, u.version) + + err := u.run(currentVersion, u.version, d) + if err != nil { + return err + } + + _, 
err = d.db.Exec("INSERT INTO schema (version, updated_at) VALUES (?, strftime(\"%s\"));", u.version) + if err != nil { + return err + } + + return nil +} + +func dbUpdatesApplyAll(d *Daemon) error { + currentVersion := dbGetSchema(d.db) + + backup := false + for _, update := range dbUpdates { + if update.version <= currentVersion { + continue + } + + if !d.MockMode && !backup { + shared.LogInfof("Updating the LXD database schema. Backup made as \"lxd.db.bak\"") + err := shared.FileCopy(shared.VarPath("lxd.db"), shared.VarPath("lxd.db.bak")) + if err != nil { + return err + } + + backup = true + } + + err := update.apply(currentVersion, d) + if err != nil { + return err + } + + currentVersion = update.version + } + + return nil +} + +// Schema updates begin here +func dbUpdateFromV33(currentVersion int, version int, d *Daemon) error { + stmt := ` +CREATE TABLE IF NOT EXISTS networks ( + id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, + name VARCHAR(255) NOT NULL, + UNIQUE (name) +); +CREATE TABLE IF NOT EXISTS networks_config ( + id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, + network_id INTEGER NOT NULL, + key VARCHAR(255) NOT NULL, + value TEXT, + UNIQUE (network_id, key), + FOREIGN KEY (network_id) REFERENCES networks (id) ON DELETE CASCADE +);` + _, err := d.db.Exec(stmt) + return err +} + +func dbUpdateFromV32(currentVersion int, version int, d *Daemon) error { + _, err := d.db.Exec("ALTER TABLE containers ADD COLUMN last_use_date DATETIME;") + return err +} + +func dbUpdateFromV31(currentVersion int, version int, d *Daemon) error { + stmt := ` +CREATE TABLE IF NOT EXISTS patches ( + id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, + name VARCHAR(255) NOT NULL, + applied_at DATETIME NOT NULL, + UNIQUE (name) +);` + _, err := d.db.Exec(stmt) + return err +} + +func dbUpdateFromV30(currentVersion int, version int, d *Daemon) error { + if d.MockMode { + return nil + } + + entries, err := ioutil.ReadDir(shared.VarPath("containers")) + if err != nil { + /* If the 
directory didn't exist before, the user had never + * started containers, so we don't need to fix up permissions + * on anything. + */ + if os.IsNotExist(err) { + return nil + } + return err + } + + for _, entry := range entries { + if !shared.IsDir(shared.VarPath("containers", entry.Name(), "rootfs")) { + continue + } + + info, err := os.Stat(shared.VarPath("containers", entry.Name(), "rootfs")) + if err != nil { + return err + } + + if int(info.Sys().(*syscall.Stat_t).Uid) == 0 { + err := os.Chmod(shared.VarPath("containers", entry.Name()), 0700) + if err != nil { + return err + } + + err = os.Chown(shared.VarPath("containers", entry.Name()), 0, 0) + if err != nil { + return err + } + } + } + + return nil +} + +func dbUpdateFromV29(currentVersion int, version int, d *Daemon) error { + if d.MockMode { + return nil + } + + if shared.PathExists(shared.VarPath("zfs.img")) { + err := os.Chmod(shared.VarPath("zfs.img"), 0600) + if err != nil { + return err + } + } + + return nil +} + +func dbUpdateFromV28(currentVersion int, version int, d *Daemon) error { stmt := ` INSERT INTO profiles_devices (profile_id, name, type) SELECT id, "aadisable", 2 FROM profiles WHERE name="docker"; INSERT INTO profiles_devices_config (profile_device_id, key, value) SELECT profiles_devices.id, "source", "/dev/null" FROM profiles_devices LEFT JOIN profiles WHERE profiles_devices.profile_id = profiles.id AND profiles.name = "docker" AND profiles_devices.name = "aadisable"; INSERT INTO profiles_devices_config (profile_device_id, key, value) SELECT profiles_devices.id, "path", "/sys/module/apparmor/parameters/enabled" FROM profiles_devices LEFT JOIN profiles WHERE profiles_devices.profile_id = profiles.id AND profiles.name = "docker" AND profiles_devices.name = "aadisable";` - db.Exec(stmt) + d.db.Exec(stmt) - stmt = `INSERT INTO schema (version, updated_at) VALUES (?, strftime("%s"));` - _, err := db.Exec(stmt, 29) - return err + return nil } -func dbUpdateFromV27(db *sql.DB) error { - stmt 
:= ` -UPDATE profiles_devices SET type=3 WHERE type='unix-char'; -INSERT INTO schema (version, updated_at) VALUES (?, strftime("%s"));` - _, err := db.Exec(stmt, 28) +func dbUpdateFromV27(currentVersion int, version int, d *Daemon) error { + _, err := d.db.Exec("UPDATE profiles_devices SET type=3 WHERE type='unix-char';") return err } -func dbUpdateFromV26(db *sql.DB) error { +func dbUpdateFromV26(currentVersion int, version int, d *Daemon) error { stmt := ` ALTER TABLE images ADD COLUMN auto_update INTEGER NOT NULL DEFAULT 0; CREATE TABLE IF NOT EXISTS images_source ( @@ -46,92 +244,76 @@ certificate TEXT NOT NULL, alias VARCHAR(255) NOT NULL, FOREIGN KEY (image_id) REFERENCES images (id) ON DELETE CASCADE -); -INSERT INTO schema (version, updated_at) VALUES (?, strftime("%s"));` - _, err := db.Exec(stmt, 27) +);` + _, err := d.db.Exec(stmt) return err } -func dbUpdateFromV25(db *sql.DB) error { +func dbUpdateFromV25(currentVersion int, version int, d *Daemon) error { stmt := ` INSERT INTO profiles (name, description) VALUES ("docker", "Profile supporting docker in containers"); INSERT INTO profiles_config (profile_id, key, value) SELECT id, "security.nesting", "true" FROM profiles WHERE name="docker"; INSERT INTO profiles_config (profile_id, key, value) SELECT id, "linux.kernel_modules", "overlay, nf_nat" FROM profiles WHERE name="docker"; INSERT INTO profiles_devices (profile_id, name, type) SELECT id, "fuse", "unix-char" FROM profiles WHERE name="docker"; INSERT INTO profiles_devices_config (profile_device_id, key, value) SELECT profiles_devices.id, "path", "/dev/fuse" FROM profiles_devices LEFT JOIN profiles WHERE profiles_devices.profile_id = profiles.id AND profiles.name = "docker";` - db.Exec(stmt) + d.db.Exec(stmt) - stmt = `INSERT INTO schema (version, updated_at) VALUES (?, strftime("%s"));` - _, err := db.Exec(stmt, 26) - return err + return nil } -func dbUpdateFromV24(db *sql.DB) error { - stmt := ` -ALTER TABLE containers ADD COLUMN stateful INTEGER 
NOT NULL DEFAULT 0; -INSERT INTO schema (version, updated_at) VALUES (?, strftime("%s"));` - _, err := db.Exec(stmt, 25) +func dbUpdateFromV24(currentVersion int, version int, d *Daemon) error { + _, err := d.db.Exec("ALTER TABLE containers ADD COLUMN stateful INTEGER NOT NULL DEFAULT 0;") return err } -func dbUpdateFromV23(db *sql.DB) error { - stmt := ` -ALTER TABLE profiles ADD COLUMN description TEXT; -INSERT INTO schema (version, updated_at) VALUES (?, strftime("%s"));` - _, err := db.Exec(stmt, 24) +func dbUpdateFromV23(currentVersion int, version int, d *Daemon) error { + _, err := d.db.Exec("ALTER TABLE profiles ADD COLUMN description TEXT;") return err } -func dbUpdateFromV22(db *sql.DB) error { +func dbUpdateFromV22(currentVersion int, version int, d *Daemon) error { stmt := ` DELETE FROM containers_devices_config WHERE key='type'; -DELETE FROM profiles_devices_config WHERE key='type'; -INSERT INTO schema (version, updated_at) VALUES (?, strftime("%s"));` - _, err := db.Exec(stmt, 23) +DELETE FROM profiles_devices_config WHERE key='type';` + _, err := d.db.Exec(stmt) return err } -func dbUpdateFromV21(db *sql.DB) error { - stmt := ` -ALTER TABLE containers ADD COLUMN creation_date DATETIME NOT NULL DEFAULT 0; -INSERT INTO schema (version, updated_at) VALUES (?, strftime("%s"));` - _, err := db.Exec(stmt, 22) +func dbUpdateFromV21(currentVersion int, version int, d *Daemon) error { + _, err := d.db.Exec("ALTER TABLE containers ADD COLUMN creation_date DATETIME NOT NULL DEFAULT 0;") return err } -func dbUpdateFromV20(db *sql.DB) error { +func dbUpdateFromV20(currentVersion int, version int, d *Daemon) error { stmt := ` UPDATE containers_devices SET name='__lxd_upgrade_root' WHERE name='root'; UPDATE profiles_devices SET name='__lxd_upgrade_root' WHERE name='root'; INSERT INTO containers_devices (container_id, name, type) SELECT id, "root", 2 FROM containers; -INSERT INTO containers_devices_config (container_device_id, key, value) SELECT id, "path", "/" FROM 
containers_devices WHERE name='root'; - -INSERT INTO schema (version, updated_at) VALUES (?, strftime("%s"));` - _, err := db.Exec(stmt, 21) +INSERT INTO containers_devices_config (container_device_id, key, value) SELECT id, "path", "/" FROM containers_devices WHERE name='root';` + _, err := d.db.Exec(stmt) return err } -func dbUpdateFromV19(db *sql.DB) error { +func dbUpdateFromV19(currentVersion int, version int, d *Daemon) error { stmt := ` DELETE FROM containers_config WHERE container_id NOT IN (SELECT id FROM containers); DELETE FROM containers_devices_config WHERE container_device_id NOT IN (SELECT id FROM containers_devices WHERE container_id IN (SELECT id FROM containers)); DELETE FROM containers_devices WHERE container_id NOT IN (SELECT id FROM containers); DELETE FROM containers_profiles WHERE container_id NOT IN (SELECT id FROM containers); DELETE FROM images_aliases WHERE image_id NOT IN (SELECT id FROM images); -DELETE FROM images_properties WHERE image_id NOT IN (SELECT id FROM images); -INSERT INTO schema (version, updated_at) VALUES (?, strftime("%s"));` - _, err := db.Exec(stmt, 20) +DELETE FROM images_properties WHERE image_id NOT IN (SELECT id FROM images);` + _, err := d.db.Exec(stmt) return err } -func dbUpdateFromV18(db *sql.DB) error { +func dbUpdateFromV18(currentVersion int, version int, d *Daemon) error { var id int var value string // Update container config - rows, err := dbQueryScan(db, "SELECT id, value FROM containers_config WHERE key='limits.memory'", nil, []interface{}{id, value}) + rows, err := dbQueryScan(d.db, "SELECT id, value FROM containers_config WHERE key='limits.memory'", nil, []interface{}{id, value}) if err != nil { return err } @@ -153,22 +335,22 @@ // Deal with completely broken values _, err = shared.ParseByteSizeString(value) if err != nil { - shared.Debugf("Invalid container memory limit, id=%d value=%s, removing.", id, value) - _, err = db.Exec("DELETE FROM containers_config WHERE id=?;", id) + 
shared.LogDebugf("Invalid container memory limit, id=%d value=%s, removing.", id, value) + _, err = d.db.Exec("DELETE FROM containers_config WHERE id=?;", id) if err != nil { return err } } // Set the new value - _, err = db.Exec("UPDATE containers_config SET value=? WHERE id=?", value, id) + _, err = d.db.Exec("UPDATE containers_config SET value=? WHERE id=?", value, id) if err != nil { return err } } // Update profiles config - rows, err = dbQueryScan(db, "SELECT id, value FROM profiles_config WHERE key='limits.memory'", nil, []interface{}{id, value}) + rows, err = dbQueryScan(d.db, "SELECT id, value FROM profiles_config WHERE key='limits.memory'", nil, []interface{}{id, value}) if err != nil { return err } @@ -190,44 +372,41 @@ // Deal with completely broken values _, err = shared.ParseByteSizeString(value) if err != nil { - shared.Debugf("Invalid profile memory limit, id=%d value=%s, removing.", id, value) - _, err = db.Exec("DELETE FROM profiles_config WHERE id=?;", id) + shared.LogDebugf("Invalid profile memory limit, id=%d value=%s, removing.", id, value) + _, err = d.db.Exec("DELETE FROM profiles_config WHERE id=?;", id) if err != nil { return err } } // Set the new value - _, err = db.Exec("UPDATE profiles_config SET value=? WHERE id=?", value, id) + _, err = d.db.Exec("UPDATE profiles_config SET value=? 
WHERE id=?", value, id) if err != nil { return err } } - _, err = db.Exec("INSERT INTO schema (version, updated_at) VALUES (?, strftime(\"%s\"));", 19) - return err + return nil } -func dbUpdateFromV17(db *sql.DB) error { +func dbUpdateFromV17(currentVersion int, version int, d *Daemon) error { stmt := ` DELETE FROM profiles_config WHERE key LIKE 'volatile.%'; UPDATE containers_config SET key='limits.cpu' WHERE key='limits.cpus'; -UPDATE profiles_config SET key='limits.cpu' WHERE key='limits.cpus'; -INSERT INTO schema (version, updated_at) VALUES (?, strftime("%s"));` - _, err := db.Exec(stmt, 18) +UPDATE profiles_config SET key='limits.cpu' WHERE key='limits.cpus';` + _, err := d.db.Exec(stmt) return err } -func dbUpdateFromV16(db *sql.DB) error { +func dbUpdateFromV16(currentVersion int, version int, d *Daemon) error { stmt := ` UPDATE config SET key='storage.lvm_vg_name' WHERE key = 'core.lvm_vg_name'; -UPDATE config SET key='storage.lvm_thinpool_name' WHERE key = 'core.lvm_thinpool_name'; -INSERT INTO schema (version, updated_at) VALUES (?, strftime("%s"));` - _, err := db.Exec(stmt, 17) +UPDATE config SET key='storage.lvm_thinpool_name' WHERE key = 'core.lvm_thinpool_name';` + _, err := d.db.Exec(stmt) return err } -func dbUpdateFromV15(d *Daemon) error { +func dbUpdateFromV15(currentVersion int, version int, d *Daemon) error { // munge all LVM-backed containers' LV names to match what is // required for snapshot support @@ -236,11 +415,13 @@ return err } - vgName, err := d.ConfigValueGet("storage.lvm_vg_name") + err = daemonConfigInit(d.db) if err != nil { - return fmt.Errorf("Error checking server config: %v", err) + return err } + vgName := daemonConfig["storage.lvm_vg_name"].Get() + for _, cName := range cNames { var lvLinkPath string if strings.Contains(cName, shared.SnapshotDelimiter) { @@ -257,11 +438,11 @@ newLVName = strings.Replace(newLVName, shared.SnapshotDelimiter, "-", -1) if cName == newLVName { - shared.Log.Debug("No need to rename, skipping", 
log.Ctx{"cName": cName, "newLVName": newLVName}) + shared.LogDebug("No need to rename, skipping", log.Ctx{"cName": cName, "newLVName": newLVName}) continue } - shared.Log.Debug("About to rename cName in lv upgrade", log.Ctx{"lvLinkPath": lvLinkPath, "cName": cName, "newLVName": newLVName}) + shared.LogDebug("About to rename cName in lv upgrade", log.Ctx{"lvLinkPath": lvLinkPath, "cName": cName, "newLVName": newLVName}) output, err := exec.Command("lvrename", vgName, cName, newLVName).CombinedOutput() if err != nil { @@ -276,13 +457,11 @@ return fmt.Errorf("Couldn't recreate symlink '%s'->'%s'", lvLinkPath, newLinkDest) } } - stmt := ` -INSERT INTO schema (version, updated_at) VALUES (?, strftime("%s"));` - _, err = d.db.Exec(stmt, 16) - return err + + return nil } -func dbUpdateFromV14(db *sql.DB) error { +func dbUpdateFromV14(currentVersion int, version int, d *Daemon) error { stmt := ` PRAGMA foreign_keys=OFF; -- So that integrity doesn't get in the way for now @@ -310,30 +489,27 @@ DROP TABLE containers; ALTER TABLE tmp RENAME TO containers; -PRAGMA foreign_keys=ON; -- Make sure we turn integrity checks back on. 
-INSERT INTO schema (version, updated_at) VALUES (?, strftime("%s"));` - _, err := db.Exec(stmt, 15) +PRAGMA foreign_keys=ON; -- Make sure we turn integrity checks back on.` + _, err := d.db.Exec(stmt) return err } -func dbUpdateFromV13(db *sql.DB) error { +func dbUpdateFromV13(currentVersion int, version int, d *Daemon) error { stmt := ` -UPDATE containers_config SET key='volatile.base_image' WHERE key = 'volatile.baseImage'; -INSERT INTO schema (version, updated_at) VALUES (?, strftime("%s"));` - _, err := db.Exec(stmt, 14) +UPDATE containers_config SET key='volatile.base_image' WHERE key = 'volatile.baseImage';` + _, err := d.db.Exec(stmt) return err } -func dbUpdateFromV12(db *sql.DB) error { +func dbUpdateFromV12(currentVersion int, version int, d *Daemon) error { stmt := ` ALTER TABLE images ADD COLUMN cached INTEGER NOT NULL DEFAULT 0; -ALTER TABLE images ADD COLUMN last_use_date DATETIME; -INSERT INTO schema (version, updated_at) VALUES (?, strftime("%s"));` - _, err := db.Exec(stmt, 13) +ALTER TABLE images ADD COLUMN last_use_date DATETIME;` + _, err := d.db.Exec(stmt) return err } -func dbUpdateFromV11(d *Daemon) error { +func dbUpdateFromV11(currentVersion int, version int, d *Daemon) error { if d.MockMode { // No need to move snapshots no mock runs, // dbUpdateFromV12 will then set the db version to 13 @@ -352,7 +528,7 @@ oldPath := shared.VarPath("containers", snappieces[0], "snapshots", snappieces[1]) newPath := shared.VarPath("snapshots", snappieces[0], snappieces[1]) if shared.PathExists(oldPath) && !shared.PathExists(newPath) { - shared.Log.Info( + shared.LogInfo( "Moving snapshot", log.Ctx{ "snapshot": cName, @@ -365,7 +541,7 @@ // snapshots// output, err := storageRsyncCopy(oldPath, newPath) if err != nil { - shared.Log.Error( + shared.LogError( "Failed rsync snapshot", log.Ctx{ "snapshot": cName, @@ -377,7 +553,7 @@ // Remove containers//snapshots/ if err := os.RemoveAll(oldPath); err != nil { - shared.Log.Error( + shared.LogError( "Failed to 
remove the old snapshot path", log.Ctx{ "snapshot": cName, @@ -404,14 +580,10 @@ return fmt.Errorf("Got errors while moving snapshots, see the log output.") } - stmt := ` -INSERT INTO schema (version, updated_at) VALUES (?, strftime("%s"));` - _, err = d.db.Exec(stmt, 12) - - return err + return nil } -func dbUpdateFromV10(d *Daemon) error { +func dbUpdateFromV10(currentVersion int, version int, d *Daemon) error { if d.MockMode { // No need to move lxc to containers in mock runs, // dbUpdateFromV12 will then set the db version to 13 @@ -424,18 +596,15 @@ return err } - shared.Debugf("Restarting all the containers following directory rename") + shared.LogDebugf("Restarting all the containers following directory rename") containersShutdown(d) containersRestart(d) } - stmt := ` -INSERT INTO schema (version, updated_at) VALUES (?, strftime("%s"));` - _, err := d.db.Exec(stmt, 11) - return err + return nil } -func dbUpdateFromV9(db *sql.DB) error { +func dbUpdateFromV9(currentVersion int, version int, d *Daemon) error { stmt := ` CREATE TABLE tmp ( id INTEGER primary key AUTOINCREMENT NOT NULL, @@ -473,30 +642,27 @@ UPDATE profiles_devices SET type=3 WHERE id IN (SELECT id FROM tmp WHERE type="unix-char"); UPDATE profiles_devices SET type=4 WHERE id IN (SELECT id FROM tmp WHERE type="unix-block"); -DROP TABLE tmp; -INSERT INTO schema (version, updated_at) VALUES (?, strftime("%s"));` - _, err := db.Exec(stmt, 10) +DROP TABLE tmp;` + _, err := d.db.Exec(stmt) return err } -func dbUpdateFromV8(db *sql.DB) error { +func dbUpdateFromV8(currentVersion int, version int, d *Daemon) error { stmt := ` -UPDATE certificates SET fingerprint = replace(fingerprint, " ", ""); -INSERT INTO schema (version, updated_at) VALUES (?, strftime("%s"));` - _, err := db.Exec(stmt, 9) +UPDATE certificates SET fingerprint = replace(fingerprint, " ", "");` + _, err := d.db.Exec(stmt) return err } -func dbUpdateFromV7(db *sql.DB) error { +func dbUpdateFromV7(currentVersion int, version int, d 
*Daemon) error { stmt := ` UPDATE config SET key='core.trust_password' WHERE key IN ('password', 'trust_password', 'trust-password', 'core.trust-password'); -DELETE FROM config WHERE key != 'core.trust_password'; -INSERT INTO schema (version, updated_at) VALUES (?, strftime("%s"));` - _, err := db.Exec(stmt, 8) +DELETE FROM config WHERE key != 'core.trust_password';` + _, err := d.db.Exec(stmt) return err } -func dbUpdateFromV6(db *sql.DB) error { +func dbUpdateFromV6(currentVersion int, version int, d *Daemon) error { // This update recreates the schemas that need an ON DELETE CASCADE foreign // key. stmt := ` @@ -620,15 +786,14 @@ INSERT INTO profiles_devices_config SELECT * FROM tmp; DROP TABLE tmp; -PRAGMA foreign_keys=ON; -- Make sure we turn integrity checks back on. -INSERT INTO schema (version, updated_at) VALUES (?, strftime("%s"));` - _, err := db.Exec(stmt, 7) +PRAGMA foreign_keys=ON; -- Make sure we turn integrity checks back on.` + _, err := d.db.Exec(stmt) if err != nil { return err } // Get the rows with broken foreign keys an nuke them - rows, err := db.Query("PRAGMA foreign_key_check;") + rows, err := d.db.Query("PRAGMA foreign_key_check;") if err != nil { return err } @@ -650,7 +815,7 @@ rows.Close() for i := range tablestodelete { - _, err = db.Exec(fmt.Sprintf("DELETE FROM %s WHERE rowid = %d;", tablestodelete[i], rowidtodelete[i])) + _, err = d.db.Exec(fmt.Sprintf("DELETE FROM %s WHERE rowid = %d;", tablestodelete[i], rowidtodelete[i])) if err != nil { return err } @@ -659,25 +824,24 @@ return err } -func dbUpdateFromV5(db *sql.DB) error { +func dbUpdateFromV5(currentVersion int, version int, d *Daemon) error { stmt := ` ALTER TABLE containers ADD COLUMN power_state INTEGER NOT NULL DEFAULT 0; -ALTER TABLE containers ADD COLUMN ephemeral INTEGER NOT NULL DEFAULT 0; -INSERT INTO schema (version, updated_at) VALUES (?, strftime("%s"));` - _, err := db.Exec(stmt, 6) +ALTER TABLE containers ADD COLUMN ephemeral INTEGER NOT NULL DEFAULT 0;` + _, err 
:= d.db.Exec(stmt) return err } -func dbUpdateFromV4(db *sql.DB) error { +func dbUpdateFromV4(currentVersion int, version int, d *Daemon) error { stmt := ` CREATE TABLE IF NOT EXISTS config ( id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, key VARCHAR(255) NOT NULL, value TEXT, UNIQUE (key) -); -INSERT INTO schema (version, updated_at) VALUES (?, strftime("%s"));` - _, err := db.Exec(stmt, 5) +);` + + _, err := d.db.Exec(stmt) if err != nil { return err } @@ -696,7 +860,7 @@ oldPassword = hex.EncodeToString(buff) stmt := `INSERT INTO config (key, value) VALUES ("core.trust_password", ?);` - _, err := db.Exec(stmt, oldPassword) + _, err := d.db.Exec(stmt, oldPassword) if err != nil { return err } @@ -707,21 +871,14 @@ return nil } -func dbUpdateFromV3(db *sql.DB) error { +func dbUpdateFromV3(currentVersion int, version int, d *Daemon) error { // Attempt to create a default profile (but don't fail if already there) - stmt := `INSERT INTO profiles (name) VALUES ("default"); -INSERT INTO profiles_devices (profile_id, name, type) SELECT id, "eth0", "nic" FROM profiles WHERE profiles.name="default"; -INSERT INTO profiles_devices_config (profile_device_id, key, value) SELECT profiles_devices.id, "nictype", "bridged" FROM profiles_devices LEFT JOIN profiles ON profiles.id=profiles_devices.profile_id WHERE profiles.name == "default"; -INSERT INTO profiles_devices_config (profile_device_id, key, value) SELECT profiles_devices.id, 'name', "eth0" FROM profiles_devices LEFT JOIN profiles ON profiles.id=profiles_devices.profile_id WHERE profiles.name == "default"; -INSERT INTO profiles_devices_config (profile_device_id, key, value) SELECT profiles_devices.id, "parent", "lxcbr0" FROM profiles_devices LEFT JOIN profiles ON profiles.id=profiles_devices.profile_id WHERE profiles.name == "default";` - db.Exec(stmt) + d.db.Exec("INSERT INTO profiles (name) VALUES (\"default\");") - stmt = `INSERT INTO schema (version, updated_at) VALUES (?, strftime("%s"));` - _, err := db.Exec(stmt, 
4) - return err + return nil } -func dbUpdateFromV2(db *sql.DB) error { +func dbUpdateFromV2(currentVersion int, version int, d *Daemon) error { stmt := ` CREATE TABLE IF NOT EXISTS containers_devices ( id INTEGER primary key AUTOINCREMENT NOT NULL, @@ -776,14 +933,12 @@ value TEXT, UNIQUE (profile_device_id, key), FOREIGN KEY (profile_device_id) REFERENCES profiles_devices (id) -); -INSERT INTO schema (version, updated_at) VALUES (?, strftime("%s"));` - _, err := db.Exec(stmt, 3) +);` + _, err := d.db.Exec(stmt) return err } -/* Yeah we can do htis in a more clever way */ -func dbUpdateFromV1(db *sql.DB) error { +func dbUpdateFromV1(currentVersion int, version int, d *Daemon) error { // v1..v2 adds images aliases stmt := ` CREATE TABLE IF NOT EXISTS images_aliases ( @@ -793,13 +948,12 @@ description VARCHAR(255), FOREIGN KEY (image_id) REFERENCES images (id) ON DELETE CASCADE, UNIQUE (name) -); -INSERT INTO schema (version, updated_at) VALUES (?, strftime("%s"));` - _, err := db.Exec(stmt, 2) +);` + _, err := d.db.Exec(stmt) return err } -func dbUpdateFromV0(db *sql.DB) error { +func dbUpdateFromV0(currentVersion int, version int, d *Daemon) error { // v0..v1 adds schema table stmt := ` CREATE TABLE IF NOT EXISTS schema ( @@ -807,196 +961,7 @@ version INTEGER NOT NULL, updated_at DATETIME NOT NULL, UNIQUE (version) -); -INSERT INTO schema (version, updated_at) VALUES (?, strftime("%s"));` - _, err := db.Exec(stmt, 1) +);` + _, err := d.db.Exec(stmt) return err } - -func dbUpdate(d *Daemon, prevVersion int) error { - db := d.db - - if prevVersion < 0 || prevVersion > DB_CURRENT_VERSION { - return fmt.Errorf("Bad database version: %d", prevVersion) - } - if prevVersion == DB_CURRENT_VERSION { - return nil - } - var err error - if prevVersion < 1 { - err = dbUpdateFromV0(db) - if err != nil { - return err - } - } - if prevVersion < 2 { - err = dbUpdateFromV1(db) - if err != nil { - return err - } - } - if prevVersion < 3 { - err = dbUpdateFromV2(db) - if err != nil { 
- return err - } - } - if prevVersion < 4 { - err = dbUpdateFromV3(db) - if err != nil { - return err - } - } - if prevVersion < 5 { - err = dbUpdateFromV4(db) - if err != nil { - return err - } - } - if prevVersion < 6 { - err = dbUpdateFromV5(db) - if err != nil { - return err - } - } - if prevVersion < 7 { - err = dbUpdateFromV6(db) - if err != nil { - return err - } - } - if prevVersion < 8 { - err = dbUpdateFromV7(db) - if err != nil { - return err - } - } - if prevVersion < 9 { - err = dbUpdateFromV8(db) - if err != nil { - return err - } - } - if prevVersion < 10 { - err = dbUpdateFromV9(db) - if err != nil { - return err - } - } - if prevVersion < 11 { - err = dbUpdateFromV10(d) - if err != nil { - return err - } - } - if prevVersion < 12 { - err = dbUpdateFromV11(d) - if err != nil { - return err - } - } - if prevVersion < 13 { - err = dbUpdateFromV12(db) - if err != nil { - return err - } - } - if prevVersion < 14 { - err = dbUpdateFromV13(db) - if err != nil { - return err - } - } - if prevVersion < 15 { - err = dbUpdateFromV14(db) - if err != nil { - return err - } - } - if prevVersion < 16 { - err = dbUpdateFromV15(d) - if err != nil { - return err - } - } - if prevVersion < 17 { - err = dbUpdateFromV16(db) - if err != nil { - return err - } - } - if prevVersion < 18 { - err = dbUpdateFromV17(db) - if err != nil { - return err - } - } - if prevVersion < 19 { - err = dbUpdateFromV18(db) - if err != nil { - return err - } - } - if prevVersion < 20 { - err = dbUpdateFromV19(db) - if err != nil { - return err - } - } - if prevVersion < 21 { - err = dbUpdateFromV20(db) - if err != nil { - return err - } - } - if prevVersion < 22 { - err = dbUpdateFromV21(db) - if err != nil { - return err - } - } - if prevVersion < 23 { - err = dbUpdateFromV22(db) - if err != nil { - return err - } - } - if prevVersion < 24 { - err = dbUpdateFromV23(db) - if err != nil { - return err - } - } - if prevVersion < 25 { - err = dbUpdateFromV24(db) - if err != nil { - return err 
- } - } - if prevVersion < 26 { - err = dbUpdateFromV25(db) - if err != nil { - return err - } - } - if prevVersion < 27 { - err = dbUpdateFromV26(db) - if err != nil { - return err - } - } - if prevVersion < 28 { - err = dbUpdateFromV27(db) - if err != nil { - return err - } - } - if prevVersion < 29 { - err = dbUpdateFromV28(db) - if err != nil { - return err - } - } - - return nil -} diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/lxd/debug.go juju-core-2.0.0/src/github.com/lxc/lxd/lxd/debug.go --- juju-core-2.0~beta15/src/github.com/lxc/lxd/lxd/debug.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/lxd/debug.go 2016-10-13 14:31:53.000000000 +0000 @@ -12,7 +12,7 @@ func doMemDump(memProfile string) { f, err := os.Create(memProfile) if err != nil { - shared.Debugf("Error opening memory profile file '%s': %s", err) + shared.LogDebugf("Error opening memory profile file '%s': %s", err) return } pprof.WriteHeapProfile(f) @@ -24,7 +24,7 @@ signal.Notify(ch, syscall.SIGUSR1) for { sig := <-ch - shared.Debugf("Received '%s signal', dumping memory.", sig) + shared.LogDebugf("Received '%s signal', dumping memory.", sig) doMemDump(memProfile) } } diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/lxd/devices.go juju-core-2.0.0/src/github.com/lxc/lxd/lxd/devices.go --- juju-core-2.0~beta15/src/github.com/lxc/lxd/lxd/devices.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/lxd/devices.go 2016-10-13 14:31:53.000000000 +0000 @@ -24,7 +24,7 @@ log "gopkg.in/inconshreveable/log15.v2" ) -var deviceSchedRebalance = make(chan []string, 0) +var deviceSchedRebalance = make(chan []string, 2) type deviceBlockLimit struct { readBps int64 @@ -44,7 +44,51 @@ func (c deviceTaskCPUs) Less(i, j int) bool { return *c[i].count < *c[j].count } func (c deviceTaskCPUs) Swap(i, j int) { c[i], c[j] = c[j], c[i] } -func deviceNetlinkListener() (chan []string, error) { +type usbDevice struct { + action string + + vendor string 
+ product string + + path string + major int + minor int +} + +func createUSBDevice(action string, vendor string, product string, major string, minor string, busnum string, devnum string) (usbDevice, error) { + majorInt, err := strconv.Atoi(major) + if err != nil { + return usbDevice{}, err + } + + minorInt, err := strconv.Atoi(minor) + if err != nil { + return usbDevice{}, err + } + + busnumInt, err := strconv.Atoi(busnum) + if err != nil { + return usbDevice{}, err + } + + devnumInt, err := strconv.Atoi(devnum) + if err != nil { + return usbDevice{}, err + } + + path := fmt.Sprintf("/dev/bus/usb/%03d/%03d", busnumInt, devnumInt) + + return usbDevice{ + action, + vendor, + product, + path, + majorInt, + minorInt, + }, nil +} + +func deviceNetlinkListener() (chan []string, chan []string, chan usbDevice, error) { NETLINK_KOBJECT_UEVENT := 15 UEVENT_BUFFER_SIZE := 2048 @@ -54,7 +98,7 @@ ) if err != nil { - return nil, err + return nil, nil, nil, err } nl := syscall.SockaddrNetlink{ @@ -65,12 +109,14 @@ err = syscall.Bind(fd, &nl) if err != nil { - return nil, err + return nil, nil, nil, err } - ch := make(chan []string, 0) + chCPU := make(chan []string, 1) + chNetwork := make(chan []string, 0) + chUSB := make(chan usbDevice) - go func(ch chan []string) { + go func(chCPU chan []string, chNetwork chan []string, chUSB chan usbDevice) { b := make([]byte, UEVENT_BUFFER_SIZE*2) for { _, err := syscall.Read(fd, b) @@ -106,7 +152,12 @@ continue } - ch <- []string{"cpu", path.Base(props["DEVPATH"]), props["ACTION"]} + // As CPU re-balancing affects all containers, no need to queue them + select { + case chCPU <- []string{path.Base(props["DEVPATH"]), props["ACTION"]}: + default: + // Channel is full, drop the event + } } if props["SUBSYSTEM"] == "net" { @@ -114,12 +165,106 @@ continue } - ch <- []string{"net", props["INTERFACE"], props["ACTION"]} + if !shared.PathExists(fmt.Sprintf("/sys/class/net/%s", props["INTERFACE"])) { + continue + } + + // Network balancing is interface 
specific, so queue everything + chNetwork <- []string{props["INTERFACE"], props["ACTION"]} + } + + if props["SUBSYSTEM"] == "usb" { + if props["ACTION"] != "add" && props["ACTION"] != "remove" { + continue + } + + parts := strings.Split(props["PRODUCT"], "/") + if len(parts) < 2 { + continue + } + + major, ok := props["MAJOR"] + if !ok { + continue + } + minor, ok := props["MINOR"] + if !ok { + continue + } + busnum, ok := props["BUSNUM"] + if !ok { + continue + } + devnum, ok := props["DEVNUM"] + if !ok { + continue + } + + zeroPad := func(s string, l int) string { + return strings.Repeat("0", l-len(s)) + s + } + + usb, err := createUSBDevice( + props["ACTION"], + /* udev doesn't zero pad these, while + * everything else does, so let's zero pad them + * for consistency + */ + zeroPad(parts[0], 4), + zeroPad(parts[1], 4), + major, + minor, + busnum, + devnum, + ) + if err != nil { + shared.LogError("error reading usb device", log.Ctx{"err": err, "path": props["PHYSDEVPATH"]}) + continue + } + + chUSB <- usb } + } - }(ch) + }(chCPU, chNetwork, chUSB) + + return chCPU, chNetwork, chUSB, nil +} + +func parseCpuset(cpu string) ([]int, error) { + cpus := []int{} + chunks := strings.Split(cpu, ",") + for _, chunk := range chunks { + if strings.Contains(chunk, "-") { + // Range + fields := strings.SplitN(chunk, "-", 2) + if len(fields) != 2 { + return nil, fmt.Errorf("Invalid cpuset value: %s", cpu) + } + + low, err := strconv.Atoi(fields[0]) + if err != nil { + return nil, fmt.Errorf("Invalid cpuset value: %s", cpu) + } + + high, err := strconv.Atoi(fields[1]) + if err != nil { + return nil, fmt.Errorf("Invalid cpuset value: %s", cpu) + } - return ch, nil + for i := low; i <= high; i++ { + cpus = append(cpus, i) + } + } else { + // Simple entry + nr, err := strconv.Atoi(chunk) + if err != nil { + return nil, fmt.Errorf("Invalid cpuset value: %s", cpu) + } + cpus = append(cpus, nr) + } + } + return cpus, nil } func deviceTaskBalance(d *Daemon) { @@ -135,44 +280,32 @@ 
return } - // Count CPUs - cpus := []int{} - dents, err := ioutil.ReadDir("/sys/bus/cpu/devices/") + // Get effective cpus list - those are all guaranteed to be online + effectiveCpus, err := cGroupGet("cpuset", "/", "cpuset.effective_cpus") if err != nil { - shared.Log.Error("balance: Unable to list CPUs", log.Ctx{"err": err}) - return - } - - for _, f := range dents { - id := -1 - count, err := fmt.Sscanf(f.Name(), "cpu%d", &id) - if count != 1 || id == -1 { - shared.Log.Error("balance: Bad CPU", log.Ctx{"path": f.Name()}) - continue - } - - onlinePath := fmt.Sprintf("/sys/bus/cpu/devices/%s/online", f.Name()) - if !shared.PathExists(onlinePath) { - // CPUs without an online file are non-hotplug so are always online - cpus = append(cpus, id) - continue - } - - online, err := ioutil.ReadFile(onlinePath) + // Older kernel - use cpuset.cpus + effectiveCpus, err = cGroupGet("cpuset", "/", "cpuset.cpus") if err != nil { - shared.Log.Error("balance: Bad CPU", log.Ctx{"path": f.Name(), "err": err}) - continue - } - - if online[0] == byte('0') { - continue + shared.LogErrorf("Error reading host's cpuset.cpus") + return } - - cpus = append(cpus, id) + } + err = cGroupSet("cpuset", "/lxc", "cpuset.cpus", effectiveCpus) + if err != nil && shared.PathExists("/sys/fs/cgroup/cpuset/lxc") { + shared.LogWarn("Error setting lxd's cpuset.cpus", log.Ctx{"err": err}) + } + cpus, err := parseCpuset(effectiveCpus) + if err != nil { + shared.LogError("Error parsing host's cpu set", log.Ctx{"cpuset": effectiveCpus, "err": err}) + return } // Iterate through the containers containers, err := dbContainersList(d.db, cTypeRegular) + if err != nil { + shared.LogError("problem loading containers list", log.Ctx{"err": err}) + return + } fixedContainers := map[int][]container{} balancedContainers := map[container]int{} for _, name := range containers { @@ -182,80 +315,36 @@ } conf := c.ExpandedConfig() - cpu, ok := conf["limits.cpu"] - if !ok || cpu == "" { - currentCPUs, err := 
deviceGetCurrentCPUs() - if err != nil { - shared.Debugf("Couldn't get current CPU list: %s", err) - cpu = fmt.Sprintf("%d", len(cpus)) - } else { - cpu = currentCPUs - } + cpulimit, ok := conf["limits.cpu"] + if !ok || cpulimit == "" { + cpulimit = effectiveCpus } if !c.IsRunning() { continue } - count, err := strconv.Atoi(cpu) + count, err := strconv.Atoi(cpulimit) if err == nil { // Load-balance count = min(count, len(cpus)) balancedContainers[c] = count } else { // Pinned - chunks := strings.Split(cpu, ",") - for _, chunk := range chunks { - if strings.Contains(chunk, "-") { - // Range - fields := strings.SplitN(chunk, "-", 2) - if len(fields) != 2 { - shared.Log.Error("Invalid limits.cpu value.", log.Ctx{"container": c.Name(), "value": cpu}) - continue - } - - low, err := strconv.Atoi(fields[0]) - if err != nil { - shared.Log.Error("Invalid limits.cpu value.", log.Ctx{"container": c.Name(), "value": cpu}) - continue - } - - high, err := strconv.Atoi(fields[1]) - if err != nil { - shared.Log.Error("Invalid limits.cpu value.", log.Ctx{"container": c.Name(), "value": cpu}) - continue - } + containerCpus, err := parseCpuset(cpulimit) + if err != nil { + return + } + for _, nr := range containerCpus { + if !shared.IntInSlice(nr, cpus) { + continue + } - for i := low; i <= high; i++ { - if !shared.IntInSlice(i, cpus) { - continue - } - - _, ok := fixedContainers[i] - if ok { - fixedContainers[i] = append(fixedContainers[i], c) - } else { - fixedContainers[i] = []container{c} - } - } + _, ok := fixedContainers[nr] + if ok { + fixedContainers[nr] = append(fixedContainers[nr], c) } else { - // Simple entry - nr, err := strconv.Atoi(chunk) - if err != nil { - shared.Log.Error("Invalid limits.cpu value.", log.Ctx{"container": c.Name(), "value": cpu}) - continue - } - - if !shared.IntInSlice(nr, cpus) { - continue - } - - _, ok := fixedContainers[nr] - if ok { - fixedContainers[nr] = append(fixedContainers[nr], c) - } else { - fixedContainers[nr] = []container{c} - } + 
fixedContainers[nr] = []container{c} } } } @@ -263,7 +352,7 @@ // Balance things pinning := map[container][]string{} - usage := make(deviceTaskCPUs, 0) + usage := map[int]deviceTaskCPU{} for _, id := range cpus { cpu := deviceTaskCPU{} @@ -272,11 +361,16 @@ count := 0 cpu.count = &count - usage = append(usage, cpu) + usage[id] = cpu } for cpu, ctns := range fixedContainers { - id := usage[cpu].strId + c, ok := usage[cpu] + if !ok { + shared.LogErrorf("Internal error: container using unavailable cpu") + continue + } + id := c.strId for _, ctn := range ctns { _, ok := pinning[ctn] if ok { @@ -284,13 +378,18 @@ } else { pinning[ctn] = []string{id} } - *usage[cpu].count += 1 + *c.count += 1 } } + sortedUsage := make(deviceTaskCPUs, 0) + for _, value := range usage { + sortedUsage = append(sortedUsage, value) + } + for ctn, count := range balancedContainers { - sort.Sort(usage) - for _, cpu := range usage { + sort.Sort(sortedUsage) + for _, cpu := range sortedUsage { if count == 0 { break } @@ -317,37 +416,9 @@ sort.Strings(set) err := ctn.CGroupSet("cpuset.cpus", strings.Join(set, ",")) if err != nil { - shared.Log.Error("balance: Unable to set cpuset", log.Ctx{"name": ctn.Name(), "err": err, "value": strings.Join(set, ",")}) - } - } -} - -func deviceGetCurrentCPUs() (string, error) { - // Open /proc/self/status - f, err := os.Open("/proc/self/status") - if err != nil { - return "", err - } - defer f.Close() - - // Read it line by line - scan := bufio.NewScanner(f) - for scan.Scan() { - line := scan.Text() - - // We only care about MemTotal - if !strings.HasPrefix(line, "Cpus_allowed_list:") { - continue + shared.LogError("balance: Unable to set cpuset", log.Ctx{"name": ctn.Name(), "err": err, "value": strings.Join(set, ",")}) } - - // Extract the before last (value) and last (unit) fields - fields := strings.Split(line, "\t") - value := fields[len(fields)-1] - - return value, nil } - - return "", fmt.Errorf("Couldn't find cpus_allowed_list") } func 
deviceNetworkPriority(d *Daemon, netif string) { @@ -386,49 +457,119 @@ return } +func deviceUSBEvent(d *Daemon, usb usbDevice) { + containers, err := dbContainersList(d.db, cTypeRegular) + if err != nil { + shared.LogError("problem loading containers list", log.Ctx{"err": err}) + return + } + + for _, name := range containers { + containerIf, err := containerLoadByName(d, name) + if err != nil { + continue + } + + c, ok := containerIf.(*containerLXC) + if !ok { + shared.LogErrorf("got device event on non-LXC container?") + return + } + + if !c.IsRunning() { + continue + } + + devices := c.ExpandedDevices() + for _, name := range devices.DeviceNames() { + m := devices[name] + if m["type"] != "usb" { + continue + } + + if m["vendorid"] != usb.vendor || (m["productid"] != "" && m["productid"] != usb.product) { + continue + } + + if usb.action == "add" { + err := c.insertUSBDevice(m, usb) + if err != nil { + shared.LogError("failed to create usb device", log.Ctx{"err": err, "usb": usb, "container": c.Name()}) + return + } + } else if usb.action == "remove" { + err := c.removeUSBDevice(m, usb) + if err != nil { + shared.LogError("failed to remove usb device", log.Ctx{"err": err, "usb": usb, "container": c.Name()}) + return + } + } else { + shared.LogError("unknown action for usb device", log.Ctx{"usb": usb}) + continue + } + } + } +} + func deviceEventListener(d *Daemon) { - chNetlink, err := deviceNetlinkListener() + chNetlinkCPU, chNetlinkNetwork, chUSB, err := deviceNetlinkListener() if err != nil { - shared.Log.Error("scheduler: couldn't setup netlink listener") + shared.LogErrorf("scheduler: couldn't setup netlink listener") return } for { select { - case e := <-chNetlink: - if len(e) != 3 { - shared.Log.Error("Scheduler: received an invalid hotplug event") + case e := <-chNetlinkCPU: + if len(e) != 2 { + shared.LogErrorf("Scheduler: received an invalid cpu hotplug event") continue } - if e[0] == "cpu" && cgCpusetController { - shared.Debugf("Scheduler: %s: %s is 
now %s: re-balancing", e[0], e[1], e[2]) - deviceTaskBalance(d) + if !cgCpusetController { + continue } - if e[0] == "net" && e[2] == "add" && cgNetPrioController && shared.PathExists(fmt.Sprintf("/sys/class/net/%s", e[1])) { - shared.Debugf("Scheduler: %s: %s has been added: updating network priorities", e[0], e[1]) - deviceNetworkPriority(d, e[1]) + shared.LogDebugf("Scheduler: cpu: %s is now %s: re-balancing", e[0], e[1]) + deviceTaskBalance(d) + case e := <-chNetlinkNetwork: + if len(e) != 2 { + shared.LogErrorf("Scheduler: received an invalid network hotplug event") + continue } + + if !cgNetPrioController { + continue + } + + shared.LogDebugf("Scheduler: network: %s has been added: updating network priorities", e[0]) + deviceNetworkPriority(d, e[0]) + networkAutoAttach(d, e[0]) + case e := <-chUSB: + deviceUSBEvent(d, e) case e := <-deviceSchedRebalance: if len(e) != 3 { - shared.Log.Error("Scheduler: received an invalid rebalance event") + shared.LogErrorf("Scheduler: received an invalid rebalance event") continue } - if cgCpusetController { - shared.Debugf("Scheduler: %s %s %s: re-balancing", e[0], e[1], e[2]) - deviceTaskBalance(d) + if !cgCpusetController { + continue } + + shared.LogDebugf("Scheduler: %s %s %s: re-balancing", e[0], e[1], e[2]) + deviceTaskBalance(d) } } } func deviceTaskSchedulerTrigger(srcType string, srcName string, srcStatus string) { // Spawn a go routine which then triggers the scheduler - go func() { - deviceSchedRebalance <- []string{srcType, srcName, srcStatus} - }() + select { + case deviceSchedRebalance <- []string{srcType, srcName, srcStatus}: + default: + // Channel is full, drop the event + } } func deviceIsBlockdev(path string) bool { @@ -742,11 +883,11 @@ devices = append(devices, dev) } } - } else if shared.PathExists(fmt.Sprintf("/dev/%s", fields[0])) { + } else if deviceIsBlockdev(fmt.Sprintf("/dev/%s", fields[0])) { path = fmt.Sprintf("/dev/%s", fields[0]) - } else if shared.PathExists(fmt.Sprintf("/dev/disk/by-id/%s", 
fields[0])) { + } else if deviceIsBlockdev(fmt.Sprintf("/dev/disk/by-id/%s", fields[0])) { path = fmt.Sprintf("/dev/disk/by-id/%s", fields[0]) - } else if shared.PathExists(fmt.Sprintf("/dev/mapper/%s", fields[0])) { + } else if deviceIsBlockdev(fmt.Sprintf("/dev/mapper/%s", fields[0])) { path = fmt.Sprintf("/dev/mapper/%s", fields[0]) } else { continue @@ -838,3 +979,76 @@ return readBps, readIops, writeBps, writeIops, nil } + +const USB_PATH = "/sys/bus/usb/devices" + +func loadRawValues(p string) (map[string]string, error) { + values := map[string]string{ + "idVendor": "", + "idProduct": "", + "dev": "", + "busnum": "", + "devnum": "", + } + + for k, _ := range values { + v, err := ioutil.ReadFile(path.Join(p, k)) + if err != nil { + return nil, err + } + + values[k] = strings.TrimSpace(string(v)) + } + + return values, nil +} + +func deviceLoadUsb() ([]usbDevice, error) { + result := []usbDevice{} + + ents, err := ioutil.ReadDir(USB_PATH) + if err != nil { + /* if there are no USB devices, let's render an empty list, + * i.e. 
no usb devices */ + if os.IsNotExist(err) { + return result, nil + } + return nil, err + } + + for _, ent := range ents { + values, err := loadRawValues(path.Join(USB_PATH, ent.Name())) + if err != nil { + if os.IsNotExist(err) { + continue + } + + return []usbDevice{}, err + } + + parts := strings.Split(values["dev"], ":") + if len(parts) != 2 { + return []usbDevice{}, fmt.Errorf("invalid device value %s", values["dev"]) + } + + usb, err := createUSBDevice( + "add", + values["idVendor"], + values["idProduct"], + parts[0], + parts[1], + values["busnum"], + values["devnum"], + ) + if err != nil { + if os.IsNotExist(err) { + continue + } + return nil, err + } + + result = append(result, usb) + } + + return result, nil +} diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/lxd/devlxd.go juju-core-2.0.0/src/github.com/lxc/lxd/lxd/devlxd.go --- juju-core-2.0~beta15/src/github.com/lxc/lxd/lxd/devlxd.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/lxd/devlxd.go 2016-10-13 14:31:53.000000000 +0000 @@ -217,7 +217,7 @@ case http.StateNew: cred, err := getCred(unixConn) if err != nil { - shared.Debugf("Error getting ucred for conn %s", err) + shared.LogDebugf("Error getting ucred for conn %s", err) } else { m.m[unixConn] = cred } @@ -238,7 +238,7 @@ case http.StateClosed: delete(m.m, unixConn) default: - shared.Debugf("Unknown state for connection %s", state) + shared.LogDebugf("Unknown state for connection %s", state) } } diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/lxd/events.go juju-core-2.0.0/src/github.com/lxc/lxd/lxd/events.go --- juju-core-2.0~beta15/src/github.com/lxc/lxd/lxd/events.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/lxd/events.go 2016-10-13 14:31:53.000000000 +0000 @@ -61,6 +61,10 @@ return eventsSocket(r.req, w) } +func (r *eventsServe) String() string { + return "event handler" +} + func eventsSocket(r *http.Request, w http.ResponseWriter) error { listener := eventListener{} 
@@ -83,7 +87,7 @@ eventListeners[listener.id] = &listener eventsLock.Unlock() - shared.Debugf("New events listener: %s", listener.id) + shared.LogDebugf("New events listener: %s", listener.id) <-listener.active @@ -92,7 +96,7 @@ eventsLock.Unlock() listener.connection.Close() - shared.Debugf("Disconnected events listener: %s", listener.id) + shared.LogDebugf("Disconnected events listener: %s", listener.id) return nil } diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/lxd/images.go juju-core-2.0.0/src/github.com/lxc/lxd/lxd/images.go --- juju-core-2.0~beta15/src/github.com/lxc/lxd/lxd/images.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/lxd/images.go 2016-10-13 14:31:53.000000000 +0000 @@ -16,6 +16,7 @@ "strconv" "strings" "sync" + "syscall" "time" "github.com/gorilla/mux" @@ -64,67 +65,122 @@ return []string{"-Jxf"}, ".tar.xz", nil case (bytes.Equal(header[1:5], []byte{'7', 'z', 'X', 'Z'}) && header[0] != 0xFD): return []string{"--lzma", "-xf"}, ".tar.lzma", nil + case bytes.Equal(header[0:3], []byte{0x5d, 0x00, 0x00}): + return []string{"--lzma", "-xf"}, ".tar.lzma", nil case bytes.Equal(header[257:262], []byte{'u', 's', 't', 'a', 'r'}): return []string{"-xf"}, ".tar", nil + case bytes.Equal(header[0:4], []byte{'h', 's', 'q', 's'}): + return []string{""}, ".squashfs", nil default: - return []string{""}, "", fmt.Errorf("Unsupported compression.") + return []string{""}, "", fmt.Errorf("Unsupported compression") } } -func untar(tarball string, path string) error { - extractArgs, _, err := detectCompression(tarball) +func unpack(d *Daemon, file string, path string) error { + extractArgs, extension, err := detectCompression(file) if err != nil { return err } - command := "tar" + command := "" args := []string{} - if runningInUserns { - args = append(args, "--wildcards") - args = append(args, "--exclude=dev/*") - args = append(args, "--exclude=./dev/*") - args = append(args, "--exclude=rootfs/dev/*") - args = append(args, 
"--exclude=rootfs/./dev/*") - } - args = append(args, "-C", path, "--numeric-owner") - args = append(args, extractArgs...) - args = append(args, tarball) + if strings.HasPrefix(extension, ".tar") { + command = "tar" + if runningInUserns { + args = append(args, "--wildcards") + args = append(args, "--exclude=dev/*") + args = append(args, "--exclude=./dev/*") + args = append(args, "--exclude=rootfs/dev/*") + args = append(args, "--exclude=rootfs/./dev/*") + } + args = append(args, "-C", path, "--numeric-owner") + args = append(args, extractArgs...) + args = append(args, file) + } else if strings.HasPrefix(extension, ".squashfs") { + command = "unsquashfs" + args = append(args, "-f", "-d", path, "-n") + + // Limit unsquashfs chunk size to 10% of memory and up to 256MB (default) + // When running on a low memory system, also disable multi-processing + mem, err := deviceTotalMemory() + mem = mem / 1024 / 1024 / 10 + if err == nil && mem < 256 { + args = append(args, "-da", fmt.Sprintf("%d", mem), "-fr", fmt.Sprintf("%d", mem), "-p", "1") + } + + args = append(args, file) + } else { + return fmt.Errorf("Unsupported image format: %s", extension) + } output, err := exec.Command(command, args...).CombinedOutput() if err != nil { - shared.Debugf("Unpacking failed") - shared.Debugf(string(output)) - return err + // Check if we ran out of space + fs := syscall.Statfs_t{} + + err1 := syscall.Statfs(path, &fs) + if err1 != nil { + return err1 + } + + // Check if we're running out of space + if int64(fs.Bfree) < int64(2*fs.Bsize) { + if d.Storage.GetStorageType() == storageTypeLvm { + return fmt.Errorf("Unable to unpack image, run out of disk space (consider increasing storage.lvm_volume_size).") + } else { + return fmt.Errorf("Unable to unpack image, run out of disk space.") + } + } + + co := string(output) + shared.LogDebugf("Unpacking failed") + shared.LogDebugf(co) + + // Truncate the output to a single line for inclusion in the error + // message. 
The first line isn't guaranteed to pinpoint the issue, + // but it's better than nothing and better than a multi-line message. + return fmt.Errorf("Unpack failed, %s. %s", err, strings.SplitN(co, "\n", 2)[0]) } return nil } -func untarImage(imagefname string, destpath string) error { - err := untar(imagefname, destpath) +func unpackImage(d *Daemon, imagefname string, destpath string) error { + err := unpack(d, imagefname, destpath) if err != nil { return err } + rootfsPath := fmt.Sprintf("%s/rootfs", destpath) if shared.PathExists(imagefname + ".rootfs") { - rootfsPath := fmt.Sprintf("%s/rootfs", destpath) err = os.MkdirAll(rootfsPath, 0755) if err != nil { return fmt.Errorf("Error creating rootfs directory") } - err = untar(imagefname+".rootfs", rootfsPath) + err = unpack(d, imagefname+".rootfs", rootfsPath) if err != nil { return err } } + if !shared.PathExists(rootfsPath) { + return fmt.Errorf("Image is missing a rootfs: %s", imagefname) + } + return nil } func compressFile(path string, compress string) (string, error) { - cmd := exec.Command(compress, path, "-c", "-n") + reproducible := []string{"gzip"} + + args := []string{path, "-c"} + if shared.StringInSlice(compress, reproducible) { + args = append(args, "-n") + } + + cmd := exec.Command(compress, args...) 
outfile, err := os.Create(path + ".compressed") if err != nil { @@ -151,11 +207,12 @@ } type imagePostReq struct { - Filename string `json:"filename"` - Public bool `json:"public"` - Source map[string]string `json:"source"` - Properties map[string]string `json:"properties"` - AutoUpdate bool `json:"auto_update"` + Filename string `json:"filename"` + Public bool `json:"public"` + Source map[string]string `json:"source"` + Properties map[string]string `json:"properties"` + AutoUpdate bool `json:"auto_update"` + CompressionAlgorithm string `json:"compression_algorithm"` } type imageMetadata struct { @@ -219,17 +276,15 @@ } tarfile.Close() - compress, err := d.ConfigValueGet("images.compression_algorithm") - if err != nil { - return info, err - } + var compressedPath string + var compress string - // Default to gzip for this - if compress == "" { - compress = "gzip" + if req.CompressionAlgorithm != "" { + compress = req.CompressionAlgorithm + } else { + compress = daemonConfig["images.compression_algorithm"].Get() } - var compressedPath string if compress != "none" { compressedPath, err = compressFile(tarfile.Name(), compress) if err != nil { @@ -321,7 +376,7 @@ } // Resolve the image URL - tlsConfig, err := shared.GetTLSConfig("", "", nil) + tlsConfig, err := shared.GetTLSConfig("", "", "", nil) if err != nil { return err } @@ -591,6 +646,9 @@ func imageBuildFromInfo(d *Daemon, info shared.ImageInfo) (metadata map[string]string, err error) { err = d.Storage.ImageCreate(info.Fingerprint) if err != nil { + os.Remove(shared.VarPath("images", info.Fingerprint)) + os.Remove(shared.VarPath("images", info.Fingerprint) + ".rootfs") + return metadata, err } @@ -631,7 +689,7 @@ } if err := os.RemoveAll(path); err != nil { - shared.Debugf("Error deleting temporary directory \"%s\": %s", path, err) + shared.LogDebugf("Error deleting temporary directory \"%s\": %s", path, err) } } @@ -747,7 +805,7 @@ if err != nil { outputLines := strings.Split(string(output), "\n") - return nil, 
fmt.Errorf("Could not extract image metadata %s from tar: %v (%s)", metadataName, err, outputLines[0]) + return nil, fmt.Errorf("Could not extract image %s from tar: %v (%s)", metadataName, err, outputLines[0]) } metadata := imageMetadata{} @@ -757,6 +815,15 @@ return nil, fmt.Errorf("Could not parse %s: %v", metadataName, err) } + _, err = shared.ArchitectureId(metadata.Architecture) + if err != nil { + return nil, err + } + + if metadata.CreationDate == 0 { + return nil, fmt.Errorf("Missing creation date.") + } + return &metadata, nil } @@ -804,18 +871,18 @@ var imagesCmd = Command{name: "images", post: imagesPost, untrustedGet: true, get: imagesGet} func autoUpdateImages(d *Daemon) { - shared.Debugf("Updating images") + shared.LogInfof("Updating images") images, err := dbImagesGet(d.db, false) if err != nil { - shared.Log.Error("Unable to retrieve the list of images", log.Ctx{"err": err}) + shared.LogError("Unable to retrieve the list of images", log.Ctx{"err": err}) return } for _, fp := range images { id, info, err := dbImageGet(d.db, fp, false, true) if err != nil { - shared.Log.Error("Error loading image", log.Ctx{"err": err, "fp": fp}) + shared.LogError("Error loading image", log.Ctx{"err": err, "fp": fp}) continue } @@ -828,69 +895,63 @@ continue } - shared.Log.Debug("Processing image", log.Ctx{"fp": fp, "server": source.Server, "protocol": source.Protocol, "alias": source.Alias}) + shared.LogDebug("Processing image", log.Ctx{"fp": fp, "server": source.Server, "protocol": source.Protocol, "alias": source.Alias}) hash, err := d.ImageDownload(nil, source.Server, source.Protocol, "", "", source.Alias, false, true) if hash == fp { - shared.Log.Debug("Already up to date", log.Ctx{"fp": fp}) + shared.LogDebug("Already up to date", log.Ctx{"fp": fp}) + continue + } else if err != nil { + shared.LogError("Failed to update the image", log.Ctx{"err": err, "fp": fp}) continue } newId, _, err := dbImageGet(d.db, hash, false, true) if err != nil { - 
shared.Log.Error("Error loading image", log.Ctx{"err": err, "fp": hash}) + shared.LogError("Error loading image", log.Ctx{"err": err, "fp": hash}) continue } err = dbImageLastAccessUpdate(d.db, hash, info.LastUsedDate) if err != nil { - shared.Log.Error("Error setting last use date", log.Ctx{"err": err, "fp": hash}) + shared.LogError("Error setting last use date", log.Ctx{"err": err, "fp": hash}) continue } err = dbImageAliasesMove(d.db, id, newId) if err != nil { - shared.Log.Error("Error moving aliases", log.Ctx{"err": err, "fp": hash}) + shared.LogError("Error moving aliases", log.Ctx{"err": err, "fp": hash}) continue } err = doDeleteImage(d, fp) if err != nil { - shared.Log.Error("Error deleting image", log.Ctx{"err": err, "fp": fp}) + shared.LogError("Error deleting image", log.Ctx{"err": err, "fp": fp}) } } + + shared.LogInfof("Done updating images") } func pruneExpiredImages(d *Daemon) { - shared.Debugf("Pruning expired images") - expiry, err := d.ConfigValueGet("images.remote_cache_expiry") - if err != nil { - shared.Log.Error("Unable to read the images.remote_cache_expiry key") - return - } - - if expiry == "" { - expiry = "10" - } - - expiryInt, err := strconv.Atoi(expiry) - if err != nil { - shared.Log.Error("Invalid value for images.remote_cache_expiry", log.Ctx{"err": err}) - return - } + shared.LogInfof("Pruning expired images") - images, err := dbImagesGetExpired(d.db, expiryInt) + // Get the list of expires images + expiry := daemonConfig["images.remote_cache_expiry"].GetInt64() + images, err := dbImagesGetExpired(d.db, expiry) if err != nil { - shared.Log.Error("Unable to retrieve the list of expired images", log.Ctx{"err": err}) + shared.LogError("Unable to retrieve the list of expired images", log.Ctx{"err": err}) return } + // Delete them for _, fp := range images { if err := doDeleteImage(d, fp); err != nil { - shared.Log.Error("Error deleting image", log.Ctx{"err": err, "fp": fp}) + shared.LogError("Error deleting image", log.Ctx{"err": err, 
"fp": fp}) } } - shared.Debugf("Done pruning expired images") + + shared.LogInfof("Done pruning expired images") } func doDeleteImage(d *Daemon, fingerprint string) error { @@ -903,12 +964,12 @@ // look at the path s, err := storageForImage(d, imgInfo) if err != nil { - return err - } - - // Remove the image from storage backend - if err = s.ImageDelete(imgInfo.Fingerprint); err != nil { - return err + shared.LogError("error detecting image storage backend", log.Ctx{"fingerprint": imgInfo.Fingerprint, "err": err}) + } else { + // Remove the image from storage backend + if err = s.ImageDelete(imgInfo.Fingerprint); err != nil { + shared.LogError("error deleting the image from storage backend", log.Ctx{"fingerprint": imgInfo.Fingerprint, "err": err}) + } } // Remove main image file @@ -916,16 +977,16 @@ if shared.PathExists(fname) { err = os.Remove(fname) if err != nil { - shared.Debugf("Error deleting image file %s: %s", fname, err) + shared.LogDebugf("Error deleting image file %s: %s", fname, err) } } - // Remote the rootfs file + // Remove the rootfs file fname = shared.VarPath("images", imgInfo.Fingerprint) + ".rootfs" if shared.PathExists(fname) { err = os.Remove(fname) if err != nil { - shared.Debugf("Error deleting image file %s: %s", fname, err) + shared.LogDebugf("Error deleting image file %s: %s", fname, err) } } @@ -1008,7 +1069,8 @@ return response } - return SyncResponse(true, info) + etag := []interface{}{info.Public, info.AutoUpdate, info.Properties} + return SyncResponseETag(true, info, etag) } type imagePutReq struct { @@ -1018,19 +1080,92 @@ } func imagePut(d *Daemon, r *http.Request) Response { + // Get current value fingerprint := mux.Vars(r)["fingerprint"] + id, info, err := dbImageGet(d.db, fingerprint, false, false) + if err != nil { + return SmartError(err) + } + + // Validate ETag + etag := []interface{}{info.Public, info.AutoUpdate, info.Properties} + err = etagCheck(r, etag) + if err != nil { + return PreconditionFailed(err) + } req := 
imagePutReq{} if err := json.NewDecoder(r.Body).Decode(&req); err != nil { return BadRequest(err) } + err = dbImageUpdate(d.db, id, info.Filename, info.Size, req.Public, req.AutoUpdate, info.Architecture, info.CreationDate, info.ExpiryDate, req.Properties) + if err != nil { + return SmartError(err) + } + + return EmptySyncResponse +} + +func imagePatch(d *Daemon, r *http.Request) Response { + // Get current value + fingerprint := mux.Vars(r)["fingerprint"] id, info, err := dbImageGet(d.db, fingerprint, false, false) if err != nil { return SmartError(err) } - err = dbImageUpdate(d.db, id, info.Filename, info.Size, req.Public, req.AutoUpdate, info.Architecture, info.CreationDate, info.ExpiryDate, req.Properties) + // Validate ETag + etag := []interface{}{info.Public, info.AutoUpdate, info.Properties} + err = etagCheck(r, etag) + if err != nil { + return PreconditionFailed(err) + } + + body, err := ioutil.ReadAll(r.Body) + if err != nil { + return InternalError(err) + } + + rdr1 := ioutil.NopCloser(bytes.NewBuffer(body)) + rdr2 := ioutil.NopCloser(bytes.NewBuffer(body)) + + reqRaw := shared.Jmap{} + if err := json.NewDecoder(rdr1).Decode(&reqRaw); err != nil { + return BadRequest(err) + } + + req := imagePutReq{} + if err := json.NewDecoder(rdr2).Decode(&req); err != nil { + return BadRequest(err) + } + + // Get AutoUpdate + autoUpdate, err := reqRaw.GetBool("auto_update") + if err == nil { + info.AutoUpdate = autoUpdate + } + + // Get Public + public, err := reqRaw.GetBool("public") + if err == nil { + info.Public = public + } + + // Get Properties + _, ok := reqRaw["properties"] + if ok { + properties := req.Properties + for k, v := range info.Properties { + _, ok := req.Properties[k] + if !ok { + properties[k] = v + } + } + info.Properties = properties + } + + err = dbImageUpdate(d.db, id, info.Filename, info.Size, info.Public, info.AutoUpdate, info.Architecture, info.CreationDate, info.ExpiryDate, info.Properties) if err != nil { return SmartError(err) } @@ 
-1038,7 +1173,7 @@ return EmptySyncResponse } -var imageCmd = Command{name: "images/{fingerprint}", untrustedGet: true, get: imageGet, put: imagePut, delete: imageDelete} +var imageCmd = Command{name: "images/{fingerprint}", untrustedGet: true, get: imageGet, put: imagePut, delete: imageDelete, patch: imagePatch} type aliasPostReq struct { Name string `json:"name"` @@ -1077,7 +1212,7 @@ return InternalError(err) } - return EmptySyncResponse + return SyncResponseLocation(true, nil, fmt.Sprintf("/%s/images/aliases/%s", shared.APIVersion, req.Name)) } func aliasesGet(d *Daemon, r *http.Request) Response { @@ -1123,7 +1258,7 @@ return SmartError(err) } - return SyncResponse(true, alias) + return SyncResponseETag(true, alias, alias) } func aliasDelete(d *Daemon, r *http.Request) Response { @@ -1142,16 +1277,26 @@ } func aliasPut(d *Daemon, r *http.Request) Response { + // Get current value name := mux.Vars(r)["name"] + id, alias, err := dbImageAliasGet(d.db, name, true) + if err != nil { + return SmartError(err) + } + + // Validate ETag + err = etagCheck(r, alias) + if err != nil { + return PreconditionFailed(err) + } req := aliasPutReq{} if err := json.NewDecoder(r.Body).Decode(&req); err != nil { return BadRequest(err) } - id, _, err := dbImageAliasGet(d.db, name, true) - if err != nil { - return SmartError(err) + if req.Target == "" { + return BadRequest(fmt.Errorf("The target field is required")) } imageId, _, err := dbImageGet(d.db, req.Target, false, false) @@ -1167,6 +1312,58 @@ return EmptySyncResponse } +func aliasPatch(d *Daemon, r *http.Request) Response { + // Get current value + name := mux.Vars(r)["name"] + id, alias, err := dbImageAliasGet(d.db, name, true) + if err != nil { + return SmartError(err) + } + + // Validate ETag + err = etagCheck(r, alias) + if err != nil { + return PreconditionFailed(err) + } + + req := shared.Jmap{} + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + return BadRequest(err) + } + + _, ok := req["target"] + if ok 
{ + target, err := req.GetString("target") + if err != nil { + return BadRequest(err) + } + + alias.Target = target + } + + _, ok = req["description"] + if ok { + description, err := req.GetString("description") + if err != nil { + return BadRequest(err) + } + + alias.Description = description + } + + imageId, _, err := dbImageGet(d.db, alias.Target, false, false) + if err != nil { + return SmartError(err) + } + + err = dbImageAliasUpdate(d.db, id, imageId, alias.Description) + if err != nil { + return SmartError(err) + } + + return EmptySyncResponse +} + func aliasPost(d *Daemon, r *http.Request) Response { name := mux.Vars(r)["name"] @@ -1175,6 +1372,12 @@ return BadRequest(err) } + // Check that the name isn't already in use + id, _, _ := dbImageAliasGet(d.db, req.Name, true) + if id > 0 { + return Conflict + } + id, _, err := dbImageAliasGet(d.db, name, true) if err != nil { return SmartError(err) @@ -1185,7 +1388,7 @@ return SmartError(err) } - return EmptySyncResponse + return SyncResponseLocation(true, nil, fmt.Sprintf("/%s/images/aliases/%s", shared.APIVersion, req.Name)) } func imageExport(d *Daemon, r *http.Request) Response { @@ -1203,16 +1406,14 @@ return SmartError(err) } - filename := imgInfo.Filename imagePath := shared.VarPath("images", imgInfo.Fingerprint) rootfsPath := imagePath + ".rootfs" - if filename == "" { - _, ext, err := detectCompression(imagePath) - if err != nil { - ext = "" - } - filename = fmt.Sprintf("%s%s", fingerprint, ext) + + _, ext, err := detectCompression(imagePath) + if err != nil { + ext = "" } + filename := fmt.Sprintf("%s%s", fingerprint, ext) if shared.PathExists(rootfsPath) { files := make([]fileResponseEntry, 2) @@ -1221,6 +1422,14 @@ files[0].path = imagePath files[0].filename = "meta-" + filename + // Recompute the extension for the root filesystem, it may use a different + // compression algorithm than the metadata. 
+ _, ext, err = detectCompression(rootfsPath) + if err != nil { + ext = "" + } + filename = fmt.Sprintf("%s%s", fingerprint, ext) + files[1].identifier = "rootfs" files[1].path = rootfsPath files[1].filename = filename @@ -1268,4 +1477,4 @@ var aliasesCmd = Command{name: "images/aliases", post: aliasesPost, get: aliasesGet} -var aliasCmd = Command{name: "images/aliases/{name:.*}", untrustedGet: true, get: aliasGet, delete: aliasDelete, put: aliasPut, post: aliasPost} +var aliasCmd = Command{name: "images/aliases/{name:.*}", untrustedGet: true, get: aliasGet, delete: aliasDelete, put: aliasPut, post: aliasPost, patch: aliasPatch} diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/lxd/main.go juju-core-2.0.0/src/github.com/lxc/lxd/lxd/main.go --- juju-core-2.0~beta15/src/github.com/lxc/lxd/lxd/main.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/lxd/main.go 2016-10-13 14:31:53.000000000 +0000 @@ -35,11 +35,11 @@ var argLogfile = gnuflag.String("logfile", "", "") var argMemProfile = gnuflag.String("memprofile", "", "") var argNetworkAddress = gnuflag.String("network-address", "", "") -var argNetworkPort = gnuflag.Int("network-port", -1, "") +var argNetworkPort = gnuflag.Int64("network-port", -1, "") var argPrintGoroutinesEvery = gnuflag.Int("print-goroutines-every", -1, "") var argStorageBackend = gnuflag.String("storage-backend", "", "") var argStorageCreateDevice = gnuflag.String("storage-create-device", "", "") -var argStorageCreateLoop = gnuflag.Int("storage-create-loop", -1, "") +var argStorageCreateLoop = gnuflag.Int64("storage-create-loop", -1, "") var argStoragePool = gnuflag.String("storage-pool", "", "") var argSyslog = gnuflag.Bool("syslog", false, "") var argTimeout = gnuflag.Int("timeout", -1, "") @@ -50,9 +50,15 @@ // Global variables var debug bool var verbose bool +var execPath string func init() { rand.Seed(time.Now().UTC().UnixNano()) + absPath, err := os.Readlink("/proc/self/exe") + if err != nil { + absPath = 
"bad-exec-path" + } + execPath = absPath } func main() { @@ -138,6 +144,8 @@ fmt.Printf(" How long to wait before failing\n") fmt.Printf("\n\nInternal commands (don't call these directly):\n") + fmt.Printf(" forkexec\n") + fmt.Printf(" Execute a command in a container\n") fmt.Printf(" forkgetnet\n") fmt.Printf(" Get container network information\n") fmt.Printf(" forkgetfile\n") @@ -150,6 +158,10 @@ fmt.Printf(" Start a container\n") fmt.Printf(" callhook\n") fmt.Printf(" Call a container hook\n") + fmt.Printf(" migratedumpsuccess\n") + fmt.Printf(" Indicate that a migration dump was successful\n") + fmt.Printf(" netcat\n") + fmt.Printf(" Mirror a unix socket to stdin/stdout\n") } // Parse the arguments @@ -219,6 +231,16 @@ return MigrateContainer(os.Args[1:]) case "forkstart": return startContainer(os.Args[1:]) + case "forkexec": + ret, err := execContainer(os.Args[1:]) + if err != nil { + fmt.Fprintf(os.Stderr, "error: %v\n", err) + } + os.Exit(ret) + case "netcat": + return Netcat(os.Args[1:]) + case "migratedumpsuccess": + return cmdMigrateDumpSuccess(os.Args[1:]) } } @@ -315,7 +337,7 @@ go memProfiler(*argMemProfile) } - neededPrograms := []string{"setfacl", "rsync", "tar", "xz"} + neededPrograms := []string{"dnsmasq", "setfacl", "rsync", "tar", "unsquashfs", "xz"} for _, p := range neededPrograms { _, err := exec.LookPath(p) if err != nil { @@ -352,8 +374,7 @@ signal.Notify(ch, syscall.SIGPWR) sig := <-ch - shared.Log.Info( - fmt.Sprintf("Received '%s signal', shutting down containers.", sig)) + shared.LogInfof("Received '%s signal', shutting down containers.", sig) containersShutdown(d) @@ -364,8 +385,7 @@ go func() { <-d.shutdownChan - shared.Log.Info( - fmt.Sprintf("Asked to shutdown by API, shutting down containers.")) + shared.LogInfof("Asked to shutdown by API, shutting down containers.") containersShutdown(d) @@ -380,7 +400,7 @@ signal.Notify(ch, syscall.SIGTERM) sig := <-ch - shared.Log.Info(fmt.Sprintf("Received '%s signal', exiting.", sig)) + 
shared.LogInfof("Received '%s signal', exiting.", sig) ret = d.Stop() wg.Done() }() @@ -457,6 +477,7 @@ d := &Daemon{ imagesDownloading: map[string]chan bool{}, imagesDownloadingLock: sync.RWMutex{}, + lxcpath: shared.VarPath("containers"), } err := initializeDbObject(d, shared.VarPath("lxd.db")) @@ -464,14 +485,16 @@ return err } - // Look for network socket - value, err := d.ConfigValueGet("core.https_address") + /* Load all config values from the database */ + err = daemonConfigInit(d.db) if err != nil { return err } + // Look for network socket + value := daemonConfig["core.https_address"].Get() if value != "" { - shared.Debugf("Daemon has core.https_address set, activating...") + shared.LogDebugf("Daemon has core.https_address set, activating...") _, err := lxd.NewClient(&lxd.DefaultConfig, "local") return err } @@ -497,14 +520,20 @@ lastState := config["volatile.last_state.power"] autoStart := config["boot.autostart"] - if lastState == "RUNNING" || lastState == "Running" || autoStart == "true" { - shared.Debugf("Daemon has auto-started containers, activating...") + if c.IsRunning() { + shared.LogDebugf("Daemon has running containers, activating...") + _, err := lxd.NewClient(&lxd.DefaultConfig, "local") + return err + } + + if lastState == "RUNNING" || lastState == "Running" || shared.IsTrue(autoStart) { + shared.LogDebugf("Daemon has auto-started containers, activating...") _, err := lxd.NewClient(&lxd.DefaultConfig, "local") return err } } - shared.Debugf("No need to start the daemon now.") + shared.LogDebugf("No need to start the daemon now.") return nil } @@ -560,14 +589,26 @@ } func cmdInit() error { + var defaultPrivileged int // controls whether we set security.privileged=true var storageBackend string // dir or zfs var storageMode string // existing, loop or device - var storageLoopSize int // Size in GB + var storageLoopSize int64 // Size in GB var storageDevice string // Path var storagePool string // pool name var networkAddress string // Address - 
var networkPort int // Port + var networkPort int64 // Port var trustPassword string // Trust password + var imagesAutoUpdate bool // controls whether we set images.auto_update_interval to 0 + var bridgeName string // Bridge name + var bridgeIPv4 string // IPv4 address + var bridgeIPv4Nat bool // IPv4 address + var bridgeIPv6 string // IPv6 address + var bridgeIPv6Nat bool // IPv6 address + + // Detect userns + defaultPrivileged = -1 + runningInUserns = shared.RunningInUserNS() + imagesAutoUpdate = true // Only root should run this if os.Geteuid() != 0 { @@ -579,20 +620,28 @@ // Detect zfs out, err := exec.LookPath("zfs") - if err == nil && len(out) != 0 { - backendsAvailable = append(backendsAvailable, "zfs") + if err == nil && len(out) != 0 && !runningInUserns { + _ = shared.RunCommand("modprobe", "zfs") + + err := shared.RunCommand("zpool", "list") + if err == nil { + backendsAvailable = append(backendsAvailable, "zfs") + } } reader := bufio.NewReader(os.Stdin) - askBool := func(question string) bool { + askBool := func(question string, default_ string) bool { for { fmt.Printf(question) input, _ := reader.ReadString('\n') input = strings.TrimSuffix(input, "\n") - if shared.StringInSlice(strings.ToLower(input), []string{"yes", "y", "true"}) { + if input == "" { + input = default_ + } + if shared.StringInSlice(strings.ToLower(input), []string{"yes", "y"}) { return true - } else if shared.StringInSlice(strings.ToLower(input), []string{"no", "n", "false"}) { + } else if shared.StringInSlice(strings.ToLower(input), []string{"no", "n"}) { return false } @@ -600,11 +649,14 @@ } } - askChoice := func(question string, choices []string) string { + askChoice := func(question string, choices []string, default_ string) string { for { fmt.Printf(question) input, _ := reader.ReadString('\n') input = strings.TrimSuffix(input, "\n") + if input == "" { + input = default_ + } if shared.StringInSlice(input, choices) { return input } @@ -613,12 +665,15 @@ } } - askInt := 
func(question string, min int, max int) int { + askInt := func(question string, min int64, max int64, default_ string) int64 { for { fmt.Printf(question) input, _ := reader.ReadString('\n') input = strings.TrimSuffix(input, "\n") - intInput, err := strconv.Atoi(input) + if input == "" { + input = default_ + } + intInput, err := strconv.ParseInt(input, 10, 64) if err == nil && (min == -1 || intInput >= min) && (max == -1 || intInput <= max) { return intInput @@ -628,11 +683,21 @@ } } - askString := func(question string) string { + askString := func(question string, default_ string, validate func(string) error) string { for { fmt.Printf(question) input, _ := reader.ReadString('\n') input = strings.TrimSuffix(input, "\n") + if input == "" { + input = default_ + } + if validate != nil { + result := validate(input) + if result != nil { + fmt.Printf("Invalid input: %s\n\n", result) + continue + } + } if len(input) != 0 { return input } @@ -700,13 +765,13 @@ if *argStorageBackend == "dir" { if *argStorageCreateLoop != -1 || *argStorageCreateDevice != "" || *argStoragePool != "" { - return fmt.Errorf("None of --storage-pool, --storage-create-device or --storage-create-pool may be used with the 'dir' backend.") + return fmt.Errorf("None of --storage-pool, --storage-create-device or --storage-create-loop may be used with the 'dir' backend.") } } if *argStorageBackend == "zfs" { if *argStorageCreateLoop != -1 && *argStorageCreateDevice != "" { - return fmt.Errorf("Only one of --storage-create-device or --storage-create-pool can be specified with the 'zfs' backend.") + return fmt.Errorf("Only one of --storage-create-device or --storage-create-loop can be specified with the 'zfs' backend.") } if *argStoragePool == "" { @@ -744,7 +809,12 @@ return fmt.Errorf("Init configuration is only valid with --auto") } - storageBackend = askChoice("Name of the storage backend to use (dir or zfs): ", backendsSupported) + defaultStorage := "dir" + if shared.StringInSlice("zfs", 
backendsAvailable) { + defaultStorage = "zfs" + } + + storageBackend = askChoice(fmt.Sprintf("Name of the storage backend to use (dir or zfs) [default=%s]: ", defaultStorage), backendsSupported, defaultStorage) if !shared.StringInSlice(storageBackend, backendsSupported) { return fmt.Errorf("The requested backend '%s' isn't supported by lxd init.", storageBackend) @@ -755,26 +825,110 @@ } if storageBackend == "zfs" { - if askBool("Create a new ZFS pool (yes/no)? ") { - storagePool = askString("Name of the new ZFS pool: ") - if askBool("Would you like to use an existing block device (yes/no)? ") { - storageDevice = askString("Path to the existing block device: ") + if askBool("Create a new ZFS pool (yes/no) [default=yes]? ", "yes") { + storagePool = askString("Name of the new ZFS pool [default=lxd]: ", "lxd", nil) + if askBool("Would you like to use an existing block device (yes/no) [default=no]? ", "no") { + deviceExists := func(path string) error { + if !shared.IsBlockdevPath(path) { + return fmt.Errorf("'%s' is not a block device", path) + } + return nil + } + storageDevice = askString("Path to the existing block device: ", "", deviceExists) storageMode = "device" } else { - storageLoopSize = askInt("Size in GB of the new loop device (1GB minimum): ", 1, -1) + st := syscall.Statfs_t{} + err := syscall.Statfs(shared.VarPath(), &st) + if err != nil { + return fmt.Errorf("couldn't statfs %s: %s", shared.VarPath(), err) + } + + /* choose 15 GB < x < 100GB, where x is 20% of the disk size */ + def := uint64(st.Frsize) * st.Blocks / (1024 * 1024 * 1024) / 5 + if def > 100 { + def = 100 + } + if def < 15 { + def = 15 + } + + q := fmt.Sprintf("Size in GB of the new loop device (1GB minimum) [default=%d]: ", def) + storageLoopSize = askInt(q, 1, -1, fmt.Sprintf("%d", def)) storageMode = "loop" } } else { - storagePool = askString("Name of the existing ZFS pool or dataset: ") + storagePool = askString("Name of the existing ZFS pool or dataset: ", "", nil) storageMode = 
"existing" } } - if askBool("Would you like LXD to be available over the network (yes/no)? ") { - networkAddress = askString("Address to bind LXD to (not including port): ") - networkPort = askInt("Port to bind LXD to (8443 recommended): ", 1, 65535) + if runningInUserns { + fmt.Printf(` +We detected that you are running inside an unprivileged container. +This means that unless you manually configured your host otherwise, +you will not have enough uid and gid to allocate to your containers. + +LXD can re-use your container's own allocation to avoid the problem. +Doing so makes your nested containers slightly less safe as they could +in theory attack their parent container and gain more privileges than +they otherwise would. + +`) + if askBool("Would you like to have your containers share their parent's allocation (yes/no) [default=yes]? ", "yes") { + defaultPrivileged = 1 + } else { + defaultPrivileged = 0 + } + } + + if askBool("Would you like LXD to be available over the network (yes/no) [default=no]? ", "no") { + isIPAddress := func(s string) error { + if s != "all" && net.ParseIP(s) == nil { + return fmt.Errorf("'%s' is not an IP address", s) + } + return nil + } + + networkAddress = askString("Address to bind LXD to (not including port) [default=all]: ", "all", isIPAddress) + if networkAddress == "all" { + networkAddress = "::" + } + + if net.ParseIP(networkAddress).To4() == nil { + networkAddress = fmt.Sprintf("[%s]", networkAddress) + } + networkPort = askInt("Port to bind LXD to [default=8443]: ", 1, 65535, "8443") trustPassword = askPassword("Trust password for new clients: ") } + + if !askBool("Would you like stale cached images to be updated automatically (yes/no) [default=yes]? ", "yes") { + imagesAutoUpdate = false + } + + if askBool("Would you like to create a new network bridge (yes/no) [default=yes]? ", "yes") { + bridgeName = askString("What should the new bridge be called [default=lxdbr0]? 
", "lxdbr0", networkValidName) + bridgeIPv4 = askString("What IPv4 subnet should be used (CIDR notation, “auto†or “noneâ€) [default=auto]? ", "auto", func(value string) error { + if shared.StringInSlice(value, []string{"auto", "none"}) { + return nil + } + return networkValidAddressCIDRV4(value) + }) + + if !shared.StringInSlice(bridgeIPv4, []string{"auto", "none"}) { + bridgeIPv4Nat = askBool("Would you like LXD to NAT IPv4 traffic on your bridge? [default=yes]? ", "yes") + } + + bridgeIPv6 = askString("What IPv6 subnet should be used (CIDR notation, “auto†or “noneâ€) [default=auto]? ", "auto", func(value string) error { + if shared.StringInSlice(value, []string{"auto", "none"}) { + return nil + } + return networkValidAddressCIDRV6(value) + }) + + if !shared.StringInSlice(bridgeIPv6, []string{"auto", "none"}) { + bridgeIPv6Nat = askBool("Would you like LXD to NAT IPv6 traffic on your bridge? [default=yes]? ", "yes") + } + } } if !shared.StringInSlice(storageBackend, []string{"dir", "zfs"}) { @@ -782,7 +936,7 @@ } // Unset all storage keys, core.https_address and core.trust_password - for _, key := range []string{"core.https_address", "core.trust_password"} { + for _, key := range []string{"storage.zfs_pool_name", "core.https_address", "core.trust_password"} { _, err = c.SetServerConfig(key, "") if err != nil { return err @@ -795,8 +949,6 @@ } if storageBackend == "zfs" { - _ = exec.Command("modprobe", "zfs").Run() - if storageMode == "loop" { storageDevice = shared.VarPath("zfs.img") f, err := os.Create(storageDevice) @@ -804,6 +956,11 @@ return fmt.Errorf("Failed to open %s: %s", storageDevice, err) } + err = f.Chmod(0600) + if err != nil { + return fmt.Errorf("Failed to chmod %s: %s", storageDevice, err) + } + err = f.Truncate(int64(storageLoopSize * 1024 * 1024 * 1024)) if err != nil { return fmt.Errorf("Failed to create sparse file %s: %s", storageDevice, err) @@ -819,7 +976,7 @@ output, err := exec.Command( "zpool", "create", storagePool, storageDevice, - 
"-f", "-m", "none").CombinedOutput() + "-f", "-m", "none", "-O", "compression=on").CombinedOutput() if err != nil { return fmt.Errorf("Failed to create the ZFS pool: %s", output) } @@ -832,6 +989,35 @@ } } + if defaultPrivileged == 0 { + err = c.SetProfileConfigItem("default", "security.privileged", "") + if err != nil { + return err + } + } else if defaultPrivileged == 1 { + err = c.SetProfileConfigItem("default", "security.privileged", "true") + if err != nil { + } + } + + if imagesAutoUpdate { + ss, err := c.ServerStatus() + if err != nil { + return err + } + if val, ok := ss.Config["images.auto_update_interval"]; ok && val == "0" { + _, err = c.SetServerConfig("images.auto_update_interval", "") + if err != nil { + return err + } + } + } else { + _, err = c.SetServerConfig("images.auto_update_interval", "0") + if err != nil { + return err + } + } + if networkAddress != "" { _, err = c.SetServerConfig("core.https_address", fmt.Sprintf("%s:%d", networkAddress, networkPort)) if err != nil { @@ -846,6 +1032,31 @@ } } + if bridgeName != "" { + bridgeConfig := map[string]string{} + bridgeConfig["ipv4.address"] = bridgeIPv4 + bridgeConfig["ipv6.address"] = bridgeIPv6 + + if bridgeIPv4Nat { + bridgeConfig["ipv4.nat"] = "true" + } + + if bridgeIPv6Nat { + bridgeConfig["ipv6.nat"] = "true" + } + + err = c.NetworkCreate(bridgeName, bridgeConfig) + if err != nil { + return err + } + + props := []string{"nictype=bridged", fmt.Sprintf("parent=%s", bridgeName)} + _, err = c.ProfileDeviceAdd("default", "eth0", "nic", props) + if err != nil { + return err + } + } + fmt.Printf("LXD has been successfully configured.\n") return nil } @@ -983,3 +1194,22 @@ return nil } + +func cmdMigrateDumpSuccess(args []string) error { + if len(args) != 3 { + return fmt.Errorf("bad migrate dump success args %s", args) + } + + c, err := lxd.NewClient(&lxd.DefaultConfig, "local") + if err != nil { + return err + } + + conn, err := c.Websocket(args[1], args[2]) + if err != nil { + return err + } + 
conn.Close() + + return c.WaitForSuccess(args[1]) +} diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/lxd/migrate.go juju-core-2.0.0/src/github.com/lxc/lxd/lxd/migrate.go --- juju-core-2.0~beta15/src/github.com/lxc/lxd/lxd/migrate.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/lxd/migrate.go 2016-10-13 14:31:53.000000000 +0000 @@ -6,18 +6,15 @@ package main import ( - "bufio" "fmt" "io/ioutil" "net/http" "net/url" "os" - "os/exec" - "path" "path/filepath" + "strconv" "strings" "sync" - "time" "github.com/golang/protobuf/proto" "github.com/gorilla/websocket" @@ -71,15 +68,6 @@ return shared.WriteAll(w, data) } -func findCriu(host string) error { - _, err := exec.LookPath("criu") - if err != nil { - return fmt.Errorf("CRIU is required for live migration but its binary couldn't be found on the %s server. Is it installed in LXD's path?", host) - } - - return nil -} - func (c *migrationFields) recv(m proto.Message) error { mt, r, err := c.controlConn.NextReader() if err != nil { @@ -148,7 +136,7 @@ msg := MigrationControl{} err := c.recv(&msg) if err != nil { - shared.Debugf("Got error reading migration control socket %s", err) + shared.LogDebugf("Got error reading migration control socket %s", err) close(ch) return } @@ -158,32 +146,6 @@ return ch } -func CollectCRIULogFile(c container, imagesDir string, function string, method string) error { - t := time.Now().Format(time.RFC3339) - newPath := shared.LogPath(c.Name(), fmt.Sprintf("%s_%s_%s.log", function, method, t)) - return shared.FileCopy(filepath.Join(imagesDir, fmt.Sprintf("%s.log", method)), newPath) -} - -func GetCRIULogErrors(imagesDir string, method string) (string, error) { - f, err := os.Open(path.Join(imagesDir, fmt.Sprintf("%s.log", method))) - if err != nil { - return "", err - } - - defer f.Close() - - scanner := bufio.NewScanner(f) - ret := []string{} - for scanner.Scan() { - line := scanner.Text() - if strings.Contains(line, "Error") { - ret = append(ret, 
scanner.Text()) - } - } - - return strings.Join(ret, "\n"), nil -} - type migrationSourceWs struct { migrationFields @@ -267,7 +229,63 @@ return nil } -func (s *migrationSourceWs) Do(op *operation) error { +func writeActionScript(directory string, operation string, secret string) error { + script := fmt.Sprintf(`#!/bin/sh -e +if [ "$CRTOOLS_SCRIPT_ACTION" = "post-dump" ]; then + %s migratedumpsuccess %s %s +fi +`, execPath, operation, secret) + + f, err := os.Create(filepath.Join(directory, "action.sh")) + if err != nil { + return err + } + defer f.Close() + + if err := f.Chmod(0500); err != nil { + return err + } + + _, err = f.WriteString(script) + return err +} + +func snapshotToProtobuf(c container) *Snapshot { + config := []*Config{} + for k, v := range c.LocalConfig() { + kCopy := string(k) + vCopy := string(v) + config = append(config, &Config{Key: &kCopy, Value: &vCopy}) + } + + devices := []*Device{} + for name, d := range c.LocalDevices() { + props := []*Config{} + for k, v := range d { + kCopy := string(k) + vCopy := string(v) + props = append(props, &Config{Key: &kCopy, Value: &vCopy}) + } + + devices = append(devices, &Device{Name: &name, Config: props}) + } + + parts := strings.SplitN(c.Name(), shared.SnapshotDelimiter, 2) + isEphemeral := c.IsEphemeral() + arch := int32(c.Architecture()) + stateful := c.IsStateful() + return &Snapshot{ + Name: &parts[len(parts)-1], + LocalConfig: config, + Profiles: c.Profiles(), + Ephemeral: &isEphemeral, + LocalDevices: devices, + Architecture: &arch, + Stateful: &stateful, + } +} + +func (s *migrationSourceWs) Do(migrateOp *operation) error { <-s.allConnected criuType := CRIUType_CRIU_RSYNC.Enum() @@ -303,20 +321,23 @@ /* the protocol says we have to send a header no matter what, so let's * do that, but then immediately send an error. 
*/ - snapshots := []string{} + snapshots := []*Snapshot{} + snapshotNames := []string{} if fsErr == nil { fullSnaps := driver.Snapshots() for _, snap := range fullSnaps { - snapshots = append(snapshots, shared.ExtractSnapshotName(snap.Name())) + snapshots = append(snapshots, snapshotToProtobuf(snap)) + snapshotNames = append(snapshotNames, shared.ExtractSnapshotName(snap.Name())) } } myType := s.container.Storage().MigrationType() header := MigrationHeader{ - Fs: &myType, - Criu: criuType, - Idmap: idmaps, - Snapshots: snapshots, + Fs: &myType, + Criu: criuType, + Idmap: idmaps, + SnapshotNames: snapshotNames, + Snapshots: snapshots, } if err := s.send(&header); err != nil { @@ -341,54 +362,126 @@ driver, _ = rsyncMigrationSource(s.container) } - if err := driver.SendWhileRunning(s.fsConn); err != nil { + // All failure paths need to do a few things to correctly handle errors before returning. + // Unfortunately, handling errors is not well-suited to defer as the code depends on the + // status of driver and the error value. The error value is especially tricky due to the + // common case of creating a new err variable (intentional or not) due to scoping and use + // of ":=". Capturing err in a closure for use in defer would be fragile, which defeats + // the purpose of using defer. An abort function reduces the odds of mishandling errors + // without introducing the fragility of closing on err. 
+ abort := func(err error) error { driver.Cleanup() s.sendControl(err) return err } + if err := driver.SendWhileRunning(s.fsConn); err != nil { + return abort(err) + } + if s.live { if header.Criu == nil { - driver.Cleanup() - err := fmt.Errorf("Got no CRIU socket type for live migration") - s.sendControl(err) - return err + return abort(fmt.Errorf("Got no CRIU socket type for live migration")) } else if *header.Criu != CRIUType_CRIU_RSYNC { - driver.Cleanup() - err := fmt.Errorf("Formats other than criu rsync not understood") - s.sendControl(err) - return err + return abort(fmt.Errorf("Formats other than criu rsync not understood")) } checkpointDir, err := ioutil.TempDir("", "lxd_checkpoint_") if err != nil { - driver.Cleanup() - s.sendControl(err) - return err + return abort(err) } defer os.RemoveAll(checkpointDir) - opts := lxc.CheckpointOptions{Stop: true, Directory: checkpointDir, Verbose: true} - err = s.container.Checkpoint(opts) + if lxc.VersionAtLeast(2, 0, 4) { + /* What happens below is slightly convoluted. Due to various + * complications with networking, there's no easy way for criu + * to exit and leave the container in a frozen state for us to + * somehow resume later. + * + * Instead, we use what criu calls an "action-script", which is + * basically a callback that lets us know when the dump is + * done. (Unfortunately, we can't pass arguments, just an + * executable path, so we write a custom action script with the + * real command we want to run.) + * + * This script then hangs until the migration operation either + * finishes successfully or fails, and exits 1 or 0, which + * causes criu to either leave the container running or kill it + * as we asked. 
+ */ + dumpDone := make(chan bool, 1) + actionScriptOpSecret, err := shared.RandomCryptoString() + if err != nil { + return abort(err) + } - if err2 := CollectCRIULogFile(s.container, checkpointDir, "migration", "dump"); err2 != nil { - shared.Debugf("Error collecting checkpoint log file %s", err) - } + actionScriptOp, err := operationCreate( + operationClassWebsocket, + nil, + nil, + func(op *operation) error { + _, err := migrateOp.WaitFinal(-1) + if err != nil { + return err + } + + if migrateOp.status != shared.Success { + return fmt.Errorf("restore failed: %s", op.status.String()) + } + return nil + }, + nil, + func(op *operation, r *http.Request, w http.ResponseWriter) error { + secret := r.FormValue("secret") + if secret == "" { + return fmt.Errorf("missing secret") + } + + if secret != actionScriptOpSecret { + return os.ErrPermission + } + + c, err := shared.WebsocketUpgrader.Upgrade(w, r, nil) + if err != nil { + return err + } + + dumpDone <- true + + closeMsg := websocket.FormatCloseMessage(websocket.CloseNormalClosure, "") + return c.WriteMessage(websocket.CloseMessage, closeMsg) + }, + ) + if err != nil { + return abort(err) + } - if err != nil { - driver.Cleanup() - log, err2 := GetCRIULogErrors(checkpointDir, "dump") + if err := writeActionScript(checkpointDir, actionScriptOp.url, actionScriptOpSecret); err != nil { + return abort(err) + } - /* couldn't find the CRIU log file which means we - * didn't even get that far; give back the liblxc - * error. 
*/ - if err2 != nil { - log = err.Error() + _, err = actionScriptOp.Run() + if err != nil { + return abort(err) } - err = fmt.Errorf("checkpoint failed:\n%s", log) - s.sendControl(err) - return err + migrateDone := make(chan error, 1) + go func() { + migrateDone <- s.container.Migrate(lxc.MIGRATE_DUMP, checkpointDir, "migration", true, true) + }() + + select { + /* the checkpoint failed, let's just abort */ + case err = <-migrateDone: + return abort(err) + /* the dump finished, let's continue on to the restore */ + case <-dumpDone: + shared.LogDebugf("Dump finished, continuing with restore...") + } + } else { + if err := s.container.Migrate(lxc.MIGRATE_DUMP, checkpointDir, "migration", true, false); err != nil { + return abort(err) + } } /* @@ -399,15 +492,11 @@ * p.haul's protocol, it will make sense to do these in parallel. */ if err := RsyncSend(shared.AddSlash(checkpointDir), s.criuConn); err != nil { - driver.Cleanup() - s.sendControl(err) - return err + return abort(err) } if err := driver.SendAfterCheckpoint(s.fsConn); err != nil { - driver.Cleanup() - s.sendControl(err) - return err + return abort(err) } } @@ -419,8 +508,6 @@ return err } - // TODO: should we add some config here about automatically restarting - // the container migrate failure? What about the failures above? if !*msg.Success { return fmt.Errorf(*msg.Message) } @@ -429,10 +516,17 @@ } type migrationSink struct { - migrationFields + // We are pulling the container from src in pull mode. + src migrationFields + // The container is pushed from src to dest in push mode. Note that + // websocket connections are not set in push mode. Only the secret + // fields are used since the client will connect to the sockets. 
+ dest migrationFields - url string - dialer websocket.Dialer + url string + dialer websocket.Dialer + allConnected chan bool + push bool } type MigrationSinkArgs struct { @@ -440,34 +534,65 @@ Dialer websocket.Dialer Container container Secrets map[string]string + Push bool + Live bool } -func NewMigrationSink(args *MigrationSinkArgs) (func() error, error) { +func NewMigrationSink(args *MigrationSinkArgs) (*migrationSink, error) { sink := migrationSink{ - migrationFields{container: args.Container}, - args.Url, - args.Dialer, + src: migrationFields{container: args.Container}, + url: args.Url, + dialer: args.Dialer, + push: args.Push, } - var ok bool - sink.controlSecret, ok = args.Secrets["control"] - if !ok { - return nil, fmt.Errorf("Missing control secret") + if sink.push { + sink.allConnected = make(chan bool, 1) } - sink.fsSecret, ok = args.Secrets["fs"] - if !ok { - return nil, fmt.Errorf("Missing fs secret") - } + var ok bool + var err error + if sink.push { + sink.dest.controlSecret, err = shared.RandomCryptoString() + if err != nil { + return nil, err + } - sink.criuSecret, ok = args.Secrets["criu"] - sink.live = ok + sink.dest.fsSecret, err = shared.RandomCryptoString() + if err != nil { + return nil, err + } - if err := findCriu("destination"); sink.live && err != nil { + sink.dest.live = args.Live + if sink.dest.live { + sink.dest.criuSecret, err = shared.RandomCryptoString() + if err != nil { + return nil, err + } + } + } else { + sink.src.controlSecret, ok = args.Secrets["control"] + if !ok { + return nil, fmt.Errorf("Missing control secret") + } + + sink.src.fsSecret, ok = args.Secrets["fs"] + if !ok { + return nil, fmt.Errorf("Missing fs secret") + } + + sink.src.criuSecret, ok = args.Secrets["criu"] + sink.src.live = ok + } + + err = findCriu("destination") + if sink.push && sink.dest.live && err != nil { + return nil, err + } else if sink.src.live && err != nil { return nil, err } - return sink.do, nil + return &sink, nil } func (c *migrationSink) 
connectWithSecret(secret string) (*websocket.Conn, error) { @@ -479,41 +604,123 @@ return lxd.WebsocketDial(c.dialer, wsUrl) } -func (c *migrationSink) do() error { - var err error - c.controlConn, err = c.connectWithSecret(c.controlSecret) - if err != nil { - return err +func (s *migrationSink) Metadata() interface{} { + secrets := shared.Jmap{ + "control": s.dest.controlSecret, + "fs": s.dest.fsSecret, + } + + if s.dest.criuSecret != "" { + secrets["criu"] = s.dest.criuSecret + } + + return secrets +} + +func (s *migrationSink) Connect(op *operation, r *http.Request, w http.ResponseWriter) error { + secret := r.FormValue("secret") + if secret == "" { + return fmt.Errorf("missing secret") } - defer c.disconnect() - c.fsConn, err = c.connectWithSecret(c.fsSecret) + var conn **websocket.Conn + + switch secret { + case s.dest.controlSecret: + conn = &s.dest.controlConn + case s.dest.criuSecret: + conn = &s.dest.criuConn + case s.dest.fsSecret: + conn = &s.dest.fsConn + default: + /* If we didn't find the right secret, the user provided a bad one, + * which 403, not 404, since this operation actually exists */ + return os.ErrPermission + } + + c, err := shared.WebsocketUpgrader.Upgrade(w, r, nil) if err != nil { - c.sendControl(err) return err } - if c.live { - c.criuConn, err = c.connectWithSecret(c.criuSecret) + *conn = c + + if s.dest.controlConn != nil && (!s.dest.live || s.dest.criuConn != nil) && s.dest.fsConn != nil { + s.allConnected <- true + } + + return nil +} + +func (c *migrationSink) Do(migrateOp *operation) error { + var err error + + if c.push { + <-c.allConnected + } + + disconnector := c.src.disconnect + if c.push { + disconnector = c.dest.disconnect + } + + if c.push { + defer disconnector() + } else { + c.src.controlConn, err = c.connectWithSecret(c.src.controlSecret) + if err != nil { + return err + } + defer c.src.disconnect() + + c.src.fsConn, err = c.connectWithSecret(c.src.fsSecret) if err != nil { - c.sendControl(err) + c.src.sendControl(err) 
return err } + + if c.src.live { + c.src.criuConn, err = c.connectWithSecret(c.src.criuSecret) + if err != nil { + c.src.sendControl(err) + return err + } + } + } + + receiver := c.src.recv + if c.push { + receiver = c.dest.recv + } + + sender := c.src.send + if c.push { + sender = c.dest.send + } + + controller := c.src.sendControl + if c.push { + controller = c.dest.sendControl } header := MigrationHeader{} - if err := c.recv(&header); err != nil { - c.sendControl(err) + if err := receiver(&header); err != nil { + controller(err) return err } + live := c.src.live + if c.push { + live = c.dest.live + } + criuType := CRIUType_CRIU_RSYNC.Enum() - if !c.live { + if !live { criuType = nil } - mySink := c.container.Storage().MigrationSink - myType := c.container.Storage().MigrationType() + mySink := c.src.container.Storage().MigrationSink + myType := c.src.container.Storage().MigrationType() resp := MigrationHeader{ Fs: &myType, Criu: criuType, @@ -527,8 +734,8 @@ resp.Fs = &myType } - if err := c.send(&resp); err != nil { - c.sendControl(err) + if err := sender(&resp); err != nil { + controller(err) return err } @@ -537,32 +744,6 @@ imagesDir := "" srcIdmap := new(shared.IdmapSet) - snapshots := []container{} - for _, snap := range header.Snapshots { - // TODO: we need to propagate snapshot configurations - // as well. Right now the container configuration is - // done through the initial migration post. Should we - // post the snapshots and their configs as well, or do - // it some other way? 
- name := c.container.Name() + shared.SnapshotDelimiter + snap - args := containerArgs{ - Ctype: cTypeSnapshot, - Config: c.container.LocalConfig(), - Profiles: c.container.Profiles(), - Ephemeral: c.container.IsEphemeral(), - Architecture: c.container.Architecture(), - Devices: c.container.LocalDevices(), - Name: name, - } - - ct, err := containerCreateEmptySnapshot(c.container.Daemon(), args) - if err != nil { - restore <- err - return - } - snapshots = append(snapshots, ct) - } - for _, idmap := range header.Idmap { e := shared.IdmapEntry{ Isuid: *idmap.Isuid, @@ -581,12 +762,34 @@ */ fsTransfer := make(chan error) go func() { - if err := mySink(c.live, c.container, snapshots, c.fsConn); err != nil { + snapshots := []*Snapshot{} + + /* Legacy: we only sent the snapshot names, so we just + * copy the container's config over, same as we used to + * do. + */ + if len(header.SnapshotNames) != len(header.Snapshots) { + for _, name := range header.SnapshotNames { + base := snapshotToProtobuf(c.src.container) + base.Name = &name + snapshots = append(snapshots, base) + } + } else { + snapshots = header.Snapshots + } + + var fsConn *websocket.Conn + if c.push { + fsConn = c.dest.fsConn + } else { + fsConn = c.src.fsConn + } + if err := mySink(live, c.src.container, header.Snapshots, fsConn, srcIdmap); err != nil { fsTransfer <- err return } - if err := ShiftIfNecessary(c.container, srcIdmap); err != nil { + if err := ShiftIfNecessary(c.src.container, srcIdmap); err != nil { fsTransfer <- err return } @@ -594,44 +797,26 @@ fsTransfer <- nil }() - if c.live { + if live { var err error imagesDir, err = ioutil.TempDir("", "lxd_restore_") if err != nil { - os.RemoveAll(imagesDir) + restore <- err return } - defer func() { - err := CollectCRIULogFile(c.container, imagesDir, "migration", "restore") - /* - * If the checkpoint fails, we won't have any log to collect, - * so don't warn about that. 
- */ - if err != nil && !os.IsNotExist(err) { - shared.Debugf("Error collectiong migration log file %s", err) - } + defer os.RemoveAll(imagesDir) - os.RemoveAll(imagesDir) - }() - - if err := RsyncRecv(shared.AddSlash(imagesDir), c.criuConn); err != nil { + var criuConn *websocket.Conn + if c.push { + criuConn = c.dest.criuConn + } else { + criuConn = c.src.criuConn + } + if err := RsyncRecv(shared.AddSlash(imagesDir), criuConn); err != nil { restore <- err return } - - /* - * For unprivileged containers we need to shift the - * perms on the images images so that they can be - * opened by the process after it is in its user - * namespace. - */ - if !c.container.IsPrivileged() { - if err := c.container.IdmapSet().ShiftRootfs(imagesDir); err != nil { - restore <- err - return - } - } } err := <-fsTransfer @@ -640,52 +825,43 @@ return } - if c.live { - err := c.container.StartFromMigration(imagesDir) + if live { + err = c.src.container.Migrate(lxc.MIGRATE_RESTORE, imagesDir, "migration", false, false) if err != nil { - log, err2 := GetCRIULogErrors(imagesDir, "restore") - /* restore failed before CRIU was invoked, give - * back the liblxc error */ - if err2 != nil { - log = err.Error() - } - err = fmt.Errorf("restore failed:\n%s", log) restore <- err return } } - for _, snap := range snapshots { - if err := ShiftIfNecessary(snap, srcIdmap); err != nil { - restore <- err - return - } - } - restore <- nil }(c) - source := c.controlChannel() + var source <-chan MigrationControl + if c.push { + source = c.dest.controlChannel() + } else { + source = c.src.controlChannel() + } for { select { case err = <-restore: - c.sendControl(err) + controller(err) return err case msg, ok := <-source: if !ok { - c.disconnect() + disconnector() return fmt.Errorf("Got error reading source") } if !*msg.Success { - c.disconnect() + disconnector() return fmt.Errorf(*msg.Message) } else { // The source can only tell us it failed (e.g. if // checkpointing failed). 
We have to tell the source // whether or not the restore was successful. - shared.Debugf("Unknown message %v from source", msg) + shared.LogDebugf("Unknown message %v from source", msg) } } } @@ -694,7 +870,7 @@ /* * Similar to forkstart, this is called when lxd is invoked as: * - * lxd forkmigrate + * lxd forkmigrate * * liblxc's restore() sets up the processes in such a way that the monitor ends * up being a child of the process that calls it, in our case lxd. However, we @@ -703,7 +879,7 @@ * footprint when we fork tasks that will never free golang's memory, etc.) */ func MigrateContainer(args []string) error { - if len(args) != 5 { + if len(args) != 6 { return fmt.Errorf("Bad arguments %q", args) } @@ -711,8 +887,7 @@ lxcpath := args[2] configPath := args[3] imagesDir := args[4] - - defer os.Remove(configPath) + preservesInodes, err := strconv.ParseBool(args[5]) c, err := lxc.NewContainer(name, lxcpath) if err != nil { @@ -728,8 +903,9 @@ os.Stdout.Close() os.Stderr.Close() - return c.Restore(lxc.RestoreOptions{ - Directory: imagesDir, - Verbose: true, + return c.Migrate(lxc.MIGRATE_RESTORE, lxc.MigrateOptions{ + Directory: imagesDir, + Verbose: true, + PreservesInodes: preservesInodes, }) } diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/lxd/migrate.pb.go juju-core-2.0.0/src/github.com/lxc/lxd/lxd/migrate.pb.go --- juju-core-2.0~beta15/src/github.com/lxc/lxd/lxd/migrate.pb.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/lxd/migrate.pb.go 2016-10-13 14:31:53.000000000 +0000 @@ -10,6 +10,9 @@ It has these top-level messages: IDMapType + Config + Device + Snapshot MigrationHeader MigrationControl */ @@ -139,11 +142,124 @@ return 0 } +type Config struct { + Key *string `protobuf:"bytes,1,req,name=key" json:"key,omitempty"` + Value *string `protobuf:"bytes,2,req,name=value" json:"value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Config) Reset() { *m = Config{} } +func (m *Config) String() string { return 
proto.CompactTextString(m) } +func (*Config) ProtoMessage() {} + +func (m *Config) GetKey() string { + if m != nil && m.Key != nil { + return *m.Key + } + return "" +} + +func (m *Config) GetValue() string { + if m != nil && m.Value != nil { + return *m.Value + } + return "" +} + +type Device struct { + Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"` + Config []*Config `protobuf:"bytes,2,rep,name=config" json:"config,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Device) Reset() { *m = Device{} } +func (m *Device) String() string { return proto.CompactTextString(m) } +func (*Device) ProtoMessage() {} + +func (m *Device) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *Device) GetConfig() []*Config { + if m != nil { + return m.Config + } + return nil +} + +type Snapshot struct { + Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"` + LocalConfig []*Config `protobuf:"bytes,2,rep,name=localConfig" json:"localConfig,omitempty"` + Profiles []string `protobuf:"bytes,3,rep,name=profiles" json:"profiles,omitempty"` + Ephemeral *bool `protobuf:"varint,4,req,name=ephemeral" json:"ephemeral,omitempty"` + LocalDevices []*Device `protobuf:"bytes,5,rep,name=localDevices" json:"localDevices,omitempty"` + Architecture *int32 `protobuf:"varint,6,req,name=architecture" json:"architecture,omitempty"` + Stateful *bool `protobuf:"varint,7,req,name=stateful" json:"stateful,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Snapshot) Reset() { *m = Snapshot{} } +func (m *Snapshot) String() string { return proto.CompactTextString(m) } +func (*Snapshot) ProtoMessage() {} + +func (m *Snapshot) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *Snapshot) GetLocalConfig() []*Config { + if m != nil { + return m.LocalConfig + } + return nil +} + +func (m *Snapshot) GetProfiles() []string { + if m != nil { + return m.Profiles + 
} + return nil +} + +func (m *Snapshot) GetEphemeral() bool { + if m != nil && m.Ephemeral != nil { + return *m.Ephemeral + } + return false +} + +func (m *Snapshot) GetLocalDevices() []*Device { + if m != nil { + return m.LocalDevices + } + return nil +} + +func (m *Snapshot) GetArchitecture() int32 { + if m != nil && m.Architecture != nil { + return *m.Architecture + } + return 0 +} + +func (m *Snapshot) GetStateful() bool { + if m != nil && m.Stateful != nil { + return *m.Stateful + } + return false +} + type MigrationHeader struct { Fs *MigrationFSType `protobuf:"varint,1,req,name=fs,enum=main.MigrationFSType" json:"fs,omitempty"` Criu *CRIUType `protobuf:"varint,2,opt,name=criu,enum=main.CRIUType" json:"criu,omitempty"` Idmap []*IDMapType `protobuf:"bytes,3,rep,name=idmap" json:"idmap,omitempty"` - Snapshots []string `protobuf:"bytes,4,rep,name=snapshots" json:"snapshots,omitempty"` + SnapshotNames []string `protobuf:"bytes,4,rep,name=snapshotNames" json:"snapshotNames,omitempty"` + Snapshots []*Snapshot `protobuf:"bytes,5,rep,name=snapshots" json:"snapshots,omitempty"` XXX_unrecognized []byte `json:"-"` } @@ -172,7 +288,14 @@ return nil } -func (m *MigrationHeader) GetSnapshots() []string { +func (m *MigrationHeader) GetSnapshotNames() []string { + if m != nil { + return m.SnapshotNames + } + return nil +} + +func (m *MigrationHeader) GetSnapshots() []*Snapshot { if m != nil { return m.Snapshots } diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/lxd/migrate.proto juju-core-2.0.0/src/github.com/lxc/lxd/lxd/migrate.proto --- juju-core-2.0~beta15/src/github.com/lxc/lxd/lxd/migrate.proto 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/lxd/migrate.proto 2016-10-13 14:31:53.000000000 +0000 @@ -1,35 +1,55 @@ package main; enum MigrationFSType { - RSYNC = 0; - BTRFS = 1; - ZFS = 2; + RSYNC = 0; + BTRFS = 1; + ZFS = 2; } enum CRIUType { - CRIU_RSYNC = 0; - PHAUL = 1; + CRIU_RSYNC = 0; + PHAUL = 1; } message IDMapType { - required 
bool isuid = 1; - required bool isgid = 2; - required int32 hostid = 3; - required int32 nsid = 4; - required int32 maprange = 5; + required bool isuid = 1; + required bool isgid = 2; + required int32 hostid = 3; + required int32 nsid = 4; + required int32 maprange = 5; } -message MigrationHeader { - required MigrationFSType fs = 1; - optional CRIUType criu = 2; - repeated IDMapType idmap = 3; +message Config { + required string key = 1; + required string value = 2; +} + +message Device { + required string name = 1; + repeated Config config = 2; +} - repeated string snapshots = 4; +message Snapshot { + required string name = 1; + repeated Config localConfig = 2; + repeated string profiles = 3; + required bool ephemeral = 4; + repeated Device localDevices = 5; + required int32 architecture = 6; + required bool stateful = 7; +} + +message MigrationHeader { + required MigrationFSType fs = 1; + optional CRIUType criu = 2; + repeated IDMapType idmap = 3; + repeated string snapshotNames = 4; + repeated Snapshot snapshots = 5; } message MigrationControl { - required bool success = 1; + required bool success = 1; - /* optional failure message if sending a failure */ - optional string message = 2; + /* optional failure message if sending a failure */ + optional string message = 2; } diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/lxd/networks_config.go juju-core-2.0.0/src/github.com/lxc/lxd/lxd/networks_config.go --- juju-core-2.0~beta15/src/github.com/lxc/lxd/lxd/networks_config.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/lxd/networks_config.go 2016-10-13 14:31:53.000000000 +0000 @@ -0,0 +1,209 @@ +package main + +import ( + "fmt" + "strconv" + "strings" + + "github.com/lxc/lxd/shared" +) + +var networkConfigKeys = map[string]func(value string) error{ + "bridge.driver": func(value string) error { + return shared.IsOneOf(value, []string{"native", "openvswitch"}) + }, + "bridge.external_interfaces": func(value string) error { + if 
value == "" { + return nil + } + + for _, entry := range strings.Split(value, ",") { + entry = strings.TrimSpace(entry) + if networkValidName(entry) != nil { + return fmt.Errorf("Invalid interface name '%s'", entry) + } + } + + return nil + }, + "bridge.mtu": shared.IsInt64, + "bridge.mode": func(value string) error { + return shared.IsOneOf(value, []string{"standard", "fan"}) + }, + + "fan.overlay_subnet": networkValidNetworkV4, + "fan.underlay_subnet": func(value string) error { + if value == "auto" { + return nil + } + + return networkValidNetworkV4(value) + }, + "fan.type": func(value string) error { + return shared.IsOneOf(value, []string{"vxlan", "ipip"}) + }, + + "tunnel.TARGET.protocol": func(value string) error { + return shared.IsOneOf(value, []string{"gre", "vxlan"}) + }, + "tunnel.TARGET.local": networkValidAddressV4, + "tunnel.TARGET.remote": networkValidAddressV4, + "tunnel.TARGET.port": networkValidPort, + "tunnel.TARGET.group": networkValidAddressV4, + "tunnel.TARGET.id": shared.IsInt64, + + "ipv4.address": func(value string) error { + if shared.IsOneOf(value, []string{"none", "auto"}) == nil { + return nil + } + + return networkValidAddressCIDRV4(value) + }, + "ipv4.nat": shared.IsBool, + "ipv4.dhcp": shared.IsBool, + "ipv4.dhcp.ranges": shared.IsAny, + "ipv4.routing": shared.IsBool, + + "ipv6.address": func(value string) error { + if shared.IsOneOf(value, []string{"none", "auto"}) == nil { + return nil + } + + return networkValidAddressCIDRV6(value) + }, + "ipv6.nat": shared.IsBool, + "ipv6.dhcp": shared.IsBool, + "ipv6.dhcp.stateful": shared.IsBool, + "ipv6.dhcp.ranges": shared.IsAny, + "ipv6.routing": shared.IsBool, + + "dns.domain": shared.IsAny, + "dns.mode": func(value string) error { + return shared.IsOneOf(value, []string{"dynamic", "managed", "none"}) + }, + + "raw.dnsmasq": shared.IsAny, +} + +func networkValidateConfig(name string, config map[string]string) error { + bridgeMode := config["bridge.mode"] + + if bridgeMode == "fan" && 
len(name) > 11 { + return fmt.Errorf("Network name too long to use with the FAN (must be 11 characters or less)") + } + + for k, v := range config { + key := k + + // User keys are free for all + if strings.HasPrefix(key, "user.") { + continue + } + + // Tunnel keys have the remote name in their name, so extract the real key + if strings.HasPrefix(key, "tunnel.") { + fields := strings.Split(key, ".") + if len(fields) != 3 { + return fmt.Errorf("Invalid network configuration key: %s", k) + } + + if len(name)+len(fields[1]) > 14 { + return fmt.Errorf("Network name too long for tunnel interface: %s-%s", name, fields[1]) + } + + key = fmt.Sprintf("tunnel.TARGET.%s", fields[2]) + } + + // Then validate + validator, ok := networkConfigKeys[key] + if !ok { + return fmt.Errorf("Invalid network configuration key: %s", k) + } + + err := validator(v) + if err != nil { + return err + } + + // Bridge mode checks + if bridgeMode == "fan" && strings.HasPrefix(key, "ipv4.") && v != "" { + return fmt.Errorf("IPv4 configuration may not be set when in 'fan' mode") + } + + if bridgeMode == "fan" && strings.HasPrefix(key, "ipv6.") && v != "" { + return fmt.Errorf("IPv6 configuration may not be set when in 'fan' mode") + } + + if bridgeMode != "fan" && strings.HasPrefix(key, "fan.") && v != "" { + return fmt.Errorf("FAN configuration may only be set when in 'fan' mode") + } + + // MTU checks + if key == "bridge.mtu" && v != "" { + mtu, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("Invalid value for an integer: %s", v) + } + + ipv6 := config["ipv6.address"] + if ipv6 != "" && ipv6 != "none" && mtu < 1280 { + return fmt.Errorf("The minimum MTU for an IPv6 network is 1280") + } + + ipv4 := config["ipv4.address"] + if ipv4 != "" && ipv4 != "none" && mtu < 68 { + return fmt.Errorf("The minimum MTU for an IPv4 network is 68") + } + + if config["bridge.mode"] == "fan" { + if config["fan.type"] == "ipip" { + if mtu > 1480 { + return fmt.Errorf("Maximum MTU for an 
IPIP FAN bridge is 1480") + } + } else { + if mtu > 1450 { + return fmt.Errorf("Maximum MTU for a VXLAN FAN bridge is 1450") + } + } + } + + tunnels := networkGetTunnels(config) + if len(tunnels) > 0 && mtu > 1400 { + return fmt.Errorf("Maximum MTU when using tunnels is 1400") + } + } + } + + return nil +} + +func networkFillAuto(config map[string]string) error { + if config["ipv4.address"] == "auto" { + subnet, err := networkRandomSubnetV4() + if err != nil { + return err + } + + config["ipv4.address"] = subnet + } + + if config["ipv6.address"] == "auto" { + subnet, err := networkRandomSubnetV6() + if err != nil { + return err + } + + config["ipv6.address"] = subnet + } + + if config["fan.underlay_subnet"] == "auto" { + subnet, _, err := networkDefaultGatewaySubnetV4() + if err != nil { + return err + } + + config["fan.underlay_subnet"] = subnet.String() + } + + return nil +} diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/lxd/networks.go juju-core-2.0.0/src/github.com/lxc/lxd/lxd/networks.go --- juju-core-2.0~beta15/src/github.com/lxc/lxd/lxd/networks.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/lxd/networks.go 2016-10-13 14:31:53.000000000 +0000 @@ -1,16 +1,24 @@ package main import ( + "encoding/binary" + "encoding/json" "fmt" + "io/ioutil" "net" "net/http" + "os" + "os/exec" "strconv" + "strings" "github.com/gorilla/mux" + log "gopkg.in/inconshreveable/log15.v2" "github.com/lxc/lxd/shared" ) +// API endpoints func networksGet(d *Daemon, r *http.Request) Response { recursionStr := r.FormValue("recursion") recursion, err := strconv.Atoi(recursionStr) @@ -18,23 +26,22 @@ recursion = 0 } - ifs, err := net.Interfaces() + ifs, err := networkGetInterfaces(d) if err != nil { return InternalError(err) } resultString := []string{} - resultMap := []network{} + resultMap := []shared.NetworkConfig{} for _, iface := range ifs { if recursion == 0 { - resultString = append(resultString, fmt.Sprintf("/%s/networks/%s", 
shared.APIVersion, iface.Name)) + resultString = append(resultString, fmt.Sprintf("/%s/networks/%s", shared.APIVersion, iface)) } else { - net, err := doNetworkGet(d, iface.Name) + net, err := doNetworkGet(d, iface) if err != nil { continue } resultMap = append(resultMap, net) - } } @@ -45,87 +52,1246 @@ return SyncResponse(true, resultMap) } -var networksCmd = Command{name: "networks", get: networksGet} +func networksPost(d *Daemon, r *http.Request) Response { + req := shared.NetworkConfig{} -type network struct { - Name string `json:"name"` - Type string `json:"type"` - UsedBy []string `json:"used_by"` -} + // Parse the request + err := json.NewDecoder(r.Body).Decode(&req) + if err != nil { + return BadRequest(err) + } -func isOnBridge(c container, bridge string) bool { - for _, device := range c.ExpandedDevices() { - if device["type"] != "nic" { - continue - } + // Sanity checks + if req.Name == "" { + return BadRequest(fmt.Errorf("No name provided")) + } - if !shared.StringInSlice(device["nictype"], []string{"bridged", "macvlan"}) { - continue - } + err = networkValidName(req.Name) + if err != nil { + return BadRequest(err) + } + + if req.Type != "" && req.Type != "bridge" { + return BadRequest(fmt.Errorf("Only 'bridge' type networks can be created")) + } + + networks, err := networkGetInterfaces(d) + if err != nil { + return InternalError(err) + } - if device["parent"] == "" { - continue + if shared.StringInSlice(req.Name, networks) { + return BadRequest(fmt.Errorf("The network already exists")) + } + + if req.Config == nil { + req.Config = map[string]string{} + } + + err = networkValidateConfig(req.Name, req.Config) + if err != nil { + return BadRequest(err) + } + + // Set some default values where needed + if req.Config["bridge.mode"] == "fan" { + if req.Config["fan.underlay_subnet"] == "" { + req.Config["fan.underlay_subnet"] = "auto" + } + } else { + if req.Config["ipv4.address"] == "" { + req.Config["ipv4.address"] = "auto" + } + if 
req.Config["ipv4.address"] == "auto" && req.Config["ipv4.nat"] == "" { + req.Config["ipv4.nat"] = "true" } - if device["parent"] == bridge { - return true + if req.Config["ipv6.address"] == "" { + content, err := ioutil.ReadFile("/proc/sys/net/ipv6/conf/default/disable_ipv6") + if err == nil && string(content) == "0\n" { + req.Config["ipv6.address"] = "auto" + } + } + if req.Config["ipv6.address"] == "auto" && req.Config["ipv6.nat"] == "" { + req.Config["ipv6.nat"] = "true" } } - return false + // Replace "auto" by actual values + err = networkFillAuto(req.Config) + if err != nil { + return InternalError(err) + } + + // Create the database entry + _, err = dbNetworkCreate(d.db, req.Name, req.Config) + if err != nil { + return InternalError( + fmt.Errorf("Error inserting %s into database: %s", req.Name, err)) + } + + // Start the network + n, err := networkLoadByName(d, req.Name) + if err != nil { + return InternalError(err) + } + + err = n.Start() + if err != nil { + n.Delete() + return InternalError(err) + } + + return SyncResponseLocation(true, nil, fmt.Sprintf("/%s/networks/%s", shared.APIVersion, req.Name)) } +var networksCmd = Command{name: "networks", get: networksGet, post: networksPost} + func networkGet(d *Daemon, r *http.Request) Response { name := mux.Vars(r)["name"] n, err := doNetworkGet(d, name) if err != nil { - return InternalError(err) + return SmartError(err) } - return SyncResponse(true, &n) + etag := []interface{}{n.Name, n.Managed, n.Type, n.Config} + + return SyncResponseETag(true, &n, etag) } -func doNetworkGet(d *Daemon, name string) (network, error) { - iface, err := net.InterfaceByName(name) - if err != nil { - return network{}, err +func doNetworkGet(d *Daemon, name string) (shared.NetworkConfig, error) { + // Get some information + osInfo, _ := net.InterfaceByName(name) + _, dbInfo, _ := dbNetworkGet(d.db, name) + + // Sanity check + if osInfo == nil && dbInfo == nil { + return shared.NetworkConfig{}, os.ErrNotExist } // Prepare the 
response - n := network{} - n.Name = iface.Name + n := shared.NetworkConfig{} + n.Name = name n.UsedBy = []string{} + n.Config = map[string]string{} // Look for containers using the interface cts, err := dbContainersList(d.db, cTypeRegular) if err != nil { - return network{}, err + return shared.NetworkConfig{}, err } for _, ct := range cts { c, err := containerLoadByName(d, ct) if err != nil { - return network{}, err + return shared.NetworkConfig{}, err } - if isOnBridge(c, n.Name) { + if networkIsInUse(c, n.Name) { n.UsedBy = append(n.UsedBy, fmt.Sprintf("/%s/containers/%s", shared.APIVersion, ct)) } } // Set the device type as needed - if shared.IsLoopback(iface) { + if osInfo != nil && shared.IsLoopback(osInfo) { n.Type = "loopback" - } else if shared.PathExists(fmt.Sprintf("/sys/class/net/%s/bridge", n.Name)) { + } else if dbInfo != nil || shared.PathExists(fmt.Sprintf("/sys/class/net/%s/bridge", n.Name)) { + if dbInfo != nil { + n.Managed = true + n.Config = dbInfo.Config + } + n.Type = "bridge" } else if shared.PathExists(fmt.Sprintf("/sys/class/net/%s/device", n.Name)) { n.Type = "physical" + } else if shared.PathExists(fmt.Sprintf("/sys/class/net/%s/bonding", n.Name)) { + n.Type = "bond" } else { - n.Type = "unknown" + _, err := exec.Command("ovs-vsctl", "br-exists", n.Name).CombinedOutput() + if err == nil { + n.Type = "bridge" + } else { + n.Type = "unknown" + } } return n, nil } -var networkCmd = Command{name: "networks/{name}", get: networkGet} +func networkDelete(d *Daemon, r *http.Request) Response { + name := mux.Vars(r)["name"] + + // Get the existing network + n, err := networkLoadByName(d, name) + if err != nil { + return NotFound + } + + // Attempt to delete the network + err = n.Delete() + if err != nil { + return SmartError(err) + } + + // Cleanup storage + if shared.PathExists(shared.VarPath("networks", n.name)) { + os.RemoveAll(shared.VarPath("networks", n.name)) + } + + return EmptySyncResponse +} + +func networkPost(d *Daemon, r 
*http.Request) Response { + name := mux.Vars(r)["name"] + req := shared.NetworkConfig{} + + // Parse the request + err := json.NewDecoder(r.Body).Decode(&req) + if err != nil { + return BadRequest(err) + } + + // Get the existing network + n, err := networkLoadByName(d, name) + if err != nil { + return NotFound + } + + // Sanity checks + if req.Name == "" { + return BadRequest(fmt.Errorf("No name provided")) + } + + err = networkValidName(req.Name) + if err != nil { + return BadRequest(err) + } + + // Check that the name isn't already in use + networks, err := networkGetInterfaces(d) + if err != nil { + return InternalError(err) + } + + if shared.StringInSlice(req.Name, networks) { + return Conflict + } + + // Rename it + err = n.Rename(req.Name) + if err != nil { + return SmartError(err) + } + + return SyncResponseLocation(true, nil, fmt.Sprintf("/%s/networks/%s", shared.APIVersion, req.Name)) +} + +func networkPut(d *Daemon, r *http.Request) Response { + name := mux.Vars(r)["name"] + + // Get the existing network + _, dbInfo, err := dbNetworkGet(d.db, name) + if err != nil { + return SmartError(err) + } + + // Validate the ETag + etag := []interface{}{dbInfo.Name, dbInfo.Managed, dbInfo.Type, dbInfo.Config} + + err = etagCheck(r, etag) + if err != nil { + return PreconditionFailed(err) + } + + req := shared.NetworkConfig{} + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + return BadRequest(err) + } + + return doNetworkUpdate(d, name, dbInfo.Config, req.Config) +} + +func networkPatch(d *Daemon, r *http.Request) Response { + name := mux.Vars(r)["name"] + + // Get the existing network + _, dbInfo, err := dbNetworkGet(d.db, name) + if dbInfo != nil { + return SmartError(err) + } + + // Validate the ETag + etag := []interface{}{dbInfo.Name, dbInfo.Managed, dbInfo.Type, dbInfo.Config} + + err = etagCheck(r, etag) + if err != nil { + return PreconditionFailed(err) + } + + req := shared.NetworkConfig{} + if err := json.NewDecoder(r.Body).Decode(&req); err 
!= nil { + return BadRequest(err) + } + + // Config stacking + if req.Config == nil { + req.Config = map[string]string{} + } + + for k, v := range dbInfo.Config { + _, ok := req.Config[k] + if !ok { + req.Config[k] = v + } + } + + return doNetworkUpdate(d, name, dbInfo.Config, req.Config) +} + +func doNetworkUpdate(d *Daemon, name string, oldConfig map[string]string, newConfig map[string]string) Response { + // Validate the configuration + err := networkValidateConfig(name, newConfig) + if err != nil { + return BadRequest(err) + } + + // When switching to a fan bridge, auto-detect the underlay + if newConfig["bridge.mode"] == "fan" { + if newConfig["fan.underlay_subnet"] == "" { + newConfig["fan.underlay_subnet"] = "auto" + } + } + + // Load the network + n, err := networkLoadByName(d, name) + if err != nil { + return NotFound + } + + err = n.Update(shared.NetworkConfig{Config: newConfig}) + if err != nil { + return SmartError(err) + } + + return EmptySyncResponse +} + +var networkCmd = Command{name: "networks/{name}", get: networkGet, delete: networkDelete, post: networkPost, put: networkPut, patch: networkPatch} + +// The network structs and functions +func networkLoadByName(d *Daemon, name string) (*network, error) { + id, dbInfo, err := dbNetworkGet(d.db, name) + if err != nil { + return nil, err + } + + n := network{daemon: d, id: id, name: name, config: dbInfo.Config} + + return &n, nil +} + +func networkStartup(d *Daemon) error { + // Get a list of managed networks + networks, err := dbNetworks(d.db) + if err != nil { + return err + } + + // Bring them all up + for _, name := range networks { + n, err := networkLoadByName(d, name) + if err != nil { + return err + } + + err = n.Start() + if err != nil { + // Don't cause LXD to fail to start entirely on network bring up failure + shared.LogError("Failed to bring up network", log.Ctx{"err": err, "name": name}) + } + } + + return nil +} + +type network struct { + // Properties + daemon *Daemon + id int64 + name 
string + + // config + config map[string]string +} + +func (n *network) Config() map[string]string { + return n.config +} + +func (n *network) IsRunning() bool { + return shared.PathExists(fmt.Sprintf("/sys/class/net/%s", n.name)) +} + +func (n *network) IsUsed() bool { + // Look for containers using the interface + cts, err := dbContainersList(n.daemon.db, cTypeRegular) + if err != nil { + return true + } + + for _, ct := range cts { + c, err := containerLoadByName(n.daemon, ct) + if err != nil { + return true + } + + if networkIsInUse(c, n.name) { + return true + } + } + + return false +} + +func (n *network) Delete() error { + // Sanity checks + if n.IsUsed() { + return fmt.Errorf("The network is currently in use") + } + + // Bring the network down + if n.IsRunning() { + err := n.Stop() + if err != nil { + return err + } + } + + // Remove the network from the database + err := dbNetworkDelete(n.daemon.db, n.name) + if err != nil { + return err + } + + return nil +} + +func (n *network) Rename(name string) error { + // Sanity checks + if n.IsUsed() { + return fmt.Errorf("The network is currently in use") + } + + // Bring the network down + if n.IsRunning() { + err := n.Stop() + if err != nil { + return err + } + } + + // Rename directory + if shared.PathExists(shared.VarPath("networks", name)) { + os.RemoveAll(shared.VarPath("networks", name)) + } + + if shared.PathExists(shared.VarPath("networks", n.name)) { + err := os.Rename(shared.VarPath("networks", n.name), shared.VarPath("networks", name)) + if err != nil { + return err + } + } + + // Rename the database entry + err := dbNetworkRename(n.daemon.db, n.name, name) + if err != nil { + return err + } + + // Bring the network up + err = n.Start() + if err != nil { + return err + } + + return nil +} + +func (n *network) Start() error { + // Create directory + if !shared.PathExists(shared.VarPath("networks", n.name)) { + err := os.MkdirAll(shared.VarPath("networks", n.name), 0700) + if err != nil { + return err + 
} + } + + // Create the bridge interface + if !n.IsRunning() { + if n.config["bridge.driver"] == "openvswitch" { + err := shared.RunCommand("ovs-vsctl", "add-br", n.name) + if err != nil { + return err + } + } else { + err := shared.RunCommand("ip", "link", "add", n.name, "type", "bridge") + if err != nil { + return err + } + } + } + + // Get a list of tunnels + tunnels := networkGetTunnels(n.config) + + // IPv6 bridge configuration + if !shared.StringInSlice(n.config["ipv6.address"], []string{"", "none"}) { + err := networkSysctl(fmt.Sprintf("ipv6/conf/%s/autoconf", n.name), "0") + if err != nil { + return err + } + + err = networkSysctl(fmt.Sprintf("ipv6/conf/%s/accept_dad", n.name), "0") + if err != nil { + return err + } + } + + // Get a list of interfaces + ifaces, err := net.Interfaces() + if err != nil { + return err + } + + // Cleanup any existing tunnel device + for _, iface := range ifaces { + if strings.HasPrefix(iface.Name, fmt.Sprintf("%s-", n.name)) { + err = shared.RunCommand("ip", "link", "del", iface.Name) + if err != nil { + return err + } + } + } + + // Set the MTU + mtu := "" + if n.config["bridge.mtu"] != "" { + mtu = n.config["bridge.mtu"] + } else if len(tunnels) > 0 { + mtu = "1400" + } else if n.config["bridge.mode"] == "fan" { + if n.config["fan.type"] == "ipip" { + mtu = "1480" + } else { + mtu = "1450" + } + } + + // Attempt to add a dummy device to the bridge to force the MTU + if mtu != "" && n.config["bridge.driver"] != "openvswitch" { + err = shared.RunCommand("ip", "link", "add", fmt.Sprintf("%s-mtu", n.name), "mtu", mtu, "type", "dummy") + if err == nil { + networkAttachInterface(n.name, fmt.Sprintf("%s-mtu", n.name)) + } + } + + // Now, set a default MTU + if mtu == "" { + mtu = "1500" + } + + err = shared.RunCommand("ip", "link", "set", n.name, "mtu", mtu) + if err != nil { + return err + } + + // Bring it up + err = shared.RunCommand("ip", "link", "set", n.name, "up") + if err != nil { + return err + } + + // Add any listed 
existing external interface + if n.config["bridge.external_interfaces"] != "" { + for _, entry := range strings.Split(n.config["bridge.external_interfaces"], ",") { + entry = strings.TrimSpace(entry) + iface, err := net.InterfaceByName(entry) + if err != nil { + continue + } + + addrs, err := iface.Addrs() + if err == nil && len(addrs) != 0 { + return fmt.Errorf("Only unconfigured network interfaces can be bridged") + } + + err = networkAttachInterface(n.name, entry) + if err != nil { + return err + } + } + } + + // Remove any existing IPv4 iptables rules + err = networkIptablesClear("ipv4", n.name, "") + if err != nil { + return err + } + + err = networkIptablesClear("ipv4", n.name, "mangle") + if err != nil { + return err + } + + err = networkIptablesClear("ipv4", n.name, "nat") + if err != nil { + return err + } + + // Flush all IPv4 addresses + err = shared.RunCommand("ip", "-4", "addr", "flush", "dev", n.name, "scope", "global") + if err != nil { + return err + } + + // Configure IPv4 firewall (includes fan) + if n.config["bridge.mode"] == "fan" || !shared.StringInSlice(n.config["ipv4.address"], []string{"", "none"}) { + // Setup basic iptables overrides + err = networkIptablesPrepend("ipv4", n.name, "", "INPUT", "-i", n.name, "-p", "udp", "--dport", "67", "-j", "ACCEPT") + if err != nil { + return err + } + + err = networkIptablesPrepend("ipv4", n.name, "", "INPUT", "-i", n.name, "-p", "tcp", "--dport", "67", "-j", "ACCEPT") + if err != nil { + return err + } + + err = networkIptablesPrepend("ipv4", n.name, "", "INPUT", "-i", n.name, "-p", "udp", "--dport", "53", "-j", "ACCEPT") + if err != nil { + return err + } + + err = networkIptablesPrepend("ipv4", n.name, "", "INPUT", "-i", n.name, "-p", "tcp", "--dport", "53", "-j", "ACCEPT") + if err != nil { + return err + } + + // Allow forwarding + if n.config["bridge.mode"] == "fan" || n.config["ipv4.routing"] == "" || shared.IsTrue(n.config["ipv4.routing"]) { + err = networkSysctl("ipv4/ip_forward", "1") + if err 
!= nil { + return err + } + + err = networkIptablesPrepend("ipv4", n.name, "", "FORWARD", "-i", n.name, "-j", "ACCEPT") + if err != nil { + return err + } + + err = networkIptablesPrepend("ipv4", n.name, "", "FORWARD", "-o", n.name, "-j", "ACCEPT") + if err != nil { + return err + } + } else { + err = networkIptablesPrepend("ipv4", n.name, "", "FORWARD", "-i", n.name, "-j", "REJECT") + if err != nil { + return err + } + + err = networkIptablesPrepend("ipv4", n.name, "", "FORWARD", "-o", n.name, "-j", "REJECT") + if err != nil { + return err + } + } + } + + // Start building the dnsmasq command line + dnsmasqCmd := []string{"dnsmasq", "-u", "root", "--strict-order", "--bind-interfaces", + fmt.Sprintf("--pid-file=%s", shared.VarPath("networks", n.name, "dnsmasq.pid")), + "--except-interface=lo", + fmt.Sprintf("--interface=%s", n.name)} + + // Configure IPv4 + if !shared.StringInSlice(n.config["ipv4.address"], []string{"", "none"}) { + // Parse the subnet + ip, subnet, err := net.ParseCIDR(n.config["ipv4.address"]) + if err != nil { + return err + } + + // Update the dnsmasq config + dnsmasqCmd = append(dnsmasqCmd, fmt.Sprintf("--listen-address=%s", ip.String())) + if n.config["ipv4.dhcp"] == "" || shared.IsTrue(n.config["ipv4.dhcp"]) { + if !shared.StringInSlice("--dhcp-no-override", dnsmasqCmd) { + dnsmasqCmd = append(dnsmasqCmd, []string{"--dhcp-no-override", "--dhcp-authoritative", fmt.Sprintf("--dhcp-leasefile=%s", shared.VarPath("networks", n.name, "dnsmasq.leases")), fmt.Sprintf("--dhcp-hostsfile=%s", shared.VarPath("networks", n.name, "dnsmasq.hosts"))}...) + } + + if n.config["ipv4.dhcp.ranges"] != "" { + for _, dhcpRange := range strings.Split(n.config["ipv4.dhcp.ranges"], ",") { + dhcpRange = strings.TrimSpace(dhcpRange) + dnsmasqCmd = append(dnsmasqCmd, []string{"--dhcp-range", strings.Replace(dhcpRange, "-", ",", -1)}...) 
+ } + } else { + dnsmasqCmd = append(dnsmasqCmd, []string{"--dhcp-range", fmt.Sprintf("%s,%s", networkGetIP(subnet, 2).String(), networkGetIP(subnet, -2).String())}...) + } + } + + // Add the address + err = shared.RunCommand("ip", "-4", "addr", "add", "dev", n.name, n.config["ipv4.address"]) + if err != nil { + return err + } + + // Configure NAT + if shared.IsTrue(n.config["ipv4.nat"]) { + err = networkIptablesPrepend("ipv4", n.name, "nat", "POSTROUTING", "-s", subnet.String(), "!", "-d", subnet.String(), "-j", "MASQUERADE") + if err != nil { + return err + } + } + } + + // Remove any existing IPv6 iptables rules + err = networkIptablesClear("ipv6", n.name, "") + if err != nil { + return err + } + + err = networkIptablesClear("ipv6", n.name, "nat") + if err != nil { + return err + } + + // Flush all IPv6 addresses + err = shared.RunCommand("ip", "-6", "addr", "flush", "dev", n.name, "scope", "global") + if err != nil { + return err + } + + // Configure IPv6 + if !shared.StringInSlice(n.config["ipv6.address"], []string{"", "none"}) { + // Enable IPv6 for the subnet + err := networkSysctl(fmt.Sprintf("ipv6/conf/%s/disable_ipv6", n.name), "0") + if err != nil { + return err + } + + // Parse the subnet + ip, subnet, err := net.ParseCIDR(n.config["ipv6.address"]) + if err != nil { + return err + } + + // Update the dnsmasq config + dnsmasqCmd = append(dnsmasqCmd, []string{fmt.Sprintf("--listen-address=%s", ip.String()), "--enable-ra"}...) + if n.config["ipv6.dhcp"] == "" || shared.IsTrue(n.config["ipv6.dhcp"]) { + if !shared.StringInSlice("--dhcp-no-override", dnsmasqCmd) { + dnsmasqCmd = append(dnsmasqCmd, []string{"--dhcp-no-override", "--dhcp-authoritative", fmt.Sprintf("--dhcp-leasefile=%s", shared.VarPath("networks", n.name, "dnsmasq.leases")), fmt.Sprintf("--dhcp-hostsfile=%s", shared.VarPath("networks", n.name, "dnsmasq.hosts"))}...) 
+ } + + if shared.IsTrue(n.config["ipv6.dhcp.stateful"]) { + if n.config["ipv6.dhcp.ranges"] != "" { + for _, dhcpRange := range strings.Split(n.config["ipv6.dhcp.ranges"], ",") { + dhcpRange = strings.TrimSpace(dhcpRange) + dnsmasqCmd = append(dnsmasqCmd, []string{"--dhcp-range", fmt.Sprintf("%s", strings.Replace(dhcpRange, "-", ",", -1))}...) + } + } else { + dnsmasqCmd = append(dnsmasqCmd, []string{"--dhcp-range", fmt.Sprintf("%s,%s", networkGetIP(subnet, 2), networkGetIP(subnet, -1))}...) + } + } else { + dnsmasqCmd = append(dnsmasqCmd, []string{"--dhcp-range", fmt.Sprintf("::,constructor:%s,ra-stateless,ra-names", n.name)}...) + } + } else { + dnsmasqCmd = append(dnsmasqCmd, []string{"--dhcp-range", fmt.Sprintf("::,constructor:%s,ra-only", n.name)}...) + } + + // Setup basic iptables overrides + err = networkIptablesPrepend("ipv6", n.name, "", "INPUT", "-i", n.name, "-p", "udp", "--dport", "546", "-j", "ACCEPT") + if err != nil { + return err + } + + err = networkIptablesPrepend("ipv6", n.name, "", "INPUT", "-i", n.name, "-p", "tcp", "--dport", "546", "-j", "ACCEPT") + if err != nil { + return err + } + + err = networkIptablesPrepend("ipv6", n.name, "", "INPUT", "-i", n.name, "-p", "udp", "--dport", "53", "-j", "ACCEPT") + if err != nil { + return err + } + + err = networkIptablesPrepend("ipv6", n.name, "", "INPUT", "-i", n.name, "-p", "tcp", "--dport", "53", "-j", "ACCEPT") + if err != nil { + return err + } + + // Allow forwarding + if n.config["ipv6.routing"] == "" || shared.IsTrue(n.config["ipv6.routing"]) { + // Get a list of proc entries + entries, err := ioutil.ReadDir("/proc/sys/net/ipv6/conf/") + if err != nil { + return err + } + + // First set accept_ra to 2 for everything + for _, entry := range entries { + content, err := ioutil.ReadFile(fmt.Sprintf("/proc/sys/net/ipv6/conf/%s/accept_ra", entry.Name())) + if err == nil && string(content) != "1\n" { + continue + } + + err = networkSysctl(fmt.Sprintf("ipv6/conf/%s/accept_ra", entry.Name()), "2") + 
if err != nil && !os.IsNotExist(err) { + return err + } + } + + // Then set forwarding for all of them + for _, entry := range entries { + err = networkSysctl(fmt.Sprintf("ipv6/conf/%s/forwarding", entry.Name()), "1") + if err != nil && !os.IsNotExist(err) { + return err + } + } + + err = networkIptablesPrepend("ipv6", n.name, "", "FORWARD", "-i", n.name, "-j", "ACCEPT") + if err != nil { + return err + } + + err = networkIptablesPrepend("ipv6", n.name, "", "FORWARD", "-o", n.name, "-j", "ACCEPT") + if err != nil { + return err + } + } else { + err = networkIptablesPrepend("ipv6", n.name, "", "FORWARD", "-i", n.name, "-j", "REJECT") + if err != nil { + return err + } + + err = networkIptablesPrepend("ipv6", n.name, "", "FORWARD", "-o", n.name, "-j", "REJECT") + if err != nil { + return err + } + } + + // Add the address + err = shared.RunCommand("ip", "-6", "addr", "add", "dev", n.name, n.config["ipv6.address"]) + if err != nil { + return err + } + + // Configure NAT + if shared.IsTrue(n.config["ipv6.nat"]) { + err = networkIptablesPrepend("ipv6", n.name, "nat", "POSTROUTING", "-s", subnet.String(), "!", "-d", subnet.String(), "-j", "MASQUERADE") + if err != nil { + return err + } + } + } + + // Configure the fan + if n.config["bridge.mode"] == "fan" { + tunName := fmt.Sprintf("%s-fan", n.name) + + // Parse the underlay + underlay := n.config["fan.underlay_subnet"] + _, underlaySubnet, err := net.ParseCIDR(underlay) + if err != nil { + return nil + } + + // Parse the overlay + overlay := n.config["fan.overlay_subnet"] + if overlay == "" { + overlay = "240.0.0.0/8" + } + + _, overlaySubnet, err := net.ParseCIDR(overlay) + if err != nil { + return err + } + + // Get the address + fanAddress, devName, devAddr, err := networkFanAddress(underlaySubnet, overlaySubnet) + if err != nil { + return err + } + + if n.config["fan.type"] == "ipip" { + fanAddress = strings.Replace(fanAddress, "/8", "/24", 1) + } + + // Add the address + err = shared.RunCommand("ip", "-4", "addr", 
"add", "dev", n.name, fanAddress) + if err != nil { + return err + } + + // Setup the tunnel + if n.config["fan.type"] == "ipip" { + err = shared.RunCommand("ip", "-4", "route", "flush", "dev", "tunl0") + if err != nil { + return err + } + + err = shared.RunCommand("ip", "link", "set", "tunl0", "up") + if err != nil { + return err + } + + // Fails if the map is already set + shared.RunCommand("ip", "link", "change", "tunl0", "type", "ipip", "fan-map", fmt.Sprintf("%s:%s", overlay, underlay)) + + addr := strings.Split(fanAddress, "/") + + err = shared.RunCommand("ip", "route", "add", overlay, "dev", "tunl0", "src", addr[0]) + if err != nil { + return err + } + } else { + vxlanID := fmt.Sprintf("%d", binary.BigEndian.Uint32(overlaySubnet.IP.To4())>>8) + + err = shared.RunCommand("ip", "link", "add", tunName, "type", "vxlan", "id", vxlanID, "dev", devName, "dstport", "0", "local", devAddr, "fan-map", fmt.Sprintf("%s:%s", overlay, underlay)) + if err != nil { + return err + } + + err = networkAttachInterface(n.name, tunName) + if err != nil { + return err + } + + err = shared.RunCommand("ip", "link", "set", tunName, "mtu", mtu, "up") + if err != nil { + return err + } + + err = shared.RunCommand("ip", "link", "set", n.name, "up") + if err != nil { + return err + } + } + + // Configure NAT + err = networkIptablesPrepend("ipv4", n.name, "nat", "POSTROUTING", "-s", underlaySubnet.String(), "!", "-d", underlaySubnet.String(), "-j", "MASQUERADE") + if err != nil { + return err + } + } + + // Configure tunnels + for _, tunnel := range tunnels { + getConfig := func(key string) string { + return n.config[fmt.Sprintf("tunnel.%s.%s", tunnel, key)] + } + + tunProtocol := getConfig("protocol") + tunLocal := getConfig("local") + tunRemote := getConfig("remote") + tunName := fmt.Sprintf("%s-%s", n.name, tunnel) + + // Configure the tunnel + cmd := []string{"ip", "link", "add", tunName} + if tunProtocol == "gre" { + // Skip partial configs + if tunProtocol == "" || tunLocal == "" || 
tunRemote == "" { + continue + } + + cmd = append(cmd, []string{"type", "gretap", "local", tunLocal, "remote", tunRemote}...) + } else if tunProtocol == "vxlan" { + tunGroup := getConfig("group") + + // Skip partial configs + if tunProtocol == "" { + continue + } + + cmd = append(cmd, []string{"type", "vxlan"}...) + + if tunLocal != "" && tunRemote != "" { + cmd = append(cmd, []string{"local", tunLocal, "remote", tunRemote}...) + } else { + if tunGroup == "" { + tunGroup = "239.0.0.1" + } + + _, devName, err := networkDefaultGatewaySubnetV4() + if err != nil { + return err + } + + cmd = append(cmd, []string{"group", tunGroup, "dev", devName}...) + } + + tunPort := getConfig("port") + if tunPort == "" { + tunPort = "0" + } + cmd = append(cmd, []string{"dstport", tunPort}...) + + tunId := getConfig("id") + if tunId == "" { + tunId = "1" + } + cmd = append(cmd, []string{"id", tunId}...) + } + + // Create the interface + err = shared.RunCommand(cmd[0], cmd[1:]...) + if err != nil { + return err + } + + // Bridge it and bring up + err = networkAttachInterface(n.name, tunName) + if err != nil { + return err + } + + err = shared.RunCommand("ip", "link", "set", tunName, "mtu", mtu, "up") + if err != nil { + return err + } + + err = shared.RunCommand("ip", "link", "set", n.name, "up") + if err != nil { + return err + } + } + + // Kill any existing dnsmasq daemon for this network + err = networkKillDnsmasq(n.name, false) + if err != nil { + return err + } + + // Configure dnsmasq + if !shared.StringInSlice(n.config["ipv4.address"], []string{"", "none"}) || !shared.StringInSlice(n.config["ipv6.address"], []string{"", "none"}) { + dnsDomain := n.config["dns.domain"] + if dnsDomain == "" { + dnsDomain = "lxd" + } + + // Setup the dnsmasq domain + if n.config["dns.mode"] != "none" { + dnsmasqCmd = append(dnsmasqCmd, []string{"-s", dnsDomain, "-S", fmt.Sprintf("/%s/", dnsDomain)}...) 
+ } + + // Create raw config file + if n.config["raw.dnsmasq"] != "" { + err = ioutil.WriteFile(shared.VarPath("networks", n.name, "dnsmasq.raw"), []byte(fmt.Sprintf("%s\n", n.config["raw.dnsmasq"])), 0) + if err != nil { + return err + } + dnsmasqCmd = append(dnsmasqCmd, fmt.Sprintf("--conf-file=%s", shared.VarPath("networks", n.name, "dnsmasq.raw"))) + } + + // Create DHCP hosts file + if !shared.PathExists(shared.VarPath("networks", n.name, "dnsmasq.hosts")) { + err = ioutil.WriteFile(shared.VarPath("networks", n.name, "dnsmasq.hosts"), []byte(""), 0) + if err != nil { + return err + } + } + + // Start dnsmasq (occasionaly races, try a few times) + output, err := tryExec(dnsmasqCmd[0], dnsmasqCmd[1:]...) + if err != nil { + return fmt.Errorf("Failed to run: %s: %s", strings.Join(dnsmasqCmd, " "), strings.TrimSpace(string(output))) + } + + // Update the static leases + err = networkUpdateStatic(n.daemon) + if err != nil { + return err + } + } + + return nil +} + +func (n *network) Stop() error { + if !n.IsRunning() { + return fmt.Errorf("The network is already stopped") + } + + // Destroy the bridge interface + if n.config["bridge.driver"] == "openvswitch" { + err := shared.RunCommand("ovs-vsctl", "del-br", n.name) + if err != nil { + return err + } + } else { + err := shared.RunCommand("ip", "link", "del", n.name) + if err != nil { + return err + } + } + + // Cleanup iptables + err := networkIptablesClear("ipv4", n.name, "") + if err != nil { + return err + } + + err = networkIptablesClear("ipv4", n.name, "mangle") + if err != nil { + return err + } + + err = networkIptablesClear("ipv4", n.name, "nat") + if err != nil { + return err + } + + err = networkIptablesClear("ipv6", n.name, "") + if err != nil { + return err + } + + err = networkIptablesClear("ipv6", n.name, "nat") + if err != nil { + return err + } + + // Kill any existing dnsmasq daemon for this network + err = networkKillDnsmasq(n.name, false) + if err != nil { + return err + } + + // Get a list of 
interfaces + ifaces, err := net.Interfaces() + if err != nil { + return err + } + + // Cleanup any existing tunnel device + for _, iface := range ifaces { + if strings.HasPrefix(iface.Name, fmt.Sprintf("%s-", n.name)) { + err = shared.RunCommand("ip", "link", "del", iface.Name) + if err != nil { + return err + } + } + } + + return nil +} + +func (n *network) Update(newNetwork shared.NetworkConfig) error { + err := networkFillAuto(newNetwork.Config) + if err != nil { + return err + } + newConfig := newNetwork.Config + + // Backup the current state + oldConfig := map[string]string{} + err = shared.DeepCopy(&n.config, &oldConfig) + if err != nil { + return err + } + + // Define a function which reverts everything. Defer this function + // so that it doesn't need to be explicitly called in every failing + // return path. Track whether or not we want to undo the changes + // using a closure. + undoChanges := true + defer func() { + if undoChanges { + n.config = oldConfig + } + }() + + // Diff the configurations + changedConfig := []string{} + userOnly := true + for key, _ := range oldConfig { + if oldConfig[key] != newConfig[key] { + if !strings.HasPrefix(key, "user.") { + userOnly = false + } + + if !shared.StringInSlice(key, changedConfig) { + changedConfig = append(changedConfig, key) + } + } + } + + for key, _ := range newConfig { + if oldConfig[key] != newConfig[key] { + if !strings.HasPrefix(key, "user.") { + userOnly = false + } + + if !shared.StringInSlice(key, changedConfig) { + changedConfig = append(changedConfig, key) + } + } + } + + // Skip on no change + if len(changedConfig) == 0 { + return nil + } + + // Update the network + if !userOnly { + if shared.StringInSlice("bridge.driver", changedConfig) && n.IsRunning() { + err = n.Stop() + if err != nil { + return err + } + } + + if shared.StringInSlice("bridge.external_interfaces", changedConfig) && n.IsRunning() { + devices := []string{} + for _, dev := range 
strings.Split(newConfig["bridge.external_interfaces"], ",") { + dev = strings.TrimSpace(dev) + devices = append(devices, dev) + } + + for _, dev := range strings.Split(oldConfig["bridge.external_interfaces"], ",") { + dev = strings.TrimSpace(dev) + if dev == "" { + continue + } + + if !shared.StringInSlice(dev, devices) && shared.PathExists(fmt.Sprintf("/sys/class/net/%s", dev)) { + err = networkDetachInterface(n.name, dev) + if err != nil { + return err + } + } + } + } + } + + // Apply the new configuration + n.config = newConfig + + // Update the database + err = dbNetworkUpdate(n.daemon.db, n.name, n.config) + if err != nil { + return err + } + + // Restart the network + if !userOnly { + err = n.Start() + if err != nil { + return err + } + } + + // Success, update the closure to mark that the changes should be kept. + undoChanges = false + + return nil +} diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/lxd/networks_iptables.go juju-core-2.0.0/src/github.com/lxc/lxd/lxd/networks_iptables.go --- juju-core-2.0~beta15/src/github.com/lxc/lxd/lxd/networks_iptables.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/lxd/networks_iptables.go 2016-10-13 14:31:53.000000000 +0000 @@ -0,0 +1,78 @@ +package main + +import ( + "fmt" + "os/exec" + "strings" + + "github.com/lxc/lxd/shared" +) + +func networkIptablesPrepend(protocol string, netName string, table string, chain string, rule ...string) error { + cmd := "iptables" + if protocol == "ipv6" { + cmd = "ip6tables" + } + + baseArgs := []string{"-w"} + if table != "" { + baseArgs = append(baseArgs, []string{"-t", table}...) + } + + // Check for an existing entry + args := append(baseArgs, []string{"-C", chain}...) + args = append(args, rule...) + args = append(args, "-m", "comment", "--comment", fmt.Sprintf("generated for LXD network %s", netName)) + if shared.RunCommand(cmd, args...) == nil { + return nil + } + + // Add the rule + args = append(baseArgs, []string{"-I", chain}...) 
+ args = append(args, rule...) + args = append(args, "-m", "comment", "--comment", fmt.Sprintf("generated for LXD network %s", netName)) + + err := shared.RunCommand(cmd, args...) + if err != nil { + return err + } + + return nil +} + +func networkIptablesClear(protocol string, netName string, table string) error { + cmd := "iptables" + if protocol == "ipv6" { + cmd = "ip6tables" + } + + baseArgs := []string{"-w"} + if table != "" { + baseArgs = append(baseArgs, []string{"-t", table}...) + } + + // List the rules + args := append(baseArgs, "-S") + output, err := exec.Command(cmd, args...).Output() + if err != nil { + return fmt.Errorf("Failed to list %s rules for %s (table %s)", protocol, netName, table) + } + + for _, line := range strings.Split(string(output), "\n") { + if !strings.Contains(line, fmt.Sprintf("generated for LXD network %s", netName)) { + continue + } + + // Remove the entry + fields := strings.Fields(line) + fields[0] = "-D" + + args = append(baseArgs, fields...) + err = shared.RunCommand("sh", "-c", fmt.Sprintf("%s %s", cmd, strings.Join(args, " "))) + if err != nil { + return err + } + } + + return nil +} diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/lxd/networks_utils.go juju-core-2.0.0/src/github.com/lxc/lxd/lxd/networks_utils.go --- juju-core-2.0~beta15/src/github.com/lxc/lxd/lxd/networks_utils.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/lxd/networks_utils.go 2016-10-13 14:31:53.000000000 +0000 @@ -0,0 +1,796 @@ +package main + +import ( + "bufio" + "encoding/binary" + "encoding/hex" + "fmt" + "io/ioutil" + "math" + "math/big" + "math/rand" + "net" + "os" + "os/exec" + "path/filepath" + "regexp" + "strconv" + "strings" + "sync" + "syscall" + "time" + + "github.com/lxc/lxd/shared" +) + +func networkAutoAttach(d *Daemon, devName string) error { + _, dbInfo, err := dbNetworkGetInterface(d.db, devName) + if err != nil { + // No match found, move on + return nil + } + + return 
networkAttachInterface(dbInfo.Name, devName) +} + +func networkAttachInterface(netName string, devName string) error { + if shared.PathExists(fmt.Sprintf("/sys/class/net/%s/bridge", netName)) { + err := shared.RunCommand("ip", "link", "set", devName, "master", netName) + if err != nil { + return err + } + } else { + err := shared.RunCommand("ovs-vsctl", "port-to-br", devName) + if err != nil { + err := shared.RunCommand("ovs-vsctl", "add-port", netName, devName) + if err != nil { + return err + } + } + } + + return nil +} + +func networkDetachInterface(netName string, devName string) error { + if shared.PathExists(fmt.Sprintf("/sys/class/net/%s/bridge", netName)) { + err := shared.RunCommand("ip", "link", "set", devName, "nomaster") + if err != nil { + return err + } + } else { + err := shared.RunCommand("ovs-vsctl", "port-to-br", devName) + if err == nil { + err := shared.RunCommand("ovs-vsctl", "del-port", netName, devName) + if err != nil { + return err + } + } + } + + return nil +} + +func networkGetInterfaces(d *Daemon) ([]string, error) { + networks, err := dbNetworks(d.db) + if err != nil { + return nil, err + } + + ifaces, err := net.Interfaces() + if err != nil { + return nil, err + } + + for _, iface := range ifaces { + if !shared.StringInSlice(iface.Name, networks) { + networks = append(networks, iface.Name) + } + } + + return networks, nil +} + +func networkIsInUse(c container, name string) bool { + for _, d := range c.ExpandedDevices() { + if d["type"] != "nic" { + continue + } + + if !shared.StringInSlice(d["nictype"], []string{"bridged", "macvlan"}) { + continue + } + + if d["parent"] == "" { + continue + } + + if d["parent"] == name { + return true + } + } + + return false +} + +func networkGetIP(subnet *net.IPNet, host int64) net.IP { + // Convert IP to a big int + bigIP := big.NewInt(0) + bigIP.SetBytes(subnet.IP.To16()) + + // Deal with negative offsets + bigHost := big.NewInt(host) + bigCount := big.NewInt(host) + if host < 0 { + mask, size := 
subnet.Mask.Size() + + bigHosts := big.NewFloat(0) + bigHosts.SetFloat64((math.Pow(2, float64(size-mask)))) + bigHostsInt, _ := bigHosts.Int(nil) + + bigCount.Set(bigHostsInt) + bigCount.Add(bigCount, bigHost) + } + + // Get the new IP int + bigIP.Add(bigIP, bigCount) + + // Generate an IPv6 + if subnet.IP.To4() == nil { + newIp := make(net.IP, 16) + newIp = bigIP.Bytes() + return newIp + } + + // Generate an IPv4 + newIp := make(net.IP, 4) + binary.BigEndian.PutUint32(newIp, uint32(bigIP.Int64())) + return newIp +} + +func networkGetTunnels(config map[string]string) []string { + tunnels := []string{} + + for k, _ := range config { + if !strings.HasPrefix(k, "tunnel.") { + continue + } + + fields := strings.Split(k, ".") + if !shared.StringInSlice(fields[1], tunnels) { + tunnels = append(tunnels, fields[1]) + } + } + + return tunnels +} + +func networkPingSubnet(subnet *net.IPNet) bool { + var fail bool + var failLock sync.Mutex + var wgChecks sync.WaitGroup + + ping := func(ip net.IP) { + defer wgChecks.Done() + + cmd := "ping" + if ip.To4() == nil { + cmd = "ping6" + } + + _, err := exec.Command(cmd, "-n", "-q", ip.String(), "-c", "1", "-W", "1").CombinedOutput() + if err != nil { + // Remote didn't answer + return + } + + // Remote answered + failLock.Lock() + fail = true + failLock.Unlock() + } + + poke := func(ip net.IP) { + defer wgChecks.Done() + + addr := fmt.Sprintf("%s:22", ip.String()) + if ip.To4() == nil { + addr = fmt.Sprintf("[%s]:22", ip.String()) + } + + _, err := net.DialTimeout("tcp", addr, time.Second) + if err == nil { + // Remote answered + failLock.Lock() + fail = true + failLock.Unlock() + return + } + } + + // Ping first IP + wgChecks.Add(1) + go ping(networkGetIP(subnet, 1)) + + // Poke port on first IP + wgChecks.Add(1) + go poke(networkGetIP(subnet, 1)) + + // Ping check + if subnet.IP.To4() != nil { + // Ping last IP + wgChecks.Add(1) + go ping(networkGetIP(subnet, -2)) + + // Poke port on last IP + wgChecks.Add(1) + go 
poke(networkGetIP(subnet, -2)) + } + + wgChecks.Wait() + + return fail +} + +func networkInRoutingTable(subnet *net.IPNet) bool { + filename := "route" + if subnet.IP.To4() == nil { + filename = "ipv6_route" + } + + file, err := os.Open(fmt.Sprintf("/proc/net/%s", filename)) + if err != nil { + return false + } + defer file.Close() + + scanner := bufio.NewReader(file) + for { + line, _, err := scanner.ReadLine() + if err != nil { + break + } + + fields := strings.Fields(string(line)) + + // Get the IP + ip := net.IP{} + if filename == "ipv6_route" { + ip, err = hex.DecodeString(fields[0]) + if err != nil { + continue + } + } else { + bytes, err := hex.DecodeString(fields[1]) + if err != nil { + continue + } + + ip = net.IPv4(bytes[3], bytes[2], bytes[1], bytes[0]) + } + + // Get the mask + mask := net.IPMask{} + if filename == "ipv6_route" { + size, err := strconv.ParseInt(fmt.Sprintf("0x%s", fields[1]), 0, 64) + if err != nil { + continue + } + + mask = net.CIDRMask(int(size), 128) + } else { + bytes, err := hex.DecodeString(fields[7]) + if err != nil { + continue + } + + mask = net.IPv4Mask(bytes[3], bytes[2], bytes[1], bytes[0]) + } + + // Generate a new network + lineNet := net.IPNet{IP: ip, Mask: mask} + + // Ignore default gateway + if lineNet.IP.Equal(net.ParseIP("::")) { + continue + } + + if lineNet.IP.Equal(net.ParseIP("0.0.0.0")) { + continue + } + + // Check if we have a route to our new subnet + if lineNet.Contains(subnet.IP) { + return true + } + } + + return false +} + +func networkRandomSubnetV4() (string, error) { + for i := 0; i < 100; i++ { + cidr := fmt.Sprintf("10.%d.%d.1/24", rand.Intn(255), rand.Intn(255)) + _, subnet, err := net.ParseCIDR(cidr) + if err != nil { + continue + } + + if networkInRoutingTable(subnet) { + continue + } + + if networkPingSubnet(subnet) { + continue + } + + return cidr, nil + } + + return "", fmt.Errorf("Unable to find a free IPv4 subnet") +} + +func networkRandomSubnetV6() (string, error) { + for i := 0; i < 100; 
i++ { + cidr := fmt.Sprintf("fd42:%x:%x:%x::1/64", rand.Intn(65535), rand.Intn(65535), rand.Intn(65535)) + _, subnet, err := net.ParseCIDR(cidr) + if err != nil { + continue + } + + if networkInRoutingTable(subnet) { + continue + } + + if networkPingSubnet(subnet) { + continue + } + + return cidr, nil + } + + return "", fmt.Errorf("Unable to find a free IPv6 subnet") +} + +func networkDefaultGatewaySubnetV4() (*net.IPNet, string, error) { + file, err := os.Open("/proc/net/route") + if err != nil { + return nil, "", err + } + defer file.Close() + + ifaceName := "" + + scanner := bufio.NewReader(file) + for { + line, _, err := scanner.ReadLine() + if err != nil { + break + } + + fields := strings.Fields(string(line)) + + if fields[1] == "00000000" && fields[7] == "00000000" { + ifaceName = fields[0] + break + } + } + + if ifaceName == "" { + return nil, "", fmt.Errorf("No default gateway for IPv4") + } + + iface, err := net.InterfaceByName(ifaceName) + if err != nil { + return nil, "", err + } + + addrs, err := iface.Addrs() + if err != nil { + return nil, "", err + } + + var subnet *net.IPNet + + for _, addr := range addrs { + addrIP, addrNet, err := net.ParseCIDR(addr.String()) + if err != nil { + return nil, "", err + } + + if addrIP.To4() == nil { + continue + } + + if subnet != nil { + return nil, "", fmt.Errorf("More than one IPv4 subnet on default interface") + } + + subnet = addrNet + } + + if subnet == nil { + return nil, "", fmt.Errorf("No IPv4 subnet on default interface") + } + + return subnet, ifaceName, nil +} + +func networkValidName(value string) error { + // Validate the length + if len(value) < 2 { + return fmt.Errorf("Interface name is too short (minimum 2 characters)") + } + + if len(value) > 15 { + return fmt.Errorf("Interface name is too long (maximum 15 characters)") + } + + // Validate the character set + match, _ := regexp.MatchString("^[-a-zA-Z0-9]*$", value) + if !match { + return fmt.Errorf("Interface name contains invalid characters") + } 
// networkValidPort checks that value is either empty (unset) or a
// valid TCP/UDP port number.
func networkValidPort(value string) error {
	if value == "" {
		return nil
	}

	valueInt, err := strconv.ParseInt(value, 10, 64)
	if err != nil {
		return fmt.Errorf("Invalid value for an integer: %s", value)
	}

	// BUG FIX: valid port numbers are 1-65535; the previous bound
	// (> 65536) incorrectly accepted 65536.
	if valueInt < 1 || valueInt > 65535 {
		return fmt.Errorf("Invalid port number: %s", value)
	}

	return nil
}

// networkValidAddressCIDRV6 checks for a usable IPv6 host address in
// CIDR notation (not the subnet's own network address).
func networkValidAddressCIDRV6(value string) error {
	if value == "" {
		return nil
	}

	ip, subnet, err := net.ParseCIDR(value)
	if err != nil {
		return err
	}

	if ip.To4() != nil {
		return fmt.Errorf("Not an IPv6 address: %s", value)
	}

	if ip.String() == subnet.IP.String() {
		return fmt.Errorf("Not a usable IPv6 address: %s", value)
	}

	return nil
}

// networkValidAddressCIDRV4 checks for a usable IPv4 host address in
// CIDR notation (not the subnet's own network address).
func networkValidAddressCIDRV4(value string) error {
	if value == "" {
		return nil
	}

	ip, subnet, err := net.ParseCIDR(value)
	if err != nil {
		return err
	}

	if ip.To4() == nil {
		return fmt.Errorf("Not an IPv4 address: %s", value)
	}

	if ip.String() == subnet.IP.String() {
		return fmt.Errorf("Not a usable IPv4 address: %s", value)
	}

	return nil
}

// networkValidAddressV4 checks for a plain IPv4 address (no CIDR
// suffix).
func networkValidAddressV4(value string) error {
	if value == "" {
		return nil
	}

	// BUG FIX: also require To4() != nil; net.ParseIP alone accepts
	// IPv6 strings, so e.g. "fd42::1" previously passed this check.
	ip := net.ParseIP(value)
	if ip == nil || ip.To4() == nil {
		return fmt.Errorf("Not an IPv4 address: %s", value)
	}

	return nil
}

// networkValidNetworkV4 checks for an IPv4 subnet in CIDR notation —
// the network address itself, not a host address within it.
func networkValidNetworkV4(value string) error {
	if value == "" {
		return nil
	}

	ip, subnet, err := net.ParseCIDR(value)
	if err != nil {
		return err
	}

	if ip.To4() == nil {
		return fmt.Errorf("Not an IPv4 network: %s", value)
	}

	if ip.String() != subnet.IP.String() {
		return fmt.Errorf("Not an IPv4 network address: %s", value)
	}

	return nil
}
_, err := net.ParseCIDR(addr.String()) + if err != nil { + continue + } + + if subnet.Contains(ip) { + return ip, iface.Name, nil + } + } + } + + return net.IP{}, "", fmt.Errorf("No address found in subnet") +} + +func networkFanAddress(underlay *net.IPNet, overlay *net.IPNet) (string, string, string, error) { + // Sanity checks + underlaySize, _ := underlay.Mask.Size() + if underlaySize != 16 && underlaySize != 24 { + return "", "", "", fmt.Errorf("Only /16 or /24 underlays are supported at this time") + } + + overlaySize, _ := overlay.Mask.Size() + if overlaySize != 8 && overlaySize != 16 { + return "", "", "", fmt.Errorf("Only /8 or /16 overlays are supported at this time") + } + + if overlaySize+(32-underlaySize)+8 > 32 { + return "", "", "", fmt.Errorf("Underlay or overlay networks too large to accommodate the FAN") + } + + // Get the IP + ip, dev, err := networkAddressForSubnet(underlay) + if err != nil { + return "", "", "", err + } + ipStr := ip.String() + + // Force into IPv4 format + ipBytes := ip.To4() + if ipBytes == nil { + return "", "", "", fmt.Errorf("Invalid IPv4: %s", ip) + } + + // Compute the IP + ipBytes[0] = overlay.IP[0] + if overlaySize == 16 { + ipBytes[1] = overlay.IP[1] + } else if underlaySize == 24 { + ipBytes[1] = 0 + } else if underlaySize == 16 { + ipBytes[1] = ipBytes[2] + } + ipBytes[2] = ipBytes[3] + ipBytes[3] = 1 + + return fmt.Sprintf("%s/%d", ipBytes.String(), overlaySize), dev, ipStr, err +} + +func networkKillDnsmasq(name string, reload bool) error { + // Check if we have a running dnsmasq at all + pidPath := shared.VarPath("networks", name, "dnsmasq.pid") + if !shared.PathExists(pidPath) { + if reload { + return fmt.Errorf("dnsmasq isn't running") + } + + return nil + } + + // Grab the PID + content, err := ioutil.ReadFile(pidPath) + if err != nil { + return err + } + pid := strings.TrimSpace(string(content)) + + // Check if the process still exists + if !shared.PathExists(fmt.Sprintf("/proc/%s", pid)) { + 
os.Remove(pidPath) + + if reload { + return fmt.Errorf("dnsmasq isn't running") + } + + return nil + } + + // Check if it's dnsmasq + cmdPath, err := os.Readlink(fmt.Sprintf("/proc/%s/exe", pid)) + if err != nil { + return err + } + + // Deal with deleted paths + cmdName := filepath.Base(strings.Split(cmdPath, " ")[0]) + if cmdName != "dnsmasq" { + if reload { + return fmt.Errorf("dnsmasq isn't running") + } + + os.Remove(pidPath) + return nil + } + + // Parse the pid + pidInt, err := strconv.Atoi(pid) + if err != nil { + return err + } + + // Actually kill the process + if reload { + err = syscall.Kill(pidInt, syscall.SIGHUP) + if err != nil { + return err + } + + return nil + } + + err = syscall.Kill(pidInt, syscall.SIGKILL) + if err != nil { + return err + } + + // Cleanup + os.Remove(pidPath) + return nil +} + +func networkUpdateStatic(d *Daemon) error { + // Get all the containers + containers, err := dbContainersList(d.db, cTypeRegular) + if err != nil { + return err + } + + // Get all the networks + networks, err := dbNetworks(d.db) + if err != nil { + return err + } + + // Build a list of dhcp host entries + entries := map[string][][]string{} + for _, name := range containers { + // Load the container + c, err := containerLoadByName(d, name) + if err != nil { + continue + } + + // Go through all its devices (including profiles + for k, d := range c.ExpandedDevices() { + // Skip uninteresting entries + if d["type"] != "nic" || d["nictype"] != "bridged" || !shared.StringInSlice(d["parent"], networks) { + continue + } + + // Fill in the hwaddr from volatile + d, err = c.(*containerLXC).fillNetworkDevice(k, d) + if err != nil { + continue + } + + // Add the new host entries + _, ok := entries[d["parent"]] + if !ok { + entries[d["parent"]] = [][]string{} + } + + entries[d["parent"]] = append(entries[d["parent"]], []string{d["hwaddr"], name, d["ipv4.address"], d["ipv6.address"]}) + } + } + + // Update the host files + for _, network := range networks { + entries, 
_ := entries[network] + + // Skip networks we don't manage (or don't have DHCP enabled) + if !shared.PathExists(shared.VarPath("networks", network, "dnsmasq.hosts")) { + continue + } + + n, err := networkLoadByName(d, network) + if err != nil { + return err + } + config := n.Config() + + // Update the file + if entries == nil { + err := ioutil.WriteFile(shared.VarPath("networks", network, "dnsmasq.hosts"), []byte(""), 0) + if err != nil { + return err + } + } else { + lines := []string{} + for _, entry := range entries { + hwaddr := entry[0] + name := entry[1] + ipv4Address := entry[2] + ipv6Address := entry[3] + + line := hwaddr + + if ipv4Address != "" { + line += fmt.Sprintf(",id:*,%s", ipv4Address) + } + + if ipv6Address != "" { + line += fmt.Sprintf(",[%s]", ipv6Address) + } + + if config["dns.mode"] == "" || config["dns.mode"] == "managed" { + line += fmt.Sprintf(",%s", name) + } + + if line == hwaddr { + continue + } + + lines = append(lines, line) + } + + err := ioutil.WriteFile(shared.VarPath("networks", network, "dnsmasq.hosts"), []byte(strings.Join(lines, "\n")+"\n"), 0) + if err != nil { + return err + } + } + + // Signal dnsmasq + err = networkKillDnsmasq(network, true) + if err != nil { + return err + } + } + + return nil +} + +func networkSysctl(path string, value string) error { + content, err := ioutil.ReadFile(fmt.Sprintf("/proc/sys/net/%s", path)) + if err != nil { + return err + } + + if strings.TrimSpace(string(content)) == value { + return nil + } + + return ioutil.WriteFile(fmt.Sprintf("/proc/sys/net/%s", path), []byte(value), 0) +} diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/lxd/nsexec.go juju-core-2.0.0/src/github.com/lxc/lxd/lxd/nsexec.go --- juju-core-2.0~beta15/src/github.com/lxc/lxd/lxd/nsexec.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/lxd/nsexec.go 2016-10-13 14:31:53.000000000 +0000 @@ -37,6 +37,7 @@ #include #include #include +#include // This expects: // ./lxd forkputfile /source/path 
/target/path @@ -48,6 +49,14 @@ // #define CMDLINE_SIZE (8 * PATH_MAX) +void error(char *msg) +{ + int old_errno = errno; + + perror(msg); + fprintf(stderr, "errno: %d\n", old_errno); +} + int mkdir_p(const char *dir, mode_t mode) { const char *tmp = dir; @@ -60,7 +69,7 @@ makeme = strndup(orig, dir - orig); if (*makeme) { if (mkdir(makeme, mode) && errno != EEXIST) { - fprintf(stderr, "failed to create directory '%s'", makeme); + fprintf(stderr, "failed to create directory '%s': %s\n", makeme, strerror(errno)); free(makeme); return -1; } @@ -77,19 +86,19 @@ char buf[1024]; if (ftruncate(target, 0) < 0) { - perror("error: truncate"); + error("error: truncate"); return -1; } while ((n = read(source, buf, 1024)) > 0) { if (write(target, buf, n) != n) { - perror("error: write"); + error("error: write"); return -1; } } if (n < 0) { - perror("error: read"); + error("error: read"); return -1; } @@ -103,12 +112,12 @@ sprintf(buf, "/proc/%d/ns/%s", pid, nstype); mntns = open(buf, O_RDONLY); if (mntns < 0) { - perror("error: open mntns"); + error("error: open mntns"); return -1; } if (setns(mntns, 0) < 0) { - perror("error: setns"); + error("error: setns"); close(mntns); return -1; } @@ -117,58 +126,160 @@ return 0; } -int manip_file_in_ns(char *rootfs, int pid, char *host, char *container, bool is_put, uid_t uid, gid_t gid, mode_t mode) { - int host_fd, container_fd; +int manip_file_in_ns(char *rootfs, int pid, char *host, char *container, bool is_put, uid_t uid, gid_t gid, mode_t mode, uid_t defaultUid, gid_t defaultGid, mode_t defaultMode) { + int host_fd = -1, container_fd = -1; int ret = -1; int container_open_flags; - - host_fd = open(host, O_RDWR); - if (host_fd < 0) { - perror("error: open"); - return -1; + struct stat st; + int exists = 1; + bool is_dir_manip = !strcmp(host, ""); + + if (!is_dir_manip) { + host_fd = open(host, O_RDWR); + if (host_fd < 0) { + error("error: open"); + return -1; + } } - container_open_flags = O_RDWR; - if (is_put) - 
container_open_flags |= O_CREAT; - if (pid > 0) { if (dosetns(pid, "mnt") < 0) { - perror("error: setns"); + error("error: setns"); goto close_host; } } else { if (chroot(rootfs) < 0) { - perror("error: chroot"); + error("error: chroot"); goto close_host; } if (chdir("/") < 0) { - perror("error: chdir"); + error("error: chdir"); goto close_host; } } - container_fd = open(container, container_open_flags, mode); + if (is_put && is_dir_manip) { + if (mode == -1) { + mode = defaultMode; + } + + if (uid == -1) { + uid = defaultUid; + } + + if (gid == -1) { + gid = defaultGid; + } + + if (mkdir(container, mode) < 0 && errno != EEXIST) { + error("error: mkdir"); + return -1; + } + + if (chown(container, uid, gid) < 0) { + error("error: chown"); + return -1; + } + + return 0; + } + + if (stat(container, &st) < 0) + exists = 0; + + container_open_flags = O_RDWR; + if (is_put) + container_open_flags |= O_CREAT; + + if (exists && S_ISDIR(st.st_mode)) + container_open_flags = O_DIRECTORY; + + umask(0); + container_fd = open(container, container_open_flags, 0); if (container_fd < 0) { - perror("error: open"); + error("error: open"); goto close_host; } if (is_put) { + if (!exists) { + if (mode == -1) { + mode = defaultMode; + } + + if (uid == -1) { + uid = defaultUid; + } + + if (gid == -1) { + gid = defaultGid; + } + } + if (copy(container_fd, host_fd) < 0) { - perror("error: copy"); + error("error: copy"); goto close_container; } - if (fchown(container_fd, uid, gid) < 0) { - perror("error: chown"); + if (mode != -1 && fchmod(container_fd, mode) < 0) { + error("error: chmod"); goto close_container; } + if (fchown(container_fd, uid, gid) < 0) { + error("error: chown"); + goto close_container; + } ret = 0; - } else - ret = copy(host_fd, container_fd); + } else { + + if (fstat(container_fd, &st) < 0) { + error("error: stat"); + goto close_container; + } + + fprintf(stderr, "uid: %ld\n", (long)st.st_uid); + fprintf(stderr, "gid: %ld\n", (long)st.st_gid); + fprintf(stderr, "mode: 
%ld\n", (unsigned long)st.st_mode & (S_IRWXU | S_IRWXG | S_IRWXO)); + if (S_ISDIR(st.st_mode)) { + DIR *fdir; + struct dirent *de; + + fdir = fdopendir(container_fd); + if (!fdir) { + error("error: fdopendir"); + goto close_container; + } + + fprintf(stderr, "type: directory\n"); + + while((de = readdir(fdir))) { + int len, i; + + if (!strcmp(de->d_name, ".") || !strcmp(de->d_name, "..")) + continue; + + fprintf(stderr, "entry: "); + + // swap \n to \0 since we split this output by line + for (i = 0, len = strlen(de->d_name); i < len; i++) { + if (*(de->d_name + i) == '\n') + putc(0, stderr); + else + putc(*(de->d_name + i), stderr); + } + fprintf(stderr, "\n"); + } + + // container_fd is dead now that we fopendir'd it + goto close_host; + } else { + fprintf(stderr, "type: file\n"); + ret = copy(host_fd, container_fd); + } + fprintf(stderr, "type: %s", S_ISDIR(st.st_mode) ? "directory" : "file"); + } close_container: close(container_fd); @@ -257,9 +368,43 @@ void forkmount(char *buf, char *cur, ssize_t size) { char *src, *dest, *opts; + char nspath[PATH_MAX]; + char userns_source[PATH_MAX]; + char userns_target[PATH_MAX]; + ADVANCE_ARG_REQUIRED(); int pid = atoi(cur); + sprintf(nspath, "/proc/%d/ns/user", pid); + if (access(nspath, F_OK) == 0) { + if (readlink("/proc/self/ns/user", userns_source, 18) < 0) { + fprintf(stderr, "Failed readlink of source namespace: %s\n", strerror(errno)); + _exit(1); + } + + if (readlink(nspath, userns_target, PATH_MAX) < 0) { + fprintf(stderr, "Failed readlink of target namespace: %s\n", strerror(errno)); + _exit(1); + } + + if (strncmp(userns_source, userns_target, PATH_MAX) != 0) { + if (dosetns(pid, "user") < 0) { + fprintf(stderr, "Failed setns to container user namespace: %s\n", strerror(errno)); + _exit(1); + } + + if (setuid(0) < 0) { + fprintf(stderr, "Failed setuid to container root user: %s\n", strerror(errno)); + _exit(1); + } + + if (setgid(0) < 0) { + fprintf(stderr, "Failed setgid to container root group: %s\n", 
strerror(errno)); + _exit(1); + } + } + } + if (dosetns(pid, "mnt") < 0) { fprintf(stderr, "Failed setns to container mount namespace: %s\n", strerror(errno)); _exit(1); @@ -320,6 +465,9 @@ uid_t uid = 0; gid_t gid = 0; mode_t mode = 0; + uid_t defaultUid = 0; + gid_t defaultGid = 0; + mode_t defaultMode = 0; char *command = cur, *rootfs = NULL, *source = NULL, *target = NULL; pid_t pid; @@ -344,17 +492,18 @@ ADVANCE_ARG_REQUIRED(); mode = atoi(cur); - } - printf("command: %s\n", command); - printf("source: %s\n", source); - printf("pid: %d\n", pid); - printf("target: %s\n", target); - printf("uid: %d\n", uid); - printf("gid: %d\n", gid); - printf("mode: %d\n", mode); + ADVANCE_ARG_REQUIRED(); + defaultUid = atoi(cur); + + ADVANCE_ARG_REQUIRED(); + defaultGid = atoi(cur); + + ADVANCE_ARG_REQUIRED(); + defaultMode = atoi(cur); + } - _exit(manip_file_in_ns(rootfs, pid, source, target, is_put, uid, gid, mode)); + _exit(manip_file_in_ns(rootfs, pid, source, target, is_put, uid, gid, mode, defaultUid, defaultGid, defaultMode)); } void forkgetnet(char *buf, char *cur, ssize_t size) { @@ -377,14 +526,14 @@ cmdline = open("/proc/self/cmdline", O_RDONLY); if (cmdline < 0) { - perror("error: open"); + error("error: open"); _exit(232); } memset(buf, 0, sizeof(buf)); if ((size = read(cmdline, buf, sizeof(buf)-1)) < 0) { close(cmdline); - perror("error: read"); + error("error: read"); _exit(232); } close(cmdline); diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/lxd/operations.go juju-core-2.0.0/src/github.com/lxc/lxd/lxd/operations.go --- juju-core-2.0~beta15/src/github.com/lxc/lxd/lxd/operations.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/lxd/operations.go 2016-10-13 14:31:53.000000000 +0000 @@ -111,12 +111,12 @@ if err != nil { op.lock.Lock() op.status = shared.Failure - op.err = err.Error() + op.err = SmartError(err).String() op.lock.Unlock() op.done() chanRun <- err - shared.Debugf("Failure for %s operation: %s: %s", 
op.class.String(), op.id, err) + shared.LogDebugf("Failure for %s operation: %s: %s", op.class.String(), op.id, err) _, md, _ := op.Render() eventSend("operation", md) @@ -129,14 +129,16 @@ op.done() chanRun <- nil - shared.Debugf("Success for %s operation: %s", op.class.String(), op.id) + op.lock.Lock() + shared.LogDebugf("Success for %s operation: %s", op.class.String(), op.id) _, md, _ := op.Render() eventSend("operation", md) + op.lock.Unlock() }(op, chanRun) } op.lock.Unlock() - shared.Debugf("Started %s operation: %s", op.class.String(), op.id) + shared.LogDebugf("Started %s operation: %s", op.class.String(), op.id) _, md, _ := op.Render() eventSend("operation", md) @@ -168,7 +170,7 @@ op.lock.Unlock() chanCancel <- err - shared.Debugf("Failed to cancel %s operation: %s: %s", op.class.String(), op.id, err) + shared.LogDebugf("Failed to cancel %s operation: %s: %s", op.class.String(), op.id, err) _, md, _ := op.Render() eventSend("operation", md) return @@ -180,13 +182,13 @@ op.done() chanCancel <- nil - shared.Debugf("Cancelled %s operation: %s", op.class.String(), op.id) + shared.LogDebugf("Cancelled %s operation: %s", op.class.String(), op.id) _, md, _ := op.Render() eventSend("operation", md) }(op, oldStatus, chanCancel) } - shared.Debugf("Cancelling %s operation: %s", op.class.String(), op.id) + shared.LogDebugf("Cancelling %s operation: %s", op.class.String(), op.id) _, md, _ := op.Render() eventSend("operation", md) @@ -198,7 +200,7 @@ chanCancel <- nil } - shared.Debugf("Cancelled %s operation: %s", op.class.String(), op.id) + shared.LogDebugf("Cancelled %s operation: %s", op.class.String(), op.id) _, md, _ = op.Render() eventSend("operation", md) @@ -223,23 +225,17 @@ if err != nil { chanConnect <- err - shared.Debugf("Failed to handle %s operation: %s: %s", op.class.String(), op.id, err) - _, md, _ := op.Render() - eventSend("operation", md) + shared.LogDebugf("Failed to handle %s operation: %s: %s", op.class.String(), op.id, err) return } 
chanConnect <- nil - shared.Debugf("Handled %s operation: %s", op.class.String(), op.id) - _, md, _ := op.Render() - eventSend("operation", md) + shared.LogDebugf("Handled %s operation: %s", op.class.String(), op.id) }(op, chanConnect) op.lock.Unlock() - shared.Debugf("Connected %s operation: %s", op.class.String(), op.id) - _, md, _ := op.Render() - eventSend("operation", md) + shared.LogDebugf("Connected %s operation: %s", op.class.String(), op.id) return chanConnect, nil } @@ -324,7 +320,7 @@ op.resources = opResources op.lock.Unlock() - shared.Debugf("Updated resources for %s operation: %s", op.class.String(), op.id) + shared.LogDebugf("Updated resources for %s operation: %s", op.class.String(), op.id) _, md, _ := op.Render() eventSend("operation", md) @@ -350,7 +346,7 @@ op.metadata = newMetadata op.lock.Unlock() - shared.Debugf("Updated metadata for %s operation: %s", op.class.String(), op.id) + shared.LogDebugf("Updated metadata for %s operation: %s", op.class.String(), op.id) _, md, _ := op.Render() eventSend("operation", md) @@ -405,7 +401,7 @@ operations[op.id] = &op operationsLock.Unlock() - shared.Debugf("New %s operation: %s", op.class.String(), op.id) + shared.LogDebugf("New %s operation: %s", op.class.String(), op.id) _, md, _ := op.Render() eventSend("operation", md) @@ -541,6 +537,15 @@ return err } +func (r *operationWebSocket) String() string { + _, md, err := r.op.Render() + if err != nil { + return fmt.Sprintf("error: %s", err) + } + + return md.Id +} + func operationAPIWebsocketGet(d *Daemon, r *http.Request) Response { id := mux.Vars(r)["id"] op, err := operationGet(id) diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/lxd/patches.go juju-core-2.0.0/src/github.com/lxc/lxd/lxd/patches.go --- juju-core-2.0~beta15/src/github.com/lxc/lxd/lxd/patches.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/lxd/patches.go 2016-10-13 14:31:53.000000000 +0000 @@ -0,0 +1,107 @@ +package main + +import ( + "strings" + + 
"github.com/lxc/lxd/shared" + + log "gopkg.in/inconshreveable/log15.v2" +) + +/* Patches are one-time actions that are sometimes needed to update + existing container configuration or move things around on the + filesystem. + + Those patches are applied at startup time after the database schema + has been fully updated. Patches can therefore assume a working database. + + At the time the patches are applied, the containers aren't started + yet and the daemon isn't listening to requests. + + DO NOT use this mechanism for database update. Schema updates must be + done through the separate schema update mechanism. + + + Only append to the patches list, never remove entries and never re-order them. +*/ + +var patches = []patch{ + patch{name: "invalid_profile_names", run: patchInvalidProfileNames}, + patch{name: "leftover_profile_config", run: patchLeftoverProfileConfig}, +} + +type patch struct { + name string + run func(name string, d *Daemon) error +} + +func (p *patch) apply(d *Daemon) error { + shared.LogDebugf("Applying patch: %s", p.name) + + err := p.run(p.name, d) + if err != nil { + return err + } + + err = dbPatchesMarkApplied(d.db, p.name) + if err != nil { + return err + } + + return nil +} + +func patchesApplyAll(d *Daemon) error { + appliedPatches, err := dbPatches(d.db) + if err != nil { + return err + } + + for _, patch := range patches { + if shared.StringInSlice(patch.name, appliedPatches) { + continue + } + + err := patch.apply(d) + if err != nil { + return err + } + } + + return nil +} + +// Patches begin here +func patchLeftoverProfileConfig(name string, d *Daemon) error { + stmt := ` +DELETE FROM profiles_config WHERE profile_id NOT IN (SELECT id FROM profiles); +DELETE FROM profiles_devices WHERE profile_id NOT IN (SELECT id FROM profiles); +DELETE FROM profiles_devices_config WHERE profile_device_id NOT IN (SELECT id FROM profiles_devices); +` + + _, err := d.db.Exec(stmt) + if err != nil { + return err + } + + return nil +} + +func 
patchInvalidProfileNames(name string, d *Daemon) error { + profiles, err := dbProfiles(d.db) + if err != nil { + return err + } + + for _, profile := range profiles { + if strings.Contains(profile, "/") || shared.StringInSlice(profile, []string{".", ".."}) { + shared.LogInfo("Removing unreachable profile (invalid name)", log.Ctx{"name": profile}) + err := dbProfileDelete(d.db, profile) + if err != nil { + return err + } + } + } + + return nil +} diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/lxd/profiles.go juju-core-2.0.0/src/github.com/lxc/lxd/lxd/profiles.go --- juju-core-2.0~beta15/src/github.com/lxc/lxd/lxd/profiles.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/lxd/profiles.go 2016-10-13 14:31:53.000000000 +0000 @@ -1,10 +1,13 @@ package main import ( + "bytes" "encoding/json" "fmt" + "io/ioutil" "net/http" "reflect" + "strings" "github.com/gorilla/mux" _ "github.com/mattn/go-sqlite3" @@ -40,7 +43,7 @@ } else { profile, err := doProfileGet(d, name) if err != nil { - shared.Log.Error("Failed to get profile", log.Ctx{"profile": name}) + shared.LogError("Failed to get profile", log.Ctx{"profile": name}) continue } resultMap[i] = profile @@ -66,7 +69,20 @@ return BadRequest(fmt.Errorf("No name provided")) } - err := containerValidConfig(req.Config, true, false) + _, profile, _ := dbProfileGet(d.db, req.Name) + if profile != nil { + return BadRequest(fmt.Errorf("The profile already exists")) + } + + if strings.Contains(req.Name, "/") { + return BadRequest(fmt.Errorf("Profile names may not contain slashes")) + } + + if shared.StringInSlice(req.Name, []string{".", ".."}) { + return BadRequest(fmt.Errorf("Invalid profile name '%s'", req.Name)) + } + + err := containerValidConfig(d, req.Config, true, false) if err != nil { return BadRequest(err) } @@ -83,7 +99,7 @@ fmt.Errorf("Error inserting %s into database: %s", req.Name, err)) } - return EmptySyncResponse + return SyncResponseLocation(true, nil, 
fmt.Sprintf("/%s/profiles/%s", shared.APIVersion, req.Name)) } var profilesCmd = Command{ @@ -93,7 +109,22 @@ func doProfileGet(d *Daemon, name string) (*shared.ProfileConfig, error) { _, profile, err := dbProfileGet(d.db, name) - return profile, err + if err != nil { + return nil, err + } + + cts, err := dbProfileContainersGet(d.db, name) + if err != nil { + return nil, err + } + + usedBy := []string{} + for _, ct := range cts { + usedBy = append(usedBy, fmt.Sprintf("/%s/containers/%s", shared.APIVersion, ct)) + } + profile.UsedBy = usedBy + + return profile, nil } func profileGet(d *Daemon, r *http.Request) Response { @@ -104,7 +135,7 @@ return SmartError(err) } - return SyncResponse(true, resp) + return SyncResponseETag(true, resp, resp) } func getRunningContainersWithProfile(d *Daemon, profile string) []container { @@ -118,7 +149,7 @@ for _, name := range output { c, err := containerLoadByName(d, name) if err != nil { - shared.Log.Error("Failed opening container", log.Ctx{"container": name}) + shared.LogError("Failed opening container", log.Ctx{"container": name}) continue } results = append(results, c) @@ -127,15 +158,95 @@ } func profilePut(d *Daemon, r *http.Request) Response { + // Get the profile name := mux.Vars(r)["name"] + id, profile, err := dbProfileGet(d.db, name) + if err != nil { + return InternalError(fmt.Errorf("Failed to retrieve profile='%s'", name)) + } + + // Validate the ETag + err = etagCheck(r, profile) + if err != nil { + return PreconditionFailed(err) + } req := profilesPostReq{} if err := json.NewDecoder(r.Body).Decode(&req); err != nil { return BadRequest(err) } + return doProfileUpdate(d, name, id, profile, req) +} + +func profilePatch(d *Daemon, r *http.Request) Response { + // Get the profile + name := mux.Vars(r)["name"] + id, profile, err := dbProfileGet(d.db, name) + if err != nil { + return InternalError(fmt.Errorf("Failed to retrieve profile='%s'", name)) + } + + // Validate the ETag + err = etagCheck(r, profile) + if err != 
nil { + return PreconditionFailed(err) + } + + body, err := ioutil.ReadAll(r.Body) + if err != nil { + return InternalError(err) + } + + rdr1 := ioutil.NopCloser(bytes.NewBuffer(body)) + rdr2 := ioutil.NopCloser(bytes.NewBuffer(body)) + + reqRaw := shared.Jmap{} + if err := json.NewDecoder(rdr1).Decode(&reqRaw); err != nil { + return BadRequest(err) + } + + req := profilesPostReq{} + if err := json.NewDecoder(rdr2).Decode(&req); err != nil { + return BadRequest(err) + } + + // Get Description + _, err = reqRaw.GetString("description") + if err != nil { + req.Description = profile.Description + } + + // Get Config + if req.Config == nil { + req.Config = profile.Config + } else { + for k, v := range profile.Config { + _, ok := req.Config[k] + if !ok { + req.Config[k] = v + } + } + } + + // Get Devices + if req.Devices == nil { + req.Devices = profile.Devices + } else { + for k, v := range profile.Devices { + _, ok := req.Devices[k] + if !ok { + req.Devices[k] = v + } + } + } + + return doProfileUpdate(d, name, id, profile, req) +} + +func doProfileUpdate(d *Daemon, name string, id int64, profile *shared.ProfileConfig, req profilesPostReq) Response { // Sanity checks - err := containerValidConfig(req.Config, true, false) + err := containerValidConfig(d, req.Config, true, false) if err != nil { return BadRequest(err) } @@ -157,11 +268,6 @@ } // Update the database - id, profile, err := dbProfileGet(d.db, name) - if err != nil { - return InternalError(fmt.Errorf("Failed to retrieve profile='%s'", name)) - } - tx, err := dbBegin(d.db) if err != nil { return InternalError(err) @@ -248,24 +354,48 @@ return BadRequest(fmt.Errorf("No name provided")) } + // Check that the name isn't already in use + id, _, _ := dbProfileGet(d.db, req.Name) + if id > 0 { + return Conflict + } + + if strings.Contains(req.Name, "/") { + return BadRequest(fmt.Errorf("Profile names may not contain slashes")) + } + + if shared.StringInSlice(req.Name, []string{".", ".."}) { + return 
BadRequest(fmt.Errorf("Invalid profile name '%s'", req.Name)) + } + err := dbProfileUpdate(d.db, name, req.Name) if err != nil { return InternalError(err) } - return EmptySyncResponse + return SyncResponseLocation(true, nil, fmt.Sprintf("/%s/profiles/%s", shared.APIVersion, req.Name)) } // The handler for the delete operation. func profileDelete(d *Daemon, r *http.Request) Response { name := mux.Vars(r)["name"] - err := dbProfileDelete(d.db, name) + _, err := doProfileGet(d, name) if err != nil { - return InternalError(err) + return SmartError(err) + } + + clist := getRunningContainersWithProfile(d, name) + if len(clist) != 0 { + return BadRequest(fmt.Errorf("Profile is currently in use")) + } + + err = dbProfileDelete(d.db, name) + if err != nil { + return SmartError(err) } return EmptySyncResponse } -var profileCmd = Command{name: "profiles/{name}", get: profileGet, put: profilePut, delete: profileDelete, post: profilePost} +var profileCmd = Command{name: "profiles/{name}", get: profileGet, put: profilePut, delete: profileDelete, post: profilePost, patch: profilePatch} diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/lxd/response.go juju-core-2.0.0/src/github.com/lxc/lxd/lxd/response.go --- juju-core-2.0~beta15/src/github.com/lxc/lxd/lxd/response.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/lxd/response.go 2016-10-13 14:31:53.000000000 +0000 @@ -33,29 +33,73 @@ type Response interface { Render(w http.ResponseWriter) error + String() string } // Sync response type syncResponse struct { success bool + etag interface{} metadata interface{} + location string + headers map[string]string } func (r *syncResponse) Render(w http.ResponseWriter) error { + // Set an appropriate ETag header + if r.etag != nil { + etag, err := etagHash(r.etag) + if err == nil { + w.Header().Set("ETag", etag) + } + } + + // Prepare the JSON response status := shared.Success if !r.success { status = shared.Failure } + if r.headers != nil { + for h, v := 
range r.headers { + w.Header().Set(h, v) + } + } + + if r.location != "" { + w.Header().Set("Location", r.location) + w.WriteHeader(201) + } + resp := syncResp{Type: lxd.Sync, Status: status.String(), StatusCode: status, Metadata: r.metadata} return WriteJSON(w, resp) } +func (r *syncResponse) String() string { + if r.success { + return "success" + } + + return "failure" +} + func SyncResponse(success bool, metadata interface{}) Response { - return &syncResponse{success, metadata} + return &syncResponse{success: success, metadata: metadata} +} + +func SyncResponseETag(success bool, metadata interface{}, etag interface{}) Response { + return &syncResponse{success: success, metadata: metadata, etag: etag} } -var EmptySyncResponse = &syncResponse{true, make(map[string]interface{})} +func SyncResponseLocation(success bool, metadata interface{}, location string) Response { + return &syncResponse{success: success, metadata: metadata, location: location} +} + +func SyncResponseHeaders(success bool, metadata interface{}, headers map[string]string) Response { + return &syncResponse{success: success, metadata: metadata, headers: headers} +} + +var EmptySyncResponse = &syncResponse{success: true, metadata: make(map[string]interface{})} // File transfer response type fileResponseEntry struct { @@ -141,6 +185,10 @@ return err } +func (r *fileResponse) String() string { + return fmt.Sprintf("%d files", len(r.files)) +} + func FileResponse(r *http.Request, files []fileResponseEntry, headers map[string]string, removeAfterServe bool) Response { return &fileResponse{r, files, headers, removeAfterServe} } @@ -174,6 +222,15 @@ return WriteJSON(w, body) } +func (r *operationResponse) String() string { + _, md, err := r.op.Render() + if err != nil { + return fmt.Sprintf("error: %s", err) + } + + return md.Id +} + func OperationResponse(op *operation) Response { return &operationResponse{op} } @@ -184,6 +241,10 @@ msg string } +func (r *errorResponse) String() string { + return r.msg +} 
+ func (r *errorResponse) Render(w http.ResponseWriter) error { var output io.Writer @@ -227,6 +288,10 @@ return &errorResponse{http.StatusInternalServerError, err.Error()} } +func PreconditionFailed(err error) Response { + return &errorResponse{http.StatusPreconditionFailed, err.Error()} +} + /* * SmartError returns the right error message based on err. */ diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/lxd/rsync.go juju-core-2.0.0/src/github.com/lxc/lxd/lxd/rsync.go --- juju-core-2.0~beta15/src/github.com/lxc/lxd/lxd/rsync.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/lxd/rsync.go 2016-10-13 14:31:53.000000000 +0000 @@ -7,6 +7,7 @@ "net" "os" "os/exec" + "sync" "github.com/gorilla/websocket" @@ -36,13 +37,13 @@ readDone, writeDone := shared.WebsocketMirror(conn, stdin, stdout) data, err2 := ioutil.ReadAll(stderr) if err2 != nil { - shared.Debugf("error reading rsync stderr: %s", err2) + shared.LogDebugf("error reading rsync stderr: %s", err2) return err2 } err = cmd.Wait() if err != nil { - shared.Debugf("rsync recv error for path %s: %s: %s", path, err, string(data)) + shared.LogDebugf("rsync recv error for path %s: %s: %s", path, err, string(data)) } <-readDone @@ -93,7 +94,7 @@ * command (i.e. the command to run on --server). However, we're * hardcoding that at the other end, so we can just ignore it. 
*/ - rsyncCmd := fmt.Sprintf("sh -c \"nc -U %s\"", f.Name()) + rsyncCmd := fmt.Sprintf("sh -c \"%s netcat %s\"", execPath, f.Name()) cmd := exec.Command( "rsync", "-arvP", @@ -138,11 +139,12 @@ output, err := ioutil.ReadAll(stderr) if err != nil { - shared.Debugf("problem reading rsync stderr %s", err) + shared.LogDebugf("problem reading rsync stderr %s", err) } - if err := cmd.Wait(); err != nil { - shared.Debugf("problem with rsync send of %s: %s: %s", path, err, string(output)) + err = cmd.Wait() + if err != nil { + shared.LogDebugf("problem with rsync send of %s: %s: %s", path, err, string(output)) } <-readDone @@ -168,3 +170,51 @@ func RsyncRecv(path string, conn *websocket.Conn) error { return rsyncWebsocket(path, rsyncRecvCmd(path), conn) } + +// Netcat is called with: +// +// lxd netcat /path/to/unix/socket +// +// and does unbuffered netcatting of to socket to stdin/stdout. Any arguments +// after the path to the unix socket are ignored, so that this can be passed +// directly to rsync as the sync command. 
+func Netcat(args []string) error { + if len(args) < 2 { + return fmt.Errorf("Bad arguments %q", args) + } + + uAddr, err := net.ResolveUnixAddr("unix", args[1]) + if err != nil { + return err + } + + conn, err := net.DialUnix("unix", nil, uAddr) + if err != nil { + return err + } + + wg := sync.WaitGroup{} + wg.Add(1) + + go func() { + io.Copy(os.Stdout, conn) + f, _ := os.Create("/tmp/done_stdout") + f.Close() + conn.Close() + f, _ = os.Create("/tmp/done_close") + f.Close() + wg.Done() + }() + + go func() { + io.Copy(conn, os.Stdin) + f, _ := os.Create("/tmp/done_stdin") + f.Close() + }() + + f, _ := os.Create("/tmp/done_spawning_goroutines") + f.Close() + wg.Wait() + + return nil +} diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/lxd/rsync_test.go juju-core-2.0.0/src/github.com/lxc/lxd/lxd/rsync_test.go --- juju-core-2.0~beta15/src/github.com/lxc/lxd/lxd/rsync_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/lxd/rsync_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,106 +0,0 @@ -package main - -import ( - "io" - "io/ioutil" - "os" - "path" - "testing" - - "github.com/lxc/lxd/shared" -) - -const helloWorld = "hello world\n" - -func TestRsyncSendRecv(t *testing.T) { - source, err := ioutil.TempDir("", "lxd_test_source_") - if err != nil { - t.Error(err) - return - } - defer os.RemoveAll(source) - - sink, err := ioutil.TempDir("", "lxd_test_sink_") - if err != nil { - t.Error(err) - return - } - defer os.RemoveAll(sink) - - /* now, write something to rsync over */ - f, err := os.Create(path.Join(source, "foo")) - if err != nil { - t.Error(err) - return - } - f.Write([]byte(helloWorld)) - f.Close() - - send, sendConn, _, err := rsyncSendSetup(shared.AddSlash(source)) - if err != nil { - t.Error(err) - return - } - - recv := rsyncRecvCmd(sink) - - recvOut, err := recv.StdoutPipe() - if err != nil { - t.Error(err) - return - } - - recvIn, err := recv.StdinPipe() - if err != nil { - t.Error(err) - return - } - - if err := 
recv.Start(); err != nil { - t.Error(err) - return - } - - go func() { - defer sendConn.Close() - if _, err := io.Copy(sendConn, recvOut); err != nil { - t.Error(err) - } - - if err := recv.Wait(); err != nil { - t.Error(err) - } - - }() - - /* - * We close the socket in the above gofunc, but go tells us - * https://github.com/golang/go/issues/4373 that this is an error - * because we were reading from a socket that was closed. Thus, we - * ignore it - */ - io.Copy(recvIn, sendConn) - - if err := send.Wait(); err != nil { - t.Error(err) - return - } - - f, err = os.Open(path.Join(sink, "foo")) - if err != nil { - t.Error(err) - return - } - defer f.Close() - - buf, err := ioutil.ReadAll(f) - if err != nil { - t.Error(err) - return - } - - if string(buf) != helloWorld { - t.Errorf("expected %s got %s", helloWorld, buf) - return - } -} diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/lxd/seccomp.go juju-core-2.0.0/src/github.com/lxc/lxd/lxd/seccomp.go --- juju-core-2.0~beta15/src/github.com/lxc/lxd/lxd/seccomp.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/lxd/seccomp.go 2016-10-13 14:31:53.000000000 +0000 @@ -1,6 +1,7 @@ package main import ( + "fmt" "io/ioutil" "os" "path" @@ -8,16 +9,55 @@ "github.com/lxc/lxd/shared" ) -const DEFAULT_SECCOMP_POLICY = ` +const SECCOMP_HEADER = ` 2 -blacklist +` + +const DEFAULT_SECCOMP_POLICY = ` reject_force_umount # comment this to allow umount -f; not recommended [all] -kexec_load errno 1 -open_by_handle_at errno 1 -init_module errno 1 -finit_module errno 1 -delete_module errno 1 +kexec_load errno 38 +open_by_handle_at errno 38 +init_module errno 38 +finit_module errno 38 +delete_module errno 38 +` +const COMPAT_BLOCKING_POLICY = ` +[%s] +compat_sys_rt_sigaction errno 38 +stub_x32_rt_sigreturn errno 38 +compat_sys_ioctl errno 38 +compat_sys_readv errno 38 +compat_sys_writev errno 38 +compat_sys_recvfrom errno 38 +compat_sys_sendmsg errno 38 +compat_sys_recvmsg errno 38 +stub_x32_execve 
errno 38 +compat_sys_ptrace errno 38 +compat_sys_rt_sigpending errno 38 +compat_sys_rt_sigtimedwait errno 38 +compat_sys_rt_sigqueueinfo errno 38 +compat_sys_sigaltstack errno 38 +compat_sys_timer_create errno 38 +compat_sys_mq_notify errno 38 +compat_sys_kexec_load errno 38 +compat_sys_waitid errno 38 +compat_sys_set_robust_list errno 38 +compat_sys_get_robust_list errno 38 +compat_sys_vmsplice errno 38 +compat_sys_move_pages errno 38 +compat_sys_preadv64 errno 38 +compat_sys_pwritev64 errno 38 +compat_sys_rt_tgsigqueueinfo errno 38 +compat_sys_recvmmsg errno 38 +compat_sys_sendmmsg errno 38 +compat_sys_process_vm_readv errno 38 +compat_sys_process_vm_writev errno 38 +compat_sys_setsockopt errno 38 +compat_sys_getsockopt errno 38 +compat_sys_io_setup errno 38 +compat_sys_io_submit errno 38 +stub_x32_execveat errno 38 ` var seccompPath = shared.VarPath("security", "seccomp") @@ -26,9 +66,72 @@ return path.Join(seccompPath, c.Name()) } -func getSeccompProfileContent(c container) string { - /* for now there are no seccomp knobs. 
*/ - return DEFAULT_SECCOMP_POLICY +func ContainerNeedsSeccomp(c container) bool { + config := c.ExpandedConfig() + + keys := []string{ + "raw.seccomp", + "security.syscalls.whitelist", + "security.syscalls.blacklist", + } + + for _, k := range keys { + _, hasKey := config[k] + if hasKey { + return true + } + } + + compat := config["security.syscalls.blacklist_compat"] + if shared.IsTrue(compat) { + return true + } + + /* this are enabled by default, so if the keys aren't present, that + * means "true" + */ + default_, ok := config["security.syscalls.blacklist_default"] + if !ok || shared.IsTrue(default_) { + return true + } + + return false +} + +func getSeccompProfileContent(c container) (string, error) { + config := c.ExpandedConfig() + + raw := config["raw.seccomp"] + if raw != "" { + return raw, nil + } + + policy := SECCOMP_HEADER + + whitelist := config["security.syscalls.whitelist"] + if whitelist != "" { + policy += "whitelist\n[all]\n" + policy += whitelist + return policy, nil + } + + policy += "blacklist\n" + + default_, ok := config["security.syscalls.blacklist_default"] + if !ok || shared.IsTrue(default_) { + policy += DEFAULT_SECCOMP_POLICY + } + + compat := config["security.syscalls.blacklist_compat"] + if shared.IsTrue(compat) { + arch, err := shared.ArchitectureName(c.Architecture()) + if err != nil { + return "", err + } + policy += fmt.Sprintf(COMPAT_BLOCKING_POLICY, arch) + } + + return policy, nil } func SeccompCreateProfile(c container) error { @@ -38,7 +141,15 @@ * the mtime on the file for any compiler purpose, so let's just write * out the profile. 
*/ - profile := getSeccompProfileContent(c) + if !ContainerNeedsSeccomp(c) { + return nil + } + + profile, err := getSeccompProfileContent(c) + if err != nil { + return nil + } + if err := os.MkdirAll(seccompPath, 0700); err != nil { return err } diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/lxd/storage_btrfs.go juju-core-2.0.0/src/github.com/lxc/lxd/lxd/storage_btrfs.go --- juju-core-2.0~beta15/src/github.com/lxc/lxd/lxd/storage_btrfs.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/lxd/storage_btrfs.go 2016-10-13 14:31:53.000000000 +0000 @@ -122,10 +122,12 @@ } // Then the directory (if it still exists). - err := os.RemoveAll(cPath) - if err != nil { - s.log.Error("ContainerDelete: failed", log.Ctx{"cPath": cPath, "err": err}) - return fmt.Errorf("Error cleaning up %s: %s", cPath, err) + if shared.PathExists(cPath) { + err := os.RemoveAll(cPath) + if err != nil { + s.log.Error("ContainerDelete: failed", log.Ctx{"cPath": cPath, "err": err}) + return fmt.Errorf("Error cleaning up %s: %s", cPath, err) + } } return nil @@ -193,7 +195,6 @@ } } - // TODO: No TemplateApply here? 
return nil } @@ -248,7 +249,7 @@ } else { // Remove the backup, we made if s.isSubvolume(sourceBackupPath) { - return s.subvolDelete(sourceBackupPath) + return s.subvolsDelete(sourceBackupPath) } os.RemoveAll(sourceBackupPath) } @@ -412,7 +413,8 @@ return err } - if err := untarImage(imagePath, subvol); err != nil { + if err := unpackImage(s.d, imagePath, subvol); err != nil { + s.subvolDelete(subvol) return err } @@ -423,7 +425,13 @@ imagePath := shared.VarPath("images", fingerprint) subvol := fmt.Sprintf("%s.btrfs", imagePath) - return s.subvolDelete(subvol) + if s.isSubvolume(subvol) { + if err := s.subvolsDelete(subvol); err != nil { + return err + } + } + + return nil } func (s *storageBtrfs) subvolCreate(subvol string) error { @@ -842,16 +850,16 @@ return err } - <-shared.WebsocketSendStream(conn, stdout) + <-shared.WebsocketSendStream(conn, stdout, 4*1024*1024) output, err := ioutil.ReadAll(stderr) if err != nil { - shared.Log.Error("problem reading btrfs send stderr", "err", err) + shared.LogError("problem reading btrfs send stderr", log.Ctx{"err": err}) } err = cmd.Wait() if err != nil { - shared.Log.Error("problem with btrfs send", "output", string(output)) + shared.LogError("problem with btrfs send", log.Ctx{"output": string(output)}) } return err } @@ -940,6 +948,14 @@ } } +func (s *storageBtrfs) PreservesInodes() bool { + if runningInUserns { + return false + } else { + return true + } +} + func (s *storageBtrfs) MigrationSource(c container) (MigrationStorageSourceDriver, error) { if runningInUserns { return rsyncMigrationSource(c) @@ -969,9 +985,9 @@ return driver, nil } -func (s *storageBtrfs) MigrationSink(live bool, container container, snapshots []container, conn *websocket.Conn) error { +func (s *storageBtrfs) MigrationSink(live bool, container container, snapshots []*Snapshot, conn *websocket.Conn, srcIdmap *shared.IdmapSet) error { if runningInUserns { - return rsyncMigrationSink(live, container, snapshots, conn) + return 
rsyncMigrationSink(live, container, snapshots, conn, srcIdmap) } cName := container.Name() @@ -1012,12 +1028,12 @@ output, err := ioutil.ReadAll(stderr) if err != nil { - shared.Debugf("problem reading btrfs receive stderr %s", err) + shared.LogDebugf("problem reading btrfs receive stderr %s", err) } err = cmd.Wait() if err != nil { - shared.Log.Error("problem with btrfs receive", log.Ctx{"output": string(output)}) + shared.LogError("problem with btrfs receive", log.Ctx{"output": string(output)}) return err } @@ -1026,13 +1042,13 @@ err := s.subvolSnapshot(cPath, targetPath, false) if err != nil { - shared.Log.Error("problem with btrfs snapshot", log.Ctx{"err": err}) + shared.LogError("problem with btrfs snapshot", log.Ctx{"err": err}) return err } err = s.subvolsDelete(cPath) if err != nil { - shared.Log.Error("problem with btrfs delete", log.Ctx{"err": err}) + shared.LogError("problem with btrfs delete", log.Ctx{"err": err}) return err } } @@ -1041,7 +1057,13 @@ } for _, snap := range snapshots { - if err := btrfsRecv(containerPath(cName, true), snap.Path(), true); err != nil { + args := snapshotProtobufToContainerArgs(container.Name(), snap) + s, err := containerCreateEmptySnapshot(container.Daemon(), args) + if err != nil { + return err + } + + if err := btrfsRecv(containerPath(cName, true), s.Path(), true); err != nil { return err } } diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/lxd/storage_dir.go juju-core-2.0.0/src/github.com/lxc/lxd/lxd/storage_dir.go --- juju-core-2.0~beta15/src/github.com/lxc/lxd/lxd/storage_dir.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/lxd/storage_dir.go 2016-10-13 14:31:53.000000000 +0000 @@ -3,6 +3,7 @@ import ( "fmt" "os" + "os/exec" "path/filepath" "strings" @@ -59,8 +60,8 @@ } imagePath := shared.VarPath("images", imageFingerprint) - if err := untarImage(imagePath, container.Path()); err != nil { - os.RemoveAll(rootfsPath) + if err := unpackImage(s.d, imagePath, container.Path()); err 
!= nil { + s.ContainerDelete(container) return err } @@ -81,10 +82,18 @@ func (s *storageDir) ContainerDelete(container container) error { cPath := container.Path() + if !shared.PathExists(cPath) { + return nil + } + err := os.RemoveAll(cPath) if err != nil { - s.log.Error("ContainerDelete: failed", log.Ctx{"cPath": cPath, "err": err}) - return fmt.Errorf("Error cleaning up %s: %s", cPath, err) + // RemovaAll fails on very long paths, so attempt an rm -Rf + output, err := exec.Command("rm", "-Rf", cPath).CombinedOutput() + if err != nil { + s.log.Error("ContainerDelete: failed", log.Ctx{"cPath": cPath, "output": output}) + return fmt.Errorf("Error cleaning up %s: %s", cPath, string(output)) + } } return nil @@ -93,8 +102,8 @@ func (s *storageDir) ContainerCopy( container container, sourceContainer container) error { - oldPath := sourceContainer.RootfsPath() - newPath := container.RootfsPath() + oldPath := sourceContainer.Path() + newPath := container.Path() /* * Copy by using rsync @@ -139,7 +148,6 @@ } } - // TODO: No TemplateApply here? 
return nil } @@ -266,10 +274,14 @@ return MigrationFSType_RSYNC } +func (s *storageDir) PreservesInodes() bool { + return false +} + func (s *storageDir) MigrationSource(container container) (MigrationStorageSourceDriver, error) { return rsyncMigrationSource(container) } -func (s *storageDir) MigrationSink(live bool, container container, snapshots []container, conn *websocket.Conn) error { - return rsyncMigrationSink(live, container, snapshots, conn) +func (s *storageDir) MigrationSink(live bool, container container, snapshots []*Snapshot, conn *websocket.Conn, srcIdmap *shared.IdmapSet) error { + return rsyncMigrationSink(live, container, snapshots, conn, srcIdmap) } diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/lxd/storage.go juju-core-2.0.0/src/github.com/lxc/lxd/lxd/storage.go --- juju-core-2.0~beta15/src/github.com/lxc/lxd/lxd/storage.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/lxd/storage.go 2016-10-13 14:31:53.000000000 +0000 @@ -8,6 +8,7 @@ "path/filepath" "reflect" "syscall" + "time" "github.com/gorilla/websocket" @@ -52,7 +53,7 @@ case filesystemSuperMagicNfs: return "nfs", nil default: - shared.Debugf("Unknown backing filesystem type: 0x%x", fs.Type) + shared.LogDebugf("Unknown backing filesystem type: 0x%x", fs.Type) return string(fs.Type), nil } } @@ -168,6 +169,10 @@ ImageDelete(fingerprint string) error MigrationType() MigrationFSType + /* does this storage backend preserve inodes when it is moved across + * LXD hosts? + */ + PreservesInodes() bool // Get the pieces required to migrate the source. This contains a list // of the "object" (i.e. container or snapshot, depending on whether or @@ -187,7 +192,7 @@ // already present on the target instance as an exercise for the // enterprising developer. 
MigrationSource(container container) (MigrationStorageSourceDriver, error) - MigrationSink(live bool, container container, objects []container, conn *websocket.Conn) error + MigrationSink(live bool, container container, objects []*Snapshot, conn *websocket.Conn, srcIdmap *shared.IdmapSet) error } func newStorage(d *Daemon, sType storageType) (storage, error) { @@ -233,6 +238,9 @@ } func storageForFilename(d *Daemon, filename string) (storage, error) { + var filesystem string + var err error + config := make(map[string]interface{}) storageType := storageTypeDir @@ -240,9 +248,11 @@ return newStorageWithConfig(d, storageTypeMock, config) } - filesystem, err := filesystemDetect(filename) - if err != nil { - return nil, fmt.Errorf("couldn't detect filesystem for '%s': %v", filename, err) + if shared.PathExists(filename) { + filesystem, err = filesystemDetect(filename) + if err != nil { + return nil, fmt.Errorf("couldn't detect filesystem for '%s': %v", filename, err) + } } if shared.PathExists(filename + ".lv") { @@ -299,7 +309,7 @@ dpath := c.Path() rpath := c.RootfsPath() - shared.Log.Debug("Shifting root filesystem", + shared.LogDebug("Shifting root filesystem", log.Ctx{"container": c.Name(), "rootfs": rpath}) idmapset := c.IdmapSet() @@ -310,7 +320,7 @@ err := idmapset.ShiftRootfs(rpath) if err != nil { - shared.Debugf("Shift of rootfs %s failed: %s", rpath, err) + shared.LogDebugf("Shift of rootfs %s failed: %s", rpath, err) return err } @@ -362,7 +372,7 @@ log.Ctx{"driver": fmt.Sprintf("storage/%s", lw.w.GetStorageTypeName())}, ) - lw.log.Info("Init") + lw.log.Debug("Init") return lw, err } @@ -511,12 +521,12 @@ } func (lw *storageLogWrapper) ContainerSnapshotStart(container container) error { - lw.log.Debug("ContainerStart", log.Ctx{"container": container.Name()}) + lw.log.Debug("ContainerSnapshotStart", log.Ctx{"container": container.Name()}) return lw.w.ContainerSnapshotStart(container) } func (lw *storageLogWrapper) ContainerSnapshotStop(container container) 
error { - lw.log.Debug("ContainerStop", log.Ctx{"container": container.Name()}) + lw.log.Debug("ContainerSnapshotStop", log.Ctx{"container": container.Name()}) return lw.w.ContainerSnapshotStop(container) } @@ -537,24 +547,29 @@ return lw.w.MigrationType() } +func (lw *storageLogWrapper) PreservesInodes() bool { + return lw.w.PreservesInodes() +} + func (lw *storageLogWrapper) MigrationSource(container container) (MigrationStorageSourceDriver, error) { lw.log.Debug("MigrationSource", log.Ctx{"container": container.Name()}) return lw.w.MigrationSource(container) } -func (lw *storageLogWrapper) MigrationSink(live bool, container container, objects []container, conn *websocket.Conn) error { +func (lw *storageLogWrapper) MigrationSink(live bool, container container, objects []*Snapshot, conn *websocket.Conn, srcIdmap *shared.IdmapSet) error { objNames := []string{} for _, obj := range objects { - objNames = append(objNames, obj.Name()) + objNames = append(objNames, obj.GetName()) } lw.log.Debug("MigrationSink", log.Ctx{ "live": live, "container": container.Name(), "objects": objNames, + "srcIdmap": *srcIdmap, }) - return lw.w.MigrationSink(live, container, objects, conn) + return lw.w.MigrationSink(live, container, objects, conn, srcIdmap) } func ShiftIfNecessary(container container, srcIdmap *shared.IdmapSet) error { @@ -594,16 +609,19 @@ } func (s rsyncStorageSourceDriver) SendWhileRunning(conn *websocket.Conn) error { - toSend := append([]container{s.container}, s.snapshots...) 
+ for _, send := range s.snapshots { + if err := send.StorageStart(); err != nil { + return err + } + defer send.StorageStop() - for _, send := range toSend { path := send.Path() if err := RsyncSend(shared.AddSlash(path), conn); err != nil { return err } } - return nil + return RsyncSend(shared.AddSlash(s.container.Path()), conn) } func (s rsyncStorageSourceDriver) SendAfterCheckpoint(conn *websocket.Conn) error { @@ -624,21 +642,83 @@ return rsyncStorageSourceDriver{container, snapshots}, nil } -func rsyncMigrationSink(live bool, container container, snapshots []container, conn *websocket.Conn) error { - /* the first object is the actual container */ - if err := RsyncRecv(shared.AddSlash(container.Path()), conn); err != nil { - return err +func snapshotProtobufToContainerArgs(containerName string, snap *Snapshot) containerArgs { + config := map[string]string{} + + for _, ent := range snap.LocalConfig { + config[ent.GetKey()] = ent.GetValue() } - if len(snapshots) > 0 { - err := os.MkdirAll(shared.VarPath(fmt.Sprintf("snapshots/%s", container.Name())), 0700) - if err != nil { - return err + devices := shared.Devices{} + for _, ent := range snap.LocalDevices { + props := map[string]string{} + for _, prop := range ent.Config { + props[prop.GetKey()] = prop.GetValue() } + + devices[ent.GetName()] = props + } + + name := containerName + shared.SnapshotDelimiter + snap.GetName() + return containerArgs{ + Name: name, + Ctype: cTypeSnapshot, + Config: config, + Profiles: snap.Profiles, + Ephemeral: snap.GetEphemeral(), + Devices: devices, + Architecture: int(snap.GetArchitecture()), + Stateful: snap.GetStateful(), } +} + +func rsyncMigrationSink(live bool, container container, snapshots []*Snapshot, conn *websocket.Conn, srcIdmap *shared.IdmapSet) error { + isDirBackend := container.Storage().GetStorageType() == storageTypeDir + + if isDirBackend { + if len(snapshots) > 0 { + err := os.MkdirAll(shared.VarPath(fmt.Sprintf("snapshots/%s", container.Name())), 0700) + if err 
!= nil { + return err + } + } + for _, snap := range snapshots { + args := snapshotProtobufToContainerArgs(container.Name(), snap) + s, err := containerCreateEmptySnapshot(container.Daemon(), args) + if err != nil { + return err + } + + if err := RsyncRecv(shared.AddSlash(s.Path()), conn); err != nil { + return err + } + + if err := ShiftIfNecessary(container, srcIdmap); err != nil { + return err + } + } + + if err := RsyncRecv(shared.AddSlash(container.Path()), conn); err != nil { + return err + } + } else { + for _, snap := range snapshots { + if err := RsyncRecv(shared.AddSlash(container.Path()), conn); err != nil { + return err + } - for _, snap := range snapshots { - if err := RsyncRecv(shared.AddSlash(snap.Path()), conn); err != nil { + if err := ShiftIfNecessary(container, srcIdmap); err != nil { + return err + } + + args := snapshotProtobufToContainerArgs(container.Name(), snap) + _, err := containerCreateAsSnapshot(container.Daemon(), args, container) + if err != nil { + return err + } + } + + if err := RsyncRecv(shared.AddSlash(container.Path()), conn); err != nil { return err } } @@ -650,5 +730,64 @@ } } + if err := ShiftIfNecessary(container, srcIdmap); err != nil { + return err + } + + return nil +} + +// Useful functions for unreliable backends +func tryExec(name string, arg ...string) ([]byte, error) { + var err error + var output []byte + + for i := 0; i < 20; i++ { + output, err = exec.Command(name, arg...).CombinedOutput() + if err == nil { + break + } + + time.Sleep(500 * time.Millisecond) + } + + return output, err +} + +func tryMount(src string, dst string, fs string, flags uintptr, options string) error { + var err error + + for i := 0; i < 20; i++ { + err = syscall.Mount(src, dst, fs, flags, options) + if err == nil { + break + } + + time.Sleep(500 * time.Millisecond) + } + + if err != nil { + return err + } + + return nil +} + +func tryUnmount(path string, flags int) error { + var err error + + for i := 0; i < 20; i++ { + err = 
syscall.Unmount(path, flags) + if err == nil { + break + } + + time.Sleep(500 * time.Millisecond) + } + + if err != nil { + return err + } + return nil } diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/lxd/storage_lvm.go juju-core-2.0.0/src/github.com/lxc/lxd/lxd/storage_lvm.go --- juju-core-2.0~beta15/src/github.com/lxc/lxd/lxd/storage_lvm.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/lxd/storage_lvm.go 2016-10-13 14:31:53.000000000 +0000 @@ -9,7 +9,6 @@ "strconv" "strings" "syscall" - "time" "github.com/gorilla/websocket" @@ -18,13 +17,10 @@ log "gopkg.in/inconshreveable/log15.v2" ) -var storageLvmDefaultThinLVSize = "10GiB" -var storageLvmDefaultThinPoolName = "LXDPool" - func storageLVMCheckVolumeGroup(vgName string) error { output, err := exec.Command("vgdisplay", "-s", vgName).CombinedOutput() if err != nil { - shared.Log.Debug("vgdisplay failed to find vg", log.Ctx{"output": string(output)}) + shared.LogDebug("vgdisplay failed to find vg", log.Ctx{"output": string(output)}) return fmt.Errorf("LVM volume group '%s' not found", vgName) } @@ -54,18 +50,8 @@ func storageLVMGetThinPoolUsers(d *Daemon) ([]string, error) { results := []string{} - vgname, err := d.ConfigValueGet("storage.lvm_vg_name") - if err != nil { - return results, fmt.Errorf("Error getting lvm_vg_name config") - } - if vgname == "" { - return results, nil - } - poolname, err := d.ConfigValueGet("storage.lvm_thinpool_name") - if err != nil { - return results, fmt.Errorf("Error getting lvm_thinpool_name config") - } - if poolname == "" { + + if daemonConfig["storage.lvm_vg_name"].Get() == "" { return results, nil } @@ -73,6 +59,7 @@ if err != nil { return results, err } + for _, cName := range cNames { var lvLinkPath string if strings.Contains(cName, shared.SnapshotDelimiter) { @@ -101,72 +88,52 @@ return results, nil } -func storageLVMSetThinPoolNameConfig(d *Daemon, poolname string) error { +func storageLVMValidateThinPoolName(d *Daemon, key string, 
value string) error { users, err := storageLVMGetThinPoolUsers(d) if err != nil { return fmt.Errorf("Error checking if a pool is already in use: %v", err) } + if len(users) > 0 { return fmt.Errorf("Can not change LVM config. Images or containers are still using LVs: %v", users) } - vgname, err := d.ConfigValueGet("storage.lvm_vg_name") - if err != nil { - return fmt.Errorf("Error getting lvm_vg_name config: %v", err) - } - - if poolname != "" { + vgname := daemonConfig["storage.lvm_vg_name"].Get() + if value != "" { if vgname == "" { return fmt.Errorf("Can not set lvm_thinpool_name without lvm_vg_name set.") } - poolExists, err := storageLVMThinpoolExists(vgname, poolname) + poolExists, err := storageLVMThinpoolExists(vgname, value) if err != nil { - return fmt.Errorf("Error checking for thin pool '%s' in '%s': %v", poolname, vgname, err) + return fmt.Errorf("Error checking for thin pool '%s' in '%s': %v", value, vgname, err) } + if !poolExists { - return fmt.Errorf("Pool '%s' does not exist in Volume Group '%s'", poolname, vgname) + return fmt.Errorf("Pool '%s' does not exist in Volume Group '%s'", value, vgname) } } - err = d.ConfigValueSet("storage.lvm_thinpool_name", poolname) - if err != nil { - return err - } - return nil } -func storageLVMSetVolumeGroupNameConfig(d *Daemon, vgname string) error { +func storageLVMValidateVolumeGroupName(d *Daemon, key string, value string) error { users, err := storageLVMGetThinPoolUsers(d) if err != nil { return fmt.Errorf("Error checking if a pool is already in use: %v", err) } + if len(users) > 0 { return fmt.Errorf("Can not change LVM config. 
Images or containers are still using LVs: %v", users) } - if vgname != "" { - err = storageLVMCheckVolumeGroup(vgname) + if value != "" { + err = storageLVMCheckVolumeGroup(value) if err != nil { return err } } - err = d.ConfigValueSet("storage.lvm_vg_name", vgname) - if err != nil { - return err - } - - return nil -} - -func storageLVMSetFsTypeConfig(d *Daemon, fstype string) error { - err := d.ConfigValueSet("storage.lvm_fstype", fstype) - if err != nil { - return err - } - return nil } @@ -220,10 +187,7 @@ } if config["vgName"] == nil { - vgName, err := s.d.ConfigValueGet("storage.lvm_vg_name") - if err != nil { - return s, fmt.Errorf("Error checking server config: %v", err) - } + vgName := daemonConfig["storage.lvm_vg_name"].Get() if vgName == "" { return s, fmt.Errorf("LVM isn't enabled") } @@ -337,14 +301,7 @@ return fmt.Errorf("Error creating container directory: %v", err) } - var mode os.FileMode - if container.IsPrivileged() { - mode = 0700 - } else { - mode = 0755 - } - - err = os.Chmod(destPath, mode) + err = os.Chmod(destPath, 0700) if err != nil { return err } @@ -355,17 +312,8 @@ return err } - var fstype string - fstype, err = s.d.ConfigValueGet("storage.lvm_fstype") - if err != nil { - return fmt.Errorf("Error checking server config, err=%v", err) - } - - if fstype == "" { - fstype = "ext4" - } - // Generate a new xfs's UUID + fstype := daemonConfig["storage.lvm_fstype"].Get() if fstype == "xfs" { err := xfsGenerateNewUUID(lvpath) if err != nil { @@ -374,15 +322,28 @@ } } - err = s.tryMount(lvpath, destPath, fstype, 0, "discard") + mountOptions := daemonConfig["storage.lvm_mount_options"].Get() + err = tryMount(lvpath, destPath, fstype, 0, mountOptions) if err != nil { s.ContainerDelete(container) return fmt.Errorf("Error mounting snapshot LV: %v", err) } + var mode os.FileMode + if container.IsPrivileged() { + mode = 0700 + } else { + mode = 0755 + } + + err = os.Chmod(destPath, mode) + if err != nil { + return err + } + if 
!container.IsPrivileged() { if err = s.shiftRootfs(container); err != nil { - err2 := s.tryUnmount(destPath, 0) + err2 := tryUnmount(destPath, 0) if err2 != nil { return fmt.Errorf("Error in umount: '%s' while cleaning up after error in shiftRootfs: '%s'", err2, err) } @@ -397,7 +358,7 @@ log.Ctx{"err": err}) } - umounterr := s.tryUnmount(destPath, 0) + umounterr := tryUnmount(destPath, 0) if umounterr != nil { return fmt.Errorf("Error unmounting '%s' after shiftRootfs: %v", destPath, umounterr) } @@ -468,16 +429,10 @@ func (s *storageLvm) ContainerStart(container container) error { lvName := containerNameToLVName(container.Name()) lvpath := fmt.Sprintf("/dev/%s/%s", s.vgName, lvName) - fstype, err := s.d.ConfigValueGet("storage.lvm_fstype") - if err != nil { - return fmt.Errorf("Error checking server config, err=%v", err) - } - - if fstype == "" { - fstype = "ext4" - } + fstype := daemonConfig["storage.lvm_fstype"].Get() - err = s.tryMount(lvpath, container.Path(), fstype, 0, "discard") + mountOptions := daemonConfig["storage.lvm_mount_options"].Get() + err := tryMount(lvpath, container.Path(), fstype, 0, mountOptions) if err != nil { return fmt.Errorf( "Error mounting snapshot LV path='%s': %v", @@ -489,7 +444,7 @@ } func (s *storageLvm) ContainerStop(container container) error { - err := s.tryUnmount(container.Path(), 0) + err := tryUnmount(container.Path(), 0) if err != nil { return fmt.Errorf( "failed to unmount container path '%s'.\nError: %v", @@ -500,60 +455,6 @@ return nil } -func (s *storageLvm) tryExec(name string, arg ...string) ([]byte, error) { - var err error - var output []byte - - for i := 0; i < 20; i++ { - output, err = exec.Command(name, arg...).CombinedOutput() - if err == nil { - break - } - - time.Sleep(500 * time.Millisecond) - } - - return output, err -} - -func (s *storageLvm) tryMount(src string, dst string, fs string, flags uintptr, options string) error { - var err error - - for i := 0; i < 20; i++ { - err = syscall.Mount(src, dst, fs, 
flags, options) - if err == nil { - break - } - - time.Sleep(500 * time.Millisecond) - } - - if err != nil { - return err - } - - return nil -} - -func (s *storageLvm) tryUnmount(path string, flags int) error { - var err error - - for i := 0; i < 20; i++ { - err = syscall.Unmount(path, flags) - if err == nil { - break - } - - time.Sleep(500 * time.Millisecond) - } - - if err != nil { - return err - } - - return nil -} - func (s *storageLvm) ContainerRename( container container, newContainerName string) error { @@ -658,7 +559,7 @@ srcName := containerNameToLVName(sourceContainer.Name()) destName := containerNameToLVName(snapshotContainer.Name()) - shared.Log.Debug( + shared.LogDebug( "Creating snapshot", log.Ctx{"srcName": srcName, "destName": destName}) @@ -750,7 +651,7 @@ srcName := containerNameToLVName(container.Name()) destName := containerNameToLVName(container.Name() + "/rw") - shared.Log.Debug( + shared.LogDebug( "Creating snapshot", log.Ctx{"srcName": srcName, "destName": destName}) @@ -766,17 +667,8 @@ } } - var fstype string - fstype, err = s.d.ConfigValueGet("storage.lvm_fstype") - if err != nil { - return fmt.Errorf("Error checking server config, err=%v", err) - } - - if fstype == "" { - fstype = "ext4" - } - // Generate a new xfs's UUID + fstype := daemonConfig["storage.lvm_fstype"].Get() if fstype == "xfs" { err := xfsGenerateNewUUID(lvpath) if err != nil { @@ -785,7 +677,8 @@ } } - err = s.tryMount(lvpath, container.Path(), fstype, 0, "discard") + mountOptions := daemonConfig["storage.lvm_mount_options"].Get() + err = tryMount(lvpath, container.Path(), fstype, 0, mountOptions) if err != nil { return fmt.Errorf( "Error mounting snapshot LV path='%s': %v", @@ -839,39 +732,35 @@ } }() - var fstype string - fstype, err = s.d.ConfigValueGet("storage.lvm_fstype") + fstype := daemonConfig["storage.lvm_fstype"].Get() + mountOptions := daemonConfig["storage.lvm_mount_options"].Get() + err = tryMount(lvpath, tempLVMountPoint, fstype, 0, mountOptions) if err != 
nil { - return fmt.Errorf("Error checking server config, err=%v", err) - } - - if fstype == "" { - fstype = "ext4" - } - - err = s.tryMount(lvpath, tempLVMountPoint, fstype, 0, "discard") - if err != nil { - shared.Logf("Error mounting image LV for untarring: %v", err) + shared.LogInfof("Error mounting image LV for unpacking: %v", err) return fmt.Errorf("Error mounting image LV: %v", err) - } - untarErr := untarImage(finalName, tempLVMountPoint) + unpackErr := unpackImage(s.d, finalName, tempLVMountPoint) - err = s.tryUnmount(tempLVMountPoint, 0) + err = tryUnmount(tempLVMountPoint, 0) if err != nil { s.log.Warn("could not unmount LV. Will not remove", log.Ctx{"lvpath": lvpath, "mountpoint": tempLVMountPoint, "err": err}) - if untarErr == nil { + if unpackErr == nil { return err } return fmt.Errorf( "Error unmounting '%s' during cleanup of error %v", - tempLVMountPoint, untarErr) + tempLVMountPoint, unpackErr) + } + + if unpackErr != nil { + s.removeLV(fingerprint) + return unpackErr } - return untarErr + return nil } func (s *storageLvm) ImageDelete(fingerprint string) error { @@ -892,78 +781,97 @@ } func (s *storageLvm) createDefaultThinPool() (string, error) { - // Create a tiny 1G thinpool - output, err := s.tryExec( - "lvcreate", - "--poolmetadatasize", "1G", - "-L", "1G", - "--thinpool", - fmt.Sprintf("%s/%s", s.vgName, storageLvmDefaultThinPoolName)) + thinPoolName := daemonConfig["storage.lvm_thinpool_name"].Get() + isRecent, err := s.lvmVersionIsAtLeast("2.02.99") + if err != nil { + return "", fmt.Errorf("Error checking LVM version: %v", err) + } + + // Create the thin pool + var output []byte + if isRecent { + output, err = tryExec( + "lvcreate", + "--poolmetadatasize", "1G", + "-l", "100%FREE", + "--thinpool", + fmt.Sprintf("%s/%s", s.vgName, thinPoolName)) + } else { + output, err = tryExec( + "lvcreate", + "--poolmetadatasize", "1G", + "-L", "1G", + "--thinpool", + fmt.Sprintf("%s/%s", s.vgName, thinPoolName)) + } if err != nil { s.log.Error( "Could 
not create thin pool", log.Ctx{ - "name": storageLvmDefaultThinPoolName, + "name": thinPoolName, "err": err, "output": string(output)}) return "", fmt.Errorf( - "Could not create LVM thin pool named %s", storageLvmDefaultThinPoolName) + "Could not create LVM thin pool named %s", thinPoolName) } - // Grow it to the maximum VG size (two step process required by old LVM) - output, err = s.tryExec( - "lvextend", - "--alloc", "anywhere", - "-l", "100%FREE", - fmt.Sprintf("%s/%s", s.vgName, storageLvmDefaultThinPoolName)) + if !isRecent { + // Grow it to the maximum VG size (two step process required by old LVM) + output, err = tryExec( + "lvextend", + "--alloc", "anywhere", + "-l", "100%FREE", + fmt.Sprintf("%s/%s", s.vgName, thinPoolName)) - if err != nil { - s.log.Error( - "Could not grow thin pool", - log.Ctx{ - "name": storageLvmDefaultThinPoolName, - "err": err, - "output": string(output)}) + if err != nil { + s.log.Error( + "Could not grow thin pool", + log.Ctx{ + "name": thinPoolName, + "err": err, + "output": string(output)}) - return "", fmt.Errorf( - "Could not grow LVM thin pool named %s", storageLvmDefaultThinPoolName) + return "", fmt.Errorf( + "Could not grow LVM thin pool named %s", thinPoolName) + } } - return storageLvmDefaultThinPoolName, nil + return thinPoolName, nil } func (s *storageLvm) createThinLV(lvname string) (string, error) { - poolname, err := s.d.ConfigValueGet("storage.lvm_thinpool_name") + var err error + + vgname := daemonConfig["storage.lvm_vg_name"].Get() + poolname := daemonConfig["storage.lvm_thinpool_name"].Get() + exists, err := storageLVMThinpoolExists(vgname, poolname) if err != nil { - return "", fmt.Errorf("Error checking server config, err=%v", err) + return "", err } - if poolname == "" { + if !exists { poolname, err = s.createDefaultThinPool() if err != nil { return "", fmt.Errorf("Error creating LVM thin pool: %v", err) } - err = storageLVMSetThinPoolNameConfig(s.d, poolname) + + err = storageLVMValidateThinPoolName(s.d, 
"", poolname) if err != nil { s.log.Error("Setting thin pool name", log.Ctx{"err": err}) return "", fmt.Errorf("Error setting LVM thin pool config: %v", err) } } - lvSize := os.Getenv("LXD_LVM_LVSIZE") - if lvSize == "" { - lvSize = storageLvmDefaultThinLVSize - } + lvSize := daemonConfig["storage.lvm_volume_size"].Get() - output, err := s.tryExec( + output, err := tryExec( "lvcreate", "--thin", "-n", lvname, "--virtualsize", lvSize, fmt.Sprintf("%s/%s", s.vgName, poolname)) - if err != nil { s.log.Error("Could not create LV", log.Ctx{"lvname": lvname, "output": string(output)}) return "", fmt.Errorf("Could not create thin LV named %s", lvname) @@ -971,23 +879,22 @@ lvpath := fmt.Sprintf("/dev/%s/%s", s.vgName, lvname) - fstype, err := s.d.ConfigValueGet("storage.lvm_fstype") - + fstype := daemonConfig["storage.lvm_fstype"].Get() switch fstype { case "xfs": - output, err = s.tryExec( + output, err = tryExec( "mkfs.xfs", lvpath) default: // default = ext4 - output, err = s.tryExec( + output, err = tryExec( "mkfs.ext4", "-E", "nodiscard,lazy_itable_init=0,lazy_journal_init=0", lvpath) } if err != nil { - s.log.Error("mkfs.ext4", log.Ctx{"output": string(output)}) + s.log.Error("Filesystem creation failed", log.Ctx{"output": string(output)}) return "", fmt.Errorf("Error making filesystem on image LV: %v", err) } @@ -998,7 +905,7 @@ var err error var output []byte - output, err = s.tryExec( + output, err = tryExec( "lvremove", "-f", fmt.Sprintf("%s/%s", s.vgName, lvname)) if err != nil { @@ -1017,13 +924,13 @@ } var output []byte if isRecent { - output, err = s.tryExec( + output, err = tryExec( "lvcreate", "-kn", "-n", lvname, "-s", fmt.Sprintf("/dev/%s/%s", s.vgName, origlvname)) } else { - output, err = s.tryExec( + output, err = tryExec( "lvcreate", "-n", lvname, "-s", fmt.Sprintf("/dev/%s/%s", s.vgName, origlvname)) @@ -1036,9 +943,9 @@ snapshotFullName := fmt.Sprintf("/dev/%s/%s", s.vgName, lvname) if readonly { - output, err = s.tryExec("lvchange", "-ay", "-pr", 
snapshotFullName) + output, err = tryExec("lvchange", "-ay", "-pr", snapshotFullName) } else { - output, err = s.tryExec("lvchange", "-ay", snapshotFullName) + output, err = tryExec("lvchange", "-ay", snapshotFullName) } if err != nil { @@ -1053,7 +960,7 @@ } func (s *storageLvm) renameLV(oldName string, newName string) (string, error) { - output, err := s.tryExec("lvrename", s.vgName, oldName, newName) + output, err := tryExec("lvrename", s.vgName, oldName, newName) return string(output), err } @@ -1061,10 +968,14 @@ return MigrationFSType_RSYNC } +func (s *storageLvm) PreservesInodes() bool { + return false +} + func (s *storageLvm) MigrationSource(container container) (MigrationStorageSourceDriver, error) { return rsyncMigrationSource(container) } -func (s *storageLvm) MigrationSink(live bool, container container, snapshots []container, conn *websocket.Conn) error { - return rsyncMigrationSink(live, container, snapshots, conn) +func (s *storageLvm) MigrationSink(live bool, container container, snapshots []*Snapshot, conn *websocket.Conn, srcIdmap *shared.IdmapSet) error { + return rsyncMigrationSink(live, container, snapshots, conn, srcIdmap) } diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/lxd/storage_test.go juju-core-2.0.0/src/github.com/lxc/lxd/lxd/storage_test.go --- juju-core-2.0~beta15/src/github.com/lxc/lxd/lxd/storage_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/lxd/storage_test.go 2016-10-13 14:31:53.000000000 +0000 @@ -5,6 +5,8 @@ "github.com/gorilla/websocket" + "github.com/lxc/lxd/shared" + log "gopkg.in/inconshreveable/log15.v2" ) @@ -131,9 +133,13 @@ return MigrationFSType_RSYNC } +func (s *storageMock) PreservesInodes() bool { + return false +} + func (s *storageMock) MigrationSource(container container) (MigrationStorageSourceDriver, error) { return nil, fmt.Errorf("not implemented") } -func (s *storageMock) MigrationSink(live bool, container container, snapshots []container, conn *websocket.Conn) 
error { +func (s *storageMock) MigrationSink(live bool, container container, snapshots []*Snapshot, conn *websocket.Conn, srcIdmap *shared.IdmapSet) error { return nil } diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/lxd/storage_zfs.go juju-core-2.0.0/src/github.com/lxc/lxd/lxd/storage_zfs.go --- juju-core-2.0~beta15/src/github.com/lxc/lxd/lxd/storage_zfs.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/lxd/storage_zfs.go 2016-10-13 14:31:53.000000000 +0000 @@ -8,7 +8,6 @@ "strconv" "strings" "syscall" - "time" "github.com/gorilla/websocket" @@ -35,11 +34,7 @@ } if config["zfsPool"] == nil { - zfsPool, err := s.d.ConfigValueGet("storage.zfs_pool_name") - if err != nil { - return s, fmt.Errorf("Error checking server config: %v", err) - } - + zfsPool := daemonConfig["storage.zfs_pool_name"].Get() if zfsPool == "" { return s, fmt.Errorf("ZFS isn't enabled") } @@ -79,11 +74,30 @@ return s, fmt.Errorf("The 'zfs' tool isn't working properly") } + output, err = exec.Command("zfs", "get", "mountpoint", "-H", "-o", "source", s.zfsPool).CombinedOutput() + if err != nil { + return s, fmt.Errorf("Unable to query ZFS mountpoint") + } + + if strings.TrimSpace(string(output)) != "local" { + err = shared.RunCommand("zfs", "set", "mountpoint=none", s.zfsPool) + if err != nil { + return s, err + } + } + return s, nil } // Things we don't need to care about func (s *storageZfs) ContainerStart(container container) error { + fs := fmt.Sprintf("containers/%s", container.Name()) + + // Just in case the container filesystem got unmounted + if !shared.IsMountPoint(shared.VarPath(fs)) { + s.zfsMount(fs) + } + return nil } @@ -168,17 +182,17 @@ } func (s *storageZfs) ContainerCanRestore(container container, sourceContainer container) error { - fields := strings.SplitN(sourceContainer.Name(), shared.SnapshotDelimiter, 2) - cName := fields[0] - snapName := fmt.Sprintf("snapshot-%s", fields[1]) - - snapshots, err := 
s.zfsListSnapshots(fmt.Sprintf("containers/%s", cName)) + snaps, err := container.Snapshots() if err != nil { return err } - if snapshots[len(snapshots)-1] != snapName { - return fmt.Errorf("ZFS only supports restoring state to the latest snapshot.") + if snaps[len(snaps)-1].Name() != sourceContainer.Name() { + if !daemonConfig["storage.zfs_remove_snapshots"].GetBool() { + return fmt.Errorf("ZFS can only restore from the latest snapshot. Delete newer snapshots or copy the snapshot into a new container instead.") + } + + return nil } return nil @@ -187,61 +201,63 @@ func (s *storageZfs) ContainerDelete(container container) error { fs := fmt.Sprintf("containers/%s", container.Name()) - removable := true - snaps, err := s.zfsListSnapshots(fs) - if err != nil { - return err - } - - for _, snap := range snaps { - var err error - removable, err = s.zfsSnapshotRemovable(fs, snap) + if s.zfsExists(fs) { + removable := true + snaps, err := s.zfsListSnapshots(fs) if err != nil { return err } - if !removable { - break - } - } + for _, snap := range snaps { + var err error + removable, err = s.zfsSnapshotRemovable(fs, snap) + if err != nil { + return err + } - if removable { - origin, err := s.zfsGet(fs, "origin") - if err != nil { - return err + if !removable { + break + } } - origin = strings.TrimPrefix(origin, fmt.Sprintf("%s/", s.zfsPool)) - err = s.zfsDestroy(fs) - if err != nil { - return err - } + if removable { + origin, err := s.zfsGet(fs, "origin") + if err != nil { + return err + } + origin = strings.TrimPrefix(origin, fmt.Sprintf("%s/", s.zfsPool)) - err = s.zfsCleanup(origin) - if err != nil { - return err - } - } else { - err := s.zfsSet(fs, "mountpoint", "none") - if err != nil { - return err - } + err = s.zfsDestroy(fs) + if err != nil { + return err + } - err = s.zfsRename(fs, fmt.Sprintf("deleted/containers/%s", uuid.NewRandom().String())) - if err != nil { - return err + err = s.zfsCleanup(origin) + if err != nil { + return err + } + } else { + err := 
s.zfsSet(fs, "mountpoint", "none") + if err != nil { + return err + } + + err = s.zfsRename(fs, fmt.Sprintf("deleted/containers/%s", uuid.NewRandom().String())) + if err != nil { + return err + } } } if shared.PathExists(shared.VarPath(fs)) { - os.Remove(shared.VarPath(fs)) + err := os.Remove(shared.VarPath(fs)) if err != nil { return err } } if shared.PathExists(shared.VarPath(fs) + ".zfs") { - os.Remove(shared.VarPath(fs) + ".zfs") + err := os.Remove(shared.VarPath(fs) + ".zfs") if err != nil { return err } @@ -320,23 +336,6 @@ return container.TemplateApply("copy") } -func (s *storageZfs) zfsMounted(path string) bool { - output, err := exec.Command("zfs", "mount").CombinedOutput() - if err != nil { - shared.Log.Error("error listing zfs mounts", "err", output) - return false - } - - for _, line := range strings.Split(string(output), "\n") { - zfsName := strings.Split(line, " ")[0] - if zfsName == fmt.Sprintf("%s/%s", s.zfsPool, path) { - return true - } - } - - return false -} - func (s *storageZfs) ContainerRename(container container, newName string) error { oldName := container.Name() @@ -359,17 +358,9 @@ } // In case ZFS didn't mount the filesystem, do it ourselves - if !shared.PathExists(shared.VarPath(fmt.Sprintf("containers/%s.zfs", newName))) { - for i := 0; i < 20; i++ { - err = s.zfsMount(fmt.Sprintf("containers/%s", newName)) - if err == nil { - break - } - time.Sleep(500 * time.Millisecond) - } - if err != nil { - return err - } + err = s.zfsMount(fmt.Sprintf("containers/%s", newName)) + if err != nil { + return err } // In case the change of mountpoint didn't remove the old path, do it ourselves @@ -404,11 +395,29 @@ } func (s *storageZfs) ContainerRestore(container container, sourceContainer container) error { + // Remove any needed snapshot + snaps, err := container.Snapshots() + if err != nil { + return err + } + + for i := len(snaps) - 1; i != 0; i-- { + if snaps[i].Name() == sourceContainer.Name() { + break + } + + err := snaps[i].Delete() + if 
err != nil { + return err + } + } + + // Restore the snapshot fields := strings.SplitN(sourceContainer.Name(), shared.SnapshotDelimiter, 2) cName := fields[0] snapName := fmt.Sprintf("snapshot-%s", fields[1]) - err := s.zfsSnapshotRestore(fmt.Sprintf("containers/%s", cName), snapName) + err = s.zfsSnapshotRestore(fmt.Sprintf("containers/%s", cName), snapName) if err != nil { return err } @@ -421,10 +430,15 @@ fs := fmt.Sprintf("containers/%s", container.Name()) + property := "quota" + if daemonConfig["storage.zfs_use_refquota"].GetBool() { + property = "refquota" + } + if size > 0 { - err = s.zfsSet(fs, "quota", fmt.Sprintf("%d", size)) + err = s.zfsSet(fs, property, fmt.Sprintf("%d", size)) } else { - err = s.zfsSet(fs, "quota", "none") + err = s.zfsSet(fs, property, "none") } if err != nil { @@ -439,7 +453,12 @@ fs := fmt.Sprintf("containers/%s", container.Name()) - value, err := s.zfsGet(fs, "used") + property := "used" + if daemonConfig["storage.zfs_use_refquota"].GetBool() { + property = "usedbydataset" + } + + value, err := s.zfsGet(fs, property) if err != nil { return -1, err } @@ -482,27 +501,32 @@ cName := fields[0] snapName := fmt.Sprintf("snapshot-%s", fields[1]) - removable, err := s.zfsSnapshotRemovable(fmt.Sprintf("containers/%s", cName), snapName) - if removable { - err = s.zfsSnapshotDestroy(fmt.Sprintf("containers/%s", cName), snapName) - if err != nil { - return err + if s.zfsExists(fmt.Sprintf("containers/%s@%s", cName, snapName)) { + removable, err := s.zfsSnapshotRemovable(fmt.Sprintf("containers/%s", cName), snapName) + if removable { + err = s.zfsSnapshotDestroy(fmt.Sprintf("containers/%s", cName), snapName) + if err != nil { + return err + } + } else { + err = s.zfsSnapshotRename(fmt.Sprintf("containers/%s", cName), snapName, fmt.Sprintf("copy-%s", uuid.NewRandom().String())) + if err != nil { + return err + } } - } else { - err = s.zfsSnapshotRename(fmt.Sprintf("containers/%s", cName), snapName, fmt.Sprintf("copy-%s", 
uuid.NewRandom().String())) + } + + snapPath := shared.VarPath(fmt.Sprintf("snapshots/%s/%s.zfs", cName, fields[1])) + if shared.PathExists(snapPath) { + err := os.Remove(snapPath) if err != nil { return err } } - err = os.Remove(shared.VarPath(fmt.Sprintf("snapshots/%s/%s.zfs", cName, fields[1]))) - if err != nil { - return err - } - parent := shared.VarPath(fmt.Sprintf("snapshots/%s", cName)) if ok, _ := shared.PathIsEmpty(parent); ok { - err = os.Remove(parent) + err := os.Remove(parent) if err != nil { return err } @@ -560,6 +584,7 @@ if len(fields) < 2 { return fmt.Errorf("Invalid snapshot name: %s", container.Name()) } + cName := fields[0] sName := fields[1] sourceFs := fmt.Sprintf("containers/%s", cName) @@ -622,19 +647,31 @@ return err } - err = untarImage(imagePath, subvol) - if err != nil { + cleanup := func(err error) error { + if zerr := s.zfsDestroy(fs); zerr != nil { + err = fmt.Errorf("%s During cleanup: %s", err, zerr) + } + if shared.PathExists(subvol) { + if oserr := os.Remove(subvol); oserr != nil { + err = fmt.Errorf("%s During cleanup: Failed to remove sub-volume %s, %s", err, subvol, oserr) + } + } return err } + err = unpackImage(s.d, imagePath, subvol) + if err != nil { + return cleanup(err) + } + err = s.zfsSet(fs, "readonly", "on") if err != nil { - return err + return cleanup(err) } err = s.zfsSnapshotCreate(fs, "readonly") if err != nil { - return err + return cleanup(err) } return nil @@ -643,32 +680,37 @@ func (s *storageZfs) ImageDelete(fingerprint string) error { fs := fmt.Sprintf("images/%s", fingerprint) - removable, err := s.zfsSnapshotRemovable(fs, "readonly") - - if err != nil { - return err - } - - if removable { - err := s.zfsDestroy(fs) + if s.zfsExists(fs) { + removable, err := s.zfsSnapshotRemovable(fs, "readonly") if err != nil { return err } - } else { - err := s.zfsSet(fs, "mountpoint", "none") - if err != nil { - return err + + if removable { + err := s.zfsDestroy(fs) + if err != nil { + return err + } + } else { + err 
:= s.zfsSet(fs, "mountpoint", "none") + if err != nil { + return err + } + + err = s.zfsRename(fs, fmt.Sprintf("deleted/%s", fs)) + if err != nil { + return err + } } + } - err = s.zfsRename(fs, fmt.Sprintf("deleted/%s", fs)) + if shared.PathExists(shared.VarPath(fs + ".zfs")) { + err := os.Remove(shared.VarPath(fs + ".zfs")) if err != nil { return err } } - if shared.PathExists(shared.VarPath(fs + ".zfs")) { - os.Remove(shared.VarPath(fs + ".zfs")) - } return nil } @@ -775,19 +817,11 @@ } // Due to open fds or kernel refs, this may fail for a bit, give it 10s - var output []byte - for i := 0; i < 20; i++ { - output, err = exec.Command( - "zfs", - "destroy", - "-r", - fmt.Sprintf("%s/%s", s.zfsPool, path)).CombinedOutput() - - if err == nil { - break - } - time.Sleep(500 * time.Millisecond) - } + output, err := tryExec( + "zfs", + "destroy", + "-r", + fmt.Sprintf("%s/%s", s.zfsPool, path)) if err != nil { s.log.Error("zfs destroy failed", log.Ctx{"output": string(output)}) @@ -799,29 +833,64 @@ func (s *storageZfs) zfsCleanup(path string) error { if strings.HasPrefix(path, "deleted/") { + // Cleanup of filesystems kept for refcount reason removablePath, err := s.zfsSnapshotRemovable(path, "") if err != nil { return err } + // Confirm that there are no more clones if removablePath { - subPath := strings.SplitN(path, "@", 2)[0] - - origin, err := s.zfsGet(subPath, "origin") - if err != nil { - return err - } - origin = strings.TrimPrefix(origin, fmt.Sprintf("%s/", s.zfsPool)) - - err = s.zfsDestroy(subPath) - if err != nil { - return err + if strings.Contains(path, "@") { + // Cleanup snapshots + err = s.zfsDestroy(path) + if err != nil { + return err + } + + // Check if the parent can now be deleted + subPath := strings.SplitN(path, "@", 2)[0] + snaps, err := s.zfsListSnapshots(subPath) + if err != nil { + return err + } + + if len(snaps) == 0 { + err := s.zfsCleanup(subPath) + if err != nil { + return err + } + } + } else { + // Cleanup filesystems + origin, err := 
s.zfsGet(path, "origin") + if err != nil { + return err + } + origin = strings.TrimPrefix(origin, fmt.Sprintf("%s/", s.zfsPool)) + + err = s.zfsDestroy(path) + if err != nil { + return err + } + + // Attempt to remove its parent + if origin != "-" { + err := s.zfsCleanup(origin) + if err != nil { + return err + } + } } - s.zfsCleanup(origin) - return nil } + } else if strings.HasPrefix(path, "containers") && strings.Contains(path, "@copy-") { + // Just remove the copy- snapshot for copies of active containers + err := s.zfsDestroy(path) + if err != nil { + return err + } } return nil @@ -853,26 +922,13 @@ return strings.TrimRight(string(output), "\n"), nil } -func (s *storageZfs) zfsMount(path string) error { - output, err := exec.Command( - "zfs", - "mount", - fmt.Sprintf("%s/%s", s.zfsPool, path)).CombinedOutput() - if err != nil { - s.log.Error("zfs mount failed", log.Ctx{"output": string(output)}) - return fmt.Errorf("Failed to mount ZFS filesystem: %s", output) - } - - return nil -} - func (s *storageZfs) zfsRename(source string, dest string) error { - output, err := exec.Command( + output, err := tryExec( "zfs", "rename", "-p", fmt.Sprintf("%s/%s", s.zfsPool, source), - fmt.Sprintf("%s/%s", s.zfsPool, dest)).CombinedOutput() + fmt.Sprintf("%s/%s", s.zfsPool, dest)) if err != nil { if s.zfsExists(source) || !s.zfsExists(dest) { s.log.Error("zfs rename failed", log.Ctx{"output": string(output)}) @@ -926,10 +982,10 @@ } func (s *storageZfs) zfsSnapshotRestore(path string, name string) error { - output, err := exec.Command( + output, err := tryExec( "zfs", "rollback", - fmt.Sprintf("%s/%s@%s", s.zfsPool, path, name)).CombinedOutput() + fmt.Sprintf("%s/%s@%s", s.zfsPool, path, name)) if err != nil { s.log.Error("zfs rollback failed", log.Ctx{"output": string(output)}) return fmt.Errorf("Failed to restore ZFS snapshot: %s", output) @@ -950,10 +1006,10 @@ continue } - output, err := exec.Command( + output, err := tryExec( "zfs", "rollback", - fmt.Sprintf("%s/%s@%s", 
s.zfsPool, sub, name)).CombinedOutput() + fmt.Sprintf("%s/%s@%s", s.zfsPool, sub, name)) if err != nil { s.log.Error("zfs rollback failed", log.Ctx{"output": string(output)}) return fmt.Errorf("Failed to restore ZFS sub-volume snapshot: %s", output) @@ -978,11 +1034,24 @@ return nil } +func (s *storageZfs) zfsMount(path string) error { + output, err := tryExec( + "zfs", + "mount", + fmt.Sprintf("%s/%s", s.zfsPool, path)) + if err != nil { + s.log.Error("zfs mount failed", log.Ctx{"output": string(output)}) + return fmt.Errorf("Failed to mount ZFS filesystem: %s", output) + } + + return nil +} + func (s *storageZfs) zfsUnmount(path string) error { - output, err := exec.Command( + output, err := tryExec( "zfs", "unmount", - fmt.Sprintf("%s/%s", s.zfsPool, path)).CombinedOutput() + fmt.Sprintf("%s/%s", s.zfsPool, path)) if err != nil { s.log.Error("zfs unmount failed", log.Ctx{"output": string(output)}) return fmt.Errorf("Failed to unmount ZFS filesystem: %s", output) @@ -1099,6 +1168,14 @@ users := []string{} for _, subvol := range subvols { + path := strings.Split(subvol, "/") + + // Only care about plausible LXD paths + if !shared.StringInSlice(path[0], exceptions) { + continue + } + + // Ignore empty paths if shared.StringInSlice(subvol, exceptions) { continue } @@ -1110,7 +1187,7 @@ } // Global functions -func storageZFSSetPoolNameConfig(d *Daemon, poolname string) error { +func storageZFSValidatePoolName(d *Daemon, key string, value string) error { s := storageZfs{} // Confirm the backend is working @@ -1120,20 +1197,26 @@ } // Confirm the new pool exists and is compatible - if poolname != "" { - err = s.zfsCheckPool(poolname) + if value != "" { + err = s.zfsCheckPool(value) if err != nil { return fmt.Errorf("Invalid ZFS pool: %v", err) } } - // Check if we're switching pools - oldPoolname, err := d.ConfigValueGet("storage.zfs_pool_name") + // Confirm that the new pool is empty + s.zfsPool = value + subvols, err := s.zfsListSubvolumes("") if err != nil { return 
err } + if len(subvols) > 0 { + return fmt.Errorf("Provided ZFS pool (or dataset) isn't empty") + } + // Confirm the old pool isn't in use anymore + oldPoolname := daemonConfig["storage.zfs_pool_name"].Get() if oldPoolname != "" { s.zfsPool = oldPoolname @@ -1146,13 +1229,6 @@ return fmt.Errorf("Can not change ZFS config. Images or containers are still using the ZFS pool: %v", users) } } - s.zfsPool = poolname - - // All good, set the new pool name - err = d.ConfigValueSet("storage.zfs_pool_name", poolname) - if err != nil { - return err - } return nil } @@ -1193,16 +1269,16 @@ return err } - <-shared.WebsocketSendStream(conn, stdout) + <-shared.WebsocketSendStream(conn, stdout, 4*1024*1024) output, err := ioutil.ReadAll(stderr) if err != nil { - shared.Log.Error("problem reading zfs send stderr", "err", err) + shared.LogError("problem reading zfs send stderr", log.Ctx{"err": err}) } err = cmd.Wait() if err != nil { - shared.Log.Error("problem with zfs send", "output", string(output)) + shared.LogError("problem with zfs send", log.Ctx{"output": string(output)}) } return err @@ -1218,7 +1294,6 @@ lastSnap := "" for i, snap := range s.zfsSnapshotNames { - prev := "" if i > 0 { prev = s.zfsSnapshotNames[i-1] @@ -1270,6 +1345,10 @@ return MigrationFSType_ZFS } +func (s *storageZfs) PreservesInodes() bool { + return true +} + func (s *storageZfs) MigrationSource(ct container) (MigrationStorageSourceDriver, error) { /* If the container is a snapshot, let's just send that; we don't need * to send anything else, because that's all the user asked for. 
@@ -1317,7 +1396,7 @@ return &driver, nil } -func (s *storageZfs) MigrationSink(live bool, container container, snapshots []container, conn *websocket.Conn) error { +func (s *storageZfs) MigrationSink(live bool, container container, snapshots []*Snapshot, conn *websocket.Conn, srcIdmap *shared.IdmapSet) error { zfsRecv := func(zfsName string) error { zfsFsName := fmt.Sprintf("%s/%s", s.zfsPool, zfsName) args := []string{"receive", "-F", "-u", zfsFsName} @@ -1341,12 +1420,12 @@ output, err := ioutil.ReadAll(stderr) if err != nil { - shared.Debugf("problem reading zfs recv stderr %s", "err", err) + shared.LogDebug("problem reading zfs recv stderr %s", log.Ctx{"err": err}) } err = cmd.Wait() if err != nil { - shared.Log.Error("problem with zfs recv", "output", string(output)) + shared.LogError("problem with zfs recv", log.Ctx{"output": string(output)}) } return err } @@ -1356,39 +1435,31 @@ * this fs (it's empty anyway) before we zfs recv. N.B. that `zfs recv` * of a snapshot also needs tha actual fs that it has snapshotted * unmounted, so we do this before receiving anything. - * - * Further, `zfs unmount` doesn't actually unmount things right away, - * so we ask /proc/self/mountinfo whether or not this path is mounted - * before continuing so that we're sure the fs is actually unmounted - * before doing a recv. 
*/ zfsName := fmt.Sprintf("containers/%s", container.Name()) - fsPath := shared.VarPath(fmt.Sprintf("containers/%s.zfs", container.Name())) - for i := 0; i < 20; i++ { - if shared.IsMountPoint(fsPath) || s.zfsMounted(zfsName) { - if err := s.zfsUnmount(zfsName); err != nil { - shared.Log.Error("zfs umount error for", "path", zfsName, "err", err) - } - } else { - break - } - - time.Sleep(500 * time.Millisecond) + err := s.zfsUnmount(zfsName) + if err != nil { + return err } for _, snap := range snapshots { - fields := strings.SplitN(snap.Name(), shared.SnapshotDelimiter, 2) - name := fmt.Sprintf("containers/%s@snapshot-%s", fields[0], fields[1]) + args := snapshotProtobufToContainerArgs(container.Name(), snap) + _, err := containerCreateEmptySnapshot(container.Daemon(), args) + if err != nil { + return err + } + + name := fmt.Sprintf("containers/%s@snapshot-%s", container.Name(), snap.GetName()) if err := zfsRecv(name); err != nil { return err } - err := os.MkdirAll(shared.VarPath(fmt.Sprintf("snapshots/%s", fields[0])), 0700) + err = os.MkdirAll(shared.VarPath(fmt.Sprintf("snapshots/%s", container.Name())), 0700) if err != nil { return err } - err = os.Symlink("on-zfs", shared.VarPath(fmt.Sprintf("snapshots/%s/%s.zfs", fields[0], fields[1]))) + err = os.Symlink("on-zfs", shared.VarPath(fmt.Sprintf("snapshots/%s/%s.zfs", container.Name(), snap.GetName()))) if err != nil { return err } @@ -1396,14 +1467,15 @@ defer func() { /* clean up our migration-send snapshots that we got from recv. 
*/ - snapshots, err := s.zfsListSnapshots(fmt.Sprintf("containers/%s", container.Name())) + zfsSnapshots, err := s.zfsListSnapshots(fmt.Sprintf("containers/%s", container.Name())) if err != nil { - shared.Log.Error("failed listing snapshots post migration", "err", err) + shared.LogError("failed listing snapshots post migration", log.Ctx{"err": err}) return } - for _, snap := range snapshots { - if !strings.HasPrefix(snap, "migration-send") { + for _, snap := range zfsSnapshots { + // If we received a bunch of snapshots, remove the migration-send-* ones, if not, wipe any snapshot we got + if snapshots != nil && len(snapshots) > 0 && !strings.HasPrefix(snap, "migration-send") { continue } diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/lxd/util.go juju-core-2.0.0/src/github.com/lxc/lxd/lxd/util.go --- juju-core-2.0~beta15/src/github.com/lxc/lxd/lxd/util.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/lxd/util.go 2016-10-13 14:31:53.000000000 +0000 @@ -2,7 +2,9 @@ import ( "bytes" + "crypto/sha256" "encoding/json" + "fmt" "io" "net/http" @@ -27,3 +29,31 @@ return err } + +func etagHash(data interface{}) (string, error) { + etag := sha256.New() + err := json.NewEncoder(etag).Encode(data) + if err != nil { + return "", err + } + + return fmt.Sprintf("%x", etag.Sum(nil)), nil +} + +func etagCheck(r *http.Request, data interface{}) error { + match := r.Header.Get("If-Match") + if match == "" { + return nil + } + + hash, err := etagHash(data) + if err != nil { + return err + } + + if hash != match { + return fmt.Errorf("ETag doesn't match: %s vs %s", hash, match) + } + + return nil +} diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/lxd-bridge/lxd-bridge juju-core-2.0.0/src/github.com/lxc/lxd/lxd-bridge/lxd-bridge --- juju-core-2.0~beta15/src/github.com/lxc/lxd/lxd-bridge/lxd-bridge 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/lxd-bridge/lxd-bridge 1970-01-01 00:00:00.000000000 +0000 @@ -1,232 
+0,0 @@ -#!/bin/sh -config="/etc/default/lxd-bridge" -varrun="/run/lxd-bridge/" -varlib="/var/lib/lxd-bridge/" - -# lxdbr0 defaults to only setting up the standard IPv6 link-local network -# to enable routable IPv4 and/or IPv6, please edit /etc/default/lxd - -# The values below are defaults -USE_LXD_BRIDGE="true" -LXD_BRIDGE="lxdbr0" -LXD_CONFILE="" -LXD_DOMAIN="" - -# IPv4 -LXD_IPV4_ADDR="" -LXD_IPV4_NETMASK="" -LXD_IPV4_NETWORK="" -LXD_IPV4_DHCP_RANGE="" -LXD_IPV4_DHCP_MAX="" -LXD_IPV4_NAT="false" - -# IPv6 -LXD_IPV6_ADDR="" -LXD_IPV6_MASK="" -LXD_IPV6_NETWORK="" -LXD_IPV6_NAT="false" -LXD_IPV6_PROXY="true" - -[ ! -f "${config}" ] || . "${config}" - -use_iptables_lock="-w" -iptables -w -L -n > /dev/null 2>&1 || use_iptables_lock="" - -_netmask2cidr () -{ - # Assumes there's no "255." after a non-255 byte in the mask - local x=${1##*255.} - set -- "0^^^128^192^224^240^248^252^254^" "$(( (${#1} - ${#x})*2 ))" "${x%%.*}" - x=${1%%${3}*} - echo $(( ${2} + (${#x}/4) )) -} - -ifdown() { - ip addr flush dev "${1}" - ip link set dev "${1}" down -} - -ifup() { - ip addr add fe80::1/64 dev "${1}" - if [ -n "${LXD_IPV4_NETMASK}" ] && [ -n "${LXD_IPV4_ADDR}" ]; then - MASK=$(_netmask2cidr ${LXD_IPV4_NETMASK}) - CIDR_ADDR="${LXD_IPV4_ADDR}/${MASK}" - ip addr add "${CIDR_ADDR}" dev "${1}" - fi - ip link set dev "${1}" up -} - -start() { - [ "x${USE_LXD_BRIDGE}" = "xtrue" ] || { exit 0; } - - [ ! -f "${varrun}/network_up" ] || { echo "lxd-bridge is already running"; exit 1; } - - if [ -d /sys/class/net/${LXD_BRIDGE} ]; then - stop force 2>/dev/null || true - fi - - FAILED=1 - - cleanup() { - set +e - if [ "${FAILED}" = "1" ]; then - echo "Failed to setup lxd-bridge." >&2 - stop force - fi - } - - trap cleanup EXIT HUP INT TERM - set -e - - # set up the lxd network - [ ! 
-d "/sys/class/net/${LXD_BRIDGE}" ] && ip link add dev "${LXD_BRIDGE}" type bridge - - echo 0 > "/proc/sys/net/ipv6/conf/${LXD_BRIDGE}/autoconf" || true - echo 0 > "/proc/sys/net/ipv6/conf/${LXD_BRIDGE}/accept_dad" || true - - # if we are run from systemd on a system with selinux enabled, - # the mkdir will create /run/lxd as init_var_run_t which dnsmasq - # can't write its pid into, so we restorecon it (to var_run_t) - if [ ! -d "${varrun}" ]; then - mkdir -p "${varrun}" - if which restorecon >/dev/null 2>&1; then - restorecon "${varrun}" - fi - fi - - if [ ! -d "${varlib}" ]; then - mkdir -p "${varlib}" - if which restorecon >/dev/null 2>&1; then - restorecon "${varlib}" - fi - fi - - ifup "${LXD_BRIDGE}" "${LXD_IPV4_ADDR}" "${LXD_IPV4_NETMASK}" - - LXD_IPV4_ARG="" - if [ -n "${LXD_IPV4_ADDR}" ] && [ -n "${LXD_IPV4_NETMASK}" ] && [ -n "${LXD_IPV4_NETWORK}" ]; then - echo 1 > /proc/sys/net/ipv4/ip_forward - if [ "${LXD_IPV4_NAT}" = "true" ]; then - iptables "${use_iptables_lock}" -t nat -A POSTROUTING -s "${LXD_IPV4_NETWORK}" ! -d "${LXD_IPV4_NETWORK}" -j MASQUERADE - fi - LXD_IPV4_ARG="--listen-address ${LXD_IPV4_ADDR} --dhcp-range ${LXD_IPV4_DHCP_RANGE} --dhcp-lease-max=${LXD_IPV4_DHCP_MAX}" - fi - - LXD_IPV6_ARG="" - if [ -n "${LXD_IPV6_ADDR}" ] && [ -n "${LXD_IPV6_MASK}" ] && [ -n "${LXD_IPV6_NETWORK}" ]; then - # IPv6 sysctls don't respect the "all" path... - for interface in /proc/sys/net/ipv6/conf/*; do - echo 2 > "${interface}/accept_ra" - done - - for interface in /proc/sys/net/ipv6/conf/*; do - echo 1 > "${interface}/forwarding" - done - - ip -6 addr add dev "${LXD_BRIDGE}" "${LXD_IPV6_ADDR}/${LXD_IPV6_MASK}" - if [ "${LXD_IPV6_NAT}" = "true" ]; then - ip6tables "${use_iptables_lock}" -t nat -A POSTROUTING -s "${LXD_IPV6_NETWORK}" ! 
-d "${LXD_IPV6_NETWORK}" -j MASQUERADE - fi - LXD_IPV6_ARG="--dhcp-range=${LXD_IPV6_ADDR},ra-only --listen-address ${LXD_IPV6_ADDR}" - fi - - iptables "${use_iptables_lock}" -I INPUT -i "${LXD_BRIDGE}" -p udp --dport 67 -j ACCEPT - iptables "${use_iptables_lock}" -I INPUT -i "${LXD_BRIDGE}" -p tcp --dport 67 -j ACCEPT - iptables "${use_iptables_lock}" -I INPUT -i "${LXD_BRIDGE}" -p udp --dport 53 -j ACCEPT - iptables "${use_iptables_lock}" -I INPUT -i "${LXD_BRIDGE}" -p tcp --dport 53 -j ACCEPT - iptables "${use_iptables_lock}" -I FORWARD -i "${LXD_BRIDGE}" -j ACCEPT - iptables "${use_iptables_lock}" -I FORWARD -o "${LXD_BRIDGE}" -j ACCEPT - iptables "${use_iptables_lock}" -t mangle -A POSTROUTING -o "${LXD_BRIDGE}" -p udp -m udp --dport 68 -j CHECKSUM --checksum-fill - - LXD_DOMAIN_ARG="" - if [ -n "${LXD_DOMAIN}" ]; then - LXD_DOMAIN_ARG="-s ${LXD_DOMAIN} -S /${LXD_DOMAIN}/" - fi - - LXD_CONFILE_ARG="" - if [ -n "${LXD_CONFILE}" ]; then - LXD_CONFILE_ARG="--conf-file=${LXD_CONFILE}" - fi - - # https://lists.linuxcontainers.org/pipermail/lxc-devel/2014-October/010561.html - for DNSMASQ_USER in lxd-dnsmasq dnsmasq nobody - do - if getent passwd "${DNSMASQ_USER}" >/dev/null; then - break - fi - done - - if [ -n "${LXD_IPV4_ADDR}" ] || [ -n "${LXD_IPV6_ADDR}" ]; then - # shellcheck disable=SC2086 - dnsmasq ${LXD_CONFILE_ARG} ${LXD_DOMAIN_ARG} -u "${DNSMASQ_USER}" --strict-order --bind-interfaces --pid-file="${varrun}/dnsmasq.pid" --dhcp-no-override --except-interface=lo --interface="${LXD_BRIDGE}" --dhcp-leasefile="${varlib}/dnsmasq.${LXD_BRIDGE}.leases" --dhcp-authoritative ${LXD_IPV4_ARG} ${LXD_IPV6_ARG} || cleanup - fi - - if [ "${LXD_IPV6_PROXY}" = "true" ]; then - PATH="${PATH}:$(dirname "${0}")" lxd-bridge-proxy --addr="[fe80::1%${LXD_BRIDGE}]:3128" & - PID=$! 
- echo "${PID}" > "${varrun}/proxy.pid" - fi - - touch "${varrun}/network_up" - FAILED=0 -} - -stop() { - [ -f "${varrun}/network_up" ] || [ "${1}" = "force" ] || { echo "lxd-bridge isn't running"; exit 1; } - - if [ -d /sys/class/net/${LXD_BRIDGE} ]; then - ifdown ${LXD_BRIDGE} - iptables ${use_iptables_lock} -D INPUT -i ${LXD_BRIDGE} -p udp --dport 67 -j ACCEPT - iptables ${use_iptables_lock} -D INPUT -i ${LXD_BRIDGE} -p tcp --dport 67 -j ACCEPT - iptables ${use_iptables_lock} -D INPUT -i ${LXD_BRIDGE} -p udp --dport 53 -j ACCEPT - iptables ${use_iptables_lock} -D INPUT -i ${LXD_BRIDGE} -p tcp --dport 53 -j ACCEPT - iptables ${use_iptables_lock} -D FORWARD -i ${LXD_BRIDGE} -j ACCEPT - iptables ${use_iptables_lock} -D FORWARD -o ${LXD_BRIDGE} -j ACCEPT - iptables ${use_iptables_lock} -t mangle -D POSTROUTING -o ${LXD_BRIDGE} -p udp -m udp --dport 68 -j CHECKSUM --checksum-fill - - if [ -n "${LXD_IPV4_NETWORK}" ] && [ "${LXD_IPV4_NAT}" = "true" ]; then - iptables ${use_iptables_lock} -t nat -D POSTROUTING -s ${LXD_IPV4_NETWORK} ! -d ${LXD_IPV4_NETWORK} -j MASQUERADE - fi - - if [ -n "${LXD_IPV6_NETWORK}" ] && [ "${LXD_IPV6_NAT}" = "true" ]; then - ip6tables ${use_iptables_lock} -t nat -D POSTROUTING -s ${LXD_IPV6_NETWORK} ! -d ${LXD_IPV6_NETWORK} -j MASQUERADE - fi - - if [ -e "${varrun}/dnsmasq.pid" ]; then - pid=$(cat "${varrun}/dnsmasq.pid" 2>/dev/null) && kill -9 "${pid}" - rm -f "${varrun}/dnsmasq.pid" - fi - - if [ -e "${varrun}/proxy.pid" ]; then - pid=$(cat "${varrun}/proxy.pid" 2>/dev/null) && kill -9 "${pid}" - rm -f "${varrun}/proxy.pid" - fi - - # if ${LXD_BRIDGE} has attached interfaces, don't destroy the bridge - ls /sys/class/net/${LXD_BRIDGE}/brif/* > /dev/null 2>&1 || ip link delete "${LXD_BRIDGE}" - fi - - rm -f "${varrun}/network_up" -} - -# See how we were called. 
-case "${1}" in - start) - start - ;; - - stop) - stop - ;; - - restart|reload|force-reload) - ${0} stop - ${0} start - ;; - - *) - echo "Usage: ${0} {start|stop|restart|reload|force-reload}" - exit 2 -esac - -exit $? diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/lxd-bridge/lxd-bridge-proxy/main.go juju-core-2.0.0/src/github.com/lxc/lxd/lxd-bridge/lxd-bridge-proxy/main.go --- juju-core-2.0~beta15/src/github.com/lxc/lxd/lxd-bridge/lxd-bridge-proxy/main.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/lxd-bridge/lxd-bridge-proxy/main.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,25 +0,0 @@ -package main - -import ( - "flag" - "fmt" - "log" - "net/http" - "net/http/httputil" -) - -func NewProxy() *httputil.ReverseProxy { - director := func(req *http.Request) { - if req.Method == "CONNECT" { - fmt.Printf("CONNECT: %s\n", req.Host) - } - } - return &httputil.ReverseProxy{Director: director} -} - -func main() { - addr := flag.String("addr", "[fe80::1%lxdbr0]:3128", "proxy listen address") - flag.Parse() - - log.Fatal(http.ListenAndServe(*addr, NewProxy())) -} diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/Makefile juju-core-2.0.0/src/github.com/lxc/lxd/Makefile --- juju-core-2.0~beta15/src/github.com/lxc/lxd/Makefile 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/Makefile 2016-10-13 14:31:53.000000000 +0000 @@ -12,7 +12,8 @@ .PHONY: default default: - # Must run twice due to go get race + # Must a few times due to go get race + -go get -t -v -d ./... -go get -t -v -d ./... -go get -t -v -d ./... go install -v $(DEBUG) ./... @@ -20,7 +21,8 @@ .PHONY: client client: - # Must run twice due to go get race + # Must a few times due to go get race + -go get -t -v -d ./... -go get -t -v -d ./... -go get -t -v -d ./... go install -v $(DEBUG) ./lxc @@ -28,7 +30,8 @@ .PHONY: update update: - # Must run twice due to go get race + # Must a few times due to go get race + -go get -t -v -d -u ./... 
-go get -t -v -d -u ./... go get -t -v -d -u ./... @echo "Dependencies updated" @@ -42,9 +45,8 @@ .PHONY: check check: default + go get -v -x github.com/rogpeppe/godeps go get -v -x github.com/remyoudompheng/go-misc/deadcode - @# go vet can (and does on go < 1.5) fail - go get -v -x golang.org/x/tools/cmd/vet || true go test -v ./... cd test && ./main.sh @@ -54,16 +56,20 @@ .PHONY: dist dist: + $(eval TMP := $(shell mktemp -d)) rm -Rf lxd-$(VERSION) $(ARCHIVE) $(ARCHIVE).gz - mkdir -p lxd-$(VERSION)/dist - -GOPATH=$(shell pwd)/lxd-$(VERSION)/dist go get -t -v -d ./... - GOPATH=$(shell pwd)/lxd-$(VERSION)/dist go get -t -v -d ./... - rm -rf $(shell pwd)/lxd-$(VERSION)/dist/src/github.com/lxc/lxd - ln -s ../../../.. ./lxd-$(VERSION)/dist/src/github.com/lxc/lxd + mkdir -p lxd-$(VERSION)/ + -GOPATH=$(TMP) go get -t -v -d ./... + -GOPATH=$(TMP) go get -t -v -d ./... + -GOPATH=$(TMP) go get -t -v -d ./... + GOPATH=$(TMP) go get -t -v -d ./... + rm -rf $(TMP)/src/github.com/lxc/lxd + ln -s ../../../.. 
$(TMP)/src/github.com/lxc/lxd + mv $(TMP)/ lxd-$(VERSION)/dist git archive --prefix=lxd-$(VERSION)/ --output=$(ARCHIVE) HEAD tar -uf $(ARCHIVE) --exclude-vcs lxd-$(VERSION)/ gzip -9 $(ARCHIVE) - rm -Rf dist lxd-$(VERSION) $(ARCHIVE) + rm -Rf lxd-$(VERSION) $(ARCHIVE) .PHONY: i18n update-po update-pot build-mo static-analysis i18n: update-pot @@ -81,7 +87,7 @@ done update-pot: - go get -v -x github.com/ubuntu-core/snappy/i18n/xgettext-go/ + go get -v -x github.com/snapcore/snapd/i18n/xgettext-go/ xgettext-go -o po/$(DOMAIN).pot --add-comments-tag=TRANSLATORS: --sort-output --package-name=$(DOMAIN) --msgid-bugs-address=lxc-devel@lists.linuxcontainers.org --keyword=i18n.G --keyword-plural=i18n.NG *.go shared/*.go lxc/*.go lxd/*.go diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/po/de.po juju-core-2.0.0/src/github.com/lxc/lxd/po/de.po --- juju-core-2.0~beta15/src/github.com/lxc/lxd/po/de.po 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/po/de.po 2016-10-13 14:31:53.000000000 +0000 @@ -7,7 +7,7 @@ msgstr "" "Project-Id-Version: LXD\n" "Report-Msgid-Bugs-To: lxc-devel@lists.linuxcontainers.org\n" -"POT-Creation-Date: 2016-02-10 22:15-0500\n" +"POT-Creation-Date: 2016-08-30 17:59-0400\n" "PO-Revision-Date: 2015-06-13 06:10+0200\n" "Last-Translator: Felix Engelmann \n" "Language-Team: \n" @@ -16,7 +16,19 @@ "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" -#: lxc/config.go:36 +#: lxc/info.go:140 +msgid " Disk usage:" +msgstr "" + +#: lxc/info.go:163 +msgid " Memory usage:" +msgstr "" + +#: lxc/info.go:180 +msgid " Network usage:" +msgstr "" + +#: lxc/config.go:37 #, fuzzy msgid "" "### This is a yaml representation of the configuration.\n" @@ -55,7 +67,7 @@ "###\n" "### Der Name wird zwar angezeigt, lässt sich jedoch nicht ändern.\n" -#: lxc/image.go:29 +#: lxc/image.go:85 #, fuzzy msgid "" "### This is a yaml representation of the image properties.\n" @@ -72,7 +84,7 @@ "### Zum Beispiel:\n" "### 
description: Mein eigenes Abbild\n" -#: lxc/profile.go:26 +#: lxc/profile.go:27 #, fuzzy msgid "" "### This is a yaml representation of the profile.\n" @@ -88,7 +100,7 @@ "### devices:\n" "### eth0:\n" "### nictype: bridged\n" -"### parent: lxcbr0\n" +"### parent: lxdbr0\n" "### type: nic\n" "###\n" "### Note that the name is shown but cannot be changed" @@ -106,12 +118,12 @@ "### devices:\n" "### eth0:\n" "### nictype: bridged\n" -"### parent: lxcbr0\n" +"### parent: lxdbr0\n" "### type: nic\n" "###\n" "### Der Name wird zwar angezeigt, lässt sich jedoch nicht ändern.\n" -#: lxc/image.go:500 +#: lxc/image.go:617 #, c-format msgid "%s (%d more)" msgstr "" @@ -121,136 +133,162 @@ msgid "'/' not allowed in snapshot name" msgstr "'/' ist kein gültiges Zeichen im Namen eines Sicherungspunktes\n" -#: lxc/info.go:109 lxc/profile.go:221 +#: lxc/profile.go:254 msgid "(none)" msgstr "" -#: lxc/image.go:520 lxc/image.go:542 +#: lxc/image.go:638 lxc/image.go:680 msgid "ALIAS" msgstr "" -#: lxc/image.go:524 +#: lxc/image.go:642 msgid "ARCH" msgstr "" -#: lxc/remote.go:46 +#: lxc/list.go:425 +msgid "ARCHITECTURE" +msgstr "" + +#: lxc/remote.go:53 msgid "Accept certificate" msgstr "Akzeptiere Zertifikat" -#: lxc/remote.go:181 +#: lxc/remote.go:269 #, c-format msgid "Admin password for %s: " msgstr "Administrator Passwort für %s: " -#: lxc/image.go:281 +#: lxc/image.go:365 #, fuzzy msgid "Aliases:" msgstr "Aliasse:\n" -#: lxc/exec.go:53 +#: lxc/exec.go:54 msgid "An environment variable of the form HOME=/home/foo" msgstr "" -#: lxc/image.go:264 +#: lxc/image.go:348 lxc/info.go:90 #, fuzzy, c-format msgid "Architecture: %s" msgstr "Architektur: %s\n" +#: lxc/image.go:369 +#, c-format +msgid "Auto update: %s" +msgstr "" + #: lxc/help.go:49 msgid "Available commands:" msgstr "" -#: lxc/config.go:264 +#: lxc/info.go:172 +msgid "Bytes received" +msgstr "" + +#: lxc/info.go:173 +msgid "Bytes sent" +msgstr "" + +#: lxc/config.go:274 msgid "COMMON NAME" msgstr "" -#: lxc/config.go:111 +#: 
lxc/list.go:426 +msgid "CREATED AT" +msgstr "" + +#: lxc/config.go:114 #, c-format msgid "Can't read from stdin: %s" msgstr "" -#: lxc/config.go:124 lxc/config.go:157 lxc/config.go:179 +#: lxc/config.go:127 lxc/config.go:160 lxc/config.go:182 #, c-format msgid "Can't unset key '%s', it's not currently set." msgstr "" -#: lxc/profile.go:329 +#: lxc/profile.go:420 msgid "Cannot provide container name to list" msgstr "" -#: lxc/remote.go:147 +#: lxc/remote.go:219 #, fuzzy, c-format msgid "Certificate fingerprint: %x" msgstr "Fingerabdruck des Zertifikats: % x\n" -#: lxc/action.go:27 +#: lxc/action.go:33 #, fuzzy, c-format msgid "" "Changes state of one or more containers to %s.\n" "\n" -"lxc %s [...]" +"lxc %s [...]%s" msgstr "" "Ändert den Laufzustand eines Containers in %s.\n" "\n" "lxd %s \n" -#: lxc/remote.go:204 +#: lxc/remote.go:292 msgid "Client certificate stored at server: " msgstr "Gespeichertes Nutzerzertifikat auf dem Server: " -#: lxc/list.go:80 +#: lxc/list.go:121 lxc/list.go:122 msgid "Columns" msgstr "" -#: lxc/init.go:132 lxc/init.go:133 lxc/launch.go:36 lxc/launch.go:37 +#: lxc/copy.go:31 lxc/copy.go:32 lxc/init.go:134 lxc/init.go:135 +#: lxc/launch.go:40 lxc/launch.go:41 #, fuzzy msgid "Config key/value to apply to the new container" msgstr "kann nicht zum selben Container Namen kopieren" -#: lxc/config.go:490 lxc/config.go:555 lxc/image.go:597 lxc/profile.go:185 +#: lxc/config.go:531 lxc/config.go:596 lxc/image.go:734 lxc/profile.go:218 #, fuzzy, c-format msgid "Config parsing error: %s" msgstr "YAML Analyse Fehler %v\n" -#: lxc/main.go:37 +#: lxc/main.go:29 msgid "Connection refused; is LXD running?" 
msgstr "" -#: lxc/publish.go:54 +#: lxc/publish.go:59 msgid "Container name is mandatory" msgstr "" -#: lxc/init.go:206 +#: lxc/copy.go:140 lxc/copy.go:228 lxc/init.go:210 #, c-format msgid "Container name is: %s" msgstr "" -#: lxc/publish.go:81 lxc/publish.go:101 +#: lxc/publish.go:141 lxc/publish.go:156 #, fuzzy, c-format msgid "Container published with fingerprint: %s" msgstr "Abbild mit Fingerabdruck %s importiert\n" -#: lxc/image.go:116 +#: lxc/image.go:166 msgid "Copy aliases from source" msgstr "Kopiere Aliasse von der Quelle" -#: lxc/copy.go:22 +#: lxc/copy.go:24 #, fuzzy msgid "" "Copy containers within or in between lxd instances.\n" "\n" -"lxc copy [remote:] [remote:] [--" -"ephemeral|e]" +"lxc copy [remote:] [[remote:]] [--" +"ephemeral|e] [--profile|-p ...] [--config|-c ...]" msgstr "" "Kopiert Container innerhalb einer oder zwischen lxd Instanzen\n" "\n" "lxc copy \n" -#: lxc/image.go:211 +#: lxc/image.go:280 #, c-format msgid "Copying the image: %s" msgstr "" +#: lxc/remote.go:234 +msgid "Could not create server cert dir" +msgstr "Kann Verzeichnis für Zertifikate auf dem Server nicht erstellen" + #: lxc/snapshot.go:21 msgid "" "Create a read-only snapshot of a container.\n" @@ -271,22 +309,22 @@ "lxc snapshot u1 snap0" msgstr "" -#: lxc/image.go:269 lxc/info.go:84 +#: lxc/image.go:353 lxc/info.go:92 #, c-format msgid "Created: %s" msgstr "" -#: lxc/init.go:175 lxc/launch.go:112 +#: lxc/init.go:177 lxc/launch.go:118 #, c-format msgid "Creating %s" msgstr "" -#: lxc/init.go:173 +#: lxc/init.go:175 #, fuzzy msgid "Creating the container" msgstr "kann nicht zum selben Container Namen kopieren" -#: lxc/image.go:523 +#: lxc/image.go:641 lxc/image.go:682 msgid "DESCRIPTION" msgstr "" @@ -306,38 +344,38 @@ "Entfernt einen Container (oder Sicherungspunkt) und alle dazugehörigen\n" "Daten (Konfiguration, Sicherungspunkte, ...).\n" -#: lxc/config.go:603 +#: lxc/config.go:648 #, fuzzy, c-format msgid "Device %s added to %s" msgstr "Gerät %s wurde zu %s 
hinzugefügt\n" -#: lxc/config.go:631 +#: lxc/config.go:835 #, fuzzy, c-format msgid "Device %s removed from %s" msgstr "Gerät %s wurde von %s entfernt\n" -#: lxc/list.go:228 +#: lxc/list.go:570 msgid "EPHEMERAL" msgstr "" -#: lxc/config.go:266 +#: lxc/config.go:276 msgid "EXPIRY DATE" msgstr "" -#: lxc/main.go:55 +#: lxc/main.go:41 msgid "Enables debug mode." msgstr "Aktiviert Debug Modus" -#: lxc/main.go:54 +#: lxc/main.go:40 msgid "Enables verbose mode." msgstr "Aktiviert ausführliche Ausgabe" -#: lxc/help.go:68 +#: lxc/help.go:69 msgid "Environment:" msgstr "" -#: lxc/copy.go:29 lxc/copy.go:30 lxc/init.go:136 lxc/init.go:137 -#: lxc/launch.go:40 lxc/launch.go:41 +#: lxc/copy.go:35 lxc/copy.go:36 lxc/init.go:138 lxc/init.go:139 +#: lxc/launch.go:44 lxc/launch.go:45 msgid "Ephemeral container" msgstr "Flüchtiger Container" @@ -345,37 +383,44 @@ msgid "Event type to listen for" msgstr "" -#: lxc/exec.go:27 +#: lxc/exec.go:45 #, fuzzy msgid "" "Execute the specified command in a container.\n" "\n" "lxc exec [remote:]container [--mode=auto|interactive|non-interactive] [--env " -"EDITOR=/usr/bin/vim]... " +"EDITOR=/usr/bin/vim]... \n" +"\n" +"Mode defaults to non-interactive, interactive mode is selected if both stdin " +"AND stdout are terminals (stderr is ignored)." msgstr "" "Führt den angegebenen Befehl in einem Container aus.\n" "\n" "lxc exec [--env EDITOR=/usr/bin/vim]... 
\n" -#: lxc/image.go:273 +#: lxc/image.go:357 #, c-format msgid "Expires: %s" msgstr "" -#: lxc/image.go:275 +#: lxc/image.go:359 msgid "Expires: never" msgstr "" -#: lxc/config.go:263 lxc/image.go:521 lxc/image.go:543 +#: lxc/config.go:273 lxc/image.go:639 lxc/image.go:681 msgid "FINGERPRINT" msgstr "" -#: lxc/image.go:255 +#: lxc/list.go:124 +msgid "Fast mode (same as --columns=nsacPt" +msgstr "" + +#: lxc/image.go:346 #, fuzzy, c-format msgid "Fingerprint: %s" msgstr "Fingerabdruck: %s\n" -#: lxc/finger.go:17 +#: lxc/finger.go:15 #, fuzzy msgid "" "Fingers the LXD instance to check if it is up and working.\n" @@ -386,12 +431,7 @@ "\n" "lxc finger \n" -#: lxc/main.go:156 -#, fuzzy -msgid "For example: 'lxd-images import ubuntu --alias ubuntu'." -msgstr "Zum Beispiel: 'lxd-images import ubuntu --alias ubuntu'.\n" - -#: lxc/action.go:36 +#: lxc/action.go:42 lxc/action.go:43 msgid "Force the container to shutdown." msgstr "Herunterfahren des Containers erzwingen." @@ -399,55 +439,60 @@ msgid "Force the removal of stopped containers." msgstr "" -#: lxc/main.go:56 +#: lxc/main.go:42 msgid "Force using the local unix socket." msgstr "" -#: lxc/main.go:148 +#: lxc/image.go:169 lxc/list.go:123 +msgid "Format" +msgstr "" + +#: lxc/remote.go:67 #, fuzzy msgid "Generating a client certificate. This may take a minute..." msgstr "Generiere Nutzerzertifikat. Dies kann wenige Minuten dauern...\n" -#: lxc/list.go:226 +#: lxc/list.go:423 msgid "IPV4" msgstr "" -#: lxc/list.go:227 +#: lxc/list.go:424 msgid "IPV6" msgstr "" -#: lxc/config.go:265 +#: lxc/config.go:275 msgid "ISSUE DATE" msgstr "" -#: lxc/main.go:155 -#, fuzzy +#: lxc/main.go:136 msgid "" -"If this is your first run, you will need to import images using the 'lxd-" -"images' script." 
+"If this is your first time using LXD, you should also run: sudo lxd init" msgstr "" -"Falls dies der erste Start ist, sollten Sie mit dem 'lxd-images' Script " -"Abbilder importieren.\n" -#: lxc/main.go:57 +#: lxc/main.go:43 msgid "Ignore aliases when determining what command to run." msgstr "" -#: lxc/image.go:216 +#: lxc/action.go:46 +#, fuzzy +msgid "Ignore the container state (only for start)." +msgstr "Herunterfahren des Containers erzwingen." + +#: lxc/image.go:285 msgid "Image copied successfully!" msgstr "" -#: lxc/image.go:339 +#: lxc/image.go:442 #, fuzzy, c-format msgid "Image imported with fingerprint: %s" msgstr "Abbild mit Fingerabdruck %s importiert\n" -#: lxc/info.go:95 +#: lxc/image.go:429 #, c-format -msgid "Init: %d" +msgid "Importing the image: %s" msgstr "" -#: lxc/init.go:21 +#: lxc/init.go:73 #, fuzzy msgid "" "Initialize a container from a particular image.\n" @@ -475,29 +520,47 @@ "Beispiel:\n" "lxc launch ubuntu u1\n" -#: lxc/init.go:63 lxc/init.go:68 +#: lxc/remote.go:137 +#, c-format +msgid "Invalid URL scheme \"%s\" in \"%s\"" +msgstr "" + +#: lxc/config.go:254 +#, fuzzy +msgid "Invalid certificate" +msgstr "Akzeptiere Zertifikat" + +#: lxc/init.go:30 lxc/init.go:35 msgid "Invalid configuration key" msgstr "" -#: lxc/file.go:181 +#: lxc/file.go:195 #, c-format msgid "Invalid source %s" msgstr "Ungültige Quelle %s" -#: lxc/file.go:58 +#: lxc/file.go:57 #, c-format msgid "Invalid target %s" msgstr "Ungültiges Ziel %s" -#: lxc/info.go:97 +#: lxc/info.go:121 msgid "Ips:" msgstr "" -#: lxc/main.go:35 -msgid "LXD socket not found; is LXD running?" +#: lxc/image.go:167 +msgid "Keep the image up to date after initial copy" +msgstr "" + +#: lxc/list.go:427 +msgid "LAST USED AT" +msgstr "" + +#: lxc/main.go:27 +msgid "LXD socket not found; is LXD installed and running?" 
msgstr "" -#: lxc/launch.go:20 +#: lxc/launch.go:22 #, fuzzy msgid "" "Launch a container from a particular image.\n" @@ -511,7 +574,7 @@ "Specifying \"-p\" with no argument will result in no profile.\n" "\n" "Example:\n" -"lxc launch ubuntu u1" +"lxc launch ubuntu:16.04 u1" msgstr "" "Starte Container von gegebenem Abbild.\n" "\n" @@ -528,11 +591,13 @@ #: lxc/info.go:25 #, fuzzy msgid "" -"List information on containers.\n" +"List information on LXD servers and containers.\n" "\n" -"This will support remotes and images as well, but only containers for now.\n" +"For a container:\n" +" lxc info [:]container [--show-log]\n" "\n" -"lxc info [:]container [--show-log]" +"For a server:\n" +" lxc info [:]" msgstr "" "Listet Informationen über Container.\n" "\n" @@ -541,32 +606,62 @@ "\n" "lxc info [:]Container [--show-log]\n" -#: lxc/list.go:54 +#: lxc/list.go:68 #, fuzzy msgid "" "Lists the available resources.\n" "\n" -"lxc list [resource] [filters] -c [columns]\n" +"lxc list [resource] [filters] [--format table|json] [-c columns] [--fast]\n" "\n" "The filters are:\n" -"* A single keyword like \"web\" which will list any container with \"web\" " -"in its name.\n" +"* A single keyword like \"web\" which will list any container with a name " +"starting by \"web\".\n" +"* A regular expression on the container name. (e.g. .*web.*01$)\n" "* A key/value pair referring to a configuration item. 
For those, the " "namespace can be abreviated to the smallest unambiguous identifier:\n" -"* \"user.blah=abc\" will list all containers with the \"blah\" user property " -"set to \"abc\"\n" -"* \"u.blah=abc\" will do the same\n" -"* \"security.privileged=1\" will list all privileged containers\n" -"* \"s.privileged=1\" will do the same\n" -"\n" -"The columns are:\n" +" * \"user.blah=abc\" will list all containers with the \"blah\" user " +"property set to \"abc\".\n" +" * \"u.blah=abc\" will do the same\n" +" * \"security.privileged=1\" will list all privileged containers\n" +" * \"s.privileged=1\" will do the same\n" +"* A regular expression matching a configuration item or its value. (e.g. " +"volatile.eth0.hwaddr=00:16:3e:.*)\n" +"\n" +"The -c option takes a comma separated list of arguments that control\n" +"which container attributes to output when displaying in table format.\n" +"Column arguments are either pre-defined shorthand chars (see below),\n" +"or (extended) config keys. Commas between consecutive shorthand chars\n" +"are optional.\n" +"\n" +"Pre-defined shorthand chars:\n" +"* 4 - IPv4 address\n" +"* 6 - IPv6 address\n" +"* a - architecture\n" +"* c - creation date\n" +"* l - last used date\n" "* n - name\n" +"* p - pid of container init process\n" +"* P - profiles\n" "* s - state\n" -"* 4 - IP4\n" -"* 6 - IP6\n" -"* e - ephemeral\n" -"* S - snapshots\n" -"* p - pid of container init process" +"* S - number of snapshots\n" +"* t - type (persistent or ephemeral)\n" +"\n" +"Config key syntax: key[:name][:maxWidth]\n" +"* key - The (extended) config key to display\n" +"* name - Name to display in the column header, defaults to the key\n" +" if not specified or if empty (to allow defining maxWidth\n" +" without a custom name, e.g. 
user.key::0)\n" +"* maxWidth - Max width of the column (longer results are truncated).\n" +" -1 == unlimited\n" +" 0 == width of column header\n" +" >0 == max width in chars\n" +" Default is -1 (unlimited)\n" +"\n" +"Default column layout: ns46tS\n" +"Fast column layout: nsacPt\n" +"\n" +"Example: lxc list -c n,volatile.base_image:\"BASE IMAGE\":0,s46,volatile." +"eth0.hwaddr:MAC\n" msgstr "" "Listet vorhandene Ressourcen.\n" "\n" @@ -583,20 +678,20 @@ "* \"security.privileged=1\" listet alle privilegierten Container\n" "* \"s.privileged=1\" ebenfalls\n" -#: lxc/info.go:151 +#: lxc/info.go:225 msgid "Log:" msgstr "" -#: lxc/image.go:115 +#: lxc/image.go:165 msgid "Make image public" msgstr "Veröffentliche Abbild" -#: lxc/publish.go:29 +#: lxc/publish.go:32 #, fuzzy msgid "Make the image public" msgstr "Veröffentliche Abbild" -#: lxc/profile.go:46 +#: lxc/profile.go:48 #, fuzzy msgid "" "Manage configuration profiles.\n" @@ -608,28 +703,43 @@ "specified remote.\n" "lxc profile get Get profile configuration.\n" "lxc profile set Set profile configuration.\n" +"lxc profile unset Unset profile configuration.\n" "lxc profile delete Delete a profile.\n" "lxc profile edit \n" " Edit profile, either by launching external editor or reading STDIN.\n" " Example: lxc profile edit # launch editor\n" " cat profile.yml | lxc profile edit # read from " "profile.yml\n" -"lxc profile apply \n" -" Apply a comma-separated list of profiles to a container, in order.\n" +"\n" +"lxc profile assign \n" +" Assign a comma-separated list of profiles to a container, in order.\n" " All profiles passed in this call (and only those) will be applied\n" -" to the specified container.\n" -" Example: lxc profile apply foo default,bar # Apply default and bar\n" -" lxc profile apply foo default # Only default is active\n" -" lxc profile apply '' # no profiles are applied anymore\n" -" lxc profile apply bar,default # Apply default second now\n" +" to the specified container, i.e. 
it sets the list of profiles exactly " +"to\n" +" those specified in this command. To add/remove a particular profile from " +"a\n" +" container, use {add|remove} below.\n" +" Example: lxc profile assign foo default,bar # Apply default and bar\n" +" lxc profile assign foo default # Only default is active\n" +" lxc profile assign '' # no profiles are applied anymore\n" +" lxc profile assign bar,default # Apply default second now\n" +"lxc profile add # add a profile to a container\n" +"lxc profile remove # remove the profile from a " +"container\n" "\n" "Devices:\n" -"lxc profile device list List devices in the given " -"profile.\n" -"lxc profile device show Show full device details in " -"the given profile.\n" -"lxc profile device remove Remove a device from a " -"profile.\n" +"lxc profile device list List " +"devices in the given profile.\n" +"lxc profile device show Show " +"full device details in the given profile.\n" +"lxc profile device remove Remove a " +"device from a profile.\n" +"lxc profile device get <[remote:]profile> Get a " +"device property.\n" +"lxc profile device set <[remote:]profile> Set a " +"device property.\n" +"lxc profile device unset <[remote:]profile> Unset a " +"device property.\n" "lxc profile device add " "[key=value]...\n" " Add a profile device, such as a disk or a nic, to the containers\n" @@ -669,34 +779,36 @@ "Containern hinzu,\n" " die dieses Profil verwenden.\n" -#: lxc/config.go:56 +#: lxc/config.go:58 #, fuzzy msgid "" "Manage configuration.\n" "\n" "lxc config device add <[remote:]container> [key=value]... 
" "Add a device to a container.\n" -"lxc config device list [remote:] " +"lxc config device get <[remote:]container> " +"Get a device property.\n" +"lxc config device set <[remote:]container> " +"Set a device property.\n" +"lxc config device unset <[remote:]container> " +"Unset a device property.\n" +"lxc config device list <[remote:]container> " "List devices for container.\n" -"lxc config device show [remote:] " +"lxc config device show <[remote:]container> " "Show full device details for container.\n" -"lxc config device remove [remote:] " +"lxc config device remove <[remote:]container> " "Remove device from container.\n" "\n" -"lxc config get [remote:] key " -"Get configuration key.\n" -"lxc config set [remote:] key value " -"Set container configuration key.\n" -"lxc config unset [remote:] key " -"Unset container configuration key.\n" -"lxc config set key value " -"Set server configuration key.\n" -"lxc config unset key " -"Unset server configuration key.\n" -"lxc config show [--expanded] [remote:] " -"Show container configuration.\n" +"lxc config get [remote:][container] " +"Get container or server configuration key.\n" +"lxc config set [remote:][container] " +"Set container or server configuration key.\n" +"lxc config unset [remote:][container] " +"Unset container or server configuration key.\n" +"lxc config show [remote:][container] [--expanded] " +"Show container or server configuration.\n" "lxc config edit [remote:][container] " -"Edit container configuration in external editor.\n" +"Edit container or server configuration in external editor.\n" " Edit configuration, either by launching external editor or reading " "STDIN.\n" " Example: lxc config edit # launch editor\n" @@ -712,7 +824,7 @@ "\n" "Examples:\n" "To mount host's /share/c1 onto /opt in the container:\n" -" lxc config device add [remote:]container1 disk source=/" +" lxc config device add [remote:]container1 disk source=/" "share/c1 path=opt\n" "\n" "To set an lxc config value:\n" @@ -759,7 
+871,7 @@ "Um das Server Passwort zur authentifizierung zu setzen:\n" "\tlxc config set core.trust_password blah\n" -#: lxc/file.go:33 +#: lxc/file.go:32 #, fuzzy msgid "" "Manage files on a container.\n" @@ -781,30 +893,25 @@ " bei pull und bei push sind jeweils von der Form /\n" -#: lxc/remote.go:33 +#: lxc/remote.go:39 #, fuzzy msgid "" "Manage remote LXD servers.\n" "\n" -"lxc remote add [--accept-certificate] [--password=PASSWORD] [--" -"public] Add the remote at .\n" -"lxc remote remove " -" Remove " -"the remote .\n" -"lxc remote " -"list " +"lxc remote add [--accept-certificate] [--password=PASSWORD]\n" +" [--public] [--protocol=PROTOCOL] " +"Add the remote at .\n" +"lxc remote remove " +"Remove the remote .\n" +"lxc remote list " "List all remotes.\n" -"lxc remote rename " -" Rename remote " -" to .\n" -"lxc remote set-url " -" Update 's " -"url to .\n" -"lxc remote set-default " -" Set the " -"default remote.\n" -"lxc remote get-" -"default " +"lxc remote rename " +"Rename remote to .\n" +"lxc remote set-url " +"Update 's url to .\n" +"lxc remote set-default " +"Set the default remote.\n" +"lxc remote get-default " "Print the default remote." msgstr "" "Verwalte entfernte LXD Server.\n" @@ -824,7 +931,7 @@ "lxc remote get-default " "Gibt die Standard Instanz aus.\n" -#: lxc/image.go:38 +#: lxc/image.go:95 msgid "" "Manipulate container images.\n" "\n" @@ -844,24 +951,36 @@ "\n" "\n" "lxc image import [rootfs tarball|URL] [remote:] [--public] [--" -"created-at=ISO-8601] [--expires-at=ISO-8601] [--fingerprint=FINGERPRINT] " -"[prop=value]\n" +"created-at=ISO-8601] [--expires-at=ISO-8601] [--fingerprint=FINGERPRINT] [--" +"alias=ALIAS].. [prop=value]\n" " Import an image tarball (or tarballs) into the LXD image store.\n" "\n" "lxc image copy [remote:] : [--alias=ALIAS].. 
[--copy-aliases] " -"[--public]\n" +"[--public] [--auto-update]\n" " Copy an image from one LXD daemon to another over the network.\n" "\n" -"lxc image delete [remote:]\n" -" Delete an image from the LXD image store.\n" +" The auto-update flag instructs the server to keep this image up to\n" +" date. It requires the source to be an alias and for it to be public.\n" +"\n" +"lxc image delete [remote:] [remote:][...]\n" +" Delete one or more images from the LXD image store.\n" "\n" -"lxc image export [remote:]\n" +"lxc image export [remote:] [target]\n" " Export an image from the LXD image store into a distributable tarball.\n" "\n" +" The output target is optional and defaults to the working directory.\n" +" The target may be an existing directory, file name, or \"-\" to specify\n" +" stdout. The target MUST be a directory when exporting a split image.\n" +" If the target is a directory, the image's name (each part's name for\n" +" split images) as found in the database will be used for the exported\n" +" image. If the target is a file (not a directory and not stdout), then\n" +" the appropriate extension will be appended to the provided file name\n" +" based on the algorithm used to compress the image. \n" +"\n" "lxc image info [remote:]\n" " Print everything LXD knows about a given image.\n" "\n" -"lxc image list [remote:] [filter]\n" +"lxc image list [remote:] [filter] [--format table|json]\n" " List images in the LXD image store. Filters may be of the\n" " = form for property based filtering, or part of the image\n" " hash or part of the image alias name.\n" @@ -880,15 +999,24 @@ "lxc image alias delete [remote:]\n" " Delete an alias.\n" "\n" -"lxc image alias list [remote:]\n" -" List the aliases.\n" +"lxc image alias list [remote:] [filter]\n" +" List the aliases. 
Filters may be part of the image hash or part of the " +"image alias name.\n" +msgstr "" + +#: lxc/info.go:147 +msgid "Memory (current)" msgstr "" -#: lxc/help.go:86 +#: lxc/info.go:151 +msgid "Memory (peak)" +msgstr "" + +#: lxc/help.go:87 msgid "Missing summary." msgstr "Fehlende Zusammenfassung." -#: lxc/monitor.go:20 +#: lxc/monitor.go:41 msgid "" "Monitor activity on the LXD server.\n" "\n" @@ -903,7 +1031,7 @@ "lxc monitor --type=logging" msgstr "" -#: lxc/file.go:169 +#: lxc/file.go:183 msgid "More than one file to download, but target is not a directory" msgstr "" "Mehr als eine Datei herunterzuladen, aber das Ziel ist kein Verzeichnis" @@ -924,72 +1052,106 @@ "\n" "lxc move \n" -#: lxc/list.go:224 lxc/remote.go:271 +#: lxc/action.go:69 +#, fuzzy +msgid "Must supply container name for: " +msgstr "der Name des Ursprung Containers muss angegeben werden" + +#: lxc/list.go:428 lxc/remote.go:376 msgid "NAME" msgstr "" -#: lxc/list.go:293 lxc/remote.go:257 +#: lxc/remote.go:350 lxc/remote.go:355 msgid "NO" msgstr "" -#: lxc/info.go:82 +#: lxc/info.go:89 #, c-format msgid "Name: %s" msgstr "" -#: lxc/image.go:117 lxc/publish.go:30 +#: lxc/image.go:168 lxc/publish.go:33 msgid "New alias to define at target" msgstr "" -#: lxc/config.go:277 +#: lxc/config.go:285 #, fuzzy msgid "No certificate provided to add" msgstr "Kein Zertifikat zum hinzufügen bereitgestellt" -#: lxc/config.go:300 +#: lxc/config.go:308 msgid "No fingerprint specified." msgstr "Kein Fingerabdruck angegeben." -#: lxc/image.go:331 +#: lxc/remote.go:122 +msgid "Only https URLs are supported for simplestreams" +msgstr "" + +#: lxc/image.go:434 msgid "Only https:// is supported for remote image import." 
msgstr "" -#: lxc/help.go:63 lxc/main.go:132 +#: lxc/help.go:63 lxc/main.go:112 msgid "Options:" msgstr "" -#: lxc/image.go:425 +#: lxc/image.go:538 #, c-format msgid "Output is in %s" msgstr "" -#: lxc/exec.go:54 +#: lxc/exec.go:55 msgid "Override the terminal mode (auto, interactive or non-interactive)" msgstr "" -#: lxc/list.go:230 +#: lxc/list.go:572 +msgid "PERSISTENT" +msgstr "" + +#: lxc/list.go:429 msgid "PID" msgstr "" -#: lxc/image.go:522 lxc/remote.go:273 +#: lxc/list.go:430 +msgid "PROFILES" +msgstr "" + +#: lxc/remote.go:378 +msgid "PROTOCOL" +msgstr "" + +#: lxc/image.go:640 lxc/remote.go:379 msgid "PUBLIC" msgstr "" -#: lxc/help.go:69 +#: lxc/info.go:174 +msgid "Packets received" +msgstr "" + +#: lxc/info.go:175 +msgid "Packets sent" +msgstr "" + +#: lxc/help.go:70 #, fuzzy msgid "Path to an alternate client configuration directory." msgstr "Alternatives config Verzeichnis." -#: lxc/help.go:70 +#: lxc/help.go:71 #, fuzzy msgid "Path to an alternate server directory." msgstr "Alternatives config Verzeichnis." -#: lxc/main.go:39 -msgid "Permisson denied, are you in the lxd group?" +#: lxc/main.go:31 +msgid "Permission denied, are you in the lxd group?" msgstr "" -#: lxc/help.go:23 +#: lxc/info.go:103 +#, c-format +msgid "Pid: %d" +msgstr "" + +#: lxc/help.go:25 #, fuzzy msgid "" "Presents details on how to use LXD.\n" @@ -1000,11 +1162,11 @@ "\n" "lxd help [—all]\n" -#: lxc/profile.go:186 +#: lxc/profile.go:219 msgid "Press enter to open the editor again" msgstr "" -#: lxc/config.go:491 lxc/config.go:556 lxc/image.go:598 +#: lxc/config.go:532 lxc/config.go:597 lxc/image.go:735 msgid "Press enter to start the editor again" msgstr "" @@ -1020,10 +1182,14 @@ msgid "Print verbose information." msgstr "" +#: lxc/manpage.go:18 +msgid "Prints all the subcommands help." 
+msgstr "" + #: lxc/version.go:18 #, fuzzy msgid "" -"Prints the version number of LXD.\n" +"Prints the version number of this client tool.\n" "\n" "lxc version" msgstr "" @@ -1031,51 +1197,62 @@ "\n" "lxc version\n" -#: lxc/info.go:96 -#, c-format -msgid "Processcount: %d" -msgstr "" +#: lxc/info.go:127 +#, fuzzy, c-format +msgid "Processes: %d" +msgstr "Profil %s erstellt\n" -#: lxc/profile.go:223 +#: lxc/profile.go:275 #, fuzzy, c-format -msgid "Profile %s applied to %s" +msgid "Profile %s added to %s" msgstr "Profil %s wurde auf %s angewandt\n" -#: lxc/profile.go:137 +#: lxc/profile.go:170 #, fuzzy, c-format msgid "Profile %s created" msgstr "Profil %s erstellt\n" -#: lxc/profile.go:207 +#: lxc/profile.go:240 #, fuzzy, c-format msgid "Profile %s deleted" msgstr "Profil %s gelöscht\n" -#: lxc/init.go:134 lxc/init.go:135 lxc/launch.go:38 lxc/launch.go:39 +#: lxc/profile.go:306 +#, fuzzy, c-format +msgid "Profile %s removed from %s" +msgstr "Gerät %s wurde von %s entfernt\n" + +#: lxc/copy.go:33 lxc/copy.go:34 lxc/init.go:136 lxc/init.go:137 +#: lxc/launch.go:42 lxc/launch.go:43 #, fuzzy msgid "Profile to apply to the new container" msgstr "kann nicht zum selben Container Namen kopieren" -#: lxc/info.go:93 +#: lxc/profile.go:256 +#, fuzzy, c-format +msgid "Profiles %s applied to %s" +msgstr "Profil %s wurde auf %s angewandt\n" + +#: lxc/info.go:101 #, fuzzy, c-format msgid "Profiles: %s" msgstr "Profil %s erstellt\n" -#: lxc/image.go:277 +#: lxc/image.go:361 #, fuzzy msgid "Properties:" msgstr "Eigenschaften:\n" -#: lxc/remote.go:48 +#: lxc/remote.go:56 msgid "Public image server" msgstr "" -#: lxc/image.go:265 +#: lxc/image.go:349 #, fuzzy, c-format msgid "Public: %s" msgstr "Öffentlich: %s\n" -#: lxc/publish.go:19 +#: lxc/publish.go:25 msgid "" "Publish containers as images.\n" "\n" @@ -1083,11 +1260,11 @@ "value]..." 
msgstr "" -#: lxc/remote.go:47 +#: lxc/remote.go:54 msgid "Remote admin password" msgstr "Entferntes Administrator Passwort" -#: lxc/delete.go:43 +#: lxc/delete.go:42 #, c-format msgid "Remove %s (yes/no): " msgstr "" @@ -1096,32 +1273,44 @@ msgid "Require user confirmation." msgstr "" -#: lxc/init.go:244 +#: lxc/info.go:124 +msgid "Resources:" +msgstr "" + +#: lxc/init.go:247 #, c-format msgid "Retrieving image: %s" msgstr "" -#: lxc/image.go:525 +#: lxc/image.go:643 msgid "SIZE" msgstr "" -#: lxc/list.go:229 +#: lxc/list.go:431 msgid "SNAPSHOTS" msgstr "" -#: lxc/list.go:225 +#: lxc/list.go:432 msgid "STATE" msgstr "" -#: lxc/remote.go:155 +#: lxc/remote.go:380 +msgid "STATIC" +msgstr "" + +#: lxc/remote.go:227 msgid "Server certificate NACKed by user" msgstr "Server Zertifikat vom Benutzer nicht akzeptiert" -#: lxc/remote.go:201 +#: lxc/remote.go:289 msgid "Server doesn't trust us after adding our cert" msgstr "" "Der Server vertraut uns nicht nachdem er unser Zertifikat hinzugefügt hat" +#: lxc/remote.go:55 +msgid "Server protocol (lxd or simplestreams)" +msgstr "" + #: lxc/restore.go:21 msgid "" "Set the current state of a resource back to a snapshot.\n" @@ -1136,15 +1325,15 @@ "lxc restore u1 snap0 # restore the snapshot" msgstr "" -#: lxc/file.go:45 +#: lxc/file.go:44 msgid "Set the file's gid on push" msgstr "Setzt die gid der Datei beim Ãœbertragen" -#: lxc/file.go:46 +#: lxc/file.go:45 msgid "Set the file's perms on push" msgstr "Setzt die Dateiberechtigungen beim Ãœbertragen" -#: lxc/file.go:44 +#: lxc/file.go:43 msgid "Set the file's uid on push" msgstr "Setzt die uid der Datei beim Ãœbertragen" @@ -1152,77 +1341,140 @@ msgid "Show all commands (not just interesting ones)" msgstr "Zeigt alle Befehle (nicht nur die interessanten)" -#: lxc/info.go:34 +#: lxc/help.go:67 +msgid "Show client version." +msgstr "" + +#: lxc/info.go:36 msgid "Show the container's last 100 log lines?" msgstr "Zeige die letzten 100 Zeilen Protokoll des Containers?" 
-#: lxc/image.go:262 +#: lxc/image.go:347 #, fuzzy, c-format msgid "Size: %.2fMB" msgstr "Größe: %.2vMB\n" -#: lxc/info.go:122 +#: lxc/info.go:194 msgid "Snapshots:" msgstr "" -#: lxc/launch.go:118 +#: lxc/image.go:371 +msgid "Source:" +msgstr "" + +#: lxc/launch.go:124 #, c-format msgid "Starting %s" msgstr "" -#: lxc/info.go:87 +#: lxc/info.go:95 #, c-format msgid "Status: %s" msgstr "" -#: lxc/delete.go:97 +#: lxc/publish.go:34 lxc/publish.go:35 +msgid "Stop the container if currently running" +msgstr "" + +#: lxc/delete.go:106 lxc/publish.go:111 msgid "Stopping container failed!" msgstr "Anhalten des Containers fehlgeschlagen!" -#: lxc/delete.go:83 +#: lxc/action.go:45 +#, fuzzy +msgid "Store the container state (only for stop)." +msgstr "Herunterfahren des Containers erzwingen." + +#: lxc/info.go:155 +msgid "Swap (current)" +msgstr "" + +#: lxc/info.go:159 +msgid "Swap (peak)" +msgstr "" + +#: lxc/list.go:433 +msgid "TYPE" +msgstr "" + +#: lxc/delete.go:92 msgid "The container is currently running, stop it first or pass --force." msgstr "" -#: lxc/publish.go:57 +#: lxc/publish.go:89 +msgid "" +"The container is currently running. Use --force to have it stopped and " +"restarted." +msgstr "" + +#: lxc/config.go:676 lxc/config.go:688 lxc/config.go:721 lxc/config.go:739 +#: lxc/config.go:777 lxc/config.go:795 +#, fuzzy +msgid "The device doesn't exist" +msgstr "entfernte Instanz %s existiert nicht" + +#: lxc/init.go:277 +#, c-format +msgid "The local image '%s' couldn't be found, trying '%s:' instead." +msgstr "" + +#: lxc/main.go:180 +msgid "The opposite of `lxc pause` is `lxc start`." +msgstr "" + +#: lxc/publish.go:62 msgid "There is no \"image name\". Did you want an alias?" msgstr "" -#: lxc/action.go:35 +#: lxc/action.go:41 msgid "Time to wait for the container before killing it." msgstr "Wartezeit bevor der Container gestoppt wird." 
-#: lxc/image.go:266 +#: lxc/image.go:350 #, fuzzy msgid "Timestamps:" msgstr "Zeitstempel:\n" -#: lxc/action.go:62 lxc/launch.go:126 +#: lxc/main.go:137 +msgid "To start your first container, try: lxc launch ubuntu:16.04" +msgstr "" + +#: lxc/image.go:421 +#, c-format +msgid "Transferring image: %d%%" +msgstr "" + +#: lxc/action.go:99 lxc/launch.go:132 #, c-format msgid "Try `lxc info --show-log %s` for more info" msgstr "" -#: lxc/info.go:89 +#: lxc/info.go:97 msgid "Type: ephemeral" msgstr "" -#: lxc/info.go:91 +#: lxc/info.go:99 msgid "Type: persistent" msgstr "" -#: lxc/image.go:526 +#: lxc/image.go:644 msgid "UPLOAD DATE" msgstr "" -#: lxc/remote.go:272 +#: lxc/remote.go:377 msgid "URL" msgstr "" -#: lxc/image.go:271 +#: lxc/remote.go:97 +msgid "Unable to read remote TLS certificate" +msgstr "" + +#: lxc/image.go:355 #, c-format msgid "Uploaded: %s" msgstr "" -#: lxc/main.go:132 +#: lxc/main.go:112 #, fuzzy, c-format msgid "Usage: %s" msgstr "" @@ -1238,7 +1490,7 @@ "Benutzung: lxc [Unterbefehl] [Optionen]\n" "Verfügbare Befehle:\n" -#: lxc/delete.go:47 +#: lxc/delete.go:46 msgid "User aborted delete operation." msgstr "" @@ -1258,125 +1510,148 @@ msgid "Whether to show the expanded configuration" msgstr "" -#: lxc/list.go:291 lxc/remote.go:259 +#: lxc/remote.go:352 lxc/remote.go:357 msgid "YES" msgstr "" -#: lxc/main.go:66 +#: lxc/main.go:52 msgid "`lxc config profile` is deprecated, please use `lxc profile`" msgstr "" -#: lxc/launch.go:105 +#: lxc/launch.go:111 msgid "bad number of things scanned from image, container or snapshot" msgstr "" "Falsche Anzahl an Objekten im Abbild, Container oder Sicherungspunkt gelesen." 
-#: lxc/action.go:58 +#: lxc/action.go:95 msgid "bad result type from action" msgstr "" -#: lxc/copy.go:78 +#: lxc/copy.go:115 msgid "can't copy to the same container name" msgstr "kann nicht zum selben Container Namen kopieren" -#: lxc/remote.go:247 +#: lxc/remote.go:340 msgid "can't remove the default remote" msgstr "" -#: lxc/remote.go:264 +#: lxc/remote.go:366 msgid "default" msgstr "" -#: lxc/init.go:197 lxc/init.go:202 lxc/launch.go:89 lxc/launch.go:94 +#: lxc/copy.go:131 lxc/copy.go:136 lxc/copy.go:219 lxc/copy.go:224 +#: lxc/init.go:200 lxc/init.go:205 lxc/launch.go:95 lxc/launch.go:100 msgid "didn't get any affected image, container or snapshot from server" msgstr "" -#: lxc/main.go:25 lxc/main.go:167 +#: lxc/image.go:341 +msgid "disabled" +msgstr "" + +#: lxc/image.go:343 +msgid "enabled" +msgstr "" + +#: lxc/main.go:22 lxc/main.go:148 #, fuzzy, c-format msgid "error: %v" msgstr "Fehler: %v\n" -#: lxc/help.go:40 lxc/main.go:127 +#: lxc/help.go:40 lxc/main.go:107 #, fuzzy, c-format msgid "error: unknown command: %s" msgstr "Fehler: unbekannter Befehl: %s\n" -#: lxc/launch.go:109 +#: lxc/launch.go:115 msgid "got bad version" msgstr "Versionskonflikt" -#: lxc/image.go:256 lxc/image.go:503 +#: lxc/image.go:336 lxc/image.go:620 msgid "no" msgstr "" -#: lxc/copy.go:100 +#: lxc/copy.go:158 msgid "not all the profiles from the source exist on the target" msgstr "nicht alle Profile der Quelle sind am Ziel vorhanden." -#: lxc/remote.go:148 +#: lxc/remote.go:220 #, fuzzy msgid "ok (y/n)?" msgstr "OK (y/n)? 
" -#: lxc/main.go:274 lxc/main.go:278 +#: lxc/main.go:302 lxc/main.go:306 #, c-format msgid "processing aliases failed %s\n" msgstr "" -#: lxc/remote.go:291 +#: lxc/remote.go:402 #, c-format msgid "remote %s already exists" msgstr "entfernte Instanz %s existiert bereits" -#: lxc/remote.go:243 lxc/remote.go:287 lxc/remote.go:317 lxc/remote.go:328 +#: lxc/remote.go:332 lxc/remote.go:394 lxc/remote.go:429 lxc/remote.go:445 #, c-format msgid "remote %s doesn't exist" msgstr "entfernte Instanz %s existiert nicht" -#: lxc/remote.go:227 +#: lxc/remote.go:315 #, c-format msgid "remote %s exists as <%s>" msgstr "entfernte Instanz %s existiert als <%s>" -#: lxc/info.go:131 +#: lxc/remote.go:336 lxc/remote.go:398 lxc/remote.go:433 +#, c-format +msgid "remote %s is static and cannot be modified" +msgstr "" + +#: lxc/info.go:205 msgid "stateful" msgstr "" -#: lxc/info.go:133 +#: lxc/info.go:207 msgid "stateless" msgstr "" -#: lxc/info.go:127 +#: lxc/info.go:201 #, c-format msgid "taken at %s" msgstr "" -#: lxc/exec.go:158 +#: lxc/exec.go:163 msgid "unreachable return reached" msgstr "" -#: lxc/main.go:207 +#: lxc/main.go:234 msgid "wrong number of subcommand arguments" msgstr "falsche Anzahl an Parametern für Unterbefehl" -#: lxc/delete.go:46 lxc/image.go:259 lxc/image.go:507 +#: lxc/delete.go:45 lxc/image.go:338 lxc/image.go:624 msgid "yes" msgstr "" -#: lxc/copy.go:38 +#: lxc/copy.go:44 msgid "you must specify a source container name" msgstr "der Name des Ursprung Containers muss angegeben werden" #, fuzzy +#~ msgid "For example: 'lxd-images import ubuntu --alias ubuntu'." +#~ msgstr "Zum Beispiel: 'lxd-images import ubuntu --alias ubuntu'.\n" + +#, fuzzy +#~ msgid "" +#~ "If this is your first run, you will need to import images using the 'lxd-" +#~ "images' script." 
+#~ msgstr "" +#~ "Falls dies der erste Start ist, sollten Sie mit dem 'lxd-images' Script " +#~ "Abbilder importieren.\n" + +#, fuzzy #~ msgid "Bad image property: %s" #~ msgstr "Ungültige Abbild Eigenschaft: %s\n" #~ msgid "Cannot change profile name" #~ msgstr "Profilname kann nicht geändert werden" -#~ msgid "Could not create server cert dir" -#~ msgstr "Kann Verzeichnis für Zertifikate auf dem Server nicht erstellen" - #, fuzzy #~ msgid "" #~ "Create a read-only snapshot of a container.\n" diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/po/fr.po juju-core-2.0.0/src/github.com/lxc/lxd/po/fr.po --- juju-core-2.0~beta15/src/github.com/lxc/lxd/po/fr.po 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/po/fr.po 2016-10-13 14:31:53.000000000 +0000 @@ -7,7 +7,7 @@ msgstr "" "Project-Id-Version: LXD\n" "Report-Msgid-Bugs-To: lxc-devel@lists.linuxcontainers.org\n" -"POT-Creation-Date: 2016-02-10 22:15-0500\n" +"POT-Creation-Date: 2016-08-30 17:59-0400\n" "PO-Revision-Date: 2015-02-26 02:05-0600\n" "Last-Translator: Stéphane Graber \n" @@ -16,7 +16,19 @@ "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" -#: lxc/config.go:36 +#: lxc/info.go:140 +msgid " Disk usage:" +msgstr "" + +#: lxc/info.go:163 +msgid " Memory usage:" +msgstr "" + +#: lxc/info.go:180 +msgid " Network usage:" +msgstr "" + +#: lxc/config.go:37 msgid "" "### This is a yaml representation of the configuration.\n" "### Any line starting with a '# will be ignored.\n" @@ -37,7 +49,7 @@ "### Note that the name is shown but cannot be changed" msgstr "" -#: lxc/image.go:29 +#: lxc/image.go:85 msgid "" "### This is a yaml representation of the image properties.\n" "### Any line starting with a '# will be ignored.\n" @@ -47,7 +59,7 @@ "### description: My custom image" msgstr "" -#: lxc/profile.go:26 +#: lxc/profile.go:27 msgid "" "### This is a yaml representation of the profile.\n" "### Any line starting with a '# will be ignored.\n" @@ -62,13 
+74,13 @@ "### devices:\n" "### eth0:\n" "### nictype: bridged\n" -"### parent: lxcbr0\n" +"### parent: lxdbr0\n" "### type: nic\n" "###\n" "### Note that the name is shown but cannot be changed" msgstr "" -#: lxc/image.go:500 +#: lxc/image.go:617 #, c-format msgid "%s (%d more)" msgstr "" @@ -78,127 +90,153 @@ msgid "'/' not allowed in snapshot name" msgstr "'/' n'est pas autorisé dans le nom d'un instantané (snapshot)\n" -#: lxc/info.go:109 lxc/profile.go:221 +#: lxc/profile.go:254 msgid "(none)" msgstr "" -#: lxc/image.go:520 lxc/image.go:542 +#: lxc/image.go:638 lxc/image.go:680 msgid "ALIAS" msgstr "" -#: lxc/image.go:524 +#: lxc/image.go:642 msgid "ARCH" msgstr "" -#: lxc/remote.go:46 +#: lxc/list.go:425 +msgid "ARCHITECTURE" +msgstr "" + +#: lxc/remote.go:53 msgid "Accept certificate" msgstr "" -#: lxc/remote.go:181 +#: lxc/remote.go:269 #, c-format msgid "Admin password for %s: " msgstr "Mot de passe administrateur pour %s: " -#: lxc/image.go:281 +#: lxc/image.go:365 msgid "Aliases:" msgstr "" -#: lxc/exec.go:53 +#: lxc/exec.go:54 msgid "An environment variable of the form HOME=/home/foo" msgstr "" -#: lxc/image.go:264 +#: lxc/image.go:348 lxc/info.go:90 #, c-format msgid "Architecture: %s" msgstr "" +#: lxc/image.go:369 +#, c-format +msgid "Auto update: %s" +msgstr "" + #: lxc/help.go:49 msgid "Available commands:" msgstr "" -#: lxc/config.go:264 +#: lxc/info.go:172 +msgid "Bytes received" +msgstr "" + +#: lxc/info.go:173 +msgid "Bytes sent" +msgstr "" + +#: lxc/config.go:274 msgid "COMMON NAME" msgstr "" -#: lxc/config.go:111 +#: lxc/list.go:426 +msgid "CREATED AT" +msgstr "" + +#: lxc/config.go:114 #, c-format msgid "Can't read from stdin: %s" msgstr "" -#: lxc/config.go:124 lxc/config.go:157 lxc/config.go:179 +#: lxc/config.go:127 lxc/config.go:160 lxc/config.go:182 #, c-format msgid "Can't unset key '%s', it's not currently set." 
msgstr "" -#: lxc/profile.go:329 +#: lxc/profile.go:420 msgid "Cannot provide container name to list" msgstr "" -#: lxc/remote.go:147 +#: lxc/remote.go:219 #, fuzzy, c-format msgid "Certificate fingerprint: %x" msgstr "Empreinte du certificat: % x\n" -#: lxc/action.go:27 +#: lxc/action.go:33 #, fuzzy, c-format msgid "" "Changes state of one or more containers to %s.\n" "\n" -"lxc %s [...]" +"lxc %s [...]%s" msgstr "Change l'état du conteneur à %s.\n" -#: lxc/remote.go:204 +#: lxc/remote.go:292 msgid "Client certificate stored at server: " msgstr "Certificat client enregistré avec le serveur: " -#: lxc/list.go:80 +#: lxc/list.go:121 lxc/list.go:122 msgid "Columns" msgstr "" -#: lxc/init.go:132 lxc/init.go:133 lxc/launch.go:36 lxc/launch.go:37 +#: lxc/copy.go:31 lxc/copy.go:32 lxc/init.go:134 lxc/init.go:135 +#: lxc/launch.go:40 lxc/launch.go:41 msgid "Config key/value to apply to the new container" msgstr "" -#: lxc/config.go:490 lxc/config.go:555 lxc/image.go:597 lxc/profile.go:185 +#: lxc/config.go:531 lxc/config.go:596 lxc/image.go:734 lxc/profile.go:218 #, fuzzy, c-format msgid "Config parsing error: %s" msgstr "erreur: %v\n" -#: lxc/main.go:37 +#: lxc/main.go:29 msgid "Connection refused; is LXD running?" msgstr "" -#: lxc/publish.go:54 +#: lxc/publish.go:59 msgid "Container name is mandatory" msgstr "" -#: lxc/init.go:206 +#: lxc/copy.go:140 lxc/copy.go:228 lxc/init.go:210 #, c-format msgid "Container name is: %s" msgstr "" -#: lxc/publish.go:81 lxc/publish.go:101 +#: lxc/publish.go:141 lxc/publish.go:156 #, fuzzy, c-format msgid "Container published with fingerprint: %s" msgstr "Empreinte du certificat: % x\n" -#: lxc/image.go:116 +#: lxc/image.go:166 msgid "Copy aliases from source" msgstr "" -#: lxc/copy.go:22 +#: lxc/copy.go:24 msgid "" "Copy containers within or in between lxd instances.\n" "\n" -"lxc copy [remote:] [remote:] [--" -"ephemeral|e]" +"lxc copy [remote:] [[remote:]] [--" +"ephemeral|e] [--profile|-p ...] 
[--config|-c ...]" msgstr "" -#: lxc/image.go:211 +#: lxc/image.go:280 #, c-format msgid "Copying the image: %s" msgstr "" +#: lxc/remote.go:234 +msgid "Could not create server cert dir" +msgstr "Le dossier de stockage des certificats serveurs n'a pas pû être créé" + #: lxc/snapshot.go:21 msgid "" "Create a read-only snapshot of a container.\n" @@ -219,21 +257,21 @@ "lxc snapshot u1 snap0" msgstr "" -#: lxc/image.go:269 lxc/info.go:84 +#: lxc/image.go:353 lxc/info.go:92 #, c-format msgid "Created: %s" msgstr "" -#: lxc/init.go:175 lxc/launch.go:112 +#: lxc/init.go:177 lxc/launch.go:118 #, c-format msgid "Creating %s" msgstr "" -#: lxc/init.go:173 +#: lxc/init.go:175 msgid "Creating the container" msgstr "" -#: lxc/image.go:523 +#: lxc/image.go:641 lxc/image.go:682 msgid "DESCRIPTION" msgstr "" @@ -248,38 +286,38 @@ "snapshots, ...)." msgstr "" -#: lxc/config.go:603 +#: lxc/config.go:648 #, c-format msgid "Device %s added to %s" msgstr "" -#: lxc/config.go:631 +#: lxc/config.go:835 #, c-format msgid "Device %s removed from %s" msgstr "" -#: lxc/list.go:228 +#: lxc/list.go:570 msgid "EPHEMERAL" msgstr "" -#: lxc/config.go:266 +#: lxc/config.go:276 msgid "EXPIRY DATE" msgstr "" -#: lxc/main.go:55 +#: lxc/main.go:41 msgid "Enables debug mode." msgstr "Active le mode de déboguage." -#: lxc/main.go:54 +#: lxc/main.go:40 msgid "Enables verbose mode." msgstr "Active le mode verbeux." -#: lxc/help.go:68 +#: lxc/help.go:69 msgid "Environment:" msgstr "" -#: lxc/copy.go:29 lxc/copy.go:30 lxc/init.go:136 lxc/init.go:137 -#: lxc/launch.go:40 lxc/launch.go:41 +#: lxc/copy.go:35 lxc/copy.go:36 lxc/init.go:138 lxc/init.go:139 +#: lxc/launch.go:44 lxc/launch.go:45 msgid "Ephemeral container" msgstr "" @@ -287,34 +325,41 @@ msgid "Event type to listen for" msgstr "" -#: lxc/exec.go:27 +#: lxc/exec.go:45 #, fuzzy msgid "" "Execute the specified command in a container.\n" "\n" "lxc exec [remote:]container [--mode=auto|interactive|non-interactive] [--env " -"EDITOR=/usr/bin/vim]... 
" +"EDITOR=/usr/bin/vim]... \n" +"\n" +"Mode defaults to non-interactive, interactive mode is selected if both stdin " +"AND stdout are terminals (stderr is ignored)." msgstr "Exécute la commande spécifiée dans un conteneur.\n" -#: lxc/image.go:273 +#: lxc/image.go:357 #, c-format msgid "Expires: %s" msgstr "" -#: lxc/image.go:275 +#: lxc/image.go:359 msgid "Expires: never" msgstr "" -#: lxc/config.go:263 lxc/image.go:521 lxc/image.go:543 +#: lxc/config.go:273 lxc/image.go:639 lxc/image.go:681 msgid "FINGERPRINT" msgstr "" -#: lxc/image.go:255 +#: lxc/list.go:124 +msgid "Fast mode (same as --columns=nsacPt" +msgstr "" + +#: lxc/image.go:346 #, fuzzy, c-format msgid "Fingerprint: %s" msgstr "Empreinte du certificat: % x\n" -#: lxc/finger.go:17 +#: lxc/finger.go:15 #, fuzzy msgid "" "Fingers the LXD instance to check if it is up and working.\n" @@ -322,11 +367,7 @@ "lxc finger " msgstr "Contacte LXD pour voir s'il est fonctionel.\n" -#: lxc/main.go:156 -msgid "For example: 'lxd-images import ubuntu --alias ubuntu'." -msgstr "" - -#: lxc/action.go:36 +#: lxc/action.go:42 lxc/action.go:43 msgid "Force the container to shutdown." msgstr "Force l'arrêt du conteneur." @@ -334,52 +375,60 @@ msgid "Force the removal of stopped containers." msgstr "" -#: lxc/main.go:56 +#: lxc/main.go:42 msgid "Force using the local unix socket." msgstr "" -#: lxc/main.go:148 +#: lxc/image.go:169 lxc/list.go:123 +msgid "Format" +msgstr "" + +#: lxc/remote.go:67 #, fuzzy msgid "Generating a client certificate. This may take a minute..." msgstr "Géneration d'un certificat client. Ceci peut prendre une minute...\n" -#: lxc/list.go:226 +#: lxc/list.go:423 msgid "IPV4" msgstr "" -#: lxc/list.go:227 +#: lxc/list.go:424 msgid "IPV6" msgstr "" -#: lxc/config.go:265 +#: lxc/config.go:275 msgid "ISSUE DATE" msgstr "" -#: lxc/main.go:155 +#: lxc/main.go:136 msgid "" -"If this is your first run, you will need to import images using the 'lxd-" -"images' script." 
+"If this is your first time using LXD, you should also run: sudo lxd init" msgstr "" -#: lxc/main.go:57 +#: lxc/main.go:43 msgid "Ignore aliases when determining what command to run." msgstr "" -#: lxc/image.go:216 +#: lxc/action.go:46 +#, fuzzy +msgid "Ignore the container state (only for start)." +msgstr "Force l'arrêt du conteneur." + +#: lxc/image.go:285 msgid "Image copied successfully!" msgstr "" -#: lxc/image.go:339 +#: lxc/image.go:442 #, fuzzy, c-format msgid "Image imported with fingerprint: %s" msgstr "Empreinte du certificat: % x\n" -#: lxc/info.go:95 +#: lxc/image.go:429 #, c-format -msgid "Init: %d" +msgid "Importing the image: %s" msgstr "" -#: lxc/init.go:21 +#: lxc/init.go:73 msgid "" "Initialize a container from a particular image.\n" "\n" @@ -395,30 +444,48 @@ "lxc init ubuntu u1" msgstr "" -#: lxc/init.go:63 lxc/init.go:68 +#: lxc/remote.go:137 +#, c-format +msgid "Invalid URL scheme \"%s\" in \"%s\"" +msgstr "" + +#: lxc/config.go:254 +#, fuzzy +msgid "Invalid certificate" +msgstr "Gérer la configuration.\n" + +#: lxc/init.go:30 lxc/init.go:35 #, fuzzy msgid "Invalid configuration key" msgstr "Gérer la configuration.\n" -#: lxc/file.go:181 +#: lxc/file.go:195 #, c-format msgid "Invalid source %s" msgstr "Source invalide %s" -#: lxc/file.go:58 +#: lxc/file.go:57 #, c-format msgid "Invalid target %s" msgstr "Destination invalide %s" -#: lxc/info.go:97 +#: lxc/info.go:121 msgid "Ips:" msgstr "" -#: lxc/main.go:35 -msgid "LXD socket not found; is LXD running?" +#: lxc/image.go:167 +msgid "Keep the image up to date after initial copy" +msgstr "" + +#: lxc/list.go:427 +msgid "LAST USED AT" +msgstr "" + +#: lxc/main.go:27 +msgid "LXD socket not found; is LXD installed and running?" 
msgstr "" -#: lxc/launch.go:20 +#: lxc/launch.go:22 msgid "" "Launch a container from a particular image.\n" "\n" @@ -431,58 +498,90 @@ "Specifying \"-p\" with no argument will result in no profile.\n" "\n" "Example:\n" -"lxc launch ubuntu u1" +"lxc launch ubuntu:16.04 u1" msgstr "" #: lxc/info.go:25 msgid "" -"List information on containers.\n" +"List information on LXD servers and containers.\n" "\n" -"This will support remotes and images as well, but only containers for now.\n" +"For a container:\n" +" lxc info [:]container [--show-log]\n" "\n" -"lxc info [:]container [--show-log]" +"For a server:\n" +" lxc info [:]" msgstr "" -#: lxc/list.go:54 +#: lxc/list.go:68 msgid "" "Lists the available resources.\n" "\n" -"lxc list [resource] [filters] -c [columns]\n" +"lxc list [resource] [filters] [--format table|json] [-c columns] [--fast]\n" "\n" "The filters are:\n" -"* A single keyword like \"web\" which will list any container with \"web\" " -"in its name.\n" +"* A single keyword like \"web\" which will list any container with a name " +"starting by \"web\".\n" +"* A regular expression on the container name. (e.g. .*web.*01$)\n" "* A key/value pair referring to a configuration item. For those, the " "namespace can be abreviated to the smallest unambiguous identifier:\n" -"* \"user.blah=abc\" will list all containers with the \"blah\" user property " -"set to \"abc\"\n" -"* \"u.blah=abc\" will do the same\n" -"* \"security.privileged=1\" will list all privileged containers\n" -"* \"s.privileged=1\" will do the same\n" -"\n" -"The columns are:\n" +" * \"user.blah=abc\" will list all containers with the \"blah\" user " +"property set to \"abc\".\n" +" * \"u.blah=abc\" will do the same\n" +" * \"security.privileged=1\" will list all privileged containers\n" +" * \"s.privileged=1\" will do the same\n" +"* A regular expression matching a configuration item or its value. (e.g. 
" +"volatile.eth0.hwaddr=00:16:3e:.*)\n" +"\n" +"The -c option takes a comma separated list of arguments that control\n" +"which container attributes to output when displaying in table format.\n" +"Column arguments are either pre-defined shorthand chars (see below),\n" +"or (extended) config keys. Commas between consecutive shorthand chars\n" +"are optional.\n" +"\n" +"Pre-defined shorthand chars:\n" +"* 4 - IPv4 address\n" +"* 6 - IPv6 address\n" +"* a - architecture\n" +"* c - creation date\n" +"* l - last used date\n" "* n - name\n" +"* p - pid of container init process\n" +"* P - profiles\n" "* s - state\n" -"* 4 - IP4\n" -"* 6 - IP6\n" -"* e - ephemeral\n" -"* S - snapshots\n" -"* p - pid of container init process" +"* S - number of snapshots\n" +"* t - type (persistent or ephemeral)\n" +"\n" +"Config key syntax: key[:name][:maxWidth]\n" +"* key - The (extended) config key to display\n" +"* name - Name to display in the column header, defaults to the key\n" +" if not specified or if empty (to allow defining maxWidth\n" +" without a custom name, e.g. user.key::0)\n" +"* maxWidth - Max width of the column (longer results are truncated).\n" +" -1 == unlimited\n" +" 0 == width of column header\n" +" >0 == max width in chars\n" +" Default is -1 (unlimited)\n" +"\n" +"Default column layout: ns46tS\n" +"Fast column layout: nsacPt\n" +"\n" +"Example: lxc list -c n,volatile.base_image:\"BASE IMAGE\":0,s46,volatile." 
+"eth0.hwaddr:MAC\n" msgstr "" -#: lxc/info.go:151 +#: lxc/info.go:225 msgid "Log:" msgstr "" -#: lxc/image.go:115 +#: lxc/image.go:165 msgid "Make image public" msgstr "" -#: lxc/publish.go:29 +#: lxc/publish.go:32 msgid "Make the image public" msgstr "" -#: lxc/profile.go:46 +#: lxc/profile.go:48 msgid "" "Manage configuration profiles.\n" "\n" @@ -493,61 +592,78 @@ "specified remote.\n" "lxc profile get Get profile configuration.\n" "lxc profile set Set profile configuration.\n" +"lxc profile unset Unset profile configuration.\n" "lxc profile delete Delete a profile.\n" "lxc profile edit \n" " Edit profile, either by launching external editor or reading STDIN.\n" " Example: lxc profile edit # launch editor\n" " cat profile.yml | lxc profile edit # read from " "profile.yml\n" -"lxc profile apply \n" -" Apply a comma-separated list of profiles to a container, in order.\n" +"\n" +"lxc profile assign \n" +" Assign a comma-separated list of profiles to a container, in order.\n" " All profiles passed in this call (and only those) will be applied\n" -" to the specified container.\n" -" Example: lxc profile apply foo default,bar # Apply default and bar\n" -" lxc profile apply foo default # Only default is active\n" -" lxc profile apply '' # no profiles are applied anymore\n" -" lxc profile apply bar,default # Apply default second now\n" +" to the specified container, i.e. it sets the list of profiles exactly " +"to\n" +" those specified in this command. 
To add/remove a particular profile from " +"a\n" +" container, use {add|remove} below.\n" +" Example: lxc profile assign foo default,bar # Apply default and bar\n" +" lxc profile assign foo default # Only default is active\n" +" lxc profile assign '' # no profiles are applied anymore\n" +" lxc profile assign bar,default # Apply default second now\n" +"lxc profile add # add a profile to a container\n" +"lxc profile remove # remove the profile from a " +"container\n" "\n" "Devices:\n" -"lxc profile device list List devices in the given " -"profile.\n" -"lxc profile device show Show full device details in " -"the given profile.\n" -"lxc profile device remove Remove a device from a " -"profile.\n" +"lxc profile device list List " +"devices in the given profile.\n" +"lxc profile device show Show " +"full device details in the given profile.\n" +"lxc profile device remove Remove a " +"device from a profile.\n" +"lxc profile device get <[remote:]profile> Get a " +"device property.\n" +"lxc profile device set <[remote:]profile> Set a " +"device property.\n" +"lxc profile device unset <[remote:]profile> Unset a " +"device property.\n" "lxc profile device add " "[key=value]...\n" " Add a profile device, such as a disk or a nic, to the containers\n" " using the specified profile." msgstr "" -#: lxc/config.go:56 +#: lxc/config.go:58 msgid "" "Manage configuration.\n" "\n" "lxc config device add <[remote:]container> [key=value]... 
" "Add a device to a container.\n" -"lxc config device list [remote:] " +"lxc config device get <[remote:]container> " +"Get a device property.\n" +"lxc config device set <[remote:]container> " +"Set a device property.\n" +"lxc config device unset <[remote:]container> " +"Unset a device property.\n" +"lxc config device list <[remote:]container> " "List devices for container.\n" -"lxc config device show [remote:] " +"lxc config device show <[remote:]container> " "Show full device details for container.\n" -"lxc config device remove [remote:] " +"lxc config device remove <[remote:]container> " "Remove device from container.\n" "\n" -"lxc config get [remote:] key " -"Get configuration key.\n" -"lxc config set [remote:] key value " -"Set container configuration key.\n" -"lxc config unset [remote:] key " -"Unset container configuration key.\n" -"lxc config set key value " -"Set server configuration key.\n" -"lxc config unset key " -"Unset server configuration key.\n" -"lxc config show [--expanded] [remote:] " -"Show container configuration.\n" +"lxc config get [remote:][container] " +"Get container or server configuration key.\n" +"lxc config set [remote:][container] " +"Set container or server configuration key.\n" +"lxc config unset [remote:][container] " +"Unset container or server configuration key.\n" +"lxc config show [remote:][container] [--expanded] " +"Show container or server configuration.\n" "lxc config edit [remote:][container] " -"Edit container configuration in external editor.\n" +"Edit container or server configuration in external editor.\n" " Edit configuration, either by launching external editor or reading " "STDIN.\n" " Example: lxc config edit # launch editor\n" @@ -563,7 +679,7 @@ "\n" "Examples:\n" "To mount host's /share/c1 onto /opt in the container:\n" -" lxc config device add [remote:]container1 disk source=/" +" lxc config device add [remote:]container1 disk source=/" "share/c1 path=opt\n" "\n" "To set an lxc config value:\n" @@ -578,7 
+694,7 @@ " lxc config set core.trust_password blah" msgstr "" -#: lxc/file.go:33 +#: lxc/file.go:32 msgid "" "Manage files on a container.\n" "\n" @@ -591,33 +707,28 @@ "case of edit are /" msgstr "" -#: lxc/remote.go:33 +#: lxc/remote.go:39 msgid "" "Manage remote LXD servers.\n" "\n" -"lxc remote add [--accept-certificate] [--password=PASSWORD] [--" -"public] Add the remote at .\n" -"lxc remote remove " -" Remove " -"the remote .\n" -"lxc remote " -"list " +"lxc remote add [--accept-certificate] [--password=PASSWORD]\n" +" [--public] [--protocol=PROTOCOL] " +"Add the remote at .\n" +"lxc remote remove " +"Remove the remote .\n" +"lxc remote list " "List all remotes.\n" -"lxc remote rename " -" Rename remote " -" to .\n" -"lxc remote set-url " -" Update 's " -"url to .\n" -"lxc remote set-default " -" Set the " -"default remote.\n" -"lxc remote get-" -"default " +"lxc remote rename " +"Rename remote to .\n" +"lxc remote set-url " +"Update 's url to .\n" +"lxc remote set-default " +"Set the default remote.\n" +"lxc remote get-default " "Print the default remote." msgstr "" -#: lxc/image.go:38 +#: lxc/image.go:95 msgid "" "Manipulate container images.\n" "\n" @@ -637,24 +748,36 @@ "\n" "\n" "lxc image import [rootfs tarball|URL] [remote:] [--public] [--" -"created-at=ISO-8601] [--expires-at=ISO-8601] [--fingerprint=FINGERPRINT] " -"[prop=value]\n" +"created-at=ISO-8601] [--expires-at=ISO-8601] [--fingerprint=FINGERPRINT] [--" +"alias=ALIAS].. [prop=value]\n" " Import an image tarball (or tarballs) into the LXD image store.\n" "\n" "lxc image copy [remote:] : [--alias=ALIAS].. [--copy-aliases] " -"[--public]\n" +"[--public] [--auto-update]\n" " Copy an image from one LXD daemon to another over the network.\n" "\n" -"lxc image delete [remote:]\n" -" Delete an image from the LXD image store.\n" +" The auto-update flag instructs the server to keep this image up to\n" +" date. 
It requires the source to be an alias and for it to be public.\n" +"\n" +"lxc image delete [remote:] [remote:][...]\n" +" Delete one or more images from the LXD image store.\n" "\n" -"lxc image export [remote:]\n" +"lxc image export [remote:] [target]\n" " Export an image from the LXD image store into a distributable tarball.\n" "\n" +" The output target is optional and defaults to the working directory.\n" +" The target may be an existing directory, file name, or \"-\" to specify\n" +" stdout. The target MUST be a directory when exporting a split image.\n" +" If the target is a directory, the image's name (each part's name for\n" +" split images) as found in the database will be used for the exported\n" +" image. If the target is a file (not a directory and not stdout), then\n" +" the appropriate extension will be appended to the provided file name\n" +" based on the algorithm used to compress the image. \n" +"\n" "lxc image info [remote:]\n" " Print everything LXD knows about a given image.\n" "\n" -"lxc image list [remote:] [filter]\n" +"lxc image list [remote:] [filter] [--format table|json]\n" " List images in the LXD image store. Filters may be of the\n" " = form for property based filtering, or part of the image\n" " hash or part of the image alias name.\n" @@ -673,15 +796,24 @@ "lxc image alias delete [remote:]\n" " Delete an alias.\n" "\n" -"lxc image alias list [remote:]\n" -" List the aliases.\n" +"lxc image alias list [remote:] [filter]\n" +" List the aliases. Filters may be part of the image hash or part of the " +"image alias name.\n" +msgstr "" + +#: lxc/info.go:147 +msgid "Memory (current)" msgstr "" -#: lxc/help.go:86 +#: lxc/info.go:151 +msgid "Memory (peak)" +msgstr "" + +#: lxc/help.go:87 msgid "Missing summary." msgstr "Sommaire manquant." 
-#: lxc/monitor.go:20 +#: lxc/monitor.go:41 msgid "" "Monitor activity on the LXD server.\n" "\n" @@ -696,7 +828,7 @@ "lxc monitor --type=logging" msgstr "" -#: lxc/file.go:169 +#: lxc/file.go:183 msgid "More than one file to download, but target is not a directory" msgstr "" "Plusieurs fichiers à télécharger mais la destination n'est pas un dossier" @@ -713,73 +845,106 @@ " Rename a local container.\n" msgstr "" -#: lxc/list.go:224 lxc/remote.go:271 +#: lxc/action.go:69 +msgid "Must supply container name for: " +msgstr "" + +#: lxc/list.go:428 lxc/remote.go:376 msgid "NAME" msgstr "" -#: lxc/list.go:293 lxc/remote.go:257 +#: lxc/remote.go:350 lxc/remote.go:355 msgid "NO" msgstr "" -#: lxc/info.go:82 +#: lxc/info.go:89 #, c-format msgid "Name: %s" msgstr "" -#: lxc/image.go:117 lxc/publish.go:30 +#: lxc/image.go:168 lxc/publish.go:33 msgid "New alias to define at target" msgstr "" -#: lxc/config.go:277 +#: lxc/config.go:285 #, fuzzy msgid "No certificate provided to add" msgstr "Un certificat n'a pas été fournis" -#: lxc/config.go:300 +#: lxc/config.go:308 msgid "No fingerprint specified." msgstr "Aucune empreinte n'a été spécifié." -#: lxc/image.go:331 +#: lxc/remote.go:122 +msgid "Only https URLs are supported for simplestreams" +msgstr "" + +#: lxc/image.go:434 msgid "Only https:// is supported for remote image import." 
msgstr "" -#: lxc/help.go:63 lxc/main.go:132 +#: lxc/help.go:63 lxc/main.go:112 #, fuzzy msgid "Options:" msgstr "Opération %s" -#: lxc/image.go:425 +#: lxc/image.go:538 #, c-format msgid "Output is in %s" msgstr "" -#: lxc/exec.go:54 +#: lxc/exec.go:55 msgid "Override the terminal mode (auto, interactive or non-interactive)" msgstr "" -#: lxc/list.go:230 +#: lxc/list.go:572 +msgid "PERSISTENT" +msgstr "" + +#: lxc/list.go:429 msgid "PID" msgstr "" -#: lxc/image.go:522 lxc/remote.go:273 +#: lxc/list.go:430 +msgid "PROFILES" +msgstr "" + +#: lxc/remote.go:378 +msgid "PROTOCOL" +msgstr "" + +#: lxc/image.go:640 lxc/remote.go:379 msgid "PUBLIC" msgstr "" -#: lxc/help.go:69 +#: lxc/info.go:174 +msgid "Packets received" +msgstr "" + +#: lxc/info.go:175 +msgid "Packets sent" +msgstr "" + +#: lxc/help.go:70 #, fuzzy msgid "Path to an alternate client configuration directory." msgstr "Dossier de configuration alternatif." -#: lxc/help.go:70 +#: lxc/help.go:71 #, fuzzy msgid "Path to an alternate server directory." msgstr "Dossier de configuration alternatif." -#: lxc/main.go:39 -msgid "Permisson denied, are you in the lxd group?" +#: lxc/main.go:31 +msgid "Permission denied, are you in the lxd group?" msgstr "" -#: lxc/help.go:23 +#: lxc/info.go:103 +#, c-format +msgid "Pid: %d" +msgstr "" + +#: lxc/help.go:25 #, fuzzy msgid "" "Presents details on how to use LXD.\n" @@ -787,11 +952,11 @@ "lxd help [--all]" msgstr "Explique comment utiliser LXD.\n" -#: lxc/profile.go:186 +#: lxc/profile.go:219 msgid "Press enter to open the editor again" msgstr "" -#: lxc/config.go:491 lxc/config.go:556 lxc/image.go:598 +#: lxc/config.go:532 lxc/config.go:597 lxc/image.go:735 msgid "Press enter to start the editor again" msgstr "" @@ -807,57 +972,72 @@ msgid "Print verbose information." msgstr "" +#: lxc/manpage.go:18 +msgid "Prints all the subcommands help." 
+msgstr "" + #: lxc/version.go:18 #, fuzzy msgid "" -"Prints the version number of LXD.\n" +"Prints the version number of this client tool.\n" "\n" "lxc version" msgstr "Montre le numéro de version de LXD.\n" -#: lxc/info.go:96 -#, c-format -msgid "Processcount: %d" -msgstr "" +#: lxc/info.go:127 +#, fuzzy, c-format +msgid "Processes: %d" +msgstr "Mauvaise URL pour le conteneur %s" -#: lxc/profile.go:223 -#, c-format -msgid "Profile %s applied to %s" -msgstr "" +#: lxc/profile.go:275 +#, fuzzy, c-format +msgid "Profile %s added to %s" +msgstr "Mauvaise URL pour le conteneur %s" -#: lxc/profile.go:137 +#: lxc/profile.go:170 #, c-format msgid "Profile %s created" msgstr "" -#: lxc/profile.go:207 +#: lxc/profile.go:240 #, c-format msgid "Profile %s deleted" msgstr "" -#: lxc/init.go:134 lxc/init.go:135 lxc/launch.go:38 lxc/launch.go:39 +#: lxc/profile.go:306 +#, c-format +msgid "Profile %s removed from %s" +msgstr "" + +#: lxc/copy.go:33 lxc/copy.go:34 lxc/init.go:136 lxc/init.go:137 +#: lxc/launch.go:42 lxc/launch.go:43 msgid "Profile to apply to the new container" msgstr "" -#: lxc/info.go:93 +#: lxc/profile.go:256 +#, c-format +msgid "Profiles %s applied to %s" +msgstr "" + +#: lxc/info.go:101 #, fuzzy, c-format msgid "Profiles: %s" msgstr "Mauvaise URL pour le conteneur %s" -#: lxc/image.go:277 +#: lxc/image.go:361 msgid "Properties:" msgstr "" -#: lxc/remote.go:48 +#: lxc/remote.go:56 msgid "Public image server" msgstr "" -#: lxc/image.go:265 +#: lxc/image.go:349 #, c-format msgid "Public: %s" msgstr "" -#: lxc/publish.go:19 +#: lxc/publish.go:25 msgid "" "Publish containers as images.\n" "\n" @@ -865,11 +1045,11 @@ "value]..." msgstr "" -#: lxc/remote.go:47 +#: lxc/remote.go:54 msgid "Remote admin password" msgstr "" -#: lxc/delete.go:43 +#: lxc/delete.go:42 #, c-format msgid "Remove %s (yes/no): " msgstr "" @@ -878,31 +1058,43 @@ msgid "Require user confirmation." 
msgstr "" -#: lxc/init.go:244 +#: lxc/info.go:124 +msgid "Resources:" +msgstr "" + +#: lxc/init.go:247 #, c-format msgid "Retrieving image: %s" msgstr "" -#: lxc/image.go:525 +#: lxc/image.go:643 msgid "SIZE" msgstr "" -#: lxc/list.go:229 +#: lxc/list.go:431 msgid "SNAPSHOTS" msgstr "" -#: lxc/list.go:225 +#: lxc/list.go:432 msgid "STATE" msgstr "" -#: lxc/remote.go:155 +#: lxc/remote.go:380 +msgid "STATIC" +msgstr "" + +#: lxc/remote.go:227 msgid "Server certificate NACKed by user" msgstr "Le certificat serveur a été rejeté par l'utilisateur" -#: lxc/remote.go:201 +#: lxc/remote.go:289 msgid "Server doesn't trust us after adding our cert" msgstr "Identification refuse après l'ajout du certificat client" +#: lxc/remote.go:55 +msgid "Server protocol (lxd or simplestreams)" +msgstr "" + #: lxc/restore.go:21 msgid "" "Set the current state of a resource back to a snapshot.\n" @@ -917,15 +1109,15 @@ "lxc restore u1 snap0 # restore the snapshot" msgstr "" -#: lxc/file.go:45 +#: lxc/file.go:44 msgid "Set the file's gid on push" msgstr "Définit le gid lors de l'envoi" -#: lxc/file.go:46 +#: lxc/file.go:45 msgid "Set the file's perms on push" msgstr "Définit les permissions lors de l'envoi" -#: lxc/file.go:44 +#: lxc/file.go:43 msgid "Set the file's uid on push" msgstr "Définit le uid lors de l'envoi" @@ -933,76 +1125,139 @@ msgid "Show all commands (not just interesting ones)" msgstr "Affiche toutes les comandes (pas seulement les intéresantes)" -#: lxc/info.go:34 +#: lxc/help.go:67 +msgid "Show client version." +msgstr "" + +#: lxc/info.go:36 msgid "Show the container's last 100 log lines?" 
msgstr "" -#: lxc/image.go:262 +#: lxc/image.go:347 #, c-format msgid "Size: %.2fMB" msgstr "" -#: lxc/info.go:122 +#: lxc/info.go:194 msgid "Snapshots:" msgstr "" -#: lxc/launch.go:118 +#: lxc/image.go:371 +msgid "Source:" +msgstr "" + +#: lxc/launch.go:124 #, c-format msgid "Starting %s" msgstr "" -#: lxc/info.go:87 +#: lxc/info.go:95 #, c-format msgid "Status: %s" msgstr "" -#: lxc/delete.go:97 +#: lxc/publish.go:34 lxc/publish.go:35 +msgid "Stop the container if currently running" +msgstr "" + +#: lxc/delete.go:106 lxc/publish.go:111 msgid "Stopping container failed!" msgstr "L'arrêt du conteneur a échoué!" -#: lxc/delete.go:83 +#: lxc/action.go:45 +#, fuzzy +msgid "Store the container state (only for stop)." +msgstr "Force l'arrêt du conteneur." + +#: lxc/info.go:155 +msgid "Swap (current)" +msgstr "" + +#: lxc/info.go:159 +msgid "Swap (peak)" +msgstr "" + +#: lxc/list.go:433 +msgid "TYPE" +msgstr "" + +#: lxc/delete.go:92 msgid "The container is currently running, stop it first or pass --force." msgstr "" -#: lxc/publish.go:57 +#: lxc/publish.go:89 +msgid "" +"The container is currently running. Use --force to have it stopped and " +"restarted." +msgstr "" + +#: lxc/config.go:676 lxc/config.go:688 lxc/config.go:721 lxc/config.go:739 +#: lxc/config.go:777 lxc/config.go:795 +#, fuzzy +msgid "The device doesn't exist" +msgstr "le serveur distant %s n'existe pas" + +#: lxc/init.go:277 +#, c-format +msgid "The local image '%s' couldn't be found, trying '%s:' instead." +msgstr "" + +#: lxc/main.go:180 +msgid "The opposite of `lxc pause` is `lxc start`." +msgstr "" + +#: lxc/publish.go:62 msgid "There is no \"image name\". Did you want an alias?" msgstr "" -#: lxc/action.go:35 +#: lxc/action.go:41 msgid "Time to wait for the container before killing it." msgstr "Temps d'attente avant de tuer le conteneur." 
-#: lxc/image.go:266 +#: lxc/image.go:350 msgid "Timestamps:" msgstr "" -#: lxc/action.go:62 lxc/launch.go:126 +#: lxc/main.go:137 +msgid "To start your first container, try: lxc launch ubuntu:16.04" +msgstr "" + +#: lxc/image.go:421 +#, c-format +msgid "Transferring image: %d%%" +msgstr "" + +#: lxc/action.go:99 lxc/launch.go:132 #, c-format msgid "Try `lxc info --show-log %s` for more info" msgstr "" -#: lxc/info.go:89 +#: lxc/info.go:97 msgid "Type: ephemeral" msgstr "" -#: lxc/info.go:91 +#: lxc/info.go:99 msgid "Type: persistent" msgstr "" -#: lxc/image.go:526 +#: lxc/image.go:644 msgid "UPLOAD DATE" msgstr "" -#: lxc/remote.go:272 +#: lxc/remote.go:377 msgid "URL" msgstr "" -#: lxc/image.go:271 +#: lxc/remote.go:97 +msgid "Unable to read remote TLS certificate" +msgstr "" + +#: lxc/image.go:355 #, c-format msgid "Uploaded: %s" msgstr "" -#: lxc/main.go:132 +#: lxc/main.go:112 #, fuzzy, c-format msgid "Usage: %s" msgstr "" @@ -1018,7 +1273,7 @@ "Utilisation: lxc [sous commande] [options]\n" "Comande disponibles:\n" -#: lxc/delete.go:47 +#: lxc/delete.go:46 msgid "User aborted delete operation." msgstr "" @@ -1041,113 +1296,127 @@ msgid "Whether to show the expanded configuration" msgstr "" -#: lxc/list.go:291 lxc/remote.go:259 +#: lxc/remote.go:352 lxc/remote.go:357 msgid "YES" msgstr "" -#: lxc/main.go:66 +#: lxc/main.go:52 msgid "`lxc config profile` is deprecated, please use `lxc profile`" msgstr "" -#: lxc/launch.go:105 +#: lxc/launch.go:111 #, fuzzy msgid "bad number of things scanned from image, container or snapshot" msgstr "nombre de propriété invalide pour la ressource" -#: lxc/action.go:58 +#: lxc/action.go:95 msgid "bad result type from action" msgstr "mauvais type de réponse pour l'action!" 
-#: lxc/copy.go:78 +#: lxc/copy.go:115 msgid "can't copy to the same container name" msgstr "" -#: lxc/remote.go:247 +#: lxc/remote.go:340 msgid "can't remove the default remote" msgstr "" -#: lxc/remote.go:264 +#: lxc/remote.go:366 msgid "default" msgstr "" -#: lxc/init.go:197 lxc/init.go:202 lxc/launch.go:89 lxc/launch.go:94 +#: lxc/copy.go:131 lxc/copy.go:136 lxc/copy.go:219 lxc/copy.go:224 +#: lxc/init.go:200 lxc/init.go:205 lxc/launch.go:95 lxc/launch.go:100 #, fuzzy msgid "didn't get any affected image, container or snapshot from server" msgstr "N'a pas pû obtenir de resource du serveur" -#: lxc/main.go:25 lxc/main.go:167 +#: lxc/image.go:341 +msgid "disabled" +msgstr "" + +#: lxc/image.go:343 +msgid "enabled" +msgstr "" + +#: lxc/main.go:22 lxc/main.go:148 #, fuzzy, c-format msgid "error: %v" msgstr "erreur: %v\n" -#: lxc/help.go:40 lxc/main.go:127 +#: lxc/help.go:40 lxc/main.go:107 #, fuzzy, c-format msgid "error: unknown command: %s" msgstr "erreur: comande inconnue: %s\n" -#: lxc/launch.go:109 +#: lxc/launch.go:115 msgid "got bad version" msgstr "reçu une version invalide" -#: lxc/image.go:256 lxc/image.go:503 +#: lxc/image.go:336 lxc/image.go:620 msgid "no" msgstr "" -#: lxc/copy.go:100 +#: lxc/copy.go:158 msgid "not all the profiles from the source exist on the target" msgstr "" -#: lxc/remote.go:148 +#: lxc/remote.go:220 #, fuzzy msgid "ok (y/n)?" msgstr "ok (y/n)?" 
-#: lxc/main.go:274 lxc/main.go:278 +#: lxc/main.go:302 lxc/main.go:306 #, c-format msgid "processing aliases failed %s\n" msgstr "" -#: lxc/remote.go:291 +#: lxc/remote.go:402 #, c-format msgid "remote %s already exists" msgstr "le serveur distant %s existe déjà" -#: lxc/remote.go:243 lxc/remote.go:287 lxc/remote.go:317 lxc/remote.go:328 +#: lxc/remote.go:332 lxc/remote.go:394 lxc/remote.go:429 lxc/remote.go:445 #, c-format msgid "remote %s doesn't exist" msgstr "le serveur distant %s n'existe pas" -#: lxc/remote.go:227 +#: lxc/remote.go:315 #, c-format msgid "remote %s exists as <%s>" msgstr "le serveur distant %s existe en tant que <%s>" -#: lxc/info.go:131 +#: lxc/remote.go:336 lxc/remote.go:398 lxc/remote.go:433 +#, c-format +msgid "remote %s is static and cannot be modified" +msgstr "" + +#: lxc/info.go:205 msgid "stateful" msgstr "" -#: lxc/info.go:133 +#: lxc/info.go:207 msgid "stateless" msgstr "" -#: lxc/info.go:127 +#: lxc/info.go:201 #, c-format msgid "taken at %s" msgstr "" -#: lxc/exec.go:158 +#: lxc/exec.go:163 msgid "unreachable return reached" msgstr "Un retour inacessible à été atteint" -#: lxc/main.go:207 +#: lxc/main.go:234 msgid "wrong number of subcommand arguments" msgstr "nombre d'argument incorrect pour la sous-comande" -#: lxc/delete.go:46 lxc/image.go:259 lxc/image.go:507 +#: lxc/delete.go:45 lxc/image.go:338 lxc/image.go:624 msgid "yes" msgstr "" -#: lxc/copy.go:38 +#: lxc/copy.go:44 msgid "you must specify a source container name" msgstr "" @@ -1155,10 +1424,6 @@ #~ msgid "Bad image property: %s" #~ msgstr "(Image invalide: %s\n" -#~ msgid "Could not create server cert dir" -#~ msgstr "" -#~ "Le dossier de stockage des certificats serveurs n'a pas pû être créé" - #, fuzzy #~ msgid "" #~ "Create a read-only snapshot of a container.\n" diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/po/ja.po juju-core-2.0.0/src/github.com/lxc/lxd/po/ja.po --- juju-core-2.0~beta15/src/github.com/lxc/lxd/po/ja.po 2016-08-16 08:56:25.000000000 +0000 
+++ juju-core-2.0.0/src/github.com/lxc/lxd/po/ja.po 2016-10-13 14:31:53.000000000 +0000 @@ -7,8 +7,8 @@ msgstr "" "Project-Id-Version: LXD\n" "Report-Msgid-Bugs-To: lxc-devel@lists.linuxcontainers.org\n" -"POT-Creation-Date: 2016-02-10 22:15-0500\n" -"PO-Revision-Date: 2015-03-13 23:44+0900\n" +"POT-Creation-Date: 2016-08-30 17:59-0400\n" +"PO-Revision-Date: 2016-09-01 21:17+0900\n" "Last-Translator: KATOH Yasufumi \n" "Language-Team: Japanese \n" "Language: \n" @@ -16,7 +16,19 @@ "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" -#: lxc/config.go:36 +#: lxc/info.go:140 +msgid " Disk usage:" +msgstr " ディスク使用é‡:" + +#: lxc/info.go:163 +msgid " Memory usage:" +msgstr " メモリ消費é‡:" + +#: lxc/info.go:180 +msgid " Network usage:" +msgstr " ãƒãƒƒãƒˆãƒ¯ãƒ¼ã‚¯ä½¿ç”¨çŠ¶æ³:" + +#: lxc/config.go:37 msgid "" "### This is a yaml representation of the configuration.\n" "### Any line starting with a '# will be ignored.\n" @@ -37,7 +49,7 @@ "### Note that the name is shown but cannot be changed" msgstr "" -#: lxc/image.go:29 +#: lxc/image.go:85 msgid "" "### This is a yaml representation of the image properties.\n" "### Any line starting with a '# will be ignored.\n" @@ -47,7 +59,7 @@ "### description: My custom image" msgstr "" -#: lxc/profile.go:26 +#: lxc/profile.go:27 msgid "" "### This is a yaml representation of the profile.\n" "### Any line starting with a '# will be ignored.\n" @@ -62,144 +74,174 @@ "### devices:\n" "### eth0:\n" "### nictype: bridged\n" -"### parent: lxcbr0\n" +"### parent: lxdbr0\n" "### type: nic\n" "###\n" "### Note that the name is shown but cannot be changed" msgstr "" -#: lxc/image.go:500 +#: lxc/image.go:617 #, c-format msgid "%s (%d more)" msgstr "" #: lxc/snapshot.go:61 -#, fuzzy msgid "'/' not allowed in snapshot name" -msgstr "'/' ã¯ã‚¹ãƒŠãƒƒãƒ—ショットã®åå‰ã«ã¯ä½¿ç”¨ã§ãã¾ã›ã‚“。\n" +msgstr "'/' ã¯ã‚¹ãƒŠãƒƒãƒ—ショットã®åå‰ã«ã¯ä½¿ç”¨ã§ãã¾ã›ã‚“。" -#: lxc/info.go:109 lxc/profile.go:221 +#: lxc/profile.go:254 msgid "(none)" 
msgstr "" -#: lxc/image.go:520 lxc/image.go:542 +#: lxc/image.go:638 lxc/image.go:680 msgid "ALIAS" msgstr "" -#: lxc/image.go:524 +#: lxc/image.go:642 msgid "ARCH" msgstr "" -#: lxc/remote.go:46 -msgid "Accept certificate" +#: lxc/list.go:425 +msgid "ARCHITECTURE" msgstr "" -#: lxc/remote.go:181 +#: lxc/remote.go:53 +msgid "Accept certificate" +msgstr "証明書をå—ã‘入れã¾ã™" + +#: lxc/remote.go:269 #, c-format msgid "Admin password for %s: " msgstr "%s ã®ç®¡ç†è€…パスワード: " -#: lxc/image.go:281 -#, fuzzy +#: lxc/image.go:365 msgid "Aliases:" -msgstr "エイリアス:\n" +msgstr "エイリアス:" -#: lxc/exec.go:53 +#: lxc/exec.go:54 msgid "An environment variable of the form HOME=/home/foo" -msgstr "" +msgstr "環境変数を HOME=/home/foo ã®å½¢å¼ã§æŒ‡å®šã—ã¾ã™" -#: lxc/image.go:264 +#: lxc/image.go:348 lxc/info.go:90 #, c-format msgid "Architecture: %s" -msgstr "" +msgstr "アーキテクãƒãƒ£: %s" + +#: lxc/image.go:369 +#, c-format +msgid "Auto update: %s" +msgstr "自動更新: %s" #: lxc/help.go:49 msgid "Available commands:" -msgstr "" +msgstr "使用å¯èƒ½ãªã‚³ãƒžãƒ³ãƒ‰:" + +#: lxc/info.go:172 +msgid "Bytes received" +msgstr "å—ä¿¡ãƒã‚¤ãƒˆæ•°" -#: lxc/config.go:264 +#: lxc/info.go:173 +msgid "Bytes sent" +msgstr "é€ä¿¡ãƒã‚¤ãƒˆæ•°" + +#: lxc/config.go:274 msgid "COMMON NAME" msgstr "" -#: lxc/config.go:111 +#: lxc/list.go:426 +msgid "CREATED AT" +msgstr "" + +#: lxc/config.go:114 #, c-format msgid "Can't read from stdin: %s" -msgstr "" +msgstr "標準入力ã‹ã‚‰èª­ã¿è¾¼ã‚ã¾ã›ã‚“: %s" -#: lxc/config.go:124 lxc/config.go:157 lxc/config.go:179 +#: lxc/config.go:127 lxc/config.go:160 lxc/config.go:182 #, c-format msgid "Can't unset key '%s', it's not currently set." 
-msgstr "" +msgstr "キー '%s' ãŒæŒ‡å®šã•ã‚Œã¦ã„ãªã„ã®ã§å‰Šé™¤ã§ãã¾ã›ã‚“。" -#: lxc/profile.go:329 +#: lxc/profile.go:420 msgid "Cannot provide container name to list" -msgstr "" +msgstr "コンテナåã‚’å–å¾—ã§ãã¾ã›ã‚“" -#: lxc/remote.go:147 -#, fuzzy, c-format +#: lxc/remote.go:219 +#, c-format msgid "Certificate fingerprint: %x" -msgstr "証明書ã®ãƒ•ã‚£ãƒ³ã‚¬ãƒ¼ãƒ—リント: % x\n" +msgstr "証明書ã®ãƒ•ã‚£ãƒ³ã‚¬ãƒ¼ãƒ—リント: %x" -#: lxc/action.go:27 -#, fuzzy, c-format +#: lxc/action.go:33 +#, c-format msgid "" "Changes state of one or more containers to %s.\n" "\n" -"lxc %s [...]" -msgstr "コンテナã®çŠ¶æ…‹ã‚’ %s ã«å¤‰æ›´ã—ã¾ã™ã€‚\n" +"lxc %s [...]%s" +msgstr "" +"1ã¤ã¾ãŸã¯è¤‡æ•°ã®ã‚³ãƒ³ãƒ†ãƒŠã®çŠ¶æ…‹ã‚’ %s ã«å¤‰æ›´ã—ã¾ã™ã€‚\n" +"\n" +"lxc %s [...]%s" -#: lxc/remote.go:204 +#: lxc/remote.go:292 msgid "Client certificate stored at server: " msgstr "クライアント証明書ãŒã‚µãƒ¼ãƒã«æ ¼ç´ã•ã‚Œã¾ã—ãŸ: " -#: lxc/list.go:80 +#: lxc/list.go:121 lxc/list.go:122 msgid "Columns" -msgstr "" +msgstr "カラムレイアウト" -#: lxc/init.go:132 lxc/init.go:133 lxc/launch.go:36 lxc/launch.go:37 +#: lxc/copy.go:31 lxc/copy.go:32 lxc/init.go:134 lxc/init.go:135 +#: lxc/launch.go:40 lxc/launch.go:41 msgid "Config key/value to apply to the new container" -msgstr "" +msgstr "æ–°ã—ã„コンテナã«é©ç”¨ã™ã‚‹ã‚­ãƒ¼/値ã®è¨­å®š" -#: lxc/config.go:490 lxc/config.go:555 lxc/image.go:597 lxc/profile.go:185 -#, fuzzy, c-format +#: lxc/config.go:531 lxc/config.go:596 lxc/image.go:734 lxc/profile.go:218 +#, c-format msgid "Config parsing error: %s" -msgstr "エラー: %v\n" +msgstr "設定ã®æ§‹æ–‡ã‚¨ãƒ©ãƒ¼: %s" -#: lxc/main.go:37 +#: lxc/main.go:29 msgid "Connection refused; is LXD running?" -msgstr "" +msgstr "接続ãŒæ‹’å¦ã•ã‚Œã¾ã—ãŸã€‚LXDãŒå®Ÿè¡Œã•ã‚Œã¦ã„ã¾ã™ã‹?" 
-#: lxc/publish.go:54 -#, fuzzy +#: lxc/publish.go:59 msgid "Container name is mandatory" -msgstr "コンテナãŒè¦‹ã¤ã‹ã‚Šã¾ã›ã‚“" +msgstr "コンテナåを指定ã™ã‚‹å¿…è¦ãŒã‚ã‚Šã¾ã™" -#: lxc/init.go:206 -#, fuzzy, c-format +#: lxc/copy.go:140 lxc/copy.go:228 lxc/init.go:210 +#, c-format msgid "Container name is: %s" -msgstr "コンテナãŒè¦‹ã¤ã‹ã‚Šã¾ã›ã‚“" +msgstr "コンテナå: %s" -#: lxc/publish.go:81 lxc/publish.go:101 -#, fuzzy, c-format +#: lxc/publish.go:141 lxc/publish.go:156 +#, c-format msgid "Container published with fingerprint: %s" -msgstr "証明書ã®ãƒ•ã‚£ãƒ³ã‚¬ãƒ¼ãƒ—リント: % x\n" +msgstr "コンテナã¯ä»¥ä¸‹ã®ãƒ•ã‚£ãƒ³ã‚¬ãƒ¼ãƒ—リント㧠publish ã•ã‚Œã¾ã™: %s" -#: lxc/image.go:116 +#: lxc/image.go:166 msgid "Copy aliases from source" -msgstr "" +msgstr "ソースã‹ã‚‰ã‚¨ã‚¤ãƒªã‚¢ã‚¹ã‚’コピーã—ã¾ã—ãŸ" -#: lxc/copy.go:22 +#: lxc/copy.go:24 msgid "" "Copy containers within or in between lxd instances.\n" "\n" -"lxc copy [remote:] [remote:] [--" -"ephemeral|e]" +"lxc copy [remote:] [[remote:]] [--" +"ephemeral|e] [--profile|-p ...] [--config|-c ...]" msgstr "" +"LXDインスタンス内もã—ãã¯LXDインスタンス間ã§ã‚³ãƒ³ãƒ†ãƒŠã‚’コピーã—ã¾ã™ã€‚\n" +"\n" +"lxc copy [remote:] [remote:]\n" +"[--ephemeral|e] [--profile|-p ...] 
[--config|-c ...]" -#: lxc/image.go:211 +#: lxc/image.go:280 #, c-format msgid "Copying the image: %s" -msgstr "" +msgstr "イメージã®ã‚³ãƒ”ー中: %s" + +#: lxc/remote.go:234 +msgid "Could not create server cert dir" +msgstr "サーãƒè¨¼æ˜Žæ›¸æ ¼ç´ç”¨ã®ãƒ‡ã‚£ãƒ¬ã‚¯ãƒˆãƒªã‚’作æˆã§ãã¾ã›ã‚“。" #: lxc/snapshot.go:21 msgid "" @@ -220,22 +262,34 @@ "Example:\n" "lxc snapshot u1 snap0" msgstr "" +"コンテナã®èª­ã¿å–り専用ã®ã‚¹ãƒŠãƒƒãƒ—ショットを作æˆã—ã¾ã™ã€‚\n" +"\n" +"lxc snapshot [remote:] [--stateful]\n" +"\n" +"コンテナã®ã‚¹ãƒŠãƒƒãƒ—ショットを作æˆã—ã¾ã™ (オプションã§ã‚³ãƒ³ãƒ†ãƒŠã®ãƒ¡ãƒ¢ãƒªçŠ¶æ…‹ã‚’\n" +"å«ã‚ã¦)。--statefulãŒæŒ‡å®šã•ã‚ŒãŸå ´åˆã€LXDã¯ã‚³ãƒ³ãƒ†ãƒŠãƒ—ロセスã®ãƒ¡ãƒ¢ãƒªçŠ¶æ…‹ã€TCP\n" +"接続ãªã©ã®å®Ÿè¡ŒçŠ¶æ…‹ã‚’ã€ã‚ã¨ã§lxc restoreを使ã£ã¦ãƒªã‚¹ãƒˆã‚¢ã§ãるよã†ã«ã€ã‚³ãƒ³ãƒ†\n" +"ナã®ãƒã‚§ãƒƒã‚¯ãƒã‚¤ãƒ³ãƒˆã‚’å–å¾—ã—よã†ã¨ã—ã¾ã™ (ã—ã‹ã—ã€ã‚¿ã‚¤ãƒ ã‚¢ã‚¦ãƒˆã‚¦ã‚£ãƒ³ãƒ‰ã‚¦ãŒ\n" +"expireã—ãŸã‚ã¨ã®TCP接続ã®ã‚ˆã†ã«æ­£å¸¸ã«ãƒªã‚¹ãƒˆã‚¢ã§ããªã„ã‚‚ã®ã‚‚ã‚ã‚Šã¾ã™)\n" +"\n" +"例:\n" +"lxc snapshot u1 snap0" -#: lxc/image.go:269 lxc/info.go:84 +#: lxc/image.go:353 lxc/info.go:92 #, c-format msgid "Created: %s" -msgstr "" +msgstr "作æˆæ—¥æ™‚: %s" -#: lxc/init.go:175 lxc/launch.go:112 +#: lxc/init.go:177 lxc/launch.go:118 #, c-format msgid "Creating %s" -msgstr "" +msgstr "%s を作æˆä¸­" -#: lxc/init.go:173 +#: lxc/init.go:175 msgid "Creating the container" -msgstr "" +msgstr "コンテナを作æˆä¸­" -#: lxc/image.go:523 +#: lxc/image.go:641 lxc/image.go:682 msgid "DESCRIPTION" msgstr "" @@ -249,139 +303,163 @@ "Destroy containers or snapshots with any attached data (configuration, " "snapshots, ...)." msgstr "" +"コンテナもã—ãã¯ã‚³ãƒ³ãƒ†ãƒŠã®ã‚¹ãƒŠãƒƒãƒ—ショットを消去ã—ã¾ã™ã€‚\n" +"\n" +"lxc delete [remote:][/] [remote:]" +"[[]...]\n" +"\n" +"付属ã™ã‚‹ãƒ‡ãƒ¼ã‚¿ (設定ã€ã‚¹ãƒŠãƒƒãƒ—ショットã€...) 
ã¨ä¸€ç·’ã«ã‚³ãƒ³ãƒ†ãƒŠã‚‚ã—ãã¯ã‚³ãƒ³ãƒ†\n" +"ナã®ã‚¹ãƒŠãƒƒãƒ—ショットを消去ã—ã¾ã™ã€‚" -#: lxc/config.go:603 -#, fuzzy, c-format +#: lxc/config.go:648 +#, c-format msgid "Device %s added to %s" -msgstr "デãƒã‚¤ã‚¹ %s ㌠%s ã«è¿½åŠ ã•ã‚Œã¾ã—ãŸ\n" +msgstr "デãƒã‚¤ã‚¹ %s ㌠%s ã«è¿½åŠ ã•ã‚Œã¾ã—ãŸ" -#: lxc/config.go:631 -#, fuzzy, c-format +#: lxc/config.go:835 +#, c-format msgid "Device %s removed from %s" -msgstr "デãƒã‚¤ã‚¹ %s ㌠%s ã‹ã‚‰å‰Šé™¤ã•ã‚Œã¾ã—ãŸ\n" +msgstr "デãƒã‚¤ã‚¹ %s ㌠%s ã‹ã‚‰å‰Šé™¤ã•ã‚Œã¾ã—ãŸ" -#: lxc/list.go:228 +#: lxc/list.go:570 msgid "EPHEMERAL" msgstr "" -#: lxc/config.go:266 +#: lxc/config.go:276 msgid "EXPIRY DATE" msgstr "" -#: lxc/main.go:55 +#: lxc/main.go:41 msgid "Enables debug mode." msgstr "デãƒãƒƒã‚°ãƒ¢ãƒ¼ãƒ‰ã‚’有効ã«ã—ã¾ã™ã€‚" -#: lxc/main.go:54 +#: lxc/main.go:40 msgid "Enables verbose mode." msgstr "詳細モードを有効ã«ã—ã¾ã™ã€‚" -#: lxc/help.go:68 +#: lxc/help.go:69 msgid "Environment:" -msgstr "" +msgstr "環境変数:" -#: lxc/copy.go:29 lxc/copy.go:30 lxc/init.go:136 lxc/init.go:137 -#: lxc/launch.go:40 lxc/launch.go:41 +#: lxc/copy.go:35 lxc/copy.go:36 lxc/init.go:138 lxc/init.go:139 +#: lxc/launch.go:44 lxc/launch.go:45 msgid "Ephemeral container" -msgstr "" +msgstr "Ephemeral コンテナ" #: lxc/monitor.go:56 msgid "Event type to listen for" -msgstr "" +msgstr "Listenã™ã‚‹ã‚¤ãƒ™ãƒ³ãƒˆã‚¿ã‚¤ãƒ—" -#: lxc/exec.go:27 -#, fuzzy +#: lxc/exec.go:45 msgid "" "Execute the specified command in a container.\n" "\n" "lxc exec [remote:]container [--mode=auto|interactive|non-interactive] [--env " -"EDITOR=/usr/bin/vim]... " -msgstr "コンテナã§æŒ‡å®šã—ãŸã‚³ãƒžãƒ³ãƒ‰ã‚’実行ã—ã¾ã™ã€‚\n" +"EDITOR=/usr/bin/vim]... \n" +"\n" +"Mode defaults to non-interactive, interactive mode is selected if both stdin " +"AND stdout are terminals (stderr is ignored)." +msgstr "" +"指定ã—ãŸã‚³ãƒžãƒ³ãƒ‰ã‚’コンテナ内ã§å®Ÿè¡Œã—ã¾ã™ã€‚\n" +"\n" +"lxc exec [remote:]container [--mode=auto|interactive|non-interactive] [--env " +"EDITOR=/usr/bin/vim]... 
\n" +"\n" +"デフォルトã®ãƒ¢ãƒ¼ãƒ‰ã¯ non-interactive ã§ã™ã€‚ã‚‚ã—標準入出力ãŒä¸¡æ–¹ã¨ã‚‚ターミナ\n" +"ルã®å ´åˆã¯ interactive モードãŒé¸æŠžã•ã‚Œã¾ã™ (標準エラー出力ã¯ç„¡è¦–ã•ã‚Œã¾ã™)。" -#: lxc/image.go:273 +#: lxc/image.go:357 #, c-format msgid "Expires: %s" -msgstr "" +msgstr "失効日時: %s" -#: lxc/image.go:275 +#: lxc/image.go:359 msgid "Expires: never" -msgstr "" +msgstr "失効日時: 失効ã—ãªã„" -#: lxc/config.go:263 lxc/image.go:521 lxc/image.go:543 +#: lxc/config.go:273 lxc/image.go:639 lxc/image.go:681 msgid "FINGERPRINT" msgstr "" -#: lxc/image.go:255 -#, fuzzy, c-format +#: lxc/list.go:124 +msgid "Fast mode (same as --columns=nsacPt" +msgstr "Fast モード (--columns=nsacPt ã¨åŒã˜)" + +#: lxc/image.go:346 +#, c-format msgid "Fingerprint: %s" -msgstr "証明書ã®ãƒ•ã‚£ãƒ³ã‚¬ãƒ¼ãƒ—リント: % x\n" +msgstr "証明書ã®ãƒ•ã‚£ãƒ³ã‚¬ãƒ¼ãƒ—リント: %s" -#: lxc/finger.go:17 -#, fuzzy +#: lxc/finger.go:15 msgid "" "Fingers the LXD instance to check if it is up and working.\n" "\n" "lxc finger " -msgstr "LXDインスタンスãŒç¨¼åƒä¸­ã‹ã‚’確èªã—ã¾ã™ã€‚\n" - -#: lxc/main.go:156 -msgid "For example: 'lxd-images import ubuntu --alias ubuntu'." msgstr "" +"LXDインスタンスãŒç¨¼åƒä¸­ã‹ã‚’確èªã—ã¾ã™ã€‚\n" +"\n" +"lxc finger " -#: lxc/action.go:36 +#: lxc/action.go:42 lxc/action.go:43 msgid "Force the container to shutdown." msgstr "コンテナを強制シャットダウンã—ã¾ã™ã€‚" #: lxc/delete.go:34 lxc/delete.go:35 msgid "Force the removal of stopped containers." -msgstr "" +msgstr "åœæ­¢ã—ãŸã‚³ãƒ³ãƒ†ãƒŠã‚’強制的ã«å‰Šé™¤ã—ã¾ã™ã€‚" -#: lxc/main.go:56 +#: lxc/main.go:42 msgid "Force using the local unix socket." +msgstr "強制的ã«ãƒ­ãƒ¼ã‚«ãƒ«ã®UNIXソケットを使ã„ã¾ã™ã€‚" + +#: lxc/image.go:169 lxc/list.go:123 +msgid "Format" msgstr "" -#: lxc/main.go:148 -#, fuzzy +#: lxc/remote.go:67 msgid "Generating a client certificate. This may take a minute..." -msgstr "クライアント証明書を生æˆã—ã¾ã™ã€‚1分ãらã„ã‹ã‹ã‚Šã¾ã™â€¦\n" +msgstr "クライアント証明書を生æˆã—ã¾ã™ã€‚1分ãらã„ã‹ã‹ã‚Šã¾ã™..." 
-#: lxc/list.go:226 +#: lxc/list.go:423 msgid "IPV4" msgstr "" -#: lxc/list.go:227 +#: lxc/list.go:424 msgid "IPV6" msgstr "" -#: lxc/config.go:265 +#: lxc/config.go:275 msgid "ISSUE DATE" msgstr "" -#: lxc/main.go:155 +#: lxc/main.go:136 msgid "" -"If this is your first run, you will need to import images using the 'lxd-" -"images' script." -msgstr "" +"If this is your first time using LXD, you should also run: sudo lxd init" +msgstr "åˆã‚㦠LXD を使ã†å ´åˆã€sudo lxd init ã¨å®Ÿè¡Œã™ã‚‹å¿…è¦ãŒã‚ã‚Šã¾ã™" -#: lxc/main.go:57 +#: lxc/main.go:43 msgid "Ignore aliases when determining what command to run." -msgstr "" +msgstr "ã©ã®ã‚³ãƒžãƒ³ãƒ‰ã‚’実行ã™ã‚‹ã‹æ±ºã‚ã‚‹éš›ã«ã‚¨ã‚¤ãƒªã‚¢ã‚¹ã‚’無視ã—ã¾ã™ã€‚" -#: lxc/image.go:216 +#: lxc/action.go:46 +msgid "Ignore the container state (only for start)." +msgstr "コンテナã®çŠ¶æ…‹ã‚’無視ã—ã¾ã™ (startã®ã¿)。" + +#: lxc/image.go:285 msgid "Image copied successfully!" -msgstr "" +msgstr "イメージã®ã‚³ãƒ”ーãŒæˆåŠŸã—ã¾ã—ãŸ!" -#: lxc/image.go:339 -#, fuzzy, c-format +#: lxc/image.go:442 +#, c-format msgid "Image imported with fingerprint: %s" -msgstr "証明書ã®ãƒ•ã‚£ãƒ³ã‚¬ãƒ¼ãƒ—リント: % x\n" +msgstr "イメージã¯ä»¥ä¸‹ã®ãƒ•ã‚£ãƒ³ã‚¬ãƒ¼ãƒ—リントã§ã‚¤ãƒ³ãƒãƒ¼ãƒˆã•ã‚Œã¾ã—ãŸ: %s" -#: lxc/info.go:95 +#: lxc/image.go:429 #, c-format -msgid "Init: %d" -msgstr "" +msgid "Importing the image: %s" +msgstr "イメージã®ã‚¤ãƒ³ãƒãƒ¼ãƒˆä¸­: %s" -#: lxc/init.go:21 +#: lxc/init.go:73 msgid "" "Initialize a container from a particular image.\n" "\n" @@ -396,31 +474,59 @@ "Example:\n" "lxc init ubuntu u1" msgstr "" +"指定ã—ãŸã‚¤ãƒ¡ãƒ¼ã‚¸ã‹ã‚‰ã‚³ãƒ³ãƒ†ãƒŠã‚’åˆæœŸåŒ–ã—ã¾ã™ã€‚\n" +"\n" +"lxc init [remote:] [remote:][] [--ephemeral|-e] [--profile|-p " +"...] 
[--config|-c ...]\n" +"\n" +"指定ã—ãŸã‚¤ãƒ¡ãƒ¼ã‚¸ã¨ã‚³ãƒ³ãƒ†ãƒŠåを使ã£ã¦ã‚³ãƒ³ãƒ†ãƒŠã‚’åˆæœŸåŒ–ã—ã¾ã™ã€‚\n" +"\n" +"-p を指定ã—ãªã„å ´åˆã¯ãƒ‡ãƒ•ã‚©ãƒ«ãƒˆã®ãƒ—ロファイルを使ã„ã¾ã™ã€‚\n" +"\"-p\" ã®ã‚ˆã†ã«å¼•æ•°ãªã—㧠-p を使ã†ã¨ãƒ—ロファイルãªã—ã¨ãªã‚Šã¾ã™ã€‚\n" +"\n" +"例:\n" +"lxc init ubuntu u1" -#: lxc/init.go:63 lxc/init.go:68 -#, fuzzy +#: lxc/remote.go:137 +#, c-format +msgid "Invalid URL scheme \"%s\" in \"%s\"" +msgstr "ä¸æ­£ãª URL スキーム \"%s\" (\"%s\" 内)" + +#: lxc/config.go:254 +msgid "Invalid certificate" +msgstr "ä¸æ­£ãªè¨¼æ˜Žæ›¸ã§ã™" + +#: lxc/init.go:30 lxc/init.go:35 msgid "Invalid configuration key" -msgstr "設定を管ç†ã—ã¾ã™ã€‚\n" +msgstr "æ­£ã—ããªã„設定項目 (key) ã§ã™" -#: lxc/file.go:181 +#: lxc/file.go:195 #, c-format msgid "Invalid source %s" msgstr "ä¸æ­£ãªã‚½ãƒ¼ã‚¹ %s" -#: lxc/file.go:58 +#: lxc/file.go:57 #, c-format msgid "Invalid target %s" msgstr "ä¸æ­£ãªé€ã‚Šå…ˆ %s" -#: lxc/info.go:97 +#: lxc/info.go:121 msgid "Ips:" -msgstr "" +msgstr "IPアドレス:" + +#: lxc/image.go:167 +msgid "Keep the image up to date after initial copy" +msgstr "最åˆã«ã‚³ãƒ”ーã—ãŸå¾Œã‚‚常ã«ã‚¤ãƒ¡ãƒ¼ã‚¸ã‚’最新ã®çŠ¶æ…‹ã«ä¿ã¤" -#: lxc/main.go:35 -msgid "LXD socket not found; is LXD running?" +#: lxc/list.go:427 +msgid "LAST USED AT" msgstr "" -#: lxc/launch.go:20 +#: lxc/main.go:27 +msgid "LXD socket not found; is LXD installed and running?" +msgstr "LXD ã®ã‚½ã‚±ãƒƒãƒˆãŒè¦‹ã¤ã‹ã‚Šã¾ã›ã‚“。LXD ãŒå®Ÿè¡Œã•ã‚Œã¦ã„ã¾ã™ã‹?" + +#: lxc/launch.go:22 msgid "" "Launch a container from a particular image.\n" "\n" @@ -433,58 +539,160 @@ "Specifying \"-p\" with no argument will result in no profile.\n" "\n" "Example:\n" -"lxc launch ubuntu u1" +"lxc launch ubuntu:16.04 u1" msgstr "" +"指定ã—ãŸã‚¤ãƒ¡ãƒ¼ã‚¸ã‹ã‚‰ã‚³ãƒ³ãƒ†ãƒŠã‚’èµ·å‹•ã—ã¾ã™ã€‚\n" +"\n" +"lxc launch [remote:] [remote:][] [--ephemeral|-e] [--profile|-p " +"...] 
[--config|-c ...]\n" +"\n" +"指定ã—ãŸã‚¤ãƒ¡ãƒ¼ã‚¸ã¨åå‰ã‚’使ã£ã¦ã‚³ãƒ³ãƒ†ãƒŠã‚’èµ·å‹•ã—ã¾ã™ã€‚\n" +"\n" +"-p を指定ã—ãªã„å ´åˆã¯ãƒ‡ãƒ•ã‚©ãƒ«ãƒˆã®ãƒ—ロファイルを使ã„ã¾ã™ã€‚\n" +"\"-p\" ã®ã‚ˆã†ã«å¼•æ•°ãªã—㧠-p を使ã†ã¨ãƒ—ロファイルãªã—ã¨ãªã‚Šã¾ã™ã€‚\n" +"\n" +"例:\n" +"lxc launch ubuntu:16.04 u1" #: lxc/info.go:25 msgid "" -"List information on containers.\n" +"List information on LXD servers and containers.\n" "\n" -"This will support remotes and images as well, but only containers for now.\n" +"For a container:\n" +" lxc info [:]container [--show-log]\n" "\n" -"lxc info [:]container [--show-log]" +"For a server:\n" +" lxc info [:]" msgstr "" +"LXD サーãƒã¨ã‚³ãƒ³ãƒ†ãƒŠã®æƒ…報を一覧表示ã—ã¾ã™ã€‚\n" +"\n" +"コンテナ情報:\n" +" lxc info [:]container [--show-log]\n" +"\n" +"サーãƒæƒ…å ±:\n" +" lxc info [:]" -#: lxc/list.go:54 +#: lxc/list.go:68 msgid "" "Lists the available resources.\n" "\n" -"lxc list [resource] [filters] -c [columns]\n" +"lxc list [resource] [filters] [--format table|json] [-c columns] [--fast]\n" "\n" "The filters are:\n" -"* A single keyword like \"web\" which will list any container with \"web\" " -"in its name.\n" +"* A single keyword like \"web\" which will list any container with a name " +"starting by \"web\".\n" +"* A regular expression on the container name. (e.g. .*web.*01$)\n" "* A key/value pair referring to a configuration item. 
For those, the " "namespace can be abreviated to the smallest unambiguous identifier:\n" -"* \"user.blah=abc\" will list all containers with the \"blah\" user property " -"set to \"abc\"\n" -"* \"u.blah=abc\" will do the same\n" -"* \"security.privileged=1\" will list all privileged containers\n" -"* \"s.privileged=1\" will do the same\n" -"\n" -"The columns are:\n" +" * \"user.blah=abc\" will list all containers with the \"blah\" user " +"property set to \"abc\".\n" +" * \"u.blah=abc\" will do the same\n" +" * \"security.privileged=1\" will list all privileged containers\n" +" * \"s.privileged=1\" will do the same\n" +"* A regular expression matching a configuration item or its value. (e.g. " +"volatile.eth0.hwaddr=00:16:3e:.*)\n" +"\n" +"The -c option takes a comma separated list of arguments that control\n" +"which container attributes to output when displaying in table format.\n" +"Column arguments are either pre-defined shorthand chars (see below),\n" +"or (extended) config keys. Commas between consecutive shorthand chars\n" +"are optional.\n" +"\n" +"Pre-defined shorthand chars:\n" +"* 4 - IPv4 address\n" +"* 6 - IPv6 address\n" +"* a - architecture\n" +"* c - creation date\n" +"* l - last used date\n" "* n - name\n" +"* p - pid of container init process\n" +"* P - profiles\n" "* s - state\n" -"* 4 - IP4\n" -"* 6 - IP6\n" -"* e - ephemeral\n" -"* S - snapshots\n" -"* p - pid of container init process" -msgstr "" +"* S - number of snapshots\n" +"* t - type (persistent or ephemeral)\n" +"\n" +"Config key syntax: key[:name][:maxWidth]\n" +"* key - The (extended) config key to display\n" +"* name - Name to display in the column header, defaults to the key\n" +" if not specified or if empty (to allow defining maxWidth\n" +" without a custom name, e.g. 
user.key::0)\n" +"* maxWidth - Max width of the column (longer results are truncated).\n" +" -1 == unlimited\n" +" 0 == width of column header\n" +" >0 == max width in chars\n" +" Default is -1 (unlimited)\n" +"\n" +"Default column layout: ns46tS\n" +"Fast column layout: nsacPt\n" +"\n" +"Example: lxc list -c n,volatile.base_image:\"BASE IMAGE\":0,s46,volatile." +"eth0.hwaddr:MAC\n" +msgstr "" +"利用å¯èƒ½ãªãƒªã‚½ãƒ¼ã‚¹ã‚’一覧表示ã—ã¾ã™ã€‚\n" +"\n" +"lxc list [resource] [filters] [--format table|json] [-c columns] [--fast]\n" +"\n" +"フィルタã®æŒ‡å®š:\n" +"* å˜ä¸€ã® \"web\" ã®ã‚ˆã†ãªã‚­ãƒ¼ãƒ¯ãƒ¼ãƒ‰ã‚’指定ã™ã‚‹ã¨ã€åå‰ãŒ \"web\" ã§ã¯ã˜ã¾ã‚‹ã‚³ãƒ³ãƒ†\n" +" ナãŒä¸€è¦§è¡¨ç¤ºã•ã‚Œã¾ã™ã€‚\n" +"* コンテナåã®æ­£è¦è¡¨ç¾ (例: .*web.*01$)\n" +"* 設定項目ã®ã‚­ãƒ¼ã¨å€¤ã€‚キーã®åå‰ç©ºé–“ã¯ä¸€æ„ã«è­˜åˆ¥ã§ãã‚‹å ´åˆã¯çŸ­ç¸®ã™ã‚‹ã“ã¨ãŒã§\n" +" ãã¾ã™:\n" +" * \"user.blah=abc\" 㯠\"blah\" ã¨ã„ㆠuser プロパティ㌠\"abc\" ã«è¨­å®šã•ã‚Œã¦ã„ã‚‹\n" +" コンテナをã™ã¹ã¦ä¸€è¦§è¡¨ç¤ºã—ã¾ã™ã€‚\n" +" * \"u.blah=abc\" ã¯ä¸Šè¨˜ã¨åŒã˜æ„味ã«ãªã‚Šã¾ã™ã€‚\n" +" * \"security.privileged=1\" ã¯ç‰¹æ¨©ã‚³ãƒ³ãƒ†ãƒŠã‚’ã™ã¹ã¦ä¸€è¦§è¡¨ç¤ºã—ã¾ã™ã€‚\n" +" * \"s.privilaged=1\" ã¯ä¸Šè¨˜ã¨åŒã˜æ„味ã«ãªã‚Šã¾ã™ã€‚\n" +" * 設定項目もã—ãã¯å€¤ã¨ãƒžãƒƒãƒã™ã‚‹æ­£è¦è¡¨ç¾\n" +" (例:volatile.eth0.hwaddr=00:16:3e:.*)\n" +"\n" +"-c オプションã«ã¯ã€è¡¨å½¢å¼ã§è¡¨ç¤ºã™ã‚‹éš›ã«ã©ã®ã‚³ãƒ³ãƒ†ãƒŠã®å±žæ€§ã‚’表示ã™ã‚‹ã‹\n" +"を指定ã™ã‚‹ã‚³ãƒ³ãƒžåŒºåˆ‡ã‚Šã®ãƒªã‚¹ãƒˆã‚’指定ã—ã¾ã™ã€‚カラムã®æŒ‡å®šã¯ã€å®šç¾©æ¸ˆã¿ã®\n" +"短縮形 (以下をå‚ç…§) ã‚‚ã—ã㯠(æ‹¡å¼µã•ã‚ŒãŸ) 設定キーã®ã©ã¡ã‚‰ã‹ã‚’指定ã—ã¾\n" +"ã™ã€‚連続ã™ã‚‹çŸ­ç¸®å½¢ã®é–“ã®ã‚³ãƒ³ãƒžã¯çœç•¥ã§ãã¾ã™ã€‚\n" +"\n" +"短縮形ã§æŒ‡å®šã§ãる文字:\n" +"* 4 - IPv4 アドレス\n" +"* 6 - IPv6 アドレス\n" +"* a - アーキテクãƒãƒ£\n" +"* c - 作æˆæ—¥\n" +"* n - åå‰\n" +"* p - コンテナ㮠init プロセス㮠pid\n" +"* P - プロファイル\n" +"* s - 状態\n" +"* S - スナップショットã®æ•°\n" +"* t - タイプ (persistent or ephemeral)\n" +"\n" +"設定キーã®æ–‡æ³•: key[:name][:maxWidth]\n" +"* key - 表示ã™ã‚‹ (æ‹¡å¼µã•ã‚ŒãŸ) 設定キー\n" +"* name - カラムã®ãƒ˜ãƒƒãƒ€ã«è¡¨ç¤ºã™ã‚‹åå‰ã€‚指定ã—ãªã„å ´åˆã€ã‚‚ã—ãã¯ç©º\n" +" ã®å ´åˆã®ãƒ‡ãƒ•ã‚©ãƒ«ãƒˆã¯ã‚­ãƒ¼ã€‚\n" +" 
(åå‰ã‚’指定ã›ãšã« maxWidth を指定ã§ãã¾ã™ã€‚例: user.key::0)\n" +"* maxWidth - カラムã®æœ€å¤§å¹… (çµæžœãŒã“れより長ã„å ´åˆã¯åˆ‡ã‚Šè©°ã‚られã¾ã™)\n" +" -1 == 制é™ãªã—\n" +" 0 == カラムヘッダã®å¹…\n" +" >0 == 文字列ã®æœ€å¤§å¹…\n" +" デフォルト㯠-1 (制é™ãªã—)\n" +"\n" +"デフォルトã®ã‚«ãƒ©ãƒ ãƒ¬ã‚¤ã‚¢ã‚¦ãƒˆ: ns46tS\n" +"Fast モードã®ã‚«ãƒ©ãƒ ãƒ¬ã‚¤ã‚¢ã‚¦ãƒˆ: nsacPt\n" +"\n" +"例:\n" +"lxc list -c n,volatile.base_image:\"BASE IMAGE\":0,s46,volatile.eth0.hwaddr:MAC\n" -#: lxc/info.go:151 +#: lxc/info.go:225 msgid "Log:" -msgstr "" +msgstr "ログ:" -#: lxc/image.go:115 +#: lxc/image.go:165 msgid "Make image public" -msgstr "" +msgstr "イメージを public ã«ã™ã‚‹" -#: lxc/publish.go:29 +#: lxc/publish.go:32 msgid "Make the image public" -msgstr "" +msgstr "イメージを public ã«ã™ã‚‹" -#: lxc/profile.go:46 +#: lxc/profile.go:48 msgid "" "Manage configuration profiles.\n" "\n" @@ -495,61 +703,132 @@ "specified remote.\n" "lxc profile get Get profile configuration.\n" "lxc profile set Set profile configuration.\n" +"lxc profile unset Unset profile configuration.\n" "lxc profile delete Delete a profile.\n" "lxc profile edit \n" " Edit profile, either by launching external editor or reading STDIN.\n" " Example: lxc profile edit # launch editor\n" " cat profile.yml | lxc profile edit # read from " "profile.yml\n" -"lxc profile apply \n" -" Apply a comma-separated list of profiles to a container, in order.\n" +"\n" +"lxc profile assign \n" +" Assign a comma-separated list of profiles to a container, in order.\n" " All profiles passed in this call (and only those) will be applied\n" -" to the specified container.\n" -" Example: lxc profile apply foo default,bar # Apply default and bar\n" -" lxc profile apply foo default # Only default is active\n" -" lxc profile apply '' # no profiles are applied anymore\n" -" lxc profile apply bar,default # Apply default second now\n" +" to the specified container, i.e. it sets the list of profiles exactly " +"to\n" +" those specified in this command. 
To add/remove a particular profile from " +"a\n" +" container, use {add|remove} below.\n" +" Example: lxc profile assign foo default,bar # Apply default and bar\n" +" lxc profile assign foo default # Only default is active\n" +" lxc profile assign '' # no profiles are applied anymore\n" +" lxc profile assign bar,default # Apply default second now\n" +"lxc profile add # add a profile to a container\n" +"lxc profile remove # remove the profile from a " +"container\n" "\n" "Devices:\n" -"lxc profile device list List devices in the given " -"profile.\n" -"lxc profile device show Show full device details in " -"the given profile.\n" -"lxc profile device remove Remove a device from a " -"profile.\n" +"lxc profile device list List " +"devices in the given profile.\n" +"lxc profile device show Show " +"full device details in the given profile.\n" +"lxc profile device remove Remove a " +"device from a profile.\n" +"lxc profile device get <[remote:]profile> Get a " +"device property.\n" +"lxc profile device set <[remote:]profile> Set a " +"device property.\n" +"lxc profile device unset <[remote:]profile> Unset a " +"device property.\n" "lxc profile device add " "[key=value]...\n" " Add a profile device, such as a disk or a nic, to the containers\n" " using the specified profile." 
msgstr "" +"設定プロファイルを管ç†ã—ã¾ã™ã€‚\n" +"\n" +"lxc profile list [filters]\n" +" 利用å¯èƒ½ãªãƒ—ロファイルを一覧ã—ã¾ã™ã€‚\n" +"lxc profile show \n" +" プロファイルã®è©³ç´°ã‚’表示ã—ã¾ã™ã€‚\n" +"lxc profile create \n" +" プロファイルを作æˆã—ã¾ã™ã€‚\n" +"lxc profile copy \n" +" プロファイルを remote ã«ã‚³ãƒ”ーã—ã¾ã™ã€‚\n" +"lxc profile get \n" +" プロファイルã®è¨­å®šã‚’å–å¾—ã—ã¾ã™ã€‚\n" +"lxc profile set \n" +" プロファイルã®è¨­å®šã‚’設定ã—ã¾ã™ã€‚\n" +"lxc profile unset \n" +" プロファイルã‹ã‚‰è¨­å®šé …目を削除ã—ã¾ã™ã€‚\n" +"lxc profile delete \n" +" プロファイルを削除ã—ã¾ã™ã€‚\n" +"lxc profile edit \n" +" プロファイルを編集ã—ã¾ã™ã€‚外部エディタもã—ãã¯STDINã‹ã‚‰èª­ã¿è¾¼ã¿ã¾ã™ã€‚\n" +" 例: lxc profile edit # エディタã®èµ·å‹•\n" +" cat profile.yml | lxc profile edit # profile.yml ã‹ã‚‰èª­ã¿è¾¼ã¿\n" +"\n" +"lxc profile assign \n" +" プロファイルã®ã‚³ãƒ³ãƒžåŒºåˆ‡ã‚Šã®ãƒªã‚¹ãƒˆã‚’コンテナã«é †ç•ªã«å‰²ã‚Šå½“ã¦ã¾ã™ã€‚\n" +" ã“ã®ã‚³ãƒžãƒ³ãƒ‰ã§æŒ‡å®šã—ãŸãƒ—ロファイルã ã‘ãŒå¯¾è±¡ã®ã‚³ãƒ³ãƒ†ãƒŠã«é©ç”¨ã•ã‚Œã¾ã™ã€‚\n" +" ã¤ã¾ã‚Šã€ã‚³ãƒžãƒ³ãƒ‰ã§æŒ‡å®šã—ãŸã‚³ãƒ³ãƒ†ãƒŠã«æ­£ç¢ºã«ãƒ—ロファイルã®ãƒªã‚¹ãƒˆã‚’設定ã—ã¾ã™ã€‚\n" +" コンテナã‹ã‚‰ç‰¹å®šã®ãƒ—ロファイルを追加ã—ãŸã‚Šå‰Šé™¤ã—ãŸã‚Šã™ã‚‹ã«ã¯ã€ã“ã®\n" +" 後㮠{add|remove} を使ã„ã¾ã™ã€‚\n" +" 例: lxc profile assign foo default,bar # defaultã¨barã‚’é©ç”¨\n" +" lxc profile assign foo default # defaultã ã‘を有効化\n" +" lxc profile assign '' # 一切ã®ãƒ—ロファイルをé©ç”¨ã—ãªã„\n" +" lxc profile assign bar,default # defaultã‚’2番目ã«é©ç”¨\n" +"lxc profile add \n" +" コンテナã«ãƒ—ロファイルを追加ã—ã¾ã™ã€‚\n" +"lxc profile remove \n" +" コンテナã‹ã‚‰ãƒ—ロファイルを削除ã—ã¾ã™ã€‚\n" +"\n" +"デãƒã‚¤ã‚¹:\n" +"lxc profile device list \n" +" 指定ã—ãŸãƒ—ロファイル内ã®ãƒ‡ãƒã‚¤ã‚¹ã‚’一覧表示ã—ã¾ã™\n" +"lxc profile device show \n" +" 指定ã—ãŸãƒ—ロファイル内ã®å…¨ãƒ‡ãƒã‚¤ã‚¹ã®è©³ç´°ã‚’表示ã—ã¾ã™\n" +"lxc profile device remove \n" +" プロファイルã‹ã‚‰ãƒ‡ãƒã‚¤ã‚¹ã‚’削除ã—ã¾ã™\n" +"lxc profile device get <[remote:]profile> \n" +" デãƒã‚¤ã‚¹ãƒ—ロパティをå–å¾—ã—ã¾ã™\n" +"lxc profile device set <[remote:]profile> \n" +" デãƒã‚¤ã‚¹ãƒ—ロパティを設定ã—ã¾ã™\n" +"lxc profile device unset <[remote:]profile> \n" +" デãƒã‚¤ã‚¹ãƒ—ロパティを削除ã—ã¾ã™\n" +"lxc profile device add 
[key=value]...\n" +" ディスクやNICã®ã‚ˆã†ãªãƒ—ロファイルデãƒã‚¤ã‚¹ã‚’指定ã—ãŸãƒ—ロファイルを使ã£ã¦\n" +" コンテナã«è¿½åŠ ã—ã¾ã™ã€‚" -#: lxc/config.go:56 +#: lxc/config.go:58 msgid "" "Manage configuration.\n" "\n" "lxc config device add <[remote:]container> [key=value]... " "Add a device to a container.\n" -"lxc config device list [remote:] " +"lxc config device get <[remote:]container> " +"Get a device property.\n" +"lxc config device set <[remote:]container> " +"Set a device property.\n" +"lxc config device unset <[remote:]container> " +"Unset a device property.\n" +"lxc config device list <[remote:]container> " "List devices for container.\n" -"lxc config device show [remote:] " +"lxc config device show <[remote:]container> " "Show full device details for container.\n" -"lxc config device remove [remote:] " +"lxc config device remove <[remote:]container> " "Remove device from container.\n" "\n" -"lxc config get [remote:] key " -"Get configuration key.\n" -"lxc config set [remote:] key value " -"Set container configuration key.\n" -"lxc config unset [remote:] key " -"Unset container configuration key.\n" -"lxc config set key value " -"Set server configuration key.\n" -"lxc config unset key " -"Unset server configuration key.\n" -"lxc config show [--expanded] [remote:] " -"Show container configuration.\n" +"lxc config get [remote:][container] " +"Get container or server configuration key.\n" +"lxc config set [remote:][container] " +"Set container or server configuration key.\n" +"lxc config unset [remote:][container] " +"Unset container or server configuration key.\n" +"lxc config show [remote:][container] [--expanded] " +"Show container or server configuration.\n" "lxc config edit [remote:][container] " -"Edit container configuration in external editor.\n" +"Edit container or server configuration in external editor.\n" " Edit configuration, either by launching external editor or reading " "STDIN.\n" " Example: lxc config edit # launch editor\n" @@ -565,7 +844,7 @@ "\n" "Examples:\n" "To 
mount host's /share/c1 onto /opt in the container:\n" -" lxc config device add [remote:]container1 disk source=/" +" lxc config device add [remote:]container1 disk source=/" "share/c1 path=opt\n" "\n" "To set an lxc config value:\n" @@ -579,8 +858,59 @@ "To set the server trust password:\n" " lxc config set core.trust_password blah" msgstr "" +"設定を管ç†ã—ã¾ã™ã€‚\n" +"\n" +"lxc config device add <[remote:]container> [key=value]...\n" +" コンテナã«ãƒ‡ãƒã‚¤ã‚¹ã‚’追加ã—ã¾ã™ã€‚\n" +"lxc config device get <[remote:]container> \n" +" デãƒã‚¤ã‚¹ã®ãƒ—ロパティをå–å¾—ã—ã¾ã™ã€‚\n" +"lxc config device set <[remote:]container> \n" +" デãƒã‚¤ã‚¹ã®ãƒ—ロパティを設定ã—ã¾ã™ã€‚\n" +"lxc config device unset <[remote:]container> \n" +" デãƒã‚¤ã‚¹ã®ãƒ—ロパティを削除ã—ã¾ã™ã€‚\n" +"lxc config device list <[remote:]container>\n" +" コンテナã®ãƒ‡ãƒã‚¤ã‚¹ã‚’一覧表示ã—ã¾ã™ã€‚\n" +"lxc config device show <[remote:]container>\n" +" å…¨ã¦ã®ãƒ‡ãƒã‚¤ã‚¹ã®è©³ç´°ã‚’表示ã—ã¾ã™ã€‚\n" +"lxc config device remove <[remote:]container> \n" +" コンテナã‹ã‚‰ãƒ‡ãƒã‚¤ã‚¹ã‚’削除ã—ã¾ã™ã€‚\n" +"\n" +"lxc config get [remote:][container] \n" +" コンテナもã—ãã¯ã‚µãƒ¼ãƒã®è¨­å®šé …ç›®ã®å€¤ã‚’å–å¾—ã—ã¾ã™ã€‚\n" +"lxc config set [remote:][container] \n" +" コンテナもã—ãã¯ã‚µãƒ¼ãƒã®è¨­å®šé …ç›®ã«å€¤ã‚’設定ã—ã¾ã™ã€‚\n" +"lxc config unset [remote:][container] \n" +" コンテナもã—ãã¯ã‚µãƒ¼ãƒã®è¨­å®šé …目を削除ã—ã¾ã™ã€‚\n" +"lxc config show [remote:][container] [--expanded]\n" +" コンテナもã—ãã¯ã‚µãƒ¼ãƒã®è¨­å®šã‚’表示ã—ã¾ã™ã€‚\n" +"lxc config edit [remote:][container]\n" +" コンテナもã—ãã¯ã‚µãƒ¼ãƒã®è¨­å®šã‚’外部エディタã§ç·¨é›†ã—ã¾ã™ã€‚\n" +" 設定ã®ç·¨é›†ã¯å¤–部エディタを起動ã™ã‚‹ã‹ã€æ¨™æº–入力ã‹ã‚‰ã®èª­ã¿è¾¼ã¿ã§è¡Œã„ã¾ã™ã€‚\n" +" 例: lxc config edit # エディタã®èµ·å‹•\n" +" cat config.yml | lxc config edit # config.ymlã‹ã‚‰èª­ã¿è¾¼ã¿\n" +"\n" +"lxc config trust list [remote]\n" +" ä¿¡é ¼ã™ã‚‹è¨¼æ˜Žæ›¸ã‚’å…¨ã¦è¡¨ç¤ºã—ã¾ã™ã€‚\n" +"lxc config trust add [remote] \n" +" certfile.crt ã‚’ä¿¡é ¼ã™ã‚‹ãƒ›ã‚¹ãƒˆã«è¿½åŠ ã—ã¾ã™ã€‚\n" +"lxc config trust remove [remote] [hostname|fingerprint]\n" +" ä¿¡é 
¼ã™ã‚‹ãƒ›ã‚¹ãƒˆã‹ã‚‰è¨¼æ˜Žæ›¸ã‚’消去ã—ã¾ã™ã€‚\n" +"\n" +"例:\n" +"ホスト㮠/share/c1 をコンテナ内㮠/opt ã«ãƒžã‚¦ãƒ³ãƒˆã™ã‚‹ã«ã¯:\n" +" lxc config device add [remote:]container1 disk source=/share/c1 path=opt\n" +"\n" +"lxc 設定項目ã«å€¤ã‚’設定ã™ã‚‹ã«ã¯:\n" +" lxc config set [remote:] raw.lxc 'lxc.aa_allow_incomplete = 1'\n" +"\n" +"IPv4 㨠IPv6 ã®ãƒãƒ¼ãƒˆ 8443 㧠Listen ã™ã‚‹ã«ã¯\n" +"(8443 ã¯ãƒ‡ãƒ•ã‚©ãƒ«ãƒˆãªã®ã§çœç•¥ã§ãã¾ã™):\n" +" lxc config set core.https_address [::]:8443\n" +"\n" +"サーãƒã®ãƒ‘スワードを設定ã™ã‚‹ã«ã¯:\n" +" lxc config set core.trust_password blah" -#: lxc/file.go:33 +#: lxc/file.go:32 msgid "" "Manage files on a container.\n" "\n" @@ -592,34 +922,56 @@ " in the case of pull, in the case of push and in the " "case of edit are /" msgstr "" +"コンテナ上ã®ãƒ•ã‚¡ã‚¤ãƒ«ã‚’管ç†ã—ã¾ã™ã€‚\n" +"\n" +"lxc file pull [...] \n" +"lxc file push [--uid=UID] [--gid=GID] [--mode=MODE] [...] " +"\n" +"lxc file edit \n" +"\n" +"pull ã®å ´åˆã® ã€push ã®å ´åˆã® ã€edit ã®å ´åˆã® ã¯ã€ã„\n" +"ãšã‚Œã‚‚ / ã®å½¢å¼ã§ã™ã€‚" -#: lxc/remote.go:33 +#: lxc/remote.go:39 msgid "" "Manage remote LXD servers.\n" "\n" -"lxc remote add [--accept-certificate] [--password=PASSWORD] [--" -"public] Add the remote at .\n" -"lxc remote remove " -" Remove " -"the remote .\n" -"lxc remote " -"list " +"lxc remote add [--accept-certificate] [--password=PASSWORD]\n" +" [--public] [--protocol=PROTOCOL] " +"Add the remote at .\n" +"lxc remote remove " +"Remove the remote .\n" +"lxc remote list " "List all remotes.\n" -"lxc remote rename " -" Rename remote " -" to .\n" -"lxc remote set-url " -" Update 's " -"url to .\n" -"lxc remote set-default " -" Set the " -"default remote.\n" -"lxc remote get-" -"default " +"lxc remote rename " +"Rename remote to .\n" +"lxc remote set-url " +"Update 's url to .\n" +"lxc remote set-default " +"Set the default remote.\n" +"lxc remote get-default " "Print the default remote." 
msgstr "" +"リモート㮠LXD サーãƒã‚’管ç†ã—ã¾ã™ã€‚\n" +"\n" +"lxc remote add \n" +" [--accept-certificate] [--password=PASSWORD]\n" +" [--public] [--protocol=PROTOCOL]\n" +" をリモートホスト ã¨ã—ã¦è¿½åŠ ã—ã¾ã™ã€‚\n" +"lxc remote remove \n" +" リモートホスト を削除ã—ã¾ã™ã€‚\n" +"lxc remote list\n" +" 登録済ã¿ã®ãƒªãƒ¢ãƒ¼ãƒˆãƒ›ã‚¹ãƒˆã‚’å…¨ã¦ä¸€è¦§è¡¨ç¤ºã—ã¾ã™ã€‚\n" +"lxc remote rename \n" +" リモートホストã®åå‰ã‚’ ã‹ã‚‰ ã«å¤‰æ›´ã—ã¾ã™ã€‚\n" +"lxc remote set-url \n" +" ã® url ã‚’ ã«æ›´æ–°ã—ã¾ã™ã€‚\n" +"lxc remote set-default \n" +" をデフォルトã®ãƒªãƒ¢ãƒ¼ãƒˆãƒ›ã‚¹ãƒˆã«è¨­å®šã—ã¾ã™ã€‚\n" +"lxc remote get-default\n" +" デフォルトã«è¨­å®šã•ã‚Œã¦ã„るリモートホストを表示ã—ã¾ã™ã€‚" -#: lxc/image.go:38 +#: lxc/image.go:95 msgid "" "Manipulate container images.\n" "\n" @@ -639,24 +991,36 @@ "\n" "\n" "lxc image import [rootfs tarball|URL] [remote:] [--public] [--" -"created-at=ISO-8601] [--expires-at=ISO-8601] [--fingerprint=FINGERPRINT] " -"[prop=value]\n" +"created-at=ISO-8601] [--expires-at=ISO-8601] [--fingerprint=FINGERPRINT] [--" +"alias=ALIAS].. [prop=value]\n" " Import an image tarball (or tarballs) into the LXD image store.\n" "\n" "lxc image copy [remote:] : [--alias=ALIAS].. [--copy-aliases] " -"[--public]\n" +"[--public] [--auto-update]\n" " Copy an image from one LXD daemon to another over the network.\n" "\n" -"lxc image delete [remote:]\n" -" Delete an image from the LXD image store.\n" +" The auto-update flag instructs the server to keep this image up to\n" +" date. It requires the source to be an alias and for it to be public.\n" "\n" -"lxc image export [remote:]\n" +"lxc image delete [remote:] [remote:][...]\n" +" Delete one or more images from the LXD image store.\n" +"\n" +"lxc image export [remote:] [target]\n" " Export an image from the LXD image store into a distributable tarball.\n" "\n" +" The output target is optional and defaults to the working directory.\n" +" The target may be an existing directory, file name, or \"-\" to specify\n" +" stdout. 
The target MUST be a directory when exporting a split image.\n" +" If the target is a directory, the image's name (each part's name for\n" +" split images) as found in the database will be used for the exported\n" +" image. If the target is a file (not a directory and not stdout), then\n" +" the appropriate extension will be appended to the provided file name\n" +" based on the algorithm used to compress the image. \n" +"\n" "lxc image info [remote:]\n" " Print everything LXD knows about a given image.\n" "\n" -"lxc image list [remote:] [filter]\n" +"lxc image list [remote:] [filter] [--format table|json]\n" " List images in the LXD image store. Filters may be of the\n" " = form for property based filtering, or part of the image\n" " hash or part of the image alias name.\n" @@ -675,15 +1039,91 @@ "lxc image alias delete [remote:]\n" " Delete an alias.\n" "\n" -"lxc image alias list [remote:]\n" -" List the aliases.\n" +"lxc image alias list [remote:] [filter]\n" +" List the aliases. Filters may be part of the image hash or part of the " +"image alias name.\n" msgstr "" +"コンテナイメージをæ“作ã—ã¾ã™ã€‚\n" +"\n" +"LXD ã§ã¯ã€ã‚³ãƒ³ãƒ†ãƒŠã¯ã‚¤ãƒ¡ãƒ¼ã‚¸ã‹ã‚‰ä½œã‚‰ã‚Œã¾ã™ã€‚ã“ã®ã‚¤ãƒ¡ãƒ¼ã‚¸ã¯ã€æ—¢å­˜ã®ã‚³ãƒ³ãƒ†ãƒŠ\n" +"やイメージサーãƒã‹ã‚‰ãƒ€ã‚¦ãƒ³ãƒ­ãƒ¼ãƒ‰ã—ãŸã‚¤ãƒ¡ãƒ¼ã‚¸ã‹ã‚‰ä½œã‚‰ã‚Œã¾ã™ã€‚\n" +"\n" +"リモートã®ã‚¤ãƒ¡ãƒ¼ã‚¸ã‚’使ã†å ´åˆã€LXD ã¯è‡ªå‹•çš„ã«ã‚¤ãƒ¡ãƒ¼ã‚¸ã‚’キャッシュã—ã¾ã™ã€‚ã\n" +"ã—ã¦ã€ã‚¤ãƒ¡ãƒ¼ã‚¸ã®æœŸé™ãŒåˆ‡ã‚Œã‚‹ã¨ã‚­ãƒ£ãƒƒã‚·ãƒ¥ã‚’削除ã—ã¾ã™ã€‚\n" +"\n" +"イメージ固有ã®è­˜åˆ¥å­ã¯åœ§ç¸®ã•ã‚ŒãŸ tarball (分割イメージã®å ´åˆã¯ã€ãƒ¡ã‚¿ãƒ‡ãƒ¼ã‚¿\n" +"㨠rootfs tarball ã‚’çµåˆã—ãŸã‚‚ã®) ã®ãƒãƒƒã‚·ãƒ¥ (sha-256) ã§ã™ã€‚\n" +"\n" +"イメージã¯å…¨ãƒãƒƒã‚·ãƒ¥æ–‡å­—列ã€ä¸€æ„ã«å®šã¾ã‚‹ãƒãƒƒã‚·ãƒ¥ã®çŸ­ç¸®è¡¨ç¾ã€(設定ã•ã‚Œã¦ã„\n" +"ã‚‹å ´åˆã¯) エイリアスã§å‚ç…§ã§ãã¾ã™ã€‚\n" +"\n" +"\n" +"lxc image import [rootfs tarball|URL] [remote:] [--public] [--created-at=ISO-8601] [--expires-at=ISO-8601] [--fingerprint=FINGERPRINT] [--alias=ALIAS].. 
[prop=value]\n" +" イメージ㮠tarball (複数もå¯èƒ½) ã‚’ LXD ã®ã‚¤ãƒ¡ãƒ¼ã‚¸ã‚¹ãƒˆã‚¢ã«ã‚¤ãƒ³ãƒãƒ¼ãƒˆã—ã¾\n" +" ã™ã€‚\n" +"\n" +"lxc image copy [remote:] : [--alias=ALIAS].. [--copy-aliases] [--public] [--auto-update]\n" +" ãƒãƒƒãƒˆãƒ¯ãƒ¼ã‚¯çµŒç”±ã§ã‚ã‚‹ LXD デーモンã‹ã‚‰ä»–ã® LXD デーモンã¸ã‚¤ãƒ¡ãƒ¼ã‚¸ã‚’\n" +" コピーã—ã¾ã™ã€‚\n" +"\n" +" auto-update フラグã¯ã€ã‚µãƒ¼ãƒãŒã‚¤ãƒ¡ãƒ¼ã‚¸ã‚’最新ã«ä¿ã¤ã‚ˆã†ã«æŒ‡ç¤ºã—ã¾ã™ã€‚イ\n" +" メージã®ã‚½ãƒ¼ã‚¹ãŒã‚¨ã‚¤ãƒªã‚¢ã‚¹ã§ã‚ã‚Šã€public ã§ã‚ã‚‹å¿…è¦ãŒã‚ã‚Šã¾ã™ã€‚\n" +"\n" +"lxc image delete [remote:]\n" +" LXD ã®ã‚¤ãƒ¡ãƒ¼ã‚¸ã‚¹ãƒˆã‚¢ã‹ã‚‰ã‚¤ãƒ¡ãƒ¼ã‚¸ã‚’削除ã—ã¾ã™ã€‚\n" +"\n" +"lxc image export [remote:]\n" +" LXD ã®ã‚¤ãƒ¡ãƒ¼ã‚¸ã‚¹ãƒˆã‚¢ã‹ã‚‰é…布å¯èƒ½ãª tarball ã¨ã—ã¦ã‚¤ãƒ¡ãƒ¼ã‚¸ã‚’エクスãƒãƒ¼ãƒˆ\n" +" ã—ã¾ã™ã€‚\n" +"\n" +" 出力先ã®æŒ‡å®šã¯ã‚ªãƒ—ションã§ã€ãƒ‡ãƒ•ã‚©ãƒ«ãƒˆã§ã¯ç¾åœ¨ã®ãƒ‡ã‚£ãƒ¬ã‚¯ãƒˆãƒªã§ã™ã€‚\n" +" 出力先ã¯å­˜åœ¨ã™ã‚‹ãƒ‡ã‚£ãƒ¬ã‚¯ãƒˆãƒªã€ãƒ•ã‚¡ã‚¤ãƒ«åã€æ¨™æº–出力を示㙠\"-\" ã®ã„\n" +" ãšã‚Œã‹ã§ã™ã€‚分割イメージをエクスãƒãƒ¼ãƒˆã™ã‚‹éš›ã¯ã€å‡ºåŠ›å…ˆã¯ãƒ‡ã‚£ãƒ¬ã‚¯ãƒˆ\n" +" リã§ãªã‘ã‚Œã°ãªã‚Šã¾ã›ã‚“。出力先ãŒãƒ‡ã‚£ãƒ¬ã‚¯ãƒˆãƒªã®å ´åˆã€ãƒ‡ãƒ¼ã‚¿ãƒ™ãƒ¼ã‚¹ã§\n" +" 見ã¤ã‹ã£ãŸã‚¤ãƒ¡ãƒ¼ã‚¸å (分割イメージã®éƒ¨åˆ†ãã‚Œãžã‚Œã®åå‰) をエクスãƒãƒ¼\n" +" トã™ã‚‹ã‚¤ãƒ¡ãƒ¼ã‚¸ã«ä½¿ç”¨ã—ã¾ã™ã€‚出力先ãŒãƒ•ã‚¡ã‚¤ãƒ«ã®å ´åˆ (ディレクトリã§\n" +" も標準出力ã§ã‚‚ãªã„å ´åˆ)ã€ã‚¤ãƒ¡ãƒ¼ã‚¸ã‚’圧縮ã™ã‚‹ã‚¢ãƒ«ã‚´ãƒªã‚ºãƒ ã«ã‚‚土ã¥ã„\n" +" ãŸé©åˆ‡ãªæ‹¡å¼µå­ãŒå¯¾è±¡ã®ãƒ•ã‚¡ã‚¤ãƒ«åã«è¿½åŠ ã•ã‚Œã¾ã™ã€‚\n" +"\n" +"lxc image info [remote:]\n" +" 指定ã—ãŸã‚¤ãƒ¡ãƒ¼ã‚¸ã«ã¤ã„ã¦ã®ã™ã¹ã¦ã®æƒ…報を表示ã—ã¾ã™ã€‚\n" +"\n" +"lxc image list [remote:] [filter]\n" +" LXD ã®ã‚¤ãƒ¡ãƒ¼ã‚¸ã‚¹ãƒˆã‚¢å†…ã®ã‚¤ãƒ¡ãƒ¼ã‚¸ã‚’一覧表示ã—ã¾ã™ã€‚プロパティã§ãƒ•ã‚£ãƒ«ã‚¿\n" +" ã‚’è¡Œã†å ´åˆã¯ã€ãƒ•ã‚£ãƒ«ã‚¿ã¯ = ã®å½¢ã«ãªã‚Šã¾ã™ã€‚フィルタã¯ã‚¤ãƒ¡ãƒ¼\n" +" ジãƒãƒƒã‚·ãƒ¥ã®ä¸€éƒ¨ã‚„イメージエイリアスåã®ä¸€éƒ¨ã‚‚指定ã§ãã¾ã™ã€‚\n" +"\n" +"lxc image show [remote:]\n" +" ユーザãŒå¤‰æ›´ã§ãるプロパティ㮠YAML å½¢å¼ã®å‡ºåŠ›ã‚’è¡Œã„ã¾ã™ã€‚\n" +"\n" +"lxc image edit [remote:]\n" +" 外部エディタã¾ãŸã¯æ¨™æº–入力ã‹ã‚‰ã®èª­ã¿è¾¼ã¿ã«ã‚ˆã‚Šã€ã‚¤ãƒ¡ãƒ¼ã‚¸ã‚’編集ã—ã¾ã™ã€‚\n" +" 例: lxc image edit # エディタã®èµ·å‹•\n" +" cat image.yml | lxc image edit # 
image.yml ã‹ã‚‰èª­ã¿è¾¼ã¿\n" +"\n" +"lxc image alias create [remote:] \n" +" 既存ã®ã‚¤ãƒ¡ãƒ¼ã‚¸ã«æ–°ãŸã«ã‚¨ã‚¤ãƒªã‚¢ã‚¹ã‚’作æˆã—ã¾ã™ã€‚\n" +"\n" +"lxc image alias delete [remote:]\n" +" エイリアスを削除ã—ã¾ã™ã€‚\n" +"\n" +"lxc image alias list [remote:] [filter]\n" +" エイリアスを一覧表示ã—ã¾ã™ã€‚イメージãƒãƒƒã‚·ãƒ¥ã®ä¸€éƒ¨ã‚„イメージã®ã‚¨ã‚¤ãƒªã‚¢ã‚¹\n" +" åã®ä¸€éƒ¨ã‚’フィルタã¨ã—ã¦æŒ‡å®šã§ãã¾ã™ã€‚\n" + +#: lxc/info.go:147 +msgid "Memory (current)" +msgstr "メモリ (ç¾åœ¨å€¤)" -#: lxc/help.go:86 +#: lxc/info.go:151 +msgid "Memory (peak)" +msgstr "メモリ (ピーク)" + +#: lxc/help.go:87 msgid "Missing summary." msgstr "サマリーã¯ã‚ã‚Šã¾ã›ã‚“。" -#: lxc/monitor.go:20 +#: lxc/monitor.go:41 msgid "" "Monitor activity on the LXD server.\n" "\n" @@ -697,8 +1137,19 @@ "Example:\n" "lxc monitor --type=logging" msgstr "" +"LXD サーãƒã®å‹•ä½œã‚’モニタリングã—ã¾ã™ã€‚\n" +"\n" +"lxc monitor [remote:] [--type=TYPE...]\n" +"\n" +"指定ã—㟠LXD サーãƒã®ãƒ¢ãƒ‹ã‚¿ãƒªãƒ³ã‚°ã‚¤ãƒ³ã‚¿ãƒ¼ãƒ•ã‚§ãƒ¼ã‚¹ã«æŽ¥ç¶šã—ã¾ã™ã€‚\n" +"\n" +"デフォルトã§ã¯å…¨ã¦ã®ã‚¿ã‚¤ãƒ—をモニタリングã—ã¾ã™ã€‚\n" +"--type ã«ã‚ˆã‚Šã€ãƒ¢ãƒ‹ã‚¿ãƒªãƒ³ã‚°ã™ã‚‹ã‚¿ã‚¤ãƒ—を指定ã§ãã¾ã™ã€‚\n" +"\n" +"例:\n" +"lxc monitor --type=logging" -#: lxc/file.go:169 +#: lxc/file.go:183 msgid "More than one file to download, but target is not a directory" msgstr "" "ダウンロード対象ã®ãƒ•ã‚¡ã‚¤ãƒ«ãŒè¤‡æ•°ã‚ã‚Šã¾ã™ãŒã€ã‚³ãƒ”ー先ãŒãƒ‡ã‚£ãƒ¬ã‚¯ãƒˆãƒªã§ã¯ã‚ã‚Šã¾" @@ -715,197 +1166,269 @@ "lxc move \n" " Rename a local container.\n" msgstr "" +"LXD ホスト内ã€ã‚‚ã—ã㯠LXD ホスト間ã§ã‚³ãƒ³ãƒ†ãƒŠã‚’移動ã—ã¾ã™ã€‚\n" +"\n" +"lxc move [remote:] [remote:]\n" +" 2 ã¤ã®ãƒ›ã‚¹ãƒˆé–“ã§ã‚³ãƒ³ãƒ†ãƒŠã‚’移動ã—ã¾ã™ã€‚コピー先ã®åå‰ãŒå…ƒã¨é•ã†å ´åˆã¯\n" +" åŒæ™‚ã«ãƒªãƒãƒ¼ãƒ ã•ã‚Œã¾ã™ã€‚\n" +"\n" +"lxc move \n" +" ローカルã®ã‚³ãƒ³ãƒ†ãƒŠã‚’リãƒãƒ¼ãƒ ã—ã¾ã™ã€‚\n" -#: lxc/list.go:224 lxc/remote.go:271 +#: lxc/action.go:69 +msgid "Must supply container name for: " +msgstr "コンテナåを指定ã™ã‚‹å¿…è¦ãŒã‚ã‚Šã¾ã™: " + +#: lxc/list.go:428 lxc/remote.go:376 msgid "NAME" msgstr "" -#: lxc/list.go:293 lxc/remote.go:257 +#: lxc/remote.go:350 lxc/remote.go:355 msgid "NO" msgstr "" -#: 
lxc/info.go:82 +#: lxc/info.go:89 #, c-format msgid "Name: %s" -msgstr "" +msgstr "コンテナå: %s" -#: lxc/image.go:117 lxc/publish.go:30 +#: lxc/image.go:168 lxc/publish.go:33 msgid "New alias to define at target" -msgstr "" +msgstr "æ–°ã—ã„エイリアスを定義ã™ã‚‹" -#: lxc/config.go:277 -#, fuzzy +#: lxc/config.go:285 msgid "No certificate provided to add" msgstr "追加ã™ã¹ã証明書ãŒæä¾›ã•ã‚Œã¦ã„ã¾ã›ã‚“" -#: lxc/config.go:300 +#: lxc/config.go:308 msgid "No fingerprint specified." msgstr "フィンガープリントãŒæŒ‡å®šã•ã‚Œã¦ã„ã¾ã›ã‚“。" -#: lxc/image.go:331 +#: lxc/remote.go:122 +msgid "Only https URLs are supported for simplestreams" +msgstr "simplestreams 㯠https ã® URL ã®ã¿ã‚µãƒãƒ¼ãƒˆã—ã¾ã™" + +#: lxc/image.go:434 msgid "Only https:// is supported for remote image import." -msgstr "" +msgstr "リモートイメージã®ã‚¤ãƒ³ãƒãƒ¼ãƒˆã¯ https:// ã®ã¿ã‚’サãƒãƒ¼ãƒˆã—ã¾ã™ã€‚" -#: lxc/help.go:63 lxc/main.go:132 -#, fuzzy +#: lxc/help.go:63 lxc/main.go:112 msgid "Options:" -msgstr "æ“作 %s" +msgstr "オプション:" -#: lxc/image.go:425 +#: lxc/image.go:538 #, c-format msgid "Output is in %s" -msgstr "" +msgstr "%s ã«å‡ºåŠ›ã•ã‚Œã¾ã™" -#: lxc/exec.go:54 +#: lxc/exec.go:55 msgid "Override the terminal mode (auto, interactive or non-interactive)" +msgstr "ターミナルモードを上書ãã—ã¾ã™ (auto, interactive, non-interactive)" + +#: lxc/list.go:572 +msgid "PERSISTENT" msgstr "" -#: lxc/list.go:230 +#: lxc/list.go:429 msgid "PID" msgstr "" -#: lxc/image.go:522 lxc/remote.go:273 +#: lxc/list.go:430 +msgid "PROFILES" +msgstr "" + +#: lxc/remote.go:378 +msgid "PROTOCOL" +msgstr "" + +#: lxc/image.go:640 lxc/remote.go:379 msgid "PUBLIC" msgstr "" -#: lxc/help.go:69 -#, fuzzy -msgid "Path to an alternate client configuration directory." -msgstr "別ã®è¨­å®šãƒ‡ã‚£ãƒ¬ã‚¯ãƒˆãƒª" +#: lxc/info.go:174 +msgid "Packets received" +msgstr "å—信パケット" + +#: lxc/info.go:175 +msgid "Packets sent" +msgstr "é€ä¿¡ãƒ‘ケット" #: lxc/help.go:70 -#, fuzzy +msgid "Path to an alternate client configuration directory." 
+msgstr "別ã®ã‚¯ãƒ©ã‚¤ã‚¢ãƒ³ãƒˆç”¨è¨­å®šãƒ‡ã‚£ãƒ¬ã‚¯ãƒˆãƒª" + +#: lxc/help.go:71 msgid "Path to an alternate server directory." -msgstr "別ã®è¨­å®šãƒ‡ã‚£ãƒ¬ã‚¯ãƒˆãƒª" +msgstr "別ã®ã‚µãƒ¼ãƒç”¨è¨­å®šãƒ‡ã‚£ãƒ¬ã‚¯ãƒˆãƒª" -#: lxc/main.go:39 -msgid "Permisson denied, are you in the lxd group?" +#: lxc/main.go:31 +msgid "Permission denied, are you in the lxd group?" +msgstr "アクセスãŒæ‹’å¦ã•ã‚Œã¾ã—ãŸã€‚lxd グループã«æ‰€å±žã—ã¦ã„ã¾ã™ã‹?" + +#: lxc/info.go:103 +#, c-format +msgid "Pid: %d" msgstr "" -#: lxc/help.go:23 -#, fuzzy +#: lxc/help.go:25 msgid "" "Presents details on how to use LXD.\n" "\n" "lxd help [--all]" -msgstr "LXDã®ä½¿ã„æ–¹ã®è©³ç´°ã‚’表示ã—ã¾ã™ã€‚\n" +msgstr "" +"LXDã®ä½¿ã„æ–¹ã®è©³ç´°ã‚’表示ã—ã¾ã™ã€‚\n" +"\n" +"lxd help [--all]" -#: lxc/profile.go:186 +#: lxc/profile.go:219 msgid "Press enter to open the editor again" -msgstr "" +msgstr "å†åº¦ã‚¨ãƒ‡ã‚£ã‚¿ã‚’é–‹ããŸã‚ã«ã¯ Enter キーを押ã—ã¾ã™" -#: lxc/config.go:491 lxc/config.go:556 lxc/image.go:598 +#: lxc/config.go:532 lxc/config.go:597 lxc/image.go:735 msgid "Press enter to start the editor again" -msgstr "" +msgstr "å†åº¦ã‚¨ãƒ‡ã‚£ã‚¿ã‚’èµ·å‹•ã™ã‚‹ã«ã¯ Enter キーを押ã—ã¾ã™" #: lxc/help.go:65 msgid "Print debug information." -msgstr "" +msgstr "デãƒãƒƒã‚°æƒ…報を表示ã—ã¾ã™ã€‚" #: lxc/help.go:64 msgid "Print less common commands." -msgstr "" +msgstr "å…¨ã¦ã®ã‚³ãƒžãƒ³ãƒ‰ã‚’表示ã—ã¾ã™ (主ãªã‚³ãƒžãƒ³ãƒ‰ã ã‘ã§ã¯ãªã)。" #: lxc/help.go:66 msgid "Print verbose information." -msgstr "" +msgstr "詳細情報を表示ã—ã¾ã™ã€‚" + +#: lxc/manpage.go:18 +msgid "Prints all the subcommands help." 
+msgstr "å…¨ã¦ã®ã‚µãƒ–コマンドã®ãƒ˜ãƒ«ãƒ—を表示ã—ã¾ã™ã€‚" #: lxc/version.go:18 -#, fuzzy msgid "" -"Prints the version number of LXD.\n" +"Prints the version number of this client tool.\n" +"\n" +"lxc version" +msgstr "" +"ãŠä½¿ã„ã®ã‚¯ãƒ©ã‚¤ã‚¢ãƒ³ãƒˆã®ãƒãƒ¼ã‚¸ãƒ§ãƒ³ç•ªå·ã‚’表示ã—ã¾ã™ã€‚\n" "\n" "lxc version" -msgstr "LXDã®ãƒãƒ¼ã‚¸ãƒ§ãƒ³ç•ªå·ã‚’表示ã—ã¾ã™ã€‚\n" -#: lxc/info.go:96 +#: lxc/info.go:127 #, c-format -msgid "Processcount: %d" -msgstr "" +msgid "Processes: %d" +msgstr "プロセス数: %d" -#: lxc/profile.go:223 -#, fuzzy, c-format -msgid "Profile %s applied to %s" -msgstr "デãƒã‚¤ã‚¹ %s ㌠%s ã«è¿½åŠ ã•ã‚Œã¾ã—ãŸ\n" +#: lxc/profile.go:275 +#, c-format +msgid "Profile %s added to %s" +msgstr "プロファイル %s ㌠%s ã«è¿½åŠ ã•ã‚Œã¾ã—ãŸ" -#: lxc/profile.go:137 +#: lxc/profile.go:170 #, c-format msgid "Profile %s created" -msgstr "" +msgstr "プロファイル %s を作æˆã—ã¾ã—ãŸ" -#: lxc/profile.go:207 +#: lxc/profile.go:240 #, c-format msgid "Profile %s deleted" -msgstr "" +msgstr "プロファイル %s を削除ã—ã¾ã—ãŸ" -#: lxc/init.go:134 lxc/init.go:135 lxc/launch.go:38 lxc/launch.go:39 +#: lxc/profile.go:306 +#, c-format +msgid "Profile %s removed from %s" +msgstr "プロファイル %s ㌠%s ã‹ã‚‰å‰Šé™¤ã•ã‚Œã¾ã—ãŸ" + +#: lxc/copy.go:33 lxc/copy.go:34 lxc/init.go:136 lxc/init.go:137 +#: lxc/launch.go:42 lxc/launch.go:43 msgid "Profile to apply to the new container" -msgstr "" +msgstr "æ–°ã—ã„コンテナã«é©ç”¨ã™ã‚‹ãƒ—ロファイル" -#: lxc/info.go:93 -#, fuzzy, c-format +#: lxc/profile.go:256 +#, c-format +msgid "Profiles %s applied to %s" +msgstr "プロファイル %s ㌠%s ã«è¿½åŠ ã•ã‚Œã¾ã—ãŸ" + +#: lxc/info.go:101 +#, c-format msgid "Profiles: %s" -msgstr "デãƒã‚¤ã‚¹ %s ㌠%s ã«è¿½åŠ ã•ã‚Œã¾ã—ãŸ\n" +msgstr "プロファイル: %s" -#: lxc/image.go:277 +#: lxc/image.go:361 msgid "Properties:" -msgstr "" +msgstr "プロパティ:" -#: lxc/remote.go:48 +#: lxc/remote.go:56 msgid "Public image server" -msgstr "" +msgstr "Public ãªã‚¤ãƒ¡ãƒ¼ã‚¸ã‚µãƒ¼ãƒã¨ã—ã¦è¨­å®šã—ã¾ã™" -#: lxc/image.go:265 +#: lxc/image.go:349 #, c-format msgid "Public: %s" msgstr "" -#: lxc/publish.go:19 +#: 
lxc/publish.go:25 msgid "" "Publish containers as images.\n" "\n" "lxc publish [remote:]container [remote:] [--alias=ALIAS]... [prop-key=prop-" "value]..." msgstr "" +"イメージã¨ã—ã¦ã‚³ãƒ³ãƒ†ãƒŠã‚’ publish ã—ã¾ã™ã€‚\n" +"\n" +"lxc publish [remote:]container [remote:] [--alias=ALIAS]... [prop-key=prop-" +"value]..." -#: lxc/remote.go:47 +#: lxc/remote.go:54 msgid "Remote admin password" -msgstr "" +msgstr "リモートã®ç®¡ç†è€…パスワード" -#: lxc/delete.go:43 +#: lxc/delete.go:42 #, c-format msgid "Remove %s (yes/no): " -msgstr "" +msgstr "%s を消去ã—ã¾ã™ã‹ (yes/no): " #: lxc/delete.go:36 lxc/delete.go:37 msgid "Require user confirmation." -msgstr "" +msgstr "ユーザã®ç¢ºèªã‚’è¦æ±‚ã™ã‚‹ã€‚" -#: lxc/init.go:244 +#: lxc/info.go:124 +msgid "Resources:" +msgstr "リソース:" + +#: lxc/init.go:247 #, c-format msgid "Retrieving image: %s" -msgstr "" +msgstr "イメージã®å–得中: %s" -#: lxc/image.go:525 +#: lxc/image.go:643 msgid "SIZE" msgstr "" -#: lxc/list.go:229 +#: lxc/list.go:431 msgid "SNAPSHOTS" msgstr "" -#: lxc/list.go:225 +#: lxc/list.go:432 msgid "STATE" msgstr "" -#: lxc/remote.go:155 +#: lxc/remote.go:380 +msgid "STATIC" +msgstr "" + +#: lxc/remote.go:227 msgid "Server certificate NACKed by user" msgstr "ユーザã«ã‚ˆã‚Šã‚µãƒ¼ãƒè¨¼æ˜Žæ›¸ãŒæ‹’å¦ã•ã‚Œã¾ã—ãŸ" -#: lxc/remote.go:201 +#: lxc/remote.go:289 msgid "Server doesn't trust us after adding our cert" msgstr "サーãƒãŒæˆ‘々ã®è¨¼æ˜Žæ›¸ã‚’追加ã—ãŸå¾Œæˆ‘々を信頼ã—ã¦ã„ã¾ã›ã‚“" +#: lxc/remote.go:55 +msgid "Server protocol (lxd or simplestreams)" +msgstr "サーãƒã®ãƒ—ロトコル (lxd or simplestreams)" + #: lxc/restore.go:21 msgid "" "Set the current state of a resource back to a snapshot.\n" @@ -919,16 +1442,26 @@ "lxc snapshot u1 snap0 # create the snapshot\n" "lxc restore u1 snap0 # restore the snapshot" msgstr "" +"リソースã®ç¾åœ¨ã®çŠ¶æ…‹ã‚’スナップショット時点ã®çŠ¶æ…‹ã«è¨­å®šã—ã¾ã™ã€‚\n" +"\n" +"lxc restore [remote:] [--stateful]\n" +"\n" +"スナップショットã‹ã‚‰ã‚³ãƒ³ãƒ†ãƒŠã‚’リストアã—ã¾ã™ (オプションã§å®Ÿè¡ŒçŠ¶æ…‹ã‚‚リスト\n" +"ã‚¢ã—ã¾ã™ã€‚詳ã—ãã¯ã‚¹ãƒŠãƒƒãƒ—ショットã®ãƒ˜ãƒ«ãƒ—ã‚’ã”覧ãã ã•ã„)。\n" +"\n" 
+"例:\n" +"lxc snapshot u1 snap0 # スナップショットã®ä½œæˆ\n" +"lxc restore u1 snap0 # スナップショットã‹ã‚‰ãƒªã‚¹ãƒˆã‚¢" -#: lxc/file.go:45 +#: lxc/file.go:44 msgid "Set the file's gid on push" msgstr "プッシュ時ã«ãƒ•ã‚¡ã‚¤ãƒ«ã®gidを設定ã—ã¾ã™" -#: lxc/file.go:46 +#: lxc/file.go:45 msgid "Set the file's perms on push" msgstr "プッシュ時ã«ãƒ•ã‚¡ã‚¤ãƒ«ã®ãƒ‘ーミションを設定ã—ã¾ã™" -#: lxc/file.go:44 +#: lxc/file.go:43 msgid "Set the file's uid on push" msgstr "プッシュ時ã«ãƒ•ã‚¡ã‚¤ãƒ«ã®uidを設定ã—ã¾ã™" @@ -936,101 +1469,164 @@ msgid "Show all commands (not just interesting ones)" msgstr "å…¨ã¦ã‚³ãƒžãƒ³ãƒ‰ã‚’表示ã—ã¾ã™ (主ãªã‚³ãƒžãƒ³ãƒ‰ã ã‘ã§ã¯ãªã)" -#: lxc/info.go:34 +#: lxc/help.go:67 +msgid "Show client version." +msgstr "クライアントã®ãƒãƒ¼ã‚¸ãƒ§ãƒ³ã‚’表示ã—ã¾ã™ã€‚" + +#: lxc/info.go:36 msgid "Show the container's last 100 log lines?" -msgstr "" +msgstr "コンテナログã®æœ€å¾Œã® 100 行を表示ã—ã¾ã™ã‹?" -#: lxc/image.go:262 +#: lxc/image.go:347 #, c-format msgid "Size: %.2fMB" -msgstr "" +msgstr "サイズ: %.2fMB" -#: lxc/info.go:122 +#: lxc/info.go:194 msgid "Snapshots:" -msgstr "" +msgstr "スナップショット:" + +#: lxc/image.go:371 +msgid "Source:" +msgstr "å–å¾—å…ƒ:" -#: lxc/launch.go:118 +#: lxc/launch.go:124 #, c-format msgid "Starting %s" -msgstr "" +msgstr "%s を起動中" -#: lxc/info.go:87 +#: lxc/info.go:95 #, c-format msgid "Status: %s" -msgstr "" +msgstr "状態: %s" + +#: lxc/publish.go:34 lxc/publish.go:35 +msgid "Stop the container if currently running" +msgstr "実行中ã®å ´åˆã€ã‚³ãƒ³ãƒ†ãƒŠã‚’åœæ­¢ã—ã¾ã™" -#: lxc/delete.go:97 +#: lxc/delete.go:106 lxc/publish.go:111 msgid "Stopping container failed!" msgstr "コンテナã®åœæ­¢ã«å¤±æ•—ã—ã¾ã—ãŸï¼" -#: lxc/delete.go:83 +#: lxc/action.go:45 +msgid "Store the container state (only for stop)." 
+msgstr "コンテナã®çŠ¶æ…‹ã‚’ä¿å­˜ã—ã¾ã™ (stopã®ã¿)。" + +#: lxc/info.go:155 +msgid "Swap (current)" +msgstr "Swap (ç¾åœ¨å€¤)" + +#: lxc/info.go:159 +msgid "Swap (peak)" +msgstr "Swap (ピーク)" + +#: lxc/list.go:433 +msgid "TYPE" +msgstr "" + +#: lxc/delete.go:92 msgid "The container is currently running, stop it first or pass --force." +msgstr "コンテナã¯å®Ÿè¡Œä¸­ã§ã™ã€‚å…ˆã«åœæ­¢ã•ã›ã‚‹ã‹ã€--force を指定ã—ã¦ãã ã•ã„。" + +#: lxc/publish.go:89 +msgid "" +"The container is currently running. Use --force to have it stopped and " +"restarted." msgstr "" +"コンテナã¯ç¾åœ¨å®Ÿè¡Œä¸­ã§ã™ã€‚åœæ­¢ã—ã¦ã€å†èµ·å‹•ã™ã‚‹ãŸã‚ã« --force を使用ã—ã¦ãã \n" +"ã•ã„。" -#: lxc/publish.go:57 +#: lxc/config.go:676 lxc/config.go:688 lxc/config.go:721 lxc/config.go:739 +#: lxc/config.go:777 lxc/config.go:795 +msgid "The device doesn't exist" +msgstr "デãƒã‚¤ã‚¹ãŒå­˜åœ¨ã—ã¾ã›ã‚“" + +#: lxc/init.go:277 +#, c-format +msgid "The local image '%s' couldn't be found, trying '%s:' instead." +msgstr "" +"ローカルイメージ '%s' ãŒè¦‹ã¤ã‹ã‚Šã¾ã›ã‚“。代ã‚ã‚Šã« '%s:' を試ã—ã¦ã¿ã¦ãã ã•ã„。" + +#: lxc/main.go:180 +msgid "The opposite of `lxc pause` is `lxc start`." +msgstr "`lxc pause` ã®å対ã®ã‚³ãƒžãƒ³ãƒ‰ã¯ `lxc start` ã§ã™ã€‚" + +#: lxc/publish.go:62 msgid "There is no \"image name\". Did you want an alias?" msgstr "" +"publish å…ˆã«ã¯ã‚¤ãƒ¡ãƒ¼ã‚¸åã¯æŒ‡å®šã§ãã¾ã›ã‚“。\"--alias\" オプションを使ã£ã¦ã" +"ã \n" +"ã•ã„。" -#: lxc/action.go:35 +#: lxc/action.go:41 msgid "Time to wait for the container before killing it." 
msgstr "コンテナを強制åœæ­¢ã™ã‚‹ã¾ã§ã®æ™‚é–“" -#: lxc/image.go:266 +#: lxc/image.go:350 msgid "Timestamps:" +msgstr "タイムスタンプ:" + +#: lxc/main.go:137 +msgid "To start your first container, try: lxc launch ubuntu:16.04" msgstr "" +"åˆã‚ã¦ã‚³ãƒ³ãƒ†ãƒŠã‚’èµ·å‹•ã™ã‚‹ã«ã¯ã€\"lxc launch ubuntu:16.04\" ã¨å®Ÿè¡Œã—ã¦ã¿ã¦ã" +"ã \n" +"ã•ã„。" + +#: lxc/image.go:421 +#, c-format +msgid "Transferring image: %d%%" +msgstr "イメージを転é€ä¸­: %d%%" -#: lxc/action.go:62 lxc/launch.go:126 +#: lxc/action.go:99 lxc/launch.go:132 #, c-format msgid "Try `lxc info --show-log %s` for more info" -msgstr "" +msgstr "æ›´ã«æƒ…報を得るãŸã‚ã« `lxc info --show-log` を実行ã—ã¦ã¿ã¦ãã ã•ã„" -#: lxc/info.go:89 +#: lxc/info.go:97 msgid "Type: ephemeral" -msgstr "" +msgstr "タイプ: ephemeral" -#: lxc/info.go:91 +#: lxc/info.go:99 msgid "Type: persistent" -msgstr "" +msgstr "タイプ: persistent" -#: lxc/image.go:526 +#: lxc/image.go:644 msgid "UPLOAD DATE" msgstr "" -#: lxc/remote.go:272 +#: lxc/remote.go:377 msgid "URL" msgstr "" -#: lxc/image.go:271 +#: lxc/remote.go:97 +msgid "Unable to read remote TLS certificate" +msgstr "リモート㮠TLS 証明書を読ã‚ã¾ã›ã‚“" + +#: lxc/image.go:355 #, c-format msgid "Uploaded: %s" -msgstr "" +msgstr "アップロード日時: %s" -#: lxc/main.go:132 -#, fuzzy, c-format +#: lxc/main.go:112 +#, c-format msgid "Usage: %s" -msgstr "" -"Utilisation: %s\n" -"\n" -"Options:\n" -"\n" +msgstr "使ã„æ–¹: %s" #: lxc/help.go:48 -#, fuzzy msgid "Usage: lxc [subcommand] [options]" -msgstr "" -"使ã„æ–¹: lxc [サブコマンド] [オプション]\n" -"利用å¯èƒ½ãªã‚³ãƒžãƒ³ãƒ‰:\n" +msgstr "使ã„æ–¹: lxc [サブコマンド] [オプション]" -#: lxc/delete.go:47 +#: lxc/delete.go:46 msgid "User aborted delete operation." 
-msgstr "" +msgstr "ユーザãŒå‰Šé™¤æ“作を中断ã—ã¾ã—ãŸã€‚" #: lxc/restore.go:35 -#, fuzzy msgid "" "Whether or not to restore the container's running state from snapshot (if " "available)" -msgstr "コンテナã®ç¨¼å‹•çŠ¶æ…‹ã®ã‚¹ãƒŠãƒƒãƒ—ショットをå–å¾—ã™ã‚‹ã‹ã©ã†ã‹" +msgstr "" +"スナップショットã‹ã‚‰ã‚³ãƒ³ãƒ†ãƒŠã®ç¨¼å‹•çŠ¶æ…‹ã‚’リストアã™ã‚‹ã‹ã©ã†ã‹ (å–å¾—å¯èƒ½ãªå ´åˆ)" #: lxc/snapshot.go:38 msgid "Whether or not to snapshot the container's running state" @@ -1038,117 +1634,131 @@ #: lxc/config.go:33 msgid "Whether to show the expanded configuration" -msgstr "" +msgstr "æ‹¡å¼µã—ãŸè¨­å®šã‚’表示ã™ã‚‹ã‹ã©ã†ã‹" -#: lxc/list.go:291 lxc/remote.go:259 +#: lxc/remote.go:352 lxc/remote.go:357 msgid "YES" msgstr "" -#: lxc/main.go:66 +#: lxc/main.go:52 msgid "`lxc config profile` is deprecated, please use `lxc profile`" -msgstr "" +msgstr "`lxc config profile` ã¯å»ƒæ­¢ã•ã‚Œã¾ã—ãŸã€‚`lxc profile` を使ã£ã¦ãã ã•ã„" -#: lxc/launch.go:105 -#, fuzzy +#: lxc/launch.go:111 msgid "bad number of things scanned from image, container or snapshot" -msgstr "リソースã‹ã‚‰ã‚¹ã‚­ãƒ£ãƒ³ã•ã‚ŒãŸæ•°ãŒä¸æ­£" +msgstr "" +"イメージã€ã‚³ãƒ³ãƒ†ãƒŠã€ã‚¹ãƒŠãƒƒãƒ—ショットã®ã„ãšã‚Œã‹ã‹ã‚‰ã‚¹ã‚­ãƒ£ãƒ³ã•ã‚ŒãŸæ•°ãŒä¸æ­£" -#: lxc/action.go:58 +#: lxc/action.go:95 msgid "bad result type from action" -msgstr "アクションã‹ã‚‰ã®çµæžœã‚¿ã‚¤ãƒ—ãŒä¸æ­£ï¼" +msgstr "アクションã‹ã‚‰ã®çµæžœã‚¿ã‚¤ãƒ—ãŒä¸æ­£ã§ã™" -#: lxc/copy.go:78 +#: lxc/copy.go:115 msgid "can't copy to the same container name" -msgstr "" +msgstr "åŒã˜ã‚³ãƒ³ãƒ†ãƒŠåã¸ã¯ã‚³ãƒ”ーã§ãã¾ã›ã‚“" -#: lxc/remote.go:247 +#: lxc/remote.go:340 msgid "can't remove the default remote" -msgstr "" +msgstr "デフォルトã®ãƒªãƒ¢ãƒ¼ãƒˆã¯å‰Šé™¤ã§ãã¾ã›ã‚“" -#: lxc/remote.go:264 +#: lxc/remote.go:366 msgid "default" msgstr "" -#: lxc/init.go:197 lxc/init.go:202 lxc/launch.go:89 lxc/launch.go:94 -#, fuzzy +#: lxc/copy.go:131 lxc/copy.go:136 lxc/copy.go:219 lxc/copy.go:224 +#: lxc/init.go:200 lxc/init.go:205 lxc/launch.go:95 lxc/launch.go:100 msgid "didn't get any affected image, container or snapshot from server" -msgstr 
"サーãƒã‹ã‚‰å¤‰æ›´ã•ã‚ŒãŸãƒªã‚½ãƒ¼ã‚¹ã‚’å–å¾—ã§ãã¾ã›ã‚“ã§ã—ãŸ" +msgstr "" +"サーãƒã‹ã‚‰å¤‰æ›´ã•ã‚ŒãŸã‚¤ãƒ¡ãƒ¼ã‚¸ã€ã‚³ãƒ³ãƒ†ãƒŠã€ã‚¹ãƒŠãƒƒãƒ—ショットをå–å¾—ã§ãã¾ã›ã‚“ã§\n" +"ã—ãŸ" -#: lxc/main.go:25 lxc/main.go:167 -#, fuzzy, c-format +#: lxc/image.go:341 +msgid "disabled" +msgstr "無効" + +#: lxc/image.go:343 +msgid "enabled" +msgstr "有効" + +#: lxc/main.go:22 lxc/main.go:148 +#, c-format msgid "error: %v" -msgstr "エラー: %v\n" +msgstr "エラー: %v" -#: lxc/help.go:40 lxc/main.go:127 -#, fuzzy, c-format +#: lxc/help.go:40 lxc/main.go:107 +#, c-format msgid "error: unknown command: %s" -msgstr "エラー: 未知ã®ã‚³ãƒžãƒ³ãƒ‰: %s\n" +msgstr "エラー: 未知ã®ã‚³ãƒžãƒ³ãƒ‰: %s" -#: lxc/launch.go:109 +#: lxc/launch.go:115 msgid "got bad version" msgstr "ä¸æ­£ãªãƒãƒ¼ã‚¸ãƒ§ãƒ³ã‚’å¾—ã¾ã—ãŸ" -#: lxc/image.go:256 lxc/image.go:503 +#: lxc/image.go:336 lxc/image.go:620 msgid "no" msgstr "" -#: lxc/copy.go:100 +#: lxc/copy.go:158 msgid "not all the profiles from the source exist on the target" -msgstr "" +msgstr "コピー元ã®å…¨ã¦ã®ãƒ—ロファイルãŒã‚¿ãƒ¼ã‚²ãƒƒãƒˆã«å­˜åœ¨ã—ã¾ã›ã‚“" -#: lxc/remote.go:148 -#, fuzzy +#: lxc/remote.go:220 msgid "ok (y/n)?" msgstr "ok (y/n)?" 
-#: lxc/main.go:274 lxc/main.go:278 +#: lxc/main.go:302 lxc/main.go:306 #, c-format msgid "processing aliases failed %s\n" -msgstr "" +msgstr "エイリアスã®å‡¦ç†ãŒå¤±æ•—ã—ã¾ã—㟠%s\n" -#: lxc/remote.go:291 +#: lxc/remote.go:402 #, c-format msgid "remote %s already exists" msgstr "リモート %s ã¯æ—¢ã«å­˜åœ¨ã—ã¾ã™" -#: lxc/remote.go:243 lxc/remote.go:287 lxc/remote.go:317 lxc/remote.go:328 +#: lxc/remote.go:332 lxc/remote.go:394 lxc/remote.go:429 lxc/remote.go:445 #, c-format msgid "remote %s doesn't exist" msgstr "リモート %s ã¯å­˜åœ¨ã—ã¾ã›ã‚“" -#: lxc/remote.go:227 +#: lxc/remote.go:315 #, c-format msgid "remote %s exists as <%s>" msgstr "リモート %s 㯠<%s> ã¨ã—ã¦å­˜åœ¨ã—ã¾ã™" -#: lxc/info.go:131 +#: lxc/remote.go:336 lxc/remote.go:398 lxc/remote.go:433 +#, c-format +msgid "remote %s is static and cannot be modified" +msgstr "リモート %s 㯠static ã§ã™ã®ã§å¤‰æ›´ã§ãã¾ã›ã‚“" + +#: lxc/info.go:205 msgid "stateful" msgstr "" -#: lxc/info.go:133 +#: lxc/info.go:207 msgid "stateless" msgstr "" -#: lxc/info.go:127 +#: lxc/info.go:201 #, c-format msgid "taken at %s" -msgstr "" +msgstr "%s ã«å–å¾—ã—ã¾ã—ãŸ" -#: lxc/exec.go:158 +#: lxc/exec.go:163 msgid "unreachable return reached" msgstr "到é”ã—ãªã„ã¯ãšã®returnã«åˆ°é”ã—ã¾ã—ãŸ" -#: lxc/main.go:207 +#: lxc/main.go:234 msgid "wrong number of subcommand arguments" msgstr "サブコマンドã®å¼•æ•°ã®æ•°ãŒæ­£ã—ãã‚ã‚Šã¾ã›ã‚“" -#: lxc/delete.go:46 lxc/image.go:259 lxc/image.go:507 +#: lxc/delete.go:45 lxc/image.go:338 lxc/image.go:624 msgid "yes" msgstr "" -#: lxc/copy.go:38 +#: lxc/copy.go:44 msgid "you must specify a source container name" -msgstr "" +msgstr "コピー元ã®ã‚³ãƒ³ãƒ†ãƒŠåを指定ã—ã¦ãã ã•ã„" #, fuzzy #~ msgid "Bad image property: %s" @@ -1157,9 +1767,6 @@ #~ msgid "Cannot change profile name" #~ msgstr "プロファイルåを変更ã§ãã¾ã›ã‚“" -#~ msgid "Could not create server cert dir" -#~ msgstr "サーãƒè¨¼æ˜Žæ›¸æ ¼ç´ç”¨ã®ãƒ‡ã‚£ãƒ¬ã‚¯ãƒˆãƒªã‚’作æˆã§ãã¾ã›ã‚“。" - #, fuzzy #~ msgid "" #~ "Create a read-only snapshot of a container.\n" diff -Nru 
juju-core-2.0~beta15/src/github.com/lxc/lxd/po/lxd.pot juju-core-2.0.0/src/github.com/lxc/lxd/po/lxd.pot --- juju-core-2.0~beta15/src/github.com/lxc/lxd/po/lxd.pot 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/po/lxd.pot 2016-10-13 14:31:53.000000000 +0000 @@ -7,7 +7,7 @@ msgid "" msgstr "Project-Id-Version: lxd\n" "Report-Msgid-Bugs-To: lxc-devel@lists.linuxcontainers.org\n" - "POT-Creation-Date: 2016-03-25 20:07-0400\n" + "POT-Creation-Date: 2016-10-10 23:45-0400\n" "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" "Last-Translator: FULL NAME \n" "Language-Team: LANGUAGE \n" @@ -16,19 +16,23 @@ "Content-Type: text/plain; charset=CHARSET\n" "Content-Transfer-Encoding: 8bit\n" -#: lxc/info.go:134 +#: lxc/info.go:154 +msgid " CPU usage:" +msgstr "" + +#: lxc/info.go:143 msgid " Disk usage:" msgstr "" -#: lxc/info.go:157 +#: lxc/info.go:177 msgid " Memory usage:" msgstr "" -#: lxc/info.go:172 +#: lxc/info.go:194 msgid " Network usage:" msgstr "" -#: lxc/config.go:37 +#: lxc/config.go:36 msgid "### This is a yaml representation of the configuration.\n" "### Any line starting with a '# will be ignored.\n" "###\n" @@ -48,7 +52,7 @@ "### Note that the name is shown but cannot be changed" msgstr "" -#: lxc/image.go:83 +#: lxc/image.go:85 msgid "### This is a yaml representation of the image properties.\n" "### Any line starting with a '# will be ignored.\n" "###\n" @@ -57,6 +61,25 @@ "### description: My custom image" msgstr "" +#: lxc/network.go:28 +msgid "### This is a yaml representation of the network.\n" + "### Any line starting with a '# will be ignored.\n" + "###\n" + "### A network consists of a set of configuration items.\n" + "###\n" + "### An example would look like:\n" + "### name: lxdbr0\n" + "### config:\n" + "### ipv4.address: 10.62.42.1/24\n" + "### ipv4.nat: true\n" + "### ipv6.address: fd00:56ad:9f7a:9800::1/64\n" + "### ipv6.nat: true\n" + "### managed: true\n" + "### type: bridge\n" + "###\n" + "### Note that only the 
configuration can be changed." +msgstr "" + #: lxc/profile.go:27 msgid "### This is a yaml representation of the profile.\n" "### Any line starting with a '# will be ignored.\n" @@ -71,13 +94,13 @@ "### devices:\n" "### eth0:\n" "### nictype: bridged\n" - "### parent: lxcbr0\n" + "### parent: lxdbr0\n" "### type: nic\n" "###\n" "### Note that the name is shown but cannot be changed" msgstr "" -#: lxc/image.go:569 +#: lxc/image.go:617 #, c-format msgid "%s (%d more)" msgstr "" @@ -86,32 +109,32 @@ msgid "'/' not allowed in snapshot name" msgstr "" -#: lxc/profile.go:226 +#: lxc/profile.go:254 msgid "(none)" msgstr "" -#: lxc/image.go:590 lxc/image.go:615 +#: lxc/image.go:638 lxc/image.go:680 msgid "ALIAS" msgstr "" -#: lxc/image.go:594 +#: lxc/image.go:642 msgid "ARCH" msgstr "" -#: lxc/list.go:336 +#: lxc/list.go:425 msgid "ARCHITECTURE" msgstr "" -#: lxc/remote.go:53 +#: lxc/remote.go:52 msgid "Accept certificate" msgstr "" -#: lxc/remote.go:250 +#: lxc/remote.go:268 #, c-format msgid "Admin password for %s: " msgstr "" -#: lxc/image.go:333 +#: lxc/image.go:365 msgid "Aliases:" msgstr "" @@ -119,12 +142,12 @@ msgid "An environment variable of the form HOME=/home/foo" msgstr "" -#: lxc/image.go:316 lxc/info.go:88 +#: lxc/image.go:348 lxc/info.go:93 #, c-format msgid "Architecture: %s" msgstr "" -#: lxc/image.go:337 +#: lxc/image.go:369 #, c-format msgid "Auto update: %s" msgstr "" @@ -133,99 +156,103 @@ msgid "Available commands:" msgstr "" -#: lxc/info.go:165 +#: lxc/info.go:186 msgid "Bytes received" msgstr "" -#: lxc/info.go:166 +#: lxc/info.go:187 msgid "Bytes sent" msgstr "" -#: lxc/config.go:270 +#: lxc/config.go:273 msgid "COMMON NAME" msgstr "" -#: lxc/list.go:337 +#: lxc/info.go:150 +msgid "CPU usage (in seconds)" +msgstr "" + +#: lxc/list.go:426 msgid "CREATED AT" msgstr "" -#: lxc/config.go:114 +#: lxc/config.go:113 #, c-format msgid "Can't read from stdin: %s" msgstr "" -#: lxc/config.go:127 lxc/config.go:160 lxc/config.go:182 +#: lxc/config.go:126 
lxc/config.go:159 lxc/config.go:181 #, c-format msgid "Can't unset key '%s', it's not currently set." msgstr "" -#: lxc/profile.go:343 +#: lxc/network.go:386 lxc/profile.go:420 msgid "Cannot provide container name to list" msgstr "" -#: lxc/remote.go:200 +#: lxc/remote.go:218 #, c-format msgid "Certificate fingerprint: %x" msgstr "" -#: lxc/action.go:28 +#: lxc/action.go:33 #, c-format msgid "Changes state of one or more containers to %s.\n" "\n" - "lxc %s [...]" + "lxc %s [...]%s" msgstr "" -#: lxc/remote.go:273 +#: lxc/remote.go:291 msgid "Client certificate stored at server: " msgstr "" -#: lxc/list.go:90 lxc/list.go:91 +#: lxc/list.go:121 lxc/list.go:122 msgid "Columns" msgstr "" -#: lxc/init.go:134 lxc/init.go:135 lxc/launch.go:40 lxc/launch.go:41 +#: lxc/copy.go:31 lxc/copy.go:32 lxc/init.go:135 lxc/init.go:136 lxc/launch.go:40 lxc/launch.go:41 msgid "Config key/value to apply to the new container" msgstr "" -#: lxc/config.go:500 lxc/config.go:565 lxc/image.go:669 lxc/profile.go:190 +#: lxc/config.go:530 lxc/config.go:595 lxc/image.go:734 lxc/network.go:342 lxc/profile.go:218 #, c-format msgid "Config parsing error: %s" msgstr "" -#: lxc/main.go:37 +#: lxc/main.go:29 msgid "Connection refused; is LXD running?" msgstr "" -#: lxc/publish.go:59 +#: lxc/publish.go:61 msgid "Container name is mandatory" msgstr "" -#: lxc/init.go:209 +#: lxc/copy.go:140 lxc/copy.go:237 lxc/init.go:227 #, c-format msgid "Container name is: %s" msgstr "" -#: lxc/publish.go:141 lxc/publish.go:156 +#: lxc/publish.go:143 lxc/publish.go:158 #, c-format msgid "Container published with fingerprint: %s" msgstr "" -#: lxc/image.go:155 +#: lxc/image.go:166 msgid "Copy aliases from source" msgstr "" -#: lxc/copy.go:22 +#: lxc/copy.go:24 msgid "Copy containers within or in between lxd instances.\n" "\n" - "lxc copy [remote:] [remote:] [--ephemeral|e]" + "lxc copy [remote:] [[remote:]] [--ephemeral|e] [--profile|-p ...] 
[--config|-c ...]" msgstr "" -#: lxc/image.go:254 +#: lxc/image.go:280 #, c-format msgid "Copying the image: %s" msgstr "" -#: lxc/remote.go:215 +#: lxc/remote.go:233 msgid "Could not create server cert dir" msgstr "" @@ -245,24 +272,32 @@ "lxc snapshot u1 snap0" msgstr "" -#: lxc/image.go:321 lxc/info.go:90 +#: lxc/file.go:61 lxc/file.go:62 +msgid "Create any directories necessary" +msgstr "" + +#: lxc/image.go:353 lxc/info.go:95 #, c-format msgid "Created: %s" msgstr "" -#: lxc/init.go:177 lxc/launch.go:116 +#: lxc/init.go:180 lxc/launch.go:134 #, c-format msgid "Creating %s" msgstr "" -#: lxc/init.go:175 +#: lxc/init.go:178 msgid "Creating the container" msgstr "" -#: lxc/image.go:593 lxc/image.go:617 +#: lxc/image.go:641 lxc/image.go:682 msgid "DESCRIPTION" msgstr "" +#: lxc/publish.go:37 +msgid "Define a compression algorithm: for image or none" +msgstr "" + #: lxc/delete.go:25 msgid "Delete containers or container snapshots.\n" "\n" @@ -271,37 +306,37 @@ "Destroy containers or snapshots with any attached data (configuration, snapshots, ...)." msgstr "" -#: lxc/config.go:617 +#: lxc/config.go:647 #, c-format msgid "Device %s added to %s" msgstr "" -#: lxc/config.go:804 +#: lxc/config.go:834 #, c-format msgid "Device %s removed from %s" msgstr "" -#: lxc/list.go:420 +#: lxc/list.go:570 msgid "EPHEMERAL" msgstr "" -#: lxc/config.go:272 +#: lxc/config.go:275 msgid "EXPIRY DATE" msgstr "" -#: lxc/main.go:55 +#: lxc/main.go:41 msgid "Enables debug mode." msgstr "" -#: lxc/main.go:54 +#: lxc/main.go:40 msgid "Enables verbose mode." 
msgstr "" -#: lxc/help.go:68 +#: lxc/help.go:69 msgid "Environment:" msgstr "" -#: lxc/copy.go:29 lxc/copy.go:30 lxc/init.go:138 lxc/init.go:139 lxc/launch.go:44 lxc/launch.go:45 +#: lxc/copy.go:35 lxc/copy.go:36 lxc/init.go:139 lxc/init.go:140 lxc/launch.go:44 lxc/launch.go:45 msgid "Ephemeral container" msgstr "" @@ -312,40 +347,40 @@ #: lxc/exec.go:45 msgid "Execute the specified command in a container.\n" "\n" - "lxc exec [remote:]container [--mode=auto|interactive|non-interactive] [--env EDITOR=/usr/bin/vim]... \n" + "lxc exec [remote:]container [--mode=auto|interactive|non-interactive] [--env EDITOR=/usr/bin/vim]... [--] \n" "\n" "Mode defaults to non-interactive, interactive mode is selected if both stdin AND stdout are terminals (stderr is ignored)." msgstr "" -#: lxc/image.go:325 +#: lxc/image.go:357 #, c-format msgid "Expires: %s" msgstr "" -#: lxc/image.go:327 +#: lxc/image.go:359 msgid "Expires: never" msgstr "" -#: lxc/config.go:269 lxc/image.go:591 lxc/image.go:616 +#: lxc/config.go:272 lxc/image.go:639 lxc/image.go:681 msgid "FINGERPRINT" msgstr "" -#: lxc/list.go:92 +#: lxc/list.go:124 msgid "Fast mode (same as --columns=nsacPt" msgstr "" -#: lxc/image.go:314 +#: lxc/image.go:346 #, c-format msgid "Fingerprint: %s" msgstr "" -#: lxc/finger.go:17 +#: lxc/finger.go:15 msgid "Fingers the LXD instance to check if it is up and working.\n" "\n" "lxc finger " msgstr "" -#: lxc/action.go:37 +#: lxc/action.go:42 lxc/action.go:43 msgid "Force the container to shutdown." msgstr "" @@ -353,47 +388,60 @@ msgid "Force the removal of stopped containers." msgstr "" -#: lxc/main.go:56 +#: lxc/main.go:42 msgid "Force using the local unix socket." msgstr "" -#: lxc/main.go:138 +#: lxc/image.go:169 lxc/list.go:123 +msgid "Format" +msgstr "" + +#: lxc/remote.go:66 msgid "Generating a client certificate. This may take a minute..." 
msgstr "" -#: lxc/list.go:334 +#: lxc/list.go:423 msgid "IPV4" msgstr "" -#: lxc/list.go:335 +#: lxc/list.go:424 msgid "IPV6" msgstr "" -#: lxc/config.go:271 +#: lxc/config.go:274 msgid "ISSUE DATE" msgstr "" -#: lxc/main.go:57 +#: lxc/main.go:136 +msgid "If this is your first time using LXD, you should also run: sudo lxd init" +msgstr "" + +#: lxc/main.go:43 msgid "Ignore aliases when determining what command to run." msgstr "" -#: lxc/action.go:39 -msgid "Ignore the container state (only forstart)." +#: lxc/action.go:46 +msgid "Ignore the container state (only for start)." msgstr "" -#: lxc/image.go:259 +#: lxc/image.go:285 msgid "Image copied successfully!" msgstr "" -#: lxc/image.go:405 +#: lxc/image.go:442 #, c-format msgid "Image imported with fingerprint: %s" msgstr "" -#: lxc/init.go:73 +#: lxc/image.go:429 +#, c-format +msgid "Importing the image: %s" +msgstr "" + +#: lxc/init.go:74 msgid "Initialize a container from a particular image.\n" "\n" - "lxc init [remote:] [remote:][] [--ephemeral|-e] [--profile|-p ...] [--config|-c ...]\n" + "lxc init [remote:] [remote:][] [--ephemeral|-e] [--profile|-p ...] [--config|-c ...] [--network|-n ]\n" "\n" "Initializes a container using the specified image and name.\n" "\n" @@ -404,41 +452,49 @@ "lxc init ubuntu u1" msgstr "" -#: lxc/remote.go:116 +#: lxc/remote.go:136 #, c-format msgid "Invalid URL scheme \"%s\" in \"%s\"" msgstr "" +#: lxc/config.go:253 +msgid "Invalid certificate" +msgstr "" + #: lxc/init.go:30 lxc/init.go:35 msgid "Invalid configuration key" msgstr "" -#: lxc/file.go:186 +#: lxc/file.go:239 #, c-format msgid "Invalid source %s" msgstr "" -#: lxc/file.go:57 +#: lxc/file.go:74 #, c-format msgid "Invalid target %s" msgstr "" -#: lxc/info.go:117 +#: lxc/info.go:124 msgid "Ips:" msgstr "" -#: lxc/image.go:156 +#: lxc/image.go:167 msgid "Keep the image up to date after initial copy" msgstr "" -#: lxc/main.go:35 -msgid "LXD socket not found; is LXD running?" 
+#: lxc/list.go:427 +msgid "LAST USED AT" +msgstr "" + +#: lxc/main.go:27 +msgid "LXD socket not found; is LXD installed and running?" msgstr "" #: lxc/launch.go:22 msgid "Launch a container from a particular image.\n" "\n" - "lxc launch [remote:] [remote:][] [--ephemeral|-e] [--profile|-p ...] [--config|-c ...]\n" + "lxc launch [remote:] [remote:][] [--ephemeral|-e] [--profile|-p ...] [--config|-c ...] [--network|-n ]\n" "\n" "Launches a container using the specified image and name.\n" "\n" @@ -446,35 +502,46 @@ "Specifying \"-p\" with no argument will result in no profile.\n" "\n" "Example:\n" - "lxc launch ubuntu u1" + "lxc launch ubuntu:16.04 u1" msgstr "" #: lxc/info.go:25 -msgid "List information on containers.\n" +msgid "List information on LXD servers and containers.\n" "\n" - "This will support remotes and images as well, but only containers for now.\n" + "For a container:\n" + " lxc info [:]container [--show-log]\n" "\n" - "lxc info [:]container [--show-log]" + "For a server:\n" + " lxc info [:]" msgstr "" -#: lxc/list.go:60 +#: lxc/list.go:68 msgid "Lists the available resources.\n" "\n" - "lxc list [resource] [filters] [-c columns] [--fast]\n" + "lxc list [resource] [filters] [--format table|json] [-c columns] [--fast]\n" "\n" "The filters are:\n" - "* A single keyword like \"web\" which will list any container with \"web\" in its name.\n" + "* A single keyword like \"web\" which will list any container with a name starting by \"web\".\n" + "* A regular expression on the container name. (e.g. .*web.*01$)\n" "* A key/value pair referring to a configuration item. 
For those, the namespace can be abreviated to the smallest unambiguous identifier:\n" - "* \"user.blah=abc\" will list all containers with the \"blah\" user property set to \"abc\"\n" - "* \"u.blah=abc\" will do the same\n" - "* \"security.privileged=1\" will list all privileged containers\n" - "* \"s.privileged=1\" will do the same\n" + " * \"user.blah=abc\" will list all containers with the \"blah\" user property set to \"abc\".\n" + " * \"u.blah=abc\" will do the same\n" + " * \"security.privileged=1\" will list all privileged containers\n" + " * \"s.privileged=1\" will do the same\n" + "* A regular expression matching a configuration item or its value. (e.g. volatile.eth0.hwaddr=00:16:3e:.*)\n" + "\n" + "The -c option takes a comma separated list of arguments that control\n" + "which container attributes to output when displaying in table format.\n" + "Column arguments are either pre-defined shorthand chars (see below),\n" + "or (extended) config keys. Commas between consecutive shorthand chars\n" + "are optional.\n" "\n" - "The columns are:\n" + "Pre-defined shorthand chars:\n" "* 4 - IPv4 address\n" "* 6 - IPv6 address\n" "* a - architecture\n" "* c - creation date\n" + "* l - last used date\n" "* n - name\n" "* p - pid of container init process\n" "* P - profiles\n" @@ -482,19 +549,36 @@ "* S - number of snapshots\n" "* t - type (persistent or ephemeral)\n" "\n" + "Config key syntax: key[:name][:maxWidth]\n" + "* key - The (extended) config key to display\n" + "* name - Name to display in the column header, defaults to the key\n" + " if not specified or if empty (to allow defining maxWidth\n" + " without a custom name, e.g. 
user.key::0)\n" + "* maxWidth - Max width of the column (longer results are truncated).\n" + " -1 == unlimited\n" + " 0 == width of column header\n" + " >0 == max width in chars\n" + " Default is -1 (unlimited)\n" + "\n" "Default column layout: ns46tS\n" - "Fast column layout: nsacPt" + "Fast column layout: nsacPt\n" + "\n" + "Example: lxc list -c n,volatile.base_image:\"BASE IMAGE\":0,s46,volatile.eth0.hwaddr:MAC\n" msgstr "" -#: lxc/info.go:215 +#: lxc/info.go:239 msgid "Log:" msgstr "" -#: lxc/image.go:154 +#: lxc/network.go:424 +msgid "MANAGED" +msgstr "" + +#: lxc/image.go:165 msgid "Make image public" msgstr "" -#: lxc/publish.go:32 +#: lxc/publish.go:33 msgid "Make the image public" msgstr "" @@ -507,19 +591,25 @@ "lxc profile copy Copy the profile to the specified remote.\n" "lxc profile get Get profile configuration.\n" "lxc profile set Set profile configuration.\n" + "lxc profile unset Unset profile configuration.\n" "lxc profile delete Delete a profile.\n" "lxc profile edit \n" " Edit profile, either by launching external editor or reading STDIN.\n" " Example: lxc profile edit # launch editor\n" " cat profile.yml | lxc profile edit # read from profile.yml\n" - "lxc profile apply \n" - " Apply a comma-separated list of profiles to a container, in order.\n" + "\n" + "lxc profile assign \n" + " Assign a comma-separated list of profiles to a container, in order.\n" " All profiles passed in this call (and only those) will be applied\n" - " to the specified container.\n" - " Example: lxc profile apply foo default,bar # Apply default and bar\n" - " lxc profile apply foo default # Only default is active\n" - " lxc profile apply '' # no profiles are applied anymore\n" - " lxc profile apply bar,default # Apply default second now\n" + " to the specified container, i.e. it sets the list of profiles exactly to\n" + " those specified in this command. 
To add/remove a particular profile from a\n" + " container, use {add|remove} below.\n" + " Example: lxc profile assign foo default,bar # Apply default and bar\n" + " lxc profile assign foo default # Only default is active\n" + " lxc profile assign '' # no profiles are applied anymore\n" + " lxc profile assign bar,default # Apply default second now\n" + "lxc profile add # add a profile to a container\n" + "lxc profile remove # remove the profile from a container\n" "\n" "Devices:\n" "lxc profile device list List devices in the given profile.\n" @@ -533,7 +623,7 @@ " using the specified profile." msgstr "" -#: lxc/config.go:58 +#: lxc/config.go:57 msgid "Manage configuration.\n" "\n" "lxc config device add <[remote:]container> [key=value]... Add a device to a container.\n" @@ -559,7 +649,7 @@ "\n" "Examples:\n" "To mount host's /share/c1 onto /opt in the container:\n" - " lxc config device add [remote:]container1 disk source=/share/c1 path=opt\n" + " lxc config device add [remote:]container1 disk source=/share/c1 path=opt\n" "\n" "To set an lxc config value:\n" " lxc config set [remote:] raw.lxc 'lxc.aa_allow_incomplete = 1'\n" @@ -571,21 +661,51 @@ " lxc config set core.trust_password blah" msgstr "" -#: lxc/file.go:32 +#: lxc/file.go:36 msgid "Manage files on a container.\n" "\n" - "lxc file pull [...] \n" - "lxc file push [--uid=UID] [--gid=GID] [--mode=MODE] [...] \n" + "lxc file pull [-r|--recursive] [...] \n" + "lxc file push [-r|--recursive] [-p|create-dirs] [--uid=UID] [--gid=GID] [--mode=MODE] [...] 
\n" "lxc file edit \n" "\n" - " in the case of pull, in the case of push and in the case of edit are /" + " in the case of pull, in the case of push and in the case of edit are /\n" + "\n" + "Examples:\n" + "\n" + "To push /etc/hosts into the container foo:\n" + " lxc file push /etc/hosts foo/etc/hosts\n" + "\n" + "To pull /etc/hosts from the container:\n" + " lxc file pull foo/etc/hosts .\n" +msgstr "" + +#: lxc/network.go:48 +msgid "Manage networks.\n" + "\n" + "lxc network list List available networks.\n" + "lxc network show Show details of a network.\n" + "lxc network create [key=value]... Create a network.\n" + "lxc network get Get network configuration.\n" + "lxc network set Set network configuration.\n" + "lxc network unset Unset network configuration.\n" + "lxc network delete Delete a network.\n" + "lxc network edit \n" + " Edit network, either by launching external editor or reading STDIN.\n" + " Example: lxc network edit # launch editor\n" + " cat network.yml | lxc network edit # read from network.yml\n" + "\n" + "lxc network attach [device name]\n" + "lxc network attach-profile [device name]\n" + "\n" + "lxc network detach [device name]\n" + "lxc network detach-profile [device name]\n" msgstr "" -#: lxc/remote.go:39 +#: lxc/remote.go:38 msgid "Manage remote LXD servers.\n" "\n" - "lxc remote add [--accept-certificate] [--password=PASSWORD]\n" - " [--public] [--protocol=PROTOCOL] Add the remote at .\n" + "lxc remote add [] [--accept-certificate] [--password=PASSWORD]\n" + " [--public] [--protocol=PROTOCOL] Add the remote at .\n" "lxc remote remove Remove the remote .\n" "lxc remote list List all remotes.\n" "lxc remote rename Rename remote to .\n" @@ -594,7 +714,7 @@ "lxc remote get-default Print the default remote." msgstr "" -#: lxc/image.go:93 +#: lxc/image.go:95 msgid "Manipulate container images.\n" "\n" "In LXD containers are created from images. 
Those images were themselves\n" @@ -612,7 +732,7 @@ "hash or alias name (if one is set).\n" "\n" "\n" - "lxc image import [rootfs tarball|URL] [remote:] [--public] [--created-at=ISO-8601] [--expires-at=ISO-8601] [--fingerprint=FINGERPRINT] [prop=value]\n" + "lxc image import [rootfs tarball|URL] [remote:] [--public] [--created-at=ISO-8601] [--expires-at=ISO-8601] [--fingerprint=FINGERPRINT] [--alias=ALIAS].. [prop=value]\n" " Import an image tarball (or tarballs) into the LXD image store.\n" "\n" "lxc image copy [remote:] : [--alias=ALIAS].. [--copy-aliases] [--public] [--auto-update]\n" @@ -621,16 +741,25 @@ " The auto-update flag instructs the server to keep this image up to\n" " date. It requires the source to be an alias and for it to be public.\n" "\n" - "lxc image delete [remote:]\n" - " Delete an image from the LXD image store.\n" + "lxc image delete [remote:] [remote:][...]\n" + " Delete one or more images from the LXD image store.\n" "\n" - "lxc image export [remote:]\n" + "lxc image export [remote:] [target]\n" " Export an image from the LXD image store into a distributable tarball.\n" "\n" + " The output target is optional and defaults to the working directory.\n" + " The target may be an existing directory, file name, or \"-\" to specify\n" + " stdout. The target MUST be a directory when exporting a split image.\n" + " If the target is a directory, the image's name (each part's name for\n" + " split images) as found in the database will be used for the exported\n" + " image. If the target is a file (not a directory and not stdout), then\n" + " the appropriate extension will be appended to the provided file name\n" + " based on the algorithm used to compress the image. \n" + "\n" "lxc image info [remote:]\n" " Print everything LXD knows about a given image.\n" "\n" - "lxc image list [remote:] [filter]\n" + "lxc image list [remote:] [filter] [--format table|json]\n" " List images in the LXD image store. 
Filters may be of the\n" " = form for property based filtering, or part of the image\n" " hash or part of the image alias name.\n" @@ -649,19 +778,19 @@ "lxc image alias delete [remote:]\n" " Delete an alias.\n" "\n" - "lxc image alias list [remote:]\n" - " List the aliases.\n" + "lxc image alias list [remote:] [filter]\n" + " List the aliases. Filters may be part of the image hash or part of the image alias name.\n" msgstr "" -#: lxc/info.go:141 +#: lxc/info.go:161 msgid "Memory (current)" msgstr "" -#: lxc/info.go:145 +#: lxc/info.go:165 msgid "Memory (peak)" msgstr "" -#: lxc/help.go:86 +#: lxc/help.go:87 msgid "Missing summary." msgstr "" @@ -679,11 +808,15 @@ "lxc monitor --type=logging" msgstr "" -#: lxc/file.go:174 +#: lxc/network.go:216 lxc/network.go:265 +msgid "More than one device matches, specify the device name." +msgstr "" + +#: lxc/file.go:227 msgid "More than one file to download, but target is not a directory" msgstr "" -#: lxc/move.go:17 +#: lxc/move.go:16 msgid "Move containers within or in between lxd instances.\n" "\n" "lxc move [remote:] [remote:]\n" @@ -693,48 +826,66 @@ " Rename a local container.\n" msgstr "" -#: lxc/action.go:63 +#: lxc/action.go:69 msgid "Must supply container name for: " msgstr "" -#: lxc/list.go:338 lxc/remote.go:357 +#: lxc/list.go:428 lxc/network.go:422 lxc/profile.go:447 lxc/remote.go:381 msgid "NAME" msgstr "" -#: lxc/remote.go:331 lxc/remote.go:336 +#: lxc/network.go:408 lxc/remote.go:355 lxc/remote.go:360 msgid "NO" msgstr "" -#: lxc/info.go:87 +#: lxc/info.go:89 #, c-format msgid "Name: %s" msgstr "" -#: lxc/image.go:157 lxc/publish.go:33 +#: lxc/network.go:190 +#, c-format +msgid "Network %s created" +msgstr "" + +#: lxc/network.go:293 +#, c-format +msgid "Network %s deleted" +msgstr "" + +#: lxc/init.go:141 lxc/init.go:142 lxc/launch.go:46 lxc/launch.go:47 +msgid "Network name" +msgstr "" + +#: lxc/image.go:168 lxc/publish.go:34 msgid "New alias to define at target" msgstr "" -#: lxc/config.go:281 +#: 
lxc/config.go:284 msgid "No certificate provided to add" msgstr "" -#: lxc/config.go:304 +#: lxc/network.go:225 lxc/network.go:274 +msgid "No device found for this network" +msgstr "" + +#: lxc/config.go:307 msgid "No fingerprint specified." msgstr "" -#: lxc/remote.go:107 +#: lxc/remote.go:121 msgid "Only https URLs are supported for simplestreams" msgstr "" -#: lxc/image.go:397 +#: lxc/image.go:434 msgid "Only https:// is supported for remote image import." msgstr "" -#: lxc/help.go:63 lxc/main.go:122 +#: lxc/help.go:63 lxc/main.go:112 msgid "Options:" msgstr "" -#: lxc/image.go:492 +#: lxc/image.go:538 #, c-format msgid "Output is in %s" msgstr "" @@ -743,47 +894,47 @@ msgid "Override the terminal mode (auto, interactive or non-interactive)" msgstr "" -#: lxc/list.go:422 +#: lxc/list.go:572 msgid "PERSISTENT" msgstr "" -#: lxc/list.go:339 +#: lxc/list.go:429 msgid "PID" msgstr "" -#: lxc/list.go:340 +#: lxc/list.go:430 msgid "PROFILES" msgstr "" -#: lxc/remote.go:359 +#: lxc/remote.go:383 msgid "PROTOCOL" msgstr "" -#: lxc/image.go:592 lxc/remote.go:360 +#: lxc/image.go:640 lxc/remote.go:384 msgid "PUBLIC" msgstr "" -#: lxc/info.go:167 +#: lxc/info.go:188 msgid "Packets received" msgstr "" -#: lxc/info.go:168 +#: lxc/info.go:189 msgid "Packets sent" msgstr "" -#: lxc/help.go:69 +#: lxc/help.go:70 msgid "Path to an alternate client configuration directory." msgstr "" -#: lxc/help.go:70 +#: lxc/help.go:71 msgid "Path to an alternate server directory." msgstr "" -#: lxc/main.go:39 -msgid "Permisson denied, are you in the lxd group?" +#: lxc/main.go:31 +msgid "Permission denied, are you in the lxd group?" 
msgstr "" -#: lxc/info.go:101 +#: lxc/info.go:106 #, c-format msgid "Pid: %d" msgstr "" @@ -794,11 +945,11 @@ "lxd help [--all]" msgstr "" -#: lxc/profile.go:191 +#: lxc/network.go:343 lxc/profile.go:219 msgid "Press enter to open the editor again" msgstr "" -#: lxc/config.go:501 lxc/config.go:566 lxc/image.go:670 +#: lxc/config.go:531 lxc/config.go:596 lxc/image.go:735 msgid "Press enter to start the editor again" msgstr "" @@ -814,64 +965,87 @@ msgid "Print verbose information." msgstr "" +#: lxc/manpage.go:18 +msgid "Prints all the subcommands help." +msgstr "" + #: lxc/version.go:18 -msgid "Prints the version number of LXD.\n" +msgid "Prints the version number of this client tool.\n" "\n" "lxc version" msgstr "" -#: lxc/info.go:123 +#: lxc/info.go:130 #, c-format msgid "Processes: %d" msgstr "" -#: lxc/profile.go:228 +#: lxc/profile.go:275 #, c-format -msgid "Profile %s applied to %s" +msgid "Profile %s added to %s" msgstr "" -#: lxc/profile.go:142 +#: lxc/profile.go:170 #, c-format msgid "Profile %s created" msgstr "" -#: lxc/profile.go:212 +#: lxc/profile.go:240 #, c-format msgid "Profile %s deleted" msgstr "" -#: lxc/init.go:136 lxc/init.go:137 lxc/launch.go:42 lxc/launch.go:43 +#: lxc/profile.go:306 +#, c-format +msgid "Profile %s removed from %s" +msgstr "" + +#: lxc/copy.go:33 lxc/copy.go:34 lxc/init.go:137 lxc/init.go:138 lxc/launch.go:42 lxc/launch.go:43 msgid "Profile to apply to the new container" msgstr "" -#: lxc/info.go:99 +#: lxc/profile.go:256 +#, c-format +msgid "Profiles %s applied to %s" +msgstr "" + +#: lxc/info.go:104 #, c-format msgid "Profiles: %s" msgstr "" -#: lxc/image.go:329 +#: lxc/image.go:361 msgid "Properties:" msgstr "" -#: lxc/remote.go:56 +#: lxc/remote.go:55 msgid "Public image server" msgstr "" -#: lxc/image.go:317 +#: lxc/image.go:349 #, c-format msgid "Public: %s" msgstr "" -#: lxc/publish.go:25 +#: lxc/publish.go:26 msgid "Publish containers as images.\n" "\n" "lxc publish [remote:]container [remote:] [--alias=ALIAS]... 
[prop-key=prop-value]..." msgstr "" -#: lxc/remote.go:54 +#: lxc/file.go:59 lxc/file.go:60 +msgid "Recursively push or pull files" +msgstr "" + +#: lxc/remote.go:53 msgid "Remote admin password" msgstr "" +#: lxc/info.go:91 +#, c-format +msgid "Remote: %s" +msgstr "" + #: lxc/delete.go:42 #, c-format msgid "Remove %s (yes/no): " @@ -881,40 +1055,40 @@ msgid "Require user confirmation." msgstr "" -#: lxc/info.go:120 +#: lxc/info.go:127 msgid "Resources:" msgstr "" -#: lxc/init.go:246 +#: lxc/init.go:267 #, c-format msgid "Retrieving image: %s" msgstr "" -#: lxc/image.go:595 +#: lxc/image.go:643 msgid "SIZE" msgstr "" -#: lxc/list.go:341 +#: lxc/list.go:431 msgid "SNAPSHOTS" msgstr "" -#: lxc/list.go:342 +#: lxc/list.go:432 msgid "STATE" msgstr "" -#: lxc/remote.go:361 +#: lxc/remote.go:385 msgid "STATIC" msgstr "" -#: lxc/remote.go:208 +#: lxc/remote.go:226 msgid "Server certificate NACKed by user" msgstr "" -#: lxc/remote.go:270 +#: lxc/remote.go:288 msgid "Server doesn't trust us after adding our cert" msgstr "" -#: lxc/remote.go:55 +#: lxc/remote.go:54 msgid "Server protocol (lxd or simplestreams)" msgstr "" @@ -931,15 +1105,15 @@ "lxc restore u1 snap0 # restore the snapshot" msgstr "" -#: lxc/file.go:44 +#: lxc/file.go:57 msgid "Set the file's gid on push" msgstr "" -#: lxc/file.go:45 +#: lxc/file.go:58 msgid "Set the file's perms on push" msgstr "" -#: lxc/file.go:43 +#: lxc/file.go:56 msgid "Set the file's uid on push" msgstr "" @@ -947,54 +1121,58 @@ msgid "Show all commands (not just interesting ones)" msgstr "" -#: lxc/info.go:34 +#: lxc/help.go:67 +msgid "Show client version." +msgstr "" + +#: lxc/info.go:36 msgid "Show the container's last 100 log lines?" 
msgstr "" -#: lxc/image.go:315 +#: lxc/image.go:347 #, c-format msgid "Size: %.2fMB" msgstr "" -#: lxc/info.go:186 +#: lxc/info.go:208 msgid "Snapshots:" msgstr "" -#: lxc/image.go:339 +#: lxc/image.go:371 msgid "Source:" msgstr "" -#: lxc/launch.go:122 +#: lxc/launch.go:142 #, c-format msgid "Starting %s" msgstr "" -#: lxc/info.go:93 +#: lxc/info.go:98 #, c-format msgid "Status: %s" msgstr "" -#: lxc/publish.go:34 lxc/publish.go:35 +#: lxc/publish.go:35 lxc/publish.go:36 msgid "Stop the container if currently running" msgstr "" -#: lxc/delete.go:106 lxc/publish.go:111 +#: lxc/delete.go:106 lxc/publish.go:113 msgid "Stopping container failed!" msgstr "" -#: lxc/action.go:38 +#: lxc/action.go:45 msgid "Store the container state (only for stop)." msgstr "" -#: lxc/info.go:149 +#: lxc/info.go:169 msgid "Swap (current)" msgstr "" -#: lxc/info.go:153 +#: lxc/info.go:173 msgid "Swap (peak)" msgstr "" -#: lxc/list.go:343 +#: lxc/list.go:433 lxc/network.go:423 msgid "TYPE" msgstr "" @@ -1002,62 +1180,99 @@ msgid "The container is currently running, stop it first or pass --force." msgstr "" -#: lxc/publish.go:89 +#: lxc/publish.go:91 msgid "The container is currently running. Use --force to have it stopped and restarted." msgstr "" -#: lxc/config.go:645 lxc/config.go:657 lxc/config.go:690 lxc/config.go:708 lxc/config.go:746 lxc/config.go:764 +#: lxc/init.go:313 +msgid "The container you are starting doesn’t have any network attached to it." +msgstr "" + +#: lxc/config.go:675 lxc/config.go:687 lxc/config.go:720 lxc/config.go:738 lxc/config.go:776 lxc/config.go:794 msgid "The device doesn't exist" msgstr "" -#: lxc/publish.go:62 +#: lxc/init.go:297 +#, c-format +msgid "The local image '%s' couldn't be found, trying '%s:' instead." +msgstr "" + +#: lxc/main.go:181 +msgid "The opposite of `lxc pause` is `lxc start`." 
+msgstr "" + +#: lxc/network.go:230 lxc/network.go:279 +msgid "The specified device doesn't exist" +msgstr "" + +#: lxc/network.go:234 lxc/network.go:283 +msgid "The specified device doesn't match the network" +msgstr "" + +#: lxc/publish.go:64 msgid "There is no \"image name\". Did you want an alias?" msgstr "" -#: lxc/action.go:36 +#: lxc/action.go:41 msgid "Time to wait for the container before killing it." msgstr "" -#: lxc/image.go:318 +#: lxc/image.go:350 msgid "Timestamps:" msgstr "" -#: lxc/image.go:388 +#: lxc/init.go:315 +msgid "To assign a network to a container, use: lxc network assign" +msgstr "" + +#: lxc/init.go:314 +msgid "To create a new network, use: lxc network create" +msgstr "" + +#: lxc/main.go:137 +msgid "To start your first container, try: lxc launch ubuntu:16.04" +msgstr "" + +#: lxc/image.go:421 #, c-format msgid "Transferring image: %d%%" msgstr "" -#: lxc/action.go:93 lxc/launch.go:130 +#: lxc/action.go:99 lxc/launch.go:155 #, c-format msgid "Try `lxc info --show-log %s` for more info" msgstr "" -#: lxc/info.go:95 +#: lxc/info.go:100 msgid "Type: ephemeral" msgstr "" -#: lxc/info.go:97 +#: lxc/info.go:102 msgid "Type: persistent" msgstr "" -#: lxc/image.go:596 +#: lxc/image.go:644 msgid "UPLOAD DATE" msgstr "" -#: lxc/remote.go:358 +#: lxc/remote.go:382 msgid "URL" msgstr "" -#: lxc/remote.go:82 +#: lxc/network.go:425 lxc/profile.go:448 +msgid "USED BY" +msgstr "" + +#: lxc/remote.go:96 msgid "Unable to read remote TLS certificate" msgstr "" -#: lxc/image.go:323 +#: lxc/image.go:355 #, c-format msgid "Uploaded: %s" msgstr "" -#: lxc/main.go:122 +#: lxc/main.go:112 #, c-format msgid "Usage: %s" msgstr "" @@ -1078,127 +1293,139 @@ msgid "Whether or not to snapshot the container's running state" msgstr "" -#: lxc/config.go:33 +#: lxc/config.go:32 msgid "Whether to show the expanded configuration" msgstr "" -#: lxc/remote.go:333 lxc/remote.go:338 +#: lxc/network.go:410 lxc/remote.go:357 lxc/remote.go:362 msgid "YES" msgstr "" -#: 
lxc/main.go:66 +#: lxc/main.go:52 msgid "`lxc config profile` is deprecated, please use `lxc profile`" msgstr "" -#: lxc/launch.go:109 +#: lxc/launch.go:127 msgid "bad number of things scanned from image, container or snapshot" msgstr "" -#: lxc/action.go:89 +#: lxc/action.go:95 msgid "bad result type from action" msgstr "" -#: lxc/copy.go:78 +#: lxc/copy.go:115 msgid "can't copy to the same container name" msgstr "" -#: lxc/remote.go:321 +#: lxc/file.go:262 +msgid "can't pull a directory without --recursive" +msgstr "" + +#: lxc/remote.go:345 msgid "can't remove the default remote" msgstr "" -#: lxc/remote.go:347 +#: lxc/file.go:107 +msgid "can't supply uid/gid/mode in recursive mode" +msgstr "" + +#: lxc/remote.go:371 msgid "default" msgstr "" -#: lxc/init.go:199 lxc/init.go:204 lxc/launch.go:93 lxc/launch.go:98 +#: lxc/copy.go:131 lxc/copy.go:136 lxc/copy.go:228 lxc/copy.go:233 lxc/init.go:217 lxc/init.go:222 lxc/launch.go:111 lxc/launch.go:116 msgid "didn't get any affected image, container or snapshot from server" msgstr "" -#: lxc/image.go:309 +#: lxc/image.go:341 msgid "disabled" msgstr "" -#: lxc/image.go:311 +#: lxc/image.go:343 msgid "enabled" msgstr "" -#: lxc/main.go:25 lxc/main.go:154 +#: lxc/main.go:22 lxc/main.go:148 #, c-format msgid "error: %v" msgstr "" -#: lxc/help.go:40 lxc/main.go:117 +#: lxc/help.go:40 lxc/main.go:107 #, c-format msgid "error: unknown command: %s" msgstr "" -#: lxc/launch.go:113 +#: lxc/launch.go:131 msgid "got bad version" msgstr "" -#: lxc/image.go:304 lxc/image.go:572 +#: lxc/image.go:336 lxc/image.go:620 msgid "no" msgstr "" -#: lxc/copy.go:101 +#: lxc/copy.go:164 msgid "not all the profiles from the source exist on the target" msgstr "" -#: lxc/remote.go:201 +#: lxc/remote.go:219 msgid "ok (y/n)?" 
msgstr "" -#: lxc/main.go:261 lxc/main.go:265 +#: lxc/main.go:304 lxc/main.go:308 #, c-format msgid "processing aliases failed %s\n" msgstr "" -#: lxc/remote.go:383 +#: lxc/file.go:303 +msgid "recursive edit doesn't make sense :(" +msgstr "" + +#: lxc/remote.go:407 #, c-format msgid "remote %s already exists" msgstr "" -#: lxc/remote.go:313 lxc/remote.go:375 lxc/remote.go:410 lxc/remote.go:426 +#: lxc/remote.go:337 lxc/remote.go:399 lxc/remote.go:434 lxc/remote.go:450 #, c-format msgid "remote %s doesn't exist" msgstr "" -#: lxc/remote.go:296 +#: lxc/remote.go:320 #, c-format msgid "remote %s exists as <%s>" msgstr "" -#: lxc/remote.go:317 lxc/remote.go:379 lxc/remote.go:414 +#: lxc/remote.go:341 lxc/remote.go:403 lxc/remote.go:438 #, c-format msgid "remote %s is static and cannot be modified" msgstr "" -#: lxc/info.go:195 +#: lxc/info.go:219 msgid "stateful" msgstr "" -#: lxc/info.go:197 +#: lxc/info.go:221 msgid "stateless" msgstr "" -#: lxc/info.go:191 +#: lxc/info.go:215 #, c-format msgid "taken at %s" msgstr "" -#: lxc/exec.go:167 +#: lxc/exec.go:163 msgid "unreachable return reached" msgstr "" -#: lxc/main.go:194 +#: lxc/main.go:236 msgid "wrong number of subcommand arguments" msgstr "" -#: lxc/delete.go:45 lxc/image.go:306 lxc/image.go:576 +#: lxc/delete.go:45 lxc/image.go:338 lxc/image.go:624 msgid "yes" msgstr "" -#: lxc/copy.go:38 +#: lxc/copy.go:44 msgid "you must specify a source container name" msgstr "" diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/README.md juju-core-2.0.0/src/github.com/lxc/lxd/README.md --- juju-core-2.0~beta15/src/github.com/lxc/lxd/README.md 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/README.md 2016-10-13 14:31:53.000000000 +0000 @@ -9,6 +9,7 @@ ## CI status * Travis: [![Build Status](https://travis-ci.org/lxc/lxd.svg?branch=master)](https://travis-ci.org/lxc/lxd) + * AppVeyor: [![Build 
Status](https://ci.appveyor.com/api/projects/status/rb4141dsi2xm3n0x/branch/master?svg=true)](https://ci.appveyor.com/project/lxc/lxd) * Jenkins: [![Build Status](https://jenkins.linuxcontainers.org/job/lxd-github-commit/badge/icon)](https://jenkins.linuxcontainers.org/job/lxd-github-commit/) ## Getting started with LXD @@ -28,6 +29,51 @@ After you've got LXD installed and a session with the right permissions, you can take your [first steps](#first-steps). +## Using the REST API +The LXD REST API can be used locally via unauthenticated Unix socket or remotely via SSL encapsulated TCP. + +#### via Unix socket +```bash +curl --unix-socket /var/lib/lxd/unix.socket \ + -H "Content-Type: application/json" \ + -X POST \ + -d @hello-ubuntu.json \ + lxd/1.0/containers +``` + +#### via TCP +TCP requires some additional configuration and is not enabled by default. +```bash +lxc config set core.https_address "[::]:8443" +``` +```bash +curl -k -L \ + --cert ~/.config/lxc/client.crt \ + --key ~/.config/lxc/client.key \ + -H "Content-Type: application/json" \ + -X POST \ + -d @hello-ubuntu.json \ + "https://127.0.0.1:8443/1.0/containers" +``` +#### JSON payload +The `hello-ubuntu.json` file referenced above could contain something like: +```json +{ + "name":"some-ubuntu", + "ephemeral":true, + "config":{ + "limits.cpu":"2" + }, + "source": { + "type":"image", + "mode":"pull", + "protocol":"simplestreams", + "server":"https://cloud-images.ubuntu.com/releases", + "alias":"14.04" + } +} +``` + ## Building from source We recommend having the latest versions of liblxc (>= 1.1 required) and CRIU @@ -38,7 +84,7 @@ sudo apt-get install software-properties-common sudo add-apt-repository ppa:ubuntu-lxc/lxd-git-master sudo apt-get update - sudo apt-get install golang lxc lxc-dev mercurial git pkg-config protobuf-compiler golang-goprotobuf-dev xz-utils tar acl make + sudo apt-get install acl dnsmasq-base git golang liblxc1 lxc-dev make pkg-config rsync squashfs-tools tar xz-utils There are 
a few storage backends for LXD besides the default "directory" backend. Installing these tools adds a bit to initramfs and may slow down your @@ -266,8 +312,16 @@ Yes. The easiest way to do that is using a privileged container: - lxc launch ubuntu priv -c security.privileged=true - lxc config device add priv homedir disk source=/home/$USER path=/home/ubuntu +1.a) create a container. + + lxc launch ubuntu privilegedContainerName -c security.privileged=true + +1.b) or, if your container already exists. + + lxc config set privilegedContainerName security.privileged true +2) then. + + lxc config device add privilegedContainerName shareName disk source=/home/$USER path=/home/ubuntu #### How can I run docker inside a LXD container? diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/scripts/lxc-to-lxd juju-core-2.0.0/src/github.com/lxc/lxd/scripts/lxc-to-lxd --- juju-core-2.0~beta15/src/github.com/lxc/lxd/scripts/lxc-to-lxd 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/scripts/lxc-to-lxd 2016-10-13 14:31:53.000000000 +0000 @@ -1,12 +1,12 @@ -#!/usr/bin/python3 +#!/usr/bin/env python3 import argparse import json import lxc import os import subprocess -import time -from pylxd import api as api +from pylxd.client import Client +from pylxd import exceptions # Fetch a config key as a list @@ -76,7 +76,7 @@ # Connect to LXD if args.lxdpath: os.environ['LXD_DIR'] = args.lxdpath - lxd = api.API() + lxd = Client() print("==> Processing container: %s" % container_name) @@ -107,9 +107,12 @@ # Make sure we don't have a conflict print("Checking for existing containers") - if lxd.container_defined(container_name): + try: + lxd.containers.get(container_name) print("Container already exists, skipping...") return False + except (NameError, exceptions.LXDAPIException): + pass # Validate lxc.utsname print("Validating container name") @@ -314,14 +317,14 @@ # Set the container architecture if set in LXC print("Converting container architecture 
configuration") - arches = {'i686': 1, - 'x86_64': 2, - 'armhf': 3, - 'arm64': 4, - 'powerpc': 5, - 'powerpc64': 6, - 'ppc64el': 7, - 's390x': 8} + arches = {'i686': "i686", + 'x86_64': "x86_64", + 'armhf': "armv7l", + 'arm64': "aarch64", + 'powerpc': "ppc", + 'powerpc64': "ppc64", + 'ppc64el': "ppc64le", + 's390x': "s390x"} arch = None try: @@ -344,11 +347,11 @@ try: print("Creating the container") - lxd.container_init(new) + lxd.containers.create(new, wait=True) except Exception as e: + raise print("Failed to create the container: %s" % e) return False - time.sleep(1) # Transfer the filesystem lxd_rootfs = os.path.join("/var/lib/lxd/", "containers", diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/scripts/vagrant/install-lxd.sh juju-core-2.0.0/src/github.com/lxc/lxd/scripts/vagrant/install-lxd.sh --- juju-core-2.0~beta15/src/github.com/lxc/lxd/scripts/vagrant/install-lxd.sh 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/scripts/vagrant/install-lxd.sh 2016-10-13 14:31:53.000000000 +0000 @@ -9,7 +9,7 @@ # install build dependencies sudo apt-get -y install lxc lxc-dev mercurial git pkg-config \ - protobuf-compiler golang-goprotobuf-dev + protobuf-compiler golang-goprotobuf-dev squashfs-tools # setup env [ -e uid_gid_setup ] || \ diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/shared/cert.go juju-core-2.0.0/src/github.com/lxc/lxd/shared/cert.go --- juju-core-2.0~beta15/src/github.com/lxc/lxd/shared/cert.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/shared/cert.go 2016-10-13 14:31:53.000000000 +0000 @@ -64,14 +64,14 @@ return ret, nil } -func FindOrGenCert(certf string, keyf string) error { +func FindOrGenCert(certf string, keyf string, certtype bool) error { if PathExists(certf) && PathExists(keyf) { return nil } /* If neither stat succeeded, then this is our first run and we * need to generate cert and privkey */ - err := GenCert(certf, keyf) + err := GenCert(certf, keyf, certtype) if err 
!= nil { return err } @@ -80,7 +80,7 @@ } // GenCert will create and populate a certificate file and a key file -func GenCert(certf string, keyf string) error { +func GenCert(certf string, keyf string, certtype bool) error { /* Create the basenames if needed */ dir := path.Dir(certf) err := os.MkdirAll(dir, 0750) @@ -93,7 +93,7 @@ return err } - certBytes, keyBytes, err := GenerateMemCert() + certBytes, keyBytes, err := GenerateMemCert(certtype) if err != nil { return err } @@ -116,9 +116,9 @@ return nil } -// GenerateMemCert creates a certificate and key pair, returning them as byte -// arrays in memory. -func GenerateMemCert() ([]byte, []byte, error) { +// GenerateMemCert creates client or server certificate and key pair, +// returning them as byte arrays in memory. +func GenerateMemCert(client bool) ([]byte, []byte, error) { privk, err := rsa.GenerateKey(rand.Reader, 4096) if err != nil { log.Fatalf("failed to generate key") @@ -167,13 +167,20 @@ NotAfter: validTo, KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, - ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, BasicConstraintsValid: true, } + if client { + template.ExtKeyUsage = []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth} + } else { + template.ExtKeyUsage = []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth} + } + for _, h := range hosts { - if ip := net.ParseIP(h); ip != nil { - template.IPAddresses = append(template.IPAddresses, ip) + if ip, _, err := net.ParseCIDR(h); err == nil { + if !ip.IsLinkLocalUnicast() && !ip.IsLinkLocalMulticast() { + template.IPAddresses = append(template.IPAddresses, ip) + } } else { template.DNSNames = append(template.DNSNames, h) } @@ -197,5 +204,9 @@ } certBlock, _ := pem.Decode(cf) + if certBlock == nil { + return nil, fmt.Errorf("Invalid certificate file") + } + return x509.ParseCertificate(certBlock.Bytes) } diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/shared/cert_test.go juju-core-2.0.0/src/github.com/lxc/lxd/shared/cert_test.go 
--- juju-core-2.0~beta15/src/github.com/lxc/lxd/shared/cert_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/shared/cert_test.go 2016-10-13 14:31:53.000000000 +0000 @@ -9,7 +9,7 @@ if testing.Short() { t.Skip("skipping cert generation in short mode") } - cert, key, err := GenerateMemCert() + cert, key, err := GenerateMemCert(false) if err != nil { t.Error(err) return diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/shared/container.go juju-core-2.0.0/src/github.com/lxc/lxd/shared/container.go --- juju-core-2.0~beta15/src/github.com/lxc/lxd/shared/container.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/shared/container.go 2016-10-13 14:31:53.000000000 +0000 @@ -1,12 +1,16 @@ package shared import ( + "fmt" + "strconv" + "strings" "time" ) type ContainerState struct { Status string `json:"status"` StatusCode StatusCode `json:"status_code"` + CPU ContainerStateCPU `json:"cpu"` Disk map[string]ContainerStateDisk `json:"disk"` Memory ContainerStateMemory `json:"memory"` Network map[string]ContainerStateNetwork `json:"network"` @@ -18,6 +22,10 @@ Usage int64 `json:"usage"` } +type ContainerStateCPU struct { + Usage int64 `json:"usage"` +} + type ContainerStateMemory struct { Usage int64 `json:"usage"` UsagePeak int64 `json:"usage_peak"` @@ -62,6 +70,7 @@ Ephemeral bool `json:"ephemeral"` ExpandedConfig map[string]string `json:"expanded_config"` ExpandedDevices Devices `json:"expanded_devices"` + LastUsedDate time.Time `json:"last_used_at"` Name string `json:"name"` Profiles []string `json:"profiles"` Stateful bool `json:"stateful"` @@ -75,6 +84,7 @@ Ephemeral bool `json:"ephemeral"` ExpandedConfig map[string]string `json:"expanded_config"` ExpandedDevices Devices `json:"expanded_devices"` + LastUsedDate time.Time `json:"last_used_at"` Name string `json:"name"` Profiles []string `json:"profiles"` Stateful bool `json:"stateful"` @@ -138,4 +148,202 @@ Config map[string]string `json:"config"` 
Description string `json:"description"` Devices Devices `json:"devices"` + UsedBy []string `json:"used_by"` +} + +type NetworkConfig struct { + Name string `json:"name"` + Config map[string]string `json:"config"` + Managed bool `json:"managed"` + Type string `json:"type"` + UsedBy []string `json:"used_by"` +} + +func IsInt64(value string) error { + if value == "" { + return nil + } + + _, err := strconv.ParseInt(value, 10, 64) + if err != nil { + return fmt.Errorf("Invalid value for an integer: %s", value) + } + + return nil +} + +func IsPriority(value string) error { + if value == "" { + return nil + } + + valueInt, err := strconv.ParseInt(value, 10, 64) + if err != nil { + return fmt.Errorf("Invalid value for an integer: %s", value) + } + + if valueInt < 0 || valueInt > 10 { + return fmt.Errorf("Invalid value for a limit '%s'. Must be between 0 and 10.", value) + } + + return nil +} + +func IsBool(value string) error { + if value == "" { + return nil + } + + if !StringInSlice(strings.ToLower(value), []string{"true", "false", "yes", "no", "1", "0", "on", "off"}) { + return fmt.Errorf("Invalid value for a boolean: %s", value) + } + + return nil +} + +func IsOneOf(value string, valid []string) error { + if value == "" { + return nil + } + + if !StringInSlice(value, valid) { + return fmt.Errorf("Invalid value: %s (not one of %s)", value, valid) + } + + return nil +} + +func IsAny(value string) error { + return nil +} + +// KnownContainerConfigKeys maps all fully defined, well-known config keys +// to an appropriate checker function, which validates whether or not a +// given value is syntactically legal. 
+var KnownContainerConfigKeys = map[string]func(value string) error{ + "boot.autostart": IsBool, + "boot.autostart.delay": IsInt64, + "boot.autostart.priority": IsInt64, + "boot.host_shutdown_timeout": IsInt64, + + "limits.cpu": IsAny, + "limits.cpu.allowance": func(value string) error { + if value == "" { + return nil + } + + if strings.HasSuffix(value, "%") { + // Percentage based allocation + _, err := strconv.Atoi(strings.TrimSuffix(value, "%")) + if err != nil { + return err + } + + return nil + } + + // Time based allocation + fields := strings.SplitN(value, "/", 2) + if len(fields) != 2 { + return fmt.Errorf("Invalid allowance: %s", value) + } + + _, err := strconv.Atoi(strings.TrimSuffix(fields[0], "ms")) + if err != nil { + return err + } + + _, err = strconv.Atoi(strings.TrimSuffix(fields[1], "ms")) + if err != nil { + return err + } + + return nil + }, + "limits.cpu.priority": IsPriority, + + "limits.disk.priority": IsPriority, + + "limits.memory": func(value string) error { + if value == "" { + return nil + } + + if strings.HasSuffix(value, "%") { + _, err := strconv.ParseInt(strings.TrimSuffix(value, "%"), 10, 64) + if err != nil { + return err + } + + return nil + } + + _, err := ParseByteSizeString(value) + if err != nil { + return err + } + + return nil + }, + "limits.memory.enforce": func(value string) error { + return IsOneOf(value, []string{"soft", "hard"}) + }, + "limits.memory.swap": IsBool, + "limits.memory.swap.priority": IsPriority, + + "limits.network.priority": IsPriority, + + "limits.processes": IsInt64, + + "linux.kernel_modules": IsAny, + + "security.nesting": IsBool, + "security.privileged": IsBool, + + "security.syscalls.blacklist_default": IsBool, + "security.syscalls.blacklist_compat": IsBool, + "security.syscalls.blacklist": IsAny, + "security.syscalls.whitelist": IsAny, + + // Caller is responsible for full validation of any raw.* value + "raw.apparmor": IsAny, + "raw.lxc": IsAny, + "raw.seccomp": IsAny, + + 
"volatile.apply_template": IsAny, + "volatile.base_image": IsAny, + "volatile.last_state.idmap": IsAny, + "volatile.last_state.power": IsAny, +} + +// ConfigKeyChecker returns a function that will check whether or not +// a provide value is valid for the associate config key. Returns an +// error if the key is not known. The checker function only performs +// syntactic checking of the value, semantic and usage checking must +// be done by the caller. User defined keys are always considered to +// be valid, e.g. user.* and environment.* keys. +func ConfigKeyChecker(key string) (func(value string) error, error) { + if f, ok := KnownContainerConfigKeys[key]; ok { + return f, nil + } + + if strings.HasPrefix(key, "volatile.") { + if strings.HasSuffix(key, ".hwaddr") { + return IsAny, nil + } + + if strings.HasSuffix(key, ".name") { + return IsAny, nil + } + } + + if strings.HasPrefix(key, "environment.") { + return IsAny, nil + } + + if strings.HasPrefix(key, "user.") { + return IsAny, nil + } + + return nil, fmt.Errorf("Bad key: %s", key) } diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/shared/devices.go juju-core-2.0.0/src/github.com/lxc/lxd/shared/devices.go --- juju-core-2.0~beta15/src/github.com/lxc/lxd/shared/devices.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/shared/devices.go 2016-10-13 14:31:53.000000000 +0000 @@ -1,5 +1,9 @@ package shared +import ( + "sort" +) + type Device map[string]string type Devices map[string]Device @@ -74,7 +78,7 @@ continue } - for _, k := range []string{"limits.max", "limits.read", "limits.write", "limits.egress", "limits.ingress"} { + for _, k := range []string{"limits.max", "limits.read", "limits.write", "limits.egress", "limits.ingress", "ipv4.address", "ipv6.address"} { delete(oldDevice, k) delete(newDevice, k) } @@ -102,3 +106,59 @@ return nil } + +type namedDevice struct { + name string + device Device +} +type sortableDevices []namedDevice + +func (devices Devices) toSortable() 
sortableDevices { + named := []namedDevice{} + for k, d := range devices { + named = append(named, namedDevice{k, d}) + } + + return named +} + +func (devices sortableDevices) Len() int { + return len(devices) +} + +func (devices sortableDevices) Less(i, j int) bool { + a := devices[i] + b := devices[j] + + if a.device["type"] == "disk" && b.device["type"] == "disk" { + if a.device["path"] == b.device["path"] { + return a.name < b.name + } + + return a.device["path"] < b.device["path"] + } + + return a.name < b.name +} + +func (devices sortableDevices) Swap(i, j int) { + tmp := devices[i] + devices[i] = devices[j] + devices[j] = tmp +} + +func (devices sortableDevices) Names() []string { + result := []string{} + for _, d := range devices { + result = append(result, d.name) + } + + return result +} + +/* DeviceNames returns the device names for this Devices in sorted order */ +func (devices Devices) DeviceNames() []string { + sortable := devices.toSortable() + sort.Sort(sortable) + return sortable.Names() +} diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/shared/devices_test.go juju-core-2.0.0/src/github.com/lxc/lxd/shared/devices_test.go --- juju-core-2.0~beta15/src/github.com/lxc/lxd/shared/devices_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/shared/devices_test.go 2016-10-13 14:31:53.000000000 +0000 @@ -0,0 +1,22 @@ +package shared + +import ( + "reflect" + "testing" +) + +func TestSortableDevices(t *testing.T) { + devices := Devices{ + "1": Device{"type": "nic"}, + "3": Device{"type": "disk", "path": "/foo/bar"}, + "4": Device{"type": "disk", "path": "/foo"}, + "2": Device{"type": "nic"}, + } + + expected := []string{"1", "2", "4", "3"} + + result := devices.DeviceNames() + if !reflect.DeepEqual(result, expected) { + t.Error("devices sorted incorrectly") + } +} diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/shared/flex.go juju-core-2.0.0/src/github.com/lxc/lxd/shared/flex.go --- 
juju-core-2.0~beta15/src/github.com/lxc/lxd/shared/flex.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/shared/flex.go 2016-10-13 14:31:53.000000000 +0000 @@ -3,7 +3,7 @@ */ package shared -var Version = "2.0.0.rc6" +var Version = "2.4.1" var UserAgent = "LXD " + Version /* diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/shared/idmapset_linux.go juju-core-2.0.0/src/github.com/lxc/lxd/shared/idmapset_linux.go --- juju-core-2.0~beta15/src/github.com/lxc/lxd/shared/idmapset_linux.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/shared/idmapset_linux.go 2016-10-13 14:31:53.000000000 +0000 @@ -223,11 +223,13 @@ } func (set *IdmapSet) doUidshiftIntoContainer(dir string, testmode bool, how string) error { - // Expand any symlink in dir and cleanup resulting path - dir, err := filepath.EvalSymlinks(dir) + // Expand any symlink before the final path component + tmp := filepath.Dir(dir) + tmp, err := filepath.EvalSymlinks(tmp) if err != nil { return err } + dir = filepath.Join(tmp, filepath.Base(dir)) dir = strings.TrimRight(dir, "/") convert := func(path string, fi os.FileInfo, err error) (e error) { diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/shared/json.go juju-core-2.0.0/src/github.com/lxc/lxd/shared/json.go --- juju-core-2.0~beta15/src/github.com/lxc/lxd/shared/json.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/shared/json.go 2016-10-13 14:31:53.000000000 +0000 @@ -51,11 +51,11 @@ func DebugJson(r *bytes.Buffer) { pretty := &bytes.Buffer{} if err := json.Indent(pretty, r.Bytes(), "\t", "\t"); err != nil { - Debugf("error indenting json: %s", err) + LogDebugf("error indenting json: %s", err) return } // Print the JSON without the last "\n" str := pretty.String() - Debugf("\n\t%s", str[0:len(str)-1]) + LogDebugf("\n\t%s", str[0:len(str)-1]) } diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/shared/log.go 
juju-core-2.0.0/src/github.com/lxc/lxd/shared/log.go --- juju-core-2.0~beta15/src/github.com/lxc/lxd/shared/log.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/shared/log.go 2016-10-13 14:31:53.000000000 +0000 @@ -27,25 +27,71 @@ Log = nullLogger{} } -// Logf sends to the logger registered via SetLogger the string resulting -// from running format and args through Sprintf. -func Logf(format string, args ...interface{}) { +// General wrappers around Logger interface functions. +func LogDebug(msg string, ctx interface{}) { + if Log != nil { + Log.Debug(msg, ctx) + } +} + +func LogInfo(msg string, ctx interface{}) { + if Log != nil { + Log.Info(msg, ctx) + } +} + +func LogWarn(msg string, ctx interface{}) { + if Log != nil { + Log.Warn(msg, ctx) + } +} + +func LogError(msg string, ctx interface{}) { + if Log != nil { + Log.Error(msg, ctx) + } +} + +func LogCrit(msg string, ctx interface{}) { + if Log != nil { + Log.Crit(msg, ctx) + } +} + +// Wrappers around Logger interface functions that send a string to the Logger +// by running it through fmt.Sprintf(). +func LogInfof(format string, args ...interface{}) { if Log != nil { Log.Info(fmt.Sprintf(format, args...)) } } -// Debugf sends to the logger registered via SetLogger the string resulting -// from running format and args through Sprintf, but only if debugging was -// enabled via SetDebug. 
-func Debugf(format string, args ...interface{}) { +func LogDebugf(format string, args ...interface{}) { if Log != nil { Log.Debug(fmt.Sprintf(format, args...)) } } +func LogWarnf(format string, args ...interface{}) { + if Log != nil { + Log.Warn(fmt.Sprintf(format, args...)) + } +} + +func LogErrorf(format string, args ...interface{}) { + if Log != nil { + Log.Error(fmt.Sprintf(format, args...)) + } +} + +func LogCritf(format string, args ...interface{}) { + if Log != nil { + Log.Crit(fmt.Sprintf(format, args...)) + } +} + func PrintStack() { buf := make([]byte, 1<<16) runtime.Stack(buf, true) - Debugf("%s", buf) + LogDebugf("%s", buf) } diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/shared/network.go juju-core-2.0.0/src/github.com/lxc/lxd/shared/network.go --- juju-core-2.0~beta15/src/github.com/lxc/lxd/shared/network.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/shared/network.go 2016-10-13 14:31:53.000000000 +0000 @@ -39,7 +39,9 @@ MaxVersion: tls.VersionTLS12, CipherSuites: []uint16{ tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, - tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256}, + tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, + tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, + tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA}, PreferServerCipherSuites: true, } } @@ -47,7 +49,10 @@ func finalizeTLSConfig(tlsConfig *tls.Config, tlsRemoteCert *x509.Certificate) { // Trusted certificates if tlsRemoteCert != nil { - caCertPool := x509.NewCertPool() + caCertPool := tlsConfig.RootCAs + if caCertPool == nil { + caCertPool = x509.NewCertPool() + } // Make it a valid RootCA tlsRemoteCert.IsCA = true @@ -66,7 +71,7 @@ tlsConfig.BuildNameToCertificate() } -func GetTLSConfig(tlsClientCertFile string, tlsClientKeyFile string, tlsRemoteCert *x509.Certificate) (*tls.Config, error) { +func GetTLSConfig(tlsClientCertFile string, tlsClientKeyFile string, tlsClientCAFile string, tlsRemoteCert *x509.Certificate) (*tls.Config, error) { tlsConfig := initTLSConfig() 
// Client authentication @@ -79,11 +84,23 @@ tlsConfig.Certificates = []tls.Certificate{cert} } + if tlsClientCAFile != "" { + caCertificates, err := ioutil.ReadFile(tlsClientCAFile) + if err != nil { + return nil, err + } + + caPool := x509.NewCertPool() + caPool.AppendCertsFromPEM(caCertificates) + + tlsConfig.RootCAs = caPool + } + finalizeTLSConfig(tlsConfig, tlsRemoteCert) return tlsConfig, nil } -func GetTLSConfigMem(tlsClientCert string, tlsClientKey string, tlsRemoteCertPEM string) (*tls.Config, error) { +func GetTLSConfigMem(tlsClientCert string, tlsClientKey string, tlsClientCA string, tlsRemoteCertPEM string) (*tls.Config, error) { tlsConfig := initTLSConfig() // Client authentication @@ -100,12 +117,24 @@ if tlsRemoteCertPEM != "" { // Ignore any content outside of the PEM bytes we care about certBlock, _ := pem.Decode([]byte(tlsRemoteCertPEM)) + if certBlock == nil { + return nil, fmt.Errorf("Invalid remote certificate") + } + var err error tlsRemoteCert, err = x509.ParseCertificate(certBlock.Bytes) if err != nil { return nil, err } } + + if tlsClientCA != "" { + caPool := x509.NewCertPool() + caPool.AppendCertsFromPEM([]byte(tlsClientCA)) + + tlsConfig.RootCAs = caPool + } + finalizeTLSConfig(tlsConfig, tlsRemoteCert) return tlsConfig, nil @@ -115,7 +144,7 @@ return int(iface.Flags&net.FlagLoopback) > 0 } -func WebsocketSendStream(conn *websocket.Conn, r io.Reader) chan bool { +func WebsocketSendStream(conn *websocket.Conn, r io.Reader, bufferSize int) chan bool { ch := make(chan bool) if r == nil { @@ -124,7 +153,7 @@ } go func(conn *websocket.Conn, r io.Reader) { - in := ReaderToChannel(r) + in := ReaderToChannel(r, bufferSize) for { buf, ok := <-in if !ok { @@ -133,14 +162,14 @@ w, err := conn.NextWriter(websocket.BinaryMessage) if err != nil { - Debugf("Got error getting next writer %s", err) + LogDebugf("Got error getting next writer %s", err) break } _, err = w.Write(buf) w.Close() if err != nil { - Debugf("Got err writing %s", err) + 
LogDebugf("Got err writing %s", err) break } } @@ -151,30 +180,30 @@ return ch } -func WebsocketRecvStream(w io.WriteCloser, conn *websocket.Conn) chan bool { +func WebsocketRecvStream(w io.Writer, conn *websocket.Conn) chan bool { ch := make(chan bool) - go func(w io.WriteCloser, conn *websocket.Conn) { + go func(w io.Writer, conn *websocket.Conn) { for { mt, r, err := conn.NextReader() if mt == websocket.CloseMessage { - Debugf("Got close message for reader") + LogDebugf("Got close message for reader") break } if mt == websocket.TextMessage { - Debugf("got message barrier") + LogDebugf("got message barrier") break } if err != nil { - Debugf("Got error getting next reader %s, %s", err, w) + LogDebugf("Got error getting next reader %s, %s", err, w) break } buf, err := ioutil.ReadAll(r) if err != nil { - Debugf("Got error writing to writer %s", err) + LogDebugf("Got error writing to writer %s", err) break } @@ -184,11 +213,11 @@ i, err := w.Write(buf) if i != len(buf) { - Debugf("Didn't write all of buf") + LogDebugf("Didn't write all of buf") break } if err != nil { - Debugf("Error writing buf %s", err) + LogDebugf("Error writing buf %s", err) break } } @@ -210,32 +239,32 @@ for { mt, r, err := conn.NextReader() if err != nil { - Debugf("Got error getting next reader %s, %s", err, w) + LogDebugf("Got error getting next reader %s, %s", err, w) break } if mt == websocket.CloseMessage { - Debugf("Got close message for reader") + LogDebugf("Got close message for reader") break } if mt == websocket.TextMessage { - Debugf("Got message barrier, resetting stream") + LogDebugf("Got message barrier, resetting stream") break } buf, err := ioutil.ReadAll(r) if err != nil { - Debugf("Got error writing to writer %s", err) + LogDebugf("Got error writing to writer %s", err) break } i, err := w.Write(buf) if i != len(buf) { - Debugf("Didn't write all of buf") + LogDebugf("Didn't write all of buf") break } if err != nil { - Debugf("Error writing buf %s", err) + LogDebugf("Error 
writing buf %s", err) break } } @@ -244,26 +273,30 @@ }(conn, w) go func(conn *websocket.Conn, r io.ReadCloser) { - in := ReaderToChannel(r) + /* For now, we don't need to adjust buffer sizes in + * WebsocketMirror, since it's used for interactive things like + * exec. + */ + in := ReaderToChannel(r, -1) for { buf, ok := <-in if !ok { - readDone <- true r.Close() - Debugf("sending write barrier") + LogDebugf("sending write barrier") conn.WriteMessage(websocket.TextMessage, []byte{}) + readDone <- true return } w, err := conn.NextWriter(websocket.BinaryMessage) if err != nil { - Debugf("Got error getting next writer %s", err) + LogDebugf("Got error getting next writer %s", err) break } _, err = w.Write(buf) w.Close() if err != nil { - Debugf("Got err writing %s", err) + LogDebugf("Got err writing %s", err) break } } diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/shared/operation.go juju-core-2.0.0/src/github.com/lxc/lxd/shared/operation.go --- juju-core-2.0~beta15/src/github.com/lxc/lxd/shared/operation.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/shared/operation.go 2016-10-13 14:31:53.000000000 +0000 @@ -8,9 +8,7 @@ ) var WebsocketUpgrader = websocket.Upgrader{ - ReadBufferSize: 1024, - WriteBufferSize: 1024, - CheckOrigin: func(r *http.Request) bool { return true }, + CheckOrigin: func(r *http.Request) bool { return true }, } type Operation struct { diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/shared/server.go juju-core-2.0.0/src/github.com/lxc/lxd/shared/server.go --- juju-core-2.0~beta15/src/github.com/lxc/lxd/shared/server.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/shared/server.go 2016-10-13 14:31:53.000000000 +0000 @@ -1,27 +1,30 @@ package shared type ServerStateEnvironment struct { - Addresses []string `json:"addresses"` - Architectures []string `json:"architectures"` - Certificate string `json:"certificate"` - Driver string `json:"driver"` - DriverVersion string 
`json:"driver_version"` - Kernel string `json:"kernel"` - KernelArchitecture string `json:"kernel_architecture"` - KernelVersion string `json:"kernel_version"` - Server string `json:"server"` - ServerPid int `json:"server_pid"` - ServerVersion string `json:"server_version"` - Storage string `json:"storage"` - StorageVersion string `json:"storage_version"` + Addresses []string `json:"addresses"` + Architectures []string `json:"architectures"` + Certificate string `json:"certificate"` + CertificateFingerprint string `json:"certificate_fingerprint"` + Driver string `json:"driver"` + DriverVersion string `json:"driver_version"` + Kernel string `json:"kernel"` + KernelArchitecture string `json:"kernel_architecture"` + KernelVersion string `json:"kernel_version"` + Server string `json:"server"` + ServerPid int `json:"server_pid"` + ServerVersion string `json:"server_version"` + Storage string `json:"storage"` + StorageVersion string `json:"storage_version"` } type ServerState struct { - APICompat int `json:"api_compat"` - Auth string `json:"auth"` - Environment ServerStateEnvironment `json:"environment"` - Config map[string]interface{} `json:"config"` - Public bool `json:"public"` + APIExtensions []string `json:"api_extensions"` + APIStatus string `json:"api_status"` + APIVersion string `json:"api_version"` + Auth string `json:"auth"` + Environment ServerStateEnvironment `json:"environment"` + Config map[string]interface{} `json:"config"` + Public bool `json:"public"` } type BriefServerState struct { diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/shared/simplestreams.go juju-core-2.0.0/src/github.com/lxc/lxd/shared/simplestreams.go --- juju-core-2.0~beta15/src/github.com/lxc/lxd/shared/simplestreams.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/shared/simplestreams.go 2016-10-13 14:31:53.000000000 +0000 @@ -115,14 +115,19 @@ found := 0 for _, item := range version.Items { // Skip the files we don't care about - if 
!StringInSlice(item.FileType, []string{"root.tar.xz", "lxd.tar.xz"}) { + if !StringInSlice(item.FileType, []string{"root.tar.xz", "lxd.tar.xz", "squashfs"}) { continue } found += 1 - size += item.Size - if item.LXDHashSha256 != "" { - fingerprint = item.LXDHashSha256 + if fingerprint == "" { + if item.LXDHashSha256SquashFs != "" { + fingerprint = item.LXDHashSha256SquashFs + } else if item.LXDHashSha256RootXz != "" { + fingerprint = item.LXDHashSha256RootXz + } else if item.LXDHashSha256 != "" { + fingerprint = item.LXDHashSha256 + } } if item.FileType == "lxd.tar.xz" { @@ -130,20 +135,37 @@ filename = fields[len(fields)-1] metaPath = item.Path metaHash = item.HashSha256 + + size += item.Size } - if item.FileType == "root.tar.xz" { - rootfsPath = item.Path - rootfsHash = item.HashSha256 + if rootfsPath == "" || rootfsHash == "" { + if item.FileType == "squashfs" { + rootfsPath = item.Path + rootfsHash = item.HashSha256 + } + + if item.FileType == "root.tar.xz" { + rootfsPath = item.Path + rootfsHash = item.HashSha256 + } + + size += item.Size } } - if found != 2 || size == 0 || filename == "" || fingerprint == "" { + if found < 2 || size == 0 || filename == "" || fingerprint == "" { // Invalid image continue } // Generate the actual image entry + description := fmt.Sprintf("%s %s %s", product.OperatingSystem, product.ReleaseTitle, product.Architecture) + if version.Label != "" { + description = fmt.Sprintf("%s (%s)", description, version.Label) + } + description = fmt.Sprintf("%s (%s)", description, name) + image := ImageInfo{} image.Architecture = architectureName image.Public = true @@ -160,10 +182,11 @@ "architecture": product.Architecture, "label": version.Label, "serial": name, - "description": fmt.Sprintf("%s %s %s (%s) (%s)", product.OperatingSystem, product.ReleaseTitle, product.Architecture, version.Label, name), + "description": description, } // Attempt to parse the EOL + image.ExpiryDate = time.Unix(0, 0).UTC() if product.SupportedEOL != "" { eolDate, 
err := time.Parse(eolLayout, product.SupportedEOL) if err == nil { @@ -199,12 +222,14 @@ } type SimpleStreamsManifestProductVersionItem struct { - Path string `json:"path"` - FileType string `json:"ftype"` - HashMd5 string `json:"md5"` - HashSha256 string `json:"sha256"` - LXDHashSha256 string `json:"combined_sha256"` - Size int64 `json:"size"` + Path string `json:"path"` + FileType string `json:"ftype"` + HashMd5 string `json:"md5"` + HashSha256 string `json:"sha256"` + LXDHashSha256 string `json:"combined_sha256"` + LXDHashSha256RootXz string `json:"combined_rootxz_sha256"` + LXDHashSha256SquashFs string `json:"combined_squashfs_sha256"` + Size int64 `json:"size"` } type SimpleStreamsIndex struct { @@ -222,7 +247,7 @@ func SimpleStreamsClient(url string, proxy func(*http.Request) (*url.URL, error)) (*SimpleStreams, error) { // Setup a http client - tlsConfig, err := GetTLSConfig("", "", nil) + tlsConfig, err := GetTLSConfig("", "", "", nil) if err != nil { return nil, err } @@ -364,11 +389,6 @@ if alias != nil { image.Aliases = append(image.Aliases, *alias) } - - alias = addAlias(fmt.Sprintf("%s/%s", entry, image.Properties["serial"]), image.Fingerprint) - if alias != nil { - image.Aliases = append(image.Aliases, *alias) - } } // Medium @@ -376,78 +396,6 @@ if alias != nil { image.Aliases = append(image.Aliases, *alias) } - - // Long - alias = addAlias(fmt.Sprintf("%s/%s/%s", entry, image.Properties["architecture"], image.Properties["serial"]), image.Fingerprint) - if alias != nil { - image.Aliases = append(image.Aliases, *alias) - } - } - } else { - // FIXME: This is backward compatibility needed until cloud-images.ubuntu.com supports the aliases field - // Short - if image.Architecture == architectureName { - alias := addAlias(fmt.Sprintf("%s/%s", image.Properties["os"], image.Properties["release"]), image.Fingerprint) - if alias != nil { - image.Aliases = append(image.Aliases, *alias) - } - - alias = addAlias(fmt.Sprintf("%s/%s/%s", image.Properties["os"], 
image.Properties["release"], image.Properties["serial"]), image.Fingerprint) - if alias != nil { - image.Aliases = append(image.Aliases, *alias) - } - - alias = addAlias(fmt.Sprintf("%s/%c", image.Properties["os"], image.Properties["release"][0]), image.Fingerprint) - if alias != nil { - image.Aliases = append(image.Aliases, *alias) - } - - alias = addAlias(fmt.Sprintf("%s/%c/%s", image.Properties["os"], image.Properties["release"][0], image.Properties["serial"]), image.Fingerprint) - if alias != nil { - image.Aliases = append(image.Aliases, *alias) - } - - alias = addAlias(fmt.Sprintf("%s/%s", image.Properties["os"], image.Properties["version"]), image.Fingerprint) - if alias != nil { - image.Aliases = append(image.Aliases, *alias) - } - - alias = addAlias(fmt.Sprintf("%s/%s/%s", image.Properties["os"], image.Properties["version"], image.Properties["serial"]), image.Fingerprint) - if alias != nil { - image.Aliases = append(image.Aliases, *alias) - } - } - - // Medium - alias := addAlias(fmt.Sprintf("%s/%s/%s", image.Properties["os"], image.Properties["release"], image.Properties["architecture"]), image.Fingerprint) - if alias != nil { - image.Aliases = append(image.Aliases, *alias) - } - - alias = addAlias(fmt.Sprintf("%s/%c/%s", image.Properties["os"], image.Properties["release"][0], image.Properties["architecture"]), image.Fingerprint) - if alias != nil { - image.Aliases = append(image.Aliases, *alias) - } - - alias = addAlias(fmt.Sprintf("%s/%s/%s", image.Properties["os"], image.Properties["version"], image.Properties["architecture"]), image.Fingerprint) - if alias != nil { - image.Aliases = append(image.Aliases, *alias) - } - - // Long - alias = addAlias(fmt.Sprintf("%s/%s/%s/%s", image.Properties["os"], image.Properties["release"], image.Properties["architecture"], image.Properties["serial"]), image.Fingerprint) - if alias != nil { - image.Aliases = append(image.Aliases, *alias) - } - - alias = addAlias(fmt.Sprintf("%s/%c/%s/%s", image.Properties["os"], 
image.Properties["release"][0], image.Properties["architecture"], image.Properties["serial"]), image.Fingerprint) - if alias != nil { - image.Aliases = append(image.Aliases, *alias) - } - - alias = addAlias(fmt.Sprintf("%s/%s/%s/%s", image.Properties["os"], image.Properties["version"], image.Properties["architecture"], image.Properties["serial"]), image.Fingerprint) - if alias != nil { - image.Aliases = append(image.Aliases, *alias) } } @@ -554,11 +502,22 @@ } defer out.Close() - resp, err := s.http.Get(url) + req, err := http.NewRequest("GET", url, nil) if err != nil { + return err + } + req.Header.Set("User-Agent", UserAgent) + + resp, err := s.http.Do(req) + if err != nil { + return err } defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("invalid simplestreams source: got %d looking for %s", resp.StatusCode, path) + } + body := &TransferProgress{Reader: resp.Body, Length: resp.ContentLength, Handler: progress} sha256 := sha256.New() @@ -567,9 +526,10 @@ return err } - if fmt.Sprintf("%x", sha256.Sum(nil)) != hash { + result := fmt.Sprintf("%x", sha256.Sum(nil)) + if result != hash { os.Remove(target) - return fmt.Errorf("Hash mismatch") + return fmt.Errorf("Hash mismatch for %s: %s != %s", path, result, hash) } return nil diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/shared/util.go juju-core-2.0.0/src/github.com/lxc/lxd/shared/util.go --- juju-core-2.0~beta15/src/github.com/lxc/lxd/shared/util.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/shared/util.go 2016-10-13 14:31:53.000000000 +0000 @@ -70,6 +70,16 @@ return stat.IsDir() } +// IsUnixSocket returns true if the given path is either a Unix socket +// or a symbolic link pointing at a Unix socket. 
+func IsUnixSocket(path string) bool { + stat, err := os.Stat(path) + if err != nil { + return false + } + return (stat.Mode() & os.ModeSocket) == os.ModeSocket +} + // VarPath returns the provided path elements joined by a slash and // appended to the end of $LXD_DIR, which defaults to /var/lib/lxd. func VarPath(path ...string) string { @@ -96,26 +106,36 @@ return filepath.Join(items...) } -func ParseLXDFileHeaders(headers http.Header) (uid int, gid int, mode os.FileMode) { +func ParseLXDFileHeaders(headers http.Header) (uid int, gid int, mode int, type_ string) { uid, err := strconv.Atoi(headers.Get("X-LXD-uid")) if err != nil { - uid = 0 + uid = -1 } gid, err = strconv.Atoi(headers.Get("X-LXD-gid")) if err != nil { - gid = 0 + gid = -1 } - /* Allow people to send stuff with a leading 0 for octal or a regular - * int that represents the perms when redered in octal. */ - rawMode, err := strconv.ParseInt(headers.Get("X-LXD-mode"), 0, 0) + mode, err = strconv.Atoi(headers.Get("X-LXD-mode")) if err != nil { - rawMode = 0644 + mode = -1 + } else { + rawMode, err := strconv.ParseInt(headers.Get("X-LXD-mode"), 0, 0) + if err == nil { + mode = int(os.FileMode(rawMode) & os.ModePerm) + } } - mode = os.FileMode(rawMode) - return uid, gid, mode + type_ = headers.Get("X-LXD-type") + /* backwards compat: before "type" was introduced, we could only + * manipulate files + */ + if type_ == "" { + type_ = "file" + } + + return uid, gid, mode, type_ } func ReadToJSON(r io.Reader, req interface{}) error { @@ -127,16 +147,26 @@ return json.Unmarshal(buf, req) } -func ReaderToChannel(r io.Reader) <-chan []byte { +func ReaderToChannel(r io.Reader, bufferSize int) <-chan []byte { + if bufferSize <= 128*1024 { + bufferSize = 128 * 1024 + } + ch := make(chan ([]byte)) go func() { + readSize := 128 * 1024 + offset := 0 + buf := make([]byte, bufferSize) + for { - /* io.Copy uses a 32KB buffer, so we might as well too. 
*/ - buf := make([]byte, 32*1024) - nr, err := r.Read(buf) - if nr > 0 { - ch <- buf[0:nr] + read := buf[offset : offset+readSize] + nr, err := r.Read(read) + offset += nr + if offset > 0 && (offset+readSize >= bufferSize || err != nil) { + ch <- buf[0:offset] + offset = 0 + buf = make([]byte, bufferSize) } if err != nil { @@ -351,6 +381,14 @@ return false } +func IsTrue(value string) bool { + if StringInSlice(strings.ToLower(value), []string{"true", "1", "yes", "on"}) { + return true + } + + return false +} + func IsOnSharedMount(pathName string) (bool, error) { file, err := os.Open("/proc/self/mountinfo") if err != nil { @@ -674,6 +712,18 @@ return fmt.Sprintf("%.2fEB", value) } +// RemoveDuplicatesFromString removes all duplicates of the string 'sep' +// from the specified string 's'. Leading and trailing occurences of sep +// are NOT removed (duplicate leading/trailing are). Performs poorly if +// there are multiple consecutive redundant separators. +func RemoveDuplicatesFromString(s string, sep string) string { + dup := sep + sep + for s = strings.Replace(s, dup, sep, -1); strings.Contains(s, dup); s = strings.Replace(s, dup, sep, -1) { + + } + return s +} + type TransferProgress struct { io.Reader percentage float64 @@ -708,3 +758,12 @@ return n, err } + +func RunCommand(name string, arg ...string) error { + output, err := exec.Command(name, arg...).CombinedOutput() + if err != nil { + return fmt.Errorf("Failed to run: %s %s: %s", name, strings.Join(arg, " "), strings.TrimSpace(string(output))) + } + + return nil +} diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/shared/util_linux.go juju-core-2.0.0/src/github.com/lxc/lxd/shared/util_linux.go --- juju-core-2.0~beta15/src/github.com/lxc/lxd/shared/util_linux.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/shared/util_linux.go 2016-10-13 14:31:53.000000000 +0000 @@ -8,6 +8,7 @@ "fmt" "os" "os/exec" + "strings" "syscall" "unsafe" ) @@ -384,3 +385,100 @@ } return nil } + 
+// This uses ssize_t llistxattr(const char *path, char *list, size_t size); to +// handle symbolic links (should it in the future be possible to set extended +// attributed on symlinks): If path is a symbolic link the extended attributes +// associated with the link itself are retrieved. +func llistxattr(path string, list []byte) (sz int, err error) { + var _p0 *byte + _p0, err = syscall.BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(list) > 0 { + _p1 = unsafe.Pointer(&list[0]) + } else { + _p1 = unsafe.Pointer(nil) + } + r0, _, e1 := syscall.Syscall(syscall.SYS_LLISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(list))) + sz = int(r0) + if e1 != 0 { + err = e1 + } + return +} + +// GetAllXattr retrieves all extended attributes associated with a file, +// directory or symbolic link. +func GetAllXattr(path string) (xattrs map[string]string, err error) { + e1 := fmt.Errorf("Extended attributes changed during retrieval.") + + // Call llistxattr() twice: First, to determine the size of the buffer + // we need to allocate to store the extended attributes, second, to + // actually store the extended attributes in the buffer. Also, check if + // the size/number of extended attributes hasn't changed between the two + // calls. 
+ pre, err := llistxattr(path, nil) + if err != nil || pre < 0 { + return nil, err + } + if pre == 0 { + return nil, nil + } + + dest := make([]byte, pre) + + post, err := llistxattr(path, dest) + if err != nil || post < 0 { + return nil, err + } + if post != pre { + return nil, e1 + } + + split := strings.Split(string(dest), "\x00") + if split == nil { + return nil, fmt.Errorf("No valid extended attribute key found.") + } + // *listxattr functions return a list of names as an unordered array + // of null-terminated character strings (attribute names are separated + // by null bytes ('\0')), like this: user.name1\0system.name1\0user.name2\0 + // Since we split at the '\0'-byte the last element of the slice will be + // the empty string. We remove it: + if split[len(split)-1] == "" { + split = split[:len(split)-1] + } + + xattrs = make(map[string]string, len(split)) + + for _, x := range split { + xattr := string(x) + // Call Getxattr() twice: First, to determine the size of the + // buffer we need to allocate to store the extended attributes, + // second, to actually store the extended attributes in the + // buffer. Also, check if the size of the extended attribute + // hasn't changed between the two calls. 
+ pre, err = syscall.Getxattr(path, xattr, nil) + if err != nil || pre < 0 { + return nil, err + } + if pre == 0 { + return nil, fmt.Errorf("No valid extended attribute value found.") + } + + dest = make([]byte, pre) + post, err = syscall.Getxattr(path, xattr, dest) + if err != nil || post < 0 { + return nil, err + } + if post != pre { + return nil, e1 + } + + xattrs[xattr] = string(dest) + } + + return xattrs, nil +} diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/shared/util_test.go juju-core-2.0.0/src/github.com/lxc/lxd/shared/util_test.go --- juju-core-2.0~beta15/src/github.com/lxc/lxd/shared/util_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/shared/util_test.go 2016-10-13 14:31:53.000000000 +0000 @@ -1,13 +1,101 @@ package shared import ( + "bytes" + "crypto/rand" "fmt" "io/ioutil" "os" "strings" + "syscall" "testing" ) +func TestGetAllXattr(t *testing.T) { + var ( + err error + testxattr = map[string]string{ + "user.checksum": "asdfsf13434qwf1324", + "user.random": "This is a test", + } + ) + xattrFile, err := ioutil.TempFile("", "") + if err != nil { + t.Error(err) + return + } + defer os.Remove(xattrFile.Name()) + xattrFile.Close() + + xattrDir, err := ioutil.TempDir("", "") + if err != nil { + t.Error(err) + return + } + defer os.Remove(xattrDir) + + for k, v := range testxattr { + err = syscall.Setxattr(xattrFile.Name(), k, []byte(v), 0) + if err == syscall.ENOTSUP { + t.Log(err) + return + } + if err != nil { + t.Error(err) + return + } + err = syscall.Setxattr(xattrDir, k, []byte(v), 0) + if err == syscall.ENOTSUP { + t.Log(err) + return + } + if err != nil { + t.Error(err) + return + } + } + + // Test retrieval of extended attributes for regular files. 
+ h, err := GetAllXattr(xattrFile.Name()) + if err != nil { + t.Error(err) + return + } + + if h == nil { + t.Errorf("Expected to find extended attributes but did not find any.") + return + } + + for k, v := range h { + found, ok := h[k] + if !ok || found != testxattr[k] { + t.Errorf("Expected to find extended attribute %s with a value of %s on regular file but did not find it.", k, v) + return + } + } + + // Test retrieval of extended attributes for directories. + h, err = GetAllXattr(xattrDir) + if err != nil { + t.Error(err) + return + } + + if h == nil { + t.Errorf("Expected to find extended attributes but did not find any.") + return + } + + for k, v := range h { + found, ok := h[k] + if !ok || found != testxattr[k] { + t.Errorf("Expected to find extended attribute %s with a value of %s on directory but did not find it.", k, v) + return + } + } +} + func TestFileCopy(t *testing.T) { helloWorld := []byte("hello world\n") source, err := ioutil.TempFile("", "") @@ -100,3 +188,43 @@ } } } + +func TestReaderToChannel(t *testing.T) { + buf := make([]byte, 1*1024*1024) + rand.Read(buf) + + offset := 0 + finished := false + + ch := ReaderToChannel(bytes.NewBuffer(buf), -1) + for { + data, ok := <-ch + if len(data) > 0 { + for i := 0; i < len(data); i++ { + if buf[offset+i] != data[i] { + t.Error(fmt.Sprintf("byte %d didn't match", offset+i)) + return + } + } + + offset += len(data) + if offset > len(buf) { + t.Error("read too much data") + return + } + + if offset == len(buf) { + finished = true + } + } + + if !ok { + if !finished { + t.Error("connection closed too early") + return + } else { + break + } + } + } +} diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/shared/util_unix.go juju-core-2.0.0/src/github.com/lxc/lxd/shared/util_unix.go --- juju-core-2.0~beta15/src/github.com/lxc/lxd/shared/util_unix.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/shared/util_unix.go 2016-10-13 14:31:53.000000000 +0000 @@ -0,0 +1,15 @@ +// 
+build !windows + +package shared + +import ( + "os" + "syscall" +) + +func GetOwnerMode(fInfo os.FileInfo) (os.FileMode, int, int) { + mode := fInfo.Mode() + uid := int(fInfo.Sys().(*syscall.Stat_t).Uid) + gid := int(fInfo.Sys().(*syscall.Stat_t).Gid) + return mode, uid, gid +} diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/shared/util_windows.go juju-core-2.0.0/src/github.com/lxc/lxd/shared/util_windows.go --- juju-core-2.0~beta15/src/github.com/lxc/lxd/shared/util_windows.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/shared/util_windows.go 2016-10-13 14:31:53.000000000 +0000 @@ -0,0 +1,11 @@ +// +build windows + +package shared + +import ( + "os" +) + +func GetOwnerMode(fInfo os.FileInfo) (os.FileMode, int, int) { + return os.FileMode(0), -1, -1 +} diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/test/backends/btrfs.sh juju-core-2.0.0/src/github.com/lxc/lxd/test/backends/btrfs.sh --- juju-core-2.0~beta15/src/github.com/lxc/lxd/test/backends/btrfs.sh 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/test/backends/btrfs.sh 2016-10-13 14:31:53.000000000 +0000 @@ -1,7 +1,9 @@ #!/bin/sh btrfs_setup() { + # shellcheck disable=2039 local LXD_DIR + LXD_DIR=$1 echo "==> Setting up btrfs backend in ${LXD_DIR}" @@ -17,14 +19,18 @@ } btrfs_configure() { + # shellcheck disable=2039 local LXD_DIR + LXD_DIR=$1 echo "==> Configuring btrfs backend in ${LXD_DIR}" } btrfs_teardown() { + # shellcheck disable=2039 local LXD_DIR + LXD_DIR=$1 echo "==> Tearing down btrfs backend in ${LXD_DIR}" diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/test/backends/dir.sh juju-core-2.0.0/src/github.com/lxc/lxd/test/backends/dir.sh --- juju-core-2.0~beta15/src/github.com/lxc/lxd/test/backends/dir.sh 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/test/backends/dir.sh 2016-10-13 14:31:53.000000000 +0000 @@ -5,7 +5,9 @@ # Any necessary backend-specific setup dir_setup() { + # shellcheck 
disable=2039 local LXD_DIR + LXD_DIR=$1 echo "==> Setting up directory backend in ${LXD_DIR}" @@ -13,14 +15,18 @@ # Do the API voodoo necessary to configure LXD to use this backend dir_configure() { + # shellcheck disable=2039 local LXD_DIR + LXD_DIR=$1 echo "==> Configuring directory backend in ${LXD_DIR}" } dir_teardown() { + # shellcheck disable=2039 local LXD_DIR + LXD_DIR=$1 echo "==> Tearing down directory backend in ${LXD_DIR}" diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/test/backends/lvm.sh juju-core-2.0.0/src/github.com/lxc/lxd/test/backends/lvm.sh --- juju-core-2.0~beta15/src/github.com/lxc/lxd/test/backends/lvm.sh 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/test/backends/lvm.sh 2016-10-13 14:31:53.000000000 +0000 @@ -1,7 +1,9 @@ #!/bin/sh lvm_setup() { + # shellcheck disable=2039 local LXD_DIR + LXD_DIR=$1 echo "==> Setting up lvm backend in ${LXD_DIR}" @@ -23,17 +25,21 @@ } lvm_configure() { + # shellcheck disable=2039 local LXD_DIR + LXD_DIR=$1 echo "==> Configuring lvm backend in ${LXD_DIR}" - export LXD_LVM_LVSIZE="10Mib" + lxc config set storage.lvm_volume_size "10Mib" lxc config set storage.lvm_vg_name "lxdtest-$(basename "${LXD_DIR}")" } lvm_teardown() { + # shellcheck disable=2039 local LXD_DIR + LXD_DIR=$1 echo "==> Tearing down lvm backend in ${LXD_DIR}" diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/test/backends/zfs.sh juju-core-2.0.0/src/github.com/lxc/lxd/test/backends/zfs.sh --- juju-core-2.0~beta15/src/github.com/lxc/lxd/test/backends/zfs.sh 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/test/backends/zfs.sh 2016-10-13 14:31:53.000000000 +0000 @@ -1,7 +1,9 @@ #!/bin/sh zfs_setup() { + # shellcheck disable=2039 local LXD_DIR + LXD_DIR=$1 echo "==> Setting up ZFS backend in ${LXD_DIR}" @@ -17,7 +19,9 @@ } zfs_configure() { + # shellcheck disable=2039 local LXD_DIR + LXD_DIR=$1 echo "==> Configuring ZFS backend in ${LXD_DIR}" @@ -26,7 +30,9 @@ } 
zfs_teardown() { + # shellcheck disable=2039 local LXD_DIR + LXD_DIR=$1 echo "==> Tearing down ZFS backend in ${LXD_DIR}" diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/test/deps/import-busybox juju-core-2.0.0/src/github.com/lxc/lxd/test/deps/import-busybox --- juju-core-2.0~beta15/src/github.com/lxc/lxd/test/deps/import-busybox 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/test/deps/import-busybox 2016-10-13 14:31:53.000000000 +0000 @@ -83,7 +83,7 @@ status, data = self.rest_call("/1.0/images/aliases", data, "POST") - if status != 200: + if status not in (200, 201): raise Exception("Failed to create alias: %s" % name) def aliases_remove(self, name): @@ -108,7 +108,7 @@ return [image.split("/1.0/images/")[-1] for image in data['metadata']] - def images_upload(self, path, filename, public): + def images_upload(self, path, public, filename=None): headers = {} if public: headers['X-LXD-public'] = "1" @@ -121,16 +121,16 @@ else: meta_path, rootfs_path = path boundary = str(uuid.uuid1()) + filename_entry = " filename=%s" % filename if filename else "" upload_path = os.path.join(self.workdir, "upload") body = open(upload_path, "wb+") for name, path in [("metadata", meta_path), ("rootfs", rootfs_path)]: - filename = os.path.basename(path) body.write(bytes("--%s\r\n" % boundary, "utf-8")) body.write(bytes("Content-Disposition: form-data; " - "name=%s; filename=%s\r\n" % - (name, filename), "utf-8")) + "name=%s;%s\r\n" % (name, filename_entry), + "utf-8")) body.write(b"Content-Type: application/octet-stream\r\n") body.write(b"\r\n") with open(path, "rb") as fd: @@ -314,8 +314,11 @@ if fingerprint in lxd.images_list(): parser.exit(1, "This image is already in the store.\n") - r = lxd.images_upload((meta_path, rootfs_path), - meta_path.split("/")[-1], args.public) + if args.filename: + r = lxd.images_upload((meta_path, rootfs_path), args.public, + meta_path.split("/")[-1]) + else: + r = lxd.images_upload((meta_path, rootfs_path), 
args.public) print("Image imported as: %s" % r['fingerprint']) else: path = busybox.create_tarball() @@ -326,7 +329,7 @@ if fingerprint in lxd.images_list(): parser.exit(1, "This image is already in the store.\n") - r = lxd.images_upload(path, path.split("/")[-1], args.public) + r = lxd.images_upload(path, args.public) print("Image imported as: %s" % r['fingerprint']) setup_alias(args.alias, fingerprint) @@ -338,6 +341,8 @@ default=False, help="Make the image public") parser.add_argument("--split", action="store_true", default=False, help="Whether to create a split image") + parser.add_argument("--filename", action="store_true", + default=False, help="Set the split image's filename") parser.set_defaults(func=import_busybox) # Call the function diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/test/extras/stresstest.sh juju-core-2.0.0/src/github.com/lxc/lxd/test/extras/stresstest.sh --- juju-core-2.0~beta15/src/github.com/lxc/lxd/test/extras/stresstest.sh 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/test/extras/stresstest.sh 2016-10-13 14:31:53.000000000 +0000 @@ -173,7 +173,7 @@ while [ 1 ]; do lxc profile create empty lxc init busybox disturb1 - lxc profile apply disturb1 empty + lxc profile assign disturb1 empty lxc start disturb1 lxc exec disturb1 -- ps -ef lxc stop disturb1 --force diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/test/lxd-benchmark/main.go juju-core-2.0.0/src/github.com/lxc/lxd/test/lxd-benchmark/main.go --- juju-core-2.0~beta15/src/github.com/lxc/lxd/test/lxd-benchmark/main.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/test/lxd-benchmark/main.go 2016-10-13 14:31:53.000000000 +0000 @@ -0,0 +1,342 @@ +package main + +import ( + "fmt" + "io/ioutil" + "os" + "strings" + "sync" + "time" + + "github.com/lxc/lxd" + "github.com/lxc/lxd/shared" + "github.com/lxc/lxd/shared/gnuflag" +) + +var argCount = gnuflag.Int("count", 100, "Number of containers to create") +var 
argParallel = gnuflag.Int("parallel", -1, "Number of threads to use") +var argImage = gnuflag.String("image", "ubuntu:", "Image to use for the test") +var argPrivileged = gnuflag.Bool("privileged", false, "Use privileged containers") +var argFreeze = gnuflag.Bool("freeze", false, "Freeze the container right after start") + +func main() { + err := run(os.Args) + if err != nil { + fmt.Fprintf(os.Stderr, "error: %s\n", err) + os.Exit(1) + } + + os.Exit(0) +} + +func run(args []string) error { + // Parse command line + gnuflag.Parse(true) + + if len(os.Args) == 1 || !shared.StringInSlice(os.Args[1], []string{"spawn", "delete"}) { + fmt.Printf("Usage: %s spawn [--count=COUNT] [--image=IMAGE] [--privileged=BOOL] [--parallel=COUNT]\n", os.Args[0]) + fmt.Printf(" %s delete [--parallel=COUNT]\n\n", os.Args[0]) + gnuflag.Usage() + fmt.Printf("\n") + return fmt.Errorf("An action (spawn or delete) must be passed.") + } + + // Connect to LXD + c, err := lxd.NewClient(&lxd.DefaultConfig, "local") + if err != nil { + return err + } + + switch os.Args[1] { + case "spawn": + return spawnContainers(c, *argCount, *argImage, *argPrivileged) + case "delete": + return deleteContainers(c) + } + + return nil +} + +func logf(format string, args ...interface{}) { + fmt.Printf(fmt.Sprintf("[%s] %s\n", time.Now().Format(time.StampMilli), format), args...) 
+} + +func spawnContainers(c *lxd.Client, count int, image string, privileged bool) error { + batch := *argParallel + if batch < 1 { + // Detect the number of parallel actions + cpus, err := ioutil.ReadDir("/sys/bus/cpu/devices") + if err != nil { + return err + } + + batch = len(cpus) + } + + batches := count / batch + remainder := count % batch + + // Print the test header + st, err := c.ServerStatus() + if err != nil { + return err + } + + privilegedStr := "unprivileged" + if privileged { + privilegedStr = "privileged" + } + + mode := "normal startup" + if *argFreeze { + mode = "start and freeze" + } + + fmt.Printf("Test environment:\n") + fmt.Printf(" Server backend: %s\n", st.Environment.Server) + fmt.Printf(" Server version: %s\n", st.Environment.ServerVersion) + fmt.Printf(" Kernel: %s\n", st.Environment.Kernel) + fmt.Printf(" Kernel architecture: %s\n", st.Environment.KernelArchitecture) + fmt.Printf(" Kernel version: %s\n", st.Environment.KernelVersion) + fmt.Printf(" Storage backend: %s\n", st.Environment.Storage) + fmt.Printf(" Storage version: %s\n", st.Environment.StorageVersion) + fmt.Printf(" Container backend: %s\n", st.Environment.Driver) + fmt.Printf(" Container version: %s\n", st.Environment.DriverVersion) + fmt.Printf("\n") + fmt.Printf("Test variables:\n") + fmt.Printf(" Container count: %d\n", count) + fmt.Printf(" Container mode: %s\n", privilegedStr) + fmt.Printf(" Startup mode: %s\n", mode) + fmt.Printf(" Image: %s\n", image) + fmt.Printf(" Batches: %d\n", batches) + fmt.Printf(" Batch size: %d\n", batch) + fmt.Printf(" Remainder: %d\n", remainder) + fmt.Printf("\n") + + // Pre-load the image + var fingerprint string + if strings.Contains(image, ":") { + var remote string + remote, fingerprint = lxd.DefaultConfig.ParseRemoteAndContainer(image) + + if fingerprint == "" { + fingerprint = "default" + } + + d, err := lxd.NewClient(&lxd.DefaultConfig, remote) + if err != nil { + return err + } + + target := d.GetAlias(fingerprint) + if target != 
"" { + fingerprint = target + } + + _, err = c.GetImageInfo(fingerprint) + if err != nil { + logf("Importing image into local store: %s", fingerprint) + err := d.CopyImage(fingerprint, c, false, nil, false, false, nil) + if err != nil { + return err + } + } else { + logf("Found image in local store: %s", fingerprint) + } + } else { + fingerprint = image + logf("Found image in local store: %s", fingerprint) + } + + // Start the containers + spawnedCount := 0 + nameFormat := "benchmark-%." + fmt.Sprintf("%d", len(fmt.Sprintf("%d", count))) + "d" + wgBatch := sync.WaitGroup{} + nextStat := batch + + startContainer := func(name string) { + defer wgBatch.Done() + + // Configure + config := map[string]string{} + if privileged { + config["security.privileged"] = "true" + } + config["user.lxd-benchmark"] = "true" + + // Create + resp, err := c.Init(name, "local", fingerprint, nil, config, nil, false) + if err != nil { + logf(fmt.Sprintf("Failed to spawn container '%s': %s", name, err)) + return + } + + err = c.WaitForSuccess(resp.Operation) + if err != nil { + logf(fmt.Sprintf("Failed to spawn container '%s': %s", name, err)) + return + } + + // Start + resp, err = c.Action(name, "start", -1, false, false) + if err != nil { + logf(fmt.Sprintf("Failed to spawn container '%s': %s", name, err)) + return + } + + err = c.WaitForSuccess(resp.Operation) + if err != nil { + logf(fmt.Sprintf("Failed to spawn container '%s': %s", name, err)) + return + } + + // Freeze + if *argFreeze { + resp, err = c.Action(name, "freeze", -1, false, false) + if err != nil { + logf(fmt.Sprintf("Failed to spawn container '%s': %s", name, err)) + return + } + + err = c.WaitForSuccess(resp.Operation) + if err != nil { + logf(fmt.Sprintf("Failed to spawn container '%s': %s", name, err)) + return + } + } + } + + logf("Starting the test") + timeStart := time.Now() + + for i := 0; i < batches; i++ { + for j := 0; j < batch; j++ { + spawnedCount = spawnedCount + 1 + name := fmt.Sprintf(nameFormat, 
spawnedCount) + + wgBatch.Add(1) + go startContainer(name) + } + wgBatch.Wait() + + if spawnedCount >= nextStat { + interval := time.Since(timeStart).Seconds() + logf("Started %d containers in %.3fs (%.3f/s)", spawnedCount, interval, float64(spawnedCount)/interval) + nextStat = nextStat * 2 + } + } + + for k := 0; k < remainder; k++ { + spawnedCount = spawnedCount + 1 + name := fmt.Sprintf(nameFormat, spawnedCount) + + wgBatch.Add(1) + go startContainer(name) + } + wgBatch.Wait() + + logf("Test completed in %.3fs", time.Since(timeStart).Seconds()) + + return nil +} + +func deleteContainers(c *lxd.Client) error { + batch := *argParallel + if batch < 1 { + // Detect the number of parallel actions + cpus, err := ioutil.ReadDir("/sys/bus/cpu/devices") + if err != nil { + return err + } + + batch = len(cpus) + } + + // List all the containers + allContainers, err := c.ListContainers() + if err != nil { + return err + } + + containers := []shared.ContainerInfo{} + for _, container := range allContainers { + if container.Config["user.lxd-benchmark"] != "true" { + continue + } + + containers = append(containers, container) + } + + // Delete them all + count := len(containers) + logf("%d containers to delete", count) + + batches := count / batch + + deletedCount := 0 + wgBatch := sync.WaitGroup{} + nextStat := batch + + deleteContainer := func(ct shared.ContainerInfo) { + defer wgBatch.Done() + + // Stop + if ct.IsActive() { + resp, err := c.Action(ct.Name, "stop", -1, true, false) + if err != nil { + logf("Failed to delete container: %s", ct.Name) + return + } + + err = c.WaitForSuccess(resp.Operation) + if err != nil { + logf("Failed to delete container: %s", ct.Name) + return + } + } + + // Delete + resp, err := c.Delete(ct.Name) + if err != nil { + logf("Failed to delete container: %s", ct.Name) + return + } + + err = c.WaitForSuccess(resp.Operation) + if err != nil { + logf("Failed to delete container: %s", ct.Name) + return + } + } + + logf("Starting the cleanup") + 
timeStart := time.Now() + + for i := 0; i < batches; i++ { + for j := 0; j < batch; j++ { + wgBatch.Add(1) + go deleteContainer(containers[deletedCount]) + + deletedCount = deletedCount + 1 + } + wgBatch.Wait() + + if deletedCount >= nextStat { + interval := time.Since(timeStart).Seconds() + logf("Deleted %d containers in %.3fs (%.3f/s)", deletedCount, interval, float64(deletedCount)/interval) + nextStat = nextStat * 2 + } + } + + for k := deletedCount; k < count; k++ { + wgBatch.Add(1) + go deleteContainer(containers[deletedCount]) + + deletedCount = deletedCount + 1 + } + wgBatch.Wait() + + logf("Cleanup completed") + + return nil +} diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/test/main.sh juju-core-2.0.0/src/github.com/lxc/lxd/test/main.sh --- juju-core-2.0~beta15/src/github.com/lxc/lxd/test/main.sh 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/test/main.sh 2016-10-13 14:31:53.000000000 +0000 @@ -1,6 +1,12 @@ #!/bin/sh -eu [ -n "${GOPATH:-}" ] && export "PATH=${GOPATH}/bin:${PATH}" +# Don't translate lxc output for parsing in it in tests. +export "LC_ALL=C" + +# Force UTC for consistency +export "TZ=UTC" + if [ -n "${LXD_DEBUG:-}" ]; then set -x DEBUG="--debug" @@ -36,6 +42,7 @@ # import all the backends for backend in backends/*.sh; do + # shellcheck disable=SC1090 . "${backend}" done @@ -47,6 +54,8 @@ set +x # LXD_DIR is local here because since $(lxc) is actually a function, it # overwrites the environment and we would lose LXD_DIR's value otherwise. + + # shellcheck disable=2039 local LXD_DIR lxddir=${1} @@ -86,6 +95,9 @@ set -x fi + echo "==> Setting up networking" + LXD_DIR="${lxddir}" lxc network attach-profile lxdbr0 default eth0 + echo "==> Configuring storage backend" "$LXD_BACKEND"_configure "${lxddir}" } @@ -126,6 +138,22 @@ eval "${cmd}" } +gen_cert() { + # Temporarily move the existing cert to trick LXC into generating a + # second cert. 
LXC will only generate a cert when adding a remote + # server with a HTTPS scheme. The remote server URL just needs to + # be syntactically correct to get past initial checks; in fact, we + # don't want it to succeed, that way we don't have to delete it later. + [ -f "${LXD_CONF}/${1}.crt" ] && return + mv "${LXD_CONF}/client.crt" "${LXD_CONF}/client.crt.bak" + mv "${LXD_CONF}/client.key" "${LXD_CONF}/client.key.bak" + echo y | lxc_remote remote add "$(uuidgen)" https://0.0.0.0 || true + mv "${LXD_CONF}/client.crt" "${LXD_CONF}/${1}.crt" + mv "${LXD_CONF}/client.key" "${LXD_CONF}/${1}.key" + mv "${LXD_CONF}/client.crt.bak" "${LXD_CONF}/client.crt" + mv "${LXD_CONF}/client.key.bak" "${LXD_CONF}/client.key" +} + my_curl() { curl -k -s --cert "${LXD_CONF}/client.crt" --key "${LXD_CONF}/client.key" "$@" } @@ -173,16 +201,20 @@ kill_lxd() { # LXD_DIR is local here because since $(lxc) is actually a function, it # overwrites the environment and we would lose LXD_DIR's value otherwise. + + # shellcheck disable=2039 local LXD_DIR + daemon_dir=${1} LXD_DIR=${daemon_dir} daemon_pid=$(cat "${daemon_dir}/lxd.pid") + check_leftovers="false" echo "==> Killing LXD at ${daemon_dir}" if [ -e "${daemon_dir}/unix.socket" ]; then # Delete all containers echo "==> Deleting all containers" - for container in $(lxc list --force-local | tail -n+3 | grep "^| " | cut -d' ' -f2); do + for container in $(lxc list --fast --force-local | tail -n+3 | grep "^| " | cut -d' ' -f2); do lxc delete "${container}" --force-local -f || true done @@ -192,6 +224,12 @@ lxc image delete "${image}" --force-local || true done + # Delete all profiles + echo "==> Deleting all profiles" + for profile in $(lxc profile list --force-local | tail -n+3 | grep "^| " | cut -d' ' -f2); do + lxc profile delete "${profile}" --force-local || true + done + echo "==> Checking for locked DB tables" for table in $(echo .tables | sqlite3 "${daemon_dir}/lxd.db"); do echo "SELECT * FROM ${table};" | sqlite3 "${daemon_dir}/lxd.db" 
>/dev/null @@ -202,6 +240,8 @@ # Cleanup shmounts (needed due to the forceful kill) find "${daemon_dir}" -name shmounts -exec "umount" "-l" "{}" \; >/dev/null 2>&1 || true + + check_leftovers="true" fi if [ -n "${LXD_LOGS:-}" ]; then @@ -211,29 +251,36 @@ cp "${daemon_dir}/lxd.log" "${LXD_LOGS}/${daemon_pid}/" fi - echo "==> Checking for leftover files" - rm -f "${daemon_dir}/containers/lxc-monitord.log" - rm -f "${daemon_dir}/security/apparmor/cache/.features" - check_empty "${daemon_dir}/containers/" - check_empty "${daemon_dir}/devices/" - check_empty "${daemon_dir}/images/" - # FIXME: Once container logging rework is done, uncomment - # check_empty "${daemon_dir}/logs/" - check_empty "${daemon_dir}/security/apparmor/cache/" - check_empty "${daemon_dir}/security/apparmor/profiles/" - check_empty "${daemon_dir}/security/seccomp/" - check_empty "${daemon_dir}/shmounts/" - check_empty "${daemon_dir}/snapshots/" - - echo "==> Checking for leftover DB entries" - check_empty_table "${daemon_dir}/lxd.db" "containers" - check_empty_table "${daemon_dir}/lxd.db" "containers_config" - check_empty_table "${daemon_dir}/lxd.db" "containers_devices" - check_empty_table "${daemon_dir}/lxd.db" "containers_devices_config" - check_empty_table "${daemon_dir}/lxd.db" "containers_profiles" - check_empty_table "${daemon_dir}/lxd.db" "images" - check_empty_table "${daemon_dir}/lxd.db" "images_aliases" - check_empty_table "${daemon_dir}/lxd.db" "images_properties" + if [ "${check_leftovers}" = "true" ]; then + echo "==> Checking for leftover files" + rm -f "${daemon_dir}/containers/lxc-monitord.log" + rm -f "${daemon_dir}/security/apparmor/cache/.features" + check_empty "${daemon_dir}/containers/" + check_empty "${daemon_dir}/devices/" + check_empty "${daemon_dir}/images/" + # FIXME: Once container logging rework is done, uncomment + # check_empty "${daemon_dir}/logs/" + check_empty "${daemon_dir}/security/apparmor/cache/" + check_empty "${daemon_dir}/security/apparmor/profiles/" + 
check_empty "${daemon_dir}/security/seccomp/" + check_empty "${daemon_dir}/shmounts/" + check_empty "${daemon_dir}/snapshots/" + + echo "==> Checking for leftover DB entries" + check_empty_table "${daemon_dir}/lxd.db" "containers" + check_empty_table "${daemon_dir}/lxd.db" "containers_config" + check_empty_table "${daemon_dir}/lxd.db" "containers_devices" + check_empty_table "${daemon_dir}/lxd.db" "containers_devices_config" + check_empty_table "${daemon_dir}/lxd.db" "containers_profiles" + check_empty_table "${daemon_dir}/lxd.db" "images" + check_empty_table "${daemon_dir}/lxd.db" "images_aliases" + check_empty_table "${daemon_dir}/lxd.db" "images_properties" + check_empty_table "${daemon_dir}/lxd.db" "images_source" + check_empty_table "${daemon_dir}/lxd.db" "profiles" + check_empty_table "${daemon_dir}/lxd.db" "profiles_config" + check_empty_table "${daemon_dir}/lxd.db" "profiles_devices" + check_empty_table "${daemon_dir}/lxd.db" "profiles_devices_config" + fi # teardown storage "$LXD_BACKEND"_teardown "${daemon_dir}" @@ -260,16 +307,25 @@ echo "Tests Completed (${TEST_RESULT}): hit enter to continue" # shellcheck disable=SC2034 - read nothing + read -r nothing fi echo "==> Cleaning up" # Kill all the LXD instances - while read daemon_dir; do + while read -r daemon_dir; do kill_lxd "${daemon_dir}" done < "${TEST_DIR}/daemons" + # Cleanup leftover networks + # shellcheck disable=SC2009 + ps aux | grep "interface=lxdt$$ " | grep -v grep | awk '{print $2}' | while read -r line; do + kill -9 "${line}" + done + if [ -e "/sys/class/net/lxdt$$" ]; then + ip link del lxdt$$ + fi + # Wipe the test environment wipe "${TEST_DIR}" @@ -290,12 +346,12 @@ fi # shellcheck disable=SC2009 - ps aux | grep lxc-monitord | grep "${1}" | awk '{print $2}' | while read pid; do + ps aux | grep lxc-monitord | grep "${1}" | awk '{print $2}' | while read -r pid; do kill -9 "${pid}" done if [ -f "${TEST_DIR}/loops" ]; then - while read line; do + while read -r line; do losetup -d "${line}" 
|| true done < "${TEST_DIR}/loops" fi @@ -314,6 +370,7 @@ # Import all the testsuites for suite in suites/*.sh; do + # shellcheck disable=SC1090 . "${suite}" done @@ -374,6 +431,10 @@ TEST_CURRENT=test_basic_usage test_basic_usage +echo "==> TEST: security" +TEST_CURRENT=test_security +test_security + echo "==> TEST: images (and cached image expiry)" TEST_CURRENT=test_image_expiry test_image_expiry @@ -412,6 +473,10 @@ TEST_CURRENT=test_filemanip test_filemanip +echo "==> TEST: network" +TEST_CURRENT=test_network +test_network + echo "==> TEST: devlxd" TEST_CURRENT=test_devlxd test_devlxd diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/test/suites/basic.sh juju-core-2.0.0/src/github.com/lxc/lxd/test/suites/basic.sh --- juju-core-2.0~beta15/src/github.com/lxc/lxd/test/suites/basic.sh 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/test/suites/basic.sh 2016-10-13 14:31:53.000000000 +0000 @@ -1,16 +1,5 @@ #!/bin/sh -gen_third_cert() { - [ -f "${LXD_CONF}/client3.crt" ] && return - mv "${LXD_CONF}/client.crt" "${LXD_CONF}/client.crt.bak" - mv "${LXD_CONF}/client.key" "${LXD_CONF}/client.key.bak" - lxc_remote list > /dev/null 2>&1 - mv "${LXD_CONF}/client.crt" "${LXD_CONF}/client3.crt" - mv "${LXD_CONF}/client.key" "${LXD_CONF}/client3.key" - mv "${LXD_CONF}/client.crt.bak" "${LXD_CONF}/client.crt" - mv "${LXD_CONF}/client.key.bak" "${LXD_CONF}/client.key" -} - test_basic_usage() { ensure_import_testimage ensure_has_localhost_remote "${LXD_ADDR}" @@ -18,18 +7,30 @@ # Test image export sum=$(lxc image info testimage | grep ^Fingerprint | cut -d' ' -f2) lxc image export testimage "${LXD_DIR}/" - if [ -e "${LXD_TEST_IMAGE:-}" ]; then - name=$(basename "${LXD_TEST_IMAGE}") - else - name=${sum}.tar.xz - fi - [ "${sum}" = "$(sha256sum "${LXD_DIR}/${name}" | cut -d' ' -f1)" ] + [ "${sum}" = "$(sha256sum "${LXD_DIR}/${sum}.tar.xz" | cut -d' ' -f1)" ] # Test an alias with slashes lxc image show "${sum}" lxc image alias create a/b/ "${sum}" 
lxc image alias delete a/b/ + # Test alias list filtering + lxc image alias create foo "${sum}" + lxc image alias create bar "${sum}" + lxc image alias list local: | grep -q foo + lxc image alias list local: | grep -q bar + lxc image alias list local: foo | grep -q -v bar + lxc image alias list local: "${sum}" | grep -q foo + lxc image alias list local: non-existent | grep -q -v non-existent + lxc image alias delete foo + lxc image alias delete bar + + # Test image list output formats (table & json) + lxc image list --format table | grep -q testimage + lxc image list --format json \ + | jq '.[]|select(.alias[0].name="testimage")' \ + | grep -q '"name": "testimage"' + # Test image delete lxc image delete testimage @@ -38,20 +39,56 @@ my_curl -f -X GET "https://${LXD_ADDR}/1.0/containers" # Re-import the image - mv "${LXD_DIR}/${name}" "${LXD_DIR}/testimage.tar.xz" + mv "${LXD_DIR}/${sum}.tar.xz" "${LXD_DIR}/testimage.tar.xz" lxc image import "${LXD_DIR}/testimage.tar.xz" --alias testimage rm "${LXD_DIR}/testimage.tar.xz" - # Test filename for image export (should be "out") + # Test filename for image export lxc image export testimage "${LXD_DIR}/" - [ "${sum}" = "$(sha256sum "${LXD_DIR}/testimage.tar.xz" | cut -d' ' -f1)" ] - rm "${LXD_DIR}/testimage.tar.xz" + [ "${sum}" = "$(sha256sum "${LXD_DIR}/${sum}.tar.xz" | cut -d' ' -f1)" ] + rm "${LXD_DIR}/${sum}.tar.xz" + + # Test custom filename for image export + lxc image export testimage "${LXD_DIR}/foo" + [ "${sum}" = "$(sha256sum "${LXD_DIR}/foo.tar.xz" | cut -d' ' -f1)" ] + rm "${LXD_DIR}/foo.tar.xz" + + + # Test image export with a split image. 
+ deps/import-busybox --split --alias splitimage + + sum=$(lxc image info splitimage | grep ^Fingerprint | cut -d' ' -f2) + + lxc image export splitimage "${LXD_DIR}" + [ "${sum}" = "$(cat "${LXD_DIR}/meta-${sum}.tar.xz" "${LXD_DIR}/${sum}.tar.xz" | sha256sum | cut -d' ' -f1)" ] + + # Delete the split image and exported files + rm "${LXD_DIR}/${sum}.tar.xz" + rm "${LXD_DIR}/meta-${sum}.tar.xz" + lxc image delete splitimage + + # Redo the split image export test, this time with the --filename flag + # to tell import-busybox to set the 'busybox' filename in the upload. + # The sum should remain the same as its the same image. + deps/import-busybox --split --filename --alias splitimage + + lxc image export splitimage "${LXD_DIR}" + [ "${sum}" = "$(cat "${LXD_DIR}/meta-${sum}.tar.xz" "${LXD_DIR}/${sum}.tar.xz" | sha256sum | cut -d' ' -f1)" ] + + # Delete the split image and exported files + rm "${LXD_DIR}/${sum}.tar.xz" + rm "${LXD_DIR}/meta-${sum}.tar.xz" + lxc image delete splitimage + # Test container creation lxc init testimage foo lxc list | grep foo | grep STOPPED lxc list fo | grep foo | grep STOPPED + # Test list json format + lxc list --format json | jq '.[]|select(.name="foo")' | grep '"name": "foo"' + # Test container rename lxc move foo bar lxc list | grep -v foo @@ -62,7 +99,7 @@ lxc delete foo # gen untrusted cert - gen_third_cert + gen_cert client3 # don't allow requests without a cert to get trusted data curl -k -s -X GET "https://${LXD_ADDR}/1.0/containers/foo" | grep 403 @@ -73,6 +110,13 @@ curl -k -s --cert "${LXD_CONF}/client3.crt" --key "${LXD_CONF}/client3.key" -X GET "https://${LXD_ADDR}/1.0/images" | grep "/1.0/images/" && false lxc image delete foo-image +# Test image compression on publish + lxc publish bar --alias=foo-image-compressed --compression=bzip2 prop=val1 + lxc image show foo-image-compressed | grep val1 + curl -k -s --cert "${LXD_CONF}/client3.crt" --key "${LXD_CONF}/client3.key" -X GET "https://${LXD_ADDR}/1.0/images" | grep 
"/1.0/images/" && false + lxc image delete foo-image-compressed + + # Test privileged container publish lxc profile create priv lxc profile set priv security.privileged true @@ -118,9 +162,35 @@ lxc delete bar2 lxc image delete foo - # test basic alias support - printf "aliases:\n ls: list" >> "${LXD_CONF}/config.yml" - lxc ls + # Test alias support + cp "${LXD_CONF}/config.yml" "${LXD_CONF}/config.yml.bak" + + # 1. Basic built-in alias functionality + [ "$(lxc ls)" = "$(lxc list)" ] + # 2. Basic user-defined alias functionality + printf "aliases:\n l: list\n" >> "${LXD_CONF}/config.yml" + [ "$(lxc l)" = "$(lxc list)" ] + # 3. Built-in aliases and user-defined aliases can coexist + [ "$(lxc ls)" = "$(lxc l)" ] + # 4. Multi-argument alias keys and values + printf " i ls: image list\n" >> "${LXD_CONF}/config.yml" + [ "$(lxc i ls)" = "$(lxc image list)" ] + # 5. Aliases where len(keys) != len(values) (expansion/contraction of number of arguments) + printf " ils: image list\n container ls: list\n" >> "${LXD_CONF}/config.yml" + [ "$(lxc ils)" = "$(lxc image list)" ] + [ "$(lxc container ls)" = "$(lxc list)" ] + # 6. User-defined aliases override built-in aliases + printf " cp: list\n" >> "${LXD_CONF}/config.yml" + [ "$(lxc ls)" = "$(lxc cp)" ] + # 7. 
User-defined aliases override commands and don't recurse + lxc init testimage foo + LXC_CONFIG_SHOW=$(lxc config show foo --expanded) + printf " config show: config show --expanded\n" >> "${LXD_CONF}/config.yml" + [ "$(lxc config show foo)" = "$LXC_CONFIG_SHOW" ] + lxc delete foo + + # Restore the config to remove the aliases + mv "${LXD_CONF}/config.yml.bak" "${LXD_CONF}/config.yml" # Delete the bar container we've used for several tests lxc delete bar @@ -141,6 +211,7 @@ # Test "nonetype" container creation with an LXC config wait_for "${LXD_ADDR}" my_curl -X POST "https://${LXD_ADDR}/1.0/containers" \ -d "{\"name\":\"configtest\",\"config\":{\"raw.lxc\":\"lxc.hook.clone=/bin/true\"},\"source\":{\"type\":\"none\"}}" + # shellcheck disable=SC2102 [ "$(my_curl "https://${LXD_ADDR}/1.0/containers/configtest" | jq -r .metadata.config[\"raw.lxc\"])" = "lxc.hook.clone=/bin/true" ] lxc delete configtest @@ -182,11 +253,21 @@ false fi + # Test last_used_at field is working properly + lxc init testimage last-used-at-test + lxc list last-used-at-test --format json | jq -r '.[].last_used_at' | grep '1970-01-01T00:00:00Z' + lxc start last-used-at-test + lxc list last-used-at-test --format json | jq -r '.[].last_used_at' | grep -v '1970-01-01T00:00:00Z' + # check that we can set the environment lxc exec foo pwd | grep /root lxc exec --env BEST_BAND=meshuggah foo env | grep meshuggah lxc exec foo ip link show | grep eth0 + # check that we can get the return code for a non- wait-for-websocket exec + op=$(my_curl -X POST "https://${LXD_ADDR}/1.0/containers/foo/exec" -d '{"command": ["sleep", "1"], "environment": {}, "wait-for-websocket": false, "interactive": false}' | jq -r .operation) + [ "$(my_curl "https://${LXD_ADDR}${op}/wait" | jq -r .metadata.metadata.return)" != "null" ] + # test file transfer echo abc > "${LXD_DIR}/in" @@ -198,6 +279,14 @@ lxc exec foo /bin/cat /root/in1 | grep abc lxc exec foo -- /bin/rm -f root/in1 + # test lxc file edit doesn't change target file's 
owner and permissions + echo "content" | lxc file push - foo/tmp/edit_test + lxc exec foo -- chown 55.55 /tmp/edit_test + lxc exec foo -- chmod 555 /tmp/edit_test + echo "new content" | lxc file edit foo/tmp/edit_test + [ "$(lxc exec foo -- cat /tmp/edit_test)" = "new content" ] + [ "$(lxc exec foo -- stat -c \"%u %g %a\" /tmp/edit_test)" = "55 55 555" ] + # make sure stdin is chowned to our container root uid (Issue #590) [ -t 0 ] && [ -t 1 ] && lxc exec foo -- chown 1000:1000 /proc/self/fd/0 @@ -224,12 +313,37 @@ # check that an apparmor profile is created for this container, that it is # unloaded on stop, and that it is deleted when the container is deleted lxc launch testimage lxd-apparmor-test - aa-status | grep "lxd-lxd-apparmor-test_<${LXD_DIR}>" - lxc stop lxd-apparmor-test --force - ! aa-status | grep -q "lxd-lxd-apparmor-test_<${LXD_DIR}>" + + MAJOR=0 + MINOR=0 + if [ -f /sys/kernel/security/apparmor/features/domain/version ]; then + MAJOR=$(awk -F. '{print $1}' < /sys/kernel/security/apparmor/features/domain/version) + MINOR=$(awk -F. '{print $2}' < /sys/kernel/security/apparmor/features/domain/version) + fi + + if [ "${MAJOR}" -gt "1" ] || ([ "${MAJOR}" = "1" ] && [ "${MINOR}" -ge "2" ]); then + aa_namespace="lxd-lxd-apparmor-test_<$(echo "${LXD_DIR}" | sed -e 's/\//-/g' -e 's/^.//')>" + aa-status | grep ":${aa_namespace}://unconfined" + lxc stop lxd-apparmor-test --force + ! aa-status | grep -q ":${aa_namespace}:" + else + aa-status | grep "lxd-lxd-apparmor-test_<${LXD_DIR}>" + lxc stop lxd-apparmor-test --force + ! aa-status | grep -q "lxd-lxd-apparmor-test_<${LXD_DIR}>" + fi lxc delete lxd-apparmor-test [ ! 
-f "${LXD_DIR}/security/apparmor/profiles/lxd-lxd-apparmor-test" ] + lxc launch testimage lxd-seccomp-test + init=$(lxc info lxd-seccomp-test | grep Pid | cut -f2 -d" ") + [ "$(grep Seccomp "/proc/${init}/status" | cut -f2)" -eq "2" ] + lxc stop --force lxd-seccomp-test + lxc config set lxd-seccomp-test security.syscalls.blacklist_default false + lxc start lxd-seccomp-test + init=$(lxc info lxd-seccomp-test | grep Pid | cut -f2 -d" ") + [ "$(grep Seccomp "/proc/${init}/status" | cut -f2)" -eq "0" ] + lxc stop --force lxd-seccomp-test + # make sure that privileged containers are not world-readable lxc profile create unconfined lxc profile set unconfined security.privileged true @@ -238,6 +352,32 @@ lxc delete foo2 lxc profile delete unconfined + # Test boot.host_shutdown_timeout config setting + lxc init testimage configtest --config boot.host_shutdown_timeout=45 + [ "$(lxc config get configtest boot.host_shutdown_timeout)" -eq 45 ] + lxc config set configtest boot.host_shutdown_timeout 15 + [ "$(lxc config get configtest boot.host_shutdown_timeout)" -eq 15 ] + lxc delete configtest + + # Test deleting multiple images + # Start 3 containers to create 3 different images + lxc launch testimage c1 + lxc launch testimage c2 + lxc launch testimage c3 + lxc exec c1 -- touch /tmp/c1 + lxc exec c2 -- touch /tmp/c2 + lxc exec c3 -- touch /tmp/c3 + lxc publish --force c1 --alias=image1 + lxc publish --force c2 --alias=image2 + lxc publish --force c3 --alias=image3 + # Delete multiple images with lxc delete and confirm they're deleted + lxc image delete local:image1 local:image2 local:image3 + ! lxc image list | grep -q image1 + ! lxc image list | grep -q image2 + ! 
lxc image list | grep -q image3 + # Cleanup the containers + lxc delete --force c1 c2 c3 + # Ephemeral lxc launch testimage foo -e diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/test/suites/config.sh juju-core-2.0.0/src/github.com/lxc/lxd/test/suites/config.sh --- juju-core-2.0~beta15/src/github.com/lxc/lxd/test/suites/config.sh 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/test/suites/config.sh 2016-10-13 14:31:53.000000000 +0000 @@ -92,6 +92,21 @@ # into the database and never let the user edit the container again. ! lxc config set foo raw.lxc "lxc.notaconfigkey = invalid" + # check that various profile application mechanisms work + lxc profile create one + lxc profile create two + lxc profile assign foo one,two + [ "$(lxc info foo | grep Profiles)" = "Profiles: one, two" ] + lxc profile assign foo "" + [ "$(lxc info foo | grep Profiles)" = "Profiles: " ] + lxc profile apply foo one # backwards compat check with `lxc profile apply` + [ "$(lxc info foo | grep Profiles)" = "Profiles: one" ] + lxc profile assign foo "" + lxc profile add foo one + [ "$(lxc info foo | grep Profiles)" = "Profiles: one" ] + lxc profile remove foo one + [ "$(lxc info foo | grep Profiles)" = "Profiles: " ] + lxc profile create stdintest echo "BADCONF" | lxc profile set stdintest user.user_data - lxc profile show stdintest | grep BADCONF @@ -104,18 +119,18 @@ mkdir -p "${TEST_DIR}/mnt1" lxc config device add foo mnt1 disk source="${TEST_DIR}/mnt1" path=/mnt1 readonly=true lxc profile create onenic - lxc profile device add onenic eth0 nic nictype=bridged parent=lxcbr0 - lxc profile apply foo onenic + lxc profile device add onenic eth0 nic nictype=bridged parent=lxdbr0 + lxc profile assign foo onenic lxc profile create unconfined lxc profile set unconfined raw.lxc "lxc.aa_profile=unconfined" - lxc profile apply foo onenic,unconfined + lxc profile assign foo onenic,unconfined lxc config device list foo | grep mnt1 lxc config device show foo | grep 
"/mnt1" lxc config show foo | grep "onenic" -A1 | grep "unconfined" lxc profile list | grep onenic lxc profile device list onenic | grep eth0 - lxc profile device show onenic | grep lxcbr0 + lxc profile device show onenic | grep lxdbr0 # test live-adding a nic lxc start foo @@ -123,7 +138,7 @@ lxc config show foo --expanded | grep -q "raw.lxc" ! lxc config show foo | grep -v "volatile.eth0" | grep -q "eth0" lxc config show foo --expanded | grep -v "volatile.eth0" | grep -q "eth0" - lxc config device add foo eth2 nic nictype=bridged parent=lxcbr0 name=eth10 + lxc config device add foo eth2 nic nictype=bridged parent=lxdbr0 name=eth10 lxc exec foo -- /sbin/ifconfig -a | grep eth0 lxc exec foo -- /sbin/ifconfig -a | grep eth10 lxc config device list foo | grep eth2 @@ -173,7 +188,7 @@ lxc delete foo lxc init testimage foo - lxc profile apply foo onenic,unconfined + lxc profile assign foo onenic,unconfined lxc start foo lxc exec foo -- cat /proc/self/attr/current | grep unconfined diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/test/suites/database_update.sh juju-core-2.0.0/src/github.com/lxc/lxd/test/suites/database_update.sh --- juju-core-2.0~beta15/src/github.com/lxc/lxd/test/suites/database_update.sh 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/test/suites/database_update.sh 2016-10-13 14:31:53.000000000 +0000 @@ -11,12 +11,12 @@ spawn_lxd "${LXD_MIGRATE_DIR}" # Assert there are enough tables. - expected_tables=16 + expected_tables=19 tables=$(sqlite3 "${MIGRATE_DB}" ".dump" | grep -c "CREATE TABLE") [ "${tables}" -eq "${expected_tables}" ] || { echo "FAIL: Wrong number of tables after database migration. 
Found: ${tables}, expected ${expected_tables}"; false; } - # There should be 10 "ON DELETE CASCADE" occurences - expected_cascades=11 + # There should be 13 "ON DELETE CASCADE" occurences + expected_cascades=12 cascades=$(sqlite3 "${MIGRATE_DB}" ".dump" | grep -c "ON DELETE CASCADE") [ "${cascades}" -eq "${expected_cascades}" ] || { echo "FAIL: Wrong number of ON DELETE CASCADE foreign keys. Found: ${cascades}, exected: ${expected_cascades}"; false; } } diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/test/suites/devlxd.sh juju-core-2.0.0/src/github.com/lxc/lxd/test/suites/devlxd.sh --- juju-core-2.0~beta15/src/github.com/lxc/lxd/test/suites/devlxd.sh 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/test/suites/devlxd.sh 2016-10-13 14:31:53.000000000 +0000 @@ -3,8 +3,10 @@ test_devlxd() { ensure_import_testimage + # shellcheck disable=SC2164 cd "${TEST_DIR}" go build -tags netgo -a -installsuffix devlxd ../deps/devlxd-client.go + # shellcheck disable=SC2164 cd - lxc launch testimage devlxd diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/test/suites/filemanip.sh juju-core-2.0.0/src/github.com/lxc/lxd/test/suites/filemanip.sh --- juju-core-2.0~beta15/src/github.com/lxc/lxd/test/suites/filemanip.sh 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/test/suites/filemanip.sh 2016-10-13 14:31:53.000000000 +0000 @@ -2,13 +2,43 @@ test_filemanip() { ensure_import_testimage + ensure_has_localhost_remote "${LXD_ADDR}" lxc launch testimage filemanip lxc exec filemanip -- ln -s /tmp/ /tmp/outside lxc file push main.sh filemanip/tmp/outside/ [ ! 
-f /tmp/main.sh ] - [ -f "${LXD_DIR}/containers/filemanip/rootfs/tmp/main.sh" ] + lxc exec filemanip -- ls /tmp/main.sh + + # missing files should return 404 + err=$(my_curl -o /dev/null -w "%{http_code}" -X GET "https://${LXD_ADDR}/1.0/containers/filemanip/files?path=/tmp/foo") + [ "${err}" -eq "404" ] + + # lxc {push|pull} -r + mkdir "${TEST_DIR}"/source + echo "foo" > "${TEST_DIR}"/source/foo + echo "bar" > "${TEST_DIR}"/source/bar + + lxc file push -r "${TEST_DIR}"/source filemanip/tmp + + [ "$(lxc exec filemanip -- stat -c "%u" /tmp/source)" = "$(id -u)" ] + [ "$(lxc exec filemanip -- stat -c "%g" /tmp/source)" = "$(id -g)" ] + [ "$(lxc exec filemanip -- stat -c "%a" /tmp/source)" = "755" ] + + mkdir "${TEST_DIR}"/dest + lxc file pull -r filemanip/tmp/source "${TEST_DIR}"/dest + + [ "$(cat "${TEST_DIR}"/dest/source/foo)" = "foo" ] + [ "$(cat "${TEST_DIR}"/dest/source/bar)" = "bar" ] + + [ "$(stat -c "%u" "${TEST_DIR}"/dest/source)" = "$(id -u)" ] + [ "$(stat -c "%g" "${TEST_DIR}"/dest/source)" = "$(id -g)" ] + [ "$(stat -c "%a" "${TEST_DIR}"/dest/source)" = "755" ] + + lxc file push -p "${TEST_DIR}"/source/foo filemanip/tmp/this/is/a/nonexistent/directory/ + lxc file pull filemanip/tmp/this/is/a/nonexistent/directory/foo "${TEST_DIR}" + [ "$(cat "${TEST_DIR}"/foo)" = "foo" ] lxc delete filemanip -f } diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/test/suites/migration.sh juju-core-2.0.0/src/github.com/lxc/lxd/test/suites/migration.sh --- juju-core-2.0~beta15/src/github.com/lxc/lxd/test/suites/migration.sh 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/test/suites/migration.sh 2016-10-13 14:31:53.000000000 +0000 @@ -3,6 +3,9 @@ test_migration() { ensure_import_testimage + # workaround for kernel/criu + umount /sys/kernel/debug >/dev/null 2>&1 || true + if ! 
lxc_remote remote list | grep -q l1; then lxc_remote remote add l1 "${LXD_ADDR}" --accept-certificate --password foo fi @@ -12,8 +15,12 @@ lxc_remote init testimage nonlive # test moving snapshots + lxc_remote config set l1:nonlive user.tester foo lxc_remote snapshot l1:nonlive + lxc_remote config unset l1:nonlive user.tester lxc_remote move l1:nonlive l2: + lxc_remote config show l2:nonlive/snap0 | grep user.tester | grep foo + # FIXME: make this backend agnostic if [ "${LXD_BACKEND}" != "lvm" ]; then [ -d "${LXD2_DIR}/containers/nonlive/rootfs" ] @@ -64,8 +71,12 @@ return fi - lxc_remote launch testimage migratee + lxc_remote launch testimage l1:migratee + + # let the container do some interesting things + sleep 1s - lxc_remote move l1:migratee l2:migratee - lxc_remote stop l2:migratee --force + lxc_remote stop --stateful l1:migratee + lxc_remote start l1:migratee + lxc_remote stop --force l1:migratee } diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/test/suites/network.sh juju-core-2.0.0/src/github.com/lxc/lxd/test/suites/network.sh --- juju-core-2.0~beta15/src/github.com/lxc/lxd/test/suites/network.sh 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/test/suites/network.sh 2016-10-13 14:31:53.000000000 +0000 @@ -0,0 +1,44 @@ +#!/bin/sh + +test_network() { + ensure_import_testimage + ensure_has_localhost_remote "${LXD_ADDR}" + + lxc init testimage nettest + + # Standard bridge with random subnet and a bunch of options + lxc network create lxdt$$ + lxc network set lxdt$$ dns.mode dynamic + lxc network set lxdt$$ dns.domain blah + lxc network set lxdt$$ ipv4.routing false + lxc network set lxdt$$ ipv6.routing false + lxc network set lxdt$$ ipv6.dhcp.stateful true + lxc network delete lxdt$$ + + # Unconfigured bridge + lxc network create lxdt$$ ipv4.address=none ipv6.address=none + lxc network delete lxdt$$ + + # Configured bridge with static assignment + lxc network create lxdt$$ dns.domain=test dns.mode=managed + lxc network 
attach lxdt$$ nettest eth0 + v4_addr="$(lxc network get lxdt$$ ipv4.address | cut -d/ -f1)0" + v6_addr="$(lxc network get lxdt$$ ipv6.address | cut -d/ -f1)00" + lxc config device set nettest eth0 ipv4.address "${v4_addr}" + lxc config device set nettest eth0 ipv6.address "${v6_addr}" + grep -q "${v4_addr}.*nettest" "${LXD_DIR}/networks/lxdt$$/dnsmasq.hosts" + grep -q "${v6_addr}.*nettest" "${LXD_DIR}/networks/lxdt$$/dnsmasq.hosts" + lxc start nettest + + SUCCESS=0 + # shellcheck disable=SC2034 + for i in $(seq 10); do + lxc info nettest | grep -q fd42 && SUCCESS=1 && break + sleep 1 + done + + [ "${SUCCESS}" = "0" ] && (echo "Container static IP wasn't applied" && false) + + lxc delete nettest -f + lxc network delete lxdt$$ +} diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/test/suites/profiling.sh juju-core-2.0.0/src/github.com/lxc/lxd/test/suites/profiling.sh --- juju-core-2.0~beta15/src/github.com/lxc/lxd/test/suites/profiling.sh 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/test/suites/profiling.sh 2016-10-13 14:31:53.000000000 +0000 @@ -11,6 +11,10 @@ echo top5 | go tool pprof "$(which lxd)" "${LXD3_DIR}/cpu.out" echo "" + # Cleanup following manual kill + rm -f "${LXD3_DIR}/unix.socket" + find "${LXD3_DIR}" -name shmounts -exec "umount" "-l" "{}" \; >/dev/null 2>&1 || true + kill_lxd "${LXD3_DIR}" } diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/test/suites/remote.sh juju-core-2.0.0/src/github.com/lxc/lxd/test/suites/remote.sh --- juju-core-2.0~beta15/src/github.com/lxc/lxd/test/suites/remote.sh 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/test/suites/remote.sh 2016-10-13 14:31:53.000000000 +0000 @@ -1,21 +1,10 @@ #!/bin/sh -gen_second_cert() { - [ -f "${LXD_CONF}/client2.crt" ] && return - mv "${LXD_CONF}/client.crt" "${LXD_CONF}/client.crt.bak" - mv "${LXD_CONF}/client.key" "${LXD_CONF}/client.key.bak" - lxc_remote list > /dev/null 2>&1 - mv "${LXD_CONF}/client.crt"
"${LXD_CONF}/client2.crt" - mv "${LXD_CONF}/client.key" "${LXD_CONF}/client2.key" - mv "${LXD_CONF}/client.crt.bak" "${LXD_CONF}/client.crt" - mv "${LXD_CONF}/client.key.bak" "${LXD_CONF}/client.key" -} - test_remote_url() { for url in "${LXD_ADDR}" "https://${LXD_ADDR}"; do lxc_remote remote add test "${url}" --accept-certificate --password foo lxc_remote finger test: - lxc_remote config trust list | grep @ | awk '{print $2}' | while read line ; do + lxc_remote config trust list | grep @ | awk '{print $2}' | while read -r line ; do lxc_remote config trust remove "\"${line}\"" done lxc_remote remote remove test @@ -58,7 +47,7 @@ # we just re-add our cert under a different name to test the cert # manipulation mechanism. - gen_second_cert + gen_cert client2 # Test for #623 lxc_remote remote add test-623 "${LXD_ADDR}" --accept-certificate --password foo @@ -80,13 +69,16 @@ } test_remote_usage() { + ensure_import_testimage + ensure_has_localhost_remote "${LXD_ADDR}" + lxc_remote remote add lxd2 "${LXD2_ADDR}" --accept-certificate --password foo # we need a public image on localhost - lxc_remote image export localhost:testimage "${LXD_DIR}/foo.img" + img=$(lxc_remote image export localhost:testimage "${LXD_DIR}/foo" | grep -o "foo.*") lxc_remote image delete localhost:testimage - sum=$(sha256sum "${LXD_DIR}/foo.img" | cut -d' ' -f1) - lxc_remote image import "${LXD_DIR}/foo.img" localhost: --public + sum=$(sha256sum "${LXD_DIR}/${img}" | cut -d' ' -f1) + lxc_remote image import "${LXD_DIR}/${img}" localhost: --public lxc_remote image alias create localhost:testimage "${sum}" lxc_remote image delete "lxd2:${sum}" || true @@ -138,4 +130,22 @@ lxc_remote info lxd2:c1 lxc_remote stop lxd2:c1 --force lxc_remote delete lxd2:c1 + + # Test that local and public servers can be accessed without a client cert + mv "${LXD_CONF}/client.crt" "${LXD_CONF}/client.crt.bak" + mv "${LXD_CONF}/client.key" "${LXD_CONF}/client.key.bak" + + # testimage should still exist on the local server. 
Count the number of + # matches so the output isn't polluted with the results. + lxc_remote image list local: | grep -c testimage + + # Skip the truly remote servers in offline mode. There should always be + # Ubuntu images in the results for the remote servers. + if [ -z "${LXD_OFFLINE:-}" ]; then + lxc_remote image list images: | grep -i -c ubuntu + lxc_remote image list ubuntu: | grep -i -c ubuntu + fi + + mv "${LXD_CONF}/client.crt.bak" "${LXD_CONF}/client.crt" + mv "${LXD_CONF}/client.key.bak" "${LXD_CONF}/client.key" } diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/test/suites/security.sh juju-core-2.0.0/src/github.com/lxc/lxd/test/suites/security.sh --- juju-core-2.0~beta15/src/github.com/lxc/lxd/test/suites/security.sh 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/test/suites/security.sh 2016-10-13 14:31:53.000000000 +0000 @@ -0,0 +1,60 @@ +#!/bin/sh + +test_security() { + ensure_import_testimage + ensure_has_localhost_remote "${LXD_ADDR}" + + # CVE-2016-1581 + if [ "${LXD_BACKEND}" = "zfs" ]; then + LXD_INIT_DIR=$(mktemp -d -p "${TEST_DIR}" XXX) + chmod +x "${LXD_INIT_DIR}" + spawn_lxd "${LXD_INIT_DIR}" + + ZFS_POOL="lxdtest-$(basename "${LXD_DIR}")-init" + LXD_DIR=${LXD_INIT_DIR} lxd init --storage-backend zfs --storage-create-loop 1 --storage-pool "${ZFS_POOL}" --auto + + PERM=$(stat -c %a "${LXD_INIT_DIR}/zfs.img") + if [ "${PERM}" != "600" ]; then + echo "Bad zfs.img permissions: ${PERM}" + zpool destroy "${ZFS_POOL}" + false + fi + + zpool destroy "${ZFS_POOL}" + kill_lxd "${LXD_INIT_DIR}" + fi + + # CVE-2016-1582 + lxc launch testimage test-priv -c security.privileged=true + + PERM=$(stat -L -c %a "${LXD_DIR}/containers/test-priv") + if [ "${PERM}" != "700" ]; then + echo "Bad container permissions: ${PERM}" + false + fi + + lxc config set test-priv security.privileged false + lxc restart test-priv --force + lxc config set test-priv security.privileged true + lxc restart test-priv --force + + PERM=$(stat -L -c 
%a "${LXD_DIR}/containers/test-priv") + if [ "${PERM}" != "700" ]; then + echo "Bad container permissions: ${PERM}" + false + fi + + lxc delete test-priv --force + + lxc launch testimage test-unpriv + lxc config set test-unpriv security.privileged true + lxc restart test-unpriv --force + + PERM=$(stat -L -c %a "${LXD_DIR}/containers/test-unpriv") + if [ "${PERM}" != "700" ]; then + echo "Bad container permissions: ${PERM}" + false + fi + + lxc delete test-unpriv --force +} diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/test/suites/snapshots.sh juju-core-2.0.0/src/github.com/lxc/lxd/test/suites/snapshots.sh --- juju-core-2.0~beta15/src/github.com/lxc/lxd/test/suites/snapshots.sh 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/test/suites/snapshots.sh 2016-10-13 14:31:53.000000000 +0000 @@ -85,11 +85,8 @@ echo snap0 > state lxc file push state bar/root/state lxc file push state bar/root/file_only_in_snap0 - - mkdir "${LXD_DIR}/containers/bar/rootfs/root/dir_only_in_snap0" - cd "${LXD_DIR}/containers/bar/rootfs/root/" - ln -s ./file_only_in_snap0 statelink - cd - + lxc exec bar -- mkdir /root/dir_only_in_snap0 + lxc exec bar -- ln -s file_only_in_snap0 /root/statelink lxc stop bar --force lxc snapshot bar snap0 @@ -100,13 +97,11 @@ lxc file push state bar/root/state lxc file push state bar/root/file_only_in_snap1 - cd "${LXD_DIR}/containers/bar/rootfs/root/" - rmdir dir_only_in_snap0 - rm file_only_in_snap0 - rm statelink - ln -s ./file_only_in_snap1 statelink - mkdir dir_only_in_snap1 - cd - + lxc exec bar -- rmdir /root/dir_only_in_snap0 + lxc exec bar -- rm /root/file_only_in_snap0 + lxc exec bar -- rm /root/statelink + lxc exec bar -- ln -s file_only_in_snap1 /root/statelink + lxc exec bar -- mkdir /root/dir_only_in_snap1 lxc stop bar --force # Delete the state file we created to prevent leaking. 
@@ -118,8 +113,7 @@ ########################################################## - # FIXME: make this backend agnostic - if [ "${LXD_BACKEND}" = "dir" ]; then + if [ "${LXD_BACKEND}" != "zfs" ]; then # The problem here is that you can't `zfs rollback` to a snapshot with a # parent, which snap0 has (snap1). restore_and_compare_fs snap0 @@ -127,8 +121,8 @@ # Check container config has been restored (limits.cpu is unset) cpus=$(lxc config get bar limits.cpu) if [ -n "${cpus}" ]; then - echo "==> config didn't match expected value after restore (${cpus})" - false + echo "==> config didn't match expected value after restore (${cpus})" + false fi fi @@ -149,8 +143,7 @@ # Start container and then restore snapshot to verify the running state after restore. lxc start bar - # FIXME: make this backend agnostic - if [ "${LXD_BACKEND}" = "dir" ]; then + if [ "${LXD_BACKEND}" != "zfs" ]; then # see comment above about snap0 restore_and_compare_fs snap0 diff -Nru juju-core-2.0~beta15/src/github.com/lxc/lxd/test/suites/static_analysis.sh juju-core-2.0.0/src/github.com/lxc/lxd/test/suites/static_analysis.sh --- juju-core-2.0~beta15/src/github.com/lxc/lxd/test/suites/static_analysis.sh 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/github.com/lxc/lxd/test/suites/static_analysis.sh 2016-10-13 14:31:53.000000000 +0000 @@ -1,7 +1,7 @@ #!/bin/sh safe_pot_hash() { - sed -e "/Project-Id-Version/,/Content-Transfer-Encoding/d" -e "/^#/d" "po/lxd.pot" | tee /tmp/foo | md5sum | cut -f1 -d" " + sed -e "/Project-Id-Version/,/Content-Transfer-Encoding/d" -e "/^#/d" "po/lxd.pot" | md5sum | cut -f1 -d" " } test_static_analysis() { @@ -14,13 +14,13 @@ pyflakes3 test/deps/import-busybox scripts/lxd-setup-lvm-storage # Shell static analysis - shellcheck lxd-bridge/lxd-bridge test/main.sh test/suites/* test/backends/* + shellcheck test/main.sh test/suites/* test/backends/* # Go static analysis ## Functions starting by empty line OUT=$(grep -r "^$" -B1 . 
| grep "func " | grep -v "}$" || true) if [ -n "${OUT}" ]; then - echo "${OUT}" + echo "ERROR: Functions must not start with an empty line: ${OUT}" false fi @@ -38,7 +38,7 @@ ## deadcode if which deadcode >/dev/null 2>&1; then - for path in . lxc/ lxd/ shared/ shared/i18n shared/termios fuidshift/ lxd-bridge/lxd-bridge-proxy/; do + for path in . lxc/ lxd/ shared/ shared/i18n shared/termios fuidshift/; do OUT=$(deadcode ${path} 2>&1 | grep -v lxd/migrate.pb.go || true) if [ -n "${OUT}" ]; then echo "${OUT}" >&2 @@ -47,6 +47,14 @@ done fi + if which godeps >/dev/null 2>&1; then + OUT=$(godeps . ./shared | cut -f1) + if [ "${OUT}" != "$(printf "github.com/gorilla/websocket\ngopkg.in/yaml.v2\n")" ]; then + echo "ERROR: you added a new dependency to the client or shared; please make sure this is what you want" + echo "${OUT}" + fi + fi + # Skip the tests which require git if ! git status; then return diff -Nru juju-core-2.0~beta15/src/github.com/mattn/go-colorable/colorable_others.go juju-core-2.0.0/src/github.com/mattn/go-colorable/colorable_others.go --- juju-core-2.0~beta15/src/github.com/mattn/go-colorable/colorable_others.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/mattn/go-colorable/colorable_others.go 2016-10-13 14:32:32.000000000 +0000 @@ -0,0 +1,24 @@ +// +build !windows + +package colorable + +import ( + "io" + "os" +) + +func NewColorable(file *os.File) io.Writer { + if file == nil { + panic("nil passed instead of *os.File to NewColorable()") + } + + return file +} + +func NewColorableStdout() io.Writer { + return os.Stdout +} + +func NewColorableStderr() io.Writer { + return os.Stderr +} diff -Nru juju-core-2.0~beta15/src/github.com/mattn/go-colorable/colorable_windows.go juju-core-2.0.0/src/github.com/mattn/go-colorable/colorable_windows.go --- juju-core-2.0~beta15/src/github.com/mattn/go-colorable/colorable_windows.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/mattn/go-colorable/colorable_windows.go 
2016-10-13 14:32:32.000000000 +0000 @@ -0,0 +1,809 @@ +package colorable + +import ( + "bytes" + "fmt" + "io" + "math" + "os" + "strconv" + "strings" + "syscall" + "unsafe" + + "github.com/mattn/go-isatty" +) + +const ( + foregroundBlue = 0x1 + foregroundGreen = 0x2 + foregroundRed = 0x4 + foregroundIntensity = 0x8 + foregroundMask = (foregroundRed | foregroundBlue | foregroundGreen | foregroundIntensity) + backgroundBlue = 0x10 + backgroundGreen = 0x20 + backgroundRed = 0x40 + backgroundIntensity = 0x80 + backgroundMask = (backgroundRed | backgroundBlue | backgroundGreen | backgroundIntensity) +) + +type wchar uint16 +type short int16 +type dword uint32 +type word uint16 + +type coord struct { + x short + y short +} + +type smallRect struct { + left short + top short + right short + bottom short +} + +type consoleScreenBufferInfo struct { + size coord + cursorPosition coord + attributes word + window smallRect + maximumWindowSize coord +} + +type consoleCursorInfo struct { + size dword + visible int32 +} + +var ( + kernel32 = syscall.NewLazyDLL("kernel32.dll") + procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo") + procSetConsoleTextAttribute = kernel32.NewProc("SetConsoleTextAttribute") + procSetConsoleCursorPosition = kernel32.NewProc("SetConsoleCursorPosition") + procFillConsoleOutputCharacter = kernel32.NewProc("FillConsoleOutputCharacterW") + procFillConsoleOutputAttribute = kernel32.NewProc("FillConsoleOutputAttribute") + procGetConsoleCursorInfo = kernel32.NewProc("GetConsoleCursorInfo") + procSetConsoleCursorInfo = kernel32.NewProc("SetConsoleCursorInfo") +) + +type Writer struct { + out io.Writer + handle syscall.Handle + lastbuf bytes.Buffer + oldattr word +} + +func NewColorable(file *os.File) io.Writer { + if file == nil { + panic("nil passed instead of *os.File to NewColorable()") + } + + if isatty.IsTerminal(file.Fd()) { + var csbi consoleScreenBufferInfo + handle := syscall.Handle(file.Fd()) + 
procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + return &Writer{out: file, handle: handle, oldattr: csbi.attributes} + } else { + return file + } +} + +func NewColorableStdout() io.Writer { + return NewColorable(os.Stdout) +} + +func NewColorableStderr() io.Writer { + return NewColorable(os.Stderr) +} + +var color256 = map[int]int{ + 0: 0x000000, + 1: 0x800000, + 2: 0x008000, + 3: 0x808000, + 4: 0x000080, + 5: 0x800080, + 6: 0x008080, + 7: 0xc0c0c0, + 8: 0x808080, + 9: 0xff0000, + 10: 0x00ff00, + 11: 0xffff00, + 12: 0x0000ff, + 13: 0xff00ff, + 14: 0x00ffff, + 15: 0xffffff, + 16: 0x000000, + 17: 0x00005f, + 18: 0x000087, + 19: 0x0000af, + 20: 0x0000d7, + 21: 0x0000ff, + 22: 0x005f00, + 23: 0x005f5f, + 24: 0x005f87, + 25: 0x005faf, + 26: 0x005fd7, + 27: 0x005fff, + 28: 0x008700, + 29: 0x00875f, + 30: 0x008787, + 31: 0x0087af, + 32: 0x0087d7, + 33: 0x0087ff, + 34: 0x00af00, + 35: 0x00af5f, + 36: 0x00af87, + 37: 0x00afaf, + 38: 0x00afd7, + 39: 0x00afff, + 40: 0x00d700, + 41: 0x00d75f, + 42: 0x00d787, + 43: 0x00d7af, + 44: 0x00d7d7, + 45: 0x00d7ff, + 46: 0x00ff00, + 47: 0x00ff5f, + 48: 0x00ff87, + 49: 0x00ffaf, + 50: 0x00ffd7, + 51: 0x00ffff, + 52: 0x5f0000, + 53: 0x5f005f, + 54: 0x5f0087, + 55: 0x5f00af, + 56: 0x5f00d7, + 57: 0x5f00ff, + 58: 0x5f5f00, + 59: 0x5f5f5f, + 60: 0x5f5f87, + 61: 0x5f5faf, + 62: 0x5f5fd7, + 63: 0x5f5fff, + 64: 0x5f8700, + 65: 0x5f875f, + 66: 0x5f8787, + 67: 0x5f87af, + 68: 0x5f87d7, + 69: 0x5f87ff, + 70: 0x5faf00, + 71: 0x5faf5f, + 72: 0x5faf87, + 73: 0x5fafaf, + 74: 0x5fafd7, + 75: 0x5fafff, + 76: 0x5fd700, + 77: 0x5fd75f, + 78: 0x5fd787, + 79: 0x5fd7af, + 80: 0x5fd7d7, + 81: 0x5fd7ff, + 82: 0x5fff00, + 83: 0x5fff5f, + 84: 0x5fff87, + 85: 0x5fffaf, + 86: 0x5fffd7, + 87: 0x5fffff, + 88: 0x870000, + 89: 0x87005f, + 90: 0x870087, + 91: 0x8700af, + 92: 0x8700d7, + 93: 0x8700ff, + 94: 0x875f00, + 95: 0x875f5f, + 96: 0x875f87, + 97: 0x875faf, + 98: 0x875fd7, + 99: 0x875fff, + 100: 0x878700, + 101: 0x87875f, + 
102: 0x878787, + 103: 0x8787af, + 104: 0x8787d7, + 105: 0x8787ff, + 106: 0x87af00, + 107: 0x87af5f, + 108: 0x87af87, + 109: 0x87afaf, + 110: 0x87afd7, + 111: 0x87afff, + 112: 0x87d700, + 113: 0x87d75f, + 114: 0x87d787, + 115: 0x87d7af, + 116: 0x87d7d7, + 117: 0x87d7ff, + 118: 0x87ff00, + 119: 0x87ff5f, + 120: 0x87ff87, + 121: 0x87ffaf, + 122: 0x87ffd7, + 123: 0x87ffff, + 124: 0xaf0000, + 125: 0xaf005f, + 126: 0xaf0087, + 127: 0xaf00af, + 128: 0xaf00d7, + 129: 0xaf00ff, + 130: 0xaf5f00, + 131: 0xaf5f5f, + 132: 0xaf5f87, + 133: 0xaf5faf, + 134: 0xaf5fd7, + 135: 0xaf5fff, + 136: 0xaf8700, + 137: 0xaf875f, + 138: 0xaf8787, + 139: 0xaf87af, + 140: 0xaf87d7, + 141: 0xaf87ff, + 142: 0xafaf00, + 143: 0xafaf5f, + 144: 0xafaf87, + 145: 0xafafaf, + 146: 0xafafd7, + 147: 0xafafff, + 148: 0xafd700, + 149: 0xafd75f, + 150: 0xafd787, + 151: 0xafd7af, + 152: 0xafd7d7, + 153: 0xafd7ff, + 154: 0xafff00, + 155: 0xafff5f, + 156: 0xafff87, + 157: 0xafffaf, + 158: 0xafffd7, + 159: 0xafffff, + 160: 0xd70000, + 161: 0xd7005f, + 162: 0xd70087, + 163: 0xd700af, + 164: 0xd700d7, + 165: 0xd700ff, + 166: 0xd75f00, + 167: 0xd75f5f, + 168: 0xd75f87, + 169: 0xd75faf, + 170: 0xd75fd7, + 171: 0xd75fff, + 172: 0xd78700, + 173: 0xd7875f, + 174: 0xd78787, + 175: 0xd787af, + 176: 0xd787d7, + 177: 0xd787ff, + 178: 0xd7af00, + 179: 0xd7af5f, + 180: 0xd7af87, + 181: 0xd7afaf, + 182: 0xd7afd7, + 183: 0xd7afff, + 184: 0xd7d700, + 185: 0xd7d75f, + 186: 0xd7d787, + 187: 0xd7d7af, + 188: 0xd7d7d7, + 189: 0xd7d7ff, + 190: 0xd7ff00, + 191: 0xd7ff5f, + 192: 0xd7ff87, + 193: 0xd7ffaf, + 194: 0xd7ffd7, + 195: 0xd7ffff, + 196: 0xff0000, + 197: 0xff005f, + 198: 0xff0087, + 199: 0xff00af, + 200: 0xff00d7, + 201: 0xff00ff, + 202: 0xff5f00, + 203: 0xff5f5f, + 204: 0xff5f87, + 205: 0xff5faf, + 206: 0xff5fd7, + 207: 0xff5fff, + 208: 0xff8700, + 209: 0xff875f, + 210: 0xff8787, + 211: 0xff87af, + 212: 0xff87d7, + 213: 0xff87ff, + 214: 0xffaf00, + 215: 0xffaf5f, + 216: 0xffaf87, + 217: 0xffafaf, + 218: 0xffafd7, + 219: 
0xffafff, + 220: 0xffd700, + 221: 0xffd75f, + 222: 0xffd787, + 223: 0xffd7af, + 224: 0xffd7d7, + 225: 0xffd7ff, + 226: 0xffff00, + 227: 0xffff5f, + 228: 0xffff87, + 229: 0xffffaf, + 230: 0xffffd7, + 231: 0xffffff, + 232: 0x080808, + 233: 0x121212, + 234: 0x1c1c1c, + 235: 0x262626, + 236: 0x303030, + 237: 0x3a3a3a, + 238: 0x444444, + 239: 0x4e4e4e, + 240: 0x585858, + 241: 0x626262, + 242: 0x6c6c6c, + 243: 0x767676, + 244: 0x808080, + 245: 0x8a8a8a, + 246: 0x949494, + 247: 0x9e9e9e, + 248: 0xa8a8a8, + 249: 0xb2b2b2, + 250: 0xbcbcbc, + 251: 0xc6c6c6, + 252: 0xd0d0d0, + 253: 0xdadada, + 254: 0xe4e4e4, + 255: 0xeeeeee, +} + +func (w *Writer) Write(data []byte) (n int, err error) { + var csbi consoleScreenBufferInfo + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + + er := bytes.NewBuffer(data) +loop: + for { + r1, _, err := procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + if r1 == 0 { + break loop + } + + c1, _, err := er.ReadRune() + if err != nil { + break loop + } + if c1 != 0x1b { + fmt.Fprint(w.out, string(c1)) + continue + } + c2, _, err := er.ReadRune() + if err != nil { + w.lastbuf.WriteRune(c1) + break loop + } + if c2 != 0x5b { + w.lastbuf.WriteRune(c1) + w.lastbuf.WriteRune(c2) + continue + } + + var buf bytes.Buffer + var m rune + for { + c, _, err := er.ReadRune() + if err != nil { + w.lastbuf.WriteRune(c1) + w.lastbuf.WriteRune(c2) + w.lastbuf.Write(buf.Bytes()) + break loop + } + if ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || c == '@' { + m = c + break + } + buf.Write([]byte(string(c))) + } + + var csbi consoleScreenBufferInfo + switch m { + case 'A': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.y -= short(n) + procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'B': + n, err 
= strconv.Atoi(buf.String()) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.y += short(n) + procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'C': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x -= short(n) + procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'D': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + if n, err = strconv.Atoi(buf.String()); err == nil { + var csbi consoleScreenBufferInfo + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x += short(n) + procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + } + case 'E': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x = 0 + csbi.cursorPosition.y += short(n) + procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'F': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x = 0 + csbi.cursorPosition.y -= short(n) + procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'G': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x = short(n-1) + procSetConsoleCursorPosition.Call(uintptr(w.handle), 
*(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'H': + token := strings.Split(buf.String(), ";") + if len(token) != 2 { + continue + } + n1, err := strconv.Atoi(token[0]) + if err != nil { + continue + } + n2, err := strconv.Atoi(token[1]) + if err != nil { + continue + } + csbi.cursorPosition.x = short(n2-1) + csbi.cursorPosition.y = short(n1-1) + procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'J': + n, err := strconv.Atoi(buf.String()) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + var cursor coord + switch n { + case 0: + cursor = coord{x: csbi.cursorPosition.x, y: csbi.cursorPosition.y} + case 1: + cursor = coord{x: csbi.window.left, y: csbi.window.top} + case 2: + cursor = coord{x: csbi.window.left, y: csbi.window.top} + } + var count, written dword + count = dword(csbi.size.x - csbi.cursorPosition.x + (csbi.size.y-csbi.cursorPosition.y)*csbi.size.x) + procFillConsoleOutputCharacter.Call(uintptr(w.handle), uintptr(' '), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + procFillConsoleOutputAttribute.Call(uintptr(w.handle), uintptr(csbi.attributes), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + case 'K': + n, err := strconv.Atoi(buf.String()) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + var cursor coord + switch n { + case 0: + cursor = coord{x: csbi.cursorPosition.x, y: csbi.cursorPosition.y} + case 1: + cursor = coord{x: csbi.window.left, y: csbi.window.top + csbi.cursorPosition.y} + case 2: + cursor = coord{x: csbi.window.left, y: csbi.window.top + csbi.cursorPosition.y} + } + var count, written dword + count = dword(csbi.size.x - csbi.cursorPosition.x) + procFillConsoleOutputCharacter.Call(uintptr(w.handle), uintptr(' '), uintptr(count), 
*(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + procFillConsoleOutputAttribute.Call(uintptr(w.handle), uintptr(csbi.attributes), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + case 'm': + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + attr := csbi.attributes + cs := buf.String() + if cs == "" { + procSetConsoleTextAttribute.Call(uintptr(w.handle), uintptr(w.oldattr)) + continue + } + token := strings.Split(cs, ";") + for i := 0; i < len(token); i++ { + ns := token[i] + if n, err = strconv.Atoi(ns); err == nil { + switch { + case n == 0 || n == 100: + attr = w.oldattr + case 1 <= n && n <= 5: + attr |= foregroundIntensity + case n == 7: + attr = ((attr & foregroundMask) << 4) | ((attr & backgroundMask) >> 4) + case 22 == n || n == 25 || n == 25: + attr |= foregroundIntensity + case n == 27: + attr = ((attr & foregroundMask) << 4) | ((attr & backgroundMask) >> 4) + case 30 <= n && n <= 37: + attr &= backgroundMask + if (n-30)&1 != 0 { + attr |= foregroundRed + } + if (n-30)&2 != 0 { + attr |= foregroundGreen + } + if (n-30)&4 != 0 { + attr |= foregroundBlue + } + case n == 38: // set foreground color. + if i < len(token)-2 && (token[i+1] == "5" || token[i+1] == "05") { + if n256, err := strconv.Atoi(token[i+2]); err == nil { + if n256foreAttr == nil { + n256setup() + } + attr &= backgroundMask + attr |= n256foreAttr[n256] + i += 2 + } + } else { + attr = attr & (w.oldattr & backgroundMask) + } + case n == 39: // reset foreground color. + attr &= backgroundMask + attr |= w.oldattr & foregroundMask + case 40 <= n && n <= 47: + attr &= foregroundMask + if (n-40)&1 != 0 { + attr |= backgroundRed + } + if (n-40)&2 != 0 { + attr |= backgroundGreen + } + if (n-40)&4 != 0 { + attr |= backgroundBlue + } + case n == 48: // set background color. 
+ if i < len(token)-2 && token[i+1] == "5" { + if n256, err := strconv.Atoi(token[i+2]); err == nil { + if n256backAttr == nil { + n256setup() + } + attr &= foregroundMask + attr |= n256backAttr[n256] + i += 2 + } + } else { + attr = attr & (w.oldattr & foregroundMask) + } + case n == 49: // reset foreground color. + attr &= foregroundMask + attr |= w.oldattr & backgroundMask + case 90 <= n && n <= 97: + attr = (attr & backgroundMask) + attr |= foregroundIntensity + if (n-90)&1 != 0 { + attr |= foregroundRed + } + if (n-90)&2 != 0 { + attr |= foregroundGreen + } + if (n-90)&4 != 0 { + attr |= foregroundBlue + } + case 100 <= n && n <= 107: + attr = (attr & foregroundMask) + attr |= backgroundIntensity + if (n-100)&1 != 0 { + attr |= backgroundRed + } + if (n-100)&2 != 0 { + attr |= backgroundGreen + } + if (n-100)&4 != 0 { + attr |= backgroundBlue + } + } + procSetConsoleTextAttribute.Call(uintptr(w.handle), uintptr(attr)) + } + } + case 'h': + cs := buf.String() + if cs == "?25" { + var ci consoleCursorInfo + procGetConsoleCursorInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&ci))) + ci.visible = 1 + procSetConsoleCursorInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&ci))) + } + case 'l': + cs := buf.String() + if cs == "?25" { + var ci consoleCursorInfo + procGetConsoleCursorInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&ci))) + ci.visible = 0 + procSetConsoleCursorInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&ci))) + } + } + } + return len(data) - w.lastbuf.Len(), nil +} + +type consoleColor struct { + rgb int + red bool + green bool + blue bool + intensity bool +} + +func (c consoleColor) foregroundAttr() (attr word) { + if c.red { + attr |= foregroundRed + } + if c.green { + attr |= foregroundGreen + } + if c.blue { + attr |= foregroundBlue + } + if c.intensity { + attr |= foregroundIntensity + } + return +} + +func (c consoleColor) backgroundAttr() (attr word) { + if c.red { + attr |= backgroundRed + } + if c.green { + attr |= 
backgroundGreen + } + if c.blue { + attr |= backgroundBlue + } + if c.intensity { + attr |= backgroundIntensity + } + return +} + +var color16 = []consoleColor{ + consoleColor{0x000000, false, false, false, false}, + consoleColor{0x000080, false, false, true, false}, + consoleColor{0x008000, false, true, false, false}, + consoleColor{0x008080, false, true, true, false}, + consoleColor{0x800000, true, false, false, false}, + consoleColor{0x800080, true, false, true, false}, + consoleColor{0x808000, true, true, false, false}, + consoleColor{0xc0c0c0, true, true, true, false}, + consoleColor{0x808080, false, false, false, true}, + consoleColor{0x0000ff, false, false, true, true}, + consoleColor{0x00ff00, false, true, false, true}, + consoleColor{0x00ffff, false, true, true, true}, + consoleColor{0xff0000, true, false, false, true}, + consoleColor{0xff00ff, true, false, true, true}, + consoleColor{0xffff00, true, true, false, true}, + consoleColor{0xffffff, true, true, true, true}, +} + +type hsv struct { + h, s, v float32 +} + +func (a hsv) dist(b hsv) float32 { + dh := a.h - b.h + switch { + case dh > 0.5: + dh = 1 - dh + case dh < -0.5: + dh = -1 - dh + } + ds := a.s - b.s + dv := a.v - b.v + return float32(math.Sqrt(float64(dh*dh + ds*ds + dv*dv))) +} + +func toHSV(rgb int) hsv { + r, g, b := float32((rgb&0xFF0000)>>16)/256.0, + float32((rgb&0x00FF00)>>8)/256.0, + float32(rgb&0x0000FF)/256.0 + min, max := minmax3f(r, g, b) + h := max - min + if h > 0 { + if max == r { + h = (g - b) / h + if h < 0 { + h += 6 + } + } else if max == g { + h = 2 + (b-r)/h + } else { + h = 4 + (r-g)/h + } + } + h /= 6.0 + s := max - min + if max != 0 { + s /= max + } + v := max + return hsv{h: h, s: s, v: v} +} + +type hsvTable []hsv + +func toHSVTable(rgbTable []consoleColor) hsvTable { + t := make(hsvTable, len(rgbTable)) + for i, c := range rgbTable { + t[i] = toHSV(c.rgb) + } + return t +} + +func (t hsvTable) find(rgb int) consoleColor { + hsv := toHSV(rgb) + n := 7 + l := 
float32(5.0) + for i, p := range t { + d := hsv.dist(p) + if d < l { + l, n = d, i + } + } + return color16[n] +} + +func minmax3f(a, b, c float32) (min, max float32) { + if a < b { + if b < c { + return a, c + } else if a < c { + return a, b + } else { + return c, b + } + } else { + if a < c { + return b, c + } else if b < c { + return b, a + } else { + return c, a + } + } +} + +var n256foreAttr []word +var n256backAttr []word + +func n256setup() { + n256foreAttr = make([]word, 256) + n256backAttr = make([]word, 256) + t := toHSVTable(color16) + for i, rgb := range color256 { + c := t.find(rgb) + n256foreAttr[i] = c.foregroundAttr() + n256backAttr[i] = c.backgroundAttr() + } +} diff -Nru juju-core-2.0~beta15/src/github.com/mattn/go-colorable/_example/main.go juju-core-2.0.0/src/github.com/mattn/go-colorable/_example/main.go --- juju-core-2.0~beta15/src/github.com/mattn/go-colorable/_example/main.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/mattn/go-colorable/_example/main.go 2016-10-13 14:32:32.000000000 +0000 @@ -0,0 +1,16 @@ +package main + +import ( + "github.com/Sirupsen/logrus" + "github.com/mattn/go-colorable" +) + +func main() { + logrus.SetFormatter(&logrus.TextFormatter{ForceColors: true}) + logrus.SetOutput(colorable.NewColorableStdout()) + + logrus.Info("succeeded") + logrus.Warn("not correct") + logrus.Error("something error") + logrus.Fatal("panic") +} diff -Nru juju-core-2.0~beta15/src/github.com/mattn/go-colorable/_example2/main.go juju-core-2.0.0/src/github.com/mattn/go-colorable/_example2/main.go --- juju-core-2.0~beta15/src/github.com/mattn/go-colorable/_example2/main.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/mattn/go-colorable/_example2/main.go 2016-10-13 14:32:32.000000000 +0000 @@ -0,0 +1,15 @@ +package main + +import ( + "bufio" + "fmt" + ".." 
+) + +func main(){ + stdOut := bufio.NewWriter(colorable.NewColorableStdout()) + + fmt.Fprint(stdOut,"\x1B[3GMove to 3rd Column\n") + fmt.Fprint(stdOut,"\x1B[1;2HMove to 2nd Column on 1st Line\n") + stdOut.Flush() +} diff -Nru juju-core-2.0~beta15/src/github.com/mattn/go-colorable/LICENSE juju-core-2.0.0/src/github.com/mattn/go-colorable/LICENSE --- juju-core-2.0~beta15/src/github.com/mattn/go-colorable/LICENSE 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/mattn/go-colorable/LICENSE 2016-10-13 14:32:32.000000000 +0000 @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2016 Yasuhiro Matsumoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff -Nru juju-core-2.0~beta15/src/github.com/mattn/go-colorable/noncolorable.go juju-core-2.0.0/src/github.com/mattn/go-colorable/noncolorable.go --- juju-core-2.0~beta15/src/github.com/mattn/go-colorable/noncolorable.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/mattn/go-colorable/noncolorable.go 2016-10-13 14:32:32.000000000 +0000 @@ -0,0 +1,57 @@ +package colorable + +import ( + "bytes" + "fmt" + "io" +) + +type NonColorable struct { + out io.Writer + lastbuf bytes.Buffer +} + +func NewNonColorable(w io.Writer) io.Writer { + return &NonColorable{out: w} +} + +func (w *NonColorable) Write(data []byte) (n int, err error) { + er := bytes.NewBuffer(data) +loop: + for { + c1, _, err := er.ReadRune() + if err != nil { + break loop + } + if c1 != 0x1b { + fmt.Fprint(w.out, string(c1)) + continue + } + c2, _, err := er.ReadRune() + if err != nil { + w.lastbuf.WriteRune(c1) + break loop + } + if c2 != 0x5b { + w.lastbuf.WriteRune(c1) + w.lastbuf.WriteRune(c2) + continue + } + + var buf bytes.Buffer + for { + c, _, err := er.ReadRune() + if err != nil { + w.lastbuf.WriteRune(c1) + w.lastbuf.WriteRune(c2) + w.lastbuf.Write(buf.Bytes()) + break loop + } + if ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || c == '@' { + break + } + buf.Write([]byte(string(c))) + } + } + return len(data) - w.lastbuf.Len(), nil +} diff -Nru juju-core-2.0~beta15/src/github.com/mattn/go-colorable/README.md juju-core-2.0.0/src/github.com/mattn/go-colorable/README.md --- juju-core-2.0~beta15/src/github.com/mattn/go-colorable/README.md 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/mattn/go-colorable/README.md 2016-10-13 14:32:32.000000000 +0000 @@ -0,0 +1,43 @@ +# go-colorable + +Colorable writer for windows. + +For example, most of logger packages doesn't show colors on windows. (I know we can do it with ansicon. But I don't want.) +This package is possible to handle escape sequence for ansi color on windows. + +## Too Bad! 
+ +![](https://raw.githubusercontent.com/mattn/go-colorable/gh-pages/bad.png) + + +## So Good! + +![](https://raw.githubusercontent.com/mattn/go-colorable/gh-pages/good.png) + +## Usage + +```go +logrus.SetFormatter(&logrus.TextFormatter{ForceColors: true}) +logrus.SetOutput(colorable.NewColorableStdout()) + +logrus.Info("succeeded") +logrus.Warn("not correct") +logrus.Error("something error") +logrus.Fatal("panic") +``` + +You can compile above code on non-windows OSs. + +## Installation + +``` +$ go get github.com/mattn/go-colorable +``` + +# License + +MIT + +# Author + +Yasuhiro Matsumoto (a.k.a mattn) diff -Nru juju-core-2.0~beta15/src/github.com/mattn/go-isatty/doc.go juju-core-2.0.0/src/github.com/mattn/go-isatty/doc.go --- juju-core-2.0~beta15/src/github.com/mattn/go-isatty/doc.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/mattn/go-isatty/doc.go 2016-10-13 14:32:34.000000000 +0000 @@ -0,0 +1,2 @@ +// Package isatty implements interface to isatty +package isatty diff -Nru juju-core-2.0~beta15/src/github.com/mattn/go-isatty/_example/example.go juju-core-2.0.0/src/github.com/mattn/go-isatty/_example/example.go --- juju-core-2.0~beta15/src/github.com/mattn/go-isatty/_example/example.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/mattn/go-isatty/_example/example.go 2016-10-13 14:32:34.000000000 +0000 @@ -0,0 +1,15 @@ +package main + +import ( + "fmt" + "github.com/mattn/go-isatty" + "os" +) + +func main() { + if isatty.IsTerminal(int(os.Stdout.Fd())) { + fmt.Println("Is Terminal") + } else { + fmt.Println("Is Not Terminal") + } +} diff -Nru juju-core-2.0~beta15/src/github.com/mattn/go-isatty/isatty_appengine.go juju-core-2.0.0/src/github.com/mattn/go-isatty/isatty_appengine.go --- juju-core-2.0~beta15/src/github.com/mattn/go-isatty/isatty_appengine.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/mattn/go-isatty/isatty_appengine.go 2016-10-13 14:32:34.000000000 +0000 @@ -0,0 +1,9 @@ 
+// +build appengine + +package isatty + +// IsTerminal returns true if the file descriptor is terminal which +// is always false on on appengine classic which is a sandboxed PaaS. +func IsTerminal(fd uintptr) bool { + return false +} diff -Nru juju-core-2.0~beta15/src/github.com/mattn/go-isatty/isatty_bsd.go juju-core-2.0.0/src/github.com/mattn/go-isatty/isatty_bsd.go --- juju-core-2.0~beta15/src/github.com/mattn/go-isatty/isatty_bsd.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/mattn/go-isatty/isatty_bsd.go 2016-10-13 14:32:34.000000000 +0000 @@ -0,0 +1,18 @@ +// +build darwin freebsd openbsd netbsd dragonfly +// +build !appengine + +package isatty + +import ( + "syscall" + "unsafe" +) + +const ioctlReadTermios = syscall.TIOCGETA + +// IsTerminal return true if the file descriptor is terminal. +func IsTerminal(fd uintptr) bool { + var termios syscall.Termios + _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0) + return err == 0 +} diff -Nru juju-core-2.0~beta15/src/github.com/mattn/go-isatty/isatty_linux.go juju-core-2.0.0/src/github.com/mattn/go-isatty/isatty_linux.go --- juju-core-2.0~beta15/src/github.com/mattn/go-isatty/isatty_linux.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/mattn/go-isatty/isatty_linux.go 2016-10-13 14:32:34.000000000 +0000 @@ -0,0 +1,18 @@ +// +build linux +// +build !appengine + +package isatty + +import ( + "syscall" + "unsafe" +) + +const ioctlReadTermios = syscall.TCGETS + +// IsTerminal return true if the file descriptor is terminal. 
+func IsTerminal(fd uintptr) bool { + var termios syscall.Termios + _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0) + return err == 0 +} diff -Nru juju-core-2.0~beta15/src/github.com/mattn/go-isatty/isatty_solaris.go juju-core-2.0.0/src/github.com/mattn/go-isatty/isatty_solaris.go --- juju-core-2.0~beta15/src/github.com/mattn/go-isatty/isatty_solaris.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/mattn/go-isatty/isatty_solaris.go 2016-10-13 14:32:34.000000000 +0000 @@ -0,0 +1,16 @@ +// +build solaris +// +build !appengine + +package isatty + +import ( + "golang.org/x/sys/unix" +) + +// IsTerminal returns true if the given file descriptor is a terminal. +// see: http://src.illumos.org/source/xref/illumos-gate/usr/src/lib/libbc/libc/gen/common/isatty.c +func IsTerminal(fd uintptr) bool { + var termio unix.Termio + err := unix.IoctlSetTermio(int(fd), unix.TCGETA, &termio) + return err == nil +} diff -Nru juju-core-2.0~beta15/src/github.com/mattn/go-isatty/isatty_windows.go juju-core-2.0.0/src/github.com/mattn/go-isatty/isatty_windows.go --- juju-core-2.0~beta15/src/github.com/mattn/go-isatty/isatty_windows.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/mattn/go-isatty/isatty_windows.go 2016-10-13 14:32:34.000000000 +0000 @@ -0,0 +1,19 @@ +// +build windows +// +build !appengine + +package isatty + +import ( + "syscall" + "unsafe" +) + +var kernel32 = syscall.NewLazyDLL("kernel32.dll") +var procGetConsoleMode = kernel32.NewProc("GetConsoleMode") + +// IsTerminal return true if the file descriptor is terminal. 
+func IsTerminal(fd uintptr) bool { + var st uint32 + r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, fd, uintptr(unsafe.Pointer(&st)), 0) + return r != 0 && e == 0 +} diff -Nru juju-core-2.0~beta15/src/github.com/mattn/go-isatty/LICENSE juju-core-2.0.0/src/github.com/mattn/go-isatty/LICENSE --- juju-core-2.0~beta15/src/github.com/mattn/go-isatty/LICENSE 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/mattn/go-isatty/LICENSE 2016-10-13 14:32:34.000000000 +0000 @@ -0,0 +1,9 @@ +Copyright (c) Yasuhiro MATSUMOTO + +MIT License (Expat) + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff -Nru juju-core-2.0~beta15/src/github.com/mattn/go-isatty/README.md juju-core-2.0.0/src/github.com/mattn/go-isatty/README.md --- juju-core-2.0~beta15/src/github.com/mattn/go-isatty/README.md 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/mattn/go-isatty/README.md 2016-10-13 14:32:34.000000000 +0000 @@ -0,0 +1,37 @@ +# go-isatty + +isatty for golang + +## Usage + +```go +package main + +import ( + "fmt" + "github.com/mattn/go-isatty" + "os" +) + +func main() { + if isatty.IsTerminal(os.Stdout.Fd()) { + fmt.Println("Is Terminal") + } else { + fmt.Println("Is Not Terminal") + } +} +``` + +## Installation + +``` +$ go get github.com/mattn/go-isatty +``` + +# License + +MIT + +# Author + +Yasuhiro Matsumoto (a.k.a mattn) diff -Nru juju-core-2.0~beta15/src/github.com/matttproud/golang_protobuf_extensions/ext/moved.go juju-core-2.0.0/src/github.com/matttproud/golang_protobuf_extensions/ext/moved.go --- juju-core-2.0~beta15/src/github.com/matttproud/golang_protobuf_extensions/ext/moved.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/matttproud/golang_protobuf_extensions/ext/moved.go 2016-10-13 14:32:27.000000000 +0000 @@ -0,0 +1,2 @@ +// Package ext moved to a new location: github.com/matttproud/golang_protobuf_extensions/pbutil. +package ext diff -Nru juju-core-2.0~beta15/src/github.com/matttproud/golang_protobuf_extensions/LICENSE juju-core-2.0.0/src/github.com/matttproud/golang_protobuf_extensions/LICENSE --- juju-core-2.0~beta15/src/github.com/matttproud/golang_protobuf_extensions/LICENSE 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/matttproud/golang_protobuf_extensions/LICENSE 2016-10-13 14:32:27.000000000 +0000 @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff -Nru juju-core-2.0~beta15/src/github.com/matttproud/golang_protobuf_extensions/Makefile.TRAVIS juju-core-2.0.0/src/github.com/matttproud/golang_protobuf_extensions/Makefile.TRAVIS --- juju-core-2.0~beta15/src/github.com/matttproud/golang_protobuf_extensions/Makefile.TRAVIS 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/matttproud/golang_protobuf_extensions/Makefile.TRAVIS 2016-10-13 14:32:27.000000000 +0000 @@ -0,0 +1,15 @@ +all: build cover test vet + +build: + go build -v ./... + +cover: test + $(MAKE) -C pbutil cover + +test: build + go test -v ./... + +vet: build + go vet -v ./... 
+ +.PHONY: build cover test vet diff -Nru juju-core-2.0~beta15/src/github.com/matttproud/golang_protobuf_extensions/NOTICE juju-core-2.0.0/src/github.com/matttproud/golang_protobuf_extensions/NOTICE --- juju-core-2.0~beta15/src/github.com/matttproud/golang_protobuf_extensions/NOTICE 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/matttproud/golang_protobuf_extensions/NOTICE 2016-10-13 14:32:27.000000000 +0000 @@ -0,0 +1 @@ +Copyright 2012 Matt T. Proud (matt.proud@gmail.com) diff -Nru juju-core-2.0~beta15/src/github.com/matttproud/golang_protobuf_extensions/pbtest/deleted.go juju-core-2.0.0/src/github.com/matttproud/golang_protobuf_extensions/pbtest/deleted.go --- juju-core-2.0~beta15/src/github.com/matttproud/golang_protobuf_extensions/pbtest/deleted.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/matttproud/golang_protobuf_extensions/pbtest/deleted.go 2016-10-13 14:32:27.000000000 +0000 @@ -0,0 +1,2 @@ +// Package pbtest is deleted for the time being, because upstream Protocol Buffer 3 may have rendered quick.Value-based blackbox generation impossible. +package pbtest diff -Nru juju-core-2.0~beta15/src/github.com/matttproud/golang_protobuf_extensions/pbutil/all_test.go juju-core-2.0.0/src/github.com/matttproud/golang_protobuf_extensions/pbutil/all_test.go --- juju-core-2.0~beta15/src/github.com/matttproud/golang_protobuf_extensions/pbutil/all_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/matttproud/golang_protobuf_extensions/pbutil/all_test.go 2016-10-13 14:32:27.000000000 +0000 @@ -0,0 +1,178 @@ +// Copyright 2013 Matt T. Proud +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pbutil + +import ( + "bytes" + "testing" + + "github.com/golang/protobuf/proto" + + . "github.com/matttproud/golang_protobuf_extensions/testdata" +) + +func TestWriteDelimited(t *testing.T) { + t.Parallel() + for _, test := range []struct { + msg proto.Message + buf []byte + n int + err error + }{ + { + msg: &Empty{}, + n: 1, + buf: []byte{0}, + }, + { + msg: &GoEnum{Foo: FOO_FOO1.Enum()}, + n: 3, + buf: []byte{2, 8, 1}, + }, + { + msg: &Strings{ + StringField: proto.String(`This is my gigantic, unhappy string. It exceeds +the encoding size of a single byte varint. We are using it to fuzz test the +correctness of the header decoding mechanisms, which may prove problematic. +I expect it may. 
Let's hope you enjoy testing as much as we do.`), + }, + n: 271, + buf: []byte{141, 2, 10, 138, 2, 84, 104, 105, 115, 32, 105, 115, 32, 109, + 121, 32, 103, 105, 103, 97, 110, 116, 105, 99, 44, 32, 117, 110, 104, + 97, 112, 112, 121, 32, 115, 116, 114, 105, 110, 103, 46, 32, 32, 73, + 116, 32, 101, 120, 99, 101, 101, 100, 115, 10, 116, 104, 101, 32, 101, + 110, 99, 111, 100, 105, 110, 103, 32, 115, 105, 122, 101, 32, 111, 102, + 32, 97, 32, 115, 105, 110, 103, 108, 101, 32, 98, 121, 116, 101, 32, + 118, 97, 114, 105, 110, 116, 46, 32, 32, 87, 101, 32, 97, 114, 101, 32, + 117, 115, 105, 110, 103, 32, 105, 116, 32, 116, 111, 32, 102, 117, 122, + 122, 32, 116, 101, 115, 116, 32, 116, 104, 101, 10, 99, 111, 114, 114, + 101, 99, 116, 110, 101, 115, 115, 32, 111, 102, 32, 116, 104, 101, 32, + 104, 101, 97, 100, 101, 114, 32, 100, 101, 99, 111, 100, 105, 110, 103, + 32, 109, 101, 99, 104, 97, 110, 105, 115, 109, 115, 44, 32, 119, 104, + 105, 99, 104, 32, 109, 97, 121, 32, 112, 114, 111, 118, 101, 32, 112, + 114, 111, 98, 108, 101, 109, 97, 116, 105, 99, 46, 10, 73, 32, 101, 120, + 112, 101, 99, 116, 32, 105, 116, 32, 109, 97, 121, 46, 32, 32, 76, 101, + 116, 39, 115, 32, 104, 111, 112, 101, 32, 121, 111, 117, 32, 101, 110, + 106, 111, 121, 32, 116, 101, 115, 116, 105, 110, 103, 32, 97, 115, 32, + 109, 117, 99, 104, 32, 97, 115, 32, 119, 101, 32, 100, 111, 46}, + }, + } { + var buf bytes.Buffer + if n, err := WriteDelimited(&buf, test.msg); n != test.n || err != test.err { + t.Fatalf("WriteDelimited(buf, %#v) = %v, %v; want %v, %v", test.msg, n, err, test.n, test.err) + } + if out := buf.Bytes(); !bytes.Equal(out, test.buf) { + t.Fatalf("WriteDelimited(buf, %#v); buf = %v; want %v", test.msg, out, test.buf) + } + } +} + +func TestReadDelimited(t *testing.T) { + t.Parallel() + for _, test := range []struct { + buf []byte + msg proto.Message + n int + err error + }{ + { + buf: []byte{0}, + msg: &Empty{}, + n: 1, + }, + { + n: 3, + buf: []byte{2, 8, 1}, + msg: &GoEnum{Foo: 
FOO_FOO1.Enum()}, + }, + { + buf: []byte{141, 2, 10, 138, 2, 84, 104, 105, 115, 32, 105, 115, 32, 109, + 121, 32, 103, 105, 103, 97, 110, 116, 105, 99, 44, 32, 117, 110, 104, + 97, 112, 112, 121, 32, 115, 116, 114, 105, 110, 103, 46, 32, 32, 73, + 116, 32, 101, 120, 99, 101, 101, 100, 115, 10, 116, 104, 101, 32, 101, + 110, 99, 111, 100, 105, 110, 103, 32, 115, 105, 122, 101, 32, 111, 102, + 32, 97, 32, 115, 105, 110, 103, 108, 101, 32, 98, 121, 116, 101, 32, + 118, 97, 114, 105, 110, 116, 46, 32, 32, 87, 101, 32, 97, 114, 101, 32, + 117, 115, 105, 110, 103, 32, 105, 116, 32, 116, 111, 32, 102, 117, 122, + 122, 32, 116, 101, 115, 116, 32, 116, 104, 101, 10, 99, 111, 114, 114, + 101, 99, 116, 110, 101, 115, 115, 32, 111, 102, 32, 116, 104, 101, 32, + 104, 101, 97, 100, 101, 114, 32, 100, 101, 99, 111, 100, 105, 110, 103, + 32, 109, 101, 99, 104, 97, 110, 105, 115, 109, 115, 44, 32, 119, 104, + 105, 99, 104, 32, 109, 97, 121, 32, 112, 114, 111, 118, 101, 32, 112, + 114, 111, 98, 108, 101, 109, 97, 116, 105, 99, 46, 10, 73, 32, 101, 120, + 112, 101, 99, 116, 32, 105, 116, 32, 109, 97, 121, 46, 32, 32, 76, 101, + 116, 39, 115, 32, 104, 111, 112, 101, 32, 121, 111, 117, 32, 101, 110, + 106, 111, 121, 32, 116, 101, 115, 116, 105, 110, 103, 32, 97, 115, 32, + 109, 117, 99, 104, 32, 97, 115, 32, 119, 101, 32, 100, 111, 46}, + msg: &Strings{ + StringField: proto.String(`This is my gigantic, unhappy string. It exceeds +the encoding size of a single byte varint. We are using it to fuzz test the +correctness of the header decoding mechanisms, which may prove problematic. +I expect it may. 
Let's hope you enjoy testing as much as we do.`), + }, + n: 271, + }, + } { + msg := proto.Clone(test.msg) + msg.Reset() + if n, err := ReadDelimited(bytes.NewBuffer(test.buf), msg); n != test.n || err != test.err { + t.Fatalf("ReadDelimited(%v, msg) = %v, %v; want %v, %v", test.buf, n, err, test.n, test.err) + } + if !proto.Equal(msg, test.msg) { + t.Fatalf("ReadDelimited(%v, msg); msg = %v; want %v", test.buf, msg, test.msg) + } + } +} + +func TestEndToEndValid(t *testing.T) { + t.Parallel() + for _, test := range [][]proto.Message{ + {&Empty{}}, + {&GoEnum{Foo: FOO_FOO1.Enum()}, &Empty{}, &GoEnum{Foo: FOO_FOO1.Enum()}}, + {&GoEnum{Foo: FOO_FOO1.Enum()}}, + {&Strings{ + StringField: proto.String(`This is my gigantic, unhappy string. It exceeds +the encoding size of a single byte varint. We are using it to fuzz test the +correctness of the header decoding mechanisms, which may prove problematic. +I expect it may. Let's hope you enjoy testing as much as we do.`), + }}, + } { + var buf bytes.Buffer + var written int + for i, msg := range test { + n, err := WriteDelimited(&buf, msg) + if err != nil { + // Assumption: TestReadDelimited and TestWriteDelimited are sufficient + // and inputs for this test are explicitly exercised there. + t.Fatalf("WriteDelimited(buf, %v[%d]) = ?, %v; wanted ?, nil", test, i, err) + } + written += n + } + var read int + for i, msg := range test { + out := proto.Clone(msg) + out.Reset() + n, _ := ReadDelimited(&buf, out) + // Decide to do EOF checking? 
+ read += n + if !proto.Equal(out, msg) { + t.Fatalf("out = %v; want %v[%d] = %#v", out, test, i, msg) + } + } + if read != written { + t.Fatalf("%v read = %d; want %d", test, read, written) + } + } +} diff -Nru juju-core-2.0~beta15/src/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go juju-core-2.0.0/src/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go --- juju-core-2.0~beta15/src/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go 2016-10-13 14:32:27.000000000 +0000 @@ -0,0 +1,75 @@ +// Copyright 2013 Matt T. Proud +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pbutil + +import ( + "encoding/binary" + "errors" + "io" + + "github.com/golang/protobuf/proto" +) + +var errInvalidVarint = errors.New("invalid varint32 encountered") + +// ReadDelimited decodes a message from the provided length-delimited stream, +// where the length is encoded as 32-bit varint prefix to the message body. +// It returns the total number of bytes read and any applicable error. This is +// roughly equivalent to the companion Java API's +// MessageLite#parseDelimitedFrom. As per the reader contract, this function +// calls r.Read repeatedly as required until exactly one message including its +// prefix is read and decoded (or an error has occurred). 
The function never +// reads more bytes from the stream than required. The function never returns +// an error if a message has been read and decoded correctly, even if the end +// of the stream has been reached in doing so. In that case, any subsequent +// calls return (0, io.EOF). +func ReadDelimited(r io.Reader, m proto.Message) (n int, err error) { + // Per AbstractParser#parsePartialDelimitedFrom with + // CodedInputStream#readRawVarint32. + var headerBuf [binary.MaxVarintLen32]byte + var bytesRead, varIntBytes int + var messageLength uint64 + for varIntBytes == 0 { // i.e. no varint has been decoded yet. + if bytesRead >= len(headerBuf) { + return bytesRead, errInvalidVarint + } + // We have to read byte by byte here to avoid reading more bytes + // than required. Each read byte is appended to what we have + // read before. + newBytesRead, err := r.Read(headerBuf[bytesRead : bytesRead+1]) + if newBytesRead == 0 { + if err != nil { + return bytesRead, err + } + // A Reader should not return (0, nil), but if it does, + // it should be treated as no-op (according to the + // Reader contract). So let's go on... + continue + } + bytesRead += newBytesRead + // Now present everything read so far to the varint decoder and + // see if a varint can be decoded already. 
+ messageLength, varIntBytes = proto.DecodeVarint(headerBuf[:bytesRead]) + } + + messageBuf := make([]byte, messageLength) + newBytesRead, err := io.ReadFull(r, messageBuf) + bytesRead += newBytesRead + if err != nil { + return bytesRead, err + } + + return bytesRead, proto.Unmarshal(messageBuf, m) +} diff -Nru juju-core-2.0~beta15/src/github.com/matttproud/golang_protobuf_extensions/pbutil/decode_test.go juju-core-2.0.0/src/github.com/matttproud/golang_protobuf_extensions/pbutil/decode_test.go --- juju-core-2.0~beta15/src/github.com/matttproud/golang_protobuf_extensions/pbutil/decode_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/matttproud/golang_protobuf_extensions/pbutil/decode_test.go 2016-10-13 14:32:27.000000000 +0000 @@ -0,0 +1,99 @@ +// Copyright 2016 Matt T. Proud +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package pbutil + +import ( + "bytes" + "io" + "testing" + "testing/iotest" +) + +func TestReadDelimitedIllegalVarint(t *testing.T) { + t.Parallel() + var tests = []struct { + in []byte + n int + err error + }{ + { + in: []byte{255, 255, 255, 255, 255}, + n: 5, + err: errInvalidVarint, + }, + { + in: []byte{255, 255, 255, 255, 255, 255}, + n: 5, + err: errInvalidVarint, + }, + } + for _, test := range tests { + n, err := ReadDelimited(bytes.NewReader(test.in), nil) + if got, want := n, test.n; got != want { + t.Errorf("ReadDelimited(%#v, nil) = %#v, ?; want = %v#, ?", test.in, got, want) + } + if got, want := err, test.err; got != want { + t.Errorf("ReadDelimited(%#v, nil) = ?, %#v; want = ?, %#v", test.in, got, want) + } + } +} + +func TestReadDelimitedPrematureHeader(t *testing.T) { + t.Parallel() + var data = []byte{128, 5} // 256 + 256 + 128 + n, err := ReadDelimited(bytes.NewReader(data[0:1]), nil) + if got, want := n, 1; got != want { + t.Errorf("ReadDelimited(%#v, nil) = %#v, ?; want = %v#, ?", data[0:1], got, want) + } + if got, want := err, io.EOF; got != want { + t.Errorf("ReadDelimited(%#v, nil) = ?, %#v; want = ?, %#v", data[0:1], got, want) + } +} + +func TestReadDelimitedPrematureBody(t *testing.T) { + t.Parallel() + var data = []byte{128, 5, 0, 0, 0} // 256 + 256 + 128 + n, err := ReadDelimited(bytes.NewReader(data[:]), nil) + if got, want := n, 5; got != want { + t.Errorf("ReadDelimited(%#v, nil) = %#v, ?; want = %v#, ?", data, got, want) + } + if got, want := err, io.ErrUnexpectedEOF; got != want { + t.Errorf("ReadDelimited(%#v, nil) = ?, %#v; want = ?, %#v", data, got, want) + } +} + +func TestReadDelimitedPrematureHeaderIncremental(t *testing.T) { + t.Parallel() + var data = []byte{128, 5} // 256 + 256 + 128 + n, err := ReadDelimited(iotest.OneByteReader(bytes.NewReader(data[0:1])), nil) + if got, want := n, 1; got != want { + t.Errorf("ReadDelimited(%#v, nil) = %#v, ?; want = %v#, ?", data[0:1], got, want) + } + if got, want := err, io.EOF; 
got != want { + t.Errorf("ReadDelimited(%#v, nil) = ?, %#v; want = ?, %#v", data[0:1], got, want) + } +} + +func TestReadDelimitedPrematureBodyIncremental(t *testing.T) { + t.Parallel() + var data = []byte{128, 5, 0, 0, 0} // 256 + 256 + 128 + n, err := ReadDelimited(iotest.OneByteReader(bytes.NewReader(data[:])), nil) + if got, want := n, 5; got != want { + t.Errorf("ReadDelimited(%#v, nil) = %#v, ?; want = %v#, ?", data, got, want) + } + if got, want := err, io.ErrUnexpectedEOF; got != want { + t.Errorf("ReadDelimited(%#v, nil) = ?, %#v; want = ?, %#v", data, got, want) + } +} diff -Nru juju-core-2.0~beta15/src/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go juju-core-2.0.0/src/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go --- juju-core-2.0~beta15/src/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go 2016-10-13 14:32:27.000000000 +0000 @@ -0,0 +1,16 @@ +// Copyright 2013 Matt T. Proud +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package pbutil provides record length-delimited Protocol Buffer streaming. 
+package pbutil diff -Nru juju-core-2.0~beta15/src/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go juju-core-2.0.0/src/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go --- juju-core-2.0~beta15/src/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go 2016-10-13 14:32:27.000000000 +0000 @@ -0,0 +1,46 @@ +// Copyright 2013 Matt T. Proud +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pbutil + +import ( + "encoding/binary" + "io" + + "github.com/golang/protobuf/proto" +) + +// WriteDelimited encodes and dumps a message to the provided writer prefixed +// with a 32-bit varint indicating the length of the encoded message, producing +// a length-delimited record stream, which can be used to chain together +// encoded messages of the same type together in a file. It returns the total +// number of bytes written and any applicable error. This is roughly +// equivalent to the companion Java API's MessageLite#writeDelimitedTo. 
+func WriteDelimited(w io.Writer, m proto.Message) (n int, err error) { + buffer, err := proto.Marshal(m) + if err != nil { + return 0, err + } + + var buf [binary.MaxVarintLen32]byte + encodedLength := binary.PutUvarint(buf[:], uint64(len(buffer))) + + sync, err := w.Write(buf[:encodedLength]) + if err != nil { + return sync, err + } + + n, err = w.Write(buffer) + return n + sync, err +} diff -Nru juju-core-2.0~beta15/src/github.com/matttproud/golang_protobuf_extensions/pbutil/encode_test.go juju-core-2.0.0/src/github.com/matttproud/golang_protobuf_extensions/pbutil/encode_test.go --- juju-core-2.0~beta15/src/github.com/matttproud/golang_protobuf_extensions/pbutil/encode_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/matttproud/golang_protobuf_extensions/pbutil/encode_test.go 2016-10-13 14:32:27.000000000 +0000 @@ -0,0 +1,67 @@ +// Copyright 2016 Matt T. Proud +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package pbutil + +import ( + "bytes" + "errors" + "testing" + + "github.com/golang/protobuf/proto" +) + +var errMarshal = errors.New("pbutil: can't marshal") + +type cantMarshal struct{ proto.Message } + +func (cantMarshal) Marshal() ([]byte, error) { return nil, errMarshal } + +var _ proto.Message = cantMarshal{} + +func TestWriteDelimitedMarshalErr(t *testing.T) { + t.Parallel() + var data cantMarshal + var buf bytes.Buffer + n, err := WriteDelimited(&buf, data) + if got, want := n, 0; got != want { + t.Errorf("WriteDelimited(buf, %#v) = %#v, ?; want = %v#, ?", data, got, want) + } + if got, want := err, errMarshal; got != want { + t.Errorf("WriteDelimited(buf, %#v) = ?, %#v; want = ?, %#v", data, got, want) + } +} + +type canMarshal struct{ proto.Message } + +func (canMarshal) Marshal() ([]byte, error) { return []byte{0, 1, 2, 3, 4, 5}, nil } + +var errWrite = errors.New("pbutil: can't write") + +type cantWrite struct{} + +func (cantWrite) Write([]byte) (int, error) { return 0, errWrite } + +func TestWriteDelimitedWriteErr(t *testing.T) { + t.Parallel() + var data canMarshal + var buf cantWrite + n, err := WriteDelimited(buf, data) + if got, want := n, 0; got != want { + t.Errorf("WriteDelimited(buf, %#v) = %#v, ?; want = %v#, ?", data, got, want) + } + if got, want := err, errWrite; got != want { + t.Errorf("WriteDelimited(buf, %#v) = ?, %#v; want = ?, %#v", data, got, want) + } +} diff -Nru juju-core-2.0~beta15/src/github.com/matttproud/golang_protobuf_extensions/pbutil/.gitignore juju-core-2.0.0/src/github.com/matttproud/golang_protobuf_extensions/pbutil/.gitignore --- juju-core-2.0~beta15/src/github.com/matttproud/golang_protobuf_extensions/pbutil/.gitignore 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/matttproud/golang_protobuf_extensions/pbutil/.gitignore 2016-10-13 14:32:27.000000000 +0000 @@ -0,0 +1 @@ +cover.dat diff -Nru juju-core-2.0~beta15/src/github.com/matttproud/golang_protobuf_extensions/pbutil/Makefile 
juju-core-2.0.0/src/github.com/matttproud/golang_protobuf_extensions/pbutil/Makefile --- juju-core-2.0~beta15/src/github.com/matttproud/golang_protobuf_extensions/pbutil/Makefile 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/matttproud/golang_protobuf_extensions/pbutil/Makefile 2016-10-13 14:32:27.000000000 +0000 @@ -0,0 +1,7 @@ +all: + +cover: + go test -cover -v -coverprofile=cover.dat ./... + go tool cover -func cover.dat + +.PHONY: cover diff -Nru juju-core-2.0~beta15/src/github.com/matttproud/golang_protobuf_extensions/README.md juju-core-2.0.0/src/github.com/matttproud/golang_protobuf_extensions/README.md --- juju-core-2.0~beta15/src/github.com/matttproud/golang_protobuf_extensions/README.md 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/matttproud/golang_protobuf_extensions/README.md 2016-10-13 14:32:27.000000000 +0000 @@ -0,0 +1,20 @@ +# Overview +This repository provides various Protocol Buffer extensions for the Go +language (golang), namely support for record length-delimited message +streaming. + +| Java | Go | +| ------------------------------ | --------------------- | +| MessageLite#parseDelimitedFrom | pbutil.ReadDelimited | +| MessageLite#writeDelimitedTo | pbutil.WriteDelimited | + +Because [Code Review 9102043](https://codereview.appspot.com/9102043/) is +destined to never be merged into mainline (i.e., never be promoted to formal +[goprotobuf features](https://github.com/golang/protobuf)), this repository +will live here in the wild. + +# Documentation +We have [generated Go Doc documentation](http://godoc.org/github.com/matttproud/golang_protobuf_extensions/pbutil) here. 
+ +# Testing +[![Build Status](https://travis-ci.org/matttproud/golang_protobuf_extensions.png?branch=master)](https://travis-ci.org/matttproud/golang_protobuf_extensions) diff -Nru juju-core-2.0~beta15/src/github.com/matttproud/golang_protobuf_extensions/testdata/README.THIRD_PARTY juju-core-2.0.0/src/github.com/matttproud/golang_protobuf_extensions/testdata/README.THIRD_PARTY --- juju-core-2.0~beta15/src/github.com/matttproud/golang_protobuf_extensions/testdata/README.THIRD_PARTY 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/matttproud/golang_protobuf_extensions/testdata/README.THIRD_PARTY 2016-10-13 14:32:27.000000000 +0000 @@ -0,0 +1,4 @@ +test.pb.go and test.proto are third-party data. + +SOURCE: https://github.com/golang/protobuf +REVISION: bf531ff1a004f24ee53329dfd5ce0b41bfdc17df diff -Nru juju-core-2.0~beta15/src/github.com/matttproud/golang_protobuf_extensions/testdata/test.pb.go juju-core-2.0.0/src/github.com/matttproud/golang_protobuf_extensions/testdata/test.pb.go --- juju-core-2.0~beta15/src/github.com/matttproud/golang_protobuf_extensions/testdata/test.pb.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/matttproud/golang_protobuf_extensions/testdata/test.pb.go 2016-10-13 14:32:27.000000000 +0000 @@ -0,0 +1,4029 @@ +// Code generated by protoc-gen-go. +// source: test.proto +// DO NOT EDIT! + +/* +Package testdata is a generated protocol buffer package. 
+ +It is generated from these files: + test.proto + +It has these top-level messages: + GoEnum + GoTestField + GoTest + GoSkipTest + NonPackedTest + PackedTest + MaxTag + OldMessage + NewMessage + InnerMessage + OtherMessage + RequiredInnerMessage + MyMessage + Ext + ComplexExtension + DefaultsMessage + MyMessageSet + Empty + MessageList + Strings + Defaults + SubDefaults + RepeatedEnum + MoreRepeated + GroupOld + GroupNew + FloatingPoint + MessageWithMap + Oneof + Communique +*/ +package testdata + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +const _ = proto.ProtoPackageIsVersion1 + +type FOO int32 + +const ( + FOO_FOO1 FOO = 1 +) + +var FOO_name = map[int32]string{ + 1: "FOO1", +} +var FOO_value = map[string]int32{ + "FOO1": 1, +} + +func (x FOO) Enum() *FOO { + p := new(FOO) + *p = x + return p +} +func (x FOO) String() string { + return proto.EnumName(FOO_name, int32(x)) +} +func (x *FOO) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FOO_value, data, "FOO") + if err != nil { + return err + } + *x = FOO(value) + return nil +} +func (FOO) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +// An enum, for completeness. 
+type GoTest_KIND int32 + +const ( + GoTest_VOID GoTest_KIND = 0 + // Basic types + GoTest_BOOL GoTest_KIND = 1 + GoTest_BYTES GoTest_KIND = 2 + GoTest_FINGERPRINT GoTest_KIND = 3 + GoTest_FLOAT GoTest_KIND = 4 + GoTest_INT GoTest_KIND = 5 + GoTest_STRING GoTest_KIND = 6 + GoTest_TIME GoTest_KIND = 7 + // Groupings + GoTest_TUPLE GoTest_KIND = 8 + GoTest_ARRAY GoTest_KIND = 9 + GoTest_MAP GoTest_KIND = 10 + // Table types + GoTest_TABLE GoTest_KIND = 11 + // Functions + GoTest_FUNCTION GoTest_KIND = 12 +) + +var GoTest_KIND_name = map[int32]string{ + 0: "VOID", + 1: "BOOL", + 2: "BYTES", + 3: "FINGERPRINT", + 4: "FLOAT", + 5: "INT", + 6: "STRING", + 7: "TIME", + 8: "TUPLE", + 9: "ARRAY", + 10: "MAP", + 11: "TABLE", + 12: "FUNCTION", +} +var GoTest_KIND_value = map[string]int32{ + "VOID": 0, + "BOOL": 1, + "BYTES": 2, + "FINGERPRINT": 3, + "FLOAT": 4, + "INT": 5, + "STRING": 6, + "TIME": 7, + "TUPLE": 8, + "ARRAY": 9, + "MAP": 10, + "TABLE": 11, + "FUNCTION": 12, +} + +func (x GoTest_KIND) Enum() *GoTest_KIND { + p := new(GoTest_KIND) + *p = x + return p +} +func (x GoTest_KIND) String() string { + return proto.EnumName(GoTest_KIND_name, int32(x)) +} +func (x *GoTest_KIND) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(GoTest_KIND_value, data, "GoTest_KIND") + if err != nil { + return err + } + *x = GoTest_KIND(value) + return nil +} +func (GoTest_KIND) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{2, 0} } + +type MyMessage_Color int32 + +const ( + MyMessage_RED MyMessage_Color = 0 + MyMessage_GREEN MyMessage_Color = 1 + MyMessage_BLUE MyMessage_Color = 2 +) + +var MyMessage_Color_name = map[int32]string{ + 0: "RED", + 1: "GREEN", + 2: "BLUE", +} +var MyMessage_Color_value = map[string]int32{ + "RED": 0, + "GREEN": 1, + "BLUE": 2, +} + +func (x MyMessage_Color) Enum() *MyMessage_Color { + p := new(MyMessage_Color) + *p = x + return p +} +func (x MyMessage_Color) String() string { + return 
proto.EnumName(MyMessage_Color_name, int32(x)) +} +func (x *MyMessage_Color) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(MyMessage_Color_value, data, "MyMessage_Color") + if err != nil { + return err + } + *x = MyMessage_Color(value) + return nil +} +func (MyMessage_Color) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{12, 0} } + +type DefaultsMessage_DefaultsEnum int32 + +const ( + DefaultsMessage_ZERO DefaultsMessage_DefaultsEnum = 0 + DefaultsMessage_ONE DefaultsMessage_DefaultsEnum = 1 + DefaultsMessage_TWO DefaultsMessage_DefaultsEnum = 2 +) + +var DefaultsMessage_DefaultsEnum_name = map[int32]string{ + 0: "ZERO", + 1: "ONE", + 2: "TWO", +} +var DefaultsMessage_DefaultsEnum_value = map[string]int32{ + "ZERO": 0, + "ONE": 1, + "TWO": 2, +} + +func (x DefaultsMessage_DefaultsEnum) Enum() *DefaultsMessage_DefaultsEnum { + p := new(DefaultsMessage_DefaultsEnum) + *p = x + return p +} +func (x DefaultsMessage_DefaultsEnum) String() string { + return proto.EnumName(DefaultsMessage_DefaultsEnum_name, int32(x)) +} +func (x *DefaultsMessage_DefaultsEnum) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(DefaultsMessage_DefaultsEnum_value, data, "DefaultsMessage_DefaultsEnum") + if err != nil { + return err + } + *x = DefaultsMessage_DefaultsEnum(value) + return nil +} +func (DefaultsMessage_DefaultsEnum) EnumDescriptor() ([]byte, []int) { + return fileDescriptor0, []int{15, 0} +} + +type Defaults_Color int32 + +const ( + Defaults_RED Defaults_Color = 0 + Defaults_GREEN Defaults_Color = 1 + Defaults_BLUE Defaults_Color = 2 +) + +var Defaults_Color_name = map[int32]string{ + 0: "RED", + 1: "GREEN", + 2: "BLUE", +} +var Defaults_Color_value = map[string]int32{ + "RED": 0, + "GREEN": 1, + "BLUE": 2, +} + +func (x Defaults_Color) Enum() *Defaults_Color { + p := new(Defaults_Color) + *p = x + return p +} +func (x Defaults_Color) String() string { + return proto.EnumName(Defaults_Color_name, int32(x)) 
+} +func (x *Defaults_Color) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Defaults_Color_value, data, "Defaults_Color") + if err != nil { + return err + } + *x = Defaults_Color(value) + return nil +} +func (Defaults_Color) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{20, 0} } + +type RepeatedEnum_Color int32 + +const ( + RepeatedEnum_RED RepeatedEnum_Color = 1 +) + +var RepeatedEnum_Color_name = map[int32]string{ + 1: "RED", +} +var RepeatedEnum_Color_value = map[string]int32{ + "RED": 1, +} + +func (x RepeatedEnum_Color) Enum() *RepeatedEnum_Color { + p := new(RepeatedEnum_Color) + *p = x + return p +} +func (x RepeatedEnum_Color) String() string { + return proto.EnumName(RepeatedEnum_Color_name, int32(x)) +} +func (x *RepeatedEnum_Color) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(RepeatedEnum_Color_value, data, "RepeatedEnum_Color") + if err != nil { + return err + } + *x = RepeatedEnum_Color(value) + return nil +} +func (RepeatedEnum_Color) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{22, 0} } + +type GoEnum struct { + Foo *FOO `protobuf:"varint,1,req,name=foo,enum=testdata.FOO" json:"foo,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GoEnum) Reset() { *m = GoEnum{} } +func (m *GoEnum) String() string { return proto.CompactTextString(m) } +func (*GoEnum) ProtoMessage() {} +func (*GoEnum) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *GoEnum) GetFoo() FOO { + if m != nil && m.Foo != nil { + return *m.Foo + } + return FOO_FOO1 +} + +type GoTestField struct { + Label *string `protobuf:"bytes,1,req,name=Label,json=label" json:"Label,omitempty"` + Type *string `protobuf:"bytes,2,req,name=Type,json=type" json:"Type,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GoTestField) Reset() { *m = GoTestField{} } +func (m *GoTestField) String() string { return proto.CompactTextString(m) } +func (*GoTestField) 
ProtoMessage() {} +func (*GoTestField) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *GoTestField) GetLabel() string { + if m != nil && m.Label != nil { + return *m.Label + } + return "" +} + +func (m *GoTestField) GetType() string { + if m != nil && m.Type != nil { + return *m.Type + } + return "" +} + +type GoTest struct { + // Some typical parameters + Kind *GoTest_KIND `protobuf:"varint,1,req,name=Kind,json=kind,enum=testdata.GoTest_KIND" json:"Kind,omitempty"` + Table *string `protobuf:"bytes,2,opt,name=Table,json=table" json:"Table,omitempty"` + Param *int32 `protobuf:"varint,3,opt,name=Param,json=param" json:"Param,omitempty"` + // Required, repeated and optional foreign fields. + RequiredField *GoTestField `protobuf:"bytes,4,req,name=RequiredField,json=requiredField" json:"RequiredField,omitempty"` + RepeatedField []*GoTestField `protobuf:"bytes,5,rep,name=RepeatedField,json=repeatedField" json:"RepeatedField,omitempty"` + OptionalField *GoTestField `protobuf:"bytes,6,opt,name=OptionalField,json=optionalField" json:"OptionalField,omitempty"` + // Required fields of all basic types + F_BoolRequired *bool `protobuf:"varint,10,req,name=F_Bool_required,json=fBoolRequired" json:"F_Bool_required,omitempty"` + F_Int32Required *int32 `protobuf:"varint,11,req,name=F_Int32_required,json=fInt32Required" json:"F_Int32_required,omitempty"` + F_Int64Required *int64 `protobuf:"varint,12,req,name=F_Int64_required,json=fInt64Required" json:"F_Int64_required,omitempty"` + F_Fixed32Required *uint32 `protobuf:"fixed32,13,req,name=F_Fixed32_required,json=fFixed32Required" json:"F_Fixed32_required,omitempty"` + F_Fixed64Required *uint64 `protobuf:"fixed64,14,req,name=F_Fixed64_required,json=fFixed64Required" json:"F_Fixed64_required,omitempty"` + F_Uint32Required *uint32 `protobuf:"varint,15,req,name=F_Uint32_required,json=fUint32Required" json:"F_Uint32_required,omitempty"` + F_Uint64Required *uint64 
`protobuf:"varint,16,req,name=F_Uint64_required,json=fUint64Required" json:"F_Uint64_required,omitempty"` + F_FloatRequired *float32 `protobuf:"fixed32,17,req,name=F_Float_required,json=fFloatRequired" json:"F_Float_required,omitempty"` + F_DoubleRequired *float64 `protobuf:"fixed64,18,req,name=F_Double_required,json=fDoubleRequired" json:"F_Double_required,omitempty"` + F_StringRequired *string `protobuf:"bytes,19,req,name=F_String_required,json=fStringRequired" json:"F_String_required,omitempty"` + F_BytesRequired []byte `protobuf:"bytes,101,req,name=F_Bytes_required,json=fBytesRequired" json:"F_Bytes_required,omitempty"` + F_Sint32Required *int32 `protobuf:"zigzag32,102,req,name=F_Sint32_required,json=fSint32Required" json:"F_Sint32_required,omitempty"` + F_Sint64Required *int64 `protobuf:"zigzag64,103,req,name=F_Sint64_required,json=fSint64Required" json:"F_Sint64_required,omitempty"` + // Repeated fields of all basic types + F_BoolRepeated []bool `protobuf:"varint,20,rep,name=F_Bool_repeated,json=fBoolRepeated" json:"F_Bool_repeated,omitempty"` + F_Int32Repeated []int32 `protobuf:"varint,21,rep,name=F_Int32_repeated,json=fInt32Repeated" json:"F_Int32_repeated,omitempty"` + F_Int64Repeated []int64 `protobuf:"varint,22,rep,name=F_Int64_repeated,json=fInt64Repeated" json:"F_Int64_repeated,omitempty"` + F_Fixed32Repeated []uint32 `protobuf:"fixed32,23,rep,name=F_Fixed32_repeated,json=fFixed32Repeated" json:"F_Fixed32_repeated,omitempty"` + F_Fixed64Repeated []uint64 `protobuf:"fixed64,24,rep,name=F_Fixed64_repeated,json=fFixed64Repeated" json:"F_Fixed64_repeated,omitempty"` + F_Uint32Repeated []uint32 `protobuf:"varint,25,rep,name=F_Uint32_repeated,json=fUint32Repeated" json:"F_Uint32_repeated,omitempty"` + F_Uint64Repeated []uint64 `protobuf:"varint,26,rep,name=F_Uint64_repeated,json=fUint64Repeated" json:"F_Uint64_repeated,omitempty"` + F_FloatRepeated []float32 `protobuf:"fixed32,27,rep,name=F_Float_repeated,json=fFloatRepeated" 
json:"F_Float_repeated,omitempty"` + F_DoubleRepeated []float64 `protobuf:"fixed64,28,rep,name=F_Double_repeated,json=fDoubleRepeated" json:"F_Double_repeated,omitempty"` + F_StringRepeated []string `protobuf:"bytes,29,rep,name=F_String_repeated,json=fStringRepeated" json:"F_String_repeated,omitempty"` + F_BytesRepeated [][]byte `protobuf:"bytes,201,rep,name=F_Bytes_repeated,json=fBytesRepeated" json:"F_Bytes_repeated,omitempty"` + F_Sint32Repeated []int32 `protobuf:"zigzag32,202,rep,name=F_Sint32_repeated,json=fSint32Repeated" json:"F_Sint32_repeated,omitempty"` + F_Sint64Repeated []int64 `protobuf:"zigzag64,203,rep,name=F_Sint64_repeated,json=fSint64Repeated" json:"F_Sint64_repeated,omitempty"` + // Optional fields of all basic types + F_BoolOptional *bool `protobuf:"varint,30,opt,name=F_Bool_optional,json=fBoolOptional" json:"F_Bool_optional,omitempty"` + F_Int32Optional *int32 `protobuf:"varint,31,opt,name=F_Int32_optional,json=fInt32Optional" json:"F_Int32_optional,omitempty"` + F_Int64Optional *int64 `protobuf:"varint,32,opt,name=F_Int64_optional,json=fInt64Optional" json:"F_Int64_optional,omitempty"` + F_Fixed32Optional *uint32 `protobuf:"fixed32,33,opt,name=F_Fixed32_optional,json=fFixed32Optional" json:"F_Fixed32_optional,omitempty"` + F_Fixed64Optional *uint64 `protobuf:"fixed64,34,opt,name=F_Fixed64_optional,json=fFixed64Optional" json:"F_Fixed64_optional,omitempty"` + F_Uint32Optional *uint32 `protobuf:"varint,35,opt,name=F_Uint32_optional,json=fUint32Optional" json:"F_Uint32_optional,omitempty"` + F_Uint64Optional *uint64 `protobuf:"varint,36,opt,name=F_Uint64_optional,json=fUint64Optional" json:"F_Uint64_optional,omitempty"` + F_FloatOptional *float32 `protobuf:"fixed32,37,opt,name=F_Float_optional,json=fFloatOptional" json:"F_Float_optional,omitempty"` + F_DoubleOptional *float64 `protobuf:"fixed64,38,opt,name=F_Double_optional,json=fDoubleOptional" json:"F_Double_optional,omitempty"` + F_StringOptional *string 
`protobuf:"bytes,39,opt,name=F_String_optional,json=fStringOptional" json:"F_String_optional,omitempty"` + F_BytesOptional []byte `protobuf:"bytes,301,opt,name=F_Bytes_optional,json=fBytesOptional" json:"F_Bytes_optional,omitempty"` + F_Sint32Optional *int32 `protobuf:"zigzag32,302,opt,name=F_Sint32_optional,json=fSint32Optional" json:"F_Sint32_optional,omitempty"` + F_Sint64Optional *int64 `protobuf:"zigzag64,303,opt,name=F_Sint64_optional,json=fSint64Optional" json:"F_Sint64_optional,omitempty"` + // Default-valued fields of all basic types + F_BoolDefaulted *bool `protobuf:"varint,40,opt,name=F_Bool_defaulted,json=fBoolDefaulted,def=1" json:"F_Bool_defaulted,omitempty"` + F_Int32Defaulted *int32 `protobuf:"varint,41,opt,name=F_Int32_defaulted,json=fInt32Defaulted,def=32" json:"F_Int32_defaulted,omitempty"` + F_Int64Defaulted *int64 `protobuf:"varint,42,opt,name=F_Int64_defaulted,json=fInt64Defaulted,def=64" json:"F_Int64_defaulted,omitempty"` + F_Fixed32Defaulted *uint32 `protobuf:"fixed32,43,opt,name=F_Fixed32_defaulted,json=fFixed32Defaulted,def=320" json:"F_Fixed32_defaulted,omitempty"` + F_Fixed64Defaulted *uint64 `protobuf:"fixed64,44,opt,name=F_Fixed64_defaulted,json=fFixed64Defaulted,def=640" json:"F_Fixed64_defaulted,omitempty"` + F_Uint32Defaulted *uint32 `protobuf:"varint,45,opt,name=F_Uint32_defaulted,json=fUint32Defaulted,def=3200" json:"F_Uint32_defaulted,omitempty"` + F_Uint64Defaulted *uint64 `protobuf:"varint,46,opt,name=F_Uint64_defaulted,json=fUint64Defaulted,def=6400" json:"F_Uint64_defaulted,omitempty"` + F_FloatDefaulted *float32 `protobuf:"fixed32,47,opt,name=F_Float_defaulted,json=fFloatDefaulted,def=314159" json:"F_Float_defaulted,omitempty"` + F_DoubleDefaulted *float64 `protobuf:"fixed64,48,opt,name=F_Double_defaulted,json=fDoubleDefaulted,def=271828" json:"F_Double_defaulted,omitempty"` + F_StringDefaulted *string `protobuf:"bytes,49,opt,name=F_String_defaulted,json=fStringDefaulted,def=hello, \"world!\"\n" 
json:"F_String_defaulted,omitempty"` + F_BytesDefaulted []byte `protobuf:"bytes,401,opt,name=F_Bytes_defaulted,json=fBytesDefaulted,def=Bignose" json:"F_Bytes_defaulted,omitempty"` + F_Sint32Defaulted *int32 `protobuf:"zigzag32,402,opt,name=F_Sint32_defaulted,json=fSint32Defaulted,def=-32" json:"F_Sint32_defaulted,omitempty"` + F_Sint64Defaulted *int64 `protobuf:"zigzag64,403,opt,name=F_Sint64_defaulted,json=fSint64Defaulted,def=-64" json:"F_Sint64_defaulted,omitempty"` + // Packed repeated fields (no string or bytes). + F_BoolRepeatedPacked []bool `protobuf:"varint,50,rep,packed,name=F_Bool_repeated_packed,json=fBoolRepeatedPacked" json:"F_Bool_repeated_packed,omitempty"` + F_Int32RepeatedPacked []int32 `protobuf:"varint,51,rep,packed,name=F_Int32_repeated_packed,json=fInt32RepeatedPacked" json:"F_Int32_repeated_packed,omitempty"` + F_Int64RepeatedPacked []int64 `protobuf:"varint,52,rep,packed,name=F_Int64_repeated_packed,json=fInt64RepeatedPacked" json:"F_Int64_repeated_packed,omitempty"` + F_Fixed32RepeatedPacked []uint32 `protobuf:"fixed32,53,rep,packed,name=F_Fixed32_repeated_packed,json=fFixed32RepeatedPacked" json:"F_Fixed32_repeated_packed,omitempty"` + F_Fixed64RepeatedPacked []uint64 `protobuf:"fixed64,54,rep,packed,name=F_Fixed64_repeated_packed,json=fFixed64RepeatedPacked" json:"F_Fixed64_repeated_packed,omitempty"` + F_Uint32RepeatedPacked []uint32 `protobuf:"varint,55,rep,packed,name=F_Uint32_repeated_packed,json=fUint32RepeatedPacked" json:"F_Uint32_repeated_packed,omitempty"` + F_Uint64RepeatedPacked []uint64 `protobuf:"varint,56,rep,packed,name=F_Uint64_repeated_packed,json=fUint64RepeatedPacked" json:"F_Uint64_repeated_packed,omitempty"` + F_FloatRepeatedPacked []float32 `protobuf:"fixed32,57,rep,packed,name=F_Float_repeated_packed,json=fFloatRepeatedPacked" json:"F_Float_repeated_packed,omitempty"` + F_DoubleRepeatedPacked []float64 `protobuf:"fixed64,58,rep,packed,name=F_Double_repeated_packed,json=fDoubleRepeatedPacked" 
json:"F_Double_repeated_packed,omitempty"` + F_Sint32RepeatedPacked []int32 `protobuf:"zigzag32,502,rep,packed,name=F_Sint32_repeated_packed,json=fSint32RepeatedPacked" json:"F_Sint32_repeated_packed,omitempty"` + F_Sint64RepeatedPacked []int64 `protobuf:"zigzag64,503,rep,packed,name=F_Sint64_repeated_packed,json=fSint64RepeatedPacked" json:"F_Sint64_repeated_packed,omitempty"` + Requiredgroup *GoTest_RequiredGroup `protobuf:"group,70,req,name=RequiredGroup,json=requiredgroup" json:"requiredgroup,omitempty"` + Repeatedgroup []*GoTest_RepeatedGroup `protobuf:"group,80,rep,name=RepeatedGroup,json=repeatedgroup" json:"repeatedgroup,omitempty"` + Optionalgroup *GoTest_OptionalGroup `protobuf:"group,90,opt,name=OptionalGroup,json=optionalgroup" json:"optionalgroup,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GoTest) Reset() { *m = GoTest{} } +func (m *GoTest) String() string { return proto.CompactTextString(m) } +func (*GoTest) ProtoMessage() {} +func (*GoTest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +const Default_GoTest_F_BoolDefaulted bool = true +const Default_GoTest_F_Int32Defaulted int32 = 32 +const Default_GoTest_F_Int64Defaulted int64 = 64 +const Default_GoTest_F_Fixed32Defaulted uint32 = 320 +const Default_GoTest_F_Fixed64Defaulted uint64 = 640 +const Default_GoTest_F_Uint32Defaulted uint32 = 3200 +const Default_GoTest_F_Uint64Defaulted uint64 = 6400 +const Default_GoTest_F_FloatDefaulted float32 = 314159 +const Default_GoTest_F_DoubleDefaulted float64 = 271828 +const Default_GoTest_F_StringDefaulted string = "hello, \"world!\"\n" + +var Default_GoTest_F_BytesDefaulted []byte = []byte("Bignose") + +const Default_GoTest_F_Sint32Defaulted int32 = -32 +const Default_GoTest_F_Sint64Defaulted int64 = -64 + +func (m *GoTest) GetKind() GoTest_KIND { + if m != nil && m.Kind != nil { + return *m.Kind + } + return GoTest_VOID +} + +func (m *GoTest) GetTable() string { + if m != nil && m.Table != nil { + return *m.Table + } + 
return "" +} + +func (m *GoTest) GetParam() int32 { + if m != nil && m.Param != nil { + return *m.Param + } + return 0 +} + +func (m *GoTest) GetRequiredField() *GoTestField { + if m != nil { + return m.RequiredField + } + return nil +} + +func (m *GoTest) GetRepeatedField() []*GoTestField { + if m != nil { + return m.RepeatedField + } + return nil +} + +func (m *GoTest) GetOptionalField() *GoTestField { + if m != nil { + return m.OptionalField + } + return nil +} + +func (m *GoTest) GetF_BoolRequired() bool { + if m != nil && m.F_BoolRequired != nil { + return *m.F_BoolRequired + } + return false +} + +func (m *GoTest) GetF_Int32Required() int32 { + if m != nil && m.F_Int32Required != nil { + return *m.F_Int32Required + } + return 0 +} + +func (m *GoTest) GetF_Int64Required() int64 { + if m != nil && m.F_Int64Required != nil { + return *m.F_Int64Required + } + return 0 +} + +func (m *GoTest) GetF_Fixed32Required() uint32 { + if m != nil && m.F_Fixed32Required != nil { + return *m.F_Fixed32Required + } + return 0 +} + +func (m *GoTest) GetF_Fixed64Required() uint64 { + if m != nil && m.F_Fixed64Required != nil { + return *m.F_Fixed64Required + } + return 0 +} + +func (m *GoTest) GetF_Uint32Required() uint32 { + if m != nil && m.F_Uint32Required != nil { + return *m.F_Uint32Required + } + return 0 +} + +func (m *GoTest) GetF_Uint64Required() uint64 { + if m != nil && m.F_Uint64Required != nil { + return *m.F_Uint64Required + } + return 0 +} + +func (m *GoTest) GetF_FloatRequired() float32 { + if m != nil && m.F_FloatRequired != nil { + return *m.F_FloatRequired + } + return 0 +} + +func (m *GoTest) GetF_DoubleRequired() float64 { + if m != nil && m.F_DoubleRequired != nil { + return *m.F_DoubleRequired + } + return 0 +} + +func (m *GoTest) GetF_StringRequired() string { + if m != nil && m.F_StringRequired != nil { + return *m.F_StringRequired + } + return "" +} + +func (m *GoTest) GetF_BytesRequired() []byte { + if m != nil { + return m.F_BytesRequired + } + return 
nil +} + +func (m *GoTest) GetF_Sint32Required() int32 { + if m != nil && m.F_Sint32Required != nil { + return *m.F_Sint32Required + } + return 0 +} + +func (m *GoTest) GetF_Sint64Required() int64 { + if m != nil && m.F_Sint64Required != nil { + return *m.F_Sint64Required + } + return 0 +} + +func (m *GoTest) GetF_BoolRepeated() []bool { + if m != nil { + return m.F_BoolRepeated + } + return nil +} + +func (m *GoTest) GetF_Int32Repeated() []int32 { + if m != nil { + return m.F_Int32Repeated + } + return nil +} + +func (m *GoTest) GetF_Int64Repeated() []int64 { + if m != nil { + return m.F_Int64Repeated + } + return nil +} + +func (m *GoTest) GetF_Fixed32Repeated() []uint32 { + if m != nil { + return m.F_Fixed32Repeated + } + return nil +} + +func (m *GoTest) GetF_Fixed64Repeated() []uint64 { + if m != nil { + return m.F_Fixed64Repeated + } + return nil +} + +func (m *GoTest) GetF_Uint32Repeated() []uint32 { + if m != nil { + return m.F_Uint32Repeated + } + return nil +} + +func (m *GoTest) GetF_Uint64Repeated() []uint64 { + if m != nil { + return m.F_Uint64Repeated + } + return nil +} + +func (m *GoTest) GetF_FloatRepeated() []float32 { + if m != nil { + return m.F_FloatRepeated + } + return nil +} + +func (m *GoTest) GetF_DoubleRepeated() []float64 { + if m != nil { + return m.F_DoubleRepeated + } + return nil +} + +func (m *GoTest) GetF_StringRepeated() []string { + if m != nil { + return m.F_StringRepeated + } + return nil +} + +func (m *GoTest) GetF_BytesRepeated() [][]byte { + if m != nil { + return m.F_BytesRepeated + } + return nil +} + +func (m *GoTest) GetF_Sint32Repeated() []int32 { + if m != nil { + return m.F_Sint32Repeated + } + return nil +} + +func (m *GoTest) GetF_Sint64Repeated() []int64 { + if m != nil { + return m.F_Sint64Repeated + } + return nil +} + +func (m *GoTest) GetF_BoolOptional() bool { + if m != nil && m.F_BoolOptional != nil { + return *m.F_BoolOptional + } + return false +} + +func (m *GoTest) GetF_Int32Optional() int32 { + if m != 
nil && m.F_Int32Optional != nil { + return *m.F_Int32Optional + } + return 0 +} + +func (m *GoTest) GetF_Int64Optional() int64 { + if m != nil && m.F_Int64Optional != nil { + return *m.F_Int64Optional + } + return 0 +} + +func (m *GoTest) GetF_Fixed32Optional() uint32 { + if m != nil && m.F_Fixed32Optional != nil { + return *m.F_Fixed32Optional + } + return 0 +} + +func (m *GoTest) GetF_Fixed64Optional() uint64 { + if m != nil && m.F_Fixed64Optional != nil { + return *m.F_Fixed64Optional + } + return 0 +} + +func (m *GoTest) GetF_Uint32Optional() uint32 { + if m != nil && m.F_Uint32Optional != nil { + return *m.F_Uint32Optional + } + return 0 +} + +func (m *GoTest) GetF_Uint64Optional() uint64 { + if m != nil && m.F_Uint64Optional != nil { + return *m.F_Uint64Optional + } + return 0 +} + +func (m *GoTest) GetF_FloatOptional() float32 { + if m != nil && m.F_FloatOptional != nil { + return *m.F_FloatOptional + } + return 0 +} + +func (m *GoTest) GetF_DoubleOptional() float64 { + if m != nil && m.F_DoubleOptional != nil { + return *m.F_DoubleOptional + } + return 0 +} + +func (m *GoTest) GetF_StringOptional() string { + if m != nil && m.F_StringOptional != nil { + return *m.F_StringOptional + } + return "" +} + +func (m *GoTest) GetF_BytesOptional() []byte { + if m != nil { + return m.F_BytesOptional + } + return nil +} + +func (m *GoTest) GetF_Sint32Optional() int32 { + if m != nil && m.F_Sint32Optional != nil { + return *m.F_Sint32Optional + } + return 0 +} + +func (m *GoTest) GetF_Sint64Optional() int64 { + if m != nil && m.F_Sint64Optional != nil { + return *m.F_Sint64Optional + } + return 0 +} + +func (m *GoTest) GetF_BoolDefaulted() bool { + if m != nil && m.F_BoolDefaulted != nil { + return *m.F_BoolDefaulted + } + return Default_GoTest_F_BoolDefaulted +} + +func (m *GoTest) GetF_Int32Defaulted() int32 { + if m != nil && m.F_Int32Defaulted != nil { + return *m.F_Int32Defaulted + } + return Default_GoTest_F_Int32Defaulted +} + +func (m *GoTest) 
GetF_Int64Defaulted() int64 { + if m != nil && m.F_Int64Defaulted != nil { + return *m.F_Int64Defaulted + } + return Default_GoTest_F_Int64Defaulted +} + +func (m *GoTest) GetF_Fixed32Defaulted() uint32 { + if m != nil && m.F_Fixed32Defaulted != nil { + return *m.F_Fixed32Defaulted + } + return Default_GoTest_F_Fixed32Defaulted +} + +func (m *GoTest) GetF_Fixed64Defaulted() uint64 { + if m != nil && m.F_Fixed64Defaulted != nil { + return *m.F_Fixed64Defaulted + } + return Default_GoTest_F_Fixed64Defaulted +} + +func (m *GoTest) GetF_Uint32Defaulted() uint32 { + if m != nil && m.F_Uint32Defaulted != nil { + return *m.F_Uint32Defaulted + } + return Default_GoTest_F_Uint32Defaulted +} + +func (m *GoTest) GetF_Uint64Defaulted() uint64 { + if m != nil && m.F_Uint64Defaulted != nil { + return *m.F_Uint64Defaulted + } + return Default_GoTest_F_Uint64Defaulted +} + +func (m *GoTest) GetF_FloatDefaulted() float32 { + if m != nil && m.F_FloatDefaulted != nil { + return *m.F_FloatDefaulted + } + return Default_GoTest_F_FloatDefaulted +} + +func (m *GoTest) GetF_DoubleDefaulted() float64 { + if m != nil && m.F_DoubleDefaulted != nil { + return *m.F_DoubleDefaulted + } + return Default_GoTest_F_DoubleDefaulted +} + +func (m *GoTest) GetF_StringDefaulted() string { + if m != nil && m.F_StringDefaulted != nil { + return *m.F_StringDefaulted + } + return Default_GoTest_F_StringDefaulted +} + +func (m *GoTest) GetF_BytesDefaulted() []byte { + if m != nil && m.F_BytesDefaulted != nil { + return m.F_BytesDefaulted + } + return append([]byte(nil), Default_GoTest_F_BytesDefaulted...) 
+} + +func (m *GoTest) GetF_Sint32Defaulted() int32 { + if m != nil && m.F_Sint32Defaulted != nil { + return *m.F_Sint32Defaulted + } + return Default_GoTest_F_Sint32Defaulted +} + +func (m *GoTest) GetF_Sint64Defaulted() int64 { + if m != nil && m.F_Sint64Defaulted != nil { + return *m.F_Sint64Defaulted + } + return Default_GoTest_F_Sint64Defaulted +} + +func (m *GoTest) GetF_BoolRepeatedPacked() []bool { + if m != nil { + return m.F_BoolRepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_Int32RepeatedPacked() []int32 { + if m != nil { + return m.F_Int32RepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_Int64RepeatedPacked() []int64 { + if m != nil { + return m.F_Int64RepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_Fixed32RepeatedPacked() []uint32 { + if m != nil { + return m.F_Fixed32RepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_Fixed64RepeatedPacked() []uint64 { + if m != nil { + return m.F_Fixed64RepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_Uint32RepeatedPacked() []uint32 { + if m != nil { + return m.F_Uint32RepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_Uint64RepeatedPacked() []uint64 { + if m != nil { + return m.F_Uint64RepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_FloatRepeatedPacked() []float32 { + if m != nil { + return m.F_FloatRepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_DoubleRepeatedPacked() []float64 { + if m != nil { + return m.F_DoubleRepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_Sint32RepeatedPacked() []int32 { + if m != nil { + return m.F_Sint32RepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_Sint64RepeatedPacked() []int64 { + if m != nil { + return m.F_Sint64RepeatedPacked + } + return nil +} + +func (m *GoTest) GetRequiredgroup() *GoTest_RequiredGroup { + if m != nil { + return m.Requiredgroup + } + return nil +} + +func (m *GoTest) GetRepeatedgroup() []*GoTest_RepeatedGroup { + if m != nil { + return m.Repeatedgroup + } + 
return nil +} + +func (m *GoTest) GetOptionalgroup() *GoTest_OptionalGroup { + if m != nil { + return m.Optionalgroup + } + return nil +} + +// Required, repeated, and optional groups. +type GoTest_RequiredGroup struct { + RequiredField *string `protobuf:"bytes,71,req,name=RequiredField,json=requiredField" json:"RequiredField,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GoTest_RequiredGroup) Reset() { *m = GoTest_RequiredGroup{} } +func (m *GoTest_RequiredGroup) String() string { return proto.CompactTextString(m) } +func (*GoTest_RequiredGroup) ProtoMessage() {} +func (*GoTest_RequiredGroup) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2, 0} } + +func (m *GoTest_RequiredGroup) GetRequiredField() string { + if m != nil && m.RequiredField != nil { + return *m.RequiredField + } + return "" +} + +type GoTest_RepeatedGroup struct { + RequiredField *string `protobuf:"bytes,81,req,name=RequiredField,json=requiredField" json:"RequiredField,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GoTest_RepeatedGroup) Reset() { *m = GoTest_RepeatedGroup{} } +func (m *GoTest_RepeatedGroup) String() string { return proto.CompactTextString(m) } +func (*GoTest_RepeatedGroup) ProtoMessage() {} +func (*GoTest_RepeatedGroup) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2, 1} } + +func (m *GoTest_RepeatedGroup) GetRequiredField() string { + if m != nil && m.RequiredField != nil { + return *m.RequiredField + } + return "" +} + +type GoTest_OptionalGroup struct { + RequiredField *string `protobuf:"bytes,91,req,name=RequiredField,json=requiredField" json:"RequiredField,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GoTest_OptionalGroup) Reset() { *m = GoTest_OptionalGroup{} } +func (m *GoTest_OptionalGroup) String() string { return proto.CompactTextString(m) } +func (*GoTest_OptionalGroup) ProtoMessage() {} +func (*GoTest_OptionalGroup) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2, 2} } + 
+func (m *GoTest_OptionalGroup) GetRequiredField() string { + if m != nil && m.RequiredField != nil { + return *m.RequiredField + } + return "" +} + +// For testing skipping of unrecognized fields. +// Numbers are all big, larger than tag numbers in GoTestField, +// the message used in the corresponding test. +type GoSkipTest struct { + SkipInt32 *int32 `protobuf:"varint,11,req,name=skip_int32,json=skipInt32" json:"skip_int32,omitempty"` + SkipFixed32 *uint32 `protobuf:"fixed32,12,req,name=skip_fixed32,json=skipFixed32" json:"skip_fixed32,omitempty"` + SkipFixed64 *uint64 `protobuf:"fixed64,13,req,name=skip_fixed64,json=skipFixed64" json:"skip_fixed64,omitempty"` + SkipString *string `protobuf:"bytes,14,req,name=skip_string,json=skipString" json:"skip_string,omitempty"` + Skipgroup *GoSkipTest_SkipGroup `protobuf:"group,15,req,name=SkipGroup,json=skipgroup" json:"skipgroup,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GoSkipTest) Reset() { *m = GoSkipTest{} } +func (m *GoSkipTest) String() string { return proto.CompactTextString(m) } +func (*GoSkipTest) ProtoMessage() {} +func (*GoSkipTest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +func (m *GoSkipTest) GetSkipInt32() int32 { + if m != nil && m.SkipInt32 != nil { + return *m.SkipInt32 + } + return 0 +} + +func (m *GoSkipTest) GetSkipFixed32() uint32 { + if m != nil && m.SkipFixed32 != nil { + return *m.SkipFixed32 + } + return 0 +} + +func (m *GoSkipTest) GetSkipFixed64() uint64 { + if m != nil && m.SkipFixed64 != nil { + return *m.SkipFixed64 + } + return 0 +} + +func (m *GoSkipTest) GetSkipString() string { + if m != nil && m.SkipString != nil { + return *m.SkipString + } + return "" +} + +func (m *GoSkipTest) GetSkipgroup() *GoSkipTest_SkipGroup { + if m != nil { + return m.Skipgroup + } + return nil +} + +type GoSkipTest_SkipGroup struct { + GroupInt32 *int32 `protobuf:"varint,16,req,name=group_int32,json=groupInt32" json:"group_int32,omitempty"` + GroupString *string 
`protobuf:"bytes,17,req,name=group_string,json=groupString" json:"group_string,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GoSkipTest_SkipGroup) Reset() { *m = GoSkipTest_SkipGroup{} } +func (m *GoSkipTest_SkipGroup) String() string { return proto.CompactTextString(m) } +func (*GoSkipTest_SkipGroup) ProtoMessage() {} +func (*GoSkipTest_SkipGroup) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3, 0} } + +func (m *GoSkipTest_SkipGroup) GetGroupInt32() int32 { + if m != nil && m.GroupInt32 != nil { + return *m.GroupInt32 + } + return 0 +} + +func (m *GoSkipTest_SkipGroup) GetGroupString() string { + if m != nil && m.GroupString != nil { + return *m.GroupString + } + return "" +} + +// For testing packed/non-packed decoder switching. +// A serialized instance of one should be deserializable as the other. +type NonPackedTest struct { + A []int32 `protobuf:"varint,1,rep,name=a" json:"a,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *NonPackedTest) Reset() { *m = NonPackedTest{} } +func (m *NonPackedTest) String() string { return proto.CompactTextString(m) } +func (*NonPackedTest) ProtoMessage() {} +func (*NonPackedTest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } + +func (m *NonPackedTest) GetA() []int32 { + if m != nil { + return m.A + } + return nil +} + +type PackedTest struct { + B []int32 `protobuf:"varint,1,rep,packed,name=b" json:"b,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *PackedTest) Reset() { *m = PackedTest{} } +func (m *PackedTest) String() string { return proto.CompactTextString(m) } +func (*PackedTest) ProtoMessage() {} +func (*PackedTest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } + +func (m *PackedTest) GetB() []int32 { + if m != nil { + return m.B + } + return nil +} + +type MaxTag struct { + // Maximum possible tag number. 
+ LastField *string `protobuf:"bytes,536870911,opt,name=last_field,json=lastField" json:"last_field,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MaxTag) Reset() { *m = MaxTag{} } +func (m *MaxTag) String() string { return proto.CompactTextString(m) } +func (*MaxTag) ProtoMessage() {} +func (*MaxTag) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } + +func (m *MaxTag) GetLastField() string { + if m != nil && m.LastField != nil { + return *m.LastField + } + return "" +} + +type OldMessage struct { + Nested *OldMessage_Nested `protobuf:"bytes,1,opt,name=nested" json:"nested,omitempty"` + Num *int32 `protobuf:"varint,2,opt,name=num" json:"num,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *OldMessage) Reset() { *m = OldMessage{} } +func (m *OldMessage) String() string { return proto.CompactTextString(m) } +func (*OldMessage) ProtoMessage() {} +func (*OldMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } + +func (m *OldMessage) GetNested() *OldMessage_Nested { + if m != nil { + return m.Nested + } + return nil +} + +func (m *OldMessage) GetNum() int32 { + if m != nil && m.Num != nil { + return *m.Num + } + return 0 +} + +type OldMessage_Nested struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *OldMessage_Nested) Reset() { *m = OldMessage_Nested{} } +func (m *OldMessage_Nested) String() string { return proto.CompactTextString(m) } +func (*OldMessage_Nested) ProtoMessage() {} +func (*OldMessage_Nested) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7, 0} } + +func (m *OldMessage_Nested) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +// NewMessage is wire compatible with OldMessage; +// imagine it as a future version. 
+type NewMessage struct { + Nested *NewMessage_Nested `protobuf:"bytes,1,opt,name=nested" json:"nested,omitempty"` + // This is an int32 in OldMessage. + Num *int64 `protobuf:"varint,2,opt,name=num" json:"num,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *NewMessage) Reset() { *m = NewMessage{} } +func (m *NewMessage) String() string { return proto.CompactTextString(m) } +func (*NewMessage) ProtoMessage() {} +func (*NewMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } + +func (m *NewMessage) GetNested() *NewMessage_Nested { + if m != nil { + return m.Nested + } + return nil +} + +func (m *NewMessage) GetNum() int64 { + if m != nil && m.Num != nil { + return *m.Num + } + return 0 +} + +type NewMessage_Nested struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + FoodGroup *string `protobuf:"bytes,2,opt,name=food_group,json=foodGroup" json:"food_group,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *NewMessage_Nested) Reset() { *m = NewMessage_Nested{} } +func (m *NewMessage_Nested) String() string { return proto.CompactTextString(m) } +func (*NewMessage_Nested) ProtoMessage() {} +func (*NewMessage_Nested) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8, 0} } + +func (m *NewMessage_Nested) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *NewMessage_Nested) GetFoodGroup() string { + if m != nil && m.FoodGroup != nil { + return *m.FoodGroup + } + return "" +} + +type InnerMessage struct { + Host *string `protobuf:"bytes,1,req,name=host" json:"host,omitempty"` + Port *int32 `protobuf:"varint,2,opt,name=port,def=4000" json:"port,omitempty"` + Connected *bool `protobuf:"varint,3,opt,name=connected" json:"connected,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *InnerMessage) Reset() { *m = InnerMessage{} } +func (m *InnerMessage) String() string { return proto.CompactTextString(m) } +func (*InnerMessage) 
ProtoMessage() {} +func (*InnerMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } + +const Default_InnerMessage_Port int32 = 4000 + +func (m *InnerMessage) GetHost() string { + if m != nil && m.Host != nil { + return *m.Host + } + return "" +} + +func (m *InnerMessage) GetPort() int32 { + if m != nil && m.Port != nil { + return *m.Port + } + return Default_InnerMessage_Port +} + +func (m *InnerMessage) GetConnected() bool { + if m != nil && m.Connected != nil { + return *m.Connected + } + return false +} + +type OtherMessage struct { + Key *int64 `protobuf:"varint,1,opt,name=key" json:"key,omitempty"` + Value []byte `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` + Weight *float32 `protobuf:"fixed32,3,opt,name=weight" json:"weight,omitempty"` + Inner *InnerMessage `protobuf:"bytes,4,opt,name=inner" json:"inner,omitempty"` + XXX_extensions map[int32]proto.Extension `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *OtherMessage) Reset() { *m = OtherMessage{} } +func (m *OtherMessage) String() string { return proto.CompactTextString(m) } +func (*OtherMessage) ProtoMessage() {} +func (*OtherMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} } + +var extRange_OtherMessage = []proto.ExtensionRange{ + {100, 536870911}, +} + +func (*OtherMessage) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_OtherMessage +} +func (m *OtherMessage) ExtensionMap() map[int32]proto.Extension { + if m.XXX_extensions == nil { + m.XXX_extensions = make(map[int32]proto.Extension) + } + return m.XXX_extensions +} + +func (m *OtherMessage) GetKey() int64 { + if m != nil && m.Key != nil { + return *m.Key + } + return 0 +} + +func (m *OtherMessage) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +func (m *OtherMessage) GetWeight() float32 { + if m != nil && m.Weight != nil { + return *m.Weight + } + return 0 +} + +func (m *OtherMessage) GetInner() *InnerMessage { + if m != nil { + 
return m.Inner + } + return nil +} + +type RequiredInnerMessage struct { + LeoFinallyWonAnOscar *InnerMessage `protobuf:"bytes,1,req,name=leo_finally_won_an_oscar,json=leoFinallyWonAnOscar" json:"leo_finally_won_an_oscar,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *RequiredInnerMessage) Reset() { *m = RequiredInnerMessage{} } +func (m *RequiredInnerMessage) String() string { return proto.CompactTextString(m) } +func (*RequiredInnerMessage) ProtoMessage() {} +func (*RequiredInnerMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } + +func (m *RequiredInnerMessage) GetLeoFinallyWonAnOscar() *InnerMessage { + if m != nil { + return m.LeoFinallyWonAnOscar + } + return nil +} + +type MyMessage struct { + Count *int32 `protobuf:"varint,1,req,name=count" json:"count,omitempty"` + Name *string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"` + Quote *string `protobuf:"bytes,3,opt,name=quote" json:"quote,omitempty"` + Pet []string `protobuf:"bytes,4,rep,name=pet" json:"pet,omitempty"` + Inner *InnerMessage `protobuf:"bytes,5,opt,name=inner" json:"inner,omitempty"` + Others []*OtherMessage `protobuf:"bytes,6,rep,name=others" json:"others,omitempty"` + WeMustGoDeeper *RequiredInnerMessage `protobuf:"bytes,13,opt,name=we_must_go_deeper,json=weMustGoDeeper" json:"we_must_go_deeper,omitempty"` + RepInner []*InnerMessage `protobuf:"bytes,12,rep,name=rep_inner,json=repInner" json:"rep_inner,omitempty"` + Bikeshed *MyMessage_Color `protobuf:"varint,7,opt,name=bikeshed,enum=testdata.MyMessage_Color" json:"bikeshed,omitempty"` + Somegroup *MyMessage_SomeGroup `protobuf:"group,8,opt,name=SomeGroup,json=somegroup" json:"somegroup,omitempty"` + // This field becomes [][]byte in the generated code. 
+ RepBytes [][]byte `protobuf:"bytes,10,rep,name=rep_bytes,json=repBytes" json:"rep_bytes,omitempty"` + Bigfloat *float64 `protobuf:"fixed64,11,opt,name=bigfloat" json:"bigfloat,omitempty"` + XXX_extensions map[int32]proto.Extension `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MyMessage) Reset() { *m = MyMessage{} } +func (m *MyMessage) String() string { return proto.CompactTextString(m) } +func (*MyMessage) ProtoMessage() {} +func (*MyMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} } + +var extRange_MyMessage = []proto.ExtensionRange{ + {100, 536870911}, +} + +func (*MyMessage) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_MyMessage +} +func (m *MyMessage) ExtensionMap() map[int32]proto.Extension { + if m.XXX_extensions == nil { + m.XXX_extensions = make(map[int32]proto.Extension) + } + return m.XXX_extensions +} + +func (m *MyMessage) GetCount() int32 { + if m != nil && m.Count != nil { + return *m.Count + } + return 0 +} + +func (m *MyMessage) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *MyMessage) GetQuote() string { + if m != nil && m.Quote != nil { + return *m.Quote + } + return "" +} + +func (m *MyMessage) GetPet() []string { + if m != nil { + return m.Pet + } + return nil +} + +func (m *MyMessage) GetInner() *InnerMessage { + if m != nil { + return m.Inner + } + return nil +} + +func (m *MyMessage) GetOthers() []*OtherMessage { + if m != nil { + return m.Others + } + return nil +} + +func (m *MyMessage) GetWeMustGoDeeper() *RequiredInnerMessage { + if m != nil { + return m.WeMustGoDeeper + } + return nil +} + +func (m *MyMessage) GetRepInner() []*InnerMessage { + if m != nil { + return m.RepInner + } + return nil +} + +func (m *MyMessage) GetBikeshed() MyMessage_Color { + if m != nil && m.Bikeshed != nil { + return *m.Bikeshed + } + return MyMessage_RED +} + +func (m *MyMessage) GetSomegroup() *MyMessage_SomeGroup { + if m != nil { + return 
m.Somegroup + } + return nil +} + +func (m *MyMessage) GetRepBytes() [][]byte { + if m != nil { + return m.RepBytes + } + return nil +} + +func (m *MyMessage) GetBigfloat() float64 { + if m != nil && m.Bigfloat != nil { + return *m.Bigfloat + } + return 0 +} + +type MyMessage_SomeGroup struct { + GroupField *int32 `protobuf:"varint,9,opt,name=group_field,json=groupField" json:"group_field,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MyMessage_SomeGroup) Reset() { *m = MyMessage_SomeGroup{} } +func (m *MyMessage_SomeGroup) String() string { return proto.CompactTextString(m) } +func (*MyMessage_SomeGroup) ProtoMessage() {} +func (*MyMessage_SomeGroup) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12, 0} } + +func (m *MyMessage_SomeGroup) GetGroupField() int32 { + if m != nil && m.GroupField != nil { + return *m.GroupField + } + return 0 +} + +type Ext struct { + Data *string `protobuf:"bytes,1,opt,name=data" json:"data,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Ext) Reset() { *m = Ext{} } +func (m *Ext) String() string { return proto.CompactTextString(m) } +func (*Ext) ProtoMessage() {} +func (*Ext) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} } + +func (m *Ext) GetData() string { + if m != nil && m.Data != nil { + return *m.Data + } + return "" +} + +var E_Ext_More = &proto.ExtensionDesc{ + ExtendedType: (*MyMessage)(nil), + ExtensionType: (*Ext)(nil), + Field: 103, + Name: "testdata.Ext.more", + Tag: "bytes,103,opt,name=more", +} + +var E_Ext_Text = &proto.ExtensionDesc{ + ExtendedType: (*MyMessage)(nil), + ExtensionType: (*string)(nil), + Field: 104, + Name: "testdata.Ext.text", + Tag: "bytes,104,opt,name=text", +} + +var E_Ext_Number = &proto.ExtensionDesc{ + ExtendedType: (*MyMessage)(nil), + ExtensionType: (*int32)(nil), + Field: 105, + Name: "testdata.Ext.number", + Tag: "varint,105,opt,name=number", +} + +type ComplexExtension struct { + First *int32 `protobuf:"varint,1,opt,name=first" 
json:"first,omitempty"` + Second *int32 `protobuf:"varint,2,opt,name=second" json:"second,omitempty"` + Third []int32 `protobuf:"varint,3,rep,name=third" json:"third,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ComplexExtension) Reset() { *m = ComplexExtension{} } +func (m *ComplexExtension) String() string { return proto.CompactTextString(m) } +func (*ComplexExtension) ProtoMessage() {} +func (*ComplexExtension) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} } + +func (m *ComplexExtension) GetFirst() int32 { + if m != nil && m.First != nil { + return *m.First + } + return 0 +} + +func (m *ComplexExtension) GetSecond() int32 { + if m != nil && m.Second != nil { + return *m.Second + } + return 0 +} + +func (m *ComplexExtension) GetThird() []int32 { + if m != nil { + return m.Third + } + return nil +} + +type DefaultsMessage struct { + XXX_extensions map[int32]proto.Extension `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *DefaultsMessage) Reset() { *m = DefaultsMessage{} } +func (m *DefaultsMessage) String() string { return proto.CompactTextString(m) } +func (*DefaultsMessage) ProtoMessage() {} +func (*DefaultsMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} } + +var extRange_DefaultsMessage = []proto.ExtensionRange{ + {100, 536870911}, +} + +func (*DefaultsMessage) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_DefaultsMessage +} +func (m *DefaultsMessage) ExtensionMap() map[int32]proto.Extension { + if m.XXX_extensions == nil { + m.XXX_extensions = make(map[int32]proto.Extension) + } + return m.XXX_extensions +} + +type MyMessageSet struct { + XXX_extensions map[int32]proto.Extension `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MyMessageSet) Reset() { *m = MyMessageSet{} } +func (m *MyMessageSet) String() string { return proto.CompactTextString(m) } +func (*MyMessageSet) ProtoMessage() {} +func (*MyMessageSet) Descriptor() ([]byte, []int) { return 
fileDescriptor0, []int{16} } + +func (m *MyMessageSet) Marshal() ([]byte, error) { + return proto.MarshalMessageSet(m.ExtensionMap()) +} +func (m *MyMessageSet) Unmarshal(buf []byte) error { + return proto.UnmarshalMessageSet(buf, m.ExtensionMap()) +} +func (m *MyMessageSet) MarshalJSON() ([]byte, error) { + return proto.MarshalMessageSetJSON(m.XXX_extensions) +} +func (m *MyMessageSet) UnmarshalJSON(buf []byte) error { + return proto.UnmarshalMessageSetJSON(buf, m.XXX_extensions) +} + +// ensure MyMessageSet satisfies proto.Marshaler and proto.Unmarshaler +var _ proto.Marshaler = (*MyMessageSet)(nil) +var _ proto.Unmarshaler = (*MyMessageSet)(nil) + +var extRange_MyMessageSet = []proto.ExtensionRange{ + {100, 2147483646}, +} + +func (*MyMessageSet) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_MyMessageSet +} +func (m *MyMessageSet) ExtensionMap() map[int32]proto.Extension { + if m.XXX_extensions == nil { + m.XXX_extensions = make(map[int32]proto.Extension) + } + return m.XXX_extensions +} + +type Empty struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *Empty) Reset() { *m = Empty{} } +func (m *Empty) String() string { return proto.CompactTextString(m) } +func (*Empty) ProtoMessage() {} +func (*Empty) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} } + +type MessageList struct { + Message []*MessageList_Message `protobuf:"group,1,rep,name=Message,json=message" json:"message,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MessageList) Reset() { *m = MessageList{} } +func (m *MessageList) String() string { return proto.CompactTextString(m) } +func (*MessageList) ProtoMessage() {} +func (*MessageList) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} } + +func (m *MessageList) GetMessage() []*MessageList_Message { + if m != nil { + return m.Message + } + return nil +} + +type MessageList_Message struct { + Name *string `protobuf:"bytes,2,req,name=name" json:"name,omitempty"` + Count *int32 
`protobuf:"varint,3,req,name=count" json:"count,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MessageList_Message) Reset() { *m = MessageList_Message{} } +func (m *MessageList_Message) String() string { return proto.CompactTextString(m) } +func (*MessageList_Message) ProtoMessage() {} +func (*MessageList_Message) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18, 0} } + +func (m *MessageList_Message) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *MessageList_Message) GetCount() int32 { + if m != nil && m.Count != nil { + return *m.Count + } + return 0 +} + +type Strings struct { + StringField *string `protobuf:"bytes,1,opt,name=string_field,json=stringField" json:"string_field,omitempty"` + BytesField []byte `protobuf:"bytes,2,opt,name=bytes_field,json=bytesField" json:"bytes_field,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Strings) Reset() { *m = Strings{} } +func (m *Strings) String() string { return proto.CompactTextString(m) } +func (*Strings) ProtoMessage() {} +func (*Strings) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19} } + +func (m *Strings) GetStringField() string { + if m != nil && m.StringField != nil { + return *m.StringField + } + return "" +} + +func (m *Strings) GetBytesField() []byte { + if m != nil { + return m.BytesField + } + return nil +} + +type Defaults struct { + // Default-valued fields of all basic types. + // Same as GoTest, but copied here to make testing easier. 
+ F_Bool *bool `protobuf:"varint,1,opt,name=F_Bool,json=fBool,def=1" json:"F_Bool,omitempty"` + F_Int32 *int32 `protobuf:"varint,2,opt,name=F_Int32,json=fInt32,def=32" json:"F_Int32,omitempty"` + F_Int64 *int64 `protobuf:"varint,3,opt,name=F_Int64,json=fInt64,def=64" json:"F_Int64,omitempty"` + F_Fixed32 *uint32 `protobuf:"fixed32,4,opt,name=F_Fixed32,json=fFixed32,def=320" json:"F_Fixed32,omitempty"` + F_Fixed64 *uint64 `protobuf:"fixed64,5,opt,name=F_Fixed64,json=fFixed64,def=640" json:"F_Fixed64,omitempty"` + F_Uint32 *uint32 `protobuf:"varint,6,opt,name=F_Uint32,json=fUint32,def=3200" json:"F_Uint32,omitempty"` + F_Uint64 *uint64 `protobuf:"varint,7,opt,name=F_Uint64,json=fUint64,def=6400" json:"F_Uint64,omitempty"` + F_Float *float32 `protobuf:"fixed32,8,opt,name=F_Float,json=fFloat,def=314159" json:"F_Float,omitempty"` + F_Double *float64 `protobuf:"fixed64,9,opt,name=F_Double,json=fDouble,def=271828" json:"F_Double,omitempty"` + F_String *string `protobuf:"bytes,10,opt,name=F_String,json=fString,def=hello, \"world!\"\n" json:"F_String,omitempty"` + F_Bytes []byte `protobuf:"bytes,11,opt,name=F_Bytes,json=fBytes,def=Bignose" json:"F_Bytes,omitempty"` + F_Sint32 *int32 `protobuf:"zigzag32,12,opt,name=F_Sint32,json=fSint32,def=-32" json:"F_Sint32,omitempty"` + F_Sint64 *int64 `protobuf:"zigzag64,13,opt,name=F_Sint64,json=fSint64,def=-64" json:"F_Sint64,omitempty"` + F_Enum *Defaults_Color `protobuf:"varint,14,opt,name=F_Enum,json=fEnum,enum=testdata.Defaults_Color,def=1" json:"F_Enum,omitempty"` + // More fields with crazy defaults. + F_Pinf *float32 `protobuf:"fixed32,15,opt,name=F_Pinf,json=fPinf,def=inf" json:"F_Pinf,omitempty"` + F_Ninf *float32 `protobuf:"fixed32,16,opt,name=F_Ninf,json=fNinf,def=-inf" json:"F_Ninf,omitempty"` + F_Nan *float32 `protobuf:"fixed32,17,opt,name=F_Nan,json=fNan,def=nan" json:"F_Nan,omitempty"` + // Sub-message. + Sub *SubDefaults `protobuf:"bytes,18,opt,name=sub" json:"sub,omitempty"` + // Redundant but explicit defaults. 
+ StrZero *string `protobuf:"bytes,19,opt,name=str_zero,json=strZero,def=" json:"str_zero,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Defaults) Reset() { *m = Defaults{} } +func (m *Defaults) String() string { return proto.CompactTextString(m) } +func (*Defaults) ProtoMessage() {} +func (*Defaults) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{20} } + +const Default_Defaults_F_Bool bool = true +const Default_Defaults_F_Int32 int32 = 32 +const Default_Defaults_F_Int64 int64 = 64 +const Default_Defaults_F_Fixed32 uint32 = 320 +const Default_Defaults_F_Fixed64 uint64 = 640 +const Default_Defaults_F_Uint32 uint32 = 3200 +const Default_Defaults_F_Uint64 uint64 = 6400 +const Default_Defaults_F_Float float32 = 314159 +const Default_Defaults_F_Double float64 = 271828 +const Default_Defaults_F_String string = "hello, \"world!\"\n" + +var Default_Defaults_F_Bytes []byte = []byte("Bignose") + +const Default_Defaults_F_Sint32 int32 = -32 +const Default_Defaults_F_Sint64 int64 = -64 +const Default_Defaults_F_Enum Defaults_Color = Defaults_GREEN + +var Default_Defaults_F_Pinf float32 = float32(math.Inf(1)) +var Default_Defaults_F_Ninf float32 = float32(math.Inf(-1)) +var Default_Defaults_F_Nan float32 = float32(math.NaN()) + +func (m *Defaults) GetF_Bool() bool { + if m != nil && m.F_Bool != nil { + return *m.F_Bool + } + return Default_Defaults_F_Bool +} + +func (m *Defaults) GetF_Int32() int32 { + if m != nil && m.F_Int32 != nil { + return *m.F_Int32 + } + return Default_Defaults_F_Int32 +} + +func (m *Defaults) GetF_Int64() int64 { + if m != nil && m.F_Int64 != nil { + return *m.F_Int64 + } + return Default_Defaults_F_Int64 +} + +func (m *Defaults) GetF_Fixed32() uint32 { + if m != nil && m.F_Fixed32 != nil { + return *m.F_Fixed32 + } + return Default_Defaults_F_Fixed32 +} + +func (m *Defaults) GetF_Fixed64() uint64 { + if m != nil && m.F_Fixed64 != nil { + return *m.F_Fixed64 + } + return Default_Defaults_F_Fixed64 +} + +func (m *Defaults) 
GetF_Uint32() uint32 { + if m != nil && m.F_Uint32 != nil { + return *m.F_Uint32 + } + return Default_Defaults_F_Uint32 +} + +func (m *Defaults) GetF_Uint64() uint64 { + if m != nil && m.F_Uint64 != nil { + return *m.F_Uint64 + } + return Default_Defaults_F_Uint64 +} + +func (m *Defaults) GetF_Float() float32 { + if m != nil && m.F_Float != nil { + return *m.F_Float + } + return Default_Defaults_F_Float +} + +func (m *Defaults) GetF_Double() float64 { + if m != nil && m.F_Double != nil { + return *m.F_Double + } + return Default_Defaults_F_Double +} + +func (m *Defaults) GetF_String() string { + if m != nil && m.F_String != nil { + return *m.F_String + } + return Default_Defaults_F_String +} + +func (m *Defaults) GetF_Bytes() []byte { + if m != nil && m.F_Bytes != nil { + return m.F_Bytes + } + return append([]byte(nil), Default_Defaults_F_Bytes...) +} + +func (m *Defaults) GetF_Sint32() int32 { + if m != nil && m.F_Sint32 != nil { + return *m.F_Sint32 + } + return Default_Defaults_F_Sint32 +} + +func (m *Defaults) GetF_Sint64() int64 { + if m != nil && m.F_Sint64 != nil { + return *m.F_Sint64 + } + return Default_Defaults_F_Sint64 +} + +func (m *Defaults) GetF_Enum() Defaults_Color { + if m != nil && m.F_Enum != nil { + return *m.F_Enum + } + return Default_Defaults_F_Enum +} + +func (m *Defaults) GetF_Pinf() float32 { + if m != nil && m.F_Pinf != nil { + return *m.F_Pinf + } + return Default_Defaults_F_Pinf +} + +func (m *Defaults) GetF_Ninf() float32 { + if m != nil && m.F_Ninf != nil { + return *m.F_Ninf + } + return Default_Defaults_F_Ninf +} + +func (m *Defaults) GetF_Nan() float32 { + if m != nil && m.F_Nan != nil { + return *m.F_Nan + } + return Default_Defaults_F_Nan +} + +func (m *Defaults) GetSub() *SubDefaults { + if m != nil { + return m.Sub + } + return nil +} + +func (m *Defaults) GetStrZero() string { + if m != nil && m.StrZero != nil { + return *m.StrZero + } + return "" +} + +type SubDefaults struct { + N *int64 
`protobuf:"varint,1,opt,name=n,def=7" json:"n,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *SubDefaults) Reset() { *m = SubDefaults{} } +func (m *SubDefaults) String() string { return proto.CompactTextString(m) } +func (*SubDefaults) ProtoMessage() {} +func (*SubDefaults) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{21} } + +const Default_SubDefaults_N int64 = 7 + +func (m *SubDefaults) GetN() int64 { + if m != nil && m.N != nil { + return *m.N + } + return Default_SubDefaults_N +} + +type RepeatedEnum struct { + Color []RepeatedEnum_Color `protobuf:"varint,1,rep,name=color,enum=testdata.RepeatedEnum_Color" json:"color,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *RepeatedEnum) Reset() { *m = RepeatedEnum{} } +func (m *RepeatedEnum) String() string { return proto.CompactTextString(m) } +func (*RepeatedEnum) ProtoMessage() {} +func (*RepeatedEnum) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{22} } + +func (m *RepeatedEnum) GetColor() []RepeatedEnum_Color { + if m != nil { + return m.Color + } + return nil +} + +type MoreRepeated struct { + Bools []bool `protobuf:"varint,1,rep,name=bools" json:"bools,omitempty"` + BoolsPacked []bool `protobuf:"varint,2,rep,packed,name=bools_packed,json=boolsPacked" json:"bools_packed,omitempty"` + Ints []int32 `protobuf:"varint,3,rep,name=ints" json:"ints,omitempty"` + IntsPacked []int32 `protobuf:"varint,4,rep,packed,name=ints_packed,json=intsPacked" json:"ints_packed,omitempty"` + Int64SPacked []int64 `protobuf:"varint,7,rep,packed,name=int64s_packed,json=int64sPacked" json:"int64s_packed,omitempty"` + Strings []string `protobuf:"bytes,5,rep,name=strings" json:"strings,omitempty"` + Fixeds []uint32 `protobuf:"fixed32,6,rep,name=fixeds" json:"fixeds,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MoreRepeated) Reset() { *m = MoreRepeated{} } +func (m *MoreRepeated) String() string { return proto.CompactTextString(m) } +func (*MoreRepeated) 
ProtoMessage() {} +func (*MoreRepeated) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{23} } + +func (m *MoreRepeated) GetBools() []bool { + if m != nil { + return m.Bools + } + return nil +} + +func (m *MoreRepeated) GetBoolsPacked() []bool { + if m != nil { + return m.BoolsPacked + } + return nil +} + +func (m *MoreRepeated) GetInts() []int32 { + if m != nil { + return m.Ints + } + return nil +} + +func (m *MoreRepeated) GetIntsPacked() []int32 { + if m != nil { + return m.IntsPacked + } + return nil +} + +func (m *MoreRepeated) GetInt64SPacked() []int64 { + if m != nil { + return m.Int64SPacked + } + return nil +} + +func (m *MoreRepeated) GetStrings() []string { + if m != nil { + return m.Strings + } + return nil +} + +func (m *MoreRepeated) GetFixeds() []uint32 { + if m != nil { + return m.Fixeds + } + return nil +} + +type GroupOld struct { + G *GroupOld_G `protobuf:"group,101,opt,name=G,json=g" json:"g,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GroupOld) Reset() { *m = GroupOld{} } +func (m *GroupOld) String() string { return proto.CompactTextString(m) } +func (*GroupOld) ProtoMessage() {} +func (*GroupOld) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{24} } + +func (m *GroupOld) GetG() *GroupOld_G { + if m != nil { + return m.G + } + return nil +} + +type GroupOld_G struct { + X *int32 `protobuf:"varint,2,opt,name=x" json:"x,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GroupOld_G) Reset() { *m = GroupOld_G{} } +func (m *GroupOld_G) String() string { return proto.CompactTextString(m) } +func (*GroupOld_G) ProtoMessage() {} +func (*GroupOld_G) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{24, 0} } + +func (m *GroupOld_G) GetX() int32 { + if m != nil && m.X != nil { + return *m.X + } + return 0 +} + +type GroupNew struct { + G *GroupNew_G `protobuf:"group,101,opt,name=G,json=g" json:"g,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GroupNew) Reset() { *m = 
GroupNew{} } +func (m *GroupNew) String() string { return proto.CompactTextString(m) } +func (*GroupNew) ProtoMessage() {} +func (*GroupNew) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{25} } + +func (m *GroupNew) GetG() *GroupNew_G { + if m != nil { + return m.G + } + return nil +} + +type GroupNew_G struct { + X *int32 `protobuf:"varint,2,opt,name=x" json:"x,omitempty"` + Y *int32 `protobuf:"varint,3,opt,name=y" json:"y,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GroupNew_G) Reset() { *m = GroupNew_G{} } +func (m *GroupNew_G) String() string { return proto.CompactTextString(m) } +func (*GroupNew_G) ProtoMessage() {} +func (*GroupNew_G) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{25, 0} } + +func (m *GroupNew_G) GetX() int32 { + if m != nil && m.X != nil { + return *m.X + } + return 0 +} + +func (m *GroupNew_G) GetY() int32 { + if m != nil && m.Y != nil { + return *m.Y + } + return 0 +} + +type FloatingPoint struct { + F *float64 `protobuf:"fixed64,1,req,name=f" json:"f,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *FloatingPoint) Reset() { *m = FloatingPoint{} } +func (m *FloatingPoint) String() string { return proto.CompactTextString(m) } +func (*FloatingPoint) ProtoMessage() {} +func (*FloatingPoint) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{26} } + +func (m *FloatingPoint) GetF() float64 { + if m != nil && m.F != nil { + return *m.F + } + return 0 +} + +type MessageWithMap struct { + NameMapping map[int32]string `protobuf:"bytes,1,rep,name=name_mapping,json=nameMapping" json:"name_mapping,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + MsgMapping map[int64]*FloatingPoint `protobuf:"bytes,2,rep,name=msg_mapping,json=msgMapping" json:"msg_mapping,omitempty" protobuf_key:"zigzag64,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + ByteMapping map[bool][]byte `protobuf:"bytes,3,rep,name=byte_mapping,json=byteMapping" 
json:"byte_mapping,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + StrToStr map[string]string `protobuf:"bytes,4,rep,name=str_to_str,json=strToStr" json:"str_to_str,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MessageWithMap) Reset() { *m = MessageWithMap{} } +func (m *MessageWithMap) String() string { return proto.CompactTextString(m) } +func (*MessageWithMap) ProtoMessage() {} +func (*MessageWithMap) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{27} } + +func (m *MessageWithMap) GetNameMapping() map[int32]string { + if m != nil { + return m.NameMapping + } + return nil +} + +func (m *MessageWithMap) GetMsgMapping() map[int64]*FloatingPoint { + if m != nil { + return m.MsgMapping + } + return nil +} + +func (m *MessageWithMap) GetByteMapping() map[bool][]byte { + if m != nil { + return m.ByteMapping + } + return nil +} + +func (m *MessageWithMap) GetStrToStr() map[string]string { + if m != nil { + return m.StrToStr + } + return nil +} + +type Oneof struct { + // Types that are valid to be assigned to Union: + // *Oneof_F_Bool + // *Oneof_F_Int32 + // *Oneof_F_Int64 + // *Oneof_F_Fixed32 + // *Oneof_F_Fixed64 + // *Oneof_F_Uint32 + // *Oneof_F_Uint64 + // *Oneof_F_Float + // *Oneof_F_Double + // *Oneof_F_String + // *Oneof_F_Bytes + // *Oneof_F_Sint32 + // *Oneof_F_Sint64 + // *Oneof_F_Enum + // *Oneof_F_Message + // *Oneof_FGroup + // *Oneof_F_Largest_Tag + Union isOneof_Union `protobuf_oneof:"union"` + // Types that are valid to be assigned to Tormato: + // *Oneof_Value + Tormato isOneof_Tormato `protobuf_oneof:"tormato"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Oneof) Reset() { *m = Oneof{} } +func (m *Oneof) String() string { return proto.CompactTextString(m) } +func (*Oneof) ProtoMessage() {} +func (*Oneof) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{28} } + +type isOneof_Union 
interface { + isOneof_Union() +} +type isOneof_Tormato interface { + isOneof_Tormato() +} + +type Oneof_F_Bool struct { + F_Bool bool `protobuf:"varint,1,opt,name=F_Bool,json=fBool,oneof"` +} +type Oneof_F_Int32 struct { + F_Int32 int32 `protobuf:"varint,2,opt,name=F_Int32,json=fInt32,oneof"` +} +type Oneof_F_Int64 struct { + F_Int64 int64 `protobuf:"varint,3,opt,name=F_Int64,json=fInt64,oneof"` +} +type Oneof_F_Fixed32 struct { + F_Fixed32 uint32 `protobuf:"fixed32,4,opt,name=F_Fixed32,json=fFixed32,oneof"` +} +type Oneof_F_Fixed64 struct { + F_Fixed64 uint64 `protobuf:"fixed64,5,opt,name=F_Fixed64,json=fFixed64,oneof"` +} +type Oneof_F_Uint32 struct { + F_Uint32 uint32 `protobuf:"varint,6,opt,name=F_Uint32,json=fUint32,oneof"` +} +type Oneof_F_Uint64 struct { + F_Uint64 uint64 `protobuf:"varint,7,opt,name=F_Uint64,json=fUint64,oneof"` +} +type Oneof_F_Float struct { + F_Float float32 `protobuf:"fixed32,8,opt,name=F_Float,json=fFloat,oneof"` +} +type Oneof_F_Double struct { + F_Double float64 `protobuf:"fixed64,9,opt,name=F_Double,json=fDouble,oneof"` +} +type Oneof_F_String struct { + F_String string `protobuf:"bytes,10,opt,name=F_String,json=fString,oneof"` +} +type Oneof_F_Bytes struct { + F_Bytes []byte `protobuf:"bytes,11,opt,name=F_Bytes,json=fBytes,oneof"` +} +type Oneof_F_Sint32 struct { + F_Sint32 int32 `protobuf:"zigzag32,12,opt,name=F_Sint32,json=fSint32,oneof"` +} +type Oneof_F_Sint64 struct { + F_Sint64 int64 `protobuf:"zigzag64,13,opt,name=F_Sint64,json=fSint64,oneof"` +} +type Oneof_F_Enum struct { + F_Enum MyMessage_Color `protobuf:"varint,14,opt,name=F_Enum,json=fEnum,enum=testdata.MyMessage_Color,oneof"` +} +type Oneof_F_Message struct { + F_Message *GoTestField `protobuf:"bytes,15,opt,name=F_Message,json=fMessage,oneof"` +} +type Oneof_FGroup struct { + FGroup *Oneof_F_Group `protobuf:"group,16,opt,name=F_Group,json=fGroup,oneof"` +} +type Oneof_F_Largest_Tag struct { + F_Largest_Tag int32 
`protobuf:"varint,536870911,opt,name=F_Largest_Tag,json=fLargestTag,oneof"` +} +type Oneof_Value struct { + Value int32 `protobuf:"varint,100,opt,name=value,oneof"` +} + +func (*Oneof_F_Bool) isOneof_Union() {} +func (*Oneof_F_Int32) isOneof_Union() {} +func (*Oneof_F_Int64) isOneof_Union() {} +func (*Oneof_F_Fixed32) isOneof_Union() {} +func (*Oneof_F_Fixed64) isOneof_Union() {} +func (*Oneof_F_Uint32) isOneof_Union() {} +func (*Oneof_F_Uint64) isOneof_Union() {} +func (*Oneof_F_Float) isOneof_Union() {} +func (*Oneof_F_Double) isOneof_Union() {} +func (*Oneof_F_String) isOneof_Union() {} +func (*Oneof_F_Bytes) isOneof_Union() {} +func (*Oneof_F_Sint32) isOneof_Union() {} +func (*Oneof_F_Sint64) isOneof_Union() {} +func (*Oneof_F_Enum) isOneof_Union() {} +func (*Oneof_F_Message) isOneof_Union() {} +func (*Oneof_FGroup) isOneof_Union() {} +func (*Oneof_F_Largest_Tag) isOneof_Union() {} +func (*Oneof_Value) isOneof_Tormato() {} + +func (m *Oneof) GetUnion() isOneof_Union { + if m != nil { + return m.Union + } + return nil +} +func (m *Oneof) GetTormato() isOneof_Tormato { + if m != nil { + return m.Tormato + } + return nil +} + +func (m *Oneof) GetF_Bool() bool { + if x, ok := m.GetUnion().(*Oneof_F_Bool); ok { + return x.F_Bool + } + return false +} + +func (m *Oneof) GetF_Int32() int32 { + if x, ok := m.GetUnion().(*Oneof_F_Int32); ok { + return x.F_Int32 + } + return 0 +} + +func (m *Oneof) GetF_Int64() int64 { + if x, ok := m.GetUnion().(*Oneof_F_Int64); ok { + return x.F_Int64 + } + return 0 +} + +func (m *Oneof) GetF_Fixed32() uint32 { + if x, ok := m.GetUnion().(*Oneof_F_Fixed32); ok { + return x.F_Fixed32 + } + return 0 +} + +func (m *Oneof) GetF_Fixed64() uint64 { + if x, ok := m.GetUnion().(*Oneof_F_Fixed64); ok { + return x.F_Fixed64 + } + return 0 +} + +func (m *Oneof) GetF_Uint32() uint32 { + if x, ok := m.GetUnion().(*Oneof_F_Uint32); ok { + return x.F_Uint32 + } + return 0 +} + +func (m *Oneof) GetF_Uint64() uint64 { + if x, ok := 
m.GetUnion().(*Oneof_F_Uint64); ok { + return x.F_Uint64 + } + return 0 +} + +func (m *Oneof) GetF_Float() float32 { + if x, ok := m.GetUnion().(*Oneof_F_Float); ok { + return x.F_Float + } + return 0 +} + +func (m *Oneof) GetF_Double() float64 { + if x, ok := m.GetUnion().(*Oneof_F_Double); ok { + return x.F_Double + } + return 0 +} + +func (m *Oneof) GetF_String() string { + if x, ok := m.GetUnion().(*Oneof_F_String); ok { + return x.F_String + } + return "" +} + +func (m *Oneof) GetF_Bytes() []byte { + if x, ok := m.GetUnion().(*Oneof_F_Bytes); ok { + return x.F_Bytes + } + return nil +} + +func (m *Oneof) GetF_Sint32() int32 { + if x, ok := m.GetUnion().(*Oneof_F_Sint32); ok { + return x.F_Sint32 + } + return 0 +} + +func (m *Oneof) GetF_Sint64() int64 { + if x, ok := m.GetUnion().(*Oneof_F_Sint64); ok { + return x.F_Sint64 + } + return 0 +} + +func (m *Oneof) GetF_Enum() MyMessage_Color { + if x, ok := m.GetUnion().(*Oneof_F_Enum); ok { + return x.F_Enum + } + return MyMessage_RED +} + +func (m *Oneof) GetF_Message() *GoTestField { + if x, ok := m.GetUnion().(*Oneof_F_Message); ok { + return x.F_Message + } + return nil +} + +func (m *Oneof) GetFGroup() *Oneof_F_Group { + if x, ok := m.GetUnion().(*Oneof_FGroup); ok { + return x.FGroup + } + return nil +} + +func (m *Oneof) GetF_Largest_Tag() int32 { + if x, ok := m.GetUnion().(*Oneof_F_Largest_Tag); ok { + return x.F_Largest_Tag + } + return 0 +} + +func (m *Oneof) GetValue() int32 { + if x, ok := m.GetTormato().(*Oneof_Value); ok { + return x.Value + } + return 0 +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*Oneof) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Oneof_OneofMarshaler, _Oneof_OneofUnmarshaler, _Oneof_OneofSizer, []interface{}{ + (*Oneof_F_Bool)(nil), + (*Oneof_F_Int32)(nil), + (*Oneof_F_Int64)(nil), + (*Oneof_F_Fixed32)(nil), + (*Oneof_F_Fixed64)(nil), + (*Oneof_F_Uint32)(nil), + (*Oneof_F_Uint64)(nil), + (*Oneof_F_Float)(nil), + (*Oneof_F_Double)(nil), + (*Oneof_F_String)(nil), + (*Oneof_F_Bytes)(nil), + (*Oneof_F_Sint32)(nil), + (*Oneof_F_Sint64)(nil), + (*Oneof_F_Enum)(nil), + (*Oneof_F_Message)(nil), + (*Oneof_FGroup)(nil), + (*Oneof_F_Largest_Tag)(nil), + (*Oneof_Value)(nil), + } +} + +func _Oneof_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Oneof) + // union + switch x := m.Union.(type) { + case *Oneof_F_Bool: + t := uint64(0) + if x.F_Bool { + t = 1 + } + b.EncodeVarint(1<<3 | proto.WireVarint) + b.EncodeVarint(t) + case *Oneof_F_Int32: + b.EncodeVarint(2<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.F_Int32)) + case *Oneof_F_Int64: + b.EncodeVarint(3<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.F_Int64)) + case *Oneof_F_Fixed32: + b.EncodeVarint(4<<3 | proto.WireFixed32) + b.EncodeFixed32(uint64(x.F_Fixed32)) + case *Oneof_F_Fixed64: + b.EncodeVarint(5<<3 | proto.WireFixed64) + b.EncodeFixed64(uint64(x.F_Fixed64)) + case *Oneof_F_Uint32: + b.EncodeVarint(6<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.F_Uint32)) + case *Oneof_F_Uint64: + b.EncodeVarint(7<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.F_Uint64)) + case *Oneof_F_Float: + b.EncodeVarint(8<<3 | proto.WireFixed32) + b.EncodeFixed32(uint64(math.Float32bits(x.F_Float))) + case *Oneof_F_Double: + b.EncodeVarint(9<<3 | proto.WireFixed64) + b.EncodeFixed64(math.Float64bits(x.F_Double)) + case *Oneof_F_String: + b.EncodeVarint(10<<3 | proto.WireBytes) + b.EncodeStringBytes(x.F_String) + case 
*Oneof_F_Bytes: + b.EncodeVarint(11<<3 | proto.WireBytes) + b.EncodeRawBytes(x.F_Bytes) + case *Oneof_F_Sint32: + b.EncodeVarint(12<<3 | proto.WireVarint) + b.EncodeZigzag32(uint64(x.F_Sint32)) + case *Oneof_F_Sint64: + b.EncodeVarint(13<<3 | proto.WireVarint) + b.EncodeZigzag64(uint64(x.F_Sint64)) + case *Oneof_F_Enum: + b.EncodeVarint(14<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.F_Enum)) + case *Oneof_F_Message: + b.EncodeVarint(15<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.F_Message); err != nil { + return err + } + case *Oneof_FGroup: + b.EncodeVarint(16<<3 | proto.WireStartGroup) + if err := b.Marshal(x.FGroup); err != nil { + return err + } + b.EncodeVarint(16<<3 | proto.WireEndGroup) + case *Oneof_F_Largest_Tag: + b.EncodeVarint(536870911<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.F_Largest_Tag)) + case nil: + default: + return fmt.Errorf("Oneof.Union has unexpected type %T", x) + } + // tormato + switch x := m.Tormato.(type) { + case *Oneof_Value: + b.EncodeVarint(100<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.Value)) + case nil: + default: + return fmt.Errorf("Oneof.Tormato has unexpected type %T", x) + } + return nil +} + +func _Oneof_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Oneof) + switch tag { + case 1: // union.F_Bool + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Union = &Oneof_F_Bool{x != 0} + return true, err + case 2: // union.F_Int32 + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Union = &Oneof_F_Int32{int32(x)} + return true, err + case 3: // union.F_Int64 + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Union = &Oneof_F_Int64{int64(x)} + return true, err + case 4: // union.F_Fixed32 + if wire != proto.WireFixed32 { + return true, proto.ErrInternalBadWireType + } + x, 
err := b.DecodeFixed32() + m.Union = &Oneof_F_Fixed32{uint32(x)} + return true, err + case 5: // union.F_Fixed64 + if wire != proto.WireFixed64 { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeFixed64() + m.Union = &Oneof_F_Fixed64{x} + return true, err + case 6: // union.F_Uint32 + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Union = &Oneof_F_Uint32{uint32(x)} + return true, err + case 7: // union.F_Uint64 + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Union = &Oneof_F_Uint64{x} + return true, err + case 8: // union.F_Float + if wire != proto.WireFixed32 { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeFixed32() + m.Union = &Oneof_F_Float{math.Float32frombits(uint32(x))} + return true, err + case 9: // union.F_Double + if wire != proto.WireFixed64 { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeFixed64() + m.Union = &Oneof_F_Double{math.Float64frombits(x)} + return true, err + case 10: // union.F_String + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Union = &Oneof_F_String{x} + return true, err + case 11: // union.F_Bytes + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeRawBytes(true) + m.Union = &Oneof_F_Bytes{x} + return true, err + case 12: // union.F_Sint32 + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeZigzag32() + m.Union = &Oneof_F_Sint32{int32(x)} + return true, err + case 13: // union.F_Sint64 + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeZigzag64() + m.Union = &Oneof_F_Sint64{int64(x)} + return true, err + case 14: // union.F_Enum + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + 
m.Union = &Oneof_F_Enum{MyMessage_Color(x)} + return true, err + case 15: // union.F_Message + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(GoTestField) + err := b.DecodeMessage(msg) + m.Union = &Oneof_F_Message{msg} + return true, err + case 16: // union.f_group + if wire != proto.WireStartGroup { + return true, proto.ErrInternalBadWireType + } + msg := new(Oneof_F_Group) + err := b.DecodeGroup(msg) + m.Union = &Oneof_FGroup{msg} + return true, err + case 536870911: // union.F_Largest_Tag + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Union = &Oneof_F_Largest_Tag{int32(x)} + return true, err + case 100: // tormato.value + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Tormato = &Oneof_Value{int32(x)} + return true, err + default: + return false, nil + } +} + +func _Oneof_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Oneof) + // union + switch x := m.Union.(type) { + case *Oneof_F_Bool: + n += proto.SizeVarint(1<<3 | proto.WireVarint) + n += 1 + case *Oneof_F_Int32: + n += proto.SizeVarint(2<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.F_Int32)) + case *Oneof_F_Int64: + n += proto.SizeVarint(3<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.F_Int64)) + case *Oneof_F_Fixed32: + n += proto.SizeVarint(4<<3 | proto.WireFixed32) + n += 4 + case *Oneof_F_Fixed64: + n += proto.SizeVarint(5<<3 | proto.WireFixed64) + n += 8 + case *Oneof_F_Uint32: + n += proto.SizeVarint(6<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.F_Uint32)) + case *Oneof_F_Uint64: + n += proto.SizeVarint(7<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.F_Uint64)) + case *Oneof_F_Float: + n += proto.SizeVarint(8<<3 | proto.WireFixed32) + n += 4 + case *Oneof_F_Double: + n += proto.SizeVarint(9<<3 | proto.WireFixed64) + n += 8 + case *Oneof_F_String: + n += proto.SizeVarint(10<<3 | 
proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.F_String))) + n += len(x.F_String) + case *Oneof_F_Bytes: + n += proto.SizeVarint(11<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.F_Bytes))) + n += len(x.F_Bytes) + case *Oneof_F_Sint32: + n += proto.SizeVarint(12<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64((uint32(x.F_Sint32) << 1) ^ uint32((int32(x.F_Sint32) >> 31)))) + case *Oneof_F_Sint64: + n += proto.SizeVarint(13<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(uint64(x.F_Sint64<<1) ^ uint64((int64(x.F_Sint64) >> 63)))) + case *Oneof_F_Enum: + n += proto.SizeVarint(14<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.F_Enum)) + case *Oneof_F_Message: + s := proto.Size(x.F_Message) + n += proto.SizeVarint(15<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Oneof_FGroup: + n += proto.SizeVarint(16<<3 | proto.WireStartGroup) + n += proto.Size(x.FGroup) + n += proto.SizeVarint(16<<3 | proto.WireEndGroup) + case *Oneof_F_Largest_Tag: + n += proto.SizeVarint(536870911<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.F_Largest_Tag)) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + // tormato + switch x := m.Tormato.(type) { + case *Oneof_Value: + n += proto.SizeVarint(100<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.Value)) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type Oneof_F_Group struct { + X *int32 `protobuf:"varint,17,opt,name=x" json:"x,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Oneof_F_Group) Reset() { *m = Oneof_F_Group{} } +func (m *Oneof_F_Group) String() string { return proto.CompactTextString(m) } +func (*Oneof_F_Group) ProtoMessage() {} +func (*Oneof_F_Group) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{28, 0} } + +func (m *Oneof_F_Group) GetX() int32 { + if m != nil && m.X != nil { + return *m.X + } + return 0 +} + +type 
Communique struct { + MakeMeCry *bool `protobuf:"varint,1,opt,name=make_me_cry,json=makeMeCry" json:"make_me_cry,omitempty"` + // This is a oneof, called "union". + // + // Types that are valid to be assigned to Union: + // *Communique_Number + // *Communique_Name + // *Communique_Data + // *Communique_TempC + // *Communique_Col + // *Communique_Msg + Union isCommunique_Union `protobuf_oneof:"union"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Communique) Reset() { *m = Communique{} } +func (m *Communique) String() string { return proto.CompactTextString(m) } +func (*Communique) ProtoMessage() {} +func (*Communique) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{29} } + +type isCommunique_Union interface { + isCommunique_Union() +} + +type Communique_Number struct { + Number int32 `protobuf:"varint,5,opt,name=number,oneof"` +} +type Communique_Name struct { + Name string `protobuf:"bytes,6,opt,name=name,oneof"` +} +type Communique_Data struct { + Data []byte `protobuf:"bytes,7,opt,name=data,oneof"` +} +type Communique_TempC struct { + TempC float64 `protobuf:"fixed64,8,opt,name=temp_c,json=tempC,oneof"` +} +type Communique_Col struct { + Col MyMessage_Color `protobuf:"varint,9,opt,name=col,enum=testdata.MyMessage_Color,oneof"` +} +type Communique_Msg struct { + Msg *Strings `protobuf:"bytes,10,opt,name=msg,oneof"` +} + +func (*Communique_Number) isCommunique_Union() {} +func (*Communique_Name) isCommunique_Union() {} +func (*Communique_Data) isCommunique_Union() {} +func (*Communique_TempC) isCommunique_Union() {} +func (*Communique_Col) isCommunique_Union() {} +func (*Communique_Msg) isCommunique_Union() {} + +func (m *Communique) GetUnion() isCommunique_Union { + if m != nil { + return m.Union + } + return nil +} + +func (m *Communique) GetMakeMeCry() bool { + if m != nil && m.MakeMeCry != nil { + return *m.MakeMeCry + } + return false +} + +func (m *Communique) GetNumber() int32 { + if x, ok := m.GetUnion().(*Communique_Number); ok { + 
return x.Number + } + return 0 +} + +func (m *Communique) GetName() string { + if x, ok := m.GetUnion().(*Communique_Name); ok { + return x.Name + } + return "" +} + +func (m *Communique) GetData() []byte { + if x, ok := m.GetUnion().(*Communique_Data); ok { + return x.Data + } + return nil +} + +func (m *Communique) GetTempC() float64 { + if x, ok := m.GetUnion().(*Communique_TempC); ok { + return x.TempC + } + return 0 +} + +func (m *Communique) GetCol() MyMessage_Color { + if x, ok := m.GetUnion().(*Communique_Col); ok { + return x.Col + } + return MyMessage_RED +} + +func (m *Communique) GetMsg() *Strings { + if x, ok := m.GetUnion().(*Communique_Msg); ok { + return x.Msg + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*Communique) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Communique_OneofMarshaler, _Communique_OneofUnmarshaler, _Communique_OneofSizer, []interface{}{ + (*Communique_Number)(nil), + (*Communique_Name)(nil), + (*Communique_Data)(nil), + (*Communique_TempC)(nil), + (*Communique_Col)(nil), + (*Communique_Msg)(nil), + } +} + +func _Communique_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Communique) + // union + switch x := m.Union.(type) { + case *Communique_Number: + b.EncodeVarint(5<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.Number)) + case *Communique_Name: + b.EncodeVarint(6<<3 | proto.WireBytes) + b.EncodeStringBytes(x.Name) + case *Communique_Data: + b.EncodeVarint(7<<3 | proto.WireBytes) + b.EncodeRawBytes(x.Data) + case *Communique_TempC: + b.EncodeVarint(8<<3 | proto.WireFixed64) + b.EncodeFixed64(math.Float64bits(x.TempC)) + case *Communique_Col: + b.EncodeVarint(9<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.Col)) + case *Communique_Msg: + b.EncodeVarint(10<<3 | proto.WireBytes) + if err := 
b.EncodeMessage(x.Msg); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("Communique.Union has unexpected type %T", x) + } + return nil +} + +func _Communique_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Communique) + switch tag { + case 5: // union.number + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Union = &Communique_Number{int32(x)} + return true, err + case 6: // union.name + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Union = &Communique_Name{x} + return true, err + case 7: // union.data + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeRawBytes(true) + m.Union = &Communique_Data{x} + return true, err + case 8: // union.temp_c + if wire != proto.WireFixed64 { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeFixed64() + m.Union = &Communique_TempC{math.Float64frombits(x)} + return true, err + case 9: // union.col + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Union = &Communique_Col{MyMessage_Color(x)} + return true, err + case 10: // union.msg + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Strings) + err := b.DecodeMessage(msg) + m.Union = &Communique_Msg{msg} + return true, err + default: + return false, nil + } +} + +func _Communique_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Communique) + // union + switch x := m.Union.(type) { + case *Communique_Number: + n += proto.SizeVarint(5<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.Number)) + case *Communique_Name: + n += proto.SizeVarint(6<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.Name))) + n += len(x.Name) + case *Communique_Data: + n += proto.SizeVarint(7<<3 | proto.WireBytes) + n += 
proto.SizeVarint(uint64(len(x.Data))) + n += len(x.Data) + case *Communique_TempC: + n += proto.SizeVarint(8<<3 | proto.WireFixed64) + n += 8 + case *Communique_Col: + n += proto.SizeVarint(9<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.Col)) + case *Communique_Msg: + s := proto.Size(x.Msg) + n += proto.SizeVarint(10<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +var E_Greeting = &proto.ExtensionDesc{ + ExtendedType: (*MyMessage)(nil), + ExtensionType: ([]string)(nil), + Field: 106, + Name: "testdata.greeting", + Tag: "bytes,106,rep,name=greeting", +} + +var E_Complex = &proto.ExtensionDesc{ + ExtendedType: (*OtherMessage)(nil), + ExtensionType: (*ComplexExtension)(nil), + Field: 200, + Name: "testdata.complex", + Tag: "bytes,200,opt,name=complex", +} + +var E_RComplex = &proto.ExtensionDesc{ + ExtendedType: (*OtherMessage)(nil), + ExtensionType: ([]*ComplexExtension)(nil), + Field: 201, + Name: "testdata.r_complex", + Tag: "bytes,201,rep,name=r_complex,json=rComplex", +} + +var E_NoDefaultDouble = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*float64)(nil), + Field: 101, + Name: "testdata.no_default_double", + Tag: "fixed64,101,opt,name=no_default_double,json=noDefaultDouble", +} + +var E_NoDefaultFloat = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*float32)(nil), + Field: 102, + Name: "testdata.no_default_float", + Tag: "fixed32,102,opt,name=no_default_float,json=noDefaultFloat", +} + +var E_NoDefaultInt32 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*int32)(nil), + Field: 103, + Name: "testdata.no_default_int32", + Tag: "varint,103,opt,name=no_default_int32,json=noDefaultInt32", +} + +var E_NoDefaultInt64 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*int64)(nil), + Field: 104, + 
Name: "testdata.no_default_int64", + Tag: "varint,104,opt,name=no_default_int64,json=noDefaultInt64", +} + +var E_NoDefaultUint32 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*uint32)(nil), + Field: 105, + Name: "testdata.no_default_uint32", + Tag: "varint,105,opt,name=no_default_uint32,json=noDefaultUint32", +} + +var E_NoDefaultUint64 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*uint64)(nil), + Field: 106, + Name: "testdata.no_default_uint64", + Tag: "varint,106,opt,name=no_default_uint64,json=noDefaultUint64", +} + +var E_NoDefaultSint32 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*int32)(nil), + Field: 107, + Name: "testdata.no_default_sint32", + Tag: "zigzag32,107,opt,name=no_default_sint32,json=noDefaultSint32", +} + +var E_NoDefaultSint64 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*int64)(nil), + Field: 108, + Name: "testdata.no_default_sint64", + Tag: "zigzag64,108,opt,name=no_default_sint64,json=noDefaultSint64", +} + +var E_NoDefaultFixed32 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*uint32)(nil), + Field: 109, + Name: "testdata.no_default_fixed32", + Tag: "fixed32,109,opt,name=no_default_fixed32,json=noDefaultFixed32", +} + +var E_NoDefaultFixed64 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*uint64)(nil), + Field: 110, + Name: "testdata.no_default_fixed64", + Tag: "fixed64,110,opt,name=no_default_fixed64,json=noDefaultFixed64", +} + +var E_NoDefaultSfixed32 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*int32)(nil), + Field: 111, + Name: "testdata.no_default_sfixed32", + Tag: "fixed32,111,opt,name=no_default_sfixed32,json=noDefaultSfixed32", +} + +var E_NoDefaultSfixed64 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*int64)(nil), + Field: 112, + Name: 
"testdata.no_default_sfixed64", + Tag: "fixed64,112,opt,name=no_default_sfixed64,json=noDefaultSfixed64", +} + +var E_NoDefaultBool = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*bool)(nil), + Field: 113, + Name: "testdata.no_default_bool", + Tag: "varint,113,opt,name=no_default_bool,json=noDefaultBool", +} + +var E_NoDefaultString = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*string)(nil), + Field: 114, + Name: "testdata.no_default_string", + Tag: "bytes,114,opt,name=no_default_string,json=noDefaultString", +} + +var E_NoDefaultBytes = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: ([]byte)(nil), + Field: 115, + Name: "testdata.no_default_bytes", + Tag: "bytes,115,opt,name=no_default_bytes,json=noDefaultBytes", +} + +var E_NoDefaultEnum = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*DefaultsMessage_DefaultsEnum)(nil), + Field: 116, + Name: "testdata.no_default_enum", + Tag: "varint,116,opt,name=no_default_enum,json=noDefaultEnum,enum=testdata.DefaultsMessage_DefaultsEnum", +} + +var E_DefaultDouble = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*float64)(nil), + Field: 201, + Name: "testdata.default_double", + Tag: "fixed64,201,opt,name=default_double,json=defaultDouble,def=3.1415", +} + +var E_DefaultFloat = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*float32)(nil), + Field: 202, + Name: "testdata.default_float", + Tag: "fixed32,202,opt,name=default_float,json=defaultFloat,def=3.14", +} + +var E_DefaultInt32 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*int32)(nil), + Field: 203, + Name: "testdata.default_int32", + Tag: "varint,203,opt,name=default_int32,json=defaultInt32,def=42", +} + +var E_DefaultInt64 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*int64)(nil), + Field: 
204, + Name: "testdata.default_int64", + Tag: "varint,204,opt,name=default_int64,json=defaultInt64,def=43", +} + +var E_DefaultUint32 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*uint32)(nil), + Field: 205, + Name: "testdata.default_uint32", + Tag: "varint,205,opt,name=default_uint32,json=defaultUint32,def=44", +} + +var E_DefaultUint64 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*uint64)(nil), + Field: 206, + Name: "testdata.default_uint64", + Tag: "varint,206,opt,name=default_uint64,json=defaultUint64,def=45", +} + +var E_DefaultSint32 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*int32)(nil), + Field: 207, + Name: "testdata.default_sint32", + Tag: "zigzag32,207,opt,name=default_sint32,json=defaultSint32,def=46", +} + +var E_DefaultSint64 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*int64)(nil), + Field: 208, + Name: "testdata.default_sint64", + Tag: "zigzag64,208,opt,name=default_sint64,json=defaultSint64,def=47", +} + +var E_DefaultFixed32 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*uint32)(nil), + Field: 209, + Name: "testdata.default_fixed32", + Tag: "fixed32,209,opt,name=default_fixed32,json=defaultFixed32,def=48", +} + +var E_DefaultFixed64 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*uint64)(nil), + Field: 210, + Name: "testdata.default_fixed64", + Tag: "fixed64,210,opt,name=default_fixed64,json=defaultFixed64,def=49", +} + +var E_DefaultSfixed32 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*int32)(nil), + Field: 211, + Name: "testdata.default_sfixed32", + Tag: "fixed32,211,opt,name=default_sfixed32,json=defaultSfixed32,def=50", +} + +var E_DefaultSfixed64 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*int64)(nil), + Field: 212, + Name: 
"testdata.default_sfixed64", + Tag: "fixed64,212,opt,name=default_sfixed64,json=defaultSfixed64,def=51", +} + +var E_DefaultBool = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*bool)(nil), + Field: 213, + Name: "testdata.default_bool", + Tag: "varint,213,opt,name=default_bool,json=defaultBool,def=1", +} + +var E_DefaultString = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*string)(nil), + Field: 214, + Name: "testdata.default_string", + Tag: "bytes,214,opt,name=default_string,json=defaultString,def=Hello, string", +} + +var E_DefaultBytes = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: ([]byte)(nil), + Field: 215, + Name: "testdata.default_bytes", + Tag: "bytes,215,opt,name=default_bytes,json=defaultBytes,def=Hello, bytes", +} + +var E_DefaultEnum = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*DefaultsMessage_DefaultsEnum)(nil), + Field: 216, + Name: "testdata.default_enum", + Tag: "varint,216,opt,name=default_enum,json=defaultEnum,enum=testdata.DefaultsMessage_DefaultsEnum,def=1", +} + +var E_X201 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 201, + Name: "testdata.x201", + Tag: "bytes,201,opt,name=x201", +} + +var E_X202 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 202, + Name: "testdata.x202", + Tag: "bytes,202,opt,name=x202", +} + +var E_X203 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 203, + Name: "testdata.x203", + Tag: "bytes,203,opt,name=x203", +} + +var E_X204 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 204, + Name: "testdata.x204", + Tag: "bytes,204,opt,name=x204", +} + +var E_X205 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 
205, + Name: "testdata.x205", + Tag: "bytes,205,opt,name=x205", +} + +var E_X206 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 206, + Name: "testdata.x206", + Tag: "bytes,206,opt,name=x206", +} + +var E_X207 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 207, + Name: "testdata.x207", + Tag: "bytes,207,opt,name=x207", +} + +var E_X208 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 208, + Name: "testdata.x208", + Tag: "bytes,208,opt,name=x208", +} + +var E_X209 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 209, + Name: "testdata.x209", + Tag: "bytes,209,opt,name=x209", +} + +var E_X210 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 210, + Name: "testdata.x210", + Tag: "bytes,210,opt,name=x210", +} + +var E_X211 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 211, + Name: "testdata.x211", + Tag: "bytes,211,opt,name=x211", +} + +var E_X212 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 212, + Name: "testdata.x212", + Tag: "bytes,212,opt,name=x212", +} + +var E_X213 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 213, + Name: "testdata.x213", + Tag: "bytes,213,opt,name=x213", +} + +var E_X214 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 214, + Name: "testdata.x214", + Tag: "bytes,214,opt,name=x214", +} + +var E_X215 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 215, + Name: "testdata.x215", + Tag: "bytes,215,opt,name=x215", +} + +var E_X216 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: 
(*Empty)(nil), + Field: 216, + Name: "testdata.x216", + Tag: "bytes,216,opt,name=x216", +} + +var E_X217 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 217, + Name: "testdata.x217", + Tag: "bytes,217,opt,name=x217", +} + +var E_X218 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 218, + Name: "testdata.x218", + Tag: "bytes,218,opt,name=x218", +} + +var E_X219 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 219, + Name: "testdata.x219", + Tag: "bytes,219,opt,name=x219", +} + +var E_X220 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 220, + Name: "testdata.x220", + Tag: "bytes,220,opt,name=x220", +} + +var E_X221 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 221, + Name: "testdata.x221", + Tag: "bytes,221,opt,name=x221", +} + +var E_X222 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 222, + Name: "testdata.x222", + Tag: "bytes,222,opt,name=x222", +} + +var E_X223 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 223, + Name: "testdata.x223", + Tag: "bytes,223,opt,name=x223", +} + +var E_X224 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 224, + Name: "testdata.x224", + Tag: "bytes,224,opt,name=x224", +} + +var E_X225 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 225, + Name: "testdata.x225", + Tag: "bytes,225,opt,name=x225", +} + +var E_X226 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 226, + Name: "testdata.x226", + Tag: "bytes,226,opt,name=x226", +} + +var E_X227 = &proto.ExtensionDesc{ + ExtendedType: 
(*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 227, + Name: "testdata.x227", + Tag: "bytes,227,opt,name=x227", +} + +var E_X228 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 228, + Name: "testdata.x228", + Tag: "bytes,228,opt,name=x228", +} + +var E_X229 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 229, + Name: "testdata.x229", + Tag: "bytes,229,opt,name=x229", +} + +var E_X230 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 230, + Name: "testdata.x230", + Tag: "bytes,230,opt,name=x230", +} + +var E_X231 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 231, + Name: "testdata.x231", + Tag: "bytes,231,opt,name=x231", +} + +var E_X232 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 232, + Name: "testdata.x232", + Tag: "bytes,232,opt,name=x232", +} + +var E_X233 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 233, + Name: "testdata.x233", + Tag: "bytes,233,opt,name=x233", +} + +var E_X234 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 234, + Name: "testdata.x234", + Tag: "bytes,234,opt,name=x234", +} + +var E_X235 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 235, + Name: "testdata.x235", + Tag: "bytes,235,opt,name=x235", +} + +var E_X236 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 236, + Name: "testdata.x236", + Tag: "bytes,236,opt,name=x236", +} + +var E_X237 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 237, + Name: "testdata.x237", + Tag: "bytes,237,opt,name=x237", +} + +var E_X238 = 
&proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 238, + Name: "testdata.x238", + Tag: "bytes,238,opt,name=x238", +} + +var E_X239 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 239, + Name: "testdata.x239", + Tag: "bytes,239,opt,name=x239", +} + +var E_X240 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 240, + Name: "testdata.x240", + Tag: "bytes,240,opt,name=x240", +} + +var E_X241 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 241, + Name: "testdata.x241", + Tag: "bytes,241,opt,name=x241", +} + +var E_X242 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 242, + Name: "testdata.x242", + Tag: "bytes,242,opt,name=x242", +} + +var E_X243 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 243, + Name: "testdata.x243", + Tag: "bytes,243,opt,name=x243", +} + +var E_X244 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 244, + Name: "testdata.x244", + Tag: "bytes,244,opt,name=x244", +} + +var E_X245 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 245, + Name: "testdata.x245", + Tag: "bytes,245,opt,name=x245", +} + +var E_X246 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 246, + Name: "testdata.x246", + Tag: "bytes,246,opt,name=x246", +} + +var E_X247 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 247, + Name: "testdata.x247", + Tag: "bytes,247,opt,name=x247", +} + +var E_X248 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 248, + Name: "testdata.x248", + Tag: 
"bytes,248,opt,name=x248", +} + +var E_X249 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 249, + Name: "testdata.x249", + Tag: "bytes,249,opt,name=x249", +} + +var E_X250 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 250, + Name: "testdata.x250", + Tag: "bytes,250,opt,name=x250", +} + +func init() { + proto.RegisterType((*GoEnum)(nil), "testdata.GoEnum") + proto.RegisterType((*GoTestField)(nil), "testdata.GoTestField") + proto.RegisterType((*GoTest)(nil), "testdata.GoTest") + proto.RegisterType((*GoTest_RequiredGroup)(nil), "testdata.GoTest.RequiredGroup") + proto.RegisterType((*GoTest_RepeatedGroup)(nil), "testdata.GoTest.RepeatedGroup") + proto.RegisterType((*GoTest_OptionalGroup)(nil), "testdata.GoTest.OptionalGroup") + proto.RegisterType((*GoSkipTest)(nil), "testdata.GoSkipTest") + proto.RegisterType((*GoSkipTest_SkipGroup)(nil), "testdata.GoSkipTest.SkipGroup") + proto.RegisterType((*NonPackedTest)(nil), "testdata.NonPackedTest") + proto.RegisterType((*PackedTest)(nil), "testdata.PackedTest") + proto.RegisterType((*MaxTag)(nil), "testdata.MaxTag") + proto.RegisterType((*OldMessage)(nil), "testdata.OldMessage") + proto.RegisterType((*OldMessage_Nested)(nil), "testdata.OldMessage.Nested") + proto.RegisterType((*NewMessage)(nil), "testdata.NewMessage") + proto.RegisterType((*NewMessage_Nested)(nil), "testdata.NewMessage.Nested") + proto.RegisterType((*InnerMessage)(nil), "testdata.InnerMessage") + proto.RegisterType((*OtherMessage)(nil), "testdata.OtherMessage") + proto.RegisterType((*RequiredInnerMessage)(nil), "testdata.RequiredInnerMessage") + proto.RegisterType((*MyMessage)(nil), "testdata.MyMessage") + proto.RegisterType((*MyMessage_SomeGroup)(nil), "testdata.MyMessage.SomeGroup") + proto.RegisterType((*Ext)(nil), "testdata.Ext") + proto.RegisterType((*ComplexExtension)(nil), "testdata.ComplexExtension") + 
proto.RegisterType((*DefaultsMessage)(nil), "testdata.DefaultsMessage") + proto.RegisterType((*MyMessageSet)(nil), "testdata.MyMessageSet") + proto.RegisterType((*Empty)(nil), "testdata.Empty") + proto.RegisterType((*MessageList)(nil), "testdata.MessageList") + proto.RegisterType((*MessageList_Message)(nil), "testdata.MessageList.Message") + proto.RegisterType((*Strings)(nil), "testdata.Strings") + proto.RegisterType((*Defaults)(nil), "testdata.Defaults") + proto.RegisterType((*SubDefaults)(nil), "testdata.SubDefaults") + proto.RegisterType((*RepeatedEnum)(nil), "testdata.RepeatedEnum") + proto.RegisterType((*MoreRepeated)(nil), "testdata.MoreRepeated") + proto.RegisterType((*GroupOld)(nil), "testdata.GroupOld") + proto.RegisterType((*GroupOld_G)(nil), "testdata.GroupOld.G") + proto.RegisterType((*GroupNew)(nil), "testdata.GroupNew") + proto.RegisterType((*GroupNew_G)(nil), "testdata.GroupNew.G") + proto.RegisterType((*FloatingPoint)(nil), "testdata.FloatingPoint") + proto.RegisterType((*MessageWithMap)(nil), "testdata.MessageWithMap") + proto.RegisterType((*Oneof)(nil), "testdata.Oneof") + proto.RegisterType((*Oneof_F_Group)(nil), "testdata.Oneof.F_Group") + proto.RegisterType((*Communique)(nil), "testdata.Communique") + proto.RegisterEnum("testdata.FOO", FOO_name, FOO_value) + proto.RegisterEnum("testdata.GoTest_KIND", GoTest_KIND_name, GoTest_KIND_value) + proto.RegisterEnum("testdata.MyMessage_Color", MyMessage_Color_name, MyMessage_Color_value) + proto.RegisterEnum("testdata.DefaultsMessage_DefaultsEnum", DefaultsMessage_DefaultsEnum_name, DefaultsMessage_DefaultsEnum_value) + proto.RegisterEnum("testdata.Defaults_Color", Defaults_Color_name, Defaults_Color_value) + proto.RegisterEnum("testdata.RepeatedEnum_Color", RepeatedEnum_Color_name, RepeatedEnum_Color_value) + proto.RegisterExtension(E_Ext_More) + proto.RegisterExtension(E_Ext_Text) + proto.RegisterExtension(E_Ext_Number) + proto.RegisterExtension(E_Greeting) + proto.RegisterExtension(E_Complex) + 
proto.RegisterExtension(E_RComplex) + proto.RegisterExtension(E_NoDefaultDouble) + proto.RegisterExtension(E_NoDefaultFloat) + proto.RegisterExtension(E_NoDefaultInt32) + proto.RegisterExtension(E_NoDefaultInt64) + proto.RegisterExtension(E_NoDefaultUint32) + proto.RegisterExtension(E_NoDefaultUint64) + proto.RegisterExtension(E_NoDefaultSint32) + proto.RegisterExtension(E_NoDefaultSint64) + proto.RegisterExtension(E_NoDefaultFixed32) + proto.RegisterExtension(E_NoDefaultFixed64) + proto.RegisterExtension(E_NoDefaultSfixed32) + proto.RegisterExtension(E_NoDefaultSfixed64) + proto.RegisterExtension(E_NoDefaultBool) + proto.RegisterExtension(E_NoDefaultString) + proto.RegisterExtension(E_NoDefaultBytes) + proto.RegisterExtension(E_NoDefaultEnum) + proto.RegisterExtension(E_DefaultDouble) + proto.RegisterExtension(E_DefaultFloat) + proto.RegisterExtension(E_DefaultInt32) + proto.RegisterExtension(E_DefaultInt64) + proto.RegisterExtension(E_DefaultUint32) + proto.RegisterExtension(E_DefaultUint64) + proto.RegisterExtension(E_DefaultSint32) + proto.RegisterExtension(E_DefaultSint64) + proto.RegisterExtension(E_DefaultFixed32) + proto.RegisterExtension(E_DefaultFixed64) + proto.RegisterExtension(E_DefaultSfixed32) + proto.RegisterExtension(E_DefaultSfixed64) + proto.RegisterExtension(E_DefaultBool) + proto.RegisterExtension(E_DefaultString) + proto.RegisterExtension(E_DefaultBytes) + proto.RegisterExtension(E_DefaultEnum) + proto.RegisterExtension(E_X201) + proto.RegisterExtension(E_X202) + proto.RegisterExtension(E_X203) + proto.RegisterExtension(E_X204) + proto.RegisterExtension(E_X205) + proto.RegisterExtension(E_X206) + proto.RegisterExtension(E_X207) + proto.RegisterExtension(E_X208) + proto.RegisterExtension(E_X209) + proto.RegisterExtension(E_X210) + proto.RegisterExtension(E_X211) + proto.RegisterExtension(E_X212) + proto.RegisterExtension(E_X213) + proto.RegisterExtension(E_X214) + proto.RegisterExtension(E_X215) + proto.RegisterExtension(E_X216) + 
proto.RegisterExtension(E_X217) + proto.RegisterExtension(E_X218) + proto.RegisterExtension(E_X219) + proto.RegisterExtension(E_X220) + proto.RegisterExtension(E_X221) + proto.RegisterExtension(E_X222) + proto.RegisterExtension(E_X223) + proto.RegisterExtension(E_X224) + proto.RegisterExtension(E_X225) + proto.RegisterExtension(E_X226) + proto.RegisterExtension(E_X227) + proto.RegisterExtension(E_X228) + proto.RegisterExtension(E_X229) + proto.RegisterExtension(E_X230) + proto.RegisterExtension(E_X231) + proto.RegisterExtension(E_X232) + proto.RegisterExtension(E_X233) + proto.RegisterExtension(E_X234) + proto.RegisterExtension(E_X235) + proto.RegisterExtension(E_X236) + proto.RegisterExtension(E_X237) + proto.RegisterExtension(E_X238) + proto.RegisterExtension(E_X239) + proto.RegisterExtension(E_X240) + proto.RegisterExtension(E_X241) + proto.RegisterExtension(E_X242) + proto.RegisterExtension(E_X243) + proto.RegisterExtension(E_X244) + proto.RegisterExtension(E_X245) + proto.RegisterExtension(E_X246) + proto.RegisterExtension(E_X247) + proto.RegisterExtension(E_X248) + proto.RegisterExtension(E_X249) + proto.RegisterExtension(E_X250) +} + +var fileDescriptor0 = []byte{ + // 4407 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x94, 0x5a, 0x59, 0x77, 0xdb, 0x48, + 0x76, 0x36, 0xc0, 0xfd, 0x92, 0x12, 0xa1, 0xb2, 0xda, 0x4d, 0x4b, 0x5e, 0x60, 0xce, 0x74, 0x37, + 0xbd, 0x69, 0x24, 0x10, 0xa2, 0x6d, 0xba, 0xd3, 0xe7, 0x78, 0xa1, 0x64, 0x9d, 0xb1, 0x44, 0x05, + 0x52, 0x77, 0x9f, 0xe9, 0x3c, 0xf0, 0x50, 0x22, 0x48, 0xb3, 0x4d, 0x02, 0x34, 0x09, 0xc5, 0x52, + 0xf2, 0xd2, 0x2f, 0xc9, 0x6b, 0xb6, 0x97, 0xbc, 0xe6, 0x29, 0x4f, 0x49, 0xce, 0xc9, 0x9f, 0x48, + 0xba, 0x7b, 0xd6, 0x9e, 0x35, 0xeb, 0x64, 0x5f, 0x26, 0xfb, 0x36, 0x93, 0xe4, 0xa5, 0xe7, 0xd4, + 0xad, 0x02, 0x50, 0x00, 0x09, 0x48, 0x7e, 0x12, 0x51, 0xf5, 0x7d, 0xb7, 0x6e, 0x15, 0xbe, 0xba, + 0xb7, 0x6e, 0x41, 0x00, 0x8e, 0x39, 0x71, 0x56, 0x46, 0x63, 0xdb, 0xb1, 
0x49, 0x96, 0xfe, 0xee, + 0xb4, 0x9d, 0x76, 0xf9, 0x3a, 0xa4, 0x37, 0xed, 0x86, 0x75, 0x34, 0x24, 0x57, 0x21, 0xd1, 0xb5, + 0xed, 0x92, 0xa4, 0xca, 0x95, 0x79, 0x6d, 0x6e, 0xc5, 0x45, 0xac, 0x6c, 0x34, 0x9b, 0x06, 0xed, + 0x29, 0xdf, 0x81, 0xfc, 0xa6, 0xbd, 0x6f, 0x4e, 0x9c, 0x8d, 0xbe, 0x39, 0xe8, 0x90, 0x45, 0x48, + 0x3d, 0x6d, 0x1f, 0x98, 0x03, 0x64, 0xe4, 0x8c, 0xd4, 0x80, 0x3e, 0x10, 0x02, 0xc9, 0xfd, 0x93, + 0x91, 0x59, 0x92, 0xb1, 0x31, 0xe9, 0x9c, 0x8c, 0xcc, 0xf2, 0xaf, 0x5c, 0xa1, 0x83, 0x50, 0x26, + 0xb9, 0x0e, 0xc9, 0x2f, 0xf7, 0xad, 0x0e, 0x1f, 0xe5, 0x35, 0x7f, 0x14, 0xd6, 0xbf, 0xf2, 0xe5, + 0xad, 0x9d, 0xc7, 0x46, 0xf2, 0x79, 0xdf, 0x42, 0xfb, 0xfb, 0xed, 0x83, 0x01, 0x35, 0x25, 0x51, + 0xfb, 0x0e, 0x7d, 0xa0, 0xad, 0xbb, 0xed, 0x71, 0x7b, 0x58, 0x4a, 0xa8, 0x52, 0x25, 0x65, 0xa4, + 0x46, 0xf4, 0x81, 0xdc, 0x87, 0x39, 0xc3, 0x7c, 0x71, 0xd4, 0x1f, 0x9b, 0x1d, 0x74, 0xae, 0x94, + 0x54, 0xe5, 0x4a, 0x7e, 0xda, 0x3e, 0x76, 0x1a, 0x73, 0x63, 0x11, 0xcb, 0xc8, 0x23, 0xb3, 0xed, + 0xb8, 0xe4, 0x94, 0x9a, 0x88, 0x25, 0x0b, 0x58, 0x4a, 0x6e, 0x8e, 0x9c, 0xbe, 0x6d, 0xb5, 0x07, + 0x8c, 0x9c, 0x56, 0xa5, 0x18, 0xb2, 0x2d, 0x62, 0xc9, 0x9b, 0x50, 0xdc, 0x68, 0x3d, 0xb4, 0xed, + 0x41, 0xcb, 0xf5, 0xa8, 0x04, 0xaa, 0x5c, 0xc9, 0x1a, 0x73, 0x5d, 0xda, 0xea, 0x4e, 0x89, 0x54, + 0x40, 0xd9, 0x68, 0x6d, 0x59, 0x4e, 0x55, 0xf3, 0x81, 0x79, 0x55, 0xae, 0xa4, 0x8c, 0xf9, 0x2e, + 0x36, 0x4f, 0x21, 0x6b, 0xba, 0x8f, 0x2c, 0xa8, 0x72, 0x25, 0xc1, 0x90, 0x35, 0xdd, 0x43, 0xde, + 0x02, 0xb2, 0xd1, 0xda, 0xe8, 0x1f, 0x9b, 0x1d, 0xd1, 0xea, 0x9c, 0x2a, 0x57, 0x32, 0x86, 0xd2, + 0xe5, 0x1d, 0x33, 0xd0, 0xa2, 0xe5, 0x79, 0x55, 0xae, 0xa4, 0x5d, 0xb4, 0x60, 0xfb, 0x06, 0x2c, + 0x6c, 0xb4, 0xde, 0xed, 0x07, 0x1d, 0x2e, 0xaa, 0x72, 0x65, 0xce, 0x28, 0x76, 0x59, 0xfb, 0x34, + 0x56, 0x34, 0xac, 0xa8, 0x72, 0x25, 0xc9, 0xb1, 0x82, 0x5d, 0x9c, 0xdd, 0xc6, 0xc0, 0x6e, 0x3b, + 0x3e, 0x74, 0x41, 0x95, 0x2b, 0xb2, 0x31, 0xdf, 0xc5, 0xe6, 0xa0, 0xd5, 0xc7, 0xf6, 0xd1, 0xc1, + 0xc0, 0xf4, 
0xa1, 0x44, 0x95, 0x2b, 0x92, 0x51, 0xec, 0xb2, 0xf6, 0x20, 0x76, 0xcf, 0x19, 0xf7, + 0xad, 0x9e, 0x8f, 0x3d, 0x8f, 0xfa, 0x2d, 0x76, 0x59, 0x7b, 0xd0, 0x83, 0x87, 0x27, 0x8e, 0x39, + 0xf1, 0xa1, 0xa6, 0x2a, 0x57, 0x0a, 0xc6, 0x7c, 0x17, 0x9b, 0x43, 0x56, 0x43, 0x6b, 0xd0, 0x55, + 0xe5, 0xca, 0x02, 0xb5, 0x3a, 0x63, 0x0d, 0xf6, 0x42, 0x6b, 0xd0, 0x53, 0xe5, 0x0a, 0xe1, 0x58, + 0x61, 0x0d, 0x44, 0xcd, 0x30, 0x21, 0x96, 0x16, 0xd5, 0x84, 0xa0, 0x19, 0xd6, 0x18, 0xd4, 0x0c, + 0x07, 0xbe, 0xa6, 0x26, 0x44, 0xcd, 0x84, 0x90, 0x38, 0x38, 0x47, 0x5e, 0x50, 0x13, 0xa2, 0x66, + 0x38, 0x32, 0xa4, 0x19, 0x8e, 0x7d, 0x5d, 0x4d, 0x04, 0x35, 0x33, 0x85, 0x16, 0x2d, 0x97, 0xd4, + 0x44, 0x50, 0x33, 0x1c, 0x1d, 0xd4, 0x0c, 0x07, 0x5f, 0x54, 0x13, 0x01, 0xcd, 0x84, 0xb1, 0xa2, + 0xe1, 0x25, 0x35, 0x11, 0xd0, 0x8c, 0x38, 0x3b, 0x57, 0x33, 0x1c, 0xba, 0xac, 0x26, 0x44, 0xcd, + 0x88, 0x56, 0x3d, 0xcd, 0x70, 0xe8, 0x25, 0x35, 0x11, 0xd0, 0x8c, 0x88, 0xf5, 0x34, 0xc3, 0xb1, + 0x97, 0xd5, 0x44, 0x40, 0x33, 0x1c, 0x7b, 0x5d, 0xd4, 0x0c, 0x87, 0x7e, 0x2c, 0xa9, 0x09, 0x51, + 0x34, 0x1c, 0x7a, 0x33, 0x20, 0x1a, 0x8e, 0xfd, 0x84, 0x62, 0x45, 0xd5, 0x84, 0xc1, 0xe2, 0x2a, + 0x7c, 0x4a, 0xc1, 0xa2, 0x6c, 0x38, 0xd8, 0x97, 0x8d, 0x1b, 0x82, 0x4a, 0x57, 0x54, 0xc9, 0x93, + 0x8d, 0x1b, 0xc3, 0x44, 0xd9, 0x78, 0xc0, 0xab, 0x18, 0x6a, 0xb9, 0x6c, 0xa6, 0x90, 0x35, 0xdd, + 0x47, 0xaa, 0xaa, 0xe4, 0xcb, 0xc6, 0x43, 0x06, 0x64, 0xe3, 0x61, 0xaf, 0xa9, 0x92, 0x28, 0x9b, + 0x19, 0x68, 0xd1, 0x72, 0x59, 0x95, 0x44, 0xd9, 0x78, 0x68, 0x51, 0x36, 0x1e, 0xf8, 0x0b, 0xaa, + 0x24, 0xc8, 0x66, 0x1a, 0x2b, 0x1a, 0xfe, 0xa2, 0x2a, 0x09, 0xb2, 0x09, 0xce, 0x8e, 0xc9, 0xc6, + 0x83, 0xbe, 0xa1, 0x4a, 0xbe, 0x6c, 0x82, 0x56, 0xb9, 0x6c, 0x3c, 0xe8, 0x9b, 0xaa, 0x24, 0xc8, + 0x26, 0x88, 0xe5, 0xb2, 0xf1, 0xb0, 0x6f, 0x61, 0x7e, 0x73, 0x65, 0xe3, 0x61, 0x05, 0xd9, 0x78, + 0xd0, 0xdf, 0xa1, 0xb9, 0xd0, 0x93, 0x8d, 0x07, 0x15, 0x65, 0xe3, 0x61, 0x7f, 0x97, 0x62, 0x7d, + 0xd9, 0x4c, 0x83, 0xc5, 0x55, 0xf8, 0x3d, 0x0a, 
0xf6, 0x65, 0xe3, 0x81, 0x57, 0xd0, 0x09, 0x2a, + 0x9b, 0x8e, 0xd9, 0x6d, 0x1f, 0x0d, 0xa8, 0xc4, 0x2a, 0x54, 0x37, 0xf5, 0xa4, 0x33, 0x3e, 0x32, + 0xa9, 0x27, 0xb6, 0x3d, 0x78, 0xec, 0xf6, 0x91, 0x15, 0x6a, 0x9c, 0xc9, 0xc7, 0x27, 0x5c, 0xa7, + 0xfa, 0xa9, 0xcb, 0x55, 0xcd, 0x28, 0x32, 0x0d, 0x4d, 0xe3, 0x6b, 0xba, 0x80, 0xbf, 0x41, 0x55, + 0x54, 0x97, 0x6b, 0x3a, 0xc3, 0xd7, 0x74, 0x1f, 0x5f, 0x85, 0xf3, 0xbe, 0x94, 0x7c, 0xc6, 0x4d, + 0xaa, 0xa5, 0x7a, 0xa2, 0xaa, 0xad, 0x1a, 0x0b, 0xae, 0xa0, 0x66, 0x91, 0x02, 0xc3, 0xdc, 0xa2, + 0x92, 0xaa, 0x27, 0x6a, 0xba, 0x47, 0x12, 0x47, 0xd2, 0xa8, 0x0c, 0xb9, 0xb0, 0x7c, 0xce, 0x6d, + 0xaa, 0xac, 0x7a, 0xb2, 0xaa, 0xad, 0xae, 0x1a, 0x0a, 0xd7, 0xd7, 0x0c, 0x4e, 0x60, 0x9c, 0x15, + 0xaa, 0xb0, 0x7a, 0xb2, 0xa6, 0x7b, 0x9c, 0xe0, 0x38, 0x0b, 0xae, 0xd0, 0x7c, 0xca, 0x97, 0xa8, + 0xd2, 0xea, 0xe9, 0xea, 0x9a, 0xbe, 0xb6, 0x7e, 0xcf, 0x28, 0x32, 0xc5, 0xf9, 0x1c, 0x9d, 0x8e, + 0xc3, 0x25, 0xe7, 0x93, 0x56, 0xa9, 0xe6, 0xea, 0x69, 0xed, 0xce, 0xda, 0x5d, 0xed, 0xae, 0xa1, + 0x70, 0xed, 0xf9, 0xac, 0x77, 0x28, 0x8b, 0x8b, 0xcf, 0x67, 0xad, 0x51, 0xf5, 0xd5, 0x95, 0x67, + 0xe6, 0x60, 0x60, 0xdf, 0x52, 0xcb, 0x2f, 0xed, 0xf1, 0xa0, 0x73, 0xad, 0x0c, 0x86, 0xc2, 0xf5, + 0x28, 0x8e, 0xba, 0xe0, 0x0a, 0xd2, 0xa7, 0xff, 0x1a, 0x3d, 0x87, 0x15, 0xea, 0x99, 0x87, 0xfd, + 0x9e, 0x65, 0x4f, 0x4c, 0xa3, 0xc8, 0xa4, 0x19, 0x5a, 0x93, 0xbd, 0xf0, 0x3a, 0xfe, 0x3a, 0xa5, + 0x2d, 0xd4, 0x13, 0xb7, 0xab, 0x1a, 0x1d, 0x69, 0xd6, 0x3a, 0xee, 0x85, 0xd7, 0xf1, 0x37, 0x28, + 0x87, 0xd4, 0x13, 0xb7, 0x6b, 0x3a, 0xe7, 0x88, 0xeb, 0x78, 0x07, 0x2e, 0x84, 0xf2, 0x62, 0x6b, + 0xd4, 0x3e, 0x7c, 0x6e, 0x76, 0x4a, 0x1a, 0x4d, 0x8f, 0x0f, 0x65, 0x45, 0x32, 0xce, 0x07, 0x52, + 0xe4, 0x2e, 0x76, 0x93, 0x7b, 0xf0, 0x7a, 0x38, 0x51, 0xba, 0xcc, 0x2a, 0xcd, 0x97, 0xc8, 0x5c, + 0x0c, 0xe6, 0xcc, 0x10, 0x55, 0x08, 0xc0, 0x2e, 0x55, 0xa7, 0x09, 0xd4, 0xa7, 0xfa, 0x91, 0x98, + 0x53, 0x7f, 0x06, 0x2e, 0x4e, 0xa7, 0x52, 0x97, 0xbc, 0x4e, 0x33, 0x2a, 0x92, 0x2f, 
0x84, 0xb3, + 0xea, 0x14, 0x7d, 0xc6, 0xd8, 0x35, 0x9a, 0x62, 0x45, 0xfa, 0xd4, 0xe8, 0xf7, 0xa1, 0x34, 0x95, + 0x6c, 0x5d, 0xf6, 0x1d, 0x9a, 0x73, 0x91, 0xfd, 0x5a, 0x28, 0xef, 0x86, 0xc9, 0x33, 0x86, 0xbe, + 0x4b, 0x93, 0xb0, 0x40, 0x9e, 0x1a, 0x19, 0x97, 0x2c, 0x98, 0x8e, 0x5d, 0xee, 0x3d, 0x9a, 0x95, + 0xf9, 0x92, 0x05, 0x32, 0xb3, 0x38, 0x6e, 0x28, 0x3f, 0xbb, 0xdc, 0x3a, 0x4d, 0xd3, 0x7c, 0xdc, + 0x60, 0xaa, 0xe6, 0xe4, 0xb7, 0x29, 0x79, 0x6f, 0xf6, 0x8c, 0x7f, 0x9c, 0xa0, 0x09, 0x96, 0xb3, + 0xf7, 0x66, 0x4d, 0xd9, 0x63, 0xcf, 0x98, 0xf2, 0x4f, 0x28, 0x9b, 0x08, 0xec, 0xa9, 0x39, 0x3f, + 0x06, 0xaf, 0xe2, 0xe8, 0x8d, 0xed, 0xa3, 0x51, 0x69, 0x43, 0x95, 0x2b, 0xa0, 0x5d, 0x99, 0xaa, + 0x7e, 0xdc, 0x43, 0xde, 0x26, 0x45, 0x19, 0x41, 0x12, 0xb3, 0xc2, 0xec, 0x32, 0x2b, 0xbb, 0x6a, + 0x22, 0xc2, 0x0a, 0x43, 0x79, 0x56, 0x04, 0x12, 0xb5, 0xe2, 0x06, 0x7d, 0x66, 0xe5, 0x03, 0x55, + 0x9a, 0x69, 0xc5, 0x4d, 0x01, 0xdc, 0x4a, 0x80, 0xb4, 0xb4, 0xee, 0xd7, 0x5b, 0xd8, 0x4f, 0xbe, + 0x18, 0x2e, 0xc0, 0x36, 0xf1, 0xfc, 0x1c, 0xac, 0xb4, 0x18, 0x4d, 0x70, 0x6e, 0x9a, 0xf6, 0xb3, + 0x11, 0xb4, 0x80, 0x37, 0xd3, 0xb4, 0x9f, 0x9b, 0x41, 0x2b, 0xff, 0xa6, 0x04, 0x49, 0x5a, 0x4f, + 0x92, 0x2c, 0x24, 0xdf, 0x6b, 0x6e, 0x3d, 0x56, 0xce, 0xd1, 0x5f, 0x0f, 0x9b, 0xcd, 0xa7, 0x8a, + 0x44, 0x72, 0x90, 0x7a, 0xf8, 0x95, 0xfd, 0xc6, 0x9e, 0x22, 0x93, 0x22, 0xe4, 0x37, 0xb6, 0x76, + 0x36, 0x1b, 0xc6, 0xae, 0xb1, 0xb5, 0xb3, 0xaf, 0x24, 0x68, 0xdf, 0xc6, 0xd3, 0xe6, 0x83, 0x7d, + 0x25, 0x49, 0x32, 0x90, 0xa0, 0x6d, 0x29, 0x02, 0x90, 0xde, 0xdb, 0x37, 0xb6, 0x76, 0x36, 0x95, + 0x34, 0xb5, 0xb2, 0xbf, 0xb5, 0xdd, 0x50, 0x32, 0x14, 0xb9, 0xff, 0xee, 0xee, 0xd3, 0x86, 0x92, + 0xa5, 0x3f, 0x1f, 0x18, 0xc6, 0x83, 0xaf, 0x28, 0x39, 0x4a, 0xda, 0x7e, 0xb0, 0xab, 0x00, 0x76, + 0x3f, 0x78, 0xf8, 0xb4, 0xa1, 0xe4, 0x49, 0x01, 0xb2, 0x1b, 0xef, 0xee, 0x3c, 0xda, 0xdf, 0x6a, + 0xee, 0x28, 0x85, 0xf2, 0x6f, 0xc9, 0x00, 0x9b, 0xf6, 0xde, 0xf3, 0xfe, 0x08, 0xab, 0xe2, 0xcb, + 0x00, 0x93, 0xe7, 0xfd, 
0x51, 0x0b, 0xa5, 0xc7, 0x2b, 0xbb, 0x1c, 0x6d, 0xc1, 0xa0, 0x43, 0xae, + 0x41, 0x01, 0xbb, 0xbb, 0x2c, 0x14, 0x60, 0x41, 0x97, 0x31, 0xf2, 0xb4, 0x8d, 0x47, 0x87, 0x20, + 0xa4, 0xa6, 0x63, 0x1d, 0x97, 0x16, 0x20, 0x35, 0x9d, 0x5c, 0x05, 0x7c, 0x6c, 0x4d, 0x30, 0xac, + 0x63, 0xed, 0x96, 0x33, 0x70, 0x5c, 0x16, 0xe8, 0xc9, 0xdb, 0x80, 0x63, 0x32, 0x59, 0x14, 0xa7, + 0x25, 0xea, 0xba, 0xbb, 0x42, 0x7f, 0x30, 0x59, 0xf8, 0x84, 0xa5, 0x26, 0xe4, 0xbc, 0x76, 0x3a, + 0x16, 0xb6, 0xf2, 0x19, 0x29, 0x38, 0x23, 0xc0, 0x26, 0x6f, 0x4a, 0x0c, 0xc0, 0xbd, 0x59, 0x40, + 0x6f, 0x18, 0x89, 0xb9, 0x53, 0xbe, 0x0c, 0x73, 0x3b, 0xb6, 0xc5, 0xb6, 0x10, 0xae, 0x52, 0x01, + 0xa4, 0x76, 0x49, 0xc2, 0x12, 0x46, 0x6a, 0x97, 0xaf, 0x00, 0x08, 0x7d, 0x0a, 0x48, 0x07, 0xac, + 0x0f, 0x37, 0xa2, 0x74, 0x50, 0xbe, 0x09, 0xe9, 0xed, 0xf6, 0xf1, 0x7e, 0xbb, 0x47, 0xae, 0x01, + 0x0c, 0xda, 0x13, 0xa7, 0xd5, 0x45, 0xa9, 0x7c, 0xfe, 0xf9, 0xe7, 0x9f, 0x4b, 0x78, 0xe2, 0xca, + 0xd1, 0x56, 0x26, 0x95, 0x17, 0x00, 0xcd, 0x41, 0x67, 0xdb, 0x9c, 0x4c, 0xda, 0x3d, 0x93, 0x54, + 0x21, 0x6d, 0x99, 0x13, 0x9a, 0x72, 0x24, 0x2c, 0xe6, 0x97, 0xfd, 0x55, 0xf0, 0x51, 0x2b, 0x3b, + 0x08, 0x31, 0x38, 0x94, 0x28, 0x90, 0xb0, 0x8e, 0x86, 0x78, 0x59, 0x91, 0x32, 0xe8, 0xcf, 0xa5, + 0x4b, 0x90, 0x66, 0x18, 0x42, 0x20, 0x69, 0xb5, 0x87, 0x66, 0x89, 0x8d, 0x8b, 0xbf, 0xcb, 0xbf, + 0x2a, 0x01, 0xec, 0x98, 0x2f, 0xcf, 0x30, 0xa6, 0x8f, 0x8a, 0x19, 0x33, 0xc1, 0xc6, 0xbc, 0x1f, + 0x37, 0x26, 0xd5, 0x59, 0xd7, 0xb6, 0x3b, 0x2d, 0xf6, 0x8a, 0xd9, 0xbd, 0x4a, 0x8e, 0xb6, 0xe0, + 0x5b, 0x2b, 0x7f, 0x00, 0x85, 0x2d, 0xcb, 0x32, 0xc7, 0xae, 0x4f, 0x04, 0x92, 0xcf, 0xec, 0x89, + 0xc3, 0x2f, 0x78, 0xf0, 0x37, 0x29, 0x41, 0x72, 0x64, 0x8f, 0x1d, 0x36, 0xcf, 0x7a, 0x52, 0x5f, + 0x5d, 0x5d, 0x35, 0xb0, 0x85, 0x5c, 0x82, 0xdc, 0xa1, 0x6d, 0x59, 0xe6, 0x21, 0x9d, 0x44, 0x02, + 0x6b, 0x0b, 0xbf, 0xa1, 0xfc, 0xcb, 0x12, 0x14, 0x9a, 0xce, 0x33, 0xdf, 0xb8, 0x02, 0x89, 0xe7, + 0xe6, 0x09, 0xba, 0x97, 0x30, 0xe8, 0x4f, 0xb2, 0x08, 0xa9, 
0x9f, 0x6f, 0x0f, 0x8e, 0xd8, 0x85, + 0x4f, 0xc1, 0x60, 0x0f, 0xe4, 0x02, 0xa4, 0x5f, 0x9a, 0xfd, 0xde, 0x33, 0x07, 0x6d, 0xca, 0x06, + 0x7f, 0x22, 0xb7, 0x20, 0xd5, 0xa7, 0xce, 0x96, 0x92, 0xb8, 0x5e, 0x17, 0xfc, 0xf5, 0x12, 0xe7, + 0x60, 0x30, 0xd0, 0x8d, 0x6c, 0xb6, 0xa3, 0x7c, 0xf4, 0xd1, 0x47, 0x1f, 0xc9, 0xe5, 0x2e, 0x2c, + 0xba, 0xb1, 0x23, 0x30, 0xd9, 0x1d, 0x28, 0x0d, 0x4c, 0xbb, 0xd5, 0xed, 0x5b, 0xed, 0xc1, 0xe0, + 0xa4, 0xf5, 0xd2, 0xb6, 0x5a, 0x6d, 0xab, 0x65, 0x4f, 0x0e, 0xdb, 0x63, 0x5c, 0x80, 0xe8, 0x21, + 0x16, 0x07, 0xa6, 0xbd, 0xc1, 0x68, 0xef, 0xdb, 0xd6, 0x03, 0xab, 0x49, 0x39, 0xe5, 0x3f, 0x48, + 0x42, 0x6e, 0xfb, 0xc4, 0xb5, 0xbe, 0x08, 0xa9, 0x43, 0xfb, 0xc8, 0x62, 0x6b, 0x99, 0x32, 0xd8, + 0x83, 0xf7, 0x8e, 0x64, 0xe1, 0x1d, 0x2d, 0x42, 0xea, 0xc5, 0x91, 0xed, 0x98, 0x38, 0xdd, 0x9c, + 0xc1, 0x1e, 0xe8, 0x6a, 0x8d, 0x4c, 0xa7, 0x94, 0xc4, 0x0a, 0x93, 0xfe, 0xf4, 0xe7, 0x9f, 0x3a, + 0xc3, 0xfc, 0xc9, 0x0a, 0xa4, 0x6d, 0xba, 0xfa, 0x93, 0x52, 0x1a, 0x2f, 0xb7, 0x04, 0xb8, 0xf8, + 0x56, 0x0c, 0x8e, 0x22, 0x5b, 0xb0, 0xf0, 0xd2, 0x6c, 0x0d, 0x8f, 0x26, 0x4e, 0xab, 0x67, 0xb7, + 0x3a, 0xa6, 0x39, 0x32, 0xc7, 0xa5, 0x39, 0x1c, 0x49, 0x88, 0x09, 0xb3, 0x16, 0xd2, 0x98, 0x7f, + 0x69, 0x6e, 0x1f, 0x4d, 0x9c, 0x4d, 0xfb, 0x31, 0xb2, 0x48, 0x15, 0x72, 0x63, 0x93, 0x46, 0x02, + 0xea, 0x6c, 0x21, 0x3c, 0x7a, 0x80, 0x9a, 0x1d, 0x9b, 0x23, 0x6c, 0x20, 0xeb, 0x90, 0x3d, 0xe8, + 0x3f, 0x37, 0x27, 0xcf, 0xcc, 0x4e, 0x29, 0xa3, 0x4a, 0x95, 0x79, 0xed, 0xa2, 0xcf, 0xf1, 0x96, + 0x75, 0xe5, 0x91, 0x3d, 0xb0, 0xc7, 0x86, 0x07, 0x25, 0xf7, 0x21, 0x37, 0xb1, 0x87, 0x26, 0xd3, + 0x77, 0x16, 0x33, 0xdb, 0xe5, 0x59, 0xbc, 0x3d, 0x7b, 0x68, 0xba, 0x11, 0xcc, 0xc5, 0x93, 0x65, + 0xe6, 0xe8, 0x01, 0x3d, 0xbf, 0x96, 0x00, 0xeb, 0x73, 0xea, 0x10, 0x9e, 0x67, 0xc9, 0x12, 0x75, + 0xa8, 0xd7, 0xa5, 0xc7, 0x92, 0x52, 0x1e, 0x8b, 0x3b, 0xef, 0x79, 0xe9, 0x16, 0xe4, 0x3c, 0x83, + 0x7e, 0xe8, 0x63, 0xe1, 0x26, 0x87, 0xf1, 0x80, 0x85, 0x3e, 0x16, 0x6b, 0xde, 0x80, 0x14, 0xba, + 
0x4d, 0xd3, 0x84, 0xd1, 0xa0, 0x59, 0x29, 0x07, 0xa9, 0x4d, 0xa3, 0xd1, 0xd8, 0x51, 0x24, 0x4c, + 0x50, 0x4f, 0xdf, 0x6d, 0x28, 0xb2, 0xa0, 0xd8, 0xdf, 0x96, 0x20, 0xd1, 0x38, 0x46, 0xb5, 0xd0, + 0x69, 0xb8, 0x3b, 0x9a, 0xfe, 0xd6, 0x6a, 0x90, 0x1c, 0xda, 0x63, 0x93, 0x9c, 0x9f, 0x31, 0xcb, + 0x52, 0x0f, 0xdf, 0x97, 0x70, 0x95, 0xdb, 0x38, 0x76, 0x0c, 0xc4, 0x6b, 0x6f, 0x41, 0xd2, 0x31, + 0x8f, 0x9d, 0xd9, 0xbc, 0x67, 0x6c, 0x00, 0x0a, 0xd0, 0x6e, 0x42, 0xda, 0x3a, 0x1a, 0x1e, 0x98, + 0xe3, 0xd9, 0xd0, 0x3e, 0x4e, 0x8f, 0x43, 0xca, 0xef, 0x81, 0xf2, 0xc8, 0x1e, 0x8e, 0x06, 0xe6, + 0x71, 0xe3, 0xd8, 0x31, 0xad, 0x49, 0xdf, 0xb6, 0xa8, 0x9e, 0xbb, 0xfd, 0x31, 0x46, 0x11, 0xbc, + 0xb0, 0xc5, 0x07, 0xba, 0xab, 0x27, 0xe6, 0xa1, 0x6d, 0x75, 0x78, 0xc0, 0xe4, 0x4f, 0x14, 0xed, + 0x3c, 0xeb, 0x8f, 0x69, 0x00, 0xa1, 0x71, 0x9e, 0x3d, 0x94, 0x37, 0xa1, 0xc8, 0x0f, 0xfa, 0x13, + 0x3e, 0x70, 0xf9, 0x06, 0x14, 0xdc, 0x26, 0xbc, 0xbd, 0xce, 0x42, 0xf2, 0x83, 0x86, 0xd1, 0x54, + 0xce, 0xd1, 0x65, 0x6d, 0xee, 0x34, 0x14, 0x89, 0xfe, 0xd8, 0x7f, 0xbf, 0x19, 0x58, 0xca, 0x4b, + 0x50, 0xf0, 0x7c, 0xdf, 0x33, 0x1d, 0xec, 0xa1, 0x09, 0x21, 0x53, 0x97, 0xb3, 0x52, 0x39, 0x03, + 0xa9, 0xc6, 0x70, 0xe4, 0x9c, 0x94, 0x7f, 0x11, 0xf2, 0x1c, 0xf4, 0xb4, 0x3f, 0x71, 0xc8, 0x1d, + 0xc8, 0x0c, 0xf9, 0x7c, 0x25, 0x3c, 0x73, 0x89, 0x9a, 0xf2, 0x71, 0xee, 0x6f, 0xc3, 0x45, 0x2f, + 0x55, 0x21, 0x23, 0xc4, 0x52, 0xbe, 0xd5, 0x65, 0x71, 0xab, 0xb3, 0xa0, 0x90, 0x10, 0x82, 0x42, + 0x79, 0x1b, 0x32, 0x2c, 0x03, 0x4e, 0x30, 0xab, 0xb3, 0x7a, 0x8d, 0x89, 0x89, 0xbd, 0xf9, 0x3c, + 0x6b, 0x63, 0x57, 0xc8, 0x57, 0x21, 0x8f, 0x82, 0xe5, 0x08, 0x16, 0x3a, 0x01, 0x9b, 0x98, 0xdc, + 0x7e, 0x3f, 0x05, 0x59, 0x77, 0xa5, 0xc8, 0x32, 0xa4, 0x59, 0x91, 0x84, 0xa6, 0xdc, 0x22, 0x3e, + 0x85, 0x65, 0x11, 0x59, 0x86, 0x0c, 0x2f, 0x84, 0x78, 0x74, 0xa7, 0x15, 0x7b, 0x9a, 0x15, 0x3e, + 0x5e, 0x67, 0x4d, 0xc7, 0xc0, 0xc4, 0xca, 0xf3, 0x34, 0x2b, 0x6d, 0x88, 0x0a, 0x39, 0xaf, 0x98, + 0xc1, 0x78, 0xcc, 0x6b, 0xf1, 0xac, 
0x5b, 0xbd, 0x08, 0x88, 0x9a, 0x8e, 0x11, 0x8b, 0x17, 0xde, + 0xd9, 0xae, 0x7f, 0x3c, 0xc9, 0xba, 0x25, 0x09, 0xde, 0xa1, 0xbb, 0x55, 0x76, 0x86, 0x17, 0x21, + 0x3e, 0xa0, 0xa6, 0x63, 0x48, 0x70, 0x4b, 0xea, 0x0c, 0x2f, 0x34, 0xc8, 0x55, 0xea, 0x22, 0x16, + 0x0e, 0xb8, 0xf5, 0xfd, 0xfa, 0x39, 0xcd, 0xca, 0x09, 0x72, 0x8d, 0x5a, 0x60, 0xd5, 0x01, 0xee, + 0x4b, 0xbf, 0x58, 0xce, 0xf0, 0xa2, 0x81, 0xdc, 0xa4, 0x10, 0xb6, 0xfc, 0x25, 0x88, 0xa8, 0x8c, + 0x33, 0xbc, 0x32, 0x26, 0x2a, 0x1d, 0x10, 0xc3, 0x03, 0x86, 0x04, 0xa1, 0x0a, 0x4e, 0xb3, 0x2a, + 0x98, 0x5c, 0x41, 0x73, 0x6c, 0x52, 0x05, 0xbf, 0xe2, 0xcd, 0xf0, 0x2a, 0xc3, 0xef, 0xc7, 0x23, + 0x9b, 0x57, 0xdd, 0x66, 0x78, 0x1d, 0x41, 0x6a, 0xf4, 0x7d, 0x51, 0x7d, 0x97, 0xe6, 0x31, 0x08, + 0x96, 0x7c, 0xe1, 0xb9, 0xef, 0x94, 0xc5, 0xc0, 0x3a, 0x8b, 0x20, 0x46, 0xaa, 0x8b, 0xbb, 0x61, + 0x89, 0xf2, 0x76, 0xfb, 0x56, 0xb7, 0x54, 0xc4, 0x95, 0x48, 0xf4, 0xad, 0xae, 0x91, 0xea, 0xd2, + 0x16, 0xa6, 0x81, 0x1d, 0xda, 0xa7, 0x60, 0x5f, 0xf2, 0x36, 0xeb, 0xa4, 0x4d, 0xa4, 0x04, 0xa9, + 0x8d, 0xd6, 0x4e, 0xdb, 0x2a, 0x2d, 0x30, 0x9e, 0xd5, 0xb6, 0x8c, 0x64, 0x77, 0xa7, 0x6d, 0x91, + 0xb7, 0x20, 0x31, 0x39, 0x3a, 0x28, 0x91, 0xf0, 0xe7, 0x8d, 0xbd, 0xa3, 0x03, 0xd7, 0x15, 0x83, + 0x22, 0xc8, 0x32, 0x64, 0x27, 0xce, 0xb8, 0xf5, 0x0b, 0xe6, 0xd8, 0x2e, 0x9d, 0xc7, 0x25, 0x3c, + 0x67, 0x64, 0x26, 0xce, 0xf8, 0x03, 0x73, 0x6c, 0x9f, 0x31, 0xf8, 0x95, 0xaf, 0x40, 0x5e, 0xb0, + 0x4b, 0x8a, 0x20, 0x59, 0xec, 0xa4, 0x50, 0x97, 0xee, 0x18, 0x92, 0x55, 0xde, 0x87, 0x82, 0x5b, + 0x48, 0xe0, 0x7c, 0x35, 0xba, 0x93, 0x06, 0xf6, 0x18, 0xf7, 0xe7, 0xbc, 0x76, 0x49, 0x4c, 0x51, + 0x3e, 0x8c, 0xa7, 0x0b, 0x06, 0x2d, 0x2b, 0x21, 0x57, 0xa4, 0xf2, 0x0f, 0x25, 0x28, 0x6c, 0xdb, + 0x63, 0xff, 0x96, 0x77, 0x11, 0x52, 0x07, 0xb6, 0x3d, 0x98, 0xa0, 0xd9, 0xac, 0xc1, 0x1e, 0xc8, + 0x1b, 0x50, 0xc0, 0x1f, 0x6e, 0x01, 0x28, 0x7b, 0xf7, 0x0b, 0x79, 0x6c, 0xe7, 0x55, 0x1f, 0x81, + 0x64, 0xdf, 0x72, 0x26, 0x3c, 0x92, 0xe1, 0x6f, 0xf2, 0x05, 0xc8, 0xd3, 
0xbf, 0x2e, 0x33, 0xe9, + 0x1d, 0x58, 0x81, 0x36, 0x73, 0xe2, 0x5b, 0x30, 0x87, 0x6f, 0xdf, 0x83, 0x65, 0xbc, 0xbb, 0x84, + 0x02, 0xeb, 0xe0, 0xc0, 0x12, 0x64, 0x58, 0x28, 0x98, 0xe0, 0x27, 0xab, 0x9c, 0xe1, 0x3e, 0xd2, + 0xf0, 0x8a, 0x95, 0x00, 0x4b, 0xf7, 0x19, 0x83, 0x3f, 0x95, 0x1f, 0x40, 0x16, 0xb3, 0x54, 0x73, + 0xd0, 0x21, 0x65, 0x90, 0x7a, 0x25, 0x13, 0x73, 0xe4, 0xa2, 0x70, 0xcc, 0xe7, 0xdd, 0x2b, 0x9b, + 0x86, 0xd4, 0x5b, 0x5a, 0x00, 0x69, 0x93, 0x9e, 0xbb, 0x8f, 0x79, 0x98, 0x96, 0x8e, 0xcb, 0x4d, + 0x6e, 0x62, 0xc7, 0x7c, 0x19, 0x67, 0x62, 0xc7, 0x7c, 0xc9, 0x4c, 0x5c, 0x9d, 0x32, 0x41, 0x9f, + 0x4e, 0xf8, 0xf7, 0x3b, 0xe9, 0x84, 0x9e, 0xf3, 0x71, 0x7b, 0xf6, 0xad, 0xde, 0xae, 0xdd, 0xb7, + 0xf0, 0x9c, 0xdf, 0xc5, 0x73, 0x92, 0x64, 0x48, 0xdd, 0xf2, 0x67, 0x49, 0x98, 0xe7, 0x41, 0xf4, + 0xfd, 0xbe, 0xf3, 0x6c, 0xbb, 0x3d, 0x22, 0x4f, 0xa1, 0x40, 0xe3, 0x67, 0x6b, 0xd8, 0x1e, 0x8d, + 0xe8, 0x46, 0x95, 0xf0, 0x50, 0x71, 0x7d, 0x2a, 0x28, 0x73, 0xfc, 0xca, 0x4e, 0x7b, 0x68, 0x6e, + 0x33, 0x6c, 0xc3, 0x72, 0xc6, 0x27, 0x46, 0xde, 0xf2, 0x5b, 0xc8, 0x16, 0xe4, 0x87, 0x93, 0x9e, + 0x67, 0x4c, 0x46, 0x63, 0x95, 0x48, 0x63, 0xdb, 0x93, 0x5e, 0xc0, 0x16, 0x0c, 0xbd, 0x06, 0xea, + 0x18, 0x8d, 0xbc, 0x9e, 0xad, 0xc4, 0x29, 0x8e, 0xd1, 0x20, 0x11, 0x74, 0xec, 0xc0, 0x6f, 0x21, + 0x8f, 0x01, 0xe8, 0x46, 0x72, 0x6c, 0x5a, 0x24, 0xa1, 0x56, 0xf2, 0xda, 0x9b, 0x91, 0xb6, 0xf6, + 0x9c, 0xf1, 0xbe, 0xbd, 0xe7, 0x8c, 0x99, 0x21, 0xba, 0x05, 0xf1, 0x71, 0xe9, 0x1d, 0x50, 0xc2, + 0xf3, 0x17, 0xcf, 0xde, 0xa9, 0x19, 0x67, 0xef, 0x1c, 0x3f, 0x7b, 0xd7, 0xe5, 0xbb, 0xd2, 0xd2, + 0x7b, 0x50, 0x0c, 0x4d, 0x59, 0xa4, 0x13, 0x46, 0xbf, 0x2d, 0xd2, 0xf3, 0xda, 0xeb, 0xc2, 0xd7, + 0x63, 0xf1, 0xd5, 0x8a, 0x76, 0xdf, 0x01, 0x25, 0x3c, 0x7d, 0xd1, 0x70, 0x36, 0xa6, 0x26, 0x40, + 0xfe, 0x7d, 0x98, 0x0b, 0x4c, 0x59, 0x24, 0xe7, 0x4e, 0x99, 0x54, 0xf9, 0x97, 0x52, 0x90, 0x6a, + 0x5a, 0xa6, 0xdd, 0x25, 0xaf, 0x07, 0x33, 0xe2, 0x93, 0x73, 0x6e, 0x36, 0xbc, 0x18, 0xca, 0x86, + 0x4f, 0xce, 
0x79, 0xb9, 0xf0, 0x62, 0x28, 0x17, 0xba, 0x5d, 0x35, 0x9d, 0x5c, 0x9e, 0xca, 0x84, + 0x4f, 0xce, 0x09, 0x69, 0xf0, 0xf2, 0x54, 0x1a, 0xf4, 0xbb, 0x6b, 0x3a, 0x0d, 0x9d, 0xc1, 0x1c, + 0xf8, 0xe4, 0x9c, 0x9f, 0xff, 0x96, 0xc3, 0xf9, 0xcf, 0xeb, 0xac, 0xe9, 0xcc, 0x25, 0x21, 0xf7, + 0xa1, 0x4b, 0x2c, 0xeb, 0x2d, 0x87, 0xb3, 0x1e, 0xf2, 0x78, 0xbe, 0x5b, 0x0e, 0xe7, 0x3b, 0xec, + 0xe4, 0xf9, 0xed, 0x62, 0x28, 0xbf, 0xa1, 0x51, 0x96, 0xd8, 0x96, 0xc3, 0x89, 0x8d, 0xf1, 0x04, + 0x4f, 0xc5, 0xac, 0xe6, 0x75, 0xd6, 0x74, 0xa2, 0x85, 0x52, 0x5a, 0xf4, 0xb9, 0x1e, 0xdf, 0x05, + 0x86, 0x77, 0x9d, 0x2e, 0x9b, 0x7b, 0xe4, 0x2c, 0xc6, 0x7c, 0x60, 0xc7, 0xd5, 0x74, 0x8f, 0x5c, + 0x1a, 0x64, 0xba, 0xbc, 0xd4, 0x55, 0x30, 0x46, 0x09, 0xb2, 0xc4, 0x97, 0xbf, 0xb2, 0xd1, 0xc2, + 0x58, 0x85, 0xf3, 0x62, 0xa7, 0xf7, 0x0a, 0xcc, 0x6d, 0xb4, 0x9e, 0xb6, 0xc7, 0x3d, 0x73, 0xe2, + 0xb4, 0xf6, 0xdb, 0x3d, 0xef, 0xba, 0x80, 0xbe, 0xff, 0x7c, 0x97, 0xf7, 0xec, 0xb7, 0x7b, 0xe4, + 0x82, 0x2b, 0xae, 0x0e, 0xf6, 0x4a, 0x5c, 0x5e, 0x4b, 0xaf, 0xd3, 0x45, 0x63, 0xc6, 0x30, 0xea, + 0x2d, 0xf0, 0xa8, 0xf7, 0x30, 0x03, 0xa9, 0x23, 0xab, 0x6f, 0x5b, 0x0f, 0x73, 0x90, 0x71, 0xec, + 0xf1, 0xb0, 0xed, 0xd8, 0xe5, 0x1f, 0x49, 0x00, 0x8f, 0xec, 0xe1, 0xf0, 0xc8, 0xea, 0xbf, 0x38, + 0x32, 0xc9, 0x15, 0xc8, 0x0f, 0xdb, 0xcf, 0xcd, 0xd6, 0xd0, 0x6c, 0x1d, 0x8e, 0xdd, 0x7d, 0x90, + 0xa3, 0x4d, 0xdb, 0xe6, 0xa3, 0xf1, 0x09, 0x29, 0xb9, 0x87, 0x71, 0xd4, 0x0e, 0x4a, 0x92, 0x1f, + 0xce, 0x17, 0xf9, 0xf1, 0x32, 0xcd, 0xdf, 0xa1, 0x7b, 0xc0, 0x64, 0x15, 0x43, 0x86, 0xbf, 0x3d, + 0x7c, 0xa2, 0x92, 0x77, 0xcc, 0xe1, 0xa8, 0x75, 0x88, 0x52, 0xa1, 0x72, 0x48, 0xd1, 0xe7, 0x47, + 0xe4, 0x36, 0x24, 0x0e, 0xed, 0x01, 0x8a, 0xe4, 0x94, 0xf7, 0x42, 0x71, 0xe4, 0x0d, 0x48, 0x0c, + 0x27, 0x4c, 0x36, 0x79, 0x6d, 0x41, 0x38, 0x11, 0xb0, 0x24, 0x44, 0x61, 0xc3, 0x49, 0xcf, 0x9b, + 0xf7, 0x8d, 0x22, 0x24, 0x36, 0x9a, 0x4d, 0x9a, 0xe5, 0x37, 0x9a, 0xcd, 0x35, 0x45, 0xaa, 0x7f, + 0x09, 0xb2, 0xbd, 0xb1, 0x69, 0xd2, 0xf0, 0x30, 
0xbb, 0xba, 0xf8, 0x10, 0xb3, 0x9a, 0x07, 0xaa, + 0x6f, 0x43, 0xe6, 0x90, 0xd5, 0x17, 0x24, 0xa2, 0x80, 0x2d, 0xfd, 0x21, 0xbb, 0x3e, 0x59, 0xf2, + 0xbb, 0xc3, 0x15, 0x89, 0xe1, 0xda, 0xa8, 0xef, 0x42, 0x6e, 0xdc, 0x3a, 0xcd, 0xe0, 0xc7, 0x2c, + 0xbb, 0xc4, 0x19, 0xcc, 0x8e, 0x79, 0x53, 0xbd, 0x01, 0x0b, 0x96, 0xed, 0x7e, 0xb2, 0x68, 0x75, + 0xd8, 0x1e, 0xbb, 0x38, 0x7d, 0x68, 0x73, 0x8d, 0x9b, 0xec, 0x33, 0xa1, 0x65, 0xf3, 0x0e, 0xb6, + 0x2b, 0xeb, 0x8f, 0x40, 0x11, 0xcc, 0x60, 0x91, 0x19, 0x67, 0xa5, 0xcb, 0xbe, 0x4b, 0x7a, 0x56, + 0x70, 0xdf, 0x87, 0x8c, 0xb0, 0x9d, 0x19, 0x63, 0xa4, 0xc7, 0x3e, 0xf2, 0x7a, 0x46, 0x30, 0xd4, + 0x4d, 0x1b, 0xa1, 0xb1, 0x26, 0xda, 0xc8, 0x33, 0xf6, 0xfd, 0x57, 0x34, 0x52, 0xd3, 0x43, 0xab, + 0x72, 0x74, 0xaa, 0x2b, 0x7d, 0xf6, 0xf9, 0xd6, 0xb3, 0xc2, 0x02, 0xe0, 0x0c, 0x33, 0xf1, 0xce, + 0x7c, 0xc8, 0xbe, 0xec, 0x06, 0xcc, 0x4c, 0x79, 0x33, 0x39, 0xd5, 0x9b, 0xe7, 0xec, 0x33, 0xaa, + 0x67, 0x66, 0x6f, 0x96, 0x37, 0x93, 0x53, 0xbd, 0x19, 0xb0, 0x0f, 0xac, 0x01, 0x33, 0x35, 0xbd, + 0xbe, 0x09, 0x44, 0x7c, 0xd5, 0x3c, 0x4f, 0xc4, 0xd8, 0x19, 0xb2, 0xcf, 0xe6, 0xfe, 0xcb, 0x66, + 0x94, 0x59, 0x86, 0xe2, 0x1d, 0xb2, 0xd8, 0x17, 0xf5, 0xa0, 0xa1, 0x9a, 0x5e, 0xdf, 0x82, 0xf3, + 0xe2, 0xc4, 0xce, 0xe0, 0x92, 0xad, 0x4a, 0x95, 0xa2, 0xb1, 0xe0, 0x4f, 0x8d, 0x73, 0x66, 0x9a, + 0x8a, 0x77, 0x6a, 0xa4, 0x4a, 0x15, 0x65, 0xca, 0x54, 0x4d, 0xaf, 0x3f, 0x80, 0xa2, 0x60, 0xea, + 0x00, 0x33, 0x74, 0xb4, 0x99, 0x17, 0xec, 0x5f, 0x1b, 0x3c, 0x33, 0x34, 0xa3, 0x87, 0xdf, 0x18, + 0xcf, 0x71, 0xd1, 0x46, 0xc6, 0xec, 0xbb, 0xbc, 0xef, 0x0b, 0x32, 0x42, 0x5b, 0x02, 0x2b, 0xed, + 0x38, 0x2b, 0x13, 0xf6, 0xc5, 0xde, 0x77, 0x85, 0x12, 0xea, 0xfd, 0xc0, 0x74, 0x4c, 0x9a, 0xe4, + 0x62, 0x6c, 0x38, 0x18, 0x91, 0xdf, 0x8c, 0x04, 0xac, 0x88, 0x57, 0x21, 0xc2, 0xb4, 0xe9, 0x63, + 0x7d, 0x0b, 0xe6, 0xcf, 0x1e, 0x90, 0x3e, 0x96, 0x58, 0x5d, 0x5c, 0x5d, 0xa1, 0xa5, 0xb3, 0x31, + 0xd7, 0x09, 0xc4, 0xa5, 0x06, 0xcc, 0x9d, 0x39, 0x28, 0x7d, 0x22, 0xb1, 0xea, 0x92, 
0x5a, 0x32, + 0x0a, 0x9d, 0x60, 0x64, 0x9a, 0x3b, 0x73, 0x58, 0xfa, 0x54, 0x62, 0x57, 0x11, 0xba, 0xe6, 0x19, + 0x71, 0x23, 0xd3, 0xdc, 0x99, 0xc3, 0xd2, 0x57, 0x59, 0xed, 0x28, 0xeb, 0x55, 0xd1, 0x08, 0xc6, + 0x82, 0xf9, 0xb3, 0x87, 0xa5, 0xaf, 0x49, 0x78, 0x2d, 0x21, 0xeb, 0xba, 0xb7, 0x2e, 0x5e, 0x64, + 0x9a, 0x3f, 0x7b, 0x58, 0xfa, 0xba, 0x84, 0x97, 0x17, 0xb2, 0xbe, 0x1e, 0x30, 0x13, 0xf4, 0xe6, + 0xf4, 0xb0, 0xf4, 0x0d, 0x09, 0xef, 0x13, 0x64, 0xbd, 0xe6, 0x99, 0xd9, 0x9b, 0xf2, 0xe6, 0xf4, + 0xb0, 0xf4, 0x4d, 0x3c, 0xc5, 0xd7, 0x65, 0xfd, 0x4e, 0xc0, 0x0c, 0x46, 0xa6, 0xe2, 0x2b, 0x84, + 0xa5, 0x6f, 0x49, 0x78, 0xed, 0x23, 0xeb, 0x77, 0x0d, 0x77, 0x74, 0x3f, 0x32, 0x15, 0x5f, 0x21, + 0x2c, 0x7d, 0x26, 0xe1, 0xed, 0x90, 0xac, 0xdf, 0x0b, 0x1a, 0xc2, 0xc8, 0xa4, 0xbc, 0x4a, 0x58, + 0xfa, 0x36, 0xb5, 0x54, 0xac, 0xcb, 0xeb, 0xab, 0x86, 0xeb, 0x80, 0x10, 0x99, 0x94, 0x57, 0x09, + 0x4b, 0xdf, 0xa1, 0xa6, 0x94, 0xba, 0xbc, 0xbe, 0x16, 0x32, 0x55, 0xd3, 0xeb, 0x8f, 0xa0, 0x70, + 0xd6, 0xb0, 0xf4, 0x5d, 0xf1, 0xd6, 0x2d, 0xdf, 0x11, 0x62, 0xd3, 0xae, 0xf0, 0xce, 0x4e, 0x0d, + 0x4c, 0xdf, 0xc3, 0x1a, 0xa7, 0x3e, 0xf7, 0x84, 0xdd, 0x4c, 0x31, 0x82, 0xff, 0xfa, 0x58, 0x98, + 0xda, 0xf6, 0xf7, 0xc7, 0xa9, 0x31, 0xea, 0xfb, 0x12, 0x5e, 0x5f, 0x15, 0xb8, 0x41, 0xc4, 0x7b, + 0x3b, 0x85, 0x05, 0xac, 0x0f, 0xfd, 0x59, 0x9e, 0x16, 0xad, 0x7e, 0x20, 0xbd, 0x4a, 0xb8, 0xaa, + 0x27, 0x9a, 0x3b, 0x0d, 0x6f, 0x31, 0xb0, 0xe5, 0x6d, 0x48, 0x1e, 0x6b, 0xab, 0x6b, 0xe2, 0x91, + 0x4c, 0xbc, 0xb5, 0x65, 0x41, 0x2a, 0xaf, 0x15, 0x85, 0x8b, 0xed, 0xe1, 0xc8, 0x39, 0x31, 0x90, + 0xc5, 0xd9, 0x5a, 0x24, 0xfb, 0x93, 0x18, 0xb6, 0xc6, 0xd9, 0xd5, 0x48, 0xf6, 0xa7, 0x31, 0xec, + 0x2a, 0x67, 0xeb, 0x91, 0xec, 0xaf, 0xc6, 0xb0, 0x75, 0xce, 0x5e, 0x8f, 0x64, 0x7f, 0x2d, 0x86, + 0xbd, 0xce, 0xd9, 0xb5, 0x48, 0xf6, 0xd7, 0x63, 0xd8, 0x35, 0xce, 0xbe, 0x13, 0xc9, 0xfe, 0x46, + 0x0c, 0xfb, 0x0e, 0x67, 0xdf, 0x8d, 0x64, 0x7f, 0x33, 0x86, 0x7d, 0x97, 0xb3, 0xef, 0x45, 0xb2, + 0xbf, 0x15, 0xc3, 0xbe, 
0xc7, 0xd8, 0x6b, 0xab, 0x91, 0xec, 0xcf, 0xa2, 0xd9, 0x6b, 0xab, 0x9c, + 0x1d, 0xad, 0xb5, 0x6f, 0xc7, 0xb0, 0xb9, 0xd6, 0xd6, 0xa2, 0xb5, 0xf6, 0x9d, 0x18, 0x36, 0xd7, + 0xda, 0x5a, 0xb4, 0xd6, 0xbe, 0x1b, 0xc3, 0xe6, 0x5a, 0x5b, 0x8b, 0xd6, 0xda, 0xf7, 0x62, 0xd8, + 0x5c, 0x6b, 0x6b, 0xd1, 0x5a, 0xfb, 0x7e, 0x0c, 0x9b, 0x6b, 0x6d, 0x2d, 0x5a, 0x6b, 0x3f, 0x88, + 0x61, 0x73, 0xad, 0xad, 0x45, 0x6b, 0xed, 0x8f, 0x62, 0xd8, 0x5c, 0x6b, 0x6b, 0xd1, 0x5a, 0xfb, + 0xe3, 0x18, 0x36, 0xd7, 0xda, 0x5a, 0xb4, 0xd6, 0xfe, 0x24, 0x86, 0xcd, 0xb5, 0xa6, 0x45, 0x6b, + 0xed, 0x4f, 0xa3, 0xd9, 0x1a, 0xd7, 0x9a, 0x16, 0xad, 0xb5, 0x3f, 0x8b, 0x61, 0x73, 0xad, 0x69, + 0xd1, 0x5a, 0xfb, 0xf3, 0x18, 0x36, 0xd7, 0x9a, 0x16, 0xad, 0xb5, 0x1f, 0xc6, 0xb0, 0xb9, 0xd6, + 0xb4, 0x68, 0xad, 0xfd, 0x45, 0x0c, 0x9b, 0x6b, 0x4d, 0x8b, 0xd6, 0xda, 0x5f, 0xc6, 0xb0, 0xb9, + 0xd6, 0xb4, 0x68, 0xad, 0xfd, 0x55, 0x0c, 0x9b, 0x6b, 0x4d, 0x8b, 0xd6, 0xda, 0x5f, 0xc7, 0xb0, + 0xb9, 0xd6, 0xb4, 0x68, 0xad, 0xfd, 0x4d, 0x0c, 0x9b, 0x6b, 0x4d, 0x8b, 0xd6, 0xda, 0xdf, 0xc6, + 0xb0, 0xb9, 0xd6, 0xaa, 0xd1, 0x5a, 0xfb, 0xbb, 0x68, 0x76, 0x95, 0x6b, 0xad, 0x1a, 0xad, 0xb5, + 0xbf, 0x8f, 0x61, 0x73, 0xad, 0x55, 0xa3, 0xb5, 0xf6, 0x0f, 0x31, 0x6c, 0xae, 0xb5, 0x6a, 0xb4, + 0xd6, 0xfe, 0x31, 0x86, 0xcd, 0xb5, 0x56, 0x8d, 0xd6, 0xda, 0x8f, 0x62, 0xd8, 0x5c, 0x6b, 0xd5, + 0x68, 0xad, 0xfd, 0x53, 0x0c, 0x9b, 0x6b, 0xad, 0x1a, 0xad, 0xb5, 0x7f, 0x8e, 0x61, 0x73, 0xad, + 0x55, 0xa3, 0xb5, 0xf6, 0x2f, 0x31, 0x6c, 0xae, 0xb5, 0x6a, 0xb4, 0xd6, 0xfe, 0x35, 0x86, 0xcd, + 0xb5, 0x56, 0x8d, 0xd6, 0xda, 0xbf, 0xc5, 0xb0, 0xb9, 0xd6, 0xf4, 0x68, 0xad, 0xfd, 0x7b, 0x34, + 0x5b, 0xe7, 0x5a, 0xd3, 0xa3, 0xb5, 0xf6, 0x1f, 0x31, 0x6c, 0xae, 0x35, 0x3d, 0x5a, 0x6b, 0xff, + 0x19, 0xc3, 0xe6, 0x5a, 0xd3, 0xa3, 0xb5, 0xf6, 0x5f, 0x31, 0x6c, 0xae, 0x35, 0x3d, 0x5a, 0x6b, + 0xff, 0x1d, 0xc3, 0xe6, 0x5a, 0xd3, 0xa3, 0xb5, 0xf6, 0x3f, 0x31, 0x6c, 0xae, 0x35, 0x3d, 0x5a, + 0x6b, 0x3f, 0x8e, 0x61, 0x73, 0xad, 0xe9, 0xd1, 0x5a, 0xfb, 
0x49, 0x0c, 0x9b, 0x6b, 0x4d, 0x8f, + 0xd6, 0xda, 0xff, 0xc6, 0xb0, 0xb9, 0xd6, 0xf4, 0x68, 0xad, 0xfd, 0x5f, 0x0c, 0x9b, 0x6b, 0x6d, + 0x3d, 0x5a, 0x6b, 0xff, 0x1f, 0xcd, 0x5e, 0x5f, 0xfd, 0x69, 0x00, 0x00, 0x00, 0xff, 0xff, 0x81, + 0x23, 0xc6, 0xe6, 0xc6, 0x38, 0x00, 0x00, +} diff -Nru juju-core-2.0~beta15/src/github.com/matttproud/golang_protobuf_extensions/testdata/test.proto juju-core-2.0.0/src/github.com/matttproud/golang_protobuf_extensions/testdata/test.proto --- juju-core-2.0~beta15/src/github.com/matttproud/golang_protobuf_extensions/testdata/test.proto 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/matttproud/golang_protobuf_extensions/testdata/test.proto 2016-10-13 14:32:27.000000000 +0000 @@ -0,0 +1,540 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// A feature-rich test file for the protocol compiler and libraries. + +syntax = "proto2"; + +package testdata; + +enum FOO { FOO1 = 1; }; + +message GoEnum { + required FOO foo = 1; +} + +message GoTestField { + required string Label = 1; + required string Type = 2; +} + +message GoTest { + // An enum, for completeness. + enum KIND { + VOID = 0; + + // Basic types + BOOL = 1; + BYTES = 2; + FINGERPRINT = 3; + FLOAT = 4; + INT = 5; + STRING = 6; + TIME = 7; + + // Groupings + TUPLE = 8; + ARRAY = 9; + MAP = 10; + + // Table types + TABLE = 11; + + // Functions + FUNCTION = 12; // last tag + }; + + // Some typical parameters + required KIND Kind = 1; + optional string Table = 2; + optional int32 Param = 3; + + // Required, repeated and optional foreign fields. 
+ required GoTestField RequiredField = 4; + repeated GoTestField RepeatedField = 5; + optional GoTestField OptionalField = 6; + + // Required fields of all basic types + required bool F_Bool_required = 10; + required int32 F_Int32_required = 11; + required int64 F_Int64_required = 12; + required fixed32 F_Fixed32_required = 13; + required fixed64 F_Fixed64_required = 14; + required uint32 F_Uint32_required = 15; + required uint64 F_Uint64_required = 16; + required float F_Float_required = 17; + required double F_Double_required = 18; + required string F_String_required = 19; + required bytes F_Bytes_required = 101; + required sint32 F_Sint32_required = 102; + required sint64 F_Sint64_required = 103; + + // Repeated fields of all basic types + repeated bool F_Bool_repeated = 20; + repeated int32 F_Int32_repeated = 21; + repeated int64 F_Int64_repeated = 22; + repeated fixed32 F_Fixed32_repeated = 23; + repeated fixed64 F_Fixed64_repeated = 24; + repeated uint32 F_Uint32_repeated = 25; + repeated uint64 F_Uint64_repeated = 26; + repeated float F_Float_repeated = 27; + repeated double F_Double_repeated = 28; + repeated string F_String_repeated = 29; + repeated bytes F_Bytes_repeated = 201; + repeated sint32 F_Sint32_repeated = 202; + repeated sint64 F_Sint64_repeated = 203; + + // Optional fields of all basic types + optional bool F_Bool_optional = 30; + optional int32 F_Int32_optional = 31; + optional int64 F_Int64_optional = 32; + optional fixed32 F_Fixed32_optional = 33; + optional fixed64 F_Fixed64_optional = 34; + optional uint32 F_Uint32_optional = 35; + optional uint64 F_Uint64_optional = 36; + optional float F_Float_optional = 37; + optional double F_Double_optional = 38; + optional string F_String_optional = 39; + optional bytes F_Bytes_optional = 301; + optional sint32 F_Sint32_optional = 302; + optional sint64 F_Sint64_optional = 303; + + // Default-valued fields of all basic types + optional bool F_Bool_defaulted = 40 [default=true]; + optional int32 
F_Int32_defaulted = 41 [default=32]; + optional int64 F_Int64_defaulted = 42 [default=64]; + optional fixed32 F_Fixed32_defaulted = 43 [default=320]; + optional fixed64 F_Fixed64_defaulted = 44 [default=640]; + optional uint32 F_Uint32_defaulted = 45 [default=3200]; + optional uint64 F_Uint64_defaulted = 46 [default=6400]; + optional float F_Float_defaulted = 47 [default=314159.]; + optional double F_Double_defaulted = 48 [default=271828.]; + optional string F_String_defaulted = 49 [default="hello, \"world!\"\n"]; + optional bytes F_Bytes_defaulted = 401 [default="Bignose"]; + optional sint32 F_Sint32_defaulted = 402 [default = -32]; + optional sint64 F_Sint64_defaulted = 403 [default = -64]; + + // Packed repeated fields (no string or bytes). + repeated bool F_Bool_repeated_packed = 50 [packed=true]; + repeated int32 F_Int32_repeated_packed = 51 [packed=true]; + repeated int64 F_Int64_repeated_packed = 52 [packed=true]; + repeated fixed32 F_Fixed32_repeated_packed = 53 [packed=true]; + repeated fixed64 F_Fixed64_repeated_packed = 54 [packed=true]; + repeated uint32 F_Uint32_repeated_packed = 55 [packed=true]; + repeated uint64 F_Uint64_repeated_packed = 56 [packed=true]; + repeated float F_Float_repeated_packed = 57 [packed=true]; + repeated double F_Double_repeated_packed = 58 [packed=true]; + repeated sint32 F_Sint32_repeated_packed = 502 [packed=true]; + repeated sint64 F_Sint64_repeated_packed = 503 [packed=true]; + + // Required, repeated, and optional groups. + required group RequiredGroup = 70 { + required string RequiredField = 71; + }; + + repeated group RepeatedGroup = 80 { + required string RequiredField = 81; + }; + + optional group OptionalGroup = 90 { + required string RequiredField = 91; + }; +} + +// For testing skipping of unrecognized fields. +// Numbers are all big, larger than tag numbers in GoTestField, +// the message used in the corresponding test. 
+message GoSkipTest { + required int32 skip_int32 = 11; + required fixed32 skip_fixed32 = 12; + required fixed64 skip_fixed64 = 13; + required string skip_string = 14; + required group SkipGroup = 15 { + required int32 group_int32 = 16; + required string group_string = 17; + } +} + +// For testing packed/non-packed decoder switching. +// A serialized instance of one should be deserializable as the other. +message NonPackedTest { + repeated int32 a = 1; +} + +message PackedTest { + repeated int32 b = 1 [packed=true]; +} + +message MaxTag { + // Maximum possible tag number. + optional string last_field = 536870911; +} + +message OldMessage { + message Nested { + optional string name = 1; + } + optional Nested nested = 1; + + optional int32 num = 2; +} + +// NewMessage is wire compatible with OldMessage; +// imagine it as a future version. +message NewMessage { + message Nested { + optional string name = 1; + optional string food_group = 2; + } + optional Nested nested = 1; + + // This is an int32 in OldMessage. + optional int64 num = 2; +} + +// Smaller tests for ASCII formatting. 
+ +message InnerMessage { + required string host = 1; + optional int32 port = 2 [default=4000]; + optional bool connected = 3; +} + +message OtherMessage { + optional int64 key = 1; + optional bytes value = 2; + optional float weight = 3; + optional InnerMessage inner = 4; + + extensions 100 to max; +} + +message RequiredInnerMessage { + required InnerMessage leo_finally_won_an_oscar = 1; +} + +message MyMessage { + required int32 count = 1; + optional string name = 2; + optional string quote = 3; + repeated string pet = 4; + optional InnerMessage inner = 5; + repeated OtherMessage others = 6; + optional RequiredInnerMessage we_must_go_deeper = 13; + repeated InnerMessage rep_inner = 12; + + enum Color { + RED = 0; + GREEN = 1; + BLUE = 2; + }; + optional Color bikeshed = 7; + + optional group SomeGroup = 8 { + optional int32 group_field = 9; + } + + // This field becomes [][]byte in the generated code. + repeated bytes rep_bytes = 10; + + optional double bigfloat = 11; + + extensions 100 to max; +} + +message Ext { + extend MyMessage { + optional Ext more = 103; + optional string text = 104; + optional int32 number = 105; + } + + optional string data = 1; +} + +extend MyMessage { + repeated string greeting = 106; +} + +message ComplexExtension { + optional int32 first = 1; + optional int32 second = 2; + repeated int32 third = 3; +} + +extend OtherMessage { + optional ComplexExtension complex = 200; + repeated ComplexExtension r_complex = 201; +} + +message DefaultsMessage { + enum DefaultsEnum { + ZERO = 0; + ONE = 1; + TWO = 2; + }; + extensions 100 to max; +} + +extend DefaultsMessage { + optional double no_default_double = 101; + optional float no_default_float = 102; + optional int32 no_default_int32 = 103; + optional int64 no_default_int64 = 104; + optional uint32 no_default_uint32 = 105; + optional uint64 no_default_uint64 = 106; + optional sint32 no_default_sint32 = 107; + optional sint64 no_default_sint64 = 108; + optional fixed32 no_default_fixed32 = 109; 
+ optional fixed64 no_default_fixed64 = 110; + optional sfixed32 no_default_sfixed32 = 111; + optional sfixed64 no_default_sfixed64 = 112; + optional bool no_default_bool = 113; + optional string no_default_string = 114; + optional bytes no_default_bytes = 115; + optional DefaultsMessage.DefaultsEnum no_default_enum = 116; + + optional double default_double = 201 [default = 3.1415]; + optional float default_float = 202 [default = 3.14]; + optional int32 default_int32 = 203 [default = 42]; + optional int64 default_int64 = 204 [default = 43]; + optional uint32 default_uint32 = 205 [default = 44]; + optional uint64 default_uint64 = 206 [default = 45]; + optional sint32 default_sint32 = 207 [default = 46]; + optional sint64 default_sint64 = 208 [default = 47]; + optional fixed32 default_fixed32 = 209 [default = 48]; + optional fixed64 default_fixed64 = 210 [default = 49]; + optional sfixed32 default_sfixed32 = 211 [default = 50]; + optional sfixed64 default_sfixed64 = 212 [default = 51]; + optional bool default_bool = 213 [default = true]; + optional string default_string = 214 [default = "Hello, string"]; + optional bytes default_bytes = 215 [default = "Hello, bytes"]; + optional DefaultsMessage.DefaultsEnum default_enum = 216 [default = ONE]; +} + +message MyMessageSet { + option message_set_wire_format = true; + extensions 100 to max; +} + +message Empty { +} + +extend MyMessageSet { + optional Empty x201 = 201; + optional Empty x202 = 202; + optional Empty x203 = 203; + optional Empty x204 = 204; + optional Empty x205 = 205; + optional Empty x206 = 206; + optional Empty x207 = 207; + optional Empty x208 = 208; + optional Empty x209 = 209; + optional Empty x210 = 210; + optional Empty x211 = 211; + optional Empty x212 = 212; + optional Empty x213 = 213; + optional Empty x214 = 214; + optional Empty x215 = 215; + optional Empty x216 = 216; + optional Empty x217 = 217; + optional Empty x218 = 218; + optional Empty x219 = 219; + optional Empty x220 = 220; + optional 
Empty x221 = 221; + optional Empty x222 = 222; + optional Empty x223 = 223; + optional Empty x224 = 224; + optional Empty x225 = 225; + optional Empty x226 = 226; + optional Empty x227 = 227; + optional Empty x228 = 228; + optional Empty x229 = 229; + optional Empty x230 = 230; + optional Empty x231 = 231; + optional Empty x232 = 232; + optional Empty x233 = 233; + optional Empty x234 = 234; + optional Empty x235 = 235; + optional Empty x236 = 236; + optional Empty x237 = 237; + optional Empty x238 = 238; + optional Empty x239 = 239; + optional Empty x240 = 240; + optional Empty x241 = 241; + optional Empty x242 = 242; + optional Empty x243 = 243; + optional Empty x244 = 244; + optional Empty x245 = 245; + optional Empty x246 = 246; + optional Empty x247 = 247; + optional Empty x248 = 248; + optional Empty x249 = 249; + optional Empty x250 = 250; +} + +message MessageList { + repeated group Message = 1 { + required string name = 2; + required int32 count = 3; + } +} + +message Strings { + optional string string_field = 1; + optional bytes bytes_field = 2; +} + +message Defaults { + enum Color { + RED = 0; + GREEN = 1; + BLUE = 2; + } + + // Default-valued fields of all basic types. + // Same as GoTest, but copied here to make testing easier. + optional bool F_Bool = 1 [default=true]; + optional int32 F_Int32 = 2 [default=32]; + optional int64 F_Int64 = 3 [default=64]; + optional fixed32 F_Fixed32 = 4 [default=320]; + optional fixed64 F_Fixed64 = 5 [default=640]; + optional uint32 F_Uint32 = 6 [default=3200]; + optional uint64 F_Uint64 = 7 [default=6400]; + optional float F_Float = 8 [default=314159.]; + optional double F_Double = 9 [default=271828.]; + optional string F_String = 10 [default="hello, \"world!\"\n"]; + optional bytes F_Bytes = 11 [default="Bignose"]; + optional sint32 F_Sint32 = 12 [default=-32]; + optional sint64 F_Sint64 = 13 [default=-64]; + optional Color F_Enum = 14 [default=GREEN]; + + // More fields with crazy defaults. 
+ optional float F_Pinf = 15 [default=inf]; + optional float F_Ninf = 16 [default=-inf]; + optional float F_Nan = 17 [default=nan]; + + // Sub-message. + optional SubDefaults sub = 18; + + // Redundant but explicit defaults. + optional string str_zero = 19 [default=""]; +} + +message SubDefaults { + optional int64 n = 1 [default=7]; +} + +message RepeatedEnum { + enum Color { + RED = 1; + } + repeated Color color = 1; +} + +message MoreRepeated { + repeated bool bools = 1; + repeated bool bools_packed = 2 [packed=true]; + repeated int32 ints = 3; + repeated int32 ints_packed = 4 [packed=true]; + repeated int64 int64s_packed = 7 [packed=true]; + repeated string strings = 5; + repeated fixed32 fixeds = 6; +} + +// GroupOld and GroupNew have the same wire format. +// GroupNew has a new field inside a group. + +message GroupOld { + optional group G = 101 { + optional int32 x = 2; + } +} + +message GroupNew { + optional group G = 101 { + optional int32 x = 2; + optional int32 y = 3; + } +} + +message FloatingPoint { + required double f = 1; +} + +message MessageWithMap { + map name_mapping = 1; + map msg_mapping = 2; + map byte_mapping = 3; + map str_to_str = 4; +} + +message Oneof { + oneof union { + bool F_Bool = 1; + int32 F_Int32 = 2; + int64 F_Int64 = 3; + fixed32 F_Fixed32 = 4; + fixed64 F_Fixed64 = 5; + uint32 F_Uint32 = 6; + uint64 F_Uint64 = 7; + float F_Float = 8; + double F_Double = 9; + string F_String = 10; + bytes F_Bytes = 11; + sint32 F_Sint32 = 12; + sint64 F_Sint64 = 13; + MyMessage.Color F_Enum = 14; + GoTestField F_Message = 15; + group F_Group = 16 { + optional int32 x = 17; + } + int32 F_Largest_Tag = 536870911; + } + + oneof tormato { + int32 value = 100; + } +} + +message Communique { + optional bool make_me_cry = 1; + + // This is a oneof, called "union". 
+ oneof union { + int32 number = 5; + string name = 6; + bytes data = 7; + double temp_c = 8; + MyMessage.Color col = 9; + Strings msg = 10; + } +} diff -Nru juju-core-2.0~beta15/src/github.com/matttproud/golang_protobuf_extensions/.travis.yml juju-core-2.0.0/src/github.com/matttproud/golang_protobuf_extensions/.travis.yml --- juju-core-2.0~beta15/src/github.com/matttproud/golang_protobuf_extensions/.travis.yml 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/matttproud/golang_protobuf_extensions/.travis.yml 2016-10-13 14:32:27.000000000 +0000 @@ -0,0 +1,8 @@ +language: go + +go: + - 1.5 + - 1.6 + - tip + +script: make -f Makefile.TRAVIS diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/client_golang/api/prometheus/api.go juju-core-2.0.0/src/github.com/prometheus/client_golang/api/prometheus/api.go --- juju-core-2.0~beta15/src/github.com/prometheus/client_golang/api/prometheus/api.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/client_golang/api/prometheus/api.go 2016-10-13 14:32:23.000000000 +0000 @@ -0,0 +1,341 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// Package prometheus provides bindings to the Prometheus HTTP API: +// http://prometheus.io/docs/querying/api/ +package prometheus + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net" + "net/http" + "net/url" + "path" + "strconv" + "strings" + "time" + + "github.com/prometheus/common/model" + "golang.org/x/net/context" + "golang.org/x/net/context/ctxhttp" +) + +const ( + statusAPIError = 422 + apiPrefix = "/api/v1" + + epQuery = "/query" + epQueryRange = "/query_range" + epLabelValues = "/label/:name/values" + epSeries = "/series" +) + +type ErrorType string + +const ( + // The different API error types. + ErrBadData ErrorType = "bad_data" + ErrTimeout = "timeout" + ErrCanceled = "canceled" + ErrExec = "execution" + ErrBadResponse = "bad_response" +) + +// Error is an error returned by the API. +type Error struct { + Type ErrorType + Msg string +} + +func (e *Error) Error() string { + return fmt.Sprintf("%s: %s", e.Type, e.Msg) +} + +// CancelableTransport is like net.Transport but provides +// per-request cancelation functionality. +type CancelableTransport interface { + http.RoundTripper + CancelRequest(req *http.Request) +} + +var DefaultTransport CancelableTransport = &http.Transport{ + Proxy: http.ProxyFromEnvironment, + Dial: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + }).Dial, + TLSHandshakeTimeout: 10 * time.Second, +} + +// Config defines configuration parameters for a new client. +type Config struct { + // The address of the Prometheus to connect to. + Address string + + // Transport is used by the Client to drive HTTP requests. If not + // provided, DefaultTransport will be used. 
+ Transport CancelableTransport +} + +func (cfg *Config) transport() CancelableTransport { + if cfg.Transport == nil { + return DefaultTransport + } + return cfg.Transport +} + +type Client interface { + url(ep string, args map[string]string) *url.URL + do(context.Context, *http.Request) (*http.Response, []byte, error) +} + +// New returns a new Client. +func New(cfg Config) (Client, error) { + u, err := url.Parse(cfg.Address) + if err != nil { + return nil, err + } + u.Path = strings.TrimRight(u.Path, "/") + apiPrefix + + return &httpClient{ + endpoint: u, + transport: cfg.transport(), + }, nil +} + +type httpClient struct { + endpoint *url.URL + transport CancelableTransport +} + +func (c *httpClient) url(ep string, args map[string]string) *url.URL { + p := path.Join(c.endpoint.Path, ep) + + for arg, val := range args { + arg = ":" + arg + p = strings.Replace(p, arg, val, -1) + } + + u := *c.endpoint + u.Path = p + + return &u +} + +func (c *httpClient) do(ctx context.Context, req *http.Request) (*http.Response, []byte, error) { + resp, err := ctxhttp.Do(ctx, &http.Client{Transport: c.transport}, req) + + defer func() { + if resp != nil { + resp.Body.Close() + } + }() + + if err != nil { + return nil, nil, err + } + + var body []byte + done := make(chan struct{}) + go func() { + body, err = ioutil.ReadAll(resp.Body) + close(done) + }() + + select { + case <-ctx.Done(): + err = resp.Body.Close() + <-done + if err == nil { + err = ctx.Err() + } + case <-done: + } + + return resp, body, err +} + +// apiClient wraps a regular client and processes successful API responses. +// Successful also includes responses that errored at the API level. 
+type apiClient struct { + Client +} + +type apiResponse struct { + Status string `json:"status"` + Data json.RawMessage `json:"data"` + ErrorType ErrorType `json:"errorType"` + Error string `json:"error"` +} + +func (c apiClient) do(ctx context.Context, req *http.Request) (*http.Response, []byte, error) { + resp, body, err := c.Client.do(ctx, req) + if err != nil { + return resp, body, err + } + + code := resp.StatusCode + + if code/100 != 2 && code != statusAPIError { + return resp, body, &Error{ + Type: ErrBadResponse, + Msg: fmt.Sprintf("bad response code %d", resp.StatusCode), + } + } + + var result apiResponse + + if err = json.Unmarshal(body, &result); err != nil { + return resp, body, &Error{ + Type: ErrBadResponse, + Msg: err.Error(), + } + } + + if (code == statusAPIError) != (result.Status == "error") { + err = &Error{ + Type: ErrBadResponse, + Msg: "inconsistent body for response code", + } + } + + if code == statusAPIError && result.Status == "error" { + err = &Error{ + Type: result.ErrorType, + Msg: result.Error, + } + } + + return resp, []byte(result.Data), err +} + +// Range represents a sliced time range. +type Range struct { + // The boundaries of the time range. + Start, End time.Time + // The maximum time between two slices within the boundaries. + Step time.Duration +} + +// queryResult contains result data for a query. +type queryResult struct { + Type model.ValueType `json:"resultType"` + Result interface{} `json:"result"` + + // The decoded value. 
+ v model.Value +} + +func (qr *queryResult) UnmarshalJSON(b []byte) error { + v := struct { + Type model.ValueType `json:"resultType"` + Result json.RawMessage `json:"result"` + }{} + + err := json.Unmarshal(b, &v) + if err != nil { + return err + } + + switch v.Type { + case model.ValScalar: + var sv model.Scalar + err = json.Unmarshal(v.Result, &sv) + qr.v = &sv + + case model.ValVector: + var vv model.Vector + err = json.Unmarshal(v.Result, &vv) + qr.v = vv + + case model.ValMatrix: + var mv model.Matrix + err = json.Unmarshal(v.Result, &mv) + qr.v = mv + + default: + err = fmt.Errorf("unexpected value type %q", v.Type) + } + return err +} + +// QueryAPI provides bindings the Prometheus's query API. +type QueryAPI interface { + // Query performs a query for the given time. + Query(ctx context.Context, query string, ts time.Time) (model.Value, error) + // Query performs a query for the given range. + QueryRange(ctx context.Context, query string, r Range) (model.Value, error) +} + +// NewQueryAPI returns a new QueryAPI for the client. 
+func NewQueryAPI(c Client) QueryAPI { + return &httpQueryAPI{client: apiClient{c}} +} + +type httpQueryAPI struct { + client Client +} + +func (h *httpQueryAPI) Query(ctx context.Context, query string, ts time.Time) (model.Value, error) { + u := h.client.url(epQuery, nil) + q := u.Query() + + q.Set("query", query) + q.Set("time", ts.Format(time.RFC3339Nano)) + + u.RawQuery = q.Encode() + + req, _ := http.NewRequest("GET", u.String(), nil) + + _, body, err := h.client.do(ctx, req) + if err != nil { + return nil, err + } + + var qres queryResult + err = json.Unmarshal(body, &qres) + + return model.Value(qres.v), err +} + +func (h *httpQueryAPI) QueryRange(ctx context.Context, query string, r Range) (model.Value, error) { + u := h.client.url(epQueryRange, nil) + q := u.Query() + + var ( + start = r.Start.Format(time.RFC3339Nano) + end = r.End.Format(time.RFC3339Nano) + step = strconv.FormatFloat(r.Step.Seconds(), 'f', 3, 64) + ) + + q.Set("query", query) + q.Set("start", start) + q.Set("end", end) + q.Set("step", step) + + u.RawQuery = q.Encode() + + req, _ := http.NewRequest("GET", u.String(), nil) + + _, body, err := h.client.do(ctx, req) + if err != nil { + return nil, err + } + + var qres queryResult + err = json.Unmarshal(body, &qres) + + return model.Value(qres.v), err +} diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/client_golang/api/prometheus/api_test.go juju-core-2.0.0/src/github.com/prometheus/client_golang/api/prometheus/api_test.go --- juju-core-2.0~beta15/src/github.com/prometheus/client_golang/api/prometheus/api_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/client_golang/api/prometheus/api_test.go 2016-10-13 14:32:23.000000000 +0000 @@ -0,0 +1,453 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "encoding/json" + "fmt" + "net/http" + "net/url" + "reflect" + "testing" + "time" + + "github.com/prometheus/common/model" + "golang.org/x/net/context" +) + +func TestConfig(t *testing.T) { + c := Config{} + if c.transport() != DefaultTransport { + t.Fatalf("expected default transport for nil Transport field") + } +} + +func TestClientURL(t *testing.T) { + tests := []struct { + address string + endpoint string + args map[string]string + expected string + }{ + { + address: "http://localhost:9090", + endpoint: "/test", + expected: "http://localhost:9090/test", + }, + { + address: "http://localhost", + endpoint: "/test", + expected: "http://localhost/test", + }, + { + address: "http://localhost:9090", + endpoint: "test", + expected: "http://localhost:9090/test", + }, + { + address: "http://localhost:9090/prefix", + endpoint: "/test", + expected: "http://localhost:9090/prefix/test", + }, + { + address: "https://localhost:9090/", + endpoint: "/test/", + expected: "https://localhost:9090/test", + }, + { + address: "http://localhost:9090", + endpoint: "/test/:param", + args: map[string]string{ + "param": "content", + }, + expected: "http://localhost:9090/test/content", + }, + { + address: "http://localhost:9090", + endpoint: "/test/:param/more/:param", + args: map[string]string{ + "param": "content", + }, + expected: "http://localhost:9090/test/content/more/content", + }, + { + address: "http://localhost:9090", + endpoint: "/test/:param/more/:foo", + args: map[string]string{ + "param": "content", + "foo": "bar", + }, + 
expected: "http://localhost:9090/test/content/more/bar", + }, + { + address: "http://localhost:9090", + endpoint: "/test/:param", + args: map[string]string{ + "nonexistant": "content", + }, + expected: "http://localhost:9090/test/:param", + }, + } + + for _, test := range tests { + ep, err := url.Parse(test.address) + if err != nil { + t.Fatal(err) + } + + hclient := &httpClient{ + endpoint: ep, + transport: DefaultTransport, + } + + u := hclient.url(test.endpoint, test.args) + if u.String() != test.expected { + t.Errorf("unexpected result: got %s, want %s", u, test.expected) + continue + } + + // The apiClient must return exactly the same result as the httpClient. + aclient := &apiClient{hclient} + + u = aclient.url(test.endpoint, test.args) + if u.String() != test.expected { + t.Errorf("unexpected result: got %s, want %s", u, test.expected) + } + } +} + +type testClient struct { + *testing.T + + ch chan apiClientTest + req *http.Request +} + +type apiClientTest struct { + code int + response interface{} + expected string + err *Error +} + +func (c *testClient) url(ep string, args map[string]string) *url.URL { + return nil +} + +func (c *testClient) do(ctx context.Context, req *http.Request) (*http.Response, []byte, error) { + if ctx == nil { + c.Fatalf("context was not passed down") + } + if req != c.req { + c.Fatalf("request was not passed down") + } + + test := <-c.ch + + var b []byte + var err error + + switch v := test.response.(type) { + case string: + b = []byte(v) + default: + b, err = json.Marshal(v) + if err != nil { + c.Fatal(err) + } + } + + resp := &http.Response{ + StatusCode: test.code, + } + + return resp, b, nil +} + +func TestAPIClientDo(t *testing.T) { + tests := []apiClientTest{ + { + response: &apiResponse{ + Status: "error", + Data: json.RawMessage(`null`), + ErrorType: ErrBadData, + Error: "failed", + }, + err: &Error{ + Type: ErrBadData, + Msg: "failed", + }, + code: statusAPIError, + expected: `null`, + }, + { + response: &apiResponse{ + 
Status: "error", + Data: json.RawMessage(`"test"`), + ErrorType: ErrTimeout, + Error: "timed out", + }, + err: &Error{ + Type: ErrTimeout, + Msg: "timed out", + }, + code: statusAPIError, + expected: `test`, + }, + { + response: "bad json", + err: &Error{ + Type: ErrBadResponse, + Msg: "bad response code 400", + }, + code: http.StatusBadRequest, + }, + { + response: "bad json", + err: &Error{ + Type: ErrBadResponse, + Msg: "invalid character 'b' looking for beginning of value", + }, + code: statusAPIError, + }, + { + response: &apiResponse{ + Status: "success", + Data: json.RawMessage(`"test"`), + }, + err: &Error{ + Type: ErrBadResponse, + Msg: "inconsistent body for response code", + }, + code: statusAPIError, + }, + { + response: &apiResponse{ + Status: "success", + Data: json.RawMessage(`"test"`), + ErrorType: ErrTimeout, + Error: "timed out", + }, + err: &Error{ + Type: ErrBadResponse, + Msg: "inconsistent body for response code", + }, + code: statusAPIError, + }, + { + response: &apiResponse{ + Status: "error", + Data: json.RawMessage(`"test"`), + ErrorType: ErrTimeout, + Error: "timed out", + }, + err: &Error{ + Type: ErrBadResponse, + Msg: "inconsistent body for response code", + }, + code: http.StatusOK, + }, + } + + tc := &testClient{ + T: t, + ch: make(chan apiClientTest, 1), + req: &http.Request{}, + } + client := &apiClient{tc} + + for _, test := range tests { + + tc.ch <- test + + _, body, err := client.do(context.Background(), tc.req) + + if test.err != nil { + if err == nil { + t.Errorf("expected error %q but got none", test.err) + continue + } + if test.err.Error() != err.Error() { + t.Errorf("unexpected error: want %q, got %q", test.err, err) + } + continue + } + if err != nil { + t.Errorf("unexpeceted error %s", err) + continue + } + + want, got := test.expected, string(body) + if want != got { + t.Errorf("unexpected body: want %q, got %q", want, got) + } + } +} + +type apiTestClient struct { + *testing.T + curTest apiTest +} + +type apiTest 
struct { + do func() (interface{}, error) + inErr error + inRes interface{} + + reqPath string + reqParam url.Values + reqMethod string + res interface{} + err error +} + +func (c *apiTestClient) url(ep string, args map[string]string) *url.URL { + u := &url.URL{ + Host: "test:9090", + Path: apiPrefix + ep, + } + return u +} + +func (c *apiTestClient) do(ctx context.Context, req *http.Request) (*http.Response, []byte, error) { + + test := c.curTest + + if req.URL.Path != test.reqPath { + c.Errorf("unexpected request path: want %s, got %s", test.reqPath, req.URL.Path) + } + if req.Method != test.reqMethod { + c.Errorf("unexpected request method: want %s, got %s", test.reqMethod, req.Method) + } + + b, err := json.Marshal(test.inRes) + if err != nil { + c.Fatal(err) + } + + resp := &http.Response{} + if test.inErr != nil { + resp.StatusCode = statusAPIError + } else { + resp.StatusCode = http.StatusOK + } + + return resp, b, test.inErr +} + +func TestAPIs(t *testing.T) { + + testTime := time.Now() + + client := &apiTestClient{T: t} + + queryApi := &httpQueryAPI{ + client: client, + } + + doQuery := func(q string, ts time.Time) func() (interface{}, error) { + return func() (interface{}, error) { + return queryApi.Query(context.Background(), q, ts) + } + } + + doQueryRange := func(q string, rng Range) func() (interface{}, error) { + return func() (interface{}, error) { + return queryApi.QueryRange(context.Background(), q, rng) + } + } + + queryTests := []apiTest{ + { + do: doQuery("2", testTime), + inRes: &queryResult{ + Type: model.ValScalar, + Result: &model.Scalar{ + Value: 2, + Timestamp: model.TimeFromUnix(testTime.Unix()), + }, + }, + + reqMethod: "GET", + reqPath: "/api/v1/query", + reqParam: url.Values{ + "query": []string{"2"}, + "time": []string{testTime.Format(time.RFC3339Nano)}, + }, + res: &model.Scalar{ + Value: 2, + Timestamp: model.TimeFromUnix(testTime.Unix()), + }, + }, + { + do: doQuery("2", testTime), + inErr: fmt.Errorf("some error"), + + reqMethod: 
"GET", + reqPath: "/api/v1/query", + reqParam: url.Values{ + "query": []string{"2"}, + "time": []string{testTime.Format(time.RFC3339Nano)}, + }, + err: fmt.Errorf("some error"), + }, + + { + do: doQueryRange("2", Range{ + Start: testTime.Add(-time.Minute), + End: testTime, + Step: time.Minute, + }), + inErr: fmt.Errorf("some error"), + + reqMethod: "GET", + reqPath: "/api/v1/query_range", + reqParam: url.Values{ + "query": []string{"2"}, + "start": []string{testTime.Add(-time.Minute).Format(time.RFC3339Nano)}, + "end": []string{testTime.Format(time.RFC3339Nano)}, + "step": []string{time.Minute.String()}, + }, + err: fmt.Errorf("some error"), + }, + } + + var tests []apiTest + tests = append(tests, queryTests...) + + for _, test := range tests { + client.curTest = test + + res, err := test.do() + + if test.err != nil { + if err == nil { + t.Errorf("expected error %q but got none", test.err) + continue + } + if err.Error() != test.err.Error() { + t.Errorf("unexpected error: want %s, got %s", test.err, err) + } + continue + } + if err != nil { + t.Errorf("unexpected error: %s", err) + continue + } + + if !reflect.DeepEqual(res, test.res) { + t.Errorf("unexpected result: want %v, got %v", test.res, res) + } + } +} diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/client_golang/AUTHORS.md juju-core-2.0.0/src/github.com/prometheus/client_golang/AUTHORS.md --- juju-core-2.0~beta15/src/github.com/prometheus/client_golang/AUTHORS.md 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/client_golang/AUTHORS.md 2016-10-13 14:32:22.000000000 +0000 @@ -0,0 +1,18 @@ +The Prometheus project was started by Matt T. Proud (emeritus) and +Julius Volz in 2012. + +Maintainers of this repository: + +* Björn Rabenstein + +The following individuals have contributed code to this repository +(listed in alphabetical order): + +* Bernerd Schaefer +* Björn Rabenstein +* Daniel Bornkessel +* Jeff Younker +* Julius Volz +* Matt T. 
Proud +* Tobias Schmidt + diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/client_golang/CHANGELOG.md juju-core-2.0.0/src/github.com/prometheus/client_golang/CHANGELOG.md --- juju-core-2.0~beta15/src/github.com/prometheus/client_golang/CHANGELOG.md 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/client_golang/CHANGELOG.md 2016-10-13 14:32:23.000000000 +0000 @@ -0,0 +1,86 @@ +## 0.7.0 / 2015-07-27 +* [CHANGE] Rename ExporterLabelPrefix to ExportedLabelPrefix. +* [BUGFIX] Closed gaps in metric consistency check. +* [BUGFIX] Validate LabelName/LabelSet on JSON unmarshaling. +* [ENHANCEMENT] Document the possibility to create "empty" metrics in + a metric vector. +* [ENHANCEMENT] Fix and clarify various doc comments and the README.md. +* [ENHANCEMENT] (Kind of) solve "The Proxy Problem" of http.InstrumentHandler. +* [ENHANCEMENT] Change responseWriterDelegator.written to int64. + +## 0.6.0 / 2015-06-01 +* [CHANGE] Rename process_goroutines to go_goroutines. +* [ENHANCEMENT] Validate label names during YAML decoding. +* [ENHANCEMENT] Add LabelName regular expression. +* [BUGFIX] Ensure alignment of struct members for 32-bit systems. + +## 0.5.0 / 2015-05-06 +* [BUGFIX] Removed a weakness in the fingerprinting aka signature code. + This makes fingerprinting slower and more allocation-heavy, but the + weakness was too severe to be tolerated. +* [CHANGE] As a result of the above, Metric.Fingerprint is now returning + a different fingerprint. To keep the same fingerprint, the new method + Metric.FastFingerprint was introduced, which will be used by the + Prometheus server for storage purposes (implying that a collision + detection has to be added, too). +* [ENHANCEMENT] The Metric.Equal and Metric.Before do not depend on + fingerprinting anymore, removing the possibility of an undetected + fingerprint collision. +* [FEATURE] The Go collector in the exposition library includes garbage + collection stats. 
+* [FEATURE] The exposition library allows to create constant "throw-away" + summaries and histograms. +* [CHANGE] A number of new reserved labels and prefixes. + +## 0.4.0 / 2015-04-08 +* [CHANGE] Return NaN when Summaries have no observations yet. +* [BUGFIX] Properly handle Summary decay upon Write(). +* [BUGFIX] Fix the documentation link to the consumption library. +* [FEATURE] Allow the metric family injection hook to merge with existing + metric families. +* [ENHANCEMENT] Removed cgo dependency and conditional compilation of procfs. +* [MAINTENANCE] Adjusted to changes in matttproud/golang_protobuf_extensions. + +## 0.3.2 / 2015-03-11 +* [BUGFIX] Fixed the receiver type of COWMetric.Set(). This method is + only used by the Prometheus server internally. +* [CLEANUP] Added licenses of vendored code left out by godep. + +## 0.3.1 / 2015-03-04 +* [ENHANCEMENT] Switched fingerprinting functions from own free list to + sync.Pool. +* [CHANGE] Makefile uses Go 1.4.2 now (only relevant for examples and tests). + +## 0.3.0 / 2015-03-03 +* [CHANGE] Changed the fingerprinting for metrics. THIS WILL INVALIDATE ALL + PERSISTED FINGERPRINTS. IF YOU COMPILE THE PROMETHEUS SERVER WITH THIS + VERSION, YOU HAVE TO WIPE THE PREVIOUSLY CREATED STORAGE. +* [CHANGE] LabelValuesToSignature removed. (Nobody had used it, and it was + arguably broken.) +* [CHANGE] Vendored dependencies. Those are only used by the Makefile. If + client_golang is used as a library, the vendoring will stay out of your way. +* [BUGFIX] Remove a weakness in the fingerprinting for metrics. (This made + the fingerprinting change above necessary.) +* [FEATURE] Added new fingerprinting functions SignatureForLabels and + SignatureWithoutLabels to be used by the Prometheus server. These functions + require fewer allocations than the ones currently used by the server. + +## 0.2.0 / 2015-02-23 +* [FEATURE] Introduce new Histagram metric type. 
+* [CHANGE] Ignore process collector errors for now (better error handling + pending). +* [CHANGE] Use clear error interface for process pidFn. +* [BUGFIX] Fix Go download links for several archs and OSes. +* [ENHANCEMENT] Massively improve Gauge and Counter performance. +* [ENHANCEMENT] Catch illegal label names for summaries in histograms. +* [ENHANCEMENT] Reduce allocations during fingerprinting. +* [ENHANCEMENT] Remove cgo dependency. procfs package will only be included if + both cgo is available and the build is for an OS with procfs. +* [CLEANUP] Clean up code style issues. +* [CLEANUP] Mark slow test as such and exclude them from travis. +* [CLEANUP] Update protobuf library package name. +* [CLEANUP] Updated vendoring of beorn7/perks. + +## 0.1.0 / 2015-02-02 +* [CLEANUP] Introduced semantic versioning and changelog. From now on, + changes will be reported in this file. diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/client_golang/CONTRIBUTING.md juju-core-2.0.0/src/github.com/prometheus/client_golang/CONTRIBUTING.md --- juju-core-2.0~beta15/src/github.com/prometheus/client_golang/CONTRIBUTING.md 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/client_golang/CONTRIBUTING.md 2016-10-13 14:32:22.000000000 +0000 @@ -0,0 +1,18 @@ +# Contributing + +Prometheus uses GitHub to manage reviews of pull requests. + +* If you have a trivial fix or improvement, go ahead and create a pull + request, addressing (with `@...`) one or more of the maintainers + (see [AUTHORS.md](AUTHORS.md)) in the description of the pull request. + +* If you plan to do something more involved, first discuss your ideas + on our [mailing list](https://groups.google.com/forum/?fromgroups#!forum/prometheus-developers). + This will avoid unnecessary work and surely give you and us a good deal + of inspiration. 
+ +* Relevant coding style guidelines are the [Go Code Review + Comments](https://code.google.com/p/go-wiki/wiki/CodeReviewComments) + and the _Formatting and style_ section of Peter Bourgon's [Go: Best + Practices for Production + Environments](http://peter.bourgon.org/go-in-production/#formatting-and-style). diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/client_golang/examples/random/main.go juju-core-2.0.0/src/github.com/prometheus/client_golang/examples/random/main.go --- juju-core-2.0~beta15/src/github.com/prometheus/client_golang/examples/random/main.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/client_golang/examples/random/main.go 2016-10-13 14:32:22.000000000 +0000 @@ -0,0 +1,103 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// A simple example exposing fictional RPC latencies with different types of +// random distributions (uniform, normal, and exponential) as Prometheus +// metrics. 
+package main + +import ( + "flag" + "math" + "math/rand" + "net/http" + "time" + + "github.com/prometheus/client_golang/prometheus" +) + +var ( + addr = flag.String("listen-address", ":8080", "The address to listen on for HTTP requests.") + uniformDomain = flag.Float64("uniform.domain", 200, "The domain for the uniform distribution.") + normDomain = flag.Float64("normal.domain", 200, "The domain for the normal distribution.") + normMean = flag.Float64("normal.mean", 10, "The mean for the normal distribution.") + oscillationPeriod = flag.Duration("oscillation-period", 10*time.Minute, "The duration of the rate oscillation period.") +) + +var ( + // Create a summary to track fictional interservice RPC latencies for three + // distinct services with different latency distributions. These services are + // differentiated via a "service" label. + rpcDurations = prometheus.NewSummaryVec( + prometheus.SummaryOpts{ + Name: "rpc_durations_microseconds", + Help: "RPC latency distributions.", + }, + []string{"service"}, + ) + // The same as above, but now as a histogram, and only for the normal + // distribution. The buckets are targeted to the parameters of the + // normal distribution, with 20 buckets centered on the mean, each + // half-sigma wide. + rpcDurationsHistogram = prometheus.NewHistogram(prometheus.HistogramOpts{ + Name: "rpc_durations_histogram_microseconds", + Help: "RPC latency distributions.", + Buckets: prometheus.LinearBuckets(*normMean-5**normDomain, .5**normDomain, 20), + }) +) + +func init() { + // Register the summary and the histogram with Prometheus's default registry. + prometheus.MustRegister(rpcDurations) + prometheus.MustRegister(rpcDurationsHistogram) +} + +func main() { + flag.Parse() + + start := time.Now() + + oscillationFactor := func() float64 { + return 2 + math.Sin(math.Sin(2*math.Pi*float64(time.Since(start))/float64(*oscillationPeriod))) + } + + // Periodically record some sample latencies for the three services. 
+ go func() { + for { + v := rand.Float64() * *uniformDomain + rpcDurations.WithLabelValues("uniform").Observe(v) + time.Sleep(time.Duration(100*oscillationFactor()) * time.Millisecond) + } + }() + + go func() { + for { + v := (rand.NormFloat64() * *normDomain) + *normMean + rpcDurations.WithLabelValues("normal").Observe(v) + rpcDurationsHistogram.Observe(v) + time.Sleep(time.Duration(75*oscillationFactor()) * time.Millisecond) + } + }() + + go func() { + for { + v := rand.ExpFloat64() + rpcDurations.WithLabelValues("exponential").Observe(v) + time.Sleep(time.Duration(50*oscillationFactor()) * time.Millisecond) + } + }() + + // Expose the registered metrics via HTTP. + http.Handle("/metrics", prometheus.Handler()) + http.ListenAndServe(*addr, nil) +} diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/client_golang/examples/simple/main.go juju-core-2.0.0/src/github.com/prometheus/client_golang/examples/simple/main.go --- juju-core-2.0~beta15/src/github.com/prometheus/client_golang/examples/simple/main.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/client_golang/examples/simple/main.go 2016-10-13 14:32:22.000000000 +0000 @@ -0,0 +1,30 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// A minimal example of how to include Prometheus instrumentation. 
+package main + +import ( + "flag" + "net/http" + + "github.com/prometheus/client_golang/prometheus" +) + +var addr = flag.String("listen-address", ":8080", "The address to listen on for HTTP requests.") + +func main() { + flag.Parse() + http.Handle("/metrics", prometheus.Handler()) + http.ListenAndServe(*addr, nil) +} diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/client_golang/.gitignore juju-core-2.0.0/src/github.com/prometheus/client_golang/.gitignore --- juju-core-2.0~beta15/src/github.com/prometheus/client_golang/.gitignore 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/client_golang/.gitignore 2016-10-13 14:32:22.000000000 +0000 @@ -0,0 +1,26 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe + +*~ +*# +.build diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/client_golang/LICENSE juju-core-2.0.0/src/github.com/prometheus/client_golang/LICENSE --- juju-core-2.0~beta15/src/github.com/prometheus/client_golang/LICENSE 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/client_golang/LICENSE 2016-10-13 14:32:22.000000000 +0000 @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/client_golang/NOTICE juju-core-2.0.0/src/github.com/prometheus/client_golang/NOTICE --- juju-core-2.0~beta15/src/github.com/prometheus/client_golang/NOTICE 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/client_golang/NOTICE 2016-10-13 14:32:23.000000000 +0000 @@ -0,0 +1,28 @@ +Prometheus instrumentation library for Go applications +Copyright 2012-2015 The Prometheus Authors + +This product includes software developed at +SoundCloud Ltd. (http://soundcloud.com/). + + +The following components are included in this product: + +goautoneg +http://bitbucket.org/ww/goautoneg +Copyright 2011, Open Knowledge Foundation Ltd. +See README.txt for license details. + +perks - a fork of https://github.com/bmizerany/perks +https://github.com/beorn7/perks +Copyright 2013-2015 Blake Mizerany, Björn Rabenstein +See https://github.com/beorn7/perks/blob/master/README.md for license details. + +Go support for Protocol Buffers - Google's data interchange format +http://github.com/golang/protobuf/ +Copyright 2010 The Go Authors +See source code for license details. + +Support for streaming Protocol Buffer messages for the Go language (golang). +https://github.com/matttproud/golang_protobuf_extensions +Copyright 2013 Matt T. 
Proud +Licensed under the Apache License, Version 2.0 diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/client_golang/prometheus/benchmark_test.go juju-core-2.0.0/src/github.com/prometheus/client_golang/prometheus/benchmark_test.go --- juju-core-2.0~beta15/src/github.com/prometheus/client_golang/prometheus/benchmark_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/client_golang/prometheus/benchmark_test.go 2016-10-13 14:32:22.000000000 +0000 @@ -0,0 +1,183 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package prometheus + +import ( + "sync" + "testing" +) + +func BenchmarkCounterWithLabelValues(b *testing.B) { + m := NewCounterVec( + CounterOpts{ + Name: "benchmark_counter", + Help: "A counter to benchmark it.", + }, + []string{"one", "two", "three"}, + ) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + m.WithLabelValues("eins", "zwei", "drei").Inc() + } +} + +func BenchmarkCounterWithLabelValuesConcurrent(b *testing.B) { + m := NewCounterVec( + CounterOpts{ + Name: "benchmark_counter", + Help: "A counter to benchmark it.", + }, + []string{"one", "two", "three"}, + ) + b.ReportAllocs() + b.ResetTimer() + wg := sync.WaitGroup{} + for i := 0; i < 10; i++ { + wg.Add(1) + go func() { + for j := 0; j < b.N/10; j++ { + m.WithLabelValues("eins", "zwei", "drei").Inc() + } + wg.Done() + }() + } + wg.Wait() +} + +func BenchmarkCounterWithMappedLabels(b *testing.B) { + m := NewCounterVec( + CounterOpts{ + Name: "benchmark_counter", + Help: "A counter to benchmark it.", + }, + []string{"one", "two", "three"}, + ) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + m.With(Labels{"two": "zwei", "one": "eins", "three": "drei"}).Inc() + } +} + +func BenchmarkCounterWithPreparedMappedLabels(b *testing.B) { + m := NewCounterVec( + CounterOpts{ + Name: "benchmark_counter", + Help: "A counter to benchmark it.", + }, + []string{"one", "two", "three"}, + ) + b.ReportAllocs() + b.ResetTimer() + labels := Labels{"two": "zwei", "one": "eins", "three": "drei"} + for i := 0; i < b.N; i++ { + m.With(labels).Inc() + } +} + +func BenchmarkCounterNoLabels(b *testing.B) { + m := NewCounter(CounterOpts{ + Name: "benchmark_counter", + Help: "A counter to benchmark it.", + }) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + m.Inc() + } +} + +func BenchmarkGaugeWithLabelValues(b *testing.B) { + m := NewGaugeVec( + GaugeOpts{ + Name: "benchmark_gauge", + Help: "A gauge to benchmark it.", + }, + []string{"one", "two", "three"}, + ) + 
b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + m.WithLabelValues("eins", "zwei", "drei").Set(3.1415) + } +} + +func BenchmarkGaugeNoLabels(b *testing.B) { + m := NewGauge(GaugeOpts{ + Name: "benchmark_gauge", + Help: "A gauge to benchmark it.", + }) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + m.Set(3.1415) + } +} + +func BenchmarkSummaryWithLabelValues(b *testing.B) { + m := NewSummaryVec( + SummaryOpts{ + Name: "benchmark_summary", + Help: "A summary to benchmark it.", + }, + []string{"one", "two", "three"}, + ) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + m.WithLabelValues("eins", "zwei", "drei").Observe(3.1415) + } +} + +func BenchmarkSummaryNoLabels(b *testing.B) { + m := NewSummary(SummaryOpts{ + Name: "benchmark_summary", + Help: "A summary to benchmark it.", + }, + ) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + m.Observe(3.1415) + } +} + +func BenchmarkHistogramWithLabelValues(b *testing.B) { + m := NewHistogramVec( + HistogramOpts{ + Name: "benchmark_histogram", + Help: "A histogram to benchmark it.", + }, + []string{"one", "two", "three"}, + ) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + m.WithLabelValues("eins", "zwei", "drei").Observe(3.1415) + } +} + +func BenchmarkHistogramNoLabels(b *testing.B) { + m := NewHistogram(HistogramOpts{ + Name: "benchmark_histogram", + Help: "A histogram to benchmark it.", + }, + ) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + m.Observe(3.1415) + } +} diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/client_golang/prometheus/collector.go juju-core-2.0.0/src/github.com/prometheus/client_golang/prometheus/collector.go --- juju-core-2.0~beta15/src/github.com/prometheus/client_golang/prometheus/collector.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/client_golang/prometheus/collector.go 2016-10-13 14:32:23.000000000 +0000 @@ -0,0 +1,75 @@ +// Copyright 2014 
The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +// Collector is the interface implemented by anything that can be used by +// Prometheus to collect metrics. A Collector has to be registered for +// collection. See Register, MustRegister, RegisterOrGet, and MustRegisterOrGet. +// +// The stock metrics provided by this package (like Gauge, Counter, Summary) are +// also Collectors (which only ever collect one metric, namely itself). An +// implementer of Collector may, however, collect multiple metrics in a +// coordinated fashion and/or create metrics on the fly. Examples for collectors +// already implemented in this library are the metric vectors (i.e. collection +// of multiple instances of the same Metric but with different label values) +// like GaugeVec or SummaryVec, and the ExpvarCollector. +type Collector interface { + // Describe sends the super-set of all possible descriptors of metrics + // collected by this Collector to the provided channel and returns once + // the last descriptor has been sent. The sent descriptors fulfill the + // consistency and uniqueness requirements described in the Desc + // documentation. (It is valid if one and the same Collector sends + // duplicate descriptors. Those duplicates are simply ignored. However, + // two different Collectors must not send duplicate descriptors.) 
This + // method idempotently sends the same descriptors throughout the + // lifetime of the Collector. If a Collector encounters an error while + // executing this method, it must send an invalid descriptor (created + // with NewInvalidDesc) to signal the error to the registry. + Describe(chan<- *Desc) + // Collect is called by Prometheus when collecting metrics. The + // implementation sends each collected metric via the provided channel + // and returns once the last metric has been sent. The descriptor of + // each sent metric is one of those returned by Describe. Returned + // metrics that share the same descriptor must differ in their variable + // label values. This method may be called concurrently and must + // therefore be implemented in a concurrency safe way. Blocking occurs + // at the expense of total performance of rendering all registered + // metrics. Ideally, Collector implementations support concurrent + // readers. + Collect(chan<- Metric) +} + +// SelfCollector implements Collector for a single Metric so that that the +// Metric collects itself. Add it as an anonymous field to a struct that +// implements Metric, and call Init with the Metric itself as an argument. +type SelfCollector struct { + self Metric +} + +// Init provides the SelfCollector with a reference to the metric it is supposed +// to collect. It is usually called within the factory function to create a +// metric. See example. +func (c *SelfCollector) Init(self Metric) { + c.self = self +} + +// Describe implements Collector. +func (c *SelfCollector) Describe(ch chan<- *Desc) { + ch <- c.self.Desc() +} + +// Collect implements Collector. 
+func (c *SelfCollector) Collect(ch chan<- Metric) { + ch <- c.self +} diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/client_golang/prometheus/counter.go juju-core-2.0.0/src/github.com/prometheus/client_golang/prometheus/counter.go --- juju-core-2.0~beta15/src/github.com/prometheus/client_golang/prometheus/counter.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/client_golang/prometheus/counter.go 2016-10-13 14:32:23.000000000 +0000 @@ -0,0 +1,173 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "errors" +) + +// Counter is a Metric that represents a single numerical value that only ever +// goes up. That implies that it cannot be used to count items whose number can +// also go down, e.g. the number of currently running goroutines. Those +// "counters" are represented by Gauges. +// +// A Counter is typically used to count requests served, tasks completed, errors +// occurred, etc. +// +// To create Counter instances, use NewCounter. +type Counter interface { + Metric + Collector + + // Set is used to set the Counter to an arbitrary value. It is only used + // if you have to transfer a value from an external counter into this + // Prometheus metric. Do not use it for regular handling of a + // Prometheus counter (as it can be used to break the contract of + // monotonically increasing values). 
+ Set(float64) + // Inc increments the counter by 1. + Inc() + // Add adds the given value to the counter. It panics if the value is < + // 0. + Add(float64) +} + +// CounterOpts is an alias for Opts. See there for doc comments. +type CounterOpts Opts + +// NewCounter creates a new Counter based on the provided CounterOpts. +func NewCounter(opts CounterOpts) Counter { + desc := NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + nil, + opts.ConstLabels, + ) + result := &counter{value: value{desc: desc, valType: CounterValue, labelPairs: desc.constLabelPairs}} + result.Init(result) // Init self-collection. + return result +} + +type counter struct { + value +} + +func (c *counter) Add(v float64) { + if v < 0 { + panic(errors.New("counter cannot decrease in value")) + } + c.value.Add(v) +} + +// CounterVec is a Collector that bundles a set of Counters that all share the +// same Desc, but have different values for their variable labels. This is used +// if you want to count the same thing partitioned by various dimensions +// (e.g. number of HTTP requests, partitioned by response code and +// method). Create instances with NewCounterVec. +// +// CounterVec embeds MetricVec. See there for a full list of methods with +// detailed documentation. +type CounterVec struct { + MetricVec +} + +// NewCounterVec creates a new CounterVec based on the provided CounterOpts and +// partitioned by the given label names. At least one label name must be +// provided. 
+func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec { + desc := NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + labelNames, + opts.ConstLabels, + ) + return &CounterVec{ + MetricVec: MetricVec{ + children: map[uint64]Metric{}, + desc: desc, + newMetric: func(lvs ...string) Metric { + result := &counter{value: value{ + desc: desc, + valType: CounterValue, + labelPairs: makeLabelPairs(desc, lvs), + }} + result.Init(result) // Init self-collection. + return result + }, + }, + } +} + +// GetMetricWithLabelValues replaces the method of the same name in +// MetricVec. The difference is that this method returns a Counter and not a +// Metric so that no type conversion is required. +func (m *CounterVec) GetMetricWithLabelValues(lvs ...string) (Counter, error) { + metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...) + if metric != nil { + return metric.(Counter), err + } + return nil, err +} + +// GetMetricWith replaces the method of the same name in MetricVec. The +// difference is that this method returns a Counter and not a Metric so that no +// type conversion is required. +func (m *CounterVec) GetMetricWith(labels Labels) (Counter, error) { + metric, err := m.MetricVec.GetMetricWith(labels) + if metric != nil { + return metric.(Counter), err + } + return nil, err +} + +// WithLabelValues works as GetMetricWithLabelValues, but panics where +// GetMetricWithLabelValues would have returned an error. By not returning an +// error, WithLabelValues allows shortcuts like +// myVec.WithLabelValues("404", "GET").Add(42) +func (m *CounterVec) WithLabelValues(lvs ...string) Counter { + return m.MetricVec.WithLabelValues(lvs...).(Counter) +} + +// With works as GetMetricWith, but panics where GetMetricWithLabels would have +// returned an error. 
By not returning an error, With allows shortcuts like +// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42) +func (m *CounterVec) With(labels Labels) Counter { + return m.MetricVec.With(labels).(Counter) +} + +// CounterFunc is a Counter whose value is determined at collect time by calling a +// provided function. +// +// To create CounterFunc instances, use NewCounterFunc. +type CounterFunc interface { + Metric + Collector +} + +// NewCounterFunc creates a new CounterFunc based on the provided +// CounterOpts. The value reported is determined by calling the given function +// from within the Write method. Take into account that metric collection may +// happen concurrently. If that results in concurrent calls to Write, like in +// the case where a CounterFunc is directly registered with Prometheus, the +// provided function must be concurrency-safe. The function should also honor +// the contract for a Counter (values only go up, not down), but compliance will +// not be checked. +func NewCounterFunc(opts CounterOpts, function func() float64) CounterFunc { + return newValueFunc(NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + nil, + opts.ConstLabels, + ), CounterValue, function) +} diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/client_golang/prometheus/counter_test.go juju-core-2.0.0/src/github.com/prometheus/client_golang/prometheus/counter_test.go --- juju-core-2.0~beta15/src/github.com/prometheus/client_golang/prometheus/counter_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/client_golang/prometheus/counter_test.go 2016-10-13 14:32:22.000000000 +0000 @@ -0,0 +1,58 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "math" + "testing" + + dto "github.com/prometheus/client_model/go" +) + +func TestCounterAdd(t *testing.T) { + counter := NewCounter(CounterOpts{ + Name: "test", + Help: "test help", + ConstLabels: Labels{"a": "1", "b": "2"}, + }).(*counter) + counter.Inc() + if expected, got := 1., math.Float64frombits(counter.valBits); expected != got { + t.Errorf("Expected %f, got %f.", expected, got) + } + counter.Add(42) + if expected, got := 43., math.Float64frombits(counter.valBits); expected != got { + t.Errorf("Expected %f, got %f.", expected, got) + } + + if expected, got := "counter cannot decrease in value", decreaseCounter(counter).Error(); expected != got { + t.Errorf("Expected error %q, got %q.", expected, got) + } + + m := &dto.Metric{} + counter.Write(m) + + if expected, got := `label: label: counter: `, m.String(); expected != got { + t.Errorf("expected %q, got %q", expected, got) + } +} + +func decreaseCounter(c *counter) (err error) { + defer func() { + if e := recover(); e != nil { + err = e.(error) + } + }() + c.Add(-1) + return nil +} diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/client_golang/prometheus/desc.go juju-core-2.0.0/src/github.com/prometheus/client_golang/prometheus/desc.go --- juju-core-2.0~beta15/src/github.com/prometheus/client_golang/prometheus/desc.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/client_golang/prometheus/desc.go 2016-10-13 14:32:23.000000000 +0000 @@ -0,0 +1,192 @@ +package prometheus + +import ( + "errors" + "fmt" + "regexp" + 
"sort" + "strings" + + "github.com/golang/protobuf/proto" + + dto "github.com/prometheus/client_model/go" +) + +var ( + metricNameRE = regexp.MustCompile(`^[a-zA-Z_][a-zA-Z0-9_:]*$`) + labelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$") +) + +// reservedLabelPrefix is a prefix which is not legal in user-supplied +// label names. +const reservedLabelPrefix = "__" + +// Labels represents a collection of label name -> value mappings. This type is +// commonly used with the With(Labels) and GetMetricWith(Labels) methods of +// metric vector Collectors, e.g.: +// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42) +// +// The other use-case is the specification of constant label pairs in Opts or to +// create a Desc. +type Labels map[string]string + +// Desc is the descriptor used by every Prometheus Metric. It is essentially +// the immutable meta-data of a Metric. The normal Metric implementations +// included in this package manage their Desc under the hood. Users only have to +// deal with Desc if they use advanced features like the ExpvarCollector or +// custom Collectors and Metrics. +// +// Descriptors registered with the same registry have to fulfill certain +// consistency and uniqueness criteria if they share the same fully-qualified +// name: They must have the same help string and the same label names (aka label +// dimensions) in each, constLabels and variableLabels, but they must differ in +// the values of the constLabels. +// +// Descriptors that share the same fully-qualified names and the same label +// values of their constLabels are considered equal. +// +// Use NewDesc to create new Desc instances. +type Desc struct { + // fqName has been built from Namespace, Subsystem, and Name. + fqName string + // help provides some helpful information about this metric. + help string + // constLabelPairs contains precalculated DTO label pairs based on + // the constant labels. 
+ constLabelPairs []*dto.LabelPair + // VariableLabels contains names of labels for which the metric + // maintains variable values. + variableLabels []string + // id is a hash of the values of the ConstLabels and fqName. This + // must be unique among all registered descriptors and can therefore be + // used as an identifier of the descriptor. + id uint64 + // dimHash is a hash of the label names (preset and variable) and the + // Help string. Each Desc with the same fqName must have the same + // dimHash. + dimHash uint64 + // err is an error that occurred during construction. It is reported on + // registration time. + err error +} + +// NewDesc allocates and initializes a new Desc. Errors are recorded in the Desc +// and will be reported on registration time. variableLabels and constLabels can +// be nil if no such labels should be set. fqName and help must not be empty. +// +// variableLabels only contain the label names. Their label values are variable +// and therefore not part of the Desc. (They are managed within the Metric.) +// +// For constLabels, the label values are constant. Therefore, they are fully +// specified in the Desc. See the Opts documentation for the implications of +// constant labels. +func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *Desc { + d := &Desc{ + fqName: fqName, + help: help, + variableLabels: variableLabels, + } + if help == "" { + d.err = errors.New("empty help string") + return d + } + if !metricNameRE.MatchString(fqName) { + d.err = fmt.Errorf("%q is not a valid metric name", fqName) + return d + } + // labelValues contains the label values of const labels (in order of + // their sorted label names) plus the fqName (at position 0). + labelValues := make([]string, 1, len(constLabels)+1) + labelValues[0] = fqName + labelNames := make([]string, 0, len(constLabels)+len(variableLabels)) + labelNameSet := map[string]struct{}{} + // First add only the const label names and sort them... 
+ for labelName := range constLabels { + if !checkLabelName(labelName) { + d.err = fmt.Errorf("%q is not a valid label name", labelName) + return d + } + labelNames = append(labelNames, labelName) + labelNameSet[labelName] = struct{}{} + } + sort.Strings(labelNames) + // ... so that we can now add const label values in the order of their names. + for _, labelName := range labelNames { + labelValues = append(labelValues, constLabels[labelName]) + } + // Now add the variable label names, but prefix them with something that + // cannot be in a regular label name. That prevents matching the label + // dimension with a different mix between preset and variable labels. + for _, labelName := range variableLabels { + if !checkLabelName(labelName) { + d.err = fmt.Errorf("%q is not a valid label name", labelName) + return d + } + labelNames = append(labelNames, "$"+labelName) + labelNameSet[labelName] = struct{}{} + } + if len(labelNames) != len(labelNameSet) { + d.err = errors.New("duplicate label names") + return d + } + vh := hashNew() + for _, val := range labelValues { + vh = hashAdd(vh, val) + vh = hashAddByte(vh, separatorByte) + } + d.id = vh + // Sort labelNames so that order doesn't matter for the hash. + sort.Strings(labelNames) + // Now hash together (in this order) the help string and the sorted + // label names. + lh := hashNew() + lh = hashAdd(lh, help) + lh = hashAddByte(lh, separatorByte) + for _, labelName := range labelNames { + lh = hashAdd(lh, labelName) + lh = hashAddByte(lh, separatorByte) + } + d.dimHash = lh + + d.constLabelPairs = make([]*dto.LabelPair, 0, len(constLabels)) + for n, v := range constLabels { + d.constLabelPairs = append(d.constLabelPairs, &dto.LabelPair{ + Name: proto.String(n), + Value: proto.String(v), + }) + } + sort.Sort(LabelPairSorter(d.constLabelPairs)) + return d +} + +// NewInvalidDesc returns an invalid descriptor, i.e. a descriptor with the +// provided error set. 
If a collector returning such a descriptor is registered, +// registration will fail with the provided error. NewInvalidDesc can be used by +// a Collector to signal inability to describe itself. +func NewInvalidDesc(err error) *Desc { + return &Desc{ + err: err, + } +} + +func (d *Desc) String() string { + lpStrings := make([]string, 0, len(d.constLabelPairs)) + for _, lp := range d.constLabelPairs { + lpStrings = append( + lpStrings, + fmt.Sprintf("%s=%q", lp.GetName(), lp.GetValue()), + ) + } + return fmt.Sprintf( + "Desc{fqName: %q, help: %q, constLabels: {%s}, variableLabels: %v}", + d.fqName, + d.help, + strings.Join(lpStrings, ","), + d.variableLabels, + ) +} + +func checkLabelName(l string) bool { + return labelNameRE.MatchString(l) && + !strings.HasPrefix(l, reservedLabelPrefix) +} diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/client_golang/prometheus/doc.go juju-core-2.0.0/src/github.com/prometheus/client_golang/prometheus/doc.go --- juju-core-2.0~beta15/src/github.com/prometheus/client_golang/prometheus/doc.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/client_golang/prometheus/doc.go 2016-10-13 14:32:23.000000000 +0000 @@ -0,0 +1,109 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package prometheus provides embeddable metric primitives for servers and +// standardized exposition of telemetry through a web services interface. 
+// +// All exported functions and methods are safe to be used concurrently unless +// specified otherwise. +// +// To expose metrics registered with the Prometheus registry, an HTTP server +// needs to know about the Prometheus handler. The usual endpoint is "/metrics". +// +// http.Handle("/metrics", prometheus.Handler()) +// +// As a starting point a very basic usage example: +// +// package main +// +// import ( +// "net/http" +// +// "github.com/prometheus/client_golang/prometheus" +// ) +// +// var ( +// cpuTemp = prometheus.NewGauge(prometheus.GaugeOpts{ +// Name: "cpu_temperature_celsius", +// Help: "Current temperature of the CPU.", +// }) +// hdFailures = prometheus.NewCounter(prometheus.CounterOpts{ +// Name: "hd_errors_total", +// Help: "Number of hard-disk errors.", +// }) +// ) +// +// func init() { +// prometheus.MustRegister(cpuTemp) +// prometheus.MustRegister(hdFailures) +// } +// +// func main() { +// cpuTemp.Set(65.3) +// hdFailures.Inc() +// +// http.Handle("/metrics", prometheus.Handler()) +// http.ListenAndServe(":8080", nil) +// } +// +// +// This is a complete program that exports two metrics, a Gauge and a Counter. +// It also exports some stats about the HTTP usage of the /metrics +// endpoint. (See the Handler function for more detail.) +// +// Two more advanced metric types are the Summary and Histogram. +// +// In addition to the fundamental metric types Gauge, Counter, Summary, and +// Histogram, a very important part of the Prometheus data model is the +// partitioning of samples along dimensions called labels, which results in +// metric vectors. The fundamental types are GaugeVec, CounterVec, SummaryVec, +// and HistogramVec. +// +// Those are all the parts needed for basic usage. Detailed documentation and +// examples are provided below. +// +// Everything else this package offers is essentially for "power users" only. 
A +// few pointers to "power user features": +// +// All the various ...Opts structs have a ConstLabels field for labels that +// never change their value (which is only useful under special circumstances, +// see documentation of the Opts type). +// +// The Untyped metric behaves like a Gauge, but signals the Prometheus server +// not to assume anything about its type. +// +// Functions to fine-tune how the metric registry works: EnableCollectChecks, +// PanicOnCollectError, Register, Unregister, SetMetricFamilyInjectionHook. +// +// For custom metric collection, there are two entry points: Custom Metric +// implementations and custom Collector implementations. A Metric is the +// fundamental unit in the Prometheus data model: a sample at a point in time +// together with its meta-data (like its fully-qualified name and any number of +// pairs of label name and label value) that knows how to marshal itself into a +// data transfer object (aka DTO, implemented as a protocol buffer). A Collector +// gets registered with the Prometheus registry and manages the collection of +// one or more Metrics. Many parts of this package are building blocks for +// Metrics and Collectors. Desc is the metric descriptor, actually used by all +// metrics under the hood, and by Collectors to describe the Metrics to be +// collected, but only to be dealt with by users if they implement their own +// Metrics or Collectors. To create a Desc, the BuildFQName function will come +// in handy. Other useful components for Metric and Collector implementation +// include: LabelPairSorter to sort the DTO version of label pairs, +// NewConstMetric and MustNewConstMetric to create "throw away" Metrics at +// collection time, MetricVec to bundle custom Metrics into a metric vector +// Collector, SelfCollector to make a custom Metric collect itself. 
+// +// A good example for a custom Collector is the ExpVarCollector included in this +// package, which exports variables exported via the "expvar" package as +// Prometheus metrics. +package prometheus diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/client_golang/prometheus/example_clustermanager_test.go juju-core-2.0.0/src/github.com/prometheus/client_golang/prometheus/example_clustermanager_test.go --- juju-core-2.0~beta15/src/github.com/prometheus/client_golang/prometheus/example_clustermanager_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/client_golang/prometheus/example_clustermanager_test.go 2016-10-13 14:32:23.000000000 +0000 @@ -0,0 +1,130 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus_test + +import ( + "sync" + + "github.com/prometheus/client_golang/prometheus" +) + +// ClusterManager is an example for a system that might have been built without +// Prometheus in mind. It models a central manager of jobs running in a +// cluster. To turn it into something that collects Prometheus metrics, we +// simply add the two methods required for the Collector interface. +// +// An additional challenge is that multiple instances of the ClusterManager are +// run within the same binary, each in charge of a different zone. We need to +// make use of ConstLabels to be able to register each ClusterManager instance +// with Prometheus. 
+type ClusterManager struct { + Zone string + OOMCount *prometheus.CounterVec + RAMUsage *prometheus.GaugeVec + mtx sync.Mutex // Protects OOMCount and RAMUsage. + // ... many more fields +} + +// ReallyExpensiveAssessmentOfTheSystemState is a mock for the data gathering a +// real cluster manager would have to do. Since it may actually be really +// expensive, it must only be called once per collection. This implementation, +// obviously, only returns some made-up data. +func (c *ClusterManager) ReallyExpensiveAssessmentOfTheSystemState() ( + oomCountByHost map[string]int, ramUsageByHost map[string]float64, +) { + // Just example fake data. + oomCountByHost = map[string]int{ + "foo.example.org": 42, + "bar.example.org": 2001, + } + ramUsageByHost = map[string]float64{ + "foo.example.org": 6.023e23, + "bar.example.org": 3.14, + } + return +} + +// Describe faces the interesting challenge that the two metric vectors that are +// used in this example are already Collectors themselves. However, thanks to +// the use of channels, it is really easy to "chain" Collectors. Here we simply +// call the Describe methods of the two metric vectors. +func (c *ClusterManager) Describe(ch chan<- *prometheus.Desc) { + c.OOMCount.Describe(ch) + c.RAMUsage.Describe(ch) +} + +// Collect first triggers the ReallyExpensiveAssessmentOfTheSystemState. Then it +// sets the retrieved values in the two metric vectors and then sends all their +// metrics to the channel (again using a chaining technique as in the Describe +// method). Since Collect could be called multiple times concurrently, that part +// is protected by a mutex. 
+func (c *ClusterManager) Collect(ch chan<- prometheus.Metric) { + oomCountByHost, ramUsageByHost := c.ReallyExpensiveAssessmentOfTheSystemState() + c.mtx.Lock() + defer c.mtx.Unlock() + for host, oomCount := range oomCountByHost { + c.OOMCount.WithLabelValues(host).Set(float64(oomCount)) + } + for host, ramUsage := range ramUsageByHost { + c.RAMUsage.WithLabelValues(host).Set(ramUsage) + } + c.OOMCount.Collect(ch) + c.RAMUsage.Collect(ch) + // All metrics in OOMCount and RAMUsage are sent to the channel now. We + // can safely reset the two metric vectors now, so that we can start + // fresh in the next Collect cycle. (Imagine a host disappears from the + // cluster. If we did not reset here, its Metric would stay in the + // metric vectors forever.) + c.OOMCount.Reset() + c.RAMUsage.Reset() +} + +// NewClusterManager creates the two metric vectors OOMCount and RAMUsage. Note +// that the zone is set as a ConstLabel. (It's different in each instance of the +// ClusterManager, but constant over the lifetime of an instance.) The reported +// values are partitioned by host, which is therefore a variable label. 
+func NewClusterManager(zone string) *ClusterManager { + return &ClusterManager{ + Zone: zone, + OOMCount: prometheus.NewCounterVec( + prometheus.CounterOpts{ + Subsystem: "clustermanager", + Name: "oom_count", + Help: "number of OOM crashes", + ConstLabels: prometheus.Labels{"zone": zone}, + }, + []string{"host"}, + ), + RAMUsage: prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Subsystem: "clustermanager", + Name: "ram_usage_bytes", + Help: "RAM usage as reported to the cluster manager", + ConstLabels: prometheus.Labels{"zone": zone}, + }, + []string{"host"}, + ), + } +} + +func ExampleCollector_clustermanager() { + workerDB := NewClusterManager("db") + workerCA := NewClusterManager("ca") + prometheus.MustRegister(workerDB) + prometheus.MustRegister(workerCA) + + // Since we are dealing with custom Collector implementations, it might + // be a good idea to enable the collect checks in the registry. + prometheus.EnableCollectChecks(true) +} diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/client_golang/prometheus/example_memstats_test.go juju-core-2.0.0/src/github.com/prometheus/client_golang/prometheus/example_memstats_test.go --- juju-core-2.0~beta15/src/github.com/prometheus/client_golang/prometheus/example_memstats_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/client_golang/prometheus/example_memstats_test.go 2016-10-13 14:32:23.000000000 +0000 @@ -0,0 +1,87 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus_test + +import ( + "runtime" + + "github.com/prometheus/client_golang/prometheus" +) + +var ( + allocDesc = prometheus.NewDesc( + prometheus.BuildFQName("", "memstats", "alloc_bytes"), + "bytes allocated and still in use", + nil, nil, + ) + totalAllocDesc = prometheus.NewDesc( + prometheus.BuildFQName("", "memstats", "total_alloc_bytes"), + "bytes allocated (even if freed)", + nil, nil, + ) + numGCDesc = prometheus.NewDesc( + prometheus.BuildFQName("", "memstats", "num_gc_total"), + "number of GCs run", + nil, nil, + ) +) + +// MemStatsCollector is an example for a custom Collector that solves the +// problem of feeding into multiple metrics at the same time. The +// runtime.ReadMemStats should happen only once, and then the results need to be +// fed into a number of separate Metrics. In this example, only a few of the +// values reported by ReadMemStats are used. For each, there is a Desc provided +// as a var, so the MemStatsCollector itself needs nothing else in the +// struct. Only the methods need to be implemented. +type MemStatsCollector struct{} + +// Describe just sends the three Desc objects for the Metrics we intend to +// collect. +func (_ MemStatsCollector) Describe(ch chan<- *prometheus.Desc) { + ch <- allocDesc + ch <- totalAllocDesc + ch <- numGCDesc +} + +// Collect does the trick by calling ReadMemStats once and then constructing +// three different Metrics on the fly. 
+func (_ MemStatsCollector) Collect(ch chan<- prometheus.Metric) { + var ms runtime.MemStats + runtime.ReadMemStats(&ms) + ch <- prometheus.MustNewConstMetric( + allocDesc, + prometheus.GaugeValue, + float64(ms.Alloc), + ) + ch <- prometheus.MustNewConstMetric( + totalAllocDesc, + prometheus.GaugeValue, + float64(ms.TotalAlloc), + ) + ch <- prometheus.MustNewConstMetric( + numGCDesc, + prometheus.CounterValue, + float64(ms.NumGC), + ) + // To avoid new allocations on each collection, you could also keep + // metric objects around and return the same objects each time, just + // with new values set. +} + +func ExampleCollector_memstats() { + prometheus.MustRegister(&MemStatsCollector{}) + // Since we are dealing with custom Collector implementations, it might + // be a good idea to enable the collect checks in the registry. + prometheus.EnableCollectChecks(true) +} diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/client_golang/prometheus/example_selfcollector_test.go juju-core-2.0.0/src/github.com/prometheus/client_golang/prometheus/example_selfcollector_test.go --- juju-core-2.0~beta15/src/github.com/prometheus/client_golang/prometheus/example_selfcollector_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/client_golang/prometheus/example_selfcollector_test.go 2016-10-13 14:32:23.000000000 +0000 @@ -0,0 +1,69 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package prometheus_test + +import ( + "runtime" + + "github.com/golang/protobuf/proto" + + dto "github.com/prometheus/client_model/go" + + "github.com/prometheus/client_golang/prometheus" +) + +func NewCallbackMetric(desc *prometheus.Desc, callback func() float64) *CallbackMetric { + result := &CallbackMetric{desc: desc, callback: callback} + result.Init(result) // Initialize the SelfCollector. + return result +} + +// TODO: Come up with a better example. + +// CallbackMetric is an example for a user-defined Metric that exports the +// result of a function call as a metric of type "untyped" without any +// labels. It uses SelfCollector to turn the Metric into a Collector so that it +// can be registered with Prometheus. +// +// Note that this example is pretty much academic as the prometheus package +// already provides an UntypedFunc type. +type CallbackMetric struct { + prometheus.SelfCollector + + desc *prometheus.Desc + callback func() float64 +} + +func (cm *CallbackMetric) Desc() *prometheus.Desc { + return cm.desc +} + +func (cm *CallbackMetric) Write(m *dto.Metric) error { + m.Untyped = &dto.Untyped{Value: proto.Float64(cm.callback())} + return nil +} + +func ExampleSelfCollector() { + m := NewCallbackMetric( + prometheus.NewDesc( + "runtime_goroutines_count", + "Total number of goroutines that currently exist.", + nil, nil, // No labels, these must be nil. 
+ ), + func() float64 { + return float64(runtime.NumGoroutine()) + }, + ) + prometheus.MustRegister(m) +} diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/client_golang/prometheus/examples_test.go juju-core-2.0.0/src/github.com/prometheus/client_golang/prometheus/examples_test.go --- juju-core-2.0~beta15/src/github.com/prometheus/client_golang/prometheus/examples_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/client_golang/prometheus/examples_test.go 2016-10-13 14:32:23.000000000 +0000 @@ -0,0 +1,640 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus_test + +import ( + "fmt" + "math" + "net/http" + "os" + "runtime" + "sort" + "time" + + dto "github.com/prometheus/client_model/go" + + "github.com/golang/protobuf/proto" + + "github.com/prometheus/client_golang/prometheus" +) + +func ExampleGauge() { + opsQueued := prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: "our_company", + Subsystem: "blob_storage", + Name: "ops_queued", + Help: "Number of blob storage operations waiting to be processed.", + }) + prometheus.MustRegister(opsQueued) + + // 10 operations queued by the goroutine managing incoming requests. + opsQueued.Add(10) + // A worker goroutine has picked up a waiting operation. + opsQueued.Dec() + // And once more... 
+ opsQueued.Dec() +} + +func ExampleGaugeVec() { + opsQueued := prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Namespace: "our_company", + Subsystem: "blob_storage", + Name: "ops_queued", + Help: "Number of blob storage operations waiting to be processed, partitioned by user and type.", + }, + []string{ + // Which user has requested the operation? + "user", + // Of what type is the operation? + "type", + }, + ) + prometheus.MustRegister(opsQueued) + + // Increase a value using compact (but order-sensitive!) WithLabelValues(). + opsQueued.WithLabelValues("bob", "put").Add(4) + // Increase a value with a map using WithLabels. More verbose, but order + // doesn't matter anymore. + opsQueued.With(prometheus.Labels{"type": "delete", "user": "alice"}).Inc() +} + +func ExampleGaugeFunc() { + if err := prometheus.Register(prometheus.NewGaugeFunc( + prometheus.GaugeOpts{ + Subsystem: "runtime", + Name: "goroutines_count", + Help: "Number of goroutines that currently exist.", + }, + func() float64 { return float64(runtime.NumGoroutine()) }, + )); err == nil { + fmt.Println("GaugeFunc 'goroutines_count' registered.") + } + // Note that the count of goroutines is a gauge (and not a counter) as + // it can go up and down. + + // Output: + // GaugeFunc 'goroutines_count' registered. +} + +func ExampleCounter() { + pushCounter := prometheus.NewCounter(prometheus.CounterOpts{ + Name: "repository_pushes", // Note: No help string... + }) + err := prometheus.Register(pushCounter) // ... so this will return an error. + if err != nil { + fmt.Println("Push counter couldn't be registered, no counting will happen:", err) + return + } + + // Try it once more, this time with a help string. 
+ pushCounter = prometheus.NewCounter(prometheus.CounterOpts{ + Name: "repository_pushes", + Help: "Number of pushes to external repository.", + }) + err = prometheus.Register(pushCounter) + if err != nil { + fmt.Println("Push counter couldn't be registered AGAIN, no counting will happen:", err) + return + } + + pushComplete := make(chan struct{}) + // TODO: Start a goroutine that performs repository pushes and reports + // each completion via the channel. + for _ = range pushComplete { + pushCounter.Inc() + } + // Output: + // Push counter couldn't be registered, no counting will happen: descriptor Desc{fqName: "repository_pushes", help: "", constLabels: {}, variableLabels: []} is invalid: empty help string +} + +func ExampleCounterVec() { + httpReqs := prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "http_requests_total", + Help: "How many HTTP requests processed, partitioned by status code and HTTP method.", + }, + []string{"code", "method"}, + ) + prometheus.MustRegister(httpReqs) + + httpReqs.WithLabelValues("404", "POST").Add(42) + + // If you have to access the same set of labels very frequently, it + // might be good to retrieve the metric only once and keep a handle to + // it. But beware of deletion of that metric, see below! + m := httpReqs.WithLabelValues("200", "GET") + for i := 0; i < 1000000; i++ { + m.Inc() + } + // Delete a metric from the vector. If you have previously kept a handle + // to that metric (as above), future updates via that handle will go + // unseen (even if you re-create a metric with the same label set + // later). + httpReqs.DeleteLabelValues("200", "GET") + // Same thing with the more verbose Labels syntax. + httpReqs.Delete(prometheus.Labels{"method": "GET", "code": "200"}) +} + +func ExampleInstrumentHandler() { + // Handle the "/doc" endpoint with the standard http.FileServer handler. 
+ // By wrapping the handler with InstrumentHandler, request count, + // request and response sizes, and request latency are automatically + // exported to Prometheus, partitioned by HTTP status code and method + // and by the handler name (here "fileserver"). + http.Handle("/doc", prometheus.InstrumentHandler( + "fileserver", http.FileServer(http.Dir("/usr/share/doc")), + )) + // The Prometheus handler still has to be registered to handle the + // "/metrics" endpoint. The handler returned by prometheus.Handler() is + // already instrumented - with "prometheus" as the handler name. In this + // example, we want the handler name to be "metrics", so we instrument + // the uninstrumented Prometheus handler ourselves. + http.Handle("/metrics", prometheus.InstrumentHandler( + "metrics", prometheus.UninstrumentedHandler(), + )) +} + +func ExampleLabelPairSorter() { + labelPairs := []*dto.LabelPair{ + &dto.LabelPair{Name: proto.String("status"), Value: proto.String("404")}, + &dto.LabelPair{Name: proto.String("method"), Value: proto.String("get")}, + } + + sort.Sort(prometheus.LabelPairSorter(labelPairs)) + + fmt.Println(labelPairs) + // Output: + // [name:"method" value:"get" name:"status" value:"404" ] +} + +func ExampleRegister() { + // Imagine you have a worker pool and want to count the tasks completed. + taskCounter := prometheus.NewCounter(prometheus.CounterOpts{ + Subsystem: "worker_pool", + Name: "completed_tasks_total", + Help: "Total number of tasks completed.", + }) + // This will register fine. + if err := prometheus.Register(taskCounter); err != nil { + fmt.Println(err) + } else { + fmt.Println("taskCounter registered.") + } + // Don't forget to tell the HTTP server about the Prometheus handler. + // (In a real program, you still need to start the HTTP server...) + http.Handle("/metrics", prometheus.Handler()) + + // Now you can start workers and give every one of them a pointer to + // taskCounter and let it increment it whenever it completes a task. 
+ taskCounter.Inc() // This has to happen somewhere in the worker code. + + // But wait, you want to see how individual workers perform. So you need + // a vector of counters, with one element for each worker. + taskCounterVec := prometheus.NewCounterVec( + prometheus.CounterOpts{ + Subsystem: "worker_pool", + Name: "completed_tasks_total", + Help: "Total number of tasks completed.", + }, + []string{"worker_id"}, + ) + + // Registering will fail because we already have a metric of that name. + if err := prometheus.Register(taskCounterVec); err != nil { + fmt.Println("taskCounterVec not registered:", err) + } else { + fmt.Println("taskCounterVec registered.") + } + + // To fix, first unregister the old taskCounter. + if prometheus.Unregister(taskCounter) { + fmt.Println("taskCounter unregistered.") + } + + // Try registering taskCounterVec again. + if err := prometheus.Register(taskCounterVec); err != nil { + fmt.Println("taskCounterVec not registered:", err) + } else { + fmt.Println("taskCounterVec registered.") + } + // Bummer! Still doesn't work. + + // Prometheus will not allow you to ever export metrics with + // inconsistent help strings or label names. After unregistering, the + // unregistered metrics will cease to show up in the /metrics HTTP + // response, but the registry still remembers that those metrics had + // been exported before. For this example, we will now choose a + // different name. (In a real program, you would obviously not export + // the obsolete metric in the first place.) + taskCounterVec = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Subsystem: "worker_pool", + Name: "completed_tasks_by_id", + Help: "Total number of tasks completed.", + }, + []string{"worker_id"}, + ) + if err := prometheus.Register(taskCounterVec); err != nil { + fmt.Println("taskCounterVec not registered:", err) + } else { + fmt.Println("taskCounterVec registered.") + } + // Finally it worked! 
+ + // The workers have to tell taskCounterVec their id to increment the + // right element in the metric vector. + taskCounterVec.WithLabelValues("42").Inc() // Code from worker 42. + + // Each worker could also keep a reference to their own counter element + // around. Pick the counter at initialization time of the worker. + myCounter := taskCounterVec.WithLabelValues("42") // From worker 42 initialization code. + myCounter.Inc() // Somewhere in the code of that worker. + + // Note that something like WithLabelValues("42", "spurious arg") would + // panic (because you have provided too many label values). If you want + // to get an error instead, use GetMetricWithLabelValues(...) instead. + notMyCounter, err := taskCounterVec.GetMetricWithLabelValues("42", "spurious arg") + if err != nil { + fmt.Println("Worker initialization failed:", err) + } + if notMyCounter == nil { + fmt.Println("notMyCounter is nil.") + } + + // A different (and somewhat tricky) approach is to use + // ConstLabels. ConstLabels are pairs of label names and label values + // that never change. You might ask what those labels are good for (and + // rightfully so - if they never change, they could as well be part of + // the metric name). There are essentially two use-cases: The first is + // if labels are constant throughout the lifetime of a binary execution, + // but they vary over time or between different instances of a running + // binary. The second is what we have here: Each worker creates and + // registers an own Counter instance where the only difference is in the + // value of the ConstLabels. Those Counters can all be registered + // because the different ConstLabel values guarantee that each worker + // will increment a different Counter metric. 
+ counterOpts := prometheus.CounterOpts{ + Subsystem: "worker_pool", + Name: "completed_tasks", + Help: "Total number of tasks completed.", + ConstLabels: prometheus.Labels{"worker_id": "42"}, + } + taskCounterForWorker42 := prometheus.NewCounter(counterOpts) + if err := prometheus.Register(taskCounterForWorker42); err != nil { + fmt.Println("taskCounterVForWorker42 not registered:", err) + } else { + fmt.Println("taskCounterForWorker42 registered.") + } + // Obviously, in real code, taskCounterForWorker42 would be a member + // variable of a worker struct, and the "42" would be retrieved with a + // GetId() method or something. The Counter would be created and + // registered in the initialization code of the worker. + + // For the creation of the next Counter, we can recycle + // counterOpts. Just change the ConstLabels. + counterOpts.ConstLabels = prometheus.Labels{"worker_id": "2001"} + taskCounterForWorker2001 := prometheus.NewCounter(counterOpts) + if err := prometheus.Register(taskCounterForWorker2001); err != nil { + fmt.Println("taskCounterVForWorker2001 not registered:", err) + } else { + fmt.Println("taskCounterForWorker2001 registered.") + } + + taskCounterForWorker2001.Inc() + taskCounterForWorker42.Inc() + taskCounterForWorker2001.Inc() + + // Yet another approach would be to turn the workers themselves into + // Collectors and register them. See the Collector example for details. + + // Output: + // taskCounter registered. + // taskCounterVec not registered: a previously registered descriptor with the same fully-qualified name as Desc{fqName: "worker_pool_completed_tasks_total", help: "Total number of tasks completed.", constLabels: {}, variableLabels: [worker_id]} has different label names or a different help string + // taskCounter unregistered. 
+ // taskCounterVec not registered: a previously registered descriptor with the same fully-qualified name as Desc{fqName: "worker_pool_completed_tasks_total", help: "Total number of tasks completed.", constLabels: {}, variableLabels: [worker_id]} has different label names or a different help string + // taskCounterVec registered. + // Worker initialization failed: inconsistent label cardinality + // notMyCounter is nil. + // taskCounterForWorker42 registered. + // taskCounterForWorker2001 registered. +} + +func ExampleSummary() { + temps := prometheus.NewSummary(prometheus.SummaryOpts{ + Name: "pond_temperature_celsius", + Help: "The temperature of the frog pond.", // Sorry, we can't measure how badly it smells. + }) + + // Simulate some observations. + for i := 0; i < 1000; i++ { + temps.Observe(30 + math.Floor(120*math.Sin(float64(i)*0.1))/10) + } + + // Just for demonstration, let's check the state of the summary by + // (ab)using its Write method (which is usually only used by Prometheus + // internally). + metric := &dto.Metric{} + temps.Write(metric) + fmt.Println(proto.MarshalTextString(metric)) + + // Output: + // summary: < + // sample_count: 1000 + // sample_sum: 29969.50000000001 + // quantile: < + // quantile: 0.5 + // value: 31.1 + // > + // quantile: < + // quantile: 0.9 + // value: 41.3 + // > + // quantile: < + // quantile: 0.99 + // value: 41.9 + // > + // > +} + +func ExampleSummaryVec() { + temps := prometheus.NewSummaryVec( + prometheus.SummaryOpts{ + Name: "pond_temperature_celsius", + Help: "The temperature of the frog pond.", // Sorry, we can't measure how badly it smells. + }, + []string{"species"}, + ) + + // Simulate some observations. + for i := 0; i < 1000; i++ { + temps.WithLabelValues("litoria-caerulea").Observe(30 + math.Floor(120*math.Sin(float64(i)*0.1))/10) + temps.WithLabelValues("lithobates-catesbeianus").Observe(32 + math.Floor(100*math.Cos(float64(i)*0.11))/10) + } + + // Create a Summary without any observations. 
+ temps.WithLabelValues("leiopelma-hochstetteri") + + // Just for demonstration, let's check the state of the summary vector + // by (ab)using its Collect method and the Write method of its elements + // (which is usually only used by Prometheus internally - code like the + // following will never appear in your own code). + metricChan := make(chan prometheus.Metric) + go func() { + defer close(metricChan) + temps.Collect(metricChan) + }() + + metricStrings := []string{} + for metric := range metricChan { + dtoMetric := &dto.Metric{} + metric.Write(dtoMetric) + metricStrings = append(metricStrings, proto.MarshalTextString(dtoMetric)) + } + sort.Strings(metricStrings) // For reproducible print order. + fmt.Println(metricStrings) + + // Output: + // [label: < + // name: "species" + // value: "leiopelma-hochstetteri" + // > + // summary: < + // sample_count: 0 + // sample_sum: 0 + // quantile: < + // quantile: 0.5 + // value: nan + // > + // quantile: < + // quantile: 0.9 + // value: nan + // > + // quantile: < + // quantile: 0.99 + // value: nan + // > + // > + // label: < + // name: "species" + // value: "lithobates-catesbeianus" + // > + // summary: < + // sample_count: 1000 + // sample_sum: 31956.100000000017 + // quantile: < + // quantile: 0.5 + // value: 32.4 + // > + // quantile: < + // quantile: 0.9 + // value: 41.4 + // > + // quantile: < + // quantile: 0.99 + // value: 41.9 + // > + // > + // label: < + // name: "species" + // value: "litoria-caerulea" + // > + // summary: < + // sample_count: 1000 + // sample_sum: 29969.50000000001 + // quantile: < + // quantile: 0.5 + // value: 31.1 + // > + // quantile: < + // quantile: 0.9 + // value: 41.3 + // > + // quantile: < + // quantile: 0.99 + // value: 41.9 + // > + // > + // ] +} + +func ExampleConstSummary() { + desc := prometheus.NewDesc( + "http_request_duration_seconds", + "A summary of the HTTP request durations.", + []string{"code", "method"}, + prometheus.Labels{"owner": "example"}, + ) + + // Create a 
constant summary from values we got from a 3rd party telemetry system. + s := prometheus.MustNewConstSummary( + desc, + 4711, 403.34, + map[float64]float64{0.5: 42.3, 0.9: 323.3}, + "200", "get", + ) + + // Just for demonstration, let's check the state of the summary by + // (ab)using its Write method (which is usually only used by Prometheus + // internally). + metric := &dto.Metric{} + s.Write(metric) + fmt.Println(proto.MarshalTextString(metric)) + + // Output: + // label: < + // name: "code" + // value: "200" + // > + // label: < + // name: "method" + // value: "get" + // > + // label: < + // name: "owner" + // value: "example" + // > + // summary: < + // sample_count: 4711 + // sample_sum: 403.34 + // quantile: < + // quantile: 0.5 + // value: 42.3 + // > + // quantile: < + // quantile: 0.9 + // value: 323.3 + // > + // > +} + +func ExampleHistogram() { + temps := prometheus.NewHistogram(prometheus.HistogramOpts{ + Name: "pond_temperature_celsius", + Help: "The temperature of the frog pond.", // Sorry, we can't measure how badly it smells. + Buckets: prometheus.LinearBuckets(20, 5, 5), // 5 buckets, each 5 centigrade wide. + }) + + // Simulate some observations. + for i := 0; i < 1000; i++ { + temps.Observe(30 + math.Floor(120*math.Sin(float64(i)*0.1))/10) + } + + // Just for demonstration, let's check the state of the histogram by + // (ab)using its Write method (which is usually only used by Prometheus + // internally). 
+ metric := &dto.Metric{} + temps.Write(metric) + fmt.Println(proto.MarshalTextString(metric)) + + // Output: + // histogram: < + // sample_count: 1000 + // sample_sum: 29969.50000000001 + // bucket: < + // cumulative_count: 192 + // upper_bound: 20 + // > + // bucket: < + // cumulative_count: 366 + // upper_bound: 25 + // > + // bucket: < + // cumulative_count: 501 + // upper_bound: 30 + // > + // bucket: < + // cumulative_count: 638 + // upper_bound: 35 + // > + // bucket: < + // cumulative_count: 816 + // upper_bound: 40 + // > + // > +} + +func ExampleConstHistogram() { + desc := prometheus.NewDesc( + "http_request_duration_seconds", + "A histogram of the HTTP request durations.", + []string{"code", "method"}, + prometheus.Labels{"owner": "example"}, + ) + + // Create a constant histogram from values we got from a 3rd party telemetry system. + h := prometheus.MustNewConstHistogram( + desc, + 4711, 403.34, + map[float64]uint64{25: 121, 50: 2403, 100: 3221, 200: 4233}, + "200", "get", + ) + + // Just for demonstration, let's check the state of the histogram by + // (ab)using its Write method (which is usually only used by Prometheus + // internally). 
+ metric := &dto.Metric{} + h.Write(metric) + fmt.Println(proto.MarshalTextString(metric)) + + // Output: + // label: < + // name: "code" + // value: "200" + // > + // label: < + // name: "method" + // value: "get" + // > + // label: < + // name: "owner" + // value: "example" + // > + // histogram: < + // sample_count: 4711 + // sample_sum: 403.34 + // bucket: < + // cumulative_count: 121 + // upper_bound: 25 + // > + // bucket: < + // cumulative_count: 2403 + // upper_bound: 50 + // > + // bucket: < + // cumulative_count: 3221 + // upper_bound: 100 + // > + // bucket: < + // cumulative_count: 4233 + // upper_bound: 200 + // > + // > +} + +func ExamplePushCollectors() { + hostname, _ := os.Hostname() + completionTime := prometheus.NewGauge(prometheus.GaugeOpts{ + Name: "db_backup_last_completion_time", + Help: "The timestamp of the last succesful completion of a DB backup.", + }) + completionTime.Set(float64(time.Now().Unix())) + if err := prometheus.PushCollectors( + "db_backup", hostname, + "http://pushgateway:9091", + completionTime, + ); err != nil { + fmt.Println("Could not push completion time to Pushgateway:", err) + } +} diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/client_golang/prometheus/expvar.go juju-core-2.0.0/src/github.com/prometheus/client_golang/prometheus/expvar.go --- juju-core-2.0~beta15/src/github.com/prometheus/client_golang/prometheus/expvar.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/client_golang/prometheus/expvar.go 2016-10-13 14:32:23.000000000 +0000 @@ -0,0 +1,119 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "encoding/json" + "expvar" +) + +// ExpvarCollector collects metrics from the expvar interface. It provides a +// quick way to expose numeric values that are already exported via expvar as +// Prometheus metrics. Note that the data models of expvar and Prometheus are +// fundamentally different, and that the ExpvarCollector is inherently +// slow. Thus, the ExpvarCollector is probably great for experiments and +// prototying, but you should seriously consider a more direct implementation of +// Prometheus metrics for monitoring production systems. +// +// Use NewExpvarCollector to create new instances. +type ExpvarCollector struct { + exports map[string]*Desc +} + +// NewExpvarCollector returns a newly allocated ExpvarCollector that still has +// to be registered with the Prometheus registry. +// +// The exports map has the following meaning: +// +// The keys in the map correspond to expvar keys, i.e. for every expvar key you +// want to export as Prometheus metric, you need an entry in the exports +// map. The descriptor mapped to each key describes how to export the expvar +// value. It defines the name and the help string of the Prometheus metric +// proxying the expvar value. The type will always be Untyped. +// +// For descriptors without variable labels, the expvar value must be a number or +// a bool. The number is then directly exported as the Prometheus sample +// value. (For a bool, 'false' translates to 0 and 'true' to 1). Expvar values +// that are not numbers or bools are silently ignored. 
+// +// If the descriptor has one variable label, the expvar value must be an expvar +// map. The keys in the expvar map become the various values of the one +// Prometheus label. The values in the expvar map must be numbers or bools again +// as above. +// +// For descriptors with more than one variable label, the expvar must be a +// nested expvar map, i.e. where the values of the topmost map are maps again +// etc. until a depth is reached that corresponds to the number of labels. The +// leaves of that structure must be numbers or bools as above to serve as the +// sample values. +// +// Anything that does not fit into the scheme above is silently ignored. +func NewExpvarCollector(exports map[string]*Desc) *ExpvarCollector { + return &ExpvarCollector{ + exports: exports, + } +} + +// Describe implements Collector. +func (e *ExpvarCollector) Describe(ch chan<- *Desc) { + for _, desc := range e.exports { + ch <- desc + } +} + +// Collect implements Collector. +func (e *ExpvarCollector) Collect(ch chan<- Metric) { + for name, desc := range e.exports { + var m Metric + expVar := expvar.Get(name) + if expVar == nil { + continue + } + var v interface{} + labels := make([]string, len(desc.variableLabels)) + if err := json.Unmarshal([]byte(expVar.String()), &v); err != nil { + ch <- NewInvalidMetric(desc, err) + continue + } + var processValue func(v interface{}, i int) + processValue = func(v interface{}, i int) { + if i >= len(labels) { + copiedLabels := append(make([]string, 0, len(labels)), labels...) + switch v := v.(type) { + case float64: + m = MustNewConstMetric(desc, UntypedValue, v, copiedLabels...) + case bool: + if v { + m = MustNewConstMetric(desc, UntypedValue, 1, copiedLabels...) + } else { + m = MustNewConstMetric(desc, UntypedValue, 0, copiedLabels...) 
+ } + default: + return + } + ch <- m + return + } + vm, ok := v.(map[string]interface{}) + if !ok { + return + } + for lv, val := range vm { + labels[i] = lv + processValue(val, i+1) + } + } + processValue(v, 0) + } +} diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/client_golang/prometheus/expvar_test.go juju-core-2.0.0/src/github.com/prometheus/client_golang/prometheus/expvar_test.go --- juju-core-2.0~beta15/src/github.com/prometheus/client_golang/prometheus/expvar_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/client_golang/prometheus/expvar_test.go 2016-10-13 14:32:23.000000000 +0000 @@ -0,0 +1,97 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus_test + +import ( + "expvar" + "fmt" + "sort" + "strings" + + dto "github.com/prometheus/client_model/go" + + "github.com/prometheus/client_golang/prometheus" +) + +func ExampleExpvarCollector() { + expvarCollector := prometheus.NewExpvarCollector(map[string]*prometheus.Desc{ + "memstats": prometheus.NewDesc( + "expvar_memstats", + "All numeric memstats as one metric family. Not a good role-model, actually... 
;-)", + []string{"type"}, nil, + ), + "lone-int": prometheus.NewDesc( + "expvar_lone_int", + "Just an expvar int as an example.", + nil, nil, + ), + "http-request-map": prometheus.NewDesc( + "expvar_http_request_total", + "How many http requests processed, partitioned by status code and http method.", + []string{"code", "method"}, nil, + ), + }) + prometheus.MustRegister(expvarCollector) + + // The Prometheus part is done here. But to show that this example is + // doing anything, we have to manually export something via expvar. In + // real-life use-cases, some library would already have exported via + // expvar what we want to re-export as Prometheus metrics. + expvar.NewInt("lone-int").Set(42) + expvarMap := expvar.NewMap("http-request-map") + var ( + expvarMap1, expvarMap2 expvar.Map + expvarInt11, expvarInt12, expvarInt21, expvarInt22 expvar.Int + ) + expvarMap1.Init() + expvarMap2.Init() + expvarInt11.Set(3) + expvarInt12.Set(13) + expvarInt21.Set(11) + expvarInt22.Set(212) + expvarMap1.Set("POST", &expvarInt11) + expvarMap1.Set("GET", &expvarInt12) + expvarMap2.Set("POST", &expvarInt21) + expvarMap2.Set("GET", &expvarInt22) + expvarMap.Set("404", &expvarMap1) + expvarMap.Set("200", &expvarMap2) + // Results in the following expvar map: + // "http-request-count": {"200": {"POST": 11, "GET": 212}, "404": {"POST": 3, "GET": 13}} + + // Let's see what the scrape would yield, but exclude the memstats metrics. 
+ metricStrings := []string{} + metric := dto.Metric{} + metricChan := make(chan prometheus.Metric) + go func() { + expvarCollector.Collect(metricChan) + close(metricChan) + }() + for m := range metricChan { + if strings.Index(m.Desc().String(), "expvar_memstats") == -1 { + metric.Reset() + m.Write(&metric) + metricStrings = append(metricStrings, metric.String()) + } + } + sort.Strings(metricStrings) + for _, s := range metricStrings { + fmt.Println(strings.TrimRight(s, " ")) + } + // Output: + // label: label: untyped: + // label: label: untyped: + // label: label: untyped: + // label: label: untyped: + // untyped: +} diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/client_golang/prometheus/fnv.go juju-core-2.0.0/src/github.com/prometheus/client_golang/prometheus/fnv.go --- juju-core-2.0~beta15/src/github.com/prometheus/client_golang/prometheus/fnv.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/client_golang/prometheus/fnv.go 2016-10-13 14:32:22.000000000 +0000 @@ -0,0 +1,29 @@ +package prometheus + +// Inline and byte-free variant of hash/fnv's fnv64a. + +const ( + offset64 = 14695981039346656037 + prime64 = 1099511628211 +) + +// hashNew initializies a new fnv64a hash value. +func hashNew() uint64 { + return offset64 +} + +// hashAdd adds a string to a fnv64a hash value, returning the updated hash. +func hashAdd(h uint64, s string) uint64 { + for i := 0; i < len(s); i++ { + h ^= uint64(s[i]) + h *= prime64 + } + return h +} + +// hashAddByte adds a byte to a fnv64a hash value, returning the updated hash. 
+func hashAddByte(h uint64, b byte) uint64 { + h ^= uint64(b) + h *= prime64 + return h +} diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/client_golang/prometheus/gauge.go juju-core-2.0.0/src/github.com/prometheus/client_golang/prometheus/gauge.go --- juju-core-2.0~beta15/src/github.com/prometheus/client_golang/prometheus/gauge.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/client_golang/prometheus/gauge.go 2016-10-13 14:32:23.000000000 +0000 @@ -0,0 +1,144 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +// Gauge is a Metric that represents a single numerical value that can +// arbitrarily go up and down. +// +// A Gauge is typically used for measured values like temperatures or current +// memory usage, but also "counts" that can go up and down, like the number of +// running goroutines. +// +// To create Gauge instances, use NewGauge. +type Gauge interface { + Metric + Collector + + // Set sets the Gauge to an arbitrary value. + Set(float64) + // Inc increments the Gauge by 1. + Inc() + // Dec decrements the Gauge by 1. + Dec() + // Add adds the given value to the Gauge. (The value can be + // negative, resulting in a decrease of the Gauge.) + Add(float64) + // Sub subtracts the given value from the Gauge. (The value can be + // negative, resulting in an increase of the Gauge.) + Sub(float64) +} + +// GaugeOpts is an alias for Opts. 
See there for doc comments. +type GaugeOpts Opts + +// NewGauge creates a new Gauge based on the provided GaugeOpts. +func NewGauge(opts GaugeOpts) Gauge { + return newValue(NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + nil, + opts.ConstLabels, + ), GaugeValue, 0) +} + +// GaugeVec is a Collector that bundles a set of Gauges that all share the same +// Desc, but have different values for their variable labels. This is used if +// you want to count the same thing partitioned by various dimensions +// (e.g. number of operations queued, partitioned by user and operation +// type). Create instances with NewGaugeVec. +type GaugeVec struct { + MetricVec +} + +// NewGaugeVec creates a new GaugeVec based on the provided GaugeOpts and +// partitioned by the given label names. At least one label name must be +// provided. +func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec { + desc := NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + labelNames, + opts.ConstLabels, + ) + return &GaugeVec{ + MetricVec: MetricVec{ + children: map[uint64]Metric{}, + desc: desc, + newMetric: func(lvs ...string) Metric { + return newValue(desc, GaugeValue, 0, lvs...) + }, + }, + } +} + +// GetMetricWithLabelValues replaces the method of the same name in +// MetricVec. The difference is that this method returns a Gauge and not a +// Metric so that no type conversion is required. +func (m *GaugeVec) GetMetricWithLabelValues(lvs ...string) (Gauge, error) { + metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...) + if metric != nil { + return metric.(Gauge), err + } + return nil, err +} + +// GetMetricWith replaces the method of the same name in MetricVec. The +// difference is that this method returns a Gauge and not a Metric so that no +// type conversion is required. 
+func (m *GaugeVec) GetMetricWith(labels Labels) (Gauge, error) { + metric, err := m.MetricVec.GetMetricWith(labels) + if metric != nil { + return metric.(Gauge), err + } + return nil, err +} + +// WithLabelValues works as GetMetricWithLabelValues, but panics where +// GetMetricWithLabelValues would have returned an error. By not returning an +// error, WithLabelValues allows shortcuts like +// myVec.WithLabelValues("404", "GET").Add(42) +func (m *GaugeVec) WithLabelValues(lvs ...string) Gauge { + return m.MetricVec.WithLabelValues(lvs...).(Gauge) +} + +// With works as GetMetricWith, but panics where GetMetricWithLabels would have +// returned an error. By not returning an error, With allows shortcuts like +// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42) +func (m *GaugeVec) With(labels Labels) Gauge { + return m.MetricVec.With(labels).(Gauge) +} + +// GaugeFunc is a Gauge whose value is determined at collect time by calling a +// provided function. +// +// To create GaugeFunc instances, use NewGaugeFunc. +type GaugeFunc interface { + Metric + Collector +} + +// NewGaugeFunc creates a new GaugeFunc based on the provided GaugeOpts. The +// value reported is determined by calling the given function from within the +// Write method. Take into account that metric collection may happen +// concurrently. If that results in concurrent calls to Write, like in the case +// where a GaugeFunc is directly registered with Prometheus, the provided +// function must be concurrency-safe. 
+func NewGaugeFunc(opts GaugeOpts, function func() float64) GaugeFunc { + return newValueFunc(NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + nil, + opts.ConstLabels, + ), GaugeValue, function) +} diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/client_golang/prometheus/gauge_test.go juju-core-2.0.0/src/github.com/prometheus/client_golang/prometheus/gauge_test.go --- juju-core-2.0~beta15/src/github.com/prometheus/client_golang/prometheus/gauge_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/client_golang/prometheus/gauge_test.go 2016-10-13 14:32:22.000000000 +0000 @@ -0,0 +1,182 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package prometheus + +import ( + "math" + "math/rand" + "sync" + "testing" + "testing/quick" + + dto "github.com/prometheus/client_model/go" +) + +func listenGaugeStream(vals, result chan float64, done chan struct{}) { + var sum float64 +outer: + for { + select { + case <-done: + close(vals) + for v := range vals { + sum += v + } + break outer + case v := <-vals: + sum += v + } + } + result <- sum + close(result) +} + +func TestGaugeConcurrency(t *testing.T) { + it := func(n uint32) bool { + mutations := int(n % 10000) + concLevel := int(n%15 + 1) + + var start, end sync.WaitGroup + start.Add(1) + end.Add(concLevel) + + sStream := make(chan float64, mutations*concLevel) + result := make(chan float64) + done := make(chan struct{}) + + go listenGaugeStream(sStream, result, done) + go func() { + end.Wait() + close(done) + }() + + gge := NewGauge(GaugeOpts{ + Name: "test_gauge", + Help: "no help can be found here", + }) + for i := 0; i < concLevel; i++ { + vals := make([]float64, mutations) + for j := 0; j < mutations; j++ { + vals[j] = rand.Float64() - 0.5 + } + + go func(vals []float64) { + start.Wait() + for _, v := range vals { + sStream <- v + gge.Add(v) + } + end.Done() + }(vals) + } + start.Done() + + if expected, got := <-result, math.Float64frombits(gge.(*value).valBits); math.Abs(expected-got) > 0.000001 { + t.Fatalf("expected approx. 
%f, got %f", expected, got) + return false + } + return true + } + + if err := quick.Check(it, nil); err != nil { + t.Fatal(err) + } +} + +func TestGaugeVecConcurrency(t *testing.T) { + it := func(n uint32) bool { + mutations := int(n % 10000) + concLevel := int(n%15 + 1) + vecLength := int(n%5 + 1) + + var start, end sync.WaitGroup + start.Add(1) + end.Add(concLevel) + + sStreams := make([]chan float64, vecLength) + results := make([]chan float64, vecLength) + done := make(chan struct{}) + + for i := 0; i < vecLength; i++ { + sStreams[i] = make(chan float64, mutations*concLevel) + results[i] = make(chan float64) + go listenGaugeStream(sStreams[i], results[i], done) + } + + go func() { + end.Wait() + close(done) + }() + + gge := NewGaugeVec( + GaugeOpts{ + Name: "test_gauge", + Help: "no help can be found here", + }, + []string{"label"}, + ) + for i := 0; i < concLevel; i++ { + vals := make([]float64, mutations) + pick := make([]int, mutations) + for j := 0; j < mutations; j++ { + vals[j] = rand.Float64() - 0.5 + pick[j] = rand.Intn(vecLength) + } + + go func(vals []float64) { + start.Wait() + for i, v := range vals { + sStreams[pick[i]] <- v + gge.WithLabelValues(string('A' + pick[i])).Add(v) + } + end.Done() + }(vals) + } + start.Done() + + for i := range sStreams { + if expected, got := <-results[i], math.Float64frombits(gge.WithLabelValues(string('A'+i)).(*value).valBits); math.Abs(expected-got) > 0.000001 { + t.Fatalf("expected approx. 
%f, got %f", expected, got) + return false + } + } + return true + } + + if err := quick.Check(it, nil); err != nil { + t.Fatal(err) + } +} + +func TestGaugeFunc(t *testing.T) { + gf := NewGaugeFunc( + GaugeOpts{ + Name: "test_name", + Help: "test help", + ConstLabels: Labels{"a": "1", "b": "2"}, + }, + func() float64 { return 3.1415 }, + ) + + if expected, got := `Desc{fqName: "test_name", help: "test help", constLabels: {a="1",b="2"}, variableLabels: []}`, gf.Desc().String(); expected != got { + t.Errorf("expected %q, got %q", expected, got) + } + + m := &dto.Metric{} + gf.Write(m) + + if expected, got := `label: label: gauge: `, m.String(); expected != got { + t.Errorf("expected %q, got %q", expected, got) + } +} diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/client_golang/prometheus/.gitignore juju-core-2.0.0/src/github.com/prometheus/client_golang/prometheus/.gitignore --- juju-core-2.0~beta15/src/github.com/prometheus/client_golang/prometheus/.gitignore 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/client_golang/prometheus/.gitignore 2016-10-13 14:32:22.000000000 +0000 @@ -0,0 +1 @@ +command-line-arguments.test diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/client_golang/prometheus/go_collector.go juju-core-2.0.0/src/github.com/prometheus/client_golang/prometheus/go_collector.go --- juju-core-2.0~beta15/src/github.com/prometheus/client_golang/prometheus/go_collector.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/client_golang/prometheus/go_collector.go 2016-10-13 14:32:23.000000000 +0000 @@ -0,0 +1,263 @@ +package prometheus + +import ( + "fmt" + "runtime" + "runtime/debug" + "time" +) + +type goCollector struct { + goroutines Gauge + gcDesc *Desc + + // metrics to describe and collect + metrics memStatsMetrics +} + +// NewGoCollector returns a collector which exports metrics about the current +// go process. 
+func NewGoCollector() *goCollector { + return &goCollector{ + goroutines: NewGauge(GaugeOpts{ + Namespace: "go", + Name: "goroutines", + Help: "Number of goroutines that currently exist.", + }), + gcDesc: NewDesc( + "go_gc_duration_seconds", + "A summary of the GC invocation durations.", + nil, nil), + metrics: memStatsMetrics{ + { + desc: NewDesc( + memstatNamespace("alloc_bytes"), + "Number of bytes allocated and still in use.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.Alloc) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("alloc_bytes_total"), + "Total number of bytes allocated, even if freed.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.TotalAlloc) }, + valType: CounterValue, + }, { + desc: NewDesc( + memstatNamespace("sys_bytes"), + "Number of bytes obtained by system. Sum of all system allocations.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.Sys) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("lookups_total"), + "Total number of pointer lookups.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.Lookups) }, + valType: CounterValue, + }, { + desc: NewDesc( + memstatNamespace("mallocs_total"), + "Total number of mallocs.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.Mallocs) }, + valType: CounterValue, + }, { + desc: NewDesc( + memstatNamespace("frees_total"), + "Total number of frees.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.Frees) }, + valType: CounterValue, + }, { + desc: NewDesc( + memstatNamespace("heap_alloc_bytes"), + "Number of heap bytes allocated and still in use.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapAlloc) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("heap_sys_bytes"), + "Number of heap bytes obtained from system.", + 
nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapSys) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("heap_idle_bytes"), + "Number of heap bytes waiting to be used.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapIdle) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("heap_inuse_bytes"), + "Number of heap bytes that are in use.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapInuse) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("heap_released_bytes_total"), + "Total number of heap bytes released to OS.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapReleased) }, + valType: CounterValue, + }, { + desc: NewDesc( + memstatNamespace("heap_objects"), + "Number of allocated objects.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapObjects) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("stack_inuse_bytes"), + "Number of bytes in use by the stack allocator.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackInuse) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("stack_sys_bytes"), + "Number of bytes obtained from system for stack allocator.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackSys) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("mspan_inuse_bytes"), + "Number of bytes in use by mspan structures.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanInuse) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("mspan_sys_bytes"), + "Number of bytes used for mspan structures obtained from system.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanSys) }, + valType: GaugeValue, + }, { + desc: NewDesc( 
+ memstatNamespace("mcache_inuse_bytes"), + "Number of bytes in use by mcache structures.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheInuse) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("mcache_sys_bytes"), + "Number of bytes used for mcache structures obtained from system.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheSys) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("buck_hash_sys_bytes"), + "Number of bytes used by the profiling bucket hash table.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.BuckHashSys) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("gc_sys_bytes"), + "Number of bytes used for garbage collection system metadata.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.GCSys) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("other_sys_bytes"), + "Number of bytes used for other system allocations.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.OtherSys) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("next_gc_bytes"), + "Number of heap bytes when next garbage collection will take place.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.NextGC) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("last_gc_time_seconds"), + "Number of seconds since 1970 of last garbage collection.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.LastGC) / 1e9 }, + valType: GaugeValue, + }, + }, + } +} + +func memstatNamespace(s string) string { + return fmt.Sprintf("go_memstats_%s", s) +} + +// Describe returns all descriptions of the collector. 
+func (c *goCollector) Describe(ch chan<- *Desc) { + ch <- c.goroutines.Desc() + ch <- c.gcDesc + + for _, i := range c.metrics { + ch <- i.desc + } +} + +// Collect returns the current state of all metrics of the collector. +func (c *goCollector) Collect(ch chan<- Metric) { + c.goroutines.Set(float64(runtime.NumGoroutine())) + ch <- c.goroutines + + var stats debug.GCStats + stats.PauseQuantiles = make([]time.Duration, 5) + debug.ReadGCStats(&stats) + + quantiles := make(map[float64]float64) + for idx, pq := range stats.PauseQuantiles[1:] { + quantiles[float64(idx+1)/float64(len(stats.PauseQuantiles)-1)] = pq.Seconds() + } + quantiles[0.0] = stats.PauseQuantiles[0].Seconds() + ch <- MustNewConstSummary(c.gcDesc, uint64(stats.NumGC), float64(stats.PauseTotal.Seconds()), quantiles) + + ms := &runtime.MemStats{} + runtime.ReadMemStats(ms) + for _, i := range c.metrics { + ch <- MustNewConstMetric(i.desc, i.valType, i.eval(ms)) + } +} + +// memStatsMetrics provide description, value, and value type for memstat metrics. 
+type memStatsMetrics []struct { + desc *Desc + eval func(*runtime.MemStats) float64 + valType ValueType +} diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/client_golang/prometheus/go_collector_test.go juju-core-2.0.0/src/github.com/prometheus/client_golang/prometheus/go_collector_test.go --- juju-core-2.0~beta15/src/github.com/prometheus/client_golang/prometheus/go_collector_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/client_golang/prometheus/go_collector_test.go 2016-10-13 14:32:22.000000000 +0000 @@ -0,0 +1,123 @@ +package prometheus + +import ( + "runtime" + "testing" + "time" + + dto "github.com/prometheus/client_model/go" +) + +func TestGoCollector(t *testing.T) { + var ( + c = NewGoCollector() + ch = make(chan Metric) + waitc = make(chan struct{}) + closec = make(chan struct{}) + old = -1 + ) + defer close(closec) + + go func() { + c.Collect(ch) + go func(c <-chan struct{}) { + <-c + }(closec) + <-waitc + c.Collect(ch) + }() + + for { + select { + case metric := <-ch: + switch m := metric.(type) { + // Attention, this also catches Counter... + case Gauge: + pb := &dto.Metric{} + m.Write(pb) + if pb.GetGauge() == nil { + continue + } + + if old == -1 { + old = int(pb.GetGauge().GetValue()) + close(waitc) + continue + } + + if diff := int(pb.GetGauge().GetValue()) - old; diff != 1 { + // TODO: This is flaky in highly concurrent situations. + t.Errorf("want 1 new goroutine, got %d", diff) + } + + // GoCollector performs two sends per call. + // On line 27 we need to receive the second send + // to shut down cleanly. 
+ <-ch + return + } + case <-time.After(1 * time.Second): + t.Fatalf("expected collect timed out") + } + } +} + +func TestGCCollector(t *testing.T) { + var ( + c = NewGoCollector() + ch = make(chan Metric) + waitc = make(chan struct{}) + closec = make(chan struct{}) + oldGC uint64 + oldPause float64 + ) + defer close(closec) + + go func() { + c.Collect(ch) + // force GC + runtime.GC() + <-waitc + c.Collect(ch) + }() + + first := true + for { + select { + case metric := <-ch: + switch m := metric.(type) { + case *constSummary, *value: + pb := &dto.Metric{} + m.Write(pb) + if pb.GetSummary() == nil { + continue + } + + if len(pb.GetSummary().Quantile) != 5 { + t.Errorf("expected 4 buckets, got %d", len(pb.GetSummary().Quantile)) + } + for idx, want := range []float64{0.0, 0.25, 0.5, 0.75, 1.0} { + if *pb.GetSummary().Quantile[idx].Quantile != want { + t.Errorf("bucket #%d is off, got %f, want %f", idx, *pb.GetSummary().Quantile[idx].Quantile, want) + } + } + if first { + first = false + oldGC = *pb.GetSummary().SampleCount + oldPause = *pb.GetSummary().SampleSum + close(waitc) + continue + } + if diff := *pb.GetSummary().SampleCount - oldGC; diff != 1 { + t.Errorf("want 1 new garbage collection run, got %d", diff) + } + if diff := *pb.GetSummary().SampleSum - oldPause; diff <= 0 { + t.Errorf("want moar pause, got %f", diff) + } + return + } + case <-time.After(1 * time.Second): + t.Fatalf("expected collect timed out") + } + } +} diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/client_golang/prometheus/histogram.go juju-core-2.0.0/src/github.com/prometheus/client_golang/prometheus/histogram.go --- juju-core-2.0~beta15/src/github.com/prometheus/client_golang/prometheus/histogram.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/client_golang/prometheus/histogram.go 2016-10-13 14:32:23.000000000 +0000 @@ -0,0 +1,448 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); 
+// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "fmt" + "math" + "sort" + "sync/atomic" + + "github.com/golang/protobuf/proto" + + dto "github.com/prometheus/client_model/go" +) + +// A Histogram counts individual observations from an event or sample stream in +// configurable buckets. Similar to a summary, it also provides a sum of +// observations and an observation count. +// +// On the Prometheus server, quantiles can be calculated from a Histogram using +// the histogram_quantile function in the query language. +// +// Note that Histograms, in contrast to Summaries, can be aggregated with the +// Prometheus query language (see the documentation for detailed +// procedures). However, Histograms require the user to pre-define suitable +// buckets, and they are in general less accurate. The Observe method of a +// Histogram has a very low performance overhead in comparison with the Observe +// method of a Summary. +// +// To create Histogram instances, use NewHistogram. +type Histogram interface { + Metric + Collector + + // Observe adds a single observation to the histogram. + Observe(float64) +} + +// bucketLabel is used for the label that defines the upper bound of a +// bucket of a histogram ("le" -> "less or equal"). +const bucketLabel = "le" + +var ( + // DefBuckets are the default Histogram buckets. The default buckets are + // tailored to broadly measure the response time (in seconds) of a + // network service. 
Most likely, however, you will be required to define + // buckets customized to your use case. + DefBuckets = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10} + + errBucketLabelNotAllowed = fmt.Errorf( + "%q is not allowed as label name in histograms", bucketLabel, + ) +) + +// LinearBuckets creates 'count' buckets, each 'width' wide, where the lowest +// bucket has an upper bound of 'start'. The final +Inf bucket is not counted +// and not included in the returned slice. The returned slice is meant to be +// used for the Buckets field of HistogramOpts. +// +// The function panics if 'count' is zero or negative. +func LinearBuckets(start, width float64, count int) []float64 { + if count < 1 { + panic("LinearBuckets needs a positive count") + } + buckets := make([]float64, count) + for i := range buckets { + buckets[i] = start + start += width + } + return buckets +} + +// ExponentialBuckets creates 'count' buckets, where the lowest bucket has an +// upper bound of 'start' and each following bucket's upper bound is 'factor' +// times the previous bucket's upper bound. The final +Inf bucket is not counted +// and not included in the returned slice. The returned slice is meant to be +// used for the Buckets field of HistogramOpts. +// +// The function panics if 'count' is 0 or negative, if 'start' is 0 or negative, +// or if 'factor' is less than or equal 1. +func ExponentialBuckets(start, factor float64, count int) []float64 { + if count < 1 { + panic("ExponentialBuckets needs a positive count") + } + if start <= 0 { + panic("ExponentialBuckets needs a positive start value") + } + if factor <= 1 { + panic("ExponentialBuckets needs a factor greater than 1") + } + buckets := make([]float64, count) + for i := range buckets { + buckets[i] = start + start *= factor + } + return buckets +} + +// HistogramOpts bundles the options for creating a Histogram metric. It is +// mandatory to set Name and Help to a non-empty string. 
All other fields are +// optional and can safely be left at their zero value. +type HistogramOpts struct { + // Namespace, Subsystem, and Name are components of the fully-qualified + // name of the Histogram (created by joining these components with + // "_"). Only Name is mandatory, the others merely help structuring the + // name. Note that the fully-qualified name of the Histogram must be a + // valid Prometheus metric name. + Namespace string + Subsystem string + Name string + + // Help provides information about this Histogram. Mandatory! + // + // Metrics with the same fully-qualified name must have the same Help + // string. + Help string + + // ConstLabels are used to attach fixed labels to this + // Histogram. Histograms with the same fully-qualified name must have the + // same label names in their ConstLabels. + // + // Note that in most cases, labels have a value that varies during the + // lifetime of a process. Those labels are usually managed with a + // HistogramVec. ConstLabels serve only special purposes. One is for the + // special case where the value of a label does not change during the + // lifetime of a process, e.g. if the revision of the running binary is + // put into a label. Another, more advanced purpose is if more than one + // Collector needs to collect Histograms with the same fully-qualified + // name. In that case, those Summaries must differ in the values of + // their ConstLabels. See the Collector examples. + // + // If the value of a label never changes (not even between binaries), + // that label most likely should not be a label at all (but part of the + // metric name). + ConstLabels Labels + + // Buckets defines the buckets into which observations are counted. Each + // element in the slice is the upper inclusive bound of a bucket. The + // values must be sorted in strictly increasing order. There is no need + // to add a highest bucket with +Inf bound, it will be added + // implicitly. The default value is DefBuckets. 
+ Buckets []float64 +} + +// NewHistogram creates a new Histogram based on the provided HistogramOpts. It +// panics if the buckets in HistogramOpts are not in strictly increasing order. +func NewHistogram(opts HistogramOpts) Histogram { + return newHistogram( + NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + nil, + opts.ConstLabels, + ), + opts, + ) +} + +func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogram { + if len(desc.variableLabels) != len(labelValues) { + panic(errInconsistentCardinality) + } + + for _, n := range desc.variableLabels { + if n == bucketLabel { + panic(errBucketLabelNotAllowed) + } + } + for _, lp := range desc.constLabelPairs { + if lp.GetName() == bucketLabel { + panic(errBucketLabelNotAllowed) + } + } + + if len(opts.Buckets) == 0 { + opts.Buckets = DefBuckets + } + + h := &histogram{ + desc: desc, + upperBounds: opts.Buckets, + labelPairs: makeLabelPairs(desc, labelValues), + } + for i, upperBound := range h.upperBounds { + if i < len(h.upperBounds)-1 { + if upperBound >= h.upperBounds[i+1] { + panic(fmt.Errorf( + "histogram buckets must be in increasing order: %f >= %f", + upperBound, h.upperBounds[i+1], + )) + } + } else { + if math.IsInf(upperBound, +1) { + // The +Inf bucket is implicit. Remove it here. + h.upperBounds = h.upperBounds[:i] + } + } + } + // Finally we know the final length of h.upperBounds and can make counts. + h.counts = make([]uint64, len(h.upperBounds)) + + h.Init(h) // Init self-collection. + return h +} + +type histogram struct { + // sumBits contains the bits of the float64 representing the sum of all + // observations. sumBits and count have to go first in the struct to + // guarantee alignment for atomic operations. + // http://golang.org/pkg/sync/atomic/#pkg-note-BUG + sumBits uint64 + count uint64 + + SelfCollector + // Note that there is no mutex required. 
+ + desc *Desc + + upperBounds []float64 + counts []uint64 + + labelPairs []*dto.LabelPair +} + +func (h *histogram) Desc() *Desc { + return h.desc +} + +func (h *histogram) Observe(v float64) { + // TODO(beorn7): For small numbers of buckets (<30), a linear search is + // slightly faster than the binary search. If we really care, we could + // switch from one search strategy to the other depending on the number + // of buckets. + // + // Microbenchmarks (BenchmarkHistogramNoLabels): + // 11 buckets: 38.3 ns/op linear - binary 48.7 ns/op + // 100 buckets: 78.1 ns/op linear - binary 54.9 ns/op + // 300 buckets: 154 ns/op linear - binary 61.6 ns/op + i := sort.SearchFloat64s(h.upperBounds, v) + if i < len(h.counts) { + atomic.AddUint64(&h.counts[i], 1) + } + atomic.AddUint64(&h.count, 1) + for { + oldBits := atomic.LoadUint64(&h.sumBits) + newBits := math.Float64bits(math.Float64frombits(oldBits) + v) + if atomic.CompareAndSwapUint64(&h.sumBits, oldBits, newBits) { + break + } + } +} + +func (h *histogram) Write(out *dto.Metric) error { + his := &dto.Histogram{} + buckets := make([]*dto.Bucket, len(h.upperBounds)) + + his.SampleSum = proto.Float64(math.Float64frombits(atomic.LoadUint64(&h.sumBits))) + his.SampleCount = proto.Uint64(atomic.LoadUint64(&h.count)) + var count uint64 + for i, upperBound := range h.upperBounds { + count += atomic.LoadUint64(&h.counts[i]) + buckets[i] = &dto.Bucket{ + CumulativeCount: proto.Uint64(count), + UpperBound: proto.Float64(upperBound), + } + } + his.Bucket = buckets + out.Histogram = his + out.Label = h.labelPairs + return nil +} + +// HistogramVec is a Collector that bundles a set of Histograms that all share the +// same Desc, but have different values for their variable labels. This is used +// if you want to count the same thing partitioned by various dimensions +// (e.g. HTTP request latencies, partitioned by status code and method). Create +// instances with NewHistogramVec. 
+type HistogramVec struct { + MetricVec +} + +// NewHistogramVec creates a new HistogramVec based on the provided HistogramOpts and +// partitioned by the given label names. At least one label name must be +// provided. +func NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec { + desc := NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + labelNames, + opts.ConstLabels, + ) + return &HistogramVec{ + MetricVec: MetricVec{ + children: map[uint64]Metric{}, + desc: desc, + newMetric: func(lvs ...string) Metric { + return newHistogram(desc, opts, lvs...) + }, + }, + } +} + +// GetMetricWithLabelValues replaces the method of the same name in +// MetricVec. The difference is that this method returns a Histogram and not a +// Metric so that no type conversion is required. +func (m *HistogramVec) GetMetricWithLabelValues(lvs ...string) (Histogram, error) { + metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...) + if metric != nil { + return metric.(Histogram), err + } + return nil, err +} + +// GetMetricWith replaces the method of the same name in MetricVec. The +// difference is that this method returns a Histogram and not a Metric so that no +// type conversion is required. +func (m *HistogramVec) GetMetricWith(labels Labels) (Histogram, error) { + metric, err := m.MetricVec.GetMetricWith(labels) + if metric != nil { + return metric.(Histogram), err + } + return nil, err +} + +// WithLabelValues works as GetMetricWithLabelValues, but panics where +// GetMetricWithLabelValues would have returned an error. By not returning an +// error, WithLabelValues allows shortcuts like +// myVec.WithLabelValues("404", "GET").Observe(42.21) +func (m *HistogramVec) WithLabelValues(lvs ...string) Histogram { + return m.MetricVec.WithLabelValues(lvs...).(Histogram) +} + +// With works as GetMetricWith, but panics where GetMetricWithLabels would have +// returned an error. 
By not returning an error, With allows shortcuts like +// myVec.With(Labels{"code": "404", "method": "GET"}).Observe(42.21) +func (m *HistogramVec) With(labels Labels) Histogram { + return m.MetricVec.With(labels).(Histogram) +} + +type constHistogram struct { + desc *Desc + count uint64 + sum float64 + buckets map[float64]uint64 + labelPairs []*dto.LabelPair +} + +func (h *constHistogram) Desc() *Desc { + return h.desc +} + +func (h *constHistogram) Write(out *dto.Metric) error { + his := &dto.Histogram{} + buckets := make([]*dto.Bucket, 0, len(h.buckets)) + + his.SampleCount = proto.Uint64(h.count) + his.SampleSum = proto.Float64(h.sum) + + for upperBound, count := range h.buckets { + buckets = append(buckets, &dto.Bucket{ + CumulativeCount: proto.Uint64(count), + UpperBound: proto.Float64(upperBound), + }) + } + + if len(buckets) > 0 { + sort.Sort(buckSort(buckets)) + } + his.Bucket = buckets + + out.Histogram = his + out.Label = h.labelPairs + + return nil +} + +// NewConstHistogram returns a metric representing a Prometheus histogram with +// fixed values for the count, sum, and bucket counts. As those parameters +// cannot be changed, the returned value does not implement the Histogram +// interface (but only the Metric interface). Users of this package will not +// have much use for it in regular operations. However, when implementing custom +// Collectors, it is useful as a throw-away metric that is generated on the fly +// to send it to Prometheus in the Collect method. +// +// buckets is a map of upper bounds to cumulative counts, excluding the +Inf +// bucket. +// +// NewConstHistogram returns an error if the length of labelValues is not +// consistent with the variable labels in Desc. 
+func NewConstHistogram( + desc *Desc, + count uint64, + sum float64, + buckets map[float64]uint64, + labelValues ...string, +) (Metric, error) { + if len(desc.variableLabels) != len(labelValues) { + return nil, errInconsistentCardinality + } + return &constHistogram{ + desc: desc, + count: count, + sum: sum, + buckets: buckets, + labelPairs: makeLabelPairs(desc, labelValues), + }, nil +} + +// MustNewConstHistogram is a version of NewConstHistogram that panics where +// NewConstMetric would have returned an error. +func MustNewConstHistogram( + desc *Desc, + count uint64, + sum float64, + buckets map[float64]uint64, + labelValues ...string, +) Metric { + m, err := NewConstHistogram(desc, count, sum, buckets, labelValues...) + if err != nil { + panic(err) + } + return m +} + +type buckSort []*dto.Bucket + +func (s buckSort) Len() int { + return len(s) +} + +func (s buckSort) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func (s buckSort) Less(i, j int) bool { + return s[i].GetUpperBound() < s[j].GetUpperBound() +} diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/client_golang/prometheus/histogram_test.go juju-core-2.0.0/src/github.com/prometheus/client_golang/prometheus/histogram_test.go --- juju-core-2.0~beta15/src/github.com/prometheus/client_golang/prometheus/histogram_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/client_golang/prometheus/histogram_test.go 2016-10-13 14:32:22.000000000 +0000 @@ -0,0 +1,326 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "math" + "math/rand" + "reflect" + "sort" + "sync" + "testing" + "testing/quick" + + dto "github.com/prometheus/client_model/go" +) + +func benchmarkHistogramObserve(w int, b *testing.B) { + b.StopTimer() + + wg := new(sync.WaitGroup) + wg.Add(w) + + g := new(sync.WaitGroup) + g.Add(1) + + s := NewHistogram(HistogramOpts{}) + + for i := 0; i < w; i++ { + go func() { + g.Wait() + + for i := 0; i < b.N; i++ { + s.Observe(float64(i)) + } + + wg.Done() + }() + } + + b.StartTimer() + g.Done() + wg.Wait() +} + +func BenchmarkHistogramObserve1(b *testing.B) { + benchmarkHistogramObserve(1, b) +} + +func BenchmarkHistogramObserve2(b *testing.B) { + benchmarkHistogramObserve(2, b) +} + +func BenchmarkHistogramObserve4(b *testing.B) { + benchmarkHistogramObserve(4, b) +} + +func BenchmarkHistogramObserve8(b *testing.B) { + benchmarkHistogramObserve(8, b) +} + +func benchmarkHistogramWrite(w int, b *testing.B) { + b.StopTimer() + + wg := new(sync.WaitGroup) + wg.Add(w) + + g := new(sync.WaitGroup) + g.Add(1) + + s := NewHistogram(HistogramOpts{}) + + for i := 0; i < 1000000; i++ { + s.Observe(float64(i)) + } + + for j := 0; j < w; j++ { + outs := make([]dto.Metric, b.N) + + go func(o []dto.Metric) { + g.Wait() + + for i := 0; i < b.N; i++ { + s.Write(&o[i]) + } + + wg.Done() + }(outs) + } + + b.StartTimer() + g.Done() + wg.Wait() +} + +func BenchmarkHistogramWrite1(b *testing.B) { + benchmarkHistogramWrite(1, b) +} + +func BenchmarkHistogramWrite2(b *testing.B) { + benchmarkHistogramWrite(2, b) +} + +func BenchmarkHistogramWrite4(b *testing.B) { + benchmarkHistogramWrite(4, b) +} + +func BenchmarkHistogramWrite8(b *testing.B) { + benchmarkHistogramWrite(8, b) +} + +// Intentionally adding +Inf here to test if that case is handled correctly. +// Also, getCumulativeCounts depends on it. 
+var testBuckets = []float64{-2, -1, -0.5, 0, 0.5, 1, 2, math.Inf(+1)} + +func TestHistogramConcurrency(t *testing.T) { + if testing.Short() { + t.Skip("Skipping test in short mode.") + } + + rand.Seed(42) + + it := func(n uint32) bool { + mutations := int(n%1e4 + 1e4) + concLevel := int(n%5 + 1) + total := mutations * concLevel + + var start, end sync.WaitGroup + start.Add(1) + end.Add(concLevel) + + sum := NewHistogram(HistogramOpts{ + Name: "test_histogram", + Help: "helpless", + Buckets: testBuckets, + }) + + allVars := make([]float64, total) + var sampleSum float64 + for i := 0; i < concLevel; i++ { + vals := make([]float64, mutations) + for j := 0; j < mutations; j++ { + v := rand.NormFloat64() + vals[j] = v + allVars[i*mutations+j] = v + sampleSum += v + } + + go func(vals []float64) { + start.Wait() + for _, v := range vals { + sum.Observe(v) + } + end.Done() + }(vals) + } + sort.Float64s(allVars) + start.Done() + end.Wait() + + m := &dto.Metric{} + sum.Write(m) + if got, want := int(*m.Histogram.SampleCount), total; got != want { + t.Errorf("got sample count %d, want %d", got, want) + } + if got, want := *m.Histogram.SampleSum, sampleSum; math.Abs((got-want)/want) > 0.001 { + t.Errorf("got sample sum %f, want %f", got, want) + } + + wantCounts := getCumulativeCounts(allVars) + + if got, want := len(m.Histogram.Bucket), len(testBuckets)-1; got != want { + t.Errorf("got %d buckets in protobuf, want %d", got, want) + } + for i, wantBound := range testBuckets { + if i == len(testBuckets)-1 { + break // No +Inf bucket in protobuf. 
+ } + if gotBound := *m.Histogram.Bucket[i].UpperBound; gotBound != wantBound { + t.Errorf("got bound %f, want %f", gotBound, wantBound) + } + if gotCount, wantCount := *m.Histogram.Bucket[i].CumulativeCount, wantCounts[i]; gotCount != wantCount { + t.Errorf("got count %d, want %d", gotCount, wantCount) + } + } + return true + } + + if err := quick.Check(it, nil); err != nil { + t.Error(err) + } +} + +func TestHistogramVecConcurrency(t *testing.T) { + if testing.Short() { + t.Skip("Skipping test in short mode.") + } + + rand.Seed(42) + + objectives := make([]float64, 0, len(DefObjectives)) + for qu := range DefObjectives { + + objectives = append(objectives, qu) + } + sort.Float64s(objectives) + + it := func(n uint32) bool { + mutations := int(n%1e4 + 1e4) + concLevel := int(n%7 + 1) + vecLength := int(n%3 + 1) + + var start, end sync.WaitGroup + start.Add(1) + end.Add(concLevel) + + his := NewHistogramVec( + HistogramOpts{ + Name: "test_histogram", + Help: "helpless", + Buckets: []float64{-2, -1, -0.5, 0, 0.5, 1, 2, math.Inf(+1)}, + }, + []string{"label"}, + ) + + allVars := make([][]float64, vecLength) + sampleSums := make([]float64, vecLength) + for i := 0; i < concLevel; i++ { + vals := make([]float64, mutations) + picks := make([]int, mutations) + for j := 0; j < mutations; j++ { + v := rand.NormFloat64() + vals[j] = v + pick := rand.Intn(vecLength) + picks[j] = pick + allVars[pick] = append(allVars[pick], v) + sampleSums[pick] += v + } + + go func(vals []float64) { + start.Wait() + for i, v := range vals { + his.WithLabelValues(string('A' + picks[i])).Observe(v) + } + end.Done() + }(vals) + } + for _, vars := range allVars { + sort.Float64s(vars) + } + start.Done() + end.Wait() + + for i := 0; i < vecLength; i++ { + m := &dto.Metric{} + s := his.WithLabelValues(string('A' + i)) + s.Write(m) + + if got, want := len(m.Histogram.Bucket), len(testBuckets)-1; got != want { + t.Errorf("got %d buckets in protobuf, want %d", got, want) + } + if got, want := 
int(*m.Histogram.SampleCount), len(allVars[i]); got != want { + t.Errorf("got sample count %d, want %d", got, want) + } + if got, want := *m.Histogram.SampleSum, sampleSums[i]; math.Abs((got-want)/want) > 0.001 { + t.Errorf("got sample sum %f, want %f", got, want) + } + + wantCounts := getCumulativeCounts(allVars[i]) + + for j, wantBound := range testBuckets { + if j == len(testBuckets)-1 { + break // No +Inf bucket in protobuf. + } + if gotBound := *m.Histogram.Bucket[j].UpperBound; gotBound != wantBound { + t.Errorf("got bound %f, want %f", gotBound, wantBound) + } + if gotCount, wantCount := *m.Histogram.Bucket[j].CumulativeCount, wantCounts[j]; gotCount != wantCount { + t.Errorf("got count %d, want %d", gotCount, wantCount) + } + } + } + return true + } + + if err := quick.Check(it, nil); err != nil { + t.Error(err) + } +} + +func getCumulativeCounts(vars []float64) []uint64 { + counts := make([]uint64, len(testBuckets)) + for _, v := range vars { + for i := len(testBuckets) - 1; i >= 0; i-- { + if v > testBuckets[i] { + break + } + counts[i]++ + } + } + return counts +} + +func TestBuckets(t *testing.T) { + got := LinearBuckets(-15, 5, 6) + want := []float64{-15, -10, -5, 0, 5, 10} + if !reflect.DeepEqual(got, want) { + t.Errorf("linear buckets: got %v, want %v", got, want) + } + + got = ExponentialBuckets(100, 1.2, 3) + want = []float64{100, 120, 144} + if !reflect.DeepEqual(got, want) { + t.Errorf("linear buckets: got %v, want %v", got, want) + } +} diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/client_golang/prometheus/http.go juju-core-2.0.0/src/github.com/prometheus/client_golang/prometheus/http.go --- juju-core-2.0~beta15/src/github.com/prometheus/client_golang/prometheus/http.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/client_golang/prometheus/http.go 2016-10-13 14:32:23.000000000 +0000 @@ -0,0 +1,361 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the 
"License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "bufio" + "io" + "net" + "net/http" + "strconv" + "strings" + "time" +) + +var instLabels = []string{"method", "code"} + +type nower interface { + Now() time.Time +} + +type nowFunc func() time.Time + +func (n nowFunc) Now() time.Time { + return n() +} + +var now nower = nowFunc(func() time.Time { + return time.Now() +}) + +func nowSeries(t ...time.Time) nower { + return nowFunc(func() time.Time { + defer func() { + t = t[1:] + }() + + return t[0] + }) +} + +// InstrumentHandler wraps the given HTTP handler for instrumentation. It +// registers four metric collectors (if not already done) and reports HTTP +// metrics to the (newly or already) registered collectors: http_requests_total +// (CounterVec), http_request_duration_microseconds (Summary), +// http_request_size_bytes (Summary), http_response_size_bytes (Summary). Each +// has a constant label named "handler" with the provided handlerName as +// value. http_requests_total is a metric vector partitioned by HTTP method +// (label name "method") and HTTP status code (label name "code"). +func InstrumentHandler(handlerName string, handler http.Handler) http.HandlerFunc { + return InstrumentHandlerFunc(handlerName, handler.ServeHTTP) +} + +// InstrumentHandlerFunc wraps the given function for instrumentation. It +// otherwise works in the same way as InstrumentHandler. 
+func InstrumentHandlerFunc(handlerName string, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc { + return InstrumentHandlerFuncWithOpts( + SummaryOpts{ + Subsystem: "http", + ConstLabels: Labels{"handler": handlerName}, + }, + handlerFunc, + ) +} + +// InstrumentHandlerWithOpts works like InstrumentHandler but provides more +// flexibility (at the cost of a more complex call syntax). As +// InstrumentHandler, this function registers four metric collectors, but it +// uses the provided SummaryOpts to create them. However, the fields "Name" and +// "Help" in the SummaryOpts are ignored. "Name" is replaced by +// "requests_total", "request_duration_microseconds", "request_size_bytes", and +// "response_size_bytes", respectively. "Help" is replaced by an appropriate +// help string. The names of the variable labels of the http_requests_total +// CounterVec are "method" (get, post, etc.), and "code" (HTTP status code). +// +// If InstrumentHandlerWithOpts is called as follows, it mimics exactly the +// behavior of InstrumentHandler: +// +// prometheus.InstrumentHandlerWithOpts( +// prometheus.SummaryOpts{ +// Subsystem: "http", +// ConstLabels: prometheus.Labels{"handler": handlerName}, +// }, +// handler, +// ) +// +// Technical detail: "requests_total" is a CounterVec, not a SummaryVec, so it +// cannot use SummaryOpts. Instead, a CounterOpts struct is created internally, +// and all its fields are set to the equally named fields in the provided +// SummaryOpts. +func InstrumentHandlerWithOpts(opts SummaryOpts, handler http.Handler) http.HandlerFunc { + return InstrumentHandlerFuncWithOpts(opts, handler.ServeHTTP) +} + +// InstrumentHandlerFuncWithOpts works like InstrumentHandlerFunc but provides +// more flexibility (at the cost of a more complex call syntax). See +// InstrumentHandlerWithOpts for details how the provided SummaryOpts are used. 
+func InstrumentHandlerFuncWithOpts(opts SummaryOpts, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc { + reqCnt := NewCounterVec( + CounterOpts{ + Namespace: opts.Namespace, + Subsystem: opts.Subsystem, + Name: "requests_total", + Help: "Total number of HTTP requests made.", + ConstLabels: opts.ConstLabels, + }, + instLabels, + ) + + opts.Name = "request_duration_microseconds" + opts.Help = "The HTTP request latencies in microseconds." + reqDur := NewSummary(opts) + + opts.Name = "request_size_bytes" + opts.Help = "The HTTP request sizes in bytes." + reqSz := NewSummary(opts) + + opts.Name = "response_size_bytes" + opts.Help = "The HTTP response sizes in bytes." + resSz := NewSummary(opts) + + regReqCnt := MustRegisterOrGet(reqCnt).(*CounterVec) + regReqDur := MustRegisterOrGet(reqDur).(Summary) + regReqSz := MustRegisterOrGet(reqSz).(Summary) + regResSz := MustRegisterOrGet(resSz).(Summary) + + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + now := time.Now() + + delegate := &responseWriterDelegator{ResponseWriter: w} + out := make(chan int) + urlLen := 0 + if r.URL != nil { + urlLen = len(r.URL.String()) + } + go computeApproximateRequestSize(r, out, urlLen) + + _, cn := w.(http.CloseNotifier) + _, fl := w.(http.Flusher) + _, hj := w.(http.Hijacker) + _, rf := w.(io.ReaderFrom) + var rw http.ResponseWriter + if cn && fl && hj && rf { + rw = &fancyResponseWriterDelegator{delegate} + } else { + rw = delegate + } + handlerFunc(rw, r) + + elapsed := float64(time.Since(now)) / float64(time.Microsecond) + + method := sanitizeMethod(r.Method) + code := sanitizeCode(delegate.status) + regReqCnt.WithLabelValues(method, code).Inc() + regReqDur.Observe(elapsed) + regResSz.Observe(float64(delegate.written)) + regReqSz.Observe(float64(<-out)) + }) +} + +func computeApproximateRequestSize(r *http.Request, out chan int, s int) { + s += len(r.Method) + s += len(r.Proto) + for name, values := range r.Header { + s += len(name) + for 
_, value := range values { + s += len(value) + } + } + s += len(r.Host) + + // N.B. r.Form and r.MultipartForm are assumed to be included in r.URL. + + if r.ContentLength != -1 { + s += int(r.ContentLength) + } + out <- s +} + +type responseWriterDelegator struct { + http.ResponseWriter + + handler, method string + status int + written int64 + wroteHeader bool +} + +func (r *responseWriterDelegator) WriteHeader(code int) { + r.status = code + r.wroteHeader = true + r.ResponseWriter.WriteHeader(code) +} + +func (r *responseWriterDelegator) Write(b []byte) (int, error) { + if !r.wroteHeader { + r.WriteHeader(http.StatusOK) + } + n, err := r.ResponseWriter.Write(b) + r.written += int64(n) + return n, err +} + +type fancyResponseWriterDelegator struct { + *responseWriterDelegator +} + +func (f *fancyResponseWriterDelegator) CloseNotify() <-chan bool { + return f.ResponseWriter.(http.CloseNotifier).CloseNotify() +} + +func (f *fancyResponseWriterDelegator) Flush() { + f.ResponseWriter.(http.Flusher).Flush() +} + +func (f *fancyResponseWriterDelegator) Hijack() (net.Conn, *bufio.ReadWriter, error) { + return f.ResponseWriter.(http.Hijacker).Hijack() +} + +func (f *fancyResponseWriterDelegator) ReadFrom(r io.Reader) (int64, error) { + if !f.wroteHeader { + f.WriteHeader(http.StatusOK) + } + n, err := f.ResponseWriter.(io.ReaderFrom).ReadFrom(r) + f.written += n + return n, err +} + +func sanitizeMethod(m string) string { + switch m { + case "GET", "get": + return "get" + case "PUT", "put": + return "put" + case "HEAD", "head": + return "head" + case "POST", "post": + return "post" + case "DELETE", "delete": + return "delete" + case "CONNECT", "connect": + return "connect" + case "OPTIONS", "options": + return "options" + case "NOTIFY", "notify": + return "notify" + default: + return strings.ToLower(m) + } +} + +func sanitizeCode(s int) string { + switch s { + case 100: + return "100" + case 101: + return "101" + + case 200: + return "200" + case 201: + return "201" + case 
202: + return "202" + case 203: + return "203" + case 204: + return "204" + case 205: + return "205" + case 206: + return "206" + + case 300: + return "300" + case 301: + return "301" + case 302: + return "302" + case 304: + return "304" + case 305: + return "305" + case 307: + return "307" + + case 400: + return "400" + case 401: + return "401" + case 402: + return "402" + case 403: + return "403" + case 404: + return "404" + case 405: + return "405" + case 406: + return "406" + case 407: + return "407" + case 408: + return "408" + case 409: + return "409" + case 410: + return "410" + case 411: + return "411" + case 412: + return "412" + case 413: + return "413" + case 414: + return "414" + case 415: + return "415" + case 416: + return "416" + case 417: + return "417" + case 418: + return "418" + + case 500: + return "500" + case 501: + return "501" + case 502: + return "502" + case 503: + return "503" + case 504: + return "504" + case 505: + return "505" + + case 428: + return "428" + case 429: + return "429" + case 431: + return "431" + case 511: + return "511" + + default: + return strconv.Itoa(s) + } +} diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/client_golang/prometheus/http_test.go juju-core-2.0.0/src/github.com/prometheus/client_golang/prometheus/http_test.go --- juju-core-2.0~beta15/src/github.com/prometheus/client_golang/prometheus/http_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/client_golang/prometheus/http_test.go 2016-10-13 14:32:22.000000000 +0000 @@ -0,0 +1,121 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "net/http" + "net/http/httptest" + "testing" + "time" + + dto "github.com/prometheus/client_model/go" +) + +type respBody string + +func (b respBody) ServeHTTP(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusTeapot) + w.Write([]byte(b)) +} + +func TestInstrumentHandler(t *testing.T) { + defer func(n nower) { + now = n.(nower) + }(now) + + instant := time.Now() + end := instant.Add(30 * time.Second) + now = nowSeries(instant, end) + respBody := respBody("Howdy there!") + + hndlr := InstrumentHandler("test-handler", respBody) + + opts := SummaryOpts{ + Subsystem: "http", + ConstLabels: Labels{"handler": "test-handler"}, + } + + reqCnt := MustRegisterOrGet(NewCounterVec( + CounterOpts{ + Namespace: opts.Namespace, + Subsystem: opts.Subsystem, + Name: "requests_total", + Help: "Total number of HTTP requests made.", + ConstLabels: opts.ConstLabels, + }, + instLabels, + )).(*CounterVec) + + opts.Name = "request_duration_microseconds" + opts.Help = "The HTTP request latencies in microseconds." + reqDur := MustRegisterOrGet(NewSummary(opts)).(Summary) + + opts.Name = "request_size_bytes" + opts.Help = "The HTTP request sizes in bytes." + MustRegisterOrGet(NewSummary(opts)) + + opts.Name = "response_size_bytes" + opts.Help = "The HTTP response sizes in bytes." 
+ MustRegisterOrGet(NewSummary(opts)) + + reqCnt.Reset() + + resp := httptest.NewRecorder() + req := &http.Request{ + Method: "GET", + } + + hndlr.ServeHTTP(resp, req) + + if resp.Code != http.StatusTeapot { + t.Fatalf("expected status %d, got %d", http.StatusTeapot, resp.Code) + } + if string(resp.Body.Bytes()) != "Howdy there!" { + t.Fatalf("expected body %s, got %s", "Howdy there!", string(resp.Body.Bytes())) + } + + out := &dto.Metric{} + reqDur.Write(out) + if want, got := "test-handler", out.Label[0].GetValue(); want != got { + t.Errorf("want label value %q in reqDur, got %q", want, got) + } + if want, got := uint64(1), out.Summary.GetSampleCount(); want != got { + t.Errorf("want sample count %d in reqDur, got %d", want, got) + } + + out.Reset() + if want, got := 1, len(reqCnt.children); want != got { + t.Errorf("want %d children in reqCnt, got %d", want, got) + } + cnt, err := reqCnt.GetMetricWithLabelValues("get", "418") + if err != nil { + t.Fatal(err) + } + cnt.Write(out) + if want, got := "418", out.Label[0].GetValue(); want != got { + t.Errorf("want label value %q in reqCnt, got %q", want, got) + } + if want, got := "test-handler", out.Label[1].GetValue(); want != got { + t.Errorf("want label value %q in reqCnt, got %q", want, got) + } + if want, got := "get", out.Label[2].GetValue(); want != got { + t.Errorf("want label value %q in reqCnt, got %q", want, got) + } + if out.Counter == nil { + t.Fatal("expected non-nil counter in reqCnt") + } + if want, got := 1., out.Counter.GetValue(); want != got { + t.Errorf("want reqCnt of %f, got %f", want, got) + } +} diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/client_golang/prometheus/metric.go juju-core-2.0.0/src/github.com/prometheus/client_golang/prometheus/metric.go --- juju-core-2.0~beta15/src/github.com/prometheus/client_golang/prometheus/metric.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/client_golang/prometheus/metric.go 2016-10-13 14:32:23.000000000 
+0000 @@ -0,0 +1,166 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "strings" + + dto "github.com/prometheus/client_model/go" +) + +const separatorByte byte = 255 + +// A Metric models a single sample value with its meta data being exported to +// Prometheus. Implementers of Metric in this package inclued Gauge, Counter, +// Untyped, and Summary. Users can implement their own Metric types, but that +// should be rarely needed. See the example for SelfCollector, which is also an +// example for a user-implemented Metric. +type Metric interface { + // Desc returns the descriptor for the Metric. This method idempotently + // returns the same descriptor throughout the lifetime of the + // Metric. The returned descriptor is immutable by contract. A Metric + // unable to describe itself must return an invalid descriptor (created + // with NewInvalidDesc). + Desc() *Desc + // Write encodes the Metric into a "Metric" Protocol Buffer data + // transmission object. + // + // Implementers of custom Metric types must observe concurrency safety + // as reads of this metric may occur at any time, and any blocking + // occurs at the expense of total performance of rendering all + // registered metrics. Ideally Metric implementations should support + // concurrent readers. 
+ // + // The Prometheus client library attempts to minimize memory allocations + // and will provide a pre-existing reset dto.Metric pointer. Prometheus + // may recycle the dto.Metric proto message, so Metric implementations + // should just populate the provided dto.Metric and then should not keep + // any reference to it. + // + // While populating dto.Metric, labels must be sorted lexicographically. + // (Implementers may find LabelPairSorter useful for that.) + Write(*dto.Metric) error +} + +// Opts bundles the options for creating most Metric types. Each metric +// implementation XXX has its own XXXOpts type, but in most cases, it is just be +// an alias of this type (which might change when the requirement arises.) +// +// It is mandatory to set Name and Help to a non-empty string. All other fields +// are optional and can safely be left at their zero value. +type Opts struct { + // Namespace, Subsystem, and Name are components of the fully-qualified + // name of the Metric (created by joining these components with + // "_"). Only Name is mandatory, the others merely help structuring the + // name. Note that the fully-qualified name of the metric must be a + // valid Prometheus metric name. + Namespace string + Subsystem string + Name string + + // Help provides information about this metric. Mandatory! + // + // Metrics with the same fully-qualified name must have the same Help + // string. + Help string + + // ConstLabels are used to attach fixed labels to this metric. Metrics + // with the same fully-qualified name must have the same label names in + // their ConstLabels. + // + // Note that in most cases, labels have a value that varies during the + // lifetime of a process. Those labels are usually managed with a metric + // vector collector (like CounterVec, GaugeVec, UntypedVec). ConstLabels + // serve only special purposes. One is for the special case where the + // value of a label does not change during the lifetime of a process, + // e.g. 
if the revision of the running binary is put into a + // label. Another, more advanced purpose is if more than one Collector + // needs to collect Metrics with the same fully-qualified name. In that + // case, those Metrics must differ in the values of their + // ConstLabels. See the Collector examples. + // + // If the value of a label never changes (not even between binaries), + // that label most likely should not be a label at all (but part of the + // metric name). + ConstLabels Labels +} + +// BuildFQName joins the given three name components by "_". Empty name +// components are ignored. If the name parameter itself is empty, an empty +// string is returned, no matter what. Metric implementations included in this +// library use this function internally to generate the fully-qualified metric +// name from the name component in their Opts. Users of the library will only +// need this function if they implement their own Metric or instantiate a Desc +// (with NewDesc) directly. +func BuildFQName(namespace, subsystem, name string) string { + if name == "" { + return "" + } + switch { + case namespace != "" && subsystem != "": + return strings.Join([]string{namespace, subsystem, name}, "_") + case namespace != "": + return strings.Join([]string{namespace, name}, "_") + case subsystem != "": + return strings.Join([]string{subsystem, name}, "_") + } + return name +} + +// LabelPairSorter implements sort.Interface. It is used to sort a slice of +// dto.LabelPair pointers. This is useful for implementing the Write method of +// custom metrics. 
+type LabelPairSorter []*dto.LabelPair + +func (s LabelPairSorter) Len() int { + return len(s) +} + +func (s LabelPairSorter) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func (s LabelPairSorter) Less(i, j int) bool { + return s[i].GetName() < s[j].GetName() +} + +type hashSorter []uint64 + +func (s hashSorter) Len() int { + return len(s) +} + +func (s hashSorter) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func (s hashSorter) Less(i, j int) bool { + return s[i] < s[j] +} + +type invalidMetric struct { + desc *Desc + err error +} + +// NewInvalidMetric returns a metric whose Write method always returns the +// provided error. It is useful if a Collector finds itself unable to collect +// a metric and wishes to report an error to the registry. +func NewInvalidMetric(desc *Desc, err error) Metric { + return &invalidMetric{desc, err} +} + +func (m *invalidMetric) Desc() *Desc { return m.desc } + +func (m *invalidMetric) Write(*dto.Metric) error { return m.err } diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/client_golang/prometheus/metric_test.go juju-core-2.0.0/src/github.com/prometheus/client_golang/prometheus/metric_test.go --- juju-core-2.0~beta15/src/github.com/prometheus/client_golang/prometheus/metric_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/client_golang/prometheus/metric_test.go 2016-10-13 14:32:22.000000000 +0000 @@ -0,0 +1,35 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import "testing" + +func TestBuildFQName(t *testing.T) { + scenarios := []struct{ namespace, subsystem, name, result string }{ + {"a", "b", "c", "a_b_c"}, + {"", "b", "c", "b_c"}, + {"a", "", "c", "a_c"}, + {"", "", "c", "c"}, + {"a", "b", "", ""}, + {"a", "", "", ""}, + {"", "b", "", ""}, + {" ", "", "", ""}, + } + + for i, s := range scenarios { + if want, got := s.result, BuildFQName(s.namespace, s.subsystem, s.name); want != got { + t.Errorf("%d. want %s, got %s", i, want, got) + } + } +} diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/client_golang/prometheus/process_collector.go juju-core-2.0.0/src/github.com/prometheus/client_golang/prometheus/process_collector.go --- juju-core-2.0~beta15/src/github.com/prometheus/client_golang/prometheus/process_collector.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/client_golang/prometheus/process_collector.go 2016-10-13 14:32:23.000000000 +0000 @@ -0,0 +1,142 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package prometheus + +import "github.com/prometheus/procfs" + +type processCollector struct { + pid int + collectFn func(chan<- Metric) + pidFn func() (int, error) + cpuTotal Counter + openFDs, maxFDs Gauge + vsize, rss Gauge + startTime Gauge +} + +// NewProcessCollector returns a collector which exports the current state of +// process metrics including cpu, memory and file descriptor usage as well as +// the process start time for the given process id under the given namespace. +func NewProcessCollector(pid int, namespace string) *processCollector { + return NewProcessCollectorPIDFn( + func() (int, error) { return pid, nil }, + namespace, + ) +} + +// NewProcessCollectorPIDFn returns a collector which exports the current state +// of process metrics including cpu, memory and file descriptor usage as well +// as the process start time under the given namespace. The given pidFn is +// called on each collect and is used to determine the process to export +// metrics for. +func NewProcessCollectorPIDFn( + pidFn func() (int, error), + namespace string, +) *processCollector { + c := processCollector{ + pidFn: pidFn, + collectFn: func(chan<- Metric) {}, + + cpuTotal: NewCounter(CounterOpts{ + Namespace: namespace, + Name: "process_cpu_seconds_total", + Help: "Total user and system CPU time spent in seconds.", + }), + openFDs: NewGauge(GaugeOpts{ + Namespace: namespace, + Name: "process_open_fds", + Help: "Number of open file descriptors.", + }), + maxFDs: NewGauge(GaugeOpts{ + Namespace: namespace, + Name: "process_max_fds", + Help: "Maximum number of open file descriptors.", + }), + vsize: NewGauge(GaugeOpts{ + Namespace: namespace, + Name: "process_virtual_memory_bytes", + Help: "Virtual memory size in bytes.", + }), + rss: NewGauge(GaugeOpts{ + Namespace: namespace, + Name: "process_resident_memory_bytes", + Help: "Resident memory size in bytes.", + }), + startTime: NewGauge(GaugeOpts{ + Namespace: namespace, + Name: "process_start_time_seconds", + Help: "Start 
time of the process since unix epoch in seconds.", + }), + } + + // Set up process metric collection if supported by the runtime. + if _, err := procfs.NewStat(); err == nil { + c.collectFn = c.processCollect + } + + return &c +} + +// Describe returns all descriptions of the collector. +func (c *processCollector) Describe(ch chan<- *Desc) { + ch <- c.cpuTotal.Desc() + ch <- c.openFDs.Desc() + ch <- c.maxFDs.Desc() + ch <- c.vsize.Desc() + ch <- c.rss.Desc() + ch <- c.startTime.Desc() +} + +// Collect returns the current state of all metrics of the collector. +func (c *processCollector) Collect(ch chan<- Metric) { + c.collectFn(ch) +} + +// TODO(ts): Bring back error reporting by reverting 7faf9e7 as soon as the +// client allows users to configure the error behavior. +func (c *processCollector) processCollect(ch chan<- Metric) { + pid, err := c.pidFn() + if err != nil { + return + } + + p, err := procfs.NewProc(pid) + if err != nil { + return + } + + if stat, err := p.NewStat(); err == nil { + c.cpuTotal.Set(stat.CPUTime()) + ch <- c.cpuTotal + c.vsize.Set(float64(stat.VirtualMemory())) + ch <- c.vsize + c.rss.Set(float64(stat.ResidentMemory())) + ch <- c.rss + + if startTime, err := stat.StartTime(); err == nil { + c.startTime.Set(startTime) + ch <- c.startTime + } + } + + if fds, err := p.FileDescriptorsLen(); err == nil { + c.openFDs.Set(float64(fds)) + ch <- c.openFDs + } + + if limits, err := p.NewLimits(); err == nil { + c.maxFDs.Set(float64(limits.OpenFiles)) + ch <- c.maxFDs + } +} diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/client_golang/prometheus/process_collector_test.go juju-core-2.0.0/src/github.com/prometheus/client_golang/prometheus/process_collector_test.go --- juju-core-2.0~beta15/src/github.com/prometheus/client_golang/prometheus/process_collector_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/client_golang/prometheus/process_collector_test.go 2016-10-13 14:32:23.000000000 +0000 @@ -0,0 
+1,54 @@ +package prometheus + +import ( + "io/ioutil" + "net/http" + "net/http/httptest" + "os" + "regexp" + "testing" + + "github.com/prometheus/procfs" +) + +func TestProcessCollector(t *testing.T) { + if _, err := procfs.Self(); err != nil { + t.Skipf("skipping TestProcessCollector, procfs not available: %s", err) + } + + registry := newRegistry() + registry.Register(NewProcessCollector(os.Getpid(), "")) + registry.Register(NewProcessCollectorPIDFn( + func() (int, error) { return os.Getpid(), nil }, "foobar")) + + s := httptest.NewServer(InstrumentHandler("prometheus", registry)) + defer s.Close() + r, err := http.Get(s.URL) + if err != nil { + t.Fatal(err) + } + defer r.Body.Close() + body, err := ioutil.ReadAll(r.Body) + if err != nil { + t.Fatal(err) + } + + for _, re := range []*regexp.Regexp{ + regexp.MustCompile("process_cpu_seconds_total [0-9]"), + regexp.MustCompile("process_max_fds [1-9]"), + regexp.MustCompile("process_open_fds [1-9]"), + regexp.MustCompile("process_virtual_memory_bytes [1-9]"), + regexp.MustCompile("process_resident_memory_bytes [1-9]"), + regexp.MustCompile("process_start_time_seconds [0-9.]{10,}"), + regexp.MustCompile("foobar_process_cpu_seconds_total [0-9]"), + regexp.MustCompile("foobar_process_max_fds [1-9]"), + regexp.MustCompile("foobar_process_open_fds [1-9]"), + regexp.MustCompile("foobar_process_virtual_memory_bytes [1-9]"), + regexp.MustCompile("foobar_process_resident_memory_bytes [1-9]"), + regexp.MustCompile("foobar_process_start_time_seconds [0-9.]{10,}"), + } { + if !re.Match(body) { + t.Errorf("want body to match %s\n%s", re, body) + } + } +} diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/client_golang/prometheus/push.go juju-core-2.0.0/src/github.com/prometheus/client_golang/prometheus/push.go --- juju-core-2.0~beta15/src/github.com/prometheus/client_golang/prometheus/push.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/client_golang/prometheus/push.go 2016-10-13 
14:32:23.000000000 +0000 @@ -0,0 +1,65 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Copyright (c) 2013, The Prometheus Authors +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be found +// in the LICENSE file. + +package prometheus + +// Push triggers a metric collection by the default registry and pushes all +// collected metrics to the Pushgateway specified by url. See the Pushgateway +// documentation for detailed implications of the job and instance +// parameter. instance can be left empty. You can use just host:port or ip:port +// as url, in which case 'http://' is added automatically. You can also include +// the schema in the URL. However, do not include the '/metrics/jobs/...' part. +// +// Note that all previously pushed metrics with the same job and instance will +// be replaced with the metrics pushed by this call. (It uses HTTP method 'PUT' +// to push to the Pushgateway.) +func Push(job, instance, url string) error { + return defRegistry.Push(job, instance, url, "PUT") +} + +// PushAdd works like Push, but only previously pushed metrics with the same +// name (and the same job and instance) will be replaced. (It uses HTTP method +// 'POST' to push to the Pushgateway.) 
+func PushAdd(job, instance, url string) error { + return defRegistry.Push(job, instance, url, "POST") +} + +// PushCollectors works like Push, but it does not collect from the default +// registry. Instead, it collects from the provided collectors. It is a +// convenient way to push only a few metrics. +func PushCollectors(job, instance, url string, collectors ...Collector) error { + return pushCollectors(job, instance, url, "PUT", collectors...) +} + +// PushAddCollectors works like PushAdd, but it does not collect from the +// default registry. Instead, it collects from the provided collectors. It is a +// convenient way to push only a few metrics. +func PushAddCollectors(job, instance, url string, collectors ...Collector) error { + return pushCollectors(job, instance, url, "POST", collectors...) +} + +func pushCollectors(job, instance, url, method string, collectors ...Collector) error { + r := newRegistry() + for _, collector := range collectors { + if _, err := r.Register(collector); err != nil { + return err + } + } + return r.Push(job, instance, url, method) +} diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/client_golang/prometheus/README.md juju-core-2.0.0/src/github.com/prometheus/client_golang/prometheus/README.md --- juju-core-2.0~beta15/src/github.com/prometheus/client_golang/prometheus/README.md 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/client_golang/prometheus/README.md 2016-10-13 14:32:23.000000000 +0000 @@ -0,0 +1,53 @@ +# Overview +This is the [Prometheus](http://www.prometheus.io) telemetric +instrumentation client [Go](http://golang.org) client library. It +enable authors to define process-space metrics for their servers and +expose them through a web service interface for extraction, +aggregation, and a whole slew of other post processing techniques. 
+ +# Installing + $ go get github.com/prometheus/client_golang/prometheus + +# Example +```go +package main + +import ( + "net/http" + + "github.com/prometheus/client_golang/prometheus" +) + +var ( + indexed = prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: "my_company", + Subsystem: "indexer", + Name: "documents_indexed", + Help: "The number of documents indexed.", + }) + size = prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: "my_company", + Subsystem: "storage", + Name: "documents_total_size_bytes", + Help: "The total size of all documents in the storage.", + }) +) + +func main() { + http.Handle("/metrics", prometheus.Handler()) + + indexed.Inc() + size.Set(5) + + http.ListenAndServe(":8080", nil) +} + +func init() { + prometheus.MustRegister(indexed) + prometheus.MustRegister(size) +} +``` + +# Documentation + +[![GoDoc](https://godoc.org/github.com/prometheus/client_golang?status.png)](https://godoc.org/github.com/prometheus/client_golang) diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/client_golang/prometheus/registry.go juju-core-2.0.0/src/github.com/prometheus/client_golang/prometheus/registry.go --- juju-core-2.0~beta15/src/github.com/prometheus/client_golang/prometheus/registry.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/client_golang/prometheus/registry.go 2016-10-13 14:32:23.000000000 +0000 @@ -0,0 +1,736 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// Copyright (c) 2013, The Prometheus Authors +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be found +// in the LICENSE file. + +package prometheus + +import ( + "bytes" + "compress/gzip" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "os" + "sort" + "strings" + "sync" + + "github.com/golang/protobuf/proto" + "github.com/prometheus/common/expfmt" + + dto "github.com/prometheus/client_model/go" +) + +var ( + defRegistry = newDefaultRegistry() + errAlreadyReg = errors.New("duplicate metrics collector registration attempted") +) + +// Constants relevant to the HTTP interface. +const ( + // APIVersion is the version of the format of the exported data. This + // will match this library's version, which subscribes to the Semantic + // Versioning scheme. + APIVersion = "0.0.4" + + // DelimitedTelemetryContentType is the content type set on telemetry + // data responses in delimited protobuf format. + DelimitedTelemetryContentType = `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited` + // TextTelemetryContentType is the content type set on telemetry data + // responses in text format. + TextTelemetryContentType = `text/plain; version=` + APIVersion + // ProtoTextTelemetryContentType is the content type set on telemetry + // data responses in protobuf text format. (Only used for debugging.) + ProtoTextTelemetryContentType = `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=text` + // ProtoCompactTextTelemetryContentType is the content type set on + // telemetry data responses in protobuf compact text format. (Only used + // for debugging.) + ProtoCompactTextTelemetryContentType = `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=compact-text` + + // Constants for object pools. 
+ numBufs = 4 + numMetricFamilies = 1000 + numMetrics = 10000 + + // Capacity for the channel to collect metrics and descriptors. + capMetricChan = 1000 + capDescChan = 10 + + contentTypeHeader = "Content-Type" + contentLengthHeader = "Content-Length" + contentEncodingHeader = "Content-Encoding" + + acceptEncodingHeader = "Accept-Encoding" + acceptHeader = "Accept" +) + +// Handler returns the HTTP handler for the global Prometheus registry. It is +// already instrumented with InstrumentHandler (using "prometheus" as handler +// name). Usually the handler is used to handle the "/metrics" endpoint. +func Handler() http.Handler { + return InstrumentHandler("prometheus", defRegistry) +} + +// UninstrumentedHandler works in the same way as Handler, but the returned HTTP +// handler is not instrumented. This is useful if no instrumentation is desired +// (for whatever reason) or if the instrumentation has to happen with a +// different handler name (or with a different instrumentation approach +// altogether). See the InstrumentHandler example. +func UninstrumentedHandler() http.Handler { + return defRegistry +} + +// Register registers a new Collector to be included in metrics collection. It +// returns an error if the descriptors provided by the Collector are invalid or +// if they - in combination with descriptors of already registered Collectors - +// do not fulfill the consistency and uniqueness criteria described in the Desc +// documentation. +// +// Do not register the same Collector multiple times concurrently. (Registering +// the same Collector twice would result in an error anyway, but on top of that, +// it is not safe to do so concurrently.) +func Register(m Collector) error { + _, err := defRegistry.Register(m) + return err +} + +// MustRegister works like Register but panics where Register would have +// returned an error. 
+func MustRegister(m Collector) { + err := Register(m) + if err != nil { + panic(err) + } +} + +// RegisterOrGet works like Register but does not return an error if a Collector +// is registered that equals a previously registered Collector. (Two Collectors +// are considered equal if their Describe method yields the same set of +// descriptors.) Instead, the previously registered Collector is returned (which +// is helpful if the new and previously registered Collectors are equal but not +// identical, i.e. not pointers to the same object). +// +// As for Register, it is still not safe to call RegisterOrGet with the same +// Collector multiple times concurrently. +func RegisterOrGet(m Collector) (Collector, error) { + return defRegistry.RegisterOrGet(m) +} + +// MustRegisterOrGet works like Register but panics where RegisterOrGet would +// have returned an error. +func MustRegisterOrGet(m Collector) Collector { + existing, err := RegisterOrGet(m) + if err != nil { + panic(err) + } + return existing +} + +// Unregister unregisters the Collector that equals the Collector passed in as +// an argument. (Two Collectors are considered equal if their Describe method +// yields the same set of descriptors.) The function returns whether a Collector +// was unregistered. +func Unregister(c Collector) bool { + return defRegistry.Unregister(c) +} + +// SetMetricFamilyInjectionHook sets a function that is called whenever metrics +// are collected. The hook function must be set before metrics collection begins +// (i.e. call SetMetricFamilyInjectionHook before setting the HTTP handler.) The +// MetricFamily protobufs returned by the hook function are merged with the +// metrics collected in the usual way. +// +// This is a way to directly inject MetricFamily protobufs managed and owned by +// the caller. The caller has full responsibility. 
As no registration of the +// injected metrics has happened, there is no descriptor to check against, and +// there are no registration-time checks. If collect-time checks are disabled +// (see function EnableCollectChecks), no sanity checks are performed on the +// returned protobufs at all. If collect-checks are enabled, type and uniqueness +// checks are performed, but no further consistency checks (which would require +// knowledge of a metric descriptor). +// +// Sorting concerns: The caller is responsible for sorting the label pairs in +// each metric. However, the order of metrics will be sorted by the registry as +// it is required anyway after merging with the metric families collected +// conventionally. +// +// The function must be callable at any time and concurrently. +func SetMetricFamilyInjectionHook(hook func() []*dto.MetricFamily) { + defRegistry.metricFamilyInjectionHook = hook +} + +// PanicOnCollectError sets the behavior whether a panic is caused upon an error +// while metrics are collected and served to the HTTP endpoint. By default, an +// internal server error (status code 500) is served with an error message. +func PanicOnCollectError(b bool) { + defRegistry.panicOnCollectError = b +} + +// EnableCollectChecks enables (or disables) additional consistency checks +// during metrics collection. These additional checks are not enabled by default +// because they inflict a performance penalty and the errors they check for can +// only happen if the used Metric and Collector types have internal programming +// errors. It can be helpful to enable these checks while working with custom +// Collectors or Metrics whose correctness is not well established yet. +func EnableCollectChecks(b bool) { + defRegistry.collectChecksEnabled = b +} + +// encoder is a function that writes a dto.MetricFamily to an io.Writer in a +// certain encoding. It returns the number of bytes written and any error +// encountered. 
Note that pbutil.WriteDelimited and pbutil.MetricFamilyToText +// are encoders. +type encoder func(io.Writer, *dto.MetricFamily) (int, error) + +type registry struct { + mtx sync.RWMutex + collectorsByID map[uint64]Collector // ID is a hash of the descIDs. + descIDs map[uint64]struct{} + dimHashesByName map[string]uint64 + bufPool chan *bytes.Buffer + metricFamilyPool chan *dto.MetricFamily + metricPool chan *dto.Metric + metricFamilyInjectionHook func() []*dto.MetricFamily + + panicOnCollectError, collectChecksEnabled bool +} + +func (r *registry) Register(c Collector) (Collector, error) { + descChan := make(chan *Desc, capDescChan) + go func() { + c.Describe(descChan) + close(descChan) + }() + + newDescIDs := map[uint64]struct{}{} + newDimHashesByName := map[string]uint64{} + var collectorID uint64 // Just a sum of all desc IDs. + var duplicateDescErr error + + r.mtx.Lock() + defer r.mtx.Unlock() + // Conduct various tests... + for desc := range descChan { + + // Is the descriptor valid at all? + if desc.err != nil { + return c, fmt.Errorf("descriptor %s is invalid: %s", desc, desc.err) + } + + // Is the descID unique? + // (In other words: Is the fqName + constLabel combination unique?) + if _, exists := r.descIDs[desc.id]; exists { + duplicateDescErr = fmt.Errorf("descriptor %s already exists with the same fully-qualified name and const label values", desc) + } + // If it is not a duplicate desc in this collector, add it to + // the collectorID. (We allow duplicate descs within the same + // collector, but their existence must be a no-op.) + if _, exists := newDescIDs[desc.id]; !exists { + newDescIDs[desc.id] = struct{}{} + collectorID += desc.id + } + + // Are all the label names and the help string consistent with + // previous descriptors of the same name? + // First check existing descriptors... 
+ if dimHash, exists := r.dimHashesByName[desc.fqName]; exists { + if dimHash != desc.dimHash { + return nil, fmt.Errorf("a previously registered descriptor with the same fully-qualified name as %s has different label names or a different help string", desc) + } + } else { + // ...then check the new descriptors already seen. + if dimHash, exists := newDimHashesByName[desc.fqName]; exists { + if dimHash != desc.dimHash { + return nil, fmt.Errorf("descriptors reported by collector have inconsistent label names or help strings for the same fully-qualified name, offender is %s", desc) + } + } else { + newDimHashesByName[desc.fqName] = desc.dimHash + } + } + } + // Did anything happen at all? + if len(newDescIDs) == 0 { + return nil, errors.New("collector has no descriptors") + } + if existing, exists := r.collectorsByID[collectorID]; exists { + return existing, errAlreadyReg + } + // If the collectorID is new, but at least one of the descs existed + // before, we are in trouble. + if duplicateDescErr != nil { + return nil, duplicateDescErr + } + + // Only after all tests have passed, actually register. + r.collectorsByID[collectorID] = c + for hash := range newDescIDs { + r.descIDs[hash] = struct{}{} + } + for name, dimHash := range newDimHashesByName { + r.dimHashesByName[name] = dimHash + } + return c, nil +} + +func (r *registry) RegisterOrGet(m Collector) (Collector, error) { + existing, err := r.Register(m) + if err != nil && err != errAlreadyReg { + return nil, err + } + return existing, nil +} + +func (r *registry) Unregister(c Collector) bool { + descChan := make(chan *Desc, capDescChan) + go func() { + c.Describe(descChan) + close(descChan) + }() + + descIDs := map[uint64]struct{}{} + var collectorID uint64 // Just a sum of the desc IDs. 
+ for desc := range descChan { + if _, exists := descIDs[desc.id]; !exists { + collectorID += desc.id + descIDs[desc.id] = struct{}{} + } + } + + r.mtx.RLock() + if _, exists := r.collectorsByID[collectorID]; !exists { + r.mtx.RUnlock() + return false + } + r.mtx.RUnlock() + + r.mtx.Lock() + defer r.mtx.Unlock() + + delete(r.collectorsByID, collectorID) + for id := range descIDs { + delete(r.descIDs, id) + } + // dimHashesByName is left untouched as those must be consistent + // throughout the lifetime of a program. + return true +} + +func (r *registry) Push(job, instance, pushURL, method string) error { + if !strings.Contains(pushURL, "://") { + pushURL = "http://" + pushURL + } + if strings.HasSuffix(pushURL, "/") { + pushURL = pushURL[:len(pushURL)-1] + } + pushURL = fmt.Sprintf("%s/metrics/jobs/%s", pushURL, url.QueryEscape(job)) + if instance != "" { + pushURL += "/instances/" + url.QueryEscape(instance) + } + buf := r.getBuf() + defer r.giveBuf(buf) + if err := r.writePB(expfmt.NewEncoder(buf, expfmt.FmtProtoDelim)); err != nil { + if r.panicOnCollectError { + panic(err) + } + return err + } + req, err := http.NewRequest(method, pushURL, buf) + if err != nil { + return err + } + req.Header.Set(contentTypeHeader, DelimitedTelemetryContentType) + resp, err := http.DefaultClient.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + if resp.StatusCode != 202 { + return fmt.Errorf("unexpected status code %d while pushing to %s", resp.StatusCode, pushURL) + } + return nil +} + +func (r *registry) ServeHTTP(w http.ResponseWriter, req *http.Request) { + contentType := expfmt.Negotiate(req.Header) + buf := r.getBuf() + defer r.giveBuf(buf) + writer, encoding := decorateWriter(req, buf) + if err := r.writePB(expfmt.NewEncoder(writer, contentType)); err != nil { + if r.panicOnCollectError { + panic(err) + } + http.Error(w, "An error has occurred:\n\n"+err.Error(), http.StatusInternalServerError) + return + } + if closer, ok := writer.(io.Closer); ok { 
+ closer.Close() + } + header := w.Header() + header.Set(contentTypeHeader, string(contentType)) + header.Set(contentLengthHeader, fmt.Sprint(buf.Len())) + if encoding != "" { + header.Set(contentEncodingHeader, encoding) + } + w.Write(buf.Bytes()) +} + +func (r *registry) writePB(encoder expfmt.Encoder) error { + var metricHashes map[uint64]struct{} + if r.collectChecksEnabled { + metricHashes = make(map[uint64]struct{}) + } + metricChan := make(chan Metric, capMetricChan) + wg := sync.WaitGroup{} + + r.mtx.RLock() + metricFamiliesByName := make(map[string]*dto.MetricFamily, len(r.dimHashesByName)) + + // Scatter. + // (Collectors could be complex and slow, so we call them all at once.) + wg.Add(len(r.collectorsByID)) + go func() { + wg.Wait() + close(metricChan) + }() + for _, collector := range r.collectorsByID { + go func(collector Collector) { + defer wg.Done() + collector.Collect(metricChan) + }(collector) + } + r.mtx.RUnlock() + + // Drain metricChan in case of premature return. + defer func() { + for _ = range metricChan { + } + }() + + // Gather. + for metric := range metricChan { + // This could be done concurrently, too, but it required locking + // of metricFamiliesByName (and of metricHashes if checks are + // enabled). Most likely not worth it. + desc := metric.Desc() + metricFamily, ok := metricFamiliesByName[desc.fqName] + if !ok { + metricFamily = r.getMetricFamily() + defer r.giveMetricFamily(metricFamily) + metricFamily.Name = proto.String(desc.fqName) + metricFamily.Help = proto.String(desc.help) + metricFamiliesByName[desc.fqName] = metricFamily + } + dtoMetric := r.getMetric() + defer r.giveMetric(dtoMetric) + if err := metric.Write(dtoMetric); err != nil { + // TODO: Consider different means of error reporting so + // that a single erroneous metric could be skipped + // instead of blowing up the whole collection. 
+ return fmt.Errorf("error collecting metric %v: %s", desc, err) + } + switch { + case metricFamily.Type != nil: + // Type already set. We are good. + case dtoMetric.Gauge != nil: + metricFamily.Type = dto.MetricType_GAUGE.Enum() + case dtoMetric.Counter != nil: + metricFamily.Type = dto.MetricType_COUNTER.Enum() + case dtoMetric.Summary != nil: + metricFamily.Type = dto.MetricType_SUMMARY.Enum() + case dtoMetric.Untyped != nil: + metricFamily.Type = dto.MetricType_UNTYPED.Enum() + case dtoMetric.Histogram != nil: + metricFamily.Type = dto.MetricType_HISTOGRAM.Enum() + default: + return fmt.Errorf("empty metric collected: %s", dtoMetric) + } + if r.collectChecksEnabled { + if err := r.checkConsistency(metricFamily, dtoMetric, desc, metricHashes); err != nil { + return err + } + } + metricFamily.Metric = append(metricFamily.Metric, dtoMetric) + } + + if r.metricFamilyInjectionHook != nil { + for _, mf := range r.metricFamilyInjectionHook() { + existingMF, exists := metricFamiliesByName[mf.GetName()] + if !exists { + metricFamiliesByName[mf.GetName()] = mf + if r.collectChecksEnabled { + for _, m := range mf.Metric { + if err := r.checkConsistency(mf, m, nil, metricHashes); err != nil { + return err + } + } + } + continue + } + for _, m := range mf.Metric { + if r.collectChecksEnabled { + if err := r.checkConsistency(existingMF, m, nil, metricHashes); err != nil { + return err + } + } + existingMF.Metric = append(existingMF.Metric, m) + } + } + } + + // Now that MetricFamilies are all set, sort their Metrics + // lexicographically by their label values. + for _, mf := range metricFamiliesByName { + sort.Sort(metricSorter(mf.Metric)) + } + + // Write out MetricFamilies sorted by their name. 
+ names := make([]string, 0, len(metricFamiliesByName)) + for name := range metricFamiliesByName { + names = append(names, name) + } + sort.Strings(names) + + for _, name := range names { + if err := encoder.Encode(metricFamiliesByName[name]); err != nil { + return err + } + } + return nil +} + +func (r *registry) checkConsistency(metricFamily *dto.MetricFamily, dtoMetric *dto.Metric, desc *Desc, metricHashes map[uint64]struct{}) error { + + // Type consistency with metric family. + if metricFamily.GetType() == dto.MetricType_GAUGE && dtoMetric.Gauge == nil || + metricFamily.GetType() == dto.MetricType_COUNTER && dtoMetric.Counter == nil || + metricFamily.GetType() == dto.MetricType_SUMMARY && dtoMetric.Summary == nil || + metricFamily.GetType() == dto.MetricType_HISTOGRAM && dtoMetric.Histogram == nil || + metricFamily.GetType() == dto.MetricType_UNTYPED && dtoMetric.Untyped == nil { + return fmt.Errorf( + "collected metric %s %s is not a %s", + metricFamily.GetName(), dtoMetric, metricFamily.GetType(), + ) + } + + // Is the metric unique (i.e. no other metric with the same name and the same label values)? + h := hashNew() + h = hashAdd(h, metricFamily.GetName()) + h = hashAddByte(h, separatorByte) + // Make sure label pairs are sorted. We depend on it for the consistency + // check. Label pairs must be sorted by contract. But the point of this + // method is to check for contract violations. So we better do the sort + // now. + sort.Sort(LabelPairSorter(dtoMetric.Label)) + for _, lp := range dtoMetric.Label { + h = hashAdd(h, lp.GetValue()) + h = hashAddByte(h, separatorByte) + } + if _, exists := metricHashes[h]; exists { + return fmt.Errorf( + "collected metric %s %s was collected before with the same name and label values", + metricFamily.GetName(), dtoMetric, + ) + } + metricHashes[h] = struct{}{} + + if desc == nil { + return nil // Nothing left to check if we have no desc. + } + + // Desc consistency with metric family. 
+ if metricFamily.GetName() != desc.fqName { + return fmt.Errorf( + "collected metric %s %s has name %q but should have %q", + metricFamily.GetName(), dtoMetric, metricFamily.GetName(), desc.fqName, + ) + } + if metricFamily.GetHelp() != desc.help { + return fmt.Errorf( + "collected metric %s %s has help %q but should have %q", + metricFamily.GetName(), dtoMetric, metricFamily.GetHelp(), desc.help, + ) + } + + // Is the desc consistent with the content of the metric? + lpsFromDesc := make([]*dto.LabelPair, 0, len(dtoMetric.Label)) + lpsFromDesc = append(lpsFromDesc, desc.constLabelPairs...) + for _, l := range desc.variableLabels { + lpsFromDesc = append(lpsFromDesc, &dto.LabelPair{ + Name: proto.String(l), + }) + } + if len(lpsFromDesc) != len(dtoMetric.Label) { + return fmt.Errorf( + "labels in collected metric %s %s are inconsistent with descriptor %s", + metricFamily.GetName(), dtoMetric, desc, + ) + } + sort.Sort(LabelPairSorter(lpsFromDesc)) + for i, lpFromDesc := range lpsFromDesc { + lpFromMetric := dtoMetric.Label[i] + if lpFromDesc.GetName() != lpFromMetric.GetName() || + lpFromDesc.Value != nil && lpFromDesc.GetValue() != lpFromMetric.GetValue() { + return fmt.Errorf( + "labels in collected metric %s %s are inconsistent with descriptor %s", + metricFamily.GetName(), dtoMetric, desc, + ) + } + } + + r.mtx.RLock() // Remaining checks need the read lock. + defer r.mtx.RUnlock() + + // Is the desc registered? 
+ if _, exist := r.descIDs[desc.id]; !exist { + return fmt.Errorf( + "collected metric %s %s with unregistered descriptor %s", + metricFamily.GetName(), dtoMetric, desc, + ) + } + + return nil +} + +func (r *registry) getBuf() *bytes.Buffer { + select { + case buf := <-r.bufPool: + return buf + default: + return &bytes.Buffer{} + } +} + +func (r *registry) giveBuf(buf *bytes.Buffer) { + buf.Reset() + select { + case r.bufPool <- buf: + default: + } +} + +func (r *registry) getMetricFamily() *dto.MetricFamily { + select { + case mf := <-r.metricFamilyPool: + return mf + default: + return &dto.MetricFamily{} + } +} + +func (r *registry) giveMetricFamily(mf *dto.MetricFamily) { + mf.Reset() + select { + case r.metricFamilyPool <- mf: + default: + } +} + +func (r *registry) getMetric() *dto.Metric { + select { + case m := <-r.metricPool: + return m + default: + return &dto.Metric{} + } +} + +func (r *registry) giveMetric(m *dto.Metric) { + m.Reset() + select { + case r.metricPool <- m: + default: + } +} + +func newRegistry() *registry { + return ®istry{ + collectorsByID: map[uint64]Collector{}, + descIDs: map[uint64]struct{}{}, + dimHashesByName: map[string]uint64{}, + bufPool: make(chan *bytes.Buffer, numBufs), + metricFamilyPool: make(chan *dto.MetricFamily, numMetricFamilies), + metricPool: make(chan *dto.Metric, numMetrics), + } +} + +func newDefaultRegistry() *registry { + r := newRegistry() + r.Register(NewProcessCollector(os.Getpid(), "")) + r.Register(NewGoCollector()) + return r +} + +// decorateWriter wraps a writer to handle gzip compression if requested. It +// returns the decorated writer and the appropriate "Content-Encoding" header +// (which is empty if no compression is enabled). 
+func decorateWriter(request *http.Request, writer io.Writer) (io.Writer, string) { + header := request.Header.Get(acceptEncodingHeader) + parts := strings.Split(header, ",") + for _, part := range parts { + part := strings.TrimSpace(part) + if part == "gzip" || strings.HasPrefix(part, "gzip;") { + return gzip.NewWriter(writer), "gzip" + } + } + return writer, "" +} + +type metricSorter []*dto.Metric + +func (s metricSorter) Len() int { + return len(s) +} + +func (s metricSorter) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func (s metricSorter) Less(i, j int) bool { + if len(s[i].Label) != len(s[j].Label) { + // This should not happen. The metrics are + // inconsistent. However, we have to deal with the fact, as + // people might use custom collectors or metric family injection + // to create inconsistent metrics. So let's simply compare the + // number of labels in this case. That will still yield + // reproducible sorting. + return len(s[i].Label) < len(s[j].Label) + } + for n, lp := range s[i].Label { + vi := lp.GetValue() + vj := s[j].Label[n].GetValue() + if vi != vj { + return vi < vj + } + } + + // We should never arrive here. Multiple metrics with the same + // label set in the same scrape will lead to undefined ingestion + // behavior. However, as above, we have to provide stable sorting + // here, even for inconsistent metrics. So sort equal metrics + // by their timestamp, with missing timestamps (implying "now") + // coming last. 
+ if s[i].TimestampMs == nil { + return false + } + if s[j].TimestampMs == nil { + return true + } + return s[i].GetTimestampMs() < s[j].GetTimestampMs() +} diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/client_golang/prometheus/registry_test.go juju-core-2.0.0/src/github.com/prometheus/client_golang/prometheus/registry_test.go --- juju-core-2.0~beta15/src/github.com/prometheus/client_golang/prometheus/registry_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/client_golang/prometheus/registry_test.go 2016-10-13 14:32:23.000000000 +0000 @@ -0,0 +1,535 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Copyright (c) 2013, The Prometheus Authors +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be found +// in the LICENSE file. 
+ +package prometheus + +import ( + "bytes" + "encoding/binary" + "net/http" + "testing" + + "github.com/golang/protobuf/proto" + dto "github.com/prometheus/client_model/go" +) + +type fakeResponseWriter struct { + header http.Header + body bytes.Buffer +} + +func (r *fakeResponseWriter) Header() http.Header { + return r.header +} + +func (r *fakeResponseWriter) Write(d []byte) (l int, err error) { + return r.body.Write(d) +} + +func (r *fakeResponseWriter) WriteHeader(c int) { +} + +func testHandler(t testing.TB) { + + metricVec := NewCounterVec( + CounterOpts{ + Name: "name", + Help: "docstring", + ConstLabels: Labels{"constname": "constvalue"}, + }, + []string{"labelname"}, + ) + + metricVec.WithLabelValues("val1").Inc() + metricVec.WithLabelValues("val2").Inc() + + varintBuf := make([]byte, binary.MaxVarintLen32) + + externalMetricFamily := &dto.MetricFamily{ + Name: proto.String("externalname"), + Help: proto.String("externaldocstring"), + Type: dto.MetricType_COUNTER.Enum(), + Metric: []*dto.Metric{ + { + Label: []*dto.LabelPair{ + { + Name: proto.String("externalconstname"), + Value: proto.String("externalconstvalue"), + }, + { + Name: proto.String("externallabelname"), + Value: proto.String("externalval1"), + }, + }, + Counter: &dto.Counter{ + Value: proto.Float64(1), + }, + }, + }, + } + marshaledExternalMetricFamily, err := proto.Marshal(externalMetricFamily) + if err != nil { + t.Fatal(err) + } + var externalBuf bytes.Buffer + l := binary.PutUvarint(varintBuf, uint64(len(marshaledExternalMetricFamily))) + _, err = externalBuf.Write(varintBuf[:l]) + if err != nil { + t.Fatal(err) + } + _, err = externalBuf.Write(marshaledExternalMetricFamily) + if err != nil { + t.Fatal(err) + } + externalMetricFamilyAsBytes := externalBuf.Bytes() + externalMetricFamilyAsText := []byte(`# HELP externalname externaldocstring +# TYPE externalname counter +externalname{externalconstname="externalconstvalue",externallabelname="externalval1"} 1 +`) + 
externalMetricFamilyAsProtoText := []byte(`name: "externalname" +help: "externaldocstring" +type: COUNTER +metric: < + label: < + name: "externalconstname" + value: "externalconstvalue" + > + label: < + name: "externallabelname" + value: "externalval1" + > + counter: < + value: 1 + > +> + +`) + externalMetricFamilyAsProtoCompactText := []byte(`name:"externalname" help:"externaldocstring" type:COUNTER metric: label: counter: > +`) + + expectedMetricFamily := &dto.MetricFamily{ + Name: proto.String("name"), + Help: proto.String("docstring"), + Type: dto.MetricType_COUNTER.Enum(), + Metric: []*dto.Metric{ + { + Label: []*dto.LabelPair{ + { + Name: proto.String("constname"), + Value: proto.String("constvalue"), + }, + { + Name: proto.String("labelname"), + Value: proto.String("val1"), + }, + }, + Counter: &dto.Counter{ + Value: proto.Float64(1), + }, + }, + { + Label: []*dto.LabelPair{ + { + Name: proto.String("constname"), + Value: proto.String("constvalue"), + }, + { + Name: proto.String("labelname"), + Value: proto.String("val2"), + }, + }, + Counter: &dto.Counter{ + Value: proto.Float64(1), + }, + }, + }, + } + marshaledExpectedMetricFamily, err := proto.Marshal(expectedMetricFamily) + if err != nil { + t.Fatal(err) + } + var buf bytes.Buffer + l = binary.PutUvarint(varintBuf, uint64(len(marshaledExpectedMetricFamily))) + _, err = buf.Write(varintBuf[:l]) + if err != nil { + t.Fatal(err) + } + _, err = buf.Write(marshaledExpectedMetricFamily) + if err != nil { + t.Fatal(err) + } + expectedMetricFamilyAsBytes := buf.Bytes() + expectedMetricFamilyAsText := []byte(`# HELP name docstring +# TYPE name counter +name{constname="constvalue",labelname="val1"} 1 +name{constname="constvalue",labelname="val2"} 1 +`) + expectedMetricFamilyAsProtoText := []byte(`name: "name" +help: "docstring" +type: COUNTER +metric: < + label: < + name: "constname" + value: "constvalue" + > + label: < + name: "labelname" + value: "val1" + > + counter: < + value: 1 + > +> +metric: < + label: < + 
name: "constname" + value: "constvalue" + > + label: < + name: "labelname" + value: "val2" + > + counter: < + value: 1 + > +> + +`) + expectedMetricFamilyAsProtoCompactText := []byte(`name:"name" help:"docstring" type:COUNTER metric: label: counter: > metric: label: counter: > +`) + + externalMetricFamilyWithSameName := &dto.MetricFamily{ + Name: proto.String("name"), + Help: proto.String("inconsistent help string does not matter here"), + Type: dto.MetricType_COUNTER.Enum(), + Metric: []*dto.Metric{ + { + Label: []*dto.LabelPair{ + { + Name: proto.String("constname"), + Value: proto.String("constvalue"), + }, + { + Name: proto.String("labelname"), + Value: proto.String("different_val"), + }, + }, + Counter: &dto.Counter{ + Value: proto.Float64(42), + }, + }, + }, + } + + expectedMetricFamilyMergedWithExternalAsProtoCompactText := []byte(`name:"name" help:"docstring" type:COUNTER metric: label: counter: > metric: label: counter: > metric: label: counter: > +`) + + type output struct { + headers map[string]string + body []byte + } + + var scenarios = []struct { + headers map[string]string + out output + collector Collector + externalMF []*dto.MetricFamily + }{ + { // 0 + headers: map[string]string{ + "Accept": "foo/bar;q=0.2, dings/bums;q=0.8", + }, + out: output{ + headers: map[string]string{ + "Content-Type": `text/plain; version=0.0.4`, + }, + body: []byte{}, + }, + }, + { // 1 + headers: map[string]string{ + "Accept": "foo/bar;q=0.2, application/quark;q=0.8", + }, + out: output{ + headers: map[string]string{ + "Content-Type": `text/plain; version=0.0.4`, + }, + body: []byte{}, + }, + }, + { // 2 + headers: map[string]string{ + "Accept": "foo/bar;q=0.2, application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=bla;q=0.8", + }, + out: output{ + headers: map[string]string{ + "Content-Type": `text/plain; version=0.0.4`, + }, + body: []byte{}, + }, + }, + { // 3 + headers: map[string]string{ + "Accept": "text/plain;q=0.2, 
application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited;q=0.8", + }, + out: output{ + headers: map[string]string{ + "Content-Type": `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited`, + }, + body: []byte{}, + }, + }, + { // 4 + headers: map[string]string{ + "Accept": "application/json", + }, + out: output{ + headers: map[string]string{ + "Content-Type": `text/plain; version=0.0.4`, + }, + body: expectedMetricFamilyAsText, + }, + collector: metricVec, + }, + { // 5 + headers: map[string]string{ + "Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited", + }, + out: output{ + headers: map[string]string{ + "Content-Type": `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited`, + }, + body: expectedMetricFamilyAsBytes, + }, + collector: metricVec, + }, + { // 6 + headers: map[string]string{ + "Accept": "application/json", + }, + out: output{ + headers: map[string]string{ + "Content-Type": `text/plain; version=0.0.4`, + }, + body: externalMetricFamilyAsText, + }, + externalMF: []*dto.MetricFamily{externalMetricFamily}, + }, + { // 7 + headers: map[string]string{ + "Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited", + }, + out: output{ + headers: map[string]string{ + "Content-Type": `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited`, + }, + body: externalMetricFamilyAsBytes, + }, + externalMF: []*dto.MetricFamily{externalMetricFamily}, + }, + { // 8 + headers: map[string]string{ + "Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited", + }, + out: output{ + headers: map[string]string{ + "Content-Type": `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited`, + }, + body: bytes.Join( + [][]byte{ + externalMetricFamilyAsBytes, + 
expectedMetricFamilyAsBytes, + }, + []byte{}, + ), + }, + collector: metricVec, + externalMF: []*dto.MetricFamily{externalMetricFamily}, + }, + { // 9 + headers: map[string]string{ + "Accept": "text/plain", + }, + out: output{ + headers: map[string]string{ + "Content-Type": `text/plain; version=0.0.4`, + }, + body: []byte{}, + }, + }, + { // 10 + headers: map[string]string{ + "Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=bla;q=0.2, text/plain;q=0.5", + }, + out: output{ + headers: map[string]string{ + "Content-Type": `text/plain; version=0.0.4`, + }, + body: expectedMetricFamilyAsText, + }, + collector: metricVec, + }, + { // 11 + headers: map[string]string{ + "Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=bla;q=0.2, text/plain;q=0.5;version=0.0.4", + }, + out: output{ + headers: map[string]string{ + "Content-Type": `text/plain; version=0.0.4`, + }, + body: bytes.Join( + [][]byte{ + externalMetricFamilyAsText, + expectedMetricFamilyAsText, + }, + []byte{}, + ), + }, + collector: metricVec, + externalMF: []*dto.MetricFamily{externalMetricFamily}, + }, + { // 12 + headers: map[string]string{ + "Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited;q=0.2, text/plain;q=0.5;version=0.0.2", + }, + out: output{ + headers: map[string]string{ + "Content-Type": `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited`, + }, + body: bytes.Join( + [][]byte{ + externalMetricFamilyAsBytes, + expectedMetricFamilyAsBytes, + }, + []byte{}, + ), + }, + collector: metricVec, + externalMF: []*dto.MetricFamily{externalMetricFamily}, + }, + { // 13 + headers: map[string]string{ + "Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=text;q=0.5, application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited;q=0.4", + }, + out: output{ + headers: 
map[string]string{ + "Content-Type": `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=text`, + }, + body: bytes.Join( + [][]byte{ + externalMetricFamilyAsProtoText, + expectedMetricFamilyAsProtoText, + }, + []byte{}, + ), + }, + collector: metricVec, + externalMF: []*dto.MetricFamily{externalMetricFamily}, + }, + { // 14 + headers: map[string]string{ + "Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=compact-text", + }, + out: output{ + headers: map[string]string{ + "Content-Type": `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=compact-text`, + }, + body: bytes.Join( + [][]byte{ + externalMetricFamilyAsProtoCompactText, + expectedMetricFamilyAsProtoCompactText, + }, + []byte{}, + ), + }, + collector: metricVec, + externalMF: []*dto.MetricFamily{externalMetricFamily}, + }, + { // 15 + headers: map[string]string{ + "Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=compact-text", + }, + out: output{ + headers: map[string]string{ + "Content-Type": `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=compact-text`, + }, + body: bytes.Join( + [][]byte{ + externalMetricFamilyAsProtoCompactText, + expectedMetricFamilyMergedWithExternalAsProtoCompactText, + }, + []byte{}, + ), + }, + collector: metricVec, + externalMF: []*dto.MetricFamily{ + externalMetricFamily, + externalMetricFamilyWithSameName, + }, + }, + } + for i, scenario := range scenarios { + registry := newRegistry() + registry.collectChecksEnabled = true + + if scenario.collector != nil { + registry.Register(scenario.collector) + } + if scenario.externalMF != nil { + registry.metricFamilyInjectionHook = func() []*dto.MetricFamily { + return scenario.externalMF + } + } + writer := &fakeResponseWriter{ + header: http.Header{}, + } + handler := InstrumentHandler("prometheus", registry) + request, _ := http.NewRequest("GET", "/", 
nil) + for key, value := range scenario.headers { + request.Header.Add(key, value) + } + handler(writer, request) + + for key, value := range scenario.out.headers { + if writer.Header().Get(key) != value { + t.Errorf( + "%d. expected %q for header %q, got %q", + i, value, key, writer.Header().Get(key), + ) + } + } + + if !bytes.Equal(scenario.out.body, writer.body.Bytes()) { + t.Errorf( + "%d. expected %q for body, got %q", + i, scenario.out.body, writer.body.Bytes(), + ) + } + } +} + +func TestHandler(t *testing.T) { + testHandler(t) +} + +func BenchmarkHandler(b *testing.B) { + for i := 0; i < b.N; i++ { + testHandler(b) + } +} diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/client_golang/prometheus/summary.go juju-core-2.0.0/src/github.com/prometheus/client_golang/prometheus/summary.go --- juju-core-2.0~beta15/src/github.com/prometheus/client_golang/prometheus/summary.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/client_golang/prometheus/summary.go 2016-10-13 14:32:23.000000000 +0000 @@ -0,0 +1,538 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "fmt" + "math" + "sort" + "sync" + "time" + + "github.com/beorn7/perks/quantile" + "github.com/golang/protobuf/proto" + + dto "github.com/prometheus/client_model/go" +) + +// quantileLabel is used for the label that defines the quantile in a +// summary. 
const quantileLabel = "quantile"

// A Summary captures individual observations from an event or sample stream and
// summarizes them in a manner similar to traditional summary statistics: 1. sum
// of observations, 2. observation count, 3. rank estimations.
//
// A typical use-case is the observation of request latencies. By default, a
// Summary provides the median, the 90th and the 99th percentile of the latency
// as rank estimations.
//
// Note that the rank estimations cannot be aggregated in a meaningful way with
// the Prometheus query language (i.e. you cannot average or add them). If you
// need aggregatable quantiles (e.g. you want the 99th percentile latency of all
// queries served across all instances of a service), consider the Histogram
// metric type. See the Prometheus documentation for more details.
//
// To create Summary instances, use NewSummary.
type Summary interface {
	Metric
	Collector

	// Observe adds a single observation to the summary.
	Observe(float64)
}

var (
	// DefObjectives are the default Summary quantile values, mapping each
	// quantile rank to its allowed absolute error.
	DefObjectives = map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}

	// errQuantileLabelNotAllowed is panicked with when a summary is created
	// with the reserved "quantile" label.
	errQuantileLabelNotAllowed = fmt.Errorf(
		"%q is not allowed as label name in summaries", quantileLabel,
	)
)

// Default values for SummaryOpts.
const (
	// DefMaxAge is the default duration for which observations stay
	// relevant.
	DefMaxAge time.Duration = 10 * time.Minute
	// DefAgeBuckets is the default number of buckets used to calculate the
	// age of observations.
	DefAgeBuckets = 5
	// DefBufCap is the standard buffer size for collecting Summary observations.
	DefBufCap = 500
)

// SummaryOpts bundles the options for creating a Summary metric. It is
// mandatory to set Name and Help to a non-empty string. All other fields are
// optional and can safely be left at their zero value.
type SummaryOpts struct {
	// Namespace, Subsystem, and Name are components of the fully-qualified
	// name of the Summary (created by joining these components with
	// "_"). Only Name is mandatory, the others merely help structuring the
	// name. Note that the fully-qualified name of the Summary must be a
	// valid Prometheus metric name.
	Namespace string
	Subsystem string
	Name      string

	// Help provides information about this Summary. Mandatory!
	//
	// Metrics with the same fully-qualified name must have the same Help
	// string.
	Help string

	// ConstLabels are used to attach fixed labels to this
	// Summary. Summaries with the same fully-qualified name must have the
	// same label names in their ConstLabels.
	//
	// Note that in most cases, labels have a value that varies during the
	// lifetime of a process. Those labels are usually managed with a
	// SummaryVec. ConstLabels serve only special purposes. One is for the
	// special case where the value of a label does not change during the
	// lifetime of a process, e.g. if the revision of the running binary is
	// put into a label. Another, more advanced purpose is if more than one
	// Collector needs to collect Summaries with the same fully-qualified
	// name. In that case, those Summaries must differ in the values of
	// their ConstLabels. See the Collector examples.
	//
	// If the value of a label never changes (not even between binaries),
	// that label most likely should not be a label at all (but part of the
	// metric name).
	ConstLabels Labels

	// Objectives defines the quantile rank estimates with their respective
	// absolute error. If Objectives[q] = e, then the value reported
	// for q will be the φ-quantile value for some φ between q-e and q+e.
	// The default value is DefObjectives.
	Objectives map[float64]float64

	// MaxAge defines the duration for which an observation stays relevant
	// for the summary. Must be positive. The default value is DefMaxAge.
	MaxAge time.Duration

	// AgeBuckets is the number of buckets used to exclude observations that
	// are older than MaxAge from the summary. A higher number has a
	// resource penalty, so only increase it if the higher resolution is
	// really required. For very high observation rates, you might want to
	// reduce the number of age buckets. With only one age bucket, you will
	// effectively see a complete reset of the summary each time MaxAge has
	// passed. The default value is DefAgeBuckets.
	AgeBuckets uint32

	// BufCap defines the default sample stream buffer size. The default
	// value of DefBufCap should suffice for most uses. If there is a need
	// to increase the value, a multiple of 500 is recommended (because that
	// is the internal buffer size of the underlying package
	// "github.com/beorn7/perks/quantile").
	BufCap uint32
}

// TODO: The Merge method of perks/quantile is actually not working as
// advertised - and it might be unfixable, as the underlying algorithm is
// apparently not capable of merging summaries in the first place. To avoid
// using Merge, we are currently adding observations to _each_ age bucket,
// i.e. the effort to add a sample is essentially multiplied by the number of
// age buckets. When rotating age buckets, we empty the previous head
// stream. On scrape time, we simply take the quantiles from the head stream
// (no merging required). Result: More effort on observation time, less effort
// on scrape time, which is exactly the opposite of what we try to accomplish,
// but at least the results are correct.
//
// The quite elegant previous contraption to merge the age buckets efficiently
// on scrape time (see code up commit 6b9530d72ea715f0ba612c0120e6e09fbf1d49d0)
// can't be used anymore.

// NewSummary creates a new Summary based on the provided SummaryOpts.
func NewSummary(opts SummaryOpts) Summary {
	return newSummary(
		NewDesc(
			BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
			opts.Help,
			nil, // A plain Summary has no variable labels.
			opts.ConstLabels,
		),
		opts,
	)
}

// newSummary validates opts, applies the Def* defaults for zero-valued
// fields, and constructs a summary for the given Desc and label values. It
// panics on inconsistent label cardinality, on use of the reserved "quantile"
// label, and on a negative MaxAge. Shared by NewSummary and SummaryVec.
func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary {
	if len(desc.variableLabels) != len(labelValues) {
		panic(errInconsistentCardinality)
	}

	// The "quantile" label is reserved for exposing the quantile rank of
	// each sample, so reject it both as variable and as constant label.
	for _, n := range desc.variableLabels {
		if n == quantileLabel {
			panic(errQuantileLabelNotAllowed)
		}
	}
	for _, lp := range desc.constLabelPairs {
		if lp.GetName() == quantileLabel {
			panic(errQuantileLabelNotAllowed)
		}
	}

	if len(opts.Objectives) == 0 {
		opts.Objectives = DefObjectives
	}

	if opts.MaxAge < 0 {
		panic(fmt.Errorf("illegal max age MaxAge=%v", opts.MaxAge))
	}
	if opts.MaxAge == 0 {
		opts.MaxAge = DefMaxAge
	}

	if opts.AgeBuckets == 0 {
		opts.AgeBuckets = DefAgeBuckets
	}

	if opts.BufCap == 0 {
		opts.BufCap = DefBufCap
	}

	s := &summary{
		desc: desc,

		objectives:       opts.Objectives,
		sortedObjectives: make([]float64, 0, len(opts.Objectives)),

		labelPairs: makeLabelPairs(desc, labelValues),

		hotBuf:  make([]float64, 0, opts.BufCap),
		coldBuf: make([]float64, 0, opts.BufCap),
		// Each age bucket covers MaxAge/AgeBuckets of wall-clock time.
		streamDuration: opts.MaxAge / time.Duration(opts.AgeBuckets),
	}
	s.headStreamExpTime = time.Now().Add(s.streamDuration)
	s.hotBufExpTime = s.headStreamExpTime

	for i := uint32(0); i < opts.AgeBuckets; i++ {
		s.streams = append(s.streams, s.newStream())
	}
	s.headStream = s.streams[0]

	// Quantiles are later reported in ascending rank order.
	for qu := range s.objectives {
		s.sortedObjectives = append(s.sortedObjectives, qu)
	}
	sort.Float64s(s.sortedObjectives)

	s.Init(s) // Init self-collection.
	return s
}

// summary is the standard implementation of the Summary interface.
// Observations are appended to hotBuf under bufMtx; the buffer is
// periodically swapped with coldBuf and flushed into the quantile streams
// under mtx.
type summary struct {
	SelfCollector

	bufMtx sync.Mutex // Protects hotBuf and hotBufExpTime.
	mtx    sync.Mutex // Protects every other moving part.
	// Lock bufMtx before mtx if both are needed.

	desc *Desc

	objectives       map[float64]float64
	sortedObjectives []float64

	labelPairs []*dto.LabelPair

	sum float64 // Running sum of all flushed observations.
	cnt uint64  // Running count of all flushed observations.

	hotBuf, coldBuf []float64

	streams                          []*quantile.Stream
	streamDuration                   time.Duration
	headStream                       *quantile.Stream
	headStreamIdx                    int
	headStreamExpTime, hotBufExpTime time.Time
}

// Desc implements Metric.
func (s *summary) Desc() *Desc {
	return s.desc
}

// Observe implements Summary. It buffers the observation and triggers an
// asynchronous flush when the hot buffer is full or has expired.
func (s *summary) Observe(v float64) {
	s.bufMtx.Lock()
	defer s.bufMtx.Unlock()

	now := time.Now()
	if now.After(s.hotBufExpTime) {
		s.asyncFlush(now)
	}
	s.hotBuf = append(s.hotBuf, v)
	if len(s.hotBuf) == cap(s.hotBuf) {
		s.asyncFlush(now)
	}
}

// Write implements Metric. It flushes pending observations and reports count,
// sum, and the configured quantiles taken from the current head stream.
func (s *summary) Write(out *dto.Metric) error {
	sum := &dto.Summary{}
	qs := make([]*dto.Quantile, 0, len(s.objectives))

	s.bufMtx.Lock()
	s.mtx.Lock()
	// Swap bufs even if hotBuf is empty to set new hotBufExpTime.
	s.swapBufs(time.Now())
	s.bufMtx.Unlock()

	s.flushColdBuf()
	sum.SampleCount = proto.Uint64(s.cnt)
	sum.SampleSum = proto.Float64(s.sum)

	for _, rank := range s.sortedObjectives {
		var q float64
		if s.headStream.Count() == 0 {
			// No observations within MaxAge: report NaN.
			q = math.NaN()
		} else {
			q = s.headStream.Query(rank)
		}
		qs = append(qs, &dto.Quantile{
			Quantile: proto.Float64(rank),
			Value:    proto.Float64(q),
		})
	}

	s.mtx.Unlock()

	if len(qs) > 0 {
		sort.Sort(quantSort(qs))
	}
	sum.Quantile = qs

	out.Summary = sum
	out.Label = s.labelPairs
	return nil
}

// newStream creates a quantile stream targeted at the configured objectives.
func (s *summary) newStream() *quantile.Stream {
	return quantile.NewTargeted(s.objectives)
}

// asyncFlush needs bufMtx locked.
func (s *summary) asyncFlush(now time.Time) {
	s.mtx.Lock()
	s.swapBufs(now)

	// Unblock the original goroutine that was responsible for the mutation
	// that triggered the compaction. But hold onto the global non-buffer
	// state mutex until the operation finishes.
	go func() {
		s.flushColdBuf()
		s.mtx.Unlock()
	}()
}

// maybeRotateStreams needs mtx AND bufMtx locked.
func (s *summary) maybeRotateStreams() {
	// Advance the head stream until its expiry catches up with the hot
	// buffer's, resetting every stream that has fallen out of the window.
	for !s.hotBufExpTime.Equal(s.headStreamExpTime) {
		s.headStream.Reset()
		s.headStreamIdx++
		if s.headStreamIdx >= len(s.streams) {
			s.headStreamIdx = 0
		}
		s.headStream = s.streams[s.headStreamIdx]
		s.headStreamExpTime = s.headStreamExpTime.Add(s.streamDuration)
	}
}

// flushColdBuf needs mtx locked.
func (s *summary) flushColdBuf() {
	for _, v := range s.coldBuf {
		// Every observation feeds every age bucket because perks'
		// Merge cannot be relied upon (see the TODO in this file).
		for _, stream := range s.streams {
			stream.Insert(v)
		}
		s.cnt++
		s.sum += v
	}
	s.coldBuf = s.coldBuf[0:0]
	s.maybeRotateStreams()
}

// swapBufs needs mtx AND bufMtx locked, coldBuf must be empty.
func (s *summary) swapBufs(now time.Time) {
	if len(s.coldBuf) != 0 {
		panic("coldBuf is not empty")
	}
	s.hotBuf, s.coldBuf = s.coldBuf, s.hotBuf
	// hotBuf is now empty and gets new expiration set.
	for now.After(s.hotBufExpTime) {
		s.hotBufExpTime = s.hotBufExpTime.Add(s.streamDuration)
	}
}

// quantSort implements sort.Interface, ordering quantiles by ascending rank.
type quantSort []*dto.Quantile

func (s quantSort) Len() int {
	return len(s)
}

func (s quantSort) Swap(i, j int) {
	s[i], s[j] = s[j], s[i]
}

func (s quantSort) Less(i, j int) bool {
	return s[i].GetQuantile() < s[j].GetQuantile()
}

// SummaryVec is a Collector that bundles a set of Summaries that all share the
// same Desc, but have different values for their variable labels. This is used
// if you want to count the same thing partitioned by various dimensions
// (e.g. HTTP request latencies, partitioned by status code and method). Create
// instances with NewSummaryVec.
type SummaryVec struct {
	MetricVec
}

// NewSummaryVec creates a new SummaryVec based on the provided SummaryOpts and
// partitioned by the given label names. At least one label name must be
// provided.
+func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec { + desc := NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + labelNames, + opts.ConstLabels, + ) + return &SummaryVec{ + MetricVec: MetricVec{ + children: map[uint64]Metric{}, + desc: desc, + newMetric: func(lvs ...string) Metric { + return newSummary(desc, opts, lvs...) + }, + }, + } +} + +// GetMetricWithLabelValues replaces the method of the same name in +// MetricVec. The difference is that this method returns a Summary and not a +// Metric so that no type conversion is required. +func (m *SummaryVec) GetMetricWithLabelValues(lvs ...string) (Summary, error) { + metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...) + if metric != nil { + return metric.(Summary), err + } + return nil, err +} + +// GetMetricWith replaces the method of the same name in MetricVec. The +// difference is that this method returns a Summary and not a Metric so that no +// type conversion is required. +func (m *SummaryVec) GetMetricWith(labels Labels) (Summary, error) { + metric, err := m.MetricVec.GetMetricWith(labels) + if metric != nil { + return metric.(Summary), err + } + return nil, err +} + +// WithLabelValues works as GetMetricWithLabelValues, but panics where +// GetMetricWithLabelValues would have returned an error. By not returning an +// error, WithLabelValues allows shortcuts like +// myVec.WithLabelValues("404", "GET").Observe(42.21) +func (m *SummaryVec) WithLabelValues(lvs ...string) Summary { + return m.MetricVec.WithLabelValues(lvs...).(Summary) +} + +// With works as GetMetricWith, but panics where GetMetricWithLabels would have +// returned an error. 
By not returning an error, With allows shortcuts like +// myVec.With(Labels{"code": "404", "method": "GET"}).Observe(42.21) +func (m *SummaryVec) With(labels Labels) Summary { + return m.MetricVec.With(labels).(Summary) +} + +type constSummary struct { + desc *Desc + count uint64 + sum float64 + quantiles map[float64]float64 + labelPairs []*dto.LabelPair +} + +func (s *constSummary) Desc() *Desc { + return s.desc +} + +func (s *constSummary) Write(out *dto.Metric) error { + sum := &dto.Summary{} + qs := make([]*dto.Quantile, 0, len(s.quantiles)) + + sum.SampleCount = proto.Uint64(s.count) + sum.SampleSum = proto.Float64(s.sum) + + for rank, q := range s.quantiles { + qs = append(qs, &dto.Quantile{ + Quantile: proto.Float64(rank), + Value: proto.Float64(q), + }) + } + + if len(qs) > 0 { + sort.Sort(quantSort(qs)) + } + sum.Quantile = qs + + out.Summary = sum + out.Label = s.labelPairs + + return nil +} + +// NewConstSummary returns a metric representing a Prometheus summary with fixed +// values for the count, sum, and quantiles. As those parameters cannot be +// changed, the returned value does not implement the Summary interface (but +// only the Metric interface). Users of this package will not have much use for +// it in regular operations. However, when implementing custom Collectors, it is +// useful as a throw-away metric that is generated on the fly to send it to +// Prometheus in the Collect method. +// +// quantiles maps ranks to quantile values. For example, a median latency of +// 0.23s and a 99th percentile latency of 0.56s would be expressed as: +// map[float64]float64{0.5: 0.23, 0.99: 0.56} +// +// NewConstSummary returns an error if the length of labelValues is not +// consistent with the variable labels in Desc. 
func NewConstSummary(
	desc *Desc,
	count uint64,
	sum float64,
	quantiles map[float64]float64,
	labelValues ...string,
) (Metric, error) {
	if len(desc.variableLabels) != len(labelValues) {
		return nil, errInconsistentCardinality
	}
	return &constSummary{
		desc:       desc,
		count:      count,
		sum:        sum,
		quantiles:  quantiles,
		labelPairs: makeLabelPairs(desc, labelValues),
	}, nil
}

// MustNewConstSummary is a version of NewConstSummary that panics where
// NewConstSummary would have returned an error.
func MustNewConstSummary(
	desc *Desc,
	count uint64,
	sum float64,
	quantiles map[float64]float64,
	labelValues ...string,
) Metric {
	m, err := NewConstSummary(desc, count, sum, quantiles, labelValues...)
	if err != nil {
		panic(err)
	}
	return m
}
diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/client_golang/prometheus/summary_test.go juju-core-2.0.0/src/github.com/prometheus/client_golang/prometheus/summary_test.go
--- juju-core-2.0~beta15/src/github.com/prometheus/client_golang/prometheus/summary_test.go	1970-01-01 00:00:00.000000000 +0000
+++ juju-core-2.0.0/src/github.com/prometheus/client_golang/prometheus/summary_test.go	2016-10-13 14:32:23.000000000 +0000
@@ -0,0 +1,347 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +package prometheus + +import ( + "math" + "math/rand" + "sort" + "sync" + "testing" + "testing/quick" + "time" + + dto "github.com/prometheus/client_model/go" +) + +func benchmarkSummaryObserve(w int, b *testing.B) { + b.StopTimer() + + wg := new(sync.WaitGroup) + wg.Add(w) + + g := new(sync.WaitGroup) + g.Add(1) + + s := NewSummary(SummaryOpts{}) + + for i := 0; i < w; i++ { + go func() { + g.Wait() + + for i := 0; i < b.N; i++ { + s.Observe(float64(i)) + } + + wg.Done() + }() + } + + b.StartTimer() + g.Done() + wg.Wait() +} + +func BenchmarkSummaryObserve1(b *testing.B) { + benchmarkSummaryObserve(1, b) +} + +func BenchmarkSummaryObserve2(b *testing.B) { + benchmarkSummaryObserve(2, b) +} + +func BenchmarkSummaryObserve4(b *testing.B) { + benchmarkSummaryObserve(4, b) +} + +func BenchmarkSummaryObserve8(b *testing.B) { + benchmarkSummaryObserve(8, b) +} + +func benchmarkSummaryWrite(w int, b *testing.B) { + b.StopTimer() + + wg := new(sync.WaitGroup) + wg.Add(w) + + g := new(sync.WaitGroup) + g.Add(1) + + s := NewSummary(SummaryOpts{}) + + for i := 0; i < 1000000; i++ { + s.Observe(float64(i)) + } + + for j := 0; j < w; j++ { + outs := make([]dto.Metric, b.N) + + go func(o []dto.Metric) { + g.Wait() + + for i := 0; i < b.N; i++ { + s.Write(&o[i]) + } + + wg.Done() + }(outs) + } + + b.StartTimer() + g.Done() + wg.Wait() +} + +func BenchmarkSummaryWrite1(b *testing.B) { + benchmarkSummaryWrite(1, b) +} + +func BenchmarkSummaryWrite2(b *testing.B) { + benchmarkSummaryWrite(2, b) +} + +func BenchmarkSummaryWrite4(b *testing.B) { + benchmarkSummaryWrite(4, b) +} + +func BenchmarkSummaryWrite8(b *testing.B) { + benchmarkSummaryWrite(8, b) +} + +func TestSummaryConcurrency(t *testing.T) { + if testing.Short() { + t.Skip("Skipping test in short mode.") + } + + rand.Seed(42) + + it := func(n uint32) bool { + mutations := int(n%1e4 + 1e4) + concLevel := int(n%5 + 1) + total := mutations * concLevel + + var start, end sync.WaitGroup + start.Add(1) + end.Add(concLevel) + 
+ sum := NewSummary(SummaryOpts{ + Name: "test_summary", + Help: "helpless", + }) + + allVars := make([]float64, total) + var sampleSum float64 + for i := 0; i < concLevel; i++ { + vals := make([]float64, mutations) + for j := 0; j < mutations; j++ { + v := rand.NormFloat64() + vals[j] = v + allVars[i*mutations+j] = v + sampleSum += v + } + + go func(vals []float64) { + start.Wait() + for _, v := range vals { + sum.Observe(v) + } + end.Done() + }(vals) + } + sort.Float64s(allVars) + start.Done() + end.Wait() + + m := &dto.Metric{} + sum.Write(m) + if got, want := int(*m.Summary.SampleCount), total; got != want { + t.Errorf("got sample count %d, want %d", got, want) + } + if got, want := *m.Summary.SampleSum, sampleSum; math.Abs((got-want)/want) > 0.001 { + t.Errorf("got sample sum %f, want %f", got, want) + } + + objectives := make([]float64, 0, len(DefObjectives)) + for qu := range DefObjectives { + objectives = append(objectives, qu) + } + sort.Float64s(objectives) + + for i, wantQ := range objectives { + ε := DefObjectives[wantQ] + gotQ := *m.Summary.Quantile[i].Quantile + gotV := *m.Summary.Quantile[i].Value + min, max := getBounds(allVars, wantQ, ε) + if gotQ != wantQ { + t.Errorf("got quantile %f, want %f", gotQ, wantQ) + } + if gotV < min || gotV > max { + t.Errorf("got %f for quantile %f, want [%f,%f]", gotV, gotQ, min, max) + } + } + return true + } + + if err := quick.Check(it, nil); err != nil { + t.Error(err) + } +} + +func TestSummaryVecConcurrency(t *testing.T) { + if testing.Short() { + t.Skip("Skipping test in short mode.") + } + + rand.Seed(42) + + objectives := make([]float64, 0, len(DefObjectives)) + for qu := range DefObjectives { + + objectives = append(objectives, qu) + } + sort.Float64s(objectives) + + it := func(n uint32) bool { + mutations := int(n%1e4 + 1e4) + concLevel := int(n%7 + 1) + vecLength := int(n%3 + 1) + + var start, end sync.WaitGroup + start.Add(1) + end.Add(concLevel) + + sum := NewSummaryVec( + SummaryOpts{ + Name: 
"test_summary", + Help: "helpless", + }, + []string{"label"}, + ) + + allVars := make([][]float64, vecLength) + sampleSums := make([]float64, vecLength) + for i := 0; i < concLevel; i++ { + vals := make([]float64, mutations) + picks := make([]int, mutations) + for j := 0; j < mutations; j++ { + v := rand.NormFloat64() + vals[j] = v + pick := rand.Intn(vecLength) + picks[j] = pick + allVars[pick] = append(allVars[pick], v) + sampleSums[pick] += v + } + + go func(vals []float64) { + start.Wait() + for i, v := range vals { + sum.WithLabelValues(string('A' + picks[i])).Observe(v) + } + end.Done() + }(vals) + } + for _, vars := range allVars { + sort.Float64s(vars) + } + start.Done() + end.Wait() + + for i := 0; i < vecLength; i++ { + m := &dto.Metric{} + s := sum.WithLabelValues(string('A' + i)) + s.Write(m) + if got, want := int(*m.Summary.SampleCount), len(allVars[i]); got != want { + t.Errorf("got sample count %d for label %c, want %d", got, 'A'+i, want) + } + if got, want := *m.Summary.SampleSum, sampleSums[i]; math.Abs((got-want)/want) > 0.001 { + t.Errorf("got sample sum %f for label %c, want %f", got, 'A'+i, want) + } + for j, wantQ := range objectives { + ε := DefObjectives[wantQ] + gotQ := *m.Summary.Quantile[j].Quantile + gotV := *m.Summary.Quantile[j].Value + min, max := getBounds(allVars[i], wantQ, ε) + if gotQ != wantQ { + t.Errorf("got quantile %f for label %c, want %f", gotQ, 'A'+i, wantQ) + } + if gotV < min || gotV > max { + t.Errorf("got %f for quantile %f for label %c, want [%f,%f]", gotV, gotQ, 'A'+i, min, max) + } + } + } + return true + } + + if err := quick.Check(it, nil); err != nil { + t.Error(err) + } +} + +func TestSummaryDecay(t *testing.T) { + if testing.Short() { + t.Skip("Skipping test in short mode.") + // More because it depends on timing than because it is particularly long... 
+ } + + sum := NewSummary(SummaryOpts{ + Name: "test_summary", + Help: "helpless", + MaxAge: 100 * time.Millisecond, + Objectives: map[float64]float64{0.1: 0.001}, + AgeBuckets: 10, + }) + + m := &dto.Metric{} + i := 0 + tick := time.NewTicker(time.Millisecond) + for _ = range tick.C { + i++ + sum.Observe(float64(i)) + if i%10 == 0 { + sum.Write(m) + if got, want := *m.Summary.Quantile[0].Value, math.Max(float64(i)/10, float64(i-90)); math.Abs(got-want) > 20 { + t.Errorf("%d. got %f, want %f", i, got, want) + } + m.Reset() + } + if i >= 1000 { + break + } + } + tick.Stop() + // Wait for MaxAge without observations and make sure quantiles are NaN. + time.Sleep(100 * time.Millisecond) + sum.Write(m) + if got := *m.Summary.Quantile[0].Value; !math.IsNaN(got) { + t.Errorf("got %f, want NaN after expiration", got) + } +} + +func getBounds(vars []float64, q, ε float64) (min, max float64) { + // TODO: This currently tolerates an error of up to 2*ε. The error must + // be at most ε, but for some reason, it's sometimes slightly + // higher. That's a bug. + n := float64(len(vars)) + lower := int((q - 2*ε) * n) + upper := int(math.Ceil((q + 2*ε) * n)) + min = vars[0] + if lower > 1 { + min = vars[lower-1] + } + max = vars[len(vars)-1] + if upper < len(vars) { + max = vars[upper-1] + } + return +} diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/client_golang/prometheus/untyped.go juju-core-2.0.0/src/github.com/prometheus/client_golang/prometheus/untyped.go --- juju-core-2.0~beta15/src/github.com/prometheus/client_golang/prometheus/untyped.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/client_golang/prometheus/untyped.go 2016-10-13 14:32:23.000000000 +0000 @@ -0,0 +1,142 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +// Untyped is a Metric that represents a single numerical value that can +// arbitrarily go up and down. +// +// An Untyped metric works the same as a Gauge. The only difference is that to +// no type information is implied. +// +// To create Untyped instances, use NewUntyped. +type Untyped interface { + Metric + Collector + + // Set sets the Untyped metric to an arbitrary value. + Set(float64) + // Inc increments the Untyped metric by 1. + Inc() + // Dec decrements the Untyped metric by 1. + Dec() + // Add adds the given value to the Untyped metric. (The value can be + // negative, resulting in a decrease.) + Add(float64) + // Sub subtracts the given value from the Untyped metric. (The value can + // be negative, resulting in an increase.) + Sub(float64) +} + +// UntypedOpts is an alias for Opts. See there for doc comments. +type UntypedOpts Opts + +// NewUntyped creates a new Untyped metric from the provided UntypedOpts. +func NewUntyped(opts UntypedOpts) Untyped { + return newValue(NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + nil, + opts.ConstLabels, + ), UntypedValue, 0) +} + +// UntypedVec is a Collector that bundles a set of Untyped metrics that all +// share the same Desc, but have different values for their variable +// labels. This is used if you want to count the same thing partitioned by +// various dimensions. Create instances with NewUntypedVec. 
+type UntypedVec struct { + MetricVec +} + +// NewUntypedVec creates a new UntypedVec based on the provided UntypedOpts and +// partitioned by the given label names. At least one label name must be +// provided. +func NewUntypedVec(opts UntypedOpts, labelNames []string) *UntypedVec { + desc := NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + labelNames, + opts.ConstLabels, + ) + return &UntypedVec{ + MetricVec: MetricVec{ + children: map[uint64]Metric{}, + desc: desc, + newMetric: func(lvs ...string) Metric { + return newValue(desc, UntypedValue, 0, lvs...) + }, + }, + } +} + +// GetMetricWithLabelValues replaces the method of the same name in +// MetricVec. The difference is that this method returns an Untyped and not a +// Metric so that no type conversion is required. +func (m *UntypedVec) GetMetricWithLabelValues(lvs ...string) (Untyped, error) { + metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...) + if metric != nil { + return metric.(Untyped), err + } + return nil, err +} + +// GetMetricWith replaces the method of the same name in MetricVec. The +// difference is that this method returns an Untyped and not a Metric so that no +// type conversion is required. +func (m *UntypedVec) GetMetricWith(labels Labels) (Untyped, error) { + metric, err := m.MetricVec.GetMetricWith(labels) + if metric != nil { + return metric.(Untyped), err + } + return nil, err +} + +// WithLabelValues works as GetMetricWithLabelValues, but panics where +// GetMetricWithLabelValues would have returned an error. By not returning an +// error, WithLabelValues allows shortcuts like +// myVec.WithLabelValues("404", "GET").Add(42) +func (m *UntypedVec) WithLabelValues(lvs ...string) Untyped { + return m.MetricVec.WithLabelValues(lvs...).(Untyped) +} + +// With works as GetMetricWith, but panics where GetMetricWithLabels would have +// returned an error. 
By not returning an error, With allows shortcuts like +// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42) +func (m *UntypedVec) With(labels Labels) Untyped { + return m.MetricVec.With(labels).(Untyped) +} + +// UntypedFunc is an Untyped whose value is determined at collect time by +// calling a provided function. +// +// To create UntypedFunc instances, use NewUntypedFunc. +type UntypedFunc interface { + Metric + Collector +} + +// NewUntypedFunc creates a new UntypedFunc based on the provided +// UntypedOpts. The value reported is determined by calling the given function +// from within the Write method. Take into account that metric collection may +// happen concurrently. If that results in concurrent calls to Write, like in +// the case where an UntypedFunc is directly registered with Prometheus, the +// provided function must be concurrency-safe. +func NewUntypedFunc(opts UntypedOpts, function func() float64) UntypedFunc { + return newValueFunc(NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + nil, + opts.ConstLabels, + ), UntypedValue, function) +} diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/client_golang/prometheus/value.go juju-core-2.0.0/src/github.com/prometheus/client_golang/prometheus/value.go --- juju-core-2.0~beta15/src/github.com/prometheus/client_golang/prometheus/value.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/client_golang/prometheus/value.go 2016-10-13 14:32:23.000000000 +0000 @@ -0,0 +1,234 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "errors" + "fmt" + "math" + "sort" + "sync/atomic" + + dto "github.com/prometheus/client_model/go" + + "github.com/golang/protobuf/proto" +) + +// ValueType is an enumeration of metric types that represent a simple value. +type ValueType int + +// Possible values for the ValueType enum. +const ( + _ ValueType = iota + CounterValue + GaugeValue + UntypedValue +) + +var errInconsistentCardinality = errors.New("inconsistent label cardinality") + +// value is a generic metric for simple values. It implements Metric, Collector, +// Counter, Gauge, and Untyped. Its effective type is determined by +// ValueType. This is a low-level building block used by the library to back the +// implementations of Counter, Gauge, and Untyped. +type value struct { + // valBits containst the bits of the represented float64 value. It has + // to go first in the struct to guarantee alignment for atomic + // operations. http://golang.org/pkg/sync/atomic/#pkg-note-BUG + valBits uint64 + + SelfCollector + + desc *Desc + valType ValueType + labelPairs []*dto.LabelPair +} + +// newValue returns a newly allocated value with the given Desc, ValueType, +// sample value and label values. It panics if the number of label +// values is different from the number of variable labels in Desc. 
+func newValue(desc *Desc, valueType ValueType, val float64, labelValues ...string) *value { + if len(labelValues) != len(desc.variableLabels) { + panic(errInconsistentCardinality) + } + result := &value{ + desc: desc, + valType: valueType, + valBits: math.Float64bits(val), + labelPairs: makeLabelPairs(desc, labelValues), + } + result.Init(result) + return result +} + +func (v *value) Desc() *Desc { + return v.desc +} + +func (v *value) Set(val float64) { + atomic.StoreUint64(&v.valBits, math.Float64bits(val)) +} + +func (v *value) Inc() { + v.Add(1) +} + +func (v *value) Dec() { + v.Add(-1) +} + +func (v *value) Add(val float64) { + for { + oldBits := atomic.LoadUint64(&v.valBits) + newBits := math.Float64bits(math.Float64frombits(oldBits) + val) + if atomic.CompareAndSwapUint64(&v.valBits, oldBits, newBits) { + return + } + } +} + +func (v *value) Sub(val float64) { + v.Add(val * -1) +} + +func (v *value) Write(out *dto.Metric) error { + val := math.Float64frombits(atomic.LoadUint64(&v.valBits)) + return populateMetric(v.valType, val, v.labelPairs, out) +} + +// valueFunc is a generic metric for simple values retrieved on collect time +// from a function. It implements Metric and Collector. Its effective type is +// determined by ValueType. This is a low-level building block used by the +// library to back the implementations of CounterFunc, GaugeFunc, and +// UntypedFunc. +type valueFunc struct { + SelfCollector + + desc *Desc + valType ValueType + function func() float64 + labelPairs []*dto.LabelPair +} + +// newValueFunc returns a newly allocated valueFunc with the given Desc and +// ValueType. The value reported is determined by calling the given function +// from within the Write method. Take into account that metric collection may +// happen concurrently. If that results in concurrent calls to Write, like in +// the case where a valueFunc is directly registered with Prometheus, the +// provided function must be concurrency-safe. 
+func newValueFunc(desc *Desc, valueType ValueType, function func() float64) *valueFunc { + result := &valueFunc{ + desc: desc, + valType: valueType, + function: function, + labelPairs: makeLabelPairs(desc, nil), + } + result.Init(result) + return result +} + +func (v *valueFunc) Desc() *Desc { + return v.desc +} + +func (v *valueFunc) Write(out *dto.Metric) error { + return populateMetric(v.valType, v.function(), v.labelPairs, out) +} + +// NewConstMetric returns a metric with one fixed value that cannot be +// changed. Users of this package will not have much use for it in regular +// operations. However, when implementing custom Collectors, it is useful as a +// throw-away metric that is generated on the fly to send it to Prometheus in +// the Collect method. NewConstMetric returns an error if the length of +// labelValues is not consistent with the variable labels in Desc. +func NewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) (Metric, error) { + if len(desc.variableLabels) != len(labelValues) { + return nil, errInconsistentCardinality + } + return &constMetric{ + desc: desc, + valType: valueType, + val: value, + labelPairs: makeLabelPairs(desc, labelValues), + }, nil +} + +// MustNewConstMetric is a version of NewConstMetric that panics where +// NewConstMetric would have returned an error. +func MustNewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) Metric { + m, err := NewConstMetric(desc, valueType, value, labelValues...) 
+ if err != nil { + panic(err) + } + return m +} + +type constMetric struct { + desc *Desc + valType ValueType + val float64 + labelPairs []*dto.LabelPair +} + +func (m *constMetric) Desc() *Desc { + return m.desc +} + +func (m *constMetric) Write(out *dto.Metric) error { + return populateMetric(m.valType, m.val, m.labelPairs, out) +} + +func populateMetric( + t ValueType, + v float64, + labelPairs []*dto.LabelPair, + m *dto.Metric, +) error { + m.Label = labelPairs + switch t { + case CounterValue: + m.Counter = &dto.Counter{Value: proto.Float64(v)} + case GaugeValue: + m.Gauge = &dto.Gauge{Value: proto.Float64(v)} + case UntypedValue: + m.Untyped = &dto.Untyped{Value: proto.Float64(v)} + default: + return fmt.Errorf("encountered unknown type %v", t) + } + return nil +} + +func makeLabelPairs(desc *Desc, labelValues []string) []*dto.LabelPair { + totalLen := len(desc.variableLabels) + len(desc.constLabelPairs) + if totalLen == 0 { + // Super fast path. + return nil + } + if len(desc.variableLabels) == 0 { + // Moderately fast path. 
+ return desc.constLabelPairs + } + labelPairs := make([]*dto.LabelPair, 0, totalLen) + for i, n := range desc.variableLabels { + labelPairs = append(labelPairs, &dto.LabelPair{ + Name: proto.String(n), + Value: proto.String(labelValues[i]), + }) + } + for _, lp := range desc.constLabelPairs { + labelPairs = append(labelPairs, lp) + } + sort.Sort(LabelPairSorter(labelPairs)) + return labelPairs +} diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/client_golang/prometheus/vec.go juju-core-2.0.0/src/github.com/prometheus/client_golang/prometheus/vec.go --- juju-core-2.0~beta15/src/github.com/prometheus/client_golang/prometheus/vec.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/client_golang/prometheus/vec.go 2016-10-13 14:32:23.000000000 +0000 @@ -0,0 +1,249 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "fmt" + "sync" +) + +// MetricVec is a Collector to bundle metrics of the same name that +// differ in their label values. MetricVec is usually not used directly but as a +// building block for implementations of vectors of a given metric +// type. GaugeVec, CounterVec, SummaryVec, and UntypedVec are examples already +// provided in this package. +type MetricVec struct { + mtx sync.RWMutex // Protects the children. 
+ children map[uint64]Metric + desc *Desc + + newMetric func(labelValues ...string) Metric +} + +// Describe implements Collector. The length of the returned slice +// is always one. +func (m *MetricVec) Describe(ch chan<- *Desc) { + ch <- m.desc +} + +// Collect implements Collector. +func (m *MetricVec) Collect(ch chan<- Metric) { + m.mtx.RLock() + defer m.mtx.RUnlock() + + for _, metric := range m.children { + ch <- metric + } +} + +// GetMetricWithLabelValues returns the Metric for the given slice of label +// values (same order as the VariableLabels in Desc). If that combination of +// label values is accessed for the first time, a new Metric is created. +// +// It is possible to call this method without using the returned Metric to only +// create the new Metric but leave it at its start value (e.g. a Summary or +// Histogram without any observations). See also the SummaryVec example. +// +// Keeping the Metric for later use is possible (and should be considered if +// performance is critical), but keep in mind that Reset, DeleteLabelValues and +// Delete can be used to delete the Metric from the MetricVec. In that case, the +// Metric will still exist, but it will not be exported anymore, even if a +// Metric with the same label values is created later. See also the CounterVec +// example. +// +// An error is returned if the number of label values is not the same as the +// number of VariableLabels in Desc. +// +// Note that for more than one label value, this method is prone to mistakes +// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as +// an alternative to avoid that type of mistake. For higher label numbers, the +// latter has a much more readable (albeit more verbose) syntax, but it comes +// with a performance overhead (for creating and processing the Labels map). +// See also the GaugeVec example. 
+func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) { + h, err := m.hashLabelValues(lvs) + if err != nil { + return nil, err + } + + m.mtx.RLock() + metric, ok := m.children[h] + m.mtx.RUnlock() + if ok { + return metric, nil + } + + m.mtx.Lock() + defer m.mtx.Unlock() + return m.getOrCreateMetric(h, lvs...), nil +} + +// GetMetricWith returns the Metric for the given Labels map (the label names +// must match those of the VariableLabels in Desc). If that label map is +// accessed for the first time, a new Metric is created. Implications of +// creating a Metric without using it and keeping the Metric for later use are +// the same as for GetMetricWithLabelValues. +// +// An error is returned if the number and names of the Labels are inconsistent +// with those of the VariableLabels in Desc. +// +// This method is used for the same purpose as +// GetMetricWithLabelValues(...string). See there for pros and cons of the two +// methods. +func (m *MetricVec) GetMetricWith(labels Labels) (Metric, error) { + h, err := m.hashLabels(labels) + if err != nil { + return nil, err + } + + m.mtx.RLock() + metric, ok := m.children[h] + m.mtx.RUnlock() + if ok { + return metric, nil + } + + lvs := make([]string, len(labels)) + for i, label := range m.desc.variableLabels { + lvs[i] = labels[label] + } + m.mtx.Lock() + defer m.mtx.Unlock() + return m.getOrCreateMetric(h, lvs...), nil +} + +// WithLabelValues works as GetMetricWithLabelValues, but panics if an error +// occurs. The method allows neat syntax like: +// httpReqs.WithLabelValues("404", "POST").Inc() +func (m *MetricVec) WithLabelValues(lvs ...string) Metric { + metric, err := m.GetMetricWithLabelValues(lvs...) + if err != nil { + panic(err) + } + return metric +} + +// With works as GetMetricWith, but panics if an error occurs. 
The method allows +// neat syntax like: +// httpReqs.With(Labels{"status":"404", "method":"POST"}).Inc() +func (m *MetricVec) With(labels Labels) Metric { + metric, err := m.GetMetricWith(labels) + if err != nil { + panic(err) + } + return metric +} + +// DeleteLabelValues removes the metric where the variable labels are the same +// as those passed in as labels (same order as the VariableLabels in Desc). It +// returns true if a metric was deleted. +// +// It is not an error if the number of label values is not the same as the +// number of VariableLabels in Desc. However, such inconsistent label count can +// never match an actual Metric, so the method will always return false in that +// case. +// +// Note that for more than one label value, this method is prone to mistakes +// caused by an incorrect order of arguments. Consider Delete(Labels) as an +// alternative to avoid that type of mistake. For higher label numbers, the +// latter has a much more readable (albeit more verbose) syntax, but it comes +// with a performance overhead (for creating and processing the Labels map). +// See also the CounterVec example. +func (m *MetricVec) DeleteLabelValues(lvs ...string) bool { + m.mtx.Lock() + defer m.mtx.Unlock() + + h, err := m.hashLabelValues(lvs) + if err != nil { + return false + } + if _, ok := m.children[h]; !ok { + return false + } + delete(m.children, h) + return true +} + +// Delete deletes the metric where the variable labels are the same as those +// passed in as labels. It returns true if a metric was deleted. +// +// It is not an error if the number and names of the Labels are inconsistent +// with those of the VariableLabels in the Desc of the MetricVec. However, such +// inconsistent Labels can never match an actual Metric, so the method will +// always return false in that case. +// +// This method is used for the same purpose as DeleteLabelValues(...string). See +// there for pros and cons of the two methods. 
+func (m *MetricVec) Delete(labels Labels) bool { + m.mtx.Lock() + defer m.mtx.Unlock() + + h, err := m.hashLabels(labels) + if err != nil { + return false + } + if _, ok := m.children[h]; !ok { + return false + } + delete(m.children, h) + return true +} + +// Reset deletes all metrics in this vector. +func (m *MetricVec) Reset() { + m.mtx.Lock() + defer m.mtx.Unlock() + + for h := range m.children { + delete(m.children, h) + } +} + +func (m *MetricVec) hashLabelValues(vals []string) (uint64, error) { + if len(vals) != len(m.desc.variableLabels) { + return 0, errInconsistentCardinality + } + h := hashNew() + for _, val := range vals { + h = hashAdd(h, val) + } + return h, nil +} + +func (m *MetricVec) hashLabels(labels Labels) (uint64, error) { + if len(labels) != len(m.desc.variableLabels) { + return 0, errInconsistentCardinality + } + h := hashNew() + for _, label := range m.desc.variableLabels { + val, ok := labels[label] + if !ok { + return 0, fmt.Errorf("label name %q missing in label map", label) + } + h = hashAdd(h, val) + } + return h, nil +} + +func (m *MetricVec) getOrCreateMetric(hash uint64, labelValues ...string) Metric { + metric, ok := m.children[hash] + if !ok { + // Copy labelValues. Otherwise, they would be allocated even if we don't go + // down this code path. + copiedLabelValues := append(make([]string, 0, len(labelValues)), labelValues...) + metric = m.newMetric(copiedLabelValues...) 
+ m.children[hash] = metric + } + return metric +} diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/client_golang/prometheus/vec_test.go juju-core-2.0.0/src/github.com/prometheus/client_golang/prometheus/vec_test.go --- juju-core-2.0~beta15/src/github.com/prometheus/client_golang/prometheus/vec_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/client_golang/prometheus/vec_test.go 2016-10-13 14:32:23.000000000 +0000 @@ -0,0 +1,88 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "testing" +) + +func TestDelete(t *testing.T) { + desc := NewDesc("test", "helpless", []string{"l1", "l2"}, nil) + vec := MetricVec{ + children: map[uint64]Metric{}, + desc: desc, + newMetric: func(lvs ...string) Metric { + return newValue(desc, UntypedValue, 0, lvs...) 
+ }, + } + + if got, want := vec.Delete(Labels{"l1": "v1", "l2": "v2"}), false; got != want { + t.Errorf("got %v, want %v", got, want) + } + + vec.With(Labels{"l1": "v1", "l2": "v2"}).(Untyped).Set(42) + if got, want := vec.Delete(Labels{"l1": "v1", "l2": "v2"}), true; got != want { + t.Errorf("got %v, want %v", got, want) + } + if got, want := vec.Delete(Labels{"l1": "v1", "l2": "v2"}), false; got != want { + t.Errorf("got %v, want %v", got, want) + } + + vec.With(Labels{"l1": "v1", "l2": "v2"}).(Untyped).Set(42) + if got, want := vec.Delete(Labels{"l2": "v2", "l1": "v1"}), true; got != want { + t.Errorf("got %v, want %v", got, want) + } + if got, want := vec.Delete(Labels{"l2": "v2", "l1": "v1"}), false; got != want { + t.Errorf("got %v, want %v", got, want) + } + + vec.With(Labels{"l1": "v1", "l2": "v2"}).(Untyped).Set(42) + if got, want := vec.Delete(Labels{"l2": "v1", "l1": "v2"}), false; got != want { + t.Errorf("got %v, want %v", got, want) + } + if got, want := vec.Delete(Labels{"l1": "v1"}), false; got != want { + t.Errorf("got %v, want %v", got, want) + } +} + +func TestDeleteLabelValues(t *testing.T) { + desc := NewDesc("test", "helpless", []string{"l1", "l2"}, nil) + vec := MetricVec{ + children: map[uint64]Metric{}, + desc: desc, + newMetric: func(lvs ...string) Metric { + return newValue(desc, UntypedValue, 0, lvs...) 
+ }, + } + + if got, want := vec.DeleteLabelValues("v1", "v2"), false; got != want { + t.Errorf("got %v, want %v", got, want) + } + + vec.With(Labels{"l1": "v1", "l2": "v2"}).(Untyped).Set(42) + if got, want := vec.DeleteLabelValues("v1", "v2"), true; got != want { + t.Errorf("got %v, want %v", got, want) + } + if got, want := vec.DeleteLabelValues("v1", "v2"), false; got != want { + t.Errorf("got %v, want %v", got, want) + } + + vec.With(Labels{"l1": "v1", "l2": "v2"}).(Untyped).Set(42) + if got, want := vec.DeleteLabelValues("v2", "v1"), false; got != want { + t.Errorf("got %v, want %v", got, want) + } + if got, want := vec.DeleteLabelValues("v1"), false; got != want { + t.Errorf("got %v, want %v", got, want) + } +} diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/client_golang/README.md juju-core-2.0.0/src/github.com/prometheus/client_golang/README.md --- juju-core-2.0~beta15/src/github.com/prometheus/client_golang/README.md 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/client_golang/README.md 2016-10-13 14:32:23.000000000 +0000 @@ -0,0 +1,45 @@ +# Prometheus Go client library + +[![Build Status](https://travis-ci.org/prometheus/client_golang.svg?branch=master)](https://travis-ci.org/prometheus/client_golang) + +This is the [Go](http://golang.org) client library for +[Prometheus](http://prometheus.io). It has two separate parts, one for +instrumenting application code, and one for creating clients that talk to the +Prometheus HTTP API. + +## Instrumenting applications + +[![code-coverage](http://gocover.io/_badge/github.com/prometheus/client_golang/prometheus)](http://gocover.io/github.com/prometheus/client_golang/prometheus) [![go-doc](https://godoc.org/github.com/prometheus/client_golang/prometheus?status.svg)](https://godoc.org/github.com/prometheus/client_golang/prometheus) + +The +[`prometheus` directory](https://github.com/prometheus/client_golang/tree/master/prometheus) +contains the instrumentation library. 
See the +[best practices section](http://prometheus.io/docs/practices/naming/) of the +Prometheus documentation to learn more about instrumenting applications. + +The +[`examples` directory](https://github.com/prometheus/client_golang/tree/master/examples) +contains simple examples of instrumented code. + +## Client for the Prometheus HTTP API + +[![code-coverage](http://gocover.io/_badge/github.com/prometheus/client_golang/api/prometheus)](http://gocover.io/github.com/prometheus/client_golang/api/prometheus) [![go-doc](https://godoc.org/github.com/prometheus/client_golang/api/prometheus?status.svg)](https://godoc.org/github.com/prometheus/client_golang/api/prometheus) + +The +[`api/prometheus` directory](https://github.com/prometheus/client_golang/tree/master/api/prometheus) +contains the client for the +[Prometheus HTTP API](http://prometheus.io/docs/querying/api/). It allows you +to write Go applications that query time series data from a Prometheus server. + +## Where is `model`, `extraction`, and `text`? + +The `model` packages has been moved to +[`prometheus/common/model`](https://github.com/prometheus/common/tree/master/model). + +The `extraction` and `text` packages are now contained in +[`prometheus/common/expfmt`](https://github.com/prometheus/common/tree/master/expfmt). + +## Contributing and community + +See the [contributing guidelines](CONTRIBUTING.md) and the +[Community section](http://prometheus.io/community/) of the homepage. diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/client_golang/.travis.yml juju-core-2.0.0/src/github.com/prometheus/client_golang/.travis.yml --- juju-core-2.0~beta15/src/github.com/prometheus/client_golang/.travis.yml 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/client_golang/.travis.yml 2016-10-13 14:32:23.000000000 +0000 @@ -0,0 +1,8 @@ +sudo: false +language: go + +go: + - 1.4 + +script: + - go test -short ./... 
diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/client_golang/VERSION juju-core-2.0.0/src/github.com/prometheus/client_golang/VERSION --- juju-core-2.0~beta15/src/github.com/prometheus/client_golang/VERSION 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/client_golang/VERSION 2016-10-13 14:32:23.000000000 +0000 @@ -0,0 +1 @@ +0.7.0 diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/client_model/AUTHORS.md juju-core-2.0.0/src/github.com/prometheus/client_model/AUTHORS.md --- juju-core-2.0~beta15/src/github.com/prometheus/client_model/AUTHORS.md 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/client_model/AUTHORS.md 2016-10-13 14:31:54.000000000 +0000 @@ -0,0 +1,13 @@ +The Prometheus project was started by Matt T. Proud (emeritus) and +Julius Volz in 2012. + +Maintainers of this repository: + +* Björn Rabenstein + +The following individuals have contributed code to this repository +(listed in alphabetical order): + +* Björn Rabenstein +* Matt T. Proud +* Tobias Schmidt diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/client_model/CONTRIBUTING.md juju-core-2.0.0/src/github.com/prometheus/client_model/CONTRIBUTING.md --- juju-core-2.0~beta15/src/github.com/prometheus/client_model/CONTRIBUTING.md 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/client_model/CONTRIBUTING.md 2016-10-13 14:31:54.000000000 +0000 @@ -0,0 +1,18 @@ +# Contributing + +Prometheus uses GitHub to manage reviews of pull requests. + +* If you have a trivial fix or improvement, go ahead and create a pull + request, addressing (with `@...`) one or more of the maintainers + (see [AUTHORS.md](AUTHORS.md)) in the description of the pull request. + +* If you plan to do something more involved, first discuss your ideas + on our [mailing list](https://groups.google.com/forum/?fromgroups#!forum/prometheus-developers). 
+ This will avoid unnecessary work and surely give you and us a good deal + of inspiration. + +* Relevant coding style guidelines for the Go parts are the [Go Code Review + Comments](https://code.google.com/p/go-wiki/wiki/CodeReviewComments) + and the _Formatting and style_ section of Peter Bourgon's [Go: Best + Practices for Production + Environments](http://peter.bourgon.org/go-in-production/#formatting-and-style). diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/client_model/cpp/metrics.pb.cc juju-core-2.0.0/src/github.com/prometheus/client_model/cpp/metrics.pb.cc --- juju-core-2.0~beta15/src/github.com/prometheus/client_model/cpp/metrics.pb.cc 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/client_model/cpp/metrics.pb.cc 2016-10-13 14:31:54.000000000 +0000 @@ -0,0 +1,3380 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: metrics.proto + +#define INTERNAL_SUPPRESS_PROTOBUF_FIELD_DEPRECATION +#include "metrics.pb.h" + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +// @@protoc_insertion_point(includes) + +namespace io { +namespace prometheus { +namespace client { + +namespace { + +const ::google::protobuf::Descriptor* LabelPair_descriptor_ = NULL; +const ::google::protobuf::internal::GeneratedMessageReflection* + LabelPair_reflection_ = NULL; +const ::google::protobuf::Descriptor* Gauge_descriptor_ = NULL; +const ::google::protobuf::internal::GeneratedMessageReflection* + Gauge_reflection_ = NULL; +const ::google::protobuf::Descriptor* Counter_descriptor_ = NULL; +const ::google::protobuf::internal::GeneratedMessageReflection* + Counter_reflection_ = NULL; +const ::google::protobuf::Descriptor* Quantile_descriptor_ = NULL; +const ::google::protobuf::internal::GeneratedMessageReflection* + Quantile_reflection_ = NULL; +const ::google::protobuf::Descriptor* Summary_descriptor_ = NULL; +const ::google::protobuf::internal::GeneratedMessageReflection* + 
Summary_reflection_ = NULL; +const ::google::protobuf::Descriptor* Untyped_descriptor_ = NULL; +const ::google::protobuf::internal::GeneratedMessageReflection* + Untyped_reflection_ = NULL; +const ::google::protobuf::Descriptor* Histogram_descriptor_ = NULL; +const ::google::protobuf::internal::GeneratedMessageReflection* + Histogram_reflection_ = NULL; +const ::google::protobuf::Descriptor* Bucket_descriptor_ = NULL; +const ::google::protobuf::internal::GeneratedMessageReflection* + Bucket_reflection_ = NULL; +const ::google::protobuf::Descriptor* Metric_descriptor_ = NULL; +const ::google::protobuf::internal::GeneratedMessageReflection* + Metric_reflection_ = NULL; +const ::google::protobuf::Descriptor* MetricFamily_descriptor_ = NULL; +const ::google::protobuf::internal::GeneratedMessageReflection* + MetricFamily_reflection_ = NULL; +const ::google::protobuf::EnumDescriptor* MetricType_descriptor_ = NULL; + +} // namespace + + +void protobuf_AssignDesc_metrics_2eproto() { + protobuf_AddDesc_metrics_2eproto(); + const ::google::protobuf::FileDescriptor* file = + ::google::protobuf::DescriptorPool::generated_pool()->FindFileByName( + "metrics.proto"); + GOOGLE_CHECK(file != NULL); + LabelPair_descriptor_ = file->message_type(0); + static const int LabelPair_offsets_[2] = { + GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(LabelPair, name_), + GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(LabelPair, value_), + }; + LabelPair_reflection_ = + new ::google::protobuf::internal::GeneratedMessageReflection( + LabelPair_descriptor_, + LabelPair::default_instance_, + LabelPair_offsets_, + GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(LabelPair, _has_bits_[0]), + GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(LabelPair, _unknown_fields_), + -1, + ::google::protobuf::DescriptorPool::generated_pool(), + ::google::protobuf::MessageFactory::generated_factory(), + sizeof(LabelPair)); + Gauge_descriptor_ = file->message_type(1); + static const int Gauge_offsets_[1] = { + 
GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(Gauge, value_), + }; + Gauge_reflection_ = + new ::google::protobuf::internal::GeneratedMessageReflection( + Gauge_descriptor_, + Gauge::default_instance_, + Gauge_offsets_, + GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(Gauge, _has_bits_[0]), + GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(Gauge, _unknown_fields_), + -1, + ::google::protobuf::DescriptorPool::generated_pool(), + ::google::protobuf::MessageFactory::generated_factory(), + sizeof(Gauge)); + Counter_descriptor_ = file->message_type(2); + static const int Counter_offsets_[1] = { + GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(Counter, value_), + }; + Counter_reflection_ = + new ::google::protobuf::internal::GeneratedMessageReflection( + Counter_descriptor_, + Counter::default_instance_, + Counter_offsets_, + GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(Counter, _has_bits_[0]), + GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(Counter, _unknown_fields_), + -1, + ::google::protobuf::DescriptorPool::generated_pool(), + ::google::protobuf::MessageFactory::generated_factory(), + sizeof(Counter)); + Quantile_descriptor_ = file->message_type(3); + static const int Quantile_offsets_[2] = { + GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(Quantile, quantile_), + GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(Quantile, value_), + }; + Quantile_reflection_ = + new ::google::protobuf::internal::GeneratedMessageReflection( + Quantile_descriptor_, + Quantile::default_instance_, + Quantile_offsets_, + GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(Quantile, _has_bits_[0]), + GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(Quantile, _unknown_fields_), + -1, + ::google::protobuf::DescriptorPool::generated_pool(), + ::google::protobuf::MessageFactory::generated_factory(), + sizeof(Quantile)); + Summary_descriptor_ = file->message_type(4); + static const int Summary_offsets_[3] = { + GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(Summary, sample_count_), + 
GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(Summary, sample_sum_), + GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(Summary, quantile_), + }; + Summary_reflection_ = + new ::google::protobuf::internal::GeneratedMessageReflection( + Summary_descriptor_, + Summary::default_instance_, + Summary_offsets_, + GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(Summary, _has_bits_[0]), + GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(Summary, _unknown_fields_), + -1, + ::google::protobuf::DescriptorPool::generated_pool(), + ::google::protobuf::MessageFactory::generated_factory(), + sizeof(Summary)); + Untyped_descriptor_ = file->message_type(5); + static const int Untyped_offsets_[1] = { + GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(Untyped, value_), + }; + Untyped_reflection_ = + new ::google::protobuf::internal::GeneratedMessageReflection( + Untyped_descriptor_, + Untyped::default_instance_, + Untyped_offsets_, + GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(Untyped, _has_bits_[0]), + GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(Untyped, _unknown_fields_), + -1, + ::google::protobuf::DescriptorPool::generated_pool(), + ::google::protobuf::MessageFactory::generated_factory(), + sizeof(Untyped)); + Histogram_descriptor_ = file->message_type(6); + static const int Histogram_offsets_[3] = { + GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(Histogram, sample_count_), + GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(Histogram, sample_sum_), + GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(Histogram, bucket_), + }; + Histogram_reflection_ = + new ::google::protobuf::internal::GeneratedMessageReflection( + Histogram_descriptor_, + Histogram::default_instance_, + Histogram_offsets_, + GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(Histogram, _has_bits_[0]), + GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(Histogram, _unknown_fields_), + -1, + ::google::protobuf::DescriptorPool::generated_pool(), + ::google::protobuf::MessageFactory::generated_factory(), + sizeof(Histogram)); 
+ Bucket_descriptor_ = file->message_type(7); + static const int Bucket_offsets_[2] = { + GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(Bucket, cumulative_count_), + GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(Bucket, upper_bound_), + }; + Bucket_reflection_ = + new ::google::protobuf::internal::GeneratedMessageReflection( + Bucket_descriptor_, + Bucket::default_instance_, + Bucket_offsets_, + GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(Bucket, _has_bits_[0]), + GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(Bucket, _unknown_fields_), + -1, + ::google::protobuf::DescriptorPool::generated_pool(), + ::google::protobuf::MessageFactory::generated_factory(), + sizeof(Bucket)); + Metric_descriptor_ = file->message_type(8); + static const int Metric_offsets_[7] = { + GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(Metric, label_), + GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(Metric, gauge_), + GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(Metric, counter_), + GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(Metric, summary_), + GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(Metric, untyped_), + GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(Metric, histogram_), + GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(Metric, timestamp_ms_), + }; + Metric_reflection_ = + new ::google::protobuf::internal::GeneratedMessageReflection( + Metric_descriptor_, + Metric::default_instance_, + Metric_offsets_, + GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(Metric, _has_bits_[0]), + GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(Metric, _unknown_fields_), + -1, + ::google::protobuf::DescriptorPool::generated_pool(), + ::google::protobuf::MessageFactory::generated_factory(), + sizeof(Metric)); + MetricFamily_descriptor_ = file->message_type(9); + static const int MetricFamily_offsets_[4] = { + GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(MetricFamily, name_), + GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(MetricFamily, help_), + 
GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(MetricFamily, type_), + GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(MetricFamily, metric_), + }; + MetricFamily_reflection_ = + new ::google::protobuf::internal::GeneratedMessageReflection( + MetricFamily_descriptor_, + MetricFamily::default_instance_, + MetricFamily_offsets_, + GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(MetricFamily, _has_bits_[0]), + GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(MetricFamily, _unknown_fields_), + -1, + ::google::protobuf::DescriptorPool::generated_pool(), + ::google::protobuf::MessageFactory::generated_factory(), + sizeof(MetricFamily)); + MetricType_descriptor_ = file->enum_type(0); +} + +namespace { + +GOOGLE_PROTOBUF_DECLARE_ONCE(protobuf_AssignDescriptors_once_); +inline void protobuf_AssignDescriptorsOnce() { + ::google::protobuf::GoogleOnceInit(&protobuf_AssignDescriptors_once_, + &protobuf_AssignDesc_metrics_2eproto); +} + +void protobuf_RegisterTypes(const ::std::string&) { + protobuf_AssignDescriptorsOnce(); + ::google::protobuf::MessageFactory::InternalRegisterGeneratedMessage( + LabelPair_descriptor_, &LabelPair::default_instance()); + ::google::protobuf::MessageFactory::InternalRegisterGeneratedMessage( + Gauge_descriptor_, &Gauge::default_instance()); + ::google::protobuf::MessageFactory::InternalRegisterGeneratedMessage( + Counter_descriptor_, &Counter::default_instance()); + ::google::protobuf::MessageFactory::InternalRegisterGeneratedMessage( + Quantile_descriptor_, &Quantile::default_instance()); + ::google::protobuf::MessageFactory::InternalRegisterGeneratedMessage( + Summary_descriptor_, &Summary::default_instance()); + ::google::protobuf::MessageFactory::InternalRegisterGeneratedMessage( + Untyped_descriptor_, &Untyped::default_instance()); + ::google::protobuf::MessageFactory::InternalRegisterGeneratedMessage( + Histogram_descriptor_, &Histogram::default_instance()); + ::google::protobuf::MessageFactory::InternalRegisterGeneratedMessage( + 
Bucket_descriptor_, &Bucket::default_instance()); + ::google::protobuf::MessageFactory::InternalRegisterGeneratedMessage( + Metric_descriptor_, &Metric::default_instance()); + ::google::protobuf::MessageFactory::InternalRegisterGeneratedMessage( + MetricFamily_descriptor_, &MetricFamily::default_instance()); +} + +} // namespace + +void protobuf_ShutdownFile_metrics_2eproto() { + delete LabelPair::default_instance_; + delete LabelPair_reflection_; + delete Gauge::default_instance_; + delete Gauge_reflection_; + delete Counter::default_instance_; + delete Counter_reflection_; + delete Quantile::default_instance_; + delete Quantile_reflection_; + delete Summary::default_instance_; + delete Summary_reflection_; + delete Untyped::default_instance_; + delete Untyped_reflection_; + delete Histogram::default_instance_; + delete Histogram_reflection_; + delete Bucket::default_instance_; + delete Bucket_reflection_; + delete Metric::default_instance_; + delete Metric_reflection_; + delete MetricFamily::default_instance_; + delete MetricFamily_reflection_; +} + +void protobuf_AddDesc_metrics_2eproto() { + static bool already_here = false; + if (already_here) return; + already_here = true; + GOOGLE_PROTOBUF_VERIFY_VERSION; + + ::google::protobuf::DescriptorPool::InternalAddGeneratedFile( + "\n\rmetrics.proto\022\024io.prometheus.client\"(\n" + "\tLabelPair\022\014\n\004name\030\001 \001(\t\022\r\n\005value\030\002 \001(\t\"" + "\026\n\005Gauge\022\r\n\005value\030\001 \001(\001\"\030\n\007Counter\022\r\n\005va" + "lue\030\001 \001(\001\"+\n\010Quantile\022\020\n\010quantile\030\001 \001(\001\022" + "\r\n\005value\030\002 \001(\001\"e\n\007Summary\022\024\n\014sample_coun" + "t\030\001 \001(\004\022\022\n\nsample_sum\030\002 \001(\001\0220\n\010quantile\030" + "\003 \003(\0132\036.io.prometheus.client.Quantile\"\030\n" + "\007Untyped\022\r\n\005value\030\001 \001(\001\"c\n\tHistogram\022\024\n\014" + "sample_count\030\001 \001(\004\022\022\n\nsample_sum\030\002 \001(\001\022," 
+ "\n\006bucket\030\003 \003(\0132\034.io.prometheus.client.Bu" + "cket\"7\n\006Bucket\022\030\n\020cumulative_count\030\001 \001(\004" + "\022\023\n\013upper_bound\030\002 \001(\001\"\276\002\n\006Metric\022.\n\005labe" + "l\030\001 \003(\0132\037.io.prometheus.client.LabelPair" + "\022*\n\005gauge\030\002 \001(\0132\033.io.prometheus.client.G" + "auge\022.\n\007counter\030\003 \001(\0132\035.io.prometheus.cl" + "ient.Counter\022.\n\007summary\030\004 \001(\0132\035.io.prome" + "theus.client.Summary\022.\n\007untyped\030\005 \001(\0132\035." + "io.prometheus.client.Untyped\0222\n\thistogra" + "m\030\007 \001(\0132\037.io.prometheus.client.Histogram" + "\022\024\n\014timestamp_ms\030\006 \001(\003\"\210\001\n\014MetricFamily\022" + "\014\n\004name\030\001 \001(\t\022\014\n\004help\030\002 \001(\t\022.\n\004type\030\003 \001(" + "\0162 .io.prometheus.client.MetricType\022,\n\006m" + "etric\030\004 \003(\0132\034.io.prometheus.client.Metri" + "c*M\n\nMetricType\022\013\n\007COUNTER\020\000\022\t\n\005GAUGE\020\001\022" + "\013\n\007SUMMARY\020\002\022\013\n\007UNTYPED\020\003\022\r\n\tHISTOGRAM\020\004" + "B\026\n\024io.prometheus.client", 1024); + ::google::protobuf::MessageFactory::InternalRegisterGeneratedFile( + "metrics.proto", &protobuf_RegisterTypes); + LabelPair::default_instance_ = new LabelPair(); + Gauge::default_instance_ = new Gauge(); + Counter::default_instance_ = new Counter(); + Quantile::default_instance_ = new Quantile(); + Summary::default_instance_ = new Summary(); + Untyped::default_instance_ = new Untyped(); + Histogram::default_instance_ = new Histogram(); + Bucket::default_instance_ = new Bucket(); + Metric::default_instance_ = new Metric(); + MetricFamily::default_instance_ = new MetricFamily(); + LabelPair::default_instance_->InitAsDefaultInstance(); + Gauge::default_instance_->InitAsDefaultInstance(); + Counter::default_instance_->InitAsDefaultInstance(); + Quantile::default_instance_->InitAsDefaultInstance(); + 
Summary::default_instance_->InitAsDefaultInstance(); + Untyped::default_instance_->InitAsDefaultInstance(); + Histogram::default_instance_->InitAsDefaultInstance(); + Bucket::default_instance_->InitAsDefaultInstance(); + Metric::default_instance_->InitAsDefaultInstance(); + MetricFamily::default_instance_->InitAsDefaultInstance(); + ::google::protobuf::internal::OnShutdown(&protobuf_ShutdownFile_metrics_2eproto); +} + +// Force AddDescriptors() to be called at static initialization time. +struct StaticDescriptorInitializer_metrics_2eproto { + StaticDescriptorInitializer_metrics_2eproto() { + protobuf_AddDesc_metrics_2eproto(); + } +} static_descriptor_initializer_metrics_2eproto_; +const ::google::protobuf::EnumDescriptor* MetricType_descriptor() { + protobuf_AssignDescriptorsOnce(); + return MetricType_descriptor_; +} +bool MetricType_IsValid(int value) { + switch(value) { + case 0: + case 1: + case 2: + case 3: + case 4: + return true; + default: + return false; + } +} + + +// =================================================================== + +#ifndef _MSC_VER +const int LabelPair::kNameFieldNumber; +const int LabelPair::kValueFieldNumber; +#endif // !_MSC_VER + +LabelPair::LabelPair() + : ::google::protobuf::Message() { + SharedCtor(); + // @@protoc_insertion_point(constructor:io.prometheus.client.LabelPair) +} + +void LabelPair::InitAsDefaultInstance() { +} + +LabelPair::LabelPair(const LabelPair& from) + : ::google::protobuf::Message() { + SharedCtor(); + MergeFrom(from); + // @@protoc_insertion_point(copy_constructor:io.prometheus.client.LabelPair) +} + +void LabelPair::SharedCtor() { + ::google::protobuf::internal::GetEmptyString(); + _cached_size_ = 0; + name_ = const_cast< ::std::string*>(&::google::protobuf::internal::GetEmptyStringAlreadyInited()); + value_ = const_cast< ::std::string*>(&::google::protobuf::internal::GetEmptyStringAlreadyInited()); + ::memset(_has_bits_, 0, sizeof(_has_bits_)); +} + +LabelPair::~LabelPair() { + // 
@@protoc_insertion_point(destructor:io.prometheus.client.LabelPair) + SharedDtor(); +} + +void LabelPair::SharedDtor() { + if (name_ != &::google::protobuf::internal::GetEmptyStringAlreadyInited()) { + delete name_; + } + if (value_ != &::google::protobuf::internal::GetEmptyStringAlreadyInited()) { + delete value_; + } + if (this != default_instance_) { + } +} + +void LabelPair::SetCachedSize(int size) const { + GOOGLE_SAFE_CONCURRENT_WRITES_BEGIN(); + _cached_size_ = size; + GOOGLE_SAFE_CONCURRENT_WRITES_END(); +} +const ::google::protobuf::Descriptor* LabelPair::descriptor() { + protobuf_AssignDescriptorsOnce(); + return LabelPair_descriptor_; +} + +const LabelPair& LabelPair::default_instance() { + if (default_instance_ == NULL) protobuf_AddDesc_metrics_2eproto(); + return *default_instance_; +} + +LabelPair* LabelPair::default_instance_ = NULL; + +LabelPair* LabelPair::New() const { + return new LabelPair; +} + +void LabelPair::Clear() { + if (_has_bits_[0 / 32] & 3) { + if (has_name()) { + if (name_ != &::google::protobuf::internal::GetEmptyStringAlreadyInited()) { + name_->clear(); + } + } + if (has_value()) { + if (value_ != &::google::protobuf::internal::GetEmptyStringAlreadyInited()) { + value_->clear(); + } + } + } + ::memset(_has_bits_, 0, sizeof(_has_bits_)); + mutable_unknown_fields()->Clear(); +} + +bool LabelPair::MergePartialFromCodedStream( + ::google::protobuf::io::CodedInputStream* input) { +#define DO_(EXPRESSION) if (!(EXPRESSION)) goto failure + ::google::protobuf::uint32 tag; + // @@protoc_insertion_point(parse_start:io.prometheus.client.LabelPair) + for (;;) { + ::std::pair< ::google::protobuf::uint32, bool> p = input->ReadTagWithCutoff(127); + tag = p.first; + if (!p.second) goto handle_unusual; + switch (::google::protobuf::internal::WireFormatLite::GetTagFieldNumber(tag)) { + // optional string name = 1; + case 1: { + if (tag == 10) { + DO_(::google::protobuf::internal::WireFormatLite::ReadString( + input, this->mutable_name())); + 
::google::protobuf::internal::WireFormat::VerifyUTF8StringNamedField( + this->name().data(), this->name().length(), + ::google::protobuf::internal::WireFormat::PARSE, + "name"); + } else { + goto handle_unusual; + } + if (input->ExpectTag(18)) goto parse_value; + break; + } + + // optional string value = 2; + case 2: { + if (tag == 18) { + parse_value: + DO_(::google::protobuf::internal::WireFormatLite::ReadString( + input, this->mutable_value())); + ::google::protobuf::internal::WireFormat::VerifyUTF8StringNamedField( + this->value().data(), this->value().length(), + ::google::protobuf::internal::WireFormat::PARSE, + "value"); + } else { + goto handle_unusual; + } + if (input->ExpectAtEnd()) goto success; + break; + } + + default: { + handle_unusual: + if (tag == 0 || + ::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) == + ::google::protobuf::internal::WireFormatLite::WIRETYPE_END_GROUP) { + goto success; + } + DO_(::google::protobuf::internal::WireFormat::SkipField( + input, tag, mutable_unknown_fields())); + break; + } + } + } +success: + // @@protoc_insertion_point(parse_success:io.prometheus.client.LabelPair) + return true; +failure: + // @@protoc_insertion_point(parse_failure:io.prometheus.client.LabelPair) + return false; +#undef DO_ +} + +void LabelPair::SerializeWithCachedSizes( + ::google::protobuf::io::CodedOutputStream* output) const { + // @@protoc_insertion_point(serialize_start:io.prometheus.client.LabelPair) + // optional string name = 1; + if (has_name()) { + ::google::protobuf::internal::WireFormat::VerifyUTF8StringNamedField( + this->name().data(), this->name().length(), + ::google::protobuf::internal::WireFormat::SERIALIZE, + "name"); + ::google::protobuf::internal::WireFormatLite::WriteStringMaybeAliased( + 1, this->name(), output); + } + + // optional string value = 2; + if (has_value()) { + ::google::protobuf::internal::WireFormat::VerifyUTF8StringNamedField( + this->value().data(), this->value().length(), + 
::google::protobuf::internal::WireFormat::SERIALIZE, + "value"); + ::google::protobuf::internal::WireFormatLite::WriteStringMaybeAliased( + 2, this->value(), output); + } + + if (!unknown_fields().empty()) { + ::google::protobuf::internal::WireFormat::SerializeUnknownFields( + unknown_fields(), output); + } + // @@protoc_insertion_point(serialize_end:io.prometheus.client.LabelPair) +} + +::google::protobuf::uint8* LabelPair::SerializeWithCachedSizesToArray( + ::google::protobuf::uint8* target) const { + // @@protoc_insertion_point(serialize_to_array_start:io.prometheus.client.LabelPair) + // optional string name = 1; + if (has_name()) { + ::google::protobuf::internal::WireFormat::VerifyUTF8StringNamedField( + this->name().data(), this->name().length(), + ::google::protobuf::internal::WireFormat::SERIALIZE, + "name"); + target = + ::google::protobuf::internal::WireFormatLite::WriteStringToArray( + 1, this->name(), target); + } + + // optional string value = 2; + if (has_value()) { + ::google::protobuf::internal::WireFormat::VerifyUTF8StringNamedField( + this->value().data(), this->value().length(), + ::google::protobuf::internal::WireFormat::SERIALIZE, + "value"); + target = + ::google::protobuf::internal::WireFormatLite::WriteStringToArray( + 2, this->value(), target); + } + + if (!unknown_fields().empty()) { + target = ::google::protobuf::internal::WireFormat::SerializeUnknownFieldsToArray( + unknown_fields(), target); + } + // @@protoc_insertion_point(serialize_to_array_end:io.prometheus.client.LabelPair) + return target; +} + +int LabelPair::ByteSize() const { + int total_size = 0; + + if (_has_bits_[0 / 32] & (0xffu << (0 % 32))) { + // optional string name = 1; + if (has_name()) { + total_size += 1 + + ::google::protobuf::internal::WireFormatLite::StringSize( + this->name()); + } + + // optional string value = 2; + if (has_value()) { + total_size += 1 + + ::google::protobuf::internal::WireFormatLite::StringSize( + this->value()); + } + + } + if 
(!unknown_fields().empty()) { + total_size += + ::google::protobuf::internal::WireFormat::ComputeUnknownFieldsSize( + unknown_fields()); + } + GOOGLE_SAFE_CONCURRENT_WRITES_BEGIN(); + _cached_size_ = total_size; + GOOGLE_SAFE_CONCURRENT_WRITES_END(); + return total_size; +} + +void LabelPair::MergeFrom(const ::google::protobuf::Message& from) { + GOOGLE_CHECK_NE(&from, this); + const LabelPair* source = + ::google::protobuf::internal::dynamic_cast_if_available( + &from); + if (source == NULL) { + ::google::protobuf::internal::ReflectionOps::Merge(from, this); + } else { + MergeFrom(*source); + } +} + +void LabelPair::MergeFrom(const LabelPair& from) { + GOOGLE_CHECK_NE(&from, this); + if (from._has_bits_[0 / 32] & (0xffu << (0 % 32))) { + if (from.has_name()) { + set_name(from.name()); + } + if (from.has_value()) { + set_value(from.value()); + } + } + mutable_unknown_fields()->MergeFrom(from.unknown_fields()); +} + +void LabelPair::CopyFrom(const ::google::protobuf::Message& from) { + if (&from == this) return; + Clear(); + MergeFrom(from); +} + +void LabelPair::CopyFrom(const LabelPair& from) { + if (&from == this) return; + Clear(); + MergeFrom(from); +} + +bool LabelPair::IsInitialized() const { + + return true; +} + +void LabelPair::Swap(LabelPair* other) { + if (other != this) { + std::swap(name_, other->name_); + std::swap(value_, other->value_); + std::swap(_has_bits_[0], other->_has_bits_[0]); + _unknown_fields_.Swap(&other->_unknown_fields_); + std::swap(_cached_size_, other->_cached_size_); + } +} + +::google::protobuf::Metadata LabelPair::GetMetadata() const { + protobuf_AssignDescriptorsOnce(); + ::google::protobuf::Metadata metadata; + metadata.descriptor = LabelPair_descriptor_; + metadata.reflection = LabelPair_reflection_; + return metadata; +} + + +// =================================================================== + +#ifndef _MSC_VER +const int Gauge::kValueFieldNumber; +#endif // !_MSC_VER + +Gauge::Gauge() + : ::google::protobuf::Message() { 
+ SharedCtor(); + // @@protoc_insertion_point(constructor:io.prometheus.client.Gauge) +} + +void Gauge::InitAsDefaultInstance() { +} + +Gauge::Gauge(const Gauge& from) + : ::google::protobuf::Message() { + SharedCtor(); + MergeFrom(from); + // @@protoc_insertion_point(copy_constructor:io.prometheus.client.Gauge) +} + +void Gauge::SharedCtor() { + _cached_size_ = 0; + value_ = 0; + ::memset(_has_bits_, 0, sizeof(_has_bits_)); +} + +Gauge::~Gauge() { + // @@protoc_insertion_point(destructor:io.prometheus.client.Gauge) + SharedDtor(); +} + +void Gauge::SharedDtor() { + if (this != default_instance_) { + } +} + +void Gauge::SetCachedSize(int size) const { + GOOGLE_SAFE_CONCURRENT_WRITES_BEGIN(); + _cached_size_ = size; + GOOGLE_SAFE_CONCURRENT_WRITES_END(); +} +const ::google::protobuf::Descriptor* Gauge::descriptor() { + protobuf_AssignDescriptorsOnce(); + return Gauge_descriptor_; +} + +const Gauge& Gauge::default_instance() { + if (default_instance_ == NULL) protobuf_AddDesc_metrics_2eproto(); + return *default_instance_; +} + +Gauge* Gauge::default_instance_ = NULL; + +Gauge* Gauge::New() const { + return new Gauge; +} + +void Gauge::Clear() { + value_ = 0; + ::memset(_has_bits_, 0, sizeof(_has_bits_)); + mutable_unknown_fields()->Clear(); +} + +bool Gauge::MergePartialFromCodedStream( + ::google::protobuf::io::CodedInputStream* input) { +#define DO_(EXPRESSION) if (!(EXPRESSION)) goto failure + ::google::protobuf::uint32 tag; + // @@protoc_insertion_point(parse_start:io.prometheus.client.Gauge) + for (;;) { + ::std::pair< ::google::protobuf::uint32, bool> p = input->ReadTagWithCutoff(127); + tag = p.first; + if (!p.second) goto handle_unusual; + switch (::google::protobuf::internal::WireFormatLite::GetTagFieldNumber(tag)) { + // optional double value = 1; + case 1: { + if (tag == 9) { + DO_((::google::protobuf::internal::WireFormatLite::ReadPrimitive< + double, ::google::protobuf::internal::WireFormatLite::TYPE_DOUBLE>( + input, &value_))); + set_has_value(); + } 
else { + goto handle_unusual; + } + if (input->ExpectAtEnd()) goto success; + break; + } + + default: { + handle_unusual: + if (tag == 0 || + ::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) == + ::google::protobuf::internal::WireFormatLite::WIRETYPE_END_GROUP) { + goto success; + } + DO_(::google::protobuf::internal::WireFormat::SkipField( + input, tag, mutable_unknown_fields())); + break; + } + } + } +success: + // @@protoc_insertion_point(parse_success:io.prometheus.client.Gauge) + return true; +failure: + // @@protoc_insertion_point(parse_failure:io.prometheus.client.Gauge) + return false; +#undef DO_ +} + +void Gauge::SerializeWithCachedSizes( + ::google::protobuf::io::CodedOutputStream* output) const { + // @@protoc_insertion_point(serialize_start:io.prometheus.client.Gauge) + // optional double value = 1; + if (has_value()) { + ::google::protobuf::internal::WireFormatLite::WriteDouble(1, this->value(), output); + } + + if (!unknown_fields().empty()) { + ::google::protobuf::internal::WireFormat::SerializeUnknownFields( + unknown_fields(), output); + } + // @@protoc_insertion_point(serialize_end:io.prometheus.client.Gauge) +} + +::google::protobuf::uint8* Gauge::SerializeWithCachedSizesToArray( + ::google::protobuf::uint8* target) const { + // @@protoc_insertion_point(serialize_to_array_start:io.prometheus.client.Gauge) + // optional double value = 1; + if (has_value()) { + target = ::google::protobuf::internal::WireFormatLite::WriteDoubleToArray(1, this->value(), target); + } + + if (!unknown_fields().empty()) { + target = ::google::protobuf::internal::WireFormat::SerializeUnknownFieldsToArray( + unknown_fields(), target); + } + // @@protoc_insertion_point(serialize_to_array_end:io.prometheus.client.Gauge) + return target; +} + +int Gauge::ByteSize() const { + int total_size = 0; + + if (_has_bits_[0 / 32] & (0xffu << (0 % 32))) { + // optional double value = 1; + if (has_value()) { + total_size += 1 + 8; + } + + } + if 
(!unknown_fields().empty()) { + total_size += + ::google::protobuf::internal::WireFormat::ComputeUnknownFieldsSize( + unknown_fields()); + } + GOOGLE_SAFE_CONCURRENT_WRITES_BEGIN(); + _cached_size_ = total_size; + GOOGLE_SAFE_CONCURRENT_WRITES_END(); + return total_size; +} + +void Gauge::MergeFrom(const ::google::protobuf::Message& from) { + GOOGLE_CHECK_NE(&from, this); + const Gauge* source = + ::google::protobuf::internal::dynamic_cast_if_available( + &from); + if (source == NULL) { + ::google::protobuf::internal::ReflectionOps::Merge(from, this); + } else { + MergeFrom(*source); + } +} + +void Gauge::MergeFrom(const Gauge& from) { + GOOGLE_CHECK_NE(&from, this); + if (from._has_bits_[0 / 32] & (0xffu << (0 % 32))) { + if (from.has_value()) { + set_value(from.value()); + } + } + mutable_unknown_fields()->MergeFrom(from.unknown_fields()); +} + +void Gauge::CopyFrom(const ::google::protobuf::Message& from) { + if (&from == this) return; + Clear(); + MergeFrom(from); +} + +void Gauge::CopyFrom(const Gauge& from) { + if (&from == this) return; + Clear(); + MergeFrom(from); +} + +bool Gauge::IsInitialized() const { + + return true; +} + +void Gauge::Swap(Gauge* other) { + if (other != this) { + std::swap(value_, other->value_); + std::swap(_has_bits_[0], other->_has_bits_[0]); + _unknown_fields_.Swap(&other->_unknown_fields_); + std::swap(_cached_size_, other->_cached_size_); + } +} + +::google::protobuf::Metadata Gauge::GetMetadata() const { + protobuf_AssignDescriptorsOnce(); + ::google::protobuf::Metadata metadata; + metadata.descriptor = Gauge_descriptor_; + metadata.reflection = Gauge_reflection_; + return metadata; +} + + +// =================================================================== + +#ifndef _MSC_VER +const int Counter::kValueFieldNumber; +#endif // !_MSC_VER + +Counter::Counter() + : ::google::protobuf::Message() { + SharedCtor(); + // @@protoc_insertion_point(constructor:io.prometheus.client.Counter) +} + +void Counter::InitAsDefaultInstance() { 
+} + +Counter::Counter(const Counter& from) + : ::google::protobuf::Message() { + SharedCtor(); + MergeFrom(from); + // @@protoc_insertion_point(copy_constructor:io.prometheus.client.Counter) +} + +void Counter::SharedCtor() { + _cached_size_ = 0; + value_ = 0; + ::memset(_has_bits_, 0, sizeof(_has_bits_)); +} + +Counter::~Counter() { + // @@protoc_insertion_point(destructor:io.prometheus.client.Counter) + SharedDtor(); +} + +void Counter::SharedDtor() { + if (this != default_instance_) { + } +} + +void Counter::SetCachedSize(int size) const { + GOOGLE_SAFE_CONCURRENT_WRITES_BEGIN(); + _cached_size_ = size; + GOOGLE_SAFE_CONCURRENT_WRITES_END(); +} +const ::google::protobuf::Descriptor* Counter::descriptor() { + protobuf_AssignDescriptorsOnce(); + return Counter_descriptor_; +} + +const Counter& Counter::default_instance() { + if (default_instance_ == NULL) protobuf_AddDesc_metrics_2eproto(); + return *default_instance_; +} + +Counter* Counter::default_instance_ = NULL; + +Counter* Counter::New() const { + return new Counter; +} + +void Counter::Clear() { + value_ = 0; + ::memset(_has_bits_, 0, sizeof(_has_bits_)); + mutable_unknown_fields()->Clear(); +} + +bool Counter::MergePartialFromCodedStream( + ::google::protobuf::io::CodedInputStream* input) { +#define DO_(EXPRESSION) if (!(EXPRESSION)) goto failure + ::google::protobuf::uint32 tag; + // @@protoc_insertion_point(parse_start:io.prometheus.client.Counter) + for (;;) { + ::std::pair< ::google::protobuf::uint32, bool> p = input->ReadTagWithCutoff(127); + tag = p.first; + if (!p.second) goto handle_unusual; + switch (::google::protobuf::internal::WireFormatLite::GetTagFieldNumber(tag)) { + // optional double value = 1; + case 1: { + if (tag == 9) { + DO_((::google::protobuf::internal::WireFormatLite::ReadPrimitive< + double, ::google::protobuf::internal::WireFormatLite::TYPE_DOUBLE>( + input, &value_))); + set_has_value(); + } else { + goto handle_unusual; + } + if (input->ExpectAtEnd()) goto success; + break; + 
} + + default: { + handle_unusual: + if (tag == 0 || + ::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) == + ::google::protobuf::internal::WireFormatLite::WIRETYPE_END_GROUP) { + goto success; + } + DO_(::google::protobuf::internal::WireFormat::SkipField( + input, tag, mutable_unknown_fields())); + break; + } + } + } +success: + // @@protoc_insertion_point(parse_success:io.prometheus.client.Counter) + return true; +failure: + // @@protoc_insertion_point(parse_failure:io.prometheus.client.Counter) + return false; +#undef DO_ +} + +void Counter::SerializeWithCachedSizes( + ::google::protobuf::io::CodedOutputStream* output) const { + // @@protoc_insertion_point(serialize_start:io.prometheus.client.Counter) + // optional double value = 1; + if (has_value()) { + ::google::protobuf::internal::WireFormatLite::WriteDouble(1, this->value(), output); + } + + if (!unknown_fields().empty()) { + ::google::protobuf::internal::WireFormat::SerializeUnknownFields( + unknown_fields(), output); + } + // @@protoc_insertion_point(serialize_end:io.prometheus.client.Counter) +} + +::google::protobuf::uint8* Counter::SerializeWithCachedSizesToArray( + ::google::protobuf::uint8* target) const { + // @@protoc_insertion_point(serialize_to_array_start:io.prometheus.client.Counter) + // optional double value = 1; + if (has_value()) { + target = ::google::protobuf::internal::WireFormatLite::WriteDoubleToArray(1, this->value(), target); + } + + if (!unknown_fields().empty()) { + target = ::google::protobuf::internal::WireFormat::SerializeUnknownFieldsToArray( + unknown_fields(), target); + } + // @@protoc_insertion_point(serialize_to_array_end:io.prometheus.client.Counter) + return target; +} + +int Counter::ByteSize() const { + int total_size = 0; + + if (_has_bits_[0 / 32] & (0xffu << (0 % 32))) { + // optional double value = 1; + if (has_value()) { + total_size += 1 + 8; + } + + } + if (!unknown_fields().empty()) { + total_size += + 
::google::protobuf::internal::WireFormat::ComputeUnknownFieldsSize( + unknown_fields()); + } + GOOGLE_SAFE_CONCURRENT_WRITES_BEGIN(); + _cached_size_ = total_size; + GOOGLE_SAFE_CONCURRENT_WRITES_END(); + return total_size; +} + +void Counter::MergeFrom(const ::google::protobuf::Message& from) { + GOOGLE_CHECK_NE(&from, this); + const Counter* source = + ::google::protobuf::internal::dynamic_cast_if_available( + &from); + if (source == NULL) { + ::google::protobuf::internal::ReflectionOps::Merge(from, this); + } else { + MergeFrom(*source); + } +} + +void Counter::MergeFrom(const Counter& from) { + GOOGLE_CHECK_NE(&from, this); + if (from._has_bits_[0 / 32] & (0xffu << (0 % 32))) { + if (from.has_value()) { + set_value(from.value()); + } + } + mutable_unknown_fields()->MergeFrom(from.unknown_fields()); +} + +void Counter::CopyFrom(const ::google::protobuf::Message& from) { + if (&from == this) return; + Clear(); + MergeFrom(from); +} + +void Counter::CopyFrom(const Counter& from) { + if (&from == this) return; + Clear(); + MergeFrom(from); +} + +bool Counter::IsInitialized() const { + + return true; +} + +void Counter::Swap(Counter* other) { + if (other != this) { + std::swap(value_, other->value_); + std::swap(_has_bits_[0], other->_has_bits_[0]); + _unknown_fields_.Swap(&other->_unknown_fields_); + std::swap(_cached_size_, other->_cached_size_); + } +} + +::google::protobuf::Metadata Counter::GetMetadata() const { + protobuf_AssignDescriptorsOnce(); + ::google::protobuf::Metadata metadata; + metadata.descriptor = Counter_descriptor_; + metadata.reflection = Counter_reflection_; + return metadata; +} + + +// =================================================================== + +#ifndef _MSC_VER +const int Quantile::kQuantileFieldNumber; +const int Quantile::kValueFieldNumber; +#endif // !_MSC_VER + +Quantile::Quantile() + : ::google::protobuf::Message() { + SharedCtor(); + // @@protoc_insertion_point(constructor:io.prometheus.client.Quantile) +} + +void 
Quantile::InitAsDefaultInstance() { +} + +Quantile::Quantile(const Quantile& from) + : ::google::protobuf::Message() { + SharedCtor(); + MergeFrom(from); + // @@protoc_insertion_point(copy_constructor:io.prometheus.client.Quantile) +} + +void Quantile::SharedCtor() { + _cached_size_ = 0; + quantile_ = 0; + value_ = 0; + ::memset(_has_bits_, 0, sizeof(_has_bits_)); +} + +Quantile::~Quantile() { + // @@protoc_insertion_point(destructor:io.prometheus.client.Quantile) + SharedDtor(); +} + +void Quantile::SharedDtor() { + if (this != default_instance_) { + } +} + +void Quantile::SetCachedSize(int size) const { + GOOGLE_SAFE_CONCURRENT_WRITES_BEGIN(); + _cached_size_ = size; + GOOGLE_SAFE_CONCURRENT_WRITES_END(); +} +const ::google::protobuf::Descriptor* Quantile::descriptor() { + protobuf_AssignDescriptorsOnce(); + return Quantile_descriptor_; +} + +const Quantile& Quantile::default_instance() { + if (default_instance_ == NULL) protobuf_AddDesc_metrics_2eproto(); + return *default_instance_; +} + +Quantile* Quantile::default_instance_ = NULL; + +Quantile* Quantile::New() const { + return new Quantile; +} + +void Quantile::Clear() { +#define OFFSET_OF_FIELD_(f) (reinterpret_cast( \ + &reinterpret_cast(16)->f) - \ + reinterpret_cast(16)) + +#define ZR_(first, last) do { \ + size_t f = OFFSET_OF_FIELD_(first); \ + size_t n = OFFSET_OF_FIELD_(last) - f + sizeof(last); \ + ::memset(&first, 0, n); \ + } while (0) + + ZR_(quantile_, value_); + +#undef OFFSET_OF_FIELD_ +#undef ZR_ + + ::memset(_has_bits_, 0, sizeof(_has_bits_)); + mutable_unknown_fields()->Clear(); +} + +bool Quantile::MergePartialFromCodedStream( + ::google::protobuf::io::CodedInputStream* input) { +#define DO_(EXPRESSION) if (!(EXPRESSION)) goto failure + ::google::protobuf::uint32 tag; + // @@protoc_insertion_point(parse_start:io.prometheus.client.Quantile) + for (;;) { + ::std::pair< ::google::protobuf::uint32, bool> p = input->ReadTagWithCutoff(127); + tag = p.first; + if (!p.second) goto handle_unusual; + 
switch (::google::protobuf::internal::WireFormatLite::GetTagFieldNumber(tag)) { + // optional double quantile = 1; + case 1: { + if (tag == 9) { + DO_((::google::protobuf::internal::WireFormatLite::ReadPrimitive< + double, ::google::protobuf::internal::WireFormatLite::TYPE_DOUBLE>( + input, &quantile_))); + set_has_quantile(); + } else { + goto handle_unusual; + } + if (input->ExpectTag(17)) goto parse_value; + break; + } + + // optional double value = 2; + case 2: { + if (tag == 17) { + parse_value: + DO_((::google::protobuf::internal::WireFormatLite::ReadPrimitive< + double, ::google::protobuf::internal::WireFormatLite::TYPE_DOUBLE>( + input, &value_))); + set_has_value(); + } else { + goto handle_unusual; + } + if (input->ExpectAtEnd()) goto success; + break; + } + + default: { + handle_unusual: + if (tag == 0 || + ::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) == + ::google::protobuf::internal::WireFormatLite::WIRETYPE_END_GROUP) { + goto success; + } + DO_(::google::protobuf::internal::WireFormat::SkipField( + input, tag, mutable_unknown_fields())); + break; + } + } + } +success: + // @@protoc_insertion_point(parse_success:io.prometheus.client.Quantile) + return true; +failure: + // @@protoc_insertion_point(parse_failure:io.prometheus.client.Quantile) + return false; +#undef DO_ +} + +void Quantile::SerializeWithCachedSizes( + ::google::protobuf::io::CodedOutputStream* output) const { + // @@protoc_insertion_point(serialize_start:io.prometheus.client.Quantile) + // optional double quantile = 1; + if (has_quantile()) { + ::google::protobuf::internal::WireFormatLite::WriteDouble(1, this->quantile(), output); + } + + // optional double value = 2; + if (has_value()) { + ::google::protobuf::internal::WireFormatLite::WriteDouble(2, this->value(), output); + } + + if (!unknown_fields().empty()) { + ::google::protobuf::internal::WireFormat::SerializeUnknownFields( + unknown_fields(), output); + } + // 
@@protoc_insertion_point(serialize_end:io.prometheus.client.Quantile) +} + +::google::protobuf::uint8* Quantile::SerializeWithCachedSizesToArray( + ::google::protobuf::uint8* target) const { + // @@protoc_insertion_point(serialize_to_array_start:io.prometheus.client.Quantile) + // optional double quantile = 1; + if (has_quantile()) { + target = ::google::protobuf::internal::WireFormatLite::WriteDoubleToArray(1, this->quantile(), target); + } + + // optional double value = 2; + if (has_value()) { + target = ::google::protobuf::internal::WireFormatLite::WriteDoubleToArray(2, this->value(), target); + } + + if (!unknown_fields().empty()) { + target = ::google::protobuf::internal::WireFormat::SerializeUnknownFieldsToArray( + unknown_fields(), target); + } + // @@protoc_insertion_point(serialize_to_array_end:io.prometheus.client.Quantile) + return target; +} + +int Quantile::ByteSize() const { + int total_size = 0; + + if (_has_bits_[0 / 32] & (0xffu << (0 % 32))) { + // optional double quantile = 1; + if (has_quantile()) { + total_size += 1 + 8; + } + + // optional double value = 2; + if (has_value()) { + total_size += 1 + 8; + } + + } + if (!unknown_fields().empty()) { + total_size += + ::google::protobuf::internal::WireFormat::ComputeUnknownFieldsSize( + unknown_fields()); + } + GOOGLE_SAFE_CONCURRENT_WRITES_BEGIN(); + _cached_size_ = total_size; + GOOGLE_SAFE_CONCURRENT_WRITES_END(); + return total_size; +} + +void Quantile::MergeFrom(const ::google::protobuf::Message& from) { + GOOGLE_CHECK_NE(&from, this); + const Quantile* source = + ::google::protobuf::internal::dynamic_cast_if_available( + &from); + if (source == NULL) { + ::google::protobuf::internal::ReflectionOps::Merge(from, this); + } else { + MergeFrom(*source); + } +} + +void Quantile::MergeFrom(const Quantile& from) { + GOOGLE_CHECK_NE(&from, this); + if (from._has_bits_[0 / 32] & (0xffu << (0 % 32))) { + if (from.has_quantile()) { + set_quantile(from.quantile()); + } + if (from.has_value()) { + 
set_value(from.value()); + } + } + mutable_unknown_fields()->MergeFrom(from.unknown_fields()); +} + +void Quantile::CopyFrom(const ::google::protobuf::Message& from) { + if (&from == this) return; + Clear(); + MergeFrom(from); +} + +void Quantile::CopyFrom(const Quantile& from) { + if (&from == this) return; + Clear(); + MergeFrom(from); +} + +bool Quantile::IsInitialized() const { + + return true; +} + +void Quantile::Swap(Quantile* other) { + if (other != this) { + std::swap(quantile_, other->quantile_); + std::swap(value_, other->value_); + std::swap(_has_bits_[0], other->_has_bits_[0]); + _unknown_fields_.Swap(&other->_unknown_fields_); + std::swap(_cached_size_, other->_cached_size_); + } +} + +::google::protobuf::Metadata Quantile::GetMetadata() const { + protobuf_AssignDescriptorsOnce(); + ::google::protobuf::Metadata metadata; + metadata.descriptor = Quantile_descriptor_; + metadata.reflection = Quantile_reflection_; + return metadata; +} + + +// =================================================================== + +#ifndef _MSC_VER +const int Summary::kSampleCountFieldNumber; +const int Summary::kSampleSumFieldNumber; +const int Summary::kQuantileFieldNumber; +#endif // !_MSC_VER + +Summary::Summary() + : ::google::protobuf::Message() { + SharedCtor(); + // @@protoc_insertion_point(constructor:io.prometheus.client.Summary) +} + +void Summary::InitAsDefaultInstance() { +} + +Summary::Summary(const Summary& from) + : ::google::protobuf::Message() { + SharedCtor(); + MergeFrom(from); + // @@protoc_insertion_point(copy_constructor:io.prometheus.client.Summary) +} + +void Summary::SharedCtor() { + _cached_size_ = 0; + sample_count_ = GOOGLE_ULONGLONG(0); + sample_sum_ = 0; + ::memset(_has_bits_, 0, sizeof(_has_bits_)); +} + +Summary::~Summary() { + // @@protoc_insertion_point(destructor:io.prometheus.client.Summary) + SharedDtor(); +} + +void Summary::SharedDtor() { + if (this != default_instance_) { + } +} + +void Summary::SetCachedSize(int size) const { + 
GOOGLE_SAFE_CONCURRENT_WRITES_BEGIN(); + _cached_size_ = size; + GOOGLE_SAFE_CONCURRENT_WRITES_END(); +} +const ::google::protobuf::Descriptor* Summary::descriptor() { + protobuf_AssignDescriptorsOnce(); + return Summary_descriptor_; +} + +const Summary& Summary::default_instance() { + if (default_instance_ == NULL) protobuf_AddDesc_metrics_2eproto(); + return *default_instance_; +} + +Summary* Summary::default_instance_ = NULL; + +Summary* Summary::New() const { + return new Summary; +} + +void Summary::Clear() { +#define OFFSET_OF_FIELD_(f) (reinterpret_cast( \ + &reinterpret_cast(16)->f) - \ + reinterpret_cast(16)) + +#define ZR_(first, last) do { \ + size_t f = OFFSET_OF_FIELD_(first); \ + size_t n = OFFSET_OF_FIELD_(last) - f + sizeof(last); \ + ::memset(&first, 0, n); \ + } while (0) + + ZR_(sample_count_, sample_sum_); + +#undef OFFSET_OF_FIELD_ +#undef ZR_ + + quantile_.Clear(); + ::memset(_has_bits_, 0, sizeof(_has_bits_)); + mutable_unknown_fields()->Clear(); +} + +bool Summary::MergePartialFromCodedStream( + ::google::protobuf::io::CodedInputStream* input) { +#define DO_(EXPRESSION) if (!(EXPRESSION)) goto failure + ::google::protobuf::uint32 tag; + // @@protoc_insertion_point(parse_start:io.prometheus.client.Summary) + for (;;) { + ::std::pair< ::google::protobuf::uint32, bool> p = input->ReadTagWithCutoff(127); + tag = p.first; + if (!p.second) goto handle_unusual; + switch (::google::protobuf::internal::WireFormatLite::GetTagFieldNumber(tag)) { + // optional uint64 sample_count = 1; + case 1: { + if (tag == 8) { + DO_((::google::protobuf::internal::WireFormatLite::ReadPrimitive< + ::google::protobuf::uint64, ::google::protobuf::internal::WireFormatLite::TYPE_UINT64>( + input, &sample_count_))); + set_has_sample_count(); + } else { + goto handle_unusual; + } + if (input->ExpectTag(17)) goto parse_sample_sum; + break; + } + + // optional double sample_sum = 2; + case 2: { + if (tag == 17) { + parse_sample_sum: + 
DO_((::google::protobuf::internal::WireFormatLite::ReadPrimitive< + double, ::google::protobuf::internal::WireFormatLite::TYPE_DOUBLE>( + input, &sample_sum_))); + set_has_sample_sum(); + } else { + goto handle_unusual; + } + if (input->ExpectTag(26)) goto parse_quantile; + break; + } + + // repeated .io.prometheus.client.Quantile quantile = 3; + case 3: { + if (tag == 26) { + parse_quantile: + DO_(::google::protobuf::internal::WireFormatLite::ReadMessageNoVirtual( + input, add_quantile())); + } else { + goto handle_unusual; + } + if (input->ExpectTag(26)) goto parse_quantile; + if (input->ExpectAtEnd()) goto success; + break; + } + + default: { + handle_unusual: + if (tag == 0 || + ::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) == + ::google::protobuf::internal::WireFormatLite::WIRETYPE_END_GROUP) { + goto success; + } + DO_(::google::protobuf::internal::WireFormat::SkipField( + input, tag, mutable_unknown_fields())); + break; + } + } + } +success: + // @@protoc_insertion_point(parse_success:io.prometheus.client.Summary) + return true; +failure: + // @@protoc_insertion_point(parse_failure:io.prometheus.client.Summary) + return false; +#undef DO_ +} + +void Summary::SerializeWithCachedSizes( + ::google::protobuf::io::CodedOutputStream* output) const { + // @@protoc_insertion_point(serialize_start:io.prometheus.client.Summary) + // optional uint64 sample_count = 1; + if (has_sample_count()) { + ::google::protobuf::internal::WireFormatLite::WriteUInt64(1, this->sample_count(), output); + } + + // optional double sample_sum = 2; + if (has_sample_sum()) { + ::google::protobuf::internal::WireFormatLite::WriteDouble(2, this->sample_sum(), output); + } + + // repeated .io.prometheus.client.Quantile quantile = 3; + for (int i = 0; i < this->quantile_size(); i++) { + ::google::protobuf::internal::WireFormatLite::WriteMessageMaybeToArray( + 3, this->quantile(i), output); + } + + if (!unknown_fields().empty()) { + 
::google::protobuf::internal::WireFormat::SerializeUnknownFields( + unknown_fields(), output); + } + // @@protoc_insertion_point(serialize_end:io.prometheus.client.Summary) +} + +::google::protobuf::uint8* Summary::SerializeWithCachedSizesToArray( + ::google::protobuf::uint8* target) const { + // @@protoc_insertion_point(serialize_to_array_start:io.prometheus.client.Summary) + // optional uint64 sample_count = 1; + if (has_sample_count()) { + target = ::google::protobuf::internal::WireFormatLite::WriteUInt64ToArray(1, this->sample_count(), target); + } + + // optional double sample_sum = 2; + if (has_sample_sum()) { + target = ::google::protobuf::internal::WireFormatLite::WriteDoubleToArray(2, this->sample_sum(), target); + } + + // repeated .io.prometheus.client.Quantile quantile = 3; + for (int i = 0; i < this->quantile_size(); i++) { + target = ::google::protobuf::internal::WireFormatLite:: + WriteMessageNoVirtualToArray( + 3, this->quantile(i), target); + } + + if (!unknown_fields().empty()) { + target = ::google::protobuf::internal::WireFormat::SerializeUnknownFieldsToArray( + unknown_fields(), target); + } + // @@protoc_insertion_point(serialize_to_array_end:io.prometheus.client.Summary) + return target; +} + +int Summary::ByteSize() const { + int total_size = 0; + + if (_has_bits_[0 / 32] & (0xffu << (0 % 32))) { + // optional uint64 sample_count = 1; + if (has_sample_count()) { + total_size += 1 + + ::google::protobuf::internal::WireFormatLite::UInt64Size( + this->sample_count()); + } + + // optional double sample_sum = 2; + if (has_sample_sum()) { + total_size += 1 + 8; + } + + } + // repeated .io.prometheus.client.Quantile quantile = 3; + total_size += 1 * this->quantile_size(); + for (int i = 0; i < this->quantile_size(); i++) { + total_size += + ::google::protobuf::internal::WireFormatLite::MessageSizeNoVirtual( + this->quantile(i)); + } + + if (!unknown_fields().empty()) { + total_size += + 
::google::protobuf::internal::WireFormat::ComputeUnknownFieldsSize( + unknown_fields()); + } + GOOGLE_SAFE_CONCURRENT_WRITES_BEGIN(); + _cached_size_ = total_size; + GOOGLE_SAFE_CONCURRENT_WRITES_END(); + return total_size; +} + +void Summary::MergeFrom(const ::google::protobuf::Message& from) { + GOOGLE_CHECK_NE(&from, this); + const Summary* source = + ::google::protobuf::internal::dynamic_cast_if_available( + &from); + if (source == NULL) { + ::google::protobuf::internal::ReflectionOps::Merge(from, this); + } else { + MergeFrom(*source); + } +} + +void Summary::MergeFrom(const Summary& from) { + GOOGLE_CHECK_NE(&from, this); + quantile_.MergeFrom(from.quantile_); + if (from._has_bits_[0 / 32] & (0xffu << (0 % 32))) { + if (from.has_sample_count()) { + set_sample_count(from.sample_count()); + } + if (from.has_sample_sum()) { + set_sample_sum(from.sample_sum()); + } + } + mutable_unknown_fields()->MergeFrom(from.unknown_fields()); +} + +void Summary::CopyFrom(const ::google::protobuf::Message& from) { + if (&from == this) return; + Clear(); + MergeFrom(from); +} + +void Summary::CopyFrom(const Summary& from) { + if (&from == this) return; + Clear(); + MergeFrom(from); +} + +bool Summary::IsInitialized() const { + + return true; +} + +void Summary::Swap(Summary* other) { + if (other != this) { + std::swap(sample_count_, other->sample_count_); + std::swap(sample_sum_, other->sample_sum_); + quantile_.Swap(&other->quantile_); + std::swap(_has_bits_[0], other->_has_bits_[0]); + _unknown_fields_.Swap(&other->_unknown_fields_); + std::swap(_cached_size_, other->_cached_size_); + } +} + +::google::protobuf::Metadata Summary::GetMetadata() const { + protobuf_AssignDescriptorsOnce(); + ::google::protobuf::Metadata metadata; + metadata.descriptor = Summary_descriptor_; + metadata.reflection = Summary_reflection_; + return metadata; +} + + +// =================================================================== + +#ifndef _MSC_VER +const int Untyped::kValueFieldNumber; 
+#endif // !_MSC_VER + +Untyped::Untyped() + : ::google::protobuf::Message() { + SharedCtor(); + // @@protoc_insertion_point(constructor:io.prometheus.client.Untyped) +} + +void Untyped::InitAsDefaultInstance() { +} + +Untyped::Untyped(const Untyped& from) + : ::google::protobuf::Message() { + SharedCtor(); + MergeFrom(from); + // @@protoc_insertion_point(copy_constructor:io.prometheus.client.Untyped) +} + +void Untyped::SharedCtor() { + _cached_size_ = 0; + value_ = 0; + ::memset(_has_bits_, 0, sizeof(_has_bits_)); +} + +Untyped::~Untyped() { + // @@protoc_insertion_point(destructor:io.prometheus.client.Untyped) + SharedDtor(); +} + +void Untyped::SharedDtor() { + if (this != default_instance_) { + } +} + +void Untyped::SetCachedSize(int size) const { + GOOGLE_SAFE_CONCURRENT_WRITES_BEGIN(); + _cached_size_ = size; + GOOGLE_SAFE_CONCURRENT_WRITES_END(); +} +const ::google::protobuf::Descriptor* Untyped::descriptor() { + protobuf_AssignDescriptorsOnce(); + return Untyped_descriptor_; +} + +const Untyped& Untyped::default_instance() { + if (default_instance_ == NULL) protobuf_AddDesc_metrics_2eproto(); + return *default_instance_; +} + +Untyped* Untyped::default_instance_ = NULL; + +Untyped* Untyped::New() const { + return new Untyped; +} + +void Untyped::Clear() { + value_ = 0; + ::memset(_has_bits_, 0, sizeof(_has_bits_)); + mutable_unknown_fields()->Clear(); +} + +bool Untyped::MergePartialFromCodedStream( + ::google::protobuf::io::CodedInputStream* input) { +#define DO_(EXPRESSION) if (!(EXPRESSION)) goto failure + ::google::protobuf::uint32 tag; + // @@protoc_insertion_point(parse_start:io.prometheus.client.Untyped) + for (;;) { + ::std::pair< ::google::protobuf::uint32, bool> p = input->ReadTagWithCutoff(127); + tag = p.first; + if (!p.second) goto handle_unusual; + switch (::google::protobuf::internal::WireFormatLite::GetTagFieldNumber(tag)) { + // optional double value = 1; + case 1: { + if (tag == 9) { + 
DO_((::google::protobuf::internal::WireFormatLite::ReadPrimitive< + double, ::google::protobuf::internal::WireFormatLite::TYPE_DOUBLE>( + input, &value_))); + set_has_value(); + } else { + goto handle_unusual; + } + if (input->ExpectAtEnd()) goto success; + break; + } + + default: { + handle_unusual: + if (tag == 0 || + ::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) == + ::google::protobuf::internal::WireFormatLite::WIRETYPE_END_GROUP) { + goto success; + } + DO_(::google::protobuf::internal::WireFormat::SkipField( + input, tag, mutable_unknown_fields())); + break; + } + } + } +success: + // @@protoc_insertion_point(parse_success:io.prometheus.client.Untyped) + return true; +failure: + // @@protoc_insertion_point(parse_failure:io.prometheus.client.Untyped) + return false; +#undef DO_ +} + +void Untyped::SerializeWithCachedSizes( + ::google::protobuf::io::CodedOutputStream* output) const { + // @@protoc_insertion_point(serialize_start:io.prometheus.client.Untyped) + // optional double value = 1; + if (has_value()) { + ::google::protobuf::internal::WireFormatLite::WriteDouble(1, this->value(), output); + } + + if (!unknown_fields().empty()) { + ::google::protobuf::internal::WireFormat::SerializeUnknownFields( + unknown_fields(), output); + } + // @@protoc_insertion_point(serialize_end:io.prometheus.client.Untyped) +} + +::google::protobuf::uint8* Untyped::SerializeWithCachedSizesToArray( + ::google::protobuf::uint8* target) const { + // @@protoc_insertion_point(serialize_to_array_start:io.prometheus.client.Untyped) + // optional double value = 1; + if (has_value()) { + target = ::google::protobuf::internal::WireFormatLite::WriteDoubleToArray(1, this->value(), target); + } + + if (!unknown_fields().empty()) { + target = ::google::protobuf::internal::WireFormat::SerializeUnknownFieldsToArray( + unknown_fields(), target); + } + // @@protoc_insertion_point(serialize_to_array_end:io.prometheus.client.Untyped) + return target; +} + +int 
Untyped::ByteSize() const { + int total_size = 0; + + if (_has_bits_[0 / 32] & (0xffu << (0 % 32))) { + // optional double value = 1; + if (has_value()) { + total_size += 1 + 8; + } + + } + if (!unknown_fields().empty()) { + total_size += + ::google::protobuf::internal::WireFormat::ComputeUnknownFieldsSize( + unknown_fields()); + } + GOOGLE_SAFE_CONCURRENT_WRITES_BEGIN(); + _cached_size_ = total_size; + GOOGLE_SAFE_CONCURRENT_WRITES_END(); + return total_size; +} + +void Untyped::MergeFrom(const ::google::protobuf::Message& from) { + GOOGLE_CHECK_NE(&from, this); + const Untyped* source = + ::google::protobuf::internal::dynamic_cast_if_available( + &from); + if (source == NULL) { + ::google::protobuf::internal::ReflectionOps::Merge(from, this); + } else { + MergeFrom(*source); + } +} + +void Untyped::MergeFrom(const Untyped& from) { + GOOGLE_CHECK_NE(&from, this); + if (from._has_bits_[0 / 32] & (0xffu << (0 % 32))) { + if (from.has_value()) { + set_value(from.value()); + } + } + mutable_unknown_fields()->MergeFrom(from.unknown_fields()); +} + +void Untyped::CopyFrom(const ::google::protobuf::Message& from) { + if (&from == this) return; + Clear(); + MergeFrom(from); +} + +void Untyped::CopyFrom(const Untyped& from) { + if (&from == this) return; + Clear(); + MergeFrom(from); +} + +bool Untyped::IsInitialized() const { + + return true; +} + +void Untyped::Swap(Untyped* other) { + if (other != this) { + std::swap(value_, other->value_); + std::swap(_has_bits_[0], other->_has_bits_[0]); + _unknown_fields_.Swap(&other->_unknown_fields_); + std::swap(_cached_size_, other->_cached_size_); + } +} + +::google::protobuf::Metadata Untyped::GetMetadata() const { + protobuf_AssignDescriptorsOnce(); + ::google::protobuf::Metadata metadata; + metadata.descriptor = Untyped_descriptor_; + metadata.reflection = Untyped_reflection_; + return metadata; +} + + +// =================================================================== + +#ifndef _MSC_VER +const int 
Histogram::kSampleCountFieldNumber; +const int Histogram::kSampleSumFieldNumber; +const int Histogram::kBucketFieldNumber; +#endif // !_MSC_VER + +Histogram::Histogram() + : ::google::protobuf::Message() { + SharedCtor(); + // @@protoc_insertion_point(constructor:io.prometheus.client.Histogram) +} + +void Histogram::InitAsDefaultInstance() { +} + +Histogram::Histogram(const Histogram& from) + : ::google::protobuf::Message() { + SharedCtor(); + MergeFrom(from); + // @@protoc_insertion_point(copy_constructor:io.prometheus.client.Histogram) +} + +void Histogram::SharedCtor() { + _cached_size_ = 0; + sample_count_ = GOOGLE_ULONGLONG(0); + sample_sum_ = 0; + ::memset(_has_bits_, 0, sizeof(_has_bits_)); +} + +Histogram::~Histogram() { + // @@protoc_insertion_point(destructor:io.prometheus.client.Histogram) + SharedDtor(); +} + +void Histogram::SharedDtor() { + if (this != default_instance_) { + } +} + +void Histogram::SetCachedSize(int size) const { + GOOGLE_SAFE_CONCURRENT_WRITES_BEGIN(); + _cached_size_ = size; + GOOGLE_SAFE_CONCURRENT_WRITES_END(); +} +const ::google::protobuf::Descriptor* Histogram::descriptor() { + protobuf_AssignDescriptorsOnce(); + return Histogram_descriptor_; +} + +const Histogram& Histogram::default_instance() { + if (default_instance_ == NULL) protobuf_AddDesc_metrics_2eproto(); + return *default_instance_; +} + +Histogram* Histogram::default_instance_ = NULL; + +Histogram* Histogram::New() const { + return new Histogram; +} + +void Histogram::Clear() { +#define OFFSET_OF_FIELD_(f) (reinterpret_cast( \ + &reinterpret_cast(16)->f) - \ + reinterpret_cast(16)) + +#define ZR_(first, last) do { \ + size_t f = OFFSET_OF_FIELD_(first); \ + size_t n = OFFSET_OF_FIELD_(last) - f + sizeof(last); \ + ::memset(&first, 0, n); \ + } while (0) + + ZR_(sample_count_, sample_sum_); + +#undef OFFSET_OF_FIELD_ +#undef ZR_ + + bucket_.Clear(); + ::memset(_has_bits_, 0, sizeof(_has_bits_)); + mutable_unknown_fields()->Clear(); +} + +bool 
Histogram::MergePartialFromCodedStream( + ::google::protobuf::io::CodedInputStream* input) { +#define DO_(EXPRESSION) if (!(EXPRESSION)) goto failure + ::google::protobuf::uint32 tag; + // @@protoc_insertion_point(parse_start:io.prometheus.client.Histogram) + for (;;) { + ::std::pair< ::google::protobuf::uint32, bool> p = input->ReadTagWithCutoff(127); + tag = p.first; + if (!p.second) goto handle_unusual; + switch (::google::protobuf::internal::WireFormatLite::GetTagFieldNumber(tag)) { + // optional uint64 sample_count = 1; + case 1: { + if (tag == 8) { + DO_((::google::protobuf::internal::WireFormatLite::ReadPrimitive< + ::google::protobuf::uint64, ::google::protobuf::internal::WireFormatLite::TYPE_UINT64>( + input, &sample_count_))); + set_has_sample_count(); + } else { + goto handle_unusual; + } + if (input->ExpectTag(17)) goto parse_sample_sum; + break; + } + + // optional double sample_sum = 2; + case 2: { + if (tag == 17) { + parse_sample_sum: + DO_((::google::protobuf::internal::WireFormatLite::ReadPrimitive< + double, ::google::protobuf::internal::WireFormatLite::TYPE_DOUBLE>( + input, &sample_sum_))); + set_has_sample_sum(); + } else { + goto handle_unusual; + } + if (input->ExpectTag(26)) goto parse_bucket; + break; + } + + // repeated .io.prometheus.client.Bucket bucket = 3; + case 3: { + if (tag == 26) { + parse_bucket: + DO_(::google::protobuf::internal::WireFormatLite::ReadMessageNoVirtual( + input, add_bucket())); + } else { + goto handle_unusual; + } + if (input->ExpectTag(26)) goto parse_bucket; + if (input->ExpectAtEnd()) goto success; + break; + } + + default: { + handle_unusual: + if (tag == 0 || + ::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) == + ::google::protobuf::internal::WireFormatLite::WIRETYPE_END_GROUP) { + goto success; + } + DO_(::google::protobuf::internal::WireFormat::SkipField( + input, tag, mutable_unknown_fields())); + break; + } + } + } +success: + // 
@@protoc_insertion_point(parse_success:io.prometheus.client.Histogram) + return true; +failure: + // @@protoc_insertion_point(parse_failure:io.prometheus.client.Histogram) + return false; +#undef DO_ +} + +void Histogram::SerializeWithCachedSizes( + ::google::protobuf::io::CodedOutputStream* output) const { + // @@protoc_insertion_point(serialize_start:io.prometheus.client.Histogram) + // optional uint64 sample_count = 1; + if (has_sample_count()) { + ::google::protobuf::internal::WireFormatLite::WriteUInt64(1, this->sample_count(), output); + } + + // optional double sample_sum = 2; + if (has_sample_sum()) { + ::google::protobuf::internal::WireFormatLite::WriteDouble(2, this->sample_sum(), output); + } + + // repeated .io.prometheus.client.Bucket bucket = 3; + for (int i = 0; i < this->bucket_size(); i++) { + ::google::protobuf::internal::WireFormatLite::WriteMessageMaybeToArray( + 3, this->bucket(i), output); + } + + if (!unknown_fields().empty()) { + ::google::protobuf::internal::WireFormat::SerializeUnknownFields( + unknown_fields(), output); + } + // @@protoc_insertion_point(serialize_end:io.prometheus.client.Histogram) +} + +::google::protobuf::uint8* Histogram::SerializeWithCachedSizesToArray( + ::google::protobuf::uint8* target) const { + // @@protoc_insertion_point(serialize_to_array_start:io.prometheus.client.Histogram) + // optional uint64 sample_count = 1; + if (has_sample_count()) { + target = ::google::protobuf::internal::WireFormatLite::WriteUInt64ToArray(1, this->sample_count(), target); + } + + // optional double sample_sum = 2; + if (has_sample_sum()) { + target = ::google::protobuf::internal::WireFormatLite::WriteDoubleToArray(2, this->sample_sum(), target); + } + + // repeated .io.prometheus.client.Bucket bucket = 3; + for (int i = 0; i < this->bucket_size(); i++) { + target = ::google::protobuf::internal::WireFormatLite:: + WriteMessageNoVirtualToArray( + 3, this->bucket(i), target); + } + + if (!unknown_fields().empty()) { + target = 
::google::protobuf::internal::WireFormat::SerializeUnknownFieldsToArray( + unknown_fields(), target); + } + // @@protoc_insertion_point(serialize_to_array_end:io.prometheus.client.Histogram) + return target; +} + +int Histogram::ByteSize() const { + int total_size = 0; + + if (_has_bits_[0 / 32] & (0xffu << (0 % 32))) { + // optional uint64 sample_count = 1; + if (has_sample_count()) { + total_size += 1 + + ::google::protobuf::internal::WireFormatLite::UInt64Size( + this->sample_count()); + } + + // optional double sample_sum = 2; + if (has_sample_sum()) { + total_size += 1 + 8; + } + + } + // repeated .io.prometheus.client.Bucket bucket = 3; + total_size += 1 * this->bucket_size(); + for (int i = 0; i < this->bucket_size(); i++) { + total_size += + ::google::protobuf::internal::WireFormatLite::MessageSizeNoVirtual( + this->bucket(i)); + } + + if (!unknown_fields().empty()) { + total_size += + ::google::protobuf::internal::WireFormat::ComputeUnknownFieldsSize( + unknown_fields()); + } + GOOGLE_SAFE_CONCURRENT_WRITES_BEGIN(); + _cached_size_ = total_size; + GOOGLE_SAFE_CONCURRENT_WRITES_END(); + return total_size; +} + +void Histogram::MergeFrom(const ::google::protobuf::Message& from) { + GOOGLE_CHECK_NE(&from, this); + const Histogram* source = + ::google::protobuf::internal::dynamic_cast_if_available( + &from); + if (source == NULL) { + ::google::protobuf::internal::ReflectionOps::Merge(from, this); + } else { + MergeFrom(*source); + } +} + +void Histogram::MergeFrom(const Histogram& from) { + GOOGLE_CHECK_NE(&from, this); + bucket_.MergeFrom(from.bucket_); + if (from._has_bits_[0 / 32] & (0xffu << (0 % 32))) { + if (from.has_sample_count()) { + set_sample_count(from.sample_count()); + } + if (from.has_sample_sum()) { + set_sample_sum(from.sample_sum()); + } + } + mutable_unknown_fields()->MergeFrom(from.unknown_fields()); +} + +void Histogram::CopyFrom(const ::google::protobuf::Message& from) { + if (&from == this) return; + Clear(); + MergeFrom(from); +} + 
+void Histogram::CopyFrom(const Histogram& from) { + if (&from == this) return; + Clear(); + MergeFrom(from); +} + +bool Histogram::IsInitialized() const { + + return true; +} + +void Histogram::Swap(Histogram* other) { + if (other != this) { + std::swap(sample_count_, other->sample_count_); + std::swap(sample_sum_, other->sample_sum_); + bucket_.Swap(&other->bucket_); + std::swap(_has_bits_[0], other->_has_bits_[0]); + _unknown_fields_.Swap(&other->_unknown_fields_); + std::swap(_cached_size_, other->_cached_size_); + } +} + +::google::protobuf::Metadata Histogram::GetMetadata() const { + protobuf_AssignDescriptorsOnce(); + ::google::protobuf::Metadata metadata; + metadata.descriptor = Histogram_descriptor_; + metadata.reflection = Histogram_reflection_; + return metadata; +} + + +// =================================================================== + +#ifndef _MSC_VER +const int Bucket::kCumulativeCountFieldNumber; +const int Bucket::kUpperBoundFieldNumber; +#endif // !_MSC_VER + +Bucket::Bucket() + : ::google::protobuf::Message() { + SharedCtor(); + // @@protoc_insertion_point(constructor:io.prometheus.client.Bucket) +} + +void Bucket::InitAsDefaultInstance() { +} + +Bucket::Bucket(const Bucket& from) + : ::google::protobuf::Message() { + SharedCtor(); + MergeFrom(from); + // @@protoc_insertion_point(copy_constructor:io.prometheus.client.Bucket) +} + +void Bucket::SharedCtor() { + _cached_size_ = 0; + cumulative_count_ = GOOGLE_ULONGLONG(0); + upper_bound_ = 0; + ::memset(_has_bits_, 0, sizeof(_has_bits_)); +} + +Bucket::~Bucket() { + // @@protoc_insertion_point(destructor:io.prometheus.client.Bucket) + SharedDtor(); +} + +void Bucket::SharedDtor() { + if (this != default_instance_) { + } +} + +void Bucket::SetCachedSize(int size) const { + GOOGLE_SAFE_CONCURRENT_WRITES_BEGIN(); + _cached_size_ = size; + GOOGLE_SAFE_CONCURRENT_WRITES_END(); +} +const ::google::protobuf::Descriptor* Bucket::descriptor() { + protobuf_AssignDescriptorsOnce(); + return 
Bucket_descriptor_; +} + +const Bucket& Bucket::default_instance() { + if (default_instance_ == NULL) protobuf_AddDesc_metrics_2eproto(); + return *default_instance_; +} + +Bucket* Bucket::default_instance_ = NULL; + +Bucket* Bucket::New() const { + return new Bucket; +} + +void Bucket::Clear() { +#define OFFSET_OF_FIELD_(f) (reinterpret_cast( \ + &reinterpret_cast(16)->f) - \ + reinterpret_cast(16)) + +#define ZR_(first, last) do { \ + size_t f = OFFSET_OF_FIELD_(first); \ + size_t n = OFFSET_OF_FIELD_(last) - f + sizeof(last); \ + ::memset(&first, 0, n); \ + } while (0) + + ZR_(cumulative_count_, upper_bound_); + +#undef OFFSET_OF_FIELD_ +#undef ZR_ + + ::memset(_has_bits_, 0, sizeof(_has_bits_)); + mutable_unknown_fields()->Clear(); +} + +bool Bucket::MergePartialFromCodedStream( + ::google::protobuf::io::CodedInputStream* input) { +#define DO_(EXPRESSION) if (!(EXPRESSION)) goto failure + ::google::protobuf::uint32 tag; + // @@protoc_insertion_point(parse_start:io.prometheus.client.Bucket) + for (;;) { + ::std::pair< ::google::protobuf::uint32, bool> p = input->ReadTagWithCutoff(127); + tag = p.first; + if (!p.second) goto handle_unusual; + switch (::google::protobuf::internal::WireFormatLite::GetTagFieldNumber(tag)) { + // optional uint64 cumulative_count = 1; + case 1: { + if (tag == 8) { + DO_((::google::protobuf::internal::WireFormatLite::ReadPrimitive< + ::google::protobuf::uint64, ::google::protobuf::internal::WireFormatLite::TYPE_UINT64>( + input, &cumulative_count_))); + set_has_cumulative_count(); + } else { + goto handle_unusual; + } + if (input->ExpectTag(17)) goto parse_upper_bound; + break; + } + + // optional double upper_bound = 2; + case 2: { + if (tag == 17) { + parse_upper_bound: + DO_((::google::protobuf::internal::WireFormatLite::ReadPrimitive< + double, ::google::protobuf::internal::WireFormatLite::TYPE_DOUBLE>( + input, &upper_bound_))); + set_has_upper_bound(); + } else { + goto handle_unusual; + } + if (input->ExpectAtEnd()) goto 
success; + break; + } + + default: { + handle_unusual: + if (tag == 0 || + ::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) == + ::google::protobuf::internal::WireFormatLite::WIRETYPE_END_GROUP) { + goto success; + } + DO_(::google::protobuf::internal::WireFormat::SkipField( + input, tag, mutable_unknown_fields())); + break; + } + } + } +success: + // @@protoc_insertion_point(parse_success:io.prometheus.client.Bucket) + return true; +failure: + // @@protoc_insertion_point(parse_failure:io.prometheus.client.Bucket) + return false; +#undef DO_ +} + +void Bucket::SerializeWithCachedSizes( + ::google::protobuf::io::CodedOutputStream* output) const { + // @@protoc_insertion_point(serialize_start:io.prometheus.client.Bucket) + // optional uint64 cumulative_count = 1; + if (has_cumulative_count()) { + ::google::protobuf::internal::WireFormatLite::WriteUInt64(1, this->cumulative_count(), output); + } + + // optional double upper_bound = 2; + if (has_upper_bound()) { + ::google::protobuf::internal::WireFormatLite::WriteDouble(2, this->upper_bound(), output); + } + + if (!unknown_fields().empty()) { + ::google::protobuf::internal::WireFormat::SerializeUnknownFields( + unknown_fields(), output); + } + // @@protoc_insertion_point(serialize_end:io.prometheus.client.Bucket) +} + +::google::protobuf::uint8* Bucket::SerializeWithCachedSizesToArray( + ::google::protobuf::uint8* target) const { + // @@protoc_insertion_point(serialize_to_array_start:io.prometheus.client.Bucket) + // optional uint64 cumulative_count = 1; + if (has_cumulative_count()) { + target = ::google::protobuf::internal::WireFormatLite::WriteUInt64ToArray(1, this->cumulative_count(), target); + } + + // optional double upper_bound = 2; + if (has_upper_bound()) { + target = ::google::protobuf::internal::WireFormatLite::WriteDoubleToArray(2, this->upper_bound(), target); + } + + if (!unknown_fields().empty()) { + target = ::google::protobuf::internal::WireFormat::SerializeUnknownFieldsToArray( + 
unknown_fields(), target); + } + // @@protoc_insertion_point(serialize_to_array_end:io.prometheus.client.Bucket) + return target; +} + +int Bucket::ByteSize() const { + int total_size = 0; + + if (_has_bits_[0 / 32] & (0xffu << (0 % 32))) { + // optional uint64 cumulative_count = 1; + if (has_cumulative_count()) { + total_size += 1 + + ::google::protobuf::internal::WireFormatLite::UInt64Size( + this->cumulative_count()); + } + + // optional double upper_bound = 2; + if (has_upper_bound()) { + total_size += 1 + 8; + } + + } + if (!unknown_fields().empty()) { + total_size += + ::google::protobuf::internal::WireFormat::ComputeUnknownFieldsSize( + unknown_fields()); + } + GOOGLE_SAFE_CONCURRENT_WRITES_BEGIN(); + _cached_size_ = total_size; + GOOGLE_SAFE_CONCURRENT_WRITES_END(); + return total_size; +} + +void Bucket::MergeFrom(const ::google::protobuf::Message& from) { + GOOGLE_CHECK_NE(&from, this); + const Bucket* source = + ::google::protobuf::internal::dynamic_cast_if_available( + &from); + if (source == NULL) { + ::google::protobuf::internal::ReflectionOps::Merge(from, this); + } else { + MergeFrom(*source); + } +} + +void Bucket::MergeFrom(const Bucket& from) { + GOOGLE_CHECK_NE(&from, this); + if (from._has_bits_[0 / 32] & (0xffu << (0 % 32))) { + if (from.has_cumulative_count()) { + set_cumulative_count(from.cumulative_count()); + } + if (from.has_upper_bound()) { + set_upper_bound(from.upper_bound()); + } + } + mutable_unknown_fields()->MergeFrom(from.unknown_fields()); +} + +void Bucket::CopyFrom(const ::google::protobuf::Message& from) { + if (&from == this) return; + Clear(); + MergeFrom(from); +} + +void Bucket::CopyFrom(const Bucket& from) { + if (&from == this) return; + Clear(); + MergeFrom(from); +} + +bool Bucket::IsInitialized() const { + + return true; +} + +void Bucket::Swap(Bucket* other) { + if (other != this) { + std::swap(cumulative_count_, other->cumulative_count_); + std::swap(upper_bound_, other->upper_bound_); + std::swap(_has_bits_[0], 
other->_has_bits_[0]); + _unknown_fields_.Swap(&other->_unknown_fields_); + std::swap(_cached_size_, other->_cached_size_); + } +} + +::google::protobuf::Metadata Bucket::GetMetadata() const { + protobuf_AssignDescriptorsOnce(); + ::google::protobuf::Metadata metadata; + metadata.descriptor = Bucket_descriptor_; + metadata.reflection = Bucket_reflection_; + return metadata; +} + + +// =================================================================== + +#ifndef _MSC_VER +const int Metric::kLabelFieldNumber; +const int Metric::kGaugeFieldNumber; +const int Metric::kCounterFieldNumber; +const int Metric::kSummaryFieldNumber; +const int Metric::kUntypedFieldNumber; +const int Metric::kHistogramFieldNumber; +const int Metric::kTimestampMsFieldNumber; +#endif // !_MSC_VER + +Metric::Metric() + : ::google::protobuf::Message() { + SharedCtor(); + // @@protoc_insertion_point(constructor:io.prometheus.client.Metric) +} + +void Metric::InitAsDefaultInstance() { + gauge_ = const_cast< ::io::prometheus::client::Gauge*>(&::io::prometheus::client::Gauge::default_instance()); + counter_ = const_cast< ::io::prometheus::client::Counter*>(&::io::prometheus::client::Counter::default_instance()); + summary_ = const_cast< ::io::prometheus::client::Summary*>(&::io::prometheus::client::Summary::default_instance()); + untyped_ = const_cast< ::io::prometheus::client::Untyped*>(&::io::prometheus::client::Untyped::default_instance()); + histogram_ = const_cast< ::io::prometheus::client::Histogram*>(&::io::prometheus::client::Histogram::default_instance()); +} + +Metric::Metric(const Metric& from) + : ::google::protobuf::Message() { + SharedCtor(); + MergeFrom(from); + // @@protoc_insertion_point(copy_constructor:io.prometheus.client.Metric) +} + +void Metric::SharedCtor() { + _cached_size_ = 0; + gauge_ = NULL; + counter_ = NULL; + summary_ = NULL; + untyped_ = NULL; + histogram_ = NULL; + timestamp_ms_ = GOOGLE_LONGLONG(0); + ::memset(_has_bits_, 0, sizeof(_has_bits_)); +} + 
+Metric::~Metric() { + // @@protoc_insertion_point(destructor:io.prometheus.client.Metric) + SharedDtor(); +} + +void Metric::SharedDtor() { + if (this != default_instance_) { + delete gauge_; + delete counter_; + delete summary_; + delete untyped_; + delete histogram_; + } +} + +void Metric::SetCachedSize(int size) const { + GOOGLE_SAFE_CONCURRENT_WRITES_BEGIN(); + _cached_size_ = size; + GOOGLE_SAFE_CONCURRENT_WRITES_END(); +} +const ::google::protobuf::Descriptor* Metric::descriptor() { + protobuf_AssignDescriptorsOnce(); + return Metric_descriptor_; +} + +const Metric& Metric::default_instance() { + if (default_instance_ == NULL) protobuf_AddDesc_metrics_2eproto(); + return *default_instance_; +} + +Metric* Metric::default_instance_ = NULL; + +Metric* Metric::New() const { + return new Metric; +} + +void Metric::Clear() { + if (_has_bits_[0 / 32] & 126) { + if (has_gauge()) { + if (gauge_ != NULL) gauge_->::io::prometheus::client::Gauge::Clear(); + } + if (has_counter()) { + if (counter_ != NULL) counter_->::io::prometheus::client::Counter::Clear(); + } + if (has_summary()) { + if (summary_ != NULL) summary_->::io::prometheus::client::Summary::Clear(); + } + if (has_untyped()) { + if (untyped_ != NULL) untyped_->::io::prometheus::client::Untyped::Clear(); + } + if (has_histogram()) { + if (histogram_ != NULL) histogram_->::io::prometheus::client::Histogram::Clear(); + } + timestamp_ms_ = GOOGLE_LONGLONG(0); + } + label_.Clear(); + ::memset(_has_bits_, 0, sizeof(_has_bits_)); + mutable_unknown_fields()->Clear(); +} + +bool Metric::MergePartialFromCodedStream( + ::google::protobuf::io::CodedInputStream* input) { +#define DO_(EXPRESSION) if (!(EXPRESSION)) goto failure + ::google::protobuf::uint32 tag; + // @@protoc_insertion_point(parse_start:io.prometheus.client.Metric) + for (;;) { + ::std::pair< ::google::protobuf::uint32, bool> p = input->ReadTagWithCutoff(127); + tag = p.first; + if (!p.second) goto handle_unusual; + switch 
(::google::protobuf::internal::WireFormatLite::GetTagFieldNumber(tag)) { + // repeated .io.prometheus.client.LabelPair label = 1; + case 1: { + if (tag == 10) { + parse_label: + DO_(::google::protobuf::internal::WireFormatLite::ReadMessageNoVirtual( + input, add_label())); + } else { + goto handle_unusual; + } + if (input->ExpectTag(10)) goto parse_label; + if (input->ExpectTag(18)) goto parse_gauge; + break; + } + + // optional .io.prometheus.client.Gauge gauge = 2; + case 2: { + if (tag == 18) { + parse_gauge: + DO_(::google::protobuf::internal::WireFormatLite::ReadMessageNoVirtual( + input, mutable_gauge())); + } else { + goto handle_unusual; + } + if (input->ExpectTag(26)) goto parse_counter; + break; + } + + // optional .io.prometheus.client.Counter counter = 3; + case 3: { + if (tag == 26) { + parse_counter: + DO_(::google::protobuf::internal::WireFormatLite::ReadMessageNoVirtual( + input, mutable_counter())); + } else { + goto handle_unusual; + } + if (input->ExpectTag(34)) goto parse_summary; + break; + } + + // optional .io.prometheus.client.Summary summary = 4; + case 4: { + if (tag == 34) { + parse_summary: + DO_(::google::protobuf::internal::WireFormatLite::ReadMessageNoVirtual( + input, mutable_summary())); + } else { + goto handle_unusual; + } + if (input->ExpectTag(42)) goto parse_untyped; + break; + } + + // optional .io.prometheus.client.Untyped untyped = 5; + case 5: { + if (tag == 42) { + parse_untyped: + DO_(::google::protobuf::internal::WireFormatLite::ReadMessageNoVirtual( + input, mutable_untyped())); + } else { + goto handle_unusual; + } + if (input->ExpectTag(48)) goto parse_timestamp_ms; + break; + } + + // optional int64 timestamp_ms = 6; + case 6: { + if (tag == 48) { + parse_timestamp_ms: + DO_((::google::protobuf::internal::WireFormatLite::ReadPrimitive< + ::google::protobuf::int64, ::google::protobuf::internal::WireFormatLite::TYPE_INT64>( + input, ×tamp_ms_))); + set_has_timestamp_ms(); + } else { + goto handle_unusual; + } + if 
(input->ExpectTag(58)) goto parse_histogram; + break; + } + + // optional .io.prometheus.client.Histogram histogram = 7; + case 7: { + if (tag == 58) { + parse_histogram: + DO_(::google::protobuf::internal::WireFormatLite::ReadMessageNoVirtual( + input, mutable_histogram())); + } else { + goto handle_unusual; + } + if (input->ExpectAtEnd()) goto success; + break; + } + + default: { + handle_unusual: + if (tag == 0 || + ::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) == + ::google::protobuf::internal::WireFormatLite::WIRETYPE_END_GROUP) { + goto success; + } + DO_(::google::protobuf::internal::WireFormat::SkipField( + input, tag, mutable_unknown_fields())); + break; + } + } + } +success: + // @@protoc_insertion_point(parse_success:io.prometheus.client.Metric) + return true; +failure: + // @@protoc_insertion_point(parse_failure:io.prometheus.client.Metric) + return false; +#undef DO_ +} + +void Metric::SerializeWithCachedSizes( + ::google::protobuf::io::CodedOutputStream* output) const { + // @@protoc_insertion_point(serialize_start:io.prometheus.client.Metric) + // repeated .io.prometheus.client.LabelPair label = 1; + for (int i = 0; i < this->label_size(); i++) { + ::google::protobuf::internal::WireFormatLite::WriteMessageMaybeToArray( + 1, this->label(i), output); + } + + // optional .io.prometheus.client.Gauge gauge = 2; + if (has_gauge()) { + ::google::protobuf::internal::WireFormatLite::WriteMessageMaybeToArray( + 2, this->gauge(), output); + } + + // optional .io.prometheus.client.Counter counter = 3; + if (has_counter()) { + ::google::protobuf::internal::WireFormatLite::WriteMessageMaybeToArray( + 3, this->counter(), output); + } + + // optional .io.prometheus.client.Summary summary = 4; + if (has_summary()) { + ::google::protobuf::internal::WireFormatLite::WriteMessageMaybeToArray( + 4, this->summary(), output); + } + + // optional .io.prometheus.client.Untyped untyped = 5; + if (has_untyped()) { + 
::google::protobuf::internal::WireFormatLite::WriteMessageMaybeToArray( + 5, this->untyped(), output); + } + + // optional int64 timestamp_ms = 6; + if (has_timestamp_ms()) { + ::google::protobuf::internal::WireFormatLite::WriteInt64(6, this->timestamp_ms(), output); + } + + // optional .io.prometheus.client.Histogram histogram = 7; + if (has_histogram()) { + ::google::protobuf::internal::WireFormatLite::WriteMessageMaybeToArray( + 7, this->histogram(), output); + } + + if (!unknown_fields().empty()) { + ::google::protobuf::internal::WireFormat::SerializeUnknownFields( + unknown_fields(), output); + } + // @@protoc_insertion_point(serialize_end:io.prometheus.client.Metric) +} + +::google::protobuf::uint8* Metric::SerializeWithCachedSizesToArray( + ::google::protobuf::uint8* target) const { + // @@protoc_insertion_point(serialize_to_array_start:io.prometheus.client.Metric) + // repeated .io.prometheus.client.LabelPair label = 1; + for (int i = 0; i < this->label_size(); i++) { + target = ::google::protobuf::internal::WireFormatLite:: + WriteMessageNoVirtualToArray( + 1, this->label(i), target); + } + + // optional .io.prometheus.client.Gauge gauge = 2; + if (has_gauge()) { + target = ::google::protobuf::internal::WireFormatLite:: + WriteMessageNoVirtualToArray( + 2, this->gauge(), target); + } + + // optional .io.prometheus.client.Counter counter = 3; + if (has_counter()) { + target = ::google::protobuf::internal::WireFormatLite:: + WriteMessageNoVirtualToArray( + 3, this->counter(), target); + } + + // optional .io.prometheus.client.Summary summary = 4; + if (has_summary()) { + target = ::google::protobuf::internal::WireFormatLite:: + WriteMessageNoVirtualToArray( + 4, this->summary(), target); + } + + // optional .io.prometheus.client.Untyped untyped = 5; + if (has_untyped()) { + target = ::google::protobuf::internal::WireFormatLite:: + WriteMessageNoVirtualToArray( + 5, this->untyped(), target); + } + + // optional int64 timestamp_ms = 6; + if 
(has_timestamp_ms()) { + target = ::google::protobuf::internal::WireFormatLite::WriteInt64ToArray(6, this->timestamp_ms(), target); + } + + // optional .io.prometheus.client.Histogram histogram = 7; + if (has_histogram()) { + target = ::google::protobuf::internal::WireFormatLite:: + WriteMessageNoVirtualToArray( + 7, this->histogram(), target); + } + + if (!unknown_fields().empty()) { + target = ::google::protobuf::internal::WireFormat::SerializeUnknownFieldsToArray( + unknown_fields(), target); + } + // @@protoc_insertion_point(serialize_to_array_end:io.prometheus.client.Metric) + return target; +} + +int Metric::ByteSize() const { + int total_size = 0; + + if (_has_bits_[1 / 32] & (0xffu << (1 % 32))) { + // optional .io.prometheus.client.Gauge gauge = 2; + if (has_gauge()) { + total_size += 1 + + ::google::protobuf::internal::WireFormatLite::MessageSizeNoVirtual( + this->gauge()); + } + + // optional .io.prometheus.client.Counter counter = 3; + if (has_counter()) { + total_size += 1 + + ::google::protobuf::internal::WireFormatLite::MessageSizeNoVirtual( + this->counter()); + } + + // optional .io.prometheus.client.Summary summary = 4; + if (has_summary()) { + total_size += 1 + + ::google::protobuf::internal::WireFormatLite::MessageSizeNoVirtual( + this->summary()); + } + + // optional .io.prometheus.client.Untyped untyped = 5; + if (has_untyped()) { + total_size += 1 + + ::google::protobuf::internal::WireFormatLite::MessageSizeNoVirtual( + this->untyped()); + } + + // optional .io.prometheus.client.Histogram histogram = 7; + if (has_histogram()) { + total_size += 1 + + ::google::protobuf::internal::WireFormatLite::MessageSizeNoVirtual( + this->histogram()); + } + + // optional int64 timestamp_ms = 6; + if (has_timestamp_ms()) { + total_size += 1 + + ::google::protobuf::internal::WireFormatLite::Int64Size( + this->timestamp_ms()); + } + + } + // repeated .io.prometheus.client.LabelPair label = 1; + total_size += 1 * this->label_size(); + for (int i = 0; i < 
this->label_size(); i++) { + total_size += + ::google::protobuf::internal::WireFormatLite::MessageSizeNoVirtual( + this->label(i)); + } + + if (!unknown_fields().empty()) { + total_size += + ::google::protobuf::internal::WireFormat::ComputeUnknownFieldsSize( + unknown_fields()); + } + GOOGLE_SAFE_CONCURRENT_WRITES_BEGIN(); + _cached_size_ = total_size; + GOOGLE_SAFE_CONCURRENT_WRITES_END(); + return total_size; +} + +void Metric::MergeFrom(const ::google::protobuf::Message& from) { + GOOGLE_CHECK_NE(&from, this); + const Metric* source = + ::google::protobuf::internal::dynamic_cast_if_available( + &from); + if (source == NULL) { + ::google::protobuf::internal::ReflectionOps::Merge(from, this); + } else { + MergeFrom(*source); + } +} + +void Metric::MergeFrom(const Metric& from) { + GOOGLE_CHECK_NE(&from, this); + label_.MergeFrom(from.label_); + if (from._has_bits_[1 / 32] & (0xffu << (1 % 32))) { + if (from.has_gauge()) { + mutable_gauge()->::io::prometheus::client::Gauge::MergeFrom(from.gauge()); + } + if (from.has_counter()) { + mutable_counter()->::io::prometheus::client::Counter::MergeFrom(from.counter()); + } + if (from.has_summary()) { + mutable_summary()->::io::prometheus::client::Summary::MergeFrom(from.summary()); + } + if (from.has_untyped()) { + mutable_untyped()->::io::prometheus::client::Untyped::MergeFrom(from.untyped()); + } + if (from.has_histogram()) { + mutable_histogram()->::io::prometheus::client::Histogram::MergeFrom(from.histogram()); + } + if (from.has_timestamp_ms()) { + set_timestamp_ms(from.timestamp_ms()); + } + } + mutable_unknown_fields()->MergeFrom(from.unknown_fields()); +} + +void Metric::CopyFrom(const ::google::protobuf::Message& from) { + if (&from == this) return; + Clear(); + MergeFrom(from); +} + +void Metric::CopyFrom(const Metric& from) { + if (&from == this) return; + Clear(); + MergeFrom(from); +} + +bool Metric::IsInitialized() const { + + return true; +} + +void Metric::Swap(Metric* other) { + if (other != this) { + 
label_.Swap(&other->label_); + std::swap(gauge_, other->gauge_); + std::swap(counter_, other->counter_); + std::swap(summary_, other->summary_); + std::swap(untyped_, other->untyped_); + std::swap(histogram_, other->histogram_); + std::swap(timestamp_ms_, other->timestamp_ms_); + std::swap(_has_bits_[0], other->_has_bits_[0]); + _unknown_fields_.Swap(&other->_unknown_fields_); + std::swap(_cached_size_, other->_cached_size_); + } +} + +::google::protobuf::Metadata Metric::GetMetadata() const { + protobuf_AssignDescriptorsOnce(); + ::google::protobuf::Metadata metadata; + metadata.descriptor = Metric_descriptor_; + metadata.reflection = Metric_reflection_; + return metadata; +} + + +// =================================================================== + +#ifndef _MSC_VER +const int MetricFamily::kNameFieldNumber; +const int MetricFamily::kHelpFieldNumber; +const int MetricFamily::kTypeFieldNumber; +const int MetricFamily::kMetricFieldNumber; +#endif // !_MSC_VER + +MetricFamily::MetricFamily() + : ::google::protobuf::Message() { + SharedCtor(); + // @@protoc_insertion_point(constructor:io.prometheus.client.MetricFamily) +} + +void MetricFamily::InitAsDefaultInstance() { +} + +MetricFamily::MetricFamily(const MetricFamily& from) + : ::google::protobuf::Message() { + SharedCtor(); + MergeFrom(from); + // @@protoc_insertion_point(copy_constructor:io.prometheus.client.MetricFamily) +} + +void MetricFamily::SharedCtor() { + ::google::protobuf::internal::GetEmptyString(); + _cached_size_ = 0; + name_ = const_cast< ::std::string*>(&::google::protobuf::internal::GetEmptyStringAlreadyInited()); + help_ = const_cast< ::std::string*>(&::google::protobuf::internal::GetEmptyStringAlreadyInited()); + type_ = 0; + ::memset(_has_bits_, 0, sizeof(_has_bits_)); +} + +MetricFamily::~MetricFamily() { + // @@protoc_insertion_point(destructor:io.prometheus.client.MetricFamily) + SharedDtor(); +} + +void MetricFamily::SharedDtor() { + if (name_ != 
&::google::protobuf::internal::GetEmptyStringAlreadyInited()) { + delete name_; + } + if (help_ != &::google::protobuf::internal::GetEmptyStringAlreadyInited()) { + delete help_; + } + if (this != default_instance_) { + } +} + +void MetricFamily::SetCachedSize(int size) const { + GOOGLE_SAFE_CONCURRENT_WRITES_BEGIN(); + _cached_size_ = size; + GOOGLE_SAFE_CONCURRENT_WRITES_END(); +} +const ::google::protobuf::Descriptor* MetricFamily::descriptor() { + protobuf_AssignDescriptorsOnce(); + return MetricFamily_descriptor_; +} + +const MetricFamily& MetricFamily::default_instance() { + if (default_instance_ == NULL) protobuf_AddDesc_metrics_2eproto(); + return *default_instance_; +} + +MetricFamily* MetricFamily::default_instance_ = NULL; + +MetricFamily* MetricFamily::New() const { + return new MetricFamily; +} + +void MetricFamily::Clear() { + if (_has_bits_[0 / 32] & 7) { + if (has_name()) { + if (name_ != &::google::protobuf::internal::GetEmptyStringAlreadyInited()) { + name_->clear(); + } + } + if (has_help()) { + if (help_ != &::google::protobuf::internal::GetEmptyStringAlreadyInited()) { + help_->clear(); + } + } + type_ = 0; + } + metric_.Clear(); + ::memset(_has_bits_, 0, sizeof(_has_bits_)); + mutable_unknown_fields()->Clear(); +} + +bool MetricFamily::MergePartialFromCodedStream( + ::google::protobuf::io::CodedInputStream* input) { +#define DO_(EXPRESSION) if (!(EXPRESSION)) goto failure + ::google::protobuf::uint32 tag; + // @@protoc_insertion_point(parse_start:io.prometheus.client.MetricFamily) + for (;;) { + ::std::pair< ::google::protobuf::uint32, bool> p = input->ReadTagWithCutoff(127); + tag = p.first; + if (!p.second) goto handle_unusual; + switch (::google::protobuf::internal::WireFormatLite::GetTagFieldNumber(tag)) { + // optional string name = 1; + case 1: { + if (tag == 10) { + DO_(::google::protobuf::internal::WireFormatLite::ReadString( + input, this->mutable_name())); + ::google::protobuf::internal::WireFormat::VerifyUTF8StringNamedField( + 
this->name().data(), this->name().length(), + ::google::protobuf::internal::WireFormat::PARSE, + "name"); + } else { + goto handle_unusual; + } + if (input->ExpectTag(18)) goto parse_help; + break; + } + + // optional string help = 2; + case 2: { + if (tag == 18) { + parse_help: + DO_(::google::protobuf::internal::WireFormatLite::ReadString( + input, this->mutable_help())); + ::google::protobuf::internal::WireFormat::VerifyUTF8StringNamedField( + this->help().data(), this->help().length(), + ::google::protobuf::internal::WireFormat::PARSE, + "help"); + } else { + goto handle_unusual; + } + if (input->ExpectTag(24)) goto parse_type; + break; + } + + // optional .io.prometheus.client.MetricType type = 3; + case 3: { + if (tag == 24) { + parse_type: + int value; + DO_((::google::protobuf::internal::WireFormatLite::ReadPrimitive< + int, ::google::protobuf::internal::WireFormatLite::TYPE_ENUM>( + input, &value))); + if (::io::prometheus::client::MetricType_IsValid(value)) { + set_type(static_cast< ::io::prometheus::client::MetricType >(value)); + } else { + mutable_unknown_fields()->AddVarint(3, value); + } + } else { + goto handle_unusual; + } + if (input->ExpectTag(34)) goto parse_metric; + break; + } + + // repeated .io.prometheus.client.Metric metric = 4; + case 4: { + if (tag == 34) { + parse_metric: + DO_(::google::protobuf::internal::WireFormatLite::ReadMessageNoVirtual( + input, add_metric())); + } else { + goto handle_unusual; + } + if (input->ExpectTag(34)) goto parse_metric; + if (input->ExpectAtEnd()) goto success; + break; + } + + default: { + handle_unusual: + if (tag == 0 || + ::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) == + ::google::protobuf::internal::WireFormatLite::WIRETYPE_END_GROUP) { + goto success; + } + DO_(::google::protobuf::internal::WireFormat::SkipField( + input, tag, mutable_unknown_fields())); + break; + } + } + } +success: + // @@protoc_insertion_point(parse_success:io.prometheus.client.MetricFamily) + return true; 
+failure: + // @@protoc_insertion_point(parse_failure:io.prometheus.client.MetricFamily) + return false; +#undef DO_ +} + +void MetricFamily::SerializeWithCachedSizes( + ::google::protobuf::io::CodedOutputStream* output) const { + // @@protoc_insertion_point(serialize_start:io.prometheus.client.MetricFamily) + // optional string name = 1; + if (has_name()) { + ::google::protobuf::internal::WireFormat::VerifyUTF8StringNamedField( + this->name().data(), this->name().length(), + ::google::protobuf::internal::WireFormat::SERIALIZE, + "name"); + ::google::protobuf::internal::WireFormatLite::WriteStringMaybeAliased( + 1, this->name(), output); + } + + // optional string help = 2; + if (has_help()) { + ::google::protobuf::internal::WireFormat::VerifyUTF8StringNamedField( + this->help().data(), this->help().length(), + ::google::protobuf::internal::WireFormat::SERIALIZE, + "help"); + ::google::protobuf::internal::WireFormatLite::WriteStringMaybeAliased( + 2, this->help(), output); + } + + // optional .io.prometheus.client.MetricType type = 3; + if (has_type()) { + ::google::protobuf::internal::WireFormatLite::WriteEnum( + 3, this->type(), output); + } + + // repeated .io.prometheus.client.Metric metric = 4; + for (int i = 0; i < this->metric_size(); i++) { + ::google::protobuf::internal::WireFormatLite::WriteMessageMaybeToArray( + 4, this->metric(i), output); + } + + if (!unknown_fields().empty()) { + ::google::protobuf::internal::WireFormat::SerializeUnknownFields( + unknown_fields(), output); + } + // @@protoc_insertion_point(serialize_end:io.prometheus.client.MetricFamily) +} + +::google::protobuf::uint8* MetricFamily::SerializeWithCachedSizesToArray( + ::google::protobuf::uint8* target) const { + // @@protoc_insertion_point(serialize_to_array_start:io.prometheus.client.MetricFamily) + // optional string name = 1; + if (has_name()) { + ::google::protobuf::internal::WireFormat::VerifyUTF8StringNamedField( + this->name().data(), this->name().length(), + 
::google::protobuf::internal::WireFormat::SERIALIZE, + "name"); + target = + ::google::protobuf::internal::WireFormatLite::WriteStringToArray( + 1, this->name(), target); + } + + // optional string help = 2; + if (has_help()) { + ::google::protobuf::internal::WireFormat::VerifyUTF8StringNamedField( + this->help().data(), this->help().length(), + ::google::protobuf::internal::WireFormat::SERIALIZE, + "help"); + target = + ::google::protobuf::internal::WireFormatLite::WriteStringToArray( + 2, this->help(), target); + } + + // optional .io.prometheus.client.MetricType type = 3; + if (has_type()) { + target = ::google::protobuf::internal::WireFormatLite::WriteEnumToArray( + 3, this->type(), target); + } + + // repeated .io.prometheus.client.Metric metric = 4; + for (int i = 0; i < this->metric_size(); i++) { + target = ::google::protobuf::internal::WireFormatLite:: + WriteMessageNoVirtualToArray( + 4, this->metric(i), target); + } + + if (!unknown_fields().empty()) { + target = ::google::protobuf::internal::WireFormat::SerializeUnknownFieldsToArray( + unknown_fields(), target); + } + // @@protoc_insertion_point(serialize_to_array_end:io.prometheus.client.MetricFamily) + return target; +} + +int MetricFamily::ByteSize() const { + int total_size = 0; + + if (_has_bits_[0 / 32] & (0xffu << (0 % 32))) { + // optional string name = 1; + if (has_name()) { + total_size += 1 + + ::google::protobuf::internal::WireFormatLite::StringSize( + this->name()); + } + + // optional string help = 2; + if (has_help()) { + total_size += 1 + + ::google::protobuf::internal::WireFormatLite::StringSize( + this->help()); + } + + // optional .io.prometheus.client.MetricType type = 3; + if (has_type()) { + total_size += 1 + + ::google::protobuf::internal::WireFormatLite::EnumSize(this->type()); + } + + } + // repeated .io.prometheus.client.Metric metric = 4; + total_size += 1 * this->metric_size(); + for (int i = 0; i < this->metric_size(); i++) { + total_size += + 
::google::protobuf::internal::WireFormatLite::MessageSizeNoVirtual( + this->metric(i)); + } + + if (!unknown_fields().empty()) { + total_size += + ::google::protobuf::internal::WireFormat::ComputeUnknownFieldsSize( + unknown_fields()); + } + GOOGLE_SAFE_CONCURRENT_WRITES_BEGIN(); + _cached_size_ = total_size; + GOOGLE_SAFE_CONCURRENT_WRITES_END(); + return total_size; +} + +void MetricFamily::MergeFrom(const ::google::protobuf::Message& from) { + GOOGLE_CHECK_NE(&from, this); + const MetricFamily* source = + ::google::protobuf::internal::dynamic_cast_if_available( + &from); + if (source == NULL) { + ::google::protobuf::internal::ReflectionOps::Merge(from, this); + } else { + MergeFrom(*source); + } +} + +void MetricFamily::MergeFrom(const MetricFamily& from) { + GOOGLE_CHECK_NE(&from, this); + metric_.MergeFrom(from.metric_); + if (from._has_bits_[0 / 32] & (0xffu << (0 % 32))) { + if (from.has_name()) { + set_name(from.name()); + } + if (from.has_help()) { + set_help(from.help()); + } + if (from.has_type()) { + set_type(from.type()); + } + } + mutable_unknown_fields()->MergeFrom(from.unknown_fields()); +} + +void MetricFamily::CopyFrom(const ::google::protobuf::Message& from) { + if (&from == this) return; + Clear(); + MergeFrom(from); +} + +void MetricFamily::CopyFrom(const MetricFamily& from) { + if (&from == this) return; + Clear(); + MergeFrom(from); +} + +bool MetricFamily::IsInitialized() const { + + return true; +} + +void MetricFamily::Swap(MetricFamily* other) { + if (other != this) { + std::swap(name_, other->name_); + std::swap(help_, other->help_); + std::swap(type_, other->type_); + metric_.Swap(&other->metric_); + std::swap(_has_bits_[0], other->_has_bits_[0]); + _unknown_fields_.Swap(&other->_unknown_fields_); + std::swap(_cached_size_, other->_cached_size_); + } +} + +::google::protobuf::Metadata MetricFamily::GetMetadata() const { + protobuf_AssignDescriptorsOnce(); + ::google::protobuf::Metadata metadata; + metadata.descriptor = 
MetricFamily_descriptor_; + metadata.reflection = MetricFamily_reflection_; + return metadata; +} + + +// @@protoc_insertion_point(namespace_scope) + +} // namespace client +} // namespace prometheus +} // namespace io + +// @@protoc_insertion_point(global_scope) diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/client_model/cpp/metrics.pb.h juju-core-2.0.0/src/github.com/prometheus/client_model/cpp/metrics.pb.h --- juju-core-2.0~beta15/src/github.com/prometheus/client_model/cpp/metrics.pb.h 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/client_model/cpp/metrics.pb.h 2016-10-13 14:31:54.000000000 +0000 @@ -0,0 +1,2072 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: metrics.proto + +#ifndef PROTOBUF_metrics_2eproto__INCLUDED +#define PROTOBUF_metrics_2eproto__INCLUDED + +#include + +#include + +#if GOOGLE_PROTOBUF_VERSION < 2006000 +#error This file was generated by a newer version of protoc which is +#error incompatible with your Protocol Buffer headers. Please update +#error your headers. +#endif +#if 2006001 < GOOGLE_PROTOBUF_MIN_PROTOC_VERSION +#error This file was generated by an older version of protoc which is +#error incompatible with your Protocol Buffer headers. Please +#error regenerate this file with a newer version of protoc. +#endif + +#include +#include +#include +#include +#include +#include +// @@protoc_insertion_point(includes) + +namespace io { +namespace prometheus { +namespace client { + +// Internal implementation detail -- do not call these. 
+void protobuf_AddDesc_metrics_2eproto(); +void protobuf_AssignDesc_metrics_2eproto(); +void protobuf_ShutdownFile_metrics_2eproto(); + +class LabelPair; +class Gauge; +class Counter; +class Quantile; +class Summary; +class Untyped; +class Histogram; +class Bucket; +class Metric; +class MetricFamily; + +enum MetricType { + COUNTER = 0, + GAUGE = 1, + SUMMARY = 2, + UNTYPED = 3, + HISTOGRAM = 4 +}; +bool MetricType_IsValid(int value); +const MetricType MetricType_MIN = COUNTER; +const MetricType MetricType_MAX = HISTOGRAM; +const int MetricType_ARRAYSIZE = MetricType_MAX + 1; + +const ::google::protobuf::EnumDescriptor* MetricType_descriptor(); +inline const ::std::string& MetricType_Name(MetricType value) { + return ::google::protobuf::internal::NameOfEnum( + MetricType_descriptor(), value); +} +inline bool MetricType_Parse( + const ::std::string& name, MetricType* value) { + return ::google::protobuf::internal::ParseNamedEnum( + MetricType_descriptor(), name, value); +} +// =================================================================== + +class LabelPair : public ::google::protobuf::Message { + public: + LabelPair(); + virtual ~LabelPair(); + + LabelPair(const LabelPair& from); + + inline LabelPair& operator=(const LabelPair& from) { + CopyFrom(from); + return *this; + } + + inline const ::google::protobuf::UnknownFieldSet& unknown_fields() const { + return _unknown_fields_; + } + + inline ::google::protobuf::UnknownFieldSet* mutable_unknown_fields() { + return &_unknown_fields_; + } + + static const ::google::protobuf::Descriptor* descriptor(); + static const LabelPair& default_instance(); + + void Swap(LabelPair* other); + + // implements Message ---------------------------------------------- + + LabelPair* New() const; + void CopyFrom(const ::google::protobuf::Message& from); + void MergeFrom(const ::google::protobuf::Message& from); + void CopyFrom(const LabelPair& from); + void MergeFrom(const LabelPair& from); + void Clear(); + bool IsInitialized() 
const; + + int ByteSize() const; + bool MergePartialFromCodedStream( + ::google::protobuf::io::CodedInputStream* input); + void SerializeWithCachedSizes( + ::google::protobuf::io::CodedOutputStream* output) const; + ::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const; + int GetCachedSize() const { return _cached_size_; } + private: + void SharedCtor(); + void SharedDtor(); + void SetCachedSize(int size) const; + public: + ::google::protobuf::Metadata GetMetadata() const; + + // nested types ---------------------------------------------------- + + // accessors ------------------------------------------------------- + + // optional string name = 1; + inline bool has_name() const; + inline void clear_name(); + static const int kNameFieldNumber = 1; + inline const ::std::string& name() const; + inline void set_name(const ::std::string& value); + inline void set_name(const char* value); + inline void set_name(const char* value, size_t size); + inline ::std::string* mutable_name(); + inline ::std::string* release_name(); + inline void set_allocated_name(::std::string* name); + + // optional string value = 2; + inline bool has_value() const; + inline void clear_value(); + static const int kValueFieldNumber = 2; + inline const ::std::string& value() const; + inline void set_value(const ::std::string& value); + inline void set_value(const char* value); + inline void set_value(const char* value, size_t size); + inline ::std::string* mutable_value(); + inline ::std::string* release_value(); + inline void set_allocated_value(::std::string* value); + + // @@protoc_insertion_point(class_scope:io.prometheus.client.LabelPair) + private: + inline void set_has_name(); + inline void clear_has_name(); + inline void set_has_value(); + inline void clear_has_value(); + + ::google::protobuf::UnknownFieldSet _unknown_fields_; + + ::google::protobuf::uint32 _has_bits_[1]; + mutable int _cached_size_; + ::std::string* name_; + ::std::string* 
value_; + friend void protobuf_AddDesc_metrics_2eproto(); + friend void protobuf_AssignDesc_metrics_2eproto(); + friend void protobuf_ShutdownFile_metrics_2eproto(); + + void InitAsDefaultInstance(); + static LabelPair* default_instance_; +}; +// ------------------------------------------------------------------- + +class Gauge : public ::google::protobuf::Message { + public: + Gauge(); + virtual ~Gauge(); + + Gauge(const Gauge& from); + + inline Gauge& operator=(const Gauge& from) { + CopyFrom(from); + return *this; + } + + inline const ::google::protobuf::UnknownFieldSet& unknown_fields() const { + return _unknown_fields_; + } + + inline ::google::protobuf::UnknownFieldSet* mutable_unknown_fields() { + return &_unknown_fields_; + } + + static const ::google::protobuf::Descriptor* descriptor(); + static const Gauge& default_instance(); + + void Swap(Gauge* other); + + // implements Message ---------------------------------------------- + + Gauge* New() const; + void CopyFrom(const ::google::protobuf::Message& from); + void MergeFrom(const ::google::protobuf::Message& from); + void CopyFrom(const Gauge& from); + void MergeFrom(const Gauge& from); + void Clear(); + bool IsInitialized() const; + + int ByteSize() const; + bool MergePartialFromCodedStream( + ::google::protobuf::io::CodedInputStream* input); + void SerializeWithCachedSizes( + ::google::protobuf::io::CodedOutputStream* output) const; + ::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const; + int GetCachedSize() const { return _cached_size_; } + private: + void SharedCtor(); + void SharedDtor(); + void SetCachedSize(int size) const; + public: + ::google::protobuf::Metadata GetMetadata() const; + + // nested types ---------------------------------------------------- + + // accessors ------------------------------------------------------- + + // optional double value = 1; + inline bool has_value() const; + inline void clear_value(); + static const int 
kValueFieldNumber = 1; + inline double value() const; + inline void set_value(double value); + + // @@protoc_insertion_point(class_scope:io.prometheus.client.Gauge) + private: + inline void set_has_value(); + inline void clear_has_value(); + + ::google::protobuf::UnknownFieldSet _unknown_fields_; + + ::google::protobuf::uint32 _has_bits_[1]; + mutable int _cached_size_; + double value_; + friend void protobuf_AddDesc_metrics_2eproto(); + friend void protobuf_AssignDesc_metrics_2eproto(); + friend void protobuf_ShutdownFile_metrics_2eproto(); + + void InitAsDefaultInstance(); + static Gauge* default_instance_; +}; +// ------------------------------------------------------------------- + +class Counter : public ::google::protobuf::Message { + public: + Counter(); + virtual ~Counter(); + + Counter(const Counter& from); + + inline Counter& operator=(const Counter& from) { + CopyFrom(from); + return *this; + } + + inline const ::google::protobuf::UnknownFieldSet& unknown_fields() const { + return _unknown_fields_; + } + + inline ::google::protobuf::UnknownFieldSet* mutable_unknown_fields() { + return &_unknown_fields_; + } + + static const ::google::protobuf::Descriptor* descriptor(); + static const Counter& default_instance(); + + void Swap(Counter* other); + + // implements Message ---------------------------------------------- + + Counter* New() const; + void CopyFrom(const ::google::protobuf::Message& from); + void MergeFrom(const ::google::protobuf::Message& from); + void CopyFrom(const Counter& from); + void MergeFrom(const Counter& from); + void Clear(); + bool IsInitialized() const; + + int ByteSize() const; + bool MergePartialFromCodedStream( + ::google::protobuf::io::CodedInputStream* input); + void SerializeWithCachedSizes( + ::google::protobuf::io::CodedOutputStream* output) const; + ::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const; + int GetCachedSize() const { return _cached_size_; } + private: + void 
SharedCtor(); + void SharedDtor(); + void SetCachedSize(int size) const; + public: + ::google::protobuf::Metadata GetMetadata() const; + + // nested types ---------------------------------------------------- + + // accessors ------------------------------------------------------- + + // optional double value = 1; + inline bool has_value() const; + inline void clear_value(); + static const int kValueFieldNumber = 1; + inline double value() const; + inline void set_value(double value); + + // @@protoc_insertion_point(class_scope:io.prometheus.client.Counter) + private: + inline void set_has_value(); + inline void clear_has_value(); + + ::google::protobuf::UnknownFieldSet _unknown_fields_; + + ::google::protobuf::uint32 _has_bits_[1]; + mutable int _cached_size_; + double value_; + friend void protobuf_AddDesc_metrics_2eproto(); + friend void protobuf_AssignDesc_metrics_2eproto(); + friend void protobuf_ShutdownFile_metrics_2eproto(); + + void InitAsDefaultInstance(); + static Counter* default_instance_; +}; +// ------------------------------------------------------------------- + +class Quantile : public ::google::protobuf::Message { + public: + Quantile(); + virtual ~Quantile(); + + Quantile(const Quantile& from); + + inline Quantile& operator=(const Quantile& from) { + CopyFrom(from); + return *this; + } + + inline const ::google::protobuf::UnknownFieldSet& unknown_fields() const { + return _unknown_fields_; + } + + inline ::google::protobuf::UnknownFieldSet* mutable_unknown_fields() { + return &_unknown_fields_; + } + + static const ::google::protobuf::Descriptor* descriptor(); + static const Quantile& default_instance(); + + void Swap(Quantile* other); + + // implements Message ---------------------------------------------- + + Quantile* New() const; + void CopyFrom(const ::google::protobuf::Message& from); + void MergeFrom(const ::google::protobuf::Message& from); + void CopyFrom(const Quantile& from); + void MergeFrom(const Quantile& from); + void Clear(); + 
bool IsInitialized() const; + + int ByteSize() const; + bool MergePartialFromCodedStream( + ::google::protobuf::io::CodedInputStream* input); + void SerializeWithCachedSizes( + ::google::protobuf::io::CodedOutputStream* output) const; + ::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const; + int GetCachedSize() const { return _cached_size_; } + private: + void SharedCtor(); + void SharedDtor(); + void SetCachedSize(int size) const; + public: + ::google::protobuf::Metadata GetMetadata() const; + + // nested types ---------------------------------------------------- + + // accessors ------------------------------------------------------- + + // optional double quantile = 1; + inline bool has_quantile() const; + inline void clear_quantile(); + static const int kQuantileFieldNumber = 1; + inline double quantile() const; + inline void set_quantile(double value); + + // optional double value = 2; + inline bool has_value() const; + inline void clear_value(); + static const int kValueFieldNumber = 2; + inline double value() const; + inline void set_value(double value); + + // @@protoc_insertion_point(class_scope:io.prometheus.client.Quantile) + private: + inline void set_has_quantile(); + inline void clear_has_quantile(); + inline void set_has_value(); + inline void clear_has_value(); + + ::google::protobuf::UnknownFieldSet _unknown_fields_; + + ::google::protobuf::uint32 _has_bits_[1]; + mutable int _cached_size_; + double quantile_; + double value_; + friend void protobuf_AddDesc_metrics_2eproto(); + friend void protobuf_AssignDesc_metrics_2eproto(); + friend void protobuf_ShutdownFile_metrics_2eproto(); + + void InitAsDefaultInstance(); + static Quantile* default_instance_; +}; +// ------------------------------------------------------------------- + +class Summary : public ::google::protobuf::Message { + public: + Summary(); + virtual ~Summary(); + + Summary(const Summary& from); + + inline Summary& operator=(const 
Summary& from) { + CopyFrom(from); + return *this; + } + + inline const ::google::protobuf::UnknownFieldSet& unknown_fields() const { + return _unknown_fields_; + } + + inline ::google::protobuf::UnknownFieldSet* mutable_unknown_fields() { + return &_unknown_fields_; + } + + static const ::google::protobuf::Descriptor* descriptor(); + static const Summary& default_instance(); + + void Swap(Summary* other); + + // implements Message ---------------------------------------------- + + Summary* New() const; + void CopyFrom(const ::google::protobuf::Message& from); + void MergeFrom(const ::google::protobuf::Message& from); + void CopyFrom(const Summary& from); + void MergeFrom(const Summary& from); + void Clear(); + bool IsInitialized() const; + + int ByteSize() const; + bool MergePartialFromCodedStream( + ::google::protobuf::io::CodedInputStream* input); + void SerializeWithCachedSizes( + ::google::protobuf::io::CodedOutputStream* output) const; + ::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const; + int GetCachedSize() const { return _cached_size_; } + private: + void SharedCtor(); + void SharedDtor(); + void SetCachedSize(int size) const; + public: + ::google::protobuf::Metadata GetMetadata() const; + + // nested types ---------------------------------------------------- + + // accessors ------------------------------------------------------- + + // optional uint64 sample_count = 1; + inline bool has_sample_count() const; + inline void clear_sample_count(); + static const int kSampleCountFieldNumber = 1; + inline ::google::protobuf::uint64 sample_count() const; + inline void set_sample_count(::google::protobuf::uint64 value); + + // optional double sample_sum = 2; + inline bool has_sample_sum() const; + inline void clear_sample_sum(); + static const int kSampleSumFieldNumber = 2; + inline double sample_sum() const; + inline void set_sample_sum(double value); + + // repeated .io.prometheus.client.Quantile quantile = 3; 
+ inline int quantile_size() const; + inline void clear_quantile(); + static const int kQuantileFieldNumber = 3; + inline const ::io::prometheus::client::Quantile& quantile(int index) const; + inline ::io::prometheus::client::Quantile* mutable_quantile(int index); + inline ::io::prometheus::client::Quantile* add_quantile(); + inline const ::google::protobuf::RepeatedPtrField< ::io::prometheus::client::Quantile >& + quantile() const; + inline ::google::protobuf::RepeatedPtrField< ::io::prometheus::client::Quantile >* + mutable_quantile(); + + // @@protoc_insertion_point(class_scope:io.prometheus.client.Summary) + private: + inline void set_has_sample_count(); + inline void clear_has_sample_count(); + inline void set_has_sample_sum(); + inline void clear_has_sample_sum(); + + ::google::protobuf::UnknownFieldSet _unknown_fields_; + + ::google::protobuf::uint32 _has_bits_[1]; + mutable int _cached_size_; + ::google::protobuf::uint64 sample_count_; + double sample_sum_; + ::google::protobuf::RepeatedPtrField< ::io::prometheus::client::Quantile > quantile_; + friend void protobuf_AddDesc_metrics_2eproto(); + friend void protobuf_AssignDesc_metrics_2eproto(); + friend void protobuf_ShutdownFile_metrics_2eproto(); + + void InitAsDefaultInstance(); + static Summary* default_instance_; +}; +// ------------------------------------------------------------------- + +class Untyped : public ::google::protobuf::Message { + public: + Untyped(); + virtual ~Untyped(); + + Untyped(const Untyped& from); + + inline Untyped& operator=(const Untyped& from) { + CopyFrom(from); + return *this; + } + + inline const ::google::protobuf::UnknownFieldSet& unknown_fields() const { + return _unknown_fields_; + } + + inline ::google::protobuf::UnknownFieldSet* mutable_unknown_fields() { + return &_unknown_fields_; + } + + static const ::google::protobuf::Descriptor* descriptor(); + static const Untyped& default_instance(); + + void Swap(Untyped* other); + + // implements Message 
---------------------------------------------- + + Untyped* New() const; + void CopyFrom(const ::google::protobuf::Message& from); + void MergeFrom(const ::google::protobuf::Message& from); + void CopyFrom(const Untyped& from); + void MergeFrom(const Untyped& from); + void Clear(); + bool IsInitialized() const; + + int ByteSize() const; + bool MergePartialFromCodedStream( + ::google::protobuf::io::CodedInputStream* input); + void SerializeWithCachedSizes( + ::google::protobuf::io::CodedOutputStream* output) const; + ::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const; + int GetCachedSize() const { return _cached_size_; } + private: + void SharedCtor(); + void SharedDtor(); + void SetCachedSize(int size) const; + public: + ::google::protobuf::Metadata GetMetadata() const; + + // nested types ---------------------------------------------------- + + // accessors ------------------------------------------------------- + + // optional double value = 1; + inline bool has_value() const; + inline void clear_value(); + static const int kValueFieldNumber = 1; + inline double value() const; + inline void set_value(double value); + + // @@protoc_insertion_point(class_scope:io.prometheus.client.Untyped) + private: + inline void set_has_value(); + inline void clear_has_value(); + + ::google::protobuf::UnknownFieldSet _unknown_fields_; + + ::google::protobuf::uint32 _has_bits_[1]; + mutable int _cached_size_; + double value_; + friend void protobuf_AddDesc_metrics_2eproto(); + friend void protobuf_AssignDesc_metrics_2eproto(); + friend void protobuf_ShutdownFile_metrics_2eproto(); + + void InitAsDefaultInstance(); + static Untyped* default_instance_; +}; +// ------------------------------------------------------------------- + +class Histogram : public ::google::protobuf::Message { + public: + Histogram(); + virtual ~Histogram(); + + Histogram(const Histogram& from); + + inline Histogram& operator=(const Histogram& from) { + 
CopyFrom(from); + return *this; + } + + inline const ::google::protobuf::UnknownFieldSet& unknown_fields() const { + return _unknown_fields_; + } + + inline ::google::protobuf::UnknownFieldSet* mutable_unknown_fields() { + return &_unknown_fields_; + } + + static const ::google::protobuf::Descriptor* descriptor(); + static const Histogram& default_instance(); + + void Swap(Histogram* other); + + // implements Message ---------------------------------------------- + + Histogram* New() const; + void CopyFrom(const ::google::protobuf::Message& from); + void MergeFrom(const ::google::protobuf::Message& from); + void CopyFrom(const Histogram& from); + void MergeFrom(const Histogram& from); + void Clear(); + bool IsInitialized() const; + + int ByteSize() const; + bool MergePartialFromCodedStream( + ::google::protobuf::io::CodedInputStream* input); + void SerializeWithCachedSizes( + ::google::protobuf::io::CodedOutputStream* output) const; + ::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const; + int GetCachedSize() const { return _cached_size_; } + private: + void SharedCtor(); + void SharedDtor(); + void SetCachedSize(int size) const; + public: + ::google::protobuf::Metadata GetMetadata() const; + + // nested types ---------------------------------------------------- + + // accessors ------------------------------------------------------- + + // optional uint64 sample_count = 1; + inline bool has_sample_count() const; + inline void clear_sample_count(); + static const int kSampleCountFieldNumber = 1; + inline ::google::protobuf::uint64 sample_count() const; + inline void set_sample_count(::google::protobuf::uint64 value); + + // optional double sample_sum = 2; + inline bool has_sample_sum() const; + inline void clear_sample_sum(); + static const int kSampleSumFieldNumber = 2; + inline double sample_sum() const; + inline void set_sample_sum(double value); + + // repeated .io.prometheus.client.Bucket bucket = 3; + inline int 
bucket_size() const; + inline void clear_bucket(); + static const int kBucketFieldNumber = 3; + inline const ::io::prometheus::client::Bucket& bucket(int index) const; + inline ::io::prometheus::client::Bucket* mutable_bucket(int index); + inline ::io::prometheus::client::Bucket* add_bucket(); + inline const ::google::protobuf::RepeatedPtrField< ::io::prometheus::client::Bucket >& + bucket() const; + inline ::google::protobuf::RepeatedPtrField< ::io::prometheus::client::Bucket >* + mutable_bucket(); + + // @@protoc_insertion_point(class_scope:io.prometheus.client.Histogram) + private: + inline void set_has_sample_count(); + inline void clear_has_sample_count(); + inline void set_has_sample_sum(); + inline void clear_has_sample_sum(); + + ::google::protobuf::UnknownFieldSet _unknown_fields_; + + ::google::protobuf::uint32 _has_bits_[1]; + mutable int _cached_size_; + ::google::protobuf::uint64 sample_count_; + double sample_sum_; + ::google::protobuf::RepeatedPtrField< ::io::prometheus::client::Bucket > bucket_; + friend void protobuf_AddDesc_metrics_2eproto(); + friend void protobuf_AssignDesc_metrics_2eproto(); + friend void protobuf_ShutdownFile_metrics_2eproto(); + + void InitAsDefaultInstance(); + static Histogram* default_instance_; +}; +// ------------------------------------------------------------------- + +class Bucket : public ::google::protobuf::Message { + public: + Bucket(); + virtual ~Bucket(); + + Bucket(const Bucket& from); + + inline Bucket& operator=(const Bucket& from) { + CopyFrom(from); + return *this; + } + + inline const ::google::protobuf::UnknownFieldSet& unknown_fields() const { + return _unknown_fields_; + } + + inline ::google::protobuf::UnknownFieldSet* mutable_unknown_fields() { + return &_unknown_fields_; + } + + static const ::google::protobuf::Descriptor* descriptor(); + static const Bucket& default_instance(); + + void Swap(Bucket* other); + + // implements Message ---------------------------------------------- + + Bucket* New() 
const; + void CopyFrom(const ::google::protobuf::Message& from); + void MergeFrom(const ::google::protobuf::Message& from); + void CopyFrom(const Bucket& from); + void MergeFrom(const Bucket& from); + void Clear(); + bool IsInitialized() const; + + int ByteSize() const; + bool MergePartialFromCodedStream( + ::google::protobuf::io::CodedInputStream* input); + void SerializeWithCachedSizes( + ::google::protobuf::io::CodedOutputStream* output) const; + ::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const; + int GetCachedSize() const { return _cached_size_; } + private: + void SharedCtor(); + void SharedDtor(); + void SetCachedSize(int size) const; + public: + ::google::protobuf::Metadata GetMetadata() const; + + // nested types ---------------------------------------------------- + + // accessors ------------------------------------------------------- + + // optional uint64 cumulative_count = 1; + inline bool has_cumulative_count() const; + inline void clear_cumulative_count(); + static const int kCumulativeCountFieldNumber = 1; + inline ::google::protobuf::uint64 cumulative_count() const; + inline void set_cumulative_count(::google::protobuf::uint64 value); + + // optional double upper_bound = 2; + inline bool has_upper_bound() const; + inline void clear_upper_bound(); + static const int kUpperBoundFieldNumber = 2; + inline double upper_bound() const; + inline void set_upper_bound(double value); + + // @@protoc_insertion_point(class_scope:io.prometheus.client.Bucket) + private: + inline void set_has_cumulative_count(); + inline void clear_has_cumulative_count(); + inline void set_has_upper_bound(); + inline void clear_has_upper_bound(); + + ::google::protobuf::UnknownFieldSet _unknown_fields_; + + ::google::protobuf::uint32 _has_bits_[1]; + mutable int _cached_size_; + ::google::protobuf::uint64 cumulative_count_; + double upper_bound_; + friend void protobuf_AddDesc_metrics_2eproto(); + friend void 
protobuf_AssignDesc_metrics_2eproto(); + friend void protobuf_ShutdownFile_metrics_2eproto(); + + void InitAsDefaultInstance(); + static Bucket* default_instance_; +}; +// ------------------------------------------------------------------- + +class Metric : public ::google::protobuf::Message { + public: + Metric(); + virtual ~Metric(); + + Metric(const Metric& from); + + inline Metric& operator=(const Metric& from) { + CopyFrom(from); + return *this; + } + + inline const ::google::protobuf::UnknownFieldSet& unknown_fields() const { + return _unknown_fields_; + } + + inline ::google::protobuf::UnknownFieldSet* mutable_unknown_fields() { + return &_unknown_fields_; + } + + static const ::google::protobuf::Descriptor* descriptor(); + static const Metric& default_instance(); + + void Swap(Metric* other); + + // implements Message ---------------------------------------------- + + Metric* New() const; + void CopyFrom(const ::google::protobuf::Message& from); + void MergeFrom(const ::google::protobuf::Message& from); + void CopyFrom(const Metric& from); + void MergeFrom(const Metric& from); + void Clear(); + bool IsInitialized() const; + + int ByteSize() const; + bool MergePartialFromCodedStream( + ::google::protobuf::io::CodedInputStream* input); + void SerializeWithCachedSizes( + ::google::protobuf::io::CodedOutputStream* output) const; + ::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const; + int GetCachedSize() const { return _cached_size_; } + private: + void SharedCtor(); + void SharedDtor(); + void SetCachedSize(int size) const; + public: + ::google::protobuf::Metadata GetMetadata() const; + + // nested types ---------------------------------------------------- + + // accessors ------------------------------------------------------- + + // repeated .io.prometheus.client.LabelPair label = 1; + inline int label_size() const; + inline void clear_label(); + static const int kLabelFieldNumber = 1; + inline const 
::io::prometheus::client::LabelPair& label(int index) const; + inline ::io::prometheus::client::LabelPair* mutable_label(int index); + inline ::io::prometheus::client::LabelPair* add_label(); + inline const ::google::protobuf::RepeatedPtrField< ::io::prometheus::client::LabelPair >& + label() const; + inline ::google::protobuf::RepeatedPtrField< ::io::prometheus::client::LabelPair >* + mutable_label(); + + // optional .io.prometheus.client.Gauge gauge = 2; + inline bool has_gauge() const; + inline void clear_gauge(); + static const int kGaugeFieldNumber = 2; + inline const ::io::prometheus::client::Gauge& gauge() const; + inline ::io::prometheus::client::Gauge* mutable_gauge(); + inline ::io::prometheus::client::Gauge* release_gauge(); + inline void set_allocated_gauge(::io::prometheus::client::Gauge* gauge); + + // optional .io.prometheus.client.Counter counter = 3; + inline bool has_counter() const; + inline void clear_counter(); + static const int kCounterFieldNumber = 3; + inline const ::io::prometheus::client::Counter& counter() const; + inline ::io::prometheus::client::Counter* mutable_counter(); + inline ::io::prometheus::client::Counter* release_counter(); + inline void set_allocated_counter(::io::prometheus::client::Counter* counter); + + // optional .io.prometheus.client.Summary summary = 4; + inline bool has_summary() const; + inline void clear_summary(); + static const int kSummaryFieldNumber = 4; + inline const ::io::prometheus::client::Summary& summary() const; + inline ::io::prometheus::client::Summary* mutable_summary(); + inline ::io::prometheus::client::Summary* release_summary(); + inline void set_allocated_summary(::io::prometheus::client::Summary* summary); + + // optional .io.prometheus.client.Untyped untyped = 5; + inline bool has_untyped() const; + inline void clear_untyped(); + static const int kUntypedFieldNumber = 5; + inline const ::io::prometheus::client::Untyped& untyped() const; + inline ::io::prometheus::client::Untyped* 
mutable_untyped(); + inline ::io::prometheus::client::Untyped* release_untyped(); + inline void set_allocated_untyped(::io::prometheus::client::Untyped* untyped); + + // optional .io.prometheus.client.Histogram histogram = 7; + inline bool has_histogram() const; + inline void clear_histogram(); + static const int kHistogramFieldNumber = 7; + inline const ::io::prometheus::client::Histogram& histogram() const; + inline ::io::prometheus::client::Histogram* mutable_histogram(); + inline ::io::prometheus::client::Histogram* release_histogram(); + inline void set_allocated_histogram(::io::prometheus::client::Histogram* histogram); + + // optional int64 timestamp_ms = 6; + inline bool has_timestamp_ms() const; + inline void clear_timestamp_ms(); + static const int kTimestampMsFieldNumber = 6; + inline ::google::protobuf::int64 timestamp_ms() const; + inline void set_timestamp_ms(::google::protobuf::int64 value); + + // @@protoc_insertion_point(class_scope:io.prometheus.client.Metric) + private: + inline void set_has_gauge(); + inline void clear_has_gauge(); + inline void set_has_counter(); + inline void clear_has_counter(); + inline void set_has_summary(); + inline void clear_has_summary(); + inline void set_has_untyped(); + inline void clear_has_untyped(); + inline void set_has_histogram(); + inline void clear_has_histogram(); + inline void set_has_timestamp_ms(); + inline void clear_has_timestamp_ms(); + + ::google::protobuf::UnknownFieldSet _unknown_fields_; + + ::google::protobuf::uint32 _has_bits_[1]; + mutable int _cached_size_; + ::google::protobuf::RepeatedPtrField< ::io::prometheus::client::LabelPair > label_; + ::io::prometheus::client::Gauge* gauge_; + ::io::prometheus::client::Counter* counter_; + ::io::prometheus::client::Summary* summary_; + ::io::prometheus::client::Untyped* untyped_; + ::io::prometheus::client::Histogram* histogram_; + ::google::protobuf::int64 timestamp_ms_; + friend void protobuf_AddDesc_metrics_2eproto(); + friend void 
protobuf_AssignDesc_metrics_2eproto(); + friend void protobuf_ShutdownFile_metrics_2eproto(); + + void InitAsDefaultInstance(); + static Metric* default_instance_; +}; +// ------------------------------------------------------------------- + +class MetricFamily : public ::google::protobuf::Message { + public: + MetricFamily(); + virtual ~MetricFamily(); + + MetricFamily(const MetricFamily& from); + + inline MetricFamily& operator=(const MetricFamily& from) { + CopyFrom(from); + return *this; + } + + inline const ::google::protobuf::UnknownFieldSet& unknown_fields() const { + return _unknown_fields_; + } + + inline ::google::protobuf::UnknownFieldSet* mutable_unknown_fields() { + return &_unknown_fields_; + } + + static const ::google::protobuf::Descriptor* descriptor(); + static const MetricFamily& default_instance(); + + void Swap(MetricFamily* other); + + // implements Message ---------------------------------------------- + + MetricFamily* New() const; + void CopyFrom(const ::google::protobuf::Message& from); + void MergeFrom(const ::google::protobuf::Message& from); + void CopyFrom(const MetricFamily& from); + void MergeFrom(const MetricFamily& from); + void Clear(); + bool IsInitialized() const; + + int ByteSize() const; + bool MergePartialFromCodedStream( + ::google::protobuf::io::CodedInputStream* input); + void SerializeWithCachedSizes( + ::google::protobuf::io::CodedOutputStream* output) const; + ::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const; + int GetCachedSize() const { return _cached_size_; } + private: + void SharedCtor(); + void SharedDtor(); + void SetCachedSize(int size) const; + public: + ::google::protobuf::Metadata GetMetadata() const; + + // nested types ---------------------------------------------------- + + // accessors ------------------------------------------------------- + + // optional string name = 1; + inline bool has_name() const; + inline void clear_name(); + static const int 
kNameFieldNumber = 1; + inline const ::std::string& name() const; + inline void set_name(const ::std::string& value); + inline void set_name(const char* value); + inline void set_name(const char* value, size_t size); + inline ::std::string* mutable_name(); + inline ::std::string* release_name(); + inline void set_allocated_name(::std::string* name); + + // optional string help = 2; + inline bool has_help() const; + inline void clear_help(); + static const int kHelpFieldNumber = 2; + inline const ::std::string& help() const; + inline void set_help(const ::std::string& value); + inline void set_help(const char* value); + inline void set_help(const char* value, size_t size); + inline ::std::string* mutable_help(); + inline ::std::string* release_help(); + inline void set_allocated_help(::std::string* help); + + // optional .io.prometheus.client.MetricType type = 3; + inline bool has_type() const; + inline void clear_type(); + static const int kTypeFieldNumber = 3; + inline ::io::prometheus::client::MetricType type() const; + inline void set_type(::io::prometheus::client::MetricType value); + + // repeated .io.prometheus.client.Metric metric = 4; + inline int metric_size() const; + inline void clear_metric(); + static const int kMetricFieldNumber = 4; + inline const ::io::prometheus::client::Metric& metric(int index) const; + inline ::io::prometheus::client::Metric* mutable_metric(int index); + inline ::io::prometheus::client::Metric* add_metric(); + inline const ::google::protobuf::RepeatedPtrField< ::io::prometheus::client::Metric >& + metric() const; + inline ::google::protobuf::RepeatedPtrField< ::io::prometheus::client::Metric >* + mutable_metric(); + + // @@protoc_insertion_point(class_scope:io.prometheus.client.MetricFamily) + private: + inline void set_has_name(); + inline void clear_has_name(); + inline void set_has_help(); + inline void clear_has_help(); + inline void set_has_type(); + inline void clear_has_type(); + + ::google::protobuf::UnknownFieldSet 
_unknown_fields_; + + ::google::protobuf::uint32 _has_bits_[1]; + mutable int _cached_size_; + ::std::string* name_; + ::std::string* help_; + ::google::protobuf::RepeatedPtrField< ::io::prometheus::client::Metric > metric_; + int type_; + friend void protobuf_AddDesc_metrics_2eproto(); + friend void protobuf_AssignDesc_metrics_2eproto(); + friend void protobuf_ShutdownFile_metrics_2eproto(); + + void InitAsDefaultInstance(); + static MetricFamily* default_instance_; +}; +// =================================================================== + + +// =================================================================== + +// LabelPair + +// optional string name = 1; +inline bool LabelPair::has_name() const { + return (_has_bits_[0] & 0x00000001u) != 0; +} +inline void LabelPair::set_has_name() { + _has_bits_[0] |= 0x00000001u; +} +inline void LabelPair::clear_has_name() { + _has_bits_[0] &= ~0x00000001u; +} +inline void LabelPair::clear_name() { + if (name_ != &::google::protobuf::internal::GetEmptyStringAlreadyInited()) { + name_->clear(); + } + clear_has_name(); +} +inline const ::std::string& LabelPair::name() const { + // @@protoc_insertion_point(field_get:io.prometheus.client.LabelPair.name) + return *name_; +} +inline void LabelPair::set_name(const ::std::string& value) { + set_has_name(); + if (name_ == &::google::protobuf::internal::GetEmptyStringAlreadyInited()) { + name_ = new ::std::string; + } + name_->assign(value); + // @@protoc_insertion_point(field_set:io.prometheus.client.LabelPair.name) +} +inline void LabelPair::set_name(const char* value) { + set_has_name(); + if (name_ == &::google::protobuf::internal::GetEmptyStringAlreadyInited()) { + name_ = new ::std::string; + } + name_->assign(value); + // @@protoc_insertion_point(field_set_char:io.prometheus.client.LabelPair.name) +} +inline void LabelPair::set_name(const char* value, size_t size) { + set_has_name(); + if (name_ == &::google::protobuf::internal::GetEmptyStringAlreadyInited()) { + name_ = 
new ::std::string; + } + name_->assign(reinterpret_cast(value), size); + // @@protoc_insertion_point(field_set_pointer:io.prometheus.client.LabelPair.name) +} +inline ::std::string* LabelPair::mutable_name() { + set_has_name(); + if (name_ == &::google::protobuf::internal::GetEmptyStringAlreadyInited()) { + name_ = new ::std::string; + } + // @@protoc_insertion_point(field_mutable:io.prometheus.client.LabelPair.name) + return name_; +} +inline ::std::string* LabelPair::release_name() { + clear_has_name(); + if (name_ == &::google::protobuf::internal::GetEmptyStringAlreadyInited()) { + return NULL; + } else { + ::std::string* temp = name_; + name_ = const_cast< ::std::string*>(&::google::protobuf::internal::GetEmptyStringAlreadyInited()); + return temp; + } +} +inline void LabelPair::set_allocated_name(::std::string* name) { + if (name_ != &::google::protobuf::internal::GetEmptyStringAlreadyInited()) { + delete name_; + } + if (name) { + set_has_name(); + name_ = name; + } else { + clear_has_name(); + name_ = const_cast< ::std::string*>(&::google::protobuf::internal::GetEmptyStringAlreadyInited()); + } + // @@protoc_insertion_point(field_set_allocated:io.prometheus.client.LabelPair.name) +} + +// optional string value = 2; +inline bool LabelPair::has_value() const { + return (_has_bits_[0] & 0x00000002u) != 0; +} +inline void LabelPair::set_has_value() { + _has_bits_[0] |= 0x00000002u; +} +inline void LabelPair::clear_has_value() { + _has_bits_[0] &= ~0x00000002u; +} +inline void LabelPair::clear_value() { + if (value_ != &::google::protobuf::internal::GetEmptyStringAlreadyInited()) { + value_->clear(); + } + clear_has_value(); +} +inline const ::std::string& LabelPair::value() const { + // @@protoc_insertion_point(field_get:io.prometheus.client.LabelPair.value) + return *value_; +} +inline void LabelPair::set_value(const ::std::string& value) { + set_has_value(); + if (value_ == &::google::protobuf::internal::GetEmptyStringAlreadyInited()) { + value_ = new 
::std::string; + } + value_->assign(value); + // @@protoc_insertion_point(field_set:io.prometheus.client.LabelPair.value) +} +inline void LabelPair::set_value(const char* value) { + set_has_value(); + if (value_ == &::google::protobuf::internal::GetEmptyStringAlreadyInited()) { + value_ = new ::std::string; + } + value_->assign(value); + // @@protoc_insertion_point(field_set_char:io.prometheus.client.LabelPair.value) +} +inline void LabelPair::set_value(const char* value, size_t size) { + set_has_value(); + if (value_ == &::google::protobuf::internal::GetEmptyStringAlreadyInited()) { + value_ = new ::std::string; + } + value_->assign(reinterpret_cast(value), size); + // @@protoc_insertion_point(field_set_pointer:io.prometheus.client.LabelPair.value) +} +inline ::std::string* LabelPair::mutable_value() { + set_has_value(); + if (value_ == &::google::protobuf::internal::GetEmptyStringAlreadyInited()) { + value_ = new ::std::string; + } + // @@protoc_insertion_point(field_mutable:io.prometheus.client.LabelPair.value) + return value_; +} +inline ::std::string* LabelPair::release_value() { + clear_has_value(); + if (value_ == &::google::protobuf::internal::GetEmptyStringAlreadyInited()) { + return NULL; + } else { + ::std::string* temp = value_; + value_ = const_cast< ::std::string*>(&::google::protobuf::internal::GetEmptyStringAlreadyInited()); + return temp; + } +} +inline void LabelPair::set_allocated_value(::std::string* value) { + if (value_ != &::google::protobuf::internal::GetEmptyStringAlreadyInited()) { + delete value_; + } + if (value) { + set_has_value(); + value_ = value; + } else { + clear_has_value(); + value_ = const_cast< ::std::string*>(&::google::protobuf::internal::GetEmptyStringAlreadyInited()); + } + // @@protoc_insertion_point(field_set_allocated:io.prometheus.client.LabelPair.value) +} + +// ------------------------------------------------------------------- + +// Gauge + +// optional double value = 1; +inline bool Gauge::has_value() const { + 
return (_has_bits_[0] & 0x00000001u) != 0; +} +inline void Gauge::set_has_value() { + _has_bits_[0] |= 0x00000001u; +} +inline void Gauge::clear_has_value() { + _has_bits_[0] &= ~0x00000001u; +} +inline void Gauge::clear_value() { + value_ = 0; + clear_has_value(); +} +inline double Gauge::value() const { + // @@protoc_insertion_point(field_get:io.prometheus.client.Gauge.value) + return value_; +} +inline void Gauge::set_value(double value) { + set_has_value(); + value_ = value; + // @@protoc_insertion_point(field_set:io.prometheus.client.Gauge.value) +} + +// ------------------------------------------------------------------- + +// Counter + +// optional double value = 1; +inline bool Counter::has_value() const { + return (_has_bits_[0] & 0x00000001u) != 0; +} +inline void Counter::set_has_value() { + _has_bits_[0] |= 0x00000001u; +} +inline void Counter::clear_has_value() { + _has_bits_[0] &= ~0x00000001u; +} +inline void Counter::clear_value() { + value_ = 0; + clear_has_value(); +} +inline double Counter::value() const { + // @@protoc_insertion_point(field_get:io.prometheus.client.Counter.value) + return value_; +} +inline void Counter::set_value(double value) { + set_has_value(); + value_ = value; + // @@protoc_insertion_point(field_set:io.prometheus.client.Counter.value) +} + +// ------------------------------------------------------------------- + +// Quantile + +// optional double quantile = 1; +inline bool Quantile::has_quantile() const { + return (_has_bits_[0] & 0x00000001u) != 0; +} +inline void Quantile::set_has_quantile() { + _has_bits_[0] |= 0x00000001u; +} +inline void Quantile::clear_has_quantile() { + _has_bits_[0] &= ~0x00000001u; +} +inline void Quantile::clear_quantile() { + quantile_ = 0; + clear_has_quantile(); +} +inline double Quantile::quantile() const { + // @@protoc_insertion_point(field_get:io.prometheus.client.Quantile.quantile) + return quantile_; +} +inline void Quantile::set_quantile(double value) { + set_has_quantile(); + quantile_ 
= value; + // @@protoc_insertion_point(field_set:io.prometheus.client.Quantile.quantile) +} + +// optional double value = 2; +inline bool Quantile::has_value() const { + return (_has_bits_[0] & 0x00000002u) != 0; +} +inline void Quantile::set_has_value() { + _has_bits_[0] |= 0x00000002u; +} +inline void Quantile::clear_has_value() { + _has_bits_[0] &= ~0x00000002u; +} +inline void Quantile::clear_value() { + value_ = 0; + clear_has_value(); +} +inline double Quantile::value() const { + // @@protoc_insertion_point(field_get:io.prometheus.client.Quantile.value) + return value_; +} +inline void Quantile::set_value(double value) { + set_has_value(); + value_ = value; + // @@protoc_insertion_point(field_set:io.prometheus.client.Quantile.value) +} + +// ------------------------------------------------------------------- + +// Summary + +// optional uint64 sample_count = 1; +inline bool Summary::has_sample_count() const { + return (_has_bits_[0] & 0x00000001u) != 0; +} +inline void Summary::set_has_sample_count() { + _has_bits_[0] |= 0x00000001u; +} +inline void Summary::clear_has_sample_count() { + _has_bits_[0] &= ~0x00000001u; +} +inline void Summary::clear_sample_count() { + sample_count_ = GOOGLE_ULONGLONG(0); + clear_has_sample_count(); +} +inline ::google::protobuf::uint64 Summary::sample_count() const { + // @@protoc_insertion_point(field_get:io.prometheus.client.Summary.sample_count) + return sample_count_; +} +inline void Summary::set_sample_count(::google::protobuf::uint64 value) { + set_has_sample_count(); + sample_count_ = value; + // @@protoc_insertion_point(field_set:io.prometheus.client.Summary.sample_count) +} + +// optional double sample_sum = 2; +inline bool Summary::has_sample_sum() const { + return (_has_bits_[0] & 0x00000002u) != 0; +} +inline void Summary::set_has_sample_sum() { + _has_bits_[0] |= 0x00000002u; +} +inline void Summary::clear_has_sample_sum() { + _has_bits_[0] &= ~0x00000002u; +} +inline void Summary::clear_sample_sum() { + 
sample_sum_ = 0; + clear_has_sample_sum(); +} +inline double Summary::sample_sum() const { + // @@protoc_insertion_point(field_get:io.prometheus.client.Summary.sample_sum) + return sample_sum_; +} +inline void Summary::set_sample_sum(double value) { + set_has_sample_sum(); + sample_sum_ = value; + // @@protoc_insertion_point(field_set:io.prometheus.client.Summary.sample_sum) +} + +// repeated .io.prometheus.client.Quantile quantile = 3; +inline int Summary::quantile_size() const { + return quantile_.size(); +} +inline void Summary::clear_quantile() { + quantile_.Clear(); +} +inline const ::io::prometheus::client::Quantile& Summary::quantile(int index) const { + // @@protoc_insertion_point(field_get:io.prometheus.client.Summary.quantile) + return quantile_.Get(index); +} +inline ::io::prometheus::client::Quantile* Summary::mutable_quantile(int index) { + // @@protoc_insertion_point(field_mutable:io.prometheus.client.Summary.quantile) + return quantile_.Mutable(index); +} +inline ::io::prometheus::client::Quantile* Summary::add_quantile() { + // @@protoc_insertion_point(field_add:io.prometheus.client.Summary.quantile) + return quantile_.Add(); +} +inline const ::google::protobuf::RepeatedPtrField< ::io::prometheus::client::Quantile >& +Summary::quantile() const { + // @@protoc_insertion_point(field_list:io.prometheus.client.Summary.quantile) + return quantile_; +} +inline ::google::protobuf::RepeatedPtrField< ::io::prometheus::client::Quantile >* +Summary::mutable_quantile() { + // @@protoc_insertion_point(field_mutable_list:io.prometheus.client.Summary.quantile) + return &quantile_; +} + +// ------------------------------------------------------------------- + +// Untyped + +// optional double value = 1; +inline bool Untyped::has_value() const { + return (_has_bits_[0] & 0x00000001u) != 0; +} +inline void Untyped::set_has_value() { + _has_bits_[0] |= 0x00000001u; +} +inline void Untyped::clear_has_value() { + _has_bits_[0] &= ~0x00000001u; +} +inline void 
Untyped::clear_value() { + value_ = 0; + clear_has_value(); +} +inline double Untyped::value() const { + // @@protoc_insertion_point(field_get:io.prometheus.client.Untyped.value) + return value_; +} +inline void Untyped::set_value(double value) { + set_has_value(); + value_ = value; + // @@protoc_insertion_point(field_set:io.prometheus.client.Untyped.value) +} + +// ------------------------------------------------------------------- + +// Histogram + +// optional uint64 sample_count = 1; +inline bool Histogram::has_sample_count() const { + return (_has_bits_[0] & 0x00000001u) != 0; +} +inline void Histogram::set_has_sample_count() { + _has_bits_[0] |= 0x00000001u; +} +inline void Histogram::clear_has_sample_count() { + _has_bits_[0] &= ~0x00000001u; +} +inline void Histogram::clear_sample_count() { + sample_count_ = GOOGLE_ULONGLONG(0); + clear_has_sample_count(); +} +inline ::google::protobuf::uint64 Histogram::sample_count() const { + // @@protoc_insertion_point(field_get:io.prometheus.client.Histogram.sample_count) + return sample_count_; +} +inline void Histogram::set_sample_count(::google::protobuf::uint64 value) { + set_has_sample_count(); + sample_count_ = value; + // @@protoc_insertion_point(field_set:io.prometheus.client.Histogram.sample_count) +} + +// optional double sample_sum = 2; +inline bool Histogram::has_sample_sum() const { + return (_has_bits_[0] & 0x00000002u) != 0; +} +inline void Histogram::set_has_sample_sum() { + _has_bits_[0] |= 0x00000002u; +} +inline void Histogram::clear_has_sample_sum() { + _has_bits_[0] &= ~0x00000002u; +} +inline void Histogram::clear_sample_sum() { + sample_sum_ = 0; + clear_has_sample_sum(); +} +inline double Histogram::sample_sum() const { + // @@protoc_insertion_point(field_get:io.prometheus.client.Histogram.sample_sum) + return sample_sum_; +} +inline void Histogram::set_sample_sum(double value) { + set_has_sample_sum(); + sample_sum_ = value; + // 
@@protoc_insertion_point(field_set:io.prometheus.client.Histogram.sample_sum) +} + +// repeated .io.prometheus.client.Bucket bucket = 3; +inline int Histogram::bucket_size() const { + return bucket_.size(); +} +inline void Histogram::clear_bucket() { + bucket_.Clear(); +} +inline const ::io::prometheus::client::Bucket& Histogram::bucket(int index) const { + // @@protoc_insertion_point(field_get:io.prometheus.client.Histogram.bucket) + return bucket_.Get(index); +} +inline ::io::prometheus::client::Bucket* Histogram::mutable_bucket(int index) { + // @@protoc_insertion_point(field_mutable:io.prometheus.client.Histogram.bucket) + return bucket_.Mutable(index); +} +inline ::io::prometheus::client::Bucket* Histogram::add_bucket() { + // @@protoc_insertion_point(field_add:io.prometheus.client.Histogram.bucket) + return bucket_.Add(); +} +inline const ::google::protobuf::RepeatedPtrField< ::io::prometheus::client::Bucket >& +Histogram::bucket() const { + // @@protoc_insertion_point(field_list:io.prometheus.client.Histogram.bucket) + return bucket_; +} +inline ::google::protobuf::RepeatedPtrField< ::io::prometheus::client::Bucket >* +Histogram::mutable_bucket() { + // @@protoc_insertion_point(field_mutable_list:io.prometheus.client.Histogram.bucket) + return &bucket_; +} + +// ------------------------------------------------------------------- + +// Bucket + +// optional uint64 cumulative_count = 1; +inline bool Bucket::has_cumulative_count() const { + return (_has_bits_[0] & 0x00000001u) != 0; +} +inline void Bucket::set_has_cumulative_count() { + _has_bits_[0] |= 0x00000001u; +} +inline void Bucket::clear_has_cumulative_count() { + _has_bits_[0] &= ~0x00000001u; +} +inline void Bucket::clear_cumulative_count() { + cumulative_count_ = GOOGLE_ULONGLONG(0); + clear_has_cumulative_count(); +} +inline ::google::protobuf::uint64 Bucket::cumulative_count() const { + // @@protoc_insertion_point(field_get:io.prometheus.client.Bucket.cumulative_count) + return cumulative_count_; 
+} +inline void Bucket::set_cumulative_count(::google::protobuf::uint64 value) { + set_has_cumulative_count(); + cumulative_count_ = value; + // @@protoc_insertion_point(field_set:io.prometheus.client.Bucket.cumulative_count) +} + +// optional double upper_bound = 2; +inline bool Bucket::has_upper_bound() const { + return (_has_bits_[0] & 0x00000002u) != 0; +} +inline void Bucket::set_has_upper_bound() { + _has_bits_[0] |= 0x00000002u; +} +inline void Bucket::clear_has_upper_bound() { + _has_bits_[0] &= ~0x00000002u; +} +inline void Bucket::clear_upper_bound() { + upper_bound_ = 0; + clear_has_upper_bound(); +} +inline double Bucket::upper_bound() const { + // @@protoc_insertion_point(field_get:io.prometheus.client.Bucket.upper_bound) + return upper_bound_; +} +inline void Bucket::set_upper_bound(double value) { + set_has_upper_bound(); + upper_bound_ = value; + // @@protoc_insertion_point(field_set:io.prometheus.client.Bucket.upper_bound) +} + +// ------------------------------------------------------------------- + +// Metric + +// repeated .io.prometheus.client.LabelPair label = 1; +inline int Metric::label_size() const { + return label_.size(); +} +inline void Metric::clear_label() { + label_.Clear(); +} +inline const ::io::prometheus::client::LabelPair& Metric::label(int index) const { + // @@protoc_insertion_point(field_get:io.prometheus.client.Metric.label) + return label_.Get(index); +} +inline ::io::prometheus::client::LabelPair* Metric::mutable_label(int index) { + // @@protoc_insertion_point(field_mutable:io.prometheus.client.Metric.label) + return label_.Mutable(index); +} +inline ::io::prometheus::client::LabelPair* Metric::add_label() { + // @@protoc_insertion_point(field_add:io.prometheus.client.Metric.label) + return label_.Add(); +} +inline const ::google::protobuf::RepeatedPtrField< ::io::prometheus::client::LabelPair >& +Metric::label() const { + // @@protoc_insertion_point(field_list:io.prometheus.client.Metric.label) + return label_; +} +inline 
::google::protobuf::RepeatedPtrField< ::io::prometheus::client::LabelPair >* +Metric::mutable_label() { + // @@protoc_insertion_point(field_mutable_list:io.prometheus.client.Metric.label) + return &label_; +} + +// optional .io.prometheus.client.Gauge gauge = 2; +inline bool Metric::has_gauge() const { + return (_has_bits_[0] & 0x00000002u) != 0; +} +inline void Metric::set_has_gauge() { + _has_bits_[0] |= 0x00000002u; +} +inline void Metric::clear_has_gauge() { + _has_bits_[0] &= ~0x00000002u; +} +inline void Metric::clear_gauge() { + if (gauge_ != NULL) gauge_->::io::prometheus::client::Gauge::Clear(); + clear_has_gauge(); +} +inline const ::io::prometheus::client::Gauge& Metric::gauge() const { + // @@protoc_insertion_point(field_get:io.prometheus.client.Metric.gauge) + return gauge_ != NULL ? *gauge_ : *default_instance_->gauge_; +} +inline ::io::prometheus::client::Gauge* Metric::mutable_gauge() { + set_has_gauge(); + if (gauge_ == NULL) gauge_ = new ::io::prometheus::client::Gauge; + // @@protoc_insertion_point(field_mutable:io.prometheus.client.Metric.gauge) + return gauge_; +} +inline ::io::prometheus::client::Gauge* Metric::release_gauge() { + clear_has_gauge(); + ::io::prometheus::client::Gauge* temp = gauge_; + gauge_ = NULL; + return temp; +} +inline void Metric::set_allocated_gauge(::io::prometheus::client::Gauge* gauge) { + delete gauge_; + gauge_ = gauge; + if (gauge) { + set_has_gauge(); + } else { + clear_has_gauge(); + } + // @@protoc_insertion_point(field_set_allocated:io.prometheus.client.Metric.gauge) +} + +// optional .io.prometheus.client.Counter counter = 3; +inline bool Metric::has_counter() const { + return (_has_bits_[0] & 0x00000004u) != 0; +} +inline void Metric::set_has_counter() { + _has_bits_[0] |= 0x00000004u; +} +inline void Metric::clear_has_counter() { + _has_bits_[0] &= ~0x00000004u; +} +inline void Metric::clear_counter() { + if (counter_ != NULL) counter_->::io::prometheus::client::Counter::Clear(); + clear_has_counter(); +} 
+inline const ::io::prometheus::client::Counter& Metric::counter() const { + // @@protoc_insertion_point(field_get:io.prometheus.client.Metric.counter) + return counter_ != NULL ? *counter_ : *default_instance_->counter_; +} +inline ::io::prometheus::client::Counter* Metric::mutable_counter() { + set_has_counter(); + if (counter_ == NULL) counter_ = new ::io::prometheus::client::Counter; + // @@protoc_insertion_point(field_mutable:io.prometheus.client.Metric.counter) + return counter_; +} +inline ::io::prometheus::client::Counter* Metric::release_counter() { + clear_has_counter(); + ::io::prometheus::client::Counter* temp = counter_; + counter_ = NULL; + return temp; +} +inline void Metric::set_allocated_counter(::io::prometheus::client::Counter* counter) { + delete counter_; + counter_ = counter; + if (counter) { + set_has_counter(); + } else { + clear_has_counter(); + } + // @@protoc_insertion_point(field_set_allocated:io.prometheus.client.Metric.counter) +} + +// optional .io.prometheus.client.Summary summary = 4; +inline bool Metric::has_summary() const { + return (_has_bits_[0] & 0x00000008u) != 0; +} +inline void Metric::set_has_summary() { + _has_bits_[0] |= 0x00000008u; +} +inline void Metric::clear_has_summary() { + _has_bits_[0] &= ~0x00000008u; +} +inline void Metric::clear_summary() { + if (summary_ != NULL) summary_->::io::prometheus::client::Summary::Clear(); + clear_has_summary(); +} +inline const ::io::prometheus::client::Summary& Metric::summary() const { + // @@protoc_insertion_point(field_get:io.prometheus.client.Metric.summary) + return summary_ != NULL ? 
*summary_ : *default_instance_->summary_; +} +inline ::io::prometheus::client::Summary* Metric::mutable_summary() { + set_has_summary(); + if (summary_ == NULL) summary_ = new ::io::prometheus::client::Summary; + // @@protoc_insertion_point(field_mutable:io.prometheus.client.Metric.summary) + return summary_; +} +inline ::io::prometheus::client::Summary* Metric::release_summary() { + clear_has_summary(); + ::io::prometheus::client::Summary* temp = summary_; + summary_ = NULL; + return temp; +} +inline void Metric::set_allocated_summary(::io::prometheus::client::Summary* summary) { + delete summary_; + summary_ = summary; + if (summary) { + set_has_summary(); + } else { + clear_has_summary(); + } + // @@protoc_insertion_point(field_set_allocated:io.prometheus.client.Metric.summary) +} + +// optional .io.prometheus.client.Untyped untyped = 5; +inline bool Metric::has_untyped() const { + return (_has_bits_[0] & 0x00000010u) != 0; +} +inline void Metric::set_has_untyped() { + _has_bits_[0] |= 0x00000010u; +} +inline void Metric::clear_has_untyped() { + _has_bits_[0] &= ~0x00000010u; +} +inline void Metric::clear_untyped() { + if (untyped_ != NULL) untyped_->::io::prometheus::client::Untyped::Clear(); + clear_has_untyped(); +} +inline const ::io::prometheus::client::Untyped& Metric::untyped() const { + // @@protoc_insertion_point(field_get:io.prometheus.client.Metric.untyped) + return untyped_ != NULL ? 
*untyped_ : *default_instance_->untyped_; +} +inline ::io::prometheus::client::Untyped* Metric::mutable_untyped() { + set_has_untyped(); + if (untyped_ == NULL) untyped_ = new ::io::prometheus::client::Untyped; + // @@protoc_insertion_point(field_mutable:io.prometheus.client.Metric.untyped) + return untyped_; +} +inline ::io::prometheus::client::Untyped* Metric::release_untyped() { + clear_has_untyped(); + ::io::prometheus::client::Untyped* temp = untyped_; + untyped_ = NULL; + return temp; +} +inline void Metric::set_allocated_untyped(::io::prometheus::client::Untyped* untyped) { + delete untyped_; + untyped_ = untyped; + if (untyped) { + set_has_untyped(); + } else { + clear_has_untyped(); + } + // @@protoc_insertion_point(field_set_allocated:io.prometheus.client.Metric.untyped) +} + +// optional .io.prometheus.client.Histogram histogram = 7; +inline bool Metric::has_histogram() const { + return (_has_bits_[0] & 0x00000020u) != 0; +} +inline void Metric::set_has_histogram() { + _has_bits_[0] |= 0x00000020u; +} +inline void Metric::clear_has_histogram() { + _has_bits_[0] &= ~0x00000020u; +} +inline void Metric::clear_histogram() { + if (histogram_ != NULL) histogram_->::io::prometheus::client::Histogram::Clear(); + clear_has_histogram(); +} +inline const ::io::prometheus::client::Histogram& Metric::histogram() const { + // @@protoc_insertion_point(field_get:io.prometheus.client.Metric.histogram) + return histogram_ != NULL ? 
*histogram_ : *default_instance_->histogram_; +} +inline ::io::prometheus::client::Histogram* Metric::mutable_histogram() { + set_has_histogram(); + if (histogram_ == NULL) histogram_ = new ::io::prometheus::client::Histogram; + // @@protoc_insertion_point(field_mutable:io.prometheus.client.Metric.histogram) + return histogram_; +} +inline ::io::prometheus::client::Histogram* Metric::release_histogram() { + clear_has_histogram(); + ::io::prometheus::client::Histogram* temp = histogram_; + histogram_ = NULL; + return temp; +} +inline void Metric::set_allocated_histogram(::io::prometheus::client::Histogram* histogram) { + delete histogram_; + histogram_ = histogram; + if (histogram) { + set_has_histogram(); + } else { + clear_has_histogram(); + } + // @@protoc_insertion_point(field_set_allocated:io.prometheus.client.Metric.histogram) +} + +// optional int64 timestamp_ms = 6; +inline bool Metric::has_timestamp_ms() const { + return (_has_bits_[0] & 0x00000040u) != 0; +} +inline void Metric::set_has_timestamp_ms() { + _has_bits_[0] |= 0x00000040u; +} +inline void Metric::clear_has_timestamp_ms() { + _has_bits_[0] &= ~0x00000040u; +} +inline void Metric::clear_timestamp_ms() { + timestamp_ms_ = GOOGLE_LONGLONG(0); + clear_has_timestamp_ms(); +} +inline ::google::protobuf::int64 Metric::timestamp_ms() const { + // @@protoc_insertion_point(field_get:io.prometheus.client.Metric.timestamp_ms) + return timestamp_ms_; +} +inline void Metric::set_timestamp_ms(::google::protobuf::int64 value) { + set_has_timestamp_ms(); + timestamp_ms_ = value; + // @@protoc_insertion_point(field_set:io.prometheus.client.Metric.timestamp_ms) +} + +// ------------------------------------------------------------------- + +// MetricFamily + +// optional string name = 1; +inline bool MetricFamily::has_name() const { + return (_has_bits_[0] & 0x00000001u) != 0; +} +inline void MetricFamily::set_has_name() { + _has_bits_[0] |= 0x00000001u; +} +inline void MetricFamily::clear_has_name() { + 
_has_bits_[0] &= ~0x00000001u; +} +inline void MetricFamily::clear_name() { + if (name_ != &::google::protobuf::internal::GetEmptyStringAlreadyInited()) { + name_->clear(); + } + clear_has_name(); +} +inline const ::std::string& MetricFamily::name() const { + // @@protoc_insertion_point(field_get:io.prometheus.client.MetricFamily.name) + return *name_; +} +inline void MetricFamily::set_name(const ::std::string& value) { + set_has_name(); + if (name_ == &::google::protobuf::internal::GetEmptyStringAlreadyInited()) { + name_ = new ::std::string; + } + name_->assign(value); + // @@protoc_insertion_point(field_set:io.prometheus.client.MetricFamily.name) +} +inline void MetricFamily::set_name(const char* value) { + set_has_name(); + if (name_ == &::google::protobuf::internal::GetEmptyStringAlreadyInited()) { + name_ = new ::std::string; + } + name_->assign(value); + // @@protoc_insertion_point(field_set_char:io.prometheus.client.MetricFamily.name) +} +inline void MetricFamily::set_name(const char* value, size_t size) { + set_has_name(); + if (name_ == &::google::protobuf::internal::GetEmptyStringAlreadyInited()) { + name_ = new ::std::string; + } + name_->assign(reinterpret_cast(value), size); + // @@protoc_insertion_point(field_set_pointer:io.prometheus.client.MetricFamily.name) +} +inline ::std::string* MetricFamily::mutable_name() { + set_has_name(); + if (name_ == &::google::protobuf::internal::GetEmptyStringAlreadyInited()) { + name_ = new ::std::string; + } + // @@protoc_insertion_point(field_mutable:io.prometheus.client.MetricFamily.name) + return name_; +} +inline ::std::string* MetricFamily::release_name() { + clear_has_name(); + if (name_ == &::google::protobuf::internal::GetEmptyStringAlreadyInited()) { + return NULL; + } else { + ::std::string* temp = name_; + name_ = const_cast< ::std::string*>(&::google::protobuf::internal::GetEmptyStringAlreadyInited()); + return temp; + } +} +inline void MetricFamily::set_allocated_name(::std::string* name) { + if (name_ 
!= &::google::protobuf::internal::GetEmptyStringAlreadyInited()) { + delete name_; + } + if (name) { + set_has_name(); + name_ = name; + } else { + clear_has_name(); + name_ = const_cast< ::std::string*>(&::google::protobuf::internal::GetEmptyStringAlreadyInited()); + } + // @@protoc_insertion_point(field_set_allocated:io.prometheus.client.MetricFamily.name) +} + +// optional string help = 2; +inline bool MetricFamily::has_help() const { + return (_has_bits_[0] & 0x00000002u) != 0; +} +inline void MetricFamily::set_has_help() { + _has_bits_[0] |= 0x00000002u; +} +inline void MetricFamily::clear_has_help() { + _has_bits_[0] &= ~0x00000002u; +} +inline void MetricFamily::clear_help() { + if (help_ != &::google::protobuf::internal::GetEmptyStringAlreadyInited()) { + help_->clear(); + } + clear_has_help(); +} +inline const ::std::string& MetricFamily::help() const { + // @@protoc_insertion_point(field_get:io.prometheus.client.MetricFamily.help) + return *help_; +} +inline void MetricFamily::set_help(const ::std::string& value) { + set_has_help(); + if (help_ == &::google::protobuf::internal::GetEmptyStringAlreadyInited()) { + help_ = new ::std::string; + } + help_->assign(value); + // @@protoc_insertion_point(field_set:io.prometheus.client.MetricFamily.help) +} +inline void MetricFamily::set_help(const char* value) { + set_has_help(); + if (help_ == &::google::protobuf::internal::GetEmptyStringAlreadyInited()) { + help_ = new ::std::string; + } + help_->assign(value); + // @@protoc_insertion_point(field_set_char:io.prometheus.client.MetricFamily.help) +} +inline void MetricFamily::set_help(const char* value, size_t size) { + set_has_help(); + if (help_ == &::google::protobuf::internal::GetEmptyStringAlreadyInited()) { + help_ = new ::std::string; + } + help_->assign(reinterpret_cast(value), size); + // @@protoc_insertion_point(field_set_pointer:io.prometheus.client.MetricFamily.help) +} +inline ::std::string* MetricFamily::mutable_help() { + set_has_help(); + if (help_ 
== &::google::protobuf::internal::GetEmptyStringAlreadyInited()) { + help_ = new ::std::string; + } + // @@protoc_insertion_point(field_mutable:io.prometheus.client.MetricFamily.help) + return help_; +} +inline ::std::string* MetricFamily::release_help() { + clear_has_help(); + if (help_ == &::google::protobuf::internal::GetEmptyStringAlreadyInited()) { + return NULL; + } else { + ::std::string* temp = help_; + help_ = const_cast< ::std::string*>(&::google::protobuf::internal::GetEmptyStringAlreadyInited()); + return temp; + } +} +inline void MetricFamily::set_allocated_help(::std::string* help) { + if (help_ != &::google::protobuf::internal::GetEmptyStringAlreadyInited()) { + delete help_; + } + if (help) { + set_has_help(); + help_ = help; + } else { + clear_has_help(); + help_ = const_cast< ::std::string*>(&::google::protobuf::internal::GetEmptyStringAlreadyInited()); + } + // @@protoc_insertion_point(field_set_allocated:io.prometheus.client.MetricFamily.help) +} + +// optional .io.prometheus.client.MetricType type = 3; +inline bool MetricFamily::has_type() const { + return (_has_bits_[0] & 0x00000004u) != 0; +} +inline void MetricFamily::set_has_type() { + _has_bits_[0] |= 0x00000004u; +} +inline void MetricFamily::clear_has_type() { + _has_bits_[0] &= ~0x00000004u; +} +inline void MetricFamily::clear_type() { + type_ = 0; + clear_has_type(); +} +inline ::io::prometheus::client::MetricType MetricFamily::type() const { + // @@protoc_insertion_point(field_get:io.prometheus.client.MetricFamily.type) + return static_cast< ::io::prometheus::client::MetricType >(type_); +} +inline void MetricFamily::set_type(::io::prometheus::client::MetricType value) { + assert(::io::prometheus::client::MetricType_IsValid(value)); + set_has_type(); + type_ = value; + // @@protoc_insertion_point(field_set:io.prometheus.client.MetricFamily.type) +} + +// repeated .io.prometheus.client.Metric metric = 4; +inline int MetricFamily::metric_size() const { + return metric_.size(); +} 
+inline void MetricFamily::clear_metric() { + metric_.Clear(); +} +inline const ::io::prometheus::client::Metric& MetricFamily::metric(int index) const { + // @@protoc_insertion_point(field_get:io.prometheus.client.MetricFamily.metric) + return metric_.Get(index); +} +inline ::io::prometheus::client::Metric* MetricFamily::mutable_metric(int index) { + // @@protoc_insertion_point(field_mutable:io.prometheus.client.MetricFamily.metric) + return metric_.Mutable(index); +} +inline ::io::prometheus::client::Metric* MetricFamily::add_metric() { + // @@protoc_insertion_point(field_add:io.prometheus.client.MetricFamily.metric) + return metric_.Add(); +} +inline const ::google::protobuf::RepeatedPtrField< ::io::prometheus::client::Metric >& +MetricFamily::metric() const { + // @@protoc_insertion_point(field_list:io.prometheus.client.MetricFamily.metric) + return metric_; +} +inline ::google::protobuf::RepeatedPtrField< ::io::prometheus::client::Metric >* +MetricFamily::mutable_metric() { + // @@protoc_insertion_point(field_mutable_list:io.prometheus.client.MetricFamily.metric) + return &metric_; +} + + +// @@protoc_insertion_point(namespace_scope) + +} // namespace client +} // namespace prometheus +} // namespace io + +#ifndef SWIG +namespace google { +namespace protobuf { + +template <> struct is_proto_enum< ::io::prometheus::client::MetricType> : ::google::protobuf::internal::true_type {}; +template <> +inline const EnumDescriptor* GetEnumDescriptor< ::io::prometheus::client::MetricType>() { + return ::io::prometheus::client::MetricType_descriptor(); +} + +} // namespace google +} // namespace protobuf +#endif // SWIG + +// @@protoc_insertion_point(global_scope) + +#endif // PROTOBUF_metrics_2eproto__INCLUDED diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/client_model/.gitignore juju-core-2.0.0/src/github.com/prometheus/client_model/.gitignore --- juju-core-2.0~beta15/src/github.com/prometheus/client_model/.gitignore 1970-01-01 00:00:00.000000000 +0000 +++ 
juju-core-2.0.0/src/github.com/prometheus/client_model/.gitignore 2016-10-13 14:31:54.000000000 +0000 @@ -0,0 +1 @@ +target/ diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/client_model/go/metrics.pb.go juju-core-2.0.0/src/github.com/prometheus/client_model/go/metrics.pb.go --- juju-core-2.0~beta15/src/github.com/prometheus/client_model/go/metrics.pb.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/client_model/go/metrics.pb.go 2016-10-13 14:31:54.000000000 +0000 @@ -0,0 +1,364 @@ +// Code generated by protoc-gen-go. +// source: metrics.proto +// DO NOT EDIT! + +/* +Package io_prometheus_client is a generated protocol buffer package. + +It is generated from these files: + metrics.proto + +It has these top-level messages: + LabelPair + Gauge + Counter + Quantile + Summary + Untyped + Histogram + Bucket + Metric + MetricFamily +*/ +package io_prometheus_client + +import proto "github.com/golang/protobuf/proto" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = math.Inf + +type MetricType int32 + +const ( + MetricType_COUNTER MetricType = 0 + MetricType_GAUGE MetricType = 1 + MetricType_SUMMARY MetricType = 2 + MetricType_UNTYPED MetricType = 3 + MetricType_HISTOGRAM MetricType = 4 +) + +var MetricType_name = map[int32]string{ + 0: "COUNTER", + 1: "GAUGE", + 2: "SUMMARY", + 3: "UNTYPED", + 4: "HISTOGRAM", +} +var MetricType_value = map[string]int32{ + "COUNTER": 0, + "GAUGE": 1, + "SUMMARY": 2, + "UNTYPED": 3, + "HISTOGRAM": 4, +} + +func (x MetricType) Enum() *MetricType { + p := new(MetricType) + *p = x + return p +} +func (x MetricType) String() string { + return proto.EnumName(MetricType_name, int32(x)) +} +func (x *MetricType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(MetricType_value, data, "MetricType") + if err != nil { + return err + } + *x = MetricType(value) + return nil +} + +type LabelPair struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *LabelPair) Reset() { *m = LabelPair{} } +func (m *LabelPair) String() string { return proto.CompactTextString(m) } +func (*LabelPair) ProtoMessage() {} + +func (m *LabelPair) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *LabelPair) GetValue() string { + if m != nil && m.Value != nil { + return *m.Value + } + return "" +} + +type Gauge struct { + Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Gauge) Reset() { *m = Gauge{} } +func (m *Gauge) String() string { return proto.CompactTextString(m) } +func (*Gauge) ProtoMessage() {} + +func (m *Gauge) GetValue() float64 { + if m != nil && m.Value != nil { + return *m.Value + } + return 0 +} + +type Counter struct { + Value *float64 `protobuf:"fixed64,1,opt,name=value" 
json:"value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Counter) Reset() { *m = Counter{} } +func (m *Counter) String() string { return proto.CompactTextString(m) } +func (*Counter) ProtoMessage() {} + +func (m *Counter) GetValue() float64 { + if m != nil && m.Value != nil { + return *m.Value + } + return 0 +} + +type Quantile struct { + Quantile *float64 `protobuf:"fixed64,1,opt,name=quantile" json:"quantile,omitempty"` + Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Quantile) Reset() { *m = Quantile{} } +func (m *Quantile) String() string { return proto.CompactTextString(m) } +func (*Quantile) ProtoMessage() {} + +func (m *Quantile) GetQuantile() float64 { + if m != nil && m.Quantile != nil { + return *m.Quantile + } + return 0 +} + +func (m *Quantile) GetValue() float64 { + if m != nil && m.Value != nil { + return *m.Value + } + return 0 +} + +type Summary struct { + SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count" json:"sample_count,omitempty"` + SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum" json:"sample_sum,omitempty"` + Quantile []*Quantile `protobuf:"bytes,3,rep,name=quantile" json:"quantile,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Summary) Reset() { *m = Summary{} } +func (m *Summary) String() string { return proto.CompactTextString(m) } +func (*Summary) ProtoMessage() {} + +func (m *Summary) GetSampleCount() uint64 { + if m != nil && m.SampleCount != nil { + return *m.SampleCount + } + return 0 +} + +func (m *Summary) GetSampleSum() float64 { + if m != nil && m.SampleSum != nil { + return *m.SampleSum + } + return 0 +} + +func (m *Summary) GetQuantile() []*Quantile { + if m != nil { + return m.Quantile + } + return nil +} + +type Untyped struct { + Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Untyped) Reset() { *m = 
Untyped{} } +func (m *Untyped) String() string { return proto.CompactTextString(m) } +func (*Untyped) ProtoMessage() {} + +func (m *Untyped) GetValue() float64 { + if m != nil && m.Value != nil { + return *m.Value + } + return 0 +} + +type Histogram struct { + SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count" json:"sample_count,omitempty"` + SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum" json:"sample_sum,omitempty"` + Bucket []*Bucket `protobuf:"bytes,3,rep,name=bucket" json:"bucket,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Histogram) Reset() { *m = Histogram{} } +func (m *Histogram) String() string { return proto.CompactTextString(m) } +func (*Histogram) ProtoMessage() {} + +func (m *Histogram) GetSampleCount() uint64 { + if m != nil && m.SampleCount != nil { + return *m.SampleCount + } + return 0 +} + +func (m *Histogram) GetSampleSum() float64 { + if m != nil && m.SampleSum != nil { + return *m.SampleSum + } + return 0 +} + +func (m *Histogram) GetBucket() []*Bucket { + if m != nil { + return m.Bucket + } + return nil +} + +type Bucket struct { + CumulativeCount *uint64 `protobuf:"varint,1,opt,name=cumulative_count" json:"cumulative_count,omitempty"` + UpperBound *float64 `protobuf:"fixed64,2,opt,name=upper_bound" json:"upper_bound,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Bucket) Reset() { *m = Bucket{} } +func (m *Bucket) String() string { return proto.CompactTextString(m) } +func (*Bucket) ProtoMessage() {} + +func (m *Bucket) GetCumulativeCount() uint64 { + if m != nil && m.CumulativeCount != nil { + return *m.CumulativeCount + } + return 0 +} + +func (m *Bucket) GetUpperBound() float64 { + if m != nil && m.UpperBound != nil { + return *m.UpperBound + } + return 0 +} + +type Metric struct { + Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"` + Gauge *Gauge `protobuf:"bytes,2,opt,name=gauge" json:"gauge,omitempty"` + Counter *Counter 
`protobuf:"bytes,3,opt,name=counter" json:"counter,omitempty"` + Summary *Summary `protobuf:"bytes,4,opt,name=summary" json:"summary,omitempty"` + Untyped *Untyped `protobuf:"bytes,5,opt,name=untyped" json:"untyped,omitempty"` + Histogram *Histogram `protobuf:"bytes,7,opt,name=histogram" json:"histogram,omitempty"` + TimestampMs *int64 `protobuf:"varint,6,opt,name=timestamp_ms" json:"timestamp_ms,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Metric) Reset() { *m = Metric{} } +func (m *Metric) String() string { return proto.CompactTextString(m) } +func (*Metric) ProtoMessage() {} + +func (m *Metric) GetLabel() []*LabelPair { + if m != nil { + return m.Label + } + return nil +} + +func (m *Metric) GetGauge() *Gauge { + if m != nil { + return m.Gauge + } + return nil +} + +func (m *Metric) GetCounter() *Counter { + if m != nil { + return m.Counter + } + return nil +} + +func (m *Metric) GetSummary() *Summary { + if m != nil { + return m.Summary + } + return nil +} + +func (m *Metric) GetUntyped() *Untyped { + if m != nil { + return m.Untyped + } + return nil +} + +func (m *Metric) GetHistogram() *Histogram { + if m != nil { + return m.Histogram + } + return nil +} + +func (m *Metric) GetTimestampMs() int64 { + if m != nil && m.TimestampMs != nil { + return *m.TimestampMs + } + return 0 +} + +type MetricFamily struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Help *string `protobuf:"bytes,2,opt,name=help" json:"help,omitempty"` + Type *MetricType `protobuf:"varint,3,opt,name=type,enum=io.prometheus.client.MetricType" json:"type,omitempty"` + Metric []*Metric `protobuf:"bytes,4,rep,name=metric" json:"metric,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MetricFamily) Reset() { *m = MetricFamily{} } +func (m *MetricFamily) String() string { return proto.CompactTextString(m) } +func (*MetricFamily) ProtoMessage() {} + +func (m *MetricFamily) GetName() string { + if m != nil && m.Name != nil { + return 
*m.Name + } + return "" +} + +func (m *MetricFamily) GetHelp() string { + if m != nil && m.Help != nil { + return *m.Help + } + return "" +} + +func (m *MetricFamily) GetType() MetricType { + if m != nil && m.Type != nil { + return *m.Type + } + return MetricType_COUNTER +} + +func (m *MetricFamily) GetMetric() []*Metric { + if m != nil { + return m.Metric + } + return nil +} + +func init() { + proto.RegisterEnum("io.prometheus.client.MetricType", MetricType_name, MetricType_value) +} diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/client_model/LICENSE juju-core-2.0.0/src/github.com/prometheus/client_model/LICENSE --- juju-core-2.0~beta15/src/github.com/prometheus/client_model/LICENSE 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/client_model/LICENSE 2016-10-13 14:31:54.000000000 +0000 @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/client_model/Makefile juju-core-2.0.0/src/github.com/prometheus/client_model/Makefile --- juju-core-2.0~beta15/src/github.com/prometheus/client_model/Makefile 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/client_model/Makefile 2016-10-13 14:31:54.000000000 +0000 @@ -0,0 +1,61 @@ +# Copyright 2013 Prometheus Team +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +KEY_ID ?= _DEFINE_ME_ + +all: cpp go java python ruby + +SUFFIXES: + +cpp: cpp/metrics.pb.cc cpp/metrics.pb.h + +cpp/metrics.pb.cc: metrics.proto + protoc $< --cpp_out=cpp/ + +cpp/metrics.pb.h: metrics.proto + protoc $< --cpp_out=cpp/ + +go: go/metrics.pb.go + +go/metrics.pb.go: metrics.proto + protoc $< --go_out=go/ + +java: src/main/java/io/prometheus/client/Metrics.java pom.xml + mvn clean compile package + +src/main/java/io/prometheus/client/Metrics.java: metrics.proto + protoc $< --java_out=src/main/java + +python: python/prometheus/client/model/metrics_pb2.py + +python/prometheus/client/model/metrics_pb2.py: metrics.proto + protoc $< --python_out=python/prometheus/client/model + +ruby: + $(MAKE) -C ruby build + +clean: + -rm -rf cpp/* + -rm -rf go/* + -rm -rf java/* + -rm -rf python/* + -$(MAKE) -C ruby clean + -mvn clean + +maven-deploy-snapshot: java + mvn clean deploy -Dgpg.keyname=$(KEY_ID) -DperformRelease=true + +maven-deploy-release: java + mvn clean release:clean release:prepare release:perform -Dgpg.keyname=$(KEY_ID) -DperformRelease=true + +.PHONY: all clean cpp go java maven-deploy-snapshot maven-deploy-release python ruby diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/client_model/metrics.proto juju-core-2.0.0/src/github.com/prometheus/client_model/metrics.proto --- juju-core-2.0~beta15/src/github.com/prometheus/client_model/metrics.proto 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/client_model/metrics.proto 2016-10-13 14:31:54.000000000 +0000 @@ -0,0 +1,81 @@ +// Copyright 2013 Prometheus Team +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto2"; + +package io.prometheus.client; +option java_package = "io.prometheus.client"; + +message LabelPair { + optional string name = 1; + optional string value = 2; +} + +enum MetricType { + COUNTER = 0; + GAUGE = 1; + SUMMARY = 2; + UNTYPED = 3; + HISTOGRAM = 4; +} + +message Gauge { + optional double value = 1; +} + +message Counter { + optional double value = 1; +} + +message Quantile { + optional double quantile = 1; + optional double value = 2; +} + +message Summary { + optional uint64 sample_count = 1; + optional double sample_sum = 2; + repeated Quantile quantile = 3; +} + +message Untyped { + optional double value = 1; +} + +message Histogram { + optional uint64 sample_count = 1; + optional double sample_sum = 2; + repeated Bucket bucket = 3; // Ordered in increasing order of upper_bound, +Inf bucket is optional. +} + +message Bucket { + optional uint64 cumulative_count = 1; // Cumulative in increasing order. + optional double upper_bound = 2; // Inclusive. 
+} + +message Metric { + repeated LabelPair label = 1; + optional Gauge gauge = 2; + optional Counter counter = 3; + optional Summary summary = 4; + optional Untyped untyped = 5; + optional Histogram histogram = 7; + optional int64 timestamp_ms = 6; +} + +message MetricFamily { + optional string name = 1; + optional string help = 2; + optional MetricType type = 3; + repeated Metric metric = 4; +} diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/client_model/NOTICE juju-core-2.0.0/src/github.com/prometheus/client_model/NOTICE --- juju-core-2.0~beta15/src/github.com/prometheus/client_model/NOTICE 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/client_model/NOTICE 2016-10-13 14:31:54.000000000 +0000 @@ -0,0 +1,5 @@ +Data model artifacts for Prometheus. +Copyright 2012-2015 The Prometheus Authors + +This product includes software developed at +SoundCloud Ltd. (http://soundcloud.com/). diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/client_model/pom.xml juju-core-2.0.0/src/github.com/prometheus/client_model/pom.xml --- juju-core-2.0~beta15/src/github.com/prometheus/client_model/pom.xml 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/client_model/pom.xml 2016-10-13 14:31:54.000000000 +0000 @@ -0,0 +1,130 @@ + + + 4.0.0 + + io.prometheus.client + model + 0.0.3-SNAPSHOT + + + org.sonatype.oss + oss-parent + 7 + + + Prometheus Client Data Model + http://github.com/prometheus/client_model + + Prometheus Client Data Model: Generated Protocol Buffer Assets + + + + + The Apache Software License, Version 2.0 + http://www.apache.org/licenses/LICENSE-2.0.txt + repo + + + + + scm:git:git@github.com:prometheus/client_model.git + scm:git:git@github.com:prometheus/client_model.git + git@github.com:prometheus/client_model.git + + + + + mtp + Matt T. 
Proud + matt.proud@gmail.com + + + + + + com.google.protobuf + protobuf-java + 2.5.0 + + + + + + + org.apache.maven.plugins + maven-javadoc-plugin + 2.8 + + UTF-8 + UTF-8 + true + + + + generate-javadoc-site-report + site + + javadoc + + + + attach-javadocs + + jar + + + + + + maven-compiler-plugin + + 1.6 + 1.6 + + 3.1 + + + org.apache.maven.plugins + maven-source-plugin + 2.2.1 + + + attach-sources + + jar + + + + + + + + + release-sign-artifacts + + + performRelease + true + + + + + + org.apache.maven.plugins + maven-gpg-plugin + 1.4 + + + sign-artifacts + verify + + sign + + + + + + + + + diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/client_model/python/prometheus/client/__init__.py juju-core-2.0.0/src/github.com/prometheus/client_model/python/prometheus/client/__init__.py --- juju-core-2.0~beta15/src/github.com/prometheus/client_model/python/prometheus/client/__init__.py 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/client_model/python/prometheus/client/__init__.py 2016-10-13 14:31:54.000000000 +0000 @@ -0,0 +1,12 @@ + # Copyright 2013 Prometheus Team + # Licensed under the Apache License, Version 2.0 (the "License"); + # you may not use this file except in compliance with the License. + # You may obtain a copy of the License at + + # http://www.apache.org/licenses/LICENSE-2.0 + + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. 
diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/client_model/python/prometheus/client/model/__init__.py juju-core-2.0.0/src/github.com/prometheus/client_model/python/prometheus/client/model/__init__.py --- juju-core-2.0~beta15/src/github.com/prometheus/client_model/python/prometheus/client/model/__init__.py 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/client_model/python/prometheus/client/model/__init__.py 2016-10-13 14:31:54.000000000 +0000 @@ -0,0 +1,14 @@ + # Copyright 2013 Prometheus Team + # Licensed under the Apache License, Version 2.0 (the "License"); + # you may not use this file except in compliance with the License. + # You may obtain a copy of the License at + + # http://www.apache.org/licenses/LICENSE-2.0 + + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + +__all__ = ['metrics_pb2'] diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/client_model/python/prometheus/client/model/metrics_pb2.py juju-core-2.0.0/src/github.com/prometheus/client_model/python/prometheus/client/model/metrics_pb2.py --- juju-core-2.0~beta15/src/github.com/prometheus/client_model/python/prometheus/client/model/metrics_pb2.py 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/client_model/python/prometheus/client/model/metrics_pb2.py 2016-10-13 14:31:54.000000000 +0000 @@ -0,0 +1,575 @@ +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: metrics.proto + +import sys +_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) +from google.protobuf.internal import enum_type_wrapper +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +from google.protobuf import descriptor_pb2 +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor.FileDescriptor( + name='metrics.proto', + package='io.prometheus.client', + serialized_pb=_b('\n\rmetrics.proto\x12\x14io.prometheus.client\"(\n\tLabelPair\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t\"\x16\n\x05Gauge\x12\r\n\x05value\x18\x01 \x01(\x01\"\x18\n\x07\x43ounter\x12\r\n\x05value\x18\x01 \x01(\x01\"+\n\x08Quantile\x12\x10\n\x08quantile\x18\x01 \x01(\x01\x12\r\n\x05value\x18\x02 \x01(\x01\"e\n\x07Summary\x12\x14\n\x0csample_count\x18\x01 \x01(\x04\x12\x12\n\nsample_sum\x18\x02 \x01(\x01\x12\x30\n\x08quantile\x18\x03 \x03(\x0b\x32\x1e.io.prometheus.client.Quantile\"\x18\n\x07Untyped\x12\r\n\x05value\x18\x01 \x01(\x01\"c\n\tHistogram\x12\x14\n\x0csample_count\x18\x01 \x01(\x04\x12\x12\n\nsample_sum\x18\x02 \x01(\x01\x12,\n\x06\x62ucket\x18\x03 \x03(\x0b\x32\x1c.io.prometheus.client.Bucket\"7\n\x06\x42ucket\x12\x18\n\x10\x63umulative_count\x18\x01 \x01(\x04\x12\x13\n\x0bupper_bound\x18\x02 \x01(\x01\"\xbe\x02\n\x06Metric\x12.\n\x05label\x18\x01 \x03(\x0b\x32\x1f.io.prometheus.client.LabelPair\x12*\n\x05gauge\x18\x02 \x01(\x0b\x32\x1b.io.prometheus.client.Gauge\x12.\n\x07\x63ounter\x18\x03 \x01(\x0b\x32\x1d.io.prometheus.client.Counter\x12.\n\x07summary\x18\x04 \x01(\x0b\x32\x1d.io.prometheus.client.Summary\x12.\n\x07untyped\x18\x05 \x01(\x0b\x32\x1d.io.prometheus.client.Untyped\x12\x32\n\thistogram\x18\x07 \x01(\x0b\x32\x1f.io.prometheus.client.Histogram\x12\x14\n\x0ctimestamp_ms\x18\x06 
\x01(\x03\"\x88\x01\n\x0cMetricFamily\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0c\n\x04help\x18\x02 \x01(\t\x12.\n\x04type\x18\x03 \x01(\x0e\x32 .io.prometheus.client.MetricType\x12,\n\x06metric\x18\x04 \x03(\x0b\x32\x1c.io.prometheus.client.Metric*M\n\nMetricType\x12\x0b\n\x07\x43OUNTER\x10\x00\x12\t\n\x05GAUGE\x10\x01\x12\x0b\n\x07SUMMARY\x10\x02\x12\x0b\n\x07UNTYPED\x10\x03\x12\r\n\tHISTOGRAM\x10\x04\x42\x16\n\x14io.prometheus.client') +) +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + +_METRICTYPE = _descriptor.EnumDescriptor( + name='MetricType', + full_name='io.prometheus.client.MetricType', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='COUNTER', index=0, number=0, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='GAUGE', index=1, number=1, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='SUMMARY', index=2, number=2, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='UNTYPED', index=3, number=3, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='HISTOGRAM', index=4, number=4, + options=None, + type=None), + ], + containing_type=None, + options=None, + serialized_start=923, + serialized_end=1000, +) +_sym_db.RegisterEnumDescriptor(_METRICTYPE) + +MetricType = enum_type_wrapper.EnumTypeWrapper(_METRICTYPE) +COUNTER = 0 +GAUGE = 1 +SUMMARY = 2 +UNTYPED = 3 +HISTOGRAM = 4 + + + +_LABELPAIR = _descriptor.Descriptor( + name='LabelPair', + full_name='io.prometheus.client.LabelPair', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='io.prometheus.client.LabelPair.name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='value', 
full_name='io.prometheus.client.LabelPair.value', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + extension_ranges=[], + oneofs=[ + ], + serialized_start=39, + serialized_end=79, +) + + +_GAUGE = _descriptor.Descriptor( + name='Gauge', + full_name='io.prometheus.client.Gauge', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='value', full_name='io.prometheus.client.Gauge.value', index=0, + number=1, type=1, cpp_type=5, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + extension_ranges=[], + oneofs=[ + ], + serialized_start=81, + serialized_end=103, +) + + +_COUNTER = _descriptor.Descriptor( + name='Counter', + full_name='io.prometheus.client.Counter', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='value', full_name='io.prometheus.client.Counter.value', index=0, + number=1, type=1, cpp_type=5, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + extension_ranges=[], + oneofs=[ + ], + serialized_start=105, + serialized_end=129, +) + + +_QUANTILE = _descriptor.Descriptor( + name='Quantile', + full_name='io.prometheus.client.Quantile', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + 
_descriptor.FieldDescriptor( + name='quantile', full_name='io.prometheus.client.Quantile.quantile', index=0, + number=1, type=1, cpp_type=5, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='value', full_name='io.prometheus.client.Quantile.value', index=1, + number=2, type=1, cpp_type=5, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + extension_ranges=[], + oneofs=[ + ], + serialized_start=131, + serialized_end=174, +) + + +_SUMMARY = _descriptor.Descriptor( + name='Summary', + full_name='io.prometheus.client.Summary', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='sample_count', full_name='io.prometheus.client.Summary.sample_count', index=0, + number=1, type=4, cpp_type=4, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='sample_sum', full_name='io.prometheus.client.Summary.sample_sum', index=1, + number=2, type=1, cpp_type=5, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='quantile', full_name='io.prometheus.client.Summary.quantile', index=2, + number=3, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + 
enum_types=[ + ], + options=None, + is_extendable=False, + extension_ranges=[], + oneofs=[ + ], + serialized_start=176, + serialized_end=277, +) + + +_UNTYPED = _descriptor.Descriptor( + name='Untyped', + full_name='io.prometheus.client.Untyped', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='value', full_name='io.prometheus.client.Untyped.value', index=0, + number=1, type=1, cpp_type=5, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + extension_ranges=[], + oneofs=[ + ], + serialized_start=279, + serialized_end=303, +) + + +_HISTOGRAM = _descriptor.Descriptor( + name='Histogram', + full_name='io.prometheus.client.Histogram', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='sample_count', full_name='io.prometheus.client.Histogram.sample_count', index=0, + number=1, type=4, cpp_type=4, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='sample_sum', full_name='io.prometheus.client.Histogram.sample_sum', index=1, + number=2, type=1, cpp_type=5, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='bucket', full_name='io.prometheus.client.Histogram.bucket', index=2, + number=3, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + 
nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + extension_ranges=[], + oneofs=[ + ], + serialized_start=305, + serialized_end=404, +) + + +_BUCKET = _descriptor.Descriptor( + name='Bucket', + full_name='io.prometheus.client.Bucket', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='cumulative_count', full_name='io.prometheus.client.Bucket.cumulative_count', index=0, + number=1, type=4, cpp_type=4, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='upper_bound', full_name='io.prometheus.client.Bucket.upper_bound', index=1, + number=2, type=1, cpp_type=5, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + extension_ranges=[], + oneofs=[ + ], + serialized_start=406, + serialized_end=461, +) + + +_METRIC = _descriptor.Descriptor( + name='Metric', + full_name='io.prometheus.client.Metric', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='label', full_name='io.prometheus.client.Metric.label', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='gauge', full_name='io.prometheus.client.Metric.gauge', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( 
+ name='counter', full_name='io.prometheus.client.Metric.counter', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='summary', full_name='io.prometheus.client.Metric.summary', index=3, + number=4, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='untyped', full_name='io.prometheus.client.Metric.untyped', index=4, + number=5, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='histogram', full_name='io.prometheus.client.Metric.histogram', index=5, + number=7, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='timestamp_ms', full_name='io.prometheus.client.Metric.timestamp_ms', index=6, + number=6, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + extension_ranges=[], + oneofs=[ + ], + serialized_start=464, + serialized_end=782, +) + + +_METRICFAMILY = _descriptor.Descriptor( + name='MetricFamily', + full_name='io.prometheus.client.MetricFamily', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='name', 
full_name='io.prometheus.client.MetricFamily.name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='help', full_name='io.prometheus.client.MetricFamily.help', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='type', full_name='io.prometheus.client.MetricFamily.type', index=2, + number=3, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='metric', full_name='io.prometheus.client.MetricFamily.metric', index=3, + number=4, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + extension_ranges=[], + oneofs=[ + ], + serialized_start=785, + serialized_end=921, +) + +_SUMMARY.fields_by_name['quantile'].message_type = _QUANTILE +_HISTOGRAM.fields_by_name['bucket'].message_type = _BUCKET +_METRIC.fields_by_name['label'].message_type = _LABELPAIR +_METRIC.fields_by_name['gauge'].message_type = _GAUGE +_METRIC.fields_by_name['counter'].message_type = _COUNTER +_METRIC.fields_by_name['summary'].message_type = _SUMMARY +_METRIC.fields_by_name['untyped'].message_type = _UNTYPED +_METRIC.fields_by_name['histogram'].message_type = _HISTOGRAM +_METRICFAMILY.fields_by_name['type'].enum_type = _METRICTYPE 
+_METRICFAMILY.fields_by_name['metric'].message_type = _METRIC +DESCRIPTOR.message_types_by_name['LabelPair'] = _LABELPAIR +DESCRIPTOR.message_types_by_name['Gauge'] = _GAUGE +DESCRIPTOR.message_types_by_name['Counter'] = _COUNTER +DESCRIPTOR.message_types_by_name['Quantile'] = _QUANTILE +DESCRIPTOR.message_types_by_name['Summary'] = _SUMMARY +DESCRIPTOR.message_types_by_name['Untyped'] = _UNTYPED +DESCRIPTOR.message_types_by_name['Histogram'] = _HISTOGRAM +DESCRIPTOR.message_types_by_name['Bucket'] = _BUCKET +DESCRIPTOR.message_types_by_name['Metric'] = _METRIC +DESCRIPTOR.message_types_by_name['MetricFamily'] = _METRICFAMILY +DESCRIPTOR.enum_types_by_name['MetricType'] = _METRICTYPE + +LabelPair = _reflection.GeneratedProtocolMessageType('LabelPair', (_message.Message,), dict( + DESCRIPTOR = _LABELPAIR, + __module__ = 'metrics_pb2' + # @@protoc_insertion_point(class_scope:io.prometheus.client.LabelPair) + )) +_sym_db.RegisterMessage(LabelPair) + +Gauge = _reflection.GeneratedProtocolMessageType('Gauge', (_message.Message,), dict( + DESCRIPTOR = _GAUGE, + __module__ = 'metrics_pb2' + # @@protoc_insertion_point(class_scope:io.prometheus.client.Gauge) + )) +_sym_db.RegisterMessage(Gauge) + +Counter = _reflection.GeneratedProtocolMessageType('Counter', (_message.Message,), dict( + DESCRIPTOR = _COUNTER, + __module__ = 'metrics_pb2' + # @@protoc_insertion_point(class_scope:io.prometheus.client.Counter) + )) +_sym_db.RegisterMessage(Counter) + +Quantile = _reflection.GeneratedProtocolMessageType('Quantile', (_message.Message,), dict( + DESCRIPTOR = _QUANTILE, + __module__ = 'metrics_pb2' + # @@protoc_insertion_point(class_scope:io.prometheus.client.Quantile) + )) +_sym_db.RegisterMessage(Quantile) + +Summary = _reflection.GeneratedProtocolMessageType('Summary', (_message.Message,), dict( + DESCRIPTOR = _SUMMARY, + __module__ = 'metrics_pb2' + # @@protoc_insertion_point(class_scope:io.prometheus.client.Summary) + )) +_sym_db.RegisterMessage(Summary) + +Untyped = 
_reflection.GeneratedProtocolMessageType('Untyped', (_message.Message,), dict( + DESCRIPTOR = _UNTYPED, + __module__ = 'metrics_pb2' + # @@protoc_insertion_point(class_scope:io.prometheus.client.Untyped) + )) +_sym_db.RegisterMessage(Untyped) + +Histogram = _reflection.GeneratedProtocolMessageType('Histogram', (_message.Message,), dict( + DESCRIPTOR = _HISTOGRAM, + __module__ = 'metrics_pb2' + # @@protoc_insertion_point(class_scope:io.prometheus.client.Histogram) + )) +_sym_db.RegisterMessage(Histogram) + +Bucket = _reflection.GeneratedProtocolMessageType('Bucket', (_message.Message,), dict( + DESCRIPTOR = _BUCKET, + __module__ = 'metrics_pb2' + # @@protoc_insertion_point(class_scope:io.prometheus.client.Bucket) + )) +_sym_db.RegisterMessage(Bucket) + +Metric = _reflection.GeneratedProtocolMessageType('Metric', (_message.Message,), dict( + DESCRIPTOR = _METRIC, + __module__ = 'metrics_pb2' + # @@protoc_insertion_point(class_scope:io.prometheus.client.Metric) + )) +_sym_db.RegisterMessage(Metric) + +MetricFamily = _reflection.GeneratedProtocolMessageType('MetricFamily', (_message.Message,), dict( + DESCRIPTOR = _METRICFAMILY, + __module__ = 'metrics_pb2' + # @@protoc_insertion_point(class_scope:io.prometheus.client.MetricFamily) + )) +_sym_db.RegisterMessage(MetricFamily) + + +DESCRIPTOR.has_options = True +DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\024io.prometheus.client')) +# @@protoc_insertion_point(module_scope) diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/client_model/python/prometheus/__init__.py juju-core-2.0.0/src/github.com/prometheus/client_model/python/prometheus/__init__.py --- juju-core-2.0~beta15/src/github.com/prometheus/client_model/python/prometheus/__init__.py 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/client_model/python/prometheus/__init__.py 2016-10-13 14:31:54.000000000 +0000 @@ -0,0 +1,12 @@ + # Copyright 2013 Prometheus Team + # Licensed under the 
Apache License, Version 2.0 (the "License"); + # you may not use this file except in compliance with the License. + # You may obtain a copy of the License at + + # http://www.apache.org/licenses/LICENSE-2.0 + + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/client_model/README.md juju-core-2.0.0/src/github.com/prometheus/client_model/README.md --- juju-core-2.0~beta15/src/github.com/prometheus/client_model/README.md 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/client_model/README.md 2016-10-13 14:31:54.000000000 +0000 @@ -0,0 +1,26 @@ +# Background +Under most circumstances, manually downloading this repository should never +be required. + +# Prerequisites +# Base +* [Google Protocol Buffers](https://developers.google.com/protocol-buffers) + +## Java +* [Apache Maven](http://maven.apache.org) +* [Prometheus Maven Repository](https://github.com/prometheus/io.prometheus-maven-repository) checked out into ../io.prometheus-maven-repository + +## Go +* [Go](http://golang.org) +* [goprotobuf](https://code.google.com/p/goprotobuf) + +## Ruby +* [Ruby](https://www.ruby-lang.org) +* [bundler](https://rubygems.org/gems/bundler) + +# Building + $ make + +# Getting Started + * The Go source code is periodically indexed: [Go Protocol Buffer Model](http://godoc.org/github.com/prometheus/client_model/go). + * All of the core developers are accessible via the [Prometheus Developers Mailinglist](https://groups.google.com/forum/?fromgroups#!forum/prometheus-developers). 
diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/client_model/ruby/Gemfile juju-core-2.0.0/src/github.com/prometheus/client_model/ruby/Gemfile --- juju-core-2.0~beta15/src/github.com/prometheus/client_model/ruby/Gemfile 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/client_model/ruby/Gemfile 2016-10-13 14:31:54.000000000 +0000 @@ -0,0 +1,4 @@ +source 'https://rubygems.org' + +# Specify your gem's dependencies in prometheus-client-model.gemspec +gemspec diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/client_model/ruby/.gitignore juju-core-2.0.0/src/github.com/prometheus/client_model/ruby/.gitignore --- juju-core-2.0~beta15/src/github.com/prometheus/client_model/ruby/.gitignore 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/client_model/ruby/.gitignore 2016-10-13 14:31:54.000000000 +0000 @@ -0,0 +1,5 @@ +*.gem +.bundle +Gemfile.lock +pkg +vendor/bundle diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/client_model/ruby/lib/prometheus/client/model/metrics.pb.rb juju-core-2.0.0/src/github.com/prometheus/client_model/ruby/lib/prometheus/client/model/metrics.pb.rb --- juju-core-2.0~beta15/src/github.com/prometheus/client_model/ruby/lib/prometheus/client/model/metrics.pb.rb 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/client_model/ruby/lib/prometheus/client/model/metrics.pb.rb 2016-10-13 14:31:54.000000000 +0000 @@ -0,0 +1,111 @@ +## Generated from metrics.proto for io.prometheus.client +require "beefcake" + +module Prometheus + module Client + + module MetricType + COUNTER = 0 + GAUGE = 1 + SUMMARY = 2 + UNTYPED = 3 + HISTOGRAM = 4 + end + + class LabelPair + include Beefcake::Message + end + + class Gauge + include Beefcake::Message + end + + class Counter + include Beefcake::Message + end + + class Quantile + include Beefcake::Message + end + + class Summary + include Beefcake::Message + end + + class Untyped + include Beefcake::Message + 
end + + class Histogram + include Beefcake::Message + end + + class Bucket + include Beefcake::Message + end + + class Metric + include Beefcake::Message + end + + class MetricFamily + include Beefcake::Message + end + + class LabelPair + optional :name, :string, 1 + optional :value, :string, 2 + end + + class Gauge + optional :value, :double, 1 + end + + class Counter + optional :value, :double, 1 + end + + class Quantile + optional :quantile, :double, 1 + optional :value, :double, 2 + end + + class Summary + optional :sample_count, :uint64, 1 + optional :sample_sum, :double, 2 + repeated :quantile, Quantile, 3 + end + + class Untyped + optional :value, :double, 1 + end + + class Histogram + optional :sample_count, :uint64, 1 + optional :sample_sum, :double, 2 + repeated :bucket, Bucket, 3 + end + + class Bucket + optional :cumulative_count, :uint64, 1 + optional :upper_bound, :double, 2 + end + + class Metric + repeated :label, LabelPair, 1 + optional :gauge, Gauge, 2 + optional :counter, Counter, 3 + optional :summary, Summary, 4 + optional :untyped, Untyped, 5 + optional :histogram, Histogram, 7 + optional :timestamp_ms, :int64, 6 + end + + class MetricFamily + optional :name, :string, 1 + optional :help, :string, 2 + optional :type, MetricType, 3 + repeated :metric, Metric, 4 + end + end +end diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/client_model/ruby/lib/prometheus/client/model/version.rb juju-core-2.0.0/src/github.com/prometheus/client_model/ruby/lib/prometheus/client/model/version.rb --- juju-core-2.0~beta15/src/github.com/prometheus/client_model/ruby/lib/prometheus/client/model/version.rb 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/client_model/ruby/lib/prometheus/client/model/version.rb 2016-10-13 14:31:54.000000000 +0000 @@ -0,0 +1,7 @@ +module Prometheus + module Client + module Model + VERSION = '0.1.0' + end + end +end diff -Nru 
juju-core-2.0~beta15/src/github.com/prometheus/client_model/ruby/lib/prometheus/client/model.rb juju-core-2.0.0/src/github.com/prometheus/client_model/ruby/lib/prometheus/client/model.rb --- juju-core-2.0~beta15/src/github.com/prometheus/client_model/ruby/lib/prometheus/client/model.rb 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/client_model/ruby/lib/prometheus/client/model.rb 2016-10-13 14:31:54.000000000 +0000 @@ -0,0 +1,2 @@ +require 'prometheus/client/model/metrics.pb' +require 'prometheus/client/model/version' diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/client_model/ruby/LICENSE juju-core-2.0.0/src/github.com/prometheus/client_model/ruby/LICENSE --- juju-core-2.0~beta15/src/github.com/prometheus/client_model/ruby/LICENSE 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/client_model/ruby/LICENSE 2016-10-13 14:31:54.000000000 +0000 @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/client_model/ruby/Makefile juju-core-2.0.0/src/github.com/prometheus/client_model/ruby/Makefile --- juju-core-2.0~beta15/src/github.com/prometheus/client_model/ruby/Makefile 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/client_model/ruby/Makefile 2016-10-13 14:31:54.000000000 +0000 @@ -0,0 +1,17 @@ +VENDOR_BUNDLE = vendor/bundle + +build: $(VENDOR_BUNDLE)/.bundled + BEEFCAKE_NAMESPACE=Prometheus::Client protoc --beefcake_out lib/prometheus/client/model -I .. 
../metrics.proto + +$(VENDOR_BUNDLE): + mkdir -p $@ + +$(VENDOR_BUNDLE)/.bundled: $(VENDOR_BUNDLE) Gemfile + bundle install --quiet --path $< + @touch $@ + +clean: + -rm -f lib/prometheus/client/model/metrics.pb.rb + -rm -rf $(VENDOR_BUNDLE) + +.PHONY: build clean diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/client_model/ruby/prometheus-client-model.gemspec juju-core-2.0.0/src/github.com/prometheus/client_model/ruby/prometheus-client-model.gemspec --- juju-core-2.0~beta15/src/github.com/prometheus/client_model/ruby/prometheus-client-model.gemspec 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/client_model/ruby/prometheus-client-model.gemspec 2016-10-13 14:31:54.000000000 +0000 @@ -0,0 +1,22 @@ +# coding: utf-8 +lib = File.expand_path('../lib', __FILE__) +$LOAD_PATH.unshift(lib) unless $LOAD_PATH.include?(lib) +require 'prometheus/client/model/version' + +Gem::Specification.new do |spec| + spec.name = 'prometheus-client-model' + spec.version = Prometheus::Client::Model::VERSION + spec.authors = ['Tobias Schmidt'] + spec.email = ['tobidt@gmail.com'] + spec.summary = 'Data model artifacts for the Prometheus Ruby client' + spec.homepage = 'https://github.com/prometheus/client_model/tree/master/ruby' + spec.license = 'Apache 2.0' + + spec.files = %w[README.md LICENSE] + Dir.glob('{lib/**/*}') + spec.require_paths = ['lib'] + + spec.add_dependency 'beefcake', '>= 0.4.0' + + spec.add_development_dependency 'bundler', '~> 1.3' + spec.add_development_dependency 'rake' +end diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/client_model/ruby/Rakefile juju-core-2.0.0/src/github.com/prometheus/client_model/ruby/Rakefile --- juju-core-2.0~beta15/src/github.com/prometheus/client_model/ruby/Rakefile 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/client_model/ruby/Rakefile 2016-10-13 14:31:54.000000000 +0000 @@ -0,0 +1 @@ +require "bundler/gem_tasks" diff -Nru 
juju-core-2.0~beta15/src/github.com/prometheus/client_model/ruby/README.md juju-core-2.0.0/src/github.com/prometheus/client_model/ruby/README.md --- juju-core-2.0~beta15/src/github.com/prometheus/client_model/ruby/README.md 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/client_model/ruby/README.md 2016-10-13 14:31:54.000000000 +0000 @@ -0,0 +1,31 @@ +# Prometheus Ruby client model + +Data model artifacts for the [Prometheus Ruby client][1]. + +## Installation + + gem install prometheus-client-model + +## Usage + +Build the artifacts from the protobuf specification: + + make build + +While this Gem's main purpose is to define the Prometheus data types for the +[client][1], it's possible to use it without the client to decode a stream of +delimited protobuf messages: + +```ruby +require 'open-uri' +require 'prometheus/client/model' + +CONTENT_TYPE = 'application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited' + +stream = open('http://localhost:9090/metrics', 'Accept' => CONTENT_TYPE).read +while family = Prometheus::Client::MetricFamily.read_delimited(stream) + puts family +end +``` + +[1]: https://github.com/prometheus/client_ruby diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/client_model/setup.py juju-core-2.0.0/src/github.com/prometheus/client_model/setup.py --- juju-core-2.0~beta15/src/github.com/prometheus/client_model/setup.py 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/client_model/setup.py 2016-10-13 14:31:54.000000000 +0000 @@ -0,0 +1,23 @@ +#!/usr/bin/python + +from setuptools import setup + +setup( + name = 'prometheus_client_model', + version = '0.0.1', + author = 'Matt T. 
Proud', + author_email = 'matt.proud@gmail.com', + description = 'Data model artifacts for the Prometheus client.', + license = 'Apache License 2.0', + url = 'http://github.com/prometheus/client_model', + packages = ['prometheus', 'prometheus/client', 'prometheus/client/model'], + package_dir = {'': 'python'}, + requires = ['protobuf(==2.4.1)'], + platforms = 'Platform Independent', + classifiers = ['Development Status :: 3 - Alpha', + 'Intended Audience :: Developers', + 'Intended Audience :: System Administrators', + 'License :: OSI Approved :: Apache Software License', + 'Operating System :: OS Independent', + 'Topic :: Software Development :: Testing', + 'Topic :: System :: Monitoring']) diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/client_model/src/main/java/io/prometheus/client/Metrics.java juju-core-2.0.0/src/github.com/prometheus/client_model/src/main/java/io/prometheus/client/Metrics.java --- juju-core-2.0~beta15/src/github.com/prometheus/client_model/src/main/java/io/prometheus/client/Metrics.java 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/client_model/src/main/java/io/prometheus/client/Metrics.java 2016-10-13 14:31:54.000000000 +0000 @@ -0,0 +1,7683 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// source: metrics.proto + +package io.prometheus.client; + +public final class Metrics { + private Metrics() {} + public static void registerAllExtensions( + com.google.protobuf.ExtensionRegistry registry) { + } + /** + * Protobuf enum {@code io.prometheus.client.MetricType} + */ + public enum MetricType + implements com.google.protobuf.ProtocolMessageEnum { + /** + * COUNTER = 0; + */ + COUNTER(0, 0), + /** + * GAUGE = 1; + */ + GAUGE(1, 1), + /** + * SUMMARY = 2; + */ + SUMMARY(2, 2), + /** + * UNTYPED = 3; + */ + UNTYPED(3, 3), + /** + * HISTOGRAM = 4; + */ + HISTOGRAM(4, 4), + ; + + /** + * COUNTER = 0; + */ + public static final int COUNTER_VALUE = 0; + /** + * GAUGE = 1; + */ + public static final int GAUGE_VALUE = 1; + /** + * SUMMARY = 2; + */ + public static final int SUMMARY_VALUE = 2; + /** + * UNTYPED = 3; + */ + public static final int UNTYPED_VALUE = 3; + /** + * HISTOGRAM = 4; + */ + public static final int HISTOGRAM_VALUE = 4; + + + public final int getNumber() { return value; } + + public static MetricType valueOf(int value) { + switch (value) { + case 0: return COUNTER; + case 1: return GAUGE; + case 2: return SUMMARY; + case 3: return UNTYPED; + case 4: return HISTOGRAM; + default: return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap + internalGetValueMap() { + return internalValueMap; + } + private static com.google.protobuf.Internal.EnumLiteMap + internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public MetricType findValueByNumber(int number) { + return MetricType.valueOf(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor + getValueDescriptor() { + return getDescriptor().getValues().get(index); + } + public final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptorForType() { + return getDescriptor(); + } + public static final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptor() { + return 
io.prometheus.client.Metrics.getDescriptor().getEnumTypes().get(0); + } + + private static final MetricType[] VALUES = values(); + + public static MetricType valueOf( + com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "EnumValueDescriptor is not for this type."); + } + return VALUES[desc.getIndex()]; + } + + private final int index; + private final int value; + + private MetricType(int index, int value) { + this.index = index; + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:io.prometheus.client.MetricType) + } + + public interface LabelPairOrBuilder extends + // @@protoc_insertion_point(interface_extends:io.prometheus.client.LabelPair) + com.google.protobuf.MessageOrBuilder { + + /** + * optional string name = 1; + */ + boolean hasName(); + /** + * optional string name = 1; + */ + java.lang.String getName(); + /** + * optional string name = 1; + */ + com.google.protobuf.ByteString + getNameBytes(); + + /** + * optional string value = 2; + */ + boolean hasValue(); + /** + * optional string value = 2; + */ + java.lang.String getValue(); + /** + * optional string value = 2; + */ + com.google.protobuf.ByteString + getValueBytes(); + } + /** + * Protobuf type {@code io.prometheus.client.LabelPair} + */ + public static final class LabelPair extends + com.google.protobuf.GeneratedMessage implements + // @@protoc_insertion_point(message_implements:io.prometheus.client.LabelPair) + LabelPairOrBuilder { + // Use LabelPair.newBuilder() to construct. 
+ private LabelPair(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private LabelPair(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final LabelPair defaultInstance; + public static LabelPair getDefaultInstance() { + return defaultInstance; + } + + public LabelPair getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private LabelPair( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + com.google.protobuf.ByteString bs = input.readBytes(); + bitField0_ |= 0x00000001; + name_ = bs; + break; + } + case 18: { + com.google.protobuf.ByteString bs = input.readBytes(); + bitField0_ |= 0x00000002; + value_ = bs; + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return 
io.prometheus.client.Metrics.internal_static_io_prometheus_client_LabelPair_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return io.prometheus.client.Metrics.internal_static_io_prometheus_client_LabelPair_fieldAccessorTable + .ensureFieldAccessorsInitialized( + io.prometheus.client.Metrics.LabelPair.class, io.prometheus.client.Metrics.LabelPair.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public LabelPair parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new LabelPair(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + public static final int NAME_FIELD_NUMBER = 1; + private java.lang.Object name_; + /** + * optional string name = 1; + */ + public boolean hasName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional string name = 1; + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + name_ = s; + } + return s; + } + } + /** + * optional string name = 1; + */ + public com.google.protobuf.ByteString + getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int VALUE_FIELD_NUMBER = 2; + private java.lang.Object value_; + /** + 
* optional string value = 2; + */ + public boolean hasValue() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional string value = 2; + */ + public java.lang.String getValue() { + java.lang.Object ref = value_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + value_ = s; + } + return s; + } + } + /** + * optional string value = 2; + */ + public com.google.protobuf.ByteString + getValueBytes() { + java.lang.Object ref = value_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + value_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private void initFields() { + name_ = ""; + value_ = ""; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getNameBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeBytes(2, getValueBytes()); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getNameBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(2, 
getValueBytes()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static io.prometheus.client.Metrics.LabelPair parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static io.prometheus.client.Metrics.LabelPair parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static io.prometheus.client.Metrics.LabelPair parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static io.prometheus.client.Metrics.LabelPair parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static io.prometheus.client.Metrics.LabelPair parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static io.prometheus.client.Metrics.LabelPair parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static io.prometheus.client.Metrics.LabelPair parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static io.prometheus.client.Metrics.LabelPair parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite 
extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static io.prometheus.client.Metrics.LabelPair parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static io.prometheus.client.Metrics.LabelPair parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(io.prometheus.client.Metrics.LabelPair prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code io.prometheus.client.LabelPair} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder implements + // @@protoc_insertion_point(builder_implements:io.prometheus.client.LabelPair) + io.prometheus.client.Metrics.LabelPairOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return io.prometheus.client.Metrics.internal_static_io_prometheus_client_LabelPair_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return io.prometheus.client.Metrics.internal_static_io_prometheus_client_LabelPair_fieldAccessorTable + .ensureFieldAccessorsInitialized( + io.prometheus.client.Metrics.LabelPair.class, io.prometheus.client.Metrics.LabelPair.Builder.class); + } + + // Construct using io.prometheus.client.Metrics.LabelPair.newBuilder() + private 
Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + name_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + value_ = ""; + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return io.prometheus.client.Metrics.internal_static_io_prometheus_client_LabelPair_descriptor; + } + + public io.prometheus.client.Metrics.LabelPair getDefaultInstanceForType() { + return io.prometheus.client.Metrics.LabelPair.getDefaultInstance(); + } + + public io.prometheus.client.Metrics.LabelPair build() { + io.prometheus.client.Metrics.LabelPair result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public io.prometheus.client.Metrics.LabelPair buildPartial() { + io.prometheus.client.Metrics.LabelPair result = new io.prometheus.client.Metrics.LabelPair(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.name_ = name_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.value_ = value_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof io.prometheus.client.Metrics.LabelPair) { + return mergeFrom((io.prometheus.client.Metrics.LabelPair)other); + } else { + super.mergeFrom(other); + return this; + } 
+ } + + public Builder mergeFrom(io.prometheus.client.Metrics.LabelPair other) { + if (other == io.prometheus.client.Metrics.LabelPair.getDefaultInstance()) return this; + if (other.hasName()) { + bitField0_ |= 0x00000001; + name_ = other.name_; + onChanged(); + } + if (other.hasValue()) { + bitField0_ |= 0x00000002; + value_ = other.value_; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + io.prometheus.client.Metrics.LabelPair parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (io.prometheus.client.Metrics.LabelPair) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + private java.lang.Object name_ = ""; + /** + * optional string name = 1; + */ + public boolean hasName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional string name = 1; + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + name_ = s; + } + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * optional string name = 1; + */ + public com.google.protobuf.ByteString + getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) 
ref; + } + } + /** + * optional string name = 1; + */ + public Builder setName( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + name_ = value; + onChanged(); + return this; + } + /** + * optional string name = 1; + */ + public Builder clearName() { + bitField0_ = (bitField0_ & ~0x00000001); + name_ = getDefaultInstance().getName(); + onChanged(); + return this; + } + /** + * optional string name = 1; + */ + public Builder setNameBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + name_ = value; + onChanged(); + return this; + } + + private java.lang.Object value_ = ""; + /** + * optional string value = 2; + */ + public boolean hasValue() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional string value = 2; + */ + public java.lang.String getValue() { + java.lang.Object ref = value_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + value_ = s; + } + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * optional string value = 2; + */ + public com.google.protobuf.ByteString + getValueBytes() { + java.lang.Object ref = value_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + value_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * optional string value = 2; + */ + public Builder setValue( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + value_ = value; + onChanged(); + return this; + } + /** + * optional string value = 2; + */ + public Builder clearValue() { + bitField0_ = (bitField0_ & ~0x00000002); + value_ = 
getDefaultInstance().getValue(); + onChanged(); + return this; + } + /** + * optional string value = 2; + */ + public Builder setValueBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + value_ = value; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:io.prometheus.client.LabelPair) + } + + static { + defaultInstance = new LabelPair(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:io.prometheus.client.LabelPair) + } + + public interface GaugeOrBuilder extends + // @@protoc_insertion_point(interface_extends:io.prometheus.client.Gauge) + com.google.protobuf.MessageOrBuilder { + + /** + * optional double value = 1; + */ + boolean hasValue(); + /** + * optional double value = 1; + */ + double getValue(); + } + /** + * Protobuf type {@code io.prometheus.client.Gauge} + */ + public static final class Gauge extends + com.google.protobuf.GeneratedMessage implements + // @@protoc_insertion_point(message_implements:io.prometheus.client.Gauge) + GaugeOrBuilder { + // Use Gauge.newBuilder() to construct. 
+ private Gauge(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private Gauge(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final Gauge defaultInstance; + public static Gauge getDefaultInstance() { + return defaultInstance; + } + + public Gauge getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private Gauge( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 9: { + bitField0_ |= 0x00000001; + value_ = input.readDouble(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return io.prometheus.client.Metrics.internal_static_io_prometheus_client_Gauge_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return 
io.prometheus.client.Metrics.internal_static_io_prometheus_client_Gauge_fieldAccessorTable + .ensureFieldAccessorsInitialized( + io.prometheus.client.Metrics.Gauge.class, io.prometheus.client.Metrics.Gauge.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public Gauge parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new Gauge(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + public static final int VALUE_FIELD_NUMBER = 1; + private double value_; + /** + * optional double value = 1; + */ + public boolean hasValue() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional double value = 1; + */ + public double getValue() { + return value_; + } + + private void initFields() { + value_ = 0D; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeDouble(1, value_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeDoubleSize(1, value_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final 
long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static io.prometheus.client.Metrics.Gauge parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static io.prometheus.client.Metrics.Gauge parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static io.prometheus.client.Metrics.Gauge parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static io.prometheus.client.Metrics.Gauge parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static io.prometheus.client.Metrics.Gauge parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static io.prometheus.client.Metrics.Gauge parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static io.prometheus.client.Metrics.Gauge parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static io.prometheus.client.Metrics.Gauge parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static io.prometheus.client.Metrics.Gauge parseFrom( + 
com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static io.prometheus.client.Metrics.Gauge parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(io.prometheus.client.Metrics.Gauge prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code io.prometheus.client.Gauge} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder implements + // @@protoc_insertion_point(builder_implements:io.prometheus.client.Gauge) + io.prometheus.client.Metrics.GaugeOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return io.prometheus.client.Metrics.internal_static_io_prometheus_client_Gauge_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return io.prometheus.client.Metrics.internal_static_io_prometheus_client_Gauge_fieldAccessorTable + .ensureFieldAccessorsInitialized( + io.prometheus.client.Metrics.Gauge.class, io.prometheus.client.Metrics.Gauge.Builder.class); + } + + // Construct using io.prometheus.client.Metrics.Gauge.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void 
maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + value_ = 0D; + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return io.prometheus.client.Metrics.internal_static_io_prometheus_client_Gauge_descriptor; + } + + public io.prometheus.client.Metrics.Gauge getDefaultInstanceForType() { + return io.prometheus.client.Metrics.Gauge.getDefaultInstance(); + } + + public io.prometheus.client.Metrics.Gauge build() { + io.prometheus.client.Metrics.Gauge result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public io.prometheus.client.Metrics.Gauge buildPartial() { + io.prometheus.client.Metrics.Gauge result = new io.prometheus.client.Metrics.Gauge(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.value_ = value_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof io.prometheus.client.Metrics.Gauge) { + return mergeFrom((io.prometheus.client.Metrics.Gauge)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(io.prometheus.client.Metrics.Gauge other) { + if (other == io.prometheus.client.Metrics.Gauge.getDefaultInstance()) return this; + if (other.hasValue()) { + setValue(other.getValue()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + 
com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + io.prometheus.client.Metrics.Gauge parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (io.prometheus.client.Metrics.Gauge) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + private double value_ ; + /** + * optional double value = 1; + */ + public boolean hasValue() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional double value = 1; + */ + public double getValue() { + return value_; + } + /** + * optional double value = 1; + */ + public Builder setValue(double value) { + bitField0_ |= 0x00000001; + value_ = value; + onChanged(); + return this; + } + /** + * optional double value = 1; + */ + public Builder clearValue() { + bitField0_ = (bitField0_ & ~0x00000001); + value_ = 0D; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:io.prometheus.client.Gauge) + } + + static { + defaultInstance = new Gauge(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:io.prometheus.client.Gauge) + } + + public interface CounterOrBuilder extends + // @@protoc_insertion_point(interface_extends:io.prometheus.client.Counter) + com.google.protobuf.MessageOrBuilder { + + /** + * optional double value = 1; + */ + boolean hasValue(); + /** + * optional double value = 1; + */ + double getValue(); + } + /** + * Protobuf type {@code io.prometheus.client.Counter} + */ + public static final class Counter extends + com.google.protobuf.GeneratedMessage implements + // @@protoc_insertion_point(message_implements:io.prometheus.client.Counter) + CounterOrBuilder { + // Use Counter.newBuilder() to construct. 
+ private Counter(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private Counter(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final Counter defaultInstance; + public static Counter getDefaultInstance() { + return defaultInstance; + } + + public Counter getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private Counter( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 9: { + bitField0_ |= 0x00000001; + value_ = input.readDouble(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return io.prometheus.client.Metrics.internal_static_io_prometheus_client_Counter_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { 
+ return io.prometheus.client.Metrics.internal_static_io_prometheus_client_Counter_fieldAccessorTable + .ensureFieldAccessorsInitialized( + io.prometheus.client.Metrics.Counter.class, io.prometheus.client.Metrics.Counter.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public Counter parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new Counter(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + public static final int VALUE_FIELD_NUMBER = 1; + private double value_; + /** + * optional double value = 1; + */ + public boolean hasValue() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional double value = 1; + */ + public double getValue() { + return value_; + } + + private void initFields() { + value_ = 0D; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeDouble(1, value_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeDoubleSize(1, value_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + 
private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static io.prometheus.client.Metrics.Counter parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static io.prometheus.client.Metrics.Counter parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static io.prometheus.client.Metrics.Counter parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static io.prometheus.client.Metrics.Counter parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static io.prometheus.client.Metrics.Counter parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static io.prometheus.client.Metrics.Counter parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static io.prometheus.client.Metrics.Counter parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static io.prometheus.client.Metrics.Counter parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static 
io.prometheus.client.Metrics.Counter parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static io.prometheus.client.Metrics.Counter parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(io.prometheus.client.Metrics.Counter prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code io.prometheus.client.Counter} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder implements + // @@protoc_insertion_point(builder_implements:io.prometheus.client.Counter) + io.prometheus.client.Metrics.CounterOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return io.prometheus.client.Metrics.internal_static_io_prometheus_client_Counter_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return io.prometheus.client.Metrics.internal_static_io_prometheus_client_Counter_fieldAccessorTable + .ensureFieldAccessorsInitialized( + io.prometheus.client.Metrics.Counter.class, io.prometheus.client.Metrics.Counter.Builder.class); + } + + // Construct using io.prometheus.client.Metrics.Counter.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); 
+ maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + value_ = 0D; + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return io.prometheus.client.Metrics.internal_static_io_prometheus_client_Counter_descriptor; + } + + public io.prometheus.client.Metrics.Counter getDefaultInstanceForType() { + return io.prometheus.client.Metrics.Counter.getDefaultInstance(); + } + + public io.prometheus.client.Metrics.Counter build() { + io.prometheus.client.Metrics.Counter result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public io.prometheus.client.Metrics.Counter buildPartial() { + io.prometheus.client.Metrics.Counter result = new io.prometheus.client.Metrics.Counter(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.value_ = value_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof io.prometheus.client.Metrics.Counter) { + return mergeFrom((io.prometheus.client.Metrics.Counter)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(io.prometheus.client.Metrics.Counter other) { + if (other == io.prometheus.client.Metrics.Counter.getDefaultInstance()) return this; + if (other.hasValue()) { + setValue(other.getValue()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + 
return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + io.prometheus.client.Metrics.Counter parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (io.prometheus.client.Metrics.Counter) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + private double value_ ; + /** + * optional double value = 1; + */ + public boolean hasValue() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional double value = 1; + */ + public double getValue() { + return value_; + } + /** + * optional double value = 1; + */ + public Builder setValue(double value) { + bitField0_ |= 0x00000001; + value_ = value; + onChanged(); + return this; + } + /** + * optional double value = 1; + */ + public Builder clearValue() { + bitField0_ = (bitField0_ & ~0x00000001); + value_ = 0D; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:io.prometheus.client.Counter) + } + + static { + defaultInstance = new Counter(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:io.prometheus.client.Counter) + } + + public interface QuantileOrBuilder extends + // @@protoc_insertion_point(interface_extends:io.prometheus.client.Quantile) + com.google.protobuf.MessageOrBuilder { + + /** + * optional double quantile = 1; + */ + boolean hasQuantile(); + /** + * optional double quantile = 1; + */ + double getQuantile(); + + /** + * optional double value = 2; + */ + boolean hasValue(); + /** + * optional double value = 2; + */ + double getValue(); + } + /** + * Protobuf type {@code io.prometheus.client.Quantile} + */ + public static final class Quantile extends + 
com.google.protobuf.GeneratedMessage implements + // @@protoc_insertion_point(message_implements:io.prometheus.client.Quantile) + QuantileOrBuilder { + // Use Quantile.newBuilder() to construct. + private Quantile(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private Quantile(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final Quantile defaultInstance; + public static Quantile getDefaultInstance() { + return defaultInstance; + } + + public Quantile getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private Quantile( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 9: { + bitField0_ |= 0x00000001; + quantile_ = input.readDouble(); + break; + } + case 17: { + bitField0_ |= 0x00000002; + value_ = input.readDouble(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + 
public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return io.prometheus.client.Metrics.internal_static_io_prometheus_client_Quantile_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return io.prometheus.client.Metrics.internal_static_io_prometheus_client_Quantile_fieldAccessorTable + .ensureFieldAccessorsInitialized( + io.prometheus.client.Metrics.Quantile.class, io.prometheus.client.Metrics.Quantile.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public Quantile parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new Quantile(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + public static final int QUANTILE_FIELD_NUMBER = 1; + private double quantile_; + /** + * optional double quantile = 1; + */ + public boolean hasQuantile() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional double quantile = 1; + */ + public double getQuantile() { + return quantile_; + } + + public static final int VALUE_FIELD_NUMBER = 2; + private double value_; + /** + * optional double value = 2; + */ + public boolean hasValue() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional double value = 2; + */ + public double getValue() { + return value_; + } + + private void initFields() { + quantile_ = 0D; + value_ = 0D; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + public void 
writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeDouble(1, quantile_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeDouble(2, value_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeDoubleSize(1, quantile_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeDoubleSize(2, value_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static io.prometheus.client.Metrics.Quantile parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static io.prometheus.client.Metrics.Quantile parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static io.prometheus.client.Metrics.Quantile parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static io.prometheus.client.Metrics.Quantile parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public 
static io.prometheus.client.Metrics.Quantile parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static io.prometheus.client.Metrics.Quantile parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static io.prometheus.client.Metrics.Quantile parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static io.prometheus.client.Metrics.Quantile parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static io.prometheus.client.Metrics.Quantile parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static io.prometheus.client.Metrics.Quantile parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(io.prometheus.client.Metrics.Quantile prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code io.prometheus.client.Quantile} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder implements + // 
@@protoc_insertion_point(builder_implements:io.prometheus.client.Quantile) + io.prometheus.client.Metrics.QuantileOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return io.prometheus.client.Metrics.internal_static_io_prometheus_client_Quantile_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return io.prometheus.client.Metrics.internal_static_io_prometheus_client_Quantile_fieldAccessorTable + .ensureFieldAccessorsInitialized( + io.prometheus.client.Metrics.Quantile.class, io.prometheus.client.Metrics.Quantile.Builder.class); + } + + // Construct using io.prometheus.client.Metrics.Quantile.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + quantile_ = 0D; + bitField0_ = (bitField0_ & ~0x00000001); + value_ = 0D; + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return io.prometheus.client.Metrics.internal_static_io_prometheus_client_Quantile_descriptor; + } + + public io.prometheus.client.Metrics.Quantile getDefaultInstanceForType() { + return io.prometheus.client.Metrics.Quantile.getDefaultInstance(); + } + + public io.prometheus.client.Metrics.Quantile build() { + io.prometheus.client.Metrics.Quantile result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public 
io.prometheus.client.Metrics.Quantile buildPartial() { + io.prometheus.client.Metrics.Quantile result = new io.prometheus.client.Metrics.Quantile(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.quantile_ = quantile_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.value_ = value_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof io.prometheus.client.Metrics.Quantile) { + return mergeFrom((io.prometheus.client.Metrics.Quantile)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(io.prometheus.client.Metrics.Quantile other) { + if (other == io.prometheus.client.Metrics.Quantile.getDefaultInstance()) return this; + if (other.hasQuantile()) { + setQuantile(other.getQuantile()); + } + if (other.hasValue()) { + setValue(other.getValue()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + io.prometheus.client.Metrics.Quantile parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (io.prometheus.client.Metrics.Quantile) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + private double quantile_ ; + /** + * optional double quantile = 1; + */ + public boolean hasQuantile() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional double 
quantile = 1; + */ + public double getQuantile() { + return quantile_; + } + /** + * optional double quantile = 1; + */ + public Builder setQuantile(double value) { + bitField0_ |= 0x00000001; + quantile_ = value; + onChanged(); + return this; + } + /** + * optional double quantile = 1; + */ + public Builder clearQuantile() { + bitField0_ = (bitField0_ & ~0x00000001); + quantile_ = 0D; + onChanged(); + return this; + } + + private double value_ ; + /** + * optional double value = 2; + */ + public boolean hasValue() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional double value = 2; + */ + public double getValue() { + return value_; + } + /** + * optional double value = 2; + */ + public Builder setValue(double value) { + bitField0_ |= 0x00000002; + value_ = value; + onChanged(); + return this; + } + /** + * optional double value = 2; + */ + public Builder clearValue() { + bitField0_ = (bitField0_ & ~0x00000002); + value_ = 0D; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:io.prometheus.client.Quantile) + } + + static { + defaultInstance = new Quantile(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:io.prometheus.client.Quantile) + } + + public interface SummaryOrBuilder extends + // @@protoc_insertion_point(interface_extends:io.prometheus.client.Summary) + com.google.protobuf.MessageOrBuilder { + + /** + * optional uint64 sample_count = 1; + */ + boolean hasSampleCount(); + /** + * optional uint64 sample_count = 1; + */ + long getSampleCount(); + + /** + * optional double sample_sum = 2; + */ + boolean hasSampleSum(); + /** + * optional double sample_sum = 2; + */ + double getSampleSum(); + + /** + * repeated .io.prometheus.client.Quantile quantile = 3; + */ + java.util.List + getQuantileList(); + /** + * repeated .io.prometheus.client.Quantile quantile = 3; + */ + io.prometheus.client.Metrics.Quantile getQuantile(int index); + /** + * repeated 
.io.prometheus.client.Quantile quantile = 3; + */ + int getQuantileCount(); + /** + * repeated .io.prometheus.client.Quantile quantile = 3; + */ + java.util.List + getQuantileOrBuilderList(); + /** + * repeated .io.prometheus.client.Quantile quantile = 3; + */ + io.prometheus.client.Metrics.QuantileOrBuilder getQuantileOrBuilder( + int index); + } + /** + * Protobuf type {@code io.prometheus.client.Summary} + */ + public static final class Summary extends + com.google.protobuf.GeneratedMessage implements + // @@protoc_insertion_point(message_implements:io.prometheus.client.Summary) + SummaryOrBuilder { + // Use Summary.newBuilder() to construct. + private Summary(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private Summary(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final Summary defaultInstance; + public static Summary getDefaultInstance() { + return defaultInstance; + } + + public Summary getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private Summary( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 8: { + bitField0_ |= 0x00000001; + sampleCount_ = input.readUInt64(); 
+ break; + } + case 17: { + bitField0_ |= 0x00000002; + sampleSum_ = input.readDouble(); + break; + } + case 26: { + if (!((mutable_bitField0_ & 0x00000004) == 0x00000004)) { + quantile_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000004; + } + quantile_.add(input.readMessage(io.prometheus.client.Metrics.Quantile.PARSER, extensionRegistry)); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000004) == 0x00000004)) { + quantile_ = java.util.Collections.unmodifiableList(quantile_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return io.prometheus.client.Metrics.internal_static_io_prometheus_client_Summary_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return io.prometheus.client.Metrics.internal_static_io_prometheus_client_Summary_fieldAccessorTable + .ensureFieldAccessorsInitialized( + io.prometheus.client.Metrics.Summary.class, io.prometheus.client.Metrics.Summary.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public Summary parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new Summary(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + public static final int SAMPLE_COUNT_FIELD_NUMBER = 1; + private long sampleCount_; + /** + * optional uint64 
sample_count = 1; + */ + public boolean hasSampleCount() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional uint64 sample_count = 1; + */ + public long getSampleCount() { + return sampleCount_; + } + + public static final int SAMPLE_SUM_FIELD_NUMBER = 2; + private double sampleSum_; + /** + * optional double sample_sum = 2; + */ + public boolean hasSampleSum() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional double sample_sum = 2; + */ + public double getSampleSum() { + return sampleSum_; + } + + public static final int QUANTILE_FIELD_NUMBER = 3; + private java.util.List quantile_; + /** + * repeated .io.prometheus.client.Quantile quantile = 3; + */ + public java.util.List getQuantileList() { + return quantile_; + } + /** + * repeated .io.prometheus.client.Quantile quantile = 3; + */ + public java.util.List + getQuantileOrBuilderList() { + return quantile_; + } + /** + * repeated .io.prometheus.client.Quantile quantile = 3; + */ + public int getQuantileCount() { + return quantile_.size(); + } + /** + * repeated .io.prometheus.client.Quantile quantile = 3; + */ + public io.prometheus.client.Metrics.Quantile getQuantile(int index) { + return quantile_.get(index); + } + /** + * repeated .io.prometheus.client.Quantile quantile = 3; + */ + public io.prometheus.client.Metrics.QuantileOrBuilder getQuantileOrBuilder( + int index) { + return quantile_.get(index); + } + + private void initFields() { + sampleCount_ = 0L; + sampleSum_ = 0D; + quantile_ = java.util.Collections.emptyList(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + 
output.writeUInt64(1, sampleCount_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeDouble(2, sampleSum_); + } + for (int i = 0; i < quantile_.size(); i++) { + output.writeMessage(3, quantile_.get(i)); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(1, sampleCount_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeDoubleSize(2, sampleSum_); + } + for (int i = 0; i < quantile_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(3, quantile_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static io.prometheus.client.Metrics.Summary parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static io.prometheus.client.Metrics.Summary parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static io.prometheus.client.Metrics.Summary parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static io.prometheus.client.Metrics.Summary parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static io.prometheus.client.Metrics.Summary parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static io.prometheus.client.Metrics.Summary parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static io.prometheus.client.Metrics.Summary parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static io.prometheus.client.Metrics.Summary parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static io.prometheus.client.Metrics.Summary parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static io.prometheus.client.Metrics.Summary parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(io.prometheus.client.Metrics.Summary prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code io.prometheus.client.Summary} + */ + public static final class 
Builder extends + com.google.protobuf.GeneratedMessage.Builder implements + // @@protoc_insertion_point(builder_implements:io.prometheus.client.Summary) + io.prometheus.client.Metrics.SummaryOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return io.prometheus.client.Metrics.internal_static_io_prometheus_client_Summary_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return io.prometheus.client.Metrics.internal_static_io_prometheus_client_Summary_fieldAccessorTable + .ensureFieldAccessorsInitialized( + io.prometheus.client.Metrics.Summary.class, io.prometheus.client.Metrics.Summary.Builder.class); + } + + // Construct using io.prometheus.client.Metrics.Summary.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getQuantileFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + sampleCount_ = 0L; + bitField0_ = (bitField0_ & ~0x00000001); + sampleSum_ = 0D; + bitField0_ = (bitField0_ & ~0x00000002); + if (quantileBuilder_ == null) { + quantile_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000004); + } else { + quantileBuilder_.clear(); + } + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return io.prometheus.client.Metrics.internal_static_io_prometheus_client_Summary_descriptor; + } + + public io.prometheus.client.Metrics.Summary getDefaultInstanceForType() { + return io.prometheus.client.Metrics.Summary.getDefaultInstance(); + 
} + + public io.prometheus.client.Metrics.Summary build() { + io.prometheus.client.Metrics.Summary result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public io.prometheus.client.Metrics.Summary buildPartial() { + io.prometheus.client.Metrics.Summary result = new io.prometheus.client.Metrics.Summary(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.sampleCount_ = sampleCount_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.sampleSum_ = sampleSum_; + if (quantileBuilder_ == null) { + if (((bitField0_ & 0x00000004) == 0x00000004)) { + quantile_ = java.util.Collections.unmodifiableList(quantile_); + bitField0_ = (bitField0_ & ~0x00000004); + } + result.quantile_ = quantile_; + } else { + result.quantile_ = quantileBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof io.prometheus.client.Metrics.Summary) { + return mergeFrom((io.prometheus.client.Metrics.Summary)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(io.prometheus.client.Metrics.Summary other) { + if (other == io.prometheus.client.Metrics.Summary.getDefaultInstance()) return this; + if (other.hasSampleCount()) { + setSampleCount(other.getSampleCount()); + } + if (other.hasSampleSum()) { + setSampleSum(other.getSampleSum()); + } + if (quantileBuilder_ == null) { + if (!other.quantile_.isEmpty()) { + if (quantile_.isEmpty()) { + quantile_ = other.quantile_; + bitField0_ = (bitField0_ & ~0x00000004); + } else { + ensureQuantileIsMutable(); + quantile_.addAll(other.quantile_); + } + onChanged(); + } + } else { + if (!other.quantile_.isEmpty()) { + if (quantileBuilder_.isEmpty()) { + 
quantileBuilder_.dispose(); + quantileBuilder_ = null; + quantile_ = other.quantile_; + bitField0_ = (bitField0_ & ~0x00000004); + quantileBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? + getQuantileFieldBuilder() : null; + } else { + quantileBuilder_.addAllMessages(other.quantile_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + io.prometheus.client.Metrics.Summary parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (io.prometheus.client.Metrics.Summary) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + private long sampleCount_ ; + /** + * optional uint64 sample_count = 1; + */ + public boolean hasSampleCount() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional uint64 sample_count = 1; + */ + public long getSampleCount() { + return sampleCount_; + } + /** + * optional uint64 sample_count = 1; + */ + public Builder setSampleCount(long value) { + bitField0_ |= 0x00000001; + sampleCount_ = value; + onChanged(); + return this; + } + /** + * optional uint64 sample_count = 1; + */ + public Builder clearSampleCount() { + bitField0_ = (bitField0_ & ~0x00000001); + sampleCount_ = 0L; + onChanged(); + return this; + } + + private double sampleSum_ ; + /** + * optional double sample_sum = 2; + */ + public boolean hasSampleSum() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional double sample_sum = 2; + */ + public double getSampleSum() { + return sampleSum_; + } + /** + * optional 
double sample_sum = 2; + */ + public Builder setSampleSum(double value) { + bitField0_ |= 0x00000002; + sampleSum_ = value; + onChanged(); + return this; + } + /** + * optional double sample_sum = 2; + */ + public Builder clearSampleSum() { + bitField0_ = (bitField0_ & ~0x00000002); + sampleSum_ = 0D; + onChanged(); + return this; + } + + private java.util.List quantile_ = + java.util.Collections.emptyList(); + private void ensureQuantileIsMutable() { + if (!((bitField0_ & 0x00000004) == 0x00000004)) { + quantile_ = new java.util.ArrayList(quantile_); + bitField0_ |= 0x00000004; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + io.prometheus.client.Metrics.Quantile, io.prometheus.client.Metrics.Quantile.Builder, io.prometheus.client.Metrics.QuantileOrBuilder> quantileBuilder_; + + /** + * repeated .io.prometheus.client.Quantile quantile = 3; + */ + public java.util.List getQuantileList() { + if (quantileBuilder_ == null) { + return java.util.Collections.unmodifiableList(quantile_); + } else { + return quantileBuilder_.getMessageList(); + } + } + /** + * repeated .io.prometheus.client.Quantile quantile = 3; + */ + public int getQuantileCount() { + if (quantileBuilder_ == null) { + return quantile_.size(); + } else { + return quantileBuilder_.getCount(); + } + } + /** + * repeated .io.prometheus.client.Quantile quantile = 3; + */ + public io.prometheus.client.Metrics.Quantile getQuantile(int index) { + if (quantileBuilder_ == null) { + return quantile_.get(index); + } else { + return quantileBuilder_.getMessage(index); + } + } + /** + * repeated .io.prometheus.client.Quantile quantile = 3; + */ + public Builder setQuantile( + int index, io.prometheus.client.Metrics.Quantile value) { + if (quantileBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureQuantileIsMutable(); + quantile_.set(index, value); + onChanged(); + } else { + quantileBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated 
.io.prometheus.client.Quantile quantile = 3; + */ + public Builder setQuantile( + int index, io.prometheus.client.Metrics.Quantile.Builder builderForValue) { + if (quantileBuilder_ == null) { + ensureQuantileIsMutable(); + quantile_.set(index, builderForValue.build()); + onChanged(); + } else { + quantileBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .io.prometheus.client.Quantile quantile = 3; + */ + public Builder addQuantile(io.prometheus.client.Metrics.Quantile value) { + if (quantileBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureQuantileIsMutable(); + quantile_.add(value); + onChanged(); + } else { + quantileBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .io.prometheus.client.Quantile quantile = 3; + */ + public Builder addQuantile( + int index, io.prometheus.client.Metrics.Quantile value) { + if (quantileBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureQuantileIsMutable(); + quantile_.add(index, value); + onChanged(); + } else { + quantileBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .io.prometheus.client.Quantile quantile = 3; + */ + public Builder addQuantile( + io.prometheus.client.Metrics.Quantile.Builder builderForValue) { + if (quantileBuilder_ == null) { + ensureQuantileIsMutable(); + quantile_.add(builderForValue.build()); + onChanged(); + } else { + quantileBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .io.prometheus.client.Quantile quantile = 3; + */ + public Builder addQuantile( + int index, io.prometheus.client.Metrics.Quantile.Builder builderForValue) { + if (quantileBuilder_ == null) { + ensureQuantileIsMutable(); + quantile_.add(index, builderForValue.build()); + onChanged(); + } else { + quantileBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated 
.io.prometheus.client.Quantile quantile = 3; + */ + public Builder addAllQuantile( + java.lang.Iterable values) { + if (quantileBuilder_ == null) { + ensureQuantileIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll( + values, quantile_); + onChanged(); + } else { + quantileBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .io.prometheus.client.Quantile quantile = 3; + */ + public Builder clearQuantile() { + if (quantileBuilder_ == null) { + quantile_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + } else { + quantileBuilder_.clear(); + } + return this; + } + /** + * repeated .io.prometheus.client.Quantile quantile = 3; + */ + public Builder removeQuantile(int index) { + if (quantileBuilder_ == null) { + ensureQuantileIsMutable(); + quantile_.remove(index); + onChanged(); + } else { + quantileBuilder_.remove(index); + } + return this; + } + /** + * repeated .io.prometheus.client.Quantile quantile = 3; + */ + public io.prometheus.client.Metrics.Quantile.Builder getQuantileBuilder( + int index) { + return getQuantileFieldBuilder().getBuilder(index); + } + /** + * repeated .io.prometheus.client.Quantile quantile = 3; + */ + public io.prometheus.client.Metrics.QuantileOrBuilder getQuantileOrBuilder( + int index) { + if (quantileBuilder_ == null) { + return quantile_.get(index); } else { + return quantileBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .io.prometheus.client.Quantile quantile = 3; + */ + public java.util.List + getQuantileOrBuilderList() { + if (quantileBuilder_ != null) { + return quantileBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(quantile_); + } + } + /** + * repeated .io.prometheus.client.Quantile quantile = 3; + */ + public io.prometheus.client.Metrics.Quantile.Builder addQuantileBuilder() { + return getQuantileFieldBuilder().addBuilder( + 
io.prometheus.client.Metrics.Quantile.getDefaultInstance()); + } + /** + * repeated .io.prometheus.client.Quantile quantile = 3; + */ + public io.prometheus.client.Metrics.Quantile.Builder addQuantileBuilder( + int index) { + return getQuantileFieldBuilder().addBuilder( + index, io.prometheus.client.Metrics.Quantile.getDefaultInstance()); + } + /** + * repeated .io.prometheus.client.Quantile quantile = 3; + */ + public java.util.List + getQuantileBuilderList() { + return getQuantileFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + io.prometheus.client.Metrics.Quantile, io.prometheus.client.Metrics.Quantile.Builder, io.prometheus.client.Metrics.QuantileOrBuilder> + getQuantileFieldBuilder() { + if (quantileBuilder_ == null) { + quantileBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + io.prometheus.client.Metrics.Quantile, io.prometheus.client.Metrics.Quantile.Builder, io.prometheus.client.Metrics.QuantileOrBuilder>( + quantile_, + ((bitField0_ & 0x00000004) == 0x00000004), + getParentForChildren(), + isClean()); + quantile_ = null; + } + return quantileBuilder_; + } + + // @@protoc_insertion_point(builder_scope:io.prometheus.client.Summary) + } + + static { + defaultInstance = new Summary(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:io.prometheus.client.Summary) + } + + public interface UntypedOrBuilder extends + // @@protoc_insertion_point(interface_extends:io.prometheus.client.Untyped) + com.google.protobuf.MessageOrBuilder { + + /** + * optional double value = 1; + */ + boolean hasValue(); + /** + * optional double value = 1; + */ + double getValue(); + } + /** + * Protobuf type {@code io.prometheus.client.Untyped} + */ + public static final class Untyped extends + com.google.protobuf.GeneratedMessage implements + // @@protoc_insertion_point(message_implements:io.prometheus.client.Untyped) + UntypedOrBuilder { + // Use Untyped.newBuilder() to construct. 
+ private Untyped(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private Untyped(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final Untyped defaultInstance; + public static Untyped getDefaultInstance() { + return defaultInstance; + } + + public Untyped getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private Untyped( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 9: { + bitField0_ |= 0x00000001; + value_ = input.readDouble(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return io.prometheus.client.Metrics.internal_static_io_prometheus_client_Untyped_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { 
+ return io.prometheus.client.Metrics.internal_static_io_prometheus_client_Untyped_fieldAccessorTable + .ensureFieldAccessorsInitialized( + io.prometheus.client.Metrics.Untyped.class, io.prometheus.client.Metrics.Untyped.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public Untyped parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new Untyped(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + public static final int VALUE_FIELD_NUMBER = 1; + private double value_; + /** + * optional double value = 1; + */ + public boolean hasValue() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional double value = 1; + */ + public double getValue() { + return value_; + } + + private void initFields() { + value_ = 0D; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeDouble(1, value_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeDoubleSize(1, value_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + 
private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static io.prometheus.client.Metrics.Untyped parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static io.prometheus.client.Metrics.Untyped parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static io.prometheus.client.Metrics.Untyped parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static io.prometheus.client.Metrics.Untyped parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static io.prometheus.client.Metrics.Untyped parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static io.prometheus.client.Metrics.Untyped parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static io.prometheus.client.Metrics.Untyped parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static io.prometheus.client.Metrics.Untyped parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static 
io.prometheus.client.Metrics.Untyped parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static io.prometheus.client.Metrics.Untyped parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(io.prometheus.client.Metrics.Untyped prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code io.prometheus.client.Untyped} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder implements + // @@protoc_insertion_point(builder_implements:io.prometheus.client.Untyped) + io.prometheus.client.Metrics.UntypedOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return io.prometheus.client.Metrics.internal_static_io_prometheus_client_Untyped_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return io.prometheus.client.Metrics.internal_static_io_prometheus_client_Untyped_fieldAccessorTable + .ensureFieldAccessorsInitialized( + io.prometheus.client.Metrics.Untyped.class, io.prometheus.client.Metrics.Untyped.Builder.class); + } + + // Construct using io.prometheus.client.Metrics.Untyped.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); 
+ maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + value_ = 0D; + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return io.prometheus.client.Metrics.internal_static_io_prometheus_client_Untyped_descriptor; + } + + public io.prometheus.client.Metrics.Untyped getDefaultInstanceForType() { + return io.prometheus.client.Metrics.Untyped.getDefaultInstance(); + } + + public io.prometheus.client.Metrics.Untyped build() { + io.prometheus.client.Metrics.Untyped result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public io.prometheus.client.Metrics.Untyped buildPartial() { + io.prometheus.client.Metrics.Untyped result = new io.prometheus.client.Metrics.Untyped(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.value_ = value_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof io.prometheus.client.Metrics.Untyped) { + return mergeFrom((io.prometheus.client.Metrics.Untyped)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(io.prometheus.client.Metrics.Untyped other) { + if (other == io.prometheus.client.Metrics.Untyped.getDefaultInstance()) return this; + if (other.hasValue()) { + setValue(other.getValue()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + 
return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + io.prometheus.client.Metrics.Untyped parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (io.prometheus.client.Metrics.Untyped) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + private double value_ ; + /** + * optional double value = 1; + */ + public boolean hasValue() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional double value = 1; + */ + public double getValue() { + return value_; + } + /** + * optional double value = 1; + */ + public Builder setValue(double value) { + bitField0_ |= 0x00000001; + value_ = value; + onChanged(); + return this; + } + /** + * optional double value = 1; + */ + public Builder clearValue() { + bitField0_ = (bitField0_ & ~0x00000001); + value_ = 0D; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:io.prometheus.client.Untyped) + } + + static { + defaultInstance = new Untyped(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:io.prometheus.client.Untyped) + } + + public interface HistogramOrBuilder extends + // @@protoc_insertion_point(interface_extends:io.prometheus.client.Histogram) + com.google.protobuf.MessageOrBuilder { + + /** + * optional uint64 sample_count = 1; + */ + boolean hasSampleCount(); + /** + * optional uint64 sample_count = 1; + */ + long getSampleCount(); + + /** + * optional double sample_sum = 2; + */ + boolean hasSampleSum(); + /** + * optional double sample_sum = 2; + */ + double getSampleSum(); + + /** + * repeated .io.prometheus.client.Bucket bucket = 3; + * + *
+     * Ordered in increasing order of upper_bound, +Inf bucket is optional.
+     * 
+ */ + java.util.List + getBucketList(); + /** + * repeated .io.prometheus.client.Bucket bucket = 3; + * + *
+     * Ordered in increasing order of upper_bound, +Inf bucket is optional.
+     * 
+ */ + io.prometheus.client.Metrics.Bucket getBucket(int index); + /** + * repeated .io.prometheus.client.Bucket bucket = 3; + * + *
+     * Ordered in increasing order of upper_bound, +Inf bucket is optional.
+     * 
+ */ + int getBucketCount(); + /** + * repeated .io.prometheus.client.Bucket bucket = 3; + * + *
+     * Ordered in increasing order of upper_bound, +Inf bucket is optional.
+     * 
+ */ + java.util.List + getBucketOrBuilderList(); + /** + * repeated .io.prometheus.client.Bucket bucket = 3; + * + *
+     * Ordered in increasing order of upper_bound, +Inf bucket is optional.
+     * 
+ */ + io.prometheus.client.Metrics.BucketOrBuilder getBucketOrBuilder( + int index); + } + /** + * Protobuf type {@code io.prometheus.client.Histogram} + */ + public static final class Histogram extends + com.google.protobuf.GeneratedMessage implements + // @@protoc_insertion_point(message_implements:io.prometheus.client.Histogram) + HistogramOrBuilder { + // Use Histogram.newBuilder() to construct. + private Histogram(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private Histogram(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final Histogram defaultInstance; + public static Histogram getDefaultInstance() { + return defaultInstance; + } + + public Histogram getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private Histogram( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 8: { + bitField0_ |= 0x00000001; + sampleCount_ = input.readUInt64(); + break; + } + case 17: { + bitField0_ |= 0x00000002; + sampleSum_ = input.readDouble(); + break; + } + case 26: { + if (!((mutable_bitField0_ & 0x00000004) == 0x00000004)) { + bucket_ = new java.util.ArrayList(); + mutable_bitField0_ |= 
0x00000004; + } + bucket_.add(input.readMessage(io.prometheus.client.Metrics.Bucket.PARSER, extensionRegistry)); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000004) == 0x00000004)) { + bucket_ = java.util.Collections.unmodifiableList(bucket_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return io.prometheus.client.Metrics.internal_static_io_prometheus_client_Histogram_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return io.prometheus.client.Metrics.internal_static_io_prometheus_client_Histogram_fieldAccessorTable + .ensureFieldAccessorsInitialized( + io.prometheus.client.Metrics.Histogram.class, io.prometheus.client.Metrics.Histogram.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public Histogram parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new Histogram(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + public static final int SAMPLE_COUNT_FIELD_NUMBER = 1; + private long sampleCount_; + /** + * optional uint64 sample_count = 1; + */ + public boolean hasSampleCount() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional uint64 sample_count = 1; + */ + public long getSampleCount() { + return sampleCount_; + } + + public static 
final int SAMPLE_SUM_FIELD_NUMBER = 2; + private double sampleSum_; + /** + * optional double sample_sum = 2; + */ + public boolean hasSampleSum() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional double sample_sum = 2; + */ + public double getSampleSum() { + return sampleSum_; + } + + public static final int BUCKET_FIELD_NUMBER = 3; + private java.util.List bucket_; + /** + * repeated .io.prometheus.client.Bucket bucket = 3; + * + *
+     * Ordered in increasing order of upper_bound, +Inf bucket is optional.
+     * 
+ */ + public java.util.List getBucketList() { + return bucket_; + } + /** + * repeated .io.prometheus.client.Bucket bucket = 3; + * + *
+     * Ordered in increasing order of upper_bound, +Inf bucket is optional.
+     * 
+ */ + public java.util.List + getBucketOrBuilderList() { + return bucket_; + } + /** + * repeated .io.prometheus.client.Bucket bucket = 3; + * + *
+     * Ordered in increasing order of upper_bound, +Inf bucket is optional.
+     * 
+ */ + public int getBucketCount() { + return bucket_.size(); + } + /** + * repeated .io.prometheus.client.Bucket bucket = 3; + * + *
+     * Ordered in increasing order of upper_bound, +Inf bucket is optional.
+     * 
+ */ + public io.prometheus.client.Metrics.Bucket getBucket(int index) { + return bucket_.get(index); + } + /** + * repeated .io.prometheus.client.Bucket bucket = 3; + * + *
+     * Ordered in increasing order of upper_bound, +Inf bucket is optional.
+     * 
+ */ + public io.prometheus.client.Metrics.BucketOrBuilder getBucketOrBuilder( + int index) { + return bucket_.get(index); + } + + private void initFields() { + sampleCount_ = 0L; + sampleSum_ = 0D; + bucket_ = java.util.Collections.emptyList(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeUInt64(1, sampleCount_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeDouble(2, sampleSum_); + } + for (int i = 0; i < bucket_.size(); i++) { + output.writeMessage(3, bucket_.get(i)); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(1, sampleCount_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeDoubleSize(2, sampleSum_); + } + for (int i = 0; i < bucket_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(3, bucket_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static io.prometheus.client.Metrics.Histogram parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException 
{ + return PARSER.parseFrom(data); + } + public static io.prometheus.client.Metrics.Histogram parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static io.prometheus.client.Metrics.Histogram parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static io.prometheus.client.Metrics.Histogram parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static io.prometheus.client.Metrics.Histogram parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static io.prometheus.client.Metrics.Histogram parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static io.prometheus.client.Metrics.Histogram parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static io.prometheus.client.Metrics.Histogram parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static io.prometheus.client.Metrics.Histogram parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static io.prometheus.client.Metrics.Histogram parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException 
{ + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(io.prometheus.client.Metrics.Histogram prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code io.prometheus.client.Histogram} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder implements + // @@protoc_insertion_point(builder_implements:io.prometheus.client.Histogram) + io.prometheus.client.Metrics.HistogramOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return io.prometheus.client.Metrics.internal_static_io_prometheus_client_Histogram_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return io.prometheus.client.Metrics.internal_static_io_prometheus_client_Histogram_fieldAccessorTable + .ensureFieldAccessorsInitialized( + io.prometheus.client.Metrics.Histogram.class, io.prometheus.client.Metrics.Histogram.Builder.class); + } + + // Construct using io.prometheus.client.Metrics.Histogram.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getBucketFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + sampleCount_ = 0L; + bitField0_ = 
(bitField0_ & ~0x00000001); + sampleSum_ = 0D; + bitField0_ = (bitField0_ & ~0x00000002); + if (bucketBuilder_ == null) { + bucket_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000004); + } else { + bucketBuilder_.clear(); + } + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return io.prometheus.client.Metrics.internal_static_io_prometheus_client_Histogram_descriptor; + } + + public io.prometheus.client.Metrics.Histogram getDefaultInstanceForType() { + return io.prometheus.client.Metrics.Histogram.getDefaultInstance(); + } + + public io.prometheus.client.Metrics.Histogram build() { + io.prometheus.client.Metrics.Histogram result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public io.prometheus.client.Metrics.Histogram buildPartial() { + io.prometheus.client.Metrics.Histogram result = new io.prometheus.client.Metrics.Histogram(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.sampleCount_ = sampleCount_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.sampleSum_ = sampleSum_; + if (bucketBuilder_ == null) { + if (((bitField0_ & 0x00000004) == 0x00000004)) { + bucket_ = java.util.Collections.unmodifiableList(bucket_); + bitField0_ = (bitField0_ & ~0x00000004); + } + result.bucket_ = bucket_; + } else { + result.bucket_ = bucketBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof io.prometheus.client.Metrics.Histogram) { + return mergeFrom((io.prometheus.client.Metrics.Histogram)other); + } else { + super.mergeFrom(other); + return this; 
+ } + } + + public Builder mergeFrom(io.prometheus.client.Metrics.Histogram other) { + if (other == io.prometheus.client.Metrics.Histogram.getDefaultInstance()) return this; + if (other.hasSampleCount()) { + setSampleCount(other.getSampleCount()); + } + if (other.hasSampleSum()) { + setSampleSum(other.getSampleSum()); + } + if (bucketBuilder_ == null) { + if (!other.bucket_.isEmpty()) { + if (bucket_.isEmpty()) { + bucket_ = other.bucket_; + bitField0_ = (bitField0_ & ~0x00000004); + } else { + ensureBucketIsMutable(); + bucket_.addAll(other.bucket_); + } + onChanged(); + } + } else { + if (!other.bucket_.isEmpty()) { + if (bucketBuilder_.isEmpty()) { + bucketBuilder_.dispose(); + bucketBuilder_ = null; + bucket_ = other.bucket_; + bitField0_ = (bitField0_ & ~0x00000004); + bucketBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? + getBucketFieldBuilder() : null; + } else { + bucketBuilder_.addAllMessages(other.bucket_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + io.prometheus.client.Metrics.Histogram parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (io.prometheus.client.Metrics.Histogram) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + private long sampleCount_ ; + /** + * optional uint64 sample_count = 1; + */ + public boolean hasSampleCount() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional uint64 sample_count = 1; + */ + public long getSampleCount() { + return sampleCount_; + } + /** + * 
optional uint64 sample_count = 1; + */ + public Builder setSampleCount(long value) { + bitField0_ |= 0x00000001; + sampleCount_ = value; + onChanged(); + return this; + } + /** + * optional uint64 sample_count = 1; + */ + public Builder clearSampleCount() { + bitField0_ = (bitField0_ & ~0x00000001); + sampleCount_ = 0L; + onChanged(); + return this; + } + + private double sampleSum_ ; + /** + * optional double sample_sum = 2; + */ + public boolean hasSampleSum() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional double sample_sum = 2; + */ + public double getSampleSum() { + return sampleSum_; + } + /** + * optional double sample_sum = 2; + */ + public Builder setSampleSum(double value) { + bitField0_ |= 0x00000002; + sampleSum_ = value; + onChanged(); + return this; + } + /** + * optional double sample_sum = 2; + */ + public Builder clearSampleSum() { + bitField0_ = (bitField0_ & ~0x00000002); + sampleSum_ = 0D; + onChanged(); + return this; + } + + private java.util.List bucket_ = + java.util.Collections.emptyList(); + private void ensureBucketIsMutable() { + if (!((bitField0_ & 0x00000004) == 0x00000004)) { + bucket_ = new java.util.ArrayList(bucket_); + bitField0_ |= 0x00000004; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + io.prometheus.client.Metrics.Bucket, io.prometheus.client.Metrics.Bucket.Builder, io.prometheus.client.Metrics.BucketOrBuilder> bucketBuilder_; + + /** + * repeated .io.prometheus.client.Bucket bucket = 3; + * + *
+       * Ordered in increasing order of upper_bound, +Inf bucket is optional.
+       * 
+ */ + public java.util.List getBucketList() { + if (bucketBuilder_ == null) { + return java.util.Collections.unmodifiableList(bucket_); + } else { + return bucketBuilder_.getMessageList(); + } + } + /** + * repeated .io.prometheus.client.Bucket bucket = 3; + * + *
+       * Ordered in increasing order of upper_bound, +Inf bucket is optional.
+       * 
+ */ + public int getBucketCount() { + if (bucketBuilder_ == null) { + return bucket_.size(); + } else { + return bucketBuilder_.getCount(); + } + } + /** + * repeated .io.prometheus.client.Bucket bucket = 3; + * + *
+       * Ordered in increasing order of upper_bound, +Inf bucket is optional.
+       * 
+ */ + public io.prometheus.client.Metrics.Bucket getBucket(int index) { + if (bucketBuilder_ == null) { + return bucket_.get(index); + } else { + return bucketBuilder_.getMessage(index); + } + } + /** + * repeated .io.prometheus.client.Bucket bucket = 3; + * + *
+       * Ordered in increasing order of upper_bound, +Inf bucket is optional.
+       * 
+ */ + public Builder setBucket( + int index, io.prometheus.client.Metrics.Bucket value) { + if (bucketBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureBucketIsMutable(); + bucket_.set(index, value); + onChanged(); + } else { + bucketBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .io.prometheus.client.Bucket bucket = 3; + * + *
+       * Ordered in increasing order of upper_bound, +Inf bucket is optional.
+       * 
+ */ + public Builder setBucket( + int index, io.prometheus.client.Metrics.Bucket.Builder builderForValue) { + if (bucketBuilder_ == null) { + ensureBucketIsMutable(); + bucket_.set(index, builderForValue.build()); + onChanged(); + } else { + bucketBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .io.prometheus.client.Bucket bucket = 3; + * + *
+       * Ordered in increasing order of upper_bound, +Inf bucket is optional.
+       * 
+ */ + public Builder addBucket(io.prometheus.client.Metrics.Bucket value) { + if (bucketBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureBucketIsMutable(); + bucket_.add(value); + onChanged(); + } else { + bucketBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .io.prometheus.client.Bucket bucket = 3; + * + *
+       * Ordered in increasing order of upper_bound, +Inf bucket is optional.
+       * 
+ */ + public Builder addBucket( + int index, io.prometheus.client.Metrics.Bucket value) { + if (bucketBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureBucketIsMutable(); + bucket_.add(index, value); + onChanged(); + } else { + bucketBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .io.prometheus.client.Bucket bucket = 3; + * + *
+       * Ordered in increasing order of upper_bound, +Inf bucket is optional.
+       * 
+ */ + public Builder addBucket( + io.prometheus.client.Metrics.Bucket.Builder builderForValue) { + if (bucketBuilder_ == null) { + ensureBucketIsMutable(); + bucket_.add(builderForValue.build()); + onChanged(); + } else { + bucketBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .io.prometheus.client.Bucket bucket = 3; + * + *
+       * Ordered in increasing order of upper_bound, +Inf bucket is optional.
+       * 
+ */ + public Builder addBucket( + int index, io.prometheus.client.Metrics.Bucket.Builder builderForValue) { + if (bucketBuilder_ == null) { + ensureBucketIsMutable(); + bucket_.add(index, builderForValue.build()); + onChanged(); + } else { + bucketBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .io.prometheus.client.Bucket bucket = 3; + * + *
+       * Ordered in increasing order of upper_bound, +Inf bucket is optional.
+       * 
+ */ + public Builder addAllBucket( + java.lang.Iterable values) { + if (bucketBuilder_ == null) { + ensureBucketIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll( + values, bucket_); + onChanged(); + } else { + bucketBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .io.prometheus.client.Bucket bucket = 3; + * + *
+       * Ordered in increasing order of upper_bound, +Inf bucket is optional.
+       * 
+ */ + public Builder clearBucket() { + if (bucketBuilder_ == null) { + bucket_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + } else { + bucketBuilder_.clear(); + } + return this; + } + /** + * repeated .io.prometheus.client.Bucket bucket = 3; + * + *
+       * Ordered in increasing order of upper_bound, +Inf bucket is optional.
+       * 
+ */ + public Builder removeBucket(int index) { + if (bucketBuilder_ == null) { + ensureBucketIsMutable(); + bucket_.remove(index); + onChanged(); + } else { + bucketBuilder_.remove(index); + } + return this; + } + /** + * repeated .io.prometheus.client.Bucket bucket = 3; + * + *
+       * Ordered in increasing order of upper_bound, +Inf bucket is optional.
+       * 
+ */ + public io.prometheus.client.Metrics.Bucket.Builder getBucketBuilder( + int index) { + return getBucketFieldBuilder().getBuilder(index); + } + /** + * repeated .io.prometheus.client.Bucket bucket = 3; + * + *
+       * Ordered in increasing order of upper_bound, +Inf bucket is optional.
+       * 
+ */ + public io.prometheus.client.Metrics.BucketOrBuilder getBucketOrBuilder( + int index) { + if (bucketBuilder_ == null) { + return bucket_.get(index); } else { + return bucketBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .io.prometheus.client.Bucket bucket = 3; + * + *
+       * Ordered in increasing order of upper_bound, +Inf bucket is optional.
+       * 
+ */ + public java.util.List + getBucketOrBuilderList() { + if (bucketBuilder_ != null) { + return bucketBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(bucket_); + } + } + /** + * repeated .io.prometheus.client.Bucket bucket = 3; + * + *
+       * Ordered in increasing order of upper_bound, +Inf bucket is optional.
+       * 
+ */ + public io.prometheus.client.Metrics.Bucket.Builder addBucketBuilder() { + return getBucketFieldBuilder().addBuilder( + io.prometheus.client.Metrics.Bucket.getDefaultInstance()); + } + /** + * repeated .io.prometheus.client.Bucket bucket = 3; + * + *
+       * Ordered in increasing order of upper_bound, +Inf bucket is optional.
+       * 
+ */ + public io.prometheus.client.Metrics.Bucket.Builder addBucketBuilder( + int index) { + return getBucketFieldBuilder().addBuilder( + index, io.prometheus.client.Metrics.Bucket.getDefaultInstance()); + } + /** + * repeated .io.prometheus.client.Bucket bucket = 3; + * + *
+       * Ordered in increasing order of upper_bound, +Inf bucket is optional.
+       * 
+ */ + public java.util.List + getBucketBuilderList() { + return getBucketFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + io.prometheus.client.Metrics.Bucket, io.prometheus.client.Metrics.Bucket.Builder, io.prometheus.client.Metrics.BucketOrBuilder> + getBucketFieldBuilder() { + if (bucketBuilder_ == null) { + bucketBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + io.prometheus.client.Metrics.Bucket, io.prometheus.client.Metrics.Bucket.Builder, io.prometheus.client.Metrics.BucketOrBuilder>( + bucket_, + ((bitField0_ & 0x00000004) == 0x00000004), + getParentForChildren(), + isClean()); + bucket_ = null; + } + return bucketBuilder_; + } + + // @@protoc_insertion_point(builder_scope:io.prometheus.client.Histogram) + } + + static { + defaultInstance = new Histogram(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:io.prometheus.client.Histogram) + } + + public interface BucketOrBuilder extends + // @@protoc_insertion_point(interface_extends:io.prometheus.client.Bucket) + com.google.protobuf.MessageOrBuilder { + + /** + * optional uint64 cumulative_count = 1; + * + *
+     * Cumulative in increasing order.
+     * 
+ */ + boolean hasCumulativeCount(); + /** + * optional uint64 cumulative_count = 1; + * + *
+     * Cumulative in increasing order.
+     * 
+ */ + long getCumulativeCount(); + + /** + * optional double upper_bound = 2; + * + *
+     * Inclusive.
+     * 
+ */ + boolean hasUpperBound(); + /** + * optional double upper_bound = 2; + * + *
+     * Inclusive.
+     * 
+ */ + double getUpperBound(); + } + /** + * Protobuf type {@code io.prometheus.client.Bucket} + */ + public static final class Bucket extends + com.google.protobuf.GeneratedMessage implements + // @@protoc_insertion_point(message_implements:io.prometheus.client.Bucket) + BucketOrBuilder { + // Use Bucket.newBuilder() to construct. + private Bucket(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private Bucket(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final Bucket defaultInstance; + public static Bucket getDefaultInstance() { + return defaultInstance; + } + + public Bucket getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private Bucket( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 8: { + bitField0_ |= 0x00000001; + cumulativeCount_ = input.readUInt64(); + break; + } + case 17: { + bitField0_ |= 0x00000002; + upperBound_ = input.readDouble(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + 
e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return io.prometheus.client.Metrics.internal_static_io_prometheus_client_Bucket_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return io.prometheus.client.Metrics.internal_static_io_prometheus_client_Bucket_fieldAccessorTable + .ensureFieldAccessorsInitialized( + io.prometheus.client.Metrics.Bucket.class, io.prometheus.client.Metrics.Bucket.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public Bucket parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new Bucket(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + public static final int CUMULATIVE_COUNT_FIELD_NUMBER = 1; + private long cumulativeCount_; + /** + * optional uint64 cumulative_count = 1; + * + *
+     * Cumulative in increasing order.
+     * 
+ */ + public boolean hasCumulativeCount() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional uint64 cumulative_count = 1; + * + *
+     * Cumulative in increasing order.
+     * 
+ */ + public long getCumulativeCount() { + return cumulativeCount_; + } + + public static final int UPPER_BOUND_FIELD_NUMBER = 2; + private double upperBound_; + /** + * optional double upper_bound = 2; + * + *
+     * Inclusive.
+     * 
+ */ + public boolean hasUpperBound() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional double upper_bound = 2; + * + *
+     * Inclusive.
+     * 
+ */ + public double getUpperBound() { + return upperBound_; + } + + private void initFields() { + cumulativeCount_ = 0L; + upperBound_ = 0D; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeUInt64(1, cumulativeCount_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeDouble(2, upperBound_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(1, cumulativeCount_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeDoubleSize(2, upperBound_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static io.prometheus.client.Metrics.Bucket parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static io.prometheus.client.Metrics.Bucket parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + 
public static io.prometheus.client.Metrics.Bucket parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static io.prometheus.client.Metrics.Bucket parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static io.prometheus.client.Metrics.Bucket parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static io.prometheus.client.Metrics.Bucket parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static io.prometheus.client.Metrics.Bucket parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static io.prometheus.client.Metrics.Bucket parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static io.prometheus.client.Metrics.Bucket parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static io.prometheus.client.Metrics.Bucket parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(io.prometheus.client.Metrics.Bucket prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { 
return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code io.prometheus.client.Bucket} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder implements + // @@protoc_insertion_point(builder_implements:io.prometheus.client.Bucket) + io.prometheus.client.Metrics.BucketOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return io.prometheus.client.Metrics.internal_static_io_prometheus_client_Bucket_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return io.prometheus.client.Metrics.internal_static_io_prometheus_client_Bucket_fieldAccessorTable + .ensureFieldAccessorsInitialized( + io.prometheus.client.Metrics.Bucket.class, io.prometheus.client.Metrics.Bucket.Builder.class); + } + + // Construct using io.prometheus.client.Metrics.Bucket.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + cumulativeCount_ = 0L; + bitField0_ = (bitField0_ & ~0x00000001); + upperBound_ = 0D; + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return io.prometheus.client.Metrics.internal_static_io_prometheus_client_Bucket_descriptor; + } + + public 
io.prometheus.client.Metrics.Bucket getDefaultInstanceForType() { + return io.prometheus.client.Metrics.Bucket.getDefaultInstance(); + } + + public io.prometheus.client.Metrics.Bucket build() { + io.prometheus.client.Metrics.Bucket result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public io.prometheus.client.Metrics.Bucket buildPartial() { + io.prometheus.client.Metrics.Bucket result = new io.prometheus.client.Metrics.Bucket(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.cumulativeCount_ = cumulativeCount_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.upperBound_ = upperBound_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof io.prometheus.client.Metrics.Bucket) { + return mergeFrom((io.prometheus.client.Metrics.Bucket)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(io.prometheus.client.Metrics.Bucket other) { + if (other == io.prometheus.client.Metrics.Bucket.getDefaultInstance()) return this; + if (other.hasCumulativeCount()) { + setCumulativeCount(other.getCumulativeCount()); + } + if (other.hasUpperBound()) { + setUpperBound(other.getUpperBound()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + io.prometheus.client.Metrics.Bucket parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException 
e) { + parsedMessage = (io.prometheus.client.Metrics.Bucket) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + private long cumulativeCount_ ; + /** + * optional uint64 cumulative_count = 1; + * + *
+       * Cumulative in increasing order.
+       * 
+ */ + public boolean hasCumulativeCount() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional uint64 cumulative_count = 1; + * + *
+       * Cumulative in increasing order.
+       * 
+ */ + public long getCumulativeCount() { + return cumulativeCount_; + } + /** + * optional uint64 cumulative_count = 1; + * + *
+       * Cumulative in increasing order.
+       * 
+ */ + public Builder setCumulativeCount(long value) { + bitField0_ |= 0x00000001; + cumulativeCount_ = value; + onChanged(); + return this; + } + /** + * optional uint64 cumulative_count = 1; + * + *
+       * Cumulative in increasing order.
+       * 
+ */ + public Builder clearCumulativeCount() { + bitField0_ = (bitField0_ & ~0x00000001); + cumulativeCount_ = 0L; + onChanged(); + return this; + } + + private double upperBound_ ; + /** + * optional double upper_bound = 2; + * + *
+       * Inclusive.
+       * 
+ */ + public boolean hasUpperBound() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional double upper_bound = 2; + * + *
+       * Inclusive.
+       * 
+ */ + public double getUpperBound() { + return upperBound_; + } + /** + * optional double upper_bound = 2; + * + *
+       * Inclusive.
+       * 
+ */ + public Builder setUpperBound(double value) { + bitField0_ |= 0x00000002; + upperBound_ = value; + onChanged(); + return this; + } + /** + * optional double upper_bound = 2; + * + *
+       * Inclusive.
+       * 
+ */ + public Builder clearUpperBound() { + bitField0_ = (bitField0_ & ~0x00000002); + upperBound_ = 0D; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:io.prometheus.client.Bucket) + } + + static { + defaultInstance = new Bucket(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:io.prometheus.client.Bucket) + } + + public interface MetricOrBuilder extends + // @@protoc_insertion_point(interface_extends:io.prometheus.client.Metric) + com.google.protobuf.MessageOrBuilder { + + /** + * repeated .io.prometheus.client.LabelPair label = 1; + */ + java.util.List + getLabelList(); + /** + * repeated .io.prometheus.client.LabelPair label = 1; + */ + io.prometheus.client.Metrics.LabelPair getLabel(int index); + /** + * repeated .io.prometheus.client.LabelPair label = 1; + */ + int getLabelCount(); + /** + * repeated .io.prometheus.client.LabelPair label = 1; + */ + java.util.List + getLabelOrBuilderList(); + /** + * repeated .io.prometheus.client.LabelPair label = 1; + */ + io.prometheus.client.Metrics.LabelPairOrBuilder getLabelOrBuilder( + int index); + + /** + * optional .io.prometheus.client.Gauge gauge = 2; + */ + boolean hasGauge(); + /** + * optional .io.prometheus.client.Gauge gauge = 2; + */ + io.prometheus.client.Metrics.Gauge getGauge(); + /** + * optional .io.prometheus.client.Gauge gauge = 2; + */ + io.prometheus.client.Metrics.GaugeOrBuilder getGaugeOrBuilder(); + + /** + * optional .io.prometheus.client.Counter counter = 3; + */ + boolean hasCounter(); + /** + * optional .io.prometheus.client.Counter counter = 3; + */ + io.prometheus.client.Metrics.Counter getCounter(); + /** + * optional .io.prometheus.client.Counter counter = 3; + */ + io.prometheus.client.Metrics.CounterOrBuilder getCounterOrBuilder(); + + /** + * optional .io.prometheus.client.Summary summary = 4; + */ + boolean hasSummary(); + /** + * optional .io.prometheus.client.Summary summary = 4; + */ + 
io.prometheus.client.Metrics.Summary getSummary(); + /** + * optional .io.prometheus.client.Summary summary = 4; + */ + io.prometheus.client.Metrics.SummaryOrBuilder getSummaryOrBuilder(); + + /** + * optional .io.prometheus.client.Untyped untyped = 5; + */ + boolean hasUntyped(); + /** + * optional .io.prometheus.client.Untyped untyped = 5; + */ + io.prometheus.client.Metrics.Untyped getUntyped(); + /** + * optional .io.prometheus.client.Untyped untyped = 5; + */ + io.prometheus.client.Metrics.UntypedOrBuilder getUntypedOrBuilder(); + + /** + * optional .io.prometheus.client.Histogram histogram = 7; + */ + boolean hasHistogram(); + /** + * optional .io.prometheus.client.Histogram histogram = 7; + */ + io.prometheus.client.Metrics.Histogram getHistogram(); + /** + * optional .io.prometheus.client.Histogram histogram = 7; + */ + io.prometheus.client.Metrics.HistogramOrBuilder getHistogramOrBuilder(); + + /** + * optional int64 timestamp_ms = 6; + */ + boolean hasTimestampMs(); + /** + * optional int64 timestamp_ms = 6; + */ + long getTimestampMs(); + } + /** + * Protobuf type {@code io.prometheus.client.Metric} + */ + public static final class Metric extends + com.google.protobuf.GeneratedMessage implements + // @@protoc_insertion_point(message_implements:io.prometheus.client.Metric) + MetricOrBuilder { + // Use Metric.newBuilder() to construct. 
+ private Metric(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private Metric(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final Metric defaultInstance; + public static Metric getDefaultInstance() { + return defaultInstance; + } + + public Metric getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private Metric( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + label_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000001; + } + label_.add(input.readMessage(io.prometheus.client.Metrics.LabelPair.PARSER, extensionRegistry)); + break; + } + case 18: { + io.prometheus.client.Metrics.Gauge.Builder subBuilder = null; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + subBuilder = gauge_.toBuilder(); + } + gauge_ = input.readMessage(io.prometheus.client.Metrics.Gauge.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(gauge_); + gauge_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000001; + break; + } + case 26: { + io.prometheus.client.Metrics.Counter.Builder subBuilder 
= null; + if (((bitField0_ & 0x00000002) == 0x00000002)) { + subBuilder = counter_.toBuilder(); + } + counter_ = input.readMessage(io.prometheus.client.Metrics.Counter.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(counter_); + counter_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000002; + break; + } + case 34: { + io.prometheus.client.Metrics.Summary.Builder subBuilder = null; + if (((bitField0_ & 0x00000004) == 0x00000004)) { + subBuilder = summary_.toBuilder(); + } + summary_ = input.readMessage(io.prometheus.client.Metrics.Summary.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(summary_); + summary_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000004; + break; + } + case 42: { + io.prometheus.client.Metrics.Untyped.Builder subBuilder = null; + if (((bitField0_ & 0x00000008) == 0x00000008)) { + subBuilder = untyped_.toBuilder(); + } + untyped_ = input.readMessage(io.prometheus.client.Metrics.Untyped.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(untyped_); + untyped_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000008; + break; + } + case 48: { + bitField0_ |= 0x00000020; + timestampMs_ = input.readInt64(); + break; + } + case 58: { + io.prometheus.client.Metrics.Histogram.Builder subBuilder = null; + if (((bitField0_ & 0x00000010) == 0x00000010)) { + subBuilder = histogram_.toBuilder(); + } + histogram_ = input.readMessage(io.prometheus.client.Metrics.Histogram.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(histogram_); + histogram_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000010; + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 
0x00000001) == 0x00000001)) { + label_ = java.util.Collections.unmodifiableList(label_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return io.prometheus.client.Metrics.internal_static_io_prometheus_client_Metric_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return io.prometheus.client.Metrics.internal_static_io_prometheus_client_Metric_fieldAccessorTable + .ensureFieldAccessorsInitialized( + io.prometheus.client.Metrics.Metric.class, io.prometheus.client.Metrics.Metric.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public Metric parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new Metric(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + public static final int LABEL_FIELD_NUMBER = 1; + private java.util.List label_; + /** + * repeated .io.prometheus.client.LabelPair label = 1; + */ + public java.util.List getLabelList() { + return label_; + } + /** + * repeated .io.prometheus.client.LabelPair label = 1; + */ + public java.util.List + getLabelOrBuilderList() { + return label_; + } + /** + * repeated .io.prometheus.client.LabelPair label = 1; + */ + public int getLabelCount() { + return label_.size(); + } + /** + * repeated .io.prometheus.client.LabelPair label = 1; + */ + public io.prometheus.client.Metrics.LabelPair getLabel(int index) { + return label_.get(index); + } + /** + * repeated .io.prometheus.client.LabelPair label = 1; + */ + public io.prometheus.client.Metrics.LabelPairOrBuilder getLabelOrBuilder( + int index) { + 
return label_.get(index); + } + + public static final int GAUGE_FIELD_NUMBER = 2; + private io.prometheus.client.Metrics.Gauge gauge_; + /** + * optional .io.prometheus.client.Gauge gauge = 2; + */ + public boolean hasGauge() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional .io.prometheus.client.Gauge gauge = 2; + */ + public io.prometheus.client.Metrics.Gauge getGauge() { + return gauge_; + } + /** + * optional .io.prometheus.client.Gauge gauge = 2; + */ + public io.prometheus.client.Metrics.GaugeOrBuilder getGaugeOrBuilder() { + return gauge_; + } + + public static final int COUNTER_FIELD_NUMBER = 3; + private io.prometheus.client.Metrics.Counter counter_; + /** + * optional .io.prometheus.client.Counter counter = 3; + */ + public boolean hasCounter() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional .io.prometheus.client.Counter counter = 3; + */ + public io.prometheus.client.Metrics.Counter getCounter() { + return counter_; + } + /** + * optional .io.prometheus.client.Counter counter = 3; + */ + public io.prometheus.client.Metrics.CounterOrBuilder getCounterOrBuilder() { + return counter_; + } + + public static final int SUMMARY_FIELD_NUMBER = 4; + private io.prometheus.client.Metrics.Summary summary_; + /** + * optional .io.prometheus.client.Summary summary = 4; + */ + public boolean hasSummary() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * optional .io.prometheus.client.Summary summary = 4; + */ + public io.prometheus.client.Metrics.Summary getSummary() { + return summary_; + } + /** + * optional .io.prometheus.client.Summary summary = 4; + */ + public io.prometheus.client.Metrics.SummaryOrBuilder getSummaryOrBuilder() { + return summary_; + } + + public static final int UNTYPED_FIELD_NUMBER = 5; + private io.prometheus.client.Metrics.Untyped untyped_; + /** + * optional .io.prometheus.client.Untyped untyped = 5; + */ + public boolean hasUntyped() { + return ((bitField0_ & 
0x00000008) == 0x00000008); + } + /** + * optional .io.prometheus.client.Untyped untyped = 5; + */ + public io.prometheus.client.Metrics.Untyped getUntyped() { + return untyped_; + } + /** + * optional .io.prometheus.client.Untyped untyped = 5; + */ + public io.prometheus.client.Metrics.UntypedOrBuilder getUntypedOrBuilder() { + return untyped_; + } + + public static final int HISTOGRAM_FIELD_NUMBER = 7; + private io.prometheus.client.Metrics.Histogram histogram_; + /** + * optional .io.prometheus.client.Histogram histogram = 7; + */ + public boolean hasHistogram() { + return ((bitField0_ & 0x00000010) == 0x00000010); + } + /** + * optional .io.prometheus.client.Histogram histogram = 7; + */ + public io.prometheus.client.Metrics.Histogram getHistogram() { + return histogram_; + } + /** + * optional .io.prometheus.client.Histogram histogram = 7; + */ + public io.prometheus.client.Metrics.HistogramOrBuilder getHistogramOrBuilder() { + return histogram_; + } + + public static final int TIMESTAMP_MS_FIELD_NUMBER = 6; + private long timestampMs_; + /** + * optional int64 timestamp_ms = 6; + */ + public boolean hasTimestampMs() { + return ((bitField0_ & 0x00000020) == 0x00000020); + } + /** + * optional int64 timestamp_ms = 6; + */ + public long getTimestampMs() { + return timestampMs_; + } + + private void initFields() { + label_ = java.util.Collections.emptyList(); + gauge_ = io.prometheus.client.Metrics.Gauge.getDefaultInstance(); + counter_ = io.prometheus.client.Metrics.Counter.getDefaultInstance(); + summary_ = io.prometheus.client.Metrics.Summary.getDefaultInstance(); + untyped_ = io.prometheus.client.Metrics.Untyped.getDefaultInstance(); + histogram_ = io.prometheus.client.Metrics.Histogram.getDefaultInstance(); + timestampMs_ = 0L; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + 
memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + for (int i = 0; i < label_.size(); i++) { + output.writeMessage(1, label_.get(i)); + } + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(2, gauge_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeMessage(3, counter_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeMessage(4, summary_); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + output.writeMessage(5, untyped_); + } + if (((bitField0_ & 0x00000020) == 0x00000020)) { + output.writeInt64(6, timestampMs_); + } + if (((bitField0_ & 0x00000010) == 0x00000010)) { + output.writeMessage(7, histogram_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + for (int i = 0; i < label_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, label_.get(i)); + } + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(2, gauge_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(3, counter_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(4, summary_); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(5, untyped_); + } + if (((bitField0_ & 0x00000020) == 0x00000020)) { + size += com.google.protobuf.CodedOutputStream + .computeInt64Size(6, timestampMs_); + } + if (((bitField0_ & 0x00000010) == 0x00000010)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(7, histogram_); + } + size += 
getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static io.prometheus.client.Metrics.Metric parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static io.prometheus.client.Metrics.Metric parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static io.prometheus.client.Metrics.Metric parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static io.prometheus.client.Metrics.Metric parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static io.prometheus.client.Metrics.Metric parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static io.prometheus.client.Metrics.Metric parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static io.prometheus.client.Metrics.Metric parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static io.prometheus.client.Metrics.Metric parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return 
PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static io.prometheus.client.Metrics.Metric parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static io.prometheus.client.Metrics.Metric parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(io.prometheus.client.Metrics.Metric prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code io.prometheus.client.Metric} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder implements + // @@protoc_insertion_point(builder_implements:io.prometheus.client.Metric) + io.prometheus.client.Metrics.MetricOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return io.prometheus.client.Metrics.internal_static_io_prometheus_client_Metric_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return io.prometheus.client.Metrics.internal_static_io_prometheus_client_Metric_fieldAccessorTable + .ensureFieldAccessorsInitialized( + io.prometheus.client.Metrics.Metric.class, io.prometheus.client.Metrics.Metric.Builder.class); + } + + // Construct using io.prometheus.client.Metrics.Metric.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + 
com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getLabelFieldBuilder(); + getGaugeFieldBuilder(); + getCounterFieldBuilder(); + getSummaryFieldBuilder(); + getUntypedFieldBuilder(); + getHistogramFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (labelBuilder_ == null) { + label_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + } else { + labelBuilder_.clear(); + } + if (gaugeBuilder_ == null) { + gauge_ = io.prometheus.client.Metrics.Gauge.getDefaultInstance(); + } else { + gaugeBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + if (counterBuilder_ == null) { + counter_ = io.prometheus.client.Metrics.Counter.getDefaultInstance(); + } else { + counterBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000004); + if (summaryBuilder_ == null) { + summary_ = io.prometheus.client.Metrics.Summary.getDefaultInstance(); + } else { + summaryBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000008); + if (untypedBuilder_ == null) { + untyped_ = io.prometheus.client.Metrics.Untyped.getDefaultInstance(); + } else { + untypedBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000010); + if (histogramBuilder_ == null) { + histogram_ = io.prometheus.client.Metrics.Histogram.getDefaultInstance(); + } else { + histogramBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000020); + timestampMs_ = 0L; + bitField0_ = (bitField0_ & ~0x00000040); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return io.prometheus.client.Metrics.internal_static_io_prometheus_client_Metric_descriptor; + } + + public 
io.prometheus.client.Metrics.Metric getDefaultInstanceForType() { + return io.prometheus.client.Metrics.Metric.getDefaultInstance(); + } + + public io.prometheus.client.Metrics.Metric build() { + io.prometheus.client.Metrics.Metric result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public io.prometheus.client.Metrics.Metric buildPartial() { + io.prometheus.client.Metrics.Metric result = new io.prometheus.client.Metrics.Metric(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (labelBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001)) { + label_ = java.util.Collections.unmodifiableList(label_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.label_ = label_; + } else { + result.label_ = labelBuilder_.build(); + } + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000001; + } + if (gaugeBuilder_ == null) { + result.gauge_ = gauge_; + } else { + result.gauge_ = gaugeBuilder_.build(); + } + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000002; + } + if (counterBuilder_ == null) { + result.counter_ = counter_; + } else { + result.counter_ = counterBuilder_.build(); + } + if (((from_bitField0_ & 0x00000008) == 0x00000008)) { + to_bitField0_ |= 0x00000004; + } + if (summaryBuilder_ == null) { + result.summary_ = summary_; + } else { + result.summary_ = summaryBuilder_.build(); + } + if (((from_bitField0_ & 0x00000010) == 0x00000010)) { + to_bitField0_ |= 0x00000008; + } + if (untypedBuilder_ == null) { + result.untyped_ = untyped_; + } else { + result.untyped_ = untypedBuilder_.build(); + } + if (((from_bitField0_ & 0x00000020) == 0x00000020)) { + to_bitField0_ |= 0x00000010; + } + if (histogramBuilder_ == null) { + result.histogram_ = histogram_; + } else { + result.histogram_ = histogramBuilder_.build(); + } + if (((from_bitField0_ & 0x00000040) == 0x00000040)) { + to_bitField0_ |= 
0x00000020; + } + result.timestampMs_ = timestampMs_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof io.prometheus.client.Metrics.Metric) { + return mergeFrom((io.prometheus.client.Metrics.Metric)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(io.prometheus.client.Metrics.Metric other) { + if (other == io.prometheus.client.Metrics.Metric.getDefaultInstance()) return this; + if (labelBuilder_ == null) { + if (!other.label_.isEmpty()) { + if (label_.isEmpty()) { + label_ = other.label_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureLabelIsMutable(); + label_.addAll(other.label_); + } + onChanged(); + } + } else { + if (!other.label_.isEmpty()) { + if (labelBuilder_.isEmpty()) { + labelBuilder_.dispose(); + labelBuilder_ = null; + label_ = other.label_; + bitField0_ = (bitField0_ & ~0x00000001); + labelBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? 
+ getLabelFieldBuilder() : null; + } else { + labelBuilder_.addAllMessages(other.label_); + } + } + } + if (other.hasGauge()) { + mergeGauge(other.getGauge()); + } + if (other.hasCounter()) { + mergeCounter(other.getCounter()); + } + if (other.hasSummary()) { + mergeSummary(other.getSummary()); + } + if (other.hasUntyped()) { + mergeUntyped(other.getUntyped()); + } + if (other.hasHistogram()) { + mergeHistogram(other.getHistogram()); + } + if (other.hasTimestampMs()) { + setTimestampMs(other.getTimestampMs()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + io.prometheus.client.Metrics.Metric parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (io.prometheus.client.Metrics.Metric) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + private java.util.List label_ = + java.util.Collections.emptyList(); + private void ensureLabelIsMutable() { + if (!((bitField0_ & 0x00000001) == 0x00000001)) { + label_ = new java.util.ArrayList(label_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + io.prometheus.client.Metrics.LabelPair, io.prometheus.client.Metrics.LabelPair.Builder, io.prometheus.client.Metrics.LabelPairOrBuilder> labelBuilder_; + + /** + * repeated .io.prometheus.client.LabelPair label = 1; + */ + public java.util.List getLabelList() { + if (labelBuilder_ == null) { + return java.util.Collections.unmodifiableList(label_); + } else { + return labelBuilder_.getMessageList(); + } + } + /** + * repeated 
.io.prometheus.client.LabelPair label = 1; + */ + public int getLabelCount() { + if (labelBuilder_ == null) { + return label_.size(); + } else { + return labelBuilder_.getCount(); + } + } + /** + * repeated .io.prometheus.client.LabelPair label = 1; + */ + public io.prometheus.client.Metrics.LabelPair getLabel(int index) { + if (labelBuilder_ == null) { + return label_.get(index); + } else { + return labelBuilder_.getMessage(index); + } + } + /** + * repeated .io.prometheus.client.LabelPair label = 1; + */ + public Builder setLabel( + int index, io.prometheus.client.Metrics.LabelPair value) { + if (labelBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureLabelIsMutable(); + label_.set(index, value); + onChanged(); + } else { + labelBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .io.prometheus.client.LabelPair label = 1; + */ + public Builder setLabel( + int index, io.prometheus.client.Metrics.LabelPair.Builder builderForValue) { + if (labelBuilder_ == null) { + ensureLabelIsMutable(); + label_.set(index, builderForValue.build()); + onChanged(); + } else { + labelBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .io.prometheus.client.LabelPair label = 1; + */ + public Builder addLabel(io.prometheus.client.Metrics.LabelPair value) { + if (labelBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureLabelIsMutable(); + label_.add(value); + onChanged(); + } else { + labelBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .io.prometheus.client.LabelPair label = 1; + */ + public Builder addLabel( + int index, io.prometheus.client.Metrics.LabelPair value) { + if (labelBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureLabelIsMutable(); + label_.add(index, value); + onChanged(); + } else { + labelBuilder_.addMessage(index, value); + } + return this; + } + /** + * 
repeated .io.prometheus.client.LabelPair label = 1; + */ + public Builder addLabel( + io.prometheus.client.Metrics.LabelPair.Builder builderForValue) { + if (labelBuilder_ == null) { + ensureLabelIsMutable(); + label_.add(builderForValue.build()); + onChanged(); + } else { + labelBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .io.prometheus.client.LabelPair label = 1; + */ + public Builder addLabel( + int index, io.prometheus.client.Metrics.LabelPair.Builder builderForValue) { + if (labelBuilder_ == null) { + ensureLabelIsMutable(); + label_.add(index, builderForValue.build()); + onChanged(); + } else { + labelBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .io.prometheus.client.LabelPair label = 1; + */ + public Builder addAllLabel( + java.lang.Iterable values) { + if (labelBuilder_ == null) { + ensureLabelIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll( + values, label_); + onChanged(); + } else { + labelBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .io.prometheus.client.LabelPair label = 1; + */ + public Builder clearLabel() { + if (labelBuilder_ == null) { + label_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + labelBuilder_.clear(); + } + return this; + } + /** + * repeated .io.prometheus.client.LabelPair label = 1; + */ + public Builder removeLabel(int index) { + if (labelBuilder_ == null) { + ensureLabelIsMutable(); + label_.remove(index); + onChanged(); + } else { + labelBuilder_.remove(index); + } + return this; + } + /** + * repeated .io.prometheus.client.LabelPair label = 1; + */ + public io.prometheus.client.Metrics.LabelPair.Builder getLabelBuilder( + int index) { + return getLabelFieldBuilder().getBuilder(index); + } + /** + * repeated .io.prometheus.client.LabelPair label = 1; + */ + public io.prometheus.client.Metrics.LabelPairOrBuilder 
getLabelOrBuilder( + int index) { + if (labelBuilder_ == null) { + return label_.get(index); } else { + return labelBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .io.prometheus.client.LabelPair label = 1; + */ + public java.util.List + getLabelOrBuilderList() { + if (labelBuilder_ != null) { + return labelBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(label_); + } + } + /** + * repeated .io.prometheus.client.LabelPair label = 1; + */ + public io.prometheus.client.Metrics.LabelPair.Builder addLabelBuilder() { + return getLabelFieldBuilder().addBuilder( + io.prometheus.client.Metrics.LabelPair.getDefaultInstance()); + } + /** + * repeated .io.prometheus.client.LabelPair label = 1; + */ + public io.prometheus.client.Metrics.LabelPair.Builder addLabelBuilder( + int index) { + return getLabelFieldBuilder().addBuilder( + index, io.prometheus.client.Metrics.LabelPair.getDefaultInstance()); + } + /** + * repeated .io.prometheus.client.LabelPair label = 1; + */ + public java.util.List + getLabelBuilderList() { + return getLabelFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + io.prometheus.client.Metrics.LabelPair, io.prometheus.client.Metrics.LabelPair.Builder, io.prometheus.client.Metrics.LabelPairOrBuilder> + getLabelFieldBuilder() { + if (labelBuilder_ == null) { + labelBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + io.prometheus.client.Metrics.LabelPair, io.prometheus.client.Metrics.LabelPair.Builder, io.prometheus.client.Metrics.LabelPairOrBuilder>( + label_, + ((bitField0_ & 0x00000001) == 0x00000001), + getParentForChildren(), + isClean()); + label_ = null; + } + return labelBuilder_; + } + + private io.prometheus.client.Metrics.Gauge gauge_ = io.prometheus.client.Metrics.Gauge.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + io.prometheus.client.Metrics.Gauge, io.prometheus.client.Metrics.Gauge.Builder, 
io.prometheus.client.Metrics.GaugeOrBuilder> gaugeBuilder_; + /** + * optional .io.prometheus.client.Gauge gauge = 2; + */ + public boolean hasGauge() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional .io.prometheus.client.Gauge gauge = 2; + */ + public io.prometheus.client.Metrics.Gauge getGauge() { + if (gaugeBuilder_ == null) { + return gauge_; + } else { + return gaugeBuilder_.getMessage(); + } + } + /** + * optional .io.prometheus.client.Gauge gauge = 2; + */ + public Builder setGauge(io.prometheus.client.Metrics.Gauge value) { + if (gaugeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + gauge_ = value; + onChanged(); + } else { + gaugeBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * optional .io.prometheus.client.Gauge gauge = 2; + */ + public Builder setGauge( + io.prometheus.client.Metrics.Gauge.Builder builderForValue) { + if (gaugeBuilder_ == null) { + gauge_ = builderForValue.build(); + onChanged(); + } else { + gaugeBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * optional .io.prometheus.client.Gauge gauge = 2; + */ + public Builder mergeGauge(io.prometheus.client.Metrics.Gauge value) { + if (gaugeBuilder_ == null) { + if (((bitField0_ & 0x00000002) == 0x00000002) && + gauge_ != io.prometheus.client.Metrics.Gauge.getDefaultInstance()) { + gauge_ = + io.prometheus.client.Metrics.Gauge.newBuilder(gauge_).mergeFrom(value).buildPartial(); + } else { + gauge_ = value; + } + onChanged(); + } else { + gaugeBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * optional .io.prometheus.client.Gauge gauge = 2; + */ + public Builder clearGauge() { + if (gaugeBuilder_ == null) { + gauge_ = io.prometheus.client.Metrics.Gauge.getDefaultInstance(); + onChanged(); + } else { + gaugeBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + /** + * 
optional .io.prometheus.client.Gauge gauge = 2; + */ + public io.prometheus.client.Metrics.Gauge.Builder getGaugeBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return getGaugeFieldBuilder().getBuilder(); + } + /** + * optional .io.prometheus.client.Gauge gauge = 2; + */ + public io.prometheus.client.Metrics.GaugeOrBuilder getGaugeOrBuilder() { + if (gaugeBuilder_ != null) { + return gaugeBuilder_.getMessageOrBuilder(); + } else { + return gauge_; + } + } + /** + * optional .io.prometheus.client.Gauge gauge = 2; + */ + private com.google.protobuf.SingleFieldBuilder< + io.prometheus.client.Metrics.Gauge, io.prometheus.client.Metrics.Gauge.Builder, io.prometheus.client.Metrics.GaugeOrBuilder> + getGaugeFieldBuilder() { + if (gaugeBuilder_ == null) { + gaugeBuilder_ = new com.google.protobuf.SingleFieldBuilder< + io.prometheus.client.Metrics.Gauge, io.prometheus.client.Metrics.Gauge.Builder, io.prometheus.client.Metrics.GaugeOrBuilder>( + getGauge(), + getParentForChildren(), + isClean()); + gauge_ = null; + } + return gaugeBuilder_; + } + + private io.prometheus.client.Metrics.Counter counter_ = io.prometheus.client.Metrics.Counter.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + io.prometheus.client.Metrics.Counter, io.prometheus.client.Metrics.Counter.Builder, io.prometheus.client.Metrics.CounterOrBuilder> counterBuilder_; + /** + * optional .io.prometheus.client.Counter counter = 3; + */ + public boolean hasCounter() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * optional .io.prometheus.client.Counter counter = 3; + */ + public io.prometheus.client.Metrics.Counter getCounter() { + if (counterBuilder_ == null) { + return counter_; + } else { + return counterBuilder_.getMessage(); + } + } + /** + * optional .io.prometheus.client.Counter counter = 3; + */ + public Builder setCounter(io.prometheus.client.Metrics.Counter value) { + if (counterBuilder_ == null) { + if (value == null) { + throw new 
NullPointerException(); + } + counter_ = value; + onChanged(); + } else { + counterBuilder_.setMessage(value); + } + bitField0_ |= 0x00000004; + return this; + } + /** + * optional .io.prometheus.client.Counter counter = 3; + */ + public Builder setCounter( + io.prometheus.client.Metrics.Counter.Builder builderForValue) { + if (counterBuilder_ == null) { + counter_ = builderForValue.build(); + onChanged(); + } else { + counterBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000004; + return this; + } + /** + * optional .io.prometheus.client.Counter counter = 3; + */ + public Builder mergeCounter(io.prometheus.client.Metrics.Counter value) { + if (counterBuilder_ == null) { + if (((bitField0_ & 0x00000004) == 0x00000004) && + counter_ != io.prometheus.client.Metrics.Counter.getDefaultInstance()) { + counter_ = + io.prometheus.client.Metrics.Counter.newBuilder(counter_).mergeFrom(value).buildPartial(); + } else { + counter_ = value; + } + onChanged(); + } else { + counterBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000004; + return this; + } + /** + * optional .io.prometheus.client.Counter counter = 3; + */ + public Builder clearCounter() { + if (counterBuilder_ == null) { + counter_ = io.prometheus.client.Metrics.Counter.getDefaultInstance(); + onChanged(); + } else { + counterBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000004); + return this; + } + /** + * optional .io.prometheus.client.Counter counter = 3; + */ + public io.prometheus.client.Metrics.Counter.Builder getCounterBuilder() { + bitField0_ |= 0x00000004; + onChanged(); + return getCounterFieldBuilder().getBuilder(); + } + /** + * optional .io.prometheus.client.Counter counter = 3; + */ + public io.prometheus.client.Metrics.CounterOrBuilder getCounterOrBuilder() { + if (counterBuilder_ != null) { + return counterBuilder_.getMessageOrBuilder(); + } else { + return counter_; + } + } + /** + * optional .io.prometheus.client.Counter counter = 3; + */ + private 
com.google.protobuf.SingleFieldBuilder< + io.prometheus.client.Metrics.Counter, io.prometheus.client.Metrics.Counter.Builder, io.prometheus.client.Metrics.CounterOrBuilder> + getCounterFieldBuilder() { + if (counterBuilder_ == null) { + counterBuilder_ = new com.google.protobuf.SingleFieldBuilder< + io.prometheus.client.Metrics.Counter, io.prometheus.client.Metrics.Counter.Builder, io.prometheus.client.Metrics.CounterOrBuilder>( + getCounter(), + getParentForChildren(), + isClean()); + counter_ = null; + } + return counterBuilder_; + } + + private io.prometheus.client.Metrics.Summary summary_ = io.prometheus.client.Metrics.Summary.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + io.prometheus.client.Metrics.Summary, io.prometheus.client.Metrics.Summary.Builder, io.prometheus.client.Metrics.SummaryOrBuilder> summaryBuilder_; + /** + * optional .io.prometheus.client.Summary summary = 4; + */ + public boolean hasSummary() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * optional .io.prometheus.client.Summary summary = 4; + */ + public io.prometheus.client.Metrics.Summary getSummary() { + if (summaryBuilder_ == null) { + return summary_; + } else { + return summaryBuilder_.getMessage(); + } + } + /** + * optional .io.prometheus.client.Summary summary = 4; + */ + public Builder setSummary(io.prometheus.client.Metrics.Summary value) { + if (summaryBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + summary_ = value; + onChanged(); + } else { + summaryBuilder_.setMessage(value); + } + bitField0_ |= 0x00000008; + return this; + } + /** + * optional .io.prometheus.client.Summary summary = 4; + */ + public Builder setSummary( + io.prometheus.client.Metrics.Summary.Builder builderForValue) { + if (summaryBuilder_ == null) { + summary_ = builderForValue.build(); + onChanged(); + } else { + summaryBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000008; + return this; + } + /** + * 
optional .io.prometheus.client.Summary summary = 4; + */ + public Builder mergeSummary(io.prometheus.client.Metrics.Summary value) { + if (summaryBuilder_ == null) { + if (((bitField0_ & 0x00000008) == 0x00000008) && + summary_ != io.prometheus.client.Metrics.Summary.getDefaultInstance()) { + summary_ = + io.prometheus.client.Metrics.Summary.newBuilder(summary_).mergeFrom(value).buildPartial(); + } else { + summary_ = value; + } + onChanged(); + } else { + summaryBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000008; + return this; + } + /** + * optional .io.prometheus.client.Summary summary = 4; + */ + public Builder clearSummary() { + if (summaryBuilder_ == null) { + summary_ = io.prometheus.client.Metrics.Summary.getDefaultInstance(); + onChanged(); + } else { + summaryBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000008); + return this; + } + /** + * optional .io.prometheus.client.Summary summary = 4; + */ + public io.prometheus.client.Metrics.Summary.Builder getSummaryBuilder() { + bitField0_ |= 0x00000008; + onChanged(); + return getSummaryFieldBuilder().getBuilder(); + } + /** + * optional .io.prometheus.client.Summary summary = 4; + */ + public io.prometheus.client.Metrics.SummaryOrBuilder getSummaryOrBuilder() { + if (summaryBuilder_ != null) { + return summaryBuilder_.getMessageOrBuilder(); + } else { + return summary_; + } + } + /** + * optional .io.prometheus.client.Summary summary = 4; + */ + private com.google.protobuf.SingleFieldBuilder< + io.prometheus.client.Metrics.Summary, io.prometheus.client.Metrics.Summary.Builder, io.prometheus.client.Metrics.SummaryOrBuilder> + getSummaryFieldBuilder() { + if (summaryBuilder_ == null) { + summaryBuilder_ = new com.google.protobuf.SingleFieldBuilder< + io.prometheus.client.Metrics.Summary, io.prometheus.client.Metrics.Summary.Builder, io.prometheus.client.Metrics.SummaryOrBuilder>( + getSummary(), + getParentForChildren(), + isClean()); + summary_ = null; + } + return summaryBuilder_; + } + + 
private io.prometheus.client.Metrics.Untyped untyped_ = io.prometheus.client.Metrics.Untyped.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + io.prometheus.client.Metrics.Untyped, io.prometheus.client.Metrics.Untyped.Builder, io.prometheus.client.Metrics.UntypedOrBuilder> untypedBuilder_; + /** + * optional .io.prometheus.client.Untyped untyped = 5; + */ + public boolean hasUntyped() { + return ((bitField0_ & 0x00000010) == 0x00000010); + } + /** + * optional .io.prometheus.client.Untyped untyped = 5; + */ + public io.prometheus.client.Metrics.Untyped getUntyped() { + if (untypedBuilder_ == null) { + return untyped_; + } else { + return untypedBuilder_.getMessage(); + } + } + /** + * optional .io.prometheus.client.Untyped untyped = 5; + */ + public Builder setUntyped(io.prometheus.client.Metrics.Untyped value) { + if (untypedBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + untyped_ = value; + onChanged(); + } else { + untypedBuilder_.setMessage(value); + } + bitField0_ |= 0x00000010; + return this; + } + /** + * optional .io.prometheus.client.Untyped untyped = 5; + */ + public Builder setUntyped( + io.prometheus.client.Metrics.Untyped.Builder builderForValue) { + if (untypedBuilder_ == null) { + untyped_ = builderForValue.build(); + onChanged(); + } else { + untypedBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000010; + return this; + } + /** + * optional .io.prometheus.client.Untyped untyped = 5; + */ + public Builder mergeUntyped(io.prometheus.client.Metrics.Untyped value) { + if (untypedBuilder_ == null) { + if (((bitField0_ & 0x00000010) == 0x00000010) && + untyped_ != io.prometheus.client.Metrics.Untyped.getDefaultInstance()) { + untyped_ = + io.prometheus.client.Metrics.Untyped.newBuilder(untyped_).mergeFrom(value).buildPartial(); + } else { + untyped_ = value; + } + onChanged(); + } else { + untypedBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000010; + return this; + } 
+ /** + * optional .io.prometheus.client.Untyped untyped = 5; + */ + public Builder clearUntyped() { + if (untypedBuilder_ == null) { + untyped_ = io.prometheus.client.Metrics.Untyped.getDefaultInstance(); + onChanged(); + } else { + untypedBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000010); + return this; + } + /** + * optional .io.prometheus.client.Untyped untyped = 5; + */ + public io.prometheus.client.Metrics.Untyped.Builder getUntypedBuilder() { + bitField0_ |= 0x00000010; + onChanged(); + return getUntypedFieldBuilder().getBuilder(); + } + /** + * optional .io.prometheus.client.Untyped untyped = 5; + */ + public io.prometheus.client.Metrics.UntypedOrBuilder getUntypedOrBuilder() { + if (untypedBuilder_ != null) { + return untypedBuilder_.getMessageOrBuilder(); + } else { + return untyped_; + } + } + /** + * optional .io.prometheus.client.Untyped untyped = 5; + */ + private com.google.protobuf.SingleFieldBuilder< + io.prometheus.client.Metrics.Untyped, io.prometheus.client.Metrics.Untyped.Builder, io.prometheus.client.Metrics.UntypedOrBuilder> + getUntypedFieldBuilder() { + if (untypedBuilder_ == null) { + untypedBuilder_ = new com.google.protobuf.SingleFieldBuilder< + io.prometheus.client.Metrics.Untyped, io.prometheus.client.Metrics.Untyped.Builder, io.prometheus.client.Metrics.UntypedOrBuilder>( + getUntyped(), + getParentForChildren(), + isClean()); + untyped_ = null; + } + return untypedBuilder_; + } + + private io.prometheus.client.Metrics.Histogram histogram_ = io.prometheus.client.Metrics.Histogram.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + io.prometheus.client.Metrics.Histogram, io.prometheus.client.Metrics.Histogram.Builder, io.prometheus.client.Metrics.HistogramOrBuilder> histogramBuilder_; + /** + * optional .io.prometheus.client.Histogram histogram = 7; + */ + public boolean hasHistogram() { + return ((bitField0_ & 0x00000020) == 0x00000020); + } + /** + * optional .io.prometheus.client.Histogram 
histogram = 7; + */ + public io.prometheus.client.Metrics.Histogram getHistogram() { + if (histogramBuilder_ == null) { + return histogram_; + } else { + return histogramBuilder_.getMessage(); + } + } + /** + * optional .io.prometheus.client.Histogram histogram = 7; + */ + public Builder setHistogram(io.prometheus.client.Metrics.Histogram value) { + if (histogramBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + histogram_ = value; + onChanged(); + } else { + histogramBuilder_.setMessage(value); + } + bitField0_ |= 0x00000020; + return this; + } + /** + * optional .io.prometheus.client.Histogram histogram = 7; + */ + public Builder setHistogram( + io.prometheus.client.Metrics.Histogram.Builder builderForValue) { + if (histogramBuilder_ == null) { + histogram_ = builderForValue.build(); + onChanged(); + } else { + histogramBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000020; + return this; + } + /** + * optional .io.prometheus.client.Histogram histogram = 7; + */ + public Builder mergeHistogram(io.prometheus.client.Metrics.Histogram value) { + if (histogramBuilder_ == null) { + if (((bitField0_ & 0x00000020) == 0x00000020) && + histogram_ != io.prometheus.client.Metrics.Histogram.getDefaultInstance()) { + histogram_ = + io.prometheus.client.Metrics.Histogram.newBuilder(histogram_).mergeFrom(value).buildPartial(); + } else { + histogram_ = value; + } + onChanged(); + } else { + histogramBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000020; + return this; + } + /** + * optional .io.prometheus.client.Histogram histogram = 7; + */ + public Builder clearHistogram() { + if (histogramBuilder_ == null) { + histogram_ = io.prometheus.client.Metrics.Histogram.getDefaultInstance(); + onChanged(); + } else { + histogramBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000020); + return this; + } + /** + * optional .io.prometheus.client.Histogram histogram = 7; + */ + public 
io.prometheus.client.Metrics.Histogram.Builder getHistogramBuilder() { + bitField0_ |= 0x00000020; + onChanged(); + return getHistogramFieldBuilder().getBuilder(); + } + /** + * optional .io.prometheus.client.Histogram histogram = 7; + */ + public io.prometheus.client.Metrics.HistogramOrBuilder getHistogramOrBuilder() { + if (histogramBuilder_ != null) { + return histogramBuilder_.getMessageOrBuilder(); + } else { + return histogram_; + } + } + /** + * optional .io.prometheus.client.Histogram histogram = 7; + */ + private com.google.protobuf.SingleFieldBuilder< + io.prometheus.client.Metrics.Histogram, io.prometheus.client.Metrics.Histogram.Builder, io.prometheus.client.Metrics.HistogramOrBuilder> + getHistogramFieldBuilder() { + if (histogramBuilder_ == null) { + histogramBuilder_ = new com.google.protobuf.SingleFieldBuilder< + io.prometheus.client.Metrics.Histogram, io.prometheus.client.Metrics.Histogram.Builder, io.prometheus.client.Metrics.HistogramOrBuilder>( + getHistogram(), + getParentForChildren(), + isClean()); + histogram_ = null; + } + return histogramBuilder_; + } + + private long timestampMs_ ; + /** + * optional int64 timestamp_ms = 6; + */ + public boolean hasTimestampMs() { + return ((bitField0_ & 0x00000040) == 0x00000040); + } + /** + * optional int64 timestamp_ms = 6; + */ + public long getTimestampMs() { + return timestampMs_; + } + /** + * optional int64 timestamp_ms = 6; + */ + public Builder setTimestampMs(long value) { + bitField0_ |= 0x00000040; + timestampMs_ = value; + onChanged(); + return this; + } + /** + * optional int64 timestamp_ms = 6; + */ + public Builder clearTimestampMs() { + bitField0_ = (bitField0_ & ~0x00000040); + timestampMs_ = 0L; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:io.prometheus.client.Metric) + } + + static { + defaultInstance = new Metric(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:io.prometheus.client.Metric) + } + + public 
interface MetricFamilyOrBuilder extends + // @@protoc_insertion_point(interface_extends:io.prometheus.client.MetricFamily) + com.google.protobuf.MessageOrBuilder { + + /** + * optional string name = 1; + */ + boolean hasName(); + /** + * optional string name = 1; + */ + java.lang.String getName(); + /** + * optional string name = 1; + */ + com.google.protobuf.ByteString + getNameBytes(); + + /** + * optional string help = 2; + */ + boolean hasHelp(); + /** + * optional string help = 2; + */ + java.lang.String getHelp(); + /** + * optional string help = 2; + */ + com.google.protobuf.ByteString + getHelpBytes(); + + /** + * optional .io.prometheus.client.MetricType type = 3; + */ + boolean hasType(); + /** + * optional .io.prometheus.client.MetricType type = 3; + */ + io.prometheus.client.Metrics.MetricType getType(); + + /** + * repeated .io.prometheus.client.Metric metric = 4; + */ + java.util.List + getMetricList(); + /** + * repeated .io.prometheus.client.Metric metric = 4; + */ + io.prometheus.client.Metrics.Metric getMetric(int index); + /** + * repeated .io.prometheus.client.Metric metric = 4; + */ + int getMetricCount(); + /** + * repeated .io.prometheus.client.Metric metric = 4; + */ + java.util.List + getMetricOrBuilderList(); + /** + * repeated .io.prometheus.client.Metric metric = 4; + */ + io.prometheus.client.Metrics.MetricOrBuilder getMetricOrBuilder( + int index); + } + /** + * Protobuf type {@code io.prometheus.client.MetricFamily} + */ + public static final class MetricFamily extends + com.google.protobuf.GeneratedMessage implements + // @@protoc_insertion_point(message_implements:io.prometheus.client.MetricFamily) + MetricFamilyOrBuilder { + // Use MetricFamily.newBuilder() to construct. 
+ private MetricFamily(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private MetricFamily(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final MetricFamily defaultInstance; + public static MetricFamily getDefaultInstance() { + return defaultInstance; + } + + public MetricFamily getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private MetricFamily( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + com.google.protobuf.ByteString bs = input.readBytes(); + bitField0_ |= 0x00000001; + name_ = bs; + break; + } + case 18: { + com.google.protobuf.ByteString bs = input.readBytes(); + bitField0_ |= 0x00000002; + help_ = bs; + break; + } + case 24: { + int rawValue = input.readEnum(); + io.prometheus.client.Metrics.MetricType value = io.prometheus.client.Metrics.MetricType.valueOf(rawValue); + if (value == null) { + unknownFields.mergeVarintField(3, rawValue); + } else { + bitField0_ |= 0x00000004; + type_ = value; + } + break; + } + case 34: { + if (!((mutable_bitField0_ & 0x00000008) == 0x00000008)) { + metric_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000008; + 
} + metric_.add(input.readMessage(io.prometheus.client.Metrics.Metric.PARSER, extensionRegistry)); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000008) == 0x00000008)) { + metric_ = java.util.Collections.unmodifiableList(metric_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return io.prometheus.client.Metrics.internal_static_io_prometheus_client_MetricFamily_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return io.prometheus.client.Metrics.internal_static_io_prometheus_client_MetricFamily_fieldAccessorTable + .ensureFieldAccessorsInitialized( + io.prometheus.client.Metrics.MetricFamily.class, io.prometheus.client.Metrics.MetricFamily.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public MetricFamily parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new MetricFamily(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + public static final int NAME_FIELD_NUMBER = 1; + private java.lang.Object name_; + /** + * optional string name = 1; + */ + public boolean hasName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional string name = 1; + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof 
java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + name_ = s; + } + return s; + } + } + /** + * optional string name = 1; + */ + public com.google.protobuf.ByteString + getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int HELP_FIELD_NUMBER = 2; + private java.lang.Object help_; + /** + * optional string help = 2; + */ + public boolean hasHelp() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional string help = 2; + */ + public java.lang.String getHelp() { + java.lang.Object ref = help_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + help_ = s; + } + return s; + } + } + /** + * optional string help = 2; + */ + public com.google.protobuf.ByteString + getHelpBytes() { + java.lang.Object ref = help_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + help_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int TYPE_FIELD_NUMBER = 3; + private io.prometheus.client.Metrics.MetricType type_; + /** + * optional .io.prometheus.client.MetricType type = 3; + */ + public boolean hasType() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * optional .io.prometheus.client.MetricType type = 3; + */ + public io.prometheus.client.Metrics.MetricType getType() { + return type_; + } + + 
public static final int METRIC_FIELD_NUMBER = 4; + private java.util.List metric_; + /** + * repeated .io.prometheus.client.Metric metric = 4; + */ + public java.util.List getMetricList() { + return metric_; + } + /** + * repeated .io.prometheus.client.Metric metric = 4; + */ + public java.util.List + getMetricOrBuilderList() { + return metric_; + } + /** + * repeated .io.prometheus.client.Metric metric = 4; + */ + public int getMetricCount() { + return metric_.size(); + } + /** + * repeated .io.prometheus.client.Metric metric = 4; + */ + public io.prometheus.client.Metrics.Metric getMetric(int index) { + return metric_.get(index); + } + /** + * repeated .io.prometheus.client.Metric metric = 4; + */ + public io.prometheus.client.Metrics.MetricOrBuilder getMetricOrBuilder( + int index) { + return metric_.get(index); + } + + private void initFields() { + name_ = ""; + help_ = ""; + type_ = io.prometheus.client.Metrics.MetricType.COUNTER; + metric_ = java.util.Collections.emptyList(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getNameBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeBytes(2, getHelpBytes()); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeEnum(3, type_.getNumber()); + } + for (int i = 0; i < metric_.size(); i++) { + output.writeMessage(4, metric_.get(i)); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) 
== 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getNameBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(2, getHelpBytes()); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeEnumSize(3, type_.getNumber()); + } + for (int i = 0; i < metric_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(4, metric_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static io.prometheus.client.Metrics.MetricFamily parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static io.prometheus.client.Metrics.MetricFamily parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static io.prometheus.client.Metrics.MetricFamily parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static io.prometheus.client.Metrics.MetricFamily parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static io.prometheus.client.Metrics.MetricFamily parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static io.prometheus.client.Metrics.MetricFamily 
parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static io.prometheus.client.Metrics.MetricFamily parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static io.prometheus.client.Metrics.MetricFamily parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static io.prometheus.client.Metrics.MetricFamily parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static io.prometheus.client.Metrics.MetricFamily parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(io.prometheus.client.Metrics.MetricFamily prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code io.prometheus.client.MetricFamily} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder implements + // @@protoc_insertion_point(builder_implements:io.prometheus.client.MetricFamily) + io.prometheus.client.Metrics.MetricFamilyOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() 
{ + return io.prometheus.client.Metrics.internal_static_io_prometheus_client_MetricFamily_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return io.prometheus.client.Metrics.internal_static_io_prometheus_client_MetricFamily_fieldAccessorTable + .ensureFieldAccessorsInitialized( + io.prometheus.client.Metrics.MetricFamily.class, io.prometheus.client.Metrics.MetricFamily.Builder.class); + } + + // Construct using io.prometheus.client.Metrics.MetricFamily.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getMetricFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + name_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + help_ = ""; + bitField0_ = (bitField0_ & ~0x00000002); + type_ = io.prometheus.client.Metrics.MetricType.COUNTER; + bitField0_ = (bitField0_ & ~0x00000004); + if (metricBuilder_ == null) { + metric_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000008); + } else { + metricBuilder_.clear(); + } + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return io.prometheus.client.Metrics.internal_static_io_prometheus_client_MetricFamily_descriptor; + } + + public io.prometheus.client.Metrics.MetricFamily getDefaultInstanceForType() { + return io.prometheus.client.Metrics.MetricFamily.getDefaultInstance(); + } + + public io.prometheus.client.Metrics.MetricFamily build() { + io.prometheus.client.Metrics.MetricFamily result = buildPartial(); + if (!result.isInitialized()) 
{ + throw newUninitializedMessageException(result); + } + return result; + } + + public io.prometheus.client.Metrics.MetricFamily buildPartial() { + io.prometheus.client.Metrics.MetricFamily result = new io.prometheus.client.Metrics.MetricFamily(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.name_ = name_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.help_ = help_; + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + result.type_ = type_; + if (metricBuilder_ == null) { + if (((bitField0_ & 0x00000008) == 0x00000008)) { + metric_ = java.util.Collections.unmodifiableList(metric_); + bitField0_ = (bitField0_ & ~0x00000008); + } + result.metric_ = metric_; + } else { + result.metric_ = metricBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof io.prometheus.client.Metrics.MetricFamily) { + return mergeFrom((io.prometheus.client.Metrics.MetricFamily)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(io.prometheus.client.Metrics.MetricFamily other) { + if (other == io.prometheus.client.Metrics.MetricFamily.getDefaultInstance()) return this; + if (other.hasName()) { + bitField0_ |= 0x00000001; + name_ = other.name_; + onChanged(); + } + if (other.hasHelp()) { + bitField0_ |= 0x00000002; + help_ = other.help_; + onChanged(); + } + if (other.hasType()) { + setType(other.getType()); + } + if (metricBuilder_ == null) { + if (!other.metric_.isEmpty()) { + if (metric_.isEmpty()) { + metric_ = other.metric_; + bitField0_ = (bitField0_ & ~0x00000008); + } else { + ensureMetricIsMutable(); + metric_.addAll(other.metric_); + } + onChanged(); + } + } else { + if (!other.metric_.isEmpty()) { + if 
(metricBuilder_.isEmpty()) { + metricBuilder_.dispose(); + metricBuilder_ = null; + metric_ = other.metric_; + bitField0_ = (bitField0_ & ~0x00000008); + metricBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? + getMetricFieldBuilder() : null; + } else { + metricBuilder_.addAllMessages(other.metric_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + io.prometheus.client.Metrics.MetricFamily parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (io.prometheus.client.Metrics.MetricFamily) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + private java.lang.Object name_ = ""; + /** + * optional string name = 1; + */ + public boolean hasName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional string name = 1; + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + name_ = s; + } + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * optional string name = 1; + */ + public com.google.protobuf.ByteString + getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** 
+ * optional string name = 1; + */ + public Builder setName( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + name_ = value; + onChanged(); + return this; + } + /** + * optional string name = 1; + */ + public Builder clearName() { + bitField0_ = (bitField0_ & ~0x00000001); + name_ = getDefaultInstance().getName(); + onChanged(); + return this; + } + /** + * optional string name = 1; + */ + public Builder setNameBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + name_ = value; + onChanged(); + return this; + } + + private java.lang.Object help_ = ""; + /** + * optional string help = 2; + */ + public boolean hasHelp() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional string help = 2; + */ + public java.lang.String getHelp() { + java.lang.Object ref = help_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + help_ = s; + } + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * optional string help = 2; + */ + public com.google.protobuf.ByteString + getHelpBytes() { + java.lang.Object ref = help_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + help_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * optional string help = 2; + */ + public Builder setHelp( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + help_ = value; + onChanged(); + return this; + } + /** + * optional string help = 2; + */ + public Builder clearHelp() { + bitField0_ = (bitField0_ & ~0x00000002); + help_ = getDefaultInstance().getHelp(); + 
onChanged(); + return this; + } + /** + * optional string help = 2; + */ + public Builder setHelpBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + help_ = value; + onChanged(); + return this; + } + + private io.prometheus.client.Metrics.MetricType type_ = io.prometheus.client.Metrics.MetricType.COUNTER; + /** + * optional .io.prometheus.client.MetricType type = 3; + */ + public boolean hasType() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * optional .io.prometheus.client.MetricType type = 3; + */ + public io.prometheus.client.Metrics.MetricType getType() { + return type_; + } + /** + * optional .io.prometheus.client.MetricType type = 3; + */ + public Builder setType(io.prometheus.client.Metrics.MetricType value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000004; + type_ = value; + onChanged(); + return this; + } + /** + * optional .io.prometheus.client.MetricType type = 3; + */ + public Builder clearType() { + bitField0_ = (bitField0_ & ~0x00000004); + type_ = io.prometheus.client.Metrics.MetricType.COUNTER; + onChanged(); + return this; + } + + private java.util.List metric_ = + java.util.Collections.emptyList(); + private void ensureMetricIsMutable() { + if (!((bitField0_ & 0x00000008) == 0x00000008)) { + metric_ = new java.util.ArrayList(metric_); + bitField0_ |= 0x00000008; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + io.prometheus.client.Metrics.Metric, io.prometheus.client.Metrics.Metric.Builder, io.prometheus.client.Metrics.MetricOrBuilder> metricBuilder_; + + /** + * repeated .io.prometheus.client.Metric metric = 4; + */ + public java.util.List getMetricList() { + if (metricBuilder_ == null) { + return java.util.Collections.unmodifiableList(metric_); + } else { + return metricBuilder_.getMessageList(); + } + } + /** + * repeated .io.prometheus.client.Metric metric = 4; + */ + public 
int getMetricCount() { + if (metricBuilder_ == null) { + return metric_.size(); + } else { + return metricBuilder_.getCount(); + } + } + /** + * repeated .io.prometheus.client.Metric metric = 4; + */ + public io.prometheus.client.Metrics.Metric getMetric(int index) { + if (metricBuilder_ == null) { + return metric_.get(index); + } else { + return metricBuilder_.getMessage(index); + } + } + /** + * repeated .io.prometheus.client.Metric metric = 4; + */ + public Builder setMetric( + int index, io.prometheus.client.Metrics.Metric value) { + if (metricBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureMetricIsMutable(); + metric_.set(index, value); + onChanged(); + } else { + metricBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .io.prometheus.client.Metric metric = 4; + */ + public Builder setMetric( + int index, io.prometheus.client.Metrics.Metric.Builder builderForValue) { + if (metricBuilder_ == null) { + ensureMetricIsMutable(); + metric_.set(index, builderForValue.build()); + onChanged(); + } else { + metricBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .io.prometheus.client.Metric metric = 4; + */ + public Builder addMetric(io.prometheus.client.Metrics.Metric value) { + if (metricBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureMetricIsMutable(); + metric_.add(value); + onChanged(); + } else { + metricBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .io.prometheus.client.Metric metric = 4; + */ + public Builder addMetric( + int index, io.prometheus.client.Metrics.Metric value) { + if (metricBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureMetricIsMutable(); + metric_.add(index, value); + onChanged(); + } else { + metricBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .io.prometheus.client.Metric metric = 4; + */ + 
public Builder addMetric( + io.prometheus.client.Metrics.Metric.Builder builderForValue) { + if (metricBuilder_ == null) { + ensureMetricIsMutable(); + metric_.add(builderForValue.build()); + onChanged(); + } else { + metricBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .io.prometheus.client.Metric metric = 4; + */ + public Builder addMetric( + int index, io.prometheus.client.Metrics.Metric.Builder builderForValue) { + if (metricBuilder_ == null) { + ensureMetricIsMutable(); + metric_.add(index, builderForValue.build()); + onChanged(); + } else { + metricBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .io.prometheus.client.Metric metric = 4; + */ + public Builder addAllMetric( + java.lang.Iterable values) { + if (metricBuilder_ == null) { + ensureMetricIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll( + values, metric_); + onChanged(); + } else { + metricBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .io.prometheus.client.Metric metric = 4; + */ + public Builder clearMetric() { + if (metricBuilder_ == null) { + metric_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000008); + onChanged(); + } else { + metricBuilder_.clear(); + } + return this; + } + /** + * repeated .io.prometheus.client.Metric metric = 4; + */ + public Builder removeMetric(int index) { + if (metricBuilder_ == null) { + ensureMetricIsMutable(); + metric_.remove(index); + onChanged(); + } else { + metricBuilder_.remove(index); + } + return this; + } + /** + * repeated .io.prometheus.client.Metric metric = 4; + */ + public io.prometheus.client.Metrics.Metric.Builder getMetricBuilder( + int index) { + return getMetricFieldBuilder().getBuilder(index); + } + /** + * repeated .io.prometheus.client.Metric metric = 4; + */ + public io.prometheus.client.Metrics.MetricOrBuilder getMetricOrBuilder( + int index) { + if (metricBuilder_ == null) { + 
return metric_.get(index); } else { + return metricBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .io.prometheus.client.Metric metric = 4; + */ + public java.util.List + getMetricOrBuilderList() { + if (metricBuilder_ != null) { + return metricBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(metric_); + } + } + /** + * repeated .io.prometheus.client.Metric metric = 4; + */ + public io.prometheus.client.Metrics.Metric.Builder addMetricBuilder() { + return getMetricFieldBuilder().addBuilder( + io.prometheus.client.Metrics.Metric.getDefaultInstance()); + } + /** + * repeated .io.prometheus.client.Metric metric = 4; + */ + public io.prometheus.client.Metrics.Metric.Builder addMetricBuilder( + int index) { + return getMetricFieldBuilder().addBuilder( + index, io.prometheus.client.Metrics.Metric.getDefaultInstance()); + } + /** + * repeated .io.prometheus.client.Metric metric = 4; + */ + public java.util.List + getMetricBuilderList() { + return getMetricFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + io.prometheus.client.Metrics.Metric, io.prometheus.client.Metrics.Metric.Builder, io.prometheus.client.Metrics.MetricOrBuilder> + getMetricFieldBuilder() { + if (metricBuilder_ == null) { + metricBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + io.prometheus.client.Metrics.Metric, io.prometheus.client.Metrics.Metric.Builder, io.prometheus.client.Metrics.MetricOrBuilder>( + metric_, + ((bitField0_ & 0x00000008) == 0x00000008), + getParentForChildren(), + isClean()); + metric_ = null; + } + return metricBuilder_; + } + + // @@protoc_insertion_point(builder_scope:io.prometheus.client.MetricFamily) + } + + static { + defaultInstance = new MetricFamily(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:io.prometheus.client.MetricFamily) + } + + private static final com.google.protobuf.Descriptors.Descriptor + 
internal_static_io_prometheus_client_LabelPair_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_io_prometheus_client_LabelPair_fieldAccessorTable; + private static final com.google.protobuf.Descriptors.Descriptor + internal_static_io_prometheus_client_Gauge_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_io_prometheus_client_Gauge_fieldAccessorTable; + private static final com.google.protobuf.Descriptors.Descriptor + internal_static_io_prometheus_client_Counter_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_io_prometheus_client_Counter_fieldAccessorTable; + private static final com.google.protobuf.Descriptors.Descriptor + internal_static_io_prometheus_client_Quantile_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_io_prometheus_client_Quantile_fieldAccessorTable; + private static final com.google.protobuf.Descriptors.Descriptor + internal_static_io_prometheus_client_Summary_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_io_prometheus_client_Summary_fieldAccessorTable; + private static final com.google.protobuf.Descriptors.Descriptor + internal_static_io_prometheus_client_Untyped_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_io_prometheus_client_Untyped_fieldAccessorTable; + private static final com.google.protobuf.Descriptors.Descriptor + internal_static_io_prometheus_client_Histogram_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_io_prometheus_client_Histogram_fieldAccessorTable; + private static final com.google.protobuf.Descriptors.Descriptor + internal_static_io_prometheus_client_Bucket_descriptor; + private static + 
com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_io_prometheus_client_Bucket_fieldAccessorTable; + private static final com.google.protobuf.Descriptors.Descriptor + internal_static_io_prometheus_client_Metric_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_io_prometheus_client_Metric_fieldAccessorTable; + private static final com.google.protobuf.Descriptors.Descriptor + internal_static_io_prometheus_client_MetricFamily_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_io_prometheus_client_MetricFamily_fieldAccessorTable; + + public static com.google.protobuf.Descriptors.FileDescriptor + getDescriptor() { + return descriptor; + } + private static com.google.protobuf.Descriptors.FileDescriptor + descriptor; + static { + java.lang.String[] descriptorData = { + "\n\rmetrics.proto\022\024io.prometheus.client\"(\n" + + "\tLabelPair\022\014\n\004name\030\001 \001(\t\022\r\n\005value\030\002 \001(\t\"" + + "\026\n\005Gauge\022\r\n\005value\030\001 \001(\001\"\030\n\007Counter\022\r\n\005va" + + "lue\030\001 \001(\001\"+\n\010Quantile\022\020\n\010quantile\030\001 \001(\001\022" + + "\r\n\005value\030\002 \001(\001\"e\n\007Summary\022\024\n\014sample_coun" + + "t\030\001 \001(\004\022\022\n\nsample_sum\030\002 \001(\001\0220\n\010quantile\030" + + "\003 \003(\0132\036.io.prometheus.client.Quantile\"\030\n" + + "\007Untyped\022\r\n\005value\030\001 \001(\001\"c\n\tHistogram\022\024\n\014" + + "sample_count\030\001 \001(\004\022\022\n\nsample_sum\030\002 \001(\001\022," + + "\n\006bucket\030\003 \003(\0132\034.io.prometheus.client.Bu", + "cket\"7\n\006Bucket\022\030\n\020cumulative_count\030\001 \001(\004" + + "\022\023\n\013upper_bound\030\002 \001(\001\"\276\002\n\006Metric\022.\n\005labe" + + "l\030\001 \003(\0132\037.io.prometheus.client.LabelPair" + + "\022*\n\005gauge\030\002 \001(\0132\033.io.prometheus.client.G" + + 
"auge\022.\n\007counter\030\003 \001(\0132\035.io.prometheus.cl" + + "ient.Counter\022.\n\007summary\030\004 \001(\0132\035.io.prome" + + "theus.client.Summary\022.\n\007untyped\030\005 \001(\0132\035." + + "io.prometheus.client.Untyped\0222\n\thistogra" + + "m\030\007 \001(\0132\037.io.prometheus.client.Histogram" + + "\022\024\n\014timestamp_ms\030\006 \001(\003\"\210\001\n\014MetricFamily\022", + "\014\n\004name\030\001 \001(\t\022\014\n\004help\030\002 \001(\t\022.\n\004type\030\003 \001(" + + "\0162 .io.prometheus.client.MetricType\022,\n\006m" + + "etric\030\004 \003(\0132\034.io.prometheus.client.Metri" + + "c*M\n\nMetricType\022\013\n\007COUNTER\020\000\022\t\n\005GAUGE\020\001\022" + + "\013\n\007SUMMARY\020\002\022\013\n\007UNTYPED\020\003\022\r\n\tHISTOGRAM\020\004" + + "B\026\n\024io.prometheus.client" + }; + com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = + new com.google.protobuf.Descriptors.FileDescriptor. InternalDescriptorAssigner() { + public com.google.protobuf.ExtensionRegistry assignDescriptors( + com.google.protobuf.Descriptors.FileDescriptor root) { + descriptor = root; + return null; + } + }; + com.google.protobuf.Descriptors.FileDescriptor + .internalBuildGeneratedFileFrom(descriptorData, + new com.google.protobuf.Descriptors.FileDescriptor[] { + }, assigner); + internal_static_io_prometheus_client_LabelPair_descriptor = + getDescriptor().getMessageTypes().get(0); + internal_static_io_prometheus_client_LabelPair_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_io_prometheus_client_LabelPair_descriptor, + new java.lang.String[] { "Name", "Value", }); + internal_static_io_prometheus_client_Gauge_descriptor = + getDescriptor().getMessageTypes().get(1); + internal_static_io_prometheus_client_Gauge_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_io_prometheus_client_Gauge_descriptor, + new 
java.lang.String[] { "Value", }); + internal_static_io_prometheus_client_Counter_descriptor = + getDescriptor().getMessageTypes().get(2); + internal_static_io_prometheus_client_Counter_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_io_prometheus_client_Counter_descriptor, + new java.lang.String[] { "Value", }); + internal_static_io_prometheus_client_Quantile_descriptor = + getDescriptor().getMessageTypes().get(3); + internal_static_io_prometheus_client_Quantile_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_io_prometheus_client_Quantile_descriptor, + new java.lang.String[] { "Quantile", "Value", }); + internal_static_io_prometheus_client_Summary_descriptor = + getDescriptor().getMessageTypes().get(4); + internal_static_io_prometheus_client_Summary_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_io_prometheus_client_Summary_descriptor, + new java.lang.String[] { "SampleCount", "SampleSum", "Quantile", }); + internal_static_io_prometheus_client_Untyped_descriptor = + getDescriptor().getMessageTypes().get(5); + internal_static_io_prometheus_client_Untyped_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_io_prometheus_client_Untyped_descriptor, + new java.lang.String[] { "Value", }); + internal_static_io_prometheus_client_Histogram_descriptor = + getDescriptor().getMessageTypes().get(6); + internal_static_io_prometheus_client_Histogram_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_io_prometheus_client_Histogram_descriptor, + new java.lang.String[] { "SampleCount", "SampleSum", "Bucket", }); + internal_static_io_prometheus_client_Bucket_descriptor = + getDescriptor().getMessageTypes().get(7); + internal_static_io_prometheus_client_Bucket_fieldAccessorTable = new + 
com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_io_prometheus_client_Bucket_descriptor, + new java.lang.String[] { "CumulativeCount", "UpperBound", }); + internal_static_io_prometheus_client_Metric_descriptor = + getDescriptor().getMessageTypes().get(8); + internal_static_io_prometheus_client_Metric_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_io_prometheus_client_Metric_descriptor, + new java.lang.String[] { "Label", "Gauge", "Counter", "Summary", "Untyped", "Histogram", "TimestampMs", }); + internal_static_io_prometheus_client_MetricFamily_descriptor = + getDescriptor().getMessageTypes().get(9); + internal_static_io_prometheus_client_MetricFamily_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_io_prometheus_client_MetricFamily_descriptor, + new java.lang.String[] { "Name", "Help", "Type", "Metric", }); + } + + // @@protoc_insertion_point(outer_class_scope) +} diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/common/AUTHORS.md juju-core-2.0.0/src/github.com/prometheus/common/AUTHORS.md --- juju-core-2.0~beta15/src/github.com/prometheus/common/AUTHORS.md 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/common/AUTHORS.md 2016-10-13 14:32:23.000000000 +0000 @@ -0,0 +1,11 @@ +Maintainers of this repository: + +* Fabian Reinartz + +The following individuals have contributed code to this repository +(listed in alphabetical order): + +* Björn Rabenstein +* Fabian Reinartz +* Julius Volz +* Miguel Molina diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/common/config/config.go juju-core-2.0.0/src/github.com/prometheus/common/config/config.go --- juju-core-2.0~beta15/src/github.com/prometheus/common/config/config.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/common/config/config.go 2016-10-13 14:32:23.000000000 +0000 @@ -0,0 +1,30 @@ +// Copyright 2016 
The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package config + +import ( + "fmt" + "strings" +) + +func checkOverflow(m map[string]interface{}, ctx string) error { + if len(m) > 0 { + var keys []string + for k := range m { + keys = append(keys, k) + } + return fmt.Errorf("unknown fields in %s: %s", ctx, strings.Join(keys, ", ")) + } + return nil +} diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/common/config/testdata/tls_config.cert_no_key.bad.yml juju-core-2.0.0/src/github.com/prometheus/common/config/testdata/tls_config.cert_no_key.bad.yml --- juju-core-2.0~beta15/src/github.com/prometheus/common/config/testdata/tls_config.cert_no_key.bad.yml 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/common/config/testdata/tls_config.cert_no_key.bad.yml 2016-10-13 14:32:23.000000000 +0000 @@ -0,0 +1 @@ +cert_file: somefile diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/common/config/testdata/tls_config.insecure.good.yml juju-core-2.0.0/src/github.com/prometheus/common/config/testdata/tls_config.insecure.good.yml --- juju-core-2.0~beta15/src/github.com/prometheus/common/config/testdata/tls_config.insecure.good.yml 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/common/config/testdata/tls_config.insecure.good.yml 2016-10-13 14:32:23.000000000 +0000 @@ -0,0 +1 @@ +insecure_skip_verify: true diff -Nru 
juju-core-2.0~beta15/src/github.com/prometheus/common/config/testdata/tls_config.invalid_field.bad.yml juju-core-2.0.0/src/github.com/prometheus/common/config/testdata/tls_config.invalid_field.bad.yml --- juju-core-2.0~beta15/src/github.com/prometheus/common/config/testdata/tls_config.invalid_field.bad.yml 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/common/config/testdata/tls_config.invalid_field.bad.yml 2016-10-13 14:32:23.000000000 +0000 @@ -0,0 +1 @@ +something_invalid: true diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/common/config/testdata/tls_config.key_no_cert.bad.yml juju-core-2.0.0/src/github.com/prometheus/common/config/testdata/tls_config.key_no_cert.bad.yml --- juju-core-2.0~beta15/src/github.com/prometheus/common/config/testdata/tls_config.key_no_cert.bad.yml 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/common/config/testdata/tls_config.key_no_cert.bad.yml 2016-10-13 14:32:23.000000000 +0000 @@ -0,0 +1 @@ +key_file: somefile diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/common/config/tls_config.go juju-core-2.0.0/src/github.com/prometheus/common/config/tls_config.go --- juju-core-2.0~beta15/src/github.com/prometheus/common/config/tls_config.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/common/config/tls_config.go 2016-10-13 14:32:23.000000000 +0000 @@ -0,0 +1,79 @@ +// Copyright 2016 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package config + +import ( + "crypto/tls" + "crypto/x509" + "fmt" + "io/ioutil" +) + +// TLSConfig configures the options for TLS connections. +type TLSConfig struct { + // The CA cert to use for the targets. + CAFile string `yaml:"ca_file,omitempty"` + // The client cert file for the targets. + CertFile string `yaml:"cert_file,omitempty"` + // The client key file for the targets. + KeyFile string `yaml:"key_file,omitempty"` + // Disable target certificate validation. + InsecureSkipVerify bool `yaml:"insecure_skip_verify"` + + // Catches all undefined fields and must be empty after parsing. + XXX map[string]interface{} `yaml:",inline"` +} + +// UnmarshalYAML implements the yaml.Unmarshaler interface. +func (c *TLSConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { + type plain TLSConfig + if err := unmarshal((*plain)(c)); err != nil { + return err + } + return checkOverflow(c.XXX, "TLS config") +} + +// GenerateConfig produces a tls.Config based on TLS connection options. +// It loads certificate files from disk if they are defined. +func (c *TLSConfig) GenerateConfig() (*tls.Config, error) { + tlsConfig := &tls.Config{InsecureSkipVerify: c.InsecureSkipVerify} + + // If a CA cert is provided then let's read it in so we can validate the + // scrape target's certificate properly. + if len(c.CAFile) > 0 { + caCertPool := x509.NewCertPool() + // Load CA cert. 
+ caCert, err := ioutil.ReadFile(c.CAFile) + if err != nil { + return nil, fmt.Errorf("unable to use specified CA cert %s: %s", c.CAFile, err) + } + caCertPool.AppendCertsFromPEM(caCert) + tlsConfig.RootCAs = caCertPool + } + + if len(c.CertFile) > 0 && len(c.KeyFile) == 0 { + return nil, fmt.Errorf("client cert file %q specified without client key file", c.CertFile) + } else if len(c.KeyFile) > 0 && len(c.CertFile) == 0 { + return nil, fmt.Errorf("client key file %q specified without client cert file", c.KeyFile) + } else if len(c.CertFile) > 0 && len(c.KeyFile) > 0 { + cert, err := tls.LoadX509KeyPair(c.CertFile, c.KeyFile) + if err != nil { + return nil, fmt.Errorf("unable to use specified client cert (%s) & key (%s): %s", c.CertFile, c.KeyFile, err) + } + tlsConfig.Certificates = []tls.Certificate{cert} + } + tlsConfig.BuildNameToCertificate() + + return tlsConfig, nil +} diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/common/config/tls_config_test.go juju-core-2.0.0/src/github.com/prometheus/common/config/tls_config_test.go --- juju-core-2.0~beta15/src/github.com/prometheus/common/config/tls_config_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/common/config/tls_config_test.go 2016-10-13 14:32:23.000000000 +0000 @@ -0,0 +1,92 @@ +// Copyright 2016 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package config + +import ( + "crypto/tls" + "io/ioutil" + "reflect" + "strings" + "testing" + + "gopkg.in/yaml.v2" +) + +// LoadTLSConfig parses the given YAML file into a tls.Config. +func LoadTLSConfig(filename string) (*tls.Config, error) { + content, err := ioutil.ReadFile(filename) + if err != nil { + return nil, err + } + cfg := &TLSConfig{} + if err = yaml.Unmarshal(content, cfg); err != nil { + return nil, err + } + return cfg.GenerateConfig() +} + +var expectedTLSConfigs = []struct { + filename string + config *tls.Config +}{ + { + filename: "tls_config.empty.good.yml", + config: &tls.Config{}, + }, { + filename: "tls_config.insecure.good.yml", + config: &tls.Config{InsecureSkipVerify: true}, + }, +} + +func TestValidTLSConfig(t *testing.T) { + for _, cfg := range expectedTLSConfigs { + cfg.config.BuildNameToCertificate() + got, err := LoadTLSConfig("testdata/" + cfg.filename) + if err != nil { + t.Errorf("Error parsing %s: %s", cfg.filename, err) + } + if !reflect.DeepEqual(*got, *cfg.config) { + t.Fatalf("%s: unexpected config result: \n\n%s\n expected\n\n%s", cfg.filename, got, cfg.config) + } + } +} + +var expectedTLSConfigErrors = []struct { + filename string + errMsg string +}{ + { + filename: "tls_config.invalid_field.bad.yml", + errMsg: "unknown fields in", + }, { + filename: "tls_config.cert_no_key.bad.yml", + errMsg: "specified without client key file", + }, { + filename: "tls_config.key_no_cert.bad.yml", + errMsg: "specified without client cert file", + }, +} + +func TestBadTLSConfigs(t *testing.T) { + for _, ee := range expectedTLSConfigErrors { + _, err := LoadTLSConfig("testdata/" + ee.filename) + if err == nil { + t.Errorf("Expected error parsing %s but got none", ee.filename) + continue + } + if !strings.Contains(err.Error(), ee.errMsg) { + t.Errorf("Expected error for %s to contain %q but got: %s", ee.filename, ee.errMsg, err) + } + } +} diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/common/CONTRIBUTING.md 
juju-core-2.0.0/src/github.com/prometheus/common/CONTRIBUTING.md --- juju-core-2.0~beta15/src/github.com/prometheus/common/CONTRIBUTING.md 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/common/CONTRIBUTING.md 2016-10-13 14:32:23.000000000 +0000 @@ -0,0 +1,18 @@ +# Contributing + +Prometheus uses GitHub to manage reviews of pull requests. + +* If you have a trivial fix or improvement, go ahead and create a pull + request, addressing (with `@...`) one or more of the maintainers + (see [AUTHORS.md](AUTHORS.md)) in the description of the pull request. + +* If you plan to do something more involved, first discuss your ideas + on our [mailing list](https://groups.google.com/forum/?fromgroups#!forum/prometheus-developers). + This will avoid unnecessary work and surely give you and us a good deal + of inspiration. + +* Relevant coding style guidelines are the [Go Code Review + Comments](https://code.google.com/p/go-wiki/wiki/CodeReviewComments) + and the _Formatting and style_ section of Peter Bourgon's [Go: Best + Practices for Production + Environments](http://peter.bourgon.org/go-in-production/#formatting-and-style). diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/common/expfmt/bench_test.go juju-core-2.0.0/src/github.com/prometheus/common/expfmt/bench_test.go --- juju-core-2.0~beta15/src/github.com/prometheus/common/expfmt/bench_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/common/expfmt/bench_test.go 2016-10-13 14:32:23.000000000 +0000 @@ -0,0 +1,171 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package expfmt + +import ( + "bytes" + "compress/gzip" + "io" + "io/ioutil" + "testing" + + "github.com/matttproud/golang_protobuf_extensions/pbutil" + + dto "github.com/prometheus/client_model/go" +) + +var parser TextParser + +// Benchmarks to show how much penalty text format parsing actually inflicts. +// +// Example results on Linux 3.13.0, Intel(R) Core(TM) i7-4700MQ CPU @ 2.40GHz, go1.4. +// +// BenchmarkParseText 1000 1188535 ns/op 205085 B/op 6135 allocs/op +// BenchmarkParseTextGzip 1000 1376567 ns/op 246224 B/op 6151 allocs/op +// BenchmarkParseProto 10000 172790 ns/op 52258 B/op 1160 allocs/op +// BenchmarkParseProtoGzip 5000 324021 ns/op 94931 B/op 1211 allocs/op +// BenchmarkParseProtoMap 10000 187946 ns/op 58714 B/op 1203 allocs/op +// +// CONCLUSION: The overhead for the map is negligible. Text format needs ~5x more allocations. +// Without compression, it needs ~7x longer, but with compression (the more relevant scenario), +// the difference becomes less relevant, only ~4x. +// +// The test data contains 248 samples. +// +// BenchmarkProcessor002ParseOnly in the extraction package is not quite +// comparable to the benchmarks here, but it gives an idea: JSON parsing is even +// slower than text parsing and needs a comparable amount of allocs. + +// BenchmarkParseText benchmarks the parsing of a text-format scrape into metric +// family DTOs. 
+func BenchmarkParseText(b *testing.B) { + b.StopTimer() + data, err := ioutil.ReadFile("testdata/text") + if err != nil { + b.Fatal(err) + } + b.StartTimer() + + for i := 0; i < b.N; i++ { + if _, err := parser.TextToMetricFamilies(bytes.NewReader(data)); err != nil { + b.Fatal(err) + } + } +} + +// BenchmarkParseTextGzip benchmarks the parsing of a gzipped text-format scrape +// into metric family DTOs. +func BenchmarkParseTextGzip(b *testing.B) { + b.StopTimer() + data, err := ioutil.ReadFile("testdata/text.gz") + if err != nil { + b.Fatal(err) + } + b.StartTimer() + + for i := 0; i < b.N; i++ { + in, err := gzip.NewReader(bytes.NewReader(data)) + if err != nil { + b.Fatal(err) + } + if _, err := parser.TextToMetricFamilies(in); err != nil { + b.Fatal(err) + } + } +} + +// BenchmarkParseProto benchmarks the parsing of a protobuf-format scrape into +// metric family DTOs. Note that this does not build a map of metric families +// (as the text version does), because it is not required for Prometheus +// ingestion either. (However, it is required for the text-format parsing, as +// the metric family might be sprinkled all over the text, while the +// protobuf-format guarantees bundling at one place.) +func BenchmarkParseProto(b *testing.B) { + b.StopTimer() + data, err := ioutil.ReadFile("testdata/protobuf") + if err != nil { + b.Fatal(err) + } + b.StartTimer() + + for i := 0; i < b.N; i++ { + family := &dto.MetricFamily{} + in := bytes.NewReader(data) + for { + family.Reset() + if _, err := pbutil.ReadDelimited(in, family); err != nil { + if err == io.EOF { + break + } + b.Fatal(err) + } + } + } +} + +// BenchmarkParseProtoGzip is like BenchmarkParseProto above, but parses gzipped +// protobuf format. 
+func BenchmarkParseProtoGzip(b *testing.B) { + b.StopTimer() + data, err := ioutil.ReadFile("testdata/protobuf.gz") + if err != nil { + b.Fatal(err) + } + b.StartTimer() + + for i := 0; i < b.N; i++ { + family := &dto.MetricFamily{} + in, err := gzip.NewReader(bytes.NewReader(data)) + if err != nil { + b.Fatal(err) + } + for { + family.Reset() + if _, err := pbutil.ReadDelimited(in, family); err != nil { + if err == io.EOF { + break + } + b.Fatal(err) + } + } + } +} + +// BenchmarkParseProtoMap is like BenchmarkParseProto but DOES put the parsed +// metric family DTOs into a map. This is not happening during Prometheus +// ingestion. It is just here to measure the overhead of that map creation and +// separate it from the overhead of the text format parsing. +func BenchmarkParseProtoMap(b *testing.B) { + b.StopTimer() + data, err := ioutil.ReadFile("testdata/protobuf") + if err != nil { + b.Fatal(err) + } + b.StartTimer() + + for i := 0; i < b.N; i++ { + families := map[string]*dto.MetricFamily{} + in := bytes.NewReader(data) + for { + family := &dto.MetricFamily{} + if _, err := pbutil.ReadDelimited(in, family); err != nil { + if err == io.EOF { + break + } + b.Fatal(err) + } + families[family.GetName()] = family + } + } +} diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/common/expfmt/decode.go juju-core-2.0.0/src/github.com/prometheus/common/expfmt/decode.go --- juju-core-2.0~beta15/src/github.com/prometheus/common/expfmt/decode.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/common/expfmt/decode.go 2016-10-13 14:32:23.000000000 +0000 @@ -0,0 +1,433 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package expfmt + +import ( + "fmt" + "io" + "math" + "mime" + "net/http" + + dto "github.com/prometheus/client_model/go" + + "github.com/matttproud/golang_protobuf_extensions/pbutil" + "github.com/prometheus/common/model" +) + +// Decoder types decode an input stream into metric families. +type Decoder interface { + Decode(*dto.MetricFamily) error +} + +type DecodeOptions struct { + // Timestamp is added to each value from the stream that has no explicit timestamp set. + Timestamp model.Time +} + +// ResponseFormat extracts the correct format from a HTTP response header. +// If no matching format can be found FormatUnknown is returned. 
+func ResponseFormat(h http.Header) Format { + ct := h.Get(hdrContentType) + + mediatype, params, err := mime.ParseMediaType(ct) + if err != nil { + return FmtUnknown + } + + const ( + textType = "text/plain" + jsonType = "application/json" + ) + + switch mediatype { + case ProtoType: + if p, ok := params["proto"]; ok && p != ProtoProtocol { + return FmtUnknown + } + if e, ok := params["encoding"]; ok && e != "delimited" { + return FmtUnknown + } + return FmtProtoDelim + + case textType: + if v, ok := params["version"]; ok && v != TextVersion { + return FmtUnknown + } + return FmtText + + case jsonType: + var prometheusAPIVersion string + + if params["schema"] == "prometheus/telemetry" && params["version"] != "" { + prometheusAPIVersion = params["version"] + } else { + prometheusAPIVersion = h.Get("X-Prometheus-API-Version") + } + + switch prometheusAPIVersion { + case "0.0.2", "": + return fmtJSON2 + default: + return FmtUnknown + } + } + + return FmtUnknown +} + +// NewDecoder returns a new decoder based on the given input format. +// If the input format does not imply otherwise, a text format decoder is returned. +func NewDecoder(r io.Reader, format Format) Decoder { + switch format { + case FmtProtoDelim: + return &protoDecoder{r: r} + case fmtJSON2: + return newJSON2Decoder(r) + } + return &textDecoder{r: r} +} + +// protoDecoder implements the Decoder interface for protocol buffers. +type protoDecoder struct { + r io.Reader +} + +// Decode implements the Decoder interface. 
+func (d *protoDecoder) Decode(v *dto.MetricFamily) error { + _, err := pbutil.ReadDelimited(d.r, v) + if err != nil { + return err + } + if !model.IsValidMetricName(model.LabelValue(v.GetName())) { + return fmt.Errorf("invalid metric name %q", v.GetName()) + } + for _, m := range v.GetMetric() { + if m == nil { + continue + } + for _, l := range m.GetLabel() { + if l == nil { + continue + } + if !model.LabelValue(l.GetValue()).IsValid() { + return fmt.Errorf("invalid label value %q", l.GetValue()) + } + if !model.LabelName(l.GetName()).IsValid() { + return fmt.Errorf("invalid label name %q", l.GetName()) + } + } + } + return nil +} + +// textDecoder implements the Decoder interface for the text protcol. +type textDecoder struct { + r io.Reader + p TextParser + fams []*dto.MetricFamily +} + +// Decode implements the Decoder interface. +func (d *textDecoder) Decode(v *dto.MetricFamily) error { + // TODO(fabxc): Wrap this as a line reader to make streaming safer. + if len(d.fams) == 0 { + // No cached metric families, read everything and parse metrics. + fams, err := d.p.TextToMetricFamilies(d.r) + if err != nil { + return err + } + if len(fams) == 0 { + return io.EOF + } + d.fams = make([]*dto.MetricFamily, 0, len(fams)) + for _, f := range fams { + d.fams = append(d.fams, f) + } + } + + *v = *d.fams[0] + d.fams = d.fams[1:] + + return nil +} + +type SampleDecoder struct { + Dec Decoder + Opts *DecodeOptions + + f dto.MetricFamily +} + +func (sd *SampleDecoder) Decode(s *model.Vector) error { + if err := sd.Dec.Decode(&sd.f); err != nil { + return err + } + *s = extractSamples(&sd.f, sd.Opts) + return nil +} + +// Extract samples builds a slice of samples from the provided metric families. +func ExtractSamples(o *DecodeOptions, fams ...*dto.MetricFamily) model.Vector { + var all model.Vector + for _, f := range fams { + all = append(all, extractSamples(f, o)...) 
+ } + return all +} + +func extractSamples(f *dto.MetricFamily, o *DecodeOptions) model.Vector { + switch f.GetType() { + case dto.MetricType_COUNTER: + return extractCounter(o, f) + case dto.MetricType_GAUGE: + return extractGauge(o, f) + case dto.MetricType_SUMMARY: + return extractSummary(o, f) + case dto.MetricType_UNTYPED: + return extractUntyped(o, f) + case dto.MetricType_HISTOGRAM: + return extractHistogram(o, f) + } + panic("expfmt.extractSamples: unknown metric family type") +} + +func extractCounter(o *DecodeOptions, f *dto.MetricFamily) model.Vector { + samples := make(model.Vector, 0, len(f.Metric)) + + for _, m := range f.Metric { + if m.Counter == nil { + continue + } + + lset := make(model.LabelSet, len(m.Label)+1) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.MetricNameLabel] = model.LabelValue(f.GetName()) + + smpl := &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(m.Counter.GetValue()), + } + + if m.TimestampMs != nil { + smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) + } else { + smpl.Timestamp = o.Timestamp + } + + samples = append(samples, smpl) + } + + return samples +} + +func extractGauge(o *DecodeOptions, f *dto.MetricFamily) model.Vector { + samples := make(model.Vector, 0, len(f.Metric)) + + for _, m := range f.Metric { + if m.Gauge == nil { + continue + } + + lset := make(model.LabelSet, len(m.Label)+1) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.MetricNameLabel] = model.LabelValue(f.GetName()) + + smpl := &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(m.Gauge.GetValue()), + } + + if m.TimestampMs != nil { + smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) + } else { + smpl.Timestamp = o.Timestamp + } + + samples = append(samples, smpl) + } + + return samples +} + +func extractUntyped(o *DecodeOptions, f 
*dto.MetricFamily) model.Vector { + samples := make(model.Vector, 0, len(f.Metric)) + + for _, m := range f.Metric { + if m.Untyped == nil { + continue + } + + lset := make(model.LabelSet, len(m.Label)+1) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.MetricNameLabel] = model.LabelValue(f.GetName()) + + smpl := &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(m.Untyped.GetValue()), + } + + if m.TimestampMs != nil { + smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) + } else { + smpl.Timestamp = o.Timestamp + } + + samples = append(samples, smpl) + } + + return samples +} + +func extractSummary(o *DecodeOptions, f *dto.MetricFamily) model.Vector { + samples := make(model.Vector, 0, len(f.Metric)) + + for _, m := range f.Metric { + if m.Summary == nil { + continue + } + + timestamp := o.Timestamp + if m.TimestampMs != nil { + timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) + } + + for _, q := range m.Summary.Quantile { + lset := make(model.LabelSet, len(m.Label)+2) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + // BUG(matt): Update other names to "quantile". 
+ lset[model.LabelName(model.QuantileLabel)] = model.LabelValue(fmt.Sprint(q.GetQuantile())) + lset[model.MetricNameLabel] = model.LabelValue(f.GetName()) + + samples = append(samples, &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(q.GetValue()), + Timestamp: timestamp, + }) + } + + lset := make(model.LabelSet, len(m.Label)+1) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_sum") + + samples = append(samples, &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(m.Summary.GetSampleSum()), + Timestamp: timestamp, + }) + + lset = make(model.LabelSet, len(m.Label)+1) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_count") + + samples = append(samples, &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(m.Summary.GetSampleCount()), + Timestamp: timestamp, + }) + } + + return samples +} + +func extractHistogram(o *DecodeOptions, f *dto.MetricFamily) model.Vector { + samples := make(model.Vector, 0, len(f.Metric)) + + for _, m := range f.Metric { + if m.Histogram == nil { + continue + } + + timestamp := o.Timestamp + if m.TimestampMs != nil { + timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) + } + + infSeen := false + + for _, q := range m.Histogram.Bucket { + lset := make(model.LabelSet, len(m.Label)+2) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.LabelName(model.BucketLabel)] = model.LabelValue(fmt.Sprint(q.GetUpperBound())) + lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_bucket") + + if math.IsInf(q.GetUpperBound(), +1) { + infSeen = true + } + + samples = append(samples, &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(q.GetCumulativeCount()), + 
Timestamp: timestamp, + }) + } + + lset := make(model.LabelSet, len(m.Label)+1) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_sum") + + samples = append(samples, &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(m.Histogram.GetSampleSum()), + Timestamp: timestamp, + }) + + lset = make(model.LabelSet, len(m.Label)+1) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_count") + + count := &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(m.Histogram.GetSampleCount()), + Timestamp: timestamp, + } + samples = append(samples, count) + + if !infSeen { + // Append an infinity bucket sample. + lset := make(model.LabelSet, len(m.Label)+2) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.LabelName(model.BucketLabel)] = model.LabelValue("+Inf") + lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_bucket") + + samples = append(samples, &model.Sample{ + Metric: model.Metric(lset), + Value: count.Value, + Timestamp: timestamp, + }) + } + } + + return samples +} diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/common/expfmt/decode_test.go juju-core-2.0.0/src/github.com/prometheus/common/expfmt/decode_test.go --- juju-core-2.0~beta15/src/github.com/prometheus/common/expfmt/decode_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/common/expfmt/decode_test.go 2016-10-13 14:32:23.000000000 +0000 @@ -0,0 +1,367 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package expfmt + +import ( + "io" + "net/http" + "reflect" + "sort" + "strings" + "testing" + + "github.com/prometheus/common/model" +) + +func TestTextDecoder(t *testing.T) { + var ( + ts = model.Now() + in = ` +# Only a quite simple scenario with two metric families. +# More complicated tests of the parser itself can be found in the text package. +# TYPE mf2 counter +mf2 3 +mf1{label="value1"} -3.14 123456 +mf1{label="value2"} 42 +mf2 4 +` + out = model.Vector{ + &model.Sample{ + Metric: model.Metric{ + model.MetricNameLabel: "mf1", + "label": "value1", + }, + Value: -3.14, + Timestamp: 123456, + }, + &model.Sample{ + Metric: model.Metric{ + model.MetricNameLabel: "mf1", + "label": "value2", + }, + Value: 42, + Timestamp: ts, + }, + &model.Sample{ + Metric: model.Metric{ + model.MetricNameLabel: "mf2", + }, + Value: 3, + Timestamp: ts, + }, + &model.Sample{ + Metric: model.Metric{ + model.MetricNameLabel: "mf2", + }, + Value: 4, + Timestamp: ts, + }, + } + ) + + dec := &SampleDecoder{ + Dec: &textDecoder{r: strings.NewReader(in)}, + Opts: &DecodeOptions{ + Timestamp: ts, + }, + } + var all model.Vector + for { + var smpls model.Vector + err := dec.Decode(&smpls) + if err == io.EOF { + break + } + if err != nil { + t.Fatal(err) + } + all = append(all, smpls...) 
+ } + sort.Sort(all) + sort.Sort(out) + if !reflect.DeepEqual(all, out) { + t.Fatalf("output does not match") + } +} + +func TestProtoDecoder(t *testing.T) { + + var testTime = model.Now() + + scenarios := []struct { + in string + expected model.Vector + fail bool + }{ + { + in: "", + }, + { + in: "\x8f\x01\n\rrequest_count\x12\x12Number of requests\x18\x00\"0\n#\n\x0fsome_!abel_name\x12\x10some_label_value\x1a\t\t\x00\x00\x00\x00\x00\x00E\xc0\"6\n)\n\x12another_label_name\x12\x13another_label_value\x1a\t\t\x00\x00\x00\x00\x00\x00U@", + fail: true, + }, + { + in: "\x8f\x01\n\rrequest_count\x12\x12Number of requests\x18\x00\"0\n#\n\x0fsome_label_name\x12\x10some_label_value\x1a\t\t\x00\x00\x00\x00\x00\x00E\xc0\"6\n)\n\x12another_label_name\x12\x13another_label_value\x1a\t\t\x00\x00\x00\x00\x00\x00U@", + expected: model.Vector{ + &model.Sample{ + Metric: model.Metric{ + model.MetricNameLabel: "request_count", + "some_label_name": "some_label_value", + }, + Value: -42, + Timestamp: testTime, + }, + &model.Sample{ + Metric: model.Metric{ + model.MetricNameLabel: "request_count", + "another_label_name": "another_label_value", + }, + Value: 84, + Timestamp: testTime, + }, + }, + }, + { + in: "\xb9\x01\n\rrequest_count\x12\x12Number of requests\x18\x02\"O\n#\n\x0fsome_label_name\x12\x10some_label_value\"(\x1a\x12\t\xaeG\xe1z\x14\xae\xef?\x11\x00\x00\x00\x00\x00\x00E\xc0\x1a\x12\t+\x87\x16\xd9\xce\xf7\xef?\x11\x00\x00\x00\x00\x00\x00U\xc0\"A\n)\n\x12another_label_name\x12\x13another_label_value\"\x14\x1a\x12\t\x00\x00\x00\x00\x00\x00\xe0?\x11\x00\x00\x00\x00\x00\x00$@", + expected: model.Vector{ + &model.Sample{ + Metric: model.Metric{ + model.MetricNameLabel: "request_count_count", + "some_label_name": "some_label_value", + }, + Value: 0, + Timestamp: testTime, + }, + &model.Sample{ + Metric: model.Metric{ + model.MetricNameLabel: "request_count_sum", + "some_label_name": "some_label_value", + }, + Value: 0, + Timestamp: testTime, + }, + &model.Sample{ + Metric: 
model.Metric{ + model.MetricNameLabel: "request_count", + "some_label_name": "some_label_value", + "quantile": "0.99", + }, + Value: -42, + Timestamp: testTime, + }, + &model.Sample{ + Metric: model.Metric{ + model.MetricNameLabel: "request_count", + "some_label_name": "some_label_value", + "quantile": "0.999", + }, + Value: -84, + Timestamp: testTime, + }, + &model.Sample{ + Metric: model.Metric{ + model.MetricNameLabel: "request_count_count", + "another_label_name": "another_label_value", + }, + Value: 0, + Timestamp: testTime, + }, + &model.Sample{ + Metric: model.Metric{ + model.MetricNameLabel: "request_count_sum", + "another_label_name": "another_label_value", + }, + Value: 0, + Timestamp: testTime, + }, + &model.Sample{ + Metric: model.Metric{ + model.MetricNameLabel: "request_count", + "another_label_name": "another_label_value", + "quantile": "0.5", + }, + Value: 10, + Timestamp: testTime, + }, + }, + }, + { + in: "\x8d\x01\n\x1drequest_duration_microseconds\x12\x15The response latency.\x18\x04\"S:Q\b\x85\x15\x11\xcd\xcc\xccL\x8f\xcb:A\x1a\v\b{\x11\x00\x00\x00\x00\x00\x00Y@\x1a\f\b\x9c\x03\x11\x00\x00\x00\x00\x00\x00^@\x1a\f\b\xd0\x04\x11\x00\x00\x00\x00\x00\x00b@\x1a\f\b\xf4\v\x11\x9a\x99\x99\x99\x99\x99e@\x1a\f\b\x85\x15\x11\x00\x00\x00\x00\x00\x00\xf0\u007f", + expected: model.Vector{ + &model.Sample{ + Metric: model.Metric{ + model.MetricNameLabel: "request_duration_microseconds_bucket", + "le": "100", + }, + Value: 123, + Timestamp: testTime, + }, + &model.Sample{ + Metric: model.Metric{ + model.MetricNameLabel: "request_duration_microseconds_bucket", + "le": "120", + }, + Value: 412, + Timestamp: testTime, + }, + &model.Sample{ + Metric: model.Metric{ + model.MetricNameLabel: "request_duration_microseconds_bucket", + "le": "144", + }, + Value: 592, + Timestamp: testTime, + }, + &model.Sample{ + Metric: model.Metric{ + model.MetricNameLabel: "request_duration_microseconds_bucket", + "le": "172.8", + }, + Value: 1524, + Timestamp: testTime, + }, + 
&model.Sample{ + Metric: model.Metric{ + model.MetricNameLabel: "request_duration_microseconds_bucket", + "le": "+Inf", + }, + Value: 2693, + Timestamp: testTime, + }, + &model.Sample{ + Metric: model.Metric{ + model.MetricNameLabel: "request_duration_microseconds_sum", + }, + Value: 1756047.3, + Timestamp: testTime, + }, + &model.Sample{ + Metric: model.Metric{ + model.MetricNameLabel: "request_duration_microseconds_count", + }, + Value: 2693, + Timestamp: testTime, + }, + }, + }, + { + // The metric type is unset in this protobuf, which needs to be handled + // correctly by the decoder. + in: "\x1c\n\rrequest_count\"\v\x1a\t\t\x00\x00\x00\x00\x00\x00\xf0?", + expected: model.Vector{ + &model.Sample{ + Metric: model.Metric{ + model.MetricNameLabel: "request_count", + }, + Value: 1, + Timestamp: testTime, + }, + }, + }, + } + + for i, scenario := range scenarios { + dec := &SampleDecoder{ + Dec: &protoDecoder{r: strings.NewReader(scenario.in)}, + Opts: &DecodeOptions{ + Timestamp: testTime, + }, + } + + var all model.Vector + for { + var smpls model.Vector + err := dec.Decode(&smpls) + if err == io.EOF { + break + } + if scenario.fail { + if err == nil { + t.Fatal("Expected error but got none") + } + break + } + if err != nil { + t.Fatal(err) + } + all = append(all, smpls...) + } + sort.Sort(all) + sort.Sort(scenario.expected) + if !reflect.DeepEqual(all, scenario.expected) { + t.Fatalf("%d. 
output does not match, want: %#v, got %#v", i, scenario.expected, all) + } + } +} + +func testDiscriminatorHTTPHeader(t testing.TB) { + var scenarios = []struct { + input map[string]string + output Format + err error + }{ + { + input: map[string]string{"Content-Type": `application/vnd.google.protobuf; proto="io.prometheus.client.MetricFamily"; encoding="delimited"`}, + output: FmtProtoDelim, + }, + { + input: map[string]string{"Content-Type": `application/vnd.google.protobuf; proto="illegal"; encoding="delimited"`}, + output: FmtUnknown, + }, + { + input: map[string]string{"Content-Type": `application/vnd.google.protobuf; proto="io.prometheus.client.MetricFamily"; encoding="illegal"`}, + output: FmtUnknown, + }, + { + input: map[string]string{"Content-Type": `text/plain; version=0.0.4`}, + output: FmtText, + }, + { + input: map[string]string{"Content-Type": `text/plain`}, + output: FmtText, + }, + { + input: map[string]string{"Content-Type": `text/plain; version=0.0.3`}, + output: FmtUnknown, + }, + } + + for i, scenario := range scenarios { + var header http.Header + + if len(scenario.input) > 0 { + header = http.Header{} + } + + for key, value := range scenario.input { + header.Add(key, value) + } + + actual := ResponseFormat(header) + + if scenario.output != actual { + t.Errorf("%d. 
expected %s, got %s", i, scenario.output, actual) + } + } +} + +func TestDiscriminatorHTTPHeader(t *testing.T) { + testDiscriminatorHTTPHeader(t) +} + +func BenchmarkDiscriminatorHTTPHeader(b *testing.B) { + for i := 0; i < b.N; i++ { + testDiscriminatorHTTPHeader(b) + } +} diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/common/expfmt/encode.go juju-core-2.0.0/src/github.com/prometheus/common/expfmt/encode.go --- juju-core-2.0~beta15/src/github.com/prometheus/common/expfmt/encode.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/common/expfmt/encode.go 2016-10-13 14:32:23.000000000 +0000 @@ -0,0 +1,88 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package expfmt + +import ( + "fmt" + "io" + "net/http" + + "github.com/golang/protobuf/proto" + "github.com/matttproud/golang_protobuf_extensions/pbutil" + "github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg" + + dto "github.com/prometheus/client_model/go" +) + +// Encoder types encode metric families into an underlying wire protocol. +type Encoder interface { + Encode(*dto.MetricFamily) error +} + +type encoder func(*dto.MetricFamily) error + +func (e encoder) Encode(v *dto.MetricFamily) error { + return e(v) +} + +// Negotiate returns the Content-Type based on the given Accept header. +// If no appropriate accepted type is found, FmtText is returned. 
+func Negotiate(h http.Header) Format { + for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) { + // Check for protocol buffer + if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol { + switch ac.Params["encoding"] { + case "delimited": + return FmtProtoDelim + case "text": + return FmtProtoText + case "compact-text": + return FmtProtoCompact + } + } + // Check for text format. + ver := ac.Params["version"] + if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") { + return FmtText + } + } + return FmtText +} + +// NewEncoder returns a new encoder based on content type negotiation. +func NewEncoder(w io.Writer, format Format) Encoder { + switch format { + case FmtProtoDelim: + return encoder(func(v *dto.MetricFamily) error { + _, err := pbutil.WriteDelimited(w, v) + return err + }) + case FmtProtoCompact: + return encoder(func(v *dto.MetricFamily) error { + _, err := fmt.Fprintln(w, v.String()) + return err + }) + case FmtProtoText: + return encoder(func(v *dto.MetricFamily) error { + _, err := fmt.Fprintln(w, proto.MarshalTextString(v)) + return err + }) + case FmtText: + return encoder(func(v *dto.MetricFamily) error { + _, err := MetricFamilyToText(w, v) + return err + }) + } + panic("expfmt.NewEncoder: unknown format") +} diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/common/expfmt/expfmt.go juju-core-2.0.0/src/github.com/prometheus/common/expfmt/expfmt.go --- juju-core-2.0~beta15/src/github.com/prometheus/common/expfmt/expfmt.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/common/expfmt/expfmt.go 2016-10-13 14:32:23.000000000 +0000 @@ -0,0 +1,40 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// A package for reading and writing Prometheus metrics. +package expfmt + +type Format string + +const ( + TextVersion = "0.0.4" + + ProtoType = `application/vnd.google.protobuf` + ProtoProtocol = `io.prometheus.client.MetricFamily` + ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";" + + // The Content-Type values for the different wire protocols. + FmtUnknown Format = `` + FmtText Format = `text/plain; version=` + TextVersion + FmtProtoDelim Format = ProtoFmt + ` encoding=delimited` + FmtProtoText Format = ProtoFmt + ` encoding=text` + FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text` + + // fmtJSON2 is hidden as it is deprecated. 
+ fmtJSON2 Format = `application/json; version=0.0.2` +) + +const ( + hdrContentType = "Content-Type" + hdrAccept = "Accept" +) diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_0 juju-core-2.0.0/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_0 --- juju-core-2.0~beta15/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_0 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_0 2016-10-13 14:32:23.000000000 +0000 @@ -0,0 +1,2 @@ + + diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_1 juju-core-2.0.0/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_1 --- juju-core-2.0~beta15/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_1 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_1 2016-10-13 14:32:23.000000000 +0000 @@ -0,0 +1,6 @@ + +minimal_metric 1.234 +another_metric -3e3 103948 +# Even that: +no_labels{} 3 +# HELP line for non-existing metric will be ignored. diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_2 juju-core-2.0.0/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_2 --- juju-core-2.0~beta15/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_2 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_2 2016-10-13 14:32:23.000000000 +0000 @@ -0,0 +1,12 @@ + +# A normal comment. 
+# +# TYPE name counter +name{labelname="val1",basename="basevalue"} NaN +name {labelname="val2",basename="base\"v\\al\nue"} 0.23 1234567890 +# HELP name two-line\n doc str\\ing + + # HELP name2 doc str"ing 2 + # TYPE name2 gauge +name2{labelname="val2" ,basename = "basevalue2" } +Inf 54321 +name2{ labelname = "val1" , }-Inf diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_3 juju-core-2.0.0/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_3 --- juju-core-2.0~beta15/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_3 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_3 2016-10-13 14:32:23.000000000 +0000 @@ -0,0 +1,22 @@ + +# TYPE my_summary summary +my_summary{n1="val1",quantile="0.5"} 110 +decoy -1 -2 +my_summary{n1="val1",quantile="0.9"} 140 1 +my_summary_count{n1="val1"} 42 +# Latest timestamp wins in case of a summary. +my_summary_sum{n1="val1"} 4711 2 +fake_sum{n1="val1"} 2001 +# TYPE another_summary summary +another_summary_count{n2="val2",n1="val1"} 20 +my_summary_count{n2="val2",n1="val1"} 5 5 +another_summary{n1="val1",n2="val2",quantile=".3"} -1.2 +my_summary_sum{n1="val2"} 08 15 +my_summary{n1="val3", quantile="0.2"} 4711 + my_summary{n1="val1",n2="val2",quantile="-12.34",} NaN +# some +# funny comments +# HELP +# HELP +# HELP my_summary +# HELP my_summary diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_4 juju-core-2.0.0/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_4 --- juju-core-2.0~beta15/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_4 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_4 2016-10-13 14:32:23.000000000 +0000 @@ -0,0 +1,10 @@ + +# HELP request_duration_microseconds The response latency. 
+# TYPE request_duration_microseconds histogram +request_duration_microseconds_bucket{le="100"} 123 +request_duration_microseconds_bucket{le="120"} 412 +request_duration_microseconds_bucket{le="144"} 592 +request_duration_microseconds_bucket{le="172.8"} 1524 +request_duration_microseconds_bucket{le="+Inf"} 2693 +request_duration_microseconds_sum 1.7560473e+06 +request_duration_microseconds_count 2693 diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_0 juju-core-2.0.0/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_0 --- juju-core-2.0~beta15/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_0 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_0 2016-10-13 14:32:23.000000000 +0000 @@ -0,0 +1 @@ +bla 3.14 \ No newline at end of file diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_1 juju-core-2.0.0/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_1 --- juju-core-2.0~beta15/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_1 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_1 2016-10-13 14:32:23.000000000 +0000 @@ -0,0 +1 @@ +metric{label="\t"} 3.14 \ No newline at end of file diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_10 juju-core-2.0.0/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_10 --- juju-core-2.0~beta15/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_10 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_10 2016-10-13 14:32:23.000000000 +0000 @@ -0,0 +1 @@ +metric{label="bla"} 3.14 2 3 diff -Nru 
juju-core-2.0~beta15/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_11 juju-core-2.0.0/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_11 --- juju-core-2.0~beta15/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_11 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_11 2016-10-13 14:32:23.000000000 +0000 @@ -0,0 +1 @@ +metric{label="bla"} blubb diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_12 juju-core-2.0.0/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_12 --- juju-core-2.0~beta15/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_12 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_12 2016-10-13 14:32:23.000000000 +0000 @@ -0,0 +1,3 @@ + +# HELP metric one +# HELP metric two diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_13 juju-core-2.0.0/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_13 --- juju-core-2.0~beta15/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_13 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_13 2016-10-13 14:32:23.000000000 +0000 @@ -0,0 +1,3 @@ + +# TYPE metric counter +# TYPE metric untyped diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_14 juju-core-2.0.0/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_14 --- juju-core-2.0~beta15/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_14 1970-01-01 00:00:00.000000000 +0000 +++ 
juju-core-2.0.0/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_14 2016-10-13 14:32:23.000000000 +0000 @@ -0,0 +1,3 @@ + +metric 4.12 +# TYPE metric counter diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_15 juju-core-2.0.0/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_15 --- juju-core-2.0~beta15/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_15 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_15 2016-10-13 14:32:23.000000000 +0000 @@ -0,0 +1,2 @@ + +# TYPE metric bla diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_16 juju-core-2.0.0/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_16 --- juju-core-2.0~beta15/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_16 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_16 2016-10-13 14:32:23.000000000 +0000 @@ -0,0 +1,2 @@ + +# TYPE met-ric diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_17 juju-core-2.0.0/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_17 --- juju-core-2.0~beta15/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_17 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_17 2016-10-13 14:32:23.000000000 +0000 @@ -0,0 +1 @@ +@invalidmetric{label="bla"} 3.14 2 \ No newline at end of file diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_18 juju-core-2.0.0/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_18 --- 
juju-core-2.0~beta15/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_18 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_18 2016-10-13 14:32:23.000000000 +0000 @@ -0,0 +1 @@ +{label="bla"} 3.14 2 \ No newline at end of file diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_19 juju-core-2.0.0/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_19 --- juju-core-2.0~beta15/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_19 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_19 2016-10-13 14:32:23.000000000 +0000 @@ -0,0 +1,3 @@ + +# TYPE metric histogram +metric_bucket{le="bla"} 3.14 diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_2 juju-core-2.0.0/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_2 --- juju-core-2.0~beta15/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_2 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_2 2016-10-13 14:32:23.000000000 +0000 @@ -0,0 +1,3 @@ + +metric{label="new +line"} 3.14 diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_3 juju-core-2.0.0/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_3 --- juju-core-2.0~beta15/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_3 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_3 2016-10-13 14:32:23.000000000 +0000 @@ -0,0 +1 @@ +metric{@="bla"} 3.14 \ No newline at end of file diff -Nru 
juju-core-2.0~beta15/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_4 juju-core-2.0.0/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_4 --- juju-core-2.0~beta15/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_4 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_4 2016-10-13 14:32:23.000000000 +0000 @@ -0,0 +1 @@ +metric{__name__="bla"} 3.14 \ No newline at end of file diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_5 juju-core-2.0.0/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_5 --- juju-core-2.0~beta15/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_5 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_5 2016-10-13 14:32:23.000000000 +0000 @@ -0,0 +1 @@ +metric{label+="bla"} 3.14 \ No newline at end of file diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_6 juju-core-2.0.0/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_6 --- juju-core-2.0~beta15/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_6 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_6 2016-10-13 14:32:23.000000000 +0000 @@ -0,0 +1 @@ +metric{label=bla} 3.14 \ No newline at end of file diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_7 juju-core-2.0.0/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_7 --- juju-core-2.0~beta15/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_7 1970-01-01 00:00:00.000000000 +0000 +++ 
juju-core-2.0.0/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_7 2016-10-13 14:32:23.000000000 +0000 @@ -0,0 +1,3 @@ + +# TYPE metric summary +metric{quantile="bla"} 3.14 diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_8 juju-core-2.0.0/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_8 --- juju-core-2.0~beta15/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_8 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_8 2016-10-13 14:32:23.000000000 +0000 @@ -0,0 +1 @@ +metric{label="bla"+} 3.14 \ No newline at end of file diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_9 juju-core-2.0.0/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_9 --- juju-core-2.0~beta15/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_9 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_9 2016-10-13 14:32:23.000000000 +0000 @@ -0,0 +1 @@ +metric{label="bla"} 3.14 2.72 diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/common/expfmt/fuzz/corpus/minimal juju-core-2.0.0/src/github.com/prometheus/common/expfmt/fuzz/corpus/minimal --- juju-core-2.0~beta15/src/github.com/prometheus/common/expfmt/fuzz/corpus/minimal 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/common/expfmt/fuzz/corpus/minimal 2016-10-13 14:32:23.000000000 +0000 @@ -0,0 +1 @@ +m{} 0 diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/common/expfmt/fuzz.go juju-core-2.0.0/src/github.com/prometheus/common/expfmt/fuzz.go --- juju-core-2.0~beta15/src/github.com/prometheus/common/expfmt/fuzz.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/common/expfmt/fuzz.go 
2016-10-13 14:32:23.000000000 +0000 @@ -0,0 +1,36 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Build only when actually fuzzing +// +build gofuzz + +package expfmt + +import "bytes" + +// Fuzz text metric parser with with github.com/dvyukov/go-fuzz: +// +// go-fuzz-build github.com/prometheus/client_golang/text +// go-fuzz -bin text-fuzz.zip -workdir fuzz +// +// Further input samples should go in the folder fuzz/corpus. +func Fuzz(in []byte) int { + parser := TextParser{} + _, err := parser.TextToMetricFamilies(bytes.NewReader(in)) + + if err != nil { + return 0 + } + + return 1 +} diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/common/expfmt/json_decode.go juju-core-2.0.0/src/github.com/prometheus/common/expfmt/json_decode.go --- juju-core-2.0~beta15/src/github.com/prometheus/common/expfmt/json_decode.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/common/expfmt/json_decode.go 2016-10-13 14:32:23.000000000 +0000 @@ -0,0 +1,174 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package expfmt + +import ( + "encoding/json" + "fmt" + "io" + "sort" + + "github.com/golang/protobuf/proto" + dto "github.com/prometheus/client_model/go" + + "github.com/prometheus/common/model" +) + +type json2Decoder struct { + dec *json.Decoder + fams []*dto.MetricFamily +} + +func newJSON2Decoder(r io.Reader) Decoder { + return &json2Decoder{ + dec: json.NewDecoder(r), + } +} + +type histogram002 struct { + Labels model.LabelSet `json:"labels"` + Values map[string]float64 `json:"value"` +} + +type counter002 struct { + Labels model.LabelSet `json:"labels"` + Value float64 `json:"value"` +} + +func protoLabelSet(base, ext model.LabelSet) ([]*dto.LabelPair, error) { + labels := base.Clone().Merge(ext) + delete(labels, model.MetricNameLabel) + + names := make([]string, 0, len(labels)) + for ln := range labels { + names = append(names, string(ln)) + } + sort.Strings(names) + + pairs := make([]*dto.LabelPair, 0, len(labels)) + + for _, ln := range names { + if !model.LabelNameRE.MatchString(ln) { + return nil, fmt.Errorf("invalid label name %q", ln) + } + lv := labels[model.LabelName(ln)] + + pairs = append(pairs, &dto.LabelPair{ + Name: proto.String(ln), + Value: proto.String(string(lv)), + }) + } + + return pairs, nil +} + +func (d *json2Decoder) more() error { + var entities []struct { + BaseLabels model.LabelSet `json:"baseLabels"` + Docstring string `json:"docstring"` + Metric struct { + Type string `json:"type"` + Values json.RawMessage `json:"value"` + } `json:"metric"` + } + + if err := d.dec.Decode(&entities); err != nil { + return err + 
} + for _, e := range entities { + f := &dto.MetricFamily{ + Name: proto.String(string(e.BaseLabels[model.MetricNameLabel])), + Help: proto.String(e.Docstring), + Type: dto.MetricType_UNTYPED.Enum(), + Metric: []*dto.Metric{}, + } + + d.fams = append(d.fams, f) + + switch e.Metric.Type { + case "counter", "gauge": + var values []counter002 + + if err := json.Unmarshal(e.Metric.Values, &values); err != nil { + return fmt.Errorf("could not extract %s value: %s", e.Metric.Type, err) + } + + for _, ctr := range values { + labels, err := protoLabelSet(e.BaseLabels, ctr.Labels) + if err != nil { + return err + } + f.Metric = append(f.Metric, &dto.Metric{ + Label: labels, + Untyped: &dto.Untyped{ + Value: proto.Float64(ctr.Value), + }, + }) + } + + case "histogram": + var values []histogram002 + + if err := json.Unmarshal(e.Metric.Values, &values); err != nil { + return fmt.Errorf("could not extract %s value: %s", e.Metric.Type, err) + } + + for _, hist := range values { + quants := make([]string, 0, len(values)) + for q := range hist.Values { + quants = append(quants, q) + } + + sort.Strings(quants) + + for _, q := range quants { + value := hist.Values[q] + // The correct label is "quantile" but to not break old expressions + // this remains "percentile" + hist.Labels["percentile"] = model.LabelValue(q) + + labels, err := protoLabelSet(e.BaseLabels, hist.Labels) + if err != nil { + return err + } + + f.Metric = append(f.Metric, &dto.Metric{ + Label: labels, + Untyped: &dto.Untyped{ + Value: proto.Float64(value), + }, + }) + } + } + + default: + return fmt.Errorf("unknown metric type %q", e.Metric.Type) + } + } + return nil +} + +// Decode implements the Decoder interface. 
+func (d *json2Decoder) Decode(v *dto.MetricFamily) error { + if len(d.fams) == 0 { + if err := d.more(); err != nil { + return err + } + } + + *v = *d.fams[0] + d.fams = d.fams[1:] + + return nil +} diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/common/expfmt/json_decode_test.go juju-core-2.0.0/src/github.com/prometheus/common/expfmt/json_decode_test.go --- juju-core-2.0~beta15/src/github.com/prometheus/common/expfmt/json_decode_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/common/expfmt/json_decode_test.go 2016-10-13 14:32:23.000000000 +0000 @@ -0,0 +1,139 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package expfmt + +import ( + "os" + "reflect" + "testing" + + "github.com/golang/protobuf/proto" + dto "github.com/prometheus/client_model/go" +) + +func TestJSON2Decode(t *testing.T) { + f, err := os.Open("testdata/json2") + if err != nil { + t.Fatal(err) + } + defer f.Close() + + dec := newJSON2Decoder(f) + + var v1 dto.MetricFamily + if err := dec.Decode(&v1); err != nil { + t.Fatal(err) + } + + exp1 := dto.MetricFamily{ + Type: dto.MetricType_UNTYPED.Enum(), + Help: proto.String("RPC calls."), + Name: proto.String("rpc_calls_total"), + Metric: []*dto.Metric{ + { + Label: []*dto.LabelPair{ + { + Name: proto.String("job"), + Value: proto.String("batch_job"), + }, { + Name: proto.String("service"), + Value: proto.String("zed"), + }, + }, + Untyped: &dto.Untyped{ + Value: proto.Float64(25), + }, + }, + { + Label: []*dto.LabelPair{ + { + Name: proto.String("job"), + Value: proto.String("batch_job"), + }, { + Name: proto.String("service"), + Value: proto.String("bar"), + }, + }, + Untyped: &dto.Untyped{ + Value: proto.Float64(24), + }, + }, + }, + } + + if !reflect.DeepEqual(v1, exp1) { + t.Fatalf("Expected %v, got %v", exp1, v1) + } + + var v2 dto.MetricFamily + if err := dec.Decode(&v2); err != nil { + t.Fatal(err) + } + + exp2 := dto.MetricFamily{ + Type: dto.MetricType_UNTYPED.Enum(), + Help: proto.String("RPC latency."), + Name: proto.String("rpc_latency_microseconds"), + Metric: []*dto.Metric{ + { + Label: []*dto.LabelPair{ + { + Name: proto.String("percentile"), + Value: proto.String("0.010000"), + }, { + Name: proto.String("service"), + Value: proto.String("foo"), + }, + }, + Untyped: &dto.Untyped{ + Value: proto.Float64(15), + }, + }, + { + Label: []*dto.LabelPair{ + { + Name: proto.String("percentile"), + Value: proto.String("0.990000"), + }, { + Name: proto.String("service"), + Value: proto.String("foo"), + }, + }, + Untyped: &dto.Untyped{ + Value: proto.Float64(17), + }, + }, + }, + } + + if !reflect.DeepEqual(v2, exp2) { + t.Fatalf("Expected %v, got 
%v", exp2, v2) + } + +} + +func TestJSON2DecodeError(t *testing.T) { + f, err := os.Open("testdata/json2_bad") + if err != nil { + t.Fatal(err) + } + defer f.Close() + + dec := newJSON2Decoder(f) + + var v1 dto.MetricFamily + if err := dec.Decode(&v1); err == nil { + t.Fatal("Expected error but got none") + } +} diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/common/expfmt/testdata/json2 juju-core-2.0.0/src/github.com/prometheus/common/expfmt/testdata/json2 --- juju-core-2.0~beta15/src/github.com/prometheus/common/expfmt/testdata/json2 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/common/expfmt/testdata/json2 2016-10-13 14:32:23.000000000 +0000 @@ -0,0 +1,46 @@ +[ + { + "baseLabels": { + "__name__": "rpc_calls_total", + "job": "batch_job" + }, + "docstring": "RPC calls.", + "metric": { + "type": "counter", + "value": [ + { + "labels": { + "service": "zed" + }, + "value": 25 + }, + { + "labels": { + "service": "bar" + }, + "value": 24 + } + ] + } + }, + { + "baseLabels": { + "__name__": "rpc_latency_microseconds" + }, + "docstring": "RPC latency.", + "metric": { + "type": "histogram", + "value": [ + { + "labels": { + "service": "foo" + }, + "value": { + "0.010000": 15, + "0.990000": 17 + } + } + ] + } + } +] diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/common/expfmt/testdata/json2_bad juju-core-2.0.0/src/github.com/prometheus/common/expfmt/testdata/json2_bad --- juju-core-2.0~beta15/src/github.com/prometheus/common/expfmt/testdata/json2_bad 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/common/expfmt/testdata/json2_bad 2016-10-13 14:32:23.000000000 +0000 @@ -0,0 +1,46 @@ +[ + { + "baseLabels": { + "__name__": "rpc_calls_total", + "job": "batch_job" + }, + "docstring": "RPC calls.", + "metric": { + "type": "counter", + "value": [ + { + "labels": { + "servic|e": "zed" + }, + "value": 25 + }, + { + "labels": { + "service": "bar" + }, + "value": 24 + } + ] + } + }, + { + 
"baseLabels": { + "__name__": "rpc_latency_microseconds" + }, + "docstring": "RPC latency.", + "metric": { + "type": "histogram", + "value": [ + { + "labels": { + "service": "foo" + }, + "value": { + "0.010000": 15, + "0.990000": 17 + } + } + ] + } + } +] diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/common/expfmt/testdata/protobuf juju-core-2.0.0/src/github.com/prometheus/common/expfmt/testdata/protobuf --- juju-core-2.0~beta15/src/github.com/prometheus/common/expfmt/testdata/protobuf 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/common/expfmt/testdata/protobuf 2016-10-13 14:32:23.000000000 +0000 @@ -0,0 +1,516 @@ +fc08 0a22 6874 7470 5f72 6571 7565 7374 +5f64 7572 6174 696f 6e5f 6d69 6372 6f73 +6563 6f6e 6473 122b 5468 6520 4854 5450 +2072 6571 7565 7374 206c 6174 656e 6369 +6573 2069 6e20 6d69 6372 6f73 6563 6f6e +6473 2e18 0222 570a 0c0a 0768 616e 646c +6572 1201 2f22 4708 0011 0000 0000 0000 +0000 1a12 0900 0000 0000 00e0 3f11 0000 +0000 0000 0000 1a12 09cd cccc cccc ccec +3f11 0000 0000 0000 0000 1a12 09ae 47e1 +7a14 aeef 3f11 0000 0000 0000 0000 225d +0a12 0a07 6861 6e64 6c65 7212 072f 616c +6572 7473 2247 0800 1100 0000 0000 0000 +001a 1209 0000 0000 0000 e03f 1100 0000 +0000 0000 001a 1209 cdcc cccc cccc ec3f +1100 0000 0000 0000 001a 1209 ae47 e17a +14ae ef3f 1100 0000 0000 0000 0022 620a +170a 0768 616e 646c 6572 120c 2f61 7069 +2f6d 6574 7269 6373 2247 0800 1100 0000 +0000 0000 001a 1209 0000 0000 0000 e03f +1100 0000 0000 0000 001a 1209 cdcc cccc +cccc ec3f 1100 0000 0000 0000 001a 1209 +ae47 e17a 14ae ef3f 1100 0000 0000 0000 +0022 600a 150a 0768 616e 646c 6572 120a +2f61 7069 2f71 7565 7279 2247 0800 1100 +0000 0000 0000 001a 1209 0000 0000 0000 +e03f 1100 0000 0000 0000 001a 1209 cdcc +cccc cccc ec3f 1100 0000 0000 0000 001a +1209 ae47 e17a 14ae ef3f 1100 0000 0000 +0000 0022 660a 1b0a 0768 616e 646c 6572 +1210 2f61 7069 2f71 7565 7279 5f72 616e +6765 2247 0800 1100 0000 0000 0000 001a +1209 0000 0000 
0000 e03f 1100 0000 0000 +0000 001a 1209 cdcc cccc cccc ec3f 1100 +0000 0000 0000 001a 1209 ae47 e17a 14ae +ef3f 1100 0000 0000 0000 0022 620a 170a +0768 616e 646c 6572 120c 2f61 7069 2f74 +6172 6765 7473 2247 0800 1100 0000 0000 +0000 001a 1209 0000 0000 0000 e03f 1100 +0000 0000 0000 001a 1209 cdcc cccc cccc +ec3f 1100 0000 0000 0000 001a 1209 ae47 +e17a 14ae ef3f 1100 0000 0000 0000 0022 +600a 150a 0768 616e 646c 6572 120a 2f63 +6f6e 736f 6c65 732f 2247 0800 1100 0000 +0000 0000 001a 1209 0000 0000 0000 e03f +1100 0000 0000 0000 001a 1209 cdcc cccc +cccc ec3f 1100 0000 0000 0000 001a 1209 +ae47 e17a 14ae ef3f 1100 0000 0000 0000 +0022 5c0a 110a 0768 616e 646c 6572 1206 +2f67 7261 7068 2247 0800 1100 0000 0000 +0000 001a 1209 0000 0000 0000 e03f 1100 +0000 0000 0000 001a 1209 cdcc cccc cccc +ec3f 1100 0000 0000 0000 001a 1209 ae47 +e17a 14ae ef3f 1100 0000 0000 0000 0022 +5b0a 100a 0768 616e 646c 6572 1205 2f68 +6561 7022 4708 0011 0000 0000 0000 0000 +1a12 0900 0000 0000 00e0 3f11 0000 0000 +0000 0000 1a12 09cd cccc cccc ccec 3f11 +0000 0000 0000 0000 1a12 09ae 47e1 7a14 +aeef 3f11 0000 0000 0000 0000 225e 0a13 +0a07 6861 6e64 6c65 7212 082f 7374 6174 +6963 2f22 4708 0011 0000 0000 0000 0000 +1a12 0900 0000 0000 00e0 3f11 0000 0000 +0000 0000 1a12 09cd cccc cccc ccec 3f11 +0000 0000 0000 0000 1a12 09ae 47e1 7a14 +aeef 3f11 0000 0000 0000 0000 2260 0a15 +0a07 6861 6e64 6c65 7212 0a70 726f 6d65 +7468 6575 7322 4708 3b11 5b8f c2f5 083f +f440 1a12 0900 0000 0000 00e0 3f11 e17a +14ae c7af 9340 1a12 09cd cccc cccc ccec +3f11 2fdd 2406 81f0 9640 1a12 09ae 47e1 +7a14 aeef 3f11 3d0a d7a3 b095 a740 e608 +0a17 6874 7470 5f72 6571 7565 7374 5f73 +697a 655f 6279 7465 7312 2054 6865 2048 +5454 5020 7265 7175 6573 7420 7369 7a65 +7320 696e 2062 7974 6573 2e18 0222 570a +0c0a 0768 616e 646c 6572 1201 2f22 4708 +0011 0000 0000 0000 0000 1a12 0900 0000 +0000 00e0 3f11 0000 0000 0000 0000 1a12 +09cd cccc cccc ccec 3f11 0000 0000 0000 +0000 1a12 09ae 47e1 7a14 aeef 3f11 0000 +0000 
0000 0000 225d 0a12 0a07 6861 6e64 +6c65 7212 072f 616c 6572 7473 2247 0800 +1100 0000 0000 0000 001a 1209 0000 0000 +0000 e03f 1100 0000 0000 0000 001a 1209 +cdcc cccc cccc ec3f 1100 0000 0000 0000 +001a 1209 ae47 e17a 14ae ef3f 1100 0000 +0000 0000 0022 620a 170a 0768 616e 646c +6572 120c 2f61 7069 2f6d 6574 7269 6373 +2247 0800 1100 0000 0000 0000 001a 1209 +0000 0000 0000 e03f 1100 0000 0000 0000 +001a 1209 cdcc cccc cccc ec3f 1100 0000 +0000 0000 001a 1209 ae47 e17a 14ae ef3f +1100 0000 0000 0000 0022 600a 150a 0768 +616e 646c 6572 120a 2f61 7069 2f71 7565 +7279 2247 0800 1100 0000 0000 0000 001a +1209 0000 0000 0000 e03f 1100 0000 0000 +0000 001a 1209 cdcc cccc cccc ec3f 1100 +0000 0000 0000 001a 1209 ae47 e17a 14ae +ef3f 1100 0000 0000 0000 0022 660a 1b0a +0768 616e 646c 6572 1210 2f61 7069 2f71 +7565 7279 5f72 616e 6765 2247 0800 1100 +0000 0000 0000 001a 1209 0000 0000 0000 +e03f 1100 0000 0000 0000 001a 1209 cdcc +cccc cccc ec3f 1100 0000 0000 0000 001a +1209 ae47 e17a 14ae ef3f 1100 0000 0000 +0000 0022 620a 170a 0768 616e 646c 6572 +120c 2f61 7069 2f74 6172 6765 7473 2247 +0800 1100 0000 0000 0000 001a 1209 0000 +0000 0000 e03f 1100 0000 0000 0000 001a +1209 cdcc cccc cccc ec3f 1100 0000 0000 +0000 001a 1209 ae47 e17a 14ae ef3f 1100 +0000 0000 0000 0022 600a 150a 0768 616e +646c 6572 120a 2f63 6f6e 736f 6c65 732f +2247 0800 1100 0000 0000 0000 001a 1209 +0000 0000 0000 e03f 1100 0000 0000 0000 +001a 1209 cdcc cccc cccc ec3f 1100 0000 +0000 0000 001a 1209 ae47 e17a 14ae ef3f +1100 0000 0000 0000 0022 5c0a 110a 0768 +616e 646c 6572 1206 2f67 7261 7068 2247 +0800 1100 0000 0000 0000 001a 1209 0000 +0000 0000 e03f 1100 0000 0000 0000 001a +1209 cdcc cccc cccc ec3f 1100 0000 0000 +0000 001a 1209 ae47 e17a 14ae ef3f 1100 +0000 0000 0000 0022 5b0a 100a 0768 616e +646c 6572 1205 2f68 6561 7022 4708 0011 +0000 0000 0000 0000 1a12 0900 0000 0000 +00e0 3f11 0000 0000 0000 0000 1a12 09cd +cccc cccc ccec 3f11 0000 0000 0000 0000 +1a12 09ae 47e1 7a14 aeef 3f11 0000 
0000 +0000 0000 225e 0a13 0a07 6861 6e64 6c65 +7212 082f 7374 6174 6963 2f22 4708 0011 +0000 0000 0000 0000 1a12 0900 0000 0000 +00e0 3f11 0000 0000 0000 0000 1a12 09cd +cccc cccc ccec 3f11 0000 0000 0000 0000 +1a12 09ae 47e1 7a14 aeef 3f11 0000 0000 +0000 0000 2260 0a15 0a07 6861 6e64 6c65 +7212 0a70 726f 6d65 7468 6575 7322 4708 +3b11 0000 0000 40c4 d040 1a12 0900 0000 +0000 00e0 3f11 0000 0000 0030 7240 1a12 +09cd cccc cccc ccec 3f11 0000 0000 0030 +7240 1a12 09ae 47e1 7a14 aeef 3f11 0000 +0000 0030 7240 7c0a 1368 7474 705f 7265 +7175 6573 7473 5f74 6f74 616c 1223 546f +7461 6c20 6e75 6d62 6572 206f 6620 4854 +5450 2072 6571 7565 7374 7320 6d61 6465 +2e18 0022 3e0a 0b0a 0463 6f64 6512 0332 +3030 0a15 0a07 6861 6e64 6c65 7212 0a70 +726f 6d65 7468 6575 730a 0d0a 066d 6574 +686f 6412 0367 6574 1a09 0900 0000 0000 +804d 40e8 080a 1868 7474 705f 7265 7370 +6f6e 7365 5f73 697a 655f 6279 7465 7312 +2154 6865 2048 5454 5020 7265 7370 6f6e +7365 2073 697a 6573 2069 6e20 6279 7465 +732e 1802 2257 0a0c 0a07 6861 6e64 6c65 +7212 012f 2247 0800 1100 0000 0000 0000 +001a 1209 0000 0000 0000 e03f 1100 0000 +0000 0000 001a 1209 cdcc cccc cccc ec3f +1100 0000 0000 0000 001a 1209 ae47 e17a +14ae ef3f 1100 0000 0000 0000 0022 5d0a +120a 0768 616e 646c 6572 1207 2f61 6c65 +7274 7322 4708 0011 0000 0000 0000 0000 +1a12 0900 0000 0000 00e0 3f11 0000 0000 +0000 0000 1a12 09cd cccc cccc ccec 3f11 +0000 0000 0000 0000 1a12 09ae 47e1 7a14 +aeef 3f11 0000 0000 0000 0000 2262 0a17 +0a07 6861 6e64 6c65 7212 0c2f 6170 692f +6d65 7472 6963 7322 4708 0011 0000 0000 +0000 0000 1a12 0900 0000 0000 00e0 3f11 +0000 0000 0000 0000 1a12 09cd cccc cccc +ccec 3f11 0000 0000 0000 0000 1a12 09ae +47e1 7a14 aeef 3f11 0000 0000 0000 0000 +2260 0a15 0a07 6861 6e64 6c65 7212 0a2f +6170 692f 7175 6572 7922 4708 0011 0000 +0000 0000 0000 1a12 0900 0000 0000 00e0 +3f11 0000 0000 0000 0000 1a12 09cd cccc +cccc ccec 3f11 0000 0000 0000 0000 1a12 +09ae 47e1 7a14 aeef 3f11 0000 0000 0000 +0000 2266 0a1b 0a07 6861 
6e64 6c65 7212 +102f 6170 692f 7175 6572 795f 7261 6e67 +6522 4708 0011 0000 0000 0000 0000 1a12 +0900 0000 0000 00e0 3f11 0000 0000 0000 +0000 1a12 09cd cccc cccc ccec 3f11 0000 +0000 0000 0000 1a12 09ae 47e1 7a14 aeef +3f11 0000 0000 0000 0000 2262 0a17 0a07 +6861 6e64 6c65 7212 0c2f 6170 692f 7461 +7267 6574 7322 4708 0011 0000 0000 0000 +0000 1a12 0900 0000 0000 00e0 3f11 0000 +0000 0000 0000 1a12 09cd cccc cccc ccec +3f11 0000 0000 0000 0000 1a12 09ae 47e1 +7a14 aeef 3f11 0000 0000 0000 0000 2260 +0a15 0a07 6861 6e64 6c65 7212 0a2f 636f +6e73 6f6c 6573 2f22 4708 0011 0000 0000 +0000 0000 1a12 0900 0000 0000 00e0 3f11 +0000 0000 0000 0000 1a12 09cd cccc cccc +ccec 3f11 0000 0000 0000 0000 1a12 09ae +47e1 7a14 aeef 3f11 0000 0000 0000 0000 +225c 0a11 0a07 6861 6e64 6c65 7212 062f +6772 6170 6822 4708 0011 0000 0000 0000 +0000 1a12 0900 0000 0000 00e0 3f11 0000 +0000 0000 0000 1a12 09cd cccc cccc ccec +3f11 0000 0000 0000 0000 1a12 09ae 47e1 +7a14 aeef 3f11 0000 0000 0000 0000 225b +0a10 0a07 6861 6e64 6c65 7212 052f 6865 +6170 2247 0800 1100 0000 0000 0000 001a +1209 0000 0000 0000 e03f 1100 0000 0000 +0000 001a 1209 cdcc cccc cccc ec3f 1100 +0000 0000 0000 001a 1209 ae47 e17a 14ae +ef3f 1100 0000 0000 0000 0022 5e0a 130a +0768 616e 646c 6572 1208 2f73 7461 7469 +632f 2247 0800 1100 0000 0000 0000 001a +1209 0000 0000 0000 e03f 1100 0000 0000 +0000 001a 1209 cdcc cccc cccc ec3f 1100 +0000 0000 0000 001a 1209 ae47 e17a 14ae +ef3f 1100 0000 0000 0000 0022 600a 150a +0768 616e 646c 6572 120a 7072 6f6d 6574 +6865 7573 2247 083b 1100 0000 00e0 b4fc +401a 1209 0000 0000 0000 e03f 1100 0000 +0000 349f 401a 1209 cdcc cccc cccc ec3f +1100 0000 0000 08a0 401a 1209 ae47 e17a +14ae ef3f 1100 0000 0000 0aa0 405c 0a19 +7072 6f63 6573 735f 6370 755f 7365 636f +6e64 735f 746f 7461 6c12 3054 6f74 616c +2075 7365 7220 616e 6420 7379 7374 656d +2043 5055 2074 696d 6520 7370 656e 7420 +696e 2073 6563 6f6e 6473 2e18 0022 0b1a +0909 a470 3d0a d7a3 d03f 4f0a 1270 726f +6365 7373 5f67 
6f72 6f75 7469 6e65 7312 +2a4e 756d 6265 7220 6f66 2067 6f72 6f75 +7469 6e65 7320 7468 6174 2063 7572 7265 +6e74 6c79 2065 7869 7374 2e18 0122 0b12 +0909 0000 0000 0000 5140 4a0a 0f70 726f +6365 7373 5f6d 6178 5f66 6473 1228 4d61 +7869 6d75 6d20 6e75 6d62 6572 206f 6620 +6f70 656e 2066 696c 6520 6465 7363 7269 +7074 6f72 732e 1801 220b 1209 0900 0000 +0000 00c0 4043 0a10 7072 6f63 6573 735f +6f70 656e 5f66 6473 1220 4e75 6d62 6572 +206f 6620 6f70 656e 2066 696c 6520 6465 +7363 7269 7074 6f72 732e 1801 220b 1209 +0900 0000 0000 003d 404e 0a1d 7072 6f63 +6573 735f 7265 7369 6465 6e74 5f6d 656d +6f72 795f 6279 7465 7312 1e52 6573 6964 +656e 7420 6d65 6d6f 7279 2073 697a 6520 +696e 2062 7974 6573 2e18 0122 0b12 0909 +0000 0000 004b 8841 630a 1a70 726f 6365 +7373 5f73 7461 7274 5f74 696d 655f 7365 +636f 6e64 7312 3653 7461 7274 2074 696d +6520 6f66 2074 6865 2070 726f 6365 7373 +2073 696e 6365 2075 6e69 7820 6570 6f63 +6820 696e 2073 6563 6f6e 6473 2e18 0122 +0b12 0909 3d0a 172d e831 d541 4c0a 1c70 +726f 6365 7373 5f76 6972 7475 616c 5f6d +656d 6f72 795f 6279 7465 7312 1d56 6972 +7475 616c 206d 656d 6f72 7920 7369 7a65 +2069 6e20 6279 7465 732e 1801 220b 1209 +0900 0000 0020 12c0 415f 0a27 7072 6f6d +6574 6865 7573 5f64 6e73 5f73 645f 6c6f +6f6b 7570 5f66 6169 6c75 7265 735f 746f +7461 6c12 2554 6865 206e 756d 6265 7220 +6f66 2044 4e53 2d53 4420 6c6f 6f6b 7570 +2066 6169 6c75 7265 732e 1800 220b 1a09 +0900 0000 0000 0000 004f 0a1f 7072 6f6d +6574 6865 7573 5f64 6e73 5f73 645f 6c6f +6f6b 7570 735f 746f 7461 6c12 1d54 6865 +206e 756d 6265 7220 6f66 2044 4e53 2d53 +4420 6c6f 6f6b 7570 732e 1800 220b 1a09 +0900 0000 0000 0008 40cf 010a 2a70 726f +6d65 7468 6575 735f 6576 616c 7561 746f +725f 6475 7261 7469 6f6e 5f6d 696c 6c69 +7365 636f 6e64 7312 2c54 6865 2064 7572 +6174 696f 6e20 666f 7220 616c 6c20 6576 +616c 7561 7469 6f6e 7320 746f 2065 7865 +6375 7465 2e18 0222 7122 6f08 0b11 0000 +0000 0000 2240 1a12 097b 14ae 47e1 7a84 +3f11 0000 0000 0000 0000 1a12 099a 9999 +9999 
99a9 3f11 0000 0000 0000 0000 1a12 +0900 0000 0000 00e0 3f11 0000 0000 0000 +0000 1a12 09cd cccc cccc ccec 3f11 0000 +0000 0000 f03f 1a12 09ae 47e1 7a14 aeef +3f11 0000 0000 0000 f03f a301 0a39 7072 +6f6d 6574 6865 7573 5f6c 6f63 616c 5f73 +746f 7261 6765 5f63 6865 636b 706f 696e +745f 6475 7261 7469 6f6e 5f6d 696c 6c69 +7365 636f 6e64 7312 5754 6865 2064 7572 +6174 696f 6e20 2869 6e20 6d69 6c6c 6973 +6563 6f6e 6473 2920 6974 2074 6f6f 6b20 +746f 2063 6865 636b 706f 696e 7420 696e +2d6d 656d 6f72 7920 6d65 7472 6963 7320 +616e 6420 6865 6164 2063 6875 6e6b 732e +1801 220b 1209 0900 0000 0000 0000 00f2 +010a 2870 726f 6d65 7468 6575 735f 6c6f +6361 6c5f 7374 6f72 6167 655f 6368 756e +6b5f 6f70 735f 746f 7461 6c12 3354 6865 +2074 6f74 616c 206e 756d 6265 7220 6f66 +2063 6875 6e6b 206f 7065 7261 7469 6f6e +7320 6279 2074 6865 6972 2074 7970 652e +1800 221b 0a0e 0a04 7479 7065 1206 6372 +6561 7465 1a09 0900 0000 0000 b880 4022 +1c0a 0f0a 0474 7970 6512 0770 6572 7369 +7374 1a09 0900 0000 0000 c05b 4022 180a +0b0a 0474 7970 6512 0370 696e 1a09 0900 +0000 0000 807b 4022 1e0a 110a 0474 7970 +6512 0974 7261 6e73 636f 6465 1a09 0900 +0000 0000 a06b 4022 1a0a 0d0a 0474 7970 +6512 0575 6e70 696e 1a09 0900 0000 0000 +807b 40c4 010a 3c70 726f 6d65 7468 6575 +735f 6c6f 6361 6c5f 7374 6f72 6167 655f +696e 6465 7869 6e67 5f62 6174 6368 5f6c +6174 656e 6379 5f6d 696c 6c69 7365 636f +6e64 7312 3751 7561 6e74 696c 6573 2066 +6f72 2062 6174 6368 2069 6e64 6578 696e +6720 6c61 7465 6e63 6965 7320 696e 206d +696c 6c69 7365 636f 6e64 732e 1802 2249 +2247 0801 1100 0000 0000 0000 001a 1209 +0000 0000 0000 e03f 1100 0000 0000 0000 +001a 1209 cdcc cccc cccc ec3f 1100 0000 +0000 0000 001a 1209 ae47 e17a 14ae ef3f +1100 0000 0000 0000 00bf 010a 2d70 726f +6d65 7468 6575 735f 6c6f 6361 6c5f 7374 +6f72 6167 655f 696e 6465 7869 6e67 5f62 +6174 6368 5f73 697a 6573 1241 5175 616e +7469 6c65 7320 666f 7220 696e 6465 7869 +6e67 2062 6174 6368 2073 697a 6573 2028 +6e75 6d62 6572 206f 6620 6d65 7472 
6963 +7320 7065 7220 6261 7463 6829 2e18 0222 +4922 4708 0111 0000 0000 0000 0040 1a12 +0900 0000 0000 00e0 3f11 0000 0000 0000 +0040 1a12 09cd cccc cccc ccec 3f11 0000 +0000 0000 0040 1a12 09ae 47e1 7a14 aeef +3f11 0000 0000 0000 0040 660a 3070 726f +6d65 7468 6575 735f 6c6f 6361 6c5f 7374 +6f72 6167 655f 696e 6465 7869 6e67 5f71 +7565 7565 5f63 6170 6163 6974 7912 2354 +6865 2063 6170 6163 6974 7920 6f66 2074 +6865 2069 6e64 6578 696e 6720 7175 6575 +652e 1801 220b 1209 0900 0000 0000 00d0 +406d 0a2e 7072 6f6d 6574 6865 7573 5f6c +6f63 616c 5f73 746f 7261 6765 5f69 6e64 +6578 696e 675f 7175 6575 655f 6c65 6e67 +7468 122c 5468 6520 6e75 6d62 6572 206f +6620 6d65 7472 6963 7320 7761 6974 696e +6720 746f 2062 6520 696e 6465 7865 642e +1801 220b 1209 0900 0000 0000 0000 0067 +0a2f 7072 6f6d 6574 6865 7573 5f6c 6f63 +616c 5f73 746f 7261 6765 5f69 6e67 6573 +7465 645f 7361 6d70 6c65 735f 746f 7461 +6c12 2554 6865 2074 6f74 616c 206e 756d +6265 7220 6f66 2073 616d 706c 6573 2069 +6e67 6573 7465 642e 1800 220b 1a09 0900 +0000 0080 27cd 40c3 010a 3770 726f 6d65 +7468 6575 735f 6c6f 6361 6c5f 7374 6f72 +6167 655f 696e 7661 6c69 645f 7072 656c +6f61 645f 7265 7175 6573 7473 5f74 6f74 +616c 1279 5468 6520 746f 7461 6c20 6e75 +6d62 6572 206f 6620 7072 656c 6f61 6420 +7265 7175 6573 7473 2072 6566 6572 7269 +6e67 2074 6f20 6120 6e6f 6e2d 6578 6973 +7465 6e74 2073 6572 6965 732e 2054 6869 +7320 6973 2061 6e20 696e 6469 6361 7469 +6f6e 206f 6620 6f75 7464 6174 6564 206c +6162 656c 2069 6e64 6578 6573 2e18 0022 +0b1a 0909 0000 0000 0000 0000 6f0a 2a70 +726f 6d65 7468 6575 735f 6c6f 6361 6c5f +7374 6f72 6167 655f 6d65 6d6f 7279 5f63 +6875 6e6b 6465 7363 7312 3254 6865 2063 +7572 7265 6e74 206e 756d 6265 7220 6f66 +2063 6875 6e6b 2064 6573 6372 6970 746f +7273 2069 6e20 6d65 6d6f 7279 2e18 0122 +0b12 0909 0000 0000 0020 8f40 9c01 0a26 +7072 6f6d 6574 6865 7573 5f6c 6f63 616c +5f73 746f 7261 6765 5f6d 656d 6f72 795f +6368 756e 6b73 1263 5468 6520 6375 7272 +656e 7420 6e75 6d62 6572 
206f 6620 6368 +756e 6b73 2069 6e20 6d65 6d6f 7279 2c20 +6578 636c 7564 696e 6720 636c 6f6e 6564 +2063 6875 6e6b 7320 2869 2e65 2e20 6368 +756e 6b73 2077 6974 686f 7574 2061 2064 +6573 6372 6970 746f 7229 2e18 0122 0b12 +0909 0000 0000 00e8 8d40 600a 2670 726f +6d65 7468 6575 735f 6c6f 6361 6c5f 7374 +6f72 6167 655f 6d65 6d6f 7279 5f73 6572 +6965 7312 2754 6865 2063 7572 7265 6e74 +206e 756d 6265 7220 6f66 2073 6572 6965 +7320 696e 206d 656d 6f72 792e 1801 220b +1209 0900 0000 0000 807a 40b7 010a 3570 +726f 6d65 7468 6575 735f 6c6f 6361 6c5f +7374 6f72 6167 655f 7065 7273 6973 745f +6c61 7465 6e63 795f 6d69 6372 6f73 6563 +6f6e 6473 1231 4120 7375 6d6d 6172 7920 +6f66 206c 6174 656e 6369 6573 2066 6f72 +2070 6572 7369 7374 696e 6720 6561 6368 +2063 6875 6e6b 2e18 0222 4922 4708 6f11 +1c2f dd24 e68c cc40 1a12 0900 0000 0000 +00e0 3f11 8d97 6e12 8360 3e40 1a12 09cd +cccc cccc ccec 3f11 0ad7 a370 3d62 6b40 +1a12 09ae 47e1 7a14 aeef 3f11 7b14 ae47 +e1b6 7240 6a0a 2f70 726f 6d65 7468 6575 +735f 6c6f 6361 6c5f 7374 6f72 6167 655f +7065 7273 6973 745f 7175 6575 655f 6361 +7061 6369 7479 1228 5468 6520 746f 7461 +6c20 6361 7061 6369 7479 206f 6620 7468 +6520 7065 7273 6973 7420 7175 6575 652e +1801 220b 1209 0900 0000 0000 0090 407a +0a2d 7072 6f6d 6574 6865 7573 5f6c 6f63 +616c 5f73 746f 7261 6765 5f70 6572 7369 +7374 5f71 7565 7565 5f6c 656e 6774 6812 +3a54 6865 2063 7572 7265 6e74 206e 756d +6265 7220 6f66 2063 6875 6e6b 7320 7761 +6974 696e 6720 696e 2074 6865 2070 6572 +7369 7374 2071 7565 7565 2e18 0122 0b12 +0909 0000 0000 0000 0000 ac01 0a29 7072 +6f6d 6574 6865 7573 5f6c 6f63 616c 5f73 +746f 7261 6765 5f73 6572 6965 735f 6f70 +735f 746f 7461 6c12 3454 6865 2074 6f74 +616c 206e 756d 6265 7220 6f66 2073 6572 +6965 7320 6f70 6572 6174 696f 6e73 2062 +7920 7468 6569 7220 7479 7065 2e18 0022 +1b0a 0e0a 0474 7970 6512 0663 7265 6174 +651a 0909 0000 0000 0000 0040 222a 0a1d +0a04 7479 7065 1215 6d61 696e 7465 6e61 +6e63 655f 696e 5f6d 656d 6f72 791a 0909 +0000 0000 0000 
1440 d601 0a2d 7072 6f6d +6574 6865 7573 5f6e 6f74 6966 6963 6174 +696f 6e73 5f6c 6174 656e 6379 5f6d 696c +6c69 7365 636f 6e64 7312 584c 6174 656e +6379 2071 7561 6e74 696c 6573 2066 6f72 +2073 656e 6469 6e67 2061 6c65 7274 206e +6f74 6966 6963 6174 696f 6e73 2028 6e6f +7420 696e 636c 7564 696e 6720 6472 6f70 +7065 6420 6e6f 7469 6669 6361 7469 6f6e +7329 2e18 0222 4922 4708 0011 0000 0000 +0000 0000 1a12 0900 0000 0000 00e0 3f11 +0000 0000 0000 0000 1a12 09cd cccc cccc +ccec 3f11 0000 0000 0000 0000 1a12 09ae +47e1 7a14 aeef 3f11 0000 0000 0000 0000 +680a 2770 726f 6d65 7468 6575 735f 6e6f +7469 6669 6361 7469 6f6e 735f 7175 6575 +655f 6361 7061 6369 7479 122e 5468 6520 +6361 7061 6369 7479 206f 6620 7468 6520 +616c 6572 7420 6e6f 7469 6669 6361 7469 +6f6e 7320 7175 6575 652e 1801 220b 1209 +0900 0000 0000 0059 4067 0a25 7072 6f6d +6574 6865 7573 5f6e 6f74 6966 6963 6174 +696f 6e73 5f71 7565 7565 5f6c 656e 6774 +6812 2f54 6865 206e 756d 6265 7220 6f66 +2061 6c65 7274 206e 6f74 6966 6963 6174 +696f 6e73 2069 6e20 7468 6520 7175 6575 +652e 1801 220b 1209 0900 0000 0000 0000 +009e 020a 3070 726f 6d65 7468 6575 735f +7275 6c65 5f65 7661 6c75 6174 696f 6e5f +6475 7261 7469 6f6e 5f6d 696c 6c69 7365 +636f 6e64 7312 2354 6865 2064 7572 6174 +696f 6e20 666f 7220 6120 7275 6c65 2074 +6f20 6578 6563 7574 652e 1802 2260 0a15 +0a09 7275 6c65 5f74 7970 6512 0861 6c65 +7274 696e 6722 4708 3711 0000 0000 0000 +2840 1a12 0900 0000 0000 00e0 3f11 0000 +0000 0000 0000 1a12 09cd cccc cccc ccec +3f11 0000 0000 0000 0000 1a12 09ae 47e1 +7a14 aeef 3f11 0000 0000 0000 0840 2261 +0a16 0a09 7275 6c65 5f74 7970 6512 0972 +6563 6f72 6469 6e67 2247 0837 1100 0000 +0000 002e 401a 1209 0000 0000 0000 e03f +1100 0000 0000 0000 001a 1209 cdcc cccc +cccc ec3f 1100 0000 0000 0000 001a 1209 +ae47 e17a 14ae ef3f 1100 0000 0000 0008 +4069 0a29 7072 6f6d 6574 6865 7573 5f72 +756c 655f 6576 616c 7561 7469 6f6e 5f66 +6169 6c75 7265 735f 746f 7461 6c12 2d54 +6865 2074 6f74 616c 206e 756d 6265 7220 +6f66 
2072 756c 6520 6576 616c 7561 7469 +6f6e 2066 6169 6c75 7265 732e 1800 220b +1a09 0900 0000 0000 0000 0060 0a21 7072 +6f6d 6574 6865 7573 5f73 616d 706c 6573 +5f71 7565 7565 5f63 6170 6163 6974 7912 +2c43 6170 6163 6974 7920 6f66 2074 6865 +2071 7565 7565 2066 6f72 2075 6e77 7269 +7474 656e 2073 616d 706c 6573 2e18 0122 +0b12 0909 0000 0000 0000 b040 da01 0a1f +7072 6f6d 6574 6865 7573 5f73 616d 706c +6573 5f71 7565 7565 5f6c 656e 6774 6812 +a701 4375 7272 656e 7420 6e75 6d62 6572 +206f 6620 6974 656d 7320 696e 2074 6865 +2071 7565 7565 2066 6f72 2075 6e77 7269 +7474 656e 2073 616d 706c 6573 2e20 4561 +6368 2069 7465 6d20 636f 6d70 7269 7365 +7320 616c 6c20 7361 6d70 6c65 7320 6578 +706f 7365 6420 6279 206f 6e65 2074 6172 +6765 7420 6173 206f 6e65 206d 6574 7269 +6320 6661 6d69 6c79 2028 692e 652e 206d +6574 7269 6373 206f 6620 7468 6520 7361 +6d65 206e 616d 6529 2e18 0122 0b12 0909 +0000 0000 0000 0000 d902 0a29 7072 6f6d +6574 6865 7573 5f74 6172 6765 745f 696e +7465 7276 616c 5f6c 656e 6774 685f 7365 +636f 6e64 7312 2141 6374 7561 6c20 696e +7465 7276 616c 7320 6265 7477 6565 6e20 +7363 7261 7065 732e 1802 2282 010a 0f0a +0869 6e74 6572 7661 6c12 0331 3573 226f +0804 1100 0000 0000 804d 401a 1209 7b14 +ae47 e17a 843f 1100 0000 0000 002c 401a +1209 9a99 9999 9999 a93f 1100 0000 0000 +002c 401a 1209 0000 0000 0000 e03f 1100 +0000 0000 002e 401a 1209 cdcc cccc cccc +ec3f 1100 0000 0000 002e 401a 1209 ae47 +e17a 14ae ef3f 1100 0000 0000 002e 4022 +8101 0a0e 0a08 696e 7465 7276 616c 1202 +3173 226f 083a 1100 0000 0000 003c 401a +1209 7b14 ae47 e17a 843f 1100 0000 0000 +0000 001a 1209 9a99 9999 9999 a93f 1100 +0000 0000 0000 001a 1209 0000 0000 0000 +e03f 1100 0000 0000 0000 001a 1209 cdcc +cccc cccc ec3f 1100 0000 0000 00f0 3f1a +1209 ae47 e17a 14ae ef3f 1100 0000 0000 +00f0 3f \ No newline at end of file diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/common/expfmt/testdata/protobuf.gz 
juju-core-2.0.0/src/github.com/prometheus/common/expfmt/testdata/protobuf.gz --- juju-core-2.0~beta15/src/github.com/prometheus/common/expfmt/testdata/protobuf.gz 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/common/expfmt/testdata/protobuf.gz 2016-10-13 14:32:23.000000000 +0000 @@ -0,0 +1,129 @@ +1f8b 0808 efa0 c754 0003 7072 6f74 6f62 +7566 00ed 594d 8c1c c515 9eb1 8d3d 5b86 +6037 265e 8c4d ca03 c4bb ceee cc9a 9f58 +01cc f6ca 4424 041b 8837 21c8 24ed daee +9a99 cef6 1f55 d578 c7e4 b004 0e39 8088 +8448 048a 124b 4442 9110 e110 25b9 c54a +9072 01c5 9724 4a24 2472 413e 448a 8592 +1b87 bcea aeda eeea 99d9 3530 49a4 68e7 +b0bb 5355 fdde abf7 bef7 bdf7 7a3f 6ca0 +664f 88c4 61f4 8994 72e1 7829 23c2 8f23 +27f4 5d16 73ea c691 c7ad cf2d f628 fed2 +e2e2 c358 9dc3 0111 3472 7dca b11f e1f2 +d9d6 e496 e6a3 e86a b4a3 4722 2fa0 ccaa +b79b f737 6abb 6bea b3cf 9ac8 ff78 6fbe +bcf6 cedb f2f3 7763 ed8d fbff 766e cf1b +ff28 d69a df44 5621 7847 9bc0 2fc1 c727 +7e09 ed2d c45f dd26 89df 0ea9 60be 3b46 +1d67 d0f5 850e 94e9 008f b2fe f834 74d0 +8d85 865d 8506 8791 a84b ffa3 de12 8475 +e938 2352 f116 208c c701 e563 84d4 e368 +77a1 617b bbcb 48d2 1b9f f4d3 6857 21fd +aa76 8f92 647c c2bf 85ae 2b84 37da 5c40 +e6ba 6374 8de9 fc84 c590 0c3d 9aca f0de +bdfb f40b bffd 5763 fe9f 7659 8314 f0fb +9fbf 6897 35b4 dfbd 65fb d397 7f60 9735 +1c43 7f7e f5cd 975e b3df 6fa0 bd06 fb70 +ff1c 7596 fa82 720b 0f50 8edc cce8 263b +b0c9 339b 3cb3 c933 5afa ff2f cfc8 13f6 +5b17 ed01 0d73 cc1e d090 af99 1a60 ed3b +e8ba 32cd 7047 c482 04d6 cd8b f217 8ed2 +7089 321c 770c bae1 3824 1e6d 4dd6 9af7 +a29d 689b 1b7b d4da 7adb dcdc 085b d135 +68bb fc33 f6ac ad00 cd7d 13b9 b5ab 27ec +4b0d 34a9 b4f3 0470 45cb 2c77 b0c4 72f9 +ee26 cd7d 02ec 6cd2 dc26 cd7d 6ce1 ff73 +9a7b ef17 1f0e d2dc 1d3f 19a4 b9c6 f941 +9a43 e7ed c7d1 0d20 d5a5 9c3b 6e92 3a6a +2053 6437 9793 5dca 81ea c006 ccfb 5cd0 +101f 7ff8 6b58 f821 d04e 4223 2169 676d +8eab 3577 028d fd34 91dd dac5 f987 90a5 +8577 6316 a7c2 8f80 bf0e 
9f5c 23cf 6215 +8b1e 11d8 4d19 0391 411f d315 9f8b d664 +bdb9 d352 b458 7bc4 7e00 5dab e585 64c5 +e9c0 9439 7582 acf8 611a 9618 3906 ab70 +c70f 28f6 2877 999f 8898 7153 d405 fb38 +daa5 45c9 f399 2c7c f2a3 c838 669f 4407 +b40c 6062 df03 cb9d 9086 31e4 79ce d437 +7d55 2de3 7c39 e3e9 124d 97c4 7de5 7b0b +2eda a7c5 018e 9870 a48f 7544 accf 9f92 +6bb9 dfc1 4040 0156 a741 6ae4 529c 46fe +0aa6 49ec f68c 88e4 3a8e a1bd b397 8efc +71e1 41b4 5feb 78d2 6722 2581 69f1 81af +e7ab 1b1a 8cad 0b0b 0e3a 5420 d2f1 22b0 +db73 8238 5e4e 13a7 43fc 2005 af28 24dd +2a6b 5611 a2fb 4e9e 9a3d 751f cecf 627d +56c3 47a3 ff21 f499 51f2 b5dc 03eb c8ad +c86b d87f a8a3 c325 81f4 4912 a404 025b +7e81 1104 bef6 f88c 94ad b770 2786 1c08 +02ac 9e82 25c0 6c0c 38a5 6e2a a82c b94f +34e3 c64e 95ba 4d99 6c4f ed91 e9f6 ac91 +e2af bc2c 3f3f 9bff 88f4 7079 7e90 1e2e +cfbf 5a47 5f28 5d28 885d 8827 871b 912e +75dc 1e75 9793 d88f c488 fb3d 6adc 6f2a +7b27 536c 4f63 1fd0 068e 94b7 2c64 0118 +6615 3654 5dce 9801 58d5 8353 69b4 5cc9 +925a ed83 3a9a 5ac7 4878 0432 50c7 f376 +6993 a8b4 58d9 2199 924c f97d a92f f1ef +332c fa49 d66e dd88 3e85 b6c9 2fd6 7697 +5122 a88e faaf 57ed e67e 74ad dadc 0122 +38f0 8ade bd70 da6e 4eca 4e2d dbdd 9af8 +d15a 0ff6 94dd bc09 ca52 be33 21a0 6e73 +d9ce e9fd f3cb 7673 1ff4 6ff9 fe55 6964 +3efb 561d dd33 f2ce 7ee4 01bb 455d 6789 +08b7 e7e4 6fc5 fa66 6c8e 3e92 9248 00ff +f00c 78d9 49ac 1fac be48 2b9e 9330 fc32 +d486 fa58 aacf 6fea 68f6 4a6f 9175 a0d6 +8269 f69a c1b9 fd79 973a 5504 5623 08c2 +921f 991e b8c0 6071 cbd7 aa17 182c 6eb0 +d641 731b db0f 8d59 0a40 2409 717d d187 +061f 10a8 bf69 a65d bb48 76d8 44f8 453b +44ad 2b55 13d0 a82b 7a39 b50c fae1 2cf1 +85d4 0219 b7a4 9452 af9a 4f5d d45e 475b +17c6 10ea 399c 8449 60b2 6f35 abd4 11ac +9f29 b3e5 eaa1 77ec dfd5 d1d1 7514 010d +fa9e 9330 1ac4 c4ab 4e49 fd61 0ad5 d962 +5862 b443 1953 1726 388a a3d9 acec cb82 +092d 07e0 bb85 177b 3e98 2849 46fa c377 +73b2 9215 3a15 1ea4 8107 c9b0 4403 e5ac +8112 121b 8c6f de41 15be 8c5d 6495 e7d6 +6d59 ecf3 1e64 
807f 4a8d 4096 76d9 d346 +70f0 0bf6 8fea e8b3 57a4 905b ee3a ca4a +1a66 a0c4 b841 ea49 37b9 411c 51cd b3c0 +d82d dad2 5fce fa30 47a6 02dc 58d8 396d +5877 e979 fbcc c6c6 e57e b70e 0d37 2edf +1d71 fdd5 73f6 afea e8ce 911a 14f9 9608 +aff4 df82 230b 98a7 6148 5896 7305 c149 +1a51 0f4a 0f50 023c 925d 5933 45bc 7b7f +fbdd 5bde 7fee 6d83 299e ff61 643d 73e6 +5e83 29a0 254d 8e2d 2d1b 4c91 95e8 5f32 +fbdb eb24 95b6 bb42 1453 05c6 ab74 a19e +18c6 16df b7cf ad43 aaa6 2a45 1677 ad0b +14cd 1910 930d 54d7 6aaf d7d1 f448 dd79 +6c4b b5f8 8ea1 ac91 23e0 6315 6360 e4e6 +6174 406d 5e1f 12e8 2768 44a0 7905 3e51 +005c 3bbb c7fe 9359 7ea2 58f8 1d45 007c +78d5 fcc6 83f9 2adc be5c 8638 8db2 f4c9 +de55 6043 0e54 a358 f634 3ac3 3c16 2709 +a498 7168 ad2a 8d67 a8eb 196d b379 ad0a +c65a c38a d1b0 6b0c 09f7 6376 17dd ba81 +2285 b0b6 598e 8629 50f0 1a0a ab1f 6f31 +ea2c 4b03 ea14 6df2 88ee f3e6 c1ee 1acb +272b 4db5 1c80 2732 8919 681a 996d 1029 +88c6 51e5 d1a9 613d c215 46a3 6137 09fa +7459 c304 0303 9967 aa68 7d22 15be 9175 +55f7 5426 a5d9 6159 9739 a678 66e4 c474 +061d 2c69 d24d 4005 5433 c72b 80ca f6b3 +10a4 d159 e60b c821 dd1d 98a1 7ed3 fe6b +dd98 c94c 0d0a 4daf d58f 0f90 952f 6868 +8268 843e fc45 c9f0 f238 76e3 3061 8017 +9ecd 5dba 5da1 2b09 140d 4fd2 0e14 439c +bfee c284 67df f246 0adc 0350 ebab 02a9 +9b2b 7559 9003 5887 1fd3 5518 ff65 8b11 +a75c b223 398a 81e7 d5ed d6e6 f183 0b6e +3628 eb7d 2042 2ace 5279 1597 9124 7f0b +fbdd 3acc 1e0d 7dc4 da7a e44e 0e43 e2b6 +1c19 ab27 860c 8933 f6e0 9038 3304 7dad +214d 706b 4813 dcb2 9b4f d781 900b 23b6 +1c91 36dc a5f6 eff9 af0c aaff 06f1 48e5 +4433 2000 00 \ No newline at end of file diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/common/expfmt/testdata/test.gz juju-core-2.0.0/src/github.com/prometheus/common/expfmt/testdata/test.gz --- juju-core-2.0~beta15/src/github.com/prometheus/common/expfmt/testdata/test.gz 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/common/expfmt/testdata/test.gz 2016-10-13 
14:32:23.000000000 +0000 @@ -0,0 +1,163 @@ +1f8b 0808 2aa1 c754 0003 7465 7874 00b5 +5b5d 939b 3816 7def 5fa1 ea79 99a9 4d3c +601b db3c f4c3 5426 55f3 309b ca6e 7ab7 +6a9e 281a d436 150c 04c4 a4bd 5df3 dff7 +4a88 361f 025d 094f 1e92 34e8 1cae 8ea4 +ab7b 04fd 03f9 ede3 ef9f c989 b122 28e9 +b79a 562c 88eb 3264 499e 05e7 242a f38a +4679 1657 e4f1 44c9 6f8f 8f9f 896c 46d2 +90d1 2c4a 6845 928c 749b aeee 7e20 8f7f +7cfe 8861 adea f339 2c2f 77fa a6af a730 +8b53 5a3e dcff 7cff ee5b 1d66 2c49 e9c3 +bdb3 f2ee ff22 ce12 027f 3101 9621 80ee +7659 90a8 28af 3366 8eeb 2042 f887 558b +7553 d158 a8a7 a4b1 d450 7259 2a69 84ee +e28a e4e7 3365 6512 dd40 d429 2e1b 6527 +b96c e5ed 10da 6a6c 4c31 0043 cbf2 7213 +9915 4c96 22ab 9816 48dc d02d 10d8 8440 +050d ca30 3bd2 db89 ace2 5b22 b592 6fa9 +e092 74a9 ec46 3403 0216 9647 7a8b cc3c +c565 29ba 9a6b 81e0 2de1 02b1 cd28 3a60 +f8b9 ca53 5a2d 2f1c 2698 2c44 9e62 b294 +f84a 6729 b029 4107 7a2c c3e2 b458 5a05 +8b85 ac2a 164b 491b 2a4b 394d c01d d889 +86c5 6225 c724 1642 2a48 2c75 144c 9632 +1a60 3ba8 8ac1 ed68 f96a 57f2 5868 a9e6 +b194 b325 b354 d40c 7e05 1665 0e45 dc89 +d68a bdca dd38 fbd5 7aef dd84 90cb e21e +bcc3 6ab7 59df 8690 336e 9cc3 7eb5 396c +8df5 eeb0 425c 7bff 70d8 ad3c 47fe 712d +46a0 4fe8 fa60 96c7 16bc 4afe 4783 a70b +a30a dfcd ef09 cf2d eeab cd76 07af 74d8 +d7fb 26b6 1a81 524c 6a0c 6a16 a675 cd9d +a67a abac 0c07 e98f d158 ac0c 5827 3c29 +c694 819d 9144 0fb1 34ba 6604 6889 4c2c +edb4 4e73 2674 4e2c 1cce cab1 9ac0 4dd4 +427a d359 ad26 fca4 4629 2d6a 81f5 3427 +31d6 0c6b 32f5 ca4d 5942 8c7e 7aac a587 +3423 3051 0fed 1667 959b f477 1ad5 1038 +2b33 6802 c7aa 6560 fb26 b59a b16a 334a +a150 c6ae 0e0b c5ea 83f4 6f93 da4c f8ae +195d b408 537b 8644 6215 c119 b149 41d4 +0e6a 460f 1dc0 c267 e1c1 5851 d08e 6a52 +9749 1f34 230d 0283 334c 6bdf b527 f017 +1368 1866 0cd0 66bb 3d1c b07a 619c 4e15 +b09c 8529 7914 7f67 f5f9 8996 247f ee39 +9e8a 9cc3 982a 8d4e 0b17 4fa6 e59d e2de +6b94 c7d0 edb5 e3dc bf53 4ac3 ff93 c70f +f7b0 8728 e3ac 0ac8 9c74 
c292 3537 359e +6ccc 3030 65a3 0638 5786 87f9 96b0 79dc +8c31 1bb7 9d73 6673 1169 ad99 2918 ad85 +de9c e914 195b 2dbd 2e08 8cb1 3fb3 62c0 +eb84 7368 5ab1 d456 0ba1 1812 6868 d22c +f046 9269 6d1a 46b0 91e3 c2c9 a587 5939 +356b 1673 e1f4 5e0d 2ddf d870 1988 8800 +1bdb 352b 0623 0911 860d 239f c279 e1a4 +c300 0d3d 9b05 1e2d 19ca b5e9 0453 1a30 +bd5c 3898 8171 33c4 a245 d25a 379d 4023 +27a6 1747 0fc1 bb37 3328 5a16 9d7f d3a9 +32f4 637a 51b4 0823 0b67 8c46 2b83 3071 +3a71 148e 4caf 0f06 84f4 71ce d65f 4021 +7c98 e31d 9650 341c bb2d 52b1 9e27 5b6f +f79d 7758 5ae1 a6fc 1c5c 8f68 05cd 8b3a +685f 7a75 5d5d 5d81 a703 1252 5d2a 46cf +e4c3 e7ff 1096 9cc1 3515 3463 dc35 0d3f +1c9d 666c 8dde 740b 1819 6f18 d931 2ff3 +9a25 1938 af4f 6f16 b373 919d 4246 a2ba +2c21 9ef4 42e8 4b52 b151 309d f6c7 b03e +d23b c58d bd33 7cf4 397c 099e e38a fc33 +7c49 cef5 b963 7173 e83d 7986 7124 31ad +a232 2958 5e8e 2568 f1fd 47b6 570f aebf +1e3e 91f3 8a9b 9f0c 1ff5 06ec 3feb edf2 +7a34 e230 6992 1834 0bce f49c 432d d498 +db7f cbab a4b9 2acc f1d8 1bcf 73f4 4350 +b7f1 569b c3de f1fc 35fd 87b3 1f86 068b +bc64 019f 66ed fc20 5ff8 a566 e681 2630 +91db c610 6116 5152 67c9 0ba1 451e 9de6 +e6a4 82b8 1fac a281 bbda aed7 9bdd c1df +1e36 3b88 7624 e49f 49c9 ea30 edf7 efbf +cd45 9c8c 4a86 7e60 ca26 de6a eb6e f707 +dfe5 2a1e 3a71 c9a5 1ec4 1974 290e d23c +ff5a 17c1 7398 a435 0c47 bbc0 41c4 eb8c +fef5 d397 f75f 7e25 4d53 d236 ed86 8a22 +edac 7154 7b47 1735 225a 7d94 d8e8 da76 +7b45 54f4 cf30 ad43 587c dd4f 05d2 34e9 +7e63 dfde 21cf 3964 cd34 2512 0497 2051 +e590 9c68 5433 aa8a 5747 df9e 3ae1 21af +ddbd c671 c596 698b f696 a017 81c5 2725 +d660 5334 df70 89bb 3641 8839 45d6 1bc5 +9449 f308 966c 05d8 f048 83e8 44a3 af45 +9e64 0c33 837e 14bf 9871 bdfb 1349 20ff +c12c e5f3 e84a 0549 e5bd cc31 f218 45ec +d650 46c6 d0aa cebe 2a17 8761 606f a9c8 +12af 5ae4 430a 0815 76ab ee6a 6783 6365 +d186 6f87 a55c 504f 17be 1124 2561 9742 +b9a6 e69f a148 06b3 8057 fe98 87fb a8a4 +21e3 8706 9e7f 30c5 42ec 1594 27e2 6ba4 +ad31 38c9 00e8 
af1d 5320 2bc3 ace2 27e9 +00df ba9e 29bc ceae 4fd6 8d63 92c5 5080 +65c7 e029 64d1 2968 7ecd e8d2 9f0d ff92 +0bb4 1259 5234 242d 6ef8 8b49 5798 7e7c +31cf 5664 5163 92f9 dcb6 8cce bf31 dd72 +3e91 1117 5234 29d2 359d 3dcd 8b99 fe74 +799b 28cd bc69 9afc 784d 126d 1284 95d6 +34f9 c978 e234 9ca6 3345 a046 5363 bd00 +ef2f c55b 1088 d136 c518 0fef b79a d690 +6dc2 228c 1276 11c9 feed 0759 ddbf 8db3 +686b 3086 036e cdd6 3505 7377 fc7b 53c3 +0ea5 343b b2d3 a052 6d27 e4f7 3061 bc3f +b07b 3fc9 eed1 d8b8 5ff2 1166 bd92 204c +f63e 5270 f971 5085 e722 a573 9bb1 6c41 +5a08 a627 4a72 ed2e 3c81 db38 dbbd bee6 +4a32 a8de 9238 284a 9ae6 613c 7a73 ade8 +996c 7a7d 815d d267 5a96 72ec 4292 e5d9 +7b71 c8c0 5d72 454b d8ab 5640 9480 16bc +f6e2 439b 444d 0dc7 dd7b cd62 4889 316c +6c4f 3495 e38e dacc 6603 47a8 368b d7cf +0569 3445 49c0 0f1e 9af2 549e b38c aab2 +ced1 84d8 b805 58df cbf1 4334 337b 0c70 +1dcf 37ea cc6c 473a d1bf 03b7 16a5 75cc +073e 4af3 8cb6 0535 94e6 2bba 6a7f f89e +b013 0c32 4c8c ab06 883d a71f 9141 af79 +8f11 8598 8434 f373 a2c7 f2a6 f978 4920 +2e6a d978 bbd6 e753 591e 778a 88ce 6f9b +ffd2 6ec9 3cf4 6b99 c88b 0289 e323 4543 +a80a 8450 fade cc3e 4ebb ffcf a147 75c0 +c659 6df6 fb1b 9035 47c6 9b95 b7f1 6fc1 +26e8 76eb dd6a bbdb d8f1 3515 8303 c3bb +9af5 16b3 1cb2 82d8 e3a7 88a2 8490 9971 +5048 4800 b68e 98e0 d74c f509 14ac 54d3 +1e75 6a88 c914 d596 12b0 7017 f710 5750 +2831 fa24 d42c 7d8d ad97 f9c1 ded7 8f9e +a2dd 1c87 88a1 b39f 2980 27a0 e730 8147 +6661 16f1 ad57 a63e f1a6 4521 5296 b3e4 +59d6 0895 daa7 fede 5c24 df7a e6a7 a299 +d88e c467 46a4 4703 1e28 e787 41ed 8e15 +9779 51c0 96d5 6ba4 dc97 10d1 2872 a11e +356f 930d f123 1f6b 8ab7 2018 3b5f 04a6 +c964 aaa5 d107 232c 906a 9427 d7f8 2cfb +6875 cfb6 761d 6cf8 4ac3 a30a 5b66 2aa3 +e8a7 32d3 4c5b 55dc 659d d2e0 7a0c 8f3e +bc27 1ca8 39b3 c771 2b56 0f0a f82a 5a35 +f945 880a eb5a f5ae fff6 bca3 c572 2bde +d189 048a 58bc 0557 91ff 3538 aac7 b135 +6fc6 27f8 fa25 8c71 bf4b b854 c67f c340 +4d10 2f1f a929 62f1 8bb7 8b87 eaca 0eda +9a4b 
3b1e ab1e a1eb 2116 bce2 ade7 b004 +114b fd0a 997d fba9 a157 d41e 1a84 2a69 +b547 1d83 ccfc 61b0 4388 db22 5dd5 d9f7 +3261 b01f b507 33aa d027 5847 1976 a2dd +d6f1 77da 5865 26fe 30aa 5d13 46cf fd8d +6022 70f2 915b 38de 1cc4 3c17 25cc 854a +bc4b 6d8f 9ce8 4b01 c621 e665 22b8 72d2 +7c8e 48c2 4afc d41c b7c1 08c2 34ba 48a7 +de1e c149 d580 07f6 2bf8 4b59 0e29 bba3 +9168 66fb 69a2 0b78 7558 c214 904d df3e +2ef8 2512 5f09 b4b7 a1f6 a5ec 3be5 6a44 +6558 a887 5143 a9d8 6ee6 11af edf5 877b +d71b 7ca2 245e 1bbb db1b 9179 3724 f346 +19c5 9ecb bf25 9729 9948 997d 42fe 7ad0 +84a1 c992 238e b55d 8f54 53c0 b90d d568 +1fb4 a6ba 1dd3 e813 017b 2643 aae1 c8f3 +41f3 168d 7bf3 71df feee ff2d f9e8 431a +5200 00 \ No newline at end of file diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/common/expfmt/testdata/text juju-core-2.0.0/src/github.com/prometheus/common/expfmt/testdata/text --- juju-core-2.0~beta15/src/github.com/prometheus/common/expfmt/testdata/text 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/common/expfmt/testdata/text 2016-10-13 14:32:23.000000000 +0000 @@ -0,0 +1,322 @@ +# HELP http_request_duration_microseconds The HTTP request latencies in microseconds. 
+# TYPE http_request_duration_microseconds summary +http_request_duration_microseconds{handler="/",quantile="0.5"} 0 +http_request_duration_microseconds{handler="/",quantile="0.9"} 0 +http_request_duration_microseconds{handler="/",quantile="0.99"} 0 +http_request_duration_microseconds_sum{handler="/"} 0 +http_request_duration_microseconds_count{handler="/"} 0 +http_request_duration_microseconds{handler="/alerts",quantile="0.5"} 0 +http_request_duration_microseconds{handler="/alerts",quantile="0.9"} 0 +http_request_duration_microseconds{handler="/alerts",quantile="0.99"} 0 +http_request_duration_microseconds_sum{handler="/alerts"} 0 +http_request_duration_microseconds_count{handler="/alerts"} 0 +http_request_duration_microseconds{handler="/api/metrics",quantile="0.5"} 0 +http_request_duration_microseconds{handler="/api/metrics",quantile="0.9"} 0 +http_request_duration_microseconds{handler="/api/metrics",quantile="0.99"} 0 +http_request_duration_microseconds_sum{handler="/api/metrics"} 0 +http_request_duration_microseconds_count{handler="/api/metrics"} 0 +http_request_duration_microseconds{handler="/api/query",quantile="0.5"} 0 +http_request_duration_microseconds{handler="/api/query",quantile="0.9"} 0 +http_request_duration_microseconds{handler="/api/query",quantile="0.99"} 0 +http_request_duration_microseconds_sum{handler="/api/query"} 0 +http_request_duration_microseconds_count{handler="/api/query"} 0 +http_request_duration_microseconds{handler="/api/query_range",quantile="0.5"} 0 +http_request_duration_microseconds{handler="/api/query_range",quantile="0.9"} 0 +http_request_duration_microseconds{handler="/api/query_range",quantile="0.99"} 0 +http_request_duration_microseconds_sum{handler="/api/query_range"} 0 +http_request_duration_microseconds_count{handler="/api/query_range"} 0 +http_request_duration_microseconds{handler="/api/targets",quantile="0.5"} 0 +http_request_duration_microseconds{handler="/api/targets",quantile="0.9"} 0 
+http_request_duration_microseconds{handler="/api/targets",quantile="0.99"} 0 +http_request_duration_microseconds_sum{handler="/api/targets"} 0 +http_request_duration_microseconds_count{handler="/api/targets"} 0 +http_request_duration_microseconds{handler="/consoles/",quantile="0.5"} 0 +http_request_duration_microseconds{handler="/consoles/",quantile="0.9"} 0 +http_request_duration_microseconds{handler="/consoles/",quantile="0.99"} 0 +http_request_duration_microseconds_sum{handler="/consoles/"} 0 +http_request_duration_microseconds_count{handler="/consoles/"} 0 +http_request_duration_microseconds{handler="/graph",quantile="0.5"} 0 +http_request_duration_microseconds{handler="/graph",quantile="0.9"} 0 +http_request_duration_microseconds{handler="/graph",quantile="0.99"} 0 +http_request_duration_microseconds_sum{handler="/graph"} 0 +http_request_duration_microseconds_count{handler="/graph"} 0 +http_request_duration_microseconds{handler="/heap",quantile="0.5"} 0 +http_request_duration_microseconds{handler="/heap",quantile="0.9"} 0 +http_request_duration_microseconds{handler="/heap",quantile="0.99"} 0 +http_request_duration_microseconds_sum{handler="/heap"} 0 +http_request_duration_microseconds_count{handler="/heap"} 0 +http_request_duration_microseconds{handler="/static/",quantile="0.5"} 0 +http_request_duration_microseconds{handler="/static/",quantile="0.9"} 0 +http_request_duration_microseconds{handler="/static/",quantile="0.99"} 0 +http_request_duration_microseconds_sum{handler="/static/"} 0 +http_request_duration_microseconds_count{handler="/static/"} 0 +http_request_duration_microseconds{handler="prometheus",quantile="0.5"} 1307.275 +http_request_duration_microseconds{handler="prometheus",quantile="0.9"} 1858.632 +http_request_duration_microseconds{handler="prometheus",quantile="0.99"} 3087.384 +http_request_duration_microseconds_sum{handler="prometheus"} 179886.5000000001 +http_request_duration_microseconds_count{handler="prometheus"} 119 +# HELP 
http_request_size_bytes The HTTP request sizes in bytes. +# TYPE http_request_size_bytes summary +http_request_size_bytes{handler="/",quantile="0.5"} 0 +http_request_size_bytes{handler="/",quantile="0.9"} 0 +http_request_size_bytes{handler="/",quantile="0.99"} 0 +http_request_size_bytes_sum{handler="/"} 0 +http_request_size_bytes_count{handler="/"} 0 +http_request_size_bytes{handler="/alerts",quantile="0.5"} 0 +http_request_size_bytes{handler="/alerts",quantile="0.9"} 0 +http_request_size_bytes{handler="/alerts",quantile="0.99"} 0 +http_request_size_bytes_sum{handler="/alerts"} 0 +http_request_size_bytes_count{handler="/alerts"} 0 +http_request_size_bytes{handler="/api/metrics",quantile="0.5"} 0 +http_request_size_bytes{handler="/api/metrics",quantile="0.9"} 0 +http_request_size_bytes{handler="/api/metrics",quantile="0.99"} 0 +http_request_size_bytes_sum{handler="/api/metrics"} 0 +http_request_size_bytes_count{handler="/api/metrics"} 0 +http_request_size_bytes{handler="/api/query",quantile="0.5"} 0 +http_request_size_bytes{handler="/api/query",quantile="0.9"} 0 +http_request_size_bytes{handler="/api/query",quantile="0.99"} 0 +http_request_size_bytes_sum{handler="/api/query"} 0 +http_request_size_bytes_count{handler="/api/query"} 0 +http_request_size_bytes{handler="/api/query_range",quantile="0.5"} 0 +http_request_size_bytes{handler="/api/query_range",quantile="0.9"} 0 +http_request_size_bytes{handler="/api/query_range",quantile="0.99"} 0 +http_request_size_bytes_sum{handler="/api/query_range"} 0 +http_request_size_bytes_count{handler="/api/query_range"} 0 +http_request_size_bytes{handler="/api/targets",quantile="0.5"} 0 +http_request_size_bytes{handler="/api/targets",quantile="0.9"} 0 +http_request_size_bytes{handler="/api/targets",quantile="0.99"} 0 +http_request_size_bytes_sum{handler="/api/targets"} 0 +http_request_size_bytes_count{handler="/api/targets"} 0 +http_request_size_bytes{handler="/consoles/",quantile="0.5"} 0 
+http_request_size_bytes{handler="/consoles/",quantile="0.9"} 0 +http_request_size_bytes{handler="/consoles/",quantile="0.99"} 0 +http_request_size_bytes_sum{handler="/consoles/"} 0 +http_request_size_bytes_count{handler="/consoles/"} 0 +http_request_size_bytes{handler="/graph",quantile="0.5"} 0 +http_request_size_bytes{handler="/graph",quantile="0.9"} 0 +http_request_size_bytes{handler="/graph",quantile="0.99"} 0 +http_request_size_bytes_sum{handler="/graph"} 0 +http_request_size_bytes_count{handler="/graph"} 0 +http_request_size_bytes{handler="/heap",quantile="0.5"} 0 +http_request_size_bytes{handler="/heap",quantile="0.9"} 0 +http_request_size_bytes{handler="/heap",quantile="0.99"} 0 +http_request_size_bytes_sum{handler="/heap"} 0 +http_request_size_bytes_count{handler="/heap"} 0 +http_request_size_bytes{handler="/static/",quantile="0.5"} 0 +http_request_size_bytes{handler="/static/",quantile="0.9"} 0 +http_request_size_bytes{handler="/static/",quantile="0.99"} 0 +http_request_size_bytes_sum{handler="/static/"} 0 +http_request_size_bytes_count{handler="/static/"} 0 +http_request_size_bytes{handler="prometheus",quantile="0.5"} 291 +http_request_size_bytes{handler="prometheus",quantile="0.9"} 291 +http_request_size_bytes{handler="prometheus",quantile="0.99"} 291 +http_request_size_bytes_sum{handler="prometheus"} 34488 +http_request_size_bytes_count{handler="prometheus"} 119 +# HELP http_requests_total Total number of HTTP requests made. +# TYPE http_requests_total counter +http_requests_total{code="200",handler="prometheus",method="get"} 119 +# HELP http_response_size_bytes The HTTP response sizes in bytes. 
+# TYPE http_response_size_bytes summary +http_response_size_bytes{handler="/",quantile="0.5"} 0 +http_response_size_bytes{handler="/",quantile="0.9"} 0 +http_response_size_bytes{handler="/",quantile="0.99"} 0 +http_response_size_bytes_sum{handler="/"} 0 +http_response_size_bytes_count{handler="/"} 0 +http_response_size_bytes{handler="/alerts",quantile="0.5"} 0 +http_response_size_bytes{handler="/alerts",quantile="0.9"} 0 +http_response_size_bytes{handler="/alerts",quantile="0.99"} 0 +http_response_size_bytes_sum{handler="/alerts"} 0 +http_response_size_bytes_count{handler="/alerts"} 0 +http_response_size_bytes{handler="/api/metrics",quantile="0.5"} 0 +http_response_size_bytes{handler="/api/metrics",quantile="0.9"} 0 +http_response_size_bytes{handler="/api/metrics",quantile="0.99"} 0 +http_response_size_bytes_sum{handler="/api/metrics"} 0 +http_response_size_bytes_count{handler="/api/metrics"} 0 +http_response_size_bytes{handler="/api/query",quantile="0.5"} 0 +http_response_size_bytes{handler="/api/query",quantile="0.9"} 0 +http_response_size_bytes{handler="/api/query",quantile="0.99"} 0 +http_response_size_bytes_sum{handler="/api/query"} 0 +http_response_size_bytes_count{handler="/api/query"} 0 +http_response_size_bytes{handler="/api/query_range",quantile="0.5"} 0 +http_response_size_bytes{handler="/api/query_range",quantile="0.9"} 0 +http_response_size_bytes{handler="/api/query_range",quantile="0.99"} 0 +http_response_size_bytes_sum{handler="/api/query_range"} 0 +http_response_size_bytes_count{handler="/api/query_range"} 0 +http_response_size_bytes{handler="/api/targets",quantile="0.5"} 0 +http_response_size_bytes{handler="/api/targets",quantile="0.9"} 0 +http_response_size_bytes{handler="/api/targets",quantile="0.99"} 0 +http_response_size_bytes_sum{handler="/api/targets"} 0 +http_response_size_bytes_count{handler="/api/targets"} 0 +http_response_size_bytes{handler="/consoles/",quantile="0.5"} 0 +http_response_size_bytes{handler="/consoles/",quantile="0.9"} 0 
+http_response_size_bytes{handler="/consoles/",quantile="0.99"} 0 +http_response_size_bytes_sum{handler="/consoles/"} 0 +http_response_size_bytes_count{handler="/consoles/"} 0 +http_response_size_bytes{handler="/graph",quantile="0.5"} 0 +http_response_size_bytes{handler="/graph",quantile="0.9"} 0 +http_response_size_bytes{handler="/graph",quantile="0.99"} 0 +http_response_size_bytes_sum{handler="/graph"} 0 +http_response_size_bytes_count{handler="/graph"} 0 +http_response_size_bytes{handler="/heap",quantile="0.5"} 0 +http_response_size_bytes{handler="/heap",quantile="0.9"} 0 +http_response_size_bytes{handler="/heap",quantile="0.99"} 0 +http_response_size_bytes_sum{handler="/heap"} 0 +http_response_size_bytes_count{handler="/heap"} 0 +http_response_size_bytes{handler="/static/",quantile="0.5"} 0 +http_response_size_bytes{handler="/static/",quantile="0.9"} 0 +http_response_size_bytes{handler="/static/",quantile="0.99"} 0 +http_response_size_bytes_sum{handler="/static/"} 0 +http_response_size_bytes_count{handler="/static/"} 0 +http_response_size_bytes{handler="prometheus",quantile="0.5"} 2049 +http_response_size_bytes{handler="prometheus",quantile="0.9"} 2058 +http_response_size_bytes{handler="prometheus",quantile="0.99"} 2064 +http_response_size_bytes_sum{handler="prometheus"} 247001 +http_response_size_bytes_count{handler="prometheus"} 119 +# HELP process_cpu_seconds_total Total user and system CPU time spent in seconds. +# TYPE process_cpu_seconds_total counter +process_cpu_seconds_total 0.55 +# HELP go_goroutines Number of goroutines that currently exist. +# TYPE go_goroutines gauge +go_goroutines 70 +# HELP process_max_fds Maximum number of open file descriptors. +# TYPE process_max_fds gauge +process_max_fds 8192 +# HELP process_open_fds Number of open file descriptors. +# TYPE process_open_fds gauge +process_open_fds 29 +# HELP process_resident_memory_bytes Resident memory size in bytes. 
+# TYPE process_resident_memory_bytes gauge +process_resident_memory_bytes 5.3870592e+07 +# HELP process_start_time_seconds Start time of the process since unix epoch in seconds. +# TYPE process_start_time_seconds gauge +process_start_time_seconds 1.42236894836e+09 +# HELP process_virtual_memory_bytes Virtual memory size in bytes. +# TYPE process_virtual_memory_bytes gauge +process_virtual_memory_bytes 5.41478912e+08 +# HELP prometheus_dns_sd_lookup_failures_total The number of DNS-SD lookup failures. +# TYPE prometheus_dns_sd_lookup_failures_total counter +prometheus_dns_sd_lookup_failures_total 0 +# HELP prometheus_dns_sd_lookups_total The number of DNS-SD lookups. +# TYPE prometheus_dns_sd_lookups_total counter +prometheus_dns_sd_lookups_total 7 +# HELP prometheus_evaluator_duration_milliseconds The duration for all evaluations to execute. +# TYPE prometheus_evaluator_duration_milliseconds summary +prometheus_evaluator_duration_milliseconds{quantile="0.01"} 0 +prometheus_evaluator_duration_milliseconds{quantile="0.05"} 0 +prometheus_evaluator_duration_milliseconds{quantile="0.5"} 0 +prometheus_evaluator_duration_milliseconds{quantile="0.9"} 1 +prometheus_evaluator_duration_milliseconds{quantile="0.99"} 1 +prometheus_evaluator_duration_milliseconds_sum 12 +prometheus_evaluator_duration_milliseconds_count 23 +# HELP prometheus_local_storage_checkpoint_duration_milliseconds The duration (in milliseconds) it took to checkpoint in-memory metrics and head chunks. +# TYPE prometheus_local_storage_checkpoint_duration_milliseconds gauge +prometheus_local_storage_checkpoint_duration_milliseconds 0 +# HELP prometheus_local_storage_chunk_ops_total The total number of chunk operations by their type. 
+# TYPE prometheus_local_storage_chunk_ops_total counter +prometheus_local_storage_chunk_ops_total{type="create"} 598 +prometheus_local_storage_chunk_ops_total{type="persist"} 174 +prometheus_local_storage_chunk_ops_total{type="pin"} 920 +prometheus_local_storage_chunk_ops_total{type="transcode"} 415 +prometheus_local_storage_chunk_ops_total{type="unpin"} 920 +# HELP prometheus_local_storage_indexing_batch_latency_milliseconds Quantiles for batch indexing latencies in milliseconds. +# TYPE prometheus_local_storage_indexing_batch_latency_milliseconds summary +prometheus_local_storage_indexing_batch_latency_milliseconds{quantile="0.5"} 0 +prometheus_local_storage_indexing_batch_latency_milliseconds{quantile="0.9"} 0 +prometheus_local_storage_indexing_batch_latency_milliseconds{quantile="0.99"} 0 +prometheus_local_storage_indexing_batch_latency_milliseconds_sum 0 +prometheus_local_storage_indexing_batch_latency_milliseconds_count 1 +# HELP prometheus_local_storage_indexing_batch_sizes Quantiles for indexing batch sizes (number of metrics per batch). +# TYPE prometheus_local_storage_indexing_batch_sizes summary +prometheus_local_storage_indexing_batch_sizes{quantile="0.5"} 2 +prometheus_local_storage_indexing_batch_sizes{quantile="0.9"} 2 +prometheus_local_storage_indexing_batch_sizes{quantile="0.99"} 2 +prometheus_local_storage_indexing_batch_sizes_sum 2 +prometheus_local_storage_indexing_batch_sizes_count 1 +# HELP prometheus_local_storage_indexing_queue_capacity The capacity of the indexing queue. +# TYPE prometheus_local_storage_indexing_queue_capacity gauge +prometheus_local_storage_indexing_queue_capacity 16384 +# HELP prometheus_local_storage_indexing_queue_length The number of metrics waiting to be indexed. +# TYPE prometheus_local_storage_indexing_queue_length gauge +prometheus_local_storage_indexing_queue_length 0 +# HELP prometheus_local_storage_ingested_samples_total The total number of samples ingested. 
+# TYPE prometheus_local_storage_ingested_samples_total counter +prometheus_local_storage_ingested_samples_total 30473 +# HELP prometheus_local_storage_invalid_preload_requests_total The total number of preload requests referring to a non-existent series. This is an indication of outdated label indexes. +# TYPE prometheus_local_storage_invalid_preload_requests_total counter +prometheus_local_storage_invalid_preload_requests_total 0 +# HELP prometheus_local_storage_memory_chunkdescs The current number of chunk descriptors in memory. +# TYPE prometheus_local_storage_memory_chunkdescs gauge +prometheus_local_storage_memory_chunkdescs 1059 +# HELP prometheus_local_storage_memory_chunks The current number of chunks in memory, excluding cloned chunks (i.e. chunks without a descriptor). +# TYPE prometheus_local_storage_memory_chunks gauge +prometheus_local_storage_memory_chunks 1020 +# HELP prometheus_local_storage_memory_series The current number of series in memory. +# TYPE prometheus_local_storage_memory_series gauge +prometheus_local_storage_memory_series 424 +# HELP prometheus_local_storage_persist_latency_microseconds A summary of latencies for persisting each chunk. +# TYPE prometheus_local_storage_persist_latency_microseconds summary +prometheus_local_storage_persist_latency_microseconds{quantile="0.5"} 30.377 +prometheus_local_storage_persist_latency_microseconds{quantile="0.9"} 203.539 +prometheus_local_storage_persist_latency_microseconds{quantile="0.99"} 2626.463 +prometheus_local_storage_persist_latency_microseconds_sum 20424.415 +prometheus_local_storage_persist_latency_microseconds_count 174 +# HELP prometheus_local_storage_persist_queue_capacity The total capacity of the persist queue. +# TYPE prometheus_local_storage_persist_queue_capacity gauge +prometheus_local_storage_persist_queue_capacity 1024 +# HELP prometheus_local_storage_persist_queue_length The current number of chunks waiting in the persist queue. 
+# TYPE prometheus_local_storage_persist_queue_length gauge +prometheus_local_storage_persist_queue_length 0 +# HELP prometheus_local_storage_series_ops_total The total number of series operations by their type. +# TYPE prometheus_local_storage_series_ops_total counter +prometheus_local_storage_series_ops_total{type="create"} 2 +prometheus_local_storage_series_ops_total{type="maintenance_in_memory"} 11 +# HELP prometheus_notifications_latency_milliseconds Latency quantiles for sending alert notifications (not including dropped notifications). +# TYPE prometheus_notifications_latency_milliseconds summary +prometheus_notifications_latency_milliseconds{quantile="0.5"} 0 +prometheus_notifications_latency_milliseconds{quantile="0.9"} 0 +prometheus_notifications_latency_milliseconds{quantile="0.99"} 0 +prometheus_notifications_latency_milliseconds_sum 0 +prometheus_notifications_latency_milliseconds_count 0 +# HELP prometheus_notifications_queue_capacity The capacity of the alert notifications queue. +# TYPE prometheus_notifications_queue_capacity gauge +prometheus_notifications_queue_capacity 100 +# HELP prometheus_notifications_queue_length The number of alert notifications in the queue. +# TYPE prometheus_notifications_queue_length gauge +prometheus_notifications_queue_length 0 +# HELP prometheus_rule_evaluation_duration_milliseconds The duration for a rule to execute. 
+# TYPE prometheus_rule_evaluation_duration_milliseconds summary +prometheus_rule_evaluation_duration_milliseconds{rule_type="alerting",quantile="0.5"} 0 +prometheus_rule_evaluation_duration_milliseconds{rule_type="alerting",quantile="0.9"} 0 +prometheus_rule_evaluation_duration_milliseconds{rule_type="alerting",quantile="0.99"} 2 +prometheus_rule_evaluation_duration_milliseconds_sum{rule_type="alerting"} 12 +prometheus_rule_evaluation_duration_milliseconds_count{rule_type="alerting"} 115 +prometheus_rule_evaluation_duration_milliseconds{rule_type="recording",quantile="0.5"} 0 +prometheus_rule_evaluation_duration_milliseconds{rule_type="recording",quantile="0.9"} 0 +prometheus_rule_evaluation_duration_milliseconds{rule_type="recording",quantile="0.99"} 3 +prometheus_rule_evaluation_duration_milliseconds_sum{rule_type="recording"} 15 +prometheus_rule_evaluation_duration_milliseconds_count{rule_type="recording"} 115 +# HELP prometheus_rule_evaluation_failures_total The total number of rule evaluation failures. +# TYPE prometheus_rule_evaluation_failures_total counter +prometheus_rule_evaluation_failures_total 0 +# HELP prometheus_samples_queue_capacity Capacity of the queue for unwritten samples. +# TYPE prometheus_samples_queue_capacity gauge +prometheus_samples_queue_capacity 4096 +# HELP prometheus_samples_queue_length Current number of items in the queue for unwritten samples. Each item comprises all samples exposed by one target as one metric family (i.e. metrics of the same name). +# TYPE prometheus_samples_queue_length gauge +prometheus_samples_queue_length 0 +# HELP prometheus_target_interval_length_seconds Actual intervals between scrapes. 
+# TYPE prometheus_target_interval_length_seconds summary +prometheus_target_interval_length_seconds{interval="15s",quantile="0.01"} 14 +prometheus_target_interval_length_seconds{interval="15s",quantile="0.05"} 14 +prometheus_target_interval_length_seconds{interval="15s",quantile="0.5"} 15 +prometheus_target_interval_length_seconds{interval="15s",quantile="0.9"} 15 +prometheus_target_interval_length_seconds{interval="15s",quantile="0.99"} 15 +prometheus_target_interval_length_seconds_sum{interval="15s"} 175 +prometheus_target_interval_length_seconds_count{interval="15s"} 12 +prometheus_target_interval_length_seconds{interval="1s",quantile="0.01"} 0 +prometheus_target_interval_length_seconds{interval="1s",quantile="0.05"} 0 +prometheus_target_interval_length_seconds{interval="1s",quantile="0.5"} 0 +prometheus_target_interval_length_seconds{interval="1s",quantile="0.9"} 1 +prometheus_target_interval_length_seconds{interval="1s",quantile="0.99"} 1 +prometheus_target_interval_length_seconds_sum{interval="1s"} 55 +prometheus_target_interval_length_seconds_count{interval="1s"} 117 diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/common/expfmt/text_create.go juju-core-2.0.0/src/github.com/prometheus/common/expfmt/text_create.go --- juju-core-2.0~beta15/src/github.com/prometheus/common/expfmt/text_create.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/common/expfmt/text_create.go 2016-10-13 14:32:23.000000000 +0000 @@ -0,0 +1,305 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package expfmt + +import ( + "bytes" + "fmt" + "io" + "math" + "strings" + + dto "github.com/prometheus/client_model/go" + "github.com/prometheus/common/model" +) + +// MetricFamilyToText converts a MetricFamily proto message into text format and +// writes the resulting lines to 'out'. It returns the number of bytes written +// and any error encountered. This function does not perform checks on the +// content of the metric and label names, i.e. invalid metric or label names +// will result in invalid text format output. +// This method fulfills the type 'prometheus.encoder'. +func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (int, error) { + var written int + + // Fail-fast checks. + if len(in.Metric) == 0 { + return written, fmt.Errorf("MetricFamily has no metrics: %s", in) + } + name := in.GetName() + if name == "" { + return written, fmt.Errorf("MetricFamily has no name: %s", in) + } + + // Comments, first HELP, then TYPE. + if in.Help != nil { + n, err := fmt.Fprintf( + out, "# HELP %s %s\n", + name, escapeString(*in.Help, false), + ) + written += n + if err != nil { + return written, err + } + } + metricType := in.GetType() + n, err := fmt.Fprintf( + out, "# TYPE %s %s\n", + name, strings.ToLower(metricType.String()), + ) + written += n + if err != nil { + return written, err + } + + // Finally the samples, one line for each. 
+ for _, metric := range in.Metric { + switch metricType { + case dto.MetricType_COUNTER: + if metric.Counter == nil { + return written, fmt.Errorf( + "expected counter in metric %s %s", name, metric, + ) + } + n, err = writeSample( + name, metric, "", "", + metric.Counter.GetValue(), + out, + ) + case dto.MetricType_GAUGE: + if metric.Gauge == nil { + return written, fmt.Errorf( + "expected gauge in metric %s %s", name, metric, + ) + } + n, err = writeSample( + name, metric, "", "", + metric.Gauge.GetValue(), + out, + ) + case dto.MetricType_UNTYPED: + if metric.Untyped == nil { + return written, fmt.Errorf( + "expected untyped in metric %s %s", name, metric, + ) + } + n, err = writeSample( + name, metric, "", "", + metric.Untyped.GetValue(), + out, + ) + case dto.MetricType_SUMMARY: + if metric.Summary == nil { + return written, fmt.Errorf( + "expected summary in metric %s %s", name, metric, + ) + } + for _, q := range metric.Summary.Quantile { + n, err = writeSample( + name, metric, + model.QuantileLabel, fmt.Sprint(q.GetQuantile()), + q.GetValue(), + out, + ) + written += n + if err != nil { + return written, err + } + } + n, err = writeSample( + name+"_sum", metric, "", "", + metric.Summary.GetSampleSum(), + out, + ) + if err != nil { + return written, err + } + written += n + n, err = writeSample( + name+"_count", metric, "", "", + float64(metric.Summary.GetSampleCount()), + out, + ) + case dto.MetricType_HISTOGRAM: + if metric.Histogram == nil { + return written, fmt.Errorf( + "expected histogram in metric %s %s", name, metric, + ) + } + infSeen := false + for _, q := range metric.Histogram.Bucket { + n, err = writeSample( + name+"_bucket", metric, + model.BucketLabel, fmt.Sprint(q.GetUpperBound()), + float64(q.GetCumulativeCount()), + out, + ) + written += n + if err != nil { + return written, err + } + if math.IsInf(q.GetUpperBound(), +1) { + infSeen = true + } + } + if !infSeen { + n, err = writeSample( + name+"_bucket", metric, + model.BucketLabel, 
"+Inf", + float64(metric.Histogram.GetSampleCount()), + out, + ) + if err != nil { + return written, err + } + written += n + } + n, err = writeSample( + name+"_sum", metric, "", "", + metric.Histogram.GetSampleSum(), + out, + ) + if err != nil { + return written, err + } + written += n + n, err = writeSample( + name+"_count", metric, "", "", + float64(metric.Histogram.GetSampleCount()), + out, + ) + default: + return written, fmt.Errorf( + "unexpected type in metric %s %s", name, metric, + ) + } + written += n + if err != nil { + return written, err + } + } + return written, nil +} + +// writeSample writes a single sample in text format to out, given the metric +// name, the metric proto message itself, optionally an additional label name +// and value (use empty strings if not required), and the value. The function +// returns the number of bytes written and any error encountered. +func writeSample( + name string, + metric *dto.Metric, + additionalLabelName, additionalLabelValue string, + value float64, + out io.Writer, +) (int, error) { + var written int + n, err := fmt.Fprint(out, name) + written += n + if err != nil { + return written, err + } + n, err = labelPairsToText( + metric.Label, + additionalLabelName, additionalLabelValue, + out, + ) + written += n + if err != nil { + return written, err + } + n, err = fmt.Fprintf(out, " %v", value) + written += n + if err != nil { + return written, err + } + if metric.TimestampMs != nil { + n, err = fmt.Fprintf(out, " %v", *metric.TimestampMs) + written += n + if err != nil { + return written, err + } + } + n, err = out.Write([]byte{'\n'}) + written += n + if err != nil { + return written, err + } + return written, nil +} + +// labelPairsToText converts a slice of LabelPair proto messages plus the +// explicitly given additional label pair into text formatted as required by the +// text format and writes it to 'out'. 
An empty slice in combination with an +// empty string 'additionalLabelName' results in nothing being +// written. Otherwise, the label pairs are written, escaped as required by the +// text format, and enclosed in '{...}'. The function returns the number of +// bytes written and any error encountered. +func labelPairsToText( + in []*dto.LabelPair, + additionalLabelName, additionalLabelValue string, + out io.Writer, +) (int, error) { + if len(in) == 0 && additionalLabelName == "" { + return 0, nil + } + var written int + separator := '{' + for _, lp := range in { + n, err := fmt.Fprintf( + out, `%c%s="%s"`, + separator, lp.GetName(), escapeString(lp.GetValue(), true), + ) + written += n + if err != nil { + return written, err + } + separator = ',' + } + if additionalLabelName != "" { + n, err := fmt.Fprintf( + out, `%c%s="%s"`, + separator, additionalLabelName, + escapeString(additionalLabelValue, true), + ) + written += n + if err != nil { + return written, err + } + } + n, err := out.Write([]byte{'}'}) + written += n + if err != nil { + return written, err + } + return written, nil +} + +// escapeString replaces '\' by '\\', new line character by '\n', and - if +// includeDoubleQuote is true - '"' by '\"'. 
+func escapeString(v string, includeDoubleQuote bool) string { + result := bytes.NewBuffer(make([]byte, 0, len(v))) + for _, c := range v { + switch { + case c == '\\': + result.WriteString(`\\`) + case includeDoubleQuote && c == '"': + result.WriteString(`\"`) + case c == '\n': + result.WriteString(`\n`) + default: + result.WriteRune(c) + } + } + return result.String() +} diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/common/expfmt/text_create_test.go juju-core-2.0.0/src/github.com/prometheus/common/expfmt/text_create_test.go --- juju-core-2.0~beta15/src/github.com/prometheus/common/expfmt/text_create_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/common/expfmt/text_create_test.go 2016-10-13 14:32:23.000000000 +0000 @@ -0,0 +1,443 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package expfmt + +import ( + "bytes" + "math" + "strings" + "testing" + + "github.com/golang/protobuf/proto" + + dto "github.com/prometheus/client_model/go" +) + +func testCreate(t testing.TB) { + var scenarios = []struct { + in *dto.MetricFamily + out string + }{ + // 0: Counter, NaN as value, timestamp given. 
+ { + in: &dto.MetricFamily{ + Name: proto.String("name"), + Help: proto.String("two-line\n doc str\\ing"), + Type: dto.MetricType_COUNTER.Enum(), + Metric: []*dto.Metric{ + &dto.Metric{ + Label: []*dto.LabelPair{ + &dto.LabelPair{ + Name: proto.String("labelname"), + Value: proto.String("val1"), + }, + &dto.LabelPair{ + Name: proto.String("basename"), + Value: proto.String("basevalue"), + }, + }, + Counter: &dto.Counter{ + Value: proto.Float64(math.NaN()), + }, + }, + &dto.Metric{ + Label: []*dto.LabelPair{ + &dto.LabelPair{ + Name: proto.String("labelname"), + Value: proto.String("val2"), + }, + &dto.LabelPair{ + Name: proto.String("basename"), + Value: proto.String("basevalue"), + }, + }, + Counter: &dto.Counter{ + Value: proto.Float64(.23), + }, + TimestampMs: proto.Int64(1234567890), + }, + }, + }, + out: `# HELP name two-line\n doc str\\ing +# TYPE name counter +name{labelname="val1",basename="basevalue"} NaN +name{labelname="val2",basename="basevalue"} 0.23 1234567890 +`, + }, + // 1: Gauge, some escaping required, +Inf as value, multi-byte characters in label values. 
+ { + in: &dto.MetricFamily{ + Name: proto.String("gauge_name"), + Help: proto.String("gauge\ndoc\nstr\"ing"), + Type: dto.MetricType_GAUGE.Enum(), + Metric: []*dto.Metric{ + &dto.Metric{ + Label: []*dto.LabelPair{ + &dto.LabelPair{ + Name: proto.String("name_1"), + Value: proto.String("val with\nnew line"), + }, + &dto.LabelPair{ + Name: proto.String("name_2"), + Value: proto.String("val with \\backslash and \"quotes\""), + }, + }, + Gauge: &dto.Gauge{ + Value: proto.Float64(math.Inf(+1)), + }, + }, + &dto.Metric{ + Label: []*dto.LabelPair{ + &dto.LabelPair{ + Name: proto.String("name_1"), + Value: proto.String("Björn"), + }, + &dto.LabelPair{ + Name: proto.String("name_2"), + Value: proto.String("ä½–ä½¥"), + }, + }, + Gauge: &dto.Gauge{ + Value: proto.Float64(3.14E42), + }, + }, + }, + }, + out: `# HELP gauge_name gauge\ndoc\nstr"ing +# TYPE gauge_name gauge +gauge_name{name_1="val with\nnew line",name_2="val with \\backslash and \"quotes\""} +Inf +gauge_name{name_1="Björn",name_2="ä½–ä½¥"} 3.14e+42 +`, + }, + // 2: Untyped, no help, one sample with no labels and -Inf as value, another sample with one label. + { + in: &dto.MetricFamily{ + Name: proto.String("untyped_name"), + Type: dto.MetricType_UNTYPED.Enum(), + Metric: []*dto.Metric{ + &dto.Metric{ + Untyped: &dto.Untyped{ + Value: proto.Float64(math.Inf(-1)), + }, + }, + &dto.Metric{ + Label: []*dto.LabelPair{ + &dto.LabelPair{ + Name: proto.String("name_1"), + Value: proto.String("value 1"), + }, + }, + Untyped: &dto.Untyped{ + Value: proto.Float64(-1.23e-45), + }, + }, + }, + }, + out: `# TYPE untyped_name untyped +untyped_name -Inf +untyped_name{name_1="value 1"} -1.23e-45 +`, + }, + // 3: Summary. 
+ { + in: &dto.MetricFamily{ + Name: proto.String("summary_name"), + Help: proto.String("summary docstring"), + Type: dto.MetricType_SUMMARY.Enum(), + Metric: []*dto.Metric{ + &dto.Metric{ + Summary: &dto.Summary{ + SampleCount: proto.Uint64(42), + SampleSum: proto.Float64(-3.4567), + Quantile: []*dto.Quantile{ + &dto.Quantile{ + Quantile: proto.Float64(0.5), + Value: proto.Float64(-1.23), + }, + &dto.Quantile{ + Quantile: proto.Float64(0.9), + Value: proto.Float64(.2342354), + }, + &dto.Quantile{ + Quantile: proto.Float64(0.99), + Value: proto.Float64(0), + }, + }, + }, + }, + &dto.Metric{ + Label: []*dto.LabelPair{ + &dto.LabelPair{ + Name: proto.String("name_1"), + Value: proto.String("value 1"), + }, + &dto.LabelPair{ + Name: proto.String("name_2"), + Value: proto.String("value 2"), + }, + }, + Summary: &dto.Summary{ + SampleCount: proto.Uint64(4711), + SampleSum: proto.Float64(2010.1971), + Quantile: []*dto.Quantile{ + &dto.Quantile{ + Quantile: proto.Float64(0.5), + Value: proto.Float64(1), + }, + &dto.Quantile{ + Quantile: proto.Float64(0.9), + Value: proto.Float64(2), + }, + &dto.Quantile{ + Quantile: proto.Float64(0.99), + Value: proto.Float64(3), + }, + }, + }, + }, + }, + }, + out: `# HELP summary_name summary docstring +# TYPE summary_name summary +summary_name{quantile="0.5"} -1.23 +summary_name{quantile="0.9"} 0.2342354 +summary_name{quantile="0.99"} 0 +summary_name_sum -3.4567 +summary_name_count 42 +summary_name{name_1="value 1",name_2="value 2",quantile="0.5"} 1 +summary_name{name_1="value 1",name_2="value 2",quantile="0.9"} 2 +summary_name{name_1="value 1",name_2="value 2",quantile="0.99"} 3 +summary_name_sum{name_1="value 1",name_2="value 2"} 2010.1971 +summary_name_count{name_1="value 1",name_2="value 2"} 4711 +`, + }, + // 4: Histogram + { + in: &dto.MetricFamily{ + Name: proto.String("request_duration_microseconds"), + Help: proto.String("The response latency."), + Type: dto.MetricType_HISTOGRAM.Enum(), + Metric: []*dto.Metric{ + &dto.Metric{ 
+ Histogram: &dto.Histogram{ + SampleCount: proto.Uint64(2693), + SampleSum: proto.Float64(1756047.3), + Bucket: []*dto.Bucket{ + &dto.Bucket{ + UpperBound: proto.Float64(100), + CumulativeCount: proto.Uint64(123), + }, + &dto.Bucket{ + UpperBound: proto.Float64(120), + CumulativeCount: proto.Uint64(412), + }, + &dto.Bucket{ + UpperBound: proto.Float64(144), + CumulativeCount: proto.Uint64(592), + }, + &dto.Bucket{ + UpperBound: proto.Float64(172.8), + CumulativeCount: proto.Uint64(1524), + }, + &dto.Bucket{ + UpperBound: proto.Float64(math.Inf(+1)), + CumulativeCount: proto.Uint64(2693), + }, + }, + }, + }, + }, + }, + out: `# HELP request_duration_microseconds The response latency. +# TYPE request_duration_microseconds histogram +request_duration_microseconds_bucket{le="100"} 123 +request_duration_microseconds_bucket{le="120"} 412 +request_duration_microseconds_bucket{le="144"} 592 +request_duration_microseconds_bucket{le="172.8"} 1524 +request_duration_microseconds_bucket{le="+Inf"} 2693 +request_duration_microseconds_sum 1.7560473e+06 +request_duration_microseconds_count 2693 +`, + }, + // 5: Histogram with missing +Inf bucket. + { + in: &dto.MetricFamily{ + Name: proto.String("request_duration_microseconds"), + Help: proto.String("The response latency."), + Type: dto.MetricType_HISTOGRAM.Enum(), + Metric: []*dto.Metric{ + &dto.Metric{ + Histogram: &dto.Histogram{ + SampleCount: proto.Uint64(2693), + SampleSum: proto.Float64(1756047.3), + Bucket: []*dto.Bucket{ + &dto.Bucket{ + UpperBound: proto.Float64(100), + CumulativeCount: proto.Uint64(123), + }, + &dto.Bucket{ + UpperBound: proto.Float64(120), + CumulativeCount: proto.Uint64(412), + }, + &dto.Bucket{ + UpperBound: proto.Float64(144), + CumulativeCount: proto.Uint64(592), + }, + &dto.Bucket{ + UpperBound: proto.Float64(172.8), + CumulativeCount: proto.Uint64(1524), + }, + }, + }, + }, + }, + }, + out: `# HELP request_duration_microseconds The response latency. 
+# TYPE request_duration_microseconds histogram +request_duration_microseconds_bucket{le="100"} 123 +request_duration_microseconds_bucket{le="120"} 412 +request_duration_microseconds_bucket{le="144"} 592 +request_duration_microseconds_bucket{le="172.8"} 1524 +request_duration_microseconds_bucket{le="+Inf"} 2693 +request_duration_microseconds_sum 1.7560473e+06 +request_duration_microseconds_count 2693 +`, + }, + // 6: No metric type, should result in default type Counter. + { + in: &dto.MetricFamily{ + Name: proto.String("name"), + Help: proto.String("doc string"), + Metric: []*dto.Metric{ + &dto.Metric{ + Counter: &dto.Counter{ + Value: proto.Float64(math.Inf(-1)), + }, + }, + }, + }, + out: `# HELP name doc string +# TYPE name counter +name -Inf +`, + }, + } + + for i, scenario := range scenarios { + out := bytes.NewBuffer(make([]byte, 0, len(scenario.out))) + n, err := MetricFamilyToText(out, scenario.in) + if err != nil { + t.Errorf("%d. error: %s", i, err) + continue + } + if expected, got := len(scenario.out), n; expected != got { + t.Errorf( + "%d. expected %d bytes written, got %d", + i, expected, got, + ) + } + if expected, got := scenario.out, out.String(); expected != got { + t.Errorf( + "%d. expected out=%q, got %q", + i, expected, got, + ) + } + } + +} + +func TestCreate(t *testing.T) { + testCreate(t) +} + +func BenchmarkCreate(b *testing.B) { + for i := 0; i < b.N; i++ { + testCreate(b) + } +} + +func testCreateError(t testing.TB) { + var scenarios = []struct { + in *dto.MetricFamily + err string + }{ + // 0: No metric. + { + in: &dto.MetricFamily{ + Name: proto.String("name"), + Help: proto.String("doc string"), + Type: dto.MetricType_COUNTER.Enum(), + Metric: []*dto.Metric{}, + }, + err: "MetricFamily has no metrics", + }, + // 1: No metric name. 
+ { + in: &dto.MetricFamily{ + Help: proto.String("doc string"), + Type: dto.MetricType_UNTYPED.Enum(), + Metric: []*dto.Metric{ + &dto.Metric{ + Untyped: &dto.Untyped{ + Value: proto.Float64(math.Inf(-1)), + }, + }, + }, + }, + err: "MetricFamily has no name", + }, + // 2: Wrong type. + { + in: &dto.MetricFamily{ + Name: proto.String("name"), + Help: proto.String("doc string"), + Type: dto.MetricType_COUNTER.Enum(), + Metric: []*dto.Metric{ + &dto.Metric{ + Untyped: &dto.Untyped{ + Value: proto.Float64(math.Inf(-1)), + }, + }, + }, + }, + err: "expected counter in metric", + }, + } + + for i, scenario := range scenarios { + var out bytes.Buffer + _, err := MetricFamilyToText(&out, scenario.in) + if err == nil { + t.Errorf("%d. expected error, got nil", i) + continue + } + if expected, got := scenario.err, err.Error(); strings.Index(got, expected) != 0 { + t.Errorf( + "%d. expected error starting with %q, got %q", + i, expected, got, + ) + } + } + +} + +func TestCreateError(t *testing.T) { + testCreateError(t) +} + +func BenchmarkCreateError(b *testing.B) { + for i := 0; i < b.N; i++ { + testCreateError(b) + } +} diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/common/expfmt/text_parse.go juju-core-2.0.0/src/github.com/prometheus/common/expfmt/text_parse.go --- juju-core-2.0~beta15/src/github.com/prometheus/common/expfmt/text_parse.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/common/expfmt/text_parse.go 2016-10-13 14:32:23.000000000 +0000 @@ -0,0 +1,753 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package expfmt + +import ( + "bufio" + "bytes" + "fmt" + "io" + "math" + "strconv" + "strings" + + dto "github.com/prometheus/client_model/go" + + "github.com/golang/protobuf/proto" + "github.com/prometheus/common/model" +) + +// A stateFn is a function that represents a state in a state machine. By +// executing it, the state is progressed to the next state. The stateFn returns +// another stateFn, which represents the new state. The end state is represented +// by nil. +type stateFn func() stateFn + +// ParseError signals errors while parsing the simple and flat text-based +// exchange format. +type ParseError struct { + Line int + Msg string +} + +// Error implements the error interface. +func (e ParseError) Error() string { + return fmt.Sprintf("text format parsing error in line %d: %s", e.Line, e.Msg) +} + +// TextParser is used to parse the simple and flat text-based exchange format. Its +// nil value is ready to use. +type TextParser struct { + metricFamiliesByName map[string]*dto.MetricFamily + buf *bufio.Reader // Where the parsed input is read through. + err error // Most recent error. + lineCount int // Tracks the line count for error messages. + currentByte byte // The most recent byte read. + currentToken bytes.Buffer // Re-used each time a token has to be gathered from multiple bytes. + currentMF *dto.MetricFamily + currentMetric *dto.Metric + currentLabelPair *dto.LabelPair + + // The remaining member variables are only used for summaries/histograms. 
+ currentLabels map[string]string // All labels including '__name__' but excluding 'quantile'/'le' + // Summary specific. + summaries map[uint64]*dto.Metric // Key is created with LabelsToSignature. + currentQuantile float64 + // Histogram specific. + histograms map[uint64]*dto.Metric // Key is created with LabelsToSignature. + currentBucket float64 + // These tell us if the currently processed line ends on '_count' or + // '_sum' respectively and belong to a summary/histogram, representing the sample + // count and sum of that summary/histogram. + currentIsSummaryCount, currentIsSummarySum bool + currentIsHistogramCount, currentIsHistogramSum bool +} + +// TextToMetricFamilies reads 'in' as the simple and flat text-based exchange +// format and creates MetricFamily proto messages. It returns the MetricFamily +// proto messages in a map where the metric names are the keys, along with any +// error encountered. +// +// If the input contains duplicate metrics (i.e. lines with the same metric name +// and exactly the same label set), the resulting MetricFamily will contain +// duplicate Metric proto messages. Similar is true for duplicate label +// names. Checks for duplicates have to be performed separately, if required. +// Also note that neither the metrics within each MetricFamily are sorted nor +// the label pairs within each Metric. Sorting is not required for the most +// frequent use of this method, which is sample ingestion in the Prometheus +// server. However, for presentation purposes, you might want to sort the +// metrics, and in some cases, you must sort the labels, e.g. for consumption by +// the metric family injection hook of the Prometheus registry. +// +// Summaries and histograms are rather special beasts. You would probably not +// use them in the simple text format anyway. This method can deal with +// summaries and histograms if they are presented in exactly the way the +// text.Create function creates them. 
+// +// This method must not be called concurrently. If you want to parse different +// input concurrently, instantiate a separate Parser for each goroutine. +func (p *TextParser) TextToMetricFamilies(in io.Reader) (map[string]*dto.MetricFamily, error) { + p.reset(in) + for nextState := p.startOfLine; nextState != nil; nextState = nextState() { + // Magic happens here... + } + // Get rid of empty metric families. + for k, mf := range p.metricFamiliesByName { + if len(mf.GetMetric()) == 0 { + delete(p.metricFamiliesByName, k) + } + } + // If p.err is io.EOF now, we have run into a premature end of the input + // stream. Turn this error into something nicer and more + // meaningful. (io.EOF is often used as a signal for the legitimate end + // of an input stream.) + if p.err == io.EOF { + p.parseError("unexpected end of input stream") + } + return p.metricFamiliesByName, p.err +} + +func (p *TextParser) reset(in io.Reader) { + p.metricFamiliesByName = map[string]*dto.MetricFamily{} + if p.buf == nil { + p.buf = bufio.NewReader(in) + } else { + p.buf.Reset(in) + } + p.err = nil + p.lineCount = 0 + if p.summaries == nil || len(p.summaries) > 0 { + p.summaries = map[uint64]*dto.Metric{} + } + if p.histograms == nil || len(p.histograms) > 0 { + p.histograms = map[uint64]*dto.Metric{} + } + p.currentQuantile = math.NaN() + p.currentBucket = math.NaN() +} + +// startOfLine represents the state where the next byte read from p.buf is the +// start of a line (or whitespace leading up to it). +func (p *TextParser) startOfLine() stateFn { + p.lineCount++ + if p.skipBlankTab(); p.err != nil { + // End of input reached. This is the only case where + // that is not an error but a signal that we are done. + p.err = nil + return nil + } + switch p.currentByte { + case '#': + return p.startComment + case '\n': + return p.startOfLine // Empty line, start the next one. 
+ } + return p.readingMetricName +} + +// startComment represents the state where the next byte read from p.buf is the +// start of a comment (or whitespace leading up to it). +func (p *TextParser) startComment() stateFn { + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + if p.currentByte == '\n' { + return p.startOfLine + } + if p.readTokenUntilWhitespace(); p.err != nil { + return nil // Unexpected end of input. + } + // If we have hit the end of line already, there is nothing left + // to do. This is not considered a syntax error. + if p.currentByte == '\n' { + return p.startOfLine + } + keyword := p.currentToken.String() + if keyword != "HELP" && keyword != "TYPE" { + // Generic comment, ignore by fast forwarding to end of line. + for p.currentByte != '\n' { + if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil { + return nil // Unexpected end of input. + } + } + return p.startOfLine + } + // There is something. Next has to be a metric name. + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + if p.readTokenAsMetricName(); p.err != nil { + return nil // Unexpected end of input. + } + if p.currentByte == '\n' { + // At the end of the line already. + // Again, this is not considered a syntax error. + return p.startOfLine + } + if !isBlankOrTab(p.currentByte) { + p.parseError("invalid metric name in comment") + return nil + } + p.setOrCreateCurrentMF() + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + if p.currentByte == '\n' { + // At the end of the line already. + // Again, this is not considered a syntax error. + return p.startOfLine + } + switch keyword { + case "HELP": + return p.readingHelp + case "TYPE": + return p.readingType + } + panic(fmt.Sprintf("code error: unexpected keyword %q", keyword)) +} + +// readingMetricName represents the state where the last byte read (now in +// p.currentByte) is the first byte of a metric name. 
+func (p *TextParser) readingMetricName() stateFn { + if p.readTokenAsMetricName(); p.err != nil { + return nil + } + if p.currentToken.Len() == 0 { + p.parseError("invalid metric name") + return nil + } + p.setOrCreateCurrentMF() + // Now is the time to fix the type if it hasn't happened yet. + if p.currentMF.Type == nil { + p.currentMF.Type = dto.MetricType_UNTYPED.Enum() + } + p.currentMetric = &dto.Metric{} + // Do not append the newly created currentMetric to + // currentMF.Metric right now. First wait if this is a summary, + // and the metric exists already, which we can only know after + // having read all the labels. + if p.skipBlankTabIfCurrentBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + return p.readingLabels +} + +// readingLabels represents the state where the last byte read (now in +// p.currentByte) is either the first byte of the label set (i.e. a '{'), or the +// first byte of the value (otherwise). +func (p *TextParser) readingLabels() stateFn { + // Summaries/histograms are special. We have to reset the + // currentLabels map, currentQuantile and currentBucket before starting to + // read labels. + if p.currentMF.GetType() == dto.MetricType_SUMMARY || p.currentMF.GetType() == dto.MetricType_HISTOGRAM { + p.currentLabels = map[string]string{} + p.currentLabels[string(model.MetricNameLabel)] = p.currentMF.GetName() + p.currentQuantile = math.NaN() + p.currentBucket = math.NaN() + } + if p.currentByte != '{' { + return p.readingValue + } + return p.startLabelName +} + +// startLabelName represents the state where the next byte read from p.buf is +// the start of a label name (or whitespace leading up to it). +func (p *TextParser) startLabelName() stateFn { + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + if p.currentByte == '}' { + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. 
+ } + return p.readingValue + } + if p.readTokenAsLabelName(); p.err != nil { + return nil // Unexpected end of input. + } + if p.currentToken.Len() == 0 { + p.parseError(fmt.Sprintf("invalid label name for metric %q", p.currentMF.GetName())) + return nil + } + p.currentLabelPair = &dto.LabelPair{Name: proto.String(p.currentToken.String())} + if p.currentLabelPair.GetName() == string(model.MetricNameLabel) { + p.parseError(fmt.Sprintf("label name %q is reserved", model.MetricNameLabel)) + return nil + } + // Special summary/histogram treatment. Don't add 'quantile' and 'le' + // labels to 'real' labels. + if !(p.currentMF.GetType() == dto.MetricType_SUMMARY && p.currentLabelPair.GetName() == model.QuantileLabel) && + !(p.currentMF.GetType() == dto.MetricType_HISTOGRAM && p.currentLabelPair.GetName() == model.BucketLabel) { + p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPair) + } + if p.skipBlankTabIfCurrentBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + if p.currentByte != '=' { + p.parseError(fmt.Sprintf("expected '=' after label name, found %q", p.currentByte)) + return nil + } + return p.startLabelValue +} + +// startLabelValue represents the state where the next byte read from p.buf is +// the start of a (quoted) label value (or whitespace leading up to it). +func (p *TextParser) startLabelValue() stateFn { + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + if p.currentByte != '"' { + p.parseError(fmt.Sprintf("expected '\"' at start of label value, found %q", p.currentByte)) + return nil + } + if p.readTokenAsLabelValue(); p.err != nil { + return nil + } + p.currentLabelPair.Value = proto.String(p.currentToken.String()) + // Special treatment of summaries: + // - Quantile labels are special, will result in dto.Quantile later. + // - Other labels have to be added to currentLabels for signature calculation. 
+ if p.currentMF.GetType() == dto.MetricType_SUMMARY { + if p.currentLabelPair.GetName() == model.QuantileLabel { + if p.currentQuantile, p.err = strconv.ParseFloat(p.currentLabelPair.GetValue(), 64); p.err != nil { + // Create a more helpful error message. + p.parseError(fmt.Sprintf("expected float as value for 'quantile' label, got %q", p.currentLabelPair.GetValue())) + return nil + } + } else { + p.currentLabels[p.currentLabelPair.GetName()] = p.currentLabelPair.GetValue() + } + } + // Similar special treatment of histograms. + if p.currentMF.GetType() == dto.MetricType_HISTOGRAM { + if p.currentLabelPair.GetName() == model.BucketLabel { + if p.currentBucket, p.err = strconv.ParseFloat(p.currentLabelPair.GetValue(), 64); p.err != nil { + // Create a more helpful error message. + p.parseError(fmt.Sprintf("expected float as value for 'le' label, got %q", p.currentLabelPair.GetValue())) + return nil + } + } else { + p.currentLabels[p.currentLabelPair.GetName()] = p.currentLabelPair.GetValue() + } + } + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + switch p.currentByte { + case ',': + return p.startLabelName + + case '}': + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + return p.readingValue + default: + p.parseError(fmt.Sprintf("unexpected end of label value %q", p.currentLabelPair.Value)) + return nil + } +} + +// readingValue represents the state where the last byte read (now in +// p.currentByte) is the first byte of the sample value (i.e. a float). +func (p *TextParser) readingValue() stateFn { + // When we are here, we have read all the labels, so for the + // special case of a summary/histogram, we can finally find out + // if the metric already exists. 
+ if p.currentMF.GetType() == dto.MetricType_SUMMARY { + signature := model.LabelsToSignature(p.currentLabels) + if summary := p.summaries[signature]; summary != nil { + p.currentMetric = summary + } else { + p.summaries[signature] = p.currentMetric + p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric) + } + } else if p.currentMF.GetType() == dto.MetricType_HISTOGRAM { + signature := model.LabelsToSignature(p.currentLabels) + if histogram := p.histograms[signature]; histogram != nil { + p.currentMetric = histogram + } else { + p.histograms[signature] = p.currentMetric + p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric) + } + } else { + p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric) + } + if p.readTokenUntilWhitespace(); p.err != nil { + return nil // Unexpected end of input. + } + value, err := strconv.ParseFloat(p.currentToken.String(), 64) + if err != nil { + // Create a more helpful error message. + p.parseError(fmt.Sprintf("expected float as value, got %q", p.currentToken.String())) + return nil + } + switch p.currentMF.GetType() { + case dto.MetricType_COUNTER: + p.currentMetric.Counter = &dto.Counter{Value: proto.Float64(value)} + case dto.MetricType_GAUGE: + p.currentMetric.Gauge = &dto.Gauge{Value: proto.Float64(value)} + case dto.MetricType_UNTYPED: + p.currentMetric.Untyped = &dto.Untyped{Value: proto.Float64(value)} + case dto.MetricType_SUMMARY: + // *sigh* + if p.currentMetric.Summary == nil { + p.currentMetric.Summary = &dto.Summary{} + } + switch { + case p.currentIsSummaryCount: + p.currentMetric.Summary.SampleCount = proto.Uint64(uint64(value)) + case p.currentIsSummarySum: + p.currentMetric.Summary.SampleSum = proto.Float64(value) + case !math.IsNaN(p.currentQuantile): + p.currentMetric.Summary.Quantile = append( + p.currentMetric.Summary.Quantile, + &dto.Quantile{ + Quantile: proto.Float64(p.currentQuantile), + Value: proto.Float64(value), + }, + ) + } + case dto.MetricType_HISTOGRAM: + // *sigh* + if 
p.currentMetric.Histogram == nil { + p.currentMetric.Histogram = &dto.Histogram{} + } + switch { + case p.currentIsHistogramCount: + p.currentMetric.Histogram.SampleCount = proto.Uint64(uint64(value)) + case p.currentIsHistogramSum: + p.currentMetric.Histogram.SampleSum = proto.Float64(value) + case !math.IsNaN(p.currentBucket): + p.currentMetric.Histogram.Bucket = append( + p.currentMetric.Histogram.Bucket, + &dto.Bucket{ + UpperBound: proto.Float64(p.currentBucket), + CumulativeCount: proto.Uint64(uint64(value)), + }, + ) + } + default: + p.err = fmt.Errorf("unexpected type for metric name %q", p.currentMF.GetName()) + } + if p.currentByte == '\n' { + return p.startOfLine + } + return p.startTimestamp +} + +// startTimestamp represents the state where the next byte read from p.buf is +// the start of the timestamp (or whitespace leading up to it). +func (p *TextParser) startTimestamp() stateFn { + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + if p.readTokenUntilWhitespace(); p.err != nil { + return nil // Unexpected end of input. + } + timestamp, err := strconv.ParseInt(p.currentToken.String(), 10, 64) + if err != nil { + // Create a more helpful error message. + p.parseError(fmt.Sprintf("expected integer as timestamp, got %q", p.currentToken.String())) + return nil + } + p.currentMetric.TimestampMs = proto.Int64(timestamp) + if p.readTokenUntilNewline(false); p.err != nil { + return nil // Unexpected end of input. + } + if p.currentToken.Len() > 0 { + p.parseError(fmt.Sprintf("spurious string after timestamp: %q", p.currentToken.String())) + return nil + } + return p.startOfLine +} + +// readingHelp represents the state where the last byte read (now in +// p.currentByte) is the first byte of the docstring after 'HELP'. 
+func (p *TextParser) readingHelp() stateFn { + if p.currentMF.Help != nil { + p.parseError(fmt.Sprintf("second HELP line for metric name %q", p.currentMF.GetName())) + return nil + } + // Rest of line is the docstring. + if p.readTokenUntilNewline(true); p.err != nil { + return nil // Unexpected end of input. + } + p.currentMF.Help = proto.String(p.currentToken.String()) + return p.startOfLine +} + +// readingType represents the state where the last byte read (now in +// p.currentByte) is the first byte of the type hint after 'HELP'. +func (p *TextParser) readingType() stateFn { + if p.currentMF.Type != nil { + p.parseError(fmt.Sprintf("second TYPE line for metric name %q, or TYPE reported after samples", p.currentMF.GetName())) + return nil + } + // Rest of line is the type. + if p.readTokenUntilNewline(false); p.err != nil { + return nil // Unexpected end of input. + } + metricType, ok := dto.MetricType_value[strings.ToUpper(p.currentToken.String())] + if !ok { + p.parseError(fmt.Sprintf("unknown metric type %q", p.currentToken.String())) + return nil + } + p.currentMF.Type = dto.MetricType(metricType).Enum() + return p.startOfLine +} + +// parseError sets p.err to a ParseError at the current line with the given +// message. +func (p *TextParser) parseError(msg string) { + p.err = ParseError{ + Line: p.lineCount, + Msg: msg, + } +} + +// skipBlankTab reads (and discards) bytes from p.buf until it encounters a byte +// that is neither ' ' nor '\t'. That byte is left in p.currentByte. +func (p *TextParser) skipBlankTab() { + for { + if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil || !isBlankOrTab(p.currentByte) { + return + } + } +} + +// skipBlankTabIfCurrentBlankTab works exactly as skipBlankTab but doesn't do +// anything if p.currentByte is neither ' ' nor '\t'. +func (p *TextParser) skipBlankTabIfCurrentBlankTab() { + if isBlankOrTab(p.currentByte) { + p.skipBlankTab() + } +} + +// readTokenUntilWhitespace copies bytes from p.buf into p.currentToken. 
The +// first byte considered is the byte already read (now in p.currentByte). The +// first whitespace byte encountered is still copied into p.currentByte, but not +// into p.currentToken. +func (p *TextParser) readTokenUntilWhitespace() { + p.currentToken.Reset() + for p.err == nil && !isBlankOrTab(p.currentByte) && p.currentByte != '\n' { + p.currentToken.WriteByte(p.currentByte) + p.currentByte, p.err = p.buf.ReadByte() + } +} + +// readTokenUntilNewline copies bytes from p.buf into p.currentToken. The first +// byte considered is the byte already read (now in p.currentByte). The first +// newline byte encountered is still copied into p.currentByte, but not into +// p.currentToken. If recognizeEscapeSequence is true, two escape sequences are +// recognized: '\\' tranlates into '\', and '\n' into a line-feed character. All +// other escape sequences are invalid and cause an error. +func (p *TextParser) readTokenUntilNewline(recognizeEscapeSequence bool) { + p.currentToken.Reset() + escaped := false + for p.err == nil { + if recognizeEscapeSequence && escaped { + switch p.currentByte { + case '\\': + p.currentToken.WriteByte(p.currentByte) + case 'n': + p.currentToken.WriteByte('\n') + default: + p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte)) + return + } + escaped = false + } else { + switch p.currentByte { + case '\n': + return + case '\\': + escaped = true + default: + p.currentToken.WriteByte(p.currentByte) + } + } + p.currentByte, p.err = p.buf.ReadByte() + } +} + +// readTokenAsMetricName copies a metric name from p.buf into p.currentToken. +// The first byte considered is the byte already read (now in p.currentByte). +// The first byte not part of a metric name is still copied into p.currentByte, +// but not into p.currentToken. 
+func (p *TextParser) readTokenAsMetricName() { + p.currentToken.Reset() + if !isValidMetricNameStart(p.currentByte) { + return + } + for { + p.currentToken.WriteByte(p.currentByte) + p.currentByte, p.err = p.buf.ReadByte() + if p.err != nil || !isValidMetricNameContinuation(p.currentByte) { + return + } + } +} + +// readTokenAsLabelName copies a label name from p.buf into p.currentToken. +// The first byte considered is the byte already read (now in p.currentByte). +// The first byte not part of a label name is still copied into p.currentByte, +// but not into p.currentToken. +func (p *TextParser) readTokenAsLabelName() { + p.currentToken.Reset() + if !isValidLabelNameStart(p.currentByte) { + return + } + for { + p.currentToken.WriteByte(p.currentByte) + p.currentByte, p.err = p.buf.ReadByte() + if p.err != nil || !isValidLabelNameContinuation(p.currentByte) { + return + } + } +} + +// readTokenAsLabelValue copies a label value from p.buf into p.currentToken. +// In contrast to the other 'readTokenAs...' functions, which start with the +// last read byte in p.currentByte, this method ignores p.currentByte and starts +// with reading a new byte from p.buf. The first byte not part of a label value +// is still copied into p.currentByte, but not into p.currentToken. 
+func (p *TextParser) readTokenAsLabelValue() { + p.currentToken.Reset() + escaped := false + for { + if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil { + return + } + if escaped { + switch p.currentByte { + case '"', '\\': + p.currentToken.WriteByte(p.currentByte) + case 'n': + p.currentToken.WriteByte('\n') + default: + p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte)) + return + } + escaped = false + continue + } + switch p.currentByte { + case '"': + return + case '\n': + p.parseError(fmt.Sprintf("label value %q contains unescaped new-line", p.currentToken.String())) + return + case '\\': + escaped = true + default: + p.currentToken.WriteByte(p.currentByte) + } + } +} + +func (p *TextParser) setOrCreateCurrentMF() { + p.currentIsSummaryCount = false + p.currentIsSummarySum = false + p.currentIsHistogramCount = false + p.currentIsHistogramSum = false + name := p.currentToken.String() + if p.currentMF = p.metricFamiliesByName[name]; p.currentMF != nil { + return + } + // Try out if this is a _sum or _count for a summary/histogram. 
+ summaryName := summaryMetricName(name) + if p.currentMF = p.metricFamiliesByName[summaryName]; p.currentMF != nil { + if p.currentMF.GetType() == dto.MetricType_SUMMARY { + if isCount(name) { + p.currentIsSummaryCount = true + } + if isSum(name) { + p.currentIsSummarySum = true + } + return + } + } + histogramName := histogramMetricName(name) + if p.currentMF = p.metricFamiliesByName[histogramName]; p.currentMF != nil { + if p.currentMF.GetType() == dto.MetricType_HISTOGRAM { + if isCount(name) { + p.currentIsHistogramCount = true + } + if isSum(name) { + p.currentIsHistogramSum = true + } + return + } + } + p.currentMF = &dto.MetricFamily{Name: proto.String(name)} + p.metricFamiliesByName[name] = p.currentMF +} + +func isValidLabelNameStart(b byte) bool { + return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' +} + +func isValidLabelNameContinuation(b byte) bool { + return isValidLabelNameStart(b) || (b >= '0' && b <= '9') +} + +func isValidMetricNameStart(b byte) bool { + return isValidLabelNameStart(b) || b == ':' +} + +func isValidMetricNameContinuation(b byte) bool { + return isValidLabelNameContinuation(b) || b == ':' +} + +func isBlankOrTab(b byte) bool { + return b == ' ' || b == '\t' +} + +func isCount(name string) bool { + return len(name) > 6 && name[len(name)-6:] == "_count" +} + +func isSum(name string) bool { + return len(name) > 4 && name[len(name)-4:] == "_sum" +} + +func isBucket(name string) bool { + return len(name) > 7 && name[len(name)-7:] == "_bucket" +} + +func summaryMetricName(name string) string { + switch { + case isCount(name): + return name[:len(name)-6] + case isSum(name): + return name[:len(name)-4] + default: + return name + } +} + +func histogramMetricName(name string) string { + switch { + case isCount(name): + return name[:len(name)-6] + case isSum(name): + return name[:len(name)-4] + case isBucket(name): + return name[:len(name)-7] + default: + return name + } +} diff -Nru 
juju-core-2.0~beta15/src/github.com/prometheus/common/expfmt/text_parse_test.go juju-core-2.0.0/src/github.com/prometheus/common/expfmt/text_parse_test.go --- juju-core-2.0~beta15/src/github.com/prometheus/common/expfmt/text_parse_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/common/expfmt/text_parse_test.go 2016-10-13 14:32:23.000000000 +0000 @@ -0,0 +1,588 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package expfmt + +import ( + "math" + "strings" + "testing" + + "github.com/golang/protobuf/proto" + dto "github.com/prometheus/client_model/go" +) + +func testTextParse(t testing.TB) { + var scenarios = []struct { + in string + out []*dto.MetricFamily + }{ + // 0: Empty lines as input. + { + in: ` + +`, + out: []*dto.MetricFamily{}, + }, + // 1: Minimal case. + { + in: ` +minimal_metric 1.234 +another_metric -3e3 103948 +# Even that: +no_labels{} 3 +# HELP line for non-existing metric will be ignored. 
+`, + out: []*dto.MetricFamily{ + &dto.MetricFamily{ + Name: proto.String("minimal_metric"), + Type: dto.MetricType_UNTYPED.Enum(), + Metric: []*dto.Metric{ + &dto.Metric{ + Untyped: &dto.Untyped{ + Value: proto.Float64(1.234), + }, + }, + }, + }, + &dto.MetricFamily{ + Name: proto.String("another_metric"), + Type: dto.MetricType_UNTYPED.Enum(), + Metric: []*dto.Metric{ + &dto.Metric{ + Untyped: &dto.Untyped{ + Value: proto.Float64(-3e3), + }, + TimestampMs: proto.Int64(103948), + }, + }, + }, + &dto.MetricFamily{ + Name: proto.String("no_labels"), + Type: dto.MetricType_UNTYPED.Enum(), + Metric: []*dto.Metric{ + &dto.Metric{ + Untyped: &dto.Untyped{ + Value: proto.Float64(3), + }, + }, + }, + }, + }, + }, + // 2: Counters & gauges, docstrings, various whitespace, escape sequences. + { + in: ` +# A normal comment. +# +# TYPE name counter +name{labelname="val1",basename="basevalue"} NaN +name {labelname="val2",basename="base\"v\\al\nue"} 0.23 1234567890 +# HELP name two-line\n doc str\\ing + + # HELP name2 doc str"ing 2 + # TYPE name2 gauge +name2{labelname="val2" ,basename = "basevalue2" } +Inf 54321 +name2{ labelname = "val1" , }-Inf +`, + out: []*dto.MetricFamily{ + &dto.MetricFamily{ + Name: proto.String("name"), + Help: proto.String("two-line\n doc str\\ing"), + Type: dto.MetricType_COUNTER.Enum(), + Metric: []*dto.Metric{ + &dto.Metric{ + Label: []*dto.LabelPair{ + &dto.LabelPair{ + Name: proto.String("labelname"), + Value: proto.String("val1"), + }, + &dto.LabelPair{ + Name: proto.String("basename"), + Value: proto.String("basevalue"), + }, + }, + Counter: &dto.Counter{ + Value: proto.Float64(math.NaN()), + }, + }, + &dto.Metric{ + Label: []*dto.LabelPair{ + &dto.LabelPair{ + Name: proto.String("labelname"), + Value: proto.String("val2"), + }, + &dto.LabelPair{ + Name: proto.String("basename"), + Value: proto.String("base\"v\\al\nue"), + }, + }, + Counter: &dto.Counter{ + Value: proto.Float64(.23), + }, + TimestampMs: proto.Int64(1234567890), + }, + }, + }, + 
&dto.MetricFamily{ + Name: proto.String("name2"), + Help: proto.String("doc str\"ing 2"), + Type: dto.MetricType_GAUGE.Enum(), + Metric: []*dto.Metric{ + &dto.Metric{ + Label: []*dto.LabelPair{ + &dto.LabelPair{ + Name: proto.String("labelname"), + Value: proto.String("val2"), + }, + &dto.LabelPair{ + Name: proto.String("basename"), + Value: proto.String("basevalue2"), + }, + }, + Gauge: &dto.Gauge{ + Value: proto.Float64(math.Inf(+1)), + }, + TimestampMs: proto.Int64(54321), + }, + &dto.Metric{ + Label: []*dto.LabelPair{ + &dto.LabelPair{ + Name: proto.String("labelname"), + Value: proto.String("val1"), + }, + }, + Gauge: &dto.Gauge{ + Value: proto.Float64(math.Inf(-1)), + }, + }, + }, + }, + }, + }, + // 3: The evil summary, mixed with other types and funny comments. + { + in: ` +# TYPE my_summary summary +my_summary{n1="val1",quantile="0.5"} 110 +decoy -1 -2 +my_summary{n1="val1",quantile="0.9"} 140 1 +my_summary_count{n1="val1"} 42 +# Latest timestamp wins in case of a summary. +my_summary_sum{n1="val1"} 4711 2 +fake_sum{n1="val1"} 2001 +# TYPE another_summary summary +another_summary_count{n2="val2",n1="val1"} 20 +my_summary_count{n2="val2",n1="val1"} 5 5 +another_summary{n1="val1",n2="val2",quantile=".3"} -1.2 +my_summary_sum{n1="val2"} 08 15 +my_summary{n1="val3", quantile="0.2"} 4711 + my_summary{n1="val1",n2="val2",quantile="-12.34",} NaN +# some +# funny comments +# HELP +# HELP +# HELP my_summary +# HELP my_summary +`, + out: []*dto.MetricFamily{ + &dto.MetricFamily{ + Name: proto.String("fake_sum"), + Type: dto.MetricType_UNTYPED.Enum(), + Metric: []*dto.Metric{ + &dto.Metric{ + Label: []*dto.LabelPair{ + &dto.LabelPair{ + Name: proto.String("n1"), + Value: proto.String("val1"), + }, + }, + Untyped: &dto.Untyped{ + Value: proto.Float64(2001), + }, + }, + }, + }, + &dto.MetricFamily{ + Name: proto.String("decoy"), + Type: dto.MetricType_UNTYPED.Enum(), + Metric: []*dto.Metric{ + &dto.Metric{ + Untyped: &dto.Untyped{ + Value: proto.Float64(-1), + }, + 
TimestampMs: proto.Int64(-2), + }, + }, + }, + &dto.MetricFamily{ + Name: proto.String("my_summary"), + Type: dto.MetricType_SUMMARY.Enum(), + Metric: []*dto.Metric{ + &dto.Metric{ + Label: []*dto.LabelPair{ + &dto.LabelPair{ + Name: proto.String("n1"), + Value: proto.String("val1"), + }, + }, + Summary: &dto.Summary{ + SampleCount: proto.Uint64(42), + SampleSum: proto.Float64(4711), + Quantile: []*dto.Quantile{ + &dto.Quantile{ + Quantile: proto.Float64(0.5), + Value: proto.Float64(110), + }, + &dto.Quantile{ + Quantile: proto.Float64(0.9), + Value: proto.Float64(140), + }, + }, + }, + TimestampMs: proto.Int64(2), + }, + &dto.Metric{ + Label: []*dto.LabelPair{ + &dto.LabelPair{ + Name: proto.String("n2"), + Value: proto.String("val2"), + }, + &dto.LabelPair{ + Name: proto.String("n1"), + Value: proto.String("val1"), + }, + }, + Summary: &dto.Summary{ + SampleCount: proto.Uint64(5), + Quantile: []*dto.Quantile{ + &dto.Quantile{ + Quantile: proto.Float64(-12.34), + Value: proto.Float64(math.NaN()), + }, + }, + }, + TimestampMs: proto.Int64(5), + }, + &dto.Metric{ + Label: []*dto.LabelPair{ + &dto.LabelPair{ + Name: proto.String("n1"), + Value: proto.String("val2"), + }, + }, + Summary: &dto.Summary{ + SampleSum: proto.Float64(8), + }, + TimestampMs: proto.Int64(15), + }, + &dto.Metric{ + Label: []*dto.LabelPair{ + &dto.LabelPair{ + Name: proto.String("n1"), + Value: proto.String("val3"), + }, + }, + Summary: &dto.Summary{ + Quantile: []*dto.Quantile{ + &dto.Quantile{ + Quantile: proto.Float64(0.2), + Value: proto.Float64(4711), + }, + }, + }, + }, + }, + }, + &dto.MetricFamily{ + Name: proto.String("another_summary"), + Type: dto.MetricType_SUMMARY.Enum(), + Metric: []*dto.Metric{ + &dto.Metric{ + Label: []*dto.LabelPair{ + &dto.LabelPair{ + Name: proto.String("n2"), + Value: proto.String("val2"), + }, + &dto.LabelPair{ + Name: proto.String("n1"), + Value: proto.String("val1"), + }, + }, + Summary: &dto.Summary{ + SampleCount: proto.Uint64(20), + Quantile: 
[]*dto.Quantile{ + &dto.Quantile{ + Quantile: proto.Float64(0.3), + Value: proto.Float64(-1.2), + }, + }, + }, + }, + }, + }, + }, + }, + // 4: The histogram. + { + in: ` +# HELP request_duration_microseconds The response latency. +# TYPE request_duration_microseconds histogram +request_duration_microseconds_bucket{le="100"} 123 +request_duration_microseconds_bucket{le="120"} 412 +request_duration_microseconds_bucket{le="144"} 592 +request_duration_microseconds_bucket{le="172.8"} 1524 +request_duration_microseconds_bucket{le="+Inf"} 2693 +request_duration_microseconds_sum 1.7560473e+06 +request_duration_microseconds_count 2693 +`, + out: []*dto.MetricFamily{ + { + Name: proto.String("request_duration_microseconds"), + Help: proto.String("The response latency."), + Type: dto.MetricType_HISTOGRAM.Enum(), + Metric: []*dto.Metric{ + &dto.Metric{ + Histogram: &dto.Histogram{ + SampleCount: proto.Uint64(2693), + SampleSum: proto.Float64(1756047.3), + Bucket: []*dto.Bucket{ + &dto.Bucket{ + UpperBound: proto.Float64(100), + CumulativeCount: proto.Uint64(123), + }, + &dto.Bucket{ + UpperBound: proto.Float64(120), + CumulativeCount: proto.Uint64(412), + }, + &dto.Bucket{ + UpperBound: proto.Float64(144), + CumulativeCount: proto.Uint64(592), + }, + &dto.Bucket{ + UpperBound: proto.Float64(172.8), + CumulativeCount: proto.Uint64(1524), + }, + &dto.Bucket{ + UpperBound: proto.Float64(math.Inf(+1)), + CumulativeCount: proto.Uint64(2693), + }, + }, + }, + }, + }, + }, + }, + }, + } + + for i, scenario := range scenarios { + out, err := parser.TextToMetricFamilies(strings.NewReader(scenario.in)) + if err != nil { + t.Errorf("%d. error: %s", i, err) + continue + } + if expected, got := len(scenario.out), len(out); expected != got { + t.Errorf( + "%d. expected %d MetricFamilies, got %d", + i, expected, got, + ) + } + for _, expected := range scenario.out { + got, ok := out[expected.GetName()] + if !ok { + t.Errorf( + "%d. 
expected MetricFamily %q, found none", + i, expected.GetName(), + ) + continue + } + if expected.String() != got.String() { + t.Errorf( + "%d. expected MetricFamily %s, got %s", + i, expected, got, + ) + } + } + } +} + +func TestTextParse(t *testing.T) { + testTextParse(t) +} + +func BenchmarkTextParse(b *testing.B) { + for i := 0; i < b.N; i++ { + testTextParse(b) + } +} + +func testTextParseError(t testing.TB) { + var scenarios = []struct { + in string + err string + }{ + // 0: No new-line at end of input. + { + in: ` +bla 3.14 +blubber 42`, + err: "text format parsing error in line 3: unexpected end of input stream", + }, + // 1: Invalid escape sequence in label value. + { + in: `metric{label="\t"} 3.14`, + err: "text format parsing error in line 1: invalid escape sequence", + }, + // 2: Newline in label value. + { + in: ` +metric{label="new +line"} 3.14 +`, + err: `text format parsing error in line 2: label value "new" contains unescaped new-line`, + }, + // 3: + { + in: `metric{@="bla"} 3.14`, + err: "text format parsing error in line 1: invalid label name for metric", + }, + // 4: + { + in: `metric{__name__="bla"} 3.14`, + err: `text format parsing error in line 1: label name "__name__" is reserved`, + }, + // 5: + { + in: `metric{label+="bla"} 3.14`, + err: "text format parsing error in line 1: expected '=' after label name", + }, + // 6: + { + in: `metric{label=bla} 3.14`, + err: "text format parsing error in line 1: expected '\"' at start of label value", + }, + // 7: + { + in: ` +# TYPE metric summary +metric{quantile="bla"} 3.14 +`, + err: "text format parsing error in line 3: expected float as value for 'quantile' label", + }, + // 8: + { + in: `metric{label="bla"+} 3.14`, + err: "text format parsing error in line 1: unexpected end of label value", + }, + // 9: + { + in: `metric{label="bla"} 3.14 2.72 +`, + err: "text format parsing error in line 1: expected integer as timestamp", + }, + // 10: + { + in: `metric{label="bla"} 3.14 2 3 +`, + err: "text 
format parsing error in line 1: spurious string after timestamp", + }, + // 11: + { + in: `metric{label="bla"} blubb +`, + err: "text format parsing error in line 1: expected float as value", + }, + // 12: + { + in: ` +# HELP metric one +# HELP metric two +`, + err: "text format parsing error in line 3: second HELP line for metric name", + }, + // 13: + { + in: ` +# TYPE metric counter +# TYPE metric untyped +`, + err: `text format parsing error in line 3: second TYPE line for metric name "metric", or TYPE reported after samples`, + }, + // 14: + { + in: ` +metric 4.12 +# TYPE metric counter +`, + err: `text format parsing error in line 3: second TYPE line for metric name "metric", or TYPE reported after samples`, + }, + // 14: + { + in: ` +# TYPE metric bla +`, + err: "text format parsing error in line 2: unknown metric type", + }, + // 15: + { + in: ` +# TYPE met-ric +`, + err: "text format parsing error in line 2: invalid metric name in comment", + }, + // 16: + { + in: `@invalidmetric{label="bla"} 3.14 2`, + err: "text format parsing error in line 1: invalid metric name", + }, + // 17: + { + in: `{label="bla"} 3.14 2`, + err: "text format parsing error in line 1: invalid metric name", + }, + // 18: + { + in: ` +# TYPE metric histogram +metric_bucket{le="bla"} 3.14 +`, + err: "text format parsing error in line 3: expected float as value for 'le' label", + }, + } + + for i, scenario := range scenarios { + _, err := parser.TextToMetricFamilies(strings.NewReader(scenario.in)) + if err == nil { + t.Errorf("%d. expected error, got nil", i) + continue + } + if expected, got := scenario.err, err.Error(); strings.Index(got, expected) != 0 { + t.Errorf( + "%d. 
expected error starting with %q, got %q", + i, expected, got, + ) + } + } + +} + +func TestTextParseError(t *testing.T) { + testTextParseError(t) +} + +func BenchmarkParseError(b *testing.B) { + for i := 0; i < b.N; i++ { + testTextParseError(b) + } +} diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go juju-core-2.0.0/src/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go --- juju-core-2.0~beta15/src/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go 2016-10-13 14:32:23.000000000 +0000 @@ -0,0 +1,162 @@ +/* +HTTP Content-Type Autonegotiation. + +The functions in this package implement the behaviour specified in +http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html + +Copyright (c) 2011, Open Knowledge Foundation Ltd. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + + Neither the name of the Open Knowledge Foundation Ltd. nor the + names of its contributors may be used to endorse or promote + products derived from this software without specific prior written + permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +*/ +package goautoneg + +import ( + "sort" + "strconv" + "strings" +) + +// Structure to represent a clause in an HTTP Accept Header +type Accept struct { + Type, SubType string + Q float64 + Params map[string]string +} + +// For internal use, so that we can use the sort interface +type accept_slice []Accept + +func (accept accept_slice) Len() int { + slice := []Accept(accept) + return len(slice) +} + +func (accept accept_slice) Less(i, j int) bool { + slice := []Accept(accept) + ai, aj := slice[i], slice[j] + if ai.Q > aj.Q { + return true + } + if ai.Type != "*" && aj.Type == "*" { + return true + } + if ai.SubType != "*" && aj.SubType == "*" { + return true + } + return false +} + +func (accept accept_slice) Swap(i, j int) { + slice := []Accept(accept) + slice[i], slice[j] = slice[j], slice[i] +} + +// Parse an Accept Header string returning a sorted list +// of clauses +func ParseAccept(header string) (accept []Accept) { + parts := strings.Split(header, ",") + accept = make([]Accept, 0, len(parts)) + for _, part := range parts { + part := strings.Trim(part, " ") + + a := Accept{} + a.Params = make(map[string]string) + a.Q = 1.0 + + mrp := strings.Split(part, ";") + + media_range := mrp[0] + sp := strings.Split(media_range, "/") + a.Type = strings.Trim(sp[0], " ") + + switch { + case len(sp) == 1 && a.Type == "*": + a.SubType = "*" + case len(sp) == 2: + a.SubType = strings.Trim(sp[1], " ") + default: + continue + } + + if len(mrp) == 1 { + 
accept = append(accept, a) + continue + } + + for _, param := range mrp[1:] { + sp := strings.SplitN(param, "=", 2) + if len(sp) != 2 { + continue + } + token := strings.Trim(sp[0], " ") + if token == "q" { + a.Q, _ = strconv.ParseFloat(sp[1], 32) + } else { + a.Params[token] = strings.Trim(sp[1], " ") + } + } + + accept = append(accept, a) + } + + slice := accept_slice(accept) + sort.Sort(slice) + + return +} + +// Negotiate the most appropriate content_type given the accept header +// and a list of alternatives. +func Negotiate(header string, alternatives []string) (content_type string) { + asp := make([][]string, 0, len(alternatives)) + for _, ctype := range alternatives { + asp = append(asp, strings.SplitN(ctype, "/", 2)) + } + for _, clause := range ParseAccept(header) { + for i, ctsp := range asp { + if clause.Type == ctsp[0] && clause.SubType == ctsp[1] { + content_type = alternatives[i] + return + } + if clause.Type == ctsp[0] && clause.SubType == "*" { + content_type = alternatives[i] + return + } + if clause.Type == "*" && clause.SubType == "*" { + content_type = alternatives[i] + return + } + } + } + return +} diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg_test.go juju-core-2.0.0/src/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg_test.go --- juju-core-2.0~beta15/src/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg_test.go 2016-10-13 14:32:23.000000000 +0000 @@ -0,0 +1,33 @@ +package goautoneg + +import ( + "testing" +) + +var chrome = "application/xml,application/xhtml+xml,text/html;q=0.9,text/plain;q=0.8,image/png,*/*;q=0.5" + +func TestParseAccept(t *testing.T) { + alternatives := []string{"text/html", "image/png"} + content_type := Negotiate(chrome, alternatives) + if content_type != "image/png" { + 
t.Errorf("got %s expected image/png", content_type) + } + + alternatives = []string{"text/html", "text/plain", "text/n3"} + content_type = Negotiate(chrome, alternatives) + if content_type != "text/html" { + t.Errorf("got %s expected text/html", content_type) + } + + alternatives = []string{"text/n3", "text/plain"} + content_type = Negotiate(chrome, alternatives) + if content_type != "text/plain" { + t.Errorf("got %s expected text/plain", content_type) + } + + alternatives = []string{"text/n3", "application/rdf+xml"} + content_type = Negotiate(chrome, alternatives) + if content_type != "text/n3" { + t.Errorf("got %s expected text/n3", content_type) + } +} diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt juju-core-2.0.0/src/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt --- juju-core-2.0~beta15/src/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt 2016-10-13 14:32:23.000000000 +0000 @@ -0,0 +1,67 @@ +PACKAGE + +package goautoneg +import "bitbucket.org/ww/goautoneg" + +HTTP Content-Type Autonegotiation. + +The functions in this package implement the behaviour specified in +http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html + +Copyright (c) 2011, Open Knowledge Foundation Ltd. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. 
+ + Neither the name of the Open Knowledge Foundation Ltd. nor the + names of its contributors may be used to endorse or promote + products derived from this software without specific prior written + permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +FUNCTIONS + +func Negotiate(header string, alternatives []string) (content_type string) +Negotiate the most appropriate content_type given the accept header +and a list of alternatives. + +func ParseAccept(header string) (accept []Accept) +Parse an Accept Header string returning a sorted list +of clauses + + +TYPES + +type Accept struct { + Type, SubType string + Q float32 + Params map[string]string +} +Structure to represent a clause in an HTTP Accept Header + + +SUBDIRECTORIES + + .hg diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/common/LICENSE juju-core-2.0.0/src/github.com/prometheus/common/LICENSE --- juju-core-2.0~beta15/src/github.com/prometheus/common/LICENSE 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/common/LICENSE 2016-10-13 14:32:23.000000000 +0000 @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/common/log/log.go juju-core-2.0.0/src/github.com/prometheus/common/log/log.go --- juju-core-2.0~beta15/src/github.com/prometheus/common/log/log.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/common/log/log.go 2016-10-13 14:32:23.000000000 +0000 @@ -0,0 +1,304 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package log + +import ( + "flag" + "fmt" + "net/url" + "os" + "runtime" + "strings" + + "github.com/Sirupsen/logrus" +) + +type levelFlag struct{} + +// String implements flag.Value. +func (f levelFlag) String() string { + return origLogger.Level.String() +} + +// Set implements flag.Value. +func (f levelFlag) Set(level string) error { + l, err := logrus.ParseLevel(level) + if err != nil { + return err + } + origLogger.Level = l + return nil +} + +// setSyslogFormatter is nil if the target architecture does not support syslog. +var setSyslogFormatter func(string, string) error + +func setJSONFormatter() { + origLogger.Formatter = &logrus.JSONFormatter{} +} + +type logFormatFlag struct{ uri string } + +// String implements flag.Value. +func (f logFormatFlag) String() string { + return f.uri +} + +// Set implements flag.Value. 
+func (f logFormatFlag) Set(format string) error { + f.uri = format + u, err := url.Parse(format) + if err != nil { + return err + } + if u.Scheme != "logger" { + return fmt.Errorf("invalid scheme %s", u.Scheme) + } + jsonq := u.Query().Get("json") + if jsonq == "true" { + setJSONFormatter() + } + + switch u.Opaque { + case "syslog": + if setSyslogFormatter == nil { + return fmt.Errorf("system does not support syslog") + } + appname := u.Query().Get("appname") + facility := u.Query().Get("local") + return setSyslogFormatter(appname, facility) + case "stdout": + origLogger.Out = os.Stdout + case "stderr": + origLogger.Out = os.Stderr + + default: + return fmt.Errorf("unsupported logger %s", u.Opaque) + } + return nil +} + +func init() { + // In order for these flags to take effect, the user of the package must call + // flag.Parse() before logging anything. + flag.Var(levelFlag{}, "log.level", "Only log messages with the given severity or above. Valid levels: [debug, info, warn, error, fatal].") + flag.Var(logFormatFlag{}, "log.format", "If set use a syslog logger or JSON logging. Example: logger:syslog?appname=bob&local=7 or logger:stdout?json=true. Defaults to stderr.") +} + +type Logger interface { + Debug(...interface{}) + Debugln(...interface{}) + Debugf(string, ...interface{}) + + Info(...interface{}) + Infoln(...interface{}) + Infof(string, ...interface{}) + + Warn(...interface{}) + Warnln(...interface{}) + Warnf(string, ...interface{}) + + Error(...interface{}) + Errorln(...interface{}) + Errorf(string, ...interface{}) + + Fatal(...interface{}) + Fatalln(...interface{}) + Fatalf(string, ...interface{}) + + With(key string, value interface{}) Logger +} + +type logger struct { + entry *logrus.Entry +} + +func (l logger) With(key string, value interface{}) Logger { + return logger{l.entry.WithField(key, value)} +} + +// Debug logs a message at level Debug on the standard logger. +func (l logger) Debug(args ...interface{}) { + l.sourced().Debug(args...) 
+} + +// Debug logs a message at level Debug on the standard logger. +func (l logger) Debugln(args ...interface{}) { + l.sourced().Debugln(args...) +} + +// Debugf logs a message at level Debug on the standard logger. +func (l logger) Debugf(format string, args ...interface{}) { + l.sourced().Debugf(format, args...) +} + +// Info logs a message at level Info on the standard logger. +func (l logger) Info(args ...interface{}) { + l.sourced().Info(args...) +} + +// Info logs a message at level Info on the standard logger. +func (l logger) Infoln(args ...interface{}) { + l.sourced().Infoln(args...) +} + +// Infof logs a message at level Info on the standard logger. +func (l logger) Infof(format string, args ...interface{}) { + l.sourced().Infof(format, args...) +} + +// Warn logs a message at level Warn on the standard logger. +func (l logger) Warn(args ...interface{}) { + l.sourced().Warn(args...) +} + +// Warn logs a message at level Warn on the standard logger. +func (l logger) Warnln(args ...interface{}) { + l.sourced().Warnln(args...) +} + +// Warnf logs a message at level Warn on the standard logger. +func (l logger) Warnf(format string, args ...interface{}) { + l.sourced().Warnf(format, args...) +} + +// Error logs a message at level Error on the standard logger. +func (l logger) Error(args ...interface{}) { + l.sourced().Error(args...) +} + +// Error logs a message at level Error on the standard logger. +func (l logger) Errorln(args ...interface{}) { + l.sourced().Errorln(args...) +} + +// Errorf logs a message at level Error on the standard logger. +func (l logger) Errorf(format string, args ...interface{}) { + l.sourced().Errorf(format, args...) +} + +// Fatal logs a message at level Fatal on the standard logger. +func (l logger) Fatal(args ...interface{}) { + l.sourced().Fatal(args...) +} + +// Fatal logs a message at level Fatal on the standard logger. +func (l logger) Fatalln(args ...interface{}) { + l.sourced().Fatalln(args...) 
+} + +// Fatalf logs a message at level Fatal on the standard logger. +func (l logger) Fatalf(format string, args ...interface{}) { + l.sourced().Fatalf(format, args...) +} + +// sourced adds a source field to the logger that contains +// the file name and line where the logging happened. +func (l logger) sourced() *logrus.Entry { + _, file, line, ok := runtime.Caller(2) + if !ok { + file = "" + line = 1 + } else { + slash := strings.LastIndex(file, "/") + file = file[slash+1:] + } + return l.entry.WithField("source", fmt.Sprintf("%s:%d", file, line)) +} + +var origLogger = logrus.New() +var baseLogger = logger{entry: logrus.NewEntry(origLogger)} + +func Base() Logger { + return baseLogger +} + +func With(key string, value interface{}) Logger { + return baseLogger.With(key, value) +} + +// Debug logs a message at level Debug on the standard logger. +func Debug(args ...interface{}) { + baseLogger.sourced().Debug(args...) +} + +// Debug logs a message at level Debug on the standard logger. +func Debugln(args ...interface{}) { + baseLogger.sourced().Debugln(args...) +} + +// Debugf logs a message at level Debug on the standard logger. +func Debugf(format string, args ...interface{}) { + baseLogger.sourced().Debugf(format, args...) +} + +// Info logs a message at level Info on the standard logger. +func Info(args ...interface{}) { + baseLogger.sourced().Info(args...) +} + +// Info logs a message at level Info on the standard logger. +func Infoln(args ...interface{}) { + baseLogger.sourced().Infoln(args...) +} + +// Infof logs a message at level Info on the standard logger. +func Infof(format string, args ...interface{}) { + baseLogger.sourced().Infof(format, args...) +} + +// Warn logs a message at level Warn on the standard logger. +func Warn(args ...interface{}) { + baseLogger.sourced().Warn(args...) +} + +// Warn logs a message at level Warn on the standard logger. +func Warnln(args ...interface{}) { + baseLogger.sourced().Warnln(args...) 
+} + +// Warnf logs a message at level Warn on the standard logger. +func Warnf(format string, args ...interface{}) { + baseLogger.sourced().Warnf(format, args...) +} + +// Error logs a message at level Error on the standard logger. +func Error(args ...interface{}) { + baseLogger.sourced().Error(args...) +} + +// Error logs a message at level Error on the standard logger. +func Errorln(args ...interface{}) { + baseLogger.sourced().Errorln(args...) +} + +// Errorf logs a message at level Error on the standard logger. +func Errorf(format string, args ...interface{}) { + baseLogger.sourced().Errorf(format, args...) +} + +// Fatal logs a message at level Fatal on the standard logger. +func Fatal(args ...interface{}) { + baseLogger.sourced().Fatal(args...) +} + +// Fatal logs a message at level Fatal on the standard logger. +func Fatalln(args ...interface{}) { + baseLogger.sourced().Fatalln(args...) +} + +// Fatalf logs a message at level Fatal on the standard logger. +func Fatalf(format string, args ...interface{}) { + baseLogger.sourced().Fatalf(format, args...) +} diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/common/log/log_test.go juju-core-2.0.0/src/github.com/prometheus/common/log/log_test.go --- juju-core-2.0~beta15/src/github.com/prometheus/common/log/log_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/common/log/log_test.go 2016-10-13 14:32:23.000000000 +0000 @@ -0,0 +1,39 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package log + +import ( + "bytes" + "regexp" + "testing" + + "github.com/Sirupsen/logrus" +) + +func TestFileLineLogging(t *testing.T) { + var buf bytes.Buffer + origLogger.Out = &buf + origLogger.Formatter = &logrus.TextFormatter{ + DisableColors: true, + } + + // The default logging level should be "info". + Debug("This debug-level line should not show up in the output.") + Infof("This %s-level line should show up in the output.", "info") + + re := `^time=".*" level=info msg="This info-level line should show up in the output." source="log_test.go:33" \n$` + if !regexp.MustCompile(re).Match(buf.Bytes()) { + t.Fatalf("%q did not match expected regex %q", buf.String(), re) + } +} diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/common/log/syslog_formatter.go juju-core-2.0.0/src/github.com/prometheus/common/log/syslog_formatter.go --- juju-core-2.0~beta15/src/github.com/prometheus/common/log/syslog_formatter.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/common/log/syslog_formatter.go 2016-10-13 14:32:23.000000000 +0000 @@ -0,0 +1,119 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// +build !windows,!nacl,!plan9 + +package log + +import ( + "fmt" + "log/syslog" + "os" + + "github.com/Sirupsen/logrus" +) + +func init() { + setSyslogFormatter = func(appname, local string) error { + if appname == "" { + return fmt.Errorf("missing appname parameter") + } + if local == "" { + return fmt.Errorf("missing local parameter") + } + + fmter, err := newSyslogger(appname, local, origLogger.Formatter) + if err != nil { + fmt.Fprintf(os.Stderr, "error creating syslog formatter: %v\n", err) + origLogger.Errorf("can't connect logger to syslog: %v", err) + return err + } + origLogger.Formatter = fmter + return nil + } +} + +var ceeTag = []byte("@cee:") + +type syslogger struct { + wrap logrus.Formatter + out *syslog.Writer +} + +func newSyslogger(appname string, facility string, fmter logrus.Formatter) (*syslogger, error) { + priority, err := getFacility(facility) + if err != nil { + return nil, err + } + out, err := syslog.New(priority, appname) + return &syslogger{ + out: out, + wrap: fmter, + }, err +} + +func getFacility(facility string) (syslog.Priority, error) { + switch facility { + case "0": + return syslog.LOG_LOCAL0, nil + case "1": + return syslog.LOG_LOCAL1, nil + case "2": + return syslog.LOG_LOCAL2, nil + case "3": + return syslog.LOG_LOCAL3, nil + case "4": + return syslog.LOG_LOCAL4, nil + case "5": + return syslog.LOG_LOCAL5, nil + case "6": + return syslog.LOG_LOCAL6, nil + case "7": + return syslog.LOG_LOCAL7, nil + } + return syslog.LOG_LOCAL0, fmt.Errorf("invalid local(%s) for syslog", facility) +} + +func (s *syslogger) Format(e *logrus.Entry) ([]byte, error) { + data, err := s.wrap.Format(e) + if err != nil { + fmt.Fprintf(os.Stderr, "syslogger: can't format entry: %v\n", err) + return data, err + } + // only append tag to data sent to syslog (line), not to what + // is returned + line := string(append(ceeTag, data...)) + + switch e.Level { + case logrus.PanicLevel: + err = s.out.Crit(line) + case logrus.FatalLevel: + err = 
s.out.Crit(line) + case logrus.ErrorLevel: + err = s.out.Err(line) + case logrus.WarnLevel: + err = s.out.Warning(line) + case logrus.InfoLevel: + err = s.out.Info(line) + case logrus.DebugLevel: + err = s.out.Debug(line) + default: + err = s.out.Notice(line) + } + + if err != nil { + fmt.Fprintf(os.Stderr, "syslogger: can't send log to syslog: %v\n", err) + } + + return data, err +} diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/common/model/alert.go juju-core-2.0.0/src/github.com/prometheus/common/model/alert.go --- juju-core-2.0~beta15/src/github.com/prometheus/common/model/alert.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/common/model/alert.go 2016-10-13 14:32:23.000000000 +0000 @@ -0,0 +1,136 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "fmt" + "time" +) + +type AlertStatus string + +const ( + AlertFiring AlertStatus = "firing" + AlertResolved AlertStatus = "resolved" +) + +// Alert is a generic representation of an alert in the Prometheus eco-system. +type Alert struct { + // Label value pairs for purpose of aggregation, matching, and disposition + // dispatching. This must minimally include an "alertname" label. + Labels LabelSet `json:"labels"` + + // Extra key/value information which does not define alert identity. + Annotations LabelSet `json:"annotations"` + + // The known time range for this alert. Both ends are optional. 
+ StartsAt time.Time `json:"startsAt,omitempty"` + EndsAt time.Time `json:"endsAt,omitempty"` + GeneratorURL string `json:"generatorURL"` +} + +// Name returns the name of the alert. It is equivalent to the "alertname" label. +func (a *Alert) Name() string { + return string(a.Labels[AlertNameLabel]) +} + +// Fingerprint returns a unique hash for the alert. It is equivalent to +// the fingerprint of the alert's label set. +func (a *Alert) Fingerprint() Fingerprint { + return a.Labels.Fingerprint() +} + +func (a *Alert) String() string { + s := fmt.Sprintf("%s[%s]", a.Name(), a.Fingerprint().String()[:7]) + if a.Resolved() { + return s + "[resolved]" + } + return s + "[active]" +} + +// Resolved returns true iff the activity interval ended in the past. +func (a *Alert) Resolved() bool { + return a.ResolvedAt(time.Now()) +} + +// ResolvedAt returns true off the activity interval ended before +// the given timestamp. +func (a *Alert) ResolvedAt(ts time.Time) bool { + if a.EndsAt.IsZero() { + return false + } + return !a.EndsAt.After(ts) +} + +// Status returns the status of the alert. +func (a *Alert) Status() AlertStatus { + if a.Resolved() { + return AlertResolved + } + return AlertFiring +} + +// Validate checks whether the alert data is inconsistent. +func (a *Alert) Validate() error { + if a.StartsAt.IsZero() { + return fmt.Errorf("start time missing") + } + if !a.EndsAt.IsZero() && a.EndsAt.Before(a.StartsAt) { + return fmt.Errorf("start time must be before end time") + } + if err := a.Labels.Validate(); err != nil { + return fmt.Errorf("invalid label set: %s", err) + } + if len(a.Labels) == 0 { + return fmt.Errorf("at least one label pair required") + } + if err := a.Annotations.Validate(); err != nil { + return fmt.Errorf("invalid annotations: %s", err) + } + return nil +} + +// Alert is a list of alerts that can be sorted in chronological order. 
+type Alerts []*Alert + +func (as Alerts) Len() int { return len(as) } +func (as Alerts) Swap(i, j int) { as[i], as[j] = as[j], as[i] } + +func (as Alerts) Less(i, j int) bool { + if as[i].StartsAt.Before(as[j].StartsAt) { + return true + } + if as[i].EndsAt.Before(as[j].EndsAt) { + return true + } + return as[i].Fingerprint() < as[j].Fingerprint() +} + +// HasFiring returns true iff one of the alerts is not resolved. +func (as Alerts) HasFiring() bool { + for _, a := range as { + if !a.Resolved() { + return true + } + } + return false +} + +// Status returns StatusFiring iff at least one of the alerts is firing. +func (as Alerts) Status() AlertStatus { + if as.HasFiring() { + return AlertFiring + } + return AlertResolved +} diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/common/model/alert_test.go juju-core-2.0.0/src/github.com/prometheus/common/model/alert_test.go --- juju-core-2.0~beta15/src/github.com/prometheus/common/model/alert_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/common/model/alert_test.go 2016-10-13 14:32:23.000000000 +0000 @@ -0,0 +1,118 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package model + +import ( + "strings" + "testing" + "time" +) + +func TestAlertValidate(t *testing.T) { + ts := time.Now() + + var cases = []struct { + alert *Alert + err string + }{ + { + alert: &Alert{ + Labels: LabelSet{"a": "b"}, + StartsAt: ts, + }, + }, + { + alert: &Alert{ + Labels: LabelSet{"a": "b"}, + }, + err: "start time missing", + }, + { + alert: &Alert{ + Labels: LabelSet{"a": "b"}, + StartsAt: ts, + EndsAt: ts, + }, + }, + { + alert: &Alert{ + Labels: LabelSet{"a": "b"}, + StartsAt: ts, + EndsAt: ts.Add(1 * time.Minute), + }, + }, + { + alert: &Alert{ + Labels: LabelSet{"a": "b"}, + StartsAt: ts, + EndsAt: ts.Add(-1 * time.Minute), + }, + err: "start time must be before end time", + }, + { + alert: &Alert{ + StartsAt: ts, + }, + err: "at least one label pair required", + }, + { + alert: &Alert{ + Labels: LabelSet{"a": "b", "!bad": "label"}, + StartsAt: ts, + }, + err: "invalid label set: invalid name", + }, + { + alert: &Alert{ + Labels: LabelSet{"a": "b", "bad": "\xfflabel"}, + StartsAt: ts, + }, + err: "invalid label set: invalid value", + }, + { + alert: &Alert{ + Labels: LabelSet{"a": "b"}, + Annotations: LabelSet{"!bad": "label"}, + StartsAt: ts, + }, + err: "invalid annotations: invalid name", + }, + { + alert: &Alert{ + Labels: LabelSet{"a": "b"}, + Annotations: LabelSet{"bad": "\xfflabel"}, + StartsAt: ts, + }, + err: "invalid annotations: invalid value", + }, + } + + for i, c := range cases { + err := c.alert.Validate() + if err == nil { + if c.err == "" { + continue + } + t.Errorf("%d. Expected error %q but got none", i, c.err) + continue + } + if c.err == "" && err != nil { + t.Errorf("%d. Expected no error but got %q", i, err) + continue + } + if !strings.Contains(err.Error(), c.err) { + t.Errorf("%d. 
Expected error to contain %q but got %q", i, c.err, err) + } + } +} diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/common/model/fingerprinting.go juju-core-2.0.0/src/github.com/prometheus/common/model/fingerprinting.go --- juju-core-2.0~beta15/src/github.com/prometheus/common/model/fingerprinting.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/common/model/fingerprinting.go 2016-10-13 14:32:23.000000000 +0000 @@ -0,0 +1,105 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "fmt" + "strconv" +) + +// Fingerprint provides a hash-capable representation of a Metric. +// For our purposes, FNV-1A 64-bit is used. +type Fingerprint uint64 + +// FingerprintFromString transforms a string representation into a Fingerprint. +func FingerprintFromString(s string) (Fingerprint, error) { + num, err := strconv.ParseUint(s, 16, 64) + return Fingerprint(num), err +} + +// ParseFingerprint parses the input string into a fingerprint. +func ParseFingerprint(s string) (Fingerprint, error) { + num, err := strconv.ParseUint(s, 16, 64) + if err != nil { + return 0, err + } + return Fingerprint(num), nil +} + +func (f Fingerprint) String() string { + return fmt.Sprintf("%016x", uint64(f)) +} + +// Fingerprints represents a collection of Fingerprint subject to a given +// natural sorting scheme. It implements sort.Interface. 
+type Fingerprints []Fingerprint + +// Len implements sort.Interface. +func (f Fingerprints) Len() int { + return len(f) +} + +// Less implements sort.Interface. +func (f Fingerprints) Less(i, j int) bool { + return f[i] < f[j] +} + +// Swap implements sort.Interface. +func (f Fingerprints) Swap(i, j int) { + f[i], f[j] = f[j], f[i] +} + +// FingerprintSet is a set of Fingerprints. +type FingerprintSet map[Fingerprint]struct{} + +// Equal returns true if both sets contain the same elements (and not more). +func (s FingerprintSet) Equal(o FingerprintSet) bool { + if len(s) != len(o) { + return false + } + + for k := range s { + if _, ok := o[k]; !ok { + return false + } + } + + return true +} + +// Intersection returns the elements contained in both sets. +func (s FingerprintSet) Intersection(o FingerprintSet) FingerprintSet { + myLength, otherLength := len(s), len(o) + if myLength == 0 || otherLength == 0 { + return FingerprintSet{} + } + + subSet := s + superSet := o + + if otherLength < myLength { + subSet = o + superSet = s + } + + out := FingerprintSet{} + + for k := range subSet { + if _, ok := superSet[k]; ok { + out[k] = struct{}{} + } + } + + return out +} diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/common/model/fnv.go juju-core-2.0.0/src/github.com/prometheus/common/model/fnv.go --- juju-core-2.0~beta15/src/github.com/prometheus/common/model/fnv.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/common/model/fnv.go 2016-10-13 14:32:23.000000000 +0000 @@ -0,0 +1,42 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +// Inline and byte-free variant of hash/fnv's fnv64a. + +const ( + offset64 = 14695981039346656037 + prime64 = 1099511628211 +) + +// hashNew initializies a new fnv64a hash value. +func hashNew() uint64 { + return offset64 +} + +// hashAdd adds a string to a fnv64a hash value, returning the updated hash. +func hashAdd(h uint64, s string) uint64 { + for i := 0; i < len(s); i++ { + h ^= uint64(s[i]) + h *= prime64 + } + return h +} + +// hashAddByte adds a byte to a fnv64a hash value, returning the updated hash. +func hashAddByte(h uint64, b byte) uint64 { + h ^= uint64(b) + h *= prime64 + return h +} diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/common/model/labelset.go juju-core-2.0.0/src/github.com/prometheus/common/model/labelset.go --- juju-core-2.0~beta15/src/github.com/prometheus/common/model/labelset.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/common/model/labelset.go 2016-10-13 14:32:23.000000000 +0000 @@ -0,0 +1,169 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "encoding/json" + "fmt" + "sort" + "strings" +) + +// A LabelSet is a collection of LabelName and LabelValue pairs. The LabelSet +// may be fully-qualified down to the point where it may resolve to a single +// Metric in the data store or not. All operations that occur within the realm +// of a LabelSet can emit a vector of Metric entities to which the LabelSet may +// match. +type LabelSet map[LabelName]LabelValue + +// Validate checks whether all names and values in the label set +// are valid. +func (ls LabelSet) Validate() error { + for ln, lv := range ls { + if !ln.IsValid() { + return fmt.Errorf("invalid name %q", ln) + } + if !lv.IsValid() { + return fmt.Errorf("invalid value %q", lv) + } + } + return nil +} + +// Equal returns true iff both label sets have exactly the same key/value pairs. +func (ls LabelSet) Equal(o LabelSet) bool { + if len(ls) != len(o) { + return false + } + for ln, lv := range ls { + olv, ok := o[ln] + if !ok { + return false + } + if olv != lv { + return false + } + } + return true +} + +// Before compares the metrics, using the following criteria: +// +// If m has fewer labels than o, it is before o. If it has more, it is not. +// +// If the number of labels is the same, the superset of all label names is +// sorted alphanumerically. The first differing label pair found in that order +// determines the outcome: If the label does not exist at all in m, then m is +// before o, and vice versa. Otherwise the label value is compared +// alphanumerically. +// +// If m and o are equal, the method returns false. 
+func (ls LabelSet) Before(o LabelSet) bool { + if len(ls) < len(o) { + return true + } + if len(ls) > len(o) { + return false + } + + lns := make(LabelNames, 0, len(ls)+len(o)) + for ln := range ls { + lns = append(lns, ln) + } + for ln := range o { + lns = append(lns, ln) + } + // It's probably not worth it to de-dup lns. + sort.Sort(lns) + for _, ln := range lns { + mlv, ok := ls[ln] + if !ok { + return true + } + olv, ok := o[ln] + if !ok { + return false + } + if mlv < olv { + return true + } + if mlv > olv { + return false + } + } + return false +} + +// Clone returns a copy of the label set. +func (ls LabelSet) Clone() LabelSet { + lsn := make(LabelSet, len(ls)) + for ln, lv := range ls { + lsn[ln] = lv + } + return lsn +} + +// Merge is a helper function to non-destructively merge two label sets. +func (l LabelSet) Merge(other LabelSet) LabelSet { + result := make(LabelSet, len(l)) + + for k, v := range l { + result[k] = v + } + + for k, v := range other { + result[k] = v + } + + return result +} + +func (l LabelSet) String() string { + lstrs := make([]string, 0, len(l)) + for l, v := range l { + lstrs = append(lstrs, fmt.Sprintf("%s=%q", l, v)) + } + + sort.Strings(lstrs) + return fmt.Sprintf("{%s}", strings.Join(lstrs, ", ")) +} + +// Fingerprint returns the LabelSet's fingerprint. +func (ls LabelSet) Fingerprint() Fingerprint { + return labelSetToFingerprint(ls) +} + +// FastFingerprint returns the LabelSet's Fingerprint calculated by a faster hashing +// algorithm, which is, however, more susceptible to hash collisions. +func (ls LabelSet) FastFingerprint() Fingerprint { + return labelSetToFastFingerprint(ls) +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (l *LabelSet) UnmarshalJSON(b []byte) error { + var m map[LabelName]LabelValue + if err := json.Unmarshal(b, &m); err != nil { + return err + } + // encoding/json only unmarshals maps of the form map[string]T. 
It treats + // LabelName as a string and does not call its UnmarshalJSON method. + // Thus, we have to replicate the behavior here. + for ln := range m { + if !LabelNameRE.MatchString(string(ln)) { + return fmt.Errorf("%q is not a valid label name", ln) + } + } + *l = LabelSet(m) + return nil +} diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/common/model/labels.go juju-core-2.0.0/src/github.com/prometheus/common/model/labels.go --- juju-core-2.0~beta15/src/github.com/prometheus/common/model/labels.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/common/model/labels.go 2016-10-13 14:32:23.000000000 +0000 @@ -0,0 +1,206 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "encoding/json" + "fmt" + "regexp" + "strings" + "unicode/utf8" +) + +const ( + // AlertNameLabel is the name of the label containing the an alert's name. + AlertNameLabel = "alertname" + + // ExportedLabelPrefix is the prefix to prepend to the label names present in + // exported metrics if a label of the same name is added by the server. + ExportedLabelPrefix = "exported_" + + // MetricNameLabel is the label name indicating the metric name of a + // timeseries. + MetricNameLabel = "__name__" + + // SchemeLabel is the name of the label that holds the scheme on which to + // scrape a target. 
+ SchemeLabel = "__scheme__" + + // AddressLabel is the name of the label that holds the address of + // a scrape target. + AddressLabel = "__address__" + + // MetricsPathLabel is the name of the label that holds the path on which to + // scrape a target. + MetricsPathLabel = "__metrics_path__" + + // ReservedLabelPrefix is a prefix which is not legal in user-supplied + // label names. + ReservedLabelPrefix = "__" + + // MetaLabelPrefix is a prefix for labels that provide meta information. + // Labels with this prefix are used for intermediate label processing and + // will not be attached to time series. + MetaLabelPrefix = "__meta_" + + // TmpLabelPrefix is a prefix for temporary labels as part of relabelling. + // Labels with this prefix are used for intermediate label processing and + // will not be attached to time series. This is reserved for use in + // Prometheus configuration files by users. + TmpLabelPrefix = "__tmp_" + + // ParamLabelPrefix is a prefix for labels that provide URL parameters + // used to scrape a target. + ParamLabelPrefix = "__param_" + + // JobLabel is the label name indicating the job from which a timeseries + // was scraped. + JobLabel = "job" + + // InstanceLabel is the label name used for the instance label. + InstanceLabel = "instance" + + // BucketLabel is used for the label that defines the upper bound of a + // bucket of a histogram ("le" -> "less or equal"). + BucketLabel = "le" + + // QuantileLabel is used for the label that defines the quantile in a + // summary. + QuantileLabel = "quantile" +) + +// LabelNameRE is a regular expression matching valid label names. +var LabelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$") + +// A LabelName is a key for a LabelSet or Metric. It has a value associated +// therewith. +type LabelName string + +// IsValid is true iff the label name matches the pattern of LabelNameRE. 
+func (ln LabelName) IsValid() bool { + if len(ln) == 0 { + return false + } + for i, b := range ln { + if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) { + return false + } + } + return true +} + +// UnmarshalYAML implements the yaml.Unmarshaler interface. +func (ln *LabelName) UnmarshalYAML(unmarshal func(interface{}) error) error { + var s string + if err := unmarshal(&s); err != nil { + return err + } + if !LabelNameRE.MatchString(s) { + return fmt.Errorf("%q is not a valid label name", s) + } + *ln = LabelName(s) + return nil +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (ln *LabelName) UnmarshalJSON(b []byte) error { + var s string + if err := json.Unmarshal(b, &s); err != nil { + return err + } + if !LabelNameRE.MatchString(s) { + return fmt.Errorf("%q is not a valid label name", s) + } + *ln = LabelName(s) + return nil +} + +// LabelNames is a sortable LabelName slice. In implements sort.Interface. +type LabelNames []LabelName + +func (l LabelNames) Len() int { + return len(l) +} + +func (l LabelNames) Less(i, j int) bool { + return l[i] < l[j] +} + +func (l LabelNames) Swap(i, j int) { + l[i], l[j] = l[j], l[i] +} + +func (l LabelNames) String() string { + labelStrings := make([]string, 0, len(l)) + for _, label := range l { + labelStrings = append(labelStrings, string(label)) + } + return strings.Join(labelStrings, ", ") +} + +// A LabelValue is an associated value for a LabelName. +type LabelValue string + +// IsValid returns true iff the string is a valid UTF8. +func (lv LabelValue) IsValid() bool { + return utf8.ValidString(string(lv)) +} + +// LabelValues is a sortable LabelValue slice. It implements sort.Interface. 
+type LabelValues []LabelValue + +func (l LabelValues) Len() int { + return len(l) +} + +func (l LabelValues) Less(i, j int) bool { + return string(l[i]) < string(l[j]) +} + +func (l LabelValues) Swap(i, j int) { + l[i], l[j] = l[j], l[i] +} + +// LabelPair pairs a name with a value. +type LabelPair struct { + Name LabelName + Value LabelValue +} + +// LabelPairs is a sortable slice of LabelPair pointers. It implements +// sort.Interface. +type LabelPairs []*LabelPair + +func (l LabelPairs) Len() int { + return len(l) +} + +func (l LabelPairs) Less(i, j int) bool { + switch { + case l[i].Name > l[j].Name: + return false + case l[i].Name < l[j].Name: + return true + case l[i].Value > l[j].Value: + return false + case l[i].Value < l[j].Value: + return true + default: + return false + } +} + +func (l LabelPairs) Swap(i, j int) { + l[i], l[j] = l[j], l[i] +} diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/common/model/labels_test.go juju-core-2.0.0/src/github.com/prometheus/common/model/labels_test.go --- juju-core-2.0~beta15/src/github.com/prometheus/common/model/labels_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/common/model/labels_test.go 2016-10-13 14:32:23.000000000 +0000 @@ -0,0 +1,129 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package model + +import ( + "sort" + "testing" +) + +func testLabelNames(t testing.TB) { + var scenarios = []struct { + in LabelNames + out LabelNames + }{ + { + in: LabelNames{"ZZZ", "zzz"}, + out: LabelNames{"ZZZ", "zzz"}, + }, + { + in: LabelNames{"aaa", "AAA"}, + out: LabelNames{"AAA", "aaa"}, + }, + } + + for i, scenario := range scenarios { + sort.Sort(scenario.in) + + for j, expected := range scenario.out { + if expected != scenario.in[j] { + t.Errorf("%d.%d expected %s, got %s", i, j, expected, scenario.in[j]) + } + } + } +} + +func TestLabelNames(t *testing.T) { + testLabelNames(t) +} + +func BenchmarkLabelNames(b *testing.B) { + for i := 0; i < b.N; i++ { + testLabelNames(b) + } +} + +func testLabelValues(t testing.TB) { + var scenarios = []struct { + in LabelValues + out LabelValues + }{ + { + in: LabelValues{"ZZZ", "zzz"}, + out: LabelValues{"ZZZ", "zzz"}, + }, + { + in: LabelValues{"aaa", "AAA"}, + out: LabelValues{"AAA", "aaa"}, + }, + } + + for i, scenario := range scenarios { + sort.Sort(scenario.in) + + for j, expected := range scenario.out { + if expected != scenario.in[j] { + t.Errorf("%d.%d expected %s, got %s", i, j, expected, scenario.in[j]) + } + } + } +} + +func TestLabelValues(t *testing.T) { + testLabelValues(t) +} + +func BenchmarkLabelValues(b *testing.B) { + for i := 0; i < b.N; i++ { + testLabelValues(b) + } +} + +func TestLabelNameIsValid(t *testing.T) { + var scenarios = []struct { + ln LabelName + valid bool + }{ + { + ln: "Avalid_23name", + valid: true, + }, + { + ln: "_Avalid_23name", + valid: true, + }, + { + ln: "1valid_23name", + valid: false, + }, + { + ln: "avalid_23name", + valid: true, + }, + { + ln: "Ava:lid_23name", + valid: false, + }, + { + ln: "a lid_23name", + valid: false, + }, + } + + for _, s := range scenarios { + if s.ln.IsValid() != s.valid { + t.Errorf("Expected %v for %q", s.valid, s.ln) + } + } +} diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/common/model/metric.go 
juju-core-2.0.0/src/github.com/prometheus/common/model/metric.go --- juju-core-2.0~beta15/src/github.com/prometheus/common/model/metric.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/common/model/metric.go 2016-10-13 14:32:23.000000000 +0000 @@ -0,0 +1,98 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "fmt" + "regexp" + "sort" + "strings" +) + +var ( + separator = []byte{0} + MetricNameRE = regexp.MustCompile(`^[a-zA-Z_][a-zA-Z0-9_:]*$`) +) + +// A Metric is similar to a LabelSet, but the key difference is that a Metric is +// a singleton and refers to one and only one stream of samples. +type Metric LabelSet + +// Equal compares the metrics. +func (m Metric) Equal(o Metric) bool { + return LabelSet(m).Equal(LabelSet(o)) +} + +// Before compares the metrics' underlying label sets. +func (m Metric) Before(o Metric) bool { + return LabelSet(m).Before(LabelSet(o)) +} + +// Clone returns a copy of the Metric. 
+func (m Metric) Clone() Metric { + clone := Metric{} + for k, v := range m { + clone[k] = v + } + return clone +} + +func (m Metric) String() string { + metricName, hasName := m[MetricNameLabel] + numLabels := len(m) - 1 + if !hasName { + numLabels = len(m) + } + labelStrings := make([]string, 0, numLabels) + for label, value := range m { + if label != MetricNameLabel { + labelStrings = append(labelStrings, fmt.Sprintf("%s=%q", label, value)) + } + } + + switch numLabels { + case 0: + if hasName { + return string(metricName) + } + return "{}" + default: + sort.Strings(labelStrings) + return fmt.Sprintf("%s{%s}", metricName, strings.Join(labelStrings, ", ")) + } +} + +// Fingerprint returns a Metric's Fingerprint. +func (m Metric) Fingerprint() Fingerprint { + return LabelSet(m).Fingerprint() +} + +// FastFingerprint returns a Metric's Fingerprint calculated by a faster hashing +// algorithm, which is, however, more susceptible to hash collisions. +func (m Metric) FastFingerprint() Fingerprint { + return LabelSet(m).FastFingerprint() +} + +// IsValidMetricName returns true iff name matches the pattern of MetricNameRE. +func IsValidMetricName(n LabelValue) bool { + if len(n) == 0 { + return false + } + for i, b := range n { + if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || b == ':' || (b >= '0' && b <= '9' && i > 0)) { + return false + } + } + return true +} diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/common/model/metric_test.go juju-core-2.0.0/src/github.com/prometheus/common/model/metric_test.go --- juju-core-2.0~beta15/src/github.com/prometheus/common/model/metric_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/common/model/metric_test.go 2016-10-13 14:32:23.000000000 +0000 @@ -0,0 +1,121 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import "testing" + +func testMetric(t testing.TB) { + var scenarios = []struct { + input LabelSet + fingerprint Fingerprint + fastFingerprint Fingerprint + }{ + { + input: LabelSet{}, + fingerprint: 14695981039346656037, + fastFingerprint: 14695981039346656037, + }, + { + input: LabelSet{ + "first_name": "electro", + "occupation": "robot", + "manufacturer": "westinghouse", + }, + fingerprint: 5911716720268894962, + fastFingerprint: 11310079640881077873, + }, + { + input: LabelSet{ + "x": "y", + }, + fingerprint: 8241431561484471700, + fastFingerprint: 13948396922932177635, + }, + { + input: LabelSet{ + "a": "bb", + "b": "c", + }, + fingerprint: 3016285359649981711, + fastFingerprint: 3198632812309449502, + }, + { + input: LabelSet{ + "a": "b", + "bb": "c", + }, + fingerprint: 7122421792099404749, + fastFingerprint: 5774953389407657638, + }, + } + + for i, scenario := range scenarios { + input := Metric(scenario.input) + + if scenario.fingerprint != input.Fingerprint() { + t.Errorf("%d. expected %d, got %d", i, scenario.fingerprint, input.Fingerprint()) + } + if scenario.fastFingerprint != input.FastFingerprint() { + t.Errorf("%d. 
expected %d, got %d", i, scenario.fastFingerprint, input.FastFingerprint()) + } + } +} + +func TestMetric(t *testing.T) { + testMetric(t) +} + +func BenchmarkMetric(b *testing.B) { + for i := 0; i < b.N; i++ { + testMetric(b) + } +} + +func TestMetricNameIsValid(t *testing.T) { + var scenarios = []struct { + mn LabelValue + valid bool + }{ + { + mn: "Avalid_23name", + valid: true, + }, + { + mn: "_Avalid_23name", + valid: true, + }, + { + mn: "1valid_23name", + valid: false, + }, + { + mn: "avalid_23name", + valid: true, + }, + { + mn: "Ava:lid_23name", + valid: true, + }, + { + mn: "a lid_23name", + valid: false, + }, + } + + for _, s := range scenarios { + if IsValidMetricName(s.mn) != s.valid { + t.Errorf("Expected %v for %q", s.valid, s.mn) + } + } +} diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/common/model/model.go juju-core-2.0.0/src/github.com/prometheus/common/model/model.go --- juju-core-2.0~beta15/src/github.com/prometheus/common/model/model.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/common/model/model.go 2016-10-13 14:32:23.000000000 +0000 @@ -0,0 +1,16 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package model contains common data structures that are shared across +// Prometheus componenets and libraries. 
+package model diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/common/model/signature.go juju-core-2.0.0/src/github.com/prometheus/common/model/signature.go --- juju-core-2.0~beta15/src/github.com/prometheus/common/model/signature.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/common/model/signature.go 2016-10-13 14:32:23.000000000 +0000 @@ -0,0 +1,144 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "sort" +) + +// SeparatorByte is a byte that cannot occur in valid UTF-8 sequences and is +// used to separate label names, label values, and other strings from each other +// when calculating their combined hash value (aka signature aka fingerprint). +const SeparatorByte byte = 255 + +var ( + // cache the signature of an empty label set. + emptyLabelSignature = hashNew() +) + +// LabelsToSignature returns a quasi-unique signature (i.e., fingerprint) for a +// given label set. (Collisions are possible but unlikely if the number of label +// sets the function is applied to is small.) 
+func LabelsToSignature(labels map[string]string) uint64 { + if len(labels) == 0 { + return emptyLabelSignature + } + + labelNames := make([]string, 0, len(labels)) + for labelName := range labels { + labelNames = append(labelNames, labelName) + } + sort.Strings(labelNames) + + sum := hashNew() + for _, labelName := range labelNames { + sum = hashAdd(sum, labelName) + sum = hashAddByte(sum, SeparatorByte) + sum = hashAdd(sum, labels[labelName]) + sum = hashAddByte(sum, SeparatorByte) + } + return sum +} + +// labelSetToFingerprint works exactly as LabelsToSignature but takes a LabelSet as +// parameter (rather than a label map) and returns a Fingerprint. +func labelSetToFingerprint(ls LabelSet) Fingerprint { + if len(ls) == 0 { + return Fingerprint(emptyLabelSignature) + } + + labelNames := make(LabelNames, 0, len(ls)) + for labelName := range ls { + labelNames = append(labelNames, labelName) + } + sort.Sort(labelNames) + + sum := hashNew() + for _, labelName := range labelNames { + sum = hashAdd(sum, string(labelName)) + sum = hashAddByte(sum, SeparatorByte) + sum = hashAdd(sum, string(ls[labelName])) + sum = hashAddByte(sum, SeparatorByte) + } + return Fingerprint(sum) +} + +// labelSetToFastFingerprint works similar to labelSetToFingerprint but uses a +// faster and less allocation-heavy hash function, which is more susceptible to +// create hash collisions. Therefore, collision detection should be applied. 
+func labelSetToFastFingerprint(ls LabelSet) Fingerprint { + if len(ls) == 0 { + return Fingerprint(emptyLabelSignature) + } + + var result uint64 + for labelName, labelValue := range ls { + sum := hashNew() + sum = hashAdd(sum, string(labelName)) + sum = hashAddByte(sum, SeparatorByte) + sum = hashAdd(sum, string(labelValue)) + result ^= sum + } + return Fingerprint(result) +} + +// SignatureForLabels works like LabelsToSignature but takes a Metric as +// parameter (rather than a label map) and only includes the labels with the +// specified LabelNames into the signature calculation. The labels passed in +// will be sorted by this function. +func SignatureForLabels(m Metric, labels ...LabelName) uint64 { + if len(labels) == 0 { + return emptyLabelSignature + } + + sort.Sort(LabelNames(labels)) + + sum := hashNew() + for _, label := range labels { + sum = hashAdd(sum, string(label)) + sum = hashAddByte(sum, SeparatorByte) + sum = hashAdd(sum, string(m[label])) + sum = hashAddByte(sum, SeparatorByte) + } + return sum +} + +// SignatureWithoutLabels works like LabelsToSignature but takes a Metric as +// parameter (rather than a label map) and excludes the labels with any of the +// specified LabelNames from the signature calculation. 
+func SignatureWithoutLabels(m Metric, labels map[LabelName]struct{}) uint64 { + if len(m) == 0 { + return emptyLabelSignature + } + + labelNames := make(LabelNames, 0, len(m)) + for labelName := range m { + if _, exclude := labels[labelName]; !exclude { + labelNames = append(labelNames, labelName) + } + } + if len(labelNames) == 0 { + return emptyLabelSignature + } + sort.Sort(labelNames) + + sum := hashNew() + for _, labelName := range labelNames { + sum = hashAdd(sum, string(labelName)) + sum = hashAddByte(sum, SeparatorByte) + sum = hashAdd(sum, string(m[labelName])) + sum = hashAddByte(sum, SeparatorByte) + } + return sum +} diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/common/model/signature_test.go juju-core-2.0.0/src/github.com/prometheus/common/model/signature_test.go --- juju-core-2.0~beta15/src/github.com/prometheus/common/model/signature_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/common/model/signature_test.go 2016-10-13 14:32:23.000000000 +0000 @@ -0,0 +1,314 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package model + +import ( + "runtime" + "sync" + "testing" +) + +func TestLabelsToSignature(t *testing.T) { + var scenarios = []struct { + in map[string]string + out uint64 + }{ + { + in: map[string]string{}, + out: 14695981039346656037, + }, + { + in: map[string]string{"name": "garland, briggs", "fear": "love is not enough"}, + out: 5799056148416392346, + }, + } + + for i, scenario := range scenarios { + actual := LabelsToSignature(scenario.in) + + if actual != scenario.out { + t.Errorf("%d. expected %d, got %d", i, scenario.out, actual) + } + } +} + +func TestMetricToFingerprint(t *testing.T) { + var scenarios = []struct { + in LabelSet + out Fingerprint + }{ + { + in: LabelSet{}, + out: 14695981039346656037, + }, + { + in: LabelSet{"name": "garland, briggs", "fear": "love is not enough"}, + out: 5799056148416392346, + }, + } + + for i, scenario := range scenarios { + actual := labelSetToFingerprint(scenario.in) + + if actual != scenario.out { + t.Errorf("%d. expected %d, got %d", i, scenario.out, actual) + } + } +} + +func TestMetricToFastFingerprint(t *testing.T) { + var scenarios = []struct { + in LabelSet + out Fingerprint + }{ + { + in: LabelSet{}, + out: 14695981039346656037, + }, + { + in: LabelSet{"name": "garland, briggs", "fear": "love is not enough"}, + out: 12952432476264840823, + }, + } + + for i, scenario := range scenarios { + actual := labelSetToFastFingerprint(scenario.in) + + if actual != scenario.out { + t.Errorf("%d. 
expected %d, got %d", i, scenario.out, actual) + } + } +} + +func TestSignatureForLabels(t *testing.T) { + var scenarios = []struct { + in Metric + labels LabelNames + out uint64 + }{ + { + in: Metric{}, + labels: nil, + out: 14695981039346656037, + }, + { + in: Metric{}, + labels: LabelNames{"empty"}, + out: 7187873163539638612, + }, + { + in: Metric{"name": "garland, briggs", "fear": "love is not enough"}, + labels: LabelNames{"empty"}, + out: 7187873163539638612, + }, + { + in: Metric{"name": "garland, briggs", "fear": "love is not enough"}, + labels: LabelNames{"fear", "name"}, + out: 5799056148416392346, + }, + { + in: Metric{"name": "garland, briggs", "fear": "love is not enough", "foo": "bar"}, + labels: LabelNames{"fear", "name"}, + out: 5799056148416392346, + }, + { + in: Metric{"name": "garland, briggs", "fear": "love is not enough"}, + labels: LabelNames{}, + out: 14695981039346656037, + }, + { + in: Metric{"name": "garland, briggs", "fear": "love is not enough"}, + labels: nil, + out: 14695981039346656037, + }, + } + + for i, scenario := range scenarios { + actual := SignatureForLabels(scenario.in, scenario.labels...) + + if actual != scenario.out { + t.Errorf("%d. 
expected %d, got %d", i, scenario.out, actual) + } + } +} + +func TestSignatureWithoutLabels(t *testing.T) { + var scenarios = []struct { + in Metric + labels map[LabelName]struct{} + out uint64 + }{ + { + in: Metric{}, + labels: nil, + out: 14695981039346656037, + }, + { + in: Metric{"name": "garland, briggs", "fear": "love is not enough"}, + labels: map[LabelName]struct{}{"fear": struct{}{}, "name": struct{}{}}, + out: 14695981039346656037, + }, + { + in: Metric{"name": "garland, briggs", "fear": "love is not enough", "foo": "bar"}, + labels: map[LabelName]struct{}{"foo": struct{}{}}, + out: 5799056148416392346, + }, + { + in: Metric{"name": "garland, briggs", "fear": "love is not enough"}, + labels: map[LabelName]struct{}{}, + out: 5799056148416392346, + }, + { + in: Metric{"name": "garland, briggs", "fear": "love is not enough"}, + labels: nil, + out: 5799056148416392346, + }, + } + + for i, scenario := range scenarios { + actual := SignatureWithoutLabels(scenario.in, scenario.labels) + + if actual != scenario.out { + t.Errorf("%d. 
expected %d, got %d", i, scenario.out, actual) + } + } +} + +func benchmarkLabelToSignature(b *testing.B, l map[string]string, e uint64) { + for i := 0; i < b.N; i++ { + if a := LabelsToSignature(l); a != e { + b.Fatalf("expected signature of %d for %s, got %d", e, l, a) + } + } +} + +func BenchmarkLabelToSignatureScalar(b *testing.B) { + benchmarkLabelToSignature(b, nil, 14695981039346656037) +} + +func BenchmarkLabelToSignatureSingle(b *testing.B) { + benchmarkLabelToSignature(b, map[string]string{"first-label": "first-label-value"}, 5146282821936882169) +} + +func BenchmarkLabelToSignatureDouble(b *testing.B) { + benchmarkLabelToSignature(b, map[string]string{"first-label": "first-label-value", "second-label": "second-label-value"}, 3195800080984914717) +} + +func BenchmarkLabelToSignatureTriple(b *testing.B) { + benchmarkLabelToSignature(b, map[string]string{"first-label": "first-label-value", "second-label": "second-label-value", "third-label": "third-label-value"}, 13843036195897128121) +} + +func benchmarkMetricToFingerprint(b *testing.B, ls LabelSet, e Fingerprint) { + for i := 0; i < b.N; i++ { + if a := labelSetToFingerprint(ls); a != e { + b.Fatalf("expected signature of %d for %s, got %d", e, ls, a) + } + } +} + +func BenchmarkMetricToFingerprintScalar(b *testing.B) { + benchmarkMetricToFingerprint(b, nil, 14695981039346656037) +} + +func BenchmarkMetricToFingerprintSingle(b *testing.B) { + benchmarkMetricToFingerprint(b, LabelSet{"first-label": "first-label-value"}, 5146282821936882169) +} + +func BenchmarkMetricToFingerprintDouble(b *testing.B) { + benchmarkMetricToFingerprint(b, LabelSet{"first-label": "first-label-value", "second-label": "second-label-value"}, 3195800080984914717) +} + +func BenchmarkMetricToFingerprintTriple(b *testing.B) { + benchmarkMetricToFingerprint(b, LabelSet{"first-label": "first-label-value", "second-label": "second-label-value", "third-label": "third-label-value"}, 13843036195897128121) +} + +func 
benchmarkMetricToFastFingerprint(b *testing.B, ls LabelSet, e Fingerprint) { + for i := 0; i < b.N; i++ { + if a := labelSetToFastFingerprint(ls); a != e { + b.Fatalf("expected signature of %d for %s, got %d", e, ls, a) + } + } +} + +func BenchmarkMetricToFastFingerprintScalar(b *testing.B) { + benchmarkMetricToFastFingerprint(b, nil, 14695981039346656037) +} + +func BenchmarkMetricToFastFingerprintSingle(b *testing.B) { + benchmarkMetricToFastFingerprint(b, LabelSet{"first-label": "first-label-value"}, 5147259542624943964) +} + +func BenchmarkMetricToFastFingerprintDouble(b *testing.B) { + benchmarkMetricToFastFingerprint(b, LabelSet{"first-label": "first-label-value", "second-label": "second-label-value"}, 18269973311206963528) +} + +func BenchmarkMetricToFastFingerprintTriple(b *testing.B) { + benchmarkMetricToFastFingerprint(b, LabelSet{"first-label": "first-label-value", "second-label": "second-label-value", "third-label": "third-label-value"}, 15738406913934009676) +} + +func BenchmarkEmptyLabelSignature(b *testing.B) { + input := []map[string]string{nil, {}} + + var ms runtime.MemStats + runtime.ReadMemStats(&ms) + + alloc := ms.Alloc + + for _, labels := range input { + LabelsToSignature(labels) + } + + runtime.ReadMemStats(&ms) + + if got := ms.Alloc; alloc != got { + b.Fatal("expected LabelsToSignature with empty labels not to perform allocations") + } +} + +func benchmarkMetricToFastFingerprintConc(b *testing.B, ls LabelSet, e Fingerprint, concLevel int) { + var start, end sync.WaitGroup + start.Add(1) + end.Add(concLevel) + + for i := 0; i < concLevel; i++ { + go func() { + start.Wait() + for j := b.N / concLevel; j >= 0; j-- { + if a := labelSetToFastFingerprint(ls); a != e { + b.Fatalf("expected signature of %d for %s, got %d", e, ls, a) + } + } + end.Done() + }() + } + b.ResetTimer() + start.Done() + end.Wait() +} + +func BenchmarkMetricToFastFingerprintTripleConc1(b *testing.B) { + benchmarkMetricToFastFingerprintConc(b, LabelSet{"first-label": 
"first-label-value", "second-label": "second-label-value", "third-label": "third-label-value"}, 15738406913934009676, 1) +} + +func BenchmarkMetricToFastFingerprintTripleConc2(b *testing.B) { + benchmarkMetricToFastFingerprintConc(b, LabelSet{"first-label": "first-label-value", "second-label": "second-label-value", "third-label": "third-label-value"}, 15738406913934009676, 2) +} + +func BenchmarkMetricToFastFingerprintTripleConc4(b *testing.B) { + benchmarkMetricToFastFingerprintConc(b, LabelSet{"first-label": "first-label-value", "second-label": "second-label-value", "third-label": "third-label-value"}, 15738406913934009676, 4) +} + +func BenchmarkMetricToFastFingerprintTripleConc8(b *testing.B) { + benchmarkMetricToFastFingerprintConc(b, LabelSet{"first-label": "first-label-value", "second-label": "second-label-value", "third-label": "third-label-value"}, 15738406913934009676, 8) +} diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/common/model/silence.go juju-core-2.0.0/src/github.com/prometheus/common/model/silence.go --- juju-core-2.0~beta15/src/github.com/prometheus/common/model/silence.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/common/model/silence.go 2016-10-13 14:32:23.000000000 +0000 @@ -0,0 +1,106 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 

package model

import (
	"encoding/json"
	"fmt"
	"regexp"
	"time"
)

// A Matcher describes how the value of a given label is matched: either as
// an exact string or, if IsRegex is set, as a regular expression.
type Matcher struct {
	Name    LabelName `json:"name"`
	Value   string    `json:"value"`
	IsRegex bool      `json:"isRegex"`
}

// UnmarshalJSON implements the json.Unmarshaler interface. It rejects
// matchers with an empty name or (when IsRegex is set) an uncompilable value.
func (m *Matcher) UnmarshalJSON(b []byte) error {
	// The plain alias drops this method so json.Unmarshal does not recurse
	// back into UnmarshalJSON.
	type plain Matcher
	if err := json.Unmarshal(b, (*plain)(m)); err != nil {
		return err
	}

	if len(m.Name) == 0 {
		return fmt.Errorf("label name in matcher must not be empty")
	}
	if m.IsRegex {
		if _, err := regexp.Compile(m.Value); err != nil {
			return err
		}
	}
	return nil
}

// Validate returns true iff all fields of the matcher have valid values.
func (m *Matcher) Validate() error {
	if !m.Name.IsValid() {
		return fmt.Errorf("invalid name %q", m.Name)
	}
	if m.IsRegex {
		if _, err := regexp.Compile(m.Value); err != nil {
			return fmt.Errorf("invalid regular expression %q", m.Value)
		}
	} else if !LabelValue(m.Value).IsValid() || len(m.Value) == 0 {
		// Non-regex values must be non-empty valid UTF-8.
		return fmt.Errorf("invalid value %q", m.Value)
	}
	return nil
}

// Silence defines the representation of a silence definition
// in the Prometheus eco-system.
type Silence struct {
	ID uint64 `json:"id,omitempty"`

	Matchers []*Matcher `json:"matchers"`

	StartsAt time.Time `json:"startsAt"`
	EndsAt   time.Time `json:"endsAt"`

	CreatedAt time.Time `json:"createdAt,omitempty"`
	CreatedBy string    `json:"createdBy"`
	Comment   string    `json:"comment,omitempty"`
}

// Validate returns true iff all fields of the silence have valid values.
+func (s *Silence) Validate() error { + if len(s.Matchers) == 0 { + return fmt.Errorf("at least one matcher required") + } + for _, m := range s.Matchers { + if err := m.Validate(); err != nil { + return fmt.Errorf("invalid matcher: %s", err) + } + } + if s.StartsAt.IsZero() { + return fmt.Errorf("start time missing") + } + if s.EndsAt.IsZero() { + return fmt.Errorf("end time missing") + } + if s.EndsAt.Before(s.StartsAt) { + return fmt.Errorf("start time must be before end time") + } + if s.CreatedBy == "" { + return fmt.Errorf("creator information missing") + } + if s.Comment == "" { + return fmt.Errorf("comment missing") + } + if s.CreatedAt.IsZero() { + return fmt.Errorf("creation timestamp missing") + } + return nil +} diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/common/model/silence_test.go juju-core-2.0.0/src/github.com/prometheus/common/model/silence_test.go --- juju-core-2.0~beta15/src/github.com/prometheus/common/model/silence_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/common/model/silence_test.go 2016-10-13 14:32:23.000000000 +0000 @@ -0,0 +1,228 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package model + +import ( + "strings" + "testing" + "time" +) + +func TestMatcherValidate(t *testing.T) { + var cases = []struct { + matcher *Matcher + err string + }{ + { + matcher: &Matcher{ + Name: "name", + Value: "value", + }, + }, + { + matcher: &Matcher{ + Name: "name", + Value: "value", + IsRegex: true, + }, + }, + { + matcher: &Matcher{ + Name: "name!", + Value: "value", + }, + err: "invalid name", + }, + { + matcher: &Matcher{ + Name: "", + Value: "value", + }, + err: "invalid name", + }, + { + matcher: &Matcher{ + Name: "name", + Value: "value\xff", + }, + err: "invalid value", + }, + { + matcher: &Matcher{ + Name: "name", + Value: "", + }, + err: "invalid value", + }, + } + + for i, c := range cases { + err := c.matcher.Validate() + if err == nil { + if c.err == "" { + continue + } + t.Errorf("%d. Expected error %q but got none", i, c.err) + continue + } + if c.err == "" && err != nil { + t.Errorf("%d. Expected no error but got %q", i, err) + continue + } + if !strings.Contains(err.Error(), c.err) { + t.Errorf("%d. 
Expected error to contain %q but got %q", i, c.err, err) + } + } +} + +func TestSilenceValidate(t *testing.T) { + ts := time.Now() + + var cases = []struct { + sil *Silence + err string + }{ + { + sil: &Silence{ + Matchers: []*Matcher{ + {Name: "name", Value: "value"}, + }, + StartsAt: ts, + EndsAt: ts, + CreatedAt: ts, + CreatedBy: "name", + Comment: "comment", + }, + }, + { + sil: &Silence{ + Matchers: []*Matcher{ + {Name: "name", Value: "value"}, + {Name: "name", Value: "value"}, + {Name: "name", Value: "value"}, + {Name: "name", Value: "value", IsRegex: true}, + }, + StartsAt: ts, + EndsAt: ts, + CreatedAt: ts, + CreatedBy: "name", + Comment: "comment", + }, + }, + { + sil: &Silence{ + Matchers: []*Matcher{ + {Name: "name", Value: "value"}, + }, + StartsAt: ts, + EndsAt: ts.Add(-1 * time.Minute), + CreatedAt: ts, + CreatedBy: "name", + Comment: "comment", + }, + err: "start time must be before end time", + }, + { + sil: &Silence{ + Matchers: []*Matcher{ + {Name: "name", Value: "value"}, + }, + StartsAt: ts, + CreatedAt: ts, + CreatedBy: "name", + Comment: "comment", + }, + err: "end time missing", + }, + { + sil: &Silence{ + Matchers: []*Matcher{ + {Name: "name", Value: "value"}, + }, + EndsAt: ts, + CreatedAt: ts, + CreatedBy: "name", + Comment: "comment", + }, + err: "start time missing", + }, + { + sil: &Silence{ + Matchers: []*Matcher{ + {Name: "!name", Value: "value"}, + }, + StartsAt: ts, + EndsAt: ts, + CreatedAt: ts, + CreatedBy: "name", + Comment: "comment", + }, + err: "invalid matcher", + }, + { + sil: &Silence{ + Matchers: []*Matcher{ + {Name: "name", Value: "value"}, + }, + StartsAt: ts, + EndsAt: ts, + CreatedAt: ts, + CreatedBy: "name", + }, + err: "comment missing", + }, + { + sil: &Silence{ + Matchers: []*Matcher{ + {Name: "name", Value: "value"}, + }, + StartsAt: ts, + EndsAt: ts, + CreatedBy: "name", + Comment: "comment", + }, + err: "creation timestamp missing", + }, + { + sil: &Silence{ + Matchers: []*Matcher{ + {Name: "name", Value: 
"value"}, + }, + StartsAt: ts, + EndsAt: ts, + CreatedAt: ts, + Comment: "comment", + }, + err: "creator information missing", + }, + } + + for i, c := range cases { + err := c.sil.Validate() + if err == nil { + if c.err == "" { + continue + } + t.Errorf("%d. Expected error %q but got none", i, c.err) + continue + } + if c.err == "" && err != nil { + t.Errorf("%d. Expected no error but got %q", i, err) + continue + } + if !strings.Contains(err.Error(), c.err) { + t.Errorf("%d. Expected error to contain %q but got %q", i, c.err, err) + } + } +} diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/common/model/time.go juju-core-2.0.0/src/github.com/prometheus/common/model/time.go --- juju-core-2.0~beta15/src/github.com/prometheus/common/model/time.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/common/model/time.go 2016-10-13 14:32:23.000000000 +0000 @@ -0,0 +1,249 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "fmt" + "math" + "regexp" + "strconv" + "strings" + "time" +) + +const ( + // MinimumTick is the minimum supported time resolution. This has to be + // at least time.Second in order for the code below to work. + minimumTick = time.Millisecond + // second is the Time duration equivalent to one second. + second = int64(time.Second / minimumTick) + // The number of nanoseconds per minimum tick. 
+ nanosPerTick = int64(minimumTick / time.Nanosecond) + + // Earliest is the earliest Time representable. Handy for + // initializing a high watermark. + Earliest = Time(math.MinInt64) + // Latest is the latest Time representable. Handy for initializing + // a low watermark. + Latest = Time(math.MaxInt64) +) + +// Time is the number of milliseconds since the epoch +// (1970-01-01 00:00 UTC) excluding leap seconds. +type Time int64 + +// Interval describes and interval between two timestamps. +type Interval struct { + Start, End Time +} + +// Now returns the current time as a Time. +func Now() Time { + return TimeFromUnixNano(time.Now().UnixNano()) +} + +// TimeFromUnix returns the Time equivalent to the Unix Time t +// provided in seconds. +func TimeFromUnix(t int64) Time { + return Time(t * second) +} + +// TimeFromUnixNano returns the Time equivalent to the Unix Time +// t provided in nanoseconds. +func TimeFromUnixNano(t int64) Time { + return Time(t / nanosPerTick) +} + +// Equal reports whether two Times represent the same instant. +func (t Time) Equal(o Time) bool { + return t == o +} + +// Before reports whether the Time t is before o. +func (t Time) Before(o Time) bool { + return t < o +} + +// After reports whether the Time t is after o. +func (t Time) After(o Time) bool { + return t > o +} + +// Add returns the Time t + d. +func (t Time) Add(d time.Duration) Time { + return t + Time(d/minimumTick) +} + +// Sub returns the Duration t - o. +func (t Time) Sub(o Time) time.Duration { + return time.Duration(t-o) * minimumTick +} + +// Time returns the time.Time representation of t. +func (t Time) Time() time.Time { + return time.Unix(int64(t)/second, (int64(t)%second)*nanosPerTick) +} + +// Unix returns t as a Unix time, the number of seconds elapsed +// since January 1, 1970 UTC. +func (t Time) Unix() int64 { + return int64(t) / second +} + +// UnixNano returns t as a Unix time, the number of nanoseconds elapsed +// since January 1, 1970 UTC. 
+func (t Time) UnixNano() int64 { + return int64(t) * nanosPerTick +} + +// The number of digits after the dot. +var dotPrecision = int(math.Log10(float64(second))) + +// String returns a string representation of the Time. +func (t Time) String() string { + return strconv.FormatFloat(float64(t)/float64(second), 'f', -1, 64) +} + +// MarshalJSON implements the json.Marshaler interface. +func (t Time) MarshalJSON() ([]byte, error) { + return []byte(t.String()), nil +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (t *Time) UnmarshalJSON(b []byte) error { + p := strings.Split(string(b), ".") + switch len(p) { + case 1: + v, err := strconv.ParseInt(string(p[0]), 10, 64) + if err != nil { + return err + } + *t = Time(v * second) + + case 2: + v, err := strconv.ParseInt(string(p[0]), 10, 64) + if err != nil { + return err + } + v *= second + + prec := dotPrecision - len(p[1]) + if prec < 0 { + p[1] = p[1][:dotPrecision] + } else if prec > 0 { + p[1] = p[1] + strings.Repeat("0", prec) + } + + va, err := strconv.ParseInt(p[1], 10, 32) + if err != nil { + return err + } + + *t = Time(v + va) + + default: + return fmt.Errorf("invalid time %q", string(b)) + } + return nil +} + +// Duration wraps time.Duration. It is used to parse the custom duration format +// from YAML. +// This type should not propagate beyond the scope of input/output processing. +type Duration time.Duration + +var durationRE = regexp.MustCompile("^([0-9]+)(y|w|d|h|m|s|ms)$") + +// ParseDuration parses a string into a time.Duration, assuming that a year +// always has 365d, a week always has 7d, and a day always has 24h.
+func ParseDuration(durationStr string) (Duration, error) { + matches := durationRE.FindStringSubmatch(durationStr) + if len(matches) != 3 { + return 0, fmt.Errorf("not a valid duration string: %q", durationStr) + } + var ( + n, _ = strconv.Atoi(matches[1]) + dur = time.Duration(n) * time.Millisecond + ) + switch unit := matches[2]; unit { + case "y": + dur *= 1000 * 60 * 60 * 24 * 365 + case "w": + dur *= 1000 * 60 * 60 * 24 * 7 + case "d": + dur *= 1000 * 60 * 60 * 24 + case "h": + dur *= 1000 * 60 * 60 + case "m": + dur *= 1000 * 60 + case "s": + dur *= 1000 + case "ms": + // Value already correct + default: + return 0, fmt.Errorf("invalid time unit in duration string: %q", unit) + } + return Duration(dur), nil +} + +func (d Duration) String() string { + var ( + ms = int64(time.Duration(d) / time.Millisecond) + unit = "ms" + ) + factors := map[string]int64{ + "y": 1000 * 60 * 60 * 24 * 365, + "w": 1000 * 60 * 60 * 24 * 7, + "d": 1000 * 60 * 60 * 24, + "h": 1000 * 60 * 60, + "m": 1000 * 60, + "s": 1000, + "ms": 1, + } + + switch int64(0) { + case ms % factors["y"]: + unit = "y" + case ms % factors["w"]: + unit = "w" + case ms % factors["d"]: + unit = "d" + case ms % factors["h"]: + unit = "h" + case ms % factors["m"]: + unit = "m" + case ms % factors["s"]: + unit = "s" + } + return fmt.Sprintf("%v%v", ms/factors[unit], unit) +} + +// MarshalYAML implements the yaml.Marshaler interface. +func (d Duration) MarshalYAML() (interface{}, error) { + return d.String(), nil +} + +// UnmarshalYAML implements the yaml.Unmarshaler interface. 
+func (d *Duration) UnmarshalYAML(unmarshal func(interface{}) error) error { + var s string + if err := unmarshal(&s); err != nil { + return err + } + dur, err := ParseDuration(s) + if err != nil { + return err + } + *d = dur + return nil +} diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/common/model/time_test.go juju-core-2.0.0/src/github.com/prometheus/common/model/time_test.go --- juju-core-2.0~beta15/src/github.com/prometheus/common/model/time_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/common/model/time_test.go 2016-10-13 14:32:23.000000000 +0000 @@ -0,0 +1,129 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package model + +import ( + "testing" + "time" +) + +func TestComparators(t *testing.T) { + t1a := TimeFromUnix(0) + t1b := TimeFromUnix(0) + t2 := TimeFromUnix(2*second - 1) + + if !t1a.Equal(t1b) { + t.Fatalf("Expected %s to be equal to %s", t1a, t1b) + } + if t1a.Equal(t2) { + t.Fatalf("Expected %s to not be equal to %s", t1a, t2) + } + + if !t1a.Before(t2) { + t.Fatalf("Expected %s to be before %s", t1a, t2) + } + if t1a.Before(t1b) { + t.Fatalf("Expected %s to not be before %s", t1a, t1b) + } + + if !t2.After(t1a) { + t.Fatalf("Expected %s to be after %s", t2, t1a) + } + if t1b.After(t1a) { + t.Fatalf("Expected %s to not be after %s", t1b, t1a) + } +} + +func TestTimeConversions(t *testing.T) { + unixSecs := int64(1136239445) + unixNsecs := int64(123456789) + unixNano := unixSecs*1e9 + unixNsecs + + t1 := time.Unix(unixSecs, unixNsecs-unixNsecs%nanosPerTick) + t2 := time.Unix(unixSecs, unixNsecs) + + ts := TimeFromUnixNano(unixNano) + if !ts.Time().Equal(t1) { + t.Fatalf("Expected %s, got %s", t1, ts.Time()) + } + + // Test available precision. 
+ ts = TimeFromUnixNano(t2.UnixNano()) + if !ts.Time().Equal(t1) { + t.Fatalf("Expected %s, got %s", t1, ts.Time()) + } + + if ts.UnixNano() != unixNano-unixNano%nanosPerTick { + t.Fatalf("Expected %d, got %d", unixNano, ts.UnixNano()) + } +} + +func TestDuration(t *testing.T) { + duration := time.Second + time.Minute + time.Hour + goTime := time.Unix(1136239445, 0) + + ts := TimeFromUnix(goTime.Unix()) + if !goTime.Add(duration).Equal(ts.Add(duration).Time()) { + t.Fatalf("Expected %s to be equal to %s", goTime.Add(duration), ts.Add(duration)) + } + + earlier := ts.Add(-duration) + delta := ts.Sub(earlier) + if delta != duration { + t.Fatalf("Expected %s to be equal to %s", delta, duration) + } +} + +func TestParseDuration(t *testing.T) { + var cases = []struct { + in string + out time.Duration + }{ + { + in: "324ms", + out: 324 * time.Millisecond, + }, { + in: "3s", + out: 3 * time.Second, + }, { + in: "5m", + out: 5 * time.Minute, + }, { + in: "1h", + out: time.Hour, + }, { + in: "4d", + out: 4 * 24 * time.Hour, + }, { + in: "3w", + out: 3 * 7 * 24 * time.Hour, + }, { + in: "10y", + out: 10 * 365 * 24 * time.Hour, + }, + } + + for _, c := range cases { + d, err := ParseDuration(c.in) + if err != nil { + t.Errorf("Unexpected error on input %q", c.in) + } + if time.Duration(d) != c.out { + t.Errorf("Expected %v but got %v", c.out, d) + } + if d.String() != c.in { + t.Errorf("Expected duration string %q but got %q", c.in, d.String()) + } + } +} diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/common/model/value.go juju-core-2.0.0/src/github.com/prometheus/common/model/value.go --- juju-core-2.0~beta15/src/github.com/prometheus/common/model/value.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/common/model/value.go 2016-10-13 14:32:23.000000000 +0000 @@ -0,0 +1,395 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in 
compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "encoding/json" + "fmt" + "sort" + "strconv" + "strings" +) + +// A SampleValue is a representation of a value for a given sample at a given +// time. +type SampleValue float64 + +// MarshalJSON implements json.Marshaler. +func (v SampleValue) MarshalJSON() ([]byte, error) { + return json.Marshal(v.String()) +} + +// UnmarshalJSON implements json.Unmarshaler. +func (v *SampleValue) UnmarshalJSON(b []byte) error { + if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' { + return fmt.Errorf("sample value must be a quoted string") + } + f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64) + if err != nil { + return err + } + *v = SampleValue(f) + return nil +} + +func (v SampleValue) Equal(o SampleValue) bool { + return v == o +} + +func (v SampleValue) String() string { + return strconv.FormatFloat(float64(v), 'f', -1, 64) +} + +// SamplePair pairs a SampleValue with a Timestamp. +type SamplePair struct { + Timestamp Time + Value SampleValue +} + +// MarshalJSON implements json.Marshaler. +func (s SamplePair) MarshalJSON() ([]byte, error) { + t, err := json.Marshal(s.Timestamp) + if err != nil { + return nil, err + } + v, err := json.Marshal(s.Value) + if err != nil { + return nil, err + } + return []byte(fmt.Sprintf("[%s,%s]", t, v)), nil +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (s *SamplePair) UnmarshalJSON(b []byte) error { + v := [...]json.Unmarshaler{&s.Timestamp, &s.Value} + return json.Unmarshal(b, &v) +} + +// Equal returns true if this SamplePair and o have equal Values and equal +// Timestamps. +func (s *SamplePair) Equal(o *SamplePair) bool { + return s == o || (s.Value == o.Value && s.Timestamp.Equal(o.Timestamp)) +} + +func (s SamplePair) String() string { + return fmt.Sprintf("%s @[%s]", s.Value, s.Timestamp) +} + +// Sample is a sample pair associated with a metric. +type Sample struct { + Metric Metric `json:"metric"` + Value SampleValue `json:"value"` + Timestamp Time `json:"timestamp"` +} + +// Equal compares first the metrics, then the timestamp, then the value. +func (s *Sample) Equal(o *Sample) bool { + if s == o { + return true + } + + if !s.Metric.Equal(o.Metric) { + return false + } + if !s.Timestamp.Equal(o.Timestamp) { + return false + } + if s.Value != o.Value { + return false + } + + return true +} + +func (s Sample) String() string { + return fmt.Sprintf("%s => %s", s.Metric, SamplePair{ + Timestamp: s.Timestamp, + Value: s.Value, + }) +} + +// MarshalJSON implements json.Marshaler. +func (s Sample) MarshalJSON() ([]byte, error) { + v := struct { + Metric Metric `json:"metric"` + Value SamplePair `json:"value"` + }{ + Metric: s.Metric, + Value: SamplePair{ + Timestamp: s.Timestamp, + Value: s.Value, + }, + } + + return json.Marshal(&v) +} + +// UnmarshalJSON implements json.Unmarshaler. +func (s *Sample) UnmarshalJSON(b []byte) error { + v := struct { + Metric Metric `json:"metric"` + Value SamplePair `json:"value"` + }{ + Metric: s.Metric, + Value: SamplePair{ + Timestamp: s.Timestamp, + Value: s.Value, + }, + } + + if err := json.Unmarshal(b, &v); err != nil { + return err + } + + s.Metric = v.Metric + s.Timestamp = v.Value.Timestamp + s.Value = v.Value.Value + + return nil +} + +// Samples is a sortable Sample slice. It implements sort.Interface. 
+type Samples []*Sample + +func (s Samples) Len() int { + return len(s) +} + +// Less compares first the metrics, then the timestamp. +func (s Samples) Less(i, j int) bool { + switch { + case s[i].Metric.Before(s[j].Metric): + return true + case s[j].Metric.Before(s[i].Metric): + return false + case s[i].Timestamp.Before(s[j].Timestamp): + return true + default: + return false + } +} + +func (s Samples) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +// Equal compares two sets of samples and returns true if they are equal. +func (s Samples) Equal(o Samples) bool { + if len(s) != len(o) { + return false + } + + for i, sample := range s { + if !sample.Equal(o[i]) { + return false + } + } + return true +} + +// SampleStream is a stream of Values belonging to an attached COWMetric. +type SampleStream struct { + Metric Metric `json:"metric"` + Values []SamplePair `json:"values"` +} + +func (ss SampleStream) String() string { + vals := make([]string, len(ss.Values)) + for i, v := range ss.Values { + vals[i] = v.String() + } + return fmt.Sprintf("%s =>\n%s", ss.Metric, strings.Join(vals, "\n")) +} + +// Value is a generic interface for values resulting from a query evaluation. +type Value interface { + Type() ValueType + String() string +} + +func (Matrix) Type() ValueType { return ValMatrix } +func (Vector) Type() ValueType { return ValVector } +func (*Scalar) Type() ValueType { return ValScalar } +func (*String) Type() ValueType { return ValString } + +type ValueType int + +const ( + ValNone ValueType = iota + ValScalar + ValVector + ValMatrix + ValString +) + +// MarshalJSON implements json.Marshaler. 
+func (et ValueType) MarshalJSON() ([]byte, error) { + return json.Marshal(et.String()) +} + +func (et *ValueType) UnmarshalJSON(b []byte) error { + var s string + if err := json.Unmarshal(b, &s); err != nil { + return err + } + switch s { + case "": + *et = ValNone + case "scalar": + *et = ValScalar + case "vector": + *et = ValVector + case "matrix": + *et = ValMatrix + case "string": + *et = ValString + default: + return fmt.Errorf("unknown value type %q", s) + } + return nil +} + +func (e ValueType) String() string { + switch e { + case ValNone: + return "" + case ValScalar: + return "scalar" + case ValVector: + return "vector" + case ValMatrix: + return "matrix" + case ValString: + return "string" + } + panic("ValueType.String: unhandled value type") +} + +// Scalar is a scalar value evaluated at the set timestamp. +type Scalar struct { + Value SampleValue `json:"value"` + Timestamp Time `json:"timestamp"` +} + +func (s Scalar) String() string { + return fmt.Sprintf("scalar: %v @[%v]", s.Value, s.Timestamp) +} + +// MarshalJSON implements json.Marshaler. +func (s Scalar) MarshalJSON() ([]byte, error) { + v := strconv.FormatFloat(float64(s.Value), 'f', -1, 64) + return json.Marshal([...]interface{}{s.Timestamp, string(v)}) +} + +// UnmarshalJSON implements json.Unmarshaler. +func (s *Scalar) UnmarshalJSON(b []byte) error { + var f string + v := [...]interface{}{&s.Timestamp, &f} + + if err := json.Unmarshal(b, &v); err != nil { + return err + } + + value, err := strconv.ParseFloat(f, 64) + if err != nil { + return fmt.Errorf("error parsing sample value: %s", err) + } + s.Value = SampleValue(value) + return nil +} + +// String is a string value evaluated at the set timestamp. +type String struct { + Value string `json:"value"` + Timestamp Time `json:"timestamp"` +} + +func (s *String) String() string { + return s.Value +} + +// MarshalJSON implements json.Marshaler. 
+func (s String) MarshalJSON() ([]byte, error) { + return json.Marshal([]interface{}{s.Timestamp, s.Value}) +} + +// UnmarshalJSON implements json.Unmarshaler. +func (s *String) UnmarshalJSON(b []byte) error { + v := [...]interface{}{&s.Timestamp, &s.Value} + return json.Unmarshal(b, &v) +} + +// Vector is basically only an alias for Samples, but the +// contract is that in a Vector, all Samples have the same timestamp. +type Vector []*Sample + +func (vec Vector) String() string { + entries := make([]string, len(vec)) + for i, s := range vec { + entries[i] = s.String() + } + return strings.Join(entries, "\n") +} + +func (vec Vector) Len() int { return len(vec) } +func (vec Vector) Swap(i, j int) { vec[i], vec[j] = vec[j], vec[i] } + +// Less compares first the metrics, then the timestamp. +func (vec Vector) Less(i, j int) bool { + switch { + case vec[i].Metric.Before(vec[j].Metric): + return true + case vec[j].Metric.Before(vec[i].Metric): + return false + case vec[i].Timestamp.Before(vec[j].Timestamp): + return true + default: + return false + } +} + +// Equal compares two sets of samples and returns true if they are equal. +func (vec Vector) Equal(o Vector) bool { + if len(vec) != len(o) { + return false + } + + for i, sample := range vec { + if !sample.Equal(o[i]) { + return false + } + } + return true +} + +// Matrix is a list of time series. 
+type Matrix []*SampleStream + +func (m Matrix) Len() int { return len(m) } +func (m Matrix) Less(i, j int) bool { return m[i].Metric.Before(m[j].Metric) } +func (m Matrix) Swap(i, j int) { m[i], m[j] = m[j], m[i] } + +func (mat Matrix) String() string { + matCp := make(Matrix, len(mat)) + copy(matCp, mat) + sort.Sort(matCp) + + strs := make([]string, len(matCp)) + + for i, ss := range matCp { + strs[i] = ss.String() + } + + return strings.Join(strs, "\n") +} diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/common/model/value_test.go juju-core-2.0.0/src/github.com/prometheus/common/model/value_test.go --- juju-core-2.0~beta15/src/github.com/prometheus/common/model/value_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/common/model/value_test.go 2016-10-13 14:32:23.000000000 +0000 @@ -0,0 +1,362 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package model + +import ( + "encoding/json" + "math" + "reflect" + "sort" + "testing" +) + +func TestSamplePairJSON(t *testing.T) { + input := []struct { + plain string + value SamplePair + }{ + { + plain: `[1234.567,"123.1"]`, + value: SamplePair{ + Value: 123.1, + Timestamp: 1234567, + }, + }, + } + + for _, test := range input { + b, err := json.Marshal(test.value) + if err != nil { + t.Error(err) + continue + } + + if string(b) != test.plain { + t.Errorf("encoding error: expected %q, got %q", test.plain, b) + continue + } + + var sp SamplePair + err = json.Unmarshal(b, &sp) + if err != nil { + t.Error(err) + continue + } + + if sp != test.value { + t.Errorf("decoding error: expected %v, got %v", test.value, sp) + } + } +} + +func TestSampleJSON(t *testing.T) { + input := []struct { + plain string + value Sample + }{ + { + plain: `{"metric":{"__name__":"test_metric"},"value":[1234.567,"123.1"]}`, + value: Sample{ + Metric: Metric{ + MetricNameLabel: "test_metric", + }, + Value: 123.1, + Timestamp: 1234567, + }, + }, + } + + for _, test := range input { + b, err := json.Marshal(test.value) + if err != nil { + t.Error(err) + continue + } + + if string(b) != test.plain { + t.Errorf("encoding error: expected %q, got %q", test.plain, b) + continue + } + + var sv Sample + err = json.Unmarshal(b, &sv) + if err != nil { + t.Error(err) + continue + } + + if !reflect.DeepEqual(sv, test.value) { + t.Errorf("decoding error: expected %v, got %v", test.value, sv) + } + } +} + +func TestVectorJSON(t *testing.T) { + input := []struct { + plain string + value Vector + }{ + { + plain: `[]`, + value: Vector{}, + }, + { + plain: `[{"metric":{"__name__":"test_metric"},"value":[1234.567,"123.1"]}]`, + value: Vector{&Sample{ + Metric: Metric{ + MetricNameLabel: "test_metric", + }, + Value: 123.1, + Timestamp: 1234567, + }}, + }, + { + plain: `[{"metric":{"__name__":"test_metric"},"value":[1234.567,"123.1"]},{"metric":{"foo":"bar"},"value":[1.234,"+Inf"]}]`, + value: Vector{ + 
&Sample{ + Metric: Metric{ + MetricNameLabel: "test_metric", + }, + Value: 123.1, + Timestamp: 1234567, + }, + &Sample{ + Metric: Metric{ + "foo": "bar", + }, + Value: SampleValue(math.Inf(1)), + Timestamp: 1234, + }, + }, + }, + } + + for _, test := range input { + b, err := json.Marshal(test.value) + if err != nil { + t.Error(err) + continue + } + + if string(b) != test.plain { + t.Errorf("encoding error: expected %q, got %q", test.plain, b) + continue + } + + var vec Vector + err = json.Unmarshal(b, &vec) + if err != nil { + t.Error(err) + continue + } + + if !reflect.DeepEqual(vec, test.value) { + t.Errorf("decoding error: expected %v, got %v", test.value, vec) + } + } +} + +func TestScalarJSON(t *testing.T) { + input := []struct { + plain string + value Scalar + }{ + { + plain: `[123.456,"456"]`, + value: Scalar{ + Timestamp: 123456, + Value: 456, + }, + }, + { + plain: `[123123.456,"+Inf"]`, + value: Scalar{ + Timestamp: 123123456, + Value: SampleValue(math.Inf(1)), + }, + }, + { + plain: `[123123.456,"-Inf"]`, + value: Scalar{ + Timestamp: 123123456, + Value: SampleValue(math.Inf(-1)), + }, + }, + } + + for _, test := range input { + b, err := json.Marshal(test.value) + if err != nil { + t.Error(err) + continue + } + + if string(b) != test.plain { + t.Errorf("encoding error: expected %q, got %q", test.plain, b) + continue + } + + var sv Scalar + err = json.Unmarshal(b, &sv) + if err != nil { + t.Error(err) + continue + } + + if sv != test.value { + t.Errorf("decoding error: expected %v, got %v", test.value, sv) + } + } +} + +func TestStringJSON(t *testing.T) { + input := []struct { + plain string + value String + }{ + { + plain: `[123.456,"test"]`, + value: String{ + Timestamp: 123456, + Value: "test", + }, + }, + { + plain: `[123123.456,"å°åŒ—"]`, + value: String{ + Timestamp: 123123456, + Value: "å°åŒ—", + }, + }, + } + + for _, test := range input { + b, err := json.Marshal(test.value) + if err != nil { + t.Error(err) + continue + } + + if string(b) != 
test.plain { + t.Errorf("encoding error: expected %q, got %q", test.plain, b) + continue + } + + var sv String + err = json.Unmarshal(b, &sv) + if err != nil { + t.Error(err) + continue + } + + if sv != test.value { + t.Errorf("decoding error: expected %v, got %v", test.value, sv) + } + } +} + +func TestVectorSort(t *testing.T) { + input := Vector{ + &Sample{ + Metric: Metric{ + MetricNameLabel: "A", + }, + Timestamp: 1, + }, + &Sample{ + Metric: Metric{ + MetricNameLabel: "A", + }, + Timestamp: 2, + }, + &Sample{ + Metric: Metric{ + MetricNameLabel: "C", + }, + Timestamp: 1, + }, + &Sample{ + Metric: Metric{ + MetricNameLabel: "C", + }, + Timestamp: 2, + }, + &Sample{ + Metric: Metric{ + MetricNameLabel: "B", + }, + Timestamp: 1, + }, + &Sample{ + Metric: Metric{ + MetricNameLabel: "B", + }, + Timestamp: 2, + }, + } + + expected := Vector{ + &Sample{ + Metric: Metric{ + MetricNameLabel: "A", + }, + Timestamp: 1, + }, + &Sample{ + Metric: Metric{ + MetricNameLabel: "A", + }, + Timestamp: 2, + }, + &Sample{ + Metric: Metric{ + MetricNameLabel: "B", + }, + Timestamp: 1, + }, + &Sample{ + Metric: Metric{ + MetricNameLabel: "B", + }, + Timestamp: 2, + }, + &Sample{ + Metric: Metric{ + MetricNameLabel: "C", + }, + Timestamp: 1, + }, + &Sample{ + Metric: Metric{ + MetricNameLabel: "C", + }, + Timestamp: 2, + }, + } + + sort.Sort(input) + + for i, actual := range input { + actualFp := actual.Metric.Fingerprint() + expectedFp := expected[i].Metric.Fingerprint() + + if actualFp != expectedFp { + t.Fatalf("%d. Incorrect fingerprint. Got %s; want %s", i, actualFp.String(), expectedFp.String()) + } + + if actual.Timestamp != expected[i].Timestamp { + t.Fatalf("%d. Incorrect timestamp. 
Got %s; want %s", i, actual.Timestamp, expected[i].Timestamp) + } + } +} diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/common/NOTICE juju-core-2.0.0/src/github.com/prometheus/common/NOTICE --- juju-core-2.0~beta15/src/github.com/prometheus/common/NOTICE 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/common/NOTICE 2016-10-13 14:32:23.000000000 +0000 @@ -0,0 +1,5 @@ +Common libraries shared by Prometheus Go components. +Copyright 2015 The Prometheus Authors + +This product includes software developed at +SoundCloud Ltd. (http://soundcloud.com/). diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/common/README.md juju-core-2.0.0/src/github.com/prometheus/common/README.md --- juju-core-2.0~beta15/src/github.com/prometheus/common/README.md 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/common/README.md 2016-10-13 14:32:23.000000000 +0000 @@ -0,0 +1,12 @@ +# Common +[![Build Status](https://travis-ci.org/prometheus/common.svg)](https://travis-ci.org/prometheus/common) + +This repository contains Go libraries that are shared across Prometheus +components and libraries. 
+ +* **config**: Common configuration structures +* **expfmt**: Decoding and encoding for the exposition format +* **log**: A logging wrapper around [logrus](https://github.com/Sirupsen/logrus) +* **model**: Shared data structures +* **route**: A routing wrapper around [httprouter](https://github.com/julienschmidt/httprouter) using `context.Context` +* **version**: Version informations and metric diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/common/route/route.go juju-core-2.0.0/src/github.com/prometheus/common/route/route.go --- juju-core-2.0~beta15/src/github.com/prometheus/common/route/route.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/common/route/route.go 2016-10-13 14:32:23.000000000 +0000 @@ -0,0 +1,119 @@ +package route + +import ( + "net/http" + "sync" + + "github.com/julienschmidt/httprouter" + "golang.org/x/net/context" +) + +var ( + mtx = sync.RWMutex{} + ctxts = map[*http.Request]context.Context{} +) + +// Context returns the context for the request. +func Context(r *http.Request) context.Context { + mtx.RLock() + defer mtx.RUnlock() + return ctxts[r] +} + +type param string + +// Param returns param p for the context. +func Param(ctx context.Context, p string) string { + return ctx.Value(param(p)).(string) +} + +// WithParam returns a new context with param p set to v. 
+func WithParam(ctx context.Context, p, v string) context.Context { + return context.WithValue(ctx, param(p), v) +} + +// handle turns a Handle into httprouter.Handle +func handle(h http.HandlerFunc) httprouter.Handle { + return func(w http.ResponseWriter, r *http.Request, params httprouter.Params) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + for _, p := range params { + ctx = context.WithValue(ctx, param(p.Key), p.Value) + } + + mtx.Lock() + ctxts[r] = ctx + mtx.Unlock() + + h(w, r) + + mtx.Lock() + delete(ctxts, r) + mtx.Unlock() + } +} + +// Router wraps httprouter.Router and adds support for prefixed sub-routers. +type Router struct { + rtr *httprouter.Router + prefix string +} + +// New returns a new Router. +func New() *Router { + return &Router{rtr: httprouter.New()} +} + +// WithPrefix returns a router that prefixes all registered routes with prefix. +func (r *Router) WithPrefix(prefix string) *Router { + return &Router{rtr: r.rtr, prefix: r.prefix + prefix} +} + +// Get registers a new GET route. +func (r *Router) Get(path string, h http.HandlerFunc) { + r.rtr.GET(r.prefix+path, handle(h)) +} + +// Options registers a new OPTIONS route. +func (r *Router) Options(path string, h http.HandlerFunc) { + r.rtr.OPTIONS(r.prefix+path, handle(h)) +} + +// Del registers a new DELETE route. +func (r *Router) Del(path string, h http.HandlerFunc) { + r.rtr.DELETE(r.prefix+path, handle(h)) +} + +// Put registers a new PUT route. +func (r *Router) Put(path string, h http.HandlerFunc) { + r.rtr.PUT(r.prefix+path, handle(h)) +} + +// Post registers a new POST route. +func (r *Router) Post(path string, h http.HandlerFunc) { + r.rtr.POST(r.prefix+path, handle(h)) +} + +// Redirect takes an absolute path and sends an internal HTTP redirect for it, +// prefixed by the router's path prefix. Note that this method does not include +// functionality for handling relative paths or full URL redirects. 
+func (r *Router) Redirect(w http.ResponseWriter, req *http.Request, path string, code int) { + http.Redirect(w, req, r.prefix+path, code) +} + +// ServeHTTP implements http.Handler. +func (r *Router) ServeHTTP(w http.ResponseWriter, req *http.Request) { + r.rtr.ServeHTTP(w, req) +} + +// FileServe returns a new http.HandlerFunc that serves files from dir. +// Using routes must provide the *filepath parameter. +func FileServe(dir string) http.HandlerFunc { + fs := http.FileServer(http.Dir(dir)) + + return func(w http.ResponseWriter, r *http.Request) { + r.URL.Path = Param(Context(r), "filepath") + fs.ServeHTTP(w, r) + } +} diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/common/route/route_test.go juju-core-2.0.0/src/github.com/prometheus/common/route/route_test.go --- juju-core-2.0~beta15/src/github.com/prometheus/common/route/route_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/common/route/route_test.go 2016-10-13 14:32:23.000000000 +0000 @@ -0,0 +1,27 @@ +package route + +import ( + "net/http" + "net/http/httptest" + "testing" +) + +func TestRedirect(t *testing.T) { + router := New().WithPrefix("/test/prefix") + w := httptest.NewRecorder() + r, err := http.NewRequest("GET", "http://localhost:9090/foo", nil) + if err != nil { + t.Fatalf("Error building test request: %s", err) + } + + router.Redirect(w, r, "/some/endpoint", http.StatusFound) + if w.Code != http.StatusFound { + t.Fatalf("Unexpected redirect status code: got %d, want %d", w.Code, http.StatusFound) + } + + want := "/test/prefix/some/endpoint" + got := w.Header()["Location"][0] + if want != got { + t.Fatalf("Unexpected redirect location: got %s, want %s", got, want) + } +} diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/common/.travis.yml juju-core-2.0.0/src/github.com/prometheus/common/.travis.yml --- juju-core-2.0~beta15/src/github.com/prometheus/common/.travis.yml 1970-01-01 00:00:00.000000000 +0000 +++ 
juju-core-2.0.0/src/github.com/prometheus/common/.travis.yml 2016-10-13 14:32:23.000000000 +0000 @@ -0,0 +1,8 @@ +sudo: false + +language: go +go: + - 1.4.3 + - 1.5.4 + - 1.6.2 + - tip diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/common/version/info.go juju-core-2.0.0/src/github.com/prometheus/common/version/info.go --- juju-core-2.0~beta15/src/github.com/prometheus/common/version/info.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/common/version/info.go 2016-10-13 14:32:23.000000000 +0000 @@ -0,0 +1,89 @@ +// Copyright 2016 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package version + +import ( + "bytes" + "fmt" + "runtime" + "strings" + "text/template" + + "github.com/prometheus/client_golang/prometheus" +) + +// Build information. Populated at build-time. +var ( + Version string + Revision string + Branch string + BuildUser string + BuildDate string + GoVersion = runtime.Version() +) + +// NewCollector returns a collector which exports metrics about current version information. 
func NewCollector(program string) *prometheus.GaugeVec {
	buildInfo := prometheus.NewGaugeVec(
		prometheus.GaugeOpts{
			Namespace: program,
			Name:      "build_info",
			Help: fmt.Sprintf(
				"A metric with a constant '1' value labeled by version, revision, branch, and goversion from which %s was built.",
				program,
			),
		},
		[]string{"version", "revision", "branch", "goversion"},
	)
	// Constant gauge: one series fixed at 1 whose labels carry the build
	// metadata held in the package-level vars (populated at build time).
	buildInfo.WithLabelValues(Version, Revision, Branch, GoVersion).Set(1)
	return buildInfo
}

// versionInfoTmpl contains the template used by Print.
// NOTE(review): the internal spacing of this literal affects Print's output
// verbatim (only leading/trailing whitespace is trimmed) — confirm it matches
// the intended upstream alignment before changing it.
var versionInfoTmpl = `
{{.program}}, version {{.version}} (branch: {{.branch}}, revision: {{.revision}})
  build user: {{.buildUser}}
  build date: {{.buildDate}}
  go version: {{.goVersion}}
`

// Print returns version information.
// It renders versionInfoTmpl with the package-level build variables; it
// panics only if template execution fails, which indicates a programming
// error in the template itself rather than a runtime condition.
func Print(program string) string {
	m := map[string]string{
		"program":   program,
		"version":   Version,
		"revision":  Revision,
		"branch":    Branch,
		"buildUser": BuildUser,
		"buildDate": BuildDate,
		"goVersion": GoVersion,
	}
	t := template.Must(template.New("version").Parse(versionInfoTmpl))

	var buf bytes.Buffer
	if err := t.ExecuteTemplate(&buf, "version", m); err != nil {
		panic(err)
	}
	return strings.TrimSpace(buf.String())
}

// Info returns version, branch and revision information.
func Info() string {
	return fmt.Sprintf("(version=%s, branch=%s, revision=%s)", Version, Branch, Revision)
}

// BuildContext returns goVersion, buildUser and buildDate information.
func BuildContext() string {
	return fmt.Sprintf("(go=%s, user=%s, date=%s)", GoVersion, BuildUser, BuildDate)
}
Proud (emeritus) and +Julius Volz in 2012. + +Maintainers of this repository: + +* Tobias Schmidt + +The following individuals have contributed code to this repository +(listed in alphabetical order): + +* Armen Baghumian +* Bjoern Rabenstein +* David Cournapeau +* Ji-Hoon, Seol +* Jonas Große Sundrup +* Julius Volz +* Matthias Rampke +* Nicky Gerritsen +* Rémi Audebert +* Tobias Schmidt diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/procfs/CONTRIBUTING.md juju-core-2.0.0/src/github.com/prometheus/procfs/CONTRIBUTING.md --- juju-core-2.0~beta15/src/github.com/prometheus/procfs/CONTRIBUTING.md 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/procfs/CONTRIBUTING.md 2016-10-13 14:32:04.000000000 +0000 @@ -0,0 +1,18 @@ +# Contributing + +Prometheus uses GitHub to manage reviews of pull requests. + +* If you have a trivial fix or improvement, go ahead and create a pull + request, addressing (with `@...`) one or more of the maintainers + (see [AUTHORS.md](AUTHORS.md)) in the description of the pull request. + +* If you plan to do something more involved, first discuss your ideas + on our [mailing list](https://groups.google.com/forum/?fromgroups#!forum/prometheus-developers). + This will avoid unnecessary work and surely give you and us a good deal + of inspiration. + +* Relevant coding style guidelines are the [Go Code Review + Comments](https://code.google.com/p/go-wiki/wiki/CodeReviewComments) + and the _Formatting and style_ section of Peter Bourgon's [Go: Best + Practices for Production + Environments](http://peter.bourgon.org/go-in-production/#formatting-and-style). 
diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/procfs/doc.go juju-core-2.0.0/src/github.com/prometheus/procfs/doc.go --- juju-core-2.0~beta15/src/github.com/prometheus/procfs/doc.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/procfs/doc.go 2016-10-13 14:32:04.000000000 +0000 @@ -0,0 +1,45 @@ +// Copyright 2014 Prometheus Team +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package procfs provides functions to retrieve system, kernel and process +// metrics from the pseudo-filesystem proc. 
+// +// Example: +// +// package main +// +// import ( +// "fmt" +// "log" +// +// "github.com/prometheus/procfs" +// ) +// +// func main() { +// p, err := procfs.Self() +// if err != nil { +// log.Fatalf("could not get process: %s", err) +// } +// +// stat, err := p.NewStat() +// if err != nil { +// log.Fatalf("could not get process stat: %s", err) +// } +// +// fmt.Printf("command: %s\n", stat.Comm) +// fmt.Printf("cpu time: %fs\n", stat.CPUTime()) +// fmt.Printf("vsize: %dB\n", stat.VirtualMemory()) +// fmt.Printf("rss: %dB\n", stat.ResidentMemory()) +// } +// +package procfs diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/procfs/fs.go juju-core-2.0.0/src/github.com/prometheus/procfs/fs.go --- juju-core-2.0~beta15/src/github.com/prometheus/procfs/fs.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/procfs/fs.go 2016-10-13 14:32:04.000000000 +0000 @@ -0,0 +1,33 @@ +package procfs + +import ( + "fmt" + "os" + "path" +) + +// FS represents the pseudo-filesystem proc, which provides an interface to +// kernel data structures. +type FS string + +// DefaultMountPoint is the common mount point of the proc filesystem. +const DefaultMountPoint = "/proc" + +// NewFS returns a new FS mounted under the given mountPoint. It will error +// if the mount point can't be read. +func NewFS(mountPoint string) (FS, error) { + info, err := os.Stat(mountPoint) + if err != nil { + return "", fmt.Errorf("could not read %s: %s", mountPoint, err) + } + if !info.IsDir() { + return "", fmt.Errorf("mount point %s is not a directory", mountPoint) + } + + return FS(mountPoint), nil +} + +// Path returns the path of the given subsystem relative to the procfs root. +func (fs FS) Path(p ...string) string { + return path.Join(append([]string{string(fs)}, p...)...) 
+} diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/procfs/fs_test.go juju-core-2.0.0/src/github.com/prometheus/procfs/fs_test.go --- juju-core-2.0~beta15/src/github.com/prometheus/procfs/fs_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/procfs/fs_test.go 2016-10-13 14:32:04.000000000 +0000 @@ -0,0 +1,13 @@ +package procfs + +import "testing" + +func TestNewFS(t *testing.T) { + if _, err := NewFS("foobar"); err == nil { + t.Error("want NewFS to fail for non-existing mount point") + } + + if _, err := NewFS("procfs.go"); err == nil { + t.Error("want NewFS to fail if mount point is not a directory") + } +} diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/procfs/ipvs.go juju-core-2.0.0/src/github.com/prometheus/procfs/ipvs.go --- juju-core-2.0~beta15/src/github.com/prometheus/procfs/ipvs.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/procfs/ipvs.go 2016-10-13 14:32:04.000000000 +0000 @@ -0,0 +1,224 @@ +package procfs + +import ( + "bufio" + "encoding/hex" + "errors" + "fmt" + "io" + "io/ioutil" + "net" + "os" + "strconv" + "strings" +) + +// IPVSStats holds IPVS statistics, as exposed by the kernel in `/proc/net/ip_vs_stats`. +type IPVSStats struct { + // Total count of connections. + Connections uint64 + // Total incoming packages processed. + IncomingPackets uint64 + // Total outgoing packages processed. + OutgoingPackets uint64 + // Total incoming traffic. + IncomingBytes uint64 + // Total outgoing traffic. + OutgoingBytes uint64 +} + +// IPVSBackendStatus holds current metrics of one virtual / real address pair. +type IPVSBackendStatus struct { + // The local (virtual) IP address. + LocalAddress net.IP + // The local (virtual) port. + LocalPort uint16 + // The transport protocol (TCP, UDP). + Proto string + // The remote (real) IP address. + RemoteAddress net.IP + // The remote (real) port. 
+ RemotePort uint16 + // The current number of active connections for this virtual/real address pair. + ActiveConn uint64 + // The current number of inactive connections for this virtual/real address pair. + InactConn uint64 + // The current weight of this virtual/real address pair. + Weight uint64 +} + +// NewIPVSStats reads the IPVS statistics. +func NewIPVSStats() (IPVSStats, error) { + fs, err := NewFS(DefaultMountPoint) + if err != nil { + return IPVSStats{}, err + } + + return fs.NewIPVSStats() +} + +// NewIPVSStats reads the IPVS statistics from the specified `proc` filesystem. +func (fs FS) NewIPVSStats() (IPVSStats, error) { + file, err := os.Open(fs.Path("net/ip_vs_stats")) + if err != nil { + return IPVSStats{}, err + } + defer file.Close() + + return parseIPVSStats(file) +} + +// parseIPVSStats performs the actual parsing of `ip_vs_stats`. +func parseIPVSStats(file io.Reader) (IPVSStats, error) { + var ( + statContent []byte + statLines []string + statFields []string + stats IPVSStats + ) + + statContent, err := ioutil.ReadAll(file) + if err != nil { + return IPVSStats{}, err + } + + statLines = strings.SplitN(string(statContent), "\n", 4) + if len(statLines) != 4 { + return IPVSStats{}, errors.New("ip_vs_stats corrupt: too short") + } + + statFields = strings.Fields(statLines[2]) + if len(statFields) != 5 { + return IPVSStats{}, errors.New("ip_vs_stats corrupt: unexpected number of fields") + } + + stats.Connections, err = strconv.ParseUint(statFields[0], 16, 64) + if err != nil { + return IPVSStats{}, err + } + stats.IncomingPackets, err = strconv.ParseUint(statFields[1], 16, 64) + if err != nil { + return IPVSStats{}, err + } + stats.OutgoingPackets, err = strconv.ParseUint(statFields[2], 16, 64) + if err != nil { + return IPVSStats{}, err + } + stats.IncomingBytes, err = strconv.ParseUint(statFields[3], 16, 64) + if err != nil { + return IPVSStats{}, err + } + stats.OutgoingBytes, err = strconv.ParseUint(statFields[4], 16, 64) + if err != nil { + 
return IPVSStats{}, err + } + + return stats, nil +} + +// NewIPVSBackendStatus reads and returns the status of all (virtual,real) server pairs. +func NewIPVSBackendStatus() ([]IPVSBackendStatus, error) { + fs, err := NewFS(DefaultMountPoint) + if err != nil { + return []IPVSBackendStatus{}, err + } + + return fs.NewIPVSBackendStatus() +} + +// NewIPVSBackendStatus reads and returns the status of all (virtual,real) server pairs from the specified `proc` filesystem. +func (fs FS) NewIPVSBackendStatus() ([]IPVSBackendStatus, error) { + file, err := os.Open(fs.Path("net/ip_vs")) + if err != nil { + return nil, err + } + defer file.Close() + + return parseIPVSBackendStatus(file) +} + +func parseIPVSBackendStatus(file io.Reader) ([]IPVSBackendStatus, error) { + var ( + status []IPVSBackendStatus + scanner = bufio.NewScanner(file) + proto string + localAddress net.IP + localPort uint16 + err error + ) + + for scanner.Scan() { + fields := strings.Fields(string(scanner.Text())) + if len(fields) == 0 { + continue + } + switch { + case fields[0] == "IP" || fields[0] == "Prot" || fields[1] == "RemoteAddress:Port": + continue + case fields[0] == "TCP" || fields[0] == "UDP": + if len(fields) < 2 { + continue + } + proto = fields[0] + localAddress, localPort, err = parseIPPort(fields[1]) + if err != nil { + return nil, err + } + case fields[0] == "->": + if len(fields) < 6 { + continue + } + remoteAddress, remotePort, err := parseIPPort(fields[1]) + if err != nil { + return nil, err + } + weight, err := strconv.ParseUint(fields[3], 10, 64) + if err != nil { + return nil, err + } + activeConn, err := strconv.ParseUint(fields[4], 10, 64) + if err != nil { + return nil, err + } + inactConn, err := strconv.ParseUint(fields[5], 10, 64) + if err != nil { + return nil, err + } + status = append(status, IPVSBackendStatus{ + LocalAddress: localAddress, + LocalPort: localPort, + RemoteAddress: remoteAddress, + RemotePort: remotePort, + Proto: proto, + Weight: weight, + ActiveConn: 
// parseIPPort parses an address in the kernel's hexadecimal "IP:Port"
// notation as found in /proc/net/ip_vs: 8 hex digits for an IPv4 address
// or 32 for IPv6, followed by a colon and a hex port number.
func parseIPPort(s string) (net.IP, uint16, error) {
	sep := strings.IndexByte(s, ':')
	if sep < 0 {
		return nil, 0, fmt.Errorf("invalid IP:Port: %s", s)
	}
	ipHex, portHex := s[:sep], s[sep+1:]

	// Only the two fixed widths are valid; anything else is corrupt input.
	if len(ipHex) != 8 && len(ipHex) != 32 {
		return nil, 0, fmt.Errorf("invalid IP: %s", ipHex)
	}

	ip, err := hex.DecodeString(ipHex)
	if err != nil {
		return nil, 0, err
	}

	port, err := strconv.ParseUint(portHex, 16, 16)
	if err != nil {
		return nil, 0, err
	}

	return ip, uint16(port), nil
}
net.ParseIP("192.168.84.22"), + RemotePort: 3306, + Proto: "TCP", + Weight: 0, + ActiveConn: 0, + InactConn: 0, + }, + IPVSBackendStatus{ + LocalAddress: net.ParseIP("192.168.0.57"), + LocalPort: 3306, + RemoteAddress: net.ParseIP("192.168.82.21"), + RemotePort: 3306, + Proto: "TCP", + Weight: 100, + ActiveConn: 1499, + InactConn: 0, + }, + IPVSBackendStatus{ + LocalAddress: net.ParseIP("192.168.0.57"), + LocalPort: 3306, + RemoteAddress: net.ParseIP("192.168.50.21"), + RemotePort: 3306, + Proto: "TCP", + Weight: 100, + ActiveConn: 1498, + InactConn: 0, + }, + IPVSBackendStatus{ + LocalAddress: net.ParseIP("192.168.0.55"), + LocalPort: 3306, + RemoteAddress: net.ParseIP("192.168.50.26"), + RemotePort: 3306, + Proto: "TCP", + Weight: 0, + ActiveConn: 0, + InactConn: 0, + }, + IPVSBackendStatus{ + LocalAddress: net.ParseIP("192.168.0.55"), + LocalPort: 3306, + RemoteAddress: net.ParseIP("192.168.49.32"), + RemotePort: 3306, + Proto: "TCP", + Weight: 100, + ActiveConn: 0, + InactConn: 0, + }, + } +) + +func TestIPVSStats(t *testing.T) { + stats, err := FS("fixtures").NewIPVSStats() + if err != nil { + t.Fatal(err) + } + + if stats != expectedIPVSStats { + t.Errorf("want %+v, have %+v", expectedIPVSStats, stats) + } +} + +func TestParseIPPort(t *testing.T) { + ip := net.ParseIP("192.168.0.22") + port := uint16(3306) + + gotIP, gotPort, err := parseIPPort("C0A80016:0CEA") + if err != nil { + t.Fatal(err) + } + if !(gotIP.Equal(ip) && port == gotPort) { + t.Errorf("want %s:%d, have %s:%d", ip, port, gotIP, gotPort) + } +} + +func TestParseIPPortInvalid(t *testing.T) { + testcases := []string{ + "", + "C0A80016", + "C0A800:1234", + "FOOBARBA:1234", + "C0A80016:0CEA:1234", + } + + for _, s := range testcases { + ip, port, err := parseIPPort(s) + if ip != nil || port != uint16(0) || err == nil { + t.Errorf("Expected error for input %s, have ip = %s, port = %v, err = %v", s, ip, port, err) + } + } +} + +func TestParseIPPortIPv6(t *testing.T) { + ip := 
net.ParseIP("dead:beef::1") + port := uint16(8080) + + gotIP, gotPort, err := parseIPPort("DEADBEEF000000000000000000000001:1F90") + if err != nil { + t.Fatal(err) + } + if !(gotIP.Equal(ip) && port == gotPort) { + t.Errorf("want %s:%d, have %s:%d", ip, port, gotIP, gotPort) + } + +} + +func TestIPVSBackendStatus(t *testing.T) { + backendStats, err := FS("fixtures").NewIPVSBackendStatus() + if err != nil { + t.Fatal(err) + } + if want, have := len(expectedIPVSBackendStatuses), len(backendStats); want != have { + t.Fatalf("want %d backend statuses, have %d", want, have) + } + + for idx, expect := range expectedIPVSBackendStatuses { + if !backendStats[idx].LocalAddress.Equal(expect.LocalAddress) { + t.Errorf("want LocalAddress %s, have %s", expect.LocalAddress, backendStats[idx].LocalAddress) + } + if backendStats[idx].LocalPort != expect.LocalPort { + t.Errorf("want LocalPort %d, have %d", expect.LocalPort, backendStats[idx].LocalPort) + } + if !backendStats[idx].RemoteAddress.Equal(expect.RemoteAddress) { + t.Errorf("want RemoteAddress %s, have %s", expect.RemoteAddress, backendStats[idx].RemoteAddress) + } + if backendStats[idx].RemotePort != expect.RemotePort { + t.Errorf("want RemotePort %d, have %d", expect.RemotePort, backendStats[idx].RemotePort) + } + if backendStats[idx].Proto != expect.Proto { + t.Errorf("want Proto %s, have %s", expect.Proto, backendStats[idx].Proto) + } + if backendStats[idx].Weight != expect.Weight { + t.Errorf("want Weight %d, have %d", expect.Weight, backendStats[idx].Weight) + } + if backendStats[idx].ActiveConn != expect.ActiveConn { + t.Errorf("want ActiveConn %d, have %d", expect.ActiveConn, backendStats[idx].ActiveConn) + } + if backendStats[idx].InactConn != expect.InactConn { + t.Errorf("want InactConn %d, have %d", expect.InactConn, backendStats[idx].InactConn) + } + } +} diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/procfs/LICENSE juju-core-2.0.0/src/github.com/prometheus/procfs/LICENSE --- 
juju-core-2.0~beta15/src/github.com/prometheus/procfs/LICENSE 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/procfs/LICENSE 2016-10-13 14:32:04.000000000 +0000 @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. 
Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative 
Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/procfs/Makefile juju-core-2.0.0/src/github.com/prometheus/procfs/Makefile --- juju-core-2.0~beta15/src/github.com/prometheus/procfs/Makefile 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/procfs/Makefile 2016-10-13 14:32:04.000000000 +0000 @@ -0,0 +1,6 @@ +ci: + ! gofmt -l *.go | read nothing + go vet + go test -v ./... 
+ go get github.com/golang/lint/golint + golint *.go diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/procfs/mdstat.go juju-core-2.0.0/src/github.com/prometheus/procfs/mdstat.go --- juju-core-2.0~beta15/src/github.com/prometheus/procfs/mdstat.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/procfs/mdstat.go 2016-10-13 14:32:04.000000000 +0000 @@ -0,0 +1,138 @@ +package procfs + +import ( + "fmt" + "io/ioutil" + "regexp" + "strconv" + "strings" +) + +var ( + statuslineRE = regexp.MustCompile(`(\d+) blocks .*\[(\d+)/(\d+)\] \[[U_]+\]`) + buildlineRE = regexp.MustCompile(`\((\d+)/\d+\)`) +) + +// MDStat holds info parsed from /proc/mdstat. +type MDStat struct { + // Name of the device. + Name string + // activity-state of the device. + ActivityState string + // Number of active disks. + DisksActive int64 + // Total number of disks the device consists of. + DisksTotal int64 + // Number of blocks the device holds. + BlocksTotal int64 + // Number of blocks on the device that are in sync. + BlocksSynced int64 +} + +// ParseMDStat parses an mdstat-file and returns a struct with the relevant infos. 
+func (fs FS) ParseMDStat() (mdstates []MDStat, err error) { + mdStatusFilePath := fs.Path("mdstat") + content, err := ioutil.ReadFile(mdStatusFilePath) + if err != nil { + return []MDStat{}, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err) + } + + mdStates := []MDStat{} + lines := strings.Split(string(content), "\n") + for i, l := range lines { + if l == "" { + continue + } + if l[0] == ' ' { + continue + } + if strings.HasPrefix(l, "Personalities") || strings.HasPrefix(l, "unused") { + continue + } + + mainLine := strings.Split(l, " ") + if len(mainLine) < 3 { + return mdStates, fmt.Errorf("error parsing mdline: %s", l) + } + mdName := mainLine[0] + activityState := mainLine[2] + + if len(lines) <= i+3 { + return mdStates, fmt.Errorf( + "error parsing %s: too few lines for md device %s", + mdStatusFilePath, + mdName, + ) + } + + active, total, size, err := evalStatusline(lines[i+1]) + if err != nil { + return mdStates, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err) + } + + // j is the line number of the syncing-line. + j := i + 2 + if strings.Contains(lines[i+2], "bitmap") { // skip bitmap line + j = i + 3 + } + + // If device is syncing at the moment, get the number of currently + // synced bytes, otherwise that number equals the size of the device. 
+ syncedBlocks := size + if strings.Contains(lines[j], "recovery") || strings.Contains(lines[j], "resync") { + syncedBlocks, err = evalBuildline(lines[j]) + if err != nil { + return mdStates, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err) + } + } + + mdStates = append(mdStates, MDStat{ + Name: mdName, + ActivityState: activityState, + DisksActive: active, + DisksTotal: total, + BlocksTotal: size, + BlocksSynced: syncedBlocks, + }) + } + + return mdStates, nil +} + +func evalStatusline(statusline string) (active, total, size int64, err error) { + matches := statuslineRE.FindStringSubmatch(statusline) + if len(matches) != 4 { + return 0, 0, 0, fmt.Errorf("unexpected statusline: %s", statusline) + } + + size, err = strconv.ParseInt(matches[1], 10, 64) + if err != nil { + return 0, 0, 0, fmt.Errorf("unexpected statusline %s: %s", statusline, err) + } + + total, err = strconv.ParseInt(matches[2], 10, 64) + if err != nil { + return 0, 0, 0, fmt.Errorf("unexpected statusline %s: %s", statusline, err) + } + + active, err = strconv.ParseInt(matches[3], 10, 64) + if err != nil { + return 0, 0, 0, fmt.Errorf("unexpected statusline %s: %s", statusline, err) + } + + return active, total, size, nil +} + +func evalBuildline(buildline string) (syncedBlocks int64, err error) { + matches := buildlineRE.FindStringSubmatch(buildline) + if len(matches) != 2 { + return 0, fmt.Errorf("unexpected buildline: %s", buildline) + } + + syncedBlocks, err = strconv.ParseInt(matches[1], 10, 64) + if err != nil { + return 0, fmt.Errorf("%s in buildline: %s", err, buildline) + } + + return syncedBlocks, nil +} diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/procfs/mdstat_test.go juju-core-2.0.0/src/github.com/prometheus/procfs/mdstat_test.go --- juju-core-2.0~beta15/src/github.com/prometheus/procfs/mdstat_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/procfs/mdstat_test.go 2016-10-13 14:32:04.000000000 +0000 @@ -0,0 +1,31 @@ +package 
procfs + +import ( + "testing" +) + +func TestMDStat(t *testing.T) { + mdStates, err := FS("fixtures").ParseMDStat() + if err != nil { + t.Fatalf("parsing of reference-file failed entirely: %s", err) + } + + refs := map[string]MDStat{ + "md3": MDStat{"md3", "active", 8, 8, 5853468288, 5853468288}, + "md127": MDStat{"md127", "active", 2, 2, 312319552, 312319552}, + "md0": MDStat{"md0", "active", 2, 2, 248896, 248896}, + "md4": MDStat{"md4", "inactive", 2, 2, 4883648, 4883648}, + "md6": MDStat{"md6", "active", 1, 2, 195310144, 16775552}, + "md8": MDStat{"md8", "active", 2, 2, 195310144, 16775552}, + "md7": MDStat{"md7", "active", 3, 4, 7813735424, 7813735424}, + } + + if want, have := len(refs), len(mdStates); want != have { + t.Errorf("want %d parsed md-devices, have %d", want, have) + } + for _, md := range mdStates { + if want, have := refs[md.Name], md; want != have { + t.Errorf("%s: want %v, have %v", md.Name, want, have) + } + } +} diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/procfs/NOTICE juju-core-2.0.0/src/github.com/prometheus/procfs/NOTICE --- juju-core-2.0~beta15/src/github.com/prometheus/procfs/NOTICE 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/procfs/NOTICE 2016-10-13 14:32:04.000000000 +0000 @@ -0,0 +1,7 @@ +procfs provides functions to retrieve system, kernel and process +metrics from the pseudo-filesystem proc. + +Copyright 2014-2015 The Prometheus Authors + +This product includes software developed at +SoundCloud Ltd. (http://soundcloud.com/). 
diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/procfs/proc.go juju-core-2.0.0/src/github.com/prometheus/procfs/proc.go --- juju-core-2.0~beta15/src/github.com/prometheus/procfs/proc.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/procfs/proc.go 2016-10-13 14:32:04.000000000 +0000 @@ -0,0 +1,212 @@ +package procfs + +import ( + "fmt" + "io/ioutil" + "os" + "strconv" + "strings" +) + +// Proc provides information about a running process. +type Proc struct { + // The process ID. + PID int + + fs FS +} + +// Procs represents a list of Proc structs. +type Procs []Proc + +func (p Procs) Len() int { return len(p) } +func (p Procs) Swap(i, j int) { p[i], p[j] = p[j], p[i] } +func (p Procs) Less(i, j int) bool { return p[i].PID < p[j].PID } + +// Self returns a process for the current process read via /proc/self. +func Self() (Proc, error) { + fs, err := NewFS(DefaultMountPoint) + if err != nil { + return Proc{}, err + } + return fs.Self() +} + +// NewProc returns a process for the given pid under /proc. +func NewProc(pid int) (Proc, error) { + fs, err := NewFS(DefaultMountPoint) + if err != nil { + return Proc{}, err + } + return fs.NewProc(pid) +} + +// AllProcs returns a list of all currently available processes under /proc. +func AllProcs() (Procs, error) { + fs, err := NewFS(DefaultMountPoint) + if err != nil { + return Procs{}, err + } + return fs.AllProcs() +} + +// Self returns a process for the current process. +func (fs FS) Self() (Proc, error) { + p, err := os.Readlink(fs.Path("self")) + if err != nil { + return Proc{}, err + } + pid, err := strconv.Atoi(strings.Replace(p, string(fs), "", -1)) + if err != nil { + return Proc{}, err + } + return fs.NewProc(pid) +} + +// NewProc returns a process for the given pid. 
+func (fs FS) NewProc(pid int) (Proc, error) { + if _, err := os.Stat(fs.Path(strconv.Itoa(pid))); err != nil { + return Proc{}, err + } + return Proc{PID: pid, fs: fs}, nil +} + +// AllProcs returns a list of all currently available processes. +func (fs FS) AllProcs() (Procs, error) { + d, err := os.Open(fs.Path()) + if err != nil { + return Procs{}, err + } + defer d.Close() + + names, err := d.Readdirnames(-1) + if err != nil { + return Procs{}, fmt.Errorf("could not read %s: %s", d.Name(), err) + } + + p := Procs{} + for _, n := range names { + pid, err := strconv.ParseInt(n, 10, 64) + if err != nil { + continue + } + p = append(p, Proc{PID: int(pid), fs: fs}) + } + + return p, nil +} + +// CmdLine returns the command line of a process. +func (p Proc) CmdLine() ([]string, error) { + f, err := os.Open(p.path("cmdline")) + if err != nil { + return nil, err + } + defer f.Close() + + data, err := ioutil.ReadAll(f) + if err != nil { + return nil, err + } + + if len(data) < 1 { + return []string{}, nil + } + + return strings.Split(string(data[:len(data)-1]), string(byte(0))), nil +} + +// Comm returns the command name of a process. +func (p Proc) Comm() (string, error) { + f, err := os.Open(p.path("comm")) + if err != nil { + return "", err + } + defer f.Close() + + data, err := ioutil.ReadAll(f) + if err != nil { + return "", err + } + + return strings.TrimSpace(string(data)), nil +} + +// Executable returns the absolute path of the executable command of a process. +func (p Proc) Executable() (string, error) { + exe, err := os.Readlink(p.path("exe")) + if os.IsNotExist(err) { + return "", nil + } + + return exe, err +} + +// FileDescriptors returns the currently open file descriptors of a process. 
+func (p Proc) FileDescriptors() ([]uintptr, error) { + names, err := p.fileDescriptors() + if err != nil { + return nil, err + } + + fds := make([]uintptr, len(names)) + for i, n := range names { + fd, err := strconv.ParseInt(n, 10, 32) + if err != nil { + return nil, fmt.Errorf("could not parse fd %s: %s", n, err) + } + fds[i] = uintptr(fd) + } + + return fds, nil +} + +// FileDescriptorTargets returns the targets of all file descriptors of a process. +// If a file descriptor is not a symlink to a file (like a socket), that value will be the empty string. +func (p Proc) FileDescriptorTargets() ([]string, error) { + names, err := p.fileDescriptors() + if err != nil { + return nil, err + } + + targets := make([]string, len(names)) + + for i, name := range names { + target, err := os.Readlink(p.path("fd", name)) + if err == nil { + targets[i] = target + } + } + + return targets, nil +} + +// FileDescriptorsLen returns the number of currently open file descriptors of +// a process. +func (p Proc) FileDescriptorsLen() (int, error) { + fds, err := p.fileDescriptors() + if err != nil { + return 0, err + } + + return len(fds), nil +} + +func (p Proc) fileDescriptors() ([]string, error) { + d, err := os.Open(p.path("fd")) + if err != nil { + return nil, err + } + defer d.Close() + + names, err := d.Readdirnames(-1) + if err != nil { + return nil, fmt.Errorf("could not read %s: %s", d.Name(), err) + } + + return names, nil +} + +func (p Proc) path(pa ...string) string { + return p.fs.Path(append([]string{strconv.Itoa(p.PID)}, pa...)...) 
+} diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/procfs/proc_io.go juju-core-2.0.0/src/github.com/prometheus/procfs/proc_io.go --- juju-core-2.0~beta15/src/github.com/prometheus/procfs/proc_io.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/procfs/proc_io.go 2016-10-13 14:32:04.000000000 +0000 @@ -0,0 +1,55 @@ +package procfs + +import ( + "fmt" + "io/ioutil" + "os" +) + +// ProcIO models the content of /proc//io. +type ProcIO struct { + // Chars read. + RChar uint64 + // Chars written. + WChar uint64 + // Read syscalls. + SyscR uint64 + // Write syscalls. + SyscW uint64 + // Bytes read. + ReadBytes uint64 + // Bytes written. + WriteBytes uint64 + // Bytes written, but taking into account truncation. See + // Documentation/filesystems/proc.txt in the kernel sources for + // detailed explanation. + CancelledWriteBytes int64 +} + +// NewIO creates a new ProcIO instance from a given Proc instance. +func (p Proc) NewIO() (ProcIO, error) { + pio := ProcIO{} + + f, err := os.Open(p.path("io")) + if err != nil { + return pio, err + } + defer f.Close() + + data, err := ioutil.ReadAll(f) + if err != nil { + return pio, err + } + + ioFormat := "rchar: %d\nwchar: %d\nsyscr: %d\nsyscw: %d\n" + + "read_bytes: %d\nwrite_bytes: %d\n" + + "cancelled_write_bytes: %d\n" + + _, err = fmt.Sscanf(string(data), ioFormat, &pio.RChar, &pio.WChar, &pio.SyscR, + &pio.SyscW, &pio.ReadBytes, &pio.WriteBytes, &pio.CancelledWriteBytes) + if err != nil { + return pio, err + } + + return pio, nil +} diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/procfs/proc_io_test.go juju-core-2.0.0/src/github.com/prometheus/procfs/proc_io_test.go --- juju-core-2.0~beta15/src/github.com/prometheus/procfs/proc_io_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/procfs/proc_io_test.go 2016-10-13 14:32:04.000000000 +0000 @@ -0,0 +1,33 @@ +package procfs + +import "testing" + +func TestProcIO(t *testing.T) { + p, err 
:= FS("fixtures").NewProc(26231) + if err != nil { + t.Fatal(err) + } + + s, err := p.NewIO() + if err != nil { + t.Fatal(err) + } + + for _, test := range []struct { + name string + want int64 + have int64 + }{ + {name: "RChar", want: 750339, have: int64(s.RChar)}, + {name: "WChar", want: 818609, have: int64(s.WChar)}, + {name: "SyscR", want: 7405, have: int64(s.SyscR)}, + {name: "SyscW", want: 5245, have: int64(s.SyscW)}, + {name: "ReadBytes", want: 1024, have: int64(s.ReadBytes)}, + {name: "WriteBytes", want: 2048, have: int64(s.WriteBytes)}, + {name: "CancelledWriteBytes", want: -1024, have: s.CancelledWriteBytes}, + } { + if test.want != test.have { + t.Errorf("want %s %d, have %d", test.name, test.want, test.have) + } + } +} diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/procfs/proc_limits.go juju-core-2.0.0/src/github.com/prometheus/procfs/proc_limits.go --- juju-core-2.0~beta15/src/github.com/prometheus/procfs/proc_limits.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/procfs/proc_limits.go 2016-10-13 14:32:04.000000000 +0000 @@ -0,0 +1,137 @@ +package procfs + +import ( + "bufio" + "fmt" + "os" + "regexp" + "strconv" +) + +// ProcLimits represents the soft limits for each of the process's resource +// limits. For more information see getrlimit(2): +// http://man7.org/linux/man-pages/man2/getrlimit.2.html. +type ProcLimits struct { + // CPU time limit in seconds. + CPUTime int + // Maximum size of files that the process may create. + FileSize int + // Maximum size of the process's data segment (initialized data, + // uninitialized data, and heap). + DataSize int + // Maximum size of the process stack in bytes. + StackSize int + // Maximum size of a core file. + CoreFileSize int + // Limit of the process's resident set in pages. + ResidentSet int + // Maximum number of processes that can be created for the real user ID of + // the calling process. 
+ Processes int + // Value one greater than the maximum file descriptor number that can be + // opened by this process. + OpenFiles int + // Maximum number of bytes of memory that may be locked into RAM. + LockedMemory int + // Maximum size of the process's virtual memory address space in bytes. + AddressSpace int + // Limit on the combined number of flock(2) locks and fcntl(2) leases that + // this process may establish. + FileLocks int + // Limit of signals that may be queued for the real user ID of the calling + // process. + PendingSignals int + // Limit on the number of bytes that can be allocated for POSIX message + // queues for the real user ID of the calling process. + MsqqueueSize int + // Limit of the nice priority set using setpriority(2) or nice(2). + NicePriority int + // Limit of the real-time priority set using sched_setscheduler(2) or + // sched_setparam(2). + RealtimePriority int + // Limit (in microseconds) on the amount of CPU time that a process + // scheduled under a real-time scheduling policy may consume without making + // a blocking system call. + RealtimeTimeout int +} + +const ( + limitsFields = 3 + limitsUnlimited = "unlimited" +) + +var ( + limitsDelimiter = regexp.MustCompile(" +") +) + +// NewLimits returns the current soft limits of the process. 
+func (p Proc) NewLimits() (ProcLimits, error) { + f, err := os.Open(p.path("limits")) + if err != nil { + return ProcLimits{}, err + } + defer f.Close() + + var ( + l = ProcLimits{} + s = bufio.NewScanner(f) + ) + for s.Scan() { + fields := limitsDelimiter.Split(s.Text(), limitsFields) + if len(fields) != limitsFields { + return ProcLimits{}, fmt.Errorf( + "couldn't parse %s line %s", f.Name(), s.Text()) + } + + switch fields[0] { + case "Max cpu time": + l.CPUTime, err = parseInt(fields[1]) + case "Max file size": + l.FileSize, err = parseInt(fields[1]) + case "Max data size": + l.DataSize, err = parseInt(fields[1]) + case "Max stack size": + l.StackSize, err = parseInt(fields[1]) + case "Max core file size": + l.CoreFileSize, err = parseInt(fields[1]) + case "Max resident set": + l.ResidentSet, err = parseInt(fields[1]) + case "Max processes": + l.Processes, err = parseInt(fields[1]) + case "Max open files": + l.OpenFiles, err = parseInt(fields[1]) + case "Max locked memory": + l.LockedMemory, err = parseInt(fields[1]) + case "Max address space": + l.AddressSpace, err = parseInt(fields[1]) + case "Max file locks": + l.FileLocks, err = parseInt(fields[1]) + case "Max pending signals": + l.PendingSignals, err = parseInt(fields[1]) + case "Max msgqueue size": + l.MsqqueueSize, err = parseInt(fields[1]) + case "Max nice priority": + l.NicePriority, err = parseInt(fields[1]) + case "Max realtime priority": + l.RealtimePriority, err = parseInt(fields[1]) + case "Max realtime timeout": + l.RealtimeTimeout, err = parseInt(fields[1]) + } + if err != nil { + return ProcLimits{}, err + } + } + + return l, s.Err() +} + +func parseInt(s string) (int, error) { + if s == limitsUnlimited { + return -1, nil + } + i, err := strconv.ParseInt(s, 10, 32) + if err != nil { + return 0, fmt.Errorf("couldn't parse value %s: %s", s, err) + } + return int(i), nil +} diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/procfs/proc_limits_test.go 
juju-core-2.0.0/src/github.com/prometheus/procfs/proc_limits_test.go --- juju-core-2.0~beta15/src/github.com/prometheus/procfs/proc_limits_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/procfs/proc_limits_test.go 2016-10-13 14:32:04.000000000 +0000 @@ -0,0 +1,31 @@ +package procfs + +import "testing" + +func TestNewLimits(t *testing.T) { + p, err := FS("fixtures").NewProc(26231) + if err != nil { + t.Fatal(err) + } + + l, err := p.NewLimits() + if err != nil { + t.Fatal(err) + } + + for _, test := range []struct { + name string + want int + have int + }{ + {name: "cpu time", want: -1, have: l.CPUTime}, + {name: "open files", want: 2048, have: l.OpenFiles}, + {name: "msgqueue size", want: 819200, have: l.MsqqueueSize}, + {name: "nice priority", want: 0, have: l.NicePriority}, + {name: "address space", want: -1, have: l.AddressSpace}, + } { + if test.want != test.have { + t.Errorf("want %s %d, have %d", test.name, test.want, test.have) + } + } +} diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/procfs/proc_stat.go juju-core-2.0.0/src/github.com/prometheus/procfs/proc_stat.go --- juju-core-2.0~beta15/src/github.com/prometheus/procfs/proc_stat.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/procfs/proc_stat.go 2016-10-13 14:32:04.000000000 +0000 @@ -0,0 +1,175 @@ +package procfs + +import ( + "bytes" + "fmt" + "io/ioutil" + "os" +) + +// Originally, this USER_HZ value was dynamically retrieved via a sysconf call +// which required cgo. However, that caused a lot of problems regarding +// cross-compilation. Alternatives such as running a binary to determine the +// value, or trying to derive it in some other way were all problematic. After +// much research it was determined that USER_HZ is actually hardcoded to 100 on +// all Go-supported platforms as of the time of this writing. This is why we +// decided to hardcode it here as well. 
It is not impossible that there could +// be systems with exceptions, but they should be very exotic edge cases, and +// in that case, the worst outcome will be two misreported metrics. +// +// See also the following discussions: +// +// - https://github.com/prometheus/node_exporter/issues/52 +// - https://github.com/prometheus/procfs/pull/2 +// - http://stackoverflow.com/questions/17410841/how-does-user-hz-solve-the-jiffy-scaling-issue +const userHZ = 100 + +// ProcStat provides status information about the process, +// read from /proc/[pid]/stat. +type ProcStat struct { + // The process ID. + PID int + // The filename of the executable. + Comm string + // The process state. + State string + // The PID of the parent of this process. + PPID int + // The process group ID of the process. + PGRP int + // The session ID of the process. + Session int + // The controlling terminal of the process. + TTY int + // The ID of the foreground process group of the controlling terminal of + // the process. + TPGID int + // The kernel flags word of the process. + Flags uint + // The number of minor faults the process has made which have not required + // loading a memory page from disk. + MinFlt uint + // The number of minor faults that the process's waited-for children have + // made. + CMinFlt uint + // The number of major faults the process has made which have required + // loading a memory page from disk. + MajFlt uint + // The number of major faults that the process's waited-for children have + // made. + CMajFlt uint + // Amount of time that this process has been scheduled in user mode, + // measured in clock ticks. + UTime uint + // Amount of time that this process has been scheduled in kernel mode, + // measured in clock ticks. + STime uint + // Amount of time that this process's waited-for children have been + // scheduled in user mode, measured in clock ticks. 
+ CUTime uint + // Amount of time that this process's waited-for children have been + // scheduled in kernel mode, measured in clock ticks. + CSTime uint + // For processes running a real-time scheduling policy, this is the negated + // scheduling priority, minus one. + Priority int + // The nice value, a value in the range 19 (low priority) to -20 (high + // priority). + Nice int + // Number of threads in this process. + NumThreads int + // The time the process started after system boot, the value is expressed + // in clock ticks. + Starttime uint64 + // Virtual memory size in bytes. + VSize int + // Resident set size in pages. + RSS int + + fs FS +} + +// NewStat returns the current status information of the process. +func (p Proc) NewStat() (ProcStat, error) { + f, err := os.Open(p.path("stat")) + if err != nil { + return ProcStat{}, err + } + defer f.Close() + + data, err := ioutil.ReadAll(f) + if err != nil { + return ProcStat{}, err + } + + var ( + ignore int + + s = ProcStat{PID: p.PID, fs: p.fs} + l = bytes.Index(data, []byte("(")) + r = bytes.LastIndex(data, []byte(")")) + ) + + if l < 0 || r < 0 { + return ProcStat{}, fmt.Errorf( + "unexpected format, couldn't extract comm: %s", + data, + ) + } + + s.Comm = string(data[l+1 : r]) + _, err = fmt.Fscan( + bytes.NewBuffer(data[r+2:]), + &s.State, + &s.PPID, + &s.PGRP, + &s.Session, + &s.TTY, + &s.TPGID, + &s.Flags, + &s.MinFlt, + &s.CMinFlt, + &s.MajFlt, + &s.CMajFlt, + &s.UTime, + &s.STime, + &s.CUTime, + &s.CSTime, + &s.Priority, + &s.Nice, + &s.NumThreads, + &ignore, + &s.Starttime, + &s.VSize, + &s.RSS, + ) + if err != nil { + return ProcStat{}, err + } + + return s, nil +} + +// VirtualMemory returns the virtual memory size in bytes. +func (s ProcStat) VirtualMemory() int { + return s.VSize +} + +// ResidentMemory returns the resident memory size in bytes. +func (s ProcStat) ResidentMemory() int { + return s.RSS * os.Getpagesize() +} + +// StartTime returns the unix timestamp of the process in seconds. 
+func (s ProcStat) StartTime() (float64, error) { + stat, err := s.fs.NewStat() + if err != nil { + return 0, err + } + return float64(stat.BootTime) + (float64(s.Starttime) / userHZ), nil +} + +// CPUTime returns the total CPU user and system time in seconds. +func (s ProcStat) CPUTime() float64 { + return float64(s.UTime+s.STime) / userHZ +} diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/procfs/proc_stat_test.go juju-core-2.0.0/src/github.com/prometheus/procfs/proc_stat_test.go --- juju-core-2.0~beta15/src/github.com/prometheus/procfs/proc_stat_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/procfs/proc_stat_test.go 2016-10-13 14:32:04.000000000 +0000 @@ -0,0 +1,110 @@ +package procfs + +import ( + "os" + "testing" +) + +func TestProcStat(t *testing.T) { + p, err := FS("fixtures").NewProc(26231) + if err != nil { + t.Fatal(err) + } + + s, err := p.NewStat() + if err != nil { + t.Fatal(err) + } + + for _, test := range []struct { + name string + want int + have int + }{ + {name: "pid", want: 26231, have: s.PID}, + {name: "user time", want: 1677, have: int(s.UTime)}, + {name: "system time", want: 44, have: int(s.STime)}, + {name: "start time", want: 82375, have: int(s.Starttime)}, + {name: "virtual memory size", want: 56274944, have: s.VSize}, + {name: "resident set size", want: 1981, have: s.RSS}, + } { + if test.want != test.have { + t.Errorf("want %s %d, have %d", test.name, test.want, test.have) + } + } +} + +func TestProcStatComm(t *testing.T) { + s1, err := testProcStat(26231) + if err != nil { + t.Fatal(err) + } + if want, have := "vim", s1.Comm; want != have { + t.Errorf("want comm %s, have %s", want, have) + } + + s2, err := testProcStat(584) + if err != nil { + t.Fatal(err) + } + if want, have := "(a b ) ( c d) ", s2.Comm; want != have { + t.Errorf("want comm %s, have %s", want, have) + } +} + +func TestProcStatVirtualMemory(t *testing.T) { + s, err := testProcStat(26231) + if err != nil { + t.Fatal(err) 
+ } + + if want, have := 56274944, s.VirtualMemory(); want != have { + t.Errorf("want virtual memory %d, have %d", want, have) + } +} + +func TestProcStatResidentMemory(t *testing.T) { + s, err := testProcStat(26231) + if err != nil { + t.Fatal(err) + } + + if want, have := 1981*os.Getpagesize(), s.ResidentMemory(); want != have { + t.Errorf("want resident memory %d, have %d", want, have) + } +} + +func TestProcStatStartTime(t *testing.T) { + s, err := testProcStat(26231) + if err != nil { + t.Fatal(err) + } + + time, err := s.StartTime() + if err != nil { + t.Fatal(err) + } + if want, have := 1418184099.75, time; want != have { + t.Errorf("want start time %f, have %f", want, have) + } +} + +func TestProcStatCPUTime(t *testing.T) { + s, err := testProcStat(26231) + if err != nil { + t.Fatal(err) + } + + if want, have := 17.21, s.CPUTime(); want != have { + t.Errorf("want cpu time %f, have %f", want, have) + } +} + +func testProcStat(pid int) (ProcStat, error) { + p, err := FS("fixtures").NewProc(pid) + if err != nil { + return ProcStat{}, err + } + + return p.NewStat() +} diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/procfs/proc_test.go juju-core-2.0.0/src/github.com/prometheus/procfs/proc_test.go --- juju-core-2.0~beta15/src/github.com/prometheus/procfs/proc_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/procfs/proc_test.go 2016-10-13 14:32:04.000000000 +0000 @@ -0,0 +1,160 @@ +package procfs + +import ( + "reflect" + "sort" + "testing" +) + +func TestSelf(t *testing.T) { + fs := FS("fixtures") + + p1, err := fs.NewProc(26231) + if err != nil { + t.Fatal(err) + } + p2, err := fs.Self() + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(p1, p2) { + t.Errorf("want process %v, have %v", p1, p2) + } +} + +func TestAllProcs(t *testing.T) { + procs, err := FS("fixtures").AllProcs() + if err != nil { + t.Fatal(err) + } + sort.Sort(procs) + for i, p := range []*Proc{{PID: 584}, {PID: 26231}} { + if want, 
have := p.PID, procs[i].PID; want != have { + t.Errorf("want processes %d, have %d", want, have) + } + } +} + +func TestCmdLine(t *testing.T) { + for _, tt := range []struct { + process int + want []string + }{ + {process: 26231, want: []string{"vim", "test.go", "+10"}}, + {process: 26232, want: []string{}}, + } { + p1, err := FS("fixtures").NewProc(tt.process) + if err != nil { + t.Fatal(err) + } + c1, err := p1.CmdLine() + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(tt.want, c1) { + t.Errorf("want cmdline %v, have %v", tt.want, c1) + } + } +} + +func TestComm(t *testing.T) { + for _, tt := range []struct { + process int + want string + }{ + {process: 26231, want: "vim"}, + {process: 26232, want: "ata_sff"}, + } { + p1, err := FS("fixtures").NewProc(tt.process) + if err != nil { + t.Fatal(err) + } + c1, err := p1.Comm() + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(tt.want, c1) { + t.Errorf("want comm %v, have %v", tt.want, c1) + } + } +} + +func TestExecutable(t *testing.T) { + for _, tt := range []struct { + process int + want string + }{ + {process: 26231, want: "/usr/bin/vim"}, + {process: 26232, want: ""}, + } { + p, err := FS("fixtures").NewProc(tt.process) + if err != nil { + t.Fatal(err) + } + exe, err := p.Executable() + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(tt.want, exe) { + t.Errorf("want absolute path to cmdline %v, have %v", tt.want, exe) + } + } +} + +func TestFileDescriptors(t *testing.T) { + p1, err := FS("fixtures").NewProc(26231) + if err != nil { + t.Fatal(err) + } + fds, err := p1.FileDescriptors() + if err != nil { + t.Fatal(err) + } + sort.Sort(byUintptr(fds)) + if want := []uintptr{0, 1, 2, 3, 10}; !reflect.DeepEqual(want, fds) { + t.Errorf("want fds %v, have %v", want, fds) + } +} + +func TestFileDescriptorTargets(t *testing.T) { + p1, err := FS("fixtures").NewProc(26231) + if err != nil { + t.Fatal(err) + } + fds, err := p1.FileDescriptorTargets() + if err != nil { + t.Fatal(err) + } + 
sort.Strings(fds) + var want = []string{ + "../../symlinktargets/abc", + "../../symlinktargets/def", + "../../symlinktargets/ghi", + "../../symlinktargets/uvw", + "../../symlinktargets/xyz", + } + if !reflect.DeepEqual(want, fds) { + t.Errorf("want fds %v, have %v", want, fds) + } +} + +func TestFileDescriptorsLen(t *testing.T) { + p1, err := FS("fixtures").NewProc(26231) + if err != nil { + t.Fatal(err) + } + l, err := p1.FileDescriptorsLen() + if err != nil { + t.Fatal(err) + } + if want, have := 5, l; want != have { + t.Errorf("want fds %d, have %d", want, have) + } +} + +type byUintptr []uintptr + +func (a byUintptr) Len() int { return len(a) } +func (a byUintptr) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a byUintptr) Less(i, j int) bool { return a[i] < a[j] } diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/procfs/README.md juju-core-2.0.0/src/github.com/prometheus/procfs/README.md --- juju-core-2.0~beta15/src/github.com/prometheus/procfs/README.md 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/procfs/README.md 2016-10-13 14:32:04.000000000 +0000 @@ -0,0 +1,10 @@ +# procfs + +This procfs package provides functions to retrieve system, kernel and process +metrics from the pseudo-filesystem proc. + +*WARNING*: This package is a work in progress. Its API may still break in +backwards-incompatible ways without warnings. Use it at your own risk. 
+ +[![GoDoc](https://godoc.org/github.com/prometheus/procfs?status.png)](https://godoc.org/github.com/prometheus/procfs) +[![Build Status](https://travis-ci.org/prometheus/procfs.svg?branch=master)](https://travis-ci.org/prometheus/procfs) diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/procfs/stat.go juju-core-2.0.0/src/github.com/prometheus/procfs/stat.go --- juju-core-2.0~beta15/src/github.com/prometheus/procfs/stat.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/procfs/stat.go 2016-10-13 14:32:04.000000000 +0000 @@ -0,0 +1,56 @@ +package procfs + +import ( + "bufio" + "fmt" + "os" + "strconv" + "strings" +) + +// Stat represents kernel/system statistics. +type Stat struct { + // Boot time in seconds since the Epoch. + BootTime int64 +} + +// NewStat returns kernel/system statistics read from /proc/stat. +func NewStat() (Stat, error) { + fs, err := NewFS(DefaultMountPoint) + if err != nil { + return Stat{}, err + } + + return fs.NewStat() +} + +// NewStat returns an information about current kernel/system statistics. 
+func (fs FS) NewStat() (Stat, error) { + f, err := os.Open(fs.Path("stat")) + if err != nil { + return Stat{}, err + } + defer f.Close() + + s := bufio.NewScanner(f) + for s.Scan() { + line := s.Text() + if !strings.HasPrefix(line, "btime") { + continue + } + fields := strings.Fields(line) + if len(fields) != 2 { + return Stat{}, fmt.Errorf("couldn't parse %s line %s", f.Name(), line) + } + i, err := strconv.ParseInt(fields[1], 10, 32) + if err != nil { + return Stat{}, fmt.Errorf("couldn't parse %s: %s", fields[1], err) + } + return Stat{BootTime: i}, nil + } + if err := s.Err(); err != nil { + return Stat{}, fmt.Errorf("couldn't parse %s: %s", f.Name(), err) + } + + return Stat{}, fmt.Errorf("couldn't parse %s, missing btime", f.Name()) +} diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/procfs/stat_test.go juju-core-2.0.0/src/github.com/prometheus/procfs/stat_test.go --- juju-core-2.0~beta15/src/github.com/prometheus/procfs/stat_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/procfs/stat_test.go 2016-10-13 14:32:04.000000000 +0000 @@ -0,0 +1,14 @@ +package procfs + +import "testing" + +func TestStat(t *testing.T) { + s, err := FS("fixtures").NewStat() + if err != nil { + t.Fatal(err) + } + + if want, have := int64(1418183276), s.BootTime; want != have { + t.Errorf("want boot time %d, have %d", want, have) + } +} diff -Nru juju-core-2.0~beta15/src/github.com/prometheus/procfs/.travis.yml juju-core-2.0.0/src/github.com/prometheus/procfs/.travis.yml --- juju-core-2.0~beta15/src/github.com/prometheus/procfs/.travis.yml 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/github.com/prometheus/procfs/.travis.yml 2016-10-13 14:32:04.000000000 +0000 @@ -0,0 +1,5 @@ +sudo: false +language: go +go: + - 1.5 + - 1.6 diff -Nru juju-core-2.0~beta15/src/golang.org/x/crypto/acme/acme.go juju-core-2.0.0/src/golang.org/x/crypto/acme/acme.go --- juju-core-2.0~beta15/src/golang.org/x/crypto/acme/acme.go 1970-01-01 
00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/golang.org/x/crypto/acme/acme.go 2016-10-13 14:32:00.000000000 +0000 @@ -0,0 +1,944 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package acme provides an implementation of the +// Automatic Certificate Management Environment (ACME) spec. +// See https://tools.ietf.org/html/draft-ietf-acme-acme-02 for details. +// +// Most common scenarios will want to use autocert subdirectory instead, +// which provides automatic access to certificates from Let's Encrypt +// and any other ACME-based CA. +// +// This package is a work in progress and makes no API stability promises. +package acme + +import ( + "bytes" + "crypto" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/sha256" + "crypto/tls" + "crypto/x509" + "encoding/base64" + "encoding/hex" + "encoding/json" + "encoding/pem" + "errors" + "fmt" + "io" + "io/ioutil" + "math/big" + "net/http" + "strconv" + "strings" + "sync" + "time" + + "golang.org/x/net/context" + "golang.org/x/net/context/ctxhttp" +) + +// LetsEncryptURL is the Directory endpoint of Let's Encrypt CA. +const LetsEncryptURL = "https://acme-v01.api.letsencrypt.org/directory" + +const ( + maxChainLen = 5 // max depth and breadth of a certificate chain + maxCertSize = 1 << 20 // max size of a certificate, in bytes +) + +// CertOption is an optional argument type for Client methods which manipulate +// certificate data. +type CertOption interface { + privateCertOpt() +} + +// WithKey creates an option holding a private/public key pair. +// The private part signs a certificate, and the public part represents the signee. +func WithKey(key crypto.Signer) CertOption { + return &certOptKey{key} +} + +type certOptKey struct { + key crypto.Signer +} + +func (*certOptKey) privateCertOpt() {} + +// WithTemplate creates an option for specifying a certificate template. 
+// See x509.CreateCertificate for template usage details. +// +// In TLSSNIxChallengeCert methods, the template is also used as parent, +// resulting in a self-signed certificate. +// The DNSNames field of t is always overwritten for tls-sni challenge certs. +func WithTemplate(t *x509.Certificate) CertOption { + return (*certOptTemplate)(t) +} + +type certOptTemplate x509.Certificate + +func (*certOptTemplate) privateCertOpt() {} + +// Client is an ACME client. +// The only required field is Key. An example of creating a client with a new key +// is as follows: +// +// key, err := rsa.GenerateKey(rand.Reader, 2048) +// if err != nil { +// log.Fatal(err) +// } +// client := &Client{Key: key} +// +type Client struct { + // Key is the account key used to register with a CA and sign requests. + // Key.Public() must return a *rsa.PublicKey or *ecdsa.PublicKey. + Key crypto.Signer + + // HTTPClient optionally specifies an HTTP client to use + // instead of http.DefaultClient. + HTTPClient *http.Client + + // DirectoryURL points to the CA directory endpoint. + // If empty, LetsEncryptURL is used. + // Mutating this value after a successful call of Client's Discover method + // will have no effect. + DirectoryURL string + + dirMu sync.Mutex // guards writes to dir + dir *Directory // cached result of Client's Discover method +} + +// Discover performs ACME server discovery using c.DirectoryURL. +// +// It caches successful result. So, subsequent calls will not result in +// a network round-trip. This also means mutating c.DirectoryURL after successful call +// of this method will have no effect. 
+func (c *Client) Discover(ctx context.Context) (Directory, error) { + c.dirMu.Lock() + defer c.dirMu.Unlock() + if c.dir != nil { + return *c.dir, nil + } + + dirURL := c.DirectoryURL + if dirURL == "" { + dirURL = LetsEncryptURL + } + res, err := ctxhttp.Get(ctx, c.HTTPClient, dirURL) + if err != nil { + return Directory{}, err + } + defer res.Body.Close() + if res.StatusCode != http.StatusOK { + return Directory{}, responseError(res) + } + + var v struct { + Reg string `json:"new-reg"` + Authz string `json:"new-authz"` + Cert string `json:"new-cert"` + Revoke string `json:"revoke-cert"` + Meta struct { + Terms string `json:"terms-of-service"` + Website string `json:"website"` + CAA []string `json:"caa-identities"` + } + } + if json.NewDecoder(res.Body).Decode(&v); err != nil { + return Directory{}, err + } + c.dir = &Directory{ + RegURL: v.Reg, + AuthzURL: v.Authz, + CertURL: v.Cert, + RevokeURL: v.Revoke, + Terms: v.Meta.Terms, + Website: v.Meta.Website, + CAA: v.Meta.CAA, + } + return *c.dir, nil +} + +// CreateCert requests a new certificate using the Certificate Signing Request csr encoded in DER format. +// The exp argument indicates the desired certificate validity duration. CA may issue a certificate +// with a different duration. +// If the bundle argument is true, the returned value will also contain the CA (issuer) certificate chain. +// +// In the case where CA server does not provide the issued certificate in the response, +// CreateCert will poll certURL using c.FetchCert, which will result in additional round-trips. +// In such scenario the caller can cancel the polling with ctx. +// +// CreateCert returns an error if the CA's response or chain was unreasonably large. +// Callers are encouraged to parse the returned value to ensure the certificate is valid and has the expected features. 
+func (c *Client) CreateCert(ctx context.Context, csr []byte, exp time.Duration, bundle bool) (der [][]byte, certURL string, err error) { + if _, err := c.Discover(ctx); err != nil { + return nil, "", err + } + + req := struct { + Resource string `json:"resource"` + CSR string `json:"csr"` + NotBefore string `json:"notBefore,omitempty"` + NotAfter string `json:"notAfter,omitempty"` + }{ + Resource: "new-cert", + CSR: base64.RawURLEncoding.EncodeToString(csr), + } + now := timeNow() + req.NotBefore = now.Format(time.RFC3339) + if exp > 0 { + req.NotAfter = now.Add(exp).Format(time.RFC3339) + } + + res, err := postJWS(ctx, c.HTTPClient, c.Key, c.dir.CertURL, req) + if err != nil { + return nil, "", err + } + defer res.Body.Close() + if res.StatusCode != http.StatusCreated { + return nil, "", responseError(res) + } + + curl := res.Header.Get("location") // cert permanent URL + if res.ContentLength == 0 { + // no cert in the body; poll until we get it + cert, err := c.FetchCert(ctx, curl, bundle) + return cert, curl, err + } + // slurp issued cert and CA chain, if requested + cert, err := responseCert(ctx, c.HTTPClient, res, bundle) + return cert, curl, err +} + +// FetchCert retrieves already issued certificate from the given url, in DER format. +// It retries the request until the certificate is successfully retrieved, +// context is cancelled by the caller or an error response is received. +// +// The returned value will also contain the CA (issuer) certificate if the bundle argument is true. +// +// FetchCert returns an error if the CA's response or chain was unreasonably large. +// Callers are encouraged to parse the returned value to ensure the certificate is valid +// and has expected features. 
+func (c *Client) FetchCert(ctx context.Context, url string, bundle bool) ([][]byte, error) { + for { + res, err := ctxhttp.Get(ctx, c.HTTPClient, url) + if err != nil { + return nil, err + } + defer res.Body.Close() + if res.StatusCode == http.StatusOK { + return responseCert(ctx, c.HTTPClient, res, bundle) + } + if res.StatusCode > 299 { + return nil, responseError(res) + } + d := retryAfter(res.Header.Get("retry-after"), 3*time.Second) + select { + case <-time.After(d): + // retry + case <-ctx.Done(): + return nil, ctx.Err() + } + } +} + +// RevokeCert revokes a previously issued certificate cert, provided in DER format. +// +// The key argument, used to sign the request, must be authorized +// to revoke the certificate. It's up to the CA to decide which keys are authorized. +// For instance, the key pair of the certificate may be authorized. +// If the key is nil, c.Key is used instead. +func (c *Client) RevokeCert(ctx context.Context, key crypto.Signer, cert []byte, reason CRLReasonCode) error { + if _, err := c.Discover(ctx); err != nil { + return err + } + + body := &struct { + Resource string `json:"resource"` + Cert string `json:"certificate"` + Reason int `json:"reason"` + }{ + Resource: "revoke-cert", + Cert: base64.RawURLEncoding.EncodeToString(cert), + Reason: int(reason), + } + if key == nil { + key = c.Key + } + res, err := postJWS(ctx, c.HTTPClient, key, c.dir.RevokeURL, body) + if err != nil { + return err + } + defer res.Body.Close() + if res.StatusCode != http.StatusOK { + return responseError(res) + } + return nil +} + +// AcceptTOS always returns true to indicate the acceptance of a CA's Terms of Service +// during account registration. See Register method of Client for more details. +func AcceptTOS(tosURL string) bool { return true } + +// Register creates a new account registration by following the "new-reg" flow. +// It returns registered account. The a argument is not modified. 
+// +// The registration may require the caller to agree to the CA's Terms of Service (TOS). +// If so, and the account has not indicated the acceptance of the terms (see Account for details), +// Register calls prompt with a TOS URL provided by the CA. Prompt should report +// whether the caller agrees to the terms. To always accept the terms, the caller can use AcceptTOS. +func (c *Client) Register(ctx context.Context, a *Account, prompt func(tosURL string) bool) (*Account, error) { + if _, err := c.Discover(ctx); err != nil { + return nil, err + } + + var err error + if a, err = c.doReg(ctx, c.dir.RegURL, "new-reg", a); err != nil { + return nil, err + } + var accept bool + if a.CurrentTerms != "" && a.CurrentTerms != a.AgreedTerms { + accept = prompt(a.CurrentTerms) + } + if accept { + a.AgreedTerms = a.CurrentTerms + a, err = c.UpdateReg(ctx, a) + } + return a, err +} + +// GetReg retrieves an existing registration. +// The url argument is an Account URI. +func (c *Client) GetReg(ctx context.Context, url string) (*Account, error) { + a, err := c.doReg(ctx, url, "reg", nil) + if err != nil { + return nil, err + } + a.URI = url + return a, nil +} + +// UpdateReg updates an existing registration. +// It returns an updated account copy. The provided account is not modified. +func (c *Client) UpdateReg(ctx context.Context, a *Account) (*Account, error) { + uri := a.URI + a, err := c.doReg(ctx, uri, "reg", a) + if err != nil { + return nil, err + } + a.URI = uri + return a, nil +} + +// Authorize performs the initial step in an authorization flow. +// The caller will then need to choose from and perform a set of returned +// challenges using c.Accept in order to successfully complete authorization. +// +// If an authorization has been previously granted, the CA may return +// a valid authorization (Authorization.Status is StatusValid). If so, the caller +// need not fulfill any challenge and can proceed to requesting a certificate. 
+func (c *Client) Authorize(ctx context.Context, domain string) (*Authorization, error) { + if _, err := c.Discover(ctx); err != nil { + return nil, err + } + + type authzID struct { + Type string `json:"type"` + Value string `json:"value"` + } + req := struct { + Resource string `json:"resource"` + Identifier authzID `json:"identifier"` + }{ + Resource: "new-authz", + Identifier: authzID{Type: "dns", Value: domain}, + } + res, err := postJWS(ctx, c.HTTPClient, c.Key, c.dir.AuthzURL, req) + if err != nil { + return nil, err + } + defer res.Body.Close() + if res.StatusCode != http.StatusCreated { + return nil, responseError(res) + } + + var v wireAuthz + if err := json.NewDecoder(res.Body).Decode(&v); err != nil { + return nil, fmt.Errorf("acme: invalid response: %v", err) + } + if v.Status != StatusPending && v.Status != StatusValid { + return nil, fmt.Errorf("acme: unexpected status: %s", v.Status) + } + return v.authorization(res.Header.Get("Location")), nil +} + +// GetAuthorization retrieves an authorization identified by the given URL. +// +// If a caller needs to poll an authorization until its status is final, +// see the WaitAuthorization method. +func (c *Client) GetAuthorization(ctx context.Context, url string) (*Authorization, error) { + res, err := ctxhttp.Get(ctx, c.HTTPClient, url) + if err != nil { + return nil, err + } + defer res.Body.Close() + if res.StatusCode != http.StatusOK && res.StatusCode != http.StatusAccepted { + return nil, responseError(res) + } + var v wireAuthz + if err := json.NewDecoder(res.Body).Decode(&v); err != nil { + return nil, fmt.Errorf("acme: invalid response: %v", err) + } + return v.authorization(url), nil +} + +// RevokeAuthorization relinquishes an existing authorization identified +// by the given URL. +// The url argument is an Authorization.URI value. 
+// +// If successful, the caller will be required to obtain a new authorization +// using the Authorize method before being able to request a new certificate +// for the domain associated with the authorization. +// +// It does not revoke existing certificates. +func (c *Client) RevokeAuthorization(ctx context.Context, url string) error { + req := struct { + Resource string `json:"resource"` + Delete bool `json:"delete"` + }{ + Resource: "authz", + Delete: true, + } + res, err := postJWS(ctx, c.HTTPClient, c.Key, url, req) + if err != nil { + return err + } + defer res.Body.Close() + if res.StatusCode != http.StatusOK { + return responseError(res) + } + return nil +} + +// WaitAuthorization polls an authorization at the given URL +// until it is in one of the final states, StatusValid or StatusInvalid, +// or the context is done. +// +// It returns a non-nil Authorization only if its Status is StatusValid. +// In all other cases WaitAuthorization returns an error. +// If the Status is StatusInvalid, the returned error is ErrAuthorizationFailed. 
+func (c *Client) WaitAuthorization(ctx context.Context, url string) (*Authorization, error) { + var count int + sleep := func(v string, inc int) error { + count += inc + d := backoff(count, 10*time.Second) + d = retryAfter(v, d) + wakeup := time.NewTimer(d) + defer wakeup.Stop() + select { + case <-ctx.Done(): + return ctx.Err() + case <-wakeup.C: + return nil + } + } + + for { + res, err := ctxhttp.Get(ctx, c.HTTPClient, url) + if err != nil { + return nil, err + } + retry := res.Header.Get("retry-after") + if res.StatusCode != http.StatusOK && res.StatusCode != http.StatusAccepted { + res.Body.Close() + if err := sleep(retry, 1); err != nil { + return nil, err + } + continue + } + var raw wireAuthz + err = json.NewDecoder(res.Body).Decode(&raw) + res.Body.Close() + if err != nil { + if err := sleep(retry, 0); err != nil { + return nil, err + } + continue + } + if raw.Status == StatusValid { + return raw.authorization(url), nil + } + if raw.Status == StatusInvalid { + return nil, ErrAuthorizationFailed + } + if err := sleep(retry, 0); err != nil { + return nil, err + } + } +} + +// GetChallenge retrieves the current status of an challenge. +// +// A client typically polls a challenge status using this method. +func (c *Client) GetChallenge(ctx context.Context, url string) (*Challenge, error) { + res, err := ctxhttp.Get(ctx, c.HTTPClient, url) + if err != nil { + return nil, err + } + defer res.Body.Close() + if res.StatusCode != http.StatusOK && res.StatusCode != http.StatusAccepted { + return nil, responseError(res) + } + v := wireChallenge{URI: url} + if err := json.NewDecoder(res.Body).Decode(&v); err != nil { + return nil, fmt.Errorf("acme: invalid response: %v", err) + } + return v.challenge(), nil +} + +// Accept informs the server that the client accepts one of its challenges +// previously obtained with c.Authorize. +// +// The server will then perform the validation asynchronously. 
+func (c *Client) Accept(ctx context.Context, chal *Challenge) (*Challenge, error) { + auth, err := keyAuth(c.Key.Public(), chal.Token) + if err != nil { + return nil, err + } + + req := struct { + Resource string `json:"resource"` + Type string `json:"type"` + Auth string `json:"keyAuthorization"` + }{ + Resource: "challenge", + Type: chal.Type, + Auth: auth, + } + res, err := postJWS(ctx, c.HTTPClient, c.Key, chal.URI, req) + if err != nil { + return nil, err + } + defer res.Body.Close() + // Note: the protocol specifies 200 as the expected response code, but + // letsencrypt seems to be returning 202. + if res.StatusCode != http.StatusOK && res.StatusCode != http.StatusAccepted { + return nil, responseError(res) + } + + var v wireChallenge + if err := json.NewDecoder(res.Body).Decode(&v); err != nil { + return nil, fmt.Errorf("acme: invalid response: %v", err) + } + return v.challenge(), nil +} + +// DNS01ChallengeRecord returns a DNS record value for a dns-01 challenge response. +// A TXT record containing the returned value must be provisioned under +// "_acme-challenge" name of the domain being validated. +// +// The token argument is a Challenge.Token value. +func (c *Client) DNS01ChallengeRecord(token string) (string, error) { + ka, err := keyAuth(c.Key.Public(), token) + if err != nil { + return "", err + } + b := sha256.Sum256([]byte(ka)) + return base64.RawURLEncoding.EncodeToString(b[:]), nil +} + +// HTTP01ChallengeResponse returns the response for an http-01 challenge. +// Servers should respond with the value to HTTP requests at the URL path +// provided by HTTP01ChallengePath to validate the challenge and prove control +// over a domain name. +// +// The token argument is a Challenge.Token value. +func (c *Client) HTTP01ChallengeResponse(token string) (string, error) { + return keyAuth(c.Key.Public(), token) +} + +// HTTP01ChallengePath returns the URL path at which the response for an http-01 challenge +// should be provided by the servers. 
+// The response value can be obtained with HTTP01ChallengeResponse. +// +// The token argument is a Challenge.Token value. +func (c *Client) HTTP01ChallengePath(token string) string { + return "/.well-known/acme-challenge/" + token +} + +// TLSSNI01ChallengeCert creates a certificate for TLS-SNI-01 challenge response. +// Servers can present the certificate to validate the challenge and prove control +// over a domain name. +// +// The implementation is incomplete in that the returned value is a single certificate, +// computed only for Z0 of the key authorization. ACME CAs are expected to update +// their implementations to use the newer version, TLS-SNI-02. +// For more details on TLS-SNI-01 see https://tools.ietf.org/html/draft-ietf-acme-acme-01#section-7.3. +// +// The token argument is a Challenge.Token value. +// If a WithKey option is provided, its private part signs the returned cert, +// and the public part is used to specify the signee. +// If no WithKey option is provided, a new ECDSA key is generated using P-256 curve. +// +// The returned certificate is valid for the next 24 hours and must be presented only when +// the server name of the client hello matches exactly the returned name value. +func (c *Client) TLSSNI01ChallengeCert(token string, opt ...CertOption) (cert tls.Certificate, name string, err error) { + ka, err := keyAuth(c.Key.Public(), token) + if err != nil { + return tls.Certificate{}, "", err + } + b := sha256.Sum256([]byte(ka)) + h := hex.EncodeToString(b[:]) + name = fmt.Sprintf("%s.%s.acme.invalid", h[:32], h[32:]) + cert, err = tlsChallengeCert([]string{name}, opt) + if err != nil { + return tls.Certificate{}, "", err + } + return cert, name, nil +} + +// TLSSNI02ChallengeCert creates a certificate for TLS-SNI-02 challenge response. +// Servers can present the certificate to validate the challenge and prove control +// over a domain name. 
For more details on TLS-SNI-02 see +// https://tools.ietf.org/html/draft-ietf-acme-acme-03#section-7.3. +// +// The token argument is a Challenge.Token value. +// If a WithKey option is provided, its private part signs the returned cert, +// and the public part is used to specify the signee. +// If no WithKey option is provided, a new ECDSA key is generated using P-256 curve. +// +// The returned certificate is valid for the next 24 hours and must be presented only when +// the server name in the client hello matches exactly the returned name value. +func (c *Client) TLSSNI02ChallengeCert(token string, opt ...CertOption) (cert tls.Certificate, name string, err error) { + b := sha256.Sum256([]byte(token)) + h := hex.EncodeToString(b[:]) + sanA := fmt.Sprintf("%s.%s.token.acme.invalid", h[:32], h[32:]) + + ka, err := keyAuth(c.Key.Public(), token) + if err != nil { + return tls.Certificate{}, "", err + } + b = sha256.Sum256([]byte(ka)) + h = hex.EncodeToString(b[:]) + sanB := fmt.Sprintf("%s.%s.ka.acme.invalid", h[:32], h[32:]) + + cert, err = tlsChallengeCert([]string{sanA, sanB}, opt) + if err != nil { + return tls.Certificate{}, "", err + } + return cert, sanA, nil +} + +// doReg sends all types of registration requests. +// The type of request is identified by typ argument, which is a "resource" +// in the ACME spec terms. +// +// A non-nil acct argument indicates whether the intention is to mutate data +// of the Account. Only Contact and Agreement of its fields are used +// in such cases. 
+func (c *Client) doReg(ctx context.Context, url string, typ string, acct *Account) (*Account, error) { + req := struct { + Resource string `json:"resource"` + Contact []string `json:"contact,omitempty"` + Agreement string `json:"agreement,omitempty"` + }{ + Resource: typ, + } + if acct != nil { + req.Contact = acct.Contact + req.Agreement = acct.AgreedTerms + } + res, err := postJWS(ctx, c.HTTPClient, c.Key, url, req) + if err != nil { + return nil, err + } + defer res.Body.Close() + if res.StatusCode < 200 || res.StatusCode > 299 { + return nil, responseError(res) + } + + var v struct { + Contact []string + Agreement string + Authorizations string + Certificates string + } + if err := json.NewDecoder(res.Body).Decode(&v); err != nil { + return nil, fmt.Errorf("acme: invalid response: %v", err) + } + var tos string + if v := linkHeader(res.Header, "terms-of-service"); len(v) > 0 { + tos = v[0] + } + var authz string + if v := linkHeader(res.Header, "next"); len(v) > 0 { + authz = v[0] + } + return &Account{ + URI: res.Header.Get("Location"), + Contact: v.Contact, + AgreedTerms: v.Agreement, + CurrentTerms: tos, + Authz: authz, + Authorizations: v.Authorizations, + Certificates: v.Certificates, + }, nil +} + +func responseCert(ctx context.Context, client *http.Client, res *http.Response, bundle bool) ([][]byte, error) { + b, err := ioutil.ReadAll(io.LimitReader(res.Body, maxCertSize+1)) + if err != nil { + return nil, fmt.Errorf("acme: response stream: %v", err) + } + if len(b) > maxCertSize { + return nil, errors.New("acme: certificate is too big") + } + cert := [][]byte{b} + if !bundle { + return cert, nil + } + + // Append CA chain cert(s). 
+ // At least one is required according to the spec: + // https://tools.ietf.org/html/draft-ietf-acme-acme-03#section-6.3.1 + up := linkHeader(res.Header, "up") + if len(up) == 0 { + return nil, errors.New("acme: rel=up link not found") + } + if len(up) > maxChainLen { + return nil, errors.New("acme: rel=up link is too large") + } + for _, url := range up { + cc, err := chainCert(ctx, client, url, 0) + if err != nil { + return nil, err + } + cert = append(cert, cc...) + } + return cert, nil +} + +// responseError creates an error of Error type from resp. +func responseError(resp *http.Response) error { + // don't care if ReadAll returns an error: + // json.Unmarshal will fail in that case anyway + b, _ := ioutil.ReadAll(resp.Body) + e := struct { + Status int + Type string + Detail string + }{ + Status: resp.StatusCode, + } + if err := json.Unmarshal(b, &e); err != nil { + // this is not a regular error response: + // populate detail with anything we received, + // e.Status will already contain HTTP response code value + e.Detail = string(b) + if e.Detail == "" { + e.Detail = resp.Status + } + } + return &Error{ + StatusCode: e.Status, + ProblemType: e.Type, + Detail: e.Detail, + Header: resp.Header, + } +} + +// chainCert fetches CA certificate chain recursively by following "up" links. +// Each recursive call increments the depth by 1, resulting in an error +// if the recursion level reaches maxChainLen. +// +// First chainCert call starts with depth of 0. 
+func chainCert(ctx context.Context, client *http.Client, url string, depth int) ([][]byte, error) { + if depth >= maxChainLen { + return nil, errors.New("acme: certificate chain is too deep") + } + + res, err := ctxhttp.Get(ctx, client, url) + if err != nil { + return nil, err + } + defer res.Body.Close() + if res.StatusCode != http.StatusOK { + return nil, responseError(res) + } + b, err := ioutil.ReadAll(io.LimitReader(res.Body, maxCertSize+1)) + if err != nil { + return nil, err + } + if len(b) > maxCertSize { + return nil, errors.New("acme: certificate is too big") + } + chain := [][]byte{b} + + uplink := linkHeader(res.Header, "up") + if len(uplink) > maxChainLen { + return nil, errors.New("acme: certificate chain is too large") + } + for _, up := range uplink { + cc, err := chainCert(ctx, client, up, depth+1) + if err != nil { + return nil, err + } + chain = append(chain, cc...) + } + + return chain, nil +} + +// postJWS signs the body with the given key and POSTs it to the provided url. +// The body argument must be JSON-serializable. +func postJWS(ctx context.Context, client *http.Client, key crypto.Signer, url string, body interface{}) (*http.Response, error) { + nonce, err := fetchNonce(ctx, client, url) + if err != nil { + return nil, err + } + b, err := jwsEncodeJSON(body, key, nonce) + if err != nil { + return nil, err + } + return ctxhttp.Post(ctx, client, url, "application/jose+json", bytes.NewReader(b)) +} + +func fetchNonce(ctx context.Context, client *http.Client, url string) (string, error) { + resp, err := ctxhttp.Head(ctx, client, url) + if err != nil { + return "", nil + } + defer resp.Body.Close() + enc := resp.Header.Get("replay-nonce") + if enc == "" { + return "", errors.New("acme: nonce not found") + } + return enc, nil +} + +// linkHeader returns URI-Reference values of all Link headers +// with relation-type rel. +// See https://tools.ietf.org/html/rfc5988#section-5 for details. 
+func linkHeader(h http.Header, rel string) []string { + var links []string + for _, v := range h["Link"] { + parts := strings.Split(v, ";") + for _, p := range parts { + p = strings.TrimSpace(p) + if !strings.HasPrefix(p, "rel=") { + continue + } + if v := strings.Trim(p[4:], `"`); v == rel { + links = append(links, strings.Trim(parts[0], "<>")) + } + } + } + return links +} + +// retryAfter parses a Retry-After HTTP header value, +// trying to convert v into an int (seconds) or use http.ParseTime otherwise. +// It returns d if v cannot be parsed. +func retryAfter(v string, d time.Duration) time.Duration { + if i, err := strconv.Atoi(v); err == nil { + return time.Duration(i) * time.Second + } + t, err := http.ParseTime(v) + if err != nil { + return d + } + return t.Sub(timeNow()) +} + +// backoff computes a duration after which an n+1 retry iteration should occur +// using truncated exponential backoff algorithm. +// +// The n argument is always bounded between 0 and 30. +// The max argument defines upper bound for the returned value. +func backoff(n int, max time.Duration) time.Duration { + if n < 0 { + n = 0 + } + if n > 30 { + n = 30 + } + var d time.Duration + if x, err := rand.Int(rand.Reader, big.NewInt(1000)); err == nil { + d = time.Duration(x.Int64()) * time.Millisecond + } + d += time.Duration(1<<uint(n)) * time.Second + if d > max { + return max + } + return d +} + +// keyAuth generates a key authorization string for a given token. +func keyAuth(pub crypto.PublicKey, token string) (string, error) { + th, err := JWKThumbprint(pub) + if err != nil { + return "", err + } + return fmt.Sprintf("%s.%s", token, th), nil +} + +// tlsChallengeCert creates a temporary certificate for TLS-SNI challenges +// with the given SANs and auto-generated public/private key pair. +// To create a cert with a custom key pair, specify WithKey option. 
+func tlsChallengeCert(san []string, opt []CertOption) (tls.Certificate, error) { + var ( + key crypto.Signer + tmpl *x509.Certificate + ) + for _, o := range opt { + switch o := o.(type) { + case *certOptKey: + if key != nil { + return tls.Certificate{}, errors.New("acme: duplicate key option") + } + key = o.key + case *certOptTemplate: + var t = *(*x509.Certificate)(o) // shallow copy is ok + tmpl = &t + default: + // package's fault, if we let this happen: + panic(fmt.Sprintf("unsupported option type %T", o)) + } + } + if key == nil { + var err error + if key, err = ecdsa.GenerateKey(elliptic.P256(), rand.Reader); err != nil { + return tls.Certificate{}, err + } + } + if tmpl == nil { + tmpl = &x509.Certificate{ + SerialNumber: big.NewInt(1), + NotBefore: time.Now(), + NotAfter: time.Now().Add(24 * time.Hour), + BasicConstraintsValid: true, + KeyUsage: x509.KeyUsageKeyEncipherment, + } + } + tmpl.DNSNames = san + + der, err := x509.CreateCertificate(rand.Reader, tmpl, tmpl, key.Public(), key) + if err != nil { + return tls.Certificate{}, err + } + return tls.Certificate{ + Certificate: [][]byte{der}, + PrivateKey: key, + }, nil +} + +// encodePEM returns b encoded as PEM with block of type typ. +func encodePEM(typ string, b []byte) []byte { + pb := &pem.Block{Type: typ, Bytes: b} + return pem.EncodeToMemory(pb) +} + +// timeNow is useful for testing for fixed current time. +var timeNow = time.Now diff -Nru juju-core-2.0~beta15/src/golang.org/x/crypto/acme/acme_test.go juju-core-2.0.0/src/golang.org/x/crypto/acme/acme_test.go --- juju-core-2.0~beta15/src/golang.org/x/crypto/acme/acme_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/golang.org/x/crypto/acme/acme_test.go 2016-10-13 14:32:00.000000000 +0000 @@ -0,0 +1,1185 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package acme + +import ( + "bytes" + "crypto/rand" + "crypto/rsa" + "crypto/tls" + "crypto/x509" + "crypto/x509/pkix" + "encoding/base64" + "encoding/json" + "fmt" + "io/ioutil" + "math/big" + "net/http" + "net/http/httptest" + "reflect" + "sort" + "strings" + "testing" + "time" + + "golang.org/x/net/context" +) + +// Decodes a JWS-encoded request and unmarshals the decoded JSON into a provided +// interface. +func decodeJWSRequest(t *testing.T, v interface{}, r *http.Request) { + // Decode request + var req struct{ Payload string } + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + t.Fatal(err) + } + payload, err := base64.RawURLEncoding.DecodeString(req.Payload) + if err != nil { + t.Fatal(err) + } + err = json.Unmarshal(payload, v) + if err != nil { + t.Fatal(err) + } +} + +func TestDiscover(t *testing.T) { + const ( + reg = "https://example.com/acme/new-reg" + authz = "https://example.com/acme/new-authz" + cert = "https://example.com/acme/new-cert" + revoke = "https://example.com/acme/revoke-cert" + ) + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("content-type", "application/json") + fmt.Fprintf(w, `{ + "new-reg": %q, + "new-authz": %q, + "new-cert": %q, + "revoke-cert": %q + }`, reg, authz, cert, revoke) + })) + defer ts.Close() + c := Client{DirectoryURL: ts.URL} + dir, err := c.Discover(context.Background()) + if err != nil { + t.Fatal(err) + } + if dir.RegURL != reg { + t.Errorf("dir.RegURL = %q; want %q", dir.RegURL, reg) + } + if dir.AuthzURL != authz { + t.Errorf("dir.AuthzURL = %q; want %q", dir.AuthzURL, authz) + } + if dir.CertURL != cert { + t.Errorf("dir.CertURL = %q; want %q", dir.CertURL, cert) + } + if dir.RevokeURL != revoke { + t.Errorf("dir.RevokeURL = %q; want %q", dir.RevokeURL, revoke) + } +} + +func TestRegister(t *testing.T) { + contacts := []string{"mailto:admin@example.com"} + + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r 
*http.Request) { + if r.Method == "HEAD" { + w.Header().Set("replay-nonce", "test-nonce") + return + } + if r.Method != "POST" { + t.Errorf("r.Method = %q; want POST", r.Method) + } + + var j struct { + Resource string + Contact []string + Agreement string + } + decodeJWSRequest(t, &j, r) + + // Test request + if j.Resource != "new-reg" { + t.Errorf("j.Resource = %q; want new-reg", j.Resource) + } + if !reflect.DeepEqual(j.Contact, contacts) { + t.Errorf("j.Contact = %v; want %v", j.Contact, contacts) + } + + w.Header().Set("Location", "https://ca.tld/acme/reg/1") + w.Header().Set("Link", `<https://ca.tld/acme/new-authz>;rel="next"`) + w.Header().Add("Link", `<https://ca.tld/acme/recover-reg>;rel="recover"`) + w.Header().Add("Link", `<https://ca.tld/acme/terms>;rel="terms-of-service"`) + w.WriteHeader(http.StatusCreated) + b, _ := json.Marshal(contacts) + fmt.Fprintf(w, `{"contact": %s}`, b) + })) + defer ts.Close() + + prompt := func(url string) bool { + const terms = "https://ca.tld/acme/terms" + if url != terms { + t.Errorf("prompt url = %q; want %q", url, terms) + } + return false + } + + c := Client{Key: testKeyEC, dir: &Directory{RegURL: ts.URL}} + a := &Account{Contact: contacts} + var err error + if a, err = c.Register(context.Background(), a, prompt); err != nil { + t.Fatal(err) + } + if a.URI != "https://ca.tld/acme/reg/1" { + t.Errorf("a.URI = %q; want https://ca.tld/acme/reg/1", a.URI) + } + if a.Authz != "https://ca.tld/acme/new-authz" { + t.Errorf("a.Authz = %q; want https://ca.tld/acme/new-authz", a.Authz) + } + if a.CurrentTerms != "https://ca.tld/acme/terms" { + t.Errorf("a.CurrentTerms = %q; want https://ca.tld/acme/terms", a.CurrentTerms) + } + if !reflect.DeepEqual(a.Contact, contacts) { + t.Errorf("a.Contact = %v; want %v", a.Contact, contacts) + } +} + +func TestUpdateReg(t *testing.T) { + const terms = "https://ca.tld/acme/terms" + contacts := []string{"mailto:admin@example.com"} + + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.Method == "HEAD" { + w.Header().Set("replay-nonce", 
"test-nonce") + return + } + if r.Method != "POST" { + t.Errorf("r.Method = %q; want POST", r.Method) + } + + var j struct { + Resource string + Contact []string + Agreement string + } + decodeJWSRequest(t, &j, r) + + // Test request + if j.Resource != "reg" { + t.Errorf("j.Resource = %q; want reg", j.Resource) + } + if j.Agreement != terms { + t.Errorf("j.Agreement = %q; want %q", j.Agreement, terms) + } + if !reflect.DeepEqual(j.Contact, contacts) { + t.Errorf("j.Contact = %v; want %v", j.Contact, contacts) + } + + w.Header().Set("Link", `<https://ca.tld/acme/new-authz>;rel="next"`) + w.Header().Add("Link", `<https://ca.tld/acme/recover-reg>;rel="recover"`) + w.Header().Add("Link", fmt.Sprintf(`<%s>;rel="terms-of-service"`, terms)) + w.WriteHeader(http.StatusOK) + b, _ := json.Marshal(contacts) + fmt.Fprintf(w, `{"contact":%s, "agreement":%q}`, b, terms) + })) + defer ts.Close() + + c := Client{Key: testKeyEC} + a := &Account{URI: ts.URL, Contact: contacts, AgreedTerms: terms} + var err error + if a, err = c.UpdateReg(context.Background(), a); err != nil { + t.Fatal(err) + } + if a.Authz != "https://ca.tld/acme/new-authz" { + t.Errorf("a.Authz = %q; want https://ca.tld/acme/new-authz", a.Authz) + } + if a.AgreedTerms != terms { + t.Errorf("a.AgreedTerms = %q; want %q", a.AgreedTerms, terms) + } + if a.CurrentTerms != terms { + t.Errorf("a.CurrentTerms = %q; want %q", a.CurrentTerms, terms) + } + if a.URI != ts.URL { + t.Errorf("a.URI = %q; want %q", a.URI, ts.URL) + } +} + +func TestGetReg(t *testing.T) { + const terms = "https://ca.tld/acme/terms" + const newTerms = "https://ca.tld/acme/new-terms" + contacts := []string{"mailto:admin@example.com"} + + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.Method == "HEAD" { + w.Header().Set("replay-nonce", "test-nonce") + return + } + if r.Method != "POST" { + t.Errorf("r.Method = %q; want POST", r.Method) + } + + var j struct { + Resource string + Contact []string + Agreement string + } + decodeJWSRequest(t, &j, r) + + // Test 
request + if j.Resource != "reg" { + t.Errorf("j.Resource = %q; want reg", j.Resource) + } + if len(j.Contact) != 0 { + t.Errorf("j.Contact = %v", j.Contact) + } + if j.Agreement != "" { + t.Errorf("j.Agreement = %q", j.Agreement) + } + + w.Header().Set("Link", `<https://ca.tld/acme/new-authz>;rel="next"`) + w.Header().Add("Link", `<https://ca.tld/acme/recover-reg>;rel="recover"`) + w.Header().Add("Link", fmt.Sprintf(`<%s>;rel="terms-of-service"`, newTerms)) + w.WriteHeader(http.StatusOK) + b, _ := json.Marshal(contacts) + fmt.Fprintf(w, `{"contact":%s, "agreement":%q}`, b, terms) + })) + defer ts.Close() + + c := Client{Key: testKeyEC} + a, err := c.GetReg(context.Background(), ts.URL) + if err != nil { + t.Fatal(err) + } + if a.Authz != "https://ca.tld/acme/new-authz" { + t.Errorf("a.AuthzURL = %q; want https://ca.tld/acme/new-authz", a.Authz) + } + if a.AgreedTerms != terms { + t.Errorf("a.AgreedTerms = %q; want %q", a.AgreedTerms, terms) + } + if a.CurrentTerms != newTerms { + t.Errorf("a.CurrentTerms = %q; want %q", a.CurrentTerms, newTerms) + } + if a.URI != ts.URL { + t.Errorf("a.URI = %q; want %q", a.URI, ts.URL) + } +} + +func TestAuthorize(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.Method == "HEAD" { + w.Header().Set("replay-nonce", "test-nonce") + return + } + if r.Method != "POST" { + t.Errorf("r.Method = %q; want POST", r.Method) + } + + var j struct { + Resource string + Identifier struct { + Type string + Value string + } + } + decodeJWSRequest(t, &j, r) + + // Test request + if j.Resource != "new-authz" { + t.Errorf("j.Resource = %q; want new-authz", j.Resource) + } + if j.Identifier.Type != "dns" { + t.Errorf("j.Identifier.Type = %q; want dns", j.Identifier.Type) + } + if j.Identifier.Value != "example.com" { + t.Errorf("j.Identifier.Value = %q; want example.com", j.Identifier.Value) + } + + w.Header().Set("Location", "https://ca.tld/acme/auth/1") + w.WriteHeader(http.StatusCreated) + fmt.Fprintf(w, `{ + "identifier": 
{"type":"dns","value":"example.com"}, + "status":"pending", + "challenges":[ + { + "type":"http-01", + "status":"pending", + "uri":"https://ca.tld/acme/challenge/publickey/id1", + "token":"token1" + }, + { + "type":"tls-sni-01", + "status":"pending", + "uri":"https://ca.tld/acme/challenge/publickey/id2", + "token":"token2" + } + ], + "combinations":[[0],[1]]}`) + })) + defer ts.Close() + + cl := Client{Key: testKeyEC, dir: &Directory{AuthzURL: ts.URL}} + auth, err := cl.Authorize(context.Background(), "example.com") + if err != nil { + t.Fatal(err) + } + + if auth.URI != "https://ca.tld/acme/auth/1" { + t.Errorf("URI = %q; want https://ca.tld/acme/auth/1", auth.URI) + } + if auth.Status != "pending" { + t.Errorf("Status = %q; want pending", auth.Status) + } + if auth.Identifier.Type != "dns" { + t.Errorf("Identifier.Type = %q; want dns", auth.Identifier.Type) + } + if auth.Identifier.Value != "example.com" { + t.Errorf("Identifier.Value = %q; want example.com", auth.Identifier.Value) + } + + if n := len(auth.Challenges); n != 2 { + t.Fatalf("len(auth.Challenges) = %d; want 2", n) + } + + c := auth.Challenges[0] + if c.Type != "http-01" { + t.Errorf("c.Type = %q; want http-01", c.Type) + } + if c.URI != "https://ca.tld/acme/challenge/publickey/id1" { + t.Errorf("c.URI = %q; want https://ca.tld/acme/challenge/publickey/id1", c.URI) + } + if c.Token != "token1" { + t.Errorf("c.Token = %q; want token1", c.Type) + } + + c = auth.Challenges[1] + if c.Type != "tls-sni-01" { + t.Errorf("c.Type = %q; want tls-sni-01", c.Type) + } + if c.URI != "https://ca.tld/acme/challenge/publickey/id2" { + t.Errorf("c.URI = %q; want https://ca.tld/acme/challenge/publickey/id2", c.URI) + } + if c.Token != "token2" { + t.Errorf("c.Token = %q; want token2", c.Type) + } + + combs := [][]int{{0}, {1}} + if !reflect.DeepEqual(auth.Combinations, combs) { + t.Errorf("auth.Combinations: %+v\nwant: %+v\n", auth.Combinations, combs) + } +} + +func TestAuthorizeValid(t *testing.T) { + ts := 
httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.Method == "HEAD" { + w.Header().Set("replay-nonce", "nonce") + return + } + w.WriteHeader(http.StatusCreated) + w.Write([]byte(`{"status":"valid"}`)) + })) + defer ts.Close() + client := Client{Key: testKey, dir: &Directory{AuthzURL: ts.URL}} + _, err := client.Authorize(context.Background(), "example.com") + if err != nil { + t.Errorf("err = %v", err) + } +} + +func TestGetAuthorization(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.Method != "GET" { + t.Errorf("r.Method = %q; want GET", r.Method) + } + + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, `{ + "identifier": {"type":"dns","value":"example.com"}, + "status":"pending", + "challenges":[ + { + "type":"http-01", + "status":"pending", + "uri":"https://ca.tld/acme/challenge/publickey/id1", + "token":"token1" + }, + { + "type":"tls-sni-01", + "status":"pending", + "uri":"https://ca.tld/acme/challenge/publickey/id2", + "token":"token2" + } + ], + "combinations":[[0],[1]]}`) + })) + defer ts.Close() + + cl := Client{Key: testKeyEC} + auth, err := cl.GetAuthorization(context.Background(), ts.URL) + if err != nil { + t.Fatal(err) + } + + if auth.Status != "pending" { + t.Errorf("Status = %q; want pending", auth.Status) + } + if auth.Identifier.Type != "dns" { + t.Errorf("Identifier.Type = %q; want dns", auth.Identifier.Type) + } + if auth.Identifier.Value != "example.com" { + t.Errorf("Identifier.Value = %q; want example.com", auth.Identifier.Value) + } + + if n := len(auth.Challenges); n != 2 { + t.Fatalf("len(set.Challenges) = %d; want 2", n) + } + + c := auth.Challenges[0] + if c.Type != "http-01" { + t.Errorf("c.Type = %q; want http-01", c.Type) + } + if c.URI != "https://ca.tld/acme/challenge/publickey/id1" { + t.Errorf("c.URI = %q; want https://ca.tld/acme/challenge/publickey/id1", c.URI) + } + if c.Token != "token1" { + t.Errorf("c.Token = %q; want 
token1", c.Type) + } + + c = auth.Challenges[1] + if c.Type != "tls-sni-01" { + t.Errorf("c.Type = %q; want tls-sni-01", c.Type) + } + if c.URI != "https://ca.tld/acme/challenge/publickey/id2" { + t.Errorf("c.URI = %q; want https://ca.tld/acme/challenge/publickey/id2", c.URI) + } + if c.Token != "token2" { + t.Errorf("c.Token = %q; want token2", c.Type) + } + + combs := [][]int{{0}, {1}} + if !reflect.DeepEqual(auth.Combinations, combs) { + t.Errorf("auth.Combinations: %+v\nwant: %+v\n", auth.Combinations, combs) + } +} + +func TestWaitAuthorization(t *testing.T) { + var count int + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + count++ + w.Header().Set("retry-after", "0") + if count > 1 { + fmt.Fprintf(w, `{"status":"valid"}`) + return + } + fmt.Fprintf(w, `{"status":"pending"}`) + })) + defer ts.Close() + + type res struct { + authz *Authorization + err error + } + done := make(chan res) + defer close(done) + go func() { + var client Client + a, err := client.WaitAuthorization(context.Background(), ts.URL) + done <- res{a, err} + }() + + select { + case <-time.After(5 * time.Second): + t.Fatal("WaitAuthz took too long to return") + case res := <-done: + if res.err != nil { + t.Fatalf("res.err = %v", res.err) + } + if res.authz == nil { + t.Fatal("res.authz is nil") + } + } +} + +func TestWaitAuthorizationInvalid(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintf(w, `{"status":"invalid"}`) + })) + defer ts.Close() + + res := make(chan error) + defer close(res) + go func() { + var client Client + _, err := client.WaitAuthorization(context.Background(), ts.URL) + res <- err + }() + + select { + case <-time.After(3 * time.Second): + t.Fatal("WaitAuthz took too long to return") + case err := <-res: + if err == nil { + t.Error("err is nil") + } + } +} + +func TestWaitAuthorizationCancel(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w 
http.ResponseWriter, r *http.Request) { + w.Header().Set("retry-after", "60") + fmt.Fprintf(w, `{"status":"pending"}`) + })) + defer ts.Close() + + res := make(chan error) + defer close(res) + go func() { + var client Client + ctx, cancel := context.WithTimeout(context.Background(), 200*time.Millisecond) + defer cancel() + _, err := client.WaitAuthorization(ctx, ts.URL) + res <- err + }() + + select { + case <-time.After(time.Second): + t.Fatal("WaitAuthz took too long to return") + case err := <-res: + if err == nil { + t.Error("err is nil") + } + } +} + +func TestRevokeAuthorization(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.Method == "HEAD" { + w.Header().Set("replay-nonce", "nonce") + return + } + switch r.URL.Path { + case "/1": + var req struct { + Resource string + Delete bool + } + decodeJWSRequest(t, &req, r) + if req.Resource != "authz" { + t.Errorf("req.Resource = %q; want authz", req.Resource) + } + if !req.Delete { + t.Errorf("req.Delete is false") + } + case "/2": + w.WriteHeader(http.StatusInternalServerError) + } + })) + defer ts.Close() + client := &Client{Key: testKey} + ctx := context.Background() + if err := client.RevokeAuthorization(ctx, ts.URL+"/1"); err != nil { + t.Errorf("err = %v", err) + } + if client.RevokeAuthorization(ctx, ts.URL+"/2") == nil { + t.Error("nil error") + } +} + +func TestPollChallenge(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.Method != "GET" { + t.Errorf("r.Method = %q; want GET", r.Method) + } + + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, `{ + "type":"http-01", + "status":"pending", + "uri":"https://ca.tld/acme/challenge/publickey/id1", + "token":"token1"}`) + })) + defer ts.Close() + + cl := Client{Key: testKeyEC} + chall, err := cl.GetChallenge(context.Background(), ts.URL) + if err != nil { + t.Fatal(err) + } + + if chall.Status != "pending" { + t.Errorf("Status = %q; 
want pending", chall.Status) + } + if chall.Type != "http-01" { + t.Errorf("c.Type = %q; want http-01", chall.Type) + } + if chall.URI != "https://ca.tld/acme/challenge/publickey/id1" { + t.Errorf("c.URI = %q; want https://ca.tld/acme/challenge/publickey/id1", chall.URI) + } + if chall.Token != "token1" { + t.Errorf("c.Token = %q; want token1", chall.Type) + } +} + +func TestAcceptChallenge(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.Method == "HEAD" { + w.Header().Set("replay-nonce", "test-nonce") + return + } + if r.Method != "POST" { + t.Errorf("r.Method = %q; want POST", r.Method) + } + + var j struct { + Resource string + Type string + Auth string `json:"keyAuthorization"` + } + decodeJWSRequest(t, &j, r) + + // Test request + if j.Resource != "challenge" { + t.Errorf(`resource = %q; want "challenge"`, j.Resource) + } + if j.Type != "http-01" { + t.Errorf(`type = %q; want "http-01"`, j.Type) + } + keyAuth := "token1." 
+ testKeyECThumbprint + if j.Auth != keyAuth { + t.Errorf(`keyAuthorization = %q; want %q`, j.Auth, keyAuth) + } + + // Respond to request + w.WriteHeader(http.StatusAccepted) + fmt.Fprintf(w, `{ + "type":"http-01", + "status":"pending", + "uri":"https://ca.tld/acme/challenge/publickey/id1", + "token":"token1", + "keyAuthorization":%q + }`, keyAuth) + })) + defer ts.Close() + + cl := Client{Key: testKeyEC} + c, err := cl.Accept(context.Background(), &Challenge{ + URI: ts.URL, + Token: "token1", + Type: "http-01", + }) + if err != nil { + t.Fatal(err) + } + + if c.Type != "http-01" { + t.Errorf("c.Type = %q; want http-01", c.Type) + } + if c.URI != "https://ca.tld/acme/challenge/publickey/id1" { + t.Errorf("c.URI = %q; want https://ca.tld/acme/challenge/publickey/id1", c.URI) + } + if c.Token != "token1" { + t.Errorf("c.Token = %q; want token1", c.Type) + } +} + +func TestNewCert(t *testing.T) { + notBefore := time.Now() + notAfter := notBefore.AddDate(0, 2, 0) + timeNow = func() time.Time { return notBefore } + + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.Method == "HEAD" { + w.Header().Set("replay-nonce", "test-nonce") + return + } + if r.Method != "POST" { + t.Errorf("r.Method = %q; want POST", r.Method) + } + + var j struct { + Resource string `json:"resource"` + CSR string `json:"csr"` + NotBefore string `json:"notBefore,omitempty"` + NotAfter string `json:"notAfter,omitempty"` + } + decodeJWSRequest(t, &j, r) + + // Test request + if j.Resource != "new-cert" { + t.Errorf(`resource = %q; want "new-cert"`, j.Resource) + } + if j.NotBefore != notBefore.Format(time.RFC3339) { + t.Errorf(`notBefore = %q; wanted %q`, j.NotBefore, notBefore.Format(time.RFC3339)) + } + if j.NotAfter != notAfter.Format(time.RFC3339) { + t.Errorf(`notAfter = %q; wanted %q`, j.NotAfter, notAfter.Format(time.RFC3339)) + } + + // Respond to request + template := x509.Certificate{ + SerialNumber: big.NewInt(int64(1)), + Subject: 
pkix.Name{ + Organization: []string{"goacme"}, + }, + NotBefore: notBefore, + NotAfter: notAfter, + + KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, + BasicConstraintsValid: true, + } + + sampleCert, err := x509.CreateCertificate(rand.Reader, &template, &template, &testKeyEC.PublicKey, testKeyEC) + if err != nil { + t.Fatalf("Error creating certificate: %v", err) + } + + w.Header().Set("Location", "https://ca.tld/acme/cert/1") + w.WriteHeader(http.StatusCreated) + w.Write(sampleCert) + })) + defer ts.Close() + + csr := x509.CertificateRequest{ + Version: 0, + Subject: pkix.Name{ + CommonName: "example.com", + Organization: []string{"goacme"}, + }, + } + csrb, err := x509.CreateCertificateRequest(rand.Reader, &csr, testKeyEC) + if err != nil { + t.Fatal(err) + } + + c := Client{Key: testKeyEC, dir: &Directory{CertURL: ts.URL}} + cert, certURL, err := c.CreateCert(context.Background(), csrb, notAfter.Sub(notBefore), false) + if err != nil { + t.Fatal(err) + } + if cert == nil { + t.Errorf("cert is nil") + } + if certURL != "https://ca.tld/acme/cert/1" { + t.Errorf("certURL = %q; want https://ca.tld/acme/cert/1", certURL) + } +} + +func TestFetchCert(t *testing.T) { + var count byte + var ts *httptest.Server + ts = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + count++ + if count < 3 { + up := fmt.Sprintf("<%s>;rel=up", ts.URL) + w.Header().Set("link", up) + } + w.Write([]byte{count}) + })) + defer ts.Close() + res, err := (&Client{}).FetchCert(context.Background(), ts.URL, true) + if err != nil { + t.Fatalf("FetchCert: %v", err) + } + cert := [][]byte{{1}, {2}, {3}} + if !reflect.DeepEqual(res, cert) { + t.Errorf("res = %v; want %v", res, cert) + } +} + +func TestFetchCertRetry(t *testing.T) { + var count int + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if count < 1 { + 
w.Header().Set("retry-after", "0") + w.WriteHeader(http.StatusAccepted) + count++ + return + } + w.Write([]byte{1}) + })) + defer ts.Close() + res, err := (&Client{}).FetchCert(context.Background(), ts.URL, false) + if err != nil { + t.Fatalf("FetchCert: %v", err) + } + cert := [][]byte{{1}} + if !reflect.DeepEqual(res, cert) { + t.Errorf("res = %v; want %v", res, cert) + } +} + +func TestFetchCertCancel(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("retry-after", "0") + w.WriteHeader(http.StatusAccepted) + })) + defer ts.Close() + ctx, cancel := context.WithCancel(context.Background()) + done := make(chan struct{}) + var err error + go func() { + _, err = (&Client{}).FetchCert(ctx, ts.URL, false) + close(done) + }() + cancel() + <-done + if err != context.Canceled { + t.Errorf("err = %v; want %v", err, context.Canceled) + } +} + +func TestFetchCertDepth(t *testing.T) { + var count byte + var ts *httptest.Server + ts = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + count++ + if count > maxChainLen+1 { + t.Errorf("count = %d; want at most %d", count, maxChainLen+1) + w.WriteHeader(http.StatusInternalServerError) + } + w.Header().Set("link", fmt.Sprintf("<%s>;rel=up", ts.URL)) + w.Write([]byte{count}) + })) + defer ts.Close() + _, err := (&Client{}).FetchCert(context.Background(), ts.URL, true) + if err == nil { + t.Errorf("err is nil") + } +} + +func TestFetchCertBreadth(t *testing.T) { + var ts *httptest.Server + ts = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + for i := 0; i < maxChainLen+1; i++ { + w.Header().Add("link", fmt.Sprintf("<%s>;rel=up", ts.URL)) + } + w.Write([]byte{1}) + })) + defer ts.Close() + _, err := (&Client{}).FetchCert(context.Background(), ts.URL, true) + if err == nil { + t.Errorf("err is nil") + } +} + +func TestFetchCertSize(t *testing.T) { + ts := 
httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + b := bytes.Repeat([]byte{1}, maxCertSize+1) + w.Write(b) + })) + defer ts.Close() + _, err := (&Client{}).FetchCert(context.Background(), ts.URL, false) + if err == nil { + t.Errorf("err is nil") + } +} + +func TestRevokeCert(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.Method == "HEAD" { + w.Header().Set("replay-nonce", "nonce") + return + } + + var req struct { + Resource string + Certificate string + Reason int + } + decodeJWSRequest(t, &req, r) + if req.Resource != "revoke-cert" { + t.Errorf("req.Resource = %q; want revoke-cert", req.Resource) + } + if req.Reason != 1 { + t.Errorf("req.Reason = %d; want 1", req.Reason) + } + // echo -n cert | base64 | tr -d '=' | tr '/+' '_-' + cert := "Y2VydA" + if req.Certificate != cert { + t.Errorf("req.Certificate = %q; want %q", req.Certificate, cert) + } + })) + defer ts.Close() + client := &Client{ + Key: testKeyEC, + dir: &Directory{RevokeURL: ts.URL}, + } + ctx := context.Background() + if err := client.RevokeCert(ctx, nil, []byte("cert"), CRLReasonKeyCompromise); err != nil { + t.Fatal(err) + } +} + +func TestFetchNonce(t *testing.T) { + tests := []struct { + code int + nonce string + }{ + {http.StatusOK, "nonce1"}, + {http.StatusBadRequest, "nonce2"}, + {http.StatusOK, ""}, + } + var i int + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.Method != "HEAD" { + t.Errorf("%d: r.Method = %q; want HEAD", i, r.Method) + } + w.Header().Set("replay-nonce", tests[i].nonce) + w.WriteHeader(tests[i].code) + })) + defer ts.Close() + for ; i < len(tests); i++ { + test := tests[i] + n, err := fetchNonce(context.Background(), http.DefaultClient, ts.URL) + if n != test.nonce { + t.Errorf("%d: n=%q; want %q", i, n, test.nonce) + } + switch { + case err == nil && test.nonce == "": + t.Errorf("%d: n=%q, err=%v; want non-nil error", i, 
n, err) + case err != nil && test.nonce != "": + t.Errorf("%d: n=%q, err=%v; want %q", i, n, err, test.nonce) + } + } +} + +func TestLinkHeader(t *testing.T) { + h := http.Header{"Link": { + `<https://example.com/acme/new-authz>;rel="next"`, + `<https://example.com/acme/recover-reg>; rel=recover`, + `<https://example.com/acme/terms>; foo=bar; rel="terms-of-service"`, + `<dup>;rel="next"`, + }} + tests := []struct { + rel string + out []string + }{ + {"next", []string{"https://example.com/acme/new-authz", "dup"}}, + {"recover", []string{"https://example.com/acme/recover-reg"}}, + {"terms-of-service", []string{"https://example.com/acme/terms"}}, + {"empty", nil}, + } + for i, test := range tests { + if v := linkHeader(h, test.rel); !reflect.DeepEqual(v, test.out) { + t.Errorf("%d: linkHeader(%q): %v; want %v", i, test.rel, v, test.out) + } + } +} + +func TestErrorResponse(t *testing.T) { + s := `{ + "status": 400, + "type": "urn:acme:error:xxx", + "detail": "text" + }` + res := &http.Response{ + StatusCode: 400, + Status: "400 Bad Request", + Body: ioutil.NopCloser(strings.NewReader(s)), + Header: http.Header{"X-Foo": {"bar"}}, + } + err := responseError(res) + v, ok := err.(*Error) + if !ok { + t.Fatalf("err = %+v (%T); want *Error type", err, err) + } + if v.StatusCode != 400 { + t.Errorf("v.StatusCode = %v; want 400", v.StatusCode) + } + if v.ProblemType != "urn:acme:error:xxx" { + t.Errorf("v.ProblemType = %q; want urn:acme:error:xxx", v.ProblemType) + } + if v.Detail != "text" { + t.Errorf("v.Detail = %q; want text", v.Detail) + } + if !reflect.DeepEqual(v.Header, res.Header) { + t.Errorf("v.Header = %+v; want %+v", v.Header, res.Header) + } +} + +func TestTLSSNI01ChallengeCert(t *testing.T) { + const ( + token = "evaGxfADs6pSRb2LAv9IZf17Dt3juxGJ-PCt92wr-oA" + // echo -n <token.testKeyECThumbprint> | shasum -a 256 + san = "dbbd5eefe7b4d06eb9d1d9f5acb4c7cd.a27d320e4b30332f0b6cb441734ad7b0.acme.invalid" + ) + + client := &Client{Key: testKeyEC} + tlscert, name, err := client.TLSSNI01ChallengeCert(token) + if err != nil { + t.Fatal(err) + } + + if n := len(tlscert.Certificate); n != 1 { + 
t.Fatalf("len(tlscert.Certificate) = %d; want 1", n) + } + cert, err := x509.ParseCertificate(tlscert.Certificate[0]) + if err != nil { + t.Fatal(err) + } + if len(cert.DNSNames) != 1 || cert.DNSNames[0] != san { + t.Fatalf("cert.DNSNames = %v; want %q", cert.DNSNames, san) + } + if cert.DNSNames[0] != name { + t.Errorf("cert.DNSNames[0] != name: %q vs %q", cert.DNSNames[0], name) + } +} + +func TestTLSSNI02ChallengeCert(t *testing.T) { + const ( + token = "evaGxfADs6pSRb2LAv9IZf17Dt3juxGJ-PCt92wr-oA" + // echo -n evaGxfADs6pSRb2LAv9IZf17Dt3juxGJ-PCt92wr-oA | shasum -a 256 + sanA = "7ea0aaa69214e71e02cebb18bb867736.09b730209baabf60e43d4999979ff139.token.acme.invalid" + // echo -n <token.testKeyECThumbprint> | shasum -a 256 + sanB = "dbbd5eefe7b4d06eb9d1d9f5acb4c7cd.a27d320e4b30332f0b6cb441734ad7b0.ka.acme.invalid" + ) + + client := &Client{Key: testKeyEC} + tlscert, name, err := client.TLSSNI02ChallengeCert(token) + if err != nil { + t.Fatal(err) + } + + if n := len(tlscert.Certificate); n != 1 { + t.Fatalf("len(tlscert.Certificate) = %d; want 1", n) + } + cert, err := x509.ParseCertificate(tlscert.Certificate[0]) + if err != nil { + t.Fatal(err) + } + names := []string{sanA, sanB} + if !reflect.DeepEqual(cert.DNSNames, names) { + t.Fatalf("cert.DNSNames = %v;\nwant %v", cert.DNSNames, names) + } + sort.Strings(cert.DNSNames) + i := sort.SearchStrings(cert.DNSNames, name) + if i >= len(cert.DNSNames) || cert.DNSNames[i] != name { + t.Errorf("%v doesn't have %q", cert.DNSNames, name) + } +} + +func TestTLSChallengeCertOpt(t *testing.T) { + key, err := rsa.GenerateKey(rand.Reader, 512) + if err != nil { + t.Fatal(err) + } + tmpl := &x509.Certificate{ + SerialNumber: big.NewInt(2), + Subject: pkix.Name{Organization: []string{"Test"}}, + DNSNames: []string{"should-be-overwritten"}, + } + opts := []CertOption{WithKey(key), WithTemplate(tmpl)} + + client := &Client{Key: testKeyEC} + cert1, _, err := client.TLSSNI01ChallengeCert("token", opts...) 
+ if err != nil { + t.Fatal(err) + } + cert2, _, err := client.TLSSNI02ChallengeCert("token", opts...) + if err != nil { + t.Fatal(err) + } + + for i, tlscert := range []tls.Certificate{cert1, cert2} { + // verify generated cert private key + tlskey, ok := tlscert.PrivateKey.(*rsa.PrivateKey) + if !ok { + t.Errorf("%d: tlscert.PrivateKey is %T; want *rsa.PrivateKey", i, tlscert.PrivateKey) + continue + } + if tlskey.D.Cmp(key.D) != 0 { + t.Errorf("%d: tlskey.D = %v; want %v", i, tlskey.D, key.D) + } + // verify generated cert public key + x509Cert, err := x509.ParseCertificate(tlscert.Certificate[0]) + if err != nil { + t.Errorf("%d: %v", i, err) + continue + } + tlspub, ok := x509Cert.PublicKey.(*rsa.PublicKey) + if !ok { + t.Errorf("%d: x509Cert.PublicKey is %T; want *rsa.PublicKey", i, x509Cert.PublicKey) + continue + } + if tlspub.N.Cmp(key.N) != 0 { + t.Errorf("%d: tlspub.N = %v; want %v", i, tlspub.N, key.N) + } + // verify template option + sn := big.NewInt(2) + if x509Cert.SerialNumber.Cmp(sn) != 0 { + t.Errorf("%d: SerialNumber = %v; want %v", i, x509Cert.SerialNumber, sn) + } + org := []string{"Test"} + if !reflect.DeepEqual(x509Cert.Subject.Organization, org) { + t.Errorf("%d: Subject.Organization = %+v; want %+v", i, x509Cert.Subject.Organization, org) + } + for _, v := range x509Cert.DNSNames { + if !strings.HasSuffix(v, ".acme.invalid") { + t.Errorf("%d: invalid DNSNames element: %q", i, v) + } + } + } +} + +func TestHTTP01Challenge(t *testing.T) { + const ( + token = "xxx" + // thumbprint is precomputed for testKeyEC in jws_test.go + value = token + "." 
+ testKeyECThumbprint + urlpath = "/.well-known/acme-challenge/" + token + ) + client := &Client{Key: testKeyEC} + val, err := client.HTTP01ChallengeResponse(token) + if err != nil { + t.Fatal(err) + } + if val != value { + t.Errorf("val = %q; want %q", val, value) + } + if path := client.HTTP01ChallengePath(token); path != urlpath { + t.Errorf("path = %q; want %q", path, urlpath) + } +} + +func TestDNS01ChallengeRecord(t *testing.T) { + // echo -n xxx. | \ + // openssl dgst -binary -sha256 | \ + // base64 | tr -d '=' | tr '/+' '_-' + const value = "8DERMexQ5VcdJ_prpPiA0mVdp7imgbCgjsG4SqqNMIo" + + client := &Client{Key: testKeyEC} + val, err := client.DNS01ChallengeRecord("xxx") + if err != nil { + t.Fatal(err) + } + if val != value { + t.Errorf("val = %q; want %q", val, value) + } +} + +func TestBackoff(t *testing.T) { + tt := []struct{ min, max time.Duration }{ + {time.Second, 2 * time.Second}, + {2 * time.Second, 3 * time.Second}, + {4 * time.Second, 5 * time.Second}, + {8 * time.Second, 9 * time.Second}, + } + for i, test := range tt { + d := backoff(i, time.Minute) + if d < test.min || test.max < d { + t.Errorf("%d: d = %v; want between %v and %v", i, d, test.min, test.max) + } + } + + min, max := time.Second, 2*time.Second + if d := backoff(-1, time.Minute); d < min || max < d { + t.Errorf("d = %v; want between %v and %v", d, min, max) + } + + bound := 10 * time.Second + if d := backoff(100, bound); d != bound { + t.Errorf("d = %v; want %v", d, bound) + } +} diff -Nru juju-core-2.0~beta15/src/golang.org/x/crypto/acme/autocert/autocert.go juju-core-2.0.0/src/golang.org/x/crypto/acme/autocert/autocert.go --- juju-core-2.0~beta15/src/golang.org/x/crypto/acme/autocert/autocert.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/golang.org/x/crypto/acme/autocert/autocert.go 2016-10-13 14:32:00.000000000 +0000 @@ -0,0 +1,776 @@ +// Copyright 2016 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package autocert provides automatic access to certificates from Let's Encrypt +// and any other ACME-based CA. +// +// This package is a work in progress and makes no API stability promises. +package autocert + +import ( + "bytes" + "crypto" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/rsa" + "crypto/tls" + "crypto/x509" + "crypto/x509/pkix" + "encoding/pem" + "errors" + "fmt" + "io" + mathrand "math/rand" + "net/http" + "strconv" + "strings" + "sync" + "time" + + "golang.org/x/crypto/acme" + "golang.org/x/net/context" +) + +// pseudoRand is safe for concurrent use. +var pseudoRand *lockedMathRand + +func init() { + src := mathrand.NewSource(timeNow().UnixNano()) + pseudoRand = &lockedMathRand{rnd: mathrand.New(src)} +} + +// AcceptTOS always returns true to indicate the acceptance of a CA Terms of Service +// during account registration. +func AcceptTOS(tosURL string) bool { return true } + +// HostPolicy specifies which host names the Manager is allowed to respond to. +// It returns a non-nil error if the host should be rejected. +// The returned error is accessible via tls.Conn.Handshake and its callers. +// See Manager's HostPolicy field and GetCertificate method docs for more details. +type HostPolicy func(ctx context.Context, host string) error + +// HostWhitelist returns a policy where only the specified host names are allowed. +// Only exact matches are currently supported. Subdomains, regexp or wildcard +// will not match. +func HostWhitelist(hosts ...string) HostPolicy { + whitelist := make(map[string]bool, len(hosts)) + for _, h := range hosts { + whitelist[h] = true + } + return func(_ context.Context, host string) error { + if !whitelist[host] { + return errors.New("acme/autocert: host not configured") + } + return nil + } +} + +// defaultHostPolicy is used when Manager.HostPolicy is not set. 
+func defaultHostPolicy(context.Context, string) error { + return nil +} + +// Manager is a stateful certificate manager built on top of acme.Client. +// It obtains and refreshes certificates automatically, +// as well as providing them to a TLS server via tls.Config. +// +// A simple usage example: +// +// m := autocert.Manager{ +// Prompt: autocert.AcceptTOS, +// HostPolicy: autocert.HostWhitelist("example.org"), +// } +// s := &http.Server{ +// Addr: ":https", +// TLSConfig: &tls.Config{GetCertificate: m.GetCertificate}, +// } +// s.ListenAndServeTLS("", "") +// +// To preserve issued certificates and improve overall performance, +// use a cache implementation of Cache. For instance, DirCache. +type Manager struct { + // Prompt specifies a callback function to conditionally accept a CA's Terms of Service (TOS). + // The registration may require the caller to agree to the CA's TOS. + // If so, Manager calls Prompt with a TOS URL provided by the CA. Prompt should report + // whether the caller agrees to the terms. + // + // To always accept the terms, the callers can use AcceptTOS. + Prompt func(tosURL string) bool + + // Cache optionally stores and retrieves previously-obtained certificates. + // If nil, certs will only be cached for the lifetime of the Manager. + // + // Manager passes the Cache certificates data encoded in PEM, with private/public + // parts combined in a single Cache.Put call, private key first. + Cache Cache + + // HostPolicy controls which domains the Manager will attempt + // to retrieve new certificates for. It does not affect cached certs. + // + // If non-nil, HostPolicy is called before requesting a new cert. + // If nil, all hosts are currently allowed. This is not recommended, + // as it opens a potential attack where clients connect to a server + // by IP address and pretend to be asking for an incorrect host name. 
+ // Manager will attempt to obtain a certificate for that host, incorrectly, + // eventually reaching the CA's rate limit for certificate requests + // and making it impossible to obtain actual certificates. + // + // See GetCertificate for more details. + HostPolicy HostPolicy + + // RenewBefore optionally specifies how early certificates should + // be renewed before they expire. + // + // If zero, they're renewed 1 week before expiration. + RenewBefore time.Duration + + // Client is used to perform low-level operations, such as account registration + // and requesting new certificates. + // If Client is nil, a zero-value acme.Client is used with acme.LetsEncryptURL + // directory endpoint and a newly-generated ECDSA P-256 key. + // + // Mutating the field after the first call of GetCertificate method will have no effect. + Client *acme.Client + + // Email optionally specifies a contact email address. + // This is used by CAs, such as Let's Encrypt, to notify about problems + // with issued certificates. + // + // If the Client's account key is already registered, Email is not used. + Email string + + clientMu sync.Mutex + client *acme.Client // initialized by acmeClient method + + stateMu sync.Mutex + state map[string]*certState // keyed by domain name + + // tokenCert is keyed by token domain name, which matches server name + // of ClientHello. Keys always have ".acme.invalid" suffix. + tokenCertMu sync.RWMutex + tokenCert map[string]*tls.Certificate + + // renewal tracks the set of domains currently running renewal timers. + // It is keyed by domain name. + renewalMu sync.Mutex + renewal map[string]*domainRenewal +} + +// GetCertificate implements the tls.Config.GetCertificate hook. +// It provides a TLS certificate for hello.ServerName host, including answering +// *.acme.invalid (TLS-SNI) challenges. All other fields of hello are ignored. +// +// If m.HostPolicy is non-nil, GetCertificate calls the policy before requesting +// a new cert. 
A non-nil error returned from m.HostPolicy halts TLS negotiation. +// The error is propagated back to the caller of GetCertificate and is user-visible. +// This does not affect cached certs. See HostPolicy field description for more details. +func (m *Manager) GetCertificate(hello *tls.ClientHelloInfo) (*tls.Certificate, error) { + name := hello.ServerName + if name == "" { + return nil, errors.New("acme/autocert: missing server name") + } + + // check whether this is a token cert requested for TLS-SNI challenge + if strings.HasSuffix(name, ".acme.invalid") { + m.tokenCertMu.RLock() + defer m.tokenCertMu.RUnlock() + if cert := m.tokenCert[name]; cert != nil { + return cert, nil + } + if cert, err := m.cacheGet(name); err == nil { + return cert, nil + } + // TODO: cache error results? + return nil, fmt.Errorf("acme/autocert: no token cert for %q", name) + } + + // regular domain + cert, err := m.cert(name) + if err == nil { + return cert, nil + } + if err != ErrCacheMiss { + return nil, err + } + + // first-time + ctx := context.Background() // TODO: use a deadline? + if err := m.hostPolicy()(ctx, name); err != nil { + return nil, err + } + cert, err = m.createCert(ctx, name) + if err != nil { + return nil, err + } + m.cachePut(name, cert) + return cert, nil +} + +// cert returns an existing certificate either from m.state or cache. +// If a certificate is found in cache but not in m.state, the latter will be filled +// with the cached value. 
+func (m *Manager) cert(name string) (*tls.Certificate, error) { + m.stateMu.Lock() + if s, ok := m.state[name]; ok { + m.stateMu.Unlock() + s.RLock() + defer s.RUnlock() + return s.tlscert() + } + defer m.stateMu.Unlock() + cert, err := m.cacheGet(name) + if err != nil { + return nil, err + } + signer, ok := cert.PrivateKey.(crypto.Signer) + if !ok { + return nil, errors.New("acme/autocert: private key cannot sign") + } + if m.state == nil { + m.state = make(map[string]*certState) + } + s := &certState{ + key: signer, + cert: cert.Certificate, + leaf: cert.Leaf, + } + m.state[name] = s + go m.renew(name, s.key, s.leaf.NotAfter) + return cert, nil +} + +// cacheGet always returns a valid certificate, or an error otherwise. +func (m *Manager) cacheGet(domain string) (*tls.Certificate, error) { + if m.Cache == nil { + return nil, ErrCacheMiss + } + // TODO: might want to define a cache timeout on m + ctx := context.Background() + data, err := m.Cache.Get(ctx, domain) + if err != nil { + return nil, err + } + + // private + priv, pub := pem.Decode(data) + if priv == nil || !strings.Contains(priv.Type, "PRIVATE") { + return nil, errors.New("acme/autocert: no private key found in cache") + } + privKey, err := parsePrivateKey(priv.Bytes) + if err != nil { + return nil, err + } + + // public + var pubDER [][]byte + for len(pub) > 0 { + var b *pem.Block + b, pub = pem.Decode(pub) + if b == nil { + break + } + pubDER = append(pubDER, b.Bytes) + } + if len(pub) > 0 { + return nil, errors.New("acme/autocert: invalid public key") + } + + // verify and create TLS cert + leaf, err := validCert(domain, pubDER, privKey) + if err != nil { + return nil, err + } + tlscert := &tls.Certificate{ + Certificate: pubDER, + PrivateKey: privKey, + Leaf: leaf, + } + return tlscert, nil +} + +func (m *Manager) cachePut(domain string, tlscert *tls.Certificate) error { + if m.Cache == nil { + return nil + } + + // contains PEM-encoded data + var buf bytes.Buffer + + // private + switch key := 
tlscert.PrivateKey.(type) { + case *ecdsa.PrivateKey: + if err := encodeECDSAKey(&buf, key); err != nil { + return err + } + case *rsa.PrivateKey: + b := x509.MarshalPKCS1PrivateKey(key) + pb := &pem.Block{Type: "RSA PRIVATE KEY", Bytes: b} + if err := pem.Encode(&buf, pb); err != nil { + return err + } + default: + return errors.New("acme/autocert: unknown private key type") + } + + // public + for _, b := range tlscert.Certificate { + pb := &pem.Block{Type: "CERTIFICATE", Bytes: b} + if err := pem.Encode(&buf, pb); err != nil { + return err + } + } + + // TODO: might want to define a cache timeout on m + ctx := context.Background() + return m.Cache.Put(ctx, domain, buf.Bytes()) +} + +func encodeECDSAKey(w io.Writer, key *ecdsa.PrivateKey) error { + b, err := x509.MarshalECPrivateKey(key) + if err != nil { + return err + } + pb := &pem.Block{Type: "EC PRIVATE KEY", Bytes: b} + return pem.Encode(w, pb) +} + +// createCert starts the domain ownership verification and returns a certificate +// for that domain upon success. +// +// If the domain is already being verified, it waits for the existing verification to complete. +// Either way, createCert blocks for the duration of the whole process. +func (m *Manager) createCert(ctx context.Context, domain string) (*tls.Certificate, error) { + // TODO: maybe rewrite this whole piece using sync.Once + state, err := m.certState(domain) + if err != nil { + return nil, err + } + // state may exist if another goroutine is already working on it + // in which case just wait for it to finish + if !state.locked { + state.RLock() + defer state.RUnlock() + return state.tlscert() + } + + // We are the first; state is locked. + // Unblock the readers when domain ownership is verified + // and the we got the cert or the process failed. 
+ defer state.Unlock() + state.locked = false + + der, leaf, err := m.authorizedCert(ctx, state.key, domain) + if err != nil { + return nil, err + } + state.cert = der + state.leaf = leaf + go m.renew(domain, state.key, state.leaf.NotAfter) + return state.tlscert() +} + +// certState returns a new or existing certState. +// If a new certState is returned, state.exist is false and the state is locked. +// The returned error is non-nil only in the case where a new state could not be created. +func (m *Manager) certState(domain string) (*certState, error) { + m.stateMu.Lock() + defer m.stateMu.Unlock() + if m.state == nil { + m.state = make(map[string]*certState) + } + // existing state + if state, ok := m.state[domain]; ok { + return state, nil + } + // new locked state + key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + if err != nil { + return nil, err + } + state := &certState{ + key: key, + locked: true, + } + state.Lock() // will be unlocked by m.certState caller + m.state[domain] = state + return state, nil +} + +// authorizedCert starts domain ownership verification process and requests a new cert upon success. +// The key argument is the certificate private key. +func (m *Manager) authorizedCert(ctx context.Context, key crypto.Signer, domain string) (der [][]byte, leaf *x509.Certificate, err error) { + // TODO: make m.verify retry or retry m.verify calls here + if err := m.verify(ctx, domain); err != nil { + return nil, nil, err + } + client, err := m.acmeClient(ctx) + if err != nil { + return nil, nil, err + } + csr, err := certRequest(key, domain) + if err != nil { + return nil, nil, err + } + der, _, err = client.CreateCert(ctx, csr, 0, true) + if err != nil { + return nil, nil, err + } + leaf, err = validCert(domain, der, key) + if err != nil { + return nil, nil, err + } + return der, leaf, nil +} + +// verify starts a new identifier (domain) authorization flow. 
+// It prepares a challenge response and then blocks until the authorization +// is marked as "completed" by the CA (either succeeded or failed). +// +// verify returns nil iff the verification was successful. +func (m *Manager) verify(ctx context.Context, domain string) error { + client, err := m.acmeClient(ctx) + if err != nil { + return err + } + + // start domain authorization and get the challenge + authz, err := client.Authorize(ctx, domain) + if err != nil { + return err + } + // maybe don't need to at all + if authz.Status == acme.StatusValid { + return nil + } + + // pick a challenge: prefer tls-sni-02 over tls-sni-01 + // TODO: consider authz.Combinations + var chal *acme.Challenge + for _, c := range authz.Challenges { + if c.Type == "tls-sni-02" { + chal = c + break + } + if c.Type == "tls-sni-01" { + chal = c + } + } + if chal == nil { + return errors.New("acme/autocert: no supported challenge type found") + } + + // create a token cert for the challenge response + var ( + cert tls.Certificate + name string + ) + switch chal.Type { + case "tls-sni-01": + cert, name, err = client.TLSSNI01ChallengeCert(chal.Token) + case "tls-sni-02": + cert, name, err = client.TLSSNI02ChallengeCert(chal.Token) + default: + err = fmt.Errorf("acme/autocert: unknown challenge type %q", chal.Type) + } + if err != nil { + return err + } + m.putTokenCert(name, &cert) + defer func() { + // verification has ended at this point + // don't need token cert anymore + go m.deleteTokenCert(name) + }() + + // ready to fulfill the challenge + if _, err := client.Accept(ctx, chal); err != nil { + return err + } + // wait for the CA to validate + _, err = client.WaitAuthorization(ctx, authz.URI) + return err +} + +// putTokenCert stores the cert under the named key in both m.tokenCert map +// and m.Cache. 
+func (m *Manager) putTokenCert(name string, cert *tls.Certificate) { + m.tokenCertMu.Lock() + defer m.tokenCertMu.Unlock() + if m.tokenCert == nil { + m.tokenCert = make(map[string]*tls.Certificate) + } + m.tokenCert[name] = cert + m.cachePut(name, cert) +} + +// deleteTokenCert removes the token certificate for the specified domain name +// from both m.tokenCert map and m.Cache. +func (m *Manager) deleteTokenCert(name string) { + m.tokenCertMu.Lock() + defer m.tokenCertMu.Unlock() + delete(m.tokenCert, name) + if m.Cache != nil { + m.Cache.Delete(context.Background(), name) + } +} + +// renew starts a cert renewal timer loop, one per domain. +// +// The loop is scheduled in two cases: +// - a cert was fetched from cache for the first time (wasn't in m.state) +// - a new cert was created by m.createCert +// +// The key argument is a certificate private key. +// The exp argument is the cert expiration time (NotAfter). +func (m *Manager) renew(domain string, key crypto.Signer, exp time.Time) { + m.renewalMu.Lock() + defer m.renewalMu.Unlock() + if m.renewal[domain] != nil { + // another goroutine is already on it + return + } + if m.renewal == nil { + m.renewal = make(map[string]*domainRenewal) + } + dr := &domainRenewal{m: m, domain: domain, key: key} + m.renewal[domain] = dr + dr.start(exp) +} + +// stopRenew stops all currently running cert renewal timers. +// The timers are not restarted during the lifetime of the Manager. 
+func (m *Manager) stopRenew() { + m.renewalMu.Lock() + defer m.renewalMu.Unlock() + for name, dr := range m.renewal { + delete(m.renewal, name) + dr.stop() + } +} + +func (m *Manager) accountKey(ctx context.Context) (crypto.Signer, error) { + const keyName = "acme_account.key" + + genKey := func() (*ecdsa.PrivateKey, error) { + return ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + } + + if m.Cache == nil { + return genKey() + } + + data, err := m.Cache.Get(ctx, keyName) + if err == ErrCacheMiss { + key, err := genKey() + if err != nil { + return nil, err + } + var buf bytes.Buffer + if err := encodeECDSAKey(&buf, key); err != nil { + return nil, err + } + if err := m.Cache.Put(ctx, keyName, buf.Bytes()); err != nil { + return nil, err + } + return key, nil + } + if err != nil { + return nil, err + } + + priv, _ := pem.Decode(data) + if priv == nil || !strings.Contains(priv.Type, "PRIVATE") { + return nil, errors.New("acme/autocert: invalid account key found in cache") + } + return parsePrivateKey(priv.Bytes) +} + +func (m *Manager) acmeClient(ctx context.Context) (*acme.Client, error) { + m.clientMu.Lock() + defer m.clientMu.Unlock() + if m.client != nil { + return m.client, nil + } + + client := m.Client + if client == nil { + client = &acme.Client{DirectoryURL: acme.LetsEncryptURL} + } + if client.Key == nil { + var err error + client.Key, err = m.accountKey(ctx) + if err != nil { + return nil, err + } + } + var contact []string + if m.Email != "" { + contact = []string{"mailto:" + m.Email} + } + a := &acme.Account{Contact: contact} + _, err := client.Register(ctx, a, m.Prompt) + if ae, ok := err.(*acme.Error); err == nil || ok && ae.StatusCode == http.StatusConflict { + // conflict indicates the key is already registered + m.client = client + err = nil + } + return m.client, err +} + +func (m *Manager) hostPolicy() HostPolicy { + if m.HostPolicy != nil { + return m.HostPolicy + } + return defaultHostPolicy +} + +func (m *Manager) renewBefore() time.Duration 
{ + if m.RenewBefore > maxRandRenew { + return m.RenewBefore + } + return 7 * 24 * time.Hour // 1 week +} + +// certState is ready when its mutex is unlocked for reading. +type certState struct { + sync.RWMutex + locked bool // locked for read/write + key crypto.Signer // private key for cert + cert [][]byte // DER encoding + leaf *x509.Certificate // parsed cert[0]; always non-nil if cert != nil +} + +// tlscert creates a tls.Certificate from s.key and s.cert. +// Callers should wrap it in s.RLock() and s.RUnlock(). +func (s *certState) tlscert() (*tls.Certificate, error) { + if s.key == nil { + return nil, errors.New("acme/autocert: missing signer") + } + if len(s.cert) == 0 { + return nil, errors.New("acme/autocert: missing certificate") + } + return &tls.Certificate{ + PrivateKey: s.key, + Certificate: s.cert, + Leaf: s.leaf, + }, nil +} + +// certRequest creates a certificate request for the given common name cn +// and optional SANs. +func certRequest(key crypto.Signer, cn string, san ...string) ([]byte, error) { + req := &x509.CertificateRequest{ + Subject: pkix.Name{CommonName: cn}, + DNSNames: san, + } + return x509.CreateCertificateRequest(rand.Reader, req, key) +} + +// Attempt to parse the given private key DER block. OpenSSL 0.9.8 generates +// PKCS#1 private keys by default, while OpenSSL 1.0.0 generates PKCS#8 keys. +// OpenSSL ecparam generates SEC1 EC private keys for ECDSA. We try all three. +// +// Inspired by parsePrivateKey in crypto/tls/tls.go. 
+func parsePrivateKey(der []byte) (crypto.Signer, error) { + if key, err := x509.ParsePKCS1PrivateKey(der); err == nil { + return key, nil + } + if key, err := x509.ParsePKCS8PrivateKey(der); err == nil { + switch key := key.(type) { + case *rsa.PrivateKey: + return key, nil + case *ecdsa.PrivateKey: + return key, nil + default: + return nil, errors.New("acme/autocert: unknown private key type in PKCS#8 wrapping") + } + } + if key, err := x509.ParseECPrivateKey(der); err == nil { + return key, nil + } + + return nil, errors.New("acme/autocert: failed to parse private key") +} + +// validCert parses a cert chain provided as der argument and verifies the leaf, der[0], +// corresponds to the private key, as well as the domain match and expiration dates. +// It doesn't do any revocation checking. +// +// The returned value is the verified leaf cert. +func validCert(domain string, der [][]byte, key crypto.Signer) (leaf *x509.Certificate, err error) { + // parse public part(s) + var n int + for _, b := range der { + n += len(b) + } + pub := make([]byte, n) + n = 0 + for _, b := range der { + n += copy(pub[n:], b) + } + x509Cert, err := x509.ParseCertificates(pub) + if len(x509Cert) == 0 { + return nil, errors.New("acme/autocert: no public key found") + } + // verify the leaf is not expired and matches the domain name + leaf = x509Cert[0] + now := timeNow() + if now.Before(leaf.NotBefore) { + return nil, errors.New("acme/autocert: certificate is not valid yet") + } + if now.After(leaf.NotAfter) { + return nil, errors.New("acme/autocert: expired certificate") + } + if err := leaf.VerifyHostname(domain); err != nil { + return nil, err + } + // ensure the leaf corresponds to the private key + switch pub := leaf.PublicKey.(type) { + case *rsa.PublicKey: + prv, ok := key.(*rsa.PrivateKey) + if !ok { + return nil, errors.New("acme/autocert: private key type does not match public key type") + } + if pub.N.Cmp(prv.N) != 0 { + return nil, errors.New("acme/autocert: private key 
does not match public key") + } + case *ecdsa.PublicKey: + prv, ok := key.(*ecdsa.PrivateKey) + if !ok { + return nil, errors.New("acme/autocert: private key type does not match public key type") + } + if pub.X.Cmp(prv.X) != 0 || pub.Y.Cmp(prv.Y) != 0 { + return nil, errors.New("acme/autocert: private key does not match public key") + } + default: + return nil, errors.New("acme/autocert: unknown public key algorithm") + } + return leaf, nil +} + +func retryAfter(v string) time.Duration { + if i, err := strconv.Atoi(v); err == nil { + return time.Duration(i) * time.Second + } + if t, err := http.ParseTime(v); err == nil { + return t.Sub(timeNow()) + } + return time.Second +} + +type lockedMathRand struct { + sync.Mutex + rnd *mathrand.Rand +} + +func (r *lockedMathRand) int63n(max int64) int64 { + r.Lock() + n := r.rnd.Int63n(max) + r.Unlock() + return n +} + +// for easier testing +var timeNow = time.Now diff -Nru juju-core-2.0~beta15/src/golang.org/x/crypto/acme/autocert/autocert_test.go juju-core-2.0.0/src/golang.org/x/crypto/acme/autocert/autocert_test.go --- juju-core-2.0~beta15/src/golang.org/x/crypto/acme/autocert/autocert_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/golang.org/x/crypto/acme/autocert/autocert_test.go 2016-10-13 14:32:00.000000000 +0000 @@ -0,0 +1,390 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package autocert + +import ( + "crypto" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/rsa" + "crypto/tls" + "crypto/x509" + "crypto/x509/pkix" + "encoding/base64" + "encoding/json" + "fmt" + "html/template" + "io" + "math/big" + "net/http" + "net/http/httptest" + "reflect" + "testing" + "time" + + "golang.org/x/crypto/acme" + "golang.org/x/net/context" +) + +var discoTmpl = template.Must(template.New("disco").Parse(`{ + "new-reg": "{{.}}/new-reg", + "new-authz": "{{.}}/new-authz", + "new-cert": "{{.}}/new-cert" +}`)) + +var authzTmpl = template.Must(template.New("authz").Parse(`{ + "status": "pending", + "challenges": [ + { + "uri": "{{.}}/challenge/1", + "type": "tls-sni-01", + "token": "token-01" + }, + { + "uri": "{{.}}/challenge/2", + "type": "tls-sni-02", + "token": "token-02" + } + ] +}`)) + +type memCache map[string][]byte + +func (m memCache) Get(ctx context.Context, key string) ([]byte, error) { + v, ok := m[key] + if !ok { + return nil, ErrCacheMiss + } + return v, nil +} + +func (m memCache) Put(ctx context.Context, key string, data []byte) error { + m[key] = data + return nil +} + +func (m memCache) Delete(ctx context.Context, key string) error { + delete(m, key) + return nil +} + +func dummyCert(pub interface{}, san ...string) ([]byte, error) { + return dateDummyCert(pub, time.Now(), time.Now().Add(90*24*time.Hour), san...) 
+} + +func dateDummyCert(pub interface{}, start, end time.Time, san ...string) ([]byte, error) { + // use EC key to run faster on 386 + key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + if err != nil { + return nil, err + } + t := &x509.Certificate{ + SerialNumber: big.NewInt(1), + NotBefore: start, + NotAfter: end, + BasicConstraintsValid: true, + KeyUsage: x509.KeyUsageKeyEncipherment, + DNSNames: san, + } + if pub == nil { + pub = &key.PublicKey + } + return x509.CreateCertificate(rand.Reader, t, t, pub, key) +} + +func decodePayload(v interface{}, r io.Reader) error { + var req struct{ Payload string } + if err := json.NewDecoder(r).Decode(&req); err != nil { + return err + } + payload, err := base64.RawURLEncoding.DecodeString(req.Payload) + if err != nil { + return err + } + return json.Unmarshal(payload, v) +} + +func TestGetCertificate(t *testing.T) { + const domain = "example.org" + man := &Manager{Prompt: AcceptTOS} + defer man.stopRenew() + + // echo token-02 | shasum -a 256 + // then divide result in 2 parts separated by dot + tokenCertName := "4e8eb87631187e9ff2153b56b13a4dec.13a35d002e485d60ff37354b32f665d9.token.acme.invalid" + verifyTokenCert := func() { + hello := &tls.ClientHelloInfo{ServerName: tokenCertName} + _, err := man.GetCertificate(hello) + if err != nil { + t.Errorf("verifyTokenCert: GetCertificate(%q): %v", tokenCertName, err) + return + } + } + + // ACME CA server stub + var ca *httptest.Server + ca = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("replay-nonce", "nonce") + if r.Method == "HEAD" { + // a nonce request + return + } + + switch r.URL.Path { + // discovery + case "/": + if err := discoTmpl.Execute(w, ca.URL); err != nil { + t.Fatalf("discoTmpl: %v", err) + } + // client key registration + case "/new-reg": + w.Write([]byte("{}")) + // domain authorization + case "/new-authz": + w.Header().Set("location", ca.URL+"/authz/1") + w.WriteHeader(http.StatusCreated) + 
if err := authzTmpl.Execute(w, ca.URL); err != nil { + t.Fatalf("authzTmpl: %v", err) + } + // accept tls-sni-02 challenge + case "/challenge/2": + verifyTokenCert() + w.Write([]byte("{}")) + // authorization status + case "/authz/1": + w.Write([]byte(`{"status": "valid"}`)) + // cert request + case "/new-cert": + var req struct { + CSR string `json:"csr"` + } + decodePayload(&req, r.Body) + b, _ := base64.RawURLEncoding.DecodeString(req.CSR) + csr, err := x509.ParseCertificateRequest(b) + if err != nil { + t.Fatalf("new-cert: CSR: %v", err) + } + der, err := dummyCert(csr.PublicKey, domain) + if err != nil { + t.Fatalf("new-cert: dummyCert: %v", err) + } + chainUp := fmt.Sprintf("<%s/ca-cert>; rel=up", ca.URL) + w.Header().Set("link", chainUp) + w.WriteHeader(http.StatusCreated) + w.Write(der) + // CA chain cert + case "/ca-cert": + der, err := dummyCert(nil, "ca") + if err != nil { + t.Fatalf("ca-cert: dummyCert: %v", err) + } + w.Write(der) + default: + t.Errorf("unrecognized r.URL.Path: %s", r.URL.Path) + } + })) + defer ca.Close() + + // use EC key to run faster on 386 + key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + if err != nil { + t.Fatal(err) + } + man.Client = &acme.Client{ + Key: key, + DirectoryURL: ca.URL, + } + + // simulate tls.Config.GetCertificate + var tlscert *tls.Certificate + done := make(chan struct{}) + go func() { + hello := &tls.ClientHelloInfo{ServerName: domain} + tlscert, err = man.GetCertificate(hello) + close(done) + }() + select { + case <-time.After(time.Minute): + t.Fatal("man.GetCertificate took too long to return") + case <-done: + } + if err != nil { + t.Fatalf("man.GetCertificate: %v", err) + } + + // verify the tlscert is the same we responded with from the CA stub + if len(tlscert.Certificate) == 0 { + t.Fatal("len(tlscert.Certificate) is 0") + } + cert, err := x509.ParseCertificate(tlscert.Certificate[0]) + if err != nil { + t.Fatalf("x509.ParseCertificate: %v", err) + } + if len(cert.DNSNames) == 0 || 
cert.DNSNames[0] != domain { + t.Errorf("cert.DNSNames = %v; want %q", cert.DNSNames, domain) + } + + // make sure token cert was removed + done = make(chan struct{}) + go func() { + for { + hello := &tls.ClientHelloInfo{ServerName: tokenCertName} + if _, err := man.GetCertificate(hello); err != nil { + break + } + time.Sleep(100 * time.Millisecond) + } + close(done) + }() + select { + case <-time.After(5 * time.Second): + t.Error("token cert was not removed") + case <-done: + } +} + +func TestAccountKeyCache(t *testing.T) { + cache := make(memCache) + m := Manager{Cache: cache} + ctx := context.Background() + k1, err := m.accountKey(ctx) + if err != nil { + t.Fatal(err) + } + k2, err := m.accountKey(ctx) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(k1, k2) { + t.Errorf("account keys don't match: k1 = %#v; k2 = %#v", k1, k2) + } +} + +func TestCache(t *testing.T) { + privKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + if err != nil { + t.Fatal(err) + } + tmpl := &x509.Certificate{ + SerialNumber: big.NewInt(1), + Subject: pkix.Name{CommonName: "example.org"}, + NotAfter: time.Now().Add(time.Hour), + } + pub, err := x509.CreateCertificate(rand.Reader, tmpl, tmpl, &privKey.PublicKey, privKey) + if err != nil { + t.Fatal(err) + } + tlscert := &tls.Certificate{ + Certificate: [][]byte{pub}, + PrivateKey: privKey, + } + + cache := make(memCache) + man := &Manager{Cache: cache} + defer man.stopRenew() + if err := man.cachePut("example.org", tlscert); err != nil { + t.Fatalf("man.cachePut: %v", err) + } + res, err := man.cacheGet("example.org") + if err != nil { + t.Fatalf("man.cacheGet: %v", err) + } + if res == nil { + t.Fatal("res is nil") + } +} + +func TestHostWhitelist(t *testing.T) { + policy := HostWhitelist("example.com", "example.org", "*.example.net") + tt := []struct { + host string + allow bool + }{ + {"example.com", true}, + {"example.org", true}, + {"one.example.com", false}, + {"two.example.org", false}, + 
{"three.example.net", false}, + {"dummy", false}, + } + for i, test := range tt { + err := policy(nil, test.host) + if err != nil && test.allow { + t.Errorf("%d: policy(%q): %v; want nil", i, test.host, err) + } + if err == nil && !test.allow { + t.Errorf("%d: policy(%q): nil; want an error", i, test.host) + } + } +} + +func TestValidCert(t *testing.T) { + key1, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + if err != nil { + t.Fatal(err) + } + key2, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + if err != nil { + t.Fatal(err) + } + key3, err := rsa.GenerateKey(rand.Reader, 512) + if err != nil { + t.Fatal(err) + } + cert1, err := dummyCert(key1.Public(), "example.org") + if err != nil { + t.Fatal(err) + } + cert2, err := dummyCert(key2.Public(), "example.org") + if err != nil { + t.Fatal(err) + } + cert3, err := dummyCert(key3.Public(), "example.org") + if err != nil { + t.Fatal(err) + } + now := time.Now() + early, err := dateDummyCert(key1.Public(), now.Add(time.Hour), now.Add(2*time.Hour), "example.org") + if err != nil { + t.Fatal(err) + } + expired, err := dateDummyCert(key1.Public(), now.Add(-2*time.Hour), now.Add(-time.Hour), "example.org") + if err != nil { + t.Fatal(err) + } + + tt := []struct { + domain string + key crypto.Signer + cert [][]byte + ok bool + }{ + {"example.org", key1, [][]byte{cert1}, true}, + {"example.org", key3, [][]byte{cert3}, true}, + {"example.org", key1, [][]byte{cert1, cert2, cert3}, true}, + {"example.org", key1, [][]byte{cert1, {1}}, false}, + {"example.org", key1, [][]byte{{1}}, false}, + {"example.org", key1, [][]byte{cert2}, false}, + {"example.org", key2, [][]byte{cert1}, false}, + {"example.org", key1, [][]byte{cert3}, false}, + {"example.org", key3, [][]byte{cert1}, false}, + {"example.net", key1, [][]byte{cert1}, false}, + {"example.org", key1, [][]byte{early}, false}, + {"example.org", key1, [][]byte{expired}, false}, + } + for i, test := range tt { + leaf, err := validCert(test.domain, test.cert, 
test.key) + if err != nil && test.ok { + t.Errorf("%d: err = %v", i, err) + } + if err == nil && !test.ok { + t.Errorf("%d: err is nil", i) + } + if err == nil && test.ok && leaf == nil { + t.Errorf("%d: leaf is nil", i) + } + } +} diff -Nru juju-core-2.0~beta15/src/golang.org/x/crypto/acme/autocert/cache.go juju-core-2.0.0/src/golang.org/x/crypto/acme/autocert/cache.go --- juju-core-2.0~beta15/src/golang.org/x/crypto/acme/autocert/cache.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/golang.org/x/crypto/acme/autocert/cache.go 2016-10-13 14:32:00.000000000 +0000 @@ -0,0 +1,130 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package autocert + +import ( + "errors" + "io/ioutil" + "os" + "path/filepath" + + "golang.org/x/net/context" +) + +// ErrCacheMiss is returned when a certificate is not found in cache. +var ErrCacheMiss = errors.New("acme/autocert: certificate cache miss") + +// Cache is used by Manager to store and retrieve previously obtained certificates +// as opaque data. +// +// The key argument of the methods refers to a domain name but need not be an FQDN. +// Cache implementations should not rely on the key naming pattern. +type Cache interface { + // Get returns a certificate data for the specified key. + // If there's no such key, Get returns ErrCacheMiss. + Get(ctx context.Context, key string) ([]byte, error) + + // Put stores the data in the cache under the specified key. + // Inderlying implementations may use any data storage format, + // as long as the reverse operation, Get, results in the original data. + Put(ctx context.Context, key string, data []byte) error + + // Delete removes a certificate data from the cache under the specified key. + // If there's no such key in the cache, Delete returns nil. 
+ Delete(ctx context.Context, key string) error +} + +// DirCache implements Cache using a directory on the local filesystem. +// If the directory does not exist, it will be created with 0700 permissions. +type DirCache string + +// Get reads a certificate data from the specified file name. +func (d DirCache) Get(ctx context.Context, name string) ([]byte, error) { + name = filepath.Join(string(d), name) + var ( + data []byte + err error + done = make(chan struct{}) + ) + go func() { + data, err = ioutil.ReadFile(name) + close(done) + }() + select { + case <-ctx.Done(): + return nil, ctx.Err() + case <-done: + } + if os.IsNotExist(err) { + return nil, ErrCacheMiss + } + return data, err +} + +// Put writes the certificate data to the specified file name. +// The file will be created with 0600 permissions. +func (d DirCache) Put(ctx context.Context, name string, data []byte) error { + if err := os.MkdirAll(string(d), 0700); err != nil { + return err + } + + done := make(chan struct{}) + var err error + go func() { + defer close(done) + var tmp string + if tmp, err = d.writeTempFile(name, data); err != nil { + return + } + // prevent overwriting the file if the context was cancelled + if ctx.Err() != nil { + return // no need to set err + } + name = filepath.Join(string(d), name) + err = os.Rename(tmp, name) + }() + select { + case <-ctx.Done(): + return ctx.Err() + case <-done: + } + return err +} + +// Delete removes the specified file name. +func (d DirCache) Delete(ctx context.Context, name string) error { + name = filepath.Join(string(d), name) + var ( + err error + done = make(chan struct{}) + ) + go func() { + err = os.Remove(name) + close(done) + }() + select { + case <-ctx.Done(): + return ctx.Err() + case <-done: + } + if err != nil && !os.IsNotExist(err) { + return err + } + return nil +} + +// writeTempFile writes b to a temporary file, closes the file and returns its path. 
+func (d DirCache) writeTempFile(prefix string, b []byte) (string, error) { + // TempFile uses 0600 permissions + f, err := ioutil.TempFile(string(d), prefix) + if err != nil { + return "", err + } + if _, err := f.Write(b); err != nil { + f.Close() + return "", err + } + return f.Name(), f.Close() +} diff -Nru juju-core-2.0~beta15/src/golang.org/x/crypto/acme/autocert/cache_test.go juju-core-2.0.0/src/golang.org/x/crypto/acme/autocert/cache_test.go --- juju-core-2.0~beta15/src/golang.org/x/crypto/acme/autocert/cache_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/golang.org/x/crypto/acme/autocert/cache_test.go 2016-10-13 14:32:00.000000000 +0000 @@ -0,0 +1,58 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package autocert + +import ( + "io/ioutil" + "os" + "path/filepath" + "reflect" + "testing" + + "golang.org/x/net/context" +) + +// make sure DirCache satisfies Cache interface +var _ Cache = DirCache("/") + +func TestDirCache(t *testing.T) { + dir, err := ioutil.TempDir("", "autocert") + if err != nil { + t.Fatal(err) + } + dir = filepath.Join(dir, "certs") // a nonexistent dir + cache := DirCache(dir) + ctx := context.Background() + + // test cache miss + if _, err := cache.Get(ctx, "nonexistent"); err != ErrCacheMiss { + t.Errorf("get: %v; want ErrCacheMiss", err) + } + + // test put/get + b1 := []byte{1} + if err := cache.Put(ctx, "dummy", b1); err != nil { + t.Fatalf("put: %v", err) + } + b2, err := cache.Get(ctx, "dummy") + if err != nil { + t.Fatalf("get: %v", err) + } + if !reflect.DeepEqual(b1, b2) { + t.Errorf("b1 = %v; want %v", b1, b2) + } + name := filepath.Join(dir, "dummy") + if _, err := os.Stat(name); err != nil { + t.Error(err) + } + + // test delete + if err := cache.Delete(ctx, "dummy"); err != nil { + t.Fatalf("delete: %v", err) + } + if _, err := cache.Get(ctx, "dummy"); err != ErrCacheMiss { + 
t.Errorf("get: %v; want ErrCacheMiss", err) + } +} diff -Nru juju-core-2.0~beta15/src/golang.org/x/crypto/acme/autocert/renewal.go juju-core-2.0.0/src/golang.org/x/crypto/acme/autocert/renewal.go --- juju-core-2.0~beta15/src/golang.org/x/crypto/acme/autocert/renewal.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/golang.org/x/crypto/acme/autocert/renewal.go 2016-10-13 14:32:00.000000000 +0000 @@ -0,0 +1,125 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package autocert + +import ( + "crypto" + "sync" + "time" + + "golang.org/x/net/context" +) + +// maxRandRenew is a maximum deviation from Manager.RenewBefore. +const maxRandRenew = time.Hour + +// domainRenewal tracks the state used by the periodic timers +// renewing a single domain's cert. +type domainRenewal struct { + m *Manager + domain string + key crypto.Signer + + timerMu sync.Mutex + timer *time.Timer +} + +// start starts a cert renewal timer at the time +// defined by the certificate expiration time exp. +// +// If the timer is already started, calling start is a noop. +func (dr *domainRenewal) start(exp time.Time) { + dr.timerMu.Lock() + defer dr.timerMu.Unlock() + if dr.timer != nil { + return + } + dr.timer = time.AfterFunc(dr.next(exp), dr.renew) +} + +// stop stops the cert renewal timer. +// If the timer is already stopped, calling stop is a noop. +func (dr *domainRenewal) stop() { + dr.timerMu.Lock() + defer dr.timerMu.Unlock() + if dr.timer == nil { + return + } + dr.timer.Stop() + dr.timer = nil +} + +// renew is called periodically by a timer. +// The first renew call is kicked off by dr.start. +func (dr *domainRenewal) renew() { + dr.timerMu.Lock() + defer dr.timerMu.Unlock() + if dr.timer == nil { + return + } + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute) + defer cancel() + // TODO: rotate dr.key at some point? 
+ next, err := dr.do(ctx) + if err != nil { + next = maxRandRenew / 2 + next += time.Duration(pseudoRand.int63n(int64(next))) + } + dr.timer = time.AfterFunc(next, dr.renew) + testDidRenewLoop(next, err) +} + +// do is similar to Manager.createCert but it doesn't lock a Manager.state item. +// Instead, it requests a new certificate independently and, upon success, +// replaces dr.m.state item with a new one and updates cache for the given domain. +// +// It may return immediately if the expiration date of the currently cached cert +// is far enough in the future. +// +// The returned value is a time interval after which the renewal should occur again. +func (dr *domainRenewal) do(ctx context.Context) (time.Duration, error) { + // a race is likely unavoidable in a distributed environment + // but we try nonetheless + if tlscert, err := dr.m.cacheGet(dr.domain); err == nil { + next := dr.next(tlscert.Leaf.NotAfter) + if next > dr.m.renewBefore()+maxRandRenew { + return next, nil + } + } + + der, leaf, err := dr.m.authorizedCert(ctx, dr.key, dr.domain) + if err != nil { + return 0, err + } + state := &certState{ + key: dr.key, + cert: der, + leaf: leaf, + } + tlscert, err := state.tlscert() + if err != nil { + return 0, err + } + dr.m.cachePut(dr.domain, tlscert) + dr.m.stateMu.Lock() + defer dr.m.stateMu.Unlock() + // m.state is guaranteed to be non-nil at this point + dr.m.state[dr.domain] = state + return dr.next(leaf.NotAfter), nil +} + +func (dr *domainRenewal) next(expiry time.Time) time.Duration { + d := expiry.Sub(timeNow()) - dr.m.renewBefore() + // add a bit of randomness to renew deadline + n := pseudoRand.int63n(int64(maxRandRenew)) + d -= time.Duration(n) + if d < 0 { + return 0 + } + return d +} + +var testDidRenewLoop = func(next time.Duration, err error) {} diff -Nru juju-core-2.0~beta15/src/golang.org/x/crypto/acme/autocert/renewal_test.go juju-core-2.0.0/src/golang.org/x/crypto/acme/autocert/renewal_test.go --- 
juju-core-2.0~beta15/src/golang.org/x/crypto/acme/autocert/renewal_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/golang.org/x/crypto/acme/autocert/renewal_test.go 2016-10-13 14:32:00.000000000 +0000 @@ -0,0 +1,190 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package autocert + +import ( + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/tls" + "crypto/x509" + "encoding/base64" + "fmt" + "net/http" + "net/http/httptest" + "testing" + "time" + + "golang.org/x/crypto/acme" +) + +func TestRenewalNext(t *testing.T) { + now := time.Now() + timeNow = func() time.Time { return now } + defer func() { timeNow = time.Now }() + + man := &Manager{RenewBefore: 7 * 24 * time.Hour} + defer man.stopRenew() + tt := []struct { + expiry time.Time + min, max time.Duration + }{ + {now.Add(90 * 24 * time.Hour), 83*24*time.Hour - maxRandRenew, 83 * 24 * time.Hour}, + {now.Add(time.Hour), 0, 1}, + {now, 0, 1}, + {now.Add(-time.Hour), 0, 1}, + } + + dr := &domainRenewal{m: man} + for i, test := range tt { + next := dr.next(test.expiry) + if next < test.min || test.max < next { + t.Errorf("%d: next = %v; want between %v and %v", i, next, test.min, test.max) + } + } +} + +func TestRenewFromCache(t *testing.T) { + const domain = "example.org" + + // ACME CA server stub + var ca *httptest.Server + ca = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("replay-nonce", "nonce") + if r.Method == "HEAD" { + // a nonce request + return + } + + switch r.URL.Path { + // discovery + case "/": + if err := discoTmpl.Execute(w, ca.URL); err != nil { + t.Fatalf("discoTmpl: %v", err) + } + // client key registration + case "/new-reg": + w.Write([]byte("{}")) + // domain authorization + case "/new-authz": + w.Header().Set("location", ca.URL+"/authz/1") + w.WriteHeader(http.StatusCreated) + 
w.Write([]byte(`{"status": "valid"}`)) + // cert request + case "/new-cert": + var req struct { + CSR string `json:"csr"` + } + decodePayload(&req, r.Body) + b, _ := base64.RawURLEncoding.DecodeString(req.CSR) + csr, err := x509.ParseCertificateRequest(b) + if err != nil { + t.Fatalf("new-cert: CSR: %v", err) + } + der, err := dummyCert(csr.PublicKey, domain) + if err != nil { + t.Fatalf("new-cert: dummyCert: %v", err) + } + chainUp := fmt.Sprintf("<%s/ca-cert>; rel=up", ca.URL) + w.Header().Set("link", chainUp) + w.WriteHeader(http.StatusCreated) + w.Write(der) + // CA chain cert + case "/ca-cert": + der, err := dummyCert(nil, "ca") + if err != nil { + t.Fatalf("ca-cert: dummyCert: %v", err) + } + w.Write(der) + default: + t.Errorf("unrecognized r.URL.Path: %s", r.URL.Path) + } + })) + defer ca.Close() + + // use EC key to run faster on 386 + key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + if err != nil { + t.Fatal(err) + } + man := &Manager{ + Prompt: AcceptTOS, + Cache: make(memCache), + RenewBefore: 24 * time.Hour, + Client: &acme.Client{ + Key: key, + DirectoryURL: ca.URL, + }, + } + defer man.stopRenew() + + // cache an almost expired cert + now := time.Now() + cert, err := dateDummyCert(key.Public(), now.Add(-2*time.Hour), now.Add(time.Minute), domain) + if err != nil { + t.Fatal(err) + } + tlscert := &tls.Certificate{PrivateKey: key, Certificate: [][]byte{cert}} + if err := man.cachePut(domain, tlscert); err != nil { + t.Fatal(err) + } + + // veriy the renewal happened + defer func() { + testDidRenewLoop = func(next time.Duration, err error) {} + }() + done := make(chan struct{}) + testDidRenewLoop = func(next time.Duration, err error) { + defer close(done) + if err != nil { + t.Errorf("testDidRenewLoop: %v", err) + } + // Next should be about 90 days: + // dummyCert creates 90days expiry + account for man.RenewBefore. + // Previous expiration was within 1 min. 
+ future := 88 * 24 * time.Hour + if next < future { + t.Errorf("testDidRenewLoop: next = %v; want >= %v", next, future) + } + + // ensure the new cert is cached + after := time.Now().Add(future) + tlscert, err := man.cacheGet(domain) + if err != nil { + t.Fatalf("man.cacheGet: %v", err) + } + if !tlscert.Leaf.NotAfter.After(after) { + t.Errorf("cache leaf.NotAfter = %v; want > %v", tlscert.Leaf.NotAfter, after) + } + + // verify the old cert is also replaced in memory + man.stateMu.Lock() + defer man.stateMu.Unlock() + s := man.state[domain] + if s == nil { + t.Fatalf("m.state[%q] is nil", domain) + } + tlscert, err = s.tlscert() + if err != nil { + t.Fatalf("s.tlscert: %v", err) + } + if !tlscert.Leaf.NotAfter.After(after) { + t.Errorf("state leaf.NotAfter = %v; want > %v", tlscert.Leaf.NotAfter, after) + } + } + + // trigger renew + hello := &tls.ClientHelloInfo{ServerName: domain} + if _, err := man.GetCertificate(hello); err != nil { + t.Fatal(err) + } + + // wait for renew loop + select { + case <-time.After(10 * time.Second): + t.Fatal("renew took too long to occur") + case <-done: + } +} diff -Nru juju-core-2.0~beta15/src/golang.org/x/crypto/acme/jws.go juju-core-2.0.0/src/golang.org/x/crypto/acme/jws.go --- juju-core-2.0~beta15/src/golang.org/x/crypto/acme/jws.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/golang.org/x/crypto/acme/jws.go 2016-10-13 14:32:00.000000000 +0000 @@ -0,0 +1,153 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package acme + +import ( + "crypto" + "crypto/ecdsa" + "crypto/rand" + "crypto/rsa" + "crypto/sha256" + _ "crypto/sha512" // need for EC keys + "encoding/base64" + "encoding/json" + "fmt" + "math/big" +) + +// jwsEncodeJSON signs claimset using provided key and a nonce. +// The result is serialized in JSON format. +// See https://tools.ietf.org/html/rfc7515#section-7. 
+func jwsEncodeJSON(claimset interface{}, key crypto.Signer, nonce string) ([]byte, error) { + jwk, err := jwkEncode(key.Public()) + if err != nil { + return nil, err + } + alg, sha := jwsHasher(key) + if alg == "" || !sha.Available() { + return nil, ErrUnsupportedKey + } + phead := fmt.Sprintf(`{"alg":%q,"jwk":%s,"nonce":%q}`, alg, jwk, nonce) + phead = base64.RawURLEncoding.EncodeToString([]byte(phead)) + cs, err := json.Marshal(claimset) + if err != nil { + return nil, err + } + payload := base64.RawURLEncoding.EncodeToString(cs) + hash := sha.New() + hash.Write([]byte(phead + "." + payload)) + sig, err := jwsSign(key, sha, hash.Sum(nil)) + if err != nil { + return nil, err + } + + enc := struct { + Protected string `json:"protected"` + Payload string `json:"payload"` + Sig string `json:"signature"` + }{ + Protected: phead, + Payload: payload, + Sig: base64.RawURLEncoding.EncodeToString(sig), + } + return json.Marshal(&enc) +} + +// jwkEncode encodes public part of an RSA or ECDSA key into a JWK. +// The result is also suitable for creating a JWK thumbprint. +// https://tools.ietf.org/html/rfc7517 +func jwkEncode(pub crypto.PublicKey) (string, error) { + switch pub := pub.(type) { + case *rsa.PublicKey: + // https://tools.ietf.org/html/rfc7518#section-6.3.1 + n := pub.N + e := big.NewInt(int64(pub.E)) + // Field order is important. + // See https://tools.ietf.org/html/rfc7638#section-3.3 for details. + return fmt.Sprintf(`{"e":"%s","kty":"RSA","n":"%s"}`, + base64.RawURLEncoding.EncodeToString(e.Bytes()), + base64.RawURLEncoding.EncodeToString(n.Bytes()), + ), nil + case *ecdsa.PublicKey: + // https://tools.ietf.org/html/rfc7518#section-6.2.1 + p := pub.Curve.Params() + n := p.BitSize / 8 + if p.BitSize%8 != 0 { + n++ + } + x := pub.X.Bytes() + if n > len(x) { + x = append(make([]byte, n-len(x)), x...) + } + y := pub.Y.Bytes() + if n > len(y) { + y = append(make([]byte, n-len(y)), y...) + } + // Field order is important. 
+ // See https://tools.ietf.org/html/rfc7638#section-3.3 for details. + return fmt.Sprintf(`{"crv":"%s","kty":"EC","x":"%s","y":"%s"}`, + p.Name, + base64.RawURLEncoding.EncodeToString(x), + base64.RawURLEncoding.EncodeToString(y), + ), nil + } + return "", ErrUnsupportedKey +} + +// jwsSign signs the digest using the given key. +// It returns ErrUnsupportedKey if the key type is unknown. +// The hash is used only for RSA keys. +func jwsSign(key crypto.Signer, hash crypto.Hash, digest []byte) ([]byte, error) { + switch key := key.(type) { + case *rsa.PrivateKey: + return key.Sign(rand.Reader, digest, hash) + case *ecdsa.PrivateKey: + r, s, err := ecdsa.Sign(rand.Reader, key, digest) + if err != nil { + return nil, err + } + rb, sb := r.Bytes(), s.Bytes() + size := key.Params().BitSize / 8 + if size%8 > 0 { + size++ + } + sig := make([]byte, size*2) + copy(sig[size-len(rb):], rb) + copy(sig[size*2-len(sb):], sb) + return sig, nil + } + return nil, ErrUnsupportedKey +} + +// jwsHasher indicates suitable JWS algorithm name and a hash function +// to use for signing a digest with the provided key. +// It returns ("", 0) if the key is not supported. +func jwsHasher(key crypto.Signer) (string, crypto.Hash) { + switch key := key.(type) { + case *rsa.PrivateKey: + return "RS256", crypto.SHA256 + case *ecdsa.PrivateKey: + switch key.Params().Name { + case "P-256": + return "ES256", crypto.SHA256 + case "P-384": + return "ES384", crypto.SHA384 + case "P-512": + return "ES512", crypto.SHA512 + } + } + return "", 0 +} + +// JWKThumbprint creates a JWK thumbprint out of pub +// as specified in https://tools.ietf.org/html/rfc7638. 
+func JWKThumbprint(pub crypto.PublicKey) (string, error) { + jwk, err := jwkEncode(pub) + if err != nil { + return "", err + } + b := sha256.Sum256([]byte(jwk)) + return base64.RawURLEncoding.EncodeToString(b[:]), nil +} diff -Nru juju-core-2.0~beta15/src/golang.org/x/crypto/acme/jws_test.go juju-core-2.0.0/src/golang.org/x/crypto/acme/jws_test.go --- juju-core-2.0~beta15/src/golang.org/x/crypto/acme/jws_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/golang.org/x/crypto/acme/jws_test.go 2016-10-13 14:32:00.000000000 +0000 @@ -0,0 +1,266 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package acme + +import ( + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rsa" + "crypto/x509" + "encoding/base64" + "encoding/json" + "encoding/pem" + "math/big" + "testing" +) + +const testKeyPEM = ` +-----BEGIN RSA PRIVATE KEY----- +MIIEowIBAAKCAQEA4xgZ3eRPkwoRvy7qeRUbmMDe0V+xH9eWLdu0iheeLlrmD2mq +WXfP9IeSKApbn34g8TuAS9g5zhq8ELQ3kmjr+KV86GAMgI6VAcGlq3QrzpTCf/30 +Ab7+zawrfRaFONa1HwEzPY1KHnGVkxJc85gNkwYI9SY2RHXtvln3zs5wITNrdosq +EXeaIkVYBEhbhNu54pp3kxo6TuWLi9e6pXeWetEwmlBwtWZlPoib2j3TxLBksKZf +oyFyek380mHgJAumQ/I2fjj98/97mk3ihOY4AgVdCDj1z/GCoZkG5Rq7nbCGyosy +KWyDX00Zs+nNqVhoLeIvXC4nnWdJMZ6rogxyQQIDAQABAoIBACIEZTOI1Kao9nmV +9IeIsuaR1Y61b9neOF/MLmIVIZu+AAJFCMB4Iw11FV6sFodwpEyeZhx2WkpWVN+H +r19eGiLX3zsL0DOdqBJoSIHDWCCMxgnYJ6nvS0nRxX3qVrBp8R2g12Ub+gNPbmFm +ecf/eeERIVxfifd9VsyRu34eDEvcmKFuLYbElFcPh62xE3x12UZvV/sN7gXbawpP +G+w255vbE5MoaKdnnO83cTFlcHvhn24M/78qP7Te5OAeelr1R89kYxQLpuGe4fbS +zc6E3ym5Td6urDetGGrSY1Eu10/8sMusX+KNWkm+RsBRbkyKq72ks/qKpOxOa+c6 +9gm+Y8ECgYEA/iNUyg1ubRdH11p82l8KHtFC1DPE0V1gSZsX29TpM5jS4qv46K+s +8Ym1zmrORM8x+cynfPx1VQZQ34EYeCMIX212ryJ+zDATl4NE0I4muMvSiH9vx6Xc +7FmhNnaYzPsBL5Tm9nmtQuP09YEn8poiOJFiDs/4olnD5ogA5O4THGkCgYEA5MIL +qWYBUuqbEWLRtMruUtpASclrBqNNsJEsMGbeqBJmoMxdHeSZckbLOrqm7GlMyNRJ 
+Ne/5uWRGSzaMYuGmwsPpERzqEvYFnSrpjW5YtXZ+JtxFXNVfm9Z1gLLgvGpOUCIU +RbpoDckDe1vgUuk3y5+DjZihs+rqIJ45XzXTzBkCgYBWuf3segruJZy5rEKhTv+o +JqeUvRn0jNYYKFpLBeyTVBrbie6GkbUGNIWbrK05pC+c3K9nosvzuRUOQQL1tJbd +4gA3oiD9U4bMFNr+BRTHyZ7OQBcIXdz3t1qhuHVKtnngIAN1p25uPlbRFUNpshnt +jgeVoHlsBhApcs5DUc+pyQKBgDzeHPg/+g4z+nrPznjKnktRY1W+0El93kgi+J0Q +YiJacxBKEGTJ1MKBb8X6sDurcRDm22wMpGfd9I5Cv2v4GsUsF7HD/cx5xdih+G73 +c4clNj/k0Ff5Nm1izPUno4C+0IOl7br39IPmfpSuR6wH/h6iHQDqIeybjxyKvT1G +N0rRAoGBAKGD+4ZI/E1MoJ5CXB8cDDMHagbE3cq/DtmYzE2v1DFpQYu5I4PCm5c7 +EQeIP6dZtv8IMgtGIb91QX9pXvP0aznzQKwYIA8nZgoENCPfiMTPiEDT9e/0lObO +9XWsXpbSTsRPj0sv1rB+UzBJ0PgjK4q2zOF0sNo7b1+6nlM3BWPx +-----END RSA PRIVATE KEY----- +` + +// This thumbprint is for the testKey defined above. +const testKeyThumbprint = "6nicxzh6WETQlrvdchkz-U3e3DOQZ4heJKU63rfqMqQ" + +const ( + // openssl ecparam -name secp256k1 -genkey -noout + testKeyECPEM = ` +-----BEGIN EC PRIVATE KEY----- +MHcCAQEEIK07hGLr0RwyUdYJ8wbIiBS55CjnkMD23DWr+ccnypWLoAoGCCqGSM49 +AwEHoUQDQgAE5lhEug5xK4xBDZ2nAbaxLtaLiv85bxJ7ePd1dkO23HThqIrvawF5 +QAaS/RNouybCiRhRjI3EaxLkQwgrCw0gqQ== +-----END EC PRIVATE KEY----- +` + // 1. opnessl ec -in key.pem -noout -text + // 2. remove first byte, 04 (the header); the rest is X and Y + // 3. 
covert each with: echo | xxd -r -p | base64 | tr -d '=' | tr '/+' '_-' + testKeyECPubX = "5lhEug5xK4xBDZ2nAbaxLtaLiv85bxJ7ePd1dkO23HQ" + testKeyECPubY = "4aiK72sBeUAGkv0TaLsmwokYUYyNxGsS5EMIKwsNIKk" + // echo -n '{"crv":"P-256","kty":"EC","x":"","y":""}' | \ + // openssl dgst -binary -sha256 | base64 | tr -d '=' | tr '/+' '_-' + testKeyECThumbprint = "zedj-Bd1Zshp8KLePv2MB-lJ_Hagp7wAwdkA0NUTniU" +) + +var ( + testKey *rsa.PrivateKey + testKeyEC *ecdsa.PrivateKey +) + +func init() { + d, _ := pem.Decode([]byte(testKeyPEM)) + if d == nil { + panic("no block found in testKeyPEM") + } + var err error + testKey, err = x509.ParsePKCS1PrivateKey(d.Bytes) + if err != nil { + panic(err.Error()) + } + + if d, _ = pem.Decode([]byte(testKeyECPEM)); d == nil { + panic("no block found in testKeyECPEM") + } + testKeyEC, err = x509.ParseECPrivateKey(d.Bytes) + if err != nil { + panic(err.Error()) + } +} + +func TestJWSEncodeJSON(t *testing.T) { + claims := struct{ Msg string }{"Hello JWS"} + // JWS signed with testKey and "nonce" as the nonce value + // JSON-serialized JWS fields are split for easier testing + const ( + // {"alg":"RS256","jwk":{"e":"AQAB","kty":"RSA","n":"..."},"nonce":"nonce"} + protected = "eyJhbGciOiJSUzI1NiIsImp3ayI6eyJlIjoiQVFBQiIsImt0eSI6" + + "IlJTQSIsIm4iOiI0eGdaM2VSUGt3b1J2eTdxZVJVYm1NRGUwVi14" + + "SDllV0xkdTBpaGVlTGxybUQybXFXWGZQOUllU0tBcGJuMzRnOFR1" + + "QVM5ZzV6aHE4RUxRM2ttanItS1Y4NkdBTWdJNlZBY0dscTNRcnpw" + + "VENmXzMwQWI3LXphd3JmUmFGT05hMUh3RXpQWTFLSG5HVmt4SmM4" + + "NWdOa3dZSTlTWTJSSFh0dmxuM3pzNXdJVE5yZG9zcUVYZWFJa1ZZ" + + "QkVoYmhOdTU0cHAza3hvNlR1V0xpOWU2cFhlV2V0RXdtbEJ3dFda" + + "bFBvaWIyajNUeExCa3NLWmZveUZ5ZWszODBtSGdKQXVtUV9JMmZq" + + "ajk4Xzk3bWszaWhPWTRBZ1ZkQ0RqMXpfR0NvWmtHNVJxN25iQ0d5" + + "b3N5S1d5RFgwMFpzLW5OcVZob0xlSXZYQzRubldkSk1aNnJvZ3h5" + + "UVEifSwibm9uY2UiOiJub25jZSJ9" + // {"Msg":"Hello JWS"} + payload = "eyJNc2ciOiJIZWxsbyBKV1MifQ" + signature = "eAGUikStX_UxyiFhxSLMyuyBcIB80GeBkFROCpap2sW3EmkU_ggF" + + 
"knaQzxrTfItICSAXsCLIquZ5BbrSWA_4vdEYrwWtdUj7NqFKjHRa" + + "zpLHcoR7r1rEHvkoP1xj49lS5fc3Wjjq8JUhffkhGbWZ8ZVkgPdC" + + "4tMBWiQDoth-x8jELP_3LYOB_ScUXi2mETBawLgOT2K8rA0Vbbmx" + + "hWNlOWuUf-8hL5YX4IOEwsS8JK_TrTq5Zc9My0zHJmaieqDV0UlP" + + "k0onFjPFkGm7MrPSgd0MqRG-4vSAg2O4hDo7rKv4n8POjjXlNQvM" + + "9IPLr8qZ7usYBKhEGwX3yq_eicAwBw" + ) + + b, err := jwsEncodeJSON(claims, testKey, "nonce") + if err != nil { + t.Fatal(err) + } + var jws struct{ Protected, Payload, Signature string } + if err := json.Unmarshal(b, &jws); err != nil { + t.Fatal(err) + } + if jws.Protected != protected { + t.Errorf("protected:\n%s\nwant:\n%s", jws.Protected, protected) + } + if jws.Payload != payload { + t.Errorf("payload:\n%s\nwant:\n%s", jws.Payload, payload) + } + if jws.Signature != signature { + t.Errorf("signature:\n%s\nwant:\n%s", jws.Signature, signature) + } +} + +func TestJWSEncodeJSONEC(t *testing.T) { + claims := struct{ Msg string }{"Hello JWS"} + + b, err := jwsEncodeJSON(claims, testKeyEC, "nonce") + if err != nil { + t.Fatal(err) + } + var jws struct{ Protected, Payload, Signature string } + if err := json.Unmarshal(b, &jws); err != nil { + t.Fatal(err) + } + + if b, err = base64.RawURLEncoding.DecodeString(jws.Protected); err != nil { + t.Fatalf("jws.Protected: %v", err) + } + var head struct { + Alg string + Nonce string + JWK struct { + Crv string + Kty string + X string + Y string + } `json:"jwk"` + } + if err := json.Unmarshal(b, &head); err != nil { + t.Fatalf("jws.Protected: %v", err) + } + if head.Alg != "ES256" { + t.Errorf("head.Alg = %q; want ES256", head.Alg) + } + if head.Nonce != "nonce" { + t.Errorf("head.Nonce = %q; want nonce", head.Nonce) + } + if head.JWK.Crv != "P-256" { + t.Errorf("head.JWK.Crv = %q; want P-256", head.JWK.Crv) + } + if head.JWK.Kty != "EC" { + t.Errorf("head.JWK.Kty = %q; want EC", head.JWK.Kty) + } + if head.JWK.X != testKeyECPubX { + t.Errorf("head.JWK.X = %q; want %q", head.JWK.X, testKeyECPubX) + } + if head.JWK.Y != testKeyECPubY { + 
t.Errorf("head.JWK.Y = %q; want %q", head.JWK.Y, testKeyECPubY) + } +} + +func TestJWKThumbprintRSA(t *testing.T) { + // Key example from RFC 7638 + const base64N = "0vx7agoebGcQSuuPiLJXZptN9nndrQmbXEps2aiAFbWhM78LhWx4cbbfAAt" + + "VT86zwu1RK7aPFFxuhDR1L6tSoc_BJECPebWKRXjBZCiFV4n3oknjhMstn6" + + "4tZ_2W-5JsGY4Hc5n9yBXArwl93lqt7_RN5w6Cf0h4QyQ5v-65YGjQR0_FD" + + "W2QvzqY368QQMicAtaSqzs8KJZgnYb9c7d0zgdAZHzu6qMQvRL5hajrn1n9" + + "1CbOpbISD08qNLyrdkt-bFTWhAI4vMQFh6WeZu0fM4lFd2NcRwr3XPksINH" + + "aQ-G_xBniIqbw0Ls1jF44-csFCur-kEgU8awapJzKnqDKgw" + const base64E = "AQAB" + const expected = "NzbLsXh8uDCcd-6MNwXF4W_7noWXFZAfHkxZsRGC9Xs" + + b, err := base64.RawURLEncoding.DecodeString(base64N) + if err != nil { + t.Fatalf("Error parsing example key N: %v", err) + } + n := new(big.Int).SetBytes(b) + + b, err = base64.RawURLEncoding.DecodeString(base64E) + if err != nil { + t.Fatalf("Error parsing example key E: %v", err) + } + e := new(big.Int).SetBytes(b) + + pub := &rsa.PublicKey{N: n, E: int(e.Uint64())} + th, err := JWKThumbprint(pub) + if err != nil { + t.Error(err) + } + if th != expected { + t.Errorf("thumbprint = %q; want %q", th, expected) + } +} + +func TestJWKThumbprintEC(t *testing.T) { + // Key example from RFC 7520 + // expected was computed with + // echo -n '{"crv":"P-521","kty":"EC","x":"","y":""}' | \ + // openssl dgst -binary -sha256 | \ + // base64 | \ + // tr -d '=' | tr '/+' '_-' + const ( + base64X = "AHKZLLOsCOzz5cY97ewNUajB957y-C-U88c3v13nmGZx6sYl_oJXu9A5RkT" + + "KqjqvjyekWF-7ytDyRXYgCF5cj0Kt" + base64Y = "AdymlHvOiLxXkEhayXQnNCvDX4h9htZaCJN34kfmC6pV5OhQHiraVySsUda" + + "QkAgDPrwQrJmbnX9cwlGfP-HqHZR1" + expected = "dHri3SADZkrush5HU_50AoRhcKFryN-PI6jPBtPL55M" + ) + + b, err := base64.RawURLEncoding.DecodeString(base64X) + if err != nil { + t.Fatalf("Error parsing example key X: %v", err) + } + x := new(big.Int).SetBytes(b) + + b, err = base64.RawURLEncoding.DecodeString(base64Y) + if err != nil { + t.Fatalf("Error parsing example key Y: %v", err) + } 
+ y := new(big.Int).SetBytes(b) + + pub := &ecdsa.PublicKey{Curve: elliptic.P521(), X: x, Y: y} + th, err := JWKThumbprint(pub) + if err != nil { + t.Error(err) + } + if th != expected { + t.Errorf("thumbprint = %q; want %q", th, expected) + } +} + +func TestJWKThumbprintErrUnsupportedKey(t *testing.T) { + _, err := JWKThumbprint(struct{}{}) + if err != ErrUnsupportedKey { + t.Errorf("err = %q; want %q", err, ErrUnsupportedKey) + } +} diff -Nru juju-core-2.0~beta15/src/golang.org/x/crypto/acme/types.go juju-core-2.0.0/src/golang.org/x/crypto/acme/types.go --- juju-core-2.0~beta15/src/golang.org/x/crypto/acme/types.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/golang.org/x/crypto/acme/types.go 2016-10-13 14:32:00.000000000 +0000 @@ -0,0 +1,209 @@ +package acme + +import ( + "errors" + "fmt" + "net/http" +) + +// ACME server response statuses used to describe Authorization and Challenge states. +const ( + StatusUnknown = "unknown" + StatusPending = "pending" + StatusProcessing = "processing" + StatusValid = "valid" + StatusInvalid = "invalid" + StatusRevoked = "revoked" +) + +// CRLReasonCode identifies the reason for a certificate revocation. +type CRLReasonCode int + +// CRL reason codes as defined in RFC 5280. +const ( + CRLReasonUnspecified CRLReasonCode = 0 + CRLReasonKeyCompromise CRLReasonCode = 1 + CRLReasonCACompromise CRLReasonCode = 2 + CRLReasonAffiliationChanged CRLReasonCode = 3 + CRLReasonSuperseded CRLReasonCode = 4 + CRLReasonCessationOfOperation CRLReasonCode = 5 + CRLReasonCertificateHold CRLReasonCode = 6 + CRLReasonRemoveFromCRL CRLReasonCode = 8 + CRLReasonPrivilegeWithdrawn CRLReasonCode = 9 + CRLReasonAACompromise CRLReasonCode = 10 +) + +var ( + // ErrAuthorizationFailed indicates that an authorization for an identifier + // did not succeed. + ErrAuthorizationFailed = errors.New("acme: identifier authorization failed") + + // ErrUnsupportedKey is returned when an unsupported key type is encountered. 
+ ErrUnsupportedKey = errors.New("acme: unknown key type; only RSA and ECDSA are supported") +) + +// Error is an ACME error, defined in Problem Details for HTTP APIs doc +// http://tools.ietf.org/html/draft-ietf-appsawg-http-problem. +type Error struct { + // StatusCode is The HTTP status code generated by the origin server. + StatusCode int + // ProblemType is a URI reference that identifies the problem type, + // typically in a "urn:acme:error:xxx" form. + ProblemType string + // Detail is a human-readable explanation specific to this occurrence of the problem. + Detail string + // Header is the original server error response headers. + Header http.Header +} + +func (e *Error) Error() string { + return fmt.Sprintf("%d %s: %s", e.StatusCode, e.ProblemType, e.Detail) +} + +// Account is a user account. It is associated with a private key. +type Account struct { + // URI is the account unique ID, which is also a URL used to retrieve + // account data from the CA. + URI string + + // Contact is a slice of contact info used during registration. + Contact []string + + // The terms user has agreed to. + // A value not matching CurrentTerms indicates that the user hasn't agreed + // to the actual Terms of Service of the CA. + AgreedTerms string + + // Actual terms of a CA. + CurrentTerms string + + // Authz is the authorization URL used to initiate a new authz flow. + Authz string + + // Authorizations is a URI from which a list of authorizations + // granted to this account can be fetched via a GET request. + Authorizations string + + // Certificates is a URI from which a list of certificates + // issued for this account can be fetched via a GET request. + Certificates string +} + +// Directory is ACME server discovery data. +type Directory struct { + // RegURL is an account endpoint URL, allowing for creating new + // and modifying existing accounts. + RegURL string + + // AuthzURL is used to initiate Identifier Authorization flow. 
+ AuthzURL string + + // CertURL is a new certificate issuance endpoint URL. + CertURL string + + // RevokeURL is used to initiate a certificate revocation flow. + RevokeURL string + + // Term is a URI identifying the current terms of service. + Terms string + + // Website is an HTTP or HTTPS URL locating a website + // providing more information about the ACME server. + Website string + + // CAA consists of lowercase hostname elements, which the ACME server + // recognises as referring to itself for the purposes of CAA record validation + // as defined in RFC6844. + CAA []string +} + +// Challenge encodes a returned CA challenge. +type Challenge struct { + // Type is the challenge type, e.g. "http-01", "tls-sni-02", "dns-01". + Type string + + // URI is where a challenge response can be posted to. + URI string + + // Token is a random value that uniquely identifies the challenge. + Token string + + // Status identifies the status of this challenge. + Status string +} + +// Authorization encodes an authorization response. +type Authorization struct { + // URI uniquely identifies a authorization. + URI string + + // Status identifies the status of an authorization. + Status string + + // Identifier is what the account is authorized to represent. + Identifier AuthzID + + // Challenges that the client needs to fulfill in order to prove possession + // of the identifier (for pending authorizations). + // For final authorizations, the challenges that were used. + Challenges []*Challenge + + // A collection of sets of challenges, each of which would be sufficient + // to prove possession of the identifier. + // Clients must complete a set of challenges that covers at least one set. + // Challenges are identified by their indices in the challenges array. + // If this field is empty, the client needs to complete all challenges. + Combinations [][]int +} + +// AuthzID is an identifier that an account is authorized to represent. 
+type AuthzID struct { + Type string // The type of identifier, e.g. "dns". + Value string // The identifier itself, e.g. "example.org". +} + +// wireAuthz is ACME JSON representation of Authorization objects. +type wireAuthz struct { + Status string + Challenges []wireChallenge + Combinations [][]int + Identifier struct { + Type string + Value string + } +} + +func (z *wireAuthz) authorization(uri string) *Authorization { + a := &Authorization{ + URI: uri, + Status: z.Status, + Identifier: AuthzID{Type: z.Identifier.Type, Value: z.Identifier.Value}, + Combinations: z.Combinations, // shallow copy + Challenges: make([]*Challenge, len(z.Challenges)), + } + for i, v := range z.Challenges { + a.Challenges[i] = v.challenge() + } + return a +} + +// wireChallenge is ACME JSON challenge representation. +type wireChallenge struct { + URI string `json:"uri"` + Type string + Token string + Status string +} + +func (c *wireChallenge) challenge() *Challenge { + v := &Challenge{ + URI: c.URI, + Type: c.Type, + Token: c.Token, + Status: c.Status, + } + if v.Status == "" { + v.Status = StatusPending + } + return v +} diff -Nru juju-core-2.0~beta15/src/golang.org/x/crypto/blowfish/cipher.go juju-core-2.0.0/src/golang.org/x/crypto/blowfish/cipher.go --- juju-core-2.0~beta15/src/golang.org/x/crypto/blowfish/cipher.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/golang.org/x/crypto/blowfish/cipher.go 2016-10-13 14:32:00.000000000 +0000 @@ -39,7 +39,7 @@ // NewSaltedCipher creates a returns a Cipher that folds a salt into its key // schedule. For most purposes, NewCipher, instead of NewSaltedCipher, is -// sufficient and desirable. For bcrypt compatiblity, the key can be over 56 +// sufficient and desirable. For bcrypt compatibility, the key can be over 56 // bytes. 
func NewSaltedCipher(key, salt []byte) (*Cipher, error) { if len(salt) == 0 { diff -Nru juju-core-2.0~beta15/src/golang.org/x/crypto/ed25519/ed25519.go juju-core-2.0.0/src/golang.org/x/crypto/ed25519/ed25519.go --- juju-core-2.0~beta15/src/golang.org/x/crypto/ed25519/ed25519.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/golang.org/x/crypto/ed25519/ed25519.go 2016-10-13 14:32:00.000000000 +0000 @@ -0,0 +1,181 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package ed25519 implements the Ed25519 signature algorithm. See +// http://ed25519.cr.yp.to/. +// +// These functions are also compatible with the “Ed25519†function defined in +// https://tools.ietf.org/html/draft-irtf-cfrg-eddsa-05. +package ed25519 + +// This code is a port of the public domain, “ref10†implementation of ed25519 +// from SUPERCOP. + +import ( + "crypto" + cryptorand "crypto/rand" + "crypto/sha512" + "crypto/subtle" + "errors" + "io" + "strconv" + + "golang.org/x/crypto/ed25519/internal/edwards25519" +) + +const ( + // PublicKeySize is the size, in bytes, of public keys as used in this package. + PublicKeySize = 32 + // PrivateKeySize is the size, in bytes, of private keys as used in this package. + PrivateKeySize = 64 + // SignatureSize is the size, in bytes, of signatures generated and verified by this package. + SignatureSize = 64 +) + +// PublicKey is the type of Ed25519 public keys. +type PublicKey []byte + +// PrivateKey is the type of Ed25519 private keys. It implements crypto.Signer. +type PrivateKey []byte + +// Public returns the PublicKey corresponding to priv. +func (priv PrivateKey) Public() crypto.PublicKey { + publicKey := make([]byte, PublicKeySize) + copy(publicKey, priv[32:]) + return PublicKey(publicKey) +} + +// Sign signs the given message with priv. 
+// Ed25519 performs two passes over messages to be signed and therefore cannot +// handle pre-hashed messages. Thus opts.HashFunc() must return zero to +// indicate the message hasn't been hashed. This can be achieved by passing +// crypto.Hash(0) as the value for opts. +func (priv PrivateKey) Sign(rand io.Reader, message []byte, opts crypto.SignerOpts) (signature []byte, err error) { + if opts.HashFunc() != crypto.Hash(0) { + return nil, errors.New("ed25519: cannot sign hashed message") + } + + return Sign(priv, message), nil +} + +// GenerateKey generates a public/private key pair using entropy from rand. +// If rand is nil, crypto/rand.Reader will be used. +func GenerateKey(rand io.Reader) (publicKey PublicKey, privateKey PrivateKey, err error) { + if rand == nil { + rand = cryptorand.Reader + } + + privateKey = make([]byte, PrivateKeySize) + publicKey = make([]byte, PublicKeySize) + _, err = io.ReadFull(rand, privateKey[:32]) + if err != nil { + return nil, nil, err + } + + digest := sha512.Sum512(privateKey[:32]) + digest[0] &= 248 + digest[31] &= 127 + digest[31] |= 64 + + var A edwards25519.ExtendedGroupElement + var hBytes [32]byte + copy(hBytes[:], digest[:]) + edwards25519.GeScalarMultBase(&A, &hBytes) + var publicKeyBytes [32]byte + A.ToBytes(&publicKeyBytes) + + copy(privateKey[32:], publicKeyBytes[:]) + copy(publicKey, publicKeyBytes[:]) + + return publicKey, privateKey, nil +} + +// Sign signs the message with privateKey and returns a signature. It will +// panic if len(privateKey) is not PrivateKeySize. 
+func Sign(privateKey PrivateKey, message []byte) []byte { + if l := len(privateKey); l != PrivateKeySize { + panic("ed25519: bad private key length: " + strconv.Itoa(l)) + } + + h := sha512.New() + h.Write(privateKey[:32]) + + var digest1, messageDigest, hramDigest [64]byte + var expandedSecretKey [32]byte + h.Sum(digest1[:0]) + copy(expandedSecretKey[:], digest1[:]) + expandedSecretKey[0] &= 248 + expandedSecretKey[31] &= 63 + expandedSecretKey[31] |= 64 + + h.Reset() + h.Write(digest1[32:]) + h.Write(message) + h.Sum(messageDigest[:0]) + + var messageDigestReduced [32]byte + edwards25519.ScReduce(&messageDigestReduced, &messageDigest) + var R edwards25519.ExtendedGroupElement + edwards25519.GeScalarMultBase(&R, &messageDigestReduced) + + var encodedR [32]byte + R.ToBytes(&encodedR) + + h.Reset() + h.Write(encodedR[:]) + h.Write(privateKey[32:]) + h.Write(message) + h.Sum(hramDigest[:0]) + var hramDigestReduced [32]byte + edwards25519.ScReduce(&hramDigestReduced, &hramDigest) + + var s [32]byte + edwards25519.ScMulAdd(&s, &hramDigestReduced, &expandedSecretKey, &messageDigestReduced) + + signature := make([]byte, SignatureSize) + copy(signature[:], encodedR[:]) + copy(signature[32:], s[:]) + + return signature +} + +// Verify reports whether sig is a valid signature of message by publicKey. It +// will panic if len(publicKey) is not PublicKeySize. 
+func Verify(publicKey PublicKey, message, sig []byte) bool { + if l := len(publicKey); l != PublicKeySize { + panic("ed25519: bad public key length: " + strconv.Itoa(l)) + } + + if len(sig) != SignatureSize || sig[63]&224 != 0 { + return false + } + + var A edwards25519.ExtendedGroupElement + var publicKeyBytes [32]byte + copy(publicKeyBytes[:], publicKey) + if !A.FromBytes(&publicKeyBytes) { + return false + } + edwards25519.FeNeg(&A.X, &A.X) + edwards25519.FeNeg(&A.T, &A.T) + + h := sha512.New() + h.Write(sig[:32]) + h.Write(publicKey[:]) + h.Write(message) + var digest [64]byte + h.Sum(digest[:0]) + + var hReduced [32]byte + edwards25519.ScReduce(&hReduced, &digest) + + var R edwards25519.ProjectiveGroupElement + var b [32]byte + copy(b[:], sig[32:]) + edwards25519.GeDoubleScalarMultVartime(&R, &hReduced, &A, &b) + + var checkR [32]byte + R.ToBytes(&checkR) + return subtle.ConstantTimeCompare(sig[:32], checkR[:]) == 1 +} diff -Nru juju-core-2.0~beta15/src/golang.org/x/crypto/ed25519/ed25519_test.go juju-core-2.0.0/src/golang.org/x/crypto/ed25519/ed25519_test.go --- juju-core-2.0~beta15/src/golang.org/x/crypto/ed25519/ed25519_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/golang.org/x/crypto/ed25519/ed25519_test.go 2016-10-13 14:32:00.000000000 +0000 @@ -0,0 +1,183 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ed25519 + +import ( + "bufio" + "bytes" + "compress/gzip" + "crypto" + "crypto/rand" + "encoding/hex" + "os" + "strings" + "testing" + + "golang.org/x/crypto/ed25519/internal/edwards25519" +) + +type zeroReader struct{} + +func (zeroReader) Read(buf []byte) (int, error) { + for i := range buf { + buf[i] = 0 + } + return len(buf), nil +} + +func TestUnmarshalMarshal(t *testing.T) { + pub, _, _ := GenerateKey(rand.Reader) + + var A edwards25519.ExtendedGroupElement + var pubBytes [32]byte + copy(pubBytes[:], pub) + if !A.FromBytes(&pubBytes) { + t.Fatalf("ExtendedGroupElement.FromBytes failed") + } + + var pub2 [32]byte + A.ToBytes(&pub2) + + if pubBytes != pub2 { + t.Errorf("FromBytes(%v)->ToBytes does not round-trip, got %x\n", pubBytes, pub2) + } +} + +func TestSignVerify(t *testing.T) { + var zero zeroReader + public, private, _ := GenerateKey(zero) + + message := []byte("test message") + sig := Sign(private, message) + if !Verify(public, message, sig) { + t.Errorf("valid signature rejected") + } + + wrongMessage := []byte("wrong message") + if Verify(public, wrongMessage, sig) { + t.Errorf("signature of different message accepted") + } +} + +func TestCryptoSigner(t *testing.T) { + var zero zeroReader + public, private, _ := GenerateKey(zero) + + signer := crypto.Signer(private) + + publicInterface := signer.Public() + public2, ok := publicInterface.(PublicKey) + if !ok { + t.Fatalf("expected PublicKey from Public() but got %T", publicInterface) + } + + if !bytes.Equal(public, public2) { + t.Errorf("public keys do not match: original:%x vs Public():%x", public, public2) + } + + message := []byte("message") + var noHash crypto.Hash + signature, err := signer.Sign(zero, message, noHash) + if err != nil { + t.Fatalf("error from Sign(): %s", err) + } + + if !Verify(public, message, signature) { + t.Errorf("Verify failed on signature from Sign()") + } +} + +func TestGolden(t *testing.T) { + // sign.input.gz is a selection of test cases from + // 
http://ed25519.cr.yp.to/python/sign.input + testDataZ, err := os.Open("testdata/sign.input.gz") + if err != nil { + t.Fatal(err) + } + defer testDataZ.Close() + testData, err := gzip.NewReader(testDataZ) + if err != nil { + t.Fatal(err) + } + defer testData.Close() + + scanner := bufio.NewScanner(testData) + lineNo := 0 + + for scanner.Scan() { + lineNo++ + + line := scanner.Text() + parts := strings.Split(line, ":") + if len(parts) != 5 { + t.Fatalf("bad number of parts on line %d", lineNo) + } + + privBytes, _ := hex.DecodeString(parts[0]) + pubKey, _ := hex.DecodeString(parts[1]) + msg, _ := hex.DecodeString(parts[2]) + sig, _ := hex.DecodeString(parts[3]) + // The signatures in the test vectors also include the message + // at the end, but we just want R and S. + sig = sig[:SignatureSize] + + if l := len(pubKey); l != PublicKeySize { + t.Fatalf("bad public key length on line %d: got %d bytes", lineNo, l) + } + + var priv [PrivateKeySize]byte + copy(priv[:], privBytes) + copy(priv[32:], pubKey) + + sig2 := Sign(priv[:], msg) + if !bytes.Equal(sig, sig2[:]) { + t.Errorf("different signature result on line %d: %x vs %x", lineNo, sig, sig2) + } + + if !Verify(pubKey, msg, sig2) { + t.Errorf("signature failed to verify on line %d", lineNo) + } + } + + if err := scanner.Err(); err != nil { + t.Fatalf("error reading test data: %s", err) + } +} + +func BenchmarkKeyGeneration(b *testing.B) { + var zero zeroReader + for i := 0; i < b.N; i++ { + if _, _, err := GenerateKey(zero); err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkSigning(b *testing.B) { + var zero zeroReader + _, priv, err := GenerateKey(zero) + if err != nil { + b.Fatal(err) + } + message := []byte("Hello, world!") + b.ResetTimer() + for i := 0; i < b.N; i++ { + Sign(priv, message) + } +} + +func BenchmarkVerification(b *testing.B) { + var zero zeroReader + pub, priv, err := GenerateKey(zero) + if err != nil { + b.Fatal(err) + } + message := []byte("Hello, world!") + signature := Sign(priv, message) 
+ b.ResetTimer() + for i := 0; i < b.N; i++ { + Verify(pub, message, signature) + } +} diff -Nru juju-core-2.0~beta15/src/golang.org/x/crypto/ed25519/internal/edwards25519/const.go juju-core-2.0.0/src/golang.org/x/crypto/ed25519/internal/edwards25519/const.go --- juju-core-2.0~beta15/src/golang.org/x/crypto/ed25519/internal/edwards25519/const.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/golang.org/x/crypto/ed25519/internal/edwards25519/const.go 2016-10-13 14:32:00.000000000 +0000 @@ -0,0 +1,1422 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package edwards25519 + +// These values are from the public domain, “ref10†implementation of ed25519 +// from SUPERCOP. + +// d is a constant in the Edwards curve equation. +var d = FieldElement{ + -10913610, 13857413, -15372611, 6949391, 114729, -8787816, -6275908, -3247719, -18696448, -12055116, +} + +// d2 is 2*d. +var d2 = FieldElement{ + -21827239, -5839606, -30745221, 13898782, 229458, 15978800, -12551817, -6495438, 29715968, 9444199, +} + +// SqrtM1 is the square-root of -1 in the field. +var SqrtM1 = FieldElement{ + -32595792, -7943725, 9377950, 3500415, 12389472, -272473, -25146209, -2005654, 326686, 11406482, +} + +// A is a constant in the Montgomery-form of curve25519. +var A = FieldElement{ + 486662, 0, 0, 0, 0, 0, 0, 0, 0, 0, +} + +// bi contains precomputed multiples of the base-point. See the Ed25519 paper +// for a discussion about how these values are used. 
+var bi = [8]PreComputedGroupElement{ + { + FieldElement{25967493, -14356035, 29566456, 3660896, -12694345, 4014787, 27544626, -11754271, -6079156, 2047605}, + FieldElement{-12545711, 934262, -2722910, 3049990, -727428, 9406986, 12720692, 5043384, 19500929, -15469378}, + FieldElement{-8738181, 4489570, 9688441, -14785194, 10184609, -12363380, 29287919, 11864899, -24514362, -4438546}, + }, + { + FieldElement{15636291, -9688557, 24204773, -7912398, 616977, -16685262, 27787600, -14772189, 28944400, -1550024}, + FieldElement{16568933, 4717097, -11556148, -1102322, 15682896, -11807043, 16354577, -11775962, 7689662, 11199574}, + FieldElement{30464156, -5976125, -11779434, -15670865, 23220365, 15915852, 7512774, 10017326, -17749093, -9920357}, + }, + { + FieldElement{10861363, 11473154, 27284546, 1981175, -30064349, 12577861, 32867885, 14515107, -15438304, 10819380}, + FieldElement{4708026, 6336745, 20377586, 9066809, -11272109, 6594696, -25653668, 12483688, -12668491, 5581306}, + FieldElement{19563160, 16186464, -29386857, 4097519, 10237984, -4348115, 28542350, 13850243, -23678021, -15815942}, + }, + { + FieldElement{5153746, 9909285, 1723747, -2777874, 30523605, 5516873, 19480852, 5230134, -23952439, -15175766}, + FieldElement{-30269007, -3463509, 7665486, 10083793, 28475525, 1649722, 20654025, 16520125, 30598449, 7715701}, + FieldElement{28881845, 14381568, 9657904, 3680757, -20181635, 7843316, -31400660, 1370708, 29794553, -1409300}, + }, + { + FieldElement{-22518993, -6692182, 14201702, -8745502, -23510406, 8844726, 18474211, -1361450, -13062696, 13821877}, + FieldElement{-6455177, -7839871, 3374702, -4740862, -27098617, -10571707, 31655028, -7212327, 18853322, -14220951}, + FieldElement{4566830, -12963868, -28974889, -12240689, -7602672, -2830569, -8514358, -10431137, 2207753, -3209784}, + }, + { + FieldElement{-25154831, -4185821, 29681144, 7868801, -6854661, -9423865, -12437364, -663000, -31111463, -16132436}, + FieldElement{25576264, -2703214, 7349804, -11814844, 
16472782, 9300885, 3844789, 15725684, 171356, 6466918}, + FieldElement{23103977, 13316479, 9739013, -16149481, 817875, -15038942, 8965339, -14088058, -30714912, 16193877}, + }, + { + FieldElement{-33521811, 3180713, -2394130, 14003687, -16903474, -16270840, 17238398, 4729455, -18074513, 9256800}, + FieldElement{-25182317, -4174131, 32336398, 5036987, -21236817, 11360617, 22616405, 9761698, -19827198, 630305}, + FieldElement{-13720693, 2639453, -24237460, -7406481, 9494427, -5774029, -6554551, -15960994, -2449256, -14291300}, + }, + { + FieldElement{-3151181, -5046075, 9282714, 6866145, -31907062, -863023, -18940575, 15033784, 25105118, -7894876}, + FieldElement{-24326370, 15950226, -31801215, -14592823, -11662737, -5090925, 1573892, -2625887, 2198790, -15804619}, + FieldElement{-3099351, 10324967, -2241613, 7453183, -5446979, -2735503, -13812022, -16236442, -32461234, -12290683}, + }, +} + +// base contains precomputed multiples of the base-point. See the Ed25519 paper +// for a discussion about how these values are used. 
+var base = [32][8]PreComputedGroupElement{ + { + { + FieldElement{25967493, -14356035, 29566456, 3660896, -12694345, 4014787, 27544626, -11754271, -6079156, 2047605}, + FieldElement{-12545711, 934262, -2722910, 3049990, -727428, 9406986, 12720692, 5043384, 19500929, -15469378}, + FieldElement{-8738181, 4489570, 9688441, -14785194, 10184609, -12363380, 29287919, 11864899, -24514362, -4438546}, + }, + { + FieldElement{-12815894, -12976347, -21581243, 11784320, -25355658, -2750717, -11717903, -3814571, -358445, -10211303}, + FieldElement{-21703237, 6903825, 27185491, 6451973, -29577724, -9554005, -15616551, 11189268, -26829678, -5319081}, + FieldElement{26966642, 11152617, 32442495, 15396054, 14353839, -12752335, -3128826, -9541118, -15472047, -4166697}, + }, + { + FieldElement{15636291, -9688557, 24204773, -7912398, 616977, -16685262, 27787600, -14772189, 28944400, -1550024}, + FieldElement{16568933, 4717097, -11556148, -1102322, 15682896, -11807043, 16354577, -11775962, 7689662, 11199574}, + FieldElement{30464156, -5976125, -11779434, -15670865, 23220365, 15915852, 7512774, 10017326, -17749093, -9920357}, + }, + { + FieldElement{-17036878, 13921892, 10945806, -6033431, 27105052, -16084379, -28926210, 15006023, 3284568, -6276540}, + FieldElement{23599295, -8306047, -11193664, -7687416, 13236774, 10506355, 7464579, 9656445, 13059162, 10374397}, + FieldElement{7798556, 16710257, 3033922, 2874086, 28997861, 2835604, 32406664, -3839045, -641708, -101325}, + }, + { + FieldElement{10861363, 11473154, 27284546, 1981175, -30064349, 12577861, 32867885, 14515107, -15438304, 10819380}, + FieldElement{4708026, 6336745, 20377586, 9066809, -11272109, 6594696, -25653668, 12483688, -12668491, 5581306}, + FieldElement{19563160, 16186464, -29386857, 4097519, 10237984, -4348115, 28542350, 13850243, -23678021, -15815942}, + }, + { + FieldElement{-15371964, -12862754, 32573250, 4720197, -26436522, 5875511, -19188627, -15224819, -9818940, -12085777}, + FieldElement{-8549212, 109983, 
15149363, 2178705, 22900618, 4543417, 3044240, -15689887, 1762328, 14866737}, + FieldElement{-18199695, -15951423, -10473290, 1707278, -17185920, 3916101, -28236412, 3959421, 27914454, 4383652}, + }, + { + FieldElement{5153746, 9909285, 1723747, -2777874, 30523605, 5516873, 19480852, 5230134, -23952439, -15175766}, + FieldElement{-30269007, -3463509, 7665486, 10083793, 28475525, 1649722, 20654025, 16520125, 30598449, 7715701}, + FieldElement{28881845, 14381568, 9657904, 3680757, -20181635, 7843316, -31400660, 1370708, 29794553, -1409300}, + }, + { + FieldElement{14499471, -2729599, -33191113, -4254652, 28494862, 14271267, 30290735, 10876454, -33154098, 2381726}, + FieldElement{-7195431, -2655363, -14730155, 462251, -27724326, 3941372, -6236617, 3696005, -32300832, 15351955}, + FieldElement{27431194, 8222322, 16448760, -3907995, -18707002, 11938355, -32961401, -2970515, 29551813, 10109425}, + }, + }, + { + { + FieldElement{-13657040, -13155431, -31283750, 11777098, 21447386, 6519384, -2378284, -1627556, 10092783, -4764171}, + FieldElement{27939166, 14210322, 4677035, 16277044, -22964462, -12398139, -32508754, 12005538, -17810127, 12803510}, + FieldElement{17228999, -15661624, -1233527, 300140, -1224870, -11714777, 30364213, -9038194, 18016357, 4397660}, + }, + { + FieldElement{-10958843, -7690207, 4776341, -14954238, 27850028, -15602212, -26619106, 14544525, -17477504, 982639}, + FieldElement{29253598, 15796703, -2863982, -9908884, 10057023, 3163536, 7332899, -4120128, -21047696, 9934963}, + FieldElement{5793303, 16271923, -24131614, -10116404, 29188560, 1206517, -14747930, 4559895, -30123922, -10897950}, + }, + { + FieldElement{-27643952, -11493006, 16282657, -11036493, 28414021, -15012264, 24191034, 4541697, -13338309, 5500568}, + FieldElement{12650548, -1497113, 9052871, 11355358, -17680037, -8400164, -17430592, 12264343, 10874051, 13524335}, + FieldElement{25556948, -3045990, 714651, 2510400, 23394682, -10415330, 33119038, 5080568, -22528059, 5376628}, + }, + { 
+ FieldElement{-26088264, -4011052, -17013699, -3537628, -6726793, 1920897, -22321305, -9447443, 4535768, 1569007}, + FieldElement{-2255422, 14606630, -21692440, -8039818, 28430649, 8775819, -30494562, 3044290, 31848280, 12543772}, + FieldElement{-22028579, 2943893, -31857513, 6777306, 13784462, -4292203, -27377195, -2062731, 7718482, 14474653}, + }, + { + FieldElement{2385315, 2454213, -22631320, 46603, -4437935, -15680415, 656965, -7236665, 24316168, -5253567}, + FieldElement{13741529, 10911568, -33233417, -8603737, -20177830, -1033297, 33040651, -13424532, -20729456, 8321686}, + FieldElement{21060490, -2212744, 15712757, -4336099, 1639040, 10656336, 23845965, -11874838, -9984458, 608372}, + }, + { + FieldElement{-13672732, -15087586, -10889693, -7557059, -6036909, 11305547, 1123968, -6780577, 27229399, 23887}, + FieldElement{-23244140, -294205, -11744728, 14712571, -29465699, -2029617, 12797024, -6440308, -1633405, 16678954}, + FieldElement{-29500620, 4770662, -16054387, 14001338, 7830047, 9564805, -1508144, -4795045, -17169265, 4904953}, + }, + { + FieldElement{24059557, 14617003, 19037157, -15039908, 19766093, -14906429, 5169211, 16191880, 2128236, -4326833}, + FieldElement{-16981152, 4124966, -8540610, -10653797, 30336522, -14105247, -29806336, 916033, -6882542, -2986532}, + FieldElement{-22630907, 12419372, -7134229, -7473371, -16478904, 16739175, 285431, 2763829, 15736322, 4143876}, + }, + { + FieldElement{2379352, 11839345, -4110402, -5988665, 11274298, 794957, 212801, -14594663, 23527084, -16458268}, + FieldElement{33431127, -11130478, -17838966, -15626900, 8909499, 8376530, -32625340, 4087881, -15188911, -14416214}, + FieldElement{1767683, 7197987, -13205226, -2022635, -13091350, 448826, 5799055, 4357868, -4774191, -16323038}, + }, + }, + { + { + FieldElement{6721966, 13833823, -23523388, -1551314, 26354293, -11863321, 23365147, -3949732, 7390890, 2759800}, + FieldElement{4409041, 2052381, 23373853, 10530217, 7676779, -12885954, 21302353, -4264057, 
1244380, -12919645}, + FieldElement{-4421239, 7169619, 4982368, -2957590, 30256825, -2777540, 14086413, 9208236, 15886429, 16489664}, + }, + { + FieldElement{1996075, 10375649, 14346367, 13311202, -6874135, -16438411, -13693198, 398369, -30606455, -712933}, + FieldElement{-25307465, 9795880, -2777414, 14878809, -33531835, 14780363, 13348553, 12076947, -30836462, 5113182}, + FieldElement{-17770784, 11797796, 31950843, 13929123, -25888302, 12288344, -30341101, -7336386, 13847711, 5387222}, + }, + { + FieldElement{-18582163, -3416217, 17824843, -2340966, 22744343, -10442611, 8763061, 3617786, -19600662, 10370991}, + FieldElement{20246567, -14369378, 22358229, -543712, 18507283, -10413996, 14554437, -8746092, 32232924, 16763880}, + FieldElement{9648505, 10094563, 26416693, 14745928, -30374318, -6472621, 11094161, 15689506, 3140038, -16510092}, + }, + { + FieldElement{-16160072, 5472695, 31895588, 4744994, 8823515, 10365685, -27224800, 9448613, -28774454, 366295}, + FieldElement{19153450, 11523972, -11096490, -6503142, -24647631, 5420647, 28344573, 8041113, 719605, 11671788}, + FieldElement{8678025, 2694440, -6808014, 2517372, 4964326, 11152271, -15432916, -15266516, 27000813, -10195553}, + }, + { + FieldElement{-15157904, 7134312, 8639287, -2814877, -7235688, 10421742, 564065, 5336097, 6750977, -14521026}, + FieldElement{11836410, -3979488, 26297894, 16080799, 23455045, 15735944, 1695823, -8819122, 8169720, 16220347}, + FieldElement{-18115838, 8653647, 17578566, -6092619, -8025777, -16012763, -11144307, -2627664, -5990708, -14166033}, + }, + { + FieldElement{-23308498, -10968312, 15213228, -10081214, -30853605, -11050004, 27884329, 2847284, 2655861, 1738395}, + FieldElement{-27537433, -14253021, -25336301, -8002780, -9370762, 8129821, 21651608, -3239336, -19087449, -11005278}, + FieldElement{1533110, 3437855, 23735889, 459276, 29970501, 11335377, 26030092, 5821408, 10478196, 8544890}, + }, + { + FieldElement{32173121, -16129311, 24896207, 3921497, 22579056, -3410854, 
19270449, 12217473, 17789017, -3395995}, + FieldElement{-30552961, -2228401, -15578829, -10147201, 13243889, 517024, 15479401, -3853233, 30460520, 1052596}, + FieldElement{-11614875, 13323618, 32618793, 8175907, -15230173, 12596687, 27491595, -4612359, 3179268, -9478891}, + }, + { + FieldElement{31947069, -14366651, -4640583, -15339921, -15125977, -6039709, -14756777, -16411740, 19072640, -9511060}, + FieldElement{11685058, 11822410, 3158003, -13952594, 33402194, -4165066, 5977896, -5215017, 473099, 5040608}, + FieldElement{-20290863, 8198642, -27410132, 11602123, 1290375, -2799760, 28326862, 1721092, -19558642, -3131606}, + }, + }, + { + { + FieldElement{7881532, 10687937, 7578723, 7738378, -18951012, -2553952, 21820786, 8076149, -27868496, 11538389}, + FieldElement{-19935666, 3899861, 18283497, -6801568, -15728660, -11249211, 8754525, 7446702, -5676054, 5797016}, + FieldElement{-11295600, -3793569, -15782110, -7964573, 12708869, -8456199, 2014099, -9050574, -2369172, -5877341}, + }, + { + FieldElement{-22472376, -11568741, -27682020, 1146375, 18956691, 16640559, 1192730, -3714199, 15123619, 10811505}, + FieldElement{14352098, -3419715, -18942044, 10822655, 32750596, 4699007, -70363, 15776356, -28886779, -11974553}, + FieldElement{-28241164, -8072475, -4978962, -5315317, 29416931, 1847569, -20654173, -16484855, 4714547, -9600655}, + }, + { + FieldElement{15200332, 8368572, 19679101, 15970074, -31872674, 1959451, 24611599, -4543832, -11745876, 12340220}, + FieldElement{12876937, -10480056, 33134381, 6590940, -6307776, 14872440, 9613953, 8241152, 15370987, 9608631}, + FieldElement{-4143277, -12014408, 8446281, -391603, 4407738, 13629032, -7724868, 15866074, -28210621, -8814099}, + }, + { + FieldElement{26660628, -15677655, 8393734, 358047, -7401291, 992988, -23904233, 858697, 20571223, 8420556}, + FieldElement{14620715, 13067227, -15447274, 8264467, 14106269, 15080814, 33531827, 12516406, -21574435, -12476749}, + FieldElement{236881, 10476226, 57258, -14677024, 
6472998, 2466984, 17258519, 7256740, 8791136, 15069930}, + }, + { + FieldElement{1276410, -9371918, 22949635, -16322807, -23493039, -5702186, 14711875, 4874229, -30663140, -2331391}, + FieldElement{5855666, 4990204, -13711848, 7294284, -7804282, 1924647, -1423175, -7912378, -33069337, 9234253}, + FieldElement{20590503, -9018988, 31529744, -7352666, -2706834, 10650548, 31559055, -11609587, 18979186, 13396066}, + }, + { + FieldElement{24474287, 4968103, 22267082, 4407354, 24063882, -8325180, -18816887, 13594782, 33514650, 7021958}, + FieldElement{-11566906, -6565505, -21365085, 15928892, -26158305, 4315421, -25948728, -3916677, -21480480, 12868082}, + FieldElement{-28635013, 13504661, 19988037, -2132761, 21078225, 6443208, -21446107, 2244500, -12455797, -8089383}, + }, + { + FieldElement{-30595528, 13793479, -5852820, 319136, -25723172, -6263899, 33086546, 8957937, -15233648, 5540521}, + FieldElement{-11630176, -11503902, -8119500, -7643073, 2620056, 1022908, -23710744, -1568984, -16128528, -14962807}, + FieldElement{23152971, 775386, 27395463, 14006635, -9701118, 4649512, 1689819, 892185, -11513277, -15205948}, + }, + { + FieldElement{9770129, 9586738, 26496094, 4324120, 1556511, -3550024, 27453819, 4763127, -19179614, 5867134}, + FieldElement{-32765025, 1927590, 31726409, -4753295, 23962434, -16019500, 27846559, 5931263, -29749703, -16108455}, + FieldElement{27461885, -2977536, 22380810, 1815854, -23033753, -3031938, 7283490, -15148073, -19526700, 7734629}, + }, + }, + { + { + FieldElement{-8010264, -9590817, -11120403, 6196038, 29344158, -13430885, 7585295, -3176626, 18549497, 15302069}, + FieldElement{-32658337, -6171222, -7672793, -11051681, 6258878, 13504381, 10458790, -6418461, -8872242, 8424746}, + FieldElement{24687205, 8613276, -30667046, -3233545, 1863892, -1830544, 19206234, 7134917, -11284482, -828919}, + }, + { + FieldElement{11334899, -9218022, 8025293, 12707519, 17523892, -10476071, 10243738, -14685461, -5066034, 16498837}, + FieldElement{8911542, 
6887158, -9584260, -6958590, 11145641, -9543680, 17303925, -14124238, 6536641, 10543906}, + FieldElement{-28946384, 15479763, -17466835, 568876, -1497683, 11223454, -2669190, -16625574, -27235709, 8876771}, + }, + { + FieldElement{-25742899, -12566864, -15649966, -846607, -33026686, -796288, -33481822, 15824474, -604426, -9039817}, + FieldElement{10330056, 70051, 7957388, -9002667, 9764902, 15609756, 27698697, -4890037, 1657394, 3084098}, + FieldElement{10477963, -7470260, 12119566, -13250805, 29016247, -5365589, 31280319, 14396151, -30233575, 15272409}, + }, + { + FieldElement{-12288309, 3169463, 28813183, 16658753, 25116432, -5630466, -25173957, -12636138, -25014757, 1950504}, + FieldElement{-26180358, 9489187, 11053416, -14746161, -31053720, 5825630, -8384306, -8767532, 15341279, 8373727}, + FieldElement{28685821, 7759505, -14378516, -12002860, -31971820, 4079242, 298136, -10232602, -2878207, 15190420}, + }, + { + FieldElement{-32932876, 13806336, -14337485, -15794431, -24004620, 10940928, 8669718, 2742393, -26033313, -6875003}, + FieldElement{-1580388, -11729417, -25979658, -11445023, -17411874, -10912854, 9291594, -16247779, -12154742, 6048605}, + FieldElement{-30305315, 14843444, 1539301, 11864366, 20201677, 1900163, 13934231, 5128323, 11213262, 9168384}, + }, + { + FieldElement{-26280513, 11007847, 19408960, -940758, -18592965, -4328580, -5088060, -11105150, 20470157, -16398701}, + FieldElement{-23136053, 9282192, 14855179, -15390078, -7362815, -14408560, -22783952, 14461608, 14042978, 5230683}, + FieldElement{29969567, -2741594, -16711867, -8552442, 9175486, -2468974, 21556951, 3506042, -5933891, -12449708}, + }, + { + FieldElement{-3144746, 8744661, 19704003, 4581278, -20430686, 6830683, -21284170, 8971513, -28539189, 15326563}, + FieldElement{-19464629, 10110288, -17262528, -3503892, -23500387, 1355669, -15523050, 15300988, -20514118, 9168260}, + FieldElement{-5353335, 4488613, -23803248, 16314347, 7780487, -15638939, -28948358, 9601605, 33087103, 
-9011387}, + }, + { + FieldElement{-19443170, -15512900, -20797467, -12445323, -29824447, 10229461, -27444329, -15000531, -5996870, 15664672}, + FieldElement{23294591, -16632613, -22650781, -8470978, 27844204, 11461195, 13099750, -2460356, 18151676, 13417686}, + FieldElement{-24722913, -4176517, -31150679, 5988919, -26858785, 6685065, 1661597, -12551441, 15271676, -15452665}, + }, + }, + { + { + FieldElement{11433042, -13228665, 8239631, -5279517, -1985436, -725718, -18698764, 2167544, -6921301, -13440182}, + FieldElement{-31436171, 15575146, 30436815, 12192228, -22463353, 9395379, -9917708, -8638997, 12215110, 12028277}, + FieldElement{14098400, 6555944, 23007258, 5757252, -15427832, -12950502, 30123440, 4617780, -16900089, -655628}, + }, + { + FieldElement{-4026201, -15240835, 11893168, 13718664, -14809462, 1847385, -15819999, 10154009, 23973261, -12684474}, + FieldElement{-26531820, -3695990, -1908898, 2534301, -31870557, -16550355, 18341390, -11419951, 32013174, -10103539}, + FieldElement{-25479301, 10876443, -11771086, -14625140, -12369567, 1838104, 21911214, 6354752, 4425632, -837822}, + }, + { + FieldElement{-10433389, -14612966, 22229858, -3091047, -13191166, 776729, -17415375, -12020462, 4725005, 14044970}, + FieldElement{19268650, -7304421, 1555349, 8692754, -21474059, -9910664, 6347390, -1411784, -19522291, -16109756}, + FieldElement{-24864089, 12986008, -10898878, -5558584, -11312371, -148526, 19541418, 8180106, 9282262, 10282508}, + }, + { + FieldElement{-26205082, 4428547, -8661196, -13194263, 4098402, -14165257, 15522535, 8372215, 5542595, -10702683}, + FieldElement{-10562541, 14895633, 26814552, -16673850, -17480754, -2489360, -2781891, 6993761, -18093885, 10114655}, + FieldElement{-20107055, -929418, 31422704, 10427861, -7110749, 6150669, -29091755, -11529146, 25953725, -106158}, + }, + { + FieldElement{-4234397, -8039292, -9119125, 3046000, 2101609, -12607294, 19390020, 6094296, -3315279, 12831125}, + FieldElement{-15998678, 7578152, 5310217, 
14408357, -33548620, -224739, 31575954, 6326196, 7381791, -2421839}, + FieldElement{-20902779, 3296811, 24736065, -16328389, 18374254, 7318640, 6295303, 8082724, -15362489, 12339664}, + }, + { + FieldElement{27724736, 2291157, 6088201, -14184798, 1792727, 5857634, 13848414, 15768922, 25091167, 14856294}, + FieldElement{-18866652, 8331043, 24373479, 8541013, -701998, -9269457, 12927300, -12695493, -22182473, -9012899}, + FieldElement{-11423429, -5421590, 11632845, 3405020, 30536730, -11674039, -27260765, 13866390, 30146206, 9142070}, + }, + { + FieldElement{3924129, -15307516, -13817122, -10054960, 12291820, -668366, -27702774, 9326384, -8237858, 4171294}, + FieldElement{-15921940, 16037937, 6713787, 16606682, -21612135, 2790944, 26396185, 3731949, 345228, -5462949}, + FieldElement{-21327538, 13448259, 25284571, 1143661, 20614966, -8849387, 2031539, -12391231, -16253183, -13582083}, + }, + { + FieldElement{31016211, -16722429, 26371392, -14451233, -5027349, 14854137, 17477601, 3842657, 28012650, -16405420}, + FieldElement{-5075835, 9368966, -8562079, -4600902, -15249953, 6970560, -9189873, 16292057, -8867157, 3507940}, + FieldElement{29439664, 3537914, 23333589, 6997794, -17555561, -11018068, -15209202, -15051267, -9164929, 6580396}, + }, + }, + { + { + FieldElement{-12185861, -7679788, 16438269, 10826160, -8696817, -6235611, 17860444, -9273846, -2095802, 9304567}, + FieldElement{20714564, -4336911, 29088195, 7406487, 11426967, -5095705, 14792667, -14608617, 5289421, -477127}, + FieldElement{-16665533, -10650790, -6160345, -13305760, 9192020, -1802462, 17271490, 12349094, 26939669, -3752294}, + }, + { + FieldElement{-12889898, 9373458, 31595848, 16374215, 21471720, 13221525, -27283495, -12348559, -3698806, 117887}, + FieldElement{22263325, -6560050, 3984570, -11174646, -15114008, -566785, 28311253, 5358056, -23319780, 541964}, + FieldElement{16259219, 3261970, 2309254, -15534474, -16885711, -4581916, 24134070, -16705829, -13337066, -13552195}, + }, + { + 
FieldElement{9378160, -13140186, -22845982, -12745264, 28198281, -7244098, -2399684, -717351, 690426, 14876244}, + FieldElement{24977353, -314384, -8223969, -13465086, 28432343, -1176353, -13068804, -12297348, -22380984, 6618999}, + FieldElement{-1538174, 11685646, 12944378, 13682314, -24389511, -14413193, 8044829, -13817328, 32239829, -5652762}, + }, + { + FieldElement{-18603066, 4762990, -926250, 8885304, -28412480, -3187315, 9781647, -10350059, 32779359, 5095274}, + FieldElement{-33008130, -5214506, -32264887, -3685216, 9460461, -9327423, -24601656, 14506724, 21639561, -2630236}, + FieldElement{-16400943, -13112215, 25239338, 15531969, 3987758, -4499318, -1289502, -6863535, 17874574, 558605}, + }, + { + FieldElement{-13600129, 10240081, 9171883, 16131053, -20869254, 9599700, 33499487, 5080151, 2085892, 5119761}, + FieldElement{-22205145, -2519528, -16381601, 414691, -25019550, 2170430, 30634760, -8363614, -31999993, -5759884}, + FieldElement{-6845704, 15791202, 8550074, -1312654, 29928809, -12092256, 27534430, -7192145, -22351378, 12961482}, + }, + { + FieldElement{-24492060, -9570771, 10368194, 11582341, -23397293, -2245287, 16533930, 8206996, -30194652, -5159638}, + FieldElement{-11121496, -3382234, 2307366, 6362031, -135455, 8868177, -16835630, 7031275, 7589640, 8945490}, + FieldElement{-32152748, 8917967, 6661220, -11677616, -1192060, -15793393, 7251489, -11182180, 24099109, -14456170}, + }, + { + FieldElement{5019558, -7907470, 4244127, -14714356, -26933272, 6453165, -19118182, -13289025, -6231896, -10280736}, + FieldElement{10853594, 10721687, 26480089, 5861829, -22995819, 1972175, -1866647, -10557898, -3363451, -6441124}, + FieldElement{-17002408, 5906790, 221599, -6563147, 7828208, -13248918, 24362661, -2008168, -13866408, 7421392}, + }, + { + FieldElement{8139927, -6546497, 32257646, -5890546, 30375719, 1886181, -21175108, 15441252, 28826358, -4123029}, + FieldElement{6267086, 9695052, 7709135, -16603597, -32869068, -1886135, 14795160, -7840124, 
13746021, -1742048}, + FieldElement{28584902, 7787108, -6732942, -15050729, 22846041, -7571236, -3181936, -363524, 4771362, -8419958}, + }, + }, + { + { + FieldElement{24949256, 6376279, -27466481, -8174608, -18646154, -9930606, 33543569, -12141695, 3569627, 11342593}, + FieldElement{26514989, 4740088, 27912651, 3697550, 19331575, -11472339, 6809886, 4608608, 7325975, -14801071}, + FieldElement{-11618399, -14554430, -24321212, 7655128, -1369274, 5214312, -27400540, 10258390, -17646694, -8186692}, + }, + { + FieldElement{11431204, 15823007, 26570245, 14329124, 18029990, 4796082, -31446179, 15580664, 9280358, -3973687}, + FieldElement{-160783, -10326257, -22855316, -4304997, -20861367, -13621002, -32810901, -11181622, -15545091, 4387441}, + FieldElement{-20799378, 12194512, 3937617, -5805892, -27154820, 9340370, -24513992, 8548137, 20617071, -7482001}, + }, + { + FieldElement{-938825, -3930586, -8714311, 16124718, 24603125, -6225393, -13775352, -11875822, 24345683, 10325460}, + FieldElement{-19855277, -1568885, -22202708, 8714034, 14007766, 6928528, 16318175, -1010689, 4766743, 3552007}, + FieldElement{-21751364, -16730916, 1351763, -803421, -4009670, 3950935, 3217514, 14481909, 10988822, -3994762}, + }, + { + FieldElement{15564307, -14311570, 3101243, 5684148, 30446780, -8051356, 12677127, -6505343, -8295852, 13296005}, + FieldElement{-9442290, 6624296, -30298964, -11913677, -4670981, -2057379, 31521204, 9614054, -30000824, 12074674}, + FieldElement{4771191, -135239, 14290749, -13089852, 27992298, 14998318, -1413936, -1556716, 29832613, -16391035}, + }, + { + FieldElement{7064884, -7541174, -19161962, -5067537, -18891269, -2912736, 25825242, 5293297, -27122660, 13101590}, + FieldElement{-2298563, 2439670, -7466610, 1719965, -27267541, -16328445, 32512469, -5317593, -30356070, -4190957}, + FieldElement{-30006540, 10162316, -33180176, 3981723, -16482138, -13070044, 14413974, 9515896, 19568978, 9628812}, + }, + { + FieldElement{33053803, 199357, 15894591, 1583059, 
27380243, -4580435, -17838894, -6106839, -6291786, 3437740}, + FieldElement{-18978877, 3884493, 19469877, 12726490, 15913552, 13614290, -22961733, 70104, 7463304, 4176122}, + FieldElement{-27124001, 10659917, 11482427, -16070381, 12771467, -6635117, -32719404, -5322751, 24216882, 5944158}, + }, + { + FieldElement{8894125, 7450974, -2664149, -9765752, -28080517, -12389115, 19345746, 14680796, 11632993, 5847885}, + FieldElement{26942781, -2315317, 9129564, -4906607, 26024105, 11769399, -11518837, 6367194, -9727230, 4782140}, + FieldElement{19916461, -4828410, -22910704, -11414391, 25606324, -5972441, 33253853, 8220911, 6358847, -1873857}, + }, + { + FieldElement{801428, -2081702, 16569428, 11065167, 29875704, 96627, 7908388, -4480480, -13538503, 1387155}, + FieldElement{19646058, 5720633, -11416706, 12814209, 11607948, 12749789, 14147075, 15156355, -21866831, 11835260}, + FieldElement{19299512, 1155910, 28703737, 14890794, 2925026, 7269399, 26121523, 15467869, -26560550, 5052483}, + }, + }, + { + { + FieldElement{-3017432, 10058206, 1980837, 3964243, 22160966, 12322533, -6431123, -12618185, 12228557, -7003677}, + FieldElement{32944382, 14922211, -22844894, 5188528, 21913450, -8719943, 4001465, 13238564, -6114803, 8653815}, + FieldElement{22865569, -4652735, 27603668, -12545395, 14348958, 8234005, 24808405, 5719875, 28483275, 2841751}, + }, + { + FieldElement{-16420968, -1113305, -327719, -12107856, 21886282, -15552774, -1887966, -315658, 19932058, -12739203}, + FieldElement{-11656086, 10087521, -8864888, -5536143, -19278573, -3055912, 3999228, 13239134, -4777469, -13910208}, + FieldElement{1382174, -11694719, 17266790, 9194690, -13324356, 9720081, 20403944, 11284705, -14013818, 3093230}, + }, + { + FieldElement{16650921, -11037932, -1064178, 1570629, -8329746, 7352753, -302424, 16271225, -24049421, -6691850}, + FieldElement{-21911077, -5927941, -4611316, -5560156, -31744103, -10785293, 24123614, 15193618, -21652117, -16739389}, + FieldElement{-9935934, -4289447, 
-25279823, 4372842, 2087473, 10399484, 31870908, 14690798, 17361620, 11864968}, + }, + { + FieldElement{-11307610, 6210372, 13206574, 5806320, -29017692, -13967200, -12331205, -7486601, -25578460, -16240689}, + FieldElement{14668462, -12270235, 26039039, 15305210, 25515617, 4542480, 10453892, 6577524, 9145645, -6443880}, + FieldElement{5974874, 3053895, -9433049, -10385191, -31865124, 3225009, -7972642, 3936128, -5652273, -3050304}, + }, + { + FieldElement{30625386, -4729400, -25555961, -12792866, -20484575, 7695099, 17097188, -16303496, -27999779, 1803632}, + FieldElement{-3553091, 9865099, -5228566, 4272701, -5673832, -16689700, 14911344, 12196514, -21405489, 7047412}, + FieldElement{20093277, 9920966, -11138194, -5343857, 13161587, 12044805, -32856851, 4124601, -32343828, -10257566}, + }, + { + FieldElement{-20788824, 14084654, -13531713, 7842147, 19119038, -13822605, 4752377, -8714640, -21679658, 2288038}, + FieldElement{-26819236, -3283715, 29965059, 3039786, -14473765, 2540457, 29457502, 14625692, -24819617, 12570232}, + FieldElement{-1063558, -11551823, 16920318, 12494842, 1278292, -5869109, -21159943, -3498680, -11974704, 4724943}, + }, + { + FieldElement{17960970, -11775534, -4140968, -9702530, -8876562, -1410617, -12907383, -8659932, -29576300, 1903856}, + FieldElement{23134274, -14279132, -10681997, -1611936, 20684485, 15770816, -12989750, 3190296, 26955097, 14109738}, + FieldElement{15308788, 5320727, -30113809, -14318877, 22902008, 7767164, 29425325, -11277562, 31960942, 11934971}, + }, + { + FieldElement{-27395711, 8435796, 4109644, 12222639, -24627868, 14818669, 20638173, 4875028, 10491392, 1379718}, + FieldElement{-13159415, 9197841, 3875503, -8936108, -1383712, -5879801, 33518459, 16176658, 21432314, 12180697}, + FieldElement{-11787308, 11500838, 13787581, -13832590, -22430679, 10140205, 1465425, 12689540, -10301319, -13872883}, + }, + }, + { + { + FieldElement{5414091, -15386041, -21007664, 9643570, 12834970, 1186149, -2622916, -1342231, 26128231, 
6032912}, + FieldElement{-26337395, -13766162, 32496025, -13653919, 17847801, -12669156, 3604025, 8316894, -25875034, -10437358}, + FieldElement{3296484, 6223048, 24680646, -12246460, -23052020, 5903205, -8862297, -4639164, 12376617, 3188849}, + }, + { + FieldElement{29190488, -14659046, 27549113, -1183516, 3520066, -10697301, 32049515, -7309113, -16109234, -9852307}, + FieldElement{-14744486, -9309156, 735818, -598978, -20407687, -5057904, 25246078, -15795669, 18640741, -960977}, + FieldElement{-6928835, -16430795, 10361374, 5642961, 4910474, 12345252, -31638386, -494430, 10530747, 1053335}, + }, + { + FieldElement{-29265967, -14186805, -13538216, -12117373, -19457059, -10655384, -31462369, -2948985, 24018831, 15026644}, + FieldElement{-22592535, -3145277, -2289276, 5953843, -13440189, 9425631, 25310643, 13003497, -2314791, -15145616}, + FieldElement{-27419985, -603321, -8043984, -1669117, -26092265, 13987819, -27297622, 187899, -23166419, -2531735}, + }, + { + FieldElement{-21744398, -13810475, 1844840, 5021428, -10434399, -15911473, 9716667, 16266922, -5070217, 726099}, + FieldElement{29370922, -6053998, 7334071, -15342259, 9385287, 2247707, -13661962, -4839461, 30007388, -15823341}, + FieldElement{-936379, 16086691, 23751945, -543318, -1167538, -5189036, 9137109, 730663, 9835848, 4555336}, + }, + { + FieldElement{-23376435, 1410446, -22253753, -12899614, 30867635, 15826977, 17693930, 544696, -11985298, 12422646}, + FieldElement{31117226, -12215734, -13502838, 6561947, -9876867, -12757670, -5118685, -4096706, 29120153, 13924425}, + FieldElement{-17400879, -14233209, 19675799, -2734756, -11006962, -5858820, -9383939, -11317700, 7240931, -237388}, + }, + { + FieldElement{-31361739, -11346780, -15007447, -5856218, -22453340, -12152771, 1222336, 4389483, 3293637, -15551743}, + FieldElement{-16684801, -14444245, 11038544, 11054958, -13801175, -3338533, -24319580, 7733547, 12796905, -6335822}, + FieldElement{-8759414, -10817836, -25418864, 10783769, -30615557, 
-9746811, -28253339, 3647836, 3222231, -11160462}, + }, + { + FieldElement{18606113, 1693100, -25448386, -15170272, 4112353, 10045021, 23603893, -2048234, -7550776, 2484985}, + FieldElement{9255317, -3131197, -12156162, -1004256, 13098013, -9214866, 16377220, -2102812, -19802075, -3034702}, + FieldElement{-22729289, 7496160, -5742199, 11329249, 19991973, -3347502, -31718148, 9936966, -30097688, -10618797}, + }, + { + FieldElement{21878590, -5001297, 4338336, 13643897, -3036865, 13160960, 19708896, 5415497, -7360503, -4109293}, + FieldElement{27736861, 10103576, 12500508, 8502413, -3413016, -9633558, 10436918, -1550276, -23659143, -8132100}, + FieldElement{19492550, -12104365, -29681976, -852630, -3208171, 12403437, 30066266, 8367329, 13243957, 8709688}, + }, + }, + { + { + FieldElement{12015105, 2801261, 28198131, 10151021, 24818120, -4743133, -11194191, -5645734, 5150968, 7274186}, + FieldElement{2831366, -12492146, 1478975, 6122054, 23825128, -12733586, 31097299, 6083058, 31021603, -9793610}, + FieldElement{-2529932, -2229646, 445613, 10720828, -13849527, -11505937, -23507731, 16354465, 15067285, -14147707}, + }, + { + FieldElement{7840942, 14037873, -33364863, 15934016, -728213, -3642706, 21403988, 1057586, -19379462, -12403220}, + FieldElement{915865, -16469274, 15608285, -8789130, -24357026, 6060030, -17371319, 8410997, -7220461, 16527025}, + FieldElement{32922597, -556987, 20336074, -16184568, 10903705, -5384487, 16957574, 52992, 23834301, 6588044}, + }, + { + FieldElement{32752030, 11232950, 3381995, -8714866, 22652988, -10744103, 17159699, 16689107, -20314580, -1305992}, + FieldElement{-4689649, 9166776, -25710296, -10847306, 11576752, 12733943, 7924251, -2752281, 1976123, -7249027}, + FieldElement{21251222, 16309901, -2983015, -6783122, 30810597, 12967303, 156041, -3371252, 12331345, -8237197}, + }, + { + FieldElement{8651614, -4477032, -16085636, -4996994, 13002507, 2950805, 29054427, -5106970, 10008136, -4667901}, + FieldElement{31486080, 15114593, 
-14261250, 12951354, 14369431, -7387845, 16347321, -13662089, 8684155, -10532952}, + FieldElement{19443825, 11385320, 24468943, -9659068, -23919258, 2187569, -26263207, -6086921, 31316348, 14219878}, + }, + { + FieldElement{-28594490, 1193785, 32245219, 11392485, 31092169, 15722801, 27146014, 6992409, 29126555, 9207390}, + FieldElement{32382935, 1110093, 18477781, 11028262, -27411763, -7548111, -4980517, 10843782, -7957600, -14435730}, + FieldElement{2814918, 7836403, 27519878, -7868156, -20894015, -11553689, -21494559, 8550130, 28346258, 1994730}, + }, + { + FieldElement{-19578299, 8085545, -14000519, -3948622, 2785838, -16231307, -19516951, 7174894, 22628102, 8115180}, + FieldElement{-30405132, 955511, -11133838, -15078069, -32447087, -13278079, -25651578, 3317160, -9943017, 930272}, + FieldElement{-15303681, -6833769, 28856490, 1357446, 23421993, 1057177, 24091212, -1388970, -22765376, -10650715}, + }, + { + FieldElement{-22751231, -5303997, -12907607, -12768866, -15811511, -7797053, -14839018, -16554220, -1867018, 8398970}, + FieldElement{-31969310, 2106403, -4736360, 1362501, 12813763, 16200670, 22981545, -6291273, 18009408, -15772772}, + FieldElement{-17220923, -9545221, -27784654, 14166835, 29815394, 7444469, 29551787, -3727419, 19288549, 1325865}, + }, + { + FieldElement{15100157, -15835752, -23923978, -1005098, -26450192, 15509408, 12376730, -3479146, 33166107, -8042750}, + FieldElement{20909231, 13023121, -9209752, 16251778, -5778415, -8094914, 12412151, 10018715, 2213263, -13878373}, + FieldElement{32529814, -11074689, 30361439, -16689753, -9135940, 1513226, 22922121, 6382134, -5766928, 8371348}, + }, + }, + { + { + FieldElement{9923462, 11271500, 12616794, 3544722, -29998368, -1721626, 12891687, -8193132, -26442943, 10486144}, + FieldElement{-22597207, -7012665, 8587003, -8257861, 4084309, -12970062, 361726, 2610596, -23921530, -11455195}, + FieldElement{5408411, -1136691, -4969122, 10561668, 24145918, 14240566, 31319731, -4235541, 19985175, -3436086}, 
+ }, + { + FieldElement{-13994457, 16616821, 14549246, 3341099, 32155958, 13648976, -17577068, 8849297, 65030, 8370684}, + FieldElement{-8320926, -12049626, 31204563, 5839400, -20627288, -1057277, -19442942, 6922164, 12743482, -9800518}, + FieldElement{-2361371, 12678785, 28815050, 4759974, -23893047, 4884717, 23783145, 11038569, 18800704, 255233}, + }, + { + FieldElement{-5269658, -1773886, 13957886, 7990715, 23132995, 728773, 13393847, 9066957, 19258688, -14753793}, + FieldElement{-2936654, -10827535, -10432089, 14516793, -3640786, 4372541, -31934921, 2209390, -1524053, 2055794}, + FieldElement{580882, 16705327, 5468415, -2683018, -30926419, -14696000, -7203346, -8994389, -30021019, 7394435}, + }, + { + FieldElement{23838809, 1822728, -15738443, 15242727, 8318092, -3733104, -21672180, -3492205, -4821741, 14799921}, + FieldElement{13345610, 9759151, 3371034, -16137791, 16353039, 8577942, 31129804, 13496856, -9056018, 7402518}, + FieldElement{2286874, -4435931, -20042458, -2008336, -13696227, 5038122, 11006906, -15760352, 8205061, 1607563}, + }, + { + FieldElement{14414086, -8002132, 3331830, -3208217, 22249151, -5594188, 18364661, -2906958, 30019587, -9029278}, + FieldElement{-27688051, 1585953, -10775053, 931069, -29120221, -11002319, -14410829, 12029093, 9944378, 8024}, + FieldElement{4368715, -3709630, 29874200, -15022983, -20230386, -11410704, -16114594, -999085, -8142388, 5640030}, + }, + { + FieldElement{10299610, 13746483, 11661824, 16234854, 7630238, 5998374, 9809887, -16694564, 15219798, -14327783}, + FieldElement{27425505, -5719081, 3055006, 10660664, 23458024, 595578, -15398605, -1173195, -18342183, 9742717}, + FieldElement{6744077, 2427284, 26042789, 2720740, -847906, 1118974, 32324614, 7406442, 12420155, 1994844}, + }, + { + FieldElement{14012521, -5024720, -18384453, -9578469, -26485342, -3936439, -13033478, -10909803, 24319929, -6446333}, + FieldElement{16412690, -4507367, 10772641, 15929391, -17068788, -4658621, 10555945, -10484049, -30102368, 
-4739048}, + FieldElement{22397382, -7767684, -9293161, -12792868, 17166287, -9755136, -27333065, 6199366, 21880021, -12250760}, + }, + { + FieldElement{-4283307, 5368523, -31117018, 8163389, -30323063, 3209128, 16557151, 8890729, 8840445, 4957760}, + FieldElement{-15447727, 709327, -6919446, -10870178, -29777922, 6522332, -21720181, 12130072, -14796503, 5005757}, + FieldElement{-2114751, -14308128, 23019042, 15765735, -25269683, 6002752, 10183197, -13239326, -16395286, -2176112}, + }, + }, + { + { + FieldElement{-19025756, 1632005, 13466291, -7995100, -23640451, 16573537, -32013908, -3057104, 22208662, 2000468}, + FieldElement{3065073, -1412761, -25598674, -361432, -17683065, -5703415, -8164212, 11248527, -3691214, -7414184}, + FieldElement{10379208, -6045554, 8877319, 1473647, -29291284, -12507580, 16690915, 2553332, -3132688, 16400289}, + }, + { + FieldElement{15716668, 1254266, -18472690, 7446274, -8448918, 6344164, -22097271, -7285580, 26894937, 9132066}, + FieldElement{24158887, 12938817, 11085297, -8177598, -28063478, -4457083, -30576463, 64452, -6817084, -2692882}, + FieldElement{13488534, 7794716, 22236231, 5989356, 25426474, -12578208, 2350710, -3418511, -4688006, 2364226}, + }, + { + FieldElement{16335052, 9132434, 25640582, 6678888, 1725628, 8517937, -11807024, -11697457, 15445875, -7798101}, + FieldElement{29004207, -7867081, 28661402, -640412, -12794003, -7943086, 31863255, -4135540, -278050, -15759279}, + FieldElement{-6122061, -14866665, -28614905, 14569919, -10857999, -3591829, 10343412, -6976290, -29828287, -10815811}, + }, + { + FieldElement{27081650, 3463984, 14099042, -4517604, 1616303, -6205604, 29542636, 15372179, 17293797, 960709}, + FieldElement{20263915, 11434237, -5765435, 11236810, 13505955, -10857102, -16111345, 6493122, -19384511, 7639714}, + FieldElement{-2830798, -14839232, 25403038, -8215196, -8317012, -16173699, 18006287, -16043750, 29994677, -15808121}, + }, + { + FieldElement{9769828, 5202651, -24157398, -13631392, -28051003, 
-11561624, -24613141, -13860782, -31184575, 709464}, + FieldElement{12286395, 13076066, -21775189, -1176622, -25003198, 4057652, -32018128, -8890874, 16102007, 13205847}, + FieldElement{13733362, 5599946, 10557076, 3195751, -5557991, 8536970, -25540170, 8525972, 10151379, 10394400}, + }, + { + FieldElement{4024660, -16137551, 22436262, 12276534, -9099015, -2686099, 19698229, 11743039, -33302334, 8934414}, + FieldElement{-15879800, -4525240, -8580747, -2934061, 14634845, -698278, -9449077, 3137094, -11536886, 11721158}, + FieldElement{17555939, -5013938, 8268606, 2331751, -22738815, 9761013, 9319229, 8835153, -9205489, -1280045}, + }, + { + FieldElement{-461409, -7830014, 20614118, 16688288, -7514766, -4807119, 22300304, 505429, 6108462, -6183415}, + FieldElement{-5070281, 12367917, -30663534, 3234473, 32617080, -8422642, 29880583, -13483331, -26898490, -7867459}, + FieldElement{-31975283, 5726539, 26934134, 10237677, -3173717, -605053, 24199304, 3795095, 7592688, -14992079}, + }, + { + FieldElement{21594432, -14964228, 17466408, -4077222, 32537084, 2739898, 6407723, 12018833, -28256052, 4298412}, + FieldElement{-20650503, -11961496, -27236275, 570498, 3767144, -1717540, 13891942, -1569194, 13717174, 10805743}, + FieldElement{-14676630, -15644296, 15287174, 11927123, 24177847, -8175568, -796431, 14860609, -26938930, -5863836}, + }, + }, + { + { + FieldElement{12962541, 5311799, -10060768, 11658280, 18855286, -7954201, 13286263, -12808704, -4381056, 9882022}, + FieldElement{18512079, 11319350, -20123124, 15090309, 18818594, 5271736, -22727904, 3666879, -23967430, -3299429}, + FieldElement{-6789020, -3146043, 16192429, 13241070, 15898607, -14206114, -10084880, -6661110, -2403099, 5276065}, + }, + { + FieldElement{30169808, -5317648, 26306206, -11750859, 27814964, 7069267, 7152851, 3684982, 1449224, 13082861}, + FieldElement{10342826, 3098505, 2119311, 193222, 25702612, 12233820, 23697382, 15056736, -21016438, -8202000}, + FieldElement{-33150110, 3261608, 22745853, 
7948688, 19370557, -15177665, -26171976, 6482814, -10300080, -11060101}, + }, + { + FieldElement{32869458, -5408545, 25609743, 15678670, -10687769, -15471071, 26112421, 2521008, -22664288, 6904815}, + FieldElement{29506923, 4457497, 3377935, -9796444, -30510046, 12935080, 1561737, 3841096, -29003639, -6657642}, + FieldElement{10340844, -6630377, -18656632, -2278430, 12621151, -13339055, 30878497, -11824370, -25584551, 5181966}, + }, + { + FieldElement{25940115, -12658025, 17324188, -10307374, -8671468, 15029094, 24396252, -16450922, -2322852, -12388574}, + FieldElement{-21765684, 9916823, -1300409, 4079498, -1028346, 11909559, 1782390, 12641087, 20603771, -6561742}, + FieldElement{-18882287, -11673380, 24849422, 11501709, 13161720, -4768874, 1925523, 11914390, 4662781, 7820689}, + }, + { + FieldElement{12241050, -425982, 8132691, 9393934, 32846760, -1599620, 29749456, 12172924, 16136752, 15264020}, + FieldElement{-10349955, -14680563, -8211979, 2330220, -17662549, -14545780, 10658213, 6671822, 19012087, 3772772}, + FieldElement{3753511, -3421066, 10617074, 2028709, 14841030, -6721664, 28718732, -15762884, 20527771, 12988982}, + }, + { + FieldElement{-14822485, -5797269, -3707987, 12689773, -898983, -10914866, -24183046, -10564943, 3299665, -12424953}, + FieldElement{-16777703, -15253301, -9642417, 4978983, 3308785, 8755439, 6943197, 6461331, -25583147, 8991218}, + FieldElement{-17226263, 1816362, -1673288, -6086439, 31783888, -8175991, -32948145, 7417950, -30242287, 1507265}, + }, + { + FieldElement{29692663, 6829891, -10498800, 4334896, 20945975, -11906496, -28887608, 8209391, 14606362, -10647073}, + FieldElement{-3481570, 8707081, 32188102, 5672294, 22096700, 1711240, -33020695, 9761487, 4170404, -2085325}, + FieldElement{-11587470, 14855945, -4127778, -1531857, -26649089, 15084046, 22186522, 16002000, -14276837, -8400798}, + }, + { + FieldElement{-4811456, 13761029, -31703877, -2483919, -3312471, 7869047, -7113572, -9620092, 13240845, 10965870}, + 
FieldElement{-7742563, -8256762, -14768334, -13656260, -23232383, 12387166, 4498947, 14147411, 29514390, 4302863}, + FieldElement{-13413405, -12407859, 20757302, -13801832, 14785143, 8976368, -5061276, -2144373, 17846988, -13971927}, + }, + }, + { + { + FieldElement{-2244452, -754728, -4597030, -1066309, -6247172, 1455299, -21647728, -9214789, -5222701, 12650267}, + FieldElement{-9906797, -16070310, 21134160, 12198166, -27064575, 708126, 387813, 13770293, -19134326, 10958663}, + FieldElement{22470984, 12369526, 23446014, -5441109, -21520802, -9698723, -11772496, -11574455, -25083830, 4271862}, + }, + { + FieldElement{-25169565, -10053642, -19909332, 15361595, -5984358, 2159192, 75375, -4278529, -32526221, 8469673}, + FieldElement{15854970, 4148314, -8893890, 7259002, 11666551, 13824734, -30531198, 2697372, 24154791, -9460943}, + FieldElement{15446137, -15806644, 29759747, 14019369, 30811221, -9610191, -31582008, 12840104, 24913809, 9815020}, + }, + { + FieldElement{-4709286, -5614269, -31841498, -12288893, -14443537, 10799414, -9103676, 13438769, 18735128, 9466238}, + FieldElement{11933045, 9281483, 5081055, -5183824, -2628162, -4905629, -7727821, -10896103, -22728655, 16199064}, + FieldElement{14576810, 379472, -26786533, -8317236, -29426508, -10812974, -102766, 1876699, 30801119, 2164795}, + }, + { + FieldElement{15995086, 3199873, 13672555, 13712240, -19378835, -4647646, -13081610, -15496269, -13492807, 1268052}, + FieldElement{-10290614, -3659039, -3286592, 10948818, 23037027, 3794475, -3470338, -12600221, -17055369, 3565904}, + FieldElement{29210088, -9419337, -5919792, -4952785, 10834811, -13327726, -16512102, -10820713, -27162222, -14030531}, + }, + { + FieldElement{-13161890, 15508588, 16663704, -8156150, -28349942, 9019123, -29183421, -3769423, 2244111, -14001979}, + FieldElement{-5152875, -3800936, -9306475, -6071583, 16243069, 14684434, -25673088, -16180800, 13491506, 4641841}, + FieldElement{10813417, 643330, -19188515, -728916, 30292062, -16600078, 
27548447, -7721242, 14476989, -12767431}, + }, + { + FieldElement{10292079, 9984945, 6481436, 8279905, -7251514, 7032743, 27282937, -1644259, -27912810, 12651324}, + FieldElement{-31185513, -813383, 22271204, 11835308, 10201545, 15351028, 17099662, 3988035, 21721536, -3148940}, + FieldElement{10202177, -6545839, -31373232, -9574638, -32150642, -8119683, -12906320, 3852694, 13216206, 14842320}, + }, + { + FieldElement{-15815640, -10601066, -6538952, -7258995, -6984659, -6581778, -31500847, 13765824, -27434397, 9900184}, + FieldElement{14465505, -13833331, -32133984, -14738873, -27443187, 12990492, 33046193, 15796406, -7051866, -8040114}, + FieldElement{30924417, -8279620, 6359016, -12816335, 16508377, 9071735, -25488601, 15413635, 9524356, -7018878}, + }, + { + FieldElement{12274201, -13175547, 32627641, -1785326, 6736625, 13267305, 5237659, -5109483, 15663516, 4035784}, + FieldElement{-2951309, 8903985, 17349946, 601635, -16432815, -4612556, -13732739, -15889334, -22258478, 4659091}, + FieldElement{-16916263, -4952973, -30393711, -15158821, 20774812, 15897498, 5736189, 15026997, -2178256, -13455585}, + }, + }, + { + { + FieldElement{-8858980, -2219056, 28571666, -10155518, -474467, -10105698, -3801496, 278095, 23440562, -290208}, + FieldElement{10226241, -5928702, 15139956, 120818, -14867693, 5218603, 32937275, 11551483, -16571960, -7442864}, + FieldElement{17932739, -12437276, -24039557, 10749060, 11316803, 7535897, 22503767, 5561594, -3646624, 3898661}, + }, + { + FieldElement{7749907, -969567, -16339731, -16464, -25018111, 15122143, -1573531, 7152530, 21831162, 1245233}, + FieldElement{26958459, -14658026, 4314586, 8346991, -5677764, 11960072, -32589295, -620035, -30402091, -16716212}, + FieldElement{-12165896, 9166947, 33491384, 13673479, 29787085, 13096535, 6280834, 14587357, -22338025, 13987525}, + }, + { + FieldElement{-24349909, 7778775, 21116000, 15572597, -4833266, -5357778, -4300898, -5124639, -7469781, -2858068}, + FieldElement{9681908, -6737123, 
-31951644, 13591838, -6883821, 386950, 31622781, 6439245, -14581012, 4091397}, + FieldElement{-8426427, 1470727, -28109679, -1596990, 3978627, -5123623, -19622683, 12092163, 29077877, -14741988}, + }, + { + FieldElement{5269168, -6859726, -13230211, -8020715, 25932563, 1763552, -5606110, -5505881, -20017847, 2357889}, + FieldElement{32264008, -15407652, -5387735, -1160093, -2091322, -3946900, 23104804, -12869908, 5727338, 189038}, + FieldElement{14609123, -8954470, -6000566, -16622781, -14577387, -7743898, -26745169, 10942115, -25888931, -14884697}, + }, + { + FieldElement{20513500, 5557931, -15604613, 7829531, 26413943, -2019404, -21378968, 7471781, 13913677, -5137875}, + FieldElement{-25574376, 11967826, 29233242, 12948236, -6754465, 4713227, -8940970, 14059180, 12878652, 8511905}, + FieldElement{-25656801, 3393631, -2955415, -7075526, -2250709, 9366908, -30223418, 6812974, 5568676, -3127656}, + }, + { + FieldElement{11630004, 12144454, 2116339, 13606037, 27378885, 15676917, -17408753, -13504373, -14395196, 8070818}, + FieldElement{27117696, -10007378, -31282771, -5570088, 1127282, 12772488, -29845906, 10483306, -11552749, -1028714}, + FieldElement{10637467, -5688064, 5674781, 1072708, -26343588, -6982302, -1683975, 9177853, -27493162, 15431203}, + }, + { + FieldElement{20525145, 10892566, -12742472, 12779443, -29493034, 16150075, -28240519, 14943142, -15056790, -7935931}, + FieldElement{-30024462, 5626926, -551567, -9981087, 753598, 11981191, 25244767, -3239766, -3356550, 9594024}, + FieldElement{-23752644, 2636870, -5163910, -10103818, 585134, 7877383, 11345683, -6492290, 13352335, -10977084}, + }, + { + FieldElement{-1931799, -5407458, 3304649, -12884869, 17015806, -4877091, -29783850, -7752482, -13215537, -319204}, + FieldElement{20239939, 6607058, 6203985, 3483793, -18386976, -779229, -20723742, 15077870, -22750759, 14523817}, + FieldElement{27406042, -6041657, 27423596, -4497394, 4996214, 10002360, -28842031, -4545494, -30172742, -4805667}, + }, + }, + { + 
{ + FieldElement{11374242, 12660715, 17861383, -12540833, 10935568, 1099227, -13886076, -9091740, -27727044, 11358504}, + FieldElement{-12730809, 10311867, 1510375, 10778093, -2119455, -9145702, 32676003, 11149336, -26123651, 4985768}, + FieldElement{-19096303, 341147, -6197485, -239033, 15756973, -8796662, -983043, 13794114, -19414307, -15621255}, + }, + { + FieldElement{6490081, 11940286, 25495923, -7726360, 8668373, -8751316, 3367603, 6970005, -1691065, -9004790}, + FieldElement{1656497, 13457317, 15370807, 6364910, 13605745, 8362338, -19174622, -5475723, -16796596, -5031438}, + FieldElement{-22273315, -13524424, -64685, -4334223, -18605636, -10921968, -20571065, -7007978, -99853, -10237333}, + }, + { + FieldElement{17747465, 10039260, 19368299, -4050591, -20630635, -16041286, 31992683, -15857976, -29260363, -5511971}, + FieldElement{31932027, -4986141, -19612382, 16366580, 22023614, 88450, 11371999, -3744247, 4882242, -10626905}, + FieldElement{29796507, 37186, 19818052, 10115756, -11829032, 3352736, 18551198, 3272828, -5190932, -4162409}, + }, + { + FieldElement{12501286, 4044383, -8612957, -13392385, -32430052, 5136599, -19230378, -3529697, 330070, -3659409}, + FieldElement{6384877, 2899513, 17807477, 7663917, -2358888, 12363165, 25366522, -8573892, -271295, 12071499}, + FieldElement{-8365515, -4042521, 25133448, -4517355, -6211027, 2265927, -32769618, 1936675, -5159697, 3829363}, + }, + { + FieldElement{28425966, -5835433, -577090, -4697198, -14217555, 6870930, 7921550, -6567787, 26333140, 14267664}, + FieldElement{-11067219, 11871231, 27385719, -10559544, -4585914, -11189312, 10004786, -8709488, -21761224, 8930324}, + FieldElement{-21197785, -16396035, 25654216, -1725397, 12282012, 11008919, 1541940, 4757911, -26491501, -16408940}, + }, + { + FieldElement{13537262, -7759490, -20604840, 10961927, -5922820, -13218065, -13156584, 6217254, -15943699, 13814990}, + FieldElement{-17422573, 15157790, 18705543, 29619, 24409717, -260476, 27361681, 9257833, -1956526, 
-1776914}, + FieldElement{-25045300, -10191966, 15366585, 15166509, -13105086, 8423556, -29171540, 12361135, -18685978, 4578290}, + }, + { + FieldElement{24579768, 3711570, 1342322, -11180126, -27005135, 14124956, -22544529, 14074919, 21964432, 8235257}, + FieldElement{-6528613, -2411497, 9442966, -5925588, 12025640, -1487420, -2981514, -1669206, 13006806, 2355433}, + FieldElement{-16304899, -13605259, -6632427, -5142349, 16974359, -10911083, 27202044, 1719366, 1141648, -12796236}, + }, + { + FieldElement{-12863944, -13219986, -8318266, -11018091, -6810145, -4843894, 13475066, -3133972, 32674895, 13715045}, + FieldElement{11423335, -5468059, 32344216, 8962751, 24989809, 9241752, -13265253, 16086212, -28740881, -15642093}, + FieldElement{-1409668, 12530728, -6368726, 10847387, 19531186, -14132160, -11709148, 7791794, -27245943, 4383347}, + }, + }, + { + { + FieldElement{-28970898, 5271447, -1266009, -9736989, -12455236, 16732599, -4862407, -4906449, 27193557, 6245191}, + FieldElement{-15193956, 5362278, -1783893, 2695834, 4960227, 12840725, 23061898, 3260492, 22510453, 8577507}, + FieldElement{-12632451, 11257346, -32692994, 13548177, -721004, 10879011, 31168030, 13952092, -29571492, -3635906}, + }, + { + FieldElement{3877321, -9572739, 32416692, 5405324, -11004407, -13656635, 3759769, 11935320, 5611860, 8164018}, + FieldElement{-16275802, 14667797, 15906460, 12155291, -22111149, -9039718, 32003002, -8832289, 5773085, -8422109}, + FieldElement{-23788118, -8254300, 1950875, 8937633, 18686727, 16459170, -905725, 12376320, 31632953, 190926}, + }, + { + FieldElement{-24593607, -16138885, -8423991, 13378746, 14162407, 6901328, -8288749, 4508564, -25341555, -3627528}, + FieldElement{8884438, -5884009, 6023974, 10104341, -6881569, -4941533, 18722941, -14786005, -1672488, 827625}, + FieldElement{-32720583, -16289296, -32503547, 7101210, 13354605, 2659080, -1800575, -14108036, -24878478, 1541286}, + }, + { + FieldElement{2901347, -1117687, 3880376, -10059388, -17620940, 
-3612781, -21802117, -3567481, 20456845, -1885033}, + FieldElement{27019610, 12299467, -13658288, -1603234, -12861660, -4861471, -19540150, -5016058, 29439641, 15138866}, + FieldElement{21536104, -6626420, -32447818, -10690208, -22408077, 5175814, -5420040, -16361163, 7779328, 109896}, + }, + { + FieldElement{30279744, 14648750, -8044871, 6425558, 13639621, -743509, 28698390, 12180118, 23177719, -554075}, + FieldElement{26572847, 3405927, -31701700, 12890905, -19265668, 5335866, -6493768, 2378492, 4439158, -13279347}, + FieldElement{-22716706, 3489070, -9225266, -332753, 18875722, -1140095, 14819434, -12731527, -17717757, -5461437}, + }, + { + FieldElement{-5056483, 16566551, 15953661, 3767752, -10436499, 15627060, -820954, 2177225, 8550082, -15114165}, + FieldElement{-18473302, 16596775, -381660, 15663611, 22860960, 15585581, -27844109, -3582739, -23260460, -8428588}, + FieldElement{-32480551, 15707275, -8205912, -5652081, 29464558, 2713815, -22725137, 15860482, -21902570, 1494193}, + }, + { + FieldElement{-19562091, -14087393, -25583872, -9299552, 13127842, 759709, 21923482, 16529112, 8742704, 12967017}, + FieldElement{-28464899, 1553205, 32536856, -10473729, -24691605, -406174, -8914625, -2933896, -29903758, 15553883}, + FieldElement{21877909, 3230008, 9881174, 10539357, -4797115, 2841332, 11543572, 14513274, 19375923, -12647961}, + }, + { + FieldElement{8832269, -14495485, 13253511, 5137575, 5037871, 4078777, 24880818, -6222716, 2862653, 9455043}, + FieldElement{29306751, 5123106, 20245049, -14149889, 9592566, 8447059, -2077124, -2990080, 15511449, 4789663}, + FieldElement{-20679756, 7004547, 8824831, -9434977, -4045704, -3750736, -5754762, 108893, 23513200, 16652362}, + }, + }, + { + { + FieldElement{-33256173, 4144782, -4476029, -6579123, 10770039, -7155542, -6650416, -12936300, -18319198, 10212860}, + FieldElement{2756081, 8598110, 7383731, -6859892, 22312759, -1105012, 21179801, 2600940, -9988298, -12506466}, + FieldElement{-24645692, 13317462, -30449259, 
-15653928, 21365574, -10869657, 11344424, 864440, -2499677, -16710063}, + }, + { + FieldElement{-26432803, 6148329, -17184412, -14474154, 18782929, -275997, -22561534, 211300, 2719757, 4940997}, + FieldElement{-1323882, 3911313, -6948744, 14759765, -30027150, 7851207, 21690126, 8518463, 26699843, 5276295}, + FieldElement{-13149873, -6429067, 9396249, 365013, 24703301, -10488939, 1321586, 149635, -15452774, 7159369}, + }, + { + FieldElement{9987780, -3404759, 17507962, 9505530, 9731535, -2165514, 22356009, 8312176, 22477218, -8403385}, + FieldElement{18155857, -16504990, 19744716, 9006923, 15154154, -10538976, 24256460, -4864995, -22548173, 9334109}, + FieldElement{2986088, -4911893, 10776628, -3473844, 10620590, -7083203, -21413845, 14253545, -22587149, 536906}, + }, + { + FieldElement{4377756, 8115836, 24567078, 15495314, 11625074, 13064599, 7390551, 10589625, 10838060, -15420424}, + FieldElement{-19342404, 867880, 9277171, -3218459, -14431572, -1986443, 19295826, -15796950, 6378260, 699185}, + FieldElement{7895026, 4057113, -7081772, -13077756, -17886831, -323126, -716039, 15693155, -5045064, -13373962}, + }, + { + FieldElement{-7737563, -5869402, -14566319, -7406919, 11385654, 13201616, 31730678, -10962840, -3918636, -9669325}, + FieldElement{10188286, -15770834, -7336361, 13427543, 22223443, 14896287, 30743455, 7116568, -21786507, 5427593}, + FieldElement{696102, 13206899, 27047647, -10632082, 15285305, -9853179, 10798490, -4578720, 19236243, 12477404}, + }, + { + FieldElement{-11229439, 11243796, -17054270, -8040865, -788228, -8167967, -3897669, 11180504, -23169516, 7733644}, + FieldElement{17800790, -14036179, -27000429, -11766671, 23887827, 3149671, 23466177, -10538171, 10322027, 15313801}, + FieldElement{26246234, 11968874, 32263343, -5468728, 6830755, -13323031, -15794704, -101982, -24449242, 10890804}, + }, + { + FieldElement{-31365647, 10271363, -12660625, -6267268, 16690207, -13062544, -14982212, 16484931, 25180797, -5334884}, + FieldElement{-586574, 
10376444, -32586414, -11286356, 19801893, 10997610, 2276632, 9482883, 316878, 13820577}, + FieldElement{-9882808, -4510367, -2115506, 16457136, -11100081, 11674996, 30756178, -7515054, 30696930, -3712849}, + }, + { + FieldElement{32988917, -9603412, 12499366, 7910787, -10617257, -11931514, -7342816, -9985397, -32349517, 7392473}, + FieldElement{-8855661, 15927861, 9866406, -3649411, -2396914, -16655781, -30409476, -9134995, 25112947, -2926644}, + FieldElement{-2504044, -436966, 25621774, -5678772, 15085042, -5479877, -24884878, -13526194, 5537438, -13914319}, + }, + }, + { + { + FieldElement{-11225584, 2320285, -9584280, 10149187, -33444663, 5808648, -14876251, -1729667, 31234590, 6090599}, + FieldElement{-9633316, 116426, 26083934, 2897444, -6364437, -2688086, 609721, 15878753, -6970405, -9034768}, + FieldElement{-27757857, 247744, -15194774, -9002551, 23288161, -10011936, -23869595, 6503646, 20650474, 1804084}, + }, + { + FieldElement{-27589786, 15456424, 8972517, 8469608, 15640622, 4439847, 3121995, -10329713, 27842616, -202328}, + FieldElement{-15306973, 2839644, 22530074, 10026331, 4602058, 5048462, 28248656, 5031932, -11375082, 12714369}, + FieldElement{20807691, -7270825, 29286141, 11421711, -27876523, -13868230, -21227475, 1035546, -19733229, 12796920}, + }, + { + FieldElement{12076899, -14301286, -8785001, -11848922, -25012791, 16400684, -17591495, -12899438, 3480665, -15182815}, + FieldElement{-32361549, 5457597, 28548107, 7833186, 7303070, -11953545, -24363064, -15921875, -33374054, 2771025}, + FieldElement{-21389266, 421932, 26597266, 6860826, 22486084, -6737172, -17137485, -4210226, -24552282, 15673397}, + }, + { + FieldElement{-20184622, 2338216, 19788685, -9620956, -4001265, -8740893, -20271184, 4733254, 3727144, -12934448}, + FieldElement{6120119, 814863, -11794402, -622716, 6812205, -15747771, 2019594, 7975683, 31123697, -10958981}, + FieldElement{30069250, -11435332, 30434654, 2958439, 18399564, -976289, 12296869, 9204260, -16432438, 9648165}, + 
}, + { + FieldElement{32705432, -1550977, 30705658, 7451065, -11805606, 9631813, 3305266, 5248604, -26008332, -11377501}, + FieldElement{17219865, 2375039, -31570947, -5575615, -19459679, 9219903, 294711, 15298639, 2662509, -16297073}, + FieldElement{-1172927, -7558695, -4366770, -4287744, -21346413, -8434326, 32087529, -1222777, 32247248, -14389861}, + }, + { + FieldElement{14312628, 1221556, 17395390, -8700143, -4945741, -8684635, -28197744, -9637817, -16027623, -13378845}, + FieldElement{-1428825, -9678990, -9235681, 6549687, -7383069, -468664, 23046502, 9803137, 17597934, 2346211}, + FieldElement{18510800, 15337574, 26171504, 981392, -22241552, 7827556, -23491134, -11323352, 3059833, -11782870}, + }, + { + FieldElement{10141598, 6082907, 17829293, -1947643, 9830092, 13613136, -25556636, -5544586, -33502212, 3592096}, + FieldElement{33114168, -15889352, -26525686, -13343397, 33076705, 8716171, 1151462, 1521897, -982665, -6837803}, + FieldElement{-32939165, -4255815, 23947181, -324178, -33072974, -12305637, -16637686, 3891704, 26353178, 693168}, + }, + { + FieldElement{30374239, 1595580, -16884039, 13186931, 4600344, 406904, 9585294, -400668, 31375464, 14369965}, + FieldElement{-14370654, -7772529, 1510301, 6434173, -18784789, -6262728, 32732230, -13108839, 17901441, 16011505}, + FieldElement{18171223, -11934626, -12500402, 15197122, -11038147, -15230035, -19172240, -16046376, 8764035, 12309598}, + }, + }, + { + { + FieldElement{5975908, -5243188, -19459362, -9681747, -11541277, 14015782, -23665757, 1228319, 17544096, -10593782}, + FieldElement{5811932, -1715293, 3442887, -2269310, -18367348, -8359541, -18044043, -15410127, -5565381, 12348900}, + FieldElement{-31399660, 11407555, 25755363, 6891399, -3256938, 14872274, -24849353, 8141295, -10632534, -585479}, + }, + { + FieldElement{-12675304, 694026, -5076145, 13300344, 14015258, -14451394, -9698672, -11329050, 30944593, 1130208}, + FieldElement{8247766, -6710942, -26562381, -7709309, -14401939, -14648910, 
4652152, 2488540, 23550156, -271232}, + FieldElement{17294316, -3788438, 7026748, 15626851, 22990044, 113481, 2267737, -5908146, -408818, -137719}, + }, + { + FieldElement{16091085, -16253926, 18599252, 7340678, 2137637, -1221657, -3364161, 14550936, 3260525, -7166271}, + FieldElement{-4910104, -13332887, 18550887, 10864893, -16459325, -7291596, -23028869, -13204905, -12748722, 2701326}, + FieldElement{-8574695, 16099415, 4629974, -16340524, -20786213, -6005432, -10018363, 9276971, 11329923, 1862132}, + }, + { + FieldElement{14763076, -15903608, -30918270, 3689867, 3511892, 10313526, -21951088, 12219231, -9037963, -940300}, + FieldElement{8894987, -3446094, 6150753, 3013931, 301220, 15693451, -31981216, -2909717, -15438168, 11595570}, + FieldElement{15214962, 3537601, -26238722, -14058872, 4418657, -15230761, 13947276, 10730794, -13489462, -4363670}, + }, + { + FieldElement{-2538306, 7682793, 32759013, 263109, -29984731, -7955452, -22332124, -10188635, 977108, 699994}, + FieldElement{-12466472, 4195084, -9211532, 550904, -15565337, 12917920, 19118110, -439841, -30534533, -14337913}, + FieldElement{31788461, -14507657, 4799989, 7372237, 8808585, -14747943, 9408237, -10051775, 12493932, -5409317}, + }, + { + FieldElement{-25680606, 5260744, -19235809, -6284470, -3695942, 16566087, 27218280, 2607121, 29375955, 6024730}, + FieldElement{842132, -2794693, -4763381, -8722815, 26332018, -12405641, 11831880, 6985184, -9940361, 2854096}, + FieldElement{-4847262, -7969331, 2516242, -5847713, 9695691, -7221186, 16512645, 960770, 12121869, 16648078}, + }, + { + FieldElement{-15218652, 14667096, -13336229, 2013717, 30598287, -464137, -31504922, -7882064, 20237806, 2838411}, + FieldElement{-19288047, 4453152, 15298546, -16178388, 22115043, -15972604, 12544294, -13470457, 1068881, -12499905}, + FieldElement{-9558883, -16518835, 33238498, 13506958, 30505848, -1114596, -8486907, -2630053, 12521378, 4845654}, + }, + { + FieldElement{-28198521, 10744108, -2958380, 10199664, 7759311, 
-13088600, 3409348, -873400, -6482306, -12885870}, + FieldElement{-23561822, 6230156, -20382013, 10655314, -24040585, -11621172, 10477734, -1240216, -3113227, 13974498}, + FieldElement{12966261, 15550616, -32038948, -1615346, 21025980, -629444, 5642325, 7188737, 18895762, 12629579}, + }, + }, + { + { + FieldElement{14741879, -14946887, 22177208, -11721237, 1279741, 8058600, 11758140, 789443, 32195181, 3895677}, + FieldElement{10758205, 15755439, -4509950, 9243698, -4879422, 6879879, -2204575, -3566119, -8982069, 4429647}, + FieldElement{-2453894, 15725973, -20436342, -10410672, -5803908, -11040220, -7135870, -11642895, 18047436, -15281743}, + }, + { + FieldElement{-25173001, -11307165, 29759956, 11776784, -22262383, -15820455, 10993114, -12850837, -17620701, -9408468}, + FieldElement{21987233, 700364, -24505048, 14972008, -7774265, -5718395, 32155026, 2581431, -29958985, 8773375}, + FieldElement{-25568350, 454463, -13211935, 16126715, 25240068, 8594567, 20656846, 12017935, -7874389, -13920155}, + }, + { + FieldElement{6028182, 6263078, -31011806, -11301710, -818919, 2461772, -31841174, -5468042, -1721788, -2776725}, + FieldElement{-12278994, 16624277, 987579, -5922598, 32908203, 1248608, 7719845, -4166698, 28408820, 6816612}, + FieldElement{-10358094, -8237829, 19549651, -12169222, 22082623, 16147817, 20613181, 13982702, -10339570, 5067943}, + }, + { + FieldElement{-30505967, -3821767, 12074681, 13582412, -19877972, 2443951, -19719286, 12746132, 5331210, -10105944}, + FieldElement{30528811, 3601899, -1957090, 4619785, -27361822, -15436388, 24180793, -12570394, 27679908, -1648928}, + FieldElement{9402404, -13957065, 32834043, 10838634, -26580150, -13237195, 26653274, -8685565, 22611444, -12715406}, + }, + { + FieldElement{22190590, 1118029, 22736441, 15130463, -30460692, -5991321, 19189625, -4648942, 4854859, 6622139}, + FieldElement{-8310738, -2953450, -8262579, -3388049, -10401731, -271929, 13424426, -3567227, 26404409, 13001963}, + FieldElement{-31241838, 
-15415700, -2994250, 8939346, 11562230, -12840670, -26064365, -11621720, -15405155, 11020693}, + }, + { + FieldElement{1866042, -7949489, -7898649, -10301010, 12483315, 13477547, 3175636, -12424163, 28761762, 1406734}, + FieldElement{-448555, -1777666, 13018551, 3194501, -9580420, -11161737, 24760585, -4347088, 25577411, -13378680}, + FieldElement{-24290378, 4759345, -690653, -1852816, 2066747, 10693769, -29595790, 9884936, -9368926, 4745410}, + }, + { + FieldElement{-9141284, 6049714, -19531061, -4341411, -31260798, 9944276, -15462008, -11311852, 10931924, -11931931}, + FieldElement{-16561513, 14112680, -8012645, 4817318, -8040464, -11414606, -22853429, 10856641, -20470770, 13434654}, + FieldElement{22759489, -10073434, -16766264, -1871422, 13637442, -10168091, 1765144, -12654326, 28445307, -5364710}, + }, + { + FieldElement{29875063, 12493613, 2795536, -3786330, 1710620, 15181182, -10195717, -8788675, 9074234, 1167180}, + FieldElement{-26205683, 11014233, -9842651, -2635485, -26908120, 7532294, -18716888, -9535498, 3843903, 9367684}, + FieldElement{-10969595, -6403711, 9591134, 9582310, 11349256, 108879, 16235123, 8601684, -139197, 4242895}, + }, + }, + { + { + FieldElement{22092954, -13191123, -2042793, -11968512, 32186753, -11517388, -6574341, 2470660, -27417366, 16625501}, + FieldElement{-11057722, 3042016, 13770083, -9257922, 584236, -544855, -7770857, 2602725, -27351616, 14247413}, + FieldElement{6314175, -10264892, -32772502, 15957557, -10157730, 168750, -8618807, 14290061, 27108877, -1180880}, + }, + { + FieldElement{-8586597, -7170966, 13241782, 10960156, -32991015, -13794596, 33547976, -11058889, -27148451, 981874}, + FieldElement{22833440, 9293594, -32649448, -13618667, -9136966, 14756819, -22928859, -13970780, -10479804, -16197962}, + FieldElement{-7768587, 3326786, -28111797, 10783824, 19178761, 14905060, 22680049, 13906969, -15933690, 3797899}, + }, + { + FieldElement{21721356, -4212746, -12206123, 9310182, -3882239, -13653110, 23740224, -2709232, 
20491983, -8042152}, + FieldElement{9209270, -15135055, -13256557, -6167798, -731016, 15289673, 25947805, 15286587, 30997318, -6703063}, + FieldElement{7392032, 16618386, 23946583, -8039892, -13265164, -1533858, -14197445, -2321576, 17649998, -250080}, + }, + { + FieldElement{-9301088, -14193827, 30609526, -3049543, -25175069, -1283752, -15241566, -9525724, -2233253, 7662146}, + FieldElement{-17558673, 1763594, -33114336, 15908610, -30040870, -12174295, 7335080, -8472199, -3174674, 3440183}, + FieldElement{-19889700, -5977008, -24111293, -9688870, 10799743, -16571957, 40450, -4431835, 4862400, 1133}, + }, + { + FieldElement{-32856209, -7873957, -5422389, 14860950, -16319031, 7956142, 7258061, 311861, -30594991, -7379421}, + FieldElement{-3773428, -1565936, 28985340, 7499440, 24445838, 9325937, 29727763, 16527196, 18278453, 15405622}, + FieldElement{-4381906, 8508652, -19898366, -3674424, -5984453, 15149970, -13313598, 843523, -21875062, 13626197}, + }, + { + FieldElement{2281448, -13487055, -10915418, -2609910, 1879358, 16164207, -10783882, 3953792, 13340839, 15928663}, + FieldElement{31727126, -7179855, -18437503, -8283652, 2875793, -16390330, -25269894, -7014826, -23452306, 5964753}, + FieldElement{4100420, -5959452, -17179337, 6017714, -18705837, 12227141, -26684835, 11344144, 2538215, -7570755}, + }, + { + FieldElement{-9433605, 6123113, 11159803, -2156608, 30016280, 14966241, -20474983, 1485421, -629256, -15958862}, + FieldElement{-26804558, 4260919, 11851389, 9658551, -32017107, 16367492, -20205425, -13191288, 11659922, -11115118}, + FieldElement{26180396, 10015009, -30844224, -8581293, 5418197, 9480663, 2231568, -10170080, 33100372, -1306171}, + }, + { + FieldElement{15121113, -5201871, -10389905, 15427821, -27509937, -15992507, 21670947, 4486675, -5931810, -14466380}, + FieldElement{16166486, -9483733, -11104130, 6023908, -31926798, -1364923, 2340060, -16254968, -10735770, -10039824}, + FieldElement{28042865, -3557089, -12126526, 12259706, -3717498, 
-6945899, 6766453, -8689599, 18036436, 5803270}, + }, + }, + { + { + FieldElement{-817581, 6763912, 11803561, 1585585, 10958447, -2671165, 23855391, 4598332, -6159431, -14117438}, + FieldElement{-31031306, -14256194, 17332029, -2383520, 31312682, -5967183, 696309, 50292, -20095739, 11763584}, + FieldElement{-594563, -2514283, -32234153, 12643980, 12650761, 14811489, 665117, -12613632, -19773211, -10713562}, + }, + { + FieldElement{30464590, -11262872, -4127476, -12734478, 19835327, -7105613, -24396175, 2075773, -17020157, 992471}, + FieldElement{18357185, -6994433, 7766382, 16342475, -29324918, 411174, 14578841, 8080033, -11574335, -10601610}, + FieldElement{19598397, 10334610, 12555054, 2555664, 18821899, -10339780, 21873263, 16014234, 26224780, 16452269}, + }, + { + FieldElement{-30223925, 5145196, 5944548, 16385966, 3976735, 2009897, -11377804, -7618186, -20533829, 3698650}, + FieldElement{14187449, 3448569, -10636236, -10810935, -22663880, -3433596, 7268410, -10890444, 27394301, 12015369}, + FieldElement{19695761, 16087646, 28032085, 12999827, 6817792, 11427614, 20244189, -1312777, -13259127, -3402461}, + }, + { + FieldElement{30860103, 12735208, -1888245, -4699734, -16974906, 2256940, -8166013, 12298312, -8550524, -10393462}, + FieldElement{-5719826, -11245325, -1910649, 15569035, 26642876, -7587760, -5789354, -15118654, -4976164, 12651793}, + FieldElement{-2848395, 9953421, 11531313, -5282879, 26895123, -12697089, -13118820, -16517902, 9768698, -2533218}, + }, + { + FieldElement{-24719459, 1894651, -287698, -4704085, 15348719, -8156530, 32767513, 12765450, 4940095, 10678226}, + FieldElement{18860224, 15980149, -18987240, -1562570, -26233012, -11071856, -7843882, 13944024, -24372348, 16582019}, + FieldElement{-15504260, 4970268, -29893044, 4175593, -20993212, -2199756, -11704054, 15444560, -11003761, 7989037}, + }, + { + FieldElement{31490452, 5568061, -2412803, 2182383, -32336847, 4531686, -32078269, 6200206, -19686113, -14800171}, + FieldElement{-17308668, 
-15879940, -31522777, -2831, -32887382, 16375549, 8680158, -16371713, 28550068, -6857132}, + FieldElement{-28126887, -5688091, 16837845, -1820458, -6850681, 12700016, -30039981, 4364038, 1155602, 5988841}, + }, + { + FieldElement{21890435, -13272907, -12624011, 12154349, -7831873, 15300496, 23148983, -4470481, 24618407, 8283181}, + FieldElement{-33136107, -10512751, 9975416, 6841041, -31559793, 16356536, 3070187, -7025928, 1466169, 10740210}, + FieldElement{-1509399, -15488185, -13503385, -10655916, 32799044, 909394, -13938903, -5779719, -32164649, -15327040}, + }, + { + FieldElement{3960823, -14267803, -28026090, -15918051, -19404858, 13146868, 15567327, 951507, -3260321, -573935}, + FieldElement{24740841, 5052253, -30094131, 8961361, 25877428, 6165135, -24368180, 14397372, -7380369, -6144105}, + FieldElement{-28888365, 3510803, -28103278, -1158478, -11238128, -10631454, -15441463, -14453128, -1625486, -6494814}, + }, + }, + { + { + FieldElement{793299, -9230478, 8836302, -6235707, -27360908, -2369593, 33152843, -4885251, -9906200, -621852}, + FieldElement{5666233, 525582, 20782575, -8038419, -24538499, 14657740, 16099374, 1468826, -6171428, -15186581}, + FieldElement{-4859255, -3779343, -2917758, -6748019, 7778750, 11688288, -30404353, -9871238, -1558923, -9863646}, + }, + { + FieldElement{10896332, -7719704, 824275, 472601, -19460308, 3009587, 25248958, 14783338, -30581476, -15757844}, + FieldElement{10566929, 12612572, -31944212, 11118703, -12633376, 12362879, 21752402, 8822496, 24003793, 14264025}, + FieldElement{27713862, -7355973, -11008240, 9227530, 27050101, 2504721, 23886875, -13117525, 13958495, -5732453}, + }, + { + FieldElement{-23481610, 4867226, -27247128, 3900521, 29838369, -8212291, -31889399, -10041781, 7340521, -15410068}, + FieldElement{4646514, -8011124, -22766023, -11532654, 23184553, 8566613, 31366726, -1381061, -15066784, -10375192}, + FieldElement{-17270517, 12723032, -16993061, 14878794, 21619651, -6197576, 27584817, 3093888, -8843694, 
3849921}, + }, + { + FieldElement{-9064912, 2103172, 25561640, -15125738, -5239824, 9582958, 32477045, -9017955, 5002294, -15550259}, + FieldElement{-12057553, -11177906, 21115585, -13365155, 8808712, -12030708, 16489530, 13378448, -25845716, 12741426}, + FieldElement{-5946367, 10645103, -30911586, 15390284, -3286982, -7118677, 24306472, 15852464, 28834118, -7646072}, + }, + { + FieldElement{-17335748, -9107057, -24531279, 9434953, -8472084, -583362, -13090771, 455841, 20461858, 5491305}, + FieldElement{13669248, -16095482, -12481974, -10203039, -14569770, -11893198, -24995986, 11293807, -28588204, -9421832}, + FieldElement{28497928, 6272777, -33022994, 14470570, 8906179, -1225630, 18504674, -14165166, 29867745, -8795943}, + }, + { + FieldElement{-16207023, 13517196, -27799630, -13697798, 24009064, -6373891, -6367600, -13175392, 22853429, -4012011}, + FieldElement{24191378, 16712145, -13931797, 15217831, 14542237, 1646131, 18603514, -11037887, 12876623, -2112447}, + FieldElement{17902668, 4518229, -411702, -2829247, 26878217, 5258055, -12860753, 608397, 16031844, 3723494}, + }, + { + FieldElement{-28632773, 12763728, -20446446, 7577504, 33001348, -13017745, 17558842, -7872890, 23896954, -4314245}, + FieldElement{-20005381, -12011952, 31520464, 605201, 2543521, 5991821, -2945064, 7229064, -9919646, -8826859}, + FieldElement{28816045, 298879, -28165016, -15920938, 19000928, -1665890, -12680833, -2949325, -18051778, -2082915}, + }, + { + FieldElement{16000882, -344896, 3493092, -11447198, -29504595, -13159789, 12577740, 16041268, -19715240, 7847707}, + FieldElement{10151868, 10572098, 27312476, 7922682, 14825339, 4723128, -32855931, -6519018, -10020567, 3852848}, + FieldElement{-11430470, 15697596, -21121557, -4420647, 5386314, 15063598, 16514493, -15932110, 29330899, -15076224}, + }, + }, + { + { + FieldElement{-25499735, -4378794, -15222908, -6901211, 16615731, 2051784, 3303702, 15490, -27548796, 12314391}, + FieldElement{15683520, -6003043, 18109120, -9980648, 
15337968, -5997823, -16717435, 15921866, 16103996, -3731215}, + FieldElement{-23169824, -10781249, 13588192, -1628807, -3798557, -1074929, -19273607, 5402699, -29815713, -9841101}, + }, + { + FieldElement{23190676, 2384583, -32714340, 3462154, -29903655, -1529132, -11266856, 8911517, -25205859, 2739713}, + FieldElement{21374101, -3554250, -33524649, 9874411, 15377179, 11831242, -33529904, 6134907, 4931255, 11987849}, + FieldElement{-7732, -2978858, -16223486, 7277597, 105524, -322051, -31480539, 13861388, -30076310, 10117930}, + }, + { + FieldElement{-29501170, -10744872, -26163768, 13051539, -25625564, 5089643, -6325503, 6704079, 12890019, 15728940}, + FieldElement{-21972360, -11771379, -951059, -4418840, 14704840, 2695116, 903376, -10428139, 12885167, 8311031}, + FieldElement{-17516482, 5352194, 10384213, -13811658, 7506451, 13453191, 26423267, 4384730, 1888765, -5435404}, + }, + { + FieldElement{-25817338, -3107312, -13494599, -3182506, 30896459, -13921729, -32251644, -12707869, -19464434, -3340243}, + FieldElement{-23607977, -2665774, -526091, 4651136, 5765089, 4618330, 6092245, 14845197, 17151279, -9854116}, + FieldElement{-24830458, -12733720, -15165978, 10367250, -29530908, -265356, 22825805, -7087279, -16866484, 16176525}, + }, + { + FieldElement{-23583256, 6564961, 20063689, 3798228, -4740178, 7359225, 2006182, -10363426, -28746253, -10197509}, + FieldElement{-10626600, -4486402, -13320562, -5125317, 3432136, -6393229, 23632037, -1940610, 32808310, 1099883}, + FieldElement{15030977, 5768825, -27451236, -2887299, -6427378, -15361371, -15277896, -6809350, 2051441, -15225865}, + }, + { + FieldElement{-3362323, -7239372, 7517890, 9824992, 23555850, 295369, 5148398, -14154188, -22686354, 16633660}, + FieldElement{4577086, -16752288, 13249841, -15304328, 19958763, -14537274, 18559670, -10759549, 8402478, -9864273}, + FieldElement{-28406330, -1051581, -26790155, -907698, -17212414, -11030789, 9453451, -14980072, 17983010, 9967138}, + }, + { + 
FieldElement{-25762494, 6524722, 26585488, 9969270, 24709298, 1220360, -1677990, 7806337, 17507396, 3651560}, + FieldElement{-10420457, -4118111, 14584639, 15971087, -15768321, 8861010, 26556809, -5574557, -18553322, -11357135}, + FieldElement{2839101, 14284142, 4029895, 3472686, 14402957, 12689363, -26642121, 8459447, -5605463, -7621941}, + }, + { + FieldElement{-4839289, -3535444, 9744961, 2871048, 25113978, 3187018, -25110813, -849066, 17258084, -7977739}, + FieldElement{18164541, -10595176, -17154882, -1542417, 19237078, -9745295, 23357533, -15217008, 26908270, 12150756}, + FieldElement{-30264870, -7647865, 5112249, -7036672, -1499807, -6974257, 43168, -5537701, -32302074, 16215819}, + }, + }, + { + { + FieldElement{-6898905, 9824394, -12304779, -4401089, -31397141, -6276835, 32574489, 12532905, -7503072, -8675347}, + FieldElement{-27343522, -16515468, -27151524, -10722951, 946346, 16291093, 254968, 7168080, 21676107, -1943028}, + FieldElement{21260961, -8424752, -16831886, -11920822, -23677961, 3968121, -3651949, -6215466, -3556191, -7913075}, + }, + { + FieldElement{16544754, 13250366, -16804428, 15546242, -4583003, 12757258, -2462308, -8680336, -18907032, -9662799}, + FieldElement{-2415239, -15577728, 18312303, 4964443, -15272530, -12653564, 26820651, 16690659, 25459437, -4564609}, + FieldElement{-25144690, 11425020, 28423002, -11020557, -6144921, -15826224, 9142795, -2391602, -6432418, -1644817}, + }, + { + FieldElement{-23104652, 6253476, 16964147, -3768872, -25113972, -12296437, -27457225, -16344658, 6335692, 7249989}, + FieldElement{-30333227, 13979675, 7503222, -12368314, -11956721, -4621693, -30272269, 2682242, 25993170, -12478523}, + FieldElement{4364628, 5930691, 32304656, -10044554, -8054781, 15091131, 22857016, -10598955, 31820368, 15075278}, + }, + { + FieldElement{31879134, -8918693, 17258761, 90626, -8041836, -4917709, 24162788, -9650886, -17970238, 12833045}, + FieldElement{19073683, 14851414, -24403169, -11860168, 7625278, 11091125, -19619190, 
2074449, -9413939, 14905377}, + FieldElement{24483667, -11935567, -2518866, -11547418, -1553130, 15355506, -25282080, 9253129, 27628530, -7555480}, + }, + { + FieldElement{17597607, 8340603, 19355617, 552187, 26198470, -3176583, 4593324, -9157582, -14110875, 15297016}, + FieldElement{510886, 14337390, -31785257, 16638632, 6328095, 2713355, -20217417, -11864220, 8683221, 2921426}, + FieldElement{18606791, 11874196, 27155355, -5281482, -24031742, 6265446, -25178240, -1278924, 4674690, 13890525}, + }, + { + FieldElement{13609624, 13069022, -27372361, -13055908, 24360586, 9592974, 14977157, 9835105, 4389687, 288396}, + FieldElement{9922506, -519394, 13613107, 5883594, -18758345, -434263, -12304062, 8317628, 23388070, 16052080}, + FieldElement{12720016, 11937594, -31970060, -5028689, 26900120, 8561328, -20155687, -11632979, -14754271, -10812892}, + }, + { + FieldElement{15961858, 14150409, 26716931, -665832, -22794328, 13603569, 11829573, 7467844, -28822128, 929275}, + FieldElement{11038231, -11582396, -27310482, -7316562, -10498527, -16307831, -23479533, -9371869, -21393143, 2465074}, + FieldElement{20017163, -4323226, 27915242, 1529148, 12396362, 15675764, 13817261, -9658066, 2463391, -4622140}, + }, + { + FieldElement{-16358878, -12663911, -12065183, 4996454, -1256422, 1073572, 9583558, 12851107, 4003896, 12673717}, + FieldElement{-1731589, -15155870, -3262930, 16143082, 19294135, 13385325, 14741514, -9103726, 7903886, 2348101}, + FieldElement{24536016, -16515207, 12715592, -3862155, 1511293, 10047386, -3842346, -7129159, -28377538, 10048127}, + }, + }, + { + { + FieldElement{-12622226, -6204820, 30718825, 2591312, -10617028, 12192840, 18873298, -7297090, -32297756, 15221632}, + FieldElement{-26478122, -11103864, 11546244, -1852483, 9180880, 7656409, -21343950, 2095755, 29769758, 6593415}, + FieldElement{-31994208, -2907461, 4176912, 3264766, 12538965, -868111, 26312345, -6118678, 30958054, 8292160}, + }, + { + FieldElement{31429822, -13959116, 29173532, 15632448, 
12174511, -2760094, 32808831, 3977186, 26143136, -3148876}, + FieldElement{22648901, 1402143, -22799984, 13746059, 7936347, 365344, -8668633, -1674433, -3758243, -2304625}, + FieldElement{-15491917, 8012313, -2514730, -12702462, -23965846, -10254029, -1612713, -1535569, -16664475, 8194478}, + }, + { + FieldElement{27338066, -7507420, -7414224, 10140405, -19026427, -6589889, 27277191, 8855376, 28572286, 3005164}, + FieldElement{26287124, 4821776, 25476601, -4145903, -3764513, -15788984, -18008582, 1182479, -26094821, -13079595}, + FieldElement{-7171154, 3178080, 23970071, 6201893, -17195577, -4489192, -21876275, -13982627, 32208683, -1198248}, + }, + { + FieldElement{-16657702, 2817643, -10286362, 14811298, 6024667, 13349505, -27315504, -10497842, -27672585, -11539858}, + FieldElement{15941029, -9405932, -21367050, 8062055, 31876073, -238629, -15278393, -1444429, 15397331, -4130193}, + FieldElement{8934485, -13485467, -23286397, -13423241, -32446090, 14047986, 31170398, -1441021, -27505566, 15087184}, + }, + { + FieldElement{-18357243, -2156491, 24524913, -16677868, 15520427, -6360776, -15502406, 11461896, 16788528, -5868942}, + FieldElement{-1947386, 16013773, 21750665, 3714552, -17401782, -16055433, -3770287, -10323320, 31322514, -11615635}, + FieldElement{21426655, -5650218, -13648287, -5347537, -28812189, -4920970, -18275391, -14621414, 13040862, -12112948}, + }, + { + FieldElement{11293895, 12478086, -27136401, 15083750, -29307421, 14748872, 14555558, -13417103, 1613711, 4896935}, + FieldElement{-25894883, 15323294, -8489791, -8057900, 25967126, -13425460, 2825960, -4897045, -23971776, -11267415}, + FieldElement{-15924766, -5229880, -17443532, 6410664, 3622847, 10243618, 20615400, 12405433, -23753030, -8436416}, + }, + { + FieldElement{-7091295, 12556208, -20191352, 9025187, -17072479, 4333801, 4378436, 2432030, 23097949, -566018}, + FieldElement{4565804, -16025654, 20084412, -7842817, 1724999, 189254, 24767264, 10103221, -18512313, 2424778}, + 
FieldElement{366633, -11976806, 8173090, -6890119, 30788634, 5745705, -7168678, 1344109, -3642553, 12412659}, + }, + { + FieldElement{-24001791, 7690286, 14929416, -168257, -32210835, -13412986, 24162697, -15326504, -3141501, 11179385}, + FieldElement{18289522, -14724954, 8056945, 16430056, -21729724, 7842514, -6001441, -1486897, -18684645, -11443503}, + FieldElement{476239, 6601091, -6152790, -9723375, 17503545, -4863900, 27672959, 13403813, 11052904, 5219329}, + }, + }, + { + { + FieldElement{20678546, -8375738, -32671898, 8849123, -5009758, 14574752, 31186971, -3973730, 9014762, -8579056}, + FieldElement{-13644050, -10350239, -15962508, 5075808, -1514661, -11534600, -33102500, 9160280, 8473550, -3256838}, + FieldElement{24900749, 14435722, 17209120, -15292541, -22592275, 9878983, -7689309, -16335821, -24568481, 11788948}, + }, + { + FieldElement{-3118155, -11395194, -13802089, 14797441, 9652448, -6845904, -20037437, 10410733, -24568470, -1458691}, + FieldElement{-15659161, 16736706, -22467150, 10215878, -9097177, 7563911, 11871841, -12505194, -18513325, 8464118}, + FieldElement{-23400612, 8348507, -14585951, -861714, -3950205, -6373419, 14325289, 8628612, 33313881, -8370517}, + }, + { + FieldElement{-20186973, -4967935, 22367356, 5271547, -1097117, -4788838, -24805667, -10236854, -8940735, -5818269}, + FieldElement{-6948785, -1795212, -32625683, -16021179, 32635414, -7374245, 15989197, -12838188, 28358192, -4253904}, + FieldElement{-23561781, -2799059, -32351682, -1661963, -9147719, 10429267, -16637684, 4072016, -5351664, 5596589}, + }, + { + FieldElement{-28236598, -3390048, 12312896, 6213178, 3117142, 16078565, 29266239, 2557221, 1768301, 15373193}, + FieldElement{-7243358, -3246960, -4593467, -7553353, -127927, -912245, -1090902, -4504991, -24660491, 3442910}, + FieldElement{-30210571, 5124043, 14181784, 8197961, 18964734, -11939093, 22597931, 7176455, -18585478, 13365930}, + }, + { + FieldElement{-7877390, -1499958, 8324673, 4690079, 6261860, 890446, 
24538107, -8570186, -9689599, -3031667}, + FieldElement{25008904, -10771599, -4305031, -9638010, 16265036, 15721635, 683793, -11823784, 15723479, -15163481}, + FieldElement{-9660625, 12374379, -27006999, -7026148, -7724114, -12314514, 11879682, 5400171, 519526, -1235876}, + }, + { + FieldElement{22258397, -16332233, -7869817, 14613016, -22520255, -2950923, -20353881, 7315967, 16648397, 7605640}, + FieldElement{-8081308, -8464597, -8223311, 9719710, 19259459, -15348212, 23994942, -5281555, -9468848, 4763278}, + FieldElement{-21699244, 9220969, -15730624, 1084137, -25476107, -2852390, 31088447, -7764523, -11356529, 728112}, + }, + { + FieldElement{26047220, -11751471, -6900323, -16521798, 24092068, 9158119, -4273545, -12555558, -29365436, -5498272}, + FieldElement{17510331, -322857, 5854289, 8403524, 17133918, -3112612, -28111007, 12327945, 10750447, 10014012}, + FieldElement{-10312768, 3936952, 9156313, -8897683, 16498692, -994647, -27481051, -666732, 3424691, 7540221}, + }, + { + FieldElement{30322361, -6964110, 11361005, -4143317, 7433304, 4989748, -7071422, -16317219, -9244265, 15258046}, + FieldElement{13054562, -2779497, 19155474, 469045, -12482797, 4566042, 5631406, 2711395, 1062915, -5136345}, + FieldElement{-19240248, -11254599, -29509029, -7499965, -5835763, 13005411, -6066489, 12194497, 32960380, 1459310}, + }, + }, + { + { + FieldElement{19852034, 7027924, 23669353, 10020366, 8586503, -6657907, 394197, -6101885, 18638003, -11174937}, + FieldElement{31395534, 15098109, 26581030, 8030562, -16527914, -5007134, 9012486, -7584354, -6643087, -5442636}, + FieldElement{-9192165, -2347377, -1997099, 4529534, 25766844, 607986, -13222, 9677543, -32294889, -6456008}, + }, + { + FieldElement{-2444496, -149937, 29348902, 8186665, 1873760, 12489863, -30934579, -7839692, -7852844, -8138429}, + FieldElement{-15236356, -15433509, 7766470, 746860, 26346930, -10221762, -27333451, 10754588, -9431476, 5203576}, + FieldElement{31834314, 14135496, -770007, 5159118, 20917671, 
-16768096, -7467973, -7337524, 31809243, 7347066}, + }, + { + FieldElement{-9606723, -11874240, 20414459, 13033986, 13716524, -11691881, 19797970, -12211255, 15192876, -2087490}, + FieldElement{-12663563, -2181719, 1168162, -3804809, 26747877, -14138091, 10609330, 12694420, 33473243, -13382104}, + FieldElement{33184999, 11180355, 15832085, -11385430, -1633671, 225884, 15089336, -11023903, -6135662, 14480053}, + }, + { + FieldElement{31308717, -5619998, 31030840, -1897099, 15674547, -6582883, 5496208, 13685227, 27595050, 8737275}, + FieldElement{-20318852, -15150239, 10933843, -16178022, 8335352, -7546022, -31008351, -12610604, 26498114, 66511}, + FieldElement{22644454, -8761729, -16671776, 4884562, -3105614, -13559366, 30540766, -4286747, -13327787, -7515095}, + }, + { + FieldElement{-28017847, 9834845, 18617207, -2681312, -3401956, -13307506, 8205540, 13585437, -17127465, 15115439}, + FieldElement{23711543, -672915, 31206561, -8362711, 6164647, -9709987, -33535882, -1426096, 8236921, 16492939}, + FieldElement{-23910559, -13515526, -26299483, -4503841, 25005590, -7687270, 19574902, 10071562, 6708380, -6222424}, + }, + { + FieldElement{2101391, -4930054, 19702731, 2367575, -15427167, 1047675, 5301017, 9328700, 29955601, -11678310}, + FieldElement{3096359, 9271816, -21620864, -15521844, -14847996, -7592937, -25892142, -12635595, -9917575, 6216608}, + FieldElement{-32615849, 338663, -25195611, 2510422, -29213566, -13820213, 24822830, -6146567, -26767480, 7525079}, + }, + { + FieldElement{-23066649, -13985623, 16133487, -7896178, -3389565, 778788, -910336, -2782495, -19386633, 11994101}, + FieldElement{21691500, -13624626, -641331, -14367021, 3285881, -3483596, -25064666, 9718258, -7477437, 13381418}, + FieldElement{18445390, -4202236, 14979846, 11622458, -1727110, -3582980, 23111648, -6375247, 28535282, 15779576}, + }, + { + FieldElement{30098053, 3089662, -9234387, 16662135, -21306940, 11308411, -14068454, 12021730, 9955285, -16303356}, + FieldElement{9734894, 
-14576830, -7473633, -9138735, 2060392, 11313496, -18426029, 9924399, 20194861, 13380996}, + FieldElement{-26378102, -7965207, -22167821, 15789297, -18055342, -6168792, -1984914, 15707771, 26342023, 10146099}, + }, + }, + { + { + FieldElement{-26016874, -219943, 21339191, -41388, 19745256, -2878700, -29637280, 2227040, 21612326, -545728}, + FieldElement{-13077387, 1184228, 23562814, -5970442, -20351244, -6348714, 25764461, 12243797, -20856566, 11649658}, + FieldElement{-10031494, 11262626, 27384172, 2271902, 26947504, -15997771, 39944, 6114064, 33514190, 2333242}, + }, + { + FieldElement{-21433588, -12421821, 8119782, 7219913, -21830522, -9016134, -6679750, -12670638, 24350578, -13450001}, + FieldElement{-4116307, -11271533, -23886186, 4843615, -30088339, 690623, -31536088, -10406836, 8317860, 12352766}, + FieldElement{18200138, -14475911, -33087759, -2696619, -23702521, -9102511, -23552096, -2287550, 20712163, 6719373}, + }, + { + FieldElement{26656208, 6075253, -7858556, 1886072, -28344043, 4262326, 11117530, -3763210, 26224235, -3297458}, + FieldElement{-17168938, -14854097, -3395676, -16369877, -19954045, 14050420, 21728352, 9493610, 18620611, -16428628}, + FieldElement{-13323321, 13325349, 11432106, 5964811, 18609221, 6062965, -5269471, -9725556, -30701573, -16479657}, + }, + { + FieldElement{-23860538, -11233159, 26961357, 1640861, -32413112, -16737940, 12248509, -5240639, 13735342, 1934062}, + FieldElement{25089769, 6742589, 17081145, -13406266, 21909293, -16067981, -15136294, -3765346, -21277997, 5473616}, + FieldElement{31883677, -7961101, 1083432, -11572403, 22828471, 13290673, -7125085, 12469656, 29111212, -5451014}, + }, + { + FieldElement{24244947, -15050407, -26262976, 2791540, -14997599, 16666678, 24367466, 6388839, -10295587, 452383}, + FieldElement{-25640782, -3417841, 5217916, 16224624, 19987036, -4082269, -24236251, -5915248, 15766062, 8407814}, + FieldElement{-20406999, 13990231, 15495425, 16395525, 5377168, 15166495, -8917023, -4388953, 
-8067909, 2276718}, + }, + { + FieldElement{30157918, 12924066, -17712050, 9245753, 19895028, 3368142, -23827587, 5096219, 22740376, -7303417}, + FieldElement{2041139, -14256350, 7783687, 13876377, -25946985, -13352459, 24051124, 13742383, -15637599, 13295222}, + FieldElement{33338237, -8505733, 12532113, 7977527, 9106186, -1715251, -17720195, -4612972, -4451357, -14669444}, + }, + { + FieldElement{-20045281, 5454097, -14346548, 6447146, 28862071, 1883651, -2469266, -4141880, 7770569, 9620597}, + FieldElement{23208068, 7979712, 33071466, 8149229, 1758231, -10834995, 30945528, -1694323, -33502340, -14767970}, + FieldElement{1439958, -16270480, -1079989, -793782, 4625402, 10647766, -5043801, 1220118, 30494170, -11440799}, + }, + { + FieldElement{-5037580, -13028295, -2970559, -3061767, 15640974, -6701666, -26739026, 926050, -1684339, -13333647}, + FieldElement{13908495, -3549272, 30919928, -6273825, -21521863, 7989039, 9021034, 9078865, 3353509, 4033511}, + FieldElement{-29663431, -15113610, 32259991, -344482, 24295849, -12912123, 23161163, 8839127, 27485041, 7356032}, + }, + }, + { + { + FieldElement{9661027, 705443, 11980065, -5370154, -1628543, 14661173, -6346142, 2625015, 28431036, -16771834}, + FieldElement{-23839233, -8311415, -25945511, 7480958, -17681669, -8354183, -22545972, 14150565, 15970762, 4099461}, + FieldElement{29262576, 16756590, 26350592, -8793563, 8529671, -11208050, 13617293, -9937143, 11465739, 8317062}, + }, + { + FieldElement{-25493081, -6962928, 32500200, -9419051, -23038724, -2302222, 14898637, 3848455, 20969334, -5157516}, + FieldElement{-20384450, -14347713, -18336405, 13884722, -33039454, 2842114, -21610826, -3649888, 11177095, 14989547}, + FieldElement{-24496721, -11716016, 16959896, 2278463, 12066309, 10137771, 13515641, 2581286, -28487508, 9930240}, + }, + { + FieldElement{-17751622, -2097826, 16544300, -13009300, -15914807, -14949081, 18345767, -13403753, 16291481, -5314038}, + FieldElement{-33229194, 2553288, 32678213, 9875984, 
8534129, 6889387, -9676774, 6957617, 4368891, 9788741}, + FieldElement{16660756, 7281060, -10830758, 12911820, 20108584, -8101676, -21722536, -8613148, 16250552, -11111103}, + }, + { + FieldElement{-19765507, 2390526, -16551031, 14161980, 1905286, 6414907, 4689584, 10604807, -30190403, 4782747}, + FieldElement{-1354539, 14736941, -7367442, -13292886, 7710542, -14155590, -9981571, 4383045, 22546403, 437323}, + FieldElement{31665577, -12180464, -16186830, 1491339, -18368625, 3294682, 27343084, 2786261, -30633590, -14097016}, + }, + { + FieldElement{-14467279, -683715, -33374107, 7448552, 19294360, 14334329, -19690631, 2355319, -19284671, -6114373}, + FieldElement{15121312, -15796162, 6377020, -6031361, -10798111, -12957845, 18952177, 15496498, -29380133, 11754228}, + FieldElement{-2637277, -13483075, 8488727, -14303896, 12728761, -1622493, 7141596, 11724556, 22761615, -10134141}, + }, + { + FieldElement{16918416, 11729663, -18083579, 3022987, -31015732, -13339659, -28741185, -12227393, 32851222, 11717399}, + FieldElement{11166634, 7338049, -6722523, 4531520, -29468672, -7302055, 31474879, 3483633, -1193175, -4030831}, + FieldElement{-185635, 9921305, 31456609, -13536438, -12013818, 13348923, 33142652, 6546660, -19985279, -3948376}, + }, + { + FieldElement{-32460596, 11266712, -11197107, -7899103, 31703694, 3855903, -8537131, -12833048, -30772034, -15486313}, + FieldElement{-18006477, 12709068, 3991746, -6479188, -21491523, -10550425, -31135347, -16049879, 10928917, 3011958}, + FieldElement{-6957757, -15594337, 31696059, 334240, 29576716, 14796075, -30831056, -12805180, 18008031, 10258577}, + }, + { + FieldElement{-22448644, 15655569, 7018479, -4410003, -30314266, -1201591, -1853465, 1367120, 25127874, 6671743}, + FieldElement{29701166, -14373934, -10878120, 9279288, -17568, 13127210, 21382910, 11042292, 25838796, 4642684}, + FieldElement{-20430234, 14955537, -24126347, 8124619, -5369288, -5990470, 30468147, -13900640, 18423289, 4177476}, + }, + }, +} diff -Nru 
juju-core-2.0~beta15/src/golang.org/x/crypto/ed25519/internal/edwards25519/edwards25519.go juju-core-2.0.0/src/golang.org/x/crypto/ed25519/internal/edwards25519/edwards25519.go --- juju-core-2.0~beta15/src/golang.org/x/crypto/ed25519/internal/edwards25519/edwards25519.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/golang.org/x/crypto/ed25519/internal/edwards25519/edwards25519.go 2016-10-13 14:32:00.000000000 +0000 @@ -0,0 +1,1771 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package edwards25519 + +// This code is a port of the public domain, “ref10†implementation of ed25519 +// from SUPERCOP. + +// FieldElement represents an element of the field GF(2^255 - 19). An element +// t, entries t[0]...t[9], represents the integer t[0]+2^26 t[1]+2^51 t[2]+2^77 +// t[3]+2^102 t[4]+...+2^230 t[9]. Bounds on each t[i] vary depending on +// context. +type FieldElement [10]int32 + +var zero FieldElement + +func FeZero(fe *FieldElement) { + copy(fe[:], zero[:]) +} + +func FeOne(fe *FieldElement) { + FeZero(fe) + fe[0] = 1 +} + +func FeAdd(dst, a, b *FieldElement) { + dst[0] = a[0] + b[0] + dst[1] = a[1] + b[1] + dst[2] = a[2] + b[2] + dst[3] = a[3] + b[3] + dst[4] = a[4] + b[4] + dst[5] = a[5] + b[5] + dst[6] = a[6] + b[6] + dst[7] = a[7] + b[7] + dst[8] = a[8] + b[8] + dst[9] = a[9] + b[9] +} + +func FeSub(dst, a, b *FieldElement) { + dst[0] = a[0] - b[0] + dst[1] = a[1] - b[1] + dst[2] = a[2] - b[2] + dst[3] = a[3] - b[3] + dst[4] = a[4] - b[4] + dst[5] = a[5] - b[5] + dst[6] = a[6] - b[6] + dst[7] = a[7] - b[7] + dst[8] = a[8] - b[8] + dst[9] = a[9] - b[9] +} + +func FeCopy(dst, src *FieldElement) { + copy(dst[:], src[:]) +} + +// Replace (f,g) with (g,g) if b == 1; +// replace (f,g) with (f,g) if b == 0. +// +// Preconditions: b in {0,1}. 
+func FeCMove(f, g *FieldElement, b int32) { + b = -b + f[0] ^= b & (f[0] ^ g[0]) + f[1] ^= b & (f[1] ^ g[1]) + f[2] ^= b & (f[2] ^ g[2]) + f[3] ^= b & (f[3] ^ g[3]) + f[4] ^= b & (f[4] ^ g[4]) + f[5] ^= b & (f[5] ^ g[5]) + f[6] ^= b & (f[6] ^ g[6]) + f[7] ^= b & (f[7] ^ g[7]) + f[8] ^= b & (f[8] ^ g[8]) + f[9] ^= b & (f[9] ^ g[9]) +} + +func load3(in []byte) int64 { + var r int64 + r = int64(in[0]) + r |= int64(in[1]) << 8 + r |= int64(in[2]) << 16 + return r +} + +func load4(in []byte) int64 { + var r int64 + r = int64(in[0]) + r |= int64(in[1]) << 8 + r |= int64(in[2]) << 16 + r |= int64(in[3]) << 24 + return r +} + +func FeFromBytes(dst *FieldElement, src *[32]byte) { + h0 := load4(src[:]) + h1 := load3(src[4:]) << 6 + h2 := load3(src[7:]) << 5 + h3 := load3(src[10:]) << 3 + h4 := load3(src[13:]) << 2 + h5 := load4(src[16:]) + h6 := load3(src[20:]) << 7 + h7 := load3(src[23:]) << 5 + h8 := load3(src[26:]) << 4 + h9 := (load3(src[29:]) & 8388607) << 2 + + FeCombine(dst, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9) +} + +// FeToBytes marshals h to s. +// Preconditions: +// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. +// +// Write p=2^255-19; q=floor(h/p). +// Basic claim: q = floor(2^(-255)(h + 19 2^(-25)h9 + 2^(-1))). +// +// Proof: +// Have |h|<=p so |q|<=1 so |19^2 2^(-255) q|<1/4. +// Also have |h-2^230 h9|<2^230 so |19 2^(-255)(h-2^230 h9)|<1/4. +// +// Write y=2^(-1)-19^2 2^(-255)q-19 2^(-255)(h-2^230 h9). +// Then 0> 25 + q = (h[0] + q) >> 26 + q = (h[1] + q) >> 25 + q = (h[2] + q) >> 26 + q = (h[3] + q) >> 25 + q = (h[4] + q) >> 26 + q = (h[5] + q) >> 25 + q = (h[6] + q) >> 26 + q = (h[7] + q) >> 25 + q = (h[8] + q) >> 26 + q = (h[9] + q) >> 25 + + // Goal: Output h-(2^255-19)q, which is between 0 and 2^255-20. + h[0] += 19 * q + // Goal: Output h-2^255 q, which is between 0 and 2^255-20. 
+ + carry[0] = h[0] >> 26 + h[1] += carry[0] + h[0] -= carry[0] << 26 + carry[1] = h[1] >> 25 + h[2] += carry[1] + h[1] -= carry[1] << 25 + carry[2] = h[2] >> 26 + h[3] += carry[2] + h[2] -= carry[2] << 26 + carry[3] = h[3] >> 25 + h[4] += carry[3] + h[3] -= carry[3] << 25 + carry[4] = h[4] >> 26 + h[5] += carry[4] + h[4] -= carry[4] << 26 + carry[5] = h[5] >> 25 + h[6] += carry[5] + h[5] -= carry[5] << 25 + carry[6] = h[6] >> 26 + h[7] += carry[6] + h[6] -= carry[6] << 26 + carry[7] = h[7] >> 25 + h[8] += carry[7] + h[7] -= carry[7] << 25 + carry[8] = h[8] >> 26 + h[9] += carry[8] + h[8] -= carry[8] << 26 + carry[9] = h[9] >> 25 + h[9] -= carry[9] << 25 + // h10 = carry9 + + // Goal: Output h[0]+...+2^255 h10-2^255 q, which is between 0 and 2^255-20. + // Have h[0]+...+2^230 h[9] between 0 and 2^255-1; + // evidently 2^255 h10-2^255 q = 0. + // Goal: Output h[0]+...+2^230 h[9]. + + s[0] = byte(h[0] >> 0) + s[1] = byte(h[0] >> 8) + s[2] = byte(h[0] >> 16) + s[3] = byte((h[0] >> 24) | (h[1] << 2)) + s[4] = byte(h[1] >> 6) + s[5] = byte(h[1] >> 14) + s[6] = byte((h[1] >> 22) | (h[2] << 3)) + s[7] = byte(h[2] >> 5) + s[8] = byte(h[2] >> 13) + s[9] = byte((h[2] >> 21) | (h[3] << 5)) + s[10] = byte(h[3] >> 3) + s[11] = byte(h[3] >> 11) + s[12] = byte((h[3] >> 19) | (h[4] << 6)) + s[13] = byte(h[4] >> 2) + s[14] = byte(h[4] >> 10) + s[15] = byte(h[4] >> 18) + s[16] = byte(h[5] >> 0) + s[17] = byte(h[5] >> 8) + s[18] = byte(h[5] >> 16) + s[19] = byte((h[5] >> 24) | (h[6] << 1)) + s[20] = byte(h[6] >> 7) + s[21] = byte(h[6] >> 15) + s[22] = byte((h[6] >> 23) | (h[7] << 3)) + s[23] = byte(h[7] >> 5) + s[24] = byte(h[7] >> 13) + s[25] = byte((h[7] >> 21) | (h[8] << 4)) + s[26] = byte(h[8] >> 4) + s[27] = byte(h[8] >> 12) + s[28] = byte((h[8] >> 20) | (h[9] << 6)) + s[29] = byte(h[9] >> 2) + s[30] = byte(h[9] >> 10) + s[31] = byte(h[9] >> 18) +} + +func FeIsNegative(f *FieldElement) byte { + var s [32]byte + FeToBytes(&s, f) + return s[0] & 1 +} + +func FeIsNonZero(f 
*FieldElement) int32 { + var s [32]byte + FeToBytes(&s, f) + var x uint8 + for _, b := range s { + x |= b + } + x |= x >> 4 + x |= x >> 2 + x |= x >> 1 + return int32(x & 1) +} + +// FeNeg sets h = -f +// +// Preconditions: +// |f| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. +// +// Postconditions: +// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. +func FeNeg(h, f *FieldElement) { + h[0] = -f[0] + h[1] = -f[1] + h[2] = -f[2] + h[3] = -f[3] + h[4] = -f[4] + h[5] = -f[5] + h[6] = -f[6] + h[7] = -f[7] + h[8] = -f[8] + h[9] = -f[9] +} + +func FeCombine(h *FieldElement, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9 int64) { + var c0, c1, c2, c3, c4, c5, c6, c7, c8, c9 int64 + + /* + |h0| <= (1.1*1.1*2^52*(1+19+19+19+19)+1.1*1.1*2^50*(38+38+38+38+38)) + i.e. |h0| <= 1.2*2^59; narrower ranges for h2, h4, h6, h8 + |h1| <= (1.1*1.1*2^51*(1+1+19+19+19+19+19+19+19+19)) + i.e. |h1| <= 1.5*2^58; narrower ranges for h3, h5, h7, h9 + */ + + c0 = (h0 + (1 << 25)) >> 26 + h1 += c0 + h0 -= c0 << 26 + c4 = (h4 + (1 << 25)) >> 26 + h5 += c4 + h4 -= c4 << 26 + /* |h0| <= 2^25 */ + /* |h4| <= 2^25 */ + /* |h1| <= 1.51*2^58 */ + /* |h5| <= 1.51*2^58 */ + + c1 = (h1 + (1 << 24)) >> 25 + h2 += c1 + h1 -= c1 << 25 + c5 = (h5 + (1 << 24)) >> 25 + h6 += c5 + h5 -= c5 << 25 + /* |h1| <= 2^24; from now on fits into int32 */ + /* |h5| <= 2^24; from now on fits into int32 */ + /* |h2| <= 1.21*2^59 */ + /* |h6| <= 1.21*2^59 */ + + c2 = (h2 + (1 << 25)) >> 26 + h3 += c2 + h2 -= c2 << 26 + c6 = (h6 + (1 << 25)) >> 26 + h7 += c6 + h6 -= c6 << 26 + /* |h2| <= 2^25; from now on fits into int32 unchanged */ + /* |h6| <= 2^25; from now on fits into int32 unchanged */ + /* |h3| <= 1.51*2^58 */ + /* |h7| <= 1.51*2^58 */ + + c3 = (h3 + (1 << 24)) >> 25 + h4 += c3 + h3 -= c3 << 25 + c7 = (h7 + (1 << 24)) >> 25 + h8 += c7 + h7 -= c7 << 25 + /* |h3| <= 2^24; from now on fits into int32 unchanged */ + /* |h7| <= 2^24; from now on fits into int32 unchanged */ + /* |h4| <= 1.52*2^33 */ + /* |h8| <= 
1.52*2^33 */ + + c4 = (h4 + (1 << 25)) >> 26 + h5 += c4 + h4 -= c4 << 26 + c8 = (h8 + (1 << 25)) >> 26 + h9 += c8 + h8 -= c8 << 26 + /* |h4| <= 2^25; from now on fits into int32 unchanged */ + /* |h8| <= 2^25; from now on fits into int32 unchanged */ + /* |h5| <= 1.01*2^24 */ + /* |h9| <= 1.51*2^58 */ + + c9 = (h9 + (1 << 24)) >> 25 + h0 += c9 * 19 + h9 -= c9 << 25 + /* |h9| <= 2^24; from now on fits into int32 unchanged */ + /* |h0| <= 1.8*2^37 */ + + c0 = (h0 + (1 << 25)) >> 26 + h1 += c0 + h0 -= c0 << 26 + /* |h0| <= 2^25; from now on fits into int32 unchanged */ + /* |h1| <= 1.01*2^24 */ + + h[0] = int32(h0) + h[1] = int32(h1) + h[2] = int32(h2) + h[3] = int32(h3) + h[4] = int32(h4) + h[5] = int32(h5) + h[6] = int32(h6) + h[7] = int32(h7) + h[8] = int32(h8) + h[9] = int32(h9) +} + +// FeMul calculates h = f * g +// Can overlap h with f or g. +// +// Preconditions: +// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. +// |g| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. +// +// Postconditions: +// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. +// +// Notes on implementation strategy: +// +// Using schoolbook multiplication. +// Karatsuba would save a little in some cost models. +// +// Most multiplications by 2 and 19 are 32-bit precomputations; +// cheaper than 64-bit postcomputations. +// +// There is one remaining multiplication by 19 in the carry chain; +// one *19 precomputation can be merged into this, +// but the resulting data flow is considerably less clean. +// +// There are 12 carries below. +// 10 of them are 2-way parallelizable and vectorizable. +// Can get away with 11 carries, but then data flow is much deeper. +// +// With tighter constraints on inputs, can squeeze carries into int32. 
+func FeMul(h, f, g *FieldElement) { + f0 := int64(f[0]) + f1 := int64(f[1]) + f2 := int64(f[2]) + f3 := int64(f[3]) + f4 := int64(f[4]) + f5 := int64(f[5]) + f6 := int64(f[6]) + f7 := int64(f[7]) + f8 := int64(f[8]) + f9 := int64(f[9]) + + f1_2 := int64(2 * f[1]) + f3_2 := int64(2 * f[3]) + f5_2 := int64(2 * f[5]) + f7_2 := int64(2 * f[7]) + f9_2 := int64(2 * f[9]) + + g0 := int64(g[0]) + g1 := int64(g[1]) + g2 := int64(g[2]) + g3 := int64(g[3]) + g4 := int64(g[4]) + g5 := int64(g[5]) + g6 := int64(g[6]) + g7 := int64(g[7]) + g8 := int64(g[8]) + g9 := int64(g[9]) + + g1_19 := int64(19 * g[1]) /* 1.4*2^29 */ + g2_19 := int64(19 * g[2]) /* 1.4*2^30; still ok */ + g3_19 := int64(19 * g[3]) + g4_19 := int64(19 * g[4]) + g5_19 := int64(19 * g[5]) + g6_19 := int64(19 * g[6]) + g7_19 := int64(19 * g[7]) + g8_19 := int64(19 * g[8]) + g9_19 := int64(19 * g[9]) + + h0 := f0*g0 + f1_2*g9_19 + f2*g8_19 + f3_2*g7_19 + f4*g6_19 + f5_2*g5_19 + f6*g4_19 + f7_2*g3_19 + f8*g2_19 + f9_2*g1_19 + h1 := f0*g1 + f1*g0 + f2*g9_19 + f3*g8_19 + f4*g7_19 + f5*g6_19 + f6*g5_19 + f7*g4_19 + f8*g3_19 + f9*g2_19 + h2 := f0*g2 + f1_2*g1 + f2*g0 + f3_2*g9_19 + f4*g8_19 + f5_2*g7_19 + f6*g6_19 + f7_2*g5_19 + f8*g4_19 + f9_2*g3_19 + h3 := f0*g3 + f1*g2 + f2*g1 + f3*g0 + f4*g9_19 + f5*g8_19 + f6*g7_19 + f7*g6_19 + f8*g5_19 + f9*g4_19 + h4 := f0*g4 + f1_2*g3 + f2*g2 + f3_2*g1 + f4*g0 + f5_2*g9_19 + f6*g8_19 + f7_2*g7_19 + f8*g6_19 + f9_2*g5_19 + h5 := f0*g5 + f1*g4 + f2*g3 + f3*g2 + f4*g1 + f5*g0 + f6*g9_19 + f7*g8_19 + f8*g7_19 + f9*g6_19 + h6 := f0*g6 + f1_2*g5 + f2*g4 + f3_2*g3 + f4*g2 + f5_2*g1 + f6*g0 + f7_2*g9_19 + f8*g8_19 + f9_2*g7_19 + h7 := f0*g7 + f1*g6 + f2*g5 + f3*g4 + f4*g3 + f5*g2 + f6*g1 + f7*g0 + f8*g9_19 + f9*g8_19 + h8 := f0*g8 + f1_2*g7 + f2*g6 + f3_2*g5 + f4*g4 + f5_2*g3 + f6*g2 + f7_2*g1 + f8*g0 + f9_2*g9_19 + h9 := f0*g9 + f1*g8 + f2*g7 + f3*g6 + f4*g5 + f5*g4 + f6*g3 + f7*g2 + f8*g1 + f9*g0 + + FeCombine(h, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9) +} + +func feSquare(f 
*FieldElement) (h0, h1, h2, h3, h4, h5, h6, h7, h8, h9 int64) { + f0 := int64(f[0]) + f1 := int64(f[1]) + f2 := int64(f[2]) + f3 := int64(f[3]) + f4 := int64(f[4]) + f5 := int64(f[5]) + f6 := int64(f[6]) + f7 := int64(f[7]) + f8 := int64(f[8]) + f9 := int64(f[9]) + f0_2 := int64(2 * f[0]) + f1_2 := int64(2 * f[1]) + f2_2 := int64(2 * f[2]) + f3_2 := int64(2 * f[3]) + f4_2 := int64(2 * f[4]) + f5_2 := int64(2 * f[5]) + f6_2 := int64(2 * f[6]) + f7_2 := int64(2 * f[7]) + f5_38 := 38 * f5 // 1.31*2^30 + f6_19 := 19 * f6 // 1.31*2^30 + f7_38 := 38 * f7 // 1.31*2^30 + f8_19 := 19 * f8 // 1.31*2^30 + f9_38 := 38 * f9 // 1.31*2^30 + + h0 = f0*f0 + f1_2*f9_38 + f2_2*f8_19 + f3_2*f7_38 + f4_2*f6_19 + f5*f5_38 + h1 = f0_2*f1 + f2*f9_38 + f3_2*f8_19 + f4*f7_38 + f5_2*f6_19 + h2 = f0_2*f2 + f1_2*f1 + f3_2*f9_38 + f4_2*f8_19 + f5_2*f7_38 + f6*f6_19 + h3 = f0_2*f3 + f1_2*f2 + f4*f9_38 + f5_2*f8_19 + f6*f7_38 + h4 = f0_2*f4 + f1_2*f3_2 + f2*f2 + f5_2*f9_38 + f6_2*f8_19 + f7*f7_38 + h5 = f0_2*f5 + f1_2*f4 + f2_2*f3 + f6*f9_38 + f7_2*f8_19 + h6 = f0_2*f6 + f1_2*f5_2 + f2_2*f4 + f3_2*f3 + f7_2*f9_38 + f8*f8_19 + h7 = f0_2*f7 + f1_2*f6 + f2_2*f5 + f3_2*f4 + f8*f9_38 + h8 = f0_2*f8 + f1_2*f7_2 + f2_2*f6 + f3_2*f5_2 + f4*f4 + f9*f9_38 + h9 = f0_2*f9 + f1_2*f8 + f2_2*f7 + f3_2*f6 + f4_2*f5 + + return +} + +// FeSquare calculates h = f*f. Can overlap h with f. +// +// Preconditions: +// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. +// +// Postconditions: +// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. +func FeSquare(h, f *FieldElement) { + h0, h1, h2, h3, h4, h5, h6, h7, h8, h9 := feSquare(f) + FeCombine(h, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9) +} + +// FeSquare2 sets h = 2 * f * f +// +// Can overlap h with f. +// +// Preconditions: +// |f| bounded by 1.65*2^26,1.65*2^25,1.65*2^26,1.65*2^25,etc. +// +// Postconditions: +// |h| bounded by 1.01*2^25,1.01*2^24,1.01*2^25,1.01*2^24,etc. +// See fe_mul.c for discussion of implementation strategy. 
+func FeSquare2(h, f *FieldElement) { + h0, h1, h2, h3, h4, h5, h6, h7, h8, h9 := feSquare(f) + + h0 += h0 + h1 += h1 + h2 += h2 + h3 += h3 + h4 += h4 + h5 += h5 + h6 += h6 + h7 += h7 + h8 += h8 + h9 += h9 + + FeCombine(h, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9) +} + +func FeInvert(out, z *FieldElement) { + var t0, t1, t2, t3 FieldElement + var i int + + FeSquare(&t0, z) // 2^1 + FeSquare(&t1, &t0) // 2^2 + for i = 1; i < 2; i++ { // 2^3 + FeSquare(&t1, &t1) + } + FeMul(&t1, z, &t1) // 2^3 + 2^0 + FeMul(&t0, &t0, &t1) // 2^3 + 2^1 + 2^0 + FeSquare(&t2, &t0) // 2^4 + 2^2 + 2^1 + FeMul(&t1, &t1, &t2) // 2^4 + 2^3 + 2^2 + 2^1 + 2^0 + FeSquare(&t2, &t1) // 5,4,3,2,1 + for i = 1; i < 5; i++ { // 9,8,7,6,5 + FeSquare(&t2, &t2) + } + FeMul(&t1, &t2, &t1) // 9,8,7,6,5,4,3,2,1,0 + FeSquare(&t2, &t1) // 10..1 + for i = 1; i < 10; i++ { // 19..10 + FeSquare(&t2, &t2) + } + FeMul(&t2, &t2, &t1) // 19..0 + FeSquare(&t3, &t2) // 20..1 + for i = 1; i < 20; i++ { // 39..20 + FeSquare(&t3, &t3) + } + FeMul(&t2, &t3, &t2) // 39..0 + FeSquare(&t2, &t2) // 40..1 + for i = 1; i < 10; i++ { // 49..10 + FeSquare(&t2, &t2) + } + FeMul(&t1, &t2, &t1) // 49..0 + FeSquare(&t2, &t1) // 50..1 + for i = 1; i < 50; i++ { // 99..50 + FeSquare(&t2, &t2) + } + FeMul(&t2, &t2, &t1) // 99..0 + FeSquare(&t3, &t2) // 100..1 + for i = 1; i < 100; i++ { // 199..100 + FeSquare(&t3, &t3) + } + FeMul(&t2, &t3, &t2) // 199..0 + FeSquare(&t2, &t2) // 200..1 + for i = 1; i < 50; i++ { // 249..50 + FeSquare(&t2, &t2) + } + FeMul(&t1, &t2, &t1) // 249..0 + FeSquare(&t1, &t1) // 250..1 + for i = 1; i < 5; i++ { // 254..5 + FeSquare(&t1, &t1) + } + FeMul(out, &t1, &t0) // 254..5,3,1,0 +} + +func fePow22523(out, z *FieldElement) { + var t0, t1, t2 FieldElement + var i int + + FeSquare(&t0, z) + for i = 1; i < 1; i++ { + FeSquare(&t0, &t0) + } + FeSquare(&t1, &t0) + for i = 1; i < 2; i++ { + FeSquare(&t1, &t1) + } + FeMul(&t1, z, &t1) + FeMul(&t0, &t0, &t1) + FeSquare(&t0, &t0) + for i = 1; i < 1; i++ { + 
FeSquare(&t0, &t0) + } + FeMul(&t0, &t1, &t0) + FeSquare(&t1, &t0) + for i = 1; i < 5; i++ { + FeSquare(&t1, &t1) + } + FeMul(&t0, &t1, &t0) + FeSquare(&t1, &t0) + for i = 1; i < 10; i++ { + FeSquare(&t1, &t1) + } + FeMul(&t1, &t1, &t0) + FeSquare(&t2, &t1) + for i = 1; i < 20; i++ { + FeSquare(&t2, &t2) + } + FeMul(&t1, &t2, &t1) + FeSquare(&t1, &t1) + for i = 1; i < 10; i++ { + FeSquare(&t1, &t1) + } + FeMul(&t0, &t1, &t0) + FeSquare(&t1, &t0) + for i = 1; i < 50; i++ { + FeSquare(&t1, &t1) + } + FeMul(&t1, &t1, &t0) + FeSquare(&t2, &t1) + for i = 1; i < 100; i++ { + FeSquare(&t2, &t2) + } + FeMul(&t1, &t2, &t1) + FeSquare(&t1, &t1) + for i = 1; i < 50; i++ { + FeSquare(&t1, &t1) + } + FeMul(&t0, &t1, &t0) + FeSquare(&t0, &t0) + for i = 1; i < 2; i++ { + FeSquare(&t0, &t0) + } + FeMul(out, &t0, z) +} + +// Group elements are members of the elliptic curve -x^2 + y^2 = 1 + d * x^2 * +// y^2 where d = -121665/121666. +// +// Several representations are used: +// ProjectiveGroupElement: (X:Y:Z) satisfying x=X/Z, y=Y/Z +// ExtendedGroupElement: (X:Y:Z:T) satisfying x=X/Z, y=Y/Z, XY=ZT +// CompletedGroupElement: ((X:Z),(Y:T)) satisfying x=X/Z, y=Y/T +// PreComputedGroupElement: (y+x,y-x,2dxy) + +type ProjectiveGroupElement struct { + X, Y, Z FieldElement +} + +type ExtendedGroupElement struct { + X, Y, Z, T FieldElement +} + +type CompletedGroupElement struct { + X, Y, Z, T FieldElement +} + +type PreComputedGroupElement struct { + yPlusX, yMinusX, xy2d FieldElement +} + +type CachedGroupElement struct { + yPlusX, yMinusX, Z, T2d FieldElement +} + +func (p *ProjectiveGroupElement) Zero() { + FeZero(&p.X) + FeOne(&p.Y) + FeOne(&p.Z) +} + +func (p *ProjectiveGroupElement) Double(r *CompletedGroupElement) { + var t0 FieldElement + + FeSquare(&r.X, &p.X) + FeSquare(&r.Z, &p.Y) + FeSquare2(&r.T, &p.Z) + FeAdd(&r.Y, &p.X, &p.Y) + FeSquare(&t0, &r.Y) + FeAdd(&r.Y, &r.Z, &r.X) + FeSub(&r.Z, &r.Z, &r.X) + FeSub(&r.X, &t0, &r.Y) + FeSub(&r.T, &r.T, &r.Z) +} + +func (p 
*ProjectiveGroupElement) ToBytes(s *[32]byte) { + var recip, x, y FieldElement + + FeInvert(&recip, &p.Z) + FeMul(&x, &p.X, &recip) + FeMul(&y, &p.Y, &recip) + FeToBytes(s, &y) + s[31] ^= FeIsNegative(&x) << 7 +} + +func (p *ExtendedGroupElement) Zero() { + FeZero(&p.X) + FeOne(&p.Y) + FeOne(&p.Z) + FeZero(&p.T) +} + +func (p *ExtendedGroupElement) Double(r *CompletedGroupElement) { + var q ProjectiveGroupElement + p.ToProjective(&q) + q.Double(r) +} + +func (p *ExtendedGroupElement) ToCached(r *CachedGroupElement) { + FeAdd(&r.yPlusX, &p.Y, &p.X) + FeSub(&r.yMinusX, &p.Y, &p.X) + FeCopy(&r.Z, &p.Z) + FeMul(&r.T2d, &p.T, &d2) +} + +func (p *ExtendedGroupElement) ToProjective(r *ProjectiveGroupElement) { + FeCopy(&r.X, &p.X) + FeCopy(&r.Y, &p.Y) + FeCopy(&r.Z, &p.Z) +} + +func (p *ExtendedGroupElement) ToBytes(s *[32]byte) { + var recip, x, y FieldElement + + FeInvert(&recip, &p.Z) + FeMul(&x, &p.X, &recip) + FeMul(&y, &p.Y, &recip) + FeToBytes(s, &y) + s[31] ^= FeIsNegative(&x) << 7 +} + +func (p *ExtendedGroupElement) FromBytes(s *[32]byte) bool { + var u, v, v3, vxx, check FieldElement + + FeFromBytes(&p.Y, s) + FeOne(&p.Z) + FeSquare(&u, &p.Y) + FeMul(&v, &u, &d) + FeSub(&u, &u, &p.Z) // y = y^2-1 + FeAdd(&v, &v, &p.Z) // v = dy^2+1 + + FeSquare(&v3, &v) + FeMul(&v3, &v3, &v) // v3 = v^3 + FeSquare(&p.X, &v3) + FeMul(&p.X, &p.X, &v) + FeMul(&p.X, &p.X, &u) // x = uv^7 + + fePow22523(&p.X, &p.X) // x = (uv^7)^((q-5)/8) + FeMul(&p.X, &p.X, &v3) + FeMul(&p.X, &p.X, &u) // x = uv^3(uv^7)^((q-5)/8) + + var tmpX, tmp2 [32]byte + + FeSquare(&vxx, &p.X) + FeMul(&vxx, &vxx, &v) + FeSub(&check, &vxx, &u) // vx^2-u + if FeIsNonZero(&check) == 1 { + FeAdd(&check, &vxx, &u) // vx^2+u + if FeIsNonZero(&check) == 1 { + return false + } + FeMul(&p.X, &p.X, &SqrtM1) + + FeToBytes(&tmpX, &p.X) + for i, v := range tmpX { + tmp2[31-i] = v + } + } + + if FeIsNegative(&p.X) != (s[31] >> 7) { + FeNeg(&p.X, &p.X) + } + + FeMul(&p.T, &p.X, &p.Y) + return true +} + +func (p 
*CompletedGroupElement) ToProjective(r *ProjectiveGroupElement) { + FeMul(&r.X, &p.X, &p.T) + FeMul(&r.Y, &p.Y, &p.Z) + FeMul(&r.Z, &p.Z, &p.T) +} + +func (p *CompletedGroupElement) ToExtended(r *ExtendedGroupElement) { + FeMul(&r.X, &p.X, &p.T) + FeMul(&r.Y, &p.Y, &p.Z) + FeMul(&r.Z, &p.Z, &p.T) + FeMul(&r.T, &p.X, &p.Y) +} + +func (p *PreComputedGroupElement) Zero() { + FeOne(&p.yPlusX) + FeOne(&p.yMinusX) + FeZero(&p.xy2d) +} + +func geAdd(r *CompletedGroupElement, p *ExtendedGroupElement, q *CachedGroupElement) { + var t0 FieldElement + + FeAdd(&r.X, &p.Y, &p.X) + FeSub(&r.Y, &p.Y, &p.X) + FeMul(&r.Z, &r.X, &q.yPlusX) + FeMul(&r.Y, &r.Y, &q.yMinusX) + FeMul(&r.T, &q.T2d, &p.T) + FeMul(&r.X, &p.Z, &q.Z) + FeAdd(&t0, &r.X, &r.X) + FeSub(&r.X, &r.Z, &r.Y) + FeAdd(&r.Y, &r.Z, &r.Y) + FeAdd(&r.Z, &t0, &r.T) + FeSub(&r.T, &t0, &r.T) +} + +func geSub(r *CompletedGroupElement, p *ExtendedGroupElement, q *CachedGroupElement) { + var t0 FieldElement + + FeAdd(&r.X, &p.Y, &p.X) + FeSub(&r.Y, &p.Y, &p.X) + FeMul(&r.Z, &r.X, &q.yMinusX) + FeMul(&r.Y, &r.Y, &q.yPlusX) + FeMul(&r.T, &q.T2d, &p.T) + FeMul(&r.X, &p.Z, &q.Z) + FeAdd(&t0, &r.X, &r.X) + FeSub(&r.X, &r.Z, &r.Y) + FeAdd(&r.Y, &r.Z, &r.Y) + FeSub(&r.Z, &t0, &r.T) + FeAdd(&r.T, &t0, &r.T) +} + +func geMixedAdd(r *CompletedGroupElement, p *ExtendedGroupElement, q *PreComputedGroupElement) { + var t0 FieldElement + + FeAdd(&r.X, &p.Y, &p.X) + FeSub(&r.Y, &p.Y, &p.X) + FeMul(&r.Z, &r.X, &q.yPlusX) + FeMul(&r.Y, &r.Y, &q.yMinusX) + FeMul(&r.T, &q.xy2d, &p.T) + FeAdd(&t0, &p.Z, &p.Z) + FeSub(&r.X, &r.Z, &r.Y) + FeAdd(&r.Y, &r.Z, &r.Y) + FeAdd(&r.Z, &t0, &r.T) + FeSub(&r.T, &t0, &r.T) +} + +func geMixedSub(r *CompletedGroupElement, p *ExtendedGroupElement, q *PreComputedGroupElement) { + var t0 FieldElement + + FeAdd(&r.X, &p.Y, &p.X) + FeSub(&r.Y, &p.Y, &p.X) + FeMul(&r.Z, &r.X, &q.yMinusX) + FeMul(&r.Y, &r.Y, &q.yPlusX) + FeMul(&r.T, &q.xy2d, &p.T) + FeAdd(&t0, &p.Z, &p.Z) + FeSub(&r.X, &r.Z, &r.Y) + FeAdd(&r.Y, &r.Z, 
&r.Y) + FeSub(&r.Z, &t0, &r.T) + FeAdd(&r.T, &t0, &r.T) +} + +func slide(r *[256]int8, a *[32]byte) { + for i := range r { + r[i] = int8(1 & (a[i>>3] >> uint(i&7))) + } + + for i := range r { + if r[i] != 0 { + for b := 1; b <= 6 && i+b < 256; b++ { + if r[i+b] != 0 { + if r[i]+(r[i+b]<<uint(b)) <= 15 { + r[i] += r[i+b] << uint(b) + r[i+b] = 0 + } else if r[i]-(r[i+b]<<uint(b)) >= -15 { + r[i] -= r[i+b] << uint(b) + for k := i + b; k < 256; k++ { + if r[k] == 0 { + r[k] = 1 + break + } + r[k] = 0 + } + } else { + break + } + } + } + } + } +} + +// GeDoubleScalarMultVartime sets r = a*A + b*B +// where a = a[0]+256*a[1]+...+256^31 a[31]. +// and b = b[0]+256*b[1]+...+256^31 b[31]. +// B is the Ed25519 base point (x,4/5) with x positive. +func GeDoubleScalarMultVartime(r *ProjectiveGroupElement, a *[32]byte, A *ExtendedGroupElement, b *[32]byte) { + var aSlide, bSlide [256]int8 + var Ai [8]CachedGroupElement // A,3A,5A,7A,9A,11A,13A,15A + var t CompletedGroupElement + var u, A2 ExtendedGroupElement + var i int + + slide(&aSlide, a) + slide(&bSlide, b) + + A.ToCached(&Ai[0]) + A.Double(&t) + t.ToExtended(&A2) + + for i := 0; i < 7; i++ { + geAdd(&t, &A2, &Ai[i]) + t.ToExtended(&u) + u.ToCached(&Ai[i+1]) + } + + r.Zero() + + for i = 255; i >= 0; i-- { + if aSlide[i] != 0 || bSlide[i] != 0 { + break + } + } + + for ; i >= 0; i-- { + r.Double(&t) + + if aSlide[i] > 0 { + t.ToExtended(&u) + geAdd(&t, &u, &Ai[aSlide[i]/2]) + } else if aSlide[i] < 0 { + t.ToExtended(&u) + geSub(&t, &u, &Ai[(-aSlide[i])/2]) + } + + if bSlide[i] > 0 { + t.ToExtended(&u) + geMixedAdd(&t, &u, &bi[bSlide[i]/2]) + } else if bSlide[i] < 0 { + t.ToExtended(&u) + geMixedSub(&t, &u, &bi[(-bSlide[i])/2]) + } + + t.ToProjective(r) + } +} + +// equal returns 1 if b == c and 0 otherwise, assuming that b and c are +// non-negative. +func equal(b, c int32) int32 { + x := uint32(b ^ c) + x-- + return int32(x >> 31) +} + +// negative returns 1 if b < 0 and 0 otherwise. 
+func negative(b int32) int32 { + return (b >> 31) & 1 +} + +func PreComputedGroupElementCMove(t, u *PreComputedGroupElement, b int32) { + FeCMove(&t.yPlusX, &u.yPlusX, b) + FeCMove(&t.yMinusX, &u.yMinusX, b) + FeCMove(&t.xy2d, &u.xy2d, b) +} + +func selectPoint(t *PreComputedGroupElement, pos int32, b int32) { + var minusT PreComputedGroupElement + bNegative := negative(b) + bAbs := b - (((-bNegative) & b) << 1) + + t.Zero() + for i := int32(0); i < 8; i++ { + PreComputedGroupElementCMove(t, &base[pos][i], equal(bAbs, i+1)) + } + FeCopy(&minusT.yPlusX, &t.yMinusX) + FeCopy(&minusT.yMinusX, &t.yPlusX) + FeNeg(&minusT.xy2d, &t.xy2d) + PreComputedGroupElementCMove(t, &minusT, bNegative) +} + +// GeScalarMultBase computes h = a*B, where +// a = a[0]+256*a[1]+...+256^31 a[31] +// B is the Ed25519 base point (x,4/5) with x positive. +// +// Preconditions: +// a[31] <= 127 +func GeScalarMultBase(h *ExtendedGroupElement, a *[32]byte) { + var e [64]int8 + + for i, v := range a { + e[2*i] = int8(v & 15) + e[2*i+1] = int8((v >> 4) & 15) + } + + // each e[i] is between 0 and 15 and e[63] is between 0 and 7. + + carry := int8(0) + for i := 0; i < 63; i++ { + e[i] += carry + carry = (e[i] + 8) >> 4 + e[i] -= carry << 4 + } + e[63] += carry + // each e[i] is between -8 and 8. + + h.Zero() + var t PreComputedGroupElement + var r CompletedGroupElement + for i := int32(1); i < 64; i += 2 { + selectPoint(&t, i/2, int32(e[i])) + geMixedAdd(&r, h, &t) + r.ToExtended(h) + } + + var s ProjectiveGroupElement + + h.Double(&r) + r.ToProjective(&s) + s.Double(&r) + r.ToProjective(&s) + s.Double(&r) + r.ToProjective(&s) + s.Double(&r) + r.ToExtended(h) + + for i := int32(0); i < 64; i += 2 { + selectPoint(&t, i/2, int32(e[i])) + geMixedAdd(&r, h, &t) + r.ToExtended(h) + } +} + +// The scalars are GF(2^252 + 27742317777372353535851937790883648493). 
+ +// Input: +// a[0]+256*a[1]+...+256^31*a[31] = a +// b[0]+256*b[1]+...+256^31*b[31] = b +// c[0]+256*c[1]+...+256^31*c[31] = c +// +// Output: +// s[0]+256*s[1]+...+256^31*s[31] = (ab+c) mod l +// where l = 2^252 + 27742317777372353535851937790883648493. +func ScMulAdd(s, a, b, c *[32]byte) { + a0 := 2097151 & load3(a[:]) + a1 := 2097151 & (load4(a[2:]) >> 5) + a2 := 2097151 & (load3(a[5:]) >> 2) + a3 := 2097151 & (load4(a[7:]) >> 7) + a4 := 2097151 & (load4(a[10:]) >> 4) + a5 := 2097151 & (load3(a[13:]) >> 1) + a6 := 2097151 & (load4(a[15:]) >> 6) + a7 := 2097151 & (load3(a[18:]) >> 3) + a8 := 2097151 & load3(a[21:]) + a9 := 2097151 & (load4(a[23:]) >> 5) + a10 := 2097151 & (load3(a[26:]) >> 2) + a11 := (load4(a[28:]) >> 7) + b0 := 2097151 & load3(b[:]) + b1 := 2097151 & (load4(b[2:]) >> 5) + b2 := 2097151 & (load3(b[5:]) >> 2) + b3 := 2097151 & (load4(b[7:]) >> 7) + b4 := 2097151 & (load4(b[10:]) >> 4) + b5 := 2097151 & (load3(b[13:]) >> 1) + b6 := 2097151 & (load4(b[15:]) >> 6) + b7 := 2097151 & (load3(b[18:]) >> 3) + b8 := 2097151 & load3(b[21:]) + b9 := 2097151 & (load4(b[23:]) >> 5) + b10 := 2097151 & (load3(b[26:]) >> 2) + b11 := (load4(b[28:]) >> 7) + c0 := 2097151 & load3(c[:]) + c1 := 2097151 & (load4(c[2:]) >> 5) + c2 := 2097151 & (load3(c[5:]) >> 2) + c3 := 2097151 & (load4(c[7:]) >> 7) + c4 := 2097151 & (load4(c[10:]) >> 4) + c5 := 2097151 & (load3(c[13:]) >> 1) + c6 := 2097151 & (load4(c[15:]) >> 6) + c7 := 2097151 & (load3(c[18:]) >> 3) + c8 := 2097151 & load3(c[21:]) + c9 := 2097151 & (load4(c[23:]) >> 5) + c10 := 2097151 & (load3(c[26:]) >> 2) + c11 := (load4(c[28:]) >> 7) + var carry [23]int64 + + s0 := c0 + a0*b0 + s1 := c1 + a0*b1 + a1*b0 + s2 := c2 + a0*b2 + a1*b1 + a2*b0 + s3 := c3 + a0*b3 + a1*b2 + a2*b1 + a3*b0 + s4 := c4 + a0*b4 + a1*b3 + a2*b2 + a3*b1 + a4*b0 + s5 := c5 + a0*b5 + a1*b4 + a2*b3 + a3*b2 + a4*b1 + a5*b0 + s6 := c6 + a0*b6 + a1*b5 + a2*b4 + a3*b3 + a4*b2 + a5*b1 + a6*b0 + s7 := c7 + a0*b7 + a1*b6 + a2*b5 + a3*b4 + a4*b3 + 
a5*b2 + a6*b1 + a7*b0 + s8 := c8 + a0*b8 + a1*b7 + a2*b6 + a3*b5 + a4*b4 + a5*b3 + a6*b2 + a7*b1 + a8*b0 + s9 := c9 + a0*b9 + a1*b8 + a2*b7 + a3*b6 + a4*b5 + a5*b4 + a6*b3 + a7*b2 + a8*b1 + a9*b0 + s10 := c10 + a0*b10 + a1*b9 + a2*b8 + a3*b7 + a4*b6 + a5*b5 + a6*b4 + a7*b3 + a8*b2 + a9*b1 + a10*b0 + s11 := c11 + a0*b11 + a1*b10 + a2*b9 + a3*b8 + a4*b7 + a5*b6 + a6*b5 + a7*b4 + a8*b3 + a9*b2 + a10*b1 + a11*b0 + s12 := a1*b11 + a2*b10 + a3*b9 + a4*b8 + a5*b7 + a6*b6 + a7*b5 + a8*b4 + a9*b3 + a10*b2 + a11*b1 + s13 := a2*b11 + a3*b10 + a4*b9 + a5*b8 + a6*b7 + a7*b6 + a8*b5 + a9*b4 + a10*b3 + a11*b2 + s14 := a3*b11 + a4*b10 + a5*b9 + a6*b8 + a7*b7 + a8*b6 + a9*b5 + a10*b4 + a11*b3 + s15 := a4*b11 + a5*b10 + a6*b9 + a7*b8 + a8*b7 + a9*b6 + a10*b5 + a11*b4 + s16 := a5*b11 + a6*b10 + a7*b9 + a8*b8 + a9*b7 + a10*b6 + a11*b5 + s17 := a6*b11 + a7*b10 + a8*b9 + a9*b8 + a10*b7 + a11*b6 + s18 := a7*b11 + a8*b10 + a9*b9 + a10*b8 + a11*b7 + s19 := a8*b11 + a9*b10 + a10*b9 + a11*b8 + s20 := a9*b11 + a10*b10 + a11*b9 + s21 := a10*b11 + a11*b10 + s22 := a11 * b11 + s23 := int64(0) + + carry[0] = (s0 + (1 << 20)) >> 21 + s1 += carry[0] + s0 -= carry[0] << 21 + carry[2] = (s2 + (1 << 20)) >> 21 + s3 += carry[2] + s2 -= carry[2] << 21 + carry[4] = (s4 + (1 << 20)) >> 21 + s5 += carry[4] + s4 -= carry[4] << 21 + carry[6] = (s6 + (1 << 20)) >> 21 + s7 += carry[6] + s6 -= carry[6] << 21 + carry[8] = (s8 + (1 << 20)) >> 21 + s9 += carry[8] + s8 -= carry[8] << 21 + carry[10] = (s10 + (1 << 20)) >> 21 + s11 += carry[10] + s10 -= carry[10] << 21 + carry[12] = (s12 + (1 << 20)) >> 21 + s13 += carry[12] + s12 -= carry[12] << 21 + carry[14] = (s14 + (1 << 20)) >> 21 + s15 += carry[14] + s14 -= carry[14] << 21 + carry[16] = (s16 + (1 << 20)) >> 21 + s17 += carry[16] + s16 -= carry[16] << 21 + carry[18] = (s18 + (1 << 20)) >> 21 + s19 += carry[18] + s18 -= carry[18] << 21 + carry[20] = (s20 + (1 << 20)) >> 21 + s21 += carry[20] + s20 -= carry[20] << 21 + carry[22] = (s22 + (1 << 20)) >> 21 + s23 += 
carry[22] + s22 -= carry[22] << 21 + + carry[1] = (s1 + (1 << 20)) >> 21 + s2 += carry[1] + s1 -= carry[1] << 21 + carry[3] = (s3 + (1 << 20)) >> 21 + s4 += carry[3] + s3 -= carry[3] << 21 + carry[5] = (s5 + (1 << 20)) >> 21 + s6 += carry[5] + s5 -= carry[5] << 21 + carry[7] = (s7 + (1 << 20)) >> 21 + s8 += carry[7] + s7 -= carry[7] << 21 + carry[9] = (s9 + (1 << 20)) >> 21 + s10 += carry[9] + s9 -= carry[9] << 21 + carry[11] = (s11 + (1 << 20)) >> 21 + s12 += carry[11] + s11 -= carry[11] << 21 + carry[13] = (s13 + (1 << 20)) >> 21 + s14 += carry[13] + s13 -= carry[13] << 21 + carry[15] = (s15 + (1 << 20)) >> 21 + s16 += carry[15] + s15 -= carry[15] << 21 + carry[17] = (s17 + (1 << 20)) >> 21 + s18 += carry[17] + s17 -= carry[17] << 21 + carry[19] = (s19 + (1 << 20)) >> 21 + s20 += carry[19] + s19 -= carry[19] << 21 + carry[21] = (s21 + (1 << 20)) >> 21 + s22 += carry[21] + s21 -= carry[21] << 21 + + s11 += s23 * 666643 + s12 += s23 * 470296 + s13 += s23 * 654183 + s14 -= s23 * 997805 + s15 += s23 * 136657 + s16 -= s23 * 683901 + s23 = 0 + + s10 += s22 * 666643 + s11 += s22 * 470296 + s12 += s22 * 654183 + s13 -= s22 * 997805 + s14 += s22 * 136657 + s15 -= s22 * 683901 + s22 = 0 + + s9 += s21 * 666643 + s10 += s21 * 470296 + s11 += s21 * 654183 + s12 -= s21 * 997805 + s13 += s21 * 136657 + s14 -= s21 * 683901 + s21 = 0 + + s8 += s20 * 666643 + s9 += s20 * 470296 + s10 += s20 * 654183 + s11 -= s20 * 997805 + s12 += s20 * 136657 + s13 -= s20 * 683901 + s20 = 0 + + s7 += s19 * 666643 + s8 += s19 * 470296 + s9 += s19 * 654183 + s10 -= s19 * 997805 + s11 += s19 * 136657 + s12 -= s19 * 683901 + s19 = 0 + + s6 += s18 * 666643 + s7 += s18 * 470296 + s8 += s18 * 654183 + s9 -= s18 * 997805 + s10 += s18 * 136657 + s11 -= s18 * 683901 + s18 = 0 + + carry[6] = (s6 + (1 << 20)) >> 21 + s7 += carry[6] + s6 -= carry[6] << 21 + carry[8] = (s8 + (1 << 20)) >> 21 + s9 += carry[8] + s8 -= carry[8] << 21 + carry[10] = (s10 + (1 << 20)) >> 21 + s11 += carry[10] + s10 -= carry[10] << 21 
+ carry[12] = (s12 + (1 << 20)) >> 21 + s13 += carry[12] + s12 -= carry[12] << 21 + carry[14] = (s14 + (1 << 20)) >> 21 + s15 += carry[14] + s14 -= carry[14] << 21 + carry[16] = (s16 + (1 << 20)) >> 21 + s17 += carry[16] + s16 -= carry[16] << 21 + + carry[7] = (s7 + (1 << 20)) >> 21 + s8 += carry[7] + s7 -= carry[7] << 21 + carry[9] = (s9 + (1 << 20)) >> 21 + s10 += carry[9] + s9 -= carry[9] << 21 + carry[11] = (s11 + (1 << 20)) >> 21 + s12 += carry[11] + s11 -= carry[11] << 21 + carry[13] = (s13 + (1 << 20)) >> 21 + s14 += carry[13] + s13 -= carry[13] << 21 + carry[15] = (s15 + (1 << 20)) >> 21 + s16 += carry[15] + s15 -= carry[15] << 21 + + s5 += s17 * 666643 + s6 += s17 * 470296 + s7 += s17 * 654183 + s8 -= s17 * 997805 + s9 += s17 * 136657 + s10 -= s17 * 683901 + s17 = 0 + + s4 += s16 * 666643 + s5 += s16 * 470296 + s6 += s16 * 654183 + s7 -= s16 * 997805 + s8 += s16 * 136657 + s9 -= s16 * 683901 + s16 = 0 + + s3 += s15 * 666643 + s4 += s15 * 470296 + s5 += s15 * 654183 + s6 -= s15 * 997805 + s7 += s15 * 136657 + s8 -= s15 * 683901 + s15 = 0 + + s2 += s14 * 666643 + s3 += s14 * 470296 + s4 += s14 * 654183 + s5 -= s14 * 997805 + s6 += s14 * 136657 + s7 -= s14 * 683901 + s14 = 0 + + s1 += s13 * 666643 + s2 += s13 * 470296 + s3 += s13 * 654183 + s4 -= s13 * 997805 + s5 += s13 * 136657 + s6 -= s13 * 683901 + s13 = 0 + + s0 += s12 * 666643 + s1 += s12 * 470296 + s2 += s12 * 654183 + s3 -= s12 * 997805 + s4 += s12 * 136657 + s5 -= s12 * 683901 + s12 = 0 + + carry[0] = (s0 + (1 << 20)) >> 21 + s1 += carry[0] + s0 -= carry[0] << 21 + carry[2] = (s2 + (1 << 20)) >> 21 + s3 += carry[2] + s2 -= carry[2] << 21 + carry[4] = (s4 + (1 << 20)) >> 21 + s5 += carry[4] + s4 -= carry[4] << 21 + carry[6] = (s6 + (1 << 20)) >> 21 + s7 += carry[6] + s6 -= carry[6] << 21 + carry[8] = (s8 + (1 << 20)) >> 21 + s9 += carry[8] + s8 -= carry[8] << 21 + carry[10] = (s10 + (1 << 20)) >> 21 + s11 += carry[10] + s10 -= carry[10] << 21 + + carry[1] = (s1 + (1 << 20)) >> 21 + s2 += carry[1] + s1 
-= carry[1] << 21 + carry[3] = (s3 + (1 << 20)) >> 21 + s4 += carry[3] + s3 -= carry[3] << 21 + carry[5] = (s5 + (1 << 20)) >> 21 + s6 += carry[5] + s5 -= carry[5] << 21 + carry[7] = (s7 + (1 << 20)) >> 21 + s8 += carry[7] + s7 -= carry[7] << 21 + carry[9] = (s9 + (1 << 20)) >> 21 + s10 += carry[9] + s9 -= carry[9] << 21 + carry[11] = (s11 + (1 << 20)) >> 21 + s12 += carry[11] + s11 -= carry[11] << 21 + + s0 += s12 * 666643 + s1 += s12 * 470296 + s2 += s12 * 654183 + s3 -= s12 * 997805 + s4 += s12 * 136657 + s5 -= s12 * 683901 + s12 = 0 + + carry[0] = s0 >> 21 + s1 += carry[0] + s0 -= carry[0] << 21 + carry[1] = s1 >> 21 + s2 += carry[1] + s1 -= carry[1] << 21 + carry[2] = s2 >> 21 + s3 += carry[2] + s2 -= carry[2] << 21 + carry[3] = s3 >> 21 + s4 += carry[3] + s3 -= carry[3] << 21 + carry[4] = s4 >> 21 + s5 += carry[4] + s4 -= carry[4] << 21 + carry[5] = s5 >> 21 + s6 += carry[5] + s5 -= carry[5] << 21 + carry[6] = s6 >> 21 + s7 += carry[6] + s6 -= carry[6] << 21 + carry[7] = s7 >> 21 + s8 += carry[7] + s7 -= carry[7] << 21 + carry[8] = s8 >> 21 + s9 += carry[8] + s8 -= carry[8] << 21 + carry[9] = s9 >> 21 + s10 += carry[9] + s9 -= carry[9] << 21 + carry[10] = s10 >> 21 + s11 += carry[10] + s10 -= carry[10] << 21 + carry[11] = s11 >> 21 + s12 += carry[11] + s11 -= carry[11] << 21 + + s0 += s12 * 666643 + s1 += s12 * 470296 + s2 += s12 * 654183 + s3 -= s12 * 997805 + s4 += s12 * 136657 + s5 -= s12 * 683901 + s12 = 0 + + carry[0] = s0 >> 21 + s1 += carry[0] + s0 -= carry[0] << 21 + carry[1] = s1 >> 21 + s2 += carry[1] + s1 -= carry[1] << 21 + carry[2] = s2 >> 21 + s3 += carry[2] + s2 -= carry[2] << 21 + carry[3] = s3 >> 21 + s4 += carry[3] + s3 -= carry[3] << 21 + carry[4] = s4 >> 21 + s5 += carry[4] + s4 -= carry[4] << 21 + carry[5] = s5 >> 21 + s6 += carry[5] + s5 -= carry[5] << 21 + carry[6] = s6 >> 21 + s7 += carry[6] + s6 -= carry[6] << 21 + carry[7] = s7 >> 21 + s8 += carry[7] + s7 -= carry[7] << 21 + carry[8] = s8 >> 21 + s9 += carry[8] + s8 -= carry[8] << 21 
+ carry[9] = s9 >> 21 + s10 += carry[9] + s9 -= carry[9] << 21 + carry[10] = s10 >> 21 + s11 += carry[10] + s10 -= carry[10] << 21 + + s[0] = byte(s0 >> 0) + s[1] = byte(s0 >> 8) + s[2] = byte((s0 >> 16) | (s1 << 5)) + s[3] = byte(s1 >> 3) + s[4] = byte(s1 >> 11) + s[5] = byte((s1 >> 19) | (s2 << 2)) + s[6] = byte(s2 >> 6) + s[7] = byte((s2 >> 14) | (s3 << 7)) + s[8] = byte(s3 >> 1) + s[9] = byte(s3 >> 9) + s[10] = byte((s3 >> 17) | (s4 << 4)) + s[11] = byte(s4 >> 4) + s[12] = byte(s4 >> 12) + s[13] = byte((s4 >> 20) | (s5 << 1)) + s[14] = byte(s5 >> 7) + s[15] = byte((s5 >> 15) | (s6 << 6)) + s[16] = byte(s6 >> 2) + s[17] = byte(s6 >> 10) + s[18] = byte((s6 >> 18) | (s7 << 3)) + s[19] = byte(s7 >> 5) + s[20] = byte(s7 >> 13) + s[21] = byte(s8 >> 0) + s[22] = byte(s8 >> 8) + s[23] = byte((s8 >> 16) | (s9 << 5)) + s[24] = byte(s9 >> 3) + s[25] = byte(s9 >> 11) + s[26] = byte((s9 >> 19) | (s10 << 2)) + s[27] = byte(s10 >> 6) + s[28] = byte((s10 >> 14) | (s11 << 7)) + s[29] = byte(s11 >> 1) + s[30] = byte(s11 >> 9) + s[31] = byte(s11 >> 17) +} + +// Input: +// s[0]+256*s[1]+...+256^63*s[63] = s +// +// Output: +// s[0]+256*s[1]+...+256^31*s[31] = s mod l +// where l = 2^252 + 27742317777372353535851937790883648493. 
+func ScReduce(out *[32]byte, s *[64]byte) { + s0 := 2097151 & load3(s[:]) + s1 := 2097151 & (load4(s[2:]) >> 5) + s2 := 2097151 & (load3(s[5:]) >> 2) + s3 := 2097151 & (load4(s[7:]) >> 7) + s4 := 2097151 & (load4(s[10:]) >> 4) + s5 := 2097151 & (load3(s[13:]) >> 1) + s6 := 2097151 & (load4(s[15:]) >> 6) + s7 := 2097151 & (load3(s[18:]) >> 3) + s8 := 2097151 & load3(s[21:]) + s9 := 2097151 & (load4(s[23:]) >> 5) + s10 := 2097151 & (load3(s[26:]) >> 2) + s11 := 2097151 & (load4(s[28:]) >> 7) + s12 := 2097151 & (load4(s[31:]) >> 4) + s13 := 2097151 & (load3(s[34:]) >> 1) + s14 := 2097151 & (load4(s[36:]) >> 6) + s15 := 2097151 & (load3(s[39:]) >> 3) + s16 := 2097151 & load3(s[42:]) + s17 := 2097151 & (load4(s[44:]) >> 5) + s18 := 2097151 & (load3(s[47:]) >> 2) + s19 := 2097151 & (load4(s[49:]) >> 7) + s20 := 2097151 & (load4(s[52:]) >> 4) + s21 := 2097151 & (load3(s[55:]) >> 1) + s22 := 2097151 & (load4(s[57:]) >> 6) + s23 := (load4(s[60:]) >> 3) + + s11 += s23 * 666643 + s12 += s23 * 470296 + s13 += s23 * 654183 + s14 -= s23 * 997805 + s15 += s23 * 136657 + s16 -= s23 * 683901 + s23 = 0 + + s10 += s22 * 666643 + s11 += s22 * 470296 + s12 += s22 * 654183 + s13 -= s22 * 997805 + s14 += s22 * 136657 + s15 -= s22 * 683901 + s22 = 0 + + s9 += s21 * 666643 + s10 += s21 * 470296 + s11 += s21 * 654183 + s12 -= s21 * 997805 + s13 += s21 * 136657 + s14 -= s21 * 683901 + s21 = 0 + + s8 += s20 * 666643 + s9 += s20 * 470296 + s10 += s20 * 654183 + s11 -= s20 * 997805 + s12 += s20 * 136657 + s13 -= s20 * 683901 + s20 = 0 + + s7 += s19 * 666643 + s8 += s19 * 470296 + s9 += s19 * 654183 + s10 -= s19 * 997805 + s11 += s19 * 136657 + s12 -= s19 * 683901 + s19 = 0 + + s6 += s18 * 666643 + s7 += s18 * 470296 + s8 += s18 * 654183 + s9 -= s18 * 997805 + s10 += s18 * 136657 + s11 -= s18 * 683901 + s18 = 0 + + var carry [17]int64 + + carry[6] = (s6 + (1 << 20)) >> 21 + s7 += carry[6] + s6 -= carry[6] << 21 + carry[8] = (s8 + (1 << 20)) >> 21 + s9 += carry[8] + s8 -= carry[8] << 21 + 
carry[10] = (s10 + (1 << 20)) >> 21 + s11 += carry[10] + s10 -= carry[10] << 21 + carry[12] = (s12 + (1 << 20)) >> 21 + s13 += carry[12] + s12 -= carry[12] << 21 + carry[14] = (s14 + (1 << 20)) >> 21 + s15 += carry[14] + s14 -= carry[14] << 21 + carry[16] = (s16 + (1 << 20)) >> 21 + s17 += carry[16] + s16 -= carry[16] << 21 + + carry[7] = (s7 + (1 << 20)) >> 21 + s8 += carry[7] + s7 -= carry[7] << 21 + carry[9] = (s9 + (1 << 20)) >> 21 + s10 += carry[9] + s9 -= carry[9] << 21 + carry[11] = (s11 + (1 << 20)) >> 21 + s12 += carry[11] + s11 -= carry[11] << 21 + carry[13] = (s13 + (1 << 20)) >> 21 + s14 += carry[13] + s13 -= carry[13] << 21 + carry[15] = (s15 + (1 << 20)) >> 21 + s16 += carry[15] + s15 -= carry[15] << 21 + + s5 += s17 * 666643 + s6 += s17 * 470296 + s7 += s17 * 654183 + s8 -= s17 * 997805 + s9 += s17 * 136657 + s10 -= s17 * 683901 + s17 = 0 + + s4 += s16 * 666643 + s5 += s16 * 470296 + s6 += s16 * 654183 + s7 -= s16 * 997805 + s8 += s16 * 136657 + s9 -= s16 * 683901 + s16 = 0 + + s3 += s15 * 666643 + s4 += s15 * 470296 + s5 += s15 * 654183 + s6 -= s15 * 997805 + s7 += s15 * 136657 + s8 -= s15 * 683901 + s15 = 0 + + s2 += s14 * 666643 + s3 += s14 * 470296 + s4 += s14 * 654183 + s5 -= s14 * 997805 + s6 += s14 * 136657 + s7 -= s14 * 683901 + s14 = 0 + + s1 += s13 * 666643 + s2 += s13 * 470296 + s3 += s13 * 654183 + s4 -= s13 * 997805 + s5 += s13 * 136657 + s6 -= s13 * 683901 + s13 = 0 + + s0 += s12 * 666643 + s1 += s12 * 470296 + s2 += s12 * 654183 + s3 -= s12 * 997805 + s4 += s12 * 136657 + s5 -= s12 * 683901 + s12 = 0 + + carry[0] = (s0 + (1 << 20)) >> 21 + s1 += carry[0] + s0 -= carry[0] << 21 + carry[2] = (s2 + (1 << 20)) >> 21 + s3 += carry[2] + s2 -= carry[2] << 21 + carry[4] = (s4 + (1 << 20)) >> 21 + s5 += carry[4] + s4 -= carry[4] << 21 + carry[6] = (s6 + (1 << 20)) >> 21 + s7 += carry[6] + s6 -= carry[6] << 21 + carry[8] = (s8 + (1 << 20)) >> 21 + s9 += carry[8] + s8 -= carry[8] << 21 + carry[10] = (s10 + (1 << 20)) >> 21 + s11 += carry[10] + 
s10 -= carry[10] << 21 + + carry[1] = (s1 + (1 << 20)) >> 21 + s2 += carry[1] + s1 -= carry[1] << 21 + carry[3] = (s3 + (1 << 20)) >> 21 + s4 += carry[3] + s3 -= carry[3] << 21 + carry[5] = (s5 + (1 << 20)) >> 21 + s6 += carry[5] + s5 -= carry[5] << 21 + carry[7] = (s7 + (1 << 20)) >> 21 + s8 += carry[7] + s7 -= carry[7] << 21 + carry[9] = (s9 + (1 << 20)) >> 21 + s10 += carry[9] + s9 -= carry[9] << 21 + carry[11] = (s11 + (1 << 20)) >> 21 + s12 += carry[11] + s11 -= carry[11] << 21 + + s0 += s12 * 666643 + s1 += s12 * 470296 + s2 += s12 * 654183 + s3 -= s12 * 997805 + s4 += s12 * 136657 + s5 -= s12 * 683901 + s12 = 0 + + carry[0] = s0 >> 21 + s1 += carry[0] + s0 -= carry[0] << 21 + carry[1] = s1 >> 21 + s2 += carry[1] + s1 -= carry[1] << 21 + carry[2] = s2 >> 21 + s3 += carry[2] + s2 -= carry[2] << 21 + carry[3] = s3 >> 21 + s4 += carry[3] + s3 -= carry[3] << 21 + carry[4] = s4 >> 21 + s5 += carry[4] + s4 -= carry[4] << 21 + carry[5] = s5 >> 21 + s6 += carry[5] + s5 -= carry[5] << 21 + carry[6] = s6 >> 21 + s7 += carry[6] + s6 -= carry[6] << 21 + carry[7] = s7 >> 21 + s8 += carry[7] + s7 -= carry[7] << 21 + carry[8] = s8 >> 21 + s9 += carry[8] + s8 -= carry[8] << 21 + carry[9] = s9 >> 21 + s10 += carry[9] + s9 -= carry[9] << 21 + carry[10] = s10 >> 21 + s11 += carry[10] + s10 -= carry[10] << 21 + carry[11] = s11 >> 21 + s12 += carry[11] + s11 -= carry[11] << 21 + + s0 += s12 * 666643 + s1 += s12 * 470296 + s2 += s12 * 654183 + s3 -= s12 * 997805 + s4 += s12 * 136657 + s5 -= s12 * 683901 + s12 = 0 + + carry[0] = s0 >> 21 + s1 += carry[0] + s0 -= carry[0] << 21 + carry[1] = s1 >> 21 + s2 += carry[1] + s1 -= carry[1] << 21 + carry[2] = s2 >> 21 + s3 += carry[2] + s2 -= carry[2] << 21 + carry[3] = s3 >> 21 + s4 += carry[3] + s3 -= carry[3] << 21 + carry[4] = s4 >> 21 + s5 += carry[4] + s4 -= carry[4] << 21 + carry[5] = s5 >> 21 + s6 += carry[5] + s5 -= carry[5] << 21 + carry[6] = s6 >> 21 + s7 += carry[6] + s6 -= carry[6] << 21 + carry[7] = s7 >> 21 + s8 += carry[7] + 
s7 -= carry[7] << 21 + carry[8] = s8 >> 21 + s9 += carry[8] + s8 -= carry[8] << 21 + carry[9] = s9 >> 21 + s10 += carry[9] + s9 -= carry[9] << 21 + carry[10] = s10 >> 21 + s11 += carry[10] + s10 -= carry[10] << 21 + + out[0] = byte(s0 >> 0) + out[1] = byte(s0 >> 8) + out[2] = byte((s0 >> 16) | (s1 << 5)) + out[3] = byte(s1 >> 3) + out[4] = byte(s1 >> 11) + out[5] = byte((s1 >> 19) | (s2 << 2)) + out[6] = byte(s2 >> 6) + out[7] = byte((s2 >> 14) | (s3 << 7)) + out[8] = byte(s3 >> 1) + out[9] = byte(s3 >> 9) + out[10] = byte((s3 >> 17) | (s4 << 4)) + out[11] = byte(s4 >> 4) + out[12] = byte(s4 >> 12) + out[13] = byte((s4 >> 20) | (s5 << 1)) + out[14] = byte(s5 >> 7) + out[15] = byte((s5 >> 15) | (s6 << 6)) + out[16] = byte(s6 >> 2) + out[17] = byte(s6 >> 10) + out[18] = byte((s6 >> 18) | (s7 << 3)) + out[19] = byte(s7 >> 5) + out[20] = byte(s7 >> 13) + out[21] = byte(s8 >> 0) + out[22] = byte(s8 >> 8) + out[23] = byte((s8 >> 16) | (s9 << 5)) + out[24] = byte(s9 >> 3) + out[25] = byte(s9 >> 11) + out[26] = byte((s9 >> 19) | (s10 << 2)) + out[27] = byte(s10 >> 6) + out[28] = byte((s10 >> 14) | (s11 << 7)) + out[29] = byte(s11 >> 1) + out[30] = byte(s11 >> 9) + out[31] = byte(s11 >> 17) +} Binary files /tmp/tmpDKp0Le/llc_GcQn6Y/juju-core-2.0~beta15/src/golang.org/x/crypto/ed25519/testdata/sign.input.gz and /tmp/tmpDKp0Le/DWZX6UDllP/juju-core-2.0.0/src/golang.org/x/crypto/ed25519/testdata/sign.input.gz differ diff -Nru juju-core-2.0~beta15/src/golang.org/x/crypto/.gitignore juju-core-2.0.0/src/golang.org/x/crypto/.gitignore --- juju-core-2.0~beta15/src/golang.org/x/crypto/.gitignore 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/golang.org/x/crypto/.gitignore 2016-10-13 14:32:00.000000000 +0000 @@ -0,0 +1,2 @@ +# Add no patterns to .hgignore except for files generated by the build. 
+last-change diff -Nru juju-core-2.0~beta15/src/golang.org/x/crypto/nacl/box/box.go juju-core-2.0.0/src/golang.org/x/crypto/nacl/box/box.go --- juju-core-2.0~beta15/src/golang.org/x/crypto/nacl/box/box.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/golang.org/x/crypto/nacl/box/box.go 2016-10-13 14:32:00.000000000 +0000 @@ -13,15 +13,16 @@ message, etc. Nonces are long enough that randomly generated nonces have negligible risk of collision. -This package is interoperable with NaCl: http://nacl.cr.yp.to/box.html. +This package is interoperable with NaCl: https://nacl.cr.yp.to/box.html. */ package box // import "golang.org/x/crypto/nacl/box" import ( + "io" + "golang.org/x/crypto/curve25519" "golang.org/x/crypto/nacl/secretbox" "golang.org/x/crypto/salsa20/salsa" - "io" ) // Overhead is the number of bytes of overhead when boxing a message. diff -Nru juju-core-2.0~beta15/src/golang.org/x/crypto/nacl/secretbox/example_test.go juju-core-2.0.0/src/golang.org/x/crypto/nacl/secretbox/example_test.go --- juju-core-2.0~beta15/src/golang.org/x/crypto/nacl/secretbox/example_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/golang.org/x/crypto/nacl/secretbox/example_test.go 2016-10-13 14:32:00.000000000 +0000 @@ -0,0 +1,53 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package secretbox_test + +import ( + "crypto/rand" + "encoding/hex" + "fmt" + "io" + + "golang.org/x/crypto/nacl/secretbox" +) + +func Example() { + // Load your secret key from a safe place and reuse it across multiple + // Seal calls. (Obviously don't use this example key for anything + // real.) If you want to convert a passphrase to a key, use a suitable + // package like bcrypt or scrypt. 
+ secretKeyBytes, err := hex.DecodeString("6368616e676520746869732070617373776f726420746f206120736563726574") + if err != nil { + panic(err) + } + + var secretKey [32]byte + copy(secretKey[:], secretKeyBytes) + + // You must use a different nonce for each message you encrypt with the + // same key. Since the nonce here is 192 bits long, a random value + // provides a sufficiently small probability of repeats. + var nonce [24]byte + if _, err := io.ReadFull(rand.Reader, nonce[:]); err != nil { + panic(err) + } + + // This encrypts "hello world" and appends the result to the nonce. + encrypted := secretbox.Seal(nonce[:], []byte("hello world"), &nonce, &secretKey) + + // When you decrypt, you must use the same nonce and key you used to + // encrypt the message. One way to achieve this is to store the nonce + // alongside the encrypted message. Above, we stored the nonce in the first + // 24 bytes of the encrypted text. + var decryptNonce [24]byte + copy(decryptNonce[:], encrypted[:24]) + decrypted, ok := secretbox.Open([]byte{}, encrypted[24:], &decryptNonce, &secretKey) + if !ok { + panic("decryption error") + } + + fmt.Println(string(decrypted)) + // Output: hello world +} diff -Nru juju-core-2.0~beta15/src/golang.org/x/crypto/nacl/secretbox/secretbox.go juju-core-2.0.0/src/golang.org/x/crypto/nacl/secretbox/secretbox.go --- juju-core-2.0~beta15/src/golang.org/x/crypto/nacl/secretbox/secretbox.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/golang.org/x/crypto/nacl/secretbox/secretbox.go 2016-10-13 14:32:00.000000000 +0000 @@ -13,7 +13,7 @@ message, etc. Nonces are long enough that randomly generated nonces have negligible risk of collision. -This package is interoperable with NaCl: http://nacl.cr.yp.to/secretbox.html. +This package is interoperable with NaCl: https://nacl.cr.yp.to/secretbox.html. 
*/ package secretbox // import "golang.org/x/crypto/nacl/secretbox" diff -Nru juju-core-2.0~beta15/src/golang.org/x/crypto/ocsp/ocsp.go juju-core-2.0.0/src/golang.org/x/crypto/ocsp/ocsp.go --- juju-core-2.0~beta15/src/golang.org/x/crypto/ocsp/ocsp.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/golang.org/x/crypto/ocsp/ocsp.go 2016-10-13 14:32:00.000000000 +0000 @@ -19,23 +19,60 @@ "encoding/asn1" "errors" "math/big" + "strconv" "time" ) var idPKIXOCSPBasic = asn1.ObjectIdentifier([]int{1, 3, 6, 1, 5, 5, 7, 48, 1, 1}) -// These are internal structures that reflect the ASN.1 structure of an OCSP -// response. See RFC 2560, section 4.2. +// ResponseStatus contains the result of an OCSP request. See +// https://tools.ietf.org/html/rfc6960#section-2.3 +type ResponseStatus int const ( - ocspSuccess = 0 - ocspMalformed = 1 - ocspInternalError = 2 - ocspTryLater = 3 - ocspSigRequired = 4 - ocspUnauthorized = 5 + Success ResponseStatus = 0 + Malformed ResponseStatus = 1 + InternalError ResponseStatus = 2 + TryLater ResponseStatus = 3 + // Status code four is unused in OCSP. See + // https://tools.ietf.org/html/rfc6960#section-4.2.1 + SignatureRequired ResponseStatus = 5 + Unauthorized ResponseStatus = 6 ) +func (r ResponseStatus) String() string { + switch r { + case Success: + return "success" + case Malformed: + return "malformed" + case InternalError: + return "internal error" + case TryLater: + return "try later" + case SignatureRequired: + return "signature required" + case Unauthorized: + return "unauthorized" + default: + return "unknown OCSP status: " + strconv.Itoa(int(r)) + } +} + +// ResponseError is an error that may be returned by ParseResponse to indicate +// that the response itself is an error, not just that its indicating that a +// certificate is revoked, unknown, etc. 
+type ResponseError struct { + Status ResponseStatus +} + +func (r ResponseError) Error() string { + return "ocsp: error from server: " + r.Status.String() +} + +// These are internal structures that reflect the ASN.1 structure of an OCSP +// response. See RFC 2560, section 4.2. + type certID struct { HashAlgorithm pkix.AlgorithmIdentifier NameHash []byte @@ -60,7 +97,7 @@ type responseASN1 struct { Status asn1.Enumerated - Response responseBytes `asn1:"explicit,tag:0"` + Response responseBytes `asn1:"explicit,tag:0,optional"` } type responseBytes struct { @@ -77,7 +114,7 @@ type responseData struct { Raw asn1.RawContent - Version int `asn1:"optional,default:1,explicit,tag:0"` + Version int `asn1:"optional,default:0,explicit,tag:0"` RawResponderName asn1.RawValue `asn1:"optional,explicit,tag:1"` KeyHash []byte `asn1:"optional,explicit,tag:2"` ProducedAt time.Time `asn1:"generalized"` @@ -85,12 +122,13 @@ } type singleResponse struct { - CertID certID - Good asn1.Flag `asn1:"tag:0,optional"` - Revoked revokedInfo `asn1:"tag:1,optional"` - Unknown asn1.Flag `asn1:"tag:2,optional"` - ThisUpdate time.Time `asn1:"generalized"` - NextUpdate time.Time `asn1:"generalized,explicit,tag:0,optional"` + CertID certID + Good asn1.Flag `asn1:"tag:0,optional"` + Revoked revokedInfo `asn1:"tag:1,optional"` + Unknown asn1.Flag `asn1:"tag:2,optional"` + ThisUpdate time.Time `asn1:"generalized"` + NextUpdate time.Time `asn1:"generalized,explicit,tag:0,optional"` + SingleExtensions []pkix.Extension `asn1:"explicit,tag:1,optional"` } type revokedInfo struct { @@ -106,7 +144,7 @@ oidSignatureSHA384WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 12} oidSignatureSHA512WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 13} oidSignatureDSAWithSHA1 = asn1.ObjectIdentifier{1, 2, 840, 10040, 4, 3} - oidSignatureDSAWithSHA256 = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 4, 3, 2} + oidSignatureDSAWithSHA256 = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 3, 2} 
oidSignatureECDSAWithSHA1 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 1} oidSignatureECDSAWithSHA256 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 2} oidSignatureECDSAWithSHA384 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 3} @@ -235,11 +273,13 @@ // Good means that the certificate is valid. Good = iota // Revoked means that the certificate has been deliberately revoked. - Revoked = iota + Revoked // Unknown means that the OCSP responder doesn't know about the certificate. - Unknown = iota - // ServerFailed means that the OCSP responder failed to process the request. - ServerFailed = iota + Unknown + // ServerFailed is unused and was never used (see + // https://go-review.googlesource.com/#/c/18944). ParseResponse will + // return a ResponseError when an error response is parsed. + ServerFailed ) // The enumerated reasons for revoking a certificate. See RFC 5280. @@ -257,7 +297,7 @@ AACompromise = iota ) -// Request represents an OCSP request. See RFC 2560. +// Request represents an OCSP request. See RFC 6960. type Request struct { HashAlgorithm crypto.Hash IssuerNameHash []byte @@ -265,9 +305,10 @@ SerialNumber *big.Int } -// Response represents an OCSP response. See RFC 2560. +// Response represents an OCSP response containing a single SingleResponse. See +// RFC 6960. type Response struct { - // Status is one of {Good, Revoked, Unknown, ServerFailed} + // Status is one of {Good, Revoked, Unknown} Status int SerialNumber *big.Int ProducedAt, ThisUpdate, NextUpdate, RevokedAt time.Time @@ -278,6 +319,20 @@ TBSResponseData []byte Signature []byte SignatureAlgorithm x509.SignatureAlgorithm + + // Extensions contains raw X.509 extensions from the singleExtensions field + // of the OCSP response. When parsing certificates, this can be used to + // extract non-critical extensions that are not parsed by this package. When + // marshaling OCSP responses, the Extensions field is ignored, see + // ExtraExtensions. 
+ Extensions []pkix.Extension + + // ExtraExtensions contains extensions to be copied, raw, into any marshaled + // OCSP response (in the singleExtensions field). Values override any + // extensions that would otherwise be produced based on the other fields. The + // ExtraExtensions field is not populated when parsing certificates, see + // Extensions. + ExtraExtensions []pkix.Extension } // These are pre-serialized error responses for the various non-success codes @@ -342,8 +397,10 @@ // ParseResponse parses an OCSP response in DER form. It only supports // responses for a single certificate. If the response contains a certificate // then the signature over the response is checked. If issuer is not nil then -// it will be used to validate the signature or embedded certificate. Invalid -// signatures or parse failures will result in a ParseError. +// it will be used to validate the signature or embedded certificate. +// +// Invalid signatures or parse failures will result in a ParseError. Error +// responses will result in a ResponseError. 
func ParseResponse(bytes []byte, issuer *x509.Certificate) (*Response, error) { var resp responseASN1 rest, err := asn1.Unmarshal(bytes, &resp) @@ -354,10 +411,8 @@ return nil, ParseError("trailing data in OCSP response") } - ret := new(Response) - if resp.Status != ocspSuccess { - ret.Status = ServerFailed - return ret, nil + if status := ResponseStatus(resp.Status); status != Success { + return nil, ResponseError{status} } if !resp.Response.ResponseType.Equal(idPKIXOCSPBasic) { @@ -378,9 +433,11 @@ return nil, ParseError("OCSP response contains bad number of responses") } - ret.TBSResponseData = basicResp.TBSResponseData.Raw - ret.Signature = basicResp.Signature.RightAlign() - ret.SignatureAlgorithm = getSignatureAlgorithmFromOID(basicResp.SignatureAlgorithm.Algorithm) + ret := &Response{ + TBSResponseData: basicResp.TBSResponseData.Raw, + Signature: basicResp.Signature.RightAlign(), + SignatureAlgorithm: getSignatureAlgorithmFromOID(basicResp.SignatureAlgorithm.Algorithm), + } if len(basicResp.Certificates) > 0 { ret.Certificate, err = x509.ParseCertificate(basicResp.Certificates[0].FullBytes) @@ -405,6 +462,13 @@ r := basicResp.TBSResponseData.Responses[0] + for _, ext := range r.SingleExtensions { + if ext.Critical { + return nil, ParseError("unsupported critical extension") + } + } + ret.Extensions = r.SingleExtensions + ret.SerialNumber = r.CertID.SerialNumber switch { @@ -534,8 +598,9 @@ IssuerKeyHash: issuerKeyHash, SerialNumber: template.SerialNumber, }, - ThisUpdate: template.ThisUpdate.UTC(), - NextUpdate: template.NextUpdate.UTC(), + ThisUpdate: template.ThisUpdate.UTC(), + NextUpdate: template.NextUpdate.UTC(), + SingleExtensions: template.ExtraExtensions, } switch template.Status { @@ -599,7 +664,7 @@ } return asn1.Marshal(responseASN1{ - Status: ocspSuccess, + Status: asn1.Enumerated(Success), Response: responseBytes{ ResponseType: idPKIXOCSPBasic, Response: responseDER, diff -Nru juju-core-2.0~beta15/src/golang.org/x/crypto/ocsp/ocsp_test.go 
juju-core-2.0.0/src/golang.org/x/crypto/ocsp/ocsp_test.go --- juju-core-2.0~beta15/src/golang.org/x/crypto/ocsp/ocsp_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/golang.org/x/crypto/ocsp/ocsp_test.go 2016-10-13 14:32:00.000000000 +0000 @@ -62,6 +62,30 @@ } } +func TestOCSPDecodeWithExtensions(t *testing.T) { + responseBytes, _ := hex.DecodeString(ocspResponseWithCriticalExtensionHex) + _, err := ParseResponse(responseBytes, nil) + if err == nil { + t.Error(err) + } + + responseBytes, _ = hex.DecodeString(ocspResponseWithExtensionHex) + response, err := ParseResponse(responseBytes, nil) + if err != nil { + t.Fatal(err) + } + + if len(response.Extensions) != 1 { + t.Errorf("len(response.Extensions): got %v, want %v", len(response.Extensions), 1) + } + + extensionBytes := response.Extensions[0].Value + expectedBytes, _ := hex.DecodeString(ocspExtensionValueHex) + if !bytes.Equal(extensionBytes, expectedBytes) { + t.Errorf("response.Extensions[0]: got %x, want %x", extensionBytes, expectedBytes) + } +} + func TestOCSPSignature(t *testing.T) { issuerCert, _ := hex.DecodeString(startComHex) issuer, err := x509.ParseCertificate(issuerCert) @@ -162,6 +186,15 @@ t.Fatal(err) } + extensionBytes, _ := hex.DecodeString(ocspExtensionValueHex) + extensions := []pkix.Extension{ + pkix.Extension{ + Id: ocspExtensionOID, + Critical: false, + Value: extensionBytes, + }, + } + producedAt := time.Now().Truncate(time.Minute) thisUpdate := time.Date(2010, 7, 7, 15, 1, 5, 0, time.UTC) nextUpdate := time.Date(2010, 7, 7, 18, 35, 17, 0, time.UTC) @@ -173,6 +206,7 @@ RevokedAt: thisUpdate, RevocationReason: KeyCompromise, Certificate: responder, + ExtraExtensions: extensions, } responseBytes, err := CreateResponse(issuer, responder, template, responderPrivateKey) @@ -197,6 +231,10 @@ t.Errorf("resp.RevokedAt: got %d, want %d", resp.RevokedAt, template.RevokedAt) } + if !reflect.DeepEqual(resp.Extensions, template.ExtraExtensions) { + t.Errorf("resp.Extensions: got %v, 
want %v", resp.Extensions, template.ExtraExtensions) + } + if !resp.ProducedAt.Equal(producedAt) { t.Errorf("resp.ProducedAt: got %d, want %d", resp.ProducedAt, producedAt) } @@ -214,6 +252,19 @@ } } +func TestErrorResponse(t *testing.T) { + responseBytes, _ := hex.DecodeString(errorResponseHex) + _, err := ParseResponse(responseBytes, nil) + + respErr, ok := err.(ResponseError) + if !ok { + t.Fatalf("expected ResponseError from ParseResponse but got %#v", err) + } + if respErr.Status != Malformed { + t.Fatalf("expected Malformed status from ParseResponse but got %d", respErr.Status) + } +} + // This OCSP response was taken from Thawte's public OCSP responder. // To recreate: // $ openssl s_client -tls1 -showcerts -servername www.google.com -connect www.google.com:443 @@ -333,6 +384,84 @@ "20a1a65c7f0b6427a224b3c98edd96b9b61f706099951188b0289555ad30a216fb774651" + "5a35fca2e054dfa8" +// PKIX nonce extension +var ocspExtensionOID = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 48, 1, 2} +var ocspExtensionValueHex = "0403000000" + +const ocspResponseWithCriticalExtensionHex = "308204fe0a0100a08204f7308204f306092b0601050507300101048204e4308204e03081" + + "dba003020100a11b3019311730150603550403130e4f43535020526573706f6e64657218" + + "0f32303136303130343137303130305a3081a53081a23049300906052b0e03021a050004" + + "14c0fe0278fc99188891b3f212e9c7e1b21ab7bfc004140dfc1df0a9e0f01ce7f2b21317" + + "7e6f8d157cd4f60210017f77deb3bcbb235d44ccc7dba62e72a116180f32303130303730" + + "373135303130355aa0030a0101180f32303130303730373135303130355aa011180f3230" + + "3130303730373138333531375aa1193017301506092b06010505073001020101ff040504" + + "03000000300d06092a864886f70d01010b0500038201010031c730ca60a7a0d92d8e4010" + + "911b469de95b4d27e89de6537552436237967694f76f701cf6b45c932bd308bca4a8d092" + + "5c604ba94796903091d9e6c000178e72c1f0a24a277dd262835af5d17d3f9d7869606c9f" + + "e7c8e708a41645699895beee38bfa63bb46296683761c5d1d65439b8ab868dc3017c9eeb" + + 
"b70b82dbf3a31c55b457d48bb9e82b335ed49f445042eaf606b06a3e0639824924c89c63" + + "eccddfe85e6694314138b2536f5e15e07085d0f6e26d4b2f8244bab0d70de07283ac6384" + + "a0501fc3dea7cf0adfd4c7f34871080900e252ddc403e3f0265f2a704af905d3727504ed" + + "28f3214a219d898a022463c78439799ca81c8cbafdbcec34ea937cd6a08202ea308202e6" + + "308202e2308201caa003020102020101300d06092a864886f70d01010b05003019311730" + + "150603550403130e4f43535020526573706f6e646572301e170d31353031333031353530" + + "33335a170d3136303133303135353033335a3019311730150603550403130e4f43535020" + + "526573706f6e64657230820122300d06092a864886f70d01010105000382010f00308201" + + "0a0282010100e8155f2d3e6f2e8d14c62a788bd462f9f844e7a6977c83ef1099f0f6616e" + + "c5265b56f356e62c5400f0b06a2e7945a82752c636df32a895152d6074df1701dc6ccfbc" + + "bec75a70bd2b55ae2be7e6cad3b5fd4cd5b7790ab401a436d3f5f346074ffde8a99d5b72" + + "3350f0a112076614b12ef79c78991b119453445acf2416ab0046b540db14c9fc0f27b898" + + "9ad0f63aa4b8aefc91aa8a72160c36307c60fec78a93d3fddf4259902aa77e7332971c7d" + + "285b6a04f648993c6922a3e9da9adf5f81508c3228791843e5d49f24db2f1290bafd97e6" + + "55b1049a199f652cd603c4fafa330c390b0da78fbbc67e8fa021cbd74eb96222b12ace31" + + "a77dcf920334dc94581b0203010001a3353033300e0603551d0f0101ff04040302078030" + + "130603551d25040c300a06082b06010505070309300c0603551d130101ff04023000300d" + + "06092a864886f70d01010b05000382010100718012761b5063e18f0dc44644d8e6ab8612" + + "31c15fd5357805425d82aec1de85bf6d3e30fce205e3e3b8b795bbe52e40a439286d2288" + + "9064f4aeeb150359b9425f1da51b3a5c939018555d13ac42c565a0603786a919328f3267" + + "09dce52c22ad958ecb7873b9771d1148b1c4be2efe80ba868919fc9f68b6090c2f33c156" + + "d67156e42766a50b5d51e79637b7e58af74c2a951b1e642fa7741fec982cc937de37eff5" + + "9e2005d5939bfc031589ca143e6e8ab83f40ee08cc20a6b4a95a318352c28d18528dcaf9" + + "66705de17afa19d6e8ae91ddf33179d16ebb6ac2c69cae8373d408ebf8c55308be6c04d9" + + "3a25439a94299a65a709756c7a3e568be049d5c38839" + +const ocspResponseWithExtensionHex = 
"308204fb0a0100a08204f4308204f006092b0601050507300101048204e1308204dd3081" + + "d8a003020100a11b3019311730150603550403130e4f43535020526573706f6e64657218" + + "0f32303136303130343136353930305a3081a230819f3049300906052b0e03021a050004" + + "14c0fe0278fc99188891b3f212e9c7e1b21ab7bfc004140dfc1df0a9e0f01ce7f2b21317" + + "7e6f8d157cd4f60210017f77deb3bcbb235d44ccc7dba62e72a116180f32303130303730" + + "373135303130355aa0030a0101180f32303130303730373135303130355aa011180f3230" + + "3130303730373138333531375aa1163014301206092b0601050507300102040504030000" + + "00300d06092a864886f70d01010b05000382010100c09a33e0b2324c852421bb83f85ac9" + + "9113f5426012bd2d2279a8166e9241d18a33c870894250622ffc7ed0c4601b16d624f90b" + + "779265442cdb6868cf40ab304ab4b66e7315ed02cf663b1601d1d4751772b31bc299db23" + + "9aebac78ed6797c06ed815a7a8d18d63cfbb609cafb47ec2e89e37db255216eb09307848" + + "d01be0a3e943653c78212b96ff524b74c9ec456b17cdfb950cc97645c577b2e09ff41dde" + + "b03afb3adaa381cc0f7c1d95663ef22a0f72f2c45613ae8e2b2d1efc96e8463c7d1d8a1d" + + "7e3b35df8fe73a301fc3f804b942b2b3afa337ff105fc1462b7b1c1d75eb4566c8665e59" + + "f80393b0adbf8004ff6c3327ed34f007cb4a3348a7d55e06e3a08202ea308202e6308202" + + "e2308201caa003020102020101300d06092a864886f70d01010b05003019311730150603" + + "550403130e4f43535020526573706f6e646572301e170d3135303133303135353033335a" + + "170d3136303133303135353033335a3019311730150603550403130e4f43535020526573" + + "706f6e64657230820122300d06092a864886f70d01010105000382010f003082010a0282" + + "010100e8155f2d3e6f2e8d14c62a788bd462f9f844e7a6977c83ef1099f0f6616ec5265b" + + "56f356e62c5400f0b06a2e7945a82752c636df32a895152d6074df1701dc6ccfbcbec75a" + + "70bd2b55ae2be7e6cad3b5fd4cd5b7790ab401a436d3f5f346074ffde8a99d5b723350f0" + + "a112076614b12ef79c78991b119453445acf2416ab0046b540db14c9fc0f27b8989ad0f6" + + "3aa4b8aefc91aa8a72160c36307c60fec78a93d3fddf4259902aa77e7332971c7d285b6a" + + "04f648993c6922a3e9da9adf5f81508c3228791843e5d49f24db2f1290bafd97e655b104" + + 
"9a199f652cd603c4fafa330c390b0da78fbbc67e8fa021cbd74eb96222b12ace31a77dcf" + + "920334dc94581b0203010001a3353033300e0603551d0f0101ff04040302078030130603" + + "551d25040c300a06082b06010505070309300c0603551d130101ff04023000300d06092a" + + "864886f70d01010b05000382010100718012761b5063e18f0dc44644d8e6ab861231c15f" + + "d5357805425d82aec1de85bf6d3e30fce205e3e3b8b795bbe52e40a439286d22889064f4" + + "aeeb150359b9425f1da51b3a5c939018555d13ac42c565a0603786a919328f326709dce5" + + "2c22ad958ecb7873b9771d1148b1c4be2efe80ba868919fc9f68b6090c2f33c156d67156" + + "e42766a50b5d51e79637b7e58af74c2a951b1e642fa7741fec982cc937de37eff59e2005" + + "d5939bfc031589ca143e6e8ab83f40ee08cc20a6b4a95a318352c28d18528dcaf966705d" + + "e17afa19d6e8ae91ddf33179d16ebb6ac2c69cae8373d408ebf8c55308be6c04d93a2543" + + "9a94299a65a709756c7a3e568be049d5c38839" + const ocspRequestHex = "3051304f304d304b3049300906052b0e03021a05000414c0fe0278fc99188891b3f212e9" + "c7e1b21ab7bfc004140dfc1df0a9e0f01ce7f2b213177e6f8d157cd4f60210017f77deb3" + "bcbb235d44ccc7dba62e72" @@ -451,3 +580,5 @@ "9e2005d5939bfc031589ca143e6e8ab83f40ee08cc20a6b4a95a318352c28d18528dcaf9" + "66705de17afa19d6e8ae91ddf33179d16ebb6ac2c69cae8373d408ebf8c55308be6c04d9" + "3a25439a94299a65a709756c7a3e568be049d5c38839" + +const errorResponseHex = "30030a0101" diff -Nru juju-core-2.0~beta15/src/golang.org/x/crypto/openpgp/clearsign/clearsign.go juju-core-2.0.0/src/golang.org/x/crypto/openpgp/clearsign/clearsign.go --- juju-core-2.0~beta15/src/golang.org/x/crypto/openpgp/clearsign/clearsign.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/golang.org/x/crypto/openpgp/clearsign/clearsign.go 2016-10-13 14:32:00.000000000 +0000 @@ -118,6 +118,10 @@ start := rest line, rest = getLine(rest) + if len(line) == 0 && len(rest) == 0 { + // No armored data was found, so this isn't a complete message. + return nil, data + } if bytes.Equal(line, endText) { // Back up to the start of the line because armor expects to see the // header line. 
diff -Nru juju-core-2.0~beta15/src/golang.org/x/crypto/openpgp/clearsign/clearsign_test.go juju-core-2.0.0/src/golang.org/x/crypto/openpgp/clearsign/clearsign_test.go --- juju-core-2.0~beta15/src/golang.org/x/crypto/openpgp/clearsign/clearsign_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/golang.org/x/crypto/openpgp/clearsign/clearsign_test.go 2016-10-13 14:32:00.000000000 +0000 @@ -44,6 +44,12 @@ testParse(t, clearsignInput2, "\r\n\r\n(This message has a couple of blank lines at the start and end.)\r\n\r\n", "\n\n(This message has a couple of blank lines at the start and end.)\n\n\n") } +func TestParseInvalid(t *testing.T) { + if b, _ := Decode(clearsignInput3); b != nil { + t.Fatal("decoded a bad clearsigned message without any error") + } +} + func TestParseWithNoNewlineAtEnd(t *testing.T) { input := clearsignInput input = input[:len(input)-len("trailing")-1] @@ -161,6 +167,13 @@ trailing`) +var clearsignInput3 = []byte(` +-----BEGIN PGP SIGNED MESSAGE----- +Hash: SHA256 + +(This message was truncated.) +`) + var signingKey = `-----BEGIN PGP PRIVATE KEY BLOCK----- Version: GnuPG v1.4.10 (GNU/Linux) diff -Nru juju-core-2.0~beta15/src/golang.org/x/crypto/openpgp/keys.go juju-core-2.0.0/src/golang.org/x/crypto/openpgp/keys.go --- juju-core-2.0~beta15/src/golang.org/x/crypto/openpgp/keys.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/golang.org/x/crypto/openpgp/keys.go 2016-10-13 14:32:00.000000000 +0000 @@ -504,6 +504,12 @@ }, } + // If the user passes in a DefaultHash via packet.Config, + // set the PreferredHash for the SelfSignature. 
+	if config != nil && config.DefaultHash != 0 {
+		e.Identities[uid.Id].SelfSignature.PreferredHash = []uint8{hashToHashId(config.DefaultHash)}
+	}
+
 	e.Subkeys = make([]Subkey, 1)
 	e.Subkeys[0] = Subkey{
 		PublicKey: packet.NewRSAPublicKey(currentTime, &encryptingPriv.PublicKey),
diff -Nru juju-core-2.0~beta15/src/golang.org/x/crypto/openpgp/keys_test.go juju-core-2.0.0/src/golang.org/x/crypto/openpgp/keys_test.go
--- juju-core-2.0~beta15/src/golang.org/x/crypto/openpgp/keys_test.go	2016-08-16 08:56:25.000000000 +0000
+++ juju-core-2.0.0/src/golang.org/x/crypto/openpgp/keys_test.go	2016-10-13 14:32:00.000000000 +0000
@@ -2,6 +2,7 @@
 
 import (
 	"bytes"
+	"crypto"
 	"strings"
 	"testing"
 	"time"
@@ -271,6 +272,39 @@
 	}
 }
 
+func TestNewEntityWithPreferredHash(t *testing.T) {
+	c := &packet.Config{
+		DefaultHash: crypto.SHA256,
+	}
+	entity, err := NewEntity("Golang Gopher", "Test Key", "no-reply@golang.com", c)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	for _, identity := range entity.Identities {
+		if len(identity.SelfSignature.PreferredHash) == 0 {
+			t.Fatal("didn't find a preferred hash in self signature")
+		}
+		ph := hashToHashId(c.DefaultHash)
+		if identity.SelfSignature.PreferredHash[0] != ph {
+			t.Fatalf("Expected preferred hash to be %d, got %d", ph, identity.SelfSignature.PreferredHash[0])
+		}
+	}
+}
+
+func TestNewEntityWithoutPreferredHash(t *testing.T) {
+	entity, err := NewEntity("Golang Gopher", "Test Key", "no-reply@golang.com", nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	for _, identity := range entity.Identities {
+		if len(identity.SelfSignature.PreferredHash) != 0 {
+			t.Fatalf("Expected preferred hash to be empty but got length %d", len(identity.SelfSignature.PreferredHash))
+		}
+	}
+}
+
 const expiringKeyHex = 
"988d0451d1ec5d010400ba3385721f2dc3f4ab096b2ee867ab77213f0a27a8538441c35d2fa225b08798a1439a66a5150e6bdc3f40f5d28d588c712394c632b6299f77db8c0d48d37903fb72ebd794d61be6aa774688839e5fdecfe06b2684cc115d240c98c66cb1ef22ae84e3aa0c2b0c28665c1e7d4d044e7f270706193f5223c8d44e0d70b7b8da830011010001b40f4578706972792074657374206b657988be041301020028050251d1ec5d021b03050900278d00060b090807030206150802090a0b0416020301021e01021780000a091072589ad75e237d8c033503fd10506d72837834eb7f994117740723adc39227104b0d326a1161871c0b415d25b4aedef946ca77ea4c05af9c22b32cf98be86ab890111fced1ee3f75e87b7cc3c00dc63bbc85dfab91c0dc2ad9de2c4d13a34659333a85c6acc1a669c5e1d6cecb0cf1e56c10e72d855ae177ddc9e766f9b2dda57ccbb75f57156438bbdb4e42b88d0451d1ec5d0104009c64906559866c5cb61578f5846a94fcee142a489c9b41e67b12bb54cfe86eb9bc8566460f9a720cb00d6526fbccfd4f552071a8e3f7744b1882d01036d811ee5a3fb91a1c568055758f43ba5d2c6a9676b012f3a1a89e47bbf624f1ad571b208f3cc6224eb378f1645dd3d47584463f9eadeacfd1ce6f813064fbfdcc4b5a53001101000188a504180102000f021b0c050251d1f06b050900093e89000a091072589ad75e237d8c20e00400ab8310a41461425b37889c4da28129b5fae6084fafbc0a47dd1adc74a264c6e9c9cc125f40462ee1433072a58384daef88c961c390ed06426a81b464a53194c4e291ddd7e2e2ba3efced01537d713bd111f48437bde2363446200995e8e0d4e528dda377fd1e8f8ede9c8e2198b393bd86852ce7457a7e3daf74d510461a5b77b88d0451d1ece8010400b3a519f83ab0010307e83bca895170acce8964a044190a2b368892f7a244758d9fc193482648acb1fb9780d28cc22d171931f38bb40279389fc9bf2110876d4f3db4fcfb13f22f7083877fe56592b3b65251312c36f83ffcb6d313c6a17f197dd471f0712aad15a8537b435a92471ba2e5b0c72a6c72536c3b567c558d7b6051001101000188a504180102000f021b0c050251d1f07b050900279091000a091072589ad75e237d8ce69e03fe286026afacf7c97ee20673864d4459a2240b5655219950643c7dba0ac384b1d4359c67805b21d98211f7b09c2a0ccf6410c8c04d4ff4a51293725d8d6570d9d8bb0e10c07d22357caeb49626df99c180be02d77d1fe8ed25e7a54481237646083a9f89a11566cd20b9e995b1487c5f9e02aeb434f3a1897cd416dd0a87861838da3e9e" const subkeyUsageHex = 
"988d04533a52bc010400d26af43085558f65b9e7dbc90cb9238015259aed5e954637adcfa2181548b2d0b60c65f1f42ec5081cbf1bc0a8aa4900acfb77070837c58f26012fbce297d70afe96e759ad63531f0037538e70dbf8e384569b9720d99d8eb39d8d0a2947233ed242436cb6ac7dfe74123354b3d0119b5c235d3dd9c9d6c004f8ffaf67ad8583001101000188b7041f010200210502533b8552170c8001ce094aa433f7040bb2ddf0be3893cb843d0fe70c020700000a0910a42704b92866382aa98404009d63d916a27543da4221c60087c33f1c44bec9998c5438018ed370cca4962876c748e94b73eb39c58eb698063f3fd6346d58dd2a11c0247934c4a9d71f24754f7468f96fb24c3e791dd2392b62f626148ad724189498cbf993db2df7c0cdc2d677c35da0f16cb16c9ce7c33b4de65a4a91b1d21a130ae9cc26067718910ef8e2b417556d627261203c756d627261407379642e65642e61753e88b80413010200220502533a52bc021b03060b090807030206150802090a0b0416020301021e01021780000a0910a42704b92866382a47840400c0c2bd04f5fca586de408b395b3c280a278259c93eaaa8b79a53b97003f8ed502a8a00446dd9947fb462677e4fcac0dac2f0701847d15130aadb6cd9e0705ea0cf5f92f129136c7be21a718d46c8e641eb7f044f2adae573e11ae423a0a9ca51324f03a8a2f34b91fa40c3cc764bee4dccadedb54c768ba0469b683ea53f1c29b88d04533a52bc01040099c92a5d6f8b744224da27bc2369127c35269b58bec179de6bbc038f749344222f85a31933224f26b70243c4e4b2d242f0c4777eaef7b5502f9dad6d8bf3aaeb471210674b74de2d7078af497d55f5cdad97c7bedfbc1b41e8065a97c9c3d344b21fc81d27723af8e374bc595da26ea242dccb6ae497be26eea57e563ed517e90011010001889f0418010200090502533a52bc021b0c000a0910a42704b92866382afa1403ff70284c2de8a043ff51d8d29772602fa98009b7861c540535f874f2c230af8caf5638151a636b21f8255003997ccd29747fdd06777bb24f9593bd7d98a3e887689bf902f999915fcc94625ae487e5d13e6616f89090ebc4fdc7eb5cad8943e4056995bb61c6af37f8043016876a958ec7ebf39c43d20d53b7f546cfa83e8d2604b88d04533b8283010400c0b529316dbdf58b4c54461e7e669dc11c09eb7f73819f178ccd4177b9182b91d138605fcf1e463262fabefa73f94a52b5e15d1904635541c7ea540f07050ce0fb51b73e6f88644cec86e91107c957a114f69554548a85295d2b70bd0b203992f76eb5d493d86d9eabcaa7ef3fc7db7e458438db3fcdb0ca1cc97c638439a9170011010001889f0418010200090502533b828
3021b0c000a0910a42704b92866382adc6d0400cfff6258485a21675adb7a811c3e19ebca18851533f75a7ba317950b9997fda8d1a4c8c76505c08c04b6c2cc31dc704d33da36a21273f2b388a1a706f7c3378b66d887197a525936ed9a69acb57fe7f718133da85ec742001c5d1864e9c6c8ea1b94f1c3759cebfd93b18606066c063a63be86085b7e37bdbc65f9a915bf084bb901a204533b85cd110400aed3d2c52af2b38b5b67904b0ef73d6dd7aef86adb770e2b153cd22489654dcc91730892087bb9856ae2d9f7ed1eb48f214243fe86bfe87b349ebd7c30e630e49c07b21fdabf78b7a95c8b7f969e97e3d33f2e074c63552ba64a2ded7badc05ce0ea2be6d53485f6900c7860c7aa76560376ce963d7271b9b54638a4028b573f00a0d8854bfcdb04986141568046202192263b9b67350400aaa1049dbc7943141ef590a70dcb028d730371d92ea4863de715f7f0f16d168bd3dc266c2450457d46dcbbf0b071547e5fbee7700a820c3750b236335d8d5848adb3c0da010e998908dfd93d961480084f3aea20b247034f8988eccb5546efaa35a92d0451df3aaf1aee5aa36a4c4d462c760ecd9cebcabfbe1412b1f21450f203fd126687cd486496e971a87fd9e1a8a765fe654baa219a6871ab97768596ab05c26c1aeea8f1a2c72395a58dbc12ef9640d2b95784e974a4d2d5a9b17c25fedacfe551bda52602de8f6d2e48443f5dd1a2a2a8e6a5e70ecdb88cd6e766ad9745c7ee91d78cc55c3d06536b49c3fee6c3d0b6ff0fb2bf13a314f57c953b8f4d93bf88e70418010200090502533b85cd021b0200520910a42704b92866382a47200419110200060502533b85cd000a091042ce2c64bc0ba99214b2009e26b26852c8b13b10c35768e40e78fbbb48bd084100a0c79d9ea0844fa5853dd3c85ff3ecae6f2c9dd6c557aa04008bbbc964cd65b9b8299d4ebf31f41cc7264b8cf33a00e82c5af022331fac79efc9563a822497ba012953cefe2629f1242fcdcb911dbb2315985bab060bfd58261ace3c654bdbbe2e8ed27a46e836490145c86dc7bae15c011f7e1ffc33730109b9338cd9f483e7cef3d2f396aab5bd80efb6646d7e778270ee99d934d187dd98" const revokedKeyHex = 
"988d045331ce82010400c4fdf7b40a5477f206e6ee278eaef888ca73bf9128a9eef9f2f1ddb8b7b71a4c07cfa241f028a04edb405e4d916c61d6beabc333813dc7b484d2b3c52ee233c6a79b1eea4e9cc51596ba9cd5ac5aeb9df62d86ea051055b79d03f8a4fa9f38386f5bd17529138f3325d46801514ea9047977e0829ed728e68636802796801be10011010001889f04200102000905025331d0e3021d03000a0910a401d9f09a34f7c042aa040086631196405b7e6af71026b88e98012eab44aa9849f6ef3fa930c7c9f23deaedba9db1538830f8652fb7648ec3fcade8dbcbf9eaf428e83c6cbcc272201bfe2fbb90d41963397a7c0637a1a9d9448ce695d9790db2dc95433ad7be19eb3de72dacf1d6db82c3644c13eae2a3d072b99bb341debba012c5ce4006a7d34a1f4b94b444526567205265766f6b657220283c52656727732022424d204261726973746122204b657920262530305c303e5c29203c72656740626d626172697374612e636f2e61753e88b704130102002205025331ce82021b03060b090807030206150802090a0b0416020301021e01021780000a0910a401d9f09a34f7c0019c03f75edfbeb6a73e7225ad3cc52724e2872e04260d7daf0d693c170d8c4b243b8767bc7785763533febc62ec2600c30603c433c095453ede59ff2fcabeb84ce32e0ed9d5cf15ffcbc816202b64370d4d77c1e9077d74e94a16fb4fa2e5bec23a56d7a73cf275f91691ae1801a976fcde09e981a2f6327ac27ea1fecf3185df0d56889c04100102000605025331cfb5000a0910fe9645554e8266b64b4303fc084075396674fb6f778d302ac07cef6bc0b5d07b66b2004c44aef711cbac79617ef06d836b4957522d8772dd94bf41a2f4ac8b1ee6d70c57503f837445a74765a076d07b829b8111fc2a918423ddb817ead7ca2a613ef0bfb9c6b3562aec6c3cf3c75ef3031d81d95f6563e4cdcc9960bcb386c5d757b104fcca5fe11fc709df884604101102000605025331cfe7000a09107b15a67f0b3ddc0317f6009e360beea58f29c1d963a22b962b80788c3fa6c84e009d148cfde6b351469b8eae91187eff07ad9d08fcaab88d045331ce820104009f25e20a42b904f3fa555530fe5c46737cf7bd076c35a2a0d22b11f7e0b61a69320b768f4a80fe13980ce380d1cfc4a0cd8fbe2d2e2ef85416668b77208baa65bf973fe8e500e78cc310d7c8705cdb34328bf80e24f0385fce5845c33bc7943cf6b11b02348a23da0bf6428e57c05135f2dc6bd7c1ce325d666d5a5fd2fd5e410011010001889f04180102000905025331ce82021b0c000a0910a401d9f09a34f7c0418003fe34feafcbeaef348a800a0d908a7a6809cc7304017d820f70f0474d5e23cb17e38b67d
c6dca282c6ca00961f4ec9edf2738d0f087b1d81e4871ef08e1798010863afb4eac4c44a376cb343be929c5be66a78cfd4456ae9ec6a99d97f4e1c3ff3583351db2147a65c0acef5c003fb544ab3a2e2dc4d43646f58b811a6c3a369d1f" diff -Nru juju-core-2.0~beta15/src/golang.org/x/crypto/openpgp/packet/private_key.go juju-core-2.0.0/src/golang.org/x/crypto/openpgp/packet/private_key.go --- juju-core-2.0~beta15/src/golang.org/x/crypto/openpgp/packet/private_key.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/golang.org/x/crypto/openpgp/packet/private_key.go 2016-10-13 14:32:00.000000000 +0000 @@ -8,16 +8,18 @@ "bytes" "crypto/cipher" "crypto/dsa" + "crypto/ecdsa" "crypto/rsa" "crypto/sha1" - "golang.org/x/crypto/openpgp/elgamal" - "golang.org/x/crypto/openpgp/errors" - "golang.org/x/crypto/openpgp/s2k" "io" "io/ioutil" "math/big" "strconv" "time" + + "golang.org/x/crypto/openpgp/elgamal" + "golang.org/x/crypto/openpgp/errors" + "golang.org/x/crypto/openpgp/s2k" ) // PrivateKey represents a possibly encrypted private key. 
See RFC 4880, @@ -47,6 +49,20 @@ return pk } +func NewElGamalPrivateKey(currentTime time.Time, priv *elgamal.PrivateKey) *PrivateKey { + pk := new(PrivateKey) + pk.PublicKey = *NewElGamalPublicKey(currentTime, &priv.PublicKey) + pk.PrivateKey = priv + return pk +} + +func NewECDSAPrivateKey(currentTime time.Time, priv *ecdsa.PrivateKey) *PrivateKey { + pk := new(PrivateKey) + pk.PublicKey = *NewECDSAPublicKey(currentTime, &priv.PublicKey) + pk.PrivateKey = priv + return pk +} + func (pk *PrivateKey) parse(r io.Reader) (err error) { err = (&pk.PublicKey).parse(r) if err != nil { @@ -130,6 +146,10 @@ err = serializeRSAPrivateKey(privateKeyBuf, priv) case *dsa.PrivateKey: err = serializeDSAPrivateKey(privateKeyBuf, priv) + case *elgamal.PrivateKey: + err = serializeElGamalPrivateKey(privateKeyBuf, priv) + case *ecdsa.PrivateKey: + err = serializeECDSAPrivateKey(privateKeyBuf, priv) default: err = errors.InvalidArgumentError("unknown private key type") } @@ -185,6 +205,14 @@ return writeBig(w, priv.X) } +func serializeElGamalPrivateKey(w io.Writer, priv *elgamal.PrivateKey) error { + return writeBig(w, priv.X) +} + +func serializeECDSAPrivateKey(w io.Writer, priv *ecdsa.PrivateKey) error { + return writeBig(w, priv.D) +} + // Decrypt decrypts an encrypted private key using a passphrase. 
func (pk *PrivateKey) Decrypt(passphrase []byte) error { if !pk.Encrypted { @@ -236,6 +264,8 @@ return pk.parseDSAPrivateKey(data) case PubKeyAlgoElGamal: return pk.parseElGamalPrivateKey(data) + case PubKeyAlgoECDSA: + return pk.parseECDSAPrivateKey(data) } panic("impossible") } @@ -309,5 +339,24 @@ pk.Encrypted = false pk.encryptedData = nil + return nil +} + +func (pk *PrivateKey) parseECDSAPrivateKey(data []byte) (err error) { + ecdsaPub := pk.PublicKey.PublicKey.(*ecdsa.PublicKey) + + buf := bytes.NewBuffer(data) + d, _, err := readMPI(buf) + if err != nil { + return + } + + pk.PrivateKey = &ecdsa.PrivateKey{ + PublicKey: *ecdsaPub, + D: new(big.Int).SetBytes(d), + } + pk.Encrypted = false + pk.encryptedData = nil + return nil } diff -Nru juju-core-2.0~beta15/src/golang.org/x/crypto/openpgp/packet/private_key_test.go juju-core-2.0.0/src/golang.org/x/crypto/openpgp/packet/private_key_test.go --- juju-core-2.0~beta15/src/golang.org/x/crypto/openpgp/packet/private_key_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/golang.org/x/crypto/openpgp/packet/private_key_test.go 2016-10-13 14:32:00.000000000 +0000 @@ -5,6 +5,12 @@ package packet import ( + "bytes" + "crypto" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "hash" "testing" "time" ) @@ -56,6 +62,57 @@ } } +func populateHash(hashFunc crypto.Hash, msg []byte) (hash.Hash, error) { + h := hashFunc.New() + if _, err := h.Write(msg); err != nil { + return nil, err + } + return h, nil +} + +func TestECDSAPrivateKey(t *testing.T) { + ecdsaPriv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + if err != nil { + t.Fatal(err) + } + + var buf bytes.Buffer + if err := NewECDSAPrivateKey(time.Now(), ecdsaPriv).Serialize(&buf); err != nil { + t.Fatal(err) + } + + p, err := Read(&buf) + if err != nil { + t.Fatal(err) + } + + priv, ok := p.(*PrivateKey) + if !ok { + t.Fatal("didn't parse private key") + } + + sig := &Signature{ + PubKeyAlgo: PubKeyAlgoECDSA, + Hash: crypto.SHA256, + } + msg 
:= []byte("Hello World!") + + h, err := populateHash(sig.Hash, msg) + if err != nil { + t.Fatal(err) + } + if err := sig.Sign(h, priv, nil); err != nil { + t.Fatal(err) + } + + if h, err = populateHash(sig.Hash, msg); err != nil { + t.Fatal(err) + } + if err := priv.VerifySignature(h, sig); err != nil { + t.Fatal(err) + } +} + func TestIssue11505(t *testing.T) { // parsing a rsa private key with p or q == 1 used to panic due to a divide by zero _, _ = Read(readerFromHex("9c3004303030300100000011303030000000000000010130303030303030303030303030303030303030303030303030303030303030303030303030303030303030")) diff -Nru juju-core-2.0~beta15/src/golang.org/x/crypto/openpgp/packet/public_key.go juju-core-2.0.0/src/golang.org/x/crypto/openpgp/packet/public_key.go --- juju-core-2.0~beta15/src/golang.org/x/crypto/openpgp/packet/public_key.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/golang.org/x/crypto/openpgp/packet/public_key.go 2016-10-13 14:32:00.000000000 +0000 @@ -209,6 +209,47 @@ return pk } +// NewElGamalPublicKey returns a PublicKey that wraps the given elgamal.PublicKey. 
+func NewElGamalPublicKey(creationTime time.Time, pub *elgamal.PublicKey) *PublicKey { + pk := &PublicKey{ + CreationTime: creationTime, + PubKeyAlgo: PubKeyAlgoElGamal, + PublicKey: pub, + p: fromBig(pub.P), + g: fromBig(pub.G), + y: fromBig(pub.Y), + } + + pk.setFingerPrintAndKeyId() + return pk +} + +func NewECDSAPublicKey(creationTime time.Time, pub *ecdsa.PublicKey) *PublicKey { + pk := &PublicKey{ + CreationTime: creationTime, + PubKeyAlgo: PubKeyAlgoECDSA, + PublicKey: pub, + ec: new(ecdsaKey), + } + + switch pub.Curve { + case elliptic.P256(): + pk.ec.oid = oidCurveP256 + case elliptic.P384(): + pk.ec.oid = oidCurveP384 + case elliptic.P521(): + pk.ec.oid = oidCurveP521 + default: + panic("unknown elliptic curve") + } + + pk.ec.p.bytes = elliptic.Marshal(pub.Curve, pub.X, pub.Y) + pk.ec.p.bitLength = uint16(8 * len(pk.ec.p.bytes)) + + pk.setFingerPrintAndKeyId() + return pk +} + func (pk *PublicKey) parse(r io.Reader) (err error) { // RFC 4880, section 5.5.2 var buf [6]byte diff -Nru juju-core-2.0~beta15/src/golang.org/x/crypto/openpgp/packet/signature.go juju-core-2.0.0/src/golang.org/x/crypto/openpgp/packet/signature.go --- juju-core-2.0~beta15/src/golang.org/x/crypto/openpgp/packet/signature.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/golang.org/x/crypto/openpgp/packet/signature.go 2016-10-13 14:32:00.000000000 +0000 @@ -8,6 +8,7 @@ "bytes" "crypto" "crypto/dsa" + "crypto/ecdsa" "crypto/rsa" "encoding/binary" "hash" @@ -532,6 +533,12 @@ sig.DSASigS.bytes = s.Bytes() sig.DSASigS.bitLength = uint16(8 * len(sig.DSASigS.bytes)) } + case PubKeyAlgoECDSA: + r, s, err := ecdsa.Sign(config.Random(), priv.PrivateKey.(*ecdsa.PrivateKey), digest) + if err == nil { + sig.ECDSASigR = fromBig(r) + sig.ECDSASigS = fromBig(s) + } default: err = errors.UnsupportedError("public key algorithm: " + strconv.Itoa(int(sig.PubKeyAlgo))) } @@ -546,7 +553,7 @@ func (sig *Signature) SignUserId(id string, pub *PublicKey, priv *PrivateKey, config *Config) error { 
h, err := userIdSignatureHash(id, pub, sig.Hash) if err != nil { - return nil + return err } return sig.Sign(h, priv, config) } diff -Nru juju-core-2.0~beta15/src/golang.org/x/crypto/openpgp/packet/signature_test.go juju-core-2.0.0/src/golang.org/x/crypto/openpgp/packet/signature_test.go --- juju-core-2.0~beta15/src/golang.org/x/crypto/openpgp/packet/signature_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/golang.org/x/crypto/openpgp/packet/signature_test.go 2016-10-13 14:32:00.000000000 +0000 @@ -39,4 +39,40 @@ } } +func TestSignUserId(t *testing.T) { + sig := &Signature{ + SigType: SigTypeGenericCert, + PubKeyAlgo: PubKeyAlgoRSA, + Hash: 0, // invalid hash function + } + + packet, err := Read(readerFromHex(rsaPkDataHex)) + if err != nil { + t.Fatalf("failed to deserialize public key: %v", err) + } + pubKey := packet.(*PublicKey) + + packet, err = Read(readerFromHex(privKeyRSAHex)) + if err != nil { + t.Fatalf("failed to deserialize private key: %v", err) + } + privKey := packet.(*PrivateKey) + + err = sig.SignUserId("", pubKey, privKey, nil) + if err == nil { + t.Errorf("did not receive an error when expected") + } + + sig.Hash = crypto.SHA256 + err = privKey.Decrypt([]byte("testing")) + if err != nil { + t.Fatalf("failed to decrypt private key: %v", err) + } + + err = sig.SignUserId("", pubKey, privKey, nil) + if err != nil { + t.Errorf("failed to sign user id: %v", err) + } +} + const signatureDataHex = 
"c2c05c04000102000605024cb45112000a0910ab105c91af38fb158f8d07ff5596ea368c5efe015bed6e78348c0f033c931d5f2ce5db54ce7f2a7e4b4ad64db758d65a7a71773edeab7ba2a9e0908e6a94a1175edd86c1d843279f045b021a6971a72702fcbd650efc393c5474d5b59a15f96d2eaad4c4c426797e0dcca2803ef41c6ff234d403eec38f31d610c344c06f2401c262f0993b2e66cad8a81ebc4322c723e0d4ba09fe917e8777658307ad8329adacba821420741009dfe87f007759f0982275d028a392c6ed983a0d846f890b36148c7358bdb8a516007fac760261ecd06076813831a36d0459075d1befa245ae7f7fb103d92ca759e9498fe60ef8078a39a3beda510deea251ea9f0a7f0df6ef42060f20780360686f3e400e" diff -Nru juju-core-2.0~beta15/src/golang.org/x/crypto/openpgp/packet/symmetric_key_encrypted.go juju-core-2.0.0/src/golang.org/x/crypto/openpgp/packet/symmetric_key_encrypted.go --- juju-core-2.0~beta15/src/golang.org/x/crypto/openpgp/packet/symmetric_key_encrypted.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/golang.org/x/crypto/openpgp/packet/symmetric_key_encrypted.go 2016-10-13 14:32:00.000000000 +0000 @@ -22,20 +22,17 @@ // 4880, section 5.3. type SymmetricKeyEncrypted struct { CipherFunc CipherFunction - Encrypted bool - Key []byte // Empty unless Encrypted is false. s2k func(out, in []byte) encryptedKey []byte } const symmetricKeyEncryptedVersion = 4 -func (ske *SymmetricKeyEncrypted) parse(r io.Reader) (err error) { +func (ske *SymmetricKeyEncrypted) parse(r io.Reader) error { // RFC 4880, section 5.3. var buf [2]byte - _, err = readFull(r, buf[:]) - if err != nil { - return + if _, err := readFull(r, buf[:]); err != nil { + return err } if buf[0] != symmetricKeyEncryptedVersion { return errors.UnsupportedError("SymmetricKeyEncrypted version") @@ -46,9 +43,10 @@ return errors.UnsupportedError("unknown cipher: " + strconv.Itoa(int(buf[1]))) } + var err error ske.s2k, err = s2k.Parse(r) if err != nil { - return + return err } encryptedKey := make([]byte, maxSessionKeySizeInBytes) @@ -56,9 +54,9 @@ // out. If it exists then we limit it to maxSessionKeySizeInBytes. 
n, err := readFull(r, encryptedKey) if err != nil && err != io.ErrUnexpectedEOF { - return + return err } - err = nil + if n != 0 { if n == maxSessionKeySizeInBytes { return errors.UnsupportedError("oversized encrypted session key") @@ -66,42 +64,35 @@ ske.encryptedKey = encryptedKey[:n] } - ske.Encrypted = true - - return + return nil } -// Decrypt attempts to decrypt an encrypted session key. If it returns nil, -// ske.Key will contain the session key. -func (ske *SymmetricKeyEncrypted) Decrypt(passphrase []byte) error { - if !ske.Encrypted { - return nil - } - +// Decrypt attempts to decrypt an encrypted session key and returns the key and +// the cipher to use when decrypting a subsequent Symmetrically Encrypted Data +// packet. +func (ske *SymmetricKeyEncrypted) Decrypt(passphrase []byte) ([]byte, CipherFunction, error) { key := make([]byte, ske.CipherFunc.KeySize()) ske.s2k(key, passphrase) if len(ske.encryptedKey) == 0 { - ske.Key = key - } else { - // the IV is all zeros - iv := make([]byte, ske.CipherFunc.blockSize()) - c := cipher.NewCFBDecrypter(ske.CipherFunc.new(key), iv) - c.XORKeyStream(ske.encryptedKey, ske.encryptedKey) - ske.CipherFunc = CipherFunction(ske.encryptedKey[0]) - if ske.CipherFunc.blockSize() == 0 { - return errors.UnsupportedError("unknown cipher: " + strconv.Itoa(int(ske.CipherFunc))) - } - ske.CipherFunc = CipherFunction(ske.encryptedKey[0]) - ske.Key = ske.encryptedKey[1:] - if len(ske.Key)%ske.CipherFunc.blockSize() != 0 { - ske.Key = nil - return errors.StructuralError("length of decrypted key not a multiple of block size") - } + return key, ske.CipherFunc, nil } - ske.Encrypted = false - return nil + // the IV is all zeros + iv := make([]byte, ske.CipherFunc.blockSize()) + c := cipher.NewCFBDecrypter(ske.CipherFunc.new(key), iv) + plaintextKey := make([]byte, len(ske.encryptedKey)) + c.XORKeyStream(plaintextKey, ske.encryptedKey) + cipherFunc := CipherFunction(plaintextKey[0]) + if cipherFunc.blockSize() == 0 { + return nil, 
ske.CipherFunc, errors.UnsupportedError("unknown cipher: " + strconv.Itoa(int(cipherFunc))) + } + plaintextKey = plaintextKey[1:] + if l := len(plaintextKey); l == 0 || l%cipherFunc.blockSize() != 0 { + return nil, cipherFunc, errors.StructuralError("length of decrypted key not a multiple of block size") + } + + return plaintextKey, cipherFunc, nil } // SerializeSymmetricKeyEncrypted serializes a symmetric key packet to w. The diff -Nru juju-core-2.0~beta15/src/golang.org/x/crypto/openpgp/packet/symmetric_key_encrypted_test.go juju-core-2.0.0/src/golang.org/x/crypto/openpgp/packet/symmetric_key_encrypted_test.go --- juju-core-2.0~beta15/src/golang.org/x/crypto/openpgp/packet/symmetric_key_encrypted_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/golang.org/x/crypto/openpgp/packet/symmetric_key_encrypted_test.go 2016-10-13 14:32:00.000000000 +0000 @@ -24,7 +24,7 @@ t.Error("didn't find SymmetricKeyEncrypted packet") return } - err = ske.Decrypt([]byte("password")) + key, cipherFunc, err := ske.Decrypt([]byte("password")) if err != nil { t.Error(err) return @@ -40,7 +40,7 @@ t.Error("didn't find SymmetricallyEncrypted packet") return } - r, err := se.Decrypt(ske.CipherFunc, ske.Key) + r, err := se.Decrypt(cipherFunc, key) if err != nil { t.Error(err) return @@ -64,8 +64,9 @@ func TestSerializeSymmetricKeyEncrypted(t *testing.T) { buf := bytes.NewBuffer(nil) passphrase := []byte("testing") + const cipherFunc = CipherAES128 config := &Config{ - DefaultCipher: CipherAES128, + DefaultCipher: cipherFunc, } key, err := SerializeSymmetricKeyEncrypted(buf, passphrase, config) @@ -85,18 +86,18 @@ return } - if !ske.Encrypted { - t.Errorf("SKE not encrypted but should be") - } if ske.CipherFunc != config.DefaultCipher { t.Errorf("SKE cipher function is %d (expected %d)", ske.CipherFunc, config.DefaultCipher) } - err = ske.Decrypt(passphrase) + parsedKey, parsedCipherFunc, err := ske.Decrypt(passphrase) if err != nil { t.Errorf("failed to decrypt reparsed 
SKE: %s", err) return } - if !bytes.Equal(key, ske.Key) { - t.Errorf("keys don't match after Decrpyt: %x (original) vs %x (parsed)", key, ske.Key) + if !bytes.Equal(key, parsedKey) { + t.Errorf("keys don't match after Decrypt: %x (original) vs %x (parsed)", key, parsedKey) + } + if parsedCipherFunc != cipherFunc { + t.Errorf("cipher function doesn't match after Decrypt: %d (original) vs %d (parsed)", cipherFunc, parsedCipherFunc) } } diff -Nru juju-core-2.0~beta15/src/golang.org/x/crypto/openpgp/read.go juju-core-2.0.0/src/golang.org/x/crypto/openpgp/read.go --- juju-core-2.0~beta15/src/golang.org/x/crypto/openpgp/read.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/golang.org/x/crypto/openpgp/read.go 2016-10-13 14:32:00.000000000 +0000 @@ -56,8 +56,9 @@ // been consumed. Once EOF has been seen, the following fields are // valid. (An authentication code failure is reported as a // SignatureError error when reading from UnverifiedBody.) - SignatureError error // nil if the signature is good. - Signature *packet.Signature // the signature packet itself. + SignatureError error // nil if the signature is good. 
+ Signature *packet.Signature // the signature packet itself, if v4 (default) + SignatureV3 *packet.SignatureV3 // the signature packet if it is a v2 or v3 signature decrypted io.ReadCloser } @@ -196,9 +197,9 @@ // Try the symmetric passphrase first if len(symKeys) != 0 && passphrase != nil { for _, s := range symKeys { - err = s.Decrypt(passphrase) - if err == nil && !s.Encrypted { - decrypted, err = se.Decrypt(s.CipherFunc, s.Key) + key, cipherFunc, err := s.Decrypt(passphrase) + if err == nil { + decrypted, err = se.Decrypt(cipherFunc, key) if err != nil && err != errors.ErrKeyIncorrect { return nil, err } @@ -334,13 +335,15 @@ } var ok bool - if scr.md.Signature, ok = p.(*packet.Signature); !ok { + if scr.md.Signature, ok = p.(*packet.Signature); ok { + scr.md.SignatureError = scr.md.SignedBy.PublicKey.VerifySignature(scr.h, scr.md.Signature) + } else if scr.md.SignatureV3, ok = p.(*packet.SignatureV3); ok { + scr.md.SignatureError = scr.md.SignedBy.PublicKey.VerifySignatureV3(scr.h, scr.md.SignatureV3) + } else { scr.md.SignatureError = errors.StructuralError("LiteralData not followed by Signature") return } - scr.md.SignatureError = scr.md.SignedBy.PublicKey.VerifySignature(scr.h, scr.md.Signature) - // The SymmetricallyEncrypted packet, if any, might have an // unsigned hash of its own. In order to check this we need to // close that Reader. 
diff -Nru juju-core-2.0~beta15/src/golang.org/x/crypto/openpgp/read_test.go juju-core-2.0.0/src/golang.org/x/crypto/openpgp/read_test.go --- juju-core-2.0~beta15/src/golang.org/x/crypto/openpgp/read_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/golang.org/x/crypto/openpgp/read_test.go 2016-10-13 14:32:00.000000000 +0000 @@ -13,6 +13,7 @@ "strings" "testing" + "golang.org/x/crypto/openpgp/armor" "golang.org/x/crypto/openpgp/errors" ) @@ -80,6 +81,17 @@ } } +func TestReadP256Key(t *testing.T) { + kring, err := ReadKeyRing(readerFromHex(p256TestKeyHex)) + if err != nil { + t.Error(err) + return + } + if len(kring) != 1 || uint32(kring[0].PrimaryKey.KeyId) != 0x5918513E { + t.Errorf("bad parse: %#v", kring) + } +} + func TestDSAHashTruncatation(t *testing.T) { // dsaKeyWithSHA512 was generated with GnuPG and --cert-digest-algo // SHA512 in order to require DSA hash truncation to verify correctly. @@ -243,7 +255,7 @@ } func TestSymmetricallyEncrypted(t *testing.T) { - expected := "Symmetrically encrypted.\n" + firstTimeCalled := true prompt := func(keys []Key, symmetric bool) ([]byte, error) { if len(keys) != 0 { @@ -254,6 +266,11 @@ t.Errorf("symmetric is not set") } + if firstTimeCalled { + firstTimeCalled = false + return []byte("wrongpassword"), nil + } + return []byte("password"), nil } @@ -273,6 +290,7 @@ t.Errorf("LiteralData.Time is %d, want %d", md.LiteralData.Time, expectedCreationTime) } + const expected = "Symmetrically encrypted.\n" if string(contents) != expected { t.Errorf("contents got: %s want: %s", string(contents), expected) } @@ -320,6 +338,11 @@ testDetachedSignature(t, kring, readerFromHex(missingHashFunctionHex+detachedSignatureDSAHex), signedInput, "binary", testKey3KeyId) } +func TestDetachedSignatureP256(t *testing.T) { + kring, _ := ReadKeyRing(readerFromHex(p256TestKeyHex)) + testDetachedSignature(t, kring, readerFromHex(detachedSignatureP256Hex), signedInput, "binary", testKeyP256KeyId) +} + func testHashFunctionError(t 
*testing.T, signatureHex string) { kring, _ := ReadKeyRing(readerFromHex(testKeys1And2Hex)) _, err := CheckDetachedSignature(kring, nil, readerFromHex(signatureHex)) @@ -411,8 +434,53 @@ testReadMessageError(t, "9303000130303030303030303030983002303030303030030000000130") } +// TestSignatureV3Message tests the verification of V3 signature, generated +// with a modern V4-style key. Some people have their clients set to generate +// V3 signatures, so it's useful to be able to verify them. +func TestSignatureV3Message(t *testing.T) { + sig, err := armor.Decode(strings.NewReader(signedMessageV3)) + if err != nil { + t.Error(err) + return + } + key, err := ReadArmoredKeyRing(strings.NewReader(keyV4forVerifyingSignedMessageV3)) + if err != nil { + t.Error(err) + return + } + md, err := ReadMessage(sig.Body, key, nil, nil) + if err != nil { + t.Error(err) + return + } + + _, err = ioutil.ReadAll(md.UnverifiedBody) + if err != nil { + t.Error(err) + return + } + + // We'll see a sig error here after reading in the UnverifiedBody above, + // if there was one to see. 
+ if err = md.SignatureError; err != nil { + t.Error(err) + return + } + + if md.SignatureV3 == nil { + t.Errorf("No available signature after checking signature") + return + } + if md.Signature != nil { + t.Errorf("Did not expect a signature V4 back") + return + } + return +} + const testKey1KeyId = 0xA34D7E18C20C31BB const testKey3KeyId = 0x338934250CCC0360 +const testKeyP256KeyId = 0xd44a2c495918513e const signedInput = "Signed message\nline 2\nline 3\n" const signedTextInput = "Signed message\r\nline 2\r\nline 3\r\n" @@ -427,6 +495,8 @@ const detachedSignatureDSAHex = "884604001102000605024d6c4eac000a0910338934250ccc0360f18d00a087d743d6405ed7b87755476629600b8b694a39e900a0abff8126f46faf1547c1743c37b21b4ea15b8f83" +const detachedSignatureP256Hex = "885e0400130a0006050256e5bb00000a0910d44a2c495918513edef001009841a4f792beb0befccb35c8838a6a87d9b936beaa86db6745ddc7b045eee0cf00fd1ac1f78306b17e965935dd3f8bae4587a76587e4af231efe19cc4011a8434817" + const testKeys1And2Hex = "988d044d3c5c10010400b1d13382944bd5aba23a4312968b5095d14f947f600eb478e14a6fcb16b0e0cac764884909c020bc495cfcc39a935387c661507bdb236a0612fb582cac3af9b29cc2c8c70090616c41b662f4da4c1201e195472eb7f4ae1ccbcbf9940fe21d985e379a5563dde5b9a23d35f1cfaa5790da3b79db26f23695107bfaca8e7b5bcd0011010001b41054657374204b6579203120285253412988b804130102002205024d3c5c10021b03060b090807030206150802090a0b0416020301021e01021780000a0910a34d7e18c20c31bbb5b304009cc45fe610b641a2c146331be94dade0a396e73ca725e1b25c21708d9cab46ecca5ccebc23055879df8f99eea39b377962a400f2ebdc36a7c99c333d74aeba346315137c3ff9d0a09b0273299090343048afb8107cf94cbd1400e3026f0ccac7ecebbc4d78588eb3e478fe2754d3ca664bcf3eac96ca4a6b0c8d7df5102f60f6b0020003b88d044d3c5c10010400b201df61d67487301f11879d514f4248ade90c8f68c7af1284c161098de4c28c2850f1ec7b8e30f959793e571542ffc6532189409cb51c3d30dad78c4ad5165eda18b20d9826d8707d0f742e2ab492103a85bbd9ddf4f5720f6de7064feb0d39ee002219765bb07bcfb8b877f47abe270ddeda4f676108cecb6b9bb2ad484a4f0011010001889f04180102000905024d3c5c100
21b0c000a0910a34d7e18c20c31bb1a03040085c8d62e16d05dc4e9dad64953c8a2eed8b6c12f92b1575eeaa6dcf7be9473dd5b24b37b6dffbb4e7c99ed1bd3cb11634be19b3e6e207bed7505c7ca111ccf47cb323bf1f8851eb6360e8034cbff8dd149993c959de89f8f77f38e7e98b8e3076323aa719328e2b408db5ec0d03936efd57422ba04f925cdc7b4c1af7590e40ab0020003988d044d3c5c33010400b488c3e5f83f4d561f317817538d9d0397981e9aef1321ca68ebfae1cf8b7d388e19f4b5a24a82e2fbbf1c6c26557a6c5845307a03d815756f564ac7325b02bc83e87d5480a8fae848f07cb891f2d51ce7df83dcafdc12324517c86d472cc0ee10d47a68fd1d9ae49a6c19bbd36d82af597a0d88cc9c49de9df4e696fc1f0b5d0011010001b42754657374204b6579203220285253412c20656e637279707465642070726976617465206b65792988b804130102002205024d3c5c33021b03060b090807030206150802090a0b0416020301021e01021780000a0910d4984f961e35246b98940400908a73b6a6169f700434f076c6c79015a49bee37130eaf23aaa3cfa9ce60bfe4acaa7bc95f1146ada5867e0079babb38804891f4f0b8ebca57a86b249dee786161a755b7a342e68ccf3f78ed6440a93a6626beb9a37aa66afcd4f888790cb4bb46d94a4ae3eb3d7d3e6b00f6bfec940303e89ec5b32a1eaaacce66497d539328b0020003b88d044d3c5c33010400a4e913f9442abcc7f1804ccab27d2f787ffa592077ca935a8bb23165bd8d57576acac647cc596b2c3f814518cc8c82953c7a4478f32e0cf645630a5ba38d9618ef2bc3add69d459ae3dece5cab778938d988239f8c5ae437807075e06c828019959c644ff05ef6a5a1dab72227c98e3a040b0cf219026640698d7a13d8538a570011010001889f04180102000905024d3c5c33021b0c000a0910d4984f961e35246b26c703ff7ee29ef53bc1ae1ead533c408fa136db508434e233d6e62be621e031e5940bbd4c08142aed0f82217e7c3e1ec8de574bc06ccf3c36633be41ad78a9eacd209f861cae7b064100758545cc9dd83db71806dc1cfd5fb9ae5c7474bba0c19c44034ae61bae5eca379383339dece94ff56ff7aa44a582f3e5c38f45763af577c0934b0020003" const testKeys1And2PrivateHex = 
"9501d8044d3c5c10010400b1d13382944bd5aba23a4312968b5095d14f947f600eb478e14a6fcb16b0e0cac764884909c020bc495cfcc39a935387c661507bdb236a0612fb582cac3af9b29cc2c8c70090616c41b662f4da4c1201e195472eb7f4ae1ccbcbf9940fe21d985e379a5563dde5b9a23d35f1cfaa5790da3b79db26f23695107bfaca8e7b5bcd00110100010003ff4d91393b9a8e3430b14d6209df42f98dc927425b881f1209f319220841273a802a97c7bdb8b3a7740b3ab5866c4d1d308ad0d3a79bd1e883aacf1ac92dfe720285d10d08752a7efe3c609b1d00f17f2805b217be53999a7da7e493bfc3e9618fd17018991b8128aea70a05dbce30e4fbe626aa45775fa255dd9177aabf4df7cf0200c1ded12566e4bc2bb590455e5becfb2e2c9796482270a943343a7835de41080582c2be3caf5981aa838140e97afa40ad652a0b544f83eb1833b0957dce26e47b0200eacd6046741e9ce2ec5beb6fb5e6335457844fb09477f83b050a96be7da043e17f3a9523567ed40e7a521f818813a8b8a72209f1442844843ccc7eb9805442570200bdafe0438d97ac36e773c7162028d65844c4d463e2420aa2228c6e50dc2743c3d6c72d0d782a5173fe7be2169c8a9f4ef8a7cf3e37165e8c61b89c346cdc6c1799d2b41054657374204b6579203120285253412988b804130102002205024d3c5c10021b03060b090807030206150802090a0b0416020301021e01021780000a0910a34d7e18c20c31bbb5b304009cc45fe610b641a2c146331be94dade0a396e73ca725e1b25c21708d9cab46ecca5ccebc23055879df8f99eea39b377962a400f2ebdc36a7c99c333d74aeba346315137c3ff9d0a09b0273299090343048afb8107cf94cbd1400e3026f0ccac7ecebbc4d78588eb3e478fe2754d3ca664bcf3eac96ca4a6b0c8d7df5102f60f6b00200009d01d8044d3c5c10010400b201df61d67487301f11879d514f4248ade90c8f68c7af1284c161098de4c28c2850f1ec7b8e30f959793e571542ffc6532189409cb51c3d30dad78c4ad5165eda18b20d9826d8707d0f742e2ab492103a85bbd9ddf4f5720f6de7064feb0d39ee002219765bb07bcfb8b877f47abe270ddeda4f676108cecb6b9bb2ad484a4f00110100010003fd17a7490c22a79c59281fb7b20f5e6553ec0c1637ae382e8adaea295f50241037f8997cf42c1ce26417e015091451b15424b2c59eb8d4161b0975630408e394d3b00f88d4b4e18e2cc85e8251d4753a27c639c83f5ad4a571c4f19d7cd460b9b73c25ade730c99df09637bd173d8e3e981ac64432078263bb6dc30d3e974150dd0200d0ee05be3d4604d2146fb0457f31ba17c057560785aa804e8ca5530a7cd81d3440d0f4ba6851efc
fd3954b7e68908fc0ba47f7ac37bf559c6c168b70d3a7c8cd0200da1c677c4bce06a068070f2b3733b0a714e88d62aa3f9a26c6f5216d48d5c2b5624144f3807c0df30be66b3268eeeca4df1fbded58faf49fc95dc3c35f134f8b01fd1396b6c0fc1b6c4f0eb8f5e44b8eace1e6073e20d0b8bc5385f86f1cf3f050f66af789f3ef1fc107b7f4421e19e0349c730c68f0a226981f4e889054fdb4dc149e8e889f04180102000905024d3c5c10021b0c000a0910a34d7e18c20c31bb1a03040085c8d62e16d05dc4e9dad64953c8a2eed8b6c12f92b1575eeaa6dcf7be9473dd5b24b37b6dffbb4e7c99ed1bd3cb11634be19b3e6e207bed7505c7ca111ccf47cb323bf1f8851eb6360e8034cbff8dd149993c959de89f8f77f38e7e98b8e3076323aa719328e2b408db5ec0d03936efd57422ba04f925cdc7b4c1af7590e40ab00200009501fe044d3c5c33010400b488c3e5f83f4d561f317817538d9d0397981e9aef1321ca68ebfae1cf8b7d388e19f4b5a24a82e2fbbf1c6c26557a6c5845307a03d815756f564ac7325b02bc83e87d5480a8fae848f07cb891f2d51ce7df83dcafdc12324517c86d472cc0ee10d47a68fd1d9ae49a6c19bbd36d82af597a0d88cc9c49de9df4e696fc1f0b5d0011010001fe030302e9030f3c783e14856063f16938530e148bc57a7aa3f3e4f90df9dceccdc779bc0835e1ad3d006e4a8d7b36d08b8e0de5a0d947254ecfbd22037e6572b426bcfdc517796b224b0036ff90bc574b5509bede85512f2eefb520fb4b02aa523ba739bff424a6fe81c5041f253f8d757e69a503d3563a104d0d49e9e890b9d0c26f96b55b743883b472caa7050c4acfd4a21f875bdf1258d88bd61224d303dc9df77f743137d51e6d5246b88c406780528fd9a3e15bab5452e5b93970d9dcc79f48b38651b9f15bfbcf6da452837e9cc70683d1bdca94507870f743e4ad902005812488dd342f836e72869afd00ce1850eea4cfa53ce10e3608e13d3c149394ee3cbd0e23d018fcbcb6e2ec5a1a22972d1d462ca05355d0d290dd2751e550d5efb38c6c89686344df64852bf4ff86638708f644e8ec6bd4af9b50d8541cb91891a431326ab2e332faa7ae86cfb6e0540aa63160c1e5cdd5a4add518b303fff0a20117c6bc77f7cfbaf36b04c865c6c2b42754657374204b6579203220285253412c20656e637279707465642070726976617465206b65792988b804130102002205024d3c5c33021b03060b090807030206150802090a0b0416020301021e01021780000a0910d4984f961e35246b98940400908a73b6a6169f700434f076c6c79015a49bee37130eaf23aaa3cfa9ce60bfe4acaa7bc95f1146ada5867e0079babb38804891f4f0b8ebca57a86b249dee78616
1a755b7a342e68ccf3f78ed6440a93a6626beb9a37aa66afcd4f888790cb4bb46d94a4ae3eb3d7d3e6b00f6bfec940303e89ec5b32a1eaaacce66497d539328b00200009d01fe044d3c5c33010400a4e913f9442abcc7f1804ccab27d2f787ffa592077ca935a8bb23165bd8d57576acac647cc596b2c3f814518cc8c82953c7a4478f32e0cf645630a5ba38d9618ef2bc3add69d459ae3dece5cab778938d988239f8c5ae437807075e06c828019959c644ff05ef6a5a1dab72227c98e3a040b0cf219026640698d7a13d8538a570011010001fe030302e9030f3c783e148560f936097339ae381d63116efcf802ff8b1c9360767db5219cc987375702a4123fd8657d3e22700f23f95020d1b261eda5257e9a72f9a918e8ef22dd5b3323ae03bbc1923dd224db988cadc16acc04b120a9f8b7e84da9716c53e0334d7b66586ddb9014df604b41be1e960dcfcbc96f4ed150a1a0dd070b9eb14276b9b6be413a769a75b519a53d3ecc0c220e85cd91ca354d57e7344517e64b43b6e29823cbd87eae26e2b2e78e6dedfbb76e3e9f77bcb844f9a8932eb3db2c3f9e44316e6f5d60e9e2a56e46b72abe6b06dc9a31cc63f10023d1f5e12d2a3ee93b675c96f504af0001220991c88db759e231b3320dcedf814dcf723fd9857e3d72d66a0f2af26950b915abdf56c1596f46a325bf17ad4810d3535fb02a259b247ac3dbd4cc3ecf9c51b6c07cebb009c1506fba0a89321ec8683e3fd009a6e551d50243e2d5092fefb3321083a4bad91320dc624bd6b5dddf93553e3d53924c05bfebec1fb4bd47e89a1a889f04180102000905024d3c5c33021b0c000a0910d4984f961e35246b26c703ff7ee29ef53bc1ae1ead533c408fa136db508434e233d6e62be621e031e5940bbd4c08142aed0f82217e7c3e1ec8de574bc06ccf3c36633be41ad78a9eacd209f861cae7b064100758545cc9dd83db71806dc1cfd5fb9ae5c7474bba0c19c44034ae61bae5eca379383339dece94ff56ff7aa44a582f3e5c38f45763af577c0934b0020000" @@ -447,6 +517,10 @@ const dsaTestKeyPrivateHex = 
"9501bb044d6c49de110400cb5ce438cf9250907ac2ba5bf6547931270b89f7c4b53d9d09f4d0213a5ef2ec1f26806d3d259960f872a4a102ef1581ea3f6d6882d15134f21ef6a84de933cc34c47cc9106efe3bd84c6aec12e78523661e29bc1a61f0aab17fa58a627fd5fd33f5149153fbe8cd70edf3d963bc287ef875270ff14b5bfdd1bca4483793923b00a0fe46d76cb6e4cbdc568435cd5480af3266d610d303fe33ae8273f30a96d4d34f42fa28ce1112d425b2e3bf7ea553d526e2db6b9255e9dc7419045ce817214d1a0056dbc8d5289956a4b1b69f20f1105124096e6a438f41f2e2495923b0f34b70642607d45559595c7fe94d7fa85fc41bf7d68c1fd509ebeaa5f315f6059a446b9369c277597e4f474a9591535354c7e7f4fd98a08aa60400b130c24ff20bdfbf683313f5daebf1c9b34b3bdadfc77f2ddd72ee1fb17e56c473664bc21d66467655dd74b9005e3a2bacce446f1920cd7017231ae447b67036c9b431b8179deacd5120262d894c26bc015bffe3d827ba7087ad9b700d2ca1f6d16cc1786581e5dd065f293c31209300f9b0afcc3f7c08dd26d0a22d87580b4d00009f592e0619d823953577d4503061706843317e4fee083db41054657374204b65792033202844534129886204131102002205024d6c49de021b03060b090807030206150802090a0b0416020301021e01021780000a0910338934250ccc03607e0400a0bdb9193e8a6b96fc2dfc108ae848914b504481f100a09c4dc148cb693293a67af24dd40d2b13a9e36794" +const p256TestKeyHex = 
"98520456e5b83813082a8648ce3d030107020304a2072cd6d21321266c758cc5b83fab0510f751cb8d91897cddb7047d8d6f185546e2107111b0a95cb8ef063c33245502af7a65f004d5919d93ee74eb71a66253b424502d3235362054657374204b6579203c696e76616c6964406578616d706c652e636f6d3e8879041313080021050256e5b838021b03050b09080702061508090a0b020416020301021e01021780000a0910d44a2c495918513e54e50100dfa64f97d9b47766fc1943c6314ba3f2b2a103d71ad286dc5b1efb96a345b0c80100dbc8150b54241f559da6ef4baacea6d31902b4f4b1bdc09b34bf0502334b7754b8560456e5b83812082a8648ce3d030107020304bfe3cea9cee13486f8d518aa487fecab451f25467d2bf08e58f63e5fa525d5482133e6a79299c274b068ef0be448152ad65cf11cf764348588ca4f6a0bcf22b6030108078861041813080009050256e5b838021b0c000a0910d44a2c495918513e4a4800ff49d589fa64024ad30be363a032e3a0e0e6f5db56ba4c73db850518bf0121b8f20100fd78e065f4c70ea5be9df319ea67e493b936fc78da834a71828043d3154af56e" + +const p256TestKeyPrivateHex = "94a50456e5b83813082a8648ce3d030107020304a2072cd6d21321266c758cc5b83fab0510f751cb8d91897cddb7047d8d6f185546e2107111b0a95cb8ef063c33245502af7a65f004d5919d93ee74eb71a66253fe070302f0c2bfb0b6c30f87ee1599472b8636477eab23ced13b271886a4b50ed34c9d8436af5af5b8f88921f0efba6ef8c37c459bbb88bc1c6a13bbd25c4ce9b1e97679569ee77645d469bf4b43de637f5561b424502d3235362054657374204b6579203c696e76616c6964406578616d706c652e636f6d3e8879041313080021050256e5b838021b03050b09080702061508090a0b020416020301021e01021780000a0910d44a2c495918513e54e50100dfa64f97d9b47766fc1943c6314ba3f2b2a103d71ad286dc5b1efb96a345b0c80100dbc8150b54241f559da6ef4baacea6d31902b4f4b1bdc09b34bf0502334b77549ca90456e5b83812082a8648ce3d030107020304bfe3cea9cee13486f8d518aa487fecab451f25467d2bf08e58f63e5fa525d5482133e6a79299c274b068ef0be448152ad65cf11cf764348588ca4f6a0bcf22b603010807fe0703027510012471a603cfee2968dce19f732721ddf03e966fd133b4e3c7a685b788705cbc46fb026dc94724b830c9edbaecd2fb2c662f23169516cacd1fe423f0475c364ecc10abcabcfd4bbbda1a36a1bd8861041813080009050256e5b838021b0c000a0910d44a2c495918513e4a4800ff49d589fa64024ad30be363a032e3a0e0e6f
5db56ba4c73db850518bf0121b8f20100fd78e065f4c70ea5be9df319ea67e493b936fc78da834a71828043d3154af56e" + const armoredPrivateKeyBlock = `-----BEGIN PGP PRIVATE KEY BLOCK----- Version: GnuPG v1.4.10 (GNU/Linux) @@ -504,3 +578,36 @@ const missingHashFunctionHex = `8a00000040040001030006050253863c24000a09103b4fe6acc0b21f32ffff0101010101010101010101010101010101010101010101010101010101010101010101010101` const campbellQuine = `a0b001000300fcffa0b001000d00f2ff000300fcffa0b001000d00f2ff8270a01c00000500faff8270a01c00000500faff000500faff001400ebff8270a01c00000500faff000500faff001400ebff428821c400001400ebff428821c400001400ebff428821c400001400ebff428821c400001400ebff428821c400000000ffff000000ffff000b00f4ff428821c400000000ffff000000ffff000b00f4ff0233214c40000100feff000233214c40000100feff0000` + +const keyV4forVerifyingSignedMessageV3 = `-----BEGIN PGP PUBLIC KEY BLOCK----- +Comment: GPGTools - https://gpgtools.org + +mI0EVfxoFQEEAMBIqmbDfYygcvP6Phr1wr1XI41IF7Qixqybs/foBF8qqblD9gIY +BKpXjnBOtbkcVOJ0nljd3/sQIfH4E0vQwK5/4YRQSI59eKOqd6Fx+fWQOLG+uu6z +tewpeCj9LLHvibx/Sc7VWRnrznia6ftrXxJ/wHMezSab3tnGC0YPVdGNABEBAAG0 +JEdvY3J5cHRvIFRlc3QgS2V5IDx0aGVtYXhAZ21haWwuY29tPoi5BBMBCgAjBQJV +/GgVAhsDBwsJCAcDAgEGFQgCCQoLBBYCAwECHgECF4AACgkQeXnQmhdGW9PFVAP+ +K7TU0qX5ArvIONIxh/WAweyOk884c5cE8f+3NOPOOCRGyVy0FId5A7MmD5GOQh4H +JseOZVEVCqlmngEvtHZb3U1VYtVGE5WZ+6rQhGsMcWP5qaT4soYwMBlSYxgYwQcx +YhN9qOr292f9j2Y//TTIJmZT4Oa+lMxhWdqTfX+qMgG4jQRV/GgVAQQArhFSiij1 +b+hT3dnapbEU+23Z1yTu1DfF6zsxQ4XQWEV3eR8v+8mEDDNcz8oyyF56k6UQ3rXi +UMTIwRDg4V6SbZmaFbZYCOwp/EmXJ3rfhm7z7yzXj2OFN22luuqbyVhuL7LRdB0M +pxgmjXb4tTvfgKd26x34S+QqUJ7W6uprY4sAEQEAAYifBBgBCgAJBQJV/GgVAhsM +AAoJEHl50JoXRlvT7y8D/02ckx4OMkKBZo7viyrBw0MLG92i+DC2bs35PooHR6zz +786mitjOp5z2QWNLBvxC70S0qVfCIz8jKupO1J6rq6Z8CcbLF3qjm6h1omUBf8Nd +EfXKD2/2HV6zMKVknnKzIEzauh+eCKS2CeJUSSSryap/QLVAjRnckaES/OsEWhNB +=RZia +-----END PGP PUBLIC KEY BLOCK----- +` + +const signedMessageV3 = `-----BEGIN PGP MESSAGE----- +Comment: GPGTools - https://gpgtools.org + 
+owGbwMvMwMVYWXlhlrhb9GXG03JJDKF/MtxDMjKLFYAoUaEktbhEITe1uDgxPVWP +q5NhKjMrWAVcC9evD8z/bF/uWNjqtk/X3y5/38XGRQHm/57rrDRYuGnTw597Xqka +uM3137/hH3Os+Jf2dc0fXOITKwJvXJvecPVs0ta+Vg7ZO1MLn8w58Xx+6L58mbka +DGHyU9yTueZE8D+QF/Tz28Y78dqtF56R1VPn9Xw4uJqrWYdd7b3vIZ1V6R4Nh05d +iT57d/OhWwA= +=hG7R +-----END PGP MESSAGE----- +` diff -Nru juju-core-2.0~beta15/src/golang.org/x/crypto/openpgp/s2k/s2k.go juju-core-2.0.0/src/golang.org/x/crypto/openpgp/s2k/s2k.go --- juju-core-2.0~beta15/src/golang.org/x/crypto/openpgp/s2k/s2k.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/golang.org/x/crypto/openpgp/s2k/s2k.go 2016-10-13 14:32:00.000000000 +0000 @@ -251,7 +251,7 @@ } // HashIdToString returns the name of the hash function corresponding to the -// given OpenPGP hash id, or panics if id is unknown. +// given OpenPGP hash id. func HashIdToString(id byte) (name string, ok bool) { for _, m := range hashToHashIdMapping { if m.id == id { diff -Nru juju-core-2.0~beta15/src/golang.org/x/crypto/openpgp/write.go juju-core-2.0.0/src/golang.org/x/crypto/openpgp/write.go --- juju-core-2.0~beta15/src/golang.org/x/crypto/openpgp/write.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/golang.org/x/crypto/openpgp/write.go 2016-10-13 14:32:00.000000000 +0000 @@ -231,7 +231,7 @@ } cipher := packet.CipherFunction(candidateCiphers[0]) - // If the cipher specifed by config is a candidate, we'll use that. + // If the cipher specified by config is a candidate, we'll use that. 
configuredCipher := config.Cipher() for _, c := range candidateCiphers { cipherFunc := packet.CipherFunction(c) diff -Nru juju-core-2.0~beta15/src/golang.org/x/crypto/openpgp/write_test.go juju-core-2.0.0/src/golang.org/x/crypto/openpgp/write_test.go --- juju-core-2.0~beta15/src/golang.org/x/crypto/openpgp/write_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/golang.org/x/crypto/openpgp/write_test.go 2016-10-13 14:32:00.000000000 +0000 @@ -50,6 +50,20 @@ testDetachedSignature(t, kring, out, signedInput, "check", testKey3KeyId) } +func TestSignDetachedP256(t *testing.T) { + kring, _ := ReadKeyRing(readerFromHex(p256TestKeyPrivateHex)) + kring[0].PrivateKey.Decrypt([]byte("passphrase")) + + out := bytes.NewBuffer(nil) + message := bytes.NewBufferString(signedInput) + err := DetachSign(out, kring[0], message, nil) + if err != nil { + t.Error(err) + } + + testDetachedSignature(t, kring, out, signedInput, "check", testKeyP256KeyId) +} + func TestNewEntity(t *testing.T) { if testing.Short() { return diff -Nru juju-core-2.0~beta15/src/golang.org/x/crypto/otr/libotr_test_helper.c juju-core-2.0.0/src/golang.org/x/crypto/otr/libotr_test_helper.c --- juju-core-2.0~beta15/src/golang.org/x/crypto/otr/libotr_test_helper.c 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/golang.org/x/crypto/otr/libotr_test_helper.c 2016-10-13 14:32:00.000000000 +0000 @@ -13,6 +13,7 @@ #include #include +#include static int g_session_established = 0; @@ -20,92 +21,109 @@ return OTRL_POLICY_ALWAYS; } -int is_logged_in(void *opdata, const char *accountname, const char *protocol, const char *recipient) { +int is_logged_in(void *opdata, const char *accountname, const char *protocol, + const char *recipient) { return 1; } -void inject_message(void *opdata, const char *accountname, const char *protocol, const char *recipient, const char *message) { +void inject_message(void *opdata, const char *accountname, const char *protocol, + const char *recipient, const char 
*message) { printf("%s\n", message); fflush(stdout); fprintf(stderr, "libotr helper sent: %s\n", message); } -void notify(void *opdata, OtrlNotifyLevel level, const char *accountname, const char *protocol, const char *username, const char *title, const char *primary, const char *secondary) { - fprintf(stderr, "NOTIFY: %s %s %s %s\n", username, title, primary, secondary); -} +void update_context_list(void *opdata) {} -int display_otr_message(void *opdata, const char *accountname, const char *protocol, const char *username, const char *msg) { - fprintf(stderr, "MESSAGE: %s %s\n", username, msg); - return 1; +void new_fingerprint(void *opdata, OtrlUserState us, const char *accountname, + const char *protocol, const char *username, + unsigned char fingerprint[20]) { + fprintf(stderr, "NEW FINGERPRINT\n"); + g_session_established = 1; } -void update_context_list(void *opdata) { -} +void write_fingerprints(void *opdata) {} -const char *protocol_name(void *opdata, const char *protocol) { - return "PROTOCOL"; -} +void gone_secure(void *opdata, ConnContext *context) {} -void protocol_name_free(void *opdata, const char *protocol_name) { -} +void gone_insecure(void *opdata, ConnContext *context) {} -void new_fingerprint(void *opdata, OtrlUserState us, const char *accountname, const char *protocol, const char *username, unsigned char fingerprint[20]) { - fprintf(stderr, "NEW FINGERPRINT\n"); - g_session_established = 1; -} +void still_secure(void *opdata, ConnContext *context, int is_reply) {} -void write_fingerprints(void *opdata) { -} +int max_message_size(void *opdata, ConnContext *context) { return 99999; } -void gone_secure(void *opdata, ConnContext *context) { +const char *account_name(void *opdata, const char *account, + const char *protocol) { + return "ACCOUNT"; } -void gone_insecure(void *opdata, ConnContext *context) { -} +void account_name_free(void *opdata, const char *account_name) {} -void still_secure(void *opdata, ConnContext *context, int is_reply) { +const 
char *error_message(void *opdata, ConnContext *context, + OtrlErrorCode err_code) { + return "ERR"; } -void log_message(void *opdata, const char *message) { - fprintf(stderr, "MESSAGE: %s\n", message); -} +void error_message_free(void *opdata, const char *msg) {} -int max_message_size(void *opdata, ConnContext *context) { - return 99999; -} +void resent_msg_prefix_free(void *opdata, const char *prefix) {} -const char *account_name(void *opdata, const char *account, const char *protocol) { - return "ACCOUNT"; -} +void handle_smp_event(void *opdata, OtrlSMPEvent smp_event, + ConnContext *context, unsigned short progress_event, + char *question) {} -void account_name_free(void *opdata, const char *account_name) { +void handle_msg_event(void *opdata, OtrlMessageEvent msg_event, + ConnContext *context, const char *message, + gcry_error_t err) { + fprintf(stderr, "msg event: %d %s\n", msg_event, message); } OtrlMessageAppOps uiops = { - policy, - NULL, - is_logged_in, - inject_message, - notify, - display_otr_message, - update_context_list, - protocol_name, - protocol_name_free, - new_fingerprint, - write_fingerprints, - gone_secure, - gone_insecure, - still_secure, - log_message, - max_message_size, - account_name, - account_name_free, + policy, + NULL, + is_logged_in, + inject_message, + update_context_list, + new_fingerprint, + write_fingerprints, + gone_secure, + gone_insecure, + still_secure, + max_message_size, + account_name, + account_name_free, + NULL, /* received_symkey */ + error_message, + error_message_free, + NULL, /* resent_msg_prefix */ + resent_msg_prefix_free, + handle_smp_event, + handle_msg_event, + NULL /* create_instag */, + NULL /* convert_msg */, + NULL /* convert_free */, + NULL /* timer_control */, }; -static const char kPrivateKeyData[] = "(privkeys (account (name \"account\") (protocol proto) (private-key (dsa (p 
#00FC07ABCF0DC916AFF6E9AE47BEF60C7AB9B4D6B2469E436630E36F8A489BE812486A09F30B71224508654940A835301ACC525A4FF133FC152CC53DCC59D65C30A54F1993FE13FE63E5823D4C746DB21B90F9B9C00B49EC7404AB1D929BA7FBA12F2E45C6E0A651689750E8528AB8C031D3561FECEE72EBB4A090D450A9B7A857#) (q #00997BD266EF7B1F60A5C23F3A741F2AEFD07A2081#) (g #535E360E8A95EBA46A4F7DE50AD6E9B2A6DB785A66B64EB9F20338D2A3E8FB0E94725848F1AA6CC567CB83A1CC517EC806F2E92EAE71457E80B2210A189B91250779434B41FC8A8873F6DB94BEA7D177F5D59E7E114EE10A49CFD9CEF88AE43387023B672927BA74B04EB6BBB5E57597766A2F9CE3857D7ACE3E1E3BC1FC6F26#) (y #0AC8670AD767D7A8D9D14CC1AC6744CD7D76F993B77FFD9E39DF01E5A6536EF65E775FCEF2A983E2A19BD6415500F6979715D9FD1257E1FE2B6F5E1E74B333079E7C880D39868462A93454B41877BE62E5EF0A041C2EE9C9E76BD1E12AE25D9628DECB097025DD625EF49C3258A1A3C0FF501E3DC673B76D7BABF349009B6ECF#) (x #14D0345A3562C480A039E3C72764F72D79043216#)))))\n"; +static const char kPrivateKeyData[] = + "(privkeys (account (name \"account\") (protocol proto) (private-key (dsa " + "(p " + "#00FC07ABCF0DC916AFF6E9AE47BEF60C7AB9B4D6B2469E436630E36F8A489BE812486A09F" + "30B71224508654940A835301ACC525A4FF133FC152CC53DCC59D65C30A54F1993FE13FE63E" + "5823D4C746DB21B90F9B9C00B49EC7404AB1D929BA7FBA12F2E45C6E0A651689750E8528AB" + "8C031D3561FECEE72EBB4A090D450A9B7A857#) (q " + "#00997BD266EF7B1F60A5C23F3A741F2AEFD07A2081#) (g " + "#535E360E8A95EBA46A4F7DE50AD6E9B2A6DB785A66B64EB9F20338D2A3E8FB0E94725848F" + "1AA6CC567CB83A1CC517EC806F2E92EAE71457E80B2210A189B91250779434B41FC8A8873F" + "6DB94BEA7D177F5D59E7E114EE10A49CFD9CEF88AE43387023B672927BA74B04EB6BBB5E57" + "597766A2F9CE3857D7ACE3E1E3BC1FC6F26#) (y " + "#0AC8670AD767D7A8D9D14CC1AC6744CD7D76F993B77FFD9E39DF01E5A6536EF65E775FCEF" + "2A983E2A19BD6415500F6979715D9FD1257E1FE2B6F5E1E74B333079E7C880D39868462A93" + "454B41877BE62E5EF0A041C2EE9C9E76BD1E12AE25D9628DECB097025DD625EF49C3258A1A" + "3C0FF501E3DC673B76D7BABF349009B6ECF#) (x " + "#14D0345A3562C480A039E3C72764F72D79043216#)))))\n"; -int -main() { +int 
main() { OTRL_INIT; // We have to write the private key information to a file because the libotr @@ -116,12 +134,13 @@ } char private_key_file[256]; - snprintf(private_key_file, sizeof(private_key_file), "%s/libotr_test_helper_privatekeys-XXXXXX", tmpdir); + snprintf(private_key_file, sizeof(private_key_file), + "%s/libotr_test_helper_privatekeys-XXXXXX", tmpdir); int fd = mkstemp(private_key_file); if (fd == -1) { perror("creating temp file"); } - write(fd, kPrivateKeyData, sizeof(kPrivateKeyData)-1); + write(fd, kPrivateKeyData, sizeof(kPrivateKeyData) - 1); close(fd); OtrlUserState userstate = otrl_userstate_create(); @@ -133,7 +152,7 @@ char buf[4096]; for (;;) { - char* message = fgets(buf, sizeof(buf), stdin); + char *message = fgets(buf, sizeof(buf), stdin); if (strlen(message) == 0) { break; } @@ -142,9 +161,11 @@ char *newmessage = NULL; OtrlTLV *tlvs; - int ignore_message = otrl_message_receiving(userstate, &uiops, NULL, "account", "proto", "peer", message, &newmessage, &tlvs, NULL, NULL); + int ignore_message = otrl_message_receiving( + userstate, &uiops, NULL, "account", "proto", "peer", message, + &newmessage, &tlvs, NULL, NULL, NULL); if (tlvs) { - otrl_tlv_free(tlvs); + otrl_tlv_free(tlvs); } if (newmessage != NULL) { @@ -154,16 +175,21 @@ gcry_error_t err; char *newmessage = NULL; - err = otrl_message_sending(userstate, &uiops, NULL, "account", "proto", "peer", "test message", NULL, &newmessage, NULL, NULL); + err = otrl_message_sending(userstate, &uiops, NULL, "account", "proto", + "peer", 0, "test message", NULL, &newmessage, + OTRL_FRAGMENT_SEND_SKIP, NULL, NULL, NULL); if (newmessage == NULL) { fprintf(stderr, "libotr didn't encrypt message\n"); return 1; } write(1, newmessage, strlen(newmessage)); write(1, "\n", 1); - g_session_established = 0; + fprintf(stderr, "libotr sent: %s\n", newmessage); otrl_message_free(newmessage); + + g_session_established = 0; write(1, "?OTRv2?\n", 8); + fprintf(stderr, "libotr sent: ?OTRv2\n"); } } diff -Nru 
juju-core-2.0~beta15/src/golang.org/x/crypto/otr/otr.go juju-core-2.0.0/src/golang.org/x/crypto/otr/otr.go --- juju-core-2.0~beta15/src/golang.org/x/crypto/otr/otr.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/golang.org/x/crypto/otr/otr.go 2016-10-13 14:32:00.000000000 +0000 @@ -277,7 +277,7 @@ in = in[len(msgPrefix) : len(in)-1] } else if version := isQuery(in); version > 0 { c.authState = authStateAwaitingDHKey - c.myKeyId = 0 + c.reset() toSend = c.encode(c.generateDHCommit()) return } else { @@ -311,7 +311,7 @@ if err = c.processDHCommit(msg); err != nil { return } - c.myKeyId = 0 + c.reset() toSend = c.encode(c.generateDHKey()) return case authStateAwaitingDHKey: @@ -330,7 +330,7 @@ if err = c.processDHCommit(msg); err != nil { return } - c.myKeyId = 0 + c.reset() toSend = c.encode(c.generateDHKey()) return } @@ -343,7 +343,7 @@ if err = c.processDHCommit(msg); err != nil { return } - c.myKeyId = 0 + c.reset() toSend = c.encode(c.generateDHKey()) c.authState = authStateAwaitingRevealSig default: @@ -417,12 +417,11 @@ change = SMPSecretNeeded c.smp.saved = &inTLV return - } else if err == smpFailureError { + } + if err == smpFailureError { err = nil change = SMPFailed - return - } - if complete { + } else if complete { change = SMPComplete } if reply.typ != 0 { @@ -1037,8 +1036,7 @@ } } if slot == nil { - err = errors.New("otr: internal error: no key slots") - return + return nil, errors.New("otr: internal error: no more key slots") } var myPriv, myPub, theirPub *big.Int @@ -1164,6 +1162,14 @@ return ret } +func (c *Conversation) reset() { + c.myKeyId = 0 + + for i := range c.keySlots { + c.keySlots[i].used = false + } +} + type PublicKey struct { dsa.PublicKey } diff -Nru juju-core-2.0~beta15/src/golang.org/x/crypto/otr/otr_test.go juju-core-2.0.0/src/golang.org/x/crypto/otr/otr_test.go --- juju-core-2.0~beta15/src/golang.org/x/crypto/otr/otr_test.go 2016-08-16 08:56:25.000000000 +0000 +++ 
juju-core-2.0.0/src/golang.org/x/crypto/otr/otr_test.go 2016-10-13 14:32:00.000000000 +0000 @@ -121,11 +121,12 @@ } } -func TestConversation(t *testing.T) { +func setupConversation(t *testing.T) (alice, bob *Conversation) { alicePrivateKey, _ := hex.DecodeString(alicePrivateKeyHex) bobPrivateKey, _ := hex.DecodeString(bobPrivateKeyHex) - var alice, bob Conversation + alice, bob = new(Conversation), new(Conversation) + alice.PrivateKey = new(PrivateKey) bob.PrivateKey = new(PrivateKey) alice.PrivateKey.Parse(alicePrivateKey) @@ -133,12 +134,6 @@ alice.FragmentSize = 100 bob.FragmentSize = 100 - var alicesMessage, bobsMessage [][]byte - var out []byte - var aliceChange, bobChange SecurityChange - var err error - alicesMessage = append(alicesMessage, []byte(QueryMessage)) - if alice.IsEncrypted() { t.Error("Alice believes that the conversation is secure before we've started") } @@ -146,6 +141,17 @@ t.Error("Bob believes that the conversation is secure before we've started") } + performHandshake(t, alice, bob) + return alice, bob +} + +func performHandshake(t *testing.T, alice, bob *Conversation) { + var alicesMessage, bobsMessage [][]byte + var out []byte + var aliceChange, bobChange SecurityChange + var err error + alicesMessage = append(alicesMessage, []byte(QueryMessage)) + for round := 0; len(alicesMessage) > 0 || len(bobsMessage) > 0; round++ { bobsMessage = nil for i, msg := range alicesMessage { @@ -193,80 +199,109 @@ if !bob.IsEncrypted() { t.Error("Bob doesn't believe that the conversation is secure") } +} - var testMessages = [][]byte{ - []byte("hello"), []byte("bye"), - } +const ( + firstRoundTrip = iota + subsequentRoundTrip + noMACKeyCheck +) - for j, testMessage := range testMessages { - alicesMessage, err = alice.Send(testMessage) +func roundTrip(t *testing.T, alice, bob *Conversation, message []byte, macKeyCheck int) { + alicesMessage, err := alice.Send(message) + if err != nil { + t.Errorf("Error from Alice sending message: %s", err) + } - if 
len(alice.oldMACs) != 0 { - t.Errorf("Alice has not revealed all MAC keys") - } + if len(alice.oldMACs) != 0 { + t.Errorf("Alice has not revealed all MAC keys") + } - for i, msg := range alicesMessage { - out, encrypted, _, _, err := bob.Receive(msg) + for i, msg := range alicesMessage { + out, encrypted, _, _, err := bob.Receive(msg) - if err != nil { - t.Errorf("Error generated while processing test message: %s", err.Error()) + if err != nil { + t.Errorf("Error generated while processing test message: %s", err.Error()) + } + if len(out) > 0 { + if i != len(alicesMessage)-1 { + t.Fatal("Bob produced a message while processing a fragment of Alice's") } - if len(out) > 0 { - if i != len(alicesMessage)-1 { - t.Fatal("Bob produced a message while processing a fragment of Alice's") - } - if !encrypted { - t.Errorf("Message was not marked as encrypted") - } - if !bytes.Equal(out, testMessage) { - t.Errorf("Message corrupted: got %x, want %x", out, testMessage) - } + if !encrypted { + t.Errorf("Message was not marked as encrypted") } - } - - if j == 0 { - if len(bob.oldMACs) != 0 { - t.Errorf("Bob should not have MAC keys to reveal") + if !bytes.Equal(out, message) { + t.Errorf("Message corrupted: got %x, want %x", out, message) } - } else if len(bob.oldMACs) != 40 { - t.Errorf("Bob does not have MAC keys to reveal") } + } - bobsMessage, err = bob.Send(testMessage) - + switch macKeyCheck { + case firstRoundTrip: if len(bob.oldMACs) != 0 { - t.Errorf("Bob has not revealed all MAC keys") + t.Errorf("Bob should not have MAC keys to reveal") } + case subsequentRoundTrip: + if len(bob.oldMACs) != 40 { + t.Errorf("Bob has %d bytes of MAC keys to reveal, but should have 40", len(bob.oldMACs)) + } + } - for i, msg := range bobsMessage { - out, encrypted, _, _, err := alice.Receive(msg) + bobsMessage, err := bob.Send(message) + if err != nil { + t.Errorf("Error from Bob sending message: %s", err) + } - if err != nil { - t.Errorf("Error generated while processing test message: 
%s", err.Error()) + if len(bob.oldMACs) != 0 { + t.Errorf("Bob has not revealed all MAC keys") + } + + for i, msg := range bobsMessage { + out, encrypted, _, _, err := alice.Receive(msg) + + if err != nil { + t.Errorf("Error generated while processing test message: %s", err.Error()) + } + if len(out) > 0 { + if i != len(bobsMessage)-1 { + t.Fatal("Alice produced a message while processing a fragment of Bob's") } - if len(out) > 0 { - if i != len(bobsMessage)-1 { - t.Fatal("Alice produced a message while processing a fragment of Bob's") - } - if !encrypted { - t.Errorf("Message was not marked as encrypted") - } - if !bytes.Equal(out, testMessage) { - t.Errorf("Message corrupted: got %x, want %x", out, testMessage) - } + if !encrypted { + t.Errorf("Message was not marked as encrypted") + } + if !bytes.Equal(out, message) { + t.Errorf("Message corrupted: got %x, want %x", out, message) } } + } - if j == 0 { - if len(alice.oldMACs) != 20 { - t.Errorf("Alice does not have MAC keys to reveal") - } - } else if len(alice.oldMACs) != 40 { - t.Errorf("Alice does not have MAC keys to reveal") + switch macKeyCheck { + case firstRoundTrip: + if len(alice.oldMACs) != 20 { + t.Errorf("Alice has %d bytes of MAC keys to reveal, but should have 20", len(alice.oldMACs)) + } + case subsequentRoundTrip: + if len(alice.oldMACs) != 40 { + t.Errorf("Alice has %d bytes of MAC keys to reveal, but should have 40", len(alice.oldMACs)) } } } +func TestConversation(t *testing.T) { + alice, bob := setupConversation(t) + + var testMessages = [][]byte{ + []byte("hello"), []byte("bye"), + } + + roundTripType := firstRoundTrip + + for _, testMessage := range testMessages { + roundTrip(t, alice, bob, testMessage, roundTripType) + roundTripType = subsequentRoundTrip + } +} + func TestGoodSMP(t *testing.T) { var alice, bob Conversation @@ -348,6 +383,21 @@ } } +func TestRehandshaking(t *testing.T) { + alice, bob := setupConversation(t) + roundTrip(t, alice, bob, []byte("test"), firstRoundTrip) + 
roundTrip(t, alice, bob, []byte("test 2"), subsequentRoundTrip) + roundTrip(t, alice, bob, []byte("test 3"), subsequentRoundTrip) + roundTrip(t, alice, bob, []byte("test 4"), subsequentRoundTrip) + roundTrip(t, alice, bob, []byte("test 5"), subsequentRoundTrip) + roundTrip(t, alice, bob, []byte("test 6"), subsequentRoundTrip) + roundTrip(t, alice, bob, []byte("test 7"), subsequentRoundTrip) + roundTrip(t, alice, bob, []byte("test 8"), subsequentRoundTrip) + performHandshake(t, alice, bob) + roundTrip(t, alice, bob, []byte("test"), noMACKeyCheck) + roundTrip(t, alice, bob, []byte("test 2"), noMACKeyCheck) +} + func TestAgainstLibOTR(t *testing.T) { // This test requires otr.c.test to be built as /tmp/a.out. // If enabled, this tests runs forever performing OTR handshakes in a @@ -400,7 +450,7 @@ if change == NewKeys { alicesMessage, err := alice.Send([]byte("Go -> libotr test message")) if err != nil { - t.Errorf("error sending message: %s", err.Error()) + t.Fatalf("error sending message: %s", err.Error()) } else { for _, msg := range alicesMessage { out.Write(msg) @@ -410,10 +460,10 @@ } if len(text) > 0 { if !bytes.Equal(text, expectedText) { - t.Errorf("expected %x, but got %x", expectedText, text) + t.Fatalf("expected %x, but got %x", expectedText, text) } if !encrypted { - t.Error("message wasn't encrypted") + t.Fatal("message wasn't encrypted") } } } diff -Nru juju-core-2.0~beta15/src/golang.org/x/crypto/pkcs12/bmp-string.go juju-core-2.0.0/src/golang.org/x/crypto/pkcs12/bmp-string.go --- juju-core-2.0~beta15/src/golang.org/x/crypto/pkcs12/bmp-string.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/golang.org/x/crypto/pkcs12/bmp-string.go 2016-10-13 14:32:00.000000000 +0000 @@ -0,0 +1,50 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package pkcs12 + +import ( + "errors" + "unicode/utf16" +) + +// bmpString returns s encoded in UCS-2 with a zero terminator. +func bmpString(s string) ([]byte, error) { + // References: + // https://tools.ietf.org/html/rfc7292#appendix-B.1 + // http://en.wikipedia.org/wiki/Plane_(Unicode)#Basic_Multilingual_Plane + // - non-BMP characters are encoded in UTF 16 by using a surrogate pair of 16-bit codes + // EncodeRune returns 0xfffd if the rune does not need special encoding + // - the above RFC provides the info that BMPStrings are NULL terminated. + + ret := make([]byte, 0, 2*len(s)+2) + + for _, r := range s { + if t, _ := utf16.EncodeRune(r); t != 0xfffd { + return nil, errors.New("pkcs12: string contains characters that cannot be encoded in UCS-2") + } + ret = append(ret, byte(r/256), byte(r%256)) + } + + return append(ret, 0, 0), nil +} + +func decodeBMPString(bmpString []byte) (string, error) { + if len(bmpString)%2 != 0 { + return "", errors.New("pkcs12: odd-length BMP string") + } + + // strip terminator if present + if l := len(bmpString); l >= 2 && bmpString[l-1] == 0 && bmpString[l-2] == 0 { + bmpString = bmpString[:l-2] + } + + s := make([]uint16, 0, len(bmpString)/2) + for len(bmpString) > 0 { + s = append(s, uint16(bmpString[0])<<8+uint16(bmpString[1])) + bmpString = bmpString[2:] + } + + return string(utf16.Decode(s)), nil +} diff -Nru juju-core-2.0~beta15/src/golang.org/x/crypto/pkcs12/bmp-string_test.go juju-core-2.0.0/src/golang.org/x/crypto/pkcs12/bmp-string_test.go --- juju-core-2.0~beta15/src/golang.org/x/crypto/pkcs12/bmp-string_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/golang.org/x/crypto/pkcs12/bmp-string_test.go 2016-10-13 14:32:00.000000000 +0000 @@ -0,0 +1,63 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package pkcs12 + +import ( + "bytes" + "encoding/hex" + "testing" +) + +var bmpStringTests = []struct { + in string + expectedHex string + shouldFail bool +}{ + {"", "0000", false}, + // Example from https://tools.ietf.org/html/rfc7292#appendix-B. + {"Beavis", "0042006500610076006900730000", false}, + // Some characters from the "Letterlike Symbols Unicode block". + {"\u2115 - Double-struck N", "21150020002d00200044006f00750062006c0065002d00730074007200750063006b0020004e0000", false}, + // any character outside the BMP should trigger an error. + {"\U0001f000 East wind (Mahjong)", "", true}, +} + +func TestBMPString(t *testing.T) { + for i, test := range bmpStringTests { + expected, err := hex.DecodeString(test.expectedHex) + if err != nil { + t.Fatalf("#%d: failed to decode expectation", i) + } + + out, err := bmpString(test.in) + if err == nil && test.shouldFail { + t.Errorf("#%d: expected to fail, but produced %x", i, out) + continue + } + + if err != nil && !test.shouldFail { + t.Errorf("#%d: failed unexpectedly: %s", i, err) + continue + } + + if !test.shouldFail { + if !bytes.Equal(out, expected) { + t.Errorf("#%d: expected %s, got %x", i, test.expectedHex, out) + continue + } + + roundTrip, err := decodeBMPString(out) + if err != nil { + t.Errorf("#%d: decoding output gave an error: %s", i, err) + continue + } + + if roundTrip != test.in { + t.Errorf("#%d: decoding output resulted in %q, but it should have been %q", i, roundTrip, test.in) + continue + } + } + } +} diff -Nru juju-core-2.0~beta15/src/golang.org/x/crypto/pkcs12/crypto.go juju-core-2.0.0/src/golang.org/x/crypto/pkcs12/crypto.go --- juju-core-2.0~beta15/src/golang.org/x/crypto/pkcs12/crypto.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/golang.org/x/crypto/pkcs12/crypto.go 2016-10-13 14:32:00.000000000 +0000 @@ -0,0 +1,131 @@ +// Copyright 2015 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkcs12 + +import ( + "bytes" + "crypto/cipher" + "crypto/des" + "crypto/x509/pkix" + "encoding/asn1" + "errors" + + "golang.org/x/crypto/pkcs12/internal/rc2" +) + +var ( + oidPBEWithSHAAnd3KeyTripleDESCBC = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 12, 1, 3}) + oidPBEWithSHAAnd40BitRC2CBC = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 12, 1, 6}) +) + +// pbeCipher is an abstraction of a PKCS#12 cipher. +type pbeCipher interface { + // create returns a cipher.Block given a key. + create(key []byte) (cipher.Block, error) + // deriveKey returns a key derived from the given password and salt. + deriveKey(salt, password []byte, iterations int) []byte + // deriveKey returns an IV derived from the given password and salt. + deriveIV(salt, password []byte, iterations int) []byte +} + +type shaWithTripleDESCBC struct{} + +func (shaWithTripleDESCBC) create(key []byte) (cipher.Block, error) { + return des.NewTripleDESCipher(key) +} + +func (shaWithTripleDESCBC) deriveKey(salt, password []byte, iterations int) []byte { + return pbkdf(sha1Sum, 20, 64, salt, password, iterations, 1, 24) +} + +func (shaWithTripleDESCBC) deriveIV(salt, password []byte, iterations int) []byte { + return pbkdf(sha1Sum, 20, 64, salt, password, iterations, 2, 8) +} + +type shaWith40BitRC2CBC struct{} + +func (shaWith40BitRC2CBC) create(key []byte) (cipher.Block, error) { + return rc2.New(key, len(key)*8) +} + +func (shaWith40BitRC2CBC) deriveKey(salt, password []byte, iterations int) []byte { + return pbkdf(sha1Sum, 20, 64, salt, password, iterations, 1, 5) +} + +func (shaWith40BitRC2CBC) deriveIV(salt, password []byte, iterations int) []byte { + return pbkdf(sha1Sum, 20, 64, salt, password, iterations, 2, 8) +} + +type pbeParams struct { + Salt []byte + Iterations int +} + +func pbDecrypterFor(algorithm pkix.AlgorithmIdentifier, password []byte) (cipher.BlockMode, int, 
error) { + var cipherType pbeCipher + + switch { + case algorithm.Algorithm.Equal(oidPBEWithSHAAnd3KeyTripleDESCBC): + cipherType = shaWithTripleDESCBC{} + case algorithm.Algorithm.Equal(oidPBEWithSHAAnd40BitRC2CBC): + cipherType = shaWith40BitRC2CBC{} + default: + return nil, 0, NotImplementedError("algorithm " + algorithm.Algorithm.String() + " is not supported") + } + + var params pbeParams + if err := unmarshal(algorithm.Parameters.FullBytes, ¶ms); err != nil { + return nil, 0, err + } + + key := cipherType.deriveKey(params.Salt, password, params.Iterations) + iv := cipherType.deriveIV(params.Salt, password, params.Iterations) + + block, err := cipherType.create(key) + if err != nil { + return nil, 0, err + } + + return cipher.NewCBCDecrypter(block, iv), block.BlockSize(), nil +} + +func pbDecrypt(info decryptable, password []byte) (decrypted []byte, err error) { + cbc, blockSize, err := pbDecrypterFor(info.Algorithm(), password) + if err != nil { + return nil, err + } + + encrypted := info.Data() + if len(encrypted) == 0 { + return nil, errors.New("pkcs12: empty encrypted data") + } + if len(encrypted)%blockSize != 0 { + return nil, errors.New("pkcs12: input is not a multiple of the block size") + } + decrypted = make([]byte, len(encrypted)) + cbc.CryptBlocks(decrypted, encrypted) + + psLen := int(decrypted[len(decrypted)-1]) + if psLen == 0 || psLen > blockSize { + return nil, ErrDecryption + } + + if len(decrypted) < psLen { + return nil, ErrDecryption + } + ps := decrypted[len(decrypted)-psLen:] + decrypted = decrypted[:len(decrypted)-psLen] + if bytes.Compare(ps, bytes.Repeat([]byte{byte(psLen)}, psLen)) != 0 { + return nil, ErrDecryption + } + + return +} + +// decryptable abstracts a object that contains ciphertext. 
+type decryptable interface { + Algorithm() pkix.AlgorithmIdentifier + Data() []byte +} diff -Nru juju-core-2.0~beta15/src/golang.org/x/crypto/pkcs12/crypto_test.go juju-core-2.0.0/src/golang.org/x/crypto/pkcs12/crypto_test.go --- juju-core-2.0~beta15/src/golang.org/x/crypto/pkcs12/crypto_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/golang.org/x/crypto/pkcs12/crypto_test.go 2016-10-13 14:32:00.000000000 +0000 @@ -0,0 +1,125 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkcs12 + +import ( + "bytes" + "crypto/x509/pkix" + "encoding/asn1" + "testing" +) + +var sha1WithTripleDES = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 12, 1, 3}) + +func TestPbDecrypterFor(t *testing.T) { + params, _ := asn1.Marshal(pbeParams{ + Salt: []byte{1, 2, 3, 4, 5, 6, 7, 8}, + Iterations: 2048, + }) + alg := pkix.AlgorithmIdentifier{ + Algorithm: asn1.ObjectIdentifier([]int{1, 2, 3}), + Parameters: asn1.RawValue{ + FullBytes: params, + }, + } + + pass, _ := bmpString("Sesame open") + + _, _, err := pbDecrypterFor(alg, pass) + if _, ok := err.(NotImplementedError); !ok { + t.Errorf("expected not implemented error, got: %T %s", err, err) + } + + alg.Algorithm = sha1WithTripleDES + cbc, blockSize, err := pbDecrypterFor(alg, pass) + if err != nil { + t.Errorf("unexpected error from pbDecrypterFor %v", err) + } + if blockSize != 8 { + t.Errorf("unexpected block size %d, wanted 8", blockSize) + } + + plaintext := []byte{1, 2, 3, 4, 5, 6, 7, 8} + expectedCiphertext := []byte{185, 73, 135, 249, 137, 1, 122, 247} + ciphertext := make([]byte, len(plaintext)) + cbc.CryptBlocks(ciphertext, plaintext) + + if bytes.Compare(ciphertext, expectedCiphertext) != 0 { + t.Errorf("bad ciphertext, got %x but wanted %x", ciphertext, expectedCiphertext) + } +} + +var pbDecryptTests = []struct { + in []byte + expected []byte + expectedError error +}{ + { + 
[]byte("\x33\x73\xf3\x9f\xda\x49\xae\xfc\xa0\x9a\xdf\x5a\x58\xa0\xea\x46"), // 7 padding bytes + []byte("A secret!"), + nil, + }, + { + []byte("\x33\x73\xf3\x9f\xda\x49\xae\xfc\x96\x24\x2f\x71\x7e\x32\x3f\xe7"), // 8 padding bytes + []byte("A secret"), + nil, + }, + { + []byte("\x35\x0c\xc0\x8d\xab\xa9\x5d\x30\x7f\x9a\xec\x6a\xd8\x9b\x9c\xd9"), // 9 padding bytes, incorrect + nil, + ErrDecryption, + }, + { + []byte("\xb2\xf9\x6e\x06\x60\xae\x20\xcf\x08\xa0\x7b\xd9\x6b\x20\xef\x41"), // incorrect padding bytes: [ ... 0x04 0x02 ] + nil, + ErrDecryption, + }, +} + +func TestPbDecrypt(t *testing.T) { + for i, test := range pbDecryptTests { + decryptable := testDecryptable{ + data: test.in, + algorithm: pkix.AlgorithmIdentifier{ + Algorithm: sha1WithTripleDES, + Parameters: pbeParams{ + Salt: []byte("\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8"), + Iterations: 4096, + }.RawASN1(), + }, + } + password, _ := bmpString("sesame") + + plaintext, err := pbDecrypt(decryptable, password) + if err != test.expectedError { + t.Errorf("#%d: got error %q, but wanted %q", i, err, test.expectedError) + continue + } + + if !bytes.Equal(plaintext, test.expected) { + t.Errorf("#%d: got %x, but wanted %x", i, plaintext, test.expected) + } + } +} + +type testDecryptable struct { + data []byte + algorithm pkix.AlgorithmIdentifier +} + +func (d testDecryptable) Algorithm() pkix.AlgorithmIdentifier { return d.algorithm } +func (d testDecryptable) Data() []byte { return d.data } + +func (params pbeParams) RawASN1() (raw asn1.RawValue) { + asn1Bytes, err := asn1.Marshal(params) + if err != nil { + panic(err) + } + _, err = asn1.Unmarshal(asn1Bytes, &raw) + if err != nil { + panic(err) + } + return +} diff -Nru juju-core-2.0~beta15/src/golang.org/x/crypto/pkcs12/errors.go juju-core-2.0.0/src/golang.org/x/crypto/pkcs12/errors.go --- juju-core-2.0~beta15/src/golang.org/x/crypto/pkcs12/errors.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/golang.org/x/crypto/pkcs12/errors.go 2016-10-13 
14:32:00.000000000 +0000 @@ -0,0 +1,23 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkcs12 + +import "errors" + +var ( + // ErrDecryption represents a failure to decrypt the input. + ErrDecryption = errors.New("pkcs12: decryption error, incorrect padding") + + // ErrIncorrectPassword is returned when an incorrect password is detected. + // Usually, P12/PFX data is signed to be able to verify the password. + ErrIncorrectPassword = errors.New("pkcs12: decryption password incorrect") +) + +// NotImplementedError indicates that the input is not currently supported. +type NotImplementedError string + +func (e NotImplementedError) Error() string { + return "pkcs12: " + string(e) +} diff -Nru juju-core-2.0~beta15/src/golang.org/x/crypto/pkcs12/internal/rc2/bench_test.go juju-core-2.0.0/src/golang.org/x/crypto/pkcs12/internal/rc2/bench_test.go --- juju-core-2.0~beta15/src/golang.org/x/crypto/pkcs12/internal/rc2/bench_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/golang.org/x/crypto/pkcs12/internal/rc2/bench_test.go 2016-10-13 14:32:00.000000000 +0000 @@ -0,0 +1,27 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package rc2 + +import ( + "testing" +) + +func BenchmarkEncrypt(b *testing.B) { + r, _ := New([]byte{0, 0, 0, 0, 0, 0, 0, 0}, 64) + b.ResetTimer() + var src [8]byte + for i := 0; i < b.N; i++ { + r.Encrypt(src[:], src[:]) + } +} + +func BenchmarkDecrypt(b *testing.B) { + r, _ := New([]byte{0, 0, 0, 0, 0, 0, 0, 0}, 64) + b.ResetTimer() + var src [8]byte + for i := 0; i < b.N; i++ { + r.Decrypt(src[:], src[:]) + } +} diff -Nru juju-core-2.0~beta15/src/golang.org/x/crypto/pkcs12/internal/rc2/rc2.go juju-core-2.0.0/src/golang.org/x/crypto/pkcs12/internal/rc2/rc2.go --- juju-core-2.0~beta15/src/golang.org/x/crypto/pkcs12/internal/rc2/rc2.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/golang.org/x/crypto/pkcs12/internal/rc2/rc2.go 2016-10-13 14:32:00.000000000 +0000 @@ -0,0 +1,274 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package rc2 implements the RC2 cipher +/* +https://www.ietf.org/rfc/rfc2268.txt +http://people.csail.mit.edu/rivest/pubs/KRRR98.pdf + +This code is licensed under the MIT license. 
+*/ +package rc2 + +import ( + "crypto/cipher" + "encoding/binary" +) + +// The rc2 block size in bytes +const BlockSize = 8 + +type rc2Cipher struct { + k [64]uint16 +} + +// New returns a new rc2 cipher with the given key and effective key length t1 +func New(key []byte, t1 int) (cipher.Block, error) { + // TODO(dgryski): error checking for key length + return &rc2Cipher{ + k: expandKey(key, t1), + }, nil +} + +func (*rc2Cipher) BlockSize() int { return BlockSize } + +var piTable = [256]byte{ + 0xd9, 0x78, 0xf9, 0xc4, 0x19, 0xdd, 0xb5, 0xed, 0x28, 0xe9, 0xfd, 0x79, 0x4a, 0xa0, 0xd8, 0x9d, + 0xc6, 0x7e, 0x37, 0x83, 0x2b, 0x76, 0x53, 0x8e, 0x62, 0x4c, 0x64, 0x88, 0x44, 0x8b, 0xfb, 0xa2, + 0x17, 0x9a, 0x59, 0xf5, 0x87, 0xb3, 0x4f, 0x13, 0x61, 0x45, 0x6d, 0x8d, 0x09, 0x81, 0x7d, 0x32, + 0xbd, 0x8f, 0x40, 0xeb, 0x86, 0xb7, 0x7b, 0x0b, 0xf0, 0x95, 0x21, 0x22, 0x5c, 0x6b, 0x4e, 0x82, + 0x54, 0xd6, 0x65, 0x93, 0xce, 0x60, 0xb2, 0x1c, 0x73, 0x56, 0xc0, 0x14, 0xa7, 0x8c, 0xf1, 0xdc, + 0x12, 0x75, 0xca, 0x1f, 0x3b, 0xbe, 0xe4, 0xd1, 0x42, 0x3d, 0xd4, 0x30, 0xa3, 0x3c, 0xb6, 0x26, + 0x6f, 0xbf, 0x0e, 0xda, 0x46, 0x69, 0x07, 0x57, 0x27, 0xf2, 0x1d, 0x9b, 0xbc, 0x94, 0x43, 0x03, + 0xf8, 0x11, 0xc7, 0xf6, 0x90, 0xef, 0x3e, 0xe7, 0x06, 0xc3, 0xd5, 0x2f, 0xc8, 0x66, 0x1e, 0xd7, + 0x08, 0xe8, 0xea, 0xde, 0x80, 0x52, 0xee, 0xf7, 0x84, 0xaa, 0x72, 0xac, 0x35, 0x4d, 0x6a, 0x2a, + 0x96, 0x1a, 0xd2, 0x71, 0x5a, 0x15, 0x49, 0x74, 0x4b, 0x9f, 0xd0, 0x5e, 0x04, 0x18, 0xa4, 0xec, + 0xc2, 0xe0, 0x41, 0x6e, 0x0f, 0x51, 0xcb, 0xcc, 0x24, 0x91, 0xaf, 0x50, 0xa1, 0xf4, 0x70, 0x39, + 0x99, 0x7c, 0x3a, 0x85, 0x23, 0xb8, 0xb4, 0x7a, 0xfc, 0x02, 0x36, 0x5b, 0x25, 0x55, 0x97, 0x31, + 0x2d, 0x5d, 0xfa, 0x98, 0xe3, 0x8a, 0x92, 0xae, 0x05, 0xdf, 0x29, 0x10, 0x67, 0x6c, 0xba, 0xc9, + 0xd3, 0x00, 0xe6, 0xcf, 0xe1, 0x9e, 0xa8, 0x2c, 0x63, 0x16, 0x01, 0x3f, 0x58, 0xe2, 0x89, 0xa9, + 0x0d, 0x38, 0x34, 0x1b, 0xab, 0x33, 0xff, 0xb0, 0xbb, 0x48, 0x0c, 0x5f, 0xb9, 0xb1, 0xcd, 0x2e, + 0xc5, 0xf3, 0xdb, 0x47, 
	0xe5, 0xa5, 0x9c, 0x77, 0x0a, 0xa6, 0x20, 0x68, 0xfe, 0x7f, 0xc1, 0xad,
}

// expandKey expands a variable-length RC2 key into the 64 16-bit round
// subkeys K[0..63] as described in RFC 2268 section 2. t1 is the
// effective key size in bits.
func expandKey(key []byte, t1 int) [64]uint16 {

	// l is the 128-byte key buffer L from the RFC; the user key is
	// copied into its head and the rest is derived below.
	l := make([]byte, 128)
	copy(l, key)

	var t = len(key)
	var t8 = (t1 + 7) / 8
	// tm masks off the high bits that would exceed the effective key size.
	var tm = byte(255 % uint(1<<(8+uint(t1)-8*uint(t8))))

	// First expansion pass: L[i] = piTable[L[i-1] + L[i-t]].
	for i := len(key); i < 128; i++ {
		l[i] = piTable[l[i-1]+l[uint8(i-t)]]
	}

	// Reduce the effective key size by masking the pivot byte.
	l[128-t8] = piTable[l[128-t8]&tm]

	// Second (backwards) expansion pass: L[i] = piTable[L[i+1] ^ L[i+t8]].
	for i := 127 - t8; i >= 0; i-- {
		l[i] = piTable[l[i+1]^l[i+t8]]
	}

	// Read the buffer as 64 little-endian 16-bit subkeys.
	var k [64]uint16

	for i := range k {
		k[i] = uint16(l[2*i]) + uint16(l[2*i+1])*256
	}

	return k
}

// rotl16 rotates x left by b bits; b must be in [0, 16).
func rotl16(x uint16, b uint) uint16 {
	return (x >> (16 - b)) | (x << b)
}

// Encrypt encrypts the 8-byte block src[0:8] into dst[0:8] using the
// expanded key in c.k. The block is processed as four little-endian
// 16-bit words r0..r3; the structure below is the RFC 2268 schedule of
// 16 mixing rounds with a mashing round after rounds 5 and 11.
func (c *rc2Cipher) Encrypt(dst, src []byte) {

	r0 := binary.LittleEndian.Uint16(src[0:])
	r1 := binary.LittleEndian.Uint16(src[2:])
	r2 := binary.LittleEndian.Uint16(src[4:])
	r3 := binary.LittleEndian.Uint16(src[6:])

	var j int

	// Mixing rounds 1-5 (j advances 4 subkeys per round).
	for j <= 16 {
		// mix r0
		r0 = r0 + c.k[j] + (r3 & r2) + ((^r3) & r1)
		r0 = rotl16(r0, 1)
		j++

		// mix r1
		r1 = r1 + c.k[j] + (r0 & r3) + ((^r0) & r2)
		r1 = rotl16(r1, 2)
		j++

		// mix r2
		r2 = r2 + c.k[j] + (r1 & r0) + ((^r1) & r3)
		r2 = rotl16(r2, 3)
		j++

		// mix r3
		r3 = r3 + c.k[j] + (r2 & r1) + ((^r2) & r0)
		r3 = rotl16(r3, 5)
		j++

	}

	// First mashing round: add a subkey selected by the low six bits of
	// the previous register.
	r0 = r0 + c.k[r3&63]
	r1 = r1 + c.k[r0&63]
	r2 = r2 + c.k[r1&63]
	r3 = r3 + c.k[r2&63]

	// Mixing rounds 6-11.
	for j <= 40 {

		// mix r0
		r0 = r0 + c.k[j] + (r3 & r2) + ((^r3) & r1)
		r0 = rotl16(r0, 1)
		j++

		// mix r1
		r1 = r1 + c.k[j] + (r0 & r3) + ((^r0) & r2)
		r1 = rotl16(r1, 2)
		j++

		// mix r2
		r2 = r2 + c.k[j] + (r1 & r0) + ((^r1) & r3)
		r2 = rotl16(r2, 3)
		j++

		// mix r3
		r3 = r3 + c.k[j] + (r2 & r1) + ((^r2) & r0)
		r3 = rotl16(r3, 5)
		j++

	}

	// Second mashing round.
	r0 = r0 + c.k[r3&63]
	r1 = r1 + c.k[r0&63]
	r2 = r2 + c.k[r1&63]
	r3 = r3 + c.k[r2&63]

	// Mixing rounds 12-16.
	for j <= 60 {

		// mix r0
		r0 = r0 + c.k[j] + (r3 & r2) + ((^r3) & r1)
		r0 = rotl16(r0, 1)
		j++

		// mix r1
		r1 = r1 + c.k[j] + (r0 & r3) + ((^r0) & r2)
		r1 = rotl16(r1, 2)
		j++

		// mix r2
		r2 = r2 + c.k[j] + (r1 & r0) + ((^r1) & r3)
		r2 = rotl16(r2, 3)
		j++

		// mix r3
		r3 = r3 + c.k[j] + (r2 & r1) + ((^r2) & r0)
		r3 = rotl16(r3, 5)
		j++
	}

	binary.LittleEndian.PutUint16(dst[0:], r0)
	binary.LittleEndian.PutUint16(dst[2:], r1)
	binary.LittleEndian.PutUint16(dst[4:], r2)
	binary.LittleEndian.PutUint16(dst[6:], r3)
}

// Decrypt decrypts the 8-byte block src[0:8] into dst[0:8]. It is the
// exact inverse of Encrypt: the same round structure walked backwards
// (j counts down from 63), with subtraction and right rotation undoing
// each mix, and "unmashing" rounds between the groups.
func (c *rc2Cipher) Decrypt(dst, src []byte) {

	r0 := binary.LittleEndian.Uint16(src[0:])
	r1 := binary.LittleEndian.Uint16(src[2:])
	r2 := binary.LittleEndian.Uint16(src[4:])
	r3 := binary.LittleEndian.Uint16(src[6:])

	j := 63

	// Undo mixing rounds 16-12.
	for j >= 44 {
		// unmix r3
		r3 = rotl16(r3, 16-5)
		r3 = r3 - c.k[j] - (r2 & r1) - ((^r2) & r0)
		j--

		// unmix r2
		r2 = rotl16(r2, 16-3)
		r2 = r2 - c.k[j] - (r1 & r0) - ((^r1) & r3)
		j--

		// unmix r1
		r1 = rotl16(r1, 16-2)
		r1 = r1 - c.k[j] - (r0 & r3) - ((^r0) & r2)
		j--

		// unmix r0
		r0 = rotl16(r0, 16-1)
		r0 = r0 - c.k[j] - (r3 & r2) - ((^r3) & r1)
		j--
	}

	// Undo second mashing round.
	r3 = r3 - c.k[r2&63]
	r2 = r2 - c.k[r1&63]
	r1 = r1 - c.k[r0&63]
	r0 = r0 - c.k[r3&63]

	// Undo mixing rounds 11-6.
	for j >= 20 {
		// unmix r3
		r3 = rotl16(r3, 16-5)
		r3 = r3 - c.k[j] - (r2 & r1) - ((^r2) & r0)
		j--

		// unmix r2
		r2 = rotl16(r2, 16-3)
		r2 = r2 - c.k[j] - (r1 & r0) - ((^r1) & r3)
		j--

		// unmix r1
		r1 = rotl16(r1, 16-2)
		r1 = r1 - c.k[j] - (r0 & r3) - ((^r0) & r2)
		j--

		// unmix r0
		r0 = rotl16(r0, 16-1)
		r0 = r0 - c.k[j] - (r3 & r2) - ((^r3) & r1)
		j--

	}

	// Undo first mashing round.
	r3 = r3 - c.k[r2&63]
	r2 = r2 - c.k[r1&63]
	r1 = r1 - c.k[r0&63]
	r0 = r0 - c.k[r3&63]

	// Undo mixing rounds 5-1.
	for j >= 0 {

		// unmix r3
		r3 = rotl16(r3, 16-5)
		r3 = r3 - c.k[j] - (r2 & r1) - ((^r2) & r0)
		j--

		// unmix r2
		r2 = rotl16(r2, 16-3)
		r2 = r2 - c.k[j] - (r1 & r0) - ((^r1) & r3)
		j--

		// unmix r1
		r1 = rotl16(r1, 16-2)
		r1 = r1 - c.k[j] - (r0 & r3) - ((^r0) & r2)
		j--

		// unmix r0
		r0 = rotl16(r0, 16-1)
		r0 = r0 - c.k[j] - (r3 & r2) - ((^r3) & r1)
		j--

	}

	binary.LittleEndian.PutUint16(dst[0:], r0)
	binary.LittleEndian.PutUint16(dst[2:], r1)
	binary.LittleEndian.PutUint16(dst[4:], r2)
	binary.LittleEndian.PutUint16(dst[6:], r3)
}
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package pkcs12

import (
	"crypto/hmac"
	"crypto/sha1"
	"crypto/x509/pkix"
	"encoding/asn1"
)

// macData is the ASN.1 MacData structure from RFC 7292 section 4: the
// MAC over the authenticated safe, the salt used to derive the MAC key,
// and the KDF iteration count (defaults to 1 when absent).
type macData struct {
	Mac        digestInfo
	MacSalt    []byte
	Iterations int `asn1:"optional,default:1"`
}

// digestInfo is the DigestInfo structure from PKCS#7: a digest
// algorithm identifier plus the digest value itself.
// from PKCS#7:
type digestInfo struct {
	Algorithm pkix.AlgorithmIdentifier
	Digest    []byte
}

var (
	// oidSHA1 identifies the SHA-1 digest algorithm; the only MAC
	// digest supported by this package.
	oidSHA1 = asn1.ObjectIdentifier([]int{1, 3, 14, 3, 2, 26})
)

// verifyMac checks the HMAC-SHA1 in macData against message using a key
// derived from password with the PKCS#12 KDF. It returns
// NotImplementedError for any digest other than SHA-1, and
// ErrIncorrectPassword when the computed MAC does not match.
func verifyMac(macData *macData, message, password []byte) error {
	if !macData.Mac.Algorithm.Algorithm.Equal(oidSHA1) {
		return NotImplementedError("unknown digest algorithm: " + macData.Mac.Algorithm.Algorithm.String())
	}

	// Derive a 20-byte MAC key; ID=3 selects MAC-key material per
	// RFC 7292 appendix B.3 (see pbkdf in pbkdf.go).
	key := pbkdf(sha1Sum, 20, 64, macData.MacSalt, password, macData.Iterations, 3, 20)

	mac := hmac.New(sha1.New, key)
	mac.Write(message)
	expectedMAC := mac.Sum(nil)

	// hmac.Equal is a constant-time comparison.
	if !hmac.Equal(macData.Mac.Digest, expectedMAC) {
		return ErrIncorrectPassword
	}
	return nil
}

// --- mac_test.go ---

// TestVerifyMac exercises verifyMac with an unsupported digest OID, a
// wrong password, and the correct password ("Sesame open", BMP-encoded).
func TestVerifyMac(t *testing.T) {
	td := macData{
		Mac: digestInfo{
			Digest: []byte{0x18, 0x20, 0x3d, 0xff, 0x1e, 0x16, 0xf4, 0x92, 0xf2, 0xaf, 0xc8, 0x91, 0xa9, 0xba, 0xd6, 0xca, 0x9d, 0xee, 0x51, 0x93},
		},
		MacSalt:    []byte{1, 2, 3, 4, 5, 6, 7, 8},
		Iterations: 2048,
	}

	message := []byte{11, 12, 13, 14, 15}
	password, _ := bmpString("")

	// An OID other than SHA-1 must yield NotImplementedError.
	td.Mac.Algorithm.Algorithm = asn1.ObjectIdentifier([]int{1, 2, 3})
	err := verifyMac(&td, message, password)
	if _, ok := err.(NotImplementedError); !ok {
		t.Errorf("err: %v", err)
	}

	// Correct OID but wrong (empty) password.
	td.Mac.Algorithm.Algorithm = asn1.ObjectIdentifier([]int{1, 3, 14, 3, 2, 26})
	err = verifyMac(&td, message, password)
	if err != ErrIncorrectPassword {
		t.Errorf("Expected incorrect password, got err: %v", err)
	}

	// Correct password verifies cleanly.
	password, _ = bmpString("Sesame open")
	err = verifyMac(&td, message, password)
	if err != nil {
		t.Errorf("err: %v", err)
	}

}

// --- pbkdf.go (preamble) ---

var (
	// one is a reusable big.Int constant for the +1 in KDF step 6C.
	one = big.NewInt(1)
)

// sha1Sum returns the SHA-1 hash of in.
func sha1Sum(in []byte) []byte {
	sum := sha1.Sum(in)
	return sum[:]
}
// fillWithRepeats returns v*ceiling(len(pattern) / v) bytes consisting of
// repeats of pattern.
func fillWithRepeats(pattern []byte, v int) []byte {
	if len(pattern) == 0 {
		return nil
	}
	// Round the output length up to a whole number of v-byte blocks.
	outputLen := v * ((len(pattern) + v - 1) / v)
	return bytes.Repeat(pattern, (outputLen+len(pattern)-1)/len(pattern))[:outputLen]
}

// pbkdf derives `size` bytes of key material per the PKCS#12 KDF.
// hash is the digest function, u its output length in bytes and v its
// block length in bytes (note: the RFC measures u and v in bits), r is
// the iteration count, and ID selects the purpose of the material
// (1=key, 2=IV, 3=MAC key).
func pbkdf(hash func([]byte) []byte, u, v int, salt, password []byte, r int, ID byte, size int) (key []byte) {
	// implementation of https://tools.ietf.org/html/rfc7292#appendix-B.2 , RFC text verbatim in comments

	//    Let H be a hash function built around a compression function f:

	//       Z_2^u x Z_2^v -> Z_2^u

	//    (that is, H has a chaining variable and output of length u bits, and
	//    the message input to the compression function of H is v bits).  The
	//    values for u and v are as follows:

	//            HASH FUNCTION     VALUE u        VALUE v
	//              MD2, MD5          128            512
	//                SHA-1           160            512
	//               SHA-224          224            512
	//               SHA-256          256            512
	//               SHA-384          384           1024
	//               SHA-512          512           1024
	//             SHA-512/224        224           1024
	//             SHA-512/256        256           1024

	//    Furthermore, let r be the iteration count.

	//    We assume here that u and v are both multiples of 8, as are the
	//    lengths of the password and salt strings (which we denote by p and s,
	//    respectively) and the number n of pseudorandom bits required.  In
	//    addition, u and v are of course non-zero.

	//    For information on security considerations for MD5 [19], see [25] and
	//    [1], and on those for MD2, see [18].

	//    The following procedure can be used to produce pseudorandom bits for
	//    a particular "purpose" that is identified by a byte called "ID".
	//    This standard specifies 3 different values for the ID byte:

	//    1.  If ID=1, then the pseudorandom bits being produced are to be used
	//        as key material for performing encryption or decryption.

	//    2.  If ID=2, then the pseudorandom bits being produced are to be used
	//        as an IV (Initial Value) for encryption or decryption.

	//    3.  If ID=3, then the pseudorandom bits being produced are to be used
	//        as an integrity key for MACing.

	//    1.  Construct a string, D (the "diversifier"), by concatenating v/8
	//        copies of ID.
	// (v is in bytes here, not bits as in the RFC, so v copies of ID
	// produce the v-byte diversifier.)
	var D []byte
	for i := 0; i < v; i++ {
		D = append(D, ID)
	}

	//    2.  Concatenate copies of the salt together to create a string S of
	//        length v(ceiling(s/v)) bits (the final copy of the salt may be
	//        truncated to create S).  Note that if the salt is the empty
	//        string, then so is S.

	S := fillWithRepeats(salt, v)

	//    3.  Concatenate copies of the password together to create a string P
	//        of length v(ceiling(p/v)) bits (the final copy of the password
	//        may be truncated to create P).  Note that if the password is the
	//        empty string, then so is P.

	P := fillWithRepeats(password, v)

	//    4.  Set I=S||P to be the concatenation of S and P.
	I := append(S, P...)

	//    5.  Set c=ceiling(n/u).
	c := (size + u - 1) / u

	//    6.  For i=1, 2, ..., c, do the following:
	// NOTE(review): A is laid out in fixed 20-byte slots, i.e. this
	// assumes u == 20 (SHA-1) — true for every caller in this package.
	A := make([]byte, c*20)
	var IjBuf []byte
	for i := 0; i < c; i++ {
		//        A.  Set Ai=H^r(D||I). (i.e., the r-th hash of D||I,
		//            H(H(H(... H(D||I))))
		Ai := hash(append(D, I...))
		for j := 1; j < r; j++ {
			Ai = hash(Ai)
		}
		copy(A[i*20:], Ai[:])

		if i < c-1 { // skip on last iteration
			//        B.  Concatenate copies of Ai to create a string B of length v
			//            bits (the final copy of Ai may be truncated to create B).
			var B []byte
			for len(B) < v {
				B = append(B, Ai[:]...)
			}
			B = B[:v]

			//        C.  Treating I as a concatenation I_0, I_1, ..., I_(k-1) of v-bit
			//            blocks, where k=ceiling(s/v)+ceiling(p/v), modify I by
			//            setting I_j=(I_j+B+1) mod 2^v for each j.
			{
				Bbi := new(big.Int).SetBytes(B)
				Ij := new(big.Int)

				for j := 0; j < len(I)/v; j++ {
					Ij.SetBytes(I[j*v : (j+1)*v])
					Ij.Add(Ij, Bbi)
					Ij.Add(Ij, one)
					Ijb := Ij.Bytes()
					// We expect Ijb to be exactly v bytes,
					// if it is longer or shorter we must
					// adjust it accordingly.
					if len(Ijb) > v {
						// Overflow past 2^v: keep the low v bytes.
						Ijb = Ijb[len(Ijb)-v:]
					}
					if len(Ijb) < v {
						// big.Int strips leading zeros; left-pad back
						// to v bytes in a reusable scratch buffer.
						if IjBuf == nil {
							IjBuf = make([]byte, v)
						}
						bytesShort := v - len(Ijb)
						for i := 0; i < bytesShort; i++ {
							IjBuf[i] = 0
						}
						copy(IjBuf[bytesShort:], Ijb)
						Ijb = IjBuf
					}
					copy(I[j*v:(j+1)*v], Ijb)
				}
			}
		}
	}
	//    7.  Concatenate A_1, A_2, ..., A_c together to form a pseudorandom
	//        bit string, A.

	//    8.  Use the first n bits of A as the output of this entire process.
	return A[:size]

	//    If the above process is being used to generate a DES key, the process
	//    should be used to create 64 random bits, and the key's parity bits
	//    should be set after the 64 bits have been produced.  Similar concerns
	//    hold for 2-key and 3-key triple-DES keys, for CDMF keys, and for any
	//    similar keys with parity bits "built into them".
}
package pkcs12

import (
	"bytes"
	"testing"
)

// TestThatPBKDFWorksCorrectlyForLongKeys derives a 24-byte 3DES key via
// the shaWithTripleDESCBC cipher descriptor (defined elsewhere in this
// package) and pins the expected bytes, covering the multi-block
// (c > 1) path of pbkdf.
func TestThatPBKDFWorksCorrectlyForLongKeys(t *testing.T) {
	cipherInfo := shaWithTripleDESCBC{}

	salt := []byte("\xff\xff\xff\xff\xff\xff\xff\xff")
	password, _ := bmpString("sesame")
	key := cipherInfo.deriveKey(salt, password, 2048)

	if expected := []byte("\x7c\xd9\xfd\x3e\x2b\x3b\xe7\x69\x1a\x44\xe3\xbe\xf0\xf9\xea\x0f\xb9\xb8\x97\xd4\xe3\x25\xd9\xd1"); bytes.Compare(key, expected) != 0 {
		t.Fatalf("expected key '%x', but found '%x'", expected, key)
	}
}

// TestThatPBKDFHandlesLeadingZeros is a regression test for step 6C of
// the KDF when an intermediate I_j has a leading zero byte.
func TestThatPBKDFHandlesLeadingZeros(t *testing.T) {
	// This test triggers a case where I_j (in step 6C) ends up with leading zero
	// byte, meaning that len(Ijb) < v (leading zeros get stripped by big.Int).
	// This was previously causing bug whereby certain inputs would break the
	// derivation and produce the wrong output.
	key := pbkdf(sha1Sum, 20, 64, []byte("\xf3\x7e\x05\xb5\x18\x32\x4b\x4b"), []byte("\x00\x00"), 2048, 1, 24)
	expected := []byte("\x00\xf7\x59\xff\x47\xd1\x4d\xd0\x36\x65\xd5\x94\x3c\xb3\xc4\xa3\x9a\x25\x55\xc0\x2a\xed\x66\xe1")
	if bytes.Compare(key, expected) != 0 {
		t.Fatalf("expected key '%x', but found '%x'", expected, key)
	}
}
// Package pkcs12 implements some of PKCS#12.
//
// This implementation is distilled from https://tools.ietf.org/html/rfc7292
// and referenced documents. It is intended for decoding P12/PFX-stored
// certificates and keys for use with the crypto/tls package.
package pkcs12

import (
	"crypto/ecdsa"
	"crypto/rsa"
	"crypto/x509"
	"crypto/x509/pkix"
	"encoding/asn1"
	"encoding/hex"
	"encoding/pem"
	"errors"
)

var (
	// Content-type OIDs from PKCS#7, and bag-attribute OIDs from
	// RFC 7292 appendix D / Microsoft.
	oidDataContentType          = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 7, 1})
	oidEncryptedDataContentType = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 7, 6})

	oidFriendlyName     = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 9, 20})
	oidLocalKeyID       = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 9, 21})
	oidMicrosoftCSPName = asn1.ObjectIdentifier([]int{1, 3, 6, 1, 4, 1, 311, 17, 1})
)

// pfxPdu is the top-level PFX structure of a P12 file (RFC 7292 sec. 4).
type pfxPdu struct {
	Version  int
	AuthSafe contentInfo
	MacData  macData `asn1:"optional"`
}

// contentInfo is the PKCS#7 ContentInfo wrapper.
type contentInfo struct {
	ContentType asn1.ObjectIdentifier
	Content     asn1.RawValue `asn1:"tag:0,explicit,optional"`
}

// encryptedData is the PKCS#7 EncryptedData structure.
type encryptedData struct {
	Version              int
	EncryptedContentInfo encryptedContentInfo
}

type encryptedContentInfo struct {
	ContentType                asn1.ObjectIdentifier
	ContentEncryptionAlgorithm pkix.AlgorithmIdentifier
	EncryptedContent           []byte `asn1:"tag:0,optional"`
}

// Algorithm and Data let encryptedContentInfo satisfy the decryptable
// interface consumed by pbDecrypt.
func (i encryptedContentInfo) Algorithm() pkix.AlgorithmIdentifier {
	return i.ContentEncryptionAlgorithm
}

func (i encryptedContentInfo) Data() []byte { return i.EncryptedContent }

// safeBag is a single SafeBag entry from a SafeContents sequence.
type safeBag struct {
	Id         asn1.ObjectIdentifier
	Value      asn1.RawValue     `asn1:"tag:0,explicit"`
	Attributes []pkcs12Attribute `asn1:"set,optional"`
}

type pkcs12Attribute struct {
	Id    asn1.ObjectIdentifier
	Value asn1.RawValue `asn1:"set"`
}

// encryptedPrivateKeyInfo is the PKCS#8 EncryptedPrivateKeyInfo
// structure carried in a pkcs8ShroudedKeyBag.
type encryptedPrivateKeyInfo struct {
	AlgorithmIdentifier pkix.AlgorithmIdentifier
	EncryptedData       []byte
}

func (i encryptedPrivateKeyInfo) Algorithm() pkix.AlgorithmIdentifier {
	return i.AlgorithmIdentifier
}

func (i encryptedPrivateKeyInfo) Data() []byte {
	return i.EncryptedData
}

// PEM block types
const (
	certificateType = "CERTIFICATE"
	privateKeyType  = "PRIVATE KEY"
)

// unmarshal calls asn1.Unmarshal, but also returns an error if there is any
// trailing data after unmarshaling.
func unmarshal(in []byte, out interface{}) error {
	trailing, err := asn1.Unmarshal(in, out)
	if err != nil {
		return err
	}
	if len(trailing) != 0 {
		return errors.New("pkcs12: trailing data found")
	}
	return nil
}

// ToPEM converts all "safe bags" contained in pfxData to PEM blocks.
func ToPEM(pfxData []byte, password string) ([]*pem.Block, error) {
	encodedPassword, err := bmpString(password)
	if err != nil {
		return nil, ErrIncorrectPassword
	}

	// NOTE(review): the error returned by getSafeContents is discarded
	// here, so a wrong password or malformed PFX surfaces as an empty
	// bag list rather than an error; later x/crypto revisions check it.
	bags, encodedPassword, err := getSafeContents(pfxData, encodedPassword)

	blocks := make([]*pem.Block, 0, len(bags))
	for _, bag := range bags {
		block, err := convertBag(&bag, encodedPassword)
		if err != nil {
			return nil, err
		}
		blocks = append(blocks, block)
	}

	return blocks, nil
}

// convertBag turns one safe bag into a PEM block: certBags become
// CERTIFICATE blocks, shrouded key bags are decrypted and re-encoded as
// PRIVATE KEY blocks; bag attributes become PEM headers.
func convertBag(bag *safeBag, password []byte) (*pem.Block, error) {
	block := &pem.Block{
		Headers: make(map[string]string),
	}

	for _, attribute := range bag.Attributes {
		k, v, err := convertAttribute(&attribute)
		if err != nil {
			return nil, err
		}
		block.Headers[k] = v
	}

	switch {
	case bag.Id.Equal(oidCertBag):
		block.Type = certificateType
		certsData, err := decodeCertBag(bag.Value.Bytes)
		if err != nil {
			return nil, err
		}
		block.Bytes = certsData
	case bag.Id.Equal(oidPKCS8ShroundedKeyBag):
		block.Type = privateKeyType

		key, err := decodePkcs8ShroudedKeyBag(bag.Value.Bytes, password)
		if err != nil {
			return nil, err
		}

		// Re-encode in the key-type-specific format expected by
		// crypto/tls (PKCS#1 for RSA, SEC 1 for EC).
		switch key := key.(type) {
		case *rsa.PrivateKey:
			block.Bytes = x509.MarshalPKCS1PrivateKey(key)
		case *ecdsa.PrivateKey:
			block.Bytes, err = x509.MarshalECPrivateKey(key)
			if err != nil {
				return nil, err
			}
		default:
			return nil, errors.New("found unknown private key type in PKCS#8 wrapping")
		}
	default:
		return nil, errors.New("don't know how to convert a safe bag of type " + bag.Id.String())
	}
	return block, nil
}

// convertAttribute maps a bag attribute to a PEM header key/value pair.
// BMP-string attributes are decoded to UTF-8; binary ones are
// hex-encoded.
func convertAttribute(attribute *pkcs12Attribute) (key, value string, err error) {
	isString := false

	switch {
	case attribute.Id.Equal(oidFriendlyName):
		key = "friendlyName"
		isString = true
	case attribute.Id.Equal(oidLocalKeyID):
		key = "localKeyId"
	case attribute.Id.Equal(oidMicrosoftCSPName):
		// This key is chosen to match OpenSSL.
		key = "Microsoft CSP Name"
		isString = true
	default:
		return "", "", errors.New("pkcs12: unknown attribute with OID " + attribute.Id.String())
	}

	if isString {
		if err := unmarshal(attribute.Value.Bytes, &attribute.Value); err != nil {
			return "", "", err
		}
		if value, err = decodeBMPString(attribute.Value.Bytes); err != nil {
			return "", "", err
		}
	} else {
		var id []byte
		if err := unmarshal(attribute.Value.Bytes, &id); err != nil {
			return "", "", err
		}
		value = hex.EncodeToString(id)
	}

	return key, value, nil
}

// Decode extracts a certificate and private key from pfxData. This function
// assumes that there is only one certificate and only one private key in the
// pfxData.
func Decode(pfxData []byte, password string) (privateKey interface{}, certificate *x509.Certificate, err error) {
	encodedPassword, err := bmpString(password)
	if err != nil {
		return nil, nil, err
	}

	bags, encodedPassword, err := getSafeContents(pfxData, encodedPassword)
	if err != nil {
		return nil, nil, err
	}

	if len(bags) != 2 {
		err = errors.New("pkcs12: expected exactly two safe bags in the PFX PDU")
		return
	}

	for _, bag := range bags {
		switch {
		case bag.Id.Equal(oidCertBag):
			// NOTE(review): on a duplicate cert bag err is set but the
			// loop continues (err is only returned via the final naked
			// return); later x/crypto revisions return immediately.
			if certificate != nil {
				err = errors.New("pkcs12: expected exactly one certificate bag")
			}

			certsData, err := decodeCertBag(bag.Value.Bytes)
			if err != nil {
				return nil, nil, err
			}
			certs, err := x509.ParseCertificates(certsData)
			if err != nil {
				return nil, nil, err
			}
			if len(certs) != 1 {
				err = errors.New("pkcs12: expected exactly one certificate in the certBag")
				return nil, nil, err
			}
			certificate = certs[0]

		case bag.Id.Equal(oidPKCS8ShroundedKeyBag):
			// NOTE(review): same duplicate-bag pattern as above.
			if privateKey != nil {
				err = errors.New("pkcs12: expected exactly one key bag")
			}

			if privateKey, err = decodePkcs8ShroudedKeyBag(bag.Value.Bytes, encodedPassword); err != nil {
				return nil, nil, err
			}
		}
	}

	if certificate == nil {
		return nil, nil, errors.New("pkcs12: certificate missing")
	}
	if privateKey == nil {
		return nil, nil, errors.New("pkcs12: private key missing")
	}

	return
}

// getSafeContents parses the outer PFX PDU, verifies its MAC against
// password, decrypts any EncryptedData item, and returns the
// concatenated safe bags. The returned password may differ from the
// input: an all-zero two-byte password that fails the MAC is retried as
// nil (some implementations encode the empty password that way).
func getSafeContents(p12Data, password []byte) (bags []safeBag, updatedPassword []byte, err error) {
	pfx := new(pfxPdu)
	if err := unmarshal(p12Data, pfx); err != nil {
		return nil, nil, errors.New("pkcs12: error reading P12 data: " + err.Error())
	}

	if pfx.Version != 3 {
		return nil, nil, NotImplementedError("can only decode v3 PFX PDU's")
	}

	if !pfx.AuthSafe.ContentType.Equal(oidDataContentType) {
		return nil, nil, NotImplementedError("only password-protected PFX is implemented")
	}

	// unmarshal the explicit bytes in the content for type 'data'
	if err := unmarshal(pfx.AuthSafe.Content.Bytes, &pfx.AuthSafe.Content); err != nil {
		return nil, nil, err
	}

	if len(pfx.MacData.Mac.Algorithm.Algorithm) == 0 {
		return nil, nil, errors.New("pkcs12: no MAC in data")
	}

	if err := verifyMac(&pfx.MacData, pfx.AuthSafe.Content.Bytes, password); err != nil {
		if err == ErrIncorrectPassword && len(password) == 2 && password[0] == 0 && password[1] == 0 {
			// some implementations use an empty byte array
			// for the empty string password try one more
			// time with empty-empty password
			password = nil
			err = verifyMac(&pfx.MacData, pfx.AuthSafe.Content.Bytes, password)
		}
		if err != nil {
			return nil, nil, err
		}
	}

	var authenticatedSafe []contentInfo
	if err := unmarshal(pfx.AuthSafe.Content.Bytes, &authenticatedSafe); err != nil {
		return nil, nil, err
	}

	if len(authenticatedSafe) != 2 {
		return nil, nil, NotImplementedError("expected exactly two items in the authenticated safe")
	}

	for _, ci := range authenticatedSafe {
		var data []byte

		switch {
		case ci.ContentType.Equal(oidDataContentType):
			if err := unmarshal(ci.Content.Bytes, &data); err != nil {
				return nil, nil, err
			}
		case ci.ContentType.Equal(oidEncryptedDataContentType):
			var encryptedData encryptedData
			if err := unmarshal(ci.Content.Bytes, &encryptedData); err != nil {
				return nil, nil, err
			}
			if encryptedData.Version != 0 {
				return nil, nil, NotImplementedError("only version 0 of EncryptedData is supported")
			}
			if data, err = pbDecrypt(encryptedData.EncryptedContentInfo, password); err != nil {
				return nil, nil, err
			}
		default:
			return nil, nil, NotImplementedError("only data and encryptedData content types are supported in authenticated safe")
		}

		var safeContents []safeBag
		if err := unmarshal(data, &safeContents); err != nil {
			return nil, nil, err
		}
		bags = append(bags, safeContents...)
	}

	return bags, password, nil
}

// --- pkcs12_test.go ---

// TestPfx decodes each fixture in testdata and checks that the private
// key validates and the certificate's common name matches the map key.
func TestPfx(t *testing.T) {
	for commonName, base64P12 := range testdata {
		p12, _ := base64.StdEncoding.DecodeString(base64P12)

		priv, cert, err := Decode(p12, "")
		if err != nil {
			t.Fatal(err)
		}

		if err := priv.(*rsa.PrivateKey).Validate(); err != nil {
			t.Errorf("error while validating private key: %v", err)
		}

		if cert.Subject.CommonName != commonName {
			t.Errorf("expected common name to be %q, but found %q", commonName, cert.Subject.CommonName)
		}
	}
}

// TestPEM round-trips each fixture through ToPEM and verifies the
// resulting blocks form a usable tls key pair.
func TestPEM(t *testing.T) {
	for commonName, base64P12 := range testdata {
		p12, _ := base64.StdEncoding.DecodeString(base64P12)

		blocks, err := ToPEM(p12, "")
		if err != nil {
			t.Fatalf("error while converting to PEM: %s", err)
		}

		var pemData []byte
		for _, b := range blocks {
			pemData = append(pemData, pem.EncodeToMemory(b)...)
		}

		cert, err := tls.X509KeyPair(pemData, pemData)
		if err != nil {
			t.Errorf("err while converting to key pair: %v", err)
		}
		config := tls.Config{
			Certificates: []tls.Certificate{cert},
		}
		config.BuildNameToCertificate()

		if _, exists := config.NameToCertificate[commonName]; !exists {
			t.Errorf("did not find our cert in PEM?: %v", config.NameToCertificate)
		}
	}
}
CA+gwggPk==`) + + blocks, err := ToPEM(p12, "password") + if err != nil { + panic(err) + } + + var pemData []byte + for _, b := range blocks { + pemData = append(pemData, pem.EncodeToMemory(b)...) + } + + // then use PEM data for tls to construct tls certificate: + cert, err := tls.X509KeyPair(pemData, pemData) + if err != nil { + panic(err) + } + + config := &tls.Config{ + Certificates: []tls.Certificate{cert}, + } + + _ = config +} + +var testdata = map[string]string{ + // 'null' password test case + "Windows Azure Tools": `MIIKDAIBAzCCCcwGCSqGSIb3DQEHAaCCCb0Eggm5MIIJtTCCBe4GCSqGSIb3DQEHAaCCBd8EggXbMIIF1zCCBdMGCyqGSIb3DQEMCgECoIIE7jCCBOowHAYKKoZIhvcNAQwBAzAOBAhStUNnlTGV+gICB9AEggTIJ81JIossF6boFWpPtkiQRPtI6DW6e9QD4/WvHAVrM2bKdpMzSMsCML5NyuddANTKHBVq00Jc9keqGNAqJPKkjhSUebzQFyhe0E1oI9T4zY5UKr/I8JclOeccH4QQnsySzYUG2SnniXnQ+JrG3juetli7EKth9h6jLc6xbubPadY5HMB3wL/eG/kJymiXwU2KQ9Mgd4X6jbcV+NNCE/8jbZHvSTCPeYTJIjxfeX61Sj5kFKUCzERbsnpyevhY3X0eYtEDezZQarvGmXtMMdzf8HJHkWRdk9VLDLgjk8uiJif/+X4FohZ37ig0CpgC2+dP4DGugaZZ51hb8tN9GeCKIsrmWogMXDIVd0OACBp/EjJVmFB6y0kUCXxUE0TZt0XA1tjAGJcjDUpBvTntZjPsnH/4ZySy+s2d9OOhJ6pzRQBRm360TzkFdSwk9DLiLdGfv4pwMMu/vNGBlqjP/1sQtj+jprJiD1sDbCl4AdQZVoMBQHadF2uSD4/o17XG/Ci0r2h6Htc2yvZMAbEY4zMjjIn2a+vqIxD6onexaek1R3zbkS9j19D6EN9EWn8xgz80YRCyW65znZk8xaIhhvlU/mg7sTxeyuqroBZNcq6uDaQTehDpyH7bY2l4zWRpoj10a6JfH2q5shYz8Y6UZC/kOTfuGqbZDNZWro/9pYquvNNW0M847E5t9bsf9VkAAMHRGBbWoVoU9VpI0UnoXSfvpOo+aXa2DSq5sHHUTVY7A9eov3z5IqT+pligx11xcs+YhDWcU8di3BTJisohKvv5Y8WSkm/rloiZd4ig269k0jTRk1olP/vCksPli4wKG2wdsd5o42nX1yL7mFfXocOANZbB+5qMkiwdyoQSk+Vq+C8nAZx2bbKhUq2MbrORGMzOe0Hh0x2a0PeObycN1Bpyv7Mp3ZI9h5hBnONKCnqMhtyQHUj/nNvbJUnDVYNfoOEqDiEqqEwB7YqWzAKz8KW0OIqdlM8uiQ4JqZZlFllnWJUfaiDrdFM3lYSnFQBkzeVlts6GpDOOBjCYd7dcCNS6kq6pZC6p6HN60Twu0JnurZD6RT7rrPkIGE8vAenFt4iGe/yF52fahCSY8Ws4K0UTwN7bAS+4xRHVCWvE8sMRZsRCHizb5laYsVrPZJhE6+hux6OBb6w8kwPYXc+ud5v6UxawUWgt6uPwl8mlAtU9Z7Miw4Nn/wtBkiLL/ke1UI1gqJtcQXgHxx6mzsjh41+nAgTvdbsSEyU6vfOmxGj3Rwc1eOrIhJUqn5YjOWfzzsz/D5DzWKmwXIwdspt1p+u+kol1N3f2
wT9fKPnd/RGCb4g/1hc3Aju4DQYgGY782l89CEEdalpQ/35bQczMFk6Fje12HykakWEXd/bGm9Unh82gH84USiRpeOfQvBDYoqEyrY3zkFZzBjhDqa+jEcAj41tcGx47oSfDq3iVYCdL7HSIjtnyEktVXd7mISZLoMt20JACFcMw+mrbjlug+eU7o2GR7T+LwtOp/p4LZqyLa7oQJDwde1BNZtm3TCK2P1mW94QDL0nDUps5KLtr1DaZXEkRbjSJub2ZE9WqDHyU3KA8G84Tq/rN1IoNu/if45jacyPje1Npj9IftUZSP22nV7HMwZtwQ4P4MYHRMBMGCSqGSIb3DQEJFTEGBAQBAAAAMFsGCSqGSIb3DQEJFDFOHkwAewBCADQAQQA0AEYARQBCADAALQBBADEAOABBAC0ANAA0AEIAQgAtAEIANQBGADIALQA0ADkAMQBFAEYAMQA1ADIAQgBBADEANgB9MF0GCSsGAQQBgjcRATFQHk4ATQBpAGMAcgBvAHMAbwBmAHQAIABTAG8AZgB0AHcAYQByAGUAIABLAGUAeQAgAFMAdABvAHIAYQBnAGUAIABQAHIAbwB2AGkAZABlAHIwggO/BgkqhkiG9w0BBwagggOwMIIDrAIBADCCA6UGCSqGSIb3DQEHATAcBgoqhkiG9w0BDAEGMA4ECEBk5ZAYpu0WAgIH0ICCA3hik4mQFGpw9Ha8TQPtk+j2jwWdxfF0+sTk6S8PTsEfIhB7wPltjiCK92Uv2tCBQnodBUmatIfkpnRDEySmgmdglmOCzj204lWAMRs94PoALGn3JVBXbO1vIDCbAPOZ7Z0Hd0/1t2hmk8v3//QJGUg+qr59/4y/MuVfIg4qfkPcC2QSvYWcK3oTf6SFi5rv9B1IOWFgN5D0+C+x/9Lb/myPYX+rbOHrwtJ4W1fWKoz9g7wwmGFA9IJ2DYGuH8ifVFbDFT1Vcgsvs8arSX7oBsJVW0qrP7XkuDRe3EqCmKW7rBEwYrFznhxZcRDEpMwbFoSvgSIZ4XhFY9VKYglT+JpNH5iDceYEBOQL4vBLpxNUk3l5jKaBNxVa14AIBxq18bVHJ+STInhLhad4u10v/Xbx7wIL3f9DX1yLAkPrpBYbNHS2/ew6H/ySDJnoIDxkw2zZ4qJ+qUJZ1S0lbZVG+VT0OP5uF6tyOSpbMlcGkdl3z254n6MlCrTifcwkzscysDsgKXaYQw06rzrPW6RDub+t+hXzGny799fS9jhQMLDmOggaQ7+LA4oEZsfT89HLMWxJYDqjo3gIfjciV2mV54R684qLDS+AO09U49e6yEbwGlq8lpmO/pbXCbpGbB1b3EomcQbxdWxW2WEkkEd/VBn81K4M3obmywwXJkw+tPXDXfBmzzaqqCR+onMQ5ME1nMkY8ybnfoCc1bDIupjVWsEL2Wvq752RgI6KqzVNr1ew1IdqV5AWN2fOfek+0vi3Jd9FHF3hx8JMwjJL9dZsETV5kHtYJtE7wJ23J68BnCt2eI0GEuwXcCf5EdSKN/xXCTlIokc4Qk/gzRdIZsvcEJ6B1lGovKG54X4IohikqTjiepjbsMWj38yxDmK3mtENZ9ci8FPfbbvIEcOCZIinuY3qFUlRSbx7VUerEoV1IP3clUwexVQo4lHFee2jd7ocWsdSqSapW7OWUupBtDzRkqVhE7tGria+i1W2d6YLlJ21QTjyapWJehAMO637OdbJCCzDs1cXbodRRE7bsP492ocJy8OX66rKdhYbg8srSFNKdb3pF3UDNbN9jhI/t8iagRhNBhlQtTr1me2E/c86Q18qcRXl4bcXTt6acgCeffK6Y26LcVlrgjlD33AEYRRUeyC+rpxbT0aMjdFderlndKRIyG23mSp0HaUwNzAfMAcGBSsOAwIaBBRlviCbIyRrhIysg2dc/KbLFTc2vQQUg4rfwHMM4IKYRD/fsd1x6dda+wQ=`, + // empty string password 
test case + "testing@example.com": `MIIJzgIBAzCCCZQGCSqGSIb3DQEHAaCCCYUEggmBMIIJfTCCA/cGCSqGSIb3DQEHBqCCA+gwggPk +AgEAMIID3QYJKoZIhvcNAQcBMBwGCiqGSIb3DQEMAQYwDgQIIszfRGqcmPcCAggAgIIDsOZ9Eg1L +s5Wx8JhYoV3HAL4aRnkAWvTYB5NISZOgSgIQTssmt/3A7134dibTmaT/93LikkL3cTKLnQzJ4wDf +YZ1bprpVJvUqz+HFT79m27bP9zYXFrvxWBJbxjYKTSjQMgz+h8LAEpXXGajCmxMJ1oCOtdXkhhzc +LdZN6SAYgtmtyFnCdMEDskSggGuLb3fw84QEJ/Sj6FAULXunW/CPaS7Ce0TMsKmNU/jfFWj3yXXw +ro0kwjKiVLpVFlnBlHo2OoVU7hmkm59YpGhLgS7nxLD3n7nBroQ0ID1+8R01NnV9XLGoGzxMm1te +6UyTCkr5mj+kEQ8EP1Ys7g/TC411uhVWySMt/rcpkx7Vz1r9kYEAzJpONAfr6cuEVkPKrxpq4Fh0 +2fzlKBky0i/hrfIEUmngh+ERHUb/Mtv/fkv1j5w9suESbhsMLLiCXAlsP1UWMX+3bNizi3WVMEts +FM2k9byn+p8IUD/A8ULlE4kEaWeoc+2idkCNQkLGuIdGUXUFVm58se0auUkVRoRJx8x4CkMesT8j +b1H831W66YRWoEwwDQp2kK1lA2vQXxdVHWlFevMNxJeromLzj3ayiaFrfByeUXhR2S+Hpm+c0yNR +4UVU9WED2kacsZcpRm9nlEa5sr28mri5JdBrNa/K02OOhvKCxr5ZGmbOVzUQKla2z4w+Ku9k8POm +dfDNU/fGx1b5hcFWtghXe3msWVsSJrQihnN6q1ughzNiYZlJUGcHdZDRtiWwCFI0bR8h/Dmg9uO9 +4rawQQrjIRT7B8yF3UbkZyAqs8Ppb1TsMeNPHh1rxEfGVQknh/48ouJYsmtbnzugTUt3mJCXXiL+ +XcPMV6bBVAUu4aaVKSmg9+yJtY4/VKv10iw88ktv29fViIdBe3t6l/oPuvQgbQ8dqf4T8w0l/uKZ +9lS1Na9jfT1vCoS7F5TRi+tmyj1vL5kr/amEIW6xKEP6oeAMvCMtbPAzVEj38zdJ1R22FfuIBxkh +f0Zl7pdVbmzRxl/SBx9iIBJSqAvcXItiT0FIj8HxQ+0iZKqMQMiBuNWJf5pYOLWGrIyntCWwHuaQ +wrx0sTGuEL9YXLEAsBDrsvzLkx/56E4INGZFrH8G7HBdW6iGqb22IMI4GHltYSyBRKbB0gadYTyv +abPEoqww8o7/85aPSzOTJ/53ozD438Q+d0u9SyDuOb60SzCD/zPuCEd78YgtXJwBYTuUNRT27FaM +3LGMX8Hz+6yPNRnmnA2XKPn7dx/IlaqAjIs8MIIFfgYJKoZIhvcNAQcBoIIFbwSCBWswggVnMIIF +YwYLKoZIhvcNAQwKAQKgggTuMIIE6jAcBgoqhkiG9w0BDAEDMA4ECJr0cClYqOlcAgIIAASCBMhe +OQSiP2s0/46ONXcNeVAkz2ksW3u/+qorhSiskGZ0b3dFa1hhgBU2Q7JVIkc4Hf7OXaT1eVQ8oqND +uhqsNz83/kqYo70+LS8Hocj49jFgWAKrf/yQkdyP1daHa2yzlEw4mkpqOfnIORQHvYCa8nEApspZ +wVu8y6WVuLHKU67mel7db2xwstQp7PRuSAYqGjTfAylElog8ASdaqqYbYIrCXucF8iF9oVgmb/Qo +xrXshJ9aSLO4MuXlTPELmWgj07AXKSb90FKNihE+y0bWb9LPVFY1Sly3AX9PfrtkSXIZwqW3phpv +MxGxQl/R6mr1z+hlTfY9Wdpb5vlKXPKA0L0Rt8d2pOesylFi6esJoS01QgP1kJILjbrV731kvDc0 
+Jsd+Oxv4BMwA7ClG8w1EAOInc/GrV1MWFGw/HeEqj3CZ/l/0jv9bwkbVeVCiIhoL6P6lVx9pXq4t +KZ0uKg/tk5TVJmG2vLcMLvezD0Yk3G2ZOMrywtmskrwoF7oAUpO9e87szoH6fEvUZlkDkPVW1NV4 +cZk3DBSQiuA3VOOg8qbo/tx/EE3H59P0axZWno2GSB0wFPWd1aj+b//tJEJHaaNR6qPRj4IWj9ru +Qbc8eRAcVWleHg8uAehSvUXlFpyMQREyrnpvMGddpiTC8N4UMrrBRhV7+UbCOWhxPCbItnInBqgl +1JpSZIP7iUtsIMdu3fEC2cdbXMTRul+4rdzUR7F9OaezV3jjvcAbDvgbK1CpyC+MJ1Mxm/iTgk9V +iUArydhlR8OniN84GyGYoYCW9O/KUwb6ASmeFOu/msx8x6kAsSQHIkKqMKv0TUR3kZnkxUvdpBGP +KTl4YCTvNGX4dYALBqrAETRDhua2KVBD/kEttDHwBNVbN2xi81+Mc7ml461aADfk0c66R/m2sjHB +2tN9+wG12OIWFQjL6wF/UfJMYamxx2zOOExiId29Opt57uYiNVLOO4ourPewHPeH0u8Gz35aero7 +lkt7cZAe1Q0038JUuE/QGlnK4lESK9UkSIQAjSaAlTsrcfwtQxB2EjoOoLhwH5mvxUEmcNGNnXUc +9xj3M5BD3zBz3Ft7G3YMMDwB1+zC2l+0UG0MGVjMVaeoy32VVNvxgX7jk22OXG1iaOB+PY9kdk+O +X+52BGSf/rD6X0EnqY7XuRPkMGgjtpZeAYxRQnFtCZgDY4wYheuxqSSpdF49yNczSPLkgB3CeCfS ++9NTKN7aC6hBbmW/8yYh6OvSiCEwY0lFS/T+7iaVxr1loE4zI1y/FFp4Pe1qfLlLttVlkygga2UU +SCunTQ8UB/M5IXWKkhMOO11dP4niWwb39Y7pCWpau7mwbXOKfRPX96cgHnQJK5uG+BesDD1oYnX0 +6frN7FOnTSHKruRIwuI8KnOQ/I+owmyz71wiv5LMQt+yM47UrEjB/EZa5X8dpEwOZvkdqL7utcyo +l0XH5kWMXdW856LL/FYftAqJIDAmtX1TXF/rbP6mPyN/IlDC0gjP84Uzd/a2UyTIWr+wk49Ek3vQ +/uDamq6QrwAxVmNh5Tset5Vhpc1e1kb7mRMZIzxSP8JcTuYd45oFKi98I8YjvueHVZce1g7OudQP +SbFQoJvdT46iBg1TTatlltpOiH2mFaxWVS0xYjAjBgkqhkiG9w0BCRUxFgQUdA9eVqvETX4an/c8 +p8SsTugkit8wOwYJKoZIhvcNAQkUMS4eLABGAHIAaQBlAG4AZABsAHkAIABuAGEAbQBlACAAZgBv +AHIAIABjAGUAcgB0MDEwITAJBgUrDgMCGgUABBRFsNz3Zd1O1GI8GTuFwCWuDOjEEwQIuBEfIcAy +HQ8CAggA`, +} diff -Nru juju-core-2.0~beta15/src/golang.org/x/crypto/pkcs12/safebags.go juju-core-2.0.0/src/golang.org/x/crypto/pkcs12/safebags.go --- juju-core-2.0~beta15/src/golang.org/x/crypto/pkcs12/safebags.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/golang.org/x/crypto/pkcs12/safebags.go 2016-10-13 14:32:00.000000000 +0000 @@ -0,0 +1,57 @@ +// Copyright 2015 The Go Authors. All rights reserved. 
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package pkcs12

import (
	"crypto/x509"
	"encoding/asn1"
	"errors"
)

var (
	// see https://tools.ietf.org/html/rfc7292#appendix-D
	oidCertTypeX509Certificate = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 9, 22, 1})
	oidPKCS8ShroundedKeyBag    = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 12, 10, 1, 2})
	oidCertBag                 = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 12, 10, 1, 3})
)

// certBag is the CertBag structure from RFC 7292 section 4.2.3.
type certBag struct {
	Id   asn1.ObjectIdentifier
	Data []byte `asn1:"tag:0,explicit"`
}

// decodePkcs8ShroudedKeyBag decrypts a pkcs8ShroudedKeyBag with the
// given password and parses the resulting PKCS#8 private key.
func decodePkcs8ShroudedKeyBag(asn1Data, password []byte) (privateKey interface{}, err error) {
	pkinfo := new(encryptedPrivateKeyInfo)
	if err = unmarshal(asn1Data, pkinfo); err != nil {
		return nil, errors.New("pkcs12: error decoding PKCS#8 shrouded key bag: " + err.Error())
	}

	pkData, err := pbDecrypt(pkinfo, password)
	if err != nil {
		return nil, errors.New("pkcs12: error decrypting PKCS#8 shrouded key bag: " + err.Error())
	}

	// Unmarshal into a RawValue first so that a wrong password (which
	// yields garbage or trailing bytes) is reported as an unmarshaling
	// error before the data is handed to the x509 parser.
	ret := new(asn1.RawValue)
	if err = unmarshal(pkData, ret); err != nil {
		return nil, errors.New("pkcs12: error unmarshaling decrypted private key: " + err.Error())
	}

	if privateKey, err = x509.ParsePKCS8PrivateKey(pkData); err != nil {
		return nil, errors.New("pkcs12: error parsing PKCS#8 private key: " + err.Error())
	}

	return privateKey, nil
}

// decodeCertBag extracts the raw DER certificate bytes from a certBag,
// rejecting any certificate type other than X.509.
func decodeCertBag(asn1Data []byte) (x509Certificates []byte, err error) {
	bag := new(certBag)
	if err := unmarshal(asn1Data, bag); err != nil {
		return nil, errors.New("pkcs12: error decoding cert bag: " + err.Error())
	}
	if !bag.Id.Equal(oidCertTypeX509Certificate) {
		return nil, NotImplementedError("only X509 certificates are supported")
	}
	return bag.Data, nil
}
juju-core-2.0~beta15/src/golang.org/x/crypto/scrypt/scrypt.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/golang.org/x/crypto/scrypt/scrypt.go 2016-10-13 14:32:00.000000000 +0000 @@ -218,7 +218,7 @@ // For example, you can get a derived key for e.g. AES-256 (which needs a // 32-byte key) by doing: // -// dk := scrypt.Key([]byte("some password"), salt, 16384, 8, 1, 32) +// dk, err := scrypt.Key([]byte("some password"), salt, 16384, 8, 1, 32) // // The recommended parameters for interactive logins as of 2009 are N=16384, // r=8, p=1. They should be increased as memory latency and CPU parallelism diff -Nru juju-core-2.0~beta15/src/golang.org/x/crypto/sha3/keccakf_amd64.go juju-core-2.0.0/src/golang.org/x/crypto/sha3/keccakf_amd64.go --- juju-core-2.0~beta15/src/golang.org/x/crypto/sha3/keccakf_amd64.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/golang.org/x/crypto/sha3/keccakf_amd64.go 2016-10-13 14:32:00.000000000 +0000 @@ -0,0 +1,13 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build amd64,!appengine,!gccgo + +package sha3 + +// This function is implemented in keccakf_amd64.s. + +//go:noescape + +func keccakF1600(a *[25]uint64) diff -Nru juju-core-2.0~beta15/src/golang.org/x/crypto/sha3/keccakf_amd64.s juju-core-2.0.0/src/golang.org/x/crypto/sha3/keccakf_amd64.s --- juju-core-2.0~beta15/src/golang.org/x/crypto/sha3/keccakf_amd64.s 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/golang.org/x/crypto/sha3/keccakf_amd64.s 2016-10-13 14:32:00.000000000 +0000 @@ -0,0 +1,392 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build amd64,!appengine,!gccgo + +// This code was translated into a form compatible with 6a from the public +// domain sources at https://github.com/gvanas/KeccakCodePackage + +// Offsets in state +#define _ba (0*8) +#define _be (1*8) +#define _bi (2*8) +#define _bo (3*8) +#define _bu (4*8) +#define _ga (5*8) +#define _ge (6*8) +#define _gi (7*8) +#define _go (8*8) +#define _gu (9*8) +#define _ka (10*8) +#define _ke (11*8) +#define _ki (12*8) +#define _ko (13*8) +#define _ku (14*8) +#define _ma (15*8) +#define _me (16*8) +#define _mi (17*8) +#define _mo (18*8) +#define _mu (19*8) +#define _sa (20*8) +#define _se (21*8) +#define _si (22*8) +#define _so (23*8) +#define _su (24*8) + +// Temporary registers +#define rT1 AX + +// Round vars +#define rpState DI +#define rpStack SP + +#define rDa BX +#define rDe CX +#define rDi DX +#define rDo R8 +#define rDu R9 + +#define rBa R10 +#define rBe R11 +#define rBi R12 +#define rBo R13 +#define rBu R14 + +#define rCa SI +#define rCe BP +#define rCi rBi +#define rCo rBo +#define rCu R15 + +#define MOVQ_RBI_RCE MOVQ rBi, rCe +#define XORQ_RT1_RCA XORQ rT1, rCa +#define XORQ_RT1_RCE XORQ rT1, rCe +#define XORQ_RBA_RCU XORQ rBa, rCu +#define XORQ_RBE_RCU XORQ rBe, rCu +#define XORQ_RDU_RCU XORQ rDu, rCu +#define XORQ_RDA_RCA XORQ rDa, rCa +#define XORQ_RDE_RCE XORQ rDe, rCe + +#define mKeccakRound(iState, oState, rc, B_RBI_RCE, G_RT1_RCA, G_RT1_RCE, G_RBA_RCU, K_RT1_RCA, K_RT1_RCE, K_RBA_RCU, M_RT1_RCA, M_RT1_RCE, M_RBE_RCU, S_RDU_RCU, S_RDA_RCA, S_RDE_RCE) \ + /* Prepare round */ \ + MOVQ rCe, rDa; \ + ROLQ $1, rDa; \ + \ + MOVQ _bi(iState), rCi; \ + XORQ _gi(iState), rDi; \ + XORQ rCu, rDa; \ + XORQ _ki(iState), rCi; \ + XORQ _mi(iState), rDi; \ + XORQ rDi, rCi; \ + \ + MOVQ rCi, rDe; \ + ROLQ $1, rDe; \ + \ + MOVQ _bo(iState), rCo; \ + XORQ _go(iState), rDo; \ + XORQ rCa, rDe; \ + XORQ _ko(iState), rCo; \ + XORQ _mo(iState), rDo; \ + XORQ rDo, rCo; \ + \ + MOVQ rCo, rDi; \ + ROLQ $1, rDi; \ + \ + MOVQ rCu, rDo; \ + XORQ 
rCe, rDi; \ + ROLQ $1, rDo; \ + \ + MOVQ rCa, rDu; \ + XORQ rCi, rDo; \ + ROLQ $1, rDu; \ + \ + /* Result b */ \ + MOVQ _ba(iState), rBa; \ + MOVQ _ge(iState), rBe; \ + XORQ rCo, rDu; \ + MOVQ _ki(iState), rBi; \ + MOVQ _mo(iState), rBo; \ + MOVQ _su(iState), rBu; \ + XORQ rDe, rBe; \ + ROLQ $44, rBe; \ + XORQ rDi, rBi; \ + XORQ rDa, rBa; \ + ROLQ $43, rBi; \ + \ + MOVQ rBe, rCa; \ + MOVQ rc, rT1; \ + ORQ rBi, rCa; \ + XORQ rBa, rT1; \ + XORQ rT1, rCa; \ + MOVQ rCa, _ba(oState); \ + \ + XORQ rDu, rBu; \ + ROLQ $14, rBu; \ + MOVQ rBa, rCu; \ + ANDQ rBe, rCu; \ + XORQ rBu, rCu; \ + MOVQ rCu, _bu(oState); \ + \ + XORQ rDo, rBo; \ + ROLQ $21, rBo; \ + MOVQ rBo, rT1; \ + ANDQ rBu, rT1; \ + XORQ rBi, rT1; \ + MOVQ rT1, _bi(oState); \ + \ + NOTQ rBi; \ + ORQ rBa, rBu; \ + ORQ rBo, rBi; \ + XORQ rBo, rBu; \ + XORQ rBe, rBi; \ + MOVQ rBu, _bo(oState); \ + MOVQ rBi, _be(oState); \ + B_RBI_RCE; \ + \ + /* Result g */ \ + MOVQ _gu(iState), rBe; \ + XORQ rDu, rBe; \ + MOVQ _ka(iState), rBi; \ + ROLQ $20, rBe; \ + XORQ rDa, rBi; \ + ROLQ $3, rBi; \ + MOVQ _bo(iState), rBa; \ + MOVQ rBe, rT1; \ + ORQ rBi, rT1; \ + XORQ rDo, rBa; \ + MOVQ _me(iState), rBo; \ + MOVQ _si(iState), rBu; \ + ROLQ $28, rBa; \ + XORQ rBa, rT1; \ + MOVQ rT1, _ga(oState); \ + G_RT1_RCA; \ + \ + XORQ rDe, rBo; \ + ROLQ $45, rBo; \ + MOVQ rBi, rT1; \ + ANDQ rBo, rT1; \ + XORQ rBe, rT1; \ + MOVQ rT1, _ge(oState); \ + G_RT1_RCE; \ + \ + XORQ rDi, rBu; \ + ROLQ $61, rBu; \ + MOVQ rBu, rT1; \ + ORQ rBa, rT1; \ + XORQ rBo, rT1; \ + MOVQ rT1, _go(oState); \ + \ + ANDQ rBe, rBa; \ + XORQ rBu, rBa; \ + MOVQ rBa, _gu(oState); \ + NOTQ rBu; \ + G_RBA_RCU; \ + \ + ORQ rBu, rBo; \ + XORQ rBi, rBo; \ + MOVQ rBo, _gi(oState); \ + \ + /* Result k */ \ + MOVQ _be(iState), rBa; \ + MOVQ _gi(iState), rBe; \ + MOVQ _ko(iState), rBi; \ + MOVQ _mu(iState), rBo; \ + MOVQ _sa(iState), rBu; \ + XORQ rDi, rBe; \ + ROLQ $6, rBe; \ + XORQ rDo, rBi; \ + ROLQ $25, rBi; \ + MOVQ rBe, rT1; \ + ORQ rBi, rT1; \ + XORQ rDe, rBa; \ + ROLQ $1, 
rBa; \ + XORQ rBa, rT1; \ + MOVQ rT1, _ka(oState); \ + K_RT1_RCA; \ + \ + XORQ rDu, rBo; \ + ROLQ $8, rBo; \ + MOVQ rBi, rT1; \ + ANDQ rBo, rT1; \ + XORQ rBe, rT1; \ + MOVQ rT1, _ke(oState); \ + K_RT1_RCE; \ + \ + XORQ rDa, rBu; \ + ROLQ $18, rBu; \ + NOTQ rBo; \ + MOVQ rBo, rT1; \ + ANDQ rBu, rT1; \ + XORQ rBi, rT1; \ + MOVQ rT1, _ki(oState); \ + \ + MOVQ rBu, rT1; \ + ORQ rBa, rT1; \ + XORQ rBo, rT1; \ + MOVQ rT1, _ko(oState); \ + \ + ANDQ rBe, rBa; \ + XORQ rBu, rBa; \ + MOVQ rBa, _ku(oState); \ + K_RBA_RCU; \ + \ + /* Result m */ \ + MOVQ _ga(iState), rBe; \ + XORQ rDa, rBe; \ + MOVQ _ke(iState), rBi; \ + ROLQ $36, rBe; \ + XORQ rDe, rBi; \ + MOVQ _bu(iState), rBa; \ + ROLQ $10, rBi; \ + MOVQ rBe, rT1; \ + MOVQ _mi(iState), rBo; \ + ANDQ rBi, rT1; \ + XORQ rDu, rBa; \ + MOVQ _so(iState), rBu; \ + ROLQ $27, rBa; \ + XORQ rBa, rT1; \ + MOVQ rT1, _ma(oState); \ + M_RT1_RCA; \ + \ + XORQ rDi, rBo; \ + ROLQ $15, rBo; \ + MOVQ rBi, rT1; \ + ORQ rBo, rT1; \ + XORQ rBe, rT1; \ + MOVQ rT1, _me(oState); \ + M_RT1_RCE; \ + \ + XORQ rDo, rBu; \ + ROLQ $56, rBu; \ + NOTQ rBo; \ + MOVQ rBo, rT1; \ + ORQ rBu, rT1; \ + XORQ rBi, rT1; \ + MOVQ rT1, _mi(oState); \ + \ + ORQ rBa, rBe; \ + XORQ rBu, rBe; \ + MOVQ rBe, _mu(oState); \ + \ + ANDQ rBa, rBu; \ + XORQ rBo, rBu; \ + MOVQ rBu, _mo(oState); \ + M_RBE_RCU; \ + \ + /* Result s */ \ + MOVQ _bi(iState), rBa; \ + MOVQ _go(iState), rBe; \ + MOVQ _ku(iState), rBi; \ + XORQ rDi, rBa; \ + MOVQ _ma(iState), rBo; \ + ROLQ $62, rBa; \ + XORQ rDo, rBe; \ + MOVQ _se(iState), rBu; \ + ROLQ $55, rBe; \ + \ + XORQ rDu, rBi; \ + MOVQ rBa, rDu; \ + XORQ rDe, rBu; \ + ROLQ $2, rBu; \ + ANDQ rBe, rDu; \ + XORQ rBu, rDu; \ + MOVQ rDu, _su(oState); \ + \ + ROLQ $39, rBi; \ + S_RDU_RCU; \ + NOTQ rBe; \ + XORQ rDa, rBo; \ + MOVQ rBe, rDa; \ + ANDQ rBi, rDa; \ + XORQ rBa, rDa; \ + MOVQ rDa, _sa(oState); \ + S_RDA_RCA; \ + \ + ROLQ $41, rBo; \ + MOVQ rBi, rDe; \ + ORQ rBo, rDe; \ + XORQ rBe, rDe; \ + MOVQ rDe, _se(oState); \ + S_RDE_RCE; \ + \ + 
MOVQ rBo, rDi; \ + MOVQ rBu, rDo; \ + ANDQ rBu, rDi; \ + ORQ rBa, rDo; \ + XORQ rBi, rDi; \ + XORQ rBo, rDo; \ + MOVQ rDi, _si(oState); \ + MOVQ rDo, _so(oState) \ + +// func keccakF1600(state *[25]uint64) +TEXT ·keccakF1600(SB), 0, $200-8 + MOVQ state+0(FP), rpState + SUBQ $(8*25), SP + + // Convert the user state into an internal state + NOTQ _be(rpState) + NOTQ _bi(rpState) + NOTQ _go(rpState) + NOTQ _ki(rpState) + NOTQ _mi(rpState) + NOTQ _sa(rpState) + + // Execute the KeccakF permutation + MOVQ _ba(rpState), rCa + MOVQ _be(rpState), rCe + MOVQ _bu(rpState), rCu + + XORQ _ga(rpState), rCa + XORQ _ge(rpState), rCe + XORQ _gu(rpState), rCu + + XORQ _ka(rpState), rCa + XORQ _ke(rpState), rCe + XORQ _ku(rpState), rCu + + XORQ _ma(rpState), rCa + XORQ _me(rpState), rCe + XORQ _mu(rpState), rCu + + XORQ _sa(rpState), rCa + XORQ _se(rpState), rCe + MOVQ _si(rpState), rDi + MOVQ _so(rpState), rDo + XORQ _su(rpState), rCu + + mKeccakRound(rpState, rpStack, $0x0000000000000001, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpStack, rpState, $0x0000000000008082, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpState, rpStack, $0x800000000000808a, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpStack, rpState, $0x8000000080008000, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpState, rpStack, $0x000000000000808b, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, 
XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpStack, rpState, $0x0000000080000001, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpState, rpStack, $0x8000000080008081, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpStack, rpState, $0x8000000000008009, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpState, rpStack, $0x000000000000008a, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpStack, rpState, $0x0000000000000088, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpState, rpStack, $0x0000000080008009, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpStack, rpState, $0x000000008000000a, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpState, rpStack, $0x000000008000808b, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, 
XORQ_RDE_RCE) + mKeccakRound(rpStack, rpState, $0x800000000000008b, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpState, rpStack, $0x8000000000008089, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpStack, rpState, $0x8000000000008003, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpState, rpStack, $0x8000000000008002, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpStack, rpState, $0x8000000000000080, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpState, rpStack, $0x000000000000800a, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpStack, rpState, $0x800000008000000a, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpState, rpStack, $0x8000000080008081, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpStack, rpState, $0x8000000000008080, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, 
XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpState, rpStack, $0x0000000080000001, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) + mKeccakRound(rpStack, rpState, $0x8000000080008008, NOP, NOP, NOP, NOP, NOP, NOP, NOP, NOP, NOP, NOP, NOP, NOP, NOP) + + // Revert the internal state to the user state + NOTQ _be(rpState) + NOTQ _bi(rpState) + NOTQ _go(rpState) + NOTQ _ki(rpState) + NOTQ _mi(rpState) + NOTQ _sa(rpState) + + ADDQ $(8*25), SP + RET diff -Nru juju-core-2.0~beta15/src/golang.org/x/crypto/sha3/keccakf.go juju-core-2.0.0/src/golang.org/x/crypto/sha3/keccakf.go --- juju-core-2.0~beta15/src/golang.org/x/crypto/sha3/keccakf.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/golang.org/x/crypto/sha3/keccakf.go 2016-10-13 14:32:00.000000000 +0000 @@ -2,6 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +// +build !amd64 appengine gccgo + package sha3 // rc stores the round constants for use in the ι step. diff -Nru juju-core-2.0~beta15/src/golang.org/x/crypto/sha3/sha3.go juju-core-2.0.0/src/golang.org/x/crypto/sha3/sha3.go --- juju-core-2.0~beta15/src/golang.org/x/crypto/sha3/sha3.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/golang.org/x/crypto/sha3/sha3.go 2016-10-13 14:32:00.000000000 +0000 @@ -42,7 +42,7 @@ storage [maxRate]byte // Specific to SHA-3 and SHAKE. 
- fixedOutput bool // whether this is a fixed-ouput-length instance + fixedOutput bool // whether this is a fixed-output-length instance outputLen int // the default output size in bytes state spongeDirection // whether the sponge is absorbing or squeezing } diff -Nru juju-core-2.0~beta15/src/golang.org/x/crypto/sha3/xor.go juju-core-2.0.0/src/golang.org/x/crypto/sha3/xor.go --- juju-core-2.0~beta15/src/golang.org/x/crypto/sha3/xor.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/golang.org/x/crypto/sha3/xor.go 2016-10-13 14:32:00.000000000 +0000 @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !amd64,!386 appengine +// +build !amd64,!386,!ppc64le appengine package sha3 diff -Nru juju-core-2.0~beta15/src/golang.org/x/crypto/sha3/xor_unaligned.go juju-core-2.0.0/src/golang.org/x/crypto/sha3/xor_unaligned.go --- juju-core-2.0~beta15/src/golang.org/x/crypto/sha3/xor_unaligned.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/golang.org/x/crypto/sha3/xor_unaligned.go 2016-10-13 14:32:00.000000000 +0000 @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build amd64 386 +// +build amd64 386 ppc64le // +build !appengine package sha3 diff -Nru juju-core-2.0~beta15/src/golang.org/x/crypto/ssh/agent/client.go juju-core-2.0.0/src/golang.org/x/crypto/ssh/agent/client.go --- juju-core-2.0~beta15/src/golang.org/x/crypto/ssh/agent/client.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/golang.org/x/crypto/ssh/agent/client.go 2016-10-13 14:32:00.000000000 +0000 @@ -2,12 +2,13 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -/* - Package agent implements a client to an ssh-agent daemon. 
- -References: - [PROTOCOL.agent]: http://cvsweb.openbsd.org/cgi-bin/cvsweb/src/usr.bin/ssh/PROTOCOL.agent?rev=HEAD -*/ +// Package agent implements the ssh-agent protocol, and provides both +// a client and a server. The client can talk to a standard ssh-agent +// that uses UNIX sockets, and one could implement an alternative +// ssh-agent process using the sample server. +// +// References: +// [PROTOCOL.agent]: http://cvsweb.openbsd.org/cgi-bin/cvsweb/src/usr.bin/ssh/PROTOCOL.agent?rev=HEAD package agent // import "golang.org/x/crypto/ssh/agent" import ( @@ -24,6 +25,7 @@ "math/big" "sync" + "golang.org/x/crypto/ed25519" "golang.org/x/crypto/ssh" ) @@ -75,7 +77,8 @@ // See [PROTOCOL.agent], section 3. const ( - agentRequestV1Identities = 1 + agentRequestV1Identities = 1 + agentRemoveAllV1Identities = 9 // 3.2 Requests from client to agent for protocol 2 key operations agentAddIdentity = 17 @@ -182,10 +185,13 @@ return k.Blob } -// Verify satisfies the ssh.PublicKey interface, but is not -// implemented for agent keys. +// Verify satisfies the ssh.PublicKey interface. 
func (k *Key) Verify(data []byte, sig *ssh.Signature) error { - return errors.New("agent: agent key does not know how to verify") + pubKey, err := ssh.ParsePublicKey(k.Blob) + if err != nil { + return fmt.Errorf("agent: bad public key: %v", err) + } + return pubKey.Verify(data, sig) } type wireKey struct { @@ -375,6 +381,8 @@ msg = new(identitiesAnswerAgentMsg) case agentSignResponse: msg = new(signResponseAgentMsg) + case agentV1IdentitiesAnswer: + msg = new(agentV1IdentityMsg) default: return nil, fmt.Errorf("agent: unknown type tag %d", packet[0]) } @@ -385,7 +393,7 @@ } type rsaKeyMsg struct { - Type string `sshtype:"17"` + Type string `sshtype:"17|25"` N *big.Int E *big.Int D *big.Int @@ -397,7 +405,7 @@ } type dsaKeyMsg struct { - Type string `sshtype:"17"` + Type string `sshtype:"17|25"` P *big.Int Q *big.Int G *big.Int @@ -408,7 +416,7 @@ } type ecdsaKeyMsg struct { - Type string `sshtype:"17"` + Type string `sshtype:"17|25"` Curve string KeyBytes []byte D *big.Int @@ -416,6 +424,14 @@ Constraints []byte `ssh:"rest"` } +type ed25519KeyMsg struct { + Type string `sshtype:"17|25"` + Pub []byte + Priv []byte + Comments string + Constraints []byte `ssh:"rest"` +} + // Insert adds a private key to the agent. 
func (c *client) insertKey(s interface{}, comment string, constraints []byte) error { var req []byte @@ -457,6 +473,14 @@ Comments: comment, Constraints: constraints, }) + case *ed25519.PrivateKey: + req = ssh.Marshal(ed25519KeyMsg{ + Type: ssh.KeyAlgoED25519, + Pub: []byte(*k)[32:], + Priv: []byte(*k), + Comments: comment, + Constraints: constraints, + }) default: return fmt.Errorf("agent: unsupported key type %T", s) } @@ -477,7 +501,7 @@ } type rsaCertMsg struct { - Type string `sshtype:"17"` + Type string `sshtype:"17|25"` CertBytes []byte D *big.Int Iqmp *big.Int // IQMP = Inverse Q Mod P @@ -488,7 +512,7 @@ } type dsaCertMsg struct { - Type string `sshtype:"17"` + Type string `sshtype:"17|25"` CertBytes []byte X *big.Int Comments string @@ -496,14 +520,23 @@ } type ecdsaCertMsg struct { - Type string `sshtype:"17"` + Type string `sshtype:"17|25"` CertBytes []byte D *big.Int Comments string Constraints []byte `ssh:"rest"` } -// Insert adds a private key to the agent. If a certificate is given, +type ed25519CertMsg struct { + Type string `sshtype:"17|25"` + CertBytes []byte + Pub []byte + Priv []byte + Comments string + Constraints []byte `ssh:"rest"` +} + +// Add adds a private key to the agent. If a certificate is given, // that certificate is added instead as public key. 
func (c *client) Add(key AddedKey) error { var constraints []byte @@ -547,17 +580,28 @@ }) case *dsa.PrivateKey: req = ssh.Marshal(dsaCertMsg{ - Type: cert.Type(), - CertBytes: cert.Marshal(), - X: k.X, - Comments: comment, + Type: cert.Type(), + CertBytes: cert.Marshal(), + X: k.X, + Comments: comment, + Constraints: constraints, }) case *ecdsa.PrivateKey: req = ssh.Marshal(ecdsaCertMsg{ - Type: cert.Type(), - CertBytes: cert.Marshal(), - D: k.D, - Comments: comment, + Type: cert.Type(), + CertBytes: cert.Marshal(), + D: k.D, + Comments: comment, + Constraints: constraints, + }) + case *ed25519.PrivateKey: + req = ssh.Marshal(ed25519CertMsg{ + Type: cert.Type(), + CertBytes: cert.Marshal(), + Pub: []byte(*k)[32:], + Priv: []byte(*k), + Comments: comment, + Constraints: constraints, }) default: return fmt.Errorf("agent: unsupported key type %T", s) diff -Nru juju-core-2.0~beta15/src/golang.org/x/crypto/ssh/agent/client_test.go juju-core-2.0.0/src/golang.org/x/crypto/ssh/agent/client_test.go --- juju-core-2.0~beta15/src/golang.org/x/crypto/ssh/agent/client_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/golang.org/x/crypto/ssh/agent/client_test.go 2016-10-13 14:32:00.000000000 +0000 @@ -14,6 +14,7 @@ "path/filepath" "strconv" "testing" + "time" "golang.org/x/crypto/ssh" ) @@ -85,6 +86,11 @@ testAgentInterface(t, agent, key, cert, lifetimeSecs) } +func testKeyring(t *testing.T, key interface{}, cert *ssh.Certificate, lifetimeSecs uint32) { + a := NewKeyring() + testAgentInterface(t, a, key, cert, lifetimeSecs) +} + func testAgentInterface(t *testing.T, agent Agent, key interface{}, cert *ssh.Certificate, lifetimeSecs uint32) { signer, err := ssh.NewSignerFromKey(key) if err != nil { @@ -136,11 +142,25 @@ if err := pubKey.Verify(data, sig); err != nil { t.Fatalf("Verify(%s): %v", pubKey.Type(), err) } + + // If the key has a lifetime, is it removed when it should be? 
+ if lifetimeSecs > 0 { + time.Sleep(time.Second*time.Duration(lifetimeSecs) + 100*time.Millisecond) + keys, err := agent.List() + if err != nil { + t.Fatalf("List: %v", err) + } + if len(keys) > 0 { + t.Fatalf("key not expired") + } + } + } func TestAgent(t *testing.T) { - for _, keyType := range []string{"rsa", "dsa", "ecdsa"} { + for _, keyType := range []string{"rsa", "dsa", "ecdsa", "ed25519"} { testAgent(t, testPrivateKeys[keyType], nil, 0) + testKeyring(t, testPrivateKeys[keyType], nil, 1) } } @@ -153,10 +173,7 @@ cert.SignCert(rand.Reader, testSigners["ecdsa"]) testAgent(t, testPrivateKeys["rsa"], cert, 0) -} - -func TestConstraints(t *testing.T) { - testAgent(t, testPrivateKeys["rsa"], nil, 3600 /* lifetime in seconds */) + testKeyring(t, testPrivateKeys["rsa"], cert, 1) } // netPipe is analogous to net.Pipe, but it uses a real net.Conn, and @@ -285,3 +302,42 @@ t.Errorf("Want 1 keys, got %v", keys) } } + +func TestAgentLifetime(t *testing.T) { + agent, _, cleanup := startAgent(t) + defer cleanup() + + for _, keyType := range []string{"rsa", "dsa", "ecdsa"} { + // Add private keys to the agent. + err := agent.Add(AddedKey{ + PrivateKey: testPrivateKeys[keyType], + Comment: "comment", + LifetimeSecs: 1, + }) + if err != nil { + t.Fatalf("add: %v", err) + } + // Add certs to the agent. 
+ cert := &ssh.Certificate{ + Key: testPublicKeys[keyType], + ValidBefore: ssh.CertTimeInfinity, + CertType: ssh.UserCert, + } + cert.SignCert(rand.Reader, testSigners[keyType]) + err = agent.Add(AddedKey{ + PrivateKey: testPrivateKeys[keyType], + Certificate: cert, + Comment: "comment", + LifetimeSecs: 1, + }) + if err != nil { + t.Fatalf("add: %v", err) + } + } + time.Sleep(1100 * time.Millisecond) + if keys, err := agent.List(); err != nil { + t.Errorf("List: %v", err) + } else if len(keys) != 0 { + t.Errorf("Want 0 keys, got %v", len(keys)) + } +} diff -Nru juju-core-2.0~beta15/src/golang.org/x/crypto/ssh/agent/example_test.go juju-core-2.0.0/src/golang.org/x/crypto/ssh/agent/example_test.go --- juju-core-2.0~beta15/src/golang.org/x/crypto/ssh/agent/example_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/golang.org/x/crypto/ssh/agent/example_test.go 2016-10-13 14:32:00.000000000 +0000 @@ -0,0 +1,40 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package agent_test + +import ( + "log" + "os" + "net" + + "golang.org/x/crypto/ssh" + "golang.org/x/crypto/ssh/agent" +) + +func ExampleClientAgent() { + // ssh-agent has a UNIX socket under $SSH_AUTH_SOCK + socket := os.Getenv("SSH_AUTH_SOCK") + conn, err := net.Dial("unix", socket) + if err != nil { + log.Fatalf("net.Dial: %v", err) + } + agentClient := agent.NewClient(conn) + config := &ssh.ClientConfig{ + User: "username", + Auth: []ssh.AuthMethod{ + // Use a callback rather than PublicKeys + // so we only consult the agent once the remote server + // wants it. + ssh.PublicKeysCallback(agentClient.Signers), + }, + } + + sshc, err := ssh.Dial("tcp", "localhost:22", config) + if err != nil { + log.Fatalf("Dial: %v", err) + } + // .. 
use sshc + sshc.Close() +} diff -Nru juju-core-2.0~beta15/src/golang.org/x/crypto/ssh/agent/keyring.go juju-core-2.0.0/src/golang.org/x/crypto/ssh/agent/keyring.go --- juju-core-2.0~beta15/src/golang.org/x/crypto/ssh/agent/keyring.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/golang.org/x/crypto/ssh/agent/keyring.go 2016-10-13 14:32:00.000000000 +0000 @@ -11,6 +11,7 @@ "errors" "fmt" "sync" + "time" "golang.org/x/crypto/ssh" ) @@ -18,6 +19,7 @@ type privKey struct { signer ssh.Signer comment string + expire *time.Time } type keyring struct { @@ -48,21 +50,15 @@ return nil } -// Remove removes all identities with the given public key. -func (r *keyring) Remove(key ssh.PublicKey) error { - r.mu.Lock() - defer r.mu.Unlock() - if r.locked { - return errLocked - } - - want := key.Marshal() +// removeLocked does the actual key removal. The caller must already be holding the +// keyring mutex. +func (r *keyring) removeLocked(want []byte) error { found := false for i := 0; i < len(r.keys); { if bytes.Equal(r.keys[i].signer.PublicKey().Marshal(), want) { found = true r.keys[i] = r.keys[len(r.keys)-1] - r.keys = r.keys[len(r.keys)-1:] + r.keys = r.keys[:len(r.keys)-1] continue } else { i++ @@ -75,7 +71,18 @@ return nil } -// Lock locks the agent. Sign and Remove will fail, and List will empty an empty list. +// Remove removes all identities with the given public key. +func (r *keyring) Remove(key ssh.PublicKey) error { + r.mu.Lock() + defer r.mu.Unlock() + if r.locked { + return errLocked + } + + return r.removeLocked(key.Marshal()) +} + +// Lock locks the agent. Sign and Remove will fail, and List will return an empty list. func (r *keyring) Lock(passphrase []byte) error { r.mu.Lock() defer r.mu.Unlock() @@ -104,6 +111,17 @@ return nil } +// expireKeysLocked removes expired keys from the keyring. If a key was added +// with a lifetimesecs contraint and seconds >= lifetimesecs seconds have +// ellapsed, it is removed. 
The caller *must* be holding the keyring mutex. +func (r *keyring) expireKeysLocked() { + for _, k := range r.keys { + if k.expire != nil && time.Now().After(*k.expire) { + r.removeLocked(k.signer.PublicKey().Marshal()) + } + } +} + // List returns the identities known to the agent. func (r *keyring) List() ([]*Key, error) { r.mu.Lock() @@ -113,6 +131,7 @@ return nil, nil } + r.expireKeysLocked() var ids []*Key for _, k := range r.keys { pub := k.signer.PublicKey() @@ -146,7 +165,17 @@ } } - r.keys = append(r.keys, privKey{signer, key.Comment}) + p := privKey{ + signer: signer, + comment: key.Comment, + } + + if key.LifetimeSecs > 0 { + t := time.Now().Add(time.Duration(key.LifetimeSecs) * time.Second) + p.expire = &t + } + + r.keys = append(r.keys, p) return nil } @@ -159,6 +188,7 @@ return nil, errLocked } + r.expireKeysLocked() wanted := key.Marshal() for _, k := range r.keys { if bytes.Equal(k.signer.PublicKey().Marshal(), wanted) { @@ -176,6 +206,7 @@ return nil, errLocked } + r.expireKeysLocked() s := make([]ssh.Signer, 0, len(r.keys)) for _, k := range r.keys { s = append(s, k.signer) diff -Nru juju-core-2.0~beta15/src/golang.org/x/crypto/ssh/agent/keyring_test.go juju-core-2.0.0/src/golang.org/x/crypto/ssh/agent/keyring_test.go --- juju-core-2.0~beta15/src/golang.org/x/crypto/ssh/agent/keyring_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/golang.org/x/crypto/ssh/agent/keyring_test.go 2016-10-13 14:32:00.000000000 +0000 @@ -0,0 +1,76 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package agent + +import "testing" + +func addTestKey(t *testing.T, a Agent, keyName string) { + err := a.Add(AddedKey{ + PrivateKey: testPrivateKeys[keyName], + Comment: keyName, + }) + if err != nil { + t.Fatalf("failed to add key %q: %v", keyName, err) + } +} + +func removeTestKey(t *testing.T, a Agent, keyName string) { + err := a.Remove(testPublicKeys[keyName]) + if err != nil { + t.Fatalf("failed to remove key %q: %v", keyName, err) + } +} + +func validateListedKeys(t *testing.T, a Agent, expectedKeys []string) { + listedKeys, err := a.List() + if err != nil { + t.Fatalf("failed to list keys: %v", err) + return + } + actualKeys := make(map[string]bool) + for _, key := range listedKeys { + actualKeys[key.Comment] = true + } + + matchedKeys := make(map[string]bool) + for _, expectedKey := range expectedKeys { + if !actualKeys[expectedKey] { + t.Fatalf("expected key %q, but was not found", expectedKey) + } else { + matchedKeys[expectedKey] = true + } + } + + for actualKey := range actualKeys { + if !matchedKeys[actualKey] { + t.Fatalf("key %q was found, but was not expected", actualKey) + } + } +} + +func TestKeyringAddingAndRemoving(t *testing.T) { + keyNames := []string{"dsa", "ecdsa", "rsa", "user"} + + // add all test private keys + k := NewKeyring() + for _, keyName := range keyNames { + addTestKey(t, k, keyName) + } + validateListedKeys(t, k, keyNames) + + // remove a key in the middle + keyToRemove := keyNames[1] + keyNames = append(keyNames[:1], keyNames[2:]...) 
+ + removeTestKey(t, k, keyToRemove) + validateListedKeys(t, k, keyNames) + + // remove all keys + err := k.RemoveAll() + if err != nil { + t.Fatalf("failed to remove all keys: %v", err) + } + validateListedKeys(t, k, []string{}) +} diff -Nru juju-core-2.0~beta15/src/golang.org/x/crypto/ssh/agent/server.go juju-core-2.0.0/src/golang.org/x/crypto/ssh/agent/server.go --- juju-core-2.0~beta15/src/golang.org/x/crypto/ssh/agent/server.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/golang.org/x/crypto/ssh/agent/server.go 2016-10-13 14:32:00.000000000 +0000 @@ -5,13 +5,18 @@ package agent import ( + "crypto/dsa" + "crypto/ecdsa" + "crypto/elliptic" "crypto/rsa" "encoding/binary" + "errors" "fmt" "io" "log" "math/big" + "golang.org/x/crypto/ed25519" "golang.org/x/crypto/ssh" ) @@ -49,6 +54,9 @@ return ssh.Marshal(&record) } +// See [PROTOCOL.agent], section 2.5.1. +const agentV1IdentitiesAnswer = 2 + type agentV1IdentityMsg struct { Numkeys uint32 `sshtype:"2"` } @@ -69,6 +77,10 @@ switch data[0] { case agentRequestV1Identities: return &agentV1IdentityMsg{0}, nil + + case agentRemoveAllV1Identities: + return nil, nil + case agentRemoveIdentity: var req agentRemoveIdentityMsg if err := ssh.Unmarshal(data, &req); err != nil { @@ -121,6 +133,7 @@ return nil, err } return &signResponseAgentMsg{SigBlob: ssh.Marshal(sig)}, nil + case agentRequestIdentities: keys, err := s.agent.List() if err != nil { @@ -134,42 +147,271 @@ rep.Keys = append(rep.Keys, marshalKey(k)...) 
} return rep, nil - case agentAddIdentity: + + case agentAddIdConstrained, agentAddIdentity: return nil, s.insertIdentity(data) } return nil, fmt.Errorf("unknown opcode %d", data[0]) } +func parseRSAKey(req []byte) (*AddedKey, error) { + var k rsaKeyMsg + if err := ssh.Unmarshal(req, &k); err != nil { + return nil, err + } + if k.E.BitLen() > 30 { + return nil, errors.New("agent: RSA public exponent too large") + } + priv := &rsa.PrivateKey{ + PublicKey: rsa.PublicKey{ + E: int(k.E.Int64()), + N: k.N, + }, + D: k.D, + Primes: []*big.Int{k.P, k.Q}, + } + priv.Precompute() + + return &AddedKey{PrivateKey: priv, Comment: k.Comments}, nil +} + +func parseEd25519Key(req []byte) (*AddedKey, error) { + var k ed25519KeyMsg + if err := ssh.Unmarshal(req, &k); err != nil { + return nil, err + } + priv := ed25519.PrivateKey(k.Priv) + return &AddedKey{PrivateKey: &priv, Comment: k.Comments}, nil +} + +func parseDSAKey(req []byte) (*AddedKey, error) { + var k dsaKeyMsg + if err := ssh.Unmarshal(req, &k); err != nil { + return nil, err + } + priv := &dsa.PrivateKey{ + PublicKey: dsa.PublicKey{ + Parameters: dsa.Parameters{ + P: k.P, + Q: k.Q, + G: k.G, + }, + Y: k.Y, + }, + X: k.X, + } + + return &AddedKey{PrivateKey: priv, Comment: k.Comments}, nil +} + +func unmarshalECDSA(curveName string, keyBytes []byte, privScalar *big.Int) (priv *ecdsa.PrivateKey, err error) { + priv = &ecdsa.PrivateKey{ + D: privScalar, + } + + switch curveName { + case "nistp256": + priv.Curve = elliptic.P256() + case "nistp384": + priv.Curve = elliptic.P384() + case "nistp521": + priv.Curve = elliptic.P521() + default: + return nil, fmt.Errorf("agent: unknown curve %q", curveName) + } + + priv.X, priv.Y = elliptic.Unmarshal(priv.Curve, keyBytes) + if priv.X == nil || priv.Y == nil { + return nil, errors.New("agent: point not on curve") + } + + return priv, nil +} + +func parseEd25519Cert(req []byte) (*AddedKey, error) { + var k ed25519CertMsg + if err := ssh.Unmarshal(req, &k); err != nil { + return 
nil, err + } + pubKey, err := ssh.ParsePublicKey(k.CertBytes) + if err != nil { + return nil, err + } + priv := ed25519.PrivateKey(k.Priv) + cert, ok := pubKey.(*ssh.Certificate) + if !ok { + return nil, errors.New("agent: bad ED25519 certificate") + } + return &AddedKey{PrivateKey: &priv, Certificate: cert, Comment: k.Comments}, nil +} + +func parseECDSAKey(req []byte) (*AddedKey, error) { + var k ecdsaKeyMsg + if err := ssh.Unmarshal(req, &k); err != nil { + return nil, err + } + + priv, err := unmarshalECDSA(k.Curve, k.KeyBytes, k.D) + if err != nil { + return nil, err + } + + return &AddedKey{PrivateKey: priv, Comment: k.Comments}, nil +} + +func parseRSACert(req []byte) (*AddedKey, error) { + var k rsaCertMsg + if err := ssh.Unmarshal(req, &k); err != nil { + return nil, err + } + + pubKey, err := ssh.ParsePublicKey(k.CertBytes) + if err != nil { + return nil, err + } + + cert, ok := pubKey.(*ssh.Certificate) + if !ok { + return nil, errors.New("agent: bad RSA certificate") + } + + // An RSA publickey as marshaled by rsaPublicKey.Marshal() in keys.go + var rsaPub struct { + Name string + E *big.Int + N *big.Int + } + if err := ssh.Unmarshal(cert.Key.Marshal(), &rsaPub); err != nil { + return nil, fmt.Errorf("agent: Unmarshal failed to parse public key: %v", err) + } + + if rsaPub.E.BitLen() > 30 { + return nil, errors.New("agent: RSA public exponent too large") + } + + priv := rsa.PrivateKey{ + PublicKey: rsa.PublicKey{ + E: int(rsaPub.E.Int64()), + N: rsaPub.N, + }, + D: k.D, + Primes: []*big.Int{k.Q, k.P}, + } + priv.Precompute() + + return &AddedKey{PrivateKey: &priv, Certificate: cert, Comment: k.Comments}, nil +} + +func parseDSACert(req []byte) (*AddedKey, error) { + var k dsaCertMsg + if err := ssh.Unmarshal(req, &k); err != nil { + return nil, err + } + pubKey, err := ssh.ParsePublicKey(k.CertBytes) + if err != nil { + return nil, err + } + cert, ok := pubKey.(*ssh.Certificate) + if !ok { + return nil, errors.New("agent: bad DSA certificate") + } + + 
// A DSA publickey as marshaled by dsaPublicKey.Marshal() in keys.go + var w struct { + Name string + P, Q, G, Y *big.Int + } + if err := ssh.Unmarshal(cert.Key.Marshal(), &w); err != nil { + return nil, fmt.Errorf("agent: Unmarshal failed to parse public key: %v", err) + } + + priv := &dsa.PrivateKey{ + PublicKey: dsa.PublicKey{ + Parameters: dsa.Parameters{ + P: w.P, + Q: w.Q, + G: w.G, + }, + Y: w.Y, + }, + X: k.X, + } + + return &AddedKey{PrivateKey: priv, Certificate: cert, Comment: k.Comments}, nil +} + +func parseECDSACert(req []byte) (*AddedKey, error) { + var k ecdsaCertMsg + if err := ssh.Unmarshal(req, &k); err != nil { + return nil, err + } + + pubKey, err := ssh.ParsePublicKey(k.CertBytes) + if err != nil { + return nil, err + } + cert, ok := pubKey.(*ssh.Certificate) + if !ok { + return nil, errors.New("agent: bad ECDSA certificate") + } + + // An ECDSA publickey as marshaled by ecdsaPublicKey.Marshal() in keys.go + var ecdsaPub struct { + Name string + ID string + Key []byte + } + if err := ssh.Unmarshal(cert.Key.Marshal(), &ecdsaPub); err != nil { + return nil, err + } + + priv, err := unmarshalECDSA(ecdsaPub.ID, ecdsaPub.Key, k.D) + if err != nil { + return nil, err + } + + return &AddedKey{PrivateKey: priv, Certificate: cert, Comment: k.Comments}, nil +} + func (s *server) insertIdentity(req []byte) error { var record struct { - Type string `sshtype:"17"` + Type string `sshtype:"17|25"` Rest []byte `ssh:"rest"` } + if err := ssh.Unmarshal(req, &record); err != nil { return err } + var addedKey *AddedKey + var err error + switch record.Type { case ssh.KeyAlgoRSA: - var k rsaKeyMsg - if err := ssh.Unmarshal(req, &k); err != nil { - return err - } - - priv := rsa.PrivateKey{ - PublicKey: rsa.PublicKey{ - E: int(k.E.Int64()), - N: k.N, - }, - D: k.D, - Primes: []*big.Int{k.P, k.Q}, - } - priv.Precompute() + addedKey, err = parseRSAKey(req) + case ssh.KeyAlgoDSA: + addedKey, err = parseDSAKey(req) + case ssh.KeyAlgoECDSA256, ssh.KeyAlgoECDSA384, 
ssh.KeyAlgoECDSA521: + addedKey, err = parseECDSAKey(req) + case ssh.KeyAlgoED25519: + addedKey, err = parseEd25519Key(req) + case ssh.CertAlgoRSAv01: + addedKey, err = parseRSACert(req) + case ssh.CertAlgoDSAv01: + addedKey, err = parseDSACert(req) + case ssh.CertAlgoECDSA256v01, ssh.CertAlgoECDSA384v01, ssh.CertAlgoECDSA521v01: + addedKey, err = parseECDSACert(req) + case ssh.CertAlgoED25519v01: + addedKey, err = parseEd25519Cert(req) + default: + return fmt.Errorf("agent: not implemented: %q", record.Type) + } - return s.agent.Add(AddedKey{PrivateKey: &priv, Comment: k.Comments}) + if err != nil { + return err } - return fmt.Errorf("not implemented: %s", record.Type) + return s.agent.Add(*addedKey) } // ServeAgent serves the agent protocol on the given connection. It diff -Nru juju-core-2.0~beta15/src/golang.org/x/crypto/ssh/agent/server_test.go juju-core-2.0.0/src/golang.org/x/crypto/ssh/agent/server_test.go --- juju-core-2.0~beta15/src/golang.org/x/crypto/ssh/agent/server_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/golang.org/x/crypto/ssh/agent/server_test.go 2016-10-13 14:32:00.000000000 +0000 @@ -5,6 +5,9 @@ package agent import ( + "crypto" + "crypto/rand" + "fmt" "testing" "golang.org/x/crypto/ssh" @@ -75,3 +78,130 @@ testAgentInterface(t, agentClient, testPrivateKeys["rsa"], nil, 0) conn.Close() } + +func TestV1ProtocolMessages(t *testing.T) { + c1, c2, err := netPipe() + if err != nil { + t.Fatalf("netPipe: %v", err) + } + defer c1.Close() + defer c2.Close() + c := NewClient(c1) + + go ServeAgent(NewKeyring(), c2) + + testV1ProtocolMessages(t, c.(*client)) +} + +func testV1ProtocolMessages(t *testing.T, c *client) { + reply, err := c.call([]byte{agentRequestV1Identities}) + if err != nil { + t.Fatalf("v1 request all failed: %v", err) + } + if msg, ok := reply.(*agentV1IdentityMsg); !ok || msg.Numkeys != 0 { + t.Fatalf("invalid request all response: %#v", reply) + } + + reply, err = c.call([]byte{agentRemoveAllV1Identities}) + if 
err != nil { + t.Fatalf("v1 remove all failed: %v", err) + } + if _, ok := reply.(*successAgentMsg); !ok { + t.Fatalf("invalid remove all response: %#v", reply) + } +} + +func verifyKey(sshAgent Agent) error { + keys, err := sshAgent.List() + if err != nil { + return fmt.Errorf("listing keys: %v", err) + } + + if len(keys) != 1 { + return fmt.Errorf("bad number of keys found. expected 1, got %d", len(keys)) + } + + buf := make([]byte, 128) + if _, err := rand.Read(buf); err != nil { + return fmt.Errorf("rand: %v", err) + } + + sig, err := sshAgent.Sign(keys[0], buf) + if err != nil { + return fmt.Errorf("sign: %v", err) + } + + if err := keys[0].Verify(buf, sig); err != nil { + return fmt.Errorf("verify: %v", err) + } + return nil +} + +func addKeyToAgent(key crypto.PrivateKey) error { + sshAgent := NewKeyring() + if err := sshAgent.Add(AddedKey{PrivateKey: key}); err != nil { + return fmt.Errorf("add: %v", err) + } + return verifyKey(sshAgent) +} + +func TestKeyTypes(t *testing.T) { + for k, v := range testPrivateKeys { + if err := addKeyToAgent(v); err != nil { + t.Errorf("error adding key type %s, %v", k, err) + } + if err := addCertToAgentSock(v, nil); err != nil { + t.Errorf("error adding key type %s, %v", k, err) + } + } +} + +func addCertToAgentSock(key crypto.PrivateKey, cert *ssh.Certificate) error { + a, b, err := netPipe() + if err != nil { + return err + } + agentServer := NewKeyring() + go ServeAgent(agentServer, a) + + agentClient := NewClient(b) + if err := agentClient.Add(AddedKey{PrivateKey: key, Certificate: cert}); err != nil { + return fmt.Errorf("add: %v", err) + } + return verifyKey(agentClient) +} + +func addCertToAgent(key crypto.PrivateKey, cert *ssh.Certificate) error { + sshAgent := NewKeyring() + if err := sshAgent.Add(AddedKey{PrivateKey: key, Certificate: cert}); err != nil { + return fmt.Errorf("add: %v", err) + } + return verifyKey(sshAgent) +} + +func TestCertTypes(t *testing.T) { + for keyType, key := range testPublicKeys { + cert 
:= &ssh.Certificate{ + ValidPrincipals: []string{"gopher1"}, + ValidAfter: 0, + ValidBefore: ssh.CertTimeInfinity, + Key: key, + Serial: 1, + CertType: ssh.UserCert, + SignatureKey: testPublicKeys["rsa"], + Permissions: ssh.Permissions{ + CriticalOptions: map[string]string{}, + Extensions: map[string]string{}, + }, + } + if err := cert.SignCert(rand.Reader, testSigners["rsa"]); err != nil { + t.Fatalf("signcert: %v", err) + } + if err := addCertToAgent(testPrivateKeys[keyType], cert); err != nil { + t.Fatalf("%v", err) + } + if err := addCertToAgentSock(testPrivateKeys[keyType], cert); err != nil { + t.Fatalf("%v", err) + } + } +} diff -Nru juju-core-2.0~beta15/src/golang.org/x/crypto/ssh/agent/testdata_test.go juju-core-2.0.0/src/golang.org/x/crypto/ssh/agent/testdata_test.go --- juju-core-2.0~beta15/src/golang.org/x/crypto/ssh/agent/testdata_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/golang.org/x/crypto/ssh/agent/testdata_test.go 2016-10-13 14:32:00.000000000 +0000 @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// IMPLEMENTOR NOTE: To avoid a package loop, this file is in three places: +// IMPLEMENTATION NOTE: To avoid a package loop, this file is in three places: // ssh/, ssh/agent, and ssh/test/. It should be kept in sync across all three // instances. 
diff -Nru juju-core-2.0~beta15/src/golang.org/x/crypto/ssh/certs.go juju-core-2.0.0/src/golang.org/x/crypto/ssh/certs.go --- juju-core-2.0~beta15/src/golang.org/x/crypto/ssh/certs.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/golang.org/x/crypto/ssh/certs.go 2016-10-13 14:32:00.000000000 +0000 @@ -22,6 +22,7 @@ CertAlgoECDSA256v01 = "ecdsa-sha2-nistp256-cert-v01@openssh.com" CertAlgoECDSA384v01 = "ecdsa-sha2-nistp384-cert-v01@openssh.com" CertAlgoECDSA521v01 = "ecdsa-sha2-nistp521-cert-v01@openssh.com" + CertAlgoED25519v01 = "ssh-ed25519-cert-v01@openssh.com" ) // Certificate types distinguish between host and user @@ -401,6 +402,7 @@ KeyAlgoECDSA256: CertAlgoECDSA256v01, KeyAlgoECDSA384: CertAlgoECDSA384v01, KeyAlgoECDSA521: CertAlgoECDSA521v01, + KeyAlgoED25519: CertAlgoED25519v01, } // certToPrivAlgo returns the underlying algorithm for a certificate algorithm. @@ -459,7 +461,7 @@ func (c *Certificate) Type() string { algo, ok := certAlgoNames[c.Key.Type()] if !ok { - panic("unknown cert key type") + panic("unknown cert key type " + c.Key.Type()) } return algo } diff -Nru juju-core-2.0~beta15/src/golang.org/x/crypto/ssh/channel.go juju-core-2.0.0/src/golang.org/x/crypto/ssh/channel.go --- juju-core-2.0~beta15/src/golang.org/x/crypto/ssh/channel.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/golang.org/x/crypto/ssh/channel.go 2016-10-13 14:32:00.000000000 +0000 @@ -67,6 +67,8 @@ // boolean, otherwise the return value will be false. Channel // requests are out-of-band messages so they may be sent even // if the data stream is closed or blocked by flow control. + // If the channel is closed before a reply is returned, io.EOF + // is returned. 
SendRequest(name string, wantReply bool, payload []byte) (bool, error) // Stderr returns an io.ReadWriter that writes to this channel @@ -217,7 +219,7 @@ func (c *channel) sendMessage(msg interface{}) error { if debugMux { - log.Printf("send %d: %#v", c.mux.chanList.offset, msg) + log.Printf("send(%d): %#v", c.mux.chanList.offset, msg) } p := Marshal(msg) @@ -371,7 +373,7 @@ close(c.msg) close(c.incomingRequests) c.writeMu.Lock() - // This is not necesary for a normal channel teardown, but if + // This is not necessary for a normal channel teardown, but if // there was another error, it is. c.sentClose = true c.writeMu.Unlock() diff -Nru juju-core-2.0~beta15/src/golang.org/x/crypto/ssh/cipher.go juju-core-2.0.0/src/golang.org/x/crypto/ssh/cipher.go --- juju-core-2.0~beta15/src/golang.org/x/crypto/ssh/cipher.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/golang.org/x/crypto/ssh/cipher.go 2016-10-13 14:32:00.000000000 +0000 @@ -7,6 +7,7 @@ import ( "crypto/aes" "crypto/cipher" + "crypto/des" "crypto/rc4" "crypto/subtle" "encoding/binary" @@ -115,9 +116,15 @@ // should invest a cleaner way to do this. gcmCipherID: {16, 12, 0, nil}, - // insecure cipher, see http://www.isg.rhul.ac.uk/~kp/SandPfinal.pdf - // uncomment below to enable it. - // aes128cbcID: {16, aes.BlockSize, 0, nil}, + // CBC mode is insecure and so is not included in the default config. + // (See http://www.isg.rhul.ac.uk/~kp/SandPfinal.pdf). If absolutely + // needed, it's possible to specify a custom Config to enable it. + // You should expect that an active attacker can recover plaintext if + // you do. + aes128cbcID: {16, aes.BlockSize, 0, nil}, + + // 3des-cbc is insecure and is disabled by default. 
+ tripledescbcID: {24, des.BlockSize, 0, nil}, } // prefixLen is the length of the packet prefix that contains the packet length @@ -365,12 +372,7 @@ oracleCamouflage uint32 } -func newAESCBCCipher(iv, key, macKey []byte, algs directionAlgorithms) (packetCipher, error) { - c, err := aes.NewCipher(key) - if err != nil { - return nil, err - } - +func newCBCCipher(c cipher.Block, iv, key, macKey []byte, algs directionAlgorithms) (packetCipher, error) { cbc := &cbcCipher{ mac: macModes[algs.MAC].new(macKey), decrypter: cipher.NewCBCDecrypter(c, iv), @@ -382,6 +384,34 @@ } return cbc, nil +} + +func newAESCBCCipher(iv, key, macKey []byte, algs directionAlgorithms) (packetCipher, error) { + c, err := aes.NewCipher(key) + if err != nil { + return nil, err + } + + cbc, err := newCBCCipher(c, iv, key, macKey, algs) + if err != nil { + return nil, err + } + + return cbc, nil +} + +func newTripleDESCBCCipher(iv, key, macKey []byte, algs directionAlgorithms) (packetCipher, error) { + c, err := des.NewTripleDESCipher(key) + if err != nil { + return nil, err + } + + cbc, err := newCBCCipher(c, iv, key, macKey, algs) + if err != nil { + return nil, err + } + + return cbc, nil } func maxUInt32(a, b int) uint32 { diff -Nru juju-core-2.0~beta15/src/golang.org/x/crypto/ssh/cipher_test.go juju-core-2.0.0/src/golang.org/x/crypto/ssh/cipher_test.go --- juju-core-2.0~beta15/src/golang.org/x/crypto/ssh/cipher_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/golang.org/x/crypto/ssh/cipher_test.go 2016-10-13 14:32:00.000000000 +0000 @@ -21,7 +21,7 @@ } func TestPacketCiphers(t *testing.T) { - // Still test aes128cbc cipher althought it's commented out. + // Still test aes128cbc cipher although it's commented out. 
cipherModes[aes128cbcID] = &streamCipherMode{16, aes.BlockSize, 0, nil} defer delete(cipherModes, aes128cbcID) diff -Nru juju-core-2.0~beta15/src/golang.org/x/crypto/ssh/client_auth.go juju-core-2.0.0/src/golang.org/x/crypto/ssh/client_auth.go --- juju-core-2.0~beta15/src/golang.org/x/crypto/ssh/client_auth.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/golang.org/x/crypto/ssh/client_auth.go 2016-10-13 14:32:00.000000000 +0000 @@ -321,8 +321,6 @@ return false, msg.Methods, nil case msgUserAuthSuccess: return true, nil, nil - case msgDisconnect: - return false, nil, io.EOF default: return false, nil, unexpectedMessageError(msgUserAuthSuccess, packet[0]) } @@ -439,3 +437,37 @@ } } } + +type retryableAuthMethod struct { + authMethod AuthMethod + maxTries int +} + +func (r *retryableAuthMethod) auth(session []byte, user string, c packetConn, rand io.Reader) (ok bool, methods []string, err error) { + for i := 0; r.maxTries <= 0 || i < r.maxTries; i++ { + ok, methods, err = r.authMethod.auth(session, user, c, rand) + if ok || err != nil { // either success or error terminate + return ok, methods, err + } + } + return ok, methods, err +} + +func (r *retryableAuthMethod) method() string { + return r.authMethod.method() +} + +// RetryableAuthMethod is a decorator for other auth methods enabling them to +// be retried up to maxTries before considering that AuthMethod itself failed. +// If maxTries is <= 0, will retry indefinitely +// +// This is useful for interactive clients using challenge/response type +// authentication (e.g. Keyboard-Interactive, Password, etc) where the user +// could mistype their response resulting in the server issuing a +// SSH_MSG_USERAUTH_FAILURE (rfc4252 #8 [password] and rfc4256 #3.4 +// [keyboard-interactive]); Without this decorator, the non-retryable +// AuthMethod would be removed from future consideration, and never tried again +// (and so the user would never be able to retry their entry). 
+func RetryableAuthMethod(auth AuthMethod, maxTries int) AuthMethod { + return &retryableAuthMethod{authMethod: auth, maxTries: maxTries} +} diff -Nru juju-core-2.0~beta15/src/golang.org/x/crypto/ssh/client_auth_test.go juju-core-2.0.0/src/golang.org/x/crypto/ssh/client_auth_test.go --- juju-core-2.0~beta15/src/golang.org/x/crypto/ssh/client_auth_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/golang.org/x/crypto/ssh/client_auth_test.go 2016-10-13 14:32:00.000000000 +0000 @@ -9,6 +9,7 @@ "crypto/rand" "errors" "fmt" + "os" "strings" "testing" ) @@ -243,6 +244,9 @@ } func TestClientUnsupportedKex(t *testing.T) { + if os.Getenv("GO_BUILDER_NAME") != "" { + t.Skip("skipping known-flaky test on the Go build dashboard; see golang.org/issue/15198") + } config := &ClientConfig{ User: "testuser", Auth: []AuthMethod{ @@ -296,7 +300,7 @@ t.Log("sign with wrong key") cert.SignCert(rand.Reader, testSigners["dsa"]) if err := tryAuth(t, clientConfig); err == nil { - t.Errorf("cert login passed with non-authoritive key") + t.Errorf("cert login passed with non-authoritative key") } t.Log("host cert") @@ -391,3 +395,78 @@ func TestNoPermissionsPassing(t *testing.T) { testPermissionsPassing(false, t) } + +func TestRetryableAuth(t *testing.T) { + n := 0 + passwords := []string{"WRONG1", "WRONG2"} + + config := &ClientConfig{ + User: "testuser", + Auth: []AuthMethod{ + RetryableAuthMethod(PasswordCallback(func() (string, error) { + p := passwords[n] + n++ + return p, nil + }), 2), + PublicKeys(testSigners["rsa"]), + }, + } + + if err := tryAuth(t, config); err != nil { + t.Fatalf("unable to dial remote side: %s", err) + } + if n != 2 { + t.Fatalf("Did not try all passwords") + } +} + +func ExampleRetryableAuthMethod(t *testing.T) { + user := "testuser" + NumberOfPrompts := 3 + + // Normally this would be a callback that prompts the user to answer the + // provided questions + Cb := func(user, instruction string, questions []string, echos []bool) (answers []string, 
err error) { + return []string{"answer1", "answer2"}, nil + } + + config := &ClientConfig{ + User: user, + Auth: []AuthMethod{ + RetryableAuthMethod(KeyboardInteractiveChallenge(Cb), NumberOfPrompts), + }, + } + + if err := tryAuth(t, config); err != nil { + t.Fatalf("unable to dial remote side: %s", err) + } +} + +// Test if username is received on server side when NoClientAuth is used +func TestClientAuthNone(t *testing.T) { + user := "testuser" + serverConfig := &ServerConfig{ + NoClientAuth: true, + } + serverConfig.AddHostKey(testSigners["rsa"]) + + clientConfig := &ClientConfig{ + User: user, + } + + c1, c2, err := netPipe() + if err != nil { + t.Fatalf("netPipe: %v", err) + } + defer c1.Close() + defer c2.Close() + + go NewClientConn(c2, "", clientConfig) + serverConn, err := newServer(c1, serverConfig) + if err != nil { + t.Fatal("newServer: %v", err) + } + if serverConn.User() != user { + t.Fatalf("server: got %q, want %q", serverConn.User(), user) + } +} diff -Nru juju-core-2.0~beta15/src/golang.org/x/crypto/ssh/client.go juju-core-2.0.0/src/golang.org/x/crypto/ssh/client.go --- juju-core-2.0~beta15/src/golang.org/x/crypto/ssh/client.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/golang.org/x/crypto/ssh/client.go 2016-10-13 14:32:00.000000000 +0000 @@ -9,6 +9,7 @@ "fmt" "net" "sync" + "time" ) // Client implements a traditional SSH client that supports shells, @@ -96,16 +97,10 @@ c.transport = newClientTransport( newTransport(c.sshConn.conn, config.Rand, true /* is client */), c.clientVersion, c.serverVersion, config, dialAddress, c.sshConn.RemoteAddr()) - if err := c.transport.requestKeyChange(); err != nil { + if err := c.transport.requestInitialKeyChange(); err != nil { return err } - if packet, err := c.transport.readPacket(); err != nil { - return err - } else if packet[0] != msgNewKeys { - return unexpectedMessageError(msgNewKeys, packet[0]) - } - // We just did the key change, so the session ID is established. 
c.sessionID = c.transport.getSessionID() @@ -169,7 +164,7 @@ // to incoming channels and requests, use net.Dial with NewClientConn // instead. func Dial(network, addr string, config *ClientConfig) (*Client, error) { - conn, err := net.Dial(network, addr) + conn, err := net.DialTimeout(network, addr, config.Timeout) if err != nil { return nil, err } @@ -210,4 +205,9 @@ // string returned from PublicKey.Type method may be used, or // any of the CertAlgoXxxx and KeyAlgoXxxx constants. HostKeyAlgorithms []string + + // Timeout is the maximum amount of time for the TCP connection to establish. + // + // A Timeout of zero means no timeout. + Timeout time.Duration } diff -Nru juju-core-2.0~beta15/src/golang.org/x/crypto/ssh/common.go juju-core-2.0.0/src/golang.org/x/crypto/ssh/common.go --- juju-core-2.0~beta15/src/golang.org/x/crypto/ssh/common.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/golang.org/x/crypto/ssh/common.go 2016-10-13 14:32:00.000000000 +0000 @@ -44,10 +44,12 @@ // of authenticating servers) in preference order. var supportedHostKeyAlgos = []string{ CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01, - CertAlgoECDSA384v01, CertAlgoECDSA521v01, + CertAlgoECDSA384v01, CertAlgoECDSA521v01, CertAlgoED25519v01, KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521, KeyAlgoRSA, KeyAlgoDSA, + + KeyAlgoED25519, } // supportedMACs specifies a default set of MAC algorithms in preference order. diff -Nru juju-core-2.0~beta15/src/golang.org/x/crypto/ssh/connection.go juju-core-2.0.0/src/golang.org/x/crypto/ssh/connection.go --- juju-core-2.0~beta15/src/golang.org/x/crypto/ssh/connection.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/golang.org/x/crypto/ssh/connection.go 2016-10-13 14:32:00.000000000 +0000 @@ -23,7 +23,6 @@ // ConnMetadata holds metadata for the connection. type ConnMetadata interface { // User returns the user ID for this connection. - // It is empty if no authentication is used. 
User() string // SessionID returns the sesson hash, also denoted by H. diff -Nru juju-core-2.0~beta15/src/golang.org/x/crypto/ssh/example_test.go juju-core-2.0.0/src/golang.org/x/crypto/ssh/example_test.go --- juju-core-2.0~beta15/src/golang.org/x/crypto/ssh/example_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/golang.org/x/crypto/ssh/example_test.go 2016-10-13 14:32:00.000000000 +0000 @@ -113,8 +113,7 @@ } func ExampleDial() { - // An SSH client is represented with a ClientConn. Currently only - // the "password" authentication method is supported. + // An SSH client is represented with a ClientConn. // // To authenticate with the remote server you must pass at least one // implementation of AuthMethod via the Auth field in ClientConfig. @@ -147,6 +146,39 @@ fmt.Println(b.String()) } +func ExamplePublicKeys() { + // A public key may be used to authenticate against the remote + // server by using an unencrypted PEM-encoded private key file. + // + // If you have an encrypted private key, the crypto/x509 package + // can be used to decrypt it. + key, err := ioutil.ReadFile("/home/user/.ssh/id_rsa") + if err != nil { + log.Fatalf("unable to read private key: %v", err) + } + + // Create the Signer for this private key. + signer, err := ssh.ParsePrivateKey(key) + if err != nil { + log.Fatalf("unable to parse private key: %v", err) + } + + config := &ssh.ClientConfig{ + User: "user", + Auth: []ssh.AuthMethod{ + // Use the PublicKeys method for remote authentication. + ssh.PublicKeys(signer), + }, + } + + // Connect to the remote server and perform the SSH handshake. 
+ client, err := ssh.Dial("tcp", "host.com:22", config) + if err != nil { + log.Fatalf("unable to connect: %v", err) + } + defer client.Close() +} + func ExampleClient_Listen() { config := &ssh.ClientConfig{ User: "username", diff -Nru juju-core-2.0~beta15/src/golang.org/x/crypto/ssh/handshake.go juju-core-2.0.0/src/golang.org/x/crypto/ssh/handshake.go --- juju-core-2.0~beta15/src/golang.org/x/crypto/ssh/handshake.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/golang.org/x/crypto/ssh/handshake.go 2016-10-13 14:32:00.000000000 +0000 @@ -29,25 +29,6 @@ // direction will be effected if a msgNewKeys message is sent // or received. prepareKeyChange(*algorithms, *kexResult) error - - // getSessionID returns the session ID. prepareKeyChange must - // have been called once. - getSessionID() []byte -} - -// rekeyingTransport is the interface of handshakeTransport that we -// (internally) expose to ClientConn and ServerConn. -type rekeyingTransport interface { - packetConn - - // requestKeyChange asks the remote side to change keys. All - // writes are blocked until the key change succeeds, which is - // signaled by reading a msgNewKeys. - requestKeyChange() error - - // getSessionID returns the session ID. This is only valid - // after the first key change has completed. - getSessionID() []byte } // handshakeTransport implements rekeying on top of a keyingTransport @@ -86,6 +67,9 @@ sentInitMsg *kexInitMsg writtenSinceKex uint64 writeError error + + // The session ID or nil if first kex did not complete yet. 
+ sessionID []byte } func newHandshakeTransport(conn keyingTransport, config *Config, clientVersion, serverVersion []byte) *handshakeTransport { @@ -122,7 +106,7 @@ } func (t *handshakeTransport) getSessionID() []byte { - return t.conn.getSessionID() + return t.sessionID } func (t *handshakeTransport) id() string { @@ -177,15 +161,22 @@ t.readSinceKex += uint64(len(p)) if debugHandshake { - msg, err := decode(p) - log.Printf("%s got %T %v (%v)", t.id(), msg, msg, err) + if p[0] == msgChannelData || p[0] == msgChannelExtendedData { + log.Printf("%s got data (packet %d bytes)", t.id(), len(p)) + } else { + msg, err := decode(p) + log.Printf("%s got %T %v (%v)", t.id(), msg, msg, err) + } } if p[0] != msgKexInit { return p, nil } - err = t.enterKeyExchange(p) t.mu.Lock() + + firstKex := t.sessionID == nil + + err = t.enterKeyExchangeLocked(p) if err != nil { // drop connection t.conn.Close() @@ -193,7 +184,7 @@ } if debugHandshake { - log.Printf("%s exited key exchange, err %v", t.id(), err) + log.Printf("%s exited key exchange (first %v), err %v", t.id(), firstKex, err) } // Unblock writers. @@ -208,28 +199,69 @@ } t.readSinceKex = 0 - return []byte{msgNewKeys}, nil -} + + // By default, a key exchange is hidden from higher layers by + // translating it into msgIgnore. + successPacket := []byte{msgIgnore} + if firstKex { + // sendKexInit() for the first kex waits for + // msgNewKeys so the authentication process is + // guaranteed to happen over an encrypted transport. + successPacket = []byte{msgNewKeys} + } + + return successPacket, nil +} + +// keyChangeCategory describes whether a key exchange is the first on a +// connection, or a subsequent one. +type keyChangeCategory bool + +const ( + firstKeyExchange keyChangeCategory = true + subsequentKeyExchange keyChangeCategory = false +) // sendKexInit sends a key change message, and returns the message // that was sent. 
After initiating the key change, all writes will be // blocked until the change is done, and a failed key change will // close the underlying transport. This function is safe for // concurrent use by multiple goroutines. -func (t *handshakeTransport) sendKexInit() (*kexInitMsg, []byte, error) { +func (t *handshakeTransport) sendKexInit(isFirst keyChangeCategory) error { + var err error + t.mu.Lock() - defer t.mu.Unlock() - return t.sendKexInitLocked() + // If this is the initial key change, but we already have a sessionID, + // then do nothing because the key exchange has already completed + // asynchronously. + if !isFirst || t.sessionID == nil { + _, _, err = t.sendKexInitLocked(isFirst) + } + t.mu.Unlock() + if err != nil { + return err + } + if isFirst { + if packet, err := t.readPacket(); err != nil { + return err + } else if packet[0] != msgNewKeys { + return unexpectedMessageError(msgNewKeys, packet[0]) + } + } + return nil +} + +func (t *handshakeTransport) requestInitialKeyChange() error { + return t.sendKexInit(firstKeyExchange) } func (t *handshakeTransport) requestKeyChange() error { - _, _, err := t.sendKexInit() - return err + return t.sendKexInit(subsequentKeyExchange) } // sendKexInitLocked sends a key change message. t.mu must be locked // while this happens. -func (t *handshakeTransport) sendKexInitLocked() (*kexInitMsg, []byte, error) { +func (t *handshakeTransport) sendKexInitLocked(isFirst keyChangeCategory) (*kexInitMsg, []byte, error) { // kexInits may be sent either in response to the other side, // or because our side wants to initiate a key change, so we // may have already sent a kexInit. 
In that case, don't send a @@ -237,6 +269,7 @@ if t.sentInitMsg != nil { return t.sentInitMsg, t.sentInitPacket, nil } + msg := &kexInitMsg{ KexAlgos: t.config.KeyExchanges, CiphersClientServer: t.config.Ciphers, @@ -276,7 +309,7 @@ defer t.mu.Unlock() if t.writtenSinceKex > t.config.RekeyThreshold { - t.sendKexInitLocked() + t.sendKexInitLocked(subsequentKeyExchange) } for t.sentInitMsg != nil && t.writeError == nil { t.cond.Wait() @@ -300,12 +333,12 @@ return t.conn.Close() } -// enterKeyExchange runs the key exchange. -func (t *handshakeTransport) enterKeyExchange(otherInitPacket []byte) error { +// enterKeyExchange runs the key exchange. t.mu must be held while running this. +func (t *handshakeTransport) enterKeyExchangeLocked(otherInitPacket []byte) error { if debugHandshake { log.Printf("%s entered key exchange", t.id()) } - myInit, myInitPacket, err := t.sendKexInit() + myInit, myInitPacket, err := t.sendKexInitLocked(subsequentKeyExchange) if err != nil { return err } @@ -338,7 +371,16 @@ } // We don't send FirstKexFollows, but we handle receiving it. - if otherInit.FirstKexFollows && algs.kex != otherInit.KexAlgos[0] { + // + // RFC 4253 section 7 defines the kex and the agreement method for + // first_kex_packet_follows. It states that the guessed packet + // should be ignored if the "kex algorithm and/or the host + // key algorithm is guessed wrong (server and client have + // different preferred algorithm), or if any of the other + // algorithms cannot be agreed upon". The other algorithms have + // already been checked above so the kex algorithm and host key + // algorithm are checked here. + if otherInit.FirstKexFollows && (clientInit.KexAlgos[0] != serverInit.KexAlgos[0] || clientInit.ServerHostKeyAlgos[0] != serverInit.ServerHostKeyAlgos[0]) { // other side sent a kex message for the wrong algorithm, // which we have to ignore. 
if _, err := t.conn.readPacket(); err != nil { @@ -362,6 +404,11 @@ return err } + if t.sessionID == nil { + t.sessionID = result.H + } + result.SessionID = t.sessionID + t.conn.prepareKeyChange(algs, result) if err = t.conn.writePacket([]byte{msgNewKeys}); err != nil { return err @@ -371,6 +418,7 @@ } else if packet[0] != msgNewKeys { return unexpectedMessageError(msgNewKeys, packet[0]) } + return nil } diff -Nru juju-core-2.0~beta15/src/golang.org/x/crypto/ssh/handshake_test.go juju-core-2.0.0/src/golang.org/x/crypto/ssh/handshake_test.go --- juju-core-2.0~beta15/src/golang.org/x/crypto/ssh/handshake_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/golang.org/x/crypto/ssh/handshake_test.go 2016-10-13 14:32:00.000000000 +0000 @@ -10,6 +10,7 @@ "errors" "fmt" "net" + "reflect" "runtime" "strings" "sync" @@ -103,7 +104,7 @@ } if i == 5 { // halfway through, we request a key change. - _, _, err := trC.sendKexInit() + err := trC.sendKexInit(subsequentKeyExchange) if err != nil { t.Fatalf("sendKexInit: %v", err) } @@ -160,7 +161,7 @@ } // Now request a key change. - _, _, err = trC.sendKexInit() + err = trC.sendKexInit(subsequentKeyExchange) if err != nil { t.Errorf("sendKexInit: %v", err) } @@ -183,6 +184,28 @@ } } +func TestForceFirstKex(t *testing.T) { + checker := &testChecker{} + trC, trS, err := handshakePair(&ClientConfig{HostKeyCallback: checker.Check}, "addr") + if err != nil { + t.Fatalf("handshakePair: %v", err) + } + + defer trC.Close() + defer trS.Close() + + trC.writePacket(Marshal(&serviceRequestMsg{serviceUserAuth})) + + // We setup the initial key exchange, but the remote side + // tries to send serviceRequestMsg in cleartext, which is + // disallowed. 
+ + err = trS.sendKexInit(firstKeyExchange) + if err == nil { + t.Errorf("server first kex init should reject unexpected packet") + } +} + func TestHandshakeTwice(t *testing.T) { checker := &testChecker{} trC, trS, err := handshakePair(&ClientConfig{HostKeyCallback: checker.Check}, "addr") @@ -193,18 +216,25 @@ defer trC.Close() defer trS.Close() + // Both sides should ask for the first key exchange first. + err = trS.sendKexInit(firstKeyExchange) + if err != nil { + t.Errorf("server sendKexInit: %v", err) + } + + err = trC.sendKexInit(firstKeyExchange) + if err != nil { + t.Errorf("client sendKexInit: %v", err) + } + + sent := 0 // send a packet packet := make([]byte, 5) packet[0] = msgRequestSuccess if err := trC.writePacket(packet); err != nil { t.Errorf("writePacket: %v", err) } - - // Now request a key change. - _, _, err = trC.sendKexInit() - if err != nil { - t.Errorf("sendKexInit: %v", err) - } + sent++ // Send another packet. Use a fresh one, since writePacket destroys. packet = make([]byte, 5) @@ -212,9 +242,10 @@ if err := trC.writePacket(packet); err != nil { t.Errorf("writePacket: %v", err) } + sent++ // 2nd key change. 
- _, _, err = trC.sendKexInit() + err = trC.sendKexInit(subsequentKeyExchange) if err != nil { t.Errorf("sendKexInit: %v", err) } @@ -224,17 +255,15 @@ if err := trC.writePacket(packet); err != nil { t.Errorf("writePacket: %v", err) } + sent++ packet = make([]byte, 5) packet[0] = msgRequestSuccess - for i := 0; i < 5; i++ { + for i := 0; i < sent; i++ { msg, err := trS.readPacket() if err != nil { t.Fatalf("server closed too soon: %v", err) } - if msg[0] == msgNewKeys { - continue - } if bytes.Compare(msg, packet) != 0 { t.Errorf("packet %d: got %q want %q", i, msg, packet) @@ -413,3 +442,45 @@ wg.Wait() } + +func TestDisconnect(t *testing.T) { + if runtime.GOOS == "plan9" { + t.Skip("see golang.org/issue/7237") + } + checker := &testChecker{} + trC, trS, err := handshakePair(&ClientConfig{HostKeyCallback: checker.Check}, "addr") + if err != nil { + t.Fatalf("handshakePair: %v", err) + } + + defer trC.Close() + defer trS.Close() + + trC.writePacket([]byte{msgRequestSuccess, 0, 0}) + errMsg := &disconnectMsg{ + Reason: 42, + Message: "such is life", + } + trC.writePacket(Marshal(errMsg)) + trC.writePacket([]byte{msgRequestSuccess, 0, 0}) + + packet, err := trS.readPacket() + if err != nil { + t.Fatalf("readPacket 1: %v", err) + } + if packet[0] != msgRequestSuccess { + t.Errorf("got packet %v, want packet type %d", packet, msgRequestSuccess) + } + + _, err = trS.readPacket() + if err == nil { + t.Errorf("readPacket 2 succeeded") + } else if !reflect.DeepEqual(err, errMsg) { + t.Errorf("got error %#v, want %#v", err, errMsg) + } + + _, err = trS.readPacket() + if err == nil { + t.Errorf("readPacket 3 succeeded") + } +} diff -Nru juju-core-2.0~beta15/src/golang.org/x/crypto/ssh/kex.go juju-core-2.0.0/src/golang.org/x/crypto/ssh/kex.go --- juju-core-2.0~beta15/src/golang.org/x/crypto/ssh/kex.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/golang.org/x/crypto/ssh/kex.go 2016-10-13 14:32:00.000000000 +0000 @@ -8,8 +8,8 @@ "crypto" "crypto/ecdsa" 
"crypto/elliptic" - "crypto/subtle" "crypto/rand" + "crypto/subtle" "errors" "io" "math/big" @@ -46,7 +46,7 @@ Hash crypto.Hash // The session ID, which is the first H computed. This is used - // to signal data inside transport. + // to derive key material inside the transport. SessionID []byte } diff -Nru juju-core-2.0~beta15/src/golang.org/x/crypto/ssh/keys.go juju-core-2.0.0/src/golang.org/x/crypto/ssh/keys.go --- juju-core-2.0~beta15/src/golang.org/x/crypto/ssh/keys.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/golang.org/x/crypto/ssh/keys.go 2016-10-13 14:32:00.000000000 +0000 @@ -19,6 +19,9 @@ "fmt" "io" "math/big" + "strings" + + "golang.org/x/crypto/ed25519" ) // These constants represent the algorithm names for key types supported by this @@ -29,6 +32,7 @@ KeyAlgoECDSA256 = "ecdsa-sha2-nistp256" KeyAlgoECDSA384 = "ecdsa-sha2-nistp384" KeyAlgoECDSA521 = "ecdsa-sha2-nistp521" + KeyAlgoED25519 = "ssh-ed25519" ) // parsePubKey parses a public key of the given algorithm. @@ -41,14 +45,16 @@ return parseDSA(in) case KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521: return parseECDSA(in) - case CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01, CertAlgoECDSA384v01, CertAlgoECDSA521v01: + case KeyAlgoED25519: + return parseED25519(in) + case CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01, CertAlgoECDSA384v01, CertAlgoECDSA521v01, CertAlgoED25519v01: cert, err := parseCert(in, certToPrivAlgo(algo)) if err != nil { return nil, nil, err } return cert, nil, nil } - return nil, nil, fmt.Errorf("ssh: unknown key algorithm: %v", err) + return nil, nil, fmt.Errorf("ssh: unknown key algorithm: %v", algo) } // parseAuthorizedKey parses a public key in OpenSSH authorized_keys format @@ -77,6 +83,79 @@ return out, comment, nil } +// ParseKnownHosts parses an entry in the format of the known_hosts file. +// +// The known_hosts format is documented in the sshd(8) manual page. This +// function will parse a single entry from in. 
On successful return, marker +// will contain the optional marker value (i.e. "cert-authority" or "revoked") +// or else be empty, hosts will contain the hosts that this entry matches, +// pubKey will contain the public key and comment will contain any trailing +// comment at the end of the line. See the sshd(8) manual page for the various +// forms that a host string can take. +// +// The unparsed remainder of the input will be returned in rest. This function +// can be called repeatedly to parse multiple entries. +// +// If no entries were found in the input then err will be io.EOF. Otherwise a +// non-nil err value indicates a parse error. +func ParseKnownHosts(in []byte) (marker string, hosts []string, pubKey PublicKey, comment string, rest []byte, err error) { + for len(in) > 0 { + end := bytes.IndexByte(in, '\n') + if end != -1 { + rest = in[end+1:] + in = in[:end] + } else { + rest = nil + } + + end = bytes.IndexByte(in, '\r') + if end != -1 { + in = in[:end] + } + + in = bytes.TrimSpace(in) + if len(in) == 0 || in[0] == '#' { + in = rest + continue + } + + i := bytes.IndexAny(in, " \t") + if i == -1 { + in = rest + continue + } + + // Strip out the beginning of the known_host key. + // This is either an optional marker or a (set of) hostname(s). + keyFields := bytes.Fields(in) + if len(keyFields) < 3 || len(keyFields) > 5 { + return "", nil, nil, "", nil, errors.New("ssh: invalid entry in known_hosts data") + } + + // keyFields[0] is either "@cert-authority", "@revoked" or a comma separated + // list of hosts + marker := "" + if keyFields[0][0] == '@' { + marker = string(keyFields[0][1:]) + keyFields = keyFields[1:] + } + + hosts := string(keyFields[0]) + // keyFields[1] contains the key type (e.g. “ssh-rsaâ€). + // However, that information is duplicated inside the + // base64-encoded key and so is ignored here. 
+ + key := bytes.Join(keyFields[2:], []byte(" ")) + if pubKey, comment, err = parseAuthorizedKey(key); err != nil { + return "", nil, nil, "", nil, err + } + + return marker, strings.Split(hosts, ","), pubKey, comment, rest, nil + } + + return "", nil, nil, "", nil, io.EOF +} + // ParseAuthorizedKeys parses a public key from an authorized_keys // file used in OpenSSH according to the sshd(8) manual page. func ParseAuthorizedKey(in []byte) (out PublicKey, comment string, options []string, rest []byte, err error) { @@ -245,6 +324,8 @@ func (r *rsaPublicKey) Marshal() []byte { e := new(big.Int).SetInt64(int64(r.E)) + // RSA publickey struct layout should match the struct used by + // parseRSACert in the x/crypto/ssh/agent package. wirekey := struct { Name string E *big.Int @@ -267,28 +348,6 @@ return rsa.VerifyPKCS1v15((*rsa.PublicKey)(r), crypto.SHA1, digest, sig.Blob) } -type rsaPrivateKey struct { - *rsa.PrivateKey -} - -func (r *rsaPrivateKey) PublicKey() PublicKey { - return (*rsaPublicKey)(&r.PrivateKey.PublicKey) -} - -func (r *rsaPrivateKey) Sign(rand io.Reader, data []byte) (*Signature, error) { - h := crypto.SHA1.New() - h.Write(data) - digest := h.Sum(nil) - blob, err := rsa.SignPKCS1v15(rand, r.PrivateKey, crypto.SHA1, digest) - if err != nil { - return nil, err - } - return &Signature{ - Format: r.PublicKey().Type(), - Blob: blob, - }, nil -} - type dsaPublicKey dsa.PublicKey func (r *dsaPublicKey) Type() string { @@ -317,6 +376,8 @@ } func (k *dsaPublicKey) Marshal() []byte { + // DSA publickey struct layout should match the struct used by + // parseDSACert in the x/crypto/ssh/agent package. 
w := struct { Name string P, Q, G, Y *big.Int @@ -403,6 +464,51 @@ panic("ssh: unsupported ecdsa key size") } +type ed25519PublicKey ed25519.PublicKey + +func (key ed25519PublicKey) Type() string { + return KeyAlgoED25519 +} + +func parseED25519(in []byte) (out PublicKey, rest []byte, err error) { + var w struct { + KeyBytes []byte + Rest []byte `ssh:"rest"` + } + + if err := Unmarshal(in, &w); err != nil { + return nil, nil, err + } + + key := ed25519.PublicKey(w.KeyBytes) + + return (ed25519PublicKey)(key), w.Rest, nil +} + +func (key ed25519PublicKey) Marshal() []byte { + w := struct { + Name string + KeyBytes []byte + }{ + KeyAlgoED25519, + []byte(key), + } + return Marshal(&w) +} + +func (key ed25519PublicKey) Verify(b []byte, sig *Signature) error { + if sig.Format != key.Type() { + return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, key.Type()) + } + + edKey := (ed25519.PublicKey)(key) + if ok := ed25519.Verify(edKey, b, sig.Blob); !ok { + return errors.New("ssh: signature did not verify") + } + + return nil +} + func supportedEllipticCurve(curve elliptic.Curve) bool { return curve == elliptic.P256() || curve == elliptic.P384() || curve == elliptic.P521() } @@ -455,6 +561,8 @@ func (key *ecdsaPublicKey) Marshal() []byte { // See RFC 5656, section 3.1. keyBytes := elliptic.Marshal(key.Curve, key.X, key.Y) + // ECDSA publickey struct layout should match the struct used by + // parseECDSACert in the x/crypto/ssh/agent package. w := struct { Name string ID string @@ -496,72 +604,120 @@ return errors.New("ssh: signature did not verify") } -type ecdsaPrivateKey struct { - *ecdsa.PrivateKey +// NewSignerFromKey takes an *rsa.PrivateKey, *dsa.PrivateKey, +// *ecdsa.PrivateKey or any other crypto.Signer and returns a corresponding +// Signer instance. ECDSA keys must use P-256, P-384 or P-521. 
+func NewSignerFromKey(key interface{}) (Signer, error) { + switch key := key.(type) { + case crypto.Signer: + return NewSignerFromSigner(key) + case *dsa.PrivateKey: + return &dsaPrivateKey{key}, nil + default: + return nil, fmt.Errorf("ssh: unsupported key type %T", key) + } } -func (k *ecdsaPrivateKey) PublicKey() PublicKey { - return (*ecdsaPublicKey)(&k.PrivateKey.PublicKey) +type wrappedSigner struct { + signer crypto.Signer + pubKey PublicKey } -func (k *ecdsaPrivateKey) Sign(rand io.Reader, data []byte) (*Signature, error) { - h := ecHash(k.PrivateKey.PublicKey.Curve).New() - h.Write(data) - digest := h.Sum(nil) - r, s, err := ecdsa.Sign(rand, k.PrivateKey, digest) +// NewSignerFromSigner takes any crypto.Signer implementation and +// returns a corresponding Signer interface. This can be used, for +// example, with keys kept in hardware modules. +func NewSignerFromSigner(signer crypto.Signer) (Signer, error) { + pubKey, err := NewPublicKey(signer.Public()) if err != nil { return nil, err } - sig := make([]byte, intLength(r)+intLength(s)) - rest := marshalInt(sig, r) - marshalInt(rest, s) - return &Signature{ - Format: k.PublicKey().Type(), - Blob: sig, - }, nil + return &wrappedSigner{signer, pubKey}, nil } -// NewSignerFromKey takes a pointer to rsa, dsa or ecdsa PrivateKey -// returns a corresponding Signer instance. EC keys should use P256, -// P384 or P521. 
-func NewSignerFromKey(k interface{}) (Signer, error) { - var sshKey Signer - switch t := k.(type) { - case *rsa.PrivateKey: - sshKey = &rsaPrivateKey{t} - case *dsa.PrivateKey: - sshKey = &dsaPrivateKey{t} - case *ecdsa.PrivateKey: - if !supportedEllipticCurve(t.Curve) { - return nil, errors.New("ssh: only P256, P384 and P521 EC keys are supported.") - } +func (s *wrappedSigner) PublicKey() PublicKey { + return s.pubKey +} + +func (s *wrappedSigner) Sign(rand io.Reader, data []byte) (*Signature, error) { + var hashFunc crypto.Hash - sshKey = &ecdsaPrivateKey{t} + switch key := s.pubKey.(type) { + case *rsaPublicKey, *dsaPublicKey: + hashFunc = crypto.SHA1 + case *ecdsaPublicKey: + hashFunc = ecHash(key.Curve) + case ed25519PublicKey: default: - return nil, fmt.Errorf("ssh: unsupported key type %T", k) + return nil, fmt.Errorf("ssh: unsupported key type %T", key) + } + + var digest []byte + if hashFunc != 0 { + h := hashFunc.New() + h.Write(data) + digest = h.Sum(nil) + } else { + digest = data + } + + signature, err := s.signer.Sign(rand, digest, hashFunc) + if err != nil { + return nil, err } - return sshKey, nil + + // crypto.Signer.Sign is expected to return an ASN.1-encoded signature + // for ECDSA and DSA, but that's not the encoding expected by SSH, so + // re-encode. + switch s.pubKey.(type) { + case *ecdsaPublicKey, *dsaPublicKey: + type asn1Signature struct { + R, S *big.Int + } + asn1Sig := new(asn1Signature) + _, err := asn1.Unmarshal(signature, asn1Sig) + if err != nil { + return nil, err + } + + switch s.pubKey.(type) { + case *ecdsaPublicKey: + signature = Marshal(asn1Sig) + + case *dsaPublicKey: + signature = make([]byte, 40) + r := asn1Sig.R.Bytes() + s := asn1Sig.S.Bytes() + copy(signature[20-len(r):20], r) + copy(signature[40-len(s):40], s) + } + } + + return &Signature{ + Format: s.pubKey.Type(), + Blob: signature, + }, nil } -// NewPublicKey takes a pointer to rsa, dsa or ecdsa PublicKey -// and returns a corresponding ssh PublicKey instance. 
EC keys should use P256, P384 or P521. -func NewPublicKey(k interface{}) (PublicKey, error) { - var sshKey PublicKey - switch t := k.(type) { +// NewPublicKey takes an *rsa.PublicKey, *dsa.PublicKey, *ecdsa.PublicKey, +// ed25519.PublicKey, or any other crypto.Signer and returns a corresponding +// Signer instance. ECDSA keys must use P-256, P-384 or P-521. +func NewPublicKey(key interface{}) (PublicKey, error) { + switch key := key.(type) { case *rsa.PublicKey: - sshKey = (*rsaPublicKey)(t) + return (*rsaPublicKey)(key), nil case *ecdsa.PublicKey: - if !supportedEllipticCurve(t.Curve) { - return nil, errors.New("ssh: only P256, P384 and P521 EC keys are supported.") + if !supportedEllipticCurve(key.Curve) { + return nil, errors.New("ssh: only P-256, P-384 and P-521 EC keys are supported.") } - sshKey = (*ecdsaPublicKey)(t) + return (*ecdsaPublicKey)(key), nil case *dsa.PublicKey: - sshKey = (*dsaPublicKey)(t) + return (*dsaPublicKey)(key), nil + case ed25519.PublicKey: + return (ed25519PublicKey)(key), nil default: - return nil, fmt.Errorf("ssh: unsupported key type %T", k) + return nil, fmt.Errorf("ssh: unsupported key type %T", key) } - return sshKey, nil } // ParsePrivateKey returns a Signer from a PEM encoded private key. 
It supports @@ -590,6 +746,8 @@ return x509.ParseECPrivateKey(block.Bytes) case "DSA PRIVATE KEY": return ParseDSAPrivateKey(block.Bytes) + case "OPENSSH PRIVATE KEY": + return parseOpenSSHPrivateKey(block.Bytes) default: return nil, fmt.Errorf("ssh: unsupported key type %q", block.Type) } @@ -626,3 +784,63 @@ X: k.Pub, }, nil } + +// Implemented based on the documentation at +// https://github.com/openssh/openssh-portable/blob/master/PROTOCOL.key +func parseOpenSSHPrivateKey(key []byte) (*ed25519.PrivateKey, error) { + magic := append([]byte("openssh-key-v1"), 0) + if !bytes.Equal(magic, key[0:len(magic)]) { + return nil, errors.New("ssh: invalid openssh private key format") + } + remaining := key[len(magic):] + + var w struct { + CipherName string + KdfName string + KdfOpts string + NumKeys uint32 + PubKey []byte + PrivKeyBlock []byte + } + + if err := Unmarshal(remaining, &w); err != nil { + return nil, err + } + + pk1 := struct { + Check1 uint32 + Check2 uint32 + Keytype string + Pub []byte + Priv []byte + Comment string + Pad []byte `ssh:"rest"` + }{} + + if err := Unmarshal(w.PrivKeyBlock, &pk1); err != nil { + return nil, err + } + + if pk1.Check1 != pk1.Check2 { + return nil, errors.New("ssh: checkint mismatch") + } + + // we only handle ed25519 keys currently + if pk1.Keytype != KeyAlgoED25519 { + return nil, errors.New("ssh: unhandled key type") + } + + for i, b := range pk1.Pad { + if int(b) != i+1 { + return nil, errors.New("ssh: padding not as expected") + } + } + + if len(pk1.Priv) != ed25519.PrivateKeySize { + return nil, errors.New("ssh: private key unexpected length") + } + + pk := ed25519.PrivateKey(make([]byte, ed25519.PrivateKeySize)) + copy(pk, pk1.Priv) + return &pk, nil +} diff -Nru juju-core-2.0~beta15/src/golang.org/x/crypto/ssh/keys_test.go juju-core-2.0.0/src/golang.org/x/crypto/ssh/keys_test.go --- juju-core-2.0~beta15/src/golang.org/x/crypto/ssh/keys_test.go 2016-08-16 08:56:25.000000000 +0000 +++ 
juju-core-2.0.0/src/golang.org/x/crypto/ssh/keys_test.go 2016-10-13 14:32:00.000000000 +0000 @@ -17,6 +17,7 @@ "strings" "testing" + "golang.org/x/crypto/ed25519" "golang.org/x/crypto/ssh/testdata" ) @@ -28,6 +29,8 @@ return (*dsa.PublicKey)(k) case *ecdsaPublicKey: return (*ecdsa.PublicKey)(k) + case ed25519PublicKey: + return (ed25519.PublicKey)(k) case *Certificate: return k } @@ -57,12 +60,12 @@ t.Fatalf("GenerateKey: %v", err) } - if _, err = NewSignerFromKey(raw); err == nil || !strings.Contains(err.Error(), "only P256") { - t.Fatalf("NewPrivateKey should not succeed with P224, got: %v", err) + if _, err = NewSignerFromKey(raw); err == nil || !strings.Contains(err.Error(), "only P-256") { + t.Fatalf("NewPrivateKey should not succeed with P-224, got: %v", err) } - if _, err = NewPublicKey(&raw.PublicKey); err == nil || !strings.Contains(err.Error(), "only P256") { - t.Fatalf("NewPublicKey should not succeed with P224, got: %v", err) + if _, err = NewPublicKey(&raw.PublicKey); err == nil || !strings.Contains(err.Error(), "only P-256") { + t.Fatalf("NewPublicKey should not succeed with P-224, got: %v", err) } } @@ -304,3 +307,134 @@ t.Errorf("got valid entry for %q", authInvalid) } } + +var knownHostsParseTests = []struct { + input string + err string + + marker string + comment string + hosts []string + rest string +} { + { + "", + "EOF", + + "", "", nil, "", + }, + { + "# Just a comment", + "EOF", + + "", "", nil, "", + }, + { + " \t ", + "EOF", + + "", "", nil, "", + }, + { + "localhost ssh-rsa {RSAPUB}", + "", + + "", "", []string{"localhost"}, "", + }, + { + "localhost\tssh-rsa {RSAPUB}", + "", + + "", "", []string{"localhost"}, "", + }, + { + "localhost\tssh-rsa {RSAPUB}\tcomment comment", + "", + + "", "comment comment", []string{"localhost"}, "", + }, + { + "localhost\tssh-rsa {RSAPUB}\tcomment comment\n", + "", + + "", "comment comment", []string{"localhost"}, "", + }, + { + "localhost\tssh-rsa {RSAPUB}\tcomment comment\r\n", + "", + + "", "comment 
comment", []string{"localhost"}, "", + }, + { + "localhost\tssh-rsa {RSAPUB}\tcomment comment\r\nnext line", + "", + + "", "comment comment", []string{"localhost"}, "next line", + }, + { + "localhost,[host2:123]\tssh-rsa {RSAPUB}\tcomment comment", + "", + + "", "comment comment", []string{"localhost","[host2:123]"}, "", + }, + { + "@marker \tlocalhost,[host2:123]\tssh-rsa {RSAPUB}", + "", + + "marker", "", []string{"localhost","[host2:123]"}, "", + }, + { + "@marker \tlocalhost,[host2:123]\tssh-rsa aabbccdd", + "short read", + + "", "", nil, "", + }, +} + +func TestKnownHostsParsing(t *testing.T) { + rsaPub, rsaPubSerialized := getTestKey() + + for i, test := range knownHostsParseTests { + var expectedKey PublicKey + const rsaKeyToken = "{RSAPUB}" + + input := test.input + if strings.Contains(input, rsaKeyToken) { + expectedKey = rsaPub + input = strings.Replace(test.input, rsaKeyToken, rsaPubSerialized, -1) + } + + marker, hosts, pubKey, comment, rest, err := ParseKnownHosts([]byte(input)) + if err != nil { + if len(test.err) == 0 { + t.Errorf("#%d: unexpectedly failed with %q", i, err) + } else if !strings.Contains(err.Error(), test.err) { + t.Errorf("#%d: expected error containing %q, but got %q", i, test.err, err) + } + continue + } else if len(test.err) != 0 { + t.Errorf("#%d: succeeded but expected error including %q", i, test.err) + continue + } + + if !reflect.DeepEqual(expectedKey, pubKey) { + t.Errorf("#%d: expected key %#v, but got %#v", i, expectedKey, pubKey) + } + + if marker != test.marker { + t.Errorf("#%d: expected marker %q, but got %q", i, test.marker, marker) + } + + if comment != test.comment { + t.Errorf("#%d: expected comment %q, but got %q", i, test.comment, comment) + } + + if !reflect.DeepEqual(test.hosts, hosts) { + t.Errorf("#%d: expected hosts %#v, but got %#v", i, test.hosts, hosts) + } + + if rest := string(rest); rest != test.rest { + t.Errorf("#%d: expected remaining input to be %q, but got %q", i, test.rest, rest) + } + } +} diff 
-Nru juju-core-2.0~beta15/src/golang.org/x/crypto/ssh/messages.go juju-core-2.0.0/src/golang.org/x/crypto/ssh/messages.go --- juju-core-2.0~beta15/src/golang.org/x/crypto/ssh/messages.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/golang.org/x/crypto/ssh/messages.go 2016-10-13 14:32:00.000000000 +0000 @@ -13,6 +13,7 @@ "math/big" "reflect" "strconv" + "strings" ) // These are SSH message type numbers. They are scattered around several @@ -47,7 +48,7 @@ } func (d *disconnectMsg) Error() string { - return fmt.Sprintf("ssh: disconnect reason %d: %s", d.Reason, d.Message) + return fmt.Sprintf("ssh: disconnect, reason %d: %s", d.Reason, d.Message) } // See RFC 4253, section 7.1. @@ -124,6 +125,10 @@ Payload []byte `ssh:"rest"` } +// Used for debug printouts of packets. +type userAuthSuccessMsg struct { +} + // See RFC 4252, section 5.1 const msgUserAuthFailure = 51 @@ -158,6 +163,13 @@ const msgChannelExtendedData = 95 const msgChannelData = 94 +// Used for debug print outs of packets. +type channelDataMsg struct { + PeersId uint32 `sshtype:"94"` + Length uint32 + Rest []byte `ssh:"rest"` +} + // See RFC 4254, section 5.1. const msgChannelOpenConfirm = 91 @@ -255,17 +267,19 @@ PubKey []byte } -// typeTag returns the type byte for the given type. The type should -// be struct. -func typeTag(structType reflect.Type) byte { - var tag byte - var tagStr string - tagStr = structType.Field(0).Tag.Get("sshtype") - i, err := strconv.Atoi(tagStr) - if err == nil { - tag = byte(i) +// typeTags returns the possible type bytes for the given reflect.Type, which +// should be a struct. The possible values are separated by a '|' character. 
+func typeTags(structType reflect.Type) (tags []byte) { + tagStr := structType.Field(0).Tag.Get("sshtype") + + for _, tag := range strings.Split(tagStr, "|") { + i, err := strconv.Atoi(tag) + if err == nil { + tags = append(tags, byte(i)) + } } - return tag + + return tags } func fieldError(t reflect.Type, field int, problem string) error { @@ -279,19 +293,34 @@ // Unmarshal parses data in SSH wire format into a structure. The out // argument should be a pointer to struct. If the first member of the -// struct has the "sshtype" tag set to a number in decimal, the packet -// must start that number. In case of error, Unmarshal returns a -// ParseError or UnexpectedMessageError. +// struct has the "sshtype" tag set to a '|'-separated set of numbers +// in decimal, the packet must start with one of those numbers. In +// case of error, Unmarshal returns a ParseError or +// UnexpectedMessageError. func Unmarshal(data []byte, out interface{}) error { v := reflect.ValueOf(out).Elem() structType := v.Type() - expectedType := typeTag(structType) + expectedTypes := typeTags(structType) + + var expectedType byte + if len(expectedTypes) > 0 { + expectedType = expectedTypes[0] + } + if len(data) == 0 { return parseError(expectedType) } - if expectedType > 0 { - if data[0] != expectedType { - return unexpectedMessageError(expectedType, data[0]) + + if len(expectedTypes) > 0 { + goodType := false + for _, e := range expectedTypes { + if e > 0 && data[0] == e { + goodType = true + break + } + } + if !goodType { + return fmt.Errorf("ssh: unexpected message type %d (expected one of %v)", data[0], expectedTypes) } data = data[1:] } @@ -375,7 +404,7 @@ return fieldError(structType, i, "pointer to unsupported type") } default: - return fieldError(structType, i, "unsupported type") + return fieldError(structType, i, fmt.Sprintf("unsupported type: %v", t)) } } @@ -398,9 +427,9 @@ func marshalStruct(out []byte, msg interface{}) []byte { v := reflect.Indirect(reflect.ValueOf(msg)) - msgType 
:= typeTag(v.Type()) - if msgType > 0 { - out = append(out, msgType) + msgTypes := typeTags(v.Type()) + if len(msgTypes) > 0 { + out = append(out, msgTypes[0]) } for i, n := 0, v.NumField(); i < n; i++ { @@ -687,6 +716,8 @@ msg = new(kexDHReplyMsg) case msgUserAuthRequest: msg = new(userAuthRequestMsg) + case msgUserAuthSuccess: + return new(userAuthSuccessMsg), nil case msgUserAuthFailure: msg = new(userAuthFailureMsg) case msgUserAuthPubKeyOk: @@ -699,6 +730,8 @@ msg = new(globalRequestFailureMsg) case msgChannelOpen: msg = new(channelOpenMsg) + case msgChannelData: + msg = new(channelDataMsg) case msgChannelOpenConfirm: msg = new(channelOpenConfirmMsg) case msgChannelOpenFailure: diff -Nru juju-core-2.0~beta15/src/golang.org/x/crypto/ssh/messages_test.go juju-core-2.0.0/src/golang.org/x/crypto/ssh/messages_test.go --- juju-core-2.0~beta15/src/golang.org/x/crypto/ssh/messages_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/golang.org/x/crypto/ssh/messages_test.go 2016-10-13 14:32:00.000000000 +0000 @@ -172,6 +172,40 @@ } } +func TestMarshalMultiTag(t *testing.T) { + var res struct { + A uint32 `sshtype:"1|2"` + } + + good1 := struct { + A uint32 `sshtype:"1"` + }{ + 1, + } + good2 := struct { + A uint32 `sshtype:"2"` + }{ + 1, + } + + if e := Unmarshal(Marshal(good1), &res); e != nil { + t.Errorf("error unmarshaling multipart tag: %v", e) + } + + if e := Unmarshal(Marshal(good2), &res); e != nil { + t.Errorf("error unmarshaling multipart tag: %v", e) + } + + bad1 := struct { + A uint32 `sshtype:"3"` + }{ + 1, + } + if e := Unmarshal(Marshal(bad1), &res); e == nil { + t.Errorf("bad struct unmarshaled without error") + } +} + func randomBytes(out []byte, rand *rand.Rand) { for i := 0; i < len(out); i++ { out[i] = byte(rand.Int31()) diff -Nru juju-core-2.0~beta15/src/golang.org/x/crypto/ssh/mux.go juju-core-2.0.0/src/golang.org/x/crypto/ssh/mux.go --- juju-core-2.0~beta15/src/golang.org/x/crypto/ssh/mux.go 2016-08-16 08:56:25.000000000 +0000 +++ 
juju-core-2.0.0/src/golang.org/x/crypto/ssh/mux.go 2016-10-13 14:32:00.000000000 +0000 @@ -131,6 +131,9 @@ func (m *mux) sendMessage(msg interface{}) error { p := Marshal(msg) + if debugMux { + log.Printf("send global(%d): %#v", m.chanList.offset, msg) + } return m.conn.writePacket(p) } @@ -175,18 +178,6 @@ return m.sendMessage(globalRequestFailureMsg{Data: data}) } -// TODO(hanwen): Disconnect is a transport layer message. We should -// probably send and receive Disconnect somewhere in the transport -// code. - -// Disconnect sends a disconnect message. -func (m *mux) Disconnect(reason uint32, message string) error { - return m.sendMessage(disconnectMsg{ - Reason: reason, - Message: message, - }) -} - func (m *mux) Close() error { return m.conn.Close() } @@ -236,11 +227,6 @@ } switch packet[0] { - case msgNewKeys: - // Ignore notification of key change. - return nil - case msgDisconnect: - return m.handleDisconnect(packet) case msgChannelOpen: return m.handleChannelOpen(packet) case msgGlobalRequest, msgRequestSuccess, msgRequestFailure: @@ -260,18 +246,6 @@ return ch.handlePacket(packet) } -func (m *mux) handleDisconnect(packet []byte) error { - var d disconnectMsg - if err := Unmarshal(packet, &d); err != nil { - return err - } - - if debugMux { - log.Printf("caught disconnect: %v", d) - } - return &d -} - func (m *mux) handleGlobalPacket(packet []byte) error { msg, err := decode(packet) if err != nil { diff -Nru juju-core-2.0~beta15/src/golang.org/x/crypto/ssh/mux_test.go juju-core-2.0.0/src/golang.org/x/crypto/ssh/mux_test.go --- juju-core-2.0~beta15/src/golang.org/x/crypto/ssh/mux_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/golang.org/x/crypto/ssh/mux_test.go 2016-10-13 14:32:00.000000000 +0000 @@ -331,7 +331,6 @@ ok, data, err) } - clientMux.Disconnect(0, "") if !seen { t.Errorf("never saw 'peek' request") } @@ -378,28 +377,6 @@ } } -func TestMuxDisconnect(t *testing.T) { - a, b := muxPair() - defer a.Close() - defer b.Close() - - go 
func() { - for r := range b.incomingRequests { - r.Reply(true, nil) - } - }() - - a.Disconnect(42, "whatever") - ok, _, err := a.SendRequest("hello", true, nil) - if ok || err == nil { - t.Errorf("got reply after disconnecting") - } - err = b.Wait() - if d, ok := err.(*disconnectMsg); !ok || d.Reason != 42 { - t.Errorf("got %#v, want disconnectMsg{Reason:42}", err) - } -} - func TestMuxCloseChannel(t *testing.T) { r, w, mux := channelPair(t) defer mux.Close() diff -Nru juju-core-2.0~beta15/src/golang.org/x/crypto/ssh/server.go juju-core-2.0.0/src/golang.org/x/crypto/ssh/server.go --- juju-core-2.0~beta15/src/golang.org/x/crypto/ssh/server.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/golang.org/x/crypto/ssh/server.go 2016-10-13 14:32:00.000000000 +0000 @@ -66,9 +66,11 @@ // attempts. AuthLogCallback func(conn ConnMetadata, method string, err error) - // ServerVersion is the version identification string to - // announce in the public handshake. + // ServerVersion is the version identification string to announce in + // the public handshake. // If empty, a reasonable default is used. + // Note that RFC 4253 section 4.2 requires that this string start with + // "SSH-2.0-". ServerVersion string } @@ -186,16 +188,10 @@ tr := newTransport(s.sshConn.conn, config.Rand, false /* not client */) s.transport = newServerTransport(tr, s.clientVersion, s.serverVersion, config) - if err := s.transport.requestKeyChange(); err != nil { + if err := s.transport.requestInitialKeyChange(); err != nil { return nil, err } - if packet, err := s.transport.readPacket(); err != nil { - return nil, err - } else if packet[0] != msgNewKeys { - return nil, unexpectedMessageError(msgNewKeys, packet[0]) - } - // We just did the key change, so the session ID is established. 
s.sessionID = s.transport.getSessionID() @@ -228,7 +224,7 @@ func isAcceptableAlgo(algo string) bool { switch algo { - case KeyAlgoRSA, KeyAlgoDSA, KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521, + case KeyAlgoRSA, KeyAlgoDSA, KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521, KeyAlgoED25519, CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01, CertAlgoECDSA384v01, CertAlgoECDSA521v01: return true } @@ -288,7 +284,6 @@ switch userAuthReq.Method { case "none": if config.NoClientAuth { - s.user = "" authErr = nil } case "password": diff -Nru juju-core-2.0~beta15/src/golang.org/x/crypto/ssh/session.go juju-core-2.0.0/src/golang.org/x/crypto/ssh/session.go --- juju-core-2.0~beta15/src/golang.org/x/crypto/ssh/session.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/golang.org/x/crypto/ssh/session.go 2016-10-13 14:32:00.000000000 +0000 @@ -9,6 +9,7 @@ import ( "bytes" + "encoding/binary" "errors" "fmt" "io" @@ -281,9 +282,10 @@ // copying stdin, stdout, and stderr, and exits with a zero exit // status. // -// If the command fails to run or doesn't complete successfully, the -// error is of type *ExitError. Other error types may be -// returned for I/O problems. +// If the remote server does not send an exit status, an error of type +// *ExitMissingError is returned. If the command completes +// unsuccessfully or is interrupted by a signal, the error is of type +// *ExitError. Other error types may be returned for I/O problems. func (s *Session) Run(cmd string) error { err := s.Start(cmd) if err != nil { @@ -339,7 +341,7 @@ ok, err := s.ch.SendRequest("shell", true, nil) if err == nil && !ok { - return fmt.Errorf("ssh: cound not start shell") + return errors.New("ssh: could not start shell") } if err != nil { return err @@ -370,9 +372,10 @@ // copying stdin, stdout, and stderr, and exits with a zero exit // status. // -// If the command fails to run or doesn't complete successfully, the -// error is of type *ExitError. 
Other error types may be -// returned for I/O problems. +// If the remote server does not send an exit status, an error of type +// *ExitMissingError is returned. If the command completes +// unsuccessfully or is interrupted by a signal, the error is of type +// *ExitError. Other error types may be returned for I/O problems. func (s *Session) Wait() error { if !s.started { return errors.New("ssh: session not started") @@ -400,8 +403,7 @@ for msg := range reqs { switch msg.Type { case "exit-status": - d := msg.Payload - wm.status = int(d[0])<<24 | int(d[1])<<16 | int(d[2])<<8 | int(d[3]) + wm.status = int(binary.BigEndian.Uint32(msg.Payload)) case "exit-signal": var sigval struct { Signal string @@ -431,16 +433,29 @@ if wm.status == -1 { // exit-status was never sent from server if wm.signal == "" { - return errors.New("wait: remote command exited without exit status or exit signal") + // signal was not sent either. RFC 4254 + // section 6.10 recommends against this + // behavior, but it is allowed, so we let + // clients handle it. + return &ExitMissingError{} } wm.status = 128 if _, ok := signals[Signal(wm.signal)]; ok { wm.status += signals[Signal(wm.signal)] } } + return &ExitError{wm} } +// ExitMissingError is returned if a session is torn down cleanly, but +// the server sends no confirmation of the exit status. +type ExitMissingError struct{} + +func (e *ExitMissingError) Error() string { + return "wait: remote command exited without exit status or exit signal" +} + func (s *Session) stdin() { if s.stdinpipe { return @@ -601,5 +616,12 @@ } func (w Waitmsg) String() string { - return fmt.Sprintf("Process exited with: %v. Reason was: %v (%v)", w.status, w.msg, w.signal) + str := fmt.Sprintf("Process exited with status %v", w.status) + if w.signal != "" { + str += fmt.Sprintf(" from signal %v", w.signal) + } + if w.msg != "" { + str += fmt.Sprintf(". 
Reason was: %v", w.msg) + } + return str } diff -Nru juju-core-2.0~beta15/src/golang.org/x/crypto/ssh/session_test.go juju-core-2.0.0/src/golang.org/x/crypto/ssh/session_test.go --- juju-core-2.0~beta15/src/golang.org/x/crypto/ssh/session_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/golang.org/x/crypto/ssh/session_test.go 2016-10-13 14:32:00.000000000 +0000 @@ -297,7 +297,6 @@ } } -// Test WaitMsg is not returned if the channel closes abruptly. func TestExitWithoutStatusOrSignal(t *testing.T) { conn := dial(exitWithoutSignalOrStatus, t) defer conn.Close() @@ -313,11 +312,8 @@ if err == nil { t.Fatalf("expected command to fail but it didn't") } - _, ok := err.(*ExitError) - if ok { - // you can't actually test for errors.errorString - // because it's not exported. - t.Fatalf("expected *errorString but got %T", err) + if _, ok := err.(*ExitMissingError); !ok { + t.Fatalf("got %T want *ExitMissingError", err) } } diff -Nru juju-core-2.0~beta15/src/golang.org/x/crypto/ssh/terminal/terminal_test.go juju-core-2.0.0/src/golang.org/x/crypto/ssh/terminal/terminal_test.go --- juju-core-2.0~beta15/src/golang.org/x/crypto/ssh/terminal/terminal_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/golang.org/x/crypto/ssh/terminal/terminal_test.go 2016-10-13 14:32:00.000000000 +0000 @@ -6,6 +6,7 @@ import ( "io" + "os" "testing" ) @@ -267,3 +268,24 @@ } } } + +func TestMakeRawState(t *testing.T) { + fd := int(os.Stdout.Fd()) + if !IsTerminal(fd) { + t.Skip("stdout is not a terminal; skipping test") + } + + st, err := GetState(fd) + if err != nil { + t.Fatalf("failed to get terminal state from GetState: %s", err) + } + defer Restore(fd, st) + raw, err := MakeRaw(fd) + if err != nil { + t.Fatalf("failed to get terminal state from MakeRaw: %s", err) + } + + if *st != *raw { + t.Errorf("states do not match; was %v, expected %v", raw, st) + } +} diff -Nru juju-core-2.0~beta15/src/golang.org/x/crypto/ssh/terminal/util.go 
juju-core-2.0.0/src/golang.org/x/crypto/ssh/terminal/util.go --- juju-core-2.0~beta15/src/golang.org/x/crypto/ssh/terminal/util.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/golang.org/x/crypto/ssh/terminal/util.go 2016-10-13 14:32:00.000000000 +0000 @@ -44,8 +44,13 @@ } newState := oldState.termios - newState.Iflag &^= syscall.ISTRIP | syscall.INLCR | syscall.ICRNL | syscall.IGNCR | syscall.IXON | syscall.IXOFF - newState.Lflag &^= syscall.ECHO | syscall.ICANON | syscall.ISIG + // This attempts to replicate the behaviour documented for cfmakeraw in + // the termios(3) manpage. + newState.Iflag &^= syscall.IGNBRK | syscall.BRKINT | syscall.PARMRK | syscall.ISTRIP | syscall.INLCR | syscall.IGNCR | syscall.ICRNL | syscall.IXON + newState.Oflag &^= syscall.OPOST + newState.Lflag &^= syscall.ECHO | syscall.ECHONL | syscall.ICANON | syscall.ISIG | syscall.IEXTEN + newState.Cflag &^= syscall.CSIZE | syscall.PARENB + newState.Cflag |= syscall.CS8 if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlWriteTermios, uintptr(unsafe.Pointer(&newState)), 0, 0, 0); err != 0 { return nil, err } diff -Nru juju-core-2.0~beta15/src/golang.org/x/crypto/ssh/terminal/util_plan9.go juju-core-2.0.0/src/golang.org/x/crypto/ssh/terminal/util_plan9.go --- juju-core-2.0~beta15/src/golang.org/x/crypto/ssh/terminal/util_plan9.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/golang.org/x/crypto/ssh/terminal/util_plan9.go 2016-10-13 14:32:00.000000000 +0000 @@ -0,0 +1,58 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package terminal provides support functions for dealing with terminals, as +// commonly found on UNIX systems. 
+// +// Putting a terminal into raw mode is the most common requirement: +// +// oldState, err := terminal.MakeRaw(0) +// if err != nil { +// panic(err) +// } +// defer terminal.Restore(0, oldState) +package terminal + +import ( + "fmt" + "runtime" +) + +type State struct{} + +// IsTerminal returns true if the given file descriptor is a terminal. +func IsTerminal(fd int) bool { + return false +} + +// MakeRaw put the terminal connected to the given file descriptor into raw +// mode and returns the previous state of the terminal so that it can be +// restored. +func MakeRaw(fd int) (*State, error) { + return nil, fmt.Errorf("terminal: MakeRaw not implemented on %s/%s", runtime.GOOS, runtime.GOARCH) +} + +// GetState returns the current state of a terminal which may be useful to +// restore the terminal after a signal. +func GetState(fd int) (*State, error) { + return nil, fmt.Errorf("terminal: GetState not implemented on %s/%s", runtime.GOOS, runtime.GOARCH) +} + +// Restore restores the terminal connected to the given file descriptor to a +// previous state. +func Restore(fd int, state *State) error { + return fmt.Errorf("terminal: Restore not implemented on %s/%s", runtime.GOOS, runtime.GOARCH) +} + +// GetSize returns the dimensions of the given terminal. +func GetSize(fd int) (width, height int, err error) { + return 0, 0, fmt.Errorf("terminal: GetSize not implemented on %s/%s", runtime.GOOS, runtime.GOARCH) +} + +// ReadPassword reads a line of input from a terminal without local echo. This +// is commonly used for inputting passwords and other sensitive data. The slice +// returned does not include the \n. 
+func ReadPassword(fd int) ([]byte, error) { + return nil, fmt.Errorf("terminal: ReadPassword not implemented on %s/%s", runtime.GOOS, runtime.GOARCH) +} diff -Nru juju-core-2.0~beta15/src/golang.org/x/crypto/ssh/terminal/util_windows.go juju-core-2.0.0/src/golang.org/x/crypto/ssh/terminal/util_windows.go --- juju-core-2.0~beta15/src/golang.org/x/crypto/ssh/terminal/util_windows.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/golang.org/x/crypto/ssh/terminal/util_windows.go 2016-10-13 14:32:00.000000000 +0000 @@ -87,8 +87,8 @@ if e != 0 { return nil, error(e) } - st &^= (enableEchoInput | enableProcessedInput | enableLineInput | enableProcessedOutput) - _, _, e = syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(fd), uintptr(st), 0) + raw := st &^ (enableEchoInput | enableProcessedInput | enableLineInput | enableProcessedOutput) + _, _, e = syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(fd), uintptr(raw), 0) if e != 0 { return nil, error(e) } diff -Nru juju-core-2.0~beta15/src/golang.org/x/crypto/ssh/test/session_test.go juju-core-2.0.0/src/golang.org/x/crypto/ssh/test/session_test.go --- juju-core-2.0~beta15/src/golang.org/x/crypto/ssh/test/session_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/golang.org/x/crypto/ssh/test/session_test.go 2016-10-13 14:32:00.000000000 +0000 @@ -280,16 +280,16 @@ var config ssh.Config config.SetDefaults() cipherOrder := config.Ciphers - // This cipher will not be tested when commented out in cipher.go it will + // These ciphers will not be tested when commented out in cipher.go it will // fallback to the next available as per line 292. - cipherOrder = append(cipherOrder, "aes128-cbc") + cipherOrder = append(cipherOrder, "aes128-cbc", "3des-cbc") for _, ciph := range cipherOrder { server := newServer(t) defer server.Shutdown() conf := clientConfig() conf.Ciphers = []string{ciph} - // Don't fail if sshd doesnt have the cipher. + // Don't fail if sshd doesn't have the cipher. 
conf.Ciphers = append(conf.Ciphers, cipherOrder...) conn, err := server.TryDial(conf) if err == nil { @@ -310,7 +310,7 @@ defer server.Shutdown() conf := clientConfig() conf.MACs = []string{mac} - // Don't fail if sshd doesnt have the MAC. + // Don't fail if sshd doesn't have the MAC. conf.MACs = append(conf.MACs, macOrder...) if conn, err := server.TryDial(conf); err == nil { conn.Close() @@ -328,7 +328,7 @@ server := newServer(t) defer server.Shutdown() conf := clientConfig() - // Don't fail if sshd doesnt have the kex. + // Don't fail if sshd doesn't have the kex. conf.KeyExchanges = append([]string{kex}, kexOrder...) conn, err := server.TryDial(conf) if err == nil { @@ -338,3 +338,28 @@ } } } + +func TestClientAuthAlgorithms(t *testing.T) { + for _, key := range []string{ + "rsa", + "dsa", + "ecdsa", + "ed25519", + } { + server := newServer(t) + conf := clientConfig() + conf.SetDefaults() + conf.Auth = []ssh.AuthMethod{ + ssh.PublicKeys(testSigners[key]), + } + + conn, err := server.TryDial(conf) + if err == nil { + conn.Close() + } else { + t.Errorf("failed for key %q", key) + } + + server.Shutdown() + } +} diff -Nru juju-core-2.0~beta15/src/golang.org/x/crypto/ssh/test/testdata_test.go juju-core-2.0.0/src/golang.org/x/crypto/ssh/test/testdata_test.go --- juju-core-2.0~beta15/src/golang.org/x/crypto/ssh/test/testdata_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/golang.org/x/crypto/ssh/test/testdata_test.go 2016-10-13 14:32:00.000000000 +0000 @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// IMPLEMENTOR NOTE: To avoid a package loop, this file is in three places: +// IMPLEMENTATION NOTE: To avoid a package loop, this file is in three places: // ssh/, ssh/agent, and ssh/test/. It should be kept in sync across all three // instances. 
diff -Nru juju-core-2.0~beta15/src/golang.org/x/crypto/ssh/test/test_unix_test.go juju-core-2.0.0/src/golang.org/x/crypto/ssh/test/test_unix_test.go --- juju-core-2.0~beta15/src/golang.org/x/crypto/ssh/test/test_unix_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/golang.org/x/crypto/ssh/test/test_unix_test.go 2016-10-13 14:32:00.000000000 +0000 @@ -41,11 +41,12 @@ StrictModes no RSAAuthentication yes PubkeyAuthentication yes -AuthorizedKeysFile {{.Dir}}/id_user.pub +AuthorizedKeysFile {{.Dir}}/authorized_keys TrustedUserCAKeys {{.Dir}}/id_ecdsa.pub IgnoreRhosts yes RhostsRSAAuthentication no HostbasedAuthentication no +PubkeyAcceptedKeyTypes=* ` var configTmpl = template.Must(template.New("").Parse(sshd_config)) @@ -249,6 +250,12 @@ writeFile(filepath.Join(dir, filename+".pub"), ssh.MarshalAuthorizedKey(testPublicKeys[k])) } + var authkeys bytes.Buffer + for k, _ := range testdata.PEMBytes { + authkeys.Write(ssh.MarshalAuthorizedKey(testPublicKeys[k])) + } + writeFile(filepath.Join(dir, "authorized_keys"), authkeys.Bytes()) + return &server{ t: t, configfile: f.Name(), diff -Nru juju-core-2.0~beta15/src/golang.org/x/crypto/ssh/testdata/keys.go juju-core-2.0.0/src/golang.org/x/crypto/ssh/testdata/keys.go --- juju-core-2.0~beta15/src/golang.org/x/crypto/ssh/testdata/keys.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/golang.org/x/crypto/ssh/testdata/keys.go 2016-10-13 14:32:00.000000000 +0000 @@ -25,15 +25,29 @@ -----END EC PRIVATE KEY----- `), "rsa": []byte(`-----BEGIN RSA PRIVATE KEY----- -MIIBOwIBAAJBALdGZxkXDAjsYk10ihwU6Id2KeILz1TAJuoq4tOgDWxEEGeTrcld -r/ZwVaFzjWzxaf6zQIJbfaSEAhqD5yo72+sCAwEAAQJBAK8PEVU23Wj8mV0QjwcJ -tZ4GcTUYQL7cF4+ezTCE9a1NrGnCP2RuQkHEKxuTVrxXt+6OF15/1/fuXnxKjmJC -nxkCIQDaXvPPBi0c7vAxGwNY9726x01/dNbHCE0CBtcotobxpwIhANbbQbh3JHVW -2haQh4fAG5mhesZKAGcxTyv4mQ7uMSQdAiAj+4dzMpJWdSzQ+qGHlHMIBvVHLkqB -y2VdEyF7DPCZewIhAI7GOI/6LDIFOvtPo6Bj2nNmyQ1HU6k/LRtNIXi4c9NJAiAr -rrxx26itVhJmcvoUhOjwuzSlP2bE5VHAvkGB352YBg== 
+MIICXAIBAAKBgQC8A6FGHDiWCSREAXCq6yBfNVr0xCVG2CzvktFNRpue+RXrGs/2 +a6ySEJQb3IYquw7HlJgu6fg3WIWhOmHCjfpG0PrL4CRwbqQ2LaPPXhJErWYejcD8 +Di00cF3677+G10KMZk9RXbmHtuBFZT98wxg8j+ZsBMqGM1+7yrWUvynswQIDAQAB +AoGAJMCk5vqfSRzyXOTXLGIYCuR4Kj6pdsbNSeuuRGfYBeR1F2c/XdFAg7D/8s5R +38p/Ih52/Ty5S8BfJtwtvgVY9ecf/JlU/rl/QzhG8/8KC0NG7KsyXklbQ7gJT8UT +Ojmw5QpMk+rKv17ipDVkQQmPaj+gJXYNAHqImke5mm/K/h0CQQDciPmviQ+DOhOq +2ZBqUfH8oXHgFmp7/6pXw80DpMIxgV3CwkxxIVx6a8lVH9bT/AFySJ6vXq4zTuV9 +6QmZcZzDAkEA2j/UXJPIs1fQ8z/6sONOkU/BjtoePFIWJlRxdN35cZjXnBraX5UR +fFHkePv4YwqmXNqrBOvSu+w2WdSDci+IKwJAcsPRc/jWmsrJW1q3Ha0hSf/WG/Bu +X7MPuXaKpP/DkzGoUmb8ks7yqj6XWnYkPNLjCc8izU5vRwIiyWBRf4mxMwJBAILa +NDvRS0rjwt6lJGv7zPZoqDc65VfrK2aNyHx2PgFyzwrEOtuF57bu7pnvEIxpLTeM +z26i6XVMeYXAWZMTloMCQBbpGgEERQpeUknLBqUHhg/wXF6+lFA+vEGnkY+Dwab2 +KCXFGd+SQ5GdUcEMe9isUH6DYj/6/yCDoFrXXmpQb+M= -----END RSA PRIVATE KEY----- `), + "ed25519": []byte(`-----BEGIN OPENSSH PRIVATE KEY----- +b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAMwAAAAtzc2gtZW +QyNTUxOQAAACA+3f7hS7g5UWwXOGVTrMfhmxyrjqz7Sxxbx7I1j8DvvwAAAJhAFfkOQBX5 +DgAAAAtzc2gtZWQyNTUxOQAAACA+3f7hS7g5UWwXOGVTrMfhmxyrjqz7Sxxbx7I1j8Dvvw +AAAEAaYmXltfW6nhRo3iWGglRB48lYq0z0Q3I3KyrdutEr6j7d/uFLuDlRbBc4ZVOsx+Gb +HKuOrPtLHFvHsjWPwO+/AAAAE2dhcnRvbm1AZ2FydG9ubS14cHMBAg== +-----END OPENSSH PRIVATE KEY----- +`), "user": []byte(`-----BEGIN EC PRIVATE KEY----- MHcCAQEEILYCAeq8f7V4vSSypRw7pxy8yz3V5W4qg8kSC3zJhqpQoAoGCCqGSM49 AwEHoUQDQgAEYcO2xNKiRUYOLEHM7VYAp57HNyKbOdYtHD83Z4hzNPVC4tM5mdGD diff -Nru juju-core-2.0~beta15/src/golang.org/x/crypto/ssh/testdata_test.go juju-core-2.0.0/src/golang.org/x/crypto/ssh/testdata_test.go --- juju-core-2.0~beta15/src/golang.org/x/crypto/ssh/testdata_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/golang.org/x/crypto/ssh/testdata_test.go 2016-10-13 14:32:00.000000000 +0000 @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// IMPLEMENTOR NOTE: To avoid a package loop, this file is in three places: +// IMPLEMENTATION NOTE: To avoid a package loop, this file is in three places: // ssh/, ssh/agent, and ssh/test/. It should be kept in sync across all three // instances. diff -Nru juju-core-2.0~beta15/src/golang.org/x/crypto/ssh/transport.go juju-core-2.0.0/src/golang.org/x/crypto/ssh/transport.go --- juju-core-2.0~beta15/src/golang.org/x/crypto/ssh/transport.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/golang.org/x/crypto/ssh/transport.go 2016-10-13 14:32:00.000000000 +0000 @@ -11,8 +11,9 @@ ) const ( - gcmCipherID = "aes128-gcm@openssh.com" - aes128cbcID = "aes128-cbc" + gcmCipherID = "aes128-gcm@openssh.com" + aes128cbcID = "aes128-cbc" + tripledescbcID = "3des-cbc" ) // packetConn represents a transport that implements packet based @@ -39,19 +40,6 @@ rand io.Reader io.Closer - - // Initial H used for the session ID. Once assigned this does - // not change, even during subsequent key exchanges. - sessionID []byte -} - -// getSessionID returns the ID of the SSH connection. The return value -// should not be modified. -func (t *transport) getSessionID() []byte { - if t.sessionID == nil { - panic("session ID not set yet") - } - return t.sessionID } // packetCipher represents a combination of SSH encryption/MAC @@ -81,12 +69,6 @@ // both directions are triggered by reading and writing a msgNewKey packet // respectively. 
func (t *transport) prepareKeyChange(algs *algorithms, kexResult *kexResult) error { - if t.sessionID == nil { - t.sessionID = kexResult.H - } - - kexResult.SessionID = t.sessionID - if ciph, err := newPacketCipher(t.reader.dir, algs.r, kexResult); err != nil { return err } else { @@ -114,12 +96,27 @@ err = errors.New("ssh: zero length packet") } - if len(packet) > 0 && packet[0] == msgNewKeys { - select { - case cipher := <-s.pendingKeyChange: - s.packetCipher = cipher - default: - return nil, errors.New("ssh: got bogus newkeys message.") + if len(packet) > 0 { + switch packet[0] { + case msgNewKeys: + select { + case cipher := <-s.pendingKeyChange: + s.packetCipher = cipher + default: + return nil, errors.New("ssh: got bogus newkeys message.") + } + + case msgDisconnect: + // Transform a disconnect message into an + // error. Since this is lowest level at which + // we interpret message types, doing it here + // ensures that we don't have to handle it + // elsewhere. + var msg disconnectMsg + if err := Unmarshal(packet, &msg); err != nil { + return nil, err + } + return nil, &msg } } @@ -223,6 +220,10 @@ return newAESCBCCipher(iv, key, macKey, algs) } + if algs.Cipher == tripledescbcID { + return newTripleDESCBCCipher(iv, key, macKey, algs) + } + c := &streamPacketCipher{ mac: macModes[algs.MAC].new(macKey), } diff -Nru juju-core-2.0~beta15/src/golang.org/x/net/.gitignore juju-core-2.0.0/src/golang.org/x/net/.gitignore --- juju-core-2.0~beta15/src/golang.org/x/net/.gitignore 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/golang.org/x/net/.gitignore 2016-10-13 14:32:27.000000000 +0000 @@ -0,0 +1,2 @@ +# Add no patterns to .hgignore except for files generated by the build. 
+last-change diff -Nru juju-core-2.0~beta15/src/google.golang.org/api/.hgignore juju-core-2.0.0/src/google.golang.org/api/.hgignore --- juju-core-2.0~beta15/src/google.golang.org/api/.hgignore 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/google.golang.org/api/.hgignore 2016-10-13 14:32:21.000000000 +0000 @@ -0,0 +1,11 @@ +_obj +_testmain.go +clientid.dat +clientsecret.dat +google-api-go-generator/google-api-go-gen + +syntax:glob +*.6 +*.8 +*~ +*.out diff -Nru juju-core-2.0~beta15/src/google.golang.org/api/.hgtags juju-core-2.0.0/src/google.golang.org/api/.hgtags --- juju-core-2.0~beta15/src/google.golang.org/api/.hgtags 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/google.golang.org/api/.hgtags 2016-10-13 14:32:21.000000000 +0000 @@ -0,0 +1 @@ +b571b553f8c057cb6952ce817dfb09b6e34a8c0b release diff -Nru juju-core-2.0~beta15/src/gopkg.in/amz.v3/ec2/ec2test/vpcs.go juju-core-2.0.0/src/gopkg.in/amz.v3/ec2/ec2test/vpcs.go --- juju-core-2.0~beta15/src/gopkg.in/amz.v3/ec2/ec2test/vpcs.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/amz.v3/ec2/ec2test/vpcs.go 2016-10-13 14:32:26.000000000 +0000 @@ -198,7 +198,9 @@ return v.State == value, nil case "vpc-id": return v.Id == value, nil - case "tag", "tag-key", "tag-value", "dhcp-options-id", "isDefault": + case "isDefault": + return v.IsDefault == (value == "true"), nil + case "tag", "tag-key", "tag-value", "dhcp-options-id": return false, fmt.Errorf("%q filter is not implemented", attr) } return false, fmt.Errorf("unknown attribute %q", attr) diff -Nru juju-core-2.0~beta15/src/gopkg.in/amz.v3/ec2/ec2test/vpcs_test.go juju-core-2.0.0/src/gopkg.in/amz.v3/ec2/ec2test/vpcs_test.go --- juju-core-2.0~beta15/src/gopkg.in/amz.v3/ec2/ec2test/vpcs_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/amz.v3/ec2/ec2test/vpcs_test.go 2016-10-13 14:32:26.000000000 +0000 @@ -227,6 +227,31 @@ s.assertOnlyZonesRemain(c) } +func (s *S) TestVPCsIsDefaultFilter(c *C) { 
+ s.resetAllButZones(c) + s.srv.AddVPC(ec2.VPC{ + State: "insane", + CIDRBlock: "0.1.2.0/24", + IsDefault: false, + InstanceTenancy: "foo", + }) + vpc1 := s.srv.AddVPC(ec2.VPC{ + State: "insane", + CIDRBlock: "0.2.4.0/24", + IsDefault: true, + InstanceTenancy: "foo", + }) + + filter := ec2.NewFilter() + filter.Add("isDefault", "true") + resp, err := s.ec2.VPCs(nil, filter) + c.Assert(err, IsNil) + c.Assert(resp, NotNil) + c.Assert(resp.VPCs, HasLen, 1) + c.Assert(resp.VPCs[0].IsDefault, Equals, true) + c.Assert(resp.VPCs[0].Id, Equals, vpc1.Id) +} + // patchValue sets the value pointed to by the given destination to // the given value, and returns a function to restore it to its // original value. The value must be assignable to the element type of diff -Nru juju-core-2.0~beta15/src/gopkg.in/amz.v3/.gitignore juju-core-2.0.0/src/gopkg.in/amz.v3/.gitignore --- juju-core-2.0~beta15/src/gopkg.in/amz.v3/.gitignore 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/amz.v3/.gitignore 2016-10-13 14:32:26.000000000 +0000 @@ -0,0 +1,10 @@ +tags +!tags/ +TAGS +!TAGS/ +.emacs.desktop +.emacs.desktop.lock +*.test +*.sw[nop] +cover +.#* diff -Nru juju-core-2.0~beta15/src/gopkg.in/amz.v3/s3/s3.go juju-core-2.0.0/src/gopkg.in/amz.v3/s3/s3.go --- juju-core-2.0~beta15/src/gopkg.in/amz.v3/s3/s3.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/amz.v3/s3/s3.go 2016-10-13 14:32:26.000000000 +0000 @@ -434,7 +434,7 @@ if err != nil { return "", err } - req.Header.Add("date", time.Now().Format(aws.ISO8601BasicFormat)) + req.Header.Add("date", time.Now().UTC().Format(aws.ISO8601BasicFormat)) if err := aws.SignV4URL(req, b.Auth, b.Region.Name, "s3", expires); err != nil { return "", err diff -Nru juju-core-2.0~beta15/src/gopkg.in/check.v1/.gitignore juju-core-2.0.0/src/gopkg.in/check.v1/.gitignore --- juju-core-2.0~beta15/src/gopkg.in/check.v1/.gitignore 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/check.v1/.gitignore 
2016-10-13 14:32:01.000000000 +0000 @@ -0,0 +1,4 @@ +_* +*.swp +*.[568] +[568].out diff -Nru juju-core-2.0~beta15/src/gopkg.in/goose.v1/.gitignore juju-core-2.0.0/src/gopkg.in/goose.v1/.gitignore --- juju-core-2.0~beta15/src/gopkg.in/goose.v1/.gitignore 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/goose.v1/.gitignore 2016-10-13 14:32:24.000000000 +0000 @@ -0,0 +1,29 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof + +./tags +testservices/testservices* +TAGS +*.sw[nop] diff -Nru juju-core-2.0~beta15/src/gopkg.in/ini.v1/.gitignore juju-core-2.0.0/src/gopkg.in/ini.v1/.gitignore --- juju-core-2.0~beta15/src/gopkg.in/ini.v1/.gitignore 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/ini.v1/.gitignore 2016-10-13 14:32:03.000000000 +0000 @@ -0,0 +1,4 @@ +testdata/conf_out.ini +ini.sublime-project +ini.sublime-workspace +testdata/conf_reflect.ini diff -Nru juju-core-2.0~beta15/src/gopkg.in/juju/charmrepo.v2-unstable/charmstore.go juju-core-2.0.0/src/gopkg.in/juju/charmrepo.v2-unstable/charmstore.go --- juju-core-2.0~beta15/src/gopkg.in/juju/charmrepo.v2-unstable/charmstore.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/juju/charmrepo.v2-unstable/charmstore.go 2016-10-13 14:32:11.000000000 +0000 @@ -12,6 +12,7 @@ "net/url" "os" "path/filepath" + "sort" "github.com/juju/utils" "gopkg.in/errgo.v1" @@ -259,19 +260,44 @@ if explicitChannel != params.NoChannel { return explicitChannel } + if len(published) == 0 { + return params.UnpublishedChannel + } - bestChannel := params.UnpublishedChannel - for _, info := range published { - // TODO(ericsnow) Favor the one with info.Current == true? 
- switch info.Channel { - case params.StableChannel: - bestChannel = info.Channel - break - case params.DevelopmentChannel: - bestChannel = info.Channel - default: - panic(fmt.Sprintf("unknown channel %q", info.Channel)) + // Note the the meta/published endpoint returns results in stability level + // order. For instance, the stable channel comes first, then candidate etc. + // TODO frankban: that said, while the old charm store is being used, we + // still need to sort them. Later, we will be able to just + // "return published[0].Channel" here. + // TODO(ericsnow) Favor the one with info.Current == true? + channels := make([]params.Channel, len(published)) + for i, result := range published { + channels[i] = result.Channel + } + sortChannels(channels) + return channels[0] +} + +// oldChannels maps old charm store channels with their stability level. +var oldChannels = map[params.Channel]int{ + params.StableChannel: 1, + params.DevelopmentChannel: 2, + params.UnpublishedChannel: 3, +} + +// sortChannels sorts the given channels by stability level, most stable first. +func sortChannels(channels []params.Channel) { + for _, channel := range channels { + if _, ok := oldChannels[channel]; !ok { + return } } - return bestChannel + // All channels are old: sort in legacy order. 
+ sort.Sort(orderedOldChannels(channels)) } + +type orderedOldChannels []params.Channel + +func (o orderedOldChannels) Len() int { return len(o) } +func (o orderedOldChannels) Swap(i, j int) { o[i], o[j] = o[j], o[i] } +func (o orderedOldChannels) Less(i, j int) bool { return oldChannels[o[i]] < oldChannels[o[j]] } diff -Nru juju-core-2.0~beta15/src/gopkg.in/juju/charmrepo.v2-unstable/charmstore_test.go juju-core-2.0.0/src/gopkg.in/juju/charmrepo.v2-unstable/charmstore_test.go --- juju-core-2.0~beta15/src/gopkg.in/juju/charmrepo.v2-unstable/charmstore_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/juju/charmrepo.v2-unstable/charmstore_test.go 2016-10-13 14:32:11.000000000 +0000 @@ -492,8 +492,8 @@ clientChannel: params.StableChannel, expected: params.StableChannel, }, { - clientChannel: params.DevelopmentChannel, - expected: params.DevelopmentChannel, + clientChannel: params.EdgeChannel, + expected: params.EdgeChannel, }, { clientChannel: params.UnpublishedChannel, expected: params.UnpublishedChannel, @@ -504,26 +504,35 @@ published: []params.Channel{params.StableChannel}, expected: params.StableChannel, }, { - published: []params.Channel{params.DevelopmentChannel}, - expected: params.DevelopmentChannel, + published: []params.Channel{params.EdgeChannel}, + expected: params.EdgeChannel, }, { - published: []params.Channel{params.StableChannel, params.DevelopmentChannel}, + published: []params.Channel{params.StableChannel, params.EdgeChannel}, expected: params.StableChannel, }, { - published: []params.Channel{params.DevelopmentChannel, params.StableChannel}, + published: []params.Channel{params.EdgeChannel, params.StableChannel}, expected: params.StableChannel, }, { + published: []params.Channel{params.EdgeChannel, params.BetaChannel, params.CandidateChannel}, + expected: params.CandidateChannel, + }, { clientChannel: params.StableChannel, - published: []params.Channel{params.DevelopmentChannel, params.StableChannel}, + published: 
[]params.Channel{params.EdgeChannel, params.StableChannel}, expected: params.StableChannel, }, { - clientChannel: params.DevelopmentChannel, - published: []params.Channel{params.StableChannel, params.DevelopmentChannel}, - expected: params.DevelopmentChannel, + clientChannel: params.EdgeChannel, + published: []params.Channel{params.StableChannel, params.EdgeChannel}, + expected: params.EdgeChannel, }, { clientChannel: params.UnpublishedChannel, published: []params.Channel{params.StableChannel}, expected: params.UnpublishedChannel, + }, { + clientChannel: params.CandidateChannel, + published: []params.Channel{params.EdgeChannel, params.CandidateChannel, params.StableChannel}, + expected: params.CandidateChannel, + }, { + expected: params.UnpublishedChannel, }} ch := TestCharms.CharmArchive(c.MkDir(), "mysql") @@ -550,6 +559,45 @@ } } +var sortChannelsTests = []struct { + input []params.Channel + sorted []params.Channel +}{{ + input: []params.Channel{params.StableChannel, params.CandidateChannel, params.EdgeChannel}, + sorted: []params.Channel{params.StableChannel, params.CandidateChannel, params.EdgeChannel}, +}, { + input: []params.Channel{params.DevelopmentChannel, params.StableChannel}, + sorted: []params.Channel{params.StableChannel, params.DevelopmentChannel}, +}, { + input: []params.Channel{params.StableChannel, params.DevelopmentChannel}, + sorted: []params.Channel{params.StableChannel, params.DevelopmentChannel}, +}, { + input: []params.Channel{params.UnpublishedChannel, params.DevelopmentChannel}, + sorted: []params.Channel{params.DevelopmentChannel, params.UnpublishedChannel}, +}, { + input: []params.Channel{params.StableChannel, params.Channel("brand-new"), params.BetaChannel}, + sorted: []params.Channel{params.StableChannel, params.Channel("brand-new"), params.BetaChannel}, +}, { + input: []params.Channel{params.StableChannel}, + sorted: []params.Channel{params.StableChannel}, +}, { + input: []params.Channel{params.DevelopmentChannel}, + sorted: 
[]params.Channel{params.DevelopmentChannel}, +}, { + input: []params.Channel{params.UnpublishedChannel}, + sorted: []params.Channel{params.UnpublishedChannel}, +}, { +// No channels provided. +}} + +func (s *charmStoreRepoSuite) TestSortChannels(c *gc.C) { + for i, test := range sortChannelsTests { + c.Logf("\ntest %d: %v", i, test.input) + charmrepo.SortChannels(test.input) + c.Assert(test.input, jc.DeepEquals, test.sorted) + } +} + // hashOfCharm returns the SHA256 hash sum for the given charm name. func hashOfCharm(c *gc.C, name string) string { path := TestCharms.CharmArchivePath(c.MkDir(), name) diff -Nru juju-core-2.0~beta15/src/gopkg.in/juju/charmrepo.v2-unstable/csclient/csclient_test.go juju-core-2.0.0/src/gopkg.in/juju/charmrepo.v2-unstable/csclient/csclient_test.go --- juju-core-2.0~beta15/src/gopkg.in/juju/charmrepo.v2-unstable/csclient/csclient_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/juju/charmrepo.v2-unstable/csclient/csclient_test.go 2016-10-13 14:32:11.000000000 +0000 @@ -1183,8 +1183,8 @@ } c.Assert(makeRequest(client), gc.Equals, "") - devClient := client.WithChannel(params.DevelopmentChannel) - c.Assert(makeRequest(devClient), gc.Equals, "channel=development") + devClient := client.WithChannel(params.EdgeChannel) + c.Assert(makeRequest(devClient), gc.Equals, "channel="+string(params.EdgeChannel)) // Ensure the original client has not been mutated. c.Assert(makeRequest(client), gc.Equals, "") } @@ -1606,12 +1606,12 @@ url, err := s.client.UploadCharm(id, ch) c.Assert(err, gc.IsNil) - // have to make a new repo from the client, since the embedded repo is not + // Have to make a new repo from the client, since the embedded repo is not // authenticated. 
- err = s.client.Publish(url, []params.Channel{params.DevelopmentChannel}, nil) + err = s.client.Publish(url, []params.Channel{params.EdgeChannel}, nil) c.Assert(err, jc.ErrorIsNil) - client := s.client.WithChannel(params.DevelopmentChannel) + client := s.client.WithChannel(params.EdgeChannel) err = client.Get("/"+url.Path()+"/meta/id", nil) c.Assert(err, jc.ErrorIsNil) diff -Nru juju-core-2.0~beta15/src/gopkg.in/juju/charmrepo.v2-unstable/csclient/params/params.go juju-core-2.0.0/src/gopkg.in/juju/charmrepo.v2-unstable/csclient/params/params.go --- juju-core-2.0~beta15/src/gopkg.in/juju/charmrepo.v2-unstable/csclient/params/params.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/juju/charmrepo.v2-unstable/csclient/params/params.go 2016-10-13 14:32:11.000000000 +0000 @@ -35,8 +35,15 @@ type Channel string const ( - // DevelopmentChannel is the channel used for charms or bundles under development. - DevelopmentChannel Channel = "development" + // EdgeChannel is the channel used for charms or bundles under development. + EdgeChannel Channel = "edge" + + // BetaChannel is the channel used for beta charms or bundles. + BetaChannel Channel = "beta" + + // CandidateChannel is the channel used for charms or bundles release + // candidates. + CandidateChannel Channel = "candidate" // StableChannel is the channel used for stable charms or bundles. StableChannel Channel = "stable" @@ -46,8 +53,30 @@ // NoChannel represents where no channel has been specifically requested. NoChannel Channel = "" + + // DevelopmentChannel is only defined for backward compatibility. + DevelopmentChannel Channel = "development" ) +// OrderedChannels holds the list of valid channels in order of publishing +// status, most stable first. +var OrderedChannels = []Channel{ + StableChannel, + CandidateChannel, + BetaChannel, + EdgeChannel, + UnpublishedChannel, +} + +// ValidChannels holds the set of all allowed channels for an entity. 
+var ValidChannels = func() map[Channel]bool { + channels := make(map[Channel]bool, len(OrderedChannels)) + for _, ch := range OrderedChannels { + channels[ch] = true + } + return channels +}() + // MetaAnyResponse holds the result of a meta/any request. // See https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetaany type MetaAnyResponse EntityResult diff -Nru juju-core-2.0~beta15/src/gopkg.in/juju/charmrepo.v2-unstable/dependencies.tsv juju-core-2.0.0/src/gopkg.in/juju/charmrepo.v2-unstable/dependencies.tsv --- juju-core-2.0~beta15/src/gopkg.in/juju/charmrepo.v2-unstable/dependencies.tsv 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/juju/charmrepo.v2-unstable/dependencies.tsv 2016-10-13 14:32:11.000000000 +0000 @@ -1,4 +1,6 @@ github.com/ajstarks/svgo git 89e3ac64b5b3e403a5e7c35ea4f98d45db7b4518 2014-10-04T21:11:59Z +github.com/beorn7/perks git 3ac7bf7a47d159a033b107610db8a1b6575507a4 2016-02-29T21:34:45Z +github.com/golang/protobuf git 34a5f244f1c01cdfee8e60324258cfbb97a42aec 2015-05-26T01:21:09Z github.com/juju/blobstore git 06056004b3d7b54bbb7984d830c537bad00fec21 2015-07-29T11:18:58Z github.com/juju/errors git 1b5e39b83d1835fa480e0c2ddefb040ee82d58b3 2015-09-16T12:56:42Z github.com/juju/gojsonpointer git afe8b77aa08f272b49e01b82de78510c11f61500 2015-02-04T19:46:29Z @@ -18,16 +20,21 @@ github.com/juju/xml git eb759a627588d35166bc505fceb51b88500e291e 2015-04-13T13:11:21Z github.com/juju/zip git f6b1e93fa2e29a1d7d49b566b2b51efb060c982a 2016-02-05T10:52:21Z github.com/julienschmidt/httprouter git 77a895ad01ebc98a4dc95d8355bc825ce80a56f6 2015-10-13T22:55:20Z +github.com/matttproud/golang_protobuf_extensions git c12348ce28de40eed0136aa2b644d0ee0650e56c 2016-04-24T11:30:07Z +github.com/prometheus/client_golang git b90ee0840e8e7dfb84c08d13b9c4f3a794586a21 2016-05-13T04:20:11Z +github.com/prometheus/client_model git fa8ad6fec33561be4280a8f0514318c79d7f6cb6 2015-02-12T10:17:44Z +github.com/prometheus/common git 
dd586c1c5abb0be59e60f942c22af711a2008cb4 2016-05-03T22:05:32Z +github.com/prometheus/procfs git abf152e5f3e97f2fafac028d2cc06c1feb87ffa5 2016-04-11T19:08:41Z github.com/rogpeppe/fastuuid git 6724a57986aff9bff1a1770e9347036def7c89f6 2015-01-06T09:32:20Z golang.org/x/crypto git aedad9a179ec1ea11b7064c57cbc6dc30d7724ec 2015-08-30T18:06:42Z golang.org/x/net git ea47fc708ee3e20177f3ca3716217c4ab75942cb 2015-08-29T23:03:18Z gopkg.in/check.v1 git 4f90aeace3a26ad7021961c297b22c42160c7b25 2016-01-05T16:49:36Z gopkg.in/errgo.v1 git 66cb46252b94c1f3d65646f54ee8043ab38d766c 2015-10-07T15:31:57Z gopkg.in/juju/charm.v6-unstable git 503191ddf2590b15db9669c424660ed612d0a30a 2016-05-27T01:46:20Z -gopkg.in/juju/charmstore.v5-unstable git 2cb9f80553dddaae8c5e2161ea45f4be5d9afc00 2016-05-27T11:46:22Z -gopkg.in/juju/jujusvg.v1 git cc128825adce31ea13020d24e7b3302bac86a8c3 2016-05-30T22:53:36Z +gopkg.in/juju/charmstore.v5-unstable git 714c25cd43dbb8eb51f57a55d76d521fc2126455 2016-08-09T13:45:06Z +gopkg.in/juju/jujusvg.v2 git d82160011935ef79fc7aca84aba2c6f74700fe75 2016-06-09T10:52:15Z gopkg.in/juju/names.v2 git e38bc90539f22af61a9c656d35068bd5f0a5b30a 2016-05-25T23:07:23Z -gopkg.in/macaroon-bakery.v1 git b097c9d99b2537efaf54492e08f7e148f956ba51 2016-05-24T09:38:11Z +gopkg.in/macaroon-bakery.v1 git 469b44e6f1f9479e115c8ae879ef80695be624d5 2016-06-22T12:14:21Z gopkg.in/macaroon.v1 git ab3940c6c16510a850e1c2dd628b919f0f3f1464 2015-01-21T11:42:31Z gopkg.in/mgo.v2 git 4d04138ffef2791c479c0c8bbffc30b34081b8d9 2015-10-26T16:34:53Z gopkg.in/natefinch/lumberjack.v2 git 514cbda263a734ae8caac038dadf05f8f3f9f738 2016-01-25T11:17:49Z diff -Nru juju-core-2.0~beta15/src/gopkg.in/juju/charmrepo.v2-unstable/export_test.go juju-core-2.0.0/src/gopkg.in/juju/charmrepo.v2-unstable/export_test.go --- juju-core-2.0~beta15/src/gopkg.in/juju/charmrepo.v2-unstable/export_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/juju/charmrepo.v2-unstable/export_test.go 2016-10-13 
14:32:11.000000000 +0000 @@ -0,0 +1,6 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package charmrepo // import "gopkg.in/juju/charmrepo.v2-unstable" + +var SortChannels = sortChannels diff -Nru juju-core-2.0~beta15/src/gopkg.in/juju/charmstore.v5-unstable/cmd/charmd/config.yaml juju-core-2.0.0/src/gopkg.in/juju/charmstore.v5-unstable/cmd/charmd/config.yaml --- juju-core-2.0~beta15/src/gopkg.in/juju/charmstore.v5-unstable/cmd/charmd/config.yaml 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/juju/charmstore.v5-unstable/cmd/charmd/config.yaml 2016-10-13 14:32:25.000000000 +0000 @@ -8,8 +8,9 @@ #identity-public-key: CIdWcEUN+0OZnKW9KwruRQnQDY/qqzVdD30CijwiWCk= #identity-api-url: http://localhost:8081 # For production identity manager. -identity-public-key: hmHaPgCC1UfuhYHUSX5+aihSAZesqpVdjRv0mgfIwjo= +identity-public-key: o/yOqSNWncMo1GURWuez/dGR30TscmmuIxgjztpoHEY= identity-api-url: https://api.jujucharms.com/identity +identity-location: https://api.jujucharms.com/identity/v1/discharger # Agent credentials. 
#agent-username: charmstore@admin@idm #agent-key: @@ -21,3 +22,4 @@ #search-cache-max-age: 0s # Uncomment to test with a terms service running locally #terms-location: localhost:8085 +access-log: /var/log/charmstore/access.log diff -Nru juju-core-2.0~beta15/src/gopkg.in/juju/charmstore.v5-unstable/cmd/charmd/main.go juju-core-2.0.0/src/gopkg.in/juju/charmstore.v5-unstable/cmd/charmd/main.go --- juju-core-2.0~beta15/src/gopkg.in/juju/charmstore.v5-unstable/cmd/charmd/main.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/juju/charmstore.v5-unstable/cmd/charmd/main.go 2016-10-13 14:32:25.000000000 +0000 @@ -10,6 +10,7 @@ "os" "path/filepath" + "github.com/gorilla/handlers" "github.com/juju/loggo" "gopkg.in/errgo.v1" "gopkg.in/macaroon-bakery.v1/bakery" @@ -20,7 +21,6 @@ "gopkg.in/juju/charmstore.v5-unstable" "gopkg.in/juju/charmstore.v5-unstable/config" "gopkg.in/juju/charmstore.v5-unstable/elasticsearch" - "gopkg.in/juju/charmstore.v5-unstable/internal/debug" ) var ( @@ -116,9 +116,18 @@ if err != nil { return errgo.Notef(err, "cannot create new server at %q", conf.APIAddr) } - + handler := server.(http.Handler) + if conf.AccessLog != "" { + accesslog := &lumberjack.Logger{ + Filename: conf.AccessLog, + MaxSize: 500, // megabytes + MaxBackups: 3, + MaxAge: 28, //days + } + handler = handlers.CombinedLoggingHandler(accesslog, handler) + } logger.Infof("starting the API server") - return http.ListenAndServe(conf.APIAddr, debug.Handler("", server)) + return http.ListenAndServe(conf.APIAddr, handler) } func addPublicKey(ring *bakery.PublicKeyRing, loc string, key *bakery.PublicKey) error { diff -Nru juju-core-2.0~beta15/src/gopkg.in/juju/charmstore.v5-unstable/config/config.go juju-core-2.0.0/src/gopkg.in/juju/charmstore.v5-unstable/config/config.go --- juju-core-2.0~beta15/src/gopkg.in/juju/charmstore.v5-unstable/config/config.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/juju/charmstore.v5-unstable/config/config.go 
2016-10-13 14:32:25.000000000 +0000 @@ -40,6 +40,7 @@ StatsCacheMaxAge DurationString `yaml:"stats-cache-max-age,omitempty"` SearchCacheMaxAge DurationString `yaml:"search-cache-max-age,omitempty"` Database string `yaml:"database,omitempty"` + AccessLog string `yaml:"access-log"` } func (c *Config) validate() error { diff -Nru juju-core-2.0~beta15/src/gopkg.in/juju/charmstore.v5-unstable/dependencies.tsv juju-core-2.0.0/src/gopkg.in/juju/charmstore.v5-unstable/dependencies.tsv --- juju-core-2.0~beta15/src/gopkg.in/juju/charmstore.v5-unstable/dependencies.tsv 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/juju/charmstore.v5-unstable/dependencies.tsv 2016-10-13 14:32:25.000000000 +0000 @@ -1,4 +1,7 @@ github.com/ajstarks/svgo git 89e3ac64b5b3e403a5e7c35ea4f98d45db7b4518 2014-10-04T21:11:59Z +github.com/beorn7/perks git 3ac7bf7a47d159a033b107610db8a1b6575507a4 2016-02-29T21:34:45Z +github.com/golang/protobuf git 34a5f244f1c01cdfee8e60324258cfbb97a42aec 2015-05-26T01:21:09Z +github.com/gorilla/handlers git a24b39a6a2c8a7af2fe664dd4573205c99904035 2015-04-12T18:40:04Z github.com/juju/blobstore git 06056004b3d7b54bbb7984d830c537bad00fec21 2015-07-29T11:18:58Z github.com/juju/errors git 1b5e39b83d1835fa480e0c2ddefb040ee82d58b3 2015-09-16T12:56:42Z github.com/juju/gojsonpointer git afe8b77aa08f272b49e01b82de78510c11f61500 2015-02-04T19:46:29Z @@ -11,26 +14,32 @@ github.com/juju/mempool git 24974d6c264fe5a29716e7d56ea24c4bd904b7cc 2016-02-05T10:49:27Z github.com/juju/names git 8a0aa0963bbacdc790914892e9ff942e94d6f795 2016-03-30T15:05:33Z github.com/juju/schema git 1e25943f8c6fd6815282d6f1ac87091d21e14e19 2016-03-01T11:16:46Z -github.com/juju/testing git 162fafccebf20a4207ab93d63b986c230e3f4d2e 2016-04-04T09:43:17Z +github.com/juju/testing git b9cfe07211e464f16d78ca9304c503d636b8eefa 2016-05-19T00:49:35Z github.com/juju/txn git 99ec629d0066a4d73c54d8e021a7fc1dc07df614 2015-06-09T16:58:27Z -github.com/juju/utils git 
53080499b3c86a913a57a3a8e2b31cfbb6fe5b1d 2016-04-26T09:38:41Z +github.com/juju/utils git ffea6ead0c374583e876c8357c9db6e98bc71476 2016-05-26T02:52:51Z github.com/juju/version git ef897ad7f130870348ce306f61332f5335355063 2015-11-27T20:34:00Z github.com/juju/webbrowser git 54b8c57083b4afb7dc75da7f13e2967b2606a507 2016-03-09T14:36:29Z github.com/juju/xml git eb759a627588d35166bc505fceb51b88500e291e 2015-04-13T13:11:21Z github.com/juju/zip git f6b1e93fa2e29a1d7d49b566b2b51efb060c982a 2016-02-05T10:52:21Z github.com/julienschmidt/httprouter git 77a895ad01ebc98a4dc95d8355bc825ce80a56f6 2015-10-13T22:55:20Z +github.com/matttproud/golang_protobuf_extensions git c12348ce28de40eed0136aa2b644d0ee0650e56c 2016-04-24T11:30:07Z +github.com/prometheus/client_golang git b90ee0840e8e7dfb84c08d13b9c4f3a794586a21 2016-05-13T04:20:11Z +github.com/prometheus/client_model git fa8ad6fec33561be4280a8f0514318c79d7f6cb6 2015-02-12T10:17:44Z +github.com/prometheus/common git dd586c1c5abb0be59e60f942c22af711a2008cb4 2016-05-03T22:05:32Z +github.com/prometheus/procfs git abf152e5f3e97f2fafac028d2cc06c1feb87ffa5 2016-04-11T19:08:41Z +github.com/rogpeppe/fastuuid git 6724a57986aff9bff1a1770e9347036def7c89f6 2015-01-06T09:32:20Z golang.org/x/crypto git aedad9a179ec1ea11b7064c57cbc6dc30d7724ec 2015-08-30T18:06:42Z golang.org/x/net git ea47fc708ee3e20177f3ca3716217c4ab75942cb 2015-08-29T23:03:18Z gopkg.in/check.v1 git 4f90aeace3a26ad7021961c297b22c42160c7b25 2016-01-05T16:49:36Z gopkg.in/errgo.v1 git 66cb46252b94c1f3d65646f54ee8043ab38d766c 2015-10-07T15:31:57Z -gopkg.in/juju/charm.v6-unstable git 728a5ea3ff1c1ae8b4c3ac4779c0027f693d1ca5 2016-04-08T11:12:17Z -gopkg.in/juju/charmrepo.v2-unstable git b1af69d31ef0112656f79a74a1f29381af1cc109 2016-04-19T12:03:30Z -gopkg.in/juju/jujusvg.v1 git a60359df348ef2ca40ec3bcd58a01de54f05658e 2016-02-11T10:02:50Z -gopkg.in/macaroon-bakery.v1 git 2e7eb1fbcaaeddad5253ce652a57112adc7aa7db 2016-04-25T10:14:06Z +gopkg.in/juju/charm.v6-unstable git 
e62786fbece4c6a74945d0d1d19140a69018c07d 2016-09-16T08:55:15Z +gopkg.in/juju/charmrepo.v2-unstable git d8b2fa48bbfa115906b42405eced0c9871a09e13 2016-08-09T13:37:26Z +gopkg.in/juju/jujusvg.v2 git d82160011935ef79fc7aca84aba2c6f74700fe75 2016-06-09T10:52:15Z +gopkg.in/juju/names.v2 git e38bc90539f22af61a9c656d35068bd5f0a5b30a 2016-05-25T23:07:23Z +gopkg.in/macaroon-bakery.v1 git e7074941455a293ffb7905cd89ca20ee547a03ec 2016-05-27T11:55:54Z gopkg.in/macaroon.v1 git ab3940c6c16510a850e1c2dd628b919f0f3f1464 2015-01-21T11:42:31Z gopkg.in/mgo.v2 git 4d04138ffef2791c479c0c8bbffc30b34081b8d9 2015-10-26T16:34:53Z gopkg.in/natefinch/lumberjack.v2 git 514cbda263a734ae8caac038dadf05f8f3f9f738 2016-01-25T11:17:49Z gopkg.in/tomb.v2 git 14b3d72120e8d10ea6e6b7f87f7175734b1faab8 2014-06-26T14:46:23Z -gopkg.in/yaml.v1 git 9f9df34309c04878acc86042b16630b0f696e1de 2014-09-24T16:16:07Z gopkg.in/yaml.v2 git a83829b6f1293c91addabc89d0571c246397bbf4 2016-03-01T20:40:22Z launchpad.net/tomb bzr gustavo@niemeyer.net-20140529072043-hzcrlnl3ygvg914q 18 diff -Nru juju-core-2.0~beta15/src/gopkg.in/juju/charmstore.v5-unstable/docs/API.md juju-core-2.0.0/src/gopkg.in/juju/charmstore.v5-unstable/docs/API.md --- juju-core-2.0~beta15/src/gopkg.in/juju/charmstore.v5-unstable/docs/API.md 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/juju/charmstore.v5-unstable/docs/API.md 2016-10-13 14:32:25.000000000 +0000 @@ -97,7 +97,7 @@ Any entity in the charm store is considered to be part of one or more "channels" (think "distribution channels"). Currently supported channels are "unpublished", -"development" and "stable". All entities are initially (and always) part +"edge" and "stable". All entities are initially (and always) part of the "unpublished" channel; subsequent operations on the publish endpoint can make entities available in other channels. @@ -106,8 +106,8 @@ is chosen to resolve the ids. The default channel is "stable". 
For example, if wordpress-3 has just been published to the stable -channel, and wordpress-4 has been published to the development - then a GET of wordpress/meta/id-revision?channel=development +channel, and wordpress-4 has been published to the edge one, +then a GET of wordpress/meta/id-revision?channel=edge will return {"Revision": 4} and a GET of wordpress/wordpress/meta/id-revision will return {"Revision": 3} because the default channel is "stable". @@ -134,9 +134,6 @@ The expand-id path expands a general id into a set of specific ids. It strips any revision number and series from id, and returns a slice of all the possible ids matched by that, including all the versions and series. -If *id* is in the development channel, all development and non-development -revisions will be returned; if it is not, then only non-development -revisions will be returned. ```go []Id @@ -168,19 +165,6 @@ ] ``` -Example: `GET development/precise/wordpress-34/expand-id` - -```json -[ - {"Id": "development/trusty/wordpress-3"}, - {"Id": "trusty/wordpress-2"}, - {"Id": "trusty/wordpress-1"}, - {"Id": "precise/wordpress-2"}, - {"Id": "precise/wordpress-1"}, - ] -``` - - ### Archive #### GET *id*/archive @@ -270,7 +254,8 @@ If Promulgate is true, it means that any new charms published to ~*user*/*x*/*name* will also be given the alias *x*/*name*. The latest revision for all ids ~*user*/*anyseries*/*name* -will also be aliased likewise. +will also be aliased likewise. If any of the old or new entities are multi-series, +then only the latest multi-series id will be aliased. 
If Promulgate is false, any new charms published to ~*user*/*anyseries*/*name* will not be given a promulgated @@ -477,6 +462,7 @@ "charm-actions", "charm-config", "charm-metadata", + "charm-metrics", "charm-related", "extra-info", "hash", @@ -634,6 +620,7 @@ "charm-actions", "charm-config", "charm-metadata", + "charm-metrics", "charm-related", "extra-info", "id", @@ -753,7 +740,7 @@ #### GET *id*/meta/charm-metadata -The `/meta/charm.metadata` path returns the contents of the charm metadata file +The `/meta/charm-metadata` path returns the contents of the charm metadata file for a charm. The id must refer to a charm, not a bundle. ```go @@ -819,6 +806,47 @@ } ``` +#### GET *id*/meta/charm-metrics + +The `/meta/charm-metrics` path returns the contents of the charm metrics file +for a charm, or a 404 not found response if no metrics are defined for the +charm. The id must refer to a charm, not a bundle. + +```go +// Metrics contains the metrics declarations encoded in the metrics.yaml file. +type Metrics struct { + Metrics map[string]Metric +} + +// Metric represents a single metric definition +type Metric struct { + Type string + Description string +} +``` + +The possible values of a Metric Type are + +* gauge +* absolute + +Example: `GET wordpress/meta/charm-metrics` + +```json +{ + "Metrics": { + "juju-units": { + "Type": "absolute", + "Description": "The units!" + }, + "pings": { + "Type": "gauge", + "Description": "Description of the metric." + } + } +} +``` + #### GET *id*/meta/bundle-metadata The `meta/bundle-metadata` path returns the contents of the bundle metadata @@ -1103,7 +1131,7 @@ #### GET *id*/meta/terms The `meta/terms` path returns a list of terms and conditions (as recorded in -the terms field of the charm metadata) the user must agree to in order to +the terms field of the charm metadata) the user must agree to in order to obtain the archive of the given charm id. 
Example: `GET some-charm/meta/terms` @@ -1992,7 +2020,7 @@ The results are sorted according to the given sort field, which may be one of `owner`, `name` or `series`, corresponding to the filters of the same names. If the field is prefixed with a hyphen (-), the sorting order will be reversed. If -the sort field is not specified the order will be a server side logical order. +the sort field is not specified the order will be a server side logical order. It is possible to specify more than one sort field to get multi-level sorting, e.g. sort=name,-series will get charms in order of the charm name and then in reverse order of series. @@ -2189,8 +2217,8 @@ #### GET /macaroon -This endpoint returns a macaroon in JSON format that, when its third party -caveats are discharged, will allow access to the charm store. No prior +This endpoint returns a macaroon in JSON format that, when its third party +caveats are discharged, will allow access to the charm store. No prior authorization is required. #### GET /delegatable-macaroon @@ -2198,16 +2226,16 @@ This endpoint returns a macaroon in JSON format that can be passed to third parties to allow them to access the charm store on the user's behalf. If the "id" parameter is specified (url encoded), the returned -macaroon will be restricted for use only with the entity with the +macaroon will be restricted for use only with the entity with the given id. -A delegatable macaroon will only be returned to an authorized user (not -including admin). It will carry the same privileges as the macaroon used +A delegatable macaroon will only be returned to an authorized user (not +including admin). It will carry the same privileges as the macaroon used to authorize the request, but is suitable for use by third parties. #### GET /whoami -This endpoint returns the user name of the client and the list of groups the +This endpoint returns the user name of the client and the list of groups the user is a member of. 
This endpoint requires authorization. Example: `GET whoami` diff -Nru juju-core-2.0~beta15/src/gopkg.in/juju/charmstore.v5-unstable/elasticsearch/query.go juju-core-2.0.0/src/gopkg.in/juju/charmstore.v5-unstable/elasticsearch/query.go --- juju-core-2.0~beta15/src/gopkg.in/juju/charmstore.v5-unstable/elasticsearch/query.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/juju/charmstore.v5-unstable/elasticsearch/query.go 2016-10-13 14:32:25.000000000 +0000 @@ -40,9 +40,10 @@ // MatchQuery provides a query that matches against // a complete field. type MatchQuery struct { - Field string - Query string - Type string + Field string + Query string + Type string + Analyzer string } func (m MatchQuery) MarshalJSON() ([]byte, error) { @@ -50,6 +51,9 @@ if m.Type != "" { params["type"] = m.Type } + if m.Analyzer != "" { + params["analyzer"] = m.Analyzer + } return marshalNamedObject("match", map[string]interface{}{m.Field: params}) } @@ -58,13 +62,23 @@ type MultiMatchQuery struct { Query string Fields []string + + // MinimumShouldMatch optionally contains the value for the + // minimum_should_match parameter, For details of possible values + // please see: + // https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-minimum-should-match.html + MinimumShouldMatch string } func (m MultiMatchQuery) MarshalJSON() ([]byte, error) { - return marshalNamedObject("multi_match", map[string]interface{}{ + mm := map[string]interface{}{ "query": m.Query, "fields": m.Fields, - }) + } + if m.MinimumShouldMatch != "" { + mm["minimum_should_match"] = m.MinimumShouldMatch + } + return marshalNamedObject("multi_match", mm) } // FilteredQuery provides a query that includes a filter. 
diff -Nru juju-core-2.0~beta15/src/gopkg.in/juju/charmstore.v5-unstable/.gitignore juju-core-2.0.0/src/gopkg.in/juju/charmstore.v5-unstable/.gitignore --- juju-core-2.0~beta15/src/gopkg.in/juju/charmstore.v5-unstable/.gitignore 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/juju/charmstore.v5-unstable/.gitignore 2016-10-13 14:32:25.000000000 +0000 @@ -0,0 +1,2 @@ +audit.log +version/init.go diff -Nru juju-core-2.0~beta15/src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/addentity.go juju-core-2.0.0/src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/addentity.go --- juju-core-2.0~beta15/src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/addentity.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/addentity.go 2016-10-13 14:32:25.000000000 +0000 @@ -25,6 +25,7 @@ "gopkg.in/juju/charmstore.v5-unstable/internal/blobstore" "gopkg.in/juju/charmstore.v5-unstable/internal/mongodoc" + "gopkg.in/juju/charmstore.v5-unstable/internal/monitoring" "gopkg.in/juju/charmstore.v5-unstable/internal/router" "gopkg.in/juju/charmstore.v5-unstable/internal/series" ) @@ -131,6 +132,8 @@ if err != nil { return errgo.Mask(err) } + m_upload := monitoring.NewUploadProcessingDuration() + defer m_upload.ObserveMetric() r, _, err := s.BlobStore.Open(blobName) if err != nil { return errgo.Notef(err, "cannot open newly created blob") @@ -185,14 +188,30 @@ chans: chans, } if id.URL.Series == "bundle" { + var addedCompat bool b, err := s.newBundle(id, r, blobSize) if err != nil { return errgo.Mask(err, errgo.Is(params.ErrInvalidEntity), errgo.Is(params.ErrDuplicateUpload), errgo.Is(params.ErrEntityIdNotAllowed)) } - if err := s.addBundle(b, p); err != nil { - return errgo.Mask(err, errgo.Is(params.ErrDuplicateUpload), errgo.Is(params.ErrEntityIdNotAllowed)) + info, err := addPreV5BundleCompatibilityHackBlob(s.BlobStore, r, p.blobName, p.blobSize) + if err != nil && 
errgo.Cause(err) != errNoCompat { + return errgo.Notef(err, "cannot add pre-v5 compatibility blob") } - return nil + if err == nil { + addedCompat = true + p.preV5BlobHash = info.hash + p.preV5BlobHash256 = info.hash256 + p.preV5BlobSize = info.size + } + err = s.addBundle(b, p) + if err != nil && addedCompat { + // We added a compatibility blob so we need to remove it. + compatBlobName := preV5CompatibilityBlobName(p.blobName) + if err1 := s.BlobStore.Remove(compatBlobName); err1 != nil { + logger.Errorf("cannot remove blob %s after error: %v", compatBlobName, err1) + } + } + return errgo.Mask(err, errgo.Is(params.ErrDuplicateUpload), errgo.Is(params.ErrEntityIdNotAllowed)) } ch, err := s.newCharm(id, r, blobSize) if err != nil { @@ -203,7 +222,7 @@ return errgo.Notef(err, "cannot seek to start of archive") } logger.Infof("adding pre-v5 compat blob for %#v", id) - info, err := addPreV5CompatibilityHackBlob(s.BlobStore, r, p.blobName, p.blobSize) + info, err := addPreV5CharmCompatibilityHackBlob(s.BlobStore, r, p.blobName, p.blobSize) if err != nil { return errgo.Notef(err, "cannot add pre-v5 compatibility blob") } @@ -225,65 +244,107 @@ return nil } -type preV5CompatibilityHackBlobInfo struct { +type compatibilityHackBlobInfo struct { hash string hash256 string size int64 } -// addPreV5CompatibilityHackBlob adds a second blob to the blob store that +// addPreV5CharmCompatibilityHackBlob adds a second blob to the blob store that // contains a suffix to the zipped charm archive file that updates the zip // index to point to an updated version of metadata.yaml that does // not have a series field. The original blob is held in r. -// It updates the fields in p accordingly. // // We do this because earlier versions of the charm package have a version // of the series field that holds a single string rather than a slice of string // so will fail when reading the new slice-of-string form, and we // don't want to change the field name from "series". 
-func addPreV5CompatibilityHackBlob(blobStore *blobstore.Store, r io.ReadSeeker, blobName string, blobSize int64) (*preV5CompatibilityHackBlobInfo, error) { - readerAt := ReaderAtSeeker(r) - z, err := jujuzip.NewReader(readerAt, blobSize) +func addPreV5CharmCompatibilityHackBlob(blobStore *blobstore.Store, r io.ReadSeeker, blobName string, blobSize int64) (*compatibilityHackBlobInfo, error) { + data, err := updateZipFile(r, blobSize, "metadata.yaml", removeSeriesField) if err != nil { - return nil, errgo.Notef(err, "cannot open charm archive") + return nil, errgo.Mask(err) } - var metadataf *jujuzip.File - for _, f := range z.File { - if f.Name == "metadata.yaml" { - metadataf = f - break - } + info, err := addCompatibilityHackBlob(blobStore, r, preV5CompatibilityBlobName(blobName), blobSize, data) + if err != nil { + return nil, errgo.Mask(err) } - if metadataf == nil { - return nil, errgo.New("no metadata.yaml file found") + return info, nil +} + +func removeSeriesField(r io.Reader) ([]byte, error) { + data, err := ioutil.ReadAll(r) + if err != nil { + return nil, errgo.Mask(err) + } + var meta map[string]interface{} + if err := yaml.Unmarshal(data, &meta); err != nil { + return nil, errgo.Notef(err, "cannot unmarshal metadata.yaml") + } + delete(meta, "series") + data, err = yaml.Marshal(meta) + if err != nil { + return nil, errgo.Notef(err, "cannot re-marshal metadata.yaml") } - fr, err := metadataf.Open() + return data, nil +} + +var errNoCompat = errgo.New("no compatibility blob required") + +// addPreV5BundleCompatibilityHackBlob adds a second blob to the blob +// store that contains a suffix to the zipped charm archive file that +// updates the zip index to point to an updated version of bundle.yaml +// that has a services field instead of a applications field. +// +// We do this because the bundle format has changed to use an +// applications field rather than a services field. 
This updates those +// bundles to be compatible with the older version of juju that cannot +// parse an applications field. +func addPreV5BundleCompatibilityHackBlob(blobStore *blobstore.Store, r io.ReadSeeker, blobName string, blobSize int64) (*compatibilityHackBlobInfo, error) { + r.Seek(0, 0) + data, err := updateZipFile(r, blobSize, "bundle.yaml", applicationsToServices) if err != nil { - return nil, errgo.Notef(err, "cannot open metadata.yaml from archive") + return nil, errgo.Mask(err, errgo.Is(errNoCompat)) } - defer fr.Close() - data, err := removeSeriesField(fr) + info, err := addCompatibilityHackBlob(blobStore, r, preV5CompatibilityBlobName(blobName), blobSize, data) if err != nil { - return nil, errgo.Notef(err, "cannot remove series field from metadata") + return nil, errgo.Mask(err) } - var appendedBlob bytes.Buffer - zw := z.Append(&appendedBlob) - updatedf := metadataf.FileHeader // Work around invalid duplicate FileHeader issue. - zwf, err := zw.CreateHeader(&updatedf) + return info, nil +} + +func applicationsToServices(r io.Reader) ([]byte, error) { + data, err := ioutil.ReadAll(r) if err != nil { - return nil, errgo.Notef(err, "cannot create appended metadata entry") + return nil, errgo.Mask(err) } - if _, err := zwf.Write(data); err != nil { - return nil, errgo.Notef(err, "cannot write appended metadata data") + var meta map[string]interface{} + if err := yaml.Unmarshal(data, &meta); err != nil { + return nil, errgo.Notef(err, "cannot unmarshal bundle.yaml") } - if err := zw.Close(); err != nil { - return nil, errgo.Notef(err, "cannot close zip file") + if _, ok := meta["services"]; ok { + return nil, errNoCompat } - data = appendedBlob.Bytes() - sha384sum := sha512.Sum384(data) - - err = blobStore.PutUnchallenged(&appendedBlob, preV5CompatibilityBlobName(blobName), int64(len(data)), fmt.Sprintf("%x", sha384sum[:])) + meta["services"] = meta["applications"] + delete(meta, "applications") + data, err = yaml.Marshal(meta) if err != nil { + return 
nil, errgo.Notef(err, "cannot re-marshal bundle.yaml") + } + return data, nil +} + +// addCompatibilityHackBlob adds a new blob to blobStore containing +// appendData. It then calculates the size, sha256 & sha384 of the +// combined contents of r and appendData and returns these values. +func addCompatibilityHackBlob(blobStore *blobstore.Store, r io.ReadSeeker, blobName string, blobSize int64, appendData []byte) (*compatibilityHackBlobInfo, error) { + sha384sum := sha512.Sum384(appendData) + + if err := blobStore.PutUnchallenged( + bytes.NewReader(appendData), + blobName, + int64(len(appendData)), + fmt.Sprintf("%x", sha384sum[:]), + ); err != nil { return nil, errgo.Notef(err, "cannot put archive blob") } @@ -296,35 +357,64 @@ if _, err := io.Copy(hashw, r); err != nil { return nil, errgo.Notef(err, "cannot recalculate blob checksum") } - hashw.Write(data) - return &preV5CompatibilityHackBlobInfo{ - size: blobSize + int64(len(data)), + hashw.Write(appendData) + return &compatibilityHackBlobInfo{ + size: blobSize + int64(len(appendData)), hash256: fmt.Sprintf("%x", sha256w.Sum(nil)), hash: fmt.Sprintf("%x", sha384w.Sum(nil)), }, nil } -// preV5CompatibilityBlobName returns the name of the zip file suffix used -// to overwrite the metadata.yaml file for pre-v5 compatibility purposes. -func preV5CompatibilityBlobName(blobName string) string { - return blobName + ".pre-v5-suffix" -} - -func removeSeriesField(r io.Reader) ([]byte, error) { - data, err := ioutil.ReadAll(r) +// UpdateZipFile finds filename in r and passes it to updatef for +// modification. It then returns the bytes that could be appended to r +// that cause the zip file to reference the modified version of the file. +// +// Any errors returned from updatef will not have the cause masked. 
+func updateZipFile(r io.ReadSeeker, size int64, filename string, updatef func(io.Reader) ([]byte, error)) ([]byte, error) { + readerAt := ReaderAtSeeker(r) + z, err := jujuzip.NewReader(readerAt, size) if err != nil { - return nil, errgo.Mask(err) + return nil, errgo.Notef(err, "cannot open archive") } - var meta map[string]interface{} - if err := yaml.Unmarshal(data, &meta); err != nil { - return nil, errgo.Notef(err, "cannot unmarshal metadata.yaml") + var uf *jujuzip.File + for _, f := range z.File { + if f.Name == filename { + uf = f + break + } } - delete(meta, "series") - data, err = yaml.Marshal(meta) + if uf == nil { + return nil, errgo.Newf("no %q file found", filename) + } + fr, err := uf.Open() if err != nil { - return nil, errgo.Notef(err, "cannot re-marshal metadata.yaml") + return nil, errgo.Notef(err, "cannot open %q from archive", filename) } - return data, nil + defer fr.Close() + data, err := updatef(fr) + if err != nil { + return nil, errgo.NoteMask(err, fmt.Sprintf("cannot update %q", filename), errgo.Any) + } + var appendedBlob bytes.Buffer + zw := z.Append(&appendedBlob) + header := uf.FileHeader // Work around invalid duplicate FileHeader issue. + zwf, err := zw.CreateHeader(&header) + if err != nil { + return nil, errgo.Notef(err, "cannot create appended %q entry", filename) + } + if _, err := zwf.Write(data); err != nil { + return nil, errgo.Notef(err, "cannot write appended %q data", filename) + } + if err := zw.Close(); err != nil { + return nil, errgo.Notef(err, "cannot close zip file") + } + return appendedBlob.Bytes(), nil +} + +// preV5CompatibilityBlobName returns the name of the zip file suffix used +// to overwrite the metadata.yaml file for pre-v5 compatibility purposes. 
+func preV5CompatibilityBlobName(blobName string) string { + return blobName + ".pre-v5-suffix" } // newCharm returns a new charm implementation from the archive blob @@ -440,6 +530,10 @@ CharmRequiredInterfaces: interfacesForRelations(c.Meta().Requires), SupportedSeries: c.Meta().Series, } + metrics := c.Metrics() + if metrics != nil && len(metrics.Metrics) > 0 { + entity.CharmMetrics = metrics + } denormalizeEntity(entity) setEntityChannels(entity, p.chans) @@ -466,14 +560,12 @@ } // setEntityChannels associates the entity with the given channels, ignoring -// unknown channels. +// unknown channels and the unpublished channel. func setEntityChannels(entity *mongodoc.Entity, chans []params.Channel) { + entity.Published = make(map[params.Channel]bool, len(chans)) for _, c := range chans { - switch c { - case params.DevelopmentChannel: - entity.Development = true - case params.StableChannel: - entity.Stable = true + if params.ValidChannels[c] && c != params.UnpublishedChannel { + entity.Published[c] = true } } } @@ -532,19 +624,18 @@ func (s *Store) addEntity(entity *mongodoc.Entity) (err error) { // Add the base entity to the database. 
perms := []string{entity.User} - acls := mongodoc.ACL{ - Read: perms, - Write: perms, + channelACLs := make(map[params.Channel]mongodoc.ACL, len(params.OrderedChannels)) + for _, ch := range params.OrderedChannels { + channelACLs[ch] = mongodoc.ACL{ + Read: perms, + Write: perms, + } } baseEntity := &mongodoc.BaseEntity{ - URL: entity.BaseURL, - User: entity.User, - Name: entity.Name, - ChannelACLs: map[params.Channel]mongodoc.ACL{ - params.UnpublishedChannel: acls, - params.DevelopmentChannel: acls, - params.StableChannel: acls, - }, + URL: entity.BaseURL, + User: entity.User, + Name: entity.Name, + ChannelACLs: channelACLs, Promulgated: entity.PromulgatedURL != nil, } err = s.DB.BaseEntities().Insert(baseEntity) diff -Nru juju-core-2.0~beta15/src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/addentity_test.go juju-core-2.0.0/src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/addentity_test.go --- juju-core-2.0~beta15/src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/addentity_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/addentity_test.go 2016-10-13 14:32:25.000000000 +0000 @@ -9,6 +9,7 @@ "fmt" "io" "io/ioutil" + "path/filepath" "regexp" "sort" "time" @@ -322,6 +323,39 @@ } } +func (s *AddEntitySuite) TestUploadBundleWithServices(c *gc.C) { + store := s.newStore(c, true) + defer store.Close() + blob := storetesting.NewBlob([]storetesting.File{{ + Name: "README.md", + Data: []byte("Readme\n"), + }, { + Name: "bundle.yaml", + Data: []byte(` +services: + mysql: + charm: mysql + num_units: 1 + wordpress: + charm: wordpress + num_units: 1 +`), + }}) + file := filepath.Join(c.MkDir(), "bundle.zip") + ioutil.WriteFile(file, blob.Bytes(), 0666) + bundle, err := charm.ReadBundle(file) + c.Assert(err, jc.ErrorIsNil) + c.Assert(bundle.Data().Applications, gc.HasLen, 2) + + url := router.MustNewResolvedURL("cs:~who/bundle/wordpress-bundle-33", 33) + 
s.addRequiredCharms(c, bundle) + err = store.AddBundleWithArchive(url, bundle) + c.Assert(err, gc.IsNil) + entity, err := store.FindEntity(url, nil) + c.Assert(err, gc.IsNil) + c.Assert(entity.BundleData.Applications, gc.DeepEquals, bundle.Data().Applications) +} + func (s *AddEntitySuite) checkAddCharm(c *gc.C, ch charm.Charm, url *router.ResolvedURL) { store := s.newStore(c, true) defer store.Close() @@ -462,12 +496,23 @@ if doc.CharmMeta != nil && len(doc.CharmMeta.Series) > 0 { // It's a multi-series charm, so the PreV5* fields should be active. if doc.PreV5BlobSize <= doc.Size { - c.Fatalf("pre-v5 blobsize %d is unexpectedly less than original blob size %d", doc.PreV5BlobSize, doc.Size) + c.Fatalf("pre-v5 blobsize %d is unexpectedly less than or equal to original blob size %d", doc.PreV5BlobSize, doc.Size) } c.Assert(doc.PreV5BlobHash, gc.Not(gc.Equals), "") c.Assert(doc.PreV5BlobHash, gc.Not(gc.Equals), hash) c.Assert(doc.PreV5BlobHash256, gc.Not(gc.Equals), "") c.Assert(doc.PreV5BlobHash256, gc.Not(gc.Equals), hash256) + } else if url.URL.Series == "bundle" && doc.PreV5BlobHash != doc.BlobHash { + // It's a bundle with a different PreV5BlobHash, check + // that all other fields are consistently different. + // TODO(mhilton) use BundleData.UnmarshaledWithServices + // to determine whether to make this check. 
+ c.Assert(doc.PreV5BlobHash, gc.Not(gc.Equals), "") + c.Assert(doc.PreV5BlobHash256, gc.Not(gc.Equals), "") + c.Assert(doc.PreV5BlobHash256, gc.Not(gc.Equals), hash256) + if doc.PreV5BlobSize <= doc.Size { + c.Fatalf("pre-v5 blobsize %d is unexpectedly less than or equal to original blob size %d", doc.PreV5BlobSize, doc.Size) + } } else { c.Assert(doc.PreV5BlobSize, gc.Equals, doc.Size) c.Assert(doc.PreV5BlobHash, gc.Equals, doc.BlobHash) @@ -488,7 +533,9 @@ } expectACLs := map[params.Channel]mongodoc.ACL{ params.StableChannel: acls, - params.DevelopmentChannel: acls, + params.CandidateChannel: acls, + params.BetaChannel: acls, + params.EdgeChannel: acls, params.UnpublishedChannel: acls, } c.Assert(storetesting.NormalizeBaseEntity(baseEntity), jc.DeepEquals, storetesting.NormalizeBaseEntity(&mongodoc.BaseEntity{ diff -Nru juju-core-2.0~beta15/src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/elasticsearch.go juju-core-2.0.0/src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/elasticsearch.go --- juju-core-2.0~beta15/src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/elasticsearch.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/elasticsearch.go 2016-10-13 14:32:25.000000000 +0000 @@ -10,7 +10,7 @@ esMapping = mustParseJSON(esMappingJSON) ) -const esSettingsVersion = 9 +const esSettingsVersion = 12 func mustParseJSON(s string) interface{} { var j json.RawMessage @@ -27,7 +27,7 @@ "analysis": { "filter": { "n3_20grams_filter": { - "type": "edgeNGram", + "type": "nGram", "min_gram": 3, "max_gram": 20 } @@ -40,6 +40,13 @@ "lowercase", "n3_20grams_filter" ] + }, + "lowercase_words": { + "type": "custom", + "tokenizer": "whitespace", + "filter": [ + "lowercase" + ] } } } @@ -69,10 +76,20 @@ "index_options": "docs" }, "User": { - "type": "string", - "index": "not_analyzed", - "omit_norms": true, - "index_options": "docs" + "type": "multi_field", + "fields": { + "User": { 
+ "type": "string", + "index": "not_analyzed", + "omit_norms": true, + "index_options": "docs" + }, + "tok": { + "type": "string", + "analyzer": "lowercase_words", + "include_in_all": false + } + } }, "Name": { "type": "multi_field", @@ -85,7 +102,13 @@ }, "ngrams": { "type": "string", - "index_analyzer": "n3_20grams", + "analyzer": "n3_20grams", + "search_analyzer": "lowercase_words", + "include_in_all": false + }, + "tok": { + "type": "string", + "analyzer": "simple", "include_in_all": false } } @@ -213,16 +236,36 @@ } }, "Categories": { - "type": "string", - "index": "not_analyzed", - "omit_norms": true, - "index_options": "docs" + "type": "multi_field", + "fields": { + "Categories": { + "type": "string", + "index": "not_analyzed", + "omit_norms": true, + "index_options": "docs" + }, + "tok": { + "type": "string", + "analyzer": "lowercase_words", + "include_in_all": false + } + } }, "Tags": { - "type": "string", - "index": "not_analyzed", - "omit_norms": true, - "index_options": "docs" + "type": "multi_field", + "fields": { + "Tags": { + "type": "string", + "index": "not_analyzed", + "omit_norms": true, + "index_options": "docs" + }, + "tok": { + "type": "string", + "analyzer": "lowercase_words", + "include_in_all": false + } + } } } }, @@ -296,10 +339,20 @@ "index": "not_analyzed" }, "Tags": { - "type": "string", - "index": "not_analyzed", - "omit_norms": true, - "index_options": "docs" + "type": "multi_field", + "fields": { + "Tags": { + "type": "string", + "index": "not_analyzed", + "omit_norms": true, + "index_options": "docs" + }, + "tok": { + "type": "string", + "analyzer": "lowercase_words", + "include_in_all": false + } + } } } }, Binary files /tmp/tmpDKp0Le/llc_GcQn6Y/juju-core-2.0~beta15/src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/migrationdump.4.4.3.zip and /tmp/tmpDKp0Le/DWZX6UDllP/juju-core-2.0.0/src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/migrationdump.4.4.3.zip differ Binary files 
/tmp/tmpDKp0Le/llc_GcQn6Y/juju-core-2.0~beta15/src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/migrationdump.4.5.6.zip and /tmp/tmpDKp0Le/DWZX6UDllP/juju-core-2.0.0/src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/migrationdump.4.5.6.zip differ diff -Nru juju-core-2.0~beta15/src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/migrations.go juju-core-2.0.0/src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/migrations.go --- juju-core-2.0~beta15/src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/migrations.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/migrations.go 2016-10-13 14:32:25.000000000 +0000 @@ -4,13 +4,14 @@ package charmstore // import "gopkg.in/juju/charmstore.v5-unstable/internal/charmstore" import ( + "time" + "gopkg.in/errgo.v1" "gopkg.in/juju/charm.v6-unstable" "gopkg.in/juju/charmrepo.v2-unstable/csclient/params" "gopkg.in/mgo.v2" "gopkg.in/mgo.v2/bson" - "gopkg.in/juju/charmstore.v5-unstable/internal/blobstore" "gopkg.in/juju/charmstore.v5-unstable/internal/mongodoc" ) @@ -22,6 +23,11 @@ migrationAddPreV5CompatBlobBogus mongodoc.MigrationName = "add pre-v5 compatibility blobs" migrationAddPreV5CompatBlob mongodoc.MigrationName = "add pre-v5 compatibility blobs; second try" migrationNewChannelsModel mongodoc.MigrationName = "new channels model" + migrationStats mongodoc.MigrationName = "remove legacy download stats" + migrationEdgeEntities mongodoc.MigrationName = "rename development to edge in entities" + migrationEdgeBaseEntities mongodoc.MigrationName = "rename development to edge in base entities" + migrationPublishedEntities mongodoc.MigrationName = "include published status in a single entity field" + migrationCandidateBetaChannels mongodoc.MigrationName = "populate candidate and beta channel ACLs" ) // migrations holds all the migration functions that are executed in the order @@ -59,11 +65,23 @@ // fixed version. 
name: migrationAddPreV5CompatBlobBogus, }, { - name: migrationAddPreV5CompatBlob, - migrate: addPreV5CompatBlob, + name: migrationAddPreV5CompatBlob, +}, { + name: migrationNewChannelsModel, +}, { + name: migrationStats, +}, { + name: migrationEdgeEntities, + migrate: migrateEdgeEntities, +}, { + name: migrationEdgeBaseEntities, + migrate: migrateEdgeBaseEntities, }, { - name: migrationNewChannelsModel, - migrate: migrateToNewChannelsModel, + name: migrationPublishedEntities, + migrate: migratePublishedEntities, +}, { + name: migrationCandidateBetaChannels, + migrate: migrateCandidateBetaChannels, }} // migration holds a migration function with its corresponding name. @@ -74,6 +92,11 @@ // Migrate starts the migration process using the given database. func migrate(db StoreDatabase) error { + db = db.copy() + defer db.Close() + db.Session.SetSocketTimeout(10 * time.Minute) + // Set the socket timeout back to the default value of one minute. + defer db.Session.SetSocketTimeout(1 * time.Minute) // Retrieve already executed migrations. 
executed, err := getExecuted(db) if err != nil { @@ -132,87 +155,73 @@ return executed, nil } -func addPreV5CompatBlob(db StoreDatabase) error { - blobStore := blobstore.New(db.Database, "entitystore") - entities := db.Entities() - iter := entities.Find(nil).Select(map[string]int{ - "size": 1, - "blobhash": 1, - "blobname": 1, - "blobhash256": 1, - "charmmeta.series": 1, - }).Iter() - var entity mongodoc.Entity - for iter.Next(&entity) { - var info *preV5CompatibilityHackBlobInfo - - if entity.CharmMeta == nil || len(entity.CharmMeta.Series) == 0 { - info = &preV5CompatibilityHackBlobInfo{ - hash: entity.BlobHash, - hash256: entity.BlobHash256, - size: entity.Size, - } - } else { - r, _, err := blobStore.Open(entity.BlobName) - if err != nil { - return errgo.Notef(err, "cannot open original blob") - } - info, err = addPreV5CompatibilityHackBlob(blobStore, r, entity.BlobName, entity.Size) - r.Close() - if err != nil { - return errgo.Mask(err) - } - } - err := entities.UpdateId(entity.URL, bson.D{{ - "$set", bson.D{{ - "prev5blobhash", info.hash, - }, { - "prev5blobhash256", info.hash256, - }, { - "prev5blobsize", info.size, - }}, - }}) - if err != nil { - return errgo.Notef(err, "cannot update pre-v5 info") - } - } - if err := iter.Err(); err != nil { - return errgo.Notef(err, "cannot iterate through entities") +func setExecuted(db StoreDatabase, name mongodoc.MigrationName) error { + if _, err := db.Migrations().Upsert(nil, bson.D{{ + "$addToSet", bson.D{{"executed", name}}, + }}); err != nil { + return errgo.Notef(err, "cannot add %s to executed migrations", name) } return nil } -func migrateToNewChannelsModel(db StoreDatabase) error { - if err := ncmUpdateDevelopmentAndStable(db); err != nil { - return errgo.Mask(err) +// migrateEdgeEntities renames the "development" entity field to "edge". 
+func migrateEdgeEntities(db StoreDatabase) error { + if _, err := db.Entities().UpdateAll(nil, bson.D{{ + "$rename", bson.D{{"development", "edge"}}, + }}); err != nil { + return errgo.Notef(err, "cannot rename development field in entities") } - if err := ncmUpdateBaseEntities(db); err != nil { - return errgo.Mask(err) + return nil +} + +// migrateEdgeBaseEntities renames all "development" keys in base entity +// embedded documents to "edge". +func migrateEdgeBaseEntities(db StoreDatabase) error { + if _, err := db.BaseEntities().UpdateAll(nil, bson.D{{ + "$rename", bson.D{ + {"channelacls.development", "channelacls.edge"}, + {"channelentities.development", "channelentities.edge"}, + {"channelresources.development", "channelresources.edge"}, + }, + }}); err != nil { + return errgo.Notef(err, "cannot rename development keys in base entities") } return nil } -// ncmUpdateDevelopmentAndStable updates the Development and Stable -// entity fields to conform to the new channels model. -// All entities are treated as if they're in development; entities -// without the development field set are treated as stable. -func ncmUpdateDevelopmentAndStable(db StoreDatabase) error { +type preMigratePublishedEntitiesEntity struct { + URL *charm.URL `bson:"_id"` + Stable, Edge bool +} + +// migratePublishedEntities deletes the "edge" and "stable" boolean fields in +// the entity document and replaces them with a single "published" map. +func migratePublishedEntities(db StoreDatabase) error { entities := db.Entities() iter := entities.Find(bson.D{{ - "stable", bson.D{{"$exists", false}}, + // Assume that if an entity has the "stable" field, it also has the + // "edge" one and it hasn't been migrated yet. + "stable", bson.D{{"$exists", true}}, }}).Select(map[string]int{ - "_id": 1, - "development": 1, + "stable": 1, + "edge": 1, }).Iter() - // For every entity without a stable field, update - // its development and stable fields appropriately. 
- var entity mongodoc.Entity + // For every resulting entity populate the "published" field and then + // remove "stable" and "edge" ones. + var entity preMigratePublishedEntitiesEntity for iter.Next(&entity) { err := entities.UpdateId(entity.URL, bson.D{{ "$set", bson.D{ - {"development", true}, - {"stable", !entity.Development}, + {"published", map[params.Channel]bool{ + params.StableChannel: entity.Stable, + params.EdgeChannel: entity.Edge, + }}, + }, + }, { + "$unset", bson.D{ + {"stable", ""}, + {"edge", ""}, }, }}) if err != nil { @@ -225,58 +234,30 @@ return nil } -// preNCMBaseEntity holds the type of a base entity just before -// the new channels model migration. -type preNCMBaseEntity struct { - // URL holds the reference URL of of charm on bundle - // regardless of its revision, series or promulgation status - // (this omits the revision and series from URL). - // e.g., cs:~user/collection/foo - URL *charm.URL `bson:"_id"` - - // User holds the user part of the entity URL (for instance, "joe"). - User string - - // Name holds the name of the entity (for instance "wordpress"). - Name string - - // Public specifies whether the charm or bundle - // is available to all users. If this is true, the ACLs will - // be ignored when reading a charm. - Public bool - - // ACLs holds permission information relevant to the base entity. - // The permissions apply to all revisions. - ACLs mongodoc.ACL - - // DevelopmentACLs is similar to ACLs but applies to all development - // revisions. - DevelopmentACLs mongodoc.ACL - - // Promulgated specifies whether the charm or bundle should be - // promulgated. - Promulgated mongodoc.IntBool - - // CommonInfo holds arbitrary common extra metadata associated with - // the base entity. Thhose data apply to all revisions. - // The byte slices hold JSON-encoded data. 
- CommonInfo map[string][]byte `bson:",omitempty" json:",omitempty"` -} - -// ncmUpdateBaseEntities updates all the base entities to conform to -// the new channels model. It assumes that ncmUpdateDevelopmentAndStable -// has been run already. -func ncmUpdateBaseEntities(db StoreDatabase) error { +// migrateCandidateBetaChannels populates base entity ACLs for the candidate +// and beta channels. +func migrateCandidateBetaChannels(db StoreDatabase) error { baseEntities := db.BaseEntities() iter := baseEntities.Find(bson.D{{ - "channelentities", bson.D{{"$exists", false}}, - }}).Iter() - // For every base entity without a ChannelEntities field, update - // its ChannelEntities and and ChannelACLs field appropriately. - var baseEntity preNCMBaseEntity + // Assume that, if a base entity does not have the "channelacls.beta" + // field, then the "channelacls.candidate" one is also missing and the + // document must be migrated. + "channelacls.beta", bson.D{{"$exists", false}}, + }}).Select(map[string]int{"channelacls": 1}).Iter() + + // For every resulting base entity populate "channelacls.beta" and + // "channelacls.candidate" with contents from "channelacls.unpublished". + var baseEntity mongodoc.BaseEntity for iter.Next(&baseEntity) { - if err := ncmUpdateBaseEntity(db, &baseEntity); err != nil { - return errgo.Mask(err) + acls := baseEntity.ChannelACLs[params.UnpublishedChannel] + err := baseEntities.UpdateId(baseEntity.URL, bson.D{{ + "$set", bson.D{ + {"channelacls.candidate", acls}, + {"channelacls.beta", acls}, + }, + }}) + if err != nil { + return errgo.Notef(err, "cannot update base entity") } } if err := iter.Err(); err != nil { @@ -284,77 +265,3 @@ } return nil } - -// ncmUpdateBaseEntity updates a single base entity to conform to -// the new channels model. 
-func ncmUpdateBaseEntity(db StoreDatabase, baseEntity *preNCMBaseEntity) error { - channelEntities := make(map[params.Channel]map[string]*charm.URL) - - updateChannelURL := func(url *charm.URL, ch params.Channel, series string) { - if channelEntities[ch] == nil { - channelEntities[ch] = make(map[string]*charm.URL) - } - if oldURL := channelEntities[ch][series]; oldURL == nil || oldURL.Revision < url.Revision { - channelEntities[ch][series] = url - } - } - // updateChannelEntity updates the series entries in channelEntities - // for the given entity, setting the entity URL entry if the revision - // is greater than any already found. - updateChannelEntity := func(entity *mongodoc.Entity, ch params.Channel) { - if entity.URL.Series == "" { - for _, series := range entity.SupportedSeries { - updateChannelURL(entity.URL, ch, series) - } - } else { - updateChannelURL(entity.URL, ch, entity.URL.Series) - } - } - - // Iterate through all the entities associated with the base entity - // to find the most recent "published" entities so that we can - // populate the ChannelEntities field. 
- var entity mongodoc.Entity - iter := db.Entities().Find(bson.D{{"baseurl", baseEntity.URL}}).Iter() - for iter.Next(&entity) { - if entity.Development { - updateChannelEntity(&entity, params.DevelopmentChannel) - } - if entity.Stable { - updateChannelEntity(&entity, params.StableChannel) - } - } - if err := iter.Err(); err != nil { - return errgo.Notef(err, "cannot iterate through entities") - } - err := db.BaseEntities().UpdateId(baseEntity.URL, bson.D{{ - "$set", bson.D{{ - "channelentities", channelEntities, - }, { - "channelacls", map[params.Channel]mongodoc.ACL{ - params.UnpublishedChannel: baseEntity.DevelopmentACLs, - params.DevelopmentChannel: baseEntity.DevelopmentACLs, - params.StableChannel: baseEntity.ACLs, - }, - }}, - }, { - "$unset", bson.D{{ - "developmentacls", nil, - }, { - "acls", nil, - }}, - }}) - if err != nil { - return errgo.Notef(err, "cannot update base entity") - } - return nil -} - -func setExecuted(db StoreDatabase, name mongodoc.MigrationName) error { - if _, err := db.Migrations().Upsert(nil, bson.D{{ - "$addToSet", bson.D{{"executed", name}}, - }}); err != nil { - return errgo.Notef(err, "cannot add %s to executed migrations", name) - } - return nil -} diff -Nru juju-core-2.0~beta15/src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/migrations_integration_test.go juju-core-2.0.0/src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/migrations_integration_test.go --- juju-core-2.0~beta15/src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/migrations_integration_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/migrations_integration_test.go 2016-10-13 14:32:25.000000000 +0000 @@ -23,7 +23,7 @@ var _ = gc.Suite(&migrationsIntegrationSuite{}) -const earliestDeployedVersion = "4.4.3" +const earliestDeployedVersion = "4.5.6" var dumpMigrationHistoryFlag = flag.Bool("dump-migration-history", false, "dump migration history to file") @@ 
-59,22 +59,22 @@ }, { id: "~charmers/bundle/promulgatedbundle-0", promulgatedId: "bundle/promulgatedbundle-0", - entity: storetesting.NewBundle(&charm.BundleData{ - Applications: map[string]*charm.ApplicationSpec{ - "promulgated": { - Charm: "promulgated", - }, - }, - }), + entity: storetesting.NewBlob([]storetesting.File{{ + Name: "README.md", + Data: []byte("something"), + }, { + Name: "bundle.yaml", + Data: []byte(`services: {promulgated: {charm: promulgated}}`), + }}), }, { id: "~charmers/bundle/nonpromulgatedbundle-0", - entity: storetesting.NewBundle(&charm.BundleData{ - Applications: map[string]*charm.ApplicationSpec{ - "promulgated": { - Charm: "promulgated", - }, - }, - }), + entity: storetesting.NewBlob([]storetesting.File{{ + Name: "README.md", + Data: []byte("something"), + }, { + Name: "bundle.yaml", + Data: []byte(`services: {promulgated: {charm: promulgated}}`), + }}), }}) if err != nil { return errgo.Mask(err) @@ -168,6 +168,107 @@ } return nil }, +}, { + // V5 API. + // Copy from extra-info/legacy-download-stats to Archive Downloads. + // Create Charm 1 with 3 revisions set extrainfo legacy download stats on number 3 + // Create Charm 2 with 3 revisions set extrainfo legacy download stats on number 2 + // Create Charm 3 with 1 revision set extrainfo legacy download stats on it + // Create Charm 4 with 3 revisions no extrainfo legacy download stats + // Create Charm 5 with 1 revisions no extrainfo legacy download stats + // Check the results in increase by 10 for all revision when legacy is set. 
+ version: "4.5.3", + update: func(db *mgo.Database, csv *charmStoreVersion) error { + err := csv.Upload("v5", []uploadSpec{{ + id: "~charmers/trusty/legacystats-setonlast-0", + entity: storetesting.NewCharm(nil), + }, { + id: "~charmers/trusty/legacystats-setonlast-1", + entity: storetesting.NewCharm(nil), + }, { + id: "~charmers/trusty/legacystats-setonlast-2", + entity: storetesting.NewCharm(nil), + }, { + id: "~charmers/trusty/legacystats-setonsecond-0", + entity: storetesting.NewCharm(nil), + }, { + id: "~charmers/trusty/legacystats-setonsecond-1", + entity: storetesting.NewCharm(nil), + }, { + id: "~charmers/trusty/legacystats-setonsecond-2", + entity: storetesting.NewCharm(nil), + }, { + id: "~charmers/trusty/legacystats-setonfirst-0", + entity: storetesting.NewCharm(nil), + }, { + id: "~charmers/trusty/legacystats-setonfirst-1", + entity: storetesting.NewCharm(nil), + }, { + id: "~charmers/trusty/legacystats-setonfirst-2", + entity: storetesting.NewCharm(nil), + }, { + id: "~charmers/trusty/legacystats-2rev-notset-0", + entity: storetesting.NewCharm(nil), + }, { + id: "~charmers/trusty/legacystats-2rev-notset-1", + entity: storetesting.NewCharm(nil), + }, { + id: "~charmers/trusty/legacystats-1rev-notset-0", + entity: storetesting.NewCharm(nil), + }, { + id: "~someone/trusty/empty-metered-42", + entity: storetesting.NewCharm(nil).WithMetrics(&charm.Metrics{}), + }}) + + if err != nil { + return errgo.Mask(err) + } + + if err := csv.Put("/v5/~charmers/trusty/legacystats-setonlast-2/meta/extra-info/legacy-download-stats", 10); err != nil { + return errgo.Mask(err) + } + if err := csv.Put("/v5/~charmers/trusty/legacystats-setonsecond-1/meta/extra-info/legacy-download-stats", 100); err != nil { + return errgo.Mask(err) + } + if err := csv.Put("/v5/~charmers/trusty/legacystats-setonfirst-0/meta/extra-info/legacy-download-stats", 1000); err != nil { + return errgo.Mask(err) + } + return nil + }, +}, { + // V5 API. 
+ // Rename the development channel to "edge", in both entities and base + // entities. + // Deletes the "edge" and "stable" boolean fields in the entity document + // and replace them with a single "published" map. + // Populate base entity ACLs for the candidate and beta channels. + version: "4.5.6", + update: func(db *mgo.Database, csv *charmStoreVersion) error { + err := csv.Upload("v5", []uploadSpec{{ + id: "~charmers/trusty/different-acls-0", + entity: storetesting.NewCharm(nil), + }}) + if err != nil { + return errgo.Mask(err) + } + url := charm.MustParseURL("~charmers/different-acls") + err = db.C("base_entities").UpdateId(url, bson.D{{ + "$set", bson.D{ + {"channelacls.unpublished", mongodoc.ACL{ + Read: []string{"everyone", "unpublished"}, + Write: []string{"everyone", "charmers", "unpublished"}, + }}, + {"channelacls.development", mongodoc.ACL{ + Read: []string{"everyone", "edge"}, + Write: []string{"everyone", "charmers", "edge"}, + }}, + }, + }}) + if err != nil { + return errgo.Notef(err, "cannot update ACLs for base entity %q", url) + } + return nil + }, }} var migrationFromDumpEntityTests = []struct { @@ -178,7 +279,7 @@ checkers: []entityChecker{ hasPromulgatedRevision(0), hasCompatibilityBlob(false), - isDevelopment(true), + isEdge(true), isStable(true), }, }, { @@ -186,7 +287,7 @@ checkers: []entityChecker{ hasPromulgatedRevision(1), hasCompatibilityBlob(false), - isDevelopment(true), + isEdge(true), isStable(false), }, }, { @@ -194,7 +295,7 @@ checkers: []entityChecker{ hasPromulgatedRevision(-1), hasCompatibilityBlob(false), - isDevelopment(true), + isEdge(true), isStable(true), }, }, { @@ -202,7 +303,7 @@ checkers: []entityChecker{ hasPromulgatedRevision(0), hasCompatibilityBlob(false), - isDevelopment(true), + isEdge(true), isStable(true), }, }, { @@ -210,7 +311,7 @@ checkers: []entityChecker{ hasPromulgatedRevision(-1), hasCompatibilityBlob(false), - isDevelopment(true), + isEdge(true), isStable(true), }, }, { @@ -218,7 +319,7 @@ checkers: 
[]entityChecker{ hasPromulgatedRevision(-1), hasCompatibilityBlob(true), - isDevelopment(true), + isEdge(true), isStable(true), }, }, { @@ -226,7 +327,7 @@ checkers: []entityChecker{ hasPromulgatedRevision(-1), hasCompatibilityBlob(true), - isDevelopment(true), + isEdge(true), isStable(true), }, }, { @@ -234,7 +335,7 @@ checkers: []entityChecker{ hasPromulgatedRevision(-1), hasCompatibilityBlob(false), - isDevelopment(true), + isEdge(true), isStable(true), }, }, { @@ -242,7 +343,7 @@ checkers: []entityChecker{ hasPromulgatedRevision(-1), hasCompatibilityBlob(false), - isDevelopment(true), + isEdge(true), isStable(false), }, }, { @@ -250,7 +351,7 @@ checkers: []entityChecker{ hasPromulgatedRevision(-1), hasCompatibilityBlob(false), - isDevelopment(true), + isEdge(true), isStable(false), }, }, { @@ -258,8 +359,14 @@ checkers: []entityChecker{ hasPromulgatedRevision(-1), hasCompatibilityBlob(false), - isDevelopment(true), + isEdge(true), isStable(true), + hasMetrics(nil), + }, +}, { + id: "~someone/trusty/empty-metered-42", + checkers: []entityChecker{ + hasMetrics(nil), }, }} @@ -275,7 +382,15 @@ Read: []string{"charmers"}, Write: []string{"charmers"}, }, - params.DevelopmentChannel: { + params.EdgeChannel: { + Read: []string{"charmers"}, + Write: []string{"charmers"}, + }, + params.BetaChannel: { + Read: []string{"charmers"}, + Write: []string{"charmers"}, + }, + params.CandidateChannel: { Read: []string{"charmers"}, Write: []string{"charmers"}, }, @@ -285,7 +400,7 @@ }, }), hasChannelEntities(map[params.Channel]map[string]*charm.URL{ - params.DevelopmentChannel: { + params.EdgeChannel: { "precise": charm.MustParseURL("~charmers/precise/promulgated-1"), }, params.StableChannel: { @@ -302,7 +417,15 @@ Read: []string{"bobgroup"}, Write: []string{"bob", "someoneelse"}, }, - params.DevelopmentChannel: { + params.EdgeChannel: { + Read: []string{"bobgroup"}, + Write: []string{"bob", "someoneelse"}, + }, + params.BetaChannel: { + Read: []string{"bobgroup"}, + Write: 
[]string{"bob", "someoneelse"}, + }, + params.CandidateChannel: { Read: []string{"bobgroup"}, Write: []string{"bob", "someoneelse"}, }, @@ -312,7 +435,7 @@ }, }), hasChannelEntities(map[params.Channel]map[string]*charm.URL{ - params.DevelopmentChannel: { + params.EdgeChannel: { "trusty": charm.MustParseURL("~bob/trusty/nonpromulgated-0"), }, params.StableChannel: { @@ -326,7 +449,7 @@ isPromulgated(true), hasAllACLs("charmers"), hasChannelEntities(map[params.Channel]map[string]*charm.URL{ - params.DevelopmentChannel: { + params.EdgeChannel: { "bundle": charm.MustParseURL("~charmers/bundle/promulgatedbundle-0"), }, params.StableChannel: { @@ -340,7 +463,7 @@ isPromulgated(false), hasAllACLs("charmers"), hasChannelEntities(map[params.Channel]map[string]*charm.URL{ - params.DevelopmentChannel: { + params.EdgeChannel: { "bundle": charm.MustParseURL("~charmers/bundle/nonpromulgatedbundle-0"), }, params.StableChannel: { @@ -354,7 +477,7 @@ isPromulgated(false), hasAllACLs("charmers"), hasChannelEntities(map[params.Channel]map[string]*charm.URL{ - params.DevelopmentChannel: { + params.EdgeChannel: { "precise": charm.MustParseURL("~charmers/multiseries-1"), "trusty": charm.MustParseURL("~charmers/multiseries-1"), "utopic": charm.MustParseURL("~charmers/multiseries-0"), @@ -374,7 +497,7 @@ isPromulgated(false), hasAllACLs("someone"), hasChannelEntities(map[params.Channel]map[string]*charm.URL{ - params.DevelopmentChannel: { + params.EdgeChannel: { "precise": charm.MustParseURL("~someone/precise/southerncharm-3"), "trusty": charm.MustParseURL("~someone/trusty/southerncharm-6"), }, @@ -384,6 +507,32 @@ }, }), }, +}, { + id: "cs:~charmers/different-acls", + checkers: []baseEntityChecker{ + hasACLs(map[params.Channel]mongodoc.ACL{ + params.UnpublishedChannel: { + Read: []string{"everyone", "unpublished"}, + Write: []string{"everyone", "charmers", "unpublished"}, + }, + params.EdgeChannel: { + Read: []string{"everyone", "edge"}, + Write: []string{"everyone", "charmers", "edge"}, 
+ }, + params.BetaChannel: { + Read: []string{"everyone", "unpublished"}, + Write: []string{"everyone", "charmers", "unpublished"}, + }, + params.CandidateChannel: { + Read: []string{"everyone", "unpublished"}, + Write: []string{"everyone", "charmers", "unpublished"}, + }, + params.StableChannel: { + Read: []string{"charmers"}, + Write: []string{"charmers"}, + }, + }), + }, }} func (s *migrationsIntegrationSuite) TestMigrationFromDump(c *gc.C) { @@ -539,18 +688,13 @@ } ce, err := store.FindEntity(MustParseResolvedURL(url.String()), nil) c.Assert(err, gc.IsNil) - switch ch { - case params.DevelopmentChannel: - c.Assert(ce.Development, gc.Equals, true) - case params.StableChannel: - c.Assert(ce.Stable, gc.Equals, true) - default: + if !params.ValidChannels[ch] { c.Fatalf("unknown channel %q found", ch) } + c.Assert(ce.Published[ch], gc.Equals, true) if series != "bundle" && !stringInSlice(series, ce.SupportedSeries) { c.Fatalf("series %q not found in supported series %q", series, ce.SupportedSeries) } - } } } @@ -588,15 +732,21 @@ } } -func isDevelopment(isDev bool) entityChecker { +func isEdge(isDev bool) entityChecker { return func(c *gc.C, entity *mongodoc.Entity) { - c.Assert(entity.Development, gc.Equals, isDev) + c.Assert(entity.Published[params.EdgeChannel], gc.Equals, isDev) } } func isStable(isStable bool) entityChecker { return func(c *gc.C, entity *mongodoc.Entity) { - c.Assert(entity.Stable, gc.Equals, isStable) + c.Assert(entity.Published[params.StableChannel], gc.Equals, isStable) + } +} + +func hasMetrics(metrics *charm.Metrics) entityChecker { + return func(c *gc.C, entity *mongodoc.Entity) { + c.Assert(entity.CharmMetrics, jc.DeepEquals, metrics) } } @@ -621,7 +771,9 @@ } return hasACLs(map[params.Channel]mongodoc.ACL{ params.UnpublishedChannel: userACL, - params.DevelopmentChannel: userACL, + params.EdgeChannel: userACL, + params.BetaChannel: userACL, + params.CandidateChannel: userACL, params.StableChannel: userACL, }) } diff -Nru 
juju-core-2.0~beta15/src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/resources_test.go juju-core-2.0.0/src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/resources_test.go --- juju-core-2.0~beta15/src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/resources_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/resources_test.go 2016-10-13 14:32:25.000000000 +0000 @@ -292,10 +292,10 @@ channel: params.StableChannel, expectResource: 1, }, { - about: "revision specified on development channel", + about: "revision specified on edge channel", name: "someResource", revision: 2, - channel: params.DevelopmentChannel, + channel: params.EdgeChannel, expectResource: 2, }, { about: "revision specified that doesn't exist", @@ -317,10 +317,10 @@ channel: params.StableChannel, expectResource: 0, }, { - about: "no revision specified on development channel", + about: "no revision specified on edge channel", name: "someResource", revision: -1, - channel: params.DevelopmentChannel, + channel: params.EdgeChannel, expectResource: 1, }, { about: "no revision specified on unpublished channel", @@ -360,7 +360,7 @@ err = store.Publish(id, map[string]int{ "someResource": 1, - }, params.DevelopmentChannel) + }, params.EdgeChannel) c.Assert(err, gc.IsNil) for i, test := range resolveResourceTests { diff -Nru juju-core-2.0~beta15/src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/search.go juju-core-2.0.0/src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/search.go --- juju-core-2.0~beta15/src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/search.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/search.go 2016-10-13 14:32:25.000000000 +0000 @@ -155,28 +155,6 @@ return nil } -// UpdateSearchFields updates the search record for the entity reference r -// with the updated values in fields. 
-func (s *Store) UpdateSearchFields(r *router.ResolvedURL, fields map[string]interface{}) error { - if s.ES == nil || s.ES.Database == nil { - return nil - } - var needUpdate bool - for k := range fields { - // Add any additional fields here that should update the search index. - if k == "extrainfo.legacy-download-stats" { - needUpdate = true - } - } - if !needUpdate { - return nil - } - if err := s.UpdateSearch(r); err != nil { - return errgo.Mask(err) - } - return nil -} - // searchDocFromEntity performs the processing required to convert a // mongodoc.Entity and the corresponding mongodoc.BaseEntity to an esDoc // for indexing. @@ -543,31 +521,6 @@ Results []*mongodoc.Entity } -// queryFields provides a map of fields to weighting to use with the -// elasticsearch query. -func queryFields(sp SearchParams) map[string]float64 { - var fields map[string]float64 - if sp.AutoComplete { - fields = map[string]float64{ - "Name.ngrams": 10, - } - } else { - fields = map[string]float64{ - "Name": 10, - "User": 7, - "CharmMeta.Categories": 5, - "CharmMeta.Tags": 5, - "BundleData.Tags": 5, - "Series": 5, - "CharmProvidedInterfaces": 3, - "CharmRequiredInterfaces": 3, - "CharmMeta.Description": 1, - "BundleReadMe": 1, - } - } - return fields -} - // encodeFields takes a map of field name to weight and builds a slice of strings // representing those weighted fields for a MultiMatchQuery. 
func encodeFields(fields map[string]float64) []string { @@ -588,12 +541,23 @@ // Full text search var q elasticsearch.Query + nameField := "Name.tok" + if sp.AutoComplete { + nameField = "Name.ngrams" + } if sp.Text == "" { q = elasticsearch.MatchAllQuery{} } else { q = elasticsearch.MultiMatchQuery{ - Query: sp.Text, - Fields: encodeFields(queryFields(sp)), + Query: sp.Text, + Fields: encodeFields(map[string]float64{ + nameField: 10, + "User.tok": 7, + "CharmMeta.Categories.tok": 5, + "CharmMeta.Tags.tok": 5, + "BundleData.Tags.tok": 5, + }), + MinimumShouldMatch: "100%", } } diff -Nru juju-core-2.0~beta15/src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/search_test.go juju-core-2.0.0/src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/search_test.go --- juju-core-2.0~beta15/src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/search_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/search_test.go 2016-10-13 14:32:25.000000000 +0000 @@ -30,17 +30,6 @@ func (s *StoreSearchSuite) SetUpTest(c *gc.C) { s.IsolatedMgoESSuite.SetUpTest(c) - - // Temporarily set LegacyDownloadCountsEnabled to false, so that the real - // code path can be reached by tests in this suite. - // TODO (frankban): remove this block when removing the legacy counts - // logic. 
- original := LegacyDownloadCountsEnabled - LegacyDownloadCountsEnabled = false - s.AddCleanup(func(*gc.C) { - LegacyDownloadCountsEnabled = original - }) - s.index = SearchIndex{s.ES, s.TestIndex} s.ES.RefreshIndex(".versions") pool, err := NewPool(s.Session.DB("foo"), &s.index, nil, ServerParams{}) @@ -88,28 +77,28 @@ } var searchEntities = map[string]searchEntity{ - "wordpress": searchEntity{ + "wordpress": { entity: newEntity("cs:~charmers/precise/wordpress-23", 23), charmMeta: &charm.Meta{ Description: "blog", Requires: map[string]charm.Relation{ - "mysql": charm.Relation{ + "mysql": { Name: "mysql", Interface: "mysql", Scope: charm.ScopeGlobal, }, }, - Categories: []string{"wordpress"}, + Categories: []string{"wordpress", "wordpressCAT"}, Tags: []string{"wordpressTAG"}, }, acl: []string{params.Everyone}, }, - "mysql": searchEntity{ + "mysql": { entity: newEntity("cs:~openstack-charmers/xenial/mysql-7", 7), charmMeta: &charm.Meta{ Summary: "Database Engine", Provides: map[string]charm.Relation{ - "mysql": charm.Relation{ + "mysql": { Name: "mysql", Interface: "mysql", Scope: charm.ScopeGlobal, @@ -121,7 +110,7 @@ acl: []string{params.Everyone}, downloads: 3, }, - "varnish": searchEntity{ + "varnish": { entity: newEntity("cs:~foo/xenial/varnish-1", -1), charmMeta: &charm.Meta{ Summary: "Database Engine", @@ -131,7 +120,7 @@ acl: []string{params.Everyone}, downloads: 5, }, - "riak": searchEntity{ + "riak": { entity: newEntity("cs:~charmers/xenial/riak-67", 67), charmMeta: &charm.Meta{ Categories: []string{"riak"}, @@ -139,11 +128,11 @@ }, acl: []string{"charmers"}, }, - "wordpress-simple": searchEntity{ + "wordpress-simple": { entity: newEntity("cs:~charmers/bundle/wordpress-simple-4", 4), bundleData: &charm.BundleData{ Applications: map[string]*charm.ApplicationSpec{ - "wordpress": &charm.ApplicationSpec{ + "wordpress": { Charm: "wordpress", }, }, @@ -153,7 +142,7 @@ downloads: 1, }, // Note: "squid-forwardproxy" shares a trigram "dpr" with "wordpress". 
- "squid-forwardproxy": searchEntity{ + "squid-forwardproxy": { entity: newEntity("cs:~charmers/wily/squid-forwardproxy-3", 3), charmMeta: &charm.Meta{}, acl: []string{params.Everyone}, @@ -161,7 +150,7 @@ }, // Note: "cloud-controller-worker-v2" shares a trigram "wor" with "wordpress". - "cloud-controller-worker-v2": searchEntity{ + "cloud-controller-worker-v2": { entity: newEntity("cs:~cf-charmers/trusty/cloud-controller-worker-v2-7", -1), charmMeta: &charm.Meta{}, acl: []string{params.Everyone}, @@ -381,6 +370,26 @@ searchEntities["wordpress-simple"].entity, }, }, { + about: "autocomplete case insensitive", + sp: SearchParams{ + Text: "woRd", + AutoComplete: true, + }, + results: Entities{ + searchEntities["wordpress"].entity, + searchEntities["wordpress-simple"].entity, + }, + }, { + about: "autocomplete end of word", + sp: SearchParams{ + Text: "PRESS", + AutoComplete: true, + }, + results: Entities{ + searchEntities["wordpress"].entity, + searchEntities["wordpress-simple"].entity, + }, + }, { about: "non-matching autocomplete search", sp: SearchParams{ Text: "worm", @@ -388,6 +397,22 @@ }, results: Entities{}, }, { + about: "autocomplete with hyphen - match", + sp: SearchParams{ + Text: "squid-f", + AutoComplete: true, + }, + results: Entities{ + searchEntities["squid-forwardproxy"].entity, + }, + }, { + about: "autocomplete with hyphen - no match", + sp: SearchParams{ + Text: "squid-g", + AutoComplete: true, + }, + results: Entities{}, + }, { about: "description filter search", sp: SearchParams{ Text: "", @@ -652,6 +677,58 @@ results: Entities{ searchEntities["mysql"].entity, }, + }, { + about: "name search", + sp: SearchParams{ + Text: "wordpress", + }, + results: Entities{ + searchEntities["wordpress"].entity, + searchEntities["wordpress-simple"].entity, + }, + }, { + about: "case insensitive search", + sp: SearchParams{ + Text: "WORDPRESS", + }, + results: Entities{ + searchEntities["wordpress"].entity, + searchEntities["wordpress-simple"].entity, + }, + 
}, { + about: "case insensitive search on tags", + sp: SearchParams{ + Text: "WORDPRESSTAG", + }, + results: Entities{ + searchEntities["wordpress"].entity, + }, + }, { + about: "case insensitive search on categories", + sp: SearchParams{ + Text: "WORDPRESSCAT", + }, + results: Entities{ + searchEntities["wordpress"].entity, + }, + }, { + about: "autocomplete with spaces", + sp: SearchParams{ + Text: "wordpress simple", + AutoComplete: true, + }, + results: Entities{ + searchEntities["wordpress-simple"].entity, + }, + }, { + about: "autocomplete with spaces, reversed", + sp: SearchParams{ + Text: "simple wordpress", + AutoComplete: true, + }, + results: Entities{ + searchEntities["wordpress-simple"].entity, + }, }, } @@ -1020,8 +1097,8 @@ c.Logf("%d. %s", i, test.series) res, err := s.store.Search(SearchParams{ Filters: map[string][]string{ - "name": []string{"juju-gui"}, - "series": []string{test.series}, + "name": {"juju-gui"}, + "series": {test.series}, }, }) c.Assert(err, gc.IsNil) @@ -1070,7 +1147,7 @@ c.Assert(err, gc.IsNil) err = s.store.SetPerms(&id.URL, "read", "test", params.Everyone) c.Assert(err, gc.IsNil) - err = s.store.SetPerms(&id.URL, "development.read", "test", params.Everyone) + err = s.store.SetPerms(&id.URL, "edge.read", "test", params.Everyone) c.Assert(err, gc.IsNil) err = s.store.SetPerms(&id.URL, "stable.read", "test", params.Everyone) c.Assert(err, gc.IsNil) @@ -1082,7 +1159,7 @@ err = s.store.ES.GetDocument(s.TestIndex, typeName, s.store.ES.getID(&id.URL), &actual) c.Assert(err, gc.ErrorMatches, "elasticsearch document not found") - err = s.store.Publish(id, nil, params.DevelopmentChannel) + err = s.store.Publish(id, nil, params.EdgeChannel) c.Assert(err, gc.IsNil) err = s.store.UpdateSearch(id) c.Assert(err, gc.IsNil) diff -Nru juju-core-2.0~beta15/src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/server.go juju-core-2.0.0/src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/server.go --- 
juju-core-2.0~beta15/src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/server.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/server.go 2016-10-13 14:32:25.000000000 +0000 @@ -1,4 +1,4 @@ -// Copyright 2014 Canonical Ltd. +// Copyright 2014-2016 Canonical Ltd. // Licensed under the AGPLv3, see LICENCE file for details. // This is the internal version of the charmstore package. @@ -11,8 +11,10 @@ "strings" "time" + "github.com/prometheus/client_golang/prometheus" "gopkg.in/errgo.v1" "gopkg.in/macaroon-bakery.v1/bakery" + "gopkg.in/macaroon-bakery.v1/bakery/mgostorage" "gopkg.in/mgo.v2" "gopkg.in/natefinch/lumberjack.v2" @@ -83,8 +85,14 @@ // AuditLogger optionally holds the logger which will be used to // write audit log entries. AuditLogger *lumberjack.Logger + + // RootKeyPolicy holds the default policy used when creating + // macaroon root keys. + RootKeyPolicy mgostorage.Policy } +const defaultRootKeyExpiryDuration = 24 * time.Hour + // NewServer returns a handler that serves the given charm store API // versions using db to store that charm store data. // An optional elasticsearch configuration can be specified in si. If @@ -115,6 +123,9 @@ Location: "charmstore", Locator: config.PublicKeyLocator, } + if config.RootKeyPolicy.ExpiryDuration == 0 { + config.RootKeyPolicy.ExpiryDuration = defaultRootKeyExpiryDuration + } pool, err := NewPool(db, si, &bparams, config) if err != nil { return nil, errgo.Notef(err, "cannot make store") @@ -136,6 +147,7 @@ } // Version independent API. 
handle(srv.mux, "/debug", newServiceDebugHandler(pool, config, srv.mux)) + handle(srv.mux, "/metrics", prometheus.Handler()) for vers, newAPI := range versions { root := "/" + vers h := newAPI(pool, config, root) diff -Nru juju-core-2.0~beta15/src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/server_test.go juju-core-2.0.0/src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/server_test.go --- juju-core-2.0~beta15/src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/server_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/server_test.go 2016-10-13 14:32:25.000000000 +0000 @@ -8,6 +8,7 @@ "github.com/juju/testing/httptesting" gc "gopkg.in/check.v1" + "gopkg.in/macaroon-bakery.v1/bakery/mgostorage" "gopkg.in/juju/charmstore.v5-unstable/internal/router" "gopkg.in/juju/charmstore.v5-unstable/internal/storetesting" @@ -122,6 +123,9 @@ AuthPassword: "test-password", IdentityAPIURL: "http://0.1.2.3", IdentityLocation: "http://0.1.2.3", + RootKeyPolicy: mgostorage.Policy{ + ExpiryDuration: defaultRootKeyExpiryDuration, + }, }, }) } @@ -153,6 +157,9 @@ AuthPassword: "test-password", IdentityAPIURL: "http://0.1.2.3", IdentityLocation: "http://0.1.2.3", + RootKeyPolicy: mgostorage.Policy{ + ExpiryDuration: defaultRootKeyExpiryDuration, + }, }, }) } diff -Nru juju-core-2.0~beta15/src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/stats.go juju-core-2.0.0/src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/stats.go --- juju-core-2.0~beta15/src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/stats.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/stats.go 2016-10-13 14:32:25.000000000 +0000 @@ -4,7 +4,6 @@ package charmstore // import "gopkg.in/juju/charmstore.v5-unstable/internal/charmstore" import ( - "encoding/json" "fmt" "sort" "strconv" @@ -471,20 +470,6 @@ LastDay, LastWeek, 
LastMonth, Total int64 } -// LegacyDownloadCountsEnabled represents whether aggregated download counts -// must be retrieved from the legacy infrastructure. In essence, if the value -// is true (enabled), aggregated counts are not calculated based on the data -// stored in the charm store stats; they are instead retrieved from the entity -// extra-info. For this reason, enabling this we assume an external program -// updated the extra-info for the entity, specifically the -// "legacy-download-stats" key. -// TODO (frankban): this is a temporary hack, and can be removed once we have -// a more consistent way to import the download counts from the legacy charm -// store (charms) and from charmworld (bundles). To remove the legacy download -// counts logic in the future, grep the code for "LegacyDownloadCountsEnabled" -// and remove as required. -var LegacyDownloadCountsEnabled = true - // ArchiveDownloadCounts calculates the aggregated download counts for // a charm or bundle. func (s *Store) ArchiveDownloadCounts(id *charm.URL, refresh bool) (thisRevision, allRevisions AggregatedCounts, err error) { @@ -528,40 +513,6 @@ if err != nil { return nil, errgo.Notef(err, "cannot get aggregated count for %q", id) } - if !LegacyDownloadCountsEnabled { - return counts, nil - } - // TODO (frankban): remove this code when removing the legacy counts logic. - legacy, err := s.legacyDownloadCounts(id) - if err != nil { - return nil, err - } - counts.LastDay += legacy.LastDay - counts.LastWeek += legacy.LastWeek - counts.LastMonth += legacy.LastMonth - counts.Total += legacy.Total - return counts, nil -} - -// legacyDownloadCounts retrieves the aggregated stats from the entity -// extra-info. This is used when LegacyDownloadCountsEnabled is true. -// TODO (frankban): remove this method when removing the legacy counts logic. 
-func (s *Store) legacyDownloadCounts(id *charm.URL) (AggregatedCounts, error) { - counts := AggregatedCounts{} - entities, err := s.FindEntities(id, FieldSelector("extrainfo")) - if err != nil { - return counts, errgo.Mask(err, errgo.Is(params.ErrNotFound)) - } - if len(entities) == 0 { - return counts, errgo.WithCausef(nil, params.ErrNotFound, "entity not found") - } - entity := entities[0] - data, ok := entity.ExtraInfo[params.LegacyDownloadStats] - if ok { - if err := json.Unmarshal(data, &counts.Total); err != nil { - return counts, errgo.Notef(err, "cannot unmarshal extra-info value") - } - } return counts, nil } diff -Nru juju-core-2.0~beta15/src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/stats_test.go juju-core-2.0.0/src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/stats_test.go --- juju-core-2.0~beta15/src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/stats_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/stats_test.go 2016-10-13 14:32:25.000000000 +0000 @@ -4,7 +4,6 @@ package charmstore_test // import "gopkg.in/juju/charmstore.v5-unstable/internal/charmstore" import ( - "fmt" "strconv" "sync" "time" @@ -432,12 +431,11 @@ } type testStatsEntity struct { - id *router.ResolvedURL - lastDay int - lastWeek int - lastMonth int - total int - legacyTotal int + id *router.ResolvedURL + lastDay int + lastWeek int + lastMonth int + total int } var archiveDownloadCountsTests = []struct { @@ -449,12 +447,11 @@ }{{ about: "single revision", charms: []testStatsEntity{{ - id: charmstore.MustParseResolvedURL("~charmers/trusty/wordpress-0"), - lastDay: 1, - lastWeek: 2, - lastMonth: 3, - total: 4, - legacyTotal: 0, + id: charmstore.MustParseResolvedURL("~charmers/trusty/wordpress-0"), + lastDay: 1, + lastWeek: 2, + lastMonth: 3, + total: 4, }}, id: charm.MustParseURL("~charmers/trusty/wordpress-0"), expectThisRevision: charmstore.AggregatedCounts{ @@ -470,44 
+467,19 @@ Total: 10, }, }, { - about: "single revision with legacy count", - charms: []testStatsEntity{{ - id: charmstore.MustParseResolvedURL("~charmers/trusty/wordpress-0"), - lastDay: 1, - lastWeek: 2, - lastMonth: 3, - total: 4, - legacyTotal: 10, - }}, - id: charm.MustParseURL("~charmers/trusty/wordpress-0"), - expectThisRevision: charmstore.AggregatedCounts{ - LastDay: 1, - LastWeek: 3, - LastMonth: 6, - Total: 20, - }, - expectAllRevisions: charmstore.AggregatedCounts{ - LastDay: 1, - LastWeek: 3, - LastMonth: 6, - Total: 20, - }, -}, { about: "multiple revisions", charms: []testStatsEntity{{ - id: charmstore.MustParseResolvedURL("~charmers/trusty/wordpress-0"), - lastDay: 1, - lastWeek: 2, - lastMonth: 3, - total: 4, - legacyTotal: 0, + id: charmstore.MustParseResolvedURL("~charmers/trusty/wordpress-0"), + lastDay: 1, + lastWeek: 2, + lastMonth: 3, + total: 4, }, { - id: charmstore.MustParseResolvedURL("~charmers/trusty/wordpress-1"), - lastDay: 2, - lastWeek: 3, - lastMonth: 4, - total: 5, - legacyTotal: 0, + id: charmstore.MustParseResolvedURL("~charmers/trusty/wordpress-1"), + lastDay: 2, + lastWeek: 3, + lastMonth: 4, + total: 5, }}, id: charm.MustParseURL("~charmers/trusty/wordpress-1"), expectThisRevision: charmstore.AggregatedCounts{ @@ -523,44 +495,13 @@ Total: 24, }, }, { - about: "multiple revisions with legacy count", - charms: []testStatsEntity{{ - id: charmstore.MustParseResolvedURL("~charmers/trusty/wordpress-0"), - lastDay: 1, - lastWeek: 2, - lastMonth: 3, - total: 4, - legacyTotal: 100, - }, { - id: charmstore.MustParseResolvedURL("~charmers/trusty/wordpress-1"), - lastDay: 2, - lastWeek: 3, - lastMonth: 4, - total: 5, - legacyTotal: 100, - }}, - id: charm.MustParseURL("~charmers/trusty/wordpress-1"), - expectThisRevision: charmstore.AggregatedCounts{ - LastDay: 2, - LastWeek: 5, - LastMonth: 9, - Total: 114, - }, - expectAllRevisions: charmstore.AggregatedCounts{ - LastDay: 3, - LastWeek: 8, - LastMonth: 15, - Total: 124, - }, -}, { 
about: "promulgated revision", charms: []testStatsEntity{{ - id: charmstore.MustParseResolvedURL("0 ~charmers/trusty/wordpress-0"), - lastDay: 1, - lastWeek: 2, - lastMonth: 3, - total: 4, - legacyTotal: 0, + id: charmstore.MustParseResolvedURL("0 ~charmers/trusty/wordpress-0"), + lastDay: 1, + lastWeek: 2, + lastMonth: 3, + total: 4, }}, id: charm.MustParseURL("trusty/wordpress-0"), expectThisRevision: charmstore.AggregatedCounts{ @@ -576,58 +517,31 @@ Total: 10, }, }, { - about: "promulgated revision with legacy count", - charms: []testStatsEntity{{ - id: charmstore.MustParseResolvedURL("0 ~charmers/trusty/wordpress-0"), - lastDay: 1, - lastWeek: 2, - lastMonth: 3, - total: 4, - legacyTotal: 10, - }}, - id: charm.MustParseURL("trusty/wordpress-0"), - expectThisRevision: charmstore.AggregatedCounts{ - LastDay: 1, - LastWeek: 3, - LastMonth: 6, - Total: 20, - }, - expectAllRevisions: charmstore.AggregatedCounts{ - LastDay: 1, - LastWeek: 3, - LastMonth: 6, - Total: 20, - }, -}, { about: "promulgated revision with changed owner", charms: []testStatsEntity{{ - id: charmstore.MustParseResolvedURL("0 ~charmers/trusty/wordpress-0"), - lastDay: 1, - lastWeek: 10, - lastMonth: 100, - total: 1000, - legacyTotal: 0, + id: charmstore.MustParseResolvedURL("0 ~charmers/trusty/wordpress-0"), + lastDay: 1, + lastWeek: 10, + lastMonth: 100, + total: 1000, }, { - id: charmstore.MustParseResolvedURL("~charmers/trusty/wordpress-1"), - lastDay: 2, - lastWeek: 20, - lastMonth: 200, - total: 2000, - legacyTotal: 0, + id: charmstore.MustParseResolvedURL("~charmers/trusty/wordpress-1"), + lastDay: 2, + lastWeek: 20, + lastMonth: 200, + total: 2000, }, { - id: charmstore.MustParseResolvedURL("~wordpress-charmers/trusty/wordpress-0"), - lastDay: 3, - lastWeek: 30, - lastMonth: 300, - total: 3000, - legacyTotal: 0, + id: charmstore.MustParseResolvedURL("~wordpress-charmers/trusty/wordpress-0"), + lastDay: 3, + lastWeek: 30, + lastMonth: 300, + total: 3000, }, { - id: 
charmstore.MustParseResolvedURL("1 ~wordpress-charmers/trusty/wordpress-1"), - lastDay: 4, - lastWeek: 40, - lastMonth: 400, - total: 4000, - legacyTotal: 0, + id: charmstore.MustParseResolvedURL("1 ~wordpress-charmers/trusty/wordpress-1"), + lastDay: 4, + lastWeek: 40, + lastMonth: 400, + total: 4000, }}, id: charm.MustParseURL("trusty/wordpress-1"), expectThisRevision: charmstore.AggregatedCounts{ @@ -645,7 +559,6 @@ }} func (s *StatsSuite) TestArchiveDownloadCounts(c *gc.C) { - s.PatchValue(&charmstore.LegacyDownloadCountsEnabled, true) for i, test := range archiveDownloadCountsTests { c.Logf("%d: %s", i, test.about) // Clear everything @@ -670,13 +583,6 @@ setDownloadCounts(c, s.store, &url, now.Add(-10*24*time.Hour), charm.lastMonth) setDownloadCounts(c, s.store, &url, now.Add(-100*24*time.Hour), charm.total) } - extraInfo := map[string][]byte{ - params.LegacyDownloadStats: []byte(fmt.Sprintf("%d", charm.legacyTotal)), - } - err = s.store.UpdateEntity(charm.id, bson.D{{ - "$set", bson.D{{"extrainfo", extraInfo}}, - }}) - c.Assert(err, gc.IsNil) } thisRevision, allRevisions, err := s.store.ArchiveDownloadCounts(test.id, true) c.Assert(err, gc.IsNil) diff -Nru juju-core-2.0~beta15/src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/store.go juju-core-2.0.0/src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/store.go --- juju-core-2.0~beta15/src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/store.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/store.go 2016-10-13 14:32:25.000000000 +0000 @@ -42,11 +42,11 @@ // from the pool that can be used to process short-lived requests // to access and modify the store. 
type Pool struct { - db StoreDatabase - es *SearchIndex - bakeryParams *bakery.NewServiceParams - stats stats - run *parallel.Run + db StoreDatabase + es *SearchIndex + bakery *bakery.Service + stats stats + run *parallel.Run // statsCache holds a cache of AggregatedCounts // values, keyed by entity id. When the id has no @@ -72,6 +72,9 @@ // closed holds whether the handler has been closed. closed bool + + // rootKeys holds the cache of macaroon root keys. + rootKeys *mgostorage.RootKeys } // reqStoreCacheSize holds the maximum number of store @@ -102,6 +105,7 @@ config: config, run: parallel.NewRun(maxAsyncGoroutines), auditLogger: config.AuditLogger, + rootKeys: mgostorage.NewRootKeys(100), } if config.MaxMgoSessions > 0 { p.reqStoreC = make(chan *Store, config.MaxMgoSessions) @@ -109,23 +113,11 @@ p.reqStoreC = make(chan *Store, reqStoreCacheSize) } if bakeryParams != nil { - bp := *bakeryParams - // Fill out any bakery parameters explicitly here so - // that we use the same values when each Store is - // created. We don't fill out bp.Store field though, as - // that needs to hold the correct mongo session which we - // only know when the Store is created from the Pool. - if bp.Key == nil { - var err error - bp.Key, err = bakery.GenerateKey() - if err != nil { - return nil, errgo.Notef(err, "cannot generate bakery key") - } - } - if bp.Locator == nil { - bp.Locator = bakery.PublicKeyLocatorMap(nil) + bakerySvc, err := bakery.NewService(*bakeryParams) + if err != nil { + return nil, errgo.Notef(err, "cannot create bakery service") } - p.bakeryParams = &bp + p.bakery = bakerySvc } if config.AuditLogger != nil { @@ -231,12 +223,21 @@ stats: &p.stats, pool: p, } - if p.bakeryParams != nil { - store.Bakery = newBakery(db, *p.bakeryParams) - } + store.Bakery = store.BakeryWithPolicy(p.config.RootKeyPolicy) return store, nil } +// BakeryWithPolicy returns a copy of the Store's Bakery with a macaroon +// storage that returns root keys conforming to the given policy. 
+// +// If there is no configured bakery, it returns nil. +func (s *Store) BakeryWithPolicy(policy mgostorage.Policy) *bakery.Service { + if s.pool.bakery == nil { + return nil + } + return s.pool.bakery.WithRootKeyStore(s.pool.rootKeys.NewStorage(s.DB.Macaroons(), policy)) +} + func newBakery(db StoreDatabase, bp bakery.NewServiceParams) *bakery.Service { macStore, err := mgostorage.New(db.Macaroons()) if err != nil { @@ -274,9 +275,7 @@ s1 := *s s1.DB = s.DB.clone() s1.BlobStore = blobstore.New(s1.DB.Database, "entitystore") - if s.Bakery != nil { - s1.Bakery = newBakery(s1.DB, *s.pool.bakeryParams) - } + s1.Bakery = s1.BakeryWithPolicy(s.pool.config.RootKeyPolicy) s.pool.mu.Lock() s.pool.storeCount++ @@ -391,10 +390,10 @@ mgo.Index{Key: []string{"bundlecharms"}}, }, { s.DB.Entities(), - mgo.Index{Key: []string{"name", "development", "-promulgated-revision", "-supportedseries"}}, + mgo.Index{Key: []string{"name", "published", "-promulgated-revision", "-supportedseries"}}, }, { s.DB.Entities(), - mgo.Index{Key: []string{"name", "development", "user", "-revision", "-supportedseries"}}, + mgo.Index{Key: []string{"name", "published", "user", "-revision", "-supportedseries"}}, }, { s.DB.BaseEntities(), mgo.Index{Key: []string{"name"}}, @@ -415,6 +414,9 @@ return errgo.Notef(err, "cannot ensure index with keys %v on collection %s", idx.i, idx.c.Name) } } + if err := s.pool.rootKeys.EnsureIndex(s.DB.Macaroons()); err != nil { + return errgo.Notef(err, "cannot ensure root keys index") + } return nil } @@ -494,8 +496,7 @@ "promulgated-revision": 1, "series": 1, "revision": 1, - "development": 1, - "stable": 1, + "published": 1, } for f := range fields { nfields[f] = 1 @@ -513,15 +514,8 @@ // If a channel was specified make sure the entity is in that channel. // This is crucial because if we don't do this, then the user could choose // to use any chosen set of ACLs against any entity. 
- switch channel { - case params.StableChannel: - if !entity.Stable { - return nil, errgo.WithCausef(nil, params.ErrNotFound, "%s not found in stable channel", url) - } - case params.DevelopmentChannel: - if !entity.Development { - return nil, errgo.WithCausef(nil, params.ErrNotFound, "%s not found in development channel", url) - } + if params.ValidChannels[channel] && channel != params.UnpublishedChannel && !entity.Published[channel] { + return nil, errgo.WithCausef(nil, params.ErrNotFound, "%s not found in %s channel", url, channel) } return entity, nil } @@ -571,9 +565,26 @@ } var entityURL *charm.URL if url.Series == "" { - for _, u := range baseEntity.ChannelEntities[ch] { - if entityURL == nil || seriesScore[u.Series] > seriesScore[entityURL.Series] { + var entitySeries string + for s, u := range baseEntity.ChannelEntities[ch] { + // Determine the preferred URL from the available series. + // + // Note that because each of the series has a different + // score the only situation where the score in the URL is + // where there is more than one series supported by a + // multi-series charm. In this case the tie is broken by + // looking for the preferred series from the ones + // supported by the charm. To save fetching every charm + // to look at the supported series the key is used, + // because when a charm is listed as the published + // version for a series it must support that series. + if entityURL == nil || + seriesScore[u.Series] > seriesScore[entityURL.Series] || + // Note that if the two series are the same, they must both be + // multi-series URLs. + seriesScore[u.Series] == seriesScore[entityURL.Series] && seriesScore[s] > seriesScore[entitySeries] { entityURL = u + entitySeries = s } } } else { @@ -641,11 +652,9 @@ var seriesBundleOrEmpty = bson.D{{"$or", []bson.D{{{"series", "bundle"}}, {{"series", ""}}}}} -// EntitiesQuery creates a mgo.Query object that can be used to find -// entities matching the given URL. 
If the given URL has no user then -// the produced query will only match promulgated entities. If the given URL -// channel is not "development" then the produced query will only match -// published entities. +// EntitiesQuery creates a mgo.Query object that can be used to find entities +// matching the given URL. If the given URL has no user then the produced query +// will only match promulgated entities. func (s *Store) EntitiesQuery(url *charm.URL) *mgo.Query { entities := s.DB.Entities() query := make(bson.D, 1, 5) @@ -749,8 +758,9 @@ var ErrPublishResourceMismatch = errgo.Newf("charm published with incorrect resources") // Publish assigns channels to the entity corresponding to the given URL. -// An error is returned if no channels are provided. For the time being, -// the only supported channels are "development" and "stable". +// An error is returned if no channels are provided. See params.ValidChannels +// for the list of supported channels. The unpublished channel cannot +// be provided. // // If the given resources do not match those expected or they're not // found, an error with a ErrPublichResourceMismatch cause will be returned. @@ -759,12 +769,12 @@ // Throw away any channels that we don't like. actualChannels := make([]params.Channel, 0, len(channels)) for _, c := range channels { - switch c { - case params.StableChannel: + if !params.ValidChannels[c] || c == params.UnpublishedChannel { + continue + } + actualChannels = append(actualChannels, c) + if c == params.StableChannel { updateSearch = true - fallthrough - case params.DevelopmentChannel: - actualChannels = append(actualChannels, c) } } channels = actualChannels @@ -793,7 +803,7 @@ // Update the entity's published channels. update := make(bson.D, 0, len(channels)*(len(series)+1)) // ...ish. for _, c := range channels { - update = append(update, bson.DocElem{string(c), true}) + update = append(update, bson.DocElem{"published." 
+ string(c), true}) } if err := s.UpdateEntity(url, bson.D{{"$set", update}}); err != nil { return errgo.Mask(err, errgo.Is(params.ErrNotFound)) @@ -807,7 +817,6 @@ } update = append(update, bson.DocElem{fmt.Sprintf("channelresources.%s", c), resourceDocs}) } - if err := s.UpdateBaseEntity(url, bson.D{{"$set", update}}); err != nil { return errgo.Mask(err) } @@ -1050,11 +1059,11 @@ } // SetPerms sets the ACL specified by which for the base entity with the -// given id. The which parameter is in the form "[channel].operation", -// where channel, if specified, is one of "development" or "stable" and -// operation is one of "read" or "write". If which does not specify a -// channel then the unpublished ACL is updated. This is only provided for -// testing. +// given id. The which parameter is in the form "channel.operation", +// where channel is the string corresponding to one of the ValidChannels +// and operation is one of "read" or "write". If which does not specify a +// channel then the unpublished ACL is updated. +// This is only provided for testing. func (s *Store) SetPerms(id *charm.URL, which string, acl ...string) error { return s.DB.BaseEntities().UpdateId(mongodoc.BaseURL(id), bson.D{{"$set", bson.D{{"channelacls." + which, acl}}, @@ -1211,8 +1220,6 @@ // allCollections holds for each collection used by the charm store a // function returns that collection. -// The macaroons collection is omitted because it does -// not exist until a macaroon is actually created. 
var allCollections = []func(StoreDatabase) *mgo.Collection{ StoreDatabase.StatCounters, StoreDatabase.StatTokens, @@ -1221,6 +1228,7 @@ StoreDatabase.Resources, StoreDatabase.Logs, StoreDatabase.Migrations, + StoreDatabase.Macaroons, } // Collections returns a slice of all the collections used @@ -1380,7 +1388,7 @@ func (lq *ListQuery) Iter(fields map[string]int) *mgo.Iter { qfields := FieldSelector( "promulgated-url", - "development", + "published", "name", "user", "series", @@ -1398,7 +1406,7 @@ "$baseurl", "$series", bson.D{{ - "$cond", []string{"$development", "true", "false"}, + "$cond", []string{"$published.edge", "true", "false"}, }}, }, }}}) diff -Nru juju-core-2.0~beta15/src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/store_test.go juju-core-2.0.0/src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/store_test.go --- juju-core-2.0~beta15/src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/store_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/store_test.go 2016-10-13 14:32:25.000000000 +0000 @@ -355,7 +355,15 @@ Read: []string{"charmers"}, Write: []string{"charmers"}, }, - params.DevelopmentChannel: { + params.EdgeChannel: { + Read: []string{"charmers"}, + Write: []string{"charmers"}, + }, + params.BetaChannel: { + Read: []string{"charmers"}, + Write: []string{"charmers"}, + }, + params.CandidateChannel: { Read: []string{"charmers"}, Write: []string{"charmers"}, }, @@ -386,7 +394,15 @@ Read: []string{"who"}, Write: []string{"who"}, }, - params.DevelopmentChannel: { + params.EdgeChannel: { + Read: []string{"who"}, + Write: []string{"who"}, + }, + params.BetaChannel: { + Read: []string{"who"}, + Write: []string{"who"}, + }, + params.CandidateChannel: { Read: []string{"who"}, Write: []string{"who"}, }, @@ -909,7 +925,7 @@ panic("resolved URL with no revision") } return &router.ResolvedURL{ - URL: *url.WithChannel(""), + URL: *url, PromulgatedRevision: 
promRev, } } @@ -1129,7 +1145,6 @@ // Some collections don't have indexes so they are created only when used. createdOnUse := map[string]bool{ "migrations": true, - "macaroons": true, } // Check that all collections mentioned by Collections are actually created. for _, coll := range colls { @@ -1338,188 +1353,198 @@ } var findBestEntityCharms = []struct { - id *router.ResolvedURL - charm charm.Charm - development bool - stable bool + id *router.ResolvedURL + charm charm.Charm + edge bool + stable bool }{{ - id: router.MustNewResolvedURL("~charmers/trusty/wordpress-0", 0), - charm: storetesting.NewCharm(nil), - development: true, - stable: true, -}, { - id: router.MustNewResolvedURL("~charmers/trusty/wordpress-1", 1), - charm: storetesting.NewCharm(nil), - development: true, - stable: true, -}, { - id: router.MustNewResolvedURL("~charmers/trusty/wordpress-2", 2), - charm: storetesting.NewCharm(nil), - development: true, - stable: false, -}, { - id: router.MustNewResolvedURL("~charmers/trusty/wordpress-3", 3), - charm: storetesting.NewCharm(nil), - development: false, - stable: false, -}, { - id: router.MustNewResolvedURL("~charmers/precise/wordpress-4", 4), - charm: storetesting.NewCharm(nil), - development: true, - stable: true, -}, { - id: router.MustNewResolvedURL("~charmers/precise/wordpress-5", 5), - charm: storetesting.NewCharm(nil), - development: true, - stable: true, -}, { - id: router.MustNewResolvedURL("~charmers/precise/wordpress-6", 6), - charm: storetesting.NewCharm(nil), - development: true, - stable: false, -}, { - id: router.MustNewResolvedURL("~charmers/precise/wordpress-7", 7), - charm: storetesting.NewCharm(nil), - development: false, - stable: false, -}, { - id: router.MustNewResolvedURL("~charmers/mysql-0", 0), - charm: storetesting.NewCharm(storetesting.MetaWithSupportedSeries(nil, "trusty", "precise")), - development: true, - stable: true, -}, { - id: router.MustNewResolvedURL("~charmers/mysql-1", 1), - charm: 
storetesting.NewCharm(storetesting.MetaWithSupportedSeries(nil, "trusty", "precise")), - development: true, - stable: true, -}, { - id: router.MustNewResolvedURL("~charmers/mysql-2", 2), - charm: storetesting.NewCharm(storetesting.MetaWithSupportedSeries(nil, "trusty", "precise")), - development: true, - stable: false, -}, { - id: router.MustNewResolvedURL("~charmers/mysql-3", 3), - charm: storetesting.NewCharm(storetesting.MetaWithSupportedSeries(nil, "trusty", "precise")), - development: false, - stable: false, -}, { - id: router.MustNewResolvedURL("~charmers/trusty/mongodb-0", -1), - charm: storetesting.NewCharm(nil), - development: true, - stable: true, -}, { - id: router.MustNewResolvedURL("~charmers/trusty/mongodb-1", -1), - charm: storetesting.NewCharm(nil), - development: true, - stable: true, -}, { - id: router.MustNewResolvedURL("~charmers/trusty/mongodb-2", -1), - charm: storetesting.NewCharm(nil), - development: true, - stable: false, -}, { - id: router.MustNewResolvedURL("~charmers/trusty/mongodb-3", -1), - charm: storetesting.NewCharm(nil), - development: false, - stable: false, -}, { - id: router.MustNewResolvedURL("~charmers/trusty/apache-0", 0), - charm: storetesting.NewCharm(nil), - development: true, - stable: false, -}, { - id: router.MustNewResolvedURL("~charmers/trusty/nginx-0", 0), - charm: storetesting.NewCharm(nil), - development: false, - stable: false, -}, { - id: router.MustNewResolvedURL("~charmers/trusty/postgresql-0", 0), - charm: storetesting.NewCharm(nil), - development: true, - stable: true, -}, { - id: router.MustNewResolvedURL("~charmers/precise/postgresql-0", 0), - charm: storetesting.NewCharm(nil), - development: true, - stable: true, -}, { - id: router.MustNewResolvedURL("~charmers/postgresql-1", 1), - charm: storetesting.NewCharm(storetesting.MetaWithSupportedSeries(nil, "trusty", "precise")), - development: true, - stable: true, -}, { - id: router.MustNewResolvedURL("~charmers/trusty/ceph-0", 0), - charm: 
storetesting.NewCharm(nil), - development: true, - stable: true, -}, { - id: router.MustNewResolvedURL("~openstack-charmers/trusty/ceph-0", 1), - charm: storetesting.NewCharm(nil), - development: true, - stable: false, + id: router.MustNewResolvedURL("~charmers/trusty/wordpress-0", 0), + charm: storetesting.NewCharm(nil), + edge: true, + stable: true, +}, { + id: router.MustNewResolvedURL("~charmers/trusty/wordpress-1", 1), + charm: storetesting.NewCharm(nil), + edge: true, + stable: true, +}, { + id: router.MustNewResolvedURL("~charmers/trusty/wordpress-2", 2), + charm: storetesting.NewCharm(nil), + edge: true, + stable: false, +}, { + id: router.MustNewResolvedURL("~charmers/trusty/wordpress-3", 3), + charm: storetesting.NewCharm(nil), + edge: false, + stable: false, +}, { + id: router.MustNewResolvedURL("~charmers/precise/wordpress-4", 4), + charm: storetesting.NewCharm(nil), + edge: true, + stable: true, +}, { + id: router.MustNewResolvedURL("~charmers/precise/wordpress-5", 5), + charm: storetesting.NewCharm(nil), + edge: true, + stable: true, +}, { + id: router.MustNewResolvedURL("~charmers/precise/wordpress-6", 6), + charm: storetesting.NewCharm(nil), + edge: true, + stable: false, +}, { + id: router.MustNewResolvedURL("~charmers/precise/wordpress-7", 7), + charm: storetesting.NewCharm(nil), + edge: false, + stable: false, +}, { + id: router.MustNewResolvedURL("~charmers/mysql-0", 0), + charm: storetesting.NewCharm(storetesting.MetaWithSupportedSeries(nil, "trusty", "precise")), + edge: true, + stable: true, +}, { + id: router.MustNewResolvedURL("~charmers/mysql-1", 1), + charm: storetesting.NewCharm(storetesting.MetaWithSupportedSeries(nil, "trusty", "precise")), + edge: true, + stable: true, +}, { + id: router.MustNewResolvedURL("~charmers/mysql-2", 2), + charm: storetesting.NewCharm(storetesting.MetaWithSupportedSeries(nil, "trusty", "precise")), + edge: true, + stable: false, +}, { + id: router.MustNewResolvedURL("~charmers/mysql-3", 3), + charm: 
storetesting.NewCharm(storetesting.MetaWithSupportedSeries(nil, "trusty", "precise")), + edge: false, + stable: false, +}, { + id: router.MustNewResolvedURL("~charmers/trusty/mongodb-0", -1), + charm: storetesting.NewCharm(nil), + edge: true, + stable: true, +}, { + id: router.MustNewResolvedURL("~charmers/trusty/mongodb-1", -1), + charm: storetesting.NewCharm(nil), + edge: true, + stable: true, +}, { + id: router.MustNewResolvedURL("~charmers/trusty/mongodb-2", -1), + charm: storetesting.NewCharm(nil), + edge: true, + stable: false, +}, { + id: router.MustNewResolvedURL("~charmers/trusty/mongodb-3", -1), + charm: storetesting.NewCharm(nil), + edge: false, + stable: false, +}, { + id: router.MustNewResolvedURL("~charmers/trusty/apache-0", 0), + charm: storetesting.NewCharm(nil), + edge: true, + stable: false, +}, { + id: router.MustNewResolvedURL("~charmers/trusty/nginx-0", 0), + charm: storetesting.NewCharm(nil), + edge: false, + stable: false, +}, { + id: router.MustNewResolvedURL("~charmers/trusty/postgresql-0", 0), + charm: storetesting.NewCharm(nil), + edge: true, + stable: true, +}, { + id: router.MustNewResolvedURL("~charmers/precise/postgresql-0", 0), + charm: storetesting.NewCharm(nil), + edge: true, + stable: true, +}, { + id: router.MustNewResolvedURL("~charmers/postgresql-1", 1), + charm: storetesting.NewCharm(storetesting.MetaWithSupportedSeries(nil, "trusty", "precise")), + edge: true, + stable: true, +}, { + id: router.MustNewResolvedURL("~charmers/trusty/ceph-0", 0), + charm: storetesting.NewCharm(nil), + edge: true, + stable: true, +}, { + id: router.MustNewResolvedURL("~openstack-charmers/trusty/ceph-0", 1), + charm: storetesting.NewCharm(nil), + edge: true, + stable: false, +}, { + id: router.MustNewResolvedURL("~charmers/elasticsearch-0", -1), + charm: storetesting.NewCharm(storetesting.MetaWithSupportedSeries(nil, "trusty")), + edge: true, + stable: true, +}, { + id: router.MustNewResolvedURL("~charmers/elasticsearch-1", -1), + charm: 
storetesting.NewCharm(storetesting.MetaWithSupportedSeries(nil, "xenial")), + edge: true, + stable: true, }} var findBestEntityBundles = []struct { - id *router.ResolvedURL - bundle charm.Bundle - development bool - stable bool + id *router.ResolvedURL + bundle charm.Bundle + edge bool + stable bool }{{ id: router.MustNewResolvedURL("~charmers/bundle/wordpress-simple-0", 0), bundle: storetesting.NewBundle(&charm.BundleData{ Applications: map[string]*charm.ApplicationSpec{ - "wordpress": &charm.ApplicationSpec{ + "wordpress": { Charm: "cs:wordpress", }, - "mysql": &charm.ApplicationSpec{ + "mysql": { Charm: "cs:mysql", }, }, }), - development: true, - stable: true, + edge: true, + stable: true, }, { id: router.MustNewResolvedURL("~charmers/bundle/wordpress-simple-1", 1), bundle: storetesting.NewBundle(&charm.BundleData{ Applications: map[string]*charm.ApplicationSpec{ - "wordpress": &charm.ApplicationSpec{ + "wordpress": { Charm: "cs:wordpress", }, - "mysql": &charm.ApplicationSpec{ + "mysql": { Charm: "cs:mysql", }, }, }), - development: true, - stable: true, + edge: true, + stable: true, }, { id: router.MustNewResolvedURL("~charmers/bundle/wordpress-simple-2", 2), bundle: storetesting.NewBundle(&charm.BundleData{ Applications: map[string]*charm.ApplicationSpec{ - "wordpress": &charm.ApplicationSpec{ + "wordpress": { Charm: "cs:wordpress", }, - "mysql": &charm.ApplicationSpec{ + "mysql": { Charm: "cs:mysql", }, }, }), - development: true, - stable: false, + edge: true, + stable: false, }, { id: router.MustNewResolvedURL("~charmers/bundle/wordpress-simple-3", 3), bundle: storetesting.NewBundle(&charm.BundleData{ Applications: map[string]*charm.ApplicationSpec{ - "wordpress": &charm.ApplicationSpec{ + "wordpress": { Charm: "cs:wordpress", }, - "mysql": &charm.ApplicationSpec{ + "mysql": { Charm: "cs:mysql", }, }, }), - development: false, - stable: false, + edge: false, + stable: false, }} var findBestEntityTests = []struct { @@ -1537,7 +1562,7 @@ expectID: 
router.MustNewResolvedURL("~charmers/trusty/wordpress-0", 0), }, { url: "~charmers/trusty/wordpress-0", - channel: params.DevelopmentChannel, + channel: params.EdgeChannel, expectID: router.MustNewResolvedURL("~charmers/trusty/wordpress-0", 0), }, { url: "~charmers/trusty/wordpress-0", @@ -1552,8 +1577,8 @@ expectID: router.MustNewResolvedURL("~charmers/trusty/wordpress-3", 3), }, { url: "~charmers/trusty/wordpress-3", - channel: params.DevelopmentChannel, - expectError: "cs:~charmers/trusty/wordpress-3 not found in development channel", + channel: params.EdgeChannel, + expectError: "cs:~charmers/trusty/wordpress-3 not found in edge channel", expectErrorCause: params.ErrNotFound, }, { url: "~charmers/trusty/wordpress-2", @@ -1569,7 +1594,7 @@ expectID: router.MustNewResolvedURL("~charmers/trusty/wordpress-0", 0), }, { url: "trusty/wordpress-0", - channel: params.DevelopmentChannel, + channel: params.EdgeChannel, expectID: router.MustNewResolvedURL("~charmers/trusty/wordpress-0", 0), }, { url: "trusty/wordpress-0", @@ -1584,8 +1609,8 @@ expectID: router.MustNewResolvedURL("~charmers/trusty/wordpress-3", 3), }, { url: "trusty/wordpress-3", - channel: params.DevelopmentChannel, - expectError: "cs:trusty/wordpress-3 not found in development channel", + channel: params.EdgeChannel, + expectError: "cs:trusty/wordpress-3 not found in edge channel", expectErrorCause: params.ErrNotFound, }, { url: "trusty/wordpress-2", @@ -1601,7 +1626,7 @@ expectID: router.MustNewResolvedURL("~charmers/trusty/wordpress-1", 1), }, { url: "~charmers/trusty/wordpress", - channel: params.DevelopmentChannel, + channel: params.EdgeChannel, expectID: router.MustNewResolvedURL("~charmers/trusty/wordpress-2", 2), }, { url: "~charmers/trusty/wordpress", @@ -1616,7 +1641,7 @@ expectID: router.MustNewResolvedURL("~charmers/trusty/wordpress-1", 1), }, { url: "trusty/wordpress", - channel: params.DevelopmentChannel, + channel: params.EdgeChannel, expectID: 
router.MustNewResolvedURL("~charmers/trusty/wordpress-2", 2), }, { url: "trusty/wordpress", @@ -1631,7 +1656,7 @@ expectID: router.MustNewResolvedURL("~charmers/precise/wordpress-5", 5), }, { url: "precise/wordpress", - channel: params.DevelopmentChannel, + channel: params.EdgeChannel, expectID: router.MustNewResolvedURL("~charmers/precise/wordpress-6", 6), }, { url: "precise/wordpress", @@ -1646,7 +1671,7 @@ expectID: router.MustNewResolvedURL("~charmers/trusty/wordpress-1", 1), }, { url: "wordpress", - channel: params.DevelopmentChannel, + channel: params.EdgeChannel, expectID: router.MustNewResolvedURL("~charmers/trusty/wordpress-2", 2), }, { url: "wordpress", @@ -1661,7 +1686,7 @@ expectID: router.MustNewResolvedURL("~charmers/trusty/wordpress-1", 1), }, { url: "~charmers/wordpress", - channel: params.DevelopmentChannel, + channel: params.EdgeChannel, expectID: router.MustNewResolvedURL("~charmers/trusty/wordpress-2", 2), }, { url: "~charmers/wordpress", @@ -1678,7 +1703,7 @@ expectErrorCause: params.ErrNotFound, }, { url: "~charmers/wordpress-0", - channel: params.DevelopmentChannel, + channel: params.EdgeChannel, expectError: "no matching charm or bundle for cs:~charmers/wordpress-0", expectErrorCause: params.ErrNotFound, }, { @@ -1695,7 +1720,7 @@ expectID: router.MustNewResolvedURL("~charmers/mysql-0", 0), }, { url: "~charmers/mysql-0", - channel: params.DevelopmentChannel, + channel: params.EdgeChannel, expectID: router.MustNewResolvedURL("~charmers/mysql-0", 0), }, { url: "~charmers/mysql-0", @@ -1710,8 +1735,8 @@ expectID: router.MustNewResolvedURL("~charmers/mysql-3", 3), }, { url: "~charmers/mysql-3", - channel: params.DevelopmentChannel, - expectError: "cs:~charmers/mysql-3 not found in development channel", + channel: params.EdgeChannel, + expectError: "cs:~charmers/mysql-3 not found in edge channel", expectErrorCause: params.ErrNotFound, }, { url: "~charmers/mysql-2", @@ -1727,7 +1752,7 @@ expectID: router.MustNewResolvedURL("~charmers/mysql-0", 0), 
}, { url: "mysql-0", - channel: params.DevelopmentChannel, + channel: params.EdgeChannel, expectID: router.MustNewResolvedURL("~charmers/mysql-0", 0), }, { url: "mysql-0", @@ -1742,8 +1767,8 @@ expectID: router.MustNewResolvedURL("~charmers/mysql-3", 3), }, { url: "mysql-3", - channel: params.DevelopmentChannel, - expectError: "cs:mysql-3 not found in development channel", + channel: params.EdgeChannel, + expectError: "cs:mysql-3 not found in edge channel", expectErrorCause: params.ErrNotFound, }, { url: "mysql-2", @@ -1759,7 +1784,7 @@ expectID: router.MustNewResolvedURL("~charmers/mysql-1", 1), }, { url: "~charmers/mysql", - channel: params.DevelopmentChannel, + channel: params.EdgeChannel, expectID: router.MustNewResolvedURL("~charmers/mysql-2", 2), }, { url: "~charmers/mysql", @@ -1774,7 +1799,7 @@ expectID: router.MustNewResolvedURL("~charmers/mysql-1", 1), }, { url: "mysql", - channel: params.DevelopmentChannel, + channel: params.EdgeChannel, expectID: router.MustNewResolvedURL("~charmers/mysql-2", 2), }, { url: "mysql", @@ -1789,7 +1814,7 @@ expectID: router.MustNewResolvedURL("~charmers/mysql-1", 1), }, { url: "~charmers/precise/mysql", - channel: params.DevelopmentChannel, + channel: params.EdgeChannel, expectID: router.MustNewResolvedURL("~charmers/mysql-2", 2), }, { url: "~charmers/precise/mysql", @@ -1804,7 +1829,7 @@ expectID: router.MustNewResolvedURL("~charmers/mysql-1", 1), }, { url: "precise/mysql", - channel: params.DevelopmentChannel, + channel: params.EdgeChannel, expectID: router.MustNewResolvedURL("~charmers/mysql-2", 2), }, { url: "precise/mysql", @@ -1819,7 +1844,7 @@ expectID: router.MustNewResolvedURL("~charmers/mysql-1", 1), }, { url: "~charmers/trusty/mysql", - channel: params.DevelopmentChannel, + channel: params.EdgeChannel, expectID: router.MustNewResolvedURL("~charmers/mysql-2", 2), }, { url: "~charmers/trusty/mysql", @@ -1834,7 +1859,7 @@ expectID: router.MustNewResolvedURL("~charmers/mysql-1", 1), }, { url: "trusty/mysql", - 
channel: params.DevelopmentChannel, + channel: params.EdgeChannel, expectID: router.MustNewResolvedURL("~charmers/mysql-2", 2), }, { url: "trusty/mysql", @@ -1849,7 +1874,7 @@ expectID: router.MustNewResolvedURL("~charmers/trusty/mongodb-0", -1), }, { url: "~charmers/trusty/mongodb-0", - channel: params.DevelopmentChannel, + channel: params.EdgeChannel, expectID: router.MustNewResolvedURL("~charmers/trusty/mongodb-0", -1), }, { url: "~charmers/trusty/mongodb-0", @@ -1864,8 +1889,8 @@ expectID: router.MustNewResolvedURL("~charmers/trusty/mongodb-3", -1), }, { url: "~charmers/trusty/mongodb-3", - channel: params.DevelopmentChannel, - expectError: "cs:~charmers/trusty/mongodb-3 not found in development channel", + channel: params.EdgeChannel, + expectError: "cs:~charmers/trusty/mongodb-3 not found in edge channel", expectErrorCause: params.ErrNotFound, }, { url: "~charmers/trusty/mongodb-2", @@ -1883,7 +1908,7 @@ expectErrorCause: params.ErrNotFound, }, { url: "trusty/mongodb-0", - channel: params.DevelopmentChannel, + channel: params.EdgeChannel, expectError: "no matching charm or bundle for cs:trusty/mongodb-0", expectErrorCause: params.ErrNotFound, }, { @@ -1900,7 +1925,7 @@ expectID: router.MustNewResolvedURL("~charmers/trusty/mongodb-1", -1), }, { url: "~charmers/trusty/mongodb", - channel: params.DevelopmentChannel, + channel: params.EdgeChannel, expectID: router.MustNewResolvedURL("~charmers/trusty/mongodb-2", -1), }, { url: "~charmers/trusty/mongodb", @@ -1917,7 +1942,7 @@ expectErrorCause: params.ErrNotFound, }, { url: "trusty/mongodb", - channel: params.DevelopmentChannel, + channel: params.EdgeChannel, expectError: "no matching charm or bundle for cs:trusty/mongodb", expectErrorCause: params.ErrNotFound, }, { @@ -1936,7 +1961,7 @@ expectErrorCause: params.ErrNotFound, }, { url: "mongodb", - channel: params.DevelopmentChannel, + channel: params.EdgeChannel, expectError: "no matching charm or bundle for cs:mongodb", expectErrorCause: params.ErrNotFound, }, { 
@@ -1955,7 +1980,7 @@ expectErrorCause: params.ErrNotFound, }, { url: "~charmers/trusty/apache", - channel: params.DevelopmentChannel, + channel: params.EdgeChannel, expectID: router.MustNewResolvedURL("~charmers/trusty/apache-0", 0), }, { url: "~charmers/trusty/apache", @@ -1970,7 +1995,7 @@ expectError: "cs:~charmers/trusty/apache-0 not found in stable channel", }, { url: "~charmers/trusty/apache-0", - channel: params.DevelopmentChannel, + channel: params.EdgeChannel, expectID: router.MustNewResolvedURL("~charmers/trusty/apache-0", 0), }, { url: "~charmers/trusty/apache-0", @@ -1987,7 +2012,7 @@ expectErrorCause: params.ErrNotFound, }, { url: "trusty/apache", - channel: params.DevelopmentChannel, + channel: params.EdgeChannel, expectID: router.MustNewResolvedURL("~charmers/trusty/apache-0", 0), }, { url: "trusty/apache", @@ -2002,7 +2027,7 @@ expectError: "cs:trusty/apache-0 not found in stable channel", }, { url: "trusty/apache-0", - channel: params.DevelopmentChannel, + channel: params.EdgeChannel, expectID: router.MustNewResolvedURL("~charmers/trusty/apache-0", 0), }, { url: "trusty/apache-0", @@ -2019,7 +2044,7 @@ expectErrorCause: params.ErrNotFound, }, { url: "~charmers/trusty/nginx", - channel: params.DevelopmentChannel, + channel: params.EdgeChannel, expectError: "no matching charm or bundle for cs:~charmers/trusty/nginx", expectErrorCause: params.ErrNotFound, }, { @@ -2035,8 +2060,8 @@ expectError: "cs:~charmers/trusty/nginx-0 not found in stable channel", }, { url: "~charmers/trusty/nginx-0", - channel: params.DevelopmentChannel, - expectError: "cs:~charmers/trusty/nginx-0 not found in development channel", + channel: params.EdgeChannel, + expectError: "cs:~charmers/trusty/nginx-0 not found in edge channel", }, { url: "~charmers/trusty/nginx-0", channel: params.UnpublishedChannel, @@ -2052,7 +2077,7 @@ expectErrorCause: params.ErrNotFound, }, { url: "trusty/nginx", - channel: params.DevelopmentChannel, + channel: params.EdgeChannel, expectError: "no 
matching charm or bundle for cs:trusty/nginx", expectErrorCause: params.ErrNotFound, }, { @@ -2068,8 +2093,8 @@ expectError: "cs:trusty/nginx-0 not found in stable channel", }, { url: "trusty/nginx-0", - channel: params.DevelopmentChannel, - expectError: "cs:trusty/nginx-0 not found in development channel", + channel: params.EdgeChannel, + expectError: "cs:trusty/nginx-0 not found in edge channel", }, { url: "trusty/nginx-0", channel: params.UnpublishedChannel, @@ -2083,7 +2108,7 @@ expectID: router.MustNewResolvedURL("~charmers/bundle/wordpress-simple-0", 0), }, { url: "~charmers/bundle/wordpress-simple-0", - channel: params.DevelopmentChannel, + channel: params.EdgeChannel, expectID: router.MustNewResolvedURL("~charmers/bundle/wordpress-simple-0", 0), }, { url: "~charmers/bundle/wordpress-simple-0", @@ -2098,8 +2123,8 @@ expectID: router.MustNewResolvedURL("~charmers/bundle/wordpress-simple-3", 3), }, { url: "~charmers/bundle/wordpress-simple-3", - channel: params.DevelopmentChannel, - expectError: "cs:~charmers/bundle/wordpress-simple-3 not found in development channel", + channel: params.EdgeChannel, + expectError: "cs:~charmers/bundle/wordpress-simple-3 not found in edge channel", }, { url: "~charmers/bundle/wordpress-simple-3", channel: params.StableChannel, @@ -2113,7 +2138,7 @@ expectID: router.MustNewResolvedURL("~charmers/bundle/wordpress-simple-0", 0), }, { url: "bundle/wordpress-simple-0", - channel: params.DevelopmentChannel, + channel: params.EdgeChannel, expectID: router.MustNewResolvedURL("~charmers/bundle/wordpress-simple-0", 0), }, { url: "bundle/wordpress-simple-0", @@ -2128,8 +2153,8 @@ expectID: router.MustNewResolvedURL("~charmers/bundle/wordpress-simple-3", 3), }, { url: "bundle/wordpress-simple-3", - channel: params.DevelopmentChannel, - expectError: "cs:bundle/wordpress-simple-3 not found in development channel", + channel: params.EdgeChannel, + expectError: "cs:bundle/wordpress-simple-3 not found in edge channel", }, { url: 
"bundle/wordpress-simple-2", channel: params.StableChannel, @@ -2143,7 +2168,7 @@ expectID: router.MustNewResolvedURL("~charmers/bundle/wordpress-simple-1", 1), }, { url: "~charmers/bundle/wordpress-simple", - channel: params.DevelopmentChannel, + channel: params.EdgeChannel, expectID: router.MustNewResolvedURL("~charmers/bundle/wordpress-simple-2", 2), }, { url: "~charmers/bundle/wordpress-simple", @@ -2158,7 +2183,7 @@ expectID: router.MustNewResolvedURL("~charmers/bundle/wordpress-simple-1", 1), }, { url: "bundle/wordpress-simple", - channel: params.DevelopmentChannel, + channel: params.EdgeChannel, expectID: router.MustNewResolvedURL("~charmers/bundle/wordpress-simple-2", 2), }, { url: "bundle/wordpress-simple", @@ -2173,7 +2198,7 @@ expectID: router.MustNewResolvedURL("~charmers/bundle/wordpress-simple-1", 1), }, { url: "wordpress-simple", - channel: params.DevelopmentChannel, + channel: params.EdgeChannel, expectID: router.MustNewResolvedURL("~charmers/bundle/wordpress-simple-2", 2), }, { url: "wordpress-simple", @@ -2188,7 +2213,7 @@ expectID: router.MustNewResolvedURL("~charmers/bundle/wordpress-simple-1", 1), }, { url: "~charmers/wordpress-simple", - channel: params.DevelopmentChannel, + channel: params.EdgeChannel, expectID: router.MustNewResolvedURL("~charmers/bundle/wordpress-simple-2", 2), }, { url: "~charmers/wordpress-simple", @@ -2203,7 +2228,7 @@ expectID: router.MustNewResolvedURL("~charmers/bundle/wordpress-simple-0", 0), }, { url: "~charmers/wordpress-simple-0", - channel: params.DevelopmentChannel, + channel: params.EdgeChannel, expectID: router.MustNewResolvedURL("~charmers/bundle/wordpress-simple-0", 0), }, { url: "~charmers/wordpress-simple-0", @@ -2223,7 +2248,7 @@ expectID: router.MustNewResolvedURL("~charmers/trusty/postgresql-0", 0), }, { url: "~charmers/trusty/postgresql-0", - channel: params.DevelopmentChannel, + channel: params.EdgeChannel, expectID: router.MustNewResolvedURL("~charmers/trusty/postgresql-0", 0), }, { url: 
"~charmers/trusty/postgresql-0", @@ -2238,7 +2263,7 @@ expectID: router.MustNewResolvedURL("~charmers/precise/postgresql-0", 0), }, { url: "~charmers/precise/postgresql-0", - channel: params.DevelopmentChannel, + channel: params.EdgeChannel, expectID: router.MustNewResolvedURL("~charmers/precise/postgresql-0", 0), }, { url: "~charmers/precise/postgresql-0", @@ -2253,7 +2278,7 @@ expectID: router.MustNewResolvedURL("~charmers/postgresql-1", 1), }, { url: "~charmers/trusty/postgresql-1", - channel: params.DevelopmentChannel, + channel: params.EdgeChannel, expectID: router.MustNewResolvedURL("~charmers/postgresql-1", 1), }, { url: "~charmers/trusty/postgresql-1", @@ -2268,7 +2293,7 @@ expectID: router.MustNewResolvedURL("~charmers/postgresql-1", 1), }, { url: "~charmers/precise/postgresql-1", - channel: params.DevelopmentChannel, + channel: params.EdgeChannel, expectID: router.MustNewResolvedURL("~charmers/postgresql-1", 1), }, { url: "~charmers/precise/postgresql-1", @@ -2283,7 +2308,7 @@ expectID: router.MustNewResolvedURL("~charmers/postgresql-1", 1), }, { url: "~charmers/trusty/postgresql", - channel: params.DevelopmentChannel, + channel: params.EdgeChannel, expectID: router.MustNewResolvedURL("~charmers/postgresql-1", 1), }, { url: "~charmers/trusty/postgresql", @@ -2298,7 +2323,7 @@ expectID: router.MustNewResolvedURL("~charmers/postgresql-1", 1), }, { url: "~charmers/precise/postgresql", - channel: params.DevelopmentChannel, + channel: params.EdgeChannel, expectID: router.MustNewResolvedURL("~charmers/postgresql-1", 1), }, { url: "~charmers/precise/postgresql", @@ -2313,7 +2338,7 @@ expectID: router.MustNewResolvedURL("~charmers/postgresql-1", 1), }, { url: "trusty/postgresql", - channel: params.DevelopmentChannel, + channel: params.EdgeChannel, expectID: router.MustNewResolvedURL("~charmers/postgresql-1", 1), }, { url: "trusty/postgresql", @@ -2328,7 +2353,7 @@ expectID: router.MustNewResolvedURL("~charmers/postgresql-1", 1), }, { url: "precise/postgresql", - 
channel: params.DevelopmentChannel, + channel: params.EdgeChannel, expectID: router.MustNewResolvedURL("~charmers/postgresql-1", 1), }, { url: "precise/postgresql", @@ -2343,7 +2368,7 @@ expectID: router.MustNewResolvedURL("~charmers/postgresql-1", 1), }, { url: "postgresql", - channel: params.DevelopmentChannel, + channel: params.EdgeChannel, expectID: router.MustNewResolvedURL("~charmers/postgresql-1", 1), }, { url: "postgresql", @@ -2358,7 +2383,7 @@ expectID: router.MustNewResolvedURL("~charmers/postgresql-1", 1), }, { url: "postgresql-1", - channel: params.DevelopmentChannel, + channel: params.EdgeChannel, expectID: router.MustNewResolvedURL("~charmers/postgresql-1", 1), }, { url: "postgresql-1", @@ -2375,7 +2400,7 @@ expectErrorCause: params.ErrNotFound, }, { url: "postgresql-0", - channel: params.DevelopmentChannel, + channel: params.EdgeChannel, expectError: "no matching charm or bundle for cs:postgresql-0", expectErrorCause: params.ErrNotFound, }, { @@ -2392,7 +2417,7 @@ expectID: router.MustNewResolvedURL("~charmers/trusty/ceph-0", 0), }, { url: "~charmers/trusty/ceph-0", - channel: params.DevelopmentChannel, + channel: params.EdgeChannel, expectID: router.MustNewResolvedURL("~charmers/trusty/ceph-0", 0), }, { url: "~charmers/trusty/ceph-0", @@ -2407,7 +2432,7 @@ expectID: router.MustNewResolvedURL("~charmers/trusty/ceph-0", 0), }, { url: "~charmers/trusty/ceph", - channel: params.DevelopmentChannel, + channel: params.EdgeChannel, expectID: router.MustNewResolvedURL("~charmers/trusty/ceph-0", 0), }, { url: "~charmers/trusty/ceph", @@ -2422,7 +2447,7 @@ expectError: "cs:~openstack-charmers/trusty/ceph-0 not found in stable channel", }, { url: "~openstack-charmers/trusty/ceph-0", - channel: params.DevelopmentChannel, + channel: params.EdgeChannel, expectID: router.MustNewResolvedURL("~openstack-charmers/trusty/ceph-0", 1), }, { url: "~openstack-charmers/trusty/ceph-0", @@ -2439,7 +2464,7 @@ expectErrorCause: params.ErrNotFound, }, { url: 
"~openstack-charmers/trusty/ceph", - channel: params.DevelopmentChannel, + channel: params.EdgeChannel, expectID: router.MustNewResolvedURL("~openstack-charmers/trusty/ceph-0", 1), }, { url: "~openstack-charmers/trusty/ceph", @@ -2454,7 +2479,7 @@ expectID: router.MustNewResolvedURL("~charmers/trusty/ceph-0", 0), }, { url: "trusty/ceph-0", - channel: params.DevelopmentChannel, + channel: params.EdgeChannel, expectID: router.MustNewResolvedURL("~charmers/trusty/ceph-0", 0), }, { url: "trusty/ceph-0", @@ -2469,7 +2494,7 @@ expectError: "cs:trusty/ceph-1 not found in stable channel", }, { url: "trusty/ceph-1", - channel: params.DevelopmentChannel, + channel: params.EdgeChannel, expectID: router.MustNewResolvedURL("~openstack-charmers/trusty/ceph-0", 1), }, { url: "trusty/ceph-1", @@ -2486,7 +2511,7 @@ expectErrorCause: params.ErrNotFound, }, { url: "trusty/ceph", - channel: params.DevelopmentChannel, + channel: params.EdgeChannel, expectID: router.MustNewResolvedURL("~openstack-charmers/trusty/ceph-0", 1), }, { url: "trusty/ceph", @@ -2503,12 +2528,16 @@ expectErrorCause: params.ErrNotFound, }, { url: "ceph", - channel: params.DevelopmentChannel, + channel: params.EdgeChannel, expectID: router.MustNewResolvedURL("~openstack-charmers/trusty/ceph-0", 1), }, { url: "ceph", channel: params.UnpublishedChannel, expectID: router.MustNewResolvedURL("~openstack-charmers/trusty/ceph-0", 1), +}, { + url: "~charmers/elasticsearch", + channel: params.StableChannel, + expectID: router.MustNewResolvedURL("~charmers/elasticsearch-1", -1), }} func (s *StoreSuite) TestFindBestEntity(c *gc.C) { @@ -2519,8 +2548,8 @@ c.Assert(err, gc.IsNil) err = store.SetPromulgated(ch.id, ch.id.PromulgatedRevision != -1) c.Assert(err, gc.IsNil) - if ch.development { - err := store.Publish(ch.id, nil, params.DevelopmentChannel) + if ch.edge { + err := store.Publish(ch.id, nil, params.EdgeChannel) c.Assert(err, gc.IsNil) } if ch.stable { @@ -2534,8 +2563,8 @@ c.Assert(err, gc.IsNil) err = 
store.SetPromulgated(b.id, b.id.PromulgatedRevision != -1) c.Assert(err, gc.IsNil) - if b.development { - err := store.Publish(b.id, nil, params.DevelopmentChannel) + if b.edge { + err := store.Publish(b.id, nil, params.EdgeChannel) c.Assert(err, gc.IsNil) } if b.stable { @@ -2546,16 +2575,19 @@ for i, test := range findBestEntityTests { c.Logf("test %d: %s (%s)", i, test.url, test.channel) - entity, err := store.FindBestEntity(charm.MustParseURL(test.url), test.channel, nil) - if test.expectError != "" { - c.Assert(err, gc.ErrorMatches, test.expectError) - if test.expectErrorCause != nil { - c.Assert(errgo.Cause(err), gc.Equals, test.expectErrorCause) + // Run FindBestEntity a number of times to make sure resolution is predicatable. + for j := 0; j < 10; j++ { + entity, err := store.FindBestEntity(charm.MustParseURL(test.url), test.channel, nil) + if test.expectError != "" { + c.Assert(err, gc.ErrorMatches, test.expectError) + if test.expectErrorCause != nil { + c.Assert(errgo.Cause(err), gc.Equals, test.expectErrorCause) + } + continue } - continue + c.Assert(err, gc.IsNil) + c.Assert(EntityResolvedURL(entity), jc.DeepEquals, test.expectID) } - c.Assert(err, gc.IsNil) - c.Assert(EntityResolvedURL(entity), jc.DeepEquals, test.expectID) } } @@ -3473,9 +3505,9 @@ expectedBaseEntity *mongodoc.BaseEntity expectedErr string }{{ - about: "unpublished, single series, publish development", + about: "unpublished, single series, publish edge", url: MustParseResolvedURL("~who/trusty/django-42"), - channels: []params.Channel{params.DevelopmentChannel}, + channels: []params.Channel{params.EdgeChannel}, initialEntity: &mongodoc.Entity{ URL: charm.MustParseURL("~who/trusty/django-42"), }, @@ -3483,52 +3515,60 @@ URL: charm.MustParseURL("~who/django"), }, expectedEntity: &mongodoc.Entity{ - URL: charm.MustParseURL("~who/trusty/django-42"), - Development: true, + URL: charm.MustParseURL("~who/trusty/django-42"), + Published: map[params.Channel]bool{ + params.EdgeChannel: true, + 
}, }, expectedBaseEntity: &mongodoc.BaseEntity{ URL: charm.MustParseURL("~who/django"), ChannelEntities: map[params.Channel]map[string]*charm.URL{ - params.DevelopmentChannel: { + params.EdgeChannel: { "trusty": charm.MustParseURL("~who/trusty/django-42"), }, }, }, }, { - about: "development, single series, publish development", + about: "edge, single series, publish edge", url: MustParseResolvedURL("~who/trusty/django-42"), - channels: []params.Channel{params.DevelopmentChannel}, + channels: []params.Channel{params.EdgeChannel}, initialEntity: &mongodoc.Entity{ - URL: charm.MustParseURL("~who/trusty/django-42"), - Development: true, + URL: charm.MustParseURL("~who/trusty/django-42"), + Published: map[params.Channel]bool{ + params.EdgeChannel: true, + }, }, initialBaseEntity: &mongodoc.BaseEntity{ URL: charm.MustParseURL("~who/django"), ChannelEntities: map[params.Channel]map[string]*charm.URL{ - params.DevelopmentChannel: { + params.EdgeChannel: { "trusty": charm.MustParseURL("~who/trusty/django-41"), }, }, }, expectedEntity: &mongodoc.Entity{ - URL: charm.MustParseURL("~who/trusty/django-42"), - Development: true, + URL: charm.MustParseURL("~who/trusty/django-42"), + Published: map[params.Channel]bool{ + params.EdgeChannel: true, + }, }, expectedBaseEntity: &mongodoc.BaseEntity{ URL: charm.MustParseURL("~who/django"), ChannelEntities: map[params.Channel]map[string]*charm.URL{ - params.DevelopmentChannel: { + params.EdgeChannel: { "trusty": charm.MustParseURL("~who/trusty/django-42"), }, }, }, }, { - about: "stable, single series, publish development", + about: "stable, single series, publish edge", url: MustParseResolvedURL("~who/trusty/django-42"), - channels: []params.Channel{params.DevelopmentChannel}, + channels: []params.Channel{params.EdgeChannel}, initialEntity: &mongodoc.Entity{ - URL: charm.MustParseURL("~who/trusty/django-42"), - Stable: true, + URL: charm.MustParseURL("~who/trusty/django-42"), + Published: map[params.Channel]bool{ + 
params.StableChannel: true, + }, }, initialBaseEntity: &mongodoc.BaseEntity{ URL: charm.MustParseURL("~who/django"), @@ -3539,9 +3579,11 @@ }, }, expectedEntity: &mongodoc.Entity{ - URL: charm.MustParseURL("~who/trusty/django-42"), - Stable: true, - Development: true, + URL: charm.MustParseURL("~who/trusty/django-42"), + Published: map[params.Channel]bool{ + params.EdgeChannel: true, + params.StableChannel: true, + }, }, expectedBaseEntity: &mongodoc.BaseEntity{ URL: charm.MustParseURL("~who/django"), @@ -3549,7 +3591,7 @@ params.StableChannel: { "trusty": charm.MustParseURL("~who/trusty/django-42"), }, - params.DevelopmentChannel: { + params.EdgeChannel: { "trusty": charm.MustParseURL("~who/trusty/django-42"), }, }, @@ -3565,8 +3607,10 @@ URL: charm.MustParseURL("~who/django"), }, expectedEntity: &mongodoc.Entity{ - URL: charm.MustParseURL("~who/trusty/django-42"), - Stable: true, + URL: charm.MustParseURL("~who/trusty/django-42"), + Published: map[params.Channel]bool{ + params.StableChannel: true, + }, }, expectedBaseEntity: &mongodoc.BaseEntity{ URL: charm.MustParseURL("~who/django"), @@ -3577,30 +3621,34 @@ }, }, }, { - about: "development, single series, publish stable", + about: "edge, single series, publish stable", url: MustParseResolvedURL("~who/trusty/django-42"), channels: []params.Channel{params.StableChannel}, initialEntity: &mongodoc.Entity{ - URL: charm.MustParseURL("~who/trusty/django-42"), - Development: true, + URL: charm.MustParseURL("~who/trusty/django-42"), + Published: map[params.Channel]bool{ + params.EdgeChannel: true, + }, }, initialBaseEntity: &mongodoc.BaseEntity{ URL: charm.MustParseURL("~who/django"), ChannelEntities: map[params.Channel]map[string]*charm.URL{ - params.DevelopmentChannel: { + params.EdgeChannel: { "trusty": charm.MustParseURL("~who/trusty/django-41"), }, }, }, expectedEntity: &mongodoc.Entity{ - URL: charm.MustParseURL("~who/trusty/django-42"), - Development: true, - Stable: true, + URL: 
charm.MustParseURL("~who/trusty/django-42"), + Published: map[params.Channel]bool{ + params.EdgeChannel: true, + params.StableChannel: true, + }, }, expectedBaseEntity: &mongodoc.BaseEntity{ URL: charm.MustParseURL("~who/django"), ChannelEntities: map[params.Channel]map[string]*charm.URL{ - params.DevelopmentChannel: { + params.EdgeChannel: { "trusty": charm.MustParseURL("~who/trusty/django-41"), }, params.StableChannel: { @@ -3613,8 +3661,10 @@ url: MustParseResolvedURL("~who/trusty/django-42"), channels: []params.Channel{params.StableChannel}, initialEntity: &mongodoc.Entity{ - URL: charm.MustParseURL("~who/trusty/django-42"), - Stable: true, + URL: charm.MustParseURL("~who/trusty/django-42"), + Published: map[params.Channel]bool{ + params.StableChannel: true, + }, }, initialBaseEntity: &mongodoc.BaseEntity{ URL: charm.MustParseURL("~who/django"), @@ -3625,8 +3675,10 @@ }, }, expectedEntity: &mongodoc.Entity{ - URL: charm.MustParseURL("~who/trusty/django-42"), - Stable: true, + URL: charm.MustParseURL("~who/trusty/django-42"), + Published: map[params.Channel]bool{ + params.StableChannel: true, + }, }, expectedBaseEntity: &mongodoc.BaseEntity{ URL: charm.MustParseURL("~who/django"), @@ -3637,9 +3689,9 @@ }, }, }, { - about: "unpublished, multi series, publish development", + about: "unpublished, multi series, publish edge", url: MustParseResolvedURL("~who/django-42"), - channels: []params.Channel{params.DevelopmentChannel}, + channels: []params.Channel{params.EdgeChannel}, initialEntity: &mongodoc.Entity{ URL: charm.MustParseURL("~who/django-42"), SupportedSeries: []string{"trusty", "wily"}, @@ -3650,44 +3702,50 @@ expectedEntity: &mongodoc.Entity{ URL: charm.MustParseURL("~who/django-42"), SupportedSeries: []string{"trusty", "wily"}, - Development: true, + Published: map[params.Channel]bool{ + params.EdgeChannel: true, + }, }, expectedBaseEntity: &mongodoc.BaseEntity{ URL: charm.MustParseURL("~who/django"), ChannelEntities: 
map[params.Channel]map[string]*charm.URL{ - params.DevelopmentChannel: { + params.EdgeChannel: { "trusty": charm.MustParseURL("~who/django-42"), "wily": charm.MustParseURL("~who/django-42"), }, }, }, }, { - about: "development, multi series, publish development", + about: "edge, multi series, publish edge", url: MustParseResolvedURL("~who/django-42"), - channels: []params.Channel{params.DevelopmentChannel}, + channels: []params.Channel{params.EdgeChannel}, initialEntity: &mongodoc.Entity{ - URL: charm.MustParseURL("~who/django-42"), - Development: true, + URL: charm.MustParseURL("~who/django-42"), + Published: map[params.Channel]bool{ + params.EdgeChannel: true, + }, SupportedSeries: []string{"trusty", "wily"}, }, initialBaseEntity: &mongodoc.BaseEntity{ URL: charm.MustParseURL("~who/django"), ChannelEntities: map[params.Channel]map[string]*charm.URL{ - params.DevelopmentChannel: { + params.EdgeChannel: { "precise": charm.MustParseURL("~who/django-0"), "trusty": charm.MustParseURL("~who/trusty/django-0"), }, }, }, expectedEntity: &mongodoc.Entity{ - URL: charm.MustParseURL("~who/django-42"), - Development: true, + URL: charm.MustParseURL("~who/django-42"), + Published: map[params.Channel]bool{ + params.EdgeChannel: true, + }, SupportedSeries: []string{"trusty", "wily"}, }, expectedBaseEntity: &mongodoc.BaseEntity{ URL: charm.MustParseURL("~who/django"), ChannelEntities: map[params.Channel]map[string]*charm.URL{ - params.DevelopmentChannel: { + params.EdgeChannel: { "precise": charm.MustParseURL("~who/django-0"), "trusty": charm.MustParseURL("~who/django-42"), "wily": charm.MustParseURL("~who/django-42"), @@ -3695,13 +3753,15 @@ }, }, }, { - about: "stable, multi series, publish development", + about: "stable, multi series, publish edge", url: MustParseResolvedURL("~who/django-47"), - channels: []params.Channel{params.DevelopmentChannel}, + channels: []params.Channel{params.EdgeChannel}, initialEntity: &mongodoc.Entity{ URL: charm.MustParseURL("~who/django-47"), 
SupportedSeries: []string{"trusty", "wily", "precise"}, - Stable: true, + Published: map[params.Channel]bool{ + params.StableChannel: true, + }, }, initialBaseEntity: &mongodoc.BaseEntity{ URL: charm.MustParseURL("~who/django"), @@ -3714,8 +3774,10 @@ expectedEntity: &mongodoc.Entity{ URL: charm.MustParseURL("~who/django-47"), SupportedSeries: []string{"trusty", "wily", "precise"}, - Stable: true, - Development: true, + Published: map[params.Channel]bool{ + params.EdgeChannel: true, + params.StableChannel: true, + }, }, expectedBaseEntity: &mongodoc.BaseEntity{ URL: charm.MustParseURL("~who/django"), @@ -3723,7 +3785,7 @@ params.StableChannel: { "trusty": charm.MustParseURL("~who/django-47"), }, - params.DevelopmentChannel: { + params.EdgeChannel: { "trusty": charm.MustParseURL("~who/django-47"), "wily": charm.MustParseURL("~who/django-47"), "precise": charm.MustParseURL("~who/django-47"), @@ -3744,7 +3806,9 @@ expectedEntity: &mongodoc.Entity{ URL: charm.MustParseURL("~who/django-42"), SupportedSeries: []string{"trusty", "wily", "precise"}, - Stable: true, + Published: map[params.Channel]bool{ + params.StableChannel: true, + }, }, expectedBaseEntity: &mongodoc.BaseEntity{ URL: charm.MustParseURL("~who/django"), @@ -3757,18 +3821,20 @@ }, }, }, { - about: "development, multi series, publish stable", + about: "edge, multi series, publish stable", url: MustParseResolvedURL("~who/django-42"), channels: []params.Channel{params.StableChannel}, initialEntity: &mongodoc.Entity{ URL: charm.MustParseURL("~who/django-42"), SupportedSeries: []string{"wily"}, - Development: true, + Published: map[params.Channel]bool{ + params.EdgeChannel: true, + }, }, initialBaseEntity: &mongodoc.BaseEntity{ URL: charm.MustParseURL("~who/django"), ChannelEntities: map[params.Channel]map[string]*charm.URL{ - params.DevelopmentChannel: { + params.EdgeChannel: { "trusty": charm.MustParseURL("~who/django-0"), }, }, @@ -3776,8 +3842,10 @@ expectedEntity: &mongodoc.Entity{ URL: 
charm.MustParseURL("~who/django-42"), SupportedSeries: []string{"wily"}, - Development: true, - Stable: true, + Published: map[params.Channel]bool{ + params.EdgeChannel: true, + params.StableChannel: true, + }, }, expectedBaseEntity: &mongodoc.BaseEntity{ URL: charm.MustParseURL("~who/django"), @@ -3785,7 +3853,73 @@ params.StableChannel: { "wily": charm.MustParseURL("~who/django-42"), }, - params.DevelopmentChannel: { + params.EdgeChannel: { + "trusty": charm.MustParseURL("~who/django-0"), + }, + }, + }, +}, { + about: "unpublished, multi series, publish beta", + url: MustParseResolvedURL("~who/django-42"), + channels: []params.Channel{params.BetaChannel}, + initialEntity: &mongodoc.Entity{ + URL: charm.MustParseURL("~who/django-42"), + SupportedSeries: []string{"trusty", "wily", "precise"}, + }, + initialBaseEntity: &mongodoc.BaseEntity{ + URL: charm.MustParseURL("~who/django"), + }, + expectedEntity: &mongodoc.Entity{ + URL: charm.MustParseURL("~who/django-42"), + SupportedSeries: []string{"trusty", "wily", "precise"}, + Published: map[params.Channel]bool{ + params.BetaChannel: true, + }, + }, + expectedBaseEntity: &mongodoc.BaseEntity{ + URL: charm.MustParseURL("~who/django"), + ChannelEntities: map[params.Channel]map[string]*charm.URL{ + params.BetaChannel: { + "trusty": charm.MustParseURL("~who/django-42"), + "wily": charm.MustParseURL("~who/django-42"), + "precise": charm.MustParseURL("~who/django-42"), + }, + }, + }, +}, { + about: "beta, multi series, publish candidate", + url: MustParseResolvedURL("~who/django-42"), + channels: []params.Channel{params.CandidateChannel}, + initialEntity: &mongodoc.Entity{ + URL: charm.MustParseURL("~who/django-42"), + SupportedSeries: []string{"wily"}, + Published: map[params.Channel]bool{ + params.BetaChannel: true, + }, + }, + initialBaseEntity: &mongodoc.BaseEntity{ + URL: charm.MustParseURL("~who/django"), + ChannelEntities: map[params.Channel]map[string]*charm.URL{ + params.BetaChannel: { + "trusty": 
charm.MustParseURL("~who/django-0"), + }, + }, + }, + expectedEntity: &mongodoc.Entity{ + URL: charm.MustParseURL("~who/django-42"), + SupportedSeries: []string{"wily"}, + Published: map[params.Channel]bool{ + params.BetaChannel: true, + params.CandidateChannel: true, + }, + }, + expectedBaseEntity: &mongodoc.BaseEntity{ + URL: charm.MustParseURL("~who/django"), + ChannelEntities: map[params.Channel]map[string]*charm.URL{ + params.CandidateChannel: { + "wily": charm.MustParseURL("~who/django-42"), + }, + params.BetaChannel: { "trusty": charm.MustParseURL("~who/django-0"), }, }, @@ -3797,7 +3931,9 @@ initialEntity: &mongodoc.Entity{ URL: charm.MustParseURL("~who/django-42"), SupportedSeries: []string{"trusty", "wily", "precise"}, - Stable: true, + Published: map[params.Channel]bool{ + params.StableChannel: true, + }, }, initialBaseEntity: &mongodoc.BaseEntity{ URL: charm.MustParseURL("~who/django"), @@ -3813,7 +3949,9 @@ expectedEntity: &mongodoc.Entity{ URL: charm.MustParseURL("~who/django-42"), SupportedSeries: []string{"trusty", "wily", "precise"}, - Stable: true, + Published: map[params.Channel]bool{ + params.StableChannel: true, + }, }, expectedBaseEntity: &mongodoc.BaseEntity{ URL: charm.MustParseURL("~who/django"), @@ -3838,8 +3976,10 @@ URL: charm.MustParseURL("~who/django"), }, expectedEntity: &mongodoc.Entity{ - URL: charm.MustParseURL("~who/bundle/django-42"), - Stable: true, + URL: charm.MustParseURL("~who/bundle/django-42"), + Published: map[params.Channel]bool{ + params.StableChannel: true, + }, }, expectedBaseEntity: &mongodoc.BaseEntity{ URL: charm.MustParseURL("~who/django"), @@ -3850,9 +3990,15 @@ }, }, }, { - about: "unpublished, multi series, publish multiple channels", - url: MustParseResolvedURL("~who/django-42"), - channels: []params.Channel{params.DevelopmentChannel, params.StableChannel, params.Channel("no-such")}, + about: "unpublished, multi series, publish multiple channels", + url: MustParseResolvedURL("~who/django-42"), + channels: 
[]params.Channel{ + params.EdgeChannel, + params.StableChannel, + params.Channel("no-such"), + params.UnpublishedChannel, + params.CandidateChannel, + }, initialEntity: &mongodoc.Entity{ URL: charm.MustParseURL("~who/django-42"), SupportedSeries: []string{"trusty", "wily"}, @@ -3864,7 +4010,7 @@ "quantal": charm.MustParseURL("~who/django-1"), "trusty": charm.MustParseURL("~who/django-4"), }, - params.DevelopmentChannel: { + params.EdgeChannel: { "wily": charm.MustParseURL("~who/django-10"), }, }, @@ -3872,13 +4018,20 @@ expectedEntity: &mongodoc.Entity{ URL: charm.MustParseURL("~who/django-42"), SupportedSeries: []string{"trusty", "wily"}, - Development: true, - Stable: true, + Published: map[params.Channel]bool{ + params.EdgeChannel: true, + params.CandidateChannel: true, + params.StableChannel: true, + }, }, expectedBaseEntity: &mongodoc.BaseEntity{ URL: charm.MustParseURL("~who/django"), ChannelEntities: map[params.Channel]map[string]*charm.URL{ - params.DevelopmentChannel: { + params.EdgeChannel: { + "trusty": charm.MustParseURL("~who/django-42"), + "wily": charm.MustParseURL("~who/django-42"), + }, + params.CandidateChannel: { "trusty": charm.MustParseURL("~who/django-42"), "wily": charm.MustParseURL("~who/django-42"), }, @@ -3892,7 +4045,7 @@ }, { about: "not found", url: MustParseResolvedURL("~who/trusty/no-such-42"), - channels: []params.Channel{params.DevelopmentChannel}, + channels: []params.Channel{params.EdgeChannel}, initialEntity: &mongodoc.Entity{ URL: charm.MustParseURL("~who/trusty/django-42"), }, @@ -3907,6 +4060,17 @@ initialEntity: &mongodoc.Entity{ URL: charm.MustParseURL("~who/trusty/django-42"), }, + initialBaseEntity: &mongodoc.BaseEntity{ + URL: charm.MustParseURL("~who/django"), + }, + expectedErr: `cannot update "cs:~who/trusty/django-42": no valid channels provided`, +}, { + about: "unpublished channel provided", + url: MustParseResolvedURL("~who/trusty/django-42"), + channels: []params.Channel{params.UnpublishedChannel}, + 
initialEntity: &mongodoc.Entity{ + URL: charm.MustParseURL("~who/trusty/django-42"), + }, initialBaseEntity: &mongodoc.BaseEntity{ URL: charm.MustParseURL("~who/django"), }, diff -Nru juju-core-2.0~beta15/src/gopkg.in/juju/charmstore.v5-unstable/internal/debug/handler.go juju-core-2.0.0/src/gopkg.in/juju/charmstore.v5-unstable/internal/debug/handler.go --- juju-core-2.0~beta15/src/gopkg.in/juju/charmstore.v5-unstable/internal/debug/handler.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/juju/charmstore.v5-unstable/internal/debug/handler.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,22 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -// The debug package holds various functions that may -// be used for debugging but should not be included -// in production code. -package debug // import "gopkg.in/juju/charmstore.v5-unstable/internal/debug" - -import ( - "log" - "net/http" -) - -// Handler returns a new handler that wraps h -// and logs the given message with the URL path -// every time the request is invoked. -func Handler(msg string, h http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { - log.Printf("%s got request at URL %q; headers %q", msg, req.URL, req.Header) - h.ServeHTTP(w, req) - }) -} diff -Nru juju-core-2.0~beta15/src/gopkg.in/juju/charmstore.v5-unstable/internal/mongodoc/doc.go juju-core-2.0.0/src/gopkg.in/juju/charmstore.v5-unstable/internal/mongodoc/doc.go --- juju-core-2.0~beta15/src/gopkg.in/juju/charmstore.v5-unstable/internal/mongodoc/doc.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/juju/charmstore.v5-unstable/internal/mongodoc/doc.go 2016-10-13 14:32:25.000000000 +0000 @@ -91,6 +91,7 @@ // TODO(rog) verify that all these types marshal to the expected // JSON form. 
CharmMeta *charm.Meta + CharmMetrics *charm.Metrics CharmConfig *charm.Config CharmActions *charm.Actions @@ -135,16 +136,8 @@ // If the entity is not promulgated this should be set to -1. PromulgatedRevision int `bson:"promulgated-revision"` - // TODO we could potentially use map[params.Channel] bool - // instead of having a separate field for each channel. - - // Development holds whether the entity has been published in the - // "development" channel. - Development bool - - // Stable holds whether the entity has been published in the - // "stable" channel. - Stable bool + // Published holds whether the entity has been published on a channel. + Published map[params.Channel]bool `json:",omitempty" bson:",omitempty"` } // PreferredURL returns the preferred way to refer to this entity. If diff -Nru juju-core-2.0~beta15/src/gopkg.in/juju/charmstore.v5-unstable/internal/mongodoc/doc_test.go juju-core-2.0.0/src/gopkg.in/juju/charmstore.v5-unstable/internal/mongodoc/doc_test.go --- juju-core-2.0~beta15/src/gopkg.in/juju/charmstore.v5-unstable/internal/mongodoc/doc_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/juju/charmstore.v5-unstable/internal/mongodoc/doc_test.go 2016-10-13 14:32:25.000000000 +0000 @@ -7,6 +7,7 @@ jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" "gopkg.in/juju/charm.v6-unstable" + "gopkg.in/juju/charmrepo.v2-unstable/csclient/params" "gopkg.in/mgo.v2/bson" "gopkg.in/juju/charmstore.v5-unstable/internal/mongodoc" @@ -77,14 +78,18 @@ entity: &mongodoc.Entity{ URL: charm.MustParseURL("~dmr/trusty/c-1"), PromulgatedURL: charm.MustParseURL("trusty/c-2"), - Development: true, + Published: map[params.Channel]bool{ + params.EdgeChannel: true, + }, }, expectURLFalse: "cs:~dmr/trusty/c-1", expectURLTrue: "cs:trusty/c-2", }, { entity: &mongodoc.Entity{ - URL: charm.MustParseURL("~dmr/trusty/c-1"), - Development: true, + URL: charm.MustParseURL("~dmr/trusty/c-1"), + Published: map[params.Channel]bool{ + 
params.EdgeChannel: true, + }, }, expectURLFalse: "cs:~dmr/trusty/c-1", expectURLTrue: "cs:~dmr/trusty/c-1", diff -Nru juju-core-2.0~beta15/src/gopkg.in/juju/charmstore.v5-unstable/internal/monitoring/monitoring.go juju-core-2.0.0/src/gopkg.in/juju/charmstore.v5-unstable/internal/monitoring/monitoring.go --- juju-core-2.0~beta15/src/gopkg.in/juju/charmstore.v5-unstable/internal/monitoring/monitoring.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/juju/charmstore.v5-unstable/internal/monitoring/monitoring.go 2016-10-13 14:32:25.000000000 +0000 @@ -0,0 +1,27 @@ +// Copyright 2016 Canonical Ltd. + +package monitoring + +import ( + "github.com/prometheus/client_golang/prometheus" +) + +var ( + requestDuration = prometheus.NewSummaryVec(prometheus.SummaryOpts{ + Namespace: "charmstore", + Subsystem: "handler", + Name: "request_duration", + Help: "The duration of a web request in seconds.", + }, []string{"path_pattern"}) + uploadProcessingDuration = prometheus.NewSummary(prometheus.SummaryOpts{ + Namespace: "charmstore", + Subsystem: "archive", + Name: "processing_duration", + Help: "The processing duration of a charm upload in seconds.", + }) +) + +func init() { + prometheus.MustRegister(requestDuration) + prometheus.MustRegister(uploadProcessingDuration) +} diff -Nru juju-core-2.0~beta15/src/gopkg.in/juju/charmstore.v5-unstable/internal/monitoring/request.go juju-core-2.0.0/src/gopkg.in/juju/charmstore.v5-unstable/internal/monitoring/request.go --- juju-core-2.0~beta15/src/gopkg.in/juju/charmstore.v5-unstable/internal/monitoring/request.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/juju/charmstore.v5-unstable/internal/monitoring/request.go 2016-10-13 14:32:25.000000000 +0000 @@ -0,0 +1,33 @@ +// Copyright 2016 Canonical Ltd. + +package monitoring + +import ( + "time" +) + +// Request represents a monitoring request. 
+type Request struct { + startTime time.Time + label string +} + +// Reset the monitor start time to now and the label to blank. +func (r *Request) Reset() { + r.startTime = time.Now() + r.label = "" +} + +// AppendLabel appends the given label value to the label of the monitor. +// This supports piecing together parameterized routes as labels. +func (r *Request) AppendLabel(label string) { + r.label += label +} + +// ObserveMetric observes this metric. +func (r *Request) ObserveMetric() { + requestDuration.WithLabelValues(r.label).Observe(float64(time.Since(r.startTime)) / float64(time.Second)) +} + +// Label returns unexported label for testing. +func (r *Request) Label() string { return r.label } diff -Nru juju-core-2.0~beta15/src/gopkg.in/juju/charmstore.v5-unstable/internal/monitoring/upload.go juju-core-2.0.0/src/gopkg.in/juju/charmstore.v5-unstable/internal/monitoring/upload.go --- juju-core-2.0~beta15/src/gopkg.in/juju/charmstore.v5-unstable/internal/monitoring/upload.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/juju/charmstore.v5-unstable/internal/monitoring/upload.go 2016-10-13 14:32:25.000000000 +0000 @@ -0,0 +1,24 @@ +// Copyright 2016 Canonical Ltd. + +package monitoring + +import ( + "time" +) + +// UploadProcessingDuration represents a monitoring duration. +type UploadProcessingDuration struct { + startTime time.Time +} + +// Return a new UploadProcessingDuration with its start time set to now. +func NewUploadProcessingDuration() *UploadProcessingDuration { + return &UploadProcessingDuration{ + startTime: time.Now(), + } +} + +// ObserveMetric observes this metric. 
+func (r *UploadProcessingDuration) ObserveMetric() { + uploadProcessingDuration.Observe(float64(time.Since(r.startTime)) / float64(time.Microsecond)) +} diff -Nru juju-core-2.0~beta15/src/gopkg.in/juju/charmstore.v5-unstable/internal/router/router.go juju-core-2.0.0/src/gopkg.in/juju/charmstore.v5-unstable/internal/router/router.go --- juju-core-2.0~beta15/src/gopkg.in/juju/charmstore.v5-unstable/internal/router/router.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/juju/charmstore.v5-unstable/internal/router/router.go 2016-10-13 14:32:25.000000000 +0000 @@ -1,4 +1,4 @@ -// Copyright 2014 Canonical Ltd. +// Copyright 2014-2016 Canonical Ltd. // Licensed under the AGPLv3, see LICENCE file for details. // The router package implements an HTTP request router for charm store @@ -22,6 +22,7 @@ "gopkg.in/juju/charmrepo.v2-unstable/csclient/params" "gopkg.in/macaroon-bakery.v1/httpbakery" + "gopkg.in/juju/charmstore.v5-unstable/internal/monitoring" "gopkg.in/juju/charmstore.v5-unstable/internal/series" ) @@ -121,6 +122,9 @@ handlers *Handlers handler http.Handler + + // monitor holds a metric monitor to time a request. + Monitor monitoring.Request } // ResolvedURL represents a URL that has been resolved by resolveURL. @@ -259,7 +263,11 @@ for path, handler := range r.handlers.Global { path = "/" + path prefix := strings.TrimSuffix(path, "/") - mux.Handle(path, http.StripPrefix(prefix, handler)) + handler := handler + mux.Handle(path, http.StripPrefix(prefix, http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + r.Monitor.AppendLabel(prefix) + handler.ServeHTTP(w, req) + }))) } mux.Handle("/", HandleErrors(r.serveIds)) r.handler = mux @@ -299,6 +307,7 @@ return } r.handler.ServeHTTP(w, req) + r.Monitor.ObserveMetric() } // Handlers returns the set of handlers that the router was created with. 
@@ -325,6 +334,7 @@ } handler := r.handlers.Id[key] if handler != nil { + r.Monitor.AppendLabel("/:id/" + key + path) req.URL.Path = path err := handler(url, w, req) // Note: preserve error cause from handlers. @@ -415,12 +425,15 @@ if key == "" { // GET id/meta // https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmeta + r.Monitor.AppendLabel(":id/meta") return r.metaNames(), nil } if key == "any" { + r.Monitor.AppendLabel("/:meta/" + key + path) return r.serveMetaGetAny(rurl, req) } if handler := r.handlers.Meta[key]; handler != nil { + r.Monitor.AppendLabel("/:meta/" + key + path) results, err := handler.HandleGet([]BulkIncludeHandler{handler}, rurl, []string{path}, req.Form, req) if err != nil { // Note: preserve error cause from handlers. @@ -432,6 +445,7 @@ } return results[0], nil } + r.Monitor.AppendLabel("/:meta/" + key + path) return nil, errgo.WithCausef(nil, params.ErrNotFound, "unknown metadata %q", strings.TrimPrefix(req.URL.Path, "/")) } @@ -550,6 +564,7 @@ // serveBulkMeta serves bulk metadata requests (requests to /meta/...). func (r *Router) serveBulkMeta(w http.ResponseWriter, req *http.Request) error { + r.Monitor.AppendLabel("/meta") switch req.Method { case "GET", "HEAD": // A bare meta returns all endpoints. 
@@ -893,8 +908,5 @@ if err != nil { return nil, err } - if u.Channel != "" { - return nil, errgo.Newf("charmstore ids must not contain a channel") - } return u, nil } diff -Nru juju-core-2.0~beta15/src/gopkg.in/juju/charmstore.v5-unstable/internal/router/router_test.go juju-core-2.0.0/src/gopkg.in/juju/charmstore.v5-unstable/internal/router/router_test.go --- juju-core-2.0~beta15/src/gopkg.in/juju/charmstore.v5-unstable/internal/router/router_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/juju/charmstore.v5-unstable/internal/router/router_test.go 2016-10-13 14:32:25.000000000 +0000 @@ -47,6 +47,7 @@ resolveURL func(*charm.URL) (*ResolvedURL, error) authorize func(*ResolvedURL, *http.Request) error exists func(*ResolvedURL, *http.Request) (bool, error) + monitorLabel string }{{ about: "global handler", handlers: Handlers{ @@ -66,6 +67,7 @@ Method: "GET", Path: "", }, + monitorLabel: "/foo", }, { about: "global handler with sub-path and flags", handlers: Handlers{ @@ -89,6 +91,7 @@ "b": {"two"}, }, }, + monitorLabel: "/foo/bar", }, { about: "invalid form", urlStr: "/foo?a=%", @@ -109,6 +112,7 @@ Method: "GET", CharmURL: "cs:precise/wordpress-34", }, + monitorLabel: "/:id/foo", }, { about: "development id handler", handlers: Handlers{ @@ -148,6 +152,7 @@ Method: "GET", CharmURL: "cs:win81/visualstudio-2012", }, + monitorLabel: "/:id/foo", }, { about: "wily id handler", handlers: Handlers{ @@ -161,6 +166,7 @@ Method: "GET", CharmURL: "cs:wily/wordpress-34", }, + monitorLabel: "/:id/foo", }, { about: "id handler with no series in id", handlers: Handlers{ @@ -174,6 +180,7 @@ Method: "GET", CharmURL: "cs:wordpress-34", }, + monitorLabel: "/:id/foo", }, { about: "id handler with no revision in id", handlers: Handlers{ @@ -187,6 +194,7 @@ Method: "GET", CharmURL: "cs:precise/wordpress", }, + monitorLabel: "/:id/foo", }, { about: "id handler with channel and name only", handlers: Handlers{ @@ -214,6 +222,7 @@ CharmURL: "cs:precise/wordpress-34", 
Path: "/blah/arble", }, + monitorLabel: "/:id/foo//blah/arble", }, { about: "id handler with allowed extra path but none given", handlers: Handlers{ @@ -253,6 +262,7 @@ Method: "GET", CharmURL: "cs:~joe/precise/wordpress-34", }, + monitorLabel: "/:id/foo", }, { about: "wily handler with user", handlers: Handlers{ @@ -266,6 +276,7 @@ Method: "GET", CharmURL: "cs:~joe/wily/wordpress-34", }, + monitorLabel: "/:id/foo", }, { about: "id handler with user and extra path", handlers: Handlers{ @@ -280,6 +291,7 @@ CharmURL: "cs:~joe/precise/wordpress-34", Path: "/blah/arble", }, + monitorLabel: "/:id/foo//blah/arble", }, { about: "development id handler with user and extra path", handlers: Handlers{ @@ -318,6 +330,7 @@ expectBody: params.Error{ Message: "errorIdHandler error", }, + monitorLabel: "/:id/foo//blah/arble", }, { about: "id handler that returns a not-found error", handlers: Handlers{ @@ -333,6 +346,7 @@ Message: "not found", Code: params.ErrNotFound, }, + monitorLabel: "/:id/foo", }, { about: "id handler that returns some other kind of coded error", handlers: Handlers{ @@ -348,6 +362,7 @@ Message: "a message", Code: "foo", }, + monitorLabel: "/:id/foo", }, { about: "id with unspecified series and revision, not resolved", handlers: Handlers{ @@ -362,6 +377,7 @@ Method: "GET", CharmURL: "cs:~joe/wordpress", }, + monitorLabel: "/:id/foo", }, { about: "id with error on resolving", handlers: Handlers{ @@ -403,6 +419,7 @@ urlStr: "/precise/wordpress-42/meta", expectStatus: http.StatusOK, expectBody: []string{"bar", "baz", "foo"}, + monitorLabel: ":id/meta", }, { about: "meta list at root", handlers: Handlers{ @@ -417,6 +434,7 @@ urlStr: "/meta", expectStatus: http.StatusOK, expectBody: []string{"bar", "baz", "foo"}, + monitorLabel: "/meta", }, { about: "meta list at root with trailing /", handlers: Handlers{ @@ -431,6 +449,7 @@ urlStr: "/meta/", expectStatus: http.StatusOK, expectBody: []string{"bar", "baz", "foo"}, + monitorLabel: "/meta", }, { about: "meta handler", 
handlers: Handlers{ @@ -444,6 +463,7 @@ expectBody: &metaHandlerTestResp{ CharmURL: "cs:precise/wordpress-42", }, + monitorLabel: "/:meta/foo", }, { about: "meta handler with additional elements", handlers: Handlers{ @@ -458,6 +478,7 @@ CharmURL: "cs:precise/wordpress-42", Path: "/bar/baz", }, + monitorLabel: "/:meta/foo//bar/baz", }, { about: "meta handler with params", handlers: Handlers{ @@ -475,6 +496,7 @@ "two": {"b"}, }, }, + monitorLabel: "/:meta/foo", }, { about: "meta handler that's not found", urlStr: "/precise/wordpress-42/meta/foo", @@ -484,6 +506,7 @@ Code: params.ErrNotFound, Message: `unknown metadata "foo"`, }, + monitorLabel: "/:meta/foo", }, { about: "meta sub-handler that's not found", urlStr: "/precise/wordpress-42/meta/foo/bar", @@ -493,6 +516,7 @@ Code: params.ErrNotFound, Message: `unknown metadata "foo/bar"`, }, + monitorLabel: "/:meta/foo//bar", }, { about: "meta handler with nil data", handlers: Handlers{ @@ -507,6 +531,7 @@ Code: params.ErrMetadataNotFound, Message: "metadata not found", }, + monitorLabel: "/:meta/foo", }, { about: "meta handler with typed nil data", handlers: Handlers{ @@ -521,6 +546,7 @@ Code: params.ErrMetadataNotFound, Message: "metadata not found", }, + monitorLabel: "/:meta/foo", }, { about: "meta handler with field selector", urlStr: "/precise/wordpress-42/meta/foo", @@ -540,6 +566,7 @@ }, Id: newResolvedURL("cs:~charmers/precise/wordpress-42", 42), }, + monitorLabel: "/:meta/foo", }, { about: "meta handler returning error with code", urlStr: "/precise/wordpress-42/meta/foo", @@ -554,6 +581,7 @@ Code: "arble", Message: "a message", }, + monitorLabel: "/:meta/foo", }, { about: "unauthorized meta handler", urlStr: "/precise/wordpress-42/meta/foo", @@ -576,6 +604,7 @@ expectBody: params.MetaAnyResponse{ Id: charm.MustParseURL("cs:precise/wordpress-42"), }, + monitorLabel: "/:meta/any", }, { about: "meta/any, no includes, id does not exist", urlStr: "/precise/wordpress/meta/any", @@ -627,6 +656,7 @@ }, }, }, + 
monitorLabel: "/:meta/any", }, { about: "meta/any, includes with additional path elements", urlStr: "/precise/wordpress-42/meta/any?include=item1/foo&include=item2/bar&include=item1", @@ -671,6 +701,7 @@ }, }, }, + monitorLabel: "/:meta/any", }, { about: "meta/any, nil metadata omitted", urlStr: "/precise/wordpress-42/meta/any?include=ok&include=nil", @@ -691,6 +722,7 @@ }, }, }, + monitorLabel: "/:meta/any", }, { about: "meta/any, handler returns error with cause", urlStr: "/precise/wordpress-42/meta/any?include=error", @@ -705,6 +737,7 @@ Code: "foo", Message: "a message", }, + monitorLabel: "/:meta/any", }, { about: "bulk meta handler, single id", urlStr: "/meta/foo?id=precise/wordpress-42", @@ -720,6 +753,7 @@ CharmURL: "cs:precise/wordpress-42", }, }, + monitorLabel: "/meta/:meta/foo", }, { about: "bulk meta handler, single id with invalid channel", urlStr: "/meta/foo?id=~user/bad-wolf/wily/wordpress-42", @@ -734,6 +768,7 @@ Code: params.ErrBadRequest, Message: `bad request: charm or bundle URL has invalid form: "~user/bad-wolf/wily/wordpress-42"`, }, + monitorLabel: "/meta", }, { about: "bulk meta handler, several ids", urlStr: "/meta/foo?id=precise/wordpress-42&id=utopic/foo-32&id=django", @@ -755,6 +790,7 @@ CharmURL: "cs:precise/django-0", }, }, + monitorLabel: "/meta/:meta/foo/:meta/foo/:meta/foo", }, { about: "bulk meta/any handler, several ids", urlStr: "/meta/any?id=precise/wordpress-42&id=utopic/foo-32&id=django-47&include=foo&include=bar/something", @@ -804,6 +840,7 @@ }, }, }, + monitorLabel: "/meta/:meta/any/:meta/any/:meta/any", }, { about: "bulk meta/any handler, several ids, invalid channel", urlStr: "/meta/any?id=precise/wordpress-42&id=staging/trusty/django&include=foo&include=bar/something", @@ -819,6 +856,7 @@ Code: params.ErrBadRequest, Message: `bad request: charm or bundle URL has invalid form: "staging/trusty/django"`, }, + monitorLabel: "/meta", }, { about: "bulk meta/any handler, discharge required", urlStr: 
"/meta/any?id=precise/wordpress-42&include=foo", @@ -833,6 +871,7 @@ expectBody: params.Error{ Message: "discharge required", }, + monitorLabel: "/meta", }, { about: "bulk meta/any handler, discharge required, ignore authorization", urlStr: "/meta/any?id=precise/wordpress-42&include=foo&ignore-auth=1", @@ -845,6 +884,7 @@ expectWillIncludeMetadata: []string{"foo"}, expectStatus: http.StatusOK, expectBody: map[string]params.MetaAnyResponse{}, + monitorLabel: "/meta", }, { about: "bulk meta/any handler, some unauthorized, ignore authorization", urlStr: "/meta/any?id=precise/wordpress-42&id=utopic/foo-32&include=foo&ignore-auth=1", @@ -866,6 +906,7 @@ }, }, }, + monitorLabel: "/meta/:meta/any", }, { about: "bulk meta/any handler, unauthorized", urlStr: "/meta/any?id=precise/wordpress-42&include=foo", @@ -880,6 +921,7 @@ expectBody: params.Error{ Message: "bad wolf", }, + monitorLabel: "/meta", }, { about: "bulk meta/any handler, unauthorized, ignore authorization", urlStr: "/meta/any?id=precise/wordpress-42&include=foo&ignore-auth=1", @@ -892,6 +934,7 @@ expectWillIncludeMetadata: []string{"foo"}, expectStatus: http.StatusOK, expectBody: map[string]params.MetaAnyResponse{}, + monitorLabel: "/meta", }, { about: "bulk meta/any handler, invalid ignore-auth flag", urlStr: "/meta/any?id=precise/wordpress-42&include=foo&ignore-auth=meh", @@ -900,6 +943,7 @@ Code: params.ErrBadRequest, Message: `bad request: unexpected bool value "meh" (must be "0" or "1")`, }, + monitorLabel: "/meta", }, { about: "bulk meta handler with unresolved id", urlStr: "/meta/foo/bar?id=wordpress", @@ -917,6 +961,7 @@ Path: "/bar", }, }, + monitorLabel: "/meta/:meta/foo//bar", }, { about: "bulk meta handler with extra flags", urlStr: "/meta/foo/bar?id=wordpress&arble=bletch&z=w&z=p", @@ -938,6 +983,7 @@ }, }, }, + monitorLabel: "/meta/:meta/foo//bar", }, { about: "bulk meta handler with no ids", urlStr: "/meta/foo/bar", @@ -951,6 +997,7 @@ Code: params.ErrBadRequest, Message: "no ids specified in 
meta request", }, + monitorLabel: "/meta", }, { about: "bulk meta handler with unresolvable id", urlStr: "/meta/foo?id=unresolved&id=~foo/precise/wordpress-23", @@ -972,6 +1019,7 @@ CharmURL: "cs:precise/wordpress-99", }, }, + monitorLabel: "/meta/:meta/foo", }, { about: "bulk meta handler with id resolution error", urlStr: "/meta/foo?id=resolveerror&id=precise/wordpress-23", @@ -991,6 +1039,7 @@ expectBody: params.Error{ Message: "an error", }, + monitorLabel: "/meta", }, { about: "bulk meta handler with some nil data", urlStr: "/meta/foo?id=bundle/something-24&id=precise/wordpress-23", @@ -1006,6 +1055,7 @@ expectBody: map[string]string{ "bundle/something-24": "bundlefoo", }, + monitorLabel: "/meta/:meta/foo/:meta/foo", }, { about: "bulk meta handler with entity not found", urlStr: "/meta/foo?id=bundle/something-24&id=precise/wordpress-23", @@ -1024,6 +1074,7 @@ expectBody: map[string]string{ "bundle/something-24": "something", }, + monitorLabel: "/meta/:meta/foo/:meta/foo", }, { about: "meta request with invalid entity reference", urlStr: "/robots.txt/meta/any", @@ -1033,6 +1084,7 @@ Code: params.ErrNotFound, Message: `not found: URL has invalid charm or bundle name: "robots.txt"`, }, + monitorLabel: "", }, { about: "bulk meta handler, invalid id", urlStr: "/meta/foo?id=robots.txt", @@ -1043,6 +1095,7 @@ Code: params.ErrBadRequest, Message: `bad request: URL has invalid charm or bundle name: "robots.txt"`, }, + monitorLabel: "/meta", }} // resolveTo returns a URL resolver that resolves @@ -1123,6 +1176,7 @@ }) c.Assert(queryCount, gc.Equals, test.expectQueryCount) c.Assert(includedMetadata, jc.DeepEquals, test.expectWillIncludeMetadata) + c.Assert(router.Monitor.Label(), gc.Equals, test.monitorLabel) } } diff -Nru juju-core-2.0~beta15/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm.go juju-core-2.0.0/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm.go --- 
juju-core-2.0~beta15/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm.go 2016-10-13 14:32:25.000000000 +0000 @@ -30,10 +30,9 @@ // Note that because it implements charmstore.ArchiverTo, // it can be used as an argument to charmstore.Store.AddBundleWithArchive. type Bundle struct { - blob []byte - blobHash string - data *charm.BundleData - readMe string + *Blob + data *charm.BundleData + readMe string } // Data implements charm.Bundle.Data. @@ -46,22 +45,6 @@ return b.readMe } -// ArchiveTo implements charmstore.ArchiverTo. -func (b *Bundle) ArchiveTo(w io.Writer) error { - _, err := w.Write(b.blob) - return err -} - -// Bytes returns the contents of the bundle's archive. -func (b *Bundle) Bytes() []byte { - return b.blob -} - -// Size returns the size of the bundle's archive blob. -func (b *Bundle) Size() int64 { - return int64(len(b.blob)) -} - // NewBundle returns a bundle implementation // that contains the given bundle data. func NewBundle(data *charm.BundleData) *Bundle { @@ -70,18 +53,16 @@ panic(err) } readMe := "boring" - blob, hash := newBlob([]file{{ - name: "bundle.yaml", - data: dataYAML, - }, { - name: "README.md", - data: []byte(readMe), - }}) return &Bundle{ - blob: blob, - blobHash: hash, - data: data, - readMe: readMe, + data: data, + readMe: readMe, + Blob: NewBlob([]File{{ + Name: "bundle.yaml", + Data: dataYAML, + }, { + Name: "README.md", + Data: []byte(readMe), + }}), } } @@ -91,9 +72,9 @@ // Note that because it implements charmstore.ArchiverTo, // it can be used as an argument to charmstore.Store.AddCharmWithArchive. 
type Charm struct { - blob []byte - blobHash string - meta *charm.Meta + blob *Blob + meta *charm.Meta + metrics *charm.Metrics } var _ charm.Charm = (*Charm)(nil) @@ -105,22 +86,42 @@ if meta == nil { meta = new(charm.Meta) } - metaYAML, err := yaml.Marshal(meta) + return &Charm{ + meta: meta, + } +} + +func (c *Charm) initBlob() { + if c.blob != nil { + return + } + metaYAML, err := yaml.Marshal(c.meta) if err != nil { panic(err) } - blob, hash := newBlob([]file{{ - name: "metadata.yaml", - data: metaYAML, + files := []File{{ + Name: "metadata.yaml", + Data: metaYAML, }, { - name: "README.md", - data: []byte("boring"), - }}) - return &Charm{ - blob: blob, - blobHash: hash, - meta: meta, + Name: "README.md", + Data: []byte("boring"), + }} + if c.metrics != nil { + metricsYAML, err := yaml.Marshal(c.metrics) + if err != nil { + panic(err) + } + files = append(files, File{ + Name: "metrics.yaml", + Data: metricsYAML, + }) } + c.blob = NewBlob(files) +} + +func (c *Charm) WithMetrics(metrics *charm.Metrics) *Charm { + c.metrics = metrics + return c } // Meta implements charm.Charm.Meta. @@ -135,7 +136,7 @@ // Metrics implements charm.Charm.Metrics. func (c *Charm) Metrics() *charm.Metrics { - return nil + return c.metrics } // Actions implements charm.Charm.Actions. @@ -150,35 +151,38 @@ // ArchiveTo implements charmstore.ArchiverTo. func (c *Charm) ArchiveTo(w io.Writer) error { - _, err := w.Write(c.blob) - return err + c.initBlob() + return c.blob.ArchiveTo(w) } // Bytes returns the contents of the charm's archive. func (c *Charm) Bytes() []byte { - return c.blob + c.initBlob() + return c.blob.Bytes() } // Size returns the size of the charm's archive blob. func (c *Charm) Size() int64 { - return int64(len(c.blob)) + c.initBlob() + return c.blob.Size() } -type file struct { - name string - data []byte +// File represents a file which will be added to a new blob. 
+type File struct { + Name string + Data []byte } -// newBlob returns a zip archive containing the given files. -func newBlob(files []file) ([]byte, string) { +// NewBlob returns a blob that holds the given files. +func NewBlob(files []File) *Blob { var blob bytes.Buffer zw := zip.NewWriter(&blob) for _, f := range files { - w, err := zw.Create(f.name) + w, err := zw.Create(f.Name) if err != nil { panic(err) } - if _, err := w.Write(f.data); err != nil { + if _, err := w.Write(f.Data); err != nil { panic(err) } } @@ -187,7 +191,35 @@ } h := blobstore.NewHash() h.Write(blob.Bytes()) - return blob.Bytes(), fmt.Sprintf("%x", h.Sum(nil)) + return &Blob{ + data: blob.Bytes(), + hash: fmt.Sprintf("%x", h.Sum(nil)), + } +} + +// Blob represents a blob of data - a zip archive. +// Since it implements charmstore.ArchiverTo, it +// can be used to add charms or bundles with specific +// contents to the charm store. +type Blob struct { + data []byte + hash string +} + +// Bytes returns the contents of the blob. +func (b *Blob) Bytes() []byte { + return b.data +} + +// Size returns the size of the blob. +func (b *Blob) Size() int64 { + return int64(len(b.data)) +} + +// ArchiveTo implements charmstore.ArchiverTo.ArchiveTo. 
+func (b *Blob) ArchiveTo(w io.Writer) error { + _, err := w.Write(b.data) + return err } // MetaWithSupportedSeries returns m with Series diff -Nru juju-core-2.0~beta15/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/metered/metadata.yaml juju-core-2.0.0/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/metered/metadata.yaml --- juju-core-2.0~beta15/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/metered/metadata.yaml 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/metered/metadata.yaml 2016-10-13 14:32:25.000000000 +0000 @@ -0,0 +1,3 @@ +name: metered +summary: "A metered charm with custom metrics" +description: "" diff -Nru juju-core-2.0~beta15/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/metered/metrics.yaml juju-core-2.0.0/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/metered/metrics.yaml --- juju-core-2.0~beta15/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/metered/metrics.yaml 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/metered/metrics.yaml 2016-10-13 14:32:25.000000000 +0000 @@ -0,0 +1,5 @@ +metrics: + pings: + type: gauge + description: Description of the metric. 
+ juju-units: diff -Nru juju-core-2.0~beta15/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/metered/revision juju-core-2.0.0/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/metered/revision --- juju-core-2.0~beta15/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/metered/revision 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/metered/revision 2016-10-13 14:32:25.000000000 +0000 @@ -0,0 +1 @@ +1 \ No newline at end of file diff -Nru juju-core-2.0~beta15/src/gopkg.in/juju/charmstore.v5-unstable/internal/v4/api.go juju-core-2.0.0/src/gopkg.in/juju/charmstore.v5-unstable/internal/v4/api.go --- juju-core-2.0~beta15/src/gopkg.in/juju/charmstore.v5-unstable/internal/v4/api.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/juju/charmstore.v5-unstable/internal/v4/api.go 2016-10-13 14:32:25.000000000 +0000 @@ -1,9 +1,10 @@ -// Copyright 2015 Canonical Ltd. +// Copyright 2015-2016 Canonical Ltd. // Licensed under the AGPLv3, see LICENCE file for details. package v4 // import "gopkg.in/juju/charmstore.v5-unstable/internal/v4" import ( + "encoding/json" "net/http" "net/url" @@ -63,6 +64,7 @@ return } defer rh.Close() + rh.Router.Monitor.AppendLabel("/v4") rh.ServeHTTP(w, req) } @@ -94,7 +96,7 @@ // TODO Why is the v4 API accepting a channel parameter anyway? We // should probably always use "stable". 
for _, ch := range req.Form["channel"] { - if !v5.ValidChannels[params.Channel(ch)] { + if !params.ValidChannels[params.Channel(ch)] { return ReqHandler{}, badRequestf(nil, "invalid channel %q specified in request", ch) } } @@ -125,6 +127,7 @@ authId := h.AuthIdHandler handlers := v5.RouterHandlers(h.ReqHandler) handlers.Global["search"] = router.HandleJSON(h.serveSearch) + handlers.Meta["bundle-metadata"] = h.EntityHandler(h.metaBundleMetadata, "bundledata") handlers.Meta["charm-related"] = h.EntityHandler(h.metaCharmRelated, "charmprovidedinterfaces", "charmrequiredinterfaces") handlers.Meta["charm-metadata"] = h.EntityHandler(h.metaCharmMetadata, "charmmeta") handlers.Meta["revision-info"] = router.SingleIncludeHandler(h.metaRevisionInfo) @@ -211,6 +214,28 @@ return v5.StatsEnabled(req) } +// GET id/meta/bundle-metadata +// https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetabundle-metadata +func (h ReqHandler) metaBundleMetadata(entity *mongodoc.Entity, id *router.ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error) { + m := entity.BundleData + if m == nil { + return nil, nil + } + data, err := json.Marshal(m) + if err != nil { + return nil, errgo.Notef(err, "cannot marshal bundle-metadata") + } + var metadata map[string]interface{} + if err := json.Unmarshal(data, &metadata); err != nil { + return nil, errgo.Notef(err, "cannot unmarshal bundle-metadata") + } + if ap, ok := metadata["applications"]; ok { + metadata["Services"] = ap + delete(metadata, "applications") + } + return metadata, nil +} + // GET id/meta/charm-metadata // https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetacharm-metadata func (h ReqHandler) metaCharmMetadata(entity *mongodoc.Entity, id *router.ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error) { diff -Nru juju-core-2.0~beta15/src/gopkg.in/juju/charmstore.v5-unstable/internal/v4/api_test.go 
juju-core-2.0.0/src/gopkg.in/juju/charmstore.v5-unstable/internal/v4/api_test.go --- juju-core-2.0~beta15/src/gopkg.in/juju/charmstore.v5-unstable/internal/v4/api_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/juju/charmstore.v5-unstable/internal/v4/api_test.go 2016-10-13 14:32:25.000000000 +0000 @@ -29,6 +29,7 @@ "gopkg.in/macaroon-bakery.v1/httpbakery" "gopkg.in/macaroon.v1" "gopkg.in/mgo.v2/bson" + "gopkg.in/yaml.v2" "gopkg.in/juju/charmstore.v5-unstable/elasticsearch" "gopkg.in/juju/charmstore.v5-unstable/internal/charmstore" @@ -79,17 +80,6 @@ var _ = gc.Suite(&APISuite{}) -// patchLegacyDownloadCountsEnabled sets LegacyDownloadCountsEnabled to the -// given value for the duration of the test. -// TODO (frankban): remove this function when removing the legacy counts logic. -func patchLegacyDownloadCountsEnabled(addCleanup func(func(*gc.C)), value bool) { - original := charmstore.LegacyDownloadCountsEnabled - charmstore.LegacyDownloadCountsEnabled = value - addCleanup(func(*gc.C) { - charmstore.LegacyDownloadCountsEnabled = original - }) -} - type metaEndpointExpectedValueGetter func(*charmstore.Store, *router.ResolvedURL) (interface{}, error) type metaEndpoint struct { @@ -134,12 +124,45 @@ c.Assert(data.(*charm.Meta).Summary, gc.Equals, "Blog engine") }, }, { + name: "charm-metrics", + exclusive: charmOnly, + get: entityGetter(func(entity *mongodoc.Entity) interface{} { + if entity.CharmMetrics == nil { + return nil + } + return entity.CharmMetrics + }), + checkURL: newResolvedURL("~charmers/xenial/metered-42", 42), + assertCheckData: func(c *gc.C, data interface{}) { + c.Assert(data.(*charm.Metrics).Metrics, gc.DeepEquals, map[string]charm.Metric{ + "juju-units": {}, + "pings": {Type: "gauge", Description: "Description of the metric."}, + }) + }, +}, { name: "bundle-metadata", exclusive: bundleOnly, - get: entityFieldGetter("BundleData"), - checkURL: newResolvedURL("cs:~charmers/bundle/wordpress-simple-42", 42), + get: func(s 
*charmstore.Store, r *router.ResolvedURL) (interface{}, error) { + // V4 SPECIFIC + data, err := entityFieldGetter("BundleData")(s, r) + if err != nil { + return nil, err + } + if data == nil { + return nil, nil + } + return v4BundleMetadata(data.(*charm.BundleData)), nil + }, + checkURL: newResolvedURL("cs:~charmers/bundle/wordpress-simple-42", 42), assertCheckData: func(c *gc.C, data interface{}) { - c.Assert(data.(*charm.BundleData).Applications["wordpress"].Charm, gc.Equals, "wordpress") + // V4 SPECIFIC + services, ok := data.(map[string]interface{})["Services"] + c.Assert(ok, gc.Equals, true) + wordpress, ok := services.(map[string]interface{})["wordpress"] + c.Assert(ok, gc.Equals, true) + charm, ok := wordpress.(map[string]interface{})["Charm"] + c.Assert(ok, gc.Equals, true) + c.Assert(charm.(string), gc.Equals, "wordpress") }, }, { name: "bundle-unit-count", @@ -188,7 +211,7 @@ name: "hash", get: entityGetter(func(entity *mongodoc.Entity) interface{} { return ¶ms.HashResponse{ - Sum: entity.BlobHash, + Sum: entity.PreV5BlobHash, // V4 SPECIFIC } }), checkURL: newResolvedURL("~charmers/precise/wordpress-23", 23), @@ -199,7 +222,7 @@ name: "hash256", get: entityGetter(func(entity *mongodoc.Entity) interface{} { return ¶ms.HashResponse{ - Sum: entity.BlobHash256, + Sum: entity.PreV5BlobHash256, // V4 SPECIFIC } }), checkURL: newResolvedURL("~charmers/precise/wordpress-23", 23), @@ -566,6 +589,8 @@ newResolvedURL("cs:~charmers/utopic/category-2", 2), // A charm with a different user. newResolvedURL("cs:~bob/utopic/wordpress-2", -1), + // A charm with metrics. 
+ newResolvedURL("cs:~charmers/xenial/metered-42", 42), } func (s *APISuite) addTestEntities(c *gc.C) []*router.ResolvedURL { @@ -639,7 +664,15 @@ Read: []string{"charmers"}, Write: []string{"charmers"}, }, - params.DevelopmentChannel: { + params.EdgeChannel: { + Read: []string{"charmers"}, + Write: []string{"charmers"}, + }, + params.BetaChannel: { + Read: []string{"charmers"}, + Write: []string{"charmers"}, + }, + params.CandidateChannel: { Read: []string{"charmers"}, Write: []string{"charmers"}, }, @@ -681,7 +714,15 @@ Read: []string{"bob"}, Write: []string{"admin"}, }, - params.DevelopmentChannel: { + params.EdgeChannel: { + Read: []string{"charmers"}, + Write: []string{"charmers"}, + }, + params.BetaChannel: { + Read: []string{"charmers"}, + Write: []string{"charmers"}, + }, + params.CandidateChannel: { Read: []string{"charmers"}, Write: []string{"charmers"}, }, @@ -691,20 +732,20 @@ }, }) - // Publish one of the revisions to development, then PUT to meta/perm - // and check that the development ACLs have changed. - err := s.store.Publish(newResolvedURL("~charmers/precise/wordpress-23", 23), nil, params.DevelopmentChannel) + // Publish one of the revisions to edge, then PUT to meta/perm + // and check that the edge ACLs have changed. + err := s.store.Publish(newResolvedURL("~charmers/precise/wordpress-23", 23), nil, params.EdgeChannel) c.Assert(err, gc.IsNil) s.doAsUser("bob", func() { // Check that we aren't allowed to put to the newly published entity as bob. 
- s.assertPutIsUnauthorized(c, "~charmers/precise/wordpress/meta/perm/read?channel=development", []string{}, `unauthorized: access denied for user "bob"`) + s.assertPutIsUnauthorized(c, "~charmers/precise/wordpress/meta/perm/read?channel=edge", []string{}, `unauthorized: access denied for user "bob"`) }) s.doAsUser("charmers", func() { s.discharge = dischargeForUser("charmers") s.assertPut(c, "precise/wordpress-23/meta/perm/read", []string{"bob", "charlie"}) - s.assertGetIsUnauthorized(c, "~charmers/precise/wordpress/meta/perm/read?channel=development", `unauthorized: access denied for user "charmers"`) + s.assertGetIsUnauthorized(c, "~charmers/precise/wordpress/meta/perm/read?channel=edge", `unauthorized: access denied for user "charmers"`) }) s.doAsUser("bob", func() { @@ -734,10 +775,18 @@ Read: []string{"bob"}, Write: []string{"admin"}, }, - params.DevelopmentChannel: { + params.EdgeChannel: { Read: []string{"bob", "charlie"}, Write: []string{"charmers"}, }, + params.BetaChannel: { + Read: []string{"charmers"}, + Write: []string{"charmers"}, + }, + params.CandidateChannel: { + Read: []string{"charmers"}, + Write: []string{"charmers"}, + }, params.StableChannel: { Read: []string{"charmers"}, Write: []string{"charmers"}, @@ -775,7 +824,7 @@ }, }) - // The development-channel entity should still see the development ACLS. + // The edge-channel entity should still see the edge ACLS. 
httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.srv, Do: bakeryDo(nil), @@ -792,10 +841,18 @@ Read: []string{"bob"}, Write: []string{"admin"}, }, - params.DevelopmentChannel: { + params.EdgeChannel: { Read: []string{"bob", "charlie"}, Write: []string{"charmers"}, }, + params.BetaChannel: { + Read: []string{"charmers"}, + Write: []string{"charmers"}, + }, + params.CandidateChannel: { + Read: []string{"charmers"}, + Write: []string{"charmers"}, + }, params.StableChannel: { Read: []string{"charmers"}, Write: []string{"doris"}, @@ -814,10 +871,18 @@ Read: []string{"bob"}, Write: []string{"admin"}, }, - params.DevelopmentChannel: { + params.EdgeChannel: { Read: []string{"bob", "charlie"}, Write: []string{"charmers"}, }, + params.BetaChannel: { + Read: []string{"charmers"}, + Write: []string{"charmers"}, + }, + params.CandidateChannel: { + Read: []string{"charmers"}, + Write: []string{"charmers"}, + }, params.StableChannel: { Read: []string{"bob", params.Everyone}, Write: []string{"doris"}, @@ -836,10 +901,18 @@ Read: []string{"bob"}, Write: []string{"admin"}, }, - params.DevelopmentChannel: { + params.EdgeChannel: { Read: []string{"bob", "charlie"}, Write: []string{"charmers"}, }, + params.BetaChannel: { + Read: []string{"charmers"}, + Write: []string{"charmers"}, + }, + params.CandidateChannel: { + Read: []string{"charmers"}, + Write: []string{"charmers"}, + }, params.StableChannel: { Read: []string{"bob", params.Everyone}, Write: []string{"doris"}, @@ -874,10 +947,18 @@ Read: []string{"bob"}, Write: []string{"admin"}, }, - params.DevelopmentChannel: { + params.EdgeChannel: { Read: []string{"bob", "charlie"}, Write: []string{"charmers"}, }, + params.BetaChannel: { + Read: []string{"charmers"}, + Write: []string{"charmers"}, + }, + params.CandidateChannel: { + Read: []string{"charmers"}, + Write: []string{"charmers"}, + }, params.StableChannel: { Read: []string{}, Write: []string{}, @@ -894,10 +975,18 @@ Read: []string{"bob"}, Write: 
[]string{"admin"}, }, - params.DevelopmentChannel: { + params.EdgeChannel: { Read: []string{"bob", "charlie"}, Write: []string{"charmers"}, }, + params.BetaChannel: { + Read: []string{"charmers"}, + Write: []string{"charmers"}, + }, + params.CandidateChannel: { + Read: []string{"charmers"}, + Write: []string{"charmers"}, + }, params.StableChannel: { Read: []string{"bob"}, Write: []string{"admin"}, @@ -916,10 +1005,18 @@ Read: []string{"bob"}, Write: []string{"admin"}, }, - params.DevelopmentChannel: { + params.EdgeChannel: { Read: []string{"bob", "charlie"}, Write: []string{"charmers"}, }, + params.BetaChannel: { + Read: []string{"charmers"}, + Write: []string{"charmers"}, + }, + params.CandidateChannel: { + Read: []string{"charmers"}, + Write: []string{"charmers"}, + }, params.StableChannel: { Read: []string{"joe"}, Write: []string{}, @@ -942,11 +1039,11 @@ Read: []string{"foo"}, Write: []string{"bar"}, }, - URL: storeURL("trusty/wordpress-1/meta/perm?channel=development"), + URL: storeURL("trusty/wordpress-1/meta/perm?channel=edge"), ExpectStatus: http.StatusNotFound, ExpectBody: params.Error{ Code: params.ErrNotFound, - Message: `cs:trusty/wordpress-1 not found in development channel`, + Message: `cs:trusty/wordpress-1 not found in edge channel`, }, }) }) @@ -958,7 +1055,7 @@ Read: []string{"bob"}, Write: []string{"admin"}, }) - s.assertGet(c, "wordpress/meta/perm?channel=development", params.PermResponse{ + s.assertGet(c, "wordpress/meta/perm?channel=edge", params.PermResponse{ Read: []string{"bob", "charlie"}, Write: []string{"charmers"}, }) @@ -971,11 +1068,11 @@ Do: bakeryDo(nil), Method: "PUT", JSONBody: []string{"arble"}, - URL: storeURL("trusty/wordpress-1/meta/perm/read?channel=development"), + URL: storeURL("trusty/wordpress-1/meta/perm/read?channel=edge"), ExpectStatus: http.StatusNotFound, ExpectBody: params.Error{ Code: params.ErrNotFound, - Message: `cs:trusty/wordpress-1 not found in development channel`, + Message: `cs:trusty/wordpress-1 not found 
in edge channel`, }, }) }) @@ -984,10 +1081,18 @@ Read: []string{"bob"}, Write: []string{"admin"}, }, - params.DevelopmentChannel: { + params.EdgeChannel: { Read: []string{"bob", "charlie"}, Write: []string{"charmers"}, }, + params.BetaChannel: { + Read: []string{"charmers"}, + Write: []string{"charmers"}, + }, + params.CandidateChannel: { + Read: []string{"charmers"}, + Write: []string{"charmers"}, + }, params.StableChannel: { Read: []string{"joe"}, Write: []string{"bob"}, @@ -1409,6 +1514,65 @@ s.assertGet(c, "multi-series/meta/charm-metadata", &expectMeta) } +// V4 SPECIFIC +func (s *APISuite) TestMetaBundleMetadataReplacesApplicationsWithServices(c *gc.C) { + url := newResolvedURL("~charmers/bundle/wordpress-simple-2", 2) + s.addPublicBundle(c, storetesting.NewBundle(&charm.BundleData{ + Applications: map[string]*charm.ApplicationSpec{ + "wordpress": { + Charm: "wordpress", + }, + }, + }), url, true) + expectMeta := map[string]interface{}{ + "Services": map[string]interface{}{ + "wordpress": map[string]interface{}{ + "Charm": "wordpress", + }, + }, + } + s.assertGet(c, "bundle/wordpress-simple/meta/bundle-metadata", &expectMeta) +} + +// V4 SPECIFIC +func (s *APISuite) TestBundleArchiveReplacesApplicationsWithServices(c *gc.C) { + url := newResolvedURL("~charmers/bundle/wordpress-simple-2", 2) + s.addPublicBundle(c, storetesting.NewBundle(&charm.BundleData{ + Applications: map[string]*charm.ApplicationSpec{ + "wordpress": { + Charm: "wordpress", + }, + }, + }), url, true) + rr := httptesting.DoRequest(c, httptesting.DoRequestParams{ + Handler: s.srv, + URL: storeURL("bundle/wordpress-simple/archive"), + }) + c.Assert(rr.Code, gc.Equals, http.StatusOK) + r, err := zip.NewReader(bytes.NewReader(rr.Body.Bytes()), int64(rr.Body.Len())) + c.Assert(err, jc.ErrorIsNil) + var bundleMetadataFound bool + for _, f := range r.File { + if f.Name != "bundle.yaml" { + continue + } + bundleMetadataFound = true + rc, err := f.Open() + c.Assert(err, jc.ErrorIsNil) + metadata, 
err := ioutil.ReadAll(rc) + c.Assert(err, jc.ErrorIsNil) + var m map[string]interface{} + err = yaml.Unmarshal(metadata, &m) + c.Assert(err, jc.ErrorIsNil) + _, ok := m["services"] + c.Assert(ok, gc.Equals, true) + _, ok = m["applications"] + c.Assert(ok, gc.Equals, false) + defer rc.Close() + } + c.Assert(bundleMetadataFound, gc.Equals, true) +} + func (s *APISuite) TestBulkMeta(c *gc.C) { // We choose an arbitrary set of ids and metadata here, just to smoke-test // whether the meta/any logic is hooked up correctly. @@ -1777,7 +1941,7 @@ s.addPublicCharmFromRepo(c, "wordpress", newResolvedURL("cs:~charmers/trusty/wordpress-47", 47)) err := s.store.AddCharmWithArchive(newResolvedURL("cs:~charmers/trusty/wordpress-48", 48), storetesting.NewCharm(nil)) c.Assert(err, gc.IsNil) - err = s.store.Publish(newResolvedURL("cs:~charmers/trusty/wordpress-48", 48), nil, params.DevelopmentChannel) + err = s.store.Publish(newResolvedURL("cs:~charmers/trusty/wordpress-48", 48), nil, params.EdgeChannel) c.Assert(err, gc.IsNil) s.addPublicCharmFromRepo(c, "multi-series", newResolvedURL("cs:~charmers/wordpress-5", 49)) @@ -2224,9 +2388,6 @@ if !storetesting.MongoJSEnabled() { c.Skip("MongoDB JavaScript not available") } - // TODO (frankban): remove this call when removing the legacy counts logic. - patchLegacyDownloadCountsEnabled(s.AddCleanup, false) - today := time.Now() for i, test := range metaStatsTests { c.Logf("test %d: %s", i, test.about) @@ -2276,80 +2437,6 @@ } } -var metaStatsWithLegacyDownloadCountsTests = []struct { - about string - count string - expectValue int64 - expectError string -}{{ - about: "no extra-info", -}, { - about: "zero downloads", - count: "0", -}, { - about: "some downloads", - count: "47", - expectValue: 47, -}, { - about: "invalid value", - count: "invalid", - expectError: "cannot unmarshal extra-info value: invalid character 'i' looking for beginning of value", -}} - -// Tests meta/stats with LegacyDownloadCountsEnabled set to true. 
-// TODO (frankban): remove this test case when removing the legacy counts -// logic. -func (s *APISuite) TestMetaStatsWithLegacyDownloadCounts(c *gc.C) { - patchLegacyDownloadCountsEnabled(s.AddCleanup, true) - id, _ := s.addPublicCharmFromRepo(c, "wordpress", newResolvedURL("~charmers/utopic/wordpress-42", 42)) - url := storeURL("utopic/wordpress-42/meta/stats") - - for i, test := range metaStatsWithLegacyDownloadCountsTests { - c.Logf("test %d: %s", i, test.about) - - // Update the entity extra info if required. - if test.count != "" { - extraInfo := map[string][]byte{ - params.LegacyDownloadStats: []byte(test.count), - } - err := s.store.UpdateEntity(id, bson.D{{ - "$set", bson.D{{"extrainfo", extraInfo}}, - }}) - c.Assert(err, gc.IsNil) - } - - var expectBody interface{} - var expectStatus int - if test.expectError == "" { - // Ensure the downloads count is correctly returned. - expectBody = params.StatsResponse{ - ArchiveDownloadCount: test.expectValue, - ArchiveDownload: params.StatsCount{ - Total: test.expectValue, - }, - ArchiveDownloadAllRevisions: params.StatsCount{ - Total: test.expectValue, - }, - } - expectStatus = http.StatusOK - } else { - // Ensure an error is returned. - expectBody = params.Error{ - Message: test.expectError, - } - expectStatus = http.StatusInternalServerError - } - - // Perform the request. 
- httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ - Handler: s.srv, - URL: url, - ExpectStatus: expectStatus, - ExpectBody: expectBody, - }) - } -} - type publishSpec struct { id *router.ResolvedURL time string @@ -2597,7 +2684,7 @@ channel: params.StableChannel, }, { id: newResolvedURL("~charmers/precise/wordpress-1", 1), - channel: params.DevelopmentChannel, + channel: params.EdgeChannel, }, { id: newResolvedURL("~charmers/precise/wordpress-2", 2), channel: params.UnpublishedChannel, @@ -2621,7 +2708,7 @@ expectURL: "cs:precise/wordpress-0", }, { url: "wordpress", - channel: params.DevelopmentChannel, + channel: params.EdgeChannel, expectURL: "cs:precise/wordpress-1", }, { url: "wordpress", @@ -3350,10 +3437,29 @@ return mongodoc.ACL{}, err } ch := params.UnpublishedChannel - if e.Stable { + if e.Published[params.StableChannel] { ch = params.StableChannel - } else if e.Development { - ch = params.DevelopmentChannel + } else if e.Published[params.EdgeChannel] { + ch = params.EdgeChannel } return be.ChannelACLs[ch], nil } + +// V4 SPECIFIC +// v4BundleMetadata creates a representation of data with the v4 +// compatible format. +func v4BundleMetadata(data *charm.BundleData) map[string]interface{} { + buf, err := json.Marshal(data) + if err != nil { + panic(err) + } + var m map[string]interface{} + if err := json.Unmarshal(buf, &m); err != nil { + panic(err) + } + if ap, ok := m["applications"]; ok { + m["Services"] = ap + delete(m, "applications") + } + return m +} diff -Nru juju-core-2.0~beta15/src/gopkg.in/juju/charmstore.v5-unstable/internal/v4/archive_test.go juju-core-2.0.0/src/gopkg.in/juju/charmstore.v5-unstable/internal/v4/archive_test.go --- juju-core-2.0~beta15/src/gopkg.in/juju/charmstore.v5-unstable/internal/v4/archive_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/juju/charmstore.v5-unstable/internal/v4/archive_test.go 2016-10-13 14:32:25.000000000 +0000 @@ -349,7 +349,7 @@ // by closing errorBodies. 
try := make(chan struct{}) go func(try chan struct{}) { - for _ = range try { + for range try { var wg sync.WaitGroup for p := 0; p < 5; p++ { wg.Add(1) @@ -718,9 +718,9 @@ // Here we exercise both bundle internal verification (bad relation) and // validation with respect to charms (wordpress and mysql are missing). expectErr := `bundle verification failed: [` + - `"relation [\"foo:db\" \"mysql:server\"] refers to application \"foo\" not defined in this bundle",` + `"application \"mysql\" refers to non-existent charm \"mysql\"",` + - `"application \"wordpress\" refers to non-existent charm \"wordpress\""]` + `"application \"wordpress\" refers to non-existent charm \"wordpress\"",` + + `"relation [\"foo:db\" \"mysql:server\"] refers to application \"foo\" not defined in this bundle"]` s.assertCannotUpload(c, "~charmers/bundle/wordpress", f, http.StatusBadRequest, params.ErrInvalidEntity, expectErr) } @@ -870,7 +870,7 @@ Id: id, Meta: entityMetaInfo{ ArchiveSize: ¶ms.ArchiveSizeResponse{Size: size}, - BundleMeta: b.Data(), + BundleMeta: v4BundleMetadata(b.Data()), // V4 SPECIFIC }, }, ) @@ -928,7 +928,7 @@ c.Assert(entity.PreV5BlobSize, gc.Not(gc.Equals), int64(0)) c.Assert(entity.PromulgatedURL, gc.DeepEquals, url.PromulgatedURL()) - c.Assert(entity.Development, gc.Equals, false) + c.Assert(entity.Published, gc.IsNil) return expectId, entity.PreV5BlobSize } @@ -1371,7 +1371,7 @@ CharmMeta *charm.Meta `json:"charm-metadata,omitempty"` CharmConfig *charm.Config `json:"charm-config,omitempty"` CharmActions *charm.Actions `json:"charm-actions,omitempty"` - BundleMeta *charm.BundleData `json:"bundle-metadata,omitempty"` + BundleMeta interface{} `json:"bundle-metadata,omitempty"` // V4 SPECIFIC } func (s *ArchiveSuite) assertEntityInfo(c *gc.C, expect entityInfo) { @@ -1446,8 +1446,6 @@ func (s *ArchiveSearchSuite) SetUpTest(c *gc.C) { s.commonSuite.SetUpTest(c) - // TODO (frankban): remove this call when removing the legacy counts logic. 
- patchLegacyDownloadCountsEnabled(s.AddCleanup, false) } func (s *ArchiveSearchSuite) TestGetSearchUpdate(c *gc.C) { diff -Nru juju-core-2.0~beta15/src/gopkg.in/juju/charmstore.v5-unstable/internal/v4/auth_test.go juju-core-2.0.0/src/gopkg.in/juju/charmstore.v5-unstable/internal/v4/auth_test.go --- juju-core-2.0~beta15/src/gopkg.in/juju/charmstore.v5-unstable/internal/v4/auth_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/juju/charmstore.v5-unstable/internal/v4/auth_test.go 2016-10-13 14:32:25.000000000 +0000 @@ -215,8 +215,8 @@ // unpublishedReadPerm stores a list of users with read permissions on // on the unpublished entities. unpublishedReadPerm []string - // developmentReadPerm stores a list of users with read permissions on the development channel. - developmentReadPerm []string + // edgeReadPerm stores a list of users with read permissions on the edge channel. + edgeReadPerm []string // stableReadPerm stores a list of users with read permissions on the stable channel. stableReadPerm []string // channels contains a list of channels, to which the entity belongs. 
@@ -303,18 +303,18 @@ Message: `unauthorized: access denied for user "kirk"`, }, }, { - about: "access provided through development channel", + about: "access provided through edge channel", username: "kirk", groups: []string{"group1", "group2", "group3"}, unpublishedReadPerm: []string{"picard", "sisko", "group42", "group47"}, - developmentReadPerm: []string{"group1"}, - channels: []params.Channel{params.DevelopmentChannel}, + edgeReadPerm: []string{"group1"}, + channels: []params.Channel{params.EdgeChannel}, }, { - about: "access provided through development channel, but charm not published", + about: "access provided through edge channel, but charm not published", username: "kirk", groups: []string{"group1", "group2", "group3"}, unpublishedReadPerm: []string{"picard", "sisko", "group42", "group47"}, - developmentReadPerm: []string{"group1"}, + edgeReadPerm: []string{"group1"}, expectStatus: http.StatusUnauthorized, expectBody: params.Error{ Code: params.ErrUnauthorized, @@ -325,31 +325,31 @@ username: "kirk", groups: []string{"group1", "group2", "group3"}, unpublishedReadPerm: []string{"picard", "sisko", "group42", "group47"}, - developmentReadPerm: []string{"group12"}, + edgeReadPerm: []string{"group12"}, stableReadPerm: []string{"group2"}, - channels: []params.Channel{params.DevelopmentChannel, params.StableChannel}, + channels: []params.Channel{params.EdgeChannel, params.StableChannel}, }, { about: "access provided through stable channel, but charm not published", username: "kirk", groups: []string{"group1", "group2", "group3"}, unpublishedReadPerm: []string{"picard", "sisko", "group42", "group47"}, - developmentReadPerm: []string{"group12"}, + edgeReadPerm: []string{"group12"}, stableReadPerm: []string{"group2"}, - channels: []params.Channel{params.DevelopmentChannel}, + channels: []params.Channel{params.EdgeChannel}, expectStatus: http.StatusUnauthorized, expectBody: params.Error{ Code: params.ErrUnauthorized, Message: `unauthorized: access denied for user 
"kirk"`, }, }, { - about: "access provided through development channel, but charm on stable channel", + about: "access provided through edge channel, but charm on stable channel", username: "kirk", groups: []string{"group1", "group2", "group3"}, unpublishedReadPerm: []string{"picard", "sisko", "group42", "group47"}, - developmentReadPerm: []string{"group1"}, + edgeReadPerm: []string{"group1"}, stableReadPerm: []string{"group11"}, channels: []params.Channel{ - params.DevelopmentChannel, + params.EdgeChannel, params.StableChannel, }, expectStatus: http.StatusUnauthorized, @@ -364,7 +364,7 @@ unpublishedReadPerm: []string{"picard", "sisko", "group42", "group1"}, stableReadPerm: []string{"group11"}, channels: []params.Channel{ - params.DevelopmentChannel, + params.EdgeChannel, params.StableChannel, }, expectStatus: http.StatusUnauthorized, @@ -373,13 +373,13 @@ Message: `unauthorized: access denied for user "kirk"`, }, }, { - about: "access provided through unpublished ACL, but charm on development channel", + about: "access provided through unpublished ACL, but charm on edge channel", username: "kirk", groups: []string{"group1", "group2", "group3"}, unpublishedReadPerm: []string{"picard", "sisko", "group42", "group1"}, - developmentReadPerm: []string{"group11"}, + edgeReadPerm: []string{"group11"}, channels: []params.Channel{ - params.DevelopmentChannel, + params.EdgeChannel, }, expectStatus: http.StatusUnauthorized, expectBody: params.Error{ @@ -419,7 +419,7 @@ // Change the ACLs for the testing charm. err = s.store.SetPerms(&rurl.URL, "unpublished.read", test.unpublishedReadPerm...) c.Assert(err, gc.IsNil) - err = s.store.SetPerms(&rurl.URL, "development.read", test.developmentReadPerm...) + err = s.store.SetPerms(&rurl.URL, "edge.read", test.edgeReadPerm...) c.Assert(err, gc.IsNil) err = s.store.SetPerms(&rurl.URL, "stable.read", test.stableReadPerm...) 
c.Assert(err, gc.IsNil) @@ -463,8 +463,8 @@ groups []string // writePerm stores a list of users with write permissions. unpublishedWritePerm []string - // developmentWritePerm stores a list of users with write permissions on the development channel. - developmentWritePerm []string + // edgeWritePerm stores a list of users with write permissions on the edge channel. + edgeWritePerm []string // stableWritePerm stores a list of users with write permissions on the stable channel. stableWritePerm []string // channels contains a list of channels, to which the entity belongs. @@ -538,18 +538,18 @@ Message: `unauthorized: access denied for user "kirk"`, }, }, { - about: "access provided through development channel", + about: "access provided through edge channel", username: "kirk", groups: []string{"group1", "group2", "group3"}, unpublishedWritePerm: []string{"picard", "sisko", "group42", "group47"}, - developmentWritePerm: []string{"group1"}, - channels: []params.Channel{params.DevelopmentChannel}, + edgeWritePerm: []string{"group1"}, + channels: []params.Channel{params.EdgeChannel}, }, { - about: "access provided through development channel, but charm not published", + about: "access provided through edge channel, but charm not published", username: "kirk", groups: []string{"group1", "group2", "group3"}, unpublishedWritePerm: []string{"picard", "sisko", "group42", "group47"}, - developmentWritePerm: []string{"group1"}, + edgeWritePerm: []string{"group1"}, expectStatus: http.StatusUnauthorized, expectBody: params.Error{ Code: params.ErrUnauthorized, @@ -560,31 +560,31 @@ username: "kirk", groups: []string{"group1", "group2", "group3"}, unpublishedWritePerm: []string{"picard", "sisko", "group42", "group47"}, - developmentWritePerm: []string{"group12"}, + edgeWritePerm: []string{"group12"}, stableWritePerm: []string{"group2"}, - channels: []params.Channel{params.DevelopmentChannel, params.StableChannel}, + channels: []params.Channel{params.EdgeChannel, 
params.StableChannel}, }, { about: "access provided through stable channel, but charm not published", username: "kirk", groups: []string{"group1", "group2", "group3"}, unpublishedWritePerm: []string{"picard", "sisko", "group42", "group47"}, - developmentWritePerm: []string{"group12"}, + edgeWritePerm: []string{"group12"}, stableWritePerm: []string{"group2"}, - channels: []params.Channel{params.DevelopmentChannel}, + channels: []params.Channel{params.EdgeChannel}, expectStatus: http.StatusUnauthorized, expectBody: params.Error{ Code: params.ErrUnauthorized, Message: `unauthorized: access denied for user "kirk"`, }, }, { - about: "access provided through development channel, but charm on stable channel", + about: "access provided through edge channel, but charm on stable channel", username: "kirk", groups: []string{"group1", "group2", "group3"}, unpublishedWritePerm: []string{"picard", "sisko", "group42", "group47"}, - developmentWritePerm: []string{"group1"}, + edgeWritePerm: []string{"group1"}, stableWritePerm: []string{"group11"}, channels: []params.Channel{ - params.DevelopmentChannel, + params.EdgeChannel, params.StableChannel, }, expectStatus: http.StatusUnauthorized, @@ -599,7 +599,7 @@ unpublishedWritePerm: []string{"picard", "sisko", "group42", "group1"}, stableWritePerm: []string{"group11"}, channels: []params.Channel{ - params.DevelopmentChannel, + params.EdgeChannel, params.StableChannel, }, expectStatus: http.StatusUnauthorized, @@ -608,13 +608,13 @@ Message: `unauthorized: access denied for user "kirk"`, }, }, { - about: "access provided through unpublished ACL, but charm on development channel", + about: "access provided through unpublished ACL, but charm on edge channel", username: "kirk", groups: []string{"group1", "group2", "group3"}, unpublishedWritePerm: []string{"picard", "sisko", "group42", "group1"}, - developmentWritePerm: []string{"group11"}, + edgeWritePerm: []string{"group11"}, channels: []params.Channel{ - params.DevelopmentChannel, + 
params.EdgeChannel, }, expectStatus: http.StatusUnauthorized, expectBody: params.Error{ @@ -646,7 +646,7 @@ // Change the ACLs for the testing charm. err = s.store.SetPerms(&rurl.URL, "unpublished.write", test.unpublishedWritePerm...) c.Assert(err, gc.IsNil) - err = s.store.SetPerms(&rurl.URL, "development.write", test.developmentWritePerm...) + err = s.store.SetPerms(&rurl.URL, "edge.write", test.edgeWritePerm...) c.Assert(err, gc.IsNil) err = s.store.SetPerms(&rurl.URL, "stable.write", test.stableWritePerm...) c.Assert(err, gc.IsNil) diff -Nru juju-core-2.0~beta15/src/gopkg.in/juju/charmstore.v5-unstable/internal/v4/list_test.go juju-core-2.0.0/src/gopkg.in/juju/charmstore.v5-unstable/internal/v4/list_test.go --- juju-core-2.0~beta15/src/gopkg.in/juju/charmstore.v5-unstable/internal/v4/list_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/juju/charmstore.v5-unstable/internal/v4/list_test.go 2016-10-13 14:32:25.000000000 +0000 @@ -174,7 +174,7 @@ about: "bundle-metadata", query: "name=wordpress-simple&type=bundle&include=bundle-metadata", meta: map[string]interface{}{ - "bundle-metadata": getListBundle("wordpress-simple").Data(), + "bundle-metadata": v4BundleMetadata(getListBundle("wordpress-simple").Data()), // V4 SPECIFIC }, }, { about: "bundle-machine-count", diff -Nru juju-core-2.0~beta15/src/gopkg.in/juju/charmstore.v5-unstable/internal/v4/relations_test.go juju-core-2.0.0/src/gopkg.in/juju/charmstore.v5-unstable/internal/v4/relations_test.go --- juju-core-2.0~beta15/src/gopkg.in/juju/charmstore.v5-unstable/internal/v4/relations_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/juju/charmstore.v5-unstable/internal/v4/relations_test.go 2016-10-13 14:32:25.000000000 +0000 @@ -226,7 +226,6 @@ Meta: map[string]interface{}{ "id-name": params.IdNameResponse{"wordpress"}, "charm-metadata": &charm.Meta{ - Format: 1, Provides: map[string]charm.Relation{ "website": { Name: "website", @@ -374,7 +373,7 @@ Id: 
charm.MustParseURL("bundle/wordpress-complex-1"), Meta: map[string]interface{}{ "id-name": params.IdNameResponse{"wordpress-complex"}, - "bundle-metadata": metaBundlesContainingBundles["1 ~charmers/bundle/wordpress-complex-1"].Data(), + "bundle-metadata": v4BundleMetadata(metaBundlesContainingBundles["1 ~charmers/bundle/wordpress-complex-1"].Data()), // V4 SPECIFIC }, }}, }, { @@ -527,19 +526,19 @@ Id: charm.MustParseURL("bundle/useless-0"), Meta: map[string]interface{}{ "id-name": params.IdNameResponse{"useless"}, - "bundle-metadata": metaBundlesContainingBundles["0 ~charmers/bundle/useless-0"].Data(), + "bundle-metadata": v4BundleMetadata(metaBundlesContainingBundles["0 ~charmers/bundle/useless-0"].Data()), //V4 SPECIFIC }, }, { Id: charm.MustParseURL("bundle/wordpress-complex-1"), Meta: map[string]interface{}{ "id-name": params.IdNameResponse{"wordpress-complex"}, - "bundle-metadata": metaBundlesContainingBundles["1 ~charmers/bundle/wordpress-complex-1"].Data(), + "bundle-metadata": v4BundleMetadata(metaBundlesContainingBundles["1 ~charmers/bundle/wordpress-complex-1"].Data()), //V4 SPECIFIC }, }, { Id: charm.MustParseURL("bundle/wordpress-simple-1"), Meta: map[string]interface{}{ "id-name": params.IdNameResponse{"wordpress-simple"}, - "bundle-metadata": metaBundlesContainingBundles["1 ~charmers/bundle/wordpress-simple-1"].Data(), + "bundle-metadata": v4BundleMetadata(metaBundlesContainingBundles["1 ~charmers/bundle/wordpress-simple-1"].Data()), //V4 SPECIFIC }, }}, }, { @@ -672,7 +671,7 @@ panic(fmt.Sprintf("resolved URL %q does not contain user", urlStr)) } return &router.ResolvedURL{ - URL: *url.WithChannel(""), + URL: *url, PromulgatedRevision: promRev, } } diff -Nru juju-core-2.0~beta15/src/gopkg.in/juju/charmstore.v5-unstable/internal/v4/search.go juju-core-2.0.0/src/gopkg.in/juju/charmstore.v5-unstable/internal/v4/search.go --- juju-core-2.0~beta15/src/gopkg.in/juju/charmstore.v5-unstable/internal/v4/search.go 2016-08-16 08:56:25.000000000 +0000 +++ 
juju-core-2.0.0/src/gopkg.in/juju/charmstore.v5-unstable/internal/v4/search.go 2016-10-13 14:32:25.000000000 +0000 @@ -15,6 +15,7 @@ // https://github.com/juju/charmstore/blob/v4/docs/API.md#get-search func (h ReqHandler) serveSearch(_ http.Header, req *http.Request) (interface{}, error) { sp, err := v5.ParseSearchParams(req) + sp.AutoComplete = true if err != nil { return "", err } diff -Nru juju-core-2.0~beta15/src/gopkg.in/juju/charmstore.v5-unstable/internal/v4/search_test.go juju-core-2.0.0/src/gopkg.in/juju/charmstore.v5-unstable/internal/v4/search_test.go --- juju-core-2.0~beta15/src/gopkg.in/juju/charmstore.v5-unstable/internal/v4/search_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/juju/charmstore.v5-unstable/internal/v4/search_test.go 2016-10-13 14:32:25.000000000 +0000 @@ -320,7 +320,7 @@ about: "bundle-metadata", query: "name=wordpress-simple&type=bundle&include=bundle-metadata", meta: map[string]interface{}{ - "bundle-metadata": getSearchBundle("wordpress-simple").Data(), + "bundle-metadata": v4BundleMetadata(getSearchBundle("wordpress-simple").Data()), // V4 SPECIFIC }, }, { about: "bundle-machine-count", @@ -591,8 +591,6 @@ } func (s *SearchSuite) TestDownloadsBoost(c *gc.C) { - // TODO (frankban): remove this call when removing the legacy counts logic. - patchLegacyDownloadCountsEnabled(s.AddCleanup, false) charmDownloads := map[string]int{ "mysql": 0, "wordpress": 1, @@ -622,18 +620,6 @@ c.Assert(sr.Results[2].Id.Name, gc.Equals, "mysql") } -// TODO(mhilton) remove this test when removing legacy counts logic. 
-func (s *SearchSuite) TestLegacyStatsUpdatesSearch(c *gc.C) { - patchLegacyDownloadCountsEnabled(s.AddCleanup, true) - doc, err := s.store.ES.GetSearchDocument(charm.MustParseURL("~openstack-charmers/trusty/mysql-7")) - c.Assert(err, gc.IsNil) - c.Assert(doc.TotalDownloads, gc.Equals, int64(0)) - s.assertPutAsAdmin(c, "~openstack-charmers/trusty/mysql-7/meta/extra-info/"+params.LegacyDownloadStats, 57) - doc, err = s.store.ES.GetSearchDocument(charm.MustParseURL("~openstack-charmers/trusty/mysql-7")) - c.Assert(err, gc.IsNil) - c.Assert(doc.TotalDownloads, gc.Equals, int64(57)) -} - func (s *SearchSuite) TestSearchWithAdminCredentials(c *gc.C) { rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ Handler: s.srv, diff -Nru juju-core-2.0~beta15/src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/api.go juju-core-2.0.0/src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/api.go --- juju-core-2.0~beta15/src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/api.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/api.go 2016-10-13 14:32:25.000000000 +0000 @@ -1,4 +1,4 @@ -// Copyright 2014 Canonical Ltd. +// Copyright 2014-2016 Canonical Ltd. // Licensed under the AGPLv3, see LICENCE file for details. package v5 // import "gopkg.in/juju/charmstore.v5-unstable/internal/v5" @@ -23,6 +23,7 @@ "gopkg.in/juju/charmrepo.v2-unstable/csclient/params" "gopkg.in/macaroon-bakery.v1/bakery" "gopkg.in/macaroon-bakery.v1/bakery/checkers" + "gopkg.in/macaroon-bakery.v1/bakery/mgostorage" "gopkg.in/macaroon-bakery.v1/httpbakery" "gopkg.in/mgo.v2" "gopkg.in/mgo.v2/bson" @@ -149,8 +150,7 @@ "series", "promulgated-revision", "promulgated-url", - "development", - "stable", + "published", ) RequiredBaseEntityFields = charmstore.FieldSelector( "user", @@ -176,14 +176,6 @@ return s.Store.FindBaseEntity(url, fields) } -// ValidChannels holds the set of all allowed channels -// that can be passed as a "?channel=" parameter. 
-var ValidChannels = map[params.Channel]bool{ - params.UnpublishedChannel: true, - params.DevelopmentChannel: true, - params.StableChannel: true, -} - // NewReqHandler returns an instance of a *ReqHandler // suitable for handling the given HTTP request. After use, the ReqHandler.Close // method should be called to close it. @@ -196,7 +188,7 @@ // most endpoints will only ever use the first one. // PUT to an archive is the notable exception. for _, ch := range req.Form["channel"] { - if !ValidChannels[params.Channel(ch)] { + if !params.ValidChannels[params.Channel(ch)] { return nil, badRequestf(nil, "invalid channel %q specified in request", ch) } } @@ -263,10 +255,11 @@ "bundle-metadata": h.EntityHandler(h.metaBundleMetadata, "bundledata"), "bundles-containing": h.EntityHandler(h.metaBundlesContaining), "bundle-unit-count": h.EntityHandler(h.metaBundleUnitCount, "bundleunitcount"), - "published": h.EntityHandler(h.metaPublished, "development", "stable"), + "published": h.EntityHandler(h.metaPublished, "published"), "charm-actions": h.EntityHandler(h.metaCharmActions, "charmactions"), "charm-config": h.EntityHandler(h.metaCharmConfig, "charmconfig"), "charm-metadata": h.EntityHandler(h.metaCharmMetadata, "charmmeta"), + "charm-metrics": h.EntityHandler(h.metaCharmMetrics, "charmmetrics"), "charm-related": h.EntityHandler(h.metaCharmRelated, "charmprovidedinterfaces", "charmrequiredinterfaces"), "common-info": h.puttableBaseEntityHandler( h.metaCommonInfo, @@ -304,7 +297,7 @@ "resources": h.EntityHandler(h.metaResources, "charmmeta"), "resources/": h.EntityHandler(h.metaResourcesSingle, "charmmeta"), "revision-info": router.SingleIncludeHandler(h.metaRevisionInfo), - "stats": h.EntityHandler(h.metaStats), + "stats": h.EntityHandler(h.metaStats, "supportedseries"), "supported-series": h.EntityHandler(h.metaSupportedSeries, "supportedseries"), "tags": h.EntityHandler(h.metaTags, "charmmeta", "bundledata"), "terms": h.EntityHandler(h.metaTerms, "charmmeta"), @@ -333,6 
+326,8 @@ return } defer rh.Close() + rh.Router.Monitor.Reset() + rh.Router.Monitor.AppendLabel("/v5") rh.ServeHTTP(w, req) } @@ -514,10 +509,6 @@ if err != nil { return errgo.Notef(err, "cannot update %q", &id.URL) } - err = h.Store.UpdateSearchFields(id, fields) - if err != nil { - return errgo.Notef(err, "cannot update %q", &id.URL) - } h.processEntries(entries) return nil } @@ -642,6 +633,12 @@ return entity.CharmMeta, nil } +// GET id/meta/charm-metrics +// https://github.com/juju/charmstore/blob/v5-unstable/docs/API.md#get-idmetacharm-metrics +func (h *ReqHandler) metaCharmMetrics(entity *mongodoc.Entity, id *router.ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error) { + return entity.CharmMetrics, nil +} + // GET id/meta/bundle-metadata // https://github.com/juju/charmstore/blob/v5-unstable/docs/API.md#get-idmetabundle-metadata func (h *ReqHandler) metaBundleMetadata(entity *mongodoc.Entity, id *router.ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error) { @@ -776,10 +773,30 @@ if err != nil { return charmstore.SearchParams{}, badRequestf(err, "invalid refresh parameter") } - counts, countsAllRevisions, err := h.Store.ArchiveDownloadCounts(id.PreferredURL(), refresh) + + preferredURL := id.PreferredURL() + counts, countsAllRevisions, err := h.Store.ArchiveDownloadCounts(preferredURL, refresh) if err != nil { return nil, errgo.Mask(err) } + if entity.Series == "" { + // Concatenate all the supported series for a multi-series entity. 
+ for _, series := range entity.SupportedSeries { + preferredURL.Series = series + countsSeries, countsAllRevisionsSeries, err := h.Store.ArchiveDownloadCounts(preferredURL, refresh) + if err != nil { + return nil, errgo.Mask(err) + } + counts.Total += countsSeries.Total + counts.LastDay += countsSeries.LastDay + counts.LastWeek += countsSeries.LastWeek + counts.LastMonth += countsSeries.LastMonth + countsAllRevisions.Total += countsAllRevisionsSeries.Total + countsAllRevisions.LastDay += countsAllRevisionsSeries.LastDay + countsAllRevisions.LastWeek += countsAllRevisionsSeries.LastWeek + countsAllRevisions.LastMonth += countsAllRevisionsSeries.LastMonth + } + } // Return the response. return ¶ms.StatsResponse{ ArchiveDownloadCount: counts.Total, @@ -818,7 +835,7 @@ if err := h.AuthorizeEntityForOp(rurl, req, OpReadWithNoTerms); err != nil { // We're not authorized to see the entity, so leave it out. // Note that the only time this will happen is when - // the original URL is promulgated and has a development channel, + // the original URL is promulgated and has a edge channel, // the charm has changed owners, and the old owner and // the new one have different dev ACLs. It's easiest // and most reliable just to check everything though. @@ -1158,25 +1175,29 @@ if err != nil { return nil, errgo.Mask(err) } - info := make([]params.PublishedInfo, 0, 2) - if entity.Development { - info = append(info, params.PublishedInfo{ - Channel: params.DevelopmentChannel, - }) - } - if entity.Stable { - info = append(info, params.PublishedInfo{ - Channel: params.StableChannel, - }) - } - for i, pinfo := range info { - // The entity is current for a channel if any series within - // a channel refers to the entity. 
- for _, url := range baseEntity.ChannelEntities[pinfo.Channel] { + results := make(map[params.Channel]params.PublishedInfo, len(entity.Published)) + for channel, published := range entity.Published { + if !published { + continue + } + var current bool + for _, url := range baseEntity.ChannelEntities[channel] { if *url == *entity.URL { - info[i].Current = true + current = true + break } } + results[channel] = params.PublishedInfo{ + Channel: channel, + Current: current, + } + } + // Reorder results by stability level. + info := make([]params.PublishedInfo, 0, len(results)) + for _, channel := range params.OrderedChannels { + if result, ok := results[channel]; ok { + info = append(info, result) + } } return ¶ms.PublishedResponse{ Info: info, @@ -1283,6 +1304,9 @@ return nil, errgo.WithCausef(nil, params.ErrForbidden, "delegatable macaroon is not obtainable using admin credentials") } // TODO propagate expiry time from macaroons in request. + + // Note that we don't use a root key store with a short term + // expiry, as we don't want to create a new root key every minute. m, err := h.Store.Bakery.NewMacaroon("", nil, []checkers.Caveat{ checkers.DeclaredCaveat(UsernameAttr, auth.Username), checkers.TimeBeforeCaveat(time.Now().Add(DelegatableMacaroonExpiry)), @@ -1329,12 +1353,16 @@ return nil, errgo.WithCausef(nil, params.ErrForbidden, "delegatable macaroon is not obtainable using admin credentials (admin %v)", auth.Admin) } + longTermBakery := h.Store.BakeryWithPolicy(mgostorage.Policy{ + ExpiryDuration: 1e6 * time.Hour, // 116 years... + GenerateInterval: 30 * 24 * time.Hour, // Roughly monthly. + }) // After this time, clients will be forced to renew the macaroon, even // though it remains technically valid. activeExpireTime := time.Now().Add(DelegatableMacaroonExpiry) // TODO propagate expiry time from macaroons in request. 
- m, err := h.Store.Bakery.NewMacaroon("", nil, []checkers.Caveat{ + m, err := longTermBakery.NewMacaroon("", nil, []checkers.Caveat{ checkers.DeclaredCaveat(UsernameAttr, auth.Username), isEntityCaveat(ids), activeTimeBeforeCaveat(activeExpireTime), @@ -1423,13 +1451,6 @@ return nil } -// validPublishChannels holds the set of channels that can -// be the target of a publish request. -var validPublishChannels = map[params.Channel]bool{ - params.DevelopmentChannel: true, - params.StableChannel: true, -} - // PUT id/publish // See https://github.com/juju/charmstore/blob/v5-unstable/docs/API.md#put-idpublish func (h *ReqHandler) servePublish(id *router.ResolvedURL, w http.ResponseWriter, req *http.Request) error { @@ -1450,8 +1471,14 @@ return badRequestf(nil, "no channels provided") } for _, c := range chans { - if !validPublishChannels[c] { - return badRequestf(nil, "cannot publish to %q", c) + if c == params.NoChannel { + return badRequestf(nil, "cannot publish to an empty channel") + } + if !params.ValidChannels[c] { + return badRequestf(nil, "unrecognized channel %q", c) + } + if c == params.UnpublishedChannel { + return badRequestf(nil, "cannot publish to the unpublished channel") } } diff -Nru juju-core-2.0~beta15/src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/api_test.go juju-core-2.0.0/src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/api_test.go --- juju-core-2.0~beta15/src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/api_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/api_test.go 2016-10-13 14:32:25.000000000 +0000 @@ -74,17 +74,6 @@ var _ = gc.Suite(&APISuite{}) -// patchLegacyDownloadCountsEnabled sets LegacyDownloadCountsEnabled to the -// given value for the duration of the test. -// TODO (frankban): remove this function when removing the legacy counts logic. 
-func patchLegacyDownloadCountsEnabled(addCleanup func(func(*gc.C)), value bool) { - original := charmstore.LegacyDownloadCountsEnabled - charmstore.LegacyDownloadCountsEnabled = value - addCleanup(func(*gc.C) { - charmstore.LegacyDownloadCountsEnabled = original - }) -} - type metaEndpointExpectedValueGetter func(*charmstore.Store, *router.ResolvedURL) (interface{}, error) type metaEndpoint struct { @@ -140,6 +129,22 @@ c.Assert(data.(*charm.Meta).Summary, gc.Equals, "Blog engine") }, }, { + name: "charm-metrics", + exclusive: charmOnly, + get: entityGetter(func(entity *mongodoc.Entity) interface{} { + if entity.CharmMetrics == nil { + return nil + } + return entity.CharmMetrics + }), + checkURL: newResolvedURL("~charmers/xenial/metered-42", 42), + assertCheckData: func(c *gc.C, data interface{}) { + c.Assert(data.(*charm.Metrics).Metrics, gc.DeepEquals, map[string]charm.Metric{ + "juju-units": {}, + "pings": {Type: "gauge", Description: "Description of the metric."}, + }) + }, +}, { name: "bundle-metadata", exclusive: bundleOnly, get: entityFieldGetter("BundleData"), @@ -632,7 +637,7 @@ }, { name: "published", get: func(store *charmstore.Store, url *router.ResolvedURL) (interface{}, error) { - // All the entities published are in stable, not development, + // All the entities published are in stable, not edge, // and there's only one for each base entity. return ¶ms.PublishedResponse{ Info: []params.PublishedInfo{{ @@ -714,6 +719,8 @@ newResolvedURL("cs:~charmers/precise/terms-42", 42), // A charm with resources. newResolvedURL("cs:~charmers/utopic/starsay-17", 17), + // A charm with metrics. 
+ newResolvedURL("cs:~charmers/xenial/metered-42", 42), } func (s *APISuite) addTestEntities(c *gc.C) []*router.ResolvedURL { @@ -792,21 +799,21 @@ }, { id: "~charmers/precise/wordpress-1", entity: storetesting.NewCharm(nil), - channels: []params.Channel{params.DevelopmentChannel, params.StableChannel}, + channels: []params.Channel{params.EdgeChannel, params.StableChannel}, expect: params.PublishedResponse{ Info: []params.PublishedInfo{{ - Channel: params.DevelopmentChannel, - }, { Channel: params.StableChannel, + }, { + Channel: params.EdgeChannel, }}, }, }, { id: "~charmers/precise/wordpress-3", entity: storetesting.NewCharm(nil), - channels: []params.Channel{params.DevelopmentChannel}, + channels: []params.Channel{params.EdgeChannel}, expect: params.PublishedResponse{ Info: []params.PublishedInfo{{ - Channel: params.DevelopmentChannel, + Channel: params.EdgeChannel, }}, }, }, { @@ -818,20 +825,20 @@ }, { id: "~charmers/precise/wordpress-5", entity: storetesting.NewCharm(nil), - channels: []params.Channel{params.DevelopmentChannel}, + channels: []params.Channel{params.EdgeChannel}, expect: params.PublishedResponse{ Info: []params.PublishedInfo{{ - Channel: params.DevelopmentChannel, + Channel: params.EdgeChannel, Current: true, }}, }, }, { id: "~charmers/trusty/wordpress-0", entity: storetesting.NewCharm(nil), - channels: []params.Channel{params.DevelopmentChannel}, + channels: []params.Channel{params.EdgeChannel}, expect: params.PublishedResponse{ Info: []params.PublishedInfo{{ - Channel: params.DevelopmentChannel, + Channel: params.EdgeChannel, Current: true, }}, }, @@ -858,6 +865,21 @@ }}, }, }, { + id: "~charmers/wordpress-8", + entity: storetesting.NewCharm(&charm.Meta{ + Series: []string{"wily"}, + }), + channels: []params.Channel{params.CandidateChannel, params.BetaChannel}, + expect: params.PublishedResponse{ + Info: []params.PublishedInfo{{ + Channel: params.CandidateChannel, + Current: true, + }, { + Channel: params.BetaChannel, + Current: true, + }}, + 
}, +}, { id: "~bob/bundle/mybundle-2", entity: storetesting.NewBundle(&charm.BundleData{ Applications: map[string]*charm.ApplicationSpec{ @@ -994,7 +1016,15 @@ Read: []string{"charmers"}, Write: []string{"charmers"}, }, - params.DevelopmentChannel: { + params.EdgeChannel: { + Read: []string{"charmers"}, + Write: []string{"charmers"}, + }, + params.BetaChannel: { + Read: []string{"charmers"}, + Write: []string{"charmers"}, + }, + params.CandidateChannel: { Read: []string{"charmers"}, Write: []string{"charmers"}, }, @@ -1036,7 +1066,15 @@ Read: []string{"bob"}, Write: []string{"admin"}, }, - params.DevelopmentChannel: { + params.EdgeChannel: { + Read: []string{"charmers"}, + Write: []string{"charmers"}, + }, + params.BetaChannel: { + Read: []string{"charmers"}, + Write: []string{"charmers"}, + }, + params.CandidateChannel: { Read: []string{"charmers"}, Write: []string{"charmers"}, }, @@ -1046,20 +1084,20 @@ }, }) - // Publish one of the revisions to development, then PUT to meta/perm - // and check that the development ACLs have changed. - err := s.store.Publish(newResolvedURL("~charmers/precise/wordpress-23", 23), nil, params.DevelopmentChannel) + // Publish one of the revisions to edge, then PUT to meta/perm + // and check that the edge ACLs have changed. + err := s.store.Publish(newResolvedURL("~charmers/precise/wordpress-23", 23), nil, params.EdgeChannel) c.Assert(err, gc.IsNil) s.doAsUser("bob", func() { // Check that we aren't allowed to put to the newly published entity as bob. 
- s.assertPutIsUnauthorized(c, "~charmers/precise/wordpress/meta/perm/read?channel=development", []string{}, `unauthorized: access denied for user "bob"`) + s.assertPutIsUnauthorized(c, "~charmers/precise/wordpress/meta/perm/read?channel=edge", []string{}, `unauthorized: access denied for user "bob"`) }) s.doAsUser("charmers", func() { s.discharge = dischargeForUser("charmers") s.assertPut(c, "precise/wordpress-23/meta/perm/read", []string{"bob", "charlie"}) - s.assertGetIsUnauthorized(c, "~charmers/precise/wordpress/meta/perm/read?channel=development", `unauthorized: access denied for user "charmers"`) + s.assertGetIsUnauthorized(c, "~charmers/precise/wordpress/meta/perm/read?channel=edge", `unauthorized: access denied for user "charmers"`) }) s.doAsUser("bob", func() { @@ -1089,15 +1127,24 @@ Read: []string{"bob"}, Write: []string{"admin"}, }, - params.DevelopmentChannel: { + params.EdgeChannel: { Read: []string{"bob", "charlie"}, Write: []string{"charmers"}, }, + params.BetaChannel: { + Read: []string{"charmers"}, + Write: []string{"charmers"}, + }, + params.CandidateChannel: { + Read: []string{"charmers"}, + Write: []string{"charmers"}, + }, params.StableChannel: { Read: []string{"charmers"}, Write: []string{"charmers"}, }, }) + // Publish wordpress-1 to stable and check that the stable ACLs // have changed. err = s.store.Publish(newResolvedURL("~charmers/trusty/wordpress-1", 1), nil, params.StableChannel) @@ -1130,7 +1177,7 @@ }, }) - // The development-channel entity should still see the development ACLS. + // The edge-channel entity should still see the edge ACLS. 
httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.srv, Do: bakeryDo(nil), @@ -1147,10 +1194,18 @@ Read: []string{"bob"}, Write: []string{"admin"}, }, - params.DevelopmentChannel: { + params.EdgeChannel: { Read: []string{"bob", "charlie"}, Write: []string{"charmers"}, }, + params.BetaChannel: { + Read: []string{"charmers"}, + Write: []string{"charmers"}, + }, + params.CandidateChannel: { + Read: []string{"charmers"}, + Write: []string{"charmers"}, + }, params.StableChannel: { Read: []string{"charmers"}, Write: []string{"doris"}, @@ -1169,10 +1224,18 @@ Read: []string{"bob"}, Write: []string{"admin"}, }, - params.DevelopmentChannel: { + params.EdgeChannel: { Read: []string{"bob", "charlie"}, Write: []string{"charmers"}, }, + params.BetaChannel: { + Read: []string{"charmers"}, + Write: []string{"charmers"}, + }, + params.CandidateChannel: { + Read: []string{"charmers"}, + Write: []string{"charmers"}, + }, params.StableChannel: { Read: []string{"bob", params.Everyone}, Write: []string{"doris"}, @@ -1191,10 +1254,18 @@ Read: []string{"bob"}, Write: []string{"admin"}, }, - params.DevelopmentChannel: { + params.EdgeChannel: { Read: []string{"bob", "charlie"}, Write: []string{"charmers"}, }, + params.BetaChannel: { + Read: []string{"charmers"}, + Write: []string{"charmers"}, + }, + params.CandidateChannel: { + Read: []string{"charmers"}, + Write: []string{"charmers"}, + }, params.StableChannel: { Read: []string{"bob", params.Everyone}, Write: []string{"doris"}, @@ -1229,10 +1300,18 @@ Read: []string{"bob"}, Write: []string{"admin"}, }, - params.DevelopmentChannel: { + params.EdgeChannel: { Read: []string{"bob", "charlie"}, Write: []string{"charmers"}, }, + params.BetaChannel: { + Read: []string{"charmers"}, + Write: []string{"charmers"}, + }, + params.CandidateChannel: { + Read: []string{"charmers"}, + Write: []string{"charmers"}, + }, params.StableChannel: { Read: []string{}, Write: []string{}, @@ -1249,10 +1328,18 @@ Read: []string{"bob"}, Write: 
[]string{"admin"}, }, - params.DevelopmentChannel: { + params.EdgeChannel: { Read: []string{"bob", "charlie"}, Write: []string{"charmers"}, }, + params.BetaChannel: { + Read: []string{"charmers"}, + Write: []string{"charmers"}, + }, + params.CandidateChannel: { + Read: []string{"charmers"}, + Write: []string{"charmers"}, + }, params.StableChannel: { Read: []string{"bob"}, Write: []string{"admin"}, @@ -1271,10 +1358,18 @@ Read: []string{"bob"}, Write: []string{"admin"}, }, - params.DevelopmentChannel: { + params.EdgeChannel: { Read: []string{"bob", "charlie"}, Write: []string{"charmers"}, }, + params.BetaChannel: { + Read: []string{"charmers"}, + Write: []string{"charmers"}, + }, + params.CandidateChannel: { + Read: []string{"charmers"}, + Write: []string{"charmers"}, + }, params.StableChannel: { Read: []string{"joe"}, Write: []string{}, @@ -1297,11 +1392,11 @@ Read: []string{"foo"}, Write: []string{"bar"}, }, - URL: storeURL("trusty/wordpress-1/meta/perm?channel=development"), + URL: storeURL("trusty/wordpress-1/meta/perm?channel=edge"), ExpectStatus: http.StatusNotFound, ExpectBody: params.Error{ Code: params.ErrNotFound, - Message: `cs:trusty/wordpress-1 not found in development channel`, + Message: `cs:trusty/wordpress-1 not found in edge channel`, }, }) }) @@ -1313,7 +1408,7 @@ Read: []string{"bob"}, Write: []string{"admin"}, }) - s.assertGet(c, "wordpress/meta/perm?channel=development", params.PermResponse{ + s.assertGet(c, "wordpress/meta/perm?channel=edge", params.PermResponse{ Read: []string{"bob", "charlie"}, Write: []string{"charmers"}, }) @@ -1326,11 +1421,11 @@ Do: bakeryDo(nil), Method: "PUT", JSONBody: []string{"arble"}, - URL: storeURL("trusty/wordpress-1/meta/perm/read?channel=development"), + URL: storeURL("trusty/wordpress-1/meta/perm/read?channel=edge"), ExpectStatus: http.StatusNotFound, ExpectBody: params.Error{ Code: params.ErrNotFound, - Message: `cs:trusty/wordpress-1 not found in development channel`, + Message: `cs:trusty/wordpress-1 not 
found in edge channel`, }, }) }) @@ -1339,10 +1434,18 @@ Read: []string{"bob"}, Write: []string{"admin"}, }, - params.DevelopmentChannel: { + params.EdgeChannel: { Read: []string{"bob", "charlie"}, Write: []string{"charmers"}, }, + params.BetaChannel: { + Read: []string{"charmers"}, + Write: []string{"charmers"}, + }, + params.CandidateChannel: { + Read: []string{"charmers"}, + Write: []string{"charmers"}, + }, params.StableChannel: { Read: []string{"joe"}, Write: []string{"bob"}, @@ -2153,7 +2256,7 @@ s.addPublicCharmFromRepo(c, "wordpress", newResolvedURL("cs:~charmers/trusty/wordpress-47", 47)) err := s.store.AddCharmWithArchive(newResolvedURL("cs:~charmers/trusty/wordpress-48", 48), storetesting.NewCharm(nil)) c.Assert(err, gc.IsNil) - err = s.store.Publish(newResolvedURL("cs:~charmers/trusty/wordpress-48", 48), nil, params.DevelopmentChannel) + err = s.store.Publish(newResolvedURL("cs:~charmers/trusty/wordpress-48", 48), nil, params.EdgeChannel) c.Assert(err, gc.IsNil) s.addPublicCharmFromRepo(c, "multi-series", newResolvedURL("cs:~charmers/wordpress-5", 49)) @@ -2600,9 +2703,6 @@ if !storetesting.MongoJSEnabled() { c.Skip("MongoDB JavaScript not available") } - // TODO (frankban): remove this call when removing the legacy counts logic. - patchLegacyDownloadCountsEnabled(s.AddCleanup, false) - today := time.Now() for i, test := range metaStatsTests { c.Logf("test %d: %s", i, test.about) @@ -2652,78 +2752,78 @@ } } -var metaStatsWithLegacyDownloadCountsTests = []struct { - about string - count string - expectValue int64 - expectError string -}{{ - about: "no extra-info", -}, { - about: "zero downloads", - count: "0", -}, { - about: "some downloads", - count: "47", - expectValue: 47, -}, { - about: "invalid value", - count: "invalid", - expectError: "cannot unmarshal extra-info value: invalid character 'i' looking for beginning of value", -}} - -// Tests meta/stats with LegacyDownloadCountsEnabled set to true. 
-// TODO (frankban): remove this test case when removing the legacy counts -// logic. -func (s *APISuite) TestMetaStatsWithLegacyDownloadCounts(c *gc.C) { - patchLegacyDownloadCountsEnabled(s.AddCleanup, true) - id, _ := s.addPublicCharmFromRepo(c, "wordpress", newResolvedURL("~charmers/utopic/wordpress-42", 42)) - url := storeURL("utopic/wordpress-42/meta/stats") +func (s *APISuite) TestMetaStatsWhenChangedtoMultiSeries(c *gc.C) { + if !storetesting.MongoJSEnabled() { + c.Skip("MongoDB JavaScript not available") + } + today := time.Now() + url := &router.ResolvedURL{ + URL: *charm.MustParseURL("utopic/django-0"), + PromulgatedRevision: -1, + } + url.URL.User = "charmers" + url.PromulgatedRevision = url.URL.Revision - for i, test := range metaStatsWithLegacyDownloadCountsTests { - c.Logf("test %d: %s", i, test.about) + // Add the required entities to the database. + s.addPublicCharmFromRepo(c, "wordpress", url) - // Update the entity extra info if required. - if test.count != "" { - extraInfo := map[string][]byte{ - params.LegacyDownloadStats: []byte(test.count), - } - err := s.store.UpdateEntity(id, bson.D{{ - "$set", bson.D{{"extrainfo", extraInfo}}, - }}) + // Simulate the entity was downloaded at the specified dates. + downloadsPerDay := map[int]int{2: 5} + for daysAgo, downloads := range downloadsPerDay { + date := today.AddDate(0, 0, -daysAgo) + key := []string{params.StatsArchiveDownload, url.URL.Series, url.URL.Name, url.URL.User, strconv.Itoa(url.URL.Revision)} + for i := 0; i < downloads; i++ { + err := s.store.IncCounterAtTime(key, date) c.Assert(err, gc.IsNil) } - - var expectBody interface{} - var expectStatus int - if test.expectError == "" { - // Ensure the downloads count is correctly returned. 
- expectBody = params.StatsResponse{ - ArchiveDownloadCount: test.expectValue, - ArchiveDownload: params.StatsCount{ - Total: test.expectValue, - }, - ArchiveDownloadAllRevisions: params.StatsCount{ - Total: test.expectValue, - }, - } - expectStatus = http.StatusOK - } else { - // Ensure an error is returned. - expectBody = params.Error{ - Message: test.expectError, + if url.PromulgatedRevision > -1 { + key := []string{params.StatsArchiveDownloadPromulgated, url.URL.Series, url.URL.Name, "", strconv.Itoa(url.PromulgatedRevision)} + for i := 0; i < downloads; i++ { + err := s.store.IncCounterAtTime(key, date) + c.Assert(err, gc.IsNil) } - expectStatus = http.StatusInternalServerError } + } + expectResponse := params.StatsResponse{ + ArchiveDownloadCount: 5, + ArchiveDownload: params.StatsCount{ + Total: 5, + Week: 5, + Month: 5, + }, + ArchiveDownloadAllRevisions: params.StatsCount{ + Total: 5, + Week: 5, + Month: 5, + }, + } + s.assertGet(c, "utopic/django-0/meta/stats", expectResponse) - // Perform the request. - httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ - Handler: s.srv, - URL: url, - ExpectStatus: expectStatus, - ExpectBody: expectBody, - }) + // change it to a multiseries charm. + s.addPublicCharmFromRepo(c, "multi-series", newResolvedURL("cs:~charmers/django-1", 1)) + + // Check we get the counts from previous revision on utopic. + expectResponse = params.StatsResponse{ + ArchiveDownloadCount: 0, + ArchiveDownload: params.StatsCount{ + Total: 0, + Week: 0, + Month: 0, + }, + ArchiveDownloadAllRevisions: params.StatsCount{ + Total: 5, + Week: 5, + Month: 5, + }, } + s.assertGet(c, "django-1/meta/stats", expectResponse) + + // Clean up the collections. 
+ _, err := s.store.DB.Entities().RemoveAll(nil) + c.Assert(err, gc.IsNil) + _, err = s.store.DB.StatCounters().RemoveAll(nil) + c.Assert(err, gc.IsNil) + } type publishSpec struct { @@ -2914,7 +3014,7 @@ expectStatus: http.StatusBadRequest, expectBody: params.Error{ Code: params.ErrBadRequest, - Message: `cannot unmarshal publish request body: cannot unmarshal into field: unexpected content type text/invalid; want application/json; content: "{\"Channels\":[\"development\"]}"`, + Message: `cannot unmarshal publish request body: cannot unmarshal into field: unexpected content type text/invalid; want application/json; content: "{\"Channels\":[\"edge\"]}"`, }, }, { about: "invalid body", @@ -2954,7 +3054,7 @@ }), expectStatus: http.StatusBadRequest, expectBody: params.Error{ - Message: `cannot publish to "bad"`, + Message: `unrecognized channel "bad"`, Code: params.ErrBadRequest, }, }, { @@ -2966,7 +3066,7 @@ }), expectStatus: http.StatusBadRequest, expectBody: params.Error{ - Message: `cannot publish to ""`, + Message: `cannot publish to an empty channel`, Code: params.ErrBadRequest, }, }, { @@ -2978,7 +3078,7 @@ }), expectStatus: http.StatusBadRequest, expectBody: params.Error{ - Message: `cannot publish to "unpublished"`, + Message: `cannot publish to the unpublished channel`, Code: params.ErrBadRequest, }, }, { @@ -3009,7 +3109,7 @@ body := test.body if body == "" { body = mustMarshalJSON(params.PublishRequest{ - Channels: []params.Channel{params.DevelopmentChannel}, + Channels: []params.Channel{params.EdgeChannel}, }) } httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ @@ -3044,7 +3144,7 @@ Read: []string{"bob"}, Write: []string{"bob"}, }, - params.DevelopmentChannel: { + params.EdgeChannel: { Read: []string{"bob"}, Write: []string{"bob"}, }, @@ -3053,7 +3153,7 @@ Write: []string{"bob"}, }, }, - channels: []params.Channel{"development"}, + channels: []params.Channel{"edge"}, }, { about: "all perms allow bob; publish to several channels", acls: 
map[params.Channel]mongodoc.ACL{ @@ -3061,7 +3161,7 @@ Read: []string{"bob"}, Write: []string{"bob"}, }, - params.DevelopmentChannel: { + params.EdgeChannel: { Read: []string{"bob"}, Write: []string{"bob"}, }, @@ -3070,12 +3170,12 @@ Write: []string{"bob"}, }, }, - channels: []params.Channel{"development", "stable"}, + channels: []params.Channel{"edge", "stable"}, }, { about: "publish on an entity without perms on its current channel", acls: map[params.Channel]mongodoc.ACL{ params.UnpublishedChannel: {}, - params.DevelopmentChannel: { + params.EdgeChannel: { Read: []string{"bob"}, Write: []string{"bob"}, }, @@ -3084,7 +3184,7 @@ Write: []string{"bob"}, }, }, - channels: []params.Channel{"development"}, + channels: []params.Channel{"edge"}, }, { about: "publish on channels without access", acls: map[params.Channel]mongodoc.ACL{ @@ -3092,7 +3192,7 @@ Read: []string{"everyone"}, Write: []string{"everyone"}, }, - params.DevelopmentChannel: { + params.EdgeChannel: { Read: []string{"alice"}, Write: []string{"alice"}, }, @@ -3101,13 +3201,13 @@ Write: []string{"everyone"}, }, }, - channels: []params.Channel{"development"}, + channels: []params.Channel{"edge"}, expectError: true, }, { about: "publish on several channels without access to all", acls: map[params.Channel]mongodoc.ACL{ params.UnpublishedChannel: {}, - params.DevelopmentChannel: { + params.EdgeChannel: { Read: []string{"bob"}, Write: []string{"bob"}, }, @@ -3116,7 +3216,7 @@ Write: []string{"alice"}, }, }, - channels: []params.Channel{"development", "stable"}, + channels: []params.Channel{"edge", "stable"}, expectError: true, }} @@ -3184,14 +3284,14 @@ c.Assert(err, gc.IsNil) s.uploadResource(c, id0, "someResource", "stuff 0") s.uploadResource(c, id0, "someResource", "stuff 1") - err = s.store.Publish(id0, map[string]int{"someResource": 0}, params.DevelopmentChannel, params.StableChannel) + err = s.store.Publish(id0, map[string]int{"someResource": 0}, params.EdgeChannel, params.StableChannel) c.Assert(err, 
gc.IsNil) // Add an unpublished entity. err = s.store.AddCharmWithArchive(newResolvedURL("cs:~bob/precise/wordpress-1", -1), storetesting.NewCharm(meta)) c.Assert(err, gc.IsNil) - // Publish it to the development channel. + // Publish it to the edge channel. httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.srv, Method: "PUT", @@ -3201,7 +3301,7 @@ Resources: map[string]int{ "someResource": 1, }, - Channels: []params.Channel{params.DevelopmentChannel}, + Channels: []params.Channel{params.EdgeChannel}, }, }) @@ -3220,14 +3320,14 @@ }) } assertResolvesTo(params.UnpublishedChannel, 1) - assertResolvesTo(params.DevelopmentChannel, 1) + assertResolvesTo(params.EdgeChannel, 1) assertResolvesTo(params.StableChannel, 0) assertResolvesTo(params.NoChannel, 0) // Check that the associated resource has actually been published. httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ Handler: s.srv, - URL: storeURL("~bob/precise/wordpress/meta/resources?channel=development"), + URL: storeURL("~bob/precise/wordpress/meta/resources?channel=edge"), Do: bakeryDo(nil), ExpectBody: []params.Resource{{ Name: "someResource", @@ -3318,7 +3418,7 @@ channel: params.StableChannel, }, { id: newResolvedURL("~charmers/precise/wordpress-1", 1), - channel: params.DevelopmentChannel, + channel: params.EdgeChannel, }, { id: newResolvedURL("~charmers/precise/wordpress-2", 2), channel: params.UnpublishedChannel, @@ -3342,7 +3442,7 @@ expectURL: "cs:precise/wordpress-0", }, { url: "wordpress", - channel: params.DevelopmentChannel, + channel: params.EdgeChannel, expectURL: "cs:precise/wordpress-1", }, { url: "wordpress", @@ -4089,10 +4189,10 @@ return mongodoc.ACL{}, err } ch := params.UnpublishedChannel - if e.Stable { + if e.Published[params.StableChannel] { ch = params.StableChannel - } else if e.Development { - ch = params.DevelopmentChannel + } else if e.Published[params.EdgeChannel] { + ch = params.EdgeChannel } return be.ChannelACLs[ch], nil } diff -Nru 
juju-core-2.0~beta15/src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/archive.go juju-core-2.0.0/src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/archive.go --- juju-core-2.0~beta15/src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/archive.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/archive.go 2016-10-13 14:32:25.000000000 +0000 @@ -125,6 +125,7 @@ setArchiveCacheControl(w.Header(), h.isPublic(id)) header.Set(params.ContentHashHeader, blob.Hash) header.Set(params.EntityIdHeader, id.PreferredURL().String()) + header.Set("Content-Disposition", "attachment; filename="+id.PreferredURL().Name+".zip") if StatsEnabled(req) { h.Store.IncrementDownloadCountsAsync(id) @@ -182,9 +183,7 @@ PromulgatedId: oldURL.PromulgatedURL(), }) } - rid := &router.ResolvedURL{ - URL: *id.WithChannel(""), - } + rid := &router.ResolvedURL{URL: *id} // Choose the next revision number for the upload. if oldURL == nil { rid.URL.Revision = 0 @@ -247,13 +246,13 @@ var chans []params.Channel for _, c := range req.Form["channel"] { c := params.Channel(c) - if c != params.DevelopmentChannel && c != params.StableChannel { + if !params.ValidChannels[c] || c == params.UnpublishedChannel { return badRequestf(nil, "cannot put entity into channel %q", c) } chans = append(chans, c) } rid := &router.ResolvedURL{ - URL: *id.WithChannel(""), + URL: *id, PromulgatedRevision: -1, } // Get the PromulgatedURL from the request parameters. 
When ingesting diff -Nru juju-core-2.0~beta15/src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/archive_test.go juju-core-2.0.0/src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/archive_test.go --- juju-core-2.0~beta15/src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/archive_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/archive_test.go 2016-10-13 14:32:25.000000000 +0000 @@ -88,6 +88,7 @@ ch.Bytes(), ) c.Assert(rec.Header().Get(params.EntityIdHeader), gc.Equals, "cs:~charmers/precise/wordpress-0") + c.Assert(rec.Header().Get("Content-Disposition"), gc.Equals, "attachment; filename=wordpress.zip") assertCacheControl(c, rec.Header(), true) // Check that the HTTP range logic is plugged in OK. If this @@ -345,7 +346,7 @@ // by closing errorBodies. try := make(chan struct{}) go func(try chan struct{}) { - for _ = range try { + for range try { var wg sync.WaitGroup for p := 0; p < 5; p++ { wg.Add(1) @@ -482,9 +483,9 @@ } func (s *ArchiveSuite) TestPutCharmWithChannel(c *gc.C) { - s.assertUploadCharm(c, "PUT", newResolvedURL("~charmers/precise/juju-gui-0", -1), "wordpress", []params.Channel{params.DevelopmentChannel}) + s.assertUploadCharm(c, "PUT", newResolvedURL("~charmers/precise/juju-gui-0", -1), "wordpress", []params.Channel{params.EdgeChannel}) s.assertUploadCharm(c, "PUT", newResolvedURL("~charmers/precise/juju-gui-1", -1), "wordpress", []params.Channel{params.StableChannel}) - s.assertUploadCharm(c, "PUT", newResolvedURL("~charmers/precise/juju-gui-2", -1), "wordpress", []params.Channel{params.StableChannel, params.DevelopmentChannel}) + s.assertUploadCharm(c, "PUT", newResolvedURL("~charmers/precise/juju-gui-2", -1), "wordpress", []params.Channel{params.StableChannel, params.EdgeChannel}) } func (s *ArchiveSuite) TestPutCharmWithInvalidChannel(c *gc.C) { @@ -494,7 +495,7 @@ charm.MustParseURL("~charmers/saucy/juju-gui-0"), nil, "wordpress", - 
[]params.Channel{params.DevelopmentChannel, "bad"}, + []params.Channel{params.EdgeChannel, "bad"}, http.StatusBadRequest, params.Error{ Message: `invalid channel "bad" specified in request`, @@ -819,9 +820,9 @@ // Here we exercise both bundle internal verification (bad relation) and // validation with respect to charms (wordpress and mysql are missing). expectErr := `bundle verification failed: [` + - `"relation [\"foo:db\" \"mysql:server\"] refers to application \"foo\" not defined in this bundle",` + `"application \"mysql\" refers to non-existent charm \"mysql\"",` + - `"application \"wordpress\" refers to non-existent charm \"wordpress\""]` + `"application \"wordpress\" refers to non-existent charm \"wordpress\"",` + + `"relation [\"foo:db\" \"mysql:server\"] refers to application \"foo\" not defined in this bundle"]` s.assertCannotUpload(c, "~charmers/bundle/wordpress", f, http.StatusBadRequest, params.ErrInvalidEntity, expectErr) } @@ -1044,12 +1045,7 @@ for _, ch := range p.chans { expectChans[ch] = true } - - for _, ch := range []params.Channel{ - params.UnpublishedChannel, - params.DevelopmentChannel, - params.StableChannel, - } { + for _, ch := range params.OrderedChannels { _, err := s.store.FindBestEntity(&p.id.URL, ch, nil) if expectChans[ch] { c.Assert(err, gc.IsNil) @@ -1066,8 +1062,13 @@ c.Assert(entity.BlobHash256, gc.Equals, hash256Sum) } c.Assert(entity.PromulgatedURL, gc.DeepEquals, p.id.PromulgatedURL()) - c.Assert(entity.Development, gc.Equals, expectChans[params.DevelopmentChannel]) - c.Assert(entity.Stable, gc.Equals, expectChans[params.StableChannel]) + + delete(expectChans, params.UnpublishedChannel) + if len(expectChans) == 0 { + c.Assert(entity.Published, gc.IsNil) + } else { + c.Assert(entity.Published, gc.DeepEquals, expectChans) + } // Test that the expected entry has been created // in the blob store. 
@@ -1645,8 +1646,6 @@ func (s *ArchiveSearchSuite) SetUpTest(c *gc.C) { s.commonSuite.SetUpTest(c) - // TODO (frankban): remove this call when removing the legacy counts logic. - patchLegacyDownloadCountsEnabled(s.AddCleanup, false) } func (s *ArchiveSearchSuite) TestGetSearchUpdate(c *gc.C) { diff -Nru juju-core-2.0~beta15/src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/auth.go juju-core-2.0.0/src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/auth.go --- juju-core-2.0~beta15/src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/auth.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/auth.go 2016-10-13 14:32:25.000000000 +0000 @@ -420,11 +420,11 @@ return authorization{}, h.newDischargeRequiredError(newm, errgo.New("active lifetime expired; renew macaroon"), p.req, false) } -// entityACLs calculates the ACLs for the specified entity. If the entity -// has been published to the stable channel then the StableChannel ACLs will be -// used; if the entity has been published to development, but not stable -// then the DevelopmentChannel ACLs will be used; otherwise -// the unpublished ACLs are used. +// entityACLs calculates the ACLs for the specified entity. If the channel has +// been specified via the "?channel=" query then the corresponding channel ACLs +// are used. Otherwise, if the entity has been published to a channel then ACLs +// for that channel are used, in this order: StableChannel, EdgeChannel. If the +// entity was never published, the unpublished ACLs are used. func (h *ReqHandler) entityACLs(id *router.ResolvedURL) (mongodoc.ACL, error) { ch, err := h.entityChannel(id) if err != nil { @@ -536,28 +536,24 @@ // mentioned a channel, that channel is used; otherwise // a channel will be selected from the channels that the // entity has been published to: in order of preference, -// stable, development and unpublished. +// stable, edge and unpublished. 
func (h *ReqHandler) entityChannel(id *router.ResolvedURL) (params.Channel, error) { if h.Store.Channel != params.NoChannel { return h.Store.Channel, nil } - entity, err := h.Cache.Entity(&id.URL, charmstore.FieldSelector("development", "stable")) + entity, err := h.Cache.Entity(&id.URL, charmstore.FieldSelector("published")) if err != nil { if errgo.Cause(err) == params.ErrNotFound { return params.NoChannel, errgo.WithCausef(nil, params.ErrNotFound, "entity %q not found", id) } return params.NoChannel, errgo.Notef(err, "cannot retrieve entity %q for authorization", id) } - var ch params.Channel - switch { - case entity.Stable: - ch = params.StableChannel - case entity.Development: - ch = params.DevelopmentChannel - default: - ch = params.UnpublishedChannel + for _, ch := range params.OrderedChannels { + if entity.Published[ch] { + return ch, nil + } } - return ch, nil + return params.UnpublishedChannel, nil } // newMacaroon returns a new macaroon that allows only the given operations diff -Nru juju-core-2.0~beta15/src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/auth_test.go juju-core-2.0.0/src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/auth_test.go --- juju-core-2.0~beta15/src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/auth_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/auth_test.go 2016-10-13 14:32:25.000000000 +0000 @@ -194,8 +194,8 @@ // unpublishedReadPerm stores a list of users with read permissions on // on the unpublished entities. unpublishedReadPerm []string - // developmentReadPerm stores a list of users with read permissions on the development channel. - developmentReadPerm []string + // edgeReadPerm stores a list of users with read permissions on the edge channel. + edgeReadPerm []string // stableReadPerm stores a list of users with read permissions on the stable channel. stableReadPerm []string // channels contains a list of channels, to which the entity belongs. 
@@ -282,18 +282,18 @@ Message: `unauthorized: access denied for user "kirk"`, }, }, { - about: "access provided through development channel", + about: "access provided through edge channel", username: "kirk", groups: []string{"group1", "group2", "group3"}, unpublishedReadPerm: []string{"picard", "sisko", "group42", "group47"}, - developmentReadPerm: []string{"group1"}, - channels: []params.Channel{params.DevelopmentChannel}, + edgeReadPerm: []string{"group1"}, + channels: []params.Channel{params.EdgeChannel}, }, { - about: "access provided through development channel, but charm not published", + about: "access provided through edge channel, but charm not published", username: "kirk", groups: []string{"group1", "group2", "group3"}, unpublishedReadPerm: []string{"picard", "sisko", "group42", "group47"}, - developmentReadPerm: []string{"group1"}, + edgeReadPerm: []string{"group1"}, expectStatus: http.StatusUnauthorized, expectBody: params.Error{ Code: params.ErrUnauthorized, @@ -304,31 +304,31 @@ username: "kirk", groups: []string{"group1", "group2", "group3"}, unpublishedReadPerm: []string{"picard", "sisko", "group42", "group47"}, - developmentReadPerm: []string{"group12"}, + edgeReadPerm: []string{"group12"}, stableReadPerm: []string{"group2"}, - channels: []params.Channel{params.DevelopmentChannel, params.StableChannel}, + channels: []params.Channel{params.EdgeChannel, params.StableChannel}, }, { about: "access provided through stable channel, but charm not published", username: "kirk", groups: []string{"group1", "group2", "group3"}, unpublishedReadPerm: []string{"picard", "sisko", "group42", "group47"}, - developmentReadPerm: []string{"group12"}, + edgeReadPerm: []string{"group12"}, stableReadPerm: []string{"group2"}, - channels: []params.Channel{params.DevelopmentChannel}, + channels: []params.Channel{params.EdgeChannel}, expectStatus: http.StatusUnauthorized, expectBody: params.Error{ Code: params.ErrUnauthorized, Message: `unauthorized: access denied for user 
"kirk"`, }, }, { - about: "access provided through development channel, but charm on stable channel", + about: "access provided through edge channel, but charm on stable channel", username: "kirk", groups: []string{"group1", "group2", "group3"}, unpublishedReadPerm: []string{"picard", "sisko", "group42", "group47"}, - developmentReadPerm: []string{"group1"}, + edgeReadPerm: []string{"group1"}, stableReadPerm: []string{"group11"}, channels: []params.Channel{ - params.DevelopmentChannel, + params.EdgeChannel, params.StableChannel, }, expectStatus: http.StatusUnauthorized, @@ -343,7 +343,7 @@ unpublishedReadPerm: []string{"picard", "sisko", "group42", "group1"}, stableReadPerm: []string{"group11"}, channels: []params.Channel{ - params.DevelopmentChannel, + params.EdgeChannel, params.StableChannel, }, expectStatus: http.StatusUnauthorized, @@ -352,13 +352,13 @@ Message: `unauthorized: access denied for user "kirk"`, }, }, { - about: "access provided through unpublished ACL, but charm on development channel", + about: "access provided through unpublished ACL, but charm on edge channel", username: "kirk", groups: []string{"group1", "group2", "group3"}, unpublishedReadPerm: []string{"picard", "sisko", "group42", "group1"}, - developmentReadPerm: []string{"group11"}, + edgeReadPerm: []string{"group11"}, channels: []params.Channel{ - params.DevelopmentChannel, + params.EdgeChannel, }, expectStatus: http.StatusUnauthorized, expectBody: params.Error{ @@ -390,7 +390,7 @@ // Change the ACLs for the testing charm. err = s.store.SetPerms(&rurl.URL, "unpublished.read", test.unpublishedReadPerm...) c.Assert(err, gc.IsNil) - err = s.store.SetPerms(&rurl.URL, "development.read", test.developmentReadPerm...) + err = s.store.SetPerms(&rurl.URL, "edge.read", test.edgeReadPerm...) c.Assert(err, gc.IsNil) err = s.store.SetPerms(&rurl.URL, "stable.read", test.stableReadPerm...) 
c.Assert(err, gc.IsNil) @@ -434,8 +434,8 @@ groups []string // writePerm stores a list of users with write permissions. unpublishedWritePerm []string - // developmentWritePerm stores a list of users with write permissions on the development channel. - developmentWritePerm []string + // edgeWritePerm stores a list of users with write permissions on the edge channel. + edgeWritePerm []string // stableWritePerm stores a list of users with write permissions on the stable channel. stableWritePerm []string // channels contains a list of channels, to which the entity belongs. @@ -509,18 +509,18 @@ Message: `unauthorized: access denied for user "kirk"`, }, }, { - about: "access provided through development channel", + about: "access provided through edge channel", username: "kirk", groups: []string{"group1", "group2", "group3"}, unpublishedWritePerm: []string{"picard", "sisko", "group42", "group47"}, - developmentWritePerm: []string{"group1"}, - channels: []params.Channel{params.DevelopmentChannel}, + edgeWritePerm: []string{"group1"}, + channels: []params.Channel{params.EdgeChannel}, }, { - about: "access provided through development channel, but charm not published", + about: "access provided through edge channel, but charm not published", username: "kirk", groups: []string{"group1", "group2", "group3"}, unpublishedWritePerm: []string{"picard", "sisko", "group42", "group47"}, - developmentWritePerm: []string{"group1"}, + edgeWritePerm: []string{"group1"}, expectStatus: http.StatusUnauthorized, expectBody: params.Error{ Code: params.ErrUnauthorized, @@ -531,31 +531,31 @@ username: "kirk", groups: []string{"group1", "group2", "group3"}, unpublishedWritePerm: []string{"picard", "sisko", "group42", "group47"}, - developmentWritePerm: []string{"group12"}, + edgeWritePerm: []string{"group12"}, stableWritePerm: []string{"group2"}, - channels: []params.Channel{params.DevelopmentChannel, params.StableChannel}, + channels: []params.Channel{params.EdgeChannel, 
params.StableChannel}, }, { about: "access provided through stable channel, but charm not published", username: "kirk", groups: []string{"group1", "group2", "group3"}, unpublishedWritePerm: []string{"picard", "sisko", "group42", "group47"}, - developmentWritePerm: []string{"group12"}, + edgeWritePerm: []string{"group12"}, stableWritePerm: []string{"group2"}, - channels: []params.Channel{params.DevelopmentChannel}, + channels: []params.Channel{params.EdgeChannel}, expectStatus: http.StatusUnauthorized, expectBody: params.Error{ Code: params.ErrUnauthorized, Message: `unauthorized: access denied for user "kirk"`, }, }, { - about: "access provided through development channel, but charm on stable channel", + about: "access provided through edge channel, but charm on stable channel", username: "kirk", groups: []string{"group1", "group2", "group3"}, unpublishedWritePerm: []string{"picard", "sisko", "group42", "group47"}, - developmentWritePerm: []string{"group1"}, + edgeWritePerm: []string{"group1"}, stableWritePerm: []string{"group11"}, channels: []params.Channel{ - params.DevelopmentChannel, + params.EdgeChannel, params.StableChannel, }, expectStatus: http.StatusUnauthorized, @@ -570,7 +570,7 @@ unpublishedWritePerm: []string{"picard", "sisko", "group42", "group1"}, stableWritePerm: []string{"group11"}, channels: []params.Channel{ - params.DevelopmentChannel, + params.EdgeChannel, params.StableChannel, }, expectStatus: http.StatusUnauthorized, @@ -579,13 +579,13 @@ Message: `unauthorized: access denied for user "kirk"`, }, }, { - about: "access provided through unpublished ACL, but charm on development channel", + about: "access provided through unpublished ACL, but charm on edge channel", username: "kirk", groups: []string{"group1", "group2", "group3"}, unpublishedWritePerm: []string{"picard", "sisko", "group42", "group1"}, - developmentWritePerm: []string{"group11"}, + edgeWritePerm: []string{"group11"}, channels: []params.Channel{ - params.DevelopmentChannel, + 
params.EdgeChannel, }, expectStatus: http.StatusUnauthorized, expectBody: params.Error{ @@ -617,7 +617,7 @@ // Change the ACLs for the testing charm. err = s.store.SetPerms(&rurl.URL, "unpublished.write", test.unpublishedWritePerm...) c.Assert(err, gc.IsNil) - err = s.store.SetPerms(&rurl.URL, "development.write", test.developmentWritePerm...) + err = s.store.SetPerms(&rurl.URL, "edge.write", test.edgeWritePerm...) c.Assert(err, gc.IsNil) err = s.store.SetPerms(&rurl.URL, "stable.write", test.stableWritePerm...) c.Assert(err, gc.IsNil) diff -Nru juju-core-2.0~beta15/src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/content.go juju-core-2.0.0/src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/content.go --- juju-core-2.0~beta15/src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/content.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/content.go 2016-10-13 14:32:25.000000000 +0000 @@ -16,7 +16,7 @@ "gopkg.in/errgo.v1" "gopkg.in/juju/charm.v6-unstable" "gopkg.in/juju/charmrepo.v2-unstable/csclient/params" - "gopkg.in/juju/jujusvg.v1" + "gopkg.in/juju/jujusvg.v2" "gopkg.in/juju/charmstore.v5-unstable/internal/charmstore" "gopkg.in/juju/charmstore.v5-unstable/internal/mongodoc" diff -Nru juju-core-2.0~beta15/src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/list.go juju-core-2.0.0/src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/list.go --- juju-core-2.0~beta15/src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/list.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/list.go 2016-10-13 14:32:25.000000000 +0000 @@ -18,6 +18,7 @@ // https://github.com/juju/charmstore/blob/v4/docs/API.md#get-list func (h *ReqHandler) serveList(_ http.Header, req *http.Request) (interface{}, error) { sp, err := ParseSearchParams(req) + sp.AutoComplete = false if err != nil { return "", err } diff -Nru 
juju-core-2.0~beta15/src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/relations_test.go juju-core-2.0.0/src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/relations_test.go --- juju-core-2.0~beta15/src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/relations_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/relations_test.go 2016-10-13 14:32:25.000000000 +0000 @@ -229,7 +229,6 @@ Meta: map[string]interface{}{ "id-name": params.IdNameResponse{"wordpress"}, "charm-metadata": &charm.Meta{ - Format: 1, Provides: map[string]charm.Relation{ "website": { Name: "website", @@ -261,7 +260,7 @@ about: "don't show charms if you don't have perms for 'em", charms: metaCharmRelatedCharms, readACLs: map[string][]string{ - "~charmers/memcached": []string{"noone"}, + "~charmers/memcached": {"noone"}, }, id: "utopic/wordpress-0", expectBody: params.RelatedResponse{ @@ -701,7 +700,7 @@ panic(fmt.Sprintf("resolved URL %q does not contain user", urlStr)) } return &router.ResolvedURL{ - URL: *url.WithChannel(""), + URL: *url, PromulgatedRevision: promRev, } } diff -Nru juju-core-2.0~beta15/src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/search.go juju-core-2.0.0/src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/search.go --- juju-core-2.0~beta15/src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/search.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/search.go 2016-10-13 14:32:25.000000000 +0000 @@ -110,6 +110,7 @@ // ParseSearchParms extracts the search paramaters from the request func ParseSearchParams(req *http.Request) (charmstore.SearchParams, error) { sp := charmstore.SearchParams{} + sp.AutoComplete = true var err error for k, v := range req.Form { switch k { diff -Nru juju-core-2.0~beta15/src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/search_test.go 
juju-core-2.0.0/src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/search_test.go --- juju-core-2.0~beta15/src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/search_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/search_test.go 2016-10-13 14:32:25.000000000 +0000 @@ -89,14 +89,23 @@ }{{ about: "bare search", query: "", + expectParams: charmstore.SearchParams{ + AutoComplete: true, + }, }, { about: "text search", - query: "text=test", + query: "text=test&autocomplete=0", expectParams: charmstore.SearchParams{ Text: "test", }, }, { - about: "autocomplete", + about: "autocomplete=0", + query: "autocomplete=0", + expectParams: charmstore.SearchParams{ + AutoComplete: false, + }, + }, { + about: "autocomplete=1", query: "autocomplete=1", expectParams: charmstore.SearchParams{ AutoComplete: true, @@ -107,39 +116,39 @@ expectError: `invalid autocomplete parameter: unexpected bool value "true" (must be "0" or "1")`, }, { about: "limit", - query: "limit=20", + query: "limit=20&autocomplete=0", expectParams: charmstore.SearchParams{ Limit: 20, }, }, { about: "invalid limit", - query: "limit=twenty", + query: "limit=twenty&autocomplete=0", expectError: `invalid limit parameter: could not parse integer: strconv.ParseInt: parsing "twenty": invalid syntax`, }, { about: "limit too low", - query: "limit=-1", + query: "limit=-1&autocomplete=0", expectError: "invalid limit parameter: expected integer greater than zero", }, { about: "include", - query: "include=archive-size", + query: "include=archive-size&autocomplete=0", expectParams: charmstore.SearchParams{ Include: []string{"archive-size"}, }, }, { about: "include many", - query: "include=archive-size&include=bundle-data", + query: "include=archive-size&include=bundle-data&autocomplete=0", expectParams: charmstore.SearchParams{ Include: []string{"archive-size", "bundle-data"}, }, }, { about: "include many with blanks", - query: 
"include=archive-size&include=&include=bundle-data", + query: "include=archive-size&include=&include=bundle-data&autocomplete=0", expectParams: charmstore.SearchParams{ Include: []string{"archive-size", "bundle-data"}, }, }, { about: "description filter", - query: "description=text", + query: "description=text&autocomplete=0", expectParams: charmstore.SearchParams{ Filters: map[string][]string{ "description": {"text"}, @@ -147,7 +156,7 @@ }, }, { about: "name filter", - query: "name=text", + query: "name=text&autocomplete=0", expectParams: charmstore.SearchParams{ Filters: map[string][]string{ "name": {"text"}, @@ -155,7 +164,7 @@ }, }, { about: "owner filter", - query: "owner=text", + query: "owner=text&autocomplete=0", expectParams: charmstore.SearchParams{ Filters: map[string][]string{ "owner": {"text"}, @@ -163,7 +172,7 @@ }, }, { about: "provides filter", - query: "provides=text", + query: "provides=text&autocomplete=0", expectParams: charmstore.SearchParams{ Filters: map[string][]string{ "provides": {"text"}, @@ -171,7 +180,7 @@ }, }, { about: "requires filter", - query: "requires=text", + query: "requires=text&autocomplete=0", expectParams: charmstore.SearchParams{ Filters: map[string][]string{ "requires": {"text"}, @@ -179,7 +188,7 @@ }, }, { about: "series filter", - query: "series=text", + query: "series=text&autocomplete=0", expectParams: charmstore.SearchParams{ Filters: map[string][]string{ "series": {"text"}, @@ -187,7 +196,7 @@ }, }, { about: "tags filter", - query: "tags=text", + query: "tags=text&autocomplete=0", expectParams: charmstore.SearchParams{ Filters: map[string][]string{ "tags": {"text"}, @@ -195,7 +204,7 @@ }, }, { about: "type filter", - query: "type=text", + query: "type=text&autocomplete=0", expectParams: charmstore.SearchParams{ Filters: map[string][]string{ "type": {"text"}, @@ -203,7 +212,7 @@ }, }, { about: "many filters", - query: "name=name&owner=owner&series=series1&series=series2", + query: 
"name=name&owner=owner&series=series1&series=series2&autocomplete=0", expectParams: charmstore.SearchParams{ Filters: map[string][]string{ "name": {"name"}, @@ -217,7 +226,7 @@ expectError: "invalid parameter: a", }, { about: "skip", - query: "skip=20", + query: "skip=20&autocomplete=0", expectParams: charmstore.SearchParams{ Skip: 20, }, @@ -231,7 +240,7 @@ expectError: "invalid skip parameter: expected non-negative integer", }, { about: "promulgated filter", - query: "promulgated=1", + query: "promulgated=1&autocomplete=0", expectParams: charmstore.SearchParams{ Filters: map[string][]string{ "promulgated": {"1"}, @@ -692,8 +701,6 @@ } func (s *SearchSuite) TestDownloadsBoost(c *gc.C) { - // TODO (frankban): remove this call when removing the legacy counts logic. - patchLegacyDownloadCountsEnabled(s.AddCleanup, false) charmDownloads := map[string]int{ "mysql": 0, "wordpress": 1, @@ -723,18 +730,6 @@ c.Assert(sr.Results[2].Id.Name, gc.Equals, "mysql") } -// TODO(mhilton) remove this test when removing legacy counts logic. -func (s *SearchSuite) TestLegacyStatsUpdatesSearch(c *gc.C) { - patchLegacyDownloadCountsEnabled(s.AddCleanup, true) - doc, err := s.store.ES.GetSearchDocument(charm.MustParseURL("~openstack-charmers/trusty/mysql-7")) - c.Assert(err, gc.IsNil) - c.Assert(doc.TotalDownloads, gc.Equals, int64(0)) - s.assertPutAsAdmin(c, "~openstack-charmers/trusty/mysql-7/meta/extra-info/"+params.LegacyDownloadStats, 57) - doc, err = s.store.ES.GetSearchDocument(charm.MustParseURL("~openstack-charmers/trusty/mysql-7")) - c.Assert(err, gc.IsNil) - c.Assert(doc.TotalDownloads, gc.Equals, int64(57)) -} - func (s *SearchSuite) TestSearchWithAdminCredentials(c *gc.C) { rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ Handler: s.srv, @@ -776,6 +771,25 @@ assertResultSet(c, sr, expected) } +func (s *SearchSuite) TestSearchDoesNotCreateExtraMacaroons(c *gc.C) { + // Ensure that there's a macaroon already in the store + // that can be reused. 
+ _, err := s.store.Bakery.NewMacaroon("", nil, nil) + c.Assert(err, gc.IsNil) + n, err := s.store.DB.Macaroons().Find(nil).Count() + c.Assert(err, gc.IsNil) + c.Assert(n, gc.Equals, 1) + rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ + Handler: s.srv, + URL: storeURL("search"), + Do: s.bakeryDoAsUser(c, "noone"), + }) + c.Assert(rec.Code, gc.Equals, http.StatusOK) + n, err = s.store.DB.Macaroons().Find(nil).Count() + c.Assert(err, gc.IsNil) + c.Assert(n, gc.Equals, 1) +} + func (s *SearchSuite) TestSearchWithUserInGroups(c *gc.C) { s.idM.groups = map[string][]string{ "bob": {"test-user", "test-user2"}, diff -Nru juju-core-2.0~beta15/src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/stats_test.go juju-core-2.0.0/src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/stats_test.go --- juju-core-2.0~beta15/src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/stats_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/stats_test.go 2016-10-13 14:32:25.000000000 +0000 @@ -307,7 +307,7 @@ s.discharge = dischargeForUser("statsupdate") s.idM.groups = map[string][]string{ - "statsupdate": []string{"statsupdate@cs"}, + "statsupdate": {"statsupdate@cs"}, } httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ diff -Nru juju-core-2.0~beta15/src/gopkg.in/juju/charmstore.v5-unstable/Makefile juju-core-2.0.0/src/gopkg.in/juju/charmstore.v5-unstable/Makefile --- juju-core-2.0~beta15/src/gopkg.in/juju/charmstore.v5-unstable/Makefile 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/juju/charmstore.v5-unstable/Makefile 2016-10-13 14:32:25.000000000 +0000 @@ -85,7 +85,7 @@ # Run the charmd server. server: install - charmd -logging-config INFO cmd/charmd/config.yaml + charmd -logging-config '=DEBUG;mgo=INFO;bakery=INFO;httpbakery=INFO' cmd/charmd/config.yaml # Update the project Go dependencies to the required revision. 
deps: $(GOPATH)/bin/godeps diff -Nru juju-core-2.0~beta15/src/gopkg.in/juju/charmstore.v5-unstable/server.go juju-core-2.0.0/src/gopkg.in/juju/charmstore.v5-unstable/server.go --- juju-core-2.0~beta15/src/gopkg.in/juju/charmstore.v5-unstable/server.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/juju/charmstore.v5-unstable/server.go 2016-10-13 14:32:25.000000000 +0000 @@ -10,6 +10,7 @@ "time" "gopkg.in/macaroon-bakery.v1/bakery" + "gopkg.in/macaroon-bakery.v1/bakery/mgostorage" "gopkg.in/mgo.v2" "gopkg.in/natefinch/lumberjack.v2" @@ -50,7 +51,7 @@ return vs } -// ServerParams holds configuration for a new API server. +// ServerParams holds configuration for a new internal API server. type ServerParams struct { // AuthUsername and AuthPassword hold the credentials // used for HTTP basic authentication. @@ -102,6 +103,10 @@ // AuditLogger optionally holds the logger which will be used to // write audit log entries. AuditLogger *lumberjack.Logger + + // RootKeyPolicy holds the default policy used when creating + // macaroon root keys. 
+ RootKeyPolicy mgostorage.Policy } // NewServer returns a new handler that handles charm store requests and stores diff -Nru juju-core-2.0~beta15/src/gopkg.in/juju/charm.v6-unstable/bundledata_test.go juju-core-2.0.0/src/gopkg.in/juju/charm.v6-unstable/bundledata_test.go --- juju-core-2.0~beta15/src/gopkg.in/juju/charm.v6-unstable/bundledata_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/juju/charm.v6-unstable/bundledata_test.go 2016-10-13 14:32:05.000000000 +0000 @@ -384,7 +384,7 @@ `machine "bogus" is not referred to by a placement directive`, `invalid machine id "bogus" found in machines`, `invalid constraints "bad constraints" in machine "0": bad constraint`, - `invalid charm URL in application "mediawiki": charm or bundle URL has invalid schema: "bogus:precise/mediawiki-10"`, + `invalid charm URL in application "mediawiki": cannot parse URL "bogus:precise/mediawiki-10": schema "bogus" not valid`, `charm path in application "riak" does not exist: internal/test-charm-repo/bundle/somepath`, `invalid constraints "bad constraints" in application "mysql": bad constraint`, `negative number of units specified on application "mediawiki"`, diff -Nru juju-core-2.0~beta15/src/gopkg.in/juju/charm.v6-unstable/.gitignore juju-core-2.0.0/src/gopkg.in/juju/charm.v6-unstable/.gitignore --- juju-core-2.0~beta15/src/gopkg.in/juju/charm.v6-unstable/.gitignore 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/juju/charm.v6-unstable/.gitignore 2016-10-13 14:32:05.000000000 +0000 @@ -0,0 +1,5 @@ +tags +TAGS +.emacs* +*.test +*.sw[nop] diff -Nru juju-core-2.0~beta15/src/gopkg.in/juju/charm.v6-unstable/meta.go juju-core-2.0.0/src/gopkg.in/juju/charm.v6-unstable/meta.go --- juju-core-2.0~beta15/src/gopkg.in/juju/charm.v6-unstable/meta.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/juju/charm.v6-unstable/meta.go 2016-10-13 14:32:05.000000000 +0000 @@ -242,7 +242,7 @@ return result } -var validTermName = 
regexp.MustCompile(`^[a-zA-Z][\w-]+$`) +var validTermName = regexp.MustCompile(`^[a-z](-?[a-z0-9]+)+$`) // TermsId represents a single term id. The term can either be owned // or "public" (meaning there is no owner). diff -Nru juju-core-2.0~beta15/src/gopkg.in/juju/charm.v6-unstable/meta_test.go juju-core-2.0.0/src/gopkg.in/juju/charm.v6-unstable/meta_test.go --- juju-core-2.0~beta15/src/gopkg.in/juju/charm.v6-unstable/meta_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/juju/charm.v6-unstable/meta_test.go 2016-10-13 14:32:05.000000000 +0000 @@ -61,14 +61,11 @@ "foobar", "foobar/27", "foo/003", - "owner/Foo-bar", "owner/foobar/27", "owner/foobar", "owner/foo-bar", - "owner/foo_bar", "own-er/foobar", "ibm/j9-jvm/2", - "term_123-23aAf/1", "cs:foobar/27", "cs:foobar", } @@ -103,14 +100,11 @@ terms := []string{ "foobar", "foobar/27", - "owner/Foo-bar", "owner/foobar/27", "owner/foobar", "owner/foo-bar", - "owner/foo_bar", "own-er/foobar", "ibm/j9-jvm/2", - "term_123-23aAf/1", "cs:foobar/27", } for i, term := range terms { @@ -152,11 +146,33 @@ terms: []string{"term/1", "term about a term"}, expectError: `wrong term name format "term about a term"`, }, { - about: "term name must start with lowercase letter", - terms: []string{"Term/1"}, - }, { - about: "term name match the regexp", - terms: []string{"term_123-23aAf/1"}, + about: "term name must start with lowercase letter", + terms: []string{"Term/1"}, + expectError: `wrong term name format "Term"`, + }, { + about: "term name cannot contain capital letters", + terms: []string{"owner/foO-Bar"}, + expectError: `wrong term name format "foO-Bar"`, + }, { + about: "term name cannot contain underscores, that's what dashes are for", + terms: []string{"owner/foo_bar"}, + expectError: `wrong term name format "foo_bar"`, + }, { + about: "term name can't end with a dash", + terms: []string{"o-/1"}, + expectError: `wrong term name format "o-"`, + }, { + about: "term name can't contain consecutive 
dashes", + terms: []string{"o-oo--ooo---o/1"}, + expectError: `wrong term name format "o-oo--ooo---o"`, + }, { + about: "term name more than a single char", + terms: []string{"z/1"}, + expectError: `wrong term name format "z"`, + }, { + about: "term name match the regexp", + terms: []string{"term_123-23aAf/1"}, + expectError: `wrong term name format "term_123-23aAf"`, }, } for i, test := range tests { @@ -214,18 +230,10 @@ term: "term about a term", expectError: `wrong term name format "term about a term"`, }, { - about: "term name may start with an uppercase letter", - term: "Term/1", - expectTerm: charm.TermsId{"", "", "Term", 1}, - }, { about: "term name must not start with a number", term: "1Term/1", expectError: `wrong term name format "1Term"`, }, { - about: "term name match the regexp", - term: "term_123-23aAf/1", - expectTerm: charm.TermsId{"", "", "term_123-23aAf", 1}, - }, { about: "full term with tenant", term: "tenant:owner/term/1", expectTerm: charm.TermsId{"tenant", "owner", "term", 1}, @@ -539,7 +547,7 @@ charmMeta := fmt.Sprintf("%s\nmin-juju-version: ", dummyMetadata) vals := []version.Number{ {Major: 1, Minor: 25}, - {Major: 1, Minor: 25, Tag: "alpha1"}, + {Major: 1, Minor: 25, Tag: "alpha"}, {Major: 1, Minor: 25, Patch: 1}, } for _, ver := range vals { diff -Nru juju-core-2.0~beta15/src/gopkg.in/juju/charm.v6-unstable/metrics.go juju-core-2.0.0/src/gopkg.in/juju/charm.v6-unstable/metrics.go --- juju-core-2.0~beta15/src/gopkg.in/juju/charm.v6-unstable/metrics.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/juju/charm.v6-unstable/metrics.go 2016-10-13 14:32:05.000000000 +0000 @@ -58,14 +58,20 @@ // Metric represents a single metric definition type Metric struct { - Type MetricType - Description string + Type MetricType `yaml:"type"` + Description string `yaml:"description"` +} + +// Plan represents the plan section of metrics.yaml +type Plan struct { + Required bool `yaml:"required,omitempty"` } // Metrics contains the 
metrics declarations encoded in the metrics.yaml // file. type Metrics struct { - Metrics map[string]Metric + Metrics map[string]Metric `yaml:"metrics"` + Plan *Plan `yaml:"plan,omitempty"` } // ReadMetrics reads a MetricsDeclaration in YAML format. @@ -112,3 +118,8 @@ } return metric.Type.validateValue(value) } + +// PlanRequired reports whether these metrics require a plan. +func (m Metrics) PlanRequired() bool { + return m.Plan != nil && m.Plan.Required +} diff -Nru juju-core-2.0~beta15/src/gopkg.in/juju/charm.v6-unstable/metrics_test.go juju-core-2.0.0/src/gopkg.in/juju/charm.v6-unstable/metrics_test.go --- juju-core-2.0~beta15/src/gopkg.in/juju/charm.v6-unstable/metrics_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/juju/charm.v6-unstable/metrics_test.go 2016-10-13 14:32:05.000000000 +0000 @@ -225,3 +225,50 @@ } } } + +func (s *MetricsSuite) TestPlanRequired(c *gc.C) { + tests := []struct { + about string + input string + planRequired bool + }{{ + about: "not specified", + input: ` +metrics: + some-metric: + type: gauge + description: thing +`, + planRequired: false, + }, { + about: "plan optional", + input: ` +plan: + required: false +metrics: +`, + planRequired: false, + }, { + about: "plan required", + input: ` +plan: + required: true +metrics: +`, + planRequired: true, + }, { + about: "not set", + input: ` +plan: +metrics: +`, + planRequired: false, + }, + } + for i, test := range tests { + c.Logf("testplanrequired %d: %s", i, test.about) + metrics, err := charm.ReadMetrics(strings.NewReader(test.input)) + c.Assert(err, gc.IsNil) + c.Assert(metrics.PlanRequired(), gc.Equals, test.planRequired) + } +} diff -Nru juju-core-2.0~beta15/src/gopkg.in/juju/charm.v6-unstable/url.go juju-core-2.0.0/src/gopkg.in/juju/charm.v6-unstable/url.go --- juju-core-2.0~beta15/src/gopkg.in/juju/charm.v6-unstable/url.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/juju/charm.v6-unstable/url.go 2016-10-13 14:32:05.000000000 
+0000 @@ -11,6 +11,8 @@ "strconv" "strings" + "github.com/juju/errors" + "gopkg.in/juju/names.v2" "gopkg.in/mgo.v2/bson" ) @@ -34,40 +36,39 @@ // cs:~joe/development/wordpress // type URL struct { - Schema string // "cs" or "local". - User string // "joe". - Name string // "wordpress". - Revision int // -1 if unset, N otherwise. - Series string // "precise" or "" if unset; "bundle" if it's a bundle. - Channel Channel // "development" or "" if no channel. -} - -var ErrUnresolvedUrl error = fmt.Errorf("charm or bundle url series is not resolved") - -// Channel represents different stages in the development of a charm or bundle. -type Channel string - -const ( - // DevelopmentChannel is the channel used for charms or bundles under - // development. - DevelopmentChannel Channel = "development" -) + Schema string // "cs" or "local". + User string // "joe". + Name string // "wordpress". + Revision int // -1 if unset, N otherwise. + Series string // "precise" or "" if unset; "bundle" if it's a bundle. +} var ( - validSeries = regexp.MustCompile("^[a-z]+([a-z0-9]+)?$") - validName = regexp.MustCompile("^[a-z][a-z0-9]*(-[a-z0-9]*[a-z][a-z0-9]*)*$") + ErrUnresolvedUrl error = fmt.Errorf("charm or bundle url series is not resolved") + validSeries = regexp.MustCompile("^[a-z]+([a-z0-9]+)?$") + validName = regexp.MustCompile("^[a-z][a-z0-9]*(-[a-z0-9]*[a-z][a-z0-9]*)*$") ) +// ValidateSchema returns an error if the schema is invalid. +func ValidateSchema(schema string) error { + if schema != "cs" && schema != "local" { + return errors.NotValidf("schema %q", schema) + } + return nil +} + // IsValidSeries reports whether series is a valid series in charm or bundle // URLs. func IsValidSeries(series string) bool { return validSeries.MatchString(series) } -// IsValidChannel reports whether channel is a valid channel in charm or bundle -// URLs. 
-func IsValidChannel(channel Channel) bool { - return channel == DevelopmentChannel +// ValidateSeries returns an error if the given series is invalid. +func ValidateSeries(series string) error { + if IsValidSeries(series) == false { + return errors.NotValidf("series name %q", series) + } + return nil } // IsValidName reports whether name is a valid charm or bundle name. @@ -75,6 +76,14 @@ return validName.MatchString(name) } +// ValidateName returns an error if the given name is invalid. +func ValidateName(name string) error { + if IsValidName(name) == false { + return errors.NotValidf("name %q", name) + } + return nil +} + // WithRevision returns a URL equivalent to url but with Revision set // to revision. func (url *URL) WithRevision(revision int) *URL { @@ -83,13 +92,6 @@ return &urlCopy } -// WithChannel returns a URL equivalent to url but with the given channel. -func (url *URL) WithChannel(channel Channel) *URL { - urlCopy := *url - urlCopy.Channel = channel - return &urlCopy -} - // MustParseURL works like ParseURL, but panics in case of errors. func MustParseURL(url string) *URL { u, err := ParseURL(url) @@ -128,10 +130,10 @@ // Check if we're dealing with a v1 or v2 URL. 
u, err := gourl.Parse(url) if err != nil { - return nil, fmt.Errorf("cannot parse charm or bundle URL: %q", url) + return nil, errors.Errorf("cannot parse charm or bundle URL: %q", url) } if u.RawQuery != "" || u.Fragment != "" || u.User != nil { - return nil, fmt.Errorf("charm or bundle URL %q has unrecognized parts", url) + return nil, errors.Errorf("charm or bundle URL %q has unrecognized parts", url) } var curl *URL switch { @@ -148,7 +150,7 @@ curl, err = parseV1URL(u, url) } if err != nil { - return nil, err + return nil, errors.Trace(err) } if curl.Schema == "" { curl.Schema = "cs" @@ -160,47 +162,37 @@ var r URL if url.Scheme != "" { r.Schema = url.Scheme - if r.Schema != "cs" && r.Schema != "local" { - return nil, fmt.Errorf("charm or bundle URL has invalid schema: %q", originalURL) + if err := ValidateSchema(r.Schema); err != nil { + return nil, errors.Annotatef(err, "cannot parse URL %q", url) } } i := 0 parts := strings.Split(url.Path[i:], "/") if len(parts) < 1 || len(parts) > 4 { - return nil, fmt.Errorf("charm or bundle URL has invalid form: %q", originalURL) + return nil, errors.Errorf("charm or bundle URL has invalid form: %q", originalURL) } // ~ if strings.HasPrefix(parts[0], "~") { if r.Schema == "local" { - return nil, fmt.Errorf("local charm or bundle URL with user name: %q", originalURL) + return nil, errors.Errorf("local charm or bundle URL with user name: %q", originalURL) } r.User, parts = parts[0][1:], parts[1:] } - // - if len(parts) > 1 { - if IsValidChannel(Channel(parts[0])) { - if r.Schema == "local" { - return nil, fmt.Errorf("local charm or bundle URL with channel: %q", originalURL) - } - r.Channel, parts = Channel(parts[0]), parts[1:] - } - } - if len(parts) > 2 { - return nil, fmt.Errorf("charm or bundle URL has invalid form: %q", originalURL) + return nil, errors.Errorf("charm or bundle URL has invalid form: %q", originalURL) } // if len(parts) == 2 { r.Series, parts = parts[0], parts[1:] - if !IsValidSeries(r.Series) { - return 
nil, fmt.Errorf("charm or bundle URL has invalid series: %q", originalURL) + if err := ValidateSeries(r.Series); err != nil { + return nil, errors.Annotatef(err, "cannot parse URL %q", originalURL) } } if len(parts) < 1 { - return nil, fmt.Errorf("URL without charm or bundle name: %q", originalURL) + return nil, errors.Errorf("URL without charm or bundle name: %q", originalURL) } // [-] @@ -223,11 +215,11 @@ } if r.User != "" { if !names.IsValidUser(r.User) { - return nil, fmt.Errorf("charm or bundle URL has invalid user name: %q", originalURL) + return nil, errors.Errorf("charm or bundle URL has invalid user name: %q", originalURL) } } - if !IsValidName(r.Name) { - return nil, fmt.Errorf("URL has invalid charm or bundle name: %q", originalURL) + if err := ValidateName(r.Name); err != nil { + return nil, errors.Annotatef(err, "cannot parse URL %q", url) } return &r, nil } @@ -238,13 +230,10 @@ parts := strings.Split(strings.Trim(url.Path, "/"), "/") if parts[0] == "u" { if len(parts) < 3 { - return nil, fmt.Errorf(`charm or bundle URL %q malformed, expected "/u//"`, url) + return nil, errors.Errorf(`charm or bundle URL %q malformed, expected "/u//"`, url) } r.User, parts = parts[1], parts[2:] } - if len(parts) > 1 && IsValidChannel(Channel(parts[0])) { - r.Channel, parts = Channel(parts[0]), parts[1:] - } r.Name, parts = parts[0], parts[1:] r.Revision = -1 if len(parts) > 0 { @@ -253,29 +242,29 @@ r.Revision = revision } else { r.Series = parts[0] - if !IsValidSeries(r.Series) { - return nil, fmt.Errorf("charm or bundle URL has invalid series: %q", url) + if err := ValidateSeries(r.Series); err != nil { + return nil, errors.Annotatef(err, "cannot parse URL %q", url) } parts = parts[1:] if len(parts) == 1 { r.Revision, err = strconv.Atoi(parts[0]) if err != nil { - return nil, fmt.Errorf("charm or bundle URL has malformed revision: %q in %q", parts[0], url) + return nil, errors.Errorf("charm or bundle URL has malformed revision: %q in %q", parts[0], url) } } else { 
if len(parts) != 0 { - return nil, fmt.Errorf("charm or bundle URL has invalid form: %q", url) + return nil, errors.Errorf("charm or bundle URL has invalid form: %q", url) } } } } if r.User != "" { if !names.IsValidUser(r.User) { - return nil, fmt.Errorf("charm or bundle URL has invalid user name: %q", url) + return nil, errors.Errorf("charm or bundle URL has invalid user name: %q", url) } } - if !IsValidName(r.Name) { - return nil, fmt.Errorf("URL has invalid charm or bundle name: %q", url) + if err := ValidateName(r.Name); err != nil { + return nil, errors.Annotatef(err, "cannot parse URL %q", url) } return &r, nil } @@ -285,9 +274,6 @@ if r.User != "" { parts = append(parts, fmt.Sprintf("~%s", r.User)) } - if r.Channel != "" { - parts = append(parts, string(r.Channel)) - } if r.Series != "" { parts = append(parts, r.Series) } @@ -314,7 +300,7 @@ } if u.Series == "" { if defaultSeries == "" { - return nil, fmt.Errorf("cannot infer charm or bundle URL for %q: charm or bundle url series is not resolved", src) + return nil, errors.Errorf("cannot infer charm or bundle URL for %q: charm or bundle url series is not resolved", src) } u.Series = defaultSeries } diff -Nru juju-core-2.0~beta15/src/gopkg.in/juju/charm.v6-unstable/url_test.go juju-core-2.0.0/src/gopkg.in/juju/charm.v6-unstable/url_test.go --- juju-core-2.0~beta15/src/gopkg.in/juju/charm.v6-unstable/url_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/juju/charm.v6-unstable/url_test.go 2016-10-13 14:32:05.000000000 +0000 @@ -26,178 +26,116 @@ url *charm.URL }{{ s: "cs:~user/series/name", - url: &charm.URL{"cs", "user", "name", -1, "series", ""}, + url: &charm.URL{"cs", "user", "name", -1, "series"}, }, { s: "cs:~user/series/name-0", - url: &charm.URL{"cs", "user", "name", 0, "series", ""}, + url: &charm.URL{"cs", "user", "name", 0, "series"}, }, { s: "cs:series/name", - url: &charm.URL{"cs", "", "name", -1, "series", ""}, + url: &charm.URL{"cs", "", "name", -1, "series"}, }, { s: 
"cs:series/name-42", - url: &charm.URL{"cs", "", "name", 42, "series", ""}, + url: &charm.URL{"cs", "", "name", 42, "series"}, }, { s: "local:series/name-1", - url: &charm.URL{"local", "", "name", 1, "series", ""}, + url: &charm.URL{"local", "", "name", 1, "series"}, }, { s: "local:series/name", - url: &charm.URL{"local", "", "name", -1, "series", ""}, + url: &charm.URL{"local", "", "name", -1, "series"}, }, { s: "local:series/n0-0n-n0", - url: &charm.URL{"local", "", "n0-0n-n0", -1, "series", ""}, + url: &charm.URL{"local", "", "n0-0n-n0", -1, "series"}, }, { s: "cs:~user/name", - url: &charm.URL{"cs", "user", "name", -1, "", ""}, + url: &charm.URL{"cs", "user", "name", -1, ""}, }, { s: "cs:name", - url: &charm.URL{"cs", "", "name", -1, "", ""}, + url: &charm.URL{"cs", "", "name", -1, ""}, }, { s: "local:name", - url: &charm.URL{"local", "", "name", -1, "", ""}, -}, { - s: "cs:~user/development/series/name-0", - url: &charm.URL{"cs", "user", "name", 0, "series", charm.DevelopmentChannel}, -}, { - s: "cs:~user/development/series/name-0", - url: &charm.URL{"cs", "user", "name", 0, "series", charm.DevelopmentChannel}, -}, { - s: "cs:development/series/name", - url: &charm.URL{"cs", "", "name", -1, "series", charm.DevelopmentChannel}, -}, { - s: "cs:development/series/name-42", - url: &charm.URL{"cs", "", "name", 42, "series", charm.DevelopmentChannel}, -}, { - s: "cs:~user/development/name", - url: &charm.URL{"cs", "user", "name", -1, "", charm.DevelopmentChannel}, -}, { - s: "cs:development/name", - url: &charm.URL{"cs", "", "name", -1, "", charm.DevelopmentChannel}, + url: &charm.URL{"local", "", "name", -1, ""}, }, { s: "http://jujucharms.com/u/user/name/series/1", - url: &charm.URL{"cs", "user", "name", 1, "series", ""}, + url: &charm.URL{"cs", "user", "name", 1, "series"}, exact: "cs:~user/series/name-1", }, { s: "http://www.jujucharms.com/u/user/name/series/1", - url: &charm.URL{"cs", "user", "name", 1, "series", ""}, + url: &charm.URL{"cs", "user", "name", 1, 
"series"}, exact: "cs:~user/series/name-1", }, { s: "https://www.jujucharms.com/u/user/name/series/1", - url: &charm.URL{"cs", "user", "name", 1, "series", ""}, + url: &charm.URL{"cs", "user", "name", 1, "series"}, exact: "cs:~user/series/name-1", }, { s: "https://jujucharms.com/u/user/name/series/1", - url: &charm.URL{"cs", "user", "name", 1, "series", ""}, + url: &charm.URL{"cs", "user", "name", 1, "series"}, exact: "cs:~user/series/name-1", }, { s: "https://jujucharms.com/u/user/name/series", - url: &charm.URL{"cs", "user", "name", -1, "series", ""}, + url: &charm.URL{"cs", "user", "name", -1, "series"}, exact: "cs:~user/series/name", }, { s: "https://jujucharms.com/u/user/name/1", - url: &charm.URL{"cs", "user", "name", 1, "", ""}, + url: &charm.URL{"cs", "user", "name", 1, ""}, exact: "cs:~user/name-1", }, { s: "https://jujucharms.com/u/user/name", - url: &charm.URL{"cs", "user", "name", -1, "", ""}, + url: &charm.URL{"cs", "user", "name", -1, ""}, exact: "cs:~user/name", }, { s: "https://jujucharms.com/name", - url: &charm.URL{"cs", "", "name", -1, "", ""}, + url: &charm.URL{"cs", "", "name", -1, ""}, exact: "cs:name", }, { s: "https://jujucharms.com/name/series", - url: &charm.URL{"cs", "", "name", -1, "series", ""}, + url: &charm.URL{"cs", "", "name", -1, "series"}, exact: "cs:series/name", }, { s: "https://jujucharms.com/name/1", - url: &charm.URL{"cs", "", "name", 1, "", ""}, + url: &charm.URL{"cs", "", "name", 1, ""}, exact: "cs:name-1", }, { s: "https://jujucharms.com/name/series/1", - url: &charm.URL{"cs", "", "name", 1, "series", ""}, + url: &charm.URL{"cs", "", "name", 1, "series"}, exact: "cs:series/name-1", }, { s: "https://jujucharms.com/u/user/name/series/1/", - url: &charm.URL{"cs", "user", "name", 1, "series", ""}, + url: &charm.URL{"cs", "user", "name", 1, "series"}, exact: "cs:~user/series/name-1", }, { s: "https://jujucharms.com/u/user/name/series/", - url: &charm.URL{"cs", "user", "name", -1, "series", ""}, + url: &charm.URL{"cs", "user", 
"name", -1, "series"}, exact: "cs:~user/series/name", }, { s: "https://jujucharms.com/u/user/name/1/", - url: &charm.URL{"cs", "user", "name", 1, "", ""}, + url: &charm.URL{"cs", "user", "name", 1, ""}, exact: "cs:~user/name-1", }, { s: "https://jujucharms.com/u/user/name/", - url: &charm.URL{"cs", "user", "name", -1, "", ""}, + url: &charm.URL{"cs", "user", "name", -1, ""}, exact: "cs:~user/name", }, { s: "https://jujucharms.com/name/", - url: &charm.URL{"cs", "", "name", -1, "", ""}, + url: &charm.URL{"cs", "", "name", -1, ""}, exact: "cs:name", }, { s: "https://jujucharms.com/name/series/", - url: &charm.URL{"cs", "", "name", -1, "series", ""}, + url: &charm.URL{"cs", "", "name", -1, "series"}, exact: "cs:series/name", }, { s: "https://jujucharms.com/name/1/", - url: &charm.URL{"cs", "", "name", 1, "", ""}, + url: &charm.URL{"cs", "", "name", 1, ""}, exact: "cs:name-1", }, { s: "https://jujucharms.com/name/series/1/", - url: &charm.URL{"cs", "", "name", 1, "series", ""}, + url: &charm.URL{"cs", "", "name", 1, "series"}, exact: "cs:series/name-1", }, { - s: "https://jujucharms.com/u/user/development/name/series/1", - url: &charm.URL{"cs", "user", "name", 1, "series", charm.DevelopmentChannel}, - exact: "cs:~user/development/series/name-1", -}, { - s: "https://jujucharms.com/u/user/development/name/series", - url: &charm.URL{"cs", "user", "name", -1, "series", charm.DevelopmentChannel}, - exact: "cs:~user/development/series/name", -}, { - s: "https://jujucharms.com/u/user/development/name/1", - url: &charm.URL{"cs", "user", "name", 1, "", charm.DevelopmentChannel}, - exact: "cs:~user/development/name-1", -}, { - s: "https://jujucharms.com/u/user/development/name", - url: &charm.URL{"cs", "user", "name", -1, "", charm.DevelopmentChannel}, - exact: "cs:~user/development/name", -}, { - s: "https://jujucharms.com/development/name", - url: &charm.URL{"cs", "", "name", -1, "", charm.DevelopmentChannel}, - exact: "cs:development/name", -}, { - s: 
"https://jujucharms.com/development/name/series", - url: &charm.URL{"cs", "", "name", -1, "series", charm.DevelopmentChannel}, - exact: "cs:development/series/name", -}, { - s: "https://jujucharms.com/development/name/1", - url: &charm.URL{"cs", "", "name", 1, "", charm.DevelopmentChannel}, - exact: "cs:development/name-1", -}, { - s: "https://jujucharms.com/development/name/series/1", - url: &charm.URL{"cs", "", "name", 1, "series", charm.DevelopmentChannel}, - exact: "cs:development/series/name-1", -}, { - s: "https://jujucharms.com/u/user/development/name/series/", - url: &charm.URL{"cs", "user", "name", -1, "series", charm.DevelopmentChannel}, - exact: "cs:~user/development/series/name", -}, { - s: "https://jujucharms.com/u/user/development/name/1/", - url: &charm.URL{"cs", "user", "name", 1, "", charm.DevelopmentChannel}, - exact: "cs:~user/development/name-1", -}, { - s: "https://jujucharms.com/u/user/development/name/", - url: &charm.URL{"cs", "user", "name", -1, "", charm.DevelopmentChannel}, - exact: "cs:~user/development/name", -}, { s: "https://jujucharms.com/", - err: `URL has invalid charm or bundle name: $URL`, + err: `cannot parse URL $URL: name "" not valid`, }, { s: "https://jujucharms.com/bad.wolf", - err: `URL has invalid charm or bundle name: $URL`, + err: `cannot parse URL $URL: name "bad.wolf" not valid`, }, { s: "https://jujucharms.com/u/", err: "charm or bundle URL $URL malformed, expected \"/u//\"", @@ -209,7 +147,7 @@ err: "charm or bundle URL has malformed revision: \"badwolf\" in $URL", }, { s: "https://jujucharms.com/name/bad.wolf/42", - err: `charm or bundle URL has invalid series: $URL`, + err: `cannot parse URL $URL: series name "bad.wolf" not valid`, }, { s: "https://badwolf@jujucharms.com/name/series/42", err: `charm or bundle URL $URL has unrecognized parts`, @@ -221,7 +159,7 @@ err: `charm or bundle URL $URL has unrecognized parts`, }, { s: "bs:~user/series/name-1", - err: `charm or bundle URL has invalid schema: $URL`, + err: 
`cannot parse URL $URL: schema "bs" not valid`, }, { s: ":foo", err: `cannot parse charm or bundle URL: $URL`, @@ -233,19 +171,19 @@ err: `URL without charm or bundle name: $URL`, }, { s: "cs:~user/1/name-1", - err: `charm or bundle URL has invalid series: $URL`, + err: `cannot parse URL $URL: series name "1" not valid`, }, { s: "cs:~user/series/name-1-2", - err: `URL has invalid charm or bundle name: $URL`, + err: `cannot parse URL $URL: name "name-1" not valid`, }, { s: "cs:~user/series/name-1-name-2", - err: `URL has invalid charm or bundle name: $URL`, + err: `cannot parse URL $URL: name "name-1-name" not valid`, }, { s: "cs:~user/series/name--name-2", - err: `URL has invalid charm or bundle name: $URL`, + err: `cannot parse URL $URL: name "name--name" not valid`, }, { s: "cs:foo-1-2", - err: `URL has invalid charm or bundle name: $URL`, + err: `cannot parse URL $URL: name "foo-1" not valid`, }, { s: "cs:~user/series/huh/name-1", err: `charm or bundle URL has invalid form: $URL`, @@ -253,11 +191,8 @@ s: "cs:~user/production/series/name-1", err: `charm or bundle URL has invalid form: $URL`, }, { - s: "cs:~user/development/series/badwolf/name-1", - err: `charm or bundle URL has invalid form: $URL`, -}, { s: "cs:/name", - err: `charm or bundle URL has invalid series: $URL`, + err: `cannot parse URL $URL: series name "" not valid`, }, { s: "local:~user/series/name", err: `local charm or bundle URL with user name: $URL`, @@ -265,61 +200,39 @@ s: "local:~user/name", err: `local charm or bundle URL with user name: $URL`, }, { - s: "local:development/name", - err: `local charm or bundle URL with channel: $URL`, -}, { - s: "local:development/series/name-1", - err: `local charm or bundle URL with channel: $URL`, -}, { s: "precise/wordpress", exact: "cs:precise/wordpress", - url: &charm.URL{"cs", "", "wordpress", -1, "precise", ""}, + url: &charm.URL{"cs", "", "wordpress", -1, "precise"}, }, { s: "foo", exact: "cs:foo", - url: &charm.URL{"cs", "", "foo", -1, "", ""}, + 
url: &charm.URL{"cs", "", "foo", -1, ""}, }, { s: "foo-1", exact: "cs:foo-1", - url: &charm.URL{"cs", "", "foo", 1, "", ""}, + url: &charm.URL{"cs", "", "foo", 1, ""}, }, { s: "n0-n0-n0", exact: "cs:n0-n0-n0", - url: &charm.URL{"cs", "", "n0-n0-n0", -1, "", ""}, + url: &charm.URL{"cs", "", "n0-n0-n0", -1, ""}, }, { s: "cs:foo", exact: "cs:foo", - url: &charm.URL{"cs", "", "foo", -1, "", ""}, + url: &charm.URL{"cs", "", "foo", -1, ""}, }, { s: "local:foo", exact: "local:foo", - url: &charm.URL{"local", "", "foo", -1, "", ""}, + url: &charm.URL{"local", "", "foo", -1, ""}, }, { s: "series/foo", exact: "cs:series/foo", - url: &charm.URL{"cs", "", "foo", -1, "series", ""}, -}, { - s: "development/foo", - exact: "cs:development/foo", - url: &charm.URL{"cs", "", "foo", -1, "", charm.DevelopmentChannel}, -}, { - s: "development/foo-1", - exact: "cs:development/foo-1", - url: &charm.URL{"cs", "", "foo", 1, "", charm.DevelopmentChannel}, -}, { - s: "development/n0-n0-n0", - exact: "cs:development/n0-n0-n0", - url: &charm.URL{"cs", "", "n0-n0-n0", -1, "", charm.DevelopmentChannel}, -}, { - s: "development/series/foo", - exact: "cs:development/series/foo", - url: &charm.URL{"cs", "", "foo", -1, "series", charm.DevelopmentChannel}, + url: &charm.URL{"cs", "", "foo", -1, "series"}, }, { s: "series/foo/bar", err: `charm or bundle URL has invalid form: "series/foo/bar"`, }, { s: "cs:foo/~blah", - err: `URL has invalid charm or bundle name: "cs:foo/~blah"`, + err: `cannot parse URL $URL: name "~blah" not valid`, }} func (s *URLSuite) TestParseURL(c *gc.C) { @@ -333,13 +246,13 @@ url, uerr := charm.ParseURL(t.s) if t.err != "" { t.err = strings.Replace(t.err, "$URL", regexp.QuoteMeta(fmt.Sprintf("%q", t.s)), -1) - c.Assert(uerr, gc.ErrorMatches, t.err) - c.Assert(url, gc.IsNil) + c.Check(uerr, gc.ErrorMatches, t.err) + c.Check(url, gc.IsNil) continue } c.Assert(uerr, gc.IsNil) - c.Assert(url, gc.DeepEquals, t.url) - c.Assert(url.String(), gc.Equals, expectStr) + c.Check(url, 
gc.DeepEquals, t.url) + c.Check(url.String(), gc.Equals, expectStr) // URL strings are generated as expected. Reversability is preserved // with v1 URLs. @@ -368,13 +281,6 @@ {"bs:foo", "bs:defseries/foo"}, {"cs:~1/foo", "cs:~1/defseries/foo"}, {"cs:foo-1-2", "cs:defseries/foo-1-2"}, - {"development/foo", "cs:development/defseries/foo"}, - {"development/foo-1", "cs:development/defseries/foo-1"}, - {"development/series/foo", "cs:development/series/foo"}, - {"local:development/series/foo", "local:development/series/foo"}, - {"cs:~user/development/foo", "cs:~user/development/defseries/foo"}, - {"local:~user/development/series/foo", "local:~user/development/series/foo"}, - {"cs:~1/development/foo", "cs:~1/development/defseries/foo"}, } func (s *URLSuite) TestInferURL(c *gc.C) { @@ -412,13 +318,6 @@ {"series/foo", "cs:series/foo", true}, {"cs:series/foo", "cs:series/foo", true}, {"cs:~user/series/foo", "cs:~user/series/foo", true}, - {"development/foo", "", false}, - {"development/foo-1", "", false}, - {"cs:development/foo", "", false}, - {"cs:~user/development/foo", "", false}, - {"development/series/foo", "cs:development/series/foo", true}, - {"cs:development/series/foo", "cs:development/series/foo", true}, - {"cs:~user/development/series/foo", "cs:~user/development/series/foo", true}, } func (s *URLSuite) TestInferURLNoDefaultSeries(c *gc.C) { @@ -474,32 +373,11 @@ } } -var isValidChannelTests = []struct { - channel charm.Channel - expect bool -}{{ - channel: charm.DevelopmentChannel, - expect: true, -}, { - channel: "", -}, { - channel: "-development", -}, { - channel: "bad wolf", -}} - -func (s *URLSuite) TestIsValidChannel(c *gc.C) { - for i, t := range isValidChannelTests { - c.Logf("test %d: %s", i, t.channel) - c.Assert(charm.IsValidChannel(t.channel), gc.Equals, t.expect, gc.Commentf("%s", t.channel)) - } -} - func (s *URLSuite) TestMustParseURL(c *gc.C) { url := charm.MustParseURL("cs:series/name") - c.Assert(url, gc.DeepEquals, &charm.URL{"cs", "", "name", 
-1, "series", ""}) + c.Assert(url, gc.DeepEquals, &charm.URL{"cs", "", "name", -1, "series"}) f := func() { charm.MustParseURL("local:@@/name") } - c.Assert(f, gc.PanicMatches, "charm or bundle URL has invalid series: .*") + c.Assert(f, gc.PanicMatches, "cannot parse URL \"local:@@/name\": series name \"@@\" not valid") f = func() { charm.MustParseURL("cs:~user") } c.Assert(f, gc.PanicMatches, "URL without charm or bundle name: .*") f = func() { charm.MustParseURL("cs:~user") } @@ -509,28 +387,14 @@ func (s *URLSuite) TestWithRevision(c *gc.C) { url := charm.MustParseURL("cs:series/name") other := url.WithRevision(1) - c.Assert(url, gc.DeepEquals, &charm.URL{"cs", "", "name", -1, "series", ""}) - c.Assert(other, gc.DeepEquals, &charm.URL{"cs", "", "name", 1, "series", ""}) + c.Assert(url, gc.DeepEquals, &charm.URL{"cs", "", "name", -1, "series"}) + c.Assert(other, gc.DeepEquals, &charm.URL{"cs", "", "name", 1, "series"}) // Should always copy. The opposite behavior is error prone. c.Assert(other.WithRevision(1), gc.Not(gc.Equals), other) c.Assert(other.WithRevision(1), gc.DeepEquals, other) } -func (s *URLSuite) TestWithChannel(c *gc.C) { - url := charm.MustParseURL("cs:series/name") - other := url.WithChannel("development") - c.Assert(url, gc.DeepEquals, &charm.URL{"cs", "", "name", -1, "series", ""}) - c.Assert(other, gc.DeepEquals, &charm.URL{"cs", "", "name", -1, "series", "development"}) - - // Should always copy. The opposite behavior is error prone. - c.Assert(other.WithRevision(1), gc.Not(gc.Equals), other) - - // Set the channel back to empty. 
- other = url.WithChannel("") - c.Assert(other, gc.DeepEquals, &charm.URL{"cs", "", "name", -1, "series", ""}) -} - var codecs = []struct { Name string Marshal func(interface{}) ([]byte, error) diff -Nru juju-core-2.0~beta15/src/gopkg.in/juju/jujusvg.v1/assets/relation-icon-healthy.go juju-core-2.0.0/src/gopkg.in/juju/jujusvg.v1/assets/relation-icon-healthy.go --- juju-core-2.0~beta15/src/gopkg.in/juju/jujusvg.v1/assets/relation-icon-healthy.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/juju/jujusvg.v1/assets/relation-icon-healthy.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,5 +0,0 @@ -package assets - -var RelationIconHealthy = ` - -` diff -Nru juju-core-2.0~beta15/src/gopkg.in/juju/jujusvg.v1/assets/relation-icon-healthy.svg juju-core-2.0.0/src/gopkg.in/juju/jujusvg.v1/assets/relation-icon-healthy.svg --- juju-core-2.0~beta15/src/gopkg.in/juju/jujusvg.v1/assets/relation-icon-healthy.svg 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/juju/jujusvg.v1/assets/relation-icon-healthy.svg 1970-01-01 00:00:00.000000000 +0000 @@ -1,2 +0,0 @@ - - diff -Nru juju-core-2.0~beta15/src/gopkg.in/juju/jujusvg.v1/assets/service_module.go juju-core-2.0.0/src/gopkg.in/juju/jujusvg.v1/assets/service_module.go --- juju-core-2.0~beta15/src/gopkg.in/juju/jujusvg.v1/assets/service_module.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/juju/jujusvg.v1/assets/service_module.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,45 +0,0 @@ -package assets - -// This is the SVG for the service module block used in the bundle diagram. -// Note that there MUST NOT be anything (processing instructions, xml -// declarations, or directives) before the tag. 
-var ServiceModule = ` - - - - - - - - - - - - - - - - - - - -` diff -Nru juju-core-2.0~beta15/src/gopkg.in/juju/jujusvg.v1/assets/service_module.svg juju-core-2.0.0/src/gopkg.in/juju/jujusvg.v1/assets/service_module.svg --- juju-core-2.0~beta15/src/gopkg.in/juju/jujusvg.v1/assets/service_module.svg 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/juju/jujusvg.v1/assets/service_module.svg 1970-01-01 00:00:00.000000000 +0000 @@ -1,41 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - diff -Nru juju-core-2.0~beta15/src/gopkg.in/juju/jujusvg.v1/canvas.go juju-core-2.0.0/src/gopkg.in/juju/jujusvg.v1/canvas.go --- juju-core-2.0~beta15/src/gopkg.in/juju/jujusvg.v1/canvas.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/juju/jujusvg.v1/canvas.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,299 +0,0 @@ -package jujusvg - -import ( - "bytes" - "fmt" - "image" - "io" - "math" - - svg "github.com/ajstarks/svgo" - - "gopkg.in/juju/jujusvg.v1/assets" -) - -const ( - iconSize = 96 - applicationBlockSize = 180 - healthCircleRadius = 8 - relationLineWidth = 1 - maxInt = int(^uint(0) >> 1) - minInt = -(maxInt - 1) - maxHeight = 450 - maxWidth = 1000 - - fontColor = "#505050" - relationColor = "#a7a7a7" -) - -// Canvas holds the parsed form of a bundle or model. -type Canvas struct { - applications []*application - relations []*applicationRelation - iconsRendered map[string]bool - iconIds map[string]string -} - -// application represents a application deployed to a model and contains the -// point of the top-left corner of the icon, icon URL, and additional metadata. -type application struct { - name string - charmPath string - iconUrl string - iconSrc []byte - point image.Point -} - -// applicationRelation represents a relation created between two applications. -type applicationRelation struct { - name string - applicationA *application - applicationB *application -} - -// line represents a line segment with two endpoints. 
-type line struct { - p0, p1 image.Point -} - -// definition creates any necessary defs that can be used later in the SVG. -func (s *application) definition(canvas *svg.SVG, iconsRendered map[string]bool, iconIds map[string]string) error { - if len(s.iconSrc) == 0 || iconsRendered[s.charmPath] { - return nil - } - iconsRendered[s.charmPath] = true - iconIds[s.charmPath] = fmt.Sprintf("icon-%d", len(iconsRendered)) - - // Temporary solution: - iconBuf := bytes.NewBuffer(s.iconSrc) - return processIcon(iconBuf, canvas.Writer, iconIds[s.charmPath]) -} - -// usage creates any necessary tags for actually using the application in the SVG. -func (s *application) usage(canvas *svg.SVG, iconIds map[string]string) { - canvas.Group(fmt.Sprintf(`transform="translate(%d,%d)"`, s.point.X, s.point.Y)) - defer canvas.Gend() - canvas.Title(s.name) - canvas.Circle( - applicationBlockSize/2, - applicationBlockSize/2, - applicationBlockSize/2, - `class="application-block" fill="#f5f5f5" stroke="#888" stroke-width="1"`) - if len(s.iconSrc) > 0 { - canvas.Use( - 0, - 0, - "#"+iconIds[s.charmPath], - fmt.Sprintf(`transform="translate(%d,%d)" width="%d" height="%d" clip-path="url(#clip-mask)"`, applicationBlockSize/2-iconSize/2, applicationBlockSize/2-iconSize/2, iconSize, iconSize), - ) - } else { - canvas.Image( - applicationBlockSize/2-iconSize/2, - applicationBlockSize/2-iconSize/2, - iconSize, - iconSize, - s.iconUrl, - `clip-path="url(#clip-mask)"`, - ) - } - name := s.name - if len(name) > 20 { - name = fmt.Sprintf("%s...", name[:17]) - } - canvas.Rect( - 0, - applicationBlockSize-45, - applicationBlockSize, - 32, - `rx="2" ry="2" fill="rgba(220, 220, 220, 0.8)"`) - canvas.Text( - applicationBlockSize/2, - applicationBlockSize-23, - name, - `text-anchor="middle" style="font-weight:200"`) -} - -// definition creates any necessary defs that can be used later in the SVG. 
-func (r *applicationRelation) definition(canvas *svg.SVG) { -} - -// usage creates any necessary tags for actually using the relation in the SVG. -func (r *applicationRelation) usage(canvas *svg.SVG) { - canvas.Group() - defer canvas.Gend() - canvas.Title(r.name) - l := line{ - p0: r.applicationA.point.Add(point(applicationBlockSize/2, applicationBlockSize/2)), - p1: r.applicationB.point.Add(point(applicationBlockSize/2, applicationBlockSize/2)), - } - canvas.Line( - l.p0.X, - l.p0.Y, - l.p1.X, - l.p1.Y, - fmt.Sprintf(`stroke=%q`, relationColor), - fmt.Sprintf(`stroke-width="%dpx"`, relationLineWidth), - fmt.Sprintf(`stroke-dasharray=%q`, strokeDashArray(l)), - ) - mid := l.p0.Add(l.p1).Div(2).Sub(point(healthCircleRadius, healthCircleRadius)) - canvas.Use(mid.X, mid.Y, "#healthCircle") - - deg := math.Atan2(float64(l.p0.Y-l.p1.Y), float64(l.p0.X-l.p1.X)) - canvas.Circle( - int(float64(l.p0.X)-math.Cos(deg)*(applicationBlockSize/2)), - int(float64(l.p0.Y)-math.Sin(deg)*(applicationBlockSize/2)), - 4, - fmt.Sprintf(`fill=%q`, relationColor)) - canvas.Circle( - int(float64(l.p1.X)+math.Cos(deg)*(applicationBlockSize/2)), - int(float64(l.p1.Y)+math.Sin(deg)*(applicationBlockSize/2)), - 4, - fmt.Sprintf(`fill=%q`, relationColor)) -} - -// strokeDashArray generates the stroke-dasharray attribute content so that -// the relation health indicator is placed in an empty space. -func strokeDashArray(l line) string { - return fmt.Sprintf("%.2f, %d", l.length()/2-healthCircleRadius, healthCircleRadius*2) -} - -// length calculates the length of a line. -func (l *line) length() float64 { - dp := l.p0.Sub(l.p1) - return math.Sqrt(square(float64(dp.X)) + square(float64(dp.Y))) -} - -// addApplication adds a new application to the canvas. -func (c *Canvas) addApplication(s *application) { - c.applications = append(c.applications, s) -} - -// addRelation adds a new relation to the canvas. 
-func (c *Canvas) addRelation(r *applicationRelation) { - c.relations = append(c.relations, r) -} - -// layout adjusts all items so that they are positioned appropriately, -// and returns the overall size of the canvas. -func (c *Canvas) layout() (int, int) { - minWidth := maxInt - minHeight := maxInt - maxWidth := minInt - maxHeight := minInt - - for _, application := range c.applications { - if application.point.X < minWidth { - minWidth = application.point.X - } - if application.point.Y < minHeight { - minHeight = application.point.Y - } - if application.point.X > maxWidth { - maxWidth = application.point.X - } - if application.point.Y > maxHeight { - maxHeight = application.point.Y - } - } - for _, application := range c.applications { - application.point = application.point.Sub(point(minWidth, minHeight)) - } - return abs(maxWidth-minWidth) + applicationBlockSize + 1, - abs(maxHeight-minHeight) + applicationBlockSize + 1 -} - -func (c *Canvas) definition(canvas *svg.SVG) { - canvas.Def() - defer canvas.DefEnd() - - // Relation health circle. - canvas.Group(`id="healthCircle"`, - `transform="scale(1.1)"`) - io.WriteString(canvas.Writer, assets.RelationIconHealthy) - canvas.Gend() - - // Application and relation specific defs. 
- for _, relation := range c.relations { - relation.definition(canvas) - } - for _, application := range c.applications { - application.definition(canvas, c.iconsRendered, c.iconIds) - } -} - -func (c *Canvas) relationsGroup(canvas *svg.SVG) { - canvas.Gid("relations") - defer canvas.Gend() - for _, relation := range c.relations { - relation.usage(canvas) - } -} - -func (c *Canvas) applicationsGroup(canvas *svg.SVG) { - canvas.Gid("applications") - defer canvas.Gend() - for _, application := range c.applications { - application.usage(canvas, c.iconIds) - } -} - -func (c *Canvas) iconClipPath(canvas *svg.SVG) { - canvas.Circle( - applicationBlockSize/2-iconSize/2+5, // for these two, add an offset to help - applicationBlockSize/2-iconSize/2+7, // hide the embossed border. - applicationBlockSize/4, - `id="application-icon-mask" fill="none"`) - canvas.ClipPath(`id="clip-mask"`) - defer canvas.ClipEnd() - canvas.Use( - 0, - 0, - `#application-icon-mask`) -} - -// Marshal renders the SVG to the given io.Writer. -func (c *Canvas) Marshal(w io.Writer) { - // Initialize maps for application icons, which are used both in definition - // and use methods for applications. - c.iconsRendered = make(map[string]bool) - c.iconIds = make(map[string]string) - - // TODO check write errors and return an error from - // Marshal if the write fails. The svg package does not - // itself check or return write errors; a possible work-around - // is to wrap the writer in a custom writer that panics - // on error, and catch the panic here. - width, height := c.layout() - - canvas := svg.New(w) - canvas.Start( - width, - height, - fmt.Sprintf(`style="font-family:Ubuntu, sans-serif;" viewBox="0 0 %d %d"`, - width, height), - ) - defer canvas.End() - c.definition(canvas) - c.iconClipPath(canvas) - c.relationsGroup(canvas) - c.applicationsGroup(canvas) -} - -// abs returns the absolute value of a number. 
-func abs(x int) int { - if x < 0 { - return -x - } else { - return x - } -} - -// square multiplies a number by itself. -func square(x float64) float64 { - return x * x -} - -// point generates an image.Point given its coordinates. -func point(x, y int) image.Point { - return image.Point{x, y} -} diff -Nru juju-core-2.0~beta15/src/gopkg.in/juju/jujusvg.v1/canvas_test.go juju-core-2.0.0/src/gopkg.in/juju/jujusvg.v1/canvas_test.go --- juju-core-2.0~beta15/src/gopkg.in/juju/jujusvg.v1/canvas_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/juju/jujusvg.v1/canvas_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,299 +0,0 @@ -package jujusvg - -import ( - "bytes" - "encoding/xml" - "image" - "io" - - "github.com/ajstarks/svgo" - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" - - "gopkg.in/juju/jujusvg.v1/assets" -) - -type CanvasSuite struct{} - -var _ = gc.Suite(&CanvasSuite{}) - -func (s *CanvasSuite) TestApplicationRender(c *gc.C) { - // Ensure that the Application's definition and usage methods output the - // proper SVG elements. - var tests = []struct { - about string - application application - expected string - }{ - { - about: "Application without iconSrc, no def created", - application: application{ - name: "foo", - point: image.Point{ - X: 0, - Y: 0, - }, - iconUrl: "foo", - }, - expected: ` -foo - - - -foo - -`, - }, - { - about: "Application with iconSrc", - application: application{ - name: "bar", - charmPath: "bar", - point: image.Point{ - X: 0, - Y: 0, - }, - iconSrc: []byte("bar"), - }, - expected: `bar -bar - - - -bar - -`, - }, - { - about: "Application with already def'd icon", - application: application{ - name: "baz", - charmPath: "bar", - point: image.Point{ - X: 0, - Y: 0, - }, - iconSrc: []byte("bar"), - }, - expected: ` -baz - - - -baz - -`, - }, - } - // Maintain our list of rendered icons outside the loop. 
- iconsRendered := make(map[string]bool) - iconIds := make(map[string]string) - for _, test := range tests { - var buf bytes.Buffer - svg := svg.New(&buf) - test.application.definition(svg, iconsRendered, iconIds) - test.application.usage(svg, iconIds) - c.Log(test.about) - c.Log(buf.String()) - c.Assert(buf.String(), gc.Equals, test.expected) - } -} - -func (s *CanvasSuite) TestRelationRender(c *gc.C) { - // Ensure that the Relation's definition and usage methods output the - // proper SVG elements. - var buf bytes.Buffer - svg := svg.New(&buf) - relation := applicationRelation{ - name: "foo", - applicationA: &application{ - point: image.Point{ - X: 0, - Y: 0, - }, - }, - applicationB: &application{ - point: image.Point{ - X: 100, - Y: 100, - }, - }, - } - relation.definition(svg) - relation.usage(svg) - c.Assert(buf.String(), gc.Equals, - ` -foo - - - - - -`) -} - -func (s *CanvasSuite) TestIconClipPath(c *gc.C) { - // Ensure that the icon ClipPath returns the correctly sizes clipping Circle - var buf bytes.Buffer - svg := svg.New(&buf) - canvas := Canvas{} - canvas.iconClipPath(svg) - c.Assert(buf.String(), gc.Equals, - ` - - -`) -} - -func (s *CanvasSuite) TestLayout(c *gc.C) { - // Ensure that the SVG is sized exactly around the positioned applications. 
- canvas := Canvas{} - canvas.addApplication(&application{ - name: "application1", - point: image.Point{ - X: 0, - Y: 0, - }, - }) - canvas.addApplication(&application{ - name: "application2", - point: image.Point{ - X: 100, - Y: 100, - }, - }) - width, height := canvas.layout() - c.Assert(width, gc.Equals, 281) - c.Assert(height, gc.Equals, 281) - canvas.addApplication(&application{ - name: "application3", - point: image.Point{ - X: -100, - Y: -100, - }, - }) - canvas.addApplication(&application{ - name: "application4", - point: image.Point{ - X: -100, - Y: 100, - }, - }) - canvas.addApplication(&application{ - name: "application5", - point: image.Point{ - X: 200, - Y: -100, - }, - }) - width, height = canvas.layout() - c.Assert(width, gc.Equals, 481) - c.Assert(height, gc.Equals, 381) -} - -func (s *CanvasSuite) TestMarshal(c *gc.C) { - // Ensure that the internal representation of the canvas can be marshalled - // to SVG. - var buf bytes.Buffer - canvas := Canvas{} - applicationA := &application{ - name: "application-a", - charmPath: "trusty/svc-a", - point: image.Point{ - X: 0, - Y: 0, - }, - iconSrc: []byte(` - - - `), - } - applicationB := &application{ - name: "application-b", - point: image.Point{ - X: 100, - Y: 100, - }, - } - canvas.addApplication(applicationA) - canvas.addApplication(applicationB) - canvas.addRelation(&applicationRelation{ - name: "relation", - applicationA: applicationA, - applicationB: applicationB, - }) - canvas.Marshal(&buf) - c.Logf("%s", buf.Bytes()) - assertXMLEqual(c, buf.Bytes(), []byte(` - - - - -`+assets.RelationIconHealthy+` - - - - - - - - - -relation - - - - - - - - -application-a - - - -application-a - - -application-b - - - -application-b - - - -`)) -} - -func assertXMLEqual(c *gc.C, obtained, expected []byte) { - toksObtained := xmlTokens(c, obtained) - toksExpected := xmlTokens(c, expected) - c.Assert(toksObtained, jc.DeepEquals, toksExpected) -} - -func xmlTokens(c *gc.C, data []byte) []xml.Token { - dec := 
xml.NewDecoder(bytes.NewReader(data)) - var toks []xml.Token - for { - tok, err := dec.Token() - if err == io.EOF { - return toks - } - c.Assert(err, gc.IsNil) - - if cdata, ok := tok.(xml.CharData); ok { - // It's char data - trim all white space and ignore it - // if it's all blank. - cdata = bytes.TrimSpace(cdata) - if len(cdata) == 0 { - continue - } - tok = cdata - } - toks = append(toks, xml.CopyToken(tok)) - } -} diff -Nru juju-core-2.0~beta15/src/gopkg.in/juju/jujusvg.v1/dependencies.tsv juju-core-2.0.0/src/gopkg.in/juju/jujusvg.v1/dependencies.tsv --- juju-core-2.0~beta15/src/gopkg.in/juju/jujusvg.v1/dependencies.tsv 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/juju/jujusvg.v1/dependencies.tsv 1970-01-01 00:00:00.000000000 +0000 @@ -1,19 +0,0 @@ -github.com/ajstarks/svgo git 89e3ac64b5b3e403a5e7c35ea4f98d45db7b4518 2014-10-04T21:11:59Z -github.com/juju/errors git 1b5e39b83d1835fa480e0c2ddefb040ee82d58b3 2015-09-16T12:56:42Z -github.com/juju/gojsonpointer git afe8b77aa08f272b49e01b82de78510c11f61500 2015-02-04T19:46:29Z -github.com/juju/gojsonreference git f0d24ac5ee330baa21721cdff56d45e4ee42628e 2015-02-04T19:46:33Z -github.com/juju/gojsonschema git e1ad140384f254c82f89450d9a7c8dd38a632838 2015-03-12T17:00:16Z -github.com/juju/loggo git 8477fc936adf0e382d680310047ca27e128a309a 2015-05-27T03:58:39Z -github.com/juju/schema git 1e25943f8c6fd6815282d6f1ac87091d21e14e19 2016-03-01T11:16:46Z -github.com/juju/testing git 162fafccebf20a4207ab93d63b986c230e3f4d2e 2016-04-04T09:43:17Z -github.com/juju/utils git ffea6ead0c374583e876c8357c9db6e98bc71476 2016-05-26T02:52:51Z -github.com/juju/version git ef897ad7f130870348ce306f61332f5335355063 2015-11-27T20:34:00Z -github.com/juju/xml git eb759a627588d35166bc505fceb51b88500e291e 2015-04-13T13:11:21Z -golang.org/x/crypto git aedad9a179ec1ea11b7064c57cbc6dc30d7724ec 2015-08-30T18:06:42Z -gopkg.in/check.v1 git 4f90aeace3a26ad7021961c297b22c42160c7b25 2016-01-05T16:49:36Z -gopkg.in/errgo.v1 git 
66cb46252b94c1f3d65646f54ee8043ab38d766c 2015-10-07T15:31:57Z -gopkg.in/juju/charm.v6-unstable git 503191ddf2590b15db9669c424660ed612d0a30a 2016-05-27T01:46:20Z -gopkg.in/juju/names.v2 git e38bc90539f22af61a9c656d35068bd5f0a5b30a 2016-05-25T23:07:23Z -gopkg.in/mgo.v2 git 4d04138ffef2791c479c0c8bbffc30b34081b8d9 2015-10-26T16:34:53Z -gopkg.in/yaml.v2 git a83829b6f1293c91addabc89d0571c246397bbf4 2016-03-01T20:40:22Z -launchpad.net/tomb bzr gustavo@niemeyer.net-20140529072043-hzcrlnl3ygvg914q 18 diff -Nru juju-core-2.0~beta15/src/gopkg.in/juju/jujusvg.v1/doc.go juju-core-2.0.0/src/gopkg.in/juju/jujusvg.v1/doc.go --- juju-core-2.0~beta15/src/gopkg.in/juju/jujusvg.v1/doc.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/juju/jujusvg.v1/doc.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,8 +0,0 @@ -// Copyright 2014 Canonical, Ltd. -// Licensed under the LGPLv3, see LICENCE file for details. - -// jujusvg generates SVG representations of various Juju artifacts, such as -// charm bundles or live environments. -// -// For more information, please refer to the README file in this directory. 
-package jujusvg diff -Nru juju-core-2.0~beta15/src/gopkg.in/juju/jujusvg.v1/examples/charmworld-missing-placement.yaml juju-core-2.0.0/src/gopkg.in/juju/jujusvg.v1/examples/charmworld-missing-placement.yaml --- juju-core-2.0~beta15/src/gopkg.in/juju/jujusvg.v1/examples/charmworld-missing-placement.yaml 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/juju/jujusvg.v1/examples/charmworld-missing-placement.yaml 1970-01-01 00:00:00.000000000 +0000 @@ -1,29 +0,0 @@ -applications: - mongodb: - charm: "cs:precise/mongodb-21" - num_units: 1 - annotations: - "gui-x": "940.5" - "gui-y": "388.7698359714502" - constraints: "mem=2G cpu-cores=1" - elasticsearch: - charm: "cs:~charming-devs/precise/elasticsearch-2" - num_units: 1 - constraints: "mem=2G cpu-cores=1" - charmworld: - charm: "cs:~juju-jitsu/precise/charmworld-58" - num_units: 1 - expose: true - annotations: - "gui-x": "813.5" - "gui-y": "112.23016402854975" - options: - charm_import_limit: -1 - source: "lp:~bac/charmworld/ingest-local-charms" - revno: 511 -relations: - - - "charmworld:essearch" - - "elasticsearch:essearch" - - - "charmworld:database" - - "mongodb:database" -series: precise diff -Nru juju-core-2.0~beta15/src/gopkg.in/juju/jujusvg.v1/examples/charmworld-no-placement.yaml juju-core-2.0.0/src/gopkg.in/juju/jujusvg.v1/examples/charmworld-no-placement.yaml --- juju-core-2.0~beta15/src/gopkg.in/juju/jujusvg.v1/examples/charmworld-no-placement.yaml 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/juju/jujusvg.v1/examples/charmworld-no-placement.yaml 1970-01-01 00:00:00.000000000 +0000 @@ -1,23 +0,0 @@ -applications: - mongodb: - charm: "cs:precise/mongodb-21" - num_units: 1 - constraints: "mem=2G cpu-cores=1" - elasticsearch: - charm: "cs:~charming-devs/precise/elasticsearch-2" - num_units: 1 - constraints: "mem=2G cpu-cores=1" - charmworld: - charm: "cs:~juju-jitsu/precise/charmworld-58" - num_units: 1 - expose: true - options: - charm_import_limit: -1 - source: 
"lp:~bac/charmworld/ingest-local-charms" - revno: 511 -relations: - - - "charmworld:essearch" - - "elasticsearch:essearch" - - - "charmworld:database" - - "mongodb:database" -series: precise diff -Nru juju-core-2.0~beta15/src/gopkg.in/juju/jujusvg.v1/examples/charmworld.yaml juju-core-2.0.0/src/gopkg.in/juju/jujusvg.v1/examples/charmworld.yaml --- juju-core-2.0~beta15/src/gopkg.in/juju/jujusvg.v1/examples/charmworld.yaml 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/juju/jujusvg.v1/examples/charmworld.yaml 1970-01-01 00:00:00.000000000 +0000 @@ -1,32 +0,0 @@ -applications: - mongodb: - charm: "cs:precise/mongodb-21" - num_units: 1 - annotations: - "gui-x": "940.5" - "gui-y": "388.7698359714502" - constraints: "mem=2G cpu-cores=1" - elasticsearch: - charm: "cs:~charming-devs/precise/elasticsearch-2" - num_units: 1 - annotations: - "gui-x": "490.5" - "gui-y": "369.7698359714502" - constraints: "mem=2G cpu-cores=1" - charmworld: - charm: "cs:~juju-jitsu/precise/charmworld-58" - num_units: 1 - expose: true - annotations: - "gui-x": "813.5" - "gui-y": "112.23016402854975" - options: - charm_import_limit: -1 - source: "lp:~bac/charmworld/ingest-local-charms" - revno: 511 -relations: - - - "charmworld:essearch" - - "elasticsearch:essearch" - - - "charmworld:database" - - "mongodb:database" -series: precise diff -Nru juju-core-2.0~beta15/src/gopkg.in/juju/jujusvg.v1/examples/generatesvg.go juju-core-2.0.0/src/gopkg.in/juju/jujusvg.v1/examples/generatesvg.go --- juju-core-2.0~beta15/src/gopkg.in/juju/jujusvg.v1/examples/generatesvg.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/juju/jujusvg.v1/examples/generatesvg.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,56 +0,0 @@ -package main - -// This is a demo application that uses the jujusvg library to build a bundle SVG -// from a given bundle.yaml file. 
- -import ( - "io/ioutil" - "log" - "os" - "strings" - - "gopkg.in/juju/charm.v6-unstable" - - // Import the jujusvg library and the juju charm library - "gopkg.in/juju/jujusvg.v1" -) - -// iconURL takes a reference to a charm and returns the URL for that charm's icon. -// In this case, we're using the api.jujucharms.com API to provide the icon's URL. -func iconURL(ref *charm.URL) string { - return "https://api.jujucharms.com/charmstore/v4/" + ref.Path() + "/icon.svg" -} - -func main() { - if len(os.Args) != 2 { - log.Fatalf("Please provide the name of a bundle file as the first argument") - } - - // First, we need to read our bundle data into a []byte - bundle_data, err := ioutil.ReadFile(os.Args[1]) - if err != nil { - log.Fatalf("Error reading bundle: %s\n", err) - } - - // Next, generate a charm.Bundle from the bytearray by passing it to ReadNewBundleData. - // This gives us an in-memory object representation of the bundle that we can pass to jujusvg - bundle, err := charm.ReadBundleData(strings.NewReader(string(bundle_data))) - if err != nil { - log.Fatalf("Error parsing bundle: %s\n", err) - } - - fetcher := &jujusvg.HTTPFetcher{ - IconURL: iconURL, - } - // Next, build a canvas of the bundle. This is a simplified version of a charm.Bundle - // that contains just the position information and charm icon URLs necessary to build - // the SVG representation of the bundle - canvas, err := jujusvg.NewFromBundle(bundle, iconURL, fetcher) - if err != nil { - log.Fatalf("Error generating canvas: %s\n", err) - } - - // Finally, marshal that canvas as SVG to os.Stdout; this will print the SVG data - // required to generate an image of the bundle. 
- canvas.Marshal(os.Stdout) -} diff -Nru juju-core-2.0~beta15/src/gopkg.in/juju/jujusvg.v1/examples/kubernetes-bundle.yaml juju-core-2.0.0/src/gopkg.in/juju/jujusvg.v1/examples/kubernetes-bundle.yaml --- juju-core-2.0~beta15/src/gopkg.in/juju/jujusvg.v1/examples/kubernetes-bundle.yaml 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/juju/jujusvg.v1/examples/kubernetes-bundle.yaml 1970-01-01 00:00:00.000000000 +0000 @@ -1,44 +0,0 @@ -applications: - "kubernetes-master": - charm: cs:~kubernetes/trusty/kubernetes-master-5 - annotations: - "gui-x": "600" - "gui-y": "0" - expose: true - docker: - charm: cs:trusty/docker-2 - num_units: 2 - annotations: - "gui-x": "0" - "gui-y": "0" - flannel-docker: - charm: cs:trusty/flannel-docker-5 - annotations: - "gui-x": "0" - "gui-y": "300" - kubernetes: - charm: cs:~kubernetes/trusty/kubernetes-5 - annotations: - "gui-x": "300" - "gui-y": "300" - etcd: - charm: cs:~kubernetes/trusty/etcd-2 - annotations: - "gui-x": "300" - "gui-y": "0" -relations: - - - "flannel-docker:network" - - "docker:network" - - - "flannel-docker:docker-host" - - "docker:juju-info" - - - "flannel-docker:db" - - "etcd:client" - - - "kubernetes:docker-host" - - "docker:juju-info" - - - "etcd:client" - - "kubernetes:etcd" - - - "etcd:client" - - "kubernetes-master:etcd" - - - "kubernetes-master:minions-api" - - "kubernetes:api" -series: trusty diff -Nru juju-core-2.0~beta15/src/gopkg.in/juju/jujusvg.v1/examples/mediawiki-scalable.yaml juju-core-2.0.0/src/gopkg.in/juju/jujusvg.v1/examples/mediawiki-scalable.yaml --- juju-core-2.0~beta15/src/gopkg.in/juju/jujusvg.v1/examples/mediawiki-scalable.yaml 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/juju/jujusvg.v1/examples/mediawiki-scalable.yaml 1970-01-01 00:00:00.000000000 +0000 @@ -1,114 +0,0 @@ -applications: - haproxy: - charm: cs:precise/haproxy-35 - num_units: 1 - options: - default_log: global - default_mode: http - default_options: httplog, dontlognull - 
default_retries: 3 - default_timeouts: queue 20000, client 50000, connect 5000, server 50000 - enable_monitoring: false - global_debug: false - global_group: haproxy - global_log: 127.0.0.1 local0, 127.0.0.1 local1 notice - global_maxconn: 4096 - global_quiet: false - global_spread_checks: 0 - global_user: haproxy - monitoring_allowed_cidr: 127.0.0.1/32 - monitoring_password: changeme - monitoring_port: 10000 - monitoring_stats_refresh: 3 - monitoring_username: haproxy - nagios_context: juju - package_status: install - applications: "- service_name: haproxy_service\n service_host: \"0.0.0.0\"\n service_port: - 80\n service_options: [balance leastconn]\n server_options: maxconn 100\n" - sysctl: "" - annotations: - gui-x: "619" - gui-y: "-406" - mediawiki: - charm: cs:precise/mediawiki-10 - num_units: 1 - options: - debug: false - name: Please set name of wiki - skin: vector - annotations: - gui-x: "618" - gui-y: "-128" - memcached: - charm: cs:precise/memcached-7 - num_units: 1 - options: - connection-limit: 1024 - disable-auto-cleanup: "no" - disable-cas: "no" - disable-large-pages: "no" - extra-options: "" - factor: 1.25 - min-item-size: -1 - nagios_context: juju - request-limit: -1 - size: 768 - slab-page-size: -1 - tcp-port: 11211 - threads: -1 - udp-port: 0 - annotations: - gui-x: "926" - gui-y: "-125" - mysql: - charm: cs:precise/mysql-28 - num_units: 1 - options: - binlog-format: MIXED - block-size: 5 - dataset-size: 80% - flavor: distro - ha-bindiface: eth0 - ha-mcastport: 5411 - max-connections: -1 - preferred-storage-engine: InnoDB - query-cache-size: -1 - query-cache-type: "OFF" - rbd-name: mysql1 - tuning-level: safest - vip_cidr: 24 - vip_iface: eth0 - annotations: - gui-x: "926" - gui-y: "123" - mysql-slave: - charm: cs:precise/mysql-28 - num_units: 1 - options: - binlog-format: MIXED - block-size: 5 - dataset-size: 80% - flavor: distro - ha-bindiface: eth0 - ha-mcastport: 5411 - max-connections: -1 - preferred-storage-engine: InnoDB - 
query-cache-size: -1 - query-cache-type: "OFF" - rbd-name: mysql1 - tuning-level: safest - vip_cidr: 24 - vip_iface: eth0 - annotations: - gui-x: "619" - gui-y: "124" -series: precise -relations: -- - mediawiki:cache - - memcached:cache -- - haproxy:reverseproxy - - mediawiki:website -- - mysql-slave:slave - - mysql:master -- - mediawiki:slave - - mysql-slave:db diff -Nru juju-core-2.0~beta15/src/gopkg.in/juju/jujusvg.v1/examples/openstack.yaml juju-core-2.0.0/src/gopkg.in/juju/jujusvg.v1/examples/openstack.yaml --- juju-core-2.0~beta15/src/gopkg.in/juju/jujusvg.v1/examples/openstack.yaml 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/juju/jujusvg.v1/examples/openstack.yaml 1970-01-01 00:00:00.000000000 +0000 @@ -1,297 +0,0 @@ -machines: - '0': - constraints: arch=amd64 - series: trusty - '1': - constraints: arch=amd64 - series: trusty - '2': - constraints: arch=amd64 - series: trusty - '3': - constraints: arch=amd64 - series: trusty -relations: -- - nova-compute:amqp - - rabbitmq-server:amqp -- - neutron-gateway:amqp - - rabbitmq-server:amqp -- - keystone:shared-db - - mysql:shared-db -- - nova-cloud-controller:identity-service - - keystone:identity-service -- - glance:identity-service - - keystone:identity-service -- - neutron-api:identity-service - - keystone:identity-service -- - neutron-openvswitch:neutron-plugin-api - - neutron-api:neutron-plugin-api -- - neutron-api:shared-db - - mysql:shared-db -- - neutron-api:amqp - - rabbitmq-server:amqp -- - neutron-gateway:neutron-plugin-api - - neutron-api:neutron-plugin-api -- - glance:shared-db - - mysql:shared-db -- - glance:amqp - - rabbitmq-server:amqp -- - nova-cloud-controller:image-service - - glance:image-service -- - nova-compute:image-service - - glance:image-service -- - nova-cloud-controller:cloud-compute - - nova-compute:cloud-compute -- - nova-cloud-controller:amqp - - rabbitmq-server:amqp -- - nova-cloud-controller:quantum-network-service - - 
neutron-gateway:quantum-network-service -- - nova-compute:neutron-plugin - - neutron-openvswitch:neutron-plugin -- - neutron-openvswitch:amqp - - rabbitmq-server:amqp -- - openstack-dashboard:identity-service - - keystone:identity-service -- - nova-cloud-controller:shared-db - - mysql:shared-db -- - nova-cloud-controller:neutron-api - - neutron-api:neutron-api -- - cinder:image-service - - glance:image-service -- - cinder:amqp - - rabbitmq-server:amqp -- - cinder:identity-service - - keystone:identity-service -- - cinder:cinder-volume-service - - nova-cloud-controller:cinder-volume-service -- - cinder-ceph:storage-backend - - cinder:storage-backend -- - ceph:client - - nova-compute:ceph -- - cinder:shared-db - - mysql:shared-db -- - ceph:client - - cinder-ceph:ceph -- - ceph:client - - glance:ceph -- - ceph-osd:mon - - ceph:osd -- - ntp:juju-info - - nova-compute:juju-info -- - ntp:juju-info - - neutron-gateway:juju-info -- - ceph-radosgw:mon - - ceph:radosgw -- - ceph-radosgw:identity-service - - keystone:identity-service -- - ceilometer:amqp - - rabbitmq-server:amqp -- - ceilometer-agent:ceilometer-service - - ceilometer:ceilometer-service -- - ceilometer:identity-service - - keystone:identity-service -- - ceilometer:identity-notifications - - keystone:identity-notifications -- - ceilometer-agent:nova-ceilometer - - nova-compute:nova-ceilometer -- - ceilometer:shared-db - - mongodb:database -series: trusty -applications: - ceilometer: - annotations: - gui-x: '1288.8744298356794' - gui-y: '0.7040786325134718' - charm: cs:trusty/ceilometer-15 - num_units: 1 - options: - openstack-origin: cloud:trusty-liberty - to: - - lxc:2 - ceilometer-agent: - annotations: - gui-x: '1288.9999389648438' - gui-y: '503' - charm: cs:trusty/ceilometer-agent-11 - ceph: - annotations: - gui-x: '750' - gui-y: '500' - charm: cs:trusty/ceph-42 - num_units: 3 - options: - fsid: 5a791d94-980b-11e4-b6f6-3c970e8b1cf7 - monitor-secret: AQAi5a9UeJXUExAA+By9u+GPhl8/XiUQ4nwI3A== - osd-devices: 
/dev/sdb - osd-reformat: 'yes' - source: cloud:trusty-liberty - to: - - '1' - - '2' - - '3' - ceph-osd: - annotations: - gui-x: '1000' - gui-y: '500' - charm: cs:trusty/ceph-osd-14 - num_units: 1 - options: - osd-devices: /dev/sdb - osd-reformat: 'yes' - source: cloud:trusty-liberty - to: - - '0' - ceph-radosgw: - annotations: - gui-x: '1000' - gui-y: '250' - charm: cs:trusty/ceph-radosgw-18 - num_units: 1 - options: - source: cloud:trusty-liberty - use-embedded-webserver: true - to: - - lxc:0 - cinder: - annotations: - gui-x: '750' - gui-y: '0' - charm: cs:trusty/cinder-31 - num_units: 1 - options: - block-device: None - glance-api-version: 2 - ha-mcastport: 5401 - openstack-origin: cloud:trusty-liberty - to: - - lxc:1 - cinder-ceph: - annotations: - gui-x: '750' - gui-y: '250' - charm: cs:trusty/cinder-ceph-14 - num_units: 0 - glance: - annotations: - gui-x: '250' - gui-y: '0' - charm: cs:trusty/glance-28 - num_units: 1 - options: - ha-mcastport: 5402 - openstack-origin: cloud:trusty-liberty - to: - - lxc:2 - keystone: - annotations: - gui-x: '500' - gui-y: '0' - charm: cs:trusty/keystone-31 - num_units: 1 - options: - admin-password: openstack - ha-mcastport: 5403 - openstack-origin: cloud:trusty-liberty - to: - - lxc:3 - mongodb: - annotations: - gui-x: '1287.9999389648438' - gui-y: '251.24996948242188' - charm: cs:trusty/mongodb-28 - num_units: 1 - to: - - lxc:1 - mysql: - annotations: - gui-x: '0' - gui-y: '250' - charm: cs:trusty/percona-cluster-31 - num_units: 1 - options: - max-connections: 20000 - to: - - lxc:0 - neutron-api: - annotations: - gui-x: '500' - gui-y: '500' - charm: cs:trusty/neutron-api-21 - num_units: 1 - options: - neutron-security-groups: true - openstack-origin: cloud:trusty-liberty - to: - - lxc:1 - neutron-gateway: - annotations: - gui-x: '0' - gui-y: '0' - charm: cs:trusty/neutron-gateway-7 - num_units: 1 - options: - ext-port: eth1 - openstack-origin: cloud:trusty-liberty - to: - - '0' - neutron-openvswitch: - annotations: - gui-x: 
'250' - gui-y: '500' - charm: cs:trusty/neutron-openvswitch-13 - num_units: 0 - nova-cloud-controller: - annotations: - gui-x: '0' - gui-y: '500' - charm: cs:trusty/nova-cloud-controller-64 - num_units: 1 - options: - network-manager: Neutron - openstack-origin: cloud:trusty-liberty - quantum-security-groups: 'yes' - to: - - lxc:2 - nova-compute: - annotations: - gui-x: '250' - gui-y: '250' - charm: cs:trusty/nova-compute-33 - num_units: 3 - options: - enable-live-migration: true - enable-resize: true - manage-neutron-plugin-legacy-mode: false - migration-auth-type: ssh - openstack-origin: cloud:trusty-liberty - to: - - '1' - - '2' - - '3' - ntp: - annotations: - gui-x: '1000' - gui-y: '0' - charm: cs:trusty/ntp-14 - num_units: 0 - openstack-dashboard: - annotations: - gui-x: '500' - gui-y: '-250' - charm: cs:trusty/openstack-dashboard-19 - num_units: 1 - options: - openstack-origin: cloud:trusty-liberty - to: - - lxc:3 - rabbitmq-server: - annotations: - gui-x: '500' - gui-y: '250' - charm: cs:trusty/rabbitmq-server-42 - num_units: 1 - options: - source: cloud:trusty-liberty - to: - - lxc:0 - diff -Nru juju-core-2.0~beta15/src/gopkg.in/juju/jujusvg.v1/hull.go juju-core-2.0.0/src/gopkg.in/juju/jujusvg.v1/hull.go --- juju-core-2.0~beta15/src/gopkg.in/juju/jujusvg.v1/hull.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/juju/jujusvg.v1/hull.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,105 +0,0 @@ -package jujusvg - -import ( - "image" - "math" - "sort" -) - -// getPointOutside returns a point that is outside the hull of existing placed -// vertices so that an object can be placed on the canvas without overlapping -// others. -func getPointOutside(vertices []image.Point, padding image.Point) image.Point { - // Shortcut some easy solutions. 
- switch len(vertices) { - case 0: - return image.Point{0, 0} - case 1: - return image.Point{ - vertices[0].X + padding.X, - vertices[0].Y + padding.Y, - } - case 2: - return image.Point{ - int(math.Max(float64(vertices[0].X), float64(vertices[1].X))) + padding.X, - int(math.Max(float64(vertices[0].Y), float64(vertices[1].Y))) + padding.Y, - } - } - hull := convexHull(vertices) - // Find point that is the furthest to the right on the hull. - var rightmost image.Point - maxDistance := 0.0 - for _, vertex := range hull { - fromOrigin := line{p0: vertex, p1: image.Point{0, 0}} - distance := fromOrigin.length() - if math.Abs(distance) > maxDistance { - maxDistance = math.Abs(distance) - rightmost = vertex - } - } - return image.Point{ - rightmost.X + padding.X, - rightmost.Y + padding.Y, - } -} - -// vertexSet implements sort.Interface for image.Point, sorting first by X, then -// by Y -type vertexSet []image.Point - -func (vs vertexSet) Len() int { return len(vs) } -func (vs vertexSet) Swap(i, j int) { vs[i], vs[j] = vs[j], vs[i] } -func (vs vertexSet) Less(i, j int) bool { - if vs[i].X == vs[j].X { - return vs[i].Y < vs[j].Y - } - return vs[i].X < vs[j].X -} - -// convexHull takes a list of vertices and returns the set of vertices which -// make up the convex hull encapsulating all vertices on a plane. -func convexHull(vertices []image.Point) []image.Point { - // Simple cases can be shortcutted. - if len(vertices) == 0 { - return []image.Point{ - {0, 0}, - } - } - // For our purposes, we can assume that three vertices form a hull. 
- if len(vertices) < 4 { - return vertices - } - - sort.Sort(vertexSet(vertices)) - var lower, upper []image.Point - for _, vertex := range vertices { - for len(lower) >= 2 && cross(lower[len(lower)-2], lower[len(lower)-1], vertex) <= 0 { - lower = lower[:len(lower)-1] - } - lower = append(lower, vertex) - } - - for _, vertex := range reverse(vertices) { - for len(upper) >= 2 && cross(upper[len(upper)-2], upper[len(upper)-1], vertex) <= 0 { - upper = upper[:len(upper)-1] - } - upper = append(upper, vertex) - } - return append(lower[:len(lower)-1], upper[:len(upper)-1]...) -} - -// cross finds the 2D cross-product of OA and OB vectors. -// Returns a positive value if OAB makes a counter-clockwise turn, a negative -// value if OAB makes a clockwise turn, and zero if the points are collinear. -func cross(o, a, b image.Point) int { - return (a.X-o.X)*(b.Y-o.Y) - (a.Y-o.Y)*(b.X-o.X) -} - -// reverse reverses a slice of Points for use in finding the upper hull. -func reverse(vertices []image.Point) []image.Point { - for i := 0; i < len(vertices)/2; i++ { - opp := len(vertices) - (i + 1) - vertices[i], vertices[opp] = vertices[opp], vertices[i] - } - return vertices -} diff -Nru juju-core-2.0~beta15/src/gopkg.in/juju/jujusvg.v1/hull_test.go juju-core-2.0.0/src/gopkg.in/juju/jujusvg.v1/hull_test.go --- juju-core-2.0~beta15/src/gopkg.in/juju/jujusvg.v1/hull_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/juju/jujusvg.v1/hull_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,77 +0,0 @@ -package jujusvg - -import ( - "image" - - gc "gopkg.in/check.v1" -) - -type HullSuite struct{} - -var _ = gc.Suite(&HullSuite{}) - -func (s *HullSuite) TestGetPointOutside(c *gc.C) { - var tests = []struct { - about string - vertices []image.Point - expected image.Point - }{ - { - about: "zero vertices", - vertices: []image.Point{}, - expected: image.Point{0, 0}, - }, - { - about: "one vertex", - vertices: []image.Point{{0, 0}}, - expected: image.Point{10, 10}, - 
}, - { - about: "two vertices", - vertices: []image.Point{{0, 0}, {10, 10}}, - expected: image.Point{20, 20}, - }, - { - about: "three vertices (convexHull fall through)", - vertices: []image.Point{{0, 0}, {0, 10}, {10, 0}}, - expected: image.Point{10, 20}, - }, - { - about: "four vertices", - vertices: []image.Point{{0, 0}, {0, 10}, {10, 0}, {10, 10}}, - expected: image.Point{20, 20}, - }, - } - for _, test := range tests { - c.Log(test.about) - c.Assert(getPointOutside(test.vertices, image.Point{10, 10}), gc.Equals, test.expected) - } -} - -func (s *HullSuite) TestConvexHull(c *gc.C) { - // Zero vertices - vertices := []image.Point{} - c.Assert(convexHull(vertices), gc.DeepEquals, []image.Point{{0, 0}}) - - // Identities - vertices = []image.Point{{1, 1}} - c.Assert(convexHull(vertices), gc.DeepEquals, vertices) - - vertices = []image.Point{{1, 1}, {2, 2}} - c.Assert(convexHull(vertices), gc.DeepEquals, vertices) - - vertices = []image.Point{{1, 1}, {2, 2}, {1, 2}} - c.Assert(convexHull(vertices), gc.DeepEquals, vertices) - - // > 3 vertices - vertices = []image.Point{} - for i := 0; i < 100; i++ { - vertices = append(vertices, image.Point{i / 10, i % 10}) - } - c.Assert(convexHull(vertices), gc.DeepEquals, []image.Point{ - {0, 0}, - {9, 0}, - {9, 9}, - {0, 9}, - }) -} diff -Nru juju-core-2.0~beta15/src/gopkg.in/juju/jujusvg.v1/iconfetcher.go juju-core-2.0.0/src/gopkg.in/juju/jujusvg.v1/iconfetcher.go --- juju-core-2.0~beta15/src/gopkg.in/juju/jujusvg.v1/iconfetcher.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/juju/jujusvg.v1/iconfetcher.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,139 +0,0 @@ -package jujusvg - -import ( - "bytes" - "fmt" - "io/ioutil" - "net/http" - "sync" - - "github.com/juju/utils/parallel" - "github.com/juju/xml" - "gopkg.in/errgo.v1" - "gopkg.in/juju/charm.v6-unstable" -) - -// An IconFetcher provides functionality for retrieving icons for the charms -// within a given bundle. 
The FetchIcons function accepts a bundle, and -// returns a map from charm paths to icon data. -type IconFetcher interface { - FetchIcons(*charm.BundleData) (map[string][]byte, error) -} - -// LinkFetcher fetches icons as links so that they are included within the SVG -// as remote resources using SVG tags. -type LinkFetcher struct { - // IconURL returns the URL of the entity for embedding - IconURL func(*charm.URL) string -} - -// FetchIcons generates the svg image tags given an appropriate URL, generating -// tags only for unique icons. -func (l *LinkFetcher) FetchIcons(b *charm.BundleData) (map[string][]byte, error) { - // Maintain a list of icons that have already been fetched. - alreadyFetched := make(map[string]bool) - - // Build the map of icons. - icons := make(map[string][]byte) - for _, applicationData := range b.Applications { - charmId, err := charm.ParseURL(applicationData.Charm) - if err != nil { - return nil, errgo.Notef(err, "cannot parse charm %q", applicationData.Charm) - } - path := charmId.Path() - - // Don't duplicate icons in the map. - if !alreadyFetched[path] { - alreadyFetched[path] = true - icons[path] = []byte(fmt.Sprintf(` - - - `, escapeString(l.IconURL(charmId)))) - } - } - return icons, nil -} - -// Wrap around xml.EscapeText to make it more string-friendly. -func escapeString(s string) string { - var buf bytes.Buffer - xml.EscapeText(&buf, []byte(s)) - return buf.String() -} - -// HTTPFetcher is an implementation of IconFetcher which retrieves charm -// icons from the web using the URL generated by IconURL on that charm. The -// HTTP Client used may be overridden by an instance of http.Client. The icons -// may optionally be fetched concurrently. -type HTTPFetcher struct { - // Concurrency specifies the number of GoRoutines to use when fetching - // icons. If it is not positive, 10 will be used. Setting this to 1 - // makes this method nominally synchronous. 
- Concurrency int - - // IconURL returns the URL from which to fetch the given entity's icon SVG. - IconURL func(*charm.URL) string - - // Client specifies what HTTP client to use; if it is not provided, - // http.DefaultClient will be used. - Client *http.Client -} - -// FetchIcons retrieves icon SVGs over HTTP. If specified in the struct, icons -// will be fetched concurrently. -func (h *HTTPFetcher) FetchIcons(b *charm.BundleData) (map[string][]byte, error) { - client := http.DefaultClient - if h.Client != nil { - client = h.Client - } - concurrency := h.Concurrency - if concurrency <= 0 { - concurrency = 10 - } - var iconsMu sync.Mutex // Guards icons. - icons := make(map[string][]byte) - alreadyFetched := make(map[string]bool) - run := parallel.NewRun(concurrency) - for _, applicationData := range b.Applications { - charmId, err := charm.ParseURL(applicationData.Charm) - if err != nil { - return nil, errgo.Notef(err, "cannot parse charm %q", applicationData.Charm) - } - path := charmId.Path() - if alreadyFetched[path] { - continue - } - alreadyFetched[path] = true - run.Do(func() error { - icon, err := h.fetchIcon(h.IconURL(charmId), client) - if err != nil { - return err - } - iconsMu.Lock() - defer iconsMu.Unlock() - icons[path] = icon - return nil - }) - } - if err := run.Wait(); err != nil { - return nil, err - } - return icons, nil -} - -// fetchIcon retrieves a single icon svg over HTTP. 
-func (h *HTTPFetcher) fetchIcon(url string, client *http.Client) ([]byte, error) { - resp, err := client.Get(url) - if err != nil { - return nil, errgo.Notef(err, "HTTP error fetching %s: %v", url, err) - } - defer resp.Body.Close() - if resp.StatusCode != http.StatusOK { - return nil, errgo.Newf("cannot retrieve icon from %s: %s", url, resp.Status) - } - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return nil, errgo.Notef(err, "could not read icon data from url %s", url) - } - return body, nil -} diff -Nru juju-core-2.0~beta15/src/gopkg.in/juju/jujusvg.v1/iconfetcher_test.go juju-core-2.0.0/src/gopkg.in/juju/jujusvg.v1/iconfetcher_test.go --- juju-core-2.0~beta15/src/gopkg.in/juju/jujusvg.v1/iconfetcher_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/juju/jujusvg.v1/iconfetcher_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,119 +0,0 @@ -package jujusvg - -import ( - "fmt" - "net/http" - "net/http/httptest" - "strings" - - gc "gopkg.in/check.v1" - "gopkg.in/juju/charm.v6-unstable" -) - -type IconFetcherSuite struct{} - -var _ = gc.Suite(&IconFetcherSuite{}) - -func (s *IconFetcherSuite) TestLinkFetchIcons(c *gc.C) { - tests := map[string][]byte{ - "~charming-devs/precise/elasticsearch-2": []byte(` - - - `), - "~juju-jitsu/precise/charmworld-58": []byte(` - - - `), - "precise/mongodb-21": []byte(` - - - `), - } - iconURL := func(ref *charm.URL) string { - return "/" + ref.Path() + ".svg" - } - b, err := charm.ReadBundleData(strings.NewReader(bundle)) - c.Assert(err, gc.IsNil) - err = b.Verify(nil, nil) - c.Assert(err, gc.IsNil) - fetcher := LinkFetcher{ - IconURL: iconURL, - } - iconMap, err := fetcher.FetchIcons(b) - c.Assert(err, gc.IsNil) - for charm, link := range tests { - assertXMLEqual(c, []byte(iconMap[charm]), []byte(link)) - } -} - -func (s *IconFetcherSuite) TestHTTPFetchIcons(c *gc.C) { - fetchCount := 0 - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - fetchCount++ 
- fmt.Fprintln(w, fmt.Sprintf("%s", r.URL.Path)) - })) - defer ts.Close() - - tsIconURL := func(ref *charm.URL) string { - return ts.URL + "/" + ref.Path() + ".svg" - } - b, err := charm.ReadBundleData(strings.NewReader(bundle)) - c.Assert(err, gc.IsNil) - err = b.Verify(nil, nil) - c.Assert(err, gc.IsNil) - // Only one copy of precise/mongodb-21 - b.Applications["duplicateApplication"] = &charm.ApplicationSpec{ - Charm: "cs:precise/mongodb-21", - NumUnits: 1, - } - fetcher := HTTPFetcher{ - Concurrency: 1, - IconURL: tsIconURL, - } - iconMap, err := fetcher.FetchIcons(b) - c.Assert(err, gc.IsNil) - c.Assert(iconMap, gc.DeepEquals, map[string][]byte{ - "~charming-devs/precise/elasticsearch-2": []byte("/~charming-devs/precise/elasticsearch-2.svg\n"), - "~juju-jitsu/precise/charmworld-58": []byte("/~juju-jitsu/precise/charmworld-58.svg\n"), - "precise/mongodb-21": []byte("/precise/mongodb-21.svg\n"), - }) - - fetcher.Concurrency = 10 - iconMap, err = fetcher.FetchIcons(b) - c.Assert(err, gc.IsNil) - c.Assert(iconMap, gc.DeepEquals, map[string][]byte{ - "~charming-devs/precise/elasticsearch-2": []byte("/~charming-devs/precise/elasticsearch-2.svg\n"), - "~juju-jitsu/precise/charmworld-58": []byte("/~juju-jitsu/precise/charmworld-58.svg\n"), - "precise/mongodb-21": []byte("/precise/mongodb-21.svg\n"), - }) - c.Assert(fetchCount, gc.Equals, 6) -} - -func (s *IconFetcherSuite) TestHTTPBadIconURL(c *gc.C) { - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - http.Error(w, "bad-wolf", http.StatusForbidden) - return - })) - defer ts.Close() - - tsIconURL := func(ref *charm.URL) string { - return ts.URL + "/" + ref.Path() + ".svg" - } - - b, err := charm.ReadBundleData(strings.NewReader(bundle)) - c.Assert(err, gc.IsNil) - err = b.Verify(nil, nil) - c.Assert(err, gc.IsNil) - fetcher := HTTPFetcher{ - Concurrency: 1, - IconURL: tsIconURL, - } - iconMap, err := fetcher.FetchIcons(b) - c.Assert(err, gc.ErrorMatches, fmt.Sprintf("cannot 
retrieve icon from %s.+\\.svg: 403 Forbidden.*", ts.URL)) - c.Assert(iconMap, gc.IsNil) - - fetcher.Concurrency = 10 - iconMap, err = fetcher.FetchIcons(b) - c.Assert(err, gc.ErrorMatches, fmt.Sprintf("cannot retrieve icon from %s.+\\.svg: 403 Forbidden.*", ts.URL)) - c.Assert(iconMap, gc.IsNil) -} diff -Nru juju-core-2.0~beta15/src/gopkg.in/juju/jujusvg.v1/jujusvg.go juju-core-2.0.0/src/gopkg.in/juju/jujusvg.v1/jujusvg.go --- juju-core-2.0~beta15/src/gopkg.in/juju/jujusvg.v1/jujusvg.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/juju/jujusvg.v1/jujusvg.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,101 +0,0 @@ -package jujusvg // import "gopkg.in/juju/jujusvg.v1" - -import ( - "fmt" - "image" - "math" - "sort" - "strconv" - "strings" - - "gopkg.in/errgo.v1" - "gopkg.in/juju/charm.v6-unstable" -) - -// NewFromBundle returns a new Canvas that can be used -// to generate a graphical representation of the given bundle -// data. The iconURL function is used to generate a URL -// that refers to an SVG for the supplied charm URL. -// If fetcher is non-nil, it will be used to fetch icon -// contents for any icons embedded within the charm, -// allowing the generated bundle to be self-contained. If fetcher -// is nil, a default fetcher which refers to icons by their -// URLs as svg tags will be used. -func NewFromBundle(b *charm.BundleData, iconURL func(*charm.URL) string, fetcher IconFetcher) (*Canvas, error) { - if fetcher == nil { - fetcher = &LinkFetcher{ - IconURL: iconURL, - } - } - iconMap, err := fetcher.FetchIcons(b) - if err != nil { - return nil, err - } - - var canvas Canvas - - // Verify the bundle to make sure that all the invariants - // that we depend on below actually hold true. - if err := b.Verify(nil, nil); err != nil { - return nil, errgo.Notef(err, "cannot verify bundle") - } - // Go through all applications in alphabetical order so that - // we get consistent results. 
- applicationNames := make([]string, 0, len(b.Applications)) - for name := range b.Applications { - applicationNames = append(applicationNames, name) - } - sort.Strings(applicationNames) - applications := make(map[string]*application) - applicationsNeedingPlacement := make(map[string]bool) - for _, name := range applicationNames { - applicationData := b.Applications[name] - x, xerr := strconv.ParseFloat(applicationData.Annotations["gui-x"], 64) - y, yerr := strconv.ParseFloat(applicationData.Annotations["gui-y"], 64) - if xerr != nil || yerr != nil { - if applicationData.Annotations["gui-x"] == "" && applicationData.Annotations["gui-y"] == "" { - applicationsNeedingPlacement[name] = true - x = 0 - y = 0 - } else { - return nil, errgo.Newf("application %q does not have a valid position", name) - } - } - charmID, err := charm.ParseURL(applicationData.Charm) - if err != nil { - // cannot actually happen, as we've verified it. - return nil, errgo.Notef(err, "cannot parse charm %q", applicationData.Charm) - } - icon := iconMap[charmID.Path()] - svc := &application{ - name: name, - charmPath: charmID.Path(), - point: image.Point{int(x), int(y)}, - iconUrl: iconURL(charmID), - iconSrc: icon, - } - applications[name] = svc - } - padding := image.Point{int(math.Floor(applicationBlockSize * 1.5)), int(math.Floor(applicationBlockSize * 0.5))} - for name := range applicationsNeedingPlacement { - vertices := []image.Point{} - for n, svc := range applications { - if !applicationsNeedingPlacement[n] { - vertices = append(vertices, svc.point) - } - } - applications[name].point = getPointOutside(vertices, padding) - applicationsNeedingPlacement[name] = false - } - for _, name := range applicationNames { - canvas.addApplication(applications[name]) - } - for _, relation := range b.Relations { - canvas.addRelation(&applicationRelation{ - name: fmt.Sprintf("%s %s", relation[0], relation[1]), - applicationA: applications[strings.Split(relation[0], ":")[0]], - applicationB: 
applications[strings.Split(relation[1], ":")[0]], - }) - } - return &canvas, nil -} diff -Nru juju-core-2.0~beta15/src/gopkg.in/juju/jujusvg.v1/jujusvg_test.go juju-core-2.0.0/src/gopkg.in/juju/jujusvg.v1/jujusvg_test.go --- juju-core-2.0~beta15/src/gopkg.in/juju/jujusvg.v1/jujusvg_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/juju/jujusvg.v1/jujusvg_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,416 +0,0 @@ -package jujusvg - -import ( - "bytes" - "fmt" - "net/http" - "net/http/httptest" - "strings" - "testing" - - gc "gopkg.in/check.v1" - "gopkg.in/juju/charm.v6-unstable" -) - -func Test(t *testing.T) { gc.TestingT(t) } - -type newSuite struct{} - -var _ = gc.Suite(&newSuite{}) - -var bundle = ` -applications: - mongodb: - charm: "cs:precise/mongodb-21" - num_units: 1 - annotations: - "gui-x": "940.5" - "gui-y": "388.7698359714502" - constraints: "mem=2G cpu-cores=1" - elasticsearch: - charm: "cs:~charming-devs/precise/elasticsearch-2" - num_units: 1 - annotations: - "gui-x": "490.5" - "gui-y": "369.7698359714502" - constraints: "mem=2G cpu-cores=1" - charmworld: - charm: "cs:~juju-jitsu/precise/charmworld-58" - num_units: 1 - expose: true - annotations: - "gui-x": "813.5" - "gui-y": "112.23016402854975" - options: - charm_import_limit: -1 - source: "lp:~bac/charmworld/ingest-local-charms" - revno: 511 -relations: - - - "charmworld:essearch" - - "elasticsearch:essearch" - - - "charmworld:database" - - "mongodb:database" -series: precise -` - -func iconURL(ref *charm.URL) string { - return "http://0.1.2.3/" + ref.Path() + ".svg" -} - -type emptyFetcher struct{} - -func (f *emptyFetcher) FetchIcons(*charm.BundleData) (map[string][]byte, error) { - return nil, nil -} - -type errFetcher string - -func (f *errFetcher) FetchIcons(*charm.BundleData) (map[string][]byte, error) { - return nil, fmt.Errorf("%s", *f) -} - -func (s *newSuite) TestNewFromBundle(c *gc.C) { - b, err := charm.ReadBundleData(strings.NewReader(bundle)) - 
c.Assert(err, gc.IsNil) - err = b.Verify(nil, nil) - c.Assert(err, gc.IsNil) - - cvs, err := NewFromBundle(b, iconURL, nil) - c.Assert(err, gc.IsNil) - - var buf bytes.Buffer - cvs.Marshal(&buf) - c.Logf("%s", buf.String()) - assertXMLEqual(c, buf.Bytes(), []byte(` - - - - - - - - - - - - - - - - - - - - -charmworld:essearch elasticsearch:essearch - - - - - - -charmworld:database mongodb:database - - - - - - - - -charmworld - - - -charmworld - - -elasticsearch - - - -elasticsearch - - -mongodb - - - -mongodb - - - -`)) -} - -func (s *newSuite) TestNewFromBundleWithUnplacedApplication(c *gc.C) { - b, err := charm.ReadBundleData(strings.NewReader(bundle)) - c.Assert(err, gc.IsNil) - err = b.Verify(nil, nil) - c.Assert(err, gc.IsNil) - b.Applications["charmworld"].Annotations["gui-x"] = "" - b.Applications["charmworld"].Annotations["gui-y"] = "" - - cvs, err := NewFromBundle(b, iconURL, nil) - c.Assert(err, gc.IsNil) - - var buf bytes.Buffer - cvs.Marshal(&buf) - c.Logf("%s", buf.String()) - assertXMLEqual(c, buf.Bytes(), []byte(` - - - - - - - - - - - - - - - - - - - - -charmworld:essearch elasticsearch:essearch - - - - - - -charmworld:database mongodb:database - - - - - - - - -charmworld - - - -charmworld - - -elasticsearch - - - -elasticsearch - - -mongodb - - - -mongodb - - - -`)) -} - -func (s *newSuite) TestWithFetcher(c *gc.C) { - b, err := charm.ReadBundleData(strings.NewReader(bundle)) - c.Assert(err, gc.IsNil) - err = b.Verify(nil, nil) - c.Assert(err, gc.IsNil) - - cvs, err := NewFromBundle(b, iconURL, new(emptyFetcher)) - c.Assert(err, gc.IsNil) - - var buf bytes.Buffer - cvs.Marshal(&buf) - c.Logf("%s", buf.String()) - assertXMLEqual(c, buf.Bytes(), []byte(` - - - - - - - - - - - - - - -charmworld:essearch elasticsearch:essearch - - - - - - -charmworld:database mongodb:database - - - - - - - - -charmworld - - - -charmworld - - -elasticsearch - - - -elasticsearch - - -mongodb - - - -mongodb - - - -`)) -} - -func (s *newSuite) TestDefaultHTTPFetcher(c 
*gc.C) { - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - fmt.Fprintln(w, "") - })) - defer ts.Close() - - tsIconUrl := func(ref *charm.URL) string { - return ts.URL + "/" + ref.Path() + ".svg" - } - - b, err := charm.ReadBundleData(strings.NewReader(bundle)) - c.Assert(err, gc.IsNil) - err = b.Verify(nil, nil) - c.Assert(err, gc.IsNil) - - cvs, err := NewFromBundle(b, tsIconUrl, &HTTPFetcher{IconURL: tsIconUrl}) - c.Assert(err, gc.IsNil) - - var buf bytes.Buffer - cvs.Marshal(&buf) - c.Logf("%s", buf.String()) - assertXMLEqual(c, buf.Bytes(), []byte(` - - - - - - - - - - - - - - -charmworld:essearch elasticsearch:essearch - - - - - - -charmworld:database mongodb:database - - - - - - - - -charmworld - - - -charmworld - - -elasticsearch - - - -elasticsearch - - -mongodb - - - -mongodb - - - -`)) - -} - -func (s *newSuite) TestFetcherError(c *gc.C) { - b, err := charm.ReadBundleData(strings.NewReader(bundle)) - c.Assert(err, gc.IsNil) - err = b.Verify(nil, nil) - c.Assert(err, gc.IsNil) - - ef := errFetcher("bad-wolf") - _, err = NewFromBundle(b, iconURL, &ef) - c.Assert(err, gc.ErrorMatches, "bad-wolf") -} - -func (s *newSuite) TestWithBadBundle(c *gc.C) { - b, err := charm.ReadBundleData(strings.NewReader(bundle)) - c.Assert(err, gc.IsNil) - b.Relations[0][0] = "evil-unknown-application" - cvs, err := NewFromBundle(b, iconURL, nil) - c.Assert(err, gc.ErrorMatches, "cannot verify bundle: .*") - c.Assert(cvs, gc.IsNil) -} - -func (s *newSuite) TestWithBadPosition(c *gc.C) { - b, err := charm.ReadBundleData(strings.NewReader(bundle)) - c.Assert(err, gc.IsNil) - - b.Applications["charmworld"].Annotations["gui-x"] = "bad" - cvs, err := NewFromBundle(b, iconURL, nil) - c.Assert(err, gc.ErrorMatches, `application "charmworld" does not have a valid position`) - c.Assert(cvs, gc.IsNil) - - b, err = charm.ReadBundleData(strings.NewReader(bundle)) - c.Assert(err, gc.IsNil) - - b.Applications["charmworld"].Annotations["gui-y"] = 
"bad" - cvs, err = NewFromBundle(b, iconURL, nil) - c.Assert(err, gc.ErrorMatches, `application "charmworld" does not have a valid position`) - c.Assert(cvs, gc.IsNil) -} diff -Nru juju-core-2.0~beta15/src/gopkg.in/juju/jujusvg.v1/LICENSE juju-core-2.0.0/src/gopkg.in/juju/jujusvg.v1/LICENSE --- juju-core-2.0~beta15/src/gopkg.in/juju/jujusvg.v1/LICENSE 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/juju/jujusvg.v1/LICENSE 1970-01-01 00:00:00.000000000 +0000 @@ -1,191 +0,0 @@ -All files in this repository are licensed as follows. If you contribute -to this repository, it is assumed that you license your contribution -under the same license unless you state otherwise. - -All files Copyright (C) 2015 Canonical Ltd. unless otherwise specified in the file. - -This software is licensed under the LGPLv3, included below. - -As a special exception to the GNU Lesser General Public License version 3 -("LGPL3"), the copyright holders of this Library give you permission to -convey to a third party a Combined Work that links statically or dynamically -to this Library without providing any Minimal Corresponding Source or -Minimal Application Code as set out in 4d or providing the installation -information set out in section 4e, provided that you comply with the other -provisions of LGPL3 and provided that you meet, for the Application the -terms and conditions of the license(s) which apply to the Application. - -Except as stated in this special exception, the provisions of LGPL3 will -continue to comply in full to this Library. If you modify this Library, you -may apply this exception to your version of this Library, but you are not -obliged to do so. If you do not wish to do so, delete this exception -statement from your version. This exception does not (and cannot) modify any -license terms which apply to the Application, with which you must still -comply. 
- - - GNU LESSER GENERAL PUBLIC LICENSE - Version 3, 29 June 2007 - - Copyright (C) 2007 Free Software Foundation, Inc. - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - - - This version of the GNU Lesser General Public License incorporates -the terms and conditions of version 3 of the GNU General Public -License, supplemented by the additional permissions listed below. - - 0. Additional Definitions. - - As used herein, "this License" refers to version 3 of the GNU Lesser -General Public License, and the "GNU GPL" refers to version 3 of the GNU -General Public License. - - "The Library" refers to a covered work governed by this License, -other than an Application or a Combined Work as defined below. - - An "Application" is any work that makes use of an interface provided -by the Library, but which is not otherwise based on the Library. -Defining a subclass of a class defined by the Library is deemed a mode -of using an interface provided by the Library. - - A "Combined Work" is a work produced by combining or linking an -Application with the Library. The particular version of the Library -with which the Combined Work was made is also called the "Linked -Version". - - The "Minimal Corresponding Source" for a Combined Work means the -Corresponding Source for the Combined Work, excluding any source code -for portions of the Combined Work that, considered in isolation, are -based on the Application, and not on the Linked Version. - - The "Corresponding Application Code" for a Combined Work means the -object code and/or source code for the Application, including any data -and utility programs needed for reproducing the Combined Work from the -Application, but excluding the System Libraries of the Combined Work. - - 1. Exception to Section 3 of the GNU GPL. - - You may convey a covered work under sections 3 and 4 of this License -without being bound by section 3 of the GNU GPL. - - 2. 
Conveying Modified Versions. - - If you modify a copy of the Library, and, in your modifications, a -facility refers to a function or data to be supplied by an Application -that uses the facility (other than as an argument passed when the -facility is invoked), then you may convey a copy of the modified -version: - - a) under this License, provided that you make a good faith effort to - ensure that, in the event an Application does not supply the - function or data, the facility still operates, and performs - whatever part of its purpose remains meaningful, or - - b) under the GNU GPL, with none of the additional permissions of - this License applicable to that copy. - - 3. Object Code Incorporating Material from Library Header Files. - - The object code form of an Application may incorporate material from -a header file that is part of the Library. You may convey such object -code under terms of your choice, provided that, if the incorporated -material is not limited to numerical parameters, data structure -layouts and accessors, or small macros, inline functions and templates -(ten or fewer lines in length), you do both of the following: - - a) Give prominent notice with each copy of the object code that the - Library is used in it and that the Library and its use are - covered by this License. - - b) Accompany the object code with a copy of the GNU GPL and this license - document. - - 4. Combined Works. - - You may convey a Combined Work under terms of your choice that, -taken together, effectively do not restrict modification of the -portions of the Library contained in the Combined Work and reverse -engineering for debugging such modifications, if you also do each of -the following: - - a) Give prominent notice with each copy of the Combined Work that - the Library is used in it and that the Library and its use are - covered by this License. - - b) Accompany the Combined Work with a copy of the GNU GPL and this license - document. 
- - c) For a Combined Work that displays copyright notices during - execution, include the copyright notice for the Library among - these notices, as well as a reference directing the user to the - copies of the GNU GPL and this license document. - - d) Do one of the following: - - 0) Convey the Minimal Corresponding Source under the terms of this - License, and the Corresponding Application Code in a form - suitable for, and under terms that permit, the user to - recombine or relink the Application with a modified version of - the Linked Version to produce a modified Combined Work, in the - manner specified by section 6 of the GNU GPL for conveying - Corresponding Source. - - 1) Use a suitable shared library mechanism for linking with the - Library. A suitable mechanism is one that (a) uses at run time - a copy of the Library already present on the user's computer - system, and (b) will operate properly with a modified version - of the Library that is interface-compatible with the Linked - Version. - - e) Provide Installation Information, but only if you would otherwise - be required to provide such information under section 6 of the - GNU GPL, and only to the extent that such information is - necessary to install and execute a modified version of the - Combined Work produced by recombining or relinking the - Application with a modified version of the Linked Version. (If - you use option 4d0, the Installation Information must accompany - the Minimal Corresponding Source and Corresponding Application - Code. If you use option 4d1, you must provide the Installation - Information in the manner specified by section 6 of the GNU GPL - for conveying Corresponding Source.) - - 5. Combined Libraries. 
- - You may place library facilities that are a work based on the -Library side by side in a single library together with other library -facilities that are not Applications and are not covered by this -License, and convey such a combined library under terms of your -choice, if you do both of the following: - - a) Accompany the combined library with a copy of the same work based - on the Library, uncombined with any other library facilities, - conveyed under the terms of this License. - - b) Give prominent notice with the combined library that part of it - is a work based on the Library, and explaining where to find the - accompanying uncombined form of the same work. - - 6. Revised Versions of the GNU Lesser General Public License. - - The Free Software Foundation may publish revised and/or new versions -of the GNU Lesser General Public License from time to time. Such new -versions will be similar in spirit to the present version, but may -differ in detail to address new problems or concerns. - - Each version is given a distinguishing version number. If the -Library as you received it specifies that a certain numbered version -of the GNU Lesser General Public License "or any later version" -applies to it, you have the option of following the terms and -conditions either of that published version or of any later version -published by the Free Software Foundation. If the Library as you -received it does not specify a version number of the GNU Lesser -General Public License, you may choose any version of the GNU Lesser -General Public License ever published by the Free Software Foundation. - - If the Library as you received it specifies that a proxy can decide -whether future versions of the GNU Lesser General Public License shall -apply, that proxy's public statement of acceptance of any version is -permanent authorization for you to choose that version for the -Library. 
diff -Nru juju-core-2.0~beta15/src/gopkg.in/juju/jujusvg.v1/Makefile juju-core-2.0.0/src/gopkg.in/juju/jujusvg.v1/Makefile --- juju-core-2.0~beta15/src/gopkg.in/juju/jujusvg.v1/Makefile 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/juju/jujusvg.v1/Makefile 1970-01-01 00:00:00.000000000 +0000 @@ -1,55 +0,0 @@ -ifndef GOPATH - $(warning You need to set up a GOPATH.) -endif - -PROJECT := gopkg.in/juju/jujusvg.v1 -PROJECT_DIR := $(shell go list -e -f '{{.Dir}}' $(PROJECT)) - -help: - @echo "Available targets:" - @echo " deps - fetch all dependencies" - @echo " build - build the project" - @echo " check - run tests" - @echo " install - install the library in your GOPATH" - @echo " clean - clean the project" - -# Start of GOPATH-dependent targets. Some targets only make sense - -# and will only work - when this tree is found on the GOPATH. -ifeq ($(CURDIR),$(PROJECT_DIR)) - -deps: - go get -v -t $(PROJECT)/... - -build: - go build $(PROJECT)/... - -check: - go test $(PROJECT)/... - -install: - go install $(INSTALL_FLAGS) -v $(PROJECT)/... - -clean: - go clean $(PROJECT)/... - -else - -deps: - $(error Cannot $@; $(CURDIR) is not on GOPATH) - -build: - $(error Cannot $@; $(CURDIR) is not on GOPATH) - -check: - $(error Cannot $@; $(CURDIR) is not on GOPATH) - -install: - $(error Cannot $@; $(CURDIR) is not on GOPATH) - -clean: - $(error Cannot $@; $(CURDIR) is not on GOPATH) - -endif -# End of GOPATH-dependent targets. - -.PHONY: help deps build check install clean diff -Nru juju-core-2.0~beta15/src/gopkg.in/juju/jujusvg.v1/README.md juju-core-2.0.0/src/gopkg.in/juju/jujusvg.v1/README.md --- juju-core-2.0~beta15/src/gopkg.in/juju/jujusvg.v1/README.md 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/juju/jujusvg.v1/README.md 1970-01-01 00:00:00.000000000 +0000 @@ -1,70 +0,0 @@ -jujusvg -======= - -A library for generating SVGs from Juju bundles and environments. 
- -Installation ------------- - -To start using jujusvg, first ensure you have a valid Go environment, then run -the following: - - go get gopkg.in/juju/jujusvg.v1 - -Dependencies ------------- - -The project uses godeps (https://launchpad.net/godeps) to manage Go -dependencies. To install this, run: - - - go get launchpad.net/godeps - -After installing it, you can update the dependencies to the revision specified -in the `dependencies.tsv` file with the following: - - make deps - -Use `make create-deps` to update the dependencies file. - -Usage ------ - -Given a Juju bundle, you can convert this to an SVG programatically. This -generates a simple SVG representation of a bundle or bundles that can then be -included in a webpage as a visualization. - -For an example of how to use this library, please see `examples/generatesvg.go`. -You can run this example like: - - go run generatesvg.go bundle.yaml > bundle.svg - -The examples directory also includes three sample bundles that you can play -around with, or you can use the [Juju GUI](https://demo.jujucharms.com) to -generate your own bundles. - -Design-related assets ---------------------- - -Some assets are specified based on assets provided by the design team. These -assets are specified in the defs section of the generated SVG, and can thus -be found in the Canvas.definition() method. These assets are, except where -indicated, embedded in a go file assigned to an exported variable, so that they -may be used like so: - -```go -import ( - "io" - - "gopkg.in/juju/jujusvg.v1/assets" -) - -// ... 
- -io.WriteString(canvas.Writer, assets.AssetToWrite) -``` - -Current assets in use: - -* ~~The service block~~ *the service block has been deprecated and is now handled with SVGo* -* The relation health indicator diff -Nru juju-core-2.0~beta15/src/gopkg.in/juju/jujusvg.v1/svg.go juju-core-2.0.0/src/gopkg.in/juju/jujusvg.v1/svg.go --- juju-core-2.0~beta15/src/gopkg.in/juju/jujusvg.v1/svg.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/juju/jujusvg.v1/svg.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,98 +0,0 @@ -package jujusvg - -import ( - "io" - - "github.com/juju/xml" - "gopkg.in/errgo.v1" -) - -const svgNamespace = "http://www.w3.org/2000/svg" - -// Process an icon SVG file from a reader, removing anything surrounding -// the tags, which would be invalid in this context (such as -// decls, directives, etc), writing out to a writer. In -// addition, loosely check that the icon is a valid SVG file. The id -// argument provides a unique identifier for the icon SVG so that it can -// be referenced within the bundle diagram. If an id attribute on the SVG -// tag already exists, it will be replaced with this argument. 
-func processIcon(r io.Reader, w io.Writer, id string) error { - dec := xml.NewDecoder(r) - dec.DefaultSpace = svgNamespace - - enc := xml.NewEncoder(w) - - svgStartFound := false - svgEndFound := false - depth := 0 - for depth < 1 { - tok, err := dec.Token() - if err != nil { - if err == io.EOF { - break - } - return errgo.Notef(err, "cannot get token") - } - tag, ok := tok.(xml.StartElement) - if ok && tag.Name.Space == svgNamespace && tag.Name.Local == "svg" { - svgStartFound = true - depth++ - tag.Attr = setXMLAttr(tag.Attr, xml.Name{ - Local: "id", - }, id) - if err := enc.EncodeToken(tag); err != nil { - return errgo.Notef(err, "cannot encode token %#v", tag) - } - } - } - for depth > 0 { - tok, err := dec.Token() - if err != nil { - if err == io.EOF { - break - } - return errgo.Notef(err, "cannot get token") - } - switch tag := tok.(type) { - case xml.StartElement: - if tag.Name.Space == svgNamespace && tag.Name.Local == "svg" { - depth++ - } - case xml.EndElement: - if tag.Name.Space == svgNamespace && tag.Name.Local == "svg" { - depth-- - if depth == 0 { - svgEndFound = true - } - } - } - if err := enc.EncodeToken(tok); err != nil { - return errgo.Notef(err, "cannot encode token %#v", tok) - } - } - - if !svgStartFound || !svgEndFound { - return errgo.Newf("icon does not appear to be a valid SVG") - } - - if err := enc.Flush(); err != nil { - return err - } - - return nil -} - -// setXMLAttr returns the given attributes with the given attribute name set to -// val, adding an attribute if necessary. 
-func setXMLAttr(attrs []xml.Attr, name xml.Name, val string) []xml.Attr { - for i := range attrs { - if attrs[i].Name == name { - attrs[i].Value = val - return attrs - } - } - return append(attrs, xml.Attr{ - Name: name, - Value: val, - }) -} diff -Nru juju-core-2.0~beta15/src/gopkg.in/juju/jujusvg.v1/svg_test.go juju-core-2.0.0/src/gopkg.in/juju/jujusvg.v1/svg_test.go --- juju-core-2.0~beta15/src/gopkg.in/juju/jujusvg.v1/svg_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/juju/jujusvg.v1/svg_test.go 1970-01-01 00:00:00.000000000 +0000 @@ -1,217 +0,0 @@ -package jujusvg - -import ( - "bytes" - "fmt" - - "github.com/juju/xml" - gc "gopkg.in/check.v1" -) - -type SVGSuite struct{} - -var _ = gc.Suite(&SVGSuite{}) - -func (s *SVGSuite) TestProcessIcon(c *gc.C) { - tests := []struct { - about string - icon string - expected string - err string - }{ - { - about: "Nothing stripped", - icon: ` - - - - `, - expected: ` - - - `, - }, - { - about: "SVG inside an SVG", - icon: ` - - - - - - - `, - expected: ` - - - - - - `, - }, - { - about: "ProcInst at start stripped", - icon: ` - - - - - `, - expected: ` - - - `, - }, - { - about: "Directive at start stripped", - icon: ` - - - - - `, - expected: ` - - - `, - }, - { - about: "ProcInst at end stripped", - icon: ` - - - - - `, - expected: ` - - - `, - }, - { - about: "Directive at end stripped", - icon: ` - - - - - `, - expected: ` - - - `, - }, - { - about: "ProcInsts/Directives inside svg left in place", - icon: ` - - - - - - `, - expected: ` - - - - - `, - }, - { - about: "Not an SVG", - icon: ` - - bad-wolf - - `, - err: "icon does not appear to be a valid SVG", - }, - } - for i, test := range tests { - in := bytes.NewBuffer([]byte(test.icon)) - out := bytes.Buffer{} - err := processIcon(in, &out, fmt.Sprintf("test-%d", i)) - if test.err != "" { - c.Assert(err, gc.ErrorMatches, test.err) - } else { - c.Assert(err, gc.IsNil) - assertXMLEqual(c, out.Bytes(), []byte(test.expected)) - } - } -} - 
-func (s *SVGSuite) TestSetXMLAttr(c *gc.C) { - // Attribute is added. - expected := []xml.Attr{ - { - Name: xml.Name{ - Local: "id", - }, - Value: "foo", - }, - } - - result := setXMLAttr([]xml.Attr{}, xml.Name{ - Local: "id", - }, "foo") - c.Assert(result, gc.DeepEquals, expected) - - // Attribute is changed. - result = setXMLAttr([]xml.Attr{ - { - Name: xml.Name{ - Local: "id", - }, - Value: "bar", - }, - }, xml.Name{ - Local: "id", - }, "foo") - c.Assert(result, gc.DeepEquals, expected) - - // Attribute is changed, existing attributes unchanged. - expected = []xml.Attr{ - { - Name: xml.Name{ - Local: "class", - }, - Value: "bar", - }, - { - Name: xml.Name{ - Local: "id", - }, - Value: "foo", - }, - } - result = setXMLAttr([]xml.Attr{ - { - Name: xml.Name{ - Local: "class", - }, - Value: "bar", - }, - { - Name: xml.Name{ - Local: "id", - }, - Value: "bar", - }, - }, xml.Name{ - Local: "id", - }, "foo") - c.Assert(result, gc.DeepEquals, expected) - - // Attribute is added, existing attributes unchanged. 
- result = setXMLAttr([]xml.Attr{ - { - Name: xml.Name{ - Local: "class", - }, - Value: "bar", - }, - }, xml.Name{ - Local: "id", - }, "foo") - c.Assert(result, gc.DeepEquals, expected) -} diff -Nru juju-core-2.0~beta15/src/gopkg.in/juju/jujusvg.v2/assets/relation-icon-healthy.go juju-core-2.0.0/src/gopkg.in/juju/jujusvg.v2/assets/relation-icon-healthy.go --- juju-core-2.0~beta15/src/gopkg.in/juju/jujusvg.v2/assets/relation-icon-healthy.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/juju/jujusvg.v2/assets/relation-icon-healthy.go 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,5 @@ +package assets + +var RelationIconHealthy = ` + +` diff -Nru juju-core-2.0~beta15/src/gopkg.in/juju/jujusvg.v2/assets/relation-icon-healthy.svg juju-core-2.0.0/src/gopkg.in/juju/jujusvg.v2/assets/relation-icon-healthy.svg --- juju-core-2.0~beta15/src/gopkg.in/juju/jujusvg.v2/assets/relation-icon-healthy.svg 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/juju/jujusvg.v2/assets/relation-icon-healthy.svg 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,2 @@ + + diff -Nru juju-core-2.0~beta15/src/gopkg.in/juju/jujusvg.v2/assets/service_module.go juju-core-2.0.0/src/gopkg.in/juju/jujusvg.v2/assets/service_module.go --- juju-core-2.0~beta15/src/gopkg.in/juju/jujusvg.v2/assets/service_module.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/juju/jujusvg.v2/assets/service_module.go 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,45 @@ +package assets + +// This is the SVG for the service module block used in the bundle diagram. +// Note that there MUST NOT be anything (processing instructions, xml +// declarations, or directives) before the tag. 
+var ServiceModule = ` + + + + + + + + + + + + + + + + + + + +` diff -Nru juju-core-2.0~beta15/src/gopkg.in/juju/jujusvg.v2/assets/service_module.svg juju-core-2.0.0/src/gopkg.in/juju/jujusvg.v2/assets/service_module.svg --- juju-core-2.0~beta15/src/gopkg.in/juju/jujusvg.v2/assets/service_module.svg 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/juju/jujusvg.v2/assets/service_module.svg 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,41 @@ + + + + + + + + + + + + + + + + + + + + + + diff -Nru juju-core-2.0~beta15/src/gopkg.in/juju/jujusvg.v2/canvas.go juju-core-2.0.0/src/gopkg.in/juju/jujusvg.v2/canvas.go --- juju-core-2.0~beta15/src/gopkg.in/juju/jujusvg.v2/canvas.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/juju/jujusvg.v2/canvas.go 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,299 @@ +package jujusvg + +import ( + "bytes" + "fmt" + "image" + "io" + "math" + + svg "github.com/ajstarks/svgo" + + "gopkg.in/juju/jujusvg.v2/assets" +) + +const ( + iconSize = 96 + applicationBlockSize = 180 + healthCircleRadius = 8 + relationLineWidth = 1 + maxInt = int(^uint(0) >> 1) + minInt = -(maxInt - 1) + maxHeight = 450 + maxWidth = 1000 + + fontColor = "#505050" + relationColor = "#a7a7a7" +) + +// Canvas holds the parsed form of a bundle or model. +type Canvas struct { + applications []*application + relations []*applicationRelation + iconsRendered map[string]bool + iconIds map[string]string +} + +// application represents a application deployed to a model and contains the +// point of the top-left corner of the icon, icon URL, and additional metadata. +type application struct { + name string + charmPath string + iconUrl string + iconSrc []byte + point image.Point +} + +// applicationRelation represents a relation created between two applications. +type applicationRelation struct { + name string + applicationA *application + applicationB *application +} + +// line represents a line segment with two endpoints. 
+type line struct { + p0, p1 image.Point +} + +// definition creates any necessary defs that can be used later in the SVG. +func (s *application) definition(canvas *svg.SVG, iconsRendered map[string]bool, iconIds map[string]string) error { + if len(s.iconSrc) == 0 || iconsRendered[s.charmPath] { + return nil + } + iconsRendered[s.charmPath] = true + iconIds[s.charmPath] = fmt.Sprintf("icon-%d", len(iconsRendered)) + + // Temporary solution: + iconBuf := bytes.NewBuffer(s.iconSrc) + return processIcon(iconBuf, canvas.Writer, iconIds[s.charmPath]) +} + +// usage creates any necessary tags for actually using the application in the SVG. +func (s *application) usage(canvas *svg.SVG, iconIds map[string]string) { + canvas.Group(fmt.Sprintf(`transform="translate(%d,%d)"`, s.point.X, s.point.Y)) + defer canvas.Gend() + canvas.Title(s.name) + canvas.Circle( + applicationBlockSize/2, + applicationBlockSize/2, + applicationBlockSize/2, + `class="application-block" fill="#f5f5f5" stroke="#888" stroke-width="1"`) + if len(s.iconSrc) > 0 { + canvas.Use( + 0, + 0, + "#"+iconIds[s.charmPath], + fmt.Sprintf(`transform="translate(%d,%d)" width="%d" height="%d" clip-path="url(#clip-mask)"`, applicationBlockSize/2-iconSize/2, applicationBlockSize/2-iconSize/2, iconSize, iconSize), + ) + } else { + canvas.Image( + applicationBlockSize/2-iconSize/2, + applicationBlockSize/2-iconSize/2, + iconSize, + iconSize, + s.iconUrl, + `clip-path="url(#clip-mask)"`, + ) + } + name := s.name + if len(name) > 20 { + name = fmt.Sprintf("%s...", name[:17]) + } + canvas.Rect( + 0, + applicationBlockSize-45, + applicationBlockSize, + 32, + `rx="2" ry="2" fill="rgba(220, 220, 220, 0.8)"`) + canvas.Text( + applicationBlockSize/2, + applicationBlockSize-23, + name, + `text-anchor="middle" style="font-weight:200"`) +} + +// definition creates any necessary defs that can be used later in the SVG. 
+func (r *applicationRelation) definition(canvas *svg.SVG) { +} + +// usage creates any necessary tags for actually using the relation in the SVG. +func (r *applicationRelation) usage(canvas *svg.SVG) { + canvas.Group() + defer canvas.Gend() + canvas.Title(r.name) + l := line{ + p0: r.applicationA.point.Add(point(applicationBlockSize/2, applicationBlockSize/2)), + p1: r.applicationB.point.Add(point(applicationBlockSize/2, applicationBlockSize/2)), + } + canvas.Line( + l.p0.X, + l.p0.Y, + l.p1.X, + l.p1.Y, + fmt.Sprintf(`stroke=%q`, relationColor), + fmt.Sprintf(`stroke-width="%dpx"`, relationLineWidth), + fmt.Sprintf(`stroke-dasharray=%q`, strokeDashArray(l)), + ) + mid := l.p0.Add(l.p1).Div(2).Sub(point(healthCircleRadius, healthCircleRadius)) + canvas.Use(mid.X, mid.Y, "#healthCircle") + + deg := math.Atan2(float64(l.p0.Y-l.p1.Y), float64(l.p0.X-l.p1.X)) + canvas.Circle( + int(float64(l.p0.X)-math.Cos(deg)*(applicationBlockSize/2)), + int(float64(l.p0.Y)-math.Sin(deg)*(applicationBlockSize/2)), + 4, + fmt.Sprintf(`fill=%q`, relationColor)) + canvas.Circle( + int(float64(l.p1.X)+math.Cos(deg)*(applicationBlockSize/2)), + int(float64(l.p1.Y)+math.Sin(deg)*(applicationBlockSize/2)), + 4, + fmt.Sprintf(`fill=%q`, relationColor)) +} + +// strokeDashArray generates the stroke-dasharray attribute content so that +// the relation health indicator is placed in an empty space. +func strokeDashArray(l line) string { + return fmt.Sprintf("%.2f, %d", l.length()/2-healthCircleRadius, healthCircleRadius*2) +} + +// length calculates the length of a line. +func (l *line) length() float64 { + dp := l.p0.Sub(l.p1) + return math.Sqrt(square(float64(dp.X)) + square(float64(dp.Y))) +} + +// addApplication adds a new application to the canvas. +func (c *Canvas) addApplication(s *application) { + c.applications = append(c.applications, s) +} + +// addRelation adds a new relation to the canvas. 
+func (c *Canvas) addRelation(r *applicationRelation) { + c.relations = append(c.relations, r) +} + +// layout adjusts all items so that they are positioned appropriately, +// and returns the overall size of the canvas. +func (c *Canvas) layout() (int, int) { + minWidth := maxInt + minHeight := maxInt + maxWidth := minInt + maxHeight := minInt + + for _, application := range c.applications { + if application.point.X < minWidth { + minWidth = application.point.X + } + if application.point.Y < minHeight { + minHeight = application.point.Y + } + if application.point.X > maxWidth { + maxWidth = application.point.X + } + if application.point.Y > maxHeight { + maxHeight = application.point.Y + } + } + for _, application := range c.applications { + application.point = application.point.Sub(point(minWidth, minHeight)) + } + return abs(maxWidth-minWidth) + applicationBlockSize + 1, + abs(maxHeight-minHeight) + applicationBlockSize + 1 +} + +func (c *Canvas) definition(canvas *svg.SVG) { + canvas.Def() + defer canvas.DefEnd() + + // Relation health circle. + canvas.Group(`id="healthCircle"`, + `transform="scale(1.1)"`) + io.WriteString(canvas.Writer, assets.RelationIconHealthy) + canvas.Gend() + + // Application and relation specific defs. 
+ for _, relation := range c.relations { + relation.definition(canvas) + } + for _, application := range c.applications { + application.definition(canvas, c.iconsRendered, c.iconIds) + } +} + +func (c *Canvas) relationsGroup(canvas *svg.SVG) { + canvas.Gid("relations") + defer canvas.Gend() + for _, relation := range c.relations { + relation.usage(canvas) + } +} + +func (c *Canvas) applicationsGroup(canvas *svg.SVG) { + canvas.Gid("applications") + defer canvas.Gend() + for _, application := range c.applications { + application.usage(canvas, c.iconIds) + } +} + +func (c *Canvas) iconClipPath(canvas *svg.SVG) { + canvas.Circle( + applicationBlockSize/2-iconSize/2+5, // for these two, add an offset to help + applicationBlockSize/2-iconSize/2+7, // hide the embossed border. + applicationBlockSize/4, + `id="application-icon-mask" fill="none"`) + canvas.ClipPath(`id="clip-mask"`) + defer canvas.ClipEnd() + canvas.Use( + 0, + 0, + `#application-icon-mask`) +} + +// Marshal renders the SVG to the given io.Writer. +func (c *Canvas) Marshal(w io.Writer) { + // Initialize maps for application icons, which are used both in definition + // and use methods for applications. + c.iconsRendered = make(map[string]bool) + c.iconIds = make(map[string]string) + + // TODO check write errors and return an error from + // Marshal if the write fails. The svg package does not + // itself check or return write errors; a possible work-around + // is to wrap the writer in a custom writer that panics + // on error, and catch the panic here. + width, height := c.layout() + + canvas := svg.New(w) + canvas.Start( + width, + height, + fmt.Sprintf(`style="font-family:Ubuntu, sans-serif;" viewBox="0 0 %d %d"`, + width, height), + ) + defer canvas.End() + c.definition(canvas) + c.iconClipPath(canvas) + c.relationsGroup(canvas) + c.applicationsGroup(canvas) +} + +// abs returns the absolute value of a number. 
+func abs(x int) int { + if x < 0 { + return -x + } else { + return x + } +} + +// square multiplies a number by itself. +func square(x float64) float64 { + return x * x +} + +// point generates an image.Point given its coordinates. +func point(x, y int) image.Point { + return image.Point{x, y} +} diff -Nru juju-core-2.0~beta15/src/gopkg.in/juju/jujusvg.v2/canvas_test.go juju-core-2.0.0/src/gopkg.in/juju/jujusvg.v2/canvas_test.go --- juju-core-2.0~beta15/src/gopkg.in/juju/jujusvg.v2/canvas_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/juju/jujusvg.v2/canvas_test.go 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,299 @@ +package jujusvg + +import ( + "bytes" + "encoding/xml" + "image" + "io" + + "github.com/ajstarks/svgo" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + "gopkg.in/juju/jujusvg.v2/assets" +) + +type CanvasSuite struct{} + +var _ = gc.Suite(&CanvasSuite{}) + +func (s *CanvasSuite) TestApplicationRender(c *gc.C) { + // Ensure that the Application's definition and usage methods output the + // proper SVG elements. + var tests = []struct { + about string + application application + expected string + }{ + { + about: "Application without iconSrc, no def created", + application: application{ + name: "foo", + point: image.Point{ + X: 0, + Y: 0, + }, + iconUrl: "foo", + }, + expected: ` +foo + + + +foo + +`, + }, + { + about: "Application with iconSrc", + application: application{ + name: "bar", + charmPath: "bar", + point: image.Point{ + X: 0, + Y: 0, + }, + iconSrc: []byte("bar"), + }, + expected: `bar +bar + + + +bar + +`, + }, + { + about: "Application with already def'd icon", + application: application{ + name: "baz", + charmPath: "bar", + point: image.Point{ + X: 0, + Y: 0, + }, + iconSrc: []byte("bar"), + }, + expected: ` +baz + + + +baz + +`, + }, + } + // Maintain our list of rendered icons outside the loop. 
+ iconsRendered := make(map[string]bool) + iconIds := make(map[string]string) + for _, test := range tests { + var buf bytes.Buffer + svg := svg.New(&buf) + test.application.definition(svg, iconsRendered, iconIds) + test.application.usage(svg, iconIds) + c.Log(test.about) + c.Log(buf.String()) + c.Assert(buf.String(), gc.Equals, test.expected) + } +} + +func (s *CanvasSuite) TestRelationRender(c *gc.C) { + // Ensure that the Relation's definition and usage methods output the + // proper SVG elements. + var buf bytes.Buffer + svg := svg.New(&buf) + relation := applicationRelation{ + name: "foo", + applicationA: &application{ + point: image.Point{ + X: 0, + Y: 0, + }, + }, + applicationB: &application{ + point: image.Point{ + X: 100, + Y: 100, + }, + }, + } + relation.definition(svg) + relation.usage(svg) + c.Assert(buf.String(), gc.Equals, + ` +foo + + + + + +`) +} + +func (s *CanvasSuite) TestIconClipPath(c *gc.C) { + // Ensure that the icon ClipPath returns the correctly sizes clipping Circle + var buf bytes.Buffer + svg := svg.New(&buf) + canvas := Canvas{} + canvas.iconClipPath(svg) + c.Assert(buf.String(), gc.Equals, + ` + + +`) +} + +func (s *CanvasSuite) TestLayout(c *gc.C) { + // Ensure that the SVG is sized exactly around the positioned applications. 
+ canvas := Canvas{} + canvas.addApplication(&application{ + name: "application1", + point: image.Point{ + X: 0, + Y: 0, + }, + }) + canvas.addApplication(&application{ + name: "application2", + point: image.Point{ + X: 100, + Y: 100, + }, + }) + width, height := canvas.layout() + c.Assert(width, gc.Equals, 281) + c.Assert(height, gc.Equals, 281) + canvas.addApplication(&application{ + name: "application3", + point: image.Point{ + X: -100, + Y: -100, + }, + }) + canvas.addApplication(&application{ + name: "application4", + point: image.Point{ + X: -100, + Y: 100, + }, + }) + canvas.addApplication(&application{ + name: "application5", + point: image.Point{ + X: 200, + Y: -100, + }, + }) + width, height = canvas.layout() + c.Assert(width, gc.Equals, 481) + c.Assert(height, gc.Equals, 381) +} + +func (s *CanvasSuite) TestMarshal(c *gc.C) { + // Ensure that the internal representation of the canvas can be marshalled + // to SVG. + var buf bytes.Buffer + canvas := Canvas{} + applicationA := &application{ + name: "application-a", + charmPath: "trusty/svc-a", + point: image.Point{ + X: 0, + Y: 0, + }, + iconSrc: []byte(` + + + `), + } + applicationB := &application{ + name: "application-b", + point: image.Point{ + X: 100, + Y: 100, + }, + } + canvas.addApplication(applicationA) + canvas.addApplication(applicationB) + canvas.addRelation(&applicationRelation{ + name: "relation", + applicationA: applicationA, + applicationB: applicationB, + }) + canvas.Marshal(&buf) + c.Logf("%s", buf.Bytes()) + assertXMLEqual(c, buf.Bytes(), []byte(` + + + + +`+assets.RelationIconHealthy+` + + + + + + + + + +relation + + + + + + + + +application-a + + + +application-a + + +application-b + + + +application-b + + + +`)) +} + +func assertXMLEqual(c *gc.C, obtained, expected []byte) { + toksObtained := xmlTokens(c, obtained) + toksExpected := xmlTokens(c, expected) + c.Assert(toksObtained, jc.DeepEquals, toksExpected) +} + +func xmlTokens(c *gc.C, data []byte) []xml.Token { + dec := 
xml.NewDecoder(bytes.NewReader(data)) + var toks []xml.Token + for { + tok, err := dec.Token() + if err == io.EOF { + return toks + } + c.Assert(err, gc.IsNil) + + if cdata, ok := tok.(xml.CharData); ok { + // It's char data - trim all white space and ignore it + // if it's all blank. + cdata = bytes.TrimSpace(cdata) + if len(cdata) == 0 { + continue + } + tok = cdata + } + toks = append(toks, xml.CopyToken(tok)) + } +} diff -Nru juju-core-2.0~beta15/src/gopkg.in/juju/jujusvg.v2/dependencies.tsv juju-core-2.0.0/src/gopkg.in/juju/jujusvg.v2/dependencies.tsv --- juju-core-2.0~beta15/src/gopkg.in/juju/jujusvg.v2/dependencies.tsv 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/juju/jujusvg.v2/dependencies.tsv 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,19 @@ +github.com/ajstarks/svgo git 89e3ac64b5b3e403a5e7c35ea4f98d45db7b4518 2014-10-04T21:11:59Z +github.com/juju/errors git 1b5e39b83d1835fa480e0c2ddefb040ee82d58b3 2015-09-16T12:56:42Z +github.com/juju/gojsonpointer git afe8b77aa08f272b49e01b82de78510c11f61500 2015-02-04T19:46:29Z +github.com/juju/gojsonreference git f0d24ac5ee330baa21721cdff56d45e4ee42628e 2015-02-04T19:46:33Z +github.com/juju/gojsonschema git e1ad140384f254c82f89450d9a7c8dd38a632838 2015-03-12T17:00:16Z +github.com/juju/loggo git 8477fc936adf0e382d680310047ca27e128a309a 2015-05-27T03:58:39Z +github.com/juju/schema git 1e25943f8c6fd6815282d6f1ac87091d21e14e19 2016-03-01T11:16:46Z +github.com/juju/testing git 162fafccebf20a4207ab93d63b986c230e3f4d2e 2016-04-04T09:43:17Z +github.com/juju/utils git ffea6ead0c374583e876c8357c9db6e98bc71476 2016-05-26T02:52:51Z +github.com/juju/version git ef897ad7f130870348ce306f61332f5335355063 2015-11-27T20:34:00Z +github.com/juju/xml git eb759a627588d35166bc505fceb51b88500e291e 2015-04-13T13:11:21Z +golang.org/x/crypto git aedad9a179ec1ea11b7064c57cbc6dc30d7724ec 2015-08-30T18:06:42Z +gopkg.in/check.v1 git 4f90aeace3a26ad7021961c297b22c42160c7b25 2016-01-05T16:49:36Z +gopkg.in/errgo.v1 git 
66cb46252b94c1f3d65646f54ee8043ab38d766c 2015-10-07T15:31:57Z +gopkg.in/juju/charm.v6-unstable git 503191ddf2590b15db9669c424660ed612d0a30a 2016-05-27T01:46:20Z +gopkg.in/juju/names.v2 git e38bc90539f22af61a9c656d35068bd5f0a5b30a 2016-05-25T23:07:23Z +gopkg.in/mgo.v2 git 4d04138ffef2791c479c0c8bbffc30b34081b8d9 2015-10-26T16:34:53Z +gopkg.in/yaml.v2 git a83829b6f1293c91addabc89d0571c246397bbf4 2016-03-01T20:40:22Z +launchpad.net/tomb bzr gustavo@niemeyer.net-20140529072043-hzcrlnl3ygvg914q 18 diff -Nru juju-core-2.0~beta15/src/gopkg.in/juju/jujusvg.v2/doc.go juju-core-2.0.0/src/gopkg.in/juju/jujusvg.v2/doc.go --- juju-core-2.0~beta15/src/gopkg.in/juju/jujusvg.v2/doc.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/juju/jujusvg.v2/doc.go 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,8 @@ +// Copyright 2014 Canonical, Ltd. +// Licensed under the LGPLv3, see LICENCE file for details. + +// jujusvg generates SVG representations of various Juju artifacts, such as +// charm bundles or live environments. +// +// For more information, please refer to the README file in this directory. 
+package jujusvg diff -Nru juju-core-2.0~beta15/src/gopkg.in/juju/jujusvg.v2/examples/charmworld-missing-placement.yaml juju-core-2.0.0/src/gopkg.in/juju/jujusvg.v2/examples/charmworld-missing-placement.yaml --- juju-core-2.0~beta15/src/gopkg.in/juju/jujusvg.v2/examples/charmworld-missing-placement.yaml 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/juju/jujusvg.v2/examples/charmworld-missing-placement.yaml 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,29 @@ +applications: + mongodb: + charm: "cs:precise/mongodb-21" + num_units: 1 + annotations: + "gui-x": "940.5" + "gui-y": "388.7698359714502" + constraints: "mem=2G cpu-cores=1" + elasticsearch: + charm: "cs:~charming-devs/precise/elasticsearch-2" + num_units: 1 + constraints: "mem=2G cpu-cores=1" + charmworld: + charm: "cs:~juju-jitsu/precise/charmworld-58" + num_units: 1 + expose: true + annotations: + "gui-x": "813.5" + "gui-y": "112.23016402854975" + options: + charm_import_limit: -1 + source: "lp:~bac/charmworld/ingest-local-charms" + revno: 511 +relations: + - - "charmworld:essearch" + - "elasticsearch:essearch" + - - "charmworld:database" + - "mongodb:database" +series: precise diff -Nru juju-core-2.0~beta15/src/gopkg.in/juju/jujusvg.v2/examples/charmworld-no-placement.yaml juju-core-2.0.0/src/gopkg.in/juju/jujusvg.v2/examples/charmworld-no-placement.yaml --- juju-core-2.0~beta15/src/gopkg.in/juju/jujusvg.v2/examples/charmworld-no-placement.yaml 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/juju/jujusvg.v2/examples/charmworld-no-placement.yaml 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,23 @@ +applications: + mongodb: + charm: "cs:precise/mongodb-21" + num_units: 1 + constraints: "mem=2G cpu-cores=1" + elasticsearch: + charm: "cs:~charming-devs/precise/elasticsearch-2" + num_units: 1 + constraints: "mem=2G cpu-cores=1" + charmworld: + charm: "cs:~juju-jitsu/precise/charmworld-58" + num_units: 1 + expose: true + options: + charm_import_limit: -1 + source: 
"lp:~bac/charmworld/ingest-local-charms" + revno: 511 +relations: + - - "charmworld:essearch" + - "elasticsearch:essearch" + - - "charmworld:database" + - "mongodb:database" +series: precise diff -Nru juju-core-2.0~beta15/src/gopkg.in/juju/jujusvg.v2/examples/charmworld.yaml juju-core-2.0.0/src/gopkg.in/juju/jujusvg.v2/examples/charmworld.yaml --- juju-core-2.0~beta15/src/gopkg.in/juju/jujusvg.v2/examples/charmworld.yaml 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/juju/jujusvg.v2/examples/charmworld.yaml 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,32 @@ +applications: + mongodb: + charm: "cs:precise/mongodb-21" + num_units: 1 + annotations: + "gui-x": "940.5" + "gui-y": "388.7698359714502" + constraints: "mem=2G cpu-cores=1" + elasticsearch: + charm: "cs:~charming-devs/precise/elasticsearch-2" + num_units: 1 + annotations: + "gui-x": "490.5" + "gui-y": "369.7698359714502" + constraints: "mem=2G cpu-cores=1" + charmworld: + charm: "cs:~juju-jitsu/precise/charmworld-58" + num_units: 1 + expose: true + annotations: + "gui-x": "813.5" + "gui-y": "112.23016402854975" + options: + charm_import_limit: -1 + source: "lp:~bac/charmworld/ingest-local-charms" + revno: 511 +relations: + - - "charmworld:essearch" + - "elasticsearch:essearch" + - - "charmworld:database" + - "mongodb:database" +series: precise diff -Nru juju-core-2.0~beta15/src/gopkg.in/juju/jujusvg.v2/examples/generatesvg.go juju-core-2.0.0/src/gopkg.in/juju/jujusvg.v2/examples/generatesvg.go --- juju-core-2.0~beta15/src/gopkg.in/juju/jujusvg.v2/examples/generatesvg.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/juju/jujusvg.v2/examples/generatesvg.go 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,56 @@ +package main + +// This is a demo application that uses the jujusvg library to build a bundle SVG +// from a given bundle.yaml file. 
+ +import ( + "io/ioutil" + "log" + "os" + "strings" + + "gopkg.in/juju/charm.v6-unstable" + + // Import the jujusvg library and the juju charm library + "gopkg.in/juju/jujusvg.v2" +) + +// iconURL takes a reference to a charm and returns the URL for that charm's icon. +// In this case, we're using the api.jujucharms.com API to provide the icon's URL. +func iconURL(ref *charm.URL) string { + return "https://api.jujucharms.com/charmstore/v4/" + ref.Path() + "/icon.svg" +} + +func main() { + if len(os.Args) != 2 { + log.Fatalf("Please provide the name of a bundle file as the first argument") + } + + // First, we need to read our bundle data into a []byte + bundle_data, err := ioutil.ReadFile(os.Args[1]) + if err != nil { + log.Fatalf("Error reading bundle: %s\n", err) + } + + // Next, generate a charm.Bundle from the bytearray by passing it to ReadNewBundleData. + // This gives us an in-memory object representation of the bundle that we can pass to jujusvg + bundle, err := charm.ReadBundleData(strings.NewReader(string(bundle_data))) + if err != nil { + log.Fatalf("Error parsing bundle: %s\n", err) + } + + fetcher := &jujusvg.HTTPFetcher{ + IconURL: iconURL, + } + // Next, build a canvas of the bundle. This is a simplified version of a charm.Bundle + // that contains just the position information and charm icon URLs necessary to build + // the SVG representation of the bundle + canvas, err := jujusvg.NewFromBundle(bundle, iconURL, fetcher) + if err != nil { + log.Fatalf("Error generating canvas: %s\n", err) + } + + // Finally, marshal that canvas as SVG to os.Stdout; this will print the SVG data + // required to generate an image of the bundle. 
+ canvas.Marshal(os.Stdout) +} diff -Nru juju-core-2.0~beta15/src/gopkg.in/juju/jujusvg.v2/examples/kubernetes-bundle.yaml juju-core-2.0.0/src/gopkg.in/juju/jujusvg.v2/examples/kubernetes-bundle.yaml --- juju-core-2.0~beta15/src/gopkg.in/juju/jujusvg.v2/examples/kubernetes-bundle.yaml 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/juju/jujusvg.v2/examples/kubernetes-bundle.yaml 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,44 @@ +applications: + "kubernetes-master": + charm: cs:~kubernetes/trusty/kubernetes-master-5 + annotations: + "gui-x": "600" + "gui-y": "0" + expose: true + docker: + charm: cs:trusty/docker-2 + num_units: 2 + annotations: + "gui-x": "0" + "gui-y": "0" + flannel-docker: + charm: cs:trusty/flannel-docker-5 + annotations: + "gui-x": "0" + "gui-y": "300" + kubernetes: + charm: cs:~kubernetes/trusty/kubernetes-5 + annotations: + "gui-x": "300" + "gui-y": "300" + etcd: + charm: cs:~kubernetes/trusty/etcd-2 + annotations: + "gui-x": "300" + "gui-y": "0" +relations: + - - "flannel-docker:network" + - "docker:network" + - - "flannel-docker:docker-host" + - "docker:juju-info" + - - "flannel-docker:db" + - "etcd:client" + - - "kubernetes:docker-host" + - "docker:juju-info" + - - "etcd:client" + - "kubernetes:etcd" + - - "etcd:client" + - "kubernetes-master:etcd" + - - "kubernetes-master:minions-api" + - "kubernetes:api" +series: trusty diff -Nru juju-core-2.0~beta15/src/gopkg.in/juju/jujusvg.v2/examples/mediawiki-scalable.yaml juju-core-2.0.0/src/gopkg.in/juju/jujusvg.v2/examples/mediawiki-scalable.yaml --- juju-core-2.0~beta15/src/gopkg.in/juju/jujusvg.v2/examples/mediawiki-scalable.yaml 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/juju/jujusvg.v2/examples/mediawiki-scalable.yaml 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,114 @@ +applications: + haproxy: + charm: cs:precise/haproxy-35 + num_units: 1 + options: + default_log: global + default_mode: http + default_options: httplog, dontlognull + 
default_retries: 3 + default_timeouts: queue 20000, client 50000, connect 5000, server 50000 + enable_monitoring: false + global_debug: false + global_group: haproxy + global_log: 127.0.0.1 local0, 127.0.0.1 local1 notice + global_maxconn: 4096 + global_quiet: false + global_spread_checks: 0 + global_user: haproxy + monitoring_allowed_cidr: 127.0.0.1/32 + monitoring_password: changeme + monitoring_port: 10000 + monitoring_stats_refresh: 3 + monitoring_username: haproxy + nagios_context: juju + package_status: install + applications: "- service_name: haproxy_service\n service_host: \"0.0.0.0\"\n service_port: + 80\n service_options: [balance leastconn]\n server_options: maxconn 100\n" + sysctl: "" + annotations: + gui-x: "619" + gui-y: "-406" + mediawiki: + charm: cs:precise/mediawiki-10 + num_units: 1 + options: + debug: false + name: Please set name of wiki + skin: vector + annotations: + gui-x: "618" + gui-y: "-128" + memcached: + charm: cs:precise/memcached-7 + num_units: 1 + options: + connection-limit: 1024 + disable-auto-cleanup: "no" + disable-cas: "no" + disable-large-pages: "no" + extra-options: "" + factor: 1.25 + min-item-size: -1 + nagios_context: juju + request-limit: -1 + size: 768 + slab-page-size: -1 + tcp-port: 11211 + threads: -1 + udp-port: 0 + annotations: + gui-x: "926" + gui-y: "-125" + mysql: + charm: cs:precise/mysql-28 + num_units: 1 + options: + binlog-format: MIXED + block-size: 5 + dataset-size: 80% + flavor: distro + ha-bindiface: eth0 + ha-mcastport: 5411 + max-connections: -1 + preferred-storage-engine: InnoDB + query-cache-size: -1 + query-cache-type: "OFF" + rbd-name: mysql1 + tuning-level: safest + vip_cidr: 24 + vip_iface: eth0 + annotations: + gui-x: "926" + gui-y: "123" + mysql-slave: + charm: cs:precise/mysql-28 + num_units: 1 + options: + binlog-format: MIXED + block-size: 5 + dataset-size: 80% + flavor: distro + ha-bindiface: eth0 + ha-mcastport: 5411 + max-connections: -1 + preferred-storage-engine: InnoDB + 
query-cache-size: -1 + query-cache-type: "OFF" + rbd-name: mysql1 + tuning-level: safest + vip_cidr: 24 + vip_iface: eth0 + annotations: + gui-x: "619" + gui-y: "124" +series: precise +relations: +- - mediawiki:cache + - memcached:cache +- - haproxy:reverseproxy + - mediawiki:website +- - mysql-slave:slave + - mysql:master +- - mediawiki:slave + - mysql-slave:db diff -Nru juju-core-2.0~beta15/src/gopkg.in/juju/jujusvg.v2/examples/openstack.yaml juju-core-2.0.0/src/gopkg.in/juju/jujusvg.v2/examples/openstack.yaml --- juju-core-2.0~beta15/src/gopkg.in/juju/jujusvg.v2/examples/openstack.yaml 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/juju/jujusvg.v2/examples/openstack.yaml 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,297 @@ +machines: + '0': + constraints: arch=amd64 + series: trusty + '1': + constraints: arch=amd64 + series: trusty + '2': + constraints: arch=amd64 + series: trusty + '3': + constraints: arch=amd64 + series: trusty +relations: +- - nova-compute:amqp + - rabbitmq-server:amqp +- - neutron-gateway:amqp + - rabbitmq-server:amqp +- - keystone:shared-db + - mysql:shared-db +- - nova-cloud-controller:identity-service + - keystone:identity-service +- - glance:identity-service + - keystone:identity-service +- - neutron-api:identity-service + - keystone:identity-service +- - neutron-openvswitch:neutron-plugin-api + - neutron-api:neutron-plugin-api +- - neutron-api:shared-db + - mysql:shared-db +- - neutron-api:amqp + - rabbitmq-server:amqp +- - neutron-gateway:neutron-plugin-api + - neutron-api:neutron-plugin-api +- - glance:shared-db + - mysql:shared-db +- - glance:amqp + - rabbitmq-server:amqp +- - nova-cloud-controller:image-service + - glance:image-service +- - nova-compute:image-service + - glance:image-service +- - nova-cloud-controller:cloud-compute + - nova-compute:cloud-compute +- - nova-cloud-controller:amqp + - rabbitmq-server:amqp +- - nova-cloud-controller:quantum-network-service + - 
neutron-gateway:quantum-network-service +- - nova-compute:neutron-plugin + - neutron-openvswitch:neutron-plugin +- - neutron-openvswitch:amqp + - rabbitmq-server:amqp +- - openstack-dashboard:identity-service + - keystone:identity-service +- - nova-cloud-controller:shared-db + - mysql:shared-db +- - nova-cloud-controller:neutron-api + - neutron-api:neutron-api +- - cinder:image-service + - glance:image-service +- - cinder:amqp + - rabbitmq-server:amqp +- - cinder:identity-service + - keystone:identity-service +- - cinder:cinder-volume-service + - nova-cloud-controller:cinder-volume-service +- - cinder-ceph:storage-backend + - cinder:storage-backend +- - ceph:client + - nova-compute:ceph +- - cinder:shared-db + - mysql:shared-db +- - ceph:client + - cinder-ceph:ceph +- - ceph:client + - glance:ceph +- - ceph-osd:mon + - ceph:osd +- - ntp:juju-info + - nova-compute:juju-info +- - ntp:juju-info + - neutron-gateway:juju-info +- - ceph-radosgw:mon + - ceph:radosgw +- - ceph-radosgw:identity-service + - keystone:identity-service +- - ceilometer:amqp + - rabbitmq-server:amqp +- - ceilometer-agent:ceilometer-service + - ceilometer:ceilometer-service +- - ceilometer:identity-service + - keystone:identity-service +- - ceilometer:identity-notifications + - keystone:identity-notifications +- - ceilometer-agent:nova-ceilometer + - nova-compute:nova-ceilometer +- - ceilometer:shared-db + - mongodb:database +series: trusty +applications: + ceilometer: + annotations: + gui-x: '1288.8744298356794' + gui-y: '0.7040786325134718' + charm: cs:trusty/ceilometer-15 + num_units: 1 + options: + openstack-origin: cloud:trusty-liberty + to: + - lxc:2 + ceilometer-agent: + annotations: + gui-x: '1288.9999389648438' + gui-y: '503' + charm: cs:trusty/ceilometer-agent-11 + ceph: + annotations: + gui-x: '750' + gui-y: '500' + charm: cs:trusty/ceph-42 + num_units: 3 + options: + fsid: 5a791d94-980b-11e4-b6f6-3c970e8b1cf7 + monitor-secret: AQAi5a9UeJXUExAA+By9u+GPhl8/XiUQ4nwI3A== + osd-devices: 
/dev/sdb + osd-reformat: 'yes' + source: cloud:trusty-liberty + to: + - '1' + - '2' + - '3' + ceph-osd: + annotations: + gui-x: '1000' + gui-y: '500' + charm: cs:trusty/ceph-osd-14 + num_units: 1 + options: + osd-devices: /dev/sdb + osd-reformat: 'yes' + source: cloud:trusty-liberty + to: + - '0' + ceph-radosgw: + annotations: + gui-x: '1000' + gui-y: '250' + charm: cs:trusty/ceph-radosgw-18 + num_units: 1 + options: + source: cloud:trusty-liberty + use-embedded-webserver: true + to: + - lxc:0 + cinder: + annotations: + gui-x: '750' + gui-y: '0' + charm: cs:trusty/cinder-31 + num_units: 1 + options: + block-device: None + glance-api-version: 2 + ha-mcastport: 5401 + openstack-origin: cloud:trusty-liberty + to: + - lxc:1 + cinder-ceph: + annotations: + gui-x: '750' + gui-y: '250' + charm: cs:trusty/cinder-ceph-14 + num_units: 0 + glance: + annotations: + gui-x: '250' + gui-y: '0' + charm: cs:trusty/glance-28 + num_units: 1 + options: + ha-mcastport: 5402 + openstack-origin: cloud:trusty-liberty + to: + - lxc:2 + keystone: + annotations: + gui-x: '500' + gui-y: '0' + charm: cs:trusty/keystone-31 + num_units: 1 + options: + admin-password: openstack + ha-mcastport: 5403 + openstack-origin: cloud:trusty-liberty + to: + - lxc:3 + mongodb: + annotations: + gui-x: '1287.9999389648438' + gui-y: '251.24996948242188' + charm: cs:trusty/mongodb-28 + num_units: 1 + to: + - lxc:1 + mysql: + annotations: + gui-x: '0' + gui-y: '250' + charm: cs:trusty/percona-cluster-31 + num_units: 1 + options: + max-connections: 20000 + to: + - lxc:0 + neutron-api: + annotations: + gui-x: '500' + gui-y: '500' + charm: cs:trusty/neutron-api-21 + num_units: 1 + options: + neutron-security-groups: true + openstack-origin: cloud:trusty-liberty + to: + - lxc:1 + neutron-gateway: + annotations: + gui-x: '0' + gui-y: '0' + charm: cs:trusty/neutron-gateway-7 + num_units: 1 + options: + ext-port: eth1 + openstack-origin: cloud:trusty-liberty + to: + - '0' + neutron-openvswitch: + annotations: + gui-x: 
'250' + gui-y: '500' + charm: cs:trusty/neutron-openvswitch-13 + num_units: 0 + nova-cloud-controller: + annotations: + gui-x: '0' + gui-y: '500' + charm: cs:trusty/nova-cloud-controller-64 + num_units: 1 + options: + network-manager: Neutron + openstack-origin: cloud:trusty-liberty + quantum-security-groups: 'yes' + to: + - lxc:2 + nova-compute: + annotations: + gui-x: '250' + gui-y: '250' + charm: cs:trusty/nova-compute-33 + num_units: 3 + options: + enable-live-migration: true + enable-resize: true + manage-neutron-plugin-legacy-mode: false + migration-auth-type: ssh + openstack-origin: cloud:trusty-liberty + to: + - '1' + - '2' + - '3' + ntp: + annotations: + gui-x: '1000' + gui-y: '0' + charm: cs:trusty/ntp-14 + num_units: 0 + openstack-dashboard: + annotations: + gui-x: '500' + gui-y: '-250' + charm: cs:trusty/openstack-dashboard-19 + num_units: 1 + options: + openstack-origin: cloud:trusty-liberty + to: + - lxc:3 + rabbitmq-server: + annotations: + gui-x: '500' + gui-y: '250' + charm: cs:trusty/rabbitmq-server-42 + num_units: 1 + options: + source: cloud:trusty-liberty + to: + - lxc:0 + diff -Nru juju-core-2.0~beta15/src/gopkg.in/juju/jujusvg.v2/.gitignore juju-core-2.0.0/src/gopkg.in/juju/jujusvg.v2/.gitignore --- juju-core-2.0~beta15/src/gopkg.in/juju/jujusvg.v2/.gitignore 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/juju/jujusvg.v2/.gitignore 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,24 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +bundle.svg diff -Nru juju-core-2.0~beta15/src/gopkg.in/juju/jujusvg.v2/hull.go juju-core-2.0.0/src/gopkg.in/juju/jujusvg.v2/hull.go --- juju-core-2.0~beta15/src/gopkg.in/juju/jujusvg.v2/hull.go 1970-01-01 00:00:00.000000000 +0000 +++ 
juju-core-2.0.0/src/gopkg.in/juju/jujusvg.v2/hull.go 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,105 @@ +package jujusvg + +import ( + "image" + "math" + "sort" +) + +// getPointOutside returns a point that is outside the hull of existing placed +// vertices so that an object can be placed on the canvas without overlapping +// others. +func getPointOutside(vertices []image.Point, padding image.Point) image.Point { + // Shortcut some easy solutions. + switch len(vertices) { + case 0: + return image.Point{0, 0} + case 1: + return image.Point{ + vertices[0].X + padding.X, + vertices[0].Y + padding.Y, + } + case 2: + return image.Point{ + int(math.Max(float64(vertices[0].X), float64(vertices[1].X))) + padding.X, + int(math.Max(float64(vertices[0].Y), float64(vertices[1].Y))) + padding.Y, + } + } + hull := convexHull(vertices) + // Find point that is the furthest to the right on the hull. + var rightmost image.Point + maxDistance := 0.0 + for _, vertex := range hull { + fromOrigin := line{p0: vertex, p1: image.Point{0, 0}} + distance := fromOrigin.length() + if math.Abs(distance) > maxDistance { + maxDistance = math.Abs(distance) + rightmost = vertex + } + } + return image.Point{ + rightmost.X + padding.X, + rightmost.Y + padding.Y, + } +} + +// vertexSet implements sort.Interface for image.Point, sorting first by X, then +// by Y +type vertexSet []image.Point + +func (vs vertexSet) Len() int { return len(vs) } +func (vs vertexSet) Swap(i, j int) { vs[i], vs[j] = vs[j], vs[i] } +func (vs vertexSet) Less(i, j int) bool { + if vs[i].X == vs[j].X { + return vs[i].Y < vs[j].Y + } + return vs[i].X < vs[j].X +} + +// convexHull takes a list of vertices and returns the set of vertices which +// make up the convex hull encapsulating all vertices on a plane. +func convexHull(vertices []image.Point) []image.Point { + // Simple cases can be shortcutted. 
+ if len(vertices) == 0 { + return []image.Point{ + {0, 0}, + } + } + // For our purposes, we can assume that three vertices form a hull. + if len(vertices) < 4 { + return vertices + } + + sort.Sort(vertexSet(vertices)) + var lower, upper []image.Point + for _, vertex := range vertices { + for len(lower) >= 2 && cross(lower[len(lower)-2], lower[len(lower)-1], vertex) <= 0 { + lower = lower[:len(lower)-1] + } + lower = append(lower, vertex) + } + + for _, vertex := range reverse(vertices) { + for len(upper) >= 2 && cross(upper[len(upper)-2], upper[len(upper)-1], vertex) <= 0 { + upper = upper[:len(upper)-1] + } + upper = append(upper, vertex) + } + return append(lower[:len(lower)-1], upper[:len(upper)-1]...) +} + +// cross finds the 2D cross-product of OA and OB vectors. +// Returns a positive value if OAB makes a counter-clockwise turn, a negative +// value if OAB makes a clockwise turn, and zero if the points are collinear. +func cross(o, a, b image.Point) int { + return (a.X-o.X)*(b.Y-o.Y) - (a.Y-o.Y)*(b.X-o.X) +} + +// reverse reverses a slice of Points for use in finding the upper hull. 
+func reverse(vertices []image.Point) []image.Point { + for i := 0; i < len(vertices)/2; i++ { + opp := len(vertices) - (i + 1) + vertices[i], vertices[opp] = vertices[opp], vertices[i] + } + return vertices +} diff -Nru juju-core-2.0~beta15/src/gopkg.in/juju/jujusvg.v2/hull_test.go juju-core-2.0.0/src/gopkg.in/juju/jujusvg.v2/hull_test.go --- juju-core-2.0~beta15/src/gopkg.in/juju/jujusvg.v2/hull_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/juju/jujusvg.v2/hull_test.go 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,77 @@ +package jujusvg + +import ( + "image" + + gc "gopkg.in/check.v1" +) + +type HullSuite struct{} + +var _ = gc.Suite(&HullSuite{}) + +func (s *HullSuite) TestGetPointOutside(c *gc.C) { + var tests = []struct { + about string + vertices []image.Point + expected image.Point + }{ + { + about: "zero vertices", + vertices: []image.Point{}, + expected: image.Point{0, 0}, + }, + { + about: "one vertex", + vertices: []image.Point{{0, 0}}, + expected: image.Point{10, 10}, + }, + { + about: "two vertices", + vertices: []image.Point{{0, 0}, {10, 10}}, + expected: image.Point{20, 20}, + }, + { + about: "three vertices (convexHull fall through)", + vertices: []image.Point{{0, 0}, {0, 10}, {10, 0}}, + expected: image.Point{10, 20}, + }, + { + about: "four vertices", + vertices: []image.Point{{0, 0}, {0, 10}, {10, 0}, {10, 10}}, + expected: image.Point{20, 20}, + }, + } + for _, test := range tests { + c.Log(test.about) + c.Assert(getPointOutside(test.vertices, image.Point{10, 10}), gc.Equals, test.expected) + } +} + +func (s *HullSuite) TestConvexHull(c *gc.C) { + // Zero vertices + vertices := []image.Point{} + c.Assert(convexHull(vertices), gc.DeepEquals, []image.Point{{0, 0}}) + + // Identities + vertices = []image.Point{{1, 1}} + c.Assert(convexHull(vertices), gc.DeepEquals, vertices) + + vertices = []image.Point{{1, 1}, {2, 2}} + c.Assert(convexHull(vertices), gc.DeepEquals, vertices) + + vertices = []image.Point{{1, 1}, {2, 
2}, {1, 2}} + c.Assert(convexHull(vertices), gc.DeepEquals, vertices) + + // > 3 vertices + vertices = []image.Point{} + for i := 0; i < 100; i++ { + vertices = append(vertices, image.Point{i / 10, i % 10}) + } + c.Assert(convexHull(vertices), gc.DeepEquals, []image.Point{ + {0, 0}, + {9, 0}, + {9, 9}, + {0, 9}, + }) +} diff -Nru juju-core-2.0~beta15/src/gopkg.in/juju/jujusvg.v2/iconfetcher.go juju-core-2.0.0/src/gopkg.in/juju/jujusvg.v2/iconfetcher.go --- juju-core-2.0~beta15/src/gopkg.in/juju/jujusvg.v2/iconfetcher.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/juju/jujusvg.v2/iconfetcher.go 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,139 @@ +package jujusvg + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "sync" + + "github.com/juju/utils/parallel" + "github.com/juju/xml" + "gopkg.in/errgo.v1" + "gopkg.in/juju/charm.v6-unstable" +) + +// An IconFetcher provides functionality for retrieving icons for the charms +// within a given bundle. The FetchIcons function accepts a bundle, and +// returns a map from charm paths to icon data. +type IconFetcher interface { + FetchIcons(*charm.BundleData) (map[string][]byte, error) +} + +// LinkFetcher fetches icons as links so that they are included within the SVG +// as remote resources using SVG tags. +type LinkFetcher struct { + // IconURL returns the URL of the entity for embedding + IconURL func(*charm.URL) string +} + +// FetchIcons generates the svg image tags given an appropriate URL, generating +// tags only for unique icons. +func (l *LinkFetcher) FetchIcons(b *charm.BundleData) (map[string][]byte, error) { + // Maintain a list of icons that have already been fetched. + alreadyFetched := make(map[string]bool) + + // Build the map of icons. 
+ icons := make(map[string][]byte) + for _, applicationData := range b.Applications { + charmId, err := charm.ParseURL(applicationData.Charm) + if err != nil { + return nil, errgo.Notef(err, "cannot parse charm %q", applicationData.Charm) + } + path := charmId.Path() + + // Don't duplicate icons in the map. + if !alreadyFetched[path] { + alreadyFetched[path] = true + icons[path] = []byte(fmt.Sprintf(` + + + `, escapeString(l.IconURL(charmId)))) + } + } + return icons, nil +} + +// Wrap around xml.EscapeText to make it more string-friendly. +func escapeString(s string) string { + var buf bytes.Buffer + xml.EscapeText(&buf, []byte(s)) + return buf.String() +} + +// HTTPFetcher is an implementation of IconFetcher which retrieves charm +// icons from the web using the URL generated by IconURL on that charm. The +// HTTP Client used may be overridden by an instance of http.Client. The icons +// may optionally be fetched concurrently. +type HTTPFetcher struct { + // Concurrency specifies the number of GoRoutines to use when fetching + // icons. If it is not positive, 10 will be used. Setting this to 1 + // makes this method nominally synchronous. + Concurrency int + + // IconURL returns the URL from which to fetch the given entity's icon SVG. + IconURL func(*charm.URL) string + + // Client specifies what HTTP client to use; if it is not provided, + // http.DefaultClient will be used. + Client *http.Client +} + +// FetchIcons retrieves icon SVGs over HTTP. If specified in the struct, icons +// will be fetched concurrently. +func (h *HTTPFetcher) FetchIcons(b *charm.BundleData) (map[string][]byte, error) { + client := http.DefaultClient + if h.Client != nil { + client = h.Client + } + concurrency := h.Concurrency + if concurrency <= 0 { + concurrency = 10 + } + var iconsMu sync.Mutex // Guards icons. 
+ icons := make(map[string][]byte) + alreadyFetched := make(map[string]bool) + run := parallel.NewRun(concurrency) + for _, applicationData := range b.Applications { + charmId, err := charm.ParseURL(applicationData.Charm) + if err != nil { + return nil, errgo.Notef(err, "cannot parse charm %q", applicationData.Charm) + } + path := charmId.Path() + if alreadyFetched[path] { + continue + } + alreadyFetched[path] = true + run.Do(func() error { + icon, err := h.fetchIcon(h.IconURL(charmId), client) + if err != nil { + return err + } + iconsMu.Lock() + defer iconsMu.Unlock() + icons[path] = icon + return nil + }) + } + if err := run.Wait(); err != nil { + return nil, err + } + return icons, nil +} + +// fetchIcon retrieves a single icon svg over HTTP. +func (h *HTTPFetcher) fetchIcon(url string, client *http.Client) ([]byte, error) { + resp, err := client.Get(url) + if err != nil { + return nil, errgo.Notef(err, "HTTP error fetching %s: %v", url, err) + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return nil, errgo.Newf("cannot retrieve icon from %s: %s", url, resp.Status) + } + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, errgo.Notef(err, "could not read icon data from url %s", url) + } + return body, nil +} diff -Nru juju-core-2.0~beta15/src/gopkg.in/juju/jujusvg.v2/iconfetcher_test.go juju-core-2.0.0/src/gopkg.in/juju/jujusvg.v2/iconfetcher_test.go --- juju-core-2.0~beta15/src/gopkg.in/juju/jujusvg.v2/iconfetcher_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/juju/jujusvg.v2/iconfetcher_test.go 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,119 @@ +package jujusvg + +import ( + "fmt" + "net/http" + "net/http/httptest" + "strings" + + gc "gopkg.in/check.v1" + "gopkg.in/juju/charm.v6-unstable" +) + +type IconFetcherSuite struct{} + +var _ = gc.Suite(&IconFetcherSuite{}) + +func (s *IconFetcherSuite) TestLinkFetchIcons(c *gc.C) { + tests := map[string][]byte{ + 
"~charming-devs/precise/elasticsearch-2": []byte(` + + + `), + "~juju-jitsu/precise/charmworld-58": []byte(` + + + `), + "precise/mongodb-21": []byte(` + + + `), + } + iconURL := func(ref *charm.URL) string { + return "/" + ref.Path() + ".svg" + } + b, err := charm.ReadBundleData(strings.NewReader(bundle)) + c.Assert(err, gc.IsNil) + err = b.Verify(nil, nil) + c.Assert(err, gc.IsNil) + fetcher := LinkFetcher{ + IconURL: iconURL, + } + iconMap, err := fetcher.FetchIcons(b) + c.Assert(err, gc.IsNil) + for charm, link := range tests { + assertXMLEqual(c, []byte(iconMap[charm]), []byte(link)) + } +} + +func (s *IconFetcherSuite) TestHTTPFetchIcons(c *gc.C) { + fetchCount := 0 + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + fetchCount++ + fmt.Fprintln(w, fmt.Sprintf("%s", r.URL.Path)) + })) + defer ts.Close() + + tsIconURL := func(ref *charm.URL) string { + return ts.URL + "/" + ref.Path() + ".svg" + } + b, err := charm.ReadBundleData(strings.NewReader(bundle)) + c.Assert(err, gc.IsNil) + err = b.Verify(nil, nil) + c.Assert(err, gc.IsNil) + // Only one copy of precise/mongodb-21 + b.Applications["duplicateApplication"] = &charm.ApplicationSpec{ + Charm: "cs:precise/mongodb-21", + NumUnits: 1, + } + fetcher := HTTPFetcher{ + Concurrency: 1, + IconURL: tsIconURL, + } + iconMap, err := fetcher.FetchIcons(b) + c.Assert(err, gc.IsNil) + c.Assert(iconMap, gc.DeepEquals, map[string][]byte{ + "~charming-devs/precise/elasticsearch-2": []byte("/~charming-devs/precise/elasticsearch-2.svg\n"), + "~juju-jitsu/precise/charmworld-58": []byte("/~juju-jitsu/precise/charmworld-58.svg\n"), + "precise/mongodb-21": []byte("/precise/mongodb-21.svg\n"), + }) + + fetcher.Concurrency = 10 + iconMap, err = fetcher.FetchIcons(b) + c.Assert(err, gc.IsNil) + c.Assert(iconMap, gc.DeepEquals, map[string][]byte{ + "~charming-devs/precise/elasticsearch-2": []byte("/~charming-devs/precise/elasticsearch-2.svg\n"), + "~juju-jitsu/precise/charmworld-58": 
[]byte("/~juju-jitsu/precise/charmworld-58.svg\n"), + "precise/mongodb-21": []byte("/precise/mongodb-21.svg\n"), + }) + c.Assert(fetchCount, gc.Equals, 6) +} + +func (s *IconFetcherSuite) TestHTTPBadIconURL(c *gc.C) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + http.Error(w, "bad-wolf", http.StatusForbidden) + return + })) + defer ts.Close() + + tsIconURL := func(ref *charm.URL) string { + return ts.URL + "/" + ref.Path() + ".svg" + } + + b, err := charm.ReadBundleData(strings.NewReader(bundle)) + c.Assert(err, gc.IsNil) + err = b.Verify(nil, nil) + c.Assert(err, gc.IsNil) + fetcher := HTTPFetcher{ + Concurrency: 1, + IconURL: tsIconURL, + } + iconMap, err := fetcher.FetchIcons(b) + c.Assert(err, gc.ErrorMatches, fmt.Sprintf("cannot retrieve icon from %s.+\\.svg: 403 Forbidden.*", ts.URL)) + c.Assert(iconMap, gc.IsNil) + + fetcher.Concurrency = 10 + iconMap, err = fetcher.FetchIcons(b) + c.Assert(err, gc.ErrorMatches, fmt.Sprintf("cannot retrieve icon from %s.+\\.svg: 403 Forbidden.*", ts.URL)) + c.Assert(iconMap, gc.IsNil) +} diff -Nru juju-core-2.0~beta15/src/gopkg.in/juju/jujusvg.v2/jujusvg.go juju-core-2.0.0/src/gopkg.in/juju/jujusvg.v2/jujusvg.go --- juju-core-2.0~beta15/src/gopkg.in/juju/jujusvg.v2/jujusvg.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/juju/jujusvg.v2/jujusvg.go 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,101 @@ +package jujusvg // import "gopkg.in/juju/jujusvg.v2" + +import ( + "fmt" + "image" + "math" + "sort" + "strconv" + "strings" + + "gopkg.in/errgo.v1" + "gopkg.in/juju/charm.v6-unstable" +) + +// NewFromBundle returns a new Canvas that can be used +// to generate a graphical representation of the given bundle +// data. The iconURL function is used to generate a URL +// that refers to an SVG for the supplied charm URL. 
+// If fetcher is non-nil, it will be used to fetch icon +// contents for any icons embedded within the charm, +// allowing the generated bundle to be self-contained. If fetcher +// is nil, a default fetcher which refers to icons by their +// URLs as svg tags will be used. +func NewFromBundle(b *charm.BundleData, iconURL func(*charm.URL) string, fetcher IconFetcher) (*Canvas, error) { + if fetcher == nil { + fetcher = &LinkFetcher{ + IconURL: iconURL, + } + } + iconMap, err := fetcher.FetchIcons(b) + if err != nil { + return nil, err + } + + var canvas Canvas + + // Verify the bundle to make sure that all the invariants + // that we depend on below actually hold true. + if err := b.Verify(nil, nil); err != nil { + return nil, errgo.Notef(err, "cannot verify bundle") + } + // Go through all applications in alphabetical order so that + // we get consistent results. + applicationNames := make([]string, 0, len(b.Applications)) + for name := range b.Applications { + applicationNames = append(applicationNames, name) + } + sort.Strings(applicationNames) + applications := make(map[string]*application) + applicationsNeedingPlacement := make(map[string]bool) + for _, name := range applicationNames { + applicationData := b.Applications[name] + x, xerr := strconv.ParseFloat(applicationData.Annotations["gui-x"], 64) + y, yerr := strconv.ParseFloat(applicationData.Annotations["gui-y"], 64) + if xerr != nil || yerr != nil { + if applicationData.Annotations["gui-x"] == "" && applicationData.Annotations["gui-y"] == "" { + applicationsNeedingPlacement[name] = true + x = 0 + y = 0 + } else { + return nil, errgo.Newf("application %q does not have a valid position", name) + } + } + charmID, err := charm.ParseURL(applicationData.Charm) + if err != nil { + // cannot actually happen, as we've verified it. 
+ return nil, errgo.Notef(err, "cannot parse charm %q", applicationData.Charm) + } + icon := iconMap[charmID.Path()] + svc := &application{ + name: name, + charmPath: charmID.Path(), + point: image.Point{int(x), int(y)}, + iconUrl: iconURL(charmID), + iconSrc: icon, + } + applications[name] = svc + } + padding := image.Point{int(math.Floor(applicationBlockSize * 1.5)), int(math.Floor(applicationBlockSize * 0.5))} + for name := range applicationsNeedingPlacement { + vertices := []image.Point{} + for n, svc := range applications { + if !applicationsNeedingPlacement[n] { + vertices = append(vertices, svc.point) + } + } + applications[name].point = getPointOutside(vertices, padding) + applicationsNeedingPlacement[name] = false + } + for _, name := range applicationNames { + canvas.addApplication(applications[name]) + } + for _, relation := range b.Relations { + canvas.addRelation(&applicationRelation{ + name: fmt.Sprintf("%s %s", relation[0], relation[1]), + applicationA: applications[strings.Split(relation[0], ":")[0]], + applicationB: applications[strings.Split(relation[1], ":")[0]], + }) + } + return &canvas, nil +} diff -Nru juju-core-2.0~beta15/src/gopkg.in/juju/jujusvg.v2/jujusvg_test.go juju-core-2.0.0/src/gopkg.in/juju/jujusvg.v2/jujusvg_test.go --- juju-core-2.0~beta15/src/gopkg.in/juju/jujusvg.v2/jujusvg_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/juju/jujusvg.v2/jujusvg_test.go 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,416 @@ +package jujusvg + +import ( + "bytes" + "fmt" + "net/http" + "net/http/httptest" + "strings" + "testing" + + gc "gopkg.in/check.v1" + "gopkg.in/juju/charm.v6-unstable" +) + +func Test(t *testing.T) { gc.TestingT(t) } + +type newSuite struct{} + +var _ = gc.Suite(&newSuite{}) + +var bundle = ` +applications: + mongodb: + charm: "cs:precise/mongodb-21" + num_units: 1 + annotations: + "gui-x": "940.5" + "gui-y": "388.7698359714502" + constraints: "mem=2G cpu-cores=1" + elasticsearch: + charm: 
"cs:~charming-devs/precise/elasticsearch-2" + num_units: 1 + annotations: + "gui-x": "490.5" + "gui-y": "369.7698359714502" + constraints: "mem=2G cpu-cores=1" + charmworld: + charm: "cs:~juju-jitsu/precise/charmworld-58" + num_units: 1 + expose: true + annotations: + "gui-x": "813.5" + "gui-y": "112.23016402854975" + options: + charm_import_limit: -1 + source: "lp:~bac/charmworld/ingest-local-charms" + revno: 511 +relations: + - - "charmworld:essearch" + - "elasticsearch:essearch" + - - "charmworld:database" + - "mongodb:database" +series: precise +` + +func iconURL(ref *charm.URL) string { + return "http://0.1.2.3/" + ref.Path() + ".svg" +} + +type emptyFetcher struct{} + +func (f *emptyFetcher) FetchIcons(*charm.BundleData) (map[string][]byte, error) { + return nil, nil +} + +type errFetcher string + +func (f *errFetcher) FetchIcons(*charm.BundleData) (map[string][]byte, error) { + return nil, fmt.Errorf("%s", *f) +} + +func (s *newSuite) TestNewFromBundle(c *gc.C) { + b, err := charm.ReadBundleData(strings.NewReader(bundle)) + c.Assert(err, gc.IsNil) + err = b.Verify(nil, nil) + c.Assert(err, gc.IsNil) + + cvs, err := NewFromBundle(b, iconURL, nil) + c.Assert(err, gc.IsNil) + + var buf bytes.Buffer + cvs.Marshal(&buf) + c.Logf("%s", buf.String()) + assertXMLEqual(c, buf.Bytes(), []byte(` + + + + + + + + + + + + + + + + + + + + +charmworld:essearch elasticsearch:essearch + + + + + + +charmworld:database mongodb:database + + + + + + + + +charmworld + + + +charmworld + + +elasticsearch + + + +elasticsearch + + +mongodb + + + +mongodb + + + +`)) +} + +func (s *newSuite) TestNewFromBundleWithUnplacedApplication(c *gc.C) { + b, err := charm.ReadBundleData(strings.NewReader(bundle)) + c.Assert(err, gc.IsNil) + err = b.Verify(nil, nil) + c.Assert(err, gc.IsNil) + b.Applications["charmworld"].Annotations["gui-x"] = "" + b.Applications["charmworld"].Annotations["gui-y"] = "" + + cvs, err := NewFromBundle(b, iconURL, nil) + c.Assert(err, gc.IsNil) + + var buf bytes.Buffer 
+ cvs.Marshal(&buf) + c.Logf("%s", buf.String()) + assertXMLEqual(c, buf.Bytes(), []byte(` + + + + + + + + + + + + + + + + + + + + +charmworld:essearch elasticsearch:essearch + + + + + + +charmworld:database mongodb:database + + + + + + + + +charmworld + + + +charmworld + + +elasticsearch + + + +elasticsearch + + +mongodb + + + +mongodb + + + +`)) +} + +func (s *newSuite) TestWithFetcher(c *gc.C) { + b, err := charm.ReadBundleData(strings.NewReader(bundle)) + c.Assert(err, gc.IsNil) + err = b.Verify(nil, nil) + c.Assert(err, gc.IsNil) + + cvs, err := NewFromBundle(b, iconURL, new(emptyFetcher)) + c.Assert(err, gc.IsNil) + + var buf bytes.Buffer + cvs.Marshal(&buf) + c.Logf("%s", buf.String()) + assertXMLEqual(c, buf.Bytes(), []byte(` + + + + + + + + + + + + + + +charmworld:essearch elasticsearch:essearch + + + + + + +charmworld:database mongodb:database + + + + + + + + +charmworld + + + +charmworld + + +elasticsearch + + + +elasticsearch + + +mongodb + + + +mongodb + + + +`)) +} + +func (s *newSuite) TestDefaultHTTPFetcher(c *gc.C) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintln(w, "") + })) + defer ts.Close() + + tsIconUrl := func(ref *charm.URL) string { + return ts.URL + "/" + ref.Path() + ".svg" + } + + b, err := charm.ReadBundleData(strings.NewReader(bundle)) + c.Assert(err, gc.IsNil) + err = b.Verify(nil, nil) + c.Assert(err, gc.IsNil) + + cvs, err := NewFromBundle(b, tsIconUrl, &HTTPFetcher{IconURL: tsIconUrl}) + c.Assert(err, gc.IsNil) + + var buf bytes.Buffer + cvs.Marshal(&buf) + c.Logf("%s", buf.String()) + assertXMLEqual(c, buf.Bytes(), []byte(` + + + + + + + + + + + + + + +charmworld:essearch elasticsearch:essearch + + + + + + +charmworld:database mongodb:database + + + + + + + + +charmworld + + + +charmworld + + +elasticsearch + + + +elasticsearch + + +mongodb + + + +mongodb + + + +`)) + +} + +func (s *newSuite) TestFetcherError(c *gc.C) { + b, err := 
charm.ReadBundleData(strings.NewReader(bundle)) + c.Assert(err, gc.IsNil) + err = b.Verify(nil, nil) + c.Assert(err, gc.IsNil) + + ef := errFetcher("bad-wolf") + _, err = NewFromBundle(b, iconURL, &ef) + c.Assert(err, gc.ErrorMatches, "bad-wolf") +} + +func (s *newSuite) TestWithBadBundle(c *gc.C) { + b, err := charm.ReadBundleData(strings.NewReader(bundle)) + c.Assert(err, gc.IsNil) + b.Relations[0][0] = "evil-unknown-application" + cvs, err := NewFromBundle(b, iconURL, nil) + c.Assert(err, gc.ErrorMatches, "cannot verify bundle: .*") + c.Assert(cvs, gc.IsNil) +} + +func (s *newSuite) TestWithBadPosition(c *gc.C) { + b, err := charm.ReadBundleData(strings.NewReader(bundle)) + c.Assert(err, gc.IsNil) + + b.Applications["charmworld"].Annotations["gui-x"] = "bad" + cvs, err := NewFromBundle(b, iconURL, nil) + c.Assert(err, gc.ErrorMatches, `application "charmworld" does not have a valid position`) + c.Assert(cvs, gc.IsNil) + + b, err = charm.ReadBundleData(strings.NewReader(bundle)) + c.Assert(err, gc.IsNil) + + b.Applications["charmworld"].Annotations["gui-y"] = "bad" + cvs, err = NewFromBundle(b, iconURL, nil) + c.Assert(err, gc.ErrorMatches, `application "charmworld" does not have a valid position`) + c.Assert(cvs, gc.IsNil) +} diff -Nru juju-core-2.0~beta15/src/gopkg.in/juju/jujusvg.v2/LICENSE juju-core-2.0.0/src/gopkg.in/juju/jujusvg.v2/LICENSE --- juju-core-2.0~beta15/src/gopkg.in/juju/jujusvg.v2/LICENSE 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/juju/jujusvg.v2/LICENSE 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,191 @@ +All files in this repository are licensed as follows. If you contribute +to this repository, it is assumed that you license your contribution +under the same license unless you state otherwise. + +All files Copyright (C) 2015 Canonical Ltd. unless otherwise specified in the file. + +This software is licensed under the LGPLv3, included below. 
+ +As a special exception to the GNU Lesser General Public License version 3 +("LGPL3"), the copyright holders of this Library give you permission to +convey to a third party a Combined Work that links statically or dynamically +to this Library without providing any Minimal Corresponding Source or +Minimal Application Code as set out in 4d or providing the installation +information set out in section 4e, provided that you comply with the other +provisions of LGPL3 and provided that you meet, for the Application the +terms and conditions of the license(s) which apply to the Application. + +Except as stated in this special exception, the provisions of LGPL3 will +continue to comply in full to this Library. If you modify this Library, you +may apply this exception to your version of this Library, but you are not +obliged to do so. If you do not wish to do so, delete this exception +statement from your version. This exception does not (and cannot) modify any +license terms which apply to the Application, with which you must still +comply. + + + GNU LESSER GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + + This version of the GNU Lesser General Public License incorporates +the terms and conditions of version 3 of the GNU General Public +License, supplemented by the additional permissions listed below. + + 0. Additional Definitions. + + As used herein, "this License" refers to version 3 of the GNU Lesser +General Public License, and the "GNU GPL" refers to version 3 of the GNU +General Public License. + + "The Library" refers to a covered work governed by this License, +other than an Application or a Combined Work as defined below. + + An "Application" is any work that makes use of an interface provided +by the Library, but which is not otherwise based on the Library. 
+Defining a subclass of a class defined by the Library is deemed a mode +of using an interface provided by the Library. + + A "Combined Work" is a work produced by combining or linking an +Application with the Library. The particular version of the Library +with which the Combined Work was made is also called the "Linked +Version". + + The "Minimal Corresponding Source" for a Combined Work means the +Corresponding Source for the Combined Work, excluding any source code +for portions of the Combined Work that, considered in isolation, are +based on the Application, and not on the Linked Version. + + The "Corresponding Application Code" for a Combined Work means the +object code and/or source code for the Application, including any data +and utility programs needed for reproducing the Combined Work from the +Application, but excluding the System Libraries of the Combined Work. + + 1. Exception to Section 3 of the GNU GPL. + + You may convey a covered work under sections 3 and 4 of this License +without being bound by section 3 of the GNU GPL. + + 2. Conveying Modified Versions. + + If you modify a copy of the Library, and, in your modifications, a +facility refers to a function or data to be supplied by an Application +that uses the facility (other than as an argument passed when the +facility is invoked), then you may convey a copy of the modified +version: + + a) under this License, provided that you make a good faith effort to + ensure that, in the event an Application does not supply the + function or data, the facility still operates, and performs + whatever part of its purpose remains meaningful, or + + b) under the GNU GPL, with none of the additional permissions of + this License applicable to that copy. + + 3. Object Code Incorporating Material from Library Header Files. + + The object code form of an Application may incorporate material from +a header file that is part of the Library. 
You may convey such object +code under terms of your choice, provided that, if the incorporated +material is not limited to numerical parameters, data structure +layouts and accessors, or small macros, inline functions and templates +(ten or fewer lines in length), you do both of the following: + + a) Give prominent notice with each copy of the object code that the + Library is used in it and that the Library and its use are + covered by this License. + + b) Accompany the object code with a copy of the GNU GPL and this license + document. + + 4. Combined Works. + + You may convey a Combined Work under terms of your choice that, +taken together, effectively do not restrict modification of the +portions of the Library contained in the Combined Work and reverse +engineering for debugging such modifications, if you also do each of +the following: + + a) Give prominent notice with each copy of the Combined Work that + the Library is used in it and that the Library and its use are + covered by this License. + + b) Accompany the Combined Work with a copy of the GNU GPL and this license + document. + + c) For a Combined Work that displays copyright notices during + execution, include the copyright notice for the Library among + these notices, as well as a reference directing the user to the + copies of the GNU GPL and this license document. + + d) Do one of the following: + + 0) Convey the Minimal Corresponding Source under the terms of this + License, and the Corresponding Application Code in a form + suitable for, and under terms that permit, the user to + recombine or relink the Application with a modified version of + the Linked Version to produce a modified Combined Work, in the + manner specified by section 6 of the GNU GPL for conveying + Corresponding Source. + + 1) Use a suitable shared library mechanism for linking with the + Library. 
A suitable mechanism is one that (a) uses at run time + a copy of the Library already present on the user's computer + system, and (b) will operate properly with a modified version + of the Library that is interface-compatible with the Linked + Version. + + e) Provide Installation Information, but only if you would otherwise + be required to provide such information under section 6 of the + GNU GPL, and only to the extent that such information is + necessary to install and execute a modified version of the + Combined Work produced by recombining or relinking the + Application with a modified version of the Linked Version. (If + you use option 4d0, the Installation Information must accompany + the Minimal Corresponding Source and Corresponding Application + Code. If you use option 4d1, you must provide the Installation + Information in the manner specified by section 6 of the GNU GPL + for conveying Corresponding Source.) + + 5. Combined Libraries. + + You may place library facilities that are a work based on the +Library side by side in a single library together with other library +facilities that are not Applications and are not covered by this +License, and convey such a combined library under terms of your +choice, if you do both of the following: + + a) Accompany the combined library with a copy of the same work based + on the Library, uncombined with any other library facilities, + conveyed under the terms of this License. + + b) Give prominent notice with the combined library that part of it + is a work based on the Library, and explaining where to find the + accompanying uncombined form of the same work. + + 6. Revised Versions of the GNU Lesser General Public License. + + The Free Software Foundation may publish revised and/or new versions +of the GNU Lesser General Public License from time to time. Such new +versions will be similar in spirit to the present version, but may +differ in detail to address new problems or concerns. 
+ + Each version is given a distinguishing version number. If the +Library as you received it specifies that a certain numbered version +of the GNU Lesser General Public License "or any later version" +applies to it, you have the option of following the terms and +conditions either of that published version or of any later version +published by the Free Software Foundation. If the Library as you +received it does not specify a version number of the GNU Lesser +General Public License, you may choose any version of the GNU Lesser +General Public License ever published by the Free Software Foundation. + + If the Library as you received it specifies that a proxy can decide +whether future versions of the GNU Lesser General Public License shall +apply, that proxy's public statement of acceptance of any version is +permanent authorization for you to choose that version for the +Library. diff -Nru juju-core-2.0~beta15/src/gopkg.in/juju/jujusvg.v2/Makefile juju-core-2.0.0/src/gopkg.in/juju/jujusvg.v2/Makefile --- juju-core-2.0~beta15/src/gopkg.in/juju/jujusvg.v2/Makefile 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/juju/jujusvg.v2/Makefile 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,55 @@ +ifndef GOPATH + $(warning You need to set up a GOPATH.) +endif + +PROJECT := gopkg.in/juju/jujusvg.v2 +PROJECT_DIR := $(shell go list -e -f '{{.Dir}}' $(PROJECT)) + +help: + @echo "Available targets:" + @echo " deps - fetch all dependencies" + @echo " build - build the project" + @echo " check - run tests" + @echo " install - install the library in your GOPATH" + @echo " clean - clean the project" + +# Start of GOPATH-dependent targets. Some targets only make sense - +# and will only work - when this tree is found on the GOPATH. +ifeq ($(CURDIR),$(PROJECT_DIR)) + +deps: + go get -v -t $(PROJECT)/... + +build: + go build $(PROJECT)/... + +check: + go test $(PROJECT)/... + +install: + go install $(INSTALL_FLAGS) -v $(PROJECT)/... 
+ +clean: + go clean $(PROJECT)/... + +else + +deps: + $(error Cannot $@; $(CURDIR) is not on GOPATH) + +build: + $(error Cannot $@; $(CURDIR) is not on GOPATH) + +check: + $(error Cannot $@; $(CURDIR) is not on GOPATH) + +install: + $(error Cannot $@; $(CURDIR) is not on GOPATH) + +clean: + $(error Cannot $@; $(CURDIR) is not on GOPATH) + +endif +# End of GOPATH-dependent targets. + +.PHONY: help deps build check install clean diff -Nru juju-core-2.0~beta15/src/gopkg.in/juju/jujusvg.v2/README.md juju-core-2.0.0/src/gopkg.in/juju/jujusvg.v2/README.md --- juju-core-2.0~beta15/src/gopkg.in/juju/jujusvg.v2/README.md 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/juju/jujusvg.v2/README.md 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,70 @@ +jujusvg +======= + +A library for generating SVGs from Juju bundles and environments. + +Installation +------------ + +To start using jujusvg, first ensure you have a valid Go environment, then run +the following: + + go get gopkg.in/juju/jujusvg.v2 + +Dependencies +------------ + +The project uses godeps (https://launchpad.net/godeps) to manage Go +dependencies. To install this, run: + + + go get github.com/rogpeppe/godeps + +After installing it, you can update the dependencies to the revision specified +in the `dependencies.tsv` file with the following: + + make deps + +Use `make create-deps` to update the dependencies file. + +Usage +----- + +Given a Juju bundle, you can convert this to an SVG programatically. This +generates a simple SVG representation of a bundle or bundles that can then be +included in a webpage as a visualization. + +For an example of how to use this library, please see `examples/generatesvg.go`. +You can run this example like: + + go run generatesvg.go bundle.yaml > bundle.svg + +The examples directory also includes three sample bundles that you can play +around with, or you can use the [Juju GUI](https://demo.jujucharms.com) to +generate your own bundles. 
+ +Design-related assets +--------------------- + +Some assets are specified based on assets provided by the design team. These +assets are specified in the defs section of the generated SVG, and can thus +be found in the Canvas.definition() method. These assets are, except where +indicated, embedded in a go file assigned to an exported variable, so that they +may be used like so: + +```go +import ( + "io" + + "gopkg.in/juju/jujusvg.v2/assets" +) + +// ... + +io.WriteString(canvas.Writer, assets.AssetToWrite) +``` + +Current assets in use: + +* ~~The service block~~ *the service block has been deprecated and is now handled with SVGo* +* The relation health indicator diff -Nru juju-core-2.0~beta15/src/gopkg.in/juju/jujusvg.v2/svg.go juju-core-2.0.0/src/gopkg.in/juju/jujusvg.v2/svg.go --- juju-core-2.0~beta15/src/gopkg.in/juju/jujusvg.v2/svg.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/juju/jujusvg.v2/svg.go 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,98 @@ +package jujusvg + +import ( + "io" + + "github.com/juju/xml" + "gopkg.in/errgo.v1" +) + +const svgNamespace = "http://www.w3.org/2000/svg" + +// Process an icon SVG file from a reader, removing anything surrounding +// the tags, which would be invalid in this context (such as +// decls, directives, etc), writing out to a writer. In +// addition, loosely check that the icon is a valid SVG file. The id +// argument provides a unique identifier for the icon SVG so that it can +// be referenced within the bundle diagram. If an id attribute on the SVG +// tag already exists, it will be replaced with this argument. 
+func processIcon(r io.Reader, w io.Writer, id string) error { + dec := xml.NewDecoder(r) + dec.DefaultSpace = svgNamespace + + enc := xml.NewEncoder(w) + + svgStartFound := false + svgEndFound := false + depth := 0 + for depth < 1 { + tok, err := dec.Token() + if err != nil { + if err == io.EOF { + break + } + return errgo.Notef(err, "cannot get token") + } + tag, ok := tok.(xml.StartElement) + if ok && tag.Name.Space == svgNamespace && tag.Name.Local == "svg" { + svgStartFound = true + depth++ + tag.Attr = setXMLAttr(tag.Attr, xml.Name{ + Local: "id", + }, id) + if err := enc.EncodeToken(tag); err != nil { + return errgo.Notef(err, "cannot encode token %#v", tag) + } + } + } + for depth > 0 { + tok, err := dec.Token() + if err != nil { + if err == io.EOF { + break + } + return errgo.Notef(err, "cannot get token") + } + switch tag := tok.(type) { + case xml.StartElement: + if tag.Name.Space == svgNamespace && tag.Name.Local == "svg" { + depth++ + } + case xml.EndElement: + if tag.Name.Space == svgNamespace && tag.Name.Local == "svg" { + depth-- + if depth == 0 { + svgEndFound = true + } + } + } + if err := enc.EncodeToken(tok); err != nil { + return errgo.Notef(err, "cannot encode token %#v", tok) + } + } + + if !svgStartFound || !svgEndFound { + return errgo.Newf("icon does not appear to be a valid SVG") + } + + if err := enc.Flush(); err != nil { + return err + } + + return nil +} + +// setXMLAttr returns the given attributes with the given attribute name set to +// val, adding an attribute if necessary. 
+func setXMLAttr(attrs []xml.Attr, name xml.Name, val string) []xml.Attr { + for i := range attrs { + if attrs[i].Name == name { + attrs[i].Value = val + return attrs + } + } + return append(attrs, xml.Attr{ + Name: name, + Value: val, + }) +} diff -Nru juju-core-2.0~beta15/src/gopkg.in/juju/jujusvg.v2/svg_test.go juju-core-2.0.0/src/gopkg.in/juju/jujusvg.v2/svg_test.go --- juju-core-2.0~beta15/src/gopkg.in/juju/jujusvg.v2/svg_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/juju/jujusvg.v2/svg_test.go 2016-10-13 14:32:06.000000000 +0000 @@ -0,0 +1,217 @@ +package jujusvg + +import ( + "bytes" + "fmt" + + "github.com/juju/xml" + gc "gopkg.in/check.v1" +) + +type SVGSuite struct{} + +var _ = gc.Suite(&SVGSuite{}) + +func (s *SVGSuite) TestProcessIcon(c *gc.C) { + tests := []struct { + about string + icon string + expected string + err string + }{ + { + about: "Nothing stripped", + icon: ` + + + + `, + expected: ` + + + `, + }, + { + about: "SVG inside an SVG", + icon: ` + + + + + + + `, + expected: ` + + + + + + `, + }, + { + about: "ProcInst at start stripped", + icon: ` + + + + + `, + expected: ` + + + `, + }, + { + about: "Directive at start stripped", + icon: ` + + + + + `, + expected: ` + + + `, + }, + { + about: "ProcInst at end stripped", + icon: ` + + + + + `, + expected: ` + + + `, + }, + { + about: "Directive at end stripped", + icon: ` + + + + + `, + expected: ` + + + `, + }, + { + about: "ProcInsts/Directives inside svg left in place", + icon: ` + + + + + + `, + expected: ` + + + + + `, + }, + { + about: "Not an SVG", + icon: ` + + bad-wolf + + `, + err: "icon does not appear to be a valid SVG", + }, + } + for i, test := range tests { + in := bytes.NewBuffer([]byte(test.icon)) + out := bytes.Buffer{} + err := processIcon(in, &out, fmt.Sprintf("test-%d", i)) + if test.err != "" { + c.Assert(err, gc.ErrorMatches, test.err) + } else { + c.Assert(err, gc.IsNil) + assertXMLEqual(c, out.Bytes(), []byte(test.expected)) + } + } +} + 
+func (s *SVGSuite) TestSetXMLAttr(c *gc.C) { + // Attribute is added. + expected := []xml.Attr{ + { + Name: xml.Name{ + Local: "id", + }, + Value: "foo", + }, + } + + result := setXMLAttr([]xml.Attr{}, xml.Name{ + Local: "id", + }, "foo") + c.Assert(result, gc.DeepEquals, expected) + + // Attribute is changed. + result = setXMLAttr([]xml.Attr{ + { + Name: xml.Name{ + Local: "id", + }, + Value: "bar", + }, + }, xml.Name{ + Local: "id", + }, "foo") + c.Assert(result, gc.DeepEquals, expected) + + // Attribute is changed, existing attributes unchanged. + expected = []xml.Attr{ + { + Name: xml.Name{ + Local: "class", + }, + Value: "bar", + }, + { + Name: xml.Name{ + Local: "id", + }, + Value: "foo", + }, + } + result = setXMLAttr([]xml.Attr{ + { + Name: xml.Name{ + Local: "class", + }, + Value: "bar", + }, + { + Name: xml.Name{ + Local: "id", + }, + Value: "bar", + }, + }, xml.Name{ + Local: "id", + }, "foo") + c.Assert(result, gc.DeepEquals, expected) + + // Attribute is added, existing attributes unchanged. + result = setXMLAttr([]xml.Attr{ + { + Name: xml.Name{ + Local: "class", + }, + Value: "bar", + }, + }, xml.Name{ + Local: "id", + }, "foo") + c.Assert(result, gc.DeepEquals, expected) +} diff -Nru juju-core-2.0~beta15/src/gopkg.in/juju/names.v2/charm.go juju-core-2.0.0/src/gopkg.in/juju/names.v2/charm.go --- juju-core-2.0~beta15/src/gopkg.in/juju/names.v2/charm.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/juju/names.v2/charm.go 2016-10-13 14:31:55.000000000 +0000 @@ -11,7 +11,11 @@ // CharmTagKind specifies charm tag kind const CharmTagKind = "charm" -// Valid charm url is of the form +// Valid charm url can be either in V1 or V3 format. (V2 is a +// charmstore web URL like https://jujucharms.com/postgresql/105, but +// that's not valid as a tag.) +// +// V1 is of the form: // schema:~user/series/name-revision // where // schema is optional and can be either "local" or "cs". 
@@ -20,6 +24,10 @@ // series is optional and is a valid series name // name is mandatory and is the name of the charm // revision is optional and can be -1 if revision is unset +// +// V3 is of the form +// schema:user/name/series/revision +// with the same fields and constraints as the V1 format. var ( // SeriesSnippet is a regular expression representing series @@ -28,16 +36,25 @@ // CharmNameSnippet is a regular expression representing charm name CharmNameSnippet = "[a-z][a-z0-9]*(-[a-z0-9]*[a-z][a-z0-9]*)*" - localSchemaSnippet = "local:" - charmStoreSchemaSnippet = "cs:(~" + validUserPart + "/)?" - revisionSnippet = "(-1|0|[1-9][0-9]*)" + localSchemaSnippet = "local:" + v1CharmStoreSchemaSnippet = "cs:(~" + validUserNameSnippet + "/)?" + revisionSnippet = "(-1|0|[1-9][0-9]*)" - validCharmRegEx = regexp.MustCompile("^(" + + validV1CharmRegEx = regexp.MustCompile("^(" + localSchemaSnippet + "|" + - charmStoreSchemaSnippet + ")?(" + + v1CharmStoreSchemaSnippet + ")?(" + SeriesSnippet + "/)?" + CharmNameSnippet + "(-" + revisionSnippet + ")?$") + + v3CharmStoreSchemaSnippet = "(cs:)?(" + validUserNameSnippet + "/)?" + + validV3CharmRegEx = regexp.MustCompile("^(" + + localSchemaSnippet + "|" + + v3CharmStoreSchemaSnippet + ")" + + CharmNameSnippet + "(/" + + SeriesSnippet + ")?(/" + + revisionSnippet + ")?$") ) // CharmTag represents tag for charm @@ -84,5 +101,5 @@ // IsValidCharm returns whether name is a valid charm url. 
func IsValidCharm(url string) bool { - return validCharmRegEx.MatchString(url) + return validV1CharmRegEx.MatchString(url) || validV3CharmRegEx.MatchString(url) } diff -Nru juju-core-2.0~beta15/src/gopkg.in/juju/names.v2/charm_test.go juju-core-2.0.0/src/gopkg.in/juju/names.v2/charm_test.go --- juju-core-2.0~beta15/src/gopkg.in/juju/names.v2/charm_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/juju/names.v2/charm_test.go 2016-10-13 14:31:55.000000000 +0000 @@ -17,6 +17,7 @@ var _ = gc.Suite(&charmSuite{}) var validCharmURLs = []string{"charm", + // V1 charm urls. "local:charm", "local:charm--1", "local:charm-1", @@ -28,6 +29,7 @@ "cs:~user/series/charm", "cs:~user/series/charm-1", "cs:series/charm", + "cs:series/charm-with-long-name", "cs:series/charm-3", "cs:series/charm-0", "cs:charm", @@ -37,6 +39,25 @@ "charm-1", "series/charm", "series/charm-1", + + // V3 charm urls. + "local:charm-with-long2-name/series/2", + "local:charm-with-long2-name/series", + "local:charm-with-long2-name/2", + "cs:user/charm-with-long-name/series/2", + "cs:charm-with-long2-name/series/2", + "cs:user/charm-with-long-name/2", + "cs:user/charm-with-long-name/series", + "cs:charm-with-long-name/2", + "cs:charm-with-long-name/series", + "cs:user/charm-with-long-name", + "user/charm-with-long-name/series/2", + "charm-with-long2-name/series/2", + "user/charm-with-long-name/2", + "user/charm-with-long-name/series", + "charm-with-long-name/2", + "charm-with-long-name/series", + "user/charm-with-long-name", } func (s *charmSuite) TestValidCharmURLs(c *gc.C) { @@ -54,6 +75,7 @@ "local:charm--2", // false: only -1 is a valid negative revision "blah:charm-2", // false: invalid schema "local:series/charm-01", // false: revision is funny + "local:user/name/series/2", // false: local charms can't have users } for _, url := range invalidURLs { c.Logf("Processing tag %q", url) @@ -71,7 +93,6 @@ func (s *charmSuite) TestParseCharmTagInvalid(c *gc.C) { invalidTags := 
[]string{"", "blah", - "charm-blah/0", "charm", "user-blah", } diff -Nru juju-core-2.0~beta15/src/gopkg.in/juju/names.v2/cloudcredential.go juju-core-2.0.0/src/gopkg.in/juju/names.v2/cloudcredential.go --- juju-core-2.0~beta15/src/gopkg.in/juju/names.v2/cloudcredential.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/juju/names.v2/cloudcredential.go 2016-10-13 14:31:55.000000000 +0000 @@ -0,0 +1,107 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the LGPLv3, see LICENCE file for details. + +package names + +import ( + "fmt" + "net/url" + "regexp" + "strings" +) + +const CloudCredentialTagKind = "cloudcred" + +var ( + cloudCredentialNameSnippet = "[a-zA-Z][a-zA-Z0-9.@_-]*" + validCloudCredentialName = regexp.MustCompile("^" + cloudCredentialNameSnippet + "$") + validCloudCredential = regexp.MustCompile( + "^" + + "(" + cloudSnippet + ")" + + "/(" + validUserSnippet + ")" + // credential owner + "/(" + cloudCredentialNameSnippet + ")" + + "$", + ) +) + +type CloudCredentialTag struct { + cloud CloudTag + owner UserTag + name string +} + +// Kind is part of the Tag interface. +func (t CloudCredentialTag) Kind() string { return CloudCredentialTagKind } + +// Id is part of the Tag interface. +func (t CloudCredentialTag) Id() string { + return fmt.Sprintf("%s/%s/%s", t.cloud.Id(), t.owner.Id(), t.name) +} + +func quoteCredentialSeparator(in string) string { + return strings.Replace(in, "_", `%5f`, -1) +} + +// String is part of the Tag interface. +func (t CloudCredentialTag) String() string { + return fmt.Sprintf("%s-%s_%s_%s", t.Kind(), + quoteCredentialSeparator(t.cloud.Id()), + quoteCredentialSeparator(t.owner.Id()), + quoteCredentialSeparator(t.name)) +} + +// Cloud returns the tag of the cloud to which the credential pertains. +func (t CloudCredentialTag) Cloud() CloudTag { + return t.cloud +} + +// Owner returns the tag of the user that owns the credential. 
+func (t CloudCredentialTag) Owner() UserTag { + return t.owner +} + +// Name returns the cloud credential name, excluding the +// cloud and owner qualifiers. +func (t CloudCredentialTag) Name() string { + return t.name +} + +// NewCloudCredentialTag returns the tag for the cloud with the given ID. +// It will panic if the given cloud ID is not valid. +func NewCloudCredentialTag(id string) CloudCredentialTag { + parts := validCloudCredential.FindStringSubmatch(id) + if len(parts) != 4 { + panic(fmt.Sprintf("%q is not a valid cloud credential ID", id)) + } + cloud := NewCloudTag(parts[1]) + owner := NewUserTag(parts[2]) + return CloudCredentialTag{cloud, owner, parts[3]} +} + +// ParseCloudCredentialTag parses a cloud tag string. +func ParseCloudCredentialTag(s string) (CloudCredentialTag, error) { + tag, err := ParseTag(s) + if err != nil { + return CloudCredentialTag{}, err + } + dt, ok := tag.(CloudCredentialTag) + if !ok { + return CloudCredentialTag{}, invalidTagError(s, CloudCredentialTagKind) + } + return dt, nil +} + +// IsValidCloudCredential returns whether id is a valid cloud credential ID. +func IsValidCloudCredential(id string) bool { + return validCloudCredential.MatchString(id) +} + +// IsValidCloudCredentialName returns whether name is a valid cloud credential name. +func IsValidCloudCredentialName(name string) bool { + return validCloudCredentialName.MatchString(name) +} + +func cloudCredentialTagSuffixToId(s string) (string, error) { + s = strings.Replace(s, "_", "/", -1) + return url.QueryUnescape(s) +} diff -Nru juju-core-2.0~beta15/src/gopkg.in/juju/names.v2/cloudcredential_test.go juju-core-2.0.0/src/gopkg.in/juju/names.v2/cloudcredential_test.go --- juju-core-2.0~beta15/src/gopkg.in/juju/names.v2/cloudcredential_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/juju/names.v2/cloudcredential_test.go 2016-10-13 14:31:55.000000000 +0000 @@ -0,0 +1,132 @@ +// Copyright 2016 Canonical Ltd. 
+// Licensed under the LGPLv3, see LICENCE file for details. + +package names_test + +import ( + gc "gopkg.in/check.v1" + + "gopkg.in/juju/names.v2" +) + +type cloudCredentialSuite struct{} + +var _ = gc.Suite(&cloudCredentialSuite{}) + +func (s *cloudCredentialSuite) TestCloudCredentialTag(c *gc.C) { + for i, t := range []struct { + input string + string string + cloud names.CloudTag + owner names.UserTag + name string + }{ + { + input: "aws/bob/foo", + string: "cloudcred-aws_bob_foo", + cloud: names.NewCloudTag("aws"), + owner: names.NewUserTag("bob"), + name: "foo", + }, { + input: "aws/bob@remote/foo", + string: "cloudcred-aws_bob@remote_foo", + cloud: names.NewCloudTag("aws"), + owner: names.NewUserTag("bob@remote"), + name: "foo", + }, { + input: "aws/bob@remote/foo@somewhere.com", + string: "cloudcred-aws_bob@remote_foo@somewhere.com", + cloud: names.NewCloudTag("aws"), + owner: names.NewUserTag("bob@remote"), + name: "foo@somewhere.com", + }, { + input: "aws/bob@remote/foo_bar", + string: `cloudcred-aws_bob@remote_foo%5fbar`, + cloud: names.NewCloudTag("aws"), + owner: names.NewUserTag("bob@remote"), + name: "foo_bar", + }, + } { + c.Logf("test %d: %s", i, t.input) + cloudTag := names.NewCloudCredentialTag(t.input) + c.Check(cloudTag.String(), gc.Equals, t.string) + c.Check(cloudTag.Id(), gc.Equals, t.input) + c.Check(cloudTag.Cloud(), gc.Equals, t.cloud) + c.Check(cloudTag.Owner(), gc.Equals, t.owner) + c.Check(cloudTag.Name(), gc.Equals, t.name) + } +} + +func (s *cloudCredentialSuite) TestIsValidCloudCredential(c *gc.C) { + for i, t := range []struct { + string string + expect bool + }{ + {"", false}, + {"aws/bob/foo", true}, + {"aws/bob@local/foo", true}, + {"/bob/foo", false}, + {"aws//foo", false}, + {"aws/bob/", false}, + } { + c.Logf("test %d: %s", i, t.string) + c.Assert(names.IsValidCloudCredential(t.string), gc.Equals, t.expect, gc.Commentf("%s", t.string)) + } +} + +func (s *cloudCredentialSuite) TestIsValidCloudCredentialName(c *gc.C) { + for 
i, t := range []struct { + string string + expect bool + }{ + {"", false}, + {"foo", true}, + {"f00b4r", true}, + {"foo-bar", true}, + {"foo@bar", true}, + {"foo_bar", true}, + {"123", false}, + {"0foo", false}, + } { + c.Logf("test %d: %s", i, t.string) + c.Check(names.IsValidCloudCredentialName(t.string), gc.Equals, t.expect, gc.Commentf("%s", t.string)) + } +} + +func (s *cloudCredentialSuite) TestParseCloudCredentialTag(c *gc.C) { + for i, t := range []struct { + tag string + expected names.Tag + err error + }{{ + tag: "", + err: names.InvalidTagError("", ""), + }, { + tag: "cloudcred-aws_bob_foo", + expected: names.NewCloudCredentialTag("aws/bob/foo"), + }, { + tag: "cloudcred-aws-china_bob_foo-manchu", + expected: names.NewCloudCredentialTag("aws-china/bob/foo-manchu"), + }, { + tag: "cloudcred-aws-china_bob_foo@somewhere.com", + expected: names.NewCloudCredentialTag("aws-china/bob/foo@somewhere.com"), + }, { + tag: `cloudcred-aws-china_bob_foo%5fbar`, + expected: names.NewCloudCredentialTag("aws-china/bob/foo_bar"), + }, { + tag: "foo", + err: names.InvalidTagError("foo", ""), + }, { + tag: "unit-aws", + err: names.InvalidTagError("unit-aws", names.UnitTagKind), // not a valid unit name either + }} { + c.Logf("test %d: %s", i, t.tag) + got, err := names.ParseCloudCredentialTag(t.tag) + if err != nil || t.err != nil { + c.Check(err, gc.DeepEquals, t.err) + continue + } + c.Check(got, gc.FitsTypeOf, t.expected) + c.Check(got, gc.Equals, t.expected) + } +} diff -Nru juju-core-2.0~beta15/src/gopkg.in/juju/names.v2/equality_test.go juju-core-2.0.0/src/gopkg.in/juju/names.v2/equality_test.go --- juju-core-2.0~beta15/src/gopkg.in/juju/names.v2/equality_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/juju/names.v2/equality_test.go 2016-10-13 14:31:55.000000000 +0000 @@ -19,7 +19,7 @@ {NewRelationTag("wordpress:haproxy"), RelationTag{key: "wordpress.haproxy"}}, {NewEnvironTag("deadbeef-0123-4567-89ab-feedfacebeef"), EnvironTag{uuid: 
"deadbeef-0123-4567-89ab-feedfacebeef"}}, {NewUserTag("admin"), UserTag{name: "admin"}}, - {NewUserTag("admin@local"), UserTag{name: "admin", domain: "local"}}, + {NewUserTag("admin@local"), UserTag{name: "admin", domain: ""}}, {NewUserTag("admin@foobar"), UserTag{name: "admin", domain: "foobar"}}, {NewActionTag("01234567-aaaa-4bbb-8ccc-012345678901"), ActionTag{ID: stringToUUID("01234567-aaaa-4bbb-8ccc-012345678901")}}, } diff -Nru juju-core-2.0~beta15/src/gopkg.in/juju/names.v2/tag.go juju-core-2.0.0/src/gopkg.in/juju/names.v2/tag.go --- juju-core-2.0~beta15/src/gopkg.in/juju/names.v2/tag.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/juju/names.v2/tag.go 2016-10-13 14:31:55.000000000 +0000 @@ -7,6 +7,7 @@ "fmt" "strings" + "github.com/juju/errors" "github.com/juju/utils" ) @@ -63,7 +64,7 @@ case UnitTagKind, MachineTagKind, ApplicationTagKind, EnvironTagKind, UserTagKind, RelationTagKind, ActionTagKind, VolumeTagKind, CharmTagKind, StorageTagKind, FilesystemTagKind, IPAddressTagKind, SpaceTagKind, SubnetTagKind, - PayloadTagKind, ModelTagKind, ControllerTagKind, CloudTagKind: + PayloadTagKind, ModelTagKind, ControllerTagKind, CloudTagKind, CloudCredentialTagKind: return true } return false @@ -182,6 +183,15 @@ return nil, invalidTagError(tag, kind) } return NewCloudTag(id), nil + case CloudCredentialTagKind: + id, err = cloudCredentialTagSuffixToId(id) + if err != nil { + return nil, errors.Wrap(err, invalidTagError(tag, kind)) + } + if !IsValidCloudCredential(id) { + return nil, invalidTagError(tag, kind) + } + return NewCloudCredentialTag(id), nil default: return nil, invalidTagError(tag, "") } diff -Nru juju-core-2.0~beta15/src/gopkg.in/juju/names.v2/tag_test.go juju-core-2.0.0/src/gopkg.in/juju/names.v2/tag_test.go --- juju-core-2.0~beta15/src/gopkg.in/juju/names.v2/tag_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/juju/names.v2/tag_test.go 2016-10-13 14:31:55.000000000 +0000 @@ -43,6 +43,8 @@ {tag: 
"space-42", kind: names.SpaceTagKind}, {tag: "cloud", err: `"cloud" is not a valid tag`}, {tag: "cloud-aws", kind: names.CloudTagKind}, + {tag: "cloudcred", err: `"cloudcred" is not a valid tag`}, + {tag: "cloudcred-aws_admin_foo", kind: names.CloudCredentialTagKind}, } func (*tagSuite) TestTagKind(c *gc.C) { @@ -141,10 +143,10 @@ expectType: names.UserTag{}, resultId: "foo", }, { - tag: "user-foo@local", + tag: "user-foo@remote", expectKind: names.UserTagKind, expectType: names.UserTag{}, - resultId: "foo@local", + resultId: "foo@remote", }, { tag: "user-/", expectKind: names.UserTagKind, @@ -212,23 +214,35 @@ expectKind: names.SpaceTagKind, expectType: names.SpaceTag{}, resultId: "myspace1", +}, { + tag: "cloud-aws", + expectKind: names.CloudTagKind, + expectType: names.CloudTag{}, + resultId: "aws", +}, { + tag: "cloudcred-aws_admin_foo%5fbar", + expectKind: names.CloudCredentialTagKind, + expectType: names.CloudCredentialTag{}, + resultId: "aws/admin/foo_bar", }} var makeTag = map[string]func(string) names.Tag{ - names.MachineTagKind: func(tag string) names.Tag { return names.NewMachineTag(tag) }, - names.UnitTagKind: func(tag string) names.Tag { return names.NewUnitTag(tag) }, - names.ApplicationTagKind: func(tag string) names.Tag { return names.NewApplicationTag(tag) }, - names.RelationTagKind: func(tag string) names.Tag { return names.NewRelationTag(tag) }, - names.EnvironTagKind: func(tag string) names.Tag { return names.NewEnvironTag(tag) }, - names.ModelTagKind: func(tag string) names.Tag { return names.NewModelTag(tag) }, - names.UserTagKind: func(tag string) names.Tag { return names.NewUserTag(tag) }, - names.ActionTagKind: func(tag string) names.Tag { return names.NewActionTag(tag) }, - names.VolumeTagKind: func(tag string) names.Tag { return names.NewVolumeTag(tag) }, - names.FilesystemTagKind: func(tag string) names.Tag { return names.NewFilesystemTag(tag) }, - names.StorageTagKind: func(tag string) names.Tag { return names.NewStorageTag(tag) }, - 
names.IPAddressTagKind: func(tag string) names.Tag { return names.NewIPAddressTag(tag) }, - names.SubnetTagKind: func(tag string) names.Tag { return names.NewSubnetTag(tag) }, - names.SpaceTagKind: func(tag string) names.Tag { return names.NewSpaceTag(tag) }, + names.MachineTagKind: func(tag string) names.Tag { return names.NewMachineTag(tag) }, + names.UnitTagKind: func(tag string) names.Tag { return names.NewUnitTag(tag) }, + names.ApplicationTagKind: func(tag string) names.Tag { return names.NewApplicationTag(tag) }, + names.RelationTagKind: func(tag string) names.Tag { return names.NewRelationTag(tag) }, + names.EnvironTagKind: func(tag string) names.Tag { return names.NewEnvironTag(tag) }, + names.ModelTagKind: func(tag string) names.Tag { return names.NewModelTag(tag) }, + names.UserTagKind: func(tag string) names.Tag { return names.NewUserTag(tag) }, + names.ActionTagKind: func(tag string) names.Tag { return names.NewActionTag(tag) }, + names.VolumeTagKind: func(tag string) names.Tag { return names.NewVolumeTag(tag) }, + names.FilesystemTagKind: func(tag string) names.Tag { return names.NewFilesystemTag(tag) }, + names.StorageTagKind: func(tag string) names.Tag { return names.NewStorageTag(tag) }, + names.IPAddressTagKind: func(tag string) names.Tag { return names.NewIPAddressTag(tag) }, + names.SubnetTagKind: func(tag string) names.Tag { return names.NewSubnetTag(tag) }, + names.SpaceTagKind: func(tag string) names.Tag { return names.NewSpaceTag(tag) }, + names.CloudTagKind: func(tag string) names.Tag { return names.NewCloudTag(tag) }, + names.CloudCredentialTagKind: func(tag string) names.Tag { return names.NewCloudCredentialTag(tag) }, } func (*tagSuite) TestParseTag(c *gc.C) { diff -Nru juju-core-2.0~beta15/src/gopkg.in/juju/names.v2/user.go juju-core-2.0.0/src/gopkg.in/juju/names.v2/user.go --- juju-core-2.0~beta15/src/gopkg.in/juju/names.v2/user.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/juju/names.v2/user.go 2016-10-13 
14:31:55.000000000 +0000 @@ -17,9 +17,10 @@ // TODO this does not allow single character usernames or // domains. Is that deliberate? // https://github.com/juju/names/issues/54 - validUserPart = "[a-zA-Z0-9][a-zA-Z0-9.+-]*[a-zA-Z0-9]" - validName = regexp.MustCompile(fmt.Sprintf("^(?P%s)(?:@(?P%s))?$", validUserPart, validUserPart)) - validUserName = regexp.MustCompile("^" + validUserPart + "$") + validUserNameSnippet = "[a-zA-Z0-9][a-zA-Z0-9.+-]*[a-zA-Z0-9]" + validUserSnippet = fmt.Sprintf("(?:%s(?:@%s)?)", validUserNameSnippet, validUserNameSnippet) + validName = regexp.MustCompile(fmt.Sprintf("^(?P%s)(?:@(?P%s))?$", validUserNameSnippet, validUserNameSnippet)) + validUserName = regexp.MustCompile("^" + validUserNameSnippet + "$") ) // IsValidUser returns whether id is a valid user id. @@ -54,12 +55,10 @@ func (t UserTag) Kind() string { return UserTagKind } func (t UserTag) String() string { return UserTagKind + "-" + t.Id() } -// Id implements Tag.Id. It always returns the same id that it was -// created with, so NewUserTag(x).Id() == x for all valid users x. This -// means that local users might or might not have an @local domain in -// their id. +// Id implements Tag.Id. Local users will always have +// an Id value without any domain. func (t UserTag) Id() string { - if t.domain == "" { + if t.domain == "" || t.domain == LocalUserDomain { return t.name } return t.name + "@" + t.domain @@ -69,25 +68,15 @@ // without its associated domain. func (t UserTag) Name() string { return t.name } -// Canonical returns the user name and its domain in canonical form. -// Specifically, user tags in the local domain will always return an -// @local prefix, regardless of the id the user was created with. This -// is the only difference from the Id method. -func (t UserTag) Canonical() string { - return t.name + "@" + t.Domain() -} - // IsLocal returns true if the tag represents a local user. +// Users without an explicit domain are considered local. 
func (t UserTag) IsLocal() bool { - return t.Domain() == LocalUserDomain + return t.Domain() == LocalUserDomain || t.Domain() == "" } // Domain returns the user domain. Users in the local database // are from the LocalDomain. Other users are considered 'remote' users. func (t UserTag) Domain() string { - if t.domain == "" { - return LocalUserDomain - } return t.domain } @@ -112,7 +101,11 @@ if len(parts) != 3 { panic(fmt.Sprintf("invalid user tag %q", userName)) } - return UserTag{name: parts[1], domain: parts[2]} + domain := parts[2] + if domain == LocalUserDomain { + domain = "" + } + return UserTag{name: parts[1], domain: domain} } // NewLocalUserTag returns the tag for a local user with the given name. @@ -120,7 +113,7 @@ if !IsValidUserName(name) { panic(fmt.Sprintf("invalid user name %q", name)) } - return UserTag{name: name, domain: LocalUserDomain} + return UserTag{name: name} } // ParseUserTag parses a user tag string. diff -Nru juju-core-2.0~beta15/src/gopkg.in/juju/names.v2/user_test.go juju-core-2.0.0/src/gopkg.in/juju/names.v2/user_test.go --- juju-core-2.0~beta15/src/gopkg.in/juju/names.v2/user_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/juju/names.v2/user_test.go 2016-10-13 14:31:55.000000000 +0000 @@ -27,14 +27,12 @@ input: "bob", string: "user-bob", name: "bob", - domain: names.LocalUserDomain, - username: "bob@local", + username: "bob", }, { input: "bob@local", - string: "user-bob@local", + string: "user-bob", name: "bob", - domain: names.LocalUserDomain, - username: "bob@local", + username: "bob", }, { input: "bob@foo", string: "user-bob@foo", @@ -46,11 +44,10 @@ c.Logf("test %d: %s", i, t.input) userTag := names.NewUserTag(t.input) c.Check(userTag.String(), gc.Equals, t.string) - c.Check(userTag.Id(), gc.Equals, t.input) + c.Check(userTag.Id(), gc.Equals, t.username) c.Check(userTag.Name(), gc.Equals, t.name) c.Check(userTag.Domain(), gc.Equals, t.domain) - c.Check(userTag.IsLocal(), gc.Equals, t.domain == 
names.LocalUserDomain) - c.Check(userTag.Canonical(), gc.Equals, t.username) + c.Check(userTag.IsLocal(), gc.Equals, t.domain == "") } } @@ -61,7 +58,7 @@ }{{ id: "bob", domain: names.LocalUserDomain, - expectId: "bob@local", + expectId: "bob", }, { id: "bob@local", domain: "foo", @@ -72,7 +69,7 @@ }, { id: "bob@foo", domain: names.LocalUserDomain, - expectId: "bob@local", + expectId: "bob", }, { id: "bob", domain: "@foo", @@ -229,11 +226,10 @@ func (s *userSuite) TestNewLocalUserTag(c *gc.C) { user := names.NewLocalUserTag("bob") - c.Assert(user.Canonical(), gc.Equals, "bob@local") c.Assert(user.Name(), gc.Equals, "bob") - c.Assert(user.Domain(), gc.Equals, "local") + c.Assert(user.Domain(), gc.Equals, "") c.Assert(user.IsLocal(), gc.Equals, true) - c.Assert(user.String(), gc.Equals, "user-bob@local") + c.Assert(user.String(), gc.Equals, "user-bob") c.Assert(func() { names.NewLocalUserTag("bob@local") }, gc.PanicMatches, `invalid user name "bob@local"`) c.Assert(func() { names.NewLocalUserTag("") }, gc.PanicMatches, `invalid user name ""`) diff -Nru juju-core-2.0~beta15/src/gopkg.in/macaroon-bakery.v1/bakery/codec.go juju-core-2.0.0/src/gopkg.in/macaroon-bakery.v1/bakery/codec.go --- juju-core-2.0~beta15/src/gopkg.in/macaroon-bakery.v1/bakery/codec.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/macaroon-bakery.v1/bakery/codec.go 2016-10-13 14:32:13.000000000 +0000 @@ -4,12 +4,20 @@ "bytes" "crypto/rand" "encoding/base64" + "encoding/binary" "encoding/json" - "fmt" "golang.org/x/crypto/nacl/box" + + "gopkg.in/errgo.v1" ) +type caveatInfo struct { + peerPublicKey *PublicKey + rootKey []byte + condition string +} + type caveatIdRecord struct { RootKey []byte Condition string @@ -23,109 +31,189 @@ Id string } -// boxEncoder encodes caveat ids confidentially to a third-party service using -// authenticated public key encryption compatible with NaCl box. 
-type boxEncoder struct { - key *KeyPair -} - -// newBoxEncoder creates a new boxEncoder that uses the given public key pair. -func newBoxEncoder(key *KeyPair) *boxEncoder { - return &boxEncoder{ - key: key, - } -} - -func (enc *boxEncoder) encodeCaveatId(condition string, rootKey []byte, thirdPartyPub *PublicKey) (string, error) { - id, err := enc.newCaveatId(condition, rootKey, thirdPartyPub) - if err != nil { - return "", err - } - data, err := json.Marshal(id) - if err != nil { - return "", fmt.Errorf("cannot marshal %#v: %v", id, err) - } - return base64.StdEncoding.EncodeToString(data), nil -} - -func (enc *boxEncoder) newCaveatId(condition string, rootKey []byte, thirdPartyPub *PublicKey) (*caveatId, error) { +// encodeJSONCaveatId creates a JSON encoded third-party caveat. +func encodeJSONCaveatId(key *KeyPair, ci caveatInfo) ([]byte, error) { var nonce [NonceLen]byte if _, err := rand.Read(nonce[:]); err != nil { - return nil, fmt.Errorf("cannot generate random number for nonce: %v", err) + return nil, errgo.Notef(err, "cannot generate random number for nonce") } plain := caveatIdRecord{ - RootKey: rootKey, - Condition: condition, + RootKey: ci.rootKey, + Condition: ci.condition, } plainData, err := json.Marshal(&plain) if err != nil { - return nil, fmt.Errorf("cannot marshal %#v: %v", &plain, err) + return nil, errgo.Notef(err, "cannot marshal %#v", &plain) } - sealed := box.Seal(nil, plainData, &nonce, thirdPartyPub.boxKey(), enc.key.Private.boxKey()) - return &caveatId{ - ThirdPartyPublicKey: thirdPartyPub, - FirstPartyPublicKey: &enc.key.Public, + sealed := box.Seal(nil, plainData, &nonce, ci.peerPublicKey.boxKey(), key.Private.boxKey()) + id := caveatId{ + ThirdPartyPublicKey: ci.peerPublicKey, + FirstPartyPublicKey: &key.Public, Nonce: nonce[:], Id: base64.StdEncoding.EncodeToString(sealed), - }, nil + } + data, err := json.Marshal(id) + if err != nil { + return nil, errgo.Notef(err, "cannot marshal %#v", id) + } + buf := make([]byte, 
base64.StdEncoding.EncodedLen(len(data))) + base64.StdEncoding.Encode(buf, data) + return buf, nil } -// boxDecoder decodes caveat ids for third-party service that were encoded to -// the third-party with authenticated public key encryption compatible with -// NaCl box. -type boxDecoder struct { - key *KeyPair -} +const ( + publicKeyPrefixLen = 4 +) -// newBoxDecoder creates a new BoxDecoder using the given key pair. -func newBoxDecoder(key *KeyPair) *boxDecoder { - return &boxDecoder{ - key: key, +// encodeCaveatIdV0 creates a version 0 third-party caveat. +// +// The v0 format has the following packed binary fields: +// version 0 [1 byte] +// first 4 bytes of third-party Curve25519 public key [4 bytes] +// first-party Curve25519 public key [32 bytes] +// nonce [24 bytes] +// encrypted secret part [rest of message] +func encodeCaveatIdV0(key *KeyPair, ci caveatInfo) ([]byte, error) { + var nonce [NonceLen]byte + if _, err := rand.Read(nonce[:]); err != nil { + return nil, errgo.Notef(err, "cannot generate random number for nonce") + } + data := make([]byte, 0, 1+publicKeyPrefixLen+KeyLen+NonceLen+1+binary.MaxVarintLen64+len(ci.rootKey)+len(ci.condition)+box.Overhead) + data = append(data, 0) //version + data = append(data, ci.peerPublicKey.Key[:publicKeyPrefixLen]...) + data = append(data, key.Public.Key[:]...) + data = append(data, nonce[:]...) + data = box.Seal(data, encodeSecretPartV0(ci), &nonce, ci.peerPublicKey.boxKey(), key.Private.boxKey()) + return data, nil +} + +// encodeSecretPartV0 creates a version 0 secret part of the third party +// caveat. The generated secret part is not encrypted. 
+// +// The v0 format has the following packed binary fields: +// version 0 [1 byte] +// root key [24 bytes] +// predicate [rest of message] +func encodeSecretPartV0(ci caveatInfo) []byte { + data := make([]byte, 0, 1+binary.MaxVarintLen64+len(ci.rootKey)+len(ci.condition)) + data = append(data, 0) // version + n := binary.PutUvarint(data[1:1+binary.MaxVarintLen64], uint64(len(ci.rootKey))) + data = data[0 : len(data)+n] + data = append(data, ci.rootKey...) + data = append(data, ci.condition...) + return data +} + +// decodeCaveatId attempts to decode id decrypting the encrypted part +// using key. +func decodeCaveatId(key *KeyPair, id []byte) (caveatInfo, error) { + if len(id) == 0 { + return caveatInfo{}, errgo.New("caveat id empty") + } + switch id[0] { + case 0: + return decodeCaveatIdV0(key, []byte(id)) + case 'e': + // 'e' will be the first byte if the caveatid is a base64 encoded JSON object. + return decodeJSONCaveatId(key, id) + default: + return caveatInfo{}, errgo.Newf("caveat id has unsupported version %d", id[0]) } } -func (d *boxDecoder) decodeCaveatId(id string) (rootKey []byte, condition string, err error) { - data, err := base64.StdEncoding.DecodeString(id) +// decodeJSONCaveatId attempts to decode a base64 encoded JSON id. This +// encoding is nominally version -1. 
+func decodeJSONCaveatId(key *KeyPair, id []byte) (caveatInfo, error) { + data := make([]byte, (3*len(id)+3)/4) + n, err := base64.StdEncoding.Decode(data, id) if err != nil { - return nil, "", fmt.Errorf("cannot base64-decode caveat id: %v", err) + return caveatInfo{}, errgo.Notef(err, "cannot base64-decode caveat id") } + data = data[:n] var tpid caveatId if err := json.Unmarshal(data, &tpid); err != nil { - return nil, "", fmt.Errorf("cannot unmarshal caveat id %q: %v", data, err) + return caveatInfo{}, errgo.Notef(err, "cannot unmarshal caveat id %q", data) } - var recordData []byte - - recordData, err = d.encryptedCaveatId(tpid) + if !bytes.Equal(key.Public.Key[:], tpid.ThirdPartyPublicKey.Key[:]) { + return caveatInfo{}, errgo.New("public key mismatch") + } + if tpid.FirstPartyPublicKey == nil { + return caveatInfo{}, errgo.New("target service public key not specified") + } + // The encrypted string is base64 encoded in the JSON representation. + secret, err := base64.StdEncoding.DecodeString(tpid.Id) if err != nil { - return nil, "", err + return caveatInfo{}, errgo.Notef(err, "cannot base64-decode encrypted data") + } + var nonce [NonceLen]byte + if copy(nonce[:], tpid.Nonce) < NonceLen { + return caveatInfo{}, errgo.Newf("nonce too short %x", tpid.Nonce) + } + cid, ok := box.Open(nil, secret, &nonce, tpid.FirstPartyPublicKey.boxKey(), key.Private.boxKey()) + if !ok { + return caveatInfo{}, errgo.Newf("cannot decrypt caveat id %#v", tpid) } var record caveatIdRecord - if err := json.Unmarshal(recordData, &record); err != nil { - return nil, "", fmt.Errorf("cannot decode third party caveat record: %v", err) + if err := json.Unmarshal(cid, &record); err != nil { + return caveatInfo{}, errgo.Notef(err, "cannot decode third party caveat record") } - return record.RootKey, record.Condition, nil + return caveatInfo{ + peerPublicKey: tpid.FirstPartyPublicKey, + rootKey: record.RootKey, + condition: record.Condition, + }, nil } -func (d *boxDecoder) 
encryptedCaveatId(id caveatId) ([]byte, error) { - if d.key == nil { - return nil, fmt.Errorf("no public key for caveat id decryption") +// decodeCaveatIdV0 decodes a version 0 caveat id. +func decodeCaveatIdV0(key *KeyPair, id []byte) (caveatInfo, error) { + if len(id) < 1+publicKeyPrefixLen+KeyLen+NonceLen+box.Overhead { + return caveatInfo{}, errgo.New("caveat id too short") } - if !bytes.Equal(d.key.Public.Key[:], id.ThirdPartyPublicKey.Key[:]) { - return nil, fmt.Errorf("public key mismatch") + id = id[1:] // skip version (already checked) + + publicKeyPrefix, id := id[:publicKeyPrefixLen], id[publicKeyPrefixLen:] + if !bytes.Equal(key.Public.Key[:publicKeyPrefixLen], publicKeyPrefix) { + return caveatInfo{}, errgo.New("public key mismatch") } + + var peerPublicKey PublicKey + copy(peerPublicKey.Key[:], id[:KeyLen]) + id = id[KeyLen:] + var nonce [NonceLen]byte - if len(id.Nonce) != len(nonce) { - return nil, fmt.Errorf("bad nonce length") - } - copy(nonce[:], id.Nonce) + copy(nonce[:], id[:NonceLen]) + id = id[NonceLen:] - sealed, err := base64.StdEncoding.DecodeString(id.Id) + data, ok := box.Open(nil, id, &nonce, peerPublicKey.boxKey(), key.Private.boxKey()) + if !ok { + return caveatInfo{}, errgo.Newf("cannot decrypt caveat id") + } + ci, err := decodeSecretPartV0(data) if err != nil { - return nil, fmt.Errorf("cannot base64-decode encrypted caveat id: %v", err) + return caveatInfo{}, errgo.Notef(err, "invalid secret part") } - out, ok := box.Open(nil, sealed, &nonce, id.FirstPartyPublicKey.boxKey(), d.key.Private.boxKey()) - if !ok { - return nil, fmt.Errorf("decryption of public-key encrypted caveat id %#v failed", id) + ci.peerPublicKey = &peerPublicKey + return ci, nil +} + +func decodeSecretPartV0(data []byte) (caveatInfo, error) { + if len(data) < 1 { + return caveatInfo{}, errgo.New("secret part too short") } - return out, nil + + version, data := data[0], data[1:] + if version != 0 { + return caveatInfo{}, errgo.Newf("unsupported secret part 
version %d", version) + } + + l, n := binary.Uvarint(data) + if n <= 0 || uint64(n)+l > uint64(len(data)) { + return caveatInfo{}, errgo.Newf("invalid root key length") + } + data = data[n:] + + return caveatInfo{ + rootKey: data[:l], + condition: string(data[l:]), + }, nil } diff -Nru juju-core-2.0~beta15/src/gopkg.in/macaroon-bakery.v1/bakery/codec_test.go juju-core-2.0.0/src/gopkg.in/macaroon-bakery.v1/bakery/codec_test.go --- juju-core-2.0~beta15/src/gopkg.in/macaroon-bakery.v1/bakery/codec_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/macaroon-bakery.v1/bakery/codec_test.go 2016-10-13 14:32:13.000000000 +0000 @@ -0,0 +1,158 @@ +package bakery + +import ( + "bytes" + + "golang.org/x/crypto/nacl/box" + + gc "gopkg.in/check.v1" +) + +type codecSuite struct { + firstPartyKey *KeyPair + thirdPartyKey *KeyPair +} + +var _ = gc.Suite(&codecSuite{}) + +func (s *codecSuite) SetUpTest(c *gc.C) { + var err error + s.firstPartyKey, err = GenerateKey() + c.Assert(err, gc.IsNil) + s.thirdPartyKey, err = GenerateKey() + c.Assert(err, gc.IsNil) +} + +func (s *codecSuite) TestJSONRoundTrip(c *gc.C) { + cid, err := encodeJSONCaveatId(s.firstPartyKey, caveatInfo{ + peerPublicKey: &s.thirdPartyKey.Public, + rootKey: []byte("a random string"), + condition: "is-authenticated-user", + }) + c.Assert(err, gc.IsNil) + + res, err := decodeCaveatId(s.thirdPartyKey, cid) + c.Assert(err, gc.IsNil) + c.Assert(res.peerPublicKey, gc.DeepEquals, &s.firstPartyKey.Public) + c.Assert(res.rootKey, gc.DeepEquals, []byte("a random string")) + c.Assert(res.condition, gc.Equals, "is-authenticated-user") +} + +func (s *codecSuite) TestV0RoundTrip(c *gc.C) { + cid, err := encodeCaveatIdV0(s.firstPartyKey, caveatInfo{ + peerPublicKey: &s.thirdPartyKey.Public, + rootKey: []byte("a random string"), + condition: "is-authenticated-user", + }) + c.Assert(err, gc.IsNil) + + res, err := decodeCaveatId(s.thirdPartyKey, cid) + c.Assert(err, gc.IsNil) + c.Assert(res.peerPublicKey, 
gc.DeepEquals, &s.firstPartyKey.Public) + c.Assert(res.rootKey, gc.DeepEquals, []byte("a random string")) + c.Assert(res.condition, gc.Equals, "is-authenticated-user") +} + +func (s *codecSuite) TestEmptyCaveatId(c *gc.C) { + _, err := decodeCaveatId(s.thirdPartyKey, []byte{}) + c.Assert(err, gc.ErrorMatches, "caveat id empty") +} + +func (s *codecSuite) TestCaveatIdBadVersion(c *gc.C) { + _, err := decodeCaveatId(s.thirdPartyKey, []byte{1}) + c.Assert(err, gc.ErrorMatches, "caveat id has unsupported version 1") +} + +func (s *codecSuite) TestV0TooShort(c *gc.C) { + _, err := decodeCaveatId(s.thirdPartyKey, []byte{0}) + c.Assert(err, gc.ErrorMatches, "caveat id too short") +} + +func (s *codecSuite) TestV0BadKey(c *gc.C) { + cid, err := encodeCaveatIdV0(s.firstPartyKey, caveatInfo{ + peerPublicKey: &s.thirdPartyKey.Public, + rootKey: []byte("a random string"), + condition: "is-authenticated-user", + }) + c.Assert(err, gc.IsNil) + cid[1] ^= 1 + + _, err = decodeCaveatId(s.thirdPartyKey, cid) + c.Assert(err, gc.ErrorMatches, "public key mismatch") +} + +func (s *codecSuite) TestV0DecryptionError(c *gc.C) { + cid, err := encodeCaveatIdV0(s.firstPartyKey, caveatInfo{ + peerPublicKey: &s.thirdPartyKey.Public, + rootKey: []byte("a random string"), + condition: "is-authenticated-user", + }) + c.Assert(err, gc.IsNil) + cid[5] ^= 1 + + _, err = decodeCaveatId(s.thirdPartyKey, cid) + c.Assert(err, gc.ErrorMatches, "cannot decrypt caveat id") +} + +func (s *codecSuite) TestV0EmptySecretPart(c *gc.C) { + cid, err := encodeCaveatIdV0(s.firstPartyKey, caveatInfo{ + peerPublicKey: &s.thirdPartyKey.Public, + rootKey: []byte("a random string"), + condition: "is-authenticated-user", + }) + c.Assert(err, gc.IsNil) + cid = s.replaceV0SecretPart(cid, []byte{}) + + _, err = decodeCaveatId(s.thirdPartyKey, cid) + c.Assert(err, gc.ErrorMatches, "invalid secret part: secret part too short") +} + +func (s *codecSuite) TestV0BadSecretPartVersion(c *gc.C) { + cid, err := 
encodeCaveatIdV0(s.firstPartyKey, caveatInfo{ + peerPublicKey: &s.thirdPartyKey.Public, + rootKey: []byte("a random string"), + condition: "is-authenticated-user", + }) + c.Assert(err, gc.IsNil) + cid = s.replaceV0SecretPart(cid, []byte{1}) + + _, err = decodeCaveatId(s.thirdPartyKey, cid) + c.Assert(err, gc.ErrorMatches, "invalid secret part: unsupported secret part version 1") +} + +func (s *codecSuite) TestV0EmptyRootKey(c *gc.C) { + cid, err := encodeCaveatIdV0(s.firstPartyKey, caveatInfo{ + peerPublicKey: &s.thirdPartyKey.Public, + rootKey: []byte{}, + condition: "is-authenticated-user", + }) + c.Assert(err, gc.IsNil) + + res, err := decodeCaveatId(s.thirdPartyKey, cid) + c.Assert(err, gc.IsNil) + c.Assert(res.peerPublicKey, gc.DeepEquals, &s.firstPartyKey.Public) + c.Assert(res.rootKey, gc.DeepEquals, []byte{}) + c.Assert(res.condition, gc.Equals, "is-authenticated-user") +} + +func (s *codecSuite) TestV0LongRootKey(c *gc.C) { + cid, err := encodeCaveatIdV0(s.firstPartyKey, caveatInfo{ + peerPublicKey: &s.thirdPartyKey.Public, + rootKey: bytes.Repeat([]byte{0}, 65536), + condition: "is-authenticated-user", + }) + c.Assert(err, gc.IsNil) + + res, err := decodeCaveatId(s.thirdPartyKey, cid) + c.Assert(err, gc.IsNil) + c.Assert(res.peerPublicKey, gc.DeepEquals, &s.firstPartyKey.Public) + c.Assert(res.rootKey, gc.DeepEquals, bytes.Repeat([]byte{0}, 65536)) + c.Assert(res.condition, gc.Equals, "is-authenticated-user") +} + +func (s *codecSuite) replaceV0SecretPart(cid, replacement []byte) []byte { + cid = cid[:1+publicKeyPrefixLen+KeyLen+NonceLen] + var nonce [NonceLen]byte + copy(nonce[:], cid[1+publicKeyPrefixLen+KeyLen:]) + return box.Seal(cid, replacement, &nonce, s.firstPartyKey.Public.boxKey(), s.thirdPartyKey.Private.boxKey()) +} diff -Nru juju-core-2.0~beta15/src/gopkg.in/macaroon-bakery.v1/bakery/mgostorage/export_test.go juju-core-2.0.0/src/gopkg.in/macaroon-bakery.v1/bakery/mgostorage/export_test.go --- 
juju-core-2.0~beta15/src/gopkg.in/macaroon-bakery.v1/bakery/mgostorage/export_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/macaroon-bakery.v1/bakery/mgostorage/export_test.go 2016-10-13 14:32:13.000000000 +0000 @@ -0,0 +1,12 @@ +package mgostorage + +var ( + TimeNow = &timeNow + MgoCollectionFindId = &mgoCollectionFindId +) + +type RootKey rootKey + +func IsValidWithPolicy(k RootKey, p Policy) bool { + return rootKey(k).isValidWithPolicy(p) +} diff -Nru juju-core-2.0~beta15/src/gopkg.in/macaroon-bakery.v1/bakery/mgostorage/rootkey.go juju-core-2.0.0/src/gopkg.in/macaroon-bakery.v1/bakery/mgostorage/rootkey.go --- juju-core-2.0~beta15/src/gopkg.in/macaroon-bakery.v1/bakery/mgostorage/rootkey.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/macaroon-bakery.v1/bakery/mgostorage/rootkey.go 2016-10-13 14:32:13.000000000 +0000 @@ -0,0 +1,348 @@ +package mgostorage + +import ( + "crypto/rand" + "fmt" + "sync" + "time" + + "github.com/juju/loggo" + "gopkg.in/errgo.v1" + "gopkg.in/mgo.v2" + "gopkg.in/mgo.v2/bson" + + "gopkg.in/macaroon-bakery.v1/bakery" +) + +// Functions defined as variables so they can be overidden +// for testing. +var ( + timeNow = time.Now + + mgoCollectionFindId = (*mgo.Collection).FindId +) + +var logger = loggo.GetLogger("bakery.mgostorage") + +// maxPolicyCache holds the maximum number of storage policies that can +// hold cached keys in a given RootKeys instance. +// +// 100 is probably overkill, given that practical systems will +// likely only have a small number of active policies on any given +// macaroon collection. +const maxPolicyCache = 100 + +// RootKeys represents a cache of macaroon root keys. +type RootKeys struct { + maxCacheSize int + + // TODO (rogpeppe) use RWMutex instead of Mutex here so that + // it's faster in the probably-common case that we + // have many contended readers. 
+ mu sync.Mutex + oldCache map[string]rootKey + cache map[string]rootKey + + // current holds the current root key for each storage policy. + current map[Policy]rootKey +} + +type rootKey struct { + Id string `bson:"_id"` + Created time.Time + Expires time.Time + RootKey []byte +} + +// isValid reports whether the root key contains a key. Note that we +// always generate non-empty root keys, so we use this to find +// whether the root key is empty or not. +func (rk rootKey) isValid() bool { + return rk.RootKey != nil +} + +// isValidWithPolicy reports whether the given root key +// is currently valid to use with the given storage policy. +func (rk rootKey) isValidWithPolicy(p Policy) bool { + if !rk.isValid() { + return false + } + now := timeNow() + return afterEq(rk.Created, now.Add(-p.GenerateInterval)) && + afterEq(rk.Expires, now.Add(p.ExpiryDuration)) && + beforeEq(rk.Expires, now.Add(p.ExpiryDuration+p.GenerateInterval)) +} + +// NewRootKeys returns a root-keys cache that +// is limited in size to approximately the given size. +// +// The NewStorageMethod returns a storage implementation +// that uses a specific mongo collection and storage +// policy. +func NewRootKeys(maxCacheSize int) *RootKeys { + return &RootKeys{ + maxCacheSize: maxCacheSize, + cache: make(map[string]rootKey), + current: make(map[Policy]rootKey), + } +} + +// Policy holds a storage policy for root keys. +type Policy struct { + // GenerateInterval holds the maximum length of time + // for which a root key will be returned from RootKey. + // If this is zero, it defaults to ExpiryDuration. + GenerateInterval time.Duration + + // ExpiryDuration holds the minimum length of time that + // root keys will be valid for after they are returned from + // RootKey. The maximum length of time that they + // will be valid for is ExpiryDuration + GenerateInterval. 
+ ExpiryDuration time.Duration +} + +// NewStorage returns a new RootKeyStorage implementation that +// stores and obtains root keys from the given collection. +// +// Root keys will be generated and stored following the +// given storage policy. +// +// It is expected that all collections passed to a given RootKey's +// NewStorage method should refer to the same underlying collection. +func (s *RootKeys) NewStorage(c *mgo.Collection, policy Policy) bakery.RootKeyStorage { + if policy.GenerateInterval == 0 { + policy.GenerateInterval = policy.ExpiryDuration + } + return &rootKeyStorage{ + keys: s, + coll: c, + policy: policy, + } +} + +var indexes = []mgo.Index{{ + Key: []string{"-created"}, +}, { + Key: []string{"expires"}, + ExpireAfter: time.Second, +}} + +// EnsureIndex ensures that the required indexes exist on the +// collection that will be used for root key storage. +// This should be called at least once before using NewStorage. +func (s *RootKeys) EnsureIndex(c *mgo.Collection) error { + for _, idx := range indexes { + if err := c.EnsureIndex(idx); err != nil { + return errgo.Notef(err, "cannot ensure index for %q on %q", idx.Key, c.Name) + } + } + return nil +} + +// get gets the root key for the given id, trying the cache first and +// falling back to calling fallback if it's not found there. +// +// If the key does not exist or has expired, it returns +// bakery.ErrNotFound. +// +// Called with s.mu locked. +func (s *RootKeys) get(id string, fallback func(id string) (rootKey, error)) (rootKey, error) { + key, cached, err := s.get0(id, fallback) + if err != nil && err != bakery.ErrNotFound { + return rootKey{}, errgo.Mask(err) + } + if err == nil && timeNow().After(key.Expires) { + key = rootKey{} + err = bakery.ErrNotFound + } + if !cached { + s.addCache(id, key) + } + return key, err +} + +// get0 is the inner version of RootKeys.get. 
It returns an item and reports +// whether it was found in the cache, but doesn't check whether the +// item has expired or move the returned item to s.cache. +func (s *RootKeys) get0(id string, fallback func(id string) (rootKey, error)) (key rootKey, inCache bool, err error) { + if k, ok := s.cache[id]; ok { + if !k.isValid() { + return rootKey{}, true, bakery.ErrNotFound + } + return k, true, nil + } + if k, ok := s.oldCache[id]; ok { + if !k.isValid() { + return rootKey{}, false, bakery.ErrNotFound + } + return k, false, nil + } + logger.Infof("cache miss for %q", id) + k, err := fallback(id) + return k, false, err +} + +// addCache adds the given key to the cache. +// Called with s.mu locked. +func (s *RootKeys) addCache(id string, k rootKey) { + if len(s.cache) >= s.maxCacheSize { + s.oldCache = s.cache + s.cache = make(map[string]rootKey) + } + s.cache[id] = k +} + +// setCurrent sets the current key for the given storage policy. +// Called with s.mu locked. +func (s *RootKeys) setCurrent(policy Policy, key rootKey) { + if len(s.current) > maxPolicyCache { + // Sanity check to avoid possibly memory leak: + // if some client is using arbitrarily many storage + // policies, we don't want s.keys.current to endlessly + // expand, so just kill the cache if it grows too big. + // This will result in worse performance but it shouldn't + // happen in practice and it's better than using endless + // space. + s.current = make(map[Policy]rootKey) + } + s.current[policy] = key +} + +type rootKeyStorage struct { + keys *RootKeys + policy Policy + coll *mgo.Collection +} + +// Get implements bakery.RootKeyStorage.Get. 
+func (s *rootKeyStorage) Get(id string) ([]byte, error) { + s.keys.mu.Lock() + defer s.keys.mu.Unlock() + + key, err := s.keys.get(id, s.getFromMongo) + if err != nil { + return nil, err + } + return key.RootKey, nil +} + +func (s *rootKeyStorage) getFromMongo(id string) (rootKey, error) { + var key rootKey + err := mgoCollectionFindId(s.coll, id).One(&key) + if err != nil { + if err == mgo.ErrNotFound { + return rootKey{}, bakery.ErrNotFound + } + return rootKey{}, errgo.Notef(err, "cannot get key from database") + } + return key, nil +} + +// RootKey implements bakery.RootKeyStorage.RootKey by +// returning an existing key from the cache when compatible +// with the current policy. +func (s *rootKeyStorage) RootKey() ([]byte, string, error) { + if key := s.rootKeyFromCache(); key.isValid() { + return key.RootKey, key.Id, nil + } + logger.Debugf("root key cache miss") + // Try to find a root key from the collection. + // It doesn't matter much if two concurrent mongo + // clients are doing this at the same time because + // we don't mind if there are more keys than necessary. + // + // Note that this query mirrors the logic found in + // rootKeyStorage.rootKeyFromCache. + now := timeNow() + var key rootKey + err := s.coll.Find(bson.D{{ + "created", bson.D{{"$gte", now.Add(-s.policy.GenerateInterval)}}, + }, { + "expires", bson.D{ + {"$gte", now.Add(s.policy.ExpiryDuration)}, + {"$lte", now.Add(s.policy.ExpiryDuration + s.policy.GenerateInterval)}, + }, + }}).Sort("-created").One(&key) + if err != nil && err != mgo.ErrNotFound { + return nil, "", errgo.Notef(err, "cannot query existing keys") + } + if !key.isValid() { + // No keys found anywhere, so let's create one. 
+ var err error + key, err = s.generateKey() + if err != nil { + return nil, "", errgo.Notef(err, "cannot generate key") + } + logger.Infof("new root key id %q", key.Id) + if err := s.coll.Insert(key); err != nil { + return nil, "", errgo.Notef(err, "cannot create root key") + } + } + s.keys.mu.Lock() + defer s.keys.mu.Unlock() + s.keys.addCache(key.Id, key) + s.keys.setCurrent(s.policy, key) + return key.RootKey, key.Id, nil +} + +// rootKeyFromCache returns a root key from the cached keys. +// If no keys are found that are valid for s.policy, it returns +// the zero key. +func (s *rootKeyStorage) rootKeyFromCache() rootKey { + s.keys.mu.Lock() + defer s.keys.mu.Unlock() + if k, ok := s.keys.current[s.policy]; ok && k.isValidWithPolicy(s.policy) { + return k + } + + // Find the most recently created key that's consistent with the + // storage policy. + var current rootKey + for _, k := range s.keys.cache { + if k.isValidWithPolicy(s.policy) && k.Created.After(current.Created) { + current = k + } + } + if current.isValid() { + s.keys.current[s.policy] = current + return current + } + return rootKey{} +} + +func (s *rootKeyStorage) generateKey() (rootKey, error) { + newKey, err := randomBytes(24) + if err != nil { + return rootKey{}, err + } + newId, err := randomBytes(16) + if err != nil { + return rootKey{}, err + } + now := timeNow() + return rootKey{ + Created: now, + Expires: now.Add(s.policy.ExpiryDuration + s.policy.GenerateInterval), + Id: fmt.Sprintf("%x", newId), + RootKey: newKey, + }, nil +} + +func randomBytes(n int) ([]byte, error) { + b := make([]byte, n) + _, err := rand.Read(b) + if err != nil { + return nil, fmt.Errorf("cannot generate %d random bytes: %v", n, err) + } + return b, nil +} + +// afterEq reports whether t0 is after or equal to t1. +func afterEq(t0, t1 time.Time) bool { + return !t0.Before(t1) +} + +// beforeEq reports whether t1 is before or equal to t0. 
+func beforeEq(t0, t1 time.Time) bool { + return !t0.After(t1) +} diff -Nru juju-core-2.0~beta15/src/gopkg.in/macaroon-bakery.v1/bakery/mgostorage/rootkey_test.go juju-core-2.0.0/src/gopkg.in/macaroon-bakery.v1/bakery/mgostorage/rootkey_test.go --- juju-core-2.0~beta15/src/gopkg.in/macaroon-bakery.v1/bakery/mgostorage/rootkey_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/macaroon-bakery.v1/bakery/mgostorage/rootkey_test.go 2016-10-13 14:32:13.000000000 +0000 @@ -0,0 +1,475 @@ +package mgostorage_test + +import ( + "time" + + "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + "gopkg.in/mgo.v2" + + "gopkg.in/macaroon-bakery.v1/bakery" + "gopkg.in/macaroon-bakery.v1/bakery/mgostorage" +) + +type RootKeyStorageSuite struct { + testing.IsolatedMgoSuite +} + +var _ = gc.Suite(&RootKeyStorageSuite{}) + +var epoch = time.Date(2000, time.January, 1, 0, 0, 0, 0, time.UTC) + +var isValidWithPolicyTests = []struct { + about string + policy mgostorage.Policy + now time.Time + key mgostorage.RootKey + expect bool +}{{ + about: "success", + policy: mgostorage.Policy{ + GenerateInterval: 2 * time.Minute, + ExpiryDuration: 3 * time.Minute, + }, + now: epoch.Add(20 * time.Minute), + key: mgostorage.RootKey{ + Created: epoch.Add(19 * time.Minute), + Expires: epoch.Add(24 * time.Minute), + Id: "id", + RootKey: []byte("key"), + }, + expect: true, +}, { + about: "empty root key", + policy: mgostorage.Policy{ + GenerateInterval: 2 * time.Minute, + ExpiryDuration: 3 * time.Minute, + }, + now: epoch.Add(20 * time.Minute), + key: mgostorage.RootKey{}, + expect: false, +}, { + about: "created too early", + policy: mgostorage.Policy{ + GenerateInterval: 2 * time.Minute, + ExpiryDuration: 3 * time.Minute, + }, + now: epoch.Add(20 * time.Minute), + key: mgostorage.RootKey{ + Created: epoch.Add(18*time.Minute - time.Millisecond), + Expires: epoch.Add(24 * time.Minute), + Id: "id", + RootKey: []byte("key"), + }, + expect: 
false, +}, { + about: "expires too early", + policy: mgostorage.Policy{ + GenerateInterval: 2 * time.Minute, + ExpiryDuration: 3 * time.Minute, + }, + now: epoch.Add(20 * time.Minute), + key: mgostorage.RootKey{ + Created: epoch.Add(19 * time.Minute), + Expires: epoch.Add(21 * time.Minute), + Id: "id", + RootKey: []byte("key"), + }, + expect: false, +}, { + about: "expires too late", + policy: mgostorage.Policy{ + GenerateInterval: 2 * time.Minute, + ExpiryDuration: 3 * time.Minute, + }, + now: epoch.Add(20 * time.Minute), + key: mgostorage.RootKey{ + Created: epoch.Add(19 * time.Minute), + Expires: epoch.Add(25*time.Minute + time.Millisecond), + Id: "id", + RootKey: []byte("key"), + }, + expect: false, +}} + +func (s *RootKeyStorageSuite) TestIsValidWithPolicy(c *gc.C) { + var now time.Time + s.PatchValue(mgostorage.TimeNow, func() time.Time { + return now + }) + for i, test := range isValidWithPolicyTests { + c.Logf("test %d: %v", i, test.about) + now = test.now + c.Assert(mgostorage.IsValidWithPolicy(test.key, test.policy), gc.Equals, test.expect) + } +} + +func (s *RootKeyStorageSuite) TestRootKeyUsesKeysValidWithPolicy(c *gc.C) { + // We re-use the TestIsValidWithPolicy tests so that we + // know that the mongo logic uses the same behaviour. + var now time.Time + s.PatchValue(mgostorage.TimeNow, func() time.Time { + return now + }) + for i, test := range isValidWithPolicyTests { + c.Logf("test %d: %v", i, test.about) + if test.key.RootKey == nil { + // We don't store empty root keys in the database. + c.Logf("skipping test with empty root key") + continue + } + // Prime the collection with the root key document. 
+ _, err := s.coll().RemoveAll(nil) + c.Assert(err, gc.IsNil) + err = s.coll().Insert(test.key) + c.Assert(err, gc.IsNil) + + store := mgostorage.NewRootKeys(10).NewStorage(s.coll(), test.policy) + now = test.now + key, id, err := store.RootKey() + c.Assert(err, gc.IsNil) + if test.expect { + c.Assert(id, gc.Equals, "id") + c.Assert(string(key), gc.Equals, "key") + } else { + // If it didn't match then RootKey will have + // generated a new key. + c.Assert(key, gc.HasLen, 24) + c.Assert(id, gc.Matches, "[0-9a-f]{32}") + } + } +} + +func (s *RootKeyStorageSuite) TestRootKey(c *gc.C) { + now := epoch + s.PatchValue(mgostorage.TimeNow, func() time.Time { + return now + }) + + store := mgostorage.NewRootKeys(10).NewStorage(s.coll(), mgostorage.Policy{ + GenerateInterval: 2 * time.Minute, + ExpiryDuration: 5 * time.Minute, + }) + key, id, err := store.RootKey() + c.Assert(err, gc.IsNil) + c.Assert(key, gc.HasLen, 24) + c.Assert(id, gc.Matches, "[0-9a-f]{32}") + + // If we get a key within the generate interval, we should + // get the same one. + now = epoch.Add(time.Minute) + key1, id1, err := store.RootKey() + c.Assert(err, gc.IsNil) + c.Assert(key1, gc.DeepEquals, key) + c.Assert(id1, gc.Equals, id) + + // A different storage instance should get the same root key. + store1 := mgostorage.NewRootKeys(10).NewStorage(s.coll(), mgostorage.Policy{ + GenerateInterval: 2 * time.Minute, + ExpiryDuration: 5 * time.Minute, + }) + key1, id1, err = store1.RootKey() + c.Assert(err, gc.IsNil) + c.Assert(key1, gc.DeepEquals, key) + c.Assert(id1, gc.Equals, id) + + // After the generation interval has passed, we should generate a new key. + now = epoch.Add(2*time.Minute + time.Second) + key1, id1, err = store.RootKey() + c.Assert(err, gc.IsNil) + c.Assert(key, gc.HasLen, 24) + c.Assert(id, gc.Matches, "[0-9a-f]{32}") + c.Assert(key1, gc.Not(gc.DeepEquals), key) + c.Assert(id1, gc.Not(gc.Equals), id) + + // The other store should pick it up too. 
+ key2, id2, err := store1.RootKey() + c.Assert(err, gc.IsNil) + c.Assert(key2, gc.DeepEquals, key1) + c.Assert(id2, gc.Equals, id1) +} + +func (s *RootKeyStorageSuite) TestRootKeyDefaultGenerateInterval(c *gc.C) { + now := epoch + s.PatchValue(mgostorage.TimeNow, func() time.Time { + return now + }) + store := mgostorage.NewRootKeys(10).NewStorage(s.coll(), mgostorage.Policy{ + ExpiryDuration: 5 * time.Minute, + }) + key, id, err := store.RootKey() + c.Assert(err, gc.IsNil) + + now = epoch.Add(5 * time.Minute) + key1, id1, err := store.RootKey() + c.Assert(err, gc.IsNil) + c.Assert(key1, jc.DeepEquals, key) + c.Assert(id1, gc.Equals, id) + + now = epoch.Add(5*time.Minute + time.Millisecond) + key1, id1, err = store.RootKey() + c.Assert(err, gc.IsNil) + c.Assert(key1, gc.Not(gc.DeepEquals), key) + c.Assert(id1, gc.Not(gc.Equals), id) +} + +var preferredRootKeyTests = []struct { + about string + now time.Time + keys []mgostorage.RootKey + policy mgostorage.Policy + expectId string +}{{ + about: "latest creation time is preferred", + now: epoch.Add(5 * time.Minute), + keys: []mgostorage.RootKey{{ + Created: epoch.Add(4 * time.Minute), + Expires: epoch.Add(15 * time.Minute), + Id: "id0", + RootKey: []byte("key0"), + }, { + Created: epoch.Add(5*time.Minute + 30*time.Second), + Expires: epoch.Add(16 * time.Minute), + Id: "id1", + RootKey: []byte("key1"), + }, { + Created: epoch.Add(5 * time.Minute), + Expires: epoch.Add(16 * time.Minute), + Id: "id2", + RootKey: []byte("key2"), + }}, + policy: mgostorage.Policy{ + GenerateInterval: 5 * time.Minute, + ExpiryDuration: 7 * time.Minute, + }, + expectId: "id1", +}, { + about: "ineligible keys are exluded", + now: epoch.Add(5 * time.Minute), + keys: []mgostorage.RootKey{{ + Created: epoch.Add(4 * time.Minute), + Expires: epoch.Add(15 * time.Minute), + Id: "id0", + RootKey: []byte("key0"), + }, { + Created: epoch.Add(5 * time.Minute), + Expires: epoch.Add(16*time.Minute + 30*time.Second), + Id: "id1", + RootKey: 
[]byte("key1"), + }, { + Created: epoch.Add(6 * time.Minute), + Expires: epoch.Add(time.Hour), + Id: "id2", + RootKey: []byte("key2"), + }}, + policy: mgostorage.Policy{ + GenerateInterval: 5 * time.Minute, + ExpiryDuration: 7 * time.Minute, + }, + expectId: "id1", +}} + +func (s *RootKeyStorageSuite) TestPreferredRootKeyFromDatabase(c *gc.C) { + var now time.Time + s.PatchValue(mgostorage.TimeNow, func() time.Time { + return now + }) + for i, test := range preferredRootKeyTests { + c.Logf("%d: %v", i, test.about) + _, err := s.coll().RemoveAll(nil) + c.Assert(err, gc.IsNil) + for _, key := range test.keys { + err := s.coll().Insert(key) + c.Assert(err, gc.IsNil) + } + store := mgostorage.NewRootKeys(10).NewStorage(s.coll(), test.policy) + now = test.now + _, id, err := store.RootKey() + c.Assert(err, gc.IsNil) + c.Assert(id, gc.Equals, test.expectId) + } +} + +func (s *RootKeyStorageSuite) TestPreferredRootKeyFromCache(c *gc.C) { + var now time.Time + s.PatchValue(mgostorage.TimeNow, func() time.Time { + return now + }) + for i, test := range preferredRootKeyTests { + c.Logf("%d: %v", i, test.about) + for _, key := range test.keys { + err := s.coll().Insert(key) + c.Assert(err, gc.IsNil) + } + store := mgostorage.NewRootKeys(10).NewStorage(s.coll(), test.policy) + // Ensure that all the keys are in cache by getting all of them. + for _, key := range test.keys { + got, err := store.Get(key.Id) + c.Assert(err, gc.IsNil) + c.Assert(got, jc.DeepEquals, key.RootKey) + } + // Remove all the keys from the collection so that + // we know we must be acquiring them from the cache. + _, err := s.coll().RemoveAll(nil) + c.Assert(err, gc.IsNil) + + // Test that RootKey returns the expected key. 
+ now = test.now + _, id, err := store.RootKey() + c.Assert(err, gc.IsNil) + c.Assert(id, gc.Equals, test.expectId) + } +} + +func (s *RootKeyStorageSuite) TestGet(c *gc.C) { + now := epoch + s.PatchValue(mgostorage.TimeNow, func() time.Time { + return now + }) + + store := mgostorage.NewRootKeys(5).NewStorage(s.coll(), mgostorage.Policy{ + GenerateInterval: 1 * time.Minute, + ExpiryDuration: 30 * time.Minute, + }) + type idKey struct { + id string + key []byte + } + var keys []idKey + keyIds := make(map[string]bool) + for i := 0; i < 20; i++ { + key, id, err := store.RootKey() + c.Assert(err, gc.IsNil) + c.Assert(keyIds[id], gc.Equals, false) + keys = append(keys, idKey{id, key}) + now = now.Add(time.Minute + time.Second) + } + for i, k := range keys { + key, err := store.Get(k.id) + c.Assert(err, gc.IsNil, gc.Commentf("key %d (%s)", i, k.id)) + c.Assert(key, gc.DeepEquals, k.key, gc.Commentf("key %d (%s)", i, k.id)) + } + // Check that the keys are cached. + // + // Since the cache size is 5, the most recent 5 items will be in + // the primary cache; the 5 items before that will be in the old + // cache and nothing else will be cached. + // + // The first time we fetch an item from the old cache, a new + // primary cache will be allocated, all existing items in the + // old cache except that item will be evicted, and all items in + // the current primary cache moved to the old cache. + // + // The upshot of that is that all but the first 6 calls to Get + // should result in a database fetch. 
+ + var fetched []string + s.PatchValue(mgostorage.MgoCollectionFindId, func(coll *mgo.Collection, id interface{}) *mgo.Query { + fetched = append(fetched, id.(string)) + return coll.FindId(id) + }) + c.Logf("testing cache") + + for i := len(keys) - 1; i >= 0; i-- { + k := keys[i] + key, err := store.Get(k.id) + c.Assert(err, gc.IsNil) + c.Assert(err, gc.IsNil, gc.Commentf("key %d (%s)", i, k.id)) + c.Assert(key, gc.DeepEquals, k.key, gc.Commentf("key %d (%s)", i, k.id)) + } + c.Assert(len(fetched), gc.Equals, len(keys)-6) + for i, id := range fetched { + c.Assert(id, gc.Equals, keys[len(keys)-6-i-1].id) + } +} + +func (s *RootKeyStorageSuite) TestGetCachesMisses(c *gc.C) { + store := mgostorage.NewRootKeys(5).NewStorage(s.coll(), mgostorage.Policy{ + GenerateInterval: 1 * time.Minute, + ExpiryDuration: 30 * time.Minute, + }) + var fetched []string + s.PatchValue(mgostorage.MgoCollectionFindId, func(coll *mgo.Collection, id interface{}) *mgo.Query { + fetched = append(fetched, id.(string)) + return coll.FindId(id) + }) + key, err := store.Get("foo") + c.Assert(err, gc.Equals, bakery.ErrNotFound) + c.Assert(key, gc.IsNil) + c.Assert(fetched, jc.DeepEquals, []string{"foo"}) + fetched = nil + + key, err = store.Get("foo") + c.Assert(err, gc.Equals, bakery.ErrNotFound) + c.Assert(key, gc.IsNil) + c.Assert(fetched, gc.IsNil) +} + +func (s *RootKeyStorageSuite) TestGetExpiredItemFromCache(c *gc.C) { + now := epoch + s.PatchValue(mgostorage.TimeNow, func() time.Time { + return now + }) + store := mgostorage.NewRootKeys(10).NewStorage(s.coll(), mgostorage.Policy{ + ExpiryDuration: 5 * time.Minute, + }) + _, id, err := store.RootKey() + c.Assert(err, gc.IsNil) + + s.PatchValue(mgostorage.MgoCollectionFindId, func(*mgo.Collection, interface{}) *mgo.Query { + c.Errorf("FindId unexpectedly called") + return nil + }) + + now = epoch.Add(15 * time.Minute) + + _, err = store.Get(id) + c.Assert(err, gc.Equals, bakery.ErrNotFound) +} + +func (s *RootKeyStorageSuite) 
TestEnsureIndex(c *gc.C) { + keys := mgostorage.NewRootKeys(5) + err := keys.EnsureIndex(s.coll()) + c.Assert(err, gc.IsNil) + + // This code can take up to 60s to run; there's no way + // to force it to run more quickly, but it provides reassurance + // that the code actually works. + // Reenable the rest of this test if concerned about index behaviour. + + c.SucceedNow() + + _, id1, err := keys.NewStorage(s.coll(), mgostorage.Policy{ + ExpiryDuration: 100 * time.Millisecond, + }).RootKey() + c.Assert(err, gc.IsNil) + + _, id2, err := keys.NewStorage(s.coll(), mgostorage.Policy{ + ExpiryDuration: time.Hour, + }).RootKey() + c.Assert(err, gc.IsNil) + c.Assert(id2, gc.Not(gc.Equals), id1) + + // Sanity check that the keys are in the collection. + n, err := s.coll().Find(nil).Count() + c.Assert(err, gc.IsNil) + c.Assert(n, gc.Equals, 2) + for i := 0; i < 100; i++ { + n, err := s.coll().Find(nil).Count() + c.Assert(err, gc.IsNil) + switch n { + case 1: + c.SucceedNow() + case 2: + time.Sleep(time.Second) + default: + c.Fatalf("unexpected key count %v", n) + } + } + c.Fatalf("key was never removed from database") +} + +func (s *RootKeyStorageSuite) coll() *mgo.Collection { + return s.Session.DB("test").C("items") +} diff -Nru juju-core-2.0~beta15/src/gopkg.in/macaroon-bakery.v1/bakery/service.go juju-core-2.0.0/src/gopkg.in/macaroon-bakery.v1/bakery/service.go --- juju-core-2.0~beta15/src/gopkg.in/macaroon-bakery.v1/bakery/service.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/macaroon-bakery.v1/bakery/service.go 2016-10-13 14:32:13.000000000 +0000 @@ -28,7 +28,6 @@ store storage rkStore RootKeyStorage checker FirstPartyChecker - encoder *boxEncoder key *KeyPair locator PublicKeyLocator } @@ -89,7 +88,6 @@ svc.locator = PublicKeyLocatorMap(nil) } svc.key = p.Key - svc.encoder = newBoxEncoder(p.Key) return svc, nil } @@ -345,11 +343,15 @@ if err != nil { return errgo.Notef(err, "cannot generate third party secret") } - id, err := 
svc.encoder.encodeCaveatId(cav.Condition, rootKey, thirdPartyPub) + id, err := encodeJSONCaveatId(svc.key, caveatInfo{ + peerPublicKey: thirdPartyPub, + rootKey: rootKey, + condition: cav.Condition, + }) if err != nil { return errgo.Notef(err, "cannot create third party caveat id at %q", cav.Location) } - if err := m.AddThirdPartyCaveat(rootKey, id, cav.Location); err != nil { + if err := m.AddThirdPartyCaveat(rootKey, string(id), cav.Location); err != nil { return errgo.Notef(err, "cannot add third party caveat") } return nil @@ -361,22 +363,20 @@ // it is valid, a new macaroon is returned which discharges the caveat // along with any caveats returned from the checker. func Discharge(key *KeyPair, checker ThirdPartyChecker, id string) (*macaroon.Macaroon, []checkers.Caveat, error) { - decoder := newBoxDecoder(key) - logger.Infof("server attempting to discharge %q", id) - rootKey, condition, err := decoder.decodeCaveatId(id) + cid, err := decodeCaveatId(key, []byte(id)) if err != nil { return nil, nil, errgo.Notef(err, "discharger cannot decode caveat id") } // Note that we don't check the error - we allow the // third party checker to see even caveats that we can't // understand. - cond, arg, _ := checkers.ParseCaveat(condition) + cond, arg, _ := checkers.ParseCaveat(cid.condition) var caveats []checkers.Caveat if cond == checkers.CondNeedDeclared { caveats, err = checkNeedDeclared(id, arg, checker) } else { - caveats, err = checker.CheckThirdPartyCaveat(id, condition) + caveats, err = checker.CheckThirdPartyCaveat(id, cid.condition) } if err != nil { return nil, nil, errgo.Mask(err, errgo.Any) @@ -385,7 +385,7 @@ // be stored persistently. Indeed, it would be a problem if // we did, because then the macaroon could potentially be used // for normal authorization with the third party. 
- m, err := macaroon.New(rootKey, id, "") + m, err := macaroon.New(cid.rootKey, id, "") if err != nil { return nil, nil, errgo.Mask(err) } @@ -395,7 +395,7 @@ // Discharge calls Discharge with the service's key and uses the service // to add any returned caveats to the discharge macaroon. func (svc *Service) Discharge(checker ThirdPartyChecker, id string) (*macaroon.Macaroon, error) { - m, caveats, err := Discharge(svc.encoder.key, checker, id) + m, caveats, err := Discharge(svc.key, checker, id) if err != nil { return nil, errgo.Mask(err, errgo.Any) } diff -Nru juju-core-2.0~beta15/src/gopkg.in/macaroon-bakery.v1/.gitignore juju-core-2.0.0/src/gopkg.in/macaroon-bakery.v1/.gitignore --- juju-core-2.0~beta15/src/gopkg.in/macaroon-bakery.v1/.gitignore 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/macaroon-bakery.v1/.gitignore 2016-10-13 14:32:13.000000000 +0000 @@ -0,0 +1 @@ +*.test diff -Nru juju-core-2.0~beta15/src/gopkg.in/macaroon-bakery.v1/httpbakery/error.go juju-core-2.0.0/src/gopkg.in/macaroon-bakery.v1/httpbakery/error.go --- juju-core-2.0~beta15/src/gopkg.in/macaroon-bakery.v1/httpbakery/error.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/macaroon-bakery.v1/httpbakery/error.go 2016-10-13 14:32:13.000000000 +0000 @@ -285,9 +285,14 @@ return version0 } v, err := strconv.Atoi(vs) - if err != nil || version(v) < 0 || version(v) > latestVersion { + if err != nil || version(v) < 0 { // Badly formed header - use backward compatibility mode. return version0 } + if version(v) > latestVersion { + // Later version than we know about - use the + // latest version that we can. 
+ return latestVersion + } return version(v) } diff -Nru juju-core-2.0~beta15/src/gopkg.in/macaroon-bakery.v1/httpbakery/error_test.go juju-core-2.0.0/src/gopkg.in/macaroon-bakery.v1/httpbakery/error_test.go --- juju-core-2.0~beta15/src/gopkg.in/macaroon-bakery.v1/httpbakery/error_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/macaroon-bakery.v1/httpbakery/error_test.go 2016-10-13 14:32:13.000000000 +0000 @@ -119,4 +119,27 @@ }, }) + // With a request with a later version header, the response + // should be also be 401. + req.Header.Set("Bakery-Protocol-Version", "2") + + err = httpbakery.NewInteractionRequiredError("/visit", "/wait", nil, req) + code, resp = httpbakery.ErrorToResponse(err) + c.Assert(code, gc.Equals, http.StatusUnauthorized) + + h = make(http.Header) + resp.(httprequest.HeaderSetter).SetHeader(h) + c.Assert(h.Get("WWW-Authenticate"), gc.Equals, "Macaroon") + + data, err = json.Marshal(resp) + c.Assert(err, gc.IsNil) + + c.Assert(string(data), jc.JSONEquals, &httpbakery.Error{ + Code: httpbakery.ErrInteractionRequired, + Message: httpbakery.ErrInteractionRequired.Error(), + Info: &httpbakery.ErrorInfo{ + VisitURL: "/visit", + WaitURL: "/wait", + }, + }) } diff -Nru juju-core-2.0~beta15/src/gopkg.in/macaroon.v1/.gitignore juju-core-2.0.0/src/gopkg.in/macaroon.v1/.gitignore --- juju-core-2.0~beta15/src/gopkg.in/macaroon.v1/.gitignore 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/macaroon.v1/.gitignore 2016-10-13 14:32:30.000000000 +0000 @@ -0,0 +1 @@ +*.test diff -Nru juju-core-2.0~beta15/src/gopkg.in/mgo.v2/auth_test.go juju-core-2.0.0/src/gopkg.in/mgo.v2/auth_test.go --- juju-core-2.0~beta15/src/gopkg.in/mgo.v2/auth_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/mgo.v2/auth_test.go 2016-10-13 14:32:16.000000000 +0000 @@ -904,7 +904,7 @@ c.Skip("server does not support SSL") } - clientCertPEM, err := ioutil.ReadFile("testdb/client.pem") + clientCertPEM, err := 
ioutil.ReadFile("harness/certs/client.pem") c.Assert(err, IsNil) clientCert, err := tls.X509KeyPair(clientCertPEM, clientCertPEM) diff -Nru juju-core-2.0~beta15/src/gopkg.in/mgo.v2/bson/bson.go juju-core-2.0.0/src/gopkg.in/mgo.v2/bson/bson.go --- juju-core-2.0~beta15/src/gopkg.in/mgo.v2/bson/bson.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/mgo.v2/bson/bson.go 2016-10-13 14:32:16.000000000 +0000 @@ -38,6 +38,7 @@ "crypto/rand" "encoding/binary" "encoding/hex" + "encoding/json" "errors" "fmt" "io" @@ -204,6 +205,7 @@ // machineId stores machine id generated once and used in subsequent calls // to NewObjectId function. var machineId = readMachineId() +var processId = os.Getpid() // readMachineId generates and returns a machine id. // If this function fails to get the hostname it will cause a runtime error. @@ -234,9 +236,8 @@ b[5] = machineId[1] b[6] = machineId[2] // Pid, 2 bytes, specs don't specify endianness, but we use big endian. - pid := os.Getpid() - b[7] = byte(pid >> 8) - b[8] = byte(pid) + b[7] = byte(processId >> 8) + b[8] = byte(processId) // Increment, 3 bytes, big endian i := atomic.AddUint32(&objectIdCounter, 1) b[9] = byte(i >> 16) @@ -276,6 +277,22 @@ // UnmarshalJSON turns *bson.ObjectId into a json.Unmarshaller. 
func (id *ObjectId) UnmarshalJSON(data []byte) error { + if len(data) > 0 && (data[0] == '{' || data[0] == 'O') { + var v struct { + Id json.RawMessage `json:"$oid"` + Func struct { + Id json.RawMessage + } `json:"$oidFunc"` + } + err := jdec(data, &v) + if err == nil { + if len(v.Id) > 0 { + data = []byte(v.Id) + } else { + data = []byte(v.Func.Id) + } + } + } if len(data) == 2 && data[0] == '"' && data[1] == '"' || bytes.Equal(data, nullBytes) { *id = "" return nil diff -Nru juju-core-2.0~beta15/src/gopkg.in/mgo.v2/bson/bson_test.go juju-core-2.0.0/src/gopkg.in/mgo.v2/bson/bson_test.go --- juju-core-2.0~beta15/src/gopkg.in/mgo.v2/bson/bson_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/mgo.v2/bson/bson_test.go 2016-10-13 14:32:16.000000000 +0000 @@ -71,10 +71,10 @@ case reflect.Ptr: pv := reflect.New(v.Type().Elem()) zero = pv.Interface() - case reflect.Slice, reflect.Int: + case reflect.Slice, reflect.Int, reflect.Int64, reflect.Struct: zero = reflect.New(t).Interface() default: - panic("unsupported doc type") + panic("unsupported doc type: " + t.Name()) } return zero } @@ -1055,7 +1055,7 @@ } type inlineUnexported struct { M map[string]interface{} ",inline" - unexported ",inline" + unexported ",inline" } type unexported struct { A int @@ -1580,6 +1580,9 @@ } } +// -------------------------------------------------------------------------- +// Spec tests + type specTest struct { Description string Documents []struct { @@ -1821,3 +1824,9 @@ panic(err) } } + +func (s *S) BenchmarkNewObjectId(c *C) { + for i := 0; i < c.N; i++ { + bson.NewObjectId() + } +} diff -Nru juju-core-2.0~beta15/src/gopkg.in/mgo.v2/bson/decimal.go juju-core-2.0.0/src/gopkg.in/mgo.v2/bson/decimal.go --- juju-core-2.0~beta15/src/gopkg.in/mgo.v2/bson/decimal.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/mgo.v2/bson/decimal.go 2016-10-13 14:32:16.000000000 +0000 @@ -0,0 +1,310 @@ +// BSON library for Go +// +// Copyright (c) 2010-2012 - 
Gustavo Niemeyer +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// 2. Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package bson + +import ( + "fmt" + "strconv" + "strings" +) + +// Decimal128 holds decimal128 BSON values. +type Decimal128 struct { + h, l uint64 +} + +func (d Decimal128) String() string { + var pos int // positive sign + var e int // exponent + var h, l uint64 // significand high/low + + if d.h>>63&1 == 0 { + pos = 1 + } + + switch d.h >> 58 & (1<<5 - 1) { + case 0x1F: + return "NaN" + case 0x1E: + return "-Inf"[pos:] + } + + l = d.l + if d.h>>61&3 == 3 { + // Bits: 1*sign 2*ignored 14*exponent 111*significand. + // Implicit 0b100 prefix in significand. 
+ e = int(d.h>>47&(1<<14-1)) - 6176 + //h = 4<<47 | d.h&(1<<47-1) + // Spec says all of these values are out of range. + h, l = 0, 0 + } else { + // Bits: 1*sign 14*exponent 113*significand + e = int(d.h>>49&(1<<14-1)) - 6176 + h = d.h & (1<<49 - 1) + } + + // Would be handled by the logic below, but that's trivial and common. + if h == 0 && l == 0 && e == 0 { + return "-0"[pos:] + } + + var repr [48]byte // Loop 5 times over 9 digits plus dot, negative sign, and leading zero. + var last = len(repr) + var i = len(repr) + var dot = len(repr) + e + var rem uint32 +Loop: + for d9 := 0; d9 < 5; d9++ { + h, l, rem = divmod(h, l, 1e9) + for d1 := 0; d1 < 9; d1++ { + // Handle "-0.0", "0.00123400", "-1.00E-6", "1.050E+3", etc. + if i < len(repr) && (dot == i || l == 0 && h == 0 && rem > 0 && rem < 10 && (dot < i-6 || e > 0)) { + e += len(repr) - i + i-- + repr[i] = '.' + last = i - 1 + dot = len(repr) // Unmark. + } + c := '0' + byte(rem%10) + rem /= 10 + i-- + repr[i] = c + // Handle "0E+3", "1E+3", etc. + if l == 0 && h == 0 && rem == 0 && i == len(repr)-1 && (dot < i-5 || e > 0) { + last = i + break Loop + } + if c != '0' { + last = i + } + // Break early. Works without it, but why. 
+ if dot > i && l == 0 && h == 0 && rem == 0 { + break Loop + } + } + } + repr[last-1] = '-' + last-- + + if e > 0 { + return string(repr[last+pos:]) + "E+" + strconv.Itoa(e) + } + if e < 0 { + return string(repr[last+pos:]) + "E" + strconv.Itoa(e) + } + return string(repr[last+pos:]) +} + +func divmod(h, l uint64, div uint32) (qh, ql uint64, rem uint32) { + div64 := uint64(div) + a := h >> 32 + aq := a / div64 + ar := a % div64 + b := ar<<32 + h&(1<<32-1) + bq := b / div64 + br := b % div64 + c := br<<32 + l>>32 + cq := c / div64 + cr := c % div64 + d := cr<<32 + l&(1<<32-1) + dq := d / div64 + dr := d % div64 + return (aq<<32 | bq), (cq<<32 | dq), uint32(dr) +} + +var dNaN = Decimal128{0x1F << 58, 0} +var dPosInf = Decimal128{0x1E << 58, 0} +var dNegInf = Decimal128{0x3E << 58, 0} + +func dErr(s string) (Decimal128, error) { + return dNaN, fmt.Errorf("cannot parse %q as a decimal128", s) +} + +func ParseDecimal128(s string) (Decimal128, error) { + orig := s + if s == "" { + return dErr(orig) + } + neg := s[0] == '-' + if neg || s[0] == '+' { + s = s[1:] + } + + if (len(s) == 3 || len(s) == 8) && (s[0] == 'N' || s[0] == 'n' || s[0] == 'I' || s[0] == 'i') { + if s == "NaN" || s == "nan" || strings.EqualFold(s, "nan") { + return dNaN, nil + } + if s == "Inf" || s == "inf" || strings.EqualFold(s, "inf") || strings.EqualFold(s, "infinity") { + if neg { + return dNegInf, nil + } + return dPosInf, nil + } + return dErr(orig) + } + + var h, l uint64 + var e int + + var add, ovr uint32 + var mul uint32 = 1 + var dot = -1 + var digits = 0 + var i = 0 + for i < len(s) { + c := s[i] + if mul == 1e9 { + h, l, ovr = muladd(h, l, mul, add) + mul, add = 1, 0 + if ovr > 0 || h&((1<<15-1)<<49) > 0 { + return dErr(orig) + } + } + if c >= '0' && c <= '9' { + i++ + if c > '0' || digits > 0 { + digits++ + } + if digits > 34 { + if c == '0' { + // Exact rounding. + e++ + continue + } + return dErr(orig) + } + mul *= 10 + add *= 10 + add += uint32(c - '0') + continue + } + if c == '.' 
{ + i++ + if dot >= 0 || i == 1 && len(s) == 1 { + return dErr(orig) + } + if i == len(s) { + break + } + if s[i] < '0' || s[i] > '9' || e > 0 { + return dErr(orig) + } + dot = i + continue + } + break + } + if i == 0 { + return dErr(orig) + } + if mul > 1 { + h, l, ovr = muladd(h, l, mul, add) + if ovr > 0 || h&((1<<15-1)<<49) > 0 { + return dErr(orig) + } + } + if dot >= 0 { + e += dot - i + } + if i+1 < len(s) && (s[i] == 'E' || s[i] == 'e') { + i++ + eneg := s[i] == '-' + if eneg || s[i] == '+' { + i++ + if i == len(s) { + return dErr(orig) + } + } + n := 0 + for i < len(s) && n < 1e4 { + c := s[i] + i++ + if c < '0' || c > '9' { + return dErr(orig) + } + n *= 10 + n += int(c - '0') + } + if eneg { + n = -n + } + e += n + for e < -6176 { + // Subnormal. + var div uint32 = 1 + for div < 1e9 && e < -6176 { + div *= 10 + e++ + } + var rem uint32 + h, l, rem = divmod(h, l, div) + if rem > 0 { + return dErr(orig) + } + } + for e > 6111 { + // Clamped. + var mul uint32 = 1 + for mul < 1e9 && e > 6111 { + mul *= 10 + e-- + } + h, l, ovr = muladd(h, l, mul, 0) + if ovr > 0 || h&((1<<15-1)<<49) > 0 { + return dErr(orig) + } + } + if e < -6176 || e > 6111 { + return dErr(orig) + } + } + + if i < len(s) { + return dErr(orig) + } + + h |= uint64(e+6176) & uint64(1<<14-1) << 49 + if neg { + h |= 1 << 63 + } + return Decimal128{h, l}, nil +} + +func muladd(h, l uint64, mul uint32, add uint32) (resh, resl uint64, overflow uint32) { + mul64 := uint64(mul) + a := mul64 * (l & (1<<32 - 1)) + b := a>>32 + mul64*(l>>32) + c := b>>32 + mul64*(h&(1<<32-1)) + d := c>>32 + mul64*(h>>32) + + a = a&(1<<32-1) + uint64(add) + b = b&(1<<32-1) + a>>32 + c = c&(1<<32-1) + b>>32 + d = d&(1<<32-1) + c>>32 + + return (d<<32 | c&(1<<32-1)), (b<<32 | a&(1<<32-1)), uint32(d >> 32) +} diff -Nru juju-core-2.0~beta15/src/gopkg.in/mgo.v2/bson/decimal_test.go juju-core-2.0.0/src/gopkg.in/mgo.v2/bson/decimal_test.go --- juju-core-2.0~beta15/src/gopkg.in/mgo.v2/bson/decimal_test.go 1970-01-01 
00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/mgo.v2/bson/decimal_test.go 2016-10-13 14:32:16.000000000 +0000 @@ -0,0 +1,4109 @@ +// BSON library for Go +// +// Copyright (c) 2010-2012 - Gustavo Niemeyer +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// 2. Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package bson_test + +import ( + "encoding/hex" + "encoding/json" + "fmt" + "regexp" + "strings" + + "gopkg.in/mgo.v2/bson" + + . 
"gopkg.in/check.v1" +) + +// -------------------------------------------------------------------------- +// Decimal tests + +type decimalTests struct { + Valid []struct { + Description string `json:"description"` + BSON string `json:"bson"` + CanonicalBSON string `json:"canonical_bson"` + ExtJSON string `json:"extjson"` + CanonicalExtJSON string `json:"canonical_extjson"` + Lossy bool `json:"lossy"` + } `json:"valid"` + + ParseErrors []struct { + Description string `json:"description"` + String string `json:"string"` + } `json:"parseErrors"` +} + +func extJSONRepr(s string) string { + var value struct { + D struct { + Repr string `json:"$numberDecimal"` + } `json:"d"` + } + err := json.Unmarshal([]byte(s), &value) + if err != nil { + panic(err) + } + return value.D.Repr +} + +func (s *S) TestDecimalTests(c *C) { + // These also conform to the spec and are used by Go elsewhere. + // (e.g. math/big won't parse "Infinity"). + goStr := func(s string) string { + switch s { + case "Infinity": + return "Inf" + case "-Infinity": + return "-Inf" + } + return s + } + + for _, testEntry := range decimalTestsJSON { + testFile := testEntry.file + + var tests decimalTests + err := json.Unmarshal([]byte(testEntry.json), &tests) + c.Assert(err, IsNil) + + for _, test := range tests.Valid { + c.Logf("Running %s test: %s", testFile, test.Description) + + test.BSON = strings.ToLower(test.BSON) + + // Unmarshal value from BSON data. + bsonData, err := hex.DecodeString(test.BSON) + var bsonValue struct{ D interface{} } + err = bson.Unmarshal(bsonData, &bsonValue) + c.Assert(err, IsNil) + dec128, ok := bsonValue.D.(bson.Decimal128) + c.Assert(ok, Equals, true) + + // Extract ExtJSON representations (canonical and not). + extjRepr := extJSONRepr(test.ExtJSON) + cextjRepr := extjRepr + if test.CanonicalExtJSON != "" { + cextjRepr = extJSONRepr(test.CanonicalExtJSON) + } + + wantRepr := goStr(cextjRepr) + + // Generate canonical representation. 
+ c.Assert(dec128.String(), Equals, wantRepr) + + // Parse original canonical representation. + parsed, err := bson.ParseDecimal128(cextjRepr) + c.Assert(err, IsNil) + c.Assert(parsed.String(), Equals, wantRepr) + + // Parse non-canonical representation. + parsed, err = bson.ParseDecimal128(extjRepr) + c.Assert(err, IsNil) + c.Assert(parsed.String(), Equals, wantRepr) + + // Parse Go canonical representation (Inf vs. Infinity). + parsed, err = bson.ParseDecimal128(wantRepr) + c.Assert(err, IsNil) + c.Assert(parsed.String(), Equals, wantRepr) + + // Marshal original value back into BSON data. + data, err := bson.Marshal(bsonValue) + c.Assert(err, IsNil) + c.Assert(hex.EncodeToString(data), Equals, test.BSON) + + if test.Lossy { + continue + } + + // Marshal the parsed canonical representation. + var parsedValue struct{ D interface{} } + parsedValue.D = parsed + data, err = bson.Marshal(parsedValue) + c.Assert(err, IsNil) + c.Assert(hex.EncodeToString(data), Equals, test.BSON) + } + + for _, test := range tests.ParseErrors { + c.Logf("Running %s parse error test: %s (string %q)", testFile, test.Description, test.String) + + _, err := bson.ParseDecimal128(test.String) + quoted := regexp.QuoteMeta(fmt.Sprintf("%q", test.String)) + c.Assert(err, ErrorMatches, `cannot parse `+quoted+` as a decimal128`) + } + } +} + +const decBenchNum = "9.999999999999999999999999999999999E+6144" + +func (s *S) BenchmarkDecimal128String(c *C) { + d, err := bson.ParseDecimal128(decBenchNum) + c.Assert(err, IsNil) + c.Assert(d.String(), Equals, decBenchNum) + + c.ResetTimer() + for i := 0; i < c.N; i++ { + d.String() + } +} + +func (s *S) BenchmarkDecimal128Parse(c *C) { + var err error + c.ResetTimer() + for i := 0; i < c.N; i++ { + _, err = bson.ParseDecimal128(decBenchNum) + } + if err != nil { + panic(err) + } +} + +var decimalTestsJSON = []struct{ file, json string }{ + {"decimal128-1.json", ` +{ + "description": "Decimal128", + "bson_type": "0x13", + "test_key": "d", + "valid": [ + { 
+ "description": "Special - Canonical NaN", + "bson": "180000001364000000000000000000000000000000007C00", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"NaN\"}}" + }, + { + "description": "Special - Negative NaN", + "bson": "18000000136400000000000000000000000000000000FC00", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"NaN\"}}", + "lossy": true + }, + { + "description": "Special - Negative NaN", + "bson": "18000000136400000000000000000000000000000000FC00", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"NaN\"}}", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"-NaN\"}}", + "lossy": true + }, + { + "description": "Special - Canonical SNaN", + "bson": "180000001364000000000000000000000000000000007E00", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"NaN\"}}", + "lossy": true + }, + { + "description": "Special - Negative SNaN", + "bson": "18000000136400000000000000000000000000000000FE00", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"NaN\"}}", + "lossy": true + }, + { + "description": "Special - NaN with a payload", + "bson": "180000001364001200000000000000000000000000007E00", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"NaN\"}}", + "lossy": true + }, + { + "description": "Special - Canonical Positive Infinity", + "bson": "180000001364000000000000000000000000000000007800", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"Infinity\"}}" + }, + { + "description": "Special - Canonical Negative Infinity", + "bson": "18000000136400000000000000000000000000000000F800", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"-Infinity\"}}" + }, + { + "description": "Special - Invalid representation treated as 0", + "bson": "180000001364000000000000000000000000000000106C00", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"0\"}}", + "lossy": true + }, + { + "description": "Special - Invalid representation treated as -0", + "bson": "18000000136400DCBA9876543210DEADBEEF00000010EC00", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0\"}}", + "lossy": true + }, + { + "description": 
"Special - Invalid representation treated as 0E3", + "bson": "18000000136400FFFFFFFFFFFFFFFFFFFFFFFFFFFF116C00", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+3\"}}", + "lossy": true + }, + { + "description": "Regular - Adjusted Exponent Limit", + "bson": "18000000136400F2AF967ED05C82DE3297FF6FDE3CF22F00", + "extjson": "{\"d\": { \"$numberDecimal\": \"0.000001234567890123456789012345678901234\" }}" + }, + { + "description": "Regular - Smallest", + "bson": "18000000136400D204000000000000000000000000343000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.001234\"}}" + }, + { + "description": "Regular - Smallest with Trailing Zeros", + "bson": "1800000013640040EF5A07000000000000000000002A3000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00123400000\"}}" + }, + { + "description": "Regular - 0.1", + "bson": "1800000013640001000000000000000000000000003E3000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.1\"}}" + }, + { + "description": "Regular - 0.1234567890123456789012345678901234", + "bson": "18000000136400F2AF967ED05C82DE3297FF6FDE3CFC2F00", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.1234567890123456789012345678901234\"}}" + }, + { + "description": "Regular - 0", + "bson": "180000001364000000000000000000000000000000403000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"0\"}}" + }, + { + "description": "Regular - -0", + "bson": "18000000136400000000000000000000000000000040B000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0\"}}" + }, + { + "description": "Regular - -0.0", + "bson": "1800000013640000000000000000000000000000003EB000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.0\"}}" + }, + { + "description": "Regular - 2", + "bson": "180000001364000200000000000000000000000000403000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"2\"}}" + }, + { + "description": "Regular - 2.000", + "bson": "18000000136400D0070000000000000000000000003A3000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"2.000\"}}" + }, + { + "description": "Regular - 
Largest", + "bson": "18000000136400F2AF967ED05C82DE3297FF6FDE3C403000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1234567890123456789012345678901234\"}}" + }, + { + "description": "Scientific - Tiniest", + "bson": "18000000136400FFFFFFFF638E8D37C087ADBE09ED010000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"9.999999999999999999999999999999999E-6143\"}}" + }, + { + "description": "Scientific - Tiny", + "bson": "180000001364000100000000000000000000000000000000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E-6176\"}}" + }, + { + "description": "Scientific - Negative Tiny", + "bson": "180000001364000100000000000000000000000000008000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"-1E-6176\"}}" + }, + { + "description": "Scientific - Adjusted Exponent Limit", + "bson": "18000000136400F2AF967ED05C82DE3297FF6FDE3CF02F00", + "extjson": "{\"d\": { \"$numberDecimal\": \"1.234567890123456789012345678901234E-7\" }}" + }, + { + "description": "Scientific - Fractional", + "bson": "1800000013640064000000000000000000000000002CB000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"-1.00E-8\"}}" + }, + { + "description": "Scientific - 0 with Exponent", + "bson": "180000001364000000000000000000000000000000205F00", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+6000\"}}" + }, + { + "description": "Scientific - 0 with Negative Exponent", + "bson": "1800000013640000000000000000000000000000007A2B00", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-611\"}}" + }, + { + "description": "Scientific - No Decimal with Signed Exponent", + "bson": "180000001364000100000000000000000000000000463000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+3\"}}" + }, + { + "description": "Scientific - Trailing Zero", + "bson": "180000001364001A04000000000000000000000000423000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.050E+4\"}}" + }, + { + "description": "Scientific - With Decimal", + "bson": "180000001364006900000000000000000000000000423000", + "extjson": "{\"d\" : 
{\"$numberDecimal\" : \"1.05E+3\"}}" + }, + { + "description": "Scientific - Full", + "bson": "18000000136400FFFFFFFFFFFFFFFFFFFFFFFFFFFF403000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"5192296858534827628530496329220095\"}}" + }, + { + "description": "Scientific - Large", + "bson": "18000000136400000000000A5BC138938D44C64D31FE5F00", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000000000000000000000000000E+6144\"}}" + }, + { + "description": "Scientific - Largest", + "bson": "18000000136400FFFFFFFF638E8D37C087ADBE09EDFF5F00", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"9.999999999999999999999999999999999E+6144\"}}" + }, + { + "description": "Non-Canonical Parsing - Exponent Normalization", + "bson": "1800000013640064000000000000000000000000002CB000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"-100E-10\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-1.00E-8\"}}" + }, + { + "description": "Non-Canonical Parsing - Unsigned Positive Exponent", + "bson": "180000001364000100000000000000000000000000463000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E3\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+3\"}}" + }, + { + "description": "Non-Canonical Parsing - Lowercase Exponent Identifier", + "bson": "180000001364000100000000000000000000000000463000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1e+3\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+3\"}}" + }, + { + "description": "Non-Canonical Parsing - Long Significand with Exponent", + "bson": "1800000013640079D9E0F9763ADA429D0200000000583000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"12345689012345789012345E+12\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.2345689012345789012345E+34\"}}" + }, + { + "description": "Non-Canonical Parsing - Positive Sign", + "bson": "18000000136400F2AF967ED05C82DE3297FF6FDE3C403000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"+1234567890123456789012345678901234\"}}", + 
"canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1234567890123456789012345678901234\"}}" + }, + { + "description": "Non-Canonical Parsing - Long Decimal String", + "bson": "180000001364000100000000000000000000000000722800", + "extjson": "{\"d\" : {\"$numberDecimal\" : \".000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E-999\"}}" + }, + { + "description": "Non-Canonical Parsing - nan", + "bson": "180000001364000000000000000000000000000000007C00", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"nan\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"NaN\"}}" + }, + { + "description": "Non-Canonical Parsing - nAn", + "bson": "180000001364000000000000000000000000000000007C00", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"nAn\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"NaN\"}}" + }, + { + "description": "Non-Canonical Parsing - +infinity", + "bson": "180000001364000000000000000000000000000000007800", + "extjson": "{\"d\" : 
{\"$numberDecimal\" : \"+infinity\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"Infinity\"}}" + }, + { + "description": "Non-Canonical Parsing - infinity", + "bson": "180000001364000000000000000000000000000000007800", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"infinity\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"Infinity\"}}" + }, + { + "description": "Non-Canonical Parsing - infiniTY", + "bson": "180000001364000000000000000000000000000000007800", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"infiniTY\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"Infinity\"}}" + }, + { + "description": "Non-Canonical Parsing - inf", + "bson": "180000001364000000000000000000000000000000007800", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"inf\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"Infinity\"}}" + }, + { + "description": "Non-Canonical Parsing - inF", + "bson": "180000001364000000000000000000000000000000007800", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"inF\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"Infinity\"}}" + }, + { + "description": "Non-Canonical Parsing - -infinity", + "bson": "18000000136400000000000000000000000000000000F800", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"-infinity\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-Infinity\"}}" + }, + { + "description": "Non-Canonical Parsing - -infiniTy", + "bson": "18000000136400000000000000000000000000000000F800", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"-infiniTy\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-Infinity\"}}" + }, + { + "description": "Non-Canonical Parsing - -Inf", + "bson": "18000000136400000000000000000000000000000000F800", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"-Infinity\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-Infinity\"}}" + }, + { + "description": "Non-Canonical Parsing - -inf", + "bson": 
"18000000136400000000000000000000000000000000F800", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"-inf\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-Infinity\"}}" + }, + { + "description": "Non-Canonical Parsing - -inF", + "bson": "18000000136400000000000000000000000000000000F800", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"-inF\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-Infinity\"}}" + }, + { + "description": "Rounded Subnormal number", + "bson": "180000001364000100000000000000000000000000000000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"10E-6177\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E-6176\"}}" + }, + { + "description": "Clamped", + "bson": "180000001364000a00000000000000000000000000fe5f00", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E6112\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E+6112\"}}" + }, + { + "description": "Exact rounding", + "bson": "18000000136400000000000a5bc138938d44c64d31cc3700", + "extjson": "{\"d\" : {\"$numberDecimal\" : 
\"1000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000000000000000000000000000E+999\"}}" + } + ] +} +`}, + + {"decimal128-2.json", ` +{ + "description": "Decimal128", + "bson_type": "0x13", + "test_key": "d", + "valid": [ + { + "description": "[decq021] Normality", + "bson": "18000000136400F2AF967ED05C82DE3297FF6FDE3C40B000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"-1234567890123456789012345678901234\"}}" + }, + { + "description": "[decq823] values around [u]int32 edges (zeros done earlier)", + "bson": "18000000136400010000800000000000000000000040B000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"-2147483649\"}}" + }, + { + "description": "[decq822] values around [u]int32 edges (zeros done earlier)", + "bson": "18000000136400000000800000000000000000000040B000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"-2147483648\"}}" + }, + { + "description": "[decq821] values around [u]int32 edges (zeros done earlier)", + "bson": 
"18000000136400FFFFFF7F0000000000000000000040B000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"-2147483647\"}}" + }, + { + "description": "[decq820] values around [u]int32 edges (zeros done earlier)", + "bson": "18000000136400FEFFFF7F0000000000000000000040B000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"-2147483646\"}}" + }, + { + "description": "[decq152] fold-downs (more below)", + "bson": "18000000136400393000000000000000000000000040B000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"-12345\"}}" + }, + { + "description": "[decq154] fold-downs (more below)", + "bson": "18000000136400D20400000000000000000000000040B000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"-1234\"}}" + }, + { + "description": "[decq006] derivative canonical plain strings", + "bson": "18000000136400EE0200000000000000000000000040B000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"-750\"}}" + }, + { + "description": "[decq164] fold-downs (more below)", + "bson": "1800000013640039300000000000000000000000003CB000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"-123.45\"}}" + }, + { + "description": "[decq156] fold-downs (more below)", + "bson": "180000001364007B0000000000000000000000000040B000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"-123\"}}" + }, + { + "description": "[decq008] derivative canonical plain strings", + "bson": "18000000136400EE020000000000000000000000003EB000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"-75.0\"}}" + }, + { + "description": "[decq158] fold-downs (more below)", + "bson": "180000001364000C0000000000000000000000000040B000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"-12\"}}" + }, + { + "description": "[decq122] Nmax and similar", + "bson": "18000000136400FFFFFFFF638E8D37C087ADBE09EDFFDF00", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"-9.999999999999999999999999999999999E+6144\"}}" + }, + { + "description": "[decq002] (mostly derived from the Strawman 4 document and examples)", + "bson": 
"18000000136400EE020000000000000000000000003CB000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"-7.50\"}}" + }, + { + "description": "[decq004] derivative canonical plain strings", + "bson": "18000000136400EE0200000000000000000000000042B000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"-7.50E+3\"}}" + }, + { + "description": "[decq018] derivative canonical plain strings", + "bson": "18000000136400EE020000000000000000000000002EB000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"-7.50E-7\"}}" + }, + { + "description": "[decq125] Nmax and similar", + "bson": "18000000136400F2AF967ED05C82DE3297FF6FDE3CFEDF00", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"-1.234567890123456789012345678901234E+6144\"}}" + }, + { + "description": "[decq131] fold-downs (more below)", + "bson": "18000000136400000000807F1BCF85B27059C8A43CFEDF00", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"-1.230000000000000000000000000000000E+6144\"}}" + }, + { + "description": "[decq162] fold-downs (more below)", + "bson": "180000001364007B000000000000000000000000003CB000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"-1.23\"}}" + }, + { + "description": "[decq176] Nmin and below", + "bson": "18000000136400010000000A5BC138938D44C64D31008000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"-1.000000000000000000000000000000001E-6143\"}}" + }, + { + "description": "[decq174] Nmin and below", + "bson": "18000000136400000000000A5BC138938D44C64D31008000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"-1.000000000000000000000000000000000E-6143\"}}" + }, + { + "description": "[decq133] fold-downs (more below)", + "bson": "18000000136400000000000A5BC138938D44C64D31FEDF00", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"-1.000000000000000000000000000000000E+6144\"}}" + }, + { + "description": "[decq160] fold-downs (more below)", + "bson": "18000000136400010000000000000000000000000040B000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"-1\"}}" + }, + { + "description": "[decq172] Nmin and below", + 
"bson": "180000001364000100000000000000000000000000428000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"-1E-6143\"}}" + }, + { + "description": "[decq010] derivative canonical plain strings", + "bson": "18000000136400EE020000000000000000000000003AB000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.750\"}}" + }, + { + "description": "[decq012] derivative canonical plain strings", + "bson": "18000000136400EE0200000000000000000000000038B000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.0750\"}}" + }, + { + "description": "[decq014] derivative canonical plain strings", + "bson": "18000000136400EE0200000000000000000000000034B000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.000750\"}}" + }, + { + "description": "[decq016] derivative canonical plain strings", + "bson": "18000000136400EE0200000000000000000000000030B000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.00000750\"}}" + }, + { + "description": "[decq404] zeros", + "bson": "180000001364000000000000000000000000000000000000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-6176\"}}" + }, + { + "description": "[decq424] negative zeros", + "bson": "180000001364000000000000000000000000000000008000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E-6176\"}}" + }, + { + "description": "[decq407] zeros", + "bson": "1800000013640000000000000000000000000000003C3000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00\"}}" + }, + { + "description": "[decq427] negative zeros", + "bson": "1800000013640000000000000000000000000000003CB000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.00\"}}" + }, + { + "description": "[decq409] zeros", + "bson": "180000001364000000000000000000000000000000403000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"0\"}}" + }, + { + "description": "[decq428] negative zeros", + "bson": "18000000136400000000000000000000000000000040B000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0\"}}" + }, + { + "description": "[decq700] Selected DPD codes", + "bson": 
"180000001364000000000000000000000000000000403000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"0\"}}" + }, + { + "description": "[decq406] zeros", + "bson": "1800000013640000000000000000000000000000003C3000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00\"}}" + }, + { + "description": "[decq426] negative zeros", + "bson": "1800000013640000000000000000000000000000003CB000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.00\"}}" + }, + { + "description": "[decq410] zeros", + "bson": "180000001364000000000000000000000000000000463000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+3\"}}" + }, + { + "description": "[decq431] negative zeros", + "bson": "18000000136400000000000000000000000000000046B000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E+3\"}}" + }, + { + "description": "[decq419] clamped zeros...", + "bson": "180000001364000000000000000000000000000000FE5F00", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+6111\"}}" + }, + { + "description": "[decq432] negative zeros", + "bson": "180000001364000000000000000000000000000000FEDF00", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E+6111\"}}" + }, + { + "description": "[decq405] zeros", + "bson": "180000001364000000000000000000000000000000000000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-6176\"}}" + }, + { + "description": "[decq425] negative zeros", + "bson": "180000001364000000000000000000000000000000008000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E-6176\"}}" + }, + { + "description": "[decq508] Specials", + "bson": "180000001364000000000000000000000000000000007800", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"Infinity\"}}" + }, + { + "description": "[decq528] Specials", + "bson": "18000000136400000000000000000000000000000000F800", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"-Infinity\"}}" + }, + { + "description": "[decq541] Specials", + "bson": "180000001364000000000000000000000000000000007C00", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"NaN\"}}" + }, + { 
+ "description": "[decq074] Nmin and below", + "bson": "18000000136400000000000A5BC138938D44C64D31000000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000000000000000000000000000E-6143\"}}" + }, + { + "description": "[decq602] fold-down full sequence", + "bson": "18000000136400000000000A5BC138938D44C64D31FE5F00", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000000000000000000000000000E+6144\"}}" + }, + { + "description": "[decq604] fold-down full sequence", + "bson": "180000001364000000000081EFAC855B416D2DEE04FE5F00", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00000000000000000000000000000000E+6143\"}}" + }, + { + "description": "[decq606] fold-down full sequence", + "bson": "1800000013640000000080264B91C02220BE377E00FE5F00", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0000000000000000000000000000000E+6142\"}}" + }, + { + "description": "[decq608] fold-down full sequence", + "bson": "1800000013640000000040EAED7446D09C2C9F0C00FE5F00", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000000000000000000000000E+6141\"}}" + }, + { + "description": "[decq610] fold-down full sequence", + "bson": "18000000136400000000A0CA17726DAE0F1E430100FE5F00", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00000000000000000000000000000E+6140\"}}" + }, + { + "description": "[decq612] fold-down full sequence", + "bson": "18000000136400000000106102253E5ECE4F200000FE5F00", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0000000000000000000000000000E+6139\"}}" + }, + { + "description": "[decq614] fold-down full sequence", + "bson": "18000000136400000000E83C80D09F3C2E3B030000FE5F00", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000000000000000000000E+6138\"}}" + }, + { + "description": "[decq616] fold-down full sequence", + "bson": "18000000136400000000E4D20CC8DCD2B752000000FE5F00", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00000000000000000000000000E+6137\"}}" + }, + { + "description": "[decq618] fold-down full sequence", + "bson": 
"180000001364000000004A48011416954508000000FE5F00", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0000000000000000000000000E+6136\"}}" + }, + { + "description": "[decq620] fold-down full sequence", + "bson": "18000000136400000000A1EDCCCE1BC2D300000000FE5F00", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000000000000000000E+6135\"}}" + }, + { + "description": "[decq622] fold-down full sequence", + "bson": "18000000136400000080F64AE1C7022D1500000000FE5F00", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00000000000000000000000E+6134\"}}" + }, + { + "description": "[decq624] fold-down full sequence", + "bson": "18000000136400000040B2BAC9E0191E0200000000FE5F00", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0000000000000000000000E+6133\"}}" + }, + { + "description": "[decq626] fold-down full sequence", + "bson": "180000001364000000A0DEC5ADC935360000000000FE5F00", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000000000000000E+6132\"}}" + }, + { + "description": "[decq628] fold-down full sequence", + "bson": "18000000136400000010632D5EC76B050000000000FE5F00", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00000000000000000000E+6131\"}}" + }, + { + "description": "[decq630] fold-down full sequence", + "bson": "180000001364000000E8890423C78A000000000000FE5F00", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0000000000000000000E+6130\"}}" + }, + { + "description": "[decq632] fold-down full sequence", + "bson": "18000000136400000064A7B3B6E00D000000000000FE5F00", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000000000000E+6129\"}}" + }, + { + "description": "[decq634] fold-down full sequence", + "bson": "1800000013640000008A5D78456301000000000000FE5F00", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00000000000000000E+6128\"}}" + }, + { + "description": "[decq636] fold-down full sequence", + "bson": "180000001364000000C16FF2862300000000000000FE5F00", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0000000000000000E+6127\"}}" + }, + 
{ + "description": "[decq638] fold-down full sequence", + "bson": "180000001364000080C6A47E8D0300000000000000FE5F00", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000000000E+6126\"}}" + }, + { + "description": "[decq640] fold-down full sequence", + "bson": "1800000013640000407A10F35A0000000000000000FE5F00", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00000000000000E+6125\"}}" + }, + { + "description": "[decq642] fold-down full sequence", + "bson": "1800000013640000A0724E18090000000000000000FE5F00", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0000000000000E+6124\"}}" + }, + { + "description": "[decq644] fold-down full sequence", + "bson": "180000001364000010A5D4E8000000000000000000FE5F00", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000000E+6123\"}}" + }, + { + "description": "[decq646] fold-down full sequence", + "bson": "1800000013640000E8764817000000000000000000FE5F00", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00000000000E+6122\"}}" + }, + { + "description": "[decq648] fold-down full sequence", + "bson": "1800000013640000E40B5402000000000000000000FE5F00", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0000000000E+6121\"}}" + }, + { + "description": "[decq650] fold-down full sequence", + "bson": "1800000013640000CA9A3B00000000000000000000FE5F00", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000E+6120\"}}" + }, + { + "description": "[decq652] fold-down full sequence", + "bson": "1800000013640000E1F50500000000000000000000FE5F00", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00000000E+6119\"}}" + }, + { + "description": "[decq654] fold-down full sequence", + "bson": "180000001364008096980000000000000000000000FE5F00", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0000000E+6118\"}}" + }, + { + "description": "[decq656] fold-down full sequence", + "bson": "1800000013640040420F0000000000000000000000FE5F00", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000E+6117\"}}" + }, + { + "description": "[decq658] 
fold-down full sequence", + "bson": "18000000136400A086010000000000000000000000FE5F00", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00000E+6116\"}}" + }, + { + "description": "[decq660] fold-down full sequence", + "bson": "180000001364001027000000000000000000000000FE5F00", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0000E+6115\"}}" + }, + { + "description": "[decq662] fold-down full sequence", + "bson": "18000000136400E803000000000000000000000000FE5F00", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000E+6114\"}}" + }, + { + "description": "[decq664] fold-down full sequence", + "bson": "180000001364006400000000000000000000000000FE5F00", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00E+6113\"}}" + }, + { + "description": "[decq666] fold-down full sequence", + "bson": "180000001364000A00000000000000000000000000FE5F00", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E+6112\"}}" + }, + { + "description": "[decq060] fold-downs (more below)", + "bson": "180000001364000100000000000000000000000000403000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1\"}}" + }, + { + "description": "[decq670] fold-down full sequence", + "bson": "180000001364000100000000000000000000000000FC5F00", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6110\"}}" + }, + { + "description": "[decq668] fold-down full sequence", + "bson": "180000001364000100000000000000000000000000FE5F00", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6111\"}}" + }, + { + "description": "[decq072] Nmin and below", + "bson": "180000001364000100000000000000000000000000420000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E-6143\"}}" + }, + { + "description": "[decq076] Nmin and below", + "bson": "18000000136400010000000A5BC138938D44C64D31000000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000000000000000000000000001E-6143\"}}" + }, + { + "description": "[decq036] fold-downs (more below)", + "bson": "18000000136400000000807F1BCF85B27059C8A43CFE5F00", + "extjson": "{\"d\" : 
{\"$numberDecimal\" : \"1.230000000000000000000000000000000E+6144\"}}" + }, + { + "description": "[decq062] fold-downs (more below)", + "bson": "180000001364007B000000000000000000000000003C3000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.23\"}}" + }, + { + "description": "[decq034] Nmax and similar", + "bson": "18000000136400F2AF967ED05C82DE3297FF6FDE3CFE5F00", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.234567890123456789012345678901234E+6144\"}}" + }, + { + "description": "[decq441] exponent lengths", + "bson": "180000001364000700000000000000000000000000403000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"7\"}}" + }, + { + "description": "[decq449] exponent lengths", + "bson": "1800000013640007000000000000000000000000001E5F00", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"7E+5999\"}}" + }, + { + "description": "[decq447] exponent lengths", + "bson": "1800000013640007000000000000000000000000000E3800", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"7E+999\"}}" + }, + { + "description": "[decq445] exponent lengths", + "bson": "180000001364000700000000000000000000000000063100", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"7E+99\"}}" + }, + { + "description": "[decq443] exponent lengths", + "bson": "180000001364000700000000000000000000000000523000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"7E+9\"}}" + }, + { + "description": "[decq842] VG testcase", + "bson": "180000001364000000FED83F4E7C9FE4E269E38A5BCD1700", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"7.049000000000010795488000000000000E-3097\"}}" + }, + { + "description": "[decq841] VG testcase", + "bson": "180000001364000000203B9DB5056F000000000000002400", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"8.000000000000000000E-1550\"}}" + }, + { + "description": "[decq840] VG testcase", + "bson": "180000001364003C17258419D710C42F0000000000002400", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"8.81125000000001349436E-1548\"}}" + }, + { + "description": "[decq701] Selected DPD codes", + 
"bson": "180000001364000900000000000000000000000000403000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"9\"}}" + }, + { + "description": "[decq032] Nmax and similar", + "bson": "18000000136400FFFFFFFF638E8D37C087ADBE09EDFF5F00", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"9.999999999999999999999999999999999E+6144\"}}" + }, + { + "description": "[decq702] Selected DPD codes", + "bson": "180000001364000A00000000000000000000000000403000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"10\"}}" + }, + { + "description": "[decq057] fold-downs (more below)", + "bson": "180000001364000C00000000000000000000000000403000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"12\"}}" + }, + { + "description": "[decq703] Selected DPD codes", + "bson": "180000001364001300000000000000000000000000403000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"19\"}}" + }, + { + "description": "[decq704] Selected DPD codes", + "bson": "180000001364001400000000000000000000000000403000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"20\"}}" + }, + { + "description": "[decq705] Selected DPD codes", + "bson": "180000001364001D00000000000000000000000000403000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"29\"}}" + }, + { + "description": "[decq706] Selected DPD codes", + "bson": "180000001364001E00000000000000000000000000403000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"30\"}}" + }, + { + "description": "[decq707] Selected DPD codes", + "bson": "180000001364002700000000000000000000000000403000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"39\"}}" + }, + { + "description": "[decq708] Selected DPD codes", + "bson": "180000001364002800000000000000000000000000403000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"40\"}}" + }, + { + "description": "[decq709] Selected DPD codes", + "bson": "180000001364003100000000000000000000000000403000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"49\"}}" + }, + { + "description": "[decq710] Selected DPD codes", + "bson": 
"180000001364003200000000000000000000000000403000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"50\"}}" + }, + { + "description": "[decq711] Selected DPD codes", + "bson": "180000001364003B00000000000000000000000000403000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"59\"}}" + }, + { + "description": "[decq712] Selected DPD codes", + "bson": "180000001364003C00000000000000000000000000403000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"60\"}}" + }, + { + "description": "[decq713] Selected DPD codes", + "bson": "180000001364004500000000000000000000000000403000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"69\"}}" + }, + { + "description": "[decq714] Selected DPD codes", + "bson": "180000001364004600000000000000000000000000403000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"70\"}}" + }, + { + "description": "[decq715] Selected DPD codes", + "bson": "180000001364004700000000000000000000000000403000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"71\"}}" + }, + { + "description": "[decq716] Selected DPD codes", + "bson": "180000001364004800000000000000000000000000403000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"72\"}}" + }, + { + "description": "[decq717] Selected DPD codes", + "bson": "180000001364004900000000000000000000000000403000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"73\"}}" + }, + { + "description": "[decq718] Selected DPD codes", + "bson": "180000001364004A00000000000000000000000000403000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"74\"}}" + }, + { + "description": "[decq719] Selected DPD codes", + "bson": "180000001364004B00000000000000000000000000403000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"75\"}}" + }, + { + "description": "[decq720] Selected DPD codes", + "bson": "180000001364004C00000000000000000000000000403000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"76\"}}" + }, + { + "description": "[decq721] Selected DPD codes", + "bson": "180000001364004D00000000000000000000000000403000", + "extjson": "{\"d\" : 
{\"$numberDecimal\" : \"77\"}}" + }, + { + "description": "[decq722] Selected DPD codes", + "bson": "180000001364004E00000000000000000000000000403000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"78\"}}" + }, + { + "description": "[decq723] Selected DPD codes", + "bson": "180000001364004F00000000000000000000000000403000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"79\"}}" + }, + { + "description": "[decq056] fold-downs (more below)", + "bson": "180000001364007B00000000000000000000000000403000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"123\"}}" + }, + { + "description": "[decq064] fold-downs (more below)", + "bson": "1800000013640039300000000000000000000000003C3000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"123.45\"}}" + }, + { + "description": "[decq732] Selected DPD codes", + "bson": "180000001364000802000000000000000000000000403000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"520\"}}" + }, + { + "description": "[decq733] Selected DPD codes", + "bson": "180000001364000902000000000000000000000000403000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"521\"}}" + }, + { + "description": "[decq740] DPD: one of each of the huffman groups", + "bson": "180000001364000903000000000000000000000000403000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"777\"}}" + }, + { + "description": "[decq741] DPD: one of each of the huffman groups", + "bson": "180000001364000A03000000000000000000000000403000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"778\"}}" + }, + { + "description": "[decq742] DPD: one of each of the huffman groups", + "bson": "180000001364001303000000000000000000000000403000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"787\"}}" + }, + { + "description": "[decq746] DPD: one of each of the huffman groups", + "bson": "180000001364001F03000000000000000000000000403000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"799\"}}" + }, + { + "description": "[decq743] DPD: one of each of the huffman groups", + "bson": 
"180000001364006D03000000000000000000000000403000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"877\"}}" + }, + { + "description": "[decq753] DPD all-highs cases (includes the 24 redundant codes)", + "bson": "180000001364007803000000000000000000000000403000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"888\"}}" + }, + { + "description": "[decq754] DPD all-highs cases (includes the 24 redundant codes)", + "bson": "180000001364007903000000000000000000000000403000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"889\"}}" + }, + { + "description": "[decq760] DPD all-highs cases (includes the 24 redundant codes)", + "bson": "180000001364008203000000000000000000000000403000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"898\"}}" + }, + { + "description": "[decq764] DPD all-highs cases (includes the 24 redundant codes)", + "bson": "180000001364008303000000000000000000000000403000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"899\"}}" + }, + { + "description": "[decq745] DPD: one of each of the huffman groups", + "bson": "18000000136400D303000000000000000000000000403000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"979\"}}" + }, + { + "description": "[decq770] DPD all-highs cases (includes the 24 redundant codes)", + "bson": "18000000136400DC03000000000000000000000000403000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"988\"}}" + }, + { + "description": "[decq774] DPD all-highs cases (includes the 24 redundant codes)", + "bson": "18000000136400DD03000000000000000000000000403000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"989\"}}" + }, + { + "description": "[decq730] Selected DPD codes", + "bson": "18000000136400E203000000000000000000000000403000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"994\"}}" + }, + { + "description": "[decq731] Selected DPD codes", + "bson": "18000000136400E303000000000000000000000000403000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"995\"}}" + }, + { + "description": "[decq744] DPD: one of each of the huffman groups", 
+ "bson": "18000000136400E503000000000000000000000000403000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"997\"}}" + }, + { + "description": "[decq780] DPD all-highs cases (includes the 24 redundant codes)", + "bson": "18000000136400E603000000000000000000000000403000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"998\"}}" + }, + { + "description": "[decq787] DPD all-highs cases (includes the 24 redundant codes)", + "bson": "18000000136400E703000000000000000000000000403000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"999\"}}" + }, + { + "description": "[decq053] fold-downs (more below)", + "bson": "18000000136400D204000000000000000000000000403000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1234\"}}" + }, + { + "description": "[decq052] fold-downs (more below)", + "bson": "180000001364003930000000000000000000000000403000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"12345\"}}" + }, + { + "description": "[decq792] Miscellaneous (testers' queries, etc.)", + "bson": "180000001364003075000000000000000000000000403000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"30000\"}}" + }, + { + "description": "[decq793] Miscellaneous (testers' queries, etc.)", + "bson": "1800000013640090940D0000000000000000000000403000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"890000\"}}" + }, + { + "description": "[decq824] values around [u]int32 edges (zeros done earlier)", + "bson": "18000000136400FEFFFF7F00000000000000000000403000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"2147483646\"}}" + }, + { + "description": "[decq825] values around [u]int32 edges (zeros done earlier)", + "bson": "18000000136400FFFFFF7F00000000000000000000403000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"2147483647\"}}" + }, + { + "description": "[decq826] values around [u]int32 edges (zeros done earlier)", + "bson": "180000001364000000008000000000000000000000403000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"2147483648\"}}" + }, + { + "description": "[decq827] values around 
[u]int32 edges (zeros done earlier)", + "bson": "180000001364000100008000000000000000000000403000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"2147483649\"}}" + }, + { + "description": "[decq828] values around [u]int32 edges (zeros done earlier)", + "bson": "18000000136400FEFFFFFF00000000000000000000403000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"4294967294\"}}" + }, + { + "description": "[decq829] values around [u]int32 edges (zeros done earlier)", + "bson": "18000000136400FFFFFFFF00000000000000000000403000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"4294967295\"}}" + }, + { + "description": "[decq830] values around [u]int32 edges (zeros done earlier)", + "bson": "180000001364000000000001000000000000000000403000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"4294967296\"}}" + }, + { + "description": "[decq831] values around [u]int32 edges (zeros done earlier)", + "bson": "180000001364000100000001000000000000000000403000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"4294967297\"}}" + }, + { + "description": "[decq022] Normality", + "bson": "18000000136400C7711CC7B548F377DC80A131C836403000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1111111111111111111111111111111111\"}}" + }, + { + "description": "[decq020] Normality", + "bson": "18000000136400F2AF967ED05C82DE3297FF6FDE3C403000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1234567890123456789012345678901234\"}}" + }, + { + "description": "[decq550] Specials", + "bson": "18000000136400FFFFFFFF638E8D37C087ADBE09ED413000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"9999999999999999999999999999999999\"}}" + } + ] +} +`}, + + {"decimal128-3.json", ` +{ + "description": "Decimal128", + "bson_type": "0x13", + "test_key": "d", + "valid": [ + { + "description": "[basx066] strings without E cannot generate E in result", + "bson": "18000000136400185C0ACE0000000000000000000038B000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"-00345678.5432\"}}", + "canonical_extjson": "{\"d\" : 
{\"$numberDecimal\" : \"-345678.5432\"}}" + }, + { + "description": "[basx065] strings without E cannot generate E in result", + "bson": "18000000136400185C0ACE0000000000000000000038B000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0345678.5432\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-345678.5432\"}}" + }, + { + "description": "[basx064] strings without E cannot generate E in result", + "bson": "18000000136400185C0ACE0000000000000000000038B000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"-345678.5432\"}}" + }, + { + "description": "[basx041] strings without E cannot generate E in result", + "bson": "180000001364004C0000000000000000000000000040B000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"-76\"}}" + }, + { + "description": "[basx027] conform to rules and exponent will be in permitted range).", + "bson": "180000001364000F270000000000000000000000003AB000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"-9.999\"}}" + }, + { + "description": "[basx026] conform to rules and exponent will be in permitted range).", + "bson": "180000001364009F230000000000000000000000003AB000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"-9.119\"}}" + }, + { + "description": "[basx025] conform to rules and exponent will be in permitted range).", + "bson": "180000001364008F030000000000000000000000003CB000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"-9.11\"}}" + }, + { + "description": "[basx024] conform to rules and exponent will be in permitted range).", + "bson": "180000001364005B000000000000000000000000003EB000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"-9.1\"}}" + }, + { + "description": "[dqbsr531] negatives (Rounded)", + "bson": "1800000013640099761CC7B548F377DC80A131C836FEAF00", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"-1.1111111111111111111111111111123450\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-1.111111111111111111111111111112345\"}}" + }, + { + "description": "[basx022] conform to rules and exponent 
will be in permitted range).", + "bson": "180000001364000A000000000000000000000000003EB000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"-1.0\"}}" + }, + { + "description": "[basx021] conform to rules and exponent will be in permitted range).", + "bson": "18000000136400010000000000000000000000000040B000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"-1\"}}" + }, + { + "description": "[basx601] Zeros", + "bson": "1800000013640000000000000000000000000000002E3000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.000000000\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-9\"}}" + }, + { + "description": "[basx622] Zeros", + "bson": "1800000013640000000000000000000000000000002EB000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.000000000\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E-9\"}}" + }, + { + "description": "[basx602] Zeros", + "bson": "180000001364000000000000000000000000000000303000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00000000\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-8\"}}" + }, + { + "description": "[basx621] Zeros", + "bson": "18000000136400000000000000000000000000000030B000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.00000000\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E-8\"}}" + }, + { + "description": "[basx603] Zeros", + "bson": "180000001364000000000000000000000000000000323000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0000000\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-7\"}}" + }, + { + "description": "[basx620] Zeros", + "bson": "18000000136400000000000000000000000000000032B000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.0000000\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E-7\"}}" + }, + { + "description": "[basx604] Zeros", + "bson": "180000001364000000000000000000000000000000343000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.000000\"}}" + }, + { + "description": 
"[basx619] Zeros", + "bson": "18000000136400000000000000000000000000000034B000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.000000\"}}" + }, + { + "description": "[basx605] Zeros", + "bson": "180000001364000000000000000000000000000000363000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00000\"}}" + }, + { + "description": "[basx618] Zeros", + "bson": "18000000136400000000000000000000000000000036B000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.00000\"}}" + }, + { + "description": "[basx680] Zeros", + "bson": "180000001364000000000000000000000000000000403000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"000000.\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0\"}}" + }, + { + "description": "[basx606] Zeros", + "bson": "180000001364000000000000000000000000000000383000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0000\"}}" + }, + { + "description": "[basx617] Zeros", + "bson": "18000000136400000000000000000000000000000038B000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.0000\"}}" + }, + { + "description": "[basx681] Zeros", + "bson": "180000001364000000000000000000000000000000403000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"00000.\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0\"}}" + }, + { + "description": "[basx686] Zeros", + "bson": "180000001364000000000000000000000000000000403000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"+00000.\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0\"}}" + }, + { + "description": "[basx687] Zeros", + "bson": "18000000136400000000000000000000000000000040B000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"-00000.\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0\"}}" + }, + { + "description": "[basx019] conform to rules and exponent will be in permitted range).", + "bson": "1800000013640000000000000000000000000000003CB000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"-00.00\"}}", + "canonical_extjson": "{\"d\" : 
{\"$numberDecimal\" : \"-0.00\"}}" + }, + { + "description": "[basx607] Zeros", + "bson": "1800000013640000000000000000000000000000003A3000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.000\"}}" + }, + { + "description": "[basx616] Zeros", + "bson": "1800000013640000000000000000000000000000003AB000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.000\"}}" + }, + { + "description": "[basx682] Zeros", + "bson": "180000001364000000000000000000000000000000403000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"0000.\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0\"}}" + }, + { + "description": "[basx155] Numbers with E", + "bson": "1800000013640000000000000000000000000000003A3000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.000e+0\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.000\"}}" + }, + { + "description": "[basx130] Numbers with E", + "bson": "180000001364000000000000000000000000000000383000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.000E-1\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0000\"}}" + }, + { + "description": "[basx290] some more negative zeros [systematic tests below]", + "bson": "18000000136400000000000000000000000000000038B000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.000E-1\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.0000\"}}" + }, + { + "description": "[basx131] Numbers with E", + "bson": "180000001364000000000000000000000000000000363000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.000E-2\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00000\"}}" + }, + { + "description": "[basx291] some more negative zeros [systematic tests below]", + "bson": "18000000136400000000000000000000000000000036B000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.000E-2\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.00000\"}}" + }, + { + "description": "[basx132] Numbers with E", + "bson": 
"180000001364000000000000000000000000000000343000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.000E-3\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.000000\"}}" + }, + { + "description": "[basx292] some more negative zeros [systematic tests below]", + "bson": "18000000136400000000000000000000000000000034B000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.000E-3\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.000000\"}}" + }, + { + "description": "[basx133] Numbers with E", + "bson": "180000001364000000000000000000000000000000323000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.000E-4\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-7\"}}" + }, + { + "description": "[basx293] some more negative zeros [systematic tests below]", + "bson": "18000000136400000000000000000000000000000032B000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.000E-4\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E-7\"}}" + }, + { + "description": "[basx608] Zeros", + "bson": "1800000013640000000000000000000000000000003C3000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00\"}}" + }, + { + "description": "[basx615] Zeros", + "bson": "1800000013640000000000000000000000000000003CB000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.00\"}}" + }, + { + "description": "[basx683] Zeros", + "bson": "180000001364000000000000000000000000000000403000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"000.\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0\"}}" + }, + { + "description": "[basx630] Zeros", + "bson": "1800000013640000000000000000000000000000003C3000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E+0\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00\"}}" + }, + { + "description": "[basx670] Zeros", + "bson": "1800000013640000000000000000000000000000003C3000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E-0\"}}", + "canonical_extjson": "{\"d\" : 
{\"$numberDecimal\" : \"0.00\"}}" + }, + { + "description": "[basx631] Zeros", + "bson": "1800000013640000000000000000000000000000003E3000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E+1\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0\"}}" + }, + { + "description": "[basx671] Zeros", + "bson": "1800000013640000000000000000000000000000003A3000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E-1\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.000\"}}" + }, + { + "description": "[basx134] Numbers with E", + "bson": "180000001364000000000000000000000000000000383000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E-2\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0000\"}}" + }, + { + "description": "[basx294] some more negative zeros [systematic tests below]", + "bson": "18000000136400000000000000000000000000000038B000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.00E-2\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.0000\"}}" + }, + { + "description": "[basx632] Zeros", + "bson": "180000001364000000000000000000000000000000403000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E+2\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0\"}}" + }, + { + "description": "[basx672] Zeros", + "bson": "180000001364000000000000000000000000000000383000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E-2\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0000\"}}" + }, + { + "description": "[basx135] Numbers with E", + "bson": "180000001364000000000000000000000000000000363000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E-3\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00000\"}}" + }, + { + "description": "[basx295] some more negative zeros [systematic tests below]", + "bson": "18000000136400000000000000000000000000000036B000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.00E-3\"}}", + "canonical_extjson": "{\"d\" : 
{\"$numberDecimal\" : \"-0.00000\"}}" + }, + { + "description": "[basx633] Zeros", + "bson": "180000001364000000000000000000000000000000423000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E+3\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+1\"}}" + }, + { + "description": "[basx673] Zeros", + "bson": "180000001364000000000000000000000000000000363000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E-3\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00000\"}}" + }, + { + "description": "[basx136] Numbers with E", + "bson": "180000001364000000000000000000000000000000343000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E-4\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.000000\"}}" + }, + { + "description": "[basx674] Zeros", + "bson": "180000001364000000000000000000000000000000343000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E-4\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.000000\"}}" + }, + { + "description": "[basx634] Zeros", + "bson": "180000001364000000000000000000000000000000443000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E+4\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+2\"}}" + }, + { + "description": "[basx137] Numbers with E", + "bson": "180000001364000000000000000000000000000000323000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E-5\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-7\"}}" + }, + { + "description": "[basx635] Zeros", + "bson": "180000001364000000000000000000000000000000463000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E+5\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+3\"}}" + }, + { + "description": "[basx675] Zeros", + "bson": "180000001364000000000000000000000000000000323000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E-5\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-7\"}}" + }, + { + "description": "[basx636] Zeros", + 
"bson": "180000001364000000000000000000000000000000483000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E+6\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+4\"}}" + }, + { + "description": "[basx676] Zeros", + "bson": "180000001364000000000000000000000000000000303000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E-6\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-8\"}}" + }, + { + "description": "[basx637] Zeros", + "bson": "1800000013640000000000000000000000000000004A3000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E+7\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+5\"}}" + }, + { + "description": "[basx677] Zeros", + "bson": "1800000013640000000000000000000000000000002E3000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E-7\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-9\"}}" + }, + { + "description": "[basx638] Zeros", + "bson": "1800000013640000000000000000000000000000004C3000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E+8\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+6\"}}" + }, + { + "description": "[basx678] Zeros", + "bson": "1800000013640000000000000000000000000000002C3000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E-8\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-10\"}}" + }, + { + "description": "[basx149] Numbers with E", + "bson": "180000001364000000000000000000000000000000523000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"000E+9\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+9\"}}" + }, + { + "description": "[basx639] Zeros", + "bson": "1800000013640000000000000000000000000000004E3000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E+9\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+7\"}}" + }, + { + "description": "[basx679] Zeros", + "bson": "1800000013640000000000000000000000000000002A3000", + "extjson": "{\"d\" : {\"$numberDecimal\" : 
\"0.00E-9\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-11\"}}" + }, + { + "description": "[basx063] strings without E cannot generate E in result", + "bson": "18000000136400185C0ACE00000000000000000000383000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"+00345678.5432\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"345678.5432\"}}" + }, + { + "description": "[basx018] conform to rules and exponent will be in permitted range).", + "bson": "1800000013640000000000000000000000000000003EB000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.0\"}}" + }, + { + "description": "[basx609] Zeros", + "bson": "1800000013640000000000000000000000000000003E3000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0\"}}" + }, + { + "description": "[basx614] Zeros", + "bson": "1800000013640000000000000000000000000000003EB000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.0\"}}" + }, + { + "description": "[basx684] Zeros", + "bson": "180000001364000000000000000000000000000000403000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"00.\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0\"}}" + }, + { + "description": "[basx640] Zeros", + "bson": "1800000013640000000000000000000000000000003E3000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0E+0\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0\"}}" + }, + { + "description": "[basx660] Zeros", + "bson": "1800000013640000000000000000000000000000003E3000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0E-0\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0\"}}" + }, + { + "description": "[basx641] Zeros", + "bson": "180000001364000000000000000000000000000000403000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0E+1\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0\"}}" + }, + { + "description": "[basx661] Zeros", + "bson": "1800000013640000000000000000000000000000003C3000", + "extjson": "{\"d\" : {\"$numberDecimal\" : 
\"0.0E-1\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00\"}}" + }, + { + "description": "[basx296] some more negative zeros [systematic tests below]", + "bson": "1800000013640000000000000000000000000000003AB000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.0E-2\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.000\"}}" + }, + { + "description": "[basx642] Zeros", + "bson": "180000001364000000000000000000000000000000423000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0E+2\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+1\"}}" + }, + { + "description": "[basx662] Zeros", + "bson": "1800000013640000000000000000000000000000003A3000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0E-2\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.000\"}}" + }, + { + "description": "[basx297] some more negative zeros [systematic tests below]", + "bson": "18000000136400000000000000000000000000000038B000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.0E-3\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.0000\"}}" + }, + { + "description": "[basx643] Zeros", + "bson": "180000001364000000000000000000000000000000443000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0E+3\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+2\"}}" + }, + { + "description": "[basx663] Zeros", + "bson": "180000001364000000000000000000000000000000383000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0E-3\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0000\"}}" + }, + { + "description": "[basx644] Zeros", + "bson": "180000001364000000000000000000000000000000463000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0E+4\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+3\"}}" + }, + { + "description": "[basx664] Zeros", + "bson": "180000001364000000000000000000000000000000363000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0E-4\"}}", + 
"canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00000\"}}" + }, + { + "description": "[basx645] Zeros", + "bson": "180000001364000000000000000000000000000000483000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0E+5\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+4\"}}" + }, + { + "description": "[basx665] Zeros", + "bson": "180000001364000000000000000000000000000000343000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0E-5\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.000000\"}}" + }, + { + "description": "[basx646] Zeros", + "bson": "1800000013640000000000000000000000000000004A3000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0E+6\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+5\"}}" + }, + { + "description": "[basx666] Zeros", + "bson": "180000001364000000000000000000000000000000323000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0E-6\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-7\"}}" + }, + { + "description": "[basx647] Zeros", + "bson": "1800000013640000000000000000000000000000004C3000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0E+7\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+6\"}}" + }, + { + "description": "[basx667] Zeros", + "bson": "180000001364000000000000000000000000000000303000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0E-7\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-8\"}}" + }, + { + "description": "[basx648] Zeros", + "bson": "1800000013640000000000000000000000000000004E3000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0E+8\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+7\"}}" + }, + { + "description": "[basx668] Zeros", + "bson": "1800000013640000000000000000000000000000002E3000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0E-8\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-9\"}}" + }, + { + "description": "[basx160] Numbers with E", + 
"bson": "180000001364000000000000000000000000000000523000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"00E+9\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+9\"}}" + }, + { + "description": "[basx161] Numbers with E", + "bson": "1800000013640000000000000000000000000000002E3000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"00E-9\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-9\"}}" + }, + { + "description": "[basx649] Zeros", + "bson": "180000001364000000000000000000000000000000503000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0E+9\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+8\"}}" + }, + { + "description": "[basx669] Zeros", + "bson": "1800000013640000000000000000000000000000002C3000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0E-9\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-10\"}}" + }, + { + "description": "[basx062] strings without E cannot generate E in result", + "bson": "18000000136400185C0ACE00000000000000000000383000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"+0345678.5432\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"345678.5432\"}}" + }, + { + "description": "[basx001] conform to rules and exponent will be in permitted range).", + "bson": "180000001364000000000000000000000000000000403000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"0\"}}" + }, + { + "description": "[basx017] conform to rules and exponent will be in permitted range).", + "bson": "18000000136400000000000000000000000000000040B000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0\"}}" + }, + { + "description": "[basx611] Zeros", + "bson": "180000001364000000000000000000000000000000403000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0\"}}" + }, + { + "description": "[basx613] Zeros", + "bson": "18000000136400000000000000000000000000000040B000", + "extjson": "{\"d\" : {\"$numberDecimal\" : 
\"-0.\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0\"}}" + }, + { + "description": "[basx685] Zeros", + "bson": "180000001364000000000000000000000000000000403000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0\"}}" + }, + { + "description": "[basx688] Zeros", + "bson": "180000001364000000000000000000000000000000403000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"+0.\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0\"}}" + }, + { + "description": "[basx689] Zeros", + "bson": "18000000136400000000000000000000000000000040B000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0\"}}" + }, + { + "description": "[basx650] Zeros", + "bson": "180000001364000000000000000000000000000000403000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+0\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0\"}}" + }, + { + "description": "[basx651] Zeros", + "bson": "180000001364000000000000000000000000000000423000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+1\"}}" + }, + { + "description": "[basx298] some more negative zeros [systematic tests below]", + "bson": "1800000013640000000000000000000000000000003CB000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E-2\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.00\"}}" + }, + { + "description": "[basx652] Zeros", + "bson": "180000001364000000000000000000000000000000443000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+2\"}}" + }, + { + "description": "[basx299] some more negative zeros [systematic tests below]", + "bson": "1800000013640000000000000000000000000000003AB000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E-3\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.000\"}}" + }, + { + "description": "[basx653] Zeros", + "bson": "180000001364000000000000000000000000000000463000", + "extjson": 
"{\"d\" : {\"$numberDecimal\" : \"0E+3\"}}" + }, + { + "description": "[basx654] Zeros", + "bson": "180000001364000000000000000000000000000000483000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+4\"}}" + }, + { + "description": "[basx655] Zeros", + "bson": "1800000013640000000000000000000000000000004A3000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+5\"}}" + }, + { + "description": "[basx656] Zeros", + "bson": "1800000013640000000000000000000000000000004C3000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+6\"}}" + }, + { + "description": "[basx657] Zeros", + "bson": "1800000013640000000000000000000000000000004E3000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+7\"}}" + }, + { + "description": "[basx658] Zeros", + "bson": "180000001364000000000000000000000000000000503000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+8\"}}" + }, + { + "description": "[basx138] Numbers with E", + "bson": "180000001364000000000000000000000000000000523000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"+0E+9\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+9\"}}" + }, + { + "description": "[basx139] Numbers with E", + "bson": "18000000136400000000000000000000000000000052B000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E+9\"}}" + }, + { + "description": "[basx144] Numbers with E", + "bson": "180000001364000000000000000000000000000000523000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+9\"}}" + }, + { + "description": "[basx154] Numbers with E", + "bson": "180000001364000000000000000000000000000000523000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"0E9\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+9\"}}" + }, + { + "description": "[basx659] Zeros", + "bson": "180000001364000000000000000000000000000000523000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+9\"}}" + }, + { + "description": "[basx042] strings without E cannot generate E in result", + "bson": 
"18000000136400FC040000000000000000000000003C3000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"+12.76\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"12.76\"}}" + }, + { + "description": "[basx143] Numbers with E", + "bson": "180000001364000100000000000000000000000000523000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"+1E+009\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+9\"}}" + }, + { + "description": "[basx061] strings without E cannot generate E in result", + "bson": "18000000136400185C0ACE00000000000000000000383000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"+345678.5432\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"345678.5432\"}}" + }, + { + "description": "[basx036] conform to rules and exponent will be in permitted range).", + "bson": "1800000013640015CD5B0700000000000000000000203000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0000000123456789\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.23456789E-8\"}}" + }, + { + "description": "[basx035] conform to rules and exponent will be in permitted range).", + "bson": "1800000013640015CD5B0700000000000000000000223000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.000000123456789\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.23456789E-7\"}}" + }, + { + "description": "[basx034] conform to rules and exponent will be in permitted range).", + "bson": "1800000013640015CD5B0700000000000000000000243000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00000123456789\"}}" + }, + { + "description": "[basx053] strings without E cannot generate E in result", + "bson": "180000001364003200000000000000000000000000323000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0000050\"}}" + }, + { + "description": "[basx033] conform to rules and exponent will be in permitted range).", + "bson": "1800000013640015CD5B0700000000000000000000263000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0000123456789\"}}" + }, + 
{ + "description": "[basx016] conform to rules and exponent will be in permitted range).", + "bson": "180000001364000C000000000000000000000000003A3000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.012\"}}" + }, + { + "description": "[basx015] conform to rules and exponent will be in permitted range).", + "bson": "180000001364007B000000000000000000000000003A3000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.123\"}}" + }, + { + "description": "[basx037] conform to rules and exponent will be in permitted range).", + "bson": "1800000013640078DF0D8648700000000000000000223000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.123456789012344\"}}" + }, + { + "description": "[basx038] conform to rules and exponent will be in permitted range).", + "bson": "1800000013640079DF0D8648700000000000000000223000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.123456789012345\"}}" + }, + { + "description": "[basx250] Numbers with E", + "bson": "18000000136400F104000000000000000000000000383000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.1265\"}}" + }, + { + "description": "[basx257] Numbers with E", + "bson": "18000000136400F104000000000000000000000000383000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.1265E-0\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.1265\"}}" + }, + { + "description": "[basx256] Numbers with E", + "bson": "18000000136400F104000000000000000000000000363000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.1265E-1\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.01265\"}}" + }, + { + "description": "[basx258] Numbers with E", + "bson": "18000000136400F1040000000000000000000000003A3000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.1265E+1\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265\"}}" + }, + { + "description": "[basx251] Numbers with E", + "bson": "18000000136400F104000000000000000000000000103000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.1265E-20\"}}", + 
"canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E-21\"}}" + }, + { + "description": "[basx263] Numbers with E", + "bson": "18000000136400F104000000000000000000000000603000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.1265E+20\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E+19\"}}" + }, + { + "description": "[basx255] Numbers with E", + "bson": "18000000136400F104000000000000000000000000343000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.1265E-2\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.001265\"}}" + }, + { + "description": "[basx259] Numbers with E", + "bson": "18000000136400F1040000000000000000000000003C3000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.1265E+2\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"12.65\"}}" + }, + { + "description": "[basx254] Numbers with E", + "bson": "18000000136400F104000000000000000000000000323000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.1265E-3\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0001265\"}}" + }, + { + "description": "[basx260] Numbers with E", + "bson": "18000000136400F1040000000000000000000000003E3000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.1265E+3\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"126.5\"}}" + }, + { + "description": "[basx253] Numbers with E", + "bson": "18000000136400F104000000000000000000000000303000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.1265E-4\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00001265\"}}" + }, + { + "description": "[basx261] Numbers with E", + "bson": "18000000136400F104000000000000000000000000403000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.1265E+4\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1265\"}}" + }, + { + "description": "[basx252] Numbers with E", + "bson": "18000000136400F104000000000000000000000000283000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.1265E-8\"}}", + 
"canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E-9\"}}" + }, + { + "description": "[basx262] Numbers with E", + "bson": "18000000136400F104000000000000000000000000483000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.1265E+8\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E+7\"}}" + }, + { + "description": "[basx159] Numbers with E", + "bson": "1800000013640049000000000000000000000000002E3000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.73e-7\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"7.3E-8\"}}" + }, + { + "description": "[basx004] conform to rules and exponent will be in permitted range).", + "bson": "1800000013640064000000000000000000000000003C3000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00\"}}" + }, + { + "description": "[basx003] conform to rules and exponent will be in permitted range).", + "bson": "180000001364000A000000000000000000000000003E3000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0\"}}" + }, + { + "description": "[basx002] conform to rules and exponent will be in permitted range).", + "bson": "180000001364000100000000000000000000000000403000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1\"}}" + }, + { + "description": "[basx148] Numbers with E", + "bson": "180000001364000100000000000000000000000000523000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+009\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+9\"}}" + }, + { + "description": "[basx153] Numbers with E", + "bson": "180000001364000100000000000000000000000000523000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E009\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+9\"}}" + }, + { + "description": "[basx141] Numbers with E", + "bson": "180000001364000100000000000000000000000000523000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1e+09\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+9\"}}" + }, + { + "description": "[basx146] Numbers with E", + 
"bson": "180000001364000100000000000000000000000000523000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+09\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+9\"}}" + }, + { + "description": "[basx151] Numbers with E", + "bson": "180000001364000100000000000000000000000000523000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1e09\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+9\"}}" + }, + { + "description": "[basx142] Numbers with E", + "bson": "180000001364000100000000000000000000000000F43000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+90\"}}" + }, + { + "description": "[basx147] Numbers with E", + "bson": "180000001364000100000000000000000000000000F43000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1e+90\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+90\"}}" + }, + { + "description": "[basx152] Numbers with E", + "bson": "180000001364000100000000000000000000000000F43000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E90\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+90\"}}" + }, + { + "description": "[basx140] Numbers with E", + "bson": "180000001364000100000000000000000000000000523000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+9\"}}" + }, + { + "description": "[basx150] Numbers with E", + "bson": "180000001364000100000000000000000000000000523000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E9\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+9\"}}" + }, + { + "description": "[basx014] conform to rules and exponent will be in permitted range).", + "bson": "18000000136400D2040000000000000000000000003A3000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.234\"}}" + }, + { + "description": "[basx170] Numbers with E", + "bson": "18000000136400F1040000000000000000000000003A3000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265\"}}" + }, + { + "description": "[basx177] Numbers with E", + "bson": 
"18000000136400F1040000000000000000000000003A3000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E-0\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265\"}}" + }, + { + "description": "[basx176] Numbers with E", + "bson": "18000000136400F104000000000000000000000000383000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E-1\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.1265\"}}" + }, + { + "description": "[basx178] Numbers with E", + "bson": "18000000136400F1040000000000000000000000003C3000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E+1\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"12.65\"}}" + }, + { + "description": "[basx171] Numbers with E", + "bson": "18000000136400F104000000000000000000000000123000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E-20\"}}" + }, + { + "description": "[basx183] Numbers with E", + "bson": "18000000136400F104000000000000000000000000623000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E+20\"}}" + }, + { + "description": "[basx175] Numbers with E", + "bson": "18000000136400F104000000000000000000000000363000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E-2\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.01265\"}}" + }, + { + "description": "[basx179] Numbers with E", + "bson": "18000000136400F1040000000000000000000000003E3000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E+2\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"126.5\"}}" + }, + { + "description": "[basx174] Numbers with E", + "bson": "18000000136400F104000000000000000000000000343000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E-3\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.001265\"}}" + }, + { + "description": "[basx180] Numbers with E", + "bson": "18000000136400F104000000000000000000000000403000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E+3\"}}", + "canonical_extjson": "{\"d\" : 
{\"$numberDecimal\" : \"1265\"}}" + }, + { + "description": "[basx173] Numbers with E", + "bson": "18000000136400F104000000000000000000000000323000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E-4\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0001265\"}}" + }, + { + "description": "[basx181] Numbers with E", + "bson": "18000000136400F104000000000000000000000000423000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E+4\"}}" + }, + { + "description": "[basx172] Numbers with E", + "bson": "18000000136400F1040000000000000000000000002A3000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E-8\"}}" + }, + { + "description": "[basx182] Numbers with E", + "bson": "18000000136400F1040000000000000000000000004A3000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E+8\"}}" + }, + { + "description": "[basx157] Numbers with E", + "bson": "180000001364000400000000000000000000000000523000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"4E+9\"}}" + }, + { + "description": "[basx067] examples", + "bson": "180000001364000500000000000000000000000000343000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"5E-6\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.000005\"}}" + }, + { + "description": "[basx069] examples", + "bson": "180000001364000500000000000000000000000000323000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"5E-7\"}}" + }, + { + "description": "[basx385] Engineering notation tests", + "bson": "180000001364000700000000000000000000000000403000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"7E0\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"7\"}}" + }, + { + "description": "[basx365] Engineering notation tests", + "bson": "180000001364000700000000000000000000000000543000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"7E10\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"7E+10\"}}" + }, + { + "description": "[basx405] Engineering notation tests", + "bson": 
"1800000013640007000000000000000000000000002C3000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"7E-10\"}}" + }, + { + "description": "[basx363] Engineering notation tests", + "bson": "180000001364000700000000000000000000000000563000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"7E11\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"7E+11\"}}" + }, + { + "description": "[basx407] Engineering notation tests", + "bson": "1800000013640007000000000000000000000000002A3000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"7E-11\"}}" + }, + { + "description": "[basx361] Engineering notation tests", + "bson": "180000001364000700000000000000000000000000583000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"7E12\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"7E+12\"}}" + }, + { + "description": "[basx409] Engineering notation tests", + "bson": "180000001364000700000000000000000000000000283000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"7E-12\"}}" + }, + { + "description": "[basx411] Engineering notation tests", + "bson": "180000001364000700000000000000000000000000263000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"7E-13\"}}" + }, + { + "description": "[basx383] Engineering notation tests", + "bson": "180000001364000700000000000000000000000000423000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"7E1\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"7E+1\"}}" + }, + { + "description": "[basx387] Engineering notation tests", + "bson": "1800000013640007000000000000000000000000003E3000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"7E-1\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.7\"}}" + }, + { + "description": "[basx381] Engineering notation tests", + "bson": "180000001364000700000000000000000000000000443000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"7E2\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"7E+2\"}}" + }, + { + "description": "[basx389] Engineering notation tests", + 
"bson": "1800000013640007000000000000000000000000003C3000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"7E-2\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.07\"}}" + }, + { + "description": "[basx379] Engineering notation tests", + "bson": "180000001364000700000000000000000000000000463000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"7E3\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"7E+3\"}}" + }, + { + "description": "[basx391] Engineering notation tests", + "bson": "1800000013640007000000000000000000000000003A3000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"7E-3\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.007\"}}" + }, + { + "description": "[basx377] Engineering notation tests", + "bson": "180000001364000700000000000000000000000000483000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"7E4\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"7E+4\"}}" + }, + { + "description": "[basx393] Engineering notation tests", + "bson": "180000001364000700000000000000000000000000383000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"7E-4\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0007\"}}" + }, + { + "description": "[basx375] Engineering notation tests", + "bson": "1800000013640007000000000000000000000000004A3000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"7E5\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"7E+5\"}}" + }, + { + "description": "[basx395] Engineering notation tests", + "bson": "180000001364000700000000000000000000000000363000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"7E-5\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00007\"}}" + }, + { + "description": "[basx373] Engineering notation tests", + "bson": "1800000013640007000000000000000000000000004C3000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"7E6\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"7E+6\"}}" + }, + { + "description": "[basx397] 
Engineering notation tests", + "bson": "180000001364000700000000000000000000000000343000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"7E-6\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.000007\"}}" + }, + { + "description": "[basx371] Engineering notation tests", + "bson": "1800000013640007000000000000000000000000004E3000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"7E7\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"7E+7\"}}" + }, + { + "description": "[basx399] Engineering notation tests", + "bson": "180000001364000700000000000000000000000000323000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"7E-7\"}}" + }, + { + "description": "[basx369] Engineering notation tests", + "bson": "180000001364000700000000000000000000000000503000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"7E8\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"7E+8\"}}" + }, + { + "description": "[basx401] Engineering notation tests", + "bson": "180000001364000700000000000000000000000000303000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"7E-8\"}}" + }, + { + "description": "[basx367] Engineering notation tests", + "bson": "180000001364000700000000000000000000000000523000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"7E9\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"7E+9\"}}" + }, + { + "description": "[basx403] Engineering notation tests", + "bson": "1800000013640007000000000000000000000000002E3000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"7E-9\"}}" + }, + { + "description": "[basx007] conform to rules and exponent will be in permitted range).", + "bson": "1800000013640064000000000000000000000000003E3000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"10.0\"}}" + }, + { + "description": "[basx005] conform to rules and exponent will be in permitted range).", + "bson": "180000001364000A00000000000000000000000000403000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"10\"}}" + }, + { + "description": "[basx165] 
Numbers with E", + "bson": "180000001364000A00000000000000000000000000523000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"10E+009\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E+10\"}}" + }, + { + "description": "[basx163] Numbers with E", + "bson": "180000001364000A00000000000000000000000000523000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"10E+09\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E+10\"}}" + }, + { + "description": "[basx325] Engineering notation tests", + "bson": "180000001364000A00000000000000000000000000403000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"10e0\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"10\"}}" + }, + { + "description": "[basx305] Engineering notation tests", + "bson": "180000001364000A00000000000000000000000000543000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"10e10\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E+11\"}}" + }, + { + "description": "[basx345] Engineering notation tests", + "bson": "180000001364000A000000000000000000000000002C3000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"10e-10\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E-9\"}}" + }, + { + "description": "[basx303] Engineering notation tests", + "bson": "180000001364000A00000000000000000000000000563000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"10e11\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E+12\"}}" + }, + { + "description": "[basx347] Engineering notation tests", + "bson": "180000001364000A000000000000000000000000002A3000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"10e-11\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E-10\"}}" + }, + { + "description": "[basx301] Engineering notation tests", + "bson": "180000001364000A00000000000000000000000000583000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"10e12\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E+13\"}}" + }, + 
{ + "description": "[basx349] Engineering notation tests", + "bson": "180000001364000A00000000000000000000000000283000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"10e-12\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E-11\"}}" + }, + { + "description": "[basx351] Engineering notation tests", + "bson": "180000001364000A00000000000000000000000000263000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"10e-13\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E-12\"}}" + }, + { + "description": "[basx323] Engineering notation tests", + "bson": "180000001364000A00000000000000000000000000423000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"10e1\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E+2\"}}" + }, + { + "description": "[basx327] Engineering notation tests", + "bson": "180000001364000A000000000000000000000000003E3000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"10e-1\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0\"}}" + }, + { + "description": "[basx321] Engineering notation tests", + "bson": "180000001364000A00000000000000000000000000443000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"10e2\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E+3\"}}" + }, + { + "description": "[basx329] Engineering notation tests", + "bson": "180000001364000A000000000000000000000000003C3000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"10e-2\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.10\"}}" + }, + { + "description": "[basx319] Engineering notation tests", + "bson": "180000001364000A00000000000000000000000000463000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"10e3\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E+4\"}}" + }, + { + "description": "[basx331] Engineering notation tests", + "bson": "180000001364000A000000000000000000000000003A3000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"10e-3\"}}", + "canonical_extjson": "{\"d\" 
: {\"$numberDecimal\" : \"0.010\"}}" + }, + { + "description": "[basx317] Engineering notation tests", + "bson": "180000001364000A00000000000000000000000000483000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"10e4\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E+5\"}}" + }, + { + "description": "[basx333] Engineering notation tests", + "bson": "180000001364000A00000000000000000000000000383000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"10e-4\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0010\"}}" + }, + { + "description": "[basx315] Engineering notation tests", + "bson": "180000001364000A000000000000000000000000004A3000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"10e5\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E+6\"}}" + }, + { + "description": "[basx335] Engineering notation tests", + "bson": "180000001364000A00000000000000000000000000363000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"10e-5\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00010\"}}" + }, + { + "description": "[basx313] Engineering notation tests", + "bson": "180000001364000A000000000000000000000000004C3000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"10e6\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E+7\"}}" + }, + { + "description": "[basx337] Engineering notation tests", + "bson": "180000001364000A00000000000000000000000000343000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"10e-6\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.000010\"}}" + }, + { + "description": "[basx311] Engineering notation tests", + "bson": "180000001364000A000000000000000000000000004E3000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"10e7\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E+8\"}}" + }, + { + "description": "[basx339] Engineering notation tests", + "bson": "180000001364000A00000000000000000000000000323000", + "extjson": "{\"d\" : {\"$numberDecimal\" 
: \"10e-7\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0000010\"}}" + }, + { + "description": "[basx309] Engineering notation tests", + "bson": "180000001364000A00000000000000000000000000503000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"10e8\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E+9\"}}" + }, + { + "description": "[basx341] Engineering notation tests", + "bson": "180000001364000A00000000000000000000000000303000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"10e-8\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E-7\"}}" + }, + { + "description": "[basx164] Numbers with E", + "bson": "180000001364000A00000000000000000000000000F43000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"10e+90\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E+91\"}}" + }, + { + "description": "[basx162] Numbers with E", + "bson": "180000001364000A00000000000000000000000000523000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"10E+9\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E+10\"}}" + }, + { + "description": "[basx307] Engineering notation tests", + "bson": "180000001364000A00000000000000000000000000523000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"10e9\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E+10\"}}" + }, + { + "description": "[basx343] Engineering notation tests", + "bson": "180000001364000A000000000000000000000000002E3000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"10e-9\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E-8\"}}" + }, + { + "description": "[basx008] conform to rules and exponent will be in permitted range).", + "bson": "1800000013640065000000000000000000000000003E3000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"10.1\"}}" + }, + { + "description": "[basx009] conform to rules and exponent will be in permitted range).", + "bson": "1800000013640068000000000000000000000000003E3000", + "extjson": "{\"d\" 
: {\"$numberDecimal\" : \"10.4\"}}" + }, + { + "description": "[basx010] conform to rules and exponent will be in permitted range).", + "bson": "1800000013640069000000000000000000000000003E3000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"10.5\"}}" + }, + { + "description": "[basx011] conform to rules and exponent will be in permitted range).", + "bson": "180000001364006A000000000000000000000000003E3000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"10.6\"}}" + }, + { + "description": "[basx012] conform to rules and exponent will be in permitted range).", + "bson": "180000001364006D000000000000000000000000003E3000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"10.9\"}}" + }, + { + "description": "[basx013] conform to rules and exponent will be in permitted range).", + "bson": "180000001364006E000000000000000000000000003E3000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"11.0\"}}" + }, + { + "description": "[basx040] strings without E cannot generate E in result", + "bson": "180000001364000C00000000000000000000000000403000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"12\"}}" + }, + { + "description": "[basx190] Numbers with E", + "bson": "18000000136400F1040000000000000000000000003C3000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"12.65\"}}" + }, + { + "description": "[basx197] Numbers with E", + "bson": "18000000136400F1040000000000000000000000003C3000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"12.65E-0\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"12.65\"}}" + }, + { + "description": "[basx196] Numbers with E", + "bson": "18000000136400F1040000000000000000000000003A3000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"12.65E-1\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265\"}}" + }, + { + "description": "[basx198] Numbers with E", + "bson": "18000000136400F1040000000000000000000000003E3000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"12.65E+1\"}}", + "canonical_extjson": "{\"d\" : 
{\"$numberDecimal\" : \"126.5\"}}" + }, + { + "description": "[basx191] Numbers with E", + "bson": "18000000136400F104000000000000000000000000143000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"12.65E-20\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E-19\"}}" + }, + { + "description": "[basx203] Numbers with E", + "bson": "18000000136400F104000000000000000000000000643000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"12.65E+20\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E+21\"}}" + }, + { + "description": "[basx195] Numbers with E", + "bson": "18000000136400F104000000000000000000000000383000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"12.65E-2\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.1265\"}}" + }, + { + "description": "[basx199] Numbers with E", + "bson": "18000000136400F104000000000000000000000000403000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"12.65E+2\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1265\"}}" + }, + { + "description": "[basx194] Numbers with E", + "bson": "18000000136400F104000000000000000000000000363000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"12.65E-3\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.01265\"}}" + }, + { + "description": "[basx200] Numbers with E", + "bson": "18000000136400F104000000000000000000000000423000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"12.65E+3\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E+4\"}}" + }, + { + "description": "[basx193] Numbers with E", + "bson": "18000000136400F104000000000000000000000000343000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"12.65E-4\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.001265\"}}" + }, + { + "description": "[basx201] Numbers with E", + "bson": "18000000136400F104000000000000000000000000443000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"12.65E+4\"}}", + "canonical_extjson": "{\"d\" : 
{\"$numberDecimal\" : \"1.265E+5\"}}" + }, + { + "description": "[basx192] Numbers with E", + "bson": "18000000136400F1040000000000000000000000002C3000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"12.65E-8\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E-7\"}}" + }, + { + "description": "[basx202] Numbers with E", + "bson": "18000000136400F1040000000000000000000000004C3000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"12.65E+8\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E+9\"}}" + }, + { + "description": "[basx044] strings without E cannot generate E in result", + "bson": "18000000136400FC040000000000000000000000003C3000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"012.76\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"12.76\"}}" + }, + { + "description": "[basx042] strings without E cannot generate E in result", + "bson": "18000000136400FC040000000000000000000000003C3000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"12.76\"}}" + }, + { + "description": "[basx046] strings without E cannot generate E in result", + "bson": "180000001364001100000000000000000000000000403000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"17.\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"17\"}}" + }, + { + "description": "[basx049] strings without E cannot generate E in result", + "bson": "180000001364002C00000000000000000000000000403000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"0044\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"44\"}}" + }, + { + "description": "[basx048] strings without E cannot generate E in result", + "bson": "180000001364002C00000000000000000000000000403000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"044\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"44\"}}" + }, + { + "description": "[basx158] Numbers with E", + "bson": "180000001364002C00000000000000000000000000523000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"44E+9\"}}", 
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"4.4E+10\"}}" + }, + { + "description": "[basx068] examples", + "bson": "180000001364003200000000000000000000000000323000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"50E-7\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0000050\"}}" + }, + { + "description": "[basx169] Numbers with E", + "bson": "180000001364006400000000000000000000000000523000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"100e+009\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00E+11\"}}" + }, + { + "description": "[basx167] Numbers with E", + "bson": "180000001364006400000000000000000000000000523000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"100e+09\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00E+11\"}}" + }, + { + "description": "[basx168] Numbers with E", + "bson": "180000001364006400000000000000000000000000F43000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"100E+90\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00E+92\"}}" + }, + { + "description": "[basx166] Numbers with E", + "bson": "180000001364006400000000000000000000000000523000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"100e+9\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00E+11\"}}" + }, + { + "description": "[basx210] Numbers with E", + "bson": "18000000136400F1040000000000000000000000003E3000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"126.5\"}}" + }, + { + "description": "[basx217] Numbers with E", + "bson": "18000000136400F1040000000000000000000000003E3000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"126.5E-0\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"126.5\"}}" + }, + { + "description": "[basx216] Numbers with E", + "bson": "18000000136400F1040000000000000000000000003C3000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"126.5E-1\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"12.65\"}}" + }, + { + "description": 
"[basx218] Numbers with E", + "bson": "18000000136400F104000000000000000000000000403000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"126.5E+1\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1265\"}}" + }, + { + "description": "[basx211] Numbers with E", + "bson": "18000000136400F104000000000000000000000000163000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"126.5E-20\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E-18\"}}" + }, + { + "description": "[basx223] Numbers with E", + "bson": "18000000136400F104000000000000000000000000663000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"126.5E+20\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E+22\"}}" + }, + { + "description": "[basx215] Numbers with E", + "bson": "18000000136400F1040000000000000000000000003A3000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"126.5E-2\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265\"}}" + }, + { + "description": "[basx219] Numbers with E", + "bson": "18000000136400F104000000000000000000000000423000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"126.5E+2\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E+4\"}}" + }, + { + "description": "[basx214] Numbers with E", + "bson": "18000000136400F104000000000000000000000000383000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"126.5E-3\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.1265\"}}" + }, + { + "description": "[basx220] Numbers with E", + "bson": "18000000136400F104000000000000000000000000443000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"126.5E+3\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E+5\"}}" + }, + { + "description": "[basx213] Numbers with E", + "bson": "18000000136400F104000000000000000000000000363000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"126.5E-4\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.01265\"}}" + }, + { + "description": "[basx221] 
Numbers with E", + "bson": "18000000136400F104000000000000000000000000463000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"126.5E+4\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E+6\"}}" + }, + { + "description": "[basx212] Numbers with E", + "bson": "18000000136400F1040000000000000000000000002E3000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"126.5E-8\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.000001265\"}}" + }, + { + "description": "[basx222] Numbers with E", + "bson": "18000000136400F1040000000000000000000000004E3000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"126.5E+8\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E+10\"}}" + }, + { + "description": "[basx006] conform to rules and exponent will be in permitted range).", + "bson": "18000000136400E803000000000000000000000000403000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1000\"}}" + }, + { + "description": "[basx230] Numbers with E", + "bson": "18000000136400F104000000000000000000000000403000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1265\"}}" + }, + { + "description": "[basx237] Numbers with E", + "bson": "18000000136400F104000000000000000000000000403000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1265E-0\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1265\"}}" + }, + { + "description": "[basx236] Numbers with E", + "bson": "18000000136400F1040000000000000000000000003E3000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1265E-1\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"126.5\"}}" + }, + { + "description": "[basx238] Numbers with E", + "bson": "18000000136400F104000000000000000000000000423000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1265E+1\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E+4\"}}" + }, + { + "description": "[basx231] Numbers with E", + "bson": "18000000136400F104000000000000000000000000183000", + "extjson": "{\"d\" : {\"$numberDecimal\" 
: \"1265E-20\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E-17\"}}" + }, + { + "description": "[basx243] Numbers with E", + "bson": "18000000136400F104000000000000000000000000683000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1265E+20\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E+23\"}}" + }, + { + "description": "[basx235] Numbers with E", + "bson": "18000000136400F1040000000000000000000000003C3000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1265E-2\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"12.65\"}}" + }, + { + "description": "[basx239] Numbers with E", + "bson": "18000000136400F104000000000000000000000000443000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1265E+2\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E+5\"}}" + }, + { + "description": "[basx234] Numbers with E", + "bson": "18000000136400F1040000000000000000000000003A3000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1265E-3\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265\"}}" + }, + { + "description": "[basx240] Numbers with E", + "bson": "18000000136400F104000000000000000000000000463000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1265E+3\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E+6\"}}" + }, + { + "description": "[basx233] Numbers with E", + "bson": "18000000136400F104000000000000000000000000383000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1265E-4\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.1265\"}}" + }, + { + "description": "[basx241] Numbers with E", + "bson": "18000000136400F104000000000000000000000000483000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1265E+4\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E+7\"}}" + }, + { + "description": "[basx232] Numbers with E", + "bson": "18000000136400F104000000000000000000000000303000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1265E-8\"}}", + 
"canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00001265\"}}" + }, + { + "description": "[basx242] Numbers with E", + "bson": "18000000136400F104000000000000000000000000503000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1265E+8\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E+11\"}}" + }, + { + "description": "[basx060] strings without E cannot generate E in result", + "bson": "18000000136400185C0ACE00000000000000000000383000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"345678.5432\"}}" + }, + { + "description": "[basx059] strings without E cannot generate E in result", + "bson": "18000000136400F198670C08000000000000000000363000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"0345678.54321\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"345678.54321\"}}" + }, + { + "description": "[basx058] strings without E cannot generate E in result", + "bson": "180000001364006AF90B7C50000000000000000000343000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"345678.543210\"}}" + }, + { + "description": "[basx057] strings without E cannot generate E in result", + "bson": "180000001364006A19562522020000000000000000343000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"2345678.543210\"}}" + }, + { + "description": "[basx056] strings without E cannot generate E in result", + "bson": "180000001364006AB9C8733A0B0000000000000000343000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"12345678.543210\"}}" + }, + { + "description": "[basx031] conform to rules and exponent will be in permitted range).", + "bson": "1800000013640040AF0D8648700000000000000000343000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"123456789.000000\"}}" + }, + { + "description": "[basx030] conform to rules and exponent will be in permitted range).", + "bson": "1800000013640080910F8648700000000000000000343000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"123456789.123456\"}}" + }, + { + "description": "[basx032] conform to rules and exponent will be in 
permitted range).", + "bson": "1800000013640080910F8648700000000000000000403000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"123456789123456\"}}" + } + ] +} +`}, + + {"decimal128-4.json", ` +{ + "description": "Decimal128", + "bson_type": "0x13", + "test_key": "d", + "valid": [ + { + "description": "[basx023] conform to rules and exponent will be in permitted range).", + "bson": "1800000013640001000000000000000000000000003EB000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.1\"}}" + }, + + { + "description": "[basx045] strings without E cannot generate E in result", + "bson": "1800000013640003000000000000000000000000003A3000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"+0.003\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.003\"}}" + }, + { + "description": "[basx610] Zeros", + "bson": "1800000013640000000000000000000000000000003E3000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \".0\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0\"}}" + }, + { + "description": "[basx612] Zeros", + "bson": "1800000013640000000000000000000000000000003EB000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"-.0\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.0\"}}" + }, + { + "description": "[basx043] strings without E cannot generate E in result", + "bson": "18000000136400FC040000000000000000000000003C3000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"+12.76\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"12.76\"}}" + }, + { + "description": "[basx055] strings without E cannot generate E in result", + "bson": "180000001364000500000000000000000000000000303000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00000005\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"5E-8\"}}" + }, + { + "description": "[basx054] strings without E cannot generate E in result", + "bson": "180000001364000500000000000000000000000000323000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0000005\"}}", + 
"canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"5E-7\"}}" + }, + { + "description": "[basx052] strings without E cannot generate E in result", + "bson": "180000001364000500000000000000000000000000343000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.000005\"}}" + }, + { + "description": "[basx051] strings without E cannot generate E in result", + "bson": "180000001364000500000000000000000000000000363000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"00.00005\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00005\"}}" + }, + { + "description": "[basx050] strings without E cannot generate E in result", + "bson": "180000001364000500000000000000000000000000383000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0005\"}}" + }, + { + "description": "[basx047] strings without E cannot generate E in result", + "bson": "1800000013640005000000000000000000000000003E3000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \".5\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.5\"}}" + }, + { + "description": "[dqbsr431] check rounding modes heeded (Rounded)", + "bson": "1800000013640099761CC7B548F377DC80A131C836FE2F00", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.1111111111111111111111111111123450\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.111111111111111111111111111112345\"}}" + }, + { + "description": "OK2", + "bson": "18000000136400000000000A5BC138938D44C64D31FC2F00", + "extjson": "{\"d\" : {\"$numberDecimal\" : \".100000000000000000000000000000000000000000000000000000000000\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.1000000000000000000000000000000000\"}}" + } + ], + "parseErrors": [ + { + "description": "[basx564] Near-specials (Conversion_syntax)", + "string": "Infi" + }, + { + "description": "[basx565] Near-specials (Conversion_syntax)", + "string": "Infin" + }, + { + "description": "[basx566] Near-specials (Conversion_syntax)", + "string": "Infini" + }, + { + "description": 
"[basx567] Near-specials (Conversion_syntax)", + "string": "Infinit" + }, + { + "description": "[basx568] Near-specials (Conversion_syntax)", + "string": "-Infinit" + }, + { + "description": "[basx590] some baddies with dots and Es and dots and specials (Conversion_syntax)", + "string": ".Infinity" + }, + { + "description": "[basx562] Near-specials (Conversion_syntax)", + "string": "NaNq" + }, + { + "description": "[basx563] Near-specials (Conversion_syntax)", + "string": "NaNs" + }, + { + "description": "[dqbas939] overflow results at different rounding modes (Overflow & Inexact & Rounded)", + "string": "-7e10000" + }, + { + "description": "[dqbsr534] negatives (Rounded & Inexact)", + "string": "-1.11111111111111111111111111111234650" + }, + { + "description": "[dqbsr535] negatives (Rounded & Inexact)", + "string": "-1.11111111111111111111111111111234551" + }, + { + "description": "[dqbsr533] negatives (Rounded & Inexact)", + "string": "-1.11111111111111111111111111111234550" + }, + { + "description": "[dqbsr532] negatives (Rounded & Inexact)", + "string": "-1.11111111111111111111111111111234549" + }, + { + "description": "[dqbsr432] check rounding modes heeded (Rounded & Inexact)", + "string": "1.11111111111111111111111111111234549" + }, + { + "description": "[dqbsr433] check rounding modes heeded (Rounded & Inexact)", + "string": "1.11111111111111111111111111111234550" + }, + { + "description": "[dqbsr435] check rounding modes heeded (Rounded & Inexact)", + "string": "1.11111111111111111111111111111234551" + }, + { + "description": "[dqbsr434] check rounding modes heeded (Rounded & Inexact)", + "string": "1.11111111111111111111111111111234650" + }, + { + "description": "[dqbas938] overflow results at different rounding modes (Overflow & Inexact & Rounded)", + "string": "7e10000" + }, + { + "description": "Inexact rounding#1", + "string": "100000000000000000000000000000000000000000000000000000000001" + }, + { + "description": "Inexact rounding#2", + "string": 
"1E-6177" + } + ] +} +`}, + + {"decimal128-5.json", ` +{ + "description": "Decimal128", + "bson_type": "0x13", + "test_key": "d", + "valid": [ + { + "description": "[decq035] fold-downs (more below) (Clamped)", + "bson": "18000000136400000000807F1BCF85B27059C8A43CFE5F00", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.23E+6144\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.230000000000000000000000000000000E+6144\"}}" + }, + { + "description": "[decq037] fold-downs (more below) (Clamped)", + "bson": "18000000136400000000000A5BC138938D44C64D31FE5F00", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6144\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000000000000000000000000000E+6144\"}}" + }, + { + "description": "[decq077] Nmin and below (Subnormal)", + "bson": "180000001364000000000081EFAC855B416D2DEE04000000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.100000000000000000000000000000000E-6143\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00000000000000000000000000000000E-6144\"}}" + }, + { + "description": "[decq078] Nmin and below (Subnormal)", + "bson": "180000001364000000000081EFAC855B416D2DEE04000000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00000000000000000000000000000000E-6144\"}}" + }, + { + "description": "[decq079] Nmin and below (Subnormal)", + "bson": "180000001364000A00000000000000000000000000000000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.000000000000000000000000000000010E-6143\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E-6175\"}}" + }, + { + "description": "[decq080] Nmin and below (Subnormal)", + "bson": "180000001364000A00000000000000000000000000000000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E-6175\"}}" + }, + { + "description": "[decq081] Nmin and below (Subnormal)", + "bson": "180000001364000100000000000000000000000000020000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00000000000000000000000000000001E-6143\"}}", + 
"canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E-6175\"}}" + }, + { + "description": "[decq082] Nmin and below (Subnormal)", + "bson": "180000001364000100000000000000000000000000020000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E-6175\"}}" + }, + { + "description": "[decq083] Nmin and below (Subnormal)", + "bson": "180000001364000100000000000000000000000000000000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.000000000000000000000000000000001E-6143\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E-6176\"}}" + }, + { + "description": "[decq084] Nmin and below (Subnormal)", + "bson": "180000001364000100000000000000000000000000000000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E-6176\"}}" + }, + { + "description": "[decq090] underflows cannot be tested for simple copies, check edge cases (Subnormal)", + "bson": "180000001364000100000000000000000000000000000000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1e-6176\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E-6176\"}}" + }, + { + "description": "[decq100] underflows cannot be tested for simple copies, check edge cases (Subnormal)", + "bson": "18000000136400FFFFFFFF095BC138938D44C64D31000000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"999999999999999999999999999999999e-6176\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"9.99999999999999999999999999999999E-6144\"}}" + }, + { + "description": "[decq130] fold-downs (more below) (Clamped)", + "bson": "18000000136400000000807F1BCF85B27059C8A43CFEDF00", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"-1.23E+6144\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-1.230000000000000000000000000000000E+6144\"}}" + }, + { + "description": "[decq132] fold-downs (more below) (Clamped)", + "bson": "18000000136400000000000A5BC138938D44C64D31FEDF00", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"-1E+6144\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : 
\"-1.000000000000000000000000000000000E+6144\"}}" + }, + { + "description": "[decq177] Nmin and below (Subnormal)", + "bson": "180000001364000000000081EFAC855B416D2DEE04008000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.100000000000000000000000000000000E-6143\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-1.00000000000000000000000000000000E-6144\"}}" + }, + { + "description": "[decq178] Nmin and below (Subnormal)", + "bson": "180000001364000000000081EFAC855B416D2DEE04008000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"-1.00000000000000000000000000000000E-6144\"}}" + }, + { + "description": "[decq179] Nmin and below (Subnormal)", + "bson": "180000001364000A00000000000000000000000000008000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.000000000000000000000000000000010E-6143\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-1.0E-6175\"}}" + }, + { + "description": "[decq180] Nmin and below (Subnormal)", + "bson": "180000001364000A00000000000000000000000000008000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"-1.0E-6175\"}}" + }, + { + "description": "[decq181] Nmin and below (Subnormal)", + "bson": "180000001364000100000000000000000000000000028000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.00000000000000000000000000000001E-6143\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-1E-6175\"}}" + }, + { + "description": "[decq182] Nmin and below (Subnormal)", + "bson": "180000001364000100000000000000000000000000028000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"-1E-6175\"}}" + }, + { + "description": "[decq183] Nmin and below (Subnormal)", + "bson": "180000001364000100000000000000000000000000008000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.000000000000000000000000000000001E-6143\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-1E-6176\"}}" + }, + { + "description": "[decq184] Nmin and below (Subnormal)", + "bson": "180000001364000100000000000000000000000000008000", + 
"extjson": "{\"d\" : {\"$numberDecimal\" : \"-1E-6176\"}}" + }, + { + "description": "[decq190] underflow edge cases (Subnormal)", + "bson": "180000001364000100000000000000000000000000008000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"-1e-6176\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-1E-6176\"}}" + }, + { + "description": "[decq200] underflow edge cases (Subnormal)", + "bson": "18000000136400FFFFFFFF095BC138938D44C64D31008000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"-999999999999999999999999999999999e-6176\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-9.99999999999999999999999999999999E-6144\"}}" + }, + { + "description": "[decq400] zeros (Clamped)", + "bson": "180000001364000000000000000000000000000000000000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-8000\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-6176\"}}" + }, + { + "description": "[decq401] zeros (Clamped)", + "bson": "180000001364000000000000000000000000000000000000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-6177\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-6176\"}}" + }, + { + "description": "[decq414] clamped zeros... (Clamped)", + "bson": "180000001364000000000000000000000000000000FE5F00", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+6112\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+6111\"}}" + }, + { + "description": "[decq416] clamped zeros... (Clamped)", + "bson": "180000001364000000000000000000000000000000FE5F00", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+6144\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+6111\"}}" + }, + { + "description": "[decq418] clamped zeros... 
(Clamped)", + "bson": "180000001364000000000000000000000000000000FE5F00", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+8000\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+6111\"}}" + }, + { + "description": "[decq420] negative zeros (Clamped)", + "bson": "180000001364000000000000000000000000000000008000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E-8000\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E-6176\"}}" + }, + { + "description": "[decq421] negative zeros (Clamped)", + "bson": "180000001364000000000000000000000000000000008000", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E-6177\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E-6176\"}}" + }, + { + "description": "[decq434] clamped zeros... (Clamped)", + "bson": "180000001364000000000000000000000000000000FEDF00", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E+6112\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E+6111\"}}" + }, + { + "description": "[decq436] clamped zeros... (Clamped)", + "bson": "180000001364000000000000000000000000000000FEDF00", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E+6144\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E+6111\"}}" + }, + { + "description": "[decq438] clamped zeros... 
(Clamped)", + "bson": "180000001364000000000000000000000000000000FEDF00", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E+8000\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E+6111\"}}" + }, + { + "description": "[decq601] fold-down full sequence (Clamped)", + "bson": "18000000136400000000000A5BC138938D44C64D31FE5F00", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6144\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000000000000000000000000000E+6144\"}}" + }, + { + "description": "[decq603] fold-down full sequence (Clamped)", + "bson": "180000001364000000000081EFAC855B416D2DEE04FE5F00", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6143\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00000000000000000000000000000000E+6143\"}}" + }, + { + "description": "[decq605] fold-down full sequence (Clamped)", + "bson": "1800000013640000000080264B91C02220BE377E00FE5F00", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6142\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0000000000000000000000000000000E+6142\"}}" + }, + { + "description": "[decq607] fold-down full sequence (Clamped)", + "bson": "1800000013640000000040EAED7446D09C2C9F0C00FE5F00", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6141\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000000000000000000000000E+6141\"}}" + }, + { + "description": "[decq609] fold-down full sequence (Clamped)", + "bson": "18000000136400000000A0CA17726DAE0F1E430100FE5F00", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6140\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00000000000000000000000000000E+6140\"}}" + }, + { + "description": "[decq611] fold-down full sequence (Clamped)", + "bson": "18000000136400000000106102253E5ECE4F200000FE5F00", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6139\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0000000000000000000000000000E+6139\"}}" + }, + 
{ + "description": "[decq613] fold-down full sequence (Clamped)", + "bson": "18000000136400000000E83C80D09F3C2E3B030000FE5F00", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6138\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000000000000000000000E+6138\"}}" + }, + { + "description": "[decq615] fold-down full sequence (Clamped)", + "bson": "18000000136400000000E4D20CC8DCD2B752000000FE5F00", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6137\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00000000000000000000000000E+6137\"}}" + }, + { + "description": "[decq617] fold-down full sequence (Clamped)", + "bson": "180000001364000000004A48011416954508000000FE5F00", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6136\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0000000000000000000000000E+6136\"}}" + }, + { + "description": "[decq619] fold-down full sequence (Clamped)", + "bson": "18000000136400000000A1EDCCCE1BC2D300000000FE5F00", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6135\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000000000000000000E+6135\"}}" + }, + { + "description": "[decq621] fold-down full sequence (Clamped)", + "bson": "18000000136400000080F64AE1C7022D1500000000FE5F00", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6134\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00000000000000000000000E+6134\"}}" + }, + { + "description": "[decq623] fold-down full sequence (Clamped)", + "bson": "18000000136400000040B2BAC9E0191E0200000000FE5F00", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6133\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0000000000000000000000E+6133\"}}" + }, + { + "description": "[decq625] fold-down full sequence (Clamped)", + "bson": "180000001364000000A0DEC5ADC935360000000000FE5F00", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6132\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : 
\"1.000000000000000000000E+6132\"}}" + }, + { + "description": "[decq627] fold-down full sequence (Clamped)", + "bson": "18000000136400000010632D5EC76B050000000000FE5F00", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6131\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00000000000000000000E+6131\"}}" + }, + { + "description": "[decq629] fold-down full sequence (Clamped)", + "bson": "180000001364000000E8890423C78A000000000000FE5F00", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6130\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0000000000000000000E+6130\"}}" + }, + { + "description": "[decq631] fold-down full sequence (Clamped)", + "bson": "18000000136400000064A7B3B6E00D000000000000FE5F00", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6129\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000000000000E+6129\"}}" + }, + { + "description": "[decq633] fold-down full sequence (Clamped)", + "bson": "1800000013640000008A5D78456301000000000000FE5F00", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6128\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00000000000000000E+6128\"}}" + }, + { + "description": "[decq635] fold-down full sequence (Clamped)", + "bson": "180000001364000000C16FF2862300000000000000FE5F00", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6127\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0000000000000000E+6127\"}}" + }, + { + "description": "[decq637] fold-down full sequence (Clamped)", + "bson": "180000001364000080C6A47E8D0300000000000000FE5F00", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6126\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000000000E+6126\"}}" + }, + { + "description": "[decq639] fold-down full sequence (Clamped)", + "bson": "1800000013640000407A10F35A0000000000000000FE5F00", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6125\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : 
\"1.00000000000000E+6125\"}}" + }, + { + "description": "[decq641] fold-down full sequence (Clamped)", + "bson": "1800000013640000A0724E18090000000000000000FE5F00", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6124\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0000000000000E+6124\"}}" + }, + { + "description": "[decq643] fold-down full sequence (Clamped)", + "bson": "180000001364000010A5D4E8000000000000000000FE5F00", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6123\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000000E+6123\"}}" + }, + { + "description": "[decq645] fold-down full sequence (Clamped)", + "bson": "1800000013640000E8764817000000000000000000FE5F00", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6122\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00000000000E+6122\"}}" + }, + { + "description": "[decq647] fold-down full sequence (Clamped)", + "bson": "1800000013640000E40B5402000000000000000000FE5F00", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6121\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0000000000E+6121\"}}" + }, + { + "description": "[decq649] fold-down full sequence (Clamped)", + "bson": "1800000013640000CA9A3B00000000000000000000FE5F00", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6120\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000E+6120\"}}" + }, + { + "description": "[decq651] fold-down full sequence (Clamped)", + "bson": "1800000013640000E1F50500000000000000000000FE5F00", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6119\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00000000E+6119\"}}" + }, + { + "description": "[decq653] fold-down full sequence (Clamped)", + "bson": "180000001364008096980000000000000000000000FE5F00", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6118\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0000000E+6118\"}}" + }, + { + "description": 
"[decq655] fold-down full sequence (Clamped)", + "bson": "1800000013640040420F0000000000000000000000FE5F00", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6117\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000E+6117\"}}" + }, + { + "description": "[decq657] fold-down full sequence (Clamped)", + "bson": "18000000136400A086010000000000000000000000FE5F00", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6116\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00000E+6116\"}}" + }, + { + "description": "[decq659] fold-down full sequence (Clamped)", + "bson": "180000001364001027000000000000000000000000FE5F00", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6115\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0000E+6115\"}}" + }, + { + "description": "[decq661] fold-down full sequence (Clamped)", + "bson": "18000000136400E803000000000000000000000000FE5F00", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6114\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000E+6114\"}}" + }, + { + "description": "[decq663] fold-down full sequence (Clamped)", + "bson": "180000001364006400000000000000000000000000FE5F00", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6113\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00E+6113\"}}" + }, + { + "description": "[decq665] fold-down full sequence (Clamped)", + "bson": "180000001364000A00000000000000000000000000FE5F00", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6112\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E+6112\"}}" + } + ] +} +`}, + + {"decimal128-6.json", ` +{ + "description": "Decimal128", + "bson_type": "0x13", + "test_key": "d", + "parseErrors": [ + { + "description": "Incomplete Exponent", + "string": "1e" + }, + { + "description": "Exponent at the beginning", + "string": "E01" + }, + { + "description": "Just a decimal place", + "string": "." 
+ }, + { + "description": "2 decimal places", + "string": "..3" + }, + { + "description": "2 decimal places", + "string": ".13.3" + }, + { + "description": "2 decimal places", + "string": "1..3" + }, + { + "description": "2 decimal places", + "string": "1.3.4" + }, + { + "description": "2 decimal places", + "string": "1.34." + }, + { + "description": "Decimal with no digits", + "string": ".e" + }, + { + "description": "2 signs", + "string": "+-32.4" + }, + { + "description": "2 signs", + "string": "-+32.4" + }, + { + "description": "2 negative signs", + "string": "--32.4" + }, + { + "description": "2 negative signs", + "string": "-32.-4" + }, + { + "description": "End in negative sign", + "string": "32.0-" + }, + { + "description": "2 negative signs", + "string": "32.4E--21" + }, + { + "description": "2 negative signs", + "string": "32.4E-2-1" + }, + { + "description": "2 signs", + "string": "32.4E+-21" + }, + { + "description": "Empty string", + "string": "" + }, + { + "description": "leading white space positive number", + "string": " 1" + }, + { + "description": "leading white space negative number", + "string": " -1" + }, + { + "description": "trailing white space", + "string": "1 " + }, + { + "description": "Invalid", + "string": "E" + }, + { + "description": "Invalid", + "string": "invalid" + }, + { + "description": "Invalid", + "string": "i" + }, + { + "description": "Invalid", + "string": "in" + }, + { + "description": "Invalid", + "string": "-in" + }, + { + "description": "Invalid", + "string": "Na" + }, + { + "description": "Invalid", + "string": "-Na" + }, + { + "description": "Invalid", + "string": "1.23abc" + }, + { + "description": "Invalid", + "string": "1.23abcE+02" + }, + { + "description": "Invalid", + "string": "1.23E+0aabs2" + } + ] +} +`}, + + {"decimal128-7.json", ` +{ + "description": "Decimal128", + "bson_type": "0x13", + "test_key": "d", + "parseErrors": [ + { + "description": "[basx572] Near-specials (Conversion_syntax)", + "string": 
"-9Inf" + }, + { + "description": "[basx516] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "-1-" + }, + { + "description": "[basx533] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "0000.." + }, + { + "description": "[basx534] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": ".0000." + }, + { + "description": "[basx535] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "00..00" + }, + { + "description": "[basx569] Near-specials (Conversion_syntax)", + "string": "0Inf" + }, + { + "description": "[basx571] Near-specials (Conversion_syntax)", + "string": "-0Inf" + }, + { + "description": "[basx575] Near-specials (Conversion_syntax)", + "string": "0sNaN" + }, + { + "description": "[basx503] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "++1" + }, + { + "description": "[basx504] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "--1" + }, + { + "description": "[basx505] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "-+1" + }, + { + "description": "[basx506] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "+-1" + }, + { + "description": "[basx510] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": " +1" + }, + { + "description": "[basx513] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": " + 1" + }, + { + "description": "[basx514] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": " - 1" + }, + { + "description": "[basx501] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "." 
+ }, + { + "description": "[basx502] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": ".." + }, + { + "description": "[basx519] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "" + }, + { + "description": "[basx525] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "e100" + }, + { + "description": "[basx549] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "e+1" + }, + { + "description": "[basx577] some baddies with dots and Es and dots and specials (Conversion_syntax)", + "string": ".e+1" + }, + { + "description": "[basx578] some baddies with dots and Es and dots and specials (Conversion_syntax)", + "string": "+.e+1" + }, + { + "description": "[basx581] some baddies with dots and Es and dots and specials (Conversion_syntax)", + "string": "E+1" + }, + { + "description": "[basx582] some baddies with dots and Es and dots and specials (Conversion_syntax)", + "string": ".E+1" + }, + { + "description": "[basx583] some baddies with dots and Es and dots and specials (Conversion_syntax)", + "string": "+.E+1" + }, + { + "description": "[basx579] some baddies with dots and Es and dots and specials (Conversion_syntax)", + "string": "-.e+" + }, + { + "description": "[basx580] some baddies with dots and Es and dots and specials (Conversion_syntax)", + "string": "-.e" + }, + { + "description": "[basx584] some baddies with dots and Es and dots and specials (Conversion_syntax)", + "string": "-.E+" + }, + { + "description": "[basx585] some baddies with dots and Es and dots and specials (Conversion_syntax)", + "string": "-.E" + }, + { + "description": "[basx589] some baddies with dots and Es and dots and specials (Conversion_syntax)", + "string": "+.Inf" + }, + { + "description": "[basx586] some baddies with dots and Es and dots and specials (Conversion_syntax)", + "string": ".NaN" + }, + { + 
"description": "[basx587] some baddies with dots and Es and dots and specials (Conversion_syntax)", + "string": "-.NaN" + }, + { + "description": "[basx545] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "ONE" + }, + { + "description": "[basx561] Near-specials (Conversion_syntax)", + "string": "qNaN" + }, + { + "description": "[basx573] Near-specials (Conversion_syntax)", + "string": "-sNa" + }, + { + "description": "[basx588] some baddies with dots and Es and dots and specials (Conversion_syntax)", + "string": "+.sNaN" + }, + { + "description": "[basx544] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "ten" + }, + { + "description": "[basx527] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "u0b65" + }, + { + "description": "[basx526] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "u0e5a" + }, + { + "description": "[basx515] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "x" + }, + { + "description": "[basx574] Near-specials (Conversion_syntax)", + "string": "xNaN" + }, + { + "description": "[basx530] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": ".123.5" + }, + { + "description": "[basx500] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "1..2" + }, + { + "description": "[basx542] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "1e1.0" + }, + { + "description": "[basx553] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "1E+1.2.3" + }, + { + "description": "[basx543] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "1e123e" + }, + { + "description": "[basx552] The 'baddies' tests from DiagBigDecimal, plus 
some new ones (Conversion_syntax)", + "string": "1E+1.2" + }, + { + "description": "[basx546] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "1e.1" + }, + { + "description": "[basx547] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "1e1." + }, + { + "description": "[basx554] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "1E++1" + }, + { + "description": "[basx555] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "1E--1" + }, + { + "description": "[basx556] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "1E+-1" + }, + { + "description": "[basx557] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "1E-+1" + }, + { + "description": "[basx558] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "1E'1" + }, + { + "description": "[basx559] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "1E\"1" + }, + { + "description": "[basx520] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "1e-" + }, + { + "description": "[basx560] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "1E" + }, + { + "description": "[basx548] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "1ee" + }, + { + "description": "[basx551] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "1.2.1" + }, + { + "description": "[basx550] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "1.23.4" + }, + { + "description": "[basx529] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "1.34.5" + }, 
+ { + "description": "[basx531] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "01.35." + }, + { + "description": "[basx532] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "01.35-" + }, + { + "description": "[basx518] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "3+" + }, + { + "description": "[basx521] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "7e99999a" + }, + { + "description": "[basx570] Near-specials (Conversion_syntax)", + "string": "9Inf" + }, + { + "description": "[basx512] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "12 " + }, + { + "description": "[basx517] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "12-" + }, + { + "description": "[basx507] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "12e" + }, + { + "description": "[basx508] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "12e++" + }, + { + "description": "[basx509] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "12f4" + }, + { + "description": "[basx536] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "111e*123" + }, + { + "description": "[basx537] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "111e123-" + }, + { + "description": "[basx540] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "111e1*23" + }, + { + "description": "[basx538] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "111e+12+" + }, + { + "description": "[basx539] The 'baddies' tests from DiagBigDecimal, plus some new ones 
(Conversion_syntax)", + "string": "111e1-3-" + }, + { + "description": "[basx541] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "111E1e+3" + }, + { + "description": "[basx528] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "123,65" + }, + { + "description": "[basx523] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "7e12356789012x" + }, + { + "description": "[basx522] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "7e123567890x" + } + ] +} +`}, +} diff -Nru juju-core-2.0~beta15/src/gopkg.in/mgo.v2/bson/decode.go juju-core-2.0.0/src/gopkg.in/mgo.v2/bson/decode.go --- juju-core-2.0~beta15/src/gopkg.in/mgo.v2/bson/decode.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/mgo.v2/bson/decode.go 2016-10-13 14:32:16.000000000 +0000 @@ -539,6 +539,11 @@ in = MongoTimestamp(d.readInt64()) case 0x12: // Int64 in = d.readInt64() + case 0x13: // Decimal128 + in = Decimal128{ + l: uint64(d.readInt64()), + h: uint64(d.readInt64()), + } case 0x7F: // Max key in = MaxKey case 0xFF: // Min key diff -Nru juju-core-2.0~beta15/src/gopkg.in/mgo.v2/bson/encode.go juju-core-2.0.0/src/gopkg.in/mgo.v2/bson/encode.go --- juju-core-2.0~beta15/src/gopkg.in/mgo.v2/bson/encode.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/mgo.v2/bson/encode.go 2016-10-13 14:32:16.000000000 +0000 @@ -247,7 +247,7 @@ func (e *encoder) addElem(name string, v reflect.Value, minSize bool) { if !v.IsValid() { - e.addElemName('\x0A', name) + e.addElemName(0x0A, name) return } @@ -276,29 +276,29 @@ panic("ObjectIDs must be exactly 12 bytes long (got " + strconv.Itoa(len(s)) + ")") } - e.addElemName('\x07', name) + e.addElemName(0x07, name) e.addBytes([]byte(s)...) 
case typeSymbol: - e.addElemName('\x0E', name) + e.addElemName(0x0E, name) e.addStr(s) case typeJSONNumber: n := v.Interface().(json.Number) if i, err := n.Int64(); err == nil { - e.addElemName('\x12', name) + e.addElemName(0x12, name) e.addInt64(i) } else if f, err := n.Float64(); err == nil { - e.addElemName('\x01', name) + e.addElemName(0x01, name) e.addFloat64(f) } else { panic("failed to convert json.Number to a number: " + s) } default: - e.addElemName('\x02', name) + e.addElemName(0x02, name) e.addStr(s) } case reflect.Float32, reflect.Float64: - e.addElemName('\x01', name) + e.addElemName(0x01, name) e.addFloat64(v.Float()) case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: @@ -306,40 +306,40 @@ if int64(u) < 0 { panic("BSON has no uint64 type, and value is too large to fit correctly in an int64") } else if u <= math.MaxInt32 && (minSize || v.Kind() <= reflect.Uint32) { - e.addElemName('\x10', name) + e.addElemName(0x10, name) e.addInt32(int32(u)) } else { - e.addElemName('\x12', name) + e.addElemName(0x12, name) e.addInt64(int64(u)) } case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: switch v.Type() { case typeMongoTimestamp: - e.addElemName('\x11', name) + e.addElemName(0x11, name) e.addInt64(v.Int()) case typeOrderKey: if v.Int() == int64(MaxKey) { - e.addElemName('\x7F', name) + e.addElemName(0x7F, name) } else { - e.addElemName('\xFF', name) + e.addElemName(0xFF, name) } default: i := v.Int() if (minSize || v.Type().Kind() != reflect.Int64) && i >= math.MinInt32 && i <= math.MaxInt32 { // It fits into an int32, encode as such. 
- e.addElemName('\x10', name) + e.addElemName(0x10, name) e.addInt32(int32(i)) } else { - e.addElemName('\x12', name) + e.addElemName(0x12, name) e.addInt64(i) } } case reflect.Bool: - e.addElemName('\x08', name) + e.addElemName(0x08, name) if v.Bool() { e.addBytes(1) } else { @@ -347,40 +347,40 @@ } case reflect.Map: - e.addElemName('\x03', name) + e.addElemName(0x03, name) e.addDoc(v) case reflect.Slice: vt := v.Type() et := vt.Elem() if et.Kind() == reflect.Uint8 { - e.addElemName('\x05', name) - e.addBinary('\x00', v.Bytes()) + e.addElemName(0x05, name) + e.addBinary(0x00, v.Bytes()) } else if et == typeDocElem || et == typeRawDocElem { - e.addElemName('\x03', name) + e.addElemName(0x03, name) e.addDoc(v) } else { - e.addElemName('\x04', name) + e.addElemName(0x04, name) e.addDoc(v) } case reflect.Array: et := v.Type().Elem() if et.Kind() == reflect.Uint8 { - e.addElemName('\x05', name) + e.addElemName(0x05, name) if v.CanAddr() { - e.addBinary('\x00', v.Slice(0, v.Len()).Interface().([]byte)) + e.addBinary(0x00, v.Slice(0, v.Len()).Interface().([]byte)) } else { n := v.Len() e.addInt32(int32(n)) - e.addBytes('\x00') + e.addBytes(0x00) for i := 0; i < n; i++ { el := v.Index(i) e.addBytes(byte(el.Uint())) } } } else { - e.addElemName('\x04', name) + e.addElemName(0x04, name) e.addDoc(v) } @@ -399,11 +399,16 @@ e.addBytes(s.Data...) case Binary: - e.addElemName('\x05', name) + e.addElemName(0x05, name) e.addBinary(s.Kind, s.Data) + case Decimal128: + e.addElemName(0x13, name) + e.addInt64(int64(s.l)) + e.addInt64(int64(s.h)) + case DBPointer: - e.addElemName('\x0C', name) + e.addElemName(0x0C, name) e.addStr(s.Namespace) if len(s.Id) != 12 { panic("ObjectIDs must be exactly 12 bytes long (got " + @@ -412,16 +417,16 @@ e.addBytes([]byte(s.Id)...) 
case RegEx: - e.addElemName('\x0B', name) + e.addElemName(0x0B, name) e.addCStr(s.Pattern) e.addCStr(s.Options) case JavaScript: if s.Scope == nil { - e.addElemName('\x0D', name) + e.addElemName(0x0D, name) e.addStr(s.Code) } else { - e.addElemName('\x0F', name) + e.addElemName(0x0F, name) start := e.reserveInt32() e.addStr(s.Code) e.addDoc(reflect.ValueOf(s.Scope)) @@ -430,18 +435,18 @@ case time.Time: // MongoDB handles timestamps as milliseconds. - e.addElemName('\x09', name) + e.addElemName(0x09, name) e.addInt64(s.Unix()*1000 + int64(s.Nanosecond()/1e6)) case url.URL: - e.addElemName('\x02', name) + e.addElemName(0x02, name) e.addStr(s.String()) case undefined: - e.addElemName('\x06', name) + e.addElemName(0x06, name) default: - e.addElemName('\x03', name) + e.addElemName(0x03, name) e.addDoc(v) } diff -Nru juju-core-2.0~beta15/src/gopkg.in/mgo.v2/bson/json.go juju-core-2.0.0/src/gopkg.in/mgo.v2/bson/json.go --- juju-core-2.0~beta15/src/gopkg.in/mgo.v2/bson/json.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/mgo.v2/bson/json.go 2016-10-13 14:32:16.000000000 +0000 @@ -0,0 +1,380 @@ +package bson + +import ( + "bytes" + "encoding/base64" + "fmt" + "gopkg.in/mgo.v2/internal/json" + "strconv" + "time" +) + +// UnmarshalJSON unmarshals a JSON value that may hold non-standard +// syntax as defined in BSON's extended JSON specification. +func UnmarshalJSON(data []byte, value interface{}) error { + d := json.NewDecoder(bytes.NewBuffer(data)) + d.Extend(&jsonExt) + return d.Decode(value) +} + +// MarshalJSON marshals a JSON value that may hold non-standard +// syntax as defined in BSON's extended JSON specification. 
+func MarshalJSON(value interface{}) ([]byte, error) { + var buf bytes.Buffer + e := json.NewEncoder(&buf) + e.Extend(&jsonExt) + err := e.Encode(value) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +// jdec is used internally by the JSON decoding functions +// so they may unmarshal functions without getting into endless +// recursion due to keyed objects. +func jdec(data []byte, value interface{}) error { + d := json.NewDecoder(bytes.NewBuffer(data)) + d.Extend(&funcExt) + return d.Decode(value) +} + +var jsonExt json.Extension +var funcExt json.Extension + +// TODO +// - Shell regular expressions ("/regexp/opts") + +func init() { + jsonExt.DecodeUnquotedKeys(true) + jsonExt.DecodeTrailingCommas(true) + + funcExt.DecodeFunc("BinData", "$binaryFunc", "$type", "$binary") + jsonExt.DecodeKeyed("$binary", jdecBinary) + jsonExt.DecodeKeyed("$binaryFunc", jdecBinary) + jsonExt.EncodeType([]byte(nil), jencBinarySlice) + jsonExt.EncodeType(Binary{}, jencBinaryType) + + funcExt.DecodeFunc("ISODate", "$dateFunc", "S") + funcExt.DecodeFunc("new Date", "$dateFunc", "S") + jsonExt.DecodeKeyed("$date", jdecDate) + jsonExt.DecodeKeyed("$dateFunc", jdecDate) + jsonExt.EncodeType(time.Time{}, jencDate) + + funcExt.DecodeFunc("Timestamp", "$timestamp", "t", "i") + jsonExt.DecodeKeyed("$timestamp", jdecTimestamp) + jsonExt.EncodeType(MongoTimestamp(0), jencTimestamp) + + funcExt.DecodeConst("undefined", Undefined) + + jsonExt.DecodeKeyed("$regex", jdecRegEx) + jsonExt.EncodeType(RegEx{}, jencRegEx) + + funcExt.DecodeFunc("ObjectId", "$oidFunc", "Id") + jsonExt.DecodeKeyed("$oid", jdecObjectId) + jsonExt.DecodeKeyed("$oidFunc", jdecObjectId) + jsonExt.EncodeType(ObjectId(""), jencObjectId) + + funcExt.DecodeFunc("DBRef", "$dbrefFunc", "$ref", "$id") + jsonExt.DecodeKeyed("$dbrefFunc", jdecDBRef) + + funcExt.DecodeFunc("NumberLong", "$numberLongFunc", "N") + jsonExt.DecodeKeyed("$numberLong", jdecNumberLong) + jsonExt.DecodeKeyed("$numberLongFunc", 
jdecNumberLong) + jsonExt.EncodeType(int64(0), jencNumberLong) + jsonExt.EncodeType(int(0), jencInt) + + funcExt.DecodeConst("MinKey", MinKey) + funcExt.DecodeConst("MaxKey", MaxKey) + jsonExt.DecodeKeyed("$minKey", jdecMinKey) + jsonExt.DecodeKeyed("$maxKey", jdecMaxKey) + jsonExt.EncodeType(orderKey(0), jencMinMaxKey) + + jsonExt.DecodeKeyed("$undefined", jdecUndefined) + jsonExt.EncodeType(Undefined, jencUndefined) + + jsonExt.Extend(&funcExt) +} + +func fbytes(format string, args ...interface{}) []byte { + var buf bytes.Buffer + fmt.Fprintf(&buf, format, args...) + return buf.Bytes() +} + +func jdecBinary(data []byte) (interface{}, error) { + var v struct { + Binary []byte `json:"$binary"` + Type string `json:"$type"` + Func struct { + Binary []byte `json:"$binary"` + Type int64 `json:"$type"` + } `json:"$binaryFunc"` + } + err := jdec(data, &v) + if err != nil { + return nil, err + } + + var binData []byte + var binKind int64 + if v.Type == "" && v.Binary == nil { + binData = v.Func.Binary + binKind = v.Func.Type + } else if v.Type == "" { + return v.Binary, nil + } else { + binData = v.Binary + binKind, err = strconv.ParseInt(v.Type, 0, 64) + if err != nil { + binKind = -1 + } + } + + if binKind == 0 { + return binData, nil + } + if binKind < 0 || binKind > 255 { + return nil, fmt.Errorf("invalid type in binary object: %s", data) + } + + return Binary{Kind: byte(binKind), Data: binData}, nil +} + +func jencBinarySlice(v interface{}) ([]byte, error) { + in := v.([]byte) + out := make([]byte, base64.StdEncoding.EncodedLen(len(in))) + base64.StdEncoding.Encode(out, in) + return fbytes(`{"$binary":"%s","$type":"0x0"}`, out), nil +} + +func jencBinaryType(v interface{}) ([]byte, error) { + in := v.(Binary) + out := make([]byte, base64.StdEncoding.EncodedLen(len(in.Data))) + base64.StdEncoding.Encode(out, in.Data) + return fbytes(`{"$binary":"%s","$type":"0x%x"}`, out, in.Kind), nil +} + +const jdateFormat = "2006-01-02T15:04:05.999Z" + +func jdecDate(data []byte) 
(interface{}, error) { + var v struct { + S string `json:"$date"` + Func struct { + S string + } `json:"$dateFunc"` + } + _ = jdec(data, &v) + if v.S == "" { + v.S = v.Func.S + } + if v.S != "" { + for _, format := range []string{jdateFormat, "2006-01-02"} { + t, err := time.Parse(format, v.S) + if err == nil { + return t, nil + } + } + return nil, fmt.Errorf("cannot parse date: %q", v.S) + } + + var vn struct { + Date struct { + N int64 `json:"$numberLong,string"` + } `json:"$date"` + Func struct { + S int64 + } `json:"$dateFunc"` + } + err := jdec(data, &vn) + if err != nil { + return nil, fmt.Errorf("cannot parse date: %q", data) + } + n := vn.Date.N + if n == 0 { + n = vn.Func.S + } + return time.Unix(n/1000, n%1000*1e6).UTC(), nil +} + +func jencDate(v interface{}) ([]byte, error) { + t := v.(time.Time) + return fbytes(`{"$date":%q}`, t.Format(jdateFormat)), nil +} + +func jdecTimestamp(data []byte) (interface{}, error) { + var v struct { + Func struct { + T int32 `json:"t"` + I int32 `json:"i"` + } `json:"$timestamp"` + } + err := jdec(data, &v) + if err != nil { + return nil, err + } + return MongoTimestamp(uint64(v.Func.T)<<32 | uint64(uint32(v.Func.I))), nil +} + +func jencTimestamp(v interface{}) ([]byte, error) { + ts := uint64(v.(MongoTimestamp)) + return fbytes(`{"$timestamp":{"t":%d,"i":%d}}`, ts>>32, uint32(ts)), nil +} + +func jdecRegEx(data []byte) (interface{}, error) { + var v struct { + Regex string `json:"$regex"` + Options string `json:"$options"` + } + err := jdec(data, &v) + if err != nil { + return nil, err + } + return RegEx{v.Regex, v.Options}, nil +} + +func jencRegEx(v interface{}) ([]byte, error) { + re := v.(RegEx) + type regex struct { + Regex string `json:"$regex"` + Options string `json:"$options"` + } + return json.Marshal(regex{re.Pattern, re.Options}) +} + +func jdecObjectId(data []byte) (interface{}, error) { + var v struct { + Id string `json:"$oid"` + Func struct { + Id string + } `json:"$oidFunc"` + } + err := jdec(data, &v) 
+ if err != nil { + return nil, err + } + if v.Id == "" { + v.Id = v.Func.Id + } + return ObjectIdHex(v.Id), nil +} + +func jencObjectId(v interface{}) ([]byte, error) { + return fbytes(`{"$oid":"%s"}`, v.(ObjectId).Hex()), nil +} + +func jdecDBRef(data []byte) (interface{}, error) { + // TODO Support unmarshaling $ref and $id into the input value. + var v struct { + Obj map[string]interface{} `json:"$dbrefFunc"` + } + // TODO Fix this. Must not be required. + v.Obj = make(map[string]interface{}) + err := jdec(data, &v) + if err != nil { + return nil, err + } + return v.Obj, nil +} + +func jdecNumberLong(data []byte) (interface{}, error) { + var v struct { + N int64 `json:"$numberLong,string"` + Func struct { + N int64 `json:",string"` + } `json:"$numberLongFunc"` + } + var vn struct { + N int64 `json:"$numberLong"` + Func struct { + N int64 + } `json:"$numberLongFunc"` + } + err := jdec(data, &v) + if err != nil { + err = jdec(data, &vn) + v.N = vn.N + v.Func.N = vn.Func.N + } + if err != nil { + return nil, err + } + if v.N != 0 { + return v.N, nil + } + return v.Func.N, nil +} + +func jencNumberLong(v interface{}) ([]byte, error) { + n := v.(int64) + f := `{"$numberLong":"%d"}` + if n <= 1<<53 { + f = `{"$numberLong":%d}` + } + return fbytes(f, n), nil +} + +func jencInt(v interface{}) ([]byte, error) { + n := v.(int) + f := `{"$numberLong":"%d"}` + if n <= 1<<53 { + f = `%d` + } + return fbytes(f, n), nil +} + +func jdecMinKey(data []byte) (interface{}, error) { + var v struct { + N int64 `json:"$minKey"` + } + err := jdec(data, &v) + if err != nil { + return nil, err + } + if v.N != 1 { + return nil, fmt.Errorf("invalid $minKey object: %s", data) + } + return MinKey, nil +} + +func jdecMaxKey(data []byte) (interface{}, error) { + var v struct { + N int64 `json:"$maxKey"` + } + err := jdec(data, &v) + if err != nil { + return nil, err + } + if v.N != 1 { + return nil, fmt.Errorf("invalid $maxKey object: %s", data) + } + return MaxKey, nil +} + +func 
jencMinMaxKey(v interface{}) ([]byte, error) { + switch v.(orderKey) { + case MinKey: + return []byte(`{"$minKey":1}`), nil + case MaxKey: + return []byte(`{"$maxKey":1}`), nil + } + panic(fmt.Sprintf("invalid $minKey/$maxKey value: %d", v)) +} + +func jdecUndefined(data []byte) (interface{}, error) { + var v struct { + B bool `json:"$undefined"` + } + err := jdec(data, &v) + if err != nil { + return nil, err + } + if !v.B { + return nil, fmt.Errorf("invalid $undefined object: %s", data) + } + return Undefined, nil +} + +func jencUndefined(v interface{}) ([]byte, error) { + return []byte(`{"$undefined":true}`), nil +} diff -Nru juju-core-2.0~beta15/src/gopkg.in/mgo.v2/bson/json_test.go juju-core-2.0.0/src/gopkg.in/mgo.v2/bson/json_test.go --- juju-core-2.0~beta15/src/gopkg.in/mgo.v2/bson/json_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/mgo.v2/bson/json_test.go 2016-10-13 14:32:16.000000000 +0000 @@ -0,0 +1,184 @@ +package bson_test + +import ( + "gopkg.in/mgo.v2/bson" + + . "gopkg.in/check.v1" + "reflect" + "strings" + "time" +) + +type jsonTest struct { + a interface{} // value encoded into JSON (optional) + b string // JSON expected as output of
, and used as input to + c interface{} // Value expected from decoding , defaults to + e string // error string, if decoding (b) should fail +} + +var jsonTests = []jsonTest{ + // $binary + { + a: []byte("foo"), + b: `{"$binary":"Zm9v","$type":"0x0"}`, + }, { + a: bson.Binary{Kind: 2, Data: []byte("foo")}, + b: `{"$binary":"Zm9v","$type":"0x2"}`, + }, { + b: `BinData(2,"Zm9v")`, + c: bson.Binary{Kind: 2, Data: []byte("foo")}, + }, + + // $date + { + a: time.Date(2016, 5, 15, 1, 2, 3, 4000000, time.UTC), + b: `{"$date":"2016-05-15T01:02:03.004Z"}`, + }, { + b: `{"$date": {"$numberLong": "1002"}}`, + c: time.Date(1970, 1, 1, 0, 0, 1, 2e6, time.UTC), + }, { + b: `ISODate("2016-05-15T01:02:03.004Z")`, + c: time.Date(2016, 5, 15, 1, 2, 3, 4000000, time.UTC), + }, { + b: `new Date(1000)`, + c: time.Date(1970, 1, 1, 0, 0, 1, 0, time.UTC), + }, { + b: `new Date("2016-05-15")`, + c: time.Date(2016, 5, 15, 0, 0, 0, 0, time.UTC), + }, + + // $timestamp + { + a: bson.MongoTimestamp(4294967298), + b: `{"$timestamp":{"t":1,"i":2}}`, + }, { + b: `Timestamp(1, 2)`, + c: bson.MongoTimestamp(4294967298), + }, + + // $regex + { + a: bson.RegEx{"pattern", "options"}, + b: `{"$regex":"pattern","$options":"options"}`, + }, + + // $oid + { + a: bson.ObjectIdHex("0123456789abcdef01234567"), + b: `{"$oid":"0123456789abcdef01234567"}`, + }, { + b: `ObjectId("0123456789abcdef01234567")`, + c: bson.ObjectIdHex("0123456789abcdef01234567"), + }, + + // $ref (no special type) + { + b: `DBRef("name", "id")`, + c: map[string]interface{}{"$ref": "name", "$id": "id"}, + }, + + // $numberLong + { + a: 123, + b: `123`, + }, { + a: int64(9007199254740992), + b: `{"$numberLong":9007199254740992}`, + }, { + a: int64(1<<53 + 1), + b: `{"$numberLong":"9007199254740993"}`, + }, { + a: 1<<53 + 1, + b: `{"$numberLong":"9007199254740993"}`, + c: int64(9007199254740993), + }, { + b: `NumberLong(9007199254740992)`, + c: int64(1 << 53), + }, { + b: `NumberLong("9007199254740993")`, + c: int64(1<<53 + 1), + }, + + 
// $minKey, $maxKey + { + a: bson.MinKey, + b: `{"$minKey":1}`, + }, { + a: bson.MaxKey, + b: `{"$maxKey":1}`, + }, { + b: `MinKey`, + c: bson.MinKey, + }, { + b: `MaxKey`, + c: bson.MaxKey, + }, { + b: `{"$minKey":0}`, + e: `invalid $minKey object: {"$minKey":0}`, + }, { + b: `{"$maxKey":0}`, + e: `invalid $maxKey object: {"$maxKey":0}`, + }, + + { + a: bson.Undefined, + b: `{"$undefined":true}`, + }, { + b: `undefined`, + c: bson.Undefined, + }, { + b: `{"v": undefined}`, + c: struct{ V interface{} }{bson.Undefined}, + }, + + // Unquoted keys and trailing commas + { + b: `{$foo: ["bar",],}`, + c: map[string]interface{}{"$foo": []interface{}{"bar"}}, + }, +} + +func (s *S) TestJSON(c *C) { + for i, item := range jsonTests { + c.Logf("------------ (#%d)", i) + c.Logf("A: %#v", item.a) + c.Logf("B: %#v", item.b) + + if item.c == nil { + item.c = item.a + } else { + c.Logf("C: %#v", item.c) + } + if item.e != "" { + c.Logf("E: %s", item.e) + } + + if item.a != nil { + data, err := bson.MarshalJSON(item.a) + c.Assert(err, IsNil) + c.Logf("Dumped: %#v", string(data)) + c.Assert(strings.TrimSuffix(string(data), "\n"), Equals, item.b) + } + + var zero interface{} + if item.c == nil { + zero = &struct{}{} + } else { + zero = reflect.New(reflect.TypeOf(item.c)).Interface() + } + err := bson.UnmarshalJSON([]byte(item.b), zero) + if item.e != "" { + c.Assert(err, NotNil) + c.Assert(err.Error(), Equals, item.e) + continue + } + c.Assert(err, IsNil) + zerov := reflect.ValueOf(zero) + value := zerov.Interface() + if zerov.Kind() == reflect.Ptr { + value = zerov.Elem().Interface() + } + c.Logf("Loaded: %#v", value) + c.Assert(value, DeepEquals, item.c) + } +} diff -Nru juju-core-2.0~beta15/src/gopkg.in/mgo.v2/cluster.go juju-core-2.0.0/src/gopkg.in/mgo.v2/cluster.go --- juju-core-2.0~beta15/src/gopkg.in/mgo.v2/cluster.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/mgo.v2/cluster.go 2016-10-13 14:32:16.000000000 +0000 @@ -588,7 +588,10 @@ mastersLen := 
cluster.masters.Len() slavesLen := cluster.servers.Len() - mastersLen debugf("Cluster has %d known masters and %d known slaves.", mastersLen, slavesLen) - if !(slaveOk && mode == Secondary) && mastersLen > 0 || slaveOk && slavesLen > 0 { + if mastersLen > 0 && !(slaveOk && mode == Secondary) || slavesLen > 0 && slaveOk { + break + } + if mastersLen > 0 && mode == Secondary && cluster.masters.HasMongos() { break } if started.IsZero() { diff -Nru juju-core-2.0~beta15/src/gopkg.in/mgo.v2/cluster_test.go juju-core-2.0.0/src/gopkg.in/mgo.v2/cluster_test.go --- juju-core-2.0~beta15/src/gopkg.in/mgo.v2/cluster_test.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/mgo.v2/cluster_test.go 2016-10-13 14:32:16.000000000 +0000 @@ -1366,6 +1366,118 @@ c.Check(slaveDelta, Equals, 5) // The counting for both, plus 5 queries above. } +func (s *S) TestSecondaryModeWithMongos(c *C) { + session, err := mgo.Dial("localhost:40021") + c.Assert(err, IsNil) + defer session.Close() + + ssresult := &struct{ Host string }{} + imresult := &struct{ IsMaster bool }{} + + // Figure the master while still using the strong session. + err = session.Run("serverStatus", ssresult) + c.Assert(err, IsNil) + err = session.Run("isMaster", imresult) + c.Assert(err, IsNil) + master := ssresult.Host + c.Assert(imresult.IsMaster, Equals, true, Commentf("%s is not the master", master)) + + // Ensure mongos is aware about the current topology. + s.Stop(":40201") + s.StartAll() + + mongos, err := mgo.Dial("localhost:40202") + c.Assert(err, IsNil) + defer mongos.Close() + + mongos.SetSyncTimeout(5 * time.Second) + + // Insert some data as otherwise 3.2+ doesn't seem to run the query at all. + err = mongos.DB("mydb").C("mycoll").Insert(bson.M{"n": 1}) + c.Assert(err, IsNil) + + // Wait until all servers see the data. 
+ for _, addr := range []string{"localhost:40021", "localhost:40022", "localhost:40023"} { + session, err := mgo.Dial(addr + "?connect=direct") + c.Assert(err, IsNil) + defer session.Close() + session.SetMode(mgo.Monotonic, true) + for i := 300; i >= 0; i-- { + n, err := session.DB("mydb").C("mycoll").Find(nil).Count() + c.Assert(err, IsNil) + if n == 1 { + break + } + if i == 0 { + c.Fatalf("Inserted data never reached " + addr) + } + time.Sleep(100 * time.Millisecond) + } + } + + // Collect op counters for everyone. + q21a := s.countQueries(c, "localhost:40021") + q22a := s.countQueries(c, "localhost:40022") + q23a := s.countQueries(c, "localhost:40023") + + // Do a Secondary query through MongoS + + mongos.SetMode(mgo.Secondary, true) + + coll := mongos.DB("mydb").C("mycoll") + var result struct{ N int } + for i := 0; i != 5; i++ { + err = coll.Find(nil).One(&result) + c.Assert(err, IsNil) + c.Assert(result.N, Equals, 1) + } + + // Collect op counters for everyone again. + q21b := s.countQueries(c, "localhost:40021") + q22b := s.countQueries(c, "localhost:40022") + q23b := s.countQueries(c, "localhost:40023") + + var masterDelta, slaveDelta int + switch hostPort(master) { + case "40021": + masterDelta = q21b - q21a + slaveDelta = (q22b - q22a) + (q23b - q23a) + case "40022": + masterDelta = q22b - q22a + slaveDelta = (q21b - q21a) + (q23b - q23a) + case "40023": + masterDelta = q23b - q23a + slaveDelta = (q21b - q21a) + (q22b - q22a) + default: + c.Fatal("Uh?") + } + + c.Check(masterDelta, Equals, 0) // Just the counting itself. + c.Check(slaveDelta, Equals, 5) // The counting for both, plus 5 queries above. 
+} + +func (s *S) TestSecondaryModeWithMongosInsert(c *C) { + if *fast { + c.Skip("-fast") + } + + session, err := mgo.Dial("localhost:40202") + c.Assert(err, IsNil) + defer session.Close() + + session.SetMode(mgo.Secondary, true) + session.SetSyncTimeout(4 * time.Second) + + coll := session.DB("mydb").C("mycoll") + err = coll.Insert(M{"a": 1}) + c.Assert(err, IsNil) + + var result struct{ A int } + coll.Find(nil).One(&result) + c.Assert(result.A, Equals, 1) +} + + func (s *S) TestRemovalOfClusterMember(c *C) { if *fast { c.Skip("-fast") diff -Nru juju-core-2.0~beta15/src/gopkg.in/mgo.v2/gridfs.go juju-core-2.0.0/src/gopkg.in/mgo.v2/gridfs.go --- juju-core-2.0~beta15/src/gopkg.in/mgo.v2/gridfs.go 2016-08-16 08:56:25.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/mgo.v2/gridfs.go 2016-10-13 14:32:16.000000000 +0000 @@ -359,7 +359,7 @@ // SetChunkSize sets size of saved chunks. Once the file is written to, it // will be split in blocks of that size and each block saved into an -// independent chunk document. The default chunk size is 256kb. +// independent chunk document. The default chunk size is 255kb. // // It is a runtime error to call this function once the file has started // being written to. 
diff -Nru juju-core-2.0~beta15/src/gopkg.in/mgo.v2/harness/certs/client.crt juju-core-2.0.0/src/gopkg.in/mgo.v2/harness/certs/client.crt --- juju-core-2.0~beta15/src/gopkg.in/mgo.v2/harness/certs/client.crt 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/mgo.v2/harness/certs/client.crt 2016-10-13 14:32:16.000000000 +0000 @@ -0,0 +1,20 @@ +-----BEGIN CERTIFICATE----- +MIIDLjCCAhYCAQcwDQYJKoZIhvcNAQELBQAwXDELMAkGA1UEBhMCR08xDDAKBgNV +BAgMA01HTzEMMAoGA1UEBwwDTUdPMQwwCgYDVQQKDANNR08xDzANBgNVBAsMBlNl +cnZlcjESMBAGA1UEAwwJbG9jYWxob3N0MCAXDTE1MDkyOTA4NDAzMFoYDzIxMTUw +OTA1MDg0MDMwWjBcMQswCQYDVQQGEwJHTzEMMAoGA1UECAwDTUdPMQwwCgYDVQQH +DANNR08xDDAKBgNVBAoMA01HTzEPMA0GA1UECwwGQ2xpZW50MRIwEAYDVQQDDAls +b2NhbGhvc3QwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC0UiQhmT+H +4IIqrn8SMESDzvcl3rwImwUoRIHlmXkovCIZCbvBCJ1nAu6X5zIN89EPPOjfNrgZ +616wPgVV/YEQXp+D7+jTAsE5s8JepRXFdecResmvh/+0i2DSuI4QFsuyVAPM1O0I +AQ5EKgr0weZZmsX6lhPD4uYehV4DxDE0i/8aTAlDoNgRCAJrYFMharRTDdY7bQzd +7ZYab/pK/3DSmOKxl/AFJ8Enmcj9w1bsvy0fgAgoGEBnBru80PRFpFiqk72TJkXO +Hx7zcYFpegtKPbAreTCModaCnjP//fskCp4XJrkfH5+01NeeX/r1OfEbjgE/wzzx +l8NaWnPCmxNfAgMBAAEwDQYJKoZIhvcNAQELBQADggEBAFwYpje3dCLDOIHYjd+5 +CpFOEb+bJsS4ryqm/NblTjIhCLo58hNpMsBqdJHRbHAFRCOE8fvY8yiWtdHeFZcW +DgVRAXfHONLtN7faZaZQnhy/YzOhLfC/8dUMB0gQA8KXhBCPZqQmexE28AfkEO47 +PwICAxIWINfjm5VnFMkA3b7bDNLHon/pev2m7HqVQ3pRUJQNK3XgFOdDgRrnuXpR +OKAfHORHVGTh1gf1DVwc0oM+0gnkSiJ1VG0n5pE3zhZ24fmZxu6JQ6X515W7APQI +/nKVH+f1Fo+ustyTNLt8Bwxi1XmwT7IXwnkVSE9Ff6VejppXRF01V0aaWsa3kU3r +z3A= +-----END CERTIFICATE----- diff -Nru juju-core-2.0~beta15/src/gopkg.in/mgo.v2/harness/certs/client.key juju-core-2.0.0/src/gopkg.in/mgo.v2/harness/certs/client.key --- juju-core-2.0~beta15/src/gopkg.in/mgo.v2/harness/certs/client.key 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/mgo.v2/harness/certs/client.key 2016-10-13 14:32:16.000000000 +0000 @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- 
+MIIEogIBAAKCAQEAtFIkIZk/h+CCKq5/EjBEg873Jd68CJsFKESB5Zl5KLwiGQm7 +wQidZwLul+cyDfPRDzzo3za4GetesD4FVf2BEF6fg+/o0wLBObPCXqUVxXXnEXrJ +r4f/tItg0riOEBbLslQDzNTtCAEORCoK9MHmWZrF+pYTw+LmHoVeA8QxNIv/GkwJ +Q6DYEQgCa2BTIWq0Uw3WO20M3e2WGm/6Sv9w0pjisZfwBSfBJ5nI/cNW7L8tH4AI +KBhAZwa7vND0RaRYqpO9kyZFzh8e83GBaXoLSj2wK3kwjKHWgp4z//37JAqeFya5 +Hx+ftNTXnl/69TnxG44BP8M88ZfDWlpzwpsTXwIDAQABAoIBADzCjOAxZkHfuZyu +La0wTHXpkEfXdJ6ltagq5WY7P6MlOYwcRoK152vlhgXzZl9jL6ely4YjRwec0swq +KdwezpV4fOGVPmuTuw45bx47HEnr/49ZQ4p9FgF9EYQPofbz53FQc/NaMACJcogv +bn+osniw+VMFrOVNmGLiZ5p3Smk8zfXE7GRHO8CL5hpWLWO/aK236yytbfWOjM2f +Pr76ICb26TPRNzYaYUEThU6DtgdLU8pLnJ6QKKaDsjn+zqQzRa+Nvc0c0K8gvWwA +Afq7t0325+uMSwfpLgCOFldcaZQ5uvteJ0CAVRq1MvStnSHBmMzPlgS+NzsDm6lp +QH5+rIkCgYEA5j3jrWsv7TueTNbk8Hr/Zwywc+fA2Ex0pBURBHlHyc6ahSXWSCqo +DtvRGX0GDoK1lCfaIf1qb/DLlGaoHpkEeqcNhXQ+hHs+bZAxfbfBY9+ikit5ZTtl +QN1tIlhaiyLDnwhkpi/hMw1tiouxJUf84Io61z0sCL4hyZSPCpjn0H0CgYEAyH6F +Mwl+bCD3VDL/Dr5WSoOr2B/M3bF5SfvdStwy2IPcDJ716je1Ud/2qFCnKGgqvWhJ ++HU15c7CjAWo7/pXq2/pEMD8fDKTYww4Hr4p6duEA7DpbOGkwcUX8u3eknxUWT9F +jOSbTCvAxuDOC1K3AElyMxVVTNUrFFe8M84R9gsCgYBXmb6RkdG3WlKde7m5gaLB +K4PLZabq5RQQBe/mmtpkfxYtiLrh1FEC7kG9h+MRDExX5V3KRugDVUOv3+shUSjy +HbM4ToUm1NloyE78PTj4bfMl2CKlEJcyucy3H5S7kWuKi5/31wnA6d/+sa2huKUP +Lai7kgu5+9VRJBPUfV7d5QKBgCnhk/13TDtWH5QtGu5/gBMMskbxTaA5xHZZ8H4E +xXJJCRxx0Dje7jduK145itF8AQGT2W/XPC0HJciOHh4TE2EyfWMMjTF8dyFHmimB +28uIGWmT+Q7Pi9UWUMxkOAwtgIksGGE4F+CvexOQPjpLSwL6VKqrGCh2lwsm0J+Z +ulLFAoGAKlC93c6XEj1A31c1+usdEhUe9BrmTqtSYLYpDNpeMLdZ3VctrAZuOQPZ +4A4gkkQkqqwZGBYYSEqwqiLU6MsBdHPPZ9u3JXLLOQuh1xGeaKylvHj7qx6iT0Xo +I+FkJ6/3JeMgOina/+wlzD4oyQpqR4Mnh+TuLkDfQTgY+Lg0WPk= +-----END RSA PRIVATE KEY----- diff -Nru juju-core-2.0~beta15/src/gopkg.in/mgo.v2/harness/certs/client.pem juju-core-2.0.0/src/gopkg.in/mgo.v2/harness/certs/client.pem --- juju-core-2.0~beta15/src/gopkg.in/mgo.v2/harness/certs/client.pem 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/mgo.v2/harness/certs/client.pem 2016-10-13 
14:32:16.000000000 +0000 @@ -0,0 +1,57 @@ +To regenerate the key: + + openssl req -newkey rsa:2048 -new -x509 -days 36500 -nodes -out server.crt -keyout server.key + cat server.key server.crt > server.pem + openssl genrsa -out client.key 2048 + openssl req -key client.key -new -out client.req + openssl x509 -req -in client.req -CA server.crt -CAkey server.key -days 36500 -CAserial file.srl -out client.crt + cat client.key client.crt > client.pem + +-----BEGIN RSA PRIVATE KEY----- +MIIEogIBAAKCAQEAtFIkIZk/h+CCKq5/EjBEg873Jd68CJsFKESB5Zl5KLwiGQm7 +wQidZwLul+cyDfPRDzzo3za4GetesD4FVf2BEF6fg+/o0wLBObPCXqUVxXXnEXrJ +r4f/tItg0riOEBbLslQDzNTtCAEORCoK9MHmWZrF+pYTw+LmHoVeA8QxNIv/GkwJ +Q6DYEQgCa2BTIWq0Uw3WO20M3e2WGm/6Sv9w0pjisZfwBSfBJ5nI/cNW7L8tH4AI +KBhAZwa7vND0RaRYqpO9kyZFzh8e83GBaXoLSj2wK3kwjKHWgp4z//37JAqeFya5 +Hx+ftNTXnl/69TnxG44BP8M88ZfDWlpzwpsTXwIDAQABAoIBADzCjOAxZkHfuZyu +La0wTHXpkEfXdJ6ltagq5WY7P6MlOYwcRoK152vlhgXzZl9jL6ely4YjRwec0swq +KdwezpV4fOGVPmuTuw45bx47HEnr/49ZQ4p9FgF9EYQPofbz53FQc/NaMACJcogv +bn+osniw+VMFrOVNmGLiZ5p3Smk8zfXE7GRHO8CL5hpWLWO/aK236yytbfWOjM2f +Pr76ICb26TPRNzYaYUEThU6DtgdLU8pLnJ6QKKaDsjn+zqQzRa+Nvc0c0K8gvWwA +Afq7t0325+uMSwfpLgCOFldcaZQ5uvteJ0CAVRq1MvStnSHBmMzPlgS+NzsDm6lp +QH5+rIkCgYEA5j3jrWsv7TueTNbk8Hr/Zwywc+fA2Ex0pBURBHlHyc6ahSXWSCqo +DtvRGX0GDoK1lCfaIf1qb/DLlGaoHpkEeqcNhXQ+hHs+bZAxfbfBY9+ikit5ZTtl +QN1tIlhaiyLDnwhkpi/hMw1tiouxJUf84Io61z0sCL4hyZSPCpjn0H0CgYEAyH6F +Mwl+bCD3VDL/Dr5WSoOr2B/M3bF5SfvdStwy2IPcDJ716je1Ud/2qFCnKGgqvWhJ ++HU15c7CjAWo7/pXq2/pEMD8fDKTYww4Hr4p6duEA7DpbOGkwcUX8u3eknxUWT9F +jOSbTCvAxuDOC1K3AElyMxVVTNUrFFe8M84R9gsCgYBXmb6RkdG3WlKde7m5gaLB +K4PLZabq5RQQBe/mmtpkfxYtiLrh1FEC7kG9h+MRDExX5V3KRugDVUOv3+shUSjy +HbM4ToUm1NloyE78PTj4bfMl2CKlEJcyucy3H5S7kWuKi5/31wnA6d/+sa2huKUP +Lai7kgu5+9VRJBPUfV7d5QKBgCnhk/13TDtWH5QtGu5/gBMMskbxTaA5xHZZ8H4E +xXJJCRxx0Dje7jduK145itF8AQGT2W/XPC0HJciOHh4TE2EyfWMMjTF8dyFHmimB +28uIGWmT+Q7Pi9UWUMxkOAwtgIksGGE4F+CvexOQPjpLSwL6VKqrGCh2lwsm0J+Z 
+ulLFAoGAKlC93c6XEj1A31c1+usdEhUe9BrmTqtSYLYpDNpeMLdZ3VctrAZuOQPZ +4A4gkkQkqqwZGBYYSEqwqiLU6MsBdHPPZ9u3JXLLOQuh1xGeaKylvHj7qx6iT0Xo +I+FkJ6/3JeMgOina/+wlzD4oyQpqR4Mnh+TuLkDfQTgY+Lg0WPk= +-----END RSA PRIVATE KEY----- +-----BEGIN CERTIFICATE----- +MIIDLjCCAhYCAQcwDQYJKoZIhvcNAQELBQAwXDELMAkGA1UEBhMCR08xDDAKBgNV +BAgMA01HTzEMMAoGA1UEBwwDTUdPMQwwCgYDVQQKDANNR08xDzANBgNVBAsMBlNl +cnZlcjESMBAGA1UEAwwJbG9jYWxob3N0MCAXDTE1MDkyOTA4NDAzMFoYDzIxMTUw +OTA1MDg0MDMwWjBcMQswCQYDVQQGEwJHTzEMMAoGA1UECAwDTUdPMQwwCgYDVQQH +DANNR08xDDAKBgNVBAoMA01HTzEPMA0GA1UECwwGQ2xpZW50MRIwEAYDVQQDDAls +b2NhbGhvc3QwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC0UiQhmT+H +4IIqrn8SMESDzvcl3rwImwUoRIHlmXkovCIZCbvBCJ1nAu6X5zIN89EPPOjfNrgZ +616wPgVV/YEQXp+D7+jTAsE5s8JepRXFdecResmvh/+0i2DSuI4QFsuyVAPM1O0I +AQ5EKgr0weZZmsX6lhPD4uYehV4DxDE0i/8aTAlDoNgRCAJrYFMharRTDdY7bQzd +7ZYab/pK/3DSmOKxl/AFJ8Enmcj9w1bsvy0fgAgoGEBnBru80PRFpFiqk72TJkXO +Hx7zcYFpegtKPbAreTCModaCnjP//fskCp4XJrkfH5+01NeeX/r1OfEbjgE/wzzx +l8NaWnPCmxNfAgMBAAEwDQYJKoZIhvcNAQELBQADggEBAFwYpje3dCLDOIHYjd+5 +CpFOEb+bJsS4ryqm/NblTjIhCLo58hNpMsBqdJHRbHAFRCOE8fvY8yiWtdHeFZcW +DgVRAXfHONLtN7faZaZQnhy/YzOhLfC/8dUMB0gQA8KXhBCPZqQmexE28AfkEO47 +PwICAxIWINfjm5VnFMkA3b7bDNLHon/pev2m7HqVQ3pRUJQNK3XgFOdDgRrnuXpR +OKAfHORHVGTh1gf1DVwc0oM+0gnkSiJ1VG0n5pE3zhZ24fmZxu6JQ6X515W7APQI +/nKVH+f1Fo+ustyTNLt8Bwxi1XmwT7IXwnkVSE9Ff6VejppXRF01V0aaWsa3kU3r +z3A= +-----END CERTIFICATE----- + diff -Nru juju-core-2.0~beta15/src/gopkg.in/mgo.v2/harness/certs/client.req juju-core-2.0.0/src/gopkg.in/mgo.v2/harness/certs/client.req --- juju-core-2.0~beta15/src/gopkg.in/mgo.v2/harness/certs/client.req 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/mgo.v2/harness/certs/client.req 2016-10-13 14:32:16.000000000 +0000 @@ -0,0 +1,17 @@ +-----BEGIN CERTIFICATE REQUEST----- +MIICoTCCAYkCAQAwXDELMAkGA1UEBhMCR08xDDAKBgNVBAgMA01HTzEMMAoGA1UE +BwwDTUdPMQwwCgYDVQQKDANNR08xDzANBgNVBAsMBkNsaWVudDESMBAGA1UEAwwJ +bG9jYWxob3N0MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAtFIkIZk/ 
+h+CCKq5/EjBEg873Jd68CJsFKESB5Zl5KLwiGQm7wQidZwLul+cyDfPRDzzo3za4 +GetesD4FVf2BEF6fg+/o0wLBObPCXqUVxXXnEXrJr4f/tItg0riOEBbLslQDzNTt +CAEORCoK9MHmWZrF+pYTw+LmHoVeA8QxNIv/GkwJQ6DYEQgCa2BTIWq0Uw3WO20M +3e2WGm/6Sv9w0pjisZfwBSfBJ5nI/cNW7L8tH4AIKBhAZwa7vND0RaRYqpO9kyZF +zh8e83GBaXoLSj2wK3kwjKHWgp4z//37JAqeFya5Hx+ftNTXnl/69TnxG44BP8M8 +8ZfDWlpzwpsTXwIDAQABoAAwDQYJKoZIhvcNAQELBQADggEBAKbOFblIscxlXalV +sEGNm2oz380RN2QoLhN6nKtAiv0jWm6iKhdAhOIQIeaRPhUP3cyi8bcBvLdMeQ3d +ZYIByB55/R0VSP1vs4qkXJCQegHcpMpyuIzsMV8p3Q4lxzGKyKtPA6Bb5c49p8Sk +ncD+LL4ymrMEia4cBPsHL9hhFOm4gqDacbU8+ETLTpuoSvUZiw7OwngqhE2r+kMv +KDweq5TOPeb+ftKzQKrrfB+XVdBoTKYw6CwARpogbc0/7mvottVcJ/0yAgC1fBbM +vupkohkXwKfjxKl6nKNL3R2GkzHQOh91hglAx5zyybKQn2YMM328Vk4X6csBg+pg +tb1s0MA= +-----END CERTIFICATE REQUEST----- diff -Nru juju-core-2.0~beta15/src/gopkg.in/mgo.v2/harness/certs/server.crt juju-core-2.0.0/src/gopkg.in/mgo.v2/harness/certs/server.crt --- juju-core-2.0~beta15/src/gopkg.in/mgo.v2/harness/certs/server.crt 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/mgo.v2/harness/certs/server.crt 2016-10-13 14:32:16.000000000 +0000 @@ -0,0 +1,22 @@ +-----BEGIN CERTIFICATE----- +MIIDjTCCAnWgAwIBAgIJAMW+wDfcdzC+MA0GCSqGSIb3DQEBCwUAMFwxCzAJBgNV +BAYTAkdPMQwwCgYDVQQIDANNR08xDDAKBgNVBAcMA01HTzEMMAoGA1UECgwDTUdP +MQ8wDQYDVQQLDAZTZXJ2ZXIxEjAQBgNVBAMMCWxvY2FsaG9zdDAgFw0xNTA5Mjkw +ODM0MTBaGA8yMTE1MDkwNTA4MzQxMFowXDELMAkGA1UEBhMCR08xDDAKBgNVBAgM +A01HTzEMMAoGA1UEBwwDTUdPMQwwCgYDVQQKDANNR08xDzANBgNVBAsMBlNlcnZl +cjESMBAGA1UEAwwJbG9jYWxob3N0MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB +CgKCAQEA/T5W1vTsAF+2gTXP1JKygjM7T/2BXHiJc6DRKVjlshTtPYuC3rpTddDm +6d86d17LWEo+T2bCT4MzZJhSGAun9peFvehdElRMr57xs7j5V1QYjwadMTBkLQuK +IAg6cISN1KPUzpUTUKsWIsbx97sA0t0wiEPifROb7nfSMIVQsdz/c9LlY2UNYI+5 +GiU88iDGg2wrdsa3U+l2G2KSx/9uE3c5iFki6bdequLiWmBZ6rxfoaLe4gk1INji +fKssNsn2i3uJ4i4Tmr3PUc4kxx0mMKuWK3HdlQsMqtpq++HQmHSvsPrbgcjl9HyP +JiHDsoJ+4O5bbtcE51oQbLh1bZAhYwIDAQABo1AwTjAdBgNVHQ4EFgQUhku/u9Kd 
+OAc1L0OR649vCCuQT+0wHwYDVR0jBBgwFoAUhku/u9KdOAc1L0OR649vCCuQT+0w +DAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEAw7Bgw3hlWXWSZjLhnSOu +2mW/UJ2Sj31unHngmgtXwW/04cyzoULb+qmzPe/Z06QMgGIsku1jFBcu0JabQtUG +TyalpfW77tfnvz238CYdImYwE9ZcIGuZGfhs6ySFN9XpW43B8YM7R8wTNPvOcSPw +nfjqU6kueN4TTspQg9cKhDss5DcMTIdgJgLbITXhIsrCu6GlKOgtX3HrdMGpQX7s +UoMXtZVG8pK32vxKWGTZ6DPqESeKjjq74NbYnB3H5U/kDU2dt7LF90C/Umdr9y+C +W2OJb1WBrf6RTcbt8D6d7P9kOfLPOtyn/cbaA/pfXBMQMHqr7XNXzjnaNU+jB7hL +yQ== +-----END CERTIFICATE----- diff -Nru juju-core-2.0~beta15/src/gopkg.in/mgo.v2/harness/certs/server.key juju-core-2.0.0/src/gopkg.in/mgo.v2/harness/certs/server.key --- juju-core-2.0~beta15/src/gopkg.in/mgo.v2/harness/certs/server.key 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/mgo.v2/harness/certs/server.key 2016-10-13 14:32:16.000000000 +0000 @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQD9PlbW9OwAX7aB +Nc/UkrKCMztP/YFceIlzoNEpWOWyFO09i4LeulN10Obp3zp3XstYSj5PZsJPgzNk +mFIYC6f2l4W96F0SVEyvnvGzuPlXVBiPBp0xMGQtC4ogCDpwhI3Uo9TOlRNQqxYi +xvH3uwDS3TCIQ+J9E5vud9IwhVCx3P9z0uVjZQ1gj7kaJTzyIMaDbCt2xrdT6XYb +YpLH/24TdzmIWSLpt16q4uJaYFnqvF+hot7iCTUg2OJ8qyw2yfaLe4niLhOavc9R +ziTHHSYwq5Yrcd2VCwyq2mr74dCYdK+w+tuByOX0fI8mIcOygn7g7ltu1wTnWhBs +uHVtkCFjAgMBAAECggEASRAfRc1L+Z+jrAu2doIMdnwJdL6S//bW0UFolyFKw+I9 +wC/sBg6D3c3zkS4SVDZJPKPO7mGbVg1oWnGH3eAfCYoV0ACmOY+QwGp/GXcYmRVu +MHWcDIEFpelaZHt7QNM9iEfsMd3YwMFblZUIYozVZADk66uKQMPTjS2Muur7qRSi +wuVfSmsVZ5afH3B1Tr96BbmPsHrXLjvNpjO44k2wrnnSPQjUL7+YiZPvtnNW8Fby +yuo2uoAyjg3+68PYZftOvvNneMsv1uyGlUs6Bk+DVWaqofIztWFdFZyXbHnK2PTk +eGQt5EsL+RwIck5eoqd5vSE+KyzhhydL0zcpngVQoQKBgQD/Yelvholbz5NQtSy3 +ZoiW1y7hL1BKzvVNHuAMKJ5WOnj5szhjhKxt/wZ+hk0qcAmlV9WAPbf4izbEwPRC +tnMBQzf1uBxqqbLL6WZ4YAyGrcX3UrT7GXsGfVT4zJjz7oYSw8aPircecw5V4exB +xa4NF+ki8IycXSkHwvW2R56fRwKBgQD92xpxXtte/rUnmENbQmr0aKg7JEfMoih6 +MdX+f6mfgjMmqj+L4jPTI8/ql8HEy13SQS1534aDSHO+nBqBK5aHUCRMIgSLnTP9 
+Xyx9Ngg03SZIkPfykqxQmnZgWkTPMhYS+K1Ao9FGVs8W5jVi7veyAdhHptAcxhP3 +IuxvrxVTBQKBgQCluMPiu0snaOwP04HRAZhhSgIB3tIbuXE1OnPpb/JPwmH+p25Q +Jig+uN9d+4jXoRyhTv4c2fAoOS6xPwVCxWKbzyLhMTg/fx+ncy4rryhxvRJaDDGl +QEO1Ul9xlFMs9/vI8YJIY5uxBrimwpStmbn4hSukoLSeQ1X802bfglpMwQKBgD8z +GTY4Y20XBIrDAaHquy32EEwJEEcF6AXj+l7N8bDgfVOW9xMgUb6zH8RL29Xeu5Do +4SWCXL66fvZpbr/R1jwB28eIgJExpgvicfUKSqi+lhVi4hfmJDg8/FOopZDf61b1 +ykxZfHSCkDQnRAtJaylKBEpyYUWImtfgPfTgJfLxAoGAc8A/Tl2h/DsdTA+cA5d7 +1e0l64m13ObruSWRczyru4hy8Yq6E/K2rOFw8cYCcFpy24NqNlk+2iXPLRpWm2zt +9R497zAPvhK/bfPXjvm0j/VjB44lvRTC9hby/RRMHy9UJk4o/UQaD+1IodxZovvk +SruEA1+5bfBRMW0P+h7Qfe4= +-----END PRIVATE KEY----- diff -Nru juju-core-2.0~beta15/src/gopkg.in/mgo.v2/harness/certs/server.pem juju-core-2.0.0/src/gopkg.in/mgo.v2/harness/certs/server.pem --- juju-core-2.0~beta15/src/gopkg.in/mgo.v2/harness/certs/server.pem 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/mgo.v2/harness/certs/server.pem 2016-10-13 14:32:16.000000000 +0000 @@ -0,0 +1,50 @@ +-----BEGIN PRIVATE KEY----- +MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQD9PlbW9OwAX7aB +Nc/UkrKCMztP/YFceIlzoNEpWOWyFO09i4LeulN10Obp3zp3XstYSj5PZsJPgzNk +mFIYC6f2l4W96F0SVEyvnvGzuPlXVBiPBp0xMGQtC4ogCDpwhI3Uo9TOlRNQqxYi +xvH3uwDS3TCIQ+J9E5vud9IwhVCx3P9z0uVjZQ1gj7kaJTzyIMaDbCt2xrdT6XYb +YpLH/24TdzmIWSLpt16q4uJaYFnqvF+hot7iCTUg2OJ8qyw2yfaLe4niLhOavc9R +ziTHHSYwq5Yrcd2VCwyq2mr74dCYdK+w+tuByOX0fI8mIcOygn7g7ltu1wTnWhBs +uHVtkCFjAgMBAAECggEASRAfRc1L+Z+jrAu2doIMdnwJdL6S//bW0UFolyFKw+I9 +wC/sBg6D3c3zkS4SVDZJPKPO7mGbVg1oWnGH3eAfCYoV0ACmOY+QwGp/GXcYmRVu +MHWcDIEFpelaZHt7QNM9iEfsMd3YwMFblZUIYozVZADk66uKQMPTjS2Muur7qRSi +wuVfSmsVZ5afH3B1Tr96BbmPsHrXLjvNpjO44k2wrnnSPQjUL7+YiZPvtnNW8Fby +yuo2uoAyjg3+68PYZftOvvNneMsv1uyGlUs6Bk+DVWaqofIztWFdFZyXbHnK2PTk +eGQt5EsL+RwIck5eoqd5vSE+KyzhhydL0zcpngVQoQKBgQD/Yelvholbz5NQtSy3 +ZoiW1y7hL1BKzvVNHuAMKJ5WOnj5szhjhKxt/wZ+hk0qcAmlV9WAPbf4izbEwPRC +tnMBQzf1uBxqqbLL6WZ4YAyGrcX3UrT7GXsGfVT4zJjz7oYSw8aPircecw5V4exB 
+xa4NF+ki8IycXSkHwvW2R56fRwKBgQD92xpxXtte/rUnmENbQmr0aKg7JEfMoih6 +MdX+f6mfgjMmqj+L4jPTI8/ql8HEy13SQS1534aDSHO+nBqBK5aHUCRMIgSLnTP9 +Xyx9Ngg03SZIkPfykqxQmnZgWkTPMhYS+K1Ao9FGVs8W5jVi7veyAdhHptAcxhP3 +IuxvrxVTBQKBgQCluMPiu0snaOwP04HRAZhhSgIB3tIbuXE1OnPpb/JPwmH+p25Q +Jig+uN9d+4jXoRyhTv4c2fAoOS6xPwVCxWKbzyLhMTg/fx+ncy4rryhxvRJaDDGl +QEO1Ul9xlFMs9/vI8YJIY5uxBrimwpStmbn4hSukoLSeQ1X802bfglpMwQKBgD8z +GTY4Y20XBIrDAaHquy32EEwJEEcF6AXj+l7N8bDgfVOW9xMgUb6zH8RL29Xeu5Do +4SWCXL66fvZpbr/R1jwB28eIgJExpgvicfUKSqi+lhVi4hfmJDg8/FOopZDf61b1 +ykxZfHSCkDQnRAtJaylKBEpyYUWImtfgPfTgJfLxAoGAc8A/Tl2h/DsdTA+cA5d7 +1e0l64m13ObruSWRczyru4hy8Yq6E/K2rOFw8cYCcFpy24NqNlk+2iXPLRpWm2zt +9R497zAPvhK/bfPXjvm0j/VjB44lvRTC9hby/RRMHy9UJk4o/UQaD+1IodxZovvk +SruEA1+5bfBRMW0P+h7Qfe4= +-----END PRIVATE KEY----- +-----BEGIN CERTIFICATE----- +MIIDjTCCAnWgAwIBAgIJAMW+wDfcdzC+MA0GCSqGSIb3DQEBCwUAMFwxCzAJBgNV +BAYTAkdPMQwwCgYDVQQIDANNR08xDDAKBgNVBAcMA01HTzEMMAoGA1UECgwDTUdP +MQ8wDQYDVQQLDAZTZXJ2ZXIxEjAQBgNVBAMMCWxvY2FsaG9zdDAgFw0xNTA5Mjkw +ODM0MTBaGA8yMTE1MDkwNTA4MzQxMFowXDELMAkGA1UEBhMCR08xDDAKBgNVBAgM +A01HTzEMMAoGA1UEBwwDTUdPMQwwCgYDVQQKDANNR08xDzANBgNVBAsMBlNlcnZl +cjESMBAGA1UEAwwJbG9jYWxob3N0MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB +CgKCAQEA/T5W1vTsAF+2gTXP1JKygjM7T/2BXHiJc6DRKVjlshTtPYuC3rpTddDm +6d86d17LWEo+T2bCT4MzZJhSGAun9peFvehdElRMr57xs7j5V1QYjwadMTBkLQuK +IAg6cISN1KPUzpUTUKsWIsbx97sA0t0wiEPifROb7nfSMIVQsdz/c9LlY2UNYI+5 +GiU88iDGg2wrdsa3U+l2G2KSx/9uE3c5iFki6bdequLiWmBZ6rxfoaLe4gk1INji +fKssNsn2i3uJ4i4Tmr3PUc4kxx0mMKuWK3HdlQsMqtpq++HQmHSvsPrbgcjl9HyP +JiHDsoJ+4O5bbtcE51oQbLh1bZAhYwIDAQABo1AwTjAdBgNVHQ4EFgQUhku/u9Kd +OAc1L0OR649vCCuQT+0wHwYDVR0jBBgwFoAUhku/u9KdOAc1L0OR649vCCuQT+0w +DAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEAw7Bgw3hlWXWSZjLhnSOu +2mW/UJ2Sj31unHngmgtXwW/04cyzoULb+qmzPe/Z06QMgGIsku1jFBcu0JabQtUG +TyalpfW77tfnvz238CYdImYwE9ZcIGuZGfhs6ySFN9XpW43B8YM7R8wTNPvOcSPw +nfjqU6kueN4TTspQg9cKhDss5DcMTIdgJgLbITXhIsrCu6GlKOgtX3HrdMGpQX7s +UoMXtZVG8pK32vxKWGTZ6DPqESeKjjq74NbYnB3H5U/kDU2dt7LF90C/Umdr9y+C 
+W2OJb1WBrf6RTcbt8D6d7P9kOfLPOtyn/cbaA/pfXBMQMHqr7XNXzjnaNU+jB7hL +yQ== +-----END CERTIFICATE----- Binary files /tmp/tmpDKp0Le/llc_GcQn6Y/juju-core-2.0~beta15/src/gopkg.in/mgo.v2/harness/daemons/cfg1/db/journal/tempLatencyTest and /tmp/tmpDKp0Le/DWZX6UDllP/juju-core-2.0.0/src/gopkg.in/mgo.v2/harness/daemons/cfg1/db/journal/tempLatencyTest differ diff -Nru juju-core-2.0~beta15/src/gopkg.in/mgo.v2/harness/daemons/cfg1/log/run juju-core-2.0.0/src/gopkg.in/mgo.v2/harness/daemons/cfg1/log/run --- juju-core-2.0~beta15/src/gopkg.in/mgo.v2/harness/daemons/cfg1/log/run 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/mgo.v2/harness/daemons/cfg1/log/run 2016-10-13 14:32:16.000000000 +0000 @@ -0,0 +1,3 @@ +#!/bin/sh + +exec cat - > log.txt diff -Nru juju-core-2.0~beta15/src/gopkg.in/mgo.v2/harness/daemons/cfg1/run juju-core-2.0.0/src/gopkg.in/mgo.v2/harness/daemons/cfg1/run --- juju-core-2.0~beta15/src/gopkg.in/mgo.v2/harness/daemons/cfg1/run 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/mgo.v2/harness/daemons/cfg1/run 2016-10-13 14:32:16.000000000 +0000 @@ -0,0 +1,8 @@ +#!/bin/sh + +. ../.env + +exec mongod $COMMONCOPTS \ + --port 40101 \ + --configsvr + diff -Nru juju-core-2.0~beta15/src/gopkg.in/mgo.v2/harness/daemons/cfg2/log/run juju-core-2.0.0/src/gopkg.in/mgo.v2/harness/daemons/cfg2/log/run --- juju-core-2.0~beta15/src/gopkg.in/mgo.v2/harness/daemons/cfg2/log/run 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/mgo.v2/harness/daemons/cfg2/log/run 2016-10-13 14:32:16.000000000 +0000 @@ -0,0 +1,3 @@ +#!/bin/sh + +exec cat - > log.txt diff -Nru juju-core-2.0~beta15/src/gopkg.in/mgo.v2/harness/daemons/cfg2/run juju-core-2.0.0/src/gopkg.in/mgo.v2/harness/daemons/cfg2/run --- juju-core-2.0~beta15/src/gopkg.in/mgo.v2/harness/daemons/cfg2/run 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/mgo.v2/harness/daemons/cfg2/run 2016-10-13 14:32:16.000000000 +0000 @@ -0,0 +1,8 @@ +#!/bin/sh + +. 
../.env + +exec mongod $COMMONCOPTS \ + --port 40102 \ + --configsvr + diff -Nru juju-core-2.0~beta15/src/gopkg.in/mgo.v2/harness/daemons/cfg3/log/run juju-core-2.0.0/src/gopkg.in/mgo.v2/harness/daemons/cfg3/log/run --- juju-core-2.0~beta15/src/gopkg.in/mgo.v2/harness/daemons/cfg3/log/run 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/mgo.v2/harness/daemons/cfg3/log/run 2016-10-13 14:32:16.000000000 +0000 @@ -0,0 +1,3 @@ +#!/bin/sh + +exec cat - > log.txt diff -Nru juju-core-2.0~beta15/src/gopkg.in/mgo.v2/harness/daemons/cfg3/run juju-core-2.0.0/src/gopkg.in/mgo.v2/harness/daemons/cfg3/run --- juju-core-2.0~beta15/src/gopkg.in/mgo.v2/harness/daemons/cfg3/run 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/mgo.v2/harness/daemons/cfg3/run 2016-10-13 14:32:16.000000000 +0000 @@ -0,0 +1,9 @@ +#!/bin/sh + +. ../.env + +exec mongod $COMMONCOPTS \ + --port 40103 \ + --configsvr \ + --auth \ + --keyFile=../../certs/keyfile diff -Nru juju-core-2.0~beta15/src/gopkg.in/mgo.v2/harness/daemons/db1/log/run juju-core-2.0.0/src/gopkg.in/mgo.v2/harness/daemons/db1/log/run --- juju-core-2.0~beta15/src/gopkg.in/mgo.v2/harness/daemons/db1/log/run 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/mgo.v2/harness/daemons/db1/log/run 2016-10-13 14:32:16.000000000 +0000 @@ -0,0 +1,3 @@ +#!/bin/sh + +exec cat - > log.txt diff -Nru juju-core-2.0~beta15/src/gopkg.in/mgo.v2/harness/daemons/db1/run juju-core-2.0.0/src/gopkg.in/mgo.v2/harness/daemons/db1/run --- juju-core-2.0~beta15/src/gopkg.in/mgo.v2/harness/daemons/db1/run 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/mgo.v2/harness/daemons/db1/run 2016-10-13 14:32:16.000000000 +0000 @@ -0,0 +1,15 @@ +#!/bin/sh + +. 
../.env + +if [ x$NOIPV6 = x1 ]; then + BINDIP="127.0.0.1" +else + BINDIP="127.0.0.1,::1" +fi + +exec mongod $COMMONDOPTSNOIP \ + --shardsvr \ + --bind_ip=$BINDIP \ + --port 40001 \ + --ipv6 diff -Nru juju-core-2.0~beta15/src/gopkg.in/mgo.v2/harness/daemons/db2/log/run juju-core-2.0.0/src/gopkg.in/mgo.v2/harness/daemons/db2/log/run --- juju-core-2.0~beta15/src/gopkg.in/mgo.v2/harness/daemons/db2/log/run 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/mgo.v2/harness/daemons/db2/log/run 2016-10-13 14:32:16.000000000 +0000 @@ -0,0 +1,3 @@ +#!/bin/sh + +exec cat - > log.txt diff -Nru juju-core-2.0~beta15/src/gopkg.in/mgo.v2/harness/daemons/db2/run juju-core-2.0.0/src/gopkg.in/mgo.v2/harness/daemons/db2/run --- juju-core-2.0~beta15/src/gopkg.in/mgo.v2/harness/daemons/db2/run 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/mgo.v2/harness/daemons/db2/run 2016-10-13 14:32:16.000000000 +0000 @@ -0,0 +1,8 @@ +#!/bin/sh + +. ../.env + +exec mongod $COMMONDOPTS \ + --shardsvr \ + --port 40002 \ + --auth diff -Nru juju-core-2.0~beta15/src/gopkg.in/mgo.v2/harness/daemons/db3/log/run juju-core-2.0.0/src/gopkg.in/mgo.v2/harness/daemons/db3/log/run --- juju-core-2.0~beta15/src/gopkg.in/mgo.v2/harness/daemons/db3/log/run 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/mgo.v2/harness/daemons/db3/log/run 2016-10-13 14:32:16.000000000 +0000 @@ -0,0 +1,3 @@ +#!/bin/sh + +exec cat - > log.txt diff -Nru juju-core-2.0~beta15/src/gopkg.in/mgo.v2/harness/daemons/db3/run juju-core-2.0.0/src/gopkg.in/mgo.v2/harness/daemons/db3/run --- juju-core-2.0~beta15/src/gopkg.in/mgo.v2/harness/daemons/db3/run 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/mgo.v2/harness/daemons/db3/run 2016-10-13 14:32:16.000000000 +0000 @@ -0,0 +1,12 @@ +#!/bin/sh + +. 
../.env + +exec mongod $COMMONDOPTS \ + --shardsvr \ + --port 40003 \ + --auth \ + --sslMode preferSSL \ + --sslCAFile ../../certs/server.pem \ + --sslPEMKeyFile ../../certs/server.pem + diff -Nru juju-core-2.0~beta15/src/gopkg.in/mgo.v2/harness/daemons/.env juju-core-2.0.0/src/gopkg.in/mgo.v2/harness/daemons/.env --- juju-core-2.0~beta15/src/gopkg.in/mgo.v2/harness/daemons/.env 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/mgo.v2/harness/daemons/.env 2016-10-13 14:32:16.000000000 +0000 @@ -0,0 +1,57 @@ + +set -e + +MONGOVERSION=$(mongod --version | sed -n 's/.*v\([0-9]\+\.[0-9]\+\)\..*/\1/p') +MONGOMAJOR=$(echo $MONGOVERSION | sed 's/\([0-9]\+\)\..*/\1/') +MONGOMINOR=$(echo $MONGOVERSION | sed 's/[0-9]\+\.\([0-9]\+\)/\1/') + +versionAtLeast() { + TESTMAJOR="$1" + TESTMINOR="$2" + if [ "$MONGOMAJOR" -gt "$TESTMAJOR" ]; then + return 0 + fi + if [ "$MONGOMAJOR" -lt "$TESTMAJOR" ]; then + return 100 + fi + if [ "$MONGOMINOR" -ge "$TESTMINOR" ]; then + return 0 + fi + return 100 +} + +COMMONDOPTSNOIP=" + --nohttpinterface + --noprealloc + --nojournal + --smallfiles + --nssize=1 + --oplogSize=1 + --dbpath ./db + " +COMMONDOPTS=" + $COMMONDOPTSNOIP + --bind_ip=127.0.0.1 + " +COMMONCOPTS=" + $COMMONDOPTS + " +COMMONSOPTS=" + --chunkSize 1 + --bind_ip=127.0.0.1 + " + +if versionAtLeast 3 2; then + # 3.2 doesn't like --nojournal on config servers. + #COMMONCOPTS="$(echo "$COMMONCOPTS" | sed '/--nojournal/d')" + # Using a hacked version of MongoDB 3.2 for now. + + # Go back to MMAPv1 so it's not super sluggish. 
:-( + COMMONDOPTSNOIP="--storageEngine=mmapv1 $COMMONDOPTSNOIP" + COMMONDOPTS="--storageEngine=mmapv1 $COMMONDOPTS" + COMMONCOPTS="--storageEngine=mmapv1 $COMMONCOPTS" +fi + +if [ "$TRAVIS" = true ]; then + set -x +fi diff -Nru juju-core-2.0~beta15/src/gopkg.in/mgo.v2/harness/daemons/rs1a/log/run juju-core-2.0.0/src/gopkg.in/mgo.v2/harness/daemons/rs1a/log/run --- juju-core-2.0~beta15/src/gopkg.in/mgo.v2/harness/daemons/rs1a/log/run 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/mgo.v2/harness/daemons/rs1a/log/run 2016-10-13 14:32:16.000000000 +0000 @@ -0,0 +1,3 @@ +#!/bin/sh + +exec cat - > log.txt diff -Nru juju-core-2.0~beta15/src/gopkg.in/mgo.v2/harness/daemons/rs1a/run juju-core-2.0.0/src/gopkg.in/mgo.v2/harness/daemons/rs1a/run --- juju-core-2.0~beta15/src/gopkg.in/mgo.v2/harness/daemons/rs1a/run 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/mgo.v2/harness/daemons/rs1a/run 2016-10-13 14:32:16.000000000 +0000 @@ -0,0 +1,8 @@ +#!/bin/sh + +. ../.env + +exec mongod $COMMONDOPTS \ + --shardsvr \ + --replSet rs1 \ + --port 40011 diff -Nru juju-core-2.0~beta15/src/gopkg.in/mgo.v2/harness/daemons/rs1b/log/run juju-core-2.0.0/src/gopkg.in/mgo.v2/harness/daemons/rs1b/log/run --- juju-core-2.0~beta15/src/gopkg.in/mgo.v2/harness/daemons/rs1b/log/run 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/mgo.v2/harness/daemons/rs1b/log/run 2016-10-13 14:32:16.000000000 +0000 @@ -0,0 +1,3 @@ +#!/bin/sh + +exec cat - > log.txt diff -Nru juju-core-2.0~beta15/src/gopkg.in/mgo.v2/harness/daemons/rs1b/run juju-core-2.0.0/src/gopkg.in/mgo.v2/harness/daemons/rs1b/run --- juju-core-2.0~beta15/src/gopkg.in/mgo.v2/harness/daemons/rs1b/run 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/mgo.v2/harness/daemons/rs1b/run 2016-10-13 14:32:16.000000000 +0000 @@ -0,0 +1,8 @@ +#!/bin/sh + +. 
../.env + +exec mongod $COMMONDOPTS \ + --shardsvr \ + --replSet rs1 \ + --port 40012 diff -Nru juju-core-2.0~beta15/src/gopkg.in/mgo.v2/harness/daemons/rs1c/log/run juju-core-2.0.0/src/gopkg.in/mgo.v2/harness/daemons/rs1c/log/run --- juju-core-2.0~beta15/src/gopkg.in/mgo.v2/harness/daemons/rs1c/log/run 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/mgo.v2/harness/daemons/rs1c/log/run 2016-10-13 14:32:16.000000000 +0000 @@ -0,0 +1,3 @@ +#!/bin/sh + +exec cat - > log.txt diff -Nru juju-core-2.0~beta15/src/gopkg.in/mgo.v2/harness/daemons/rs1c/run juju-core-2.0.0/src/gopkg.in/mgo.v2/harness/daemons/rs1c/run --- juju-core-2.0~beta15/src/gopkg.in/mgo.v2/harness/daemons/rs1c/run 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/mgo.v2/harness/daemons/rs1c/run 2016-10-13 14:32:16.000000000 +0000 @@ -0,0 +1,8 @@ +#!/bin/sh + +. ../.env + +exec mongod $COMMONDOPTS \ + --shardsvr \ + --replSet rs1 \ + --port 40013 diff -Nru juju-core-2.0~beta15/src/gopkg.in/mgo.v2/harness/daemons/rs2a/log/run juju-core-2.0.0/src/gopkg.in/mgo.v2/harness/daemons/rs2a/log/run --- juju-core-2.0~beta15/src/gopkg.in/mgo.v2/harness/daemons/rs2a/log/run 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/mgo.v2/harness/daemons/rs2a/log/run 2016-10-13 14:32:16.000000000 +0000 @@ -0,0 +1,3 @@ +#!/bin/sh + +exec cat - > log.txt diff -Nru juju-core-2.0~beta15/src/gopkg.in/mgo.v2/harness/daemons/rs2a/run juju-core-2.0.0/src/gopkg.in/mgo.v2/harness/daemons/rs2a/run --- juju-core-2.0~beta15/src/gopkg.in/mgo.v2/harness/daemons/rs2a/run 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/mgo.v2/harness/daemons/rs2a/run 2016-10-13 14:32:16.000000000 +0000 @@ -0,0 +1,8 @@ +#!/bin/sh + +. 
../.env + +exec mongod $COMMONDOPTS \ + --shardsvr \ + --replSet rs2 \ + --port 40021 diff -Nru juju-core-2.0~beta15/src/gopkg.in/mgo.v2/harness/daemons/rs2b/log/run juju-core-2.0.0/src/gopkg.in/mgo.v2/harness/daemons/rs2b/log/run --- juju-core-2.0~beta15/src/gopkg.in/mgo.v2/harness/daemons/rs2b/log/run 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/mgo.v2/harness/daemons/rs2b/log/run 2016-10-13 14:32:16.000000000 +0000 @@ -0,0 +1,3 @@ +#!/bin/sh + +exec cat - > log.txt diff -Nru juju-core-2.0~beta15/src/gopkg.in/mgo.v2/harness/daemons/rs2b/run juju-core-2.0.0/src/gopkg.in/mgo.v2/harness/daemons/rs2b/run --- juju-core-2.0~beta15/src/gopkg.in/mgo.v2/harness/daemons/rs2b/run 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/mgo.v2/harness/daemons/rs2b/run 2016-10-13 14:32:16.000000000 +0000 @@ -0,0 +1,8 @@ +#!/bin/sh + +. ../.env + +exec mongod $COMMONDOPTS \ + --shardsvr \ + --replSet rs2 \ + --port 40022 diff -Nru juju-core-2.0~beta15/src/gopkg.in/mgo.v2/harness/daemons/rs2c/log/run juju-core-2.0.0/src/gopkg.in/mgo.v2/harness/daemons/rs2c/log/run --- juju-core-2.0~beta15/src/gopkg.in/mgo.v2/harness/daemons/rs2c/log/run 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/mgo.v2/harness/daemons/rs2c/log/run 2016-10-13 14:32:16.000000000 +0000 @@ -0,0 +1,3 @@ +#!/bin/sh + +exec cat - > log.txt diff -Nru juju-core-2.0~beta15/src/gopkg.in/mgo.v2/harness/daemons/rs2c/run juju-core-2.0.0/src/gopkg.in/mgo.v2/harness/daemons/rs2c/run --- juju-core-2.0~beta15/src/gopkg.in/mgo.v2/harness/daemons/rs2c/run 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/mgo.v2/harness/daemons/rs2c/run 2016-10-13 14:32:16.000000000 +0000 @@ -0,0 +1,8 @@ +#!/bin/sh + +. 
../.env + +exec mongod $COMMONDOPTS \ + --shardsvr \ + --replSet rs2 \ + --port 40023 diff -Nru juju-core-2.0~beta15/src/gopkg.in/mgo.v2/harness/daemons/rs3a/log/run juju-core-2.0.0/src/gopkg.in/mgo.v2/harness/daemons/rs3a/log/run --- juju-core-2.0~beta15/src/gopkg.in/mgo.v2/harness/daemons/rs3a/log/run 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/mgo.v2/harness/daemons/rs3a/log/run 2016-10-13 14:32:16.000000000 +0000 @@ -0,0 +1,3 @@ +#!/bin/sh + +exec cat - > log.txt diff -Nru juju-core-2.0~beta15/src/gopkg.in/mgo.v2/harness/daemons/rs3a/run juju-core-2.0.0/src/gopkg.in/mgo.v2/harness/daemons/rs3a/run --- juju-core-2.0~beta15/src/gopkg.in/mgo.v2/harness/daemons/rs3a/run 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/mgo.v2/harness/daemons/rs3a/run 2016-10-13 14:32:16.000000000 +0000 @@ -0,0 +1,9 @@ +#!/bin/sh + +. ../.env + +exec mongod $COMMONDOPTS \ + --shardsvr \ + --replSet rs3 \ + --port 40031 \ + --keyFile=../../certs/keyfile diff -Nru juju-core-2.0~beta15/src/gopkg.in/mgo.v2/harness/daemons/rs3b/log/run juju-core-2.0.0/src/gopkg.in/mgo.v2/harness/daemons/rs3b/log/run --- juju-core-2.0~beta15/src/gopkg.in/mgo.v2/harness/daemons/rs3b/log/run 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/mgo.v2/harness/daemons/rs3b/log/run 2016-10-13 14:32:16.000000000 +0000 @@ -0,0 +1,3 @@ +#!/bin/sh + +exec cat - > log.txt diff -Nru juju-core-2.0~beta15/src/gopkg.in/mgo.v2/harness/daemons/rs3b/run juju-core-2.0.0/src/gopkg.in/mgo.v2/harness/daemons/rs3b/run --- juju-core-2.0~beta15/src/gopkg.in/mgo.v2/harness/daemons/rs3b/run 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/mgo.v2/harness/daemons/rs3b/run 2016-10-13 14:32:16.000000000 +0000 @@ -0,0 +1,9 @@ +#!/bin/sh + +. 
../.env + +exec mongod $COMMONDOPTS \ + --shardsvr \ + --replSet rs3 \ + --port 40032 \ + --keyFile=../../certs/keyfile diff -Nru juju-core-2.0~beta15/src/gopkg.in/mgo.v2/harness/daemons/rs3c/log/run juju-core-2.0.0/src/gopkg.in/mgo.v2/harness/daemons/rs3c/log/run --- juju-core-2.0~beta15/src/gopkg.in/mgo.v2/harness/daemons/rs3c/log/run 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/mgo.v2/harness/daemons/rs3c/log/run 2016-10-13 14:32:16.000000000 +0000 @@ -0,0 +1,3 @@ +#!/bin/sh + +exec cat - > log.txt diff -Nru juju-core-2.0~beta15/src/gopkg.in/mgo.v2/harness/daemons/rs3c/run juju-core-2.0.0/src/gopkg.in/mgo.v2/harness/daemons/rs3c/run --- juju-core-2.0~beta15/src/gopkg.in/mgo.v2/harness/daemons/rs3c/run 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/mgo.v2/harness/daemons/rs3c/run 2016-10-13 14:32:16.000000000 +0000 @@ -0,0 +1,9 @@ +#!/bin/sh + +. ../.env + +exec mongod $COMMONDOPTS \ + --shardsvr \ + --replSet rs3 \ + --port 40033 \ + --keyFile=../../certs/keyfile diff -Nru juju-core-2.0~beta15/src/gopkg.in/mgo.v2/harness/daemons/rs4a/log/run juju-core-2.0.0/src/gopkg.in/mgo.v2/harness/daemons/rs4a/log/run --- juju-core-2.0~beta15/src/gopkg.in/mgo.v2/harness/daemons/rs4a/log/run 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/mgo.v2/harness/daemons/rs4a/log/run 2016-10-13 14:32:16.000000000 +0000 @@ -0,0 +1,3 @@ +#!/bin/sh + +exec cat - > log.txt diff -Nru juju-core-2.0~beta15/src/gopkg.in/mgo.v2/harness/daemons/rs4a/run juju-core-2.0.0/src/gopkg.in/mgo.v2/harness/daemons/rs4a/run --- juju-core-2.0~beta15/src/gopkg.in/mgo.v2/harness/daemons/rs4a/run 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/mgo.v2/harness/daemons/rs4a/run 2016-10-13 14:32:16.000000000 +0000 @@ -0,0 +1,8 @@ +#!/bin/sh + +. 
../.env + +exec mongod $COMMONDOPTS \ + --shardsvr \ + --replSet rs4 \ + --port 40041 diff -Nru juju-core-2.0~beta15/src/gopkg.in/mgo.v2/harness/daemons/s1/log/run juju-core-2.0.0/src/gopkg.in/mgo.v2/harness/daemons/s1/log/run --- juju-core-2.0~beta15/src/gopkg.in/mgo.v2/harness/daemons/s1/log/run 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/mgo.v2/harness/daemons/s1/log/run 2016-10-13 14:32:16.000000000 +0000 @@ -0,0 +1,3 @@ +#!/bin/sh + +exec cat - > log.txt diff -Nru juju-core-2.0~beta15/src/gopkg.in/mgo.v2/harness/daemons/s1/run juju-core-2.0.0/src/gopkg.in/mgo.v2/harness/daemons/s1/run --- juju-core-2.0~beta15/src/gopkg.in/mgo.v2/harness/daemons/s1/run 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/mgo.v2/harness/daemons/s1/run 2016-10-13 14:32:16.000000000 +0000 @@ -0,0 +1,7 @@ +#!/bin/sh + +. ../.env + +exec mongos $COMMONSOPTS \ + --port 40201 \ + --configdb 127.0.0.1:40101 diff -Nru juju-core-2.0~beta15/src/gopkg.in/mgo.v2/harness/daemons/s2/log/run juju-core-2.0.0/src/gopkg.in/mgo.v2/harness/daemons/s2/log/run --- juju-core-2.0~beta15/src/gopkg.in/mgo.v2/harness/daemons/s2/log/run 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/mgo.v2/harness/daemons/s2/log/run 2016-10-13 14:32:16.000000000 +0000 @@ -0,0 +1,3 @@ +#!/bin/sh + +exec cat - > log.txt diff -Nru juju-core-2.0~beta15/src/gopkg.in/mgo.v2/harness/daemons/s2/run juju-core-2.0.0/src/gopkg.in/mgo.v2/harness/daemons/s2/run --- juju-core-2.0~beta15/src/gopkg.in/mgo.v2/harness/daemons/s2/run 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/mgo.v2/harness/daemons/s2/run 2016-10-13 14:32:16.000000000 +0000 @@ -0,0 +1,7 @@ +#!/bin/sh + +. 
../.env + +exec mongos $COMMONSOPTS \ + --port 40202 \ + --configdb 127.0.0.1:40102 diff -Nru juju-core-2.0~beta15/src/gopkg.in/mgo.v2/harness/daemons/s3/log/run juju-core-2.0.0/src/gopkg.in/mgo.v2/harness/daemons/s3/log/run --- juju-core-2.0~beta15/src/gopkg.in/mgo.v2/harness/daemons/s3/log/run 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/mgo.v2/harness/daemons/s3/log/run 2016-10-13 14:32:16.000000000 +0000 @@ -0,0 +1,3 @@ +#!/bin/sh + +exec cat - > log.txt diff -Nru juju-core-2.0~beta15/src/gopkg.in/mgo.v2/harness/daemons/s3/run juju-core-2.0.0/src/gopkg.in/mgo.v2/harness/daemons/s3/run --- juju-core-2.0~beta15/src/gopkg.in/mgo.v2/harness/daemons/s3/run 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/mgo.v2/harness/daemons/s3/run 2016-10-13 14:32:16.000000000 +0000 @@ -0,0 +1,8 @@ +#!/bin/sh + +. ../.env + +exec mongos $COMMONSOPTS \ + --port 40203 \ + --configdb 127.0.0.1:40103 \ + --keyFile=../../certs/keyfile diff -Nru juju-core-2.0~beta15/src/gopkg.in/mgo.v2/harness/mongojs/dropall.js juju-core-2.0.0/src/gopkg.in/mgo.v2/harness/mongojs/dropall.js --- juju-core-2.0~beta15/src/gopkg.in/mgo.v2/harness/mongojs/dropall.js 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/mgo.v2/harness/mongojs/dropall.js 2016-10-13 14:32:16.000000000 +0000 @@ -0,0 +1,66 @@ + +var ports = [40001, 40002, 40011, 40012, 40013, 40021, 40022, 40023, 40041, 40101, 40102, 40103, 40201, 40202, 40203] +var auth = [40002, 40103, 40203, 40031] +var db1 = new Mongo("localhost:40001") + +if (db1.getDB("admin").serverBuildInfo().OpenSSLVersion) { + ports.push(40003) + auth.push(40003) +} + +for (var i in ports) { + var port = ports[i] + var server = "localhost:" + port + var mongo = new Mongo("localhost:" + port) + var admin = mongo.getDB("admin") + + for (var j in auth) { + if (auth[j] == port) { + admin.auth("root", "rapadura") + admin.system.users.find().forEach(function(u) { + if (u.user == "root" || u.user == "reader") { + 
return; + } + if (typeof admin.dropUser == "function") { + mongo.getDB(u.db).dropUser(u.user); + } else { + admin.removeUser(u.user); + } + }) + break + } + } + var result = admin.runCommand({"listDatabases": 1}) + for (var j = 0; j != 100; j++) { + if (typeof result.databases != "undefined" || notMaster(result)) { + break + } + result = admin.runCommand({"listDatabases": 1}) + } + if (notMaster(result)) { + continue + } + if (typeof result.databases == "undefined") { + print("Could not list databases. Command result:") + print(JSON.stringify(result)) + quit(12) + } + var dbs = result.databases + for (var j = 0; j != dbs.length; j++) { + var db = dbs[j] + switch (db.name) { + case "admin": + case "local": + case "config": + break + default: + mongo.getDB(db.name).dropDatabase() + } + } +} + +function notMaster(result) { + return typeof result.errmsg != "undefined" && (result.errmsg.indexOf("not master") >= 0 || result.errmsg.indexOf("no master found")) +} + +// vim:ts=4:sw=4:et diff -Nru juju-core-2.0~beta15/src/gopkg.in/mgo.v2/harness/mongojs/init.js juju-core-2.0.0/src/gopkg.in/mgo.v2/harness/mongojs/init.js --- juju-core-2.0~beta15/src/gopkg.in/mgo.v2/harness/mongojs/init.js 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/mgo.v2/harness/mongojs/init.js 2016-10-13 14:32:16.000000000 +0000 @@ -0,0 +1,132 @@ +//var settings = {heartbeatSleep: 0.05, heartbeatTimeout: 0.5} +var settings = {}; + +// We know the master of the first set (pri=1), but not of the second. 
+var rs1cfg = {_id: "rs1", + members: [{_id: 1, host: "127.0.0.1:40011", priority: 1, tags: {rs1: "a"}}, + {_id: 2, host: "127.0.0.1:40012", priority: 0, tags: {rs1: "b"}}, + {_id: 3, host: "127.0.0.1:40013", priority: 0, tags: {rs1: "c"}}], + settings: settings} +var rs2cfg = {_id: "rs2", + members: [{_id: 1, host: "127.0.0.1:40021", priority: 1, tags: {rs2: "a"}}, + {_id: 2, host: "127.0.0.1:40022", priority: 1, tags: {rs2: "b"}}, + {_id: 3, host: "127.0.0.1:40023", priority: 1, tags: {rs2: "c"}}], + settings: settings} +var rs3cfg = {_id: "rs3", + members: [{_id: 1, host: "127.0.0.1:40031", priority: 1, tags: {rs3: "a"}}, + {_id: 2, host: "127.0.0.1:40032", priority: 1, tags: {rs3: "b"}}, + {_id: 3, host: "127.0.0.1:40033", priority: 1, tags: {rs3: "c"}}], + settings: settings} + +for (var i = 0; i != 60; i++) { + try { + db1 = new Mongo("127.0.0.1:40001").getDB("admin") + db2 = new Mongo("127.0.0.1:40002").getDB("admin") + rs1a = new Mongo("127.0.0.1:40011").getDB("admin") + rs2a = new Mongo("127.0.0.1:40021").getDB("admin") + rs3a = new Mongo("127.0.0.1:40031").getDB("admin") + break + } catch(err) { + print("Can't connect yet...") + } + sleep(1000) +} + +function hasSSL() { + return Boolean(db1.serverBuildInfo().OpenSSLVersion) +} + +rs1a.runCommand({replSetInitiate: rs1cfg}) +rs2a.runCommand({replSetInitiate: rs2cfg}) +rs3a.runCommand({replSetInitiate: rs3cfg}) + +function configShards() { + cfg1 = new Mongo("127.0.0.1:40201").getDB("admin") + cfg1.runCommand({addshard: "127.0.0.1:40001"}) + cfg1.runCommand({addshard: "rs1/127.0.0.1:40011"}) + + cfg2 = new Mongo("127.0.0.1:40202").getDB("admin") + cfg2.runCommand({addshard: "rs2/127.0.0.1:40021"}) + + cfg3 = new Mongo("127.0.0.1:40203").getDB("admin") + cfg3.runCommand({addshard: "rs3/127.0.0.1:40031"}) +} + +function configAuth() { + var addrs = ["127.0.0.1:40002", "127.0.0.1:40203", "127.0.0.1:40031"] + if (hasSSL()) { + addrs.push("127.0.0.1:40003") + } + for (var i in addrs) { + print("Configuring auth 
for", addrs[i]) + var db = new Mongo(addrs[i]).getDB("admin") + var v = db.serverBuildInfo().versionArray + var timedOut = false + if (v < [2, 5]) { + db.addUser("root", "rapadura") + } else { + try { + db.createUser({user: "root", pwd: "rapadura", roles: ["root"]}) + } catch (err) { + // 3.2 consistently fails replication of creds on 40031 (config server) + print("createUser command returned an error: " + err) + if (String(err).indexOf("timed out") >= 0) { + timedOut = true; + } + } + } + for (var i = 0; i < 60; i++) { + var ok = db.auth("root", "rapadura") + if (ok || !timedOut) { + break + } + sleep(1000); + } + if (v >= [2, 6]) { + db.createUser({user: "reader", pwd: "rapadura", roles: ["readAnyDatabase"]}) + } else if (v >= [2, 4]) { + db.addUser({user: "reader", pwd: "rapadura", roles: ["readAnyDatabase"]}) + } else { + db.addUser("reader", "rapadura", true) + } + } +} + +function countHealthy(rs) { + var status = rs.runCommand({replSetGetStatus: 1}) + var count = 0 + var primary = 0 + if (typeof status.members != "undefined") { + for (var i = 0; i != status.members.length; i++) { + var m = status.members[i] + if (m.health == 1 && (m.state == 1 || m.state == 2)) { + count += 1 + if (m.state == 1) { + primary = 1 + } + } + } + } + if (primary == 0) { + count = 0 + } + return count +} + +var totalRSMembers = rs1cfg.members.length + rs2cfg.members.length + rs3cfg.members.length + +for (var i = 0; i != 60; i++) { + var count = countHealthy(rs1a) + countHealthy(rs2a) + countHealthy(rs3a) + print("Replica sets have", count, "healthy nodes.") + if (count == totalRSMembers) { + configShards() + configAuth() + quit(0) + } + sleep(1000) +} + +print("Replica sets didn't sync up properly.") +quit(12) + +// vim:ts=4:sw=4:et diff -Nru juju-core-2.0~beta15/src/gopkg.in/mgo.v2/harness/mongojs/wait.js juju-core-2.0.0/src/gopkg.in/mgo.v2/harness/mongojs/wait.js --- juju-core-2.0~beta15/src/gopkg.in/mgo.v2/harness/mongojs/wait.js 1970-01-01 00:00:00.000000000 +0000 +++ 
juju-core-2.0.0/src/gopkg.in/mgo.v2/harness/mongojs/wait.js 2016-10-13 14:32:16.000000000 +0000 @@ -0,0 +1,67 @@ +// We know the master of the first set (pri=1), but not of the second. +var settings = {} +var rs1cfg = {_id: "rs1", + members: [{_id: 1, host: "127.0.0.1:40011", priority: 1}, + {_id: 2, host: "127.0.0.1:40012", priority: 0}, + {_id: 3, host: "127.0.0.1:40013", priority: 0}]} +var rs2cfg = {_id: "rs2", + members: [{_id: 1, host: "127.0.0.1:40021", priority: 1}, + {_id: 2, host: "127.0.0.1:40022", priority: 1}, + {_id: 3, host: "127.0.0.1:40023", priority: 0}]} +var rs3cfg = {_id: "rs3", + members: [{_id: 1, host: "127.0.0.1:40031", priority: 1}, + {_id: 2, host: "127.0.0.1:40032", priority: 1}, + {_id: 3, host: "127.0.0.1:40033", priority: 1}], + settings: settings} + +for (var i = 0; i != 60; i++) { + try { + rs1a = new Mongo("127.0.0.1:40011").getDB("admin") + rs2a = new Mongo("127.0.0.1:40021").getDB("admin") + rs3a = new Mongo("127.0.0.1:40031").getDB("admin") + rs3a.auth("root", "rapadura") + db1 = new Mongo("127.0.0.1:40001").getDB("admin") + db2 = new Mongo("127.0.0.1:40002").getDB("admin") + break + } catch(err) { + print("Can't connect yet...") + } + sleep(1000) +} + +function countHealthy(rs) { + var status = rs.runCommand({replSetGetStatus: 1}) + var count = 0 + var primary = 0 + if (typeof status.members != "undefined") { + for (var i = 0; i != status.members.length; i++) { + var m = status.members[i] + if (m.health == 1 && (m.state == 1 || m.state == 2)) { + count += 1 + if (m.state == 1) { + primary = 1 + } + } + } + } + if (primary == 0) { + count = 0 + } + return count +} + +var totalRSMembers = rs1cfg.members.length + rs2cfg.members.length + rs3cfg.members.length + +for (var i = 0; i != 90; i++) { + var count = countHealthy(rs1a) + countHealthy(rs2a) + countHealthy(rs3a) + print("Replica sets have", count, "healthy nodes.") + if (count == totalRSMembers) { + quit(0) + } + sleep(1000) +} + +print("Replica sets didn't sync up properly.") 
+quit(12) + +// vim:ts=4:sw=4:et diff -Nru juju-core-2.0~beta15/src/gopkg.in/mgo.v2/harness/setup.sh juju-core-2.0.0/src/gopkg.in/mgo.v2/harness/setup.sh --- juju-core-2.0~beta15/src/gopkg.in/mgo.v2/harness/setup.sh 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/mgo.v2/harness/setup.sh 2016-10-13 14:32:16.000000000 +0000 @@ -0,0 +1,96 @@ +#!/bin/sh -e + +LINE="---------------" + +start() { + if [ -d _harness ]; then + echo "Daemon setup already in place, stop it first." + exit 1 + fi + mkdir -p _harness + cd _harness + cp -a ../harness/daemons . + cp -a ../harness/certs . + echo keyfile > certs/keyfile + chmod 600 certs/keyfile + if ! mongod --help | grep -q -- --ssl; then + rm -rf daemons/db3 + fi + COUNT=$(ls daemons | wc -l) + echo "Running daemons..." + svscan daemons & + SVSCANPID=$! + echo $SVSCANPID > svscan.pid + if ! kill -0 $SVSCANPID; then + echo "Cannot execute svscan." + exit 1 + fi + echo "Starting $COUNT processes..." + for i in $(seq 30); do + UP=$(svstat daemons/* | grep ' up ' | grep -v ' [0-3] seconds' | wc -l) + echo "$UP processes up..." + if [ x$COUNT = x$UP ]; then + echo "Running setup.js with mongo..." + mongo --nodb ../harness/mongojs/init.js + exit 0 + fi + sleep 1 + done + echo "Failed to start processes. svstat _harness/daemons/* output:" + echo $LINE + svstat daemons/* + echo $LINE + for DAEMON in daemons/*; do + if $(svstat $DAEMON | grep ' up ' | grep ' [0-3] seconds' > /dev/null); then + echo "Logs for _harness/$DAEMON:" + echo $LINE + cat $DAEMON/log/log.txt + echo $LINE + fi + done + exit 1 +} + +stop() { + if [ -d _harness ]; then + cd _harness + if [ -f svscan.pid ]; then + kill -9 $(cat svscan.pid) 2> /dev/null || true + svc -dx daemons/* daemons/*/log > /dev/null 2>&1 || true + COUNT=$(ls daemons | wc -l) + echo "Shutting down $COUNT processes..." + while true; do + DOWN=$(svstat daemons/* | grep 'supervise not running' | wc -l) + echo "$DOWN processes down..." 
+ if [ x$DOWN = x$COUNT ]; then + break + fi + sleep 1 + done + rm svscan.pid + echo "Done." + fi + cd .. + rm -rf _harness + fi +} + + +if [ ! -f suite_test.go ]; then + echo "This script must be run from within the source directory." + exit 1 +fi + +case "$1" in + + start) + start $2 + ;; + + stop) + stop $2 + ;; + +esac + +# vim:ts=4:sw=4:et diff -Nru juju-core-2.0~beta15/src/gopkg.in/mgo.v2/internal/json/bench_test.go juju-core-2.0.0/src/gopkg.in/mgo.v2/internal/json/bench_test.go --- juju-core-2.0~beta15/src/gopkg.in/mgo.v2/internal/json/bench_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/mgo.v2/internal/json/bench_test.go 2016-10-13 14:32:16.000000000 +0000 @@ -0,0 +1,223 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Large data benchmark. +// The JSON data is a summary of agl's changes in the +// go, webkit, and chromium open source projects. +// We benchmark converting between the JSON form +// and in-memory data structures. 
+ +package json + +import ( + "bytes" + "compress/gzip" + "io/ioutil" + "os" + "strings" + "testing" +) + +type codeResponse struct { + Tree *codeNode `json:"tree"` + Username string `json:"username"` +} + +type codeNode struct { + Name string `json:"name"` + Kids []*codeNode `json:"kids"` + CLWeight float64 `json:"cl_weight"` + Touches int `json:"touches"` + MinT int64 `json:"min_t"` + MaxT int64 `json:"max_t"` + MeanT int64 `json:"mean_t"` +} + +var codeJSON []byte +var codeStruct codeResponse + +func codeInit() { + f, err := os.Open("testdata/code.json.gz") + if err != nil { + panic(err) + } + defer f.Close() + gz, err := gzip.NewReader(f) + if err != nil { + panic(err) + } + data, err := ioutil.ReadAll(gz) + if err != nil { + panic(err) + } + + codeJSON = data + + if err := Unmarshal(codeJSON, &codeStruct); err != nil { + panic("unmarshal code.json: " + err.Error()) + } + + if data, err = Marshal(&codeStruct); err != nil { + panic("marshal code.json: " + err.Error()) + } + + if !bytes.Equal(data, codeJSON) { + println("different lengths", len(data), len(codeJSON)) + for i := 0; i < len(data) && i < len(codeJSON); i++ { + if data[i] != codeJSON[i] { + println("re-marshal: changed at byte", i) + println("orig: ", string(codeJSON[i-10:i+10])) + println("new: ", string(data[i-10:i+10])) + break + } + } + panic("re-marshal code.json: different result") + } +} + +func BenchmarkCodeEncoder(b *testing.B) { + if codeJSON == nil { + b.StopTimer() + codeInit() + b.StartTimer() + } + enc := NewEncoder(ioutil.Discard) + for i := 0; i < b.N; i++ { + if err := enc.Encode(&codeStruct); err != nil { + b.Fatal("Encode:", err) + } + } + b.SetBytes(int64(len(codeJSON))) +} + +func BenchmarkCodeMarshal(b *testing.B) { + if codeJSON == nil { + b.StopTimer() + codeInit() + b.StartTimer() + } + for i := 0; i < b.N; i++ { + if _, err := Marshal(&codeStruct); err != nil { + b.Fatal("Marshal:", err) + } + } + b.SetBytes(int64(len(codeJSON))) +} + +func BenchmarkCodeDecoder(b *testing.B) 
{ + if codeJSON == nil { + b.StopTimer() + codeInit() + b.StartTimer() + } + var buf bytes.Buffer + dec := NewDecoder(&buf) + var r codeResponse + for i := 0; i < b.N; i++ { + buf.Write(codeJSON) + // hide EOF + buf.WriteByte('\n') + buf.WriteByte('\n') + buf.WriteByte('\n') + if err := dec.Decode(&r); err != nil { + b.Fatal("Decode:", err) + } + } + b.SetBytes(int64(len(codeJSON))) +} + +func BenchmarkDecoderStream(b *testing.B) { + b.StopTimer() + var buf bytes.Buffer + dec := NewDecoder(&buf) + buf.WriteString(`"` + strings.Repeat("x", 1000000) + `"` + "\n\n\n") + var x interface{} + if err := dec.Decode(&x); err != nil { + b.Fatal("Decode:", err) + } + ones := strings.Repeat(" 1\n", 300000) + "\n\n\n" + b.StartTimer() + for i := 0; i < b.N; i++ { + if i%300000 == 0 { + buf.WriteString(ones) + } + x = nil + if err := dec.Decode(&x); err != nil || x != 1.0 { + b.Fatalf("Decode: %v after %d", err, i) + } + } +} + +func BenchmarkCodeUnmarshal(b *testing.B) { + if codeJSON == nil { + b.StopTimer() + codeInit() + b.StartTimer() + } + for i := 0; i < b.N; i++ { + var r codeResponse + if err := Unmarshal(codeJSON, &r); err != nil { + b.Fatal("Unmarshal:", err) + } + } + b.SetBytes(int64(len(codeJSON))) +} + +func BenchmarkCodeUnmarshalReuse(b *testing.B) { + if codeJSON == nil { + b.StopTimer() + codeInit() + b.StartTimer() + } + var r codeResponse + for i := 0; i < b.N; i++ { + if err := Unmarshal(codeJSON, &r); err != nil { + b.Fatal("Unmarshal:", err) + } + } +} + +func BenchmarkUnmarshalString(b *testing.B) { + data := []byte(`"hello, world"`) + var s string + + for i := 0; i < b.N; i++ { + if err := Unmarshal(data, &s); err != nil { + b.Fatal("Unmarshal:", err) + } + } +} + +func BenchmarkUnmarshalFloat64(b *testing.B) { + var f float64 + data := []byte(`3.14`) + + for i := 0; i < b.N; i++ { + if err := Unmarshal(data, &f); err != nil { + b.Fatal("Unmarshal:", err) + } + } +} + +func BenchmarkUnmarshalInt64(b *testing.B) { + var x int64 + data := []byte(`3`) + + 
for i := 0; i < b.N; i++ { + if err := Unmarshal(data, &x); err != nil { + b.Fatal("Unmarshal:", err) + } + } +} + +func BenchmarkIssue10335(b *testing.B) { + b.ReportAllocs() + var s struct{} + j := []byte(`{"a":{ }}`) + for n := 0; n < b.N; n++ { + if err := Unmarshal(j, &s); err != nil { + b.Fatal(err) + } + } +} diff -Nru juju-core-2.0~beta15/src/gopkg.in/mgo.v2/internal/json/decode.go juju-core-2.0.0/src/gopkg.in/mgo.v2/internal/json/decode.go --- juju-core-2.0~beta15/src/gopkg.in/mgo.v2/internal/json/decode.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/mgo.v2/internal/json/decode.go 2016-10-13 14:32:16.000000000 +0000 @@ -0,0 +1,1685 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Represents JSON data structure using native Go types: booleans, floats, +// strings, arrays, and maps. + +package json + +import ( + "bytes" + "encoding" + "encoding/base64" + "errors" + "fmt" + "reflect" + "runtime" + "strconv" + "unicode" + "unicode/utf16" + "unicode/utf8" +) + +// Unmarshal parses the JSON-encoded data and stores the result +// in the value pointed to by v. +// +// Unmarshal uses the inverse of the encodings that +// Marshal uses, allocating maps, slices, and pointers as necessary, +// with the following additional rules: +// +// To unmarshal JSON into a pointer, Unmarshal first handles the case of +// the JSON being the JSON literal null. In that case, Unmarshal sets +// the pointer to nil. Otherwise, Unmarshal unmarshals the JSON into +// the value pointed at by the pointer. If the pointer is nil, Unmarshal +// allocates a new value for it to point to. +// +// To unmarshal JSON into a struct, Unmarshal matches incoming object +// keys to the keys used by Marshal (either the struct field name or its tag), +// preferring an exact match but also accepting a case-insensitive match. 
+// Unmarshal will only set exported fields of the struct. +// +// To unmarshal JSON into an interface value, +// Unmarshal stores one of these in the interface value: +// +// bool, for JSON booleans +// float64, for JSON numbers +// string, for JSON strings +// []interface{}, for JSON arrays +// map[string]interface{}, for JSON objects +// nil for JSON null +// +// To unmarshal a JSON array into a slice, Unmarshal resets the slice length +// to zero and then appends each element to the slice. +// As a special case, to unmarshal an empty JSON array into a slice, +// Unmarshal replaces the slice with a new empty slice. +// +// To unmarshal a JSON array into a Go array, Unmarshal decodes +// JSON array elements into corresponding Go array elements. +// If the Go array is smaller than the JSON array, +// the additional JSON array elements are discarded. +// If the JSON array is smaller than the Go array, +// the additional Go array elements are set to zero values. +// +// To unmarshal a JSON object into a map, Unmarshal first establishes a map to +// use, If the map is nil, Unmarshal allocates a new map. Otherwise Unmarshal +// reuses the existing map, keeping existing entries. Unmarshal then stores key- +// value pairs from the JSON object into the map. The map's key type must +// either be a string or implement encoding.TextUnmarshaler. +// +// If a JSON value is not appropriate for a given target type, +// or if a JSON number overflows the target type, Unmarshal +// skips that field and completes the unmarshaling as best it can. +// If no more serious errors are encountered, Unmarshal returns +// an UnmarshalTypeError describing the earliest such error. +// +// The JSON null value unmarshals into an interface, map, pointer, or slice +// by setting that Go value to nil. Because null is often used in JSON to mean +// ``not present,'' unmarshaling a JSON null into any other Go type has no effect +// on the value and produces no error. 
+// +// When unmarshaling quoted strings, invalid UTF-8 or +// invalid UTF-16 surrogate pairs are not treated as an error. +// Instead, they are replaced by the Unicode replacement +// character U+FFFD. +// +func Unmarshal(data []byte, v interface{}) error { + // Check for well-formedness. + // Avoids filling out half a data structure + // before discovering a JSON syntax error. + var d decodeState + err := checkValid(data, &d.scan) + if err != nil { + return err + } + + d.init(data) + return d.unmarshal(v) +} + +// Unmarshaler is the interface implemented by types +// that can unmarshal a JSON description of themselves. +// The input can be assumed to be a valid encoding of +// a JSON value. UnmarshalJSON must copy the JSON data +// if it wishes to retain the data after returning. +type Unmarshaler interface { + UnmarshalJSON([]byte) error +} + +// An UnmarshalTypeError describes a JSON value that was +// not appropriate for a value of a specific Go type. +type UnmarshalTypeError struct { + Value string // description of JSON value - "bool", "array", "number -5" + Type reflect.Type // type of Go value it could not be assigned to + Offset int64 // error occurred after reading Offset bytes +} + +func (e *UnmarshalTypeError) Error() string { + return "json: cannot unmarshal " + e.Value + " into Go value of type " + e.Type.String() +} + +// An UnmarshalFieldError describes a JSON object key that +// led to an unexported (and therefore unwritable) struct field. +// (No longer used; kept for compatibility.) +type UnmarshalFieldError struct { + Key string + Type reflect.Type + Field reflect.StructField +} + +func (e *UnmarshalFieldError) Error() string { + return "json: cannot unmarshal object key " + strconv.Quote(e.Key) + " into unexported field " + e.Field.Name + " of type " + e.Type.String() +} + +// An InvalidUnmarshalError describes an invalid argument passed to Unmarshal. +// (The argument to Unmarshal must be a non-nil pointer.) 
+type InvalidUnmarshalError struct { + Type reflect.Type +} + +func (e *InvalidUnmarshalError) Error() string { + if e.Type == nil { + return "json: Unmarshal(nil)" + } + + if e.Type.Kind() != reflect.Ptr { + return "json: Unmarshal(non-pointer " + e.Type.String() + ")" + } + return "json: Unmarshal(nil " + e.Type.String() + ")" +} + +func (d *decodeState) unmarshal(v interface{}) (err error) { + defer func() { + if r := recover(); r != nil { + if _, ok := r.(runtime.Error); ok { + panic(r) + } + err = r.(error) + } + }() + + rv := reflect.ValueOf(v) + if rv.Kind() != reflect.Ptr || rv.IsNil() { + return &InvalidUnmarshalError{reflect.TypeOf(v)} + } + + d.scan.reset() + // We decode rv not rv.Elem because the Unmarshaler interface + // test must be applied at the top level of the value. + d.value(rv) + return d.savedError +} + +// A Number represents a JSON number literal. +type Number string + +// String returns the literal text of the number. +func (n Number) String() string { return string(n) } + +// Float64 returns the number as a float64. +func (n Number) Float64() (float64, error) { + return strconv.ParseFloat(string(n), 64) +} + +// Int64 returns the number as an int64. +func (n Number) Int64() (int64, error) { + return strconv.ParseInt(string(n), 10, 64) +} + +// isValidNumber reports whether s is a valid JSON number literal. +func isValidNumber(s string) bool { + // This function implements the JSON numbers grammar. + // See https://tools.ietf.org/html/rfc7159#section-6 + // and http://json.org/number.gif + + if s == "" { + return false + } + + // Optional - + if s[0] == '-' { + s = s[1:] + if s == "" { + return false + } + } + + // Digits + switch { + default: + return false + + case s[0] == '0': + s = s[1:] + + case '1' <= s[0] && s[0] <= '9': + s = s[1:] + for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { + s = s[1:] + } + } + + // . followed by 1 or more digits. + if len(s) >= 2 && s[0] == '.' 
&& '0' <= s[1] && s[1] <= '9' { + s = s[2:] + for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { + s = s[1:] + } + } + + // e or E followed by an optional - or + and + // 1 or more digits. + if len(s) >= 2 && (s[0] == 'e' || s[0] == 'E') { + s = s[1:] + if s[0] == '+' || s[0] == '-' { + s = s[1:] + if s == "" { + return false + } + } + for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { + s = s[1:] + } + } + + // Make sure we are at the end. + return s == "" +} + +// decodeState represents the state while decoding a JSON value. +type decodeState struct { + data []byte + off int // read offset in data + scan scanner + nextscan scanner // for calls to nextValue + savedError error + useNumber bool + ext Extension +} + +// errPhase is used for errors that should not happen unless +// there is a bug in the JSON decoder or something is editing +// the data slice while the decoder executes. +var errPhase = errors.New("JSON decoder out of sync - data changing underfoot?") + +func (d *decodeState) init(data []byte) *decodeState { + d.data = data + d.off = 0 + d.savedError = nil + return d +} + +// error aborts the decoding by panicking with err. +func (d *decodeState) error(err error) { + panic(err) +} + +// saveError saves the first err it is called with, +// for reporting at the end of the unmarshal. +func (d *decodeState) saveError(err error) { + if d.savedError == nil { + d.savedError = err + } +} + +// next cuts off and returns the next full JSON value in d.data[d.off:]. +// The next value is known to be an object or array, not a literal. +func (d *decodeState) next() []byte { + c := d.data[d.off] + item, rest, err := nextValue(d.data[d.off:], &d.nextscan) + if err != nil { + d.error(err) + } + d.off = len(d.data) - len(rest) + + // Our scanner has seen the opening brace/bracket + // and thinks we're still in the middle of the object. + // invent a closing brace/bracket to get it out. 
+ if c == '{' { + d.scan.step(&d.scan, '}') + } else if c == '[' { + d.scan.step(&d.scan, ']') + } else { + // Was inside a function name. Get out of it. + d.scan.step(&d.scan, '(') + d.scan.step(&d.scan, ')') + } + + return item +} + +// scanWhile processes bytes in d.data[d.off:] until it +// receives a scan code not equal to op. +// It updates d.off and returns the new scan code. +func (d *decodeState) scanWhile(op int) int { + var newOp int + for { + if d.off >= len(d.data) { + newOp = d.scan.eof() + d.off = len(d.data) + 1 // mark processed EOF with len+1 + } else { + c := d.data[d.off] + d.off++ + newOp = d.scan.step(&d.scan, c) + } + if newOp != op { + break + } + } + return newOp +} + +// value decodes a JSON value from d.data[d.off:] into the value. +// it updates d.off to point past the decoded value. +func (d *decodeState) value(v reflect.Value) { + if !v.IsValid() { + _, rest, err := nextValue(d.data[d.off:], &d.nextscan) + if err != nil { + d.error(err) + } + d.off = len(d.data) - len(rest) + + // d.scan thinks we're still at the beginning of the item. + // Feed in an empty string - the shortest, simplest value - + // so that it knows we got to the end of the value. + if d.scan.redo { + // rewind. 
+ d.scan.redo = false + d.scan.step = stateBeginValue + } + d.scan.step(&d.scan, '"') + d.scan.step(&d.scan, '"') + + n := len(d.scan.parseState) + if n > 0 && d.scan.parseState[n-1] == parseObjectKey { + // d.scan thinks we just read an object key; finish the object + d.scan.step(&d.scan, ':') + d.scan.step(&d.scan, '"') + d.scan.step(&d.scan, '"') + d.scan.step(&d.scan, '}') + } + + return + } + + switch op := d.scanWhile(scanSkipSpace); op { + default: + d.error(errPhase) + + case scanBeginArray: + d.array(v) + + case scanBeginObject: + d.object(v) + + case scanBeginLiteral: + d.literal(v) + + case scanBeginName: + d.name(v) + } +} + +type unquotedValue struct{} + +// valueQuoted is like value but decodes a +// quoted string literal or literal null into an interface value. +// If it finds anything other than a quoted string literal or null, +// valueQuoted returns unquotedValue{}. +func (d *decodeState) valueQuoted() interface{} { + switch op := d.scanWhile(scanSkipSpace); op { + default: + d.error(errPhase) + + case scanBeginArray: + d.array(reflect.Value{}) + + case scanBeginObject: + d.object(reflect.Value{}) + + case scanBeginName: + switch v := d.nameInterface().(type) { + case nil, string: + return v + } + + case scanBeginLiteral: + switch v := d.literalInterface().(type) { + case nil, string: + return v + } + } + return unquotedValue{} +} + +// indirect walks down v allocating pointers as needed, +// until it gets to a non-pointer. +// if it encounters an Unmarshaler, indirect stops and returns that. +// if decodingNull is true, indirect stops at the last pointer so it can be set to nil. +func (d *decodeState) indirect(v reflect.Value, decodingNull bool) (Unmarshaler, encoding.TextUnmarshaler, reflect.Value) { + // If v is a named type and is addressable, + // start with its address, so that if the type has pointer methods, + // we find them. 
+ if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() { + v = v.Addr() + } + for { + // Load value from interface, but only if the result will be + // usefully addressable. + if v.Kind() == reflect.Interface && !v.IsNil() { + e := v.Elem() + if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) { + v = e + continue + } + } + + if v.Kind() != reflect.Ptr { + break + } + + if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() { + break + } + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + if v.Type().NumMethod() > 0 { + if u, ok := v.Interface().(Unmarshaler); ok { + return u, nil, v + } + if u, ok := v.Interface().(encoding.TextUnmarshaler); ok { + return nil, u, v + } + } + v = v.Elem() + } + return nil, nil, v +} + +// array consumes an array from d.data[d.off-1:], decoding into the value v. +// the first byte of the array ('[') has been read already. +func (d *decodeState) array(v reflect.Value) { + // Check for unmarshaler. + u, ut, pv := d.indirect(v, false) + if u != nil { + d.off-- + err := u.UnmarshalJSON(d.next()) + if err != nil { + d.error(err) + } + return + } + if ut != nil { + d.saveError(&UnmarshalTypeError{"array", v.Type(), int64(d.off)}) + d.off-- + d.next() + return + } + + v = pv + + // Check type of target. + switch v.Kind() { + case reflect.Interface: + if v.NumMethod() == 0 { + // Decoding into nil interface? Switch to non-reflect code. + v.Set(reflect.ValueOf(d.arrayInterface())) + return + } + // Otherwise it's invalid. + fallthrough + default: + d.saveError(&UnmarshalTypeError{"array", v.Type(), int64(d.off)}) + d.off-- + d.next() + return + case reflect.Array: + case reflect.Slice: + break + } + + i := 0 + for { + // Look ahead for ] - can only happen on first iteration. + op := d.scanWhile(scanSkipSpace) + if op == scanEndArray { + break + } + + // Back up so d.value can have the byte we just read. 
+ d.off-- + d.scan.undo(op) + + // Get element of array, growing if necessary. + if v.Kind() == reflect.Slice { + // Grow slice if necessary + if i >= v.Cap() { + newcap := v.Cap() + v.Cap()/2 + if newcap < 4 { + newcap = 4 + } + newv := reflect.MakeSlice(v.Type(), v.Len(), newcap) + reflect.Copy(newv, v) + v.Set(newv) + } + if i >= v.Len() { + v.SetLen(i + 1) + } + } + + if i < v.Len() { + // Decode into element. + d.value(v.Index(i)) + } else { + // Ran out of fixed array: skip. + d.value(reflect.Value{}) + } + i++ + + // Next token must be , or ]. + op = d.scanWhile(scanSkipSpace) + if op == scanEndArray { + break + } + if op != scanArrayValue { + d.error(errPhase) + } + } + + if i < v.Len() { + if v.Kind() == reflect.Array { + // Array. Zero the rest. + z := reflect.Zero(v.Type().Elem()) + for ; i < v.Len(); i++ { + v.Index(i).Set(z) + } + } else { + v.SetLen(i) + } + } + if i == 0 && v.Kind() == reflect.Slice { + v.Set(reflect.MakeSlice(v.Type(), 0, 0)) + } +} + +var nullLiteral = []byte("null") +var textUnmarshalerType = reflect.TypeOf(new(encoding.TextUnmarshaler)).Elem() + +// object consumes an object from d.data[d.off-1:], decoding into the value v. +// the first byte ('{') of the object has been read already. +func (d *decodeState) object(v reflect.Value) { + // Check for unmarshaler. + u, ut, pv := d.indirect(v, false) + if d.storeKeyed(pv) { + return + } + if u != nil { + d.off-- + err := u.UnmarshalJSON(d.next()) + if err != nil { + d.error(err) + } + return + } + if ut != nil { + d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)}) + d.off-- + d.next() // skip over { } in input + return + } + v = pv + + // Decoding into nil interface? Switch to non-reflect code. 
+ if v.Kind() == reflect.Interface && v.NumMethod() == 0 { + v.Set(reflect.ValueOf(d.objectInterface())) + return + } + + // Check type of target: + // struct or + // map[string]T or map[encoding.TextUnmarshaler]T + switch v.Kind() { + case reflect.Map: + // Map key must either have string kind or be an encoding.TextUnmarshaler. + t := v.Type() + if t.Key().Kind() != reflect.String && + !reflect.PtrTo(t.Key()).Implements(textUnmarshalerType) { + d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)}) + d.off-- + d.next() // skip over { } in input + return + } + if v.IsNil() { + v.Set(reflect.MakeMap(t)) + } + case reflect.Struct: + + default: + d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)}) + d.off-- + d.next() // skip over { } in input + return + } + + var mapElem reflect.Value + + empty := true + for { + // Read opening " of string key or closing }. + op := d.scanWhile(scanSkipSpace) + if op == scanEndObject { + if !empty && !d.ext.trailingCommas { + d.syntaxError("beginning of object key string") + } + break + } + empty = false + if op == scanBeginName { + if !d.ext.unquotedKeys { + d.syntaxError("beginning of object key string") + } + } else if op != scanBeginLiteral { + d.error(errPhase) + } + unquotedKey := op == scanBeginName + + // Read key. + start := d.off - 1 + op = d.scanWhile(scanContinue) + item := d.data[start : d.off-1] + var key []byte + if unquotedKey { + key = item + // TODO Fix code below to quote item when necessary. + } else { + var ok bool + key, ok = unquoteBytes(item) + if !ok { + d.error(errPhase) + } + } + + // Figure out field corresponding to key. 
+ var subv reflect.Value + destring := false // whether the value is wrapped in a string to be decoded first + + if v.Kind() == reflect.Map { + elemType := v.Type().Elem() + if !mapElem.IsValid() { + mapElem = reflect.New(elemType).Elem() + } else { + mapElem.Set(reflect.Zero(elemType)) + } + subv = mapElem + } else { + var f *field + fields := cachedTypeFields(v.Type()) + for i := range fields { + ff := &fields[i] + if bytes.Equal(ff.nameBytes, key) { + f = ff + break + } + if f == nil && ff.equalFold(ff.nameBytes, key) { + f = ff + } + } + if f != nil { + subv = v + destring = f.quoted + for _, i := range f.index { + if subv.Kind() == reflect.Ptr { + if subv.IsNil() { + subv.Set(reflect.New(subv.Type().Elem())) + } + subv = subv.Elem() + } + subv = subv.Field(i) + } + } + } + + // Read : before value. + if op == scanSkipSpace { + op = d.scanWhile(scanSkipSpace) + } + if op != scanObjectKey { + d.error(errPhase) + } + + // Read value. + if destring { + switch qv := d.valueQuoted().(type) { + case nil: + d.literalStore(nullLiteral, subv, false) + case string: + d.literalStore([]byte(qv), subv, true) + default: + d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal unquoted value into %v", subv.Type())) + } + } else { + d.value(subv) + } + + // Write value back to map; + // if using struct, subv points into struct already. + if v.Kind() == reflect.Map { + kt := v.Type().Key() + var kv reflect.Value + switch { + case kt.Kind() == reflect.String: + kv = reflect.ValueOf(key).Convert(v.Type().Key()) + case reflect.PtrTo(kt).Implements(textUnmarshalerType): + kv = reflect.New(v.Type().Key()) + d.literalStore(item, kv, true) + kv = kv.Elem() + default: + panic("json: Unexpected key type") // should never occur + } + v.SetMapIndex(kv, subv) + } + + // Next token must be , or }. 
+ op = d.scanWhile(scanSkipSpace) + if op == scanEndObject { + break + } + if op != scanObjectValue { + d.error(errPhase) + } + } +} + +// isNull returns whether there's a null literal at the provided offset. +func (d *decodeState) isNull(off int) bool { + if off+4 >= len(d.data) || d.data[off] != 'n' || d.data[off+1] != 'u' || d.data[off+2] != 'l' || d.data[off+3] != 'l' { + return false + } + d.nextscan.reset() + for i, c := range d.data[off:] { + if i > 4 { + return false + } + switch d.nextscan.step(&d.nextscan, c) { + case scanContinue, scanBeginName: + continue + } + break + } + return true +} + +// name consumes a const or function from d.data[d.off-1:], decoding into the value v. +// the first byte of the function name has been read already. +func (d *decodeState) name(v reflect.Value) { + if d.isNull(d.off-1) { + d.literal(v) + return + } + + // Check for unmarshaler. + u, ut, pv := d.indirect(v, false) + if d.storeKeyed(pv) { + return + } + if u != nil { + d.off-- + err := u.UnmarshalJSON(d.next()) + if err != nil { + d.error(err) + } + return + } + if ut != nil { + d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)}) + d.off-- + d.next() // skip over function in input + return + } + v = pv + + // Decoding into nil interface? Switch to non-reflect code. + if v.Kind() == reflect.Interface && v.NumMethod() == 0 { + out := d.nameInterface() + if out == nil { + v.Set(reflect.Zero(v.Type())) + } else { + v.Set(reflect.ValueOf(out)) + } + return + } + + nameStart := d.off - 1 + + op := d.scanWhile(scanContinue) + + name := d.data[nameStart : d.off-1] + if op != scanParam { + // Back up so the byte just read is consumed next. 
+ d.off-- + d.scan.undo(op) + if l, ok := d.convertLiteral(name); ok { + d.storeValue(v, l) + return + } + d.error(&SyntaxError{fmt.Sprintf("json: unknown constant %q", name), int64(d.off)}) + } + + funcName := string(name) + funcData := d.ext.funcs[funcName] + if funcData.key == "" { + d.error(fmt.Errorf("json: unknown function %q", funcName)) + } + + // Check type of target: + // struct or + // map[string]T or map[encoding.TextUnmarshaler]T + switch v.Kind() { + case reflect.Map: + // Map key must either have string kind or be an encoding.TextUnmarshaler. + t := v.Type() + if t.Key().Kind() != reflect.String && + !reflect.PtrTo(t.Key()).Implements(textUnmarshalerType) { + d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)}) + d.off-- + d.next() // skip over { } in input + return + } + if v.IsNil() { + v.Set(reflect.MakeMap(t)) + } + case reflect.Struct: + + default: + d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)}) + d.off-- + d.next() // skip over { } in input + return + } + + // TODO Fix case of func field as map. + //topv := v + + // Figure out field corresponding to function. + key := []byte(funcData.key) + if v.Kind() == reflect.Map { + elemType := v.Type().Elem() + v = reflect.New(elemType).Elem() + } else { + var f *field + fields := cachedTypeFields(v.Type()) + for i := range fields { + ff := &fields[i] + if bytes.Equal(ff.nameBytes, key) { + f = ff + break + } + if f == nil && ff.equalFold(ff.nameBytes, key) { + f = ff + } + } + if f != nil { + for _, i := range f.index { + if v.Kind() == reflect.Ptr { + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + v = v.Elem() + } + v = v.Field(i) + } + if v.Kind() == reflect.Ptr { + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + v = v.Elem() + } + } + } + + // Check for unmarshaler on func field itself. 
+ u, ut, pv = d.indirect(v, false) + if u != nil { + d.off = nameStart + err := u.UnmarshalJSON(d.next()) + if err != nil { + d.error(err) + } + return + } + + var mapElem reflect.Value + + // Parse function arguments. + for i := 0; ; i++ { + // closing ) - can only happen on first iteration. + op := d.scanWhile(scanSkipSpace) + if op == scanEndParams { + break + } + + // Back up so d.value can have the byte we just read. + d.off-- + d.scan.undo(op) + + if i >= len(funcData.args) { + d.error(fmt.Errorf("json: too many arguments for function %s", funcName)) + } + key := []byte(funcData.args[i]) + + // Figure out field corresponding to key. + var subv reflect.Value + destring := false // whether the value is wrapped in a string to be decoded first + + if v.Kind() == reflect.Map { + elemType := v.Type().Elem() + if !mapElem.IsValid() { + mapElem = reflect.New(elemType).Elem() + } else { + mapElem.Set(reflect.Zero(elemType)) + } + subv = mapElem + } else { + var f *field + fields := cachedTypeFields(v.Type()) + for i := range fields { + ff := &fields[i] + if bytes.Equal(ff.nameBytes, key) { + f = ff + break + } + if f == nil && ff.equalFold(ff.nameBytes, key) { + f = ff + } + } + if f != nil { + subv = v + destring = f.quoted + for _, i := range f.index { + if subv.Kind() == reflect.Ptr { + if subv.IsNil() { + subv.Set(reflect.New(subv.Type().Elem())) + } + subv = subv.Elem() + } + subv = subv.Field(i) + } + } + } + + // Read value. + if destring { + switch qv := d.valueQuoted().(type) { + case nil: + d.literalStore(nullLiteral, subv, false) + case string: + d.literalStore([]byte(qv), subv, true) + default: + d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal unquoted value into %v", subv.Type())) + } + } else { + d.value(subv) + } + + // Write value back to map; + // if using struct, subv points into struct already. 
+ if v.Kind() == reflect.Map { + kt := v.Type().Key() + var kv reflect.Value + switch { + case kt.Kind() == reflect.String: + kv = reflect.ValueOf(key).Convert(v.Type().Key()) + case reflect.PtrTo(kt).Implements(textUnmarshalerType): + kv = reflect.New(v.Type().Key()) + d.literalStore(key, kv, true) + kv = kv.Elem() + default: + panic("json: Unexpected key type") // should never occur + } + v.SetMapIndex(kv, subv) + } + + // Next token must be , or ). + op = d.scanWhile(scanSkipSpace) + if op == scanEndParams { + break + } + if op != scanParam { + d.error(errPhase) + } + } +} + +// keyed attempts to decode an object or function using a keyed doc extension, +// and returns the value and true on success, or nil and false otherwise. +func (d *decodeState) keyed() (interface{}, bool) { + if len(d.ext.keyed) == 0 { + return nil, false + } + + unquote := false + + // Look-ahead first key to check for a keyed document extension. + d.nextscan.reset() + var start, end int + for i, c := range d.data[d.off-1:] { + switch op := d.nextscan.step(&d.nextscan, c); op { + case scanSkipSpace, scanContinue, scanBeginObject: + continue + case scanBeginLiteral, scanBeginName: + unquote = op == scanBeginLiteral + start = i + continue + } + end = i + break + } + + name := d.data[d.off-1+start : d.off-1+end] + + var key []byte + var ok bool + if unquote { + key, ok = unquoteBytes(name) + if !ok { + d.error(errPhase) + } + } else { + funcData, ok := d.ext.funcs[string(name)] + if !ok { + return nil, false + } + key = []byte(funcData.key) + } + + decode, ok := d.ext.keyed[string(key)] + if !ok { + return nil, false + } + + d.off-- + out, err := decode(d.next()) + if err != nil { + d.error(err) + } + return out, true +} + +func (d *decodeState) storeKeyed(v reflect.Value) bool { + keyed, ok := d.keyed() + if !ok { + return false + } + d.storeValue(v, keyed) + return true +} + +var ( + trueBytes = []byte("true") + falseBytes = []byte("false") + nullBytes = []byte("null") +) + +func (d 
*decodeState) storeValue(v reflect.Value, from interface{}) { + switch from { + case nil: + d.literalStore(nullBytes, v, false) + return + case true: + d.literalStore(trueBytes, v, false) + return + case false: + d.literalStore(falseBytes, v, false) + return + } + fromv := reflect.ValueOf(from) + for fromv.Kind() == reflect.Ptr && !fromv.IsNil() { + fromv = fromv.Elem() + } + fromt := fromv.Type() + for v.Kind() == reflect.Ptr && !v.IsNil() { + v = v.Elem() + } + vt := v.Type() + if fromt.AssignableTo(vt) { + v.Set(fromv) + } else if fromt.ConvertibleTo(vt) { + v.Set(fromv.Convert(vt)) + } else { + d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)}) + } +} + +func (d *decodeState) convertLiteral(name []byte) (interface{}, bool) { + if len(name) == 0 { + return nil, false + } + switch name[0] { + case 't': + if bytes.Equal(name, trueBytes) { + return true, true + } + case 'f': + if bytes.Equal(name, falseBytes) { + return false, true + } + case 'n': + if bytes.Equal(name, nullBytes) { + return nil, true + } + } + if l, ok := d.ext.consts[string(name)]; ok { + return l, true + } + return nil, false +} + +// literal consumes a literal from d.data[d.off-1:], decoding into the value v. +// The first byte of the literal has been read already +// (that's how the caller knows it's a literal). +func (d *decodeState) literal(v reflect.Value) { + // All bytes inside literal return scanContinue op code. + start := d.off - 1 + op := d.scanWhile(scanContinue) + + // Scan read one byte too far; back up. + d.off-- + d.scan.undo(op) + + d.literalStore(d.data[start:d.off], v, false) +} + +// convertNumber converts the number literal s to a float64 or a Number +// depending on the setting of d.useNumber. 
+func (d *decodeState) convertNumber(s string) (interface{}, error) { + if d.useNumber { + return Number(s), nil + } + f, err := strconv.ParseFloat(s, 64) + if err != nil { + return nil, &UnmarshalTypeError{"number " + s, reflect.TypeOf(0.0), int64(d.off)} + } + return f, nil +} + +var numberType = reflect.TypeOf(Number("")) + +// literalStore decodes a literal stored in item into v. +// +// fromQuoted indicates whether this literal came from unwrapping a +// string from the ",string" struct tag option. this is used only to +// produce more helpful error messages. +func (d *decodeState) literalStore(item []byte, v reflect.Value, fromQuoted bool) { + // Check for unmarshaler. + if len(item) == 0 { + //Empty string given + d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + return + } + wantptr := item[0] == 'n' // null + u, ut, pv := d.indirect(v, wantptr) + if u != nil { + err := u.UnmarshalJSON(item) + if err != nil { + d.error(err) + } + return + } + if ut != nil { + if item[0] != '"' { + if fromQuoted { + d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + } else { + d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)}) + } + return + } + s, ok := unquoteBytes(item) + if !ok { + if fromQuoted { + d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + } else { + d.error(errPhase) + } + } + err := ut.UnmarshalText(s) + if err != nil { + d.error(err) + } + return + } + + v = pv + + switch c := item[0]; c { + case 'n': // null + switch v.Kind() { + case reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice: + v.Set(reflect.Zero(v.Type())) + // otherwise, ignore null for primitives/string + } + case 't', 'f': // true, false + value := c == 't' + switch v.Kind() { + default: + if fromQuoted { + d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying 
to unmarshal %q into %v", item, v.Type())) + } else { + d.saveError(&UnmarshalTypeError{"bool", v.Type(), int64(d.off)}) + } + case reflect.Bool: + v.SetBool(value) + case reflect.Interface: + if v.NumMethod() == 0 { + v.Set(reflect.ValueOf(value)) + } else { + d.saveError(&UnmarshalTypeError{"bool", v.Type(), int64(d.off)}) + } + } + + case '"': // string + s, ok := unquoteBytes(item) + if !ok { + if fromQuoted { + d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + } else { + d.error(errPhase) + } + } + switch v.Kind() { + default: + d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)}) + case reflect.Slice: + if v.Type().Elem().Kind() != reflect.Uint8 { + d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)}) + break + } + b := make([]byte, base64.StdEncoding.DecodedLen(len(s))) + n, err := base64.StdEncoding.Decode(b, s) + if err != nil { + d.saveError(err) + break + } + v.SetBytes(b[:n]) + case reflect.String: + v.SetString(string(s)) + case reflect.Interface: + if v.NumMethod() == 0 { + v.Set(reflect.ValueOf(string(s))) + } else { + d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)}) + } + } + + default: // number + if c != '-' && (c < '0' || c > '9') { + if fromQuoted { + d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + } else { + d.error(errPhase) + } + } + s := string(item) + switch v.Kind() { + default: + if v.Kind() == reflect.String && v.Type() == numberType { + v.SetString(s) + if !isValidNumber(s) { + d.error(fmt.Errorf("json: invalid number literal, trying to unmarshal %q into Number", item)) + } + break + } + if fromQuoted { + d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + } else { + d.error(&UnmarshalTypeError{"number", v.Type(), int64(d.off)}) + } + case reflect.Interface: + n, err := d.convertNumber(s) + if err != nil { 
+ d.saveError(err) + break + } + if v.NumMethod() != 0 { + d.saveError(&UnmarshalTypeError{"number", v.Type(), int64(d.off)}) + break + } + v.Set(reflect.ValueOf(n)) + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + n, err := strconv.ParseInt(s, 10, 64) + if err != nil || v.OverflowInt(n) { + d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)}) + break + } + v.SetInt(n) + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + n, err := strconv.ParseUint(s, 10, 64) + if err != nil || v.OverflowUint(n) { + d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)}) + break + } + v.SetUint(n) + + case reflect.Float32, reflect.Float64: + n, err := strconv.ParseFloat(s, v.Type().Bits()) + if err != nil || v.OverflowFloat(n) { + d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)}) + break + } + v.SetFloat(n) + } + } +} + +// The xxxInterface routines build up a value to be stored +// in an empty interface. They are not strictly necessary, +// but they avoid the weight of reflection in this common case. + +// valueInterface is like value but returns interface{} +func (d *decodeState) valueInterface() interface{} { + switch d.scanWhile(scanSkipSpace) { + default: + d.error(errPhase) + panic("unreachable") + case scanBeginArray: + return d.arrayInterface() + case scanBeginObject: + return d.objectInterface() + case scanBeginLiteral: + return d.literalInterface() + case scanBeginName: + return d.nameInterface() + } +} + +func (d *decodeState) syntaxError(expected string) { + msg := fmt.Sprintf("invalid character '%c' looking for %s", d.data[d.off-1], expected) + d.error(&SyntaxError{msg, int64(d.off)}) +} + +// arrayInterface is like array but returns []interface{}. +func (d *decodeState) arrayInterface() []interface{} { + var v = make([]interface{}, 0) + for { + // Look ahead for ] - can only happen on first iteration. 
+ op := d.scanWhile(scanSkipSpace) + if op == scanEndArray { + if len(v) > 0 && !d.ext.trailingCommas { + d.syntaxError("beginning of value") + } + break + } + + // Back up so d.value can have the byte we just read. + d.off-- + d.scan.undo(op) + + v = append(v, d.valueInterface()) + + // Next token must be , or ]. + op = d.scanWhile(scanSkipSpace) + if op == scanEndArray { + break + } + if op != scanArrayValue { + d.error(errPhase) + } + } + return v +} + +// objectInterface is like object but returns map[string]interface{}. +func (d *decodeState) objectInterface() interface{} { + v, ok := d.keyed() + if ok { + return v + } + + m := make(map[string]interface{}) + for { + // Read opening " of string key or closing }. + op := d.scanWhile(scanSkipSpace) + if op == scanEndObject { + if len(m) > 0 && !d.ext.trailingCommas { + d.syntaxError("beginning of object key string") + } + break + } + if op == scanBeginName { + if !d.ext.unquotedKeys { + d.syntaxError("beginning of object key string") + } + } else if op != scanBeginLiteral { + d.error(errPhase) + } + unquotedKey := op == scanBeginName + + // Read string key. + start := d.off - 1 + op = d.scanWhile(scanContinue) + item := d.data[start : d.off-1] + var key string + if unquotedKey { + key = string(item) + } else { + var ok bool + key, ok = unquote(item) + if !ok { + d.error(errPhase) + } + } + + // Read : before value. + if op == scanSkipSpace { + op = d.scanWhile(scanSkipSpace) + } + if op != scanObjectKey { + d.error(errPhase) + } + + // Read value. + m[key] = d.valueInterface() + + // Next token must be , or }. + op = d.scanWhile(scanSkipSpace) + if op == scanEndObject { + break + } + if op != scanObjectValue { + d.error(errPhase) + } + } + return m +} + +// literalInterface is like literal but returns an interface value. +func (d *decodeState) literalInterface() interface{} { + // All bytes inside literal return scanContinue op code. 
+ start := d.off - 1 + op := d.scanWhile(scanContinue) + + // Scan read one byte too far; back up. + d.off-- + d.scan.undo(op) + item := d.data[start:d.off] + + switch c := item[0]; c { + case 'n': // null + return nil + + case 't', 'f': // true, false + return c == 't' + + case '"': // string + s, ok := unquote(item) + if !ok { + d.error(errPhase) + } + return s + + default: // number + if c != '-' && (c < '0' || c > '9') { + d.error(errPhase) + } + n, err := d.convertNumber(string(item)) + if err != nil { + d.saveError(err) + } + return n + } +} + +// nameInterface is like function but returns map[string]interface{}. +func (d *decodeState) nameInterface() interface{} { + v, ok := d.keyed() + if ok { + return v + } + + nameStart := d.off - 1 + + op := d.scanWhile(scanContinue) + + name := d.data[nameStart : d.off-1] + if op != scanParam { + // Back up so the byte just read is consumed next. + d.off-- + d.scan.undo(op) + if l, ok := d.convertLiteral(name); ok { + return l + } + d.error(&SyntaxError{fmt.Sprintf("json: unknown constant %q", name), int64(d.off)}) + } + + funcName := string(name) + funcData := d.ext.funcs[funcName] + if funcData.key == "" { + d.error(fmt.Errorf("json: unknown function %q", funcName)) + } + + m := make(map[string]interface{}) + for i := 0; ; i++ { + // Look ahead for ) - can only happen on first iteration. + op := d.scanWhile(scanSkipSpace) + if op == scanEndParams { + break + } + + // Back up so d.value can have the byte we just read. + d.off-- + d.scan.undo(op) + + if i >= len(funcData.args) { + d.error(fmt.Errorf("json: too many arguments for function %s", funcName)) + } + m[funcData.args[i]] = d.valueInterface() + + // Next token must be , or ). + op = d.scanWhile(scanSkipSpace) + if op == scanEndParams { + break + } + if op != scanParam { + d.error(errPhase) + } + } + return map[string]interface{}{funcData.key: m} +} + +// getu4 decodes \uXXXX from the beginning of s, returning the hex value, +// or it returns -1. 
+func getu4(s []byte) rune { + if len(s) < 6 || s[0] != '\\' || s[1] != 'u' { + return -1 + } + r, err := strconv.ParseUint(string(s[2:6]), 16, 64) + if err != nil { + return -1 + } + return rune(r) +} + +// unquote converts a quoted JSON string literal s into an actual string t. +// The rules are different than for Go, so cannot use strconv.Unquote. +func unquote(s []byte) (t string, ok bool) { + s, ok = unquoteBytes(s) + t = string(s) + return +} + +func unquoteBytes(s []byte) (t []byte, ok bool) { + if len(s) < 2 || s[0] != '"' || s[len(s)-1] != '"' { + return + } + s = s[1 : len(s)-1] + + // Check for unusual characters. If there are none, + // then no unquoting is needed, so return a slice of the + // original bytes. + r := 0 + for r < len(s) { + c := s[r] + if c == '\\' || c == '"' || c < ' ' { + break + } + if c < utf8.RuneSelf { + r++ + continue + } + rr, size := utf8.DecodeRune(s[r:]) + if rr == utf8.RuneError && size == 1 { + break + } + r += size + } + if r == len(s) { + return s, true + } + + b := make([]byte, len(s)+2*utf8.UTFMax) + w := copy(b, s[0:r]) + for r < len(s) { + // Out of room? Can only happen if s is full of + // malformed UTF-8 and we're replacing each + // byte with RuneError. + if w >= len(b)-2*utf8.UTFMax { + nb := make([]byte, (len(b)+utf8.UTFMax)*2) + copy(nb, b[0:w]) + b = nb + } + switch c := s[r]; { + case c == '\\': + r++ + if r >= len(s) { + return + } + switch s[r] { + default: + return + case '"', '\\', '/', '\'': + b[w] = s[r] + r++ + w++ + case 'b': + b[w] = '\b' + r++ + w++ + case 'f': + b[w] = '\f' + r++ + w++ + case 'n': + b[w] = '\n' + r++ + w++ + case 'r': + b[w] = '\r' + r++ + w++ + case 't': + b[w] = '\t' + r++ + w++ + case 'u': + r-- + rr := getu4(s[r:]) + if rr < 0 { + return + } + r += 6 + if utf16.IsSurrogate(rr) { + rr1 := getu4(s[r:]) + if dec := utf16.DecodeRune(rr, rr1); dec != unicode.ReplacementChar { + // A valid pair; consume. 
+ r += 6 + w += utf8.EncodeRune(b[w:], dec) + break + } + // Invalid surrogate; fall back to replacement rune. + rr = unicode.ReplacementChar + } + w += utf8.EncodeRune(b[w:], rr) + } + + // Quote, control characters are invalid. + case c == '"', c < ' ': + return + + // ASCII + case c < utf8.RuneSelf: + b[w] = c + r++ + w++ + + // Coerce to well-formed UTF-8. + default: + rr, size := utf8.DecodeRune(s[r:]) + r += size + w += utf8.EncodeRune(b[w:], rr) + } + } + return b[0:w], true +} diff -Nru juju-core-2.0~beta15/src/gopkg.in/mgo.v2/internal/json/decode_test.go juju-core-2.0.0/src/gopkg.in/mgo.v2/internal/json/decode_test.go --- juju-core-2.0~beta15/src/gopkg.in/mgo.v2/internal/json/decode_test.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/mgo.v2/internal/json/decode_test.go 2016-10-13 14:32:16.000000000 +0000 @@ -0,0 +1,1512 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package json + +import ( + "bytes" + "encoding" + "errors" + "fmt" + "image" + "net" + "reflect" + "strings" + "testing" + "time" +) + +type T struct { + X string + Y int + Z int `json:"-"` +} + +type U struct { + Alphabet string `json:"alpha"` +} + +type V struct { + F1 interface{} + F2 int32 + F3 Number +} + +// ifaceNumAsFloat64/ifaceNumAsNumber are used to test unmarshaling with and +// without UseNumber +var ifaceNumAsFloat64 = map[string]interface{}{ + "k1": float64(1), + "k2": "s", + "k3": []interface{}{float64(1), float64(2.0), float64(3e-3)}, + "k4": map[string]interface{}{"kk1": "s", "kk2": float64(2)}, +} + +var ifaceNumAsNumber = map[string]interface{}{ + "k1": Number("1"), + "k2": "s", + "k3": []interface{}{Number("1"), Number("2.0"), Number("3e-3")}, + "k4": map[string]interface{}{"kk1": "s", "kk2": Number("2")}, +} + +type tx struct { + x int +} + +// A type that can unmarshal itself. 
+ +type unmarshaler struct { + T bool +} + +func (u *unmarshaler) UnmarshalJSON(b []byte) error { + *u = unmarshaler{true} // All we need to see that UnmarshalJSON is called. + return nil +} + +type ustruct struct { + M unmarshaler +} + +type unmarshalerText struct { + A, B string +} + +// needed for re-marshaling tests +func (u unmarshalerText) MarshalText() ([]byte, error) { + return []byte(u.A + ":" + u.B), nil +} + +func (u *unmarshalerText) UnmarshalText(b []byte) error { + pos := bytes.Index(b, []byte(":")) + if pos == -1 { + return errors.New("missing separator") + } + u.A, u.B = string(b[:pos]), string(b[pos+1:]) + return nil +} + +var _ encoding.TextUnmarshaler = (*unmarshalerText)(nil) + +type ustructText struct { + M unmarshalerText +} + +var ( + um0, um1 unmarshaler // target2 of unmarshaling + ump = &um1 + umtrue = unmarshaler{true} + umslice = []unmarshaler{{true}} + umslicep = new([]unmarshaler) + umstruct = ustruct{unmarshaler{true}} + + um0T, um1T unmarshalerText // target2 of unmarshaling + umpType = &um1T + umtrueXY = unmarshalerText{"x", "y"} + umsliceXY = []unmarshalerText{{"x", "y"}} + umslicepType = new([]unmarshalerText) + umstructType = new(ustructText) + umstructXY = ustructText{unmarshalerText{"x", "y"}} + + ummapType = map[unmarshalerText]bool{} + ummapXY = map[unmarshalerText]bool{unmarshalerText{"x", "y"}: true} +) + +// Test data structures for anonymous fields. 
+ +type Point struct { + Z int +} + +type Top struct { + Level0 int + Embed0 + *Embed0a + *Embed0b `json:"e,omitempty"` // treated as named + Embed0c `json:"-"` // ignored + Loop + Embed0p // has Point with X, Y, used + Embed0q // has Point with Z, used + embed // contains exported field +} + +type Embed0 struct { + Level1a int // overridden by Embed0a's Level1a with json tag + Level1b int // used because Embed0a's Level1b is renamed + Level1c int // used because Embed0a's Level1c is ignored + Level1d int // annihilated by Embed0a's Level1d + Level1e int `json:"x"` // annihilated by Embed0a.Level1e +} + +type Embed0a struct { + Level1a int `json:"Level1a,omitempty"` + Level1b int `json:"LEVEL1B,omitempty"` + Level1c int `json:"-"` + Level1d int // annihilated by Embed0's Level1d + Level1f int `json:"x"` // annihilated by Embed0's Level1e +} + +type Embed0b Embed0 + +type Embed0c Embed0 + +type Embed0p struct { + image.Point +} + +type Embed0q struct { + Point +} + +type embed struct { + Q int +} + +type Loop struct { + Loop1 int `json:",omitempty"` + Loop2 int `json:",omitempty"` + *Loop +} + +// From reflect test: +// The X in S6 and S7 annihilate, but they also block the X in S8.S9. +type S5 struct { + S6 + S7 + S8 +} + +type S6 struct { + X int +} + +type S7 S6 + +type S8 struct { + S9 +} + +type S9 struct { + X int + Y int +} + +// From reflect test: +// The X in S11.S6 and S12.S6 annihilate, but they also block the X in S13.S8.S9. +type S10 struct { + S11 + S12 + S13 +} + +type S11 struct { + S6 +} + +type S12 struct { + S6 +} + +type S13 struct { + S8 +} + +type unmarshalTest struct { + in string + ptr interface{} + out interface{} + err error + useNumber bool +} + +type Ambig struct { + // Given "hello", the first match should win. 
+ First int `json:"HELLO"` + Second int `json:"Hello"` +} + +type XYZ struct { + X interface{} + Y interface{} + Z interface{} +} + +func sliceAddr(x []int) *[]int { return &x } +func mapAddr(x map[string]int) *map[string]int { return &x } + +var unmarshalTests = []unmarshalTest{ + // basic types + {in: `true`, ptr: new(bool), out: true}, + {in: `1`, ptr: new(int), out: 1}, + {in: `1.2`, ptr: new(float64), out: 1.2}, + {in: `-5`, ptr: new(int16), out: int16(-5)}, + {in: `2`, ptr: new(Number), out: Number("2"), useNumber: true}, + {in: `2`, ptr: new(Number), out: Number("2")}, + {in: `2`, ptr: new(interface{}), out: float64(2.0)}, + {in: `2`, ptr: new(interface{}), out: Number("2"), useNumber: true}, + {in: `"a\u1234"`, ptr: new(string), out: "a\u1234"}, + {in: `"http:\/\/"`, ptr: new(string), out: "http://"}, + {in: `"g-clef: \uD834\uDD1E"`, ptr: new(string), out: "g-clef: \U0001D11E"}, + {in: `"invalid: \uD834x\uDD1E"`, ptr: new(string), out: "invalid: \uFFFDx\uFFFD"}, + {in: "null", ptr: new(interface{}), out: nil}, + {in: `{"X": [1,2,3], "Y": 4}`, ptr: new(T), out: T{Y: 4}, err: &UnmarshalTypeError{"array", reflect.TypeOf(""), 7}}, + {in: `{"x": 1}`, ptr: new(tx), out: tx{}}, + {in: `{"F1":1,"F2":2,"F3":3}`, ptr: new(V), out: V{F1: float64(1), F2: int32(2), F3: Number("3")}}, + {in: `{"F1":1,"F2":2,"F3":3}`, ptr: new(V), out: V{F1: Number("1"), F2: int32(2), F3: Number("3")}, useNumber: true}, + {in: `{"k1":1,"k2":"s","k3":[1,2.0,3e-3],"k4":{"kk1":"s","kk2":2}}`, ptr: new(interface{}), out: ifaceNumAsFloat64}, + {in: `{"k1":1,"k2":"s","k3":[1,2.0,3e-3],"k4":{"kk1":"s","kk2":2}}`, ptr: new(interface{}), out: ifaceNumAsNumber, useNumber: true}, + + // raw values with whitespace + {in: "\n true ", ptr: new(bool), out: true}, + {in: "\t 1 ", ptr: new(int), out: 1}, + {in: "\r 1.2 ", ptr: new(float64), out: 1.2}, + {in: "\t -5 \n", ptr: new(int16), out: int16(-5)}, + {in: "\t \"a\\u1234\" \n", ptr: new(string), out: "a\u1234"}, + + // Z has a "-" tag. 
+ {in: `{"Y": 1, "Z": 2}`, ptr: new(T), out: T{Y: 1}}, + + {in: `{"alpha": "abc", "alphabet": "xyz"}`, ptr: new(U), out: U{Alphabet: "abc"}}, + {in: `{"alpha": "abc"}`, ptr: new(U), out: U{Alphabet: "abc"}}, + {in: `{"alphabet": "xyz"}`, ptr: new(U), out: U{}}, + + // syntax errors + {in: `{"X": "foo", "Y"}`, err: &SyntaxError{"invalid character '}' after object key", 17}}, + {in: `[1, 2, 3+]`, err: &SyntaxError{"invalid character '+' after array element", 9}}, + {in: `{"X":12x}`, err: &SyntaxError{"invalid character 'x' after object key:value pair", 8}, useNumber: true}, + + // raw value errors + {in: "\x01 42", err: &SyntaxError{"invalid character '\\x01' looking for beginning of value", 1}}, + {in: " 42 \x01", err: &SyntaxError{"invalid character '\\x01' after top-level value", 5}}, + {in: "\x01 true", err: &SyntaxError{"invalid character '\\x01' looking for beginning of value", 1}}, + {in: " false \x01", err: &SyntaxError{"invalid character '\\x01' after top-level value", 8}}, + {in: "\x01 1.2", err: &SyntaxError{"invalid character '\\x01' looking for beginning of value", 1}}, + {in: " 3.4 \x01", err: &SyntaxError{"invalid character '\\x01' after top-level value", 6}}, + {in: "\x01 \"string\"", err: &SyntaxError{"invalid character '\\x01' looking for beginning of value", 1}}, + {in: " \"string\" \x01", err: &SyntaxError{"invalid character '\\x01' after top-level value", 11}}, + + // array tests + {in: `[1, 2, 3]`, ptr: new([3]int), out: [3]int{1, 2, 3}}, + {in: `[1, 2, 3]`, ptr: new([1]int), out: [1]int{1}}, + {in: `[1, 2, 3]`, ptr: new([5]int), out: [5]int{1, 2, 3, 0, 0}}, + + // empty array to interface test + {in: `[]`, ptr: new([]interface{}), out: []interface{}{}}, + {in: `null`, ptr: new([]interface{}), out: []interface{}(nil)}, + {in: `{"T":[]}`, ptr: new(map[string]interface{}), out: map[string]interface{}{"T": []interface{}{}}}, + {in: `{"T":null}`, ptr: new(map[string]interface{}), out: map[string]interface{}{"T": interface{}(nil)}}, + + // composite 
tests + {in: allValueIndent, ptr: new(All), out: allValue}, + {in: allValueCompact, ptr: new(All), out: allValue}, + {in: allValueIndent, ptr: new(*All), out: &allValue}, + {in: allValueCompact, ptr: new(*All), out: &allValue}, + {in: pallValueIndent, ptr: new(All), out: pallValue}, + {in: pallValueCompact, ptr: new(All), out: pallValue}, + {in: pallValueIndent, ptr: new(*All), out: &pallValue}, + {in: pallValueCompact, ptr: new(*All), out: &pallValue}, + + // unmarshal interface test + {in: `{"T":false}`, ptr: &um0, out: umtrue}, // use "false" so test will fail if custom unmarshaler is not called + {in: `{"T":false}`, ptr: &ump, out: &umtrue}, + {in: `[{"T":false}]`, ptr: &umslice, out: umslice}, + {in: `[{"T":false}]`, ptr: &umslicep, out: &umslice}, + {in: `{"M":{"T":"x:y"}}`, ptr: &umstruct, out: umstruct}, + + // UnmarshalText interface test + {in: `"x:y"`, ptr: &um0T, out: umtrueXY}, + {in: `"x:y"`, ptr: &umpType, out: &umtrueXY}, + {in: `["x:y"]`, ptr: &umsliceXY, out: umsliceXY}, + {in: `["x:y"]`, ptr: &umslicepType, out: &umsliceXY}, + {in: `{"M":"x:y"}`, ptr: umstructType, out: umstructXY}, + + // Map keys can be encoding.TextUnmarshalers + {in: `{"x:y":true}`, ptr: &ummapType, out: ummapXY}, + // If multiple values for the same key exists, only the most recent value is used. + {in: `{"x:y":false,"x:y":true}`, ptr: &ummapType, out: ummapXY}, + + // Overwriting of data. + // This is different from package xml, but it's what we've always done. + // Now documented and tested. 
+ {in: `[2]`, ptr: sliceAddr([]int{1}), out: []int{2}}, + {in: `{"key": 2}`, ptr: mapAddr(map[string]int{"old": 0, "key": 1}), out: map[string]int{"key": 2}}, + + { + in: `{ + "Level0": 1, + "Level1b": 2, + "Level1c": 3, + "x": 4, + "Level1a": 5, + "LEVEL1B": 6, + "e": { + "Level1a": 8, + "Level1b": 9, + "Level1c": 10, + "Level1d": 11, + "x": 12 + }, + "Loop1": 13, + "Loop2": 14, + "X": 15, + "Y": 16, + "Z": 17, + "Q": 18 + }`, + ptr: new(Top), + out: Top{ + Level0: 1, + Embed0: Embed0{ + Level1b: 2, + Level1c: 3, + }, + Embed0a: &Embed0a{ + Level1a: 5, + Level1b: 6, + }, + Embed0b: &Embed0b{ + Level1a: 8, + Level1b: 9, + Level1c: 10, + Level1d: 11, + Level1e: 12, + }, + Loop: Loop{ + Loop1: 13, + Loop2: 14, + }, + Embed0p: Embed0p{ + Point: image.Point{X: 15, Y: 16}, + }, + Embed0q: Embed0q{ + Point: Point{Z: 17}, + }, + embed: embed{ + Q: 18, + }, + }, + }, + { + in: `{"hello": 1}`, + ptr: new(Ambig), + out: Ambig{First: 1}, + }, + + { + in: `{"X": 1,"Y":2}`, + ptr: new(S5), + out: S5{S8: S8{S9: S9{Y: 2}}}, + }, + { + in: `{"X": 1,"Y":2}`, + ptr: new(S10), + out: S10{S13: S13{S8: S8{S9: S9{Y: 2}}}}, + }, + + // invalid UTF-8 is coerced to valid UTF-8. + { + in: "\"hello\xffworld\"", + ptr: new(string), + out: "hello\ufffdworld", + }, + { + in: "\"hello\xc2\xc2world\"", + ptr: new(string), + out: "hello\ufffd\ufffdworld", + }, + { + in: "\"hello\xc2\xffworld\"", + ptr: new(string), + out: "hello\ufffd\ufffdworld", + }, + { + in: "\"hello\\ud800world\"", + ptr: new(string), + out: "hello\ufffdworld", + }, + { + in: "\"hello\\ud800\\ud800world\"", + ptr: new(string), + out: "hello\ufffd\ufffdworld", + }, + { + in: "\"hello\\ud800\\ud800world\"", + ptr: new(string), + out: "hello\ufffd\ufffdworld", + }, + { + in: "\"hello\xed\xa0\x80\xed\xb0\x80world\"", + ptr: new(string), + out: "hello\ufffd\ufffd\ufffd\ufffd\ufffd\ufffdworld", + }, + + // Used to be issue 8305, but time.Time implements encoding.TextUnmarshaler so this works now. 
+ { + in: `{"2009-11-10T23:00:00Z": "hello world"}`, + ptr: &map[time.Time]string{}, + out: map[time.Time]string{time.Date(2009, 11, 10, 23, 0, 0, 0, time.UTC): "hello world"}, + }, + + // issue 8305 + { + in: `{"2009-11-10T23:00:00Z": "hello world"}`, + ptr: &map[Point]string{}, + err: &UnmarshalTypeError{"object", reflect.TypeOf(map[Point]string{}), 1}, + }, + { + in: `{"asdf": "hello world"}`, + ptr: &map[unmarshaler]string{}, + err: &UnmarshalTypeError{"object", reflect.TypeOf(map[unmarshaler]string{}), 1}, + }, +} + +func TestMarshal(t *testing.T) { + b, err := Marshal(allValue) + if err != nil { + t.Fatalf("Marshal allValue: %v", err) + } + if string(b) != allValueCompact { + t.Errorf("Marshal allValueCompact") + diff(t, b, []byte(allValueCompact)) + return + } + + b, err = Marshal(pallValue) + if err != nil { + t.Fatalf("Marshal pallValue: %v", err) + } + if string(b) != pallValueCompact { + t.Errorf("Marshal pallValueCompact") + diff(t, b, []byte(pallValueCompact)) + return + } +} + +var badUTF8 = []struct { + in, out string +}{ + {"hello\xffworld", `"hello\ufffdworld"`}, + {"", `""`}, + {"\xff", `"\ufffd"`}, + {"\xff\xff", `"\ufffd\ufffd"`}, + {"a\xffb", `"a\ufffdb"`}, + {"\xe6\x97\xa5\xe6\x9c\xac\xff\xaa\x9e", `"日本\ufffd\ufffd\ufffd"`}, +} + +func TestMarshalBadUTF8(t *testing.T) { + for _, tt := range badUTF8 { + b, err := Marshal(tt.in) + if string(b) != tt.out || err != nil { + t.Errorf("Marshal(%q) = %#q, %v, want %#q, nil", tt.in, b, err, tt.out) + } + } +} + +func TestMarshalNumberZeroVal(t *testing.T) { + var n Number + out, err := Marshal(n) + if err != nil { + t.Fatal(err) + } + outStr := string(out) + if outStr != "0" { + t.Fatalf("Invalid zero val for Number: %q", outStr) + } +} + +func TestMarshalEmbeds(t *testing.T) { + top := &Top{ + Level0: 1, + Embed0: Embed0{ + Level1b: 2, + Level1c: 3, + }, + Embed0a: &Embed0a{ + Level1a: 5, + Level1b: 6, + }, + Embed0b: &Embed0b{ + Level1a: 8, + Level1b: 9, + Level1c: 10, + Level1d: 11, + Level1e: 12, + 
}, + Loop: Loop{ + Loop1: 13, + Loop2: 14, + }, + Embed0p: Embed0p{ + Point: image.Point{X: 15, Y: 16}, + }, + Embed0q: Embed0q{ + Point: Point{Z: 17}, + }, + embed: embed{ + Q: 18, + }, + } + b, err := Marshal(top) + if err != nil { + t.Fatal(err) + } + want := "{\"Level0\":1,\"Level1b\":2,\"Level1c\":3,\"Level1a\":5,\"LEVEL1B\":6,\"e\":{\"Level1a\":8,\"Level1b\":9,\"Level1c\":10,\"Level1d\":11,\"x\":12},\"Loop1\":13,\"Loop2\":14,\"X\":15,\"Y\":16,\"Z\":17,\"Q\":18}" + if string(b) != want { + t.Errorf("Wrong marshal result.\n got: %q\nwant: %q", b, want) + } +} + +func TestUnmarshal(t *testing.T) { + for i, tt := range unmarshalTests { + var scan scanner + in := []byte(tt.in) + if err := checkValid(in, &scan); err != nil { + if !reflect.DeepEqual(err, tt.err) { + t.Errorf("#%d: checkValid: %#v", i, err) + continue + } + } + if tt.ptr == nil { + continue + } + + // v = new(right-type) + v := reflect.New(reflect.TypeOf(tt.ptr).Elem()) + dec := NewDecoder(bytes.NewReader(in)) + if tt.useNumber { + dec.UseNumber() + } + if err := dec.Decode(v.Interface()); !reflect.DeepEqual(err, tt.err) { + t.Errorf("#%d: %v, want %v", i, err, tt.err) + continue + } else if err != nil { + continue + } + if !reflect.DeepEqual(v.Elem().Interface(), tt.out) { + t.Errorf("#%d: mismatch\nhave: %#+v\nwant: %#+v", i, v.Elem().Interface(), tt.out) + data, _ := Marshal(v.Elem().Interface()) + println(string(data)) + data, _ = Marshal(tt.out) + println(string(data)) + continue + } + + // Check round trip. 
+ if tt.err == nil { + enc, err := Marshal(v.Interface()) + if err != nil { + t.Errorf("#%d: error re-marshaling: %v", i, err) + continue + } + vv := reflect.New(reflect.TypeOf(tt.ptr).Elem()) + dec = NewDecoder(bytes.NewReader(enc)) + if tt.useNumber { + dec.UseNumber() + } + if err := dec.Decode(vv.Interface()); err != nil { + t.Errorf("#%d: error re-unmarshaling %#q: %v", i, enc, err) + continue + } + if !reflect.DeepEqual(v.Elem().Interface(), vv.Elem().Interface()) { + t.Errorf("#%d: mismatch\nhave: %#+v\nwant: %#+v", i, v.Elem().Interface(), vv.Elem().Interface()) + t.Errorf(" In: %q", strings.Map(noSpace, string(in))) + t.Errorf("Marshal: %q", strings.Map(noSpace, string(enc))) + continue + } + } + } +} + +func TestUnmarshalMarshal(t *testing.T) { + initBig() + var v interface{} + if err := Unmarshal(jsonBig, &v); err != nil { + t.Fatalf("Unmarshal: %v", err) + } + b, err := Marshal(v) + if err != nil { + t.Fatalf("Marshal: %v", err) + } + if !bytes.Equal(jsonBig, b) { + t.Errorf("Marshal jsonBig") + diff(t, b, jsonBig) + return + } +} + +var numberTests = []struct { + in string + i int64 + intErr string + f float64 + floatErr string +}{ + {in: "-1.23e1", intErr: "strconv.ParseInt: parsing \"-1.23e1\": invalid syntax", f: -1.23e1}, + {in: "-12", i: -12, f: -12.0}, + {in: "1e1000", intErr: "strconv.ParseInt: parsing \"1e1000\": invalid syntax", floatErr: "strconv.ParseFloat: parsing \"1e1000\": value out of range"}, +} + +// Independent of Decode, basic coverage of the accessors in Number +func TestNumberAccessors(t *testing.T) { + for _, tt := range numberTests { + n := Number(tt.in) + if s := n.String(); s != tt.in { + t.Errorf("Number(%q).String() is %q", tt.in, s) + } + if i, err := n.Int64(); err == nil && tt.intErr == "" && i != tt.i { + t.Errorf("Number(%q).Int64() is %d", tt.in, i) + } else if (err == nil && tt.intErr != "") || (err != nil && err.Error() != tt.intErr) { + t.Errorf("Number(%q).Int64() wanted error %q but got: %v", tt.in, tt.intErr, 
err) + } + if f, err := n.Float64(); err == nil && tt.floatErr == "" && f != tt.f { + t.Errorf("Number(%q).Float64() is %g", tt.in, f) + } else if (err == nil && tt.floatErr != "") || (err != nil && err.Error() != tt.floatErr) { + t.Errorf("Number(%q).Float64() wanted error %q but got: %v", tt.in, tt.floatErr, err) + } + } +} + +func TestLargeByteSlice(t *testing.T) { + s0 := make([]byte, 2000) + for i := range s0 { + s0[i] = byte(i) + } + b, err := Marshal(s0) + if err != nil { + t.Fatalf("Marshal: %v", err) + } + var s1 []byte + if err := Unmarshal(b, &s1); err != nil { + t.Fatalf("Unmarshal: %v", err) + } + if !bytes.Equal(s0, s1) { + t.Errorf("Marshal large byte slice") + diff(t, s0, s1) + } +} + +type Xint struct { + X int +} + +func TestUnmarshalInterface(t *testing.T) { + var xint Xint + var i interface{} = &xint + if err := Unmarshal([]byte(`{"X":1}`), &i); err != nil { + t.Fatalf("Unmarshal: %v", err) + } + if xint.X != 1 { + t.Fatalf("Did not write to xint") + } +} + +func TestUnmarshalPtrPtr(t *testing.T) { + var xint Xint + pxint := &xint + if err := Unmarshal([]byte(`{"X":1}`), &pxint); err != nil { + t.Fatalf("Unmarshal: %v", err) + } + if xint.X != 1 { + t.Fatalf("Did not write to xint") + } +} + +func TestEscape(t *testing.T) { + const input = `"foobar"<html>` + " [\u2028 \u2029]" + const expected = `"\"foobar\"\u003chtml\u003e [\u2028 \u2029]"` + b, err := Marshal(input) + if err != nil { + t.Fatalf("Marshal error: %v", err) + } + if s := string(b); s != expected { + t.Errorf("Encoding of [%s]:\n got [%s]\nwant [%s]", input, s, expected) + } +} + +// WrongString is a struct that's misusing the ,string modifier. 
+type WrongString struct { + Message string `json:"result,string"` +} + +type wrongStringTest struct { + in, err string +} + +var wrongStringTests = []wrongStringTest{ + {`{"result":"x"}`, `json: invalid use of ,string struct tag, trying to unmarshal "x" into string`}, + {`{"result":"foo"}`, `json: invalid use of ,string struct tag, trying to unmarshal "foo" into string`}, + {`{"result":"123"}`, `json: invalid use of ,string struct tag, trying to unmarshal "123" into string`}, + {`{"result":123}`, `json: invalid use of ,string struct tag, trying to unmarshal unquoted value into string`}, +} + +// If people misuse the ,string modifier, the error message should be +// helpful, telling the user that they're doing it wrong. +func TestErrorMessageFromMisusedString(t *testing.T) { + for n, tt := range wrongStringTests { + r := strings.NewReader(tt.in) + var s WrongString + err := NewDecoder(r).Decode(&s) + got := fmt.Sprintf("%v", err) + if got != tt.err { + t.Errorf("%d. got err = %q, want %q", n, got, tt.err) + } + } +} + +func noSpace(c rune) rune { + if isSpace(byte(c)) { //only used for ascii + return -1 + } + return c +} + +type All struct { + Bool bool + Int int + Int8 int8 + Int16 int16 + Int32 int32 + Int64 int64 + Uint uint + Uint8 uint8 + Uint16 uint16 + Uint32 uint32 + Uint64 uint64 + Uintptr uintptr + Float32 float32 + Float64 float64 + + Foo string `json:"bar"` + Foo2 string `json:"bar2,dummyopt"` + + IntStr int64 `json:",string"` + + PBool *bool + PInt *int + PInt8 *int8 + PInt16 *int16 + PInt32 *int32 + PInt64 *int64 + PUint *uint + PUint8 *uint8 + PUint16 *uint16 + PUint32 *uint32 + PUint64 *uint64 + PUintptr *uintptr + PFloat32 *float32 + PFloat64 *float64 + + String string + PString *string + + Map map[string]Small + MapP map[string]*Small + PMap *map[string]Small + PMapP *map[string]*Small + + EmptyMap map[string]Small + NilMap map[string]Small + + Slice []Small + SliceP []*Small + PSlice *[]Small + PSliceP *[]*Small + + EmptySlice []Small + NilSlice 
[]Small + + StringSlice []string + ByteSlice []byte + + Small Small + PSmall *Small + PPSmall **Small + + Interface interface{} + PInterface *interface{} + + unexported int +} + +type Small struct { + Tag string +} + +var allValue = All{ + Bool: true, + Int: 2, + Int8: 3, + Int16: 4, + Int32: 5, + Int64: 6, + Uint: 7, + Uint8: 8, + Uint16: 9, + Uint32: 10, + Uint64: 11, + Uintptr: 12, + Float32: 14.1, + Float64: 15.1, + Foo: "foo", + Foo2: "foo2", + IntStr: 42, + String: "16", + Map: map[string]Small{ + "17": {Tag: "tag17"}, + "18": {Tag: "tag18"}, + }, + MapP: map[string]*Small{ + "19": {Tag: "tag19"}, + "20": nil, + }, + EmptyMap: map[string]Small{}, + Slice: []Small{{Tag: "tag20"}, {Tag: "tag21"}}, + SliceP: []*Small{{Tag: "tag22"}, nil, {Tag: "tag23"}}, + EmptySlice: []Small{}, + StringSlice: []string{"str24", "str25", "str26"}, + ByteSlice: []byte{27, 28, 29}, + Small: Small{Tag: "tag30"}, + PSmall: &Small{Tag: "tag31"}, + Interface: 5.2, +} + +var pallValue = All{ + PBool: &allValue.Bool, + PInt: &allValue.Int, + PInt8: &allValue.Int8, + PInt16: &allValue.Int16, + PInt32: &allValue.Int32, + PInt64: &allValue.Int64, + PUint: &allValue.Uint, + PUint8: &allValue.Uint8, + PUint16: &allValue.Uint16, + PUint32: &allValue.Uint32, + PUint64: &allValue.Uint64, + PUintptr: &allValue.Uintptr, + PFloat32: &allValue.Float32, + PFloat64: &allValue.Float64, + PString: &allValue.String, + PMap: &allValue.Map, + PMapP: &allValue.MapP, + PSlice: &allValue.Slice, + PSliceP: &allValue.SliceP, + PPSmall: &allValue.PSmall, + PInterface: &allValue.Interface, +} + +var allValueIndent = `{ + "Bool": true, + "Int": 2, + "Int8": 3, + "Int16": 4, + "Int32": 5, + "Int64": 6, + "Uint": 7, + "Uint8": 8, + "Uint16": 9, + "Uint32": 10, + "Uint64": 11, + "Uintptr": 12, + "Float32": 14.1, + "Float64": 15.1, + "bar": "foo", + "bar2": "foo2", + "IntStr": "42", + "PBool": null, + "PInt": null, + "PInt8": null, + "PInt16": null, + "PInt32": null, + "PInt64": null, + "PUint": null, + "PUint8": 
null, + "PUint16": null, + "PUint32": null, + "PUint64": null, + "PUintptr": null, + "PFloat32": null, + "PFloat64": null, + "String": "16", + "PString": null, + "Map": { + "17": { + "Tag": "tag17" + }, + "18": { + "Tag": "tag18" + } + }, + "MapP": { + "19": { + "Tag": "tag19" + }, + "20": null + }, + "PMap": null, + "PMapP": null, + "EmptyMap": {}, + "NilMap": null, + "Slice": [ + { + "Tag": "tag20" + }, + { + "Tag": "tag21" + } + ], + "SliceP": [ + { + "Tag": "tag22" + }, + null, + { + "Tag": "tag23" + } + ], + "PSlice": null, + "PSliceP": null, + "EmptySlice": [], + "NilSlice": null, + "StringSlice": [ + "str24", + "str25", + "str26" + ], + "ByteSlice": "Gxwd", + "Small": { + "Tag": "tag30" + }, + "PSmall": { + "Tag": "tag31" + }, + "PPSmall": null, + "Interface": 5.2, + "PInterface": null +}` + +var allValueCompact = strings.Map(noSpace, allValueIndent) + +var pallValueIndent = `{ + "Bool": false, + "Int": 0, + "Int8": 0, + "Int16": 0, + "Int32": 0, + "Int64": 0, + "Uint": 0, + "Uint8": 0, + "Uint16": 0, + "Uint32": 0, + "Uint64": 0, + "Uintptr": 0, + "Float32": 0, + "Float64": 0, + "bar": "", + "bar2": "", + "IntStr": "0", + "PBool": true, + "PInt": 2, + "PInt8": 3, + "PInt16": 4, + "PInt32": 5, + "PInt64": 6, + "PUint": 7, + "PUint8": 8, + "PUint16": 9, + "PUint32": 10, + "PUint64": 11, + "PUintptr": 12, + "PFloat32": 14.1, + "PFloat64": 15.1, + "String": "", + "PString": "16", + "Map": null, + "MapP": null, + "PMap": { + "17": { + "Tag": "tag17" + }, + "18": { + "Tag": "tag18" + } + }, + "PMapP": { + "19": { + "Tag": "tag19" + }, + "20": null + }, + "EmptyMap": null, + "NilMap": null, + "Slice": null, + "SliceP": null, + "PSlice": [ + { + "Tag": "tag20" + }, + { + "Tag": "tag21" + } + ], + "PSliceP": [ + { + "Tag": "tag22" + }, + null, + { + "Tag": "tag23" + } + ], + "EmptySlice": null, + "NilSlice": null, + "StringSlice": null, + "ByteSlice": null, + "Small": { + "Tag": "" + }, + "PSmall": null, + "PPSmall": { + "Tag": "tag31" + }, + "Interface": null, + 
"PInterface": 5.2 +}` + +var pallValueCompact = strings.Map(noSpace, pallValueIndent) + +func TestRefUnmarshal(t *testing.T) { + type S struct { + // Ref is defined in encode_test.go. + R0 Ref + R1 *Ref + R2 RefText + R3 *RefText + } + want := S{ + R0: 12, + R1: new(Ref), + R2: 13, + R3: new(RefText), + } + *want.R1 = 12 + *want.R3 = 13 + + var got S + if err := Unmarshal([]byte(`{"R0":"ref","R1":"ref","R2":"ref","R3":"ref"}`), &got); err != nil { + t.Fatalf("Unmarshal: %v", err) + } + if !reflect.DeepEqual(got, want) { + t.Errorf("got %+v, want %+v", got, want) + } +} + +// Test that the empty string doesn't panic decoding when ,string is specified +// Issue 3450 +func TestEmptyString(t *testing.T) { + type T2 struct { + Number1 int `json:",string"` + Number2 int `json:",string"` + } + data := `{"Number1":"1", "Number2":""}` + dec := NewDecoder(strings.NewReader(data)) + var t2 T2 + err := dec.Decode(&t2) + if err == nil { + t.Fatal("Decode: did not return error") + } + if t2.Number1 != 1 { + t.Fatal("Decode: did not set Number1") + } +} + +// Test that a null for ,string is not replaced with the previous quoted string (issue 7046). +// It should also not be an error (issue 2540, issue 8587). 
+func TestNullString(t *testing.T) { + type T struct { + A int `json:",string"` + B int `json:",string"` + C *int `json:",string"` + } + data := []byte(`{"A": "1", "B": null, "C": null}`) + var s T + s.B = 1 + s.C = new(int) + *s.C = 2 + err := Unmarshal(data, &s) + if err != nil { + t.Fatalf("Unmarshal: %v", err) + } + if s.B != 1 || s.C != nil { + t.Fatalf("after Unmarshal, s.B=%d, s.C=%p, want 1, nil", s.B, s.C) + } +} + +func intp(x int) *int { + p := new(int) + *p = x + return p +} + +func intpp(x *int) **int { + pp := new(*int) + *pp = x + return pp +} + +var interfaceSetTests = []struct { + pre interface{} + json string + post interface{} +}{ + {"foo", `"bar"`, "bar"}, + {"foo", `2`, 2.0}, + {"foo", `true`, true}, + {"foo", `null`, nil}, + + {nil, `null`, nil}, + {new(int), `null`, nil}, + {(*int)(nil), `null`, nil}, + {new(*int), `null`, new(*int)}, + {(**int)(nil), `null`, nil}, + {intp(1), `null`, nil}, + {intpp(nil), `null`, intpp(nil)}, + {intpp(intp(1)), `null`, intpp(nil)}, +} + +func TestInterfaceSet(t *testing.T) { + for _, tt := range interfaceSetTests { + b := struct{ X interface{} }{tt.pre} + blob := `{"X":` + tt.json + `}` + if err := Unmarshal([]byte(blob), &b); err != nil { + t.Errorf("Unmarshal %#q: %v", blob, err) + continue + } + if !reflect.DeepEqual(b.X, tt.post) { + t.Errorf("Unmarshal %#q into %#v: X=%#v, want %#v", blob, tt.pre, b.X, tt.post) + } + } +} + +// JSON null values should be ignored for primitives and string values instead of resulting in an error. 
+// Issue 2540 +func TestUnmarshalNulls(t *testing.T) { + jsonData := []byte(`{ + "Bool" : null, + "Int" : null, + "Int8" : null, + "Int16" : null, + "Int32" : null, + "Int64" : null, + "Uint" : null, + "Uint8" : null, + "Uint16" : null, + "Uint32" : null, + "Uint64" : null, + "Float32" : null, + "Float64" : null, + "String" : null}`) + + nulls := All{ + Bool: true, + Int: 2, + Int8: 3, + Int16: 4, + Int32: 5, + Int64: 6, + Uint: 7, + Uint8: 8, + Uint16: 9, + Uint32: 10, + Uint64: 11, + Float32: 12.1, + Float64: 13.1, + String: "14"} + + err := Unmarshal(jsonData, &nulls) + if err != nil { + t.Errorf("Unmarshal of null values failed: %v", err) + } + if !nulls.Bool || nulls.Int != 2 || nulls.Int8 != 3 || nulls.Int16 != 4 || nulls.Int32 != 5 || nulls.Int64 != 6 || + nulls.Uint != 7 || nulls.Uint8 != 8 || nulls.Uint16 != 9 || nulls.Uint32 != 10 || nulls.Uint64 != 11 || + nulls.Float32 != 12.1 || nulls.Float64 != 13.1 || nulls.String != "14" { + + t.Errorf("Unmarshal of null values affected primitives") + } +} + +func TestStringKind(t *testing.T) { + type stringKind string + + var m1, m2 map[stringKind]int + m1 = map[stringKind]int{ + "foo": 42, + } + + data, err := Marshal(m1) + if err != nil { + t.Errorf("Unexpected error marshaling: %v", err) + } + + err = Unmarshal(data, &m2) + if err != nil { + t.Errorf("Unexpected error unmarshaling: %v", err) + } + + if !reflect.DeepEqual(m1, m2) { + t.Error("Items should be equal after encoding and then decoding") + } +} + +// Custom types with []byte as underlying type could not be marshalled +// and then unmarshalled. +// Issue 8962. +func TestByteKind(t *testing.T) { + type byteKind []byte + + a := byteKind("hello") + + data, err := Marshal(a) + if err != nil { + t.Error(err) + } + var b byteKind + err = Unmarshal(data, &b) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(a, b) { + t.Errorf("expected %v == %v", a, b) + } +} + +// The fix for issue 8962 introduced a regression. +// Issue 12921. 
+func TestSliceOfCustomByte(t *testing.T) { + type Uint8 uint8 + + a := []Uint8("hello") + + data, err := Marshal(a) + if err != nil { + t.Fatal(err) + } + var b []Uint8 + err = Unmarshal(data, &b) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(a, b) { + t.Fatalf("expected %v == %v", a, b) + } +} + +var decodeTypeErrorTests = []struct { + dest interface{} + src string +}{ + {new(string), `{"user": "name"}`}, // issue 4628. + {new(error), `{}`}, // issue 4222 + {new(error), `[]`}, + {new(error), `""`}, + {new(error), `123`}, + {new(error), `true`}, +} + +func TestUnmarshalTypeError(t *testing.T) { + for _, item := range decodeTypeErrorTests { + err := Unmarshal([]byte(item.src), item.dest) + if _, ok := err.(*UnmarshalTypeError); !ok { + t.Errorf("expected type error for Unmarshal(%q, type %T): got %T", + item.src, item.dest, err) + } + } +} + +var unmarshalSyntaxTests = []string{ + "tru", + "fals", + "nul", + "123e", + `"hello`, + `[1,2,3`, + `{"key":1`, + `{"key":1,`, +} + +func TestUnmarshalSyntax(t *testing.T) { + var x interface{} + for _, src := range unmarshalSyntaxTests { + err := Unmarshal([]byte(src), &x) + if _, ok := err.(*SyntaxError); !ok { + t.Errorf("expected syntax error for Unmarshal(%q): got %T", src, err) + } + } +} + +// Test handling of unexported fields that should be ignored. +// Issue 4660 +type unexportedFields struct { + Name string + m map[string]interface{} `json:"-"` + m2 map[string]interface{} `json:"abcd"` +} + +func TestUnmarshalUnexported(t *testing.T) { + input := `{"Name": "Bob", "m": {"x": 123}, "m2": {"y": 456}, "abcd": {"z": 789}}` + want := &unexportedFields{Name: "Bob"} + + out := &unexportedFields{} + err := Unmarshal([]byte(input), out) + if err != nil { + t.Errorf("got error %v, expected nil", err) + } + if !reflect.DeepEqual(out, want) { + t.Errorf("got %q, want %q", out, want) + } +} + +// Time3339 is a time.Time which encodes to and from JSON +// as an RFC 3339 time in UTC. 
+type Time3339 time.Time + +func (t *Time3339) UnmarshalJSON(b []byte) error { + if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' { + return fmt.Errorf("types: failed to unmarshal non-string value %q as an RFC 3339 time", b) + } + tm, err := time.Parse(time.RFC3339, string(b[1:len(b)-1])) + if err != nil { + return err + } + *t = Time3339(tm) + return nil +} + +func TestUnmarshalJSONLiteralError(t *testing.T) { + var t3 Time3339 + err := Unmarshal([]byte(`"0000-00-00T00:00:00Z"`), &t3) + if err == nil { + t.Fatalf("expected error; got time %v", time.Time(t3)) + } + if !strings.Contains(err.Error(), "range") { + t.Errorf("got err = %v; want out of range error", err) + } +} + +// Test that extra object elements in an array do not result in a +// "data changing underfoot" error. +// Issue 3717 +func TestSkipArrayObjects(t *testing.T) { + json := `[{}]` + var dest [0]interface{} + + err := Unmarshal([]byte(json), &dest) + if err != nil { + t.Errorf("got error %q, want nil", err) + } +} + +// Test semantics of pre-filled struct fields and pre-filled map fields. +// Issue 4900. +func TestPrefilled(t *testing.T) { + ptrToMap := func(m map[string]interface{}) *map[string]interface{} { return &m } + + // Values here change, cannot reuse table across runs. 
+ var prefillTests = []struct { + in string + ptr interface{} + out interface{} + }{ + { + in: `{"X": 1, "Y": 2}`, + ptr: &XYZ{X: float32(3), Y: int16(4), Z: 1.5}, + out: &XYZ{X: float64(1), Y: float64(2), Z: 1.5}, + }, + { + in: `{"X": 1, "Y": 2}`, + ptr: ptrToMap(map[string]interface{}{"X": float32(3), "Y": int16(4), "Z": 1.5}), + out: ptrToMap(map[string]interface{}{"X": float64(1), "Y": float64(2), "Z": 1.5}), + }, + } + + for _, tt := range prefillTests { + ptrstr := fmt.Sprintf("%v", tt.ptr) + err := Unmarshal([]byte(tt.in), tt.ptr) // tt.ptr edited here + if err != nil { + t.Errorf("Unmarshal: %v", err) + } + if !reflect.DeepEqual(tt.ptr, tt.out) { + t.Errorf("Unmarshal(%#q, %s): have %v, want %v", tt.in, ptrstr, tt.ptr, tt.out) + } + } +} + +var invalidUnmarshalTests = []struct { + v interface{} + want string +}{ + {nil, "json: Unmarshal(nil)"}, + {struct{}{}, "json: Unmarshal(non-pointer struct {})"}, + {(*int)(nil), "json: Unmarshal(nil *int)"}, +} + +func TestInvalidUnmarshal(t *testing.T) { + buf := []byte(`{"a":"1"}`) + for _, tt := range invalidUnmarshalTests { + err := Unmarshal(buf, tt.v) + if err == nil { + t.Errorf("Unmarshal expecting error, got nil") + continue + } + if got := err.Error(); got != tt.want { + t.Errorf("Unmarshal = %q; want %q", got, tt.want) + } + } +} + +var invalidUnmarshalTextTests = []struct { + v interface{} + want string +}{ + {nil, "json: Unmarshal(nil)"}, + {struct{}{}, "json: Unmarshal(non-pointer struct {})"}, + {(*int)(nil), "json: Unmarshal(nil *int)"}, + {new(net.IP), "json: cannot unmarshal string into Go value of type *net.IP"}, +} + +func TestInvalidUnmarshalText(t *testing.T) { + buf := []byte(`123`) + for _, tt := range invalidUnmarshalTextTests { + err := Unmarshal(buf, tt.v) + if err == nil { + t.Errorf("Unmarshal expecting error, got nil") + continue + } + if got := err.Error(); got != tt.want { + t.Errorf("Unmarshal = %q; want %q", got, tt.want) + } + } +} + +// Test that string option is ignored for invalid 
types. +// Issue 9812. +func TestInvalidStringOption(t *testing.T) { + num := 0 + item := struct { + T time.Time `json:",string"` + M map[string]string `json:",string"` + S []string `json:",string"` + A [1]string `json:",string"` + I interface{} `json:",string"` + P *int `json:",string"` + }{M: make(map[string]string), S: make([]string, 0), I: num, P: &num} + + data, err := Marshal(item) + if err != nil { + t.Fatalf("Marshal: %v", err) + } + + err = Unmarshal(data, &item) + if err != nil { + t.Fatalf("Unmarshal: %v", err) + } +} diff -Nru juju-core-2.0~beta15/src/gopkg.in/mgo.v2/internal/json/encode.go juju-core-2.0.0/src/gopkg.in/mgo.v2/internal/json/encode.go --- juju-core-2.0~beta15/src/gopkg.in/mgo.v2/internal/json/encode.go 1970-01-01 00:00:00.000000000 +0000 +++ juju-core-2.0.0/src/gopkg.in/mgo.v2/internal/json/encode.go 2016-10-13 14:32:16.000000000 +0000 @@ -0,0 +1,1256 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package json implements encoding and decoding of JSON as defined in +// RFC 4627. The mapping between JSON and Go values is described +// in the documentation for the Marshal and Unmarshal functions. +// +// See "JSON and Go" for an introduction to this package: +// https://golang.org/doc/articles/json_and_go.html +package json + +import ( + "bytes" + "encoding" + "encoding/base64" + "fmt" + "math" + "reflect" + "runtime" + "sort" + "strconv" + "strings" + "sync" + "unicode" + "unicode/utf8" +) + +// Marshal returns the JSON encoding of v. +// +// Marshal traverses the value v recursively. +// If an encountered value implements the Marshaler interface +// and is not a nil pointer, Marshal calls its MarshalJSON method +// to produce JSON. If no MarshalJSON method is present but the +// value implements encoding.TextMarshaler instead, Marshal calls +// its MarshalText method. 
+// The nil pointer exception is not strictly necessary +// but mimics a similar, necessary exception in the behavior of +// UnmarshalJSON. +// +// Otherwise, Marshal uses the following type-dependent default encodings: +// +// Boolean values encode as JSON booleans. +// +// Floating point, integer, and Number values encode as JSON numbers. +// +// String values encode as JSON strings coerced to valid UTF-8, +// replacing invalid bytes with the Unicode replacement rune. +// The angle brackets "<" and ">" are escaped to "\u003c" and "\u003e" +// to keep some browsers from misinterpreting JSON output as HTML. +// Ampersand "&" is also escaped to "\u0026" for the same reason. +// This escaping can be disabled using an Encoder with DisableHTMLEscaping. +// +// Array and slice values encode as JSON arrays, except that +// []byte encodes as a base64-encoded string, and a nil slice +// encodes as the null JSON value. +// +// Struct values encode as JSON objects. Each exported struct field +// becomes a member of the object unless +// - the field's tag is "-", or +// - the field is empty and its tag specifies the "omitempty" option. +// The empty values are false, 0, any +// nil pointer or interface value, and any array, slice, map, or string of +// length zero. The object's default key string is the struct field name +// but can be specified in the struct field's tag value. The "json" key in +// the struct field's tag value is the key name, followed by an optional comma +// and options. Examples: +// +// // Field is ignored by this package. +// Field int `json:"-"` +// +// // Field appears in JSON as key "myName". +// Field int `json:"myName"` +// +// // Field appears in JSON as key "myName" and +// // the field is omitted from the object if its value is empty, +// // as defined above. +// Field int `json:"myName,omitempty"` +// +// // Field appears in JSON as key "Field" (the default), but +// // the field is skipped if empty. +// // Note the leading comma. 
+// Field int `json:",omitempty"` +// +// The "string" option signals that a field is stored as JSON inside a +// JSON-encoded string. It applies only to fields of string, floating point, +// integer, or boolean types. This extra level of encoding is sometimes used +// when communicating with JavaScript programs: +// +// Int64String int64 `json:",string"` +// +// The key name will be used if it's a non-empty string consisting of +// only Unicode letters, digits, dollar signs, percent signs, hyphens, +// underscores and slashes. +// +// Anonymous struct fields are usually marshaled as if their inner exported fields +// were fields in the outer struct, subject to the usual Go visibility rules amended +// as described in the next paragraph. +// An anonymous struct field with a name given in its JSON tag is treated as +// having that name, rather than being anonymous. +// An anonymous struct field of interface type is treated the same as having +// that type as its name, rather than being anonymous. +// +// The Go visibility rules for struct fields are amended for JSON when +// deciding which field to marshal or unmarshal. If there are +// multiple fields at the same level, and that level is the least +// nested (and would therefore be the nesting level selected by the +// usual Go rules), the following extra rules apply: +// +// 1) Of those fields, if any are JSON-tagged, only tagged fields are considered, +// even if there are multiple untagged fields that would otherwise conflict. +// 2) If there is exactly one field (tagged or not according to the first rule), that is selected. +// 3) Otherwise there are multiple fields, and all are ignored; no error occurs. +// +// Handling of anonymous struct fields is new in Go 1.1. +// Prior to Go 1.1, anonymous struct fields were ignored. To force ignoring of +// an anonymous struct field in both current and earlier versions, give the field +// a JSON tag of "-". +// +// Map values encode as JSON objects. 
The map's key type must either be a string +// or implement encoding.TextMarshaler. The map keys are used as JSON object +// keys, subject to the UTF-8 coercion described for string values above. +// +// Pointer values encode as the value pointed to. +// A nil pointer encodes as the null JSON value. +// +// Interface values encode as the value contained in the interface. +// A nil interface value encodes as the null JSON value. +// +// Channel, complex, and function values cannot be encoded in JSON. +// Attempting to encode such a value causes Marshal to return +// an UnsupportedTypeError. +// +// JSON cannot represent cyclic data structures and Marshal does not +// handle them. Passing cyclic structures to Marshal will result in +// an infinite recursion. +// +func Marshal(v interface{}) ([]byte, error) { + e := &encodeState{} + err := e.marshal(v, encOpts{escapeHTML: true}) + if err != nil { + return nil, err + } + return e.Bytes(), nil +} + +// MarshalIndent is like Marshal but applies Indent to format the output. +func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) { + b, err := Marshal(v) + if err != nil { + return nil, err + } + var buf bytes.Buffer + err = Indent(&buf, b, prefix, indent) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +// HTMLEscape appends to dst the JSON-encoded src with <, >, &, U+2028 and U+2029 +// characters inside string literals changed to \u003c, \u003e, \u0026, \u2028, \u2029 +// so that the JSON will be safe to embed inside HTML